diff --git "a/3670.jsonl" "b/3670.jsonl" new file mode 100644--- /dev/null +++ "b/3670.jsonl" @@ -0,0 +1,1095 @@ +{"seq_id":"8292072357","text":"def multiples(n1, n2):\n result = 0\n i = 0\n count = 1000\n while i < count :\n if(i % n1 == 0 or i % n2 == 0):\n result = result + i\n i = i + 1\n return result\n\nprint(multiples(3, 5))","repo_name":"Sander011/PythonAssignments","sub_path":"practicalPython1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43224213381","text":"import argparse\nimport logging\nimport os\nimport random\nimport sys\nfrom typing import DefaultDict\nimport warnings\n\nwarnings.filterwarnings('ignore', category=FutureWarning)\n\nimport numpy as np\nimport torch\nimport wandb\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), \"FedML\")))\nsys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), \"cifar_gm\")))\n# sys.path.insert(1, os.getcwd())\n# sys.path.insert(2, os.path.abspath(os.path.join(os.getcwd(), \"FedML/fedml_api\")))\n# from graph_matching_based_alignment.FedML import *\n\nfrom fedml_api.data_preprocessing.cifar10.data_loader import load_partition_data_cifar10\nfrom fedml_api.standalone.fedavg.fedavg_api import FedAvgAPI\nfrom fedml_api.standalone.fedavg.my_model_trainer_classification import MyModelTrainer as MyModelTrainerCLS\n\n\n# my own model:\nimport model_gm\n\n\ndef add_args(parser):\n \"\"\"\n parser : argparse.ArgumentParser\n return a parser added with args required by fit\n\n each model trained for [epochs] epochs per communication\n the total epochs trained, therefore, is [comm_round * epochs]\n\n \"\"\"\n # Training settings\n parser.add_argument('--model-name', type=str, default='vgg11_nobias', metavar='N',\n help='neural network used in training')\n parser.add_argument('--second-model-name', type=str, default=None, action='store', help='name of second model!')\n\n parser.add_argument('--dataset', type=str, default='cifar10', metavar='N',\n help='dataset used for training')\n\n parser.add_argument('--data_dir', type=str, default='./FedML/data/cifar10',\n help='data directory')\n # adjust\n parser.add_argument('--partition_method', type=str, default='hetero', metavar='N',\n help='how to partition the dataset on local workers (default: hetero, namely non-iid)')\n\n parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA',\n help='partition alpha (default: 0.5)')\n\n parser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='input batch size for training (default: 64)')\n\n parser.add_argument('--client_optimizer', type=str, default='adam',\n help='SGD with momentum; adam')\n\n parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',\n help='learning rate (default: 0.001)')\n\n parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001)\n # adjust\n parser.add_argument('--epochs', type=int, default=10, metavar='EP',\n help='how many epochs will be trained locally')\n\n parser.add_argument('--client_num_in_total', type=int, default=5, metavar='NN',\n help='number of workers in a distributed cluster')\n\n parser.add_argument('--client_num_per_round', type=int, default=5, metavar='NN',\n help='number of workers')\n # adjust\n parser.add_argument('--comm_round', type=int, default=50,\n help='how many round of communications we shoud use')\n # adjust\n parser.add_argument('--frequency_of_the_test', type=int, default=1,\n 
help='the frequency of the algorithms')\n\n parser.add_argument('--gpu-id', type=int, default=0,\n help='gpu')\n\n parser.add_argument('--ci', type=int, default=0,\n help='CI')\n\n parser.add_argument('--fusion_mode', type=str, default='fusion_gamf_multi',\n help='the method used to fuse different models, [traditional, ot, fusion, fusion_gamf]')\n\n parser.add_argument('--reg', default=1e-2, type=float, help='regularization strength for sinkhorn (default: 1e-2)')\n parser.add_argument('--reg-m', default=1e-3, type=float,\n help='regularization strength for marginals in unbalanced sinkhorn (default: 1e-3)')\n parser.add_argument('--ground-metric', type=str, default='euclidean', choices=['euclidean', 'cosine'],\n help='ground metric for OT calculations.')\n parser.add_argument('--ground-metric-normalize', type=str, default='log',\n choices=['log', 'max', 'none', 'median', 'mean'],\n help='ground metric normalization to consider! ')\n parser.add_argument('--not-squared', action='store_true', help='dont square the ground metric')\n parser.add_argument('--clip-gm', action='store_true', help='to clip ground metric')\n parser.add_argument('--clip-min', action='store', type=float, default=0,\n help='Value for clip-min for gm')\n parser.add_argument('--clip-max', action='store', type=float, default=5,\n help='Value for clip-max for gm')\n parser.add_argument('--tmap-stats', action='store_true', help='print tmap stats')\n parser.add_argument('--ensemble-step', type=float, default=0.5, action='store',\n help='rate of adjustment towards the second model')\n\n parser.add_argument('--ground-metric-eff', action='store_true',\n help='memory efficient calculation of ground metric')\n\n parser.add_argument('--weight-stats', action='store_true', help='log neuron-wise weight vector stats.')\n parser.add_argument('--sinkhorn-type', type=str, default='normal',\n choices=['normal', 'stabilized', 'epsilon', 'gpu'],\n help='Type of sinkhorn algorithm to consider.')\n parser.add_argument('--geom-ensemble-type', type=str, default='wts', choices=['wts', 'acts'],\n help='Ensemble based on weights (wts) or activations (acts).')\n parser.add_argument('--act-bug', action='store_true',\n help='simulate the bug in ground metric calc for act based averaging')\n parser.add_argument('--standardize-acts', action='store_true',\n help='subtract mean and divide by standard deviation across the samples for use in act based alignment')\n parser.add_argument('--transform-acts', action='store_true',\n help='transform activations by transport map for later use in bi_avg mode ')\n parser.add_argument('--center-acts', action='store_true',\n help='subtract mean only across the samples for use in act based alignment')\n parser.add_argument('--prelu-acts', action='store_true',\n help='do activation based alignment based on pre-relu acts')\n parser.add_argument('--pool-acts', action='store_true',\n help='do activation based alignment based on pooling acts')\n parser.add_argument('--pool-relu', action='store_true',\n help='do relu first before pooling acts')\n parser.add_argument('--normalize-acts', action='store_true',\n help='normalize the vector of activations')\n parser.add_argument('--normalize-wts', action='store_true',\n help='normalize the vector of weights')\n parser.add_argument('--gromov', action='store_true', help='use gromov wasserstein distance and barycenters')\n parser.add_argument('--gromov-loss', type=str, default='square_loss', action='store',\n choices=['square_loss', 'kl_loss'],\n help=\"choice of loss function for 
gromov wasserstein computations\")\n parser.add_argument('--tensorboard-root', action='store', default=\"./tensorboard\", type=str,\n help='Root directory of tensorboard logs')\n parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard to plot the loss values')\n\n parser.add_argument('--same-model', action='store', type=int, default=-1,\n help='Index of the same model to average with itself')\n parser.add_argument('--dist-normalize', action='store_true', help='normalize distances by act num samples')\n parser.add_argument('--update-acts', action='store_true', help='update acts during the alignment of model0')\n parser.add_argument('--past-correction', action='store_true',\n help='use the current weights aligned by multiplying with past transport map')\n parser.add_argument('--partial-reshape', action='store_true',\n help='partially reshape the conv layers in ground metric calculation')\n parser.add_argument('--choice', type=str, default='0 2 4 6 8', action='store',\n help=\"choice of how to partition the labels\")\n parser.add_argument('--diff-init', action='store_true',\n help='different initialization for models in data separated mode')\n\n return parser\n\n\ndef load_data(args, dataset_name):\n # check if the centralized training is enabled\n centralized = True if args.client_num_in_total == 1 else False\n\n # check if the full-batch training is enabled\n args_batch_size = args.batch_size\n if args.batch_size <= 0:\n full_batch = True\n args.batch_size = 128 # temporary batch size\n else:\n full_batch = False\n\n data_loader = load_partition_data_cifar10\n train_data_num, test_data_num, train_data_global, test_data_global, \\\n train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \\\n class_num = data_loader(args.dataset, args.data_dir, args.partition_method,\n args.partition_alpha, args.client_num_in_total, args.batch_size)\n\n if centralized:\n train_data_local_num_dict = {\n 0: sum(user_train_data_num for user_train_data_num in train_data_local_num_dict.values())}\n train_data_local_dict = {\n 0: [batch for cid in sorted(train_data_local_dict.keys()) for batch in train_data_local_dict[cid]]}\n test_data_local_dict = {\n 0: [batch for cid in sorted(test_data_local_dict.keys()) for batch in test_data_local_dict[cid]]}\n args.client_num_in_total = 1\n\n if full_batch:\n train_data_global = combine_batches(train_data_global)\n test_data_global = combine_batches(test_data_global)\n train_data_local_dict = {cid: combine_batches(train_data_local_dict[cid]) for cid in\n train_data_local_dict.keys()}\n test_data_local_dict = {cid: combine_batches(test_data_local_dict[cid]) for cid in test_data_local_dict.keys()}\n args.batch_size = args_batch_size\n\n dataset = [train_data_num, test_data_num, train_data_global, test_data_global,\n train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num]\n return dataset\n\n\ndef combine_batches(batches):\n full_x = torch.from_numpy(np.asarray([])).float()\n full_y = torch.from_numpy(np.asarray([])).long()\n for (batched_x, batched_y) in batches:\n full_x = torch.cat((full_x, batched_x), 0)\n full_y = torch.cat((full_y, batched_y), 0)\n return [(full_x, full_y)]\n\n\ndef create_model(args, model_name, output_dim):\n logging.info(\"create_model. 
model_name = %s, output_dim = %s\" % (model_name, output_dim))\n model = model_gm.get_model_from_name(name=model_name, args=args)\n return model\n\n\ndef custom_model_trainer(args, model):\n return MyModelTrainerCLS(model)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n parser = add_args(argparse.ArgumentParser(description='FedAvg-standalone'))\n args = parser.parse_args()\n logger.info(args)\n device = torch.device(\"cuda:\" + str(args.gpu_id) if torch.cuda.is_available() else \"cpu\")\n logger.info(device)\n\n wandb.init(\n project=\"fedml-cifar10\",\n name=\"FedAVG-\" + str(args.fusion_mode) + \"-r\" + str(args.comm_round) + \"-e\" + str(\n args.epochs) + \"-lr\" + str(args.lr) + \"-c\" + str(args.client_num_per_round),\n config=args\n )\n\n # Set the random seed. The np.random seed determines the dataset partition.\n # The torch_manual_seed determines the initial weight.\n # We fix these two, so that we can reproduce the result.\n random.seed(0)\n np.random.seed(0)\n torch.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n torch.backends.cudnn.deterministic = True\n\n # load data\n dataset = load_data(args, args.dataset)\n\n # create model.\n # Note if the model is DNN (e.g., ResNet), the training will be very slow.\n # In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg)\n model = create_model(args, model_name=args.model_name, output_dim=dataset[7])\n model_trainer = custom_model_trainer(args, model)\n logging.info(model)\n\n fedavgAPI = FedAvgAPI(dataset, device, args, model_trainer)\n fedavgAPI.train() # _aggregate: aggregate the parameters\n","repo_name":"Thinklab-SJTU/GAMF","sub_path":"federated_learning_with_FedML.py","file_name":"federated_learning_with_FedML.py","file_ext":"py","file_size_in_byte":12787,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"8225035035","text":"import tensorflow as tf\n\n# Definir constantes\nvalor1 = tf.constant(2)\nvalor2 = tf.constant(3)\n\n# Soma\nsoma = valor1 + valor2\n\n# Resultado diretamente com eager execution\nresultado = soma.numpy()\nprint(resultado)\n\n\n# Definir variáveis\nvalor1 = tf.Variable(10, name='valor1')\nvalor2 = tf.Variable(3, name='valor2')\n\n# Soma\nsoma = valor1 + valor2\n\n# Executar o cálculo diretamente\nresultado = soma.numpy()\nprint(resultado)","repo_name":"eabs86/deep_learning","sub_path":"atividades/variable_tf2.py","file_name":"variable_tf2.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39602838542","text":"#backmap to raw_dataset\n\nimport pandas as pd\nimport csv\nfrom configobj import ConfigObj\n\nconfig = ConfigObj('config.ini')\nchunksize = int(config['chunksize'])\n\nN = int(config['wc_l'])\n\nrawfile = config['rawfile']\ntag_files_path = config['tag_files_path']\nfinal_outfile = config['final_outfile']\nsepr = config['input_file_delimiter']\n\nfiles=[]\n\nfor i in range(0,N,chunksize):\n files.append(tag_files_path+\"yLabels_out_\"+str(i)+\".psv\")\n\nfor i, x in enumerate(pd.read_csv(rawfile, sep = sepr, index_col=['ID'], chunksize=chunksize, error_bad_lines = False, quoting=csv.QUOTE_NONE)):\n #added try for avoid errors if want seelct non exist file in list files\n #print(i,x)\n print(\"back mapping tags to raw data chunk \"+str(i))\n try:\n df = pd.read_csv(files[i], index_col=['id'], sep = sepr,error_bad_lines = 
False,quoting=csv.QUOTE_NONE)\n #df1 = pd.concat([x, df['tag']], axis=1)\n df1 = pd.concat([x, df['channel'], df['action'], df['merchant'], df['location']], axis=1)\n #print (df1)\n #in first loop create header in output\n if i == 0:\n pd.DataFrame(columns=df1.columns).to_csv(final_outfile)\n \n #remove ','(commas) from raw files as they are used as delimiters for final output file\n df1 = df1.astype(str).replace(',',' ')\n #append data to output file\n df1.to_csv(final_outfile, mode='a', header=False)\n \n except IndexError as e:\n print ('no files in list')\n","repo_name":"aman3loq/transaction_categorization","sub_path":"backmapping.py","file_name":"backmapping.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18608889555","text":"import pandas as pd\n\ncounties = ['Antrim','Armagh','Carlow','Cavan','Clare','Cork','Derry','Donegal','Down','Dublin','Fermanagh','Galway',\n 'Kerry','Kildare','Kilkenny','Laois','Leitrim','Limerick','Longford','Louth','Mayo','Meath','Monaghan',\n 'Offaly','Roscommon','Sligo','Tipperary','Tyrone','Waterford','Westmeath','Wexford','Wicklow']\n\n# Create a string with the value 'IRELAND'\ncountry = \"IRELAND\"\n\n# Create new dictionary\nireland = {'Country':country, 'County':counties}\n\n# Create a DataFrame from the dictionary\ndf = pd.DataFrame(ireland)\n\nprint(df)\n","repo_name":"tstaunton/python_pandas","sub_path":"02 Introduction to Pandas/DataFrames/s2_9_dataframes_from_broadcasting.py","file_name":"s2_9_dataframes_from_broadcasting.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"1178224960","text":"\nfrom datetime import datetime\nimport random\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework.authtoken.models import Token as TokenModel\nfrom django.contrib.auth import get_user_model\n\ndef create_token_for_user(user):\n token, _ = TokenModel.objects.get_or_create(user=user)\n user.last_login = datetime.now()\n user.save()\n return token\n\ndef destroy_token_for_user(user):\n try:\n TokenModel.objects.get(user=user).delete()\n except ObjectDoesNotExist:\n pass\n\n\ndef get_similer_username(username: str, existing_set: set[str] = None) -> list[str]: \n \n if existing_set is None or len(existing_set) == 0:\n existing = get_user_model().objects.only(\"username\")\n existing_set = set()\n for user in existing:\n existing_set.add(user.username)\n suggested_names = []\n for _ in range(0,10):\n if len(suggested_names) > 2:\n return suggested_names\n new_username = generate_username(username)\n if new_username in existing_set:\n continue\n suggested_names.append(new_username)\n \n for _ in range(0,10):\n if len(suggested_names) > 2:\n return suggested_names\n new_username = generate_username(username, append_at_start=True)\n if new_username in existing_set:\n continue\n suggested_names.append(new_username)\n \n for _ in range(0,10):\n if len(suggested_names) > 2:\n return suggested_names\n new_username = generate_username(username, append_at_mid=True)\n if new_username in existing_set:\n continue\n suggested_names.append(new_username)\n\n while len(suggested_names) < 3:\n ran_val = random.randint(0,100)\n if ran_val < 34:\n new_username = generate_username(username)\n elif ran_val > 66:\n new_username = generate_username(username, append_at_start=True)\n else:\n new_username = generate_username(username, append_at_mid=True)\n 
if new_username in existing_set:\n continue\n suggested_names.append(new_username)\n return suggested_names\n \n \n\n\n \ndef generate_username(name_of_user: str, **kwargs) -> str:\n minimum_specia_char = 2\n minimum_digits = 2\n min_len_of_username = 8\n special_chars = ['@','.','+','-', '_']\n \n username = \"\"\n \n name_of_user = \"\".join(name_of_user.split())\n \n name_of_user = name_of_user.lower()\n \n minimum_char_from_name = min_len_of_username-minimum_digits-minimum_specia_char\n \n for i in range(random.randint(minimum_char_from_name,len(name_of_user))):\n username += name_of_user[i]\n \n temp_list = []\n for i in range(minimum_digits):\n temp_list.append(str(random.randint(0,9)))\n \n for i in range(minimum_specia_char):\n temp_list.append(special_chars[random.randint(0,len(special_chars)-1)])\n \n random.shuffle(temp_list)\n temp = \"\".join(temp_list)\n if kwargs.get(\"append_at_start\"):\n username = temp + username\n elif kwargs.get(\"append_at_mid\"):\n username1 = username[:len(username)//2]\n username2 = username[len(username)//2:]\n username = username1 + temp + username2\n else:\n username += temp\n \n return username","repo_name":"sshekhas/todolist-django","sub_path":"authentication/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3252427470","text":"import subprocess\nimport requests\n\nurls = [\"https://www.google.com/\", \"https://www.linkedin.com/\"]\n\nappleScriptCommand = '''\non run argv\n display notification (item 2 of argv) with title (item 1 of argv)\nend run\n'''\n\ndef notify(title, text):\n subprocess.call(['osascript', '-e', appleScriptCommand, title, text])\n\ndef url_health_check(url_list):\n notification_message = \"\"\n for url in url_list:\n r = requests.get(url)\n if str(r.status_code) != \"200\":\n notification_message += str(r.status_code) + \" \" + url + \"\\n\"\n notify(\"Site Health Check Alert\", notification_message)\n\nurl_health_check(urls)\n","repo_name":"mattj3/mac_alert","sub_path":"mac_alert.py","file_name":"mac_alert.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20196428120","text":"from PyQt5 import QtGui\nimport numpy as np\nfrom ...pyqtgraph.pyqtgraph import opengl as gl\nfrom ...pyqtgraph.pyqtgraph.opengl import GLMeshItem\nfrom ...pyqtgraph.pyqtgraph.opengl.MeshData import MeshData\nfrom ...models.parameter_class import ParameterHandler \n\nfrom ..plot_geometries.shaders import ShaderConstructor\n\nfrom .ray_intersec_lib import rayTriangleIntersection\nfrom .ray_intersec_lib import closestPointOnLine\nfrom .ray_intersec_lib import checkBoundingBox\nfrom .ray_intersec_lib import retrievePositionSpheres\n\nclass DistributionRayHandler(ParameterHandler): \n ''' \n This will be the main data class purposed\n to be inherited by variations with different\n variations.\n '''\n def __init__(self):\n ParameterHandler.__init__(self,'Ray handler')\n self.pointer_elements = []\n self.addChild(ShaderConstructor())\n self._initialize()\n self.reset()\n\n def _initialize(self):\n '''\n '''\n self.addParameter(\n 'Active', True, \n method = self._setActive)\n self.addParameter(\n 'Color', QtGui.QColor('red'), \n method = self.dispatchCoordinate)\n self.addParameter(\n 'Offset', 0.001, \n method = self.dispatchCoordinate)\n self.addParameter(\n 'GL options', 'opaque', \n choices = 
['opaque','translucent', 'additive'],\n            method = self.dispatchCoordinate)\n        self.addParameter(\n            'Antialiasing', True,\n            method = self.dispatchCoordinate)\n\n    def drawGL(self,target):\n        '''\n        Dummy draw that sets the target of the \n        pointer element\n        '''\n        self.default_target = target\n\n    def reset(self):\n        '''\n        reprocess pointer\n        '''\n        self.point = None\n        self._idx = None\n        self.dispatchCoordinate()\n\n    def processRay(self, ray, dispatch = True):\n        '''\n        Process an input ray by the \n        canvas widget and perform necessary\n        operations\n        '''\n        self._destroyPointer()\n\n        if hasattr(self.parent().childFromName('Distribution'), 'draw_items'):\n            temp = self.parent().transformer.getTransform().inverted()[0]\n            transform = np.reshape(np.array(temp.data()),(4,4)).transpose()\n            new_ray = [np.dot(transform,np.hstack((e,1)))[:3] for e in ray]\n            intersec = checkBoundingBox(new_ray, self.parent().childFromName('Data'))\n            \n            if not intersec is None:\n                self.point, self._idx = retrievePositionSpheres(\n                    new_ray, \n                    self.parent().childFromName('Data'))\n            else:\n                self.point = None\n                self._idx = None\n\n    def _setActive(self):\n        '''\n        '''\n        if not self['Active']:\n            self._destroyPointer()\n\n    def _destroyPointer(self):\n        '''\n        '''\n        for element in self.pointer_elements:\n            if element in self.default_target.view.items:\n                self.default_target.view.removeItem(element)\n        self.pointer_elements = []\n\n    def dispatchCoordinate(self):\n        '''\n        '''\n        if self['Active'] and not self.point is None:\n            self._drawContainer()\n        else:\n            self._destroyPointer()\n\n    def _drawContainer(self):\n        '''\n        '''\n        self._destroyPointer()\n\n        data = self.parent().childFromName('Data').getData()\n\n        mesh = MeshData.sphere(100,100, radius=data[0][self._idx,3]+self['Offset'])\n        kwargs = {}\n        kwargs['vertexes'] = mesh._vertexes+self.point\n        kwargs['faces'] = mesh._faces\n        kwargs['color'] = self['Color']\n        kwargs['antialias'] = self['Antialiasing']\n\n        self.pointer_elements.append(GLMeshItem(**kwargs))\n        self.pointer_elements[-1].setGLOptions(self['GL options'])\n        self.default_target.view.addItem(self.pointer_elements[-1])\n        self.childFromName('Shader').runShader()\n\n    def setColor(self):\n        '''\n        The preference implementation requires the ability to set\n        colors without redrawing the entire data. As such we will \n        here allow the setting of colors either through the \n        color map or through shaders.\n        '''\n        colors = self.parent()._plot_data.getData()[1]\n        for i,element in enumerate(self.pointer_elements):\n            element.setShader(self.childFromName('Shader').getShader('light'))\n","repo_name":"AlexanderSchober/simpleplot_qt","sub_path":"simpleplot/ploting/plot_ray_handlers/ray_distribution.py","file_name":"ray_distribution.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39735140893","text":"\"\"\"\n.. _l-example-torch-ort-factory:\n\nONNX in a torch function\n========================\n\nThe ONNX graph used in this example is not really\ninteresting but it shows how to create a custom autograd function\nfollowing torch API (`Extending Pytorch\n`_).\nMethods forward and backward are implemented with ONNX\nand :epkg:`onnxruntime-training`.\n\n.. 
contents::\n :local:\n\nThe ONNX Graph\n++++++++++++++\n\n\"\"\"\nimport pprint\nimport logging\nimport numpy\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom skl2onnx.common.data_types import FloatTensorType\nfrom skl2onnx.algebra.onnx_ops import (\n OnnxSigmoid, OnnxMatMul, OnnxAdd)\nfrom tqdm import tqdm\nfrom mlprodict.onnx_tools.onnx_manipulations import onnx_rename_names\nfrom mlprodict.plotting.plotting_onnx import plot_onnx\nimport torch\nfrom deeponnxcustom.onnxtorch.torchort import TorchOrtFactory\n\n\ndef from_numpy(v, device=None, requires_grad=False):\n \"\"\"\n Convers a numpy array into a torch array and\n sets *device* and *requires_grad*.\n \"\"\"\n v = torch.from_numpy(v)\n if device is not None:\n v = v.to(device)\n v.requires_grad_(requires_grad)\n return v\n\n\ndef create_onnx_graph(N, d_in=3, d_out=2, n_loops=1, opv=14):\n \"\"\"\n Returns a weird ONNX graph and its weights.\n \"\"\"\n var = [('X', FloatTensorType([N, d_in]))]\n\n sum_node = None\n weights_values = []\n for i in range(n_loops):\n cst = numpy.random.randn(d_in, 1).astype(numpy.float32) / (i + 1)\n weights_values.append(cst)\n mul = OnnxMatMul(var[0], cst, op_version=opv)\n tanh = OnnxSigmoid(mul, op_version=opv)\n if sum_node is None:\n sum_node = tanh\n else:\n sum_node = OnnxAdd(sum_node, tanh, op_version=opv)\n\n cst_mul = numpy.random.randn(1, d_out).astype(numpy.float32)\n weights_values.append(cst_mul)\n mul = OnnxMatMul(sum_node, cst_mul, op_version=opv)\n\n cst_add = numpy.random.randn(1, d_out).astype(numpy.float32)\n weights_values.append(cst_add)\n final = OnnxAdd(mul, cst_add, op_version=opv, output_names=['Y'])\n\n onx = final.to_onnx(\n var, target_opset=opv, outputs=[('Y', FloatTensorType())])\n\n weights_name = [i.name for i in onx.graph.initializer]\n new_names = ['W%03d' % i for i in range(len(weights_name))]\n onx = onnx_rename_names(onx, replace=dict(zip(weights_name, new_names)))\n weights = list(zip(new_names, weights_values))\n return onx, weights\n\n#####################################\n# Let's see how it looks like.\n\n\nN, d_in, d_out = 5, 3, 2\nenable_logging = False\nonx, weights = create_onnx_graph(N)\n\nwith open(\"plot_torch_ort.onnx\", \"wb\") as f:\n f.write(onx.SerializeToString())\nprint(\"weights to train:\", [(w[0], w[1].shape) for w in weights])\n\nax = plot_onnx(onx)\nax.set_title(\"Model to train\")\n\n\n###############################################\n# Wraps ONNX as a torch.autograd.Function\n# +++++++++++++++++++++++++++++++++++++++\n#\n# Class :class:`TorchOrtFactory\n# `\n# uses :epkg:`onnxruntime-training` to build the gradient with ONNX,\n# add calls it following this logic:\n#\n# ::\n#\n# class CustomClass(torch.autograd.Function):\n#\n# @staticmethod\n# def forward(ctx, *input):\n# ctx.save_for_backward(*input)\n# # inference with ONNX\n# return ...\n#\n# @staticmethod\n# def backward(ctx, *grad_output):\n# input, = ctx.saved_tensors\n# # gradient with ONNX = inference with the gradient graph\n# return ...\n#\n# The logic is hidden in :meth:`TorchOrtFactory.create_class\n# `.\n\nfact = TorchOrtFactory(onx, [w[0] for w in weights])\n\nif enable_logging:\n # Logging displays informations about the intermediate steps.\n logger = logging.getLogger('deeponnxcustom')\n logger.setLevel(logging.DEBUG)\n logging.basicConfig(level=logging.DEBUG)\n\ncls = fact.create_class(keep_models=True, enable_logging=enable_logging)\nprint(cls)\n\n##########################################\n# It produces the following inference graphs.\n# The left one is 
the original one. The model on the left\n# is the same except initializer are also inputs.\n# If the input is missing, the initializer is considered\n# as a default value.\n\nfix, ax = plt.subplots(1, 2, figsize=(15, 10))\nplot_onnx(onx, ax=ax[0])\nplot_onnx(cls._optimized_pre_grad_model, ax=ax[1])\n\n##########################################\n# And the gradient graph. It has the same inputs\n# the previous graph on the right and has an output for every\n# trained parameter.\n\nfix, ax = plt.subplots(1, 1, figsize=(10, 10))\nplot_onnx(cls._trained_onnx, ax=ax)\n\n\n##########################################\n# Training\n# ++++++++\n#\n# We consider a simple example based on torch documentation\n# (`Learning Pytorch with Example\n# `_\n# or `110 - First percepton with pytorch\n# `_).\n\n\ndef train_cls(cls, device, x, y, weights, n_iter=20, learning_rate=1e-2):\n x = from_numpy(x, requires_grad=True, device=device)\n y = from_numpy(y, requires_grad=True, device=device)\n\n weights_tch = [(w[0], from_numpy(w[1], requires_grad=True, device=device))\n for w in weights]\n weights_values = [w[1] for w in weights_tch]\n\n all_losses = []\n for t in tqdm(range(n_iter)):\n # forward - backward\n y_pred = cls.apply(x, *weights_values)\n loss = (y_pred - y).pow(2).sum()\n loss.backward()\n\n # update weights\n with torch.no_grad():\n for name, w in weights_tch:\n w -= w.grad * learning_rate\n w.grad.zero_()\n\n all_losses.append((t, float(loss.cpu().detach().numpy())))\n return all_losses, weights_tch\n\n\ndevice_name = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\ndevice = torch.device(device_name)\nprint(\"device:\", device)\n\nx = numpy.random.randn(N, d_in).astype(numpy.float32)\ny = numpy.random.randn(N, d_out).astype(numpy.float32)\n\ntrain_losses, final_weights = train_cls(cls, device, x, y, weights)\ntrain_losses = numpy.array(train_losses)\npprint.pprint(final_weights)\n\n#######################################\n# The training loss is decreasing. 
The function\n# seems to be learning something.\n\ndf = DataFrame(data=train_losses, columns=['iter', 'train_loss'])\ndf.plot(x=\"iter\", y=\"train_loss\", title=\"Training loss\")\n\n\n# plt.show()\n","repo_name":"sdpython/deeponnxcustom","sub_path":"_doc/examples/plot_torch_ort.py","file_name":"plot_torch_ort.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34382487348","text":"#测试break\nwhile True:\n a = input(\"请输入一个字符(输入Q或者q的时候退出): \")\n if a == \"q\" or a == \"Q\":\n print(\"循环结束,退出\")\n break\n else:\n print(a)\n\nprint(\"****************************************************\")\n\n#break和continue小练习\nempNum = 0\nsalarySum = 0\nsalarys = []\nwhile True:\n s = input(\"请输入员工的薪资(按q或者Q结束)\")\n\n if s.upper() == 'Q':\n print(\"录入完毕,退出\")\n break\n if float(s) < 0:\n continue\n empNum += 1\n salarys.append(float(s))\n salarySum += float(s)\n\nprint(\"员工数{0}\".format(empNum))\nprint(\"录入薪资\", salarys)\nprint(\"平均薪资{0}\".format(salarySum/empNum))\n\n","repo_name":"cwjworld/Python","sub_path":"mypypro06.py","file_name":"mypypro06.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19499481225","text":"import contextlib\nimport uuid\n\nimport tomodachi\nfrom aiohttp import web\nfrom stockholm import Money\nfrom tomodachi.envelope.json_base import JsonBase\nfrom tomodachi_bootstrap import TomodachiServiceBase\nfrom transactional_messaging.idempotent_consumer import MessageAlreadyProcessedError\n\nfrom adapters import dynamodb, inbox, outbox, sns\nfrom orders.commands import ApproveOrderCommand, CancelOrderCommand, CreateOrderCommand, RejectOrderCommand\nfrom service_layer import use_cases, views\nfrom service_layer.response import ResponseTypes\nfrom service_layer.unit_of_work import DynamoDBUnitOfWork\n\nSTATUS_CODES: dict[ResponseTypes, int] = {\n ResponseTypes.SUCCESS: 200,\n ResponseTypes.ORDER_NOT_FOUND_ERROR: 404,\n ResponseTypes.ORDER_ALREADY_EXISTS_ERROR: 400,\n ResponseTypes.PENDING_ORDER_CANNOT_BE_CANCELLED_ERROR: 400,\n ResponseTypes.SYSTEM_ERROR: 500,\n}\n\n\nclass TomodachiService(TomodachiServiceBase):\n name = \"service-orders\"\n\n async def _start_service(self) -> None:\n if self.is_dev_env:\n await sns.create_topics()\n await dynamodb.create_orders_table()\n await inbox.create_inbox_table()\n await outbox.create_outbox_table()\n await outbox.create_dynamodb_streams_outbox()\n\n @tomodachi.http(\"GET\", r\"/orders/health/?\", ignore_logging=[200])\n async def healthcheck(self, request: web.Request, correlation_id: uuid.UUID) -> web.Response:\n return web.json_response({\"status\": \"ok\"}, status=200)\n\n @tomodachi.http_error(status_code=500)\n async def error_500(self, request: web.Request, correlation_id: uuid.UUID) -> web.Response:\n return web.json_response({\"error\": ResponseTypes.SYSTEM_ERROR.value}, status=500)\n\n @tomodachi.http(\"POST\", r\"/orders\")\n async def create_order_handler(self, request: web.Request, correlation_id: uuid.UUID) -> web.Response:\n async with DynamoDBUnitOfWork() as uow:\n data = await request.json()\n cmd = CreateOrderCommand(\n correlation_id=correlation_id,\n customer_id=uuid.UUID(data[\"customer_id\"]),\n order_total=Money.from_sub_units(int(data[\"order_total\"])).as_decimal(),\n )\n response = await use_cases.create_order(uow, cmd)\n return web.json_response(response.to_dict(), status=STATUS_CODES[response.type])\n\n 
@tomodachi.http(\"GET\", r\"/order/(?P[^/]+?)/?\")\n async def get_order_handler(self, request: web.Request, order_id: str, correlation_id: uuid.UUID) -> web.Response:\n async with DynamoDBUnitOfWork() as uow:\n response = await views.get_order(uow, order_id=uuid.UUID(order_id))\n return web.json_response(response.to_dict(), status=STATUS_CODES[response.type])\n\n @tomodachi.http(\"POST\", r\"/order/(?P[^/]+?)/cancel?\")\n async def cancel_order_handler(\n self, request: web.Request, order_id: str, correlation_id: uuid.UUID\n ) -> web.Response:\n async with DynamoDBUnitOfWork() as uow:\n cmd = CancelOrderCommand(correlation_id=correlation_id, order_id=uuid.UUID(order_id))\n response = await use_cases.cancel_order(uow, cmd)\n return web.json_response(response.to_dict(), status=STATUS_CODES[response.type])\n\n @tomodachi.aws_sns_sqs(\n \"customer--credit-reserved\",\n queue_name=\"order--customer-credit-reserved\",\n dead_letter_queue_name=\"order--customer-credit-reserved--dlq\",\n max_receive_count=3,\n message_envelope=JsonBase,\n )\n async def customer_credit_reserved_handler(self, data: dict, correlation_id: uuid.UUID) -> None:\n with contextlib.suppress(MessageAlreadyProcessedError):\n async with DynamoDBUnitOfWork(message_id=uuid.UUID(data[\"event_id\"])) as uow:\n cmd = ApproveOrderCommand(correlation_id=correlation_id, order_id=uuid.UUID(data[\"order_id\"]))\n await use_cases.approve_order(uow, cmd)\n\n @tomodachi.aws_sns_sqs(\n \"customer--credit-reservation-failed\",\n queue_name=\"order--customer-credit-reservation-failed\",\n dead_letter_queue_name=\"order--customer-credit-reservation-failed--dlq\",\n max_receive_count=3,\n message_envelope=JsonBase,\n )\n async def customer_credit_reservation_failed_handler(self, data: dict, correlation_id: uuid.UUID) -> None:\n with contextlib.suppress(MessageAlreadyProcessedError):\n async with DynamoDBUnitOfWork(message_id=uuid.UUID(data[\"event_id\"])) as uow:\n cmd = RejectOrderCommand(correlation_id=correlation_id, order_id=uuid.UUID(data[\"order_id\"]))\n await use_cases.reject_order(uow, cmd)\n\n @tomodachi.aws_sns_sqs(\n \"customer--validation-failed\",\n queue_name=\"order--customer-validation-failed\",\n dead_letter_queue_name=\"order--customer-validation-failed--dlq\",\n max_receive_count=3,\n message_envelope=JsonBase,\n )\n async def customer_validation_failed_handler(self, data: dict, correlation_id: uuid.UUID) -> None:\n with contextlib.suppress(MessageAlreadyProcessedError):\n async with DynamoDBUnitOfWork(message_id=uuid.UUID(data[\"event_id\"])) as uow:\n cmd = RejectOrderCommand(correlation_id=correlation_id, order_id=uuid.UUID(data[\"order_id\"]))\n await use_cases.reject_order(uow, cmd)\n","repo_name":"filipsnastins/transactional-messaging-patterns-with-aws-dynamodb-streams-sns-sqs-lambda","sub_path":"service-orders/src/entrypoints/tomodachi_app.py","file_name":"tomodachi_app.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"8751774698","text":"#!/usr/bin/python3\nimport sys\n\n\ndef safe_function(fct, *args):\n \"\"\" function that executes a function safely.\n Args:\n fct: pointer to a function\n args: arguments of fct\n\n Returns:\n the result of the function, Otherwise, returns None\n \"\"\"\n try:\n return fct(*args)\n\n except Exception as err:\n print(\"Exception: {}\".format(str(err)), file=sys.stderr)\n return 
None\n","repo_name":"Kerzers/alx-higher_level_programming","sub_path":"0x05-python-exceptions/101-safe_function.py","file_name":"101-safe_function.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30781069703","text":"from .common import TestMultiUserCommon\n\n\nclass TestMultiUserPartner(TestMultiUserCommon):\n \"\"\"Test partner methods and fields.\"\"\"\n\n def test_get_shop_partner_default(self):\n self.backend.customer_multi_user = False\n self.assertEqual(self.company.get_shop_partner(self.backend), self.company)\n self.assertEqual(\n self.user_binding.record_id.get_shop_partner(self.backend),\n self.user_binding.record_id,\n )\n\n def test_main_partner_default(self):\n self.assertEqual(\n # this is already the main partner, get nothing\n self.company_binding.main_partner_id,\n self.company.browse(),\n )\n self.assertEqual(self.user_binding.main_partner_id, self.company)\n\n def test_invader_parent(self):\n self.assertEqual(\n self.company_binding.invader_parent_id,\n self.company_binding.browse(),\n )\n self.assertEqual(self.user_binding.invader_parent_id, self.company_binding)\n\n def test_is_invader_user(self):\n self.backend.customer_multi_user = False\n self.assertFalse(self.company_binding.is_invader_user)\n self.assertFalse(self.company.has_invader_user)\n self.assertTrue(self.user_binding.is_invader_user)\n self.assertTrue(self.user_binding.record_id.has_invader_user)\n\n def test_get_shop_partner_multi_enabled_company(self):\n self.backend.customer_multi_user = True\n self.assertEqual(self.backend.multi_user_profile_policy, \"main_partner_id\")\n customer_partner = self.company.get_shop_partner(self.backend)\n self.assertEqual(customer_partner, self.company)\n\n # as we have only the company account, nothing changes when user_partner is on\n self.backend.multi_user_profile_policy = \"record_id\"\n customer_partner = self.company.get_shop_partner(self.backend)\n self.assertEqual(customer_partner, self.company)\n\n def test_get_shop_partner_multi_enabled_simple_user(self):\n self.backend.customer_multi_user = True\n self.assertEqual(self.backend.multi_user_profile_policy, \"main_partner_id\")\n\n customer_partner = self.user_binding.record_id.get_shop_partner(self.backend)\n self.assertEqual(customer_partner, self.user_binding.main_partner_id)\n\n # change policy\n self.backend.multi_user_profile_policy = \"record_id\"\n # now we get the same partner as customer\n customer_partner = self.user_binding.record_id.get_shop_partner(self.backend)\n self.assertEqual(customer_partner, self.user_binding.record_id)\n\n def test_main_partner_recompute(self):\n self.assertEqual(self.user_binding.main_partner_id, self.company)\n\n new_parent = self.env[\"res.partner\"].create({\"name\": \"New parent\"})\n self.user_binding.parent_id = new_parent\n self.assertEqual(self.user_binding.main_partner_id, new_parent)\n\n self.backend.multi_user_main_partner_domain = \"[('type', '=', 'delivery')]\"\n # force recompute\n self.user_binding._compute_main_partner_id()\n # the parent partner does not match the domain\n self.assertEqual(\n self.user_binding.main_partner_id, self.env[\"res.partner\"].browse()\n )\n # now it matches it\n new_parent.type = \"delivery\"\n self.user_binding._compute_main_partner_id()\n self.assertEqual(self.user_binding.main_partner_id, new_parent)\n\n def test_main_partner_manual(self):\n self.assertEqual(self.user_binding.main_partner_id, self.company)\n custom_partner 
= self.env[\"res.partner\"].create({\"name\": \"Custom\"})\n\n # on write\n self.user_binding.main_partner_id = custom_partner\n self.assertEqual(self.user_binding.main_partner_id, custom_partner)\n\n # on copy\n new_binding = self.user_binding.copy(\n {\n \"name\": \"New Binding\",\n \"email\": \"new@test.com\",\n \"main_partner_id\": custom_partner.id,\n \"external_id\": \"new-binding\",\n }\n )\n self.assertEqual(new_binding.main_partner_id, custom_partner)\n\n # on create\n new_binding2 = self._create_partner_binding(\n name=\"New Binding 2\",\n parent_id=self.company.id,\n main_partner_id=custom_partner.id,\n )\n self.assertEqual(new_binding2.main_partner_id, custom_partner)\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_customer_multi_user/tests/test_multi_user_partner.py","file_name":"test_multi_user_partner.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"72929498826","text":"\"\"\"\n시간초과\n\"\"\"\nfrom sys import stdin\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\n\n\ninput = lambda: stdin.readline().rstrip()\nMAX = 4_000_000\n\n\ndef cycle(start: int) -> int:\n hq = [(start, 0)]\n visit = [False] * (V + 1)\n\n while hq:\n node, total = heappop(hq)\n visit[node] = True\n\n if total > ans:\n return MAX\n\n if node == start and total > 0:\n return total\n\n for neighbor, dist in graph[node]:\n if not visit[neighbor]:\n visit[neighbor] = True\n heappush(hq, (neighbor, total + dist))\n\n return MAX\n\n\nif __name__ == \"__main__\":\n V, E = map(int, input().split())\n graph = defaultdict(list)\n for _ in range(E):\n a, b, c = map(int, input().split())\n graph[a].append((b, c))\n\n ans = MAX\n for i in range(1, V + 1):\n ans = min(cycle(i), ans)\n\n print([ans, -1][ans == MAX])\n","repo_name":"boorooksus/Algorithm-Study","sub_path":"백준/CH18-shortest_path/G4-1956-Work_out.py","file_name":"G4-1956-Work_out.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15679606080","text":"class Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n sCount = Counter(s)\n tCount = Counter(t)\n for key, value in tCount.items():\n if sCount[key] < value:\n return False\n return len(s) == len(t)\n \n# Time: O(N + M)\n# Space: O(N + M)","repo_name":"Tek58/Leetcode","sub_path":"0242-valid-anagram/0242-valid-anagram.py","file_name":"0242-valid-anagram.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4857891925","text":"# -*- coding: utf-8 -*-\nimport test\nimport testData\nimport object\nimport objectMap\nimport re\nimport squishinfo\nimport squish\nimport string\nimport __builtin__\n\nfrom tables import Tables\nfrom createworkorderdialog import CreateWorkOrderDialog\nfrom deletesampledialog import DeleteSampleDialog\nfrom patientinformationdialog import PatientInformationDialog\nfrom analyzereventsdialog import AnalyzerEventsDialog\nfrom config import Config\n\nclass InProcess(Tables):\n \n def __init__(self):\n version = Config().version\n #get_expected_row_count is reserved exclusively for the getExpectedRowCount method\n Tables.__init__(self)\n self.analyzer_object_symbol = \":Bloodhound™ Viewing Station \" + version + \"_JTabbedPane\"\n self.get_expected_row_count = 0\n self.side_bar_sample_pattern = re.compile('.*Open-mode sample 
taken by Bloodhound\\\\s\\\\d at \\\\d+\\:\\\\d+.*')\n self.table_symbol = \":In Process.Show All_JTable\"\n self.object_symbol = \":Bloodhound™ Viewing Station \" + version + \".In Process_TabProxy\"\n self.first_row_symbol = \":Show All.0_0_TableItemProxy_2\"\n self.delete_sample_button_symbol = \":In Process.Delete Sample_JButton\"\n self.analyzer_combo_box_symbol = \":In Process.Filter by Analyzer_JComboBox\"\n self.create_work_order_button_symbol = \":In Process.Create Work Order_JButton\"\n self.edit_patient_button = \":In Process.Edit Patient_JButton\"\n self.search_edit_box_symbol = \":In Process.Search:_SearchTextField\"\n self.show_all_button_symbol = \":In Process.Show All_JButton\"\n self.status_column_header_symbol = \":Status_TableHeaderItemProxy\"\n self.name_column_header_symbol = \":Name_TableHeaderItemProxy\"\n self.accession_number_column_header_symbol = \":Accession #_TableHeaderItemProxy\" \n self.medicalRecord_column_header_symbol = \":Medical\" \n self.location_column_header_symbol = \":Location_TableHeaderItemProxy\" \n self.date_column_header_symbol = \":Date_TableHeaderItemProxy_2\" \n self.time_column_header_symbol = \":Time_TableHeaderItemProxy\"\n self.turn_around_time_header_symbol = \":Turnaround\" \n self.priority_column_header_symbol = \":Priority_TableHeaderItemProxy\" \n self.analyzer_column_header_symbol = \":Analyzer_TableHeaderItemProxy\" \n self.mode_column_header_symbol = \":Mode_TableHeaderItemProxy\"\n self.sidebar_text_symbol = \":In Process_WrappingDetailsList\"\n \n self.in_process_tab_symbol = \":In Process_JComponent\"\n \n self.createworkorderdialog = CreateWorkOrderDialog()\n self.deletesampledialog = DeleteSampleDialog()\n self.patientinformationdialog = PatientInformationDialog()\n \n def clickTab(self):\n squish.clickTab(squish.findObject(self.object_symbol))\n\n def getTotalNumberOfSamples(self):\n self.clickTab()\n return squish.findObject(self.table_symbol).getRowCount()\n \n def clickOnFirstRowOfTable(self):\n Tables.clickOnTableCellByIndex(self,self.table_symbol,0,0)\n\n #This is a special case function that handles looping sample rack runs \n def waitForRackToFinish(self,starting_sample_number):\n #Wait for a maximum of 300 seconds\n #This does some 'tricky' stuff to handle expected row count while looping.\n #When looping the simulator uses a tube of cleaning solution for every\n #20th sample. 
\n squish.waitForObject(self.analyzer_object_symbol).getTopLevelAncestor().toFront()\n self.clickTab()\n table = squish.findObject(self.table_symbol)\n counter = 0\n while table.getRowCount() < self.getExpectedRowCount(starting_sample_number):\n squish.snooze(1.0)\n AnalyzerEventsDialog().dismissAnalyzerEvents()\n counter = counter + 1\n if counter == 300:\n break\n total_samples = self.getTotalNumberOfSamples()\n test.log(\"Total Samples is now at \" + str(total_samples))\n return total_samples\n \n def waitForSampleToFinish(self,starting_sample_number):\n #Wait for a maximum of 300 seconds\n waitForObject(self.analyzer_object_symbol).getTopLevelAncestor().toFront()\n self.clickTab()\n table = squish.findObject(self.table_symbol)\n counter = 0\n while table.getRowCount() < starting_sample_number + 1:\n squish.snooze(1.0)\n AnalyzerEventsDialog().dismissAnalyzerEvents()\n counter = counter + 1\n if counter == 300:\n break\n \n test.log(\"Total Samples is now at \" + str(self.getTotalNumberOfSamples()))\n \n def getExpectedRowCount(self,starting_sample_number):\n if self.get_expected_row_count % 2 == 0:\n expected_count = starting_sample_number + 9\n else:\n expected_count = starting_sample_number + 10\n \n self.get_expected_row_count += 1\n return expected_count\n\n def getSideBarText(self):\n sidebartext = Tables.getSideBarText(self,self.sidebar_text_symbol)\n return sidebartext\n \n def confirmRackUnderModeColumn(self, starting_sample_number):\n self.clickTab()\n table = Tables.getTableData(self)\n total_samples_of_this_run = len(table) - starting_sample_number\n \n #for index in range (1, total_samples_of_this_run):\n for index in range (len(table)):\n test.verify(table[index][\"Mode\"],\"Confirm the Mode in every row in the table is Rack \" + table[index][\"Accession #\"])\n \n def confirmSamplePatientIdentityInfoExists(self):\n main_tab = findObject(self.in_process_tab_symbol)\n patient_labels = {}\n for index in range(main_tab.getComponentCount()):\n component = main_tab.getComponents().at(index)\n if \"JLabel\" in component.toString():\n if \"Medical Record\" in component.text:\n patient_labels[component.text] = main_tab.getComponents().at(index + 2).text\n elif \"Age\" in component.toString():\n patient_labels[component.text] = main_tab.getComponents().at(index + 3).text\n elif \"Sex\" in component.toString():\n patient_labels[component.text] = main_tab.getComponents().at(index + 3).text\n elif \"Physician\" in component.toString():\n patient_labels[component.text] = main_tab.getComponents().at(index + 3).text\n \n test.verify(len(patient_labels) == 4, \"Confirm that the following labels exist: Medical Record, Age, Sex, Physician\")\n \n def confirmARunOnTheSideBar(self):\n #For now just select the first row\n sidebartext = \"[]\"\n counter = 0\n #Wait Five Seconds to find sidebar text\n while(str(sidebartext) == \"[]\" or \"No sample selected\" in str(sidebartext[0])):\n Tables.clickOnTableRowIndex(self,0)\n sidebartext = self.getSideBarText()\n test.log(\"inside while sidebartext is: \" + str(sidebartext))\n squish.snooze(1.0)\n counter += 1\n if counter == 5:\n sidebartext = \"Could not find sidebar text\"\n break\n \n test.log(\"Here is sidebartext: \" + str(sidebartext))\n test.verify(str(sidebartext[1]).rfind(\"Ordered tests: CBC, Diff.\") != -1, \"Confirm that runs are also listed on the sidebar\")\n \n def populateTableData(self):\n self.clickTab()\n table_data = Tables.populateTableData(self,self.table_symbol)\n return table_data \n \n def 
populateTableDataWithAllRowsEqualToReadyforRelease(self):\n waitForObject(self.analyzer_object_symbol).getTopLevelAncestor().toFront()\n Tables.populateTableDataWithAllRowsEqualToReadyforRelease(self, self.table_symbol)\n \n def getSideBarTextByIndex(self,index,status):\n text = Tables.getSideBarTextByIndex(self,index,status)\n return text\n\n def confirmSideBarText(self,index,pattern):\n Tables.confirmSideBarText(self,index,pattern)\n \n #Wait For the row count to settledown.\n def waitForSTATRowCount(self,starting_sample_number):\n #Wait for a maximum of 300 seconds\n #The STAT holds 3 samples\n waitForObject(self.analyzer_object_symbol).getTopLevelAncestor().toFront()\n self.clickTab()\n table = squish.findObject(self.table_symbol)\n counter = 0\n while table.getRowCount() < starting_sample_number + 3:\n squish.snooze(1.0)\n AnalyzerEventsDialog().dismissAnalyzerEvents()\n counter = counter + 1\n if counter == 300:\n break\n \n test.log(\"Total Samples is now at \" + str(self.getTotalNumberOfSamples()))\n \n def confirmSTATSamples(self,spritecanvas,row_count):\n #Select the Inprocess Tab\n test.log(\"Right before self.clickTab()\")\n self.clickTab()\n test.log(\"Right after self.clickTab()\")\n #Get The Table Data\n self.populateTableData()\n #Total number of rows you would like to confirm.\n item = row_count - 1\n for row in self.table_data:\n test.verify(row[\"Accession #\"] == spritecanvas.accession_numbers[item],\"Confirm Accession # in Process List, \" \n + row[\"Accession #\"] + \" to Analyzer Simulator Accession # \" + spritecanvas.accession_numbers[item])\n \n test.verify(row[\"Mode\"] == \"STAT\", \"Confirm that the mode for every row is STAT\")\n if item == 0:\n break\n \n item -= 1\n \n def selectEachFilterByAnalizerItem(self):\n analyzer_combo = squish.findObject(self.analyzer_combo_box_symbol)\n for index in range(analyzer_combo.getItemCount()):\n analyzer_combo.setSelectedIndex(index)\n \n def selectFilterByAnalizerName(self,item_name):\n analyzer_combo = squish.findObject(self.analyzer_combo_box_symbol)\n for index in range(analyzer_combo.getItemCount()):\n if str(analyzer_combo.getItemAt(index)) == item_name:\n analyzer_combo.setSelectedIndex(index)\n break\n \n def clickCreateWorkOrderButton(self):\n squish.clickButton(self.create_work_order_button_symbol)\n \n def clickDeleteSampleButton(self):\n squish.clickButton(self.delete_sample_button_symbol)\n \n def confirmDeleteSampleButtonIsDisabled(self):\n button = findObject(self.delete_sample_button_symbol)\n test.verify(button.enabled == False,\"Confirm that the Delete Sample Button is disabled\")\n \n def confirmDeleteSampleButtonIsEnabled(self):\n button = findObject(self.delete_sample_button_symbol)\n test.verify(button.enabled == True,\"Confirm that the Delete Sample Button is enabled\")\n \n def clickEditPatientButton(self):\n squish.clickButton(self.edit_patient_button)\n \n def enterSearchText(self,search_text):\n squish.findObject(self.search_edit_box_symbol).text = search_text\n \n def clickShowAllButton(self):\n squish.clickButton(self.show_all_button_symbol)\n \n def clickStatusColumnHeader(self):\n squish.mouseClick(squish.waitForObject(self.status_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n \n def clickNameColumnHeader(self):\n squish.mouseClick(squish.waitForObject(self.name_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n \n def clickAccessionNumberColumnHeader(self):\n squish.mouseClick(squish.waitForObject(self.accession_number_column_header_symbol ), 26, 23, 0, 
squish.Button.Button1)\n    \n    def clickMedicalRecordColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.medicalRecord_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n    \n    def clickLocationColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.location_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n    \n    def clickDateColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.date_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n    \n    def clickTimeColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.time_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n    \n    def clickTurnaroundTimeColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.turn_around_time_header_symbol), 26, 23, 0, squish.Button.Button1)\n    \n    def clickPriorityColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.priority_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n    \n    def clickAnalyzerColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.analyzer_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n    \n    def clickModeColumnHeader(self):\n        squish.mouseClick(squish.waitForObject(self.mode_column_header_symbol), 26, 23, 0, squish.Button.Button1)\n\n    def checkInProcessAccessionNumbers(self,shared):\n        for accession_number in shared.spritecanvas.accession_numbers:\n            #Seems like we need a little wait here for the table to redraw\n            Tables.waitForTableStatusReadyForReleaseOrAwaitingReview(self,self.table_symbol)\n            Tables.populateTableData(self,self.table_symbol)\n            row = Tables.clickOnTableRowByName(self,self.table_symbol,\"Accession #\",accession_number)\n            name = row[\"Name\"].replace(\"(\",\"\")\n            name = name.replace(\")\",\"\")\n            sidebar_text = str(self.getSideBarText()[1])\n            expected_sidebar_text = \"[\" + name + \" with the “\" + shared.maintenance.newspecialsampledialog.accession_number_types[accession_number] + \"” profile, Normal process.]\"\n            test.verify(expected_sidebar_text == sidebar_text,\"Confirm that expected sidebar text \" + expected_sidebar_text + \" is equal to \" + sidebar_text)\n    \n    def clickOnTableRowByName(self,field_name, cell_value):\n        Tables.populateTableData(self,self.table_symbol)\n        Tables.clickOnTableRowByName(self,self.table_symbol,field_name, cell_value)\n    \n    def confirmAccessionNumberExistsInTable(self,accession_number):\n        Tables.populateTableData(self,self.table_symbol)\n        table_data = Tables.getTableData(self)\n        \n        for row in range(len(table_data)):\n            accession_number_exists = False\n            \n            if table_data[row]['Accession #'] == accession_number:\n                accession_number_exists = True\n            \n        return accession_number_exists\n    \n    def waitForTableStatusReadyForRelease(self):\n        Tables._waitForTableStatusReadyForRelease(self,self.table_symbol)\n    \n    def waitForTableStatus(self,status):\n        Tables._waitForTableStatus(self,self.table_symbol,status)\n    \n    def waitToClickOnTableRowByName(self,column_name,cell_name):\n        return Tables.waitToClickOnTableRowByName(self,self.table_symbol,column_name, cell_name)\n    \n    def waitForSidebarTextResults(self):\n        timer_counter = 0\n        counter = 1\n        while (counter < 6 ):\n            sidebar_text = self.getSideBarText()\n            \n            for index in range (len(sidebar_text)):\n                if \"Results \" + str(counter) + \" of 5\" in str(sidebar_text[index]):\n                    test.verify(True,\"Confirm that Results \" + str(counter) + \" of 5 will be ready on Bloodhound 1 (Rack mode) was found.\")\n                    counter += 1\n            \n            timer_counter += 1\n            \n            if timer_counter == 1000:\n                test.verify(False,\"Timer timed 
out. Could not find Results \" + str(counter) + \" of 5 will be ready on Bloodhound 1 (Rack mode) in the sidebar\")\n\n def confirmRowIsNotInTable(self,fieldname,key):\n table_data = Tables.populateTableData(self, self.table_symbol)\n \n found_key = False\n \n for index in range(len(table_data)):\n if key in table_data[index]:\n found_key = True\n break;\n \n if not found_key: \n test.verify(True,\"Did not find this key:\" + key + \" in this column: \" + fieldname + \" of the inprocess table\")\n else:\n test.verify(False,\"Found this key:\" + key + \" in this column: \" + fieldname + \" of the inprocess table\")\n","repo_name":"henrywasserman/suite_Viewing_Station_Automation","sub_path":"shared/scripts/MainTabs/InProcess/inprocess.py","file_name":"inprocess.py","file_ext":"py","file_size_in_byte":16443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74560210186","text":"import sys\n\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.python import log\n\n\nfrom magnet.protocol import ClientCreator\nfrom magnet.protocol import RequestResponseLineReceiver\n\n\nlog.startLogging(sys.stdout)\n\nclass AddClient(RequestResponseLineReceiver):\n\n def add(self, a, b):\n to_send = 'add, %d, %d' % (a, b)\n return self.makeRequest(to_send)\n\n\n\n\n@inlineCallbacks\ndef main():\n from magnet.preactor import Preactor\n preactor = yield Preactor()\n\n # ClientCreator for connectMS\n client_creator = ClientCreator(reactor, preactor, AddClient)\n d = client_creator.connectMS('add-service')\n add_client = yield d\n \n # use client\n ans = yield add_client.add(2, 2)\n log.msg('resopnse: %s' % ans)\n\n ans = yield add_client.add(5, 2)\n log.msg('resopnse: %s' % ans)\n\n\n\nif __name__ == '__main__':\n main()\n reactor.run()\n\n","repo_name":"deldotdr/magnet","sub_path":"examples/request_response_client.py","file_name":"request_response_client.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"33666802084","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nfff = 1\n\n# Set the aesthetics for the plot\nsns.set_style('whitegrid')\nsns.set_context(\"paper\", font_scale=1.4)\n\n\nsns.set_style('whitegrid')\nsns.set_context(\"paper\", font_scale=1.5)\nsns.set_palette(\"colorblind\")\n\n# Read the performance data from the CSV file\ndf = pd.read_csv(f'output.csv', \n usecols=['Directory', \n 'Node List', 'Steps', 'CAware', 'Nodes', 'Tasks', 'Elements', \n 'actual-steps', 'mean-perf', 'rem-perf', \n 'mean-perf-per-GPU', 'norm-mean-perf-per-GPU','norm-rem-perf'])\n\nfig, ax = plt.subplots(figsize=(12, 5))\n\n# Create a bar plot for the performance data, with eror bars\nax.errorbar(df['Tasks'], df['norm-mean-perf-per-GPU'], \n yerr=df['norm-rem-perf'], fmt='o-', capsize=5)\n\nax.hlines(1, 0, df['Tasks'].max(), colors='k', linestyles='dashed', label='Ideal scaling')\n\nfirst_entry = df.iloc[0]['mean-perf-per-GPU']/1e9\n\nax.set_title(r'$\\mathbf{Weak-scaling\\ efficiency\\ comparison\\ on\\ FASTER}$'+'\\n'+\n 'Cluster: FASTER, GPU: 40GB NVIDIA A100\\n'+\n 'Solver: PyFR 1.15.0\\n'+\n 'Case setup: Taylor Green Vortex case with mesh size ~256³ DoF/GPU\\n'+\n f'Normalisation performed w.r.t. 
simulation on first GPU: {first_entry:.2f} GDoF/s/GPU')\n\nax.set_xlabel('Total number of GPUs used')\nax.set_xlim(0, 16)\nax.set_xticks(df['Tasks'])\nax.set_ylabel('Normalised performance')\n \n#ax.set_ylabel('Performance metric\\n'\n# 'Computations performed per unit runtime per GPU \\n '\n# '(GigaDegrees of Freedom per second per GPU ≡ GDoF/s/GPU)')\nax.set_ylim(bottom=0)\n\nax.legend(loc='lower left')\n\n# Create a NOTE box below the plot\n\n# Create a box with the text\ntextstr = '\\n'.join((\n r'$\\mathbf{NOTE}$',\n r'Benchmarking performed in https://doi.org/10.1145/3569951.3597565',\n r'includes non-computation runtime too, like overheads of writing solution files to disk.',))\n\n# Add the box to the plot, just below the xlabel\nax.text(0.0, -0.25, textstr, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))\n\n\n\n\n# Save\nplt.savefig(f'perf-per-GPU.png', dpi=300, bbox_inches='tight')\n","repo_name":"sambitmishra98/benchmark","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36398451447","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport math\n\nclass DenseBlock(nn.Module):\n '''Dense Block\n \n For general purpose use across projects. Dense layers with leaky relu activation. \n\n Args:\n dim: An integer indicating the number of neurons per layer\n layers: An integer indicating the number of layers in\n '''\n def __init__(self, dim: int, layers: int) -> None:\n super().__init__()\n\n block = nn.ModuleList()\n\n for i in range(layers):\n block.extend([\n nn.Linear(in_features = dim, out_features = dim),\n nn.LeakyReLU()\n ])\n \n self.block = nn.Sequential(*block)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.block(x)\n\n# Just copy and pasted the code for this one\nclass NoisyLinear(nn.Module):\n \"\"\"Noisy linear module for NoisyNet.\n \n Attributes:\n in_features (int): input size of linear module\n out_features (int): output size of linear module\n std_init (float): initial std value\n weight_mu (nn.Parameter): mean value weight parameter\n weight_sigma (nn.Parameter): std value weight parameter\n bias_mu (nn.Parameter): mean value bias parameter\n bias_sigma (nn.Parameter): std value bias parameter\n \n \"\"\"\n\n def __init__(self, in_features: int, out_features: int, std_init: float = 0.5):\n \"\"\"Initialization.\"\"\"\n super(NoisyLinear, self).__init__()\n \n self.in_features = in_features\n self.out_features = out_features\n self.std_init = std_init\n\n self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features))\n self.weight_sigma = nn.Parameter(\n torch.Tensor(out_features, in_features)\n )\n self.register_buffer(\n \"weight_epsilon\", torch.Tensor(out_features, in_features)\n )\n\n self.bias_mu = nn.Parameter(torch.Tensor(out_features))\n self.bias_sigma = nn.Parameter(torch.Tensor(out_features))\n self.register_buffer(\"bias_epsilon\", torch.Tensor(out_features))\n\n self.reset_parameters()\n self.reset_noise()\n\n def reset_parameters(self):\n \"\"\"Reset trainable network parameters (factorized gaussian noise).\"\"\"\n mu_range = 1 / math.sqrt(self.in_features)\n self.weight_mu.data.uniform_(-mu_range, mu_range)\n self.weight_sigma.data.fill_(\n self.std_init / math.sqrt(self.in_features)\n )\n self.bias_mu.data.uniform_(-mu_range, mu_range)\n self.bias_sigma.data.fill_(\n 
self.std_init / math.sqrt(self.out_features)\n )\n\n def reset_noise(self):\n \"\"\"Make new noise.\"\"\"\n epsilon_in = self.scale_noise(self.in_features)\n epsilon_out = self.scale_noise(self.out_features)\n\n # outer product\n self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))\n self.bias_epsilon.copy_(epsilon_out)\n\n def zero_noise(self):\n self.weight_epsilon.copy_(torch.zeros(self.out_features, self.in_features))\n self.bias_epsilon.copy_(torch.zeros(self.out_features))\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward method implementation.\n \n We don't use separate statements on train / eval mode.\n It doesn't show remarkable difference of performance.\n \"\"\"\n return F.linear(\n x,\n self.weight_mu + self.weight_sigma * self.weight_epsilon,\n self.bias_mu + self.bias_sigma * self.bias_epsilon,\n )\n \n @staticmethod\n def scale_noise(size: int) -> torch.Tensor:\n \"\"\"Set scale to make noise (factorized gaussian noise).\"\"\"\n x = torch.randn(size)\n\n return x.sign().mul(x.abs().sqrt())","repo_name":"notDroid/aigym","sub_path":"rl/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38617677144","text":"# -*- coding: utf-8 -*-\n\nimport re\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nlemmatizer = WordNetLemmatizer()\nimport json\nimport pickle\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.optimizers import SGD\nimport random\n\nfrom M_sugg_bot.bot import listen_to\nfrom M_sugg_bot.bot import respond_to\n\n\n@respond_to('hello$', re.IGNORECASE)\ndef hello_reply(message):\n message.reply('hello sender!')\n\n\n@respond_to('hello_formatting')\n@listen_to('hello_formatting$')\ndef hello_reply_formatting(message):\n # Format message with italic style\n message.reply('_hello_ sender!')\n\n\n@listen_to('hello$')\ndef hello_send(message):\n message.send('hello channel!')\n\n\n@listen_to('hello_decorators')\n@respond_to('hello_decorators')\ndef hello_decorators(message):\n message.send('hello!')\n\n\n@respond_to('hello_web_api', re.IGNORECASE)\ndef web_api_reply(message):\n attachments = [{\n 'fallback': 'Fallback text',\n 'author_name': 'Author',\n 'author_link': 'http://www.github.com',\n 'text': 'Some text here ...',\n 'color': '#59afe1'\n }]\n message.reply_webapi(\n 'Attachments example', attachments,\n username='Mattermost-Bot',\n icon_url='https://goo.gl/OF4DBq',\n )\n\n\n@listen_to('hello_comment', re.IGNORECASE)\ndef hello_comment(message):\n message.comment('some comments ...')\n\n\n@listen_to('hello_react', re.IGNORECASE)\ndef hello_react(message):\n message.react('+1')\n\n\n@listen_to('hello_reply_threaded', re.IGNORECASE)\ndef hello_reply_threaded(message):\n message.reply_thread('hello threaded!')\n","repo_name":"Soumi7/M_sugg_bot","sub_path":"M_sugg_bot/plugins/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23759002251","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\n\nfrom PyQt4 import QtGui\n\n\ndef get():\n return GAUDInspectViewDetails()\n\n\nclass GAUDInspectViewDetails(QtGui.QWidget):\n\n def __init__(self):\n super(GAUDInspectViewDetails, self).__init__()\n self.title = \"Details\"\n self.initUI()\n\n 
def initUI(self):\n ###\n # Tab 3 - Detailed history\n ###\n self.grid = QtGui.QGridLayout(self)\n\n self.warning = QtGui.QLabel(\n \"This tab is only a stub. \"\n \"Deep changes need to be done in GAUDI to implement this feature. \"\n \"Consider this a preview of future functionality.
\")\n self.grid.addWidget(self.warning, 0, 0, 1, 2)\n\n # Tab 3 - Column 1 - Select individual\n self.col1 = QtGui.QVBoxLayout()\n self.grid.addLayout(self.col1, 1, 0)\n self.col1_ind_box = QtGui.QGroupBox('Individuals')\n self.col1_ind_layout = QtGui.QVBoxLayout(self.col1_ind_box)\n self.col1.addWidget(self.col1_ind_box)\n self.col1_toolbox = QtGui.QToolBox()\n self.col1_ind_layout.addWidget(self.col1_toolbox)\n\n self.col1_generations = QtGui.QListWidget()\n self.col1_toolbox.addItem(self.col1_generations, 'Generations')\n\n self.col1_population = QtGui.QListWidget()\n self.col1_toolbox.addItem(self.col1_population, 'Individuals')\n\n self.col1_genes = QtGui.QWidget()\n self.col1_toolbox.addItem(self.col1_genes, 'Genes')\n self.col1_genes_layout = QtGui.QVBoxLayout(self.col1_genes)\n self.col1_genes_layout.setContentsMargins(0, 0, 0, 0)\n self.col1_genes_table = QtGui.QTableWidget(5, 3)\n self.col1_genes_layout.addWidget(self.col1_genes_table)\n self.col1_genes_table.horizontalHeader().setStretchLastSection(True)\n self.col1_genes_table.verticalHeader().setVisible(False)\n self.col1_genes_table.setColumnWidth(0, 20)\n self.col1_genes_table.setHorizontalHeaderLabels(\n ['', 'Gene', 'Allele'])\n self.col1_genes_buttons = QtGui.QHBoxLayout()\n self.col1_genes_layout.addLayout(self.col1_genes_buttons)\n self.col1_genes_express_btn = QtGui.QPushButton('(Un)express')\n self.col1_genes_buttons.addWidget(self.col1_genes_express_btn)\n self.col1_genes_express_all_btn = QtGui.QPushButton('(Un)express all')\n self.col1_genes_buttons.addWidget(self.col1_genes_express_all_btn)\n [self.col1_genes_table.setCellWidget(i, 0, QtGui.QCheckBox(''))\n for i in range(5)]\n\n # Tab 3 - Column 2 - the environment\n self.col2 = QtGui.QVBoxLayout()\n self.grid.addLayout(self.col2, 1, 1)\n self.col2_env_box = QtGui.QGroupBox('Environment')\n self.col2_env_layout = QtGui.QVBoxLayout(self.col2_env_box)\n self.col2.addWidget(self.col2_env_box)\n self.col2_env_table = QtGui.QTableWidget(10, 2)\n self.col2_env_table.horizontalHeader().setStretchLastSection(True)\n self.col2_env_table.setHorizontalHeaderLabels(['Objective', 'Score'])\n self.col2_env_table.verticalHeader().setVisible(False)\n self.col2_env_layout.addWidget(self.col2_env_table)\n self.col2_env_buttons = QtGui.QHBoxLayout()\n self.col2_env_layout.addLayout(self.col2_env_buttons)\n self.col2_env_evaluate = QtGui.QPushButton('Evaluate')\n self.col2_env_buttons.addWidget(self.col2_env_evaluate)\n self.col2_env_evaluate_all = QtGui.QPushButton('Evaluate all')\n self.col2_env_buttons.addWidget(self.col2_env_evaluate_all)\n","repo_name":"insilichem/gaudinspect","sub_path":"gaudinspect/view/tabs/details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32441014820","text":"\n\nimport pickle\n\npkl_file=open(\"../pkl_files/items.pkl\",\"rb\")\n\nitems=pickle.load(pkl_file)\n\n\ndef powerset(s, k):\n\tx = len(s)\n\tpowerset = []\n\tlist = None\n\tfor i in range(1, 1 << x):\n\t\tlist = [s[j] for j in range(x) if (i & (1 << j))]\n\t\tif len(list) == k:\n\t\t\tpowerset.append(list)\n\treturn powerset\n\ndef lsupport(minsup,l):\n\tif len(l)>minsup:\n\t\treturn True\n\treturn False\n\n\n","repo_name":"ayushiaks/Association-Rule-Mining","sub_path":"Apriori/powerset.py","file_name":"powerset.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} 
+{"seq_id":"32613427769","text":"from rest_framework import serializers\n\nfrom .models import RetailOutlets, DivOffice\n\nclass DivOfficeSerilizer(serializers.ModelSerializer):\n class Meta:\n model = DivOffice\n fields = '__all__'\n\nclass RetailOutletsSerilizer(serializers.ModelSerializer):\n FO_name = DivOfficeSerilizer(many=False)\n # FO_name = models.ForeignKey(DivOffice, on_delete=models.CASCADE, related_name='FieldO_Name')\n # FO_name = serializers.StringRelatedField(many=False)\n class Meta:\n model = RetailOutlets\n fields = list_display = ['code','name','area','sales','type','FO_name']\n\n\n\n\n\n # def validate(self, data):\n # id = data.get('id')\n # if id < 0 :\n # raise serializers.ValidationError('ID cant be negative')\n # else:\n # return data\n\n# class officeSerilizer(serializers.Serializer):\n# id = serializers.IntegerField()\n# employee_name = serializers.CharField(max_length=70)\n# employee_role = serializers.CharField(max_length=70)\n# employee_email = serializers.EmailField(max_length=70)\n# employee_city = serializers.CharField(max_length=70)\n\n # def create(self,validate_data):\n # return office.objects.create(**validate_data)\n #\n # def update(self, instance, validated_data):\n # print(instance.employee_name)\n # instance.id = validated_data.get('id', instance.id)\n # instance.employee_name = validated_data.get('employee_name', instance.employee_name)\n # instance.employee_role = validated_data.get('employee_role', instance.employee_role)\n # instance.employee_email = validated_data.get('employee_email', instance.employee_email)\n # #instance.employee_city = validated_data.get('employee_city', instance.employee_city)\n # print(instance.employee_name)\n # instance.save()\n # return instance\n\n","repo_name":"rathorel24/SalesAreaPortal","sub_path":"myportal/seralization.py","file_name":"seralization.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37816632723","text":"\nfrom paper_trading.event import EVENT_TIMER\n\n# 系统相关事件\nEVENT_LOG = 'e_log' # 日志记录的事件\nEVENT_ERROR = 'e_error' # 错误事件,api连接错误或者数据库错误\n\n# 应用相关\nEVENT_MARKET_CLOSE = \"e_market_close\" # 市场关闭事件\nEVENT_ACCOUNT_UPDATE = \"e_a_u\" # 账户更新事件\nEVENT_ACCOUNT_AVL_UPDATE = \"e_a_avl_u\" # 账户可用资金修改事件\nEVENT_ACCOUNT_ASSETS_UPDATE = \"e_a_assets_u\" # 账户资产修改事件\nEVENT_POS_INSERT = \"e_p_i\" # 持仓保存事件\nEVENT_POS_UPDATE = \"e_p_u\" # 持仓更新事件\nEVENT_POS_AVL_UPDATE = \"e_p_a_u\" # 卖出可用股份修改事件\nEVENT_POS_PRICE_UPDATE = \"e_p_p_u\" # 卖出可用股份修改事件\nEVENT_POS_DELETE = \"e_p_d\" # 卖出可用股份修改事件\nEVENT_ORDER_INSERT = \"e_o_i\" # 订单保存事件\nEVENT_ORDER_UPDATE = \"e_o_u\" # 订单更新事件\nEVENT_ORDER_STATUS_UPDATE = \"e_o_s_u\" # 订单状态更新事件\nEVENT_ORDER_DEAL = \"e_o_d\" # 订单成交事件\nEVENT_ORDER_REJECTED = \"e_o_r\" # 订单拒绝事件\nEVENT_ORDER_CANCELED = \"e_o_c\" # 订单取消事件\nEVENT_ACCOUNT_RECORD_INSERT = \"e_a_r_i\" # 账户记录建立事件\nEVENT_POS_RECORD_INSERT = \"e_p_r_i\" # 持仓记录修改事件\nEVENT_POS_RECORD_BUY = \"e_p_r_b\" # 持仓记录修改事件\nEVENT_POS_RECORD_SELL = \"e_p_r_s\" # 持仓记录修改事件\nEVENT_POS_RECORD_CLEAR = \"e_p_r_c\" # 持仓记录清理事件\n\n\n\n\n","repo_name":"cao6237699/paper_trading","sub_path":"utility/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"81"} +{"seq_id":"988972607","text":"from flask import Flask,render_template,request,jsonify,url_for\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport 
warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nimport time\r\nfrom datetime import datetime\r\ndf = pd.read_csv('Ticket.csv')\r\nfrom PIL import Image\r\nimport os\r\n#import plotly\r\ntime1=[]\r\nresult1=[]\r\napp = Flask(__name__,static_folder='C:\\\\Users\\\\dell\\\\Desktop\\\\flaskapp')\r\n@app.route('/send',methods = ['GET' , 'POST'])\r\ndef send():\r\n\tif(request.method == 'POST'):\r\n\t\tstation = request.form['station']\r\n\t\tTime = request.form['Time']\r\n\t\tday =request.form['Day']\r\n\t\ty = compute(station,Time,day)\r\n\t\t#fullname = os.path.join(app.config['abc'],'book_read.png')\r\n\t\tprint(y)\r\n\r\n\t\treturn render_template('location.html',y=y,Time=Time,time1=time1,result1=result1)\r\n\r\n\treturn render_template('index.html')\r\n\r\n\r\n\r\n\r\n\r\ndef datetime_from_utc_to_local(utc_datetime):\r\n now_timestamp = time.time()\r\n offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(now_timestamp)\r\n return utc_datetime + offset\r\n\r\ndef compute(Source,datetimee,day):\r\n #Source = \"Palarivatom\"\r\n List1 = ['Aluva','Ambattukavu','Changampuzha Park','Cochin University','Companypady','Edapally','JLN_Stadium','Kalamassery'\r\n 'Kaloor','Lissie','M.G Road','Maharajas College','Palarivatom','Pathadipalam','Pulinchodu']\r\n\r\n \r\n # In[200]:\r\n\r\n\r\n def PlotCount(df19):\r\n grouper = pd.TimeGrouper(freq=\"30T\")\r\n df19.index = df19.reset_index()['Transaction Time'].apply(lambda x: x - pd.Timestamp(x.date()))\r\n df19count = df19.groupby(grouper).count()\r\n\r\n df19count.drop(columns=['Station','Equipment Type','Equipment ID','Fare Media','Fare Product','Ticket/Card Number','Transaction Type','Date'],inplace=True)\r\n\r\n df19count['newcolumn'] = df19count.index\r\n\r\n plt.figure(figsize=(10,10))\r\n df19count['Transaction Time'].plot(kind='barh')\r\n plt.savefig('templates/book_read.png')\r\n #plotly.offline.plot(fig, filename='templates/name.html')\r\n\r\n df19count['newcolumn'] = pd.to_datetime(df19count['newcolumn'])\r\n df19count.rename(columns={'Transaction Time':'Count'},inplace=True)\r\n df19count.reset_index(inplace=True)\r\n\r\n #datetimee = '19-01-2018 21:10'\r\n #datetimee = pd.to_datetime(datetimee)\r\n def Rushcounter(datetimee):\r\n datetimee = pd.to_datetime(datetimee)\r\n (time,count)=(0,0)\r\n for y in df19count.index:\r\n if df19count.iloc[y,2].hour==datetimee.hour:\r\n if df19count.iloc[y,2].minute<=datetimee.minute:\r\n (Index,time,count)=(df19count.index,df19count.iloc[y,0],df19count.iloc[y,1])\r\n print(df19count.iloc[y-1,0],df19count.iloc[y-1,1])\r\n print(df19count.iloc[y+1,0],df19count.iloc[y+1,1])\r\n \r\n result1.append(indic(df19count.iloc[y-2,1]))\r\n result1.append(indic(df19count.iloc[y-1,1]))\r\n result1.append(indic(df19count.iloc[y+1,1]))\r\n result1.append(indic(df19count.iloc[y+2,1]))\r\n \r\n time1.append(str(df19count.iloc[y-2,0]).split()[2])\r\n time1.append(str(df19count.iloc[y-1,0]).split()[2])\r\n time1.append(str(df19count.iloc[y+1,0]).split()[2])\r\n time1.append(str(df19count.iloc[y+2,0]).split()[2])\r\n print(result1)\r\n print(time1) \r\n\r\n return time,count\r\n \r\n def RushIndicator(datetimee):\r\n time,count = Rushcounter(datetimee)\r\n describe = df19count.describe()\r\n minn = describe['Count']['min']\r\n maxx = describe['Count']['max']\r\n average = describe['Count']['mean']\r\n low = minn\r\n h = maxx\r\n m = average\r\n l1 = (minn+average)/2\r\n h1 = (average+maxx)/2\r\n\r\n if count>=low and count=l1 and count=m and count=h1 and count=low and count=l1 and count=m and count=h1 
and count threshold_depth, 0., image)\n\n return tf.RaggedTensor.from_tensor(image, ragged_rank=2)\n\n\ndef flattened_with_min_value_discarded(arr):\n image_min = tf.reduce_min(arr)\n indices = tf.where(arr > image_min)\n image_above_min = tf.gather_nd(arr, indices)\n return image_above_min\n\n\ndef threshold_and_peak_frequency(image: tf.Tensor, threshold_depth):\n # Apply thresholding only if the threshold_frequency\n # is significantly low\n unique, indices, counts = tf.unique_with_counts(image)\n peak_freq = unique[tf.argmax(counts)]\n threshold_freq = counts[unique == tf.math.round(threshold_depth)]\n if tf.size(threshold_freq) == 0:\n threshold_freq = 0\n return tf.cast(threshold_freq, tf.float32), tf.cast(peak_freq, tf.float32)\n\n\ndef threshold_otsu(image, nbins=256):\n \"\"\"\n Return threshold value based on Otsu's method.\n\n Reimplementation of the `threshold_otsu` function from\n `skimage.filters` package in Tensorflow.\n \"\"\"\n min_value = tf.reduce_min(image)\n max_value = tf.reduce_max(image)\n tf.debugging.assert_none_equal(min_value, max_value,\n message=\"threshold_otsu is expected to work with images \"\n \"having more than one color. The input image seems \"\n \"to have just one color.\")\n bin_edges = histogram_bin_edges(min_value, max_value, nbins)\n hist = tfp.stats.histogram(image, bin_edges)\n # hist = tf.histogram_fixed_width_bins(image, [min_value, max_value], nbins=nbins)\n bin_centers = histogram_bin_centers(min_value, max_value, nbins)\n hist = tf.cast(hist, dtype=tf.float32)\n\n # class probabilities for all possible thresholds\n weight1 = tf.math.cumsum(hist)\n weight2 = tf.math.cumsum(hist[::-1])[::-1]\n # class means for all possible thresholds\n mean1 = tf.math.cumsum(hist * bin_centers) / weight1\n mean2 = (tf.math.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n\n # Clip ends to align class 1 and class 2 variables:\n # The last value of ``weight1``/``mean1`` should pair with zero values in\n # ``weight2``/``mean2``, which do not exist.\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n\n idx = tf.math.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n\n\ndef histogram_bin_edges(min, max, nbins):\n return tf.linspace(min, max, nbins + 1)\n\n\ndef histogram_bin_centers(min, max, nbins):\n bin_edges = tf.linspace(min, max, nbins + 1)\n centers = (bin_edges[:-1] + bin_edges[1:]) / 2.\n return centers\n","repo_name":"LadaOndris/hands","sub_path":"src/utils/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18065961482","text":"import pandas as pd\nfrom utils.common import read_cfg\nfrom pathlib import Path\nimport glob\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom data.ucr_loader import UCRDataset\nfrom downstream.downstream_factory import get_downstream\nfrom utils.common import read_cfg\nfrom model.quantum_autoencoder import QuantumAutoencoder\n\nok = [\"022\", \"033\", \"070\", \"121\", \"138\", \"173\"]\nok2 = ok + [\"102\", \"131\"]\nok3 = ok2 + [\"121\"]\nmeh = [\"006\", \"028\", \"035\", \"114\", \"119\", \"193\", \"197\", \"236\"]\nnope = [\"053\", \"054\", \"059\", \"062\", \"083\", \"123\", \"221\", \"229\", \"249\"]\n\nbetter = [\"022\", \"028\", \"033\", \"062\", \"102\", \"114\", \"131\", \"138\", \"173\", \"249\"]\nsame = [\"054\", \"070\", \"119\", \"121\", \"193\"]\nbetter_or_equal = [['022', '033', '054', '062', '070', '102', 
'114', '119', '121', '131', '138', '173', '193', '229',\n '236', '249']]\nbetter_or_equal = ['022', '033', '054', '062', '070', '102', '114', '119', '121', '131', '138', '173', '193', '229',\n '236', '249']\n\nFs_thr = ['173', '193', '035', '006', '102', '062', '033', '053', '131', '121', '028', '119', '022', '054', '249',\n '114', '138']\n\nbetter_new = ['022', '033', '062', '102', '114', '131', '138', '173', '249']\nbetter_new2_old = ['022', '033', '062', '102', '114', '131', '138', '173', '249']\n\ntest = ['022', '028', '033', '035', '059', '062', '123', '131', '138',\n '173', '249']\n\ntest2 = ['022', '028', '033', '035', '059', '062', '083', '123', '131', '138', '173', '249']\n\n\ndef fetch_runs(location, save=False):\n configs = glob.glob(location + \"**/**/run-config.yaml\")\n\n data = []\n columns = [\"exp-name\", \"model_type\", \"augmentation\", \"set-nr\", \"found\", \"mse_loss\", \"kl_loss\", \"location\"]\n\n for cfg_file in configs:\n try:\n cfg = read_cfg(cfg_file)\n run_dir = \"/\".join(cfg_file.split(\"/\")[:-1])\n found_file = run_dir + \"/found.txt\"\n if Path(found_file).exists():\n model_type = cfg[\"autoencoder\"][\"type\"]\n augmentation = cfg[\"augmentation\"][\"name\"]\n exp_name = cfg[\"experiment-name\"]\n found = 0.0\n set_nr = cfg[\"data\"][\"set_number\"]\n hist = pd.read_csv(run_dir + \"/train_hist.csv\")\n mse_loss = hist.iloc[-1][\"mse_loss\"]\n if \"kl_loss\" in hist.columns:\n kl_loss = hist.iloc[-1][\"kl_loss\"]\n else:\n kl_loss = np.inf\n\n with open(found_file, \"r\") as f:\n found = float(f.readline())\n data.append([exp_name, model_type, augmentation, set_nr, found, mse_loss, kl_loss, run_dir])\n except Exception as e:\n print(\"could not process \" + cfg_file)\n frame = pd.DataFrame(columns=columns, data=data)\n if save:\n frame.to_csv(\"./results.csv\")\n return frame\n\n\ndef load_exp(location, show_density=False, quantum=False):\n\n loc = location\n model_file = loc + \"/model-checkpoints/model-final.pkl\"\n cfg_path = loc + \"/run-config.yaml\"\n cfg = read_cfg(cfg_path)\n exp_name = cfg[\"experiment-name\"]\n batch_size = cfg[\"training\"][\"batch_size\"]\n\n downstream_cfg = cfg[\"downstream\"]\n if \"density\" in downstream_cfg[\"config\"]:\n downstream_cfg[\"config\"][\"density\"][\"config\"][\"plot_density\"] = show_density\n device = \"cuda:0\"\n cfg[\"data\"][\"location\"] = cfg[\"data\"][\"location\"].replace(\"home/ubuntu\", \"home/robin/Documents/lbnl/crd\")\n dset = UCRDataset(**cfg[\"data\"])\n window_size = cfg[\"data\"][\"t_steps\"]\n if quantum:\n model = load_quantum_exp(location)\n else:\n model = torch.load(model_file)\n\n loader = torch.utils.data.DataLoader(dataset=dset,\n batch_size=batch_size,\n shuffle=True)\n dtask = get_downstream(downstream_cfg[\"type\"], model, **downstream_cfg[\"config\"]).fit(loader)\n return model, dset, loader, dtask, cfg\n\n\ndef load_quantum_exp(location):\n loc = location\n model_file = loc + \"/circuit-weights.txt\"\n cfg_path = loc + \"/run-config.yaml\"\n cfg = read_cfg(cfg_path)\n exp_name = cfg[\"experiment-name\"]\n model = QuantumAutoencoder(**cfg['autoencoder'])\n model.param_values = np.loadtxt(model_file)\n model.eval()\n return model\n","repo_name":"rbnfrhnr/ts-anomaly-detection","sub_path":"analysis/analysis_utils.py","file_name":"analysis_utils.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34197867266","text":"#wywołanie 1: py zad4.py plik5.txt hello\r\n#wywołanie 2: py 
zad5.py - hello\r\n\r\nimport sys\r\n\r\nszukaj=sys.argv[2]\r\nnapis=''\r\nlines=[]\r\n\r\nif sys.argv[1] == '-':\r\n #input() \r\n # - reads and returns a single line\r\n #sys.stdin.readlines() - reads multiple lines and returns a list \r\n # - reading ends when an EOF character is entered from the keyboard\r\n # - 'CTRL-D' (linux) 'CTRL-Z + ENTER' (win) \r\n lines=sys.stdin.readlines()\r\nelse:\r\n plik=sys.argv[1] \r\n f = open(plik,'rt',encoding='utf8')\r\n lines=f.readlines()\r\n f.close()\r\n \r\nprint(\"\\ninput:\\n%s \\n\" %(''.join(lines)))\r\n \r\nprint(\"SOLUTION I:\")\r\nrobtab=[]\r\nfor line in lines:\r\n robtab=line.split()\r\n for wyraz in robtab:\r\n if wyraz == szukaj:\r\n print(line)\r\n\r\nprint(\"SOLUTION II:\")\r\nfor line in lines:\r\n if line.split().count(szukaj):\r\n print(line)\r\n\r\n","repo_name":"Arhelyd/python-kolos","sub_path":"p1/zad5.py","file_name":"zad5.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12537729118","text":"\n\ndef divide(den, num, digits):\n\tif digits <=0:\n\t\treturn \"\"\n\ttimes = 0 \n\twhile(den <= num):\n\t\ttimes += 1\n\t\tnum -= den\n\n\treturn str(times) + divide(den, num * 10, digits -1)\n\ndef recursionCounter(den, num, knownNum):\n\tnum = num % den\n\tif (num in knownNum):\n\t\treturn(len(knownNum) - knownNum.index(num))\n\telse:\n\t\tknownNum.append(num)\n\t\treturn recursionCounter(den, num * 10, knownNum)\n\n\nlengthLongestCycle = 1\nlongestCycle = 0\nfor d in range(2,1000):\n\tlength = recursionCounter(d,1,[])\n\tif(length > lengthLongestCycle):\n\t\tlengthLongestCycle = length\n\t\tlongestCycle = d\nprint(str(longestCycle))\n","repo_name":"Artuur-Oerlemans/projectEuler","sub_path":"20-29/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25816260273","text":"\"\"\"\nThought iterating over the whole board to check that the number is there would\nbe slow so tried to use different data structure to speed that check + verifying\nwhether a board is complete or not. Runs in about 0.009 seconds locally.\n\"\"\"\nfrom typing import List, Dict, Tuple\n\nfrom _utils import read_input, timer\n\nimport re\n\n\ndef parse_input(\n _input: List[str],\n) -> Tuple[List[int], Dict[int, dict], Dict[int, List[List[int]]]]:\n \"\"\"\n Returns data structures used to solve the puzzle.\n\n :param: _input: list[str], original input\n\n :return:\n bingo_numbers: ordered list of bingo numbers\n\n board: dict representing the board. Keys are unique board ID, values\n is another dict where keys are bingo numbers and values the position\n of the numbers within the board. 
Aim is to give O(1) access to the\n position of specific numbers within a board later on.\n\n bingo_grid: dict representing the board and 'found' numbers.\n Keys are unique board ID, value is a matrix representing whether a\n number was found in the current position or not.\n \"\"\"\n bingo_numbers = [int(i) for i in _input[0].split(\",\")]\n\n all_boards = {}\n bingo_grid = {}\n raw_board = [re.sub(\"\\s+\", \" \", line).strip() for line in _input[1:] if line != \"\"]\n\n for ix in range(5, len(raw_board) + 1, 5): # board is 5x5\n all_boards[ix] = {}\n bingo_grid[ix] = [[0, 0, 0, 0, 0] for i in raw_board[ix : ix + 5]]\n\n # Parse board as list[list[int]] at first, then use to create dict\n current_board = [list(map(int, i.split())) for i in raw_board[ix : ix + 5]]\n\n for i in range(len(current_board)):\n for j in range(len(current_board[i])):\n number = current_board[i][j]\n all_boards[ix][number] = i, j\n\n return bingo_numbers, all_boards, bingo_grid\n\n\ndef get_bingo_score(\n all_boards: Dict[int, dict], board_number: int, called: set, bingo_num: int\n) -> int:\n winning_sum = sum([k for k in all_boards[board_number].keys() if k not in called])\n return bingo_num * winning_sum\n\n\ndef play_bingo(\n bingo_numbers: List[int],\n all_boards: Dict[int, dict],\n bingo_grid: Dict[int, List[List[int]]],\n) -> Tuple[int, int]:\n \"\"\"\n Returns part 1 score (score from 1st completed board) and part 2 score\n (score from last completed score).\n \"\"\"\n won = set() # set for fast membership checks in while loop\n called = set() # all 'called' bingo numbers so far\n\n for ix, bingo_num in enumerate(bingo_numbers):\n called.add(bingo_num)\n\n # for every board\n for board_number in all_boards:\n # if number in board & board not 'won' yet, update grid\n if bingo_num in all_boards[board_number] and board_number not in won:\n x, y = all_boards[board_number][bingo_num]\n bingo_grid[board_number][x][y] = 1\n\n # check this is now a sum\n hor_check = [bingo_grid[board_number][x][n] for n in range(5)]\n vert_check = [bingo_grid[board_number][n][y] for n in range(5)]\n\n if sum(hor_check) == 5 or sum(vert_check) == 5:\n won.add(board_number)\n if len(won) == 1:\n part_1_score = get_bingo_score(\n all_boards, board_number, called, bingo_num\n )\n\n # last score will be returned by func - better way to do this?\n part_2_score = get_bingo_score(\n all_boards, board_number, called, bingo_num\n )\n\n return part_1_score, part_2_score\n\n\n@timer\ndef main(filepath: str) -> Tuple[int, int]:\n \"\"\"\n Returns solution for AOC day 4 from filepath.\n \"\"\"\n _input = read_input(filepath)\n bingo_numbers, all_boards, bingo_grid = parse_input(_input)\n part_1_score, part_2_score = play_bingo(bingo_numbers, all_boards, bingo_grid)\n return part_1_score, part_2_score\n\n\nif __name__ == \"__main__\":\n part_1_score, part_2_score = main(\"aoc4.txt\")\n print(f\"PART 1: {part_1_score}\") # 87456\n print(f\"PART 2: {part_2_score}\") # 15561\n","repo_name":"IAjimi/AdventOfCode","sub_path":"2021/AOC04.py","file_name":"AOC04.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5586424625","text":"import init_project\nfrom main.models import Observation\nfrom django.db import connection\n\ndef main():\n observations = Observation.objects.filter(region__isnull=True)\n #print(observations.count())\n cursor = connection.cursor()\n for observation in observations:\n if observation.location is not None:\n 
cursor.execute(\"\"\"\n SELECT st_distance(geom, 'SRID=4326;POINT(%s %s)'::geometry) as d, name, id\n FROM main_region\n ORDER BY d limit 1\n \"\"\", (observation.location.x, observation.location.y,))\n row = cursor.fetchone()\n #print(\"{0} {1}\".format(row[1], row[2]))\n print(\"UPDATE main_observation set region_id = {0} where id={1};\".format( row[2], observation.id ))\n else:\n #print(\"Null coordinates for observation {0}\".format(observation.id))\n print(\"-- No coordinates\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"aescobarr/data_aggregator_api","sub_path":"util_scripts/assign_no_region_to_closest_region.py","file_name":"assign_no_region_to_closest_region.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24663996084","text":"\"\"\"GameStore URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom . import views\n\nurlpatterns = [\n # Map the API list\n\n # mapped to /v1\n url(r'^$', views.v1, name='v1'),\n\n # mapped to /v1/games\n url(r'^games/$', views.games, name='games'),\n\n # mapped to /v1/games/<game_id>\n url(r'^games/(?P<game_id>[0-9]+)/$', views.game, name='game'),\n\n # mapped to /v1/categories\n url(r'^categories/$', views.categories, name='categories'),\n\n # mapped to /v1/categories/<category_id>\n url(r'^categories/(?P<category_id>[0-9]+)/$', views.category, name='category'),\n]","repo_name":"luispdm/GameStore","sub_path":"GameStore/api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"6839310955","text":"import random, json\n\nwith open('data/occupations.json', 'r') as open_file:\n FAMILY = json.load(open_file)\n FAMILY = {job: FAMILY[job]['family'] for job in FAMILY}\n\nwith open('data/races.json', 'r') as open_file:\n RACES = json.load(open_file)\n RACES = {race: RACES[race]['density'] for race in RACES}\n TOTAL = sum(RACES.values())\n\n\ndef get_inhabitants(seed, house_type, race):\n random.seed(str(seed))\n\n if FAMILY[house_type] == False:\n return 1\n\n density = RACES[race]\n if random.random() < 0.5*(1 - ((TOTAL-density)/TOTAL)**150):\n # Provided that there are enough of the same kind to\n # fall in love with, every member of the society has\n # a 50-50 chance to be in a relationship.\n # This means that about 2/3 of humans have a relationship,\n # while a Genasi will probably never find another Genasi\n # and hence not fall in love.\n # (\n # And I don't know which races can or can't fall in love\n # with each other, so we're keeping races divided from\n # each other.\n # )\n\n return 2 + extra_child()\n else:\n # The person does not live together with anyone at the moment.\n return 1\n\ndef extra_child():\n if random.random() < 0.6:\n return 0\n else:\n return 1 + 
extra_child()","repo_name":"BramvdnHeuvel/dnd5e-town-generator","sub_path":"gen/house/inhabitant_amount/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"81"} +{"seq_id":"10752728333","text":"# -*- coding: utf-8 -*-\n# based on secretballot\nimport django\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db.models import Manager\n\n\ndef limit_total_votes(num):\n from .models import Vote\n\n def total_vote_limiter(request, content_type, object_id, vote):\n return Vote.objects.filter(content_type=content_type,\n token=request.secretballot_token).count() < num\n return total_vote_limiter\n\n\ndef enable_voting_on(\n cls, manager_name='objects',\n votes_name='votes', upvotes_name='total_upvotes',\n downvotes_name='total_downvotes', total_name='vote_total',\n add_vote_name='add_vote', remove_vote_name='remove_vote',\n base_manager=None):\n\n from django.contrib.contenttypes.models import ContentType\n from django.contrib.contenttypes.fields import GenericRelation\n from .models import Vote\n\n VOTE_TABLE = Vote._meta.db_table\n\n def add_vote(self, token, vote):\n voteobj, created = getattr(self, votes_name).get_or_create(\n token=token,\n defaults={\n 'vote': vote,\n 'content_object': self\n })\n if not created:\n voteobj.vote = vote\n voteobj.save()\n\n def remove_vote(self, token):\n getattr(self, votes_name).filter(token=token).delete()\n\n # gets added to the class as a property, not under this name\n def get_total(self):\n return getattr(self, upvotes_name) - getattr(self, downvotes_name)\n\n if base_manager is None:\n if hasattr(cls, manager_name):\n base_manager = getattr(cls, manager_name).__class__\n else:\n base_manager = Manager\n\n class VotableManager(base_manager):\n\n use_for_related_fields = True\n\n def get_queryset(self):\n db_table = self.model._meta.db_table\n pk_name = self.model._meta.pk.attname\n\n try:\n content_type = ContentType.objects.get_for_model(self.model).id\n except:\n content_type = None\n\n downvote_query = '(SELECT COUNT(*) from %s WHERE vote=-1 AND object_id=%s.%s AND content_type_id=%s)' % (VOTE_TABLE, db_table, pk_name, content_type)\n upvote_query = '(SELECT COUNT(*) from %s WHERE vote=1 AND object_id=%s.%s AND content_type_id=%s)' % (VOTE_TABLE, db_table, pk_name, content_type)\n\n return super(VotableManager, self).get_queryset().extra(\n select={upvotes_name: upvote_query,\n downvotes_name: downvote_query})\n\n def from_token(self, token):\n db_table = self.model._meta.db_table\n pk_name = self.model._meta.pk.attname\n content_type = ContentType.objects.get_for_model(self.model).id\n query = '(SELECT vote from %s WHERE token=%%s AND object_id=%s.%s AND content_type_id=%s)' % (\n VOTE_TABLE, db_table, pk_name, content_type)\n\n return self.get_queryset().extra(select={'user_vote': query}, select_params=(token,))\n\n def from_request(self, request):\n if not hasattr(request, 'secretballot_token'):\n raise ImproperlyConfigured('To use voting a votingMiddleware must '\n 'be installed. 
(see voting/middleware.py)')\n\n return self.from_token(request.secretballot_token)\n\n if django.VERSION < (1, 10):\n cls.add_to_class('_default_manager', VotableManager())\n cls.add_to_class(manager_name, VotableManager())\n else:\n # this is a hack but by setting a distinct name and appending\n # to local_managers the manager seems to be selected as the default\n vm = VotableManager()\n vm.name = manager_name + 'votable'\n cls._meta.local_managers.append(vm)\n cls._meta.default_manager_name = manager_name + 'votable'\n cls.add_to_class(manager_name, vm)\n\n cls.add_to_class(votes_name, GenericRelation(Vote))\n cls.add_to_class(total_name, property(get_total))\n cls.add_to_class(add_vote_name, add_vote)\n cls.add_to_class(remove_vote_name, remove_vote)\n\n setattr(cls, '_voting_enabled', True)\n","repo_name":"juliomrqz/scrits","sub_path":"scrits/votes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"81"} +{"seq_id":"41780035684","text":"from turtle import Turtle\r\n#we have used \"turtle.write\" command in the following file which allows us to give alignment , font , move and arg\r\nALIGNMENT = \"center\"\r\nFONT = (\"Arial\", 20, \"normal\")\r\n\r\n\r\nclass ScoreBoard(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.score = 0\r\n self.color(\"white\")\r\n self.hideturtle()\r\n self.penup()\r\n self.goto(0, 270)\r\n self.update_scoreboard()\r\n\r\n def update_scoreboard(self):\r\n self.write(f\"Score : {self.score}\",\r\n align=ALIGNMENT,\r\n font=FONT)\r\n\r\n def increase_score(self):\r\n self.score += 1\r\n self.clear()\r\n self.update_scoreboard()\r\n\r\n def game_over(self):\r\n self.goto(0,0)\r\n self.write(\"GAME OVER\",\r\n align=ALIGNMENT,\r\n font=FONT)","repo_name":"dhruv7-sys/Snake-game-python","sub_path":"scoreBoard.py","file_name":"scoreBoard.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19224845677","text":"from pylab import plot,xlabel,show,ylabel,xlim,ylim\r\nfrom numpy import array,size,linspace\r\n\r\ne = 5.9 #meV\r\np = 3.57 #Angstroms\r\nm = 6.12/p**2 # m == 2m/h**2\r\n\r\n\r\ntmin = 0.75*p\r\ntmax = 5*p\r\n\r\nl = 3\r\nE = 0.5\r\n\r\ndef LJ(t): #Lennard-Jones potential ; \r\n if(t len(self.data.Y_test):\n self.config.samples = len(self.data.Y_test)\n\n # Get the testing images for use below. 
\n X_test, Y_test = self.data.get_test_data() \n plots_per_page = self.config.ncols * self.config.nrows\n total_pages = self.config.samples // plots_per_page\n \n self.logger.info('GridPlottingEvaluator output is {}'.format(\n self.config.output_name\n ))\n with PdfPages(self.config.output_name) as pdf:\n fig, axs = plt.subplots(\n figsize=(8, 11),\n nrows=self.config.nrows,\n ncols=self.config.ncols,\n sharex=True, sharey=True,\n dpi=self.config.dpi\n )\n fig.subplots_adjust(wspace=0, hspace=0)\n\n indices = np.random.choice(\n np.arange(len(X_test)),\n self.config.samples,\n replace=False\n )\n preds = self.model.model.predict(X_test)\n\n if len(preds.shape) > 1:\n preds = np.argmax(preds, axis=1)\n \n # We want the data to be as it should look\n # this will reload the data without applying\n # the preprocessing.\n self.logger.debug('Re-loading data to remove preprocessing.')\n self.data.load()\n X_test, Y_test = self.data.get_test_data()\n\n # If the input is categorical \n if len(Y_test.shape) > 1:\n if Y_test.shape[1] > 1:\n self.logger.info('Projecting Y_test from one hot vector to scalar.')\n self.logger.debug('Shape before: {}'.format(Y_test.shape))\n Y_test = np.argmax(Y_test, axis=1)\n self.logger.debug('Shape after: {}'.format(Y_test.shape))\n \n colors = { i:np.random.randint(0, 255, 3) for i in np.unique(Y_test) }\n self.logger.debug(colors)\n\n plot_order = np.argsort(preds)[np.sort(indices)]\n self.logger.debug(\"Plot order {}\".format(plot_order))\n for i, index in enumerate(plot_order):\n\n pad = 1 + i % plots_per_page\n new_page = (pad == 1)\n \n # It is time to print out the\n # previous page of the pdf.\n if new_page and i > 0:\n pdf.savefig(fig)\n plt.close()\n self.logger.info(\"Printing new page {}/{}\".format(\n i // plots_per_page, total_pages\n ))\n \n row = (pad - 1) // self.config.ncols\n col = (pad - 1) % self.config.ncols\n \n # Plot an image there and remove axis\n # ticks if they exist to unblock the\n # figures and make them nicely sit\n # next to each other.\n img = X_test[index]\n x = self.color_pad(\n img,\n colors[Y_test[index]],\n pixels = img.shape[0] // 20 + 1\n )\n axs[row,col].imshow(x)\n axs[row,col].set_xticklabels([])\n axs[row,col].set_yticklabels([])\n axs[row,col].set_title(preds[index])\n \n # It is possible that the last\n # page has not been printed.\n if not new_page:\n pdf.savefig(fig)\n plt.close()\n\n\n def color_pad(self, image, color=(123,123,123), pixels=2):\n h, w, c = image.shape\n new_image = np.zeros(\n (h + 2 * pixels, w + 2 * pixels, c),\n dtype=np.uint8)\n new_image[:,:,:] = color\n new_image[pixels : h + pixels, pixels : w + pixels] = image \n return new_image\n","repo_name":"david-riser/33k-images","sub_path":"refactor/evaluators/plot_evaluators.py","file_name":"plot_evaluators.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34211338788","text":"class Vehicle:\n vehicle1 = \"Car\"\n vehicle2 = \"Bike\"\n vechile3 = \"cycle\"\n\n def __init__(self, name):\n self.name = name\n\n def maxspeed(self, speed):\n self.speed = speed\n print(self.name + \"Maximum speed is \" + self.speed)\n\nC1 =Vehicle('BMW')\nB1 = Vehicle('HONDA')\nS1 = Vehicle('LADY 
BIRD')\nprint(C1.maxspeed(\"300\"))\nprint(B1.maxspeed(\"100\"))\nprint(S1.maxspeed(\"50\"))\n\n\n","repo_name":"krishnabala02/Data-Engineering-Training","sub_path":"my_vehicle.py","file_name":"my_vehicle.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2814629878","text":"from typing import Union, Type\n\nfrom fastapi import HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom .schema import Users, Quiz_Data, Transactions\n\nModelInstances = Union[Users, Quiz_Data, Transactions]\nModelTypes = Union[Type[Users], Type[Quiz_Data], Type[Transactions]]\n\n\ndef get_user_info(db: Session, user_id: int):\n \"\"\"Returns user info for the given user_id\"\"\"\n return db.query(Users).filter(Users.user_id == user_id).first()\n\n\ndef add_user_to_db(db: Session, user_id: int):\n \"\"\"Adds the user's Telegram id to the database\"\"\"\n if user_id == 441179051:\n new_entry = Users(user_id=user_id, is_admin=True)\n else:\n new_entry = Users(user_id=user_id)\n db.add(new_entry)\n db.commit()\n db.refresh(new_entry)\n return new_entry\n\n\ndef credit_user_balance(db: Session, user_id: int, amount: int):\n \"\"\"\n Increases the user's balance\n :param db:\n :param user_id:\n :param amount:\n :return:\n \"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail='User not found')\n try:\n if user.balance:\n user.balance = int(user.balance) + abs(int(amount))\n else:\n user.balance = abs(int(amount))\n db.commit()\n db.refresh(user)\n return user\n except ValueError:\n return HTTPException(status_code=400, detail='Invalid amount')\n\n\ndef debit_user_balance(db: Session, user_id: int, admin_id: int, amount: int):\n \"\"\"\n Debits money from the user's balance\n :param db:\n :param user_id:\n :param admin_id:\n :param amount:\n :return:\n \"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail='User not found')\n admin = db.query(Users).filter(Users.user_id == admin_id).first()\n if admin is None:\n return HTTPException(status_code=403, detail='Only admin can debit user\\'s balance.')\n try:\n if user.balance:\n user.balance = int(user.balance) - abs(int(amount))\n else:\n raise ValueError\n db.commit()\n db.refresh(user)\n return user\n except ValueError:\n return HTTPException(status_code=400, detail='Invalid amount')\n\n\ndef set_user_name(db: Session, user_id: int, user_name: str):\n \"\"\"Stores the name the user entered in the quiz, linked to their id\"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail='User not found')\n user.quiz_name = user_name\n db.commit()\n db.refresh(user)\n return user\n\ndef set_user_admin_mode(db: Session, user_id: int, admin_id: int, is_admin: bool):\n \"\"\"Makes user_id an administrator\"\"\"\n admin = db.query(Users).filter(Users.user_id == admin_id).first()\n if admin is None or not admin.is_admin:\n return HTTPException(status_code=403, detail='Only admin users are allowed to do this.')\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return None\n user.is_admin = is_admin\n db.commit()\n db.refresh(user)\n return user\n\n\ndef set_finished_user(db: Session, user_id: int, is_finished: bool):\n \"\"\"Marks whether the user has finished the quiz\"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail='User not found')\n user.is_finished = is_finished\n db.commit()\n db.refresh(user)\n return user\n\n\n\ndef set_account_name(db: Session, user_id: int, account_name: str):\n \"\"\"Stores the user's Telegram account name, linked to their id\"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail='User not found')\n user.account_name = account_name\n db.commit()\n db.refresh(user)\n return user\n\n\ndef add_admin(db: Session, user_id: int, admin_id: int):\n \"\"\"Adds a database record with admin rights\"\"\"\n admin = db.query(Users).filter(Users.user_id == admin_id).first()\n if admin is None or not admin.is_admin:\n return HTTPException(status_code=403, detail='Only admin users are allowed to do this.')\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n # Create the record first if the user is not in the database yet\n user = add_user_to_db(db, user_id)\n user.is_admin = True\n db.commit()\n db.refresh(user)\n return user\n\n\ndef set_user_position(db: Session, user_id: int, position: int):\n \"\"\"Sets the user's current position in the quest\"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail='User not found')\n user.quest_num = position\n db.commit()\n db.refresh(user)\n return user\n\n\ndef get_user_position(db: Session, user_id: int):\n \"\"\"Returns the user's current position in the quest\"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail='User not found')\n return user.quest_num\n\n\ndef show_users(db: Session):\n \"\"\"Returns the contents of the Users table\"\"\"\n users = db.query(Users).all()\n if not users:\n return HTTPException(status_code=404, detail='No data to display.')\n return users\n\n\ndef show_user_ids_by_account(db: Session, account_name: str):\n \"\"\"Returns all user_id values matching the Telegram account name\"\"\"\n users = db.query(Users).filter(Users.account_name == account_name).all()\n if not users:\n return HTTPException(status_code=404, detail=f'Account name: {account_name} - not found.')\n return [user.user_id for user in users]\n\n\ndef show_user_ids_by_quiz_name(db: Session, quiz_name: str):\n \"\"\"Returns all user_id values matching the chosen quiz name\"\"\"\n users = db.query(Users).filter(Users.quiz_name == quiz_name).all()\n if not users:\n return HTTPException(status_code=404, detail=f'Quiz name: {quiz_name} - not found.')\n return [user.user_id for user in users]\n\n\ndef user_delete(db: Session, user_id: int):\n \"\"\"Deletes a user by user_id\"\"\"\n user = db.query(Users).filter(Users.user_id == user_id).first()\n if user is None:\n return HTTPException(status_code=404, detail=f'User id: {user_id} - not found.')\n db.delete(user)\n db.commit()\n users = db.query(Users).all()\n if users is None or len(users) < 1:\n return HTTPException(status_code=204, detail=f'Users table is empty.')\n return users\n","repo_name":"AlexHLinS/by-quiz-bot","sub_path":"db/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22607141556","text":"def quickSort(lista, idxPri, idxUlt):\n if idxPri < idxUlt:\n pontoDiv = dividir(lista, idxPri, idxUlt)\n 
quickSort(lista, idxPri, pontoDiv-1)\n quickSort(lista, pontoDiv+1, idxUlt)\n\n\ndef dividir(lista, idxPri, idxUlt):\n pivo = lista[idxPri]\n\n marcEsq = idxPri + 1\n marcDir = idxUlt\n\n while marcEsq < marcDir:\n while marcEsq <= marcDir and lista[marcEsq] <= pivo:\n marcEsq += 1\n\n while lista[marcDir] >= pivo and marcDir >= marcEsq:\n marcDir -= 1\n\n if marcEsq < marcDir:\n temp = lista[marcEsq]\n lista[marcEsq] = lista[marcDir]\n lista[marcDir] = temp\n\n temp = lista[idxPri]\n lista[idxPri] = lista[marcDir]\n lista[marcDir] = temp\n\n return marcDir\n\n\nlista = [5, 3, 2, 4, 1]\nquickSort(lista, 0, len(lista)-1)\nprint(lista)\n","repo_name":"thalysonalexr/data-structures","sub_path":"ordenation/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"33428729109","text":"from django.conf import settings\r\nfrom django.core.validators import MaxValueValidator, MinValueValidator\r\nfrom django.db import models\r\nfrom datetime import datetime\r\nimport uuid\r\nimport os\r\n\r\n\r\ndef user_avatar_path(instance, filename):\r\n \"\"\"Rename the uploaded image to a uuid to avoid overwriting files with duplicate names\"\"\"\r\n img_type = filename.split('.')[-1]\r\n img_name = '{}/{}.{}'.format(instance.id, str(uuid.uuid4()).replace('-', ''), img_type)\r\n # Upload path for the image\r\n return os.path.join('avatar/', img_name)\r\n\r\n\r\nclass UserInfo(models.Model):\r\n username = models.CharField(verbose_name='username', max_length=32, unique=True)\r\n password = models.CharField(verbose_name='password', max_length=64)\r\n # Storage location on disk: MEDIA_ROOT in settings.py + upload_to\r\n # Value stored in the database: upload_to + imgName\r\n avatar = models.ImageField(verbose_name='avatar', upload_to=user_avatar_path, blank=True, null=True)\r\n role_choices = (\r\n ('admin', 'administrator'),\r\n ('user', 'user'),\r\n )\r\n role = models.CharField(verbose_name='role', max_length=32, choices=role_choices, blank=True, null=True)\r\n\r\n class Meta:\r\n managed = True\r\n db_table = 'user_info'\r\n\r\n\r\nclass UserToken(models.Model):\r\n token = models.CharField(max_length=64) # A random string generated whenever the user logs in, stored here and refreshed on the next login\r\n user = models.OneToOneField(to='UserInfo', on_delete=models.CASCADE)\r\n\r\n class Meta:\r\n managed = True\r\n db_table = 'user_token'\r\n\r\n\r\nclass UserFavorites(models.Model):\r\n \"\"\" User favorites, stores movie ids \"\"\"\r\n user = models.ForeignKey(verbose_name=\"user (uid)\", to='UserInfo', on_delete=models.DO_NOTHING)\r\n favor = models.ForeignKey(verbose_name=\"movie favorited by the user (mid)\", to='app.MovieDetail', to_field='id', on_delete=models.DO_NOTHING)\r\n create_time = models.DateTimeField(verbose_name='created at', default=datetime.now)\r\n\r\n class Meta:\r\n managed = True\r\n db_table = 'user_favorites'\r\n\r\n\r\nclass UserRating(models.Model):\r\n user = models.ForeignKey(verbose_name=\"user ID\", to='UserInfo', on_delete=models.DO_NOTHING)\r\n movie = models.ForeignKey(verbose_name=\"movie ID\", to='app.MovieDetail', to_field='id', on_delete=models.DO_NOTHING)\r\n star_rating = models.IntegerField(verbose_name='star rating', validators=[MaxValueValidator(5), MinValueValidator(1)])\r\n create_time = models.DateTimeField(verbose_name='created at', default=datetime.now)\r\n\r\n class Meta:\r\n managed = True\r\n db_table = 
'user_rating'\r\n","repo_name":"hide-in-cloud/A-Visualization-System-For-Data-Analysis-Of-Douban-Movies","sub_path":"djangoProject/user/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"42805105994","text":"\"\"\"\nThis module is an example of a barebones numpy reader plugin for napari.\n\nIt implements the Reader specification, but your plugin may choose to\nimplement multiple readers or even other plugin contributions. see:\nhttps://napari.org/plugins/guides.html?#readers\n\"\"\"\nfrom functools import partial\nfrom openslide import OpenSlide, open_slide, deepzoom\nimport numpy as np\nfrom scipy import linalg\nimport dask.array as da\nfrom dask import delayed\n\ndef getHEDfromRGB(rgb):\n    # Conversion matrix for RGB-HED\n    rgb_from_hed = np.array([[0.65, 0.70, 0.29],\n                             [0.07, 0.99, 0.11],\n                             [0.27, 0.57, 0.78]])\n    hed_from_rgb = linalg.inv(rgb_from_hed)\n\n    # Following skimage's _prepare_colorarray: rescale [0,255] -> [0,1]\n    rgb = da.asanyarray(rgb) / 255\n    # clip tiny values to avoid log artifacts\n    da.maximum(rgb, 1E-6, out=rgb)\n    # Compensate sum\n    log_adjust = da.log(1E-6)\n    hed = (da.log(rgb)/log_adjust) @ hed_from_rgb\n    da.maximum(hed, 0, out=hed)\n    \n    return hed\n\n\n\n\ndef napari_get_reader(path, in_memory: bool):\n    \"\"\"A basic implementation of a Reader contribution.\n\n    Parameters\n    ----------\n    path : str or list of str\n        Path to file, or list of paths.\n\n    Returns\n    -------\n    function or None\n        If the path is a recognized format, return a function that accepts the\n        same path or list of paths, and returns a list of layer data tuples.\n    \"\"\"\n    if isinstance(path, list):\n        # reader plugins may be handed single path, or a list of paths.\n        # if it is a list, it is assumed to be an image stack...\n        # so we are only going to look at the first file.\n        path = path[0]\n\n    # if we know we cannot read the file, we immediately return None.\n    if not path.endswith(\".svs\"):\n        return None\n\n    # otherwise we return the *function* that can read ``path``.\n    return partial(reader_function, in_memory=in_memory)\n\n\ndef reader_function(path, in_memory: bool):\n    \"\"\"Take a path or list of paths and return a list of LayerData tuples.\n\n    Readers are expected to return data as a list of tuples, where each tuple\n    is (data, [add_kwargs, [layer_type]]), \"add_kwargs\" and \"layer_type\" are\n    both optional.\n\n    Parameters\n    ----------\n    path : str or list of str\n        Path to file, or list of paths.\n\n    Returns\n    -------\n    layer_data : list of tuples\n        A list of LayerData tuples where each tuple in the list contains\n        (data, metadata, layer_type), where data is a numpy array, metadata is\n        a dict of keyword arguments for the corresponding viewer.add_* method\n        in napari, and layer_type is a lower-case string naming the type of layer.\n        Both \"meta\" and \"layer_type\" are optional. 
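(This reader also returns a labels layer holding the thresholded nuclei mask.) 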
napari will default to\n        layer_type==\"image\" if not provided\n    \"\"\"\n    # Load the image with OpenSlide and build a DeepZoom tile pyramid\n    img = open_slide(path)\n    gen = deepzoom.DeepZoomGenerator(img, tile_size = 1000, overlap = 0, limit_bounds=False)\n    levels = len(img.level_dimensions)\n\n    allow_unknown_chunksizes=False\n\n    imgPy = []\n    labPy = []\n    for level in range(levels):\n        max_level = gen.level_count - 1 - 2 * level\n        n_tiles_x, n_tiles_y = gen.level_tiles[max_level]\n        get_tile = lambda level,i,j: np.array(gen.get_tile(level,(i,j))).transpose((1,0,2))\n        sample_tile = get_tile(max_level,0,0)\n        sample_tile_shape = sample_tile.shape\n        dask_get_tile = delayed(get_tile, pure=True)\n        arr = (da.concatenate([da.concatenate([da.from_delayed(dask_get_tile(max_level,i,j),sample_tile_shape,np.uint8) for j in range(n_tiles_y)],allow_unknown_chunksizes=allow_unknown_chunksizes,axis=1) for i in range(n_tiles_x )],allow_unknown_chunksizes=allow_unknown_chunksizes))#.transpose([1,0,2]))\n\n        imgPy.append(arr)\n\n        if level == 0:\n            rgb = arr.copy()\n            hedarr = getHEDfromRGB(rgb)\n            # Do the thresholding\n            h_mask = hedarr[:,:,0]\n            h_mask[h_mask >= 0.042] = 1\n\n        else:\n            # If not zoomed in enough, no mask\n            h_mask = da.zeros(shape=arr[:,:,0].shape)\n        \n        labPy.append(h_mask.astype(\"uint8\"))\n\n\n    # For now, ignore metadata\n    add_kwargs = {}\n\n    layer_type = \"image\"  # optional, default is \"image\"\n    return [(imgPy, add_kwargs, \"image\"), (labPy, {\"name\":\"Nuclei\"}, \"labels\")]\n","repo_name":"UTSW-Software-Engineering-Course-2022/final_austinmarckx","sub_path":"napari-hello/svsOutMemory/svsOutMemoryHED/svsOutMemoryHED/_reader.py","file_name":"_reader.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"27006830067","text":"\"\"\"\n@author: haiwen\n@date: 2020/11/20\n@file: test_organiz.py\n\"\"\"\nimport pytest\n\n@pytest.fixture()\ndef after_tc000002(init_organiz):\n    org_api = init_organiz\n    yield org_api\n    org_api.delete(org[\"_id\"])\n\ndef test_tc000002(after_tc000002):\n    global org\n    org_api = after_tc000002\n    # step1\n    org = org_api.add('测试部门')\n    # step2\n    orgs = org_api.list_all()\n    assert org in orgs  # check that the created record appears in the listing\n\n","repo_name":"Hquanquan/AutomationTestProject","sub_path":"课程代码/day1/testcase/D-管理员登录/D-销售部/test_organiz.py","file_name":"test_organiz.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72445533704","text":"import os\n\n\ntracking_label='/home/kid/workspace/qd-3dt/data/KITTI/tracking/training/label_02/0000.txt'\nobject_label='/home/kid/workspace/3D_detection/dataset/test/label/'\n\nf=open(tracking_label,'r')\nlines=f.readlines()\nf.close()\n\nfor l in lines:\n    if 'DontCare' not in l:\n        l=l.split(\" \", 2)\n        frame = l[0].zfill(6)+'.txt'\n        info=l[-1]\n        print(frame,info)\n        path=os.path.join(object_label,frame)\n        with open(path, \"a\") as file:  # mode \"w\" would overwrite the contents on every run; \"a\" appends\n            file.write(info)\n","repo_name":"hahakid/3D_BBOX_simple_test","sub_path":"util/tracking2object.py","file_name":"tracking2object.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"7267736581","text":"import os\nfrom dataclasses import dataclass\nimport datetime\nimport allure\n\n\n@dataclass\nclass DATA:\n    PATH = os.path.abspath('xml_responses/xml_response.xml')\n    URL_GUIDE_CURRENCY_CODES = 'https://www.cbr.ru/scripts/XML_val.asp?d=0'\n    
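# daily quotes endpoint; the current date is appended below as dd/mm/YYYY\n    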
URL_FOR_CURRENT_CURRENCY_QUOTES = f'http://www.cbr.ru/scripts/XML_daily.asp?date_req=' \\\n                                      f'{datetime.datetime.now().strftime(\"%d/%m/%Y\")}'\n    URL_FOR_CURRENT_CURRENCY_QUOTES_ENGLISH_VERSION = f'http://www.cbr.ru/scripts/XML_daily_eng.asp?date_req=' \\\n                                                      f'{datetime.datetime.now().strftime(\"%d/%m/%Y\")}'\n    FILE_XML_RESPONSE = 'xml_response.xml'\n    FILE_XML_GUIDE_CURRENCY_CODES = \"guide_currency_codes.xml\"\n    ENGLISH_LANGUAGE_IS_NOT_USED = r'[a-zA-Z]'\n    RUSSIAN_LANGUAGE_IS_NOT_USED = r'[а-яА-Я]'\n\n\nclass TestAPICurrencyQuotes:\n    def test_is_xml_valid(self, api_service):\n        api_service.create_file_with_xml_response(DATA.URL_FOR_CURRENT_CURRENCY_QUOTES, DATA.FILE_XML_RESPONSE)\n        with allure.step('Check that the xml is valid'):\n            assert api_service.is_xml_valid(), \"XML is not valid\"\n\n    def test_all_fields_in_document_exists(self, api_service):\n        api_service.create_file_with_xml_response(DATA.URL_FOR_CURRENT_CURRENCY_QUOTES, DATA.FILE_XML_RESPONSE)\n        with allure.step('Check that the format corresponds to the declared one and all documented fields are present.'):\n            assert api_service.check_all_attributes(), \"One or more fields are missing from the xml\"\n\n    def test_valid_numbers(self, api_service):\n        api_service.create_file_with_xml_response(DATA.URL_FOR_CURRENT_CURRENCY_QUOTES, DATA.FILE_XML_RESPONSE)\n        with allure.step('Check that the numbers are valid'):\n            assert api_service.check_are_numbers_valid(), 'One or more numbers are invalid'\n\n    def test_valid_currency_id_codes(self, api_service):\n        api_service.create_file_with_xml_response(DATA.URL_FOR_CURRENT_CURRENCY_QUOTES, DATA.FILE_XML_RESPONSE)\n        api_service.create_file_with_xml_response(DATA.URL_GUIDE_CURRENCY_CODES, DATA.FILE_XML_GUIDE_CURRENCY_CODES)\n        with allure.step('Checking that currency codes are real'):\n            assert api_service.check_are_currency_id_codes_valid(), 'One or more currency codes are invalid'\n\n    def test_russian_letters_are_valid(self, api_service):\n        api_service.create_file_with_xml_response(DATA.URL_FOR_CURRENT_CURRENCY_QUOTES, DATA.FILE_XML_RESPONSE)\n        with allure.step('Check that the Russian version of the website uses no English letters'):\n            assert api_service.check_not_used_letters_specified_language(DATA.ENGLISH_LANGUAGE_IS_NOT_USED), \\\n                'One or more names contain non-Russian letters'\n\n    def test_english_letters_are_valid(self, api_service):\n        api_service.create_file_with_xml_response(DATA.URL_FOR_CURRENT_CURRENCY_QUOTES_ENGLISH_VERSION,\n                                                  DATA.FILE_XML_RESPONSE)\n        with allure.step('Check that the English version of the website uses no Russian letters'):\n            assert api_service.check_not_used_letters_specified_language(DATA.RUSSIAN_LANGUAGE_IS_NOT_USED), \\\n                'One or more names contain non-English letters'\n","repo_name":"HarbachouPavel/Digital_Finance_International_test_task","sub_path":"tests/test_api_currency_quotes.py","file_name":"test_api_currency_quotes.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"21440095821","text":"def read_file(filename):\n    file_obj = open(filename, \"r\")  # opens the file in read mode\n    lines = file_obj.read().splitlines()  # puts the file into an array\n    file_obj.close()\n    return lines\n\n\ndef parse_coordinates(lines):\n    return [list(map(int, [p1.split(\",\")[0], p1.split(\",\")[1], p2.split(\",\")[0], p2.split(\",\")[1]])) for p1, p2 in\n            [[a.strip(), b.strip()] for a, b in [line.split(\"->\") for line in lines]]]\n\n\ndef boundaries(coordinates):\n    min_x1 = min([coordinate[0] for coordinate in coordinates])\n    min_y1 = min([coordinate[2] 
for coordinate in coordinates])\n    min_x2 = min([coordinate[1] for coordinate in coordinates])\n    min_y2 = min([coordinate[3] for coordinate in coordinates])\n    max_x1 = max([coordinate[0] for coordinate in coordinates])\n    max_y1 = max([coordinate[2] for coordinate in coordinates])\n    max_x2 = max([coordinate[1] for coordinate in coordinates])\n    max_y2 = max([coordinate[3] for coordinate in coordinates])\n    return min(min_x1, min_x2), min(min_y1, min_y2), max(max_x1, max_x2), max(max_y1, max_y2)\n\n\nclass OceanMap:\n    def __init__(self, board):\n        self.board = board\n\n    def __str__(self):\n        return str(\"\\n\".join(list(map(lambda line: ''.join(map(str, line)), self.board))))\n\n    def higher_than_two(self):\n        # counts the cells covered by two or more lines\n        count = 0\n        for row in self.board:\n            for element in row:\n                if element > 1:\n                    count = count + 1\n        return count\n\n\ndef part_one(coordinates):\n    (xmin, ymin, xmax, ymax) = boundaries(coordinates)\n    table = OceanMap([[0 for _ in range(ymax + 1)] for _ in range(xmax + 1)])\n    for (x1, y1, x2, y2) in coordinates:\n        indices = []\n        if x1 == x2 and y1 != y2:\n            if y1 < y2:\n                indices = [(x1, y) for y in range(y1, y2 + 1)]\n            else:\n                indices = [(x1, y) for y in range(y2, y1 + 1)]\n        elif x1 != x2 and y1 == y2:\n            if x1 < x2:\n                indices = [(x, y1) for x in range(x1, x2 + 1)]\n            else:\n                indices = [(x, y1) for x in range(x2, x1 + 1)]\n        for index in indices:\n            table.board[index[0]][index[1]] = table.board[index[0]][index[1]] + 1\n    print(table.higher_than_two())\n\n\ndef part_two(coordinates):\n    (xmin, ymin, xmax, ymax) = boundaries(coordinates)\n    table = OceanMap([[0 for _ in range(ymax + 1)] for _ in range(xmax + 1)])\n    for (x1, y1, x2, y2) in coordinates:\n        indices = []\n        if x1 == x2 and y1 != y2:\n            if y1 < y2:\n                indices = [(x1, y) for y in range(y1, y2 + 1)]\n            else:\n                indices = [(x1, y) for y in range(y2, y1 + 1)]\n        elif x1 != x2 and y1 == y2:\n            if x1 < x2:\n                indices = [(x, y1) for x in range(x1, x2 + 1)]\n            else:\n                indices = [(x, y1) for x in range(x2, x1 + 1)]\n        else:\n            if x1 < x2:\n                if y1 < y2:\n                    indices = [(x, y) for x, y in zip(range(x1, x2 + 1), range(y1, y2 + 1))]\n                else:\n                    indices = [(x, y) for x, y in zip(range(x1, x2 + 1), range(y1, y2 - 1, -1))]\n            else:\n                if y1 < y2:\n                    indices = [(x, y) for x, y in zip(range(x1, x2 - 1, -1), range(y1, y2 + 1))]\n                else:\n                    indices = [(x, y) for x, y in zip(range(x1, x2 - 1, -1), range(y1, y2 - 1, -1))]\n        for index in indices:\n            table.board[index[0]][index[1]] = table.board[index[0]][index[1]] + 1\n    # print(table)\n    print(table.higher_than_two())\n\n\ndef main():\n    lines = read_file(\"input.txt\")\n    coordinates = parse_coordinates(lines)\n    part_one(coordinates)\n    part_two(coordinates)\n\n\nmain()\n","repo_name":"sundayez/adventOfCode2021","sub_path":"day5/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"13517660973","text":"import numpy as np\nfrom sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier\n\n\n\nclass AdaBoost():\n    def __init__(self, n_clfs=100):\n        '''\n        Initialize the AdaBoost model.\n\n        Inputs:\n            n_clfs (default 100): Initializer for self.n_clfs. \n        \n        Attributes:\n            self.n_clfs: The number of DT weak classifiers.\n            self.coefs: A list of the AdaBoost coefficients.\n            self.clfs: A list of the DT weak classifiers, initialized as empty.\n        '''\n        self.n_clfs = n_clfs\n        self.coefs = []\n        self.clfs = []\n\n    def fit(self, X, Y, n_nodes=4):\n        '''\n        Fit the AdaBoost model. 
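Each boosting round fits a depth-limited tree on the reweighted data and stores its coefficient. 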
Note that since we are implementing this method in a class, rather\n        than having a bunch of inputs and outputs, you will deal with the attributes of the class.\n        (see the __init__() method).\n        \n        This method should thus train self.n_clfs DT weak classifiers and store them in self.clfs,\n        with their coefficients in self.coefs.\n\n        Inputs:\n            X: A (N, D) shaped numpy array containing the data points.\n            Y: A (N, ) shaped numpy array containing the (float) labels of the data points.\n               (Even though the labels are ints, we treat them as floats.)\n            n_nodes: The max number of nodes that the DT weak classifiers are allowed to have.\n        \n        Outputs:\n            A (N, T) shaped numpy array, where T is the number of iterations / DT weak classifiers,\n            such that the t^th column contains D_{t+1} (the dataset weights at iteration t+1).\n        '''\n        N, D = X.shape\n        T = self.n_clfs  # number of boosting rounds\n        \n        W = np.empty((N, T))\n        W[:, 0] = np.ones((N,)) / N\n        \n        for t in range(T):\n            if t % 100 == 0:\n                print(\"t: %d\" % t)\n            \n            # train classifier with best decision stump\n            clf = DecisionTreeClassifier(max_leaf_nodes=n_nodes)\n            clf.fit(X, Y, sample_weight=W[:,t])\n            self.clfs.append(clf)\n            \n            # compute error\n            err = np.dot(W[:,t], np.abs(Y - clf.predict(X))/2)\n\n            # define step size\n            a = 1/2 * np.log((1 - err) / err)\n            self.coefs.append(a)\n            \n            if t+1 < T:\n                # update weighting\n                W[:, t+1] = W[:, t] * np.exp(-a*Y*clf.predict(X))\n                Z = sum(W[:, t+1])\n                W[:, t+1] = W[:, t+1] / Z\n        return W\n    \n    def predict(self, X):\n        '''\n        Predict on the given dataset.\n\n        Inputs:\n            X: A (N, D) shaped numpy array containing the data points.\n        \n        Outputs:\n            A (N, ) shaped numpy array containing the (float) labels of the data points.\n            (Even though the labels are ints, we treat them as floats.)\n        '''\n        # Initialize predictions.\n        Y_pred = np.zeros(len(X))\n        \n        # Add predictions from each DT weak classifier.\n        for i, clf in enumerate(self.clfs):\n            Y_curr = self.coefs[i] * clf.predict(X)\n            Y_pred += Y_curr\n\n        # Return the sign of the predictions.\n        return np.sign(Y_pred)\n\n    def loss(self, X, Y):\n        '''\n        Calculate the classification loss.\n\n        Inputs:\n            X: A (N, D) shaped numpy array containing the data points.\n            Y: A (N, ) shaped numpy array containing the (float) labels of the data points.\n               (Even though the labels are ints, we treat them as floats.)\n        \n        Outputs:\n            The classification loss.\n        '''\n        # Calculate the points where the predictions and the ground truths don't match.\n        Y_pred = self.predict(X)\n        misclassified = np.where(Y_pred != Y)[0]\n\n        # Return the fraction of such points.\n        return float(len(misclassified)) / len(X)\n","repo_name":"SaberaTalukder/AdaGradStudents","sub_path":"adaboostorder.py","file_name":"adaboostorder.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74982775624","text":"import numpy\nfrom core import Timer, Unit, INF, Bind\n\n\nclass CSEvictUnit(Unit):\n    \"\"\"\n    Listens for CS announces and implements automatic eviction for CSUnit,\n    emulating how a packet gets replaced inside a node\n    \"\"\"\n    MODE_TYPES = ['CONST', 'FIFO', 'LRU', 'GEOMETRIC']\n\n    def __init__(self, mode=None, life_time=INF):\n        self.table = {}  # {Name:Timer(self.discard), ...}\n        self._mode = mode\n        self.life_time = life_time\n\n    def install(self, announces, api):\n        super().install(announces, api)\n        announces['csStore'].append(self.storeEvent)\n        announces['csEvict'].append(self.evictEvent)\n        announces['csHit'].append(self.hitEvent)\n\n    def discard(self, name):\n        self.api['CS.discard'](name)\n\n    # 
-------------------------------------------------------------------------\n    def storeEvent(self, packet):\n        if packet.name not in self.table:\n            self.table[packet.name] = Timer( Bind(self.discard, packet.name) )\n\n        if self._mode in ('FIFO', 'LRU'):\n            self.table[packet.name].timing(self.life_time)\n        elif self._mode == 'GEOMETRIC':\n            self.table[packet.name].timing( numpy.random.geometric(1/self.life_time) )  # geometric distribution\n\n    def evictEvent(self, packet):\n        del self.table[packet.name]\n\n    def hitEvent(self, packet):\n        assert packet.name in self.table\n        if self._mode == 'LRU':\n            self.table[packet.name].timing(self.life_time)\n\n    def missEvent(self, packet):\n        pass\n\n\nif __name__ == '__main__':\n    from core import clock, AnnounceTable, CallTable\n    from unit import ContentStoreUnit\n    from debug import *\n\n    anno, api = AnnounceTable(), CallTable()\n\n    cs = ContentStoreUnit()\n    cs.install(anno, api)\n\n    evict = CSEvictUnit('FIFO', 6)\n    evict.install(anno, api)\n\n    cs.store(dp_A)\n\n    for i in range(5):\n        print(clock.time, cs.table)\n        clock.step()\n\n    data = cs.match(ip_A)\n    print(ip_A, data)\n\n    for i in range(10):\n        print(clock.time, cs.table)\n        clock.step()\n","repo_name":"GeekBerry/LICNsim","sub_path":"unit/cs_evict.py","file_name":"cs_evict.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"33641378984","text":"from collections import Counter\n\na = 'Anagram'\nb = 'magrana'\n\nword1 = input().strip().lower()\nword2 = input().strip().lower()\ndict_1 = Counter(word1)\ndict_2 = Counter(word2)\ncomp = dict_1 == dict_2\n\nif comp:\n\tprint('Anagrams')\nelse:\n\tprint('Not Anagrams')\n","repo_name":"stellakaniaru/hackerthon_solutions","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35368598225","text":"import array as arr\n\n# Contains Duplicate - MICROSOFT INTERVIEW\n\nmyList = [1, 3, 2, 3, 4, 5, 6]\n\n# Brute Force\n# time complexity = O(n^2)\n# space complexity = O(1)\nisDup = False\nfor i in range(0, len(myList)):\n    for j in range(0, len(myList)):\n        if (j == i):\n            continue\n        elif (myList[i] == myList[j]):\n            isDup = True\n\nprint(isDup)\n\n\n# Another approach: sort the list in ascending order,\n# call the current element x and the next one y,\n# and walk the list checking each adjacent pair (x and y move on to the next index at every step).\n# time complexity --> O(NLogN)\n# space complexity --> O(1)\n\n\n# What if the interviewer asks me to save time and says memory is less of a concern?\n# time com. --> O(n)\n# space com. --> O(n)\n# if that is what they wanted:\n# it can be done with a hashset, which keeps exactly one unique copy of each value.\n\n# myList = [1, 3, 2, 3, 4, 5, 6]\n\ndef solution():\n    hashSet = set()\n    for num in myList:\n        if num in hashSet:  # checks whether this element is already in the hashset\n            return True\n        hashSet.add(num)\n    return False\n\n\nsolution()\n\n\n# The simplest solution of all, at a glance:\n# if the number of elements in the list and in the hashset are not equal,\n# there must be a duplicate, 
because the hashset keeps only one copy of each repeated element.\ndef solution2():\n    return len(myList) != len(set(myList))\n\n\nsolution2()\n\n# What the interviewer expects:\n# finding solutions to a problem, then being able to evaluate and explain\n# which of them to choose based on time and space complexity.\n\n\n\n","repo_name":"BaranDgn/DataStructureStudies","sub_path":"InterviewQuestions/ArrayAndListInterviewQ/containsDuplication.py","file_name":"containsDuplication.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"17085749606","text":"'''\nMongoDB stores data in JSON-like documents, which makes the database very \nflexible and scalable.\nTo be able to experiment with the code examples in this tutorial, \nyou will need access to a MongoDB database.\nYou can download a free MongoDB database at https://www.mongodb.com.\nOr get started right away with a MongoDB cloud service at https://www.mongodb.com/cloud/atlas.\n\ninstallation:\npip install pymongo\n\nyou will also need a mongodb driver to access the database.\n\nI'm using MongoDB Compass\n\n'''\nimport pymongo\n\n\ndef create_database(db_name: str, connection: pymongo.MongoClient):\n    # notice that: a database is not created until it gets content\n    mydb = connection[db_name]\n    print(connection.list_database_names())\n    print(f'{db_name} was successfully created!')\n    return mydb\n\n\ndef create_collection(db_name, collection_name):\n    mycol = db_name[collection_name]\n    return mycol\n\n\ndef insert_data(data, collection):\n    if isinstance(data, dict):\n        x = collection.insert_one(data)\n        print(f'Data {x.inserted_id} inserted successfully!') \n    elif isinstance(data, list):\n        x = collection.insert_many(data)\n        print('Data inserted successfully!')\n        print(x.inserted_ids) \n    else:\n        print('Invalid type. 
Please send a list or a dict to insert data.')\n\n\nif __name__ == '__main__':\n    # create mongodb connection\n    myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\n    # instantiating a database and its collection\n    mydb = myclient['test']\n    mycol = mydb['test']\n\n    # inserting one document\n    # data = {'name': 'Quedma'}\n    # insert_data(data, mycol)\n\n    # to insert multiple documents you need to pass a list of dicts\n    # list_data = [{'name': 'Joseph'}, {'name': 'Marlos'}, {'name': 'Kim'}]\n    # insert_data(list_data, mycol)\n\n    # find one\n    x = mycol.find_one()\n    print(x)\n\n    # find all - it returns an iterable cursor\n    x = mycol.find()\n    for data in x:\n        print(data)\n\n    # filtering the results\n    for data in mycol.find({'name': 'Quedma'}):\n        print(data)\n\n    # querying data\n    myquery = {'name': 'Quedma'}\n    # returns a generator\n    mydoc = mycol.find(myquery)\n\n    for data in mydoc:\n        print(data)\n\n    # advanced query: using operators\n    # $gt = greater than\n    # $sort, $position, $slice, $push, $each, $addToSet\n    myquery2 = {'name': {'$gt': 'Q'}}\n    mydoc = mycol.find(myquery2)\n    print('='* 60)\n    for x in mydoc:\n        print(x)\n\n    # filtering with regular expressions\n    myquery3 = {'name': {'$regex': '^[Kk]'}}\n    mydoc = mycol.find(myquery3)\n    print('*' * 60)\n    for x in mydoc:\n        print(x)\n","repo_name":"eliasantoniorodrigues1/mongodb_university_python","sub_path":"get_started_mongodb.py","file_name":"get_started_mongodb.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5949894734","text":"\"\"\"\nParses streaming tweets\n\"\"\"\n\nimport os\nfrom datetime import datetime, date, time\nfrom threading import Thread\nimport tweepy\nfrom dotenv import load_dotenv, find_dotenv\nfrom classifier import TweetClassifier\nfrom datastore import DataAPI\nimport logger\n\n# load_dotenv(dotenv_path=os.path.join(os.getcwd(), '.env'))\nload_dotenv(find_dotenv())\n\n_logger = logger.get_logger()\n\nclass TweetStreamListener(tweepy.StreamListener):\n    \"\"\"\n    Listener for tweets\n    \"\"\"\n\n    def __init__(self, db_interface):\n        super(TweetStreamListener, self).__init__()\n        self.db_interface = db_interface\n        if not self.db_interface.connected:\n            self.db_interface.connect()\n\n    def on_status(self, status):\n        sentiment_score = TweetClassifier.get_tweet_sentiment(status.text)\n        # _logger.debug((\"%1.3f \" + status.text), sentiment_score)\n        _logger.debug(\"Tweet found, saving sentiment\")\n\n        self.db_interface.store_sentiment({\n            # 'id': status.id,\n            'date': datetime.combine(\n                date.fromtimestamp(int(status.timestamp_ms) / 1000),\n                time()\n            ),\n            'text': status.text,\n            'sentiment': sentiment_score\n        })\n\nclass TweetAPI(object):\n    \"\"\"\n    Auth for Twitter\n    \"\"\"\n\n    def __init__(self):\n        '''\n        Class constructor or initialization method.\n        '''\n        # keys and tokens from the Twitter Dev Console\n        consumer_key = os.getenv('SST_CONSUMER_KEY')\n        consumer_secret = os.getenv('SST_CONSUMER_SECRET')\n        access_token = os.getenv('SST_ACCESS_TOKEN')\n        access_token_secret = os.getenv('SST_ACCESS_TOKEN_SECRET')\n\n        # attempt authentication\n        try:\n            # create OAuthHandler object\n            self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n            # set access token and secret\n            self.auth.set_access_token(access_token, access_token_secret)\n            # create tweepy API object to fetch tweets\n            self.api = tweepy.API(self.auth)\n        except Exception as ex:\n            _logger.error(\"Error: Authentication Failed...\" + str(ex))\n\nclass TweetStream(object):\n    \"\"\"\n    Wrapper 
for threaded streaming\n    \"\"\"\n\n    def __init__(self):\n        self.running = False\n        self.thread = None\n        self.stream = None\n\n    def _start(self):\n        api = TweetAPI()\n\n        db_interface = DataAPI()\n        db_interface.connect()\n        tickers = ['#' + t for t in db_interface.get_tickers_array()]  # materialise as a list for Stream.filter\n\n        stream_listener = TweetStreamListener(db_interface=db_interface)\n        self.stream = tweepy.Stream(auth=api.auth, listener=stream_listener)\n        self.stream.filter(languages=['en'], track=tickers)\n\n    def start(self):\n        \"\"\"\n        Begin streaming\n        \"\"\"\n        self.thread = Thread(target=self._start)\n        self.thread.daemon = True\n        self.running = True\n        self.thread.start()\n\n    def stop(self):\n        \"\"\"\n        End streaming\n        \"\"\"\n        self.running = False\n        self.stream.disconnect()\n        # self.thread.join()\n","repo_name":"sanjitdutta/sentiment-stock-trader","sub_path":"monitor/tweetstream.py","file_name":"tweetstream.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8058687951","text":"\"\"\"OmniglotOneShotDataset class.\"\"\"\n\nimport os\nimport copy\n\nfrom PIL import Image\n\nimport torch\nfrom torch.utils.data import Dataset\n\nimport torchvision\nfrom torchvision.datasets.utils import download_and_extract_archive, check_integrity\n\nimport numpy as np\n\nimport imageio\nfrom scipy import ndimage\n\n\n\nclass OmniglotTransformation:\n    \"\"\"Transform Omniglot digits by resizing, centring mass and inverting background/foreground.\"\"\"\n\n    def __init__(self, centre=True, invert=True, resize_factor=1.0):\n        self.centre = centre\n        self.invert = invert\n        self.resize_factor = resize_factor\n\n    def __call__(self, x):\n        # Resize\n        if self.resize_factor != 1.0:\n            height = int(self.resize_factor * x.shape[1])\n            width = int(self.resize_factor * x.shape[2])\n\n            x = torchvision.transforms.ToPILImage()(x)\n            x = torchvision.transforms.functional.resize(x, size=[height, width])\n\n            x = torchvision.transforms.functional.to_tensor(x)\n\n        # Invert image\n        if self.invert:\n            x = torch.max(x) - x\n\n        # Centre the image\n        if self.centre:\n            # NCHW => NHWC\n            x = x.permute(1, 2, 0)\n\n            # Compute centre\n            centre = np.array([int(x.shape[0]) * 0.5, int(x.shape[1]) * 0.5])\n\n            # Compute centre of mass\n            centre_of_mass = ndimage.measurements.center_of_mass(x.numpy())\n            centre_of_mass = np.array(centre_of_mass[:-1])\n\n            # Compute translation\n            translation = (centre - centre_of_mass).tolist()\n            translation.reverse()\n\n            # Apply transformation\n            # NHWC => NCHW\n            x = x.permute(2, 0, 1)\n            x = torchvision.transforms.ToPILImage()(x)\n            x = torchvision.transforms.functional.affine(x, 0, translation, scale=1.0, shear=0, resample=Image.BILINEAR)\n\n            # Convert back to tensor\n            x = torchvision.transforms.functional.to_tensor(x)\n\n        return x\n\n\nclass OmniglotOneShotDataset(Dataset):\n    \"\"\"Omniglot one-shot classification dataset.\"\"\"\n\n    num_runs = 20\n    fname_label = 'class_labels.txt'\n\n    folder = 'omniglot_oneshot'\n    download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python/one-shot-classification'\n    zips_md5 = {\n        'all_runs': 'e8996daecdf12afeeb4a53a179f06b19'\n    }\n\n    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):\n        \"\"\"\n        Args:\n            root (string): Root directory where the dataset is stored.\n            train (bool, optional): Select the training split (True) or the test split (False).\n            transform (callable, optional): Optional transform to be applied\n                on a sample.\n            target_transform (callable, optional): Optional transform applied to the label.\n            download (bool, optional): Download and extract the data if it is not present.\n        \"\"\"\n        self.root = root\n        self.train = train\n        self.transform = transform\n        
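# optional transform applied to the integer class labels in __getitem__\n        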
self.target_transform = target_transform\n\n        self.root = os.path.join(root, self.folder)\n        self.target_folder = self._get_target_folder()\n        self.phase_folder = self._get_phase_folder()\n\n        if download:\n            self.download()\n\n        self.filenames, self.labels = self.get_filenames_and_labels()\n\n        if not self._check_integrity():\n            raise RuntimeError('Dataset not found or corrupted.' +\n                               ' You can use download=True to download it')\n\n    def __len__(self):\n        return len(self.filenames)\n\n    def __getitem__(self, idx):\n        if torch.is_tensor(idx):\n            idx = idx.tolist()\n\n        label = self.labels[idx]\n\n        image_path = self.filenames[idx]\n        image = imageio.imread(image_path)\n\n        # Convert to float values in [0, 1]\n        image = image.astype(np.float32)\n        image = (image - image.min()) / (image.max() - image.min())\n\n        if self.transform:\n            image = self.transform(image)\n\n        if self.target_transform:\n            label = self.target_transform(label)\n\n        return image, label\n\n    def _check_integrity(self):\n        zip_filename = self._get_target_folder()\n        if not check_integrity(os.path.join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]):\n            return False\n        return True\n\n    def download(self):\n        if self._check_integrity():\n            print('Files already downloaded and verified')\n            return\n\n        filename = self._get_target_folder()\n        zip_filename = filename + '.zip'\n        url = self.download_url_prefix + '/' + zip_filename\n        download_and_extract_archive(url, self.root,\n                                     extract_root=os.path.join(self.root, filename),\n                                     filename=zip_filename, md5=self.zips_md5[filename])\n\n    def get_filenames_and_labels(self):\n        filenames = []\n        labels = []\n\n        for r in range(1, self.num_runs + 1):\n            rs = str(r)\n            if len(rs) == 1:\n                rs = '0' + rs\n\n            run_folder = 'run' + rs\n            target_path = os.path.join(self.root, self.target_folder)\n            # run_path = os.path.join(target_path, run_folder)\n\n            with open(os.path.join(target_path, run_folder, self.fname_label)) as f:\n                content = f.read().splitlines()\n            pairs = [line.split() for line in content]\n\n            test_files = [pair[0] for pair in pairs]\n            train_files = [pair[1] for pair in pairs]\n\n            train_labels = list(range(self.num_runs))\n            test_labels = copy.copy(train_labels)  # same labels as train, because we'll read them in this order\n\n            test_files = [os.path.join(target_path, file) for file in test_files]\n            train_files = [os.path.join(target_path, file) for file in train_files]\n\n            if self.train:\n                filenames.extend(train_files)\n                labels.extend(train_labels)\n            else:\n                filenames.extend(test_files)\n                labels.extend(test_labels)\n\n        return filenames, labels\n\n    def _get_target_folder(self):\n        return 'all_runs'\n\n    def _get_phase_folder(self):\n        return 'training' if self.train else 'test'\n\n","repo_name":"Cerenaut/pt-aha","sub_path":"lake/omniglot_one_shot_dataset.py","file_name":"omniglot_one_shot_dataset.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5143373638","text":"import logging\r\nfrom ortools.sat.python import cp_model\r\nimport pandas\r\ndata = pandas.read_csv(\"constraints.csv\")\r\n\r\ndays = [\"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"]\r\ntimes = [\"am\", \"pm\"]\r\nnum_main_workers = 9\r\nnum_standby_workers = 3\r\n\r\n# how much worse main shifts are than standby shifts. Must be an integer.\r\nmain_shift_weighting = 3\r\n# relative cost of a standby shift. 
Must be an integer.\r\nstandby_shift_weighting = 2\r\nbest_weighting = -1\r\n\r\n\r\nmax_time = 60 # max time to look for solutions in seconds\r\ndebug_info = True # Set to True and the rota will record Yes/Maybe requests\r\n\r\ndays_index = range(len(days))\r\npeople_index = list(data['First'].index)\r\ntimes_index = range(len(times))\r\nmodel = cp_model.CpModel()\r\nmain_shifts = {}\r\nstandby_shifts = {}\r\n\r\ntotal_num_shift_costs = len(days_index)*len(times_index) * (num_main_workers*main_shift_weighting + num_standby_workers*standby_shift_weighting)\r\ntotal_people = len(people_index)\r\naverage_num_shift_costs= int(total_num_shift_costs/total_people)\r\n\r\n\r\ntotal_num_main_shift_costs = len(days_index)*len(times_index) * (num_main_workers*main_shift_weighting )\r\n\r\naverage_num_main_shift_costs= int(total_num_main_shift_costs/total_people)\r\n\r\ndef get_variance_component(to_sum, average):\r\n a = model.NewIntVar(-250, 250, '')\r\n model.Add(a ==sum(to_sum) - average)\r\n\r\n\r\n \r\n\r\n e = model.NewIntVar(0, 250, '')\r\n model.AddAbsEquality(e,a)\r\n\r\n\r\n square_x = model.NewIntVar(0, 250, \"\")\r\n model.AddProdEquality(square_x, [e, e])\r\n return square_x\r\n# Here we create True/False for every person,day,shift possibility for main and standby shifts:\r\nfor p in people_index:\r\n for d in days_index:\r\n for t in times_index:\r\n main_shifts[(p, d,\r\n t)] = model.NewBoolVar('shift_n%id%is%i' % (p, d, t))\r\n standby_shifts[(p, d,\r\n t)] = model.NewBoolVar('standby_shift_n%id%is%i' % (p, d, t))\r\n\r\n# Here we state that there must be X main workers and Y standby workers per shift\r\nfor d in days_index:\r\n for t in times_index:\r\n model.Add(sum(main_shifts[(p, d, t)]\r\n for p in people_index) == num_main_workers)\r\n model.Add(sum(standby_shifts[(p, d, t)]\r\n for p in people_index) == num_standby_workers)\r\n\r\n# Here we state that one person can be assigned no more than one shift per day\r\nfor p in people_index:\r\n for d in days_index:\r\n model.Add(\r\n sum(main_shifts[(p, d, t)] + standby_shifts[(p, d, t)] for t in times_index) <= 1)\r\n\r\n# Here we state that people cannot work more days than they have offered to:\r\npeople_num_shifts_list = []\r\npeople_num_main_shifts_list = []\r\nfor p in people_index:\r\n try:\r\n max_shifts = int(data['days'][p])\r\n except ValueError:\r\n # if they have not entered a numeric value assume they are prepared to work five days\r\n max_shifts = 5\r\n to_sum = []\r\n to_sum_main = []\r\n to_sum_raw = []\r\n for d in days_index:\r\n for t in times_index:\r\n to_sum.append(main_shift_weighting *\r\n main_shifts[(p, d, t)] + standby_shift_weighting*standby_shifts[(p, d, t)])\r\n to_sum_main.append(main_shift_weighting *main_shifts[(p, d, t)])\r\n to_sum_raw.append(main_shifts[(p, d, t)] + standby_shifts[(p, d, t)])\r\n # People cannot work more than offered\r\n model.Add(sum(to_sum_raw) <= max_shifts)\r\n\r\n # We define a variable to hold the number of shifts this person is doing so we can then find the maximum of all of these\r\n\r\n\r\n square_x = get_variance_component(to_sum, average_num_shift_costs)\r\n people_num_shifts_list.append(square_x)\r\n\r\n square_x = get_variance_component(to_sum_main, average_num_main_shift_costs)\r\n people_num_main_shifts_list.append(square_x)\r\n\r\n# We force this new variable to be the maximum shifts one person does (with weightings)\r\n\r\n\r\n# Here we work out how much people have to work on days they are only \"Maybe\" willing to work on\r\n# we can then minimise 
this\r\n\r\nloss_list = []\r\nfor p in people_index:\r\n for d in days_index:\r\n for t in times_index:\r\n entry = data[days[d]+\"_\"+times[t]][p]\r\n try:\r\n entry = entry.lower().strip()\r\n except AttributeError:\r\n entry = \"no\"\r\n logging.warning(f\"'{entry}'' parsed as no.\\n\\n\")\r\n if entry == \"yes\":\r\n pass\r\n elif entry == \"best\":\r\n loss_list.append(main_shifts[(p, d, t)] * main_shift_weighting*best_weighting)\r\n\r\n elif entry == \"maybe\":\r\n # If maybe we count how many times we had to use maybes to try to minimise\r\n loss_list.append(main_shifts[(p, d, t)] * main_shift_weighting)\r\n loss_list.append(standby_shifts[(p, d, t)] * standby_shift_weighting)\r\n else:\r\n # If no we add a hard constraint\r\n model.Add(main_shifts[(p, d, t)] == 0)\r\n model.Add(standby_shifts[(p, d, t)] == 0)\r\n\r\nmodel.Minimize(10*sum(loss_list) + sum(people_num_shifts_list) )\r\n\r\n\r\nsolver = cp_model.CpSolver()\r\nsolver.parameters.max_time_in_seconds = max_time\r\nresult = solver.Solve(model)\r\n\r\nprint(result)\r\n# Statistics.\r\n#print()\r\n#print('Statistics')\r\n#print(' - conflicts : %i' % solver.NumConflicts())\r\n#print(' - branches : %i' % solver.NumBranches())\r\n#print(' - wall time : %f s' % solver.WallTime())\r\nprint(' - objective: %f' % solver.ObjectiveValue() )\r\n\r\n\r\n# The solver has now solved the problem\r\n# The rest of the code is just to print out the outputs\r\n\r\nmain_counts = [0 for x in people_index]\r\nstandby_counts = [0 for x in people_index]\r\nmaybe_counts = [0 for x in people_index]\r\nshift_strings = [ [] for x in people_index]\r\n\r\nmain_output = {}\r\nstandby_output = {}\r\n\r\nfor d in days_index:\r\n for t in times_index:\r\n main_output[(d, t)] = []\r\n standby_output[(d, t)] = []\r\n for p in people_index:\r\n\r\n if solver.Value(main_shifts[(p, d, t)]) == 1:\r\n shift_strings[p].append( days[d]+\" \"+times[t] + \" (main)\")\r\n main_counts[p] = main_counts[p]+1\r\n if debug_info:\r\n main_output[(d, t)].append(data['First'][p] +\r\n \"_\" + data[days[d]+\"_\"+times[t]][p])\r\n else:\r\n main_output[(d, t)].append(data['First'][p])\r\n if data[days[d]+\"_\"+times[t]][p] == \"maybe\":\r\n maybe_counts[p] = maybe_counts[p]+1\r\n\r\n\r\n\r\n\r\n elif solver.Value(standby_shifts[(p, d, t)]) == 1:\r\n shift_strings[p].append(days[d]+\" \"+times[t] + \" (standby)\")\r\n standby_counts[p] = standby_counts[p]+1\r\n if debug_info:\r\n standby_output[(d, t)].append(\r\n data['First'][p] + \"_\" + data[days[d]+\"_\"+times[t]][p])\r\n else:\r\n standby_output[(d, t)].append(data['First'][p])\r\n\r\n if data[days[d]+\"_\"+times[t]][p] == \"maybe\":\r\n maybe_counts[p] = maybe_counts[p]+1\r\n\r\nline = \"\"\r\nfor d in days_index:\r\n for t in times_index:\r\n line = line + days[d]+\"_\"+times[t]+\"\\t\"\r\nprint(line)\r\n\r\nfor r in range(num_main_workers+num_standby_workers):\r\n if r == num_main_workers: # blank line between main and standby\r\n print(\"\".join([\"\\t\" for x in range(d*t)]))\r\n line = \"\"\r\n for d in days_index:\r\n for t in times_index:\r\n if r < num_main_workers:\r\n name = main_output[(d, t)][r]\r\n else:\r\n name = standby_output[(d, t)][r-num_main_workers]\r\n line = line + name + \"\\t\"\r\n print(line)\r\n\r\nprint()\r\nprint()\r\nprint(\"TOTALS\")\r\nprint()\r\nprint(\"Name\\tMain shifts\\tStandby shifts\\tMaybe shifts \\t Scoring \\t Max shifts willing to do\")\r\nfor p in people_index:\r\n try:\r\n max_days = str(int(data['days'][p]))\r\n except:\r\n max_days = \"Inf\"\r\n print(data['First'][p] + \" \" + 
data['Last'][p] + \"\\t\" +\r\n str(main_counts[p])+\"\\t\" + str(standby_counts[p])+\"\\t\" + str(maybe_counts[p])+\"\\t\" + str(standby_counts[p]*standby_shift_weighting + main_counts[p]*main_shift_weighting) + \"\\t\" + max_days)\r\n\r\n\r\nfor p in people_index:\r\n if main_counts[p]>0 or standby_counts[p]>0:\r\n print(data['email'][p] +\"\\t\"+data['First'][p] +\"\\t\"+ \",\".join(shift_strings[p] )) \r\n","repo_name":"theosanderson/CL3-rota","sub_path":"rota.py","file_name":"rota.py","file_ext":"py","file_size_in_byte":8241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30497070174","text":"import argparse\r\nimport logging\r\nimport time\r\n\r\nimport numpy as np\r\n\r\nimport torch.utils.data\r\n\r\nfrom utils.dataset_processing import evaluation, grasp\r\nfrom utils.data import get_dataset\r\nfrom models.common import post_process_output\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(description='Evaluate TCT and FCNN')\r\n\r\n # Network\r\n parser.add_argument('--network', type=str, help='Path to saved network')\r\n \r\n # Dataset & Data & Training\r\n parser.add_argument('--dataset', type=str, help='Dataset Name (\"cornell\" or \"jaquard\")')\r\n parser.add_argument('--dataset-path', type=str, help='Path to dataset')\r\n parser.add_argument('--use-depth', type=int, default=1, help='Use Depth image for evaluation (1/0)')\r\n parser.add_argument('--use-rgb', type=int, default=1, help='Use RGB image for evaluation (0/1)')\r\n parser.add_argument('--augment', action='store_true', help='Whether data augmentation should be applied')\r\n parser.add_argument('--split', type=float, default=0.9, help='Fraction of data for training (remainder is validation)')\r\n parser.add_argument('--ds-rotate', type=float, default=0.0,\r\n help='Shift the start point of the dataset to use a different test/train split')\r\n parser.add_argument('--num-workers', type=int, default=8, help='Dataset workers')\r\n\r\n parser.add_argument('--n-grasps', type=int, default=1, help='Number of grasps to consider per image')\r\n parser.add_argument('--iou-eval', action='store_true', help='Compute success based on IoU metric.')\r\n parser.add_argument('--vis', action='store_true', help='Visualise the network output')\r\n\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\n# --dataset jacquard --dataset-path datasets\\Jacquard_Dataset --split 0.95 --iou-eval\r\n# --dataset cornell --dataset-path datasets\\Cornell_dataset --iou-eval\r\n\r\nif __name__ == '__main__':\r\n args = parse_args()\r\n\r\n # Load Network\r\n net = torch.load(args.network)\r\n device = torch.device(\"cuda:0\")\r\n\r\n # Load Dataset\r\n logging.info('Loading {} Dataset...'.format(args.dataset.title()))\r\n Dataset = get_dataset(args.dataset)\r\n test_dataset = Dataset(args.dataset_path, ds_rotate=args.ds_rotate,\r\n random_rotate=args.augment, random_zoom=args.augment,\r\n include_depth=args.use_depth, include_rgb=args.use_rgb)\r\n\r\n indices = list(range(test_dataset.length))\r\n split = int(np.floor(args.split * test_dataset.length))\r\n \r\n val_indices = indices[split:]\r\n val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)\r\n logging.info('Validation size: {}'.format(len(val_indices)))\r\n\r\n test_data = torch.utils.data.DataLoader(\r\n test_dataset,\r\n batch_size=1,\r\n num_workers=args.num_workers,\r\n sampler=val_sampler\r\n )\r\n logging.info('Done')\r\n\r\n results = {'correct': 
0, 'failed': 0}\r\n\r\n total_inference_time = 0\r\n\r\n with torch.no_grad():\r\n for idx, (x, y, didx, rot, zoom) in enumerate(test_data):\r\n\r\n logging.info('Processing {}/{}'.format(idx+1, len(test_data)))\r\n\r\n start = time.perf_counter()\r\n\r\n xc = x.to(device)\r\n yc = [yi.to(device) for yi in y]\r\n lossd = net.compute_loss(xc, yc)\r\n\r\n total_inference_time += time.perf_counter() - start\r\n\r\n q_img, ang_img, width_img = post_process_output(lossd['pred']['pos'], lossd['pred']['cos'],\r\n lossd['pred']['sin'], lossd['pred']['width'])\r\n\r\n if args.iou_eval:\r\n s = evaluation.calculate_iou_match(q_img, ang_img, test_data.dataset.get_gtbb(didx, rot, zoom),\r\n no_grasps=args.n_grasps,\r\n grasp_width=width_img,\r\n )\r\n if s:\r\n results['correct'] += 1\r\n else:\r\n results['failed'] += 1\r\n\r\n if args.vis:\r\n evaluation.plot_output(test_data.dataset.get_rgb(didx, rot, zoom, normalise=False),\r\n q_img, \r\n ang_img, \r\n width_img, \r\n no_grasps=5, \r\n )\r\n\r\n avg_inference_time = total_inference_time / len(test_data)\r\n logging.info('Average evaluation time per image: {}ms'.format(avg_inference_time * 1000))\r\n\r\n if args.iou_eval:\r\n logging.info('IOU Results: %d/%d = %f' % (results['correct'],\r\n results['correct'] + results['failed'],\r\n results['correct'] / (results['correct'] + results['failed'])))\r\n\r\n \r\n","repo_name":"UntitledGrub/roboticgrasping","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32197491810","text":"from .. import models\nimport sqlalchemy.types\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.sql.functions import GenericFunction\n\nfrom contextlib import contextmanager\n\n\ndef get_or_create(model, **kwargs):\n try:\n returned = model.query.filter_by(**kwargs).one()\n except NoResultFound:\n returned = model(**kwargs)\n models.db.session.add(returned)\n models.db.session.flush()\n models.db.session.commit()\n return returned\n\n\n@contextmanager\ndef statement_timeout_context(timeout_seconds=60):\n prev = models.db.session.execute('show statement_timeout').scalar()\n assert prev\n models.db.session.execute('SET statement_timeout={}'.format(timeout_seconds * 1000))\n yield\n models.db.session.execute('SET statement_timeout={}'.format(prev))\n","repo_name":"getslash/backslash","sub_path":"flask_app/utils/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"20518213989","text":"# -*- coding: utf-8 -*-\nimport time\nimport logging\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom django.template.response import TemplateResponse\n\nimport socket\nfrom django_redis import get_redis_connection\n\nredis = get_redis_connection('monitor')\nhost = socket.gethostname()\n\nstart_time = time.time()\n\nlogger = logging.getLogger(__name__)\n\n\ndef default_home(request,\n template_name='home.html'):\n\n key = 'test:hits:default_home'\n redis.incr(key)\n\n context = {\n 'hostname': host,\n 'hits': int(redis.get(key)),\n }\n\n logger.info('hits default_home: {hits}'.format(**context))\n logger.error('rollbar test. 
hits default_home: {hits}'.format(**context))\n\n return TemplateResponse(request, template_name, context)\n\n\n@api_view(['GET'])\ndef heartbeat(request):\n return Response({\n 'startTime': start_time,\n 'upTime': time.time() - start_time,\n 'status': 'running',\n 'mode': 'simple',\n })\n\n\n@api_view(['GET'])\ndef redis_health(request):\n key = 'test:hits:redis_health'\n\n hits = redis.incr(key)\n\n return Response({\n 'startTime': start_time,\n 'upTime': time.time() - start_time,\n 'status': 'running',\n 'mode': 'redis',\n 'hostname': host,\n 'hits': hits,\n })\n","repo_name":"JackonYang/django2-python3-docker-tmpl","sub_path":"heartbeat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"44073917045","text":"class Solution(object):\n def carPooling(self, trips, capacity):\n \"\"\"\n :type trips: List[List[int]]\n :type capacity: int\n :rtype: bool\n \"\"\"\n \n points = [0] # changes of num of passengers at locations that pick or drop off them\n \n for num, start, end in trips:\n while len(points) <= end:\n points.append(0)\n points[start] += num\n points[end] -= num\n \n \n for i in range(1, len(points)):\n points[i] += points[i - 1]\n \n if points[i] > capacity:\n return False\n \n \n return True\n ","repo_name":"ericcheng09/LeetCodeSolution_Python","sub_path":"Scripts/1094.py","file_name":"1094.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35072733195","text":"\nfrom collections import deque \n\nn, m = map(int, input().split()) \n\narr = [list(map(int, input())) for _ in range(n)] \n\ndx, dy = [-1, 0, 1, 0], [0, -1, 0, 1] \nqueue = deque() \n\ncheck = [[[-1]*2 for _ in range(m)] for _ in range(n)] \n\nqueue.append((0, 0, 0)) \ncheck[0][0][0] = 1 \n\nwhile queue: \n x, y, cnt = queue.popleft() \n\n for i in range(4): \n nx, ny = x+dx[i], y+dy[i] \n if 0 <= nx < n and 0 <= ny < m and check[nx][ny][cnt] == -1: \n if arr[nx][ny] == 0: \n check[nx][ny][cnt] = check[x][y][cnt] + 1 \n queue.append((nx, ny, cnt)) \n else: \n if cnt < 1: \n check[nx][ny][cnt+1] = check[x][y][cnt] + 1 \n queue.append((nx, ny, cnt+1)) \n \nans = -1 \nfor elem in check[n-1][m-1]: \n if elem != -1: \n ans = elem \n elif ans < elem: ans = elem \nprint(ans) \n\n","repo_name":"Woo-Dong/BREAK_coding_test","sub_path":"Level1/Graph_BFS/13_2206.py","file_name":"13_2206.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38641421698","text":"#!/usr/bin/env python3\n\n# Created by: Wajd Mariam\n# Created on: November 2019\n# This program calculates the volume of the Tetrahedron\n\nimport math\n\n\ndef calculate_volume(side):\n # this function calculates volume\n # process\n volume = (side ** 3) / (6 * math.sqrt(2))\n\n # output\n print(\"The volume of the Tetrahedron is {} cm³\".format(round(volume, 2)))\n\n\ndef main():\n # this function calls functions\n # welcome statement\n print(\"This program calculates the Tetrahedron volume\")\n print(\"\")\n\n # try statement\n # input\n try:\n side_from_user = int(input(\"Enter length of Tetrahedron side(cm): \"))\n except Exception:\n print(\"Invalid entry, please try again\")\n else:\n # calling function if side_from_user is valid\n calculate_volume(side_from_user)\n finally:\n print(\"\")\n print(\"Thank you for using my 
program\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wajdm/ICS3UR-Assignment-6b-Python","sub_path":"assignment6b.py","file_name":"assignment6b.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69809700745","text":"'''\nOpenCV Python Tutorial\n\nCovered in this #2 (Image Fundamentals and Manipulation):\n 1. Image Representation\n 2. Values that Represent our Pixels\n 3. Accessing Pixel Values\n 4. Changing Pixel Colors\n 5. Copying & Pasting Parts of Image\n\nPixel in Image:\n blue green red\n [0, 0, 0]\n 0-255\n'''\n\nimport cv2 as cv\nimport random\n\ntry:\n img = cv.imread('../assets/images/Me_2023-07-24_DL.png', cv.IMREAD_COLOR) # numpy.ndarray\n print(f\"img.shape \\n {img.shape}\") # (row, column, channels(D))\n print(f\"img[0] \\n {img[257]}\") # row 257\n print(f\"img[257][45:400] \\n {img[257][45:400]}\")\n \n # random and replace Pixels\n for i in range(100):\n for j in range(img.shape[1]):\n img[i][j] = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]\n \n # copy and replace Pixels\n tag = img[400:700, 600:900]\n img[100:300, 650:950] = tag\n\n cv.imshow('levantrungits', img)\n cv.waitKey(0)\n \nexcept Exception as ex:\n print(f\"Error: {ex}\")\n\ncv.destroyAllWindows()","repo_name":"levantrungits/data-science-cheat-sheets","sub_path":"opencv/opencv_02.py","file_name":"opencv_02.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2992734698","text":"primeiro_termo = int(input(\"Digite o primeiro termo:\"))\nrazao = int(input(\"Digite a razão: \"))\ncontador_primario = 0\nescolha = 1\ntotal = 0\nescolha = 10\n\nwhile escolha != 0:\n total = total + escolha\n while contador_primario < total:\n termo = primeiro_termo + (contador_primario*razao)\n print(f\"{termo}-\",end=\" \")\n contador_primario += 1\n print(\"PAUSA\")\n escolha = int(input(\"\\nQuantos termos voce quer amostrar a mais?\"))\nprint(f\"Prohrma finalizado com {total} itens escolhidos\")\n","repo_name":"DanielNaiff/Python_Studies","sub_path":"Curso_do_Guanabara/Módulo 2/ex062.py","file_name":"ex062.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27838626480","text":"def plusMinus(arr):\n lst = [0]*3 # pos, neg, zero\n for i in arr:\n if i > 0: lst[0] += 1\n elif i < 0: lst[1] += 1\n elif i == 0: lst[2] += 1\n string = \"\"\n for i in lst: string += \"{:.6f}\".format(i/len(arr)) + \"\\n\"\n print(string)\n return string\n \n \n\nplusMinus([1, 1, 0, -1, -1])\n ","repo_name":"LahiruHW/portfolio","sub_path":"Practice Problems/Hackerrank/plusMinus/plusMinus.py","file_name":"plusMinus.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41441184536","text":"from setuptools import setup, find_packages\n \nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Education',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3'\n]\n \nsetup(\n name='cpf',\n version='2.1',\n description='Gera e checa CPFs de acordo com o padrão brasileiro.',\n long_description=open('README.txt').read() + '\\n\\n' + open('CHANGELOG.txt').read(),\n url='', \n 
author='pedrokp',\n    author_email='pedrokp@protonmail.com',\n    license='MIT', \n    classifiers=classifiers,\n    keywords='cpf', \n    packages=find_packages(),\n    install_requires=[''] \n)","repo_name":"pedrokpp/gerador-e-checker-de-cpf","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"}
+{"seq_id":"69801111624","text":"\"\"\"\r\nImplement a queue with two stacks, supporting the queue's Push and Pop operations. The elements of the queue are ints.\r\nhttps://www.nowcoder.com/practice/54275ddae22f475981afa2244dd448c6?tpId=13&tqId=11158&rp=2&ru=%2Factivity%2Foj&qru=%2Fta%2Fcoding-interviews%2Fquestion-ranking\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n    def __init__(self):\r\n        self.stackA = []  # push\r\n        self.stackB = []  # pop\r\n    def push(self, node):\r\n        self.stackA.append(node)\r\n\r\n    # write code here\r\n    def pop(self):\r\n        if self.stackB:\r\n            return self.stackB.pop()\r\n        elif self.stackA:\r\n            while self.stackA:\r\n                self.stackB.append(self.stackA.pop())\r\n            return self.stackB.pop()\r\n        else:\r\n            return None\r\n\r\n","repo_name":"ciecus/leetcode_answers","sub_path":"others/stack_queue.py","file_name":"stack_queue.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"8555054822","text":"import json\nimport requests\n\napiKey = \"7310dd5bc23c33461ad6d71f286c1f05\"\nrootUrl = \"http://api.openweathermap.org/data/2.5/forecast?\"\n\ncityName = input(\"Enter Your City: \")\n\nurl = f\"{rootUrl}appid={apiKey}&q={cityName}\"\n\n\nallWeather = requests.get(url).json()\n\nfiltWeather = {}\n\ncount = 0\n\nforecasts = allWeather['list']  # avoid shadowing the built-in name 'list'\n\nfor entry in forecasts:\n    weatherList = entry['weather']\n    weatherDict = weatherList[0]\n    tempValue = weatherDict['description']\n    tempKey = entry['dt_txt']\n    filtWeather[tempKey] = tempValue\n\n\ndayDict = {}\nfor k, v in filtWeather.items():\n    if k[:10] in dayDict.keys():\n        dayDict[k[:10]].append([k[-9:], v])\n    else:\n        dayDict[k[:10]] = [[k[-9:], v]]\n\nfor v in dayDict.values():\n    for i in v:\n        print(i[-1][-4:])\n        if i[-1][-4:] == 'rain':\n            goodWeather = 'F'\n        elif i[-1][-4:] == 'snow':\n            goodWeather = 'F'\n        else:\n            goodWeather = 'T'\n\n        i.append(goodWeather)\n\n\n\n\nprint(dayDict)\n","repo_name":"ankou-k/cisc204_project","sub_path":"model-project-template-main/documents/draft/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"32588698505","text":"from unittest import case\nfrom pathlib import Path\nfrom json import dumps, load\nimport shutil\nimport tempfile\nimport os\n\nfrom mdclasses.builder import (create_configuration, read_configuration_objects, read_configuration,\n                               save_to_json, read_from_json)\nfrom mdclasses import ObjectType, ConfObject, Configuration, Module\nfrom mdclasses.parser import SupportConfigurationParser\n\ntest_data_root = Path(Path(__file__).parent).joinpath('test_data', 'config')\njson_report_path = Path(Path(__file__).parent).joinpath('test_data', 'json_data', 'report.json')\njson_config_path = Path(Path(__file__).parent).joinpath('test_data', 'json_data', 'configuration_not_full.json')\n\nencoding = 'utf-8'\n\n\nclass TestConfiguration(case.TestCase):\n\n    def test_read_configuration(self):\n\n        conf_path = Path(test_data_root).absolute()\n\n        conf = create_configuration(conf_path)\n\n        self.assertEqual(conf.name, 'Конфигурация', 'Не верно определено имя конфигурации.')\n        
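# the expected uuid is presumably the root uuid from the Configuration.xml test fixture\n        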
self.assertEqual(conf.uuid, '04e5fb66-c0ac-4f0b-8a97-f8f51ce50450', 'Не верно определен uuid конфигурации')\n self.assertEqual(\n conf.root_path,\n conf_path,\n 'Не верно установлен корень конфигурации'\n )\n\n self.assertEqual(len(conf.conf_objects), 46, 'Не корректно загружен состав объектов.')\n\n report = conf.get_object('Отчет1', ObjectType.REPORT)\n\n self.assertTupleEqual(\n (report.name, report.obj_type),\n ('Отчет1', ObjectType.REPORT),\n 'Не корректно найден объект.'\n )\n\n with self.assertRaises(IndexError, msg='Объекта \"Отчет112\" нет в конфигурации должно ожидали ошибку') as _:\n conf.get_object('Отчет11', ObjectType.REPORT)\n\n self.assertEqual(\n dumps(conf.to_dict(), ensure_ascii=False),\n Path(json_config_path).read_text(encoding=encoding),\n 'Конфигурация не верно сериализована'\n )\n\n def test_read_configuration_object(self):\n conf_path = Path(test_data_root).absolute()\n\n conf = create_configuration(conf_path)\n read_configuration_objects(conf)\n\n report = conf.get_object('Отчет1', ObjectType.REPORT)\n\n self.assertEqual(\n report.name,\n 'Отчет1',\n 'Объект имеет неожиданное имя.')\n\n self.assertEqual(\n report.obj_type,\n ObjectType.REPORT,\n 'Объект имеет неожиданный тип.')\n\n self.assertEqual(\n report.line_number,\n 15,\n 'Не верно определена строка в файле')\n\n self.assertEqual(\n report.file_name,\n conf_path.joinpath(Path('Reports/Отчет1.xml')),\n 'Не верно определена строка в файле')\n\n self.assertEqual(report.full_name, 'Report.Отчет1', 'Не верно определено имя')\n\n self.assertEqual(\n dumps(report.to_dict(), ensure_ascii=False),\n Path(json_report_path).read_text(encoding=encoding),\n 'Объект не верно сериализован')\n\n def test_read_props(self):\n pass\n\n def test_read_subsystem(self):\n conf_path = Path(test_data_root).absolute()\n conf = create_configuration(conf_path)\n read_configuration_objects(conf)\n\n sub_2 = conf.get_object('Подсистема2', ObjectType.SUBSYSTEM)\n\n self.assertEqual(sub_2.name, 'Подсистема2', 'Вложенная подсистема не прочитанна')\n\n for obj in conf.conf_objects:\n if obj.obj_type != ObjectType.SUBSYSTEM:\n continue\n ch = obj.childes\n objects = 1\n if obj.name == 'Подсистема1':\n objects = 44\n elif obj.name == 'Подсистема3':\n objects = 3\n\n self.assertEqual(len(ch), objects, f'Не все объекты подсистемы {obj} прочитанны')\n\n def test_read_obj_module(self):\n conf_path = Path(test_data_root).absolute()\n\n conf = create_configuration(conf_path)\n read_configuration_objects(conf)\n\n doc = conf.get_object('Документ1', ObjectType.DOCUMENT)\n doc.read_modules()\n\n self.assertEqual(len(doc.modules), 2, 'не все модули были прочитаны.')\n\n def test_read_forms(self):\n conf_path = Path(test_data_root).absolute()\n\n conf = create_configuration(conf_path)\n read_configuration_objects(conf)\n\n doc = conf.get_object('Документ1', ObjectType.DOCUMENT)\n doc.read_forms()\n self.assertEqual(len(doc.forms), 1, 'не все формы были прочитаны.')\n\n self.assertIsInstance(doc.forms[0].module, Module, 'Модуль не прочитан')\n\n def read_empty_form(self):\n conf_path = Path(test_data_root).absolute()\n conf = read_configuration(conf_path)\n\n filter = conf.get_object('HTTPСервис1', ObjectType.HTTP_SERVICE)\n filter.read_forms()\n\n def test_ext_path(self):\n conf_path = Path(test_data_root).absolute()\n conf = read_configuration(conf_path)\n\n obj = conf.get_object('Форма', ObjectType.COMMON_FORM)\n self.assertEqual(conf_path.joinpath('CommonForms', 'Форма', 'Ext'), obj.ext_path,\n 'Не верно определен путь ext для COMMON_FORM')\n\n obj = 
conf.get_object('Перечисление1', ObjectType.ENUM)\n self.assertEqual(conf_path.joinpath('Enums', 'Перечисление1', 'Ext'), obj.ext_path,\n 'Не верно определен путь ext для Enums')\n\n def test_form_path(self):\n conf_path = Path(test_data_root).absolute()\n conf = read_configuration(conf_path)\n\n obj = conf.get_object('Форма', ObjectType.COMMON_FORM)\n self.assertEqual(conf_path.joinpath('CommonForms', 'Форма'), obj.form_path,\n 'Не верно определен путь ext для COMMON_FORM')\n\n obj = conf.get_object('Документ1', ObjectType.DOCUMENT)\n self.assertEqual(conf_path.joinpath('Documents', 'Документ1', 'Forms'), obj.form_path,\n 'Не верно определен путь ext для Enums')\n\n def test_read_empty_module(self):\n conf_path = Path(test_data_root).absolute()\n conf = read_configuration(conf_path)\n\n filter = conf.get_object('КритерийОтбора1', ObjectType.FILTER_CRITERION)\n filter.read_modules()\n\n def test_read_all_configuration(self):\n conf_path = Path(test_data_root).absolute()\n read_configuration(conf_path)\n\n def test_read_support(self):\n support_path = test_data_root.parent.joinpath('support', 'ParentConfigurations.bin')\n support_parser = SupportConfigurationParser(support_path)\n data = support_parser.parse()\n\n self.assertIn('Конфигурация', data.keys(), 'Не обнаруженно описание конфигурации')\n conf_data = data['Конфигурация']\n\n self.assertIn('conf_version', conf_data.keys(), 'Не обнаруженно описание версии конфигурации')\n self.assertIn('conf_provider', conf_data.keys(), 'Не обнаруженно описание поставщика конфигурации')\n self.assertIn('conf_objects', conf_data.keys(), 'Не обнаруженно описание объектов')\n\n self.assertEqual(conf_data['conf_version'], '\"0.0.1\"', 'Не верно определена версия')\n self.assertEqual(conf_data['conf_provider'], '\"Прогтехника\"', 'Не верно определен поставщик')\n\n obj_data = conf_data['conf_objects']\n\n self.assertIn('1996326a-5156-4eb1-a9fe-5db6ab532426', obj_data.keys(), 'Не обнаруженно описание объекта')\n self.assertEqual(1, obj_data['1996326a-5156-4eb1-a9fe-5db6ab532426'], 'не верно определено свойство')\n\n self.assertEqual(len(obj_data), 11, 'Количество прочитанных объектов, определено не верно.')\n\n def test_from_json(self):\n config = read_from_json(json_config_path)\n\n def test_save_to_json(self):\n config = read_from_json(json_config_path)\n json_path = json_report_path.parent.joinpath('test')\n save_to_json(config, json_path)\n os.remove(json_path)\n\n\nclass TestChangeConfiguration(case.TestCase):\n\n def setUp(self) -> None:\n self.temp_path = test_data_root.parent.joinpath('temp').resolve().absolute()\n self.temp_config_path = self.temp_path.joinpath('config').resolve().absolute()\n self.base_config_path = self.temp_path.joinpath('config_base').resolve().absolute()\n if not self.temp_path.exists():\n self.temp_path.mkdir()\n shutil.copytree(test_data_root, self.temp_config_path)\n shutil.copytree(test_data_root, self.base_config_path)\n\n def test_save_configuration(self):\n conf = read_configuration(self.temp_config_path)\n\n conf.conf_objects.append(ConfObject('test', ObjectType.REPORT, conf))\n conf.conf_objects.append(ConfObject('test1', ObjectType.DOCUMENT, conf))\n\n tmp_file = tempfile.mktemp()\n shutil.copy(Path(conf.file_name), Path(tmp_file))\n\n conf.save_to_file()\n self.assertIn('test', Path(conf.file_name).read_text(encoding='utf-8'))\n self.assertIn('test1', Path(conf.file_name).read_text(encoding='utf-8'))\n os.remove(Path(conf.file_name))\n shutil.copy(Path(tmp_file), Path(conf.file_name))\n 
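# (Added note: the copy above restores the pristine configuration file from the\n        # temp backup taken before save_to_file(), leaving the shared fixture unchanged.)\n        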
os.remove(Path(tmp_file))\n\n def test_clone_obj(self):\n conf_base = read_configuration(self.base_config_path)\n filter = conf_base.get_object('КритерийОтбора1', ObjectType.FILTER_CRITERION)\n\n conf = read_configuration(self.temp_config_path)\n with self.assertRaises(ValueError) as ex:\n conf.add_object(filter)\n self.assertEqual(ex.exception.args[0], f'Объект {filter} уже есть в конфигурации')\n\n dir_path = filter.obj_dir\n file_name = filter.file_name\n filter.name = 'КритерийОтбора2'\n\n shutil.copytree(dir_path, filter.obj_dir)\n shutil.copy(file_name, filter.file_name)\n\n new_object = conf.add_object(filter)\n\n self.assertTrue(new_object.obj_dir.exists(), 'Данные объекта не перенесены')\n\n def tearDown(self) -> None:\n shutil.rmtree(self.temp_path)\n","repo_name":"AlexanderNiMo/mdclasses_1c","sub_path":"mdclasses/tests/tests_configuration.py","file_name":"tests_configuration.py","file_ext":"py","file_size_in_byte":11171,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5942712518","text":"from collections import defaultdict\nfrom itertools import islice, chain\n\nwith open(\"input.txt\") as fin:\n bingo_numbers = [int(x.strip()) for x in next(fin).split(',')]\n boards = defaultdict(list)\n for i, board_string in enumerate(fin.read().split('\\n\\n')):\n # We'll add the boards as a single list and math them into individual sets.\n board = [int(x.strip()) for x in board_string.split()]\n # https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks\n boards[i].extend([set(board[j : j + 5]) for j in range(0, len(board), 5)]) # Rows\n boards[i].extend([set(islice(board, j, None, 5)) for j in range(5)]) # Columns\n\nremaining_boards = set(range(len(boards)))\n\ndef check_boards(sublist):\n subset = set(sublist)\n\n for board_num, board_sets in boards.items():\n if board_num not in remaining_boards:\n continue\n if any(board_set < subset for board_set in board_sets):\n # Check if the current winner is the last board before removing it from the set\n if len(remaining_boards) == 1:\n unchecked = set()\n last_board_num = remaining_boards.pop()\n for board_set in boards[last_board_num]:\n unchecked |= {x for x in board_set - subset}\n score = sum(unchecked) * sublist[-1]\n print(last_board_num, sum(unchecked), sublist[-1], score)\n quit()\n\n remaining_boards.discard(board_num)\n\nfor i in range(4, len(bingo_numbers)):\n sublist = bingo_numbers[:i]\n result = check_boards(sublist)","repo_name":"noddycode/AdventOfCode2021","sub_path":"Day04/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7855071267","text":"import re\n\nfrom gearbox.command import TemplateCommand\n\n\nclass CreateCommand(TemplateCommand):\n CLEAN_PACKAGE_NAME_RE = re.compile('[^a-zA-Z0-9_]')\n\n def get_description(self):\n return 'Creates a basic one-app Uiro project'\n\n def get_parser(self, prog_name):\n parser = super(CreateCommand, self).get_parser(prog_name)\n\n parser.add_argument('-n', '--name', dest='package',\n metavar='NAME', required=True,\n help=\"Package Name\")\n\n parser.add_argument('-o', '--output-dir', dest='output_dir',\n metavar='OUTPUT_DIR',\n help=\"Destination directory (by default the package name)\")\n\n return parser\n\n def take_action(self, opts):\n if opts.output_dir is None:\n opts.output_dir = opts.package\n\n self.run_template(opts.output_dir, 
opts)\n","repo_name":"hirokiky/uiro","sub_path":"uiro/commands/create/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"75151920585","text":"# Selection sort moves through an array several times, each time picking the\n# smallest of the array, sorting from left to right in this case\n\n# O(n^2) Time where n is the length of our input array\n# O(1) Space or O(n) Space depending on if you are modifying the input array\ndef selectionSort(array):\n\tfor i in range(len(array) - 1):\n\t\ts = i\n\t\tsmallest = array[i]\n\t\tfor j in range(i + 1, len(array)):\n\t\t\tif array[j] < smallest:\n\t\t\t\ts = j\n\t\t\t\tsmallest = array[j]\n\t\tarray[s], array[i] = array[i], array[s]\n\treturn array","repo_name":"jacobpmeyer/leetcode","sub_path":"python/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14257852309","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\ndef puslapis(addrLink):\n\thtml \t= urlopen(addrLink)\n\tbsObj \t= BeautifulSoup(html.read(),\"lxml\")\n\tfor skelbimas in bsObj.find_all('div',{\"class\":\"desc_m_a_b\"}):\n\t\tlink \t\t= skelbimas.find('a',href=True)\n\t\t#print (\"nuoroda:\\t\",link[\"href\"],\"\\n\")\n\t\tadresas \t= link.text.strip()\n\t\ttry:\n\t\t\tmiestas\t= adresas.split()[3][:-1]\n\t\texcept:\n\t\t\tmiestas\t= \"nera\"\n\t\ttry:\n\t\t\trajonas\t= adresas.split()[4]\n\t\texcept:\n\t\t\trajonas\t= \"nera\"\n\t\tif rajonas.endswith(','):\n\t\t\trajonas\t= rajonas[:-1]\n\t\t\tgatve\t= str(adresas.split()[5:-1]).strip(\"[']\")\n\t\t\tgatve\t= gatve.replace(',','')\n\t\t\tgatve\t= gatve.replace(\"'\",\"\")\n\t\telse:\n\t\t\tgatve\t= \"nera\"\n\n\t\tprice \t\t= skelbimas.find(\"span\",{\"class\":\"main_price\"})\n\t\tdelploto \t= skelbimas.find(\"div\", {\"class\":\"description\"}).text\n\t\ttry:\n\t\t\tplotas\t= int(delploto.split()[10])\n\t\texcept:\n\t\t\tplotas \t= \"nera\"\n\n\t\tdata \t\t= skelbimas.find(\"time\")\n\t\tprint (miestas,\"\\t\",rajonas,\"\\t\",gatve,\"\\t\",price.text.strip(),\"\\t\",plotas,\"\\t\",str(data[\"datetime\"])[:-15],\"\\t\",str(link[\"href\"])[-15:-5],\"\\t\",link[\"href\"])\n\t\t#print (\"adresas:\\t\",adresas,\"\\n\") out\n\t\t#print (\"miestas:\\t\",miestas,\"\\n\")\n\t\t#print (\"rajonas:\\t\",rajonas,\"\\n\")\n\t\t#print (\"gatve:\\t\\t\",gatve,\"\\n\")\n\t\t#print (\"kaina:\\t\\t\",price.text.strip(),\"\\n\")\n\t\t#print (\"plotas:\\t\\t\",plotas,\"m2\\n\")\n\t\t#print (\"ikelimo data:\\t\",str(data[\"datetime\"])[:-15],\"\\n\")\n\t\t#print (\"UID:\\t\\t\",str(link[\"href\"])[-15:-5],\"\\n\")\n\t\t#print (\"-\"*200)\n\n\ttry:\n\t\tnextPg\t= bsObj.find(\"a\",{\"rel\":\"next\"})\n\t\tnextPg\t= nextPg[\"href\"]\n\texcept TypeError:\n\t\tnextPg\t= \"paskutinis\"\n\treturn nextPg\n\npradzia = \"http://www.alio.lt/nekilnojamas-turtas/butai/nuomoja.html\"\nwhile pradzia != \"paskutinis\":\n\t#print (pradzia)\n\tpradzia = puslapis(pradzia)\n","repo_name":"ElijasZ/scrp","sub_path":"all_alio.py","file_name":"all_alio.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42142373301","text":"from __future__ import unicode_literals\nimport time\nimport unittest\nfrom asgi_redis import RedisChannelLayer\nfrom asgiref.conformance import 
ConformanceTestCase\n\n\n\n# Default conformance tests\nclass RedisLayerTests(ConformanceTestCase):\n\n channel_layer = RedisChannelLayer(expiry=1, group_expiry=2, capacity=5)\n expiry_delay = 1.1\n capacity_limit = 5\n\n # The functionality this test is for is not yet present (it's not required,\n # and will slow stuff down, so will be optional), but it's here for future reference.\n @unittest.expectedFailure\n def test_group_message_eviction(self):\n \"\"\"\n Tests that when messages expire, group expiry also occurs.\n \"\"\"\n # Add things to a group and send a message that should expire\n self.channel_layer.group_add(\"tgme_group\", \"tgme_test\")\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n # Wait message expiry plus a tiny bit (must sum to less than group expiry)\n time.sleep(1.2)\n # Send new message to group, ensure message never arrives\n self.channel_layer.send_group(\"tgme_group\", {\"value\": \"blue\"})\n channel, message = self.channel_layer.receive_many([\"tgme_test\"])\n self.assertIs(channel, None)\n self.assertIs(message, None)\n\n\n# Encrypted variant of conformance tests\nclass EncryptedRedisLayerTests(ConformanceTestCase):\n\n channel_layer = RedisChannelLayer(\n expiry=1,\n group_expiry=2,\n capacity=5,\n symmetric_encryption_keys=[\"test\", \"old\"],\n )\n expiry_delay = 1.1\n capacity_limit = 5\n\n\n# Twisted tests\ntry:\n from twisted.internet import defer, reactor\n import twisted.trial.unittest\n import txredisapi\n class TwistedTests(twisted.trial.unittest.TestCase):\n\n def setUp(self):\n super(TwistedTests, self).setUp()\n self.channel_layer = RedisChannelLayer(expiry=1, group_expiry=2, capacity=5)\n\n @defer.inlineCallbacks\n def test_receive_many_twisted(self):\n self.channel_layer.send(\"sr_test\", {\"value\": \"blue\"})\n self.channel_layer.send(\"sr_test\", {\"value\": \"green\"})\n self.channel_layer.send(\"sr_test2\", {\"value\": \"red\"})\n # Get just one first\n channel, message = yield self.channel_layer.receive_many_twisted([\"sr_test\"])\n self.assertEqual(channel, \"sr_test\")\n self.assertEqual(message, {\"value\": \"blue\"})\n # And the second\n channel, message = yield self.channel_layer.receive_many_twisted([\"sr_test\"])\n self.assertEqual(channel, \"sr_test\")\n self.assertEqual(message, {\"value\": \"green\"})\n # And the other channel with multi select\n channel, message = yield self.channel_layer.receive_many_twisted([\"sr_test\", \"sr_test2\"])\n self.assertEqual(channel, \"sr_test2\")\n self.assertEqual(message, {\"value\": \"red\"})\n\n def tearDown(self):\n del self.channel_layer\n reactor.removeAll()\n super(TwistedTests, self).tearDown()\nexcept ImportError:\n pass\n","repo_name":"tykavanaugh/Fall-In-Group-7","sub_path":"backend/group_seven_env/lib/python3.8/site-packages/asgi_redis/tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17325875146","text":"class Solution:\n def canPartition(self, nums) -> bool:\n if sum(nums) % 2:\n return False\n\n sums = set()\n sums.add(0)\n target = sum(nums) // 2\n\n for i in range(len(nums)-1, -1, -1):\n tmp = sums.copy()\n for sum1 in tmp:\n sums.add(sum1 + nums[i])\n if target in sums:\n return True\n return False\n 
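\n\nif __name__ == '__main__':\n    # Added illustration (not in the original file): two quick sanity checks.\n    print(Solution().canPartition([1, 5, 11, 5]))  # True: {1, 5, 5} and {11} both sum to 11\n    print(Solution().canPartition([1, 2, 3, 5]))   # False: the total (11) is odd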
","repo_name":"bcvance/LeetCodeSolutions","sub_path":"python_solutions/partition_equal_subset_sum.py","file_name":"partition_equal_subset_sum.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30771148812","text":"# Question 13 - Decode a string recursively encoded as count followed by substring\n\n\n# Solution -\n\n\ndef decode(Str):\n integerstack = []\n stringstack = []\n temp = \"\"\n result = \"\"\n i = 0\n\n while i < len(Str):\n count = 0\n\n if (Str[i] >= '0' and Str[i] <= '9'):\n while (Str[i] >= '0' and Str[i] <= '9'):\n count = count * 10 + ord(Str[i]) - ord('0')\n i += 1\n i -= 1\n integerstack.append(count)\n\n elif (Str[i] == ']'):\n temp = \"\"\n count = 0\n\n if (len(integerstack) != 0):\n count = integerstack[-1]\n integerstack.pop()\n\n while (len(stringstack) != 0 and stringstack[-1] != '['):\n temp = stringstack[-1] + temp\n stringstack.pop()\n\n if (len(stringstack) != 0 and stringstack[-1] == '['):\n stringstack.pop()\n\n for j in range(count):\n result = result + temp\n\n for j in range(len(result)):\n stringstack.append(result[j])\n\n result = \"\"\n\n elif (Str[i] == '['):\n if (Str[i-1] >= '0' and Str[i-1] <= '9'):\n stringstack.append(Str[i])\n\n else:\n stringstack.append(Str[i])\n integerstack.append(1)\n\n else:\n stringstack.append(Str[i])\n\n i += 1\n\n while len(stringstack) != 0:\n result = stringstack[-1] + result\n stringstack.pop()\n\n return result\n\n\n# Driven code\nif __name__ == '__main__':\n Str = \"3[b2[ca]]\"\n print(decode(Str))\n\n\n\n# Thanks in advance if anyone is reviewing this code.\n# Program by - Parth Barse\n# Suggest me anything about this code on email - (parthbarse72@gmail.com) or InstaId - (https://www.instagram.com/parth.barse)\n\n#-------------------------------------------------------------------------------------\n","repo_name":"ParthBarse/6Companies30days","sub_path":"Company 1 - (Goldman Sachs)/question-13.py","file_name":"question-13.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4939875059","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom fastapp.core.server import create_app\n\n\"\"\"\nfastapi主程序\n\n@File : main.py\n@Time : 2021/03/12 20:58:45\n@Author : snc \n\"\"\"\n\napp = create_app()\n\nif __name__ == \"__main__\":\n import uvicorn\n\n # 输出所有的路由\n for route in app.routes:\n if hasattr(route, \"methods\"):\n print({'path': route.path, 'name': route.name, 'methods': route.methods})\n\n uvicorn.run(app='main:app', host=\"127.0.0.1\",\n port=8010, reload=True, debug=True)\n","repo_name":"SINC-G/Melody","sub_path":"fastapp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"13583087329","text":"import argparse\nfrom model import TCN\nfrom utils import data_generator\nimport tensorflow as tf\ntf.enable_eager_execution()\n\nparser = argparse.ArgumentParser(description='Sequence Modeling - The Adding Problem')\nparser.add_argument('--batch_size', type=int, default=32, metavar='N', help='batch size')\nparser.add_argument('--dropout', type=float, default=0.0, help='dropout applied to layers (default: 0.0)')\nparser.add_argument('--clip', type=float, default=-1, help='gradient clip, -1 means no clip (default: -1)')\nparser.add_argument('--epochs', type=int, default=10, 
help='upper epoch limit (default: 10)')\nparser.add_argument('--ksize', type=int, default=8, help='kernel size (default: 8)')\nparser.add_argument('--levels', type=int, default=9, help='# of levels (default: 9)')\nparser.add_argument('--seq_len', type=int, default=600, help='sequence length (default: 600)')\nparser.add_argument('--lr', type=float, default=2e-3, help='initial learning rate (default: 2e-3)')\nparser.add_argument('--nhid', type=int, default=30, help='number of hidden units per layer (default: 30)')\nargs = parser.parse_args()\n\n# Parameter\nn_classes = 1\nbatch_size = args.batch_size\nseq_length = args.seq_len\nepochs = args.epochs\nclip = args.clip\nlr = args.lr\nprint(\"Args:\\n\", args)\n\n# dataset\nprint(\"Producing data...\")\ntrain_data, train_labels = data_generator(100000, seq_length)\nprint(\"train_data.shape:\", train_data.shape, \", train_labels.shape:\", train_labels.shape)\ntrain_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels)).shuffle(50000).batch(batch_size)\n\ntest_data, test_labels = data_generator(1000, seq_length)\ntest_data, test_labels = tf.convert_to_tensor(test_data), tf.convert_to_tensor(test_labels)\n\n# build model\n# Note: We use a very simple setting here (assuming all levels have the same # of channels.\nprint(\"Building model...\")\nchannel_sizes = [args.nhid]*args.levels\nkernel_size = args.ksize\ndropout = args.dropout\nmodel = TCN(n_classes, channel_sizes, kernel_size=kernel_size, dropout=dropout)\n\n# Optimizer\noptimizer = tf.train.AdamOptimizer(lr)\n\n# Run \nfor epoch in range(epochs):\n for batch, (train_x, train_y) in enumerate(train_dataset):\n # loss\n with tf.GradientTape() as tape:\n y = model(train_x, training=True) # y.shape == (batch_size, 1)\n loss = tf.reduce_mean(tf.square(y - train_y))\n # gradient\n gradient = tape.gradient(loss, model.trainable_variables)\n if clip != -1:\n gradient, _ = tf.clip_by_global_norm(gradient, clip) \n optimizer.apply_gradients(zip(gradient, model.trainable_variables))\n if batch % 100 == 0:\n print(\"Batch:\", batch, \", Train loss:\", loss.numpy())\n # Eval\n eval_loss = tf.reduce_mean(tf.square(test_labels - model(test_data, training=False)))\n print(\"Epoch:\", epoch, \", Eval loss:\", eval_loss.numpy(), \"\\n---\\n\")\n # save\n model.save_weights(\"weights/model_weight.h5\")\n\n\n","repo_name":"Baichenjia/Tensorflow-TCN","sub_path":"adding_problem/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"81"} +{"seq_id":"72176910666","text":"'''\nReference:\nhttps://github.com/hshustc/CVPR19_Incremental_Learning/blob/master/cifar100-class-incremental/modified_linear.py\n'''\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom copy import deepcopy\nfrom torch.nn import functional as F\nfrom timm.models.layers.weight_init import trunc_normal_\nfrom timm.models.layers import Mlp\n\n\nclass SimpleLinear(nn.Module):\n '''\n Reference:\n https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/linear.py\n '''\n def __init__(self, in_features, out_features, bias=True):\n super(SimpleLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n 
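# (Added comment: Kaiming-uniform init for the weight and a zero bias, matching\n        # the plain linear head that SimpleLinear exposes.)\n        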
nn.init.kaiming_uniform_(self.weight, nonlinearity='linear')\n nn.init.constant_(self.bias, 0)\n\n def forward(self, input):\n return {'logits': F.linear(input, self.weight, self.bias)}\n\n\nclass CosineLinear(nn.Module):\n def __init__(self, in_features, out_features, nb_proxy=1, to_reduce=False, sigma=True):\n super(CosineLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features * nb_proxy\n self.nb_proxy = nb_proxy\n self.to_reduce = to_reduce\n self.weight = nn.Parameter(torch.Tensor(self.out_features, in_features))\n if sigma:\n self.sigma = nn.Parameter(torch.Tensor(1))\n else:\n self.register_parameter('sigma', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.sigma is not None:\n self.sigma.data.fill_(1)\n\n def forward(self, input):\n out = F.linear(F.normalize(input, p=2, dim=1), F.normalize(self.weight, p=2, dim=1))\n\n if self.to_reduce:\n # Reduce_proxy\n out = reduce_proxies(out, self.nb_proxy)\n\n if self.sigma is not None:\n out = self.sigma * out\n\n return {'logits': out}\n\n\nclass CosineLinearCov(nn.Module):\n def __init__(self, in_features, out_features, nb_proxy=1, to_reduce=False, sigma=True):\n super(CosineLinearCov, self).__init__()\n self.in_features = in_features\n self.out_features = out_features * nb_proxy\n self.nb_proxy = nb_proxy\n self.to_reduce = to_reduce\n self.weight = nn.Parameter(torch.Tensor(self.out_features, in_features))\n # A tensor to store covariance matrix, shape: (classes, in_features, in_features)\n self.covs = nn.Parameter(torch.Tensor(self.out_features, self.in_features, self.in_features), requires_grad=True)\n\n if sigma:\n self.sigma = nn.Parameter(torch.Tensor(1))\n else:\n self.register_parameter('sigma', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.sigma is not None:\n self.sigma.data.fill_(1)\n\n def forward(self, input):\n out = F.linear(F.normalize(input, p=2, dim=1), F.normalize(self.weight, p=2, dim=1))\n\n if self.to_reduce:\n # Reduce_proxy\n out = reduce_proxies(out, self.nb_proxy)\n\n if self.sigma is not None:\n out = self.sigma * out\n\n return {'logits': out}\n\n\nclass SplitCosineLinear(nn.Module):\n def __init__(self, in_features, out_features1, out_features2, nb_proxy=1, sigma=True):\n super(SplitCosineLinear, self).__init__()\n self.in_features = in_features\n self.out_features = (out_features1 + out_features2) * nb_proxy\n self.nb_proxy = nb_proxy\n self.fc1 = CosineLinear(in_features, out_features1, nb_proxy, False, False)\n self.fc2 = CosineLinear(in_features, out_features2, nb_proxy, False, False)\n if sigma:\n self.sigma = nn.Parameter(torch.Tensor(1))\n self.sigma.data.fill_(1)\n else:\n self.register_parameter('sigma', None)\n\n def forward(self, x):\n out1 = self.fc1(x)\n out2 = self.fc2(x)\n\n out = torch.cat((out1['logits'], out2['logits']), dim=1) # concatenate along the channel\n\n # Reduce_proxy\n out = reduce_proxies(out, self.nb_proxy)\n\n if self.sigma is not None:\n out = self.sigma * out\n\n return {\n 'old_scores': reduce_proxies(out1['logits'], self.nb_proxy),\n 'new_scores': reduce_proxies(out2['logits'], self.nb_proxy),\n 'logits': out\n }\n\n\ndef reduce_proxies(out, nb_proxy):\n if nb_proxy == 1:\n return out\n bs = out.shape[0]\n nb_classes = out.shape[1] / nb_proxy\n assert nb_classes.is_integer(), 'Shape error'\n nb_classes = int(nb_classes)\n\n simi_per_class = out.view(bs, nb_classes, nb_proxy)\n attentions = F.softmax(simi_per_class, dim=-1)\n\n return (attentions * simi_per_class).sum(-1)\n\n\nclass SimpleContinualLinear(nn.Module):\n def __init__(self, embed_dim, nb_classes, feat_expand=False, with_norm=False):\n super().__init__()\n\n self.embed_dim = embed_dim\n self.feat_expand = feat_expand\n self.with_norm = with_norm\n heads = []\n single_head = []\n if with_norm:\n single_head.append(nn.LayerNorm(embed_dim))\n\n single_head.append(nn.Linear(embed_dim, nb_classes, bias=True))\n head = nn.Sequential(*single_head)\n\n heads.append(head)\n self.heads = nn.ModuleList(heads)\n for m in self.modules():\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def backup(self):\n self.old_state_dict = deepcopy(self.state_dict())\n\n def recall(self):\n self.load_state_dict(self.old_state_dict)\n\n def update(self, nb_classes, freeze_old=True):\n single_head = []\n if self.with_norm:\n single_head.append(nn.LayerNorm(self.embed_dim))\n\n _fc = nn.Linear(self.embed_dim, nb_classes, bias=True)\n trunc_normal_(_fc.weight, std=.02)\n nn.init.constant_(_fc.bias, 0)\n single_head.append(_fc)\n new_head = nn.Sequential(*single_head)\n\n if freeze_old:\n for p in self.heads.parameters():\n p.requires_grad = False\n\n self.heads.append(new_head)\n\n def forward(self, x):\n out = []\n for ti in range(len(self.heads)):\n fc_inp = x[ti] if self.feat_expand else x\n out.append(self.heads[ti](fc_inp))\n out = {'logits': torch.cat(out, dim=1)}\n return out\n","repo_name":"IemProg/TTACIL","sub_path":"convs/linears.py","file_name":"linears.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"17955614908","text":"from tkinter import *\r\nfrom random import 
*\r\nfrom time import *\r\n\r\nclass Conveyor(Frame):\r\n def __init__(self, master, picture, width):\r\n super(Conveyor, self).__init__()\r\n self.image_number_list = [] # 셔플된 이미지의 번호를 저장하기 위한 리스트. 13개\r\n self.labels = [] # 컨베이어 frame에 추가되는 이미지 label 위젯의 리스트\r\n self.master = master # 컨베이어 frame의 parent 설정\r\n self.width = width # 메인 테이블의 가로 길이. = 4\r\n self.n = width*(width-1)+1 # 컨베이어에 넣을 이미지의 수. = 13\r\n self.picture = picture # app에서 생성한 이미지 받아와서 저장\r\n self.image_flags = list(False for i in range(self.width*self.width)) # 이미지가 컨베이어에 올라갔는지 아닌지 체크하기 위한 리스트. 초기 세팅은 모두 FALSE.\r\n \r\n self.conveyor_canvas = Canvas(self, width=55*self.n-16, height=30) # 현재 위치 표시를 위한 캔버스 위젯 생성\r\n # self.picture_canvas = Canvas(self, width=55*self.n, height=60, bg= 'black')\r\n\r\n # 컨베이어에 올릴 이미지 셔플링\r\n self.random_shuffle()\r\n\r\n for i in range(0, self.n):\r\n # TODO\r\n # 셔플 결과대로 이미지 label 생성하여 리스트에 저장\r\n # self.labels.append(self.picture[self.image_number_list[i]])\r\n self.labels.append(Label(self, image=self.picture[self.image_number_list[i]], background='black'))\r\n\r\n\r\n # 현재 index 설정 = 시작 위치 설정 (10번째)\r\n self.cur_idx = 9\r\n\r\n # 현재 이미지 설정 = 시작 이미지 설정\r\n # 선택한 이미지와 비교 목적으로 저장\r\n self.cur_image = self.picture.index(self.picture[self.image_number_list[self.cur_idx]])\r\n print(self.cur_image)\r\n\r\n # TODO\r\n # 캔버스 세팅\r\n self.conveyor_canvas.pack()\r\n # self.picture_canvas.pack()\r\n # 노란색 삼각형 생성\r\n self.tri_yel = self.conveyor_canvas.create_polygon((7+55*self.cur_idx, 15, 17+55*self.cur_idx, 30, 27+55*self.cur_idx, 15), fill= 'yellow', outline= 'black')\r\n # 빨간색 FINAL 문자열 생성\r\n self.fin_red = self.conveyor_canvas.create_text(14+55*(self.n-1),25, text= 'FINAL', fill= 'red', font=('bold', 9))\r\n # 컨베이어에 카드들 표시\r\n # for conv in range(self.n):\r\n # self.picture_canvas.create_image(29+55*conv,32, image= self.labels[conv])\r\n for i in range(0, self.n):\r\n self.labels[i].pack(side= LEFT)\r\n\r\n\r\n\r\n\r\n\r\n # 이미지 셔플 함수\r\n def random_shuffle(self):\r\n # TODO\r\n # 0~15 숫자 중 임의로 중복되지 않는 13개의 숫자 선택\r\n self.image_number_list = sample(range(self.width*self.width), self.n)\r\n \r\n #event function\r\n def matching(self):\r\n if self.master.table.selected_image == self.cur_image:\r\n print('correct')\r\n self.correct_match()\r\n else:\r\n print('wrong')\r\n self.wrong_match()\r\n\r\n # 선택한 그림이 현재 위치의 그림과 일치하는 경우의 처리 함수\r\n def correct_match(self):\r\n # 마지막 이미지를 찾은 경우\r\n if self.cur_idx == self.n - 1:\r\n # TODO\r\n # 게임 종료\r\n win = True\r\n self.master.quit_game(win= win)\r\n\r\n\r\n # 캔버스 위젯\r\n # 현재 위치 표시 도형 우측 이동\r\n # 현재 이미지 및 현재 위치 재설정\r\n # canvas.itemconfig(도형의객체, outline='white', fill='white', + 추가적인 parameter 세팅) 기존에 생성된 도형 객체의 변경 가능\r\n else:\r\n # 현재 위치가 컨베이어의 가장 우측 도형을 지목할 때\r\n if self.cur_idx == self.n-2:\r\n # TODO\r\n self.conveyor_canvas.move(self.tri_yel, 55,0)\r\n # self.conveyor_canvas.create_text(self.fin_red)\r\n pass\r\n # 그 외 도형 이동\r\n else:\r\n # TODO\r\n # 노란 삼각형을 오른쪽으로 한 칸 이동\r\n self.conveyor_canvas.move(self.tri_yel, 55, 0)\r\n # TODO\r\n # 현재 찾을 이미지와 해당 이미지의 위치 갱신\r\n self.cur_idx += 1\r\n self.cur_image = self.picture.index(self.picture[self.image_number_list[self.cur_idx]])\r\n\r\n\r\n # 선택한 그림이 현재 위치의 그림과 일치하지 않는 경우의 처리 함수\r\n def wrong_match(self):\r\n # 마지막 기회에서 틀린 경우\r\n if(self.cur_idx == 0):\r\n # TODO\r\n # 게임 종료\r\n win = False\r\n self.master.quit_game(win=win)\r\n\r\n # 캔버스 위젯\r\n # 가장 왼쪽의 이미지를 제거\r\n # 기존 이미지들 좌측으로 한 칸씩 이동\r\n # 컨베이어에 추가되지 않은 이미지 중 하나 선택하여 가장 우측에 추가\r\n # 현재 위치 재설정\r\n # canvas.itemconfig(도형의객체, outline='white', 
fill='white', + 추가적인 parameter 세팅) 기존에 생성된 도형 객체의 변경 가능\r\n else:\r\n # FINAL에서 오답 선택했을 때\r\n if self.cur_idx == self.n-1:\r\n # TODO\r\n # 노란 삼각형 복구\r\n self.conveyor_canvas.move(self.tri_yel, -55, 0)\r\n # 그 외 도형 이동\r\n else:\r\n # TODO\r\n # 노란 삼각형을 왼쪽으로 한 칸 이동\r\n self.conveyor_canvas.move(self.tri_yel, -55, 0)\r\n # 새 이미지 추가\r\n while True:\r\n new_image = randint(0, self.width*self.width-1)\r\n if new_image not in self.image_number_list :\r\n break\r\n\r\n # 기존 이미지 좌측으로 한 칸씩 이동\r\n # label.config(parameter = configuration) 기존의 label 위젯 변경 가능 TODO\r\n for i in range(0,self.n-1):\r\n self.labels[i].config(image=self.picture[self.image_number_list [i+1]])\r\n self.image_number_list [i] = self.image_number_list [i+1]\r\n\r\n # # 새 이미지 추가\r\n self.image_number_list[self.n-1] = new_image\r\n self.labels[self.n-1].config(image=self.picture[self.image_number_list [self.n-1]])\r\n self.cur_idx -= 1\r\n self.cur_image = self.picture.index(self.picture[self.image_number_list[self.cur_idx]])\r\n","repo_name":"nawaka7/TKinter-Card-Game","sub_path":"conveyor.py","file_name":"conveyor.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72583705545","text":"from django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, post_delete\n\nMAX_TAG_LENGTH = 40\n\nclass Help (models.Model) :\n lang = models.CharField (\n max_length = 80,\n blank = True,\n null = True)\n\n markdown = models.TextField (\n blank = True,\n null = True\n )\n\nclass Config (models.Model) :\n appName = models.CharField (\n max_length = 40,\n blank = True,\n null = True)\n\n subtitle = models.CharField (\n max_length = 80,\n blank = True,\n null = True)\n\n tag = models.CharField (\n help_text = 'The main SteemQA application tag',\n max_length = MAX_TAG_LENGTH)\n\n carousel_slide_count = models.PositiveIntegerField (\n help_text = 'The number of questions to load in the home page carousel',\n default = 5,\n blank = True,\n null = True)\n\n carousel_history = models.PositiveIntegerField (\n help_text = 'Time period (in days) to report top questions in carousel',\n default = 7,\n blank = True,\n null = True)\n\n home_blog_history = models.PositiveIntegerField (\n help_text = 'Time period (in days) to report questions and answers on the home page',\n default = 7,\n blank = True,\n null = True)\n\n initial_slides_count = models.PositiveIntegerField (\n help_text = 'Number of initial slides to load in a slide swiper',\n default = 10,\n blank = True,\n null = True)\n\n new_slides_count = models.PositiveIntegerField (\n help_text = 'Number of slides to load in the background when reaching the end of a slide swiper',\n default = 3,\n blank = True,\n null = True)\n\n initial_grid_batch_size = models.PositiveIntegerField (\n help_text = 'Number of elements to load into a card grid',\n default = 20,\n blank = True,\n null = True)\n\n post_addon_msg = models.TextField (\n help_text = 'A message that will be added to the end of each question/answer post and visible only when displayed on an external app',\n blank = True,\n null = True)\n\n news_sources = models.TextField (\n max_length = 256,\n help_text = 'A comma separated list of [username]:[tag] values to identify the news sources',\n blank = True,\n null = True\n )\n\n default_tags = models.TextField (\n help_text = 'A comma separated list of default tags to offer to the user',\n blank = True,\n null = True)\n\nclass Scraper 
(models.Model) :\n nodes = models.TextField (\n help_text = 'The steem nodes to use (in order of priority)',\n default = 'api.steemit.com,steemd.minnowsupportproject.org,steemd.privex.io,steemd.steemgigs.org,steemd.steemit.com,rpc.curiesteem.com,rpc.steemliberator.com,rpc.steemviz.com')\n\n block_nbr = models.PositiveIntegerField (\n blank = True,\n null = True)\n\n\nclass SteemUser (models.Model) :\n created = models.DateTimeField(auto_now_add=True)\n\n username = models.CharField(\n max_length = 40,\n db_index = True)\n\n class Meta:\n ordering = ('username',)\n\n def __str__ (self) :\n return self.username\n\nclass AccessToken (models.Model) :\n username = models.CharField(\n max_length = 40,\n db_index = True)\n\n token = models.CharField(\n help_text = 'SHA-512 encrypted version of Steemconnect access token',\n max_length = 160,\n db_index = True)\n\n class Meta:\n ordering = ('username',)\n\n\nclass Topic (models.Model) :\n topic = models.CharField(\n max_length = 40,\n unique = True,\n db_index = True)\n\n parent = models.ForeignKey(\n 'self',\n related_name = 'parent_topic',\n on_delete=models.CASCADE,\n blank = True,\n null = True)\n\n def __str__ (self) :\n return self.topic\n\n class Meta :\n ordering = ('topic', )\n\nclass FavouriteTopic (models.Model) :\n user = models.ForeignKey(\n SteemUser,\n on_delete=models.CASCADE)\n\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE)\n\nclass Discussion (models.Model) :\n created = models.DateTimeField(db_index = True)\n\n author = models.CharField(\n max_length = 40,\n db_index = True)\n\n title = models.CharField (\n max_length = 128,\n db_index = True)\n\n permlink = models.CharField(\n max_length = 160,\n db_index = True)\n\n active = models.DateTimeField(\n help_text = 'The last time this content was “touched” by voting or reply',\n blank = True,\n null = True)\n\n # Must have at the main tag. 
Questions must also have a second tag for the topic\n tag1 = models.CharField (\n max_length = MAX_TAG_LENGTH)\n\n tag2 = models.CharField (\n max_length = MAX_TAG_LENGTH,\n blank = True,\n null = True)\n\n tag3 = models.CharField (\n max_length = MAX_TAG_LENGTH,\n blank = True,\n null = True)\n\n tag4 = models.CharField (\n max_length = MAX_TAG_LENGTH,\n blank = True,\n null = True)\n\n tag5 = models.CharField (\n max_length = MAX_TAG_LENGTH,\n blank = True,\n null = True)\n\n flagged = models.BooleanField (\n help_text = 'Indicates if this item has been flagged by moderators',\n default = False)\n\n net_votes = models.PositiveIntegerField (\n help_text = 'Net positive votes',\n default = 0)\n\n author_payout_value = models.FloatField (\n help_text = 'Tracks the total payout (in SBD) this content has received over time',\n default = 0)\n\n flagged = models.BooleanField (\n help_text = 'Indicates if this post has been flagged',\n default = False)\n\n total_payout_value = models.FloatField (\n help_text = 'Tracks the total payout this content has received over time, measured in the debt asset.',\n default = 0)\n\n class Meta :\n abstract = True\n\n def __str__ (self) :\n return self.title\n\nclass Question (Discussion) :\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE)\n\n answer_count = models.PositiveIntegerField (\n default = 0,\n blank = True,\n null = True)\n\nclass Answer (Discussion) :\n question = models.ForeignKey(\n Question,\n on_delete=models.CASCADE)\n\nclass Bookmark (models.Model) :\n user = models.ForeignKey(SteemUser,\n on_delete=models.CASCADE)\n\n question = models.ForeignKey(Question,\n on_delete=models.CASCADE)\n","repo_name":"irelandscape/quearn-server","sub_path":"quearn_server/steem/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12928591327","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/7/26 4:43 下午\n# @Author : vadon\n# @Email : vadonical@gmail.com\n# @File : magic_call.py\n# @Project : proxymask\n\n\nclass Test:\n def __init__(self):\n self.a = 1\n\n def __call__(self, *args, **kwargs):\n res = str()\n for i in args:\n res += str(i)\n return f\"the value is {res}\"\n\n\nif __name__ == '__main__':\n t = Test()\n print(t(100, 'string'))\n","repo_name":"Thancoo/proxymask","sub_path":"magic/magic_call.py","file_name":"magic_call.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28559433680","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author : zhengdongqi\n@Email :\n@Usage : 图像平移\n@FileName: 17.图片旋转.py\n@DateTime: 2023/2/27 21:29\n@SoftWare: PyCharm\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nif __name__ == '__main__':\n dog = cv2.imread('./input/dog.jpeg')\n h, w, ch = dog.shape\n # M = np.float32([[1, 0, 500], [0, 1, 300]])\n # 旋转的角度为逆时针\n # 中心点是 (x,y)\n # M = cv2.getRotationMatrix2D((w/2, h/2), 15, 1.0)\n src = np.float32([[400, 300], [800, 300], [400, 1000]])\n dst = np.float32([[200, 400], [600, 500], [150, 1100]])\n M = cv2.getAffineTransform(src, dst)\n\n # 如果想改变新图像的尺寸,需要修改dsize\n new = cv2.warpAffine(dog, M, (w, h))\n\n print(dog.shape)\n\n cv2.imshow('dog', dog)\n cv2.imshow('new', new)\n 
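# (Added note: cv2.getAffineTransform above solves the 2x3 matrix M mapping the three\n    # src points onto the three dst points; cv2.warpAffine then applies M to every pixel.)\n    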
cv2.waitKey(0)\n","repo_name":"nickdecodes/opencv-practice","sub_path":"00.python/01.基本操作/18.图像平移.py","file_name":"18.图像平移.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"22639477960","text":"# This file is executed on every boot (including wake-boot from deepsleep)\n#import esp\n#esp.osdebug(None)\nimport gc\nimport machine\nimport socket\nimport network\nimport esp\nfrom machine import Pin\n\n#import webrepl\n#webrepl.start()\ngc.collect()\n\n\n\ndef do_connect():\n    sta_if = network.WLAN(network.STA_IF)\n    if not sta_if.isconnected():\n        print('connecting to network...')\n        sta_if.active(True)\n        sta_if.connect('I9EBE', 'dTG7kKkAUJd4')\n        while not sta_if.isconnected():\n            pass\n    print('network config:', sta_if.ifconfig())\n\n\ndef http_get(url):\n    _, _, host, path = url.split('/', 3)\n    addr = socket.getaddrinfo(host, 80)[0][-1]\n    s = socket.socket()\n    s.connect(addr)\n    s.send(bytes('GET /%s HTTP/1.0\\r\\nHost: %s\\r\\n\\r\\n' % (path, host), 'utf8'))\n    while True:\n        data = s.recv(1000)\n        if data:\n            #print('\\nReceived = ',len(data))\n            #print(url)\n            #print(str(data, 'utf8'), end='')\n            # Why does return work here?\n            return data.decode('utf-8')\n        else:\n            break\n    s.close()\n\n\n# Disable the access point\nap_if = network.WLAN(network.AP_IF)\nap_if.active(False)\nprint ('\\nAccess Point = ' , ap_if.active())\n# Enable station (client) mode\nsta_if = network.WLAN(network.STA_IF)\nsta_if.active(True)\nprint ('Station = ', sta_if.active() )\n\n# Define the relay output pin\ntaster_pali = Pin(5, Pin.OUT, Pin.PULL_UP)\n\n# On reset, turn the heating switch off\ntaster_pali.off()\n\n# Connect to WiFi\ndo_connect()\n\ntry:\n    http_get('http://blynk-cloud.com/15364b6f7e934f859ab8cc3803d2971b/update/V5?value=_Resetovan_Uredjaj%0A')\n    print('\\nDevice reset\\n')\nexcept:\n    machine.reset()\n","repo_name":"markodr/kotao","sub_path":"boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7636118631","text":"\"\"\"\nhttps://scikit-learn.org/stable/modules/tree.html\n\nDecision trees are a non-parametric supervised learning method used for classification and regression.\n\nThe goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data.\nA tree can be seen as a piecewise constant approximation.\n\nThe deeper the tree, the more complex the decision rules and the fitter the model.\n\nAdvantages:\n1. Simple to understand and to interpret. Trees can be visualized.\n2. Requires little data preparation. Other techniques often require data normalization,\n   dummy variables need to be created and blank values to be removed. Note however that this module does not support missing values.\n3. The cost of using the tree (i.e., predicting data) is logarithmic in the number of data points used to train the tree.\n4. Able to handle both numerical and categorical data\n5. Able to handle multi-output problems\n6. Uses a white box model. If a given situation is observable in a model, the explanation for the condition is easily explained by boolean logic.\n7. Possible to validate a model using statistical tests. That makes it possible to account for the reliability of the model.\n8. Performs well even if its assumptions are somewhat violated by the true model from which the data were generated.\n\nDisadvantages:\n1. 
Decision-tree learners can create over-complex trees that do not generalize the data well. This is called overfitting.\n Mechanisms such as pruning,\n setting the minimum number of samples required at a leaf node\n or setting the maximum depth of the tree are necessary to avoid this problem.\n2. Decision trees can be unstable because small variations in the data might result in a completely different tree being generated. This problem is mitigated by using decision trees within an ensemble.\n\n3. Predictions of decision trees are neither smooth nor continuous, but piecewise constant approximations as seen in the above figure. Therefore, they are not good at extrapolation.\n\n4. The problem of learning an optimal decision tree is known to be NP-complete,\n Consequently, practical decision-tree learning algorithms are based on heuristic algorithms such as the greedy algorithm where locally optimal decisions are made at each node. Such algorithms cannot guarantee to return the globally optimal decision tree.\n This can be mitigated by training multiple trees in an ensemble learner, where the features and samples are randomly sampled with replacement.\n\n5. There are concepts that are hard to learn because decision trees do not express them easily, such as XOR, parity or multiplexer problems.\n\n6. Decision tree learners create biased trees if some classes dominate. It is therefore recommended to balance the dataset prior to fitting with the decision tree.\n\n\n\"\"\"\n# DecisionTreeClassifier is a class capable of performing multi-class classification on a dataset.\n\nfrom sklearn import tree\n\n# \"tree.BaseDecisionTree\": Base class for decision trees\n# \"tree.DecisionTreeClassifier\":A decision tree classifier.\n# \"tree.DecisionTreeRegressor\":A decision tree regressor.\n# \"tree.ExtraTreeClassifier\": Extra-trees differ from classic decision trees in the way they are built.\n# When looking for the best split to separate the samples of a node into two\n# groups, random splits are drawn for each of the `max_features` randomly\n# selected features and the best split among those is chosen. 
When\n# `max_features` is set to 1, this amounts to building a totally random\n# decision tree.\n# tree.export_graphviz: Export a decision tree in DOT format.\n# tree.plot_tree: Plot a decision tree.\n# tree.export_text: Build a text report showing the rules of a decision tree.\n\n\n# tree.DecisionTreeClassifier(): takes two arrays: X of shape [n_samples, n_features] and y of shape [n_samples]\n# parameters:\n# criterion: The function to measure the quality of a split.{\"gini\", \"entropy\", \"log_loss\"}\n# splitter: The strategy used to choose the split at each node.{\"best\", \"random\"}\n# max_depth: The maximum depth of the tree\n# min_samples_split: The minimum number of samples required to split an internal node.\n# min_samples_leaf: The minimum number of samples required to be at a leaf node.\n# min_weight_fraction_leaf: The minimum weighted fraction of the sum total of weights\n# max_features: The number of features to consider when looking for the best split\n# random_state: Controls the randomness of the max_features\n# max_leaf_nodes: Grow a tree with ``max_leaf_nodes`` in best-first fashion.\n# min_impurity_decrease: A node will be split if this split induces a decrease of the impurity >= this value.\n# class_weight: Weights associated with classes in the form ``{class_label: weight}``.\n# ccp_alpha: cost complexity pruning; the subtree with the largest cost complexity that is < ``ccp_alpha`` will be chosen.\n# Simple Example:\nX = [[0, 0], [1, 1]]\ny = [0, 1]\nclf = tree.DecisionTreeClassifier()\n\nclf = clf.fit(X, y)\nprint(clf.predict([[2, 2]]))\n# print(tree.plot_tree(clf))\n# print(tree.export_graphviz(clf))\n\n\n# # The tree can be exported in graph form using export_graphviz.\nfrom sklearn.datasets import load_iris\nfrom sklearn import tree\n\niris = load_iris()\nX, y = iris.data, iris.target\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X, y)\nprint(tree.plot_tree(clf))\n\nimport graphviz\n\ndot_data = tree.export_graphviz(clf, out_file=None)\ngraph = graphviz.Source(dot_data)\ngraph.render(\"iris\")\ndot_data = tree.export_graphviz(\n    clf, out_file=None,\n    feature_names=iris.feature_names,\n    filled=True,\n    rounded=True,\n    special_characters=True\n)\n\ngraph = graphviz.Source(dot_data)\nprint(graph)\n\n# The tree can also be exported in textual format with the function export_text\nfrom sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import export_text\niris = load_iris()\ndecision_tree = DecisionTreeClassifier(random_state=0, max_depth=2)\ndecision_tree = decision_tree.fit(iris.data, iris.target)\nr = export_text(\n    decision_tree,\n    feature_names=iris['feature_names']\n)\nprint(r)\n\n\n# # Regression\n# Decision trees can also be used for regression problems, using the DecisionTreeRegressor class.\nfrom sklearn import tree\nx = [[0, 0], [2, 2]]\ny = [0.5, 2.5]\nclf = tree.DecisionTreeRegressor()\nclf = clf.fit(x, y)\nprint(clf.predict([[1, 1]]))\n\n# # Multi-output problems\n\"\"\"\nA multi-output problem is a supervised learning problem with several outputs to predict, that is when Y is a 2d array of shape (n_samples, n_outputs).\n\nWhen there is no correlation between the outputs, a very simple way to solve this kind of problem is \nto build n independent models, i.e. one for each output, and then to use those models to independently predict each one\nof the n outputs. 
However, because it is likely that the output values related to the same input are themselves correlated,\nan often better way is to build a single model capable of predicting all n outputs simultaneously.\n\nDecision trees can be used for multi-output problems. To support this, the following changes are needed:\n1. Store n output values in leaves instead of 1\n2. Use splitting criteria that compute the average reduction across all n outputs.\n\"\"\"\n\n","repo_name":"prabhuiitdhn/Practice_Data_Engineering","sub_path":"machine_learning_pipeline/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":7307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"4248843645","text":"#\n# Script for fetching email attachments from the inbox of a given gmail account\n#\n# Created by Adam Khoukhi on 08/25/22.\n\nimport os\nimport email\nimport imaplib\nfrom dotenv import load_dotenv\nimport csv\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport math\n\nload_dotenv()\npath = os.getcwd()\n\n\n# Current imap is for Gmail\nimap_url = os.getenv('IMAP_URL')\nconnection = imaplib.IMAP4_SSL(imap_url)\n\nmeans = []\nmedians = []\nweights = []\nalphas = []\nonBetas = []\noffBetas = []\nISO = []\nSS = []\n\n\ndef login():\n    \"\"\"\n    Login into a gmail account. Required to run before running @read_inbox\n    \"\"\"\n    # Email's Address and Password\n    email_address = os.getenv('EMAIL_ADDRESS')\n    password = os.getenv('PASSWORD')\n    connection.login(email_address, password)\n\n\ndef read_inbox():\n    \"\"\"\n    Reads the inbox of the gmail account that was used in the login method\n    \"\"\"\n    # login into the account\n    login()\n\n    # No need to check spam or trash, only Inbox\n    connection.select('Inbox', readonly=True)\n\n    result, msgnums = connection.search(None, 'ALL')\n    if result != \"OK\":\n        print(\"Error in searching inbox\")\n    else:\n        index = 1\n        for num in msgnums[0].split():\n            typ, raw_data = connection.fetch(num, '(RFC822)')\n            if typ.__eq__(\"OK\"):\n                raw_email = raw_data[0][1]\n                raw_email_string = raw_email.decode('utf-8')\n                email_message = email.message_from_string(raw_email_string)\n\n                subject = str(email_message).split(\"Subject: \", 1)[1].split(\"\\nMessage-Id:\", 1)[0]\n                if subject.__eq__(\"iReplica 3.0 Session Data\"):\n                    for part in email_message.walk():\n                        if part.get('Content-Disposition') is None:\n                            continue\n                        file_name_components = part.get_filename().split('.')\n                        file_name = file_name_components[0] + \"_\" + str(index) + \".\" + file_name_components[1]\n                        if bool(file_name):\n                            file_path = os.path.join((path + '/Datasheets/'), file_name)\n                            index = index + 1\n                            if not os.path.isfile(file_path):\n                                fp = open(file_path, 'wb')\n                                fp.write(part.get_payload(decode=True))\n                                fp.close()\n        print('Number of downloads: {}'.format((index - 1)))\n    connection.logout()\n\n\ndef clear_datasheets():\n    \"\"\"\n    Removes all the files inside the Datasheets directory\n    \"\"\"\n    # path of the directory\n    # Getting the list of datasheets clearing the directory\n    for file in os.listdir((path + '/Datasheets/')):\n        os.remove(os.path.join((path + '/Datasheets/'), file))\n\n\ndef calculate_median(arr):\n    \"\"\"\n    Calculates the median of a given array\n    \"\"\"\n    percentile = 1524096\n    currentSum = 0\n    median = 0\n    index = 0\n    for i in arr:\n        if currentSum >= percentile:\n            return index\n        currentSum += int(i)\n        index += 1\n\n\ndef calculate_mean(arr):\n    
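# (Added comment: weighted mean of the 0-16383 luminosity histogram:\n    # sum(bin_index * count) / 3048192, the fixed sample count.)\n    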
\"\"\"\n Calculates the mean of a given array\n \"\"\"\n sumOfLuminosities = 0\n sampleSize = 3048192.0\n index = 0\n for i in arr:\n sumOfLuminosities += (index * int(i))\n index += 1\n return sumOfLuminosities / sampleSize\n\n\ndef read_csv():\n \"\"\"\n Reads the data in all the csv files located in the Datasheets directory\n \"\"\"\n\n # Getting the list of datasheets clearing the directory\n for file in os.listdir((path + '/Datasheets/')):\n # open csv file\n with open(os.path.join((path + '/Datasheets/'), file)) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n if len(row) > 0:\n if row[0] == 'Weight ':\n weights.append(float(row[1]))\n elif row[0] == 'Alpha ':\n alphas.append(float(row[1]))\n elif row[0] == 'ON Beta ':\n onBetas.append(float(row[1]))\n elif row[0] == 'OFF Beta ':\n offBetas.append(float(row[1]))\n elif row[0] == 'ISO (Fastest to slowest) ':\n ISO.append(float(row[3]))\n elif row[0] == 'Exposure Time (Fastest to slowest) ':\n SS.append(float(row[3]))\n elif row[0] == 'luminosity Histograms 0-16383 ':\n means.append(calculate_mean(row[1:]))\n medians.append(calculate_median(row[1:]))\n\n normalizeMeansAndMedians()\n\n\ndef normalizeMeansAndMedians():\n \"\"\"\n Normalizes the mean and median by multiplying them with a factor\n \"\"\"\n\n for i in range(0, len(means)):\n means[i] = math.log(means[i] * ISO[i] * SS[i])\n\n for i in range(0, len(medians)):\n medians[i] = math.log((medians[i] * ISO[i] * SS[i]))\n\n\ndef createAndSavePlot(x, y, x_name, y_name):\n \"\"\"\n Plots and saves the Visualization of the x and y set of values given as a png in the Plots folder\n \"\"\"\n plt.title(x_name + \" vs. \" + y_name)\n plt.scatter(x, y, color=\"blue\")\n (slope, intercept) = np.polyfit(x, y, 1)\n plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color=\"red\")\n plt.plot(0, 0, '-r', label='y = ' + \"{:f}\".format(slope) + 'x + ' + str(intercept))\n plt.plot(0, 0, '-b', label='r = ' + \"{:f}\".format(np.corrcoef(x, y)[0][1]))\n plt.legend(loc='upper right')\n plt.savefig('./Plots/' + x_name + '_' + y_name + '.png')\n plt.close()\n\n\ndef visualizeData():\n \"\"\"\n Visualizes the different relations between the different variables\n \"\"\"\n matplotlib.get_backend()\n matplotlib.use('MacOSX')\n\n # Median and Weight\n createAndSavePlot(medians, weights, \"median\", \"weight\")\n createAndSavePlot(means, weights, \"mean\", \"weight\")\n createAndSavePlot(medians, alphas, \"median\", \"alpha\")\n createAndSavePlot(means, alphas, \"mean\", \"alpha\")\n createAndSavePlot(alphas, weights, \"alpha\", \"weight\")\n createAndSavePlot(weights, onBetas, \"weight\", \"nON\")\n createAndSavePlot(weights, offBetas, \"weight\", \"nOFF\")\n createAndSavePlot(alphas, onBetas, \"alpha\", \"nON\")\n createAndSavePlot(alphas, offBetas, \"alpha\", \"nOFF\")\n createAndSavePlot(medians, onBetas, \"median\", \"nON\")\n createAndSavePlot(medians, offBetas, \"median\", \"nOFF\")\n createAndSavePlot(means, onBetas, \"mean\", \"nON\")\n createAndSavePlot(means, offBetas, \"mean\", \"nOFF\")\n # print(sum(onBetas)/len(onBetas))\n # print(sum(offBetas) / len(offBetas))","repo_name":"jmaApps/iReplicaDataAnalysis","sub_path":"iReplicaFunctions.py","file_name":"iReplicaFunctions.py","file_ext":"py","file_size_in_byte":6711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10172995761","text":"#!/usr/bin/python3\n\nimport math\nimport sys\n\n\ndef GetWireCoordinatesList(input):\n init_x 
= 0\n init_y = 0\n wire = list()\n for inst in input:\n if inst[0] == \"R\":\n for inc in range(1, int(inst[1:]) + 1):\n init_x += 1\n wire.append((init_x, init_y))\n elif inst[0] == \"U\":\n for inc in range(1, int(inst[1:]) + 1):\n init_y += 1\n wire.append((init_x, init_y))\n elif inst[0] == \"L\":\n for inc in range(1, int(inst[1:]) + 1):\n init_x -= 1\n wire.append((init_x, init_y))\n elif inst[0] == \"D\":\n for inc in range(1, int(inst[1:]) + 1):\n init_y -= 1\n wire.append((init_x, init_y))\n return wire\n\n\ndef GetWireCoordinatesSet(input):\n init_x = 0\n init_y = 0\n wire = set()\n for inst in input:\n if inst[0] == \"R\":\n for inc in range(1, int(inst[1:]) + 1):\n init_x += 1\n wire.add((init_x, init_y))\n elif inst[0] == \"U\":\n for inc in range(1, int(inst[1:]) + 1):\n init_y += 1\n wire.add((init_x, init_y))\n elif inst[0] == \"L\":\n for inc in range(1, int(inst[1:]) + 1):\n init_x -= 1\n wire.add((init_x, init_y))\n elif inst[0] == \"D\":\n for inc in range(1, int(inst[1:]) + 1):\n init_y -= 1\n wire.add((init_x, init_y))\n return wire\n\n\ndef main():\n # Get input\n file_name = \"../input/day_03_input\"\n if len(sys.argv) > 1:\n file_name = sys.argv[1]\n file = open(file_name)\n input1 = file.readline().strip(\"\\n\").split(\",\")\n input2 = file.readline().strip(\"\\n\").split(\",\")\n\n # Solve\n listwire = GetWireCoordinatesList(input1)\n setwire = GetWireCoordinatesSet(input2)\n man_dist = math.inf\n for coor in listwire:\n if coor in setwire:\n man_dist = min(man_dist, abs(coor[0]) + abs(coor[1]))\n print(man_dist)\n return man_dist\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vss2sn/advent_of_code","sub_path":"2019/python/day_03a.py","file_name":"day_03a.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"20584971629","text":"import sys\r\nimport numpy as np\r\n\r\nmy_data = np.load(sys.argv[1])\r\n\r\nmin_val = -1\r\nmax_val = 1\r\navg_bins = 100\r\n\r\ntargets = my_data['train_targets']\r\nweighted_targets = targets*avg_bins\r\n\r\ndistance_sum = 0.0\r\ndistance_count = 0\r\n# for i in range(min_val*avg_bins, max_val*avg_bins + 1):\r\n# \tfor target in targets:\r\n# \t\tdistance = abs(i - target)\r\n# \t\tdistance_sum += distance\r\n# \t\tdistance_count += 1\r\n\r\nfor i in range(min_val*avg_bins, max_val*avg_bins + 1):\r\n\tfor j in [min_val*avg_bins, max_val*avg_bins]:\r\n\t\tdistance = abs(i-j)\r\n\t\tdistance_sum += distance\r\n\t\tdistance_count += 1\r\n\r\navg_distance = distance_sum/distance_count\r\ncorrected_avg_distance = avg_distance/avg_bins\r\n\r\n","repo_name":"dtaylo95/Compartmentalization_CNN","sub_path":"scripts/unused_scripts/determine_rand_error.py","file_name":"determine_rand_error.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24935944405","text":"# Description:\n# Write a Python program that creates and print a dictionary that maps each element in a list\n# to its corresponding frequency (how many times it occurs in the list).\n# The test should be case-sensitive. 
Therefore, \"A\" should not be considered the same element as \"a\".\n\ndef cal_frequency(items):\n    # count occurrences of each distinct element; == keeps the test case-sensitive\n    frequencies = {}\n    for i in set(items):\n        count = 0\n        for j in items:\n            if i == j:\n                count += 1\n        frequencies[i] = count\n    return frequencies\nprint(cal_frequency([1,1,2,3,4,4,4,1,2]))\nprint(cal_frequency(['a','a']))\n\n\n","repo_name":"moryaziz/my-python-code","sub_path":"extra execise-101/list and tuple _level 2/exercise 6.py","file_name":"exercise 6.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"34047579656","text":"import folium\nimport pandas as pd\n\n\nmap4 = folium.Map(location=[40.8979334, -73.885948], zoom_start=15, tiles='Stamen Terrain')\ndf = pd.read_csv('Volcano.txt')\n# create a marker for each row of the file\n# the loop walks three columns at once, so zip() bundles them together\n# each iteration reads one LAT/LON/NAME triple out of df\n\n\nfor lat,lon,name in zip(df['LAT'], df['LON'], df['NAME']):\n    # folium.Icon expects a colour name for color=; 'cloud' looks like it was\n    # meant as the icon name (the green colour here is a guess)\n    folium.Marker(location = [lat, lon], popup=name, icon=folium.Icon(color='green', icon='cloud')).add_to(map4)\n\nprint(map4.save('test4.html'))\n","repo_name":"gsaukov/python-machine","sub_path":"core/webmap/webmap_3.py","file_name":"webmap_3.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"29607987516","text":"# -*- encoding: utf-8 -*-\n\n# this is a lambda function that handles requests like\n# /record_location/Camilla?lat={lat}&lon={long}&accuracy={acc}&alt={alt}&alt_accuracy={altacc}&battery={batt}&ip={ip}&timediff={timediff}\n# and publishes a payload to an SNS topic.\n\nimport logging\nimport datetime\nimport json\nimport os\nimport boto3\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef handler(event, context):\n    ## event:\n    # {\n    #     \"body\": null,\n    #     \"resource\": \"/record_location/{device_id+}\",\n    #     \"requestContext\": {\n    #         \"requestTime\": \"05/Mar/2018:03:10:54 +0000\",\n    #         \"protocol\": \"HTTP/1.1\",\n    #         \"resourceId\": \"84wfaw\",\n    #         \"apiId\": \"54f0rg5tk0\",\n    #         \"resourcePath\": \"/record_location/{device_id+}\",\n    #         \"httpMethod\": \"GET\",\n    #         \"requestId\": \"d126d53d-2022-11e8-9ed6-a30b73d7c377\",\n    #         \"path\": \"/prod/record_location/Camilla\",\n    #         \"accountId\": \"abcdef012345\",\n    #         \"requestTimeEpoch\": 1520219454476,\n    #         \"identity\": {\n    #             \"userArn\": null,\n    #             \"cognitoAuthenticationType\": null,\n    #             \"accessKey\": null,\n    #             \"caller\": null,\n    #             \"userAgent\": \"curl/7.54.0\",\n    #             \"user\": null,\n    #             \"cognitoIdentityPoolId\": null,\n    #             \"cognitoIdentityId\": null,\n    #             \"cognitoAuthenticationProvider\": null,\n    #             \"sourceIp\": \"127.0.0.1\",\n    #             \"accountId\": null\n    #         },\n    #         \"stage\": \"prod\"\n    #     },\n    #     \"queryStringParameters\": {\n    #         \"battery\": \"0\",\n    #         \"ip\": \"127.0.0.1\",\n    #         \"lon\": \"-99\",\n    #         \"alt_accuracy\": \"1\",\n    #         \"lat\": \"99\",\n    #         \"timediff\": \"-42\",\n    #         \"alt\": \"69\",\n    #         \"accuracy\": \"0\"\n    #     },\n    #     \"httpMethod\": \"GET\",\n    #     \"pathParameters\": {\n    #         \"device_id\": \"Camilla\"\n    #     },\n    #     \"headers\": {\n    #         \"Via\": \"2.0 3f717e4fd5cbd6c94c4a3860328e1093.cloudfront.net (CloudFront)\",\n    #         \"CloudFront-Is-Desktop-Viewer\": \"true\",\n    #         \"CloudFront-Is-SmartTV-Viewer\": \"false\",\n    #         \"CloudFront-Forwarded-Proto\": \"https\",\n    #         \"X-Forwarded-For\": \"127.0.0.1, 127.0.0.1\",\n    #         \"CloudFront-Viewer-Country\": \"US\",\n    #         \"Accept\": \"*/*\",\n    #         
\"User-Agent\": \"curl/7.54.0\",\n # \"X-Amzn-Trace-Id\": \"Root=1-5a9cb53e-9fd5a9f0115beb6d57ecf7a8\",\n # \"Host\": \"deadbeef.execute-api.us-east-1.amazonaws.com\",\n # \"X-Forwarded-Proto\": \"https\",\n # \"X-Amz-Cf-Id\": \"2p8iR__27vNbp7mCqtfkWWTzMLYSxQDeA30d9xen1OjLU3WV8Olfuw==\",\n # \"CloudFront-Is-Tablet-Viewer\": \"false\",\n # \"X-Forwarded-Port\": \"443\",\n # \"CloudFront-Is-Mobile-Viewer\": \"false\"\n # },\n # \"stageVariables\": null,\n # \"path\": \"/record_location/Camilla\",\n # \"isBase64Encoded\": false\n # }\n sns = boto3.resource(\"sns\")\n\n ## the SNS topic we're publishing to\n topic = sns.Topic(os.environ[\"topic_arn\"])\n\n timestamp = datetime.datetime.utcfromtimestamp(\n float(event[\"requestContext\"][\"requestTimeEpoch\"]) / 1000\n ).isoformat() + \"Z\"\n\n message = {\n \"timestamp\": timestamp,\n \"device_id\": event[\"pathParameters\"][\"device_id\"],\n \"request_id\": event[\"requestContext\"][\"requestId\"],\n \"source\": \"device-locator\",\n\n \"position\": {\n \"lat\": event[\"queryStringParameters\"][\"lat\"],\n \"lon\": event[\"queryStringParameters\"][\"lon\"],\n \"alt\": event[\"queryStringParameters\"][\"alt\"],\n },\n \"meta\": event[\"queryStringParameters\"],\n }\n\n for pos_key in (\"lat\", \"lon\", \"alt\"):\n del message[\"meta\"][pos_key]\n\n logger.info(\"publishing message: {}\".format(message))\n\n ## publish to the topic\n resp = topic.publish(Message=json.dumps(message))\n\n ## abort if the message wasn't accepted\n assert \"MessageId\" in resp\n\n ## return an appropriate response to the API Gateway\n return {\n \"statusCode\": 202,\n \"headers\": {\n \"Content-Type\": \"application/json\",\n },\n \"body\": json.dumps({\n \"MessageId\": resp[\"MessageId\"]\n }),\n }\n","repo_name":"blalor/device-locator-gateway","sub_path":"functions/device-locator/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39468174367","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Author: Sheeba Samuel \n\nfrom __future__ import absolute_import\nimport logging\nimport os\n\n\nlogging.basicConfig(level=int(os.environ.get(\"DEBUG\", logging.INFO)))\nlog = logging.getLogger(\"idr2rdf.Study\")\n\nclass Phenotype():\n \n def __init__(self):\n self.mapping = [\n ('Phenotype Name', 'name', 'Literal', 'reproduce'),\n ('Phenotype Description', 'desciption', 'Literal', 'reproduce'),\n ('Phenotype Term Accession', 'type', 'URIRef', 'other'),\n ('Phenotype Score Type', 'scoretype', 'Literal', 'reproduce'),\n ]","repo_name":"Sheeba-Samuel/REPRODUCE-ME","sub_path":"Datasets/idr2rdf/scripts/Phenotype.py","file_name":"Phenotype.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"29459440341","text":"from flask import Blueprint, request, render_template, session, redirect, url_for, flash\nfrom .db import connect_mongo, usersDAO, mypagesDAO\n\ndb_connection = connect_mongo.ConnectDB().db\nusers = usersDAO.Users(db_connection)\nmypages = mypagesDAO.Mypages(db_connection)\n\nuserAPI = Blueprint('userAPI', __name__, template_folder='templates')\n\n@userAPI.route('/signup', methods = ['GET', 'POST'])\ndef signup():\n\tif request.method == 'GET':\n\t\tif not 'userEmail' in session:\n\t\t\treturn render_template('signup.html')\n\t\t# return render_template('welcome.html', info = session['userEmail'])\n\t\treturn 
redirect(url_for('portAPI.port'))\n\telif request.method == 'POST':\n\t\tif 'userEmail' not in session:\n\t\t\tif users.userCreate(request.form.to_dict(flat='true')):\n\t\t\t\tmypages.mypageCreate(request.form.to_dict(flat='true')['userEmail'])\n\t\t\t\tsession['userEmail'] = request.form['userEmail']\n\t\t\t\treturn redirect(url_for('mypageAPI.mypage'))\n\t\t\telse:\n\t\t\t\tflash('Email already exists, try again with another Email.')\n\t\t\t\treturn redirect(url_for('userAPI.signup'))\n\t\treturn redirect(url_for('mypageAPI.mypage'))\n@userAPI.route('/')\ndef home():\n\treturn redirect(url_for(\"portAPI.port\"))\n\n@userAPI.route('/login', methods=['GET', 'POST'])\ndef login():\n\tif request.method == 'GET':\n\t\tif 'userEmail' in session:\n\t\t\treturn redirect(url_for('portAPI.port'))\n\t\t\t# return render_template('portfolio.html')\n\t\treturn render_template('login.html')\n\telif request.method == 'POST':\n\t\tif 'userEmail' in session:\n\t\t\treturn redirect(url_for('portAPI.port'))\n\t\telse:\n\t\t\tif users.userAuthentication(request.form.to_dict(flat='true')):\n\t\t\t\tsession['userEmail'] = request.form['userEmail']\n\t\t\t\t# return render_template('welcome.html', info = session['userEmail'])\n\t\t\t\t#return render_template('portfolio.html', info= session['userEmail'])\n\t\t\t\treturn redirect(url_for('portAPI.port'))\n\t\t\telse:\n\t\t\t\tflash('Wrong ID or PW, you have to check your ID or PW', 'error')\n\t\t\t\treturn redirect(url_for('userAPI.login'))\n@userAPI.route('/logout')\ndef logout():\n\tif \"userEmail\" in session:\n\t\tsession.pop('userEmail')\n\t\treturn redirect(url_for('userAPI.login'))\n\telse:\n\t\tflash('You have to be logged in')\n\t\treturn redirect(url_for('userAPI.login'))\n\n\n\n#if __name__ =='__main__':\n#\tuserAPI.run(host = '0.0.0.0', port = 5000)\n","repo_name":"jun971006/portfolio_project","sub_path":"view/userAPI.py","file_name":"userAPI.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"15452812085","text":"__id__ = \"$Id$\"\n__version__ = \"$Revision$\"\n__date__ = \"$Date$\"\n__copyright__ = \"Copyright (c) 2011. 
Orca Team.\"\n__license__ = \"LGPL\"\n\nfrom gi.repository import GObject\nimport pyatspi\nimport Queue\nimport threading\nimport time\n\nimport braille\nimport debug\nimport input_event\nimport orca\nimport orca_state\nimport settings\nimport speech\n\nfrom orca_i18n import _\n\n_scriptManager = None\n\nclass EventManager:\n\n def __init__(self):\n\n self._listenerCounts = {}\n self.registry = pyatspi.Registry\n self._enqueueCount = 0\n self._dequeueCount = 0\n self._eventQueue = Queue.Queue(0)\n self._gidleId = 0\n self._gidleLock = threading.Lock()\n self.noFocusTimestamp = 0.0\n\n def activate(self):\n \"\"\"Called when this presentation manager is activated.\"\"\"\n\n global _scriptManager\n _scriptManager = getattr(orca, '_scriptManager')\n\n # Tell BrlTTY which commands we care about.\n #\n braille.setupKeyRanges(orca_state.activeScript.brailleBindings.keys())\n\n self._registerListener(\"window:activate\")\n self._registerListener(\"window:deactivate\")\n self._registerListener(\"object:children-changed:remove\")\n\n win = orca_state.activeScript.utilities.activeWindow()\n if win:\n # Generate a fake window activation event so the application\n # can tell the user about itself.\n #\n class _FakeEvent:\n def __init__(self, source, eventType,\n detail1, detail2, any_data):\n self.source = source\n self.type = eventType\n self.detail1 = detail1\n self.detail2 = detail2\n self.any_data = any_data\n self.host_application = None\n\n class _FakeData:\n def __init__(self):\n pass\n def value(self):\n return None\n\n fe = _FakeEvent(win, \"window:activate\", 0, 0, _FakeData())\n self._enqueue(fe)\n\n def deactivate(self):\n \"\"\"Called when this event manager is deactivated.\"\"\"\n\n for eventType in self._listenerCounts.keys():\n self.registry.deregisterEventListener(self._enqueue, eventType)\n self._listenerCounts = {}\n\n def _ignore(self, event):\n \"\"\"Returns True if this event should be ignored.\"\"\"\n\n if not event or not event.source:\n return True\n\n ignoredList = ['object:state-changed:defunct',\n 'object:property-change:accessible-parent']\n ignoredList.extend(settings.ignoredEventsList)\n if filter(event.type.startswith, ignoredList):\n return True\n\n # This should ultimately be changed as there are valid reasons\n # to handle these events at the application level.\n if event.type.startswith('object:children-changed:remove') \\\n and event.source != self.registry.getDesktop(0):\n return True\n\n return False\n\n def _addToQueue(self, event, asyncMode):\n debugging = settings.debugEventQueue\n if debugging:\n debug.println(debug.LEVEL_ALL, \" acquiring lock...\")\n self._gidleLock.acquire()\n\n if debugging:\n debug.println(debug.LEVEL_ALL, \" ...acquired\")\n debug.println(debug.LEVEL_ALL, \" calling queue.put...\")\n debug.println(debug.LEVEL_ALL, \" (full=%s)\" \\\n % self._eventQueue.full())\n\n self._eventQueue.put(event)\n if debugging:\n debug.println(debug.LEVEL_ALL, \" ...put complete\")\n\n if asyncMode and not self._gidleId:\n if settings.gilSleepTime:\n time.sleep(settings.gilSleepTime)\n self._gidleId = GObject.idle_add(self._dequeue)\n\n if debugging:\n debug.println(debug.LEVEL_ALL, \" releasing lock...\")\n self._gidleLock.release()\n if settings.debugEventQueue:\n debug.println(debug.LEVEL_ALL, \" ...released\")\n\n def _queuePrintln(self, e, isEnqueue=True):\n \"\"\"Convenience method to output queue-related debugging info.\"\"\"\n\n if isinstance(e, input_event.KeyboardEvent):\n data = \"'%s' (%d)\" % (e.event_string, e.hw_code)\n elif isinstance(e, 
input_event.BrailleEvent):\n            data = \"'%s'\" % repr(e.event)\n        elif not debug.eventDebugFilter or debug.eventDebugFilter.match(e.type):\n            data = \"\"\n        else:\n            return\n\n        eType = str(e.type).upper()\n        if isEnqueue:\n            string = \"----------> QUEUEING %s %s\" % (eType, data)\n        else:\n            string = \"DEQUEUED %s %s <----------\" % (eType, data)\n\n        debug.println(debug.LEVEL_ALL, string)\n\n    def _enqueue(self, e):\n        \"\"\"Handles the enqueueing of all events destined for scripts.\n\n        Arguments:\n        - e: an at-spi event.\n        \"\"\"\n\n        if settings.debugEventQueue:\n            if self._enqueueCount:\n                msg = \"_enqueue entered before exiting (count = %d)\" \\\n                    % self._enqueueCount\n                debug.println(debug.LEVEL_ALL, msg)\n            self._enqueueCount += 1\n\n        inputEvents = (input_event.KeyboardEvent, input_event.BrailleEvent)\n        isObjectEvent = not isinstance(e, inputEvents)\n        if isObjectEvent and self._ignore(e):\n            if settings.debugEventQueue:\n                self._enqueueCount -= 1\n            return\n\n        self._queuePrintln(e)\n\n        asyncMode = settings.asyncMode\n        if isObjectEvent:\n            app = e.source.getApplication()\n            try:\n                toolkitName = app.toolkitName\n            except:\n                toolkitName = None\n            if toolkitName in settings.synchronousToolkits:\n                asyncMode = False\n            script = _scriptManager.getScript(app, e.source)\n            script.eventCache[e.type] = (e, time.time())\n\n        self._addToQueue(e, asyncMode)\n        if not asyncMode:\n            self._dequeue()\n\n        if settings.debugEventQueue:\n            self._enqueueCount -= 1\n\n    def _dequeue(self):\n        \"\"\"Handles all events destined for scripts. Called by the GTK\n        idle thread.\"\"\"\n\n        rerun = True\n\n        if settings.debugEventQueue:\n            debug.println(debug.LEVEL_ALL,\n                          \"event_manager._dequeue %d\" % self._dequeueCount)\n            self._dequeueCount += 1\n\n        try:\n            event = self._eventQueue.get_nowait()\n            self._queuePrintln(event, isEnqueue=False)\n            inputEvents = (input_event.KeyboardEvent, input_event.BrailleEvent)\n            if isinstance(event, inputEvents):\n                self._processInputEvent(event)\n            else:\n                orca_state.currentObjectEvent = event\n                debugging = not debug.eventDebugFilter \\\n                            or debug.eventDebugFilter.match(event.type)\n                if debugging:\n                    startTime = time.time()\n                    debug.println(debug.eventDebugLevel,\n                                  \"\\nvvvvv PROCESS OBJECT EVENT %s vvvvv\" \\\n                                  % event.type)\n                self._processObjectEvent(event)\n                if debugging:\n                    debug.println(debug.eventDebugLevel,\n                                  \"TOTAL PROCESSING TIME: %.4f\" \\\n                                  % (time.time() - startTime))\n                    debug.println(debug.eventDebugLevel,\n                                  \"^^^^^ PROCESS OBJECT EVENT %s ^^^^^\\n\" \\\n                                  % event.type)\n                orca_state.currentObjectEvent = None\n\n            # [[[TODO: HACK - it would seem logical to only do this if we\n            # discover the queue is empty, but this introduces a hang for\n            # some reason if done inside an acquire/release block for a\n            # lock. 
So...we do it here.]]]\n #\n noFocus = \\\n not orca_state.activeScript \\\n or (not orca_state.locusOfFocus \\\n and self.noFocusTimestamp != orca_state.noFocusTimestamp)\n\n self._gidleLock.acquire()\n if self._eventQueue.empty():\n if noFocus:\n if settings.gilSleepTime:\n time.sleep(settings.gilSleepTime)\n # Translators: this is intended to be a short phrase to\n # speak and braille to tell the user that no component\n # has keyboard focus.\n #\n message = _(\"No focus\")\n if settings.brailleVerbosityLevel == \\\n settings.VERBOSITY_LEVEL_VERBOSE:\n braille.displayMessage(message)\n if settings.speechVerbosityLevel == \\\n settings.VERBOSITY_LEVEL_VERBOSE:\n speech.speak(message)\n self.noFocusTimestamp = orca_state.noFocusTimestamp\n self._gidleId = 0\n rerun = False # destroy and don't call again\n self._gidleLock.release()\n except Queue.Empty:\n debug.println(debug.LEVEL_SEVERE,\n \"event_manager._dequeue: the event queue is empty!\")\n self._gidleId = 0\n rerun = False # destroy and don't call again\n except:\n debug.printException(debug.LEVEL_SEVERE)\n\n if settings.debugEventQueue:\n self._dequeueCount -= 1\n debug.println(debug.LEVEL_ALL,\n \"Leaving _dequeue. Count: %d\" % self._dequeueCount)\n\n return rerun\n\n def _registerListener(self, eventType):\n \"\"\"Tells this module to listen for the given event type.\n\n Arguments:\n - eventType: the event type.\n \"\"\"\n\n if eventType in self._listenerCounts:\n self._listenerCounts[eventType] += 1\n else:\n self.registry.registerEventListener(self._enqueue, eventType)\n self._listenerCounts[eventType] = 1\n\n def _deregisterListener(self, eventType):\n \"\"\"Tells this module to stop listening for the given event type.\n\n Arguments:\n - eventType: the event type.\n \"\"\"\n\n if not eventType in self._listenerCounts:\n return\n\n self._listenerCounts[eventType] -= 1\n if self._listenerCounts[eventType] == 0:\n self.registry.deregisterEventListener(self._enqueue, eventType)\n del self._listenerCounts[eventType]\n\n def registerListeners(self, script):\n \"\"\"Tells the FocusTrackingPresenter to listen for all\n the event types of interest to the script.\n\n Arguments:\n - script: the script.\n \"\"\"\n\n for eventType in script.listeners.keys():\n self._registerListener(eventType)\n\n def deregisterListeners(self, script):\n \"\"\"Tells the FocusTrackingPresenter to stop listening for all the\n event types of interest to the script.\n\n Arguments:\n - script: the script.\n \"\"\"\n\n for eventType in script.listeners.keys():\n self._deregisterListener(eventType)\n\n def _processInputEvent(self, event):\n \"\"\"Processes the given input event based on the keybinding from the\n currently-active script.\n\n Arguments:\n - event: an instance of BrailleEvent or a KeyboardEvent\n \"\"\"\n\n if not orca_state.activeScript:\n return\n\n if isinstance(event, input_event.KeyboardEvent):\n function = orca_state.activeScript.processKeyboardEvent\n data = \"'%s' (%d)\" % (event.event_string, event.hw_code)\n elif isinstance(event, input_event.BrailleEvent):\n function = orca_state.activeScript.processBrailleEvent\n data = \"'%s'\" % repr(event.event)\n else:\n return\n\n eType = str(event.type).upper()\n debug.println(debug.eventDebugLevel,\n \"\\nvvvvv PROCESS %s %s vvvvv\" % (eType, data))\n try:\n function(event)\n except:\n debug.printException(debug.LEVEL_WARNING)\n debug.printStack(debug.LEVEL_WARNING)\n debug.println(debug.eventDebugLevel,\n \"^^^^^ PROCESS %s %s ^^^^^\\n\" % (eType, data))\n\n @staticmethod\n def 
_getScriptForEvent(event):\n \"\"\"Returns the script associated with event.\"\"\"\n\n if event.type.startswith(\"mouse:\"):\n return orca_state.activeScript\n\n script = None\n try:\n app = event.host_application or event.source.getApplication()\n except:\n debug.printException(debug.LEVEL_WARNING)\n else:\n script = _scriptManager.getScript(app, event.source)\n\n return script\n\n def _isActivatableEvent(self, event, script=None):\n \"\"\"Determines if the event is one which should cause us to\n change which script is currently active.\n\n Returns a (boolean, string) tuple indicating whether or not\n this is an activatable event, and our reason (for the purpose\n of debugging).\n \"\"\"\n\n if not script:\n script = self._getScriptForEvent(event)\n\n if not script:\n return False, \"There is no script for this event.\"\n\n if script == orca_state.activeScript:\n return False, \"The script for this event is already active.\"\n\n if not script.isActivatableEvent(event):\n return False, \"The script says not to activate for this event.\"\n\n eType = event.type\n if eType.startswith('window:activate'):\n return True, \"window:activate event\"\n\n if eType.startswith('focus') \\\n or (eType.startswith('object:state-changed:focused')\n and event.detail1):\n return True, \"Event source claimed focus.\"\n\n # This condition appears with gnome-screensave-dialog.\n # See bug 530368.\n if eType.startswith('object:state-changed:showing') \\\n and event.source.getRole() == pyatspi.ROLE_PANEL \\\n and event.source.getState().contains(pyatspi.STATE_MODAL):\n return True, \"Modal panel is showing.\"\n\n return False, \"No reason found to activate a different script.\"\n\n def _processObjectEvent(self, event):\n \"\"\"Handles all object events destined for scripts.\n\n Arguments:\n - e: an at-spi event.\n \"\"\"\n\n debug.printObjectEvent(debug.LEVEL_FINEST, event)\n eType = event.type\n\n if eType.startswith(\"object:children-changed:remove\"):\n try:\n if event.source == self.registry.getDesktop(0):\n _scriptManager.reclaimScripts()\n if settings.debugMemoryUsage:\n orca.cleanupGarbage()\n return\n except (LookupError, RuntimeError):\n # If we got this error here, we'll get it again when we\n # attempt to get the state, catch it, and clean up.\n pass\n except:\n debug.printException(debug.LEVEL_WARNING)\n return\n\n # Clean up any flat review context so that Orca does not get\n # confused (see bgo#609633)\n #\n if eType.startswith(\"window:deactivate\") \\\n and orca_state.activeScript \\\n and orca_state.activeScript.flatReviewContext \\\n and orca_state.activeScript.app == event.host_application:\n orca_state.activeScript.drawOutline(-1, 0, 0, 0)\n orca_state.activeScript.flatReviewContext = None\n\n try:\n state = event.source.getState()\n except (LookupError, RuntimeError):\n debug.println(debug.LEVEL_WARNING,\n \"Error while processing event: %s\" % eType)\n if eType.startswith(\"window:deactivate\"):\n orca.setLocusOfFocus(event, None)\n orca_state.activeWindow = None\n return\n except:\n debug.printException(debug.LEVEL_WARNING)\n return\n\n if state and state.contains(pyatspi.STATE_DEFUNCT):\n debug.println(debug.LEVEL_FINEST, \"IGNORING DEFUNCT OBJECT\")\n if eType.startswith(\"window:deactivate\"):\n orca.setLocusOfFocus(event, None)\n orca_state.activeWindow = None\n return\n\n if state and state.contains(pyatspi.STATE_ICONIFIED):\n debug.println(debug.LEVEL_FINEST, \"IGNORING ICONIFIED OBJECT\")\n return\n\n if not debug.eventDebugFilter or debug.eventDebugFilter.match(eType) \\\n and not 
eType.startswith(\"mouse:\"):\n debug.printDetails(debug.LEVEL_FINEST, \" \", event.source)\n\n script = self._getScriptForEvent(event)\n setNewActiveScript, reason = self._isActivatableEvent(event, script)\n if setNewActiveScript:\n app = event.host_application or event.source.getApplication()\n _scriptManager.setActiveScript(script, reason)\n\n script.processObjectEvent(event)\n\n def processKeyboardEvent(self, keyboardEvent):\n \"\"\"Processes the given keyboard event based on the keybinding from the\n currently active script. This method is called synchronously from the\n at-spi registry and should be performant. In addition, it must return\n True if it has consumed the event (and False if not).\n\n Arguments:\n - keyboardEvent: an instance of input_event.KeyboardEvent\n\n Returns True if the event should be consumed.\n \"\"\"\n\n consume = False\n if orca_state.activeScript \\\n and orca_state.activeScript.consumesKeyboardEvent(keyboardEvent):\n consume = not orca_state.bypassNextCommand\n if consume:\n self._enqueue(keyboardEvent)\n\n return consume\n\n def processBrailleEvent(self, brailleEvent):\n \"\"\"Called whenever a cursor key is pressed on the Braille display.\n\n Arguments:\n - brailleEvent: an instance of input_event.BrailleEvent\n\n Returns True if the command was consumed; otherwise False\n \"\"\"\n\n if orca_state.activeScript \\\n and orca_state.activeScript.consumesBrailleEvent(brailleEvent):\n self._enqueue(brailleEvent)\n return True\n else:\n return False\n","repo_name":"Alberto-Beralix/Beralix","sub_path":"i386-squashfs-root/usr/share/pyshared/orca/event_manager.py","file_name":"event_manager.py","file_ext":"py","file_size_in_byte":18564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"44180987196","text":"from final_project_python import Margherita\r\nfrom final_project_python import Pepperoni\r\nfrom final_project_python import Hawaiian\r\nfrom final_project_python import delivery\r\nfrom final_project_python import pickup\r\nfrom final_project_python import bake\r\nimport unittest\r\n\r\n\r\nclass TestCountLetters(unittest.TestCase):\r\n def test_margh_dict_l(self):\r\n actual = Margherita().dict()\r\n expected = {\"tomato sauce\": 1,\r\n \"mozzarella\": 2,\r\n \"tomatoes\": 5}\r\n self.assertEqual(actual, expected)\r\n\r\n def test_pepp_dict_l(self):\r\n actual = Pepperoni().dict()\r\n expected = {\"tomato sauce\": 1,\r\n \"mozzarella\": 2,\r\n \"pepperoni\": 8}\r\n self.assertEqual(actual, expected)\r\n\r\n def test_hawaii_dict_l(self):\r\n actual = Hawaiian().dict()\r\n expected = {\"tomato sauce\": 1,\r\n \"mozzarella\": 2,\r\n \"chicken\": 1,\r\n \"pineapples\": 1}\r\n self.assertEqual(actual, expected)\r\n\r\n def test_margh_dict_xl(self):\r\n actual = Margherita(size=\"XL\").dict()\r\n expected = {\"tomato sauce\": 2,\r\n \"mozzarella\": 3,\r\n \"tomatoes\": 10}\r\n self.assertEqual(actual, expected)\r\n\r\n def test_pepp_dict_xl(self):\r\n actual = Pepperoni(size=\"XL\").dict()\r\n expected = {\"tomato sauce\": 2,\r\n \"mozzarella\": 3,\r\n \"pepperoni\": 12}\r\n self.assertEqual(actual, expected)\r\n\r\n def test_hawaii_dict_xl(self):\r\n actual = Hawaiian(size=\"XL\").dict()\r\n expected = {\"tomato sauce\": 2,\r\n \"mozzarella\": 3,\r\n \"chicken\": 2,\r\n \"pineapples\": 2}\r\n self.assertEqual(actual, expected)\r\n\r\n def test_eq_margh_l(self):\r\n actual_1 = Margherita()\r\n actual_2 = Margherita(size=\"L\")\r\n self.assertFalse(actual_1 != actual_2)\r\n\r\n def 
test_eq_pepp_l(self):\r\n        actual_1 = Pepperoni()\r\n        actual_2 = Pepperoni(size=\"L\")\r\n        self.assertFalse(actual_1 != actual_2)\r\n\r\n    def test_not_eq_margh_l_pepp_l(self):\r\n        actual_1 = Margherita()\r\n        actual_2 = Pepperoni(size=\"L\")\r\n        self.assertFalse(actual_1 == actual_2)\r\n\r\n    def test_not_eq_margh_l_xl(self):\r\n        actual_1 = Margherita()\r\n        actual_2 = Margherita(size=\"XL\")\r\n        self.assertFalse(actual_1 == actual_2)\r\n\r\n    def test_not_eq_pepp_l_xl(self):\r\n        actual_1 = Pepperoni()\r\n        actual_2 = Pepperoni(size=\"XL\")\r\n        self.assertFalse(actual_1 == actual_2)\r\n\r\n    def test_not_eq_hawaii_l_xl(self):\r\n        actual_1 = Hawaiian()\r\n        actual_2 = Hawaiian(size=\"XL\")\r\n        self.assertFalse(actual_1 == actual_2)\r\n\r\n    def test_not_eq_margh_l_hawaii_l(self):\r\n        actual_1 = Margherita()\r\n        actual_2 = Hawaiian(size=\"L\")\r\n        self.assertFalse(actual_1 == actual_2)\r\n\r\n    def test_not_eq_hawaii_l_pepp_l(self):\r\n        actual_1 = Hawaiian()\r\n        actual_2 = Pepperoni(size=\"L\")\r\n        self.assertFalse(actual_1 == actual_2)\r\n\r\n    def test_not_eq_hawaii_xl_pepp_xl(self):\r\n        actual_1 = Hawaiian(size=\"XL\")\r\n        actual_2 = Pepperoni(size=\"XL\")\r\n        self.assertFalse(actual_1 == actual_2)\r\n\r\n    def test_delivery_margh_l(self):\r\n        actual = delivery(Margherita())\r\n        expected = Margherita(size=\"L\")\r\n        self.assertEqual(actual, expected)\r\n\r\n    def test_pickup_pepp_l(self):\r\n        actual = pickup(Pepperoni())\r\n        expected = Pepperoni(size=\"L\")\r\n        self.assertEqual(actual, expected)\r\n\r\n    def test_bake_hawaii_l(self):\r\n        actual = bake(Hawaiian())\r\n        expected = Hawaiian(size=\"L\")\r\n        self.assertEqual(actual, expected)\r\n","repo_name":"DmitryKuptsov/AAA_f","sub_path":"test_final_project_python.py","file_name":"test_final_project_python.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11491512622","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 11 11:19:40 2023\n\n@author: ignacio\n\nMEAN REVERSION STRATEGY\n\nhttps://raposa.trade/blog/how-to-build-your-first-mean-reversion-trading-strategy-in-python/\n\nThis Python script defines a starting date and a DataFrame called \"resultado\" \nwith four columns. The script also defines two functions.\n\n\"SMAMeanReversion\" takes five arguments: \nticker (a string that specifies the stock ticker), sma (an integer that specifies\nthe moving average period), threshold (a float that specifies the buy/sell \nthreshold), shorts (a Boolean that specifies whether or not to allow short \npositions), and start_date (a string that specifies the start date for the data).\n\nThis function retrieves historical price data from yfinance, calculates the SMA,\nand then computes an \"extension\" value by subtracting the SMA from the closing \nprice and dividing the result by the SMA. Based on the \"extension\" value and \nthe buy/sell threshold, the function calculates a position for each data point. \nThe function then calculates the returns and some statistics related to the \nstrategy, such as cumulative returns and peak returns. Finally, it returns a \nfiltered DataFrame without any missing values.\n\nThe second function is called \"SMAMeanReversionSafety\" and has the same \narguments as the first function, plus an additional argument called \nsafety_threshold (a float that specifies a safety threshold to prevent entering \ninto a position that is too risky). 
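\n\nFor example (illustrative numbers, not from the original script): with\nthreshold = 0.2 and safety_threshold = 0.25, a close of 78 against an SMA of\n100 gives extension = (78 - 100) / 100 = -0.22, so a long is opened because\n-0.25 < -0.22 < -0.20, while an extension of -0.30 would be treated as too\nstretched to enter.\n\n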
This function works similarly to the first \nfunction but calculates the position based on both the buy/sell threshold and \nthe safety threshold.\n\nThe script then defines a list called \"Cartera\" that contains a single string \nwith a stock ticker. It also defines some variables, such as the SMA period, \nthe buy/sell threshold, and a Boolean for allowing short positions. Finally, it \nloops through the \"Cartera\" list, calls the \"SMAMeanReversion\" function for \neach stock ticker, and saves the resulting DataFrame in the \"resultado\" \nDataFrame. The script can also plot the price data, the SMA, and the long \npositions if the \"plots\" variable is set to True.\n\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport yfinance as yf\n\n# end_date='2020-12-31'\nStart_Date = '2018-01-01' #'2000-01-01'\n\ndef SMAMeanReversion(ticker, sma, threshold, shorts=False,\n                     start_date=Start_Date):\n    yfObj = yf.Ticker(ticker)\n    data = yfObj.history(start=start_date)#, end=end_date)\n    data['SMA'] = data['Close'].rolling(sma).mean()\n    data['extension'] = (data['Close'] - data['SMA']) / data['SMA']\n    \n    data['position'] = np.nan\n    data['position'] = np.where(data['extension']<-threshold,\n                              1, data['position'])\n    if shorts:\n        data['position'] = np.where(\n            data['extension']>threshold, -1, data['position'])\n    \n    data['position'] = np.where(np.abs(data['extension'])<0.01,\n                              0, data['position'])\n    data['position'] = data['position'].ffill().fillna(0)\n    \n    # Calculate returns and statistics\n    data['returns'] = data['Close'] / data['Close'].shift(1)\n    data['log_returns'] = np.log(data['returns'])\n    data['strat_returns'] = data['position'].shift(1) * \\\n        data['returns']\n    data['strat_log_returns'] = data['position'].shift(1) * \\\n        data['log_returns']\n    data['cum_returns'] = np.exp(data['log_returns'].cumsum())\n    data['strat_cum_returns'] = np.exp(data['strat_log_returns'].cumsum())\n    data['peak'] = data['cum_returns'].cummax()\n    data['strat_peak'] = data['strat_cum_returns'].cummax()\n    \n    return data.dropna()\n\n\n\n\ndef SMAMeanReversionSafety(ticker, sma, threshold, \n                           safety_threshold=0.25, shorts=False, \n                           start_date=Start_Date):\n    yfObj = yf.Ticker(ticker)\n    data = yfObj.history(start=start_date)#, end=end_date)\n    data['SMA'] = data['Close'].rolling(sma).mean()\n    data['extension'] = (data['Close'] - data['SMA']) / data['SMA']\n    \n    data['position'] = np.nan\n    data['position'] = np.where(\n        (data['extension']<-threshold) & \n        (data['extension']>-safety_threshold), \n        1, data['position'])\n    \n    if shorts:\n        data['position'] = np.where(\n            (data['extension']>threshold) & \n            (data['extension']<safety_threshold), \n            -1, data['position'])\n    \n    data['position'] = np.where(np.abs(data['extension'])<0.01,\n                              0, data['position'])\n    data['position'] = data['position'].ffill().fillna(0)\n    \n    # Calculate returns and statistics\n    data['returns'] = data['Close'] / data['Close'].shift(1)\n    data['log_returns'] = np.log(data['returns'])\n    data['strat_returns'] = data['position'].shift(1) * \\\n        data['returns']\n    data['strat_log_returns'] = data['position'].shift(1) * \\\n        data['log_returns']\n    data['cum_returns'] = np.exp(data['log_returns'].cumsum())\n    data['strat_cum_returns'] = np.exp(data['strat_log_returns'].cumsum())\n    data['peak'] = data['cum_returns'].cummax()\n    data['strat_peak'] = data['strat_cum_returns'].cummax()\n    \n    return data.dropna()\n\nimport traceback\nimport os\nimport pandas as pd\nimport cx_Oracle as ora\n\n\n# get connection\ndef connect():\n    conn = []\n\n    # connect to the Oracle server (oci.dll style connection; check the 32/64 bit client)\n    port = 1521\n    hostname = '127.0.0.1' # DB Server-IP\n    service_name = 'EES'\n    tnsnamesora_alias = 'EES'\n    # tns_dsn = ora.makedsn(hostname, port, service_name) # NG | SID connection -> (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=127.0.0.1)(PORT=1521))(CONNECT_DATA=(SID=EES)))\n    tns_dsn = '(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=127.0.0.1)(PORT=1521))(CONNECT_DATA=(SID=EES)))'\n    ora_id = 'EESAPP'\n    ora_pwd = 'EESAPP'\n    ora_connVal = [ora_id, '/', ora_pwd, '@', hostname, ':', str(port), '/', service_name]\n\n    try:\n        # connect\n        # conn = ora.connect(\"EESAPP/EESAPP@127.0.0.1:1521/EES\") # OK\n        # conn = ora.connect(''.join(ora_connVal)) # OK\n        # conn = ora.connect(ora_id, ora_pwd, service_name) # NG | read through the TNS_ADMIN environment variable, so the connection has to use the Alias\n        # conn = ora.connect(ora_id, ora_pwd, tnsnamesora_alias) # OK\n        conn = ora.connect(ora_id, ora_pwd, tns_dsn) # OK\n\n    except ora.DatabaseError as ex:\n        print('Error occurred : ', ex)\n        print(getTracebackStr())\n    except Exception as e:\n        print('Error occurred : ', e)\n        print(getTracebackStr())\n\n    return conn\n
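\n\n# --- Usage sketch (added illustration; the query text and bind values are\n# examples, not from this module):\n#   conn = connect()\n#   df = select(conn, \"select * from tab where rownum < :1\", {'1': 5})\n#   rows = select(conn, \"select * from tab\", type='list')\n#   close(conn)\n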
\n# close connection\ndef close(conn):\n    if conn:\n        conn.close()\n\n\n# select (binding)\ndef select(conn, sql, bindVariable={}, type='pandas'):\n\n    # initialize the return object\n    rs = []\n\n    # open a cursor\n    cursor = conn.cursor()\n\n    if type == 'pandas':\n        ### run the query (the read_sql helper that pandas provides makes the result easy to read)\n        if len(bindVariable) == 0:\n            rs = pd.read_sql(sql, conn)\n        else:\n            rs = pd.read_sql(sql, conn, params=bindVariable)\n    else:\n        # run the query (cx_Oracle API)\n        if len(bindVariable) == 0:\n            cursor.execute(sql)\n        else:\n            cursor.execute(sql, bindVariable)\n\n        for record in cursor.fetchall():\n            rs.append(record)\n\n    # print('*** SELECT result ***')\n    # print(rs)\n    # print('₩n')\n\n    if cursor:\n        cursor.close()\n\n    return rs\n\n\n\n\n\n\n\n#--------------------------------------------------------------------------------\n\n
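# --- Note (added illustration): cx_Oracle accepts either positional binds (a\n# sequence matched to :1, :2, ...) or named binds (a dict keyed by the\n# placeholder names), e.g.:\n#   cursor.execute(\"select * from tab where rownum < :1\", (5,))\n#   cursor.execute(\"select * from tab where rownum < :n\", {'n': 5})\n# Both styles appear in oracleTest() below.\n\n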
# oracle connection Test\ndef oracleTest():\n    conn = []\n    cursor = []\n\n    # connect to the Oracle server (oci.dll style connection; check the 32/64 bit client)\n    port = 1521\n    hostname = '127.0.0.1' # DB Server-IP\n    service_name = 'EES'\n    tnsnamesora_alias = 'EES'\n    # tns_dsn = ora.makedsn(hostname, port, service_name) # NG | SID connection -> (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=127.0.0.1)(PORT=1521))(CONNECT_DATA=(SID=EES)))\n    tns_dsn = '(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=127.0.0.1)(PORT=1521))(CONNECT_DATA=(SID=EES)))'\n    ora_id = 'EESAPP'\n    ora_pwd = 'EESAPP'\n    ora_connVal = [ora_id, '/', ora_pwd, '@', hostname, ':', str(port), '/', service_name]\n\n    try:\n        # connect\n        # conn = ora.connect(\"EESAPP/EESAPP@127.0.0.1:1521/EES\") # OK\n        # conn = ora.connect(''.join(ora_connVal)) # OK\n        # conn = ora.connect(ora_id, ora_pwd, service_name) # NG | read through the TNS_ADMIN environment variable, so the connection has to use the Alias\n        # conn = ora.connect(ora_id, ora_pwd, tnsnamesora_alias) # OK\n        conn = ora.connect(ora_id, ora_pwd, tns_dsn) # OK\n\n        # open a cursor\n        cursor = conn.cursor()\n\n        # SQL query (line breaks: backslash)\n        sql = \"select \\\n                    * \\\n                from tab \\\n                where table_name = :1 \\\n                and rownum < :2 \\\n                order by 1\"\n        bindVar_dict = {\n            '1': 'emp_table',\n            '2': 5,\n        }\n        bindVar_tuple = ('emp_table', 5,)\n\n        ### Method 1: run the query\n        # print()\n        # cursor.execute(sql, bindVar_tuple)\n        # for record in cursor.fetchall():\n        #     print(record)\n        # print()\n\n        ### Method 2: run the query (the read_sql helper that pandas provides makes the result easy to read)\n        rs = pd.read_sql(sql, conn, params=bindVar_dict)\n        print()\n        print(rs)\n        print()\n\n    except ora.DatabaseError as ex:\n        print('Error occurred : ', ex)\n        print(getTracebackStr())\n        if conn:\n            conn.rollback()\n    except Exception as e:\n        print('Error occurred : ', e)\n        print(getTracebackStr())\n\n    finally:\n        if not cursor:\n            pass\n        else:\n            cursor.close()\n\n        if not conn:\n            pass\n        else:\n            conn.close()\n\n\n# Pretty-print the exception stack trace information\ndef getTracebackStr():\n    lines = traceback.format_exc().strip().split('\\n')\n    rl = [lines[-1]]\n    lines = lines[1:-1]\n    lines.reverse()\n    nstr = ''\n    for i in range(len(lines)):\n        line = lines[i].strip()\n        if line.startswith('File \"'):\n            eles = lines[i].strip().split('\"')\n            basename = os.path.basename(eles[1])\n            lastdir = os.path.basename(os.path.dirname(eles[1]))\n            eles[1] = '%s/%s' % (lastdir, basename)\n            rl.append('^\\t%s %s' % (nstr, '\"'.join(eles)))\n            nstr = ''\n        else:\n            nstr += line\n    return '\\n'.join(rl)\n\n\n# main\ndef main():\n    print('----- stx -----')\n    oracleTest()\n    print('----- end -----')\n\n\n# python C:\\Users\\Steven\\GitRepository\\stevenlabs-common-develop\\01_DjangoExampleWebApp\\ExamBlogWebSite\\mysiteProject\\DAS.py\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Steven-SWS/stevenlabs","sub_path":"01_DjangoExampleWebApp/ExamBlogWebSite/mysiteProject/DAS.py","file_name":"DAS.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"12991698808","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on Dec 5, 2016\n\n@author: Bin Liang\n'''\nimport pandas as pd\nfrom pandas_tools import inspect_dataset, visualize_league_attributes,\\\n    visualize_league_attribute_stats, process_missing_data\n\ndataset_path = './dataset/starcraft.csv'\n\n\ndef run_main():\n    \"\"\"\n    Main function\n    \"\"\"\n    \n    ## Step.0 Load the data\n    df_data = pd.read_csv(dataset_path)\n    \n    ## Step.1 Inspect the data\n    inspect_dataset(df_data)\n    \n    ## Step.2 Handle missing data\n    df_data = process_missing_data(df_data)\n    \n    ## Step.3.1 Visualize league attributes; four attributes are shown here as an example\n    column_names = ['LeagueIndex',   # league index\n                    'HoursPerWeek',  # weekly playing time\n                    'Age',           # age of the players in the league\n                    'APM',           # actions per minute\n                    'WorkersMade'    # units built per unit of time\n                    ]\n    visualize_league_attributes(df_data[column_names])\n    \n    ## Step3.2 Visualize league attribute statistics\n    visualize_league_attribute_stats(df_data[column_names], \n                                     'APM',\n                                     savedata_path='./league_apm_stats.csv',\n                                     savefig_path='./league_apm_stats.png',)\n    \n    visualize_league_attribute_stats(df_data[column_names], \n                                     'HoursPerWeek',\n                                     savedata_path='./league_hrs_stats.csv',\n                                     savefig_path='./league_hrs_stats.png',)\n    \n\nif __name__ == '__main__':\n    run_main()\n","repo_name":"StupidRabbbit/PycharmProjects","sub_path":"little_elephant/main——05.py","file_name":"main——05.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
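# --- Illustration (added): process_missing_data above comes from the
# project's pandas_tools module, which is not shown here; a minimal
# stand-in under that assumption could drop sparse columns and rows:
#
#   import pandas as pd
#
#   def process_missing_data(df_data: pd.DataFrame) -> pd.DataFrame:
#       # keep columns that are at least half populated, then drop rows with NaNs
#       df_data = df_data.dropna(axis=1, thresh=len(df_data) // 2)
#       return df_data.dropna(axis=0)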
{"seq_id":"4589233550","text":"import pandas as pd\nimport japanize_matplotlib\nimport matplotlib.pyplot as plt\n\nlabels = ['サンプル1', 'サンプル2', 'サンプル3']\nx = [10, 3, 1]\nfig, ax = plt.subplots()\nax.pie(x, labels=labels)\nplt.show()\n\"\"\"\nOutput:\n\n\"\"\"\n\nprint(end='\\n')\n\ndf = pd.DataFrame([['テレビ、ラジオや新聞等の広告', 5],\n                   ['インターネット検索結果', 15],\n                   ['インターネット上の広告', 25],\n                   ['SNS広告', 50],\n                   ['知人からの紹介', 1],\n                   ['その他', 4]], columns=[\"媒体\", \"集計結果(%)\"])\ndf\nprint(df)\n\"\"\"\nOutput:\n   媒体 集計結果(%) \n0 テレビ、ラジオや新聞等の広告 5\n1 インターネット検索結果 15 \n2 インターネット上の広告 25 \n3 SNS広告 50\n4 知人からの紹介 1 \n5 その他 4\n\"\"\"\n\nprint(end='\\n')\n\nlabels = df.loc[:, \"媒体\"]\nrate = df.loc[:, \"集計結果(%)\"]\n\nfig, ax = plt.subplots()\n# ax.pie(rate, labels=labels)\n# ax.pie(rate, labels=labels, startangle=90)\n# ax.pie(rate, labels=labels, startangle=90, counterclock=False)\n# ax.pie(rate, labels=labels, startangle=90, counterclock=False, shadow=True)\n\"\"\"\nax.pie(rate, labels=labels, startangle=90,\n       counterclock=False, autopct='%1.0f%%')\n\"\"\"\nexplode=[0.2, 0, 0, 0, 0, 0]\nax.pie(rate, labels=labels, startangle=90, counterclock=False, explode=explode)\nax.axis('equal')\nplt.show()\n# Output: \n","repo_name":"devtitozzzzg/data_analysis","sub_path":"DIVE_INTO_EXAM/MATPLOTLIB/diver_into_exam_matplotlib_8.py","file_name":"diver_into_exam_matplotlib_8.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"71455252105","text":"from django.shortcuts import render, redirect\nfrom .models import Article\n\n\n# Create your views here.\ndef index(request):\n    # show the oldest posts first\n    # articles = Article.objects.all()\n\n    # show the most recent posts first (fetch with a - prefix on pk)\n    articles = Article.objects.order_by('-pk')\n    context = {\n        'articles': articles,\n    } \n    return render(request, 'articles/index.html', context)\n\n\ndef detail(request, pk):\n    article = Article.objects.get(pk=pk)\n    context = {\n        'article': article,\n    }\n    return render(request, 'articles/detail.html', context)\n\n\ndef new(request):\n    return render(request, 'articles/new.html')\n\n\ndef create(request):\n    # take the name values passed in from the new form and assign them to title and content\n    title = request.POST.get('title')\n    content = request.POST.get('content')\n\n    article = Article(title=title, content=content)\n    article.save()\n\n    # redirect reaches the url just like variable routing!!!\n    return redirect('articles:detail', article.pk)\n\n\ndef delete(request, pk):\n    article = Article.objects.get(pk=pk)\n    article.delete()\n    return redirect('articles:index')\n\n\ndef edit(request, pk):\n    article = Article.objects.get(pk=pk)\n    context = {\n        'article': article,\n    }\n\n    return render(request, 'articles/edit.html', context)\n\n\ndef update(request, pk):\n    article = Article.objects.get(pk=pk)\n    article.title = request.POST.get('title')\n    article.content = request.POST.get('content')\n    article.save()\n\n    return redirect('articles:detail', article.pk)","repo_name":"sunoftwilight/LecturePractice","sub_path":"오프라인 강의 실습/Django/06-django-orm-with-view/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10862571799","text":"items = [{1:2},{3:4},{1:2},{8:9},{1:2}]\n\nprint('original list : ' , items)\n\nnewList = []\nfor i in range(len(items)):\n    # keep an element only if it does not appear again later,\n    # so the last occurrence of each duplicate survives\n    if items[i] not in items[i + 1:]:\n        newList.append(items[i])\n\nprint(newList)\n\nitems = [\n    1,2,3,4,5\n]\n\nprint(items)\n\n\n\n","repo_name":"sorvshrma/Python","sub_path":"Python/MachineLearning/removeduplicateDictionary.py","file_name":"removeduplicateDictionary.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73731550984","text":"import numpy as np\nimport gym\nimport pybullet_envs\nimport time\n\nfrom sac_agent import Agent\nfrom utils import plot_learning_curve\n\nif __name__ == '__main__':\n\n    # Game settings\n    game = 'HopperBulletEnv-v0'\n    n_step_max = 20_000\n    start_learning = 500\n    save_result_data = True\n    
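# Hyperparameter notes (added): gamma discounts future rewards, tau is the\n    # Polyak coefficient for the slow-moving target networks, and alpha scales\n    # the entropy bonus that keeps the policy stochastic.\n    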
figure_file = 'results/plot/' + game\n video_file = 'results/video/' + game\n result_data_file = 'results/data/' + game\n save_data_and_plot_interval = 50\n video_start_game = 20\n video_interval_game = 10\n\n # Hyperparameters\n max_size_buffer = 1_000_000\n gamma = 0.99\n tau = 0.005\n alpha = 2\n lr_actor, lr_critic_value = 3e-4, 3e-4\n fc1_dims, fc2_dims = 256, 256\n batch_size = 256\n\n # Ablation study \n two_critics = True\n remove_stochasticity = False\n\n env = gym.make(game)\n\n # Video record\n env = gym.wrappers.RecordEpisodeStatistics(env)\n env = gym.wrappers.RecordVideo(env, video_file,\n episode_trigger=lambda episode_id: \\\n (episode_id>=video_start_game) and (episode_id%video_interval_game==0))\n \n agent = Agent(input_dims=env.observation_space.shape, env=env,\n n_actions=env.action_space.shape[0], max_size=max_size_buffer, \n lr_actor=lr_actor, lr_critic_value=lr_critic_value, \n gamma=gamma, tau=tau, alpha=alpha,\n fc1_dims=fc1_dims, fc2_dims=fc2_dims, batch_size=batch_size,\n two_critics=two_critics, remove_stochasticity=remove_stochasticity)\n\n\n best_score = env.reward_range[0]\n result_dict = {'score_history': [], 'step_history': []}\n\n n_game, n_step = 0, 0\n while n_step < n_step_max:\n start = time.time()\n n_game += 1\n observation = env.reset()\n done = False\n score = 0\n while not done:\n train = (n_step > start_learning)\n action = agent.choose_action(observation, train=train)\n observation_, reward, done, info = env.step(action)\n n_step += 1\n score += reward\n agent.remember(observation, action, reward, observation_, done)\n if train:\n agent.learn()\n observation = observation_\n result_dict['score_history'].append(score)\n result_dict['step_history'].append(n_step)\n avg_score = np.mean(result_dict['score_history'][-100:])\n\n if avg_score > (best_score+abs(best_score)*1.25):\n best_score = avg_score\n\n end = time.time()\n print('episode ', n_game, 'score %.1f' % score, 'avg_score %.1f' % avg_score, 'n_step', n_step, 'time %.2f' % (end-start))\n\n if save_result_data and (n_game%save_data_and_plot_interval == 0):\n print('Saving result data and plot...')\n plot_learning_curve(result_dict, figure_file) # Plot score\n np.save(result_data_file, result_dict) # Save result data \n","repo_name":"thibautvalour/RL_Soft-Actor-Critic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38780927867","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 30 13:45:31 2018\r\n\r\n@author: User\r\n\"\"\"\r\nfrom firebase import firebase\r\nurl='https://happysorry-2c3e7.firebaseio.com/'\r\n\r\nfb=firebase.FirebaseApplication(url)\r\n\r\nfb.post(\"hello\",\"haha\")\r\n\r\n\r\n","repo_name":"happysorry/project","sub_path":"firebasetest.py","file_name":"firebasetest.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70953003785","text":"# -*- coding: utf-8 -*-\n\n\nfrom bx24_orm.core.bases import BxEntity\nfrom bx24_orm.core.fields import BxField, BxDateTime, BxBoolean\n\n\nclass BxLead(BxEntity):\n title = BxField('TITLE')\n name = BxField('NAME')\n second_name = BxField('SECOND_NAME')\n last_name = BxField('LAST_NAME')\n company_title = BxField('COMPANY_TITLE')\n source_id = BxField('SOURCE_ID')\n source_description = BxField('SOURCE_DESCRIPTION')\n status_id = BxField('STATUS_ID')\n status_description = 
BxField('STATUS_DESCRIPTION')\n    post = BxField('POST')\n    address = BxField('ADDRESS')\n    address_2 = BxField('ADDRESS_2')\n    address_city = BxField('ADDRESS_CITY')\n    address_postal_code = BxField('ADDRESS_POSTAL_CODE')\n    address_region = BxField('ADDRESS_REGION')\n    address_province = BxField('ADDRESS_PROVINCE')\n    address_country = BxField('ADDRESS_COUNTRY')\n    address_country_code = BxField('ADDRESS_COUNTRY_CODE')\n    currency_id = BxField('CURRENCY_ID')\n    opportunity = BxField('OPPORTUNITY')\n    opened = BxBoolean('OPENED')\n    comments = BxField('COMMENTS')\n    assigned_by_id = BxField('ASSIGNED_BY_ID')\n    created_by_id = BxField('CREATED_BY_ID')\n    modify_by_id = BxField('MODIFY_BY_ID')\n    date_create = BxDateTime('DATE_CREATE')\n    date_modify = BxDateTime('DATE_MODIFY')\n    company_id = BxField('COMPANY_ID')\n    contact_id = BxField('CONTACT_ID')\n    date_closed = BxDateTime('DATE_CLOSED')\n    phone = BxField('PHONE')\n    email = BxField('EMAIL')\n    web = BxField('WEB')\n    im = BxField('IM')\n    originator_id = BxField('ORIGINATOR_ID')\n    origin_id = BxField('ORIGIN_ID')\n    honorific = BxField('HONORIFIC')\n    is_return_customer = BxBoolean('IS_RETURN_CUSTOMER')\n    has_phone = BxBoolean('HAS_PHONE')\n    has_email = BxBoolean('HAS_EMAIL')\n    has_imol = BxBoolean('HAS_IMOL')\n    status_semantic_id = BxField('STATUS_SEMANTIC_ID')\n    utm_source = BxField('UTM_SOURCE')\n    utm_medium = BxField('UTM_MEDIUM')\n    utm_campaign = BxField('UTM_CAMPAIGN')\n    utm_content = BxField('UTM_CONTENT')\n    utm_term = BxField('UTM_TERM')\n    _bx_meta = {\n        'entity': 'crm.lead',\n        'default_prefix': 'FIELDS'\n    }\n\n\nclass BxDeal(BxEntity):\n    title = BxField('TITLE')\n    type_id = BxField('TYPE_ID')\n    stage = BxField('STAGE_ID')\n    probability = BxField('PROBABILITY')\n    opportunity = BxField('OPPORTUNITY')\n    company_id = BxField('COMPANY_ID')\n    contact_id = BxField('CONTACT_ID')\n    begin_date = BxDateTime('BEGIN_DATE')\n    close_date = BxDateTime('CLOSE_DATE')\n    opened = BxBoolean('OPENED')\n    closed = BxBoolean('CLOSED')\n    comments = BxField('COMMENTS')\n    assigned_by_id = BxField('ASSIGNED_BY_ID')\n    created_by_id = BxField('CREATED_BY_ID')\n    modify_by_id = BxField('MODIFY_BY_ID')\n    date_create = BxDateTime('DATE_CREATE')\n    date_modify = BxDateTime('DATE_MODIFY')\n    lead_id = BxField('LEAD_ID')\n    additional_info = BxField('ADDITIONAL_INFO')\n    originator_id = BxField('ORIGINATOR_ID')\n    origin_id = BxField('ORIGIN_ID')\n    category_id = BxField('CATEGORY_ID')\n    currency_id = BxField('CURRENCY_ID')\n    tax_value = BxField('TAX_VALUE')\n    quote_id = BxField('QUOTE_ID')\n    location_id = BxField('LOCATION_ID')\n    stage_semantic_id = BxField('STAGE_SEMANTIC_ID')\n    is_new = BxBoolean('IS_NEW')\n    is_recurring = BxBoolean('IS_RECURRING')\n    is_return_customer = BxBoolean('IS_RETURN_CUSTOMER')\n    is_repeated_approach = BxBoolean('IS_REPEATED_APPROACH')\n    source_id = BxField('SOURCE_ID')\n    source_description = BxField('SOURCE_DESCRIPTION')\n    utm_source = BxField('UTM_SOURCE')\n    utm_medium = BxField('UTM_MEDIUM')\n    utm_campaign = BxField('UTM_CAMPAIGN')\n    utm_content = BxField('UTM_CONTENT')\n    utm_term = BxField('UTM_TERM')\n    _bx_meta = {\n        'entity': 'crm.deal',\n        'default_prefix': 'FIELDS'\n    }\n\n\nclass BxCompany(BxEntity):\n    title = BxField('TITLE')\n    company_type = BxField('COMPANY_TYPE')\n    logo = BxField('LOGO')\n    address = BxField('ADDRESS')\n    address_2 = BxField('ADDRESS_2')\n    address_city = BxField('ADDRESS_CITY')\n    address_postal_code = BxField('ADDRESS_POSTAL_CODE')\n    address_region = BxField('ADDRESS_REGION')\n    address_province = BxField('ADDRESS_PROVINCE')\n    address_country 
= BxField('ADDRESS_COUNTRY')\n address_country_code = BxField('ADDRESS_COUNTRY_CODE')\n reg_address = BxField('REG_ADDRESS')\n reg_address_2 = BxField('REG_ADDRESS_2')\n reg_address_city = BxField('REG_ADDRESS_CITY')\n reg_address_postal_code = BxField('REG_ADDRESS_POSTAL_CODE')\n reg_address_region = BxField('REG_ADDRESS_REGION')\n reg_address_province = BxField('REG_ADDRESS_PROVINCE')\n reg_address_country = BxField('REG_ADDRESS_COUNTRY')\n reg_address_country_code = BxField('REG_ADDRESS_COUNTRY_CODE')\n address_legal = BxField('ADDRESS_LEGAL')\n banking_details = BxField('BANKING_DETAILS')\n industry = BxField('INDUSTRY')\n currency_id = BxField('CURRENCY_ID')\n revenue = BxField('REVENUE')\n opened = BxBoolean('OPENED')\n comments = BxField('COMMENTS')\n assigned_by_id = BxField('ASSIGNED_BY_ID')\n created_by_id = BxField('CREATED_BY_ID')\n modify_by_id = BxField('MODIFY_BY_ID')\n date_create = BxDateTime('DATE_CREATE')\n date_modify = BxDateTime('DATE_MODIFY')\n lead_id = BxField('LEAD_ID')\n originator_id = BxField('ORIGINATOR_ID')\n origin_id = BxField('ORIGIN_ID')\n phone = BxField('PHONE')\n email = BxField('EMAIL')\n web = BxField('WEB')\n im = BxField('IM')\n has_phone = BxBoolean('HAS_PHONE')\n has_email = BxBoolean('HAS_EMAIL')\n has_imol = BxBoolean('HAS_IMOL')\n employees = BxField('EMPLOYEES')\n is_my_company = BxBoolean('IS_MY_COMPANY')\n origin_version = BxField('ORIGIN_VERSION')\n utm_source = BxField('UTM_SOURCE')\n utm_medium = BxField('UTM_MEDIUM')\n utm_campaign = BxField('UTM_CAMPAIGN')\n utm_content = BxField('UTM_CONTENT')\n utm_term = BxField('UTM_TERM')\n\n _bx_meta = {\n 'entity': 'crm.company',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxContact(BxEntity):\n name = BxField('NAME')\n second_name = BxField('SECOND_NAME')\n last_name = BxField('LAST_NAME')\n photo = BxField('PHOTO')\n birthdate = BxField('BIRTHDATE')\n type_id = BxField('TYPE_ID')\n source_id = BxField('SOURCE_ID')\n source_description = BxField('SOURCE_DESCRIPTION')\n post = BxField('POST')\n address = BxField('ADDRESS')\n address_2 = BxField('ADDRESS_2')\n address_city = BxField('ADDRESS_CITY')\n address_postal_code = BxField('ADDRESS_POSTAL_CODE')\n address_region = BxField('ADDRESS_REGION')\n address_province = BxField('ADDRESS_PROVINCE')\n address_country = BxField('ADDRESS_COUNTRY')\n address_country_code = BxField('ADDRESS_COUNTRY_CODE')\n opened = BxBoolean('OPENED')\n comments = BxField('COMMENTS')\n export = BxBoolean('EXPORT')\n assigned_by_id = BxField('ASSIGNED_BY_ID')\n created_by_id = BxField('CREATED_BY_ID')\n modify_by_id = BxField('MODIFY_BY_ID')\n date_create = BxDateTime('DATE_CREATE')\n date_modify = BxDateTime('DATE_MODIFY')\n company_id = BxField('COMPANY_ID')\n lead_id = BxField('LEAD_ID')\n phone = BxField('PHONE')\n email = BxField('EMAIL')\n web = BxField('WEB')\n im = BxField('IM')\n originator_id = BxField('ORIGINATOR_ID')\n origin_id = BxField('ORIGIN_ID')\n honorific = BxField('HONORIFIC')\n has_phone = BxBoolean('HAS_PHONE')\n has_email = BxBoolean('HAS_EMAIL')\n has_imol = BxBoolean('HAS_IMOL')\n origin_version = BxField('ORIGIN_VERSION')\n face_id = BxField('FACE_ID')\n utm_source = BxField('UTM_SOURCE')\n utm_medium = BxField('UTM_MEDIUM')\n utm_campaign = BxField('UTM_CAMPAIGN')\n utm_content = BxField('UTM_CONTENT')\n utm_term = BxField('UTM_TERM')\n\n _bx_meta = {\n 'entity': 'crm.contact',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxActivity(BxEntity):\n owner_id = BxField('OWNER_ID')\n owner_type_id = BxField('OWNER_TYPE_ID')\n type_id = 
BxField('TYPE_ID')\n    provider_id = BxField('PROVIDER_ID')\n    provider_type_id = BxField('PROVIDER_TYPE_ID')\n    provider_group_id = BxField('PROVIDER_GROUP_ID')\n    associated_entity_id = BxField('ASSOCIATED_ENTITY_ID')\n    subject = BxField('SUBJECT')\n    created = BxDateTime('CREATED')\n    last_updated = BxDateTime('LAST_UPDATED')\n    start_time = BxDateTime('START_TIME')\n    end_time = BxDateTime('END_TIME')\n    deadline = BxDateTime('DEADLINE')\n    completed = BxBoolean('COMPLETED')\n    status = BxField('STATUS')\n    responsible_id = BxField('RESPONSIBLE_ID')\n    priority = BxField('PRIORITY')\n    notify_type = BxField('NOTIFY_TYPE')\n    notify_value = BxField('NOTIFY_VALUE')\n    description = BxField('DESCRIPTION')\n    description_type = BxField('DESCRIPTION_TYPE')\n    direction = BxField('DIRECTION')\n    location = BxField('LOCATION')\n    settings = BxField('SETTINGS')\n    originator_id = BxField('ORIGINATOR_ID')\n    origin_id = BxField('ORIGIN_ID')\n    author_id = BxField('AUTHOR_ID')\n    editor_id = BxField('EDITOR_ID')\n    provider_params = BxField('PROVIDER_PARAMS')\n    provider_data = BxField('PROVIDER_DATA')\n    result_mark = BxField('RESULT_MARK')\n    result_value = BxField('RESULT_VALUE')\n    result_sum = BxField('RESULT_SUM')\n    result_currency_id = BxField('RESULT_CURRENCY_ID')\n    result_status = BxField('RESULT_STATUS')\n    result_stream = BxField('RESULT_STREAM')\n    result_source_id = BxField('RESULT_SOURCE_ID')\n    autocomplete_rule = BxField('AUTOCOMPLETE_RULE')\n\n    _bx_meta = {\n        'entity': 'crm.activity',\n        'default_prefix': 'FIELDS'\n    }\n\n\nclass BxMeasure(BxEntity):\n    code = BxField('CODE')\n    measure_title = BxField('MEASURE_TITLE')\n    symbol_rus = BxField('SYMBOL_RUS')\n    symbol_intl = BxField('SYMBOL_INTL')\n    symbol_letter_intl = BxField('SYMBOL_LETTER_INTL')\n    is_default = BxBoolean('IS_DEFAULT')\n\n    _bx_meta = {\n        'entity': 'crm.measure',\n        'default_prefix': 'FIELDS'\n    }\n\n\nclass BxCatalog(BxEntity):\n    originator_id = BxField('ORIGINATOR_ID')\n    origin_id = BxField('ORIGIN_ID')\n    name = BxField('NAME')\n    xml_id = BxField('XML_ID')\n\n    _bx_meta = {\n        'entity': 'crm.catalog',\n        'default_prefix': 'FIELDS'\n    }\n\n\nclass BxQuote(BxEntity):\n    title = BxField('TITLE')\n    status_id = BxField('STATUS_ID')\n    currency_id = BxField('CURRENCY_ID')\n    opportunity = BxField('OPPORTUNITY')\n    tax_value = BxField('TAX_VALUE')\n    company_id = BxField('COMPANY_ID')\n    contact_id = BxField('CONTACT_ID')\n    mycompany_id = BxField('MYCOMPANY_ID')\n    begin_date = BxField('BEGINDATE')\n    close_date = BxField('CLOSEDATE')\n    assigned_by_id = BxField('ASSIGNED_BY_ID')\n    created_by_id = BxField('CREATED_BY_ID')\n    modify_by_id = BxField('MODIFY_BY_ID')\n    date_create = BxField('DATE_CREATE')\n    date_modify = BxField('DATE_MODIFY')\n    opened = BxBoolean('OPENED')\n    closed = BxBoolean('CLOSED')\n    comments = BxField('COMMENTS')\n    lead_id = BxField('LEAD_ID')\n    deal_id = BxField('DEAL_ID')\n    quote_member = BxField('QUOTE_MEMBER')\n    content = BxField('CONTENT')\n    terms = BxField('TERMS')\n    person_type_id = BxField('PERSON_TYPE_ID')\n    location_id = BxField('LOCATION_ID')\n    client_title = BxField('CLIENT_TITLE')\n    client_addr = BxField('CLIENT_ADDR')\n    client_contact = BxField('CLIENT_CONTACT')\n    client_email = BxField('CLIENT_EMAIL')\n    client_phone = BxField('CLIENT_PHONE')\n    client_tp_id = BxField('CLIENT_TP_ID')\n    client_tpa_id = BxField('CLIENT_TPA_ID')\n    utm_source = BxField('UTM_SOURCE')\n    utm_medium = BxField('UTM_MEDIUM')\n    utm_campaign = BxField('UTM_CAMPAIGN')\n    utm_content = BxField('UTM_CONTENT')\n    utm_term = BxField('UTM_TERM')\n\n    _bx_meta = {\n        'entity': 
'crm.quote',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxDealCategoryStage(BxEntity):\n name = BxField('NAME')\n sort = BxField('SORT')\n status_id = BxField('STATUS_ID')\n\n _bx_meta = {\n 'entity': 'crm.dealcategory.stage',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxProductSection(BxEntity):\n catalog_id = BxField('CATALOG_ID')\n section_id = BxField('SECTION_ID')\n name = BxField('NAME')\n xml_id = BxField('XML_ID')\n\n _bx_meta = {\n 'entity': 'crm.productsection',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxAddress(BxEntity):\n type_id = BxField('TYPE_ID')\n entity_type_id = BxField('ENTITY_TYPE_ID')\n entity_id = BxField('ENTITY_ID')\n address_1 = BxField('ADDRESS_1')\n address_2 = BxField('ADDRESS_2')\n city = BxField('CITY')\n postal_code = BxField('POSTAL_CODE')\n region = BxField('REGION')\n province = BxField('PROVINCE')\n country = BxField('COUNTRY')\n country_code = BxField('COUNTRY_CODE')\n anchor_type_id = BxField('ANCHOR_TYPE_ID')\n anchor_id = BxField('ANCHOR_ID')\n\n _bx_meta = {\n 'entity': 'crm.address',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxRequisite(BxEntity):\n entity_type_id = BxField('ENTITY_TYPE_ID')\n entity_id = BxField('ENTITY_ID')\n preset_id = BxField('PRESET_ID')\n date_create = BxDateTime('DATE_CREATE')\n date_modify = BxDateTime('DATE_MODIFY')\n created_by_id = BxField('CREATED_BY_ID')\n modify_by_id = BxField('MODIFY_BY_ID')\n name = BxField('NAME')\n code = BxField('CODE')\n xml_id = BxField('XML_ID')\n originator_id = BxField('ORIGINATOR_ID')\n active = BxBoolean('ACTIVE')\n sort = BxField('SORT')\n rq_name = BxField('RQ_NAME')\n rq_first_name = BxField('RQ_FIRST_NAME')\n rq_last_name = BxField('RQ_LAST_NAME')\n rq_second_name = BxField('RQ_SECOND_NAME')\n rq_company_name = BxField('RQ_COMPANY_NAME')\n rq_company_full_name = BxField('RQ_COMPANY_FULL_NAME')\n rq_company_reg_date = BxField('RQ_COMPANY_REG_DATE')\n rq_director = BxField('RQ_DIRECTOR')\n rq_accountant = BxField('RQ_ACCOUNTANT')\n rq_ceo_name = BxField('RQ_CEO_NAME')\n rq_ceo_work_pos = BxField('RQ_CEO_WORK_POS')\n rq_contact = BxField('RQ_CONTACT')\n rq_email = BxField('RQ_EMAIL')\n rq_phone = BxField('RQ_PHONE')\n rq_fax = BxField('RQ_FAX')\n rq_ident_doc = BxField('RQ_IDENT_DOC')\n rq_ident_doc_ser = BxField('RQ_IDENT_DOC_SER')\n rq_ident_doc_num = BxField('RQ_IDENT_DOC_NUM')\n rq_ident_doc_pers_num = BxField('RQ_IDENT_DOC_PERS_NUM')\n rq_ident_doc_date = BxField('RQ_IDENT_DOC_DATE')\n rq_ident_doc_issued_by = BxField('RQ_IDENT_DOC_ISSUED_BY')\n rq_ident_doc_dep_code = BxField('RQ_IDENT_DOC_DEP_CODE')\n rq_inn = BxField('RQ_INN')\n rq_kpp = BxField('RQ_KPP')\n rq_usrle = BxField('RQ_USRLE')\n rq_ifsn = BxField('RQ_IFSN')\n rq_ogrn = BxField('RQ_OGRN')\n rq_ogrnip = BxField('RQ_OGRNIP')\n rq_okpo = BxField('RQ_OKPO')\n rq_oktmo = BxField('RQ_OKTMO')\n rq_okved = BxField('RQ_OKVED')\n rq_edrpou = BxField('RQ_EDRPOU')\n rq_drfo = BxField('RQ_DRFO')\n rq_kbe = BxField('RQ_KBE')\n rq_iin = BxField('RQ_IIN')\n rq_bin = BxField('RQ_BIN')\n rq_st_cert_ser = BxField('RQ_ST_CERT_SER')\n rq_st_cert_num = BxField('RQ_ST_CERT_NUM')\n rq_st_cert_date = BxField('RQ_ST_CERT_DATE')\n rq_vat_payer = BxBoolean('RQ_VAT_PAYER')\n rq_vat_id = BxField('RQ_VAT_ID')\n rq_vat_cert_ser = BxField('RQ_VAT_CERT_SER')\n rq_vat_cert_num = BxField('RQ_VAT_CERT_NUM')\n rq_vat_cert_date = BxField('RQ_VAT_CERT_DATE')\n rq_recidence_country = BxField('RQ_RECIDENCE_COUNTRY')\n rq_base_doc = BxField('RQ_BASE_DOC')\n\n _bx_meta = {\n 'entity': 'crm.requisite',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass 
BxRequisiteBankdetail(BxEntity):\n entity_id = BxField('ENTITY_ID')\n country_id = BxField('COUNTRY_ID')\n date_create = BxDateTime('DATE_CREATE')\n date_modify = BxDateTime('DATE_MODIFY')\n name = BxField('NAME')\n code = BxField('CODE')\n xml_id = BxField('XML_ID')\n active = BxBoolean('ACTIVE')\n sort = BxField('SORT')\n rq_bank_name = BxField('RQ_BANK_NAME')\n rq_bank_addr = BxField('RQ_BANK_ADDR')\n rq_bank_route_num = BxField('RQ_BANK_ROUTE_NUM')\n rq_bik = BxField('RQ_BIK')\n rq_mfo = BxField('RQ_MFO')\n rq_acc_name = BxField('RQ_ACC_NAME')\n rq_acc_num = BxField('RQ_ACC_NUM')\n rq_iik = BxField('RQ_IIK')\n rq_acc_currency = BxField('RQ_ACC_CURRENCY')\n rq_cor_acc_num = BxField('RQ_COR_ACC_NUM')\n rq_iban = BxField('RQ_IBAN')\n rq_swift = BxField('RQ_SWIFT')\n rq_bic = BxField('RQ_BIC')\n comments = BxField('COMMENTS')\n originator_id = BxField('ORIGINATOR_ID')\n\n _bx_meta = {\n 'entity': 'crm.requisite.bankdetail',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxRequisiteLink(BxEntity):\n entity_type_id = BxField('ENTITY_TYPE_ID')\n entity_id = BxField('ENTITY_ID')\n requisite_id = BxField('REQUISITE_ID')\n bank_detail_id = BxField('BANK_DETAIL_ID')\n mc_requisite_id = BxField('MC_REQUISITE_ID')\n mc_bank_detail_id = BxField('MC_BANK_DETAIL_ID')\n\n _bx_meta = {\n 'entity': 'crm.requisite.link',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxRequisitePreset(BxEntity):\n entity_type_id = BxField('ENTITY_TYPE_ID')\n country_id = BxField('COUNTRY_ID')\n name = BxField('NAME')\n date_create = BxDateTime('DATE_CREATE')\n date_modify = BxDateTime('DATE_MODIFY')\n created_by_id = BxField('CREATED_BY_ID')\n modify_by_id = BxField('MODIFY_BY_ID')\n active = BxBoolean('ACTIVE')\n sort = BxField('SORT')\n xml_id = BxField('XML_ID')\n\n _bx_meta = {\n 'entity': 'crm.requisite.preset',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxInvoiceStatus(BxEntity):\n entity_id = BxField('ENTITY_ID')\n status_id = BxField('STATUS_ID')\n name = BxField('NAME')\n name_init = BxField('NAME_INIT')\n sort = BxField('SORT')\n system = BxBoolean('SYSTEM')\n\n _bx_meta = {\n 'entity': 'crm.invoice.status',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxDealCategory(BxEntity):\n created_date = BxDateTime('CREATED_DATE')\n name = BxField('NAME')\n is_locked = BxBoolean('IS_LOCKED')\n sort = BxField('SORT')\n\n _bx_meta = {\n 'entity': 'crm.dealcategory'\n }\n\n\nclass BxInvoice(BxEntity):\n account_number = BxField('ACCOUNT_NUMBER')\n comments = BxField('COMMENTS')\n currency = BxField('CURRENCY')\n date_bill = BxDateTime('DATE_BILL')\n date_insert = BxDateTime('DATE_INSERT')\n date_marked = BxDateTime('DATE_MARKED')\n date_pay_before = BxDateTime('DATE_PAY_BEFORE')\n date_payed = BxDateTime('DATE_PAYED')\n date_status = BxDateTime('DATE_STATUS')\n date_update = BxDateTime('DATE_UPDATE')\n created_by = BxField('CREATED_BY')\n emp_payed_id = BxField('EMP_PAYED_ID')\n emp_status_id = BxField('EMP_STATUS_ID')\n lid = BxField('LID')\n xml_id = BxField('XML_ID')\n order_topic = BxField('ORDER_TOPIC')\n pay_system_id = BxField('PAY_SYSTEM_ID')\n pay_voucher_date = BxDateTime('PAY_VOUCHER_DATE')\n pay_voucher_num = BxField('PAY_VOUCHER_NUM')\n payed = BxBoolean('PAYED')\n person_type_id = BxField('PERSON_TYPE_ID')\n price = BxField('PRICE')\n reason_marked = BxField('REASON_MARKED')\n responsible_email = BxField('RESPONSIBLE_EMAIL')\n responsible_id = BxField('RESPONSIBLE_ID')\n responsible_last_name = BxField('RESPONSIBLE_LAST_NAME')\n responsible_login = BxField('RESPONSIBLE_LOGIN')\n responsible_name = 
BxField('RESPONSIBLE_NAME')\n responsible_personal_photo = BxField('RESPONSIBLE_PERSONAL_PHOTO')\n responsible_second_name = BxField('RESPONSIBLE_SECOND_NAME')\n responsible_work_position = BxField('RESPONSIBLE_WORK_POSITION')\n status_id = BxField('STATUS_ID')\n tax_value = BxField('TAX_VALUE')\n company_id = BxField('UF_COMPANY_ID')\n contact_id = BxField('UF_CONTACT_ID')\n mycompany_id = BxField('UF_MYCOMPANY_ID')\n deal_id = BxField('UF_DEAL_ID')\n quote_id = BxField('UF_QUOTE_ID')\n user_description = BxField('USER_DESCRIPTION')\n invoice_properties = BxField('INVOICE_PROPERTIES')\n product_rows = BxField('PRODUCT_ROWS')\n\n _bx_meta = {\n 'entity': 'crm.invoice',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxPaySystem(BxEntity):\n name = BxField('NAME')\n active = BxBoolean('ACTIVE')\n sort = BxField('SORT')\n description = BxField('DESCRIPTION')\n person_type_id = BxField('PERSON_TYPE_ID')\n action_file = BxField('ACTION_FILE')\n handler = BxField('HANDLER')\n handler_code = BxField('HANDLER_CODE')\n handler_name = BxField('HANDLER_NAME')\n\n _bx_meta = {\n 'entity': 'crm.paysystem',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxPersonType(BxEntity):\n name = BxField('NAME')\n\n _bx_meta = {\n 'entity': 'crm.persontype',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxProduct(BxEntity):\n name = BxField('NAME')\n active = BxBoolean('ACTIVE')\n preview_picture = BxField('PREVIEW_PICTURE')\n detail_picture = BxField('DETAIL_PICTURE')\n sort = BxField('SORT')\n xml_id = BxField('XML_ID')\n catalog_id = BxField('CATALOG_ID')\n section_id = BxField('SECTION_ID')\n description = BxField('DESCRIPTION')\n description_type = BxField('DESCRIPTION_TYPE')\n price = BxField('PRICE')\n currency_id = BxField('CURRENCY_ID')\n vat_id = BxField('VAT_ID')\n vat_included = BxField('VAT_INCLUDED')\n measure = BxField('MEASURE')\n\n _bx_meta = {\n 'entity': 'crm.product',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxProductProperty(BxEntity):\n iblock_id = BxField('IBLOCK_ID')\n name = BxField('NAME')\n active = BxBoolean('ACTIVE')\n sort = BxField('SORT')\n default_value = BxField('DEFAULT_VALUE')\n property_type = BxField('PROPERTY_TYPE')\n row_count = BxField('ROW_COUNT')\n col_count = BxField('COL_COUNT')\n multiple = BxBoolean('MULTIPLE')\n xml_id = BxField('XML_ID')\n file_type = BxField('FILE_TYPE')\n link_iblock_id = BxField('LINK_IBLOCK_ID')\n is_required = BxBoolean('IS_REQUIRED')\n user_type = BxField('USER_TYPE')\n user_type_settings = BxField('USER_TYPE_SETTINGS')\n values = BxField('VALUES')\n\n _bx_meta = {\n 'entity': 'crm.product.property',\n 'default_prefix': 'FIELDS'\n }\n\n\nclass BxProductPropertyTypes(BxEntity):\n property_type = BxField('PROPERTY_TYPE')\n user_type = BxField('USER_TYPE')\n description = BxField('DESCRIPTION')\n\n _bx_meta = {\n 'entity': 'crm.product.property.types'\n }\n","repo_name":"dmitriilazukov/bx24_orm","sub_path":"bx24_orm/enitiy/crm.py","file_name":"crm.py","file_ext":"py","file_size_in_byte":22272,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"70851051146","text":"# a dictionary of rivers and their countries. 
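# The Bx* classes in the record above are declarative field maps in the style
# of an ORM. Below is a minimal, hypothetical sketch of how a descriptor such
# as BxField could turn Python attribute access into the upper-case Bitrix24
# REST field names; the real BxEntity internals in bx24_orm are not shown in
# this record and may well differ.

class BxField:
    def __init__(self, bx_name):
        self.bx_name = bx_name            # e.g. 'STATUS_ID'

    def __set_name__(self, owner, name):  # remember the Python attribute name
        self.attr = '_' + name

    def __get__(self, obj, objtype=None):
        return self if obj is None else getattr(obj, self.attr, None)

    def __set__(self, obj, value):
        setattr(obj, self.attr, value)


class BxEntity:
    def to_bitrix(self):
        # Collect every BxField declared on the subclass into the payload
        # shape suggested by _bx_meta['default_prefix'] (e.g. 'FIELDS').
        fields = {}
        for name, descr in type(self).__dict__.items():
            if isinstance(descr, BxField):
                value = getattr(self, name)
                if value is not None:
                    fields[descr.bx_name] = value
        return {self._bx_meta['default_prefix']: fields}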
The key is the river\nrivers = {\n 'nile': 'egypt',\n 'danube': 'germany',\n 'mississippi': 'united states',\n 'amazon': 'brazil',\n 'yangtze': 'china',\n}\n\nfor river, country in rivers.items():\n print(f\"The {river.title()} runs through {country.title()}.\")\n\nprint(\"\\nAll the rivers are:\")\n\nfor river in rivers.keys():\n print(river.title())\n\nprint(\"\\nAll the countries are:\")\n\nfor country in rivers.values():\n print(country.title())\n","repo_name":"tmroush/CrashCourse","sub_path":"CrashCourse/rivers.py","file_name":"rivers.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22360952580","text":"# Hide and Seek 3 (BOJ 13549)\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nN, K = map(int, input().split())\nINF = 1e9\nvisited = [INF for _ in range(200001)]\n\nQ = deque()\nvisited[N] = 0\nQ.append(N)\n\nwhile Q:\n x = Q.popleft()\n if x == K:\n print(visited[K])\n break\n nt = x * 2 # teleport\n if 0 <= nt <= 200000 and visited[nt] > visited[x]:\n visited[nt] = visited[x]\n Q.appendleft(nt)\n nl = x - 1 # left\n nr = x + 1 # right\n if 0 <= nl <= 200000 and visited[nl] > visited[x]+1:\n visited[nl] = visited[x] + 1\n Q.append(nl)\n if 0 <= nr <= 200000 and visited[nr] > visited[x]+1:\n visited[nr] = visited[x] + 1\n Q.append(nr)","repo_name":"zpqmdh/BOJ","sub_path":"graph/13549.py","file_name":"13549.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4176385885","text":"import click\nfrom click import command\n\nfrom src.commands.yomichan.functions import TtsServer, CreateHanziDict\nfrom src.lookups.PinyinLookup import PinyinLookup\nfrom src.utils import Utils\n\n\n@click.command()\n@click.option(\"--run-server\", is_flag=True, help=\"Will run a server which provides the audio source endpoint\")\n@click.option(\"--create-hanzi-dict\", is_flag=True,\n help=\"Will create the dict for hanzi lookup based on the UberHanzi deck\")\ndef yomichan(run_server: bool, create_hanzi_dict: bool) -> command:\n if run_server:\n TtsServer.runServer()\n # TtsServer.voiceDemo() # For debug to test voices\n exit()\n\n if create_hanzi_dict:\n pinyinLookup = PinyinLookup.create()\n CreateHanziDict.run(pinyinLookup)\n exit()\n\n Utils.exitWithError(\"No command given. 
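# The BOJ 13549 solution above is 0-1 BFS: the zero-cost move (x -> 2*x) is
# pushed to the *front* of the deque and the unit-cost moves to the back, so
# nodes come off the deque in nondecreasing distance order, like Dijkstra but
# in O(V + E). The same pattern on an explicit toy graph (invented here purely
# for illustration):

from collections import deque

def zero_one_bfs(adj, src):
    # adj[u] is a list of (v, w) edges with w in {0, 1}
    dist = {u: float('inf') for u in adj}
    dist[src] = 0
    dq = deque([src])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)  # zero-cost edge: keep it near the front
                else:
                    dq.append(v)      # unit-cost edge: process later
    return dist

print(zero_one_bfs({0: [(1, 0), (2, 1)], 1: [(2, 1)], 2: []}, 0))  # {0: 0, 1: 0, 2: 1}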
Use --help\")\n return\n","repo_name":"jihadichan/ch","sub_path":"src/commands/yomichan/YomiChan.py","file_name":"YomiChan.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2165021865","text":"\n# This file contains all the different transforms for each networks.\n\nimport torchvision.transforms as transforms\n\n# RESNET Transforms\ndef resnet_transforms():\n transform = transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ])\n return transform\n\n# My simple transform\ndef simple_transforms():\n transform = transforms.Compose([\n transforms.Resize(size=(256,256)),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])\n return transform\n\n# MobileNetV3 Small transforms\ndef mobilenetv3_small():\n transform = transforms.Compose([\n transforms.Resize(size=256),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ])\n return transform","repo_name":"Etaion777/GSC_Classification","sub_path":"Transforms/proj_transforms.py","file_name":"proj_transforms.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39977388677","text":"import logging\nfrom sc2gym.define import SCREEN_FEATURES, MINIMAP_FEATURES, ACTIONS\n\nimport sys\nfrom absl import flags\n\nfrom pysc2.env import sc2_env\nfrom pysc2.env.environment import StepType\nfrom pysc2.lib import actions\n\nimport numpy as np\n\n__author__ = 'lizhihao6'\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass SC2GameEnv():\n\n def __init__(self, **kwargs):\n\n FLAGS = flags.FLAGS\n FLAGS(sys.argv)\n self.env = sc2_env.SC2Env(**kwargs)\n self.episode = 0\n self.num_step = 0\n self.episode_reward = 0\n self.total_reward = 0\n self.screen_size = self.env.observation_spec()[\"screen\"][1:]\n self.minimap_size = self.env.observation_spec()[\"minimap\"][1:]\n\n def step(self, action):\n\n try:\n obs = self.env.step(\n [actions.FunctionCall(action[0], action[1:])])[0]\n except KeyboardInterrupt:\n logger.info(\"Interrupted. 
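# A short usage sketch for the transform factories above: each returns a
# torchvision Compose that maps a PIL image to a normalized tensor. The input
# image is synthesized here purely for illustration, and resnet_transforms()
# is assumed to be importable from the module above.

import numpy as np
from PIL import Image

img = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
x = resnet_transforms()(img)
print(x.shape)  # torch.Size([3, 224, 224]): Resize(256) then CenterCrop(224)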
Quitting...\")\n return None, 0, True, {}\n except Exception:\n logger.exception(\n \"An unexpected error occurred while applying action to environment.\")\n return None, 0, True, {}\n\n state, useless_state = self.obs2state(obs)\n reward = obs.reward\n done = (obs.step_type == StepType.LAST)\n self.num_step += 1\n self.episode_reward += reward\n self.total_reward += reward\n\n return state, reward, done, useless_state\n\n def reset(self):\n\n if self.episode > 0:\n logger.info(\"Episode %d ended with reward %d after %d steps.\",\n self.episode, self.episode_reward, self.num_step)\n logger.info(\"Got %d total reward so far, with an average reward of %g per episode\",\n self.total_reward, float(self.total_reward) / self.episode)\n\n self.episode += 1\n self.num_step = 0\n self.episode_reward = 0\n\n return self.obs2state(self.env.reset()[0])\n\n def close(self):\n\n if self.episode > 0:\n logger.info(\"Episode %d ended with reward %d after %d steps.\",\n self.episode, self.episode_reward, self.num_step)\n logger.info(\"Got %d total reward, with an average reward of %g per episode\",\n self.total_reward, float(self.total_reward) / self.episode)\n\n if self.env is not None:\n self.env.close()\n\n def obs2state(self, obs):\n\n state_dic = {}\n\n for (name, index) in zip(SCREEN_FEATURES._NAMES, SCREEN_FEATURES._INDEXS):\n state_dic[name] = obs.observation[\"screen\"][index]\n for (name, index) in zip(MINIMAP_FEATURES._NAMES, MINIMAP_FEATURES._INDEXS):\n state_dic[\"mini_\"+name] = obs.observation[\"minimap\"][index]\n\n multi_select = obs.observation[\"multi_select\"]\n state_dic[\"multi_select\"] = self.multi2single(\n obs.observation[\"multi_select\"])\n state_dic[\"build_queue\"] = self.multi2single(\n obs.observation[\"build_queue\"])\n\n available_actions = obs.observation[\"available_actions\"]\n state_dic[\"available_actions\"] = available_actions\n state_dic[\"available_actions_args_max\"] = self.get_args_max(\n available_actions)\n\n other_names = [\"player\", \"game_loop\", \"score_cumulative\",\n \"single_select\", \"control_groups\"]\n for name in other_names:\n state_dic[name] = obs.observation[name]\n\n useless_dict = {}\n useless_name = {\"cargo\", \"cargo_slots_available\",\n \"build_queue\", \"multi_select\"}\n for name in useless_name:\n useless_dict[name] = obs.observation[name]\n\n return state_dic, useless_dict\n\n def get_args_max(self, available_actions):\n\n args_max_list = []\n\n for action_id in available_actions:\n arg_max_list = []\n for arg_name in ACTIONS._ARGS[action_id]:\n if arg_name == \"screen\" or arg_name == \"screen2\":\n arg_max_list.append(self.screen_size)\n elif arg_name == \"minimap\":\n arg_max_list.append(self.minimap_size)\n else:\n arg_max_list.append(ACTIONS._ARGS_MAX[arg_name])\n args_max_list.append(arg_max_list)\n\n return args_max_list\n\n def multi2single(self, multi):\n\n single_list = [0 for i in range(7)]\n if multi.shape[0] > 0:\n id_list = []\n player_relative_list = []\n transport_list = []\n built_progress = 0.0\n single_num = 0.0\n for single in multi:\n id_list.append(single[0])\n player_relative_list.append(single[1])\n for i in range(2, 5):\n single_list[i] += single[i]\n transport_list.append(single[5])\n single_num += 1\n built_progress += single[6]\n id_counts = np.bincount(id_list)\n player_relative_counts = np.bincount(player_relative_list)\n transport_counts = np.bincount(transport_list)\n single_list[0] = np.argmax(id_counts)\n single_list[1] = np.argmax(player_relative_counts)\n single_list[5] = 
np.argmax(transport_counts)\n single_list[6] = built_progress/single_num\n return single_list\n\n def save_replay(self, replay_dir):\n\n self.env.save_replay(replay_dir)","repo_name":"lizhihao6/sc2gym","sub_path":"sc2gym/sc2gym.py","file_name":"sc2gym.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"4176275600","text":"#Write a function called not_list. not_list should have two\n#parameters: a list of booleans and a list of integers.\n#\n#The list of integers will represent indices for the list of\n#booleans. not_list should switch the values of all the\n#booleans located at those indices.\n\ndef not_list(boolist, indices):\n for i in indices:\n if boolist[i] == True:\n boolist[i] = False\n elif boolist[i] == False:\n boolist[i] = True\n return boolist\n\n# bool_list = [True, False, False]\n# index_list = [0, 2]\n# not_list(bool_list, index_list) -> [False, False, True]\n#\n#After calling not_list, the booleans at indices 0 and 2\n#have been switched.\n#\n#Note that it may be the case that the same index is present\n#in the second list twice. If this happens, you should switch the\n#boolean at that index twice. For example:\n#\n# bool_list = [True, False, False]\n# index_list = [0, 2, 2]\n# not_list(bool_list, index_list) -> [False, False, False]\n#\n#2 is in index_list twice, so the boolean at index 2 is\n#switched twice: False to True, then True back to False.\n\nprint(not_list([True, False, False], [0, 2]))\nprint(not_list([True, False, False], [0, 2, 2]))\n","repo_name":"moehein-92/CS1301","sub_path":"not_list.py","file_name":"not_list.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25064494041","text":"# This file implements the huffman encoding\n\nimport binascii\nimport heapq\nimport os\n\n\n# function to convert from hex to binary string\ndef binary(x):\n return \"\".join(reversed([i+j for i, j in zip(*[[\"{0:04b}\".format(int(c, 16)) for c in reversed(\"0\"+x)][n::2] for n in [1, 0]])]))\n\n\nclass HeapNode:\n def __init__(self, char, freq):\n self.char = char\n self.freq = freq\n self.left = None\n self.right = None\n\n def __lt__(self, other):\n if(other == None):\n return -1\n if(not isinstance(other, HeapNode)):\n return -1\n return self.freq < other.freq\n\n\nclass HuffmanCoding:\n def __init__(self, len):\n self.heap = []\n self.codes = {}\n self.reverse_mapping = {}\n self.freq = {}\n self.saved = 0 # number of chars saved\n self.codeLen = len\n self.trie = {}\n\n # functions for compression\n\n def make_frequency_dict(self, text):\n for i in range(0, len(text), self.codeLen):\n character = text[i:i+self.codeLen]\n if character not in self.freq:\n self.freq[character] = 0\n self.freq[character] += 1\n return self.freq\n\n # this function puts every leaf node in the heap\n def make_heap(self, frequency):\n for key in frequency:\n node = HeapNode(key, frequency[key])\n heapq.heappush(self.heap, node)\n\n # this function takes 2 nodes with min frequency and merges them and inserts into heap\n def merge_nodes(self):\n while(len(self.heap) > 1):\n node1 = heapq.heappop(self.heap)\n node2 = heapq.heappop(self.heap)\n\n merged = HeapNode(None, node1.freq + node2.freq)\n merged.left = node1\n merged.right = node2\n\n heapq.heappush(self.heap, merged)\n\n # this function computes the codeword for each character stored in self.codes\n def make_codes_helper(self, root, 
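# An equivalent, more idiomatic body for not_list above: a boolean can be
# flipped with `not` instead of comparing against True/False explicitly. The
# function name is hypothetical, chosen to avoid shadowing the original.

def not_list_idiomatic(boolist, indices):
    for i in indices:
        boolist[i] = not boolist[i]
    return boolist

print(not_list_idiomatic([True, False, False], [0, 2]))     # [False, False, True]
print(not_list_idiomatic([True, False, False], [0, 2, 2]))  # [False, False, False]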
current_code):\n if(root == None):\n return\n\n if(root.char != None):\n self.codes[root.char] = current_code\n self.reverse_mapping[current_code] = root.char\n return\n\n self.make_codes_helper(root.left, current_code + \"0\")\n self.make_codes_helper(root.right, current_code + \"1\")\n\n # entry point: builds the codeword table from the finished tree\n def make_codes(self):\n root = heapq.heappop(self.heap)\n current_code = \"\"\n self.make_codes_helper(root, current_code)\n\n # this function encodes the text\n def get_encoded_text(self, text):\n encoded_text = \"\"\n i = 0\n while i < len(text):\n j = i+2\n seq = \"\"\n current_seq = text[i:j]\n current_dic = self.trie\n # while there is a new path for the pair\n while current_seq in current_dic and j <= len(text):\n current_dic = current_dic[current_seq] # go one level deeper\n seq += current_seq\n current_seq = text[j:j+2]\n j += 2\n while seq not in self.codes:\n seq = seq[:-2]\n j -= 2\n\n encoded_text += self.codes[seq]\n i = j-2\n return encoded_text\n\n # this one adds extra padding used for byte conversion\n def pad_encoded_text(self, encoded_text):\n extra_padding = 8 - len(encoded_text) % 8\n for i in range(extra_padding):\n encoded_text += \"0\"\n\n padded_info = \"{0:08b}\".format(extra_padding) # the first byte stores the amount of padding\n encoded_text = padded_info + encoded_text\n return encoded_text\n\n # this one converts encoded text to bytes\n def get_byte_array(self, padded_encoded_text):\n if(len(padded_encoded_text) % 8 != 0):\n print(\"Encoded text not padded properly\")\n exit(0)\n\n b = bytearray()\n for i in range(0, len(padded_encoded_text), 8):\n byte = padded_encoded_text[i:i+8]\n b.append(int(byte, 2))\n return b\n\n def make_tree(self):\n # frequency should be computed before compression\n self.make_heap(self.freq)\n self.merge_nodes()\n self.make_codes()\n\n def make_trie(self):\n _end = '_end_'\n root = dict()\n for word in self.codes.keys():\n current_dict = root\n for i in range(0, len(word), 2):\n letter = word[i:i+2]\n current_dict = current_dict.setdefault(letter, {})\n current_dict[_end] = _end\n self.trie = root\n return root\n\n def isOptimal(self): # if the code does not require more information than the sequence\n for seq in self.codes:\n if len(seq) > 2 and len(seq)*4 <= len(self.codes[seq]):\n return seq\n return True\n\n def compress(self, text):\n text = text.rstrip()\n\n encoded_text = self.get_encoded_text(text)\n padded_encoded_text = self.pad_encoded_text(encoded_text)\n b = self.get_byte_array(padded_encoded_text)\n compressed_bytecode = str(binascii.hexlify(b))[2:-1]\n\n # print(\"Compressed. 
Total chars saved: \", end=\"\")\n # print(len(text) - len(compressed_bytecode))\n # self.saved += len(text) - len(compressed_bytecode)\n\n return compressed_bytecode\n\n \"\"\" functions for decompression: \"\"\"\n\n def remove_padding(self, padded_encoded_text):\n padded_info = padded_encoded_text[:8]\n extra_padding = int(padded_info, 2)\n\n padded_encoded_text = padded_encoded_text[8:]\n encoded_text = padded_encoded_text[:-extra_padding]\n\n return encoded_text\n\n def decode_text(self, encoded_text):\n current_code = \"\"\n decoded_text = \"\"\n\n for bit in encoded_text:\n current_code += bit\n if(current_code in self.reverse_mapping):\n character = self.reverse_mapping[current_code]\n decoded_text += character\n current_code = \"\"\n\n return decoded_text\n\n def decompress(self, text):\n bit_string = binary(text)\n encoded_text = self.remove_padding(bit_string)\n decompressed_text = self.decode_text(encoded_text)\n return decompressed_text\n","repo_name":"urataps/bytecode_compression","sub_path":"hybridHuff.py","file_name":"hybridHuff.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4190215097","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2022/6/16 11:07\n# @Author : 郑春兴\n# @Email : zhengchunxing@aegis-data.cn\n# @File : sensitivewordpage.py\n# @Project : PlaywrightProject\n\n\"\"\"\nPage object wrapper for the sensitive-word page\n\"\"\"\nfrom playwright.async_api import Page\n\n\nclass SensitiveWordPage:\n def __init__(self,page:Page):\n self.page = page\n # Calendar icon\n self.date_icon = self.page.locator(\"//i[@class=\\\"el-input__icon el-range__icon el-icon-time\\\"]\")\n # Start-date input inside the calendar widget\n self.start_date_input = self.page.locator(\"//div[@class=\\\"el-picker-panel__body\\\"]//input[@placeholder=\\\"开始日期\\\"]\")\n # End-date input inside the calendar widget\n self.end_data_input = self.page.locator(\"//div[@class=\\\"el-picker-panel__body\\\"]//input[@placeholder=\\\"结束日期\\\"]\")\n # Confirm button at the bottom of the calendar widget\n self.date_panel_confirm_button = self.page.locator(\"//div[@class=\\\"el-picker-panel__footer\\\"]/button[2]/span\")\n # Robot dropdown arrow\n self.robot_drop_down_list = self.page.locator(\"//i[@class=\\\"el-select__caret el-input__icon el-icon-arrow-up\\\"]\")\n # Second option in the robot dropdown list\n self.robot_list_second = self.page.locator(\"//ul[@class=\\\"el-scrollbar__view el-select-dropdown__list\\\"]/li[2]\")\n # Search button\n self.search_button = self.page.locator(\"//button[@class=\\\"el-button el-button--primary el-button--medium index-module_search_3--Jz\\\"]\")\n # Sensitive-word content configuration button\n self.sensitive_config_button = self.page.locator(\"//button[@class=\\\"el-button el-button--default el-button--medium index-module_config_1Vy18\\\"]\")\n # Text input in the sensitive-word configuration dialog\n self.sensitive_config_textarea = self.page.locator(\"//div[@class=\\\"ql-editor\\\"][@data-placeholder=\\\"请输入\\\"]\")\n # Save button in the sensitive-word configuration dialog\n self.config_popups_save_button = self.page.locator(\"//div[@class=\\\"el-dialog__wrapper\\\"]//div[@class=\\\"el-dialog__footer\\\"]//button[2]\")\n # Close button of the sensitive-word configuration dialog\n self.config_popups_close_button = self.page.locator(\"//button[@class=\\\"el-dialog__headerbtn\\\"][@aria-label=\\\"Close\\\"]\")\n # Alert text shown after a successful save\n self.operate_success_alert = self.page.locator(\"//div[@role=\\\"alert\\\"]/p\")\n\n\n\n def assertText(self,element,text_value): # verifies the robot ID column value: //tr[@class=\"el-table__row\"]/td[5]/div, D4A0D010020534M1D5\n text = self.page.inner_text(element)\n assert text == 
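# A round-trip sketch for the HuffmanCoding class above, assuming it is in
# scope: the frequency table, the Huffman tree, and the lookup trie must all
# be built before compress() is called. Input is a hex string consumed
# codeLen characters at a time; the sample data here is arbitrary.

hc = HuffmanCoding(2)            # symbols are 2 hex characters wide
data = "deadbeefdeadbeef"
hc.make_frequency_dict(data)
hc.make_tree()
hc.make_trie()
packed = hc.compress(data)
print(hc.decompress(packed) == data)  # expected: True (lossless round trip)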
text_value","repo_name":"JohnnyZCX/armyonlawUIAutomation","sub_path":"pages/sensitivewordpage.py","file_name":"sensitivewordpage.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"41695411054","text":"#!/usr/bin/python3\n\"\"\"\nGiven an array nums of integers, you can perform operations on the array.\n\nIn each operation, you pick any nums[i] and delete it to earn nums[i] points.\nAfter, you must delete every element equal to nums[i] - 1 or nums[i] + 1.\n\nYou start with 0 points. Return the maximum number of points you can earn by\napplying such operations.\n\nExample 1:\n\nInput: nums = [3, 4, 2]\nOutput: 6\nExplanation:\nDelete 4 to earn 4 points, consequently 3 is also deleted.\nThen, delete 2 to earn 2 points. 6 total points are earned.\n\n\nExample 2:\n\nInput: nums = [2, 2, 3, 3, 3, 4]\nOutput: 9\nExplanation:\nDelete 3 to earn 3 points, deleting both 2's and the 4.\nThen, delete 3 again to earn 3 points, and 3 again to earn 3 points.\n9 total points are earned.\n\n\nNote:\n\nThe length of nums is at most 20000.\nEach element nums[i] is an integer in the range [1, 10000].\n\"\"\"\nfrom typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n def deleteAndEarn(self, nums: List[int]) -> int:\n \"\"\"\n reduce to the House Robber problem\n whether to pick the number or not\n F[n] = max\n F[n-1] if not pick n\n F[n-2] + reward if pick n\n\n \"\"\"\n rewards = [0 for _ in range(10001)]\n for num in nums:\n rewards[num] += num\n\n # whether to pick the number or not\n cur, prev = 0, 0\n for reward in rewards:\n nxt = max(cur, prev + reward)\n prev = cur\n cur = nxt\n\n return cur\n\n def deleteAndEarn_dp(self, nums: List[int]) -> int:\n \"\"\"\n reduce to the House Robber problem\n whether to pick the number or not\n F[n] = max\n F[n-1] if not pick n\n F[n-2] + reward if pick n\n\n \"\"\"\n counter = defaultdict(int)\n for n in nums:\n counter[n] += 1\n\n F = [0 for _ in range(10000 + 3)]\n for i in range(3, 10000 + 3):\n cur = i - 2\n F[i] = max(\n F[i-1],\n F[i-2] + counter[cur] * cur\n )\n return F[-1]\n\n def deleteAndEarn_slow(self, nums: List[int]) -> int:\n \"\"\"\n greedy + dp: choose to delete max or max - 1\n O(n lg n)\n\n O(n^2)\n \"\"\"\n nums.sort()\n # transform to (num, count)\n counter = []\n i = 0\n j = 0\n while i < len(nums):\n while j < len(nums) and nums[i] == nums[j]:\n j += 1\n counter.append((nums[i], j - i))\n i = j\n\n # F[i] is the max points obtainable when counter[i] is deleted\n F = [0 for _ in counter]\n for i in range(len(counter)):\n F[i] = counter[i][0] * counter[i][1]\n F[i] += max(\n [\n F[j]\n for j in range(i)\n if counter[j][0] != counter[i][0] - 1\n ]\n or [0]\n )\n\n return max(F or [0])\n\n\nif __name__ == \"__main__\":\n assert Solution().deleteAndEarn([1,1,1,2,4,5,5,5,6]) == 18\n assert Solution().deleteAndEarn([3, 4, 2]) == 6\n assert Solution().deleteAndEarn([2, 2, 3, 3, 3, 4]) == 9\n","repo_name":"algorhythms/LeetCode","sub_path":"740 Delete and Earn.py","file_name":"740 Delete and Earn.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":843,"dataset":"github-code","pt":"81"} +{"seq_id":"6002443888","text":"# -*- coding: utf-8 -*-\nfrom PyQt5.QtWidgets import QGroupBox, QVBoxLayout, QPushButton\nfrom pyio.Window.LineEdit import LabelOnSpinBox\nfrom analogdiscovery2.AnalogOut import AnalogOut\n\n\nclass AnalogOutputBox(QGroupBox):\n def __init__(self, devices: list):\n super(AnalogOutputBox, 
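# A hand trace of the rolling two-variable recurrence in deleteAndEarn above
# for nums = [3, 4, 2]: rewards[2] = 2, rewards[3] = 3, rewards[4] = 4, all
# other entries 0, and nxt = max(cur, prev + reward) scans the rewards in
# increasing value order.

cur, prev = 0, 0
for reward in [0, 0, 2, 3, 4]:  # rewards[0..4]; higher indices stay 0
    cur, prev = max(cur, prev + reward), cur
print(cur)  # 6: earn 2 and 4, skipping the adjacent 3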
self).__init__()\n self.setTitle(\"AnalogOut\")\n self.devices = devices\n\n self.start_hz = LabelOnSpinBox(\"Start [Hz]\",\n maximum=500000,\n val=1000)\n\n self.stop_hz = LabelOnSpinBox(\"Stop [Hz]\",\n maximum=500000,\n val=3500)\n self.period = LabelOnSpinBox(\"Period [ms]\",\n maximum=10000.00,\n val=50.0)\n self.voltage = LabelOnSpinBox(\"Voltage [V]\",\n maximum=5.0,\n val=2.5)\n\n self.update_button = QPushButton(\"Update\")\n self.update_button.clicked.connect(self.clicked_update)\n layout = QVBoxLayout()\n layout.addWidget(self.start_hz)\n layout.addWidget(self.stop_hz)\n layout.addWidget(self.period)\n layout.addWidget(self.voltage)\n layout.addWidget(self.update_button)\n layout.addStretch()\n self.setLayout(layout)\n\n def clicked_update(self):\n for i in self.devices: # type: AnalogOut\n if i.info[\"name\"] == \"AnalogDiscovery\" and i.info[\"type\"] == \"ao\":\n i.create_sweep(\n startHz=self.start_hz.get_value(),\n stopHz=self.stop_hz.get_value(),\n sweepSec=self.period.get_value()/1000.0,\n outVoltage=self.voltage.get_value()\n )\n","repo_name":"dozou/pyio_plugins","sub_path":"analogdiscovery2/window/AnalogOutputBox.py","file_name":"AnalogOutputBox.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33442606815","text":"from easydict import EasyDict as edict\n\n__C = edict()\n\nmodel_cfg = __C\n\n# PCA model options\n__C.PCA = edict()\n__C.PCA.FEATURE_CHANNEL = 512\n__C.PCA.SK_ITER_NUM = 20\n__C.PCA.SK_EPSILON = 1.0e-10\n__C.PCA.SK_TAU = 0.005\n__C.PCA.GNN_LAYER = 5\n__C.PCA.GNN_FEAT = 1024\n__C.PCA.CROSS_ITER = False\n__C.PCA.CROSS_ITER_NUM = 1\n","repo_name":"Thinklab-SJTU/ThinkMatch","sub_path":"models/PCA/model_config.py","file_name":"model_config.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":759,"dataset":"github-code","pt":"81"} +{"seq_id":"38484094439","text":"def computepay(h, r):\r\n if h > 40 :\r\n return ((40 * r) + ((h-40) * r * 1.5))\r\n else :\r\n return (h * r)\r\n\r\nhrs = input(\"Enter Hours:\")\r\nrate = input(\"Enter rate per hour:\")\r\ntry :\r\n fh = float(hrs)\r\n fr = float(rate)\r\nexcept :\r\n print(\"Error! 
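# EasyDict, used for model_cfg above, allows attribute-style access to nested
# dict keys, which is why downstream code can read e.g. model_cfg.PCA.GNN_LAYER.
# A small self-contained illustration:

from easydict import EasyDict as edict

cfg = edict()
cfg.PCA = edict()
cfg.PCA.GNN_LAYER = 5
print(cfg.PCA.GNN_LAYER)        # 5
print(cfg['PCA']['GNN_LAYER'])  # plain dict indexing still works: 5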
Please enter the values in numeric form\")\r\n quit()\r\np = computepay(fh, fr)\r\nprint(\"Pay\", p)\r\n","repo_name":"negativeDAS/My_Python_Code_Folder","sub_path":"Assign4_1.py","file_name":"Assign4_1.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27366090321","text":"\nfrom django.shortcuts import render,redirect\nfrom django.forms import ModelForm\nfrom django import forms\nfrom .models import Archivos\nfrom .forms import Archivo_Form # used for the upload form\nfrom django.http import HttpResponseRedirect\nfrom django.core.files.storage import FileSystemStorage\nimport os\n\n\n\n\ndef Lista_archivos(request):\n # shows the files from 'static/media' in 'archivos/archivos.html'\n # currently unused\n path = 'static/media'\n lista_archivos = os.listdir(path)\n contexto = {'listado':lista_archivos}\n return render(request,'archivos/archivos.html',contexto)\n\n\n\nFILE_TYPES = ['png', 'jpg', 'jpeg','mp3','pdf','mp4']\n\n\n\ndef borrar_archivo(request,pk):\n # pk: primary key of Archivos\n if request.method == 'POST':\n file = Archivos.objects.get(pk=pk) \n file.delete()\n return redirect ('archivos') \n\n\ndef doc_list(request):\n # shows the list of files available on the server\n files = Archivos.objects.all()\n return render(request,'archivos/doc_list.html',{'files':files})\n\n\n\ndef upload(request):\n # second version of the file upload\n if request.method == 'POST':\n form = Archivo_Form(request.POST,request.FILES)\n if form.is_valid():\n user_pr = form.save(commit=False)\n user_pr.docfile = request.FILES['docfile']\n file_type = user_pr.docfile.url.split('.')[-1]\n file_type = file_type.lower()\n if file_type not in FILE_TYPES:\n return render(request, 'archivos/error.html')\n form.save()\n return redirect('archivos')\n else:\n form = Archivo_Form() \n\n\n return render(request,'archivos/upload.html',{'form':form}) \n","repo_name":"Palisandron/p_banda","sub_path":"apps/archivos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20203443465","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.cotodigital3.com.ar/sitios/cdigi/browse?Nf=product.endDate%7CGTEQ+1.6747776E12%7C%7Cproduct.startDate%7CLTEQ+1.6747776E12&Nr=AND%28product.sDisp_200%3A1004%2Cproduct.language%3Aespa%C3%B1ol%2COR%28product.siteId%3ACotoDigital%29%29\"\nproductosFinal = []\ni = 0\nwhile url != None:\n print(i)\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'html.parser')\n try:\n products = soup.find('ul', {'id': 'products'}).find_all('li')\n except:\n break\n for product in products:\n name = product.find('span', {'class': 'span_productName'}).text\n while name.find('\\n') != -1:\n name = name.replace('\\n', '')\n while name.find('\\t') != -1:\n name = name.replace('\\t', '')\n price = product.find('span', {'class': 'atg_store_newPrice'}).text\n while price.find('\\n') != -1:\n price = price.replace('\\n', '')\n while price.find('\\t') != -1:\n price = price.replace('\\t', '')\n while price.find(' ') != -1:\n price = price.replace(' ', '')\n productosFinal.append({'Nombre': name, 'Precio': price})\n try:\n url = \"https://www.cotodigital3.com.ar\" +soup.find('a', {'title': 'Siguiente'})['href']\n except:\n break\n i += 1\n\ndf = 
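# A worked example of the overtime rule in computepay above (assuming the
# function from that snippet is in scope): hours beyond 40 are paid at 1.5x
# the base rate.

print(computepay(45.0, 10.0))  # 475.0 = 40*10 + 5*10*1.5
print(computepay(40.0, 10.0))  # 400.0, no overtime portion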
pd.DataFrame(productosFinal)\ndf.drop_duplicates(inplace=True)\ndf.to_excel('productosCoto.xlsx', index=False)\n\n\n ","repo_name":"ezqdavid/scrapPreciosSupermercado","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9171691596","text":"# -*- coding: utf-8 -*-\nimport random\nimport sys\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import QPainter, QColor\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(365, 294)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.verticalLayout.addItem(spacerItem)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem1)\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setObjectName(\"pushButton\")\n self.horizontalLayout.addWidget(self.pushButton)\n self.verticalLayout.addLayout(self.horizontalLayout)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Какое-то окно (Теперь случайный цвет!!!!)\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"какая-то кнопка\"))\n\n\nclass MyWidget(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.flag = False\n self.pushButton.clicked.connect(self.paint)\n # Note: the widget name is the same as in Qt Designer\n\n def paint(self):\n self.flag = True\n self.update()\n # The name matches the objectName set in Qt Designer\n\n def paintEvent(self, event):\n if self.flag:\n qp = QPainter()\n # Start the painting process\n qp.begin(self)\n qp.setBrush(QColor(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n # Draw an ellipse (sized at random) with the chosen brush\n scale = random.randint(10, 200)\n qp.drawEllipse(100, 100, scale, scale)\n qp.end()\n self.flag = False\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MyWidget()\n ex.show()\n sys.exit(app.exec_())\n","repo_name":"Pixel-Clay/beragumbo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39653259810","text":"import numpy as np\n# from cellpose.metrics import aggregated_jaccard_index, average_precision\n\nfrom inferno.io.volumetric.volumetric_utils import slidingwindowslices\nimport os\nimport json, cv2, random\nimport imageio\nimport shutil\nfrom copy import deepcopy\nimport pandas as pd\nimport vigra\n\nfrom segmfriends.utils.various import check_dir_and_create\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\n\nmatplotlib.rc('font',family='sans-serif', 
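# The nested while/replace loops in the scraper above can be collapsed:
# str.replace() already removes every occurrence of a substring, and a single
# regex handles all whitespace kinds at once. A sketch using only the
# standard library:

import re

def clean(text):
    # collapse tabs/newlines/runs of spaces left over from the HTML markup
    return re.sub(r'\s+', ' ', text).strip()

print(clean('\n\t Milk   1L \n'))  # 'Milk 1L'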
size=8)\nplt.rcParams['font.family'] = 'sans-serif'\n# plt.rcParams['font.sans-serif'] = 'Helvetica'\n# plt.rcParams.update({\n# \"text.usetex\": True,\n# \"font.family\": \"sans-serif\",\n# \"font.sans-serif\": [\"Helvetica\"]})\n# # for Palatino and other serif fonts use:\n# plt.rcParams.update({\n# \"text.usetex\": True,\n# \"font.family\": \"serif\",\n# \"font.serif\": [\"Palatino\"],\n# })\n\nplt.rcParams['axes.edgecolor']='#333F4B'\nplt.rcParams['axes.linewidth']=0.8\nplt.rcParams['xtick.color']='#333F4B'\nplt.rcParams['ytick.color']='#333F4B'\n\nif __name__ == \"__main__\":\n\n\n PLOT_DIR = \"/scratch/bailoni/projects/train_cellpose/plots\"\n # df = pd.read_csv(\"/scratch/bailoni/projects/train_cellpose/scores_with_estimated_diameters_all.csv\")\n df = pd.read_csv(\"/scratch/bailoni/projects/train_cellpose/scores_cleaned_LIVECell.csv\")\n\n # df.loc[:, \"aji\"].to_numpy()\n\n # models_to_plot = [\"cyto2_diamEst\", \"trained_on_LIVECell_diamEst\", \"trained_on_cellpose_diamEst\", \"cyto_diamEst\",\n # \"finetuned_LIVECell_lr_02_diamEst\"]\n\n # # ALL MODELS:\n # models_to_plot = {\"cyto2_diamEst\": \"CellPose cyto2 \",\n # \"cyto_diamEst\": \"CellPose cyto\",\n # \"trained_on_LIVECell_noDiamEst\": \"Trained on LIVECell+CellPose data\",\n # \"trained_on_LIVECell_diamEst\": \"Trained on LIVECell+CellPose data (est diam)\",\n # \"trained_on_cellpose_noDiamEst\": \"Trained on CellPose data\",\n # \"trained_on_cellpose_diamEst\": \"Trained on CellPose data (est diam)\",\n # \"finetuned_LIVECell_lr_02_noDiamEst\": \"finetuned_LIVECell_lr_02_noDiamEst\", # \"Fine-tuned on LIVECell+CellPose data\"\n # \"finetuned_LIVECell_lr_00002_noDiamEst\": \"finetuned_LIVECell_lr_00002_noDiamEst\",\n # # \"finetuned_LIVECell_lr_00002_diamEst\": \"finetuned_LIVECell_lr_00002_diamEst\",\n # \"finetuned_LIVECell_lr_02_diamEst\": \"finetuned_LIVECell_lr_02_diamEst\",\n # }\n # spacing = 4\n\n\n models_to_plot = {\"cyto2_diamEst\": \"CellPose cyto2 model\",\n # \"cyto_diamEst\": \"CellPose cyto\",\n # \"trained_on_LIVECell_noDiamEst\": \"Trained on LIVECell+CellPose from scratch\",\n # \"trained_on_LIVECell_diamEst\": \"Trained on LIVECell+CellPose data (est diam)\",\n # \"trained_on_cellpose_noDiamEst\": \"Trained on CellPose data\",\n # \"trained_on_cellpose_diamEst\": \"Trained on CellPose data (est diam)\",\n # \"finetuned_LIVECell_lr_02_noDiamEst\": \"cyto2 finetuned on LIVECell+CellPose\",\n # \"finetuned_LIVECell_lr_00002_noDiamEst\": \"cyto2 finetuned on LIVECell\",\n \"cleaned_finetuned_LIVECell_v1_noDiamEst\": \"cyto2 finetuned on LIVECell+CellPose\",\n # \"cleaned_from_scratch_LIVECell_v1_noDiamEst\": \"cleaned_from_scratch_LIVECell_v1_noDiamEst\",\n # \"finetuned_LIVECell_lr_00002_diamEst\": \"finetuned_LIVECell_lr_00002_diamEst\",\n # \"finetuned_LIVECell_lr_02_diamEst\": \"finetuned_LIVECell_lr_02_diamEst\",\n }\n spacing = 2\n\n\n # scores_to_plot = [\"aji\", \"fp_0.5\", \"fn_0.5\", \"ap_0.5\", \"tp_0.5\"]\n # scores_to_plot_names = [\"Aggregated Jaccard Index\", \"False positives 0.5\", \"False negatives 0.5\",\n # \"Average precision 0.5\", \"True positives 0.5\"]\n\n scores_to_plot = [\"aji\", \"fp_0.9\", \"fn_0.9\", \"ap_0.9\", \"tp_0.9\"]\n scores_to_plot_names = [\"Aggregated Jaccard Index\", \"False positives 0.9\", \"False negatives 0.9\",\n \"Average precision 0.9\", \"True positives 0.9\"]\n\n labels = {\n \"LIVECell_test_cleaned\": 'LIVECell test data',\n # \"LIVECell_test\": 'LIVECell test data',\n \"cellpose_test\":'CellPose test data',\n \"alex\": 'Alex images'}\n labels_to_plot = 
[lb for _, lb in labels.items()]\n\n bar_width = 0.25 # the width of the bars\n\n\n\n\n\n # ax = df.plot.bar(rot=0)\n\n for score_idx, score_name in enumerate(scores_to_plot):\n r = np.arange(0, len(labels) * spacing, spacing)\n fig, ax = plt.subplots()\n\n # sub_df = df[[\"Model name\", \"Data type\", score_name]]\n # ax = df.plot.bar(rot=0)\n\n\n nb_models = len(models_to_plot)\n for mdoel_idx, model_name in enumerate(models_to_plot):\n model_name_to_plot = models_to_plot[model_name] # model_name.replace(\"_diamEst\", \"\").replace(\"_noDiamEst\", \"\")\n model_results = df.loc[df[\"Model name\"] == model_name]\n\n model_scores = []\n for data_label in labels:\n score = model_results.loc[model_results[\"Data type\"] == data_label, score_name]\n assert len(score) == 1\n model_scores.append(float(score.to_numpy()[0]))\n rects = ax.bar(r, model_scores, bar_width, edgecolor='white', label=model_name_to_plot, )\n # plt.hlines(y=r, xmin=0, xmax=df['percentage'], color='#007acc', alpha=0.2, linewidth=5)\n r = [x + bar_width for x in r]\n # ax.bar_label(rects, padding=3)\n ax.set_ylabel(scores_to_plot_names[score_idx])\n ax.set_title('Comparison between different CellPose models')\n if score_idx in [0, 3] and \"0.5\" in score_name:\n ax.set_ylim([0,1.])\n # print([x + (nb_models/2.)*bar_width for x in range(0, len(labels) * spacing, spacing)])\n # print([x + bar_width for x in range(0, len(labels) * spacing, spacing)])\n ax.set_xticks([x + (nb_models/2.-0.5)*bar_width for x in range(0, len(labels) * spacing, spacing)])\n ax.set_xticklabels(labels_to_plot)\n # ax.set_xlabel('Datasets', fontweight='bold')\n ax.legend()\n\n # fig.tight_layout()\n\n fig.savefig(os.path.join(PLOT_DIR, \"plot_{}.pdf\".format(score_name)), format='pdf')\n\n\n","repo_name":"abailoni/segmentation_spacem","sub_path":"segmUtils/postprocessing/make_bar_plot_scores.py","file_name":"make_bar_plot_scores.py","file_ext":"py","file_size_in_byte":6367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70621759304","text":"import csv\n\nwith open('2015murdersonly.csv','r') as csvinput:\n with open('2015murdersonlyoutput.csv', 'w') as csvoutput:\n writer = csv.writer(csvoutput, lineterminator='\\n')\n reader = csv.reader(csvinput)\n\n all = []\n row = next(reader)\n row.append('offenseID')\n row.append('locationID')\n all.append(row)\n\n locationID = 1000\n\n for row in reader:\n if row[8] == 'false':\n row.append('AAA111')\n elif row[8] == 'true':\n row.append('BBB222')\n else:\n row.append('CCC333')\n\n row.append(locationID)\n locationID += 1\n \n all.append(row)\n\n writer.writerows(all)\n","repo_name":"jlally21/ChicagoCrime","sub_path":"data/addOffenseIDLocationIDcolumns.py","file_name":"addOffenseIDLocationIDcolumns.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33106682134","text":"import telebot\n\nfrom config import *\nfrom extensions import GottenCurrency, APIException\nbot = telebot.TeleBot(TOKEN)\n\n\n@bot.message_handler(commands=['start', 'help'])\ndef handle_start_help(message: telebot.types.Message):\n text = 'Введите сообщение боту в виде: \\n\\\n <имя валюты, цену которой вы хотите узнать> \\\n<имя валюты, в которой надо узнать цену первой валюты> \\\n<количество первой валюты>\\n\\\nПри вводе команды /value выводится информация о всех доступных валютах'\n bot.reply_to(message, text)\n\n\n@bot.message_handler(commands=['value'])\ndef 
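# The tick placement in the plotting loop above centers each dataset's group
# of bars: the nb_models bar positions x + k*bar_width (k = 0..nb_models-1)
# have mean x + (nb_models/2 - 0.5)*bar_width, which is exactly the offset the
# script uses for set_xticks. A quick numeric check of that formula:

bar_width, nb_models, x = 0.25, 2, 0.0
positions = [x + k * bar_width for k in range(nb_models)]  # [0.0, 0.25]
center = sum(positions) / nb_models                        # 0.125
print(center == x + (nb_models / 2 - 0.5) * bar_width)     # True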
handle_value(message: telebot.types.Message):\n text = 'Доступныe для конвертации валюты:'\n for i in available_currency:\n text = text + f'\\n- {str(i)}'\n bot.reply_to(message, text)\n\n\n@bot.message_handler(content_types=['text'])\ndef convert_currency(message):\n try:\n gotten_data = message.text.lower().split()\n if len(gotten_data) != 3:\n raise APIException(\"Неверное количество переменных!\")\n base, quote, amount = gotten_data\n request_currency, from_, to_, amount = GottenCurrency.get_price(base, quote, amount)\n text = f'Результат конверсии: {amount} {from_} = {request_currency} {to_}'\n except APIException as e:\n bot.send_message(message.chat.id, f'Неправильно введены данные:\\n{e}')\n except Exception as e:\n bot.send_message(message.chat.id, f'Непредвиденная ошибка:\\n{e}')\n else:\n bot.send_message(message.chat.id, text)\n\n\nbot.polling()\n","repo_name":"AleksandrNew86/CurrencyConverter","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38038326387","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport trace_taille_disque_par_pdf_modal as ttdppm\n\n\n\n'''\ntag=['50_shr','50_hhr','50_vhr','50_emhr','50_mhr','50_hr']\nlegend=['8, 10*40', '7, 10*80', '7, 10*40', '7, 10*20', '7, 8*40', '7, 8*20']\nmarker=['.','v','p','s','P','*']\n\noutput_max=[685, 275, 254, 690, 130, 144]\n\nt1=[0.09912+6.95e-6, 0.09708-5.41e-4, 0.09653+5.72e-4-6.75e-5, 0.09243+2.72e-5, 0.09068-5.16e-4, 0.09068+1.84e-6] #Myr, pour recalage temporel et avoir toutes les courbes a peu pres superposees\n\n\nbase_simu='B335_noturb_norot_hydro_pert_asym_aleatoire'\n\n\n#simu=[]\n#for i in range(len(tag)):\n# simu.append(base_simu+tag[i])\n\n\nfor i in range(len(tag)):\n i=len(tag)-i-1\n ttdpp.trace_taille_disque(base_simu+tag[i],tag[i],'None',output_max[i],t1[i],legend[i],'.')\n'''\n\n\n#ttdpp.trace_taille_disque('B335_noturb_norot_hydro_hr2','hr2','None',346,0.09725-1.16e-4,'7, 10*40','.')\n'''\n\n\ntag=['rot1','rot0.5','rot0.1','rot0.01','rot0.001']\nlegend=['','','','','']\nmarker=['.','v','p','s','P']\n\noutput_max=[26,88,270,245,266]\n\nt1=[0.0878226,0.0871526,0.0922875,0.0957122,0.0967315]\n\nbase_simu1='B335_noturb_'\nbase_simu2='_hydro_pert_asym_aleatoire50_vhr'\n\n\nfor i in range(len(tag)):\n i=len(tag)-i-1\n ttdpp.trace_taille_disque(base_simu1+tag[i]+base_simu2,tag[i],'None',output_max[i],t1[i],legend[i],'.')\n'''\n\n\n\n#ttdpp.trace_taille_disque('B335_noturb_norot_hydro_pert_asym_aleatoire50_shr_bigbox','50_shr_bigbox','None',108,0.10093,'8, 10*40','.')\n\n#ttdpp.trace_taille_disque('B335_noturb_norot_hydro_pert_asym_aleatoire50_vhr','50_vhr','None',254,0.09653+5.72e-4-6.75e-5,'7, 10*40','.')\n\n#ttdpp.trace_taille_disque('B335_noturb_norot_hydro_pert_asym_aleatoire50_hr_niMHD','50_hr_niMHD','None',2720,0.10919,'7, 8*20','.')\n\n\n\n\n'''\ntag=['10','20','30','40','50','60']\nlegend=['','','','','','']\nmarker=['.','v','p','s','P','.']\n\noutput_max=[99,93,100,97,110,97]\n\nt1=[0,0,0,0,0,0]\n\nbase_simu='B335_noturb_norot_hydro_pert_asym_aleatoire_shr_bigbox_'\n\n\nfor i in range(len(tag)):\n i=len(tag)-i-1\n 
ttdpp.trace_taille_disque(base_simu+tag[i]+'pourc',tag[i],'None',output_max[i],t1[i],legend[i],'.')\n'''\n\n\n\n\n'''\ntag=['0','10','20','30','40','50','60']\nlegend=['','','','','','','']\nmarker=['+','.','v','p','s','P','.']\n\noutput_max=[264,78,202,194,181,343,187]\n\nt1=[0,0,0,0,0,0,0]\n\nbase_simu='B335_noturb_norot_hydro_pert_asym_aleatoire_lllr_bigbox_'\n\n\nfor i in range(len(tag)):\n i=len(tag)-i-1\n ttdpp.trace_taille_disque(base_simu+tag[i]+'pourc_sink',tag[i],'None',output_max[i],t1[i],legend[i],'.')\n'''\n\n\n'''\ntag=['10','50']\nlegend=['','']\nmarker=['+','.']\n\noutput_max=[440,480]\n\nt1=[0,0,0,0,0,0,0]\n\nbase_simu='B335_noturb_norot_hydro_pert_asym_aleatoire_bigbox_'\n\n\nfor i in range(len(tag)):\n i=len(tag)-i-1\n ttdpp.trace_taille_disque(base_simu+tag[i]+'pourc_sink_seuil_haut',tag[i],'None',output_max[i],t1[i],legend[i],'.')\n'''\n\n\n'''\ntag=['10','50','50','50','50','50']\ntag2=['','','_lr','_MHD_lr','_niMHD_lr','_rot1']\nlegend=['','','','','','']\nmarker=['+','.','x','p','P','>']\n\noutput_max=[440,480,68,114,85,102]\n\nt1=[0,0,0,0,0,0,0]\n\nbase_simu='B335_noturb_norot_hydro_pert_asym_aleatoire_bigbox_'\n\n\nfor i in range(len(tag)):\n i=len(tag)-i-1\n ttdpp.trace_taille_disque(base_simu+tag[i]+'pourc_sink_seuil_haut'+tag2[i],tag[i],'None',output_max[i],t1[i],legend[i],'.')\n'''\n\n\n\ncmappts = plt.get_cmap('autumn')\ncolorsrot = [cmappts(i) for i in np.linspace(0.05,0.8,4)]\n\ncmappts = plt.get_cmap('summer')\ncolorsnorot = [cmappts(i) for i in np.linspace(0.2,0.8,3)]\n\ncmappts = plt.get_cmap(r'cool')\ncolorsMHD = [cmappts(i) for i in np.linspace(0.3,0.7,2)]\n\ntag=['50','50','20','20','50','50','50','20','10']\ntag2=['_rot1','_rot0.25','_rot1','_rot0.25','_MHD_lr','_MHD_lr_rot1','','','']\nlegend=['','','','','','','','','','']\nmarker=['+','.','x','p','P','>','<','.','+','x']\n\noutput_max=[30,70,20,40,291,103,480,400,440]\n\ncolors=[colorsrot[0],colorsrot[1],colorsrot[2],colorsrot[3],colorsMHD[0],colorsMHD[1],colorsnorot[0],colorsnorot[1],colorsnorot[2]]\n\noutput_frag=[24,63,14,30,'None','None',480,400,440]\n\nt1=[0,0,0,0,0,0,0,0,0]\n\nbase_simu='B335_noturb_norot_hydro_pert_asym_aleatoire_bigbox_'\n\n\nfor i in range(len(tag)):\n i=len(tag)-i-1\n ttdppm.trace_taille_disque(base_simu+tag[i]+'pourc_sink_seuil_haut'+tag2[i],tag[i],tag[i]+tag2[i],'None',output_max[i],colors[i],t1[i],legend[i],'.',output_frag=output_frag[i])\n","repo_name":"averliat/Sapanais_pipelines","sub_path":"trace_plusieurs_taille_disque_par_pdf_modal.py","file_name":"trace_plusieurs_taille_disque_par_pdf_modal.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10471701340","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\nfrom matplotlib import font_manager as fm, rcParams\nfrom ClusterParams import galaxylist as glist\nimport matplotlib as mpl\nfrom numpy import arange,array,ones\nfrom scipy import stats\nimport itertools\n\n\n# fpath = os.path.join(rcParams[\"datapath\"], \"fonts/ttf/nimbusmono-bold.ttf\")\n# prop = fm.FontProperties(fname=fpath)\n# fname = os.path.split(fpath)[1]\n# print(prop)\n\nmpl.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], 'monospace': ['Computer Modern Typewriter']})\nparams = {'backend': 'pdf',\n 'axes.labelsize': 12,\n 'legend.fontsize': 12,\n 'xtick.labelsize': 10,\n 'ytick.labelsize': 10,\n 'text.usetex': True,\n 'axes.unicode_minus': 
True}\nmpl.rcParams.update(params)\n\n\nprop = fm.FontProperties(mpl.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'],\n 'monospace': ['Computer Modern Typewriter']}))\nmpl.rcParams[\"mathtext.fontset\"] = u\"stix\"\n\nabell = 3676\ndata = np.loadtxt(\"datafiles/A{}.txt\".format(abell))\n# dustdata = np.loadtxt(\"datafiles/dustlist.txt\").readlines()\ndustdata = np.loadtxt(\"datafiles/dustlist.txt\", delimiter=\",\")\nbcgdetails = np.loadtxt(\"BCGs.txt\", delimiter=\"&\")\n\n\nrarcsecs = data[0]\nimagem = data[1]\nmodelm = data[2]\ndustmodifiedm = data[3]\nnodustmodel = data[4]\n\nn = 76, 119, 147, 160, 168, 189, 193, 195, 260, 262, 295, 347, 376, 397, 419, 496, 533, 548, 634, 671, 779, 912, \\\n 999, 1016, 1060, 1142, 1177, 1228, 1308, 1314, 1367, 1631, 1656, 1795, 1836, 1983, 2040, 2052, 2147, 2162, 2197, \\\n 2247, 2572, 2589, 2593, 2634, 2657, 2666, 2877, 3144, 3193, 3376, 3395, 3526, 3528, 3532, 3554, 3556, 3558, 3559,\\\n 3562, 3564, 3565, 3571, 3574, 3656, 3676, 3677, 3698, 3716, 3733, 3736, 3742, 3744, 3747, 4038, 4049, 4059\n\n\nirlist = np.zeros((len(glist), 4)) # This is in Vega Magnitudes, needs conv. to flux and luminosity\nnp.seterr(all='raise')\n\n\ndef chicalc(xi, mi):\n \"\"\"Chi Squared calculator\"\"\"\n chisq = np.sum(((xi - mi)**2)/0.2)\n return chisq\n\n\nchi2 = []\nfor z in range(len(glist)):\n obsv = np.loadtxt(\"datafiles/A{}.txt\".format(n[z]))[1]\n exp = np.loadtxt(\"datafiles/A{}.txt\".format(n[z]))[2]\n chi2.append(chicalc(obsv, exp))\n\nprint(chi2)\nprint(np.sum(chi2)/len(glist))\n\n\ndef sbpfinalplot(x, yimage, ymodel, ydust, n):\n\n xs, ys, ymod, ydm = x, yimage, ymodel, ydust\n xmin, xmax = 0, max(xs) + 2 * max(xs)\n ymin, ymax = min(ys) - 0.05 * min(ys), max(ys) + 0.05 * max(ys)\n\n fig = plt.figure()\n ax1 = fig.add_axes([0.15, 0.3, 0.7, 0.6])\n ax1.set_xscale(\"log\")\n ax1.plot(xs, ys, marker='s', color='black', ms=3)\n ax1.plot(xs, ymod)\n ax1.plot(xs, ydm, color='orange')\n\n plt.text(0.05 * xmax, ymin + 1, 'Abell {}'.format(n), fontproperties=prop, size=15)\n ax1.axis([xmin, xmax, ymax, ymin])\n # ax1.set_yscale(\"log\")\n ax1.get_yaxis().set_tick_params(which='both', direction='in')\n ax1.get_xaxis().set_tick_params(which='both', direction='in')\n ax1.tick_params(axis='both', which='both', top='True', right='True')\n ax1.tick_params(labelbottom=False)\n ax1.set_yticklabels(ax1.get_yticks(), fontproperties=prop, size=12)\n ax1.set_ylabel('M', fontproperties=prop, color='black', rotation=0, labelpad=28, size=13)\n # ax1.set_title('Surface Brightness Profile', fontproperties=prop, color='black', size=12)\n\n ax2 = fig.add_axes([0.15, 0.1, 0.7, 0.2])\n ax2.semilogx(xs, ydm - ys, color='orange') # Must be the same as in axes 3\n # ax2.semilogx(xs, ydm - ymod, color='orange') # Must be the same as in axes 3\n ax2.axis([xmin, xmax, -0.2, 0.2])\n ticks = 0.1, 0, -0.1\n ax2.set_yticks(ticks, minor=False)\n ax2.get_yaxis().set_tick_params(which='both', direction='in')\n ax2.get_xaxis().set_tick_params(which='both', direction='in')\n ax2.tick_params(axis='both', which='both', top='True', right='True')\n ax2.set_xlabel('R (arcseconds)', fontproperties=prop, color='black', size=12)\n ax2.set_xticklabels(ax2.get_xticks(), fontproperties=prop, size=12)\n ax2.set_yticklabels(ax2.get_yticks(), fontproperties=prop, size=11)\n ax2.set_ylabel('$\\Delta$M', fontproperties=prop, color='black', rotation=0, labelpad=16, size=12)\n\n ax3 = fig.add_axes((0.15, 0.2, 0.7, 0.1))\n ax3.yaxis.set_visible(False)\n ax3.get_xaxis().set_tick_params(which='both', 
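# chicalc above computes a chi-squared statistic with a fixed variance of 0.2
# per point: chi2 = sum((x_i - m_i)^2 / 0.2). A quick check on invented toy
# arrays:

import numpy as np

xi = np.array([1.0, 2.0, 3.0])
mi = np.array([1.1, 1.9, 3.2])
print(np.sum(((xi - mi) ** 2) / 0.2))  # ~0.3 = (0.01 + 0.01 + 0.04) / 0.2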
direction='inout')\n ax3.tick_params(labelbottom=False)\n ax3.set_xscale(\"log\")\n ax3.axis([xmin, xmax, 0, 0.1])\n ax3.set_xlim(ax1.get_xlim()[0], ax1.get_xlim()[1])\n ax3.spines['bottom'].set_position('zero')\n ax3.patch.set_alpha(0)\n\n for axis in [ax1.xaxis, ax1.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\n for axis in [ax2.xaxis, ax2.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\n\n plt.show()\n\n return 1\n\n\n# def sbpfinalplot2(x, yimage, ymodel, ydust):\n#\n# xs, ys, ymod, ydm = x, yimage, ymodel, ydust\n# xmin, xmax = 0, max(xs) + 2 * max(xs)\n# ymin, ymax = min(ys) - 0.05 * min(ys), max(ys) + 0.05 * max(ys)\n#\n# fig = plt.figure()\n# ax1 = fig.add_axes([0.15, 0.3, 0.7, 0.6])\n# ax1.set_xscale(\"log\")\n# ax1.plot(xs, ys, marker='s', color='black', ms=3)\n# # ax1.plot(xs, ymod)\n# # ax1.plot(xs, ydm, color='orange')\n#\n# plt.text(0.05 * xmax, ymin + 1, 'Abell{}'.format(abell), fontproperties=prop, size=15)\n# ax1.axis([xmin, xmax, ymax, ymin])\n# # ax1.set_yscale(\"log\")\n# ax1.get_yaxis().set_tick_params(which='both', direction='in')\n# ax1.get_xaxis().set_tick_params(which='both', direction='in')\n# ax1.tick_params(axis='both', which='both', top='True', right='True')\n# ax1.tick_params(labelbottom=True)\n# ax1.set_yticklabels(ax1.get_yticks(), fontproperties=prop, size=12)\n# ax1.set_ylabel('M', fontproperties=prop, color='black', rotation=0, labelpad=28, size=13)\n# # ax1.set_title('Surface Brightness Profile', fontproperties=prop, color='black', size=12)\n# ax1.set_xlabel('R [arcseconds]', fontproperties=prop, color='black', size=12)\n#\n# # ax2 = fig.add_axes([0.15, 0.1, 0.7, 0.2])\n# # ax2.semilogx(xs, ymod - ys) # Must be the same as in axes 3\n# # ax2.semilogx(xs, ydm - ymod, color='orange') # Must be the same as in axes 3\n# # ax2.axis([xmin, xmax, -0.2, 0.2])\n# # ticks = 0.1, 0, -0.1\n# # ax2.set_yticks(ticks, minor=False)\n# # ax2.get_yaxis().set_tick_params(which='both', direction='in')\n# # ax2.get_xaxis().set_tick_params(which='both', direction='in')\n# # ax2.tick_params(axis='both', which='both', top='True', right='True')\n# # ax2.set_xlabel('R [arcseconds]', fontproperties=prop, color='black', size=12)\n# # ax2.set_xticklabels(ax2.get_xticks(), fontproperties=prop, size=12)\n# # ax2.set_yticklabels(ax2.get_yticks(), fontproperties=prop, size=11)\n# # ax2.set_ylabel('$\\Delta$M', fontproperties=prop, color='black', rotation=0, labelpad=16, size=12)\n# #\n# # ax3 = fig.add_axes((0.15, 0.2, 0.7, 0.1))\n# # ax3.yaxis.set_visible(False)\n# # ax3.get_xaxis().set_tick_params(which='both', direction='inout')\n# # ax3.tick_params(labelbottom=False)\n# # ax3.set_xscale(\"log\")\n# # ax3.axis([xmin, xmax, 0, 0.1])\n# # ax3.set_xlim(ax1.get_xlim()[0], ax1.get_xlim()[1])\n# # ax3.spines['bottom'].set_position('zero')\n# # ax3.patch.set_alpha(0)\n#\n# for axis in [ax1.xaxis, ax1.yaxis]:\n# axis.set_major_formatter(ScalarFormatter())\n# # for axis in [ax2.xaxis, ax2.yaxis]:\n# # axis.set_major_formatter(ScalarFormatter())\n#\n# plt.show()\n#\n# return 1\n\n\n# sbpfinalplot(rarcsecs, imagem, nodustmodel, dustmodifiedm)\n\n\nfor i in range(len(glist)):\n irlist[i, 0] = glist[i][10][0]\n irlist[i, 1] = glist[i][10][1]\n irlist[i, 2] = glist[i][10][2]\n irlist[i, 3] = glist[i][10][3]\n\n\ndef magtoflux(w1to4mags):\n\n w1flux = 309.54 * 10 ** (-w1to4mags[0] / 2.5)\n w2flux = 171.787 * 10 ** (-w1to4mags[1] / 2.5)\n w3flux = 31.674 * 10 ** (-w1to4mags[2] / 2.5)\n w4flux = 8.363 * 10 ** (-w1to4mags[3] / 2.5)\n\n # w1flux = 10 ** (-w1to4mags[0] / 2.5)\n # w2flux = 
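# --- reviewer sketch, not part of the original record ---
# sbpfinalplot above hand-places a main panel, a residual strip and a bare tick
# axis with fig.add_axes. plt.subplots can express the same stacked layout with
# a shared x-axis in a few lines (the 3:1 height ratio is an assumption):
import matplotlib.pyplot as plt

fig, (ax_main, ax_resid) = plt.subplots(
    2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})
ax_main.set_xscale('log')            # both panels share the log radius axis
ax_resid.axhline(0.0, linewidth=0.8) # zero line for the residual panel
fig.subplots_adjust(hspace=0.05)     # keep the panels visually joined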
10 ** (-w1to4mags[1] / 2.5)\n # w3flux = 10 ** (-w1to4mags[2] / 2.5)\n # w4flux = 10 ** (-w1to4mags[3] / 2.5)\n\n w1to4flux = w1flux, w2flux, w3flux, w4flux\n\n return w1to4flux\n\n\ndef magtoluminosity(w1to4mags, D):\n\n flux12 = magtoflux(w1to4mags)[2] # 12 micrometers + 5.174\n flux24 = magtoflux(w1to4mags)[3] # 24 micrometers + 6.620\n\n flux15 = (3/12)*(flux24 - flux12) + flux12\n r = D * 3.08567758128 * (10 ** 22)\n\n lum15 = (4*np.pi*r**2)*flux15\n irlum = 11.1*(15*(10**-6)*lum15)**0.998\n\n return irlum\n\n\nfluxlist = np.zeros((len(glist), 4))\nfratio4_3 = []\nfratio24_12 = []\nfratio12_4 = []\nfratio24_3 = []\nfratio24_4 = []\n\nfluxratiog6 = []\nfluxratiog42_6 = []\nfluxratiog375_42 = []\nfluxratiog34_375 = []\nfluxratiog31_34 = []\nfluxratiog29_31 = []\nfluxratiog2_29 = []\nlabel6 = []\nlabel42_6 = []\nlabel375_42 = []\nlabel34_375 = []\nlabel31_34 = []\nlabel29_31 = []\nlabel2_29 = []\n\n\ndustfratio12_4 = []\ndustfratio24_3 = []\nnodustfratio12_4 = []\nnodustfratio24_3 = []\n# The flux ratios for each w band (1, 2, 3, 4).\nfor j in range(len(glist)):\n\n fluxlist[j, :] = magtoflux(irlist[j])\n\n print(100*fluxlist[j, :])\n\n fratio4_3.append(fluxlist[j, 1]/fluxlist[j, 0])\n\n fratio24_12.append(fluxlist[j, 3]/fluxlist[j, 2])\n\n fratio12_4.append(fluxlist[j, 2]/fluxlist[j, 1])\n\n fratio24_3.append(fluxlist[j, 3]/fluxlist[j, 0])\n\n fratio24_4.append(fluxlist[j, 3]/fluxlist[j, 1])\n\n if dustdata[j, 1] >= 1.5*10**5:\n dustfratio12_4.append(fluxlist[j, 2]/fluxlist[j, 1])\n dustfratio24_3.append(fluxlist[j, 3]/fluxlist[j, 0])\n else:\n nodustfratio12_4.append(fluxlist[j, 2]/fluxlist[j, 1])\n nodustfratio24_3.append(fluxlist[j, 3]/fluxlist[j, 0])\n\n\n\ndustinfluxband = []\nfor k in range(len(glist)):\n nfratio12_4 = fluxlist[k, 2]/fluxlist[k, 1]\n if nfratio12_4 >= 0.6:\n fluxratiog6.append(fluxlist[k, :])\n label6.append(k)\n elif 0.42 <= nfratio12_4 <= 0.6:\n fluxratiog42_6.append(fluxlist[k, :])\n label42_6.append(k)\n elif 0.375 <= nfratio12_4 <= 0.42:\n fluxratiog375_42.append(fluxlist[k, :])\n label375_42.append(k)\n elif 0.34 <= nfratio12_4 <= 0.375:\n fluxratiog34_375.append(fluxlist[k, :])\n label34_375.append(k)\n elif 0.31 <= nfratio12_4 <= 0.34:\n fluxratiog31_34.append(fluxlist[k, :])\n label31_34.append(k)\n elif 0.2 <= nfratio12_4 <= 0.31:\n fluxratiog29_31.append(fluxlist[k, :])\n label29_31.append(k)\n dustinfluxband.append(dustdata[k, 1])\n # elif 0.2 <= nfratio12_4 <= 0.29:\n # fluxratiog2_29.append(fluxlist[k, :])\n # label2_29.append(k)\n\n\nirluminositylist = np.zeros(len(glist))\nfor b in range(len(glist)):\n irluminositylist[b] = magtoluminosity(irlist[b], glist[b][9])\n\n\nsunlum = 3.839*10**33\nA = 1.57*10**-10\nSFRir = A*(irluminositylist/sunlum)*(1+np.sqrt((10**9)*(sunlum/irluminositylist)))\nprint(SFRir)\n\n\ndef plot(a):\n\n wband = 3.4, 4.6, 12, 22\n fig, ax = plt.subplots()\n # ax.axis([2, 50, 0.1, 1200])\n ax.loglog()\n\n for axis in [ax.xaxis, ax.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\n\n for i in range(len(glist)):\n plt.plot(wband, a[i]/a[i][0])\n\n plt.show()\n\n return 1\n\n\ndef SED(SED, label):\n\n wband = 3.4, 4.6, 12, 22\n fig, ax = plt.subplots()\n ax.axis([2, 50, 0.1, 10])\n ax.loglog()\n\n plt.text(3, 6, r'$F_{12\\mu m} / F_{4.4\\mu m}\\leq0.31$', fontproperties=prop, size=15)\n for axis in [ax.xaxis, ax.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\n ax.tick_params(axis='both', which='both', top='True', right='True')\n ax.get_yaxis().set_tick_params(which='both', direction='in')\n 
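# --- reviewer sketch, not part of the original record ---
# magtoflux above converts WISE Vega magnitudes band by band using the zero
# points 309.54, 171.787, 31.674 and 8.363 (values taken from the record).
# The same conversion vectorises cleanly over all four bands at once:
import numpy as np

WISE_ZERO_POINTS = np.array([309.540, 171.787, 31.674, 8.363])

def mags_to_fluxes(w1to4mags):
    """Vega magnitudes (W1..W4) -> flux densities via F = F0 * 10**(-m/2.5)."""
    mags = np.asarray(w1to4mags, dtype=float)
    return WISE_ZERO_POINTS * 10.0 ** (-mags / 2.5)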
ax.get_xaxis().set_tick_params(which='both', direction='in')\n\n plt.plot(wband, SED[0] / SED[0][0], label=\"Abell {}\".format(n[label[0]]), color=\"black\", marker='s', linestyle=\"--\", linewidth=0.7)\n plt.plot(wband, SED[1] / SED[1][0], label=\"Abell {}\".format(n[label[1]]), color=\"black\", marker='o', linestyle=\"-.\", linewidth=0.7)\n plt.plot(wband, SED[2] / SED[2][0], label=\"Abell {}\".format(n[label[2]]), color=\"black\", marker='x', linestyle=\"-\", linewidth=0.7)\n plt.plot(wband, SED[3] / SED[3][0], label=\"Abell {}\".format(n[label[3]]), color=\"black\", marker='+', linestyle=\"-\", linewidth=0.7)\n plt.plot(wband, SED[4] / SED[4][0], label=\"Abell {}\".format(n[label[4]]), color=\"black\", marker=\"^\", linestyle=\":\", linewidth=0.7)\n plt.plot(wband, SED[5] / SED[5][0], label=\"Abell {}\".format(n[label[5]]), color=\"black\", marker='.', linestyle=\"--\", linewidth=0.7)\n plt.plot(wband, SED[6] / SED[6][0], label=\"Abell {}\".format(n[label[6]]), color=\"black\", marker='h', linestyle=\"-.\", linewidth=0.7)\n plt.plot(wband, SED[7] / SED[7][0], label=\"Abell {}\".format(n[label[7]]), color=\"black\", marker='D', linestyle=\"-\", linewidth=0.7)\n plt.plot(wband, SED[8] / SED[8][0], label=\"Abell {}\".format(n[label[8]]), color=\"black\", marker='d', linestyle=\"-\", linewidth=0.7)\n plt.plot(wband, SED[9] / SED[9][0], label=\"Abell {}\".format(n[label[9]]), color=\"black\", marker=\"X\", linestyle=\":\", linewidth=0.7)\n plt.plot(wband, SED[10] / SED[10][0], label=\"Abell {}\".format(n[label[10]]), color=\"black\", marker='*', linestyle=\"\", linewidth=0.7)\n plt.plot(wband, SED[11] / SED[11][0], label=\"Abell {}\".format(n[label[11]]), color=\"black\", marker='p', linestyle=\"-.\", linewidth=0.7)\n plt.plot(wband, SED[12] / SED[12][0], label=\"Abell {}\".format(n[label[12]]), color=\"black\", marker='P', linestyle=\"-\", linewidth=0.7)\n plt.plot(wband, SED[13] / SED[13][0], label=\"Abell {}\".format(n[label[13]]), color=\"black\", marker=\"H\", linestyle=\":\",linewidth=0.7)\n plt.plot(wband, SED[14] / SED[14][0], label=\"Abell {}\".format(n[label[14]]), color=\"black\", marker='.',linestyle=\"--\", linewidth=0.7)\n plt.plot(wband, SED[15] / SED[15][0], label=\"Abell {}\".format(n[label[15]]), color=\"black\", marker=',', linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[16] / SED[16][0], label=\"Abell {}\".format(n[label[16]]), color=\"black\", marker=\"s\", linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[17] / SED[17][0], label=\"Abell {}\".format(n[label[17]]), color=\"black\", marker=\"o\", linestyle=\"--\",linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[18] / SED[18][0], label=\"Abell {}\".format(n[label[18]]), color=\"black\", marker=\"^\",linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[19] / SED[19][0], label=\"Abell {}\".format(n[label[19]]), color=\"black\", marker=\"h\",linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[20] / SED[20][0], label=\"Abell {}\".format(n[label[20]]), color=\"black\", marker=\"D\",linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[21] / SED[21][0], label=\"Abell {}\".format(n[label[21]]), color=\"black\", marker=\"d\",\n linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[22] / SED[22][0], label=\"Abell {}\".format(n[label[22]]), color=\"black\", marker=\"X\",\n linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[23] / 
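# --- reviewer sketch, not part of the original record ---
# SED() above repeats ~27 nearly identical plt.plot calls that differ only in
# marker and linestyle. Cycling through style sequences removes the repetition
# (the record already imports itertools; the style lists below are
# illustrative, not the author's exact sequence):
import itertools

def plot_seds(ax, wband, seds, labels, names):
    markers = itertools.cycle(['s', 'o', 'x', '+', '^', '.', 'h', 'D', 'd',
                               'X', 'p', 'P', '*', 'H'])
    linestyles = itertools.cycle(['--', '-.', '-', ':'])
    for sed, idx in zip(seds, labels):
        ax.plot(wband, sed / sed[0], label="Abell {}".format(names[idx]),
                color='black', marker=next(markers),
                linestyle=next(linestyles), linewidth=0.7)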
SED[23][0], label=\"Abell {}\".format(n[label[23]]), color=\"black\", marker=\"p\",\n linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[24] / SED[24][0], label=\"Abell {}\".format(n[label[24]]), color=\"black\", marker=\"P\",\n linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[25] / SED[25][0], label=\"Abell {}\".format(n[label[25]]), color=\"black\", marker=\"*\",\n linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.plot(wband, SED[26] / SED[26][0], label=\"Abell {}\".format(n[label[26]]), color=\"black\", marker=\"H\",\n linestyle=\"--\", linewidth=0.7, markerfacecolor='none')\n plt.xlabel(r'Observed Wavelength ($\\mu m$)', fontproperties=prop, color='black', rotation=0, labelpad=18, size=18)\n plt.ylabel(r'Normalised Flux (erg cm$^{-2}$ s${^-1})$', fontproperties=prop, color='black', rotation=90, labelpad=18, size=18)\n ax.legend(prop={'size': 7})\n plt.legend(frameon=False)\n plt.show()\n\n return 1\n\n\nprint(np.sum(dustinfluxband)/len(dustinfluxband))\n\n# SED(fluxratiog6, label6)\n# SED(fluxratiog42_6, label42_6)\n# SED(fluxratiog375_42, label375_42)\n# SED(fluxratiog34_375, label34_375)\n# SED(fluxratiog31_34, label31_34)\n# SED(fluxratiog29_31, label29_31)\n\n\n# plot(fluxlist)\nprint(sum(dustdata[:, 1])/78)\n\nfig, ax = plt.subplots()\nax.axis([0.18, 1.5, 0.08, 1.3])\nax.loglog()\n# plt.plot((0.5, 0.5), (0, 10), linestyle=\"--\", color=\"grey\", linewidth=\"0.8\")\n#\n# plt.plot((0, 10), (0.5, 0.5), linestyle=\"--\", color=\"grey\", linewidth=\"0.8\")\nax.tick_params(axis='both', which='both', top='True', right='True')\nax.get_yaxis().set_tick_params(which='both', direction='in')\nax.get_xaxis().set_tick_params(which='both', direction='in')\nplt.xlabel(r'$F_{12\\mu m} / F_{4.6\\mu m}$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=15)\nplt.ylabel(r'$F_{24\\mu m} / F_{3\\mu m}$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=15)\nplt.plot(dustfratio12_4, dustfratio24_3, linestyle='', color=\"black\", marker=\"^\", ms=4.5)\nplt.plot(nodustfratio12_4, nodustfratio24_3, linestyle='', color=\"black\", marker=\"s\", markerfacecolor='none', ms=4.5)\n# plt.plot(fratio12_4, fratio24_12, linestyle='', color=\"black\", marker=\"s\", markerfacecolor='none', ms=4.5)\nfor axis in [ax.xaxis, ax.yaxis]:\n formatter = ScalarFormatter()\n formatter.set_scientific(False)\n axis.set_major_formatter(formatter)\nax.xaxis.set_major_formatter(ScalarFormatter())\nax.yaxis.set_major_formatter(ScalarFormatter())\nplt.show()\n\n# fig, ax = plt.subplots()\n# # ax.axis([0.18, 2, 0.1, 3])\n# ax.loglog()\n# for axis in [ax.xaxis, ax.yaxis]:\n# axis.set_major_formatter(ScalarFormatter())\n# plt.plot((0.5, 0.5), (0, 10), linestyle=\"--\", color=\"grey\")\n# plt.plot((0, 10), (1, 1), linestyle=\"--\", color=\"grey\")\n#\n# plt.xlabel(r'$F_{12\\mu m} / F_{4.6\\mu m}$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=12)\n# plt.ylabel(r'$F_{24\\mu m} / F_{3\\mu m}$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=12)\n# plt.plot(fratio12_4, fratio24_3, linestyle='', marker='+' )\n# plt.show()\n\nkpc2 = (1/np.pi)*(np.pi*bcgdetails[:, 1]*10**3/(3600*180))**0.5\nfig, ax2 = plt.subplots()\nplt.plot(fratio12_4, dustdata[:, 1], linestyle=\"\", marker=\".\", color=\"black\", ms=2)\nplt.plot(fratio12_4, dustdata[:, 1], linestyle=\"\", marker=\"o\", color=\"black\", ms=4, markerfacecolor='none')\n# plt.plot((1.0, 19.1631), (26254.7, 2.82*10**8), linestyle=\"-\", color=\"black\", 
linewidth=\"0.8\")\nax2.set_xscale(\"log\")\nax2.set_yscale(\"log\")\n# logA = np.log(SFRir+1)\n# logB = np.log(100*(dustdata[:, 1]+1))\n# m, c = np.polyfit(logA, logB, 1) # fit log(y) = m*log(x) + c\n# print(m, c)\n# y_fit = np.exp(m*logA + c) # calculate the fitted values of y\n# z = np.linspace(0, 100, len(dustdata[:, 1]))\n# plt.plot(SFRir+1, y_fit, linestyle='-', color=\"black\")\nplt.xlabel(r'$F_{12\\mu m} / F_{4.6\\mu m}$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=15)\nplt.ylabel(r'$M_d (M_{\\odot})$', fontproperties=prop, color='black', rotation=90, labelpad=16, size=15)\n# plt.plot(SFRir+1, 100*dustdata[:, 1], linestyle='', color=\"black\", marker=\"s\", markerfacecolor='none', ms=4.5)\n# for axis in [ax2.xaxis]:\n# formatter = ScalarFormatter()\n# formatter.set_scientific(False)\n# axis.set_major_formatter(formatter)\nax2.tick_params(axis='both', which='both', top='True', right='True')\nax2.get_yaxis().set_tick_params(which='both', direction='in')\nax2.get_xaxis().set_tick_params(which='both', direction='in')\nfor t, txt in enumerate(n):\n ax2.annotate(txt, (SFRir[t] + 1, 100*dustdata[:, 1][t]))\nplt.show()\n\n\n\nSFRd = []\nSFRi = []\nkpcnew = []\nSFRdust = dustdata[:, 1]*100*2.5*1.8*10**(-8)\nfor p in range(len(glist)):\n if dustdata[p, 1] >= 11:\n SFRd.append(dustdata[p, 1])\n SFRi.append(SFRir[p])\n kpcnew.append(kpc2[p])\n\nyerror = np.linspace(0.9, 1.1, len(glist))*500\nSFRi = np.array(SFRi)\nSFRd = np.array(SFRd)\nkpcnew = np.array(kpcnew)\n# SFRi = A*(irluminositylist/sunlum)*(1+np.sqrt((10**9)*(sunlum/irluminositylist)))\nfig, ax2 = plt.subplots()\nmhkenneticut = (kpcnew**0.57)*((1/1.8)*10**8)*SFRi**0.71\nmh2 = (20)*2.5*100*SFRd/(np.pi*(kpcnew)**2)\nplt.plot(SFRi, mh2, linestyle=\"\", marker=\".\", color=\"black\", ms=1)\nplt.plot(SFRi, mh2, linestyle=\"\", marker=\"s\", color=\"black\", ms=5, markerfacecolor='none')\nax2.errorbar(SFRi, mh2, yerr=0.5*mh2, linestyle=\"\")\n# plt.plot(SFRi, 20*mhkenneticut, linestyle=\"\", marker=\"^\", color=\"blue\", ms=5)\n# plt.plot(SFRi, 10*mhkenneticut, linestyle=\"\", marker=\"^\", color=\"green\", ms=5)\n# plt.plot(SFRi, 50*mhkenneticut, linestyle=\"\", marker=\"^\", color=\"orange\", ms=5)\nplt.plot((0.3146, 13.17631), (201498, 4.61*10**6), linestyle=\"--\", color=\"black\", linewidth=\"0.6\")\nplt.plot((0.340549, 12.1631), (4.333*10**8, 8.8012*10**9), linestyle=\"--\", color=\"grey\", linewidth=\"0.6\", label=r\"$d_{50kpc}$\")\nplt.plot((0.340483, 12.1588), (1.73736*10**8, 3.53082*10**9), linestyle=\":\", color=\"grey\", linewidth=\"0.7\", label=r\"$d_{20kpc}$\")\nplt.plot((0.340456, 12.1656), (8.63273*10**7, 1.76617*10**9), linestyle=\"-.\", color=\"grey\", linewidth=\"0.8\", label=r\"$d_{10kpc}$\")\nax2.set_xscale(\"log\")\nax2.set_yscale(\"log\")\nx = np.linspace(0.01, 10, len(SFRi))\n\nlogA = np.log(SFRi)\nlogB = np.log(mh2)\nm, c = np.polyfit(logA, logB, 1) # fit log(y) = m*log(x) + c\nprint(m, c)\ny_fit = np.exp(m*logA + c) # calculate the fitted values of y\nz = np.linspace(0, 100, len(dustdata[:, 1]))\nplt.plot(SFRi, y_fit, linestyle='-', color=\"black\")\n\nplt.xlabel(r'$\\Psi_{IR}(M_{\\odot}yr^{-1})$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=15)\nplt.ylabel(r'mH$_2 (M_{\\odot})$', fontproperties=prop, color='black', rotation=90, labelpad=16, size=15)\n# plt.plot(SFRir+1, 100*dustdata[:, 1], linestyle='', color=\"black\", marker=\"s\", markerfacecolor='none', ms=4.5)\nfor axis in [ax2.xaxis]:\n formatter = ScalarFormatter()\n formatter.set_scientific(False)\n 
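# --- reviewer sketch, not part of the original record ---
# The fits above work in log space: a straight line log(y) = m*log(x) + c is
# equivalent to the power law y = exp(c) * x**m. Packaged as a helper
# (inputs must be strictly positive for the logs to be defined):
import numpy as np

def fit_power_law(x, y):
    """Least-squares power-law fit y ~ a * x**m via a line fit in log space."""
    log_x, log_y = np.log(x), np.log(y)
    m, c = np.polyfit(log_x, log_y, 1)
    a = np.exp(c)
    return a, m, a * x ** m  # amplitude, exponent, fitted values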
axis.set_major_formatter(formatter)\nax2.tick_params(axis='both', which='both', top='True', right='True')\nax2.get_yaxis().set_tick_params(which='both', direction='in')\nax2.get_xaxis().set_tick_params(which='both', direction='in')\n\n# for t, txt in enumerate(n):\n# ax2.annotate(txt, (SFRir[t] + 1, 100*dustdata[:, 1][t]))\nplt.legend(frameon=False)\nplt.show()\n\n\n# fig, ax3 = plt.subplots()\n# ax3.tick_params(axis='both', which='both', top='True', right='True')\n# ax3.get_yaxis().set_tick_params(which='both', direction='in')\n# ax3.get_xaxis().set_tick_params(which='both', direction='in')\n# ax3.set_ylabel('N', fontproperties=prop, color='black', rotation=0, labelpad=20, size=18)\n# ax3.set_xlabel('z', fontproperties=prop, color='black', rotation=0, labelpad=20, size=20)\n# x = bcgdetails[:, 0]/(3*10**5)\n# num_bins = 15\n# n, bins, patches = plt.hist(x, num_bins, facecolor='dimgrey', alpha=0.4, edgecolor='black', linewidth=0.9)\n# plt.show()\n\ndef deriv(xs, ys):\n dx = xs[1] - xs[0]\n dydx = np.gradient(ys, dx)\n\n return dydx\n\n\nslopelum = np.zeros((len(glist), 2))\nslope = np.zeros(len(glist))\n\nhollow = []\nnothollow = []\nexcess = []\n\nfor m in range(len(glist)):\n mags = (np.loadtxt(\"datafiles/A{}.txt\".format(n[m]))[1])\n slope[m] = -(mags[1]-mags[0])/(np.log(rarcsecs[1]-rarcsecs[0]))\n slopelum[m, 0] = slope[m]*2.5\n slopelum[m, 1] = bcgdetails[m, 2]\n\n fratio12to4 = fratio12_4[m]\n fratio24to3 = fratio24_3[m]\n\n if slope[m] <= 0:\n hollow.append(slopelum[m, :])\n elif fratio12to4 >= 0.5:\n excess.append(slopelum[m, :])\n elif fratio24to3 >= 0.5:\n excess.append(slopelum[m, :])\n else:\n nothollow.append(slopelum[m, :])\n\n # sbpfinalplot(np.loadtxt(\"datafiles/A{}.txt\".format(n[m]))[0], np.loadtxt(\"datafiles/A{}.txt\".format(n[m]))[1],\n # np.loadtxt(\"datafiles/A{}.txt\".format(n[m]))[2], np.loadtxt(\"datafiles/A{}.txt\".format(n[m]))[3], n[m])\n\n\nhollow = np.array(hollow)\nnothollow = np.array(nothollow)\nexcess = np.array(excess)\nfig, ax4 = plt.subplots()\nax4.axis([-21.3, -25.3, -0.1, 0.5])\nplt.plot((-19, -26), (0.3, 0.3), linestyle=\"--\", color=\"grey\", linewidth=\"0.8\")\nplt.plot((-19, -26), (0, 0), color=\"black\", linewidth=\"0.8\")\nplt.plot(nothollow[:,1], nothollow[:,0], linestyle='', marker=\"+\", color='black')\nplt.plot(hollow[:,1], hollow[:,0], linestyle='', marker=\"^\", color='black', ms=3)\nplt.plot(excess[:,1], excess[:,0], linestyle='', marker=\"o\", color='black', ms=3)\nax4.tick_params(axis='both', which='both', top='True', right='True')\nax4.get_yaxis().set_tick_params(which='both', direction='in')\nax4.get_xaxis().set_tick_params(which='both', direction='in')\nplt.xlabel(r'$M_V$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=16)\nplt.ylabel(r'$\\Gamma_{r\\rightarrow 0}$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=16)\nplt.show()\n\n\n\n\nrb = (np.pi*bcgdetails[:, 4]/(3600*180))*bcgdetails[:, 1]*(10**6)\n# slope, intercept, r_value, p_value, std_err = stats.linregress(bcgdetails[:, 2], np.log(rb))\n# line = slope*bcgdetails[:, 2]+intercept\n\n\nfig, ax5 = plt.subplots()\nax5.axis([-21.3, -25.1, 10, 1500])\nplt.plot((-18.65, -25.65), (10, 1600), linestyle=\"-\", color=\"black\", linewidth=\"0.8\")\nplt.plot((-19, -26), (50, 50), linestyle=\"-.\", color=\"darkgray\", linewidth=\"0.8\")\nplt.plot((-22, -22), (10, 1600), linestyle=\"--\", color=\"darkgray\", linewidth=\"0.8\")\nplt.plot(bcgdetails[:, 2], rb, marker=\"+\", color='black', linestyle=\"\")\nrblog = 
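# --- reviewer sketch, not part of the original record ---
# The inner slope below divides by np.log(r1 - r0); for a logarithmic gradient
# d(mag)/d(log r) the usual denominator is np.log(r1) - np.log(r0), so this may
# be a bug in the record. A version built on np.gradient (which the record's
# deriv() already wraps) evaluates the log-slope at the innermost radius:
import numpy as np

def inner_log_slope(radii, mags):
    """d(mag)/d(ln r) at the innermost radius; the record negates this and
    scales by 2.5 to get its Gamma value."""
    dmag_dlnr = np.gradient(np.asarray(mags, float), np.log(radii))
    return dmag_dlnr[0]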
np.log(rb)\nplt.plot(np.unique(bcgdetails[:, 2]), np.poly1d(np.polyfit(bcgdetails[:, 2], rblog, 1))(np.unique(bcgdetails[:, 2])))\nax5.set_yscale(\"log\")\nax5.tick_params(axis='both', which='both', top='True', right='True')\nax5.get_yaxis().set_tick_params(which='both', direction='in')\nax5.get_xaxis().set_tick_params(which='both', direction='in')\nfor axis in [ax5.xaxis, ax5.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\nplt.xlabel(r'$M_V$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=16)\nplt.ylabel(r'$r_b$(pc)', fontproperties=prop, color='black', rotation=0, labelpad=16, size=16)\nplt.show()\n\n\nfig, ax6 = plt.subplots()\nax6.axis([10, 1400, -0.1, 0.5])\nplt.plot((0, 1500), (0, 0), linestyle=\"-.\", color=\"darkgray\", linewidth=\"0.8\")\nplt.plot((0, 1500), (0, 0), linestyle=\"-.\", color=\"darkgray\", linewidth=\"0.8\")\nplt.plot(rb, 2.5*slope, marker=\"+\", color='black', linestyle=\"\")\nax6.set_xscale(\"log\")\nax6.tick_params(axis='both', which='both', top='True', right='True')\nax6.get_yaxis().set_tick_params(which='both', direction='in')\nax6.get_xaxis().set_tick_params(which='both', direction='in')\nfor axis in [ax6.xaxis, ax6.yaxis]:\n axis.set_major_formatter(ScalarFormatter())\nplt.xlabel(r'$r_b$(pc)', fontproperties=prop, color='black', rotation=0, labelpad=16, size=16)\nplt.ylabel(r'$\\Gamma_{r\\rightarrow 0}$', fontproperties=prop, color='black', rotation=0, labelpad=16, size=16)\nplt.show()\n\n\ndustirlist = []\nfor y in range(len(glist)):\n if dustdata[y] >= 10:\n dustirlist[y, 0] = (int(n[y]))\n dustirlist[y, 1] = dustdata[y, 1]\n dustirlist[y, 2] = irluminositylist[y]\n\n\nfig, ax7 = plt.subplots()\nax7.axis([3*10**42, 3*10**44, 100, 5*10**6])\nlogA = np.log(irluminositylist)\nlogB = np.log(dustdata[:, 1] + 1)\n\nm, c = np.polyfit(logA, logB, 1) # fit log(y) = m*log(x) + c\ny_fit = np.exp(m*logA + c) # calculate the fitted values of y\n\nplt.plot(irluminositylist, dustdata[:, 1]+1, \"x\", color=\"black\")\nplt.plot(irluminositylist, y_fit, \":\", color=\"black\")\nax7.set_yscale(\"log\")\nax7.set_xscale(\"log\")\nplt.show()\n","repo_name":"BobbyHemming/bcg-image-analysis","sub_path":"Analysis.py","file_name":"Analysis.py","file_ext":"py","file_size_in_byte":27763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18488528200","text":"from flask import Flask, render_template, request\nimport ipinfo\nimport os\nfrom pyowm import OWM\n\napp = Flask(__name__)\n\naccess_token = os.environ['IPINFO_TOKEN']\nowm = OWM(os.environ['OWM_TOKEN'])\nmgr = owm.weather_manager()\n\n@app.route('/')\ndef index():\n \n visitor_details=locate_ip(remove_port(get_client_ip()))\n\n # See location_detail_validator() for more details.\n \n if location_detail_validator(visitor_details):\n city, region, country, countryname, latitude, longitude = location_detail_extractor(visitor_details)\n else:\n city = 'Munich'\n region = 'Bavaria'\n country = 'DE'\n countryname = 'Germany'\n latitude = 'Unknown latitude'\n longitude = 'Unknown longitude'\n \n # Check if we can find the weather for the city. 
If not, try the region.\n \n try:\n weather_status, humidity, now_temperature, max_temperature, min_temperature = weather_search(city, countryname)\n location = city\n except:\n weather_status, humidity, now_temperature, max_temperature, min_temperature = weather_search(region, countryname)\n location = region\n\n now_temperature = round(now_temperature)\n max_temperature = round(max_temperature)\n min_temperature = round(min_temperature)\n \n return render_template('index.html', location=location, country=countryname, weather_status=weather_status, humidity=humidity, now_temperature=now_temperature, max_temperature=max_temperature, min_temperature=min_temperature, latitude=latitude, longitude=longitude)\n\ndef get_client_ip():\n \n # Get the client ip address even if the app is behind a proxy\n \n if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n client_ip = request.environ['REMOTE_ADDR']\n else:\n client_ip = request.environ['HTTP_X_FORWARDED_FOR']\n \n return client_ip\n\ndef remove_port(ip_address_with_port):\n \n # get_client_ip() returns the ip address with the port number. We need to remove the port number to use the ipinfo library\n \n parts = ip_address_with_port.split(':')\n ip_address = parts[0]\n return ip_address\n\ndef locate_ip(client_ip):\n handler = ipinfo.getHandler(access_token)\n details = handler.getDetails(client_ip)\n return details.all\n\ndef location_detail_extractor(detail_dict):\n \n # This only exists because I don't want to type detail_dict['city'] every time I need the city name. \n\n city = detail_dict['city']\n region = detail_dict['region']\n country = detail_dict['country']\n countryname = detail_dict['country_name']\n latitude = detail_dict['latitude']\n longitude = detail_dict['longitude']\n return city, region, country, countryname, latitude, longitude\n\ndef location_detail_validator(detail_dict):\n \n # Local & other invalid ip addresses return a 'bogon' key in the dictionary. 
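# --- reviewer sketch, not part of the original app ---
# Behind a proxy, HTTP_X_FORWARDED_FOR can hold a comma-separated chain
# ("client, proxy1, proxy2"), and splitting on ':' as remove_port() does breaks
# IPv6 addresses. A more defensive variant (trusting the first hop is an
# assumption that only holds when the proxy is under your control):
def client_ip_from_environ(environ):
    forwarded = environ.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0].strip()  # first address = origin client
    return environ.get('REMOTE_ADDR', '')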
If the location is invalid, we will use Munich as the default location.\n \n if 'bogon' in detail_dict:\n return False\n else:\n return True\n\ndef weather_search(location, country):\n observation = mgr.weather_at_place(f'{location},{country}')\n w = observation.weather\n\n weather_status = w.detailed_status # 'clouds'\n wind_dict = w.wind() # {'speed': 4.6, 'deg': 330}\n humidity = w.humidity # 87\n temperature = w.temperature('celsius') # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}\n rain = w.rain # {}\n #w.heat_index # None\n #w.clouds # 75\n humidity = humidity\n now_temperature = temperature['temp'] \n max_temperature = temperature['temp_max']\n min_temperature = temperature['temp_min']\n\n return weather_status, humidity, now_temperature, max_temperature, min_temperature\n\nif __name__ == '__main__':\n app.run()","repo_name":"stelioslep/HU","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929289866","text":"import sys\nfrom typing import List\nimport heapq\nfrom collections import defaultdict\n\n\nclass Solution:\n def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, k: int) -> int:\n if src == dst:\n return 0\n\n graph = defaultdict(list)\n weight = [(sys.maxsize, 0) for _ in range(n)]\n for u, v, w in flights:\n graph[u].append((v, w))\n\n hq = [(0, src, k)]\n while hq:\n price, node, dist = heapq.heappop(hq)\n if node == dst:\n return price\n if dist >= 0:\n for v, w in graph[node]:\n alt = price + w\n if alt < weight[v][0] or dist - 1 >= weight[v][1]:\n weight[v] = (alt, dist - 1)\n heapq.heappush(hq, (alt, v, dist - 1))\n return -1\n\n\nprint(Solution().findCheapestPrice(3,\n[[0,1,100],[1,2,100],[0,2,500]],\n0,\n2,\n1))\n","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/3회차/B41_Cheapest Flights Within K Stops2.py","file_name":"B41_Cheapest Flights Within K Stops2.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36090602290","text":"import sqlite3\n\nconnection = sqlite3.connect('my_friends.db')\n# create the cursor\nc = connection.cursor()\n# execute some sql\n# c.execute(\"CREATE TABLE friends (first_name TEXT, last_name text, closeness INTEGER);\")\n# insert_querys = \"INSERT INTO friends VALUES ('Renata', 'Domingues', 9)\"\n# form_first = 'Micheline'\n# form_last = 'Lopes'\n# form_closeness = 10\n# BAD WAY! 
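# --- reviewer sketch, not part of the original record ---
# The heap solution above is a Dijkstra variant whose states also carry the
# remaining stop budget. The same problem is commonly solved with k+1 rounds of
# Bellman-Ford relaxation, which avoids the per-node (cost, stops) bookkeeping:
import sys

def cheapest_price(n, flights, src, dst, k):
    dist = [sys.maxsize] * n
    dist[src] = 0
    for _ in range(k + 1):            # at most k stops => at most k+1 edges
        nxt = dist[:]                 # relax against last round's values only
        for u, v, w in flights:
            if dist[u] != sys.maxsize and dist[u] + w < nxt[v]:
                nxt[v] = dist[u] + w
        dist = nxt
    return dist[dst] if dist[dst] != sys.maxsize else -1

# cheapest_price(3, [[0,1,100],[1,2,100],[0,2,500]], 0, 2, 1) -> 200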
DON'' DO IT!\n# query = f'INSERT INTO friends VALUES (\"{form_first}\", \"{form_last}\", \"{form_closeness}\")'\n# BETTERWAY\ndata = ('Leonardo', 'Brighi', 10)\nquery = 'INSERT INTO friends VALUES (?, ?, ?)'\nc.execute(query, data)\n# commit changes\nconnection.commit()\nconnection.close()\n","repo_name":"mhiloca/PythonBootcamp","sub_path":"SQL/friends.py","file_name":"friends.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40062952277","text":"import jax\nimport jax.numpy as jnp\nfrom diffusers.models import vae_flax\n\n\ndef train_step(\n state,\n text_encoder_params,\n batch,\n train_rng,\n noise_scheduler_state,\n static_broadcasted,\n weights=None,\n):\n noise_scheduler, text_encoder, train_cfg, guidance_scale = static_broadcasted\n dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)\n\n def compute_loss(params):\n latent_dist = vae_flax.FlaxDiagonalGaussianDistribution(batch[\"vae\"])\n latents = latent_dist.sample(sample_rng)\n # (NHWC) -> (NCHW)\n latents = jnp.transpose(latents, (0, 3, 1, 2))\n latents = latents * 0.18215\n\n # Sample noise that we'll add to the latents\n noise_rng, timestep_rng = jax.random.split(sample_rng)\n noise = jax.random.normal(noise_rng, latents.shape)\n # Sample a random timestep for each image\n batch_size = latents.shape[0]\n timesteps = jax.random.randint(\n timestep_rng,\n shape=(batch_size,),\n minval=0,\n maxval=noise_scheduler.config.num_train_timesteps,\n )\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(\n noise_scheduler_state,\n latents,\n noise,\n timesteps,\n )\n\n # Get the text embedding for conditioning\n encoder_hidden_states = text_encoder(\n batch[\"input_ids\"],\n params=text_encoder_params,\n train=False,\n )[0]\n\n # Predict the noise residual and compute loss\n unet_outputs = state.apply_fn(\n {\"params\": params},\n noisy_latents,\n timesteps,\n encoder_hidden_states,\n train=True,\n )\n\n if train_cfg:\n # Get the text embedding for null conditioning\n uncond_hidden_states = text_encoder(\n batch[\"uncond_text\"],\n params=text_encoder_params,\n train=False,\n )[0]\n\n uncond_outputs = state.apply_fn(\n {\"params\": params},\n noisy_latents,\n timesteps,\n uncond_hidden_states,\n train=True,\n )\n noise_pred = uncond_outputs.sample + guidance_scale * (\n unet_outputs.sample - uncond_outputs.sample\n )\n else:\n noise_pred = unet_outputs.sample\n\n loss = ((noise - noise_pred) ** 2).mean(axis=range(1, noise.ndim))\n if weights is None:\n ## average over batch dimension\n loss = loss.mean()\n else:\n ## multiply loss by weights\n assert loss.size == weights.size\n loss = (loss * weights).sum()\n\n return loss\n\n grad_fn = jax.value_and_grad(compute_loss)\n loss, grad = grad_fn(state.params)\n\n loss = jax.lax.pmean(loss, \"batch\")\n grad = jax.lax.pmean(grad, \"batch\")\n\n new_state = state.apply_gradients(grads=grad)\n\n return new_state, loss, new_train_rng\n\n\ndef vae_decode(latents, vae_params, apply_fn, decode_fn):\n ## cannot pass in pipeline.vae directly with static_broadcasted_argnums\n ## because it is not hashable;\n ## expects latents in NCHW format (batch_size, 4, 64, 64)\n latents = latents / 0.18215\n images = apply_fn({\"params\": vae_params}, latents, method=decode_fn).sample\n images = (images / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)\n return images\n\n\ndef 
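# --- reviewer sketch, not part of the original record ---
# The '?' placeholder style above (the safe alternative to string formatting)
# also scales to bulk inserts, and sqlite3 connections double as context
# managers that commit on success and roll back on error:
import sqlite3

rows = [('Renata', 'Domingues', 9), ('Leonardo', 'Brighi', 10)]
with sqlite3.connect('my_friends.db') as connection:
    connection.executemany('INSERT INTO friends VALUES (?, ?, ?)', rows)
# the with-block committed the transaction but does not close the handle
connection.close()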
text_encode(input_ids, params, text_encoder):\n return text_encoder(input_ids, params=params)[0]\n\n\ndef patch_scheduler(pipeline):\n from ddpo import patch\n\n pipeline.scheduler = patch.scheduling_ddim_flax.FlaxDDIMScheduler(\n num_train_timesteps=pipeline.scheduler.config.num_train_timesteps,\n beta_start=pipeline.scheduler.config.beta_start,\n beta_end=pipeline.scheduler.config.beta_end,\n beta_schedule=pipeline.scheduler.config.beta_schedule,\n trained_betas=pipeline.scheduler.config.trained_betas,\n set_alpha_to_one=pipeline.scheduler.config.set_alpha_to_one,\n steps_offset=pipeline.scheduler.config.steps_offset,\n prediction_type=pipeline.scheduler.config.prediction_type,\n )\n","repo_name":"jannerm/ddpo","sub_path":"ddpo/training/diffusion.py","file_name":"diffusion.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"81"} +{"seq_id":"35054548795","text":"import os\n\nimport klib.vm\nimport klib.io\nimport klib.interpreter\nfrom klib.parser import parse_error, print_listener\nfrom klib.io import stdout\nfrom klib.native.python import abort_exception\n\ndef print_trace(stack_trace):\n for trace in stack_trace:\n stdout.writeln(\" File \\\"{}\\\", line {}, column {}, in {}\", trace.filename, trace.line, trace.column, trace.blockname)\n\ndef execute_script(input_file, propagate_exception, developer_verbose, test_type, include_dirs):\n module = klib.interpreter.module()\n extra_listeners = None\n if developer_verbose:\n extra_listeners = [print_listener(stdout)]\n try:\n module.load_file(input_file, extra_listeners = extra_listeners, developer_verbose = developer_verbose, include_dirs = include_dirs)\n except klib.parser.parse_error as em:\n stdout.writeln(\" File \\\"{}\\\", line {}, column {}\", em.filename, em.line, em.column)\n stdout.writeln(\" Parse error: {}\", em.message)\n if(propagate_exception):\n raise em\n return -1\n except klib.exception as em:\n if test_type == \"run_success\":\n stdout.writeln(\"Test failed: {}\", em.message)\n return -1\n elif test_type == \"run_fail\":\n return 0\n else:\n stdout.writeln(\"Error: {}\", em.message)\n if(propagate_exception):\n raise em\n return -1\n except abort_exception as ae:\n if test_type == \"run_success\":\n print_trace(ae.stack_trace)\n stdout.writeln(\"Test failed: {}\", ae.args[0])\n return -1\n elif test_type == \"run_fail\":\n return 0\n else:\n print_trace(ae.stack_trace)\n stdout.writeln(\"Aborted: {}\", ae.args[0])\n return -1\n if test_type == \"run_fail\":\n stdout.writeln(\"No failure detected!\")\n return -1\n return 0\n\ndef main(argv):\n \n show_help = False\n show_version = False\n input_file = None\n propagate_exception = False\n developer_verbose = False\n test_type = None\n test_suite = None\n include_dirs = []\n\n arg_i = 0\n while arg_i < len(argv):\n if(argv[arg_i] == \"--help\"):\n show_help = True\n elif(argv[arg_i] == \"--version\"):\n show_version = True\n elif(argv[arg_i] == \"--propagate-exception\"):\n propagate_exception = True\n elif(argv[arg_i] == \"--developer-verbose\"):\n developer_verbose = True\n elif(argv[arg_i] == \"--test\"):\n test_type = argv[arg_i + 1]\n arg_i += 1\n elif(argv[arg_i] == \"--test-suite\"):\n test_suite = argv[arg_i]\n elif argv[arg_i] == \"--include-dir\":\n include_dirs.append(argv[arg_i + 1])\n arg_i += 1\n else:\n input_file = argv[arg_i]\n arg_i += 1\n\n if(show_version):\n stdout.writeln(\"Kernel Language v0.9.0\")\n return 0\n \n if test_suite and test_type:\n 
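# --- reviewer sketch, not part of the original record ---
# The train_cfg branch above mixes conditional and unconditional predictions
# with classifier-free guidance. The mixing rule in isolation (plain numpy
# here for illustration; the record itself uses jax.numpy):
import numpy as np

def cfg_combine(uncond_pred, cond_pred, guidance_scale):
    """scale=1 returns the conditional prediction; >1 amplifies the text signal."""
    return uncond_pred + guidance_scale * (cond_pred - uncond_pred)

assert np.allclose(cfg_combine(np.zeros(3), np.ones(3), 1.0), np.ones(3))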
stdout.writeln(\"Cannot specify a test suite and test type\")\n return -1\n \n if(show_help or input_file == None):\n stdout.writeln(\"kl [--help] [--version] [input_filename]\")\n stdout.writeln(\"--help: show this help message\")\n stdout.writeln(\"--version: show the version\")\n stdout.writeln(\"--propagate-exception: \")\n stdout.writeln(\"--developer-verbose: \")\n stdout.writeln(\"--test [type]: run a test where type is:\\n\"\n \" * 'run_success' if the test is expected to succeed\\n\"\n \" * 'run_fail' if the test is expected to fail\\n\")\n stdout.writeln(\"--test-suite: run all the tests listed in the file\")\n stdout.writeln(\"input_filename: name of the file to execute\")\n return 0\n \n if test_suite != None:\n test_suite_file = klib.io.input_file_stream(input_file)\n failures = 0\n while not test_suite_file.at_end_of_stream:\n t = test_suite_file.read_line()\n if t != \"\":\n test_file_name = os.path.join(os.path.dirname(input_file), t)\n test_file = klib.io.input_file_stream(test_file_name)\n shebang_line = test_file.read_line()\n if shebang_line[0:2] == \"#!\":\n shebang = shebang_line.split(\" \")\n test_type = None\n shebang_i = 0\n while shebang_i < len(shebang):\n if shebang[shebang_i] == \"--test\":\n test_type = shebang[shebang_i+1]\n break\n shebang_i += 1\n if test_type:\n if(execute_script(test_file_name, False, False, test_type, include_dirs) == 0):\n stdout.writeln(\"{}: success\", t)\n else:\n stdout.writeln(\"{}: failed\", t)\n failures += 1\n else:\n stdout.writeln(\"{}: invalid test, missing '--test'!\", t)\n failures += 1\n else:\n stdout.writeln(\"{}: invalid test, missing shebang!\", t)\n failures += 1\n \n if failures > 0:\n stdout.writeln(\"There was {} failures\".format(failures))\n return -1\n else:\n stdout.writeln(\"Test suite was successful\")\n return 0\n else:\n ret = execute_script(input_file, propagate_exception, developer_verbose, test_type, include_dirs)\n if ret == 0 and test_type != None:\n stdout.writeln(\"Test pass.\")\n return ret\n","repo_name":"weeklyvillain/TDDA69","sub_path":"kl/klib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16121650917","text":"import json\nimport uuid\nimport logging\n\n\n# content to export sheet cell\nclass ExportDocumentReportCellContent():\n # constructor\n def __init__(self, data, bgc='#ffffff', color=\"#000000\", ta=\"left\", fz=\"10\", fm=\"\", fw=\"normal\", fs=\"normal\", u=\"\",\n ff=\"arial\", width=50, id=-1, row=-1, show_grid=False, contains_data=False, height=30):\n try:\n self.width = width\n self.data = data\n self.bgc = bgc\n self.color = color\n self.ta = ta\n self.fz = fz\n self.fm = fm\n self.fw = fw\n self.fs = fs\n self.u = u\n self.ff = ff\n self.id = id\n self.ww = 'break-word'\n self.ws = 'pre-line'\n self.va = 'middle'\n self.ta = 'center'\n self.height = height\n\n self.bls = 'solid'\n self.brs = 'solid'\n self.bts = 'solid'\n self.bbs = 'solid'\n\n self.blc = '#7f7f7f'\n self.brc = '#7f7f7f'\n self.btc = '#7f7f7f'\n self.bbc = '#7f7f7f'\n\n self.blt = 'solid'\n self.brt = 'solid'\n self.btt = 'solid'\n self.bbt = 'solid'\n # self.bgc = bgc\n except Exception as e:\n logging.error(\"Error initialization. 
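# --- reviewer sketch, not part of the original record ---
# The test-suite loop below scans shebang tokens by index to find the value
# after '--test'. A compact equivalent that returns that value, or None:
def test_type_from_shebang(line):
    if not line.startswith('#!'):
        return None
    tokens = line.split()
    for flag, value in zip(tokens, tokens[1:]):  # walk adjacent token pairs
        if flag == '--test':
            return value
    return None

assert test_type_from_shebang('#!kl --test run_success') == 'run_success'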
\" + str(e))\n\n\n# export document sheet cell\nclass ExportDocumentReportCell():\n # constructor\n def __init__(self, sheet, row, col, cellJson):\n try:\n self.sheet = sheet\n self.row = row\n self.col = col\n self.json = cellJson\n except Exception as e:\n logging.error(\"Error initialization. \" + str(e))\n\n\n# export document group\nclass ExportDocumentGroup():\n # constructor\n def __init__(self, level, span):\n try:\n self.level = level\n self.span = span\n except Exception as e:\n logging.error(\"Error initialization. \" + str(e))\n\n # convert model to JSON\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n\n\n# export documnet report floatings model\nclass ExportDocumentReportFloatings():\n # constructor\n def __init__(self, sheet, name, ftype, json):\n self.sheet = sheet\n self.name = name\n self.ftype = ftype\n self.json = json\n pass\n\n\n# export sheet model report\nclass ExportDocumentReportSheet():\n # constructor\n def __init__(self, id, name):\n try:\n self.id = id\n self.name = name\n except Exception as e:\n logging.error(\"Error initialization. \" + str(e))\n\n\n# export sheet report document\nclass ExportDocumentReportModel():\n # constructor\n def __init__(self, fileName):\n try:\n self.fileName = fileName\n self.sheets = []\n self.floatings = []\n self.cells = []\n except Exception as e:\n logging.error(\"Error initialization. \" + str(e))\n\n # start from 1\n def add_sheet(self, id, name):\n try:\n self.sheets.append(ExportDocumentReportSheet(id, name))\n except Exception as e:\n logging.error(\"Error \" + str(e))\n\n # convert model to JSON\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n","repo_name":"vyadzmak/PKBReportBuilder","sub_path":"PKBReportBuilder/models/export_models/export_document_report_model.py","file_name":"export_document_report_model.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5483919483","text":"from ball import Ball\n\nimport turtle\n\nimport time\n\nimport random\n\nimport math\n\nimport numpy as np\n\n\nfrom Tkinter import *\n\nmaster = Tk()\n\nquestions = [['1+1',2,5,6,1]]\n\n#face_cascade = cv2.CascadeClassifier('/usr//local//lib//python2.7//dist-packages//cv2//data//haarcascade_frontalface_default.xml')\n\n#eye_cascade = cv2.CascadeClassifier('/usr//local//lib//python2.7//dist-packages//cv2//data//haarcascade_eye.xml')\n\n#cap = cv2.VideoCapture(0)\n\nturtle.colormode(255)\n\nturtle.tracer(0)\n\nturtle.ht()\n\nRunning=True\n\nSleep=0.0077\n\nSCREEN_WIDTH=turtle.getcanvas().winfo_width()/2\n\nSCREEN_HIGHT=turtle.getcanvas().winfo_height()/2\n\nPause = False\n\nnumBalls =1\nbvbvbbv\nminR=70\n\nmaxR=100\n\nminDX=-5\n\nmaxDX=5\n\nminDY=-5\n\nmaxDY=5\n\nABC=['A','B','C','D']\n\nBalls = []\n\nfor i in range(numBalls):\n\n\tx = random.randint(-SCREEN_WIDTH+maxR,SCREEN_WIDTH-maxR)\n\n\ty = random.randint(-SCREEN_HIGHT+maxR,SCREEN_HIGHT-maxR)\n\n\tdx = random.randint(minDX,maxDX)\n\n\tdy = random.randint(minDY,maxDY)\n\n\tR = random.randint(0,255)\n\n\tG = random.randint(0,255)\n\n\tB = random.randint(0,255)\n\n\tColor = (R,G,B)\n\n\traduius = random.randint(minR,maxR)\n\n\tenemyBall = Ball(x,y,dx,dy,raduius,Color)\n\n\tBalls.insert(-1,enemyBall)\n\nmyBall = Ball(0,0,5,5,50,Color)\n\ndef moveAllBalls():\n\n\tfor i in Balls:\n\n\t\ti.move(SCREEN_WIDTH,SCREEN_HIGHT)\n\ndef checkCollision(ballA,ballB):\n\n\tif ballA == 
ballB:\n\n\t\treturn False\n\n\telif (math.sqrt((ballA.x-ballB.x)**2+(ballA.y-ballB.y)**2))= Ball2.r:\n\n\t\t\t\t\tpass\n\n\t\t\t\t\t#Ball1.r = Ball1.r +0.001\n\n\t\t\t\t\t#Ball1.shapesize(Ball2.r/10)\n\n\t\t\t\t\t#Ball2.dx = -Ball2.dx\n\n\t\t\t\t\t#Ball2.dy = -Ball2.dy\n\n\t\t\t\t\t#Ball2.y = random.randint(-SCREEN_WIDTH+maxR,SCREEN_WIDTH-maxR)\n\n\t\t\t\t\t#Ball2.y = random.randint(-SCREEN_HIGHT+maxR,SCREEN_HIGHT-maxR)\n\n\t\t\t\telse:\n\n\t\t\t\t\tpass\n\n\t\t\t\t\t#Ball2.r = Ball2.r +0.001\n\n\t\t\t\t\t#Ball2.shapesize(Ball2.r/10)\n\n\t\t\t\t\t#Ball1.dx = -Ball1.dx\n\n\t\t\t\t\t#Ball1.dy = -Ball1.dy\n\n\n\ndef myBallcollision():\n\n\tfor ball in Balls:\n\n\t\tif checkCollision(myBall,ball):\n\n\t\t\tif myBall.r>ball.r:\n\n\t\t\t\tBalls.remove(ball)\n\n\t\t\t\tball.hideturtle()\n\n\t\t\t\tmyBall.r = myBall.r +1\n\n\t\t\t\tmyBall.shapesize(myBall.r/10)\n\n\t\t\telif myBall.r (len(daily_data_df[title]) * .75):\n daily_data_df.pop(title)\n else:\n for i in range(len(daily_data_df[title])):\n if i == 0:\n daily_data_df[title].bfill(inplace=True)\n elif i == len(daily_data_df[title]):\n daily_data_df[title].ffill(inplace=True)\n else:\n daily_data_df.interpolate()\n\n return daily_data_df\n\ndef get_spotify_df(filename):\n '''\n returns a usable dataframe containing a the spotify data from a JSON file\n Parameter: name of the JSON file containing a users spotify data\n Returns: a Pandas dataframe containing the data\n '''\n import pandas as pd\n\n stream_hist_df = pd.read_json(filename)\n\n endtime_unclean_ser = stream_hist_df[\"endTime\"]\n\n date_list = []\n time_list = []\n\n for i in range(len(endtime_unclean_ser)):\n curr_string = endtime_unclean_ser[i]\n string_as_list = curr_string.split()\n\n date = string_as_list[0]\n time = string_as_list[1]\n\n date_list.append(date)\n time_list.append(time)\n\n\n stream_hist_df.pop(\"endTime\")\n stream_hist_df = stream_hist_df.assign(date=date_list)\n stream_hist_df = stream_hist_df.assign(endTime=time_list)\n\n stream_hist_df.set_index(\"date\", inplace=True)\n\n return stream_hist_df\n\ndef decode_endTime_column(dataframe):\n '''\n decodes the endTime value of each song into an integer between 0 and 24, representing what hour of the day the song was listened to \n Parameter: dataframe to decod, must have a valid endTime column\n Returns: nothing is returned, but dataframe passed in is updated\n '''\n\n for i in range(len(dataframe[\"endTime\"])):\n item_list = dataframe[\"endTime\"][i].split(\":\")\n if item_list[0] == \"01\":\n dataframe[\"endTime\"][i] = 1\n elif item_list[0] == \"02\":\n dataframe[\"endTime\"][i] = 2\n elif item_list[0] == \"03\":\n dataframe[\"endTime\"][i] = 3\n elif item_list[0] == \"04\":\n dataframe[\"endTime\"][i] = 4\n elif item_list[0] == \"05\":\n dataframe[\"endTime\"][i] = 5\n elif item_list[0] == \"06\":\n dataframe[\"endTime\"][i] = 6\n elif item_list[0] == \"07\":\n dataframe[\"endTime\"][i] = 7\n elif item_list[0] == \"08\":\n dataframe[\"endTime\"][i] = 8\n elif item_list[0] == \"09\":\n dataframe[\"endTime\"][i] = 9\n elif item_list[0] == \"10\":\n dataframe[\"endTime\"][i] = 10\n elif item_list[0] == \"11\":\n dataframe[\"endTime\"][i] = 11\n elif item_list[0] == \"12\":\n dataframe[\"endTime\"][i] = 12\n elif item_list[0] == \"13\":\n dataframe[\"endTime\"][i] = 13\n elif item_list[0] == \"14\":\n dataframe[\"endTime\"][i] = 14\n elif item_list[0] == \"15\":\n dataframe[\"endTime\"][i] = 15\n elif item_list[0] == \"16\":\n dataframe[\"endTime\"][i] = 16\n elif item_list[0] == \"17\":\n 
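# --- reviewer note: the checkCollision body earlier in this record is garbled
# (text is elided around the comparison), so the sketch below is a plausible
# reconstruction of the intended circle-circle test, not the original code ---
# Two circles overlap when the distance between centres is at most the sum of
# their radii:
import math

def circles_collide(ax, ay, ar, bx, by, br):
    return math.hypot(ax - bx, ay - by) <= ar + br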
dataframe[\"endTime\"][i] = 17\n elif item_list[0] == \"18\":\n dataframe[\"endTime\"][i] = 18\n elif item_list[0] == \"19\":\n dataframe[\"endTime\"][i] = 19\n elif item_list[0] == \"20\":\n dataframe[\"endTime\"][i] = 20\n elif item_list[0] == \"21\":\n dataframe[\"endTime\"][i] = 21\n elif item_list[0] == \"22\":\n dataframe[\"endTime\"][i] = 22\n elif item_list[0] == \"23\":\n dataframe[\"endTime\"][i] = 23\n elif item_list[0] == \"24\":\n dataframe[\"endTime\"][i] = 24\n elif item_list[0] == \"00\":\n dataframe[\"endTime\"][i] = 0\n\ndef print_avg_endtime_by_weekday_plot(dataframe, name):\n '''\n prints a graph containing data points of the average endTime value for each day of the week from the dataframe passed in\n Parameters: dataframe containing relevant data, the name of the person's dataframe being passed in\n Retuns: N/A, but grpah will be output\n '''\n import matplotlib.pyplot as plt\n\n groupedby_day = dataframe.groupby(\"Day of Week\")\n monday = groupedby_day.get_group(\"Monday\")\n tuesday = groupedby_day.get_group(\"Tuesday\")\n wednesday = groupedby_day.get_group(\"Wednesday\")\n thursday = groupedby_day.get_group(\"Thursday\")\n friday = groupedby_day.get_group(\"Friday\")\n saturday = groupedby_day.get_group(\"Saturday\")\n sunday = groupedby_day.get_group(\"Sunday\")\n\n plt.figure()\n plt.scatter([1, 2, 3, 4, 5, 6, 7], [monday[\"endTime\"].mean(), tuesday[\"endTime\"].mean(), wednesday[\"endTime\"].mean(), thursday[\"endTime\"].mean(), friday[\"endTime\"].mean(), saturday[\"endTime\"].mean(), sunday[\"endTime\"].mean()])\n plt.xticks([1, 2, 3, 4, 5, 6, 7], [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"])\n plt.ylabel(\"Average endTime (hour of day)\")\n plt.title(name + \" Average endTime by Day of Week\")\n\ndef generate_wordcloud(column):\n '''\n Note: repurposed from NLP Yelp lecture in class\n generates a wordcloud based on a column containing text values passed in \n Parameter: column of strings\n Returns: N/A, but wordcloud will be generated\n '''\n from wordcloud import WordCloud\n import matplotlib.pyplot as plt\n\n data_str = \"\"\n\n for i in range(len(column)):\n data = column[i].strip(\"\\\"\")\n data_str = data_str + \" \" + data\n\n wc = WordCloud(background_color=\"white\", colormap=\"prism\", random_state=0, collocations=False)\n plt.figure()\n plt.imshow(wc.generate(data_str))\n plt.xticks([], [])\n plt.yticks([], [])\n plt.show()\n\n","repo_name":"JStirrat/project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74234205063","text":"from django.urls import path\nfrom .views import *\n\n\n\nurlpatterns = [\n path('', ContractorHome.as_view(), name='contractor'),\n path('contractordata/', ContractorData.as_view(), name='contractordata'),\n path('createContractor/', save_contractorData.as_view(), name='createContractor'),\n path('delete_contractor//', DeleteContractor.as_view(), name='delete_contractor'),\n path('edit_contractor//', EditContractor.as_view(), name='edit_contractor'),\n\n\n path('site/', SiteHome.as_view(), name='site'),\n path('site/sitedata/', SiteData.as_view(), name='sitedata'),\n path('createSite/', CreateSite.as_view(), name='createSite'),\n path('site/delete_site//', DeleteSite.as_view(), name='delete_site'),\n path('site/edit_site//', EditSite.as_view(), name='edit_site'),\n path('updateSite/', UpdateSite.as_view(), 
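# --- reviewer sketch, not part of the original record ---
# The 24-branch if/elif chain in decode_endTime_column above re-implements
# string-to-int parsing. The whole mapping reduces to one int() call per row:
def hour_from_endtime(endtime):
    """'13:45' -> 13; mirrors the mapping of the if/elif chain above."""
    return int(endtime.split(':')[0])

assert hour_from_endtime('09:12') == 9
assert hour_from_endtime('00:01') == 0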
name='updateSite'),\n\n\n path('siteReportIndex/', SiteReportIndex.as_view(), name='siteReportIndex'),\n path('dateRangeSiteForm/', DateRangeSiteReport.as_view(), name='dateRangeSiteForm'),\n]","repo_name":"dipu157/python_projects","sub_path":"django_construction/construction/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14670893563","text":"import json\n\nclass GlobalComment():\n def __init__ (self):\n self.comment = self.getComment()\n\n def getComment (self):\n if 'comment' in self.data:\n return self.data['comment']\n else:\n return None\n\n\nclass HAR(GlobalComment):\n def __init__(self, inputFile=None, string=None):\n #eventually add args / kwargs for file/string input\n if inputFile is not None:\n inputFile.open()\n interpreted_dict = json.load(inputFile)\n inputFile.close()\n elif string is not None:\n interpreted_dict = json.loads(string)\n else:\n raise Exception(\"No input provided!\")\n self.data = interpreted_dict['log']\n self.creator = self.getCreator()\n self.browser = self.getBrowser()\n self.version = self.getVersion()\n if 'entries' in self.data:\n self.entries = []\n for item in self.data['entries']:\n self.entries.append(Entry(item))\n else:\n self.entries = None\n\n if 'pages' in self.data:\n self.pages = []\n for item in self.data['pages']:\n self.pages.append(Page(item))\n else:\n self.pages = None #pages are optional, so this is \n\n\n def getBrowser (self): #browser is optional, return null if not present\n try:\n return self.data['browser']\n except:\n return None\n\n def getCreator (self):\n try:\n return self.data['creator']\n except:\n return None\n\n def getVersion (self):\n try:\n version = self.data['version']\n if not len(version) > 0:\n raise TypeError\n else:\n return version\n except:\n return \"1.1\" #1.1 is assumed by default, as per the .HAR file spec\n\n def getMatchedEntries (self, matchString):\n \"\"\"\n For a given matchString that varies by vendor (e.g. 
/b/ss for Omniture) yield back request objects and time requests\n \n Disregard all entries that aren't HTTP Status == 200, because those aren't real, at least for Omniture.\n \"\"\"\n entries = self.data['log']['entries']\n for entry in entries:\n request = entry['request']\n response = entry['response']\n startTime = entry['startedDateTime'] #in ISO 8601\n if matchString in request['url']: # and (response['status'] is 200 or response['status'] is 302): #look for the matchstring, also check that the status is OK\n #import pdb;pdb.set_trace()\n yield request, startTime\n \n \n def getDictFromListofDicts (self, request):\n stuff = {}\n for item in request['headers']:\n stuff[item['name']] = item['value']\n return stuff\n\n\nclass Entry(GlobalComment):\n #do something\n def __init__(self, data):\n pass\n\n\n\nclass Page(GlobalComment):\n #do something\n def __init__(self):\n pass","repo_name":"genejones/HarHar","sub_path":"HarHar.py","file_name":"HarHar.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"13871239954","text":"from functools import lru_cache\nfrom typing import Optional\n\nimport boto3\n\n\nclass NoRegionFound(Exception):\n pass\n\n\n@lru_cache(maxsize=1) # Only need to cache one as once deployed, it is not gonna deal with another region.\ndef _get_region_from_session() -> str:\n return boto3.session.Session().region_name\n\n\n@lru_cache(maxsize=1) # Only need to cache one as once deployed, it is not gonna deal with another region.\ndef _region_to_partition(region: str) -> str:\n # setting default partition to aws, this will be overwritten by checking the region below\n region_string = region.lower()\n if region_string.startswith(\"cn-\"):\n return \"aws-cn\"\n if region_string.startswith(\"us-iso-\"):\n return \"aws-iso\"\n if region_string.startswith(\"us-isob\"):\n return \"aws-iso-b\"\n if region_string.startswith(\"us-gov\"):\n return \"aws-us-gov\"\n\n return \"aws\"\n\n\nclass ArnGenerator:\n BOTO_SESSION_REGION_NAME = None\n\n @classmethod\n def generate_arn(cls, partition, service, resource, include_account_id=True): # type: ignore[no-untyped-def]\n if not service or not resource:\n raise RuntimeError(\"Could not construct ARN for resource.\")\n\n arn = \"arn:{0}:{1}:${{AWS::Region}}:\"\n\n if include_account_id:\n arn += \"${{AWS::AccountId}}:\"\n\n arn += \"{2}\"\n\n return arn.format(partition, service, resource)\n\n @classmethod\n def generate_aws_managed_policy_arn(cls, policy_name: str) -> str:\n \"\"\"\n Method to create an ARN of AWS Owned Managed Policy. This uses the right partition name to construct\n the ARN\n\n :param policy_name: Name of the policy\n :return: ARN Of the managed policy\n \"\"\"\n return \"arn:{}:iam::aws:policy/{}\".format(ArnGenerator.get_partition_name(), policy_name)\n\n @classmethod\n def get_partition_name(cls, region: Optional[str] = None) -> str:\n \"\"\"\n Gets the name of the partition given the region name. If region name is not provided, this method will\n use Boto3 to get name of the region where this code is running.\n\n This implementation is borrowed from AWS CLI\n https://github.com/aws/aws-cli/blob/1.11.139/awscli/customizations/emr/createdefaultroles.py#L59\n\n :param region: Optional name of the region\n :return: Partition name\n \"\"\"\n\n if region is None:\n # Use Boto3 to get the region where code is running. 
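# --- reviewer sketch, not part of the original record ---
# getDictFromListofDicts above folds HAR-style [{'name': ..., 'value': ...}]
# header lists into a plain mapping; a dict comprehension does the same:
def headers_to_dict(header_list):
    return {item['name']: item['value'] for item in header_list}

assert headers_to_dict([{'name': 'Host', 'value': 'example.com'}]) == \
    {'Host': 'example.com'}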
This uses Boto's regular region resolution\n # mechanism, starting from AWS_DEFAULT_REGION environment variable.\n\n region = (\n _get_region_from_session()\n if ArnGenerator.BOTO_SESSION_REGION_NAME is None\n else ArnGenerator.BOTO_SESSION_REGION_NAME\n )\n\n # If region is still None, then we could not find the region. This will only happen\n # in the local context. When this is deployed, we will be able to find the region like\n # we did before.\n if region is None:\n raise NoRegionFound(\"AWS Region cannot be found\")\n\n return _region_to_partition(region)\n","repo_name":"isaccanedo/serverless-application-model","sub_path":"samtranslator/translator/arn_generator.py","file_name":"arn_generator.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"69818043147","text":"# encoding: utf-8\nfrom __future__ import print_function, unicode_literals, absolute_import, division\n\nimport sys\n\n\nif sys.version_info[0] == 2:\n PY2 = True\n Byte, Unicode = str, unicode\nelse:\n PY2 = False\n Byte, Unicode = bytes, str\n\n\nis_string = lambda s: True if isinstance(s, (Byte, Unicode)) else False\nto_unicode = lambda s, e=\"utf-8\": s if isinstance(s, Unicode) else s.decode(e)\nto_bytes = lambda s, e=\"utf-8\": s if isinstance(s, Byte) else s.encode(e)\nto_str = to_bytes if PY2 else to_unicode\n\n\ndef datetime2str(dt):\n if not dt:\n return \"\"\n if is_string(dt):\n return dt\n return dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n","repo_name":"byapi/byapi-python","sub_path":"byapi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6382984911","text":"\r\n\r\nfrom __future__ import division\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\n######################################################\r\ndef illuminationCorrection2(Image, kernel_size):\r\n #input: original RGB image and kernel size\r\n #output: illumination corrected RGB image\r\n ## The return can be a RGB 3 channel image, but better to show the user the green channel only\r\n ##since green channel is more clear and has higher contrast\r\n\r\n\r\n BackgroundIllumImage = cv2.medianBlur(Image, ksize = kernel_size)\r\n\r\n maximumVal = np.max(BackgroundIllumImage)\r\n minimumVal = np.min(BackgroundIllumImage)\r\n constVal = maximumVal - 128\r\n\r\n BackgroundIllumImage[BackgroundIllumImage <=10] = 100\r\n IllumImage = Image * (maximumVal / BackgroundIllumImage) - constVal\r\n IllumImage[IllumImage>255] = 255\r\n IllumImage[IllumImage<0] = 0\r\n IllumImage = np.uint8(IllumImage)\r\n\r\n IllumImage = cv2.medianBlur(IllumImage, ksize=3)\r\n\r\n return IllumImage\r\n","repo_name":"XGGNet/Vessel-Seg","sub_path":"lib/IlluminationCorrection.py","file_name":"IlluminationCorrection.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"32115608295","text":"from django.db import models\n\n# Create your models here.\nclass Identite(models.Model):\n \"\"\"Model definition for Identite.\"\"\"\n\n nom = models.CharField(max_length=50)\n logo = models.ImageField(upload_to='images/config/')\n footer_logo = models.ImageField(upload_to='images/config/')\n maps_link = models.TextField()\n favicone = models.ImageField(upload_to='images/config/')\n newsletter_message=models.TextField()\n adresse=models.CharField(max_length=255)\n date_add = 
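# --- reviewer sketch, not part of the original record ---
# illuminationCorrection2 above estimates the uneven background with a large
# median blur and rescales the image against it. The core idea, simplified
# (the kernel size is an assumption, and the record additionally subtracts a
# max-128 offset to re-centre the brightness, omitted here):
import cv2
import numpy as np

def flatten_illumination(image, kernel_size=51):
    """Divide out a median-blur background estimate; expects a uint8 image."""
    background = cv2.medianBlur(image, ksize=kernel_size).astype(np.float64)
    background[background < 1] = 1                  # avoid divide-by-zero
    corrected = image * (background.max() / background)
    return np.clip(corrected, 0, 255).astype(np.uint8)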
models.DateTimeField( auto_now_add=True)\n date_update = models.DateTimeField(auto_now=True)\n status=models.BooleanField(default=True)\n description = models.TextField()\n\n class Meta:\n \"\"\"Meta definition for Identite.\"\"\"\n\n verbose_name = 'Identite'\n verbose_name_plural = 'Identites'\n\n def __str__(self):\n \"\"\"Unicode representation of Identite.\"\"\"\n return self.nom\n\nclass Bureau(models.Model):\n \"\"\"Model definition for Bureau.\"\"\"\n\n maps_link = models.URLField()\n adresse=models.CharField(max_length=255)\n image = models.ImageField(upload_to='images/config/bureau/')\n contact = models.CharField(max_length=50)\n date_add = models.DateTimeField( auto_now_add=True)\n date_update = models.DateTimeField(auto_now=True)\n status=models.BooleanField(default=True)\n\n class Meta:\n \"\"\"Meta definition for Bureau.\"\"\"\n\n verbose_name = 'Bureau'\n verbose_name_plural = 'Bureaus'\n\n def __str__(self):\n \"\"\"Unicode representation of Bureau.\"\"\"\n return self.adresse\n\nclass Consultant(models.Model):\n \"\"\"Model definition for Consultant.\"\"\"\n\n nom = models.CharField( max_length=50)\n prenoms = models.CharField( max_length=50)\n image = models.ImageField(upload_to='images/config/bureau/')\n contact = models.CharField(max_length=50)\n email = models.EmailField()\n date_add = models.DateTimeField( auto_now_add=True)\n date_update = models.DateTimeField(auto_now=True)\n status=models.BooleanField(default=True)\n\n class Meta:\n \"\"\"Meta definition for Consultant.\"\"\"\n\n verbose_name = 'Consultant'\n verbose_name_plural = 'Consultants'\n\n def __str__(self):\n \"\"\"Unicode representation of Consultant.\"\"\"\n return self.nom\n\nclass Plainte(models.Model):\n \"\"\"Model definition for Plainte.\"\"\"\n\n nom = models.CharField( max_length=50)\n prenoms = models.CharField( max_length=50)\n contact = models.CharField(max_length=50)\n email = models.EmailField()\n cause = models.CharField(max_length=50)\n message = models.TextField()\n date_add = models.DateTimeField( auto_now_add=True)\n date_update = models.DateTimeField(auto_now=True)\n status=models.BooleanField(default=True)\n \n\n class Meta:\n \"\"\"Meta definition for Plainte.\"\"\"\n\n verbose_name = 'Plainte'\n verbose_name_plural = 'Plaintes'\n\n def __str__(self):\n \"\"\"Unicode representation of Plainte.\"\"\"\n return self.cause\n\n\nclass Page(models.Model):\n \"\"\"Model definition for Page.\"\"\"\n\n nom = models.CharField( max_length=50)\n titre = models.CharField( max_length=50)\n link = models.CharField( max_length=50)\n description = models.CharField(max_length=50)\n date_add = models.DateTimeField( auto_now_add=True)\n date_update = models.DateTimeField(auto_now=True)\n status=models.BooleanField(default=True)\n\n class Meta:\n \"\"\"Meta definition for Page.\"\"\"\n\n verbose_name = 'Page'\n verbose_name_plural = 'Pages'\n\n def __str__(self):\n \"\"\"Unicode representation of Page.\"\"\"\n return self.titre\n\n\nclass OtherPage(models.Model):\n \"\"\"Model definition for OtherPage.\"\"\"\n\n nom = models.CharField( max_length=50)\n titre = models.CharField( max_length=50)\n link = models.CharField( max_length=50)\n description = models.CharField(max_length=50)\n date_add = models.DateTimeField( auto_now_add=True)\n date_update = models.DateTimeField(auto_now=True)\n status=models.BooleanField(default=True)\n\n class Meta:\n \"\"\"Meta definition for OtherPage.\"\"\"\n\n verbose_name = 'OtherPage'\n verbose_name_plural = 'OtherPages'\n\n def __str__(self):\n \"\"\"Unicode 
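A hedged usage sketch for the configuration models above; it assumes the app is importable as config inside a configured Django project, and all field values are placeholders:

from config.models import Bureau  # assumed app/module path

bureau = Bureau.objects.create(
    maps_link="https://maps.example.com/office",   # placeholder URL
    adresse="12 Rue Exemple, Cotonou",
    image="images/config/bureau/front.jpg",        # stored file name, not an upload
    contact="+229 00 00 00 00",
)
# date_add/date_update are filled automatically; status defaults to True.
print(Bureau.objects.filter(status=True).count())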
representation of OtherPage.\"\"\"\n        return self.titre\n\nclass HeaderSlide(models.Model):\n    \"\"\"Model definition for HeaderSlide.\"\"\"\n\n    image = models.ImageField(upload_to='images/config/header/')\n    description = models.CharField( max_length=50)\n    page= models.ForeignKey(Page, related_name='page', on_delete=models.CASCADE)\n    sous_titre=models.CharField(max_length=50, null=True)\n    sous_description = models.TextField(null=True)\n    date_add = models.DateTimeField( auto_now_add=True)\n    date_update = models.DateTimeField(auto_now=True)\n    status=models.BooleanField(default=True)\n\n    class Meta:\n        \"\"\"Meta definition for HeaderSlide.\"\"\"\n\n        verbose_name = 'HeaderSlide'\n        verbose_name_plural = 'HeaderSlides'\n\n    def __str__(self):\n        \"\"\"Unicode representation of HeaderSlide.\"\"\"\n        return self.description\n\n\n\nclass Header(models.Model):\n    \"\"\"Model definition for Header.\"\"\"\n\n    image = models.ImageField(upload_to='images/config/header/')\n    description = models.CharField( max_length=50)\n    page= models.ForeignKey(OtherPage, related_name='page', on_delete=models.CASCADE)\n    date_add = models.DateTimeField( auto_now_add=True)\n    date_update = models.DateTimeField(auto_now=True)\n    status=models.BooleanField(default=True)\n\n    class Meta:\n        \"\"\"Meta definition for Header.\"\"\"\n\n        verbose_name = 'Header'\n        verbose_name_plural = 'Headers'\n\n    def __str__(self):\n        \"\"\"Unicode representation of Header.\"\"\"\n        return self.description\n\nclass SocialLink(models.Model):\n    \"\"\"Model definition for SocialLink.\"\"\"\n\n    nom = models.CharField( max_length=50)\n    lien = models.URLField()\n    icone_classe = models.CharField(max_length=50, null=True)\n    date_add = models.DateTimeField( auto_now_add=True)\n    date_update = models.DateTimeField(auto_now=True)\n    status=models.BooleanField(default=True)\n\n    class Meta:\n        \"\"\"Meta definition for SocialLink.\"\"\"\n\n        verbose_name = 'SocialLink'\n        verbose_name_plural = 'SocialLinks'\n\n    def __str__(self):\n        \"\"\"Unicode representation of SocialLink.\"\"\"\n        return self.nom\n\nclass Caracteristique(models.Model):\n    \"\"\"Model definition for Caracteristique.\"\"\"\n\n    nom = models.CharField(max_length=50)\n    description = models.TextField()\n    icone_classe = models.CharField(max_length=50)\n    date_add = models.DateTimeField( auto_now_add=True)\n    date_update = models.DateTimeField(auto_now=True)\n    status=models.BooleanField(default=True)\n\n    class Meta:\n        \"\"\"Meta definition for Caracteristique.\"\"\"\n\n        verbose_name = 'Caracteristique'\n        verbose_name_plural = 'Caracteristiques'\n\n    def __str__(self):\n        \"\"\"Unicode representation of Caracteristique.\"\"\"\n        return self.nom\n\n","repo_name":"Sedrickgael/estate","sub_path":"estate/config/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"6270879158","text":"#Zombie_礼弥\n\n\n# import modules ---------------------------------------------------------------------\nimport urllib.request\nimport urllib.parse\nimport json\nimport re\n\n\n#list---------------------------------------------------------------\ngvl = []\n\n# look up the city ID -----------------------------------------------------------\ndef inp():\n    sr = input('输入要查询的城市:')\n    sr = urllib.parse.quote(sr)\n\n    url = urllib.request.urlopen(f'http://toy1.weather.com.cn/search?cityname={sr}').read().decode(\"utf-8\")\n\n    cityid = re.search(r'(\"ref\":\")(\\d*?)(~)',url)\n    cityid = cityid.group(2)\n    \n    
gvl.append(cityid)\n\n\n# request and parse the weather data -------------------------------------------------------------\n\ndef index():\n    i = gvl[0]\n    i = urllib.parse.quote(i)\n\n    a = urllib.request.urlopen(f'http://t.weather.sojson.com/api/weather/city/{i}').read().decode('utf-8')\n\n    b = json.loads(a)\n    \n    gvl.append(b)\n    \n\n# display label strings ------------------------------------------------------------\nrq = '日期:'\ngw = '最高:'\ndw = '最低:'\nxq = '星期:'\nrc = '日出时间:'\nrl = '日落时间:'\nqi = '空气质量指数:'\nfxi = '风向:'\nfs = '风力:'\ntq = '天气:'\ntx = '建议:'\nsd = '空气湿度:'\npm = 'PM2.5:'\np10 = 'PM10:'\nzl = '质量:'\nwd = '℃'\n\n\n# city name\ndef city():\n    \n    city = gvl[1]['cityInfo']['city']\n    parent = gvl[1]['cityInfo']['parent']\n    print(f'您是不是要查找:{parent} 的 {city} ?')\n\n\n# current weather\ndef real_time():\n    shidu = gvl[1]['data']['shidu']\n    pm25 = gvl[1]['data']['pm25']\n    pm10 = gvl[1]['data']['pm10']\n    quality = gvl[1]['data']['quality']\n    wendu = gvl[1]['data']['wendu']\n    ganmao = gvl[1]['data']['ganmao']\n\n    print(f'{sd}{shidu}\\n{pm}{pm25}\\n{p10}{pm10}\\n{zl}{quality}\\n温度:{wendu}{wd}\\n{tx}{ganmao}')\n\n\n# weather forecast\n# fields, in order: date, high, low, weekday, sunrise, sunset, AQI, wind direction, wind force, weather, advice\n\ndef day(s):\n    \n    ymd = gvl[1]['data']['forecast'][s]['ymd']\n    print(f'------------------------------{ymd}--------------------------------')\n    high = gvl[1]['data']['forecast'][s]['high']\n    low = gvl[1]['data']['forecast'][s]['low']\n    week = gvl[1]['data']['forecast'][s]['week']\n    sunrise = gvl[1]['data']['forecast'][s]['sunrise']\n    sunset = gvl[1]['data']['forecast'][s]['sunset']\n    aqi = gvl[1]['data']['forecast'][s]['aqi']\n    fx = gvl[1]['data']['forecast'][s]['fx']\n    fl = gvl[1]['data']['forecast'][s]['fl']\n    tianqi = gvl[1]['data']['forecast'][s]['type']\n    notice = gvl[1]['data']['forecast'][s]['notice']\n\n    print(f'{rq}{ymd}\\n{gw}{high}\\n{dw}{low}\\n{xq}{week}\\n{rc}{sunrise}\\n{rl}{sunset}\\n{qi}{aqi}\\n{fxi}{fx}\\n{fs}{fl}\\n{tq}{tianqi}\\n{tx}{notice}')\n\n    print('\\n')\n    \n\n# show the city\ndef cils():\n    city()\n    print(\"\\n\")\n\n\n# list the weather (current)\ndef ssls():\n    print('----------------------------实时天气---------------------------\\n')\n    real_time()\n    print('\\n')\n\n\n# list the weather (forecast)\n\n\ndef ls():\n    print('----------------------------预报天气---------------------------\\n')\n    s = 0\n    while True:\n        try:\n            day(s)\n            s += 1\n        except (IndexError, KeyError):\n            break\n\n\n# run\nif __name__ == '__main__':\n    inp()\n    index()\n    cils()\n    ssls()\n    ls()\n    \n    input('按Enter以退出')\n","repo_name":"wind-Lv/Restart-website","sub_path":"models/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9929370444","text":"import unittest\nimport sys\nimport os\n\n# getting the name of the directory\n# where this file is present.\ncurrent = os.path.dirname(os.path.realpath(__file__))\n\n# Getting the parent directory name\n# where the current directory is present.\nparent = os.path.dirname(current)\n\n# adding the parent directory to\n# the sys.path.\nsys.path.append(parent)\n\nfrom py_files.weather import weather_client\nfrom py_files.nyt import nyt_client\n\nINPUT = \"INPUT\"\nEXPECTED_OUTPUT = \"EXPECTED_OUTPUT\"\n\n\nclass VerifyCityNameTest(unittest.TestCase):\n    def setUp(self):\n        self.success_test_params = [\n            {\n                INPUT: \"\",\n                EXPECTED_OUTPUT: False,\n            },\n            {\n                INPUT: None,\n                EXPECTED_OUTPUT: False,\n            },\n            {\n                INPUT: \"Washington\",\n                EXPECTED_OUTPUT: True,\n            },\n        ]\n\n    def test_VerifyCityName(self):\n        client = weather_client()\n        for test in self.success_test_params:\n            
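As a side note on the weather script above, the open-ended loop in ls() can walk the forecast list directly instead of probing indexes until an exception fires; a sketch, assuming the sojson response shape used above:

def list_forecast(weather_json):
    # Iterate the forecast entries directly rather than counting until IndexError.
    for entry in weather_json['data']['forecast']:
        print(entry['ymd'], entry['high'], entry['low'], entry['type'], entry['notice'])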
self.assertEqual(client.verifyCity(test[INPUT]), test[EXPECTED_OUTPUT])\n\n\nclass getArticleTest(unittest.TestCase):\n def setUp(self):\n self.success_test_params = [\n {\n INPUT: (\"\"),\n EXPECTED_OUTPUT: [\n (\"headlines\", []),\n (\"abstract\", []),\n (\"web_url\", []),\n (\"img_url\", []),\n (\"lead_paragraph\", []),\n ],\n },\n ]\n self.failure_test_params = [\n {\n INPUT: (\"\"),\n EXPECTED_OUTPUT: [\n (\"headlines\", [\"headline1\", \"headline2\", \"headline3\", \"headline4\", \"headline5\"]),\n (\"abstract\", [\"abstract1\", \"abstract2\", \"abstract3\", \"abstract4\", \"abstract5\"]),\n (\"web_url\", [\"web_url1\", \"web_url2\", \"web_url3\", \"web_url4\", \"web_url5\"]),\n (\"img_url\", [\"img_url1\", \"img_url2\", \"img_url3\", \"img_url4\", \"img_url5\"]),\n (\"lead_paragraph\", [\"lead_paragraph1\", \"lead_paragraph2\", \"lead_paragraph3\", \"lead_paragraph4\", \"lead_paragraph5\"]),\n ],\n },\n ]\n\n def test_getArticle(self):\n # Do not assign values to the class and see if the null values are being returned from the function\n client = nyt_client()\n for test in self.success_test_params:\n self.assertEqual(client.getArticle(), test[EXPECTED_OUTPUT])\n\n # Assign values to the class and see if the new values are being returned from the function\n client.headlines = [\"headline1\", \"headline2\", \"headline3\", \"headline4\", \"headline5\"]\n client.abstract = [\"abstract1\", \"abstract2\", \"abstract3\", \"abstract4\", \"abstract5\"]\n client.web_url = [\"web_url1\", \"web_url2\", \"web_url3\", \"web_url4\", \"web_url5\"]\n client.img_url = [\"img_url1\", \"img_url2\", \"img_url3\", \"img_url4\", \"img_url5\"]\n client.lead_paragraph = [\"lead_paragraph1\", \"lead_paragraph2\", \"lead_paragraph3\", \"lead_paragraph4\", \"lead_paragraph5\"]\n for test in self.failure_test_params:\n self.assertEqual(client.getArticle(), test[EXPECTED_OUTPUT])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"amdMUST/swe-project2","sub_path":"tests/unmocked_unit_tests.py","file_name":"unmocked_unit_tests.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37293739938","text":"#!/usr/bin/python3\n\"\"\"base class for the project almost a circle\"\"\"\nimport json\n\n\nclass Base:\n \"\"\"Private class attribute\"\"\"\n __nb_objects = 0\n\n def __init__(self, id=None):\n \"\"\"initilizates the class \"\"\"\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\"Return the JSON representation of a list of dict\"\"\"\n if list_dictionaries is None or len(list_dictionaries) == 0:\n return \"[]\"\n return json.dumps(list_dictionaries)\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\"JSON string representation of list_objs to a file\"\"\"\n if list_objs is None:\n list_objs = []\n file_name = f\"{cls.__name__}.json\"\n with open(file_name, 'w') as file:\n json_string = (cls.to_json_string([obj.to_dictionary()\n for obj in list_objs]))\n file.write(json_string)\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\"Return a list of dictionaries from the JSON string repr\"\"\"\n if json_string is None or json_string == \"\":\n return []\n return json.loads(json_string)\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"Return an instance with all attributes set from a dictionary\"\"\"\n if cls.__name__ == \"Rectangle\":\n shape = cls(1, 1)\n elif 
cls.__name__ == \"Square\":\n shape = cls(1)\n else:\n shape = None\n\n shape.update(**dictionary)\n return shape\n \n @classmethod\n def load_from_file(cls):\n \"\"\"Return a list of instances from a JSON file\"\"\"\n file_name = f\"{cls.__name__}.json\" \n\n try:\n with open(file_name, 'r') as file:\n json_string = file.read()\n dict_list = Base.from_json_string(json_string)\n instance_list = [cls.create(**d) for d in dict_list]\n return instance_list\n except FileNotFoundError:\n return []","repo_name":"LuciaPuppo897/holbertonschool-higher_level_programming","sub_path":"python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7886614210","text":"\n\nimport unittest\nfrom cpuinfo import *\nimport helpers\n\n\nclass MockDataSource:\n\tbits = '64bit'\n\tcpu_count = 16\n\tis_windows = True\n\tarch_string_raw = 'AMD64'\n\tuname_string_raw = 'AMD64 Family 23 Model 8 Stepping 2, AuthenticAMD'\n\tcan_cpuid = True\n\n\t@staticmethod\n\tdef winreg_processor_brand():\n\t\treturn 'AMD Ryzen 7 2700X Eight-Core Processor '\n\n\t@staticmethod\n\tdef winreg_vendor_id_raw():\n\t\treturn 'AuthenticAMD'\n\n\t@staticmethod\n\tdef winreg_arch_string_raw():\n\t\treturn 'AMD64'\n\n\t@staticmethod\n\tdef winreg_hz_actual():\n\t\treturn 3693\n\n\t@staticmethod\n\tdef winreg_feature_bits():\n\t\treturn 1010515455\n\n\n\n\nclass TestWindows_10_X86_64_Ryzen7(unittest.TestCase):\n\tdef setUp(self):\n\t\tcpuinfo.CAN_CALL_CPUID_IN_SUBPROCESS = False\n\t\thelpers.backup_data_source(cpuinfo)\n\t\thelpers.monkey_patch_data_source(cpuinfo, MockDataSource)\n\n\t\thelpers.backup_cpuid(cpuinfo)\n\t\thelpers.monkey_patch_cpuid(cpuinfo, 3693000000, [\n\t\t\t# get_max_extension_support\n\t\t\t0x8000001f,\n\t\t\t# get_cache\n\t\t\t0x2006140,\n\t\t\t# get_info\n\t\t\t0x800f82,\n\t\t\t# get_processor_brand\n\t\t\t0x20444d41, 0x657a7952, 0x2037206e,\n\t\t\t0x30303732, 0x69452058, 0x2d746867,\n\t\t\t0x65726f43, 0x6f725020, 0x73736563,\n\t\t\t0x2020726f, 0x20202020, 0x202020,\n\t\t\t# get_vendor_id\n\t\t\t0x68747541, 0x444d4163, 0x69746e65,\n\t\t\t# get_flags\n\t\t\t0x178bfbff, 0x7ed8320b, 0x209c01a9,\n\t\t\t0x0, 0x20000000, 0x35c233ff,\n\t\t])\n\n\tdef tearDown(self):\n\t\thelpers.restore_data_source(cpuinfo)\n\t\thelpers.restore_cpuid(cpuinfo)\n\t\tcpuinfo.CAN_CALL_CPUID_IN_SUBPROCESS = True\n\n\t'''\n\tMake sure calls return the expected number of fields.\n\t'''\n\tdef test_returns(self):\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_wmic()));\n\t\tself.assertEqual(7, len(cpuinfo._get_cpu_info_from_registry()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_lscpu()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_dmesg()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_cat_var_run_dmesg_boot()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))\n\t\tself.assertEqual(11, len(cpuinfo._get_cpu_info_from_cpuid()))\n\t\tself.assertEqual(3, len(cpuinfo._get_cpu_info_from_platform_uname()))\n\t\tself.assertEqual(20, len(cpuinfo._get_cpu_info_internal()))\n\n\n\tdef 
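A short round-trip sketch for the Base JSON helpers above; the Square subclass here is a stand-in for the real model (its attributes are assumptions for illustration), but the save/load flow is the one the classmethods implement:

class Square(Base):
    def __init__(self, size, id=None):
        super().__init__(id)
        self.size = size

    def to_dictionary(self):
        return {'id': self.id, 'size': self.size}

    def update(self, **attrs):
        for name, value in attrs.items():
            setattr(self, name, value)


s = Square(3)
Square.save_to_file([s])          # writes Square.json via to_json_string
clones = Square.load_from_file()  # re-creates instances via cls.create(**d)
print(clones[0].size)             # 3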
test_get_cpu_info_from_cpuid(self):\n\t\tinfo = cpuinfo._get_cpu_info_from_cpuid()\n\n\t\tself.assertEqual('AuthenticAMD', info['vendor_id_raw'])\n\t\tself.assertEqual('AMD Ryzen 7 2700X Eight-Core Processor', info['brand_raw'])\n\t\t#self.assertEqual('3.6930 GHz', info['hz_advertised_friendly'])\n\t\tself.assertEqual('3.6930 GHz', info['hz_actual_friendly'])\n\t\t#self.assertEqual((3693000000, 0), info['hz_advertised'])\n\t\tself.assertEqual((3693000000, 0), info['hz_actual'])\n\n\t\tself.assertEqual(2, info['stepping'])\n\t\tself.assertEqual(8, info['model'])\n\t\tself.assertEqual(23, info['family'])\n\n\t\tself.assertEqual(64 * 1024, info['l2_cache_size'])\n\t\tself.assertEqual(512, info['l2_cache_line_size'])\n\t\tself.assertEqual(6, info['l2_cache_associativity'])\n\n\t\tself.assertEqual(\n\t\t\t['3dnowprefetch', 'abm', 'adx', 'aes', 'apic', 'avx', 'avx2',\n\t\t\t'bmi1', 'bmi2', 'clflush', 'clflushopt', 'cmov', 'cmp_legacy',\n\t\t\t'cr8_legacy', 'cx16', 'cx8', 'dbx', 'de', 'extapic', 'f16c',\n\t\t\t'fma', 'fpu', 'fxsr', 'ht', 'lahf_lm', 'lm', 'mca', 'mce',\n\t\t\t'misalignsse', 'mmx', 'monitor', 'movbe', 'msr', 'mtrr', 'osvw',\n\t\t\t'osxsave', 'pae', 'pat', 'pci_l2i', 'pclmulqdq', 'perfctr_core',\n\t\t\t'perfctr_nb', 'pge', 'pni', 'popcnt', 'pse', 'pse36', 'rdrnd',\n\t\t\t'rdseed', 'sep', 'sha', 'skinit', 'smap', 'smep', 'sse', 'sse2',\n\t\t\t'sse4_1', 'sse4_2', 'sse4a', 'ssse3', 'svm', 'tce', 'topoext',\n\t\t\t'tsc', 'vme', 'wdt', 'xsave']\n\t\t\t,\n\t\t\tinfo['flags']\n\t\t)\n\n\tdef test_get_cpu_info_from_platform_uname(self):\n\t\tinfo = cpuinfo._get_cpu_info_from_platform_uname()\n\n\t\tself.assertEqual(2, info['stepping'])\n\t\tself.assertEqual(8, info['model'])\n\t\tself.assertEqual(23, info['family'])\n\n\tdef test_get_cpu_info_from_registry(self):\n\t\tinfo = cpuinfo._get_cpu_info_from_registry()\n\n\t\tself.assertEqual('AuthenticAMD', info['vendor_id_raw'])\n\t\tself.assertEqual('AMD Ryzen 7 2700X Eight-Core Processor', info['brand_raw'])\n\t\tself.assertEqual('3.6930 GHz', info['hz_advertised_friendly'])\n\t\tself.assertEqual('3.6930 GHz', info['hz_actual_friendly'])\n\t\tself.assertEqual((3693000000, 0), info['hz_advertised'])\n\t\tself.assertEqual((3693000000, 0), info['hz_actual'])\n\n\t\tself.assertEqual(\n\t\t\t['3dnow', 'clflush', 'cmov', 'de', 'dts', 'fxsr', 'ia64', 'mca',\n\t\t\t'mmx', 'msr', 'mtrr', 'pse', 'sep', 'sepamd', 'serial', 'ss',\n\t\t\t'sse', 'sse2', 'tm', 'tsc']\n\t\t\t,\n\t\t\tinfo['flags']\n\t\t)\n\n\tdef test_all(self):\n\t\tinfo = cpuinfo._get_cpu_info_internal()\n\n\t\tself.assertEqual('AuthenticAMD', info['vendor_id_raw'])\n\t\tself.assertEqual('AMD Ryzen 7 2700X Eight-Core Processor', info['brand_raw'])\n\t\tself.assertEqual('3.6930 GHz', info['hz_advertised_friendly'])\n\t\tself.assertEqual('3.6930 GHz', info['hz_actual_friendly'])\n\t\tself.assertEqual((3693000000, 0), info['hz_advertised'])\n\t\tself.assertEqual((3693000000, 0), info['hz_actual'])\n\t\tself.assertEqual('X86_64', info['arch'])\n\t\tself.assertEqual(64, info['bits'])\n\t\tself.assertEqual(16, info['count'])\n\n\t\tself.assertEqual('AMD64', info['arch_string_raw'])\n\n\t\tself.assertEqual(2, info['stepping'])\n\t\tself.assertEqual(8, info['model'])\n\t\tself.assertEqual(23, info['family'])\n\n\t\tself.assertEqual(64 * 1024, info['l2_cache_size'])\n\t\tself.assertEqual(6, info['l2_cache_associativity'])\n\t\tself.assertEqual(512, info['l2_cache_line_size'])\n\n\t\tself.assertEqual(\n\t\t\t['3dnow', '3dnowprefetch', 'abm', 'adx', 'aes', 'apic', 'avx',\n\t\t\t'avx2', 'bmi1', 'bmi2', 
'clflush', 'clflushopt', 'cmov',\n\t\t\t'cmp_legacy', 'cr8_legacy', 'cx16', 'cx8', 'dbx', 'de', 'dts',\n\t\t\t'extapic', 'f16c', 'fma', 'fpu', 'fxsr', 'ht', 'ia64', 'lahf_lm',\n\t\t\t'lm', 'mca', 'mce', 'misalignsse', 'mmx', 'monitor', 'movbe',\n\t\t\t'msr', 'mtrr', 'osvw', 'osxsave', 'pae', 'pat', 'pci_l2i',\n\t\t\t'pclmulqdq', 'perfctr_core', 'perfctr_nb', 'pge', 'pni',\n\t\t\t'popcnt', 'pse', 'pse36', 'rdrnd', 'rdseed', 'sep', 'sepamd',\n\t\t\t'serial', 'sha', 'skinit', 'smap', 'smep', 'ss', 'sse', 'sse2',\n\t\t\t'sse4_1', 'sse4_2', 'sse4a', 'ssse3', 'svm', 'tce', 'tm',\n\t\t\t'topoext', 'tsc', 'vme', 'wdt', 'xsave']\n\t\t\t,\n\t\t\tinfo['flags']\n\t\t)\n","repo_name":"workhorsy/py-cpuinfo","sub_path":"tests/test_windows_10_x86_64_ryzen_7.py","file_name":"test_windows_10_x86_64_ryzen_7.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","stars":285,"dataset":"github-code","pt":"81"} +{"seq_id":"5924643792","text":"\n'''\nDetecting changes in a file:\n\nIf you had a file has before it was modified, you can rehash the file\nand compare it against the original hash to see if it has been modified.\n\n'''\nimport hashlib\n# Shows hashing algorithms found in the module\nprint(hashlib.algorithms_guaranteed)\n\n# Construct a hash object using one of the hashing algorithm\n#h = hashlib.sha256()\n\n# Update the hash using a bytes object\n#h.update('Hello World!'.encode('utf-8'))\n\n'''\n# Print the hash value as a hex string\nprint(h.hexdigest())\n\nprint(h.digest())\n'''\n\nfilename = \"hello_world.py\"\nBLOCK_SIZE = 65536 # The size of each read from the file\n\nfile_hash = hashlib.sha3_256()\n\nwith open(filename, 'rb') as f:\n\twhile True:\n\t\tdata = f.read(BLOCK_SIZE)\n\n\t\tif not data:\n\t\t\tbreak\n\t\t# Update the hash if there is data\n\t\tfile_hash.update(data) \n\n# Get hexadecimal digest of hash\nprint(file_hash.hexdigest())\n\n\n\n\n\n\n\n\n","repo_name":"KyawHtetWin/Peer-to-Peer-File-Sharing-System","sub_path":"Zero-Copy Send/hashing_file.py","file_name":"hashing_file.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"22654630646","text":"import datetime as dt\nimport numpy as np\nimport pandas as pd\nimport yfinance as yf\nfrom pandas_datareader import data as pdr\nimport matplotlib.pyplot as plt\nimport ticker as readData\n\ndef var_historic(r, year,level=1):\n var_hist = []\n\n for x in range(0,3):\n r_rolling = []\n for z in range(0,len(r)):\n if year[z]==(2017+x):\n r_rolling.append(r[z])\n\n var = np.percentile(r_rolling, level) \n var_hist.append(var) \n \n r_test = []\n for t in range(0,len(r)):\n if year[t]==(2018+x):\n r_test.append(r[t])\n print(len(r_test))\n print((sum(r_test < var)))\n \n \n return var_hist\n\ndef var_montecarlo(r, year ,level=1):\n var_mc = []\n for x in range(0,3):\n r_rolling = []\n for z in range(0,len(r)):\n if year[z]==(2018+x):\n r_rolling.append(r[z])\n \n r_mean = np.mean(r_rolling)\n std = np.std(r_rolling)\n Z = np.random.normal(r_mean, std, 1000000)\n var = np.percentile(Z, level) \n var_mc.append(var) \n \n print(len(r_rolling))\n print((sum(r_rolling < var)))\n \n return var_mc\nres = readData.read('PTY')\nPTY = res['return']\nyear_PTY = []\ndf= res.reset_index(inplace=True)\ndf = res.rename(columns = {'index':'Date'})\ndate_PTY = df[\"Date\"] \n\n\nfor y in range(0,len(date_PTY)):\n d = date_PTY[y]\n year_PTY.append(d.year)\nVaR_hist_PTY = 
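The chunked-hash pattern above generalizes into a small helper; a sketch of detecting changes by comparing digests (the file name is a placeholder):

import hashlib

def file_digest(path, block_size=65536):
    # Hash the file in fixed-size blocks so large files never sit in memory at once.
    h = hashlib.sha3_256()
    with open(path, 'rb') as f:
        while True:
            block = f.read(block_size)
            if not block:
                break
            h.update(block)
    return h.hexdigest()

# A file has changed iff its digest no longer matches the recorded one.
original = file_digest("hello_world.py")
changed = file_digest("hello_world.py") != original  # False until the file is edited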
var_historic(PTY,year_PTY)\nprint(VaR_hist_PTY)\n\nVaR_mc_PTY = var_montecarlo(PTY,year_PTY)\n\nprint(VaR_mc_PTY)\n\n\n","repo_name":"alfa14290/RiskModelling","sub_path":"varHistMontecarlo.py","file_name":"varHistMontecarlo.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"73286813066","text":"import os\nimport sys\nimport pdb\nimport cv2\nimport glob\nimport json\nimport copy\nimport struct\nimport argparse\nimport datetime\nimport scipy.io\nimport numpy as np\nimport open3d as o3d\nimport numpy.typing as npt\nfrom typing import Literal\nfrom pygltflib import GLTF2\nfrom scipy.spatial.transform import Rotation as R\n\nfrom . import algebra_utils as alg\nfrom .decorators import timer, verify_format\n\n\ndef data_from_accessor(glb, accessor):\n \"\"\"\n return the data from an accessor to a numpy array\n \"\"\"\n bufferView = glb.bufferViews[accessor.bufferView]\n buffer = glb.buffers[bufferView.buffer]\n data_binary = glb.get_data_from_buffer_uri(buffer.uri)\n # pull each vertex from the binary buffer and convert it into a tuple of python floats\n vertices = []\n for i in range(accessor.count):\n index = bufferView.byteOffset + accessor.byteOffset + i*12 # the location in the buffer of this vertex\n d = data_binary[index:index+12] # the vertex data\n v = struct.unpack(\"0]\n elif label in ['piedroit', 'gousset']:\n if '-W' in name:\n points = points[points[...,0]>0]\n elif '-E' in name:\n points = points[points[...,0]<0]\n elif label == 'mur':\n if '-NE' in name:\n points = points[points[...,0]<0]\n points = points[points[...,1]<0]\n elif '-NW' in name:\n points = points[points[...,0]>0]\n points = points[points[...,1]<0]\n elif '-SE' in name:\n points = points[points[...,0]<0]\n points = points[points[...,1]>0]\n elif '-SW' in name:\n points = points[points[...,0]>0]\n points = points[points[...,1]>0]\n elif label == 'corniche':\n if '-N' in name:\n points = points[points[...,1]<0]\n elif '-S' in name:\n points = points[points[...,1]>0]\n\n return points\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n","repo_name":"tati-/AI_assisted_bridge_inspection","sub_path":"src/modules/point_cloud_utils.py","file_name":"point_cloud_utils.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35553691914","text":"from math import *\r\nimport random\r\n\r\nwords = [\"cat\", \"hat\", \"pop\", \"sea\", \"row\", \"cup\", \"lake\"]\r\na = random.randrange(0,6)\r\nwordChoice = words[a]\r\nlength = len(wordChoice)\r\ncount = len(wordChoice)+ 5\r\nuser1 = input((\"Let\\'s play a game of Hangman. The word has {} letters in it. \\n You get {} guesses. 
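The historical VaR above reduces to a percentile of returns; the core computation in isolation, on synthetic data (the return series here is made up):

import numpy as np

rng = np.random.default_rng(0)
returns = rng.normal(0.0005, 0.01, 1000)  # made-up daily returns
var_99 = np.percentile(returns, 1)        # 1st percentile = 99% historical VaR
print(round(var_99, 4))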
Guess a letter \").format(length, count))\r\ncount = count - 1\r\nchar = list(wordChoice)\r\nword = ''\r\nguess = user1\r\ncharSorted = sorted(char)\r\nwhile count > 0:\r\n if guess in char:\r\n print(\"Great Guess!\")\r\n word = word + guess\r\n count = count -1\r\n charGuess = list(word)\r\n charGuessSort = sorted(charGuess)\r\n if charGuessSort == charSorted:\r\n print(\"You got it!\")\r\n break\r\n else:\r\n guess = input(\"Please guess again. \")\r\n else:\r\n guess = input(\"Sorry, try again. \")\r\n count = count -1\r\nprint((\"The word was {}. Thanks for playing!\").format(wordChoice))\r\n\r\n","repo_name":"Ghayani1098/Ppython_rep","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21610754704","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass DataVisualization():\n \n @staticmethod\n def training_test_loss(history):\n plt.plot(history.history['loss'], label='Training set loss')\n plt.plot(history.history['val_loss'], label='Test set loss', color='r')\n plt.legend(['Training set loss', 'Test set loss'])\n\n @staticmethod\n def data_reconstruction(original, reconstruction, index, features):\n plt.figure(figsize=(15,5))\n plt.subplot(1,2,1)\n plt.plot(original[index],'b')\n plt.plot(reconstruction[index],'r')\n plt.fill_between(np.arange(features), reconstruction[index], original[index], color='lightcoral')\n plt.legend(labels=[\"Original\", \"Reconstruction\", \"Error\"])","repo_name":"paupaf3/tfg-anomaly-detection","sub_path":"common/data_visualization.py","file_name":"data_visualization.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5057269344","text":"import pandas as pd\nimport joblib\n\ncolumn = \"Job Title,Employment Type,Experience Level,Expertise Level,Salary,Salary Currency,Company Location,Salary in USD,Employee Residence,Company Size,Year\"\ndata = \"Data Scientist,Full-Time,Senior,Expert,130000,United States Dollar,United States,130000,United States,Medium,2023\"\n\ncolumn = column.split(\",\")\ndata = data.split(\",\")\ndf = pd.DataFrame([data], columns=column)\n\npreprocessor = joblib.load(\"./Data_Science_Salary/label.pkl\")\nlabel_cloumn = [\n \"Experience Level\",\n \"Expertise Level\",\n \"Company Size\",\n \"Job Title\",\n \"Employment Type\",\n \"Salary Currency\",\n \"Company Location\",\n]\n\nencode_data = pd.DataFrame(\n preprocessor.transform(df[label_cloumn]).toarray(),\n columns=preprocessor.get_feature_names_out(label_cloumn),\n)\n\ndf = pd.concat([df[[\"Year\", \"Salary in USD\"]], encode_data], axis=1)\n\nX = df.drop(\"Salary in USD\", axis=1)\ny = df[\"Salary in USD\"]\n\nmodel = joblib.load(\"./Data_Science_Salary/model_final.pkl\")\npredictions = model.predict(X)\npredictions = pd.DataFrame({\"Predicted Salary\": predictions, \"Real Salary\": y})\nprint(predictions)\n","repo_name":"JakeXiaox/Machine-Learning","sub_path":"Data_Science_Salary/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27202621802","text":"from flask import Flask, request, jsonify, json, Response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models.model import db, Users, Favorites, UserSchema, FavoritesSchema\nfrom key import key\nimport requests\nfrom flask_cors import CORS, 
cross_origin\nfrom random import randint\n\n\napp = Flask(__name__)\ndb.init_app(app)\nCORS(app)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:admin@localhost/RestaurantRoulette'\n\n\nuser_schema = UserSchema()\nusers_schema = UserSchema(many=True)\nfavorite_schema = FavoritesSchema()\nfavorites_schema = FavoritesSchema(many=True, only=('id', 'content'))\n############# Register API requests ###################\n# Accounting for\n@app.route('/register', methods=['POST'])\ndef register():\n try:\n username = request.json['username']\n password = request.json['password']\n insertTest = Users(username, password)\n db.session.add(insertTest)\n print(username, password)\n db.session.commit()\n return 200, \"Success\"\n except:\n return 400, \"Failed\"\n\n\n############# user API requests ###################\n@app.route('/user', methods=['GET', 'POST', 'DELETE'])\ndef user():\n # For profile\n # if (request.method == 'GET'):\n # username = request.headers['username']\n # users = Users.query.filter_by(username=username).all()\n # # Serialize the queryset\n # result = users_schema.dump(users)\n # # return jsonify({'username': result.data[0][\"username\"]})\n # resMessage = {\n # \"username\": result.data[0][\"username\"]\n # }\n # res = jsonify(resMessage)\n # resMessage.status_code = 200\n # return res\n\n # Checking username and password verification\n if (request.method == 'POST'):\n username = request.json['username']\n password = request.json['password']\n users = Users.query.filter_by(username=username).all()\n result = users_schema.dump(users)\n if (username == result.data[0][\"username\"]):\n if (password == result.data[0][\"password\"]):\n resMessage = {\n \"message\": \"Success\",\n }\n res = jsonify(resMessage)\n res.status_code = 200\n else:\n resMessage = {\n \"message\": \"Incorrect username or password\",\n }\n res = jsonify(resMessage)\n resMessage.status_code = 400\n\n # For Deleting accounts\n # Account for relationship with favorites list\n elif (request.method == 'DELETE'):\n try:\n username = request.json['username']\n deleteTest = Users.query.filter_by(username=username).first()\n db.session.delete(deleteTest)\n db.session.commit()\n resMessage = {\n \"message\": \"Success\",\n }\n res = jsonify(resMessage)\n res.status_code = 200\n except Exception as e:\n resMessage = {\n \"message\": \"Failure\",\n }\n res = jsonify(resMessage)\n res.status_code = 400\n\n return res\n\n############## favoites API requests ##########################\n# profile or a way to show the list of what they have as favorites\n@app.route('/favorites', methods=['GET', 'POST', 'DELETE'])\ndef favorites():\n # Need to be able to list all of the restaurants as a json\n if (request.method == 'GET'):\n username = request.json['username']\n email = Favorites.query.filter_by(username=\"Email@email.com\").all()\n # Initialize a employee list\n favList = []\n # create a instances for filling up employee list\n for element in email:\n entry = {\n 'fav_id': element.fav_id,\n 'restaurant_name': element.restaurant_name}\n favList.append(entry)\n\n return(jsonify(favList))\n # This is for favorting a place to eat\n elif (request.method == 'POST'):\n # Still need to accunt for duplicate entries\n # The goal is for DEFAULT be the primary key\n username = request.json['username']\n restaurant_name = request.json['restaurant_name']\n insertTest = Favorites( None , username, restaurant_name)\n db.session.add(insertTest)\n db.session.commit()\n return (\"This is a post request\")\n\n # For deleting an 
restaurant names\n elif (request.method == 'DELETE'):\n try:\n fav_id = request.json['fav_id']\n deleteTest = Favorites.query.filter_by(fav_id=fav_id).first()\n db.session.delete(deleteTest)\n db.session.commit()\n return (\"This is a delete request\")\n except Exception as e:\n return(\"Failed Delete Request\")\n\n####################### Google Maps API ################################\nsearch_url = \"https://maps.googleapis.com/maps/api/place/textsearch/json\"\ndetails_url = \"https://maps.googleapis.com/maps/api/place/details/json\"\n\n@app.route('/maps')\ndef maps():\n params = {\n 'query': request.args.get('location'),\n 'radius': request.args.get('radius') or '5000',\n 'maxprice': request.args.get('maxprice') or '4',\n 'type': 'restaurant',\n 'key': key\n }\n ########BY DEFAULT IT WILL SEND A LOT OF PLACES!!!! NO FIX YET BY GOOGLE\n search_json = requests.get(search_url, params=params).json()\n # return jsonify(search_json)\n # print search_json\n results = {}\n # for index, restaurant in enumerate(search_json[\"results\"]):\n flag = True\n while flag:\n index = randint(0, len(search_json[\"results\"])-1)\n restaurant = search_json['results'][index]\n if restaurant['opening_hours']['open_now']:\n results['location'] = restaurant['geometry']['location']\n results['name'] = restaurant['name']\n results['address'] = restaurant['formatted_address']\n # Getting Phone Number\n details_payload = {\"key\": key, \"placeid\": restaurant['place_id']}\n details_resp = requests.get(details_url, params=details_payload)\n details_json = details_resp.json()\n results['phone'] = details_json['result']['formatted_phone_number']\n results['price_level'] = restaurant['price_level']\n flag = False\n\n resMessage = results\n res = jsonify(resMessage)\n res.status_code = 200\n return res\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"NathanWang2/RestaurantRoulette","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70476809544","text":"import random\n\ndef get_response():\n return random.randint(1,3)\n\ndef main():\n user_input = input(\"Enter the number of questions you have: \")\n play_times = 3\n if user_input.isnumeric() and int(user_input) < 100:\n play_times = user_input\n else:\n print(\"Bad input. 
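A hedged client-side sketch for exercising the /maps route above; the host, port, and query values are assumptions (Flask's debug server defaults to port 5000):

import requests

resp = requests.get(
    "http://127.0.0.1:5000/maps",
    params={"location": "pizza near campus", "radius": "5000", "maxprice": "2"},
)
resp.raise_for_status()
pick = resp.json()  # one open restaurant chosen at random by the server
print(pick["name"], pick["address"], pick["phone"])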
You will play\",play_times,\"times as default.\")\n \n\nmain() \n \n","repo_name":"xubowenhaoren/PythonWorkshops","sub_path":"Workshop 2/defaultValues.py","file_name":"defaultValues.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40971827878","text":"from robot_class import Robot\nimport time as time\n\ndef follow_right(state_machine, rob):\n max_forward = rob.encoder.get_max_forward_speed()\n max_backward = rob.encoder.get_max_backward_speed()\n\n desired_distance = 5\n # Proportional gain\n Kp = 2\n\n## user_input = input(\"Place robot beside wall and press enter to continue.\")\n\n while (True):\n r_distance = rob.distance_sensor.get_right_inches()\n f_distance = rob.distance_sensor.get_front_inches()\n\n if (f_distance >= (desired_distance)):\n rob.check_goal_in_front()\n if (rob.blob_x < 320):\n #check if goal in front\n #check if no wall in front\n print(\"Goal is in front and getting close to it.\")\n rob.goal_in_front(True)\n## rob.no_wall_detected(True)\n rob.encoder.setSpeedsIPS(0,0)\n return\n\n r_proportional_control = saturation_function(Kp * (desired_distance - r_distance),\n max_forward, max_backward)\n f_proportional_control = saturation_function(Kp * (desired_distance - min(f_distance, r_distance)),\n max_forward, max_backward)\n\n if f_distance < (desired_distance * 2):\n #front wall is deteced withing distance*2 inches. Starts to turn\n print(\"Turning\")\n rob.encoder.setSpeedsIPS(min(max_forward + r_proportional_control, f_proportional_control,\n max_forward), max_forward)\n else:\n if (state_machine and (r_distance > (desired_distance * 20))):\n print(\"going straight\")\n rob.encoder.setSpeedsIPS(max_forward,\n max_forward)\n else:\n #No front wall detected\n print(\"Wall following\")\n rob.encoder.setSpeedsIPS(min(max_forward + r_proportional_control, max_forward),\n min(max_forward - r_proportional_control, max_forward))\n time.sleep(0.01)\n\n\ndef follow_left(rob):\n #rob = Robot()\n max_forward = rob.encoder.get_max_forward_speed()\n max_backward = rob.encoder.get_max_backward_speed()\n\n desired_distance = 5\n # Proportional gain\n Kp = 4.7\n\n## user_input = input(\"Place robot beside wall and press enter to continue.\")\n\n while (True):\n l_distance = rob.distance_sensor.get_left_inches()\n f_distance = rob.distance_sensor.get_front_inches()\n\n l_proportional_control = saturation_function(Kp * (desired_distance - l_distance),\n max_forward, max_backward)\n f_proportional_control = saturation_function(Kp * (desired_distance - min(f_distance, l_distance)),\n max_forward, max_backward)\n\n if f_distance < (desired_distance * 2):\n rob.encoder.setSpeedsIPS(max_forward, min(max_forward + l_proportional_control, f_proportional_control,\n max_forward))\n else:\n rob.encoder.setSpeedsIPS(min(max_forward - l_proportional_control, max_forward),\n min(max_forward + l_proportional_control, max_forward))\n time.sleep(0.01)\n\ndef saturation_function(proportional_speed, max_forward_speed, max_backward_speed):\n if proportional_speed > 0.1:\n if -proportional_speed < max_backward_speed:\n return max_backward_speed\n else:\n return -proportional_speed\n elif proportional_speed < -0.1:\n if -proportional_speed > max_forward_speed:\n return max_forward_speed\n else:\n return -proportional_speed\n else:\n return 0\n\ndef main():\n rob = Robot()\n if rob.distance_sensor.get_right_inches() < rob.distance_sensor.get_left_inches():\n follow_right(False,rob)\n else:\n 
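For reference, the clamp-and-negate behaviour of saturation_function above, checked with illustrative speed limits (not values from the lab):

# positive distance error -> drive backward, clamped to the backward limit
assert saturation_function(3.0, max_forward_speed=2.0, max_backward_speed=-2.0) == -2.0
# negative error -> drive forward, clamped to the forward limit
assert saturation_function(-5.0, max_forward_speed=2.0, max_backward_speed=-2.0) == 2.0
# errors inside the +/-0.1 dead band produce no correction
assert saturation_function(0.05, max_forward_speed=2.0, max_backward_speed=-2.0) == 0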
follow_left(rob)\n\n## Main program\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Sunset-wrkshp/mobile_robot_19","sub_path":"Lab_3/wall_following.py","file_name":"wall_following.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11104827895","text":"from decimal import Decimal \n\nfrom ..helpers.objects import SimpleAttrDictPlus\n\nfrom ..consts import consts\nfrom ..utils import utils\nfrom ..utils import tx\n\nfrom .first_block import FirstBlockParser\nfrom .stop_network import StopNetworkParser\n\nclass Error(Exception):\n    pass\n\nclass ParserInterface:\n    def init():\n        raise Error()\n\n    def validate():\n        raise Error()\n\n    def action():\n        raise Error()\n\n    def rollback():\n        raise Error()\n\n    def header():\n        return tx.Header()\n\ndef get_db_transaction_model():\n    return None\n\nclass Parser(SimpleAttrDictPlus):\n    _default_attrs = {\n        'block_data': utils.BlockData(),\n        'prev_block': utils.BlockData(),\n\n        'data_type': -1,\n        'current_version': \"\",\n        'mrkl_root': None,\n        'public_keys': [],\n\n        'tx_binary_data': None,\n        'tx_full_data': None,\n        'tx_hash': None,\n        'tx_slice': None,\n        'tx_map': None,\n        'tx_ids': 0,\n        'tx_ecosystem_id_str': \"\",\n        'tx_type': -1,\n        'tx_cost': -1,\n        'tx_fuel': -1,\n        'tx_used_cost': Decimal('-1'),\n        'tx_key_id': -1,\n        'tx_time': -1,\n        'tx_ecosystem_id': -1,\n        'tx_node_position': -1,\n\n        'tx_ptr': None,\n        'tx_data': None,\n        'tx_smart': None,\n        'tx_contract': tx.SmartContract(),\n        'tx_header': tx.Header(),\n        'tx_parser': ParserInterface(),\n\n        'tx_extra': tx.Extra(),\n\n    }\n\n    def __init__(self, *args, **kwargs):\n        super(Parser, self).__init__(*args, **kwargs)\n\n    def update_from_tx_smart(self, tx_smart, **kwargs):\n        print(tx_smart)\n        src_keys = ['type', 'time', 'ecosystem_id', 'key_id', 'node_position',\n            'bin_signatures', 'token_ecosystem', 'max_sum', 'pay_over',\n            'signed_by', 'data']\n        src_dict = {k: v if k in src_keys else None for k, v in dict(tx_smart).items()}\n\ndef get_parser(parser, tx_type):\n    if tx_type == consts.TxTypeFirstBlock:\n        return FirstBlockParser()\n    elif tx_type == consts.TxTypeStopNetwork:\n        return StopNetworkParser()\n    else:\n        raise Error(\"Unknown tx_type: %s\" % tx_type)\n\n","repo_name":"GenesisKernel/blockexplorer","sub_path":"genesis_block_chain/parser/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"36489445992","text":"#i didn't use serialiser\n# i was supposed to use REST API + Ajax.. That was what required the serializer thing..\n# but at the end of the day, i was able to implement it without the REST API, APIView and all..\n\n# i'm still keeping their files anyway.. 
i'll most likely delete it soon but right about now,\n# i still want to have it for learning purposes...\n\nfrom rest_framework import serializers\nfrom .models import Meat\n\nclass MeatModelSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Meat\n\t\tfields = [\n\t\t\t'meat_type',\n\t\t\t'cut_type',\n\t\t\t'weight',\n\t\t]\n\n","repo_name":"olamigokayphils/ctdepot","sub_path":"chickenandturkeydepot/home/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29367165188","text":"import random\n\n# Metodos\nclass carta:\n manilha =\"\"\n valor=0\n index=0\n def __init__(self,manilha,valor,index):\n self.manilha = manilha\n self.valor = valor\n self.index= index\n\n def __str__(self):\n return f\"manilha {self.manilha} : valor {self.valor}\"\n\n\ndef card_deck():\n card_value = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\n card_type = ['Coraçao','Espadinha','Zap','Picafumo']\n deck = []\n k = 0\n for i in card_type:\n for j in card_value:\n k+=1\n cartas = carta(i,j,k)\n deck.append(cartas)\n return deck\n\n\ndef carta_valor(carta,rodada):\n if((carta.valor =='10')or (carta.valor =='J')or(carta.valor =='Q')or(carta.valor =='K')):\n return 10\n elif(carta.valor=='A'):\n if(rodada==1):\n return 11\n else:\n return 1\n else:\n return int(carta.valor)\n\n\n\n\ndef cartas_ini(deck):\n return random.sample(deck,2)\n\ndef carta_add(deck):\n carta= random.sample(deck,1)\n remove_from_deck(carta[0],deck)\n print(carta)\n return carta[0]\n\ndef mao_atual(mao,soma):\n for carta in mao:\n print(carta)\n print(f\"Soma das cartas {soma}\")\n\n\ndef remove_from_deck(carta,deck):\n deck.pop(int(carta.index))\n\n\n\n\n\ndef pricipal():\n deck = card_deck()\n print(len(deck))\n\n mao = cartas_ini(deck)\n soma = 0\n rodada = 1\n while soma<21:\n if rodada ==1:\n for cartas in mao:\n remove_from_deck(cartas,deck)\n soma+= carta_valor(cartas,1)\n print(soma)\n else:\n cont = input(\"Deseja continuar o jogo ?(S/N)\\n\")\n if cont.upper() ==\"S\":\n carta =carta_add(deck)\n mao.append(carta)\n soma+= carta_valor(carta,2)\n\n pass\n else:\n break\n\n\n\n\n\n mao_atual(mao,soma)\n\n rodada+=1\n # print(len(deck))\n\n if soma==21:\n print(\"Vc Ganhou\")\n elif soma>21:\n print(\"vc Perdeu\")\n else:\n print(\"Quase La Amigao\")\n\n\n\n\n# Main prog\n\n#\n# deck = card_deck()\n#\n#\n# mao = cartas_ini(deck)\n#\n#\n#\n# for i in mao:\n# print(i)\n\npricipal()","repo_name":"arthur020/LingProg_2","sub_path":"blackjack21.py","file_name":"blackjack21.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15016408096","text":"def create_model(vocabulary_size,seq_len):\n\n model=Sequential()\n model.add(Embedding(vocabulary_size,seq_len,input_length=seq_len))\n model.add(LSTM(seq_len*2,return_sequences=True))\n model.add(LSTM(50))\n model.add(Dense(50,activation='relu'))\n\n model.add(Dense(vocabulary_size,activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\n\n model.summary()\n return model\n","repo_name":"vaibhavmakhloga/Text-Generation-with-LSTM","sub_path":"LSTM Model.py","file_name":"LSTM Model.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25595914300","text":"import pytest\nimport 
mock\nimport os\n\nfrom mg.input.events import Event\nfrom mg.ui.display import Display\nfrom mg.ui.menu import Menu\nfrom mg.ui.pages.base import Page\nfrom mg.tests.conf import settings\n\nimport mg.ui.pages.main as main_pages\nimport mg.ui.pages.strings as string_pages\nfrom mg.state import State, VoiceState\n\n\n@pytest.fixture\ndef menu(tmpdir):\n output = tmpdir.join('output').ensure()\n display = Display(128, 32, str(output))\n menu = Menu(None, State(settings), display)\n\n class Home(Page):\n title = 'MockHome'\n\n menu.register_page('home', Home)\n yield menu\n menu.cleanup()\n\n\ndef get_testdata_dir():\n current_dir = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(current_dir, 'data')\n\n\n@mock.patch('mg.fluidsynth.api.lib', mock.Mock(**{\n 'get_cpu_load.return_value': 1.1,\n}))\ndef test_goto_main_home(menu):\n menu.goto(main_pages.Home())\n\n\ndef test_goto_main_volume(menu):\n menu.goto(main_pages.VolumeDeck())\n\n\ndef test_sound_list_page(menu):\n voice = VoiceState('melody')\n page = string_pages.SoundListPage(voice=voice)\n menu.push(page)\n page.render()\n\n\ndef _evt(name, action='down', value=None):\n return Event.from_mapping({\n 'type': 'input',\n 'name': name,\n 'action': action,\n 'value': value,\n })\n","repo_name":"midigurdy/mg-core","sub_path":"mgurdy/src/mg/tests/test_pages.py","file_name":"test_pages.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9986780423","text":"#!/usr/bin/python3.6\nimport doubleTLS\n\nimport tkinter as tk #Standard python GUI library\nimport datetime #Datetime for chat output\nimport textwrap #Neatly splitting up strings when displaying in chat window\nimport re #User input validation\nimport threading #To simultaneously send information with c['localS'], and recieve information with c['remoteS'] \n\nr_text = list() #used to share incoming messages between the chat listener thread, and the tkinter main loop for the chat (tkinter isn't very compatible with multithreading)\nBufferSize = 1024\n\n####################################################################################################################################\n# Chat Functions\n####################################################################################################################################\n\n#Accepts the remote socket, symmetric key, and chat prompt message, and opens a chat conversation\ndef chat(sendSocket,recieveSocket,symmkeyLocal,symmkeyRemote):\n #Create window\n root = tk.Tk()\n root.resizable(False,False)\n\n def sendMessage(event):\n #Message from dialog box\n msg = textIn.get(\"1.0\",tk.END).strip()\n\n #Send message to remote host\n sendSocket.send(symmkeyRemote.encrypt(bytes(msg,'utf-8')))\n\n #Slice the message into 48 character lines, then append each to the console\n msg = textwrap.wrap(msg,48)\n console.config(state='normal')\n is1 = True\n for line in msg:\n if is1:\n console.insert('end',f\"You {getNow()}\" + line + '\\n')\n else:\n console.insert('end',' '*14 + line + '\\n')\n is1 = False\n console.config(state='disabled')\n console.see('end') #Scroll to bottom automatically\n\n #Erase the contents of the input box\n textIn.delete(\"1.0\",tk.END)\n\n def getMessage(msg):\n #Slice the message into 48 character lines, then append each to the console\n msg = textwrap.wrap(msg,48)\n \n console.config(state='normal')\n is1 = True\n for line in msg:\n if is1:\n console.insert('end',f\"Them {getNow()}\" + line + 
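The symmetric keys handed to chat() expose a Fernet-style encrypt/decrypt pair; a standalone sketch of that round trip with the cryptography package (whether doubleTLS really uses Fernet is an assumption here, and the key exchange itself is out of scope):

from cryptography.fernet import Fernet

key = Fernet(Fernet.generate_key())
token = key.encrypt(b"hello over TLS")     # what sendSocket.send() would carry
print(key.decrypt(token).decode("utf-8"))  # "hello over TLS"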
'\\n')\n else:\n console.insert('end',' '*14 + line + '\\n')\n is1 = False\n console.config(state='disabled')\n console.see('end')\n\n def leave():\n root.destroy()\n\n\n #Define its width and height, and position in the center of the screen\n if doubleTLS.win:\n w=499\n h=360\n else:\n w=505\n h=385\n \n root.geometry(f\"{w}x{h}+{round((root.winfo_screenwidth()/2)-(w/2))}+{round((root.winfo_screenheight()/2)-(h/2))}\")\n root.title(\"Crypto-Chat\")\n\n #Window elements\n console = tk.Text(root,height=20,width=62,wrap=tk.WORD,yscrollcommand=True,background=\"#09295c\",foreground=\"white\",state='disabled')\n buttonFrame = tk.Frame(root)\n label = tk.Label(buttonFrame,text=\"Chat:\")\n textIn = tk.Text(buttonFrame,height=1,width=40)\n sendBind = textIn.bind('',sendMessage)\n if doubleTLS.win:\n postBtn = tk.Button(buttonFrame,text=\"Send\",width=10, command= lambda :sendMessage(''))\n exitBtn = tk.Button(buttonFrame,text=\"Exit\",width=6,command=leave)\n else:\n postBtn = tk.Button(buttonFrame,text=\"Send\",width=7, command= lambda :sendMessage(''))\n exitBtn = tk.Button(buttonFrame,text=\"Exit\",width=3,command=leave)\n\n #Positioning window elements\n console.grid(row=0,column=0,sticky=tk.W)\n buttonFrame.grid(row=1,column=0,pady=3)\n label.grid(row=0,column=0,sticky=tk.W)\n textIn.grid(row=0,column=1)\n postBtn.grid(row=0,column=2)\n exitBtn.grid(row=0,column=3)\n\n #Start listener function for recieved messages\n listener = threading.Thread(target=chatlistener,args=(symmkeyLocal,recieveSocket,), daemon=True)\n listener.start()\n\n #Main loop\n rLen = len(r_text)\n while True:\n try:\n if rLen < len(r_text):\n rLen = len(r_text)\n getMessage(r_text[-1])\n\n #Keep main window rolling\n root.update()\n\n #If listener dies, write disconnect message and disable send button. Also, exit the routine and go to default tkinter mainloop \n if not listener.is_alive(): \n postBtn.config(state='disabled')\n console.config(state='normal')\n console.insert('end','Chat partner has disconnected.')\n console.config(state='disabled')\n console.see('end')\n textIn.unbind(sendBind)\n break\n\n #If window is closed, quit without error \n except tk._tkinter.TclError:\n exit()\n\n root.mainloop()\n sendSocket.close()\n recieveSocket.close()\n\n\n#Accepts the remote socket object, and fernet symmetric key, to constantly listen for recieved messages\ndef chatlistener(symmkeyLocal,recieveSocket):\n try:\n while True:\n #Recieve message from remote host\n message = recieveSocket.recv(BufferSize)\n message = symmkeyLocal.decrypt(message)\n message = message.decode('utf8')\n\n #Write message to console\n r_text.append(message)\n\n except ConnectionResetError:\n print(\"Chat partner has disconnected\")\n except:\n pass\n \n#Gets the current hour/minute for showing as a timestamp in the console\ndef getNow():\n now = datetime.datetime.now()\n hm = '('\n if now.hour < 10:\n hm += f'0{now.hour}:'\n else:\n hm += f'{now.hour}:'\n\n if now.minute < 10:\n hm += f'0{now.minute}): '\n else:\n hm += f'{now.minute}): '\n\n return hm\n\ndef main():\n params = {\n 'remoteaddress' :'10.0.0.13', #127.0.0.1 is used as an exit case in the script. 
So to connect to localhost, be sure to use your PC's LAN IP address\n 'port' : 5001, #Port for the script to listen/connect on\n 'hostpassword' : 'P@ssw0rd', #Password that someone connecting to your device will be required to enter when connecting\n 'remotepassword': 'P@ssw0rd', #Password to submit to the remote host to authenticate the connection\n 'keypassword' : 'G00dP@ssw0rd', #Password to unlock your certificate's private key (on first run, you'll be prompted for this when it's being created)\n 'timeout' : 5 #Connection timeout value as an integer value in seconds. (0 to listen forever)\n }\n\n s = doubleTLS.connect(params)\n if s:\n chat(s['localS'],s['remoteS'],s['localK'],s['remoteK'])\n\nmain()\n","repo_name":"chrispyth42/P2P-TLS","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"22391858079","text":"import random\nfrom statistics import mean, median, mode\n\ndef print_greeting(greeting,attempts):\n print(\"-\" * len(greeting) )\n print(greeting)\n #show best score if this is not the first attempt\n if len(attempts) > 0:\n print(f\"The best score so far is:{min(attempts)}\")\n print(\"-\" * len(greeting))\n\n\ndef play_again():\n play_again = input(\"Would you like to play again? [Y/N] \")\n if play_again.upper() == \"Y\":\n return True\n else:\n print(\"Thank you for playing. Goodbye.\")\n return False\n \n\ndef print_statistics(count, list):\n attempts_mean = mean(list)\n attempts_median = median(list)\n attempts_mode = mode(list)\n print(f\"\"\"\n Lets see how you did:\n Your score: {count}\n Mean: {attempts_mean}\n Median: {attempts_median}\n Mode: {attempts_mode}\n \"\"\")\n\n\ndef start_game():\n \n\n answer = random.randrange(1,100)\n attempts = []\n count = 0\n print_greeting(\"Welcome to the number guessing game.\", attempts)\n while True:\n try:\n guess = int(input(\"Guess a number, you must: \"))\n if guess < 1 or guess > 100:\n raise ValueError(\"Please guess a number between 1 and 100\")\n except ValueError as err:\n print(f\"Whoopsies, something went wrong. {err}. Please try again.\")\n else:\n count += 1\n if guess > answer :\n print(\"It's lower\")\n elif guess < answer:\n print(\"It's higher\")\n elif guess == answer:\n print('*' * 40)\n print(f\"You got it! It took you {count} attempts.\")\n print('*' * 40)\n attempts.append(count)\n print_statistics(count, attempts)\n keep_playing = play_again()\n if keep_playing:\n print_greeting(\"Going again? 
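print_statistics below leans on the statistics module; for reference, the three summaries on an illustrative list of attempt counts:

from statistics import mean, median, mode

attempts = [7, 5, 9, 5]
print(mean(attempts), median(attempts), mode(attempts))  # 6.5 6.0 5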
I like it!\", attempts)\n count = 0\n answer = random.randrange(1,100)\n else:\n break\n \n\nstart_game()","repo_name":"HuckleberryKBT/A-Number-Guessing-Game","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72874784266","text":"from __future__ import unicode_literals\nfrom __future__ import print_function\nimport sys\nfrom subprocess import call, check_output\nfrom glob import glob\nimport videogrep\nimport re\nimport os\nfrom vidpy import Composition, Clip, config\nfrom multiprocessing import Pool\n\nconfig.MELT_BINARY = 'melt'\n\ndef download_subtitles(q, page=1, total_pages=1):\n url = 'https://www.youtube.com/results?search_query={},cc&page={}'.format(q, page)\n call(['youtube-dl', url, '--write-auto-sub', '--skip-download', '-o', '%(id)s'])\n\n if page < total_pages:\n download_subtitles(q, page+1, total_pages)\n\n\ndef get_timestamps(q):\n\n comp = {}\n for f in glob('*.vtt'):\n with open(f, 'r') as infile:\n text = infile.read()\n if '::cue' not in text:\n continue\n sentences = videogrep.parse_auto_sub(text)\n\n for s in sentences:\n for w in s['words']:\n if re.search(q, w['word']):\n vid = f.replace('.en.vtt', '')\n if vid not in comp:\n comp[vid] = []\n if w['end'] - w['start'] > 0:\n comp[vid].append(w)\n\n return comp\n\n\ndef get_vid_url(vid):\n print(vid)\n try:\n url = check_output(['youtube-dl', '-f', '22', '-g', 'https://www.youtube.com/watch?v={})'.format(vid)])\n url = url.decode('utf-8').strip()\n return (vid, url)\n except:\n return (vid, None)\n\n\ndef download_segment(url, start, end, outname):\n args = ['melt', url, 'in=:{}'.format(start), 'out=:{}'.format(end), '-consumer', 'avformat:{}'.format(outname)]\n call(args)\n return outname\n\n\ndef compose(timestamps):\n p = Pool(processes=5)\n _urls = p.map(get_vid_url, timestamps.keys())\n urls = {}\n for v, u in _urls:\n if u:\n urls[v] = u\n\n\n to_download = []\n i = 0\n for vid in timestamps:\n if vid not in urls:\n continue\n\n words = timestamps[vid]\n\n for w in words:\n start = w['start']\n end = w['end'] + 0.02\n outname = str(i).zfill(4) + '.mp4'\n to_download.append((urls[vid], start, end, outname))\n i += 1\n # if os.path.exists(outname):\n # i += 1\n # continue\n\n\n clipnames = p.starmap(download_segment, to_download)\n\n clips = []\n for f in clipnames:\n clips.append(Clip(f))\n\n comp = Composition(clips, singletrack=True)\n comp.save('supercut.mp4')\n\n\ndef main():\n download_subtitles(sys.argv[1], page=1, total_pages=5)\n comp = get_timestamps(sys.argv[1])\n compose(comp)\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"antiboredom/youtubegrep","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"27382628528","text":"import datetime\n\nfrom pricers_etl.services import api\n\n\ndef _is_empty_row(row):\n return all(not value for value in row.values())\n\n\ndef _is_empty_list(header_row):\n return all(not value for value in header_row)\n\n\ndef extract_table_xlrd(\n sheet,\n header_row_index,\n max_row_index=None,\n find_header=False,\n overwrite_headers=False,\n):\n \"\"\"\n Extract a table from xls file:\n header_row_index:\n int (start of the table scan, unless find_header is True)\n max_row_index:\n int (default: None)\n (if provided, will stop table scan at this row index)\n 
find_header:\n bool (if True, will assume header row to be\n first non empty row after header_row_index)\n \"\"\"\n headers = [cell.value for cell in sheet.row(header_row_index)]\n if find_header:\n while _is_empty_list(headers):\n header_row_index = header_row_index + 1\n headers = [cell.value for cell in sheet.row(header_row_index)]\n\n result_rows = []\n last_row_index = header_row_index + 1\n\n for row_idx in range(last_row_index, sheet.nrows): # Iterate through rows\n row = [cell.value for cell in sheet.row(row_idx)]\n if _is_empty_list(row):\n break\n values = {}\n merged_headers_row = zip(headers, row)\n if overwrite_headers:\n merged_headers_row = zip(overwrite_headers, row)\n\n for key, cell_value in merged_headers_row:\n values[key] = cell_value\n\n result_rows.append(values)\n last_row_index = last_row_index + 1\n\n return headers, result_rows, last_row_index\n\n\ndef extract_table_csv(\n reader, header_row_index, max_row_index=None, find_header=False\n):\n \"\"\"\n Extract a table from csv file:\n header_row_index: int\n (start of the table scan, unless find_header is True)\n max_row_index: int (default: None)\n (if provided, will stop table scan at this row index)\n find_header: bool\n (if True, will assume header row to be first\n non empty row after header_row_index)\n \"\"\"\n headers = []\n result_rows = []\n\n for i, row in enumerate(reader):\n if i == header_row_index:\n headers = row\n\n if find_header:\n # placeholder for future impl.\n raise NotImplementedError(\"find_header is not implemented for csv files\")\n\n # headers parsed, row is now a data row\n if i > header_row_index:\n values = {}\n for key, cell_value in zip(headers, row):\n values[key] = cell_value\n result_rows.append(values)\n\n if max_row_index and i >= max_row_index:\n break\n\n return headers, result_rows\n\n","repo_name":"Inlinesoft/Python.Reference.Code","sub_path":"app1/src/app1/importers/parser/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"42438308270","text":"def readinput():\n first=int(input(\"Enter first number\"))\n second=int(input(\"Enter second number\"))\n return first, second\n\ndef compare(firstnum,secondnum):\n if(firstnum>secondnum):\n return firstnum\n else:\n return secondnum\n\ndef oddeven(firstnum,secondnum):\n if(firstnum%2==0):\n return 0\n else:\n return 1\nif __name__ == '__main__':\n first, second = readinput()\n resultcompare=compare(first,second)\n print(\"the greatest number is\",resultcompare)\n resultoddeven = oddeven(first, second)\n if(resultoddeven==1):\n print(\"The number is odd\")\n else:\n print(\"the number is even\")\n","repo_name":"sanmathirai/Python-26Dec18-InternShip","sub_path":"python/pythonconditionst.py","file_name":"pythonconditionst.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"17896480805","text":"import gspread\nimport math\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nfrom BaseFunctions.ETF2LSkillCheck import getPlayerSkill, teamSkill\nfrom BaseFunctions.ETF2lBase import getCompList, getPlayers, getTeamName\nfrom ProvTiersAuto.ProvTiersBase import makeTeamDict, getTeamIDList, setGameMode\n\n# Input the team ID list and the requested tier list. 
Either input a list of strings or a string where each item is seperated by a tab\nidList = \"26973\t33328\t33680\t33687\t33656\t13849\t18974\t32334\t25693\t33644\t32388\t32781\t32957\t32366\t33631\t33314\t32786\t32871\t33527\t33603\t32475\t32137\t33559\t33654\t33634\t27530\t33523\t33031\t32897\t32212\t31684\t32303\t33673\t31253\t32362\t32172\t29616\t33696\t32342\t32905\t33337\t33610\t24746\t31678\t25734\t32883\t19335\t32106\t32804\t30263\t32972\t25568\t33316\t32234\t32500\t32396\t31477\t17798\t32422\t33727\t33695\t33729\t32373\t33717\t33611\t32868\t33690\t33526\t33734\t33742\t28121\t33037\t32861\t33692\t33355\t32743\t33721\"\nrequestList = \"Mid\tOpen\tOpen\tOpen\tOpen\tOpen\tPremiership\tLow\tPremiership\tLow\tLow\tLow\tOpen\tLow\tOpen\tOpen\tMid\tLow\tOpen\tLow\tMid\tLow\tOpen\tOpen\tOpen\tMid\tOpen\tMid\tHigh\tHigh\tHigh\tHigh\tOpen\tHigh\tOpen\tHigh\tPremiership\tMid\tMid\tOpen\tOpen\tOpen\tPremiership\tHigh\tHigh\tHigh\tPremiership\tMid\tMid\tHigh\tOpen\tHigh\tOpen\tHigh\tHigh\tOpen\tLow\tMid\tMid\tPremiership\tLow\tPremiership\tMid\tLow\tMid\tLow\tMid\tOpen\tOpen\tMid\tHigh\tHigh\tMid\tMid\tMid\tLow\tLow\"\n# Set the competition ID and the ID of the competition from which on forward results should be taken into account\ncurrentMainCompID = 713\noldCompID = 628\n\n# Enter the name of the season, will be used as the worksheet title\nseasonName = \"HL Season 24\"\n\n# Input the gamemode that needs to be checked. HL for highlander, 6s for 6v6\ngameType = \"HL\"\n\n# Input whether you want to make the \"Base Sheet\" or the sheet to \"iframe\", leave blank to generate both\nsheetMode = \"Base Sheet\"\n\n# Don't edit anything past this point if you have no idea what you are doing\n\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\ncreds = ServiceAccountCredentials.from_json_keyfile_name('admin_secret.json', scope)\nclient = gspread.authorize(creds)\n\nsheet = client.open('ETF2L Provisional Tiers')\n\n\ndef main(gameType, idList, requestList, sheetMode):\n if sheetMode == \"Base Sheet\":\n divList, teamIDList, counterDict, teamDict = setup(gameType, idList, requestList)\n mainSheet(oldCompID, currentMainCompID, divList, teamIDList, counterDict, teamDict)\n\n if sheetMode == \"iframe\":\n divList, teamIDList, counterDict, teamDict = setup(gameType, idList, requestList)\n iframeSheet(counterDict, divList)\n\n if sheetMode == \"\":\n divList, teamIDList, counterDict, teamDict = setup(gameType, idList, requestList)\n mainSheet(oldCompID, currentMainCompID, divList, teamIDList, counterDict, teamDict)\n iframeSheet(counterDict, divList)\n\n\ndef setup(gameType, idList, requestList):\n divList = setGameMode(gameType)\n teamDict = makeTeamDict(idList, requestList)\n teamIDList, counterDict = getTeamIDList(teamDict)\n\n if gameType == \"HL\":\n counterDict[\"Open\"] += counterDict[\"Low\"]\n counterDict.pop(\"Low\")\n\n return divList, teamIDList, counterDict, teamDict\n\n\ndef mainSheet(oldCompID, compID, divList, teamIDList, counterDict, teamDict):\n try:\n baseSheet = sheet.worksheet(seasonName + \" Base\")\n sheet.del_worksheet(baseSheet)\n baseSheet = sheet.add_worksheet(title=seasonName + \" Base\", rows=\"4\", cols=\"20\")\n except gspread.exceptions.WorksheetNotFound:\n baseSheet = sheet.add_worksheet(title=seasonName + \" Base\", rows=\"4\", cols=\"20\")\n\n compList6v6, compListHL = getCompList(oldCompID, compID)\n counterList = []\n for value in counterDict.values():\n counterList.append(value)\n i = 1\n row = [\"Premiership\", \"\", \"Players on 
roster\", \"Requested\", \"\", \"\", \"6s total\", \"6s separate\", \"HL total\", \"HL separate\"]\n baseSheet.insert_row(row, i)\n i += 1\n for teamID in teamIDList:\n teamHL = dict(prem=0, div1=0, high=0, div2=0, div3=0, mid=0, div4=0, low=0, div5=0, div6=0, open=0, none=0)\n team6s = dict(prem=0, div1=0, high=0, div2=0, div3=0, mid=0, div4=0, low=0, div5=0, div6=0, open=0, none=0)\n playerIDList = getPlayers(teamID)\n while playerIDList == []:\n playerIDList = getPlayers(teamID)\n teamName = getTeamName(teamID)\n for playerID in playerIDList:\n playerHL, player6s, HLMatchCount, SMatchCount, previousFMC = getPlayerSkill(playerID, compList6v6, compListHL)\n team6s, teamHL = teamSkill(player6s, playerHL, team6s, teamHL, HLMatchCount, SMatchCount)\n\n Sseperate = 'Prem: ' + str(team6s['prem']) + ', Div1: ' + str(team6s['div1']) + ', high: ' + str(\n team6s['high']) + ', Div2: ' + str(team6s['div2']) + ', Div3: ' + str(team6s['div3']) + ', Mid: ' + str(\n team6s['mid']) + ', Div4: ' + str(team6s['div4']) + ', Low: ' + str(\n team6s['low']) + ', Div5: ' + str(team6s['div5']) + ', Open: ' + str(team6s['div6']) + ', None: ' + str(\n team6s['none'])\n STotal = team6s['prem'] * 28 + team6s['div1'] * 24 + team6s['high'] * 22 + team6s['div2'] * 20 + team6s[\n 'div3'] * 16 + team6s['mid'] * 15 + team6s['div4'] * 12 + team6s['low'] * 9 + team6s['div5'] * 8 + team6s[\n 'div6'] * 4\n Hlseperate = 'Prem: ' + str(teamHL['prem']) + ', Div1: ' + str(teamHL['div1']) + ', high: ' + str(\n teamHL['high']) + ', Div2: ' + str(teamHL['div2']) + ', Div3: ' + str(teamHL['div3']) + ', Mid: ' + str(\n teamHL['mid']) + ', Div4: ' + str(teamHL['div4']) + ', Low: ' + str(\n teamHL['low']) + ', Div5: ' + str(teamHL['div5']) + ', Open: ' + str(teamHL['div6']) + ', None: ' + str(\n teamHL['none'])\n HlTotal = teamHL['prem'] * 28 + teamHL['div1'] * 24 + teamHL['high'] * 22 + teamHL['div2'] * 20 + teamHL[\n 'div3'] * 16 + teamHL['mid'] * 15 + teamHL['div4'] * 12 + teamHL['low'] * 9 + teamHL['div5'] * 8 + teamHL[\n 'div6'] * 4\n teamLink = \"https://etf2l.org/teams/\" + str(teamID)\n teamLinkName = '=HYPERLINK(\"' + teamLink + '\";\"' + teamName + '\")'\n\n for k in range(0, len(counterList)):\n if counterList[k] >= 0:\n if counterList[k] == 0:\n baseSheet.insert_row([divList[k]], i)\n i += 1\n try:\n baseSheet.insert_row([divList[k + 1]], i)\n i += 1\n except IndexError:\n break\n counterList[k + 1] -= 1\n counterList[k] -= 1\n\n row = [str(teamID), teamLinkName, str(len(playerIDList)), teamDict[teamID], \"\", \"\", str(STotal), Sseperate, str(HlTotal), Hlseperate]\n baseSheet.insert_row(row, i, value_input_option='USER_ENTERED')\n i += 1\n break\n\n\ndef iframeSheet(counterDict, divList):\n counterDictSum = 0\n for value in counterDict.values():\n counterDictSum += value\n\n frameSheet = sheet.add_worksheet(title=seasonName, rows=math.ceil(counterDictSum / 2 + 9), cols=\"2\")\n frameSheet.update_cell(1, 1, divList[0])\n j = 2\n k = j\n for l in range(0, len(divList)):\n for i in range(k, counterDict[divList[l]] + k):\n referenceSheet = \"='\" + seasonName + \" Base'!B\" + str(j)\n if i < math.ceil(counterDict[divList[l]] / 2) + k:\n frameSheet.update_cell(i, 1, referenceSheet)\n else:\n frameSheet.update_cell(i - math.ceil(counterDict[divList[l]] / 2), 2, referenceSheet)\n j += 1\n\n frameSheet.update_cell(k + math.ceil(counterDict[divList[l]] / 2), 1, divList[l])\n try:\n frameSheet.update_cell(k + 1 + math.ceil(counterDict[divList[l]] / 2), 1, divList[l + 1])\n except IndexError:\n break\n j += 2\n cell = 
frameSheet.find(divList[l + 1])\n k = cell.row + 1\n\n\nmain(gameType, idList, requestList, sheetMode)\n","repo_name":"NatanVW/ETF2LScripts","sub_path":"ProvTiersAuto/ProvisionalTiersAuto.py","file_name":"ProvisionalTiersAuto.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71455397065","text":"def binary_search(a, n, key):\n start = 0\n end = n - 1\n while start <= end:\n mid = (start + end) // 2\n # 1. middle = key\n if a[mid] == key:\n return True, mid\n # 2. middle > key\n elif a[mid] > key:\n end = mid - 1\n # 3. middle < key\n else:\n start = mid + 1\n return False, -1 # -1 is the usual sentinel, but since -1 is a valid index in Python, returning False as well is safer\n\n\nkey = 7\narr = [2, 4, 7, 9, 11, 19, 23]\nprint(binary_search(arr, len(arr), key))\n","repo_name":"sunoftwilight/LecturePractice","sub_path":"오프라인 강의 실습/알고리즘/0803_binarysearch.py","file_name":"0803_binarysearch.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"13255302760","text":"# Time : O(n)\n# Space : O(h) h->height of tree\n#https://leetcode.com/problems/flatten-binary-tree-to-linked-list/submissions/\n\n# T: O(N) S:O(H)\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def flatten(self, root: Optional[TreeNode]) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n def dfs(root):\n if not root:\n return None\n leftTail = dfs(root.left)\n rightTail = dfs(root.right)\n if root.left:\n leftTail.right = root.right\n root.right = root.left\n root.left = None\n return rightTail or leftTail or root\n \n dfs(root)\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def __init__(self):\n self.end = None\n def flatten(self, root: Optional[TreeNode]) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n if not root:\n return\n if not root.left and not root.right:\n self.end = root\n return\n self.flatten(root.left)\n if self.end and root.left:\n right = root.right\n root.right = root.left\n self.end.right = right\n root.left = None\n self.flatten(self.end.right)\n else:\n self.flatten(root.right)\n \n","repo_name":"RoyalBenny/LeetCode-Solutions","sub_path":"Flatten Binary Tree to Linked List/space_h.py","file_name":"space_h.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"15067146708","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndata = pd.read_csv(\"overall_data.csv\", delimiter=\",\", header=0)\ndata = data.iloc[:,:].values\n\n\nconditions = {\n \" IOWA\": 0,\n \" Equal Means\": 1,\n \" Low Var\": 2,\n \" High Var\": 3\n}\n\nvalues = [0 for j in range(4)]\nrep_values = [0 for j in range(4)]\ncount = [0 for j in range(4)]\nrep_count = [0 for j in range(4)]\n\nfor ind, row in enumerate(data):\n values[conditions[row[1]]] += row[2]\n count[conditions[row[1]]] += 1\n rep_values[conditions[row[1]]] += row[3]\n rep_count[conditions[row[1]]] += 1\n\ncondition = list(conditions.keys())\navg_rewards = [x/y for (x, y) in zip(values, count)] \nrep_choices = [x/y for (x, y) in zip(rep_values, 
rep_count)]\n\nX = list(condition)\n \nX_axis = np.arange(len(X))\n \nplt.bar(X_axis, avg_rewards, 0.4, label = 'untimed') \nplt.xticks(X_axis, X)\nplt.xlabel(\"Payoff Condition\")\nplt.ylabel(\"Average Reward Gained\")\nplt.title(\"Average Reward across each payoff condition\")\nplt.show()\n\nplt.bar(X_axis, rep_choices, 0.4, label = 'untimed') \nplt.xticks(X_axis, X)\nplt.xlabel(\"Payoff Condition\")\nplt.ylabel(\"Repeat Choices made\")\nplt.title(\"Average Repeat Choices across each payoff condition\")\nplt.show()\n","repo_name":"hitesh-anand/MultiArmed_Bandit_Expt","sub_path":"extension_2/analysis_overall.py","file_name":"analysis_overall.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41700691524","text":"from collections import defaultdict\n\nimport h5py\nimport numpy as np\nimport pathlib\nimport pytest\nimport tempfile\n\nimport imageio_ffmpeg as mpg\nfrom ophys_etl.types import ExtractROI\nimport ophys_etl.utils.video_utils as transformations\n\n\n@pytest.mark.parametrize(\n (\"array, input_fps, output_fps, strategy, expected\"),\n [\n (\n # average downsample video file with this dataset:\n np.array([\n [[1, 1], [1, 1]],\n [[2, 2], [2, 2]],\n [[3, 3], [3, 3]],\n [[4, 4], [4, 4]],\n [[5, 5], [5, 5]],\n [[6, 6], [6, 6]],\n [[7, 7], [7, 7]]]),\n 7, 2, 'average',\n np.array([\n [[2.5, 2.5], [2.5, 2.5]],\n [[6.0, 6.0], [6.0, 6.0]]])),\n ])\ndef test_video_downsample(\n array, input_fps, output_fps, strategy, expected, tmp_path):\n\n video_file = tmp_path / \"sample_video_file.h5\"\n with h5py.File(video_file, \"w\") as h5f:\n h5f.create_dataset('data', data=array)\n\n downsampled_video = transformations.downsample_h5_video(\n video_file,\n input_fps,\n output_fps,\n strategy)\n\n assert np.array_equal(downsampled_video, expected)\n\n\ndef compare_videos(encoded_video_path: str, expected_video: np.ndarray):\n \"\"\"Compare an encoded video with its original source\"\"\"\n\n reader = mpg.read_frames(encoded_video_path,\n pix_fmt=\"gray8\",\n bits_per_pixel=8)\n meta = reader.__next__()\n obt_nframes = int(np.round(meta['duration'] * meta['fps']))\n\n assert obt_nframes == len(expected_video)\n\n obt_frames = []\n for frame in reader:\n parsed_frame = np.frombuffer(frame, dtype='uint8')\n parsed_frame = parsed_frame.reshape(meta[\"size\"][::-1])\n obt_frames.append(parsed_frame)\n obt_video = np.array(obt_frames)\n\n assert obt_video.shape == expected_video.shape\n # the default settings for imageio-ffmpeg are not lossless\n # so can't test for exact match\n np.testing.assert_allclose(obt_video, expected_video, atol=20)\n\n\n@pytest.fixture\ndef raw_video_fixture(request):\n video_shape = request.param.get('video_shape', (16, 32))\n nframes = request.param.get('nframes', 25)\n fps = request.param.get('fps', 30)\n rng_seed = request.param.get('rng_seed', 0)\n\n rng = np.random.default_rng(rng_seed)\n\n raw_video = [rng.integers(0, 256, size=video_shape, dtype='uint8')\n for _ in range(nframes)]\n\n result = {}\n result[\"raw_video\"] = np.array(raw_video)\n result[\"fps\"] = fps\n result[\"nframes\"] = nframes\n\n return result\n\n\n@pytest.mark.parametrize(\"raw_video_fixture\", [\n # make the test video a size of at least 16x16\n # otherwise, need to mess with macro_block_size arg\n ({\"video_shape\": (16, 16)}),\n\n ({\"video_shape\": (32, 16)})\n], indirect=[\"raw_video_fixture\"])\ndef test_encode_video(raw_video_fixture, tmp_path):\n output_path = tmp_path / 
'test_video.webm'\n\n fps = raw_video_fixture[\"fps\"]\n expected_video = raw_video_fixture[\"raw_video\"]\n\n transformations.encode_video(video=expected_video,\n output_path=output_path.as_posix(),\n fps=fps),\n\n compare_videos(output_path, expected_video)\n\n\n@pytest.fixture\ndef encoded_videos_fixture(request, tmp_path):\n num_videos = request.param.get('num_videos', 2)\n video_shape = request.param.get('video_shape', (32, 32))\n nframes = request.param.get('nframes', 30)\n fps = request.param.get('fps', 30)\n\n rng = np.random.default_rng(0)\n test_videos = defaultdict(list)\n\n for i in range(num_videos):\n data = np.array([rng.integers(0, 256, size=video_shape, dtype='uint8')\n for _ in range(nframes)])\n\n test_video_path = tmp_path / f\"test_video_{i}.webm\"\n transformations.encode_video(video=data,\n output_path=test_video_path.as_posix(),\n fps=fps)\n\n test_videos['raw_data'].append(data)\n test_videos['encoded_videos'].append(test_video_path)\n\n return test_videos\n\n\n@pytest.mark.parametrize('padding, y0, x0, height, width',\n [(5, 3, 2, 10, 12),\n (10, 3, 2, 10, 12),\n (5, 50, 50, 11, 23),\n (10, 50, 50, 11, 23),\n (5, 118, 50, 10, 12),\n (10, 118, 50, 10, 12),\n (5, 50, 118, 12, 10),\n (10, 50, 118, 12, 10),\n (5, 3, 50, 12, 13),\n (10, 50, 3, 12, 13),\n (5, 118, 118, 10, 10),\n (10, 118, 118, 10, 10)])\ndef test_video_bounds_from_ROI(padding, x0, y0, height, width):\n\n roi = ExtractROI(x=x0, y=y0, height=height, width=width)\n x1 = x0 + width\n y1 = y0 + height\n\n origin, fov = transformations.video_bounds_from_ROI(roi,\n (128, 128),\n padding)\n assert fov[0] % 16 == 0\n assert fov[1] % 16 == 0\n assert fov[0] == fov[1] # only considering cases that can give squares\n assert fov[0] >= max(height+2*padding, width+2*padding)\n assert origin[0] <= y0\n assert origin[1] <= x0\n assert origin[0]+fov[0] >= y1\n assert origin[1]+fov[1] >= x1\n assert origin[0] >= 0\n assert origin[1] >= 0\n assert origin[0]+fov[0] <= 128\n assert origin[1]+fov[1] <= 128\n\n\ndef test_get_max_and_mean():\n rng = np.random.default_rng(1234)\n frames_image_size = 100\n data = rng.integers(low=0,\n high=2,\n size=(frames_image_size,\n frames_image_size,\n frames_image_size),\n dtype=int)\n\n _, video_path = tempfile.mkstemp(\n suffix='.h5')\n video_path = pathlib.Path(video_path)\n\n with h5py.File(video_path, 'w') as h5_file:\n h5_file.create_dataset(name='data', data=data)\n\n result = transformations.get_max_and_avg(video_path)\n np.testing.assert_array_equal(result['max'], 1)\n np.testing.assert_allclose(result['avg'], data.mean(axis=0))\n\n video_path.unlink()\n","repo_name":"AllenInstitute/ophys_etl_pipelines","sub_path":"tests/utils/test_video_utils.py","file_name":"test_video_utils.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"13864816407","text":"import random\nfrom time import time\nfrom bitstring import Bits\nimport itertools\nimport fileinput\nimport configparser\nimport sys\n\ndef UniformMutate(bitstrs):\n\tfor i in range(len(bitstrs)):\n\t\tfor j in range(vertNum):\n\t\t\tif(random.random() < mutatechance):\n\t\t\t\tbitstrs[i][j] += (bitstrs[i][j]+1)%2\n\t\ndef UniformRecombination(bitstrs):\n\treturn [random.choice(bitstrs)[x] for x in range(vertNum)]\n\t\ndef getFitness(soln):\n\tpart1=soln.count(1)\n\tdenominator=min(vertNum-part1, part1)\n\tnumerator=sum(1 for edge in edges if soln[edge[0]-1] != soln[edge[1]-1])\n\treturn (1/numerator, denominator)\n\t\n\ndef 
fixSolution(soln):\n\tokay=[False, False]\n\tfor i in soln:\n\t\tokay[i]=True\n\tif not (okay[0] and okay[1]):\n\t\tsoln[0]=(soln[0]+1)%2\n\treturn soln\n\t\n\t\ndef GenerateInitialPopulation():\n\treturn [(x, getFitness(x)) for x in [fixSolution([random.getrandbits(1) for verts in range(vertNum)]) for i in range(populationSize)]]\n\t\n\t\ndef Dominates(x, y):\n\tonegreater=False\n\tfor i in range(len(x)):\n\t\tif x[i] < y[i]:\n\t\t\treturn False\n\t\tif x[i] > y[i]:\n\t\t\tonegreater=True\n\treturn onegreater\n\t\ndef EvaluatePopulation(pop):\n\tevoPop=[]\n\tfront=0\n\tfrontstart=0\n\twhile len(pop) != 0:\n\t\tfor i in pop:\n\t\t\tdominated=False\n\t\t\tfor j in pop:\n\t\t\t\tif Dominates(j[1], i[1]):\n\t\t\t\t\tdominated=True\n\t\t\t\t\tbreak\n\t\t\tif not dominated:\n\t\t\t\tevoPop.append((i, front))\n\t\tfor i in evoPop[frontstart:]:\n\t\t\tpop.remove(i[0])\n\t\tfrontstart=len(evoPop)\n\t\tfront+=1\n\treturn evoPop\n\ndef MOEASelect(evoPop):\n\tif evoPop[populationSize][1] != evoPop[populationSize-1][1]:\n\t\treturn evoPop[:populationSize]\n\t\n\tbrokenFront=evoPop[populationSize][1]\n\tfirst=populationSize\n\tlast=populationSize\n\twhile first >= brokenFront and evoPop[first][1] == brokenFront:\n\t\tfirst-=1\n\tfirst+=1\n\twhile last < len(evoPop) and evoPop[last][1] == brokenFront:\n\t\tlast+=1\n\t\n\tredfront=ReduceFront(evoPop[first:last], populationSize-first)\n\treturn evoPop[:first]+redfront\n\t\ndef ReduceFront(front, limit):\n\tcrowds=[crowdingDistance(front, i) for i in front]\n\twhile len(front) > limit:\n\t\tlilind=0\n\t\tfor i in range(len(crowds)):\n\t\t\tif crowds[i] < crowds[lilind]:\n\t\t\t\tlilind = i \n\t\tcrowds.pop(lilind)\n\t\tfront.pop(lilind)\n\treturn front\n\t\ndef crowdingDistance(evopop, ffsoln):\n\tmyFront=[i for i in evopop if i[1] == ffsoln[1] and i != ffsoln]\n\tif len(myFront) > 0:\n\t\treturn sum((Bits(ffsoln[0][0])^Bits(i[0][0])).count(1) for i in myFront)/len(myFront)\n\treturn 0\n\t\n\t\t\t\t\ndef BinaryTournament(evopop, numoutputs):\n\tif numoutputs == len(evopop):\n\t\treturn evopop\n\tparents=[]\n\twhile len(parents) < numoutputs:\n\t\tpair=random.sample(evopop, 2)\n\t\twhile pair[0] in parents or pair[1] in parents:\n\t\t\tpair=random.sample(evopop, 2)\n\t\t\n\t\tif pair[0][1] < pair[1][1]:\n\t\t\tparents.append(pair[0])\n\t\telif pair[1][1] < pair[0][1]:\n\t\t\tparents.append(pair[1])\n\t\telse:\n\t\t\tcrow1=crowdingDistance(evopop, pair[0])\n\t\t\tcrow2=crowdingDistance(evopop, pair[1])\n\t\t\tif crow1 > crow2:\n\t\t\t\tparents.append(pair[0])\n\t\t\telse:\n\t\t\t\tparents.append(pair[1])\n\treturn parents\t\n\t\ndef CreateOffspring(parents, numChild):\n\tchildren=[]\n\twhile len(children) < numChild:\n\t\tmates=random.sample(parents, 2)\n\t\tchildren.append(Recombination((mates[0][0][0], mates[1][0][0])))\n\treturn children\n\t\t\n\n## debugging functions\ndef printEvoPopulation(epop):\n\tfor i in epop:\n\t\tprintFrontFitSoln(i)\n\t\tprint(\"\")\n\ndef printFrontFitSoln(ffsoln):\n\tprintFitSoln(ffsoln[0])\n\tprint(\"Front:\", ffsoln[1], end=\" \")\n\ndef printPopulation(pop):\n\tfor i in pop:\n\t\tprintFitSoln(i)\n\t\tprint(\"\")\n\ndef printFitSoln(fitSoln):\n\tprintSoln(fitSoln[0])\n\tprint(fitSoln[1], fitSoln[1][1]*fitSoln[1][0], end=\" \")\n\ndef printSolns(solns):\n\tfor i in solns:\n\t\tprintSoln(i)\n\t\tprint(\"\")\n\ndef printSoln(soln):\n\tprint(Bits(soln).bin[2:], end=\" \")\n\t\ndef MakePopFromSolns(solns):\n\tmypop=[]\n\tfor i in solns:\n\t\tsoln=fixSolution(i)\n\t\tfit=getFitness(soln)\n\t\tmypop.append((soln, 
fit))\n\treturn mypop\n\n\n\t\n## files\ninfile=\"G4.dat\"\nlogfile=\"log-G4.txt\"\noutfile=\"soln-G1.txt\"\nlogout=None\nsolnout=None\n\n## problem\nvertNum=0\nedgeNum=0\nedges=tuple()\n\n## evo vars\nnumRuns=30\nnumEvals=10000\nmu=150\nLMBDR=300\nseed=0\nmutatechance=1/2000\nnumOffspring=LMBDR\npopulationSize=mu\nparentK=50\n\n## evo functions\n#ParentSelect=BinaryTournament\nRecombination=UniformRecombination\nMutate=UniformMutate\nSurvivalSelect=MOEASelect\n\n\ndef GetOpts(cfg):\n\tglobal infile, logfile, outfile, numRuns, numEvals, mu, LMBDR, seed\n\tglobal mutatechance, numOffspring, populationSize, parentK\n\tconfig = configparser.RawConfigParser()\n\tconfig.read(cfg)\n\tinfile=config.get(\"Configuration\", \"input\")\n\tlogfile=config.get(\"Configuration\", \"log\")\n\toutfile=config.get(\"Configuration\",\"output\")\n\tnumRuns=config.getint(\"Configuration\",\"runs\")\n\tnumEvals=config.getint(\"Configuration\",\"evals\")\n\tmu=config.getint(\"Configuration\", \"mu\")\n\tLMBDR=config.getint(\"Configuration\", \"lambda\")\n\tmutatechance=config.getfloat(\"Configuration\", \"mutatechance\")\n\tparentK=config.getint(\"Configuration\", \"numParents\")\n\tseed=config.getint(\"Configuration\", \"seed\")\n\tpopulationSize=mu\n\tnumOffspring=LMBDR\n\t\n\t\n\t\ndef ReadProblem(infile):\n\tglobal vertNum\n\tglobal edgeNum\n\tglobal edges\n\tp=open(infile)\n\tvertNum=int(p.readline())\n\tedgeNum=int(p.readline())\n\tedges=tuple((int(line.split()[0]), int(line.split()[1])) for line in p.readlines())\n\treturn (vertNum, edgeNum, edges)\n\t\ndef EvalRun(runNumber):\n\tlogout.write(\"Run \"+str(runNumber)+\"\\n\")\n\tprint(\"\")\n\tprint(\"Generating population size:\", populationSize)\n\tprint(\"\")\n\t\n\tpopulation = GenerateInitialPopulation()\n\tprint(\"\")\n\tavg=0\t\n\tevalpopulation=EvaluatePopulation(population)\n\tfor evals in itertools.count(mu,LMBDR):\n\t\tprint(\"Evals:\", evals)\n\t\tavg=sum(x[0][1][0]*x[0][1][1] for x in evalpopulation)/len(evalpopulation)\n\t\tbest=max(x[0][1][0]*x[0][1][1] for x in evalpopulation)\n\t\tlogout.write(str(evals)+\"\\t\"+str(avg)+\"\\t\"+str(best)+\"\\n\")\n\t\tif(evals >=numEvals):\n\t\t\tbreak\n\t\tlasttime=time()\n\t\t\n\t\t#print(\"Making Parents...\")\n\t\tparents=BinaryTournament(evalpopulation, parentK)\n\t\t\n\t\t#print(\"Creating Offspring...\")\n\t\toffspring=CreateOffspring(parents, numOffspring)\n\t\t\t\t\n\t\t#print(\"Mutating Offspring...\")\n\t\tMutate(offspring)\n\t\t\n\t\t#print(\"Mixing population and offspring...\")\n\t\tpopulation=MakePopFromSolns(offspring)+[ffsoln[0] for ffsoln in evalpopulation]\n\t\t\n\t\t#print(\"Evaluating bigpop...\")\n\t\tevalpopulation=EvaluatePopulation(population)\n\t\t\n\t\t#print(\"Survival Selection...\")\n\t\tevalpopulation = SurvivalSelect(evalpopulation)\n\t\tprint(time()-lasttime)\n\t\n\treturn ([x for x in evalpopulation if x[1] == 0], avg)\n\ndef main(cfg):\n\tglobal seed, logout, solnout\n\tGetOpts(cfg)\n\tvertNum, edgeNum, edges = ReadProblem(infile)\n\tif seed == 0:\n\t\tseed = int(time()*1000)\n\trandom.seed(seed)\n\t\n\tlogout=open(logfile, \"w\")\n\tlogout.write(\"Result Log\\n\")\n\tprint(\"Seed:\", seed)\n\t# find the best front out of numRuns runs of our algorithm, sorted on average run fitness\n\tbestfront=max(map(EvalRun, list(range(1,numRuns+1))), key=lambda frontavg: frontavg[1])\n\tsolnout=open(outfile, \"w\")\n\tfor soln in 
bestfront[0]:\n\t\tsolnout.write(str(int(1/soln[0][1][0]))+\"\\t\"+str(soln[0][1][1])+\"\\t\"+Bits(soln[0][0]).bin[2:]+\"\\n\")\n\tsolnout.close()\n\tlogout.close()\n\t\nif __name__ == \"__main__\":\n\tif len(sys.argv) == 1:\n\t\tmain(\"default.cfg\")\n\telse:\n\t\tfor arg in sys.argv[1:]:\n\t\t\tmain(arg)\n\t\t\n\t\t\n","repo_name":"ProdigySim/MST-CS","sub_path":"cs348/asg3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26621735214","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\n\n\n\nrospy.init_node('joystick', anonymous=True)\nr = rospy.Rate(10) #10hz\nmsg = Twist()\npub = rospy.Publisher('cmd_vel', Twist, queue_size=1)\n\ndef callbackJoy(data):\n global msg\n print (data.axes[1])\n print (data.axes[0])\n msg.linear.x = data.axes[3]\n msg.linear.z = data.axes[2]\n pub.publish(msg)\n \nrospy.Subscriber(\"joy\", Joy, callbackJoy)\ndef talker(): \n rospy.spin()\n\nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException: pass\n\n\n","repo_name":"AutonomyLab/following-ahead","sub_path":"script/joystick.py","file_name":"joystick.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"37412013855","text":"#dependcies\nimport os, csv\n\n#file path\ndir_path = os.path.dirname(os.path.realpath(__file__))\npypoll_data_path = os.path.join(dir_path, 'election_data.csv')\n\n#make dictionarys & lists\nvotes = {}\ntotal_votes = 0\nwin_votes = []\n\n#open file\nwith open(pypoll_data_path, 'r') as pypoll_data:\n csvreader = csv.reader(pypoll_data, delimiter=',')\n \n #loop through and count\n for rows in csvreader:\n\n if 'Candidate' in rows[2]:\n pass\n\n elif rows[2] not in votes.keys():\n votes[rows[2]] = 1\n total_votes +=1\n else:\n votes[rows[2]] += 1\n total_votes +=1\n\npypoll_results_path = os.path.join(dir_path, 'SPR_election_results.txt')\nwith open(pypoll_results_path,'w') as pypoll_results:\n#newline\n print('\\n\\n')\n #actual answer\n print('Election Results')\n print('---------------------------------')\n print('Total Votes: ' + str(total_votes))\n print('---------------------------------')\n\n pypoll_results.write('Election Results')\n pypoll_results.write('\\n---------------------------------\\n')\n pypoll_results.write('Total Votes: ' + str(total_votes))\n pypoll_results.write('\\n---------------------------------\\n')\n\n #loop again\n for key in votes.keys():\n\n #multiply and compute decimals/percent\n percent = 100 * (int(votes[key]) / (total_votes))\n\n #print strings and format to percent\n print(str(key) + ': ' + \"{0:.3f}%\".format(percent) + ', ' +\\\n str(votes[key]) +' votes')\n\n pypoll_results.write(str(key) + ': ' + \"{0:.3f}%\".format(percent) + ', ' +\\\n str(votes[key]) +' votes')\n \n win_votes.append(votes[key])\n\n print('---------------------------------')\n\n pypoll_results.write('\\n---------------------------------\\n')\n #compage max of votes to keys in dictionary\n for key, value in votes.items():\n\n if value == max(win_votes):\n\n print(key + ' won the election!')\n print('---------------------------------')\n\n pypoll_results.write(key + ' won the election!')\n 
pypoll_results.write('\\n---------------------------------\\n')","repo_name":"Sal-r/Python-Challenge","sub_path":"PyPoll/main_pypoll.py","file_name":"main_pypoll.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13449014947","text":"from __future__ import print_function\nimport sys\nimport os\nimport rpc\n\nimport base64\nimport tensorflow as tf\nimport numpy as np\n\nsys.path.append(\"/tf_models/research/slim\")\n\nfrom nets import inception_v3\nfrom preprocessing import inception_preprocessing\nfrom datasets import imagenet\n\nimage_size = inception_v3.inception_v3.default_image_size\nslim = tf.contrib.slim\n\nclass InceptionClassificationContainer(rpc.ModelContainerBase):\n\n def __init__(self, checkpoint_path):\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n with tf.device(\"/gpu:0\"):\n self.inputs = tf.placeholder(tf.float32, (None, image_size, image_size, 3))\n preprocessed_images = tf.map_fn(lambda input_img : inception_preprocessing.preprocess_image(input_img, image_size, image_size, is_training=False), self.inputs)\n\n with slim.arg_scope(inception_v3.inception_v3_arg_scope()):\n logits, _ = inception_v3.inception_v3(preprocessed_images, num_classes=1001, is_training=False)\n self.all_probabilities = tf.nn.softmax(logits)\n init_fn = slim.assign_from_checkpoint_fn(checkpoint_path, slim.get_model_variables(\"InceptionV3\"))\n\n init_fn(self.sess)\n\n def predict_floats(self, inputs):\n \"\"\"\n Parameters\n -------------\n inputs : np.ndarray\n An image, represented as a flattened 299 x 299 x 3 \n numpy array of floats\n \"\"\"\n reshaped_inputs = [input_item.reshape((299, 299, 3)) for input_item in inputs]\n all_probabilities = self.sess.run([self.all_probabilities], feed_dict={self.inputs: reshaped_inputs})\n\n outputs = []\n for input_probabilities in all_probabilities[0]:\n sorted_inds = [i[0] for i in sorted(\n enumerate(-input_probabilities), key=lambda x:x[1])]\n outputs.append(str(sorted_inds[0]))\n\n return outputs\n\nif __name__ == \"__main__\":\n print(\"Starting Inception Classification Container\")\n try:\n model_name = os.environ[\"CLIPPER_MODEL_NAME\"]\n except KeyError:\n print(\n \"ERROR: CLIPPER_MODEL_NAME environment variable must be set\",\n file=sys.stdout)\n sys.exit(1)\n try:\n model_version = os.environ[\"CLIPPER_MODEL_VERSION\"]\n except KeyError:\n print(\n \"ERROR: CLIPPER_MODEL_VERSION environment variable must be set\",\n file=sys.stdout)\n sys.exit(1)\n try:\n model_checkpoint_path = os.environ[\"CLIPPER_MODEL_PATH\"]\n except KeyError:\n print(\n \"ERROR: CLIPPER_MODEL_PATH environment variable must be set\",\n file=sys.stdout)\n sys.exit(1)\n\n ip = \"127.0.0.1\"\n if \"CLIPPER_IP\" in os.environ:\n ip = os.environ[\"CLIPPER_IP\"]\n else:\n print(\"Connecting to Clipper on localhost\")\n\n print(\"CLIPPER IP: {}\".format(ip))\n\n port = 7000\n if \"CLIPPER_PORT\" in os.environ:\n port = int(os.environ[\"CLIPPER_PORT\"])\n else:\n print(\"Connecting to Clipper with default port: 7000\")\n\n input_type = \"floats\"\n container = InceptionClassificationContainer(model_checkpoint_path)\n rpc_service = rpc.RPCService()\n rpc_service.start(container, ip, port, model_name, model_version,\n 
input_type)\n","repo_name":"simon-mo/inferline-models","sub_path":"model_containers/old_containers/impl/inception_container.py","file_name":"inception_container.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35844765969","text":"\ndef get_value_mappings(filename):\n with open(filename) as f:\n mappings = f.readlines()\n\n field_mappings = {}\n i = 0\n index_mappings = {}\n index_mappings[0] = 'Branch'\n index_mappings[1] = 'POLICY_NUMBER'\n index_mappings[2] = 'Product'\n index_mappings[3] = 'Zone'\n index_mappings[4] = 'Branch'\n index_mappings[5] = 'Status'\n index_mappings[6] = 'AGENT_NAME'\n index_mappings[7] = 'Employee_Name'\n index_mappings[8] = 'Business'\n\n for index_mapping in index_mappings:\n field_mappings[index_mappings[index_mapping]] = set()\n\n for mapping in mappings:\n if i > 0:\n mappings_tokens = mapping.split('~')\n row_count = 0\n for mapping_token in mappings_tokens:\n mapping_str = mapping_token.replace('\\n', '').strip().lower()\n if len(mapping_str) > 0:\n field_mappings[index_mappings[row_count]].add(mapping_str)\n row_count += 1\n i += 1\n return field_mappings\n\n\ndef get_field_mappings(filename):\n with open(filename) as f:\n mappings = f.readlines()\n\n field_mappings = {}\n for mapping in mappings:\n mapping_tokens = mapping.split(':')\n mapping_synonyms = mapping_tokens[1].split(',')\n mapping_synonyms_clean = []\n for mapping_synonym in mapping_synonyms:\n mapping_synonyms_clean.append(mapping_synonym.replace('\\n', '').strip().lower())\n field_mappings[mapping_tokens[0][0:str(mapping_tokens[0]).index(' (')]] = mapping_synonyms_clean\n return field_mappings\n\n\ndef get_comparison_query (qry):\n time_words = ['month', 'months', 'year', 'years', 'week', 'weeks', 'jan', 'january', 'feb', 'february', 'mar', 'march', 'apr', 'april', 'may', 'jun', 'june', 'jul', 'july', 'aug', 'august', 'sep', 'sept', 'september', 'oct', 'october', 'nov', 'november', 'dec', 'december']\n comparison_operators = ['>', '<', '=']\n final_comparison_text = []\n field_mappings = get_field_mappings('mappings.txt')\n compared_text = ''\n for operator in comparison_operators:\n if operator in qry:\n expression = qry.split(operator)\n for i in range (len(expression)):\n for char in expression[i].strip().split():\n if char != ' ' and char.isdigit():\n matched_operator = operator\n compared_text = expression[i].strip()\n break\n else:\n break\n\n if compared_text != '':\n compared_text = compared_text.split()\n matched_index = len(compared_text)\n for i in range (0, len(compared_text)):\n for fields in field_mappings.keys():\n if compared_text[i].lower() in field_mappings[fields]:\n if i < matched_index:\n matched_index = i\n for i in range (0, matched_index+1):\n final_comparison_text.append(compared_text[i])\n\n return \" \".join(final_comparison_text)\n\nprint (get_comparison_query(\"which agents had > 5 garbage branch from 12 march to 25 march\"))\nprint (get_comparison_query(\"how many branches have < 2000000 renewal premium fro 5 march to 15 march\"))\n\n","repo_name":"mishrap9/SQL-Query","sub_path":"comparison_operator.py","file_name":"comparison_operator.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72873888905","text":"import torch\nfrom torch.autograd import Variable\nimport os\nimport argparse\nfrom datetime import datetime\nimport torch.nn.functional as 
F\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom PIL import Image\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn as nn\nimport numpy as np\n\n\n\nclass ISIC_2017_csvDataset(data.Dataset):\n def __init__(self, csv_root):\n self.csv_root = csv_root\n self.Haminfo = self.csv2tensors(self.csv_root)\n self.size = self.Haminfo[0].size(0)\n\n def __getitem__(self, index):\n return self.Haminfo[0][index]\n\n def csv2tensors(self, folder_path):\n # read every csv file in the given folder and store its data as tensors\n csv_tensors = []\n for file_name in os.listdir(folder_path):\n if file_name.endswith('.csv'):\n # read the csv file\n file_path = os.path.join(folder_path, file_name)\n data = pd.read_csv(file_path)\n # encode the loaded columns as integer labels\n le = LabelEncoder()\n # ISIC_2017\n data['age_approximate'] = le.fit_transform(data['age_approximate'])\n data['sex'] = le.fit_transform(data['sex'])\n # # ph2_Dataset\n # data['Histological Diagnosis'] = le.fit_transform(data['Histological Diagnosis'])\n # data['Common Nevus'] = le.fit_transform(data['Common Nevus'])\n # data['Atypical Nevus'] = le.fit_transform(data['Atypical Nevus'])\n # data['Melanoma'] = le.fit_transform(data['Melanoma'])\n # data['Asymmetry'] = le.fit_transform(data['Asymmetry'])\n # data['Pigment Network'] = le.fit_transform(data['Pigment Network'])\n # data['Dots/Globules'] = le.fit_transform(data['Dots/Globules'])\n # data['Streaks'] = le.fit_transform(data['Streaks'])\n # data['Regression Areas'] = le.fit_transform(data['Regression Areas'])\n # data['Blue-Whitish Veil'] = le.fit_transform(data['Blue-Whitish Veil'])\n # data['White'] = le.fit_transform(data['White'])\n # data['Red'] = le.fit_transform(data['Red'])\n # data['Light-Brown'] = le.fit_transform(data['Light-Brown'])\n # data['Dark-Brown'] = le.fit_transform(data['Dark-Brown'])\n # data['Blue-Gray'] = le.fit_transform(data['Blue-Gray'])\n # data['Black'] = le.fit_transform(data['Black'])\n # # HAM10000\n # data['dx'] = le.fit_transform(data['dx'])\n # data['dx_type'] = le.fit_transform(data['dx_type'])\n # data['age'] = le.fit_transform(data['age'])\n # data['sex'] = le.fit_transform(data['sex'])\n # data['localization'] = le.fit_transform(data['localization'])\n # print(data)\n # store as a numpy array, then convert to a tensor\n temp_array = np.array(data)\n\n # convert the data to a tensor and append it to the list\n tensor = torch.tensor(temp_array)\n tensor = tensor.float()\n # print(type(tensor))\n bn = nn.BatchNorm1d(2) # size of the last dimension, i.e. the number of features;\n # each record here has 2 features and a feature is a single number, so BatchNorm1d is used.\n # In computer vision one image is one sample and its feature count is its channel count\n # (that is just how it is defined); there a feature is e.g. a 224*224 matrix, so\n # BatchNorm2d would be used instead.\n tensor = bn(tensor)\n # print(tensor)\n csv_tensors.append(tensor)\n # return the list of tensors built from the csv files\n return csv_tensors\n\n def __len__(self):\n return self.size\n\nclass ph2_csvDataset(data.Dataset):\n def __init__(self, csv_root):\n self.csv_root = csv_root\n self.Haminfo = self.csv2tensors(self.csv_root)\n self.size = self.Haminfo[0].size(0)\n\n def __getitem__(self, index):\n return self.Haminfo[0][index]\n\n def csv2tensors(self, folder_path):\n # read every csv file in the given folder and store its data as tensors\n csv_tensors = []\n for file_name in os.listdir(folder_path):\n if file_name.endswith('.csv'):\n # read the csv file\n file_path = os.path.join(folder_path, file_name)\n data = pd.read_csv(file_path)\n # encode the loaded columns as integer labels\n le = LabelEncoder()\n # ISIC_2017\n data['age_approximate'] = le.fit_transform(data['age_approximate'])\n data['sex'] = 
le.fit_transform(data['sex'])\n # ph2_Dataset\n data['Histological Diagnosis'] = le.fit_transform(data['Histological Diagnosis'])\n data['Common Nevus'] = le.fit_transform(data['Common Nevus'])\n data['Atypical Nevus'] = le.fit_transform(data['Atypical Nevus'])\n data['Melanoma'] = le.fit_transform(data['Melanoma'])\n data['Asymmetry'] = le.fit_transform(data['Asymmetry'])\n data['Pigment Network'] = le.fit_transform(data['Pigment Network'])\n data['Dots/Globules'] = le.fit_transform(data['Dots/Globules'])\n data['Streaks'] = le.fit_transform(data['Streaks'])\n data['Regression Areas'] = le.fit_transform(data['Regression Areas'])\n data['Blue-Whitish Veil'] = le.fit_transform(data['Blue-Whitish Veil'])\n data['White'] = le.fit_transform(data['White'])\n data['Red'] = le.fit_transform(data['Red'])\n data['Light-Brown'] = le.fit_transform(data['Light-Brown'])\n data['Dark-Brown'] = le.fit_transform(data['Dark-Brown'])\n data['Blue-Gray'] = le.fit_transform(data['Blue-Gray'])\n data['Black'] = le.fit_transform(data['Black'])\n # # HAM10000\n # data['dx'] = le.fit_transform(data['dx'])\n # data['dx_type'] = le.fit_transform(data['dx_type'])\n # data['age'] = le.fit_transform(data['age'])\n # data['sex'] = le.fit_transform(data['sex'])\n # data['localization'] = le.fit_transform(data['localization'])\n # print(data)\n # store as a numpy array, then convert to a tensor\n temp_array = np.array(data)\n\n # convert the data to a tensor and append it to the list\n tensor = torch.tensor(temp_array)\n tensor = tensor.float()\n # print(type(tensor))\n bn = nn.BatchNorm1d(16) # size of the last dimension, i.e. the number of features;\n # each record here has 16 features and a feature is a single number, so BatchNorm1d is used.\n # In computer vision one image is one sample and its feature count is its channel count\n # (that is just how it is defined); there a feature is e.g. a 224*224 matrix, so\n # BatchNorm2d would be used instead.\n tensor = bn(tensor)\n # print(tensor)\n csv_tensors.append(tensor)\n # return the list of tensors built from the csv files\n return csv_tensors\n\n def __len__(self):\n return self.size\n\nclass Ham_csvDataset(data.Dataset):\n def __init__(self, csv_root):\n self.csv_root = csv_root\n self.Haminfo = self.csv2tensors(self.csv_root)\n self.size = self.Haminfo[0].size(0)\n\n def __getitem__(self, index):\n return self.Haminfo[0][index]\n\n def csv2tensors(self, folder_path):\n # read every csv file in the given folder and store its data as tensors\n csv_tensors = []\n for file_name in os.listdir(folder_path):\n if file_name.endswith('.csv'):\n # read the csv file\n file_path = os.path.join(folder_path, file_name)\n data = pd.read_csv(file_path)\n # encode the loaded columns as integer labels\n le = LabelEncoder()\n data['dx'] = le.fit_transform(data['dx'])\n data['dx_type'] = le.fit_transform(data['dx_type'])\n data['age'] = le.fit_transform(data['age'])\n data['sex'] = le.fit_transform(data['sex'])\n data['localization'] = le.fit_transform(data['localization'])\n # print(data)\n # store as a numpy array, then convert to a tensor\n temp_array = np.array(data)\n\n # convert the data to a tensor and append it to the list\n tensor = torch.tensor(temp_array)\n tensor = tensor.float()\n # print(type(tensor))\n bn = nn.BatchNorm1d(5) # size of the last dimension, i.e. the number of features;\n # each record here has 5 features and a feature is a single number, so BatchNorm1d is used.\n # In computer vision one image is one sample and its feature count is its channel count\n # (that is just how it is defined); there a feature is e.g. a 224*224 matrix, so\n # BatchNorm2d would be used instead.\n tensor = bn(tensor)\n # print(tensor)\n csv_tensors.append(tensor)\n # return the list of tensors built from the csv files\n return csv_tensors\n\n def __len__(self):\n return self.size\n\n\ndef get_loader_csv(csv_root, batchsize, shuffle=True, pin_memory=True):\n dataset = Ham_csvDataset(csv_root)\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batchsize,\n shuffle=shuffle,\n pin_memory=pin_memory)\n return data_loader\n\n\nclass SkinDataset(data.Dataset):\n def __init__(self, image_root, gt_root, trainsize):\n self.trainsize = 
trainsize\n self.images = [image_root + f for f in os.listdir(image_root) if\n f.endswith('.jpg') or f.endswith('.png') or f.endswith('.bmp')]\n self.gts = [gt_root + f for f in os.listdir(gt_root) if f.endswith('.png') or f.endswith('.bmp')]\n self.images = sorted(self.images)\n self.gts = sorted(self.gts)\n self.filter_files()\n self.size = len(self.images)\n self.img_transform = transforms.Compose([\n transforms.Resize((self.trainsize, self.trainsize)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n self.gt_transform = transforms.Compose([\n transforms.Resize((self.trainsize, self.trainsize)),\n transforms.ToTensor()])\n\n def __getitem__(self, index):\n image = self.rgb_loader(self.images[index])\n gt = self.binary_loader(self.gts[index])\n image = self.img_transform(image)\n gt = self.gt_transform(gt)\n return image, gt\n\n def filter_files(self):\n assert len(self.images) == len(self.gts)\n images = []\n gts = []\n for img_path, gt_path in zip(self.images, self.gts):\n img = Image.open(img_path)\n gt = Image.open(gt_path)\n if img.size == gt.size:\n images.append(img_path)\n gts.append(gt_path)\n self.images = images\n self.gts = gts\n\n def rgb_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n def binary_loader(self, path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n # return img.convert('1')\n return img.convert('L')\n\n def resize(self, img, gt):\n assert img.size == gt.size\n w, h = img.size\n if h < self.trainsize or w < self.trainsize:\n h = max(h, self.trainsize)\n w = max(w, self.trainsize)\n return img.resize((w, h), Image.BILINEAR), gt.resize((w, h), Image.NEAREST)\n else:\n return img, gt\n\n def __len__(self):\n return self.size\n\n\ndef get_loader(image_root, gt_root, batchsize, trainsize, shuffle=True, num_workers=4, pin_memory=True):\n dataset = SkinDataset(image_root, gt_root, trainsize)\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batchsize,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=pin_memory)\n return data_loader\n\n\nclass Bottle2neck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale=4, stype='normal'):\n\n super(Bottle2neck, self).__init__()\n\n width = int(math.floor(planes * (baseWidth / 64.0)))\n self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width * scale)\n\n if scale == 1:\n self.nums = 1\n else:\n self.nums = scale - 1\n if stype == 'stage':\n self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)\n convs = []\n bns = []\n for i in range(self.nums):\n convs.append(nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, bias=False))\n bns.append(nn.BatchNorm2d(width))\n self.convs = nn.ModuleList(convs)\n self.bns = nn.ModuleList(bns)\n\n self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stype = stype\n self.scale = scale\n self.width = width\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n spx = torch.split(out, self.width, 1)\n for i in range(self.nums):\n if i == 0 or self.stype == 'stage':\n sp = spx[i]\n else:\n sp = sp + spx[i]\n sp = self.convs[i](sp)\n sp = self.relu(self.bns[i](sp))\n if i == 0:\n out = sp\n else:\n out = 
torch.cat((out, sp), 1)\n if self.scale != 1 and self.stype == 'normal':\n out = torch.cat((out, spx[self.nums]), 1)\n elif self.scale != 1 and self.stype == 'stage':\n out = torch.cat((out, self.pool(spx[self.nums])), 1)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Res2Net(nn.Module):\n\n def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):\n self.inplanes = 64\n super(Res2Net, self).__init__()\n self.baseWidth = baseWidth\n self.scale = scale\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 32, 3, 2, 1, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, 3, 1, 1, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, 3, 1, 1, bias=False)\n )\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.AvgPool2d(kernel_size=stride, stride=stride,\n ceil_mode=True, count_include_pad=False),\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample=downsample,\n stype='stage', baseWidth=self.baseWidth, scale=self.scale))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, baseWidth=self.baseWidth, scale=self.scale))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef res2net50_v1b(pretrained=False, **kwargs):\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model.urls['res2net50_v1b_26w_4s']))\n return model\n\n\ndef res2net101_v1b(pretrained=False, **kwargs):\n model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model.urls['res2net101_v1b_26w_4s']))\n return model\n\n\ndef res2net50_v1b_26w_4s(pretrained=False, **kwargs):\n model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model_state = torch.load('Snapshots/Res2net/res2net50.pth')\n model.load_state_dict(model_state)\n # lib.load_state_dict(model_zoo.load_url(model_urls['res2net50_v1b_26w_4s']))\n return model\n\n\ndef res2net101_v1b_26w_4s(pretrained=False, **kwargs):\n model = Res2Net(Bottle2neck, [3, 4, 23, 3], 
baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model.urls['res2net101_v1b_26w_4s']))\n return model\n\n\ndef res2net152_v1b_26w_4s(pretrained=False, **kwargs):\n model = Res2Net(Bottle2neck, [3, 8, 36, 3], baseWidth=26, scale=4, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model.urls['res2net152_v1b_26w_4s']))\n return model\n\n\ndef clip_gradient(optimizer, grad_clip):\n for group in optimizer.param_groups:\n for param in group['params']:\n if param.grad is not None:\n param.grad.data.clamp_(-grad_clip, grad_clip)\n\n\ndef adjust_lr(optimizer, init_lr, epoch, decay_rate=0.1, decay_epoch=30):\n decay = decay_rate ** (epoch // decay_epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] *= decay\n\n\nclass AvgMeter(object):\n def __init__(self, num=40):\n self.num = num\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.losses = []\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n self.losses.append(val)\n\n def show(self):\n return torch.mean(torch.stack(self.losses[np.maximum(len(self.losses) - self.num, 0):]))\n\n\n\n\n\nclass BasicConv2d(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_planes, out_planes,\n kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=False)\n self.bn = nn.BatchNorm2d(out_planes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n\nclass RFB_modified(nn.Module):\n def __init__(self, in_channel, out_channel):\n super(RFB_modified, self).__init__()\n self.relu = nn.ReLU(True)\n self.branch0 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n )\n self.branch1 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n BasicConv2d(out_channel, out_channel, kernel_size=(1, 3), padding=(0, 1)),\n BasicConv2d(out_channel, out_channel, kernel_size=(3, 1), padding=(1, 0)),\n BasicConv2d(out_channel, out_channel, 3, padding=3, dilation=3)\n )\n self.branch2 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n BasicConv2d(out_channel, out_channel, kernel_size=(1, 5), padding=(0, 2)),\n BasicConv2d(out_channel, out_channel, kernel_size=(5, 1), padding=(2, 0)),\n BasicConv2d(out_channel, out_channel, 3, padding=5, dilation=5)\n )\n self.branch3 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n BasicConv2d(out_channel, out_channel, kernel_size=(1, 7), padding=(0, 3)),\n BasicConv2d(out_channel, out_channel, kernel_size=(7, 1), padding=(3, 0)),\n BasicConv2d(out_channel, out_channel, 3, padding=7, dilation=7)\n )\n self.conv_cat = BasicConv2d(4 * out_channel, out_channel, 3, padding=1)\n self.conv_res = BasicConv2d(in_channel, out_channel, 1)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n x_cat = self.conv_cat(torch.cat((x0, x1, x2, x3), 1))\n\n x = self.relu(x_cat + self.conv_res(x))\n return x\n\n\nclass MLF(nn.Module):\n # dense aggregation, it can be replaced by other aggregation previous, such as DSS, amulet, and so on.\n # used after MSF\n def __init__(self, channel, n_class):\n super(MLF, self).__init__()\n self.relu = nn.ReLU(True)\n\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.conv_upsample1 = 
BasicConv2d(channel, channel, 3, padding=1)\n self.conv_upsample2 = BasicConv2d(channel, channel, 3, padding=1)\n self.conv_upsample3 = BasicConv2d(channel, channel, 3, padding=1)\n self.conv_upsample4 = BasicConv2d(channel, channel, 3, padding=1)\n self.conv_upsample5 = BasicConv2d(2 * channel, 2 * channel, 3, padding=1)\n\n self.conv_concat2 = BasicConv2d(2 * channel, 2 * channel, 3, padding=1)\n self.conv_concat3 = BasicConv2d(3 * channel, 3 * channel, 3, padding=1)\n self.conv4 = BasicConv2d(3 * channel, 3 * channel, 3, padding=1)\n self.conv5 = nn.Conv2d(3 * channel, 1, 1)\n\n def forward(self, x1, x2, x3):\n x1_1 = x1\n x2_1 = self.conv_upsample1(self.upsample(x1)) * x2\n x3_1 = self.conv_upsample2(self.upsample(self.upsample(x1))) \\\n * self.conv_upsample3(self.upsample(x2)) * x3\n\n x2_2 = torch.cat((x2_1, self.conv_upsample4(self.upsample(x1_1))), 1)\n x2_2 = self.conv_concat2(x2_2)\n\n x3_2 = torch.cat((x3_1, self.conv_upsample5(self.upsample(x2_2))), 1)\n x3_2 = self.conv_concat3(x3_2)\n\n x = self.conv4(x3_2)\n x = self.conv5(x)\n\n return x\n\n\nclass MLP(nn.Module):\n\n def __init__(self,\n activation='relu',\n dropout=0.1):\n super(MLP, self).__init__()\n self.input_dim = 5\n self.dimensions = [14, 32]\n self.activation = activation\n self.dropout = dropout\n # Modules\n self.linears = nn.ModuleList([nn.Linear(self.input_dim, self.dimensions[0])])\n for din, dout in zip(self.dimensions[:-1], self.dimensions[1:]):\n self.linears.append(nn.Linear(din, dout))\n\n def forward(self, x):\n x = x.float()\n for i, lin in enumerate(self.linears):\n x = lin(x)\n if (i < len(self.linears) - 1):\n x = F.__dict__[self.activation](x)\n if self.dropout > 0:\n x = F.dropout(x, self.dropout, training=self.training)\n return x\n\n\nclass MRML_Net(nn.Module):\n # res2net based encoder decoder\n def __init__(self, channel=32, n_class=1,\n mm_dim=1200,\n factor=2,\n activ_input='relu',\n activ_output='relu',\n normalize=True,\n dropout_input=0.,\n dropout_pre_norm=0.,\n dropout_output=0.):\n super(MRML_Net, self).__init__()\n # ---- ResNet Backbone ----\n self.resnet = res2net50_v1b_26w_4s(pretrained=False)\n # ---- Receptive Field Block like module ----\n self.rfb2_1 = RFB_modified(512, channel)\n self.rfb3_1 = RFB_modified(1024, channel)\n self.rfb4_1 = RFB_modified(2048, channel)\n # ---- Multilevel_fushion ----\n self.MLF = MLF(channel, n_class)\n # ---- MLP ----\n self.MLP = MLP()\n # ---- MFB ----\n self.input_dims0 = 32\n self.input_dims1 = 32 * 32\n self.input_dims2 = 44 * 44\n self.input_dims3 = 56 * 56\n self.mm_dim = mm_dim\n self.factor = factor\n self.output_dims1 = 32 * 32\n self.output_dims2 = 44 * 44\n self.output_dims3 = 56 * 56\n self.activ_input = activ_input\n self.activ_output = activ_output\n self.normalize = normalize\n self.dropout_input = dropout_input\n self.dropout_pre_norm = dropout_pre_norm\n self.dropout_output = dropout_output\n # Modules\n self.linear0 = nn.Linear(self.input_dims0, mm_dim * factor)\n self.linear1 = nn.Linear(self.input_dims1, mm_dim * factor)\n self.linear2 = nn.Linear(self.input_dims2, mm_dim * factor)\n self.linear3 = nn.Linear(self.input_dims3, mm_dim * factor)\n self.linear_out1 = nn.Linear(mm_dim, self.output_dims1)\n self.linear_out2 = nn.Linear(mm_dim, self.output_dims2)\n self.linear_out3 = nn.Linear(mm_dim, self.output_dims3)\n self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)\n\n # ---- reverse attention branch 4 ----\n self.ra4_conv1 = BasicConv2d(2048, 256, kernel_size=1)\n self.ra4_conv2 = BasicConv2d(256, 
256, kernel_size=5, padding=2)\n self.ra4_conv3 = BasicConv2d(256, 256, kernel_size=5, padding=2)\n self.ra4_conv4 = BasicConv2d(256, 256, kernel_size=5, padding=2)\n self.ra4_conv5 = BasicConv2d(256, n_class, kernel_size=1)\n # ---- reverse attention branch 3 ----\n self.ra3_conv1 = BasicConv2d(1024, 64, kernel_size=1)\n self.ra3_conv2 = BasicConv2d(64, 64, kernel_size=3, padding=1)\n self.ra3_conv3 = BasicConv2d(64, 64, kernel_size=3, padding=1)\n self.ra3_conv4 = BasicConv2d(64, n_class, kernel_size=3, padding=1)\n # ---- reverse attention branch 2 ----\n self.ra2_conv1 = BasicConv2d(512, 64, kernel_size=1)\n self.ra2_conv2 = BasicConv2d(64, 64, kernel_size=3, padding=1)\n self.ra2_conv3 = BasicConv2d(64, 64, kernel_size=3, padding=1)\n self.ra2_conv4 = BasicConv2d(64, n_class, kernel_size=3, padding=1)\n\n\n def forward(self, x, y):\n x = self.resnet.conv1(x)\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x) # bs, 64, 88, 88\n # ---- low-level features ----\n x1 = self.resnet.layer1(x) # bs, 256, 88, 88\n x2 = self.resnet.layer2(x1) # bs, 512, 44, 44\n # ---- high-level features ----\n # x2 = self.resnet.layer2(x1) # bs, 512, 44, 44\n x3 = self.resnet.layer3(x2) # bs, 1024, 22, 22\n x4 = self.resnet.layer4(x3) # bs, 2048, 11, 11\n x2_rfb = self.rfb2_1(x2) # channel -> 32\n x3_rfb = self.rfb3_1(x3) # channel -> 32\n x4_rfb = self.rfb4_1(x4) # channel -> 32\n\n\n # ---- Multilevel_fushion ----\n mlf = self.MLF(x4_rfb, x3_rfb, x2_rfb)\n y = self.MLP(y)\n\n lateral_map_5 = F.interpolate(mlf, scale_factor=8,\n mode='bilinear') # NOTES: Sup-1 (bs, 1, 44, 44) -> (bs, 1, 352, 352)\n # mlf = mlf.flatten(1)\n # print(mlf.size())\n # ---- MFB Fushion Branch----\n fl_mlf = mlf.flatten(1)\n x0 = self.linear0(y)\n if mlf.size(2) == 32:\n x1 = self.linear1(fl_mlf)\n elif mlf.size(2) == 44:\n x1 = self.linear2(fl_mlf)\n else:\n x1 = self.linear3(fl_mlf)\n if self.activ_input:\n x0 = getattr(F, self.activ_input)(x0)\n x1 = getattr(F, self.activ_input)(x1)\n\n if self.dropout_input > 0:\n x0 = F.dropout(x0, p=self.dropout_input, training=self.training)\n x1 = F.dropout(x1, p=self.dropout_input, training=self.training)\n z = x0 * x1\n if self.dropout_pre_norm > 0:\n z = F.dropout(z, p=self.dropout_pre_norm, training=self.training)\n z = z.view(-1, int(z.size(1) / self.factor), self.factor)\n z = z.sum(2)\n if self.normalize:\n z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))\n z = F.normalize(z, p=2, dim=0)\n\n if mlf.size(2) == 32:\n z = self.linear_out1(z)\n elif mlf.size(2) == 44:\n z = self.linear_out2(z)\n else:\n z = self.linear_out3(z)\n z = getattr(F, self.activ_output)(z)\n z = F.dropout(z, p=self.dropout_output, training=self.training)\n z = z.reshape([mlf.size(0), 1, mlf.size(2), mlf.size(3)])\n # print(\"z\", z.size())\n # print(z)\n mlf = mlf + z\n\n # ---- reverse attention branch_4 ----\n crop_4 = F.interpolate(mlf, scale_factor=0.25, mode='bilinear')\n x = -1 * (torch.sigmoid(crop_4)) + 1\n x = x.expand(-1, 2048, -1, -1).mul(x4)\n x = self.ra4_conv1(x)\n x = F.relu(self.ra4_conv2(x))\n x = F.relu(self.ra4_conv3(x))\n x = F.relu(self.ra4_conv4(x))\n ra4_feat = self.ra4_conv5(x)\n x = ra4_feat + crop_4\n lateral_map_4 = F.interpolate(x, scale_factor=32,\n mode='bilinear') # NOTES: Sup-2 (bs, 1, 11, 11) -> (bs, 1, 352, 352)\n\n # ---- reverse attention branch_3 ----\n crop_3 = F.interpolate(x, scale_factor=2, mode='bilinear')\n x = -1 * (torch.sigmoid(crop_3)) + 1\n x = x.expand(-1, 1024, -1, -1).mul(x3)\n x = self.ra3_conv1(x)\n x = 
F.relu(self.ra3_conv2(x))\n x = F.relu(self.ra3_conv3(x))\n ra3_feat = self.ra3_conv4(x)\n x = ra3_feat + crop_3\n lateral_map_3 = F.interpolate(x, scale_factor=16,\n mode='bilinear') # NOTES: Sup-3 (bs, 1, 22, 22) -> (bs, 1, 352, 352)\n\n # ---- reverse attention branch_2 ----\n crop_2 = F.interpolate(x, scale_factor=2, mode='bilinear')\n x = -1 * (torch.sigmoid(crop_2)) + 1\n x = x.expand(-1, 512, -1, -1).mul(x2)\n x = self.ra2_conv1(x)\n x = F.relu(self.ra2_conv2(x))\n x = F.relu(self.ra2_conv3(x))\n ra2_feat = self.ra2_conv4(x)\n x = ra2_feat + crop_2\n lateral_map_2 = F.interpolate(x, scale_factor=8,\n mode='bilinear') # NOTES: Sup-4 (bs, 1, 44, 44) -> (bs, 1, 352, 352)\n\n return lateral_map_5, lateral_map_4, lateral_map_3, lateral_map_2\n\n\"\"\"\n\nTraining \n\n\n\"\"\"\n\n\ndef structure_loss(pred, mask):\n weit = 1 + 5 * torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)\n wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')\n wbce = (weit * wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))\n\n pred = torch.sigmoid(pred)\n inter = ((pred * mask) * weit).sum(dim=(2, 3))\n union = ((pred + mask) * weit).sum(dim=(2, 3))\n wiou = 1 - (inter + 1) / (union - inter + 1)\n return (wbce + wiou).mean()\n\nlosslist = []\n\ndef train(train_loader, train_loader_csv, model, optimizer, epoch):\n model.train()\n # ---- multi-scale training ----\n size_rates = [0.75, 1, 1.25]\n save_path = 'Snapshots/{}/'.format(opt.train_save)\n loss_record2, loss_record3, loss_record4, loss_record5 = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()\n for pack, info in zip(enumerate(train_loader, start=1), enumerate(train_loader_csv, start=1)):\n for rate in size_rates:\n optimizer.zero_grad()\n # ---- data prepare ----\n i, package = pack\n j, haminfo = info\n images = package[0]\n gts = package[1]\n images = Variable(images).cuda()\n gts = Variable(gts).cuda()\n haminfo = Variable(haminfo).cuda()\n # ---- rescale ----\n trainsize = int(round(opt.trainsize * rate / 32) * 32)\n if rate != 1:\n images = F.interpolate(images, size=(trainsize, trainsize), mode='bilinear', align_corners=True)\n gts = F.interpolate(gts, size=(trainsize, trainsize), mode='bilinear', align_corners=True)\n # ---- forward ----\n lateral_map_5, lateral_map_4, lateral_map_3, lateral_map_2 = model(images, haminfo)\n # ---- loss function ----\n loss5 = structure_loss(lateral_map_5, gts)\n loss4 = structure_loss(lateral_map_4, gts)\n loss3 = structure_loss(lateral_map_3, gts)\n loss2 = structure_loss(lateral_map_2, gts)\n loss = loss2 + loss3 + loss4 + loss5 # TODO: try different weights for loss\n # ---- backward ----\n loss.backward()\n clip_gradient(optimizer, opt.clip)\n optimizer.step()\n # ---- recording loss ----\n if rate == 1:\n loss_record2.update(loss2.data, opt.batchsize)\n loss_record3.update(loss3.data, opt.batchsize)\n loss_record4.update(loss4.data, opt.batchsize)\n loss_record5.update(loss5.data, opt.batchsize)\n # ---- train visualization ----\n if i % 20 == 0 or i == total_step:\n print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], '\n '[lateral-2: {:.4f}, lateral-3: {:0.4f}, lateral-4: {:0.4f}, lateral-5: {:0.4f}]'.\n format(datetime.now(), epoch, opt.epoch, i, total_step,\n loss_record2.show(), loss_record3.show(), loss_record4.show(), loss_record5.show()))\n\n # save loss\n os.makedirs(save_path, exist_ok=True)\n losslist.append(loss_record2.show().cpu().detach().numpy())\n np.savetxt(save_path + 'train_loss.csv', losslist, delimiter=',')\n\n if (epoch + 1) % 10 == 0:\n 
torch.save(model.state_dict(), save_path + 'MFSNet_dx.pth')\n print('[Saving Snapshot:]', save_path + 'MFSNet_dx.pth')\n\n\n# noinspection LanguageDetectionInspection\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epoch', type=int,\n default=100, help='epoch number')\n parser.add_argument('--lr', type=float,\n default=1e-4, help='learning rate')\n parser.add_argument('--batchsize', type=int,\n default=3, help='training batch size')\n parser.add_argument('--trainsize', type=int,\n default=352, help='training dataset size')\n parser.add_argument('--clip', type=float,\n default=0.5, help='gradient clipping margin')\n parser.add_argument('--decay_rate', type=float,\n default=0.05, help='decay rate of learning rate')\n parser.add_argument('--decay_epoch', type=int,\n default=25, help='every n epochs decay learning rate')\n parser.add_argument('--train_path', type=str,\n default='train/HAM10000', help='path to train dataset')\n parser.add_argument('--train_save', type=str,\n default='HAM10000/MFSNet_v2/testlabel')\n opt = parser.parse_args()\n\n # ---- build models ----\n # torch.cuda.set_device(0) # set your gpu device\n model = MRML_Net().cuda()\n\n # ---- flops and params ----\n params = model.parameters()\n optimizer = torch.optim.Adam(params, opt.lr)\n\n image_root = '{}/images/'.format(opt.train_path)\n gt_root = '{}/masks/'.format(opt.train_path)\n csv_root = '{}'.format(opt.train_path)\n\n train_loader = get_loader(image_root, gt_root, batchsize=opt.batchsize, trainsize=opt.trainsize)\n train_loader_csv = get_loader_csv(csv_root, batchsize=opt.batchsize)\n total_step = len(train_loader)\n print(\"#\" * 20, \"Start Training\", \"#\" * 20)\n\n for epoch in range(1, opt.epoch):\n adjust_lr(optimizer, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)\n train(train_loader, train_loader_csv, model, optimizer, epoch)\n","repo_name":"ukeLin/MRML-Net","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":37568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26132542577","text":"import glob\nimport os\nimport os.path as op\nimport pickle\nimport numpy as np\nimport neuraltoolkit as ntk\nfrom musclebeachtools import mbt_neurons as mb\n\n\ndef siout(sorted_data, noflylist, rec_time,\n file_datetime_list, ecube_time_list,\n amps=None,\n wf_b=None, wf_e=None,\n filt=None,\n t_ch_size=None,\n model_file='/media/HlabShare/models/xgboost_autoqual_prob',\n sex=None, birthday=None, species=None,\n animal_name=None,\n region_loc=None,\n genotype=None,\n expt_cond=None,\n lskipautoqual=None):\n '''\n function to load neuron objects from the spike interface output\n\n Parameters\n ----------\n datadir : Location of output files\n filenum : File number if there is many blocks (default 0)\n prbnum : Probe number (default 1). Range 1-10.\n filt : filter by quality. filt=[1], loads only quality 1 neurons.\n\n\n Returns\n -------\n n1 : All neurons as a list. 
For example n1[0] is first neuron.\n\n Raises\n ------\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n datadir = \"/hlabhome/kiranbn/Animalname/final/\"\n n1 = ksout(datadir, filenum=0, prbnum=1, filt=[1, 3])\n\n\n '''\n\n # filt to empty list\n if filt is None:\n filt = []\n\n # print(\"Finding unit ids\")\n unique_clusters = sorted_data.get_unit_ids()\n\n # Sampling rate\n # print('Finding sampling rate')\n # mb.Neuron.fs = sorted_data.get_sampling_frequency()\n fs = sorted_data.get_sampling_frequency()\n print(\"Sampling frequency \", fs)\n n = []\n\n # Start and end time\n # print('Finding start and end time')\n # start_time = raw_data_start\n # end_time = raw_data_end\n # # Convert to seconds\n # end_time = (np.double(np.int64(end_time) - np.int64(start_time))/1e9)\n # # reset start to zero\n # start_time = np.double(0.0)\n # print((end_time - start_time))\n # assert (end_time - start_time) > 1.0, \\\n # 'Please check start and end time is more than few seconds apart'\n # print('Start and end times are %f and %f', start_time, end_time)\n\n # mb.Neuron.start_time = 0.0\n start_time = 0.0\n\n # KIRAN this shouldn't be hard coded? max of spt below\n # mb.Neuron.end_time = 300.0\n end_time = rec_time / fs\n print(\"Start time \", start_time, \" end time \", end_time)\n\n # Loop through unique clusters and make neuron list\n for unit_idx, unit in enumerate(unique_clusters):\n ch_group = sorted_data.get_unit_property(unit, \"group\")\n # print(\"Total i \", i, \" unit \", unit)\n if unit_idx not in noflylist:\n # print(\"qual \", cluster_quals[i])\n if len(filt) == 0:\n # this is the unit number for indexing spike times\n # and unit properties\n sp_c = [unit_idx]\n # these are spike times\n sp_t = sorted_data.get_unit_spike_train(unit)\n qual = 0\n # mean WF @ Fs of recording\n mwf_list = sorted_data.get_unit_property(unit, \"template\").T\n # print(\"mwf_list \", mwf_list)\n # print(\"len mwf_list \", len(mwf_list))\n # mwfs = np.arange(0, 100)\n # KIRAN please spline this\n # mwfs = sorted_data.get_unit_property(unit, \"template\").T\n tmp_max_channel = sorted_data.get_unit_property(unit,\n 'max_channel')\n # print(\"t_ch_size \", t_ch_size)\n max_channel = \\\n [sorted_data.get_unit_property(unit, 'max_channel') +\n (t_ch_size * ch_group)]\n # try:\n # print(\"Skipped i \", i, \" unit \", unit)\n print(\"unit_idx \", unit_idx, \" unit \", unit,\n \" max_channel : \", tmp_max_channel,\n \" \", max_channel)\n mwf = [row[tmp_max_channel] for row in mwf_list]\n t = np.arange(0, len(mwf))\n _, mwfs = ntk.data_intpl(t, mwf, 3, intpl_kind='cubic')\n\n # def __init__(self, sp_c, sp_t, qual, mwf, mwfs, max_channel,\n # fs=25000, start_time=0, end_time=12 * 60 * 60,\n # mwft=None,\n # sex=None, birthday=None, species=None):\n if ((len(file_datetime_list) == 2) and\n (len(ecube_time_list) == 2)):\n if (amps is not None):\n # print(\"amps not None\")\n # print(\"amps not None\")\n # print(\"mean amps \", unit_idx)\n tmp_mean_amps = np.mean(np.asarray(amps[unit_idx]))\n if tmp_mean_amps >= 20:\n print(\"mean amps \", unit_idx, \" \", tmp_mean_amps,\n flush=True)\n if ((wf_b is not None) and (wf_e is not None)):\n n.append(mb.Neuron(sp_c, sp_t, qual, mwf,\n mwfs, max_channel,\n fs=fs,\n start_time=start_time,\n end_time=end_time,\n mwft=mwf_list,\n rstart_time=str(file_datetime_list\n [0]),\n rend_time=str(file_datetime_list[1]),\n estart_time=int(ecube_time_list\n [0]),\n eend_time=int(ecube_time_list\n [1]),\n sp_amp=amps[unit_idx],\n wf_b=np.asarray(wf_b[unit_idx]).T,\n 
wf_e=np.asarray(wf_e[unit_idx]).T,\n sex=sex, birthday=birthday,\n species=species,\n animal_name=animal_name,\n region_loc=region_loc,\n genotype=genotype,\n expt_cond=expt_cond))\n #\n elif (wf_b is None):\n n.append(mb.Neuron(sp_c, sp_t, qual, mwf,\n mwfs, max_channel,\n fs=fs,\n start_time=start_time,\n end_time=end_time,\n mwft=mwf_list,\n rstart_time=str(file_datetime_list\n [0]),\n rend_time=str(file_datetime_list[1]),\n estart_time=int(ecube_time_list\n [0]),\n eend_time=int(ecube_time_list\n [1]),\n sp_amp=amps[unit_idx],\n sex=sex, birthday=birthday,\n species=species,\n animal_name=animal_name,\n region_loc=region_loc,\n genotype=genotype,\n expt_cond=expt_cond))\n\n else:\n print(\"Not added mean amps \", unit_idx, \" \",\n tmp_mean_amps, flush=True)\n\n elif (amps is None):\n n.append(mb.Neuron(sp_c, sp_t, qual, mwf,\n mwfs, max_channel,\n fs=fs,\n start_time=start_time, end_time=end_time,\n mwft=mwf_list,\n rstart_time=str(file_datetime_list[0]),\n rend_time=str(file_datetime_list[1]),\n estart_time=int(ecube_time_list[0]),\n eend_time=int(ecube_time_list[1]),\n sex=sex, birthday=birthday, species=species,\n animal_name=animal_name,\n region_loc=region_loc,\n genotype=genotype,\n expt_cond=expt_cond))\n elif ((len(file_datetime_list) == 2) and\n (len(ecube_time_list) == 0)):\n n.append(mb.Neuron(sp_c, sp_t, qual, mwf,\n mwfs, max_channel,\n fs=fs,\n start_time=start_time, end_time=end_time,\n mwft=mwf_list,\n rstart_time=str(file_datetime_list[0]),\n rend_time=str(file_datetime_list[1]),\n sex=sex, birthday=birthday, species=species,\n animal_name=animal_name,\n region_loc=region_loc,\n genotype=genotype,\n expt_cond=expt_cond))\n else:\n n.append(mb.Neuron(sp_c, sp_t, qual, mwf,\n mwfs, max_channel,\n fs=fs,\n start_time=start_time, end_time=end_time,\n mwft=mwf_list,\n sex=sex, birthday=birthday, species=species,\n animal_name=animal_name,\n region_loc=region_loc))\n # except:\n # pdb.set_trace()\n elif len(filt) > 0:\n print(\"sorry, we don't have qualities set yet, \"\n \"run again with no filter\")\n\n print(f'Found {len(n)} neurons\\n')\n if lskipautoqual is None:\n if op.exists(model_file) and op.isfile(model_file):\n print(\"model_file was used \", model_file, flush=True)\n mb.autoqual(n, model_file)\n else:\n print(\"Model_file {} does not exists\".format(model_file),\n flush=True)\n print(\"neurons[0].quality not calculated\")\n\n return n\n\n\ndef mbt_spkinterface_out(\n clust_out_dir,\n model_file='/media/HlabShare/models/xgboost_autoqual_prob',\n sex=None, birthday=None, species=None,\n animal_name=None,\n region_loc=None,\n genotype=None,\n expt_cond=None):\n\n '''\n Function loads spikeinterface output to neuron\n\n mbt_spkinterface_out('spikeinterface_output_directory',\n model_file,\n sex=None, birthday=None, species=None,\n animal_name=None,\n region_loc=None,\n genotype=None,\n expt_cond=None)\n\n Parameters\n ----------\n spikeinterface_output_directory : spikeinterface output directory\n model_file : path of model file\n sex: 'm' or 'f'\n birthday: datetime.datetime(1970, 1, 1, 0, 0, 0, 0)\n species='r' or 'm', rat or mice\n animal_name : UUU12345\n region_loc : string , ca1, v1, m1\n genotype : \"wt\",\n expt_cond : \"experimental condition\",\n\n Returns\n -------\n cells : clusters found in spikeinterface output\n\n Raises\n ------\n NotADirectoryError\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n mbt_spkinterface_out('/home/kbn/co/',\n model_file,\n sex=None, birthday=None, species=None,\n animal_name=None,\n region_loc=None)\n\n 
'''\n\n # constants / variables\n filt = None\n noflylist = []\n\n # check file exist\n if op.exists(clust_out_dir) and op.isdir(clust_out_dir):\n print(\"clust_out_dir \", clust_out_dir)\n else:\n raise NotADirectoryError(\"Directory {} not found\"\n .format(clust_out_dir))\n\n os.chdir(clust_out_dir)\n\n try:\n sort_pickle_file = op.join(clust_out_dir,\n 'spi_dict_final.pickle')\n print(\"picklefile_selected \", sort_pickle_file)\n pickle_in = open(sort_pickle_file, \"rb\")\n sorted_data = pickle.load(pickle_in)\n pickle_in.close()\n except Exception as e:\n print(\"Error : \", e)\n raise FileNotFoundError('Error loading spi_dict_final.pickle')\n\n rl = glob.glob('*rec_length0.npy')[0]\n rec_time = mb.load_np(rl, 1)\n\n rl = glob.glob('*_file_datetime_list.npy')[0]\n file_datetime_list = mb.load_np(rl, lpickle=True)\n\n rl = glob.glob('*ecube_time_list.npy')[0]\n ecube_time_list = mb.load_np(rl, lpickle=True)\n\n rl = glob.glob('*_amplitudes0.npy')[0]\n amps = mb.load_np(rl, lpickle=True)\n\n rl = glob.glob('*_b_waveforms_group0.npy')[0]\n wf_b = mb.load_np(rl, lpickle=True)\n\n rl = glob.glob('*_e_waveforms_group0.npy')[0]\n wf_e = mb.load_np(rl, lpickle=True)\n\n try:\n rl = glob.glob('*_t_ch_size0.npy')[0]\n t_ch_size = int(mb.load_np(rl, lpickle=True))\n except Exception as e:\n print(\"Error: \", e)\n t_ch_size = 4\n print(\"Setting t_ch_size to 4\")\n\n cells = siout(sorted_data, noflylist, rec_time,\n file_datetime_list, ecube_time_list,\n amps=amps,\n wf_b=wf_b, wf_e=wf_e,\n filt=filt,\n t_ch_size=t_ch_size,\n model_file=model_file,\n sex=sex, birthday=birthday, species=species,\n animal_name=animal_name,\n region_loc=region_loc,\n genotype=genotype,\n expt_cond=expt_cond)\n return cells\n","repo_name":"hengenlab/musclebeachtools","sub_path":"musclebeachtools/mbt_spkinterface_out.py","file_name":"mbt_spkinterface_out.py","file_ext":"py","file_size_in_byte":14299,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"27323959716","text":"import uvicorn\nfrom fastapi import FastAPI \nfrom fastapi_sqlalchemy import DBSessionMiddleware, db\n\nfrom schema import User as UserSchema\nfrom schema import User\nfrom User import User as UserModel\nfrom env import DB_URL\n\napp = FastAPI()\n\napp.add_middleware(DBSessionMiddleware, db_url=DB_URL)\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"CRUD operations in PostgreSQL using Python FastAPI\"}\n\n# Take data as user input and add it into table\n@app.post('/user/', response_model=UserSchema)\nasync def adduser(user: UserSchema):\n db_user = UserModel(\n name=user.name,\n email=user.email,\n phone_number=user.phone_number,\n age=user.age,\n gender=user.gender,\n salary=user.salary,\n )\n db.session.add(db_user)\n db.session.commit()\n return db_user\n\n# Get all users\n@app.get('/users/')\nasync def allusers():\n users = db.session.query(UserModel).all()\n return users\n\n# Get one user\n@app.get('/user/{user_id}', response_model=UserSchema)\nasync def oneuser(user_id: int):\n user = db.session.query(UserModel).filter(UserModel.id == user_id).first()\n return user\n\n# Update one user\n@app.put('/user/{user_id}', response_model=UserSchema)\nasync def updateuser(user_id: int, updateVal: UserSchema):\n #user = db.session.query(UserModel).filter(UserModel.id == user_id).all()\n updated_row = db.session.query(UserModel).filter(UserModel.id == user_id).update({\n \"name\": updateVal.name,\n \"email\": updateVal.email,\n \"phone_number\": updateVal.phone_number,\n \"age\": updateVal.age,\n \"gender\": updateVal.gender,\n \"salary\": updateVal.salary\n })\n db.session.commit()\n updatedUser = db.session.query(UserModel).filter(UserModel.id == user_id).first()\n return updatedUser\n\n# Delete one user\n@app.delete('/user/{user_id}', response_model=list[UserSchema])\nasync def deleteuser(user_id: int):\n row_affected = db.session.query(UserModel).filter(UserModel.id == user_id).delete()\n db.session.commit()\n updated_user_list = db.session.query(UserModel).all()\n return updated_user_list\n\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=8000)","repo_name":"DinoWithCurls/py-gres","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"37760344648","text":"import unittest\n\nimport os\nfrom os import path\n\nfrom termcolor import colored\nfrom selenium import webdriver\n\n\nMESSAGE_TEST_START_RUNNING = '\\n===============| Test \"%s\" Started Running |==============='\nMESSAGE_TEST_FINISHED_RUNNING = '===============| Test \"%s\" Finished Running |===============\\n'\n\n\nclass TestBase(unittest.TestCase):\n\n def setUp(self):\n print(colored(MESSAGE_TEST_START_RUNNING % self._testMethodName, \"green\"))\n\n # Boot step 1: Setup logger\n # Logger.get_instance().initialization(self)\n\n # Boot step 2: Open URL\n # self.browser = webdriver.Chrome()\n\n self.__setup_appium_driver__()\n\n def tearDown(self):\n print(colored(MESSAGE_TEST_FINISHED_RUNNING % self._testMethodName, \"green\"))\n # self.take_screenshot()\n # self.browser.quit()\n\n\n def __setup_appium_driver__(self):\n # Logger.get_instance( ).take_screenshot()\n # self.take_screenshot()\n\n # Logger.get_instance( ).take_screenshot(\"application_finished_launching\")\n print('Step 0.1: Application finished launching')\n\n def take_screenshot(self):\n self.test_logs_path = 'logs/screenshots/' + self._testMethodName.split(\".\")[-1] + \"_screenshot.png\"\n\n if path.exists(self.test_logs_path):\n os.remove(self.test_logs_path)\n\n self.browser.save_screenshot(self.test_logs_path)\n\n","repo_name":"Doringber/ImdbAutomationinfrastructure","sub_path":"tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32781950801","text":"from conexion.conexion import Conexion\n\n\nclass Curso:\n def __init__(self, codigo, nombre):\n self.__codigo=codigo\n self.__nombre=nombre\n\n @property\n def codigo(cls):\n return cls.__codigo\n\n @property\n def nombre(cls):\n return cls.__nombre\n\n @classmethod\n def registrar_curso(cls, nombre):\n sql=\"INSERT INTO cursos(cur_nombre) VALUES('\"+nombre+\"')\"\n try:\n conexion=Conexion().getConexion()\n cursor=conexion.cursor()\n cursor.execute(sql)\n conexion.commit()\n except Exception as e:\n print(e)\n finally:\n conexion.close()\n\n @classmethod\n def eliminar_curso(cls, codigo):\n sql=\"DELETE FROM cursos WHERE cur_codigo=\"+str(codigo)\n try:\n conexion=Conexion().getConexion()\n cursor=conexion.cursor()\n cursor.execute(sql)\n conexion.commit()\n except Exception as e:\n print(e)\n finally:\n conexion.close()\n\n @classmethod\n def cargar_cursos(cls):\n cursos=[]\n sql='SELECT cur_codigo, cur_nombre FROM cursos'\n try:\n conexion=Conexion().getConexion()\n cursor=conexion.cursor()\n cursor.execute(sql)\n result=cursor.fetchone()\n while result!=None:\n cursos.append(Curso(result[0], result[1]))\n 
result=cursor.fetchone()\n return cursos\n except Exception as e:\n print(e)\n finally:\n conexion.close()\n","repo_name":"Pierolp1202/colegioTCS","sub_path":"modelos/curso.py","file_name":"curso.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42874879134","text":"'''\nGiven the root of a complete binary tree, return the number of the nodes in the tree.\n\nAccording to Wikipedia, every level, except possibly the last, is completely filled in a complete binary tree, and all nodes in the last level are as far left as possible. It can have between 1 and 2h nodes inclusive at the last level h.\n\nDesign an algorithm that runs in less than O(n) time complexity.\n\n \n\nExample 1:\n\n\nInput: root = [1,2,3,4,5,6]\nOutput: 6\nExample 2:\n\nInput: root = []\nOutput: 0\nExample 3:\n\nInput: root = [1]\nOutput: 1\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n\nclass Solution(object):\n def countNodes(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n\n if root is None:\n return 0\n\n left_levels = 0\n left_depth = root\n\n while left_depth is not None:\n left_levels += 1\n left_depth = left_depth.left\n\n right_levels = 0\n right_depth = root\n\n while right_depth is not None:\n right_levels += 1\n right_depth = right_depth.right\n\n if left_levels == right_levels:\n return pow(2, left_levels) - 1\n\n return self.countNodes(root.left) + self.countNodes(root.right) + 1\n","repo_name":"prashantchanne12/Leetcode","sub_path":"count complete tree nodes.py","file_name":"count complete tree nodes.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"35110156649","text":"import sys\nimport numpy as np\n\n# find_min_right\n# find the minimum value at or to the right of i\ndef find_min_right(series, peak_idx, first=None, end=None):\n if first == None:\n first = 0\n if end == None:\n end = peak_idx + 1\n\n m = end*[series[end - 1]]\n for i in range(end - 2, -1, first - 1):\n m[i] = min(m[i + 1], series[i])\n\n # m = peak_idx*[0] + [series[peak_idx]]\n # for i in range(end - 1, -1, first - 1):\n # m[i] = min(m[i + 1], series[i])\n\n return m\n\n\n# find_max_left\n# find the maximum value strictly to the left of i\ndef find_max_left(series, peak_idx, first=None, end=None):\n if first == None:\n first = 0\n if end == None:\n end = peak_idx + 1\n\n m = end*[float('-inf')]\n for i in range(first + 1, end):\n m[i] = max(m[i - 1], series[i - 1])\n\n # m = [float('-inf')] + peak_idx*[0]\n # for i in range(first + 1, end + 1):\n # m[i] = max(m[i - 1], series[i - 1])\n\n return m\n\n\n# find_min_left\n# find the minimum value to the left of or at i\ndef find_min_left(series, peak_idx, first=None, end=None):\n if first == None:\n first = peak_idx\n if end == None:\n end = series.shape[0]\n\n m = len(series)*[series[max(0, first - 1)]]\n for i in range(first, end):\n m[i] = min(m[i - 1], series[i])\n\n return m\n\n '''\n m = series.shape[0]*[0]\n\n m = series.shape[0]*[series[0]]\n for i in range(1, series.shape[0]):\n m[i] = min(m[i - 1], series[i])\n '''\n\n # if peak_idx > 0:\n # m[peak_idx - 1] = float('-inf')\n\n # m[peak_idx] = series[peak_idx]\n # for i in range(first + 1, end):\n # m[i] = min(m[i - 1], series[i])\n\n # return m\n\n\n# 
find_max_right\n# find the maximum value of all values strictly to the right of i\ndef find_max_right(series, peak_idx, first=None, end=None):\n if first == None:\n first = peak_idx\n if end == None:\n end = series.shape[0]\n\n m = len(series)*[float('-inf')]\n for i in range(end - 2, first - 1, -1):\n m[i] = max(m[i + 1], series[i + 1])\n return m\n\n\ndef find_left_plateau(series, l_max, r_min, peak_idx, first, end, threshold_min):\n val_max = threshold_min\n for i in range(first, end):\n val_max = max(val_max, l_max[i])\n if r_min[i] >= l_max[i] and r_min[i] >= threshold_min:\n return i, val_max\n return None, None\n\n\ndef find_right_plateau(series, r_max, l_min, peak_idx, first, end, threshold_min):\n val_max = threshold_min\n for i in range(end - 1, first - 1, -1):\n val_max = max(val_max, r_max[i])\n if l_min[i] >= r_max[i] and l_min[i] >= threshold_min:\n return i, val_max\n return None, None\n\n\ndef find_plateau(series, tolerance, k=1):\n # vals_peak, idxs_peak = torch.topk(series, k, dim=0) # find top-k time position; for now let's do 1\n # for peak, t_peak in zip(vals_peak, idxs_peak):\n # peak, t_peak = vals_peak[0].item(), idxs_peak[0].item()\n\n # t_peak = max(range(len(series)), key=lambda x : series[x])\n # peak = series[t_peak]\n\n # series = series.numpy()\n peak, t_peak = np.max(series), np.argmax(series)\n\n while True:\n\n thr_min = peak - tolerance\n\n r_min, l_max, l_min, r_max = find_min_right(series, t_peak), find_max_left(series, t_peak), \\\n find_min_left(series, t_peak), find_max_right(series, t_peak)\n\n plateau_lb, thr_l = find_left_plateau(series, l_max, r_min, t_peak, first=0, end=min(len(series), t_peak + 1), threshold_min=thr_min)\n plateau_rb, thr_r = find_right_plateau(series, r_max, l_min, t_peak, first=t_peak, end=series.shape[0], threshold_min=thr_min)\n\n while abs(thr_l - thr_r) > 0.1*tolerance:\n r_min, l_max, l_min, r_max = find_min_right(series, t_peak, first=plateau_lb), find_max_left(series, t_peak, first=plateau_lb), \\\n find_min_left(series, t_peak, end=plateau_rb), find_max_right(series, t_peak, end=plateau_rb)\n if thr_l > thr_r:\n plateau_rb, thr_r = find_right_plateau(series, r_max, l_min, t_peak, first=t_peak, end=plateau_rb, threshold_min=thr_l)\n elif thr_l < thr_r:\n plateau_lb, thr_l = find_left_plateau(series, l_max, r_min, t_peak, first=plateau_lb, end=t_peak, threshold_min=thr_r)\n\n if thr_l is None or thr_r is None:\n tolerance *= 2\n break\n\n if plateau_lb is not None and plateau_rb is not None:\n return (plateau_lb, plateau_rb), thr_l\n\nif __name__ == '__main__':\n series = np.load(sys.argv[1])\n thr = float(np.std(series))\n\n (left, right), _ = find_plateau(series, thr)\n print(left, right)","repo_name":"meynmd/c2r","sub_path":"ddt/plateau_real.py","file_name":"plateau_real.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35738231297","text":"\"\"\"\nstanCode Breakout Project\nAdapted from Eric Roberts's Breakout by\nSonja Johnson-Yu, Kylie Jue, Nick Bowman,\nand Jerry Liao.\n\nThe program will run the game Breakout. The user get 3 chances to clear all the bricks in the window.\n\nGame rules:\n1. The game will automatically stop if all the bricks are eliminated(win) or if you lose all 3 chances(lose).\n2. Users can see their lives in the right bottom corner of the window.\n3. The game allows users to enter their name.\n4. 
Users will get 1000 points when hitting the first 25% bricks, 2000 points for hitting the next 25% bricks,\n 3000 points for the next 25% bricks and 4000 points for the last 25% bricks.\n5. The vertical speed of the ball will increase randomly after the user successfully hits a brick.\n6. If users win the game, the window will show \"YOU WIN\", YOUR SCORE: 250000.\n7. If users lose the game, the window will show whether they broke the previous record.\n8. After hitting every 10 bricks, a bonus ball will drop, and there are four different kinds of effects if the user uses\n the paddle to touch the bonus:\n (1) paddle will become longer\n (2) user will gain one HP\n (3) paddle will become shorter\n (4) user will lose one HP\n Note: there will only be one bonus on the screen; if the player hasn't gotten the previous bonus, no more bonus\n will be dropped even if he hits other bricks.\n\"\"\"\n\nfrom campy.gui.events.timer import pause\nfrom breakoutgraphics_extension import BreakoutGraphics\nimport random\n\nFRAME_RATE = 10 # 100 frames per second\nSPEED_CHANGE = 0.2 # Maximum speed change after the ball touches the paddle or the bricks\n\n\ndef main():\n graphics = BreakoutGraphics()\n\n # Add the animation loop here!\n while True:\n if graphics.is_moving and graphics.chance > 0 and graphics.brick_count > 0:\n # Make sure the game has started\n dx = graphics.get_dx()\n dy = graphics.get_dy()\n graphics.ball.move(dx, dy)\n # Check if the ball touches the wall\n if graphics.ball.x < 0 or graphics.ball.x > graphics.window.width - graphics.ball.width:\n graphics.set_dx(-dx)\n if graphics.ball.y < 0:\n graphics.set_dy(-dy)\n # Check if the ball touches a brick\n if graphics.check_for_collision_brick():\n # The vertical speed of the ball increases randomly every time it bounces off a brick\n if dy > 0:\n graphics.set_dy(-dy - random.random() * SPEED_CHANGE)\n elif dy < 0:\n graphics.set_dy(-dy + random.random() * SPEED_CHANGE)\n # Check if the ball touches a paddle\n if graphics.check_for_collision_paddle():\n graphics.set_dy(-dy)\n # Check the condition of the bonus\n if graphics.bonus_is_moving:\n graphics.bonus_condition()\n # Check if the user loses one HP\n if graphics.ball.y >= graphics.window.height:\n graphics.life_decrease()\n graphics.reset_condition()\n elif graphics.chance <= 0 or graphics.paddle.width <= 0:\n # The user used all the HP\n graphics.game_over()\n break\n elif graphics.brick_count <= 0:\n # The user wins the game by breaking all the bricks\n graphics.game_win()\n break\n pause(FRAME_RATE)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"duanhandsome/MyStanCodeProjects","sub_path":"stanCode_Projects/The breaker game/breakout_extension.py","file_name":"breakout_extension.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7588675017","text":"from django.contrib.auth.models import User\nfrom rest_framework.authtoken.models import Token\nfrom .forms import LoginForm, RegisterForm, MailUsForm\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.contrib.auth import login\nimport hashlib\nimport re\n\ndef reset_context_fields(fields, context):\n \"\"\"Take fields and context, reset only the inputted fields.\n leave all other context variables untouched.\"\"\"\n\n default_values = {\n \"open_login_modal\": \"False\",\n \"open_general_notice_modal\": \"False\",\n \"open_user_activation_modal\": \"False\",\n \"register_fail\": \"\",\n \"email\": \"\",\n \"first_name\": 
\"\",\n \"last_name\": \"\",\n \"mail_success\": \"\",\n \"login_rest_api_error_list\": \"\",\n }\n\n for field in fields:\n context[field] = default_values[field]\n\n return context\n\ndef reset_context():\n \"\"\"Take the current context, edit its variables\n so that it only contains default data\"\"\"\n\n context = {\n \"login_form\": LoginForm(),\n \"register_form\": RegisterForm(),\n \"mail_us_form\": MailUsForm(),\n }\n context = soft_reset_context(context)\n return context\n\ndef soft_reset_context(context):\n \"\"\"Take the current context, reset everything\n except forms.\"\"\"\n\n fields = [\n \"open_login_modal\",\n \"open_general_notice_modal\",\n \"open_user_activation_modal\",\n \"register_fail\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"mail_success\",\n \"login_rest_api_error_list\",\n ]\n\n context = reset_context_fields(fields, context)\n return context\n\ndef get_api_errors(response):\n \"\"\"Takes a REST API response, find and translate all\n errors according to API documentation. Return error list.\"\"\"\n\n response['key'] = None\n error_list = []\n for error in response.values():\n if error != None:\n print(error)\n if 'invalid_credentials' in error:\n error_list.append(\"Email or password is incorrect. \")\n if 'user_not_active' in error:\n context[\"open_user_activation_modal\"] = \"True\"\n context[\"open_login_modal\"] = \"False\"\n if 'required_fields_empty' in error:\n error_list.append(\"Please enter email and password. \")\n return error_list\n\ndef token_authentication(request, key):\n \"\"\"Take a token key and look if it belongs to a user.\n If so, log that user in, and return true. Else false.\"\"\"\n\n try:\n token = Token.objects.get(key=key)\n user = User.objects.get(username=token.user)\n login(request, user)\n return True\n except User.DoesNotExist:\n return False\n except Token.DoesNotExist:\n return False\n\ndef generate_mac(SECRET_KEY, *args):\n generated_mac = hashlib.sha256((\"\".join(args)+SECRET_KEY).encode()).hexdigest()\n return generated_mac\n\ndef send_template_mail(plaintext_template=\"\", html_template=\"\", context=\"\", subject=\"\", from_email=\"\", to=\"\"):\n try:\n text_content = plaintext_template.render(context)\n html_content = html_template.render(context)\n\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n return True\n\n except:\n return False\n","repo_name":"MaxHXie/StockPy","sub_path":"externalpage/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34178523564","text":"#! 
python3\n# Chapter 9 Project - Find all the large files through a walk from the current directory\n\nimport sys, os, shutil\n\nprint(\"Searching for files larger than 10MB: \")\n\n# Walk the current directory\nfor folderName, subFolders, fileNames in os.walk('.'):\n for filename in fileNames:\n absPath = os.path.join(os.path.abspath(folderName), filename)\n if os.path.getsize(absPath) > 10 * 1024 * 1024: # Find files larger than 10MB\n print(filename)","repo_name":"Nooder/Python-Automate-The-Boring-Stuff","sub_path":"Chapter 9/FindLargeFiles.py","file_name":"FindLargeFiles.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"16059988048","text":"# AStar.py\n# A* Search of a problem space.\n# Ver 0.1, October 19, 2017.\n# Usage:\n# python3 AStar.py EightPuzzleWithHeuristics h_euclidean puzzle2a\n\n\nimport sys\nfrom priorityq import PriorityQ\n\n\nif sys.argv == [''] or len(sys.argv) < 3:\n import EightPuzzleWithHeuristics as Problem\n CHOSEN_HEURISTIC = 'h_manhattan'\n INITIAL_STATE = Problem.CREATE_INITIAL_STATE()\n h_score_fn = Problem.HEURISTICS[CHOSEN_HEURISTIC] # scoring function\n\nelse:\n import importlib\n Problem = importlib.import_module(sys.argv[1])\n CHOSEN_HEURISTIC = sys.argv[2]\n initial_state_file = importlib.import_module(sys.argv[3])\n INITIAL_STATE = initial_state_file.CREATE_INITIAL_STATE()\n h_score_fn = Problem.HEURISTICS[CHOSEN_HEURISTIC] # scoring function\n\n\nprint(\"\\nWelcome to A Star Search\")\nCOUNT = None\nBACKLINKS = {}\n\ndef runAStar():\n initial_state = INITIAL_STATE\n print(\"Initial State:\")\n print(initial_state)\n global COUNT, BACKLINKS\n COUNT = 0\n BACKLINKS = {}\n AStar(initial_state)\n print(str(COUNT) + \" states examined.\")\n\n\ndef AStar(initial_state):\n # print(\"In RecDFS, with depth_limit=\"+str(depth_limit)+\", current_state is \")\n # print(Problem.DESCRIBE_STATE(current_state))\n global COUNT, BACKLINKS\n\n #Tracks most efficient previous step\n BACKLINKS[initial_state] = None\n\n #already evaluated states\n CLOSED = []\n\n #currently discovered, not yet evaluated states\n OPEN = PriorityQ()\n\n #Calculate F, G, H scores\n initialize_scores(initial_state)\n\n #Only initial node is known as of now\n OPEN.insert(initial_state, F_SCORE[initial_state])\n\n while OPEN.isEmpty() !=True:\n S = OPEN.deletemin()\n CLOSED.append(S)\n\n if Problem.GOAL_TEST(S):\n print(Problem.GOAL_MESSAGE_FUNCTION(S))\n backtrace(S)\n return #FOUND GOAL\n\n COUNT += 1\n if (COUNT % 32)==0:\n# if True:\n # print(\".\",end=\"\")\n# if (COUNT % 128*128)==0:\n if True:\n print(\"COUNT = \" + str(COUNT))\n #print(\"len(OPEN)=\" + str(len(OPEN))) #PriorityQ OPEN doesn't have len()\n print(\"len(CLOSED)=\" + str(len(CLOSED)))\n\n\n for op in Problem.OPERATORS:\n if op.precond(S):\n new_state = op.state_transf(S)\n if not occurs_in(new_state, CLOSED): #ignore already evaluated neighbors\n\n #find tentative score of neighbor\n tentative_g_score = G_SCORE[S] + 1\n\n if new_state not in G_SCORE: #Default INFINITY\n BACKLINKS[new_state] = S #First known path to new_state\n elif tentative_g_score >= G_SCORE[new_state]:\n continue #current path is not the best path to the neighbor\n else:\n BACKLINKS[new_state] = S #Found better path to new_State\n\n G_SCORE[new_state] = tentative_g_score\n F_SCORE[new_state] = G_SCORE[new_state] + h_score_fn(new_state)\n\n # discovered a new State\n if not OPEN.__contains__(new_state):\n OPEN.insert(new_state, F_SCORE[new_state])\n\n # 
print(Problem.DESCRIBE_STATE(new_state))\n #print(OPEN)\n\n #Failure, if goal_test has not succeeded until now\n print(\"COULD NOT FIND GOAL\")\n return\n\n\ndef initialize_scores(start_state):\n reset_Scores()\n G_SCORE[start_state] = 0\n H_SCORE[start_state] = h_score_fn(start_state)\n F_SCORE[start_state] = H_SCORE[start_state]\n\ndef reset_Scores():\n \"\"\"\n Reset just in case run AStar multiple times\n \"\"\"\n global G_SCORE, F_SCORE, H_SCORE\n G_SCORE = {}\n F_SCORE = {}\n H_SCORE = {}\n\ndef print_state_list(name, lst):\n print(name + \" is now: \", end='')\n for s in lst[:-1]:\n print(str(s), end=', ')\n print(str(lst[-1]))\n\ndef backtrace(S):\n global BACKLINKS\n\n path = []\n while S:\n path.append(S)\n # print(\"In backtrace, S is now: \"+str(S))\n S = BACKLINKS[S]\n path.reverse()\n print(\"Solution path: \")\n for s in path:\n print(s)\n return path\n\ndef occurs_in(s1, lst):\n for s2 in lst:\n if s1 == s2: return True\n return False\n\nif __name__ == '__main__':\n runAStar()","repo_name":"vaibhavi-r/CSE-415","sub_path":"Assignment3/AStarOld.py","file_name":"AStarOld.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73662261384","text":"import json\n\nimport scipy.stats\nimport torch\nimport numpy as np\nfrom transformers import BertTokenizer, BertModel, BertConfig\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n\ndef get_bert_correlation_layer_wise(model, tokenizer, dataset, n_layer):\n\n sim=[]\n for val in dataset['X']:\n sentence1, sentence2 = val\n\n tokens1 = ['[CLS]'] + tokenizer.tokenize(sentence1) + ['[SEP]']\n tokens2 = ['[CLS]'] + tokenizer.tokenize(sentence2) + ['[SEP]']\n token_ids1 = tokenizer.convert_tokens_to_ids(tokens1)\n token_ids2 = tokenizer.convert_tokens_to_ids(tokens2)\n input_ids1 = torch.tensor([token_ids1])\n input_ids2 = torch.tensor([token_ids2])\n\n with torch.no_grad():\n outputs1 = model(input_ids1)\n outputs2 = model(input_ids2)\n\n embeddings1 = outputs1['hidden_states'][n_layer].squeeze(0).numpy()\n embeddings2 = outputs2['hidden_states'][n_layer].squeeze(0).numpy()\n\n avg_embeddings1 = np.mean(embeddings1, axis=0)\n avg_embeddings2 = np.mean(embeddings2, axis=0)\n\n similarity = cosine_similarity([avg_embeddings1], [avg_embeddings2])\n sim.append(similarity[0][0])\n \n return scipy.stats.spearmanr(sim, dataset['y']).correlation\n\n\ndef bert_layers_benchmark(datasets):\n\n config = BertConfig.from_pretrained(\"bert-base-uncased\", output_hidden_states=True)\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertModel.from_pretrained('bert-base-uncased', config=config)\n\n for n_layer in [1,2,11,12]:\n print(f\"Layer {n_layer}\")\n \n dataset_names = ['TR9856', 'MEN', 'SimLex999']\n for data_name in dataset_names:\n print('Dataset size:',len(datasets[data_name]['X']))\n print(\"Spearman correlation of scores on {} {}\".format(\n data_name, get_bert_correlation_layer_wise(model, tokenizer, datasets[data_name], n_layer))\n )\n\ndef main():\n with open('all_datasets.json', 'rb') as file:\n datasets = json.load(file)\n bert_layers_benchmark(datasets)\n\n\nif __name__ == '__main__':\n main()","repo_name":"arushi-08/word_embedding_similarity_benchmarks","sub_path":"bert_layer_wise_benchmark.py","file_name":"bert_layer_wise_benchmark.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"72967544904","text":"def 
response_to_port_tuple(response):\n host_address = response[response.find('(') + 1:]\n host_address = host_address.replace(')', '')\n info_array = host_address.split(',')\n\n host_name = info_array[0] + '.' + \\\n info_array[1] + '.' + \\\n info_array[2] + '.' + \\\n info_array[3]\n port_number = ((int(info_array[4])) * 256) + int(info_array[5])\n\n return (host_name, port_number)\n\n\ndef get_response(socket_file, command=None):\n if command:\n socket_file.write(command)\n socket_file.flush()\n while True:\n line = socket_file.readline()\n if line[0].isdigit() and line[1].isdigit() and line[2].isdigit() and line[\n 3] == \" \":\n return line\n\n\ndef get_retr_response(created_file):\n string_buffer = ''\n while True:\n new_line = created_file.readline()\n if not new_line:\n return string_buffer\n string_buffer += new_line\n return string_buffer\n","repo_name":"HaydenLikesGold/FTP-Client","sub_path":"ftpclient/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"33241187511","text":"SP = 67836.43\nRJ = 36678.66\nMG = 29229.88\nES = 27165.48\nOutros = 19849.53\nTodas = SP + RJ + MG + ES + Outros\n\npercentual=Todas/100\n\npercentual_SP = SP/percentual\npercentual_RJ = RJ/percentual\npercentual_MG = MG/percentual\npercentual_ES = ES/percentual\npercentual_Outras = Outros/percentual\n\nprint(\"\\nO percentual da tributa de X empresa :\")\n\nprint(\"\\nSP Com fatura bruta de:\",SP,\"=\",'{:.2f}%'.format(percentual_SP))\nprint(\"\\nRJ Com fatura bruta de:\",RJ,\"=\",'{:.2f}%'.format(percentual_RJ))\nprint(\"\\nMG Com fatura bruta de:\",MG,\"=\",'{:.2f}%'.format(percentual_MG))\nprint(\"\\nES Com fatura bruta de:\",ES,\"=\",'{:.2f}%'.format(percentual_ES))\nprint(\"\\nOutros Estados Com fatura bruta de:\",Outros,\"=\",'{:.2f}%'.format(percentual_Outras))\n\n","repo_name":"EduardoGuedes06/Job-Rotation","sub_path":"Atividades/A4.py","file_name":"A4.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"38970486700","text":"'''\n5.\n\nOhms law : Voltage = Resistance x current\nLet's say current is 30 amps; vary the resistance from 10 ohms in steps of 5 for the next 8 readings and display the voltage\n\n \n\n '''\n \n \ncurrent=30\nresistance=10\nreadings=8\nwhile readings>0:\n \n Voltage=resistance*current\n resistance += 5\n print(Voltage) \n readings -= 1\n\n\n \n","repo_name":"Som94/Python-repo","sub_path":"12th july/Ohms law Voltage Resistance x current.py","file_name":"Ohms law Voltage Resistance x current.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"996141375","text":"import torch.nn.functional as F\r\nimport torch.nn as nn\r\n\r\nclass Loss(nn.Module):\r\n def __init__(self, args):\r\n super(Loss, self).__init__()\r\n self.args = args\r\n \r\n def forward(self, logits, label):\r\n label = label.float() \r\n loss_cls = F.multilabel_soft_margin_loss(logits[0], label)\r\n loss_cls_ers = F.multilabel_soft_margin_loss(logits[1], label)\r\n loss = loss_cls + loss_cls_ers\r\n return 
loss","repo_name":"yuhaoliu7456/Adversarial-Complementary-Learning-for-Weakly-Supervised-Object-Localization","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"35187601571","text":"#\n# @lc app=leetcode.cn id=878 lang=python3\n#\n# [878] 第 N 个神奇数字\n#\n\n# @lc code=start\nclass Solution:\n @staticmethod\n def lcm(x, y):\n if x > y:greater = x\n else:greater = y\n while(True):\n if((greater % x == 0) and (greater % y == 0)):\n lcm = greater\n break\n greater += 1\n return lcm\n def nthMagicalNumber(self, n: int, a: int, b: int) -> int:\n MOD = 10 ** 9 + 7\n c = Solution.lcm(a, b)\n m = c // a + c // b - 1\n r = n % m\n res = c * (n // m) % MOD\n if r == 0:\n return res\n addA = a\n addB = b\n for _ in range(r - 1):\n if addA < addB:\n addA += a\n else:\n addB += b\n return (res + min(addA, addB) % MOD) % MOD\n# @lc code=end\n\nprint(Solution().nthMagicalNumber(4,2,3))","repo_name":"HellOwhatAs/Leetcode","sub_path":"878.第-n-个神奇数字.py","file_name":"878.第-n-个神奇数字.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74121230026","text":"import torch\nimport torch.nn as nn\nfrom torch.distributions.normal import Normal\nfrom ..neural_networks.feature_extractors.utils import init\n\n\nclass Categorical(nn.Module):\n \"\"\"\n Categorical probability distribution.\n\n Parameters\n ----------\n num_inputs : int\n Size of input feature maps.\n num_outputs : int\n Number of options in output space.\n\n Attributes\n ----------\n linear: nn.Module\n Maps the incoming feature maps to probabilities over the output space.\n \"\"\"\n def __init__(self, num_inputs, num_outputs, multi_discrete=False):\n super(Categorical, self).__init__()\n\n init_ = lambda m: init(\n m,\n nn.init.orthogonal_,\n lambda x: nn.init.constant_(x, 0),\n gain=0.01)\n if not multi_discrete:\n self.linear = init_(nn.Linear(num_inputs, num_outputs))\n self.multi_discrete = multi_discrete\n else:\n # creates output heads depending on the number of action dimensions with the output features as the number of sub action dimensions (3)\n self.linear = [init_(nn.Linear(num_inputs, 3)) for head in range(num_outputs[0])]\n self.num_outputs = num_outputs\n self.multi_discrete = multi_discrete\n\n def forward(self, x, deterministic=False):\n \"\"\"\n Predict distribution parameters from x (obs features) and return\n predictions (sampled and clipped), sampled log\n probability and distribution entropy.\n\n Parameters\n ----------\n x : torch.tensor\n Feature maps extracted from environment observations.\n deterministic : bool\n Whether to randomly sample from predicted distribution or take the mode.\n\n Returns\n -------\n pred: torch.tensor\n Predicted value.\n clipped_pred: torch.tensor\n Predicted value (clipped to be within [-1, 1] range).\n logp : torch.tensor\n Log probability of `pred` according to the predicted distribution.\n entropy_dist : torch.tensor\n Entropy of the predicted distribution.\n \"\"\"\n\n # Predict distribution parameters\n if not self.multi_discrete:\n x = self.linear(x)\n else: \n x = torch.cat([head(x) for head in self.linear])\n #x = x.reshape((self.num_outputs[0], 3))\n\n # Create distribution and sample\n dist = torch.distributions.Categorical(logits=x)\n self.dist = dist # ugly hack to handle sac discrete case\n\n if deterministic:\n pred = clipped_pred = 
dist.probs.argmax(dim=-1, keepdim=True)\n else:\n pred = clipped_pred = dist.sample().unsqueeze(-1)\n\n # Action log probability\n # logp = dist.log_prob(pred.squeeze( -1)).unsqueeze(-1)\n logp = dist.log_prob(pred.squeeze(-1)).view(pred.size(0), -1).sum(-1).unsqueeze(-1)\n if self.multi_discrete:\n pred = pred.squeeze().unsqueeze(0)\n logp = logp.squeeze().unsqueeze(0)\n # Distribution entropy\n entropy_dist = dist.entropy().mean()\n return pred, clipped_pred, logp, entropy_dist\n\n\n def evaluate_pred(self, x, pred):\n \"\"\"\n Return log prob of `pred` under the distribution generated from\n x (obs features). Also return entropy of the generated distribution.\n\n Parameters\n ----------\n x : torch.tensor\n obs feature map obtained from a policy_net.\n pred : torch.tensor\n Prediction to evaluate.\n\n Returns\n -------\n logp : torch.tensor\n Log probability of `pred` according to the predicted distribution.\n entropy_dist : torch.tensor\n Entropy of the predicted distribution.\n \"\"\"\n\n # Predict distribution parameters\n if not self.multi_discrete:\n x = self.linear(x)\n else: \n x = torch.cat([head(x) for head in self.linear])\n x = x.reshape((pred.shape[0], self.num_outputs[0], 3)) \n\n # Create distribution\n dist = torch.distributions.Categorical(logits=x)\n\n # Evaluate log prob of under dist\n if not self.multi_discrete:\n logp = dist.log_prob(pred.squeeze(-1)).unsqueeze(-1).sum(-1, keepdim=True)\n else:\n logp = dist.log_prob(pred.squeeze(-1)).unsqueeze(-1).sum(-1, keepdim=True).squeeze()\n\n # Distribution entropy\n entropy_dist = dist.entropy().mean()\n\n return logp, entropy_dist","repo_name":"BY571/pytorchrl","sub_path":"pytorchrl/core/actors/distributions/categorical.py","file_name":"categorical.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19444481656","text":"import streamlit as st\nfrom src import soporte as sp\nimport pandas as pd\n\n\nst.image(\"images/churn1.jpeg\")\n\nst.markdown(\"
<h1 style='text-align: center;'>Real Time Prediction</h1>
\", unsafe_allow_html=True)\nst.subheader(\"Optimize Your Profitability\")\nst.subheader(\"Will your customers stay or leave? Find out now!\")\n\n#We collect user input data.\nuser_options = sp.user_input_features()\n\n#st.dataframe(user_options)\n\n#we apply the encoding process to the categorical variables of the user dataset \"user_options\" and create dataframes for each one\nmarried_ = pd.DataFrame(sp.encoding_married.transform(user_options[[\"Married\"]]), columns= [\"Married\"])\nPhone_Service_= pd.DataFrame(sp.encoding_phone_service.transform(user_options[[\"Phone Service\"]]), columns= [\"Phone Service\"])\nInternet_Type_ = pd.DataFrame(sp.encoding_internet_type.transform(user_options[[\"Internet Type\"]]), columns= [\"Internet Type\"])\nOnline_Security_ = pd.DataFrame(sp.encoding_online_security.transform(user_options[[\"Online Security\"]]), columns= [\"Online Security\"])\nOnline_Backup_= pd.DataFrame(sp.encoding_online_backup.transform(user_options[[\"Online Backup\"]]), columns= [\"Online Backup\"])\nDevive_protection_plan_ = pd.DataFrame(sp.encoding_protection.transform(user_options[[\"Device Protection Plan\"]]), columns= [\"Device Protection Plan\"])\nPremium_Tech_Support_ = pd.DataFrame(sp.encoding_premium_support.transform(user_options[[\"Premium Tech Support\"]]), columns= [\"Premium Tech Support\"])\nUnlimited_Data_ = pd.DataFrame(sp.encoding_unlimited_data.transform(user_options[[\"Unlimited Data\"]]), columns= [\"Unlimited Data\"])\nPaperless_Billing_ = pd.DataFrame(sp.encoding_paperless.transform(user_options[[\"Paperless Billing\"]]), columns= [\"Paperless Billing\"])\nPayment_Method_ = pd.DataFrame(sp.encoding_payment_method.transform(user_options[[\"Payment Method\"]]), columns= [\"Payment Method\"])\nGender_ = pd.DataFrame(sp.encoding_gender.transform(user_options[[\"Gender\"]]).toarray(), columns= [\"Gender_Female\",\"Gender_Male\"])\nMultiple_Lines_ = pd.DataFrame(sp.encoding_multiple_lines.transform(user_options[[\"Multiple Lines\"]]).toarray(), columns= [\"Multiple Lines_No\",\"Multiple Lines_Yes\"])\nStreaming_TV_= pd.DataFrame(sp.encoding_streaming_tv.transform(user_options[[\"Streaming TV\"]]).toarray(), columns= [\"Streaming TV_No\",\"Streaming TV_Yes\"])\nStreaming_Music_ = pd.DataFrame(sp.encoding_streaming_music.transform(user_options[[\"Streaming Music\"]]).toarray(), columns= [\"Streaming Music_No\",\"Streaming Music_Yes\"])\nStreaming_Movies_ = pd.DataFrame(sp.encoding_streaming_movies.transform(user_options[[\"Streaming Movies\"]]).toarray(), columns= [\"Streaming Movies_No\",\"Streaming Movies_Yes\"])\n\nuser_options[\"Contract_mapeada\"]= user_options[\"Contract\"].map(sp.map_contract)\nuser_options[\"Offer_mapeada\"]= user_options[\"Offer\"].map(sp.map_offer)\n\nuser_options.drop([\"Offer\",\"Contract\",\"Internet Type\",\"Married\",\"Phone Service\",\"Online Security\",\"Online Backup\",\"Device Protection Plan\",\"Premium Tech Support\",\"Unlimited Data\",\"Paperless Billing\",\"Payment Method\",\"Gender\",\"Multiple Lines\",\"Streaming TV\",\"Streaming Music\",\"Streaming Movies\"], axis=1, inplace=True)\n#\ndf_final = pd.concat([user_options, married_,Phone_Service_,Internet_Type_,Online_Security_,Online_Backup_,Devive_protection_plan_,Premium_Tech_Support_,Unlimited_Data_,Paperless_Billing_,Payment_Method_,Gender_,Multiple_Lines_,Streaming_TV_,Streaming_Music_,Streaming_Movies_], axis = 1)\nprint(df_final.shape)\n\n# We create a list with the correct order of the columns that our dataframe must have for the predictive 
model\nnew_order=['Age', 'Married', 'Number of Dependents', 'Number of Referrals',\n 'Phone Service', 'Internet Type', 'Avg Monthly GB Download',\n 'Online Security', 'Online Backup', 'Device Protection Plan',\n 'Premium Tech Support', 'Unlimited Data', 'Paperless Billing',\n 'Payment Method', 'Total Revenue', 'Satisfaction Score',\n 'CLTV', 'Gender_Female', 'Gender_Male', 'Multiple Lines_No',\n 'Multiple Lines_Yes', 'Streaming TV_No', 'Streaming TV_Yes',\n 'Streaming Music_No', 'Streaming Music_Yes', 'Streaming Movies_No',\n 'Streaming Movies_Yes', 'Contract_mapeada', 'Offer_mapeada']\n\n#we order the columns\ndf_final=df_final.reindex(columns=new_order)\n\nst.subheader(\"Your chosen Data\")\n# we show the dataframe\nst.dataframe(df_final)\n\n# we predict \npred, prob = sp.prediction_churn (df_final, sp.modelo)\n\n\nst.subheader(\"Prediction \")\n\n# We add a descriptive text\nif pred == 1:\n st.write(\"Based on our predictive model, this customer is Likely to Churn.\")\nelse:\n st.write(\"Based on our predictive model, this customer is Unlikely to churn.\")\n\n\n#st.write(f\"The prediction is : {pred}\")\n#st.write(f\"The probability is : {prob}\")\n\n\n#format output\nformatted_probabilities = [[round(p * 100, 2) for p in prob[0]]]\n#create a Pandas DataFrame from this list of formatted probabilities\ndf = pd.DataFrame(formatted_probabilities, columns=['No Churn', 'Churn'])\ndf = df.applymap('{:.2f}%'.format)\nst.table(df)\n\n\n# Let's add a visualization\nchurn_probability = formatted_probabilities[0][0] / 100\nno_churn_probability = formatted_probabilities[0][1] / 100\nchart_data = { \"Churn\" : no_churn_probability,\"No churn\": churn_probability,}\ndf_chart_data = pd.DataFrame.from_dict(chart_data, orient='index', columns=['Probability'])\nst.write(\"Final odds display:\")\nst.bar_chart(df_chart_data)\n\n","repo_name":"luceromendozab/Churn_Project","sub_path":"streamlit/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33974309530","text":"from hrr_scaling.tools import bootstrap\n\nimport random\nimport gc\nimport psutil\nimport os\n\nimport numpy as np\n\n\nclass ExtractionTester(object):\n\n def __init__(self, corpus_factory, extractor_factory,\n corpus_seed, extractor_seed, test_seed,\n probe_all=False, output_dir=\".\"):\n\n self.corpus_factory = corpus_factory\n self.extractor_factory = extractor_factory\n\n self.corpus_seed = corpus_seed\n self.extractor_seed = extractor_seed\n self.test_seed = test_seed\n\n self.output_dir = output_dir\n self.output_file = os.path.join(output_dir, 'results')\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n self.probe_all = probe_all\n\n self.corpus_rng = random.Random()\n self.corpus_rng.seed(self.corpus_seed)\n\n self.extractor_rng = random.Random()\n self.extractor_rng.seed(self.extractor_seed)\n\n self.tests = []\n self.bootstrapper = bootstrap.Bootstrapper(\n verbose=True, write_raw_data=True)\n\n def next_extractor_seed(self):\n return self.extractor_rng.randint(0, np.iinfo(np.int32).max)\n\n def next_corpus_seed(self):\n return self.corpus_rng.randint(0, np.iinfo(np.int32).max)\n\n def add_test(self, test):\n self.tests.append(test)\n test.bootstrapper = self.bootstrapper\n test.output_dir = self.output_dir\n test.seed = self.test_seed\n\n def memory_usage_psutil(self):\n # return the memory usage in MB\n process = psutil.Process(os.getpid())\n return process.memory_info().rss 
/ float(2 ** 20)\n\n def initialize(self):\n corpus_seed = self.next_corpus_seed()\n np.random.seed(corpus_seed)\n random.seed(corpus_seed)\n\n self.corpus = self.corpus_factory()\n\n id_vectors = self.corpus.id_vectors\n semantic_pointers = self.corpus.semantic_pointers\n\n if self.probe_all:\n probe_keys = id_vectors.keys()\n else:\n probe_keys = []\n\n extractor_seed = self.next_extractor_seed()\n np.random.seed(extractor_seed)\n random.seed(extractor_seed)\n\n self.extractor = self.extractor_factory(\n id_vectors, semantic_pointers, probe_keys, self.output_dir)\n\n return self.corpus, self.extractor\n\n # Run a series of bootstrap runs, then combine the success rate from each\n # individual run into a total mean success rate with confidence intervals\n # the extractor on the run to be displayed\n def run_bootstrap(self, num_runs):\n\n for test in self.tests:\n test.bootstrap_start(num_runs)\n\n for i in range(num_runs):\n\n corpus, extractor = self.initialize()\n\n for test in self.tests:\n test.corpus = corpus\n test.extractor = extractor\n\n for test in self.tests:\n test.bootstrap_step(i)\n\n corpus = None\n extractor = None\n\n for test in self.tests:\n test.corpus = None\n test.extractor = None\n\n gc.collect()\n\n self.bootstrapper.add_data(\n 'memory_usage_in_mb',\n self.memory_usage_psutil())\n\n self.bootstrapper.print_summary(self.output_file, flush=True)\n\n for test in self.tests:\n test.bootstrap_end()\n","repo_name":"e2crawfo/hrr-scaling","sub_path":"hrr_scaling/extraction_tester.py","file_name":"extraction_tester.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"71635408904","text":"import time\ndef fib(n):\n global numFibCalls\n numFibCalls+=1\n if n==1:\n return 1\n elif n==2:\n return 2\n else: \n return fib(n-1)+fib(n-2)\n\ndef fib_efficient(n,d):\n global numFibCalls\n numFibCalls+=1\n if n in d:\n return d[n]\n else:\n ans=fib_efficient(n-1,d)+fib_efficient(n-2,d)\n d[n]=ans\n return ans\n\nd={1:1,2:2}\n# print(fib_efficient(n,d))\n\nargToUse=12\nprint(\"\")\nprint('using fib')\n# t1=time.time()\nnumFibCalls=0\nprint(fib(argToUse))\nprint('function calls',numFibCalls)\n# t2=time.time()\n# print('共计用了时间:{:.4}秒'.format(t2-t1))\nprint(\"\")\nnumFibCalls=0\nprint('using fib_efficient')\n# t3=time.time()\n# print(t3)\nprint(fib_efficient(argToUse,d))\nprint('function calls',numFibCalls)\n# t4=time.time()\n# print(t4)\n# print('字典数列共计用了时间:{:.4}秒'.format(t4-t3))","repo_name":"yaoxs7503/pythontest","sub_path":"dict_dictionary.py","file_name":"dict_dictionary.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12246945326","text":"__time__ = '2021/7/18'\n__author__ = 'ZhiYong Sun'\n\n\"\"\"\n在一个 n * m 的二维数组中,每一行都按照从左到右递增的顺序排序,每一列都按照从上到下递增的顺序排序。\n\n请完成一个高效���函数,输入这样的一个二维数组和一个整数,判断数组中是否含有该整数。\n\n解题思路: 从左下角开始,如果比左下角大则往右走,否则往上走\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:\n if not matrix: return False\n m, n = len(matrix), len(matrix[0])\n\n i, j = m-1, 0\n while 0 <= i < m and 0 <= j < n:\n curr = matrix[i][j]\n if target == curr:\n return True\n elif target > curr:\n j += 1\n else:\n i -= 1\n return False\n\n\nif __name__ == \"__main__\":\n matrix = [\n [1, 4, 7, 11, 15],\n [2, 5, 8, 12, 19],\n [3, 6, 9, 16, 22],\n [10, 13, 14, 17, 24],\n [18, 21, 23, 26, 30]\n ]\n 
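# --- Editor's note (illustrative sketch, not part of the original record) ---
# The fib/fib_efficient sample above hand-rolls a dictionary cache to turn the
# exponential recursion into linear time. The standard library expresses the
# same memoization in one decorator; base cases match the sample (fib(1) == 1,
# fib(2) == 2):

from functools import lru_cache

@lru_cache(maxsize=None)
def fib_cached(n):
    if n <= 2:
        return n
    return fib_cached(n - 1) + fib_cached(n - 2)

assert fib_cached(12) == 233  # same value the sample prints for argToUse = 12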
print(Solution().findNumberIn2DArray(matrix=matrix, target=19))\n print(Solution().findNumberIn2DArray(matrix=matrix, target=29))\n\n","repo_name":"Darius-sss/LeetCode","sub_path":"python文件/剑指Offer--04--其他--二维数组中的查找.py","file_name":"剑指Offer--04--其他--二维数组中的查找.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2646448305","text":"import subprocess\nimport sys\nfrom git_utils import make_sure_remote_repo_is_downloaded\nfrom os.path import exists\n\n\ndef format_files(*files: str):\n make_sure_remote_repo_is_downloaded(\n \"swift-format\", \"https://github.com/apple/swift-format.git\", \"release/5.6\"\n )\n make_sure_swift_format_is_compiled()\n print(\"Formatting swift files...\")\n subprocess.check_call(\n f\"./swift-format/.build/release/swift-format -i --configuration ./config/formatting_config.json {' '.join(files)}\".split(\n \" \"\n ),\n stderr=subprocess.STDOUT,\n )\n\n\ndef make_sure_swift_format_is_compiled():\n if exists(\"./swift-format/.build/release/swift-format\"):\n return\n else:\n compile_swift_format()\n\n\ndef compile_swift_format(*files: str):\n print(\"Compiling swift-format...\")\n try:\n subprocess.check_call(\n \"swift build -c release -C swift-format\".split(\" \"),\n stderr=subprocess.STDOUT,\n )\n except subprocess.CalledProcessError as e:\n print_error(\"FAIL: Building swift-format failed\")\n print_error(f\"Executing: {' '.join(e.cmd)}\")\n sys.exit(1)\n\n\ndef print_error(message):\n print(message, file=sys.stderr)\n","repo_name":"michaelvanstraten/swifty-redis","sub_path":"Sources/CodeGen/swift_format.py","file_name":"swift_format.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"26949928581","text":"from .currencyapi import *\nfrom .usefulfuctions import *\nfrom tkinter import Tk, messagebox, ttk\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nimport copy\nimport sys # Module referring to system commands / Modulo referênte aos comandos do sistema\nimport main as myMain\nimport os\n\ndef run():\n\n def clear():\n\n value.delete(0, END)\n result['text'] = \"\"\n cmb1.set('')\n cmb2.set('')\n cmb1List = copy.deepcopy(currency)\n cmb2List = copy.deepcopy(currency)\n cmb1['values'] = (cmb1List)\n cmb2['values'] = (cmb2List)\n\n def updateCmb1List(*args):\n sel = cmb1.get()\n cmb2List = copy.deepcopy(currency)\n cmb2List.remove(sel)\n cmb2.config(values=cmb2List)\n\n def updateCmb2List(*args):\n sel = cmb2.get()\n cmb1List = copy.deepcopy(currency)\n cmb1List.remove(sel)\n cmb1.config(values=cmb1List)\n\n languages = loadLanguage() # Get language / Pegar linguagem\n\n # Colors\n whiteColor = \"#eeeeee\"\n greyColor = \"#1e1e1e\"\n\n # Window setting\n window = Tk()\n window.geometry('300x380')\n window.title(languages[\"info_1\"])\n window.configure(bg=whiteColor)\n window.resizable(height= False, width=False)\n\n if(not is_connected()):\n msg = messagebox.showerror(\"NET\", languages[\"net_error\"])\n if msg == \"OK\":\n window.destroy()\n sys.exit()\n\n # Frames\n top = Frame(window, width= 300, height= 60, bg=whiteColor)\n top.grid(row=0, column=0)\n\n main = Frame(window, width=300, height=280, bg=whiteColor)\n main.grid(row=1, column=0)\n\n #Top frame\n icon = Image.open(myMain.mydir+'/app/icon.png')\n\n mycwd = os.getcwd()\n os.chdir(\"..\")\n #do stuff in parent directory\n os.chdir(mycwd) \n \n icon = icon.resize((40, 40))\n icon = 
ImageTk.PhotoImage(icon)\n app_name = Label(top, image=icon, compound=LEFT, text=languages[\"info_1\"], height=5, padx=13, pady=30, anchor=CENTER, font=('Arial 16 bold'), bg=whiteColor, foreground=greyColor)\n app_name.place(x=0, y=0)\n\n # Main frame\n result = Label(main, text=\" \", width=16, height=2, pady=7, relief=SOLID, anchor=CENTER, font=('Ivy 15 bold'), bg=whiteColor, foreground=greyColor)\n result.place(x=50,y=10)\n\n currency = loadCurrencys()\n\n cmb1List = copy.deepcopy(currency)\n cmb2List = copy.deepcopy(currency)\n\n myFrom = Label(main, text=languages[\"info_2\"], width=8, height=1, pady=0, padx=0, relief=FLAT, anchor=NW, font=('Ivy 10 bold'), bg=whiteColor, foreground=greyColor)\n myFrom.place(x=48, y=90)\n cmb1 = ttk.Combobox(main, width=8, justify=CENTER, font=(\"Ivy 12 bold\"))\n cmb1['values'] = (cmb1List)\n cmb1.bind(\"<>\", updateCmb1List)\n cmb1.place(x=50, y=115)\n\n myTo = Label(main, text=languages[\"info_3\"], width=8, height=1, pady=0, padx=0, relief=FLAT, anchor=NW, font=('Ivy 10 bold'), bg=whiteColor, foreground=greyColor)\n myTo.place(x=158, y=90)\n cmb2 = ttk.Combobox(main, width=8, justify=CENTER, font=(\"Ivy 12 bold\"))\n cmb2['values'] = (cmb2List)\n cmb2.bind(\"<>\", updateCmb2List)\n cmb2.place(x=160, y=115)\n\n value = Entry(main, width=22, justify=CENTER, font=(\"Ivy 12 bold\"), relief=SOLID)\n value.place(x=50, y=155)\n\n button1 = Button(main, text=languages[\"info_4\"], width=19, padx=5, height=1, bg=greyColor, fg=whiteColor, font=(\"Ivy 12 bold\"), command= lambda: convert(cmb1, cmb2, value, result))\n button1.place(x=50, y=210)\n\n button2 = Button(main, text=languages[\"info_5\"], width=19, padx=5, height=1, bg=greyColor, fg=whiteColor, font=(\"Ivy 12 bold\"), command= lambda: clear())\n button2.place(x=50, y=250)\n\n window.mainloop()","repo_name":"Vivaldo-Roque/CurrencyConverter-ConversordeMoeda","sub_path":"app/modules/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"3439285457","text":"# 백준 24444 : 알고리즘 수업 - 너비우선탐색1 (BFS)\n# 처음 출력값을 잘 못 이해함 \n# 인덱스 출력으로 해서 시간 초과가 나옴\n# timer로 해서 출력 => 그러면 방문 안 했을 경우에 대해 코드를 안 짜도 됨\nimport sys ; input = sys.stdin.readline\nfrom collections import deque\nqueue = deque()\naList = []\n\ndef bfs(graph, visited, start):\n t = 1\n\n visited[start] = True\n queue.append(start)\n while queue :\n a = queue.popleft()\n \n timer[a] = t\n t += 1\n\n for i in graph[a]:\n if visited[i] == False :\n visited[i] = True\n queue.append(i)\n \n # 방문 안 했을 때 코드\n # for num in range(1, n+1):\n # if visited[num] == False:\n # print(0)\n\nn, m, r = map(int, input().strip().split())\ngraph = [[] for _ in range(n+1)]\nvisited = [False] * (n+1)\naIdx = [[] for _ in range(n+1)]\n\n# timer[x] = x번째 정점이 몇 번째로 방문되었는지\ntimer = [0] * (n + 1)\n\nfor j in range(1, m+1):\n a, b = map(int, input().strip().split())\n graph[a].append(b)\n graph[b].append(a)\n\nfor k in range(1, n+1):\n graph[k].sort()\n\nbfs(graph, visited, r)\n\nprint(*timer[1:], sep=\"\\n\")\n","repo_name":"blacklabf/algorithms","sub_path":"algorithms-practice/백준/DFS&BFS/B24444.py","file_name":"B24444.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39421282454","text":"# https://leetcode.com/problems/swapping-nodes-in-a-linked-list/\r\nfrom typing import Optional\r\nfrom tester import Tester\r\nfrom ListNode import ListNode, 
list_to_nodes as ltn\r\n\r\n\r\nclass Solution:\r\n def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\r\n \"\"\"Time: O(n), Space: O(n)\r\n \"\"\"\r\n nodes = []\r\n while head:\r\n nodes.append(head)\r\n head = head.next\r\n n = len(nodes)\r\n nodes[k - 1], nodes[n - k] = nodes[n - k], nodes[k - 1]\r\n for a, b in zip(nodes, nodes[1:]):\r\n a.next = b\r\n nodes[-1].next = None\r\n return nodes[0]\r\n\r\n def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\r\n \"\"\"Time: O(n), Space: O(1)\r\n \"\"\"\r\n n1, n2, tmp = None, None, head\r\n while tmp:\r\n n2 = n2.next if n2 else None\r\n k -= 1\r\n if not k:\r\n n1 = tmp\r\n n2 = head\r\n tmp = tmp.next\r\n n1.val, n2.val = n2.val, n1.val\r\n return head\r\n\r\n\r\nt = Tester(Solution())\r\n\r\nt.test(ltn(\"[1]\"), ltn(\"[1]\"), 1)\r\nt.test(ltn(\"[2,1]\"), ltn(\"[1,2]\"), 2)\r\nt.test(ltn(\"[2,1]\"), ltn(\"[1,2]\"), 1)\r\nt.test(ltn(\"[1,2,3]\"), ltn(\"[3,2,1]\"), 1)\r\nt.test(ltn(\"[3,2,1]\"), ltn(\"[3,2,1]\"), 2)\r\n\r\nt.report()\r\n","repo_name":"thinhntr/cp","sub_path":"leetcode/Swapping Nodes in a Linked List.py","file_name":"Swapping Nodes in a Linked List.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38004028880","text":"from grove_library import arduinoInit, speakerInit, arduinoAnalogRead, arduinoDigitalRead, speakerPlayNote\r\nimport time\r\nimport sys, pygame\r\n\r\npygame.init()\r\n\r\nconnection = arduinoInit('COM4')\r\nspeakerInit(4, connection)\r\n\r\nred = (255, 0, 0)\r\norange = (255, 165, 0)\r\nyellow = (218, 255, 0)\r\ngreen = (0, 255, 0)\r\nblue = (0, 0, 255)\r\nindigo = (75, 0, 130)\r\nviolet = (238, 130, 238)\r\nwhite = (255, 255, 255)\r\nblack = (0, 0, 0)\r\n\r\n\r\ndef GetDirection():\r\n X = arduinoAnalogRead(0) - 512\r\n Y = arduinoAnalogRead(1) - 522\r\n\r\n if (Y ** 2 + X ** 2) <= 625:\r\n print(X, Y)\r\n print(\"none\")\r\n return 'N'\r\n if Y >= X and Y >= (-1) * X:\r\n print(X, Y)\r\n print(\"up\")\r\n return 'U'\r\n if Y >= X and Y <= (-1) * X:\r\n print(X, Y)\r\n print(\"left\")\r\n return 'L'\r\n if Y < X and Y >= (-1) * X:\r\n print(X, Y)\r\n print(\"right\")\r\n return 'R'\r\n if Y < X and Y < (-1) * X:\r\n print(X, Y)\r\n print(\"down\")\r\n return 'D'\r\n\r\n\r\ndef applenoise():\r\n speakerPlayNote(800, 0.3)\r\n time.sleep(0.1) # THIS SLEEP IS ONLY HERE BECAUSE OF BROKEN SPEAKER, WE CAN SWITCH AFTER\r\n speakerPlayNote(1000, 0.3)\r\n time.sleep(0.1)\r\n\r\n\r\ndef deathnoise():\r\n speakerPlayNote(300, 0.3)\r\n time.sleep(0.3)\r\n speakerPlayNote(200, 0.3)\r\n time.sleep(0.3)\r\n speakerPlayNote(100, 0.3)\r\n\r\n\r\ndef buttonpress():\r\n b = arduinoDigitalRead(2)\r\n return b\r\n\r\n\r\ndef clear():\r\n X, Y = 750, 750\r\n Size = (X, Y)\r\n\r\n display_surface = pygame.display.set_mode(Size)\r\n pygame.display.update()\r\n time.sleep(0.001)\r\n\r\n\r\ndef countdown():\r\n X, Y = 750, 750\r\n Size = (X, Y)\r\n\r\n display_surface = pygame.display.set_mode(Size)\r\n fontcountdown = pygame.font.SysFont('comic sans', 60)\r\n countdown_3 = fontcountdown.render(\"3\", True, white)\r\n display_surface.blit(countdown_3, (360, 375))\r\n pygame.display.update()\r\n time.sleep(1)\r\n clear()\r\n\r\n countdown_2 = fontcountdown.render(\"2\", True, white)\r\n display_surface.blit(countdown_2, (360, 375))\r\n pygame.display.update()\r\n time.sleep(1)\r\n clear()\r\n\r\n countdown_1 = fontcountdown.render(\"1\", True, white)\r\n 
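# --- Editor's note (illustrative sketch, not part of the original record) ---
# The swap-nodes sample above imports ListNode and list_to_nodes from local
# helper modules that are not included in this shard. A minimal stand-in,
# inferred from usage like ltn("[1,2]") (so its exact behaviour is an
# assumption), could be:

import json

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def list_to_nodes(s):
    # '[1,2,3]' -> 1 -> 2 -> 3 -> None
    dummy = tail = ListNode()
    for v in json.loads(s):
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

head = list_to_nodes("[1,2,3]")
assert (head.val, head.next.val, head.next.next.next) == (1, 2, None)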
display_surface.blit(countdown_1, (360, 375))\r\n pygame.display.update()\r\n time.sleep(1)\r\n death()\r\n\r\n\r\ndef pregame():\r\n while True:\r\n time.sleep(0.1)\r\n X, Y = 750, 750\r\n Size = (X, Y)\r\n\r\n display_surface = pygame.display.set_mode(Size)\r\n fontstartgame = pygame.font.SysFont('comic sans', 50)\r\n presstostart = fontstartgame.render(\"press button to start game\", True, green)\r\n display_surface.blit(presstostart, (140, 340))\r\n pygame.display.update()\r\n\r\n if buttonpress() == True:\r\n clear()\r\n countdown()\r\n\r\n\r\n\r\n\r\ndef death():\r\n deathnoise()\r\n\r\n X, Y = 750, 750\r\n Size = (X, Y)\r\n\r\n display_surface = pygame.display.set_mode(Size)\r\n fontdeath = pygame.font.SysFont('comic sans', 100)\r\n you_died = fontdeath.render('YOU DIED', True, red)\r\n display_surface.blit(you_died, (200, 330))\r\n pygame.display.update()\r\n time.sleep(2)\r\n clear()\r\n\r\n score = 100\r\n display_surface = pygame.display.set_mode(Size)\r\n fontscore = pygame.font.SysFont('comic sans', 75)\r\n totalscore = fontscore.render(f'total score: {score} pts', True, green)\r\n display_surface.blit(totalscore, (120, 320))\r\n pygame.display.update()\r\n time.sleep(2)\r\n clear()\r\n\r\n i = 1\r\n selection = 2\r\n while i:\r\n\r\n if selection == 1:\r\n display_surface = pygame.display.set_mode(Size)\r\n fontplayagain = pygame.font.SysFont('comic sans', 70)\r\n playagain = fontplayagain.render('->play again', True, green)\r\n display_surface.blit(playagain, (220, 300))\r\n\r\n fontquit = pygame.font.SysFont('comic sans', 70)\r\n quit = fontplayagain.render(' quit', True, white)\r\n display_surface.blit(quit, (220, 400))\r\n pygame.display.update()\r\n\r\n if buttonpress():\r\n pregame()\r\n\r\n if selection == 2:\r\n display_surface = pygame.display.set_mode(Size)\r\n fontplayagain = pygame.font.SysFont('comic sans', 70)\r\n playagain = fontplayagain.render(' play again', True, white)\r\n display_surface.blit(playagain, (220, 300))\r\n\r\n fontquit = pygame.font.SysFont('comic sans', 70)\r\n quit = fontplayagain.render('->quit', True, green)\r\n display_surface.blit(quit, (220, 400))\r\n pygame.display.update()\r\n\r\n if buttonpress():\r\n quit(0.5)\r\n\r\n\r\n if selection == 1 and GetDirection() == 'D':\r\n selection = 2\r\n\r\n if selection == 2 and GetDirection() == 'U':\r\n selection = 1\r\n\r\n time.sleep(0.05)\r\n\r\ndef initialize():\r\n pregame()\r\n\r\n\r\ninitialize()","repo_name":"GreyOwul/School","sub_path":"Snake_4.py","file_name":"Snake_4.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18432500794","text":"import pygame\nimport random\n\npygame.init()\n\n# Створюємо вікно\nscreen = pygame.display.set_mode((800, 600))\n\n# Задаємо назву вікна\npygame.display.set_caption(\"Гонки\")\n\n# Задаємо зображення фону\nbackground = pygame.image.load('background.jpg')\n\n# Задаємо зображення автомобіля\ncarImg = pygame.image.load('car.webp')\n\n# Задаємо початкові координати автомобіля\nx = 30\ny = 48\nx_change = 0\n\n# Задаємо початкові координати препятствів\nobstacle_x = random.randint(0, 736)\nobstacle_y = -600\nobstacle_change_y = 4\n\n# Задаємо функцію для відображення автомобіля\ndef car(x, y):\n screen.blit(carImg, (x, y))\n\n# Задаємо функцію для відображення препятствів\ndef obstacle(obstacle_x, obstacle_y):\n screen.blit(obstacleImg, (obstacle_x, obstacle_y))\n\n# Задаємо зображення препятствів\nobstacleImg = pygame.image.load('obstacle.webp')\n\n# 
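# --- Editor's note (illustrative sketch, not part of the original record) ---
# GetDirection() in the snake sample above classifies a centred joystick
# vector into four quadrants by comparing Y against X and -X. The same split
# can be written with math.atan2, which some readers find easier to audit;
# exact-diagonal ties land differently (the original sends them to 'U'/'L'):

import math

def get_direction(x, y, deadzone=25):
    if x * x + y * y <= deadzone * deadzone:
        return 'N'  # inside the dead zone, as in the original
    angle = math.degrees(math.atan2(y, x))  # -180..180, 0 along +X
    if -45 < angle <= 45:
        return 'R'
    if 45 < angle <= 135:
        return 'U'
    if -135 < angle <= -45:
        return 'D'
    return 'L'

assert (get_direction(0, 0), get_direction(0, 100), get_direction(-100, 0)) == ('N', 'U', 'L')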
Задаємо головний цикл програми\nrunning = True\nwhile running:\n # Задаємо колір фону\n screen.fill((0, 0, 0))\n # Задаємо фон\n screen.blit(background, (0, 0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n # Задаємо переміщення автомобіля\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n x_change = -5\n if event.key == pygame.K_RIGHT:\n x_change = 5\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n x_change = 0\n x += x_change\n # Задаємо межі переміщення автомобіля\n if x <= 0:\n x = 0\n elif x >= 736:\n x = 736\n # Задаємо переміщення препятствів\n obstacle_y += obstacle_change_y\n # Задаємоповернення препятствів на початкову позицію\n obstacle_height = 1\n if obstacle_y > 600:\n obstacle_y = 0 - obstacle_height\n obstacle_x = random.randint(0, 736)\n # Викликаємо функції для відображення автомобіля та препятствів\n car(x, y)\n obstacle(obstacle_x, obstacle_y)\n # Оновлюємо екран\n pygame.display.update()","repo_name":"elonprogramer/python_lessons","sub_path":"home_drafts/short_examples/test/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2057047957","text":"import player\r\nfrom minimax_player import *\r\nfrom mcts_player import *\r\nfrom nn_player import get_nn_input, NNPlayer\r\nfrom game import Game\r\nfrom macros import *\r\nimport time\r\n\r\n\r\nclass Match:\r\n def __init__(self, player1, player2, board_size=BOARD_SIZE, handi=0, komi=7.5, gui=True, save_data=False):\r\n # player 1 is black, player 2 is white\r\n self.players = [player1, player2]\r\n self.game = Game(board_size, handi, komi, gui)\r\n self.save_data = save_data\r\n self.states = []\r\n self.actions = []\r\n\r\n def play(self):\r\n while not self.game.done:\r\n if self.game.turn == BLACK:\r\n move = self.players[0].get_move(self.game)\r\n else:\r\n move = self.players[1].get_move(self.game)\r\n if self.save_data:\r\n self.states.append(get_nn_input(self.game))\r\n self.actions.append(from_action(move))\r\n self.game.move(move)\r\n # time.sleep(0.3)\r\n return self.game.winner\r\n\r\n def get_train_data(self):\r\n assert self.save_data\r\n return self.states, self.actions, self.game.winner\r\n\r\n\r\nif __name__ == \"__main__\":\r\n black_wins = 0\r\n white_wins = 0\r\n for i in range(10):\r\n # match = Match(MinimaxPlayer(9, BLACK, simple_evaluation, depth=2), MCTSPlayer(9, WHITE, simple_mcts_evaluation, rollouts=50))\r\n # match = Match(player.InputPlayer(9, BLACK), player.InputPlayer(9, WHITE))\r\n # match = Match(player.InputPlayer(9, BLACK), player.InputPlayer(9, WHITE), board_size=3)\r\n match = Match(NNPlayer(9, BLACK), NNPlayer(9, WHITE))\r\n winner = match.play()\r\n if winner == BLACK:\r\n black_wins += 1\r\n elif winner == WHITE:\r\n white_wins += 1\r\n print(\"black wins: {}; white wins: {}\".format(black_wins, white_wins))\r\n","repo_name":"jchiu342/roost","sub_path":"match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"25933460883","text":"from collections import defaultdict\nimport logging\n\nimport torch\n\nLOG = logging.getLogger(__name__)\n\n\nclass RunningCache(torch.nn.Module):\n def __init__(self, cached_items):\n super().__init__()\n\n self.cached_items = cached_items\n self.duration = abs(min(cached_items)) + 1\n self.cache 
= [None for _ in range(self.duration)]\n self.index = 0\n\n LOG.debug('running cache of length %d', len(self.cache))\n\n def incr(self):\n self.index = (self.index + 1) % self.duration\n\n def get_index(self, index):\n while index < 0:\n index += self.duration\n while index >= self.duration:\n index -= self.duration\n LOG.debug('retrieving cache at index %d', index)\n\n v = self.cache[index]\n if v is not None:\n v = v.detach()\n return v\n\n def get(self):\n return [self.get_index(i + self.index) for i in self.cached_items]\n\n def set_next(self, data):\n self.incr()\n self.cache[self.index] = data\n LOG.debug('set new data at index %d', self.index)\n return self\n\n def forward(self, *args):\n LOG.debug('----------- running cache --------------')\n x = args[0]\n\n o = []\n for x_i in x:\n o += self.set_next(x_i).get()\n\n if any(oo is None for oo in o):\n o = [oo if oo is not None else o[0] for oo in o]\n\n # drop images of the wrong size (determine size by majority vote)\n if len(o) >= 2:\n image_sizes = [tuple(oo.shape[-2:]) for oo in o]\n if not all(ims == image_sizes[0] for ims in image_sizes[1:]):\n freq = defaultdict(int)\n for ims in image_sizes:\n freq[ims] += 1\n max_freq = max(freq.values())\n ref_image_size = next(iter(ims for ims, f in freq.items() if f == max_freq))\n\n for i, ims in enumerate(image_sizes):\n if ims == ref_image_size:\n continue\n for s in range(1, len(image_sizes)):\n target_i = (i + s) % len(image_sizes)\n if image_sizes[target_i] == ref_image_size:\n break\n LOG.warning('replacing %d (%s) with %d (%s) for ref %s',\n i, ims,\n target_i, image_sizes[target_i],\n ref_image_size)\n o[i] = o[target_i]\n\n return torch.stack(o)\n","repo_name":"openpifpaf/openpifpaf","sub_path":"src/openpifpaf/network/running_cache.py","file_name":"running_cache.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":1098,"dataset":"github-code","pt":"81"} +{"seq_id":"30558332158","text":"# 수열 A가 주어졌을 때, 가장 긴 증가하는 부분 수열을 구하는 프로그램을 작성하시오.\n# 예를 들어, 수열 A = {10, 20, 10, 30, 20, 50} 인 경우에 가장 긴 증가하는 부분 수열은 A = {10, 20, 30, 50} 이고, 길이는 4이다.\n\n# 첫째 줄에 수열 A의 크기 N (1 ≤ N ≤ 1,000)이 주어진다.\n# 둘째 줄에는 수열 A를 이루고 있는 Ai가 주어진다. (1 ≤ Ai ≤ 1,000)\n\n# 첫째 줄에 수열 A의 가장 긴 증가하는 부분 수열의 길이를 출력한다.\n\n# 10 20 30 50 : 4\n# 10 20\nn = int(input())\na = list(map(int, input().split()))\n# dp엔 최대 길이를 저장. 비교 필요\ndp = [0 for _ in range(n)]\n\n# 10 20 10 30 20 50 기준\n# 10 : 1\n# 20 : 2\n# 30 : 3\n# 50 : 4\n# 배열로는 [1, 2, 1, 3, 2, 4]\n# 이 중 최대값 출력하면 끝?\n# 나보다 낮은 수의 최댓값 +1 하면 될 거 같다.\nfor i in range(n):\n for k in range(i):\n # 1. a[i] > a[k] : 값을 대체 가능한 상태.\n # 2. dp[i] < dp[k] : 값을 대체 해야하는 상태. 
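# --- Editor's note (illustrative sketch, not part of the original record) ---
# The Baekjoon 24444 sample above records, for each vertex, the order in which
# BFS dequeues it ("timer"), leaving 0 for unreachable vertices. The same idea
# with English comments, self-contained:

from collections import deque

def bfs_visit_order(adj, start, n):
    # adj maps each vertex 1..n to a sorted neighbour list
    order = [0] * (n + 1)  # order[v] == 0 means v was never reached
    seen = [False] * (n + 1)
    seen[start] = True
    q, t = deque([start]), 1
    while q:
        v = q.popleft()
        order[v] = t
        t += 1
        for w in adj[v]:
            if not seen[w]:
                seen[w] = True
                q.append(w)
    return order[1:]

assert bfs_visit_order({1: [2, 3], 2: [1], 3: [1], 4: []}, 1, 4) == [1, 2, 3, 0]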
ex) 20의 기본값은 0인데, 이 상태에서 10의 최대값인 1을 계승 받아야 함.\n if a[i] > a[k] and dp[i] < dp[k]:\n dp[i] = dp[k]\n dp[i] += 1;\n\nprint(max(dp))\n","repo_name":"Zabee52/study","sub_path":"algorithm/python/algorithm/baekjoon/long_sequence11053.py","file_name":"long_sequence11053.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18941518815","text":"API_SSL_SERVER=\"https://www.google.com/recaptcha/api\"\nAPI_SERVER=\"http://www.google.com/recaptcha/api\"\nVERIFY_SERVER=\"www.google.com\"\n\nclass RecaptchaResponse(object):\n def __init__(self, is_valid, error_code=None):\n self.is_valid = is_valid\n self.error_code = error_code\n\ndef async_submit(recaptcha_challenge_field,\n recaptcha_response_field,\n private_key,\n remoteip,\n callback):\n \"\"\"\n Submits a reCAPTCHA request for verification. Returns RecaptchaResponse\n for the request\n\n recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form\n recaptcha_response_field -- The value of recaptcha_response_field from the form\n private_key -- your reCAPTCHA private key\n remoteip -- the user's ip address\n \"\"\"\n from tornado.httpclient import HTTPRequest, AsyncHTTPClient\n from tornado.httputil import url_concat\n import urllib\n\n if not (recaptcha_response_field and recaptcha_challenge_field and\n len (recaptcha_response_field) and len (recaptcha_challenge_field)):\n callback(RecaptchaResponse (is_valid = False, error_code = 'incorrect-captcha-sol'))\n return\n\n def encode_if_necessary(s):\n if isinstance(s, unicode):\n return s.encode('utf-8')\n return s\n\n # setup a callback for the request to call\n def handle_response(response):\n return_values = response.body.splitlines()\n return_code = return_values[0]\n if (return_code == \"true\"):\n callback( RecaptchaResponse (is_valid=True) )\n else:\n callback( RecaptchaResponse (is_valid=False, error_code = return_values [1]))\n\n # make an async request, then the callback will be called.\n params = {\n 'privatekey': encode_if_necessary(private_key),\n 'remoteip' : encode_if_necessary(remoteip),\n 'challenge': encode_if_necessary(recaptcha_challenge_field),\n 'response' : encode_if_necessary(recaptcha_response_field),\n }\n\n url = url_concat(\"http://%s/recaptcha/api/verify\" % VERIFY_SERVER, params)\n\n request = HTTPRequest(url, headers ={\n \"Content-type\": \"application/x-www-form-urlencoded\",\n \"User-agent\": \"reCAPTCHA Python Async\"\n })\n\n AsyncHTTPClient().fetch( request, handle_response )\n\n\n","repo_name":"peter-the-tea-drinker/tornado-base","sub_path":"tboneold/async_recaptcha.py","file_name":"async_recaptcha.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"21938899738","text":"\nfrom flask import Flask, render_template\nimport pandas as pd\nimport cx_Oracle as o\nimport json\nimport plotly\nimport plotly.express as px\napp = Flask(__name__)\n\n@app.route('/')\ndef notdash():\n df = pd.DataFrame({\n 'Fruit': ['Apples', 'Oranges', 'Bananas', 'Apples', 'Oranges',\n 'Bananas'],\n 'Amount': [4, 1, 2, 2, 4, 5],\n 'City': ['SF', 'SF', 'SF', 'Montreal', 'Montreal', 'Montreal']\n })\n print(df)\n fig = px.bar(df, x='Fruit', y='Amount', color='City', barmode='group')\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n \n \n return render_template('nodash.html', graphJSON=graphJSON)\n\n@app.route('/a')\ndef a():\n df = px.data.iris()\n 
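# --- Editor's note (illustrative sketch, not part of the original record) ---
# The Baekjoon 11053 sample above is the classic O(n^2) LIS DP. For larger
# inputs, the patience-sorting variant with bisect gives O(n log n):

from bisect import bisect_left

def lis_length(a):
    tails = []  # tails[k] is the smallest tail of any increasing run of length k + 1
    for x in a:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

assert lis_length([10, 20, 10, 30, 20, 50]) == 4  # the example from the sample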
df.head()\n fig = px.scatter(df, x='sepal_length', y='sepal_width', color='species')\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('nodash.html', graphJSON=graphJSON)\n\n@app.route('/b')\ndef b():\n df = px.data.iris()\n df.head()\n\n fig = px.scatter(df, x='sepal_length', y='sepal_width', color='species')\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('nodash.html', graphJSON=graphJSON)\n\n\n@app.route('/student')\ndef student():\n \n sql = 'select * from student'\n dsn =o.makedsn('localhost','1521','xe')\n conn =o.connect( user='goorm',password='goorm', dsn=dsn)\n \n df = pd.read_sql(sql, conn)\n print(df)\n fig = px.bar(df, x=\"NAME\", y=\"AGE\", title=\"타이틀\", width=600, height=400,\n labels={'NAME':'이름','AGE':'나이'},\n color_discrete_map={\"나이\": \"RebeccaPurple\"},\n template=\"simple_white\",text='AGE')\n # fig.update_layout(yaxis_range=[0,100])\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('nodash.html', graphJSON=graphJSON)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8077, debug=True)\n","repo_name":"junseokShim/Flask_Orcle_DB","sub_path":"Flask_Oracle_DB/day_5_lec/flaskplotly.py","file_name":"flaskplotly.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"320098732","text":"import unittest\nfrom datetime import datetime\n\nimport responses\nimport requests\n\nfrom pytrics.qualtrics_api.client import QualtricsAPIClient\n\n\nclass CreateBlockTestCase(unittest.TestCase):\n\n def setUp(self):\n self.client = QualtricsAPIClient('http://qualtrics.com/api', 'token-456')\n self.question_payload = {\n 'QuestionText': 'What is love?',\n 'DataExportTag': 'Q1',\n 'QuestionType': 'MC',\n 'Selector': 'SAVR',\n 'SubSelector': 'TX',\n 'Configuration': {\n 'QuestionDescriptionOption': 'UseText'\n },\n 'QuestionDescription': 'respondent_what_is_love_mc',\n 'Choices': {\n '1': 'Baby don\\'t hurt me',\n '2': 'Don\\'t hurt me',\n '3': 'No more'\n },\n 'ChoiceOrder': [\n '1',\n '2',\n '3'\n ],\n 'Validation': {\n 'Settings': {\n 'ForceResponse': 'OFF',\n 'ForceResponseType': 'OFF',\n 'Type': 'None'\n }\n },\n 'Language': []\n }\n\n def test_create_question_asserts_survey_id_parameter(self):\n with self.assertRaises(AssertionError):\n _ = self.client.create_question(None, self.question_payload)\n\n with self.assertRaises(AssertionError):\n _ = self.client.create_question('', self.question_payload)\n\n with self.assertRaises(AssertionError):\n _ = self.client.create_question(1, self.question_payload)\n\n def test_create_question_validates_survey_id(self):\n with self.assertRaises(AssertionError):\n self.client.create_question('invalid_survey_id', self.question_payload)\n\n def test_create_question_asserts_question_payload_parameter(self):\n with self.assertRaises(AssertionError):\n _ = self.client.create_question('SV_abcdefghijk', None)\n\n with self.assertRaises(AssertionError):\n _ = self.client.create_question('SV_abcdefghijk', datetime(2019, 8, 16))\n\n with self.assertRaises(AssertionError):\n _ = self.client.create_question('SV_abcdefghijk', ['a', 'b'])\n\n def test_create_question_validates_question_payload_parameter(self):\n payload_without_question_text_key = self.question_payload\n payload_without_question_text_key.pop('QuestionText')\n\n with self.assertRaises(AssertionError):\n _ = self.client.create_question('SV_abcdefghijk', 
payload_without_question_text_key)\n\n with self.assertRaises(AssertionError):\n _ = self.client.create_question('SV_abcdefghijk', {})\n\n def test_create_question_asserts_optional_block_id_is_string_if_supplied(self):\n with self.assertRaises(AssertionError):\n self.client.create_question('SV_1234567890a', self.question_payload, block_id=123)\n\n def test_create_question_validates_optional_block_id_if_supplied(self):\n with self.assertRaises(AssertionError):\n self.client.create_question('SV_1234567890a', self.question_payload, block_id='invalid_block_id')\n\n @responses.activate\n def test_makes_request_as_expected(self):\n create_question_json = {\n 'result': {\n 'QuestionID': 'QID1'\n },\n 'meta': {\n 'requestId': 'be14851c-7d92-4b1c-a541-a9e03228b15e',\n 'httpStatus': '200 - OK'\n }\n }\n\n responses.add(\n responses.POST, 'http://qualtrics.com/api/survey-definitions/SV_abcdefghijk/questions?blockId=BL_1234567890a', json=create_question_json\n )\n\n result, question_id = self.client.create_question('SV_abcdefghijk', self.question_payload, block_id='BL_1234567890a')\n\n self.assertEqual(result, create_question_json)\n self.assertEqual(question_id, create_question_json['result']['QuestionID'])\n\n @responses.activate\n def test_raises_http_error_for_failed_requests(self):\n responses.add(\n responses.POST, 'http://qualtrics.com/api/survey-definitions/SV_abcdefghijk/questions', json={}, status=404\n )\n with self.assertRaises(requests.HTTPError):\n _, _ = self.client.create_question('SV_abcdefghijk', self.question_payload)\n\n responses.replace(\n responses.POST, 'http://qualtrics.com/api/survey-definitions/SV_abcdefghijk/questions', json={}, status=500\n )\n with self.assertRaises(requests.HTTPError):\n _, _ = self.client.create_question('SV_abcdefghijk', self.question_payload)\n","repo_name":"60decibels/pytrics","sub_path":"tests/qualtrics_api/client/question/create_question_tests.py","file_name":"create_question_tests.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"74225047623","text":"from tkinter import *\nfrom tkinter.ttk import *\nimport src.utils.utils as utils\n\n\nclass HighscoresWindow(Frame):\n\n def __init__(self, parent, scores):\n style = Style(parent)\n style.theme_use(\"clam\")\n style.configure(\"Treeview\", background=utils.dark_blue, foreground=\"white\")\n style.configure(\"Treeview.Heading\", background=utils.dark_blue, foreground=\"white\")\n style.configure(\"Treeview.Row\", background=utils.dark_blue,\n foreground=\"white\")\n style.configure(\"Treeview.Cell\", background=utils.dark_blue,\n foreground=\"white\")\n style.configure(\"Treeview.Item\", background=utils.dark_blue,\n foreground=\"white\")\n Frame.__init__(self, parent)\n self.CreateUI()\n self.LoadTable(scores)\n self.grid(sticky=(N, S, W, E))\n parent.grid_rowconfigure(0, weight=1)\n parent.grid_columnconfigure(0, weight=1)\n\n def CreateUI(self):\n tv = Treeview(self)\n tv['columns'] = ('name', 'score', 'date')\n tv.heading(\"#0\", text='Index', anchor='w')\n tv.column(\"#0\", anchor=\"w\", width=100)\n tv.heading('name', text='Name')\n tv.column('name', anchor='center', width=100)\n tv.heading('score', text='Score')\n tv.column('score', anchor='center', width=100)\n tv.heading('date', text='Date')\n tv.column('date', anchor='center', width=100)\n tv.grid(sticky=(N, S, W, E))\n self.treeview = tv\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(0, weight=1)\n\n def 
LoadTable(self, scores):\n for index, [name, time, date] in enumerate(scores):\n self.treeview.insert('', 'end', text=str(index + 1), values=(name, time, date), tags=('ttk',))\n","repo_name":"blazejkustra/sudoku-app","sub_path":"src/highscores_window.py","file_name":"highscores_window.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74996422023","text":"import numpy as np\nfrom collections import OrderedDict\nimport copy\nimport time\nimport datetime\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.distributions as D\n\nfrom robomimic.algo.bc import BC_RNN, BC\nimport robomimic.models.policy_nets as PolicyNets\nimport mimicplay.models.policy_nets as PolicyNetsMimicPlay\nimport robomimic.utils.obs_utils as ObsUtils\nimport robomimic.utils.tensor_utils as TensorUtils\nfrom robomimic.models.obs_nets import MIMO_Transformer\n# import robomimic.utils.geometry as geometry\nfrom robomimic.algo import register_algo_factory_func\n\nfrom mimicplay.algo import Highlevel_GMM_pretrain\n\n@register_algo_factory_func(\"mimic_bilevel\")\ndef algo_config_to_class(algo_config):\n \"\"\"\n Maps algo config to the algo class to instantiate, along with additional algo kwargs.\n\n Args:\n algo_config (Config instance): algo config\n\n Returns:\n algo_class: subclass of Algo\n algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm\n \"\"\"\n\n if not algo_config.bc_bilevel.enabled: # if not using bilevel algo in this file, return mimicplay\n return Highlevel_GMM_pretrain_mimicplay, {}\n\n return BC_Bilevel(), {}\n\nclass Highlevel_GMM_pretrain_mimicplay(Highlevel_GMM_pretrain):\n\n def _create_networks(self):\n \"\"\"\n Creates networks and places them into @self.nets.\n \"\"\"\n assert self.algo_config.highlevel.enabled\n assert not self.algo_config.lowlevel.enabled\n\n # del self.obs_shapes['robot0_eef_pos_future_traj']\n self.ac_dim = self.algo_config.highlevel.ac_dim\n\n self.nets = nn.ModuleDict()\n self.nets[\"policy\"] = PolicyNetsMimicPlay.GMMActorNetwork(\n obs_shapes=self.obs_shapes,\n goal_shapes=self.goal_shapes,\n ac_dim=self.ac_dim,\n mlp_layer_dims=self.algo_config.actor_layer_dims,\n num_modes=self.algo_config.gmm.num_modes,\n min_std=self.algo_config.gmm.min_std,\n std_activation=self.algo_config.gmm.std_activation,\n low_noise_eval=self.algo_config.gmm.low_noise_eval,\n encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder),\n )\n\n self.save_count = 0\n\n self.nets = self.nets.float().to(self.device)\n\n def process_batch_for_training(self, batch):\n \"\"\"\n Processes input batch from a data loader to filter out\n relevant information and prepare the batch for training.\n\n Args:\n batch (dict): dictionary with torch.Tensors sampled\n from a data loader\n\n Returns:\n input_batch (dict): processed and filtered batch that\n will be used for training\n \"\"\"\n input_batch = dict()\n input_batch[\"obs\"] = {k: batch[\"obs\"][k][:, 0, :] for k in batch[\"obs\"]} # only keep first obs\n input_batch[\"goal_obs\"] = batch.get(\"goal_obs\", None)\n input_batch[\"actions\"] = batch[\"actions\"].view([batch[\"actions\"].shape[0], -1]) # merge time and ac dims\n assert input_batch[\"actions\"].shape[-1] == self.ac_dim\n\n # we move to device first before float conversion because image observation modalities will be uint8 -\n # this minimizes the amount of data transferred to GPU\n 
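# --- Editor's note (illustrative sketch, not part of the original record) ---
# The HighscoresWindow sample above builds a three-column ttk.Treeview and
# fills it row by row in LoadTable. The Treeview mechanics in miniature
# (window title and sample rows are invented for the demo):

from tkinter import Tk, ttk

root = Tk()
root.title("Highscores demo")
tv = ttk.Treeview(root, columns=("name", "score", "date"))
tv.heading("#0", text="Index")
for col in ("name", "score", "date"):
    tv.heading(col, text=col.capitalize())
for i, row in enumerate([("alice", 95, "2020-01-01"), ("bob", 120, "2020-01-02")]):
    tv.insert("", "end", text=str(i + 1), values=row)
tv.grid(sticky="nsew")
root.mainloop()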
return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device))\n\n def postprocess_batch_for_training(self, batch, obs_normalization_stats):\n \"\"\"\n Processes input batch from a data loader to filter out\n relevant information and prepare the batch for training.\n\n Args:\n batch (dict): dictionary with torch.Tensors sampled\n from a data loader\n\n Returns:\n input_batch (dict): processed and filtered batch that\n will be used for training\n \"\"\"\n\n # ensure obs_normalization_stats are torch Tensors on proper device\n obs_normalization_stats = TensorUtils.to_float(\n TensorUtils.to_device(TensorUtils.to_tensor(obs_normalization_stats), self.device))\n\n # we will search the nested batch dictionary for the following special batch dict keys\n # and apply the processing function to their values (which correspond to observations)\n obs_keys = [\"obs\", \"next_obs\", \"goal_obs\"]\n\n def recurse_helper(d):\n \"\"\"\n Apply process_obs_dict to values in nested dictionary d that match a key in obs_keys.\n \"\"\"\n for k in d:\n if k in obs_keys:\n # found key - stop search and process observation\n if d[k] is not None:\n d[k] = ObsUtils.process_obs_dict(d[k])\n if obs_normalization_stats is not None:\n d[k] = ObsUtils.normalize_obs(d[k], obs_normalization_stats=obs_normalization_stats)\n elif isinstance(d[k], dict):\n # search down into dictionary\n recurse_helper(d[k])\n\n recurse_helper(batch)\n\n # TODO move line below to above function maybe; or remove fully to use what's in mimicplay\n # batch[\"goal_obs\"][\"agentview_image\"] = batch[\"goal_obs\"][\"agentview_image\"][:, 0]\n\n return TensorUtils.to_device(TensorUtils.to_float(batch), self.device)\n\n def _forward_training(self, batch):\n \"\"\"\n Internal helper function for BC algo class. 
Compute forward pass\n and return network outputs in @predictions dict.\n\n Args:\n batch (dict): dictionary with torch.Tensors sampled\n from a data loader and filtered by @process_batch_for_training\n\n Returns:\n predictions (dict): dictionary containing network outputs\n \"\"\"\n\n dists = self.nets[\"policy\"].forward_train(\n obs_dict=batch[\"obs\"],\n goal_dict=batch[\"goal_obs\"]\n )\n\n # make sure that this is a batch of multivariate action distributions, so that\n # the log probability computation will be correct\n assert len(dists.batch_shape) == 1\n log_probs = dists.log_prob(batch[\"actions\"])\n\n predictions = OrderedDict(\n log_probs=log_probs,\n )\n return predictions\n\n\nclass BC_Bilevel(BC_RNN):\n def _create_networks(self):\n \"\"\"\n Creates networks and places them into @self.nets.\n \"\"\"\n self.nets = nn.ModuleDict()\n\n # ### LMP style - goal-conditioned output trajectory\n # self.nets[\"posterior\"] = PolicyNets.Latent(**encoder_kwargs) #TODO encoder\n # self.nets[\"decoder\"] = PolicyNets.Decoder(**decoder_kwargs) #TODO decoder\n\n ### TODO RSSM style - unconditioned dist over entire trajectory\n\n self.nets[\"policy\"] = PolicyNets.GMMActorNetwork()\n\n self.nets = self.nets.float().to(self.device)\n\n def _process_batch_for_training(self, batch):\n To = self.algo_config.horizon.observation_horizon # should be same as frame_stack\n Tp = self.algo_config.horizon.prediction_horizon # should be same as the entire seq size (seq_len+frame_stack1-1) \n # TODO decide what the input frames should be -- initial and last(goal)?\n input_batch = dict()\n input_batch[\"obs\"] = {k: batch[\"obs\"][k][:, :To, :] for k in batch[\"obs\"]}\n input_batch[\"goal_obs\"] = batch.get(\"goal_obs\", None) # goals may not be present\n input_batch[\"actions\"] = batch[\"actions\"][:, :Tp, :]\n\n # TODO check for action normalization\n\n return TensorUtils.to_device(TensorUtils.to_float(input_batch), self.device)\n\n def _prior(self):\n prior = D.MultivariateNormal()\n return prior\n\n def _posterior(self, batch):\n z = self.nets[\"posterior\"](batch['obs'])\n posterior = D.MultivariateNormal(z['mean', z['std']])\n # TODO add to logs\n return posterior\n\n def _compute_loss(self, batch):\n \n\n inputs = batch[\"obs\"]\n # Encode input obs and goal into a latent\n # Compute KL_div(posterior||prior)\n z = self.nets[\"posterior\"](inputs)\n\n\n\n # Decode all obs and actions\n # Compute NLL loss\n\n # Make sure everything is pushed to logs\n\n pass\n\n def log_info(self, info):\n pass\n\n def get_action(self, obs_dict, goal_dict=None):\n pass\n\n\n# # requires diffusers==0.11.1\n# from diffusers.schedulers.scheduling_ddpm import DDPMScheduler\n# from diffusers.schedulers.scheduling_ddim import DDIMScheduler\n\nclass LatentDiffusion(BC_RNN): #TODO change base class\n def __create_networks__(self):\n # setup noise scheduler\n noise_scheduler = None\n if self.algo_config.ddpm.enabled:\n noise_scheduler = DDPMScheduler(\n num_train_timesteps=self.algo_config.ddpm.num_train_timesteps,\n beta_schedule=self.algo_config.ddpm.beta_schedule,\n clip_sample=self.algo_config.ddpm.clip_sample,\n prediction_type=self.algo_config.ddpm.prediction_type\n )\n elif self.algo_config.ddim.enabled:\n noise_scheduler = DDIMScheduler(\n num_train_timesteps=self.algo_config.ddim.num_train_timesteps,\n beta_schedule=self.algo_config.ddim.beta_schedule,\n clip_sample=self.algo_config.ddim.clip_sample,\n set_alpha_to_one=self.algo_config.ddim.set_alpha_to_one,\n steps_offset=self.algo_config.ddim.steps_offset,\n 
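# --- Editor's note (illustrative sketch, not part of the original record) ---
# _forward_training above scores ground-truth actions under a GMM policy with
# dists.log_prob, and the BC loss is the negative mean log-likelihood. The
# same mechanics in plain torch.distributions (all shapes here are invented
# for the demo, not taken from the config):

import torch
import torch.distributions as D

B, K, A = 8, 5, 7  # batch size, mixture modes, action dimension
mix = D.Categorical(logits=torch.randn(B, K))
comp = D.Independent(D.Normal(torch.randn(B, K, A), 0.1 * torch.ones(B, K, A)), 1)
dists = D.MixtureSameFamily(mix, comp)  # batch_shape (B,), event_shape (A,)

actions = torch.randn(B, A)
loss = -dists.log_prob(actions).mean()  # scalar negative log-likelihood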
prediction_type=self.algo_config.ddim.prediction_type\n )\n else:\n raise RuntimeError()\n self.noise_scheduler = noise_scheduler\n\n self.nets[\"noise_pred\"] = PolicyNets.NoisePred() # TODO\n \n def noise_latents(self, latents):\n batch_size = latents.shape[0]\n\n # sample noise to add to latents\n noise = torch.randn(latents.shape, device=self.device)\n \n # sample a diffusion iteration for each data point\n timesteps = torch.randint(\n 0, self.noise_scheduler.config.num_train_timesteps, \n (batch_size,), device=self.device\n ).long()\n\n # (forward diffusion) add noise to the clean latents according to the noise magnitude at each diffusion iteration\n noisy_latents = self.noise_scheduler.add_noise(\n latents, noise, timesteps)\n \n return noisy_latents, noise\n \n def _compute_losses(self, batch):\n noisy_latents, noise = self.noise_latents(batch['latents'])\n noise_pred = self.nets[\"noise_pred\"](batch)\n loss = nn.MSELoss()(noise_pred, noise)\n # TODO ablate noise_pred, latent_pred, v_pred\n\n # TODO add to logs\n return loss\n\n","repo_name":"vaibhavsaxena11/mimic_bilevel","sub_path":"mimic_bilevel/algo/bc_bilevel.py","file_name":"bc_bilevel.py","file_ext":"py","file_size_in_byte":10747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10024131070","text":"from django.urls import path, include\nfrom eri import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('table/', views.table_view, name='table_view'), \n #path('download_pdf/', views.download_pdf, name='download_pdf'),\n path('download-excel/', views.download_excel, name='download_excel'),\n path('download_docx/', views.download_docx, name='download_docx'),\n path('excel_to_doc/', views.excel_to_doc, name='excel_to_doc'),\n #path('download_xml/', views.download_xml, name='download_xml'),\n path('/', views.subsection_detail, name='subsection_detail'),\n\n]","repo_name":"Mzhumabay/djangoapp","sub_path":"eri/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27376594773","text":"# -*- coding: utf-8 -*-\n\nfrom utils import DataTransformat, ListNode\n\n\nclass Solution(object):\n def flatten(self, head):\n \"\"\"\n :type head: Node\n :rtype: Node\n \"\"\"\n stack = [head] if head else []\n p = None\n while stack:\n node = stack.pop()\n if node.next:\n stack.append(node.next)\n if node.child:\n stack.append(node.child)\n if p:\n p.next = node\n node.prev = p\n p.child = node.child = None\n p = node\n return head\n","repo_name":"halysl/code","sub_path":"leetcode/430-扁平化多级双向链表.py","file_name":"430-扁平化多级双向链表.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25239957449","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 11 01:56:01 2020\r\n\r\n@author: ucobiz\r\n\"\"\"\r\n\r\nfrom Counter import Counter\r\n\r\none = Counter()\r\ntwo = Counter()\r\n\r\none.increment()\r\ntwo.increment()\r\ntwo.increment()\r\n\r\nprint(\"one's total\", one.my_total)\r\nprint(\"class total\", one.__class__.overall_total)\r\nprint(\"two's total\", two.my_total)\r\nprint(\"class total\", 
two.__class__.overall_total)","repo_name":"ISE2012/ch8","sub_path":"create_counter.py","file_name":"create_counter.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4259514962","text":"from datahub.activity_stream.serializers import ActivitySerializer\nfrom datahub.investment.opportunity.models import LargeCapitalOpportunity\n\n\nclass LargeCapitalOpportunityActivitySerializer(ActivitySerializer):\n \"\"\"Large Capital Opportunity serialiser for Activity Stream.\"\"\"\n\n class Meta:\n model = LargeCapitalOpportunity\n\n def _get_attributed_to(self, instance):\n attributed_to = []\n\n attributed_to.append(\n self._get_adviser(instance.lead_dit_relationship_manager),\n )\n\n if instance.created_by:\n attributed_to.append(\n self._get_adviser_with_team_and_role(\n instance.created_by,\n 'creator',\n 'DataHubLargeCapitalOpportunity',\n ),\n )\n\n if instance.modified_by:\n attributed_to.append(\n self._get_adviser_with_team_and_role(\n instance.modified_by,\n 'modifier',\n 'DataHubLargeCapitalOpportunity',\n ),\n )\n\n return attributed_to\n\n def to_representation(self, instance):\n \"\"\"\n Serialize the interaction as per Activity Stream spec:\n https://www.w3.org/TR/activitystreams-core/\n \"\"\"\n investment_opportunity_id = f'dit:DataHubLargeCapitalOpportunity:{instance.pk}'\n investment_opportunity = {\n 'id': f'{investment_opportunity_id}:Announce',\n 'type': 'Announce',\n 'published': instance.modified_on,\n 'generator': self._get_generator(),\n 'object': {\n 'id': investment_opportunity_id,\n 'type': ['dit:LargeCapitalOpportunity'],\n 'startTime': instance.created_on,\n 'name': instance.name,\n 'description': instance.description,\n 'attributedTo': self._get_attributed_to(instance),\n 'url': instance.get_absolute_url(),\n },\n }\n if instance.promoters:\n investment_opportunity['object']['dit:promoters'] = (\n self._get_companies(instance.promoters)\n )\n\n if instance.required_checks_conducted_by:\n investment_opportunity['object']['dit:requiredChecksConductedBy'] = (\n self._get_adviser_with_team(\n instance.required_checks_conducted_by,\n instance.required_checks_conducted_by.dit_team,\n ),\n )\n\n def format_key(name):\n first, *rest = name.split('_')\n return first + ''.join(word.capitalize() for word in rest)\n\n optional_named_attributes = [\n 'required_checks_conducted',\n 'opportunity_value_type',\n ]\n\n for attr in optional_named_attributes:\n if getattr(instance, attr):\n investment_opportunity['object'][f'dit:{format_key(attr)}'] = {\n 'name': getattr(instance, attr).name,\n }\n\n optional_multiple_named_attributes = [\n 'asset_classes',\n 'investment_types',\n 'construction_risks',\n 'time_horizons',\n 'sources_of_funding',\n 'reasons_for_abandonment',\n 'uk_region_locations',\n ]\n\n for attr in optional_multiple_named_attributes:\n values = getattr(instance, attr).all()\n if len(values) > 0:\n investment_opportunity['object'][f'dit:{format_key(attr)}'] = [\n {'name': value.name} for value in values\n ]\n\n optional_attributes = [\n 'total_investment_sought',\n 'current_investment_secured',\n 'opportunity_value',\n 'required_checks_conducted_on',\n 'dit_support_provided',\n 'status_id',\n 'required_checks_conducted_id',\n 'estimated_return_rate_id',\n ]\n\n for attr in optional_attributes:\n if getattr(instance, attr):\n investment_opportunity['object'][f'dit:{format_key(attr)}'] = getattr(\n instance, attr,\n )\n\n return 
investment_opportunity\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/activity_stream/opportunity/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"72691170504","text":"#Loop Detection:Given a circular list implement an algorithm that return the node at the beginning of the node\n#Definition\n#Circular LinkedList: A corrupt linked list in which a node's next pointer points to an earlier node, So as to make a loop in \n#the linked list\nclass Node:\n def __init__(self,dataval=None) -> None:\n self.dataval=dataval\n self.nextval=None\n\n\nclass SLinkedlist:\n def __init__(self) -> None:\n self.headval=None\n\n def printList(self):\n printlist=self.headval\n while printlist is not None:\n print(printlist.dataval)\n printlist=printlist.nextval\n \n\n def insertNode(self,newData):\n newNode=Node(newData)\n if self.headval is None:\n self.headval=newNode\n return\n adding=self.headval\n while(adding.nextval is not None):\n adding=adding.nextval\n adding.nextval=newNode\n\n def insertMiddle(self,key,newData):\n newNode=Node(newData)\n adding=self.headval\n while(adding.nextval is not None):\n if(adding.dataval==key):\n newNode.nextval=adding.nextval\n adding.nextval=newNode\n adding=adding.nextval\n \n\n def removenode(self,key):\n removing=self.headval\n if(removing is None):\n return\n if(removing.dataval==key):\n self.headval=removing.nextval\n removing=None\n return\n while(removing is not None):\n if(removing.dataval==key):\n break\n remove=removing\n removing=removing.nextval\n\n remove.nextval=removing.nextval\n removing=None\n\n\ndef findbegin(list1):\n hashtable={}\n pointer1=list1.headval\n counter=0\n while pointer1 is not None:\n counter+=1\n if(pointer1 in hashtable.values()):\n return pointer1.dataval\n else:\n hashtable[counter]=pointer1\n pointer1=pointer1.nextval\n\nHead=SLinkedlist()\nHead.headval=Node(1)\nNode1=Node(2)\nHead.headval.nextval=Node1\nNode2=Node(3)\nNode1.nextval=Node2\nNode3=Node(4)\nNode2.nextval=Node3\nNode4=Node(5)\nNode3.nextval=Node4\nNode4.nextval=Node2\n\nprint(findbegin(Head))","repo_name":"yrrk/Solved-LeetCode-and-DataStructure","sub_path":"Data Structure/LinkedList/q8.py","file_name":"q8.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20076287338","text":"from django.db import models\nfrom kineticapi.models.athlete_event import AthleteEvent\nfrom kineticapi.models.event_sport import EventSport\nfrom datetime import datetime\n\n\nclass Event(models.Model):\n\n organizer = models.ForeignKey(\"Organizer\", on_delete=models.CASCADE)\n name = models.CharField(max_length=50)\n description = models.TextField()\n date = models.DateTimeField()\n city = models.CharField(max_length=50)\n state = models.CharField(max_length=14)\n max_participants = models.IntegerField()\n course_url = models.URLField()\n event_logo = models.URLField()\n \n def __str__ (self): return f\"{self.name} on {self.date}\"\n\n @property\n def total_distance(self):\n \"\"\"Add total distance\"\"\"\n total_distance = 0\n try:\n event_sports = EventSport.objects.filter(event=self)\n for es in event_sports:\n total_distance += es.distance\n return total_distance\n except:\n return total_distance\n\n @property\n def total_elev_gain(self):\n \"\"\"Add total elevation gain\"\"\"\n try:\n event_sports = 
EventSport.objects.filter(event=self)\n total_elev = 0\n for es in event_sports:\n total_elev += es.elev_gain\n return total_elev\n except:\n return 0\n\n @property\n def spots_remaining(self):\n \"\"\"Calculate spots remaining for event registration\"\"\"\n remaining = self.max_participants\n try:\n remaining -= AthleteEvent.objects.filter(event=self).count()\n return remaining\n except:\n return remaining\n\n @property\n def days_until(self):\n \"\"\"Calculate days until the race\"\"\"\n today = datetime.now().timestamp()\n racetime = self.date.timestamp()\n daysUntil = round((racetime - today)/(3600*24))\n return daysUntil\n\n @property\n def completed(self):\n \"\"\"For the athlete, add completed property\"\"\"\n \n complete = False\n try:\n AthleteEvent.objects.get(event=self, completed=True)\n complete = True\n return complete\n except:\n return complete","repo_name":"DArmstrong87/kinetic-server","sub_path":"kineticapi/models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73474247624","text":"#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n\nfrom selenium import webdriver\n\ndriver = webdriver.PhantomJS(executable_path='/usr/selenium/webdriver/phantomjs/phantomjs')\ndriver.get(\"http://www.xjr7670.com\")\ndriver.get_screenshot_as_file(\"/tmp/xjr7670.com.png\")\n\ndriver.close()\n","repo_name":"xjr7670/webscrapingwithpython","sub_path":"take_screenshot.py","file_name":"take_screenshot.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74361991623","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 4 17:43:47 2018\n\n@author: fmueller\n\"\"\"\n\n# Imports\nimport matplotlib as mpl\nmpl.use('Agg')\n\nimport matplotlib.pyplot as plt\n\nimport os\nfrom rnaloc import annotationImporter\nfrom rnaloc import maskGenerator\nfrom rnaloc import FQtoolbox\nimport numpy as np\nimport re\nfrom scipy import ndimage\nimport json\nimport time\nimport csv\n\n# JSON encoder for numpy\n# From https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\n# Process specified FQ file\n\ndef process_file(FQ_file, img_size = (960,960), bin_prop = (0,90,20), channels={'cells':'C3-'},data_category={'roi':''},annotation_extension ='__RoiSet.zip',img_extension='.tif',show_plot=None,Zrange=None,dZ=2):\n '''\n Function uses annotations generated in FIJI and creates mask based\n on the specified parameters. The resulting files are zipped and be\n used for training of a neural network with ImJoy.\n\n Args:\n\n\n Zrange [tuple, 2 elements]. [Optional] Tuple specifying minimum and maximum z-value that is considered\n in analysis.\n\n bin_prop [Tuple, 3 elements]. Specifies the bins for the histograms (min, max,delta).\n\n '''\n # Get input args. 
Has to be FIRST call!\n input_args = locals()\n\n # Make sure input args are correct - assignments with 0 can come from ImJoy\n if Zrange is not None and (Zrange[0] == 0 or Zrange[1] == 0):\n Zrange = None\n\n if bin_prop[1] == 0 or bin_prop[2] == 0:\n bin_prop = (0,90,20)\n\n ## Prepare folder to save results\n drive, path_and_file = os.path.splitdrive(FQ_file)\n path_results, file_results = os.path.split(path_and_file)\n file_base, ext = os.path.splitext(file_results)\n\n path_save = os.path.join(path_results, file_base, 'MembDist_{}'.format(time.strftime(\"%y%m%d-%H%M\", time.localtime())))\n if not os.path.isdir(path_save):\n os.makedirs(path_save)\n\n ## Open FQ results\n fq_dict = FQtoolbox.read_FQ_matlab(FQ_file)\n spots_all = FQtoolbox.get_rna(fq_dict)\n Zrna = spots_all[:,[18]]\n\n # Open annotations\n print('Open annotations')\n if 'RoiSet.zip' in annotation_extension:\n\n path_annot = os.path.join(path_results,'zstack_segmentation')\n folderImporter = annotationImporter.FolderImporter(channels=channels,\n data_category=data_category,\n annot_ext=annotation_extension)\n annotDict = folderImporter.load(path_annot)\n print('average roi size:', annotDict['roi_size'])\n\n # Generate binary masks for a selected data-set\n binaryGen = maskGenerator.BinaryMaskGenerator(erose_size=5,\n obj_size_rem=500,\n save_indiv=True)\n\n # The generate function uses as an input the sub-dictionary for one data-category and one channel\n annotatFiles = annotDict['roi']['cells']\n maskDict = binaryGen.generate(annotatFiles)\n\n # Use a loop and the update function to add the mask dictionary to the loaded annotation dictionary\n for k, v in annotatFiles.items():\n v.update(maskDict[k])\n\n # Bins of histogram\n binsHist = np.arange(bin_prop[0],bin_prop[1],bin_prop[2])\n width = 0.8 * (binsHist[1] - binsHist[0])\n center = (binsHist[:-1] + binsHist[1:]) / 2\n\n # Other parameters for calculation\n dist_membr_RNA = np.array([])\n dist_membr_pix = np.array([])\n idx = 0\n\n # Loop over all z-slices\n hist_slice ={}\n print('Loop over slices')\n for k, v in annotatFiles.items():\n\n print(f'Slice: {k}')\n # Get Z coordinate\n m = re.search('.*_Z([0-9]*)\\.tif',k)\n Zmask = int(m.group(1))\n\n # Check if outside of specified z range\n if Zrange is not None:\n if (Zmask <= Zrange[0]) or (Zmask >= Zrange[1]):\n print('Slice outside of range')\n continue\n\n # Get z-range for loop\n Zloop = np.logical_and(Zrna <= Zmask + dZ,Zrna >= Zmask - dZ).flatten()\n spots_loop = spots_all[Zloop,:]\n spots_loop_XY = spots_loop[:,[16, 17]].astype(int)\n\n # Distance transform\n dist_membr = ndimage.distance_transform_edt(~v['mask_edge']) # Negate mask\n\n # Indices have to be inversed to access array\n dist_membr_RNA_loop = dist_membr[spots_loop_XY[:,0],spots_loop_XY[:,1]]\n\n # Get distance from membrane for all pixel in the cell\n mask_all = v['mask_fill'] + v['mask_edge']\n dist_membr_pix_loop = dist_membr[mask_all]\n\n # Save values\n if idx == 0:\n dist_membr_RNA = np.copy(dist_membr_RNA_loop)\n dist_membr_pix = np.copy(dist_membr_pix_loop)\n else:\n dist_membr_RNA = np.append(dist_membr_RNA,dist_membr_RNA_loop,axis=0)\n dist_membr_pix = np.append(dist_membr_pix,dist_membr_pix_loop,axis=0)\n idx+=1\n\n # Calculate histograms\n histRNA, bins = np.histogram(dist_membr_RNA_loop,binsHist ,density=False)\n histpix, bins = np.histogram(dist_membr_pix_loop,binsHist ,density=False)\n\n histRNAnorm = histRNA/histRNA.sum()\n histpixnorm = histpix/histpix.sum()\n\n histRNAnormPix = np.divide(histRNAnorm,histpixnorm)\n histRNAnormPix = 
np.nan_to_num(histRNAnormPix)\n\n hist_plot = {'width':width,'center':center,'bins':bins,\n 'histRNA':histRNA,'histpix':histpix,\n 'histRNAnormPix':histRNAnormPix}\n\n # Plot results\n name_save = os.path.join(path_save,f'Z-{Zmask}.png')\n plot_results_slice(Zmask,v,mask_all,spots_loop_XY,dist_membr,hist_plot,name_save)\n\n hist_slice[f'Z{Zmask}'] = hist_plot\n\n # Analyze all slices\n histRNA_all, bins = np.histogram(dist_membr_RNA,binsHist ,density=False)\n histRNA_all_norm = histRNA_all/histRNA_all.sum()\n\n # Renormalize with pixel counts\n histpix_all, bins = np.histogram(dist_membr_pix,binsHist ,density=False)\n histpix_all_norm = histpix_all/histpix_all.sum()\n\n hist_RNA_all_normPix = np.divide(histRNA_all_norm,histpix_all_norm)\n hist_RNA_all_normPix = np.nan_to_num(hist_RNA_all_normPix)\n\n hist_plot_all = {'width':width,'center':center, 'bins':bins,\n 'histRNA_all':histRNA_all,\n 'histRNA_all_norm':histRNA_all_norm,\n 'histpix_all_norm':histpix_all_norm,\n 'hist_RNA_all_normPix':hist_RNA_all_normPix}\n name_save = os.path.join(path_save,'_DistanceEnrichmentSummary.png')\n plot_results_all(hist_plot_all,name_save)\n if show_plot:\n show_plot(name_save)\n\n\n # Save entire analysis results as json\n input_args.pop('show_plot', None)\n analysis_results = {'args': input_args,\n 'hist_all': hist_plot_all,\n 'hist_slice': hist_slice}\n\n name_json = os.path.join(path_save, 'DataAll.json')\n\n with open(name_json, 'w') as fp:\n json.dump(analysis_results, fp,sort_keys=True, indent=4, cls=NumpyEncoder)\n\n # Save histogram of pooled data as csv\n name_csv = os.path.join(path_save, '_HistogramPooled.csv')\n hist_plot_all.pop('bins', None)\n hist_plot_all.pop('width', None)\n csv_header = ';'.join(hist_plot_all.keys())\n hist_values = np.array( list(hist_plot_all.values())).transpose()\n np.savetxt(name_csv, hist_values, delimiter=\";\",fmt='%f',header=csv_header,comments='')\n\n return analysis_results\n\n\n #return analysis_results\n #plt.savefig(os.path.join(path_save, 'CellCortexDist.png'),dpi=200)\n #plt.close()\n\ndef plot_results_all(hist_plot,name_save = None):\n\n # Get parameters to plot histogram\n center = hist_plot['center']\n width = hist_plot['width']\n histRNA_all = hist_plot['histRNA_all']\n histRNA_all_norm = hist_plot['histRNA_all_norm']\n histpix_all_norm = hist_plot['histpix_all_norm']\n hist_RNA_all_normPix = hist_plot['hist_RNA_all_normPix']\n\n # Plot results\n fig1, ax = plt.subplots(2,2,num='Distance comparison')\n fig1.set_size_inches((8,8))\n\n ax[0][0].bar(center, histRNA_all, align='center', width=width)\n ax[0][0].set_xlabel('Distance cell cortex [pix]')\n ax[0][0].set_ylabel('# RNAs')\n ax[0][0].set_xticks(center)\n ax[0][0].set_xticklabels(center.astype(int))\n\n ax[0][1].bar(center, histRNA_all_norm, align='center', width=width/2)\n ax[0][1].set_xlabel('Distance cell cortex [pix]')\n ax[0][1].set_ylabel('Frequency')\n ax[0][1].set_xticks(center)\n ax[0][1].set_xticklabels(center.astype(int))\n\n ax[1][0].bar(center, histpix_all_norm, align='center', width=width)\n ax[1][0].set_xlabel('Distance cell cortex [pix]')\n ax[1][0].set_ylabel('Renormalized counts')\n ax[1][0].set_xticks(center)\n ax[1][0].set_xticklabels(center.astype(int))\n\n ax[1][1].bar(center, hist_RNA_all_normPix, align='center', width=width)\n ax[1][1].set_xlabel('Distance cell cortex [pix]')\n ax[1][1].set_ylabel('Renormalized counts')\n ax[1][1].set_xticks(center)\n ax[1][1].set_xticklabels(center.astype(int))\n\n\n ax[0][0].title.set_text('RNA-absolute counts')\n 
ax[0][1].title.set_text('RNA-normalized counts')\n ax[1][0].title.set_text('All pixel-renormalized')\n ax[1][1].title.set_text('RNA renormalized with pixels')\n\n plt.tight_layout()\n\n if name_save:\n plt.savefig(name_save,dpi=200)\n #plt.close()\n\ndef plot_results_slice(Zmask,mask,mask_all,spots_loop_XY,dist_membr,hist_plot,name_save=None):\n\n # Find min and max values for plotting\n pad = 10\n indMaskAx0 = np.argwhere(mask_all.sum(axis=0))\n minAx0 = indMaskAx0[0]-pad\n maxAx0 = indMaskAx0[-1]+pad\n\n indMaskAx1 = np.argwhere(mask_all.sum(axis=1))\n minAx1 = indMaskAx1[0]-pad\n maxAx1 = indMaskAx1[-1]+pad\n\n # Set distance outside of cell to 0 for better plotting\n dist_membr_plot = np.copy(dist_membr)\n dist_membr_plot[np.logical_not(mask_all)] = 0\n\n # Get parameters to plot histogram\n center = hist_plot['center']\n width = hist_plot['width']\n histRNA = hist_plot['histRNA']\n histpix = hist_plot['histpix']\n histRNAnormPix = hist_plot['histRNAnormPix']\n\n # Generate plot\n fig1, ax = plt.subplots(2,3,num='Distance to cell membrane analysis. Z={}'.format(Zmask))\n fig1.set_size_inches((13,6))\n\n ax[0][0].imshow(mask['image'],cmap=\"hot\")\n ax[0][0].get_xaxis().set_visible(False)\n ax[0][0].get_yaxis().set_visible(False)\n ax[0][0].set_xlim(minAx0, maxAx0)\n ax[0][0].set_ylim(minAx1, maxAx1)\n\n ax[0][1].imshow(mask_all,cmap=\"hot\")\n ax[0][1].get_xaxis().set_visible(False)\n ax[0][1].get_yaxis().set_visible(False)\n ax[0][1].set_xlim(minAx0, maxAx0)\n ax[0][1].set_ylim(minAx1, maxAx1)\n\n imgdum = ax[0][2].imshow(dist_membr_plot,cmap=\"hot\")\n ax[0][2].set_xlim(minAx0, maxAx0)\n ax[0][2].set_ylim(minAx1, maxAx1)\n\n ax[0][2].get_xaxis().set_visible(False)\n ax[0][2].get_yaxis().set_visible(False)\n FQtoolbox.colorbar(imgdum)\n\n for kROI, vROI in mask['roi'].items():\n roi_pos = vROI['pos']\n ax[0][2].plot(roi_pos[:,1],roi_pos[:,0],'b-')\n\n ax[0][2].scatter(spots_loop_XY[:,1],spots_loop_XY[:,0],color='g',s=4)\n\n\n ax[1][0].bar(center, histRNA, align='center', width=width)\n ax[1][0].set_xticks(center)\n ax[1][0].set_xticklabels(center.astype(int))\n ax[1][0].set_xlabel('Distance from cell cortex [pixel]')\n ax[1][0].set_ylabel('# RNAs')\n\n ax[1][1].bar(center, histpix, align='center', width=width)\n ax[1][1].set_xticks(center)\n ax[1][1].set_xticklabels(center.astype(int))\n ax[1][1].set_xlabel('Distance from cell cortex [pixel]')\n ax[1][1].set_ylabel('# pixels')\n\n ax[1][2].bar(center, histRNAnormPix, align='center', width=width)\n ax[1][2].set_xticks(center)\n ax[1][2].set_xticklabels(center.astype(int))\n ax[1][2].set_xlabel('Distance from cell cortex [pixel]')\n ax[1][2].set_ylabel('Renormalized frequency')\n\n # Set titles\n ax[0][0].title.set_text('Cell cortex')\n ax[0][1].title.set_text('Cell mask')\n ax[0][2].title.set_text('Distance transform')\n\n ax[1][0].title.set_text('RNAs')\n ax[1][1].title.set_text('All pixel')\n ax[1][2].title.set_text('Renormalized RNA distance')\n\n plt.tight_layout()\n\n if name_save:\n plt.savefig(name_save,dpi=200)\n #plt.close()\n","repo_name":"ThomasWalter/rna-loc","sub_path":"rnaloc/LOCtoolbox.py","file_name":"LOCtoolbox.py","file_ext":"py","file_size_in_byte":12551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33945174134","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass Message:\n video_fid: str\n username: str\n email: str\n mp3_fid: str\n\n @classmethod\n def from_dict(cls, data: dict) -> \"Message\":\n if not data.get(\"mp3_fid\"):\n raise 
ValueError(\"mp3_fid is required\")\n return Message(\n video_fid=data.get(\"video_fid\"),\n username=data.get(\"username\"),\n mp3_fid=data.get(\"mp3_fid\"),\n email=data.get(\"email\"),\n )\n\n def to_dict(self) -> dict:\n return {\n \"video_fid\": self.video_fid,\n \"username\": self.username,\n \"mp3_fid\": self.mp3_fid,\n \"email\": self.email,\n }\n\n def mp3_url(self, base_endpoint: str):\n return f\"{base_endpoint}/{self.mp3_fid}\"\n","repo_name":"AksAman/video_to_mp3_microservices","sub_path":"src/notification/src/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15680031100","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:\n result1 = []\n result2 = []\n def inline(root, result):\n if not root:\n return\n inline(root.left, result)\n result.append(root.val)\n inline(root.right, result)\n inline(root1, result1)\n inline(root2, result2)\n totalRes = result1 + result2\n res = sorted(totalRes)\n return res\n ","repo_name":"Tek58/Leetcode","sub_path":"1305-all-elements-in-two-binary-search-trees/1305-all-elements-in-two-binary-search-trees.py","file_name":"1305-all-elements-in-two-binary-search-trees.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74197221386","text":"#!/usr/bin/env python3.4\n\nfrom files.config import configure_applications\n\ntable = [\n {'name': '/', 'device': '/dev/sde1'},\n {'name': '/home', 'device': '/dev/sdc2'},\n {'name': '/torrent', 'device': '/dev/sdd2'},\n {'name': '/work', 'device': '/dev/sda2'},\n {'name': '/video', 'device': '/dev/sdc5'},\n {'name': '/music_video', 'device': '/dev/sdb2'}\n]\n\n# http://mirror.yandex.ru/gentoo-distfiles/snapshots/portage-latest.tar.bz2\n# http://mirror.yandex.ru/gentoo-distfiles/releases/amd64/autobuilds/current-stage3-amd64/stage3-amd64-20151105.tar.bz2\n\nconfig = {\n 'path_to_root': '/mnt/gentoo',\n 'user_name': 'aod314',\n 'device': '/dev/sde',\n 'path_to_portage': '/tmp/portage-latest.tar.bz2',\n 'path_to_stage3': '/tmp/stage3-amd64-latest.tar.bz2',\n 'table': table\n}\n\nprint('ONLY PRINT:')\nprint(' download: ' + config['path_to_portage'] + ', ' + config['path_to_stage3'] + ' from http://mirror.yandex.ru/')\n\nprint(' format disk: \\n # mkfs.ext4 /dev/sdaX')\nprint(' for SSD disable journal \\n # tune2fs -o journal_data_writeback /dev/sdaX')\nprint('\\n\\n')\n\nprint('configure applications')\nconfigure_applications(config)\n","repo_name":"AoD314/handbook-gentoo","sub_path":"deploy/gentoo-install.py","file_name":"gentoo-install.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"31577780371","text":"from django.shortcuts import render\n\nDATA = {\n 'omlet': {\n 'яйца, шт': 2,\n 'молоко, л': 0.1,\n 'соль, ч.л.': 0.5,\n },\n 'pasta': {\n 'макароны, г': 0.3,\n 'сыр, г': 0.05,\n },\n 'buter': {\n 'хлеб, ломтик': 1,\n 'колбаса, ломтик': 1,\n 'сыр, ломтик': 1,\n 'помидор, ломтик': 1,\n },\n # можете добавить свои рецепты ;)\n}\n\n\ndef index(request):\n context = {'recipe': DATA}\n return render(request, 'calculator/index.html', context)\n\n\ndef 
recipes(request, dish):\n qty = int(request.GET.get('servings', '1'))\n recipe = DATA.get(dish)\n if recipe:\n context = {'recipe': {i[0]: round(i[1] * qty, 3) for i in recipe.items()}}\n else:\n context = {}\n return render(request, 'calculator/index.html', context)\n","repo_name":"vidok0577/dj-homeworks","sub_path":"1.2-requests-templates/recipes/calculator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73277014025","text":"# BOJ 5597: Who hasn't submitted the assignment..?\r\nattendence = set(range(1, 31))\r\nstudent = set()\r\nfor i in range(28):\r\n student.add(int(input()))\r\n\r\nans = attendence - student\r\nans = sorted(list(ans))\r\n\r\nfor i in ans:\r\n print(i)\r\n","repo_name":"glaxyt/bojSolv","sub_path":"5597.py","file_name":"5597.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32424657016","text":"import numpy as np\nfrom plotData import plotData\nfrom matplotlib import pyplot as plt\n\ndef visualizeBoundary(X, y, model):\n \"\"\"plots a non-linear decision boundary learned by the\n SVM and overlays the data on it\"\"\"\n\n # Plot the training data on top of the boundary\n plotData(X, y)\n\n # Make classification predictions over a grid of values\n x1plot = np.linspace(min(X[:,0]), max(X[:,0]), X.shape[0]).T\n x2plot = np.linspace(min(X[:,1]), max(X[:,1]), X.shape[0]).T\n X1, X2 = np.meshgrid(x1plot, x2plot)\n vals = np.zeros(X1.shape)\n\n for i in range(X1.shape[1]):\n this_X = np.column_stack((X1[:, i], X2[:, i]))\n vals[:, i] = model.predict(this_X)\n\n # Plot the SVM boundary as the single zero-level contour\n # (matplotlib rejects the duplicate levels in the original [0 0] MATLAB call)\n #contour(X1, X2, vals, [0 0], 'Color', 'b')\n plt.contour(X1, X2, vals, levels=[0.0])\n","repo_name":"mstampfer/Coursera-Stanford-ML-Python","sub_path":"ex6/visualizeBoundary.py","file_name":"visualizeBoundary.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":434,"dataset":"github-code","pt":"81"} +{"seq_id":"70475478984","text":"import traceback\nfrom django.shortcuts import redirect, render, HttpResponse\nfrom django.views import View\nfrom django.http import JsonResponse\n\n#authentication\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom .forms import CreateUserForm\n\n#DAO\nfrom businesslogic.itemDAO.ItemDAO import ItemDAO\nfrom businesslogic.productDAO.ProductDAO import ProductDAO\nfrom businesslogic.categoryDAO.CategoryDAO import CategoryDAO\nfrom businesslogic.cartDAO.CartDAO import CartDAO\nfrom businesslogic.orderDAO.OrderDAO import OrderDAO\nfrom businesslogic.userDAO.UserDAO import UserDAO\n# from businesslogic.userDAO.UserDAO import UserDAO\n\n#serializer\nfrom django.core import serializers\nfrom product.serializers import CategorySerializer, ItemSerializer, ProductSerializer, ReviewSerializer\nfrom cart.serializers import CartSerializer, CartItemSerializer\nfrom user.serializers import UserSerializer\nfrom order.serializers import OrderSerializer\n\n#json\nimport json\n\n#template loader\nfrom django.template.loader import render_to_string\n\nadmin = False\nLOGIN_URL = '/login'\n\n#only client\nclass RegisterPage(View):\n def get(self, request):\n if request.user.is_authenticated and request.user.is_staff == admin:\n return render(request,'client/indexpage/index.html')\n # form = 
CreateUserForm()\n # context = {'form':form}\n\n logout(request)\n return render(request,'client/registerpage/register.html')\n \n def post(self, request):\n # print(request.POST.get('form'))\n # form = CreateUserForm(request.POST)\n username = request.POST.get('username')\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n email = request.POST.get('email')\n tel = request.POST.get('tel')\n\n # form = CreateUserForm(username=username,password1=password1,password2=password2)\n # print(form.is_valid())\n \n result = False\n\n if password1==password2:\n try:\n user = UserDAO.createUser(username=username, password=password1, email=email,tel=tel)\n except:\n result = False\n return JsonResponse({\"result\":result}, status=200, safe=False)\n\n if user:\n result=True\n # if request.is_ajax():\n # if form.is_valid():\n # print(\"valid\")\n # form.save()\n # result = True\n \n return JsonResponse({\"result\":result}, status=200, safe=False)\n\n#only client\nclass LoginPage(View):\n def get(self,request):\n if request.user.is_authenticated and request.user.is_staff == admin:\n return render(request,'client/indexpage/index.html')\n \n logout(request)\n return render(request,'client/loginpage/login.html')\n \n def post(self, request):\n if request.is_ajax():\n username = request.POST.get('username')\n password = request.POST.get('password')\n \n user = authenticate(request, username=username, password=password)\n \n result = False\n if user and user.is_staff == admin:\n login(request, user)\n result = True\n\n return JsonResponse({\"result\":result}, status=200, safe=False)\n\nclass LogoutPage(View):\n def get(self,request):\n logout(request)\n return render(request,'client/loginpage/login.html')\n\n#only client\nclass IndexPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self,request):\n if request.user.is_staff == admin:\n return render(request,'client/indexpage/index.html')\n\n logout(request)\n return render(request,'client/loginpage/login.html')\n\nclass CatalogPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self,request):\n # productList = Product.objects.filter(active=True)\n productList = ProductDAO.getActiveProduct()\n categoryList = CategoryDAO.getActiveCategory()\n\n productList = ProductSerializer(productList, many=True)\n categoryList = CategorySerializer(categoryList, many=True)\n\n productList = productList.data\n categoryList = categoryList.data\n\n context = {\"productList\":productList, \"categoryList\": categoryList}\n return render(request,'client/catalogpage/catalog.html',context)\n\nclass CatalogSearch(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def post(self, request):\n search_query = request.POST.get(\"searchQuery\")\n categoryId = int(request.POST.get(\"category\"))\n\n if search_query is None:\n search_query = ''\n\n # product_qset = Product.objects.filter(title__icontains = search_query, active=True)\n productList = ProductDAO.searchAllProductByName(search_query, categoryId = categoryId)\n\n productList = ProductSerializer(productList, many=True)\n productList = productList.data\n\n return JsonResponse(productList, status=200, safe=False)\n\nclass CatalogFilter(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def post(self, request, category_id):\n \n if(int(category_id) != 0):\n category = CategoryDAO.getCategoryByID(category_id)\n productList = ProductDAO.searchActiveProductByCategory(category = category, maxSize=0)\n \n else:\n productList = ProductDAO.getActiveProduct() \n \n \n productList = 
ProductSerializer(productList, many=True)\n productList = productList.data\n # size = int(product_qset.count())\n\n return JsonResponse(productList, status=200, safe=False)\n\nclass ItemDetailPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self, request, item_id):\n # product = Product.objects.get(pk=product_id)\n item = ProductDAO.getProductByID(item_id)\n if not item.active:\n return redirect(\"shopping:catalog\")\n \n imgPathList = ProductDAO.getImgPathByProduct(item)\n\n relatedProductList = ProductDAO.searchRelatedProductByCategory(product=item, category=item.category, maxSize=2)\n # print(item)\n # print(imgPathList)\n # print(relatedProductList)\n \n attrList = ProductDAO.getAttributeListByProduct(product=item)\n\n attrListTmp = []\n index = 0\n\n if attrList:\n for attr in attrList:\n try:\n attrValue = ProductDAO.getAttributeValueListByAttribute(attribute=attr)\n attrValueListTmp = []\n for value in attrValue:\n attrValueListTmp.append(value)\n\n attr = {\"index\": index, \"title\":attr.title, \"value\":attrValueListTmp}\n attrListTmp.append(attr)\n index += 1\n except:\n traceback.print_exc()\n\n user = request.user\n\n reviewStatus = ProductDAO.getReviewStatus(user=user, product=item)\n\n canReview = False\n if reviewStatus.count() > 0:\n for reviewStat in reviewStatus:\n if reviewStat.canReview:\n canReview = True\n\n reviewList = ProductDAO.getAllReviewByProduct(product=item)\n \n #get rating by product\n totalStar = 0\n for review in reviewList:\n totalStar = totalStar + review.rating\n\n reviewList = ReviewSerializer(reviewList, many=True)\n reviewList = reviewList.data\n # print(reviewList)\n\n item = ProductSerializer(item)\n item = item.data\n\n relatedProductList = ProductSerializer(relatedProductList, many=True)\n relatedProductList = relatedProductList.data\n\n context = {\"item\":item, \"imgpathlist\":imgPathList, \"relatedproductlist\": relatedProductList, \"attrlist\":attrListTmp,\n \"canReview\":canReview, \"reviewlist\":reviewList, \"totalStar\": totalStar}\n\n return render(request,'client/productdetailpage/productdetail.html',context)\n\n def post(self, request, item_id): #get variance\n # product = Product.objects.get(pk=product_id)\n product_id = item_id\n\n product = ProductDAO.getProductByID(product_id)\n selectedAttr = request.POST.get('selectedAttr')\n print(selectedAttr)\n\n # if selectedAttr is\n if selectedAttr:\n selectedAttr = json.loads(selectedAttr)\n\n attr = ''\n # print(type(selectedAttr))\n attrSize = len(selectedAttr)\n print(attrSize)\n for i in range(attrSize):\n attr = attr +selectedAttr.get('attribute'+str(i))+',' \n \n print(attr) \n item = ItemDAO.searchActiveItemByAttr(product,attr)\n \n print(item)\n item = serializers.serialize('json', item)\n\n return JsonResponse({\"variance\":item},status=200,safe=False)\n\nclass AddToCart(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def post(self, request, variance_id): #get variance\n # product = Product.objects.get(pk=product_id)\n\n variance = ItemDAO.getItemByID(variance_id)\n quantity = int(request.POST.get('quantity'))\n\n print(quantity)\n\n #add a cart item to cart\n\n #get the cart from user credentials\n user = request.user\n \n cart_qset = CartDAO.getCartByUser(user) #get the ONLY cart\n # #if none then create new cart\n cart = CartDAO.createCartByUser(user)\n \n for item in cart_qset:\n cart = item #cart could be in cart_qset or new\n\n CartDAO.saveCart(cart) #saving does not harm\n\n cartTotalPrice = float(cart.totalPrice)\n\n #change the cartitem matched with the 
selected variance\n cartitem_qset = CartDAO.getACartItemByItem(cart, variance)\n \n print(type(variance.unitPrice))\n print(type(quantity)) \n\n totalPrice = float(variance.unitPrice) * quantity\n if cartitem_qset.count() == 0: #does not exist\n cartitem = CartDAO.createCartItem(cart, variance, quantity, totalPrice)\n CartDAO.saveCartItem(cartitem)\n\n else: #exists so add more\n for cartitem in cartitem_qset:\n #save the new quantity\n cartitemquantity = int(cartitem.quantity)\n cartitemquantity += quantity\n cartitem.quantity = cartitemquantity\n\n #save the new totalPrice\n cartitem.totalPrice = cartitem.item.unitPrice * cartitem.quantity\n CartDAO.saveCartItem(cartitem)\n\n #add totalprice to carttotal\n cartTotalPrice += totalPrice\n cart.totalPrice = cartTotalPrice\n CartDAO.saveCart(cart)\n \n return JsonResponse({\"result\":True},status=200,safe=False)\n\nclass CartPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self, request):\n user = request.user\n\n cart_qset = CartDAO.getCartByUser(user) #get the ONLY cart\n #if none then create new cart\n cart = CartDAO.createCartByUser(user)\n \n for item in cart_qset:\n cart = item\n CartDAO.saveCart(cart)\n \n cartitem_qset = CartDAO.getAllCartItem(cart)\n \n context = { \"cart\":cart\n ,\"cartitem_set\":cartitem_qset}\n\n return render(request,'client/cartpage/cart.html',context)\n\nclass CartItemQuantityChange(LoginRequiredMixin, View):\n login_url = '/login'\n def post(self,request,id,quantity):\n user = request.user\n \n cartItem = CartDAO.getACartItemByID(id)\n cart_qset = CartDAO.getCartByUser(user)\n\n for item in cart_qset:\n cart = item\n\n #save new cartitem quantity AND totalprice\n cartItemTotalPriceNew = float(cartItem.item.unitPrice) * int(quantity)\n cartItem.quantity = int(quantity)\n cartItem.totalPrice = cartItemTotalPriceNew\n CartDAO.saveCartItem(cartItem)\n\n #also update the cart\n cartTotalPrice = 0.0\n cartItem_qset = CartDAO.getAllCartItem(cart)\n for cartItem in cartItem_qset:\n cartTotalPrice += float(cartItem.totalPrice)\n\n cart.totalPrice = cartTotalPrice\n CartDAO.saveCart(cart)\n\n return JsonResponse({'result':'ok', 'cartItemTotal': str(cartItemTotalPriceNew),\n 'cartTotal': str(cartTotalPrice)}, status=200)\n\nclass CartItemDelete(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def post(self, request, id):\n user = request.user\n \n cartItem = CartDAO.getACartItemByID(id)\n cart_qset = CartDAO.getCartByUser(user)\n for item in cart_qset:\n cart = item\n\n cartItemTotalPrice = float(cartItem.totalPrice)\n cartTotalPrice = float(cart.totalPrice) \n cartTotalPrice -= cartItemTotalPrice\n \n cart.totalPrice = cartTotalPrice\n CartDAO.saveCart(cart)\n\n CartDAO.deleteCartItem(cartItem)\n return JsonResponse({'result' : 'ok', 'cartTotal' : str(cartTotalPrice)}, status=200)\n\nclass CheckoutPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self,request):\n user = request.user\n\n cart_qset = CartDAO.getCartByUser(user)\n #if none then create new cart\n cart = CartDAO.createCartByUser(user)\n for item in cart_qset:\n cart = item\n CartDAO.saveCart(cart)\n\n cartitem_qset = CartDAO.getAllCartItem(cart)\n cart = CartSerializer(cart)\n \n context = { \"cart\":cart.data\n ,\"cartitem_set\":cartitem_qset}\n\n return render(request,'client/checkoutpage/checkout.html', context)\n \n def post(self,request):\n user = request.user\n\n hoten = request.POST.get('hoten')\n dienthoai = request.POST.get('dienthoai')\n diachi = request.POST.get('diachi')\n thongtin = 
request.POST.get('thongtin')\n\n cart_qset = CartDAO.getCartByUser(user)\n for item in cart_qset:\n cart = item\n\n totalPrice = cart.totalPrice\n\n order = OrderDAO.createOrder(clientName = hoten, tel = dienthoai, shippingAddress = diachi, detail = thongtin,\n totalPrice = totalPrice, user = user)\n \n try:\n #save order in DB\n OrderDAO.saveOrder(order)\n\n #create OrderItem and save\n cartItemList = CartDAO.getAllCartItem(cart=cart)\n for cartitem in cartItemList:\n orderItem = OrderDAO.createOrderItem(order = order, item = cartitem.item,\n quantity= cartitem.quantity, totalPrice= cartitem.totalPrice)\n OrderDAO.saveOrderItem(orderItem)\n\n #delete CartItem and set canReview -> true\n for cartitem in cartItemList:\n try:\n reviewStatus = ProductDAO.createReviewStatus(user=user, product=cartitem.item.product, canReview=True)\n ProductDAO.saveReviewStatus(reviewStatus)\n except:\n pass\n CartDAO.deleteCartItem(cartitem)\n # print(\"cartitem after delete:\")\n # print(cartitem)\n \n cart.totalPrice = 0\n CartDAO.saveCart(cart)\n except: #rollback\n cart.totalPrice = totalPrice\n CartDAO.saveCart(cart)\n\n for cartitem in cartItemList:\n CartDAO.saveCartItem(cartitem)\n\n orderItemList = OrderDAO.getAllOrderItemByOrder(order)\n \n for orderItem in orderItemList:\n OrderDAO.deleteOrderItem(orderItem)\n \n OrderDAO.deleteOrder(order)\n\n return JsonResponse({'result' : False}, status=200)\n\n # return HttpResponse(\"hehe\")\n # print(\"ok\")\n html = render_to_string('client/successpage/success.html')\n\n return JsonResponse({'result' : 'ok', 'html':html}, status=200, safe= False)\n\nclass SuccessPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self,request):\n return render(request, \"client/successpage/success.html\")\n\nclass OrderPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self, request):\n user = request.user\n\n orderList = OrderDAO.getAllOrderByUser(user)\n orderList = OrderSerializer(orderList, many=True)\n orderList = orderList.data\n\n context = {\"orderList\": orderList}\n return render(request, 'client/orderpage/order.html', context)\n\nclass OrderFilterPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def post(self, request):\n orderId = str(request.POST.get(\"orderId\"))\n selectedStatus = int(request.POST.get(\"selectedStatus\"))\n user = request.user\n\n orderType = str(request.POST.get(\"orderType\"))\n orderField = str(request.POST.get(\"orderField\"))\n orderType = str(request.POST.get(\"orderType\"))\n\n order = OrderDAO.filterOrderByUser(user=user,idcontain=orderId,status=selectedStatus,orderField=orderField,orderType=orderType)\n order = OrderSerializer(order, many=True)\n order = order.data\n \n return JsonResponse(order,status=200,safe=False)\n\nclass OrderDetailPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self, request, order_id):\n order = OrderDAO.getOrderByID(order_id)\n orderItemList = OrderDAO.getAllOrderItemByOrder(order)\n\n order = OrderSerializer(order)\n order = order.data\n\n context={\"order\": order, \"orderitemlist\":orderItemList}\n return render(request, 'client/orderdetailpage/orderdetail.html', context)\n\nclass OrderAbortPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def post(self, request, order_id):\n order = OrderDAO.getOrderByID(order_id)\n\n if order.status >= 2:\n return JsonResponse({'result':False, 'message':'Đơn đã giao, không thể hủy'},status=200,safe=False)\n\n order.active = False\n try:\n OrderDAO.saveOrder(order)\n except:\n return 
JsonResponse({'result':False},status=200,safe=False)\n \n return JsonResponse({'result':True},status=200,safe=False)\n\n\nclass ReviewPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n\n def post(self, request, product_id):\n user = request.user\n product = ProductDAO.getProductByID(product_id)\n\n rating = request.POST.get(\"rating\")\n title = request.POST.get(\"title\")\n detail = request.POST.get(\"detail\")\n\n reviewStatusList = ProductDAO.getReviewStatus(user=user,product=product)\n \n canReview = False\n didReview = False\n didBuy = False\n\n if reviewStatusList.count() > 0:\n for reviewStatus in reviewStatusList:\n didReview = reviewStatus.didReview\n didBuy = True\n if reviewStatus.canReview and didReview == False:\n canReview = True\n\n if canReview: \n review = ProductDAO.createReview(rating=rating,title=title,detail=detail,user=user,product=product)\n try:\n ProductDAO.saveReview(review)\n for reviewStatus in reviewStatusList:\n reviewStatus.didReview = True\n ProductDAO.saveReviewStatus(reviewStatus)\n\n totalReview = product.totalReview\n totalReview += 1\n product.totalReview = totalReview\n\n totalStar = product.totalStar\n totalStar += int(rating)\n product.totalStar = totalStar\n\n ProductDAO.saveProduct(product)\n \n except:\n return JsonResponse({\"result\":False}, status=200, safe=False)\n review = ReviewSerializer(review)\n review = review.data\n\n product = ProductSerializer(product)\n product = product.data\n\n return JsonResponse({\"result\":True, \"review\":review, \"product\":product}, status=200, safe=False)\n else:\n return JsonResponse({\"result\":False, \"didbuy\":didBuy}, status=200, safe=False)\n\nclass MyAccountPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self,request):\n # productList = Product.objects.filter(active=True)\n\n user = request.user\n print(user.id)\n userdata = UserSerializer(user, many=False)\n\n context = {\"user\":userdata.data}\n return render(request,'client/myaccountpage/myaccount.html',context)\n\n def post(self,request):\n # productList = Product.objects.filter(active=True)\n\n user = request.user\n firstname = request.POST.get('firstname')\n lastname = request.POST.get('lastname')\n email = request.POST.get('email')\n tel = request.POST.get('tel')\n\n # print(user.first_name)\n # print(user.last_name)\n\n user.first_name = firstname\n user.last_name = lastname\n\n if email and user.myuser.email != email:\n #check email\n if UserDAO.checkEmail(email):\n #myuser = null\n # try:\n # myuser = user.myuser\n # except:\n # myuser = UserDAO.createMyUser(user=user, email=email, tel=tel)\n # user.myuser = myuser\n\n user.myuser.email = email\n else:\n return JsonResponse({'result':False}, status=200, safe=False)\n if tel:\n user.myuser.tel = tel\n\n try:\n UserDAO.saveUser(user)\n UserDAO.saveMyUser(user.myuser)\n except:\n traceback.print_exc()\n return JsonResponse({'result':False}, status=200, safe=False)\n\n return JsonResponse({'result':True}, status=200, safe=False)\n\nclass MyAccountEditPasswordPage(LoginRequiredMixin, View):\n def post(self,request):\n # productList = Product.objects.filter(active=True)\n\n user = request.user\n password = request.POST.get('password')\n user.set_password(password)\n\n try:\n UserDAO.saveUser(user)\n except:\n traceback.print_exc()\n return JsonResponse({'result':False}, status=200, safe=False)\n\n return JsonResponse({'result':True}, status=200, safe=False)\n\nclass MyAccountDeactivatePage(LoginRequiredMixin, View):\n def post(self,request):\n # productList = 
Product.objects.filter(active=True)\n\n user = request.user\n user.is_active = False\n\n try:\n UserDAO.saveUser(user)\n except:\n traceback.print_exc()\n return JsonResponse({'result':False}, status=200, safe=False)\n\n return JsonResponse({'result':True}, status=200, safe=False)\n\nclass FAQPage(LoginRequiredMixin, View):\n login_url=LOGIN_URL\n def get(self,request):\n\n context = {}\n return render(request,'client/FAQpage/FAQ.html', context)","repo_name":"xuanzung0505/shopping2","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"24109724277","text":"import warnings\n\nimport pytest # noqa: F401\nimport torch\n\nimport theseus as th\nfrom theseus.constants import EPS\n\nfrom .common import (\n BATCH_SIZES_TO_TEST,\n check_jacobian_for_local,\n check_projection_for_exp_map,\n check_projection_for_log_map,\n)\n\n\ndef test_xy_point2():\n for _ in range(100):\n for batch_size in BATCH_SIZES_TO_TEST:\n point = th.Point2(tensor=torch.randn(batch_size, 2))\n assert point.x().allclose(point.tensor[:, 0])\n assert point.y().allclose(point.tensor[:, 1])\n\n\ndef test_xyz_point3():\n for _ in range(100):\n for batch_size in BATCH_SIZES_TO_TEST:\n point = th.Point3(tensor=torch.randn(batch_size, 3))\n assert point.x().allclose(point.tensor[:, 0])\n assert point.y().allclose(point.tensor[:, 1])\n assert point.z().allclose(point.tensor[:, 2])\n\n\ndef test_point_operations_return_correct_type():\n for point_cls in [th.Point2, th.Point3]:\n p1 = point_cls()\n p2 = point_cls()\n\n assert isinstance(p1 + p2, point_cls)\n assert isinstance(p1 - p2, point_cls)\n assert isinstance(p1 * p2, point_cls)\n assert isinstance(p1 / p2, point_cls)\n assert isinstance(p1.abs(), point_cls)\n assert isinstance(-p1, point_cls)\n assert isinstance(p1.compose(p2), point_cls)\n assert isinstance(p1.retract(p2.tensor), point_cls)\n\n # for these, test result also since this method was overridden\n p1_copy = p1.copy()\n assert isinstance(p1_copy, point_cls)\n assert p1_copy.allclose(p1)\n exp_map = point_cls.exp_map(p2.tensor)\n assert isinstance(exp_map, point_cls)\n assert exp_map.allclose(p2)\n\n\ndef test_operations_mypy_cast():\n # mypy is optional install, only needed for library contributors\n try:\n import mypy.api\n except ModuleNotFoundError:\n return\n import sys\n\n python_version = sys.version_info\n if python_version[0] == 3 and (\n python_version[1] > 10 or python_version[1] == 10 and python_version[2] > 6\n ):\n warnings.warn(\n \"There is a bug in mypy and Python >= 3.10.7, see \"\n \"https://github.com/python/mypy/pull/13500. \"\n \"Updating mypy is not working for me. 
Will fix this eventually.\"\n )\n return\n result = mypy.api.run([\"tests/theseus_tests/geometry/point_types_mypy_check.py\"])\n assert result[2] == 0\n\n\ndef test_exp_map():\n rng = torch.Generator()\n rng.manual_seed(0)\n\n for batch_size in BATCH_SIZES_TO_TEST:\n tangent_vector = torch.rand(batch_size, 2, generator=rng).double() - 0.5\n ret = th.Point2.exp_map(tangent_vector)\n\n assert torch.allclose(ret.tensor, tangent_vector, atol=EPS)\n check_projection_for_exp_map(\n tangent_vector, Group=th.Point2, is_projected=False\n )\n\n for batch_size in BATCH_SIZES_TO_TEST:\n tangent_vector = torch.rand(batch_size, 3, generator=rng).double() - 0.5\n ret = th.Point3.exp_map(tangent_vector)\n\n assert torch.allclose(ret.tensor, tangent_vector, atol=EPS)\n check_projection_for_exp_map(\n tangent_vector, Group=th.Point3, is_projected=False\n )\n\n\ndef test_log_map():\n rng = torch.Generator()\n rng.manual_seed(0)\n\n for batch_size in BATCH_SIZES_TO_TEST:\n group = th.Point2.rand(batch_size)\n ret = group.log_map()\n\n assert torch.allclose(ret, group.tensor, atol=EPS)\n check_projection_for_log_map(\n tangent_vector=ret, Group=th.Point2, is_projected=False\n )\n\n for batch_size in BATCH_SIZES_TO_TEST:\n group = th.Point3.rand(batch_size)\n ret = group.log_map()\n\n assert torch.allclose(ret, group.tensor, atol=EPS)\n check_projection_for_log_map(\n tangent_vector=ret, Group=th.Point3, is_projected=False\n )\n\n\ndef test_local_map():\n rng = torch.Generator()\n rng.manual_seed(0)\n\n for batch_size in BATCH_SIZES_TO_TEST:\n group0 = th.Point2.rand(batch_size)\n group1 = th.Point2.rand(batch_size)\n\n check_jacobian_for_local(group0, group1, Group=th.Point2, is_projected=False)\n\n for batch_size in BATCH_SIZES_TO_TEST:\n group0 = th.Point3.rand(batch_size)\n group1 = th.Point3.rand(batch_size)\n\n check_jacobian_for_local(group0, group1, Group=th.Point3, is_projected=False)\n","repo_name":"facebookresearch/theseus","sub_path":"tests/theseus_tests/geometry/test_point_types.py","file_name":"test_point_types.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":1481,"dataset":"github-code","pt":"81"} +{"seq_id":"29764064097","text":"import SimpleITK as sitk\nimport sys\nimport numpy as np\n\n#nz=209; ny=128; nx=128\n#dx=dy=dz=4.41806\n#Ox=-281.857\n#Oy=-279.547\n#Oz=-460.547\n\nif len ( sys.argv ) != 4 and len ( sys.argv ) != 5:\n print( \"Usage: %s sourceactivityfile headerdatafile outputfile [asciioutfile(0:1)]\" % ( sys.argv[0] ) )\n sys.exit ( 1 )\n\nfilename=str(sys.argv[1])\nheaderdatafile=str(sys.argv[2])\noutputfile=str(sys.argv[3])\n\n# Read image parameters from headerdatafile\n(Ox,Oy,Oz) = np.loadtxt(headerdatafile,skiprows=6,max_rows=1,usecols=(2,3,4),dtype='float')\n(dx,dy,dz) = np.loadtxt(headerdatafile,skiprows=9,max_rows=1,usecols=(2,3,4),dtype='float')\n(nx,ny,nz) = np.loadtxt(headerdatafile,skiprows=10,max_rows=1,usecols=(2,3,4),dtype='int')\n\nisaveres=0\nif len(sys.argv) == 5:\n isaveres=int(sys.argv[4])\n\ndosearray2=np.zeros((nz,ny,nx))\n\nprint(\"Processing \",filename,\"with image parameters from\",headerdatafile)\ndosearray = np.loadtxt(filename,usecols=0)\nvoxz = np.loadtxt(filename,usecols=1,dtype='float').astype(int)\nvoxy = np.loadtxt(filename,usecols=2,dtype='float').astype(int)\nvoxx = np.loadtxt(filename,usecols=3,dtype='float').astype(int)\nprint(\"Array voxz: \",voxz)\nprint(\"Dosearray ndim : \",dosearray.ndim)\nprint(\"Dosearray shape: \",dosearray.shape)\nprint(\"Reshaping ...\")\nfor ivox in 
range(dosearray.shape[0]):\n dosearray2[voxz[ivox],voxy[ivox],voxx[ivox]]=dosearray[ivox]\nprint(\"Dosearray ndim : \",dosearray2.ndim)\nprint(\"Dosearray shape: \",dosearray2.shape)\nif isaveres==1:\n print(\"Creating image ascii file \",outputfile+'.dat')\n with open(outputfile+'.dat', 'w') as f:\n nbin=0\n for iz in range(nz):\n for iy in range(ny):\n for ix in range(nx):\n f.write(\" {} {} {} {:12.6E} \\n\".format(ix,iy,iz,dosearray2[iz,iy,ix]))\n nbin=nbin+1\n f.close()\nprint(\"Creating image mhd/raw file \",outputfile+'.mhd')\nprint(\"Getting image from numpy array ...\")\nimg = sitk.GetImageFromArray(dosearray2)\nimg.SetSpacing([dx,dy,dz])\nimg.SetOrigin([Ox,Oy,Oz])\nprint(\"Image dim : \",img.GetDimension())\nprint(\"Image size : \",img.GetSize())\nprint(\"Image origin : \",img.GetOrigin())\nprint(\"Image spacing : \",img.GetSpacing())\nprint(\"Image direction: \",img.GetDirection())\nsitk.WriteImage(img,outputfile+\".mhd\")\n","repo_name":"PenRed/PenRed","sub_path":"examples/internal_rt/scripts/CreateImageFromSourceActivityDistribution.py","file_name":"CreateImageFromSourceActivityDistribution.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"3918998705","text":"import math\ndef primeFactors(n):\n a = 0\n e = 0\n d = 0\n s = n\n c = 0\n while n % 2 == 0:\n a = a + 1\n n = n / 2\n if (a>2):\n e = 1000\n for i in range(3,int(math.sqrt(n))+1,2):\n c = 0\n while n % i== 0:\n c = c + 1\n n = n / i\n if (c>2):\n d = 1000\n if (d == 0 and e == 0):\n return s\n else:\n return 0\n\nsum1 = 0\nsum2 = 0\nfor i in range(2,1234567):\n sum1 = sum1 + primeFactors(i)\n #sum2 = sum2 + i\n #print(primeFactors(i))\nprint(sum1+1)\n#print(sum2 + 1)\n#print(sum2-sum1)\n","repo_name":"Gagan6164/Erdos-Questions","sub_path":"146.py","file_name":"146.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42935535944","text":"def solution(record):\n\n user = {}\n for r in record:\n data = r.split(' ')\n userId = data[1]\n nickName = data[2] if len(data) == 3 else ''\n\n if nickName != '':\n user[userId] = nickName\n\n answer = []\n for r in record:\n command = r.split(' ')[0]\n userId = r.split(' ')[1]\n nickName = user.get(userId)\n\n if command == 'Enter':\n answer.append(nickName + '님이 들어왔습니다.')\n elif command == 'Leave':\n answer.append(nickName + '님이 나갔습니다.')\n\n return answer\n\n\nprint(solution([\"Enter uid1234 Muzi\", \"Enter uid4567 Prodo\",\n \"Leave uid1234\", \"Enter uid1234 Prodo\", \"Change uid4567 Ryan\"]))\n# [\"Prodo님이 들어왔습니다.\", \"Ryan님이 들어왔습니다.\", \"Prodo님이 나갔습니다.\", \"Prodo님이 들어왔습니다.\"]\n","repo_name":"miniddo/Algorithm","sub_path":"프로그래머스/20211020_오픈채팅방.py","file_name":"20211020_오픈채팅방.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38268719501","text":"import numpy as np\nimport cv2\nimport glob\n\n# read video \nsrc = 'test_images/checkerboard/MVI_1952.MOV'\n\ncap = cv2.VideoCapture(src)\nret, frame = cap.read()\n\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n if ret==True:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.threshold(frame, 100, 255, cv2.THRESH_BINARY)[1]\n cv2.imshow('frame', frame)\n\n #cv2.imshow('frame',frame)\n if cv2.waitKey(2) & 0xFF == ord('q'):\n break\n else:\n break\n\n# Release everything if job is 
finished\ncap.release()\ncv2.destroyAllWindows()","repo_name":"akleinhesselink/MAPPS","sub_path":"read_video.py","file_name":"read_video.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27164103690","text":"from flask import url_for\nfrom flask_wtf import FlaskForm\nfrom wtforms import (\n StringField,\n IntegerField,\n SubmitField,\n TextAreaField,\n DateField,\n TimeField,\n SelectField,\n SelectMultipleField,\n)\n\nimport requests\nfrom ...misc import get_internal_api_header\n\nfrom wtforms.validators import DataRequired, Optional, NumberRange\n\nfrom datetime import datetime\n\n\nclass NewShelfForm(FlaskForm):\n name = StringField(\n \"Shelf Name\",\n validators=[DataRequired()],\n description=\"A descriptive name for the shelf, something like top shelf.\",\n )\n\n description = TextAreaField(\n \"Shelf Description\", description=\"A brief description of the shelf.\"\n )\n\n submit = SubmitField(\"Register Shelf\")\n\n\ndef RackToShelfForm(racks: list) -> FlaskForm:\n class StaticForm(FlaskForm):\n letters = [(0, \"None\")] + [(i, chr(ord(\"A\") + (i - 1))) for i in range(1, 27)]\n compartment_row = SelectField(\"Compartment (A-Z)\", choices=letters, coerce=int)\n compartment_col = IntegerField(\n \"Compartment (>=0)\", validators=[Optional(), NumberRange(min=0)]\n )\n\n date = DateField(\n # pass the callable so the default is evaluated per request, not once at import\n \"Entry Date\", validators=[DataRequired()], default=datetime.today\n )\n time = TimeField(\n \"Entry Time\", validators=[DataRequired()], default=datetime.now\n )\n entered_by = StringField(\n \"Entered By\",\n description=\"The initials of the person that entered the sample.\",\n validators=[DataRequired()],\n )\n\n submit = SubmitField(\"Submit\")\n\n choices = []\n\n for rack in racks:\n rack_check_response = requests.get(\n url_for(\n \"api.storage_rack_to_shelf_check\", id=int(rack[\"id\"]), _external=True\n ),\n headers=get_internal_api_header(),\n )\n if not rack_check_response.json()[\"content\"] == \"RCT\":\n choices.append(\n [\n rack[\"id\"],\n \"LIMBRACK-%s: %s (%i x %i)\"\n % (rack[\"id\"], rack[\"uuid\"], rack[\"num_rows\"], rack[\"num_cols\"]),\n ]\n )\n\n setattr(\n StaticForm,\n \"racks\",\n SelectField(\n \"Sample Rack\",\n choices=choices,\n coerce=int,\n render_kw={\"onchange\": \"check_rack()\"},\n ),\n )\n\n return StaticForm()\n\n\ndef RacksToShelfForm(racks: list) -> FlaskForm:\n class StaticForm(FlaskForm):\n letters = [(0, \"None\")] + [(i, chr(ord(\"A\") + (i - 1))) for i in range(1, 27)]\n compartment_row = SelectField(\"Compartment (A-Z)\", choices=letters, coerce=int)\n compartment_col = IntegerField(\n \"Compartment (>=0)\", validators=[Optional(), NumberRange(min=0)]\n )\n\n date = DateField(\n \"Entry Date\", validators=[DataRequired()], default=datetime.today\n )\n time = TimeField(\n \"Entry Time\", validators=[DataRequired()], default=datetime.now\n )\n entered_by = StringField(\n \"Entered By\",\n description=\"The initials of the person that entered the sample.\",\n validators=[DataRequired()],\n )\n submit = SubmitField(\"Submit\")\n\n choices = [[0, \"--- Select at least one rack ---\"]]\n\n for rack in racks:\n choices.append(\n [\n rack[\"id\"],\n \"LIMBRACK-%s: %s (%i x %i)\"\n % (rack[\"id\"], rack[\"uuid\"], rack[\"num_rows\"], rack[\"num_cols\"]),\n ]\n )\n\n default_choices = [int(s[0]) for s in choices if int(s[0]) > 0]\n setattr(\n StaticForm,\n \"racks\",\n SelectMultipleField(\n \"Sample Rack(s)\",\n choices=choices,\n 
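# default_choices (built above) holds every real rack id (> 0), so the multi-select opens with all racks preselected\n 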
default=default_choices,\n coerce=int,\n )\n # render_kw={'onchange': \"check_rack()\"})\n )\n\n return StaticForm()\n","repo_name":"AberystwythSystemsBiology/limbus","sub_path":"services/web/app/storage/forms/shelf.py","file_name":"shelf.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"7429548145","text":"import numpy as np\nimport pandas as pd\n\nfrom arch.unitroot import ADF\nfrom statsmodels.tsa.api import VAR\nimport statsmodels.formula.api as smf\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Seaborn theme for graphing\nsns.set()\n\n\ndef split_line(text=None):\n if text == None:\n print('=================================')\n else:\n print(f'''\n=========================================\n{text}\n=========================================\n ''')\n\n\nLIT_NUM = '国内游客(百万人次)'\nLIT_COST = '国内旅游人均花费(元)'\nLIT_GDP = '人均国内生产总值(元)'\n\ndata = pd.read_csv('thesis/tourists.csv')\nyearly = pd.to_datetime({'year': data['年份'], 'month': 1, 'day': 1})\nmdata = pd.DataFrame({\n 'num': data[LIT_NUM],\n 'cost': data[LIT_COST],\n 'GDP': data[LIT_GDP]\n})\nmdata.index = pd.DatetimeIndex(yearly)\nmdata = mdata.dropna().sort_index()\nmdata = np.log(1 + mdata)\n\nnum = mdata['num']\ncost = mdata['cost']\nGDP = mdata['GDP']\n\n\ndef ADF_test(series):\n print(ADF(series).summary())\n\n\nADF_test(mdata['num'])\nADF_test(mdata['cost'])\nADF_test(mdata['GDP'])\n\n# ADF test for delta\nADF_test(mdata['num'].diff().dropna())\nADF_test(mdata['cost'].diff().dropna())\nADF_test(mdata['GDP'].diff().dropna())\n\n# Cointegration\nco_res = smf.ols(formula='GDP ~ cost + num - 1', data=mdata).fit()\nprint(co_res.summary())\nADF_test(co_res.resid)\n\n# Fit with VAR\nVAR_model = VAR(mdata)\nresults = VAR_model.fit(verbose=True)\nresults.plot()\nprint(results.summary())\n# Selected lag order\nprint('Auto-selected Order:', results.k_ar)\nprint(VAR_model.select_order().summary())\n\n# Residual normality\nprint()\nprint(results.test_normality().summary())\n\n# Granger causality\nnames = ['num', 'cost', 'GDP']\nprint()\n\nfor n1 in names:\n for n2 in names:\n if n1 != n2:\n print(results.test_causality(n1, n2).summary())\nsymbols = ['\\\\NUM', '\\\\COST', '\\\\GDP']\nfor n1 in symbols:\n for n2 in symbols:\n if n1 != n2:\n print(f'${n2}$不能Granger引起${n1}$')\n\n# Stability\nprint()\nprint('Stability test:')\nif results.is_stable(True):\n print('Stable :)')\nelse:\n print('Non-stable')\n\n# Impulse Response Analysis\nirf = results.irf(20)\nirf.plot()\n\n# Forecast Error Variance Decomposition\nfevd = results.fevd(20)\nprint()\nprint(fevd.summary())\nfevd.plot()\n\nplt.show()\n","repo_name":"OopsYao/homework","sub_path":"tsa/thesis/var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"26320675800","text":"\"\"\" Collecting Stacks & Deployments configured for Scaling \"\"\"\nimport os\nimport pathlib\nimport json\nimport logging\nimport shutil\nimport pykube\nimport re\nimport urllib.request\nimport boto3\nfrom resources.Stack import Stack\nfrom resources.Stackset import StackSet\nfrom crontab import CronTab\n\nEXECUTION_TIME = 'datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M UTC\")'\n\n\ndef create_job_directory():\n \"\"\" This directory will hold the temp python scripts to execute the scaling jobs \"\"\"\n temp__dir = '/tmp/scaling_jobs'\n if os.path.isdir(temp__dir):\n 
shutil.rmtree(temp__dir)\n pathlib.Path(temp__dir).mkdir(parents=True, exist_ok=True)\n\n\ndef clear_cron():\n \"\"\" This is needed so that if any one removes his scaling action\n it should not be trigger again \"\"\"\n my_cron = CronTab(user='root')\n my_cron.remove_all(comment=\"Scheduling_Jobs\")\n my_cron.write()\n\n\ndef get_kube_api():\n \"\"\" Initiating the API from Service Account or when running locally from ~/.kube/config \"\"\"\n try:\n config = pykube.KubeConfig.from_service_account()\n except FileNotFoundError:\n # local testing\n config = pykube.KubeConfig.from_file(os.path.expanduser('~/.kube/config'))\n api = pykube.HTTPClient(config)\n return api\n\n\ndef deployments_to_scale():\n '''\n Getting the deployments configured for schedule scaling...\n '''\n api = get_kube_api()\n deployments = []\n scaling_dict = {}\n for namespace in list(pykube.Namespace.objects(api)):\n namespace = str(namespace)\n for deployment in pykube.Deployment.objects(api).filter(namespace=namespace):\n annotations = deployment.metadata.get('annotations', {})\n f_deployment = str(namespace + '/' + str(deployment))\n\n schedule_actions = parse_content(annotations.get('zalando.org/schedule-actions', None), f_deployment)\n\n if schedule_actions is None or len(schedule_actions) == 0:\n continue\n\n deployments.append([deployment.metadata['name']])\n scaling_dict[f_deployment] = schedule_actions\n if not deployments:\n logging.info('No deployment is configured for schedule scaling')\n\n return scaling_dict\n\n\ndef deploy_job_creator():\n \"\"\" Create CronJobs for configured Deployments \"\"\"\n\n deployments__to_scale = deployments_to_scale()\n print(\"Deployments collected for scaling: \")\n for deploy, schedules in deployments__to_scale.items():\n deployment = deploy.split(\"/\")[1]\n namespace = deploy.split(\"/\")[0]\n for n in range(len(schedules)):\n schedules_n = schedules[n]\n replicas = schedules_n.get('replicas', None)\n minReplicas = schedules_n.get('minReplicas', None)\n maxReplicas = schedules_n.get('maxReplicas', None)\n schedule = schedules_n.get('schedule', None)\n print(\"Deployment: %s, Namespace: %s, Replicas: %s, MinReplicas: %s, MaxReplicas: %s, Schedule: %s\"\n % (deployment, namespace, replicas, minReplicas, maxReplicas, schedule))\n\n with open(\"/root/schedule_scaling/templates/deployment-script.py\", 'r') as script:\n script = script.read()\n deployment_script = script % {\n 'namespace': namespace,\n 'name': deployment,\n 'replicas': replicas,\n 'minReplicas': minReplicas,\n 'maxReplicas': maxReplicas,\n 'time': EXECUTION_TIME,\n }\n i = 0\n while os.path.exists(\"/tmp/scaling_jobs/%s-%s.py\" % (deployment, i)):\n i += 1\n script_creator = open(\"/tmp/scaling_jobs/%s-%s.py\" % (deployment, i), \"w\")\n script_creator.write(deployment_script)\n script_creator.close()\n cmd = ['sleep 50 ; . 
/root/.profile ; /usr/bin/python', script_creator.name,\n '2>&1 | tee -a /tmp/scale_activities.log']\n cmd = ' '.join(map(str, cmd))\n scaling_cron = CronTab(user='root')\n job = scaling_cron.new(command=cmd)\n try:\n job.setall(schedule)\n job.set_comment(\"Scheduling_Jobs\")\n scaling_cron.write()\n except Exception:\n print('Deployment: %s has a syntax error in its schedule' % (deployment))\n pass\n\n\ndef stacks_to_scale():\n '''\n Getting the Stacks configured for schedule scaling...\n '''\n api = get_kube_api()\n stacks = []\n stacks_scaling_dict = {}\n for namespace in list(pykube.Namespace.objects(api)):\n namespace = str(namespace)\n for stackset in StackSet.objects(api).filter(namespace=namespace):\n annotations = stackset.metadata.get('annotations', {})\n\n schedule_actions = annotations.get('zalando.org/schedule-actions', None)\n\n f_stack = str(namespace + '/' + str(stackset))\n\n schedule_actions = parse_content(schedule_actions, f_stack)\n\n if schedule_actions is None or len(schedule_actions) == 0:\n continue\n\n stackset_stacks = list(Stack.objects(api).filter(namespace=namespace, selector={'stackset': stackset}))\n stack_names = list(map(lambda stack: namespace + '/' + stack.metadata.get('name'), stackset_stacks))\n\n stacks.append([stackset.metadata['name']])\n\n for stack_name in stack_names:\n stacks_scaling_dict[stack_name] = schedule_actions\n if not stacks:\n logging.info('No stack is configured for schedule scaling')\n\n return stacks_scaling_dict\n\n\ndef stack_job_creator():\n \"\"\" Create CronJobs for configured Stacks \"\"\"\n\n stacks__to_scale = stacks_to_scale()\n print(\"Stacks collected for scaling: \")\n for stacks, schedules in stacks__to_scale.items():\n stack = stacks.split(\"/\")[1]\n namespace = stacks.split(\"/\")[0]\n for n in range(len(schedules)):\n schedules_n = schedules[n]\n replicas = schedules_n.get('replicas', None)\n minReplicas = schedules_n.get('minReplicas', None)\n maxReplicas = schedules_n.get('maxReplicas', None)\n schedule = schedules_n.get('schedule', None)\n\n print(\"Stack: %s, Namespace: %s, Replicas: %s, MinReplicas: %s, MaxReplicas: %s, Schedule: %s\" %\n (stack, namespace, replicas, minReplicas, maxReplicas, schedule))\n\n with open(\"/root/schedule_scaling/templates/stack-script.py\", 'r') as script:\n script = script.read()\n stack_script = script % {\n 'namespace': namespace,\n 'name': stack,\n 'replicas': replicas,\n 'minReplicas': minReplicas,\n 'maxReplicas': maxReplicas,\n 'time': EXECUTION_TIME,\n }\n i = 0\n while os.path.exists(\"/tmp/scaling_jobs/%s-%d.py\" % (stack, i)):\n i += 1\n script_creator = open(\"/tmp/scaling_jobs/%s-%d.py\" % (stack, i), \"w\")\n script_creator.write(stack_script)\n script_creator.close()\n cmd = ['sleep 50 ; . 
/root/.profile ; /usr/bin/python', script_creator.name,\n '2>&1 | tee -a /tmp/scale_activities.log']\n cmd = ' '.join(map(str, cmd))\n scaling_cron = CronTab(user='root')\n job = scaling_cron.new(command=cmd)\n try:\n job.setall(schedule)\n job.set_comment(\"Scheduling_Jobs\")\n scaling_cron.write()\n except Exception:\n print('Stack: %s has a syntax error in its schedule' % (stack))\n pass\n\ndef parse_content(content, identifier):\n if content is None:\n return []\n\n if is_valid_s3_url(content):\n schedules = fetch_schedule_actions_s3(content)\n\n if schedules is None:\n return []\n\n return parse_schedules(schedules, identifier)\n\n if is_valid_url(content):\n schedules = fetch_schedule_actions_from_url(content)\n\n if schedules is None:\n return []\n\n return parse_schedules(schedules, identifier)\n\n return parse_schedules(content, identifier)\n\ndef is_valid_url(url):\n return re.search('^(https?)://(\\\\S+)\\.(\\\\S{2,}?)(/\\\\S+)?$', url, re.I) is not None\n\ndef is_valid_s3_url(url):\n return parse_s3_url(url) is not None\n\ndef parse_s3_url(url):\n match = re.search('^s3://(\\\\S+?)/(\\\\S+)$', url, re.I)\n\n if match is None:\n return None\n\n return {\n 'Bucket': match.group(1),\n 'Key': match.group(2)\n }\n\ndef fetch_schedule_actions_s3(url):\n source = parse_s3_url(url)\n\n print(source)\n\n s3 = boto3.client('s3')\n try:\n element = s3.get_object(**source)\n except Exception:\n print('Couldn\\'t read %s' % (url))\n return '[]'\n\n return element['Body'].read().decode('utf-8')\n\ndef fetch_schedule_actions_from_url(url):\n request = urllib.request.urlopen(url)\n try:\n content = request.read().decode('utf-8')\n except Exception:\n content = None\n finally:\n request.close()\n\n return content\n\ndef parse_schedules(schedules, identifier):\n try:\n return json.loads(schedules)\n except Exception as err:\n print('%s - Error in parsing JSON %s with error' % (identifier, schedules), err)\n return []\n\nif __name__ == '__main__':\n create_job_directory()\n clear_cron()\n deploy_job_creator()\n stack_job_creator()\n","repo_name":"amelbakry/kube-schedule-scaler","sub_path":"schedule_scaling/schedule_scaling.py","file_name":"schedule_scaling.py","file_ext":"py","file_size_in_byte":9451,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"81"}
{"seq_id":"19048449097","text":"from kedro.io import DataCatalog\nfrom kedro.runner import SequentialRunner\n\nfrom flasking_kedro.pipelines.feature_engineering import create_pipeline\n\n\ndef test_feature_engineering_pipeline(\n sample_data_catalog_train: DataCatalog, runner: SequentialRunner\n):\n train_pipeline = create_pipeline(\n output_X_train_normalized=\"sample_iris_X_train_normalized\",\n output_X_test_normalized=\"sample_iris_X_test_normalized\",\n output_y_train=\"sample_iris_y_train\",\n output_y_test=\"sample_iris_y_test\",\n normalizer=\"sample_normalizer\",\n )\n\n output = runner.run(pipeline=train_pipeline, catalog=sample_data_catalog_train)\n\n assert output[\"sample_iris_X_train_normalized\"].shape == (3, 4)\n assert output[\"sample_iris_X_test_normalized\"].shape == (1, 4)\n assert output[\"sample_iris_y_train\"].shape == (3,)\n assert output[\"sample_iris_y_test\"].shape == (1,)\n\n # predict_pipeline = pipeline(\n # create_pipeline(\n # output_X_test_normalized=\"sample_iris_X_test_normalized\",\n # normalizer=\"sample_normalizer\",\n # ).only_nodes_with_tags(\"prediction\"),\n # inputs={\n # \"sample_iris_X_test_normalized\": \"sample_iris_X_test_normalized\",\n # \"sample_normalizer\": 
\"sample_normalizer\",\n # },\n # namespace=\"predict\",\n # )\n","repo_name":"sfczekalski/Flasking-Kedro","sub_path":"src/tests/pipelines/feature_engineering/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27717902057","text":"\nimport csv\nimport enum\nfrom msilib.schema import Condition\nimport time\nimport handle_file\nimport play_tts\nimport send_cmd\n \n# Parse to define columns\ndef parse(row):\n tc_no = row[0]\n step_no = row[1]\n type_data = row[2]\n action_data = row[3]\n timeout_no = int(row[4])\n log_ok = row[5]\n log_error = row [6]\n return tc_no, step_no, type_data, action_data, timeout_no, log_ok, log_error\n\ndef handle_step(row):\n tc_no, step_no, type_data, action_data, timeout_no, log_ok, log_error = parse(row)\n if(type_data == \"tts\"):\n print(action_data)\n play_tts.speak(action_data)\n elif (type_data == \"touch\"):\n x = int(action_data.split(',')[0])\n y = int(action_data.split(',')[1])\n print(\"x: \" + str(x) + \" y: \" + str(y))\n try:\n send_cmd.send_touch(x,y)\n except:\n print(\"An exception occured when failed connection\")\n \n# Process test case\ndef handle_testcase(csvreader):\n for row in csvreader:\n if (row[2] != 'type'):\n handle_step(row)\n \n \ndef handle_csv():\n file = handle_file.open_file('speech1.csv')\n csvreader = handle_file.skip_head_line(file) \n handle_testcase(csvreader)\n handle_file.close_file(file)\n ","repo_name":"ThuChau106/sanity-automation-test-tool","sub_path":"handle_testcase.py","file_name":"handle_testcase.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71915255945","text":"from __future__ import print_function\n\nimport mechanize\nimport json\nfrom bs4 import BeautifulSoup\n\nbr = mechanize.Browser()\nbr.set_handle_robots(False) # ignore crawler setting information\n\n####################################################\n# Generate url address for crawling\nentire_url = \"http://www.polygon.com/2016/1/12/10755490/ea-sports-ufc-2-gameplay-trailer-features\"\nres = br.open(entire_url)\nhtml = res.read()\nsoup = BeautifulSoup(html, 'html.parser')\n\n####################################################\n# Finding article meta information\npublished_time = str()\nauthor = str()\nfor meta in soup.head.findAll('meta'):\n meta_property = meta.attrs.get('property')\n meta_name = meta.attrs.get('name')\n if meta_property == 'article:published_time':\n published_time = meta.attrs.get('content')\n if meta_name == 'author':\n author = meta.attrs.get('content')\n\nprint(soup.find(id='comments'))\n\n####################################################\n# Finding youtube video ids\nyoutube_string = str()\n\nfor iframe in soup.findAll('iframe'):\n # taking iframe\n src_string = iframe.get('src')\n if 'youtube' in src_string:\n youtube_string = src_string\n\nprint(youtube_string)\nprint(youtube_string[30:41])\nstart = youtube_string.rfind('/') + 1\nend = youtube_string.rfind('?')\n\nif end is '-1':\n print(youtube_string[start:])\nelse:\n print(youtube_string[start:end])\n\n####################################################\nentry__title = soup.find(class_='m-entry__title')\nentry_id = entry__title.attrs.get('data-remote-admin-entry-id')\n\ncomment_url = 'http://www.polygon.com/comments/load_comments/' + str(entry_id)\ncomment_res = br.open(comment_url)\ncomment_json = 
json.loads(comment_res.read())\n\n####################################################\ndata = {\n '_id': entire_url,\n 'published_time': published_time,\n 'author': author,\n 'comment_len': len(comment_json['comments']),\n 'youtube_url': youtube_string\n}\n\n# print data\n","repo_name":"koofree/coding-oriented-society","sub_path":"python-data/week5/part_of_sample_crawler.py","file_name":"part_of_sample_crawler.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
{"seq_id":"3134667253","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\n\nfrom .models import Post, Comment, Follow, User\nfrom blog.forms import PostForm, UserRegistrationForm, CommentForm\n\n\ndef register(request):\n title_registration = 'Registration'\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n # Create the new user, but don't save it yet (commit=False).\n new_user = user_form.save(commit=False)\n # Set the chosen password.\n new_user.set_password(user_form.cleaned_data['password'])\n # Save the user.\n new_user.save()\n return redirect('login')\n else:\n user_form = UserRegistrationForm()\n return render(request, 'registration/register.html', {'user_form': user_form, 'title':title_registration})\n\n\n@login_required\ndef logout(request):\n title = 'Logout'\n request.session.flush()\n return render(request, 'registration/logout.html', {'title':title})\n\n\ndef paginator(request, object_list, per_page):\n paginator = Paginator(object_list, per_page) # show per_page items per page\n per_page = request.GET.get('pag')\n page_obj = paginator.get_page(per_page)\n return page_obj\n\n\ndef post_list(request):\n title = 'Newspaper'\n posts = Post.objects.all().order_by('-published_date')\n per_page = request.GET.get('pag', 5)\n return render(request, 'blog/post_list.html', {'page_obj': paginator(request, posts, per_page), 'title':title})\n\n\n@login_required\ndef subscribed_to(request):\n title = 'Subscriptions'\n subscribers = Follow.objects.filter(user=request.user)\n return render(request, 'blog/follows.html', {'subscribers': subscribers, 'title':title})\n\n\n@login_required\ndef subscribe(request, author):\n subscriber_name = User.objects.get(username=author)\n # avoid inserting a duplicate row into the table\n if not Follow.objects.filter(user_id=request.user.id, author_id=subscriber_name.id).exists():\n Follow.objects.create(user_id=request.user.id, author_id=subscriber_name.id)\n return redirect('author_list', id=subscriber_name.id, value=author)\n else: return redirect('author_list', id=subscriber_name.id, value=author)\n\n\n@login_required\ndef unsubscribe(request, author):\n subscriber_name = User.objects.get(username=author)\n subscriber = Follow.objects.get(user_id=request.user.id, author_id=subscriber_name.id)\n subscriber.delete()\n return redirect('author_list', id=subscriber_name.id, value=author)\n\n\n@login_required\ndef profile(request):\n title = 'My Page'\n my_posts = Post.objects.filter(author=request.user).order_by('-published_date')\n per_page = request.GET.get('pag', 5)\n return render(request, 'accounts/profile.html', {'page_obj': paginator(request, my_posts, per_page), 'title':title})\n\n\ndef author_list(request, id, value):\n title = str(value) + ' page'\n 
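# all posts by this author, newest first\n 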
posts = Post.objects.filter(author_id=id).order_by('-published_date')\n per_page = request.GET.get('pag', 5)\n subscriber_name = User.objects.get(username=value)\n subscribers = Follow.objects.filter(user_id=request.user.id, author_id=subscriber_name.id).exists()\n return render(request, 'blog/author_list.html', {'page_obj': paginator(request, posts, per_page), 'author': value, 'subscribers':subscribers, 'title':title})\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n title = str(post.title)\n comments = post.comments.filter(active=True).order_by('-created')\n new_comment = None\n\n if request.method == 'POST':\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n try: new_comment.name = request.user\n except ValueError: return redirect('login')\n new_comment.post = post\n new_comment.save()\n return redirect('post_detail', pk=post.pk)\n else:\n comment_form = CommentForm()\n return render(request, 'blog/post_detail.html', {'post': post, 'comments': comments, 'new_comment': new_comment, 'comment_form': comment_form, 'title':title})\n\n\n@login_required\ndef post_new(request):\n title = 'New post'\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/post_new.html', {'form': form, 'title':title})\n\n\n@login_required\ndef post_edit(request, pk):\n title = 'Editing an entry'\n post = Post.objects.get(pk=pk)\n if request.method == \"POST\":\n post.title = request.POST.get(\"title\")\n post.text = request.POST.get(\"text\")\n post.author = request.user\n post.created_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n return render(request, \"blog/edit.html\", {\"post\": post, 'title':title})\n\n\n@login_required\ndef delete(request, pk):\n post = Post.objects.get(pk=pk)\n post.delete()\n return redirect('post_list')\n\n\n@login_required\ndef comment_delete(request, pk):\n comment = Comment.objects.get(pk=pk)\n comment.delete()\n return redirect('post_detail', pk=comment.post_id)\n\n","repo_name":"Tokarev-Alexey/One-P","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"7429620869","text":"\"\"\" Portfolio Value report \"\"\"\n\nfrom pydatum import Datum\nfrom gnucash_portfolio import BookAggregate\nfrom gnucash_portfolio.lib import portfoliovalue\nfrom gnucash_portfolio.reports.portfolio_models import (PortfolioValueInputModel,\n PortfolioValueViewModel)\nfrom gnucash_portfolio.model.stock_model import StockViewModel\n\n\ndef run(input_model: PortfolioValueInputModel):\n \"\"\" Fetch the report data \"\"\"\n model = __get_model_for_portfolio_value(input_model)\n return model\n\n\ndef __get_model_for_portfolio_value(input_model: PortfolioValueInputModel\n ) -> PortfolioValueViewModel:\n \"\"\" loads the data for portfolio value \"\"\"\n result = PortfolioValueViewModel()\n result.filter = input_model\n\n ref_datum = Datum()\n ref_datum.from_datetime(input_model.as_of_date)\n ref_date = ref_datum.end_of_day()\n\n result.stock_rows = []\n with BookAggregate() as svc:\n book = svc.book\n stocks_svc = svc.securities\n\n if input_model.stock:\n 
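# 'stock' is an optional comma-separated symbol filter, e.g. \"VTI,BND\" (hypothetical tickers)\n 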
symbols = input_model.stock.split(\",\")\n stocks = stocks_svc.get_stocks(symbols)\n else:\n stocks = stocks_svc.get_all()\n\n for stock in stocks:\n row: StockViewModel = portfoliovalue.get_stock_model_from(\n book, stock, as_of_date=ref_date)\n if row and row.balance > 0:\n result.stock_rows.append(row)\n\n return result\n\nif __name__ == \"__main__\":\n run(input_model=None)\n","repo_name":"alensiljak/gnucash-portfolio","sub_path":"gnucash_portfolio/reports/portfolio_value.py","file_name":"portfolio_value.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"81"} +{"seq_id":"6394247844","text":"import cv2\r\nfrom cvzone.HandTrackingModule import HandDetector\r\nfrom cvzone.ClassificationModule import Classifier\r\nimport numpy as np\r\nimport math\r\nimport tensorflow\r\ncap = cv2.VideoCapture(0)\r\ndetector = HandDetector(maxHands=1)\r\nclassifier = Classifier(\"keras_model.h5\", \"labels.txt\")\r\noffset = 20\r\nimgSize = 300\r\n\r\nfolder = \"Data/Y\"\r\ncounter = 0\r\n\r\nlabels = [\"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\"]\r\n\r\nRun = True\r\n\r\nclass VideoCamera(object):\r\n def __init__(self):\r\n self.video = cv2.VideoCapture(0)\r\n self.video.set(3, 1000)\r\n self.video.set(4, 800)\r\n\r\n def __del__(self):\r\n self.video.release()\r\n def get_frame(self):\r\n ret, frame = self.video.read()\r\n frame = cv2.flip(frame, 1)\r\n frame = fun(frame)\r\n ret, jpeg = cv2.imencode('.jpg', frame)\r\n return jpeg.tobytes()\r\n\r\ndef fun(img):\r\n while Run:\r\n imgOutput = img.copy()\r\n hands, img = detector.findHands(img)\r\n if hands:\r\n hand = hands[0]\r\n x, y, w, h = hand['bbox']\r\n\r\n imgWhite = np.ones((imgSize, imgSize, 3), np.uint8)*255\r\n imgCrop = img[y-offset:y+h+offset, x-offset:x+w+offset]\r\n\r\n aspectRatio = h/w\r\n\r\n if aspectRatio > 1:\r\n k = imgSize/h\r\n wCal = math.ceil(k*w)\r\n imgResize = cv2.resize(imgCrop, (wCal, imgSize))\r\n imgResizeShape = imgResize.shape\r\n wGap = math.ceil((imgSize-wCal)/2)\r\n imgWhite[:, wGap:wCal+wGap] = imgResize\r\n prediction, index = classifier.getPrediction(imgWhite)\r\n print(prediction, index)\r\n else:\r\n k = imgSize / w\r\n hCal = math.ceil(k * h)\r\n imgResize = cv2.resize(imgCrop, (imgSize, hCal))\r\n imgResizeShape = imgResize.shape\r\n hGap = math.ceil((imgSize - hCal) / 2)\r\n imgWhite[hGap:hCal + hGap, :] = imgResize\r\n prediction, index = classifier.getPrediction(imgWhite)\r\n print(prediction, index)\r\n\r\n cv2.putText(imgOutput, labels[index], (x, y-20), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 255), 2)\r\n # cv2.imshow(\"ImageCrop\", imgCrop)\r\n # cv2.imshow(\"ImageWhite\", imgWhite)\r\n # cv2.imshow(\"Image\", imgOutput)\r\n cv2.waitKey(1)\r\n key = cv2.waitKey(1)\r\n return imgOutput\r\n\r\n\r\n\r\n","repo_name":"thejasrao262003/Sign-Language-Tutor-Website","sub_path":"src/public/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41017541906","text":"#!/usr/bin/python3\n\"\"\" flask \"\"\"\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef teardown_db(self):\n \"\"\"Remove the current SQLAlchemy Session.\"\"\"\n storage.close()\n\n\n@app.route('/hbnb_filters', strict_slashes=False)\ndef deploy_hbnb():\n 
\"\"\"deploy a webpage\"\"\"\n states = storage.all(State).values()\n states = sorted(states, key=lambda k: k.name)\n stateList = []\n\n for state in states:\n stateList.append([state, sorted(state.cities, key=lambda k: k.name)])\n\n amenities = storage.all(Amenity).values()\n amenities = sorted(amenities, key=lambda k: k.name)\n return render_template(\"10-hbnb_filters.html\",\n states=stateList, amenities=amenities)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)","repo_name":"LDoualito/AirBnB_clone_v2","sub_path":"web_flask/10-hbnb_filters.py","file_name":"10-hbnb_filters.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21913810028","text":"from setuptools import find_packages, setup\n\n\nwith open(\"Readme.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\nwith open(\"requirements.txt\", \"r\", encoding=\"utf-8\") as fh:\n requirements = fh.read()\n\nsetup(\n name=\"tstbtc\",\n version=\"1.0.0\",\n author=\"Peter Tyonum , Shaswat Gupta\",\n author_email=\"withtvpeter@gmail.com, shaswat2001.sg@gmail.com\",\n license=\"MIT\",\n description=\"transcribes youtube videos/media to bitcointranscript\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/tvpeter/yt2btc\",\n py_modules=[\"transcriber\"],\n packages=find_packages(),\n install_requires=[requirements],\n python_requires=\">=3.9\",\n classifiers=[\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n tstbtc=transcriber:add\n \"\"\",\n)\n","repo_name":"bitcointranscripts/tstbtc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"6362618748","text":"#PF-Exer-26\ndef factorial(number):\n #remove pass and write your logic to find and return the factorial of given number\n if number==0:\n return 1\n else:\n fact=1\n while number>0:\n fact*=number\n number-=1\n return fact\n \n\ndef find_strong_numbers(num_list):\n #remove pass and write your logic to find and return the list of strong numbers from the given list\n strong_num_list=[]\n for num in num_list:\n temp=num \n sum_fact=0\n while temp>0:\n sum_fact+=factorial(temp%10)\n temp//=10\n if sum_fact == num:\n strong_num_list.append(num)\n return strong_num_list\nnum_list=[145,375,100,2,10]\nstrong_num_list=find_strong_numbers(num_list)\nprint(strong_num_list)\n","repo_name":"PulakChandan/p1-pySpark-project","sub_path":"Python_project/src/day3/#PF-Exer-26.py","file_name":"#PF-Exer-26.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22927657574","text":"def create_categorical_embeddings_model(data, catcols):\n import numpy as np\n # from tensorflow.keras.layers import Input, Dense, LSTM, Concatenate, SpatialDropout1D, \\\n # Reshape, BatchNormalization, Dropout, Embedding\n # from tensorflow.keras.models import Model\n from keras.layers import Input, Dense, Concatenate, SpatialDropout1D, \\\n Reshape, BatchNormalization, Dropout, Embedding\n from keras.models import Model\n\n categorical_inputs = []\n categorical_embed_outputs = []\n\n for c in catcols:\n num_unique_values = 
int(data[c].nunique())\n embed_dim = int(min(np.ceil((num_unique_values) / 2), 50))\n inp = Input(shape=(1,))\n out = Embedding(num_unique_values + 1, embed_dim, name=c)(inp)\n out = SpatialDropout1D(0.3)(out)\n out = Reshape(target_shape=(embed_dim,))(out)\n categorical_inputs.append(inp)\n categorical_embed_outputs.append(out)\n\n categorical_out = Concatenate()(categorical_embed_outputs)\n categorical_out = BatchNormalization()(categorical_out)\n x = Dense(32, activation=\"relu\")(categorical_out)\n x = Dropout(0.2)(x)\n x = BatchNormalization()(x)\n\n x = Dense(8, activation=\"relu\")(x)\n x = Dropout(0.2)(x)\n x = BatchNormalization()(x)\n\n # the model outputs the dense head built on top of the concatenated embeddings\n model = Model(inputs=categorical_inputs, outputs=x)\n return model\n","repo_name":"Copa6/ML_helper_codes","sub_path":"modelling/keras/categorical_embeddings.py","file_name":"categorical_embeddings.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"8383629194","text":"from softgym.envs.cloth_move import ClothMoveEnv\n\nfrom collections import OrderedDict\n\nenv_arg_dict = {\n 'ClothMove': {'observation_mode': 'cam_rgb',\n 'action_mode': 'picker',\n 'num_picker': 2,\n 'render': True,\n 'headless': True,\n 'horizon': 160,\n 'action_repeat': 8,\n 'render_mode': 'cloth',\n 'num_variations': 1000,\n 'use_cached_states': True,\n 'deterministic': False},\n}\n\nSOFTGYM_ENVS = OrderedDict({\n 'ClothMove': ClothMoveEnv,\n})\n","repo_name":"jkk5454/multiddpg","sub_path":"softgym/softgym/registered_env.py","file_name":"registered_env.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"44146568560","text":"def swap_words(sentence, word1, word2):\n words = sentence.split()\n modified_words = []\n # Iterate through the list of words\n for word in words:\n if word.strip('.,!?()') == word1:\n # Append the second word with the same punctuation\n modified_words.append(word2 + word[len(word1):])\n elif word.strip('.,!?()') == word2:\n # Append the first word with the same punctuation\n modified_words.append(word1 + word[len(word2):])\n else:\n # Append the original word\n modified_words.append(word)\n\n # Join the list of modified words back into a sentence\n new_sentence = ' '.join(modified_words)\n\n return new_sentence\n\n\n# Main function\ndef main():\n first = int(input(\"Enter first word number(N) = \"))\n second = 20 - first\n print(f\"Second word number(20 - N) = {second}\")\n text = (\"A programmer is a programming specialist who designs software \"\n \"(in simpler cases, individual programs) for programmable devices \"\n \"that typically contain one or more processors.\")\n temp = text\n text = text.split(' ')\n print(\"Start text: \", *text)\n word1 = text[first].strip('.,!?()')\n word2 = text[second].strip('.,!?()')\n print(\"Word 1 -\", word1)\n print(\"Word 2 -\", word2)\n print(\"New text: \", swap_words(temp, word1, word2))\n\n\nif __name__ == '__main__':\n main()","repo_name":"bohdanzubko/ai-programming-in-python","sub_path":"labwork2/Task4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"19358081461","text":"# 21.02.10 [Burglary]\n'''\nMust come back and review this one!\nIt doesn't seem like plain DP -- more like greedy + DP.\nI need to practice deriving recurrence relations.\n'''\ndef solution(money):\n answer = 0\n dp1 = [0 for x in 
range(len(money))]\n dp2 = [0 for x in range(len(money))]\n # dp1: rob house 0 (so the last house cannot be robbed)\n # dp2: rob house 1 (so the last house may be robbed)\n dp1[0] = money[0]\n dp1[1] = dp1[0]\n dp2[1] = money[1]\n \n for i in range(2,len(money)-1):\n dp1[i] = max(dp1[i-1], dp1[i-2]+money[i])\n dp2[i] = max(dp2[i-1], dp2[i-2]+money[i])\n \n # check whether or not to rob the last house\n dp2[-1] = max(dp2[-2], dp2[-3]+money[-1])\n \n answer = max(max(dp1),max(dp2))\n \n return answer\n","repo_name":"sladuf/Algorithm","sub_path":"Programmers/PRO42897.py","file_name":"PRO42897.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73985763146","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nwith open(\"README.md\",\"r\") as fh:\n long_description = fh.read()\n\nsetup(\n url = \"https://github.com/charles-turner-1/Fuelwatch\",\n author=\"Charles Turner\",\n author_email='charlesturner0987@gmail.com',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n ],\n install_requires=[\"requests\",\"feedparser\", \"pandas\"\n ,\"geocoder\"],\n extras_require = {\"dev\": [\"pytest>=3.7\",],},\n description=\"Fuelwatch Stuff\",\n py_modules=[\"Fuelwatch\"],\n package_dir={'': 'src'},\n license=\"MIT license\",\n include_package_data=True,\n name='Fuelwatch',\n version='0.0.1',\n zip_safe=False,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n)\n","repo_name":"charles-turner-1/FuelWatchWA","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9043443238","text":"from __future__ import division\n\nfrom models import *\nfrom utils.utils import *\nfrom utils.datasets import *\nfrom utils.parse_config import *\nfrom utils import debug\n\nfrom code.architectures import get_architecture, IMAGENET_CLASSIFIERS\n\nimport os\nimport sys\nimport time\nimport datetime\nimport argparse\nimport tqdm\nimport json\nimport torch\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport pdb\n\ndef evaluate(model, path, iou_thres, img_size, batch_size, test_count, start_count,\n smooth, smooth_count, smooth_batch_size, sigma, q_u, q_l, bin, sort, loc_bin_count=None, attack=False):\n model.eval()\n\n # Get dataloader\n dataset = ListDataset(path, img_size=img_size, augment=False, multiscale=False)\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn\n )\n\n Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n labels = []\n sample_metrics = [] # List of tuples (TP, confs, pred)\n if smooth:\n if bin == \"single\":\n bin = DetectionsAcc.SINGLE_BIN\n elif bin == \"label\":\n bin = DetectionsAcc.LABEL_BIN\n elif bin == \"location\":\n bin = DetectionsAcc.LOCATION_BIN\n elif bin == \"location+label\":\n bin = DetectionsAcc.LOCATION_LABEL_BIN\n else:\n raise ValueError(\"invalid binning option\")\n\n if sort == \"object\":\n sort = DetectionsAcc.OBJECT_SORT\n elif sort == \"center\":\n sort = DetectionsAcc.CENTER_SORT\n else:\n raise ValueError(\"invalid sort option\")\n\n 
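# median smoothing: run the base detector on noisy copies of the input and take per-coordinate medians with upper/lower bounds\n 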
accumulator = DetectionsAcc(bin=bin, sort=sort, loc_bin_count=loc_bin_count)\n smoothed_model = SmoothMedianNMS(model, sigma, accumulator)\n sample_metrics_smooth = [] # List of tuples (TP, pred)\n total_count = 0\n for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc=\"Detecting objects\", total=test_count+start_count)):\n if total_count >= test_count + start_count:\n break\n if total_count < start_count:\n total_count += len(imgs)\n continue\n # Extract labels\n labels += targets[:, 1].tolist()\n\n\n imgs = Variable(imgs.type(Tensor), requires_grad=False)\n if attack:\n ori_img = Variable(imgs.type(Tensor), requires_grad=False)\n adv_img = Variable(imgs.clone().detach().type(Tensor), requires_grad=True)\n targets_clone = Variable(targets.clone().to(device), requires_grad=False)\n\n attack_sample = 5\n first_idx = torch.arange(attack_sample).repeat_interleave(targets_clone.shape[0])\n targets_clone = targets_clone.repeat(attack_sample, 1)\n targets_clone[:, 0] = first_idx\n\n opt = optim.Adam([adv_img], lr=.001)\n radius = 0.36\n for i in range(20):\n noise = torch.randn_like(adv_img.repeat(attack_sample,1,1,1), requires_grad=False) * sigma\n # adv_loss = model[0].adv_loss(adv_img+noise, targets_clone)#/5\n adv_loss = model[1][0].adv_loss(model[0](adv_img+noise), targets_clone)\n # adv_loss = model[0].adv_loss(adv_img, targets_clone)\n opt.zero_grad()\n adv_loss.backward()\n adv_img.data -= adv_img.grad/adv_img.grad.view(adv_img.shape[0], -1).norm(dim=1)*.2*radius\n # opt.step()\n with torch.no_grad():\n diff_ori = (adv_img-ori_img)\n diff = diff_ori.view(diff_ori.shape[0], -1)\n norm = diff.norm(dim=1)\n div = torch.where(norm>radius, norm/radius, torch.ones_like(norm))\n adv_img.data = diff_ori/div[:, None, None, None] + ori_img\n imgs = adv_img.clone().detach().requires_grad_(False)\n # Rescale target\n targets[:, 2:] = xywh2xyxy(targets[:, 2:])\n targets[:, 2:] *= img_size\n with torch.no_grad():\n if smooth:\n outputs, outputs_l, outputs_u = smoothed_model.predict_range(\n imgs, n=smooth_count, batch_size=smooth_batch_size, q_u=q_u, q_l=q_l)\n # outputs dim: (# of images per batch, # of detections, 7)\n # outputs can contain infinite predictions: an entry may be needed at some percentile of\n # the distribution, yet the base classifier may not produce enough predictions for it to\n # reach the median/upper bound/lower bound\n sample_metrics_smooth += get_batch_statistics_worst(outputs, outputs_u, outputs_l, targets, iou_threshold=iou_thres)\n\n else:\n outputs = model(imgs)\n\n sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)\n total_count += len(imgs)\n\n\n # Concatenate sample statistics\n true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]\n if smooth:\n true_positives_worst, pred_labels_worst = [np.concatenate(x, 0) for x in list(zip(*sample_metrics_smooth))]\n precision_all_worst, recall_all_worst, f1_all_worst = pr_overall(true_positives_worst, labels)\n\n print(\"min correct\", sum(true_positives_worst))\n print(\"max predict\", len(true_positives_worst))\n print(\"total correct\", sum(true_positives))\n print(\"total predict\", len(true_positives))\n print(\"total ground truth\", len(labels))\n else:\n precision_all_worst, recall_all_worst, f1_all_worst = None, None, None\n precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)\n precision_all, recall_all, f1_all = 
pr_overall(true_positives, labels)\n return precision, recall, AP, f1, ap_class, \\\n precision_all, recall_all, f1_all, \\\n precision_all_worst, recall_all_worst, f1_all_worst\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=1, help=\"size of each image batch\")\n\n parser.add_argument(\"--model_type\", type=str, default=\"yolo\", choices=[\"yolo\", \"faster_rcnn\", \"mask_rcnn\"], help=\"type of model\")\n parser.add_argument(\"--model_def\", type=str, default=\"config/yolov3.cfg\", help=\"path to model definition file\")\n parser.add_argument(\"--data_config\", type=str, default=\"config/coco.data\", help=\"path to data config file\")\n parser.add_argument(\"--weights_path\", type=str, default=\"weights/yolov3.weights\", help=\"path to weights file\")\n parser.add_argument(\"--class_path\", type=str, default=\"data/coco.names\", help=\"path to class label file\")\n parser.add_argument(\"--iou_thres\", type=float, default=0.5, help=\"iou threshold required to qualify as detected\")\n parser.add_argument(\"--conf_thres\", type=float, default=0.8, help=\"object confidence threshold\")\n parser.add_argument(\"--nms_thres\", type=float, default=0.4, help=\"iou threshold for non-maximum suppression\")\n parser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\n parser.add_argument(\"--img_size\", type=int, default=416, help=\"size of each image dimension\")\n\n parser.add_argument(\"--test_count\", type=int, default=5000, help=\"samples used for evaluation (max is 5000)\")\n parser.add_argument(\"--start_count\", type=int, default=0, help=\"start count for evaluation\")\n parser.add_argument(\"--smooth\", action='store_true', help=\"use smoothing classifier\")\n parser.add_argument(\"--smooth_count\", type=int, default=2000, help=\"number of samples used to estimate the smooth classifier\")\n parser.add_argument(\"--smooth_batch_size\", type=int, default=20, help=\"batch size when estimating the smooth classifier\")\n parser.add_argument(\"--cert_conf\", type=float, default=.99999, help=\"confidence of certificate\")\n parser.add_argument(\"--sigma\", type=float, default=.25, help=\"sigma for the normal noise\")\n parser.add_argument(\"--eps\", type=float, default=.36, help=\"radius that we try to certify\")\n parser.add_argument(\"--denoise\", action='store_true', help=\"denoise image after smoothing\")\n parser.add_argument(\"--bin\", default=\"single\", help=\"binning method\")\n parser.add_argument(\"--loc_bin_count\", type=int, default=3, help=\"binning count for location binning\")\n parser.add_argument(\"--sort\", default=\"object\", help=\"sorting method\")\n parser.add_argument(\"--attack\", action='store_true', help=\"generate an attack against the object detector\")\n\n parser.add_argument(\"--seed\", type=int, default=0, help=\"random seed\")\n opt = parser.parse_args()\n print(json.dumps(vars(opt), indent=4))\n\n torch.manual_seed(opt.seed)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n data_config = parse_data_config(opt.data_config)\n valid_path = data_config[\"valid\"]\n class_names = load_classes(data_config[\"names\"])\n\n # Get the empirical order statistics that should be used\n if opt.smooth:\n cert_conf = opt.cert_conf\n q_u, q_l = estimated_qu_ql(opt.eps, opt.smooth_count, opt.sigma, conf_thres=cert_conf)\n print(f\"Certified Eps (with {cert_conf:6.6%} confidence): {opt.eps: 0.2f}\")\n print(f\"q_u:{q_u}, 
q_l:{q_l}\")\n else:\n cert_conf = None\n q_u = None\n q_l = None\n\n\n # Initialize models\n if opt.model_type == \"yolo\":\n model = Darknet(opt.model_def).to(device)\n if opt.weights_path.endswith(\".weights\"):\n # Load darknet weights\n model.load_darknet_weights(opt.weights_path)\n else:\n # Load checkpoint weights\n model.load_state_dict(torch.load(opt.weights_path))\n model = torch.nn.Sequential(model, NMSModule(opt.conf_thres, opt.nms_thres))\n elif opt.model_type == \"faster_rcnn\":\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True).to(device)\n model.roi_heads.score_thresh = opt.conf_thres\n model.roi_heads.nms_thresh = opt.nms_thres\n model = torch.nn.Sequential(model, Concat())\n elif opt.model_type == \"mask_rcnn\":\n model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True).to(device)\n model.roi_heads.score_thresh = opt.conf_thres\n model.roi_heads.nms_thresh = opt.nms_thres\n model = torch.nn.Sequential(model, Concat())\n\n if opt.denoise:\n checkpoint = torch.load(\"pretrained_models/trained_denoisers/imagenet/mse_obj/dncnn_5epoch_lr1e-4/noise_0.25/checkpoint.pth.tar\")\n denoiser = get_architecture(\"imagenet_dncnn\", \"imagenet\")\n denoiser.load_state_dict(checkpoint['state_dict'])\n model = torch.nn.Sequential(denoiser, model)\n\n print(\"Compute mAP...\")\n\n precision, recall, AP, f1, ap_class, \\\n precision_all, recall_all, f1_all, \\\n precision_all_worst, recall_all_worst, f1_all_worst = evaluate(\n model,\n path=valid_path,\n iou_thres=opt.iou_thres,\n img_size=opt.img_size,\n batch_size=opt.batch_size,\n test_count=opt.test_count,\n start_count=opt.start_count,\n smooth=opt.smooth,\n smooth_count=opt.smooth_count,\n smooth_batch_size=opt.smooth_batch_size,\n sigma=opt.sigma,\n q_u=q_u,\n q_l=q_l,\n sort=opt.sort,\n bin=opt.bin,\n loc_bin_count=opt.loc_bin_count,\n attack=opt.attack\n )\n\n print(\"Average Precisions:\")\n for i, c in enumerate(ap_class):\n print(f\"+ Class '{c}' ({class_names[c]}) - AP: {AP[i]} Precision: {precision[i]} Recall: {recall[i]} f1: {f1[i]}\")\n\n print(f\"mAP: {AP.mean()}\")\n print(f\"mean Precision: {precision.mean()}\")\n print(f\"mean Recall: {recall.mean()}\")\n print(f\"mean F1: {f1.mean()}\")\n print(f\"overall Precision: {precision_all} / {precision_all_worst}\")\n print(f\"overall Recall: {recall_all} / {recall_all_worst}\")\n print(f\"overall F1: {f1_all} / {f1_all_worst}\")\n","repo_name":"Ping-C/CertifiedObjectDetection","sub_path":"test_smooth.py","file_name":"test_smooth.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"30760308111","text":"\"\"\"\nThis module contains the functionality for connecting to the TNC's TCP/IP interface, monitoring\npackets and extracting specific kinds of data to be instrumented.\n\"\"\"\nimport asyncio\nfrom urllib.parse import urlparse\nimport socket\nimport logging\nfrom asyncio.events import AbstractEventLoop\nfrom time import sleep\nimport sys\n\n# AGWPE format packet to request version from TNC host\nVERSION_REQUEST = b\"\\x00\\x00\\x00\\x00\\x52\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\" \\\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\" \\\n b\"\\x00\\x00\\x00\\x00\"\n\n# AGWPE format packet to request monitoring packets be send from host\nMONITOR_REQUEST = b\"\\x00\\x00\\x00\\x00\\x6D\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\" \\\n 
b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\" \\\n b\"\\x00\\x00\\x00\\x00\"\n\n\nclass Listener:\n \"\"\"Class for creating listener objects that connect to AGWPE TCP/IP API and capture packets\"\"\"\n def __init__(self, tnc_url: str, kiss_mode: bool = False, loop: AbstractEventLoop = None):\n self.parsed_url = urlparse(tnc_url)\n self.tnc_host = self.parsed_url.hostname # tnc host to connect to\n self.tnc_port = int(self.parsed_url.port) # tnc port to listen on\n self.packet_queue = asyncio.Queue()\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if kiss_mode:\n self.kiss_mode = True\n self.connect_kiss(self.tnc_host, self.tnc_port)\n else:\n self.kiss_mode = False\n self.connect_agw(self.tnc_host, self.tnc_port)\n self.api_version = None # version returned by host API\n self.loop = loop or asyncio.get_event_loop()\n\n def connect_agw(self, host: str, port: int, retry_delay: int = 10):\n \"\"\"Connect to a TNC's AGWPE API\"\"\"\n while True:\n try:\n logging.info(f\"Attempting to connect to TNC at {host}:{port}\")\n self.client_socket.connect((host, port))\n except ConnectionRefusedError:\n logging.error(f\"Could not connect to TNC at {host}:{port}, connection refused. \"\n f\"Retrying in {retry_delay} seconds\")\n sleep(retry_delay)\n continue\n else:\n logging.info(f\"Connection established to TNC at {host}:{port}\")\n break\n # send version request packet to TNC\n # this provides a check as to whether you are connecting to an actual TNC that\n # exposes an AGWPE API, as well as logging the version response for debugging\n logging.debug(\"Sending version request to TNC\")\n self.client_socket.sendall(VERSION_REQUEST)\n version_packet = b\"\"\n bytes_recv = 0\n while bytes_recv < 36:\n chunk = self.client_socket.recv(4096)\n if chunk == b'':\n raise ConnectionResetError(\"Socket connection broken\")\n version_packet += chunk\n bytes_recv += len(chunk)\n if chr(version_packet[4]) == 'R':\n # read major and minor versions from packet sent by TNC\n maj_ver = int.from_bytes(version_packet[36:38], 'little')\n min_ver = int.from_bytes(version_packet[40:42], 'little')\n logging.debug(f\"Received TNC version info: {maj_ver}.{min_ver} \")\n self.client_socket.sendall(MONITOR_REQUEST) # ask tnc to send monitor packets\n else:\n # If the version response packet doesn't report the expected R packet type in byte 4,\n # shut everything down as you're probably not communicating with the AGWPE API\n logging.error(\"Did not receive expected reply when connecting to TNC. Quitting.\")\n logging.debug(\"Received the following packet in response to version request:\",\n version_packet)\n sys.exit()\n\n def connect_kiss(self, host: str, port: int, retry_delay: int = 10):\n \"\"\"Connect to a TNC's KISS TCP interface\"\"\"\n while True:\n try:\n logging.info(f\"Attempting to connect to TNC at {host}:{port}\")\n self.client_socket.connect((host, port))\n except ConnectionRefusedError:\n logging.error(f\"Could not connect to TNC at {host}:{port}, connection refused. 
\"\n f\"Retrying in {retry_delay} seconds\")\n sleep(retry_delay)\n continue\n else:\n logging.info(f\"Connection established to TNC at {host}:{port}\")\n break\n\n def disconnect(self):\n \"\"\"Close client socket connection\"\"\"\n self.client_socket.shutdown(socket.SHUT_RDWR)\n self.client_socket.detach()\n logging.info(\"Closed connection to TNC\")\n\n async def receive_packets(self):\n \"\"\"\n Continually receive packets from the AGWPE API and append them to the packet list\n as byte strings.\n \"\"\"\n # set the socket to non-blocking. If this is not set manually in Python 3.7, sock_recv will\n # block other tasks. It is only set once we begin recieving packets for metric calclations,\n # because the earlier socket operations to create a connection to the TNC can run\n # synchronously. Therefore there is no reason to set nonblocking early\n # and create extra complexity.\n self.client_socket.setblocking(False)\n # loop to listen for packets sent from the TNC and add them to the queue for metrics\n # processing\n while True:\n packet_bytes: bytes = b\"\"\n bytes_recv: int = 0\n while bytes_recv < 36:\n try:\n chunk = await self.loop.sock_recv(self.client_socket, 4096)\n if chunk == b'':\n raise ConnectionResetError(\"Socket connection broken\")\n except ConnectionResetError:\n logging.error(\"Connection to TNC was reset\")\n self.client_socket.close()\n # remake client socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if self.kiss_mode:\n self.connect_kiss(self.tnc_host, self.tnc_port)\n else:\n self.connect_agw(self.tnc_host, self.tnc_port)\n continue\n else:\n packet_bytes += chunk\n bytes_recv += len(chunk)\n if self.kiss_mode:\n # sometimes, a KISS interface will pass multiple packets. Split by frame delimiter\n # and add each to the queue\n split_packets = packet_bytes.split(b'\\xc0')\n for p in split_packets:\n if len(p) > 0:\n await self.packet_queue.put(p)\n else:\n await self.packet_queue.put(packet_bytes)\n logging.debug(f\"Received packet, total {self.packet_queue.qsize()} in queue\")\n","repo_name":"rouyng/tncexporter","sub_path":"tncexporter/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"23309971130","text":"import telebot\n\nimport random\nfrom telebot import types\n\nfrom template import t\nimport os \nimport shikimori\nimport database\nimport logging\nfrom helper import escape\n\n\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(levelname)s - %(message)s\")\n\ntelebot.apihelper.ENABLE_MIDDLEWARE=True\n\n# env[ironment]\nbot = telebot.TeleBot(os.getenv(\"BOT_TOKEN\"), parse_mode=None)\n\nanswers = {\n \"hello\": \"hi\",\n \"how are you doing?\" : \"well\",\n \"9^2\": \"81\", \n}\n\n\n\n@bot.middleware_handler(update_types=['message'])\ndef set_settings(bot_instance,message):\n settings = database.get_settings(message.from_user.id) \n \n if settings == None:\n settings={}\n \n settings.setdefault('language', 'ru')\n\n message.settings = settings\n \n\n\n#/start /help\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n markup = types.ReplyKeyboardMarkup(row_width=2)\n for key in answers:\n bttn = types.KeyboardButton(key)\n markup.add(bttn)\n bot.reply_to(message, \"Howdy, how are you doing?\", reply_markup=markup)\n\n# /lang en\n# /lang ru\n# /lang jp\n# /lang \n@bot.message_handler(commands=['lang'])\ndef change_language(message):\n cmd = 
message.text.split() # ['/lang', 'ru']\n\n if len(cmd) != 2 or cmd[1] not in [\"ru\",\"en\"]:\n bot.reply_to(message, t(\"undefined_lang\", message.settings[\"language\"]))\n return\n \n database.update_settings(message.from_user.id, {'language':cmd[1]})\n bot.reply_to(message,t(\"confirmed_lang\",message.settings[\"language\"] ))\n \n\n \n# /anime\n@bot.message_handler(commands=['anime'])\ndef recomendations(message): \n genres = database.get_genres(message.from_user.id)\n res = shikimori.search(genres) \n \n anime = shikimori.get_anime(res.id)\n\n bot.reply_to(message, t(\"anime_inf\",message.settings[\"language\"],anime=anime),parse_mode=\"MarkdownV2\")\n \n logging.info(\"recomendations %s %s\", anime.name, message.from_user.id)\n\n image_url = 'https://shikimori.one' + anime.image['original']\n\n anime_url = 'https://shikimori.one' + anime.url\n\n markup = types.InlineKeyboardMarkup(row_width=2)\n bttn = types.InlineKeyboardButton('follow the shikimori link',url=anime_url )\n markup.add(bttn)\n\n bot.send_photo(message.from_user.id, image_url,reply_markup=markup)\n\n\n\nGENRE_ADDED = '✅'\nGENRE_NOT_ADDED = '❌'\n\n\n#/genre\n@bot.message_handler(commands=['genre'])\ndef genre(message):\n markup = types.ReplyKeyboardMarkup(row_width=2,resize_keyboard=True)\n\n user_genres = database.get_genres(message.from_user.id) # []\n \n buttons = []\n\n for key in sorted(shikimori.genres[message.settings['language']]):\n text = key\n if shikimori.genres[message.settings['language']][key] in user_genres:\n text = GENRE_ADDED + text\n else: \n text = GENRE_NOT_ADDED + text\n\n bttn = types.KeyboardButton(text)\n buttons.append(bttn)\n \n logging.info(\"genre %s\",message.from_user.id)\n\n markup.add(*buttons)\n\n bot.reply_to(message, \"Choose genre:\", reply_markup=markup)\n\n\n# user_genres = {}\n@bot.message_handler(func=lambda m: m.text[1:] in shikimori.genres[m.settings['language']])\ndef genre_pick(message):\n user_genres = database.get_genres(message.from_user.id) \n\n genre_name = message.text[1:]\n genre_id = shikimori.genres[message.settings['language']][genre_name] # 1\n \n if genre_id in user_genres:\n user_genres.remove(genre_id)\n else:\n user_genres.append(genre_id)\n \n database.update_genres(message.from_user.id, user_genres)\n \n print(user_genres)\n genre(message)\n \n\n#without command\n@bot.message_handler(func=lambda m: True)\ndef echo_all(message):\n answer = answers.get(message.text)\n \n if answer is None:\n answer = \"hohohoo\"\n bot.reply_to(message,answer)\n print (answer) \n\nlogging.info('Starting bot')\n\nbot.polling()","repo_name":"Relecto/miharu","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41262374777","text":"#\n# @lc app=leetcode id=452 lang=python\n#\n# [452] Minimum Number of Arrows to Burst Balloons\n#\n\n# @lc code=start\nclass Solution(object):\n def findMinArrowShots(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: int\n \"\"\"\n def comparator(x1, x2):\n if x1[0] == x2[0]:\n return x1[1] - x2[1]\n else:\n return x1[0] - x2[0] \n points.sort(cmp=comparator)\n\n start, end, result = None, None, 0\n for i in range(len(points)):\n if end is None or points[i][0] > end:\n start, end = points[i]\n result += 1\n start = max(start, points[i][0])\n end = min(end, points[i][1])\n return result\n\n \n# @lc 
code=end\n\n","repo_name":"WuLC/LeetCode","sub_path":"Algorithm/Python/452.minimum-number-of-arrows-to-burst-balloons.py","file_name":"452.minimum-number-of-arrows-to-burst-balloons.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"43846520585","text":"\nimport socket\n\ntarget_host = \"127.0.0.1\"\ntarget_port = 80\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nclient.sendto('AAAAAA'.encode(encoding='utf-8'), (target_host, target_port))\n\nprint(client.recvfrom(1024))","repo_name":"bermlida/Etherious","sub_path":"Exercise/UDP Client.py","file_name":"UDP Client.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8661159186","text":"import sys\nimport random\nimport math\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QTableWidget, QMessageBox, QAbstractItemView, QTableWidgetItem\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtCore import Qt\n\nfrom pagesetting import *\nfrom pagemain import *\nfrom Markov import *\n\nclass Pages:\n def __init__(self):\n self.pageSetting = Ui_pageSetting()\n self.pageMain = Ui_pageMain()\n\npages = Pages()\nmarkov = Markov()\ncountN = 0\nstart = [-1, -1]\n\ndef main():\n global countN\n arr = []\n app = QApplication(sys.argv)\n\n pageMain = QMainWindow()\n pages.pageMain.setupUi(pageMain)\n pageMain.show()\n\n pages.pageMain.btn_map.clicked.connect(lambda: setParams(pageMain))\n QApplication.processEvents()\n pages.pageMain.btn_next.clicked.connect(lambda: getNext())\n\n sys.exit(app.exec_())\n\ndef setParams(pageMain):\n pageSetting = QDialog(pageMain)\n pages.pageSetting.setupUi(pageSetting)\n pageSetting.show()\n\n pages.pageSetting.btn_generate.clicked.connect(lambda: getParams(pageSetting))\n\n pages.pageSetting.btn_random.clicked.connect(lambda: randomParams(pageSetting))\n\n QApplication.processEvents()\n\ndef getParams(pageSetting):\n global start\n xStr = pages.pageSetting.edit_x.text()\n yStr = pages.pageSetting.edit_y.text()\n \n trapsStr = pages.pageSetting.edit_trap.text().split(' ')\n barriersStr = pages.pageSetting.edit_barrier.text().split(' ')\n goalsStr = pages.pageSetting.edit_e.text().split(' ')\n\n sStr = pages.pageSetting.edit_s.text()[1:-1].split(',')\n\n if xStr == '' or yStr == '' or trapsStr == '' or barriersStr == '' or sStr == '' or goalsStr == '':\n QMessageBox.information(pageSetting,\"Warning\", \n pageSetting.tr(\"Please fill in all the blank\")) \n return;\n\n traps = []\n barriers = []\n goals = []\n s = []\n\n try:\n x = int(xStr)\n y = int(yStr)\n\n for i in trapsStr: \n tmpList = []\n for j in i[1:-1].split(','):\n tmpList.append(int(j))\n traps.append(tmpList)\n\n for i in barriersStr: \n tmpList = []\n for j in i[1:-1].split(','):\n tmpList.append(int(j))\n barriers.append(tmpList)\n\n for i in goalsStr: \n tmpList = []\n for j in i[1:-1].split(','):\n tmpList.append(int(j))\n goals.append(tmpList)\n\n for i in sStr: \n s.append(int(i))\n except:\n QMessageBox.information(pageSetting,\"Warning\", \n pageSetting.tr(\"Please use the correct input format, separate each node with a space.\")) \n return\n\n # print(xStr, yStr, pages.pageSetting.edit_trap.text(), '_', pages.pageSetting.edit_barrier.text(), sStr, goalsStr)\n start = s\n\n for i in range(0, x):\n for j in range(0, y):\n item = QTableWidgetItem()\n item.setBackground(QColor('white'))\n item.setText(\"\")\n 
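# blank white cell for the value grid; the policy grid gets its own blank item below\n 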
pages.pageMain.table_left.setItem(i, j, item)\n\n item = QTableWidgetItem()\n item.setBackground(QColor('white'))\n item.setText(\"\")\n pages.pageMain.table_right.setItem(i, j, item)\n\n pages.pageMain.table_left.setRowCount(x)\n pages.pageMain.table_left.setColumnCount(y)\n \n pages.pageMain.table_right.setRowCount(x)\n pages.pageMain.table_right.setColumnCount(y)\n\n xList = []\n for i in range(0, x):\n xList.append(str(i))\n\n yList = []\n for i in range(0, y):\n yList.append(str(i))\n\n pages.pageMain.table_left.setHorizontalHeaderLabels(yList)\n pages.pageMain.table_left.setVerticalHeaderLabels(xList)\n\n pages.pageMain.table_right.setHorizontalHeaderLabels(yList)\n pages.pageMain.table_right.setVerticalHeaderLabels(xList)\n\n pages.pageMain.table_left.setEditTriggers(QAbstractItemView.NoEditTriggers) \n pages.pageMain.table_left.setSelectionMode(QAbstractItemView.NoSelection) \n\n pages.pageMain.table_right.setEditTriggers(QAbstractItemView.NoEditTriggers) \n pages.pageMain.table_right.setSelectionMode(QAbstractItemView.NoSelection) \n\n size = 70\n # if 434 / y * x <= 560:\n # size = 425 / y\n # else:\n # size = 560 / x\n\n pages.pageMain.table_left.horizontalHeader().setDefaultSectionSize(size)\n pages.pageMain.table_left.verticalHeader().setDefaultSectionSize(size)\n\n pages.pageMain.table_right.horizontalHeader().setDefaultSectionSize(size)\n pages.pageMain.table_right.verticalHeader().setDefaultSectionSize(size)\n\n for i in traps:\n item = QTableWidgetItem()\n item.setBackground(QColor('grey'))\n item.setText(\"-1\")\n item.setForeground(QColor('white'))\n item.setTextAlignment(Qt.AlignCenter)\n pages.pageMain.table_left.setItem(i[0], i[1], item)\n item = QTableWidgetItem()\n item.setBackground(QColor('grey'))\n pages.pageMain.table_right.setItem(i[0], i[1], item)\n\n for i in barriers:\n item = QTableWidgetItem()\n item.setBackground(QColor('black'))\n pages.pageMain.table_left.setItem(i[0], i[1], item)\n item = QTableWidgetItem()\n item.setBackground(QColor('black'))\n pages.pageMain.table_right.setItem(i[0], i[1], item)\n\n item = QTableWidgetItem()\n item.setBackground(QColor('blue'))\n pages.pageMain.table_left.setItem(s[0], s[1], item)\n item = QTableWidgetItem()\n item.setBackground(QColor('blue'))\n pages.pageMain.table_right.setItem(s[0], s[1], item)\n\n for i in goals:\n item = QTableWidgetItem()\n item.setBackground(QColor('red'))\n item.setText(\"1\")\n item.setForeground(QColor('white'))\n item.setTextAlignment(Qt.AlignCenter)\n pages.pageMain.table_left.setItem(i[0], i[1], item)\n item = QTableWidgetItem()\n item.setBackground(QColor('red'))\n pages.pageMain.table_right.setItem(i[0], i[1], item)\n\n pageSetting.close()\n\n markov.read_map([x, y], s, barriers, traps, goals)\n markov.initialize_state()\n\n global countN\n countN = 0\n pages.pageMain.label_n.setText(str(countN))\n\n tmpDialog = QDialog()\n tmpDialog.show()\n tmpDialog.close()\n\n QApplication.processEvents()\n\ndef getNext():\n global countN\n global start\n\n if pages.pageMain.label_n.text() != '?':\n markov.update_value_once()\n arrPolicy = markov.get_policy()\n arrValue = markov.get_value()\n\n for i in range(0, len(arrValue)):\n for j in range(0, len(arrValue[0])):\n if arrValue[i][j] != 0:\n item = QTableWidgetItem()\n item.setText(str(arrValue[i][j]))\n item.setTextAlignment(Qt.AlignCenter)\n if i == start[0] and j == start[1]:\n item.setBackground(QColor('blue'))\n pages.pageMain.table_left.setItem(i, j, item)\n\n for i in range(0, len(arrPolicy)):\n for j in range(0, len(arrPolicy[0])):\n if 
arrPolicy[i][j] != '.':\n item = QTableWidgetItem()\n item.setText(arrPolicy[i][j])\n item.setTextAlignment(Qt.AlignCenter)\n if i == start[0] and j == start[1]:\n item.setBackground(QColor('blue'))\n pages.pageMain.table_right.setItem(i, j, item)\n\n countN = countN + 1\n pages.pageMain.label_n.setText(str(countN))\n tmpDialog = QDialog()\n tmpDialog.show()\n tmpDialog.close()\n\n QApplication.processEvents()\n\ndef randomParams(pageSetting):\n x = random.randint(3, 20)\n y = random.randint(3, 20)\n\n pages.pageSetting.edit_x.setText(str(x))\n pages.pageSetting.edit_y.setText(str(y))\n\n if math.floor(x * y / 8) > 0:\n numTraps = random.randint(1, math.floor(x * y / 8))\n numBarriers = random.randint(1, math.floor(x * y / 7))\n else:\n numTraps = 0\n numBarriers = 0\n\n\n strS = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n pages.pageSetting.edit_s.setText(strS)\n\n strE = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n while (strE == strS):\n strE = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n pages.pageSetting.edit_e.setText(strE)\n\n strTraps = ''\n for i in range(0, numTraps):\n if i == 0:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n while tmpStr == strS or tmpStr == strE:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n\n strTraps = tmpStr\n else:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n while tmpStr == strS or tmpStr == strE:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n\n strTraps = strTraps + ' ' + tmpStr\n pages.pageSetting.edit_trap.setText(strTraps)\n\n strBarriers = ''\n for i in range(0, numBarriers):\n if i == 0:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n while tmpStr == strS or tmpStr == strE or strTraps.find(tmpStr) != -1:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n\n strBarriers = tmpStr\n else:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n while tmpStr == strS or tmpStr == strE or strTraps.find(tmpStr) != -1:\n tmpStr = '(' + str(random.randint(0, x - 1)) + ',' + str(random.randint(0, y - 1)) + ')'\n\n strBarriers = strBarriers + ' ' + tmpStr\n pages.pageSetting.edit_barrier.setText(strBarriers)\n\n\n QApplication.processEvents()\n\nif __name__ == '__main__':\n main()","repo_name":"Aiemu/CourseAI-Proj","sub_path":"Project-1/src/Markov/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":9988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"12975733750","text":"from collections import defaultdict\nimport pytest\nfrom dancing_links import SudokuDLX, LatinSquareDLX\n\neasy_ls = [[1, 0],\n [0, 0]]\n\neasy_ls_solution = [[1, 2],\n [2, 1]]\n\nhard_ls = [[3, 0, 5, 0, 7, 0, 1, 0],\n [7, 0, 0, 6, 0, 1, 0, 3],\n [0, 1, 0, 7, 0, 0, 3, 0],\n [8, 0, 6, 0, 0, 0, 0, 2],\n [0, 0, 0, 0, 0, 8, 4, 0],\n [0, 3, 0, 0, 6, 0, 0, 4],\n [1, 0, 8, 0, 0, 4, 0, 0],\n [0, 8, 0, 0, 1, 0, 5, 6]]\n\nhard_ls_solution = [[3, 6, 5, 4, 7, 2, 1, 8],\n [7, 5, 2, 6, 4, 1, 8, 3],\n [2, 1, 4, 7, 8, 6, 3, 5],\n [8, 4, 6, 1, 3, 5, 7, 2],\n [6, 7, 3, 5, 2, 8, 4, 1],\n [5, 3, 1, 8, 6, 7, 2, 4],\n [1, 2, 8, 3, 5, 4, 6, 7],\n [4, 8, 7, 2, 1, 3, 5, 6]]\n\neasy_sudoku = [[0, 0, 6, 1, 0, 0, 0, 0, 8],\n [0, 8, 0, 0, 
9, 0, 0, 3, 0],\n [2, 0, 0, 0, 0, 5, 4, 0, 0],\n [4, 0, 0, 0, 0, 1, 8, 0, 0],\n [0, 3, 0, 0, 7, 0, 0, 4, 0],\n [0, 0, 7, 9, 0, 0, 0, 0, 3],\n [0, 0, 8, 4, 0, 0, 0, 0, 6],\n [0, 2, 0, 0, 5, 0, 0, 8, 0],\n [1, 0, 0, 0, 0, 2, 5, 0, 0]]\n\neasy_sudoku_solution = [[3, 4, 6, 1, 2, 7, 9, 5, 8],\n [7, 8, 5, 6, 9, 4, 1, 3, 2],\n [2, 1, 9, 3, 8, 5, 4, 6, 7],\n [4, 6, 2, 5, 3, 1, 8, 7, 9],\n [9, 3, 1, 2, 7, 8, 6, 4, 5],\n [8, 5, 7, 9, 4, 6, 2, 1, 3],\n [5, 9, 8, 4, 1, 3, 7, 2, 6],\n [6, 2, 4, 7, 5, 9, 3, 8, 1],\n [1, 7, 3, 8, 6, 2, 5, 9, 4]]\n\nhard_sudoku = [[8, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 3, 6, 0, 0, 0, 0, 0],\n [0, 7, 0, 0, 9, 0, 2, 0, 0],\n [0, 5, 0, 0, 0, 7, 0, 0, 0],\n [0, 0, 0, 0, 4, 5, 7, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 3, 0],\n [0, 0, 1, 0, 0, 0, 0, 6, 8],\n [0, 0, 8, 5, 0, 0, 0, 1, 0],\n [0, 9, 0, 0, 0, 0, 4, 0, 0]]\n\nhard_sudoku_solution = [[8, 1, 2, 7, 5, 3, 6, 4, 9],\n [9, 4, 3, 6, 8, 2, 1, 7, 5],\n [6, 7, 5, 4, 9, 1, 2, 8, 3],\n [1, 5, 4, 2, 3, 7, 8, 9, 6],\n [3, 6, 9, 8, 4, 5, 7, 2, 1],\n [2, 8, 7, 1, 6, 9, 5, 3, 4],\n [5, 2, 1, 9, 7, 4, 3, 6, 8],\n [4, 3, 8, 5, 2, 6, 9, 1, 7],\n [7, 9, 6, 3, 1, 8, 4, 5, 2]]\n\n\ndef verify_ls(solution):\n assert len(solution) == len(solution[0]), \"Latin square width must equal height\"\n\n size = len(solution)\n numbers = set(range(1, size + 1))\n\n for row in solution:\n assert numbers == set(row), \"Latin square row is invalid\"\n\n for i in range(size):\n column = [row[i] for row in solution]\n assert numbers == set(column), \"Latin square column is invalid\"\n\n\ndef verify_sudoku(solution):\n assert len(solution) == len(solution[0]), \"Sudoku width must equal height\"\n\n size = len(solution)\n numbers = set(range(1, size + 1))\n\n for row in solution:\n assert numbers == set(row), \"Sudoku row is invalid\"\n\n for i in range(size):\n column = [row[i] for row in solution]\n assert numbers == set(column), \"Sudoku column is invalid\"\n\n def get_zone(row, col):\n \"\"\"Helper function to identify the zone a cell belongs to\"\"\"\n lookup = [[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]]\n return str(lookup[row // 3][col // 3])\n\n zones = defaultdict(int)\n for row, col in [(row, col) for row in range(size) for col in range(size)]:\n zones[get_zone(row, col)] += 1\n\n assert all(map(lambda x: x == size, zones.values())), \"Sudoku zones are invalid\"\n\n\ndef test_easy_latin_square():\n\n ls = LatinSquareDLX()\n answer = ls.solve(easy_ls)\n\n assert answer == easy_ls_solution, \"Must be able to solve easy latin square.\"\n\n\ndef test_hard_latin_square():\n\n ls = LatinSquareDLX()\n answer = ls.solve(hard_ls)\n\n assert answer == hard_ls_solution, \"Must be able to solve large latin square.\"\n\n\ndef test_easy_sudoku():\n\n s = SudokuDLX()\n answer = s.solve(easy_sudoku)\n\n assert answer == easy_sudoku_solution, \"Must be able to solve easy sudoku.\"\n\n\ndef test_hard_sudoku():\n\n s = SudokuDLX()\n answer = s.solve(hard_sudoku)\n\n assert answer == hard_sudoku_solution, \"Must be able to solve hard sudoku.\"\n\n\ndef test_can_solve_multiple():\n\n s = SudokuDLX()\n for i in range(2):\n problems = [easy_sudoku, hard_sudoku]\n solutions = [easy_sudoku_solution, hard_sudoku_solution]\n answer = s.solve(problems[i])\n\n assert answer == solutions[i], \"Must be able to solve sudoku repeatedly.\"\n\n l = LatinSquareDLX()\n for i in range(2):\n problems = [easy_ls, hard_ls]\n solutions = [easy_ls_solution, hard_ls_solution]\n answer = l.solve(problems[i])\n\n assert answer == solutions[i], \"Must be able to solve latin squares repeatedly.\"\n\n\ndef 
test_can_generate_correct_latin_square():\n\n l = LatinSquareDLX()\n g = l.generate(size=4)\n\n # Verify correctness of solutions\n for _, solution in zip(range(10), g):\n verify_ls(solution)\n\n # Skip ahead\n for _ in range(100):\n next(g)\n\n # Verify some more solutions\n for _, solution in zip(range(10), g):\n verify_ls(solution)\n\n\ndef test_can_generate_correct_sudoku():\n\n s = SudokuDLX()\n g = s.generate()\n\n # Verify correctness of solutions\n for _, solution in zip(range(10), g):\n verify_sudoku(solution)\n\n # Skip ahead\n for _ in range(100):\n next(g)\n\n # Verify some more solutions\n for _, solution in zip(range(10), g):\n verify_sudoku(solution)\n","repo_name":"wchurc/dancing_links","sub_path":"test_dancing_links.py","file_name":"test_dancing_links.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12557325768","text":"import torch.nn as nn\n\nfrom RNN_Model import BasicRNN, AttentionRNN\nfrom DNN_Model import PostDNN\n\n\nclass Model(nn.Module):\n def __init__(self, rnn_model, input_size, rnn_hidden_size, num_layers, dnn_hidden_size, seq_len,\n attention_type, num_classes=1):\n super(Model, self).__init__()\n self.rnn_model = rnn_model\n self.input_size = input_size\n self.rnn_hidden_size = rnn_hidden_size\n self.num_layers = num_layers\n self.dnn_hidden_size = dnn_hidden_size\n self.seq_len = seq_len\n self.attention_type = attention_type\n self.num_classes = num_classes\n\n if rnn_model == 'BasicRNN':\n self.rnn = BasicRNN(input_size=self.input_size, hidden_size=self.rnn_hidden_size, num_layers=self.num_layers)\n elif rnn_model == 'AttentionRNN':\n self.rnn = AttentionRNN(input_size=self.input_size, hidden_size=self.rnn_hidden_size, seq_len=self.seq_len,\n num_layers=self.num_layers, attention_type=self.attention_type)\n\n self.dnn = PostDNN(input_size=self.rnn_hidden_size, hidden_size=self.dnn_hidden_size,\n num_classes=self.num_classes)\n\n def forward(self, x):\n if self.rnn_model == 'BasicRNN':\n output, hidden = self.rnn(x)\n linear_out, sig_out = self.dnn(output)\n\n return linear_out, sig_out\n\n elif self.rnn_model == 'AttentionRNN':\n output, hidden = self.rnn(x)\n linear_out, sig_out = self.dnn(output)\n\n return linear_out, sig_out\n","repo_name":"Jo0o0Hyung/Dual-Attention-for-VAD","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"26695379260","text":"import traceback\nimport sys\nfrom random import Random\nfrom args import The\nfrom constants import BIG_S_OPTION, D_OPTION, E_OPTION, F_OPTION, N_OPTION, SMALL_S_OPTION\nfrom data import Data\nfrom fileutils import read_file\nfrom num import Num\nfrom parse import parser\nfrom print import oo\nfrom sym import Sym\n\nclass Example(object):\n def run_examples(self, input_the):\n self.test_the = input_the\n self.rando = Random(input_the[SMALL_S_OPTION])\n failures = 0\n crashes = 0\n successes = 0\n methods = dir(self)\n test_methods = []\n test_opt = self.test_the[E_OPTION].lower()\n for method in methods:\n if method.startswith(\"eg_\" + (test_opt if (test_opt != \"all\") else \"\")):\n test_methods.append(method)\n for test_method in test_methods:\n test_thing = getattr(self,test_method)\n print(\"Running \" + test_method)\n try:\n test_thing()\n print(\"\\033[92m {}\\033[00m\" .format(\"SUCCESS!!\"))\n successes += 1\n except Exception as e:\n if 
self.test_the[D_OPTION] == True:\n traceback.print_exc()\n if isinstance(e, AssertionError):\n print(\"\\033[91m {}\\033[00m\" .format(\"FAILED\"))\n failures += 1\n else:\n print(\"\\033[91m {}\\033[00m\" .format(\"CRASHED\"))\n crashes += 1\n print()\n sys.exit(0 if (failures == 0 and crashes == 0) else 1)\n \n def eg_num(self):\n num_test = Num()\n for each_num in range(1, 101):\n num_test.add(each_num, self.test_the[N_OPTION], self.rando)\n median, st_dev = num_test.mid(), num_test.div()\n\n oo({\"mid\": median, \"div\": st_dev})\n assert 50 <= median and median <= 52 and 30.5 < st_dev and st_dev < 32\n \n def eg_sym(self):\n sym_test = Sym(None, None)\n\n for letter in [\"a\", \"a\", \"a\", \"a\", \"b\", \"b\", \"c\"]:\n sym_test.add(letter)\n mode, entropy = sym_test.mid(), sym_test.div()\n entropy = entropy * 1000 // 1 / 1000\n oo({ \"mid\": mode, \"div\": entropy })\n assert mode == \"a\" and 1.37 <= entropy and entropy <= 1.38\n\n def eg_bignum(self):\n num_test = Num()\n bignum_the = The().the\n bignum_the[N_OPTION] = 32\n for i in range(1, 1001):\n num_test.add(i, bignum_the[N_OPTION], self.rando)\n\n oo(num_test.nums())\n assert 32 == len(num_test._has)\n\n def eg_the(self):\n self.params = The()\n oo(self.params.the)\n\n def eg_csv(self):\n csv_the = The().the\n csv_the[F_OPTION] = \"./data/auto93.csv\"\n n = 0\n def inner_fun(row):\n nonlocal n\n n = n + 1\n if n <= 10:\n oo(row) \n parser(read_file(csv_the[F_OPTION]), inner_fun, csv_the[BIG_S_OPTION] )\n assert True\n \n def eg_data(self):\n data_the = The().the\n data_the[F_OPTION] = \"./data/auto93.csv\"\n d = Data(read_file(data_the[F_OPTION]), data_the[N_OPTION], data_the[BIG_S_OPTION], data_the[SMALL_S_OPTION])\n for col in d.cols.y:\n oo(col)\n assert True\n\n def eg_stats(self):\n data_the = The().the\n data_the[F_OPTION] = \"./data/auto93.csv\"\n d = Data(read_file(data_the[F_OPTION]), data_the[N_OPTION], data_the[BIG_S_OPTION], data_the[SMALL_S_OPTION])\n div = lambda thing: thing.div()\n mid = lambda thing: thing.mid()\n print(\"xmid\", f\"{d.stats(2,d.cols.x,mid)}\")\n print(\"xdiv\", f\"{d.stats(3,d.cols.x,div)}\")\n print(\"ymid\", f\"{d.stats(2,d.cols.y,mid)}\")\n print(\"ydiv\", f\"{d.stats(3,d.cols.y,div)}\")\n assert True\n\nif __name__ == \"__main__\":\n testthe = The()\n testthe.the[E_OPTION] = \"ALL\"\n eg = Example().run_examples(testthe.the)\n","repo_name":"CSC-510-Team-31/CSC_510-Team-31_HW2345","sub_path":"src/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27107064847","text":"import sys\nsys.setrecursionlimit(10 ** 9)\n\nn, x, y = map(int, input().split())\n\ndef red(lv, nr):\n global x, y\n if lv < 2:\n return 0\n return red(lv - 1, nr) + blue(lv, x * nr)\n\ndef blue(lv, nr):\n global x, y\n if lv == 1:\n return nr\n return red(lv - 1, nr) + blue(lv - 1, y * nr)\n\nprint(red(n, 1))","repo_name":"silphire/atcoder","sub_path":"abc260/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30126928865","text":"from collections import Counter\n\n\ndef tests():\n ls = []\n ls2 = []\n with open('../data/fiction.txt', 'r', encoding='utf-8') as f:\n for line in f:\n a = line.replace('\\n', '')\n # print(a.split('|'))\n ls.append(a.split('|'))\n for i in ls:\n ls2.append(i[1])\n result = dict(Counter(ls2))\n print(result)\n print([key for key, value 
in result.items() if value > 1])\n print({key: value for key, value in result.items() if value > 1})\n # ls3 = set(ls2)\n # print(len(ls2))\n # print(len(ls3))\n\n\nif __name__ == \"__main__\":\n tests()\n","repo_name":"yzcyjc/kuguatang","sub_path":"py/fiction.py","file_name":"fiction.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"71743179146","text":"from Start_Pane import StartPane\nfrom Get_Info_Pane import GetInfoPane\nfrom Warmup_Exp_Pane import WarmupExpPane\nfrom Main_Exp_Pro_Pane import MainExpProPane\nfrom Setting_Pane import Settings_Pane\nfrom DIY_Pho_Size import DIYPhoSizePane\nfrom PyQt5.Qt import *\n\nimport json\n\n\nif __name__ == '__main__':\n import sys\n app = QApplication(sys.argv)\n\n # create the control panes\n start_pane = StartPane()\n get_info_pane = GetInfoPane()\n warmup_exp_pane = WarmupExpPane()\n main_exp_pane = MainExpProPane()\n settings_pane = Settings_Pane()\n diy_pho_size_pane = DIYPhoSizePane()\n # configuration data\n configure = 0\n\n\n\n # slot functions\n def Jumpfrom_start_to_getinfo():\n start_pane.close()\n get_info_pane.show()\n\n def Jumpfrom_getinfo_to_start():\n get_info_pane.close()\n start_pane.show()\n\n def Jumpfrom_getinfo_to_warmup():\n get_info_pane.close()\n warmup_exp_pane.show()\n\n def Jumpfrom_warmup_to_getinfo():\n warmup_exp_pane.close()\n get_info_pane.show()\n\n def Jumpfrom_warmup_to_mainxep():\n warmup_exp_pane.close()\n main_exp_pane.show()\n main_exp_pane.help_tips_pane.wizard.show()\n main_exp_pane.refresh_settings()\n main_exp_pane.GT_pho_pane.show()\n\n\n\n def Jump_to_settings(item):\n settings_pane.show()\n settings_pane.goto_exact_body(item)\n\n def Refresh_exp_settings():\n main_exp_pane.refresh_settings()\n\n def Set_constrain_of_phoszie():\n max_size = int(main_exp_pane.display_widget.width() *0.85)\n diy_pho_size_pane.set_constrain(max_size)\n diy_pho_size_pane.show()\n\n def set_diy_pho_size():\n with open(\"./settings/configure.json\", \"r\", encoding='UTF-8') as f:\n global configure\n configure = json.load(f)\n f.close()\n size_pair = configure['PhotoDis']['PhotoSize']['TestSize']['PhoSizeDict']['1']\n # only diy_pho_size_pane has updated configure here; the settings pane refreshes its own settings later, so the updated photo size must also be passed on to setting_pane's configure\n settings_pane.configure['PhotoDis']['PhotoSize']['TestSize']['PhoSizeDict']['1'] = size_pair\n size_pair = size_pair.replace(',', ' x ')\n print('current custom size:', size_pair)\n\n settings_pane.test_size_cbox.setItemText(1, size_pair)\n settings_pane.gt_size_cbox.setItemText(1, size_pair)\n\n\n\n\n # signal connections\n start_pane.jumpfrom_start_to_getinfo_signal.connect(Jumpfrom_start_to_getinfo)\n get_info_pane.jumpfrom_getinfo_to_start_signal.connect(Jumpfrom_getinfo_to_start)\n get_info_pane.jumpfrom_getinfo_to_warmup_signal.connect(Jumpfrom_getinfo_to_warmup)\n warmup_exp_pane.jumpfrom_warmup_to_getinfo_signal.connect(Jumpfrom_warmup_to_getinfo)\n warmup_exp_pane.jumpfrom_warmup_to_mainexp_signal.connect(Jumpfrom_warmup_to_mainxep)\n main_exp_pane.jump_to_settingpane_signal.connect(Jump_to_settings)\n settings_pane.refresh_pho_list_in_mainpane_signal.connect(Refresh_exp_settings)\n settings_pane.goto_diy_pho_size_signal.connect(Set_constrain_of_phoszie)\n diy_pho_size_pane.refresh_pho_size_in_mainpane_signal.connect(set_diy_pho_size)\n\n start_pane.show()\n # get_info_pane.show()\n\n 
sys.exit(app.exec_())","repo_name":"xmhh/IQA_Software","sub_path":"PyQt5_Photo_Display/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27027395195","text":"from random import randint, shuffle\n\ndef getWordLenAmount(length): #gets how many word with set length are in the file\n with open('words.txt', 'r') as words:\n amount = 0\n for rawWord in words:\n word = rawWord.strip()\n if len(word) == length:\n amount += 1\n return amount\n\ndef getWord(length, index): #gets the nth word with set length\n result = ''\n with open('words.txt', 'r') as words:\n passedWords = 0\n for rawWord in words:\n word = rawWord.strip()\n if len(word) == length:\n if passedWords < index:\n passedWords += 1\n elif passedWords == index:\n result = word\n passedWords += 1\n return result\n\ndef scrambleWord(word):\n word = list(word)\n shuffle(word)\n return ''.join(word)\n\ndef isAnagram(word, anagram):\n if len(word) < len(anagram):\n return False\n anagram = list(anagram)\n wIndex = 0\n while wIndex < len(word):\n aIndex = 0\n foundMatch = False\n while aIndex < len(anagram):\n if wIndex < len(word):\n if anagram[aIndex] == word[wIndex]:\n anagram.pop(aIndex)\n wIndex += 1\n foundMatch = True\n else:\n aIndex += 1\n else:\n return False\n if not foundMatch:\n wIndex += 1\n if len(anagram) == 0:\n return True\n else:\n return False\n\ndef getAnagrams(word):\n anagrams = []\n for i in range(len(word)-2):\n anagrams.append([])\n with open('words.txt', 'r') as words:\n for rawWord in words:\n anagram = rawWord.strip()\n if len(anagram) > 2 and isAnagram(word, anagram):\n anagrams[len(anagram)-3].append(anagram)\n return anagrams\n\ndef main(scrambled, anagrams):\n if scrambled:\n anagramBool = []\n for x in range(len(anagrams)): \n if anagrams[x]:\n anagramBool.append(False)\n else:\n anagramBool.append(True)\n isEmpty = True\n for x in anagramBool:\n if x == False:\n isEmpty = False\n if isEmpty == True:\n print('~~~You Win!~~~')\n return\n if not scrambled:\n word = getWord(6, randint(1, getWordLenAmount(6)+1))\n scrambled = scrambleWord(word)\n print('TEST: base word is', word, '\\n')\n if not anagrams:\n anagrams = getAnagrams(scrambled)\n print(str(scrambled) + ':\\n')\n for i in range(len(anagrams)):\n nestListLen = 0\n for j in range(len(anagrams[i])):\n nestListLen += 1\n print(nestListLen, str(i+3) + '-letter words')\n guess = input('\\nEnter a guess: ')\n if guess == 'q':\n print('\\nYou gave up.\\nThe anagrams are:')\n newAnagrams = []\n for i in range(len(anagrams)):\n for j in range(len(anagrams[i])):\n newAnagrams.append(anagrams[i][j])\n print(newAnagrams)\n return\n if len(guess) <= len(scrambled):\n hasMatch = False\n for anagram in anagrams[len(guess)-3]:\n if anagram == guess:\n anagrams[len(guess)-3].remove(anagram)\n print('Correct!\\n')\n hasMatch = True\n main(scrambled, anagrams)\n if hasMatch == False:\n print('Incorrect!\\n')\n main(scrambled, anagrams)\n else:\n print('Invalid guess, words of minimum length three.')\n main(scrambled, anagrams)\n \n\nmain('', [])\n\n# main('win', getAnagrams('win'))","repo_name":"rockyspiker/AnagramsPython","sub_path":"hw4.py","file_name":"hw4.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39420658374","text":"# https://leetcode.com/problems/container-with-most-water/\r\nfrom typing import List\r\nfrom tester import 
Tester\r\n\r\n\r\nclass Solution:\r\n def maxArea(self, height: List[int]) -> int:\r\n l, r = 0, len(height) - 1\r\n max_area = 0\r\n while l < r:\r\n max_area = max(max_area, min(height[l], height[r]) * (r - l))\r\n if height[l] < height[r]:\r\n l += 1\r\n else:\r\n r -= 1\r\n return max_area\r\n\r\n\r\nt = Tester(Solution())\r\n\r\n\r\nt.test(49, [1, 8, 6, 2, 5, 4, 8, 3, 7])\r\nt.test(1, [1, 1])\r\nt.test(16, [4, 3, 2, 1, 4])\r\nt.test(2, [1, 2, 1])\r\n\r\nt.report()\r\n","repo_name":"thinhntr/cp","sub_path":"leetcode/Container With Most Water.py","file_name":"Container With Most Water.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"70082235145","text":"from functools import reduce\nfrom typing import List\n\nclass Solution:\n def singleNumber(self, nums: List[int]) -> List[int]:\n # The key is telling the two numbers that appear only once apart; the lowest set bit of xor_sum works for this, because at that bit one of the two unique numbers must be 1 and the other 0\n xor_sum = reduce(lambda x, y : x ^ y, nums)\n low_bit = xor_sum & (-xor_sum)\n xor_sum0 = xor_sum1 = 0\n for num in nums:\n if num & low_bit == 0:\n xor_sum0 ^= num\n else:\n xor_sum1 ^= num\n return [xor_sum0, xor_sum1]\n\n","repo_name":"tiandiyijian/myLeetcode","sub_path":"260.py","file_name":"260.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72670351304","text":"#Source code with the blog post at http://monik.in/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow/\nimport numpy as np\nimport random\nfrom random import shuffle\nimport os\nimport pdb\n\ndef load_data():\n d_prefix='/home/junjuew/deep_learning/data'\n fns = [\n 'char_rnn_data_mat_10k.pkl',\n 'char_rnn_label_mat_10k.pkl', \n 'char_rnn_data_mat_1k_val.pkl', \n 'char_rnn_label_mat_1k_val.pkl'\n ]\n data=[]\n for idx, fn in enumerate(fns):\n data.append(np.load(os.path.join(d_prefix, fn)))\n\n trd=(data[0], data[1])\n ted=(data[2], data[3])\n return trd, ted\n \ntrd, ted = load_data()\nprint('training #: {} testing #: {}'.format(len(trd[0]), len(ted[0])))\n\ndef get_batch(data, s_idx, e_idx):\n return (data[0][s_idx:e_idx,:,:], data[1][s_idx:e_idx,:,])\n\ndef get_batch_num(data, batch_size):\n return int(data[0].shape[0]/ float(batch_size))\n","repo_name":"linmagit/10807-Project","sub_path":"cnn/MyUtils.py","file_name":"MyUtils.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"35239744709","text":"from Dude import *\nfrom designs import AlarmDialog\nfrom PyQt5.QtWidgets import *\nimport sqlite3\nimport datetime\n\n\nclass AlarmDialog(QDialog, AlarmDialog.Ui_Dialog):\n def __init__(self, name='', time='00:00:00', dow='', active=True, to_change=False):\n super().__init__()\n self.setFixedSize(551, 348)\n self.setupUi(self)\n self.setWindowModality(QtCore.Qt.ApplicationModal)\n\n self.HourBox.setValue(int(time.split(':')[0]))\n self.MinuteBox.setValue(int(time.split(':')[1]))\n self.olds = [name, time, dow, active]\n for i in self.daysOfWeek.children():\n if type(i) != QHBoxLayout:\n if i.text()[:3] in dow:\n i.setChecked(True)\n\n if active:\n self.Enabled.setChecked(True)\n\n self.to_change = to_change\n\n if not self.to_change:\n self.DeletButton.hide()\n\n self.DeletButton.clicked.connect(self.delet)\n\n self.connect = sqlite3.connect(\"data/AlarmDB.db\")\n self.cur = self.connect.cursor()\n\n self.buttonBox.accepted.connect(self.add_to_db)\n 
self.buttonBox.rejected.connect(self.cancel)\n\n def delet(self):\n self.cur.execute(f\"\"\"delete from alarm where NameofAlarm = '{self.olds[0]}' and \nTime = '{self.olds[1]}' and \nDaysofWeek = '{self.olds[2]}'\"\"\")\n self.connect.commit()\n self.connect.close()\n self.close()\n\n\n\n\n def add_to_db(self):\n hours = int(self.HourBox.value())\n minutes = int(self.MinuteBox.value())\n\n time_to_add = datetime.time(hours, minutes)\n\n days_of_week = ''\n for i in self.daysOfWeek.children():\n if type(i) != QHBoxLayout:\n if i.isChecked():\n days_of_week += i.text()[0:3] + ' '\n\n name = self.lineEdit.text()\n\n enabled = self.Enabled.isChecked()\n\n if self.to_change:\n if not (self.cur.execute(f\"\"\"select * from alarm where NameOfAlarm = '{name}' \n and daysofweek = '{days_of_week}' and time = '{time_to_add}'\"\"\").fetchall()) or \\\n (self.cur.execute(f\"\"\"select * from alarm where NameOfAlarm = '{name}' \n and daysofweek = '{days_of_week}' and time = '{time_to_add}'\"\"\").fetchall()\n and enabled != self.olds[3]):\n self.cur.execute(f\"\"\"update alarm\n set NameofAlarm = '{name}',\n daysofweek = '{days_of_week}',\n time = '{time_to_add}',\n isactive = {enabled}\n where NameOfAlarm = '{self.olds[0]}' and\n daysofweek = '{self.olds[2]}' and\n time = '{self.olds[1]}'\"\"\")\n\n\n else:\n\n if not (self.cur.execute(f\"\"\"select * from alarm where NameOfAlarm = '{name}' \n and daysofweek = '{days_of_week}' and time = '{time_to_add}'\"\"\").fetchall()):\n self.cur.execute(f\"\"\"insert into \n Alarm(NameOfAlarm, Time, daysofweek, isactive) \n values('{name}', time('{time_to_add}'), '{days_of_week}', {enabled})\"\"\")\n self.connect.commit()\n self.connect.close()\n\n self.close()\n\n def cancel(self):\n self.close()\n","repo_name":"SlavicSandwich/TheChasi","sub_path":"AlarmDialogLogic.py","file_name":"AlarmDialogLogic.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72302641226","text":"import re\nimport os\nfrom collections import namedtuple, OrderedDict\n\nEnumImport = namedtuple(\"EnumImport\",\n \"source_name destination_name name_overrides\")\n\n# #define constants.\nconstants = re.compile(r\"#define ([^\\s]+)\\s+(.+)\")\n\n# Enums.\nenums = re.compile(r\"enum[^{]+\\{[^}]+\\};\")\n\n# Name of an enum.\nenum_name = re.compile(r\"enum\\s+([^\\s{]+)\")\n\n# Enum contents between the braces.\nenum_contents = re.compile(r\"{([^}]+)};\")\n\n# Enum value with an explicit value.\nenum_explicit_value = re.compile(r\"(?:\\s*([^\\s]+)\\s*=\\s*([^\\s,]+),?)$\",\n flags=re.MULTILINE)\n\n# Enum value with an implicit value.\nenum_implicit_value = re.compile(r\"(?:^\\s*([^\\s,]+),?$)\", flags=re.MULTILINE)\n\n\nclass ConstantsParser:\n def __init__(self, input_file):\n self.source_files = []\n self.imported_constants = {}\n self.imported_enums = {}\n self.constant_values = OrderedDict()\n self.enum_values = OrderedDict()\n\n manual_suffixes = OrderedDict()\n\n section = None\n for line in input_file:\n # Skip blank lines and comments\n if line == \"\\n\" or line.startswith(\"#\"):\n continue\n\n # Set section based on header\n if line.endswith(\":\\n\"):\n section = line[:-2]\n continue\n\n if section == \"file\":\n self.source_files.append(line[:-1])\n\n elif section == \"constant\":\n words = line.split()\n assert len(words) <= 2\n # Export as the source name if only one is provided; otherwise\n # use the override.\n self.imported_constants[words[0]] = words[-1]\n\n elif section == 
\"enum\":\n words = line.split()\n assert len(words) >= 2\n\n name_overrides = {}\n if len(words) > 2:\n overrides = words[2:]\n # Pairs: source_name dest_name\n assert len(overrides) % 2 == 0\n index = 0\n while index + 1 < len(overrides):\n name_overrides[overrides[index]] = overrides[index + 1]\n index += 2\n\n target = EnumImport(words[0], words[1], name_overrides)\n\n self.imported_enums[target.source_name] = target\n\n elif section.startswith(\"manual\"):\n target_container = None\n if section == \"manual_prefix\":\n target_container = self.constant_values\n elif section == \"manual\" or section == \"manual_suffix\":\n target_container = manual_suffixes\n\n if target_container is not None:\n # Separate the key, and only the key, by whitespace.\n # The remainder of the line is the value.\n key, value = line.split(None, 1)\n target_container[key] = value.rstrip()\n\n for filename in self.source_files:\n # Resolve paths in input file relative to its location.\n input_dir = os.path.dirname(input_file.name)\n file = open(os.path.join(input_dir, filename)).read()\n\n for constant in constants.findall(file):\n name, value = constant\n\n if name not in self.imported_constants:\n continue\n\n name = self.imported_constants.pop(name)\n self.constant_values[name] = value\n\n for enum in enums.findall(file):\n # TODO: Could generate Enum classes only?\n # TODO: Option to consider an enum individual constants?\n name_search = enum_name.search(enum)\n if not name_search:\n continue\n\n name = name_search.group(1)\n if not name in self.imported_enums:\n continue\n\n enum_definition = self.imported_enums.pop(name)\n\n contents = enum_contents.search(enum).group(1)\n\n name = enum_definition.destination_name\n self.enum_values[name] = OrderedDict()\n enum_values = self.enum_values[name]\n\n explicit_values = enum_explicit_value.findall(contents)\n if explicit_values:\n for name, value in explicit_values:\n if name in enum_definition.name_overrides:\n name = enum_definition.name_overrides[name]\n\n enum_values[name] = value\n\n implicit_values = enum_implicit_value.findall(contents)\n # If there are any explicit values this assumes either all\n # values are explicit or only the first is explicit and the rest\n # are implicit.\n # TODO: Use C constants for extracted enums?\n if implicit_values:\n assert len(explicit_values) <= 1\n value = 0\n if explicit_values:\n name = explicit_values[0][0]\n if name in enum_definition.name_overrides:\n name = enum_definition.name_overrides[name]\n\n value = int(enum_values[name], base=0) + 1\n\n for name in implicit_values:\n if name in enum_definition.name_overrides:\n name = enum_definition.name_overrides[name]\n\n enum_values[name] = value\n value += 1\n\n assert enum_values\n\n if self.imported_constants:\n names = list(self.imported_constants)\n raise Exception(\"constants {} not found\".format(names))\n\n if self.imported_enums:\n names = list(self.imported_enums)\n raise Exception(\"enums {} not found\".format(names))\n\n # Add manual suffixes now that all values are loaded.\n for name, value in manual_suffixes.items():\n self.constant_values[name] = value\n","repo_name":"barracudanetworks/constantsgen","sub_path":"constantsgen/constantsparse.py","file_name":"constantsparse.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34771692685","text":"import sys, os\nimport pandas as pd\n\ndef clr():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef 
banner():\n print(\"\"\"\n ╭━━━╮╱╱╱╭╮╭╮\n ┃╭━╮┃╱╱╭╯╰┫┃\n ┃╰━╯┣╮╱┣╮╭┫╰━┳━━┳━╮\n ┃╭━━┫┃╱┃┃┃┃╭╮┃╭╮┃╭╮╮\n ┃┃╱╱┃╰━╯┃╰┫┃┃┃╰╯┃┃┃┃\n ╰╯╱╱╰━╮╭┻━┻╯╰┻━━┻╯╰╯\n ╱╱╱╱╭━╯┃ V1.0.0\n ╱╱╱╱╰━━╯\n ╭━━━╮╱╱╱╱╱╱╱╱╱╱╱╱╱╭╮\n ┃╭━╮┃╱╱╱╱╱╱╱╱╱╱╱╱╭╯╰╮\n ┃┃╱╰╋━━┳━╮╭╮╭┳━━┳┻╮╭╋━━┳━╮\n ┃┃╱╭┫╭╮┃╭╮┫╰╯┃┃━┫╭┫┃┃┃━┫╭╯\n ┃╰━╯┃╰╯┃┃┃┣╮╭┫┃━┫┃┃╰┫┃━┫┃\n ╰━━━┻━━┻╯╰╯╰╯╰━━┻╯╰━┻━━┻╯\n\n Contact : https://wa.me/+6281251389915\n About Developer : https://github.com/Nux-xader\n ________________________________________________\n\"\"\")\n\nclass Load:\n\tdef __init__(self, path):\n\t\tself.path = path\n\n\n\tdef json_reader(self):\n\t\ttry:\n\t\t\tdf = pd.read_json(self.path)\n\t\t\treturn df\n\t\texcept:\n\t\t\treturn False\n\n\nclass Converter:\n\tdef __init__(self, df):\n\t\tself.df = df\n\n\tdef df_to_dict(self):\n\t\treturn self.df.to_dict()\n\n\nclass Dump():\n\tdef __init__(self, df, path):\n\t\tself.df = df\n\t\tself.path = path\n\n\tdef as_xlsx(self):\n\t\texcel_wr = pd.ExcelWriter(self.path)\n\t\tself.df.to_excel(excel_wr)\n\t\texcel_wr.save()\n\n\ndef main():\n\tclr()\n\tbanner()\n\twhile True:\n\t\tpath = str(input(\" Json file : \"))\n\t\tdf = Load(path).json_reader()\n\t\tif df is not False:\n\t\t\tbreak\n\t\tprint(f\" [!] File {path} not found\")\n\n\tpath_save = str(input(\" Save result to : \"))\n\tprint(\" [+] Converting to xlsx ...\")\n\tif not path_save.endswith(\".xlsx\"): path_save += \".xlsx\"\n\tdumper = Dump(df, path_save)\n\tdumper.as_xlsx()\n\tprint(\" [+] Success convert from json to xlsx\")\n\nif __name__ == '__main__':\n\tmain()","repo_name":"Nux-xader/py_converter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"30112306031","text":"import scrapy\nfrom residentscrape.items import GoogleMapItem\nimport MySQLdb\nimport os\nimport datetime\nfrom scrapy.utils.project import get_project_settings\nimport logging\nimport json\n# from postal.parser import parse_address\n\nclass GoogleMapSpider(scrapy.Spider):\n name = \"GoogleMapSpider\"\n\n domain = \"https://maps.googleapis.com\"\n\n logger = logging.getLogger(\"GoogleMapSpider\")\n\n project_settings = get_project_settings()\n\n custom_settings = {\n # \"AUTOTHROTTLE_ENABLED\": True,\n # \"AUTOTHROTTLE_START_DELAY\": 1,\n # \"AUTOTHROTTLE_MAX_DELAY\": 2,\n # \"AUTOTHROTTLE_TARGET_CONCURRENCY\": 5,\n \"CONCURRENT_REQUESTS_PER_DOMAIN\": 2,\n # \"DOWNLOAD_DELAY\": 1,\n \"SOURCE_ID\": project_settings['GOOGLEMAP_SOURCE_ID']\n }\n\n\n def start_requests(self):\n self.custom_settings = get_project_settings()\n self.APIURL = 'https://maps.googleapis.com/maps/api/geocode/json?key={}&address='.format(self.custom_settings['GOOGLE_API_KEY'])\n ## Get URLs from SQL\n password = os.environ.get('SECRET_KEY')\n self.conn = MySQLdb.connect(host=self.custom_settings['HOST'], port=3306, user=self.custom_settings['SQLUSERNAME'],\n passwd=password, db=self.custom_settings['DATABASE'])\n self.cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)\n # query = \"SELECT * FROM scrape_Venues where isTBA=0\"\n query = \"SELECT * FROM scrape_Venues where googleResultsCount>2 and googleAddressID = -1;\"\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n self.logger.info(\"Total Rows: \"+str(len(rows)))\n for row in rows:\n\n query = None\n\n ## check for GoogleMap Links\n if len(row['googleMaps']) > 0 and 'http://maps.google.com/maps?' 
in row['googleMaps']:\n query = row['googleMaps'].strip('http://maps.google.com/maps?q=').lower()\n else:\n q1 = None\n\n columns = ['venueStreet', 'venueCity', 'venueState', 'venueZip', 'venueCountry']\n qdata = []\n\n for x in columns:\n if len(row[x]) > 1:\n qdata.append(row[x])\n q1 = ', '.join(qdata)\n\n if len(q1) > len(row['venueFullAddress']):\n query = q1\n else:\n query = row['venueFullAddress']\n\n # if row['sourceID'] == 1:\n query = row['venueName']+', '+ query\n\n\n\n\n if query is not None :\n\n ## Check if query in cache\n sqlquery = \"SELECT * FROM scrape_GoogleQueries WHERE query =%s;\"\n results = self.cursor.execute(sqlquery,[query])\n\n if results > 0 and row['googleResultsCount'] < 2:\n\n self.logger.info('Query: {} already exist in cached data'.format(query))\n data = self.cursor.fetchone()\n self.update_venue_google_address_id(data['ID'],data['googleAddressID'],data['count'],row['scrapeVenueID'])\n\n else:\n request = scrapy.Request(url=self.APIURL+query, callback=self.parse, dont_filter=True)\n request.meta['query'] = query\n request.meta['venueID'] = row['scrapeVenueID']\n yield request\n\n def parse(self, response):\n\n item = GoogleMapItem()\n for field in item.fields:\n item.setdefault(field, '')\n item['longitude'] = None\n item['lattitude'] = None\n item['addressID'] = -1\n item['queryID'] = -1\n\n item['query'] = response.meta['query']\n item['venueID'] = response.meta['venueID']\n data = json.loads(response.text)\n item['sourceText'] = data\n item['sourceURL'] = response.url\n item['resultCount'] = len(data['results'])\n\n if data['status'] == 'OK' and item['resultCount'] == 1:\n data = data['results'][0]\n self.extract_info(data,item)\n elif data['status'] == 'OK' and item['resultCount'] > 1:\n required_list = ['bar', 'club', 'stadium']\n for i in data['results']:\n if 'bar' in i['types'] or 'club' in i['types'] or 'stadium' in i['types'] or 'night_club' in i['types']:\n data = i\n self.extract_info(data, item)\n break\n\n\n yield item\n\n def extract_info(self,data,item):\n item['address_types'] = data.get('types', '')\n item['formatted_address'] = data.get('formatted_address', '')\n item['sourceRef'] = data.get('place_id', '-1')\n\n try:\n for x in data['address_components']:\n key = x['types'][0]\n if key in item:\n item[key] = x['long_name']\n except Exception as e:\n self.logger.error(e)\n\n ## Extract Geo Cordinates\n try:\n geo = data['geometry']['location']\n item['longitude'] = geo.get('lng', None)\n item['lattitude'] = geo.get('lat', None)\n except:\n pass\n\n\n def update_venue_google_address_id(self,googleQueryID, googleAddressID,googleResultsCount, scrapeVenueID):\n now = datetime.datetime.now()\n try:\n self.cursor.execute(\"\"\"UPDATE scrape_Venues\n SET googleAddressID=%s,\n googleQueryID=%s,\n googleResultsCount=%s, refreshed=%s\n WHERE scrapeVenueID=%s\"\"\",\n (\n googleAddressID,\n googleQueryID,\n googleResultsCount,\n now,\n scrapeVenueID\n ))\n\n self.conn.commit()\n\n except(MySQLdb.Error) as e:\n self.logger.error(\"Method: (update_venue_google_address_id) Error %d: %s\" % (e.args[0], e.args[1]))\n\n pass\n\n\n","repo_name":"sachinrcz/residentscraper","sub_path":"residentscrape/spiders/GoogleMapSpider.py","file_name":"GoogleMapSpider.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1787523235","text":"#!/usr/bin/env python\nimport io\nimport json\nimport os\nimport requests\nimport requests_mock\nimport six\nimport 
tempfile\nimport unittest\n\n\nsix.add_move(six.MovedModule('mock', 'mock', 'unittest.mock'))\nfrom six.moves import mock # noqa\n\nfrom cromwell_tools.cromwell_api import CromwellAPI # noqa\nfrom cromwell_tools.cromwell_auth import CromwellAuth # noqa\nfrom cromwell_tools import utilities as utils # noqa\n\n\nclass TestAPI(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # Change to test directory, as tests may have been invoked from another dir\n dir_ = os.path.abspath(os.path.dirname(__file__))\n os.chdir(dir_)\n\n def setUp(self):\n self.wdl_file = io.BytesIO(b\"wdl_file_content\")\n self.zip_file = io.BytesIO(b\"zip_file_content\")\n self.inputs_file = io.BytesIO(b\"inputs_file_content\")\n self.options_file = io.BytesIO(b\"options_file_content\")\n self.label = io.BytesIO(b'{\"test-label-key\": \"test-label-value\"}')\n self.auth_options = self.set_up_auth()\n\n @mock.patch(\n 'cromwell_tools.cromwell_auth.CromwellAuth.from_service_account_key_file'\n )\n def set_up_auth(self, mock_header):\n # set up authentication options for the tests\n temp_dir = tempfile.mkdtemp()\n secrets_file = temp_dir + 'fake_secrets.json'\n service_account_key = os.path.join(temp_dir, 'fake_key.json')\n username = \"fake_user\"\n password = \"fake_password\"\n url = \"https://fake_url\"\n auth = {\"url\": url, \"username\": username, \"password\": password}\n with open(secrets_file, 'w') as f:\n json.dump(auth, f)\n mock_header.return_value = CromwellAuth(\n url=url, header={\"Authorization\": \"bearer fake_token\"}, auth=None\n )\n\n auth_options = (\n CromwellAuth.harmonize_credentials(**auth), # HTTPBasicAuth\n CromwellAuth.harmonize_credentials(\n **{\"secrets_file\": secrets_file}\n ), # Secret file\n CromwellAuth.harmonize_credentials(\n **{\"service_account_key\": service_account_key, \"url\": url}\n ), # OAuth\n CromwellAuth.harmonize_credentials(url=url), # No Auth\n )\n return auth_options\n\n def _submit_workflows(self, cromwell_auth, mock_request, _request_callback):\n mock_request.post(\n cromwell_auth.url + '/api/workflows/v1', json=_request_callback\n )\n return CromwellAPI.submit(\n auth=cromwell_auth,\n wdl_file=self.wdl_file,\n inputs_files=self.inputs_file,\n options_file=self.options_file,\n dependencies=self.zip_file,\n label_file=self.label,\n )\n\n @requests_mock.mock()\n def test_submit_workflow(self, mock_request):\n def _request_callback(request, context):\n context.status_code = 200\n context.headers['test'] = 'header'\n return {'request': {'body': \"content\"}}\n\n for cromwell_auth in self.auth_options:\n result = self._submit_workflows(\n cromwell_auth, mock_request, _request_callback\n )\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.headers.get('test'), 'header')\n\n @requests_mock.mock()\n def test_submit_workflow_handlers_error_response(self, mock_request):\n def _request_callback(request, context):\n context.status_code = 500\n context.headers['test'] = 'header'\n return {'status': 'error', 'message': 'Internal Server Error'}\n\n # Check request actions\n for cromwell_auth in self.auth_options:\n with self.assertRaises(requests.HTTPError):\n self._submit_workflows(\n cromwell_auth, mock_request, _request_callback\n ).raise_for_status()\n\n @requests_mock.mock()\n def test_query_workflows_returns_200(self, mock_request):\n query_dict = {\n 'status': ['Running', 'Failed'],\n 'label': {'label_key1': 'label_value1', 'label_key2': 'label_value2'},\n }\n\n def _request_callback(request, context):\n context.status_code = 200\n 
context.headers['test'] = 'header'\n return {\n 'results': [\n {\n 'name': 'workflow1',\n 'submission': 'submission1',\n 'id': 'id1',\n 'status': 'Failed',\n 'start': 'start1',\n 'end': 'end1',\n },\n {\n 'name': 'workflow2',\n 'submission': 'submission2',\n 'id': 'id2',\n 'status': 'Running',\n 'start': 'start2',\n 'end': 'end2',\n },\n ],\n 'totalResultsCount': 2,\n }\n\n for cromwell_auth in self.auth_options:\n mock_request.post(\n '{}/api/workflows/v1/query'.format(cromwell_auth.url),\n json=_request_callback,\n )\n result = CromwellAPI.query(query_dict, cromwell_auth)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json()['totalResultsCount'], 2)\n\n def test_compose_query_params_can_compose_simple_query_dicts(self):\n query_dict = {\n 'status': 'Running',\n 'start': '2018-01-01T00:00:00.000Z',\n 'end': '2018-01-01T12:00:00.000Z',\n 'label': {'Comment': 'test'},\n 'page': 1,\n 'pageSize': 10,\n }\n\n expect_params = [\n {'status': 'Running'},\n {'start': '2018-01-01T00:00:00.000Z'},\n {'end': '2018-01-01T12:00:00.000Z'},\n {'label': 'Comment:test'},\n {'page': '1'},\n {'pageSize': '10'},\n ]\n\n six.assertCountEqual(\n self, CromwellAPI._compose_query_params(query_dict), expect_params\n )\n\n def test_compose_query_params_can_compose_nested_query_dicts(self):\n query_dict = {\n 'status': ['Running', 'Failed', 'Submitted'],\n 'start': '2018-01-01T00:00:00.000Z',\n 'end': '2018-01-01T12:00:00.000Z',\n 'label': {'Comment1': 'test1', 'Comment2': 'test2', 'Comment3': 'test3'},\n }\n\n expect_params = [\n {'status': 'Running'},\n {'status': 'Failed'},\n {'status': 'Submitted'},\n {'start': '2018-01-01T00:00:00.000Z'},\n {'end': '2018-01-01T12:00:00.000Z'},\n {'label': 'Comment1:test1'},\n {'label': 'Comment2:test2'},\n {'label': 'Comment3:test3'},\n ]\n six.assertCountEqual(\n self, CromwellAPI._compose_query_params(query_dict), expect_params\n )\n\n def test_compose_query_params_can_convert_bools_within_query_dicts(self):\n query_dict = {\n 'status': ['Running', 'Failed', 'Submitted'],\n 'start': '2018-01-01T00:00:00.000Z',\n 'end': '2018-01-01T12:00:00.000Z',\n 'label': {'Comment1': 'test1', 'Comment2': 'test2', 'Comment3': 'test3'},\n 'includeSubworkflows': True,\n }\n\n expect_params = [\n {'status': 'Running'},\n {'status': 'Failed'},\n {'status': 'Submitted'},\n {'start': '2018-01-01T00:00:00.000Z'},\n {'end': '2018-01-01T12:00:00.000Z'},\n {'label': 'Comment1:test1'},\n {'label': 'Comment2:test2'},\n {'label': 'Comment3:test3'},\n {'includeSubworkflows': 'true'},\n ]\n six.assertCountEqual(\n self, CromwellAPI._compose_query_params(query_dict), expect_params\n )\n\n def test_compose_query_params_raises_error_for_invalid_query_dict_that_has_multiple_values_for_exclusive_keys(\n self,\n ):\n query_dict = {\n 'status': ['Running', 'Failed', 'Submitted'],\n 'start': ['2018-01-01T00:00:00.000Z', '2018-01-02T00:00:00.000Z'],\n 'end': '2018-01-01T12:00:00.000Z',\n 'label': {'Comment1': 'test1', 'Comment2': 'test2', 'Comment3': 'test3'},\n }\n\n with self.assertRaises(ValueError):\n CromwellAPI._compose_query_params(query_dict)\n\n @requests_mock.mock()\n def test_path_labels_returns_200(self, mock_request):\n workflow_id = 'labeltest'\n new_label = {'foo': 'bar'}\n\n def _request_callback(request, context):\n context.status_code = 200\n context.headers['test'] = 'header'\n return {'id': request.url.split('/')[-2], 'labels': new_label}\n\n for cromwell_auth in self.auth_options:\n mock_request.patch(\n '{0}/api/workflows/v1/{1}/labels'.format(\n cromwell_auth.url, 
workflow_id\n ),\n json=_request_callback,\n )\n result = CromwellAPI.patch_labels(workflow_id, new_label, cromwell_auth)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json()['id'], workflow_id)\n self.assertEqual(result.json()['labels'], new_label)\n\n @requests_mock.mock()\n def test_release_onhold_returns_200(self, mock_request):\n workflow_id = '12345abcde'\n\n def _request_callback(request, context):\n context.status_code = 200\n context.headers['test'] = 'header'\n return {'id': request.url.split('/')[-2], 'status': 'Submitted'}\n\n for cromwell_auth in self.auth_options:\n mock_request.post(\n '{0}/api/workflows/v1/{1}/releaseHold'.format(\n cromwell_auth.url, workflow_id\n ),\n json=_request_callback,\n )\n result = CromwellAPI.release_hold(workflow_id, cromwell_auth)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json()['id'], workflow_id)\n self.assertEqual(result.json()['status'], 'Submitted')\n\n @requests_mock.mock()\n def test_release_workflow_that_is_not_on_hold_returns_error(self, mock_request):\n workflow_id = 'test'\n\n def _request_callback(request, context):\n context.status_code = 403\n context.headers['test'] = 'header'\n return {\n 'status': 'error',\n 'message': 'Couldn\\'t change status of workflow {} to \\'Submitted\\' because the workflow is not in '\n '\\'On Hold\\' state'.format(request.url.split('/')[-2]),\n }\n\n for cromwell_auth in self.auth_options:\n mock_request.post(\n '{0}/api/workflows/v1/{1}/releaseHold'.format(\n cromwell_auth.url, workflow_id\n ),\n json=_request_callback,\n )\n with self.assertRaises(requests.exceptions.HTTPError):\n CromwellAPI.release_hold(workflow_id, cromwell_auth).raise_for_status()\n\n @requests_mock.mock()\n def test_metadata_returns_200(self, mock_request):\n workflow_id = '12345abcde'\n test_include_key = 'workflow'\n\n def _request_callback(request, context):\n context.status_code = 200\n context.headers['test'] = 'header'\n return {'id': '12345abcde', 'actualWorkflowLanguageVersion': 'draft-2'}\n\n for cromwell_auth in self.auth_options:\n mock_request.get(\n '{0}/api/workflows/v1/{1}/metadata?expandSubWorkflows=false&includeKey={2}'.format(\n cromwell_auth.url, workflow_id, test_include_key\n ),\n json=_request_callback,\n )\n result = CromwellAPI.metadata(\n workflow_id, cromwell_auth, includeKey=test_include_key\n )\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json()['id'], workflow_id)\n\n @requests_mock.mock()\n def test_health_returns_200(self, mock_request):\n expected = {\n \"DockerHub\": {\"ok\": \"true\"},\n \"Engine Database\": {\"ok\": \"true\"},\n \"PAPI\": {\"ok\": \"true\"},\n \"GCS\": {\"ok\": \"true\"},\n }\n\n def _request_callback(request, context):\n context.status_code = 200\n context.headers['test'] = 'header'\n return expected\n\n for cromwell_auth in self.auth_options:\n mock_request.get(\n '{0}/engine/v1/status'.format(cromwell_auth.url), json=_request_callback\n )\n result = CromwellAPI.health(cromwell_auth)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json(), expected)\n\n @requests_mock.mock()\n def test_abort(self, mock_request):\n workflow_id = \"01234\"\n expected = {\"id\": workflow_id, \"status\": \"Aborting\"}\n\n def _request_callback(request, context):\n context.status_code = 200\n context.headers['test'] = 'header'\n return expected\n\n for cromwell_auth in self.auth_options:\n mock_request.post(\n cromwell_auth.url + '/api/workflows/v1/{}/abort'.format(workflow_id),\n 
json=_request_callback,\n )\n result = CromwellAPI.abort(workflow_id, cromwell_auth)\n self.assertEqual(result.json(), expected)\n\n @requests_mock.mock()\n def test_status(self, mock_request):\n def _request_callback_status(request, context):\n context.status_code = 200\n context.headers['test'] = 'header'\n return {'status': 'Succeeded'}\n\n workflow_id = \"01234\"\n for cromwell_auth in self.auth_options:\n mock_request.get(\n cromwell_auth.url + '/api/workflows/v1/{}/status'.format(workflow_id),\n json=_request_callback_status,\n )\n result = CromwellAPI.status(workflow_id, cromwell_auth)\n self.assertEqual(result.json()['status'], 'Succeeded')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"broadinstitute/cromwell-tools","sub_path":"cromwell_tools/tests/test_cromwell_api.py","file_name":"test_cromwell_api.py","file_ext":"py","file_size_in_byte":14189,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"81"} +{"seq_id":"71571362186","text":"\"\"\"\nLanding Screen\n\nPlayer will choose to create or join a game from here.\n\"\"\"\nimport asyncio\nimport nest_asyncio\nimport sys\nimport traceback\nimport typing as T\nimport websockets\n\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import uic\nfrom qasync import asyncSlot\n\nfrom client.screens.battle_window import Ui as BattleWindow\nfrom client.screens.lobby_window import Ui as LobbyWindow\nfrom server.api.lobby import CreateGameResponse\nfrom utils.error_window import error_window\nfrom utils.server_config import ServerConfig\n\nif T.TYPE_CHECKING:\n from utils.client import AsynchronousServerClient\n from server.api.user import User\n\nnest_asyncio.apply()\n\n\nclass Ui(QtWidgets.QMainWindow):\n\n def __init__(self, client: \"AsynchronousServerClient\", server_config: ServerConfig):\n super(Ui, self).__init__()\n self.server_config = server_config\n self.client: \"AsynchronousServerClient\" = client\n uic.loadUi('client/qtassets/landingscreen.ui', self)\n\n # add hooks for UI buttons\n self.createMatch = self.findChild(QtWidgets.QPushButton, \"createMatch\")\n self.createMatch.clicked.connect(self.create_match_callback)\n self.joinMatch = self.findChild(QtWidgets.QPushButton, \"joinMatch\")\n self.joinMatch.clicked.connect(self.join_match_callback)\n self.configMenu = self.findChild(QtWidgets.QPushButton, \"configMenu\")\n self.configMenu.clicked.connect(self.config_callback)\n\n self._lobby_window = None\n self._battle_window = None\n\n self.show()\n\n def set_user(self, user: \"User\"):\n self.user = user\n\n @asyncSlot()\n async def create_match_callback(self):\n self.createMatch.setDisabled(True)\n self.createMatch.setText(\"Creating Match...\")\n try:\n game_response: CreateGameResponse = await self.client.create_game(self.user)\n print(f'Created game with id {game_response.game_id}')\n # join the game and open the lobby window\n join_response = await self.client.join_game(game_response.game_id, self.user)\n if not join_response.success:\n raise Exception(\"Failed to join game\")\n if self._lobby_window is not None:\n self._lobby_window.game_id = game_response.game_id\n else:\n self._lobby_window = LobbyWindow(\n parent=self,\n user=self.user,\n client=self.client,\n game_id=game_response.game_id\n )\n # start subscribing to state updates over pubsub\n self._lobby_window.start_pubsub_subscription()\n print(self.server_config.pubsub_path)\n self._lobby_window.pubsub_client.start_client(self.server_config.pubsub_path)\n # do not transfer windows until 
pubsub connection established\n await self._lobby_window.pubsub_client.wait_until_ready()\n self._lobby_window.show()\n self.hide()\n except Exception as exc:\n error_window(str(exc))\n finally:\n self.createMatch.setDisabled(False)\n self.createMatch.setText(\"Create Match\")\n\n async def open_battle_window(self):\n # always instantiate a new window\n # TODO: garbage collect the old window...\n # try to get a game ID\n game_id = self._lobby_window.game_id\n\n # open a WebSocket\n try:\n ws_addr = self.server_config.websocket_path\n print(f'Opening WebSocket connection to {ws_addr}')\n ws = await websockets.connect(ws_addr)\n except Exception as exc:\n error_window(f'Unable to connect to the game: {repr(exc)}')\n raise\n\n try:\n print('Init Battle Window')\n self._battle_window = BattleWindow(user=self.user, client=self.client, game_id=game_id, websocket=ws)\n print('show Battle Window')\n self._battle_window.show()\n print('Subscribe States')\n self._battle_window.subscribe_pubsub_state()\n self._battle_window.subscribe_pubsub_messages()\n print('Start client')\n self._battle_window.pubsub_client.start_client(self.server_config.pubsub_path)\n await self._battle_window.pubsub_client.wait_until_ready()\n self.hide()\n print('done')\n except Exception as exc:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback)\n print(f'Failed to open battle window: {repr(exc)}')\n print('Running forever')\n await self._battle_window.pubsub_client.wait_until_done()\n\n @asyncSlot()\n async def join_match_callback(self):\n # TODO: add a lobby-join screen instead of just taking first game found\n\n # try joining the first game\n try:\n self.joinMatch.setDisabled(True)\n self.joinMatch.setText(\"Joining Match...\")\n\n games = await self.client.get_joinable_games()\n if not games:\n error_window(\"No valid games. 
Try creating one!\")\n return\n\n resp = await self.client.join_game(games[0], self.user)\n if resp.success:\n # create a lobby window with the START GAME button disabled\n self._lobby_window = LobbyWindow(self, self.user, self.client, game_id=games[0])\n # start subscribing to state updates over pubsub\n self._lobby_window.start_pubsub_subscription()\n print(self.server_config.pubsub_path)\n self._lobby_window.pubsub_client.start_client(self.server_config.pubsub_path)\n await self._lobby_window.pubsub_client.wait_until_ready()\n self._lobby_window.show()\n self.hide()\n except Exception as exc:\n error_window(f'Failed to join game: {repr(exc)}')\n finally:\n self.joinMatch.setDisabled(False)\n self.joinMatch.setText(\"Join Match\")\n\n @asyncSlot()\n async def config_callback(self):\n print('Opening config window')\n","repo_name":"chillymango/autobattler_demo","sub_path":"client/screens/landing_window.py","file_name":"landing_window.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4891794080","text":"import pandas as pd\n\nfrom scripts.adapters.pandas import Pandas\n\nstorage_options = {\n \"key\": \"minio\",\n \"secret\": \"minio123\",\n \"client_kwargs\": {\"endpoint_url\": \"http://minio:9000\"},\n}\n\n\ndef test_get_classes():\n\n df_expected = pd.DataFrame(\n {\n \"product_id\": [\"1\", \"2\", \"3\", \"4\", \"5\"],\n \"title\": [\"maça\", \"chuchu\", \"agrião\", \"pera\", \"tomate\"],\n \"concatenated_tags\": [\n \"red gala fuji verde\",\n \"verde mucho maduro\",\n \"amargo verde escuro\",\n \"amarela doce macia\",\n \"vermelho grande cereja\",\n ],\n \"category\": [\"fruta\", \"legume\", \"verdura\", \"fruta\", \"fruta\"],\n }\n )\n # print(df_expected)\n # print(\"carammmaabaaaaaaS\")\n path = \"s3://data/\" + \"test_integration_2.csv\"\n df_expected.to_csv(path, storage_options=storage_options)\n\n # pandas = Pandas(storage_options=storage_options)\n\n # df_received = pandas.get_data(\n # base_path=\"s3://data/\", file_name=\"test_integration_2.csv\"\n # )\n\n df_sent = Pandas._read_data_from_s3(storage_options=storage_options, filepath=path)\n\n print(df_sent)\n categories = Pandas.get_classes(storage_options=storage_options, filepath=path)\n\n print(categories)\n\n assert len(categories) == 3\n assert \"fruta\" in categories\n assert \"verdura\" in categories\n assert \"legume\" in categories\n\n\ndef test_get_data():\n\n df_expected = pd.DataFrame(\n {\n \"product_id\": [\"1\", \"2\", \"3\", \"4\", \"5\"],\n \"title\": [\"maça\", \"chuchu\", \"agrião\", \"pera\", \"tomate\"],\n \"concatenated_tags\": [\n \"red gala fuji verde\",\n \"verde mucho maduro\",\n \"amargo verde escuro\",\n \"amarela doce macia\",\n \"vermelho grande cereja\",\n ],\n \"category\": [\"fruta\", \"legume\", \"verdura\", \"fruta\", \"fruta\"],\n }\n )\n\n path = \"s3://data/\" + \"test_integration_3.csv\"\n df_expected.to_csv(path, storage_options=storage_options)\n\n result = Pandas._get_data(storage_options=storage_options, filepath=path)\n\n assert len(result.columns) == 5\n assert \"fruta\" in result.columns\n assert \"verdura\" in result.columns\n assert \"legume\" in result.columns\n","repo_name":"Fernando-Freire/Machine-Learning-Project-Template","sub_path":"scripts/tests/integration/adapters/test_pandas.py","file_name":"test_pandas.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"42377554705","text":"import os\nimport json\nfrom datetime import datetime, timedelta\nfrom analysisfacebook.collect import api\nRESULT_DIRECTORY = '__results__/crawling'\ndef preprocess_post(post):\n #공유수\n if 'shares' not in post:\n post['count_shares'] = 0\n else:\n post['count_shares'] = post['shares']['count']\n #전체 코멘트 수\n if 'comments' not in post:\n post['count_comments'] = 0\n else:\n post['count_comments'] = post['comments']['summary']['total_count']\n\n #KST = UTC +9\n kst = datetime.strptime(post['created_time'], '%Y-%m-%d%H:%S+0000' )\n kst = kst + timedelta(hours=9)\n post['created_time']=kst.strtime('$Y-%m-%d $HL%M:%S')\n\ndef crawling(pagename, since, until) :\n results = []\n filename = '%s/%s_%s_%s.json' % (RESULT_DIRECTORY, pagename, since, until)\n\n for posts in api.pd_fetch_posts(pagename, since, until):\n for post in posts:\n preprocess_post(post)\n\n results += posts\n\n\n #save results to file (저장, 적재)\n with open(filename, 'w', encoding='utf-8' ) as outfile:\n json_string = json.dumps(results, indent=4, sort_keys = True, ensure_ascii=False)\n outfile.write(json_string)\n\nif os.path.exists(RESULT_DIRECTORY) is False:\n os.makedirs(RESULT_DIRECTORY)\n\n","repo_name":"chokwanglae/PycharmProjects","sub_path":"analysisfacebook/collect/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36460294378","text":"import pytest\r\n\r\nfrom network import Node, CommunicationNetwork, InvalidNetworkException\r\nfrom person import Person\r\nfrom messaging import Key\r\nfrom registry import Registry, RegistryException\r\n\r\n\r\ndef test_registry():\r\n \"\"\"\r\n Create a valid network, then remove a node that makes it invalid, expect an InvalidNetworkException\r\n :return:\r\n \"\"\"\r\n reg = Registry()\r\n \r\n alice = Person(\"alice\", Key(\"this is a simple text to train create the key\"))\r\n bob = Person(\r\n \"bob\",\r\n Key(\"this is another text. this is another text. this is another text. 
bob\"),\r\n )\r\n\r\n reg.insert(bob._id, 1, \"saldmsla\")\r\n\r\n assert reg.is_connected(bob._id)\r\n\r\n reg.delete(bob._id)\r\n\r\n assert reg.is_connected(bob._id) == False\r\n\r\n assert reg.get_serialized_key(alice._id) is None\r\n\r\n assert reg.get_node_id(alice._key) is None\r\n \r\n reg.insert(bob._id, 1, \"saldmsla\")\r\n \r\n with pytest.raises(RegistryException):\r\n reg.insert(bob._id, 1, \"saldmsla\")\r\n\r\n","repo_name":"AliKhanat88/CommunicationNetwork","sub_path":"tests/test_registry.py","file_name":"test_registry.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10038643625","text":"# Nearest resize 함수 완성하기\n\nimport numpy as np, cv2\n\n\ndef my_resize(logo, dsize, fx=0.0, fy=0.0):\n originY = logo.shape[0]\n originX = logo.shape[1]\n\n # tuple에서 list로 변환\n dsize = list(dsize)\n # 둘 중에 하나라도 0이 아니라면\n if dsize[0] * dsize[1] != 0:\n # X, Y 값이기 때문에 Y, X로 변환해서 행렬에서 사용\n dsize[0], dsize[1] = dsize[1], dsize[0]\n else:\n # 둘 다 0이라면 원래 값에서 해당 비율 늘리거나 줄이기\n dsize[0] = round(originY * fy)\n dsize[1] = round(originX * fx)\n # 3차원\n dsize.append(3)\n dsize = tuple(dsize)\n logoTemp = np.zeros(dsize, image.dtype)\n\n for h in range(0, logoTemp.shape[0]):\n for w in range(0, logoTemp.shape[1]):\n # 각 좌표에 상대적으로 대응되는 좌표로 b g r값 넣어주기\n logoTemp[h][w][0] = logo[h * originY // dsize[0]][w * originX // dsize[1]][0]\n logoTemp[h][w][1] = logo[h * originY // dsize[0]][w * originX // dsize[1]][1]\n logoTemp[h][w][2] = logo[h * originY // dsize[0]][w * originX // dsize[1]][2]\n\n logo = logoTemp\n return logo\n\n\nimage = cv2.imread(\"images/write_test.jpg\", cv2.IMREAD_COLOR)\nif image is None: raise Exception(\"영상파일 읽기 오류\")\n\nX = 0\nY = 0\nFx = 0.0\nFy = 0.0\n\nwhile True:\n X = int(input())\n Y = int(input())\n Fx = float(input())\n Fy = float(input())\n\n # 하나라도 음수가 입력되면 종료\n if X < 0 or Y < 0 or Fx < 0 or Fy < 0: break\n \n img2 = cv2.resize(image, (X, Y), fx=Fx, fy=Fy, interpolation=cv2.INTER_NEAREST)\n img3 = my_resize(image, (X, Y), fx=Fx, fy=Fy)\n\n cv2.imshow(\"opencvresize\", img2)\n cv2.imshow(\"userresize\", img3)\n\n cv2.waitKey(0)\n","repo_name":"IndiaInk10/PatternRecognition","sub_path":"midterm/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42680913991","text":"\"\"\"\r\nModified version of plt_spec\r\n\"\"\"\r\n\r\nexec(open('utils_song.py').read())\r\n\r\nextract_method = 'fox'\r\ndir_spec = '/Users/Julian/Desktop/UofT/Internship/M2FS_Binary/20190809/Reduce/r-side'\r\ndir_fig = '/Users/Julian/Desktop/UofT/Internship/M2FS_Binary/fig/20190809/'\r\nstd_obs = np.array(['r0051'])\r\n\r\ndatasets1 = []\r\ndatasets2 = []\r\n\r\nfname_all = []\r\nfor file in os.listdir(dir_spec):\r\n if file.endswith('%s_specs.fits'%extract_method):\r\n fname_all.append(file)\r\nfname_all.sort()\r\nN_all = len(fname_all)\r\n\r\nfor i_file in range(N_all):\r\n #i_file = 0\r\n shoe_temp = fname_all[i_file][0]\r\n file_temp = fname_all[i_file][0:5]\r\n fnum_temp = fname_all[i_file][1:5]\r\n dir_fig_temp = '%s/%s_cr'%(dir_fig,fnum_temp)\r\n\r\n if np.sum(std_obs==file_temp)!=1: continue\r\n\r\n print(\"Plotting %s ...\"%file_temp)\r\n\r\n if os.path.isdir(dir_fig_temp)==False:\r\n os.mkdir(dir_fig_temp)\r\n\r\n data_temp = fits.open('%s/%s'%(dir_spec,fname_all[i_file]))[0].data\r\n\r\n for i_fiber in range(64):\r\n #i_fiber = 0\r\n print(i_fiber)\r\n mask_o1 = 
data_temp[0,:,0,i_fiber]>0.\r\n mask_o2 = data_temp[0,:,1,i_fiber]>0.\r\n \r\n datasets1.append([data_temp[0,:,0,i_fiber][mask_o1], data_temp[1,:,0,i_fiber][mask_o1]])\r\n datasets2.append([data_temp[0,:,1,i_fiber][mask_o2], data_temp[1,:,1,i_fiber][mask_o2]])\r\n \r\n ####\r\n fig = plt.figure(0, figsize=(24,16))\r\n fig.clf()\r\n\r\n ax = fig.add_subplot(211)\r\n #ax.set_xlim([8350,8800])\r\n ax.set_xlim([8360,8590])\r\n #ax.set_ylim([6,-1])\r\n ax.plot(data_temp[0,:,0,i_fiber][mask_o1], data_temp[1,:,0,i_fiber][mask_o1], 'r-', label='%s-%s%02d-O1'%(file_temp,shoe_temp,i_fiber+1))\r\n #ax.plot(data_temp[0,:,0,i_fiber][mask_o1], data_temp[2,:,0,i_fiber][mask_o1], 'k-', alpha=0.5) #, label='%s-%s%02d-O1'%(file_temp,shoe_temp,i_fiber+1))\r\n ax.legend(loc='upper left')\r\n ax.set_ylabel(r'Counts', fontsize=22)\r\n\r\n ax = fig.add_subplot(212)\r\n ax.set_xlim([8560,8790])\r\n ax.plot(data_temp[0,:,1,i_fiber][mask_o2], data_temp[1,:,1,i_fiber][mask_o2], 'b-', label='%s-%s%02d-O2'%(file_temp,shoe_temp,i_fiber+1))\r\n #ax.plot(data_temp[0,:,1,i_fiber][mask_o2], data_temp[2,:,1,i_fiber][mask_o2], 'k-', alpha=0.5) #, label='%s-%s%02d-O2'%(file_temp,shoe_temp,i_fiber+1))\r\n ax.legend(loc='upper left')\r\n ax.set_xlabel(r'$\\lambda\\ {\\rm (\\AA)}$', fontsize=22)\r\n ax.set_ylabel(r'Counts', fontsize=22)\r\n\r\n fig.set_tight_layout(True)\r\n #fig.show() # Not working\r\n fig.savefig('%s/%s_%02d_br_%s.pdf'%(dir_fig_temp,file_temp,i_fiber+1,extract_method), format='pdf', transparent=True)\r\n \r\n print(\"Done.\")\r\n \r\n#%%\r\n\"\"\"\r\nSave data\r\n\"\"\"\r\ndatasets1, datasets2 = np.array(datasets1), np.array(datasets2)\r\nfor obs in std_obs:\r\n np.save('/Users/Julian/Desktop/UofT/Internship/M2FS_Binary/m2fs_code/m2fs_plt/numpy_data/{}_data1'.format(obs), datasets1)\r\n np.save('/Users/Julian/Desktop/UofT/Internship/M2FS_Binary/m2fs_code/m2fs_plt/numpy_data/{}_data2'.format(obs), datasets2)","repo_name":"jjm339/Emission-Line-Analysis","sub_path":"plt_spec_el.py","file_name":"plt_spec_el.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33717465465","text":"'''\nreal time road segmentation\n@author Xiangwei Wang wangxiangwei.cpp@gmail.com\n'''\nimport sys\nimport numpy as np\nimport cv2\nimport param\nfrom road_segmentation import RoadSegmentor\ndef main():\n # load image\n images_path = sys.argv[1]\n images = open(images_path)\n image_name = images.readline() # first line is not pointing to a image, so we read and skip it\n image_names = images.read().split('\\n')\n \n poses_m = np.loadtxt(sys.argv[2])\n image_id = 0\n begin_id = 0\n plane = [0,1,0,1.7]\n intrinsic = np.array([[719,0,607],[0,719,185],[0,0,1]])\n road_segmentor = RoadSegmentor(plane[0:3],plane[3],10,intrinsic)\n for image_name in image_names:\n if image_id None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n left = 0\n right = len(nums) - 1\n index = 0\n while index <= right and left < right:\n if nums[index] == 2:\n if nums[right] != 2:\n nums[index], nums[right] = nums[right], nums[index]\n right -= 1\n elif nums[index] == 0:\n if nums[left] != 0:\n nums[left], nums[index] = nums[index], nums[left]\n left += 1\n if left > index:\n index = left\n else:\n index += 1\n\n\ns = Solution()\nl = [0,2,1,1,0]\ns.sortColors(l)\nprint(l)\n","repo_name":"furutuki/LeetCodeSolution","sub_path":"0075. 
Sort Colors/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"14673613159","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport numpy as np\nimport cv2 as cv\n\n##################### Part 4: Image processing in OpenCV ##################### \n# Color space conversion\n# Goals of this section\n# Learn how to convert images between color spaces, e.g. BGR -> grayscale, BGR -> HSV\n# Extract objects of a specific color from an image\n# Functions covered: cv2.cvtColor(), cv.inRange(), etc.\n\n# Using HSV\ndef changeColorSpace():\n\timg = cv.imread('./cv_source/start.png')\n\timg_hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)\n\tcv.namedWindow('image')\n\tcv.imshow('image',img_hsv)\n\tcv.waitKey(0)\n\tcv.destroyAllWindows()\n\n# Function: cv.inRange(). Similar to the threshold function; it binarizes the image\n# Prototype: void inRange(InputArray src, InputArray lowerb,InputArray upperb, OutputArray dst);\n# Param 1: input image to process; single- or multi-channel.\n# Param 2: array or scalar holding the lower bound.\n# Param 3: array or scalar holding the upper bound.\n# Param 4: output image, same size as src and of type CV_8U\n# Intuition: an interval test; pixel values inside the given range become 255, values outside become 0\ndef inrangeTest():\n\timg = cv.imread('./cv_source/IMG_4266.JPG')\n\timg_hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)\n\n\t# bounds are H, S, V values\n\tuper_hsv = np.array([180,200,200])\n\tlower_hsv = np.array([24,39,42])\n\tmask = cv.inRange(img_hsv,lower_hsv,uper_hsv)\n\t\n\t# bitwise AND with the mask\n\tres = cv.bitwise_and(img,img,mask=mask)\n\n\tcv.namedWindow('image')\n\tcv.imshow('image',res)\n\tcv.imwrite('./cv_source/new_mask.JPG',img_hsv)\n\tcv.imwrite('./cv_source/new_IMG_4266.JPG',res)\n\tcv.waitKey(0)\n\tcv.destroyAllWindows()\n\n# Extract blue, red and green objects\ndef getBGRColor():\n\timg = cv.imread('./cv_source/opencv.jpeg')\n\timg_hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)\n\n\t# blue (B) range\n\tlower_bule = np.array([110,100,100])\n\tupper_blue = np.array([130,255,255])\n\tmask_b = cv.inRange(img_hsv,lower_bule,upper_blue)\n\tres_b = cv.bitwise_and(img,img,mask=mask_b)\n\n\t# red (R) range\n\tlower_red = np.array([0,100,100])\n\tupper_red = np.array([10,255,255])\n\tmask_r = cv.inRange(img_hsv,lower_red,upper_red)\n\tres_r = cv.bitwise_and(img,img,mask=mask_r)\n\n\t# green (G) range\n\tlower_green = np.array([60,100,100])\n\tupper_green = np.array([70,255,255])\n\tmask_g = cv.inRange(img_hsv,lower_green,upper_green)\n\tres_g = cv.bitwise_and(img,img,mask=mask_g)\n\n\tres = res_b+res_g+res_r\n\n\tcv.namedWindow('image')\n\tcv.imshow('image',res)\n\tcv.imwrite('./cv_source/new_mask.JPG',img_hsv)\n\tcv.imwrite('./cv_source/new_IMG_4266.JPG',res)\n\tcv.waitKey(0)\n\tcv.destroyAllWindows()\n\n\ndef getBlue():\n\tcap = cv.VideoCapture(0)\n\twhile True:\n\t\tret,frame = cap.read()\n\t\t\n\t\t# convert to HSV\n\t\thsv = cv.cvtColor(frame,cv.COLOR_BGR2HSV)\n\n\t\t# set the blue thresholds\n\t\tlower_bule = np.array([100,50,50])\n\t\tupper_blue = np.array([130,255,255])\n\n\t\t# build a mask from the thresholds\n\t\tmask = cv.inRange(hsv,lower_bule,upper_blue)\n\t\tres = cv.bitwise_and(frame,frame,mask=mask)\n\n\t\t# show the images\n\t\tcv.imshow('frame',frame)\n\t\tcv.imshow('mask',mask)\n\t\tcv.imshow('res',res)\n\t\tk = cv.waitKey(5) & 0xFF\n\t\tif k == 27:\n\t\t\tbreak\n\tcv.destroyAllWindows()\n\ndef getHSV(colorSpace):\n\tcolor = np.uint8([[colorSpace]])\n\treturn cv.cvtColor(color,cv.COLOR_BGR2HSV)[0][0]\n\nif __name__ == '__main__':\n\t# track blue objects from the webcam\n\tgetBlue()\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"simplismvip/opencv_test","sub_path":"cv13_ColorSpace.py","file_name":"cv13_ColorSpace.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4880014480","text":"class Utils:\n    @staticmethod\n    def tokenize_query(examples, **fn_kwargs):\n        
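# Batch-tokenize the raw query strings; meant to be used via something like\n        # dataset.map(Utils.tokenize_query, batched=True, fn_kwargs={\"tokenizer\": tokenizer}).\n        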
tokenizer = fn_kwargs[\"tokenizer\"]\n return tokenizer(examples['queryText'], padding=True)\n\n @staticmethod\n def tokenize_ad(examples, **fn_kwargs):\n package_to_id = fn_kwargs[\"package_to_id\"]\n examples[\"package_ids\"] = [package_to_id[examples[\"packageName\"]]]\n return examples\n","repo_name":"mohsenfayyaz/sponsored-search","sub_path":"src/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31036600706","text":"\nDEP = [\"Sindarin.Fundament\", \"Sindarin.Compiler\", \"Sindarin.Architecture\", \"Sindarin.EPR\"]\n\nWORKSPACE_DIR = \"../..\"\n\nimport sys, os.path\n\nhere = os.path.dirname(__file__)\n\nsys.path.append(here)\n\nfor proj in DEP:\n sys.path.append(os.path.join(here, WORKSPACE_DIR, proj, \"src\"))\n\nfor ext in [\"ext/lib\", \"ext/lib/build\"]:\n sys.path.append(os.path.join(here, WORKSPACE_DIR, ext))\n\n# Set default encoding to UTF-8 for __repr__\n# @@@ this is horrible!\nif os.name == 'posix':\n reload(sys); sys.setdefaultencoding(\"utf-8\") # @UndefinedVariable\n\nimport uniconsole # @UnusedImport\n\n\n\nif __name__ == '__main__':\n from filesystem.python.linker import Linker\n import ply, z3\n Linker(here, WORKSPACE_DIR, DEP)\\\n .with_modules([ply, os.path.dirname(z3.__file__)])\\\n .with_filters([lambda x: os.path.basename(x) not in [\"United.zip\", \".gitignore\", \".git\", \"benchmarks.shelf\", \"paper\", \"spanning\", \"learning\", \"obsolete\"]])\\\n .with_copy_to_root([\"benchmarks\", \"README.md\", \"LICENSE.txt\", \"paper/cav15/cav2015_submission_25.pdf\"])\\\n .with_symlink_to_root([\"src/epr_pdr.py\", \"src/report.py\",\n \"reruns-short.sh\", \"reruns-long.sh\", \"reruns-timeout.sh\", \"reruns-other.sh\"])\\\n .zip(toplevel_dir='univ-pdr')\n","repo_name":"lenadank/verification_project","sub_path":"Sindarin.PDR-Universal/src/linker.py","file_name":"linker.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"37156722012","text":"import os\nimport torch\nimport numpy as np\nimport warnings\nimport time\nfrom tqdm.auto import tqdm\nfrom PIL import Image\nfrom movie_util import MovieSaver\nfrom typing import List, Optional\nimport lpips\nfrom utils import interpolate_spherical, interpolate_linear, add_frames_linear_interp, yml_load, yml_save\nwarnings.filterwarnings('ignore')\ntorch.backends.cudnn.benchmark = False\ntorch.set_grad_enabled(False)\n\n\nclass LatentBlending():\n def __init__(\n self,\n dh: None,\n guidance_scale: float = 4,\n guidance_scale_mid_damper: float = 0.5,\n mid_compression_scaler: float = 1.2):\n r\"\"\"\n Initializes the latent blending class.\n Args:\n guidance_scale: float\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n guidance_scale_mid_damper: float = 0.5\n Reduces the guidance scale towards the middle of the transition.\n A value of 0.5 would decrease the guidance_scale towards the middle linearly by 0.5.\n mid_compression_scaler: float = 2.0\n Increases the sampling density in the middle (where most changes happen). 
Higher values\n                imply more values in the middle. However the inflection point can occur outside the middle,\n                thus high values can give rough transitions. Values around 2 should be fine.\n        \"\"\"\n        assert guidance_scale_mid_damper > 0 \\\n            and guidance_scale_mid_damper <= 1.0, \\\n            f\"guidance_scale_mid_damper needs to be in interval (0,1], you provided {guidance_scale_mid_damper}\"\n\n        self.dh = dh\n        self.device = self.dh.device\n        self.set_dimensions()\n\n        self.guidance_scale_mid_damper = guidance_scale_mid_damper\n        self.mid_compression_scaler = mid_compression_scaler\n        self.seed1 = 0\n        self.seed2 = 0\n\n        # Initialize vars\n        self.prompt1 = \"\"\n        self.prompt2 = \"\"\n\n        self.tree_latents = [None, None]\n        self.tree_fracts = None\n        self.idx_injection = []\n        self.tree_status = None\n        self.tree_final_imgs = []\n\n        self.list_nmb_branches_prev = []\n        self.list_injection_idx_prev = []\n        self.text_embedding1 = None\n        self.text_embedding2 = None\n        self.image1_lowres = None\n        self.image2_lowres = None\n        self.negative_prompt = None\n        self.num_inference_steps = self.dh.num_inference_steps\n        self.noise_level_upscaling = 20\n        self.list_injection_idx = None\n        self.list_nmb_branches = None\n\n        # Mixing parameters\n        self.branch1_crossfeed_power = 0.3\n        self.branch1_crossfeed_range = 0.3\n        self.branch1_crossfeed_decay = 0.99\n\n        self.parental_crossfeed_power = 0.3\n        self.parental_crossfeed_range = 0.6\n        self.parental_crossfeed_power_decay = 0.9\n\n        self.set_guidance_scale(guidance_scale)\n        self.multi_transition_img_first = None\n        self.multi_transition_img_last = None\n        self.dt_per_diff = 0\n        self.spatial_mask = None\n        self.lpips = lpips.LPIPS(net='alex').cuda(self.device)\n\n        self.set_prompt1(\"\")\n        self.set_prompt2(\"\")\n\n    def set_dimensions(self, size_output=None):\n        r\"\"\"\n        sets the size of the output video.\n        Args:\n            size_output: tuple\n                width x height\n                Note: the size will get automatically adjusted to be divisible by 32.\n        \"\"\"\n        self.dh.set_dimensions(size_output)\n\n    def set_guidance_scale(self, guidance_scale):\n        r\"\"\"\n        sets the guidance scale.\n        \"\"\"\n        self.guidance_scale_base = guidance_scale\n        self.guidance_scale = guidance_scale\n        self.dh.guidance_scale = guidance_scale\n\n    def set_negative_prompt(self, negative_prompt):\n        r\"\"\"Set the negative prompt. Currently only one negative prompt is supported\n        \"\"\"\n        self.negative_prompt = negative_prompt\n        self.dh.set_negative_prompt(negative_prompt)\n\n    def set_guidance_mid_dampening(self, fract_mixing):\n        r\"\"\"\n        Tunes the guidance scale down as a linear function of fract_mixing,\n        towards 0.5 the minimum will be reached.\n        \"\"\"\n        mid_factor = 1 - np.abs(fract_mixing - 0.5) / 0.5\n        max_guidance_reduction = self.guidance_scale_base * (1 - self.guidance_scale_mid_damper) - 1\n        guidance_scale_effective = self.guidance_scale_base - max_guidance_reduction * mid_factor\n        self.guidance_scale = guidance_scale_effective\n        self.dh.guidance_scale = guidance_scale_effective\n\n    def set_branch1_crossfeed(self, crossfeed_power, crossfeed_range, crossfeed_decay):\n        r\"\"\"\n        Sets the crossfeed parameters for the first branch to the last branch.\n        Args:\n            crossfeed_power: float [0,1]\n                Controls the level of cross-feeding between the first and last image branch.\n            crossfeed_range: float [0,1]\n                Sets the duration of active crossfeed during development.\n            crossfeed_decay: float [0,1]\n                Sets decay for branch1_crossfeed_power. 
Lower values make the decay stronger across the range.\n \"\"\"\n self.branch1_crossfeed_power = np.clip(crossfeed_power, 0, 1)\n self.branch1_crossfeed_range = np.clip(crossfeed_range, 0, 1)\n self.branch1_crossfeed_decay = np.clip(crossfeed_decay, 0, 1)\n\n def set_parental_crossfeed(self, crossfeed_power, crossfeed_range, crossfeed_decay):\n r\"\"\"\n Sets the crossfeed parameters for all transition images (within the first and last branch).\n Args:\n crossfeed_power: float [0,1]\n Controls the level of cross-feeding from the parental branches\n crossfeed_range: float [0,1]\n Sets the duration of active crossfeed during development.\n crossfeed_decay: float [0,1]\n Sets decay for branch1_crossfeed_power. Lower values make the decay stronger across the range.\n \"\"\"\n self.parental_crossfeed_power = np.clip(crossfeed_power, 0, 1)\n self.parental_crossfeed_range = np.clip(crossfeed_range, 0, 1)\n self.parental_crossfeed_power_decay = np.clip(crossfeed_decay, 0, 1)\n\n def set_prompt1(self, prompt: str):\n r\"\"\"\n Sets the first prompt (for the first keyframe) including text embeddings.\n Args:\n prompt: str\n ABC trending on artstation painted by Greg Rutkowski\n \"\"\"\n prompt = prompt.replace(\"_\", \" \")\n self.prompt1 = prompt\n self.text_embedding1 = self.get_text_embeddings(self.prompt1)\n\n def set_prompt2(self, prompt: str):\n r\"\"\"\n Sets the second prompt (for the second keyframe) including text embeddings.\n Args:\n prompt: str\n XYZ trending on artstation painted by Greg Rutkowski\n \"\"\"\n prompt = prompt.replace(\"_\", \" \")\n self.prompt2 = prompt\n self.text_embedding2 = self.get_text_embeddings(self.prompt2)\n\n def set_image1(self, image: Image):\n r\"\"\"\n Sets the first image (keyframe), relevant for the upscaling model transitions.\n Args:\n image: Image\n \"\"\"\n self.image1_lowres = image\n\n def set_image2(self, image: Image):\n r\"\"\"\n Sets the second image (keyframe), relevant for the upscaling model transitions.\n Args:\n image: Image\n \"\"\"\n self.image2_lowres = image\n\n def run_transition(\n self,\n recycle_img1: Optional[bool] = False,\n recycle_img2: Optional[bool] = False,\n num_inference_steps: Optional[int] = 30,\n depth_strength: Optional[float] = 0.3,\n t_compute_max_allowed: Optional[float] = None,\n nmb_max_branches: Optional[int] = None,\n fixed_seeds: Optional[List[int]] = None):\n r\"\"\"\n Function for computing transitions.\n Returns a list of transition images using spherical latent blending.\n Args:\n recycle_img1: Optional[bool]:\n Don't recompute the latents for the first keyframe (purely prompt1). Saves compute.\n recycle_img2: Optional[bool]:\n Don't recompute the latents for the second keyframe (purely prompt2). Saves compute.\n num_inference_steps:\n Number of diffusion steps. Higher values will take more compute time.\n depth_strength:\n Determines how deep the first injection will happen.\n Deeper injections will cause (unwanted) formation of new structures,\n more shallow values will go into alpha-blendy land.\n t_compute_max_allowed:\n Either provide t_compute_max_allowed or nmb_max_branches.\n The maximum time allowed for computation. Higher values give better results but take longer.\n nmb_max_branches: int\n Either provide t_compute_max_allowed or nmb_max_branches. The maximum number of branches to be computed. Higher values give better\n results. 
Use this if you want to have controllable results independent\n of your computer.\n fixed_seeds: Optional[List[int)]:\n You can supply two seeds that are used for the first and second keyframe (prompt1 and prompt2).\n Otherwise random seeds will be taken.\n \"\"\"\n\n # Sanity checks first\n assert self.text_embedding1 is not None, 'Set the first text embedding with .set_prompt1(...) before'\n assert self.text_embedding2 is not None, 'Set the second text embedding with .set_prompt2(...) before'\n\n # Random seeds\n if fixed_seeds is not None:\n if fixed_seeds == 'randomize':\n fixed_seeds = list(np.random.randint(0, 1000000, 2).astype(np.int32))\n else:\n assert len(fixed_seeds) == 2, \"Supply a list with len = 2\"\n\n self.seed1 = fixed_seeds[0]\n self.seed2 = fixed_seeds[1]\n\n # Ensure correct num_inference_steps in holder\n self.num_inference_steps = num_inference_steps\n self.dh.set_num_inference_steps(num_inference_steps)\n\n # Compute / Recycle first image\n if not recycle_img1 or len(self.tree_latents[0]) != self.num_inference_steps:\n list_latents1 = self.compute_latents1()\n else:\n list_latents1 = self.tree_latents[0]\n\n # Compute / Recycle first image\n if not recycle_img2 or len(self.tree_latents[-1]) != self.num_inference_steps:\n list_latents2 = self.compute_latents2()\n else:\n list_latents2 = self.tree_latents[-1]\n\n # Reset the tree, injecting the edge latents1/2 we just generated/recycled\n self.tree_latents = [list_latents1, list_latents2]\n self.tree_fracts = [0.0, 1.0]\n self.tree_final_imgs = [self.dh.latent2image((self.tree_latents[0][-1])), self.dh.latent2image((self.tree_latents[-1][-1]))]\n self.tree_idx_injection = [0, 0]\n\n # Hard-fix. Apply spatial mask only for list_latents2 but not for transition. WIP...\n self.spatial_mask = None\n\n # Set up branching scheme (dependent on provided compute time)\n list_idx_injection, list_nmb_stems = self.get_time_based_branching(depth_strength, t_compute_max_allowed, nmb_max_branches)\n\n # Run iteratively, starting with the longest trajectory.\n # Always inserting new branches where they are needed most according to image similarity\n for s_idx in tqdm(range(len(list_idx_injection))):\n nmb_stems = list_nmb_stems[s_idx]\n idx_injection = list_idx_injection[s_idx]\n\n for i in range(nmb_stems):\n fract_mixing, b_parent1, b_parent2 = self.get_mixing_parameters(idx_injection)\n self.set_guidance_mid_dampening(fract_mixing)\n list_latents = self.compute_latents_mix(fract_mixing, b_parent1, b_parent2, idx_injection)\n self.insert_into_tree(fract_mixing, idx_injection, list_latents)\n # print(f\"fract_mixing: {fract_mixing} idx_injection {idx_injection}\")\n\n return self.tree_final_imgs\n\n def compute_latents1(self, return_image=False):\n r\"\"\"\n Runs a diffusion trajectory for the first image\n Args:\n return_image: bool\n whether to return an image or the list of latents\n \"\"\"\n print(\"starting compute_latents1\")\n list_conditionings = self.get_mixed_conditioning(0)\n t0 = time.time()\n latents_start = self.get_noise(self.seed1)\n list_latents1 = self.run_diffusion(\n list_conditionings,\n latents_start=latents_start,\n idx_start=0)\n t1 = time.time()\n self.dt_per_diff = (t1 - t0) / self.num_inference_steps\n self.tree_latents[0] = list_latents1\n if return_image:\n return self.dh.latent2image(list_latents1[-1])\n else:\n return list_latents1\n\n def compute_latents2(self, return_image=False):\n r\"\"\"\n Runs a diffusion trajectory for the last image, which may be affected by the first image's trajectory.\n 
Args:\n return_image: bool\n whether to return an image or the list of latents\n \"\"\"\n print(\"starting compute_latents2\")\n list_conditionings = self.get_mixed_conditioning(1)\n latents_start = self.get_noise(self.seed2)\n # Influence from branch1\n if self.branch1_crossfeed_power > 0.0:\n # Set up the mixing_coeffs\n idx_mixing_stop = int(round(self.num_inference_steps * self.branch1_crossfeed_range))\n mixing_coeffs = list(np.linspace(self.branch1_crossfeed_power, self.branch1_crossfeed_power * self.branch1_crossfeed_decay, idx_mixing_stop))\n mixing_coeffs.extend((self.num_inference_steps - idx_mixing_stop) * [0])\n list_latents_mixing = self.tree_latents[0]\n list_latents2 = self.run_diffusion(\n list_conditionings,\n latents_start=latents_start,\n idx_start=0,\n list_latents_mixing=list_latents_mixing,\n mixing_coeffs=mixing_coeffs)\n else:\n list_latents2 = self.run_diffusion(list_conditionings, latents_start)\n self.tree_latents[-1] = list_latents2\n\n if return_image:\n return self.dh.latent2image(list_latents2[-1])\n else:\n return list_latents2\n\n def compute_latents_mix(self, fract_mixing, b_parent1, b_parent2, idx_injection):\n r\"\"\"\n Runs a diffusion trajectory, using the latents from the respective parents\n Args:\n fract_mixing: float\n the fraction along the transition axis [0, 1]\n b_parent1: int\n index of parent1 to be used\n b_parent2: int\n index of parent2 to be used\n idx_injection: int\n the index in terms of diffusion steps, where the next insertion will start.\n \"\"\"\n list_conditionings = self.get_mixed_conditioning(fract_mixing)\n fract_mixing_parental = (fract_mixing - self.tree_fracts[b_parent1]) / (self.tree_fracts[b_parent2] - self.tree_fracts[b_parent1])\n # idx_reversed = self.num_inference_steps - idx_injection\n\n list_latents_parental_mix = []\n for i in range(self.num_inference_steps):\n latents_p1 = self.tree_latents[b_parent1][i]\n latents_p2 = self.tree_latents[b_parent2][i]\n if latents_p1 is None or latents_p2 is None:\n latents_parental = None\n else:\n latents_parental = interpolate_spherical(latents_p1, latents_p2, fract_mixing_parental)\n list_latents_parental_mix.append(latents_parental)\n\n idx_mixing_stop = int(round(self.num_inference_steps * self.parental_crossfeed_range))\n mixing_coeffs = idx_injection * [self.parental_crossfeed_power]\n nmb_mixing = idx_mixing_stop - idx_injection\n if nmb_mixing > 0:\n mixing_coeffs.extend(list(np.linspace(self.parental_crossfeed_power, self.parental_crossfeed_power * self.parental_crossfeed_power_decay, nmb_mixing)))\n mixing_coeffs.extend((self.num_inference_steps - len(mixing_coeffs)) * [0])\n latents_start = list_latents_parental_mix[idx_injection - 1]\n list_latents = self.run_diffusion(\n list_conditionings,\n latents_start=latents_start,\n idx_start=idx_injection,\n list_latents_mixing=list_latents_parental_mix,\n mixing_coeffs=mixing_coeffs)\n return list_latents\n\n def get_time_based_branching(self, depth_strength, t_compute_max_allowed=None, nmb_max_branches=None):\n r\"\"\"\n Sets up the branching scheme dependent on the time that is granted for compute.\n The scheme uses an estimation derived from the first image's computation speed.\n Either provide t_compute_max_allowed or nmb_max_branches\n Args:\n depth_strength:\n Determines how deep the first injection will happen.\n Deeper injections will cause (unwanted) formation of new structures,\n more shallow values will go into alpha-blendy land.\n t_compute_max_allowed: float\n The maximum time allowed for computation. 
Higher values give better results\n but take longer. Use this if you want to fix your waiting time for the results.\n nmb_max_branches: int\n The maximum number of branches to be computed. Higher values give better\n results. Use this if you want to have controllable results independent\n of your computer.\n \"\"\"\n idx_injection_base = int(round(self.num_inference_steps * depth_strength))\n list_idx_injection = np.arange(idx_injection_base, self.num_inference_steps - 1, 3)\n list_nmb_stems = np.ones(len(list_idx_injection), dtype=np.int32)\n t_compute = 0\n\n if nmb_max_branches is None:\n assert t_compute_max_allowed is not None, \"Either specify t_compute_max_allowed or nmb_max_branches\"\n stop_criterion = \"t_compute_max_allowed\"\n elif t_compute_max_allowed is None:\n assert nmb_max_branches is not None, \"Either specify t_compute_max_allowed or nmb_max_branches\"\n stop_criterion = \"nmb_max_branches\"\n nmb_max_branches -= 2 # Discounting the outer frames\n else:\n raise ValueError(\"Either specify t_compute_max_allowed or nmb_max_branches\")\n stop_criterion_reached = False\n is_first_iteration = True\n while not stop_criterion_reached:\n list_compute_steps = self.num_inference_steps - list_idx_injection\n list_compute_steps *= list_nmb_stems\n t_compute = np.sum(list_compute_steps) * self.dt_per_diff + 0.15 * np.sum(list_nmb_stems)\n t_compute += 2 * self.num_inference_steps * self.dt_per_diff # outer branches\n increase_done = False\n for s_idx in range(len(list_nmb_stems) - 1):\n if list_nmb_stems[s_idx + 1] / list_nmb_stems[s_idx] >= 2:\n list_nmb_stems[s_idx] += 1\n increase_done = True\n break\n if not increase_done:\n list_nmb_stems[-1] += 1\n\n if stop_criterion == \"t_compute_max_allowed\" and t_compute > t_compute_max_allowed:\n stop_criterion_reached = True\n elif stop_criterion == \"nmb_max_branches\" and np.sum(list_nmb_stems) >= nmb_max_branches:\n stop_criterion_reached = True\n if is_first_iteration:\n # Need to undersample.\n list_idx_injection = np.linspace(list_idx_injection[0], list_idx_injection[-1], nmb_max_branches).astype(np.int32)\n list_nmb_stems = np.ones(len(list_idx_injection), dtype=np.int32)\n else:\n is_first_iteration = False\n\n # print(f\"t_compute {t_compute} list_nmb_stems {list_nmb_stems}\")\n return list_idx_injection, list_nmb_stems\n\n def get_mixing_parameters(self, idx_injection):\n r\"\"\"\n Computes which parental latents should be mixed together to achieve a smooth blend.\n As metric, we are using lpips image similarity. 
The insertion takes place\n where the metric is maximal.\n Args:\n idx_injection: int\n the index in terms of diffusion steps, where the next insertion will start.\n \"\"\"\n # get_lpips_similarity\n similarities = []\n for i in range(len(self.tree_final_imgs) - 1):\n similarities.append(self.get_lpips_similarity(self.tree_final_imgs[i], self.tree_final_imgs[i + 1]))\n b_closest1 = np.argmax(similarities)\n b_closest2 = b_closest1 + 1\n fract_closest1 = self.tree_fracts[b_closest1]\n fract_closest2 = self.tree_fracts[b_closest2]\n\n # Ensure that the parents are indeed older!\n b_parent1 = b_closest1\n while True:\n if self.tree_idx_injection[b_parent1] < idx_injection:\n break\n else:\n b_parent1 -= 1\n b_parent2 = b_closest2\n while True:\n if self.tree_idx_injection[b_parent2] < idx_injection:\n break\n else:\n b_parent2 += 1\n fract_mixing = (fract_closest1 + fract_closest2) / 2\n return fract_mixing, b_parent1, b_parent2\n\n def insert_into_tree(self, fract_mixing, idx_injection, list_latents):\n r\"\"\"\n Inserts all necessary parameters into the trajectory tree.\n Args:\n fract_mixing: float\n the fraction along the transition axis [0, 1]\n idx_injection: int\n the index in terms of diffusion steps, where the next insertion will start.\n list_latents: list\n list of the latents to be inserted\n \"\"\"\n b_parent1, b_parent2 = self.get_closest_idx(fract_mixing)\n self.tree_latents.insert(b_parent1 + 1, list_latents)\n self.tree_final_imgs.insert(b_parent1 + 1, self.dh.latent2image(list_latents[-1]))\n self.tree_fracts.insert(b_parent1 + 1, fract_mixing)\n self.tree_idx_injection.insert(b_parent1 + 1, idx_injection)\n\n def get_noise(self, seed):\n r\"\"\"\n Helper function to get noise given seed.\n Args:\n seed: int\n \"\"\"\n return self.dh.get_noise(seed)\n\n @torch.no_grad()\n def run_diffusion(\n self,\n list_conditionings,\n latents_start: torch.FloatTensor = None,\n idx_start: int = 0,\n list_latents_mixing=None,\n mixing_coeffs=0.0,\n return_image: Optional[bool] = False):\n r\"\"\"\n Wrapper function for diffusion runners.\n Depending on the mode, the correct one will be executed.\n\n Args:\n list_conditionings: list\n List of all conditionings for the diffusion model.\n latents_start: torch.FloatTensor\n Latents that are used for injection\n idx_start: int\n Index of the diffusion process start and where the latents_for_injection are injected\n list_latents_mixing: torch.FloatTensor\n List of latents (latent trajectories) that are used for mixing\n mixing_coeffs: float or list\n Coefficients, how strong each element of list_latents_mixing will be mixed in.\n return_image: Optional[bool]\n Optionally return image directly\n \"\"\"\n\n # Ensure correct num_inference_steps in Holder\n self.dh.set_num_inference_steps(self.num_inference_steps)\n assert type(list_conditionings) is list, \"list_conditionings need to be a list\"\n\n if self.dh.use_sd_xl:\n text_embeddings = list_conditionings[0]\n return self.dh.run_diffusion_sd_xl(\n text_embeddings=text_embeddings,\n latents_start=latents_start,\n idx_start=idx_start,\n list_latents_mixing=list_latents_mixing,\n mixing_coeffs=mixing_coeffs,\n return_image=return_image)\n\n else:\n text_embeddings = list_conditionings[0]\n return self.dh.run_diffusion_standard(\n text_embeddings=text_embeddings,\n latents_start=latents_start,\n idx_start=idx_start,\n list_latents_mixing=list_latents_mixing,\n mixing_coeffs=mixing_coeffs,\n return_image=return_image)\n\n def run_upscaling(\n self,\n dp_img: str,\n depth_strength: float = 0.65,\n 
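# NOTE: the default injection depth here (0.65) is deeper than the 0.3 used\n            # by run_transition, presumably to keep the upscaled frames anchored\n            # to the low-res keyframes.\n            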
num_inference_steps: int = 100,\n nmb_max_branches_highres: int = 5,\n nmb_max_branches_lowres: int = 6,\n duration_single_segment=3,\n fps=24,\n fixed_seeds: Optional[List[int]] = None):\n r\"\"\"\n Runs upscaling with the x4 model. Requires that you run a transition before with a low-res model and save the results using write_imgs_transition.\n\n Args:\n dp_img: str\n Path to the low-res transition path (as saved in write_imgs_transition)\n depth_strength:\n Determines how deep the first injection will happen.\n Deeper injections will cause (unwanted) formation of new structures,\n more shallow values will go into alpha-blendy land.\n num_inference_steps:\n Number of diffusion steps. Higher values will take more compute time.\n nmb_max_branches_highres: int\n Number of final branches of the upscaling transition pass. Note this is the number\n of branches between each pair of low-res images.\n nmb_max_branches_lowres: int\n Number of input low-res images, subsampling all transition images written in the low-res pass.\n Setting this number lower (e.g. 6) will decrease the compute time but not affect the results too much.\n duration_single_segment: float\n The duration of each high-res movie segment. You will have nmb_max_branches_lowres-1 segments in total.\n fps: float\n frames per second of movie\n fixed_seeds: Optional[List[int)]:\n You can supply two seeds that are used for the first and second keyframe (prompt1 and prompt2).\n Otherwise random seeds will be taken.\n \"\"\"\n fp_yml = os.path.join(dp_img, \"lowres.yaml\")\n fp_movie = os.path.join(dp_img, \"movie_highres.mp4\")\n ms = MovieSaver(fp_movie, fps=fps)\n assert os.path.isfile(fp_yml), \"lowres.yaml does not exist. did you forget run_upscaling_step1?\"\n dict_stuff = yml_load(fp_yml)\n\n # load lowres images\n nmb_images_lowres = dict_stuff['nmb_images']\n prompt1 = dict_stuff['prompt1']\n prompt2 = dict_stuff['prompt2']\n idx_img_lowres = np.round(np.linspace(0, nmb_images_lowres - 1, nmb_max_branches_lowres)).astype(np.int32)\n imgs_lowres = []\n for i in idx_img_lowres:\n fp_img_lowres = os.path.join(dp_img, f\"lowres_img_{str(i).zfill(4)}.jpg\")\n assert os.path.isfile(fp_img_lowres), f\"{fp_img_lowres} does not exist. 
did you forget run_upscaling_step1?\"\n imgs_lowres.append(Image.open(fp_img_lowres))\n\n # set up upscaling\n text_embeddingA = self.dh.get_text_embedding(prompt1)\n text_embeddingB = self.dh.get_text_embedding(prompt2)\n list_fract_mixing = np.linspace(0, 1, nmb_max_branches_lowres - 1)\n for i in range(nmb_max_branches_lowres - 1):\n print(f\"Starting movie segment {i+1}/{nmb_max_branches_lowres-1}\")\n self.text_embedding1 = interpolate_linear(text_embeddingA, text_embeddingB, list_fract_mixing[i])\n self.text_embedding2 = interpolate_linear(text_embeddingA, text_embeddingB, 1 - list_fract_mixing[i])\n if i == 0:\n recycle_img1 = False\n else:\n self.swap_forward()\n recycle_img1 = True\n\n self.set_image1(imgs_lowres[i])\n self.set_image2(imgs_lowres[i + 1])\n\n list_imgs = self.run_transition(\n recycle_img1=recycle_img1,\n recycle_img2=False,\n num_inference_steps=num_inference_steps,\n depth_strength=depth_strength,\n nmb_max_branches=nmb_max_branches_highres)\n list_imgs_interp = add_frames_linear_interp(list_imgs, fps, duration_single_segment)\n\n # Save movie frame\n for img in list_imgs_interp:\n ms.write_frame(img)\n ms.finalize()\n\n @torch.no_grad()\n def get_mixed_conditioning(self, fract_mixing):\n if self.dh.use_sd_xl:\n text_embeddings_mix = []\n for i in range(len(self.text_embedding1)):\n text_embeddings_mix.append(interpolate_linear(self.text_embedding1[i], self.text_embedding2[i], fract_mixing))\n list_conditionings = [text_embeddings_mix]\n else:\n text_embeddings_mix = interpolate_linear(self.text_embedding1, self.text_embedding2, fract_mixing)\n list_conditionings = [text_embeddings_mix]\n return list_conditionings\n\n @torch.no_grad()\n def get_text_embeddings(\n self,\n prompt: str):\n r\"\"\"\n Computes the text embeddings provided a string with a prompts.\n Adapted from stable diffusion repo\n Args:\n prompt: str\n ABC trending on artstation painted by Old Greg.\n \"\"\"\n return self.dh.get_text_embedding(prompt)\n\n def write_imgs_transition(self, dp_img):\n r\"\"\"\n Writes the transition images into the folder dp_img.\n Requires run_transition to be completed.\n Args:\n dp_img: str\n Directory, into which the transition images, yaml file and latents are written.\n \"\"\"\n imgs_transition = self.tree_final_imgs\n os.makedirs(dp_img, exist_ok=True)\n for i, img in enumerate(imgs_transition):\n img_leaf = Image.fromarray(img)\n img_leaf.save(os.path.join(dp_img, f\"lowres_img_{str(i).zfill(4)}.jpg\"))\n fp_yml = os.path.join(dp_img, \"lowres.yaml\")\n self.save_statedict(fp_yml)\n\n def write_movie_transition(self, fp_movie, duration_transition, fps=30):\n r\"\"\"\n Writes the transition movie to fp_movie, using the given duration and fps..\n The missing frames are linearly interpolated.\n Args:\n fp_movie: str\n file pointer to the final movie.\n duration_transition: float\n duration of the movie in seonds\n fps: int\n fps of the movie\n \"\"\"\n\n # Let's get more cheap frames via linear interpolation (duration_transition*fps frames)\n imgs_transition_ext = add_frames_linear_interp(self.tree_final_imgs, duration_transition, fps)\n\n # Save as MP4\n if os.path.isfile(fp_movie):\n os.remove(fp_movie)\n ms = MovieSaver(fp_movie, fps=fps, shape_hw=[self.dh.height_img, self.dh.width_img])\n for img in tqdm(imgs_transition_ext):\n ms.write_frame(img)\n ms.finalize()\n\n def save_statedict(self, fp_yml):\n # Dump everything relevant into yaml\n imgs_transition = self.tree_final_imgs\n state_dict = self.get_state_dict()\n state_dict['nmb_images'] = 
len(imgs_transition)\n        yml_save(fp_yml, state_dict)\n\n    def get_state_dict(self):\n        state_dict = {}\n        grab_vars = ['prompt1', 'prompt2', 'seed1', 'seed2', 'height', 'width',\n                     'num_inference_steps', 'depth_strength', 'guidance_scale',\n                     'guidance_scale_mid_damper', 'mid_compression_scaler', 'negative_prompt',\n                     'branch1_crossfeed_power', 'branch1_crossfeed_range', 'branch1_crossfeed_decay',\n                     'parental_crossfeed_power', 'parental_crossfeed_range', 'parental_crossfeed_power_decay']\n        for v in grab_vars:\n            if hasattr(self, v):\n                if v == 'seed1' or v == 'seed2':\n                    state_dict[v] = int(getattr(self, v))\n                elif v == 'guidance_scale':\n                    state_dict[v] = float(getattr(self, v))\n\n                else:\n                    try:\n                        state_dict[v] = getattr(self, v)\n                    except Exception:\n                        pass\n        return state_dict\n\n    def randomize_seed(self):\n        r\"\"\"\n        Set a random seed for a fresh start.\n        \"\"\"\n        seed = np.random.randint(999999999)\n        self.set_seed(seed)\n\n    def set_seed(self, seed: int):\n        r\"\"\"\n        Set the seed for a fresh start.\n        \"\"\"\n        self.seed = seed\n        self.dh.seed = seed\n\n    def set_width(self, width):\n        r\"\"\"\n        Set the width of the resulting image.\n        \"\"\"\n        assert np.mod(width, 64) == 0, \"set_width: value needs to be divisible by 64\"\n        self.width = width\n        self.dh.width = width\n\n    def set_height(self, height):\n        r\"\"\"\n        Set the height of the resulting image.\n        \"\"\"\n        assert np.mod(height, 64) == 0, \"set_height: value needs to be divisible by 64\"\n        self.height = height\n        self.dh.height = height\n\n    def swap_forward(self):\n        r\"\"\"\n        Moves over keyframe two -> keyframe one. Useful for making a sequence of transitions\n        as in run_multi_transition()\n        \"\"\"\n        # Move over all latents\n        self.tree_latents[0] = self.tree_latents[-1]\n        # Move over prompts and text embeddings\n        self.prompt1 = self.prompt2\n        self.text_embedding1 = self.text_embedding2\n        # Final cleanup for extra sanity\n        self.tree_final_imgs = []\n\n    def get_lpips_similarity(self, imgA, imgB):\n        r\"\"\"\n        Computes the image similarity between two images imgA and imgB.\n        Used to determine the optimal point of insertion to create smooth transitions.\n        High values indicate low similarity.\n        \"\"\"\n
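        # lpips expects NCHW tensors scaled to [-1, 1], hence the uint8\n        # [0, 255] -> [-1, 1] rescaling and channel permutation below.\n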
        tensorA = torch.from_numpy(imgA).float().cuda(self.device)\n        tensorA = 2 * tensorA / 255.0 - 1\n        tensorA = tensorA.permute([2, 0, 1]).unsqueeze(0)\n        tensorB = torch.from_numpy(imgB).float().cuda(self.device)\n        tensorB = 2 * tensorB / 255.0 - 1\n        tensorB = tensorB.permute([2, 0, 1]).unsqueeze(0)\n        lploss = self.lpips(tensorA, tensorB)\n        lploss = float(lploss[0][0][0][0])\n        return lploss\n\n    # Auxiliary functions\n    def get_closest_idx(\n            self,\n            fract_mixing: float):\n        r\"\"\"\n        Helper function to retrieve the parents for any given mixing.\n        Example: fract_mixing = 0.4 and self.tree_fracts = [0, 0.3, 0.6, 1.0]\n        Will return the two closest values here, i.e. [1, 2]\n        \"\"\"\n\n        pdist = fract_mixing - np.asarray(self.tree_fracts)\n        pdist_pos = pdist.copy()\n        pdist_pos[pdist_pos < 0] = np.inf\n        b_parent1 = np.argmin(pdist_pos)\n        pdist_neg = -pdist.copy()\n        pdist_neg[pdist_neg <= 0] = np.inf\n        b_parent2 = np.argmin(pdist_neg)\n\n        if b_parent1 > b_parent2:\n            tmp = b_parent2\n            b_parent2 = b_parent1\n            b_parent1 = tmp\n\n        return b_parent1, b_parent2\n","repo_name":"lunarring/latentblending","sub_path":"latent_blending.py","file_name":"latent_blending.py","file_ext":"py","file_size_in_byte":36145,"program_lang":"python","lang":"en","doc_type":"code","stars":274,"dataset":"github-code","pt":"81"}
+{"seq_id":"6844602357","text":"import util\r\nimport sys\r\n\r\n\r\nprint(\"product id: \" + sys.argv[1])\r\n\r\ntmpfile = 'temp.csv'\r\ncategory = sys.argv[3]\r\nbarcodeFile = sys.argv[1]\r\noutfile = sys.argv[2]\r\n\r\nutil.removeDuplicates('products-header.csv', barcodeFile, tmpfile)\r\n\r\nutil.LookupUPCProducts(category, tmpfile, sys.argv[2])\r\n\r\n\r\n\r\n\r\n","repo_name":"tjohnson-github/python-projects","sub_path":"upc-read.py","file_name":"upc-read.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"11307901296","text":"# Exceptions are errors which arise due to semantic and syntax errors\n\n# Exception handling involves all actions that are taken to detect and expose exceptions in programs\n\n\n# We use try---catch to detect and capture errors\n\n# try block is responsible for testing a program and enabling detection of errors\n\n# catch | except in python - handles the exception raised in the try block\n'''\ntry:\n    # Code that may raise exception\nexcept Exception as e:\n    # Code to handle exceptions\n\n'''\n\n# Ex 1\n# x = 3\n# y = 5\n# try:\n#     x > 3 & y < 7\n# except Exception as e:\n#     print(\"Exception raised\")\n\n\n# # Ex 2\ndef divide(a, b):\n    try:\n        result = a / b\n    except ZeroDivisionError:\n        result = \"Error: Division by zero\"\n        print(\"This runs whenever an exception is raised \")\n    return result\n\n\ntry:\n    numerator = 10\n    denominator = 0\n    result = divide(numerator, denominator)\n    print(f\"Result: {result}\")\nexcept Exception as e:\n    print(f\"An exception occurred: {e}\")\nfinally:\n    print(\"Execution complete.\")\n\n\n# Ex 3\n\ntry:\n    file = open(\"r\", \"r\")\n    content = file.read()\n    file.close()\nexcept FileNotFoundError as e:\n    print(f\"File not found error: {e}\")\nexcept Exception as e:\n    print(f\"An exception occurred: {e}\")\nfinally:\n    print(\"File handling complete\")\n","repo_name":"JohnGastone/Tuts","sub_path":"exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"69953948744","text":"from __future__ import print_function\n\nimport os.path\n\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nimport pdb\nimport pprint\n\n# If modifying these scopes, delete the file token.json.\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\n# The ID and range of a sample spreadsheet.\nSPREADSHEET_ID = '1VL6c4AhWZXag6R01ibx0r6q3moqeaJWwstBPxMaUVu8'\nFIRST_EDITABLE_COLUMN = 'G'\nLAST_COLUMN = 'I'\nSAMPLE_RANGE_NAME = 'Database!A1:%s' % LAST_COLUMN\n\nHEADERS = []\nCREDS = None\n\n\ndef main():\n    
\"\"\"Shows basic usage of the Sheets API.\n Prints values from a sample spreadsheet.\n \"\"\"\n global CREDS\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n CREDS = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not CREDS or not CREDS.valid:\n if CREDS and CREDS.expired and CREDS.refresh_token:\n CREDS.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'casino/google_credentials.json', SCOPES)\n CREDS = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(CREDS.to_json())\n\n try:\n people = get_people()\n people['Mark Tai']['Image Link'] = 'https://www.marktai.com/download/mark_tie.png'\n save_person(people['Mark Tai'])\n pprint.pprint(people)\n\n pdb.set_trace()\n except HttpError as err:\n print(err)\n\ndef get_people():\n service = build('sheets', 'v4', credentials=CREDS)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n global HEADERS\n HEADERS = values[0]\n\n people = {}\n for i, row in enumerate(values[1:]):\n person = {x[0]: None if x[1] == '' else x[1] for x in zip(HEADERS, row) if x[0] != ''}\n person['row_number'] = i + 2\n people[person['Name']] = person\n\n return people\n\ndef save_person(person):\n row = ['' if person.get(h, '') is None else person.get(h, '') for h in HEADERS]\n range_name = 'Database!%s%d:%s%d' % (FIRST_EDITABLE_COLUMN, person['row_number'], LAST_COLUMN, person['row_number'])\n return update_values(SPREADSHEET_ID, range_name, 'USER_ENTERED', [row[6:]])\n\n\ndef update_values(spreadsheet_id, range_name, value_input_option,\n values):\n \"\"\"\n Creates the batch_update the user has access to.\n Load pre-authorized user credentials from the environment.\n TODO(developer) - See https://developers.google.com/identity\n for guides on implementing OAuth2 for the application.\n \"\"\"\n # pylint: disable=maybe-no-member\n try:\n\n service = build('sheets', 'v4', credentials=CREDS)\n body = {\n 'values': values\n }\n result = service.spreadsheets().values().update(\n spreadsheetId=spreadsheet_id, range=range_name,\n valueInputOption=value_input_option, body=body).execute()\n print(f\"{result.get('updatedCells')} cells updated.\")\n return result\n except HttpError as error:\n print(f\"An error occurred: {error}\")\n return error\n\nif __name__ == '__main__':\n main()","repo_name":"marktai/charity-casino","sub_path":"backend/src/quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23762743864","text":"# -*- coding: utf-8 -*-\nimport os\nfrom dayu_widgets.divider import MDivider\nfrom dayu_widgets.label import MLabel\nfrom dayu_widgets import dayu_theme\nfrom dayu_widgets.line_tab_widget import MLineTabWidget\nfrom dayu_widgets.qt import QWidget, QVBoxLayout, Qt, QHBoxLayout, QSpacerItem, QSizePolicy, QMenu, QCursor, MIcon\nfrom dayu_widgets.tool_button import MToolButton\nfrom dayu_widgets.line_edit import MLineEdit\nfrom dayu_widgets.field_mixin import MFieldMixin\nfrom dayu_widgets.item_model import MTableModel, MSortFilterModel\nfrom 
dayu_widgets.item_view import MTreeView\nfrom dayu_widgets.push_button import MPushButton\nfrom dayu_widgets.message import MMessage\nfrom . import _test_data as mock\n\n\nclass TreeView(MTreeView):\n    def __init__(self, parent=None):\n        super(TreeView, self).__init__(parent)\n        self.data_model = MTableModel()\n        self.data_model.set_header_list(mock.header_list)\n        model_sort = MSortFilterModel()\n        model_sort.setSourceModel(self.data_model)\n\n        self.setModel(model_sort)\n\n        model_sort.set_header_list(mock.header_list)\n        self.set_header_list(mock.header_list)\n        # self.model.set_data_list(mock.tree_data_list)\n\n        self.setStyleSheet(\"border:none;\")\n\n\nclass TeamAssetWin(QWidget):\n    def __init__(self, parent=None):\n        super(TeamAssetWin, self).__init__(parent)\n        self._init_ui()\n\n    def _init_ui(self):\n        main_layout = QVBoxLayout()\n        toolbutton_layout = QHBoxLayout()\n        main_layout.setContentsMargins(0, 0, 0, 0)\n        toolbutton_layout.setContentsMargins(0, 0, 0, 0)\n\n        self.upload_tool_button = MToolButton()\n        self.upload_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/upload.svg')\n        toolbutton_layout.addWidget(self.upload_tool_button)\n\n        self.new_folder_tool_button = MToolButton()\n        self.new_folder_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/new_folder.svg')\n        toolbutton_layout.addWidget(self.new_folder_tool_button)\n\n        self.download_tool_button = MToolButton()\n        self.download_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/download.svg')\n        toolbutton_layout.addWidget(self.download_tool_button)\n\n        self.move_tool_button = MToolButton()\n        self.move_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/move.svg')\n        toolbutton_layout.addWidget(self.move_tool_button)\n\n        self.copy_tool_button = MToolButton()\n        self.copy_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/copy.svg')\n        toolbutton_layout.addWidget(self.copy_tool_button)\n\n        self.rename_tool_button = MToolButton()\n        self.rename_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/rename.svg')\n        toolbutton_layout.addWidget(self.rename_tool_button)\n\n        self.delete_tool_button = MToolButton()\n        self.delete_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/delete.svg')\n        toolbutton_layout.addWidget(self.delete_tool_button)\n\n        toolbutton_layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum))\n\n        self.search_line_edit = MLineEdit().search().small()\n        self.search_line_edit.setMaximumWidth(300)\n        self.search_line_edit.setStyleSheet('border: 1px solid #1e1e1e;background-color: #313641;')\n        toolbutton_layout.addWidget(self.search_line_edit)\n\n        main_layout.addLayout(toolbutton_layout)\n\n        self.tv = TreeView()\n        self.tv.data_model.set_data_list(mock.tree_data_list)\n        # self.tv.setContextMenuPolicy(Qt.CustomContextMenu)\n        # self.tv.customContextMenuRequested.connect(self.show_menu)\n        # self.tv.contextMenu = QMenu(self)\n\n        main_layout.addWidget(self.tv)\n        self.setLayout(main_layout)\n\n    # def show_menu(self):\n    #     self.tv.contextMenu.clear()\n    #     self.tv.contextMenu.addAction(MIcon(f'{os.environ[\"ROOTPATH\"]}/icons/custom/analysis.png',\n    #                                         dayu_theme.primary_color), 'Analyze file')\n    #     self.tv.contextMenu.popup(QCursor.pos())\n    #     self.tv.contextMenu.show()\n    #     pass\n\n    def upload(self):\n        # todo\n        MMessage.config(3)\n        MMessage.error(self, u'Feature not available yet, stay tuned!')\n\n    def create_dir(self):\n        # todo\n        MMessage.error(self, u'Feature not available yet, stay tuned!')\n\n\n\nclass RenderOutput(QWidget):\n    def __init__(self, parent=None):\n
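        # Download-only counterpart of TeamAssetWin: one download button, a search\n        # field and the same tree view, fed with the second mock data set.\n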
        super(RenderOutput, self).__init__(parent)\n        self._init_ui()\n\n    def _init_ui(self):\n        main_layout = QVBoxLayout()\n        toolbutton_layout = QHBoxLayout()\n        main_layout.setContentsMargins(0, 0, 0, 0)\n        toolbutton_layout.setContentsMargins(0, 0, 0, 0)\n\n        self.download_tool_button = MToolButton()\n        self.download_tool_button.set_dayu_svg(f'{os.environ[\"ROOTPATH\"]}/icons/custom/download.svg')\n        toolbutton_layout.addWidget(self.download_tool_button)\n\n        toolbutton_layout.addItem(QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum))\n\n        self.search_line_edit = MLineEdit().search().small()\n        self.search_line_edit.setMaximumWidth(300)\n        toolbutton_layout.addWidget(self.search_line_edit)\n\n        main_layout.addLayout(toolbutton_layout)\n        render_output = TreeView()\n        render_output.data_model.set_data_list(mock.tree_data_list_2)\n        main_layout.addWidget(render_output)\n        self.setLayout(main_layout)\n\n\nclass AssetManageWin(QWidget):\n    def __init__(self, parent=None):\n        super(AssetManageWin, self).__init__(parent)\n        self._init_ui()\n        dayu_theme.apply(self)\n        self.setStyleSheet(self.styleSheet().replace('3a3a3a', '2c313c').replace('323232', '2c313c').replace('494949', '2c313c'))\n        self.setStyleSheet(self.styleSheet()+'\\nMTreeView::item:selected, \\nMTreeView::item:hover{background-color: #3c414d;}')\n\n    def _init_ui(self):\n        main_lay = QVBoxLayout()\n\n        tab_center = MLineTabWidget()\n\n        tab_center.add_tab(TeamAssetWin(),\n                           {'text': u'Team Assets', 'svg': f'{os.environ[\"ROOTPATH\"]}/icons/custom/team.svg'})\n\n        # tab_center.add_tab(RenderOutput(),\n        #                    {'text': u'Render Output', 'svg': f'{os.environ[\"ROOTPATH\"]}/icons/custom/render_output.svg'})\n        tab_center.tool_button_group.set_dayu_checked(0)\n\n        main_lay.addWidget(tab_center)\n        main_lay.addSpacing(20)\n        self.setLayout(main_lay)\n\n\nif __name__ == '__main__':\n    import sys\n    from dayu_widgets.qt import QApplication\n    from dayu_widgets import dayu_theme\n\n    app = QApplication(sys.argv)\n    test = AssetManageWin()\n\n    dayu_theme.apply(test)\n    test.show()\n    sys.exit(app.exec_())\n","repo_name":"DangoWang/RenderFarmManagementDemo","sub_path":"widgets/asset_manage_page.py","file_name":"asset_manage_page.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72353582664","text":"import galpynostatic.make_prediction\nimport galpynostatic.model\n\nimport numpy as np\n\nimport pytest\n\n# =============================================================================\n# TESTS\n# =============================================================================\n\n\n@pytest.mark.parametrize(\n    (\"experiment\"),\n    [\n        (\"nishikawa\"),\n        (\"mancini\"),\n        (\"he\"),\n        (\"wang\"),\n        (\"lei\"),\n        (\"bak\"),\n        (\"dokko\"),\n    ],\n)\ndef test_optimal_particle_size(experiment, request, spherical):\n    \"\"\"Test the prediction of the optimal particle size.\"\"\"\n    experiment = request.getfixturevalue(experiment)\n\n    greg = galpynostatic.model.GalvanostaticRegressor(d=experiment[\"d\"], z=3)\n    greg.dcoeff_, greg.k0_ = experiment[\"dcoeff\"], experiment[\"k0\"]\n    greg._map = galpynostatic.datasets.map.MapSpline(spherical)\n    greg.dcoeff_err_ = experiment[\"ref\"][\"dcoeff_err\"]\n\n    size, size_err = galpynostatic.make_prediction.optimal_particle_size(greg)\n\n    np.testing.assert_array_almost_equal(\n        size, experiment[\"ref\"][\"particle_size\"], 6\n    )\n    np.testing.assert_array_almost_equal(\n        size_err, experiment[\"ref\"][\"particle_size_err\"], 6\n    )\n\n\ndef test_raise(spherical):\n    \"\"\"Test that a ValueError is raised.\"\"\"\n    greg = galpynostatic.model.GalvanostaticRegressor(d=0.0015, z=3)\n    greg.dcoeff_, greg.k0_ = 1.93e-10, 3.14e-7\n    greg._map = galpynostatic.datasets.map.MapSpline(spherical)\n\n    with pytest.raises(ValueError):\n        galpynostatic.make_prediction.optimal_particle_size(\n            greg, minutes=1, loaded=1\n        )\n","repo_name":"fernandezfran/galpynostatic","sub_path":"tests/test_make_prediction.py","file_name":"test_make_prediction.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"38522768482","text":"import cv2\nimport numpy as np\n\n# Read an image\nimg = cv2.imread('foto na escada.jpeg')\nprint(img.shape)\n\nblurred_image = cv2.GaussianBlur(img, (9, 9), 0)\nhsv = cv2.cvtColor(blurred_image, cv2.COLOR_BGR2HSV)\n\n\n# Stair mask\nlower_hsv_escada = np.array([8, 0, 0])\nupper_hsv_escada = np.array([180, 255, 255])\nmaskescada = cv2.inRange(hsv, lower_hsv_escada, upper_hsv_escada)\ninverted_mask_escada = cv2.bitwise_not(maskescada)\n\n# Skin mask\nlower_hsv_pele = np.array([19, 0, 0])\nupper_hsv_pele = np.array([180, 255, 255])\nmaskPele = cv2.inRange(hsv, lower_hsv_pele, upper_hsv_pele)\n\n# Clothing mask\nlower_hsv_roupa = np.array([0, 0, 0])\nupper_hsv_roupa = np.array([19, 255, 255])\nmaskroupa = cv2.inRange(hsv, lower_hsv_roupa, upper_hsv_roupa)\n\n# Combined masks\ncombined_mask = cv2.bitwise_or(inverted_mask_escada, maskroupa)\n\ncombined_mask2 = cv2.bitwise_and(maskPele, combined_mask)\n\ninverted_mask = cv2.bitwise_not(combined_mask2)\n\n# Segmenting with the masks\nsegmented_image = cv2.bitwise_and(img, img, mask=inverted_mask)\n\n\ncv2.imshow('inverted_mask_escada', maskescada)\ncv2.imshow('maskroupa', maskroupa)\n\n\ncv2.waitKey(0)","repo_name":"davitgouveia/Processamento-Digital-de-Imagens","sub_path":"Prova 1/Imagens/segmentacao_escada.py","file_name":"segmentacao_escada.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74909062023","text":"# 7/18/23, Sophia Cofone, Omnic ML Project\n# Purpose of these functions is to make decision tree models for the class classification\n\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nimport graphviz \n\n\ndef d_tree(X_train,y_train,X_test,y_test):\n
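    # Baseline, unpruned tree. With no depth or leaf constraints it will\n    # typically overfit (near-perfect train accuracy); compare against\n    # prune_dtree below, which exposes the pruning hyperparameters.\n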
the raise of the ValueError.\"\"\"\n greg = galpynostatic.model.GalvanostaticRegressor(d=0.0015, z=3)\n greg.dcoeff_, greg.k0_ = 1.93e-10, 3.14e-7\n greg._map = galpynostatic.datasets.map.MapSpline(spherical)\n\n with pytest.raises(ValueError):\n galpynostatic.make_prediction.optimal_particle_size(\n greg, minutes=1, loaded=1\n )\n","repo_name":"fernandezfran/galpynostatic","sub_path":"tests/test_make_prediction.py","file_name":"test_make_prediction.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"38522768482","text":"import cv2\nimport numpy as np\n\n# Read an image\nimg = cv2.imread('foto na escada.jpeg')\nprint(img.shape)\n\nblurred_image = cv2.GaussianBlur(img, (9, 9), 0)\nhsv = cv2.cvtColor(blurred_image, cv2.COLOR_BGR2HSV)\n\n\n# Stair mask\nlower_hsv_escada = np.array([8, 0, 0])\nupper_hsv_escada = np.array([180, 255, 255])\nmaskescada = cv2.inRange(hsv, lower_hsv_escada, upper_hsv_escada)\ninverted_mask_escada = cv2.bitwise_not(maskescada)\n\n# Skin mask\nlower_hsv_pele = np.array([19, 0, 0])\nupper_hsv_pele = np.array([180, 255, 255])\nmaskPele = cv2.inRange(hsv, lower_hsv_pele, upper_hsv_pele)\n\n# Clothes mask\nlower_hsv_roupa = np.array([0, 0, 0])\nupper_hsv_roupa = np.array([19, 255, 255])\nmaskroupa = cv2.inRange(hsv, lower_hsv_roupa, upper_hsv_roupa)\n\n# Combined masks\ncombined_mask = cv2.bitwise_or(inverted_mask_escada, maskroupa)\n\ncombined_mask2 = cv2.bitwise_and(maskPele, combined_mask)\n\ninverted_mask = cv2.bitwise_not(combined_mask2)\n\n# Segmenting with the masks\nsegmented_image = cv2.bitwise_and(img, img, mask=inverted_mask)\n\n\ncv2.imshow('inverted_mask_escada', maskescada)\ncv2.imshow('maskroupa', maskroupa)\n\n\ncv2.waitKey(0)","repo_name":"davitgouveia/Processamento-Digital-de-Imagens","sub_path":"Prova 1/Imagens/segmentacao_escada.py","file_name":"segmentacao_escada.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"74909062023","text":"# 7/18/23, Sophia Cofone, Omnic ML Project\n# The purpose of these functions is to build decision tree models for the class classification\n\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nimport graphviz \n\n\ndef d_tree(X_train,y_train,X_test,y_test):\n dtc = DecisionTreeClassifier(random_state=1)\n dtc.fit(X_train, y_train)\n y_pred = dtc.predict(X_test)\n y_pred_train = dtc.predict(X_train)\n print(\"Train Accuracy:\", accuracy_score(y_train, y_pred_train))\n print(confusion_matrix(y_train, y_pred_train))\n print(classification_report(y_train, y_pred_train))\n\n print(\"Test Accuracy:\", accuracy_score(y_test, y_pred))\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n \n return dtc\n\ndef prune_dtree(X_train,y_train,X_test,y_test,min_samples_leaf,min_samples_split,max_depth):\n dtc = DecisionTreeClassifier(min_samples_leaf=min_samples_leaf,min_samples_split=min_samples_split,max_depth=max_depth,random_state=1)\n dtc.fit(X_train, y_train)\n y_pred = dtc.predict(X_test)\n y_pred_train = dtc.predict(X_train)\n print(\"Train Accuracy:\", accuracy_score(y_train, y_pred_train))\n print(confusion_matrix(y_train, y_pred_train))\n print(classification_report(y_train, y_pred_train))\n\n print(\"Test Accuracy:\", accuracy_score(y_test, y_pred))\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n \n return dtc\n\n# def vis_dtree(dtc, columns,save_path):\n# plt.figure(figsize=(12, 6))\n# plot_tree(dtc, feature_names=columns, class_names=str(dtc.classes_), filled=True)\n# plt.savefig(save_path, dpi=600, bbox_inches='tight')\n# plt.show()\n\ndef vis_dtree(dtc, columns,save_path):\n # DOT data\n dot_data = tree.export_graphviz(dtc, out_file=None,\n feature_names=columns, \n class_names=['Sentinels', 'Controllers', 'Duelists','Initiators'],\n filled=True)\n\n # Draw graph (render writes <save_path>.pdf by default)\n graph = graphviz.Source(dot_data) \n graph.render(save_path) \n\ndef d_tree_tuning(X_train, y_train, X_test, y_test):\n param_grid = {\n 'min_samples_leaf': [1, 2, 3], # Example values for min_samples_leaf\n 'min_samples_split': [2, 5, 10] # Example values for min_samples_split\n }\n\n dtc = DecisionTreeClassifier(random_state=1)\n grid_search = GridSearchCV(dtc, param_grid, cv=5, verbose=2)\n grid_search.fit(X_train, y_train)\n best_dtc = grid_search.best_estimator_\n y_pred_train = best_dtc.predict(X_train)\n y_pred = best_dtc.predict(X_test)\n\n print(\"Best Parameters:\", grid_search.best_params_)\n print(\"Train Accuracy:\", accuracy_score(y_train, y_pred_train))\n print(confusion_matrix(y_train, y_pred_train))\n print(classification_report(y_train, y_pred_train))\n print(\"Test Accuracy:\", accuracy_score(y_test, y_pred))\n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n\n return best_dtc\n\ndef f_importance(dtc, columns,csv_name):\n feature_importances = pd.DataFrame(dtc.feature_importances_,\n index = columns,\n columns=['importance']).sort_values('importance', ascending=False)\n \n feature_importances.to_csv(csv_name)\n return feature_importances\n","repo_name":"sophiacofone/valorant_omnic","sub_path":"roles/class_model.py","file_name":"class_model.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
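The d_tree/prune_dtree helpers in class_model.py above follow scikit-learn's standard fit/predict/report pattern; a self-contained sketch of that pattern on a stand-in dataset (the iris data and the pruning values here are illustrative assumptions, not taken from the original repo):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report

# toy data standing in for the repo's feature matrix and labels
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

# same pruning knobs that prune_dtree exposes
dtc = DecisionTreeClassifier(min_samples_leaf=2, min_samples_split=5, max_depth=3, random_state=1)
dtc.fit(X_train, y_train)

print("Test Accuracy:", accuracy_score(y_test, dtc.predict(X_test)))
print(classification_report(y_test, dtc.predict(X_test)))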
+{"seq_id":"24381876955","text":"from bunch import Bunch\nimport json\nimport math\nimport os\nimport pneumodel\nimport sys\nimport time\n\n##################################\n### Functions used in fitting ###\n##################################\n\n##################\n# Loss functions #\n##################\n\ndef _log_factorial(n):\n ''' log(n!) using Ramanujan's approximation '''\n assert n > 0\n return (\n n * math.log(n) - n +\n math.log(1.0 + 1.0 / (2.0 * n) + 1.0 / (8.0 * n * n)) / 6.0 +\n math.log(2.0 * n) / 2.0 +\n math.log(3.141592653589793238462643383) / 2.0\n )\n\ndef _multinomial_loglikelihood(p, x):\n ''' P(x|p) where x ~ Multinomial(p) '''\n assert len(p) == len(x)\n for p_i in p:\n assert 0.0 <= p_i <= 1.0\n for x_i in x:\n assert x_i >= 0\n n = sum(x)\n llh = _log_factorial(n)\n for p_i, x_i in zip(p, x):\n llh += (-_log_factorial(x_i) + x_i * math.log(p_i))\n return llh\n\n\ndef total_prevalence_absolute_loss(config_file, theta, output_dir):\n # set point by updating config file\n _update_config(config_file, theta)\n # get results\n pneumodel.run_simulation(config_file, output_dir)\n results = pneumodel.get_simulation_results(output_dir)\n ...\n\n##########################\n# Perturbation functions #\n##########################\n\n \n\n######################\n# Boundary functions #\n######################\n\ndef standard_boundary(theta):\n # beta must be non-negative\n if 'beta' in theta:\n theta.beta = max(0, theta.beta)\n\n # ranks must be in [1, num_serotypes]\n if 'ranks' in theta:\n num_serotypes = len(theta.ranks)\n for i, x in enumerate(theta.ranks):\n theta.ranks[i] = max(1, min(x, num_serotypes))\n\n # vaccine efficacies must be in [0, 1]\n if 'efficacies' in theta:\n for i, x in enumerate(theta.efficacies):\n theta.efficacies[i] = max(0, min(x, 1))\n\n return theta\n\n\n########################\n### Fitting routine ###\n########################\n\ndef fit_model(config_file, output_dir, theta, loss_func, perturb_func, update_func, boundary_func):\n ''' Fits model parameters.\n \n Expects a configuration file that specifies:\n - num_iterations \n - simulation_config_file\n theta : starting point in parameter space\n Points should be represented as an object with the following attributes:\n theta.beta\n theta.ranks : ranks of the serotypes\n theta.efficacies : (vaccine_name, efficacy) pairs\n\n The other arguments are functions. (i is iteration number)\n loss_func : point, config, output_dir -> loss (scalar- or vector-valued)\n perturb_func : i, current point -> a set of new points\n update_func : i, set of (p, loss(p)) pairs -> proposed next point\n boundary_func : proposed next point -> next point satisfying boundary conditions\n \n '''\n try:\n os.makedirs(output_dir)\n except os.error as e:\n raise ValueError('Error creating {}: {}'.format(output_dir, e))\n\n get_output_dir = lambda i, j: os.path.join(output_dir, 'iter-{}'.format(i), 'point-{}'.format(j))\n\n print ('Starting fitting process...')\n history = []\n\n # make a working copy of the configuration files\n working_config_file = pneumodel.copy_config_files(config_file, os.path.join(output_dir, 'working-configuration'))\n \n # check loss at the start\n history.append((theta, loss_func(working_config_file, theta, get_output_dir(0, 0))))\n\n # number of iterations comes from the fitting configuration\n # (assumes a JSON configuration file, matching the json import above)\n with open(config_file) as config_fh:\n n_iters = json.load(config_fh)['num_iterations']\n\n # iterations of the fitting process\n for i in range(n_iters):\n t0_iter = time.time()\n print ('Iteration {} of {}...'.format(i + 1, n_iters))\n\n # perturb points\n perturbed = perturb_func(i, theta)\n\n # calculate losses at perturbed points\n losses = []\n for j, p in enumerate(perturbed):\n print ('\\tCalculating loss for {} of {} points...'.format(j + 1, len(perturbed)))\n losses.append((p, loss_func(working_config_file, p, get_output_dir(i + 1, j))))\n\n # propose the next point, apply boundary conditions\n proposal = update_func(i, losses)\n theta = boundary_func(proposal)\n\n # check our loss now\n history.append((theta, loss_func(working_config_file, theta, get_output_dir(0, 0))))\n\n # display time elapsed for this iteration\n t_iter = time.time() - t0_iter\n print ('completed. {}m {}s'.format(int(t_iter / 60), t_iter % 60))\n\n return history\n","repo_name":"lucymli/Pneumo-ABM","sub_path":"scripts/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
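The Ramanujan formula in _log_factorial above can be sanity-checked against the standard library, since log(n!) = math.lgamma(n + 1); a small independent check (no pneumodel required):

import math

def log_factorial(n):
    # Ramanujan's approximation to log(n!), mirroring _log_factorial above
    assert n > 0
    return (
        n * math.log(n) - n +
        math.log(1.0 + 1.0 / (2.0 * n) + 1.0 / (8.0 * n * n)) / 6.0 +
        math.log(2.0 * n) / 2.0 +
        math.log(math.pi) / 2.0
    )

for n in (1, 5, 50, 500):
    exact = math.lgamma(n + 1)  # log(n!) computed without overflow
    print(n, round(exact, 6), round(log_factorial(n), 6))  # agrees to several decimals even at n=1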
+{"seq_id":"24326753476","text":"import urllib.request\nfrom inscriptis import get_text\nfrom googlesearch import search\nimport pandas as pd\n\nf = open(\"son_crawl.txt\", \"a\", encoding=\"utf-8\")\ndf = pd.read_csv('C:/Users/Lenovo/Desktop/Bitirme/Word2Vec.v3/files/product_catalog.v2.csv')\ndf_clean = df.drop_duplicates(subset=['ProductName', 'ManufacturerName'])\ni = 0\nfor query in df_clean['ProductName']:\n print(query, i, len(df_clean['ProductName']))\n for url in search(query, tld=\"co.in\", lang=\"tr\", num=5, stop=5, pause=1):\n print(url)\n if url.find('youtube') == -1 and url.find('wikipedia') == -1 and url.find('carrefour') == -1:\n try:\n html = urllib.request.urlopen(url, timeout=1).read().decode('utf-8')\n text = get_text(html)\n f.write(text)\n #print(text)\n except Exception:\n print(\"An exception occurred\")\n i += 1\n\n\n\n","repo_name":"simgesaricayir/Machine-Learning-Supported-Semantic-Search-Engine","sub_path":"GoogleCrawl/GoogleCrawl.py","file_name":"GoogleCrawl.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"41735128995","text":"\"\"\"\n Saper - the main Tkinter GUI module of the minesweeper game.\n\"\"\"\n\n# import the graphics library that handles the GUI windows\nimport tkinter as tk\nfrom tkinter import ttk, N, S, W, E\nfrom game_lib import (\n LMOUSE_CLICK_EVENT,\n RMOUSE_CLICK_EVENT,\n DEFAULT_MINE_COUNT,\n odkryj_pole,\n oznacz_pole,\n nowa_gra\n)\n\n\ndef create_minesweeper_window():\n \"\"\"\n Initializes the Tk graphical components.\n Returns a window object ready to be run (displayed).\n \"\"\"\n\n # The statements below initialize the graphical components of the Tk window\n # (widget declarations, their placement in the window, binding functions to events)\n\n # main window\n root_window = tk.Tk()\n root_window.title(\"Wężowy saper\")\n\n # main frame - the basic container for the remaining widgets\n mainframe = ttk.Frame(root_window, padding=\"10 10 10 10\")\n mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\n root_window.columnconfigure(0, weight=1)\n root_window.rowconfigure(0, weight=1)\n\n # add the buttons representing the 10x10 minefield\n fields_grid = {}\n for row_num in range(10):\n for col_num in range(10):\n field_button = ttk.Button(\n mainframe,\n text=\" \",\n width=2,\n state=tk.DISABLED,\n )\n field_button.grid(column=row_num, row=col_num, sticky=W)\n field_button.bind(LMOUSE_CLICK_EVENT, odkryj_pole)\n field_button.bind(RMOUSE_CLICK_EVENT, oznacz_pole)\n # remember the button in the grid under its index\n fields_grid[(row_num, col_num)] = field_button\n\n # game counters\n var_total_mines = tk.IntVar(value=DEFAULT_MINE_COUNT)\n var_games_played = tk.IntVar(value=0)\n var_hidden_mines = tk.IntVar(value=DEFAULT_MINE_COUNT)\n\n # add static informational labels\n ttk.Label(mainframe, text=\"Gra:\").grid(column=0, row=11, columnspan=5, sticky=W)\n ttk.Label(mainframe, text=\"Pozostało min:\").grid(column=0, row=12, columnspan=5, sticky=W)\n # values for the labels\n ttk.Label(mainframe, textvariable=var_games_played).grid(column=7, row=11, columnspan=5, sticky=E)\n ttk.Label(mainframe, textvariable=var_hidden_mines).grid(column=7, row=12, columnspan=5, sticky=E)\n\n # text field for entering the number of mines to place\n mine_count_input = ttk.Entry(mainframe, width=7, textvariable=var_total_mines)\n mine_count_input.grid(column=0, row=13, columnspan=5, sticky=W)\n\n # definition of the new-game button\n new_game_button = ttk.Button(mainframe, text=\"Nowa gra\")\n new_game_button.grid(column=7, row=13, columnspan=5, sticky=W)\n new_game_button.bind(LMOUSE_CLICK_EVENT, nowa_gra)\n\n # store the window \"configuration\" - the buttons and the game counters -\n # under a single object\n mainframe.game_settings = {\n # game counters\n 'rozegrane_gry': var_games_played, # session counter\n 'liczba_ukrytych_min': var_hidden_mines, # current number of mines not yet disarmed\n 'liczba_wszystkich_min': var_total_mines, # total number of mines on the field\n # mine settings\n 'siatka': fields_grid, # fields (with their indices)\n 'wszystkie_pola': list(fields_grid.values()), # all fields\n 'pola_z_minami': [], # fields armed with a mine\n 'pola_oznaczone': [], # fields flagged by the player\n 'liczniki_pol': {} # information about the per-field counters\n }\n\n # return the finished object, which should be activated by calling:\n # root_window.mainloop()\n # running it draws the GUI windows and starts listening for events\n # (e.g. the user clicking)\n\n return root_window\n","repo_name":"piotr-gomola/python-minesweeper","sub_path":"mineswp.py","file_name":"mineswp.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
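In mineswp.py above every field button is bound to the same shared handlers, so a handler must recover which button fired from the event object; a standalone sketch of that lookup pattern (the "<Button-1>" event name is an assumption about what LMOUSE_CLICK_EVENT wraps, and the sketch needs a display to run):

import tkinter as tk

root = tk.Tk()
grid = {}

def on_left_click(event):
    # event.widget is the button that received the click; look up its coordinates
    for coords, button in grid.items():
        if button is event.widget:
            print("clicked field", coords)

for row in range(3):
    for col in range(3):
        btn = tk.Button(root, text=" ", width=2)
        btn.grid(row=row, column=col)
        btn.bind("<Button-1>", on_left_click)
        grid[(row, col)] = btn

# root.mainloop()  # uncomment to run interactively; left out so the sketch does not block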
\"\"\"\n\n # Poniżej znajdują się instrukcje inicjalizujące komponenty graficzne okna Tk\n # (deklaracje widgetów, umiejscowienie ich w oknie, przypisanie funkcji do zdarzeń)\n\n # okno główne\n root_window = tk.Tk()\n root_window.title(\"Wężowy saper\")\n\n # ramka główna- podstawowy kontener na pozostałe widgety\n mainframe = ttk.Frame(root_window, padding=\"10 10 10 10\")\n mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\n root_window.columnconfigure(0, weight=1)\n root_window.rowconfigure(0, weight=1)\n\n # dodawanie przycisków reprezentujących pole minowe w rozmiarze 10x10\n fields_grid = {}\n for row_num in range(10):\n for col_num in range(10):\n field_button = ttk.Button(\n mainframe,\n text=\" \",\n width=2,\n state=tk.DISABLED,\n )\n field_button.grid(column=row_num, row=col_num, sticky=W)\n field_button.bind(LMOUSE_CLICK_EVENT, odkryj_pole)\n field_button.bind(RMOUSE_CLICK_EVENT, oznacz_pole)\n # zapamiętanie przycisku na liście pod indeksem\n fields_grid[(row_num, col_num)] = field_button\n\n # liczniki gry\n var_total_mines = tk.IntVar(value=DEFAULT_MINE_COUNT)\n var_games_played = tk.IntVar(value=0)\n var_hidden_mines = tk.IntVar(value=DEFAULT_MINE_COUNT)\n\n # dodanie statycznych etykietek informacyjnych\n ttk.Label(mainframe, text=\"Gra:\").grid(column=0, row=11, columnspan=5, sticky=W)\n ttk.Label(mainframe, text=\"Pozostało min:\").grid(column=0, row=12, columnspan=5, sticky=W)\n # wartości dla etykiet\n ttk.Label(mainframe, textvariable=var_games_played).grid(column=7, row=11, columnspan=5, sticky=E)\n ttk.Label(mainframe, textvariable=var_hidden_mines).grid(column=7, row=12, columnspan=5, sticky=E)\n\n # polte tekstowe na wpisanie liczby min do rozmieszczenia\n mine_count_input = ttk.Entry(mainframe, width=7, textvariable=var_total_mines)\n mine_count_input.grid(column=0, row=13, columnspan=5, sticky=W)\n\n # definicja przycisku nowej gry\n new_game_button = ttk.Button(mainframe, text=\"Nowa gra\")\n new_game_button.grid(column=7, row=13, columnspan=5, sticky=W)\n new_game_button.bind(LMOUSE_CLICK_EVENT, nowa_gra)\n\n # zapamiętanie pod jednym obiektem \"konfiguracji\" okna\n # przycisków oraz liczników gry\n mainframe.game_settings = {\n # liczniki gry\n 'rozegrane_gry': var_games_played, # licznik sesji\n 'liczba_ukrytych_min': var_hidden_mines, # aktualna liczba nierozbrojonych min\n 'liczba_wszystkich_min': var_total_mines, # całkowita liczba min na polu\n # ustawienia min\n 'siatka': fields_grid, # pola (wraz z indeksami)\n 'wszystkie_pola': list(fields_grid.values()), # wszystkie pola\n 'pola_z_minami': [], # pola zazbrojone miną\n 'pola_oznaczone': [], # pole oznaczone przez gracza\n 'liczniki_pol': {} # informacje o licznikach w polach\n }\n\n # zwrócenie gotowego obiektu, który należy aktywować przez wywołanie:\n # root_window.mainloop()\n # uruchomienie rysuje okienka graficzne i uruchamia nasłuch na zdarzenia\n # (np. 
klikanie przez użytkownika)\n\n return root_window\n","repo_name":"piotr-gomola/python-minesweeper","sub_path":"mineswp.py","file_name":"mineswp.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16809048214","text":"import logging\r\n\r\nimport numpy as np\r\nimport tables as tb\r\n\r\nimport progressbar\r\n\r\nfrom pybar.fei4.register_utils import make_box_pixel_mask_from_col_row\r\nfrom pybar.fei4_run_base import Fei4RunBase\r\nfrom pybar.run_manager import RunManager\r\nfrom pybar.analysis.plotting.plotting import plot_three_way\r\n\r\n\r\nclass IleakScan(Fei4RunBase):\r\n '''Pixel leakage current scan using external multimeter.\r\n '''\r\n _default_run_conf = {\r\n \"broadcast_commands\": False,\r\n \"threaded_scan\": False,\r\n \"pixels\": (np.dstack(np.where(make_box_pixel_mask_from_col_row([1, 16], [1, 36]) == 1)) + 1).tolist()[0], # list of (col, row) tupels. From 1 to 80/336.\r\n }\r\n\r\n def configure(self):\r\n commands = []\r\n commands.extend(self.register.get_commands(\"ConfMode\"))\r\n self.register.set_pixel_register_value('Imon', 0)\r\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=True, name='Imon'))\r\n self.register_utils.send_commands(commands)\r\n self.ileakmap = np.zeros(shape=(80, 336))\r\n\r\n def scan(self):\r\n logging.info(\"Scanning %d pixels\" % len(self.pixels))\r\n progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(self.pixels), term_width=80)\r\n progress_bar.start()\r\n\r\n data_out = self.raw_data_file.h5_file.create_carray(self.raw_data_file.h5_file.root, name='Ileak_map', title='Leakage current per pixel in arbitrary units', atom=tb.Atom.from_dtype(self.ileakmap.dtype), shape=self.ileakmap.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))\r\n\r\n for pixel_index, (column, row) in enumerate(self.pixels):\r\n if self.stop_run.is_set():\r\n break\r\n # Set Imon for actual pixel and configure FE\r\n mask = np.zeros(shape=(80, 336))\r\n mask[column - 1, row - 1] = 1\r\n commands = []\r\n commands.extend(self.register.get_commands(\"ConfMode\"))\r\n self.register.set_pixel_register_value('Imon', mask)\r\n commands.extend(self.register.get_commands(\"WrFrontEnd\", same_mask_for_all_dc=False, name='Imon'))\r\n self.register_utils.send_commands(commands)\r\n # Read and store voltage\r\n voltage_string = self.dut['Multimeter'].get_voltage()\r\n voltage = float(voltage_string.split(',')[0][:-4])\r\n self.ileakmap[column - 1, row - 1] = voltage\r\n\r\n progress_bar.update(pixel_index)\r\n\r\n progress_bar.finish()\r\n\r\n data_out[:] = self.ileakmap\r\n\r\n def analyze(self):\r\n with tb.open_file(self.output_filename + '.h5', 'r') as in_file_h5:\r\n data = in_file_h5.root.Ileak_map[:]\r\n data = np.ma.masked_where(data == 0, data)\r\n plot_three_way(hist=data.transpose(), title=\"Ileak\", x_axis_title=\"Ileak\", filename=self.output_filename + '.pdf') # , minimum=0, maximum=np.amax(data))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n with RunManager('configuration.yaml') as runmngr:\r\n runmngr.run_run(IleakScan)\r\n","repo_name":"SiLab-Bonn/pyBAR","sub_path":"pybar/scans/scan_ileak.py","file_name":"scan_ileak.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} 
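The analyze step of scan_ileak.py above masks unmeasured (zero) pixels with np.ma.masked_where before plotting; a minimal illustration of why that matters for the statistics:

import numpy as np

data = np.array([[0.0, 1.2, 0.0],
                 [0.9, 0.0, 1.1]])

# zeros mark pixels that were never measured; mask them out
masked = np.ma.masked_where(data == 0, data)

print(masked.mean())  # mean over the measured pixels only (~1.07)
print(data.mean())    # naive mean, dragged down by the unmeasured zeros (0.53)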
+{"seq_id":"40962055279","text":"\"\"\"\nsome utilities to work with xarray objects\n\"\"\"\n\nimport numpy as np\nimport xarray as xr\n\n\ndef strip_coords(X, coords=None, inplace=False, as_str=True):\n \"\"\"\n strip blanks from string coordinates\n\n Parameters\n ----------\n X : DataArray or Dataset\n\n coords : iterable (Default None)\n Iterable of coordinates to alter.\n If `None`, apply to all (string) coordinates\n\n inplace : bool (Default False):\n if True, do inplace modification\n\n as_str : bool, default=True\n if `True`, apply .astype(str) to output.\n This helps if input has b'foo' types (which are annoying to work with)\n\n\n \"\"\"\n if inplace:\n out = X\n else:\n out = X.copy()\n\n if coords is None:\n coords = out.coords.keys()\n\n for k in coords:\n if out[k].dtype.kind == 'S':\n o = np.char.strip(out[k])\n if as_str:\n o = o.astype(str)\n out[k] = o\n\n if not inplace:\n return out\n\n\ndef where(self, condition, *args, **kwargs):\n \"\"\"\n perform inplace where\n\n Parameters\n ----------\n self : dataset or datarray\n must have `where`` method\n condition: mask or function\n condition to apply\n *args, **kwargs: arguments to self.where\n\n Returns\n -------\n output : self.where(condition, *args, **kwargs)\n\n\n Usage\n -----\n self.pipe(where, lambda x: x > 0.0)\n \"\"\"\n\n if not hasattr(self, 'where'):\n raise AttributeError('self must have `where` method')\n\n if callable(condition):\n return self.where(condition(self), *args, **kwargs)\n else:\n return self.where(condition, *args, **kwargs)\n\n\ndef average(x, w=None,\n dim=None, axis=None,\n var=False, unbiased=True, std=False,\n name=None,\n mask_null=True):\n \"\"\"\n (weighted) average of DataArray\n\n Parameters\n ----------\n x : xarray.DataArray\n array to average over\n w : xarray.DataArray, optional\n array of weights\n dim : str or list of strings, optional\n dimensions to average over. See `xarray.DataArray.sum`\n axis : int or list of ints, optional\n axis to average over. See `xarray.DataArray.sum`\n var : bool, default=False\n If `True`, calculate weighted variance as well\n std : bool, default=False\n If `True`, return standard deviation, i.e., `sqrt(var)`\n unbiased : bool, default=True\n If `True`, return unbiased variance\n name : str, optional\n if supplied, name of output average. 
Variance is named 'name_var' or 'name_std'\n mask_null : bool, default=True\n if `True`, mask values where x and w are all null across `dim` or `axis`.\n This prevents zero results from nan sums.\n\n Returns\n -------\n average : xarray.DataArray\n averaged data\n err : xarray.DataArray, optional\n weighted variance if `var==True` or standard deviation if `std==True`.\n \"\"\"\n assert type(x) is xr.DataArray\n if w is None:\n w = xr.ones_like(x)\n assert type(w) is xr.DataArray\n # only consider weights with finite x\n # note that this will reshape w to same shape as x as well\n w = w.where(np.isfinite(x))\n # scale w\n w = w / w.sum(dim=dim, axis=axis)\n\n # output names\n if name:\n var_name = name + ('_std' if std else '_var')\n else:\n var_name = None\n\n # mean\n m1 = (w * x).sum(dim=dim, axis=axis)\n\n if mask_null:\n msk = (~x.isnull().all(dim=dim, axis=axis)) & (~w.isnull().all(dim=dim, axis=axis))\n m1 = m1.where(msk)\n \n\n # variance\n if var or std:\n m2 = (w * (x - m1)**2).sum(dim=dim, axis=axis)\n if unbiased:\n w1 = 1.0\n w2 = (w * w).sum(dim=dim, axis=axis)\n m2 *= w1 * w1 / (w1 * w1 - w2)\n\n if std:\n m2 = np.sqrt(m2)\n\n if mask_null:\n m2 = m2.where(msk)\n\n return m1.rename(name), m2.rename(var_name)\n\n else:\n return m1.rename(name)\n","repo_name":"wpk-nist-gov/my_utilities","sub_path":"xarray/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"459580608","text":"import pytest, os, yaml, sys\n\nfrom Base.getData import GetData\n\nsys.path.append(os.getcwd())\n\n\ndef get_sum_data():\n # list for collecting the data sets\n sum_list = []\n # read the data from sum.yml\n data = GetData().get_yml_data(\"sum.yml\")\n for i in data.values():\n sum_list.append(tuple(i.values()))\n return sum_list\n\n\n\"\"\"\ndata={\n \"test_sum1\": {\"a\": 1, \"b\": 2,\"c\": 3}\n \"test_sum2\": {\"a\": 2, \"b\": 3,\"c\": 5}\n \"test_sum3\": {\"a\": 4, \"b\": 5,\"c\": 9}}\n\"\"\"\n\n\nclass TestSum:\n @pytest.mark.parametrize(\"a,b,c\", get_sum_data())\n def test_sum(self, a, b, c):\n print(\"{}+{}={}\".format(a, b, c))\n assert a + b == c\n","repo_name":"DianaH1027/bj-test17-app09","sub_path":"scripts/test02_sum.py","file_name":"test02_sum.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"36090613050","text":"import sqlite3\nfrom bs4 import BeautifulSoup\nfrom requests import get\nfrom time import sleep\n\n\ndef scrape_books(url, page):\n all_books = []\n while page:\n res = get(url + page)\n soup = BeautifulSoup(res.text, 'html.parser')\n books = soup.find_all('article')\n for book in books:\n data = (get_title(book), get_price(book), get_rating(book))\n all_books.append(data)\n next_btn = soup.find(class_='next')\n page = next_btn.find('a')['href'] if next_btn else None\n sleep(1)\n books_data(all_books)\n\n\ndef get_title(book):\n return book.find('h3').find('a')['title']\n\n\ndef get_price(book):\n price = book.select('.price_color')[0].get_text()\n return float(price.replace('Â', '').replace('£', ''))\n\n\ndef get_rating(book):\n rate = book.find('p')['class']\n stars = {'Zero': 0, 'One': 1, 'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5}\n return stars[rate[-1]]\n\n\ndef books_data(all_books):\n connection = sqlite3.connect('books_scraper.db')\n c = connection.cursor()\n c.execute('CREATE TABLE books (title TEXT, price REAL, rate INTEGER)')\n c.executemany('INSERT INTO books VALUES (?, ?, ?)', all_books)\n connection.commit()\n connection.close()\n\n\nurl = 'http://books.toscrape.com/catalogue/'\npage = 'page-1.html'\nscrape_books(url, page)\n","repo_name":"mhiloca/PythonBootcamp","sub_path":"SQL/scrape_books_sql.py","file_name":"scrape_books_sql.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
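books_data in scrape_books_sql.py above relies on a parameterized executemany so sqlite3 escapes the scraped strings itself; the same pattern in isolation, against an in-memory database with illustrative rows:

import sqlite3

rows = [("A Light in the Attic", 51.77, 3),
        ("Tipping the Velvet", 53.74, 1)]

conn = sqlite3.connect(":memory:")
c = conn.cursor()
c.execute("CREATE TABLE books (title TEXT, price REAL, rate INTEGER)")
# '?' placeholders let the driver quote values; never build SQL with string formatting
c.executemany("INSERT INTO books VALUES (?, ?, ?)", rows)
conn.commit()

print(c.execute("SELECT title, price FROM books WHERE rate >= 2").fetchall())
conn.close()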
+{"seq_id":"27566455059","text":"from sys import stdin\ninput = stdin.readline\nn, q = map(int, input().split())\nc = list(map(int, input().split()))\ndata = []\nfor i in range(q):\n data.append([i] + list(map(int, input().split())))\ndata.sort(key=lambda x : x[2])\nans = [0 for i in range(q)]\n#####segfunc######\ndef segfunc(x,y):\n return x + y\n\ndef init(init_val):\n #set_val\n for i in range(len(init_val)):\n seg[i+num-1]=init_val[i] \n #built\n for i in range(num-2,-1,-1) :\n seg[i]=segfunc(seg[2*i+1],seg[2*i+2]) \n \ndef update(k,x):\n k += num-1\n seg[k] = x\n while k:\n k = (k-1)//2\n seg[k] = segfunc(seg[k*2+1],seg[k*2+2])\n \ndef query(p,q):\n if q<=p:\n return ide_ele\n p += num-1\n q += num-2\n res=ide_ele\n while q-p>1:\n if p&1 == 0:\n res = segfunc(res,seg[p])\n if q&1 == 1:\n res = segfunc(res,seg[q])\n q -= 1\n p = p//2\n q = (q-1)//2\n if p == q:\n res = segfunc(res,seg[p])\n else:\n res = segfunc(segfunc(res,seg[p]),seg[q])\n return res\n\n##### identity element ######\nide_ele = 0 \nnum = 2**(n-1).bit_length()\nseg=[ide_ele]*(2*num - 1)\ngood_ball = dict()\npre_r = 0\ncount = 0\nfor i in range(q):\n index, l, r = data[i]\n l, r = l-1, r-1\n for j in range(pre_r, r+1):\n if c[j] in good_ball: \n update(good_ball[c[j]], 0) \n update(j, 1)\n good_ball[c[j]] = j\n pre_r = r + 1\n ans[index] = query(l, r+1)\nfor i in range(q):\n print(ans[i])","repo_name":"Yuta123456/AtCoder","sub_path":"python/AtCoder Beginner Contest 174/F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
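The iterative segment tree in F.py above keeps its leaves at indices num-1 .. 2*num-2 and repairs the parent sums on every point update; a compact demonstration with segfunc = addition and ide_ele = 0:

n = 5
num = 2 ** (n - 1).bit_length()   # round the leaf count up to a power of two
seg = [0] * (2 * num - 1)

def update(k, x):
    # set leaf k to x, then fix the sums on the path up to the root
    k += num - 1
    seg[k] = x
    while k:
        k = (k - 1) // 2
        seg[k] = seg[2 * k + 1] + seg[2 * k + 2]

for i, v in enumerate([3, 1, 4, 1, 5]):
    update(i, v)

print(seg[0])  # 14: the root holds the sum over the whole range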
+{"seq_id":"1095066128","text":"\"\"\"Methods for calculating the Solar Reflective Index (SRI) of materials and constructions.\"\"\"\n\nimport numpy as np\nfrom honeybee_energy.construction.opaque import OpaqueConstruction\nfrom honeybee_energy.material.opaque import EnergyMaterial\nfrom ..bhom.analytics import bhom_analytics\n\n\n@bhom_analytics()\ndef calculate_sri(\n solar_reflectance: float,\n thermal_emittance: float,\n insolation: float = 1000,\n air_temperature: float = 36.85,\n sky_temperature: float = 26.85,\n wind_speed: float = 4,\n) -> float:\n \"\"\"Calculate the SRI of a material from its reflectance and emittance.\n Note: this method assumes a horizontal material (facing the sky).\n\n This method is based on the tool created by Ronnen Levinson, Heat Island\n Group, LBNL. It uses the method from ASTM Standard E1980-11 to calculate\n the SRI of a material, given its solar reflectance and thermal emittance.\n\n Wind speed is used instead of a wind convection coefficient, based on the\n suggested values in ASTM E1980-11.\n\n Args:\n solar_reflectance (float): Solar reflectance of the material. Unitless,\n between 0 (black-body) and 1 (white-body).\n thermal_emittance (float): Thermal emittance of the material. Unitless,\n between 0 (white-body) and 1 (black-body).\n insolation (float, optional): Insolation incident on the material.\n Defaults to 1000W/m2.\n air_temperature (float, optional): Air temperature. Defaults to 36.85C.\n sky_temperature (float, optional): Sky temperature. Defaults to 26.85C.\n wind_speed (float, optional): Speed of wind. Defaults to 4m/s.\n\n Returns:\n float: SRI of the material. Unitless.\n \"\"\"\n\n if not 0 <= solar_reflectance <= 1:\n raise ValueError(\n \"Solar reflectance must be between 0 (black-body) and 1 (white-body).\"\n )\n if not 0 <= thermal_emittance <= 1:\n raise ValueError(\n \"Thermal emittance must be between 0 (white-body) and 1 (black-body).\"\n )\n if wind_speed < 0:\n raise ValueError(\"Wind speed must be greater than or equal to 0.\")\n\n # convert wind speed to a wind convection coefficient\n speeds = [1, 4, 8]\n coeffs = [5, 12, 30]\n wind_convection_coefficient = np.interp(wind_speed, speeds, coeffs)\n\n # set the sensitivity threshold for the iterative calculation. Lower is more accurate but slower.\n threshold = 0.5 # W\n increment = 0.01 # K\n iterations = 100000\n\n air_temperature = air_temperature + 273.15 # K\n sky_temperature = sky_temperature + 273.15 # K\n\n sigma = 5.67e-8 # W m-2 K-4 Stefan-Boltzmann constant\n blackbody_solar_reflectance = 0.05 # 0 - 1\n whitebody_solar_reflectance = 0.8 # 0 - 1\n blackbody_thermal_emittance = 0.9 # 0 - 1\n whitebody_thermal_emittance = 0.9 # 0 - 1\n\n surface_temperature = 200.0 # K\n n = 0\n while not np.isclose(\n (1 - solar_reflectance) * insolation\n - (\n thermal_emittance\n * sigma\n * (surface_temperature**4 - sky_temperature**4)\n + wind_convection_coefficient * (surface_temperature - air_temperature)\n ),\n 0,\n atol=threshold,\n ):\n n += 1\n if n > iterations:\n raise ValueError(\n f\"SRI calculation did not converge. Surface temperature of {surface_temperature - 273.15}C.\"\n )\n surface_temperature += increment\n\n blackbody_surface_temperature = 200.0 # K\n n = 0\n while not np.isclose(\n (1 - blackbody_solar_reflectance) * insolation\n - (\n blackbody_thermal_emittance\n * sigma\n * (blackbody_surface_temperature**4 - sky_temperature**4)\n + wind_convection_coefficient\n * (blackbody_surface_temperature - air_temperature)\n ),\n 0,\n atol=threshold,\n ):\n n += 1\n if n > iterations:\n raise ValueError(\"SRI calculation did not converge.\")\n blackbody_surface_temperature += increment\n\n whitebody_surface_temperature = 200.0 # K\n n = 0\n while not np.isclose(\n (1 - whitebody_solar_reflectance) * insolation\n - (\n whitebody_thermal_emittance\n * sigma\n * (whitebody_surface_temperature**4 - sky_temperature**4)\n + wind_convection_coefficient\n * (whitebody_surface_temperature - air_temperature)\n ),\n 0,\n atol=threshold,\n ):\n n += 1\n if n > iterations:\n raise ValueError(\"SRI calculation did not converge.\")\n whitebody_surface_temperature += increment\n\n solar_reflective_index = (\n 100\n * (blackbody_surface_temperature - surface_temperature)\n / (blackbody_surface_temperature - whitebody_surface_temperature)\n )\n\n if solar_reflective_index < 0:\n return 0\n\n return solar_reflective_index\n\n\n@bhom_analytics()\ndef material_sri(\n material: EnergyMaterial,\n insolation: float = 1000,\n air_temperature: float = 36.85,\n sky_temperature: float = 26.85,\n wind_speed: float = 4,\n) -> float:\n \"\"\"Calculate the SRI of a Honeybee material.\n Note: this method assumes a horizontal material (facing the sky).\n\n Args:\n material (EnergyMaterial): A Honeybee opaque material.\n insolation (float, optional): Insolation incident on the material.\n Defaults to 1000W/m2.\n air_temperature (float, optional): Air temperature. Defaults to 36.85C.\n sky_temperature (float, optional): Sky temperature. Defaults to 26.85C.\n wind_speed (float, optional): Speed of wind. Defaults to 4m/s.\n\n Returns:\n float: SRI of the material. Unitless.\n \"\"\"\n\n return calculate_sri(\n solar_reflectance=material.solar_reflectance,\n thermal_emittance=material.thermal_absorptance,\n insolation=insolation,\n air_temperature=air_temperature,\n sky_temperature=sky_temperature,\n wind_speed=wind_speed,\n )\n\n\n@bhom_analytics()\ndef construction_sri(\n construction: OpaqueConstruction,\n insolation: float = 1000,\n air_temperature: float = 36.85,\n sky_temperature: float = 26.85,\n wind_speed: float = 4,\n) -> float:\n \"\"\"Calculate the SRI of a Honeybee construction.\n Note: this method assumes a horizontal construction (facing the sky).\n\n Args:\n construction (OpaqueConstruction): A Honeybee opaque construction.\n insolation (float, optional): Insolation incident on the material.\n Defaults to 1000W/m2.\n air_temperature (float, optional): Air temperature. Defaults to 36.85C.\n sky_temperature (float, optional): Sky temperature. Defaults to 26.85C.\n wind_speed (float, optional): Speed of wind. Defaults to 4m/s.\n\n Returns:\n float: SRI of the construction. Unitless.\n \"\"\"\n\n return calculate_sri(\n solar_reflectance=construction.outside_solar_reflectance,\n thermal_emittance=construction.outside_emissivity,\n insolation=insolation,\n air_temperature=air_temperature,\n sky_temperature=sky_temperature,\n wind_speed=wind_speed,\n )\n","repo_name":"BHoM/LadybugTools_Toolkit","sub_path":"LadybugTools_Engine/Python/src/ladybugtools_toolkit/honeybee_extension/sri.py","file_name":"sri.py","file_ext":"py","file_size_in_byte":7172,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"1998330467","text":"from sqlalchemy import Column, Integer, String, Date\n\nfrom database import Base\n\n\nclass Customer(Base):\n __tablename__ = \"customers\"\n\n id = Column(Integer, primary_key=True, index=True)\n name = Column(String, unique=True)\n representative = Column(String)\n contract_start = Column(Date)\n Country = Column(String)\n","repo_name":"lazarustanaka11/FastAPI-V1","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"8291415997","text":"import torch.nn.functional as F\nimport torch\n\n\"\"\"\n Sampling for decoding generation\n currently only works on batch size of 1, so single tokens\n\n Args:\n - Temperature: how strongly we sample from the distribution (at high temperatures,\n everything is uniform, at low temperatures below 1,\n small differences are magnified)\n\n - Top-k: Sample from only top-k tokens in probability distribution\n\n - Top-p : Sample from a selection of the highest probability tokens whose\n cumulative mass is larger than p (nucleus filtering)\n\"\"\"\n\nclass Sampling:\n @classmethod\n def _get_sample(cls, logits):\n \"\"\"\n Draw one token index from the categorical distribution softmax(logits).\n \"\"\"\n \n probabilities = F.softmax(logits, dim=-1)\n return torch.multinomial(probabilities, 1)\n\n\n @classmethod\n def _filter_top_k(cls, logits, top_k):\n \"\"\"\n Set logits below the top-k logits to -inf so softmax sets their probability to 0\n \"\"\"\n\n # safety check\n top_k = min(top_k, logits.size(-1))\n\n if top_k > 0:\n # Get the indices of the logits below the top-k logits\n indices_below_top_k = logits < torch.topk(logits, top_k)[0][..., -1, None]\n\n # Set the logits below the top-k logits to -infinity so softmax gives them 
probability 0\n logits[indices_below_top_k] = float('-inf')\n\n return logits\n\n \n @classmethod\n def _filter_top_p(cls, logits, top_p):\n \"\"\"\n Set logits above the top-p proability mass to -inf so softmax sets their probability to 0\n \"\"\"\n\n if top_p > 0.0:\n # Sort the logits\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n\n # Run through softmax to get probabilities and then get the cumulative probabilities\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n \n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probs > top_p\n \n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # scatter sorted tensors to original indexing and get the indices of the logits below the top-p logits\n indices_above_top_p = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)\n\n # Set the logits below the top-p logits to -infinity so softmax gives them probability 0\n logits[indices_above_top_p] = float('-inf')\n \n return logits\n\n\n @classmethod\n def sample(cls, logits, temperature=1.0, top_k=0, top_p=0.0):\n \"\"\"\n Get sample from logits with temperature control and nucleus filtering (top-k and top-p)\n \"\"\"\n\n if logits.shape[0] > 0:\n logits /= temperature if temperature > 0 else 1\n \n logits = cls._filter_top_k(logits, top_k)\n logits = cls._filter_top_p(logits, top_p)\n\n if temperature == 0: # greedy sampling:\n return torch.argmax(logits, dim=-1)\n else:\n return cls._get_sample(logits)\n else:\n # Return empty long tensor if logits are empty\n return torch.empty(0, dtype=torch.long, device=logits.device)\n\n \n\n \n\n\n","repo_name":"sander102907/autoencoder_program_synthesis","sub_path":"autoencoder_program_synthesis/utils/Sampling.py","file_name":"Sampling.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"5638357940","text":"\"\"\"\nadd users.created\n\nRevision ID: 58eff9292ee\nRevises: 13d01b369a0\nCreate Date: 2014-07-31 12:49:32.090524\n\"\"\"\n\nrevision = '58eff9292ee'\ndown_revision = '13d01b369a0'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport datetime\nfrom sqlalchemy.sql.expression import table, column\n\n\nusers = table('users',\n column('id', sa.Integer),\n column('created', sa.DateTime)\n)\n\ndef upgrade():\n op.add_column('users', sa.Column('created', sa.DateTime, index=True))\n op.create_index('ix_users_created', 'users', ['created'])\n\n op.execute(users.update().values({'created': datetime.datetime.now()}))\n\n\ndef downgrade():\n op.drop_column('users', 'created')\n","repo_name":"opmuse/opmuse","sub_path":"database/versions/58eff9292ee_add_users_created.py","file_name":"58eff9292ee_add_users_created.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"40564825491","text":"#!/usr/bin/env python3\nimport math\nimport numpy as np\nfrom geometry_msgs.msg import Pose, Quaternion\nfrom tf.transformations import quaternion_from_euler\nfrom tf.transformations import translation_matrix, quaternion_matrix\n\nclass Marker():\n \"\"\"\n Class for defining an aruco marker\n to be used in ArucoLocalization()\n \"\"\"\n def __init__(self, id_, len_, x, y, z, rot_x, 
rot_y, rot_z):\n \"\"\"\n :param id_: marker id number\n :type id_: int\n :param len_: marker length (incl. black boundary) in [m]\n :type len_: float\n :param x, y, z, rot_x, rot_y, rot_z: 6DOF pose of the marker in [m] or [rad]\n :type x, y, z, rot_x, rot_y, rot_z: floats\n \"\"\"\n self.id = id_\n self.len = len_\n self.pose = Pose()\n self.pose.position.x = x\n self.pose.position.y = y\n self.pose.position.z = z\n rot_x += math.pi/2\n rot_y += 0\n rot_z += math.pi/2\n\n self.pose.orientation = Quaternion(*quaternion_from_euler(rot_x, rot_y, rot_z))\n tmat = translation_matrix((x, y, z))\n qmat = quaternion_matrix(quaternion_from_euler(rot_x, rot_y, rot_z))\n self.tf_mat = np.dot(tmat, qmat)\n","repo_name":"i2f-omega/qualcomm_cv_ros","sub_path":"src/Marker.py","file_name":"Marker.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73909988106","text":"import tensorrt as trt\nimport numpy as np\nfrom pycuda import driver as cuda, autoinit # Need to import autoinit to allocate cuda memory\nimport os\nfrom preprocessing.tokenizer import Tokenizer\nimport time\nimport re\n\nINPUT_NAME = 'input'\nEND_TOKEN = \"end\"\nTRT_LOGGER = trt.Logger(trt.Logger.INFO)\nINPUT_PRECISION = np.int32\nOUTPUT_PRECISION = np.float32\n\ndef load_engine(engine_path: str):\n if not os.path.exists(engine_path):\n print(\"Engine path not found\")\n return None\n\n \n with open(engine_path, 'rb') as file:\n engine_data = file.read()\n \n # Create Runtime and Engine\n runtime = trt.Runtime(TRT_LOGGER)\n engine = runtime.deserialize_cuda_engine(engine_data)\n\n print(\"OK\")\n \n return engine\n\ndef allocate_buffer(max_ctx: int, token_size: int):\n input_buffer = cuda.mem_alloc(max_ctx * np.dtype(INPUT_PRECISION).itemsize)\n output_buffer = cuda.mem_alloc(max_ctx * token_size * np.dtype(OUTPUT_PRECISION).itemsize)\n bindings = [int(input_buffer), int(output_buffer)]\n\n return bindings\n\ndef generate_text(context, bindings, digits: np.ndarray, max_ctx: int, end_token, token_size):\n digits = digits.astype(INPUT_PRECISION)\n for _ in range(max_ctx):\n # Copy Data from CPU to GPU Buffer\n cuda.memcpy_htod(bindings[0], digits)\n # Setup Input Shape\n context.set_input_shape(name=INPUT_NAME, shape=(1, digits.shape[-1]))\n # Inference Stage\n context.execute_v2(bindings=bindings)\n # Get Data from GPU Buffer to CPU\n output_data = np.empty(shape=(1, digits.shape[-1], token_size), dtype=OUTPUT_PRECISION)\n cuda.memcpy_dtoh(output_data, bindings[1])\n # Handle Predicted Token\n pred_token = np.argmax(output_data[:, -1, :], axis=-1).astype(INPUT_PRECISION)\n\n if pred_token[0] == end_token:\n break\n\n digits = np.concatenate((digits, np.expand_dims(pred_token, axis=0)), axis=-1)\n return digits\n\n\ndef cmd_chat(engine_path: str, tokenizer_path: str, max_ctx: int):\n # Load Tokenizer\n if os.path.exists(tokenizer_path) == False:\n return None\n tokenizer = Tokenizer(tokenizer_path)\n end_token = tokenizer.get_special_token(END_TOKEN)\n \n # Load TensorRT Engine\n load_start_time = time.time()\n engine = load_engine(engine_path)\n\n if engine is None:\n return None\n\n token_size = len(tokenizer.dictionary)\n \n # Allocate Buffer Memory in GPU\n bindings = allocate_buffer(max_ctx, token_size)\n \n # Create Engine Context\n context = engine.create_execution_context()\n load_end_time = time.time()\n\n print(f\"Loading Time: {load_end_time - load_start_time}\")\n\n while True:\n # Get Input Message\n message = input(\"Input 
Message: \")\n # Pre-process Data\n digits = tokenizer.text_to_sequences([message], start_token=True, sep_token=True)\n\n message_length = digits.shape[-1]\n\n try:\n infer_start_time = time.time()\n # Inference\n digits = generate_text(context, bindings, digits, max_ctx, end_token, token_size)\n infer_end_time = time.time()\n\n # Post-Process\n words = tokenizer.decode(digits[0][message_length:])\n print(words)\n response = \"OK\"\n except Exception as e:\n print(str(e))\n response = \"BUG\"\n infer_end_time = 0\n\n \n print(f\"Response:\\n{response}\")\n print(f\"Inference Time: {infer_end_time - infer_start_time}\")\n\n exit = input('Do you want to exit? (y/n): ').lower().strip() == 'y'\n\n if exit:\n break\n\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"--engine_path\", type=str)\n parser.add_argument(\"--tokenizer_path\", type=str)\n parser.add_argument(\"--max_ctx\", type=int, default=250)\n\n args = parser.parse_args()\n\n cmd_chat(\n engine_path=args.engine_path,\n tokenizer_path=args.tokenizer_path,\n max_ctx=args.max_ctx\n )\n\n","repo_name":"Alan-404/GPT","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20342061851","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Implementing Bag of Words from scratch\n\n\n# In[2]:\n\n\nimport string\nimport pprint\nimport pandas as pd\nfrom collections import Counter\nimport nltk\nnltk.download('punkt')\n\n\n# In[3]:\n\n\ndocuments = ['Hello, how are you!',\n 'Win money, win from home.',\n 'Call me now.',\n 'Hello, Call hello you tomorrow?']\n\n\n# In[4]:\n\n\n# convert into lower case.\nlower_case_documents = []\nfor i in documents:\n lower_case_documents.append(i.lower())\nprint(lower_case_documents)\n\n\n# In[5]:\n\n\n# remove the punctuation.\nwithout_punctuation_documents = []\nfor i in lower_case_documents:\n without_punctuation_documents.append(''.join(c for c in i if c not in string.punctuation))\n \nprint(without_punctuation_documents)\n\n\n# In[6]:\n\n\npreprocessed_documents = []\nfor sentence in without_punctuation_documents:\n preprocessed_documents.append(nltk.word_tokenize(sentence))\nprint(preprocessed_documents)\n\n\n# In[7]:\n\n\nfrequency_list = []\nfor i in preprocessed_documents:\n frequency_list.append(Counter(i))\n \npprint.pprint(frequency_list)\n\n\n# In[8]:\n\n\n# Implementing Bag of Words in scikit-learn\n\n\n# In[9]:\n\n\ndocuments = ['Hello, how are you!',\n 'Win money, win from home.',\n 'Call me now.',\n 'Hello, Call hello you tomorrow?']\n\n\n# In[10]:\n\n\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n \nCountVec = CountVectorizer()\n#transform\nCount_data = CountVec.fit_transform(documents)\n\n\n# In[11]:\n\n\n#create dataframe\nfrequency_matrix = pd.DataFrame(Count_data.toarray(),index=documents,\n columns=CountVec.get_feature_names())\nfrequency_matrix\n\n","repo_name":"addi-kamal/Implementing-Bag-of-Words","sub_path":"Implementing Bag-of-Words.py","file_name":"Implementing Bag-of-Words.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9400981817","text":"import time\nimport collections\n\n\nclass StockMetrics:\n def __init__(self):\n record_5m = collections.defaultdict(collections.deque) # {stock_id : 
[(timestamp, stock_price)]}\n record_1h = collections.defaultdict(collections.deque)\n record_24h = collections.defaultdict(collections.deque)\n sum_5m = collections.Counter()\n sum_1h = collections.Counter()\n sum_24h = collections.Counter()\n self.interval_record = {300: [record_5m, sum_5m], 3600: [record_1h, sum_1h], 86400: [record_24h, sum_24h]}\n\n def _remove(self, stock_id, currentTime): # amortized O(1)\n for interval, records in self.interval_record.items():\n record, sum = records\n while record[stock_id] and currentTime - record[stock_id][0][0] > interval:\n sum[stock_id] -= record[stock_id].popleft()[1]\n\n def add(self, stock_id, price):\n currentTime = time.time()\n self._remove(stock_id, currentTime)\n for interval, records in self.interval_record.items():\n record, sum = records\n record[stock_id].append((currentTime, price))\n sum[stock_id] += price\n\n def get_avg(self, stock_id, interval):\n if interval not in self.interval_record:\n raise Exception('Invalid interval')\n currentTime = time.time()\n self._remove(stock_id, currentTime)\n numberOfPrices = len(self.interval_record[interval][0][stock_id])\n if numberOfPrices == 0:\n raise Exception('No price record for the stock')\n return self.interval_record[interval][1][stock_id] / numberOfPrices\n\n\ns = StockMetrics()\ns.add(1, 100)\ntime.sleep(1)\ns.add(1, 200)\ns.add(1, 300)\ns.add(2, 100)\ns.add(3, 100)\nprint(s.get_avg(1, 300))\ntime.sleep(1)\nprint(s.get_avg(1, 300))\nprint(s.get_avg(2, 300))\nprint(s.get_avg(3, 300))\n","repo_name":"Jason003/interview","sub_path":"linkedin/Design/Get avg for a period stock.py","file_name":"Get avg for a period stock.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"42131739731","text":"\ndef jump(nums):\n l, r = 0, 0\n count = 0\n\n while r < len(nums)-1:\n farthest = 0\n for i in range(l, r+1):\n farthest = max(farthest, i + nums[i])\n\n l = r + 1\n r = farthest\n count += 1\n\n return count\n\n\nnums = [2, 3, 1, 1, 4]\nans = jump(nums)\nprint(ans)\n","repo_name":"mdiallo98/python-dataStructures-Algos","sub_path":"LeetcodeQuestions/Greedy/jump_gameII.py","file_name":"jump_gameII.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
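jump in jump_gameII.py above advances in BFS-like layers: [l, r] is the set of indices reachable with the current jump count, and farthest extends that window; a traced run of the fixed version on the sample input:

def jump(nums):
    l = r = 0
    count = 0
    while r < len(nums) - 1:
        # widest reach from anywhere inside the current layer
        farthest = max(i + nums[i] for i in range(l, r + 1))
        l, r = r + 1, farthest  # next layer of newly reachable indices
        count += 1
        print("after jump", count, "window is", (l, r))
    return count

print(jump([2, 3, 1, 1, 4]))  # 2 jumps: index 0 -> 1 -> 4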
+{"seq_id":"20342061851","text":"# 05.29 11:20 ~ 13:00\n# Solved this while studying.\n# Taking the remainder modulo a number over one billion -> this is a DP problem.\n# Looking at the puddle position [[2,2]], the house is at [[1,1]].\n# The coordinates are given in the reverse of the usual order.\n\ndef solution(m, n, puddles):\n # Set up the (m,n) coordinates to match the usual (n,m) layout. Indexing starts at (1,1), so add 1 to each size.\n dp = [[0] * (m+1) for i in range(n+1)]\n # The puddle coordinates have to be swapped too.\n puddles = [[p, q] for [q, p] in puddles]\n # Mark the position of the house.\n dp[1][1] = 1\n \n # Walk through the rows.\n for i in range(n+1):\n # Read each of the m columns.\n for j in range(m+1):\n if i==1 and j==1: continue\n # A cell containing a puddle takes the value 0.\n if [i, j] in puddles:\n dp[i][j] = 0\n # Otherwise the cell is the sum of the cell to the left and the cell above.\n else:\n dp[i][j] = (dp[i-1][j] + dp[i][j-1]) % 1000000007\n return dp[n][m]","repo_name":"wearethevisionaries/problem-solving","sub_path":"week12/등굣길_송인성.py","file_name":"등굣길_송인성.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
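The same left-plus-up recurrence from 등굣길_송인성.py, checked on the problem's published example (m=4, n=3, one puddle at [2,2], expected answer 4); the set-based puddle lookup is an illustrative tweak for O(1) membership tests:

def solution(m, n, puddles):
    MOD = 1000000007
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    blocked = {(q, p) for p, q in puddles}  # swap to (row, col), as in the original
    dp[1][1] = 1
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if (i, j) == (1, 1) or (i, j) in blocked:
                continue
            # paths into a cell = paths from above + paths from the left
            dp[i][j] = (dp[i - 1][j] + dp[i][j - 1]) % MOD
    return dp[n][m]

print(solution(4, 3, [[2, 2]]))  # 4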
+{"seq_id":"70375822985","text":"from datetime import datetime\nimport logging\nfrom aiogram import Bot, Dispatcher, types\nfrom aiogram.types import ParseMode\nfrom aiogram.utils import executor\nfrom aiogram.contrib.middlewares.logging import LoggingMiddleware\nfrom diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL\nfrom diffusers.utils import load_image\nfrom PIL import Image\nimport torch\nimport numpy as np\nimport cv2\nimport os\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO)\n\n# Telegram bot token (replace with your actual token)\nTOKEN = ''\n\n# Initialize the bot and dispatcher\nbot = Bot(token=TOKEN)\ndp = Dispatcher(bot)\ndp.middleware.setup(LoggingMiddleware())\n\n# Initialize the stable diffusion pipeline and models\ncontrolnet_conditioning_scale = 0.5\ncontrolnet = ControlNetModel.from_pretrained(\n \"diffusers/controlnet-canny-sdxl-1.0\",\n torch_dtype=torch.float16\n)\nvae = AutoencoderKL.from_pretrained(\"madebyollin/sdxl-vae-fp16-fix\", torch_dtype=torch.float16)\npipe = StableDiffusionXLControlNetPipeline.from_pretrained(\n \"stabilityai/stable-diffusion-xl-base-1.0\",\n controlnet=controlnet,\n vae=vae,\n torch_dtype=torch.float16,\n)\npipe.enable_model_cpu_offload()\n\n# Define the start command handler\n@dp.message_handler(commands=['start'])\nasync def cmd_start(message: types.Message):\n await message.reply(\"Welcome to the BugDiffusion Telegram bot. Send an image with caption, get a cup of coffee and enjoy!\")\n\n# Define the text processing handler\n@dp.message_handler(lambda message: message.text and not message.text.startswith('/'))\nasync def process_text(message: types.Message):\n try:\n # Extract the user's message\n message_text = message.text\n user_id = message.from_user.id\n\n # Send instructions to the user\n# await message.reply(\"Please send an image to process along with your prompt.\")\n\n except Exception as e:\n logging.error(str(e))\n await message.reply('An error occurred. Please try again.')\n\n# Define the image processing handler\n@dp.message_handler(content_types=types.ContentTypes.PHOTO)\nasync def process_image(message: types.Message):\n try:\n # Extract the user's prompt\n message_text = message.caption if message.caption else \"\"\n\n # Download and preprocess the user's image\n user_id = message.from_user.id\n image_path = f\"input_{user_id}.jpg\"\n await message.photo[-1].download(image_path)\n \n user_image = load_image(image_path)\n user_image = np.array(user_image)\n user_image = cv2.Canny(user_image, 100, 200)\n user_image = user_image[:, :, None]\n user_image = np.concatenate([user_image, user_image, user_image], axis=2)\n user_image = Image.fromarray(user_image)\n\n # Generate images using stable diffusion\n images = pipe(\n message_text, negative_prompt=None, image=user_image,\n controlnet_conditioning_scale=controlnet_conditioning_scale,\n ).images\n\n # Save the generated image\n output_path = f\"output_{user_id}.png\"\n images[0].save(output_path)\n\n # Send the generated image back to the user\n with open(output_path, \"rb\") as img_file:\n await message.reply_photo(photo=img_file)\n\n # Clean up generated files\n prefix = datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n os.rename(image_path, prefix+image_path)\n os.rename(output_path, prefix+output_path)\n\n except Exception as e:\n# logging.error(str(e))\n await message.reply('Please send an image with a caption.')\n\nif __name__ == '__main__':\n from aiogram import executor\n executor.start_polling(dp, skip_updates=True)\n","repo_name":"bunny-it/bugdiffusion","sub_path":"bugdiffusion.py","file_name":"bugdiffusion.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72071825225","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n if not head: return head\n pre = head.val\n p = head\n while p.next:\n if p.next.val == pre:\n p.next = p.next.next\n else:\n pre = p.next.val\n p = p.next\n\n return head\n","repo_name":"CastleWhite/LeetCodeProblems","sub_path":"83.py","file_name":"83.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"25226571920","text":"message = 'xfmdpnf_up_ipnf'\nLETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\nmessage = message.upper()\n\nfor key in range(len(LETTERS)):\n translate = ''\n for symbol in message:\n if symbol in LETTERS:\n num = LETTERS.find(symbol)\n num = num - key\n\n if num < 0:\n num = num + len(LETTERS)\n\n translate = translate + LETTERS[num]\n\n else:\n translate = translate + symbol\n\n print('key #{0}: {1}'.format(key, translate))\n","repo_name":"chiewm/cryptography","sub_path":"Caesar_cipher/Caesar_cipher.py","file_name":"Caesar_cipher.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24726197127","text":"# -*- coding: utf-8 -*-\n\nfrom . 
import CompressionSimulator, ColoredParticle, Directions\n\n\nclass NewSeparationSimulator(CompressionSimulator):\n # bias_lambda is the overall compression bias as in the compressionsim\n # bias_alpha is the bias for homogeneous grouping: > 1 wants homogeneity\n def __init__(self, grid, bias_lambda, bias_alpha, allow_swap=True):\n CompressionSimulator.__init__(self, grid, bias_lambda)\n self.bias_alpha = bias_alpha\n self.allow_swap = allow_swap\n\n @staticmethod\n def validate_grid(grid, particle_class=ColoredParticle):\n if len(list(grid.get_all_particles(particle_class))) != len(list(grid.get_all_particles())):\n raise ValueError(\"The configuration contains particles unsupported by NewSeparationSimulator\")\n\n return CompressionSimulator.validate_grid(grid)\n\n def get_bias(self, particle):\n raise ValueError(\"No single bias in the NewSeparationSimulator\")\n\n def get_move_probability(self, particle, current_location, new_location):\n current_neighbors = set(self.grid.get_neighbors(current_location))\n new_neighbors = set(self.grid.get_neighbors(new_location)) - {particle}\n increase_neighbors = len(new_neighbors) - len(current_neighbors)\n\n current_homogeneous = sum(type(n) is type(particle) for n in current_neighbors)\n new_homogeneous = sum(type(n) is type(particle) for n in new_neighbors)\n increase_homogeneous = new_homogeneous - current_homogeneous\n\n return (self.bias ** increase_neighbors) * (self.bias_alpha ** increase_homogeneous)\n\n def move(self, random_particle, random_direction, probability, classes_to_move=None):\n # Check if new location is empty\n current_location = random_particle.axial_coordinates\n new_location = self.grid.get_position_in_direction(current_location, random_direction)\n\n if not self.grid.is_position_in_bounds(new_location):\n # New location out of board bounds\n # print(\"Bounds\")\n return False\n\n if not self.valid_move(random_particle, current_location, new_location,\n random_direction): # TODO: Check classes to move?\n # print(\"Invalid\")\n return False\n\n prob_move = 1\n\n swap_particle = self.grid.get_particle(new_location)\n\n if swap_particle is not None:\n if not self.allow_swap:\n return False\n\n sp_type = type(swap_particle)\n\n if sp_type == type(random_particle):\n # We can only swap particles of different colors\n return False\n\n prob_move *= self.get_move_probability(swap_particle, new_location, current_location)\n\n prob_move *= self.get_move_probability(random_particle, current_location, new_location)\n # print(\"Prob: \" + str(prob_move))\n self.probability_series.append(prob_move)\n\n if not probability < prob_move: # Choose with probability\n # print probability\n return False\n\n # Empty the swap location if we're swapping\n if swap_particle is not None:\n self.grid.remove_particle(swap_particle)\n\n # Move the random particle\n self.grid.move_particle(current_location, new_location)\n\n # Move and reinsert the swap particle\n if swap_particle is not None:\n swap_particle.move(current_location)\n self.grid.add_particle(swap_particle)\n\n # Movement counting\n self.movements += 1\n\n # Round checking\n self.visited[random_particle] = True\n for particle in self.grid.get_all_particles(classes_to_move):\n if not self.visited.get(particle, False):\n return True\n\n # If this point is reached, a round has completed\n self.rounds += 1\n self.visited = {}\n\n return True\n\n #def valid_move(self, particle, old_position, new_position, direction):\n # ptype = type(particle)\n\n # n = self.grid.neighbor_count(old_position, 
ptype) < 5\n\n # p1 = True #self.property1(old_position, new_position, direction, ptype)\n # p2 = True #self.new_property2(old_position, new_position, direction, ptype)\n\n # p3 = True self.property1(old_position, new_position, direction)\n # p4 = True self.property2(old_position, new_position, direction)\n\n # return n and (p1 or p2) and (p3 or p4)\n\n # def new_property2(self, old_position, new_position, direction, classes_to_consider=None):\n # # This is the same property 2 as before, but the non-empty neighborhood\n # # requirement is removed.\n # s1 = self.grid.get_neighbor_in_direction(old_position, Directions.shift_counterclockwise_by(direction, 5),\n # classes_to_consider)\n # s2 = self.grid.get_neighbor_in_direction(old_position, Directions.shift_counterclockwise_by(direction, 1),\n # classes_to_consider)\n #\n # if s1 is None and s2 is None:\n # if (self.grid.get_neighbor_in_direction(old_position, Directions.shift_counterclockwise_by(direction, 2),\n # classes_to_consider) is not None\n # ) and (\n # self.grid.get_neighbor_in_direction(old_position, Directions.shift_counterclockwise_by(direction, 3),\n # classes_to_consider) is None\n # ) and (\n # self.grid.get_neighbor_in_direction(old_position, Directions.shift_counterclockwise_by(direction, 4),\n # classes_to_consider) is not None\n # ):\n # return False\n #\n # if (self.grid.get_neighbor_in_direction(new_position, Directions.shift_counterclockwise_by(direction, 1),\n # classes_to_consider) is not None\n # ) and (\n # self.grid.get_neighbor_in_direction(new_position, Directions.shift_counterclockwise_by(direction, 0),\n # classes_to_consider) is None\n # ) and (\n # self.grid.get_neighbor_in_direction(new_position, Directions.shift_counterclockwise_by(direction, 5),\n # classes_to_consider) is not None\n # ):\n # return False\n #\n # return True\n # else:\n # return False\n\n def get_metrics(self, classes_to_move=None):\n neighborhoods = self.grid.count_neighborhoods()\n heterogeneous_neighborhoods = self.grid.count_heterogeneous_neighborhoods()\n homogeneous_neighborhoods = neighborhoods - heterogeneous_neighborhoods\n\n metrics = [(\"Lambda bias\", \"%.2f\", self.bias),\n (\"Alpha bias\", \"%.2f\", self.bias_alpha),\n (\"Iterations\", \"%d\", self.iterations_run),\n (\"Movements made\", \"%d\", self.movements),\n (\"Rounds completed:\", \"%d\", self.rounds),\n (\"Center of mass\", \"x = %.2f, y = %.2f\", tuple(self.grid.find_center_of_mass(ColoredParticle))),\n (\"Total neighborhoods\", \"%d\", neighborhoods),\n (\"Homogeneous neighborhoods\", \"%d\", homogeneous_neighborhoods),\n (\"Heterogeneous neighborhoods\", \"%d\", heterogeneous_neighborhoods)]\n\n return metrics\n","repo_name":"cgokmen/amoebot","sub_path":"compsim/simulate/newseparationsimulator.py","file_name":"newseparationsimulator.py","file_ext":"py","file_size_in_byte":7579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22212324792","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, exceptions, _\n\ndef string_padding_4_right(number):\n return str(number).rjust(4, '0') if type(number) == str else \"0000\"\n\ndef string_padding_3_right(number):\n return str(number).rjust(3, '0') if type(number) == str else \"000\"\n\ndef string_padding_2_right(number):\n return str(number).rjust(2, '0') if type(number) == str else \"00\"\n\n# We add these fields because we can use odoo's Company, Country and State models\nclass Company(models.Model):\n _inherit = \"res.company\"\n \n 
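# This 4-character code becomes the company segment of the 37-character\n    # analytic code composed in make_or_change() and _import() below.\n    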
analytic_code = fields.Char(string=\"Analytic Code\")\n \n _sql_constraints = [(\n 'analytic_code_unique', 'unique(analytic_code)', 'Analytic Code must be unique!'\n )]\n\nclass AnalyticAccounts(models.Model):\n _inherit = \"account.analytic.account\"\n\n country_id = fields.Many2one(string=\"Country\", related=\"company_id.country_id\")\n state_id = fields.Many2one(string=\"State\", related=\"company_id.state_id\")\n \n department = fields.Many2one(\"analytic_accounts.group.department\", string=\"Department\")\n sub_department = fields.Many2one(\"analytic_accounts.group.sub_department\", string=\"Sub Department\")\n type = fields.Many2one(\"analytic_accounts.group.type\", string=\"Type\")\n group = fields.Many2one(\"analytic_accounts.group\", string=\"Group\")\n account = fields.Many2one(\"analytic_accounts.group.account\", string=\"Account\")\n sub_account = fields.Many2one(\"analytic_accounts.group.sub_account\", string=\"Sub Account\")\n item = fields.Many2one(\"analytic_accounts.group.item\", string=\"Item\")\n\n country_rel_id = fields.Integer(string='Country Id', related='country_id.id')\n state_rel_id = fields.Integer(string='State Id', related='state_id.id')\n company_rel_id = fields.Integer(string='Company Id', related='company_id.id')\n department_rel_id = fields.Integer(string='Department Id', related='department.id')\n sub_department_rel_id = fields.Integer(string='Sub Department Id', related='sub_department.id')\n type_rel_id = fields.Integer(string='Type Id', related='type.id')\n group_rel_id = fields.Integer(string='Group Id', related='group.id')\n account_rel_id = fields.Integer(string='Account Id', related='account.id')\n sub_account_rel_id = fields.Integer(string='Sub Account Id', related='sub_account.id')\n item_rel_id = fields.Integer(string='Item Id', related='item.id')\n \n country_rel_code = fields.Char(string='Country Code', related='country_id.code')\n state_rel_code = fields.Char(string='State Code', related='state_id.code')\n company_rel_code = fields.Char(string='Company Code', related='company_id.analytic_code')\n department_rel_code = fields.Char(string='Department Code', related='department.code')\n sub_department_rel_code = fields.Char(string='Sub Department Code', related='sub_department.code')\n type_rel_code = fields.Char(string='Type Code', related='type.code')\n group_rel_code = fields.Char(string='Group Code', related='group.code')\n account_rel_code = fields.Char(string='Account Code', related='account.code')\n sub_account_rel_code = fields.Char(string='Sub Account Code', related='sub_account.code')\n item_rel_code = fields.Char(string='Item Code', related='item.code')\n \n @api.multi\n def test_multi(self):\n print(self)\n\n # Domains\n @api.onchange(\"department\")\n def _onchange_department(self):\n if not self.department:\n self.sub_department = False\n\n @api.onchange(\"type\")\n def _onchange_type(self):\n res = {}\n if self.type:\n res['domain'] = {'group': [('type_id', '=', self.type.id)]}\n self.group = self.type.group_none\n return res\n \n @api.onchange(\"group\")\n def _onchange_group(self):\n res = {}\n if self.group:\n res['domain'] = {'account': [('group_id', '=', self.group.id)]}\n self.account = self.group.account_none\n return res\n\n @api.onchange(\"account\")\n def _onchange_account(self):\n res = {}\n if self.account:\n res['domain'] = {'sub_account': [('account_id', '=', self.account.id)]}\n self.sub_account = self.account.sub_account_none\n return res\n\n @api.onchange(\"sub_account\")\n def _onchange_sub_account(self):\n res = 
{}\n if self.sub_account:\n res['domain'] = {'item': [('sub_account_id', '=', self.sub_account.id)]}\n self.item = self.sub_account.item_none\n return res\n\n def make_or_change(self, values):\n company_code = string_padding_4_right(self.company_id.analytic_code)\n if \"company_id\" in values:\n if values[\"company_id\"]:\n company = self.env['res.company'].browse(values[\"company_id\"])\n company_code = string_padding_4_right(company.analytic_code)\n if not company_code:\n company_code = \"0000\"\n \n country_code = string_padding_2_right(self.country_id.code)\n if \"country_id\" in values:\n if values[\"country_id\"]:\n country = self.env['res.country'].browse(values[\"country_id\"])\n country_code = country.code\n if not country_code:\n country_code = \"00\"\n\n state_code = string_padding_3_right(self.state_id.code)\n if \"state_id\" in values:\n if values[\"state_id\"]:\n state = self.env['res.country.state'].browse(values[\"state_id\"])\n state_code = state.code\n if not state_code:\n state_code = \"000\"\n\n department_code = string_padding_4_right(self.department.code)\n if \"department\" in values:\n if values[\"department\"]:\n department = self.env['analytic_accounts.group.department'].browse(values[\"department\"])\n department_code = string_padding_4_right(department.code)\n else:\n values[\"sub_department\"] = False\n if not department_code:\n department_code = \"0000\"\n\n sub_department_code = string_padding_4_right(self.sub_department.code)\n if \"sub_department\" in values:\n if values[\"sub_department\"]:\n sub_department = self.env['analytic_accounts.group.sub_department'].browse(values[\"sub_department\"])\n sub_department_code = string_padding_4_right(sub_department.code)\n if not sub_department_code:\n sub_department_code = \"0000\"\n\n type_code = string_padding_4_right(self.type.code)\n if \"type\" in values:\n if values[\"type\"]:\n type_record = self.env['analytic_accounts.group.type'].browse(values[\"type\"])\n type_code = string_padding_4_right(type_record.code)\n if not type_code:\n type_code = \"0000\"\n\n group_code = string_padding_4_right(self.group.code)\n if \"group\" in values:\n if values[\"group\"]:\n group = self.env['analytic_accounts.group'].browse(values[\"group\"])\n group_code = string_padding_4_right(group.code)\n if not group_code:\n group_code = \"0000\"\n\n account_code = string_padding_4_right(self.account.code)\n if \"account\" in values:\n if values[\"account\"]:\n account = self.env['analytic_accounts.group.account'].browse(values[\"account\"])\n account_code = string_padding_4_right(account.code)\n if not account_code:\n account_code = \"0000\"\n\n sub_account_code = string_padding_4_right(self.sub_account.code)\n if \"sub_account\" in values:\n if values[\"sub_account\"]:\n sub_account = self.env['analytic_accounts.group.sub_account'].browse(values[\"sub_account\"])\n sub_account_code = string_padding_4_right(sub_account.code)\n if not sub_account_code:\n sub_account_code = \"0000\"\n\n item_code = string_padding_4_right(self.item.code)\n \n if \"item\" in values:\n if values[\"item\"]:\n item = self.env['analytic_accounts.group.item'].browse(values[\"item\"])\n item_code = string_padding_4_right(item.code)\n if not item_code:\n item_code = \"0000\"\n\n code = \"{}{}{}{}{}{}{}{}{}{}\".format(country_code,\n state_code,\n company_code,\n department_code,\n sub_department_code,\n type_code,\n group_code,\n account_code,\n sub_account_code,\n item_code)\n\n values[\"code\"] = code\n\n def _import(self, values):\n code = values[\"code\"]\n 
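# Layout of the 37-character reference, matching the slices below:\n        #   [0:2] country, [2:5] region/state, [5:9] company, [9:13] department,\n        #   [13:17] sub department, [17:21] type, [21:25] group, [25:29] account,\n        #   [29:33] sub account, [33:37] item.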
\n        REFERENCE_LENGTH = 37\n        \n        \n        print(code)\n        if len(code) == REFERENCE_LENGTH:\n            country_code = code[0:2]\n            region_code = code[2:5]\n            company_code = code[5:9]\n            \n            #===================================================================\n            # Check that the company exists in Odoo, and that its country\n            # and region/state match the codes in the reference\n            #===================================================================\n            \n            CompanyEnv = self.env[\"res.company\"]\n            company_record = CompanyEnv.search([(\"analytic_code\", \"=\", company_code)], limit=1)\n            if not company_record:\n                raise exceptions.ValidationError(_(\"Invalid company code\"))\n            \n            if company_record.country_id:\n                company_country_code = string_padding_2_right(company_record.country_id.code)\n                if company_country_code != country_code:\n                    raise exceptions.ValidationError(_(\"Invalid country code\"))\n\n                if company_record.state_id:\n                    state_code = string_padding_3_right(company_record.state_id.code)\n                    if state_code != region_code:\n                        raise exceptions.ValidationError(_(\"Invalid region/state code\"))\n                else:\n                    raise exceptions.ValidationError(_(\"Country needs state specified\"))\n            else:\n                raise exceptions.ValidationError(_(\"Company needs country specified\"))\n            \n            print(\"TEST COMPANY CODE: {}\".format(company_record.id))\n            values[\"company_id\"] = company_record.id\n            \n            department_code = code[9:13]\n            sub_department_code = code[13:17]\n            \n            type_code = code[17:21]\n            group_code = code[21:25]\n            account_code = code[25:29]\n            sub_account_code = code[29:33]\n            item_code = code[33:37]\n            \n            #===================================================================\n            # These two are handled separately because they are independent\n            # and optional\n            #===================================================================\n            DeptEnv = self.env[\"analytic_accounts.group.department\"]\n            existing_dept = DeptEnv.search([[\"code\", \"=\", department_code]])\n            if existing_dept:\n                values[\"department\"] = existing_dept.id\n            else:\n                new_dept = DeptEnv.create({\n                    \"name\": \"department-{}\".format(department_code),\n                    \"code\": department_code\n                })\n                values[\"department\"] = new_dept.id\n            \n            #===================================================================\n            # Sub Department\n            #===================================================================\n            SubDeptEnv = self.env[\"analytic_accounts.group.sub_department\"]\n            existing_sub_department = SubDeptEnv.search([[\"code\", \"=\", sub_department_code]])\n            if existing_sub_department:\n                values[\"sub_department\"] = existing_sub_department.id\n            else:\n                new_sub_department = SubDeptEnv.create({\n                    \"name\": \"sub_department-{}\".format(sub_department_code),\n                    \"code\": sub_department_code\n                })\n                values[\"sub_department\"] = new_sub_department.id\n            \n            #===================================================================\n            # All of the following is done to \"merge\" the incoming type,\n            # group, account, sub account and item data\n            #===================================================================\n            \n            #===================================================================\n            # Type\n            #===================================================================\n            TypeEnv = self.env[\"analytic_accounts.group.type\"]\n            existing_type = TypeEnv.search([[\"code\", \"=\", type_code]])\n            if existing_type:\n                values[\"type\"] = existing_type.id\n            else:\n                new_type = TypeEnv.create({\n                    \"name\": \"type-{}\".format(type_code),\n                    \"code\": type_code\n                })\n                values[\"type\"] = new_type.id\n            \n            \n            
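# The search-or-create blocks above and below all follow the same pattern;\n            # they could be factored into a small helper along these lines (sketch\n            # only -- the helper name and signature are assumed, not part of the\n            # original module):\n            #\n            #     def _find_or_create(self, model, prefix, code, extra_vals=None):\n            #         domain = [(\"code\", \"=\", code)]\n            #         for field, value in (extra_vals or {}).items():\n            #             domain.append((field, \"=\", value))\n            #         record = self.env[model].search(domain, limit=1)\n            #         if record:\n            #             return record.id\n            #         vals = {\"name\": \"{}-{}\".format(prefix, code), \"code\": code}\n            #         vals.update(extra_vals or {})\n            #         return self.env[model].create(vals).id\n            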
#===================================================================\n            # Group\n            #===================================================================\n            GroupEnv = self.env[\"analytic_accounts.group\"]\n            existing_group = GroupEnv.search([\"&\", (\"code\", \"=\", group_code), (\"type_id\", \"=\", values[\"type\"])])\n            if existing_group:\n                values[\"group\"] = existing_group.id\n            else:\n                new_group = GroupEnv.create({\n                    \"name\": \"group-{}\".format(group_code),\n                    \"type_id\": values[\"type\"],\n                    \"code\": group_code\n                })\n                values[\"group\"] = new_group.id\n            \n            #===================================================================\n            # Account\n            #===================================================================\n            AccountEnv = self.env[\"analytic_accounts.group.account\"]\n            existing_account = AccountEnv.search([\"&\", (\"code\", \"=\", account_code), (\"group_id\", \"=\", values[\"group\"])])\n            if existing_account:\n                values[\"account\"] = existing_account.id\n            else:\n                new_account = AccountEnv.create({\n                    \"name\": \"account-{}\".format(account_code),\n                    \"group_id\": values[\"group\"],\n                    \"code\": account_code\n                })\n                values[\"account\"] = new_account.id\n            \n            \n            #===================================================================\n            # Sub Account\n            #===================================================================\n            SubAccountEnv = self.env[\"analytic_accounts.group.sub_account\"]\n            existing_sub_account = SubAccountEnv.search([\"&\", (\"code\", \"=\", sub_account_code), (\"account_id\", \"=\", values[\"account\"])])\n            if existing_sub_account:\n                values[\"sub_account\"] = existing_sub_account.id\n            else:\n                new_sub_account = SubAccountEnv.create({\n                    \"name\": \"sub_account-{}\".format(sub_account_code),\n                    \"account_id\": values[\"account\"],\n                    \"code\": sub_account_code\n                })\n                values[\"sub_account\"] = new_sub_account.id\n            \n            \n            #===================================================================\n            # Item\n            #===================================================================\n            ItemEnv = self.env[\"analytic_accounts.group.item\"]\n            existing_item = ItemEnv.search([\"&\", (\"code\", \"=\", item_code), (\"sub_account_id\", \"=\", values[\"sub_account\"])])\n            if existing_item:\n                values[\"item\"] = existing_item.id\n            else:\n                new_item = ItemEnv.create({\n                    \"name\": \"item-{}\".format(item_code),\n                    \"sub_account_id\": values[\"sub_account\"],\n                    \"code\": item_code\n                })\n                values[\"item\"] = new_item.id\n        else:\n            raise exceptions.ValidationError(_(\"The reference must contain 37 characters\"))\n\n    @api.model\n    def create(self, values):\n        # self.ensure_one()\n        if \"group_id\" in values:\n            self.make_or_change(values)\n        else:\n            self._import(values)\n        # Create a group based on department and sub department\n        \n        dept_group_id = None\n        \n        if \"department\" in values and values[\"department\"]:\n            AnalyticGroup = self.env[\"account.analytic.group\"]\n            \n            Departments = self.env[\"analytic_accounts.group.department\"]\n            dept_record = Departments.browse([values[\"department\"]])\n            \n            ExistingDeptGroup = AnalyticGroup.search([[\"name\", \"=\", dept_record.name]])\n            \n            if not ExistingDeptGroup:\n                print(\"DeptGroupBefore: {}\".format(ExistingDeptGroup))\n                \n                print(\"Department: {}\".format(dept_record.name))\n                \n                dept_dict_obj = {\n                    \"name\": dept_record.name,\n                    \"description\": \"Code: {}\".format(dept_record.code)\n                }\n                \n                dept_group_id = AnalyticGroup.create(dept_dict_obj).id\n                \n            else:\n                dept_group_id = ExistingDeptGroup.id\n            \n            \n            values[\"group_id\"] = dept_group_id\n            if \"sub_department\" in values and 
values[\"sub_department\"]:\n \n SubDeparments = self.env[\"analytic_accounts.group.sub_department\"]\n sub_dept_record = SubDeparments.browse([values[\"sub_department\"]])\n \n ExistingSubDeptGroup = AnalyticGroup.search([[\"name\", \"=\", sub_dept_record.name]])\n \n if not ExistingSubDeptGroup:\n \n print(\"Sub departament: {}\".format(sub_dept_record.name))\n sub_dept_dict_obj = {\n \"name\": sub_dept_record.name,\n \"parent_id\": dept_group_id,\n \"description\": \"Code: {}\".format(sub_dept_record.code)\n }\n \n values[\"group_id\"] = AnalyticGroup.create(sub_dept_dict_obj).id\n else:\n values[\"group_id\"] = ExistingSubDeptGroup.id\n \n return super(AnalyticAccounts, self).create(values)\n\n def reload_dept_code(self, values):\n if \"department_rel_code\" in values:\n dept_record = None\n if \"department\" in values:\n dept_record = self.env[\"analytic_accounts.group.department\"].browse([values[\"department\"]])\n elif self.department:\n dept_record = self.department\n else:\n return\n dept_record.code = values[\"department_rel_code\"]\n\n def reload_sub_dept_code(self, values):\n if \"sub_department_rel_code\" in values:\n sub_dept_record = None\n if \"sub_department\" in values:\n sub_dept_record = self.env[\"analytic_accounts.group.sub_department\"].browse([values[\"sub_department\"]])\n elif self.sub_department:\n sub_dept_record = self.sub_department\n else:\n return\n sub_dept_record.code = values[\"sub_department_rel_code\"]\n\n\n def reload_type_code(self, values):\n if \"type_rel_code\" in values:\n type_record = None\n if \"type\" in values:\n type_record = self.env[\"analytic_accounts.group.type\"].browse([values[\"type\"]])\n elif self.type:\n type_record = self.type\n else:\n return\n type_record.code = values[\"type_rel_code\"]\n\n def reload_group_code(self, values):\n if \"group_rel_code\" in values:\n group_record = None\n if \"group\" in values:\n group_record = self.env[\"analytic_accounts.group.group\"].browse([values[\"group\"]])\n elif self.group:\n group_record = self.group\n else:\n return\n group_record.code = values[\"group_rel_code\"]\n\n def reload_account_code(self, values):\n if \"account_rel_code\" in values:\n account_record = None\n if \"account\" in values:\n account_record = self.env[\"analytic_accounts.group.account\"].browse([values[\"account\"]])\n elif self.account:\n account_record = self.account\n else:\n return\n account_record.code = values[\"account_rel_code\"]\n\n def reload_sub_account_code(self, values):\n if \"sub_account_rel_code\" in values:\n sub_account_record = None\n if \"sub_account\" in values:\n sub_account_record = self.env[\"analytic_accounts.group.sub_account\"].browse([values[\"sub_account\"]])\n elif self.sub_account:\n sub_account_record = self.sub_account\n else:\n return\n sub_account_record.code = values[\"sub_account_rel_code\"]\n\n def reload_item_code(self, values):\n if \"item_rel_code\" in values:\n item_record = None\n if \"item\" in values:\n item_record = self.env[\"analytic_accounts.group.item\"].browse([values[\"item\"]])\n elif self.item:\n item_record = self.item\n else:\n return\n item_record.code = values[\"item_rel_code\"]\n\n @api.multi\n def write(self, values):\n #=======================================================================\n # for record in self:\n #=======================================================================\n \n #=======================================================================\n # self.ensure_one()\n 
#=======================================================================\n        \n        print(self)\n        \n        self.reload_dept_code(values)\n        self.reload_sub_dept_code(values)\n\n        self.reload_type_code(values)\n        self.reload_group_code(values)\n        self.reload_account_code(values)\n        self.reload_sub_account_code(values)\n        self.reload_item_code(values)\n        \n        return super(AnalyticAccounts, self).write(values)\n\n\n\n\n\n# Just simply to make these groups\nclass GroupBase(models.Model):\n    _name = \"analytic_accounts.group.base\"\n\n    name = fields.Char(string=\"Name\", required=True)\n    code = fields.Char(string=\"Code\", required=True, size=4)\n    \n#===============================================================================\n# #    constraint\n#     @api.constrains('code')\n#     @api.one\n#     def _check_number(self):\n#         code = self.code\n#         \n#         code_converted = abs(code) if isinstance(code, int) else code\n#         \n#         if code and len(str(code_converted)) > 4:\n#             raise exceptions.ValidationError(_('Number of digits must not exceed 4'))\n#===============================================================================\n    \n    @api.model\n    def create(self, values):\n        \n        if \"code\" in values:\n            values[\"code\"] = values[\"code\"].rjust(4, \"0\")[-4:]\n        \n        return super(GroupBase, self).create(values)\n    \n    @api.multi\n    def write(self, values):\n        \n        print(\"Creating a generic group\")\n        \n        if \"code\" in values:\n            values[\"code\"] = string_padding_4_right(values[\"code\"])\n        \n        return super(GroupBase, self).write(values)\n\n\nclass GroupDepartment(GroupBase):\n    _name = \"analytic_accounts.group.department\"\n\n\nclass GroupSubDepartment(GroupBase):\n    _name = \"analytic_accounts.group.sub_department\"\n\n\nclass GroupType(GroupBase):\n    _name = \"analytic_accounts.group.type\"\n    \n    # sub_department_id = fields.Many2one(\"analytic_accounts.group.sub_department\", required=True)\n    group_ids = fields.One2many(\"analytic_accounts.group\", \"type_id\", ondelete=\"cascade\")\n    group_none = fields.Many2one(\"analytic_accounts.group\")\n    \n    @api.model\n    def create(self, values):\n        res = super().create(values)\n        \n        #=======================================================================\n        # print(res)\n        #=======================================================================\n        \n        none_group = self.env[\"analytic_accounts.group\"].create({\n            \"type_id\": res.id,\n            \"code\": \"0000\",\n            \"name\": \"None\"\n        })\n        \n        res.group_none = none_group.id\n        \n        return res\n\n\nclass Group(GroupBase):\n    _name = \"analytic_accounts.group\"\n\n    type_id = fields.Many2one(\"analytic_accounts.group.type\", required=True, ondelete=\"cascade\")\n    account_ids = fields.One2many(\"analytic_accounts.group.account\", \"group_id\", ondelete=\"cascade\")\n    account_none = fields.Many2one(\"analytic_accounts.group.account\")\n    \n    @api.model\n    def create(self, values):\n        res = super().create(values)\n        \n        #=======================================================================\n        # print(res)\n        #=======================================================================\n        \n        none_account = self.env[\"analytic_accounts.group.account\"].create({\n            \"group_id\": res.id,\n            \"code\": \"0000\",\n            \"name\": \"None\"\n        })\n        \n        res.account_none = none_account.id\n        \n        return res\n\n\nclass GroupAccount(GroupBase):\n    _name = \"analytic_accounts.group.account\"\n\n    group_id = fields.Many2one(\"analytic_accounts.group\", required=True, ondelete=\"cascade\")\n    sub_account_ids = fields.One2many(\"analytic_accounts.group.sub_account\", \"account_id\", ondelete=\"cascade\")\n    sub_account_none = 
fields.Many2one(\"analytic_accounts.group.sub_account\")\n \n @api.model\n def create(self, values):\n res = super().create(values)\n \n #=======================================================================\n # print(res)\n #=======================================================================\n \n none_sub_account = self.env[\"analytic_accounts.group.sub_account\"].create({\n \"account_id\": res.id,\n \"code\": \"0000\",\n \"name\": \"None\"\n })\n \n res.sub_account_none = none_sub_account.id\n \n return res\n\n\nclass GroupSubAccount(GroupBase):\n _name = \"analytic_accounts.group.sub_account\"\n\n account_id = fields.Many2one(\"analytic_accounts.group.account\", required=True, ondelete=\"cascade\")\n item_ids = fields.One2many(\"analytic_accounts.group.item\", \"sub_account_id\", ondelete=\"cascade\")\n item_none = fields.Many2one(\"analytic_accounts.group.item\")\n \n @api.model\n def create(self, values):\n res = super().create(values)\n \n #=======================================================================\n # print(res)\n #=======================================================================\n \n none_item = self.env[\"analytic_accounts.group.item\"].create({\n \"sub_account_id\": res.id,\n \"code\": \"0000\",\n \"name\": \"None\"\n })\n \n res.item_none = none_item.id\n \n return res\n\n\nclass GroupItem(GroupBase):\n _name = \"analytic_accounts.group.item\"\n\n sub_account_id = fields.Many2one(\"analytic_accounts.group.sub_account\", ondelete=\"cascade\")\n","repo_name":"sgcalle/aisj","sub_path":"analytic_accounts/models/analytic_account_models.py","file_name":"analytic_account_models.py","file_ext":"py","file_size_in_byte":28202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36723874196","text":"# Import the required module\nimport json,requests\n\ncity_name = 'london'\nunits = 'metric'\napi_key = '#'\n\nurl = f\"https://api.openweathermap.org/data/2.5/weather?q={city_name}&units={units}&appid={api_key}\"\n\ndef main_function():\n '''This function is used to convert json data to pyhton dictionary'''\n try:\n\n r = requests.get(url)\n r.raise_for_status\n \n # load the json data\n with open('Weatherdata.json','w') as f:\n source = r.json()\n f.write(json.dumps(source,indent=2))\n\n # make python dictionary\n with open('Weatherdata.json','r') as f:\n python_data = json.load(f)\n\n # desired results\n def weather_data(): \n '''This function is responsible to print the weather info''' \n print(f'Getting the weather info of --> {city_name} 🌇')\n\n print(f\"1. {python_data['weather'][0]['description']} \")\n print(f\"2. Minimum Temp --> {python_data['main']['temp_min']} degree celcius 🌡️\")\n print(f\"3. Maximum Temp --> {python_data['main']['temp_max']} degree celcius 🌡���\")\n print(f\"4. Feels Like --> {python_data['main']['temp']} degree celcius 🌡️\")\n print(f\"5. Wind Speed --> {python_data['wind']['speed']} kilometer per hour 🌬️\")\n print(f\"6. 
Humidity --> {python_data['main']['humidity']} \")\n\n        weather_data() \n\n\n    except Exception as e:\n        print(\"Something Went Wrong 💢 \",e)\n\n\n\nif __name__ == \"__main__\":\n\n    main_function()\n    print(\"Code Completed 🔥\")","repo_name":"officialGanesh/Sunny-Umbrella","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"14675383344","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        map_of_nums = {}\n        for i in range(len(nums)):\n            map_of_nums[nums[i]] = i\n\n        output_map = []\n        for i in range(len(nums)):\n            selection = nums[i]\n            remainder = target - selection\n            # skip pairing an element with itself\n            if remainder in map_of_nums and map_of_nums[remainder] != i:\n                output_map = [i, map_of_nums[remainder]]\n                return output_map\n        return output_map\n\n\ninstance = Solution()\n\nresult = instance.twoSum([1,2,3,4,5], 7)\nprint(result)\n","repo_name":"theblueskies/Algos","sub_path":"two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"37200692664","text":"# -*- coding: utf-8 -*-\n\n\ndef power(n, m):\n    if m == 0:\n        return 1\n    else:\n        return power(n, m - 1) * n\n\n\ndef aux(n, c):\n    return lambda r: c(n * r)\n\n\ndef powerCPS(n, m, c):\n    # for m == 0 the loop is skipped and the continuation receives 1 directly\n    while(m != 0):\n        c = aux(n, c)\n        m -= 1\n    return c(1)\n\n\nprint(power(5, 3))\nprint(powerCPS(5, 3, lambda x: x))\n","repo_name":"s14t284/GraduateTasks","sub_path":"AdvProg/exams/cps.py","file_name":"cps.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10491916868","text":"import numpy as np\nfrom sigmoid import sigmoid\n\n\ndef cost_function_reg(theta, X, y, lmd):\n    m = len(y)\n    theta_len = len(theta)\n    theta = theta.reshape((theta_len, 1))\n    # You need to return the following values correctly\n    g = np.array(sigmoid(X.dot(theta)))\n    cost = (np.sum(-y * np.log(g) - (1 - y) * np.log(1 - g)) + lmd / 2 * np.sum(np.power(theta[1:], 2))) / m\n    grad = np.asarray(X.T.dot(g - y)).reshape((theta_len, 1)) / m\n    grad[1:] = grad[1:] + (lmd / m) * theta[1:]  # do not regularize theta_0\n\n    # ===================== Your Code Here =====================\n    # Instructions : Compute the cost of a particular choice of theta\n    #                You should set cost and grad correctly.\n    #\n\n\n    # ===========================================================\n    return cost, grad.reshape(theta_len,)\n","repo_name":"GuoHongke/coursera-ml-py","sub_path":"machine-learning-ex2/ex2/costFunctionReg.py","file_name":"costFunctionReg.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"30480601996","text":"from rllab.envs.own_envs.point_2d_env import PointEnv\nfrom sandbox.ours.dynamics import MLPDynamicsModel\n\nfrom sandbox.ours.controllers import RandomController, MPCcontroller\nfrom sandbox.ours.model_based_rl.helpers import sample, path_reward\nfrom pprint import pprint\n\nimport tensorflow as tf\nimport os\nimport numpy as np\n\n\n\ndef reward_fn_point_env(state, action, new_state):\n    if new_state.ndim == 2:\n        assert new_state.shape[1] == 2\n        return - np.sum(new_state**2, axis=1)\n    elif new_state.ndim == 1:\n        assert new_state.shape[0] == 2\n        return - np.sum(new_state ** 2)\n    else:\n        raise AssertionError(\"state must be numpy array with ndim == 1 or ndim == 
2\")\n\n\n\ndef train(env,\n reward_fn,\n render=False,\n learning_rate=1e-3,\n onpol_iters=10,\n batch_size=200,\n dynamics_iters=60,\n num_paths_random=1000,\n num_paths_onpol=10,\n num_simulated_paths=10000,\n env_horizon=1000,\n mpc_horizon=15,\n\n ):\n \"\"\"\n\n Arguments:\n\n onpol_iters Number of iterations of onpolicy aggregation for the loop to run.\n\n dynamics_iters Number of iterations of training for the dynamics model\n |_ which happen per iteration of the aggregation loop.\n\n batch_size Batch size for dynamics training.\n\n num_paths_random Number of paths/trajectories/rollouts generated\n | by a random agent. We use these to train our\n |_ initial dynamics model.\n\n num_paths_onpol Number of paths to collect at each iteration of\n |_ aggregation, using the Model Predictive Control policy.\n\n num_simulated_paths How many fictitious rollouts the MPC policy\n | should generate each time it is asked for an\n |_ action.\n\n env_horizon Number of timesteps in each path.\n\n mpc_horizon The MPC policy generates actions by imagining\n | fictitious rollouts, and picking the first action\n | of the best fictitious rollout. This argument is\n | how many timesteps should be in each fictitious\n |_ rollout.\n\n n_layers/size/activations Neural network architecture arguments.\n\n \"\"\"\n\n\n # collect initial data with a random controller\n\n env.reset()\n\n random_controller = RandomController(env)\n\n random_paths = sample(env, random_controller, num_paths=num_paths_random, horizon=env_horizon)\n print(\"Collected {} paths with random policy\".format(len(random_paths)))\n\n\n # Build dynamics model and MPC controllers\n\n sess = tf.Session()\n\n dynamics_model = MLPDynamicsModel(\"dynamics_model\", env, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, batch_size=batch_size)\n\n mpc_controller = MPCcontroller(env=env,\n dynamics_model=dynamics_model,\n horizon=mpc_horizon,\n reward_fn=reward_fn,\n num_simulated_paths=num_simulated_paths)\n\n # Tensorflow session building\n\n sess.__enter__()\n tf.global_variables_initializer().run()\n\n\n # Multiple iterations of onpolicy aggregation at each iteration refitting the dynamics model to current dataset\n # and then taking on-policy samples and aggregating to the dataset.\n\n dataset = random_paths\n for itr in range(onpol_iters):\n # fit dynamics model\n\n obs = np.concatenate([path['observations'] for path in dataset], axis=0)\n obs_next = np.concatenate([path['next_observations'] for path in dataset], axis=0)\n act = np.concatenate([path['actions'] for path in dataset], axis=0)\n\n dynamics_model.fit(obs, act, obs_next, verbose=True, epochs=10)\n\n # generate on-policy data\n new_data_rl = sample(env, mpc_controller, num_paths=num_paths_onpol, horizon=env_horizon, verbose=True)\n\n # aggregate data\n dataset.extend(new_data_rl)\n\n # calculate cost and returns\n returns = np.concatenate([path['rewards'] for path in new_data_rl], axis=0)\n rewards = [path_reward(reward_fn, path) for path in new_data_rl]\n\n # LOGGING\n # Statistics for performance of MPC policy using\n # our learned dynamics model\n print('Iteration', itr)\n # In terms of cost function which your MPC controller uses to plan\n print('AverageRewards', np.mean(rewards))\n print('StdRewards', np.std(rewards))\n print('MinimumRewards', np.min(rewards))\n print('MaximumRewards', np.max(rewards))\n # In terms of true environment reward of your rolled out trajectory using the MPC controller\n print('AverageReturn', np.mean(returns))\n print('StdReturn', 
np.std(returns))\n        print('MinimumReturn', np.min(returns))\n        print('MaximumReturn', np.max(returns))\n\n\ndef main():\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--env_name', type=str, default='PointEnv')\n    # Experiment meta-params\n    parser.add_argument('--exp_name', type=str, default='mb_mpc')\n    parser.add_argument('--seed', type=int, default=3)\n    parser.add_argument('--render', action='store_true')\n    # Training args\n    parser.add_argument('--learning_rate', '-lr', type=float, default=1e-3)\n    parser.add_argument('--onpol_iters', '-n', type=int, default=15)\n    parser.add_argument('--dyn_iters', '-nd', type=int, default=60)\n    parser.add_argument('--batch_size', '-b', type=int, default=512)\n    # Data collection\n    parser.add_argument('--random_paths', '-r', type=int, default=1000) #TODO change back to 10000\n    parser.add_argument('--onpol_paths', '-d', type=int, default=10)\n    parser.add_argument('--simulated_paths', '-sp', type=int, default=10) #TODO change back to 1000\n    parser.add_argument('--ep_len', '-ep', type=int, default=1000)\n    # Neural network architecture args\n    parser.add_argument('--n_layers', '-l', type=int, default=2)\n    parser.add_argument('--size', '-s', type=int, default=500)\n    # MPC Controller\n    parser.add_argument('--mpc_horizon', '-m', type=int, default=15)\n    args = parser.parse_args()\n\n    # Set seed\n    np.random.seed(args.seed)\n    tf.set_random_seed(args.seed)\n\n    # Make env\n    if args.env_name == \"PointEnv\":\n        env = PointEnv()\n        reward_fn = reward_fn_point_env\n\n    train(env=env,\n          reward_fn=reward_fn,\n          render=args.render,\n          learning_rate=args.learning_rate,\n          onpol_iters=args.onpol_iters,\n          dynamics_iters=args.dyn_iters,\n          batch_size=args.batch_size,\n          num_paths_random=args.random_paths,\n          num_paths_onpol=args.onpol_paths,\n          num_simulated_paths=args.simulated_paths,\n          env_horizon=args.ep_len,\n          mpc_horizon=args.mpc_horizon,\n          )\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n\n","repo_name":"jonasrothfuss/model_ensemble_meta_learning","sub_path":"sandbox/ours/model_based_rl/model_based_rl_script.py","file_name":"model_based_rl_script.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"81"}
{"seq_id":"28975407205","text":"import unittest\n\nfrom dust_ast import *\n\nclass TestLoopExpression(unittest.TestCase):\n    def test_to_string(self):\n        expression = InfiniteLoopExpression(BlockExpression([]))\n        loop_expression = LoopExpression(expression)\n        result = loop_expression.to_string()\n\n        expected = f\"\"\"LoopExpression:\n  expression: {expression.to_string(2, 2)}\"\"\"\n\n        self.assertEqual(result, expected)\n","repo_name":"santiagodg/dust","sub_path":"compiler/compiler/dust_ast/test/test_loop_expression.py","file_name":"test_loop_expression.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"70355105864","text":"import io\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport mplfinance\nimport matplotlib\nfrom matplotlib import pyplot\n\nmatplotlib.use(\"Agg\")\n\n\ndef prep_chart_dataframe(df: DataFrame) -> DataFrame:\n    \"\"\"Update column names and the index of a DataFrame\n    so mplfinance can plot a candlestick chart.\n\n    :param df: A Dataframe of historical price moves.\n    \"\"\"\n    df[\"date\"] = pd.to_datetime(df[\"date\"])\n    df = df.set_index(\"date\")\n    df = df.sort_index(ascending=True)\n    return df\n\n\ndef simple_plot(df: DataFrame, days: int, 
w: int, h: int) -> bytes:\n \"\"\"Plots simple candlestick chart.\n\n :param df: A DataFrame of price moves.\n :param days: Days to display.\n :param w: The figsize width.\n :param h: The figsize height.\n :return: The chart image.\n \"\"\"\n df = prep_chart_dataframe(df)\n idx = -1 * days\n df = df[idx:]\n image_bytes = io.BytesIO()\n mplfinance.plot(df, type=\"candle\", volume=True, savefig=image_bytes, figsize=(w, h))\n pyplot.savefig(image_bytes, format=\"jpg\")\n return image_bytes.getvalue()\n","repo_name":"sfjt/analyst","sub_path":"analyst/algo/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33934879415","text":"from bs4 import BeautifulSoup\nimport pickle\nimport re\nimport requests\n\n\ndef loadCredentials(credentials_pickle_file):\n \"\"\"Get the Genius API credentials by loading a pickle file.\n\n Args:\n credentials_pickle_file (str): Path to pickle file containing creds.\n\n Returns:\n dict: dictionary containing API keys, token, etc.\n\n \"\"\"\n try:\n with open(credentials_pickle_file, 'rb') as c:\n return pickle.load(c)\n except IOError:\n print(\"Error: Couldn't open credentials file. Exiting.\")\n\n\ndef getGeniusAPIBaseURL():\n return \"http://api.genius.com\"\n\n\n# meta: \"meta\" object from Genius API response\ndef getResponseStatus(response):\n status = response[\"meta\"][\"status\"]\n return status\n\n\ndef createSearchGETRequestURLs(song_title, song_artist):\n \"\"\"Create a list of search URLs, each with a variant of (title + artist).\n\n The variants are:\n 1. \"song_title by song_artist\"\n 2. \"song_artist - song_title\"\n 3. Same as 2. but song_title has anything including and after \" - \"\n removed.\n 4. 
Same as 2, but song_title has anything in parentheses removed.\n\n If modifying song_title in a particular way would not change it, that\n variant is not appended to the list (e.g., if song_title does not contain\n a '-', variant #3 is not created nor appended to the end of the list.\n\n Args:\n song_title (str): Title of song whose lyrics we want.\n song_artist (str): Artist of song whose lyrics we want.\n\n Returns:\n list of str: search URLs corresponding to the search terms\n \"\"\"\n # Variant #1\n search_terms = []\n search_terms.append(song_title + \" by \" + song_artist)\n\n # Variant #2\n search_term_base = song_artist + \" - \"\n search_terms.append(search_term_base + song_title)\n\n # Variant #3\n if \"-\" in song_title:\n hyphen = \"\\ -\\ .*\"\n hyphen_title = re.sub(hyphen, \"\", song_title)\n search_terms.append(search_term_base + hyphen_title)\n\n # Variant #4\n if \"(\" in song_title:\n parens = \"\\ \\(.*\\)\"\n parens_title = re.sub(parens, \"\", song_title)\n search_terms.append(search_term_base + parens_title)\n\n base_url = getGeniusAPIBaseURL()\n search = \"/search?q=\"\n full_urls = [base_url + search + term for term in search_terms]\n\n # print(\"All search URLs: \", \"\\n\".join(full_urls))\n return full_urls\n\n\ndef prepareGETRequestHeaders():\n \"\"\"Create headers for GET request, which includes the API Client token.\"\"\"\n credentials = loadCredentials(\"credentials.p\")\n access_token = credentials[\"GENIUS_API_CLIENT_TOKEN\"]\n headers = {\"Authorization\": \"Bearer \" + access_token}\n return headers\n\n\ndef searchGenius(search_url):\n \"\"\"\n Searches the Genius API with a variety of search terms until it finds a hit\n with a matching artist.\n\n Parameters:\n search_url: URL to GET\n\n Returns:\n JSON-format dict: JSON object containing info about target song\n\n \"\"\"\n # GET request params\n headers = prepareGETRequestHeaders()\n\n # actual GET request\n response = requests.get(search_url, headers=headers)\n response_json = response.json()\n return response_json\n\n\ndef checkTitlesMatch(song_title, search_result):\n \"\"\"Check to see if the current search result matches our song title.\n\n Before checking, we also remove any parentheses (and text inside) and any\n hyphens (and text following the hyphen).\n\n Args:\n song_title (str): The name of the song to be matched.\n search_result (JSON-format dict): JSON object containing song info.\n\n Returns:\n True if search_result corresponds to a song whose title matches.\n False otherwise.\n\n \"\"\"\n result_title = search_result[\"title\"]\n\n # Let's not let capitalization fool us\n result_lower = result_title.lower()\n song_lower = song_title.lower()\n\n # Remove any ' - ' and anything aftwards\n song_minus_hyphen = re.sub(\"\\ -\\ .*\", \"\", song_lower)\n\n # Remove any '()' and anything inbetween\n song_minus_parens = re.sub(\"\\ \\(.*\\)\", \"\", song_minus_hyphen)\n result_minus_parens = re.sub(\"\\ \\(.*\\)\", \"\", result_lower)\n\n if song_minus_parens == result_minus_parens:\n return True\n return False\n\n\ndef checkArtistsMatch(song_artist, search_result):\n \"\"\"Check to see if the current search result matches our song artist.\n\n Before checking, we also remove any ampersands and anything afterwards.\n\n Args:\n song_artist (str): The artist of the song to be matched.\n search_result (JSON-format dict): JSON object containing song info.\n\n Returns:\n True if search_result corresponds to a song whose artist matches.\n False otherwise.\n\n \"\"\"\n result_artist = 
search_result[\"primary_artist\"][\"name\"]\n\n # \"Zedd & Liam Payne\" was the listed primary artist or \"Get Low\",\n # so I changed this from \"==\" to \"in\" (to make \"Zedd\" as the Spotify artst\n # satisfy the condition). This might actually be worse in the long run, but\n # for now it seems to be a good fix.\n\n # Let's not let capitalization fool us\n result_lower = result_artist.lower()\n song_lower = song_artist.lower()\n\n # Remove any '&' and anything afterwards\n song_minus_amp = re.sub(\"\\&.*\", \"\", song_lower)\n if song_minus_amp in result_lower:\n return True\n return False\n\n\ndef extractSongPathFromGeniusSearchResult(search_result):\n \"\"\"Get the URL to the song lyrics out of the matching search result.\n\n Args:\n search_result (JSON-format dict): JSON object with info about a song.\n\n Returns:\n str: part of URL to page of song lyrics.\n\n \"\"\"\n return search_result[\"path\"]\n\n\ndef findMatchingHitInSearchResults(song_title, song_artist, search_results):\n \"\"\" Looks for a hit with a primary artist that matches artist.\n\n Args:\n song_title (str): Title of song that we are trying to match.\n song_artist (str): Artist of song that we are trying to match.\n search_results (JSON-format dict): JSON object with list of hits.\n\n Returns:\n str: \"result\" field of matching hit in JSON object.\n\n \"\"\"\n for hit in search_results[\"response\"][\"hits\"]:\n artist_match = checkArtistsMatch(song_artist, hit[\"result\"])\n title_match = checkTitlesMatch(song_title, hit[\"result\"])\n if artist_match and title_match:\n return hit[\"result\"]\n return None\n\n\ndef getGeniusWebsiteBaseURL():\n return \"https://genius.com\"\n\n\ndef getLyricsPageHTMLFromPath(song_path):\n \"\"\"Get the HTML of the page containing song lyrics from Genius's website.\n\n Args:\n song_path (str): URL to page with song lyrics\n\n Returns:\n Response object: page HTML that contains lyrics\n\n \"\"\"\n lyrics_url = getGeniusWebsiteBaseURL() + song_path\n page_html = requests.get(lyrics_url)\n return page_html\n\n\ndef parseLyricsPageHTML(html):\n \"\"\"Extract the lyrics from the song page's HTML.\"\"\"\n html_text = BeautifulSoup(html.text, \"html.parser\")\n # [h.extract() for h in lyrics_html('script')]\n lyrics = html_text.find(\"div\", class_=\"lyrics\").get_text()\n return lyrics\n","repo_name":"jkclark/Lyrics-Always","sub_path":"getLyricsFromGenius.py","file_name":"getLyricsFromGenius.py","file_ext":"py","file_size_in_byte":7131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30956054032","text":"from decimal import Decimal\nfrom django.db import models\n\nfrom phonenumber_field.modelfields import PhoneNumberField\n\nfrom apps.util.models import HomeCaptainAbstractBaseModel, Address\nfrom apps.event.models import Event\n\nDECIMAL_DEFAULT = Decimal()\n\nclass Service(HomeCaptainAbstractBaseModel):\n name = models.CharField(max_length=128)\n description = models.TextField()\n\n def __str__(self):\n return \"{self.name}\"\n \nclass ServiceProvider(HomeCaptainAbstractBaseModel):\n first_name = models.CharField(max_length=64)\n last_name = models.CharField(max_length=64)\n email = models.EmailField(max_length=255)\n address = models.OneToOneField(Address, on_delete=models.PROTECT, null=True)\n phone_number = PhoneNumberField(blank=True)\n #picture = models.ImageField(upload_to='media/uploads/customer_profile_pics',\n # blank=True)\n about = models.TextField()\n\n services = models.ManyToManyField(Service)\n \n def 
__str__(self):\n        return f\"{self.first_name} ({self.uid})\"\n\n\nclass ServiceRequest(HomeCaptainAbstractBaseModel):\n    service_provider = models.ForeignKey(ServiceProvider, on_delete=models.CASCADE)\n    event = models.ForeignKey(Event, on_delete=models.CASCADE)\n\n    bill_amount = models.DecimalField(max_digits=8, decimal_places=2,\n                                      default=DECIMAL_DEFAULT)\n\n    is_service_complete = models.BooleanField(default=False)\n    service_completion_date = models.DateField(null=True)\n    rating = models.IntegerField(choices=[(i,i) for i in range(1,6)])\n    feedback = models.TextField()\n    #invoice = models.ImageField()\n    payment_link = models.URLField(max_length=512, blank=True)\n    invoice_paid_date = models.DateField(null=True)\n\n\n    def __str__(self):\n        return f\"{self.event.property.id} {self.event.event_config.name}\"\n","repo_name":"SiriusWhi/HomeCaptain_SAAS","sub_path":"homecaptain/apps/service_provider/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"20066040958","text":"from collections import defaultdict, namedtuple\nfrom heapq import heappop, heappush\n\n#file_to_use = 'day15/day15-sample.txt'\nfile_to_use = 'day15/day15-input.txt'\n\n# OK, treating this as a grid is going to be a mess, so \n# let's start by thinking about this as a graph with useful labels\n\ngraph = defaultdict() # point tuple to set of (point, weight) tuples\nPoint = namedtuple('Point', 'x y')\nstart, end = Point(0,0), None\n\nwith open(file_to_use) as f:\n    file_lines = f.readlines()\n    point_risks = dict() # I don't want to store these in the graph\n\n    def risk_calculator(r, shifts):\n        # It is past my bedtime and I couldn't get the math fast\n        risks = [9,1,2,3,4,5,6,7,8]\n        return risks[(r + shifts) % 9]\n\n    x,y = 0,0\n    for y_repeat in range(5):\n        for line in [l.strip() for l in file_lines]:\n            x = 0\n            for x_repeat in range(5):\n                for risk in [int(r) for r in line]:\n                    p = Point(x,y)\n                    point_risks[p] = risk_calculator(risk, x_repeat + y_repeat)\n                    graph[p] = set()\n                    u = Point(p.x, p.y - 1) \n                    if u in graph:\n                        graph[u].add((p, point_risks[p]))\n                        graph[p].add((u, point_risks[u]))\n                    l = Point(p.x - 1, p.y) \n                    if l in graph:\n                        graph[l].add((p, point_risks[p]))\n                        graph[p].add((l, point_risks[l]))\n                    # the other ones won't be loaded yet\n                    x += 1 \n            y += 1\n    end = Point(x - 1, y - 1)\n\n# part 2\n# Option 1 - Dijkstra again, maybe with a priority queue or something so it doesn't choke\n# option 2 - solve each grid separately? Map each edge node to every other edge node, then\n# collapse the grid down into those nodes and solve again? 
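\n# The code below takes option 1: plain Dijkstra over the full 5x-tiled graph,\n# using a heapq priority queue with lazy deletion -- stale queue entries are\n# simply skipped via the visited set -- so it does not choke on the big grid.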
\n\nqueue = [(0, start)]\ndistances = defaultdict(lambda: float('inf'))\ndistances[start] = 0\nvisited = set()\n\nwhile end not in visited:\n _, point = heappop(queue)\n if point not in visited:\n visited.add(point)\n dist = distances[point]\n \n for neighbor, neightbor_dist in graph[point]:\n if neighbor not in visited:\n new_dist = dist + neightbor_dist\n if new_dist < distances[neighbor]:\n heappush(queue, (new_dist, neighbor))\n distances[neighbor] = new_dist\n\nprint(distances[end])","repo_name":"ChrisGwinn/adventofcode2021","sub_path":"day15/day15-2.py","file_name":"day15-2.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24285903137","text":"from api.dht.read import Read\nfrom api.dht.MySql import MySql\nimport datetime\nfrom api.models.summary import Summary\n\nfrom api.models.interval_request import IntervalRequest\nfrom api.models.interval import Interval\nfrom api.models.interval import Intervals\nfrom api.chart_service import ChartService\n\nclass SummaryService:\n\n def __init__(self):\n self.sql = MySql()\n self.read = Read()\n\n def getSlaveSummary(self, room):\n now = self.read.getTemp()\n day = [str(self.sql.avgTempToday(room)), str(self.sql.avgHumidityToday(room))]\n nowTime = datetime.datetime.today() + datetime.timedelta(days = 1)\n weekTime = datetime.datetime.today() - datetime.timedelta(days = 7)\n week = [str(self.sql.avgTempBetween(room, weekTime.strftime('%Y-%m-%d'), nowTime.strftime('%Y-%m-%d'))), str(self.sql.avgHumidityBetween(room, weekTime.strftime('%Y-%m-%d'), nowTime.strftime('%Y-%m-%d')))]\n summary = Summary(now, day, week)\n return summary.__dict__\n\n def getSummary(self, room):\n latest = self.sql.latestReadingWithTime(room)\n now = [latest[0], latest[1]]\n day = [str(self.sql.avgTempToday(room)), str(self.sql.avgHumidityToday(room))]\n nowTime = datetime.datetime.today() + datetime.timedelta(days = 1)\n weekTime = datetime.datetime.today() - datetime.timedelta(days = 7)\n week = [str(self.sql.avgTempBetween(room, weekTime.strftime('%Y-%m-%d'), nowTime.strftime('%Y-%m-%d'))), str(self.sql.avgHumidityBetween(room, weekTime.strftime('%Y-%m-%d'), nowTime.strftime('%Y-%m-%d')))]\n summary = Summary(now, day, week, room, latest[2])\n return summary.__dict__\n\n def getSummaries(self, rooms):\n summaries = []\n for room in rooms:\n summaries.append(self.getSummary(room))\n return summaries\n\n def getSummaries(self):\n summaries = []\n for room in self.sql.getRooms():\n summaries.append(self.getSummary(room))\n return summaries\n\n","repo_name":"rjojjr/humidity-temp","sub_path":"src/api/summary_service.py","file_name":"summary_service.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34473089238","text":"import random\nfrom typing import Tuple\n\nfrom lib.gen.automaton import ClassicAutomaton\nfrom .map import TileMap, Object, Objects\nfrom .control import PlayerBugController\n\n\nclass World:\n\n def __init__(self):\n self.tile_map = TileMap((128, 128))\n self.tile_map.set_outside(12, 0, 0, 0)\n #self.tile_map.random()\n self.init_map_gol(self.tile_map)\n self.objects = Objects(static_map=self.tile_map)\n\n pos_set = set()\n for i in range(1000):\n for j in range(1000):\n pos = random.randrange(min(self.width, i+10)), random.randrange(min(self.height, i+10))\n if pos not in pos_set:\n pos_set.add(pos)\n if not self.tile_map.map[pos[1], pos[0], 
0]:\n if i == 0:\n controller = PlayerBugController(self.objects, pos=(pos[0] + .5, pos[1] + .5))\n else:\n o = self.objects.add_object(\n shape_type=\"box\" if i != 0 and random.random() < .2 else \"circle\",\n pos=(pos[0] + .5, pos[1] + .5),\n mass=10,\n scale=1,\n )\n break\n\n @property\n def width(self) -> int:\n return self.tile_map.width\n\n @property\n def height(self) -> int:\n return self.tile_map.height\n\n @property\n def player(self) -> Object:\n return self.objects.objects[0]\n\n def update(self, time: float, dt: float):\n if 0:\n for o in self.object_map.objects[1:]:\n if random.random() < dt:\n o.body.position = (\n o.body.position[0] + random.randint(-1, 1) * .1,\n o.body.position[1] + random.randint(-1, 1) * .1,\n )\n\n self.objects.update(time, dt)\n\n def init_map_gol(self, tile_map: TileMap):\n ca = ClassicAutomaton(\n tile_map.width, tile_map.height,\n born=(2, 3, 4, 5),\n survive=(2, 5,),\n )\n ca.init_random(.3, 23)\n ca.step(20)\n tile_map.map[:, :, 0] = 1 - ca.cells\n","repo_name":"defgsus/thegame","sub_path":"dash/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4147893501","text":"\"\"\"\nCreated on Tue Jul 30 03:24:06 2019\n\n@author: meimarcel\n\"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\n\n#Criaç]ao da rede neural\nrede_neural = Sequential()\nrede_neural.add(Conv2D(64, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))\nrede_neural.add(MaxPooling2D(pool_size = (2, 2)))\n\nrede_neural.add(Conv2D(32, (3, 3), activation = 'relu'))\nrede_neural.add(MaxPooling2D(pool_size = (2, 2)))\n\nrede_neural.add(Flatten())\n\nrede_neural.add(Dense(units = 128, activation = 'relu'))\nrede_neural.add(Dense(units = 1, activation = 'sigmoid'))\n\nrede_neural.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n#Normalização da imagens de entrada\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\ntraining_set = train_datagen.flow_from_directory('dogs-and-cats-dataset/training_set',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\ntest_set = test_datagen.flow_from_directory('dogs-and-cats-dataset/test_set',\n target_size = (64, 64),\n batch_size = 32,\n class_mode = 'binary')\n\n#Treinar a rede\nrede_neural.fit_generator(training_set,\n steps_per_epoch = 8000,\n epochs = 10,\n validation_data = test_set,\n validation_steps = 2000)\n\n\n#Salvar a rede e os pesos\nrede_neural_json = rede_neural.to_json()\nwith open(\"ModeloConv2D.json\",\"w\") as json_file:\n json_file.write(rede_neural_json)\n \nrede_neural.save_weights(\"ModeloConv2D.h5\")\n","repo_name":"meimarcel/rede-neural-convolucional","sub_path":"RedeNeuralConvolucional.py","file_name":"RedeNeuralConvolucional.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16769723022","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.validators import 
UniqueTogetherValidator\n\nfrom .models import Post, Comment, Group, Follow\n\nUser = get_user_model()\n\n\nclass PostSerializer(serializers.ModelSerializer):\n author = serializers.ReadOnlyField(source='author.username')\n group = serializers.SlugRelatedField(slug_field='title', queryset=Group.objects.all(), required=False)\n\n class Meta:\n fields = ('id', 'text', 'author', 'pub_date', 'group')\n model = Post\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n read_only=True,\n slug_field='username',\n default=serializers.CurrentUserDefault()\n )\n\n class Meta:\n fields = ('id', 'author', 'post', 'text', 'created')\n model = Comment\n read_only_fields = ['post']\n write_only_fields = ''\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n class Meta:\n fields = ('id', 'title',)\n model = Group\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n fields = ('username',)\n model = User\n\n\nclass FollowSerializer(serializers.ModelSerializer):\n following = serializers.SlugRelatedField(\n slug_field='username',\n queryset=User.objects.all(),\n )\n\n user = serializers.SlugRelatedField(\n slug_field='username',\n read_only=True,\n default=serializers.CurrentUserDefault()\n )\n\n class Meta:\n model = Follow\n fields = ('user', 'following')\n # read_only_fields = ('user',) # needed to to get it from user\n\n validators = [\n UniqueTogetherValidator(queryset=Follow.objects.all(), fields=('user', 'following'))\n ]\n #\n # def create(self, validated_data):\n # print(validated_data)\n # following = validated_data.get('following')\n # user = validated_data.get('user')\n # following = get_object_or_404(User, username=following)\n # return Follow.objects.create(following=following, user=user)\n\n # return super().create(validated_data)\n","repo_name":"alisherbek-rakhimov/api_final_yatube","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70905784266","text":"def check_win(char, coord, all_char, win_cond):\n # char: str character moved\n # coord: tuple new coordinate of character moved to\n # all_char: dictionary of all characters with coordinates\n # win_cond: dictionary of all winning phrase\n\n # unpack coord tuple\n x, y = coord\n\n # compile list of words\n ls = []\n\n # check left of char\n if all_char.get((x-1, y)) != None:\n left_char = all_char.get((x-1, y))\n ls.append(left_char + char)\n # check top of char\n if all_char.get((x, y+1)) != None:\n top_char = all_char.get((x, y+1))\n ls.append(top_char + char)\n # check right of char\n if all_char.get((x+1, y)) != None:\n right_char = all_char.get((x+1, y))\n ls.append(char + right_char)\n # check bottom of char\n if all_char.get((x, y-1)) != None:\n bottom_char = all_char.get((x, y-1))\n ls.append(char + bottom_char)\n\n # call function to ammend attribute of the phrase and tell if there are any changes made\n changes = update_win_state(ls, win_cond)\n\n # return boolean only??\n return changes\n\n\ndef update_win_state(ls, win_cond):\n # ls: list of phrase that exist\n # win_cond: dictionary of of key object\n\n # initialise no change\n changes = False\n\n # iterate over the ls of possible phrases\n for i in ls:\n # if phrase is in dictionary of phrase\n if i in win_cond:\n # check if the phrase is double counted\n if win_cond[i].won is False:\n # call method .matched to count the score ONCE\n 
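The four directional checks in `check_win` above follow one pattern and can be expressed with a single offset table; a sketch assuming the same `char`, `coord`, and `all_char` arguments as the record:

def neighbor_phrases(char, coord, all_char):
    x, y = coord
    phrases = []
    # (dx, dy, before): `before` says whether the neighbour character
    # precedes `char` in the candidate phrase
    for dx, dy, before in ((-1, 0, True), (0, 1, True), (1, 0, False), (0, -1, False)):
        other = all_char.get((x + dx, y + dy))
        if other is not None:
            phrases.append(other + char if before else char + other)
    return phrases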
win_cond[i].matched()\n # return boolean to indicate if any change has been made, be it 1 change or many changes\n changes = True\n return changes\n\n#check_win(\"c\", (1,1), {(1,2): \"d\", (2,1): \"e\"}, {})\n\n\ndef calculate_total_score(win_cond):\n total = 0\n for i in win_cond:\n if win_cond[i].won:\n total += win_cond[i].point\n return total\n","repo_name":"Benny1143/1d-ctd-project","sub_path":"help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29840481619","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.home_view, name=\"home\"),\n path(\"about/\", views.about_view, name=\"about\"),\n path(\"contacts/\", views.contacts_view, name=\"contacts\"),\n path(\"portfolio/\", views.portfolio_view, name=\"portfolio\"),\n path(\"portfolio//\", views.portfolio_detail_view, name=\"portfolio_detail\"),\n path(\n \"categories//\",\n views.subcategory_products,\n name=\"subcategory_detail\",\n ),\n path(\"products//\", views.product_detail, name=\"product_detail\"),\n path(\"send_email/\", views.send_phone_number_to_telegram, name=\"send_email\"),\n path('reviews/', views.reviews_view, name=\"reviews\"),\n\n path('sizes/', views.get_sizes, name='sizes')\n]\n","repo_name":"IldarSaygafarov2/combouz_2.0","sub_path":"combouz/web_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71433690186","text":"\"\"\"\r\n This program takes an input that includes the\r\n number of people in a number of groups and outputs\r\n the number of people that rode the ride that day\r\n\"\"\"\r\n\r\nclass Group:\r\n def __init__( self, input_count ):\r\n self.group_count = input_count\r\n self.set_count = None\r\n\r\nride_seats, maximum_ride_count, group_count = ( int( input_data ) for input_data in input().split() )\r\ngroups = [ None ] * group_count\r\n\r\nfor index in range( group_count ): # populating groups\r\n groups[ index ] = Group( int( input() ) )\r\n \r\npeople_ridden_count = 0\r\nindex = 0\r\n\r\nfor ride_count in range( maximum_ride_count ): # for all rides in the day\r\n if groups[ index ].set_count: # if the set already been calculated\r\n people_ridden_count += groups[ index ].set_count # use the saved number\r\n index = groups[ index ].next_set # jump to the group after these\r\n else:\r\n people_on_ride = 0\r\n saved_index = index # keeps the program from counting people twice\r\n \r\n # count out groups until the ride is full\r\n while people_on_ride + groups[ index ].group_count <= ride_seats:\r\n people_on_ride += groups[ index ].group_count\r\n index = ( index + 1 ) % len( groups ) # circularizes the groups list\r\n \r\n if saved_index == index: # all groups are on the ride\r\n break\r\n \r\n # save calculation and next set of groups\r\n groups[ saved_index ].set_count = people_on_ride\r\n groups[ saved_index ].next_set = index\r\n people_ridden_count += people_on_ride\r\n\r\nprint( people_ridden_count )","repo_name":"ArcDM/CodinGame-Hard-Roller-Coaster","sub_path":"Roller Coaster.py","file_name":"Roller Coaster.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39007732446","text":"from bs4 import BeautifulSoup\nimport re\nfrom bs4.element import Script\nimport requests\nimport 
json\nimport datetime\n# 视频信息爬虫\n\nheaders = {\n \"cookie\": \"buvid3=7BAF3F51-80C7-4A07-AFC1-178C175A1F9D53950infoc; LIVE_BUVID=AUTO2715820713588762; rpdid=|(u))uuRm)Jl0J'ul)kRYuYm|; blackside_state=1; _uuid=E9A99A3D-6222-A977-0429-654A9987DFB723266infoc; buvid_fp=7BAF3F51-80C7-4A07-AFC1-178C175A1F9D53950infoc; CURRENT_QUALITY=80; fingerprint=ce9d251e9a59d1a46c62a6dd41ac8aa8; buvid_fp_plain=7BAF3F51-80C7-4A07-AFC1-178C175A1F9D53950infoc; SESSDATA=4b96168c%2C1648198971%2C8a41d%2A91; bili_jct=ade772c8ae27ec2cf4a2ed2a9068d1c2; DedeUserID=168036077; DedeUserID__ckMd5=de5e5aeec9be9769; sid=ci78nq0y; CURRENT_FNVAL=976; bp_video_offset_168036077=579782870861588529; bp_t_offset_168036077=579782870861588529; innersign=1; bfe_id=fdfaf33a01b88dd4692ca80f00c2de7f; PVID=1\",\n \"origin\": \"https://www.bilibili.com\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36\"\n}\n\nfind_click_num = re.compile(r'')\nfind_dm_num = re.compile(r'')\nfind_initial_state = re.compile(r'')\nfind_video_data = re.compile(r'{(.*)};')\napi_url = \"https://api.bilibili.com/x/player/pagelist?bvid=\"\n\n# 根据视频bv号获取视频的cid\ndef getVideoCid(bv):\n get_cid_url = api_url+bv\n result = requests.get(get_cid_url, headers=headers)\n get_json = result.json()\n info_data = get_json['data'][0]\n return info_data['cid']\n\ndef getVideoInfo(bv):\n video_info = {}\n infoUrl = \"https://www.bilibili.com/video/\" + str(bv)\n response = requests.get(infoUrl, headers=headers)\n html = response.content.decode(\"utf-8\")\n soup = BeautifulSoup(html, \"html.parser\")\n scripts = str(soup.find_all('script'))\n initial_info = re.findall(find_initial_state,scripts)[0]\n after_info = re.findall(find_video_data,initial_info)[0]\n after_info =json.loads('{'+str(after_info)+'}')\n data_info = after_info['videoData']\n video_info['bv_id'] = data_info['bvid']\n video_info['cid'] = data_info['cid']\n video_info['tag'] = data_info['tname']\n video_info['title'] = data_info['title']\n date = datetime.datetime.utcfromtimestamp(data_info['pubdate']).strftime(\"%Y-%m-%d %H:%M:%S\")\n video_info['pub_date'] = date\n video_info['description'] = data_info['desc']\n video_info['owner'] = data_info['owner']\n video_info['link'] = 'https://www.bilibili.com/video/'+bv\n video_info['stat'] = data_info['stat']\n print(video_info)\n return video_info\n","repo_name":"yangjq713/BarrageAnalysis","sub_path":"server/bilibili/InfoSpider.py","file_name":"InfoSpider.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35702021036","text":"#!/usr/bin/env python3\n\n\"\"\"Asynchronous functions\"\"\"\n\nfrom typing import List\nwait_random = __import__('0-basic_async_syntax').wait_random\n\n\nasync def wait_n(n: int, max_delay: int) -> List[float]:\n \"\"\" A function that return the list of all the delays (float values)\n in ascending order without using sort() because of concurrency. 
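The `wait_n` coroutine begun above is documented as returning delays in ascending order without `sort()`; besides the dict-and-sorted approach its body (below) uses, `asyncio.as_completed` expresses the same idea directly, since all the coroutines start together and therefore finish in order of delay. An alternative sketch, reusing the record's `wait_random`:

import asyncio
from typing import List

async def wait_n(n: int, max_delay: int) -> List[float]:
    # as_completed yields awaitables in completion order, so awaiting them
    # in turn collects the delays already sorted ascending
    coros = [wait_random(max_delay) for _ in range(n)]
    return [await finished for finished in asyncio.as_completed(coros)]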
\"\"\"\n delay_list: List[float] = []\n\n delays: dict = {i: await wait_random(max_delay) for i in range(n)}\n delay_list = [delay for _, delay in sorted(delays.items(),\n key=lambda x: x[1])]\n\n return delay_list\n","repo_name":"dukeofhazardz/alx-backend-python","sub_path":"0x01-python_async_function/1-concurrent_coroutines.py","file_name":"1-concurrent_coroutines.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14542930344","text":"# ___ _ \n# |_ _| _ __ ___ _ __ ___ _ __ | |_ ___ \n# | | | '_ ` _ \\ | '_ \\ / _ \\ | '__|| __|/ __|\n# | | | | | | | || |_) || (_) || | | |_ \\__ \\\n# |___||_| |_| |_|| .__/ \\___/ |_| \\__||___/\n# |_| \n# ----------------------------------------------------------------------- \n\nimport pathlib\nimport os\nfrom resources.crypt import store_key, create_key\nfrom resources.database import Storage\n\n\n\n\n# __ __ _ _ _ \n# \\ \\ / / __ _ _ __ (_) __ _ | |__ | | ___ ___ \n# \\ \\ / / / _` || '__|| | / _` || '_ \\ | | / _ \\/ __|\n# \\ V / | (_| || | | || (_| || |_) || || __/\\__ \\\n# \\_/ \\__,_||_| |_| \\__,_||_.__/ |_| \\___||___/\n# ----------------------------------------------------------------------- \n\n\n\n# Get path to config file\ncurrent_path = str(pathlib.Path(__file__).parent.absolute()) + \"/\"\ncurrent_path = current_path.replace(\"resources/\", \"\")\nconfigpath = str(current_path) + \"settings.cfg\"\n\n\n\n# _____ _ _ \n# | ___| _ _ _ __ ___ | |_ (_) ___ _ __ ___ \n# | |_ | | | || '_ \\ / __|| __|| | / _ \\ | '_ \\ / __|\n# | _| | |_| || | | || (__ | |_ | || (_) || | | |\\__ \\\n# |_| \\__,_||_| |_| \\___| \\__||_| \\___/ |_| |_||___/\n# ----------------------------------------------------------------------- \n\n\n\ndef register_first_time_setup():\n try: \n open(current_path + \"first_time_setup.txt\", 'a+').close()\n print('registered first time setup')\n return True\n except Exception as e:\n print('Error While Creating First Time Setup File: ', e)\n return False\n\n\n\ndef check_first_time_setup():\n \"\"\"\n Checks to see if the application has been setup. 
\n\n It does this by checking to see if the first time setup file exists.\n \"\"\"\n # Check if first time setup file exists\n ## If yes, return False to indicate that the application has been setup\n ## if not, create first time setup file & return True to indicate that the application has not been setup\n \n\n if os.path.exists(current_path + \"first_time_setup.txt\"):\n return True\n else:\n return False\n\n\n\ndef delete_first_time_setup():\n \"\"\"\n Deletes the first time setup file.\n \"\"\"\n try:\n os.remove(current_path + \"first_time_setup.txt\")\n return True\n except Exception as e:\n print('Error While Deleting First Time Setup File: ', e)\n return False\n\n\n\ndef initialize_settings():\n \"\"\"\n Will copy settings from the template config file to the config file.\n \"\"\"\n try:\n if not os.path.exists(configpath):\n with open(configpath, 'w') as configfile:\n with open(current_path + \"settings_template.cfg\", 'r') as templatefile:\n configfile.write(templatefile.read())\n print('Settings File Created')\n return True\n except Exception as e:\n print('Error While Initializing Settings File: ', e)\n return False\n\n\n\ndef initialize_app():\n \"\"\"\n Will create all paths & the settings file when the application is first run.\n \"\"\"\n try:\n initialize_settings()\n return True\n except Exception as e:\n print('Error While Initializing Application: ', e)\n return False\n\n\n\ndef initalize_encryption(key=None, key_location=None):\n \"\"\"\n Will create a new key file from an input key text or by generating a new key.\n \"\"\"\n\n from resources.config import settings_core\n\n settings = settings_core()\n\n settings.set_setting_value('encryption', 'key_location', key_location + \"/.key.pem\")\n\n try: \n \n if key:\n key_byte = bytes(key, 'utf-8')\n else:\n key_byte = create_key()\n\n if key_byte:\n \n # check if key file exists\n if os.path.exists(key_location + \"/.key.pem\"):\n raise Exception('Key File Already Exists, please delete the key file to create a new one.')\n else:\n # if path does not exist, create it\n if not os.path.exists(key_location):\n os.mkdir(key_location)\n store_key(key_byte, key_location)\n return key_byte, None\n else:\n raise Exception('Error While Creating Key')\n\n except Exception as e:\n return None, e\n\n\ndef intialize_s3(s3_access, s3_secret, s3_endpoint, s3_bucket):\n \"\"\"\n Write encrypted S3 credentials to the configuration file.\n \"\"\"\n try: \n from resources.config import settings_core\n settings = settings_core()\n settings.set_setting_value(\"accounts\",\"encrypted_s3_access\",s3_access)\n settings.set_setting_value(\"accounts\",\"encrypted_s3_secret\",s3_secret)\n settings.set_setting_value(\"accounts\",\"encrypted_s3_endpoint\",s3_endpoint)\n settings.set_setting_value(\"accounts\",\"encrypted_s3_bucket\",s3_bucket)\n return True\n except: \n return False\n\n\n\ndef setup_check():\n \"\"\"\n Checks to see if the application requires first time setup. 
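`settings_core` (imported lazily from `resources.config` above) is likewise not shown in this record; a minimal configparser-backed stand-in matching the `set_setting_value(section, key, value)` calls used here, assuming the same `settings.cfg` layout, could be:

import configparser

class settings_core:
    def __init__(self, path: str = "settings.cfg"):
        self.path = path

    def set_setting_value(self, section: str, key: str, value: str) -> None:
        cfg = configparser.ConfigParser()
        cfg.read(self.path)
        if not cfg.has_section(section):
            cfg.add_section(section)
        cfg.set(section, key, str(value))
        with open(self.path, "w") as f:
            cfg.write(f)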
\n\n It does this by checking to see if the config file exists.\n \"\"\"\n # Check if config file exists\n ## If yes, return False to indicate that the application is not first time setup\n ## if not, copy settings template to config path & return True to indicate that the application is first time setup\n if not os.path.exists(configpath):\n register_first_time_setup()\n return True\n else:\n return False\n ","repo_name":"ClubNation/Media-Manager","sub_path":"resources/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"21643007897","text":"# -*- coding: utf-8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom klienti.models import *\t# import models\nfrom pieraksts.models import *\n\nimport datetime\n\n# COMAND BEGIN\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n\n# !!! PIERAKSTI !!!\n pier = Pieraksti.objects.all()\n today = datetime.date.today() # - datetime.timedelta(days=1)\n count = 0\n for p in pier:\n if p.nodarbiba.sakums.date() < today:\n hist = HistPieraksti( pieraksta_laiks = p.pieraksta_laiks, klients = p.klients, nodarbiba = p.nodarbiba )\n hist.save()\n p.delete()\n\n\n# !!! ATTEIKUMI !!!\n cancel = Atteikumi.objects.all()\n today = datetime.date.today() # - datetime.timedelta(days=1)\n for c in cancel:\n if c.nodarbiba.sakums.date() < today:\n hist = HistAtteikumi( ateikuma_laiks = c.ateikuma_laiks, klients = c.klients, nodarbiba = c.nodarbiba )\n hist.save()\n c.delete()\n\n","repo_name":"svabis/vf","sub_path":"klienti/management/commands/arhive_pier.py","file_name":"arhive_pier.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27012987555","text":"from django.shortcuts import render\r\nfrom index.models import *\r\ndef rankingView(request):\r\n # 搜索歌曲\r\n search_song = Dynamic.objects.select_related('song').order_by('-dynamic_search').all()[:4]\r\n # 歌曲分类列表\r\n All_list = Song.objects.values('song_type').distinct()\r\n # 歌曲列表信息\r\n song_type = request.GET.get('type', '')\r\n if song_type:\r\n song_info = Dynamic.objects.select_related('song').filter(song__song_type=song_type).order_by('-dynamic_plays').all()[:10]\r\n else:\r\n song_info = Dynamic.objects.select_related('song').order_by('-dynamic_plays').all()[:10]\r\n return render(request, 'ranking.html', locals())\r\n\r\n\r\n\r\n# 通用视图\r\nfrom django.views.generic import ListView\r\nclass RankingList(ListView):\r\n # context_object_name设置Html模版的某一个变量名称\r\n context_object_name = 'song_info'\r\n # 设定模版文件\r\n template_name = 'ranking.html'\r\n # 查询变量song_info的数据\r\n def get_queryset(self):\r\n # 获取请求参数\r\n song_type = self.request.GET.get('type', '')\r\n if song_type:\r\n song_info = Dynamic.objects.select_related('song').filter(song__song_type=song_type).order_by('-dynamic_plays').all()[:10]\r\n else:\r\n song_info = Dynamic.objects.select_related('song').order_by('-dynamic_plays').all()[:10]\r\n return song_info\r\n\r\n # 添加其他变量\r\n def get_context_data(self, **kwargs):\r\n context = super().get_context_data(**kwargs)\r\n # 搜索歌曲\r\n context['search_song'] = Dynamic.objects.select_related('song').order_by('-dynamic_search').all()[:4]\r\n # 所有歌曲分类\r\n context['All_list'] = Song.objects.values('song_type').distinct()\r\n return 
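The archiving command above (arhive_pier.py) filters past bookings in Python; the same cut-off can be pushed into the ORM with a `__date__lt` lookup. A sketch for the Pieraksti case, assuming the field names used in that record:

from datetime import date

def archive_past_pieraksti():
    past = Pieraksti.objects.filter(nodarbiba__sakums__date__lt=date.today())
    for p in past:
        HistPieraksti.objects.create(pieraksta_laiks=p.pieraksta_laiks,
                                     klients=p.klients, nodarbiba=p.nodarbiba)
    # bulk-delete everything that was archived
    past.delete()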
context\r\n","repo_name":"Rockyzsu/CodePool","sub_path":"玩转Django2源代码/第11章/music/ranking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"} +{"seq_id":"17634160078","text":"import random\n\nclass Grille(object):\n \"\"\"\n Initialisation de la classe, qui est utilisée pour générer une grille, ainsi qu'effectuer une modification sur cette dernière.\n Elle prends plusieurs paramètres:\n - La longueur n (définie sur 40 par défaut dans Game), qui définis le tableau t (t = n * n//2)\n - Le nombre de bombes (définies sur 15 par défaut).\n Dans le cas ou le nombre de bombes dépasse le nombre de cases disponible, le nombre de bombes sera égale a l'entier \n inférieur au nombre de case divisé par 8 ((longueur*largeur)//2)\n \"\"\"\n\n def __init__ (self, long, nbomb = 15):\n self.grid = [[-1 for x in range(long)] for y in range(long//2)]\n self.bombGrid = [[0 for x in range(long)] for y in range(long//2)]\n self.nbBomb = (long*long//2)//8 if nbomb > long*long//2 else nbomb\n self.randomizeBomb()\n\n def randomizeBomb(self):\n \"\"\"\n Permet de mettre x bombes aléatoirement sur la grille lors du démarage de la partie.\n \"\"\"\n for _ in range(self.nbBomb):\n while True:\n y = random.randint(0, len(self.bombGrid) - 1) #entre 0 et n-1 sur l'axe y (hauteur)\n x = random.randint(0, len(self.bombGrid[1]) - 1) #entre 0 et n-1 sur l'axe x (largeur)\n if self.bombGrid[y][x] == 1: #si la case est déjà occupée, on recommence\n continue\n self.bombGrid[y][x] = 1\n break\n\n def numberNeighborBomb(self, x, y):\n \"\"\"\n Donne le nombre de bombes dans les cases voisinses de la grille en coordonnées y / x\n \"\"\"\n nb = 0\n nb += 1 if x > 0 and self.bombGrid[y][x-1] == 1 else 0 #gauche\n nb += 1 if x < len(self.bombGrid[0]) -1 and self.bombGrid[y][x+1] == 1 else 0 #droite\n nb += 1 if y > 0 and self.bombGrid[y-1][x] == 1 else 0 #haut\n nb += 1 if y < len(self.bombGrid) -1 and self.bombGrid[y+1][x] == 1 else 0 #bas\n nb += 1 if (x > 0 and y > 0) and self.bombGrid[y-1][x-1] == 1 else 0 #haut gauche\n nb += 1 if (x < len(self.bombGrid[0]) -1 and y > 0) and self.bombGrid[y-1][x+1] == 1 else 0 #haut droit\n nb += 1 if (x > 0 and y < len(self.bombGrid) -1) and self.bombGrid[y+1][x-1] == 1 else 0 #bas gauche\n nb += 1 if (x < len(self.bombGrid[0]) -1 and y < len(self.bombGrid) -1) and self.bombGrid[y+1][x+1] == 1 else 0\n return nb\n\n def propagation(self, case):\n \"\"\"\n Permet de faire apparaitre les bombes ou les cases vides dans les cases voisines de la case en coordonnées y / x.\n \"\"\"\n y, x = case\n self.grid[y][x] = self.numberNeighborBomb(x, y)\n if self.grid[y][x] != 0:\n return\n\n if x > 0 and self.grid[y][x-1] == -1: #gauche\n self.propagation((y, x-1))\n\n if x < len(self.bombGrid[0]) - 1 and self.grid[y][x+1] == -1: #droite\n self.propagation((y, x+1))\n\n if y > 0 and self.grid[y-1][x] == -1: #haut\n self.propagation((y-1, x))\n\n if y < len(self.bombGrid) - 1 and self.grid[y+1][x] == -1: #bas\n self.propagation((y+1, x))\n\n if x > 0 and y > 0 and self.grid[y-1][x-1] == -1: #haut gauche:\n self.propagation((y-1, x-1))\n\n if x < len(self.bombGrid[0]) - 1 and y > 0 and self.grid[y-1][x+1] == -1: #haut droit\n self.propagation((y-1, x+1))\n\n if x > 0 and y < len(self.bombGrid) - 1 and self.grid[y+1][x-1] == -1: #bas gauche\n self.propagation((y+1, x-1))\n\n if x < len(self.bombGrid[0]) - 1 and y < len(self.bombGrid) - 1 and self.grid[y+1][x+1] == -1: #bas droit\n 
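The eight hand-written checks in `numberNeighborBomb` above (and the matching chain in `propagation`) follow one pattern; a compact bounds-checked sketch of the same neighbour count:

def number_neighbor_bomb(bomb_grid, x, y):
    h, w = len(bomb_grid), len(bomb_grid[0])
    count = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dx == dy == 0:
                continue  # skip the cell itself
            nx, ny = x + dx, y + dy
            if 0 <= nx < w and 0 <= ny < h and bomb_grid[ny][nx] == 1:
                count += 1
    return count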
self.propagation((y+1, x+1))\n\n def updateGameOver(self, y):\n \"\"\"\n Met à jour la grille en fonction de la fin de la partie.\n \"\"\"\n for x in range(len(self.grid[0])):\n if self.bombGrid[y][x] == 1:\n if self.grid[y][x] == -1 or self.grid[y][x] == -2:\n self.grid[y][x] = -4\n else:\n if self.grid[y][x] == -2:\n self.grid[y][x] = -6\n else:\n self.grid[y][x] = 0\n\n def addNewBomb(self, yb, xb):\n \"\"\"\n Permet d'ajouter une nouvelle bombe sur la grille.\n \"\"\"\n while True:\n x = random.randint(0, len(self.bombGrid[0]) - 1)\n y = random.randint(0, len(self.bombGrid) - 1)\n if self.bombGrid[y][x] == 0 and (y, x) != (yb, xb):\n self.bombGrid[y][x] = 1\n break\n\n","repo_name":"RomainMURIER/demineur-python","sub_path":"Grille.py","file_name":"Grille.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"16108408047","text":"import asyncio, json, discord\nfrom discord.ext import commands\n\nwith open('info.json') as info_file:\n server_info = json.load(info_file)\n\n# Custom Checks\ndef check_if_mod(ctx):\n mod = discord.utils.get(ctx.guild.roles, id=server_info[str(ctx.guild.id)]['mod'])\n return mod in ctx.author.roles\n\nclass Moderation(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n # Function to initialize the bot\n @commands.command(name='init')\n @commands.is_owner()\n async def init(self, ctx, owner: discord.Role, mod: discord.Role, co_mod: discord.Role, nick: discord.TextChannel):\n\n guild = ctx.guild\n muted = discord.utils.get(guild.roles, name='Muted')\n\n if muted is None:\n perms = discord.Permissions(send_messages=False)\n await guild.create_role(name='Muted', permissions=perms)\n\n roles = {member.name: [role.id for role in member.roles] for member in guild.members}\n\n server_info[str(guild.id)] = {'owner': owner.id, 'mod': mod.id, 'co_mod': co_mod.id, 'roles': roles, 'reaction_roles': {}, 'nick': nick.id,\n 'deleted': {}}\n\n with open('info.json', 'w') as info_file_input:\n json.dump(server_info, info_file_input, indent=2)\n\n await ctx.send(f'Successfully initialized in the Guild with :-')\n await ctx.send(f'Owner role : \"{owner}\"')\n await ctx.send(f'Moderator role : \"{mod}\"')\n await ctx.send(f'Co Moderator role : \"{co_mod}\"')\n\n\n # Function to display info of Moderation Roles\n @commands.command(name='info')\n async def info(self, ctx):\n\n guild = ctx.guild\n await ctx.send(f'Owner role : \\\"{guild.get_role(server_info[str(ctx.guild.id)][\"owner\"])}\\\"')\n await ctx.send(f'Moderator role : \\\"{guild.get_role(server_info[str(ctx.guild.id)][\"mod\"])}\\\"')\n await ctx.send(f'Co Moderator role : \\\"{guild.get_role(server_info[str(ctx.guild.id)][\"co_mod\"])}\\\"')\n \n # Function to send invites\n @commands.command(name='invite')\n async def invite(self, ctx):\n invitelink = await ctx.channel.create_invite(max_uses=1,unique=True)\n #dming it to the person\n await ctx.author.send(invitelink)\n\n # Function to change Owner role\n @commands.command(name='owner')\n @commands.is_owner()\n async def owner_change(self, ctx, *, owner: discord.Role):\n\n server_info[str(ctx.guild.id)]['owner'] = owner.id\n\n with open('info.json', 'w') as info_file_input:\n json.dump(server_info, info_file_input, indent=2)\n\n await ctx.send(f'New owner role is \\\"{owner}\\\"')\n\n # Function to change Moderator role\n @commands.command(name='mod')\n @commands.is_owner()\n async def mod_change(self, ctx, *, mod: discord.Role):\n\n 
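In the moderation cog above, the same three-line `json.dump` block recurs after nearly every mutation of `server_info`; factoring it into one helper (a sketch using the module-level names already defined in that record) keeps the commands shorter:

def save_info() -> None:
    # persist the in-memory server_info dict back to info.json
    with open('info.json', 'w') as info_file_input:
        json.dump(server_info, info_file_input, indent=2)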
server_info[str(ctx.guild.id)]['mod'] = mod.id\n\n with open('info.json', 'w') as info_file_input:\n json.dump(server_info, info_file_input, indent=2)\n\n await ctx.send(f'New Moderator role is {mod}')\n\n # Function to change Co Moderator role\n @commands.command(name='comod')\n @commands.is_owner()\n async def comod_change(self, ctx, *, co_mod: discord.Role):\n\n server_info[str(ctx.guild.id)]['co_mod'] = co_mod.id\n\n with open('info.json', 'w') as info_file_input:\n json.dump(server_info, info_file_input, indent=2)\n\n await ctx.send(f'New Co-Moderator role is {co_mod}')\n\n # Function to kick members along with reason\n @commands.command(name='kick')\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason=None):\n\n if not member.bot:\n dm_channel = await member.create_dm()\n await dm_channel.send(f'Ay Bruh, so.... ya got booted off the server {ctx.guild} due to this reason : {reason}')\n await member.kick(reason=reason)\n await ctx.send(f'{member} got booted lmfaoo !!')\n await ctx.send(f'Okay get a load of this, my sources tell me the reason they were kicked is : {reason}')\n\n # Function to ban members along with reason\n @commands.command(name='ban')\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason=None):\n\n if not member.bot:\n dm_channel = await member.create_dm()\n await dm_channel.send(f'Ay Bruh, so.... ya got booted permanently off the server {ctx.guild} due to this reason : {reason}')\n await member.ban(reason=reason)\n await ctx.send(f'{member} got booted lmfaoo !!')\n await ctx.send(f'Okay get a load of this, my sources tell me the reason they were banned is : {reason}')\n\n # Function to unban members\n @commands.command(name='unban')\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, discriminator):\n\n bans = await ctx.guild.bans()\n member = discord.utils.find(lambda banned: banned.user.name == member and banned.user.discriminator == discriminator, bans).user\n if not member.bot:\n dm_channel = await member.create_dm()\n await dm_channel.send(f'Ay Bruh, so.... ya ain\\'t exiled from \"{ctx.guild}\" anymore !!')\n await ctx.guild.unban(member)\n await ctx.send(f'{member} is no longer exiled from this server! 
Poggers !!')\n\n # Function to Mute members\n @commands.command(name='mute')\n @commands.check(check_if_mod)\n async def mute(self, ctx, member: discord.Member, *, reason=None):\n\n muted = discord.utils.get(ctx.guild.roles, name='Muted')\n\n if muted not in member.roles:\n roles = [role.id for role in member.roles]\n server_info[str(ctx.guild.id)]['roles'][member.name] = roles\n\n with open('info.json', 'w') as info_file_input:\n json.dump(server_info, info_file_input, indent=2)\n\n avatar = member.avatar_url\n\n await member.edit(roles=[muted])\n muted_embed = discord.Embed(title='Member Muted', colour=0xde4035)\n muted_embed.set_author(name=member.name, icon_url=avatar)\n muted_embed.add_field(name='Username : ', value=member)\n muted_embed.add_field(name='\\u200b', value='\\u200b')\n muted_embed.add_field(name='Reason :', value=reason)\n muted_embed.add_field(name='Moderator :', value=ctx.message.author)\n await ctx.send(embed=muted_embed)\n\n else:\n await ctx.send(\"He already muted Boah!!\")\n \n \n\n # Function to Un-Mute Members\n @commands.command(name='unmute')\n @commands.check(check_if_mod)\n async def unmute(self, ctx, member: discord.Member):\n\n muted = discord.utils.get(ctx.guild.roles, name='Muted')\n\n if muted in member.roles:\n roles = server_info[str(ctx.guild.id)]['roles'][member.name]\n roles_list = [ctx.guild.get_role(role) for role in roles]\n\n avatar = member.avatar_url\n\n await member.edit(roles=roles_list)\n unmuted_embed = discord.Embed(title='Member Unmuted', colour=0xde4035)\n unmuted_embed.set_author(name=member.name, icon_url=avatar)\n unmuted_embed.add_field(name='Username : ', value=member)\n unmuted_embed.add_field(name='\\u200b', value='\\u200b')\n unmuted_embed.add_field(name='Moderator :', value=ctx.message.author)\n await ctx.send(embed=unmuted_embed)\n\n else:\n await ctx.send(\"He already has the right to speak lmao.\")\n\n # Function to Temp Mute Members \n @commands.command(aliases= ['tm'])\n @commands.check(check_if_mod)\n async def tempmute(self, ctx, member: discord.Member, duration: int, *, reason=None):\n\n muted = discord.utils.get(ctx.guild.roles, name='Muted') \n\n if muted not in member.roles:\n\n roles = [role.id for role in member.roles]\n avatar = member.avatar_url\n\n await member.edit(roles=[muted])\n muted_embed = discord.Embed(title='Member Temp Muted', colour=0xde4035)\n muted_embed.set_author(name=member.name, icon_url=avatar)\n muted_embed.add_field(name='Username : ', value=member)\n muted_embed.add_field(name='\\u200b', value='\\u200b')\n muted_embed.add_field(name='Reason :', value=reason)\n muted_embed.add_field(name='Moderator :', value=ctx.message.author)\n muted_embed.add_field(name='\\u200b', value='\\u200b')\n muted_embed.add_field(name='Duration :', value=duration)\n await ctx.send(embed=muted_embed)\n\n await asyncio.sleep(duration)\n\n roles = [ctx.guild.get_role(role) for role in roles]\n avatar = member.avatar_url\n\n await member.edit(roles=roles)\n unmuted_embed = discord.Embed(title='Member Unmuted', colour=0xde4035)\n unmuted_embed.set_author(name=member.name, icon_url=avatar)\n unmuted_embed.add_field(name='Username : ', value=member)\n unmuted_embed.add_field(name='\\u200b', value='\\u200b')\n unmuted_embed.add_field(name='Moderator :', value=ctx.message.author)\n await ctx.send(embed=unmuted_embed)\n\n\n else:\n await ctx.send(\"He already muted Boah!!\")\n \n # Function to clear chat\n @commands.command(name='clear')\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: 
int=1):\n await ctx.channel.purge(limit=amount+1)\n\n # Function for Reaction Roles\n @commands.command(name='react')\n @commands.has_permissions(manage_roles=True)\n async def react(self, ctx, *, role):\n\n author = ctx.author\n top_role = author.top_role\n guild = ctx.guild\n channel = ctx.channel\n\n if top_role.position > discord.utils.get(guild.roles, name=role).position:\n\n bot_msg_embed = discord.Embed(title='Reaction Role', description=f'React with ✅ to get {role} Role',\n colour=0xde4035)\n bot_msg_embed.set_image(url='https://cms.hostelbookers.com/hbblog/wp-content/uploads/sites/3/2012/02/cat-happy-'\n 'cat-e1329931204797.jpg')\n sent_embed = await channel.send(embed=bot_msg_embed)\n await sent_embed.add_reaction('✅')\n\n server_info[str(guild.id)]['reaction_roles'][str(sent_embed.id)] = role\n\n with open('info.json', 'w') as info_file_input:\n json.dump(server_info, info_file_input, indent=2)\n\n else:\n await channel.send('Oof, your top role is not high enough to run this mate.')\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n\n guild = self.bot.get_guild(payload.guild_id)\n if not guild.get_member(payload.user_id).bot and str(payload.emoji) == '✅' and str(payload.message_id) in server_info[str(guild.id)]['reaction_roles']: \n \n member = guild.get_member(payload.user_id)\n if str(payload.emoji) == '✅' and not guild.get_member(payload.user_id).bot:\n required_role = server_info[str(guild.id)]['reaction_roles'][str(payload.message_id)]\n await member.add_roles(discord.utils.get(guild.roles, name = required_role))\n\n @commands.Cog.listener()\n async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n\n guild = self.bot.get_guild(payload.guild_id)\n if not guild.get_member(payload.user_id).bot and str(payload.emoji) == '✅' and str(payload.message_id) in server_info[str(guild.id)]['reaction_roles']:\n member = guild.get_member(payload.user_id)\n if str(payload.emoji) == '✅' and not guild.get_member(payload.user_id).bot:\n required_role = server_info[str(guild.id)]['reaction_roles'][str(payload.message_id)]\n await member.remove_roles(discord.utils.get(guild.roles, name = required_role))\n\n\n # Function to Create Role\n @commands.command(aliases=['cr', 'createrole'])\n @commands.has_permissions(manage_roles=True)\n async def create_role(self, ctx, *, role):\n\n perms = discord.Permissions(add_reactions=True, stream=True, read_messages=True, view_channel=True, send_messages=True, attach_files=True, \n read_message_history=True, external_emojis=True, connect=True, speak=True, use_voice_activation=True)\n await ctx.guild.create_role(name=role, permissions=perms)\n await ctx.send(f'Role {role} has been successfully created! Poggers!!')\n\n # Function to Delete Role\n @commands.command(aliases=['deleterole', 'dr'])\n @commands.has_permissions(manage_roles=True)\n async def delete_role(self, ctx, *, role: discord.Role):\n\n top_role = ctx.author.top_role\n if top_role.position > role.position:\n await role.delete()\n await ctx.send(f'Role {role} has been successfully deleted! 
Poggers!!')\n else:\n await ctx.send('Oof, your top role is not high enough to run this mate.')\n\n # Local Error Handling\n\n @init.error\n async def init_handler(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to enter the names of the roles for Owner, Mod and Co-Mod')\n\n @delete_role.error\n async def dr_handler(self, ctx, error):\n if isinstance(error, commands.CommandInvokeError):\n await ctx.send('You cannot delete a role which doesn\\t exist, Dum Dum')\n\ndef setup(bot):\n bot.add_cog(Moderation(bot))\n","repo_name":"DevastatingRPG/cerberus","sub_path":"cogs/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":13539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12759826279","text":"import random\n\nword = input(\"Please enter a word: \")\njumble = \"\"\nfor c in random.sample(word,len(word)):\n jumble = jumble + c\n \nprint(\"A jumble is\", jumble + \".\")\n\n# Here is an alternative version that creates a permutation of indices\n# into the string instead of the characters of the string itself.\n\n#jumble = \"\"\n#for i in random.sample(range(len(word)),len(word)):\n #jumble = jumble + word[i]\n \n#print(\"A jumble is\", jumble + \".\")","repo_name":"kentdlee/SCSI","sub_path":"build/html/_static/lesson6.py","file_name":"lesson6.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5238723298","text":"# -*- coding:utf-8 -*-\n\n# 字符串格式\n# 在Python中,采用的格式化方式和C语言是一致的,用%实现\nname = input(\"请输入您的名字:\")\n\nsex = input(\"请输入你的性别:\")\n\nage = input(\"请输入您的年龄:\")\n\nage = int(age)\n\nchu = ('先生' if sex=='男' else '女士')\n\nprint(\"欢迎 %s %s光临,您的年龄是:%d\" %(name,chu,age))\n\n# 常见的占位符\n\n# %s 字符串\n# %d 整数\n# %f 浮点数\n# %x 十六进制整数\n\n\n# 另一种格式化字符串的方法是使用字符串的format()方法,它会用传入的参数依次替换字符串内的占位符{0}、{1}……,\nname = input(\"请输入您的名字:\")\n\nsex = input(\"请输入你的性别:\")\n\nage = input(\"请输入您的年龄:\")\n\nprint(\"欢迎 {0}光临,性别:{1},您的年龄是:{2}\".format(name,sex,age))\n\n# 格式化 保留几位小数\nprint(\"您的成绩是{0},比上一级进步了{1:.1f}%\".format(80,70.1234))","repo_name":"1181888200/python-demo","sub_path":"day1/five.py","file_name":"five.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20482192109","text":"\"\"\"#TC : O(n) #SC : O(n) car on fait un stack qui peut etre de la taille de s si s est par exemple ((( .\nle code et l'explication sont d'ici :https://leetcode.com/problems/minimum-remove-to-make-valid-parentheses/discuss/663204/Super-simple-Python-solution-with-explanation.-Faster-than-100-Memory-Usage-less-than-100\nl'idee est d'utiliser une stack qui contiendra l'indexe des parenthese \"(\" , des que on rencontre une parenthese \")\" on fait pop au stack si le stack est vide a ce moment cad que cette parenthes n'a pas de paire avec \"(\" \nelle doit donc etre supprimer de la phrase (on remplacera cette parenthese par une empty string \"\"). si apres etre passer sur toute la phrase s le stack est encore plein cad qu'il ya des parenthese \"(\" qui n'ont pas de\npaire avec \")\" alors on doit supprimer ces parenthese \"(\" de la phrase (donc tout valeurs d'index present dans le stack sera remplacer par une empty string). 
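The word-jumble record above builds the shuffled string character by character; since `random.sample(word, len(word))` already returns a full permutation, the loop reduces to a join. A sketch of the same behaviour:

import random

def jumble(word: str) -> str:
    # random.sample returns a shuffled list of word's characters
    return ''.join(random.sample(word, len(word)))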
a la fin ou retourn le rassemblement de toute la list qui nous\nreste \n\n\"\"\"\nclass Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n s = list(s) # transformer la string en list pour pouvoir la modifier \n stack = deque()\n for i, char in enumerate(s):\n if char == '(':\n stack.append(i) #on rajoute l'index de la parenthese dans la stack\n elif char == ')':\n if stack: #si stack pas vide \n stack.pop()\n else: #si stack vide \n s[i] = ''\n while stack:\n s[stack.pop()] = '' #stack pop retourne l'index des parenthese \"(\"\n return ''.join(s)\n","repo_name":"rtn75000/leetcode-pb","sub_path":"1249. Minimum Remove to Make Valid Parentheses/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28239632853","text":"import sys\nimport os\nimport matplotlib.pyplot as plt\n\nmodel = input('model: ')\n\nassert model in ('RNN', 'LSTM')\n\nif model == 'RNN':\n path = './result/RNN_acc.txt'\nelse:\n path = './result/LSTM_acc.txt'\nresult = open('{}'.format(path)).read().splitlines()\n\nlength = list()\nacc = list()\n\nfor i in range(len(result)):\n line = list(map(float, result[i].split()))\n length.append(line[0])\n acc.append(line[1])\n\nfig = plt.figure(figsize=(10,5))\nplt.scatter(length, acc)\nplt.plot(length, acc, linestyle='--')\nplt.xlabel('Length')\nplt.ylabel('Accuracy')\nfig.tight_layout()\nplt.show()\nfig.savefig('./result/{}_plot.eps'.format(model))","repo_name":"jamie0725/Deep-Learning","sub_path":"week2/part1/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33406810901","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport inspect\nimport os\nimport sys\nfrom datetime import datetime\n\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- Readthedocs theme -------------------------------------------------------\n#on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nhtml_theme = \"furo\"\n\nimport scarches\n\n# -- Retrieve notebooks ------------------------------------------------------\n\nfrom urllib.request import urlretrieve\n\nnotebooks_url = 'https://github.com/theislab/scarches/raw/master/notebooks/'\nnotebooks = [\n 'scanvi_surgery_pipeline.ipynb',\n 'scvi_surgery_pipeline.ipynb',\n 'totalvi_surgery_pipeline.ipynb',\n 'trvae_surgery_pipeline.ipynb',\n 'trVAE_zenodo_pipeline.ipynb',\n 'reference_building_from_scratch.ipynb',\n 'scgen_map_query.ipynb',\n 'expimap_surgery_pipeline_basic.ipynb',\n 'expimap_surgery_pipeline_advanced.ipynb',\n 'treeArches_pbmc.ipynb',\n 'treeArches_identifying_new_ct.ipynb',\n 'SageNet_mouse_embryo.ipynb',\n 'mvTCR_borcherding.ipynb',\n 'multigrate.ipynb',\n 'scpoli_surgery_pipeline.ipynb',\n 'scpoli_ATAC.ipynb',\n 'hlca_map_classify.ipynb'\n]\n\nfor nb in notebooks:\n try:\n urlretrieve(notebooks_url + nb, nb)\n except:\n pass\n\n# -- Project information -----------------------------------------------------\n\nproject = 'scArches'\nauthor = ' Mohammad Lotfollahi, Sergei Rybakov, Mohsen Naghipourfar'\ncopyright = f'{datetime.now():%Y}, ' + author\n\npygments_style = 'sphinx'\ntodo_include_todos = True\nhtml_theme_options = dict(navigation_depth=3, titles_only=False)\nhtml_context = dict(\n display_github=True,\n github_user='theislab',\n github_repo='scarches',\n github_version='master',\n conf_py_path='/docs/',\n)\nhtml_static_path = ['_static']\n\n\ndef setup(app):\n app.add_css_file('custom.css')\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
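The notebook-retrieval loop above swallows every exception with a bare `except: pass`; a sketch that keeps the docs build tolerant of missing notebooks but at least reports which ones were skipped:

for nb in notebooks:
    try:
        urlretrieve(notebooks_url + nb, nb)
    except OSError as exc:  # URLError subclasses OSError
        print(f"could not fetch {nb}: {exc}")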
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'nbsphinx',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.linkcode',\n 'sphinx_rtd_theme',\n 'numpydoc',\n]\n\nadd_module_names = True\nautosummary_generate = True\nnumpydoc_show_class_members = True\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'anndata': ('https://anndata.readthedocs.io/en/latest/', None),\n 'numpy': ('https://numpy.readthedocs.io/en/latest/', None),\n 'scanpy': ('https://scanpy.readthedocs.io/en/latest/', None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']\n\n\ndef linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n \"\"\"\n if domain != 'py':\n return None\n\n modname = info['module']\n fullname = info['fullname']\n\n submod = sys.modules.get(modname)\n if submod is None:\n return None\n\n obj = submod\n for part in fullname.split('.'):\n try:\n obj = getattr(obj, part)\n except:\n return None\n\n try:\n fn = inspect.getsourcefile(obj)\n except:\n fn = None\n if not fn:\n return None\n\n try:\n source, lineno = inspect.findsource(obj)\n except:\n lineno = None\n\n if lineno:\n linespec = \"#L%d\" % (lineno + 1)\n else:\n linespec = \"\"\n\n fn = os.path.relpath(fn, start=os.path.dirname(scarches.__file__))\n\n github = f\"https://github.com/theislab/scarches/blob/master/scarches/{fn}{linespec}\"\n return github\n","repo_name":"theislab/scarches","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":290,"dataset":"github-code","pt":"81"} +{"seq_id":"15371356284","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n deficiencia = models.CharField(max_length = 20, blank = True) #criar uma lista de opções e talvez inserir alguns atributos\n\n def __str__(self):\n return self.user.username\n\n@receiver(post_save, sender = User)\ndef update_user_profile(sender, instance, created, **kwargs):\n if(created):\n Profile.objects.create(user = instance)\n instance.profile.save()\n\nclass Jogo(models.Model):\n nome = models.CharField(max_length = 60)\n #imagem\n user = models.ForeignKey(User, on_delete = models.CASCADE)\n pagina_do_jogo = models.URLField(max_length = 100)\n data_criacao = models.DateTimeField(auto_now = True)\n imagem = models.ImageField(upload_to = 'jogos/', max_length = 255)\n #gênero = ...\n\n def __str__(self):\n return self.nome\n\nclass Descricao(models.Model):\n texto = models.TextField(max_length = 5000)\n jogo = models.OneToOneField(Jogo, on_delete = models.CASCADE)\n\n\n@receiver(post_save, sender = Jogo)\ndef update_jogo_descricao(sender, instance, created, **kwargs):\n if(created):\n Descricao.objects.create(jogo = instance)\n instance.descricao.save()\n\nclass Comentarios(models.Model):\n dono = 
models.ForeignKey(User, on_delete = models.CASCADE)\n jogo = models.ForeignKey(Jogo, on_delete = models.CASCADE, related_name = 'comentarios')\n comentario = models.TextField(max_length = 2000)\n nota_fisica = models.PositiveIntegerField(max_length = 2, )\n nota_visual = models.IntegerField(max_length = 2)\n nota_auditiva = models.IntegerField(max_length = 2)\n\n def __str__(self):\n return self.comentario\n","repo_name":"tredeneo/utfpr","sub_path":"analise_desenvolvimento_de_sistemas/Acesso_Negado/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73106334346","text":"## Common job option for gamma gamma processes in Herwig++\n## MPI tune is not relevant as the pdf_gammagamma_cmds() function disables MPI\n## Contact: Oldrich Kepka\ninclude(\"MC15JobOptions/Herwigpp_Base_Fragment.py\")\nfrom Herwigpp_i import config as hw\ncmds = hw.energy_cmds(runArgs.ecmEnergy) + hw.base_cmds() + hw.pdf_gammagamma_cmds()\ncmds += \"create ThePEG::O1AlphaS /Herwig/AlphaQCD_O1 O1AlphaS.so\\n\"\ncmds += \"set /Herwig/Generators/LHCGenerator:StandardModelParameters:QCD/RunningAlphaS /Herwig/AlphaQCD_O1\\n\"\ngenSeq.Herwigpp.Commands += cmds.splitlines()\ndel cmds\n","repo_name":"btamadio/MadGraphProduction","sub_path":"JobOptsTemplate/joOfficial/common/Herwigpp/Herwigpp_QED_Common.py","file_name":"Herwigpp_QED_Common.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30054516845","text":"from flask import Flask, request\nfrom wabot import WABot\nimport json\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n return \"Status Online\"\n\n\n@app.route('/home', methods=['POST'])\ndef home():\n if request.method == 'POST':\n bot = WABot(request.json)\n return bot.processing()\n\nif(__name__) == '__main__':\n app.run(host=\"localhost\", port=8080, debug=True)\n\n\n","repo_name":"rezzaapr/chatapi-whatsappbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74563797703","text":"# numbers_set = {1, 2, 3, 4, 4} # duplicate values removed\n\n# numbers_set = {1, 2, 3, 4, [5, 6]} # cannot use mutable data types\n\nnumbers_set = {1, 2, 3, 4, (5, 6)} # tuples are immutable, OK to use!\n# print(numbers_set)\n\n# Accessing set values\nwords_set = {\"Alpha\", \"Bravo\", \"Charlie\"}\n\n# Iterating through a set using the 'for' loop\nabcd = \"\"\nfor words in words_set:\n abcd += words\nprint(abcd)\n\n# Checking if values are in sets using the 'in' keyword\nif \"Alpha\" in words_set:\n print(\"Alpha is in set\")\nelse:\n print(\"Alpha is not in set\")\n\n# Modifying set values\n\nwords_set.add(\"Delta\")\nprint(words_set)\n\nwords_set.discard(\"Bravo\")\nprint(words_set)\n","repo_name":"dezcalimese/nucamp-bootcamp","sub_path":"1-Fundamentals/week3/using_sets.py","file_name":"using_sets.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1712136154","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\nimport argparse, bisect\n\ndef writer(path,plist) :\n with open(path, 'w') as f :\n for i in plist: f.write(str(i)+'\\n')\n\ndef reader(path,plist) :\n with open(path) as f : \n for line in f: list_1.append(int(line))\n\ndef 
check(n,mx) :\n if mx < int(n**0.5+1) : return True\n return False\n\ndef is_prime(n,plist) :\n root = int(n**0.5 + 1)\n index = bisect.bisect_right(plist,root)\n for i in range(index) :\n if n % plist[i] == 0 : return False\n return True\n\ndef expension(n,plist) :\n root = int(n**0.5+1)\n if max(plist) < root :\n for i in range(max(plist)+2,root,2) :\n if is_prime(i,plist):plist.append(i)\n return plist\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('integers', metavar='N', type=int, nargs='+',\n help='an integer for the accumulator')\nparser.add_argument('--sum', dest='accumulate', action='store_const',\n const=sum, default=max,\n help='sum the integers (default: find the max)')\nargs = parser.parse_args()\nn=args.accumulate(args.integers)\n\nlist_1 = []\npath = r'/mnt/d/source_code/py/prime_number/prime.txt'\nreader(path,list_1)\n\nroot = int(n**0.5+1)\nif check(root,max(list_1)) :\n expension(root,list_1)\n writer(path,list_1)\n\nindex = bisect.bisect_right(list_1,root)\n\nfor i in range(index) :\n if n % list_1[i] == 0 :\n print(list_1[i])\n exit()\nprint(0)\n \n\n# with open(r'/mnt/d/source_code/py/prime_number/prime.txt') as f : \n # for line in f:\n # list_1.append(int(line))\n\n# with open(r'/mnt/d/source_code/py/prime_number/prime.txt') as list_2:list_2.write()\n# for i in list_1 : print(i, type(i))\n\n# mx = max(list_1)\n# print(mx)\n\n# x = int(input('>>> ')) \n# r = []\n# L = eratosthenes(x,\n# L.sort()\n# # for i in L : print(i, type(i))\n# if check(x,mx) == True:\n # # with open(r'/mnt/d/source_code/py/prime_number/prime.txt', 'w') as f :\n # # for i in L: f.write(str(i)+'\\n')\n # writer(path,plist)\n # for b in L:\n # if x%b == 0:\n # print(\"flush,not a prime number\")\n # break\n # else :\n # print(\"flush,a prime number\")\n # break\n# else :\n # for i in list_1:\n # if i < x**0.5+1:\n # r.append(i)\n # else:\n # r.append(int(x**0.5+1))\n # break\n # print(r)\n # for b in r:\n # if x%b == 0:\n # print(\"not prime\")\n # break\n # else:\n # print('prime')\n # break\n# def prime(n):\n'''\n取得N之平方根\n读入一个列表\n平方根大于列表.max() 补充列表, 若非则进行定位\n根据定位对 N 取模\n若从未发生整出则显示 N 为质数, 发生则返回一个质因子\n'''\n# def eratosthenes(n):\n # IsPrime = [True] * (n + 1)\n # IsPrime[1] = False #1不为素数\n # for i in range(2, int(n ** 0.5) + 1):\n # if IsPrime[i]:\n # for j in range(i * i, n + 1, i):\n # IsPrime[j] = False\n # return [x for x in range(2, n + 1) if IsPrime[x]]\n","repo_name":"UMP-45/Python_Study","sub_path":"prime_number/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40936392502","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nloss = []\nwith open('output/loss.txt') as f:\n reader = csv.reader(f, delimiter=' ')\n for row in reader:\n loss.append(row[-1])\n\ny_num = 25\nY = np.linspace(-0.03, 0.03, num=y_num)\nprint(Y)\n#print(loss)\nloss_map = np.zeros(shape=(25, 29))\n\nfor i in range(int(np.shape(loss)[0]/y_num)):\n loss_map[:, i] = loss[i*y_num:i*y_num+25]\n print(loss[i*y_num:i*y_num+25])\n np.savez('loss_'+str(Y[i]), loss=loss[i*y_num:i*y_num+25])\n\n\nx_num = np.shape(loss_map)[1]\ny_num = np.shape(loss_map)[0]\nx = np.linspace(-0.2, 1.2, num=x_num)\ny = np.linspace(-0.03, 0.03, num=y_num)\nX, Y = np.meshgrid(x, y)\nZ = np.array(loss_map)\n\nfig, ax = plt.subplots()\nCS = ax.contour(X, Y, Z)\nax.clabel(CS, inline=1, fontsize=10)\nax.set_title('Loss landscape')\nax.set_xlabel('ID 
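The prime-cache script above persists a growing prime list to disk and answers with the smallest prime factor of n (printing 0 when n is prime); for moderate n, plain trial division gives the same answer without the file round-trip. A sketch following the record's output convention:

def smallest_prime_factor(n: int) -> int:
    i = 2
    while i * i <= n:
        if n % i == 0:
            return i
        i += 1
    return 0  # the record prints 0 when n itself is prime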
to GET interpolation')\nax.set_ylabel('Added moise')\nplt.show()\n","repo_name":"attilasimko/drs","sub_path":"BFC/loss_load.py","file_name":"loss_load.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38992442484","text":"\"\"\"\ncontains all the methods required for managing example 1\n\"\"\"\n\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for, Response\n)\n\nfrom .db_handler import DBHandler\n\nex2 = Blueprint('example2', __name__, url_prefix='/example2')\n\n@ex2.route('/')\ndef example2_root():\n \"\"\"\n serve example2 template to allow users to make query\n for exporting data\n \"\"\"\n return render_template(\"example2.html\")\n\n@ex2.route('/fetch', methods=[\"POST\"])\ndef example2_req():\n \"\"\"\n returns csv data with values beyond the particular date\n \"\"\"\n date = request.form.get('date', None)\n if date is None:\n flask(\"No date entered!\")\n return render_template(\"example2.html\")\n\n csv_data = DBHandler.fetch_beyond(date)\n return Response(\n csv_data,\n mimetype=\"text/csv\",\n headers={\"Content-disposition\":\"attachment; filename=DataExport.csv\"}\n )","repo_name":"thealphadollar/Collect-Clone","sub_path":"collect-demo/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13922312297","text":"# 다익스트라 알고리즘: 간선의 가중치가 양수인 그래프에서, 특정한 노드에서 출발하여 다른 모든 노드로 가는 각각의 최단 경로를 구해주는 알고리즘이다.\n\n# Dijkstra의 기본적인 동작 과정과 코드\n'''\nDijkstra는 start로 시작하여 인접 노드 중 현재로서 최선의 거리인 노드 now를 확정지어 나가는 것이므로 기본적으로 그리디 알고리즘으로 분류된다.\n또한 재귀적으로 연결된 작은 문제를 반복적으로 해결함으로써 큰 문제를 해결하므로 다이나믹 프로그래밍이기도 하다. dp 테이블은 최단 거리 리스트, 점화식은 distance[w] = min(distance[w], distance[u] + weight[u][w]) \n1. 출발 노드를 설정한다.\n2. 최단 거리 테이블을 초기화한다.\n\n3. 방문하지 않은 노드 중에서 최단거리가 가장 짧은 노드를 선택한다. # 여기서 선형탐색, 우선순위 큐 2가지 방식으로 나뉜다\n4. 해당 노드를 거쳐 다른 노드로 가는 비용을 계산하여 최단 거리 테이블을 초기화한다.\n\n5. 3, 4를 큐가 빌 때 까지 반복. (인접노드를 확인한다는 점은 BFS와 유사)\n'''\n# Dijkstra 최단 경로_ 선형 탐색 O(V^2)\n\nimport sys\ninput = sys.stdin.readline # 빠른 입력 받기(개행문자는 split으로 사라짐)\nINF = int(1e9) # 무한의 값을 의미. 10억으로 설정\n\nn, m = map(int, input().split()) # 노드의 개수n, 간선의 개수m\nstart = int(input()) # 시작 노드의 번호\ngraph = [[] for i in range(n + 1)] # 인접 리스트 방식의 그래프 정의 (2차원 리스트)\nvisited = [False] * (n + 1) # 방문 처리(최단거리 확정) 체크하는 1차원 리스트\ndistance = [INF] * (n + 1) # 최단 거리 테이블 모두 무한으로 초기화 (최초에 갱신할 때를 위해)\n\nfor _ in range(m):\n a, b, c = map(int, input().split()) # a번 노드에서 b번 노드로 가는 비용이 c일 때\n graph[a].append((b, c)) # 인접 리스트 방식으로 그래프 초기화\n\ndef get_smallest_node(): # 방문하지 않은 노드 중 가장 최단 거리가 짧은 노드의 번호를 리턴\n min_value = INF\n index = 0\n for i in range(1, n + 1):\n if distance[i] < min_value and not visited[i]:\n min_value = distance[i]\n index = i\n return index\n\ndef dijkstra(start):\n distance[start] = 0\n visited[start] = True # start ��드부터 출발\n for j in graph[start]:\n distance[j[0]] = j[1] # 시작 노드의 인접 노드부터 최단 거리 테이블 갱신\n for i in range(n - 1): # 시작 노드를 제외한 전체 n - 1개의 노드에 대해 반복 (어차피 한번씩 확인하므로 횟수반복도 됨)\n now = get_smallest_node()\n visited[now] = True # now 노드부터 다시 출발\n for j in graph[now]:\n cost = distance[now] + j[1] # 현재 노드 거쳐서 다른 노드로 이동하는 비용\n if cost < distance[j[0]]: # 더 작다면 최단 거리 테이블 갱신\n distance[j[0]] = cost\n\ndijkstra(start) # start부터 다익스트라 알고리즘 실행\n \n \n# Dijkstra 최단 경로_ 우선순위 큐 O(ElogV)\n# 우선순위 큐를 이용한다는 점에서 최단 경로 문제를 제외하고도 우선순위 큐를 필요로 하는 다른 문제 유형과도 흡사하다. 
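In the example2 record above, the missing-date branch calls `flask("No date entered!")`, which is an undefined name; it presumably means Flask's `flash(...)`, which also needs importing (and a configured `app.secret_key`). A corrected sketch of that branch:

from flask import flash, render_template

def handle_missing_date():
    # flash(), not flask(); requires app.secret_key to be set
    flash("No date entered!")
    return render_template("example2.html")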
ex) prim 알고리즘\n\nimport heapq # 우선순위 큐 라이브러리 (파이썬에서는 최소 힙) **힙은 삽입시간 logN, 삭제시간 logN. 리스트는 삽입시간 O(1), 삭제시간 O(N)\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn, m = map(int, input().split())\nstart = int(input())\ngraph = [[] for i in range(n + 1)]\n# visited 방문 처리 리스트가 필요 없음. now가 방문 했던 노드라면 더 큰 거리로 큐에 존재하기 때문\ndistance = [INF] * (n + 1)\n\nfor _ in range(m):\n a, b, c = map(int, input().split())\n graph[a].append((b, c))\n\ndef dijkstra(start):\n q = [] # 큐 생성\n heapq.heappush(q, (0, start)) # 시작 노드로 가기 위한 최단 경로를 0으로 설정하여 시작노드를 큐에 삽입.\n distance[start] = 0\n while q: # 큐가 빌 때까지 반복\n dist, now = heapq.heappop(q) # 최소 큐로 가장 최단거리가 짧은 노드의 정보 꺼내기\n if distance[now] < dist: # 이미 처리된 적 있는 노드라면 무시. visited는 필요 없음\n continue\n for i in graph[now]: # now의 인접 노드 확인\n cost = dist + i[1] # 인접노드까지의 비용\n if cost < distance[i[0]]: # 더 작다면 최단 거리 테이블 갱신\n distance[i[0]] = cost\n heapq.heappush(q, (cost, i[0])) # 갱신 후 우선순위 큐에 추가. 다음 최소 큐를 위해\n\ndijkstra(start)\n\n","repo_name":"kyj91032/mainalgorithm","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17462589098","text":"from app import app\nimport json\nfrom flask import render_template,request,redirect,url_for\nfrom . import model\nfrom . import twentyquestions as game\n\n\n@app.route('/admin',methods = ['GET'])\ndef admin():\n return render_template(\"admin.html\")\n\n@app.route('/admin/dq',methods = ['GET','POST'])\ndef delete_question():\n if request.method == 'GET':\n list = model.get_questions()\n dict = {}\n for i in list:\n key = i[0]\n value = i[1]\n dict[key]=value\n question = json.dumps(dict)\n return render_template(\"delete_question.html\",questions = dict)\n elif request.method == 'POST':\n id = request.form.getlist('ques_id')\n if len(id) >0:\n for i in id:\n model.delete_question(int(i))\n\n return render_template(\"admin.html\")\n\n\n@app.route('/admin/do',methods = ['GET','POST'])\ndef delete_object():\n if request.method == 'GET':\n objects = model.get_objects()\n dict = {}\n for i in objects:\n key = i[0]\n value = i[1]\n dict[key] = value\n object = json.dumps(dict)\n return render_template(\"delete_object.html\", objects=dict)\n elif request.method == 'POST':\n id = request.form.getlist('obj_id')\n if len(id) > 0:\n for i in id:\n model.delete_object(int(i))\n\n return render_template(\"admin.html\")\n\n@app.route('/admin/data',methods = ['GET'])\ndef get_data():\n objects = model.get_objects()\n dict ={}\n for i in objects:\n key = i[0]\n value = i[1]\n dict[key] = value\n return render_template(\"data.html\",data=dict)\n\n#$def with (object, questions, data)\n@app.route('/admin/retrain/',methods = ['GET','POST'])\ndef retrain(id):\n if request.method == 'GET':\n objects = model.get_object_by_id(int(id))\n obj_id = objects[0]\n obj_val = objects[1]\n\n questions = model.get_questions()\n que_dic ={}\n for i in questions:\n key = i[0]\n value = i[1]\n que_dic[key] = value\n data_dic = model.get_data_dictionary()\n return render_template(\"retrain.html\",obj_id = obj_id, obj_val=obj_val,questions=que_dic,data=data_dic)\n\n elif request.method == 'POST':\n que_id_list = request.form\n for que_id in que_id_list:\n answer = que_id_list[que_id]\n if answer in ['yes', 'no']:\n value = eval('game.' 
+ answer) * game.RETRAIN_SCALE # STRONGLY weights values learned this way\n model.update_data(int(id), int(que_id), value)\n\n return redirect(url_for(\"get_data\"))\n\n ","repo_name":"xajitx/guess-game","sub_path":"app/admin_views.py","file_name":"admin_views.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24239955329","text":"# Run: python -m scrapers.tv_time [public|private]\n\nimport os\nimport sys\n\nfrom .utils import OPTS, Page, get_input, run_playwright\n\nTV_TIME: str = \"https://www.tvtime.com\"\nOPTS[\"userid\"] = os.environ.get(\"TV_TIME_USERID\")\nOPTS[\"username\"] = os.environ.get(\"TV_TIME_USERNAME\")\nOPTS[\"password\"] = os.environ.get(\"TV_TIME_PASSWORD\")\n\n\ndef login(page: Page) -> None:\n page.goto(\"/\")\n page.click(\"text=Login\")\n page.fill('[placeholder=\"Username/Email\"]', OPTS[\"username\"])\n page.fill('[placeholder=\"Password\"]', OPTS[\"password\"])\n\n with page.expect_navigation():\n page.click('input:has-text(\"Login\")')\n\n\ndef get_user_id(page: Page) -> str:\n page.goto(\"/en\")\n page.click(\"text=Profile\")\n OPTS[\"userid\"] = page.url.split(\"/\")[-2]\n return OPTS[\"userid\"]\n\n\ndef get_user_name(page: Page) -> str:\n page.goto(f\"/en/user/{OPTS['userid']}/profile\")\n OPTS[\"username\"] = (\n page.query_selector(\".profile-infos h1.name\").inner_text().strip()\n )\n return OPTS[\"username\"]\n\n\ndef get_all_shows(page: Page) -> list[tuple[str, str]]:\n res = []\n page.goto(f\"/en/user/{OPTS['userid']}/profile\")\n page.click(\"text=Shows\")\n shows = page.query_selector_all(\"#all-shows .poster-details a\")\n\n print(\"\\nList of Shows:\\n\")\n for show in shows:\n show_name = show.inner_text().strip()\n show_url = f\"{TV_TIME}{show.get_attribute('href')}\"\n res.append((show_name, show_url))\n print(f\"{show_name} [{show_url}]\")\n\n print(f\"\\nUser: {OPTS['username']} | ID: {OPTS['userid']}\")\n print(f\"Total Shows: {len(shows)}\")\n return res\n\n\ndef get_stats_screenshot(page: Page) -> None:\n page.goto(f\"/en/user/{OPTS['userid']}/profile\")\n page.click(\"text=Stats\")\n page.screenshot(path=f\"{OPTS['out_dir']}/tv_time_stats.png\", full_page=True)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n For running in a repl, do:\n ```python\n from scrapers.tv_time import *\n play = sync_playwright().start()\n ```\n followed by any commands you want to run.\n \"\"\"\n mode = sys.argv[1] if len(sys.argv) > 1 else \"private\"\n\n with run_playwright(\"chromium\", base_url=TV_TIME) as page:\n page: Page\n page.goto(\"/\")\n page.click(\".optanon-alert-box-close\")\n\n if mode == \"private\":\n get_input([\"username\", \"password\"])\n login(page)\n if OPTS[\"userid\"] is None:\n get_user_id(page)\n elif mode == \"public\":\n get_input([\"userid\"])\n if OPTS[\"username\"] is None:\n get_user_name(page)\n else:\n print(\"\\nInvalid mode.\\n\")\n sys.exit(1)\n\n get_all_shows(page)\n get_stats_screenshot(page)\n","repo_name":"nirantak/scraper","sub_path":"scrapers/tv_time.py","file_name":"tv_time.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"16882719246","text":"from threading import Thread, Lock\nfrom socket import *\n\nHOST = \"0.0.0.0\"\nPORT = 8899\nADDR = (HOST, PORT)\n\n\ns_listen = socket()\ns_listen.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)\n\ns_listen.bind(ADDR)\ns_listen.listen(3)\n\n\ndef handle(conn):\n while True:\n 
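# recv() returns b\"\" once the peer closes the socket; the empty string ends the loop\n        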
data = conn.recv(1024).decode()\n        if not data:\n            break\n        print(data)\n        conn.send(b\"OK\")\n    conn.close()\n\n\nwhile True:\n    conn, addr = s_listen.accept()\n    print(\"Connect from\", addr)\n    t = Thread(target=handle, args=(conn,))\n    # t.daemon\n    t.start()  # handle each client in its own thread; joining here would serialize clients\n","repo_name":"Charlie1043/Project01","sub_path":"day09/thread_server.py","file_name":"thread_server.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20089400915","text":"import pika\n# import pandas as pd\n\ncredentials = pika.PlainCredentials('user', 'user')\nconnection = pika.BlockingConnection(pika.ConnectionParameters('10.20.1.54', 30672, '/', credentials))\n\nchannel = connection.channel()\n\n# a = channel.queue_declare(queue='lstm-pipeline-data-clean is being used')\n# b = a.method.message_count\n# print(type(b))\n\nmethod_frame, header_frame, body = channel.basic_get(queue = 'lstm-pipeline-model-serving-fun', auto_ack=False)\nprint(method_frame)\nif method_frame:\n    channel.basic_ack(delivery_tag = method_frame.delivery_tag)\n\n\n# if method_frame:\n#     a = body.decode(\"utf-8\").split(\",\")\n#     a = pd.Series(a).astype(\"bool\").tolist()\n#     print(a)\n#     print(type(a))\n# else:\n#     print('No message returned')","repo_name":"james426759/ml-faas","sub_path":"lstm-pipeline/lstm-data-preprocess/test-code/mq_receive.py","file_name":"mq_receive.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21116039588","text":"# To check given strings are valid anagrams or not\n\nstr1 = input(\"Enter string1 : \")\nstr2 = input(\"Enter string2 : \")\n\n\n# Solution-1-a : Using Sorted function\ndef anargamCheckUsingSort(str1, str2):\n    if len(str1) != len(str2):\n        return False\n    else:\n        return sorted(str1) == sorted(str2)\n\n\nprint(anargamCheckUsingSort(str1, str2))\n\n# Solution-1-b : Using Sorted function\nlist1 = []\nlist2 = []\nfor x in str1:\n    list1.append(x)\n\nfor x in str2:\n    list2.append(x)\n\nlist1 = sorted(list1)\nlist2 = sorted(list2)\n# print(list1)\n# print(list2)\n\ns1 = \"\"\ns2 = \"\"\ns1 = s1.join(list1)\ns2 = s2.join(list2)\n# print(s1)\n# print(s2)\n\nprint(s1 == s2)\n\n'''\nif s1 == s2:\n    print(\"Given strings are valid Anagrams\")\nelse:\n    print(\"Given strings are Not valid Anagrams\")\n'''\n\n\n# Solution-2 : Without Using Sorted function\n\ndef anargamCheckUsingFreq(str1, str2):\n    if len(str1) != len(str2):\n        return False\n\n    freq1 = {}\n    freq2 = {}\n\n    for ch in str1:\n        if ch in freq1:\n            freq1[ch] = freq1[ch] + 1\n        else:\n            freq1[ch] = 1\n\n    for ch in str2:\n        if ch in freq2:\n            freq2[ch] = freq2[ch] + 1\n        else:\n            freq2[ch] = 1\n\n    for key in freq1:\n        if key not in freq2 or freq1[key] != freq2[key]:\n            return False\n    return True\n\n\nprint(anargamCheckUsingFreq(str1, str2))\n\n# Solution-3 : Using Counter in Collections module\n\nfrom collections import Counter\n\n\ndef anargamCheckUsingCollections(str1, str2):\n    if len(str1) != len(str2):\n        return False\n    return Counter(str1) == Counter(str2)\n\n\nprint(anargamCheckUsingCollections(str1, str2))\n","repo_name":"ramanamurthy-bure/PythonPractice","sub_path":"DSAprograms/1_ValidAnagram.py","file_name":"1_ValidAnagram.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4698705880","text":"grade1=int(input(\"Enter the score for subject 1 : \"))\r\ngrade2=int(input(\"Enter the score for subject 2 : \"))\r\ngrade3=int(input(\"Enter the score for subject 3 : \"))\r\n\r\n
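# any subject below 40 is an automatic fail; otherwise passing requires an average of at least 60\r\n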
\"))\r\ngrade3=int(input(\"3과목 점수를 입력하세요 : \"))\r\n\r\nif grade1 < 40 or grade2 < 40 or grade3 < 40 :\r\n print(\"과락\")\r\nelif (grade1 + grade2 + grade3)/3 >= 60 :\r\n print(\"합격\")\r\nelse :\r\n print(\"불합격\")\r\n","repo_name":"HanseamChung/practice","sub_path":"과락 합격 풀합격.py","file_name":"과락 합격 풀합격.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11554870198","text":"#!/usr/bin/python\n#\n# Copyright 2023 Kaggle Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\r\n\r\n\"\"\"\r\n Kaggle API\r\n\r\n API for kaggle.com # noqa: E501\r\n\r\n OpenAPI spec version: 1\r\n \r\n Generated by: https://github.com/swagger-api/swagger-codegen.git\r\n\"\"\"\r\n\r\n\r\nimport pprint\r\nimport re # noqa: F401\r\n\r\nimport six\r\n\r\nfrom kaggle.models.upload_file import UploadFile # noqa: F401,E501\r\n\r\n\r\nclass DatasetNewVersionRequest(object):\r\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\r\n\r\n Do not edit the class manually.\r\n \"\"\"\r\n\r\n \"\"\"\r\n Attributes:\r\n swagger_types (dict): The key is attribute name\r\n and the value is attribute type.\r\n attribute_map (dict): The key is attribute name\r\n and the value is json key in definition.\r\n \"\"\"\r\n swagger_types = {\r\n 'version_notes': 'str',\r\n 'subtitle': 'str',\r\n 'description': 'str',\r\n 'files': 'list[UploadFile]',\r\n 'convert_to_csv': 'bool',\r\n 'category_ids': 'list[str]',\r\n 'delete_old_versions': 'bool'\r\n }\r\n\r\n attribute_map = {\r\n 'version_notes': 'versionNotes',\r\n 'subtitle': 'subtitle',\r\n 'description': 'description',\r\n 'files': 'files',\r\n 'convert_to_csv': 'convertToCsv',\r\n 'category_ids': 'categoryIds',\r\n 'delete_old_versions': 'deleteOldVersions'\r\n }\r\n\r\n def __init__(self, version_notes=None, subtitle=None, description=None, files=None, convert_to_csv=True, category_ids=None, delete_old_versions=False): # noqa: E501\r\n \"\"\"DatasetNewVersionRequest - a model defined in Swagger\"\"\" # noqa: E501\r\n\r\n self._version_notes = None\r\n self._subtitle = None\r\n self._description = None\r\n self._files = None\r\n self._convert_to_csv = None\r\n self._category_ids = None\r\n self._delete_old_versions = None\r\n self.discriminator = None\r\n\r\n self.version_notes = version_notes\r\n if subtitle is not None:\r\n self.subtitle = subtitle\r\n if description is not None:\r\n self.description = description\r\n self.files = files\r\n if convert_to_csv is not None:\r\n self.convert_to_csv = convert_to_csv\r\n if category_ids is not None:\r\n self.category_ids = category_ids\r\n if delete_old_versions is not None:\r\n self.delete_old_versions = delete_old_versions\r\n\r\n @property\r\n def version_notes(self):\r\n \"\"\"Gets the version_notes of this DatasetNewVersionRequest. # noqa: E501\r\n\r\n The version notes for the new dataset version # noqa: E501\r\n\r\n :return: The version_notes of this DatasetNewVersionRequest. 
# noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._version_notes\r\n\r\n @version_notes.setter\r\n def version_notes(self, version_notes):\r\n \"\"\"Sets the version_notes of this DatasetNewVersionRequest.\r\n\r\n The version notes for the new dataset version # noqa: E501\r\n\r\n :param version_notes: The version_notes of this DatasetNewVersionRequest. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n if version_notes is None:\r\n raise ValueError(\"Invalid value for `version_notes`, must not be `None`\") # noqa: E501\r\n\r\n self._version_notes = version_notes\r\n\r\n @property\r\n def subtitle(self):\r\n \"\"\"Gets the subtitle of this DatasetNewVersionRequest. # noqa: E501\r\n\r\n The subtitle to set on the dataset # noqa: E501\r\n\r\n :return: The subtitle of this DatasetNewVersionRequest. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._subtitle\r\n\r\n @subtitle.setter\r\n def subtitle(self, subtitle):\r\n \"\"\"Sets the subtitle of this DatasetNewVersionRequest.\r\n\r\n The subtitle to set on the dataset # noqa: E501\r\n\r\n :param subtitle: The subtitle of this DatasetNewVersionRequest. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n\r\n self._subtitle = subtitle\r\n\r\n @property\r\n def description(self):\r\n \"\"\"Gets the description of this DatasetNewVersionRequest. # noqa: E501\r\n\r\n The description to set on the dataset # noqa: E501\r\n\r\n :return: The description of this DatasetNewVersionRequest. # noqa: E501\r\n :rtype: str\r\n \"\"\"\r\n return self._description\r\n\r\n @description.setter\r\n def description(self, description):\r\n \"\"\"Sets the description of this DatasetNewVersionRequest.\r\n\r\n The description to set on the dataset # noqa: E501\r\n\r\n :param description: The description of this DatasetNewVersionRequest. # noqa: E501\r\n :type: str\r\n \"\"\"\r\n\r\n self._description = description\r\n\r\n @property\r\n def files(self):\r\n \"\"\"Gets the files of this DatasetNewVersionRequest. # noqa: E501\r\n\r\n A list of files that should be associated with the dataset # noqa: E501\r\n\r\n :return: The files of this DatasetNewVersionRequest. # noqa: E501\r\n :rtype: list[UploadFile]\r\n \"\"\"\r\n return self._files\r\n\r\n @files.setter\r\n def files(self, files):\r\n \"\"\"Sets the files of this DatasetNewVersionRequest.\r\n\r\n A list of files that should be associated with the dataset # noqa: E501\r\n\r\n :param files: The files of this DatasetNewVersionRequest. # noqa: E501\r\n :type: list[UploadFile]\r\n \"\"\"\r\n if files is None:\r\n raise ValueError(\"Invalid value for `files`, must not be `None`\") # noqa: E501\r\n\r\n self._files = files\r\n\r\n @property\r\n def convert_to_csv(self):\r\n \"\"\"Gets the convert_to_csv of this DatasetNewVersionRequest. # noqa: E501\r\n\r\n Whether or not a tabular dataset should be converted to csv # noqa: E501\r\n\r\n :return: The convert_to_csv of this DatasetNewVersionRequest. # noqa: E501\r\n :rtype: bool\r\n \"\"\"\r\n return self._convert_to_csv\r\n\r\n @convert_to_csv.setter\r\n def convert_to_csv(self, convert_to_csv):\r\n \"\"\"Sets the convert_to_csv of this DatasetNewVersionRequest.\r\n\r\n Whether or not a tabular dataset should be converted to csv # noqa: E501\r\n\r\n :param convert_to_csv: The convert_to_csv of this DatasetNewVersionRequest. # noqa: E501\r\n :type: bool\r\n \"\"\"\r\n\r\n self._convert_to_csv = convert_to_csv\r\n\r\n @property\r\n def category_ids(self):\r\n \"\"\"Gets the category_ids of this DatasetNewVersionRequest. 
# noqa: E501\r\n\r\n        A list of tag IDs to be associated with the dataset  # noqa: E501\r\n\r\n        :return: The category_ids of this DatasetNewVersionRequest.  # noqa: E501\r\n        :rtype: list[str]\r\n        \"\"\"\r\n        return self._category_ids\r\n\r\n    @category_ids.setter\r\n    def category_ids(self, category_ids):\r\n        \"\"\"Sets the category_ids of this DatasetNewVersionRequest.\r\n\r\n        A list of tag IDs to be associated with the dataset  # noqa: E501\r\n\r\n        :param category_ids: The category_ids of this DatasetNewVersionRequest.  # noqa: E501\r\n        :type: list[str]\r\n        \"\"\"\r\n\r\n        self._category_ids = category_ids\r\n\r\n    @property\r\n    def delete_old_versions(self):\r\n        \"\"\"Gets the delete_old_versions of this DatasetNewVersionRequest.  # noqa: E501\r\n\r\n        Whether or not all previous versions of the dataset should be deleted upon creating the new version  # noqa: E501\r\n\r\n        :return: The delete_old_versions of this DatasetNewVersionRequest.  # noqa: E501\r\n        :rtype: bool\r\n        \"\"\"\r\n        return self._delete_old_versions\r\n\r\n    @delete_old_versions.setter\r\n    def delete_old_versions(self, delete_old_versions):\r\n        \"\"\"Sets the delete_old_versions of this DatasetNewVersionRequest.\r\n\r\n        Whether or not all previous versions of the dataset should be deleted upon creating the new version  # noqa: E501\r\n\r\n        :param delete_old_versions: The delete_old_versions of this DatasetNewVersionRequest.  # noqa: E501\r\n        :type: bool\r\n        \"\"\"\r\n\r\n        self._delete_old_versions = delete_old_versions\r\n\r\n    def to_dict(self):\r\n        \"\"\"Returns the model properties as a dict\"\"\"\r\n        result = {}\r\n\r\n        for attr, _ in six.iteritems(self.swagger_types):\r\n            value = getattr(self, attr)\r\n            if isinstance(value, list):\r\n                result[attr] = list(map(\r\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\r\n                    value\r\n                ))\r\n            elif hasattr(value, \"to_dict\"):\r\n                result[attr] = value.to_dict()\r\n            elif isinstance(value, dict):\r\n                result[attr] = dict(map(\r\n                    lambda item: (item[0], item[1].to_dict())\r\n                    if hasattr(item[1], \"to_dict\") else item,\r\n                    value.items()\r\n                ))\r\n            else:\r\n                result[attr] = value\r\n\r\n        return result\r\n\r\n    def to_str(self):\r\n        \"\"\"Returns the string representation of the model\"\"\"\r\n        return pprint.pformat(self.to_dict())\r\n\r\n    def __repr__(self):\r\n        \"\"\"For `print` and `pprint`\"\"\"\r\n        return self.to_str()\r\n\r\n    def __eq__(self, other):\r\n        \"\"\"Returns true if both objects are equal\"\"\"\r\n        if not isinstance(other, DatasetNewVersionRequest):\r\n            return False\r\n\r\n        return self.__dict__ == other.__dict__\r\n\r\n    def __ne__(self, other):\r\n        \"\"\"Returns true if both objects are not equal\"\"\"\r\n        return not self == other\r\n","repo_name":"Kaggle/kaggle-api","sub_path":"kaggle/models/dataset_new_version_request.py","file_name":"dataset_new_version_request.py","file_ext":"py","file_size_in_byte":9991,"program_lang":"python","lang":"en","doc_type":"code","stars":5653,"dataset":"github-code","pt":"81"} +{"seq_id":"24139354801","text":"from tkinter import ttk\r\nfrom tkinter import LabelFrame,Label,Entry,CENTER,N,S,W,E,Toplevel,END,Entry,StringVar,Button,Tk\r\n\r\nimport sqlite3\r\nimport ctypes\r\n\r\n#Screen and Position\r\nuser32 = ctypes.windll.user32\r\nuser32.SetProcessDPIAware()\r\nwidht, height = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)\r\n\r\n#Windows\r\nwidht_index,height_index = 400, 402\r\nwidht_edit,height_edit = 195, 110\r\nwidht_delete,height_delete = 126, 47\r\n\r\n#Screen Center\r\nleft = (widht-widht_index)*0.5\r\ntop = 
(height-height_index)*0.5\r\n\r\nclass Product:\r\n\r\n #Create Database\r\n db_name = 'database_product.db'\r\n backup = 'CREATE TABLE IF NOT EXISTS \"product\" (\"id\" INTEGER NOT NULL, \"name\" TEXT NOT NULL, \"price\" REAL NOT NULL, PRIMARY KEY(\"id\" AUTOINCREMENT));'\r\n con1 = sqlite3.connect(db_name)\r\n Move = con1.cursor()\r\n Move.execute(backup)\r\n con1.commit()\r\n\r\n def __init__(self,window):\r\n\r\n self.wind = window\r\n self.wind.title('Products Application')\r\n self.wind.geometry(\"%dx%d+%d+%d\" % (widht_index, height_index, left, top))\r\n\r\n #Create a Frame Content\r\n frame = LabelFrame(self.wind, text = 'Register A New Product')\r\n frame.grid(row = 0, column = 0, columnspan = 3, pady = 20)\r\n\r\n #Name Input\r\n Label(frame, text = 'Name: ').grid(row = 1, column = 0)\r\n self.name = Entry(frame)\r\n self.name.focus()\r\n self.name.grid(row = 1, column = 1)\r\n\r\n #Price Input\r\n Label(frame, text='Price: ').grid(row=2,column=0)\r\n self.price = Entry(frame)\r\n self.price.grid(row=2,column=1)\r\n\r\n #Button Add Product\r\n ttk.Button(frame, text='Save Product',command=self.add_product).grid(row=3,columnspan=2,sticky=W + E)\r\n\r\n #Output Messages\r\n self.message = Label(text='',fg='red')\r\n self.message.grid(row = 3, column=0,columnspan=2,sticky=W + E)\r\n\r\n #Table\r\n self.tree = ttk.Treeview(height=10,columns = 2)\r\n self.tree.grid(row=4,column=0,columnspan=2)\r\n self.tree.heading('#0',text='Name',anchor=CENTER)\r\n self.tree.heading('#1',text='Price',anchor=CENTER)\r\n \r\n #Buttons\r\n ttk.Button(text='DELETE',command=self.delete_product).grid(row=5,column=0,sticky=W+E)\r\n ttk.Button(text='EDIT',command=self.edit_product).grid(row=5,column=1,sticky=W+E)\r\n\r\n self.get_products()\r\n\r\n def run_query(self,query,parameters = ()):\r\n\r\n #Connection To Database\r\n with sqlite3.connect(self.db_name) as conn:\r\n cursor = conn.cursor()\r\n result = cursor.execute(query,parameters)\r\n conn.commit()\r\n return result\r\n\r\n def get_products(self):\r\n \r\n #Clean Table\r\n records = self.tree.get_children()\r\n for element in records:\r\n self.tree.delete(element)\r\n\r\n #Quering Data\r\n query = 'SELECT * FROM product ORDER BY name DESC'\r\n db_rows = self.run_query(query)\r\n for row in db_rows:\r\n self.tree.insert('',0,text=row[1],value=row[2])\r\n\r\n def validation(self):\r\n\r\n return len(self.name.get()) != 0 and len(self.price.get()) != 0\r\n \r\n def add_product(self):\r\n\r\n if self.validation():\r\n\r\n query = 'INSERT INTO product VALUES(NULL, ?, ?)'\r\n parameters = ((self.name.get()), self.price.get())\r\n\r\n #Validate decimals\r\n if self.price.get().isnumeric() == False:\r\n \r\n #Check Number of Points\r\n if self.price.get().count('.') > 1:\r\n self.message['text'] = 'Many Points'\r\n \r\n #Correct Decimal Point\r\n elif self.price.get().count('.') == 1:\r\n self.run_query(query,parameters)\r\n self.message['text'] = 'Product {} added Successfully'.format(self.name.get())\r\n self.name.delete(0,END)\r\n self.price.delete(0,END)\r\n self.get_products()\r\n\r\n #Input Another Number\r\n else:\r\n self.message['text'] = 'Incorrect Data Price'\r\n self.price.delete(0,END)\r\n\r\n #Input Only Numbers\r\n else: \r\n self.run_query(query,parameters)\r\n self.message['text'] = 'Product {} added Successfully'.format(self.name.get())\r\n self.name.delete(0,END)\r\n self.price.delete(0,END)\r\n self.get_products()\r\n\r\n #Empty Entry\r\n else:\r\n self.message['text'] = 'Name and Price are Required'\r\n\r\n def 
delete_product(self):\r\n\r\n        try:\r\n            self.tree.item(self.tree.selection())['text'][0]\r\n        except IndexError:\r\n            self.message['text'] = 'Please Select a Record'\r\n            return\r\n\r\n        #Delete Window\r\n        self.name_delete = self.tree.item(self.tree.selection())['text']\r\n        self.delete_wind = Toplevel()\r\n        self.delete_wind.title('Delete Product')\r\n        self.delete_wind.geometry(\"%dx%d+%d+%d\" % (widht_delete, height_delete, left-150, top))\r\n\r\n        #Removal Tools\r\n        Label(self.delete_wind,text='Are you sure to delete?').grid(row=1,column=1,columnspan=2)\r\n        Button(self.delete_wind,text='YES',command=self.delete).grid(row=2,column=1,sticky=N+E+S+W)\r\n        Button(self.delete_wind,text='NO',command=self.delete_wind.destroy).grid(row=2,column=2,sticky=N+E+S+W)\r\n\r\n        self.get_products()\r\n\r\n    def delete(self): \r\n        \r\n        #Run the DELETE query\r\n        query = 'DELETE FROM product WHERE name = ?'\r\n        self.run_query(query,(self.name_delete, ))\r\n        self.message['text'] = 'Record {} deleted Successfully'.format(self.name_delete)\r\n        self.delete_wind.destroy()\r\n\r\n        self.get_products()\r\n\r\n    def edit_product(self):\r\n\r\n        try:\r\n            self.tree.item(self.tree.selection())['text'][0]\r\n        except IndexError:\r\n            self.message['text'] = 'Please Select a Record'\r\n            return\r\n\r\n        name = self.tree.item(self.tree.selection())['text']\r\n        old_price = self.tree.item(self.tree.selection())['values'][0]\r\n        self.edit_wind = Toplevel()\r\n        self.edit_wind.title('Edit Product')\r\n        self.edit_wind.geometry(\"%dx%d+%d+%d\" % (widht_edit, height_edit, left+420, top))\r\n\r\n        #Old Name\r\n        Label(self.edit_wind,text='Old Name: ').grid(row=0,column=1)\r\n        Entry(self.edit_wind,textvariable=StringVar(self.edit_wind,value = name),state='readonly').grid(row=0,column=2)\r\n        \r\n        #New Name\r\n        Label(self.edit_wind, text='New Name: ').grid(row=1,column=1)\r\n        new_name = Entry(self.edit_wind)\r\n        new_name.grid(row=1,column=2)\r\n\r\n        #Old Price\r\n        Label(self.edit_wind,text='Old Price: ').grid(row=2,column=1)\r\n        Entry(self.edit_wind,textvariable=StringVar(self.edit_wind,value = old_price),state='readonly').grid(row=2,column=2)\r\n        \r\n        #New Price\r\n        Label(self.edit_wind, text='New Price: ').grid(row=3,column=1)\r\n        new_price = Entry(self.edit_wind)\r\n        self.edit_new_price = new_price\r\n        new_price.grid(row=3,column=2)\r\n\r\n        #Command Button\r\n        Button(self.edit_wind,text='Update',command=lambda:self.edit_records(new_name.get(),new_price.get(),name,old_price)).grid(row=5,column=1,columnspan=2,sticky=W+E)\r\n        \r\n    def edit_records(self,new_name,new_price,name,old_price):\r\n\r\n        if (new_name != '') and (new_price != ''):\r\n            \r\n            #Validate Decimals\r\n            if self.edit_new_price.get().isnumeric() == False:\r\n\r\n                #Check Number of Points\r\n                if self.edit_new_price.get().count('.') > 1:\r\n                    self.message['text'] = 'Many Points'\r\n                \r\n                #Correct Decimal Point\r\n                elif self.edit_new_price.get().count('.') == 1:\r\n                    query = 'UPDATE product SET name = ?, price = ? WHERE name = ? AND price = ?'\r\n                    parameters = (new_name,new_price,name,old_price)\r\n                    self.run_query(query,parameters)\r\n                    self.message['text'] = 'Product {} updated Successfully'.format(new_name)\r\n                    self.edit_wind.destroy()\r\n                    self.get_products()\r\n\r\n                #Input Another Number\r\n                else:\r\n                    self.message['text'] = 'Incorrect Data Price'\r\n                    self.edit_new_price.delete(0,END)\r\n            \r\n            #Input Characters\r\n            else:\r\n                
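# '?' placeholders let sqlite3 bind the values safely instead of formatting them into the SQL string\r\n                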
query = 'UPDATE product SET name = ?, price = ? WHERE name = ? AND price = ?'\r\n                parameters = (new_name,new_price,name,old_price)\r\n                self.run_query(query,parameters)\r\n                self.message['text'] = 'Product {} updated Successfully'.format(new_name)\r\n                self.edit_wind.destroy()\r\n                self.get_products()\r\n\r\n        #Empty Entry\r\n        else:\r\n            self.message['text'] = 'Missing Data'\r\n\r\n#Executor\r\nif __name__ == '__main__':\r\n    window = Tk()\r\n    application = Product(window) \r\n    window.mainloop()\r\n","repo_name":"LeonardoCoaquira/lite-proyects","sub_path":"ProductApp/first-version.py","file_name":"first-version.py","file_ext":"py","file_size_in_byte":9122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17722986476","text":"\nclass Node(object):\n    def __init__(self, x):\n        self.x = x\n        self.left = None\n        self.right = None\n\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\nn5 = Node(5)\nn6 = Node(6)\nn7 = Node(7)\nn1.left = n2\nn1.right = n3\nn2.left = n4\nn2.right = n5\nn3.left = n6\nn3.right = n7\n\ndef dfs_recursive(node, out):\n    if node is not None:\n        out.append(node.x)\n        dfs_recursive(node.left, out)\n        dfs_recursive(node.right, out)\n\ndef dfs_iterative(start):\n    out = []\n    stack = [start]\n    while len(stack) > 0:\n        current = stack.pop()\n        if current is not None:\n            out.append(current.x)\n            stack.append(current.right)\n            stack.append(current.left)\n    return out\n\nfrom collections import deque\ndef bfs(start):\n    out = []\n    queue = deque([start])\n    while len(queue) > 0:\n        current = queue.pop()\n        if current is not None:\n            out.append(current.x)\n            queue.appendleft(current.left)\n            queue.appendleft(current.right)\n    return out\n\ndef inorder(node, out):\n    \"\"\"Useful for traversing BST from min to max\"\"\"\n    if node is not None:\n        inorder(node.left, out)\n        out.append(node.x)\n        inorder(node.right, out)\n\ndef preorder(node, out):\n    \"\"\"Useful for copying tree\"\"\"\n    if node is not None:\n        out.append(node.x)\n        preorder(node.left, out)\n        preorder(node.right, out)\n\ndef postorder(node, out):\n    \"\"\"Useful for deleting tree\"\"\"\n    if node is not None:\n        postorder(node.left, out)\n        postorder(node.right, out)\n        out.append(node.x)\n\nout = []\ndfs_recursive(n1, out)\nprint('DFS recursive: {}'.format(out))\n\nprint('DFS iterative: {}'.format(dfs_iterative(n1)))\n\nprint('BFS: {}'.format(bfs(n1)))\n\nout = []\ninorder(n1, out)\nprint('Inorder: {}'.format(out))\n\nout = []\npreorder(n1, out)\nprint('Preorder: {}'.format(out))\n\nout = []\npostorder(n1, out)\nprint('Postorder: {}'.format(out))\n\n\n","repo_name":"kyleclo/practice","sub_path":"trees/traversal.py","file_name":"traversal.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37545464584","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport libm2k\nfrom multiprocessing.pool import ThreadPool\nimport threading\ndef dig_reset(dig):\n    \"\"\" Reset digital object\n    \n    Arguments:\n    dig -- Digital object\\n\n    \"\"\"\n    dig.setSampleRateIn(10000)\n    dig.setSampleRateOut(10000)\n    dig.setCyclic(True)\n    dig.setDirection(0b1111111111111111)\n    for i in range(16):\n        dig.setOutputMode(i,1)\n    dig.enableAllOut(True)\n    return\n\ndef set_digital_trigger(dig):\n    \"\"\"Set the digital trigger\n    \n    Arguments:\n    dig -- Digital object\\n\n    \"\"\"\n    d_trig=dig.getTrigger()\n    d_trig.setDigitalMode(0)\n    d_trig.setDigitalStreamingFlag(True)\n    for i in range(16):\n        d_trig.setDigitalCondition(i,5)\n    return d_trig\n\ndef ch_0_7_digital_output(dig, buff):\n    \"\"\"Channels 0 to 7 are 
set as digital outputs and channels 8 to 15 are set as digital inputs.\n    A data buffer is sent on channels 0-7 and received on channels 8-15. The signal for each corresponding input channel is saved in ch1, to be plotted further on\n    \n    Arguments:\n    dig -- Digital object\\n\n    buff -- Data buffer to be sent\\n\n    \"\"\"\n    #enable 8 output channels\n    for i in range(8):\n        dig.setDirection(i,libm2k.DIO_OUTPUT)\n        dig.enableChannel(i,True)\n    #enable 8 input channels\n    for i in range(8,16):\n        dig.setDirection(i,libm2k.DIO_INPUT)\n        dig.enableChannel(i, True)\n    dig.push(buff)\n    ch1=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]\n    data = dig.getSamples(100)\n    for i in range(16):\n\n        if (dig.getDirection(i))==libm2k.DIO_INPUT:\n            for val in data:\n                if(val&(2**i)==2**i):\n                    ch1[i]=np.append(ch1[i],1)\n                else:\n                    ch1[i]=np.append(ch1[i],0)\n    return ch1\n\ndef ch_8_16_digital_output(dig,buff):\n    \"\"\"Channels 8 to 15 are set as digital outputs and channels 0 to 7 are set as digital inputs.\n    A data buffer is sent on channels 8-15 and received on channels 0-7. The signal for each corresponding input channel is saved in ch2, to be plotted further on\n    \n    Arguments:\n    dig -- Digital object\\n\n    buff -- Data buffer to be sent\\n\n    \"\"\"\n    #enable 8 output channels\n    for i in range(8,16):\n        dig.setDirection(i,libm2k.DIO_OUTPUT)\n        dig.enableChannel(i,True)\n    #enable 8 input channels\n    for i in range(8):\n        dig.setDirection(i,libm2k.DIO_INPUT)\n        dig.enableChannel(i, True)\n    dig.push(buff)\n    ch2=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]\n    data = dig.getSamples(100)\n    for i in range(16):\n        if (dig.getDirection(i))==libm2k.DIO_INPUT:\n            for val in data:\n                #get the signal from the digital channel i\n                if(val&(2**i)==2**i):\n                    ch2[i]=np.append(ch2[i],1)\n                else:\n                    ch2[i]=np.append(ch2[i],0)\n    return ch2\n\n
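# Example sketch (assumption: an ADALM2000 board is attached and the digital object comes\n# from ctx = libm2k.m2kOpen(); dig = ctx.getDigital()):\n#   ch = ch_0_7_digital_output(dig, [255, 0] * 50)   # alternate DIO0-7 high/low\n#   plt.plot(ch[8]); plt.show()                      # inspect what DIO8 captured\n\n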
def get_data_to_check_trig_condition(d_trig,dig, channel,i,buff):\n    \"\"\"Set trigger condition and send a buffer. Read and build the signal on the desired channel. The result will be further used to verify the trigger conditions\n    \n    Arguments:\n    d_trig -- Digital trigger\\n\n    dig -- Digital object\\n\n    channel --Channel under test\\n\n    i -- Trigger condition\\n\n    buff -- Data buffer \\n\n    \"\"\"\n    ch=[]\n    n=50\n    d_trig.setDigitalCondition(channel,i)\n    dig.push(buff)\n    data = dig.getSamples(n)\n\n    for val in data:\n        #print((val))\n        if(val&(2**channel)==2**channel):\n            ch=np.append(ch,1)\n        else:\n            ch=np.append(ch,0)\n\n    return ch\n\ndef check_digital_trigger(channel,dig, d_trig):\n    \"\"\" Verify the digital trigger for the 'rising edge, falling edge, low level, high level, any edge and no trigger' conditions\n    \n    Arguments:\n    channel -- Digital channel under test\\n\n    dig -- Digital object\\n\n    d_trig -- Digital trigger\\n\n    \"\"\"\n    dig_reset(dig)\n    # low=0\n    # high=1\n    # level=0.5\n    delay=1\n\n    condition=[libm2k.RISING_EDGE,libm2k.FALLING_EDGE,libm2k.LOW_LEVEL,libm2k.HIGH_LEVEL,libm2k.ANY_EDGE,libm2k.NO_TRIGGER]\n    trig_test=[]\n    dig.setDirection(channel,libm2k.DIO_INPUT)\n    dig.enableChannel(channel,True)\n    dig.setDirection(channel+8,libm2k.DIO_OUTPUT)\n    dig.enableChannel(channel+8,True)\n    buff=[512,512,512,0,0,0,0,512,512,512]\n\n    d_trig.setDigitalDelay(-delay)\n\n    #go through all possible trigger conditions\n    for i in condition:\n        if i==libm2k.RISING_EDGE:\n            ch=get_data_to_check_trig_condition(d_trig, dig,channel,i, buff)\n            #print(ch)\n            #plt.figure(1)\n            #plt.plot(ch)\n\n            if ch[delay]<=ch[delay+3]:\n                print(\"rising\")\n                trig_test=np.append(trig_test,1)\n            else:\n                trig_test=np.append(trig_test,0)\n\n        elif i==libm2k.FALLING_EDGE:\n            ch=get_data_to_check_trig_condition(d_trig, dig,channel,i, buff)\n            # print(ch)\n            # plt.figure(2)\n            # plt.plot(ch)\n            if ch[delay]>=ch[delay+3]:\n                print(\"falling\")\n                trig_test=np.append(trig_test,1)\n            else:\n                trig_test=np.append(trig_test,0)\n\n        elif i==libm2k.LOW_LEVEL:\n            ch=get_data_to_check_trig_condition(d_trig, dig,channel,i, buff)\n            # print(ch)\n\n            # plt.figure(3)\n            # plt.plot(ch)\n            if ch[delay]==0 :\n                print(\"low\")\n                trig_test=np.append(trig_test,1)\n            else:\n                trig_test=np.append(trig_test,0)\n\n        elif i==libm2k.HIGH_LEVEL:\n            ch=get_data_to_check_trig_condition(d_trig, dig,channel,i, buff)\n            # print(ch)\n            # plt.figure(4)\n            # plt.plot(ch)\n            if ch[delay]==1:\n                print(\"high\")\n                trig_test=np.append(trig_test,1)\n            else:\n                trig_test=np.append(trig_test,0)\n\n        elif i==libm2k.ANY_EDGE:\n            ch=get_data_to_check_trig_condition(d_trig, dig,channel,i, buff)\n            # print(ch)\n            # plt.figure(5)\n            # plt.plot(ch)\n\n            if ch[delay]<=ch[delay+3]:\n                print(\"rising\")\n                trig_test=np.append(trig_test,1)\n            elif ch[delay]>=ch[delay+3]:\n                print(\"falling\")\n                trig_test=np.append(trig_test,1)\n            else:\n                trig_test=np.append(trig_test,0)\n\n        #elif i==5:\n\n    plt.show()\n    return trig_test\n\ndef check_open_drain_mode(dig, channel):\n    \"\"\"Should verify if the selected digital channel works well in open drain mode\n    \n    Arguments:\n    dig -- Digital object\\n\n    channel -- Digital Channel under test\\n\n    \"\"\"\n    dig.enableChannel(channel,True)\n    dig.enableChannel(channel+8,True)\n    dig.setSampleRateIn(1000)\n    dig.setSampleRateOut(1000)  # set sample rate and buffer length for a frequency smaller than 450HZ\n    dig.setOutputMode(channel,libm2k.DIO_OPENDRAIN)#OD, PP\n    dig.setDirection(channel,libm2k.DIO_OUTPUT)\n    dig.setDirection(channel+8,libm2k.DIO_INPUT)\n    ch=[]\n    buff=[1,1,1,1,1,0,0,0,0,0,1]#100Hz frequency\n    dig.push(buff)\n    data = dig.getSamples(100)\n    for val in data:\n        print((val))\n        if(val&(2**channel)==2**channel):\n            ch=np.append(ch,1)\n        else:\n            
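# the channel's bit is clear, so this sample reads logic low\n            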
ch=np.append(ch,0)\n    #dig.setValueRaw(channel,libm2k.HIGH) \n    #print(\"raw value of the channel\")\n    #print(dig.getValueRaw(channel))\n    # plt.figure(15)\n    # plt.plot(ch)\n    # plt.show()\n\n    return\n\ndef task1(nb_samples, dig):\n    \"\"\"Parallel process where data is read\n    \n    Arguments:\n    nb_samples -- Number of samples\\n\n    dig -- Digital object\\n\n    \"\"\"\n    print(\"Task 1 assigned to thread: {}\".format(threading.current_thread().name))\n    data = dig.getSamples(nb_samples)\n\n\n    return data\n\ndef test_digital_cyclic_buffer(dig, d_trig, channel):\n    \"\"\"Test the digital cyclic buffer: the buffer is pushed while a parallel thread reads the samples back. \n    Arguments:\n    dig -- Digital object\\n\n    d_trig -- Digital trigger\\n\n    channel -- Digital channel under test\\n\n    \"\"\"\n    dig.setDirection(channel,libm2k.DIO_INPUT)\n    dig.enableChannel(channel,True)\n    dig.setDirection(channel+8,libm2k.DIO_OUTPUT)\n    dig.enableChannel(channel+8,True)\n    d_trig.setDigitalCondition(channel,libm2k.LOW_LEVEL)\n    dig.setCyclic(True)\n    n=50\n    buff=[512,512,0,0,0,0,512,512]\n    ch=[]\n    pool = ThreadPool(processes=1)\n    print(\"Main thread name: {}\".format(threading.main_thread().name))\n\n    async_result = pool.apply_async(task1, args=[n, dig])\n    dig.push(buff)\n    return_val = async_result.get()\n    for val in return_val:\n        #print((val))\n        if(val&(2**channel)==2**channel):\n            ch=np.append(ch,1)\n        else:\n            ch=np.append(ch,0)\n    plt.plot(ch)\n    plt.show()\n    return ch\n","repo_name":"AlexandraTrifan/libm2k","sub_path":"tests/digital_functions.py","file_name":"digital_functions.py","file_ext":"py","file_size_in_byte":8913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"33521151955","text":"#!/usr/bin/env python\n\n\"\"\"\n\nGiven a string, if its length is even, break the string into 2 equal parts, then reverse each part to get the \"reversencoded\" string.\n\nExample:\n\nonomatopoeia -> tamonoaieopo\n\nIf the string is not of even length, then do not change the position of the middle character, and reverse the letters before and after the middle character.\n\nExample:\n\npizza -> ipzaz\n\ndiscord -> sidcdro\n\nLimitations: The input will not contain any characters other than alphanumeric characters, i.e. no newlines/whitespace.\n\nGeneral contest; the shortest code in each language wins. 
Good luck!\n\nbtw: I made up the word reversencoded; I'm fairly sure such a form of \"encryption\" doesn't exist\n\n\"\"\"\n\ndef reverse_encode(s):\n    n = len(s)\n    s1 = s[:n//2][::-1]\n    s2 = \"\"\n    s3 = s[n//2 + n%2:][::-1]\n    if n%2 != 0:\n        s2 = s[n//2]\n    return s1 + s2 + s3\n\ndef main():\n    assert(reverse_encode(\"onomatopoeia\") == \"tamonoaieopo\")\n    assert(reverse_encode(\"pizza\") == \"ipzaz\")\n    assert(reverse_encode(\"discord\") == \"sidcdro\")\n\nmain()\n","repo_name":"qeedquan/challenges","sub_path":"codegolf/reversencode-the-given-string.py","file_name":"reversencode-the-given-string.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15053906699","text":"# -*-coding:utf-8-*-\n'''\n@Author: Zhao Lu\n@Time: 2018.3.31 10:46\n@Function: MNIST application\n'''\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\n#(1) load the data; one_hot means each label is an array of length n with only one element equal to 1.0\nmnist = input_data.read_data_sets('/home/260158/code/tensorflow/MNIST_data/',one_hot=True)\n\n#(2) build the regression model\nx = tf.placeholder(tf.float32, [None,784]) # placeholder\nW = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.matmul(x,W) + b # predicted value\n\n# define the loss function and the optimizer\ny_ = tf.placeholder(tf.float32, [None,10])\n\n# compute the difference between the prediction y and y_, then take the mean\n# cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y,y_))\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y))\n# use SGD as the optimizer\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n\n#(3) train the model\nsess = tf.InteractiveSession() # interactive session\ntf.global_variables_initializer().run() # initialize the variables and launch the model in the session\n\n#Train\nfor i in range(1000):\n    batch_xs, batch_ys = mnist.train.next_batch(100) # randomly grab 100 data points to feed the placeholders\n    sess.run(train_step,feed_dict={x:batch_xs,y_:batch_ys})\n    if i % 100 ==0:\n        print(sess.run(cross_entropy, feed_dict={x:batch_xs,y_:batch_ys}))\n        print(sess.run(y, feed_dict={x: batch_xs, y_: batch_ys}))\n        print(sess.run(y_, feed_dict={x: batch_xs, y_: batch_ys}),'\\n\\n')\n        # print(y)\n        # print(y_,'\\n')\n\n#(4) evaluate the model\ncorrect_prediction = tf.equal(tf.arg_max(y,1), tf.arg_max(y_,1)) # y is the prediction, y_ is the ground truth\naccuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) # cast the booleans to floats and take the mean\n\n# compute the model's accuracy on the test set\nprint(sess.run(accuracy,feed_dict={x:mnist.test.images,\n                                   y_:mnist.test.labels}))\n\n\n\n\n\n","repo_name":"wyuzyf/code","sub_path":"my_test-DL/test_tensorflow/test_mnist.py","file_name":"test_mnist.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74690242504","text":"def solution(array):\n    array.sort()\n    new_array = list(set(array))\n    start = new_array[0]\n    missing_num = None\n    m = 0\n\n    for i in range(start, start + len(new_array)):\n        if i != new_array[m]:\n            if i > 0:\n                missing_num = i\n                break\n        m += 1\n\n    if missing_num is None:\n        max_num = max(new_array)\n        max_num += 1\n        while max_num <= 0:\n            max_num += 1\n        missing_num = max_num\n    return missing_num\n\nprint(solution([-6, 8, 4006]))\n","repo_name":"greatertomi/Codility-Practices","sub_path":"Solutions/CountingElements/MissingInteger.py","file_name":"MissingInteger.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21116039588","text":"case = int(input())\nlists = []\nfor i in range(case):\n    lists.append(input().split())\nresult = []\nfor i in lists :\n    string 
= []\n for j in i[1] :\n for k in j:\n string.append(k*int(i[0]))\n if len(\"\".join(string)) == len(i[1])*int(i[0]) :\n result.append(\"\".join(string))\nfor i in result :\n print(i)","repo_name":"HorangApple/TIL","sub_path":"Algorithm/Baekjoon/2675_문자열 반복.py","file_name":"2675_문자열 반복.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5304456222","text":"import logging\nimport re\nfrom telegram import Update\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom pymongo import MongoClient\nfrom datetime import datetime, timedelta\nfrom bson import ObjectId\n\n\n# Set up logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# Define the regular expression pattern to match URLs\n#URL_PATTERN = r'((https?://[^\\s]+)|^)(www\\.[^\\s]+)'\nURL_PATTERN = r'(https?://[^\\s]+)'\n\n\n\n\n#URL_PATTERN = r'(https?://[^\\s]+)'\nMONGO_DB = \"mongodb+srv://spok:xzNMTKM0HgnL4oI1@cluster0.wkqej.mongodb.net/\"\n#MONGO_DB = \"mongodb://superuser:Viper.2013@192.168.9.66:27017/?authMechanism=DEFAULT&tls=false\"\n\n# Connect to MongoDB\nclient = MongoClient(MONGO_DB)\ndb = client['data']\ncollection = db['data']\n\n# Define the start command handler\n# def start(update: Update, context):\n# context.bot.send_message(chat_id=update.effective_chat.id, text=\"se Grabo ptm.\")\n\n# start_handler = CommandHandler('start', start)\n\n\n# Define the message handler\ndef record_url(update: Update, context):\n user_id = update.message.from_user.id\n first_name = update.message.from_user.first_name\n last_name = update.message.from_user.last_name\n text = update.message.text\n\n # Extract URLs using the regular expression pattern\n urls = []\n if update.message.text:\n text = update.message.text\n urls.extend(re.findall(URL_PATTERN, text))\n\n if update.message.caption:\n caption = update.message.caption\n urls.extend(re.findall(URL_PATTERN, caption))\n\n if update.message.photo:\n for photo in update.message.photo:\n if photo.caption:\n urls.extend(re.findall(URL_PATTERN, photo.caption))\n\n if update.message.document:\n document = update.message.document\n if document.file_name.endswith(('.txt', '.doc', '.docx', '.pdf')):\n file_id = document.file_id\n file_url = context.bot.get_file(file_id).file_path\n urls.append(file_url)\n\n if update.message.video:\n video = update.message.video\n if video.caption:\n urls.extend(re.findall(URL_PATTERN, video.caption))\n\n if update.message.audio:\n audio = update.message.audio\n if audio.title:\n urls.extend(re.findall(URL_PATTERN, audio.title))\n if audio.performer:\n urls.extend(re.findall(URL_PATTERN, audio.performer))\n\n if update.message.voice:\n voice = update.message.voice\n if voice.caption:\n urls.extend(re.findall(URL_PATTERN, voice.caption))\n\n if update.message.sticker:\n sticker = update.message.sticker\n if sticker.emoji:\n urls.extend(re.findall(URL_PATTERN, sticker.emoji))\n\n if update.message.animation:\n animation = update.message.animation\n if animation.caption:\n urls.extend(re.findall(URL_PATTERN, animation.caption))\n\n if update.message.video_note:\n video_note = update.message.video_note\n if video_note.caption:\n urls.extend(re.findall(URL_PATTERN, video_note.caption))\n\n if update.message.contact:\n contact = update.message.contact\n urls.extend(re.findall(URL_PATTERN, contact.phone_number))\n 
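# contact names rarely contain URLs, but they are scanned the same way for completeness\n        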
urls.extend(re.findall(URL_PATTERN, contact.first_name))\n        urls.extend(re.findall(URL_PATTERN, contact.last_name))\n    \n    \n\n    # Save the user, URLs, name, and date/time to the database\n    for url in urls:\n        document = {\n            '_id':ObjectId(),\n            'user_id': user_id,\n            'url': url,\n            'first_name': first_name,\n            'last_name': last_name,\n            'timestamp': datetime.now()\n        }\n        collection.insert_one(document)\n    \n\n\n    # # Send a confirmation message to the user\n    # context.bot.send_message(chat_id=update.effective_chat.id, text=\"the url was saved!\")\n\n#url_handler = MessageHandler(Filters.text & (~Filters.command), record_url)\nurl_handler = MessageHandler(Filters.all, record_url)\n\n\n\n\n\n\n\n\n# Define the top users command handler\ndef top_users(update: Update, context):\n\n\n    allowed_user_id = 1160667522  # Replace with the desired user ID\n\n    # Check if the command is executed by the allowed user\n    if update.message.from_user.id != allowed_user_id:\n        context.bot.send_message(chat_id=update.effective_chat.id, text=\"You are not authorized.\")\n        return\n    \n    # Calculate the date from a week ago\n    week_ago = datetime.now() - timedelta(days=7)\n\n    # Retrieve all users' URLs in the last week\n    pipeline = [\n        {\n            '$match': {\n                'timestamp': {'$gte': week_ago}\n            }\n        },\n        {\n            '$group': {\n                '_id': '$user_id',\n                'count': {'$sum': 1},\n                'first_name': {'$first': '$first_name'}\n            }\n        },\n        {\n            '$sort': {'count': -1}\n        },\n        {\n            '$limit': 10\n        }\n    ]\n    result = collection.aggregate(pipeline)\n\n    # Generate the message with the top users\n    message = \"here is your top 10 of the users who search the most:\n\"\n    for idx, entry in enumerate(result, start=1):\n        user_id = entry['_id']\n        count = entry['count']\n        first_name = entry['first_name']\n        user = f\"User {idx}: {first_name} (Count: {count})\n\"\n        message += user\n\n    # Send the message to the user\n    context.bot.send_message(chat_id=update.effective_chat.id, text=message)\n\ntop_users_handler = CommandHandler('topusers', top_users)\n\ndef main():\n    # Create an Updater and pass your bot's API token\n    updater = Updater(token='6131243126:AAFvqYfb541Rt-kwQt1lPeaLVIqfaencQEs', use_context=True)\n    dispatcher = updater.dispatcher\n\n    # Add the handlers\n    #dispatcher.add_handler(start_handler)\n    dispatcher.add_handler(url_handler)\n    dispatcher.add_handler (top_users_handler)\n\n\n    updater.start_polling()\n    updater.idle()\n\nif __name__ == '__main__':\n    main()","repo_name":"jechs83/fazil","sub_path":"user_analytic.py","file_name":"user_analytic.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17850788469","text":"from odoo import fields, models\n\n\nclass HrDepartment(models.Model):\n    _inherit = \"hr.department\"\n\n    code = fields.Char(\"Code\")\n    sequence_id = fields.Many2one(\n        \"ir.sequence\", string=\"Workorder Sequence\", readonly=True\n    )\n\n    def generate_sequence(self):\n        if self.sequence_id:\n            return\n        self.sequence_id = self.env[\"ir.sequence\"].create(\n            {\n                \"name\": f\"Department Sequence {self.code}\",\n                \"code\": self.code,\n                \"implementation\": \"no_gap\",\n                \"prefix\": f\"{self.code}-\",\n                \"padding\": 5,\n                \"use_date_range\": True,\n            }\n        )\n\n\nclass HrEmployee(models.Model):\n    _inherit = \"hr.employee\"\n\n    def department_by_user(self, user_id):\n        empl = self.search([(\"user_id\", \"=\", user_id)], limit=1)\n        return empl.department_id\n\n    def fullname_report(self):\n        return f\"{self.job_title} 
{self.name}\"\n","repo_name":"oskr1087/elecaustro","sub_path":"workorders/models/hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75000772103","text":"import pytest\nfrom tendril.utils.types import unitbase\nfrom decimal import Decimal\n\n\ndef test_round_to_n():\n assert unitbase.round_to_n(3.14159, 1) == 3\n assert unitbase.round_to_n(3.14159, 2) == 3.1\n assert unitbase.round_to_n(3.14159, 3) == 3.14\n assert unitbase.round_to_n(3.14159, 4) == 3.142\n\n assert unitbase.round_to_n(31.4159, 1) == 30\n assert unitbase.round_to_n(31.4159, 2) == 31\n assert unitbase.round_to_n(31.4159, 3) == 31.4\n assert unitbase.round_to_n(31.4159, 4) == 31.42\n\n assert unitbase.round_to_n(0, 1) == 0\n\n\ndef test_comparison_base():\n class TestType(unitbase.TypedComparisonMixin):\n def __init__(self, value):\n self.value = value\n\n def _cmpkey(self):\n return self.value\n\n class AnotherTestType(unitbase.TypedComparisonMixin):\n def __init__(self, value):\n self.value = value\n\n def _cmpkey(self):\n return self.value\n\n class BadTestType(unitbase.TypedComparisonMixin):\n def __init__(self, value):\n self.value = value\n\n class AnotherBadTestType(unitbase.TypedComparisonMixin):\n def __init__(self, value):\n self.value = value\n\n def _cmpkey(self):\n return str(self.value)\n\n u1 = TestType(1)\n u2 = TestType(2)\n u3 = TestType(3)\n u4 = TestType(-3)\n u5 = TestType(3)\n u6 = TestType(0)\n\n assert u5 == u3\n assert u1 < u2\n assert u1 <= u2\n assert u3 >= u2\n assert u3 > u2\n assert u4 < 0\n assert u5 > 0\n assert u6 == 0\n\n au1 = AnotherTestType(1)\n\n with pytest.raises(TypeError):\n au1 > u1\n\n with pytest.raises(TypeError):\n au1 >= u1\n\n with pytest.raises(TypeError):\n au1 < u1\n\n with pytest.raises(TypeError):\n au1 <= u1\n\n with pytest.raises(TypeError):\n au1 == u1\n\n with pytest.raises(TypeError):\n au1 != u1\n\n bu1 = BadTestType(10)\n bu2 = BadTestType(10)\n\n with pytest.raises(NotImplementedError):\n bu1 > bu2\n\n with pytest.raises(NotImplementedError):\n bu1 >= bu2\n\n with pytest.raises(NotImplementedError):\n bu1 < bu2\n\n with pytest.raises(NotImplementedError):\n bu1 <= bu2\n\n with pytest.raises(NotImplementedError):\n bu1 == bu2\n\n with pytest.raises(NotImplementedError):\n bu1 != bu2\n\n abu1 = AnotherBadTestType(10)\n\n with pytest.raises(TypeError):\n abu1 != u1\n\n\ndef test_dummyunit():\n d1 = unitbase.DummyUnit()\n d2 = unitbase.DummyUnit(value=10)\n\n assert repr(d1) == 'Dummy Unit'\n\n with pytest.raises(TypeError):\n d1 + d2\n\n with pytest.raises(TypeError):\n d1 + 0\n\n with pytest.raises(TypeError):\n 0 + d1\n\n with pytest.raises(TypeError):\n d1 - d2\n\n with pytest.raises(TypeError):\n d1 - 0\n\n # TODO Figure this out\n # with pytest.raises(NotImplementedError):\n # 0 - d1\n\n with pytest.raises(TypeError):\n d1 * 10\n\n with pytest.raises(TypeError):\n 10 * d1\n\n with pytest.raises((NotImplementedError, TypeError)):\n d1 / 10\n\n # TODO Figure this out\n # with pytest.raises(NotImplementedError):\n # 10 / d1\n\n with pytest.raises((TypeError, NotImplementedError)):\n d1 > d2\n\n with pytest.raises((TypeError, NotImplementedError)):\n d1 < d2\n\n with pytest.raises((TypeError, NotImplementedError)):\n d1 > 0\n\n with pytest.raises((TypeError, NotImplementedError)):\n d1 < 0\n\n # TODO work out py3 behavior\n # with pytest.raises(NotImplementedError):\n # d1 == d2\n #\n # with pytest.raises(NotImplementedError):\n # d1 == 
0\n\n\ndef test_parser_none():\n assert unitbase.parse_none(10) == 10\n assert unitbase.parse_none('10') == '10'\n\n\ndef test_parser_percentage():\n assert unitbase.parse_percent('10') == Decimal('10')\n assert unitbase.parse_percent('10.99') == Decimal('10.99')\n\n assert unitbase.parse_percent('10%') == Decimal('10')\n assert unitbase.parse_percent('10 %') == Decimal('10')\n assert unitbase.parse_percent('10.99%') == Decimal('10.99')\n assert unitbase.parse_percent('10.99 %') == Decimal('10.99')\n\n assert unitbase.parse_percent('10pc') == Decimal('10')\n assert unitbase.parse_percent('10 pc') == Decimal('10')\n assert unitbase.parse_percent('10.99pc') == Decimal('10.99')\n assert unitbase.parse_percent('10.99 pc') == Decimal('10.99')\n\n\ndef test_type_percentage():\n p1 = unitbase.Percentage(10)\n assert p1.value == Decimal('10')\n\n p2 = unitbase.Percentage('10')\n assert p2.value == Decimal('10')\n\n p3 = unitbase.Percentage('10%')\n assert p3.value == Decimal('10')\n\n\ndef test_type_gainbase():\n g1 = unitbase.GainBase(10, None, None, None, gtype=str)\n assert g1.in_db() == 20\n assert g1._gtype == str\n\n g1 = unitbase.GainBase(0.1, None, None, None, gtype=str)\n assert g1.in_db() == -20\n\n g1 = unitbase.GainBase(100, None, None, None, gtype=str)\n assert g1.in_db() == 40\n","repo_name":"chintal/tendril","sub_path":"tests/test_utils_types_unitbase.py","file_name":"test_utils_types_unitbase.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7661533665","text":"#All dirs relative to root\nDATA_LOADERS_LOC = \"tmp/data_loaders/\"\nCOCO_ANN_LOC = \"tmp/coco_ann/\"\nMODELS_LOC = \"tmp/models/\"\nRES_LOC = \"tmp/res/\"\n\nINFERENCE_RESULTS = \"results\"\nEFUSION_RESULTS = \"results/EfusionPS/\"\n\nTRAIN_DIR = \"data_train/\"\nVAL_DIR = \"data_val/\"\n\nTRAIN_RES_FILENAME = \"training_results.txt\"\nEVAL_RES_FILENAME = \"eval_results.txt\"\nKITTI_TRAIN_RES_FILENAME = \"kitti_training_results.txt\"\nKITTI_TRAIN_RES_FILENAME_NO_INSTANCE = \"vkitti_training_efusionps_no_instance_results.txt\"\nVKITTI_TRAIN_RES_FILENAME_EffPS_NO_INSTANCE = \"vkitti_training_effps_no_instance_results.txt\"\nVKITTI_TRAIN_RES_FILENAME_EffPS = \"vkitti_training_effps_results_parallel.txt\"\n\nKITTI_DATA_LOADER_TRAIN_FILANME = \"kitti_data_loader_train.pth\"\nKITTI_DATA_LOADER_VAL_FILENAME = \"kitti_data_loader_val.pth\"\n\nVKITTI_DATA_LOADER_TRAIN_FILANME = \"vkitti_data_loader_train_625_samples_v2.pth\"\nVKITTI_DATA_LOADER_VAL_FILENAME = \"vkitti_data_loader_val_625_samples_v2.pth\"\n\nDATA_LOADER_TRAIN_FILANME = \"data_loader_train.pth\"\nDATA_LOADER_VAL_FILENAME = \"data_loader_val.pth\"\n\nDATA_LOADER_TRAIN_FILENAME_BG = \"data_loader_train_bg.pth\"\nDATA_LOADER_VAL_FILENAME_BG = \"data_loader_val_bg.pth\"\n\nDATA_LOADER_TRAIN_FILENAME_OBJ = \"data_loader_train_obj.pth\"\nDATA_LOADER_VAL_FILENAME_OBJ = \"data_loader_val_obj.pth\"\n\n\nANN_VAL_DEFAULT_NAME = \"coco_ann_val.json\"\nANN_TRAIN_DEFAULT_NAME = \"coco_ann_train.json\"\n\nANN_VAL_DEFAULT_NAME_OBJ = \"coco_ann_val_obj.json\"\nANN_TRAIN_DEFAULT_NAME_OBJ = \"coco_ann_train_obj.json\"\n\nANN_VAL_DEFAULT_NAME_BG = \"coco_ann_val_bg.json\"\nANN_TRAIN_DEFAULT_NAME_BG = \"coco_ann_train_bg.json\"\n#EVALUATION\n\nCOCO_RES_JSON_FILENAME = 
\"tmp/coco_ann/COCO_val_res.json\"","repo_name":"juanb09111/semantic_depth","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"21289001117","text":"from botClass import ChitraguptClient\nfrom botCredentials import TOKEN\n# from os import getenv\n\n# token = getenv('DISCORD_TOKEN')\n\nrunningInstance = ChitraguptClient()\n\ntry:\n runningInstance.run(TOKEN,bot=True,reconnect = True)\n\nexcept Exception as runtimeException:\n print(\"runtimeException, Type - {} : {} \".format(runtimeException.__class__.__name__,runtimeException))\n","repo_name":"zen0-5338/chitragupt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24750285903","text":"# type: ignore\nfrom typing import Callable\n\nfrom prometheus_client import Counter\nfrom prometheus_fastapi_instrumentator import Instrumentator, metrics\nfrom prometheus_fastapi_instrumentator.metrics import Info\n\n\ndef error_metric() -> Callable[[Info], None]:\n \"\"\"Basic error counter metric.\n\n Refer to prometheus_fastapi_instrumentator documentation for details.\n \"\"\"\n metric = Counter(\n \"errors_total\",\n \"Errors counter by http method and error type\",\n labelnames=(\n \"method\",\n \"error_type\",\n ),\n )\n\n def instrumentation(info: Info) -> None:\n if info.response.status_code == 422:\n metric.labels(info.request.method, \"validation error\").inc()\n if info.response.status_code == 409:\n metric.labels(info.request.method, \"duplicate\").inc()\n if info.response.status_code == 404:\n metric.labels(info.request.method, \"not found\").inc()\n\n return instrumentation\n\n\nmetrics_instrumentator = Instrumentator(excluded_handlers=[\"/docs\", \"/openapi.json\", \"/metrics\"])\nmetrics_instrumentator.add(error_metric())\nmetrics.requests()\n","repo_name":"Mindflutter/fastemplate","sub_path":"src/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"13608997407","text":"from logging import getLogger\nfrom typing import Any\n\nfrom eoxserver.services.ows.wps.util import get_process_by_identifier\nfrom eoxserver.services.ows.wps.interfaces import ProcessInterface\nfrom eoxserver.services.ows.wps.exceptions import OperationNotSupportedError\nfrom eoxserver.services.ows.wps.parameters import BoundingBox, ResponseForm\n\nfrom eoxserver.services.ows.wps.v10.execute_util import (\n pack_outputs as pack_outputs_v10,\n parse_params as parse_params_v10,\n decode_output_requests as decode_output_requests_v10,\n)\nfrom eoxserver.services.ows.wps.v10.encoders import WPS10ExecuteResponseRawEncoder\n\nfrom ows.wps.v20 import decoders\nimport ows.wps.v20.types as pyows_types\nimport ows.common.types as pyows_common_types\n\n\nclass WPS20ExecuteHandler(object):\n \"\"\"WPS 2.0 DescribeProcess service handler.\"\"\"\n\n service = \"WPS\"\n versions = (\"2.0.0\",)\n request = \"Execute\"\n methods = [\"POST\"]\n\n def handle(self, request):\n \"\"\"Handle HTTP request.\"\"\"\n logger = getLogger(__name__)\n\n execute_request: pyows_types.ExecuteRequest = decoders.XMLExecuteDecoder(\n request.body\n ).decode()\n process: ProcessInterface = get_process_by_identifier(\n execute_request.process_id\n )\n\n input_defs = 
parse_params_v10(process.inputs)\n output_defs = parse_params_v10(process.outputs)\n\n # reuse wps 1.0 encoding\n resp_form = ResponseForm()\n for output in execute_request.output_definitions:\n resp_form.set_output(output)\n # these fields are not present in pyows, we set them for compatibility\n output.uom = None\n output.as_reference = None\n\n inputs = {\n name: getattr(optional_input, \"default\", None)\n for (name, optional_input) in input_defs.values()\n if optional_input.is_optional\n }\n inputs.update(decode_output_requests_v10(resp_form, output_defs))\n inputs.update(\n {\n input_.identifier: _input_value(input_)\n for input_ in execute_request.inputs\n }\n )\n\n if execute_request.mode == pyows_types.ExecutionMode.sync:\n logger.debug(\"Execute process %s\", execute_request.process_id)\n outputs = process.execute(**inputs)\n elif execute_request.mode == pyows_types.ExecutionMode.async_:\n raise OperationNotSupportedError(\"Async mode not implemented\")\n else: # auto\n raise OperationNotSupportedError(\"Auto mode not implemented\")\n\n if execute_request.response == pyows_types.ResponseType.raw:\n packed_outputs = pack_outputs_v10(\n outputs,\n response_form=resp_form,\n output_defs=output_defs,\n )\n encoder = WPS10ExecuteResponseRawEncoder(resp_form=resp_form)\n response = encoder.encode_response(packed_outputs)\n return encoder.serialize(response)\n\n else: # document\n raise OperationNotSupportedError(\"Document mode not implemented\")\n\n\ndef _input_value(input_: pyows_types.Input) -> Any:\n if isinstance(input_.data, pyows_types.Data):\n\n data_value = input_.data.value\n\n # TODO: use pattern matching as soon as we can require python 3.10\n if isinstance(data_value, pyows_types.LiteralValue):\n return data_value.value\n elif isinstance(data_value, pyows_common_types.BoundingBox):\n return BoundingBox(\n bbox=(data_value.bbox[:2], data_value.bbox[2:]),\n crs=data_value.crs,\n )\n else:\n # not a common type, process needs to handle it on its own\n return data_value\n else:\n raise OperationNotSupportedError(\"References as input are not implemented\")\n","repo_name":"EOxServer/eoxserver","sub_path":"eoxserver/services/ows/wps/v20/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"81"} +{"seq_id":"39675290997","text":"import sys\ninput = sys.stdin.readline\n\ndef input_list_numbers():\n return(list(map(int, input().split())))\n\nn = int(input())\n\npi_list = input_list_numbers()\nresult = [None] * n\n\nfor idx, value in enumerate(pi_list):\n result[value - 1] = idx + 1\n\nprint(' '.join(map(str, result)))\n\n","repo_name":"luis-herasme/Algoritmos-CodeForces","sub_path":"Presents.py","file_name":"Presents.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"19185146831","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def reverse_list(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n The most important thing to this question is TWO pointers. 
Prev and Current\n \"\"\"\n previous = None\n current = head\n while current:\n nextTmp = current.next\n current.next = previous\n previous = current\n current = nextTmp\n return previous\n \n def reverse_list_recursive(self, head):\n if not head or not head.next:\n return head\n reverse_head = self.reverse_list_recursive(head.next)\n head.next.next = head\n head.next = None\n return reverse_head\n\ndef create_dummy_list():\n a = ListNode(1)\n b = ListNode(2)\n c = ListNode(3)\n d = ListNode(0)\n a.next = b\n b.next = c\n c.next = d\n\n return a\n\ndef print_list(head):\n current = head\n if not current:\n return None\n print(\"{} \".format(current.val), end=\"\")\n current = current.next\n while current:\n print(\"-> {}\".format(current.val), end=\" \")\n current = current.next\n print(\"\\n\")\n\ndef main():\n dummy_list = create_dummy_list()\n print(\"Original List\")\n print_list(dummy_list)\n\n sol = Solution()\n new_head = sol.reverse_list(dummy_list)\n print_list(new_head)\n\n dummy_list = create_dummy_list()\n new_head = sol.reverse_list_recursive(dummy_list)\n print_list(new_head)\n\nif __name__ == '__main__':\n main()\n","repo_name":"pololee/oj-leetcode","sub_path":"companies/facebook/p206/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7113023833","text":"# task 3 of 7 - Remove vowels from input string.\n\"\"\"\ndef csRemoveTheVowels(input_str):\n vowels = ['a', 'e', 'i','o', 'u', 'A', 'E', 'I', 'O', 'U']\n for x in input_str:\n if x in vowels:\n input_str = input_str.replace(x, '')\n return input_str\n \nprint(csRemoveTheVowels('Lambda School is Awesome!'))\n\n\"\"\"\n\n# task 4 of 7 - Return the shortest word from a string of words.\n\n\"\"\"\n\ndef csShortestWord(input_str):\n stringToSplit = input_str\n splitString = stringToSplit.split()\n print(splitString)\n\n shortestWord = min(splitString, key=len)\n return len(shortestWord)\n\nprint(csShortestWord('Happy Day'))\n\n\"\"\"\n\n# task 5 of 7 - Given an array of integers, return the sum of all the positive integers in the array.\n\n\"\"\"\n\ndef csSumOfPositive(input_arr):\n return sum(x for x in input_arr if x > 0)\n\nprint(csSumOfPositive([1, 2, 3, -4, 5, -3, 7, 8, 9, 6, 4, -7]))\n\n\"\"\"\n\n# task 6 of 7 - Given two strings that include only lowercase alpha characters, str_1 and str_2, write a function that returns a new sorted string that contains any character (only once) that appeared in str_1 or str_2.\n\ndef csLongestPossible(str_1, str_2):\n newString = str_1 + str_2 #combine the two strings\n print(newString)\n\n return ''.join(sorted(set(newString))) #use set() to create a set of the unique characters in the new string; sort them using sorted(); join the characters using ''.join\n\nprint(csLongestPossible('abc', 'zyx'))\n\n# task 7 of 7 - Given a start integer and an ending integer (both inclusive), write a function that returns the count (not the sum) of all integers in the range (except integers that contain the digit 5)\n\n\"\"\"\n\ndef csAnythingButFive(start, end):\n arrayOfNumbers = [] # initialize empty array\n rangeList = list(range(start, end + 1)) # increase the end point of the range by 1 so it ends in the spot we want, then make a list of the numbers in that range\n print(rangeList)\n\n for i in rangeList: # for each index in the list we just created, if the string of that index doesn't contain a 5, we add it to the empty array.\n if '5' not in str(i):\n 
arrayOfNumbers.append(i)\n print(arrayOfNumbers)\n return len(arrayOfNumbers)\n \nprint(csAnythingButFive(1, 5))\n\n\"\"\"","repo_name":"notrabe/cs-problem-solving","sub_path":"problems.py","file_name":"problems.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"59453954","text":"name = \"Scarlett\"\r\nstate = \"California\"\r\ncity = \"Paris\"\r\ntvshow = \"Flash\"\r\nbook = \"Everything Everything\"\r\nfood = \"Buffalo Chiken Wings\"\r\ncolor = \"Sky Blue\"\r\nsport = \"Archery\"\r\nfriend1 = \"Milli\"\r\nfriend2 = \"Anabel\"\r\nfriend3 = \"Merry The Fairy\"\r\n\r\nprint(name + \" likes to visit \" + state + \" and \" + city + \".\")\r\nprint(name + \" loves to eat \" + food + \".\")\r\nprint(color + \" is a pretty color. \")\r\nprint(name + \" likes to do \" + sport + \". \" + friend1 + \" and \" + friend2 + \" do not do \" + sport + \". \" + friend3 + \" does do \" + sport + \".\")\r\n\r\n","repo_name":"scrakes/pg_sc","sub_path":"Personality_SC.py","file_name":"Personality_SC.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27274420591","text":"\nimport os\nimport sys\n\nimport numpy as np\n\nJPEG2000_EXT = 'jp2'\n\ndef convert(input_path, filename):\n sp_name = filename.split('.')\n if sp_name[1] == JPEG2000_EXT:\n with open(input_path + '/' + filename,mode='rb') as fop:\n data = np.fromfile(fop,np.float64,-1)\n print(len(data))\n\ndef list_convert(input_path):\n list(map(lambda name: convert(input_path, name), os.listdir(input_path)))\n\nif __name__ == '__main__':\n arguments = sys.argv\n\n if len(arguments) == 2:\n list_convert(arguments[1])","repo_name":"takayasu/python-opencv","sub_path":"fileread.py","file_name":"fileread.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21194173237","text":"import Element as element\n\nclass Create(element.Element):\n\n def __init__(self, delay):\n print(self, delay)\n super().__init__(delay)\n super().setTnext(0.0)\n\n def outAct(self):\n super().outAct()\n super().setTnext(super().getTcurr() + super().getDelay())\n p1 = self.nextElement[0]\n p2 = self.nextElement[1]\n if p1.queue <= p2.queue :\n p1.inAct()\n else :\n p2.inAct()\n\n","repo_name":"NastiaBondarenko/modelingOfSystemsLab3","sub_path":"task2/Create.py","file_name":"Create.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34384230385","text":"import platform, os, sys, string\n\nis_windows = True if platform.system() == \"Windows\" else False\n\nif is_windows:\n os.system(\"title NEO-GRABBER @ github.com/i64-sudo/Neo-Grabber\")\n\ndef clear():\n if is_windows:\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n\ndef pause():\n if is_windows:\n os.system(f\"pause >nul\")\n else:\n input()\n\ndef leave():\n try:\n sys.exit()\n except:\n exit()\n\ndef error(error):\n print(red(f\" [!] 
Error : {error}\"), end=\"\")\n pause(); clear(); leave()\n\ndef red(text):\n os.system(\"\"); faded = \"\"\n for line in text.splitlines():\n green = 250\n for character in line:\n green -= 5\n if green < 0:\n green = 0\n faded += (f\"\\033[38;2;255;{green};0m{character}\\033[0m\")\n faded += \"\\n\"\n return faded\n\ndef blue(text):\n os.system(\"\"); faded = \"\"\n for line in text.splitlines():\n green = 0\n for character in line:\n green += 3\n if green > 255:\n green = 255\n faded += (f\"\\033[38;2;0;{green};255m{character}\\033[0m\")\n faded += \"\\n\"\n return faded\n\ndef water(text):\n os.system(\"\"); faded = \"\"\n green = 10\n for line in text.splitlines():\n faded += (f\"\\033[38;2;0;{green};255m{line}\\033[0m\\n\")\n if not green == 255:\n green += 15\n if green > 255:\n green = 255\n return faded\n\ndef purple(text):\n os.system(\"\")\n faded = \"\"\n down = False\n\n for line in text.splitlines():\n red = 40\n for character in line:\n if down:\n red -= 3\n else:\n red += 3\n if red > 254:\n red = 255\n down = True\n elif red < 1:\n red = 30\n down = False\n faded += (f\"\\033[38;2;{red};0;220m{character}\\033[0m\")\n return faded\n\nclass ui:\n banner = f\"\"\"\n __ _ _______ _______ _______ ___ ___ _______ _______ _______ ______ \n | | | || || | | || | | | | || || || _ | \n | |_| || ___|| _ | ____ | || | | | | _ || _ || ___|| | || \n | || |___ | | | ||____| | || | | | | |_| || |_| || |___ | |_||_ \n | _ || ___|| |_| | | _|| |___ | | | ___|| ___|| ___|| __ |\n | | | || |___ | | | |_ | || | | | | | | |___ | | | |\n |_| |__||_______||_______| |_______||_______||___| |___| |___| |_______||___| |_|\n Answer with either (y/Y) or (n/N) for Yes & No\n\n {purple(f\"[>] Running with Python {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}\")}\n\n\"\"\"\n sv = f\" \"","repo_name":"i64-sudo/Neo-Clipper","sub_path":"ui/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"25026645824","text":"def solution(x, y, n):\n answer = 0\n dp = [int(1e9)] * (y + 1)\n dp[x] = 0\n\n for i in range(x + 1, y + 1):\n\n num_2, num_3, num_add = int(1e9), int(1e9), int(1e9)\n\n # i가 2로 나눠 질때만\n if i % 2 == 0:\n num_2 = dp[i // 2]\n\n # i가 3으로 나눠 질 때만\n if i % 3 == 0:\n num_3 = dp[i // 3]\n\n # i - n 이 0보다 클때만\n if i - n > 0:\n num_add = dp[i - n]\n\n min_num = min(num_2, min(num_3, num_add))\n\n dp[i] = min_num + 1\n\n if dp[y] > int(1e9):\n return -1\n else:\n return dp[y]\n","repo_name":"parkchanghyup/algorithm","sub_path":"python/프로그래머스/[python] 프로그래머스 - 숫자 변환하기.py","file_name":"[python] 프로그래머스 - 숫자 변환하기.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6883375931","text":"import argparse\nimport itertools as it\nimport os\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--n\", type=int, required=True)\n parser.add_argument(\"--p\", type=int, required=True)\n parser.add_argument(\"--q\", type=int, required=True)\n parser.add_argument(\"--update\", dest=\"update\", action=\"store_true\")\n parser.set_defaults(update=False)\n n = parser.parse_args().n\n p = parser.parse_args().p\n q = parser.parse_args().q\n update = parser.parse_args().update\n\n incomplete = []\n if update:\n top_dirname = f\"tour-reports-{n}\"\n if os.path.exists(top_dirname):\n for gnuproc in range((n-3)**p):\n gnuprocdir = 
os.path.join(top_dirname, f\"gnuproc{gnuproc}\")\n if not os.path.exists(gnuprocdir):\n incomplete.append(gnuproc)\n elif \"success.txt\" not in os.listdir(gnuprocdir):\n incomplete.append(gnuproc)\n elif \"failed.txt\" in os.listdir(gnuprocdir):\n print(f\"gnuproc {gnuproc} failed\")\n else:\n for gnuproc in range((n-3)**p):\n incomplete.append(gnuproc)\n\n mid_inds = list(range(n-(p+q), n-q))\n mids = [\n list(set(range(n)) - set([(i-1)%n, i, (i+1)%n]))\n for i in mid_inds\n ]\n\n with open(f\"args{n}.txt\", \"w\") as f:\n for idx, mid in enumerate(it.product(*mids)):\n if update and idx not in incomplete:\n continue\n f.write(\n \"python main.py\"\n + f\" --mid=\\\"{list(mid)}\\\"\"\n + f\" --n={n}\"\n + f\" --p={p}\"\n + f\" --q={q}\"\n + f\" --gnuproc={idx}\\n\"\n )\n","repo_name":"samvanderpoel/TSP-vs-Graphs","sub_path":"tspnng/write_args.py","file_name":"write_args.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"71364071945","text":"#\r\n#### Parking ####\r\n#\r\n# Start ; 05/2015\r\n#\r\n\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nimport time\r\nfrom random import randint\r\nimport serial\n\r\n############ Fonctions ############\r\n\r\n\r\ndef SetValues():\r\n global nb_empty_slots\r\n setvalues = tk.Tk()\r\n setvalues.wm_title(\"Parking User Interface - Reinitialization\")\r\n \r\n av_slots = tk.Entry(setvalues)\r\n\r\n nbp = ttk.Button(setvalues, text=\"Set: Nb of available slots\", command=lambda: SetAvSlots(av_slots))\r\n av_slots.pack()\r\n nbp.pack()\r\n setvalues.mainloop()\r\n\r\n\r\ndef SetAvSlots(av_slots):\r\n global nb_empty_slots\r\n global nb_total_slots\r\n try:\r\n if int(av_slots.get()) <= nb_total_slots and int(av_slots.get()) >= 0:\r\n nb_empty_slots = int(av_slots.get())\r\n GiveOrders(0,0)\r\n \r\n except:\r\n print('ERROR : Invalid value inserted')\r\n\r\ndef IsValid(var):\r\n a = False\r\n\r\n length = len(var)\r\n \r\n if length > 3:\r\n a = True\r\n\r\n return a\r\n \r\n \r\ndef listen_serialport():\r\n global ser, new_data, action, rcvd_data\r\n rc = \"\"\r\n \r\n rcvd_data = \"0000\"\r\n temp = [None]*4\r\n data_in = \"\"\r\n global nb_empty_slots, start_mark, end_mark, data_in\r\n rcv_in_prog = False\r\n \r\n if ser.inWaiting() > 0 and new_data == False:\r\n\r\n action = True\r\n j = 0\r\n rcvd_data = \"\"\r\n while ser.inWaiting() > 0 and new_data == False:\r\n rc = str(ser.read())\r\n if rcv_in_prog == True:\r\n if rc != end_mark:\r\n if j >=1:\r\n try:\r\n temp[j-1] = rc[2]\r\n rcvd_data += str(temp[j-1])\r\n except:\r\n break\r\n else:\r\n rcv_in_prog = False\r\n new_data = True\r\n elif rc == start_mark:\r\n rcv_in_prog = True\r\n j +=1\r\n ser.flushInput()\r\n data_in = rcvd_data\r\n if IsValid(data_in):\r\n GiveOrders(0,0)\r\n else:\r\n print(\"ERROR: got invalid data_in value\")\r\n\r\ndef open_gate(gate_nb):\r\n global gate, action, ot\r\n ot[gate_nb] = time.time()\r\n action = True\r\n if gate_nb == 0:\r\n GiveOrders(\"1\",\"0\")\r\n elif gate_nb == 1:\r\n GiveOrders(\"0\",\"1\")\r\n\r\n\r\ndef close_gate(gate_nb):\r\n\r\n global ot, gate\r\n if ot[gate_nb]-time.time() <= -5:\r\n gate[gate_nb] = 'CLOSE'\r\n ot[gate_nb] = 999999999999\r\n\r\n \r\n\r\n\r\ndef readcontent_open(file_lines, gate):\r\n \r\n x1 = file_lines[gate][2]\r\n x2 = file_lines[gate][3]\r\n y1 = file_lines[gate][4]\r\n y2 = file_lines[gate][5]\r\n \r\n return x1, x2, y1, y2\r\n\r\ndef readcontent_close(file_lines, gate):\r\n \r\n x1 = file_lines[gate][9]\r\n 
x2 = file_lines[gate][10]\r\n y1 = file_lines[gate][11]\r\n y2 = file_lines[gate][12]\r\n\r\n return x1, x2, y1, y2\r\n\r\ndef GiveOrders(overwrite1, overwrite2):\r\n global data_out, gate, action, nb_empty_slots, new_data, nb_total_slots, ot, data_in\r\n temp = [None]*5\r\n data_out = \"<\"\r\n temp[0] = str(start_mark)\r\n temp[4] = str(end_mark)\r\n new_data = False\r\n\r\n if action == True:\r\n if nb_empty_slots <= 0:\r\n temp[3] = \"1\"\r\n\r\n if data_in[0] == \"1\" and temp[3] !=\"1\":\r\n temp[1] = \"1\"\r\n nb_empty_slots -=1\r\n gate[0] = 'OPEN'\r\n ot[0] = time.time()\r\n \r\n if data_in[1] == \"1\":\r\n if nb_empty_slots < nb_total_slots:\r\n nb_empty_slots += 1\r\n temp[2] = \"1\"\r\n gate[1] = 'OPEN'\r\n ot[1] = time.time()\r\n temp[3] = \"0\"\r\n\r\n else:\r\n temp[2] = \"0\"\r\n \r\n \r\n\r\n if overwrite1 == \"1\":\r\n gate[0] = 'OPEN'\r\n if nb_empty_slots > 0:\r\n nb_empty_slots -=1\r\n temp[1] = \"1\"\r\n\r\n if overwrite2 == \"1\":\r\n gate[1] = 'OPEN'\r\n if nb_empty_slots < nb_total_slots:\r\n nb_empty_slots += 1\r\n temp[2] = \"1\"\r\n\r\n \r\n for loop in range (3):\r\n if temp[loop+1] == None:\r\n temp[loop+1] = 0\r\n data_out += str(temp[loop+1])\r\n \r\n data_out += \">\"\r\n action = False\r\n\r\n ser.write(data_out.encode())\r\n \r\n\r\n\r\ndef update_can():\r\n global gate, ot\r\n global nb_empty_slots\r\n for loop in range (NB_GATE):\r\n can.delete(gate_draw[loop])\r\n #Limit of park\r\n can.create_line(10,10, 990, 10, width=3)\r\n can.create_line(990,10, 990, 590, width=3)\r\n can.create_line(10,590, 990, 590, width=3)\r\n can.create_line(10,10, 10, 590, width=3)\r\n\r\n #NbPlaces Slider\r\n rect_x2 = 200+(nb_empty_slots/nb_total_slots)*700\r\n can.create_rectangle(200,275,900,325,fill=\"red\", outline='red')\r\n can.create_rectangle(200,275,rect_x2,325,fill=\"green\", outline='green')\r\n can.create_text((200+rect_x2)/2, 300, text=nb_empty_slots, fill='white')\r\n xtext=(900-rect_x2)/2+rect_x2\r\n can.create_text(xtext, 300, text=nb_total_slots-nb_empty_slots, fill='white')\r\n\r\n for loop in range (len(gate)):\r\n close_gate(loop)\r\n\r\n if gate[loop] == 'OPEN':\r\n\r\n \r\n coords[loop] = readcontent_open(file_lines, loop)\r\n gate_draw[loop] = can.create_line(coords[loop][0], coords[loop][1], coords[loop][2], coords[loop][3], width=3, fill='green')\r\n \r\n elif gate[loop] == 'CLOSE':\r\n \r\n \r\n coords[loop] = readcontent_close(file_lines, loop)\r\n gate_draw[loop] = can.create_line(coords[loop][0], coords[loop][1], coords[loop][2], coords[loop][3], width=3, fill='red')\r\n\r\n else:\r\n print(\"ERROR: expected OPEN or CLOSE but got; gate[\",loop,\"] =\",gate[loop] )\r\n\r\n\r\n\r\n listen_serialport()\r\n\r\n\r\n\r\n if run == 1:\r\n wdw.after(1000,update_can)\r\n \r\n############## Main ################\r\n \r\nNB_GATE = 2\r\nrun = 1\r\nnb_empty_slots = 8\r\nnb_total_slots = 8\r\ngate = [None]*NB_GATE\r\ngate_text = [None]*NB_GATE\r\ngate_draw = [None]*NB_GATE\r\ncoords = [None]*NB_GATE\r\nfile_lines = [None]*NB_GATE\r\ngate[0] = 'CLOSE'\r\ngate[1] = 'CLOSE'\r\nwdw = tk.Tk()\r\nwdw.wm_title(\"Parking User Interface\")\r\nwdw.geometry(\"1000x700\")\r\nwdw.resizable(width=False, height=False)\r\nstart_mark = \"b'<'\"\r\nend_mark = \"b'>'\"\r\nnew_data = False\r\nrcvd_data = \"0000\"\r\naction = False\r\not = [0,0]\r\n\r\n### Init ###\r\n\r\ntry:\r\n ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)\r\n \r\n \r\nexcept:\r\n print(\"INFO: unable to open /dev/ttyACM0. 
Trying /dev/tty/ACM1\")\r\n try:\r\n ser = serial.Serial('/dev/ttyACM1', 9600, timeout=1)\r\n except:\r\n print(\"ERROR: unable to find Arduino\")\r\nser_str = str(ser)\r\nprint(\"INFO: connected to \", ser_str[43:55])\r\n\r\n#Can\r\ncan = tk.Canvas(wdw,width=1000, height=600, bg='ivory')\r\ncan.grid(row=3, column=5, rowspan=4)\r\n\r\n#Limit of park\r\ncan.create_line(10,10, 990, 10, width=3)\r\ncan.create_line(990,10, 990, 590, width=3)\r\ncan.create_line(10,590, 990, 590, width=3)\r\ncan.create_line(10,10, 10, 590, width=3)\r\n\r\n#NbPlaces Slider\r\nrect_x2 = 200+(nb_empty_slots/nb_total_slots)*700\r\ncan.create_rectangle(200,275,900,325,fill=\"red\", outline='red')\r\ncan.create_rectangle(200,275,rect_x2,325,fill=\"green\", outline='green')\r\ncan.create_text((200+rect_x2)/2, 300, text=nb_empty_slots, fill='white')\r\nxtext=(900-rect_x2)/2+rect_x2\r\ncan.create_text(xtext, 300, text=nb_total_slots-nb_empty_slots, fill='white')\r\n\r\nfor loop in range (len(gate)):\r\n \r\n try :\r\n gate_text[loop] = open('gate_' + str(loop) + '.txt', 'r')\r\n file_lines[loop] = gate_text[loop].readlines()\r\n\r\n except:\r\n print(\"ERROR: file\", 'gate_' + str(loop) + '.txt', 'does not exist')\r\n \r\n if gate[loop] == 'OPEN':\r\n \r\n coords[loop] = readcontent_open(file_lines, loop)\r\n gate_draw[loop] = can.create_line(coords[loop][0], coords[loop][1], coords[loop][2], coords[loop][3], width=3, fill='green')\r\n\r\n elif gate[loop] == 'CLOSE':\r\n \r\n coords[loop] = readcontent_close(file_lines, loop)\r\n gate_draw[loop] = can.create_line(coords[loop][0], coords[loop][1], coords[loop][2], coords[loop][3], width=3, fill='red')\r\n\r\n else:\r\n print(\"ERROR: expected OPEN or CLOSE but got; gate[\",loop,\"] =\",gate[loop] )\r\n\r\n\r\n#ReInit button\r\nreinit = ttk.Button(wdw, text=\"Reinitialize\", command=SetValues)\r\nreinit.grid(row=10, column=5)\r\n\r\ninfo = ttk.Label(wdw, foreground='green', text=(\"Connected to \" + ser_str[43:55] + \", Baudrate : \" + ser_str[67:71]))\r\ninfo.grid(row=0, column=5)\r\n\r\n#Gate commands\r\nopen_0 = ttk.Button(wdw, text=\"Open\", command=lambda: open_gate(0))\r\nopen_1 = ttk.Button(wdw, text=\"Open\", command=lambda: open_gate(1))\r\n\r\nopen_0.grid(row=3, column=0)\r\nopen_1.grid(row=5, column=0)\r\n\r\nupdate_can()\r\nwdw.mainloop()\r\n\r\n\r\n\r\n\r\n","repo_name":"jean-schneider/smart-parking-lot","sub_path":"Python/UserInterface.py","file_name":"UserInterface.py","file_ext":"py","file_size_in_byte":8870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1922650086","text":"import torch.nn as nn\nimport torch\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom .m_global import dtype\n\nbce_loss = nn.BCELoss().type(dtype)\nmse_loss = nn.MSELoss().type(dtype)\nl1_loss = nn.L1Loss()\n\nclass Charbonnier_loss(nn.Module):\n \"\"\"L1 Charbonnierloss.\"\"\"\n def __init__(self, epsilon=1e-3):\n super(Charbonnier_loss, self).__init__()\n self.eps = epsilon ** 2\n # self.eps = Variable(torch.from_numpy(np.asarray([epsilon ** 2])))\n # self.eps = Variable(torch.ones())\n\n\n def forward(self, X, Y):\n batchsize = X.data.shape[0]\n diff = X - Y\n square_err = diff ** 2\n square_err_sum_list = torch.sum(square_err, dim=1)\n square_err_sum_list = torch.sum(square_err_sum_list, dim=1)\n square_err_sum_list = torch.sum(square_err_sum_list, dim=1)\n square_err_sum_list = square_err_sum_list + self.eps\n error = torch.sqrt(square_err_sum_list)\n loss = torch.sum(error) / batchsize\n return 
loss\n\n\nclass MSEloss(nn.Module):\n \"\"\"L1 Charbonnierloss.\"\"\"\n def __init__(self, epsilon=1e-3):\n super(MSEloss, self).__init__()\n self.eps = epsilon ** 2\n\n def forward(self, X, Y):\n diff = torch.add(X, -Y)\n sum_square_err = torch.sum(diff * diff)\n loss = sum_square_err / X.data.shape[0] / 2.\n # loss = torch.sum(error)\n return loss\n\nclass weighted_MSEloss(nn.Module):\n def __init__(self):\n super(weighted_MSEloss, self).__init__()\n\n def forward(self, X, Y, weight):\n diff = torch.add(X, -Y)\n diff_square = diff * diff\n positive_label_diff = diff_square * Y\n sum_square_err = torch.sum(diff_square + weight * positive_label_diff)\n loss = sum_square_err / X.data.shape[0] / 2.\n return loss\n\n\nclass Continuity_loss(nn.Module):\n def __init__(self):\n super(Continuity_loss, self).__init__()\n\n def forward(self, X, neighbor):\n loss = Variable(torch.zeros(1)).type(dtype)\n for i in range(neighbor):\n for j in range(neighbor):\n if i == 0 and j == 0:\n continue\n if i > 0 and j == 0:\n corr = X[:-i, :] * X[i:, :]\n if i == 0 and j > 0:\n corr = X[:-i, :] * X[i:, :]\n if i > 0 and j > 0:\n corr = X[:-i, :-j] * X[i:, j:]\n corr = torch.abs(corr)\n loss -= torch.sum(corr * torch.log(corr))\n loss = loss / X.data.shape[0]\n return loss\n\n\ndef continuity_loss(output, neighbor):\n return Continuity_loss()(output, neighbor)\n\n\ndef weightedEuclideanLoss(output, label, weight):\n return weighted_MSEloss()(output, label, weight)\n\n\ndef euclideanLoss(output, label, input_size):\n mse = mse_loss(output, label)\n mse = mse*((input_size)/2.)\n return mse\n\ndef euclideanLoss2(output, label):\n mse = MSEloss()(output, label)\n return mse\n\ndef L1NormLoss(output, label, input_size):\n l1norm = l1_loss(output, label)\n l1norm = l1norm*((input_size)/2.)\n return l1norm\n\ndef C_Loss(output, label):\n c_loss_func = Charbonnier_loss(epsilon=1e-3)\n return c_loss_func(output, label)","repo_name":"Paul-LiPu/DeepWhistle","sub_path":"3.Network_train_and_test/src/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71262090184","text":"from googletrans import Translator\r\nfrom lxml import etree\r\nfrom datetime import datetime\r\nimport os\r\nimport re\r\nimport shutil\r\nimport time\r\n\r\n\r\ndef writeHtml(tds, file_name, output_html):\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\" + file_name.replace(\".html\", \"\") + \"\\n\")\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n # output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n\r\n i = 0\r\n len_ = len(tds)\r\n while i < len_:\r\n output_html.write(\"\\n\")\r\n if tds[i]:\r\n output_html.write(\"\\n\")\r\n else:\r\n output_html.write(\"\\n\")\r\n\r\n if tds[i + 1]:\r\n output_html.write(\"\\n\")\r\n else:\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n i += 2\r\n\r\n output_html.write(\"
\" + tds[i] + \"\" + tds[i + 1] + \"
\\n\")\r\n output_html.write(\"\\n\")\r\n output_html.write(\"\\n\")\r\n\r\n\r\n# 待查找的文件夹路径\r\nfolder_path = './Htmls/'\r\noutput_path = './translate/'\r\nlanguages_path = './languages/'\r\n\r\n# if os.path.exists(output_path):\r\n# # shutil.rmtree(output_path)\r\nos.makedirs(os.path.dirname(output_path), exist_ok=True)\r\n\r\n# 21 种语言\r\nlanguages = {\r\n # \"CN\": \"zh-CN\",\r\n # \"TW\": \"zh-tw\",\r\n \"CZ\": \"cs\",\r\n \"DE\": \"de\",\r\n \"EL\": \"el\",\r\n \"EN\": \"en\",\r\n \"ES\": \"es\",\r\n \"FA\": \"fa\",\r\n \"FR\": \"fr\",\r\n \"HE\": \"he\",\r\n \"ID\": \"id\",\r\n \"IT\": \"it\",\r\n \"KR\": \"ko\",\r\n \"PL\": \"pl\",\r\n \"PT\": \"pt\",\r\n \"RU\": \"ru\",\r\n \"TH\": \"th\",\r\n \"TR\": \"tr\",\r\n \"VN\": \"vi\",\r\n \"NL\": \"nl\",\r\n \"SV\": \"sv\"\r\n}\r\n\r\n\r\nlanguages_maps = {\r\n \"CN\": {},\r\n \"TW\": {},\r\n \"CZ\": {},\r\n \"DE\": {},\r\n \"EL\": {},\r\n \"EN\": {},\r\n \"ES\": {},\r\n \"FA\": {},\r\n \"FR\": {},\r\n \"HE\": {},\r\n \"ID\": {},\r\n \"IT\": {},\r\n \"KR\": {},\r\n \"PL\": {},\r\n \"PT\": {},\r\n \"RU\": {},\r\n \"TH\": {},\r\n \"TR\": {},\r\n \"VN\": {},\r\n \"NL\": {},\r\n \"SV\": {}\r\n}\r\n\r\nfor key in languages_maps:\r\n input_file = open(languages_path + key + '.bin', 'r', encoding='utf-8')\r\n while True:\r\n line = input_file.readline()\r\n line = line.replace(\"\\n\", \"\")\r\n if len(line) == 0:\r\n break\r\n pair_ = line.split(\"::\")\r\n languages_maps[key][pair_[0]] = pair_[1]\r\n input_file.close()\r\n\r\n\r\nerr_set_en = set()\r\nerr_set_cn = set()\r\n\r\n\r\ndef translate_language(tds, dest, source_language):\r\n tds_ = []\r\n err_cnt = 0\r\n\r\n for td in tds:\r\n if str(td.text).strip() and str(td.text).strip() != 'None':\r\n try:\r\n tds_.append(languages_maps[dest][str(td.text).upper().strip()])\r\n except:\r\n if source_language == \"CN\":\r\n err_set_cn.add(str(td.text).strip())\r\n else:\r\n err_set_en.add(str(td.text).strip())\r\n err_cnt += 1\r\n else:\r\n tds_.append(\"\")\r\n\r\n if err_cnt:\r\n raise Exception()\r\n\r\n return tds_\r\n\r\n\r\n# 定义正则表达式规则\r\npattern_en = re.compile(r'^EN_.*')\r\npattern_cn = re.compile(r'^CN_.*')\r\n\r\nerror_files = set()\r\n\r\n# 遍历文件夹中所有文件\r\nfor file_name in os.listdir(folder_path):\r\n # 判断文件名是否符合正则表达式规则\r\n if pattern_en.match(file_name):\r\n for (key, val) in languages.items():\r\n new_file_name = file_name.replace(\"EN_\", key + \"_\")\r\n output_html = open(output_path + new_file_name, 'w', encoding='utf-8')\r\n html = etree.parse(folder_path + file_name, etree.HTMLParser())\r\n # 获取所有td标签内容\r\n tds = html.xpath('/html/body/table/tr/td')\r\n print(datetime.now(), file_name, \" to \", new_file_name)\r\n try:\r\n writeHtml(translate_language(tds, key, \"EN\"), new_file_name, output_html)\r\n except:\r\n error_files.add(file_name)\r\n output_html.close()\r\n print(\"\")\r\n os.remove(folder_path + file_name)\r\n elif pattern_cn.match(file_name):\r\n new_file_name = file_name.replace(\"CN_\", \"TW_\")\r\n output_html = open(output_path + new_file_name, 'w', encoding='utf-8')\r\n html = etree.parse(folder_path + file_name, etree.HTMLParser())\r\n # 获取所有td标签内容\r\n tds = html.xpath('/html/body/table/tr/td')\r\n print(datetime.now(), file_name, \" to \", new_file_name)\r\n try:\r\n writeHtml(translate_language(tds, 'TW', \"CN\"), new_file_name, output_html)\r\n except:\r\n error_files.add(file_name)\r\n output_html.close()\r\n shutil.move(folder_path + file_name, output_path)\r\n\r\nif len(err_set_cn) != 0:\r\n output_error = open(\"./error_cn.bin\", 'w', encoding='utf-8')\r\n 
for err_ in err_set_cn:\r\n output_error.write(err_ + \"\\n\")\r\n output_error.close()\r\n\r\nif len(err_set_en) != 0:\r\n output_error = open(\"./error_en.bin\", 'w', encoding='utf-8')\r\n for err_ in err_set_en:\r\n output_error.write(err_ + \"\\n\")\r\n output_error.close()\r\n\r\nif len(err_set_cn) != 0 or len(err_set_en) != 0:\r\n print(\"translate error size :\", len(err_set_cn) + len(err_set_en))\r\n\r\nfor error in error_files:\r\n print(\"translate error file: \", error)\r\n","repo_name":"Cui-Jiang-Tao/Cui-Jiang-Tao.github.io","sub_path":"posts/work/code/x2.py","file_name":"x2.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"33279146275","text":"from pwn import *\n\nr = remote(\"mc.ax\",31077)\nelf = ELF(\"./ret2generic-flag-reader\")\naddress = p64(elf.symbols.super_generic_flag_reading_function_please_ret_to_me)\nbuffer = b'A' * 32\npayload = buffer + address\n\nr.sendline(payload)\nr.interactive()","repo_name":"yl-ang/CTF","sub_path":"redpwnCTF2021/pwn/ret2generic-flag-reader/ret2generic-solver.py","file_name":"ret2generic-solver.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"28819696555","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n \n if root is None:\n return []\n \n result = []\n q = deque()\n q.append(root)\n while len(q) > 0:\n numnodes= len(q)\n temp = []\n for i in range(numnodes):\n \n node = q.popleft()\n temp.append(node.val)\n for child in node.children:\n q.append(child)\n result.append(temp)\n return result\n \n \n \n \n ","repo_name":"mechtotech/leetcode","sub_path":"429-n-ary-tree-level-order-traversal/429-n-ary-tree-level-order-traversal.py","file_name":"429-n-ary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21771094941","text":"from shutil import copy2, copyfileobj\nimport glob\nimport gzip\nimport os\nimport sys\nimport re\nimport argparse\nimport json\n\nos.environ['MKL_THREADING_LAYER'] = 'GNU'\n\nparser = argparse.ArgumentParser(description='Convertisseur dataset')\nparser.add_argument('-i','--inputFolder', help='Input data folder', required=True)\nparser.add_argument('-t','--taskNumber', help='Task number', required=False, default=\"501\") ##must be > 500\nparser.add_argument('-f','--dataOrganization', help='Data organization file (test and train)', required=True)\n\nargs = parser.parse_args()\n\n\nfrom nnunet.dataset_conversion.utils import generate_dataset_json\n\n# original data : /scratch/tgreni02/Projet/20_Epaule_Pialat/Data/mDixon_e8\noriginal_folder = args.inputFolder + \"/\" #'/local/cnicol03/Segmentation_epaule/mDixon_e8/'\ndest_folder = os.environ['nnUNet_raw_data_base']+\"/nnUNet_raw_data/Task\"+args.taskNumber + \"_Epaule/\"\n\n\ndef createFolderStruct():\n\ttestData= []\n\ttrainData = []\n\t#load data organization from dataOrganization file\n\twith open(args.dataOrganization) as json_file:\n\t\tdata = json.load(json_file)\n\t\tfor te in data['test']:\n\t\t\ttestData.append(te)\n\n\t\tfor tr in data['training']:\n\t\t\ttrainData.append(tr)\n\t\t\n\t#copy test data and labels in test folder \n\tfor folder in 
testData:\n\t\tsource_folder = original_folder+\"P\"+str(folder)\n\t\tcopy_nifti(source_folder, \"imagesTs\")\n\n\t\tcopy_labels(source_folder, \"labelTs\")\n\t\tcompress_labels(\"labelTs\")\n\n\n\n\t#copy train data and labels in train folder \n\tfor folder in trainData:\n\t\tsource_folder = original_folder+\"P\"+str(folder)\n\t\tcopy_nifti(source_folder, \"imagesTr\")\n\n\t\tcopy_labels(source_folder, \"labelTr\")\n\t\tcompress_labels(\"labelTr\")\n\n\t\n\t#normalize the name of image file\n\trename_files(\"imagesTs\")\n\trename_files(\"imagesTr\")\n\t\n\t#normalize the name of the label file\n\trename_label(\"labelTr\")\n\trename_label(\"labelTs\")\n\n\t#create the json file as a description of the dataset\n\tcreate_json()\n\n\t\n\t\n\n\t\ndef create_json():\n\tprint(\"\\ncreating json\\n\")\n\tdataset = dest_folder + \"dataset.json\"\n\timagesTr = dest_folder + \"imagesTr\"\n\timagesTs = dest_folder + \"imagesTs\"\n\tmodalities = (\"MRI_Dixon\",)\n\tlabels = {0: \"background\", 1: \"Suscapullaire\", 2: \"Supraepineux\", 3: \"Infra-epineux\", 4: \"Petit-rond\", 5: \"Deltoide\"}\n\tname = \"Epaule\"\n\tgenerate_dataset_json(dataset, imagesTr, imagesTs, modalities, labels, name)\n\ndef compress_labels(destSubFolder):\n\tprint(\"\\n\\ncompressing label files\")\n\tto_compress_files = glob.iglob(dest_folder + destSubFolder + \"/*.nii\")\n\tfor uncompressed_file in to_compress_files:\n\t\twith open(uncompressed_file, 'rb') as f_in:\n\t\t\twith gzip.open(uncompressed_file + \".gz\", 'wb') as f_out:\n\t\t\t\tcopyfileobj(f_in, f_out)\n\t\tos.remove(uncompressed_file)\n\ndef copy_nifti(sourceFolder, destSubFolder):\n\tif not os.path.exists(dest_folder + destSubFolder):\n\t\tos.makedirs(dest_folder + destSubFolder)\n\t\n\tnifti_files = glob.iglob(sourceFolder + \"/*.nii.gz\")\n\tfor nifti_file in nifti_files:\n\t\tprint(\"copying file: \" + nifti_file + \" ---> \" + dest_folder + destSubFolder)\n\t\tcopy2(nifti_file, dest_folder + destSubFolder)\n\ndef copy_labels(sourceFolder, desSubFolder):\n\t\tlabel_files = glob.iglob(sourceFolder + \"/*ManualSegmentation.nii\")\n\t\tif not os.path.exists(dest_folder + desSubFolder):\n\t\t\tos.makedirs(dest_folder + desSubFolder)\n\t\n\t\tfor label_file in label_files:\n\t\t\tcopy2(label_file, dest_folder + desSubFolder)\n\ndef rename_files(subFolder): \n\tfor f in glob.iglob(dest_folder + subFolder + '/*.nii.gz'):\n\t\tsearch = re.search('P(\\d*)', f)\n\t\tif search != None:\n\t\t\tnumber = search.group(1)\n\t\t\tnew_name = dest_folder + subFolder + \"/epaule_\"+number.zfill(3) + \"_0000.nii.gz\"\n\t\t\tos.rename(f, new_name)\n\ndef rename_label(subFolder): \n\tfor f in glob.iglob(dest_folder + subFolder + '/*.nii.gz'):\n\t\tsearch = re.search('P(\\d*)', f)\n\t\tif search != None:\n\t\t\tnumber = search.group(1)\n\t\t\tnew_name = dest_folder + subFolder + \"/epaule_\"+number.zfill(3) + \".nii.gz\"\n\t\t\tos.rename(f, new_name)\n\n\n\n\t\n\n\ncreateFolderStruct()\nprint(\"\\n\\n---------Done---------\")\n","repo_name":"gpicaud/TDSI21-Shoulder-Muscle-Segmentation","sub_path":"notebooks/conversion_data_tdsi.py","file_name":"conversion_data_tdsi.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"29614532289","text":"import os\nimport pickle\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pyvips\nimport torch\nimport torch.nn as nn\n# Local libraries\nfrom assets.src.models import DummyModel, ImageTabularModel\nfrom 
assets.src.process_dataframe import clean_df\nfrom joblib import dump, load\nfrom PIL import Image\nfrom torchvision.transforms import (ColorJitter, Compose, GaussianBlur,\n Normalize, RandomCrop,\n RandomHorizontalFlip, RandomRotation,\n RandomVerticalFlip, ToTensor)\n\n################################################################\n################################################################\n\nDATA_ROOT = Path(\"/code_execution/data\")\n\n\ndef predict(\n device,\n test_dataframe,\n features_names,\n model_type,\n dropout,\n relapse_only,\n ImageTabularModel_path,\n):\n model = ImageTabularModel(\n len(features_names), model_type, dropout=dropout, relapse_only=relapse_only\n )\n model.load_state_dict(torch.load(ImageTabularModel_path)) # TODO\n model = model.to(device)\n model = model.eval()\n\n preds_dict = {\"filename\": [], \"relapse\": []}\n for idx in range(len(test_dataframe)):\n sample = test_dataframe.iloc[idx]\n tabular = torch.tensor(sample[features_names], dtype=torch.float32)\n tabular = tabular.unsqueeze(0)\n tabular = tabular.to(device)\n\n img_embedding = torch.tensor(sample.latent, dtype=torch.float32).to(device)\n img_embedding = img_embedding.unsqueeze(0)\n img_embedding = img_embedding.to(device)\n\n with torch.no_grad():\n breslow, ulceration, relapse_pred = model(img_embedding, tabular)\n pred = relapse_pred.squeeze().cpu().numpy()\n\n if sample.age > 0.9:\n pred = min(pred, max(0.05, pred - 0.27))\n\n preds_dict[\"filename\"].append(sample[\"filename\"])\n preds_dict[\"relapse\"].append(pred)\n\n return preds_dict\n\n\ndef create_test_df(device, data_root=Path(\"/code_execution/data\")):\n test_dataframe = pd.read_csv(\n data_root / \"test_metadata.csv\"\n ) # os.path.join(data_root,\"/test_metadata.csv\"))\n\n # Remove columns :\n filenames = test_dataframe.filename.to_numpy()\n test_dataframe = test_dataframe.drop(\n columns=[\n \"tif_cksum\",\n \"tif_size\",\n \"us_tif_url\",\n \"eu_tif_url\",\n \"as_tif_url\",\n \"ulceration\",\n \"breslow\",\n \"resolution\",\n ],\n errors=\"ignore\",\n )\n test_dataframe = test_dataframe.fillna(\"nan\")\n #### Load Model :\n dummy_model_path = \"assets/trained_models/remapped_best_model_loss_tritrain_wh_rotate.pth\"\n dummy_model = DummyModel(model_path=dummy_model_path)\n dummy_model = dummy_model.to(device)\n dummy_model = dummy_model.eval()\n\n ##### Construct Clinical data + image embedding\n logits_dict = {\"filename\": [], \"latent\": []}\n for filename in filenames:\n logits_dict[\"filename\"].append(filename)\n img = get_image(data_root / filename) # os.join(data_root,img_path))\n with torch.no_grad():\n logits_dict[\"latent\"].append(\n dummy_model(img.unsqueeze(0).to(device)).cpu().squeeze().numpy()\n )\n latent_df = pd.DataFrame.from_dict(logits_dict)\n test_dataframe = test_dataframe.merge(latent_df, how=\"inner\", on=\"filename\")\n\n # Clean dataframe :\n # body_sites = test_dataframe.body_site.unique()\n # body_site_mapping = {label: i for i, label in enumerate(body_sites)}\n sex_mapping = {1: 0, 2: 1, \"nan\": 3}\n melanoma_history_mapping = {\"NO\": 0, \"YES\": 1, \"nan\": 2}\n body_site_mapping = {\n \"nan\": -1,\n \"trunk\": 0,\n \"arm\": 1,\n \"seat\": 2,\n \"head\": 3,\n \"neck\": 3,\n \"head/neck\": 3,\n \"face\": 4,\n \"trunc\": 5,\n \"leg\": 6,\n \"forearm\": 7,\n \"upper limb\": 8,\n \"shoulder\": 8,\n \"upper limb/shoulder\": 8,\n \"lower limb\": 9,\n \"hip\": 9,\n \"lower limb/hip\": 9,\n \"hand\": 10,\n \"toe\": 11,\n \"foot\": 12,\n \"nail\": 12,\n \"hand/foot/nail\": 12,\n \"thigh\": 13,\n 
\"sole\": 14,\n \"finger\": 15,\n \"scalp\": 16,\n }\n maps = {\n \"sex\": sex_mapping,\n \"melanoma_history\": melanoma_history_mapping,\n \"body_site\": body_site_mapping,\n }\n ####### Apply clean func\n test_dataframe = clean_df(test_dataframe, maps)\n\n return test_dataframe, filenames\n\n\ndef get_image(\n path,\n transforms=Compose(\n [ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]\n ),\n):\n slide = pyvips.Image.new_from_file(path)\n n = slide.get_n_pages()\n # Height and width of page 0:\n page = 0\n slide = pyvips.Image.new_from_file(path, page=page)\n\n size = slide.width * slide.height\n ### Decide which page to keep : ###\n while size > 1.3e6:\n if page > n: # On sait jamais avec ces fous...\n size = -1\n page = -1\n break\n page += 1\n slide = pyvips.Image.new_from_file(path, page=page)\n size = slide.width * slide.height\n # To array :\n img = np.ndarray(\n buffer=slide.write_to_memory(),\n dtype=np.uint8,\n shape=(slide.height, slide.width, slide.bands),\n )\n img = Image.fromarray(img)\n return transforms(img)\n\n\ndef main():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n test_dataframe, filenames = create_test_df(device)\n\n features_names = list(test_dataframe.columns.values)\n # features_names.remove(\"relapse\")\n features_names.remove(\"filename\")\n features_names.remove(\"latent\")\n # features_names.remove(\"ulceration\")\n # features_names.remove(\"breslow\")\n # features_names.remove(\"lymph\")\n # features_names.remove(\"macro\")\n # features_names.remove(\"epithelial\")\n # features_names.remove(\"neutro\")\n age_denominator = 100\n body_site_denominator = 14\n test_dataframe[\"age\"] = test_dataframe[\"age\"].div(age_denominator).round(2)\n test_dataframe[\"age\"] = test_dataframe[\"age\"].div(age_denominator).round(2)\n test_dataframe[\"body_site\"] = test_dataframe[\"body_site\"].div(body_site_denominator).round(2)\n test_dataframe[\"body_site\"] = test_dataframe[\"body_site\"].div(body_site_denominator).round(2)\n\n model_type = \"FC\"\n dropout = 0.2\n relapse_only = False\n ImageTabularModel_path = (\n \"assets/trained_models/best_on_val_finetune.pth\" # \"assets/trained_models/best_on_val.pth\"\n )\n # Get preds :\n preds_dict = predict(\n device,\n test_dataframe,\n features_names,\n model_type,\n dropout,\n relapse_only,\n ImageTabularModel_path,\n )\n submission_format = pd.DataFrame.from_dict(preds_dict)\n # save as \"submission.csv\" in the root folder, where it is expected\n submission_format.to_csv(\"submission.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"drivendataorg/visiomel-melanoma","sub_path":"2nd Place/inference/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7034,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"23590296210","text":"import pandas as pd\nfrom decimal import Decimal\nimport numpy_financial as npf\nfrom statistics import mean\nfrom array import *\nimport numpy as np\nimport decimal\nimport plotly.graph_objects as go\nfrom Variables import *\nfrom tabulate import tabulate\nimport random\n\nx = []\na = []\nb=[]\n# listOfN = array('f', [])\n\n# rand_list =array('i', [])\n# rate_array=array('i', [])\n\n# num_years_array = array('i', [])\npmt_array = []\npv_array = array('i', [])\nrate_array = []\nfv_array = []\nindex_array =[]\ndecimal.getcontext().prec = 10000\n\n# num_years_array = np.array([])\n# pmt_array= np.array([])\n# pv_array= np.array([])\n# 
rate_array=np.array([])\n\n# print(\"rate: \",rate,\"%\")\n\nvariables_imported = [\n# [\"pmt: \",pmt],\n# [\"pv: \",pv],\n# [\"Number of entries: \",num_years],\n# [\"periods for each entry: \",L],\n[\"return_lower: \", return_lower],\n[\"return_upper: \", return_upper],\n[\"amount_lower: \", amount_lower],\n[\"amount_upper: \", amount_upper],\n[\"current_age: \", current_age],\n[\"Age_end: \", Age_end],\n[\"simulation_trials: \", simulation_trials],\n]\n\nprint(tabulate(variables_imported))\nnum_years = Age_end - current_age\n\n\ndef randinputs(n_limit, lower, upper):\n n = 0\n # n_limit=10\n # rand_return =[]\n while n < n_limit:\n # for i in n_limit:\n rand_return = round(random.uniform(lower, upper), 2)\n n = n + 1\n return rand_return\n\n# for i in range(num_years):\n# rand_return = (randinputs(num_years, return_lower, return_upper))\n# rand_list.append(rand_return)\n # global rate\n # rate= (rand_return)\n\n# print(\"rant_list\",rand_list)\n\n# rate = (rand_list)\n# rate= [10]\n# for i in rand_list:\n# rate.append(rand_list)\n\n\ndef isfloatnum(promptflt):\n while True:\n try:\n valuefl = float(input(promptflt))\n except ValueError:\n print(\"Try again!\")\n else:\n break\n return valuefl\n\n\ndef isintnum(promptint):\n while True:\n try:\n valueint = int(input(promptint))\n except ValueError:\n print(\"Try again!\")\n else:\n break\n return valueint\n\n# rate = isfloatnum(\"Enter Rate: \")\n# pmt = isfloatnum(\"Enter PMT: \")\n# pv = isfloatnum(\"Enter PV: \")\n# num_years = isintnum(\"Number of entries: \")\n\n# for i in range(num_years):\n \n# #L = isfloatnum(\"Enter the periods for each entry: \")\n# listOfN.append(L)\n\n\ndef future_value(rate, nper, pmt, pv):\n global y\n \n # y = Decimal(npf.fv(rate, nper, pmt, pv))\n y = (npf.fv(rate, nper, pmt, pv))\n \n # y = (round( Decimal(y), 2))\n\n# nper=1\n# for i in range(num_years):\n# nper += 1\n# rate = randinputs(num_years, return_lower, return_upper)\n# future_value((rate/100), nper, pmt, pv)\n# x.append(y)\n\n\ndef indexer(num_years):\n n = 0\n global index_array\n while n < num_years:\n # for n in range(num_years):\n n += 1\n index_array.append(n)\n # print(\"index_array\",index_array)\n # print(\"n\",n)\n a.append(index_array)\n\n\ndef rate(num_years):\n n = 0\n global rate_array\n global rate\n while n < num_years:\n n += 1\n rate = randinputs(1, return_lower, return_upper)\n rate_array.append(rate)\n # print(\"rate\",rate)\n # print(\"rate_array\",rate_array) \n a.append(rate_array)\n\n\ndef payment(num_years):\n n = 0\n global pmt_array\n global amount\n while n < num_years:\n n += 1\n amount = round(randinputs(1, amount_lower, amount_upper) * 12,2)\n pmt_array.append(amount)\n # print(\"rate\",amount)\n # print(\"rate_array\",pmt_array) \n a.append(pmt_array)\n\n \ndef presentvalue(num_years):\n n = 1\n global pv_array\n global pv\n pv_array.append(pv)\n while n < num_years:\n n += 1\n pv_array.append(0)\n a.append(pv_array)\n\n\nindexer(num_years)\nrate(num_years)\npayment(num_years)\npresentvalue(num_years)\n\n# def df_define():\npd.set_option(\"max_colwidth\", None)\na_transposed = []\na_transposed = pd.DataFrame(a).transpose()\na_transposed.columns = [\"index\", \"rate\", \"pmt\", \"pv\"]\n\n\ndef futurevalue(num_years):\n n = 0\n # df_define()\n rate_index = a_transposed['rate'].loc[0]\n pmt_index = a_transposed['pmt'].loc[0]\n pv_index = abs(a_transposed['pv'].loc[0])\n index_index = a_transposed['index'].loc[0]\n\n global fv_array\n global fv\n fv_array.append(round(abs(npf.fv(rate_index/100, 1, pmt_index, 
pv_index)),2))\n for n in range(num_years - 1):\n n += 1\n rate_index = a_transposed['rate'].loc[n]\n pmt_index = a_transposed['pmt'].loc[n]\n pv_index = fv_array[n - 1]\n index_index = a_transposed['index'].loc[n]\n \n fv = round(abs(npf.fv(rate_index/100, 1, pmt_index, pv_index)), 2)\n fv_array.append(fv)\n \n # print(\"rate\",amount)\n # print(\"rate_array\",pmt_array) \n \n a.append(fv_array)\n n = num_years - 1\n print(\"rate\", a_transposed['rate'].loc[n])\n print(\"pmt\", a_transposed['pmt'].loc[n])\n print(\"PV1\", fv_array[n - 1])\n print(\"PV2\", a_transposed['pmt'].loc[n])\n\n print(\"fv\", round(npf.fv(a_transposed['rate'].loc[n]/100, 1, a_transposed['pmt'].loc[n], fv_array[n - 1]), 2))\n\n\nfuturevalue(num_years)\n\n# for n in range(num_years):\n# rate_index = a_transposed['rate'].loc[n]\n# pmt_index= a_transposed['PMT'].loc[n]\n# pv_index= a_transposed['PV'].loc[n]\n# index_index = a_transposed['index'].loc[n]\n# print(\"rate_index\",rate_index)\n# print(\"pmt_index\",pmt_index)\n# print(\"pv_index\",pv_index)\n# print(\"index_index\",index_index)\n\n\ndef simulate(simulation_trials, num_years):\n n = 0\n # df_define()\n rate_index = a_transposed['rate'].loc[0]\n pmt_index = a_transposed['pmt'].loc[0]\n pv_index = abs(a_transposed['pv'].loc[0])\n index_index = a_transposed['index'].loc[0]\n\n global fv_array\n global fv\n fv_array.append(abs(npf.fv(rate_index/100, 1, pmt_index, pv_index)))\n for n in range(num_years - 1):\n n += 1\n rate_index = a_transposed['rate'].loc[n]\n pmt_index = a_transposed['pmt'].loc[n]\n pv_index = fv_array[n - 1]\n index_index = a_transposed['index'].loc[n]\n \n fv = round(abs(npf.fv(rate_index/100, 1, pmt_index, pv_index)), 2)\n fv_array.append(fv)\n \n # print(\"rate\",amount)\n # print(\"rate_array\",pmt_array) \n \n b.append(fv_array)\n\n\n\n# print(\"simulate(num_years)\",simulate(num_years))\n\n# print(\"num_years_array\",num_years_array)\n\n# print((\"rate_array\",rate_array))\n# print((\"num_years_array\",num_years_array))\n# print((\"pmt_array\",pmt_array))\n# print((\"pv_array\",pv_array))\n\n# print((\"num_years\",num_years))\n\n# print(\"np.matrix(x)\",np.matrix(x))\n\n\n\na_transposed = pd.DataFrame(a).transpose()\na_transposed.columns = [\"index\", \"rate\", \"pmt\", \"pv\", \"fv\"]\nprint(\"pd.DataFrame(a_transposed) \\n\", a_transposed)\na_transposed_row_PV = a_transposed[\"pv\"] + 1\n\nprint(a_transposed_row_PV.loc[0] + 50)\n\n\nif num_years > 1:\n print('Amount contributed: ', round((sum(pmt_array)+sum(pv_array)), 2))\n print('Mean of fv: ', round((mean(fv_array)), 2))\n print('fv', fv_array[num_years - 1])\n print('Gain: ', round((fv_array[num_years - 1]-(sum(pmt_array)+sum(pv_array))), 2))\n \nsimulate(simulation_trials, num_years)\n\n\nfig = go.Figure(data=[go.Table(header=dict(values=[\"index\", \"rate\", \"pmt\", \"pv\", \"fv\"]),\n cells=dict(values=[a_transposed['index'],a_transposed['rate'],a_transposed['pmt'],a_transposed['pv'],a_transposed['fv']]))\n ])\n\nfig.show()\n# if num_years > 1:\n# ##print('mean: ', round(mean(x), 2))\n# print('mean: ', round((mean(x)), 2))\n\n\n# callback same file to repeat another calc\nprint(b)\nprint(\" \")\n\nprint(\"End of script!\")\n\n","repo_name":"nmpereira/MonteCarloSim","sub_path":"src/FV.py","file_name":"FV.py","file_ext":"py","file_size_in_byte":7761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38132444275","text":"# coding=utf-8\n# ----------------\n# author: weiyu\n# create_time : 9/29/2022\n# description 
:\n\n\n# 给定一个排序数组和一个目标值,在数组中找到目标值,并返回其索引。如果目标值不存在于数组中,返回它将会被按顺序插入的位置。\n#\n# 请必须使用时间复杂度为 O(log n) 的算法。\n#\n#\n#\n# 示例 1:\n#\n#\n# 输入: nums = [1,3,5,6], target = 5\n# 输出: 2\n#\n#\n# 示例 2:\n#\n#\n# 输入: nums = [1,3,5,6], target = 2\n# 输出: 1\n#\n#\n# 示例 3:\n#\n#\n# 输入: nums = [1,3,5,6], target = 7\n# 输出: 4\n#\n# 提示:\n#\n#\n# 1 <= nums.length <= 10⁴\n# -10⁴ <= nums[i] <= 10⁴\n# nums 为 无重复元素 的 升序 排列数组\n# -10⁴ <= target <= 10⁴\n#\n#\n# Related Topics 数组 二分查找 👍 1733 👎 0\n\nfrom typing import List\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def searchInsert(self, nums: List[int], target: int) -> int:\n left = 0\n right = len(nums) -1\n while left <= right:\n mid = int((left + right)/2)\n if nums[mid] < target:\n left =mid + 1\n elif nums[mid] == target:\n return mid\n else:\n right = mid - 1\n if nums[mid] > target:\n return mid + 1\n\n return mid if nums[mid] > target else mid + 1\n\n# leetcode submit region end(Prohibit modification and deletion)\n\nif __name__ == '__main__':\n obj = Solution()\n nums = [1,3,5,6]\n print(obj.searchInsert(nums,5))","repo_name":"WeiYu53111/coding","sub_path":"src/main/python/binarySearch/35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72929184586","text":"from collections import defaultdict\n\nclass ListNode:\n def __init__(self, key=None, value=None):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass MyHashMap:\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.size = 1000\n self.table = defaultdict(ListNode)\n\n def put(self, key: int, value: int) -> None:\n \"\"\"\n value will always be non-negative.\n \"\"\"\n index = key % self.size\n if self.table[index].value is None:\n # 'self.table[index].value is None' 이 아닌\n # 'self.table[index] is None' 을 사용할 경우\n # 존재하지 않는 인덱스를 조회했을 때, 바로 빈 ListNode를 생성하기 때문\n # 즉, 절대로 True가 되지 않는 버그가 발생\n self.table[index] = ListNode(key, value)\n return\n\n # 해당 인덱스에 노드가 존재하는 경우\n p = self.table[index]\n while p:\n if p.key == key:\n p.value = value\n return\n if p.next is None:\n break\n p = p.next\n p.next = ListNode(key, value)\n\n def get(self, key: int) -> int:\n \"\"\"\n Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key\n \"\"\"\n index = key % self.size\n if self.table[index].value is None:\n return -1\n p = self.table[index]\n while p:\n if p.key == key:\n return p.value\n p = p.next\n return -1\n\n def remove(self, key: int) -> None:\n \"\"\"\n Removes the mapping of the specified value key if this map contains a mapping for the key\n \"\"\"\n index = key % self.size\n # 해당 key에 아무 것도 없는 경우\n if self.table[index].value is None:\n return\n\n p = self.table[index]\n # case1. 인덱스의 첫 번째 노드 일 때,\n if p.key == key:\n self.table[index] = ListNode() if p.next is None else p.next\n # self.table[index] = None으로 할당한다면,\n # 조회 함수에서 self.table[index].value is None 비교 할 때\n # 에러 발생\n return\n # case2. 
연결 리스트 노드 삭제\n prev = p\n while p:\n if p.key == key:\n prev.next = p.next\n return\n prev, p = p, p.next\n# Your MyHashMap object will be instantiated and called as such:\n# obj = MyHashMap()\n# obj.put(key,value)\n# param_2 = obj.get(key)\n# obj.remove(key)\n\n# leetcode: 706\n# 개별 체이닝 방식\n","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/1회차/B28_DesignHashMap2.py","file_name":"B28_DesignHashMap2.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18391898105","text":"# Given a lowercase string that has alphabetic characters only and no spaces, return the highest value of consonant substrings. Consonants are any letters of the alphabet except \"aeiou\".\n\n# We shall assign the following values: a = 1, b = 2, c = 3, .... z = 26.\n\nimport re\ndef solve(s):\n alpha = { \"a\": 1, \"b\":2, \"c\":3, \"d\":4, \"e\":5, \"f\": 6,\n \"g\": 7, \"h\":8, \"i\":9, \"j\":10, \"k\":11, \"l\": 12, \n \"m\": 13, \"n\":14, \"o\":15, \"p\":16, \"q\":17, \"r\": 18, \n \"s\": 19, \"t\":20, \"u\":21, \"v\":22, \"w\":23, \"x\": 24, \"y\":25, \"z\":26\n }\n high = 0\n current = 0\n for x in s:\n if re.search('[^aeiou]', x):\n current += alpha[x]\n if high < current:\n high = current\n else:\n current = 0\n return high\n ","repo_name":"ZacRayTho/Leetcode-Codewars-Solutions","sub_path":"Codewars/Python/ConsonantValue.py","file_name":"ConsonantValue.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10040134060","text":"\"\"\"\nChristina Wang\t8/17/22\tCSCI-UA 2 - 006\nAssignment #7 Problem #2\n\"\"\"\n\n#a\nimport re\n# function: cleanup_string\n# input: a string to clean up\n# processing: (1) makes the entire string lowercase.\n# (2) retains only alphabetic, numeric and space characters\n# [all punctuation and special characters removed]\n# output: returns the cleaned up string\ndef cleanup_string(data):\n lowerstring=data.lower()\n cleanstring=re.sub(\"[!@#$%^&*().,?]\",\"\",lowerstring)\n return cleanstring\n\n#b\nimport os\nfiles = os.listdir(\"data\")\n#print (files)\n\n#c\n\ndef convert(lst):\n it = iter(lst)\n res_dct= dict(zip(it,it))\n return res_dct\n\n#create dictionary\n#search = {}\n#open file for reading\n#f_0jOHqNrSpo = open('data/0jOHqNrSpo.txt', 'r')\n#read all data as one long string\n#f_0jOHqNrSpo_all=f_0jOHqNrSpo.read()\n#run cleanup_string on it\n#f_0jOHqNrSpo_clean=cleanup_string(f_0jOHqNrSpo_all)\n#cut apart string based on \" \"\n#f_0jOHqNrSpo_split=f_0jOHqNrSpo_clean.split()\n#if words repeated within string, delete\n#f_0jOHqNrSpo_norepeat=\" \".join(sorted(set(f_0jOHqNrSpo_split), key=f_0jOHqNrSpo_split.index))\n#cut again\n#f_0jOHqNrSpo_norepeatsplit=f_0jOHqNrSpo_norepeat.split()\n#convert list to resemble dictionary\n#for i in range(len(f_0jOHqNrSpo_norepeatsplit)):\n# f_0jOHqNrSpo_norepeatsplit.insert((i*2)+1, '0jOHqNrSpo.txt')\n#enter words into dictionary\n#search=convert(f_0jOHqNrSpo_norepeatsplit)\n#close file\n#f_0jOHqNrSpo.close()\n\n#run for all files\n\ndirectory= 'data'\n\n#create dictionary\nsearch={}\nsearchfile={}\n#run for all files\nfor file in files:\n if file.endswith('.txt'):\n #open file for reading\n f_file = open(os.path.join(directory, file), 'rt')\n #read all data as one long string\n f_file_all=f_file.read()\n #run cleanup_string on it\n f_file_clean=cleanup_string(f_file_all)\n #cut apart string based on \" \"\n 
","repo_name":"boorooksus/Algorithm-Study","sub_path":"LeetCode/1회차/B28_DesignHashMap2.py","file_name":"B28_DesignHashMap2.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"18391898105","text":"# Given a lowercase string that has alphabetic characters only and no spaces, return the highest value of consonant substrings. Consonants are any letters of the alphabet except \"aeiou\".\n\n# We shall assign the following values: a = 1, b = 2, c = 3, .... z = 26.\n\nimport re\ndef solve(s):\n    alpha = { \"a\": 1, \"b\":2, \"c\":3, \"d\":4, \"e\":5, \"f\": 6,\n            \"g\": 7, \"h\":8, \"i\":9, \"j\":10, \"k\":11, \"l\": 12, \n            \"m\": 13, \"n\":14, \"o\":15, \"p\":16, \"q\":17, \"r\": 18, \n            \"s\": 19, \"t\":20, \"u\":21, \"v\":22, \"w\":23, \"x\": 24, \"y\":25, \"z\":26\n    }\n    high = 0\n    current = 0\n    for x in s:\n        if re.search('[^aeiou]', x):\n            current += alpha[x]\n            if high < current:\n                high = current\n        else:\n            current = 0\n    return high\n    ","repo_name":"ZacRayTho/Leetcode-Codewars-Solutions","sub_path":"Codewars/Python/ConsonantValue.py","file_name":"ConsonantValue.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10040134060","text":"\"\"\"\nChristina Wang\t8/17/22\tCSCI-UA 2 - 006\nAssignment #7 Problem #2\n\"\"\"\n\n#a\nimport re\n# function: cleanup_string\n# input: a string to clean up\n# processing: (1) makes the entire string lowercase.\n# (2) retains only alphabetic, numeric and space characters\n# [all punctuation and special characters removed]\n# output: returns the cleaned up string\ndef cleanup_string(data):\n    lowerstring=data.lower()\n    cleanstring=re.sub(\"[!@#$%^&*().,?]\",\"\",lowerstring)\n    return cleanstring\n\n#b\nimport os\nfiles = os.listdir(\"data\")\n#print (files)\n\n#c\n\ndef convert(lst):\n    it = iter(lst)\n    res_dct= dict(zip(it,it))\n    return res_dct\n\n#create dictionary\n#search = {}\n#open file for reading\n#f_0jOHqNrSpo = open('data/0jOHqNrSpo.txt', 'r')\n#read all data as one long string\n#f_0jOHqNrSpo_all=f_0jOHqNrSpo.read()\n#run cleanup_string on it\n#f_0jOHqNrSpo_clean=cleanup_string(f_0jOHqNrSpo_all)\n#cut apart string based on \" \"\n#f_0jOHqNrSpo_split=f_0jOHqNrSpo_clean.split()\n#if words repeated within string, delete\n#f_0jOHqNrSpo_norepeat=\" \".join(sorted(set(f_0jOHqNrSpo_split), key=f_0jOHqNrSpo_split.index))\n#cut again\n#f_0jOHqNrSpo_norepeatsplit=f_0jOHqNrSpo_norepeat.split()\n#convert list to resemble dictionary\n#for i in range(len(f_0jOHqNrSpo_norepeatsplit)):\n#    f_0jOHqNrSpo_norepeatsplit.insert((i*2)+1, '0jOHqNrSpo.txt')\n#enter words into dictionary\n#search=convert(f_0jOHqNrSpo_norepeatsplit)\n#close file\n#f_0jOHqNrSpo.close()\n\n#run for all files\n\ndirectory= 'data'\n\n#create dictionary\nsearch={}\nsearchfile={}\n#run for all files\nfor file in files:\n    if file.endswith('.txt'):\n        #open file for reading\n        f_file = open(os.path.join(directory, file), 'rt')\n        #read all data as one long string\n        f_file_all=f_file.read()\n        #run cleanup_string on it\n        f_file_clean=cleanup_string(f_file_all)\n        #cut apart string based on \" \"\n        f_file_split=f_file_clean.split()\n        #if words repeated within string, delete\n        f_file_norepeat=\" \".join(sorted(set(f_file_split), key=f_file_split.index))\n        #cut again\n        f_file_norepeatsplit=f_file_norepeat.split()\n        #convert list to resemble dictionary\n        for i in range(len(f_file_norepeatsplit)):\n            f_file_norepeatsplit.insert((i*2)+1, file)\n        #enter words into dictionary\n        searchfile=convert(f_file_norepeatsplit)\n        #update dictionary to big dictionary\n        search.update(searchfile)\n        #close file\n        f_file.close()\n        continue\n    else:\n        continue\n\nprint ('happy:', search['happy'])\nprint ('cat:', search['cat'])\nprint ('rainbow:', search['rainbow'])\nprint ('apple:', search['apple'])\n\n","repo_name":"crw333168/cs-002","sub_path":"assign7/WangChristina_assign7_part2.py","file_name":"WangChristina_assign7_part2.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"19803784954","text":"import matplotlib.pyplot as plt\nimport fitsio\nimport h5py\nimport scipy as sp\nfrom SaclayMocks import util\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", type=str, help=\"ex: Out/v2.7.1/from_transmission\")\nparser.add_argument(\"--to-do\", type=str, nargs=\"*\", help=\"ex: cf xcf\")\nparser.add_argument(\"--title\", default=\"\")\nargs = parser.parse_args()\n\nindir = args.i\nif 'cf' in args.to_do:\n    cf_file = 'e_cf.fits'\n    fit_file = 'result_cf.h5'\n    nrp = 50\n    nrt = 50\n    fit_name = 'LYA(LYA)xLYA(LYA)'\nif 'xcf' in args.to_do:\n    cf_file = 'e_xcf.fits'\n    fit_file = 'result_xcf.h5'\n    nrp = 100\n    nrt = 50\n    fit_name = 'LYA(LYA)xQSO'\n\ncf = fitsio.FITS(indir+\"/Correlations/\"+cf_file)\nfit = h5py.File(indir+\"/Fit/\"+fit_file)\n\nrp = cf[1].read()['RP']\nrt = cf[1].read()['RT']\nda = cf[1].read()['DA']\nco = cf[1].read()['CO']\nda_2d = util.convert1DTo2D(da, nrp, nrt)\nco_2d = util.convert1DTo2D(sp.diag(co), nrp, nrt)\nda_fit = fit[fit_name]['fit'].value\nda_fit_2d = util.convert1DTo2D(da_fit, nrp, nrt)\nextent = [rt.min(), rt.max(), rp.min(), rp.max()]\n\nplt.imshow((da_2d - da_fit_2d) / sp.sqrt(co_2d), origin='lower',cmap='seismic', vmin=-2., vmax=2., extent=extent)\nplt.colorbar()\nplt.xlabel(r'$rt [Mpc.h^{-1}]$')\nplt.ylabel(r'$rp [Mpc.h^{-1}]$')\nplt.title(args.title)\nplt.show()\n","repo_name":"igmhub/SaclayMocks","sub_path":"analysis/plot_bidim.py","file_name":"plot_bidim.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"40918878086","text":"from transformers import pipeline\n\n\nsentences = [\n    \"We are very happy to show you the 🤗 Transformers library.\",\n    \"This library sucks.\",\n    \"One plus one equals two.\"\n]\n\nclassifier = pipeline(\"sentiment-analysis\")\nresult = classifier(sentences)\nprint(result)\n\nzeroshot = pipeline(\"zero-shot-classification\")\nresult = zeroshot(\"Bilibili is a good website\", \ncandidate_labels=[\"website\", \"game\",'sport','music'])
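\n\n# Note: the sentiment results are dicts like {'label': 'POSITIVE', 'score': ...};\n# the zero-shot pipeline returns the candidate labels re-ranked, with 'labels'\n# sorted by descending 'scores', so result['labels'][0] is the model's best guess.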
","repo_name":"Kira1108/huggingface_pipelines","sub_path":"1.use_huggingface.py","file_name":"1.use_huggingface.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42404578363","text":"# event + signal\r\n# press ESC to quit; print the mouse coordinates\r\nimport sys\r\nfrom PyQt5.QtWidgets import QWidget, QLCDNumber, QSlider, QVBoxLayout, QApplication, QGridLayout, QLabel, QMainWindow\r\nfrom PyQt5.QtCore import Qt, pyqtSignal, QObject\r\n\r\n# class Exam1(QWidget): # inherits QWidget\r\n#     def __init__(self):\r\n#         super().__init__() # initialize the parent object\r\n#         self.initUI()\r\n#\r\n#     def initUI(self):\r\n#         lcd = QLCDNumber(self) # the LCD number display\r\n#         sld = QSlider(Qt.Horizontal, self) # create a slider, a horizontal bar\r\n#\r\n#         vbox = QVBoxLayout()\r\n#         vbox.addWidget(lcd)\r\n#         vbox.addWidget(sld)\r\n#\r\n#         self.setLayout(vbox)\r\n#         sld.valueChanged.connect(lcd.display) # moving the bar updates the displayed value\r\n#\r\n#         self.setGeometry(300,300,250,150)\r\n#         self.setWindowTitle('Signal and slot')\r\n#         self.show() # required\r\n#\r\n#     def keyPressEvent(self, e): ## quit on ESC (done by overriding)\r\n#         if e.key() == Qt.Key_Escape:\r\n#             self.close()\r\n#\r\n#\r\n# if __name__ == '__main__':\r\n#     app = QApplication(sys.argv) # create the QApplication object\r\n#     w = Exam1()\r\n#     sys.exit(app.exec_()) # exit the program ## run the event main loop # sys.exit() runs once app.exec_ returns\r\n\r\n\r\n# class Exam2(QMainWindow): # inherits QMainWindow\r\n#     def __init__(self):\r\n#         super().__init__() # initialize the parent object\r\n#         self.initUI()\r\n#\r\n#     def initUI(self):\r\n#         grid = QGridLayout()\r\n#         grid.setSpacing(10)\r\n#\r\n#         x, y = 0, 0\r\n#\r\n#         self.text = \"x: {0}, y : {1}\".format(x,y)\r\n#\r\n#         self.label = QLabel(self.text, self)\r\n#         grid.addWidget(self.label, 0, 0, Qt.AlignTop)\r\n#\r\n#         self.setMouseTracking(True)\r\n#\r\n#         self.setLayout(grid)\r\n#\r\n#\r\n#         self.setGeometry(300,300,350,200)\r\n#         self.setWindowTitle('Event object')\r\n#         self.show() # required\r\n#\r\n#     def mouseMoveEvent(self, e):\r\n#         x = e.x()\r\n#         y = e.y()\r\n#\r\n#         text = \"x: {0}, y: {1}\".format(x,y)\r\n#         self.label.setText(text)\r\n#\r\n# if __name__ == '__main__':\r\n#     app = QApplication(sys.argv) # create the QApplication object\r\n#     w = Exam2()\r\n#     sys.exit(app.exec_()) # exit the program ## run the event main loop # sys.exit() runs once app.exec_ returns\r\n\r\nclass Communicate(QObject): # inherit QObject so that pyqtSignal() can be used\r\n    closeApp = pyqtSignal()\r\n\r\nclass Exam3(QMainWindow): # inherits QMainWindow\r\n    def __init__(self):\r\n        super().__init__() # initialize the parent object\r\n        self.initUI()\r\n\r\n    def initUI(self):\r\n        self.c = Communicate()\r\n        self.c.closeApp.connect(self.close)\r\n\r\n        self.setGeometry(300,300,350,200)\r\n        self.setWindowTitle('Event object')\r\n        self.show() # required\r\n\r\n    def mousePressEvent(self, event):\r\n        self.c.closeApp.emit()\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv) # create the QApplication object\r\n    w = Exam3()\r\n    sys.exit(app.exec_()) # exit the program ## run the event main loop # sys.exit() runs once app.exec_ returns","repo_name":"dup06087/LAB","sub_path":"6_event+signal.py","file_name":"6_event+signal.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"26632193935","text":"# leetcode 33. Search in Rotated Sorted Array\n
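# Plan (as implemented below): locate the rotation point (the index of the\n# smallest element) with one binary search, then binary-search the left part\n# and, failing that, the right part for the target.\n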
\nclass Solution(object):\n    def search(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: int\n        \"\"\"\n        # first find the index of the first element of the second ascending run\n        # (i.e. the rotation point / the minimum)\n        l = 0\n        r = len(nums) - 1\n        while l < r:\n            mid = (l + r)//2\n            if nums[mid] > nums[r]:\n                l = mid + 1\n            else:\n                r = mid\n        pol = l\n        ans = self.binary_search(target, nums[:pol])\n        if ans == -1:\n            ans = self.binary_search(target, nums[pol:])\n            if ans != -1:\n                ans += len(nums[:pol])\n        \n        return ans\n    # helper: plain binary search returning the index of target, or -1\n    def binary_search(self, target, nums):\n        index = -1\n        l = 0\n        r = len(nums) - 1\n        while l <= r:\n            mid = (l+r)//2\n            if nums[mid] < target:\n                l = mid + 1\n            elif nums[mid] > target:\n                r = mid - 1\n            else:\n                index = mid\n                break\n        return index\n","repo_name":"Teingi/python-test","sub_path":"leetcode_033.py","file_name":"leetcode_033.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"81"}
{"seq_id":"12549460210","text":"import torch.nn as nn\nfrom transformers import *\n\n\nclass Model(nn.Module):\n    def __init__(self):\n        super(Model, self).__init__()\n\n        self.bert = BertModel.from_pretrained('bert-base-uncased')\n        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n        self.extra_modules = nn.ModuleDict({\n            'fcn1': nn.Linear(768, 30),  # The hidden_size of bert-base-uncased model is 768\n            'relu': nn.ReLU(),\n            'fcn2': nn.Linear(30, 1),\n\n        })\n\n    def forward(self, input_sentences, input_sentences_2):\n        tokenized_input = self.tokenizer(input_sentences, input_sentences_2, add_special_tokens=True, padding=True, return_tensors='pt')\n        output = self.bert(input_ids=tokenized_input[\"input_ids\"].to('cuda'), token_type_ids=tokenized_input[\"token_type_ids\"].to('cuda'), attention_mask=tokenized_input[\"attention_mask\"].to('cuda'))\n        (last_hidden_state, _) = output\n        # last_hidden_state: (batch_size, sequence_length, hidden_size)\n        cls_hidden_states = last_hidden_state[:, 0, :]\n        # cls_hidden_states: (batch_size, hidden_size)\n\n        x = self.extra_modules['fcn1'](cls_hidden_states)\n        x = self.extra_modules['relu'](x)\n        x = self.extra_modules['fcn2'](x)\n\n        # x : (batch_size, 1)\n        out = x.squeeze(1)\n\n        # out: (batch_size,)\n        return out\n\n","repo_name":"inyukwo1/simple_bert_example","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41332387407","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\"\"\"\n\nStrategies\n==========\n..\nThis module implements different strategies for retrieving information in the shape of Graphs.\n\nBased on the :ref:`drivers.graph_builder` and spatial operations.\n\"\"\"\nfrom drivers.tree_builder import TreeNeo,buildTreeNeo\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom mesh.models import initMesh\nimport pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point\n\n__author__ = \"Juan Escamilla Mólgora\"\n__copyright__ = \"Copyright 2017, JEM\"\n__license__ = \"GPL\"\n__version__ = \"2.2.1\"\n__mantainer__ = \"Juan\"\n__email__ =\"molgor@gmail.com\"\n__status__ = \"Prototype\"\n\n\nimport logging\n\nlogger = logging.getLogger('biospytial.traversals')\n\nimport numpy as np\nfrom itertools import imap, chain\n\n####\n## Multifunc\n## These are functions for handling dataframes and creating subsets.\ndef toGeoDataFrame(pandas_dataframe,xcoord_name,ycoord_name,srs = 'epsg:4326'):\n    \"\"\"\n    Convert a Pandas object to 
GeoDataFrame\n Inputs:\n pandas_dataframe : the pandas object to spatialise\n xcoord_name : (String) the column name of the x coordinate.\n ycoord_name : (String) the column name of the y coordinate. \n srs : (String) the source referencing system in EPSG code.\n e.g. epsg:4326 .\n \"\"\"\n data = pandas_dataframe\n data['geometry'] = data.apply(lambda z : Point(z[xcoord_name], z[ycoord_name]), axis=1)\n #data['geometry'] = data.apply(lambda z : Point(z.LON, z.LAT), axis=1)\n\n new_data = gpd.GeoDataFrame(data)\n new_data.crs = {'init':'epsg:4326'}\n return new_data\n\n\n\n\n###############\n## Cell-wise: strategies \n###############\n\ndef getEnvironmentAndRichnessFromListOfCells(list_of_cells,taxonomic_level_name,vars=['Elevation','MaxTemperature', 'MeanTemperature','MinTemperature','Precipitation','Vapor','SolarRadiation','WindSpeed']):\n \"\"\"\n A wrapper function that returns Richness, environmental covariates and centroids.\n For a tailored version of this use the individual functions and concatenate dataframes.\n \n Parameters : \n vars : (list) name of the environmental layers. By default select all layers.\n taxonomic_level_name : (String) the name of the taxonomic level to which take the richness from.\n\n Returns : A spatial dataframe (geopandas) \n \n \"\"\"\n env = getEnvironmentalCovariatesFromListOfCells(list_of_cells, vars)\n rich = getRichnessPerListOfCells(list_of_cells, taxonomic_level_name)\n data = pd.concat([rich,env],axis =1)\n data = toGeoDataFrame(data, 'Longitude', 'Latitude')\n return data\n\ndef getEnvironmentalCovariatesFromListOfCells(list_of_cells,vars=['Elevation','MaxTemperature',\n'MeanTemperature','MinTemperature','Precipitation','Vapor','SolarRadiation','WindSpeed'],with_coordinates=True):\n \"\"\"\n Parameters :\n vars (list) name of the environmental layers. By default select all layers.\n \n Returns:\n a Dataframe of the summary statistics of the raster covariates defined in the cell's border (polygon).\n \"\"\" \n \n getdata = lambda cell : cell.getEnvironmentalData(vars)\n rdata = map(getdata,list_of_cells)\n if not with_coordinates:\n return pd.DataFrame(rdata)\n else:\n coords = getCentroidsFromListofCells(list_of_cells)\n data = pd.DataFrame(rdata)\n return pd.concat([data,coords],axis=1)\n\ndef getRichnessPerListOfCells(list_of_cells,taxonomic_level_name,with_centroids=True):\n \"\"\"\n Given a list of cells it returns the respective richness in the shape of pandas object.\n \n Parameters:\n list_of_cells : (List or iterator) the cells to take the richness from.\n taxonomic_level_name : (String) the name of the taxonomic level to which take the richness from.\n with_centroid : (Bool) if True returns the centroids of each corresponding cell\n Returns:\n richness : DataFrame \n \n \"\"\" \n rs = map(lambda cell : cell.getRichnessOf(taxonomic_level_name),list_of_cells)\n richness = pd.DataFrame({'n_'+taxonomic_level_name : rs })\n if with_centroids:\n coords = getCentroidsFromListofCells(list_of_cells, asDataFrame=True)\n return pd.concat([richness,coords],axis=1)\n else:\n return richness\n \ndef getCentroidsFromListofCells(list_of_cells,asDataFrame=True):\n \"\"\"\n Returns list of centroids in numpy array format.\n Params : asDataFrame : (Bool) Returns a DataFrame otherwise returns an iterator. True by default. 
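    e.g. points = getCentroidsFromListofCells(cells) gives a DataFrame with \"Longitude\" and \"Latitude\" columns.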
\n \"\"\"\n \n itercentroid = imap(lambda cell : cell.centroid,list_of_cells) \n if asDataFrame:\n centroids = chain(itercentroid)\n coords = map(lambda p : (p.x,p.y) , centroids)\n points = pd.DataFrame(coords,columns=[\"Longitude\",\"Latitude\"])\n return points\n else:\n return itercentroid\n\n\n###############\n## Treewise Strategies\n###############\n\n\n\n\nsumTrees = lambda tree_list : reduce(lambda a,b : a + b , tree_list)\n\n\ndef PolygonToTrees(polygon_wkt,mesh_level=11):\n \"\"\"\n Receives a polygon str in wkt, get the cells and extracts the trees in that Cell.\n Will work on the cell layer that has a direct link Occurrence - IS_IN - Cell\n \n note: It uses the spatial querying from the Geoprocessing Unit (RDMS) module.\n \"\"\"\n polygon = GEOSGeometry(polygon_wkt)\n mesh = initMesh(mesh_level)\n cells = list(mesh.objects.filter(cell__intersects=polygon))\n logger.info(\"Getting information. Developer! You can make this faster if you use Batchmode for py2neo.\")\n cellnode = map(lambda c : c.toCellNode().first(),cells)\n logger.info(\"Retrieving the Tree Structures. \\n Get a coffee this will take time.\")\n trees = map(lambda cell : buildTreeNeo(cell),cellnode)\n\n return trees\n \n \n\n\n\ndef getEnvironmentalCovariatesFromListOfTrees(list_of_trees):\n \"\"\"\n Returns a Dataframe of the summary statistics of the raster covariates defined in the cell's border (polygon).\n \"\"\" \n \n getdata = lambda tree : tree.associatedData.getEnvironmentalVariablesCells()\n \n rdata = map(getdata,list_of_trees)\n return pd.DataFrame(rdata)\n \ndef getPresencesForNode(TreeNode,list_of_trees, option='presences'):\n \"\"\"\n Given a list of trees and a Tree Node the function returns a binary list if the node was found on that Tree.\n Returns:\n list of presence absences\n \n notes:\n For the future, implement with count data (Poisson)\n \"\"\" \n signal = map(lambda tree : float(tree.hasNode(TreeNode)),list_of_trees)\n y = {TreeNode.name : signal}\n return pd.DataFrame(y)\n \ndef getPresencesForListOfNodes(list_of_tree_nodes,list_of_trees,with_centroids=True):\n \"\"\"\n Given a list of trees and a list of TreeNodes this function returns a binary table if the node was found on each of the trees.\n Similar to getSignalForNode but multivalued.\n \"\"\" \n signals = map(lambda tree_node : getPresencesForNode(tree_node, list_of_trees),list_of_tree_nodes)\n if with_centroids:\n centroids = getCentroidsFromListofTrees(list_of_trees)\n p = pd.concat(signals,axis=1)\n \n return pd.concat([p,centroids],axis=1)\n else:\n return pd.concat(signals,axis=1)\n \n \ndef getCentroidsFromListofTrees(list_of_trees):\n \"\"\"\n Returns list of centroids in numpy array format.\n \"\"\" \n npoints = map(lambda c : np.array(c.getExactCells()[0].centroid),list_of_trees)\n points = pd.DataFrame(npoints,columns=[\"Longitude\",\"Latitude\"])\n \n return points\n\n\n \n \n","repo_name":"molgor/biospytial","sub_path":"traversals/.ipynb_checkpoints/strategies-checkpoint.py","file_name":"strategies-checkpoint.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"74487973385","text":"\n\nclass SearchRange:\n\n def __init__(self, target):\n self.target = target\n\n def search(self, range, helper=None):\n # can use different helper function\n if helper is None:\n helper = self.helper\n\n # search while low doesn't exceed high\n l, h = range\n while l <= h:\n \n # get middle value and test w/ helper\n m = (l + h) // 2\n 
guess = helper(m)\n\n # found the target\n if guess == 0:\n return self.target\n # over-estimate of target\n if guess > 0:\n # eliminate [m, r]\n h = m - 1\n # under-estimate of target\n if guess < 0:\n # elimnate [l, m]\n l = m + 1 \n \n def helper(self, x):\n if x > self.target:\n return 1\n if x < self.target:\n return -1\n return 0\n\n def __call__(self, range, helper=None):\n return self.search(range, helper)\n\nif __name__ == \"__main__\":\n search = SearchRange(40)\n print(search((-100, 100)))","repo_name":"Andrew011002/Data-Structures","sub_path":"Search/search_range.py","file_name":"search_range.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19611969124","text":"import pandas as pd\nimport numpy as np\nfrom surprise import dump\nimport os\n\ndf = pd.read_json('./df_all_events.json')\n\n# Define custom function to calculate rating\ndef calculate_rating(group):\n\n relevant_maxn = {\n 'product_clicked':5,\n 'buy_stock':3,\n 'sell_stock':2,\n 'product_detail_viewed':2,\n 'stock_watchlist_initiated':1\n }\n\n # Mapping of values to ratings\n weight_dict = {\n 'product_clicked': 1,\n 'stock_watchlist_initiated': 8,\n 'buy_stock': 10,\n 'sell_stock': 3,\n 'product_detail_viewed':5,\n }\n\n value_counts = group['event'].value_counts()\n rating = min(value_counts.get('product_clicked',0), relevant_maxn['product_clicked']) * weight_dict['product_clicked'] + min(value_counts.get('stock_watchlist_initiated',0), relevant_maxn['stock_watchlist_initiated']) * weight_dict['stock_watchlist_initiated'] + value_counts.get('buy_stock',0) * weight_dict['buy_stock'] + min(value_counts.get('sell_stock',0), relevant_maxn['sell_stock']) * weight_dict['sell_stock'] + min(value_counts.get('product_detail_viewed',0), relevant_maxn['product_detail_viewed']) * weight_dict['product_detail_viewed']\n return pd.Series({'rating': rating})\n\n# group df based on username and stock code\ndf = df.groupby(['username', 'stock_code']).apply(calculate_rating).reset_index()\n\n# scalarization rating\ndef scal_rating(df):\n\n ratingScal = np.zeros(len(df))\n\n for name in df.username.unique():\n\n idx = df[df['username'] == name].index.tolist()\n max_val = df['rating'][df['username'] == name].max()\n value = df['rating'][df['username'] == name].apply(lambda x: 5*(x/max_val)).values\n\n for k,v in enumerate(idx):\n ratingScal[v] = value[k]\n\n return ratingScal\n\ndf['ratingScal'] = scal_rating(df)\n\n# load save model\ndef load_model(model_filename):\n \n file_name = os.path.expanduser(model_filename)\n _, loaded_model = dump.load(file_name)\n return loaded_model\n\n#'ALDIALHA39'\ndef get_collaborative(algo, username, df=df, top_n=5):\n\n all_stock = df['stock_code'].unique()\n\n invest = df['stock_code'][df['username'] == username].values\n\n not_invest = [i for i in all_stock if i not in invest]\n\n score = [algo.predict(username, stock).est for stock in not_invest]\n\n result = pd.DataFrame({'stock':not_invest, 'pred_score':score})\n\n result.sort_values('pred_score', ascending=False, inplace=True)\n\n return result.head(top_n).to_json(orient='records')\n","repo_name":"devpina-ml/recsys-col","sub_path":"deploy-docker/coba.py","file_name":"coba.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11696964237","text":"def solution(words, queries):\n answer = []\n\n words.sort()\n\n words_reverse = []\n for word 
in words:\n        words_reverse.append(word[::-1])\n    words_reverse.sort()\n\n    for query in queries:\n        wild_cnt = query.count('?')\n        wild_start = 1 # 1 if the '?' wildcards are at the front; otherwise they are at the end\n\n        if query[0] != '?':\n            wild_start = 0\n\n        count = 0\n        if wild_start == 0:\n            for word in words:\n                if len(word) == len(query) :\n                    if word[:len(word)-wild_cnt] == query[:len(word)-wild_cnt]:\n                        count += 1\n                    else:\n                        if count != 0:\n                            break\n                elif word[0] > query[0]:\n                    break\n\n        else :\n            tmp = query[wild_cnt:]\n            tmp = tmp[::-1]\n            for drow in words_reverse:\n                if len(drow) == len(query):\n                    if drow[:len(drow) - wild_cnt] == tmp:\n                        count += 1\n                    else :\n                        if count != 0:\n                            break\n                elif drow[0] > tmp[0]:\n                    break\n\n        answer.append(count)\n\n    return answer\n\n\nprint(solution([\"frodo\", \"front\", \"frost\", \"frozen\", \"frame\", \"kakao\",] , [\"fro??\", \"????o\", \"fr???\", \"fro???\", \"pro?\"]))","repo_name":"mingso/PS","sub_path":"이취코/문제/q30.py","file_name":"q30.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11342037393","text":"from aiogram import Bot, types\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.utils import executor\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.contrib.middlewares.logging import LoggingMiddleware\nfrom utils import TestStates\nfrom aiogram.types import ReplyKeyboardRemove, ReplyKeyboardMarkup, KeyboardButton\n\nfrom config import TOKEN\nfrom ftplib import FTP\nfrom datetime import datetime\n\nimport keyboards as kb\nimport pytube\nimport os\n\nbot = Bot(token=TOKEN)\ndp = Dispatcher(bot)\n\ndp = Dispatcher(bot, storage=MemoryStorage())\ndp.middleware.setup(LoggingMiddleware())\n\nbutton_hi = KeyboardButton('Привет! 👋')\n\ngreet_kb = ReplyKeyboardMarkup().add(button_hi)\n\n@dp.message_handler(state='*', commands=['start'])\nasync def start_command(msg: types.Message):\n    global state\n    state = dp.current_state(user=msg.from_user.id)\n    await state.set_state(TestStates.all()[0])\n    await bot.send_message(msg.from_user.id, \"Enter video link\")\n    print(\"entered command : start\")\n\n@dp.message_handler(state = TestStates.Q1)\nasync def get_url(msg: types.Message):\n    url = msg.text\n    try:\n        global yt\n        yt = pytube.YouTube(url)\n\n        await bot.send_message(msg.from_user.id, yt.title + '\\n\\n' + yt.author)\n\n        streams = yt.streams.filter(progressive = True)\n\n        if len(streams) == 2:\n            keys = kb.kb_res_all\n        elif str(streams[0]).find(\"360p\") != -1:\n            keys = kb.kb_res_l\n        else:\n            keys = kb.kb_res_h\n\n        await bot.send_message(msg.from_user.id, \"Choose resolution\", reply_markup=keys)\n        await state.set_state(TestStates.all()[1])\n        print(\"state set : 1\")\n\n    except pytube.exceptions.RegexMatchError:\n        await bot.send_message(msg.from_user.id, \"incorrect link\")\n\n@dp.message_handler(state = TestStates.Q2)\nasync def get_res(msg: types.Message):\n\n    res = msg.text\n    print(\"chose resolution : \" + res)\n\n    if res[len(res)-1] == 'p':\n        video = yt.streams.filter(progressive = True, res = res).first()\n    else:\n        video = yt.streams.filter(progressive = True, res = res+'p').first()\n\n    await bot.send_message(msg.from_user.id, \"Please wait...\", reply_markup=kb.ReplyKeyboardRemove())\n\n    print(\"downloading : \" + yt.title + '\\n' + str(video))\n    video.download()\n    print(\"downloaded\")\n\n    # strip characters that are unsafe in FTP paths before uploading\n    video_name = yt.title\n    video_name = video_name.replace('/','')\n    video_name = video_name.replace('\\\\','')\n    video_name = video_name.replace('*','')\n    video_name = 
video_name.replace('.','')\n video_name = video_name.replace('\\\"','')\n video_name = video_name.replace('\\'','')\n video_name = video_name.replace(\"|\",'')\n video_name = video_name.replace(\":\",'')\n video_name = video_name.replace(\"#\",'')\n\n ftp = FTP('c97883yq.beget.tech','c97883yq_dwbot','Onm5b-1ju')\n open_video = open(video_name + '.mp4', \"rb\")\n ftp.storbinary('STOR ' + video_name + '.mp4', open_video)\n open_video.close()\n\n print(\"video recived\")\n\n files = ftp.nlst()\n for v in files:\n timestamp = ftp.voidcmd(\"MDTM \" + v)[4:].strip()\n if (int(timestamp[10:12])-datetime.now().minute >= 10):\n ftp.delete(v)\n\n ftp.quit()\n\n print(\"old videos deleted\")\n\n video_name = video_name.replace(' ', '%20')\n\n await bot.send_message(msg.from_user.id, \"http://c97883yq.beget.tech/DownloadBotTmpVideos/\" + video_name + \".mp4\")\n await bot.send_message(msg.from_user.id, \"File will be deleted in 10 minutes!\")\n\n print(\"link sended\")\n\n await state.set_state(TestStates.all()[0])\n\nif __name__ == '__main__':\n executor.start_polling(dp)\n","repo_name":"aydep/Download-Bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19483890161","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport io, os, json\n\nsource_f = io.open(\"toefl.txt\", \"r\")\n\nsource = source_f.read()\nentries = [\n entry.split('\\t')\n for entry in source.split(\"\\n\")\n][:-1]\n\nword_dict = {}\n\nfor entry in entries:\n word_dict[entry[0]] = entry[1:]\n\nout_f = io.open(\"word_dict_toefl.json\", \"w\")\nout_f.write(json.dumps(word_dict))\n\n","repo_name":"QiuFeng54321/vocab-test","sub_path":"toefl_process.py","file_name":"toefl_process.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25472160939","text":"import csv\nimport os\nimport pickle\nfrom pathlib import Path\nimport jsonpickle\nfrom typing import Any, Optional\nfrom collections.abc import Iterator, Callable\nimport yaml\nfrom lukefi.metsi.data.formats.ForestBuilder import VMI13Builder, VMI12Builder, ForestCentreBuilder\nfrom lukefi.metsi.data.formats.io_utils import stands_to_csv_content, csv_content_to_stands, stands_to_rsd_content\nfrom lukefi.metsi.app.app_io import MetsiConfiguration\nfrom lukefi.metsi.app.app_types import SimResults, ForestOpPayload\nfrom lukefi.metsi.domain.forestry_types import StandList\nfrom lukefi.metsi.sim.core_types import CollectedData\n\n\nStandReader = Callable[[str], StandList]\nStandWriter = Callable[[Path, StandList], None]\nObjectLike = StandList or SimResults or CollectedData\nObjectWriter = Callable[[Path, ObjectLike], None]\n\n\ndef prepare_target_directory(path_descriptor: str) -> Path:\n \"\"\"\n Sanity check a given directory path. Existing directory must be accessible for writing. Raise exception if directory\n is not usable. Create the directory if not existing.\n necessary.\n\n :param path_descriptor: relative directory path\n :return: Path instance for directory\n \"\"\"\n if os.path.exists(path_descriptor):\n if os.path.isdir(path_descriptor) and os.access(path_descriptor, os.W_OK):\n return Path(path_descriptor)\n else:\n raise Exception(\"Output directory {} not available. 
Ensure it is a writable and empty, or a non-existing directory.\".format(path_descriptor))\n else:\n os.makedirs(path_descriptor)\n return Path(path_descriptor)\n\n\ndef stand_writer(container_format: str) -> StandWriter:\n \"\"\"Return a serialization file writer function for a ForestDataPackage\"\"\"\n if container_format == \"pickle\":\n return pickle_writer\n elif container_format == \"json\":\n return json_writer\n elif container_format == \"csv\":\n return csv_writer\n elif container_format == \"rsd\":\n return rsd_writer\n else:\n raise Exception(f\"Unsupported container format '{container_format}'\")\n\n\ndef object_writer(container_format: str) -> ObjectWriter:\n \"\"\"Return a serialization file writer function for arbitrary data\"\"\"\n if container_format == \"pickle\":\n return pickle_writer\n elif container_format == \"json\":\n return json_writer\n else:\n raise Exception(f\"Unsupported container format '{container_format}'\")\n\n\ndef determine_file_path(dir: Path, filename: str) -> Path:\n return Path(dir, filename)\n\n\ndef file_contents(file_path: str) -> str:\n with open(file_path, 'r') as f:\n return f.read()\n\n\ndef fdm_reader(container_format: str) -> StandReader:\n \"\"\"Resolve a reader function for FDM data containers\"\"\"\n if container_format == \"pickle\":\n return pickle_reader\n elif container_format == \"json\":\n return json_reader\n elif container_format == \"csv\":\n return lambda path: csv_content_to_stands(csv_file_reader(path))\n else:\n raise Exception(f\"Unsupported container format '{container_format}'\")\n\n\ndef object_reader(container_format: str) -> Any:\n if container_format == \"pickle\":\n return pickle_reader\n elif container_format == \"json\":\n return json_reader\n else:\n raise Exception(f\"Unsupported container format '{container_format}'\")\n\n\ndef external_reader(state_format: str, **builder_flags) -> StandReader:\n \"\"\"Resolve and prepare a reader function for non-FDM data formats\"\"\"\n if state_format == \"vmi13\":\n return lambda path: VMI13Builder(builder_flags, vmi_file_reader(path)).build()\n elif state_format == \"vmi12\":\n return lambda path: VMI12Builder(builder_flags, vmi_file_reader(path)).build()\n elif state_format == \"forest_centre\":\n return lambda path: ForestCentreBuilder(builder_flags, xml_file_reader(path)).build()\n\n\ndef read_stands_from_file(app_config: MetsiConfiguration) -> StandList:\n \"\"\"\n Read a list of ForestStands from given file with given configuration. Directly reads FDM format data. 
Utilizes\n FDM ForestBuilder utilities to transform VMI12, VMI13 or Forest Centre data into FDM ForestStand format.\n\n :param app_config: Mela2Configuration\n :return: list of ForestStands as computational units for simulation\n \"\"\"\n if app_config.state_format == \"fdm\":\n return fdm_reader(app_config.state_input_container)(app_config.input_path)\n elif app_config.state_format in (\"vmi13\", \"vmi12\", \"forest_centre\"):\n return external_reader(\n app_config.state_format,\n strata=app_config.strata,\n reference_trees=app_config.reference_trees,\n strata_origin=app_config.strata_origin)(app_config.input_path)\n else:\n raise Exception(f\"Unsupported state format '{app_config.state_format}'\")\n\n\ndef scan_dir_for_file(dirpath: Path, basename: str, suffixes: list[str]) -> Optional[tuple[Path, str]]:\n \"\"\"\n From given directory path, find the filename for given basename with list of possible file suffixes.\n Raises Exception if directory path is not a directory.\n :returns a pair with full filename and matching suffix\n \"\"\"\n if not os.path.isdir(dirpath):\n raise Exception(f\"Given input path {dirpath} is not a directory.\")\n _, _, files = next(os.walk(dirpath))\n filenames_with_suffix = list(map(lambda suffix: (f\"{basename}.{suffix}\", suffix), suffixes))\n for filename, suffix in filenames_with_suffix:\n if filename in files:\n return Path(dirpath, filename), suffix\n return None\n\n\ndef parse_file_or_default(file: Path, reader: Callable[[Path], Any], default=None) -> Optional[Any]:\n \"\"\"Deserialize given file with given reader function or return default\"\"\"\n if os.path.exists(file):\n return reader(file)\n else:\n return default\n\n\ndef read_schedule_payload_from_directory(schedule_path: Path) -> ForestOpPayload:\n \"\"\"\n Create an OperationPayload from a directory which optionally contains usable unit_state and derived_data files.\n Utilizes a scanner function to resolve the files with known container formats. 
Files may not exist.\n\n :param schedule_path: Path for a schedule directory\n :return: OperationPayload with computational_unit and collected_data if found\n \"\"\"\n unit_state_file, input_container = scan_dir_for_file(schedule_path, \"unit_state\", [\"csv\", \"json\", \"pickle\"])\n derived_data_file, derived_data_container = scan_dir_for_file(schedule_path, \"derived_data\", [\"json\", \"pickle\"])\n stands = [] if unit_state_file is None else parse_file_or_default(unit_state_file, fdm_reader(input_container), [])\n derived_data = None if derived_data_file is None else parse_file_or_default(derived_data_file, object_reader(derived_data_container))\n return ForestOpPayload(\n computational_unit=None if stands == [] else stands[0],\n collected_data=derived_data,\n operation_history=[]\n )\n\n\ndef get_subdirectory_names(path: Path) -> list[str]:\n if not os.path.isdir(path):\n raise Exception(f\"Given input path {path} is not a directory.\")\n _, dirs, _ = next(os.walk(path))\n return dirs\n\n\ndef read_full_simulation_result_dirtree(source_path: Path) -> SimResults:\n \"\"\"\n Read simulation results from a given source directory, packing them into the simulation results dict structure.\n Utilizes a directory scanner function to find unit_state and derived_data files for known possible container\n formats.\n\n :param source_path: Path for simulation results\n :return: simulation results dict structure\n \"\"\"\n def schedulepaths_for_stand(stand_path: Path) -> Iterator[Path]:\n schedules = get_subdirectory_names(stand_path)\n return map(lambda schedule: Path(stand_path, schedule), schedules)\n result = {}\n stand_identifiers = get_subdirectory_names(source_path)\n stands_to_schedules = map(lambda stand_id: (stand_id, schedulepaths_for_stand(Path(source_path, stand_id))), stand_identifiers)\n for stand_id, schedulepaths in stands_to_schedules:\n payloads = list(map(lambda schedulepath: read_schedule_payload_from_directory(schedulepath), schedulepaths))\n result[stand_id] = payloads\n return result\n\n\ndef write_stands_to_file(result: StandList, filepath: Path, state_output_container: str):\n \"\"\"Resolve a writer function for ForestStands matching the given state_output_container. Invokes write.\"\"\"\n writer = stand_writer(state_output_container)\n writer(filepath, result)\n\n\ndef write_derived_data_to_file(result: CollectedData, filepath: Path, derived_data_output_container: str):\n \"\"\"Resolve a writer function for AggregatedResults matching the given derived_data_output_container. 
Invokes write.\"\"\"\n writer = object_writer(derived_data_output_container)\n writer(filepath, result)\n\n\ndef write_full_simulation_result_dirtree(result: SimResults, app_arguments: MetsiConfiguration):\n \"\"\"\n Unwraps the given simulation result structure into computational units and further into produced schedules.\n Writes these as a matching directory structure, splitting OperationPayloads into unit_state and derived_data files.\n Details for output directory, unit state container format and derived data container format are extracted from\n given app_arguments structure.\n\n :param result: the simulation results structure\n :param app_arguments: application run configuration\n :return: None\n \"\"\"\n for stand_id, schedules in result.items():\n for i, schedule in enumerate(schedules):\n if app_arguments.state_output_container is not None:\n schedule_dir = prepare_target_directory(f\"{app_arguments.target_directory}/{stand_id}/{i}\")\n filepath = determine_file_path(schedule_dir, f\"unit_state.{app_arguments.state_output_container}\")\n write_stands_to_file([schedule.computational_unit], filepath, app_arguments.state_output_container)\n if app_arguments.derived_data_output_container is not None:\n schedule_dir = prepare_target_directory(f\"{app_arguments.target_directory}/{stand_id}/{i}\")\n filepath = determine_file_path(schedule_dir, f\"derived_data.{app_arguments.derived_data_output_container}\")\n write_derived_data_to_file(schedule.collected_data, filepath, app_arguments.derived_data_output_container)\n\n\ndef simulation_declaration_from_yaml_file(file_path: str) -> dict:\n # TODO: content validation\n return yaml.load(file_contents(file_path), Loader=yaml.CLoader)\n\n\ndef pickle_writer(filepath: Path, data: ObjectLike):\n with open(filepath, 'wb') as f:\n pickle.dump(data, f, protocol=5)\n\n\ndef pickle_reader(file_path: str) -> ObjectLike:\n with open(file_path, 'rb') as f:\n return pickle.load(f)\n\n\ndef json_writer(filepath: Path, data: ObjectLike):\n jsonpickle.set_encoder_options(\"json\", indent=2)\n with open(filepath, 'w', newline='\\n') as f:\n f.write(jsonpickle.encode(data))\n\n\ndef csv_writer(filepath: Path, data: StandList):\n row_writer(filepath, stands_to_csv_content(data, ';'))\n\n\ndef rsd_writer(filepath: Path, data: StandList):\n row_writer(filepath, stands_to_rsd_content(data))\n\n\ndef row_writer(filepath: Path, rows: list[str]):\n with open(filepath, 'w', newline='\\n') as file:\n for row in rows:\n file.write(row)\n file.write('\\n')\n\n\ndef json_reader(file_path: str) -> ObjectLike:\n res = jsonpickle.decode(file_contents(file_path))\n return res\n\n\ndef vmi_file_reader(file: Path) -> list[str]:\n with open(file, 'r', encoding='utf-8') as input_file:\n return input_file.readlines()\n\n\ndef xml_file_reader(file: Path) -> str:\n with open(file, 'r', encoding='utf-8') as input_file:\n return input_file.read()\n\n\ndef csv_file_reader(file: Path) -> list[list[str]]:\n with open(file, 'r', encoding='utf-8') as input_file:\n return list(csv.reader(input_file, delimiter=';'))\n","repo_name":"lukefi/metsi","sub_path":"lukefi/metsi/app/file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":11944,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"14341234065","text":"from math import sqrt\n\ndef isPrime(n):\n for i in range(2, int(sqrt(n)+1)):\n if n % i == 0:\n return False\n return True\n\ndef p3(n):\n c = 0\n for i in range(1,n):\n if isPrime(i):\n c += 1\n return c\n\ndef 
p2(n):\n    L = range(2, n+1)\n    for i in xrange(2, int(sqrt(n)) + 1):\n        if isPrime(i):\n            for j in xrange(len(L)-1, L.index(i), -1):\n                if L[j] % i == 0:\n                    L[j] = 1\n    n = 0\n    for i in L:\n        if i != 1:\n            n += 1\n    return n\n\ndef p(n):\n    L = range(2, n+1)\n    for i in xrange(2, int(sqrt(n)) + 1):\n        try:\n            for j in xrange(len(L)-1, L.index(i), -1):\n                if L[j] % i == 0:\n                    L[j] = 1\n        except:\n            pass\n    n = 0\n    for i in L:\n        if i != 1:\n            n += 1\n    return n\n\nimport cProfile\nn=100000\ncProfile.run(\"p3(n)\")\ncProfile.run(\"p2(n)\")","repo_name":"Albyorix/various_python_challenges","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17272849866","text":"from weixin_po_20200822.page.add_member import AddMember\nfrom weixin_po_20200822.page.home import Home\n\n\nclass Test_Add_Member():\n    def setup(self):\n        self.home = Home()\n\n    def test_add_member(self):\n        # 1. navigate to the Add Member page  2. add the member\n        data = {\n            'username' : 'hiii',\n            'engname': 'hi',\n            'acctid': '1233321',\n            'phone': '13788880000',\n        }\n        result = self.home.goto_add_member().add_member(data)\n        # assert \"皮城女警\" in result\n\n    def test_add_member_fail(self):\n        data = {\n            'username' : 'hiii',\n            'engname': 'hi',\n            'acctid': '1233321',\n            'phone': '13788880000',\n        }\n        self.home.goto_add_member().add_member(data)\n        # result = AddMenber(self.home.driver).get_phone_error_message()\n        # assert \"请填写正确的手机号码\" == result  # i.e. \"please enter a valid phone number\"\n\n    def test_contact_add_member(self):\n        self.home.goto_add_contact().upload_file()\n\n    def teardown(self):\n        self.home.base_quit()\n\n","repo_name":"Jammy0624/HogwartsCode","sub_path":"weixin_po_20200822/testcase/test_add_member.py","file_name":"test_add_member.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73174825544","text":"#! 
/usr/bin/python3.10\nimport csv\n\nclass Solution(object):\n def __init__(self, filename):\n self.filename = filename\n self.seen = set()\n \n def solve(self):\n with open(self.filename) as f:\n csv_reader = csv.reader(f)\n signal_strength = 0\n register_value = 1\n next_breakpoint = 20\n cycle = 0\n \n for row in csv_reader:\n value = 0\n if row[0] == \"noop\":\n cycle += 1\n else:\n value = int(row[0].split(\" \")[1])\n cycle += 2\n\n while cycle >= next_breakpoint and next_breakpoint <= 220:\n signal_strength += next_breakpoint * register_value\n next_breakpoint += 40\n\n if next_breakpoint > 220:\n break\n \n register_value += value\n\n print(signal_strength)\n\n\nif __name__ == \"__main__\":\n sol = Solution(\"input.csv\")\n sol.solve()\n","repo_name":"danguan/aoc2022","sub_path":"day10/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21360393719","text":"from ISStreamer.Streamer import Streamer\nimport time, math\n\nstreamer = Streamer(bucket_key=\"479DAGGBW86T\", debug_level=3, ini_file_location=\"./isstreamer.ini\")\n\ndef stress_test_loop(i, num):\n\twhile i > 0:\n\t\td = {\n\t\t\t\"iterations_left\": i,\n\t\t\t\"some other value\": math.sqrt(i)\n\t\t}\n\t\tstreamer.log_object(d, key_prefix=\"\")\n\t\ttime.sleep(.1)\n\t\tstreamer.flush()\n\t\ti = i - 1\n\n\nstress_test_loop(5000, 1)\n\nstreamer.close()","repo_name":"initialstate/python_appender","sub_path":"example_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"81"} +{"seq_id":"30050166945","text":"import cv2\nimport imutils\nimport numpy as np\n\nrun = True\n\nwhile(run):\n \n img = cv2.imread(\"NA102.jpeg\")\n # img = cv2.imread(\"102fil.png\")\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"GRAY\", gray)\n\n bfilter = cv2.bilateralFilter(gray, 11, 17, 17)\n cv2.imshow(\"BILATERAL FILTER\", bfilter)\n\n edged = cv2.Canny(bfilter, 50, 200)\n cv2.imshow(\"CANNY\", edged)\n\n hsv_image = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n cv2.imshow('HSV', hsv_image)\n cv2.imshow('Hue channel', hsv_image[:,:,0])\n cv2.imshow('Saturation', hsv_image[:,:,1])\n cv2.imshow('Value', hsv_image[:,:,2])\n\n\n # keypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # contours = imutils.grab_contours(keypoints)\n # contours = sorted(contours, key=cv2.contourArea, reverse = True)[:10]\n\n # location = None\n # for contour in contours:\n # approx = cv2.approxPolyDP(contour, 10, True)\n # if len(approx) == 4:\n # location = approx\n # break\n\n # mask = np.zeros(gray.shape, np.uint8)\n # new_image = cv2.drawContours(mask, [location], 0,255,-1)\n # new_image = cv2.bitwise_and(img, img, mask=mask)\n\n # cv2.imshow(\"FINAL SEGUN\", cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))\n \n \n k = cv2.waitKey(5)\n if k == 27:\n run = False","repo_name":"Achoycito/Platelink","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33487833694","text":"'''\nSay you have an array for which the ith element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. 
You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).\n\nNote: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).\n\nExample 1:\n\nInput: [7,1,5,3,6,4]\nOutput: 7\nExplanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.\n             Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.\nExample 2:\n\nInput: [1,2,3,4,5]\nOutput: 4\nExplanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.\n             Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are\n             engaging multiple transactions at the same time. You must sell before buying again.\nExample 3:\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transaction is done, i.e. max profit = 0.\n'''\nimport sys\n\n\nclass Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        # two-state DP: hold = best profit while holding a share,\n        # sell = best profit while not holding (no cooldown in this problem)\n        hold, sell = -float('inf'), 0\n        for p in prices:\n            prehold = hold\n            hold = max(hold, sell - p)\n            sell = max(sell, prehold + p)\n        return sell\n\n\nclass Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        if len(prices) < 2:\n            return 0\n\n        sell = 0\n        buy = -sys.maxsize - 1\n        prev_sell = 0\n        prev_buy = -sys.maxsize - 1\n\n        for price in prices:\n            prev_buy = buy\n            buy = max(buy, prev_sell - price)\n            prev_sell = sell\n            sell = max(sell, prev_buy + price)\n\n        return sell","repo_name":"XiongQiuQiu/leetcode-slove","sub_path":"Algorithms/122-Best-Time-to-Buy-and-Sell-Stock-II.py","file_name":"122-Best-Time-to-Buy-and-Sell-Stock-II.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"13404425421","text":"# coding=utf-8\n\n\"\"\"\n    Parameter configuration\n\"\"\"\n\n# parameters shared by all models\nembedding_dim = 64      # word embedding dimension\nseq_length = 600        # sequence length\nnum_classes = 10        # number of classes\nvocab_size = 5000       # vocabulary size\n\ndropout_keep_prob = 0.8 # dropout keep probability\nlearning_rate = 1e-3    # learning rate\n\nbatch_size = 64         # batch size for training\nnum_epochs = 10         # total number of epochs\n\nprint_per_batch = 5    # print results every this many batches\n\n# MLP parameters\nhidden_dim = 256        # units in the fully-connected layer\n\n# CNN parameters\nnum_filters = 256       # number of convolution kernels\nkernel_size = 5         # convolution kernel size\n\n# RNN parameters\nnum_layers= 2           # number of RNN layers\nrnn = 'gru'             # 'lstm' or 'gru'\n","repo_name":"Mr-OREO/CourseworkOfSE","sub_path":"人工智能导论(课程综合实践Ⅰ)/Experiment/day 5/Day5_王子腾/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"zh","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"}
{"seq_id":"31073190085","text":"\"\"\"\nVasya implemented nonoptimal Enum classes.\nRemove duplication in variable declarations using metaclasses.\n\"\"\"\n\n\nclass SimplifiedEnum(type):\n    _instances = {}\n\n    def __new__(mcs, name, bases, dct):\n        if name not in mcs._instances:\n            tmp_dct = {key: key for key in dct[f\"_{name}__keys\"]}\n            # the line above looks quite bad -- is it possible to make it more beautiful?\n            mcs._instances[name] = super().__new__(mcs, name, bases, tmp_dct)\n        else:\n            print(f\"Class '{name}' already exists\")\n        return mcs._instances[name]\n","repo_name":"ParamonovED/Epam_HW","sub_path":"homework_11/metaclasses.py","file_name":"metaclasses.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"33617115186","text":"from langchain_benchmarks.rag.tasks.semi_structured_reports import (\n    
indexing,\n)\nfrom langchain_benchmarks.rag.tasks.semi_structured_reports.indexing.retriever_registry import (\n load_docs,\n)\nfrom langchain_benchmarks.schema import RetrievalTask\n\n# ID of public Semi-structured Earnings dataset\nDATASET_ID = \"https://smith.langchain.com/public/c47d9617-ab99-4d6e-a6e6-92b8daf85a7d/d\"\n\nSEMI_STRUCTURED_REPORTS_TASK = RetrievalTask(\n name=\"Semi-structured Reports\",\n dataset_id=DATASET_ID,\n retriever_factories=indexing.RETRIEVER_FACTORIES,\n architecture_factories={},\n get_docs=load_docs,\n description=(\n \"\"\"\\\nQuestions and answers based on PDFs containing tables and charts.\n\nThe task provides the raw documents as well as factory methods to easily index them\nand create a retriever.\n\nEach example is composed of a question and reference answer.\n\nSuccess is measured based on the accuracy of the answer relative to the reference answer.\nWe also measure the faithfulness of the model's response relative to the retrieved documents (if any).\n\"\"\" # noqa: E501\n ),\n)\n","repo_name":"langchain-ai/langchain-benchmarks","sub_path":"langchain_benchmarks/rag/tasks/semi_structured_reports/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"81"} +{"seq_id":"30580137466","text":"import ConfigParser, os, scipy, patsy\nimport numpy as np\n\nfrom gpmultipy.kernel import RBF, White, Hierarchical\nfrom gpmultipy import Prior, Model\n\n\nclass Configuration(object):\n\n def __init__(self,cdir,fname='config.cfg',randomize=False,randomizePriors=False):\n\n # TODO: seperate nf and ndesigns, which are used incorrectly in non-mean design\n\n if not os.path.exists(cdir):\n raise ValueError('directory %s does not exist!'%cdir)\n\n self.config = ConfigParser.ConfigParser()\n self.config.read(os.path.join(cdir,fname))\n\n self._setDefaults()\n\n # this is the number of variation levels in the model\n self.levels = self.config.getint('main','levels')\n\n #experimental design\n self.design = self.config.get('main','design')\n\n #marginalize out hierarchy\n self.hierarchy = self.config.getboolean('main','hierarchy')\n\n self.intergrateBottom = self.config.getboolean('main','integrate-bottom')\n\n self.nf = []\n if self.design == 'mean':\n self.nf.append(1)\n elif self.design=='single-treatment':\n self.tmnts = self.config.getint('main','treatments')\n self.nf.append(self.tmnts)\n elif self.design=='multiple-treatment':\n self.tmnts = self.config.getint('main','treatments')\n self.factors = self.config.getint('main','factors')\n self.crossed = self.config.getboolean('main','crossed')\n\n if self.crossed:\n tot = self.tmnts*self.tmnts*(self.factors-1)\n else:\n tot = self.tmnts*self.factors\n self.nf.append(tot)\n\n self._checkLevelConfig()\n\n self._buildKernels()\n\n # if only one level and nrep provided in main, use that\n if self.levels == 1 and self.config.has_option('main','nrep'):\n self.p = self.config.getint('main','nrep')\n self.nreps = [self.p]\n # otherwise use product of all levels\n else:\n self.nreps = [self.config.getint('level%d'%(i+1),'nrep') for i in range(self.levels)]\n self.p = np.prod(self.nreps)\n\n # don't build functions/priors for bottom level if integrating out\n if self.intergrateBottom:\n stahp = self.levels\n else:\n stahp = self.levels+1\n\n #marginalize only use actual functions\n if self.hierarchy:\n stahp = 1\n self.p = self.nreps[0]\n\n # add the number of functions from replicate structure\n self.nf += 
[self.nf[0]*np.product(self.nreps[:i]) for i in range(1,stahp)]\n self.cumnf = np.cumsum(self.nf)\n self.cumnreps = np.cumprod(self.nreps)\n self.f = sum(self.nf)\n\n self.buildDesignMatrix()\n\n if self.hierarchy:\n\n if self.levels > 1:\n repeat = np.product(self.nreps[1:])\n else:\n repeat = 1\n\n xactual = np.linspace(self.config.getfloat('main','xmin'),\n self.config.getfloat('main','xmax'),\n self.config.getint('main','n'))\n\n self.x = np.zeros((xactual.shape[0]*repeat,2+self.levels))\n self.x[:,0] = np.repeat(xactual[:,None],repeat,1).ravel(1)\n\n for i in range(1,self.levels):\n step = self.x.shape[0]/np.product(self.nreps[1:i+1])\n for j in range(self.cumnreps[i]):\n self.x[j*step:(j+1)*step,i+1] = j\n\n self.xactual = xactual\n\n else:\n self.x = np.linspace(self.config.getfloat('main','xmin'),\n self.config.getfloat('main','xmax'),\n self.config.getint('main','n'))[:,None]\n\n self.y = np.zeros((self.x.shape[0],self.dm.shape[1]))\n\n self.priors = {'yKernel':{}, 'functions':{}, 'k1':{}}\n for i in range(self.levels+1):\n self.priors['k%d'%(i+1)] = {}\n\n if self.config.getboolean('main','uniformPrior'):\n self.priors['yKernel']['sigma'] = scipy.stats.uniform(\n loc=self.config.getfloat('sigmaYprior','loc'),\n scale=self.config.getfloat('sigmaYprior','scale')\n )\n\n for i in range(self.levels+1):\n self.priors['k%d'%(i+1)]['lengthscale'] = scipy.stats.uniform(\n loc=self.config.getfloat('lengthscalePrior','loc'),\n scale=self.config.getfloat('lengthscalePrior','scale'))\n\n self.priors['k%d'%(i+1)]['sigma'] = scipy.stats.uniform(\n loc=self.config.getfloat('sigmaPrior','loc'),\n scale=self.config.getfloat('sigmaPrior','scale'))\n else:\n self.priors['yKernel']['sigma'] = scipy.stats.lognorm(s=self.config.getfloat('sigmaYprior','s'),scale=self.config.getfloat('sigmaYprior','scale'))\n\n for i in range(self.levels+1):\n self.priors['k%d'%(i+1)]['lengthscale'] = scipy.stats.lognorm(s=self.config.getfloat('lengthscalePrior','s'),scale=self.config.getfloat('lengthscalePrior','scale'))\n self.priors['k%d'%(i+1)]['priors'] = scipy.stats.lognorm(s=self.config.getfloat('sigmaPrior','s'),scale=self.config.getfloat('sigmaPrior','scale'))\n\n # self.priors['functions'][0] = Prior(self.x,self.k1,[0])\n\n for i in range(stahp):\n start = sum(self.nf[:i])\n stahp2 = start+self.nf[i]\n\n self.priors['functions'][i] = Prior(self.x,self.__dict__['k%d'%(i+1)],range(start,stahp2),randomizeOrder=randomizePriors)\n\n def get(self):\n kernels = [self.yKernel, self.k1]\n kernels += [self.__dict__['k%d'%(i+2)] for i in range(self.levels)]\n\n return self.x, self.dm.shape[1], self.dm, kernels, self.priors\n\n def randomize(self):\n self.model = Model(self.x,self.y,self.dm)\n\n for f,prior in self.priors['functions'].iteritems():\n prior.sample(self.model,self.yKernel)\n\n self.yKernel.sigma = self.priors['yKernel']['sigma'].rvs()\n\n for i in range(self.levels+1):\n k = \"k%d\"%(i+1)\n for param in self.priors[k]:\n self.__dict__[k].__dict__[param] = self.priors[k][param].rvs()\n\n\n\n def buildDesignMatrix(self):\n\n # designs: (f,d) f functions by d designs\n self.designs = None\n if self.design == 'mean':\n self.designs = np.ones((1,1))\n elif self.design == 'single-treatment':\n self.contrast = patsy.contrasts.Helmert().code_without_intercept(range(self.tmnts)).matrix\n self.designs = np.row_stack((np.ones((1,self.contrast.shape[0])), self.contrast.T))\n\n k = 0\n self.replicates = np.zeros((np.sum(self.cumnreps),self.p))\n # self.replicates = np.zeros((100,self.p))\n for i in range(self.levels):\n 
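# stab: how many bottom-level replicates one unit at this level spans\n            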
stab = max(np.prod(self.nreps[i+1:]),1) # step-size\n stab = int(stab)\n\n # print i,stab\n\n # for j in range(self.nreps[i]):\n for j in range(self.p/stab):\n # print i,j,k\n self.replicates[k,j*stab:(j+1)*stab] = 1\n k+=1\n\n self.dm = np.zeros((self.f,self.p*self.nf[0]))\n self.dm[:self.nf[0],:] = self.designs.repeat(self.p,1)\n\n if not self.hierarchy:\n offset = self.nf[0]\n for i in range(self.nf[0]):\n col = i*self.p\n row = i*self.replicates.shape[0] + offset\n\n self.dm[row:row+self.replicates.shape[0],col:col+self.replicates.shape[1]] = self.replicates\n\n def setDefault(self,name,value):\n\n if not self.config.has_option(\"DEFAULT\",name):\n self.config.set('DEFAULT',name,value)\n\n def _setDefaults(self):\n\n self.setDefault(\"levels\",'1')\n self.setDefault(\"n\",'50')\n self.setDefault(\"design\",'mean')\n self.setDefault(\"treatments\",'2')\n self.setDefault(\"factors\",'2')\n self.setDefault(\"crossed\",'False')\n self.setDefault(\"variable-selection\",'False')\n self.setDefault(\"sigma\",'1.0')\n self.setDefault(\"lengthscale\",'1.0')\n self.setDefault(\"nrep\",'3')\n self.setDefault('uniformPrior','True')\n self.setDefault('loc','1.0')\n self.setDefault('scale','1.0')\n self.setDefault('s','1.0')\n self.setDefault('xmin','-1')\n self.setDefault('xmax','1')\n self.setDefault('slice-w','.2')\n self.setDefault('slice-m','5')\n self.setDefault('hierarchy','False')\n\n # toggle whether to integrate out the lowest level of hierarchy\n self.setDefault('integrate-bottom','False')\n\n def _checkLevelConfig(self):\n \"\"\"Check that sections are available matching the number of levels, add if needed.\"\"\"\n\n for i in range(self.levels):\n\n # check for kernels for each level\n if not self.config.has_section(\"k%d\"%(i+2)):\n self.config.add_section(\"k%d\"%(i+2))\n\n # check for level config section\n if not self.config.has_section(\"level%d\"%(i+1)):\n self.config.add_section(\"level%d\"%(i+1))\n\n def _buildKernels(self,):\n self.k1 = RBF(1,self.config.getfloat('k1','sigma'),self.config.getfloat('k1','lengthscale'))\n\n if not self.hierarchy:\n self.yKernel = White(1,self.config.getfloat('yKernel','sigma'))\n\n for i in range(self.levels):\n k = 'k%d'%(i+2)\n self.__dict__[k] = RBF(1,self.config.getfloat(k,'sigma'),self.config.getfloat(k,'lengthscale'))\n else:\n args = []\n for i in range(self.levels):\n k = 'k%d'%(i+2)\n args.append(RBF(1,self.config.getfloat(k,'sigma'),self.config.getfloat(k,'lengthscale')))\n self.__dict__[k] = args[-1]\n\n args.append(White(1,self.config.getfloat('yKernel','sigma')))\n\n self.yKernel = Hierarchical(*args)\n","repo_name":"ptonner/experimental-variation","sub_path":"simulations-gpmultipy/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25819638997","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def __repr__(self):\n return str(self.val) + 'node'\n\n\ndef removeNthFromEnd(head, n: int):\n curr, count, d = head, -1, {}\n while curr is not None:\n count += 1\n d[count] = curr\n curr = curr.next\n\n spot = count - n\n if spot == -2:\n return None\n elif spot == -1:\n return head.next\n\n prev = d[spot]\n prev.next = prev.next.next\n return head\n\n\ndef pointers(head, n: int):\n dummy = ListNode(0, head)\n l = dummy\n r = head\n while n > 0 and r:\n r = r.next\n n -= 1\n while r:\n l = l.next\n r = r.next\n l.next = 
l.next.next\n return dummy.next\n\n\nhead1 = ListNode(1)\nhead = ListNode(1, next=ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))\nremoveNthFromEnd(head, 1)\n","repo_name":"miray-mustafov/LeetCode","sub_path":"08.Linked List/06.19. Remove Nth Node From End of List.py","file_name":"06.19. Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70082297545","text":"from typing import Optional\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def findBottomLeftValue(self, root: Optional[TreeNode]) -> int:\n q = [root]\n ans = None\n for node in q:\n if node.right:\n q.append(node.right)\n if node.left:\n q.append(node.left)\n ans = node.val\n return ans\n","repo_name":"tiandiyijian/myLeetcode","sub_path":"513.py","file_name":"513.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44244387806","text":"pushed = [1,0]\npopped = [1,0]\n\nfrom collections import deque\n\nstack = deque()\n\npush_index = 0\npop_index = 0\n\nfor i in pushed:\n stack.append(i)\n while (pop_index 0:\n return doc_embedding / valid_words\n else:\n return doc_embedding\n\n\ndef get_relevant_words(search_tok, doc_tok, model, model_type):\n \"\"\"\n Function to reutrn semantically relevant words\n :param search_tok: search tokens\n :param doc_tok: document tokens\n :param model: word embedding model\n :return: relevant words\n \"\"\"\n search_set = set()\n doc_set = set()\n word_array = set()\n for word in search_tok:\n if word in model:\n search_set.add(word)\n\n for word in doc_tok:\n if word in model:\n doc_set.add(word)\n\n for s in itertools.product(search_set, doc_set):\n if model_type == 'magnitude':\n if model.similarity(s[0], s[1]) >= 0.3:\n word_array.add(s[1])\n else:\n if model.wv.similarity(s[0], s[1]) >= 0.3:\n word_array.add(s[1])\n return ', '.join(list(word_array))\n\n\ndef get_docs_embedding(docs_tok, model, dim=300):\n \"\"\"\n Function to generate document embedding\n :param docs_tok: documents' tokens\n :param model: word embedding model\n :param dim: dimension of embedding (default=300)\n :return: documents' embedding\n \"\"\"\n all_docs_embedding = []\n for doc in docs_tok:\n all_docs_embedding.append(text2vec(doc, model, dim))\n cols = [str(i) for i in range(dim)]\n embeddings = pd.DataFrame(data=all_docs_embedding)\n embeddings.columns = cols\n embeddings.to_parquet('../model/docs_embeddings.parquet', index=False)\n return np.array(all_docs_embedding)\n\n\n","repo_name":"sagnikjena/semantic-search","sub_path":"utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26759148312","text":"import argparse\n\nparser = argparse.ArgumentParser(description='Just a little arab->rome converter')\n\nparser.add_argument( \"-n\", \"--number\", help = \"Just write number after that (0-9 format)\" )\n\nargs = parser.parse_args()\n\narab = int(args.number)\nr_list = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\nr_dict = {1000:'M', 900:'CM', 500:'D', 400:'CD', 100:'C', 90:'XC', 50:'L', 40:'XL', 10:'X', 9:'IX', 5:'V', 4:'IV', 1:'I'}\nrome = ''\nfor x in r_list:\n if (arab - x*3) >= 0:\n arab = arab 
- x*3\n rome += (r_dict[x])*3\n elif (arab - x*2) >= 0:\n arab = arab - x*2\n rome += (r_dict[x])*2\n elif (arab - x) >= 0:\n arab = arab - x\n rome += (r_dict[x])\nprint(rome)\n","repo_name":"shatterband/romearabrome","sub_path":"arab->rome.py","file_name":"arab->rome.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"44244221026","text":"class HashMap:\n def __init__(self, size = 16):\n self.size = size\n self.buckets = [ [] for i in range(self.size)]\n\n def _hashing_function(self, key):\n return hash(key) % self.size\n\n def insert(self, key, value):\n hashed_key = self._hashing_function(key)\n bucket = self.buckets[hashed_key]\n\n for i, (existing_key, existing_value) in enumerate(bucket):\n if existing_key == key:\n bucket[i] = (key, value)\n return\n \n bucket.append((key, value))\n\n def get_value(self, key):\n hashed_key = self._hashing_function(key)\n bucket = self.buckets[hashed_key]\n\n for (existing_key, existing_value) in bucket:\n if existing_key == key:\n return existing_value\n \n return -1\n\n def delete(self,key):\n hashed_key = self._hashing_function(key)\n bucket = self.buckets[hashed_key]\n\n for i, (existing_key, existing_value) in enumerate(bucket):\n if existing_key == key:\n del bucket[i]\n\n return -1\n\nhashmap = HashMap()\nhashmap.insert(2, \"Divyansh\")\nhashmap.insert(1, None)\nprint(hashmap.get_value(1))\nprint(hashmap.get_value(2))\nhashmap.insert(1, \"Divyansh\")\nprint(hashmap.get_value(1))\nhashmap.delete(2)\nprint(hashmap.get_value(2))","repo_name":"Divyansh3021/Data-Structures-and-Algorithm","sub_path":"Hash Map/HashMap.py","file_name":"HashMap.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20125457845","text":"import requests\nimport json\nimport urllib.parse\n\napi = \"http://127.0.0.1:8080\"\nurl = api+\"/clientes\"\n\ndados = requests.get(url).json()\n#print (json.dumps(dados, indent=4))\n\nfor d in dados:\n print (f\"{d['CustomerId']} - {d['FirstName']} {d['LastName']}\")\n \nid = input (\"Digite o id desejado: \")\n\nurl2 = api+f\"/cliente/{id}\"\n\ndados = requests.get(url2).json()\n#print (json.dumps(dados, indent=4))\n\nif dados[0]['State'] is None:\n print (f\"{dados[0]['FirstName']} {dados[0]['LastName']} mora em {dados[0]['Address']}, {dados[0]['City']}, {dados[0]['Country']}\")\nelse:\n print (f\"{dados[0]['FirstName']} {dados[0]['LastName']} mora em {dados[0]['Address']}, {dados[0]['City']}, {dados[0]['State']}, {dados[0]['Country']}\")\n","repo_name":"mjoaojr/Sistemas-Distribuidos","sub_path":"2021-1/flask_client.py","file_name":"flask_client.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"pt","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"16057201377","text":"import os\nimport sys\nimport pandas\nimport json\nimport argparse\n\ndef parseArgs():\n parser=argparse.ArgumentParser(description=\"Gathers completed imagelog files for a QFA experiment, concatenates them and writes files to appropriate IMAGELOGS directory. Should be executed from LOGS3 directory. Requires C2.json file (i.e. run C2Find before running C2Merge).\")\n parser.add_argument(\"exptID\", type=str, help=\"QFA experiment ID, e.g. 
QFA00001\")\n    args = parser.parse_args()\n    return(args)\n\ndef checkFile(f,verbose=True,deleteEmpty=False):\n    if os.path.isfile(f):\n        if sum(1 for line in open(f))<1:\n            if verbose: print(f+\" EMPTY\")\n            if deleteEmpty:\n                print(\"Deleting \"+f)\n                os.remove(f)\n            return(False)\n    else:\n        if verbose: print(f+\" MISSING\")\n        return(False)\n    return(True)\n\ndef main():\n\targs=parseArgs()\n\t# Should execute this script from LOGS3 directory\n\trootDir=os.getcwd()\n\n\texpt=str(args.exptID)\n\texptType=expt[0:-4]\n\tdataDir=os.path.join(rootDir,exptType+\"_EXPERIMENTS\")\n\tdictOut=os.path.join(dataDir,expt,\"AUXILIARY\",expt+'_C2.json')\n\n\tprint(\"Reading in dictionary describing image locations\")\n\twith open(dictOut, 'rb') as fp:\n\t\tbarcDict = json.load(fp)\n\n\tprint(\"Generating expected output filenames for images\")\n\tbarcFiles=[item for sublist in barcDict.values() for item in sublist]\n\tbarcFiles.sort()\n\toutFiles=[os.path.join(os.path.dirname(f),\"Output_Data\",os.path.basename(f).split(\".\")[0]+\".out\") for f in barcFiles]\n\tdatFiles=[os.path.join(os.path.dirname(f),\"Output_Data\",os.path.basename(f).split(\".\")[0]+\".dat\") for f in barcFiles]\n\n\tfor i,f in enumerate(outFiles):\n\t\toutf=f\n\t\tdatf=datFiles[i]\n\t\tcheckout=checkFile(outf,deleteEmpty=True)\n\t\tcheckdat=checkFile(datf,deleteEmpty=True)\n\n\tprint(\"Reading in expected output files\")\n\toutDFs=[pandas.read_csv(f,sep=\"\\t\") if (os.path.isfile(f) and sum(1 for line in open(f))>0) else pandas.DataFrame() for f in outFiles]\n\tdatDFs=[pandas.read_csv(f,sep=\"\\t\",header=None) if (os.path.isfile(f) and sum(1 for line in open(f))>0) else pandas.DataFrame() for f in datFiles]\n\n\tprint(\"Merging output files\")\n\toutDF=pandas.concat(outDFs)\n\tdatDF=pandas.concat(datDFs)\n\n\tprint(\"Archiving existing output files in IMAGELOGS directory\")\n\timlogs=os.path.join(dataDir,expt,\"IMAGELOGS\")\n\tfor f in os.listdir(imlogs):\n\t\tif f.endswith(\".out\") or f.endswith(\".dat\"):\n\t\t\tos.rename(os.path.join(imlogs,f),os.path.join(imlogs,f+\"_ARCHIVE\"))\n\n\tprint(\"Writing merged output to file\")\n\toutDF.to_csv(os.path.join(dataDir,expt,\"IMAGELOGS\",expt+\"_Concatenated.out\"),\"\\t\",index=False,header=True)\n\tdatDF.to_csv(os.path.join(dataDir,expt,\"IMAGELOGS\",expt+\"_Concatenated.dat\"),\"\\t\",index=False,header=False)\n\t\nif __name__ == '__main__':\n    main()\n\n\n","repo_name":"CnrLwlss/HTSauto","sub_path":"HTSscripts/C2Merge.py","file_name":"C2Merge.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"4401627571","text":"import re\nfrom pygments.lexer import RegexLexer, words\nfrom pygments.token import *\n\nclass ATermLexer(RegexLexer):\n    name = 'ATerm'\n    aliases = ['aterm']\n    filenames = ['*.aterm']\n\n    tokens = {\n        'root': [\n            (r'\"[^\"^\\n]*\"', Literal.String),\n            (r'\\d+', Literal.Number),\n            (r'[\\.\\,\\|\\[\\]\\(\\)\\{\\}]', Text.Punctuation),\n            (r'\\s+', Text.Whitespace),\n            (r'.', Text),\n        ],\n    }\n","repo_name":"metaborg/metaborg-pygments","sub_path":"metaborg/pygments/lexers/meta/aterm.py","file_name":"aterm.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"3013130279","text":"'''\nThis module is for pretty printing on the output\n'''\n_colors = {\n    \"HEADER\" : '\\033[95m',\n    \"OKBLUE\" : '\\033[94m',\n    \"OKCYAN\" : '\\033[96m',\n    \"OKGREEN\" : '\\033[92m',\n    \"WARNING\" : '\\033[93m',\n    
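# ANSI SGR escape sequences; assumes a terminal that interprets them\n    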
\"FAIL\" : '\\033[91m',\n \"ENDC\" : '\\033[0m',\n \"BOLD\" : '\\033[1m',\n \"UNDERLINE\" : '\\033[4m'\n}\n\n\ndef human_readable(num, suffix=\"B\"):\n '''\n Takes an integer number of bytes and converts to a human readable form\n '''\n for unit in [\"\", \"Ki\", \"Mi\", \"Gi\", \"Ti\", \"Pi\", \"Ei\", \"Zi\"]:\n if abs(num) < 1024.0:\n return f\"{num:3.1f}{unit}{suffix}\"\n num /= 1024.0\n return f\"{num:.1f}Yi{suffix}\"\n\ndef color(s: str, color: str):\n '''\n Takes the text and wraps it in the chosen format\n '''\n return _colors[color] + s","repo_name":"wkrettek/youmirror","sub_path":"youmirror/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"3654703347","text":"#!/usr/bin python -w\n\nimport random\nfrom scipy.spatial import KDTree\nimport numpy as np\nimport datetime\nimport os\nimport time\n\ncreator = None\ndef set_creator(cr):\n\tglobal creator\n\tcreator = cr\n\nimport pickle\n\nfrom deap import tools, base, algorithms\n\nfrom diversity_algorithms.algorithms.utils import *\nfrom diversity_algorithms.analysis.population_analysis import *\nfrom diversity_algorithms.analysis.data_utils import *\n\nfrom diversity_algorithms.algorithms.novelty_management import *\n\nimport alphashape\nfrom shapely.geometry import Point, Polygon, LineString\n\nimport jax\nfrom jax import numpy as jnp\nfrom diversity_algorithms.algorithms.jax_utils import *\n\n__all__=[\"novelty_ea\"]\n\nimport sys\n\ndef dist_to_shape(pp, s):\n\tp=Point(pp)\n\td=p.distance(s)\n\tif (d==0.0):\n\t\td=-p.distance(s.exterior)\n\treturn d\n\ndef dist_to_shapes(pp, ls):\n\tif (not hasattr(ls, '__iter__')):\n\t\tls=[ls] \n\tp=Point(pp)\n\timin=-1\n\tdmin=sys.float_info.max\n\tfor i in range(len(ls)):\n\t\td=p.distance(ls[i])\n\t\tif (d=params[\"eval_budget\"])): \n\t\t\tparams[\"nb_gen\"]=gen\n\t\t\tterminates=True\n\t\telse:\n\t\t\tterminates=False\n\n\t\tdump_data(population, gen, params, prefix=\"population\", attrs=[\"all\", \"dist_to_explored_area\", \"dist_to_parent\", \"rank_novelty\"], force=terminates)\n\t\tdump_data(population, gen, params, prefix=\"bd\", complementary_name=\"population\", attrs=[\"bd\"], force=terminates)\n\t\tdump_data(offspring, gen, params, prefix=\"bd\", complementary_name=\"offspring\", attrs=[\"bd\"], force=terminates)\n\t\tdump_data(archive.get_content_as_list(), gen, params, prefix=\"archive\", attrs=[\"all\"], force=terminates)\n\t\t\n\t\t# Update the statistics with the new population\n\t\t# record = params[\"stats\"].compile(population) if params[\"stats\"] is not None else {}\n\t\t# record_offspring = params[\"stats_offspring\"].compile(offspring) if params[\"stats_offspring\"] is not None else {}\n\t\t# logbook.record(gen=gen, nevals=len(invalid_ind), **record, **record_offspring)\n\t\t# if (verbosity(params)):\n\t\t#\t print(logbook.stream)\n\n\t\tfor ind in population:\n\t\t\tind.evolvability_samples=None\n\n\t\tif (terminates):\n\t\t\tbreak\n\t\t\t\n\t\t\t\n\treturn population, archive, logbook, nb_eval\n\n\n\n\n \nif (__name__=='__main__'):\n\tprint(\"Test of the Novelty-based ES\")\n\n\tprintf(\"TODO...\")\n","repo_name":"lincharles123/PROJET_ANDROIDE","sub_path":"diversity_algorithms_dev-master/diversity_algorithms/algorithms/novelty_search.py","file_name":"novelty_search.py","file_ext":"py","file_size_in_byte":11946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"36234592697","text":"import sys\nimport numpy as np\nimport warnings\n\nif sys.version_info.major == 3 and sys.version_info.major < 8:\n from collections.abc import Iterable # < py38\nelse:\n from typing import Iterable\n\nimport maia.pytree.cgns_keywords as CGK\n\nfrom . import check\n\nCGNS_STR_SIZE = 32\n\ndef _flatten(items):\n \"\"\"Yield items from any nested iterable; see https://is.gd/gE6gjc \"\"\"\n for x in items:\n if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):\n yield from _flatten(x)\n else:\n yield x\n\ndef _convert_value(value):\n \"\"\"\n Convert a Python input to a compliant pyCGNS value\n \"\"\"\n result = None\n if value is None:\n return result\n # value as a numpy: convert to fortran order\n if isinstance(value, np.ndarray):\n # print(f\"-> value as a numpy\")\n if value.flags.f_contiguous:\n result = value\n else:\n result = np.asfortranarray(value)\n # value as a single value\n elif isinstance(value, float): # \"R4\"\n # print(f\"-> value as float\")\n result = np.array([value],'f')\n elif isinstance(value, int): # \"I4\"\n # print(f\"-> value as int\")\n dtype = np.int32 if abs(value) < np.iinfo(np.int32).max else np.int64\n result = np.array([value], dtype)\n elif isinstance(value, str): # \"C1\"\n # print(f\"-> value as str with {CGK.cgns_to_dtype[CGK.C1]}\")\n result = np.array([c for c in value], CGK.cgns_to_dtype[CGK.C1])\n elif isinstance(value, CGK.dtypes):\n # print(f\"-> value as CGK.dtypes with {np.dtype(value)}\")\n result = np.array([value], np.dtype(value))\n # value as an iterable (list, tuple, set, ...)\n elif isinstance(value, Iterable):\n # print(f\"-> value as Iterable : {_flatten(value)}\")\n try:\n first_value = next(_flatten(value))\n if isinstance(first_value, float): # \"R4\"\n # print(f\"-> first_value as float\")\n result = np.array(value, dtype=np.float32, order='F')\n elif isinstance(first_value, int): # \"I4\"\n # print(f\"-> first_value as int\")\n max_val = max([abs(x) for x in _flatten(value)])\n dtype = np.int32 if max_val < np.iinfo(np.int32).max else np.int64\n result = np.array(value, dtype=dtype, order='F')\n elif isinstance(first_value, str): # \"C1\"\n # print(f\"-> first_value as with {CGK.cgns_to_dtype[CGK.C1]}\")\n # WARNING: string numpy is limited to rank=2\n assert max([len(v) for v in _flatten(value)]) <= CGNS_STR_SIZE\n size = CGNS_STR_SIZE\n if isinstance(value[0], str):\n v = np.empty( (size,len(value) ), dtype='c', order='F')\n for c, i in enumerate(value):\n s = min(len(i),size)\n v[:,c] = ' '\n v[0:s,c] = i[0:s]\n result = v\n else:\n v = np.empty( (size,len(value[0]),len(value) ), dtype='c', order='F')\n v[:,:,:] = ' '\n for c in range(len(value)):\n for d in range(len(value[c])):\n s = min(len(value[c][d]),size)\n v[0:s,d,c] = value[c][d][0:s]\n result = v\n elif isinstance(first_value, CGK.dtypes):\n result = np.array(value, dtype=np.dtype(first_value), order='F')\n except StopIteration:\n # empty iterable\n result = np.array(value, dtype=np.int32, order='F')\n else:\n # print(f\"-> value as unknown type\")\n result = np.array([value], order='F')\n return result\n\ndef _np_to_string(array):\n #Generic: 32 / taille de la premiere liste / nombre de listes\n if array.ndim == 1:\n return array.tobytes().decode().strip()\n elif array.ndim == 2:\n return [_np_to_string(array[:,i]) for i in range(array.shape[1])]\n elif array.ndim == 3:\n return [_np_to_string(array[:,:,i]) for i in range(array.shape[2])]\n raise ValueError(f\"Incorrect dimension for bytes array: {array.ndim}\")\n\ndef 
get_name(node):\n  \"\"\" Return the name of the input CGNSNode \"\"\"\n  return node[0]\n\ndef set_name(node, name):\n  if check.is_valid_name(name, check_len=False):\n    if not check.is_valid_name(name, check_len=True):\n      warnings.warn(\"Setting a CGNS node name with a string longer than 32 char\", RuntimeWarning, stacklevel=2)\n    node[0] = name\n  else:\n    raise ValueError(\"Invalid name for node\")\n\ndef get_value(node, raw=False):\n  \"\"\" Return the value of the input CGNSNode \"\"\"\n  raw_val = node[1]\n  if not raw and isinstance(raw_val, np.ndarray) and raw_val.dtype.kind == 'S':\n    return _np_to_string(raw_val)\n  else:\n    return raw_val\n\ndef get_value_type(node):\n  \"\"\" Return the value type of the input CGNSNode \"\"\"\n  val = get_value(node, raw=True)\n  if val is None:\n    return 'MT'\n  return CGK.dtype_to_cgns[val.dtype]\n\ndef get_value_kind(node):\n  \"\"\" Return the value kind of the input CGNSNode \"\"\"\n  val_type = get_value_type(node)\n  if val_type != 'MT':\n    val_type = val_type[0]\n  return val_type\n\ndef set_value(node, value):\n  node[1] = _convert_value(value)\n\ndef get_children(node):\n  \"\"\" Return the list of children of the input CGNSNode \"\"\"\n  return node[2]\n\ndef add_child(node, child):\n  if child is None:\n    return\n  if get_name(child) in [get_name(n) for n in get_children(node)]:\n    raise RuntimeError(f'Can not add child {child[0]} to node {node[0]}: a node with the same name already exists')\n  node[2].append(child)\n\ndef rm_child(node, child):\n  if child is None:\n    return\n  sub_nodes = get_children(node)\n  for i, sub_node in enumerate(sub_nodes):\n    if sub_node is child:\n      break\n  else:\n    raise RuntimeError('Can not remove child : not found in node')\n  sub_nodes.pop(i)\n\ndef set_children(node, children):\n  children_bck = get_children(node)\n  node[2] = []\n  try:\n    for child in children:\n      add_child(node, child)\n  except Exception as e:\n    node[2] = children_bck\n    raise e\n\ndef get_label(node):\n  \"\"\" Return the label of the input CGNSNode \"\"\"\n  return node[3]\n\ndef set_label(node, label):\n  if check.is_valid_label(label, only_sids=False):\n    if not check.is_valid_label(label, only_sids=True):\n      warnings.warn(\"Setting a CGNS node label with a non sids label\", RuntimeWarning, stacklevel=2)\n    node[3] = label\n  else:\n    raise ValueError(\"Invalid label for node\")\n\ndef get_names(nodes):\n  \"\"\" Return a list of names from a list of nodes \"\"\"\n  return [get_name(node) for node in nodes]\n","repo_name":"onera/Maia","sub_path":"maia/pytree/node/access.py","file_name":"access.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"19278328339","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchtext.data import Field, BucketIterator, TabularDataset\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport sys\nfrom konlpy.tag import Komoran\nimport re\nimport dill\n\ntagger = Komoran()\ntagger = tagger.morphs\n\nREVIEW = Field(use_vocab=True, lower=True, # init_token=\"\", eos_token=\"\",\n               include_lengths=True, batch_first=True)\n\nwith open(\"data/REVIEW.Field\", \"rb\") as f:\n    REVIEW = dill.load(f)\n\nV = len(REVIEW.vocab)\nD = 50\nH = 128\nH_f = 20 * 7\nnum_of_classes = 7\nda = 100 # dimension the attention projection reduces to.\nr = 5 # number of attention parts to extract from the sentence.\nnum_layers = 1\nnum_directions = 2\nbidirec = True\nbatch_size = 32\nLR = 0.005\nSTEP = 20\n\nmodel_path = 
'model_attn/Comb_bytitle_cut_Komoran_class7_DropOut.model'\n\nUSE_CUDA = torch.cuda.is_available()\nDEVICE = 0 if USE_CUDA else -1\nbatch_size = 32\nsys.maxsize = 922337203\n\n\nclass bidirec_LSTM(nn.Module):\n    def __init__(self, V, dim, H, H_f, num_of_classes,\n                 da, r, num_layers=3, bidirec=False, use_cuda=False):\n        \"\"\"\n        V: input_size = vocab_size\n        dim: embedding_size\n        H: hidden_size of LSTM\n        H_f: hidden_size of FC\n        num_of_classes (fully-connected)\n        da: attention_dimension (hyperparameter)\n        r: keywords (different parts to be extracted from the sentence)\n        \"\"\"\n        super().__init__()\n\n        self.Tag = [\"치과교정과\", \"치과보철과\", \"치과보존과\", \"치주과\", \"구강악안면외과\",\n                    \"구강내과\", \"소아치과\", \"구강병리과\", \"예방치과\", \"예방치의학과\", \"장애인치과\"]\n        self.r = r\n        self.da = da\n        self.hidden_size = H\n        self.num_layers = num_layers\n        self.USE_CUDA = use_cuda\n\n        if bidirec:\n            self.num_directions = 2\n        else:\n            self.num_directions = 1\n\n        # represent each of the V vocab entries as a dim-dimensional embedding\n        self.emb = nn.Embedding(V, dim)\n\n        # takes dim-dimensional input. / batch_first - input: (batch, n, dim)\n        # bidirec : whether to use a Bi-LSTM\n        self.lstm = nn.LSTM(dim, H, num_layers,\n                            batch_first=True, bidirectional=bidirec)\n\n        # Attention Weights1 / input : (batch, n, 2H )\n        self.attn_W1 = nn.Linear(self.num_directions * H, self.da, bias=False)\n        self.tanh = nn.Tanh()\n\n        # Attention Weights2 / input : (batch, n, da)\n        self.attn_W2 = nn.Linear(self.da, self.r, bias=False)\n\n        # input : (batch, n, r), so softmax is applied along r.\n        self.sftmax = nn.Softmax(dim=2)\n\n        # M = AH : r x 2H\n\n        self.fc = nn.Sequential(\n            nn.Linear(r * H * self.num_directions, H_f),\n            nn.ReLU(),\n            # nn.BatchNorm1d(H_f),\n            nn.Dropout(0.5),\n            nn.Linear(H_f, num_of_classes),\n        )\n\n    def init_LSTM(self, batch_size):\n\n        # (num_layers * num_directions, batch_size, hidden_size)\n        hidden = torch.zeros(self.num_layers * self.num_directions,\n                             batch_size, self.hidden_size,\n                             requires_grad=True)\n        cell = torch.zeros(self.num_layers * self.num_directions,\n                           batch_size, self.hidden_size,\n                           requires_grad=True)\n        if self.USE_CUDA:\n            hidden = hidden.cuda()\n            cell = cell.cuda()\n        return hidden, cell\n\n    def penalization_term(self, A):\n        # AxA_T : (batch_size, r, n) x (batch_size, n , r)\n        # eye : expanded to batch_size x r x r (the size of AA_T)\n        eye = torch.eye(self.r, requires_grad=True).expand(A.size(0),\n                                                           self.r, self.r) # B, r, r\n        if self.USE_CUDA:\n            eye = eye.cuda()\n\n        # (batch_size, r, r)\n        P = torch.bmm(A, A.transpose(1, 2)) - eye\n        loss_P = ((P ** 2).sum(1).sum(1) + 1e-10) ** 0.5\n        loss_P = torch.sum(loss_P) / A.size(0)\n        return loss_P\n\n    def forward(self, inputs, inputs_lengths):\n        \"\"\"\n        inputs: batch_size, n(max_len), V\n            - batch_size: batch_size\n            - n: max_len\n            - V: vocab_size\n        inputs_lengths: length of each sentence\n        \"\"\"\n        # (batch_size, n, V) -> (batch_size, n, dim)\n        embed = self.emb(inputs)\n        # initial hidden state : (num_directions, batch_size, hidden_size)\n        hidden, cell = self.init_LSTM(inputs.size(0))\n\n        # pack the padded sentences (so padding is excluded from computation)\n        packed = pack_padded_sequence(embed, inputs_lengths.tolist(), batch_first=True)\n\n        # packed (batch_size, n, dim) -> (batch , n , 2H) < n : seq_len >\n        # hidden, cell: num_directions, batch_size, hidden_size\n        output, (hidden, cell) = self.lstm(packed, (hidden, cell))\n\n        # unpack the packed sentences\n        # output: (batch , n , 2H) < n : Max >\n        output, output_lengths = pad_packed_sequence(output, batch_first=True)\n\n        # By Attention (batch , n , 2H) -> (batch , n , da)\n        tanh_v1 = self.tanh(self.attn_W1(output))\n\n        # (batch , n , da) -> (batch, n, r)\n        score = self.attn_W2(tanh_v1)\n        
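# hypothetical shape check: with batch_size=32 and r=5, a 40-token batch gives score (32, 40, 5), and A below is (32, 5, 40)\n        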
self.A = self.sftmax(score.transpose(1, 2)) # (batch, r, n)\n\n # (batch, r, n) x (batch, n, 2H)-> (batch, r, 2H)\n self.M = self.A.bmm(output)\n\n # Penalization Term\n loss_P = self.penalization_term(self.A)\n\n # view : ( batch_size , r x 2H ) -> ( batch_size , num_of_classes )\n\n output = self.fc(self.M.view(self.M.size(0), -1))\n\n return output, loss_P\n\n def predict(self, inputs, inputs_lengths):\n preds, _ = self.forward(inputs, inputs_lengths)\n return F.softmax(preds, dim=1).argmax(dim=1)\n\n def showProb(self, inputText):\n self.eval()\n pattern = re.compile(r\"[^ \\n0-9A-Za-z가-힣]\")\n inputText = pattern.sub(\"\", inputText)\n # inputBag = torch.tensor([[REVIEW.vocab.stoi[key]\n # for key in\n # mecab.parse(inputText).split()[::2][:-1]]])\n inputBag = torch.tensor([[REVIEW.vocab.stoi[key]\n for key in\n tagger(inputText)]])\n lengths = torch.tensor([len(inputBag[0])])\n probs = F.softmax(self.forward(inputBag, lengths)[0], dim=1)\n # label = probs.argmax(dim = 1)\n\n dic = {}\n for tag, prob in zip(self.Tag, probs.tolist()[0]):\n dic[tag] = prob\n return dic\n\n\n# Load model\nmodel = bidirec_LSTM(V, D, H, H_f, num_of_classes, da, r, num_layers=num_layers, bidirec=bidirec, use_cuda=USE_CUDA)\nif USE_CUDA:\n model = model.cuda()\n model.load_state_dict(torch.load(model_path))\nelse:\n model.load_state_dict(torch.load(model_path, map_location='cpu'))\n\ndef predicttorch(inputText):\n inputText = inputText\n model.cpu()\n model.eval()\n return model.showProb(inputText)","repo_name":"milkyway103/chichiui","sub_path":"bidirec_LSTM.py","file_name":"bidirec_LSTM.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10373029450","text":"'''\nCreated on Mar 23, 2021\n\n@author: Liam\n'''\nfrom util.math import fullRange\n\n\ndef hexDist(x, y):\n if x * y <= 0:\n return abs(x - y)\n else:\n return max(abs(x), abs(y))\n\n \ndef neighbors(x, y, d=1):\n return [(x + d, y), (x + d, y + d), (x, y + d), (x - d, y), (x - d, y - d), (x, y - d)]\n\n\n# def unit(u):\n# if u = \"x\": return (1,0,0)\n# if u\ndef xyz(x, y):\n# if x == 0 or y == 0:\n# return (x,y,0)\n# if y == x:\n# return (0,0,-x)\n if x >= 0:\n if y <= 0:\n return (x, y, 0)\n elif y <= x:\n return (x - y, 0, -y)\n else: # if y>x:\n return (0, y - x, -x)\n else:\n if y >= 0:\n return (x, y, 0)\n elif y >= x:\n return (x - y, 0, -y)\n else: # if y 0:\n if y < 0:\n return str(x) + \"x\" + str(y) + \"y\"\n if y < x:\n return str(x - y) + \"x\" + str(-y) + \"z\"\n if y > x:\n return str(y - x) + \"y\" + str(-x) + \"z\"\n else:\n if y > 0:\n return str(y) + \"y\" + str(x) + \"x\"\n if y > x:\n return str(-y) + \"z\" + str(x - y) + \"x\"\n if y < x:\n return str(-x) + \"z\" + str(y - x) + \"y\"\n\n\ndef parrallagram(x1, y1, x2, y2):\n (x, y, z) = xyz(x2 - x1, y2 - y1)\n# print(x,y,z)\n out = []\n if x == 0:\n for i in fullRange(0, y):\n for j in fullRange(0, z):\n out.append((x1 - j, y1 + i - j))\n# if y < 0:\n# for i in range(-y + 1):\n# for j in range (z + 1):\n# out.append((x1 - j, y1 - i - j))\n# else:\n# for i in range(y + 1):\n# for j in range (-z + 1):\n# out.append((x1 + j, y1 + i + j))\n \n elif y == 0:\n for i in fullRange(0, x):\n for j in fullRange(0, z):\n out.append((x1 + i - j, y1 - j))\n# if x < 0:\n# for i in range(-x + 1):\n# for j in range (z + 1):\n# out.append((x1 - i - j, y1 - j))\n# else:\n# for i in range(x + 1):\n# for j in range (-z + 1):\n# out.append((x1 + j, y1 + i + j))\n \n else:\n for i in fullRange(0, 
x):\n for j in fullRange(0, y):\n out.append((x1 + i, y1 + j))\n# if x < 0:\n# for i in range(-x + 1):\n# for j in range (y + 1):\n# out.append((x1 - i, y1 + j))\n# else:\n# for i in range(x + 1):\n# for j in range (-y + 1):\n# out.append((x1 + i, y1 - j))\n return out\n\n\n\n\ndef line(x1, y1, x2, y2):\n line = []\n # straight lines\n if x1 == x2:\n for y in fullRange(y1, y2):\n line.append((x1, y))\n elif y1 == y2:\n for x in fullRange(x1, x2):\n line.append((x, y1))\n elif y1 - x1 == y2 - x2:\n o = y1 - x1\n for x in fullRange(x1, x2):\n line.append((x, x + o))\n else:\n (x, y, z) = xyz(x2 - x1, y2 - y1)\n if x == 0:\n Zig = abs(y) - abs(z)\n if Zig == 1:\n pass\n \n# Zig = zig(x, y, z)\n# if -1<=Zig<=1:\n# pass\n return line\n# if x1 == x2:\n# \n# (x,y,z) = xyz(x2-x1,y2-y1)\n# u = max(abs(x),abs(y),abs(z))\n# if abs(x) == u:\n# if \n \n \nclass HexGrid:\n\n def __init__(self, size, span, baseTile=0):\n self.Y = 2 * size + 1\n self.X = self.Y + span\n self.grid = []\n self.entities = {}\n for x in range(self.X):\n self.grid.append([])\n app = self.grid[x].append\n for y in range(self.Y):\n if self.inBounds(x, y):\n app(baseTile)\n else:\n app(None)\n \n def setPos(self, e, x, y):\n if self.inBounds(x, y):\n self.entities[e] = (x, y)\n \n def remove(self, e):\n self.entities.pop(e)\n \n def inBounds(self, x, y):\n if x < 0 or y < 0 or y >= self.Y or x >= self.X or x - y >= self.X - self.Y / 2 or y - x >= self.Y / 2:\n return False\n return True\n \n def setTile(self, x, y, tile):\n if self.inBounds(x, y):\n self.forceTile(x, y, tile)\n \n def forceTile(self, x, y, tile):\n self.grid[x][y] = tile\n \n def draw(self, Set, tile):\n for (x, y) in Set:\n self.forceTile(x, y, tile)\n \n def drawParrallagram(self, x1, y1, x2, y2, tile):\n if self.inBounds(x1, y1) and self.inBounds(x2, y2):\n self.draw(parrallagram(x1, y1, x2, y2), tile)\n \n def getCirc(self, x, y, d):\n circ = []\n for i in range(self.X):\n for j in range(self.Y):\n if hexDist(i - x, j - y) <= d and self.inBounds(x, y):\n circ.append((i, j))\n return circ\n \n def getRing(self, x, y, d):\n pass\n \n def drawRing(self, x, y, d, tile):\n self.draw(self.getRing(x, y, d), tile)\n \n def drawLine(self, x1, y1, x2, y2, tile):\n self.draw(line(x1, y1, x2, y2), tile)\n \n# def dist(self, x1, y1, x2, y2):\n# return self.dist(x2 - x1, y2 - y1)\n\n\n# hexG = HexGrid(3, 2)\n# print(hexG.grid)\n# print(directions(7,9))\n# print(parrallagram(0, 4, 1, 0))\nprint(line(0, 3, 0, 0))\nprint(line(3, 0, 0, 0))\nprint(line(0, 3, 0, 3))\nprint(line(0, 0, 3, 3))\n","repo_name":"LiamVT2021/Game-Resources","sub_path":"src/hexGrid/hexBaseGrid.py","file_name":"hexBaseGrid.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13826892788","text":"import pickle\nimport numpy as np\nimport pandas as pd\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom prettytable import PrettyTable\n\ndef evaluate_embeddings():\n with open(\"gradient_descent.pkl\", 'rb') as f:\n gd = pickle.load(f) \n with open(\"gradient_descent_fixed.pkl\", 'rb') as f:\n gd_fixed = pickle.load(f) \n with open(\"gradient_descent_backtracking_line_search.pkl\", 'rb') as f:\n gd_ls = pickle.load(f) \n with open(\"adam.pkl\", 'rb') as f:\n adam = pickle.load(f) \n\n emb = [gd, gd_fixed, gd_ls, 
adam]\n\n test_words = [\"bath\", \"weather\", \"offensive\", \"ambassador\", \"divorced\", \"innocent\", \"suburban\", \"grand\"]\n\n t = PrettyTable(['Method', 'Word', '1', '2', '3', '4', '5'])\n\n for word in test_words:\n similarity = dict()\n for w, vec in gd.items():\n similarity.update({w: np.dot(gd[word], vec)/ (np.linalg.norm(gd[word])*np.linalg.norm(vec))})\n sim = list(similarity.items())\n sim = sorted(sim, key = lambda x: x[1], reverse = True)[1:6]\n sim = [(k, round(v, 3)) for k,v in sim]\n t.add_row([\"Gradient Desent\", word, sim[0], sim[1], \n sim[2], sim[3], sim[4]])\n\n for word in test_words:\n similarity = dict()\n for w, vec in gd_fixed.items():\n similarity.update({w: np.dot(gd_fixed[word], vec)/ (np.linalg.norm(gd_fixed[word])*np.linalg.norm(vec))})\n sim = list(similarity.items())\n sim = sorted(sim, key = lambda x: x[1], reverse = True)[1:6]\n sim = [(k, round(v, 3)) for k,v in sim]\n t.add_row([\"Gradient Desent - Fixed\", word, sim[0], sim[1], \n sim[2], sim[3], sim[4]]) \n\n for word in test_words:\n similarity = dict()\n for w, vec in gd_ls.items():\n similarity.update({w: np.dot(gd_ls[word], vec)/ (np.linalg.norm(gd_ls[word])*np.linalg.norm(vec))})\n sim = list(similarity.items())\n sim = sorted(sim, key = lambda x: x[1], reverse = True)[1:6]\n sim = [(k, round(v, 3)) for k,v in sim]\n t.add_row([\"Gradient Desent - Line Search\", word, sim[0], sim[1], \n sim[2], sim[3], sim[4]]) \n\n for word in test_words:\n similarity = dict()\n for w, vec in adam.items():\n similarity.update({w: np.dot(adam[word], vec)/ (np.linalg.norm(adam[word])*np.linalg.norm(vec))})\n sim = list(similarity.items())\n sim = sorted(sim, key = lambda x: x[1], reverse = True)[1:6]\n sim = [(k, round(v, 3)) for k,v in sim]\n t.add_row([\"Adam\", word, sim[0], sim[1], \n sim[2], sim[3], sim[4]]) \n\n print(t)\n\ndef get_knn_accuracy(input_gd, pos):\n input_gd = pd.DataFrame.from_records(input_gd)\n\n X_train, X_test, y_train, y_test = train_test_split(input_gd, pos, test_size=0.40, random_state = 122019)\n\n scaler = StandardScaler()\n scaler.fit(X_train)\n \n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n knn = KNeighborsClassifier(n_neighbors=6)\n knn.fit(X_train, y_train)\n\n y_pred = knn.predict(X_test)\n\n r = (y_pred == y_test)\n return len(r[r==True])/len(r)\n\n\ndef classification_task():\n with open(\"gradient_descent.pkl\", 'rb') as f:\n gd = pickle.load(f) \n with open(\"gradient_descent_fixed.pkl\", 'rb') as f:\n gd_fixed = pickle.load(f) \n with open(\"gradient_descent_backtracking_line_search.pkl\", 'rb') as f:\n gd_ls = pickle.load(f) \n with open(\"adam.pkl\", 'rb') as f:\n adam = pickle.load(f) \n\n words = [\"grand\", \"assault\", \"smile\", \"praise\", \"thanks\", \"win\", \"loser\", \"winner\", \"fight\", \"arrest\",\n \"happy\", \"mad\", \"upset\", \"unhappiest\", \"evil\", \"poor\", \"love\", \"lover\", \"like\", \"lovely\",\n \"bad\", \"good\", \"great\", \"honored\", \"dead\", \"impressive\", \"fail\", \"fear\", \"broken\", \"criminal\",\n \"unfair\", \"gross\", \"fun\", \"fair\", \"kind\", \"nice\", \"fine\", \"guilty\", \"hate\", \"ill\",\n \"health\", \"paradise\", \"perfect\", \"just\", \"wrong\", \"sunny\", \"rain\", \"special\", \"yes\", \"no\",\n \"old\", \"young\", \"kill\", \"murder\", \"wonderful\", \"game\", \"offensive\", \"quit\", \"accept\", \"deny\"]\n pos = [1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0,\n 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 
1, 0, 0, 1, 0]\n\n    print(str(sum(pos)))\n\n    input_gd = [gd[x] for x in words]\n    input_gd_fixed = [gd_fixed[x] for x in words]\n    input_gd_ls = [gd_ls[x] for x in words]\n    input_adam = [adam[x] for x in words]\n    \n    emb = [input_gd, input_gd_fixed, input_gd_ls, input_adam]\n\n    t = PrettyTable(['Method', 'Accuracy'])\n    t.add_row([\"Gradient Descent\", get_knn_accuracy(input_gd, pos)])\n    t.add_row([\"Gradient Descent - Fixed\", get_knn_accuracy(input_gd_fixed, pos)])\n    t.add_row([\"Gradient Descent - Line Search\", get_knn_accuracy(input_gd_ls, pos)])\n    t.add_row([\"Adam\", get_knn_accuracy(input_adam, pos)])\n    print(t)\n\nif __name__ == \"__main__\":\n    evaluate_embeddings()\n    classification_task()","repo_name":"joseph-bongo-220/Word2Vec_Optimization_Project","sub_path":"evaluate_embeddings.py","file_name":"evaluate_embeddings.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"41154476186","text":"class Solution:\n    def findKthLargest(self, nums: List[int], k: int) -> int:\n        pq = nums[:k]\n        heapq.heapify(pq)\n        \n        for x in nums[k:]:\n            heapq.heappush(pq, x)\n            heapq.heappop(pq)\n        return pq[0]\n    ","repo_name":"tejeshreddy/competitive-programming","sub_path":"215-kth-largest-element-in-an-array/215-kth-largest-element-in-an-array.py","file_name":"215-kth-largest-element-in-an-array.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"29697717845","text":"import random\r\n\r\n\r\ndef get_winners():\r\n    \"\"\"Randomly gets winning numbers\"\"\"\r\n\r\n    numbers = []\r\n\r\n    # Randomly get 5 winning numbers\r\n    for i in range(0, 5):\r\n        if not numbers:\r\n            number = random.randrange(1, 71)\r\n            while number > 25:\r\n                number = random.randrange(1, 71)\r\n            else:\r\n                numbers.append(number)\r\n        else:\r\n            numbers.append(random.randrange(numbers[i - 1], 71))\r\n\r\n    # Randomly get multiplying ball\r\n    numbers.append(random.randrange(1, 25))\r\n\r\n    return numbers\r\n\r\n\r\ndef get_numbers(winning_numbers):\r\n    \"\"\"Randomly get the player's numbers\"\"\"\r\n\r\n    numbers = []\r\n    jackpot = []\r\n    count = 1\r\n\r\n    # Keep loop going while list is empty (not a winner)\r\n    while not jackpot:\r\n        # Randomly get 5 numbers. Use enumeration to index numbers list, create conditions\r\n        for i, h in enumerate(range(0, 5)):\r\n\r\n            # If numbers list is empty (1st number) don't get a number over 25\r\n            if not numbers:\r\n                number = random.randrange(1, 71)\r\n                while number > 25:\r\n                    number = random.randrange(1, 71)\r\n                else:\r\n                    numbers.append(number)\r\n            else:\r\n                # Draw the next number no smaller than the previous one (index i - 1, via the enumeration)\r\n                numbers.append(random.randrange(numbers[i - 1], 71))\r\n\r\n        # Randomly get multiplying ball\r\n        numbers.append(random.randrange(1, 25))\r\n\r\n        print(f'Drawn Numbers: {\"-\".join([str(i) for i in numbers]):<20} | Winning Numbers: {\"-\".join([str(i) for i in winning_numbers])} | Play: {count} | Money Spent ($2 Each): {count * 2}')\r\n        # Check if the player's numbers match the winning numbers. 
Break loop if winner\r\n        if numbers == winning_numbers:\r\n            jackpot.append(f'Matched All 6 - {\"-\".join([str(i) for i in numbers])}')\r\n            break\r\n\r\n        # If loop is not broken, reset the numbers list and add one to games played\r\n        numbers = []\r\n        count += 1\r\n\r\n    # Print jackpot\r\n    print(f'Winner - {\"-\".join([str(i) for i in jackpot])}')\r\n    input(\"Press enter to close\")\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    generate_winning_numbers = get_winners()\r\n    print(generate_winning_numbers)\r\n    get_numbers(generate_winning_numbers)\r\n","repo_name":"cober2019/FunWithLottery","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"35682289886","text":"# Import api related packages\nfrom fastapi import FastAPI, Request, Depends, BackgroundTasks\nfrom fastapi.templating import Jinja2Templates\nimport uvicorn\nimport requests\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.responses import JSONResponse\nfrom typing import List\nfrom preprocessing_api import clean_weather_data\n\n\n# Import database packages\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import Session\nfrom database import SessionLocal, engine\nfrom pydantic import BaseModel\n\n# Import ML model related packages\nimport pandas as pd\nimport joblib, os\nimport engineer_features\n\n\nxbg = joblib.load(\"pretrained_model/xgb_final.pkl\")\n\napp = FastAPI()\n\n\ntemplates = Jinja2Templates(directory = \"static/templates\")\n\n\n# Define return class from prediction:\nclass Prediction(BaseModel):\n    city: str\n    region: str\n    lat: str\n    lon: str\n    datetime: List[datetime]\n    water_stress: List[float]\n    Error: str\n\n\n# Launching database\n# def get_db():\n#     try:\n#         db = SessionLocal()\n#         yield db\n#     finally:\n#         db.close()\n\n# Get location based on client ip-address\ndef get_location(ip_address: str):\n    \"\"\"\n    Args: \n        Client's IP address\n    Output:\n        Client's geolocation\n    \"\"\"\n    try:\n        response = requests.get(\"http://ip-api.com/json/{}\".format(ip_address))\n        js = response.json()\n        region = js['region']\n        city = js['city']\n        lat = js['lat']\n        lon = js['lon']\n        return region, city, lat, lon\n    except Exception as e:\n        return \"Unknown\"\n\n# Get location weather based on client's geolocation \ndef get_weather(lat, lon, lag=23, stride=1):\n    \"\"\"\n    Args: \n        lat, lon: the coordinate of the geolocation\n        stride: the stride at which the moving average is taken, default is 1\n        lag: the total weather history to consider, in units of hours, default is 23\n    Output:\n        feature matrix ready for model inference\n        the datetime index of the feature matrix\n    \"\"\"\n    # Mesonet API calls, the following code fetches weather data at a given location. 
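The commented-out block below sketches a Synoptic Data timeseries request. 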
API keys are hidden...\n # apitoken = ***************\n # radius=str(lat)+','+str(lon)+\",100&limit=1\" \n # variables ='air_temp,relative_humidity,wind_speed,solar_radiation,precip_accum_one_hour'\n # theurl='https://api.synopticdata.com/v2/stations/timeseries?'+'radius='+radius+'recent=120&vars='+variables+'&token='+apitoken\n # theurl='https://api.synopticdata.com/v2/stations/timeseries?stid=kslc'+'&recent=120'+'&token='+apitoken\n # data = requests.get(theurl)\n # json_dict = data.json()\n # req_data = json_dict['STATION'][0]['OBSERVATIONS']\n # df = pd.DataFrame(req_data)\n # df=df.set_index('date_time')\n\n # If we pay for the MeteoBlue history+ and forecast+, we can fetch soil data, Free API is not available, This part is incomplete\n # Use available data stored locally for demostration purpose:\n data = clean_weather_data(\"lib/Napa_weather_data_test.csv\")\n\n # Here, due to the lack of availability of soil data, we used the data from previous year\n # By default, fetch one day history to make inference at next hour. \n start_date = pd.Timestamp.now().round('60min')+relativedelta(hours=-lag)+relativedelta(hours=-stride)+relativedelta(years=-1) \n end_date = pd.Timestamp.now().round('60min')+relativedelta(years=-1)\n hour_range = pd.date_range(start_date, end_date, freq='H')\n data = data[start_date: end_date]\n X_fe = engineer_features.feat_eng(data)\n return X_fe, hour_range\n\ndef get_forecast(lat, lon, period, lag=23, stride=1):\n \"\"\"\n Args: \n lat, lon: the cooridinate of the geolocation\n period: forecast requested by client in unit of hours \n stride: the stride at which moving average is taken, default is 4\n lag: the total weather history to consider in unit of hours, default is 24\n Output:\n weather forecast data from weather api up to forecast period\n \"\"\"\n\n # Note here since I did not pay for forecasting service on the training data, only historical data is used. 
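A deployed version would presumably swap in a real forecast endpoint here.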
\n data = clean_weather_data(\"lib/Napa_weather_data_test.csv\")\n start_date = pd.Timestamp.now().round('60min')+relativedelta(hours=-lag)+relativedelta(hours=-stride)+relativedelta(years=-1)\n end_date = pd.to_datetime(start_date)+pd.DateOffset(hours=period)\n data = data[start_date: end_date]\n hour_range = pd.date_range(start_date, end_date, freq='H')\n X_fe = engineer_features.feat_eng(data)\n return X_fe, hour_range\n\n\n@app.get(\"/\")\ndef home(request: Request): \n \"\"\"\n displays the mainpage of the waterUp api with pictures and graphs for water stress \n \"\"\"\n ip_address = request.client.host\n try: \n (region, city, lat, lon) = get_location(ip_address)\n except ValueError as VE:\n return templates.TemplateResponse(\"home.html\", {\n \"request\": request,\n \"water_stress\": \"Unknown location\", \n \"ip_address\": \"None\",\n \"region\": \"None\",\n \"city\": \"None\",\n \"Error\": \"Unknown\" \n })\n data, hour_range= get_weather(lat, lon)\n prediction = xbg.predict(data)\n water_stress=prediction[-1]\n\n return templates.TemplateResponse(\"home.html\", {\n \"request\": request,\n \"water_stress\": water_stress, \n \"ip_address\": ip_address,\n \"region\": region,\n \"city\": city,\n \"Error\": \"None\"\n })\n\n@app.get(\"/forecast/period={period}\")\nasync def get_prediction(request: Request, period:int):\n ip_address = request.client.host\n try: \n (region, city, lat, lon) = get_location(ip_address)\n data, date_range = get_forecast(lat, lon, period)\n predictions = xbg.predict(data)\n re_dict = Prediction(\n city=city,\n region=region,\n lat=str(lat),\n lon=str(lon),\n datetime=date_range.tolist(),\n water_stress=predictions.tolist(),\n Error=\"None\"\n )\n json_re_dict=jsonable_encoder(re_dict)\n except ValueError as VE:\n re_dict = Prediction(\n city=\"None\",\n region=\"None\",\n lat=\"None\",\n lon=\"None\",\n datetime=[],\n water_stress=[],\n Error = \"Location not found\"\n )\n json_re_dict=jsonable_encoder(re_dict)\n return json_re_dict\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)","repo_name":"hl943/Insight_DS_project","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4218871669","text":"from django.conf.urls import url\nfrom rest_framework import routers\nfrom projects.views import ProjectMetaCreateListView, ProjectMetaUpdateView, DataListView\n\n\nurlpatterns = [\n url(r'^api/projects/$',\n ProjectMetaCreateListView.as_view(),\n name='project_metas'),\n url(r'^api/projects/(?P[0-9]+)/$',\n ProjectMetaUpdateView.as_view(),\n name='project_meta'),\n url(r'^api/project-(?P[0-9]+)/project_dataset_inter/$',\n DataListView.as_view(),\n name='project_dataset_inter'),\n]\n","repo_name":"yty-7/bb_platform","sub_path":"bb_platform/projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71419282184","text":"import string\nfrom pycorenlp import StanfordCoreNLP\nfrom mung.data import DatumReference\nfrom mung.nlp.annotation import Tokens, PoS, Lemmas, Sentences, Strings, Annotator\n\nTYPE_ANNOTATIONS = \"CoreNLPAnnotations\"\n\nSTANFORD_NLP_PORT = 9000\n\nclass CoreNLPAnnotations:\n KEY_TYPE = \"type\"\n KEY_TOKENS = \"tokens\"\n KEY_POS = \"pos\"\n KEY_LEMMAS = \"lemmas\"\n KEY_SENTENCES = \"sents\"\n KEY_CLEAN_STRINGS = \"clean_strs\"\n KEY_TOKEN_STRINGS 
= \"token_strs\"\n\n    def __init__(self, tokens=None, pos=None, lemmas=None, sentences=None, clean_strs=None, token_strs=None):\n        self._tokens = tokens\n        self._pos = pos\n        self._lemmas = lemmas\n        self._sentences = sentences\n        self._clean_strs = clean_strs\n        self._token_strs = token_strs\n\n    def to_dict(self):\n        obj = dict()\n        obj[self.KEY_TYPE] = TYPE_ANNOTATIONS\n\n        if self._tokens is not None:\n            obj[self.KEY_TOKENS] = self._tokens.to_dict()\n        if self._pos is not None:\n            obj[self.KEY_POS] = self._pos.to_dict()\n        if self._lemmas is not None:\n            obj[self.KEY_LEMMAS] = self._lemmas.to_dict()\n        if self._sentences is not None:\n            obj[self.KEY_SENTENCES] = self._sentences.to_dict()\n        if self._clean_strs is not None:\n            obj[self.KEY_CLEAN_STRINGS] = self._clean_strs.to_dict()\n        if self._token_strs is not None:\n            obj[self.KEY_TOKEN_STRINGS] = self._token_strs.to_dict()\n\n        return obj\n\n    @staticmethod\n    def from_dict(datum, obj):\n        if not isinstance(obj, dict) or CoreNLPAnnotations.KEY_TYPE not in obj or obj[CoreNLPAnnotations.KEY_TYPE] != TYPE_ANNOTATIONS:\n            return None\n\n        tokens = None\n        if CoreNLPAnnotations.KEY_TOKENS in obj:\n            tokens = Tokens.from_dict(datum, obj[CoreNLPAnnotations.KEY_TOKENS])\n\n        pos = None\n        if CoreNLPAnnotations.KEY_POS in obj:\n            pos = PoS.from_dict(datum, obj[CoreNLPAnnotations.KEY_POS])\n\n        lemmas = None\n        if CoreNLPAnnotations.KEY_LEMMAS in obj:\n            lemmas = Lemmas.from_dict(datum, obj[CoreNLPAnnotations.KEY_LEMMAS])\n\n        sentences = None\n        if CoreNLPAnnotations.KEY_SENTENCES in obj:\n            sentences = Sentences.from_dict(datum, obj[CoreNLPAnnotations.KEY_SENTENCES])\n\n        clean_strs = None\n        if CoreNLPAnnotations.KEY_CLEAN_STRINGS in obj:\n            clean_strs = Strings.from_dict(datum, obj[CoreNLPAnnotations.KEY_CLEAN_STRINGS])\n\n        token_strs = None\n        if CoreNLPAnnotations.KEY_TOKEN_STRINGS in obj:\n            token_strs = Strings.from_dict(datum, obj[CoreNLPAnnotations.KEY_TOKEN_STRINGS])\n\n        return CoreNLPAnnotations(tokens=tokens, pos=pos, lemmas=lemmas, sentences=sentences, clean_strs=clean_strs, token_strs=token_strs)\n\n\nclass CoreNLPAnnotator(Annotator):\n    def __init__(self, target_path, target_key, store_key):\n        Annotator.__init__(self, target_path, target_key, store_key)\n        self._nlp = StanfordCoreNLP('http://localhost:{}'.format(STANFORD_NLP_PORT))\n        self._printable = set(string.printable)\n\n    def __str__(self):\n        return \"corenlp-3.6+\"\n\n    def _annotate_in_place(self, datum):\n        targets = datum.get(self._target_path, first=False, include_paths=True)\n        for (target_path, target) in targets:\n            text = str(filter(lambda x: x in self._printable, target[self._target_key]))\n            annos = self._annotate_text(datum, text, target_path)\n            obj = annos.to_dict()\n            datum.set(self._store_key, obj, path=target_path)\n        return datum\n\n    # Borrowed from https://github.com/futurulus/coop-nets/blob/master/behavioralAnalysis/tagPOS.ipynb\n    def _annotate_text(self, datum, text, target_path):\n        try:\n            text_ref = DatumReference(datum, target_path + \".\" + self._target_key)\n            tokens_ref = DatumReference(datum, target_path + \".\" + self._store_key + \".\" + CoreNLPAnnotations.KEY_TOKENS)\n            if text.strip() == '':\n                tokens = Tokens(text_ref, spans=[])\n                pos = PoS(tokens_ref, [])\n                lemmas = Lemmas(tokens_ref, [])\n                sentences = Sentences(tokens_ref, [])\n                clean_strs = Strings(tokens_ref, [])\n                return CoreNLPAnnotations(tokens=tokens,pos=pos,lemmas=lemmas,sentences=sentences,clean_strs=clean_strs)\n\n            ann = self._nlp.annotate(\n                text,\n                properties={'annotators': 'pos,lemma',\n                            'outputFormat': 'json'})\n            sent_spans = []\n            spans = []\n            lemma_strs = []\n            pos_strs = []\n            clean_strs = []\n            token_strs = []\n            if isinstance(ann, basestring):\n                ann = json.loads(ann.replace('\\x00', '?').encode('latin-1'), 
encoding='utf-8', strict=True)\n\n token_index = 0\n for sentence in ann['sentences']:\n for token in sentence['tokens']:\n spans.append((token[\"characterOffsetBegin\"], token[\"characterOffsetEnd\"]))\n lemma_strs.append(token['lemma'])\n pos_strs.append(token['pos'])\n\n token_text = text[token[\"characterOffsetBegin\"]:token[\"characterOffsetEnd\"]]\n token_strs.append(token_text)\n\n token_text = token_text.lower()\n if token_text.endswith(\"er\"):\n clean_strs.append(token_text[:-2])\n clean_strs.append(\"-er\")\n elif token_text.endswith(\"est\"):\n clean_strs.append(token_text[:-3])\n clean_strs.append(\"-est\")\n elif token_text.endswith(\"ish\"):\n clean_strs.append(token_text[:-3])\n clean_strs.append(\"-ish\")\n else:\n clean_strs.append(token_text)\n\n sent_spans.append((token_index, token_index + len(sentence['tokens'])))\n token_index += len(sentence['tokens'])\n\n tokens = Tokens(text_ref, spans=spans)\n pos = PoS(tokens_ref, pos_strs)\n lemmas = Lemmas(tokens_ref, lemma_strs)\n sentences = Sentences(tokens_ref, sent_spans)\n clean_strs = Strings(tokens_ref, clean_strs)\n token_strs = Strings(tokens_ref, token_strs)\n\n return CoreNLPAnnotations(tokens=tokens, pos=pos, lemmas=lemmas, sentences=sentences, clean_strs=clean_strs, token_strs=token_strs)\n except Exception as e:\n raise\n","repo_name":"forkunited/mungpy","sub_path":"src/main/py/mung/nlp/corenlp.py","file_name":"corenlp.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73263025864","text":"import unittest\nimport mock\nfrom usquam.resources.task.service import Service\nfrom mongoengine import ValidationError, DoesNotExist\n\nclass TaskServiceTest(unittest.TestCase):\n\n def setUp(self):\n self.service = Service()\n\n @mock.patch(\"usquam.resources.task.service.Task\")\n def test_insert(self, mock_task):\n \"insert should call the save method\"\n self.service.insert({\"name\": \"Hello\"})\n\n self.assertEqual(mock_task.return_value.name, \"Hello\")\n self.assertTrue(mock_task.return_value.save.called)\n\n def test_insert_name_too_long(self):\n \"insert should fail because the name is too long\"\n name = \"Hellodhsfkjahsdfljhaslkdjfhlkashdfasdhflkjahsl\"\n\n task = {\n 'name': name,\n 'requester_id': '1',\n 'time_indication': '1',\n 'reward': '1'\n }\n\n with self.assertRaises(ValidationError) as context:\n self.service.insert(task)\n\n self.assertTrue('String value is too long' in str(context.exception))\n\n @mock.patch(\"usquam.resources.task.service.Task\")\n def test_get(self, mock_task):\n \"get should return the retrieved item\"\n mock_task.objects.get.return_value = {'name': 'Hello'}\n item = self.service.get(\"1\")\n\n self.assertEqual(item, {\"name\":\"Hello\"})\n\n def test_get_wrong_id(self):\n \"get should fail on wrong id\"\n with self.assertRaises(DoesNotExist) as context:\n self.service.get(\"4f4381f4e779897a2c000009\")\n\n @mock.patch(\"usquam.resources.task.service.Task\")\n def test_getAll(self, mock_task):\n \"get should return the retrieved item\"\n mock_task.objects = [{'name': 'Hello'}, {'name': 'World'}]\n items = self.service.getAll()\n\n self.assertEqual(items, [{'name': 'Hello'}, {'name': 'World'}])","repo_name":"Ekula/uSquam_backend","sub_path":"test/unit/resources/test_task_service.py","file_name":"test_task_service.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
+{"seq_id":"72792255944","text":"from collections import Counter\nimport re\n\nclass collectionHandling(object): \n def stringToCountedCollection(self, stringItem, exclusionList): \n stringAsListOfWords = re.findall(r'\\w+', stringItem) \n collectionOfIndividualItems = Counter(stringAsListOfWords)\n\n for word in exclusionList:\n if word in collectionOfIndividualItems:\n del collectionOfIndividualItems[word]\n \n return collectionOfIndividualItems","repo_name":"goatonabicycle/python-lyrics-analisys","sub_path":"_collectionHandling.py","file_name":"_collectionHandling.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34254935709","text":"# -*- coding: utf-8 -*-\n\"\"\"\nShow the effect a rotating spot has on an absorption line.\n\n\"\"\"\nfrom utils import patch_theano\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport starry\nimport paths\n\n# Get the Ylm expansion of a Gaussian spot\nydeg = 20\nN = (ydeg + 1) ** 2\nspot_map = starry.Map(ydeg, lazy=False)\nspot_map.spot(contrast=0.95, radius=20, lat=30, lon=0, spot_smoothing=0.125)\ny = spot_map[:, :].reshape(-1)\n\n# Generate the dataset\nveq = 60000.0 # m/s\nnt = 16\ntheta = np.append([-180], np.linspace(-90, 90, nt - 1))\nmap = starry.DopplerMap(\n ydeg, veq=veq, vsini_max=veq, inc=90, nt=nt, lazy=False\n)\nmap[:, :] = y\nmap.spectrum = 1.0 - np.exp(-0.5 * (map.wav0 - 643.0) ** 2 / 0.0085 ** 2)\nflux = map.flux(theta=theta)\nF0 = flux[0]\nF = flux[1:]\n\n# Render the images\nimg = map.render(theta=theta[1:], res=300)\nassert np.nanmin(img) > 0\n\n# Set up the plot\nfig, ax = plt.subplots(nt - 1, 3, figsize=(6, 12))\nfig.subplots_adjust(hspace=0.3)\ncmap = plt.get_cmap(\"plasma\")\nvmin = np.nanmin(img)\nvmax = np.nanmax(img)\nrng = vmax - vmin\nvmin -= 0.1 * rng\nvmax += 0.1 * rng\n\n# Plot each spectrum\nfor t in range(nt - 1):\n\n # Plot spectrum\n ax[t, 1].plot(map.wav, F0, \"k:\", lw=1, alpha=0.5)\n ax[t, 1].plot(map.wav, F[t], \"k-\")\n ax[t, 1].axis(\"off\")\n\n # Plot residuals\n color = [cmap(x) for x in np.linspace(0.75, 0.0, 5)]\n lw = np.linspace(2.5, 0.5, 5)\n alpha = np.linspace(0.25, 1, 5)\n for i in range(5):\n ax[t, 2].plot(\n map.wav,\n F[t] - F0,\n ls=\"-\",\n lw=lw[i],\n color=color[i],\n alpha=alpha[i],\n )\n ax[t, 2].axis(\"off\")\n ax[t, 2].set_ylim(-0.022, 0.022)\n\n # Plot current stellar image\n ax[t, 0].imshow(\n img[t],\n origin=\"lower\",\n extent=(-1, 1, -1, 1),\n cmap=cmap,\n vmin=vmin,\n vmax=vmax,\n )\n x = np.linspace(-1, 1, 3000)\n y = np.sqrt(1 - x ** 2)\n ax[t, 0].plot(0.999 * x, 0.999 * y, \"k-\", lw=0.5, zorder=100)\n ax[t, 0].plot(0.999 * x, -0.999 * y, \"k-\", lw=0.5, zorder=100)\n ax[t, 0].set_xlim(-3, 1.05)\n ax[t, 0].set_ylim(-1.05, 1.05)\n ax[t, 0].axis(\"off\")\n\nax[0, 1].set_title(\"spectrum\", y=1.4)\nax[0, 2].set_title(\"difference\", y=1.4)\nfig.savefig(paths.figures / \"spot.pdf\", bbox_inches=\"tight\", dpi=300)\n","repo_name":"rodluger/paparazzi","sub_path":"src/scripts/spot.py","file_name":"spot.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"21150339609","text":"from __future__ import print_function\nfrom _adb import Adb\nfrom _benchresult import BenchResult\nfrom _hardware import HardwareException, Hardware\nfrom argparse import ArgumentParser\nfrom multiprocessing import Queue\nfrom threading import Thread, Timer\nimport collections\nimport glob\nimport 
math\nimport re\nimport subprocess\nimport sys\nimport time\n\n__argparse = ArgumentParser(description=\"\"\"\n\nExecutes the skpbench binary with various configs and skps.\n\nAlso monitors the output in order to filter out and re-run results that have an\nunacceptable stddev.\n\n\"\"\")\n\n__argparse.add_argument('skpbench',\n help=\"path to the skpbench binary\")\n__argparse.add_argument('--adb',\n action='store_true', help=\"execute skpbench over adb\")\n__argparse.add_argument('--adb_binary', default='adb',\n help=\"The name of the adb binary to use.\")\n__argparse.add_argument('-s', '--device-serial',\n help=\"if using adb, ID of the specific device to target \"\n \"(only required if more than 1 device is attached)\")\n__argparse.add_argument('-m', '--max-stddev',\n type=float, default=4,\n help=\"initial max allowable relative standard deviation\")\n__argparse.add_argument('-x', '--suffix',\n help=\"suffix to append on config (e.g. '_before', '_after')\")\n__argparse.add_argument('-w','--write-path',\n help=\"directory to save .png proofs to disk.\")\n__argparse.add_argument('-v','--verbosity',\n type=int, default=1, help=\"level of verbosity (0=none to 5=debug)\")\n__argparse.add_argument('-d', '--duration',\n type=int, help=\"number of milliseconds to run each benchmark\")\n__argparse.add_argument('-l', '--sample-ms',\n type=int, help=\"duration of a sample (minimum)\")\n__argparse.add_argument('--gpu',\n action='store_true',\n help=\"perform timing on the gpu clock instead of cpu (gpu work only)\")\n__argparse.add_argument('--fps',\n action='store_true', help=\"use fps instead of ms\")\n__argparse.add_argument('--pr',\n help=\"comma- or space-separated list of GPU path renderers, including: \"\n \"[[~]all [~]default [~]dashline [~]msaa [~]aaconvex \"\n \"[~]aalinearizing [~]small [~]tess]\")\n__argparse.add_argument('--cc',\n action='store_true', help=\"allow coverage counting shortcuts to render paths\")\n__argparse.add_argument('--nocache',\n action='store_true', help=\"disable caching of path mask textures\")\n__argparse.add_argument('--allPathsVolatile',\n action='store_true',\n help=\"Causes all GPU paths to be processed as if 'setIsVolatile' had been called.\")\n__argparse.add_argument('-c', '--config',\n default='gl', help=\"comma- or space-separated list of GPU configs\")\n__argparse.add_argument('-a', '--resultsfile',\n help=\"optional file to append results into\")\n__argparse.add_argument('--ddl',\n action='store_true', help=\"record the skp into DDLs before rendering\")\n__argparse.add_argument('--lock-clocks',\n action='store_true', help=\"Put device in benchmarking mode (locked clocks, no other processes)\")\n__argparse.add_argument('--clock-speed',\n type=float, default=66.0, help=\"A number between 0 and 100 indicating how fast to lock the CPU and GPU clock.\"\n \"Valid speeds are chosen from their respective available frequencies list.\")\n__argparse.add_argument('--ddlNumRecordingThreads',\n type=int, default=0,\n help=\"number of DDL recording threads (0=num_cores)\")\n__argparse.add_argument('--ddlTilingWidthHeight',\n type=int, default=0, help=\"number of tiles along one edge when in DDL mode\")\n__argparse.add_argument('--dontReduceOpsTaskSplitting',\n action='store_true', help=\"don't reorder GPU tasks to reduce render target swaps\")\n__argparse.add_argument('--gpuThreads',\n type=int, default=-1,\n help=\"Create this many extra threads to assist with GPU work, including\"\n \" software path rendering. 
Defaults to two.\")\n__argparse.add_argument('--internalSamples',\n type=int, default=-1,\n help=\"Number of samples for internal draws that use MSAA.\")\n__argparse.add_argument('srcs',\n nargs='+',\n help=\".skp files or directories to expand for .skp files, and/or .svg files\")\n__argparse.add_argument('--gpuResourceCacheLimit',\n type=int, default=-1,\n help=\"Maximum number of bytes to use for budgeted GPU resources.\")\n\nFLAGS = __argparse.parse_args()\nif FLAGS.adb:\n import _adb_path as _path\n _path.init(FLAGS.device_serial, FLAGS.adb_binary)\nelse:\n import _os_path as _path\n\ndef dump_commandline_if_verbose(commandline):\n if FLAGS.verbosity >= 5:\n quoted = ['\\'%s\\'' % re.sub(r'([\\\\\\'])', r'\\\\\\1', x) for x in commandline]\n print(' '.join(quoted), file=sys.stderr)\n\n\nclass StddevException(Exception):\n pass\n\nclass Message:\n READLINE = 0,\n POLL_HARDWARE = 1,\n EXIT = 2\n def __init__(self, message, value=None):\n self.message = message\n self.value = value\n\nclass SubprocessMonitor(Thread):\n def __init__(self, queue, proc):\n self._queue = queue\n self._proc = proc\n Thread.__init__(self)\n\n def run(self):\n \"\"\"Runs on the background thread.\"\"\"\n for line in iter(self._proc.stdout.readline, b''):\n self._queue.put(Message(Message.READLINE, line.decode('utf-8').rstrip()))\n self._queue.put(Message(Message.EXIT))\n\nclass SKPBench:\n ARGV = [FLAGS.skpbench, '--verbosity', str(FLAGS.verbosity)]\n if FLAGS.duration:\n ARGV.extend(['--duration', str(FLAGS.duration)])\n if FLAGS.sample_ms:\n ARGV.extend(['--sampleMs', str(FLAGS.sample_ms)])\n if FLAGS.gpu:\n ARGV.extend(['--gpuClock', 'true'])\n if FLAGS.fps:\n ARGV.extend(['--fps', 'true'])\n if FLAGS.pr:\n ARGV.extend(['--pr'] + re.split(r'[ ,]', FLAGS.pr))\n if FLAGS.cc:\n ARGV.extend(['--cc', 'true'])\n if FLAGS.nocache:\n ARGV.extend(['--cachePathMasks', 'false'])\n if FLAGS.allPathsVolatile:\n ARGV.extend(['--allPathsVolatile', 'true'])\n if FLAGS.gpuThreads != -1:\n ARGV.extend(['--gpuThreads', str(FLAGS.gpuThreads)])\n if FLAGS.internalSamples != -1:\n ARGV.extend(['--internalSamples', str(FLAGS.internalSamples)])\n\n # DDL parameters\n if FLAGS.ddl:\n ARGV.extend(['--ddl', 'true'])\n if FLAGS.ddlNumRecordingThreads:\n ARGV.extend(['--ddlNumRecordingThreads',\n str(FLAGS.ddlNumRecordingThreads)])\n if FLAGS.ddlTilingWidthHeight:\n ARGV.extend(['--ddlTilingWidthHeight', str(FLAGS.ddlTilingWidthHeight)])\n\n if FLAGS.dontReduceOpsTaskSplitting:\n ARGV.extend(['--dontReduceOpsTaskSplitting'])\n\n if FLAGS.gpuResourceCacheLimit:\n ARGV.extend(['--gpuResourceCacheLimit', str(FLAGS.gpuResourceCacheLimit)])\n\n if FLAGS.adb:\n if FLAGS.device_serial is None:\n ARGV[:0] = [FLAGS.adb_binary, 'shell']\n else:\n ARGV[:0] = [FLAGS.adb_binary, '-s', FLAGS.device_serial, 'shell']\n\n @classmethod\n def get_header(cls, outfile=sys.stdout):\n commandline = cls.ARGV + ['--duration', '0']\n dump_commandline_if_verbose(commandline)\n out = subprocess.check_output(commandline, stderr=subprocess.STDOUT, encoding='utf-8')\n return out.rstrip()\n\n @classmethod\n def run_warmup(cls, warmup_time, config):\n if not warmup_time:\n return\n print('running %i second warmup...' 
% warmup_time, file=sys.stderr)\n commandline = cls.ARGV + ['--duration', str(warmup_time * 1000),\n '--config', config,\n '--src', 'warmup']\n dump_commandline_if_verbose(commandline)\n output = subprocess.check_output(commandline, stderr=subprocess.STDOUT, encoding='utf-8')\n\n # validate the warmup run output.\n for line in output.split('\\n'):\n match = BenchResult.match(line.rstrip())\n if match and match.bench == 'warmup':\n return\n raise Exception('Invalid warmup output:\\n%s' % output)\n\n def __init__(self, src, config, max_stddev, best_result=None):\n self.src = src\n self.config = config\n self.max_stddev = max_stddev\n self.best_result = best_result\n self._queue = Queue()\n self._proc = None\n self._monitor = None\n self._hw_poll_timer = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n if self._proc:\n self.terminate()\n if self._hw_poll_timer:\n self._hw_poll_timer.cancel()\n\n def execute(self, hardware):\n hardware.sanity_check()\n self._schedule_hardware_poll()\n\n commandline = self.ARGV + ['--config', self.config,\n '--src', self.src,\n '--suppressHeader', 'true']\n if FLAGS.write_path:\n pngfile = _path.join(FLAGS.write_path, self.config,\n _path.basename(self.src) + '.png')\n commandline.extend(['--png', pngfile])\n dump_commandline_if_verbose(commandline)\n self._proc = subprocess.Popen(commandline, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n self._monitor = SubprocessMonitor(self._queue, self._proc)\n self._monitor.start()\n\n while True:\n message = self._queue.get()\n if message.message == Message.READLINE:\n result = BenchResult.match(message.value)\n if result:\n hardware.sanity_check()\n self._process_result(result)\n elif hardware.filter_line(message.value):\n print(message.value, file=sys.stderr)\n continue\n if message.message == Message.POLL_HARDWARE:\n hardware.sanity_check()\n self._schedule_hardware_poll()\n continue\n if message.message == Message.EXIT:\n self._monitor.join()\n self._proc.wait()\n if self._proc.returncode != 0:\n raise Exception(\"skpbench exited with nonzero exit code %i\" %\n self._proc.returncode)\n self._proc = None\n break\n\n def _schedule_hardware_poll(self):\n if self._hw_poll_timer:\n self._hw_poll_timer.cancel()\n self._hw_poll_timer = \\\n Timer(1, lambda: self._queue.put(Message(Message.POLL_HARDWARE)))\n self._hw_poll_timer.start()\n\n def _process_result(self, result):\n if not self.best_result or result.stddev <= self.best_result.stddev:\n self.best_result = result\n elif FLAGS.verbosity >= 2:\n print(\"reusing previous result for %s/%s with lower stddev \"\n \"(%s%% instead of %s%%).\" %\n (result.config, result.bench, self.best_result.stddev,\n result.stddev), file=sys.stderr)\n if self.max_stddev and self.best_result.stddev > self.max_stddev:\n raise StddevException()\n\n def terminate(self):\n if self._proc:\n self._proc.terminate()\n self._monitor.join()\n self._proc.wait()\n self._proc = None\n\ndef emit_result(line, resultsfile=None):\n print(line)\n sys.stdout.flush()\n if resultsfile:\n print(line, file=resultsfile)\n resultsfile.flush()\n\ndef run_benchmarks(configs, srcs, hardware, resultsfile=None):\n hasheader = False\n benches = collections.deque([(src, config, FLAGS.max_stddev)\n for src in srcs\n for config in configs])\n while benches:\n try:\n with hardware:\n SKPBench.run_warmup(hardware.warmup_time, configs[0])\n if not hasheader:\n emit_result(SKPBench.get_header(), resultsfile)\n hasheader = True\n while benches:\n benchargs = 
benches.popleft()\n with SKPBench(*benchargs) as skpbench:\n try:\n skpbench.execute(hardware)\n if skpbench.best_result:\n emit_result(skpbench.best_result.format(FLAGS.suffix),\n resultsfile)\n else:\n print(\"WARNING: no result for %s with config %s\" %\n (skpbench.src, skpbench.config), file=sys.stderr)\n\n except StddevException:\n retry_max_stddev = skpbench.max_stddev * math.sqrt(2)\n if FLAGS.verbosity >= 1:\n print(\"stddev is too high for %s/%s (%s%%, max=%.2f%%), \"\n \"re-queuing with max=%.2f%%.\" %\n (skpbench.best_result.config, skpbench.best_result.bench,\n skpbench.best_result.stddev, skpbench.max_stddev,\n retry_max_stddev),\n file=sys.stderr)\n benches.append((skpbench.src, skpbench.config, retry_max_stddev,\n skpbench.best_result))\n\n except HardwareException as exception:\n skpbench.terminate()\n if FLAGS.verbosity >= 4:\n hardware.print_debug_diagnostics()\n if FLAGS.verbosity >= 1:\n print(\"%s; rebooting and taking a %i second nap...\" %\n (exception.message, exception.sleeptime), file=sys.stderr)\n benches.appendleft(benchargs) # retry the same bench next time.\n raise # wake hw up from benchmarking mode before the nap.\n\n except HardwareException as exception:\n time.sleep(exception.sleeptime)\n\ndef main():\n # Delimiter is ',' or ' ', skip if nested inside parens (e.g. gpu(a=b,c=d)).\n DELIMITER = r'[, ](?!(?:[^(]*\\([^)]*\\))*[^()]*\\))'\n configs = re.split(DELIMITER, FLAGS.config)\n srcs = _path.find_skps(FLAGS.srcs)\n assert srcs\n\n\n if FLAGS.adb:\n adb = Adb(FLAGS.device_serial, FLAGS.adb_binary,\n echo=(FLAGS.verbosity >= 5))\n from _hardware_android import HardwareAndroid\n\n model = adb.check('getprop ro.product.model').strip()\n if model == 'Pixel C':\n from _hardware_pixel_c import HardwarePixelC\n hardware = HardwarePixelC(adb)\n elif model == 'Pixel' or model == \"Pixel XL\":\n from _hardware_pixel import HardwarePixel\n hardware = HardwarePixel(adb)\n elif model == 'Pixel 2':\n from _hardware_pixel2 import HardwarePixel2\n hardware = HardwarePixel2(adb)\n elif model == 'Nexus 6P':\n from _hardware_nexus_6p import HardwareNexus6P\n hardware = HardwareNexus6P(adb)\n else:\n print(\"WARNING: %s: don't know how to monitor this hardware; results \"\n \"may be unreliable.\" % model, file=sys.stderr)\n hardware = HardwareAndroid(adb)\n\n if FLAGS.lock_clocks:\n hardware.__enter__()\n print(\"Entered benchmarking mode, not running benchmarks. 
Reboot to restore.\");\n return;\n\n if FLAGS.clock_speed:\n hardware.setDesiredClock(FLAGS.clock_speed)\n else:\n hardware = Hardware()\n\n if FLAGS.resultsfile:\n with open(FLAGS.resultsfile, mode='a+') as resultsfile:\n run_benchmarks(configs, srcs, hardware, resultsfile=resultsfile)\n else:\n run_benchmarks(configs, srcs, hardware)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"google/skia","sub_path":"tools/skpbench/skpbench.py","file_name":"skpbench.py","file_ext":"py","file_size_in_byte":14389,"program_lang":"python","lang":"en","doc_type":"code","stars":8112,"dataset":"github-code","pt":"81"} +{"seq_id":"41198775187","text":"class Node():\n def __init__(self,name,value):\n self.name = name\n self.value = value\n self.next = None\nclass Stack():\n def __init__(self):\n self.top = None\n self.bottom = None\n self.length = 0\n def peek(self):\n return self.top.name\n def push(self,node):\n if self.length == 0:\n self.top = node\n self.bottom = node\n else:\n tmp = self.top\n self.top = node\n self.top.next = tmp\n self.length+=1\n def pop(self):\n self.length-=1\n if self.top == self.bottom:\n tmp = self.top\n self.bottom = None\n self.top = None\n else:\n tmp = self.top\n self.top = self.top.next\n return tmp\n def isEmpty(self):\n if not self.top and not self.bottom:\n return True\n else:\n return False\n\nif __name__ == \"__main__\":\n a = Node('a',1)\n b = Node('b',2)\n c = Node('c',3)\n d = Node('d',4)\n mystack = Stack()\n mystack.push(a)\n mystack.push(b)\n mystack.push(c)\n mystack.push(d)\n print(mystack.isEmpty())\n print(mystack.peek())\n print(mystack.pop().name)\n print(mystack.peek())\n print(mystack.pop().name)\n print(mystack.peek())\n print(mystack.pop().name)\n print(mystack.peek())\n print(mystack.pop().name)\n #print(mystack.peek())\n print(mystack.isEmpty())\n\n\n","repo_name":"yuan1z/Python-Fun-Projects","sub_path":"Datastructure&algorithm/venv/stack_linkedlist.py","file_name":"stack_linkedlist.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8597546062","text":"from django import forms\n\nfrom user_profile.models import User\nfrom .models import Quota\n\n\nclass QuotaCreateForm(forms.ModelForm):\n\n class Meta:\n model = Quota\n fields = (\n 'type',\n 'expire',\n )\n\n\nclass QuotaUpdateForm(forms.ModelForm):\n\n class Meta:\n model = Quota\n fields = (\n 'type',\n 'expire',\n )\n\n def __init__(self, pk, *args, **kwargs):\n super(QuotaUpdateForm, self).__init__(*args, **kwargs)\n user = User.objects.get(pk=pk)\n qouta = user.qoutas.get(id=1)\n self.fields['type'].initial = qouta.type\n self.fields['expire'].initial = qouta.expire\n","repo_name":"Denworc/profi_control","sub_path":"privileges/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39961848717","text":"from model.group import Group\nimport random\nimport allure\n\ndef test_modify_group(app, db, check_ui):\n with allure.step('Given a group list. 
Empty - add group'):\n if len(db.get_group_list()) == 0:\n app.group.init_group(Group(group_name=\"test-test\"))\n with allure.step('Given a group list'):\n old_groups = db.get_group_list()\n with allure.step('Given random group from the list'):\n group = random.choice(old_groups)\n index = old_groups.index(group)\n group1 = Group(group_name=\"modify_group\")\n group1.id = group.id\n with allure.step('When i edit a group %s to the list' % group1):\n app.group.modify_group_by_id(group.id, group1)\n with allure.step('Then the new group list is equal to the old list with the edit group'):\n new_groups = db.get_group_list()\n assert len(old_groups) == len(new_groups)\n old_groups[index] = group1\n if check_ui:\n assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)\n","repo_name":"annadenils/python_training","sub_path":"test/test_modify_group.py","file_name":"test_modify_group.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1324001267","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 16 12:38:46 2023\r\n\r\n@author: anush\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread(\"E:\\data\\hand.jpg\")\r\nimg =cv2.resize(img,(600,700))\r\nimg1 = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n\r\nblur =cv2.medianBlur(img1,9)\r\n_,thresh =cv2.threshold(img1,240,255,cv2.THRESH_BINARY_INV)\r\n\r\n\r\n\r\n#findContours(img,countour_retrival_mode,method)\r\ncnts,hier =cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n#cv2.drawContours(img,cnts,-1,(50,50,255),2)\r\n\r\nprint(\"Number of contours\",cnts,\"\\nTotal contour =\",len(cnts))\r\nprint(\"Hierarchy==\\n\",hier)\r\n\r\nfor c in cnts:\r\n epsilon =0.0001*cv2.arcLength(c,True)\r\n data =cv2.approxPolyDP(c,epsilon,True)\r\n \r\n hull =cv2.convexHull(data)\r\n \r\n cv2.drawContours(img,[c],-1,(50,50,150),2)\r\n cv2.drawContours(img,[hull],-1,(0,255,0),2)\r\n \r\n \r\n#find convexity defect\r\nhull2 =cv2.convexHull(cnts[0],returnPoints = False)\r\n#defects returns an array which contains value [start_point,end_point,farthest_point,approx_point]\r\ndefect =cv2.convexityDefects(cnts[0],hull2)\r\n\r\nfor i in range(defect.shape[0]):\r\n s,e,f,d =defect[i,0]\r\n print(s,e,f,d)\r\n start =tuple(c[s][0])\r\n end =tuple(c[e][0])\r\n far =tuple(c[f][0])\r\n cv2.circle(img,far,5,[0,0,255],-1)\r\n \r\n \r\n#EXTREME POINTS\r\n'''it means topmost ,bottom,right,left\r\n\r\nc_max = max(cnts,key=cv2.contourArea)\r\n\r\n#determine the most extreme points\r\nextLeft =tuple(c_max[c_max[:,:,0].argmin()][0])\r\n'''\r\n \r\n \r\ncv2.imshow(\"original\",img)\r\ncv2.imshow(\"gray==\",img1)\r\ncv2.imshow('thresh',thresh)\r\n\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"ANUSHRAV01/spyder-project","sub_path":"spyder projects/demo18_hand_detection_contour.py","file_name":"demo18_hand_detection_contour.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34584218438","text":"import urllib.request\n\nresponse = urllib.request.urlopen('https://s3-eu-west-1.amazonaws.com/knowit-julekalender-2018/input-vekksort.txt')\nnumbers = []\nfor line in response:\n\tnumbers.append(int(line))\n\ndef lis(arr): \n\tn = len(arr) \n\n# Declare the list (array) for LIS and initialize LIS \n# values for all indexes \n\tlis = [1]*n \n\n# Compute optimized LIS values in bottom up manner 
\n\tfor i in range (1 , n): \n\t\tfor j in range(0 , i): \n\t\t\tif arr[i] >= arr[j] and lis[i] < lis[j] + 1 : \n\t\t\t\tlis[i] = lis[j]+1\n\n# Initialize maximum to 0 to get the maximum of all \n# LIS \n\tmaximum = 0\n\n# Pick maximum of all LIS values \n\tfor i in range(n): \n\t\tmaximum = max(maximum , lis[i]) \n\tprint(lis)\n\treturn maximum \n# end of lis function \nx = numbers\ny=[x[i]+i/len(x) for i in range(len(x))]\nprint(len(y))\n#print(lis([1, 1, 2, 3, 4, 5, 7, 6, 6, 7, 8]))","repo_name":"Mortefal/Julekalender","sub_path":"Luke 07/niceStalin.py","file_name":"niceStalin.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1325174107","text":"import sys\nimport collections\nimport itertools\n\nimport numpy as np\n\nfrom scipy.stats import mode\nfrom multiprocessing import Pool\n\nfrom dtw import dtw_distance\n\nimport copyreg\nimport types\n\n\ndef _reduce_method(m):\n if m.im_self is None:\n return getattr, (m.im_class, m.im_func.func_name)\n else:\n return getattr, (m.im_self, m.im_func.func_name)\ncopyreg.pickle(types.MethodType, _reduce_method)\n\nclass KnnDtw(object):\n \n def __init__(self, k_neighbours=5, max_warping_window=10000):\n self.k_neighbours = k_neighbours\n self.max_warping_window = max_warping_window\n \n # Public Methods\n\n def fit(self, x_training_data, x_labels): \n self.x_training_data = x_training_data\n self.x_labels = x_labels\n \n def predict(self, x, parallel=True):\n\n if parallel: \n p = Pool(3)\n distance_matrix = []\n\n jobs = [ (x, [flight]) for flight in self.x_training_data ]\n parallel_dist = p.map(self._map_single_distance_matrix, jobs)\n \n distance_matrix = np.array([parallel_dist])\n #print(distance_matrix)\n\n else:\n \n distance_matrix = self._distance_matrix(x, self.x_training_data)\n #print(distance_matrix)\n # Retrieve the k nearest neighbours\n # distance_matrix.argsort()\n # Sort the list distance_matrix and returns the sorted indices\n # [:, :self.k_neighbours]\n # returns only the last k neighbours\n knn_indices = distance_matrix.argsort()[:, :self.k_neighbours]\n\n # Retrieve the k nearest labels with the indices\n knn_labels = self.x_labels[knn_indices]\n \n # Compute labels and probabilities using the mode (majority vote) ????\n mode_data = mode(knn_labels, axis=1)\n\n result_label = mode_data[0]\n result_probability = mode_data[1] / self.k_neighbours\n\n # Return tuple. 
Ravel is a numpy function that flattens an array.\n # Doc: http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.ravel.html.\n return result_label.ravel(), result_probability.ravel()\n\n\n def _map_single_distance_matrix(self, job_tuple):\n dtw_result = self._distance_matrix(job_tuple[0], job_tuple[1], False)[0]\n return dtw_result[0]\n \n def _distance_matrix(self, x, y, show_progress=True):\n count = 0\n\n x_shape = np.shape(x)\n y_shape = np.shape(y)\n\n distance_matrix = np.zeros((x_shape[0], y_shape[0])) \n distance_matrix_size = x_shape[0] * y_shape[0]\n\n for i in range(0, x_shape[0]):\n for j in range(0, y_shape[0]):\n # Compute DTW\n distance_matrix[i, j] = dtw_distance(x[i], y[j], self.max_warping_window)\n\n # Update progress\n count += 1\n\n if show_progress and count % 50 == 0:\n self._show_progress(distance_matrix_size, count)\n \n\n return distance_matrix\n \n def _show_progress(self, n, i):\n print('\\r%d/%d %f %%' % (i,n, (float(i)/float(n))*100.0))\n sys.stdout.flush()\n\n\n","repo_name":"anushmanukyan/online-degradation-identification-uav","sub_path":"kNNDTW.py","file_name":"kNNDTW.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30046032501","text":"import sys\nfrom functools import partial\nimport pyautogui\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QLabel, QPushButton, QMessageBox\nfrom PyQt5.QtCore import Qt\n\n\nclass MainScreen(QMainWindow):\n def __init__(self):\n super(MainScreen, self).__init__()\n\n self.setWindowTitle(\"Calculator\")\n w, h = pyautogui.size()\n\n self.setGeometry(int(w/2), int(h/2), int(w-w/1.5), int(w-w/1.5))\n self.setFont(QFont('ariel', 15))\n self.label = QLabel(self)\n\n self.uicomponents()\n self.show()\n\n def uicomponents(self):\n\n self.label.setGeometry(5, 5, 350, 70)\n self.label.setStyleSheet(\"QLabel\"\n \"{\"\n \"border : 4px solid grey;\"\n \"background : white;\"\n \"}\")\n self.label.setAlignment(Qt.AlignRight)\n\n self.init(\"1\", 5, 150, 80, 40)\n self.init(\"2\", 95, 150, 80, 40)\n self.init(\"3\", 185, 150, 80, 40)\n self.init(\"5\", 95, 200, 80, 40)\n self.init(\"4\", 5, 200, 80, 40)\n self.init(\"6\", 185, 200, 80, 40)\n self.init(\"7\", 5, 250, 80, 40)\n self.init(\"8\", 95, 250, 80, 40)\n self.init(\"9\", 185, 250, 80, 40)\n self.init(\"0\", 5, 300, 80, 40)\n\n self.init(\"+\", 275, 250, 80, 40)\n self.init(\"-\", 275, 200, 80, 40)\n self.init(\"*\", 275, 150, 80, 40)\n self.init(\"/\", 185, 300, 80, 40)\n self.init(\".\", 95, 300, 80, 40)\n\n self.init(\"=\", 275, 300, 80, 40)\n self.init(\"clear\", 5, 100, 200, 40)\n self.init(\"del\", 210, 100, 145, 40)\n\n def init(self, val, xval, yval, hval, wval):\n btnname = QPushButton(val, self)\n btnname.setGeometry(xval, yval, hval, wval)\n if val == \"=\":\n btnname.clicked.connect(self.action_equal)\n elif val == \"clear\":\n btnname.clicked.connect(self.action_clear)\n elif val == \"del\":\n btnname.clicked.connect(self.action_del)\n else:\n btnname.clicked.connect(partial(self.act, val))\n\n def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key_1:\n text = self.label.text()\n self.label.setText(text + \"1\")\n elif event.key() == QtCore.Qt.Key_2:\n text = self.label.text()\n self.label.setText(text + \"2\")\n elif event.key() == QtCore.Qt.Key_3:\n text = self.label.text()\n self.label.setText(text + \"3\")\n elif event.key() == QtCore.Qt.Key_4:\n text = self.label.text()\n 
self.label.setText(text + \"4\")\n elif event.key() == QtCore.Qt.Key_5:\n text = self.label.text()\n self.label.setText(text + \"5\")\n elif event.key() == QtCore.Qt.Key_6:\n text = self.label.text()\n self.label.setText(text + \"6\")\n elif event.key() == QtCore.Qt.Key_7:\n text = self.label.text()\n self.label.setText(text + \"7\")\n elif event.key() == QtCore.Qt.Key_8:\n text = self.label.text()\n self.label.setText(text + \"8\")\n elif event.key() == QtCore.Qt.Key_9:\n text = self.label.text()\n self.label.setText(text + \"9\")\n elif event.key() == QtCore.Qt.Key_0:\n text = self.label.text()\n self.label.setText(text + \"0\")\n elif event.key() == QtCore.Qt.Key_Plus:\n text = self.label.text()\n self.label.setText(text + \"+\")\n elif event.key() == QtCore.Qt.Key_Minus:\n text = self.label.text()\n self.label.setText(text + \"-\")\n elif event.key() == QtCore.Qt.Key_multiply:\n text = self.label.text()\n self.label.setText(text + \"*\")\n elif event.key() == QtCore.Qt.Key_division:\n text = self.label.text()\n self.label.setText(text + \"/\")\n elif event.key() == QtCore.Qt.Key_Period:\n text = self.label.text()\n self.label.setText(text + \".\")\n elif event.key() == QtCore.Qt.Key_Backspace:\n text = self.label.text()\n self.label.setText(text[:len(text) - 1])\n elif event.key() == QtCore.Qt.Key_Delete:\n self.label.setText(\"\")\n elif event.key() == QtCore.Qt.Key_Enter:\n equation = self.label.text()\n\n try:\n ans = eval(equation)\n self.label.setText(str(ans))\n\n except Exception as e:\n self.label.setText(str(e))\n\n def act(self, val):\n text = self.label.text()\n self.label.setText(text + val)\n\n def action_equal(self):\n # get the label text\n equation = self.label.text()\n\n try:\n # getting the ans\n print(equation)\n print(\"type:\", type(equation))\n ans = eval(equation)\n print(type(ans))\n # setting text to the label\n self.label.setText(str(ans))\n # self.label.setPlaceholderText(str(ans))\n\n except Exception as e:\n self.label.setText(str(e))\n # self.msg_box(str(e))\n\n def action_clear(self):\n # clearing the label text\n self.label.setText(\"\")\n\n def action_del(self):\n # clearing a single digit\n text = self.label.text()\n self.label.setText(text[:len(text) - 1])\n\n def msg_box(self, msg):\n\n ms_box = QMessageBox(self)\n ms_box.setText(msg)\n\n ms_box.setStyleSheet(\"QMessageBox {background: #F2EBE9;} \"\n \"QPushButton { QPushButton {background: #063406; color:white; \"\n \"border-radius:4px;border: #27ae60 1px solid;}\")\n ms_box.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n ms_box.exec()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n app.setStyle('breeze')\n\n obj = MainScreen()\n sys.exit(app.exec())\n","repo_name":"AjitRatadiya/daily","sub_path":"gui/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3148620371","text":"import os\nfrom elasticsearch import Elasticsearch\nimport json\nimport time\nimport logging\n\nMAX_ATTEMPTS = 10\nATTEMPT_TIMEOUT = 20\n\ndef get_elastic_ready(host, port):\n es = Elasticsearch(f'http://{host}:{port}')\n\n clinvar_dir = os.path.join(os.path.dirname(__file__), 'data/clinvar_filtered.json')\n with open(clinvar_dir, 'r') as file:\n clinvar = json.load(file)\n\n actions = []\n for i, row in enumerate(clinvar):\n action = {\"index\": {\"_index\": \"clinvar\", \"_id\": i}}\n actions.append(action)\n actions.append(row)\n\n attempt_n = 1\n index_loaded = 
False\n while not index_loaded:\n try:\n logging.info(f'Loading elasticsearch index attempt #{attempt_n}')\n es.bulk(index=\"clinvar\", operations=actions)\n # print('Success!!!')\n logging.info(f'Elasticsearch index loaded successfully!')\n index_loaded = True\n except Exception as e:\n logging.info(f'Loading elasticsearch index attempt #{attempt_n} failed')\n if attempt_n > MAX_ATTEMPTS:\n raise e\n attempt_n += 1\n time.sleep(ATTEMPT_TIMEOUT)\n return es","repo_name":"mikgur/fastapi_elastic","sub_path":"api/elastic_index.py","file_name":"elastic_index.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"84704989","text":"T = int(input())\nPeak = list(map(int, input().split(\" \")))\ncnt = 0\nanswer = 0\nresult=0\nstate = Peak[0]\nfor i in range(1, len(Peak)):\n if (state > Peak[i]): # 현재값이 오른쪽값보다 크면서, 다음 봉우리가 있을때\n cnt += 1\n\n else: # 현재 봉우리보다 다음 봉우리가 높을때\n state = Peak[i] # 이전 봉우리보다 높은 봉우리로 기준 갱신\n if (answer < cnt): # 여태 최고 처치수보다 클때 최고 처치수 갱신\n answer = cnt\n cnt = 0\n result=max(answer,cnt)#이전 최고 처치수와 마지막 처치수 비교\n\n\nprint(result)\n","repo_name":"Areum0921/Abox","sub_path":"BOJ greedy/BOJ 14659.py","file_name":"BOJ 14659.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1611123881","text":"class Graph:\n def __init__(self, V) -> None:\n #V - len of vertices\n self._V = V\n self._E = 0\n self._adjList = []\n for i in range(0, V):\n self._adjList.append([False] * V)\n \n def V(self):\n return self._V\n \n def E(self):\n return self._E\n \n def addEdge(self, u, v):\n self._validateVertex(u)\n self._validateVertex(v)\n if not self._adjList[u][v]:\n self._E += 1\n #since it is an undirected graph, we must add edges from u to v and v to u\n self._adjList[u][v] = True\n self._adjList[v][u] = True\n \n def adj(self, v):\n self._validateVertex(v)\n neighbors = []\n for u in range(0, self._V):\n if self._adjList[v][u]:\n neighbors.append(u)\n return neighbors\n \n def _validateVertex(self, v):\n if v < 0 or v >= self._V:\n raise Exception(\"vertex \" + str(v) + \"is invalid. 
Should be between 0 and \" + str(self._V - 1))\n \n def __str__(self) -> str:\n s = \"\"\n for row in self._adjList:\n s += str(row) + \"\\n\"\n return s\n\nif __name__ == '__main__':\n vertices = [\"Albert\", \"Bob\", \"Christa\", \"Danielle\"]\n V = len(vertices)\n G = Graph(V)\n\n G.addEdge(0,1)\n G.addEdge(0,2)\n G.addEdge(0,3)\n G.addEdge(2,3)\n\n print(G.adj(0))\n print(str(G))","repo_name":"manickaa/CodeBreakersCode","sub_path":"Graphs/adjMatrixGrpahs.py","file_name":"adjMatrixGrpahs.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39118641143","text":"from functools import wraps\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\n\ntry:\n from shapely.geometry import Polygon\nexcept (ModuleNotFoundError, OSError):\n from ..package_not_installed import PackageNotInstalled\n\n Polygon = PackageNotInstalled\n\n\nfrom nucleus.annotation import CuboidAnnotation\nfrom nucleus.prediction import CuboidPrediction\n\nfrom .base import ScalarResult\n\n\ndef group_cuboids_by_label(\n annotations: List[CuboidAnnotation],\n predictions: List[CuboidPrediction],\n) -> Dict[str, Tuple[List[CuboidAnnotation], List[CuboidPrediction]]]:\n \"\"\"Groups input annotations and predictions by label.\n\n Args:\n annotations: list of input cuboid annotations\n predictions: list of input cuboid predictions\n\n Returns:\n Mapping from each label to (annotations, predictions) tuple\n \"\"\"\n labels = set(annotation.label for annotation in annotations)\n labels |= set(prediction.label for prediction in predictions)\n grouped: Dict[\n str, Tuple[List[CuboidAnnotation], List[CuboidPrediction]]\n ] = {label: ([], []) for label in labels}\n for annotation in annotations:\n grouped[annotation.label][0].append(annotation)\n for prediction in predictions:\n grouped[prediction.label][1].append(prediction)\n return grouped\n\n\ndef label_match_wrapper(metric_fn):\n \"\"\"Decorator to add the ability to only apply metric to annotations and\n predictions with matching labels.\n\n Args:\n metric_fn: Metric function that takes a list of annotations, a list\n of predictions, and optional args and kwargs.\n\n Returns:\n Metric function which can optionally enforce matching labels.\n \"\"\"\n\n @wraps(metric_fn)\n def wrapper(\n annotations: List[CuboidAnnotation],\n predictions: List[CuboidPrediction],\n *args,\n enforce_label_match: bool = False,\n **kwargs,\n ) -> ScalarResult:\n # Simply return the metric if we are not enforcing label matches.\n if not enforce_label_match:\n return metric_fn(annotations, predictions, *args, **kwargs)\n\n # For each bin of annotations/predictions, compute the metric applied\n # only to that bin. 
Then aggregate results across all bins.\n grouped_inputs = group_cuboids_by_label(annotations, predictions)\n metric_results = []\n for binned_annotations, binned_predictions in grouped_inputs.values():\n metric_result = metric_fn(\n binned_annotations, binned_predictions, *args, **kwargs\n )\n metric_results.append(metric_result)\n assert all(\n isinstance(r, ScalarResult) for r in metric_results\n ), \"Expected every result to be a ScalarResult\"\n return ScalarResult.aggregate(metric_results)\n\n return wrapper\n\n\ndef process_dataitem(dataitem):\n processed_item = {}\n processed_item[\"xyz\"] = np.array(\n [[ann.position.x, ann.position.y, ann.position.z] for ann in dataitem]\n )\n processed_item[\"wlh\"] = np.array(\n [\n [ann.dimensions.x, ann.dimensions.y, ann.dimensions.z]\n for ann in dataitem\n ]\n )\n processed_item[\"yaw\"] = np.array([ann.yaw for ann in dataitem])\n return processed_item\n\n\ndef compute_outer_iou(\n xyz_0: \"np.ndarray\",\n wlh_0: \"np.ndarray\",\n yaw_0: \"np.ndarray\",\n xyz_1: \"np.ndarray\",\n wlh_1: \"np.ndarray\",\n yaw_1: \"np.ndarray\",\n scale_convention: bool = True,\n distance_threshold=25,\n) -> Tuple[\"np.ndarray\", \"np.ndarray\"]:\n \"\"\"\n Computes outer 3D and 2D IoU\n :param xyz_0: (n, 3)\n :param wlh_0: (n, 3)\n :param yaw_0: (n,)\n :param xyz_1: (m, 3)\n :param wlh_1: (m, 3)\n :param yaw_1: (m,)\n :param scale_convention: flag whether the internal Scale convention is used (have to be adjusted by pi/2)\n :param distance_threshold: computes iou only within this distance (~3x speedup)\n :return: (n, m) 3D IoU, (n, m) 2D IoU\n \"\"\"\n\n bottom_z = np.maximum.outer(\n xyz_0[:, 2] - (wlh_0[:, 2] / 2), xyz_1[:, 2] - (wlh_1[:, 2] / 2)\n )\n top_z = np.minimum.outer(\n xyz_0[:, 2] + (wlh_0[:, 2] / 2), xyz_1[:, 2] + (wlh_1[:, 2] / 2)\n )\n height_intersection = np.maximum(0, top_z - bottom_z)\n\n cuboid_corners_0 = get_batch_cuboid_corners(\n xyz_0, wlh_0, yaw_0, scale_convention=scale_convention\n )\n cuboid_corners_1 = get_batch_cuboid_corners(\n xyz_1, wlh_1, yaw_1, scale_convention=scale_convention\n )\n polygons_1 = [\n Polygon(corners_1[[1, 0, 4, 5, 1], :2])\n for corners_1 in cuboid_corners_1\n ]\n area_intersection = np.zeros(\n (cuboid_corners_0.shape[0], cuboid_corners_1.shape[0]),\n dtype=np.float32,\n )\n\n if cuboid_corners_0.shape[0] != 0 and cuboid_corners_1.shape[0] != 0:\n distance_mask = (\n np.linalg.norm(\n xyz_0[:, np.newaxis, :] - xyz_1[np.newaxis, :, :], axis=2\n )\n < distance_threshold\n )\n\n for i, corners_0 in enumerate(cuboid_corners_0):\n for j, polygon_1 in enumerate(polygons_1):\n if distance_mask[i, j]:\n area_intersection[i, j] = (\n Polygon(corners_0[[1, 0, 4, 5, 1], :2])\n .intersection(polygon_1)\n .area\n )\n\n intersection = height_intersection * area_intersection\n area_0 = wlh_0[:, 0] * wlh_0[:, 1]\n area_1 = wlh_1[:, 0] * wlh_1[:, 1]\n union_2d = np.add.outer(area_0, area_1) - area_intersection\n\n volume_0 = area_0 * wlh_0[:, 2]\n volume_1 = area_1 * wlh_1[:, 2]\n union = np.add.outer(volume_0, volume_1) - intersection\n return intersection / union, area_intersection / union_2d\n\n\ndef get_batch_cuboid_corners(\n xyz: \"np.ndarray\",\n wlh: \"np.ndarray\",\n yaw: \"np.ndarray\",\n pitch: Optional[\"np.ndarray\"] = None,\n roll: Optional[\"np.ndarray\"] = None,\n scale_convention: bool = True,\n) -> \"np.ndarray\":\n \"\"\"\n Vectorized batch version of get_cuboid_corners\n :param xyz: (n, 3)\n :param wlh: (n, 3)\n :param yaw: (n,)\n :param pitch: (n,)\n :param roll: (n,)\n :param scale_convention: flag 
whether the internal Scale convention is used (have to be adjusted by pi/2)\n :return: (n, 8, 3)\n \"\"\"\n if scale_convention:\n yaw = yaw.copy() + np.pi / 2\n\n w, l, h = wlh[:, 0, None], wlh[:, 1, None], wlh[:, 2, None]\n\n x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])\n y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])\n z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])\n corners = np.stack((x_corners, y_corners, z_corners), axis=1)\n\n rot_mats = get_batch_rotation_matrices(yaw, pitch, roll)\n corners = np.matmul(rot_mats, corners)\n\n x, y, z = xyz[:, 0, None], xyz[:, 1, None], xyz[:, 2, None]\n corners[:, 0, :] = corners[:, 0, :] + x\n corners[:, 1, :] = corners[:, 1, :] + y\n corners[:, 2, :] = corners[:, 2, :] + z\n return corners.swapaxes(1, 2)\n\n\ndef get_batch_rotation_matrices(\n yaw: \"np.ndarray\",\n pitch: Optional[\"np.ndarray\"] = None,\n roll: Optional[\"np.ndarray\"] = None,\n) -> \"np.ndarray\":\n if pitch is None:\n pitch = np.zeros_like(yaw)\n if roll is None:\n roll = np.zeros_like(yaw)\n cy = np.cos(yaw)\n sy = np.sin(yaw)\n cp = np.cos(pitch)\n sp = np.sin(pitch)\n cr = np.cos(roll)\n sr = np.sin(roll)\n return np.stack(\n (\n np.stack(\n (cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr), 1\n ),\n np.stack(\n (sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr), 1\n ),\n np.stack((-sp, cp * sr, cp * cr), 1),\n ),\n 1,\n )\n\n\ndef associate_cuboids_on_iou(\n xyz_0: \"np.ndarray\",\n wlh_0: \"np.ndarray\",\n yaw_0: \"np.ndarray\",\n xyz_1: \"np.ndarray\",\n wlh_1: \"np.ndarray\",\n yaw_1: \"np.ndarray\",\n threshold_in_overlap_ratio: float = 0.1,\n) -> List[Tuple[int, int]]:\n if xyz_0.shape[0] < 1 or xyz_1.shape[0] < 1:\n return []\n iou_matrix, _ = compute_outer_iou(xyz_0, wlh_0, yaw_0, xyz_1, wlh_1, yaw_1)\n mapping = []\n for i, m in enumerate(iou_matrix.max(axis=1)):\n if m >= threshold_in_overlap_ratio:\n mapping.append((i, iou_matrix[i].argmax()))\n return mapping\n\n\ndef recall_precision(\n prediction: List[CuboidPrediction],\n groundtruth: List[CuboidAnnotation],\n threshold_in_overlap_ratio: float,\n) -> Dict[str, float]:\n \"\"\"\n Calculates the precision and recall of each lidar frame.\n\n Args:\n :param predictions: list of cuboid annotation predictions.\n :param ground_truth: list of cuboid annotation groundtruths.\n :param threshold: IOU threshold to consider detection as valid. 
Must be in [0, 1].\n \"\"\"\n\n tp_sum = 0\n fp_sum = 0\n fn_sum = 0\n num_predicted = 0\n num_instances = 0\n\n gt_items = process_dataitem(groundtruth)\n pred_items = process_dataitem(prediction)\n\n num_predicted += pred_items[\"xyz\"].shape[0]\n num_instances += gt_items[\"xyz\"].shape[0]\n\n tp = np.zeros(pred_items[\"xyz\"].shape[0])\n fp = np.ones(pred_items[\"xyz\"].shape[0])\n fn = np.ones(gt_items[\"xyz\"].shape[0])\n\n mapping = associate_cuboids_on_iou(\n pred_items[\"xyz\"],\n pred_items[\"wlh\"],\n pred_items[\"yaw\"] + np.pi / 2,\n gt_items[\"xyz\"],\n gt_items[\"wlh\"],\n gt_items[\"yaw\"] + np.pi / 2,\n threshold_in_overlap_ratio=threshold_in_overlap_ratio,\n )\n\n for pred_id, gt_id in mapping:\n if fn[gt_id] == 0:\n continue\n tp[pred_id] = 1\n fp[pred_id] = 0\n fn[gt_id] = 0\n\n tp_sum += tp.sum()\n fp_sum += fp.sum()\n fn_sum += fn.sum()\n\n return {\n \"tp_sum\": tp_sum,\n \"fp_sum\": fp_sum,\n \"fn_sum\": fn_sum,\n \"precision\": tp_sum / (tp_sum + fp_sum),\n \"recall\": tp_sum / (tp_sum + fn_sum),\n \"num_predicted\": num_predicted,\n \"num_instances\": num_instances,\n }\n\n\ndef detection_iou(\n prediction: List[CuboidPrediction],\n groundtruth: List[CuboidAnnotation],\n threshold_in_overlap_ratio: float,\n) -> Tuple[\"np.ndarray\", \"np.ndarray\"]:\n \"\"\"\n Calculates the 2D IOU and 3D IOU overlap between predictions and groundtruth.\n Uses linear sum assignment to associate cuboids.\n\n Args:\n :param predictions: list of cuboid annotation predictions.\n :param ground_truth: list of cuboid annotation groundtruths.\n :param threshold: IOU threshold to consider detection as valid. Must be in [0, 1].\n \"\"\"\n\n gt_items = process_dataitem(groundtruth)\n pred_items = process_dataitem(prediction)\n\n meter_2d = []\n meter_3d = []\n\n if gt_items[\"xyz\"].shape[0] == 0 or pred_items[\"xyz\"].shape[0] == 0:\n return np.array([0.0]), np.array([0.0])\n\n iou_3d, iou_2d = compute_outer_iou(\n gt_items[\"xyz\"],\n gt_items[\"wlh\"],\n gt_items[\"yaw\"],\n pred_items[\"xyz\"],\n pred_items[\"wlh\"],\n pred_items[\"yaw\"],\n )\n\n for i, m in enumerate(iou_3d.max(axis=1)):\n if m >= threshold_in_overlap_ratio:\n j = iou_3d[i].argmax()\n meter_3d.append(iou_3d[i, j])\n meter_2d.append(iou_2d[i, j])\n\n return np.array(meter_3d), np.array(meter_2d)\n","repo_name":"scaleapi/nucleus-python-client","sub_path":"nucleus/metrics/cuboid_utils.py","file_name":"cuboid_utils.py","file_ext":"py","file_size_in_byte":11301,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"81"} +{"seq_id":"7823415262","text":"class Solution:\n \n def palindromePairs(self, words: List[str]) -> List[List[int]]:\n def is_palindrome(s):\n return s == s[::-1]\n\n words = {word: i for i, word in enumerate(words)}\n result = []\n for word, i in words.items():\n n = len(word)\n for l in range(n+1):\n prefix = word[:l]\n suffix = word[l:]\n if is_palindrome(prefix):\n match = suffix[::-1]\n if match != word and match in words:\n # prepend to current word at index l\n result.append([words[match], i])\n # Avoid duplicate matches for the empty string suffix, since already used in prefix search\n if l != n and is_palindrome(suffix):\n # append to current word at index l\n match = prefix[::-1]\n if match in words:\n result.append([i, words[match]])\n return result\n \n ","repo_name":"wjjameslee/leetcode","sub_path":"336. Palindrome Pairs.py","file_name":"336. 
Palindrome Pairs.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37816570853","text":"\nimport json\nfrom flask import Blueprint, request, jsonify, render_template\n\nfrom paper_trading.api.db import MongoDBService\nfrom paper_trading.trade.data_center import (\n get_stock_daily_qfq,\n get_stock_mtime\n)\nfrom paper_trading.trade.account import (\n new_order_generate,\n cancel_order_generate\n)\nfrom paper_trading.trade.db_model import (\n on_account_exist,\n on_account_delete,\n query_account_list,\n query_orders_by_symbol,\n query_order_status,\n query_order_one, query_orders)\n\n# 主引擎\nmain_engine = None\n\n# 账户引擎\naccount_engine = None\n\n# 数据库实例\ndb = None\ntest_db = None\n\n# 行情实例\ntdx = None\n\nblue = Blueprint('main_blue', __name__)\n\ndef init_blue(app, engine):\n \"\"\"初始化蓝图\"\"\"\n app.register_blueprint(blue)\n\n global main_engine, account_engine, db, test_db, tdx\n\n # 绑定主引擎和账户引擎\n main_engine = engine\n account_engine = engine.account_engine\n\n # 连接数据库,用于数据查询\n db = main_engine.creat_db()\n\n # 连接行情源\n tdx = main_engine.creat_hq_api()\n\n # 连接测试行情数据库\n test_db = MongoDBService('192.168.1.251', 27017)\n test_db.connect_db()\n\n\n\"\"\"web page\"\"\"\n\n\n@blue.route('/', methods=['GET', 'POST'])\ndef index():\n \"\"\"主页\"\"\"\n return render_template(\"index.html\")\n\n\n@blue.route('/creatPage', methods=['GET', 'POST'])\ndef my_account():\n \"\"\"创建账户页面\"\"\"\n return render_template(\"account.html\")\n\n\n@blue.route('/trade', methods=['GET', 'POST'])\ndef my_trade():\n \"\"\"模拟交易页面\"\"\"\n return render_template(\"trade.html\")\n\n\n@blue.route('/train_k', methods=['GET'])\ndef my_train_k():\n \"\"\"交易训练页面\"\"\"\n return render_template(\"train_k.html\")\n\n\n@blue.route('/review', methods=['GET'])\ndef trade_review():\n \"\"\"交易回看\"\"\"\n return render_template(\"review.html\")\n\n\n\"\"\"trade api\"\"\"\n\n\n@blue.route('/login', methods=[\"POST\"])\ndef account_login():\n \"\"\"账户登录\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n account = account_engine.login(token)\n if account:\n rps['data'] = account\n else:\n rps['status'] = False\n rps['data'] = \"账户不存在\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/creat', methods=[\"POST\"])\ndef account_creat():\n \"\"\"创建账户\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"info\"):\n info = request.form[\"info\"]\n info_dict = json.loads(info)\n account = account_engine.creat(info_dict)\n if account:\n rps['data'] = account\n else:\n rps['status'] = False\n rps['data'] = \"创建账户失败\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/delete', methods=[\"POST\"])\ndef account_delete():\n \"\"\"账户删除\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n if on_account_exist(token, db):\n account_engine.logout(token)\n result = on_account_delete(token, db)\n if result:\n rps['data'] = \"账户删除成功\"\n else:\n rps['status'] = False\n rps['data'] = \"删除账户失败\"\n else:\n rps['status'] = False\n rps['data'] = \"账户不存在\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/list', methods=[\"GET\"])\ndef account_list():\n \"\"\"获取账户列表\"\"\"\n rps = {}\n rps['status'] = True\n\n account_list = query_account_list(db)\n\n if account_list:\n rps['data'] 
= account_list\n else:\n rps['status'] = False\n rps['data'] = \"账户列表为空\"\n\n return jsonify(rps)\n\n\n@blue.route('/account', methods=[\"POST\"])\ndef account_query():\n \"\"\"查询账户信息\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n status, account = account_engine.query_account_data(token)\n rps['status'] = status\n rps['data'] = account\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/pos', methods=[\"POST\"])\ndef position_query():\n \"\"\"查询持仓信息\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n status, pos = account_engine.query_pos_data(token)\n rps['status'] = status\n rps['data'] = pos\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/orders', methods=[\"POST\"])\ndef orders_query():\n \"\"\"查询所有订单\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n start_date = request.form.get(\"start_date\")\n end_date = request.form.get(\"end_date\")\n if start_date and end_date:\n flt = {\"order_date\": {\"$gte\": start_date, \"$lte\": end_date}}\n else:\n flt = {}\n try:\n data = query_orders(token, db, flt)\n except Exception as e:\n status = False\n data = \"查询订单失败\"\n else:\n status = True\n rps['status'] = status\n rps['data'] = data or []\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/orders_today', methods=[\"POST\"])\ndef orders_today_query():\n \"\"\"查询当日订单\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n status, orders = account_engine.query_orders_today(token)\n rps['status'] = status\n rps['data'] = orders\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/send', methods=[\"POST\"])\ndef order_arrived():\n \"\"\"接收订单\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"order\"):\n data = request.form[\"order\"]\n data = json.loads(data)\n order = new_order_generate(data)\n if order:\n result, msg = main_engine.on_orders_arrived(order)\n if result:\n # 将订单送入交易引擎\n main_engine.order_put(msg)\n rps['data'] = {\"order_id\": msg.order_id}\n else:\n rps['status'] = False\n rps['data'] = msg\n else:\n rps['status'] = False\n rps['data'] = \"订单数据错误\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/cancel', methods=[\"POST\"])\ndef order_cancel():\n \"\"\"取消订单\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n if request.form.get(\"order_id\"):\n token = request.form[\"token\"]\n order_id = request.form[\"order_id\"]\n result, order = query_order_one(\n token, order_id, db)\n if not result:\n rps['status'] = False\n rps['data'] = \"查询订单失败\"\n else:\n order = cancel_order_generate(token, order_id, code=order[\"code\"], exchange=order[\"exchange\"])\n if main_engine.order_put(order):\n rps['data'] = \"撤单成功\"\n else:\n rps['status'] = False\n rps['data'] = \"撤单失败\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/status', methods=[\"POST\"])\ndef get_status():\n \"\"\"查询订单状态\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n if request.form.get(\"order_id\"):\n token = request.form[\"token\"]\n order_id = 
request.form[\"order_id\"]\n result, order_status = query_order_status(\n token, order_id, db)\n if result:\n rps['data'] = order_status\n else:\n rps['status'] = False\n rps['data'] = order_status\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/liquidation', methods=[\"POST\"])\ndef liquidation():\n \"\"\"清算\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n liq_date = request.form[\"check_date\"]\n price_dict = {}\n if request.form.get(\"price_dict\"):\n price_dict = request.form[\"price_dict\"]\n price_dict = json.loads(price_dict)\n if isinstance(price_dict, dict):\n if account_engine.liq_manual(token, liq_date, price_dict):\n rps['data'] = \"清算完成\"\n else:\n rps['status'] = False\n rps['data'] = \"清算失败\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n else:\n rps['status'] = False\n rps['data'] = \"��求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/account_record', methods=['POST'])\ndef get_account_record():\n \"\"\"获取账户记录数据\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n start = request.form[\"start\"]\n end = request.form[\"end\"]\n status, account_record = account_engine.query_account_record(token, start, end)\n rps['status'] = status\n rps['data'] = account_record\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/pos_record', methods=['POST'])\ndef get_pos_record():\n \"\"\"获取持仓记录数据\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n start = request.form[\"start\"]\n end = request.form[\"end\"]\n status, pos_record = account_engine.query_pos_record(token, start, end)\n rps['status'] = status\n rps['data'] = pos_record\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/persistance', methods=['POST'])\ndef persistance():\n \"\"\"数据持久化\"\"\"\n rps = {}\n\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n result = account_engine.data_persistance(token)\n if isinstance(result, bool):\n rps['data'] = \"数据保存完毕\"\n else:\n rps['status'] = False\n rps['data'] = result\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/test', methods=['POST'])\ndef test():\n \"\"\"数据持久化\"\"\"\n rps = {}\n\n rps['status'] = True\n\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n main_engine.test()\n rps['data'] = \"\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\"\"\"data for web page\"\"\"\n\n\n@blue.route('/orders_page', methods=[\"POST\"])\ndef orders_for_page():\n \"\"\"页面查询所有订单\"\"\"\n rps = []\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n orders = account_engine.query_orders(token)\n if orders:\n if isinstance(orders, list):\n rps = orders\n\n new_data = {'aaData': rps}\n return jsonify(new_data)\n\n\n@blue.route('/orders_today_page', methods=[\"POST\"])\ndef orders_today_for_page():\n \"\"\"页面查询当日的订单\"\"\"\n rps = []\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n status, orders = account_engine.query_orders_today(token)\n if orders:\n if isinstance(orders, list):\n rps = orders\n\n new_data = {'aaData':rps}\n return 
jsonify(new_data)\n\n\n@blue.route('/orders_page_by_symbol', methods=[\"POST\"])\ndef orders_for_page_by_symbol():\n \"\"\"页面查询某只证券的订单\"\"\"\n rps = []\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n symbol = request.form[\"symbol\"]\n orders = query_orders_by_symbol(token, symbol, db)\n if orders:\n if isinstance(orders, list):\n rps = orders\n\n new_data = {'aaData':rps}\n return jsonify(new_data)\n\n\n@blue.route('/pos_record_page', methods=['POST'])\ndef get_pos_record_for_page():\n \"\"\"获取持仓记录数据\"\"\"\n rps = []\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n pos_record = account_engine.query_pos_record(token)\n if pos_record:\n if isinstance(pos_record, list):\n rps = pos_record\n\n new_data = {'aaData': rps}\n return jsonify(new_data)\n\n\n\"\"\"stock data\"\"\"\n\n@blue.route('/test_hq_page', methods=['POST'])\ndef get_test_hq_for_page():\n \"\"\"获取测试用k线数据\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"symbol\") or request.form.get(\"start\") or request.form.get(\"end\"):\n symbol = request.form[\"symbol\"]\n start = request.form[\"start\"]\n end = request.form[\"end\"]\n data = get_stock_daily_qfq(symbol, start, end, test_db)\n if data:\n rps['data'] = data\n else:\n rps['status'] = False\n rps['data'] = \"无数据\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n\n\n@blue.route('/kline_page', methods=['POST'])\ndef get_kline_for_page():\n \"\"\"获取k线数据\"\"\"\n rps = []\n if request.form.get(\"token\"):\n token = request.form[\"token\"]\n pos = account_engine.query_account_data(token)\n if pos:\n if isinstance(pos, list):\n rps = pos\n\n new_data = {'aaData': rps}\n return jsonify(new_data)\n\n\n@blue.route('/mtime_page', methods=['POST'])\ndef get_mtime_for_page():\n \"\"\"获取分时数据\"\"\"\n rps = {}\n rps['status'] = True\n\n if request.form.get(\"symbol\") or request.form.get(\"timestamp\"):\n symbol = request.form[\"symbol\"]\n timestamp = request.form[\"timestamp\"]\n data = get_stock_mtime(symbol, timestamp, tdx)\n if data:\n rps['data'] = data\n else:\n rps['status'] = False\n rps['data'] = \"无数据\"\n else:\n rps['status'] = False\n rps['data'] = \"请求参数错误\"\n\n return jsonify(rps)\n","repo_name":"cao6237699/paper_trading","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15060,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"81"} +{"seq_id":"74144006025","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.backbone import resnet_dialated as resnet\nfrom models import ASPP\nfrom models.PMMs_new import PMMs\nfrom utils import contrast_loss\n\nclass OneModel(nn.Module):\n def __init__(self, args):\n\n self.inplanes = 64\n self.num_pro = 3\n super(OneModel, self).__init__()\n\n self.model_res = resnet.Res50_Deeplab(pretrained=True)\n self.layer5 = nn.Sequential(\n nn.Conv2d(in_channels=1536, out_channels=256, kernel_size=3, stride=1, padding=2, dilation=2, bias=True),\n nn.BatchNorm2d(256),\n nn.ReLU())\n\n self.layer55 = nn.Sequential(\n nn.Conv2d(in_channels=256 * 2, out_channels=256, kernel_size=3, stride=1, padding=2, dilation=2,\n bias=True),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n #nn.Dropout2d(p=0.5),\n )\n\n self.layer56 = nn.Sequential(\n nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, dilation=1,\n bias=True),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n #nn.Dropout2d(p=0.5),\n )\n\n self.layer6 = ASPP.PSPnet()\n\n self.layer7 = 
nn.Sequential(\n nn.Conv2d(1280, 256, kernel_size=1, stride=1, padding=0, bias=True),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n #nn.Dropout2d(p=0.5),\n\n )\n\n self.layer9 = nn.Conv2d(256, 2, kernel_size=1, stride=1, bias=True) # numclass = 2\n\n self.residule1 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(256+2, 256, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True)\n )\n\n self.residule2 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True)\n )\n\n self.residule3 = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True)\n )\n self.PMMsU = PMMs(256, self.num_pro, stage_num=10)\n\n self.batch_size = args.batch_size\n self.mode = args.mode\n\n def forward(self, query_rgb, support_rgb, support_mask, query_mask = None):\n if self.mode =='train':\n logits = self.forward_train(query_rgb, support_rgb, support_mask, query_mask)\n else:\n logits = self.forward_test(query_rgb, support_rgb, support_mask)\n\n return logits\n\n def forward_train(self, query_rgb, support_rgb, support_mask, query_mask):\n # important: do not optimize the RESNET backbone\n # A means support set\n # B means query set\n\n # extract A feature\n support_feature = self.extract_feature_res(support_rgb)\n\n # extract B feature\n query_feature = self.extract_feature_res(query_rgb)\n\n # generate semantic vector\n vec_pos_normal, mu_f_s, mu_b_s = self.PMMsU.generate_prototype(support_feature, support_mask)\n Prob_map_normal, P_normal = self.PMMsU.discriminative_model(query_feature, mu_f_s, mu_b_s)\n\n vec_pos_q, mu_f_q, mu_b_q = self.PMMsU.generate_prototype(query_feature, query_mask)\n # Match Prototypes\n mu_f_s, mu_f_q = contrast_loss.MCMFMatch(mu_f_s, mu_f_q)\n Prob_map_self, P_self = self.PMMsU.discriminative_model(query_feature, mu_f_q, mu_b_q)\n\n exit_feat_in, Prob_Q = self.trans2query(mu_f_s, mu_b_s, query_feature)\n\n\n # segmentation\n out, _ = self.IoM(exit_feat_in, Prob_Q)\n\n return support_feature, P_normal, P_self, out\n\n def trans2query(self, mu_f_s, mu_b_s, query_feature):\n Prob_map_normal, P_normal = self.PMMsU.discriminative_model(query_feature, mu_f_s, mu_b_s)\n b,k,w,h = P_normal.shape\n\n z = P_normal.view(b,k,-1)\n z_t = F.softmax(z, dim=1)\n\n mu = torch.cat([mu_f_s,mu_b_s],dim=1)\n mu = mu.permute(0,2,1)\n x = torch.bmm(mu,z_t)\n c = x.shape[1]\n x=x.view(b,c,w,h)\n\n # sup->query\n exit_feat_in = self.p_match(x, query_feature)\n\n return exit_feat_in, Prob_map_normal\n\n def forward_test(self, query_rgb, support_rgb, support_mask):\n # extract A feature\n support_feature = self.extract_feature_res(support_rgb)\n\n # extract B feature\n query_feature = self.extract_feature_res(query_rgb)\n \n # generate semantic vector\n vec_pos_normal, mu_f_s, mu_b_s = self.PMMsU.generate_prototype(support_feature, support_mask)\n\n # sup->query\n exit_feat_in, Prob_Q = self.trans2query(mu_f_s, mu_b_s, query_feature)\n \n # segmentation\n out, _ = self.IoM(exit_feat_in, Prob_Q)\n\n return support_feature, query_feature, Prob_Q, out\n\n def forward_5shot(self, query_rgb, support_rgb_batch, support_mask_batch):\n # extract B feature\n query_feature = self.extract_feature_res(query_rgb)\n # feature concate\n feature_size = query_feature.shape[-2:]\n\n mean = torch.zeros(5).cuda()\n for i in 
range(support_rgb_batch.shape[1]):\n mean[i] = torch.sum(torch.sum(support_mask_batch[:, i], dim=3), dim=2)\n avg = torch.mean(mean, dim=0) # mean\n mean = avg / mean\n\n for i in range(support_rgb_batch.shape[1]):\n support_rgb = support_rgb_batch[:, i]\n support_mask = support_mask_batch[:, i]\n # extract A feature\n support_feature = self.extract_feature_res(support_rgb)\n # generate semantic vector\n support_mask_temp = F.interpolate(support_mask, support_feature.shape[-2:], mode='bilinear',\n align_corners=True)\n if i == 0:\n feature_cat = support_feature\n mask_cat = support_mask_temp\n else:\n feature_cat = torch.cat([feature_cat,support_feature],dim=2)\n mask_cat = torch.cat([mask_cat,support_mask_temp],dim=2)\n vec_pos, mu_f_s, mu_b_s = self.PMMsU.generate_prototype(feature_cat, mask_cat)\n exit_feat_in, Prob_Q = self.trans2query(mu_f_s, mu_b_s, query_feature)\n out, _ = self.IoM(exit_feat_in, Prob_Q)\n return out, out, out, out\n\n def p_match(self, vec_pos,query_feature):\n feature_size = query_feature.shape[-2:]\n\n exit_feat_in_ = self.f_v_concate(query_feature, vec_pos, feature_size)\n exit_feat_in = self.layer55(exit_feat_in_)\n\n exit_feat_in = self.layer56(exit_feat_in)\n return exit_feat_in\n\n def extract_feature_res(self, rgb):\n out_resnet = self.model_res(rgb)\n stage2_out = out_resnet[1]\n stage3_out = out_resnet[2]\n out_23 = torch.cat([stage2_out, stage3_out], dim=1)\n feature = self.layer5(out_23)\n\n return feature\n\n def f_v_concate(self, feature, vec_pos, feature_size):\n fea_pos = vec_pos.expand(-1, -1, feature_size[0], feature_size[1]) # tile for cat\n exit_feat_in = torch.cat([feature, fea_pos], dim=1)\n\n return exit_feat_in\n\n def IoM(self, feature, history_mask):\n feature_size = feature.shape[-2:]\n\n history_mask = F.interpolate(history_mask, feature_size, mode='bilinear', align_corners=True)\n out = feature\n out_plus_history = torch.cat([out, history_mask], dim=1)\n out = out + self.residule1(out_plus_history)\n out = out + self.residule2(out)\n out = out + self.residule3(out)\n\n out = self.layer6(out)\n out = self.layer7(out)\n out = self.layer9(out)\n\n out_softmax = F.softmax(out, dim=1)\n\n return out, out_softmax\n\n def get_loss(self, logits, query_label, support_mask):\n bce_logits_func = nn.CrossEntropyLoss()\n support_feature, P_normal, P_self, outB_side = logits\n\n #contrastive loss\n Prob_map, P_label = self.trans_loss(P_normal, P_self)\n loss_bce_seg2 = bce_logits_func(Prob_map, P_label.long())\n\n # segmentation loss\n b, c, w, h = query_label.size()\n outB_side = F.upsample(outB_side, size=(w, h), mode='bilinear')\n query_label = query_label.view(b, -1)\n bb, cc, _, _ = outB_side.size()\n outB_side = outB_side.view(b, cc, w * h)\n loss_bce_seg1 = bce_logits_func(outB_side, query_label.long())\n\n # Merge\n loss = loss_bce_seg1 + 0.1*(loss_bce_seg2)\n\n return loss, loss_bce_seg1, loss_bce_seg2\n\n def trans_loss(self, P_normal, P_self):\n # construct prob map\n Prob_map_b = torch.sum(P_normal[:, self.num_pro:], dim=1).unsqueeze(dim=1) / self.num_pro # background\n Prob_map_f = P_normal[:, :self.num_pro]\n Prob_map = torch.cat([Prob_map_f, Prob_map_b], dim=1)\n\n # construct label\n _, P_label = torch.max(P_self, dim=1)\n P_label[P_label > (self.num_pro-1)] = self.num_pro\n P_label = P_label.long()\n\n return Prob_map, P_label\n\n def get_pred(self, logits, query_image):\n outB, outA_pos, outB_side1, outB_side = logits\n w, h = query_image.size()[-2:]\n outB_side = F.upsample(outB_side, size=(w, h), mode='bilinear')\n out_softmax = 
F.softmax(outB_side, dim=1) # .squeeze()\n values, pred = torch.max(out_softmax, dim=1)\n return out_softmax, pred\n","repo_name":"Yang-Bob/PST","sub_path":"networks/PST_net.py","file_name":"PST_net.py","file_ext":"py","file_size_in_byte":9573,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"7621302195","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 10 11:10:54 2018\r\n\r\n@author: Fan JIANG, Chali SHEN\r\n\"\"\"\r\n# Delta-Gamma distribution with importance sampling for indenpendant case\r\nimport numpy as np\r\nimport scipy.stats as sts\r\nimport scipy.optimize as opt\r\nimport matplotlib.pyplot as plt\r\n\r\n# functions\r\ndef d_plus(sigma,t,x,y):\r\n return 1./(sigma*np.sqrt(t))*np.log(x/y)+0.5*sigma*np.sqrt(t)\r\n\r\ndef d_minus(sigma,t,x,y):\r\n return 1./(sigma*np.sqrt(t))*np.log(x/y)-0.5*sigma*np.sqrt(t)\r\n\r\n# dV/dt \r\ndef f_a0(alpha, beta, S, K, T, t, sigma):\r\n return -np.sum((alpha + beta)*S*sts.norm.pdf(d_plus(sigma,T-t,S,K))*(0.5/sigma*(T-t)**(-1.5)\r\n *np.log(S/K)-sigma/(4*np.sqrt(T-t)))-(alpha+beta)*K*sts.norm.pdf(d_minus(sigma,T-t,S,K))*(\r\n 0.5/sigma*(T-t)**(-1.5)*np.log(S/K)+sigma/(4*np.sqrt(T-t))))\r\n# dV/dS\r\ndef f_a(alpha,beta,S,K,T,t,sigma):\r\n return -((alpha+beta)*sts.norm.cdf(d_plus(sigma,T-t,S,K)) - beta + (alpha+beta)*sts.norm.pdf(\r\n d_plus(sigma,T-t,S,K))/(sigma*np.sqrt(T-t))-(alpha+beta)/(sigma*np.sqrt(T-t))*K/S*sts.norm.pdf(\r\n d_plus(sigma,T-t,K,S)))\r\n#d2V/dS_i dS_j\r\ndef f_COV(alpha,beta,S,K,T,t,sigma):\r\n # return the matrix of covariance\r\n var = (alpha+beta)*sts.norm.pdf(d_plus(sigma,T-t,S,K))/(sigma*np.sqrt(T-t)*S)*(1.-\r\n d_plus(sigma,T-t,S,K)/(sigma*np.sqrt(T-t)))+(alpha+beta)*sts.norm.pdf(d_plus(\r\n sigma,T-t,K,S))*K/(sigma*np.sqrt(T-t)*S*S)*(1.-d_plus(sigma,T-t,K,S)/(sigma*np.sqrt(T-t)))\r\n return np.diag(var)\r\n\r\ndef f_A(alpha,beta,S,K,T,t,sigma):\r\n return -0.5*f_COV(alpha,beta,S,K,T,t,sigma)\r\n\r\n# Quand S sont independant, change of probability\r\ndef f_b(alpha,beta,S,K,T,t,sigma,h):\r\n return f_a(alpha,beta,S,K,T,t,sigma)*sigma*np.sqrt(h)*S\r\n\r\ndef f_lambd(alpha,beta,S,K,T,t,sigma,h):\r\n return np.diag(f_A(alpha,beta,S,K,T,t,sigma))*S*S*sigma*sigma*h\r\n\r\n# generating function\r\ndef f_Psi(theta,lambd,b):\r\n return 0.5*np.sum(theta*theta*b*b/(1.-2.*theta*lambd)-np.log(1.-2.*theta*lambd))\r\n\r\n\r\n#Q = a0 + a'*Del_S + Del_S'*A*Del_S\r\n\r\nsigma = 0.2\r\nh = 10./360. # 10 days, assuming 360 days per year\r\nT = 1.\r\nt = 0.\r\nI = 10\r\nalpha = np.array([10]*5+[10]*5)\r\nbeta = np.array([5]*5+[5]*5)\r\nS = np.ones(I)*1\r\nK = np.ones(I)*2\r\n\r\n\r\nnum_iter = 500\r\nnum_event = 2*10**4\r\n\r\n# for dichotomy\r\na0 = f_a0(alpha, beta, S, K, T, t, sigma)*h\r\nx = a0 # ajuster x et step tel que P(L > lo) > target et P(L < hi) < target\r\nstep = 1.\r\nlo = x - step\r\nhi = -1\r\n\r\nlevel = 0.9999 # level of Var\r\ntarget = 1. 
- level\r\nstop_threshold = 0.02\r\ncount = 1\r\n\r\n#dichotomy\r\nwhile True:\r\n print(\"*\"*50)\r\n print(\"interation \"+str(count)+\": \")\r\n b = f_b(alpha,beta,S,K,T,t,sigma,h)\r\n #print(\"b: \"+str(b))\r\n lambd = f_lambd(alpha,beta,S,K,T,t,sigma,h)\r\n #print(\"lambda: \"+ str(lambd))\r\n a0 = f_a0(alpha, beta, S, K, T, t, sigma)*h\r\n print(\"a0: \"+str(a0))\r\n def f_Derive_Psi(theta):\r\n return np.sum(theta*b*b*(1.-theta*lambd)/(1.-2.*theta*lambd)**2 + lambd/(1.-2.*theta*lambd)) - (x-a0)\r\n theta =opt.fsolve(f_Derive_Psi,1) # optimal theta with current x\r\n print(\"theta:\" + str(theta))\r\n print(\"x = \"+str(np.sum(theta*b*b*(1.-theta*lambd)/(1.-2.*theta*lambd)**2 + lambd/(1.-2.*theta*lambd))+a0))\r\n psi_theta = f_Psi(theta,lambd,b) \r\n # change of probability\r\n new_sigma2 = 1./(1.-2.*theta*lambd)\r\n new_mu = theta*b* new_sigma2\r\n print(\"\")\r\n print(\"generating Z\")\r\n Z = np.random.multivariate_normal(new_mu,np.diag(new_sigma2),size = (num_event, num_iter))\r\n #print(Z.shape)\r\n Z = Z.reshape((Z.shape[-1],num_event,num_iter))\r\n #print(Z.shape)\r\n b = b.reshape((I,1,1))\r\n lambd = lambd.reshape((I,1,1))\r\n # Q = L-a0\r\n Q = np.sum(b*Z+ lambd*Z*Z,axis = 0)\r\n indicator_Q = (Q >(x-a0)) * np.exp(psi_theta-theta*Q)\r\n probs = np.mean(indicator_Q,axis = 0)\r\n mean = np.mean(probs)\r\n std = np.std(probs)\r\n print(\"P(L>x): \" + str(mean))\r\n print(\"P(L <= x) = \"+ str((1- mean)*100.)+\"%\")\r\n print(\"l'intervalle de confiance de 95% avec \"+str(num_iter)+\" interations: [\"+ \r\n str(mean - 1.96*std/np.sqrt(num_iter))+\", \"+str(mean + 1.96*std/np.sqrt(num_iter))+\"]\") \r\n print(\"l'erreur relative: \"+str(100*2*1.96*std/np.sqrt(num_iter)/mean)+\"%\")\r\n\r\n #Conditional_Var = np.mean(indicator_Q * (Q+a0),axis = 0)/probs\r\n Conditional_Var = []\r\n for i in range(len(probs)):\r\n if probs[i] != 0:\r\n Conditional_Var.append(np.mean(indicator_Q[:,i] * (Q[:,i]+a0))/probs[i])\r\n Conditional_Var = np.array(Conditional_Var)\r\n \r\n \r\n mean_condi = np.mean(Conditional_Var)\r\n std_condi = np.std(Conditional_Var)\r\n print(\"\")\r\n print(\"Conditional Var E[L|L>x] avec P(L>x)=\"+str(mean)+\": \"+ str(mean_condi))\r\n print(\"l'intervalle de confiance de 95% avec \"+str(num_iter)+\" interations: [\"+ \r\n str(mean_condi - 1.96*std_condi/np.sqrt(num_iter))+\", \"+str(mean_condi + 1.96*std_condi/np.sqrt(num_iter))+\"]\") \r\n print(\"l'erreur relative: \"+str(100*2*1.96*std_condi/np.sqrt(num_iter)/mean_condi)+\"%\")\r\n # dichotomy\r\n if abs(mean -target) <= (stop_threshold * target):\r\n break\r\n \r\n if mean == 0.:\r\n step = step/2\r\n hi = x\r\n x = x - step\r\n else:\r\n if mean > target:\r\n if hi == -1.:\r\n lo = x\r\n x = x + step\r\n step = step *2\r\n else:\r\n lo = x\r\n x = (lo + hi)/2\r\n else:\r\n hi = x\r\n x = (x + lo)/2.\r\n \r\n count +=1\r\n\r\n\r\nprint(\"\")\r\nprint(\"-\"*50)\r\nprint(\"x has been optimised (minimiser la variance)\")\r\n# VaR of each iteration\r\nQ_sorted = np.sort(Q,axis = 0)\r\nVaR_iter = np.zeros(num_iter)\r\nfor i in range(num_iter):\r\n tmp_Q = Q_sorted[:,i]\r\n if tmp_Q[-1] > (x-a0):\r\n VaR_iter[i] = tmp_Q[tmp_Q > (x-a0)][0] + a0\r\n else:\r\n VaR_iter[i] = tmp_Q[-1] + a0\r\n\r\n\r\nVaR_mean = np.mean(VaR_iter[:])\r\nVaR_std = np.std(VaR_iter[:])\r\nprint(\"VaR pour \"+str(level*100)+\"%: \"+str(VaR_mean))\r\nprint(\"l'intervalle de confiance de 95% avec \"+str(num_iter)+\" interations de VaR: [\"+ \r\n str(VaR_mean - 1.96*VaR_std/np.sqrt(num_iter))+\", \"+str(VaR_mean + 
1.96*VaR_std/np.sqrt(num_iter))+\"]\")\r\nprint(\"l'erreur relative: \"+str(100*2*1.96*VaR_std/np.sqrt(num_iter)/VaR_mean)+\"%\")\r\n\r\n#probability associated with VaR\r\nindicator_Q = (Q > (VaR_mean-a0)) * np.exp(psi_theta-theta*Q)\r\nprobs = np.mean(indicator_Q,axis = 0)\r\nmean = np.mean(probs)\r\nstd = np.std(probs)\r\nprint(\"\")\r\nprint(\"P(L> VaR): \" + str(mean))\r\nprint(\"P(L <= VaR) = \"+ str((1- mean)*100.)+\"%\")\r\nprint(\"l'intervalle de confiance de 95% avec \"+str(num_iter)+\" interations: [\"+ \r\n str(mean - 1.96*std/np.sqrt(num_iter))+\", \"+str(mean + 1.96*std/np.sqrt(num_iter))+\"]\") \r\nprint(\"l'erreur relative: \"+str(100*2*1.96*std/np.sqrt(num_iter)/mean)+\"%\")\r\n\r\n\r\nConditional_Var = []\r\nfor i in range(len(probs)):\r\n if probs[i] != 0:\r\n Conditional_Var.append(np.mean(indicator_Q[:,i] * (Q[:,i]+a0))/probs[i])\r\nConditional_Var = np.array(Conditional_Var)\r\n \r\n\r\nmean_condi = np.mean(Conditional_Var)\r\nstd_condi = np.std(Conditional_Var)\r\nprint(\"\")\r\nprint(\"Conditional Var E[L|L>=VaR] avec P(L>VaR)=\"+str(mean)+\": \"+ str(mean_condi))\r\nprint(\"l'intervalle de confiance de 95% avec \"+str(num_iter)+\" interations: [\"+ \r\n str(mean_condi - 1.96*std_condi/np.sqrt(num_iter))+\", \"+str(mean_condi + 1.96*std_condi/np.sqrt(num_iter))+\"]\") \r\nprint(\"l'erreur relative: \"+str(100*2*1.96*std_condi/np.sqrt(num_iter)/mean_condi)+\"%\")\r\n\r\n\r\n# Conditional Distribution \r\ndistribu = Q[Q>(VaR_mean-a0)] + a0\r\nP = []\r\nintegral = 0.\r\ninter = np.linspace(VaR_mean + 0.01*VaR_mean, min(np.amax(distribu),2*VaR_mean))\r\nfor i in range(len(inter)-1):\r\n P.append(np.mean((Q>(inter[i]-a0))*(Q<(inter[i+1]-a0))*indicator_Q)/mean/(inter[i+1]-inter[i]))\r\n integral += P[-1]*(inter[i+1]-inter[i])\r\n\r\nP.append(0)\r\nprint(\"integral: \" + str(integral))\r\n\r\nplt.figure(dpi = 150)\r\nplt.plot(inter,P)\r\nplt.title(\"Distribution conditionelle au dela de la VaR(mesure P)\")\r\nplt.xlabel(\"Loss\")\r\nplt.ylabel(\"density\")\r\n","repo_name":"FanJiang718/MAP474-Project","sub_path":"IS_independant.py","file_name":"IS_independant.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20846687105","text":"messages = [\n {\n \"label\": \"type_name\",\n \"text\": \"Digite o nome do jogador: \"\n },\n {\n \"label\": \"type_name_blank_to_skip\",\n \"text\": \"Digite o nome de cada jogador, ou deixe em branco para pular\"\n },\n {\n \"label\": \"type_user_dream\",\n \"text\": \"Digite o sonho de \"\n },\n {\n \"label\": \"sorting_order\",\n \"text\": \"Sorteando ordem de jogada...\"\n },\n {\n \"label\": \"game_instructions\",\n \"text\": \"Preencher com instruções\"\n }\n]\n\ndef get_message(label):\n for message in messages:\n if message['label'] == label:\n return message['text']\n","repo_name":"anabuzzi/PI1_Grupo1_2020-2-master","sub_path":"user_messages.py","file_name":"user_messages.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73894014024","text":"import cv2\nfrom pupil_tracker import auto_tracker\nfrom glint_tracker import g_auto_tracker\nfrom optimization import fast_tracker\nimport threading\nimport statistics \n\nclass preprocess:\n def __init__(self, s_center, sf, CPI = None, blur = (16, 16), canny = (40, 50), image = None):\n\n '''\n If we need this function, then pass in the image.\n Otherwise always use the chosen best frame\n 
'''\n if image is None:\n self.sample = cv2.imread(\"input/chosen_pic.png\")\n else:\n self.sample = image\n '''\n range to find the best blurring\n 10 and 22 are wild gueseses. There might be cases that exceedes this boundary\n '''\n self.brange = range(10, 22, 1)\n '''\n Make public all the necessary parameters\n sf is the resizing factor, to ensure runtime efficiency\n '''\n self.blur = blur\n self.canny = canny\n self.factor = (sf,sf)#this factor might change based on the resize effect\n '''\n Steps to resize image based on resizing factor\n '''\n self.width = int(self.sample.shape[1])\n self.height = int(self.sample.shape[0])\n self.dim = (int(self.width/self.factor[0]),\\\n int(self.height/self.factor[1]))\n self.cropped = cv2.resize(self.sample, self.dim)\n '''\n What we need to search is the area cropped by the user divide by the the shrinking factors\n '''\n self.cropping_factor = CPI\n self.search_area = (int(self.cropping_factor[1][0]/self.factor[0]), int(self.cropping_factor[1][1]/self.factor[0]), \\\n int(self.cropping_factor[0][0]/self.factor[1]), int(self.cropping_factor[0][1]/self.factor[1]))\n '''\n Get the radius using CPI, which is the same as cropping_factor\n '''\n self.radius_h = int(min(self.cropping_factor[0][1] - self.cropping_factor[0][0],\\\n self.cropping_factor[1][1] - self.cropping_factor[1][0])/2)\n\n '''\n We define a minimum radius, which is also divided by the shirking factor to normalize it.\n again, 10 is a wild guess because I suspect there would be any pupil size smaller than 10 pixel.\n '''\n self.radius_l = 10\n self.radius = (int(self.radius_l/self.factor[0]), int(self.radius_h/self.factor[1]))\n \n '''\n Iterate through every single threshold there is.\n '''\n self.threshold_range = (0, 255)\n\n '''\n This function run the hough transform multiple times and return the ideal threshold for all runs\n '''\n def start(self):\n most_vote = 0;\n ideal_thresh = (0, 0)\n ideal_center = None\n #Iterate through both parameters in the threshold parameters\n for i in range(self.threshold_range[0], self.threshold_range[1], 5):\n for j in range(i, self.threshold_range[1]-50, 10):\n setup = fast_tracker(self.cropped, (i, j), self.blur, self.canny, self.radius) #Might be slow\n processed = setup.prepossing()[0] #Processed image using the guessed parameters\n #Run the Hough Transfom, a voting algorithm that will analysis the legidity of processed image.\n #Result is [coordinate] and [max voting]\n result = setup.hough_transform(processed, self.search_area)\n #The biggest vote corresponds to the best threshold\n if most_vote < sum(result[1]):\n most_vote = sum(result[1])\n ideal_thresh = (i, j) \n ideal_center = result[0][0]\n\n return (ideal_thresh) \n\n #This funciton uses statistics to find the best parameters for glint\n def g_count(self, ROI, CPI, parameters_glint, video):\n #Need to run the tracker for glint detection\n gt = g_auto_tracker(video, ROI, CPI, parameters_glint)\n count = 0\n #Max_frame defines how many frames the tracker need to run before having a result\n #The bigger the max_frame, the more precise it will be\n max_frame = 6000\n current = []\n minimum = float('inf');\n result = None\n #For this one, 5 and 13 represents the votes for each circle in hough transform\n for i in range(5, 13): \n vs = cv2.VideoCapture(video)\n vs.set(1, count)\n while True and count < max_frame:\n count += 1\n rframe = vs.read()[1]\n if rframe is None:\n break\n #Find circle\n circle = gt.find_circle(rframe, gt.varied_CPI, i, True)\n current.append(int(circle))\n std 
= statistics.stdev(current) \n if(std < minimum):\n minimum = std\n result = i\n #Reset current\n current = []\n count = 0;\n return result\n\n \"\"\"\n glint threshold from blurred \"search_area\"\n search_area likely from user drawn box\n \"\"\"\n def d_glint(self):\n if self.search_area[0] == 0 and self.search_area[1] == 0:\n raise Exception('glint search area is empty!')\n sample_glint = self.sample[self.search_area[0]:self.search_area[1], self.search_area[2]:self.search_area[3]]\n sample_glint = cv2.cvtColor(sample_glint, cv2.COLOR_BGR2GRAY)\n sample_glint = cv2.blur(sample_glint, (self.blur[0], self.blur[1]))\n # for i in range(self.glint_range[0], self.glint_range[1]): #Able to make a wild guess for threshold detection\n # for j in range(i, self.glint_range[1], 10):\n offset_thres = cv2.THRESH_BINARY+cv2.THRESH_OTSU\n thre, proc = cv2.threshold(sample_glint, 0, 255, offset_thres)\n print(f\"glint thres in {self.search_area} w/blur {self.blur}: {thre}\")\n # cv2.imwrite(\"look.png\", proc)\n # exit()\n return (thre, thre)\n\n def anal_blur(self, ROI_pupil, ROI_glint, video):\n b_collect = [] #Collection of first 200 blurs, the size would be different.\n #Loop through all probabilities\n g_blur = (0,0) #The return variable\n g_std = float(\"inf\")\n for i in self.brange:\n #First get the threshold, CPI and center should be kept same as the calling function\n self.parameters = {\"blur\":(i, i), \"canny\":self.canny, 'stare_posi':None}\n self.parameters['threshold'] = self.start() #Get the threshold range\n track = auto_tracker(video, ROI_pupil, self.parameters, ROI_glint)\n track.run_tracker(True)\n\n #Get the best blur using standard deviation\n std = statistics.stdev(track.testcircle)\n\n #Clumsy way of determing the best standard deviation\n if std == min(g_std, std):\n # #Get rid of exceptions\n # if track.testcircle.count(0)/len(track.testcircle) > 0.1:\n # continue\n g_std = std\n g_blur = (i,i)\n\n return g_blur\n\nif __name__ == '__main__':\n CPI = [[50, 280], [31, 80]]\n center = (99.0, 87.0)\n parameters_glint = {'threshold': (100, 100), 'blur': (1, 1), 'canny': (40, 50), 'H_count': 8, 'stare_posi':None}\n setup = preprocess(center, 1, CPI)\n setup.g_count(CPI, CPI, parameters_glint, \"input/run1.mov\")\n","repo_name":"Tian99/Robust-eye-gaze-tracker","sub_path":"preProcess.py","file_name":"preProcess.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1281120149","text":"'''\n다음 순열 분류\n시간 제한\t메모리 제한\t제출\t정답\t맞은 사람\t정답 비율\n1 초\t256 MB\t16509\t6998\t5025\t43.215%\n문제\n1부터 N까지의 수로 이루어진 순열이 있다. 이때, 사전순으로 다음에 오는 순열을 구하는 프로그램을 작성하시오.\n\n사전 순으로 가장 앞서는 순열은 오름차순으로 이루어진 순열이고, 가장 마지막에 오는 순열은 내림차순으로 이루어진 순열이다.\n\nN = 3인 경우에 사전순으로 순열을 나열하면 다음과 같다.\n\n1, 2, 3\n1, 3, 2\n2, 1, 3\n2, 3, 1\n3, 1, 2\n3, 2, 1\n입력\n첫째 줄에 N(1 ≤ N ≤ 10,000)이 주어진다. 둘째 줄에 순열이 주어진다.\n\n출력\n첫째 줄에 입력으로 주어진 순열의 다음에 오는 순열을 출력한다. 
만약, 사전순으로 마지막에 오는 순열인 경우에는 -1을 출력한다.\n\n예제 입력 1 \n4\n1 2 3 4\n예제 출력 1 \n1 2 4 3\n예제 입력 2 \n5\n5 4 3 2 1\n예제 출력 2 \n-1\n출처\n문제를 만든 사람: baekjoon\n'''\nfrom sys import stdin\nreadline = stdin.readline\n\n\ndef get_next_perm(n, arr):\n i = n-1\n\n while i > 0 and arr[i] < arr[i-1]:\n i -= 1\n\n if i < 1:\n return False\n\n j = n-1\n\n while arr[j] <= arr[i-1]:\n j -= 1\n\n arr[i-1], arr[j] = arr[j], arr[i-1]\n\n j = n-1\n\n while i < j:\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n j -= 1\n\n return True\n\n\nn = int(readline())\nnums = list(map(int, readline().split()))\n\nif get_next_perm(n, nums):\n print(' '.join(map(str, nums)))\nelse:\n print(-1)\n","repo_name":"LEE010/Algorithm","sub_path":"python3/boj/10972_다음_순열.py","file_name":"10972_다음_순열.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"21111388489","text":"from tkinter import *\n\n\n\n\n\n\n# class NoteFrame(Frame):\n#\n# def __init__(self, parent):\n# Frame.__init__(self, parent)\n# self.grid(column=0, row=0)\n# self.text_widget: Text = Text(self)\n# self.text_widget.grid(column=0, row=1)\n#\n# self.print_button: Button = Button(self, text='print_info', command=self._print_info)\n# self.print_button.grid(column=0, row=0)\n#\n# self.insert_button: Button = Button(self, text='insert text', command=self._insert_text)\n# self.insert_button.grid(column=1, row=0)\n#\n# self.delete_button: Button = Button(self, text='delete button', command=self._delete_text)\n# self.delete_button.grid(column=2, row=0)\n# \"\"\"\n# Guess I create a save and discard button. Discard will repopulate the text widget with\n# the notes. Save makes the current contents of the widget canonical.\n# \"\"\"\n#\n# def _print_info(self):\n# print(self.text_widget.get('1.0', END))\n#\n# def _insert_text(self):\n# desired_text = 'HELLO THERE KENOBI'\n# self.text_widget.insert('0.0', desired_text)\n#\n# def _delete_text(self):\n# self.text_widget.delete('1.0', END)\nclass NoteFrame(Frame):\n \"\"\" Interface for text widget, enabling user management of recipe notes. \"\"\"\n\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.grid(column=0, row=0)\n self.text_widget: Text = Text(self)\n self.text_widget.grid(column=0, row=1)\n self.note_string: str = ''\n\n self.save_button: Button = Button(self, text='save', command=self._save_notes)\n self.save_button.grid(column=0, row=0)\n self.discard_button: Button = Button(self, text='discard changes', command=self._discard_changes)\n self.discard_button.grid(column=1, row=0)\n\n def _save_notes(self):\n notes = self.text_widget.get('1.0', END)\n # Save attempt with Model here\n # New window confirming successful saving\n self.note_string = notes\n print(f'saved notes: {self.note_string}')\n\n def _discard_changes(self):\n \"\"\" Changes local to the text widget that will now be reverted \"\"\"\n self.text_widget.delete('1.0', END)\n self.text_widget.insert('1.0', self.note_string)\n\n\n\nroot = Tk()\n\nnf = NoteFrame(root)\nroot.mainloop()","repo_name":"RyanEliopoulos/autoshopper","sub_path":"prototypes/prototype_notebox.py","file_name":"prototype_notebox.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"8127231930","text":"from flask import request, jsonify\nfrom . import bs\nimport re\nfrom .. 
models import Business, Review, User, Tokens\n\n\n@bs.route('/api/v1/businesses//review', methods=['POST'])\ndef create_a_business_review(businessid):\n \n \"\"\"Create a business review, only a logged in user can add a review\"\"\"\n\n content1 = request.data.get('content')\n token = User.validate_token()\n\n if not token['access_token']or token['decodable_token'] or token['blacklisted_token']:\n return jsonify({'Error': 'Kindly login first to post a review'}), 401\n\n if not businessid.isdigit():\n return jsonify({\"Error\" :\"Invalid business Id, kindly use an integer for business ID\"}), 400\n business = Business.query.filter_by(businessid=businessid).first()\n\n if not business:\n return jsonify({'Error':'No business with the given id'}), 404\n\n if business.created_by== token['user_id']:\n return jsonify({'Error':'Sorry, You should not review your own business'}), 400\n\n if isinstance(content1, int) or not content1:\n return jsonify({'Error': \"Invalid input, fill in all required input and kindly use a valid string\"}), 400\n\n content = str(content1.strip(' '))\n\n if len(content) < 4 or content.isnumeric() or not content:\n response = jsonify({'Error': \"Kindly add a valid review and use at least 4 characters\"}), 400\n \n else:\n review = Review(content=content, created_by=token['user_id'], businessid=businessid)\n review.save()\n response = jsonify({\n 'Success':'Review added successfully',\n 'content': review.content,\n \"createdBy\":token['user_id'],\n \"creationDate\":review.date_created }), 201\n\n return response\n\n \n@bs.route('/api/v1/businesses//review', methods=['GET'])\ndef get_all_business_reviews(businessid):\n\n \"\"\"Retrieve all reviews for a business using business id\"\"\"\n\n results = []\n\n if not businessid.isdigit():\n return jsonify({\"Error\" :\"Invalid business Id, kindly use an integer for business ID\"}), 400\n\n if not Business.query.filter_by(businessid=businessid).first():\n return jsonify({'Error':'No business with the given id'}), 404\n reviews = Review.get_all(businessid)\n\n if reviews.count() == 0:\n response = jsonify({'message':'No reviews found'}), 404\n\n else:\n for review in reviews:\n user = User.query.filter_by(id=review.created_by).first()\n \n obj={\n \"Id\":review.id,\n \"Review\":review.content,\n \"createdBy\":user.username,\n \"creationDate\":review.date_created\n }\n\n results.append(obj)\n \n response = jsonify(results),200\n\n return response\n","repo_name":"kzyangiro/WeConnect","sub_path":"app/routes/reviews.py","file_name":"reviews.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18037643935","text":"# author wukat\n'''\nThe arithmetic sequence, 1487, 4817, 8147, in which each of the terms increases by 3330, \nis unusual in two ways: (i) each of the three terms are prime, and, (ii) each of the 4-digit \nnumbers are permutations of one another.\n\nThere are no arithmetic sequences made up of three 1-, 2-, or 3-digit primes, exhibiting this \nproperty, but there is one other 4-digit increasing sequence.\n\nWhat 12-digit number do you form by concatenating the three terms in this sequence?\n'''\n\nfrom euler005 import is_prime\nfrom euler041 import make_num\n\ndef generate_parmutations_recursive_better(so_far, rest):\n if rest:\n for i in range(len(rest)):\n element = rest[i]\n for t in generate_parmutations_recursive_better(so_far + [element], [rest[j] for j in range(len(rest)) if i != j]):\n yield t\n else:\n yield 
so_far\n\ndef check_number(number, used):\n permutations = set()\n for num_list in generate_parmutations_recursive_better([], map(lambda x: ord(x) - ord(\"0\"), str(number))):\n permutations.add(make_num(num_list))\n used.update(permutations)\n \n list_of_permutations = list(permutations)\n list_of_permutations.sort()\n \n for i in range(len(list_of_permutations) - 2):\n first = list_of_permutations[i]\n if first > 1000 and is_prime(first):\n for j in range(i + 1, len(list_of_permutations) - 1):\n second = list_of_permutations[j]\n if is_prime(second):\n for k in range(j + 1, len(list_of_permutations)):\n third = list_of_permutations[k]\n if is_prime(third):\n if 2*second == first + third:\n return (first, second, third)\n \ndef solve():\n used = set()\n for i in range(1234, 3333):\n if i not in used and is_prime(i):\n temp = check_number(i, used)\n if temp and temp != (1487, 4817, 8147):\n print(temp)\n\nif __name__ == '__main__':\n solve()\n \n\n","repo_name":"wubek/ProjectEuler","sub_path":"euler/euler049.py","file_name":"euler049.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11173783278","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom networks.network_utils import Mask\nfrom torch.distributions.normal import Normal\nfrom networks.resnet.resnet import ResNet\nfrom networks.resnet.resnet_util import conv3x3\nfrom networks.sequential import Sequential\nnorm_mean, norm_var = 0.0, 1.0\n\n\nclass SparseBasicBlock_CM(nn.Module):\n expansion = 1\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None, num_groups=2):\n super(SparseBasicBlock_CM, self).__init__()\n # self.num_groups = num_groups\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu1 = nn.ReLU(inplace=True)\n\n # self.mask1 = torch.nn.ModuleList()\n # for g in range(self.num_groups):\n # m = Normal(torch.tensor([norm_mean] * planes), torch.tensor([norm_var] * planes)).sample()\n # self.mask1.append(Mask(m, planes=True))\n\n # m = Normal(torch.tensor([norm_mean] * planes), torch.tensor([norm_var] * planes)).sample()\n # self.mask1 = Mask(m, planes=True)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n\n # self.mask2 = torch.nn.ModuleList()\n # for g in range(self.num_groups):\n # m = Normal(torch.tensor([norm_mean] * planes), torch.tensor([norm_var] * planes)).sample()\n # self.mask2.append(Mask(m, planes=True))\n\n # m = Normal(torch.tensor([norm_mean] * planes), torch.tensor([norm_var] * planes)).sample()\n # self.mask2 = Mask(m, planes=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n\n # tmp = self.mask1[0](out) * (group == 0)[:,None,None,None]\n # for i in range(1,self.num_groups):\n # tmp += self.mask1[i](out) * (group == i)[:,None,None,None]\n # out = tmp\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu2(out)\n\n # tmp = self.mask2[0](out) * (group == 0)[:,None,None,None]\n # for i in range(1,self.num_groups):\n # tmp += self.mask2[i](out) * (group == 
i)[:,None,None,None]\n # out = tmp\n\n # out = self.mask2(out)\n\n return out\n\n\nclass ResNet_decouple(ResNet):\n\n def __init__(self, block, layers, num_classes=1000,\n width_per_group=64, num_groups=2, no_groupmask=False, for_cifar=False, decouple_at=-1):\n super(ResNet_decouple, self).__init__(block=block, layers=layers,\n num_classes=num_classes, width_per_group=width_per_group,\n for_cifar=for_cifar)\n self.num_groups = num_groups\n\n self.no_groupmask = no_groupmask\n self.decouple_at = decouple_at\n if decouple_at == -1:\n if not no_groupmask:\n self.mask = nn.ModuleList()\n for g in range(num_groups):\n m = Normal(torch.tensor([norm_mean] * 512), torch.tensor([norm_var] * 512)).sample()\n self.mask.append(Mask(m, planes=True))\n self.fc = nn.ModuleList()\n for g in range(num_groups):\n self.fc.append(nn.Linear(512 * block.expansion, num_classes))\n\n elif decouple_at == -2:\n if not no_groupmask:\n self.mask = nn.ModuleList()\n for g in range(num_groups):\n m = Normal(torch.tensor([norm_mean] * 256), torch.tensor([norm_var] * 256)).sample()\n self.mask.append(Mask(m, planes=True))\n\n self.head = nn.ModuleList()\n for g in range(num_groups):\n head = []\n head.append(self._make_layer(block, 512, layers[3], stride=2, dilate=False, inplanes=256 * block.expansion))\n head.append(nn.AdaptiveAvgPool2d((1, 1)))\n head.append(nn.Flatten())\n head.append(nn.Linear(512 * block.expansion, num_classes))\n self.head.append(nn.Sequential(*head))\n\n\n def _forward_impl(self, x, group=None, get_inter=False, true_group=None, get_logit=False):\n # See note [TorchScript super()]\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n b1 = self.layer1(x)\n b2 = self.layer2(b1)\n b3 = self.layer3(b2)\n\n if self.decouple_at == -1:\n b4 = self.layer4(b3)\n\n if not self.no_groupmask:\n tmp = 0\n for i in range(self.num_groups):\n if true_group is None:\n tmp += self.mask[i](b4) * (group == i)[:, None, None, None]\n else:\n tmp += self.mask[i](b4) * (true_group == i)[:, None, None, None]\n b4 = tmp\n\n h = self.avgpool(b4)\n h = torch.flatten(h, 1)\n\n temp_b4 = 0\n out = 0\n for i in range(self.num_groups):\n tmp_out = self.fc[i](h)\n tmp_logit = tmp_out\n\n if len(group.shape) > 1:\n # when prob is given\n tmp_out = F.softmax(tmp_out, dim=1)\n\n group_for_inter_feature = group if true_group is None else true_group\n temp_b4 += tmp_logit * (group_for_inter_feature == i)[:, None]\n out += tmp_out * (group == i)[:, None] if not len(group.shape) > 1 else tmp_out * group[:, i].unsqueeze(-1)\n\n b4 = temp_b4 if get_logit else b4\n if get_inter or get_logit:\n return b1, b2, b3, b4, out\n else:\n return out\n\n elif self.decouple_at == -2:\n if not self.no_groupmask:\n tmp = 0\n for i in range(self.num_groups):\n if true_group is None:\n tmp += self.mask[i](b3) * (group == i)[:, None, None, None]\n else:\n tmp += self.mask[i](b3) * (true_group == i)[:, None, None, None]\n b3 = tmp\n\n temp_b3 = 0\n out = 0\n\n for i in range(self.num_groups):\n tmp_out = self.head[i](b3)\n tmp_logit = tmp_out\n\n if len(group.shape) > 1:\n # when prob is given\n tmp_out = F.softmax(tmp_out, dim=1)\n\n group_for_inter_feature = group if true_group is None else true_group\n temp_b3 += tmp_logit * (group_for_inter_feature == i)[:, None]\n out += tmp_out * (group == i)[:, None] if not len(group.shape) > 1 else tmp_out * group[:, i].unsqueeze(\n -1)\n\n b3 = temp_b3 if get_logit else b3\n if get_inter or get_logit:\n return b1, b2, b3, b3, out\n else:\n return out\n\n\n def forward(self, x, 
group=None, get_inter=False, true_group=None, get_logit=False):\n return self._forward_impl(x, group, get_inter, true_group, get_logit)\n\n\ndef resnet18_decouple(**kwargs):\n return ResNet_decouple(SparseBasicBlock_CM, [2, 2, 2, 2], **kwargs)\n","repo_name":"beotborry/fair_prune_fork","sub_path":"networks/resnet/resnet_decouple.py","file_name":"resnet_decouple.py","file_ext":"py","file_size_in_byte":7590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26981417806","text":"import requests\nimport random\nimport string\n\n\nclass Game:\n \n def __init__(self):\n self.api = 'https://api.dicionario-aberto.net/random'\n\n def get_data(self):\n response = requests.get(self.api)\n if response.status_code == 200:\n self.word = response.json()\n return self.word['word']\n\n else:\n print(f\"Olá, ocorreu o erro {response.status_code} com a sua requisição.\")\n\n def hangman(self, word):\n word_letters = set(word) #letters in the word\n alphabet = set(string.ascii_lowercase + 'çãíõéàýêîâôẽ')\n used_letters = set() #what the user has guessed\n\n lives = 6\n\n #getting user input\n while len(word_letters) > 0 and lives > 0:\n print(f'\\nVocê tem {lives} vidas restantes e usou essas letras:', ' '.join(used_letters).upper())\n\n word_list = [letter if letter in used_letters else '-' for letter in word]\n print('\\nPalavra atual:', ' '.join(word_list).upper())\n\n user_letter = input('\\nTente adivinhar uma letra: ').lower()\n if user_letter in alphabet - used_letters:\n used_letters.add(user_letter)\n if user_letter in word_letters:\n word_letters.remove(user_letter)\n\n else:\n lives -= 1\n print('\\nEssa letra não está na palavra')\n\n elif user_letter in used_letters:\n print('\\nVocê já usou essa letra.')\n \n else:\n print('\\nCaractere inválido.')\n\n if lives == 0:\n print(f'\\nVocê morreu a palavra era \"{word.upper()}\"')\n play_again = input('\\nQuer jogar novamente? s/n: ').lower() \n if play_again !='s':\n quit('\\nOk, tchau!')\n else:\n Game().call_class()\n\n else:\n print(f'\\nVocê acertou a palavra \"{word.upper()}\"')\n play_again = input('\\nQuer jogar novamente? 
s/n: ').lower()\n if play_again !='s':\n quit('\\nOk, tchau!')\n else:\n Game().call_class()\n\n def call_class(self):\n game = Game()\n data = game.get_data()\n hangman = game.hangman(data)\n\ndashs = '-' * 40\n\n\n\n\n\n\n\n\n","repo_name":"slocksert/hangman_portuguese","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19357973861","text":"# 21.01.29 [길찾기(다이아몬드)]\nimport sys\nn = int(sys.stdin.readline())\ndia = []\nfor i in range(2*n-1):\n\ttemp = list(map(int, sys.stdin.readline().split()))\n\tdia.append(temp)\n\nfor i in range(1, n):\n\tfor j in range(len(dia[i])):\n\t\tif j == 0:\n\t\t\tdia[i][0] += dia[i-1][0]\n\t\telif j == len(dia[i])-1 :\n\t\t\tdia[i][j] += dia[i-1][j-1]\n\t\telse:\n\t\t\tdia[i][j] += max(dia[i-1][j-1], dia[i-1][j])\n\nfor i in range(n, 2*n -1):\n\tfor j in range(len(dia[i])):\n\t\tdia[i][j] += max(dia[i-1][j], dia[i-1][j+1])\n\nprint(dia[2*n-2][0])\n","repo_name":"sladuf/Algorithm","sub_path":"Goorm/GRM43145.py","file_name":"GRM43145.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22408657040","text":"class Player:\n def __init__(self, data):\n self.name = data['name']\n self.age = data['age']\n self.position = data['position']\n self.team = data['team']\n\n # info display method\n def display_info(self):\n print(f\"name: {self.name} \\n age: {self.age} \\n position: {self.position} \\n team: {self.team}\")\n\n #class method\n @classmethod\n def get_team(cls, team_list):\n new_team = []\n for player in team_list:\n new_team.append(Player(player))\n return new_team\n\n\n# Challenge 2\n# Data for player creation\nkevin = {\n \"name\": \"Kevin Durant\", \n \"age\": 34, \n \"position\": \"small forward\", \n \"team\": \"Brooklyn Nets\"\n}\njason = {\n \"name\": \"Jason Tatum\", \n\t\"age\": 24, \n\t\"position\": \"small forward\", \n \"team\": \"Boston Celtics\"\n}\nkyrie = {\n \"name\": \"Kyrie Irving\", \n \"age\": 32,\n \"position\": \"Point Guard\", \n \"team\": \"Brooklyn Nets\"\n}\n\n# Create your Player instances here!\nplayer_kevin = Player(kevin)\nplayer_jason = Player(jason)\nplayer_kyrie = Player(kyrie)\n\n\n# Challenge 3\n# Pass in all the values from the dictionary by their keys\n\nplayers = [\n {\n \"name\": \"Kevin Durant\", \n \"age\": 34, \n \"position\": \"small forward\", \n \"team\": \"Brooklyn Nets\"\n },\n {\n \"name\": \"Jason Tatum\", \n \"age\": 24, \n \"position\": \"small forward\", \n \"team\": \"Boston Celtics\"\n },\n {\n \"name\": \"Kyrie Irving\", \n \"age\": 32,\n \"position\": \"Point Guard\", \n \"team\": \"Brooklyn Nets\"\n },\n {\n \"name\": \"Damian Lillard\", \n \"age\": 33,\n \"position\": \"Point Guard\", \n \"team\": \"Portland Trailblazers\"\n },\n {\n \"name\": \"Joel Embiid\", \n \"age\": 32,\n \"position\": \"Power Foward\", \n \"team\": \"Philidelphia 76ers\"\n },\n {\n \"name\": \"DeMar DeRozan\",\n \"age\": 32,\n \"position\": \"Shooting Guard\",\n \"team\": \"Chicago Bulls\"\n }\n]\n\nchallenge3_team = []\n\nfor player in players:\n challenge3_team.append(Player(player))\n\nfor player in challenge3_team:\n player.display_info()\n\n\n# Ninja Bonus\n\nbonus_team = Player.get_team(players)\n\nfor player in bonus_team:\n 
player.display_info()\n","repo_name":"BrentCleary/Coding-Dojo_Winter_2023","sub_path":"Python/py_w1d3/basketball_dictionaries/basketball_dictionaries.py","file_name":"basketball_dictionaries.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12089445997","text":"import random\nimport io\nimport os\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom PIL import Image\nfrom django.http import HttpResponse\n\n\n# 获取随机颜色\ndef get_random_color():\n R = random.randrange(255)\n G = random.randrange(255)\n B = random.randrange(255)\n return (R, G, B)\n\n\ndef get_verify_img(request):\n # 定义画布背景颜色\n bg_color = get_random_color()\n # 画布大小\n img_size = (150, 30)\n # 定义画布\n image = Image.new(\"RGB\", img_size, bg_color)\n # 定义画笔\n draw = ImageDraw.Draw(image, \"RGB\")\n # 实例化字体,设置大小是30\n font_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static/fonts/KumoFont.ttf')\n font = ImageFont.truetype(font_path, 30)\n # 准备画布上的字符集\n source = \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM0123456789\"\n # 保存每次随机出来的字符\n code_str = \"\"\n for i in range(4):\n # 获取数字随机颜色\n text_color = get_random_color()\n # 获取随机数字 len\n tmp_num = random.randrange(len(source))\n # 获取随机字符 画布上的字符集\n random_str = source[tmp_num]\n # 将每次随机的字符保存(遍历) 随机四次\n code_str += random_str\n # 将字符画到画布上\n draw.text((10 + 30 * i, 0), random_str, text_color, font)\n # 记录给哪个请求发了什么验证码\n request.session['code'] = code_str\n\n # 使用画笔将文字画到画布上\n # draw.text((10, 20), \"X\", text_color, font)\n # draw.text((40, 20), \"Q\", text_color, font)\n # draw.text((60, 20), \"W\", text_color, font)\n\n # 获得一个缓存区\n buf = io.BytesIO()\n # 将图片保存到缓存区\n image.save(buf, 'png')\n # image.save(open('test.png', 'wb'), 'png')\n # 将缓存区的内容返回给前端 .getvalue 是把缓存区的所有数据读取\n return HttpResponse(buf.getvalue(), 'image/png')","repo_name":"lwaxx/small_example","sub_path":"Python/verify_code.py","file_name":"verify_code.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8830521670","text":"def busqueda_secuencial(lista, x):\n '''Si x está en la lista devuelve el índice de su primera aparición, \n de lo contrario devuelve -1.\n '''\n pos = -1\n for i,z in enumerate(lista):\n if z == x:\n pos = i\n break\n return pos\n\ndef main():\n pos = busqueda_secuencial([1, 4, 5, 6], 6)\n print(pos)\nmain()","repo_name":"GonzaloMonteodorisio/ejercicios-python-unsam","sub_path":"Clase06/busqueda_secuencial_pos.py","file_name":"busqueda_secuencial_pos.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12719675336","text":"\nfrom django.urls import path , include\n\nfrom .views import *\n\napp_name = 'users'\n\nurlpatterns = [\n # path('', include(\"django.contrib.auth.urls\")),\n path('login/', LoginUser.as_view(), name='login'),\n path('register/', RegisterUser.as_view(), name='register'),\n # path('', order_create, name='order_create' ),\n path('logout_user/', logout_user, name='logout_user'),\n\n]\n\n\n","repo_name":"VasaKuzner/my_shop2","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14947127061","text":"import streamlit as st\n\nfrom PIL import Image\n\nimport networkx as nx\n\nfrom Visualizer import 
getGraph, plotGraph\n\n\ndef solve(tasksData, edgesData, metaData):\n method = metaData['method']\n balancedGraph = None\n \n if method == 'RPW':\n graph = getGraph(tasksData, edgesData)\n takeTime = metaData['taktTime']\n \n balancedGraph = calculateRPW(graph, takeTime)\n outputPath = '../Output/balanced.png'\n plotGraph(balancedGraph, outputPath)\n \n balancedGraphImage = Image.open(outputPath)\n st.markdown(\"
\", unsafe_allow_html=True)\n st.image(balancedGraphImage, caption=\"Balanced line\")\n \n elif method == 'SPT':\n pass\n \n elif method == '':\n pass\n \n else:\n pass\n \n \n return balancedGraph\n\n\ndef calculateRPW(G_digraph, limit):\n rpw_weights = {\n i: sum(\n G_digraph.nodes[j].get('weight')\n for j in list(nx.dfs_tree(G_digraph, source=i))\n )\n for i in G_digraph.nodes\n }\n sorted_rpw_weights = dict(\n sorted(rpw_weights.items(), key=lambda item: item[1], reverse=True)\n )\n sorted_rpw_weights_keys = list(sorted_rpw_weights.keys()) \n\n count=0\n totalweight=0\n group={}\n group_key=1\n tmpgrp = []\n nodeweight = [];\n\n for count in range(len(sorted_rpw_weights_keys)):\n totalweight += G_digraph.nodes[sorted_rpw_weights_keys[count]].get('weight')\n tmpgrp += [sorted_rpw_weights_keys[count]]\n \n if (count+1 > len(sorted_rpw_weights_keys)-1):\n group[group_key] = tmpgrp\n nodeweight.append(totalweight)\n break\n \n if (totalweight + G_digraph.nodes[sorted_rpw_weights_keys[count+1]].get('weight')) > limit:\n group[group_key] = tmpgrp\n tmpgrp = []\n group_key += 1\n nodeweight.append(totalweight)\n totalweight = 0\n\n G_balanced_line = nx.DiGraph()\n str1 = \", \"\n G_balanced_line.add_nodes_from({ k: (str1.join(group[k])) for k in range(1, len(group)+1) })\n G_balanced_line.add_edges_from({ k: (k,k+1) for k in range(1,len(group)+1) if (k+1 < len(group)+1) }.values())\n nx.set_node_attributes(\n G_balanced_line,\n {k: {'label': f\"{str(k)} {str(group[k])}\"} for k in group},\n )\n\n nx.set_node_attributes(G_balanced_line, { k+1: {'weight':nodeweight[k]} for k in range(len(nodeweight)) })\n nx.set_node_attributes(G_balanced_line, {k: {'group':group[k]} for k in group})\n \n return G_balanced_line","repo_name":"ImadSaddik/AssemblyLineBalancing","sub_path":"Scripts/Solver.py","file_name":"Solver.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38104809910","text":"import random\n\nx1=int(input('Tente adivinhar um número que escolh entre 1 e 10: '))\ny1=random.randint(1,10)\n\nif x1==y1:\n\tprint('Incrível! Você acertou!')\nelse:\n\tprint(f'Errado. 
O número era {y1}.')\n\n","repo_name":"ToledoLBC/Aulas","sub_path":"Python/Exercícios Python/aulas/1.1.10 - desafio 28.py","file_name":"1.1.10 - desafio 28.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3268392445","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nfrom collections import deque\n\nclass Codec:\n\n def serialize(self, root):\n \n data = []\n def bfs(root):\n q = deque([root])\n while q:\n level = []\n for _ in range(len(q)):\n node = q.popleft()\n level.append(node.val if node else None)\n if node is not None:\n q.append(node.left)\n q.append(node.right)\n data.append(level)\n \n bfs(root)\n return '|'.join([','.join(list(map(str,level))) for level in data])\n\n def deserialize(self, data):\n data = data.split('|')\n data = list(map(lambda level: level.split(','), data))\n\n if data[0][0] == 'None':\n return None\n \n root = TreeNode(data[0][0])\n last_nodes = [root]\n for i in range(1, len(data)):\n nodes = []\n for j in range(len(data[i])):\n val = data[i][j]\n if val == 'None':\n nodes.append(None)\n else:\n nodes.append(TreeNode(int(val)))\n\n q = deque(nodes[:])\n for j in range(len(last_nodes)):\n if last_nodes[j]:\n last_nodes[j].left = q.popleft() if q else None\n last_nodes[j].right = q.popleft() if q else None\n last_nodes = nodes\n return root","repo_name":"Dillettant/leetcode","sub_path":"solutions/297-bfs.py","file_name":"297-bfs.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40221778040","text":"from azure.storage.blob import BlobClient\nimport os\nfrom azure.storage.blob import ContainerClient\n\ndef main(container_name):\n \"\"\"\n Main\n Args:\n :param container_name\n Container Name of connection\n \"\"\"\n connection_string = read_config_file(\".connection\")\n\n container = ContainerClient.from_connection_string(\n conn_str=connection_string, container_name=container_name)\n\n blob_list = container.list_blobs()\n\n for blob in blob_list:\n create_folders(blob.name)\n create_file_blob(connection_string, blob, container_name)\n\ndef create_file_blob(connection_string, blob, container_name):\n \"\"\"\n Create File of blob\n Args:\n :param connection_string\n Value of connection string\n :param blob\n Object Blob\n :param container_name\n Name of container\n\n \"\"\"\n file = BlobClient.from_connection_string(\n conn_str=connection_string, container_name=container_name, blob_name=blob.name)\n with open(\"./\" + blob.name, \"wb\") as my_blob:\n blob_data = file.download_blob()\n blob_data.readinto(my_blob)\n\ndef create_folders(name):\n \"\"\"\n Create the folder's structures necessary\n Args:\n :param name\n Blob name with folders structures\n \"\"\"\n\n split = name.split(\"/\")\n\n if(len(split) > 1):\n folder_name = \"\"\n value_range = len(split) - 1\n for i in range(0, value_range):\n folder_name += split[i] + \"/\"\n\n if(os.path.exists(\"./\" + folder_name) is False):\n os.makedirs(\"./\" + folder_name)\n\n\ndef read_config_file(name_file):\n \"\"\"\n Read the config file\n Args:\n :param name_file\n Name of config file\n Return:\n Value of connection string\n \"\"\"\n\n connection_string = \"\"\n\n if(os.path.exists(\"./\" + name_file) is True):\n with open(name_file, 'r') as reader:\n connection_string += reader.read()\n\n return 
connection_string\n\n\nif __name__ == \"__main__\":\n main(\"teste\")\n","repo_name":"filipemot/azure_blob_download","sub_path":"client_azure_blob.py","file_name":"client_azure_blob.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70493784264","text":"\"\"\"\nThis file contains the model specification for two different tensorflow models.\nThe first model works with signal data (directly from wav files).\nThe second model works with mel spectrogram data (generated with the Librosa library).\nThe model definitions can be adapted if needed and the training parameter can be changed as well.\n\nThe data is taken from the /data folder.\nBoth models are trained at the same run and with the same data since data preparation takes a lot of time.\n\nThe two models are the stored in the /model folder and can be further processed by the other scripts.\n\"\"\"\nimport librosa\nimport os\nimport numpy as np\nfrom keras.layers import Dropout\n\nfrom tqdm import tqdm\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models\n\nfrom Constants import SAMPLE_RATE, AUDIO_PIECE_LENGTH, IS_MONO, N_MELS, cut_audio\n\nRANDOM_STATE = 42\n\n# -------------------- Preparation of the data set --------------------\npath_to_training_data = \"data/data_long\" # Either the small or large data set can be used for the training\n\nall_labels = []\nall_audios_as_signal = []\nall_audios_as_mel_spec = []\n\nfor label in os.listdir(path_to_training_data):\n # Loop through all three folders of training data\n for audio_track in tqdm(os.listdir(os.path.join(path_to_training_data, label))):\n # Loop through all audio files\n audio_signal, sample_rate_audio = librosa.load(os.path.join(os.path.join(path_to_training_data, label, audio_track)), sr=SAMPLE_RATE, mono=IS_MONO)\n audio_pieces = cut_audio(audio_signal) # Cut all audios to the same length\n for audio_piece in audio_pieces:\n mel_spectrogram = librosa.feature.melspectrogram(y=audio_piece, sr=SAMPLE_RATE, n_mels=N_MELS) # Calculate the melspectrogram\n all_audios_as_mel_spec.append(mel_spectrogram)\n\n audio_piece = audio_piece.reshape((AUDIO_PIECE_LENGTH, 1)) # Reshape the array such that the model can use it\n all_audios_as_signal.append(audio_piece)\n\n all_labels.append(label)\n\nencoder = LabelBinarizer()\nlabels = encoder.fit_transform(np.array(all_labels)) # One-hot encoding of all labels\n\n# ----------------------------------------------------------------------\n# ---------------------- MODEL WITH SIGNAL INPUT -----------------------\n# ----------------------------------------------------------------------\n\nnum_epochs_signal = 100 # The number of epochs that is used for training (taken from the paper)\nbatch_size_signal = 100 # The batch size that is used for training (taken from the paper)\n\n# Train test split of the data\nx_train_signal, x_test_signal, y_train_signal, y_test_signal = train_test_split(all_audios_as_signal, labels, test_size=0.33, random_state=RANDOM_STATE)\nx_val_signal, x_test_signal, y_val_signal, y_test_signal = train_test_split(x_test_signal, y_test_signal, test_size=0.5, random_state=RANDOM_STATE)\n\n# Wrap the arrays, otherwise they cannot be used for training\nx_train_signal = np.array(x_train_signal)\nx_test_signal = np.array(x_test_signal)\nx_val_signal = np.array(x_val_signal)\n\n# The model definition\n# Code 
adapted from https://github.com/Logan97117/environmental_sound_classification_1DCNN\nmodel_signal = models.Sequential()\nmodel_signal.add(layers.Conv1D(filters=16, kernel_size=64, strides=2, input_shape=(AUDIO_PIECE_LENGTH, 1)))\nmodel_signal.add(layers.Activation(activation='relu'))\nmodel_signal.add(layers.BatchNormalization())\nmodel_signal.add(layers.MaxPooling1D(pool_size=8, strides=8))\nmodel_signal.add(layers.Activation(activation='relu'))\nmodel_signal.add(layers.Conv1D(filters=32, kernel_size=32, strides=2))\nmodel_signal.add(layers.Activation(activation=\"relu\"))\nmodel_signal.add(layers.BatchNormalization())\nmodel_signal.add(layers.MaxPooling1D(pool_size=8, strides=8))\nmodel_signal.add(layers.Activation(activation=\"relu\"))\nmodel_signal.add(layers.Conv1D(filters=64, kernel_size=16, strides=2))\nmodel_signal.add(layers.Activation(activation=\"relu\"))\nmodel_signal.add(layers.BatchNormalization())\nmodel_signal.add(layers.Conv1D(filters=128, kernel_size=8, strides=2))\nmodel_signal.add(layers.Activation(activation=\"relu\"))\nmodel_signal.add(layers.BatchNormalization())\nmodel_signal.add(layers.Conv1D(filters=256, kernel_size=4, strides=2))\nmodel_signal.add(layers.Activation(activation=\"relu\"))\nmodel_signal.add(layers.BatchNormalization())\nmodel_signal.add(layers.MaxPooling1D(pool_size=4, strides=4))\nmodel_signal.add(layers.Activation(activation=\"relu\"))\nmodel_signal.add(layers.Flatten())\nmodel_signal.add(layers.Dense(64, activation='relu'))\nmodel_signal.add(layers.Dense(3, activation=\"softmax\"))\n\nmodel_signal.summary()\n\n# Additional parameters, also taken from the paper\nmodel_signal.compile(loss=tf.keras.losses.MeanSquaredLogarithmicError(), optimizer=tf.keras.optimizers.Adadelta(), metrics='accuracy')\n\nhistory_signal = model_signal.fit(x_train_signal, y_train_signal, epochs=num_epochs_signal, batch_size=batch_size_signal, validation_data=(x_val_signal, y_val_signal))\n\ntest_loss_signal, test_acc_signal = model_signal.evaluate(x_test_signal, y_test_signal, verbose=2)\n\nprint(\"Test Loss: \")\nprint(test_loss_signal)\nprint(\"Test Accuracy: \")\nprint(test_acc_signal)\n\n# Saving two versions of the model\ntf.saved_model.save(model_signal, \"model/signal\")\ntf.keras.models.save_model(model_signal, \"model/signal/keras\")\n\n# -------------------------------------------------------------------------\n# -------------------- MODEL WITH MEL SPECTROGRAM INPUT -------------------\n# -------------------------------------------------------------------------\n\nnum_epochs_melspec = 50 # The number of epochs that is used for training (taken from the paper)\nbatch_size_melspec = 64 # The batch size that is used for training (taken from the paper)\n\n# Train test split of the data\nx_train_melspec, x_test_melspec, y_train_melspec, y_test_melspec = train_test_split(all_audios_as_mel_spec, labels, test_size=0.33, random_state=RANDOM_STATE)\nx_val_melspec, x_test_melspec, y_val_melspec, y_test_melspec = train_test_split(x_test_melspec, y_test_melspec, test_size=0.5, random_state=RANDOM_STATE)\n\n# Wrap the arrays, otherwise they cannot be used for training\nx_train_melspec = np.array(x_train_melspec)\nx_test_melspec = np.array(x_test_melspec)\nx_val_melspec = np.array(x_val_melspec)\n\n# The model definition (taken from the paper)\nmodel_melspec = models.Sequential()\nmodel_melspec.add(layers.Conv2D(24, (6, 6), activation='relu', input_shape=(128, 63, 1)))\nmodel_melspec.add(layers.MaxPooling2D((4, 2), strides=2))\nmodel_melspec.add(layers.Conv2D(48, (5, 5), 
activation='relu'))\nmodel_melspec.add(layers.MaxPooling2D((4, 2), strides=2))\nmodel_melspec.add(layers.Conv2D(48, (5, 5), activation='relu'))\nmodel_melspec.add(layers.Conv2D(60, (4, 4), activation='relu'))\nmodel_melspec.add(layers.Conv2D(72, (4, 4), activation='relu'))\nmodel_melspec.add(layers.Flatten())\nmodel_melspec.add(layers.Dense(84, activation='relu'))\nmodel_melspec.add(Dropout(0.5))\nmodel_melspec.add(layers.Dense(3, activation=\"softmax\"))\n\nmodel_melspec.summary()\n\n# Additional parameters, also taken from the paper\nmodel_melspec.compile(loss=tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics='accuracy')\n\nhistory_melspec = model_melspec.fit(x_train_melspec, y_train_melspec, epochs=num_epochs_melspec, batch_size=batch_size_melspec, validation_data=(x_val_melspec, y_val_melspec))\n\ntest_loss_melspec, test_acc_melspec = model_melspec.evaluate(x_test_melspec, y_test_melspec, verbose=2)\n\nprint(\"Test Loss: \")\nprint(test_loss_melspec)\nprint(\"Test Accuracy: \")\nprint(test_acc_melspec)\n\n# Saving two versions of the model\ntf.saved_model.save(model_melspec, \"model/melspectrogram\")\ntf.keras.models.save_model(model_melspec, \"model/melspectrogram/keras\")\n","repo_name":"jenschmid/AudioClassifierAndroidApp","sub_path":"PythonModel/TrainModel.py","file_name":"TrainModel.py","file_ext":"py","file_size_in_byte":7869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41717399205","text":"from django.db.models.fields import SmallIntegerField\nfrom django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom .models import SMM\n# Create your views here.\nclass Indexview(ListView):\n model = SMM\n context_object_name = 'smm'\n template_name='index_web.html'\n \nclass SMMListView(ListView):\n model = SMM\n template_name = 'smm.html'\n paginate_by = 1\n context_object_name = \"smm_list\"\n \n\n\nclass SMMDetailView(DetailView):\n model = SMM\n template_name = 'smm_detail.html'\n context_object_name = 'smm_det'","repo_name":"Saidazimxonn/dukon","sub_path":"bigshop/smm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8969868962","text":"DEBUG = True\n\nimport re\n\nfold_regex = re.compile(r\"^fold along (?P<axis>[xy])=(?P<val>\\d+)$\")\ncoord_regex = re.compile(r\"^(?P<x>\\d+),(?P<y>\\d+)$\")\n\n\ndef read_src():\n src_file = \"example.txt\" if DEBUG else \"src.txt\"\n with open(src_file, \"r\") as fh:\n content = fh.readlines()\n\n load_coords = True\n coords = set()\n folds = []\n for line in content:\n line = line.strip()\n if line == \"\":\n load_coords = False\n continue\n\n if load_coords:\n coord_match = coord_regex.match(line)\n coords.add((int(coord_match.group(\"x\")), int(coord_match.group(\"y\"))))\n else:\n fold_match = fold_regex.match(line)\n folds.append(\n (fold_match.group(\"axis\") == \"y\", int(fold_match.group(\"val\")))\n )\n return coords, folds\n\n\ndef apply_fold(pt, fold):\n horiz, val = fold\n x, y = pt\n if horiz and y > val:\n y = 2 * val - y\n elif not horiz and x > val:\n x = 2 * val - x\n return (x, y)\n\n\ndef print_grid(coords):\n width = 0\n height = 0\n for x, y in coords:\n width = max(width, x)\n height = max(height, y)\n\n for y in range(height + 1):\n for x in range(width + 1):\n char = \".\"\n if (x, y) in coords:\n char = \"#\"\n print(char, end=\"\")\n print()\n\n\ndef task_1():\n coords, 
folds = read_src()\n\n coords = {apply_fold(pt, folds[0]) for pt in coords}\n\n print(f\"task 1: {len(coords)}\")\n\n\ndef task_2():\n coords, folds = read_src()\n\n for fold in folds:\n coords = {apply_fold(pt, fold) for pt in coords}\n print(f\"task 2: \")\n print_grid(coords)\n\n\nif __name__ == \"__main__\":\n DEBUG = False\n task_1()\n task_2()\n","repo_name":"Robtom5/AoC","sub_path":"AOC_2021/Day13/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34660843083","text":"import tkinter\n\nraiz = tkinter.Tk()\nraiz.title(\"Mi programa\")\n\n# Create the Text component, a long multi-line text widget\n\nentrada = tkinter.Text(raiz)\nentrada.config(width=\"20\",height=10, font=(\"Verdana\",15),padx=10, pady=10, fg=\"green\", selectbackground=\"lightgrey\")\nentrada.pack()\n\nraiz.mainloop()\n","repo_name":"Twynzen/Python_udemy","sub_path":"tkinter/entryTexto.py","file_name":"entryTexto.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74795097225","text":"import bisect\nfrom typing import List\n\n\nclass Solution:\n def lastStoneWeight(self, stones: List[int]) -> int:\n stones.sort()\n \n while len(stones) > 1:\n y = stones.pop()\n x = stones.pop()\n \n if x != y:\n stones.insert(bisect.bisect_left(stones, y-x), y-x)\n\n if not stones:\n return 0\n else:\n return stones[0]\n","repo_name":"novayo/LeetCode","sub_path":"1046_Last_Stone_Weight/try_1.py","file_name":"try_1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"35614445563","text":"from pyspark.ml.classification import NaiveBayes\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nimport os\nfrom pyspark.ml.feature import VectorAssembler\nimport numpy as np\n# Load training data\nfrom pyspark.python.pyspark.shell import spark\nfrom pyspark.sql import SparkSession\n\n\nos.environ[\"SPARK_HOME\"] = \"/Users/lalithajetty/Downloads/spark-2.3.1-bin-hadoop2.7/\"\nos.environ[\"HADOOP_HOME\"] = \"/usr/local/Cellar/hadoop/3.1.0\"\n#os.environ[\"PYSPARK_PYTHON\"] = \"/usr/local/Cellar/python/3.6.5_1/bin/python3.6\"\n#os.environ[\"PYSPARK_DRIVER_PYTHON\"] = \"/usr/local/Cellar/python/3.6.5_1/bin/python3.6\"\n\ndata = spark.read.format(\"csv\").option(\"header\", \"true\").load(\"/Users/lalithajetty/PycharmProjects/Spark_ICP6/Immunotherapy.csv\")\n\nspark = SparkSession.builder.getOrCreate()\n\ndata = spark.read.load(\"immunotherapy.csv\", format=\"csv\", header=True, delimiter=\",\")\ndata = data.withColumn(\"AGE_FACTOR\", data['age'] - 0).withColumn(\"Area\", data['Area'] - 0).withColumn(\"I_D\", data[\"induration_diameter\"] - 0).withColumn(\"label\", data['sex'] - 0)\ndata.show(100)\nassem = VectorAssembler(inputCols=[\"AGE_FACTOR\", \"Area\", \"I_D\"], outputCol='features')\ndata = assem.transform(data)\n\n\n# Split the data into train and test\nsplits = data.randomSplit([0.8, 0.2], 1234)\ntrain = splits[0]\ntest = splits[1]\n\n# create the trainer and set its parameters\nnb = NaiveBayes(smoothing=1.0, modelType=\"multinomial\")\n\n# train the model\nmodel = nb.fit(train)\n\n# select example rows to display.\npredictions = model.transform(test)\npredictions.show(100)\n\n# compute accuracy on the test set\nevaluator = MulticlassClassificationEvaluator(labelCol=\"label\", predictionCol=\"prediction\",\n 
metricName=\"accuracy\")\naccuracy = evaluator.evaluate(predictions)\nprint(\"Test set accuracy = \" + str(accuracy))","repo_name":"vvinay75/BIGDATA-PROGRAMMING-HADDOP-SPARK","sub_path":"M2_ICP6/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24031574476","text":"# Complete the function that accepts a string parameter, and reverses each word in the string. All spaces in the string should be retained.\n\n# Examples\n# \"This is an example!\" ==> \"sihT si na !elpmaxe\"\n# \"double spaces\" ==> \"elbuod secaps\"\n\ndef reverse_words(text):\n text = [word for word in text.split(' ')]\n for i, word in enumerate(text):\n word = [letter for letter in word]\n word.reverse()\n text[i] = \"\".join(word)\n return \" \".join(text)","repo_name":"phil-huynh/Problem-Sets","sub_path":"python/CodeWars/7kyu/reverse_words.py","file_name":"reverse_words.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12265501117","text":"import csv\nfrom typing import List\n\nfrom Commit import Commit\n\nimport dateutil.parser as parser\n\n\ndef is_day_row(row: List[str]) -> bool:\n return row[0] != None and len(row[0]) > 0\n\n\ndef process_dados() -> List[str]:\n commits: List[List[str]] = []\n user_map: dict = {}\n user_index: int = 0\n project_map: dict = {}\n project_index: int = 0\n with open('dados.csv', newline='') as dados:\n reader = csv.reader(dados, delimiter=',', quotechar='\\\"')\n next(reader, None)\n i = 1\n date_string = ''\n while True:\n try:\n row = next(reader)\n except StopIteration:\n break\n except Exception as e:\n print(\"Linha \" + str(i) + \" >> READ_ERROR >> \", e)\n i += 1\n continue\n if is_day_row(row):\n try:\n date_string = parser.parse(row[0])\n except Exception as e:\n print(\"Linha \" + str(i) + \" >> DATE_PARSE_ERROR >> \", e)\n continue\n raise e\n if not is_day_row(row):\n new_commit: Commit\n try:\n new_commit = Commit(row, 0, date_string)\n except Exception as e:\n print(\"Linha \" + str(i) + \" >> CONVERT_ERROR >> \", e)\n raise e\n if new_commit.usuario not in user_map.keys():\n user_map[new_commit.usuario] = user_index\n user_index += 1\n if new_commit.projeto not in project_map.keys():\n project_map[new_commit.projeto] = project_index\n project_index += 1\n new_commit.usuario = user_map[new_commit.usuario]\n new_commit.projeto = project_map[new_commit.projeto]\n commits.append(new_commit.to_csv())\n i += 1\n return commits\n\n\ncommits = process_dados()\nwith open('processed_dados.csv', 'w', newline='', encoding='utf-8') as processed_dados:\n writer = csv.writer(processed_dados)\n writer.writerow(Commit.get_csv_headers())\n writer.writerows(commits)\n","repo_name":"arthurhauer/DataMiningTask1A","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74668054343","text":"N = int(input())\r\nlst = list(map(int,input().split()))\r\n\r\nans = [-1] * N\r\nstack = []\r\nfor i in range(N):\r\n #스택 비어있지 않고 리스트의 스택 맨위 인덱스 값이 현재 값보다 작으면\r\n while stack and (lst[stack[-1]] < lst[i]):\r\n idx = stack.pop()\r\n ans[idx] = lst[i]\r\n stack.append(i)\r\n\r\nprint(*ans)","repo_name":"Jeongseunghun/python_algorithm","sub_path":"백준/Gold/17298. 
오큰수/오큰수.py","file_name":"오큰수.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74680234186","text":"import os\nos.environ['KERAS_BACKEND']='tensorflow'\n# os.environ['THEANO_FLAGS'] = \"device=gpu\" \nimport numpy as np\nimport math\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Activation, Embedding, Masking, Dropout, Conv1D, MaxPooling1D, Reshape\nfrom keras.models import load_model\n\nimport time\nimport datetime\nimport sys\n# from keras.utils.visualize_plots import figures \nfrom matplotlib.font_manager import FontProperties\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates \nimport matplotlib as mpl\n\n# coding:utf-8\n\n\npredict_step = 1\nteam_num = 2 \ncnn_output_dim = 64 \nkernel_size = 13\npool_size = 2 \nhidden_size = 256 \nepochs = 2000 \nbatch_size = 300 \nmodel_saved_path = \"../model/keras/\" \n# model_name = '50000_samples_20191029_CNN'\n# model_name = '100000_samples_20191030_CNN+LSTM'\nmodel_name = '20200120_LSTM'\nhero_id_max = 130 \n\n\n\nmpl.rcParams['font.sans-serif'] = ['SimHei'] \nmpl.rcParams['axes.unicode_minus'] = False \n\nwith open('/Users/502944285qq.com/PycharmProjects/SMAA/venv/bin/result.csv', 'r', encoding='utf-8') as fo_1:\n line_matches = fo_1.readlines()\n sample_in = []\n sample_out = []\n for i in range(len(line_matches))[1:]:\n split = line_matches[i].split(', ')\n radiant = split[2]\n dire = split[3]\n # print(split[4][:-1])\n if split[4][:-1] == 'True':\n win = 1.0\n else:\n win = 0.0\n radiant = list(map(int, radiant.split(',')))\n dire = list(map(int, dire.split(',')))\n radiant_vector = np.zeros(hero_id_max)\n dire_vector = np.zeros(hero_id_max)\n for item in radiant:\n radiant_vector[item - 1] = 1\n for item in dire:\n dire_vector[item - 1] = 1\n sample_in.append([radiant_vector, dire_vector])\n sample_out.append(win)\n\n\n# print(sample_in)\n# print(sample_out)\n\n\ndef make_samples():\n train_x = []\n train_y = []\n test_x = []\n test_y = []\n validate_x = []\n validate_y = []\n for i in range(len(sample_in)):\n if i % 10 == 3:\n test_x.append(sample_in[i])\n test_y.append(sample_out[i])\n elif i % 10 == 4:\n validate_x.append(sample_in[i])\n validate_y.append(sample_out[i])\n else:\n train_x.append(sample_in[i])\n train_y.append(sample_out[i])\n return train_x, train_y, test_x, test_y, validate_x, validate_y\n\n\ndef training():\n train_x, train_y, test_x, test_y, validate_x, validate_y = make_samples()\n tx = np.array(train_x).reshape(len(train_x), team_num, hero_id_max)\n ty = np.array(train_y).reshape(len(train_y), 1)\n test_x = np.array(test_x).reshape(len(test_x), team_num, hero_id_max)\n test_y = np.array(test_y).reshape(len(test_y), 1)\n validate_x = np.array(validate_x).reshape(len(validate_x), team_num, hero_id_max)\n validate_y = np.array(validate_y).reshape(len(validate_y), 1)\n # TODO\n print('=========== tx.shape:', tx.shape, ' ===============')\n print('=========== ty.shape:', ty.shape, ' ===============')\n print('=========== test_x.shape:', test_x.shape, ' ===============')\n print('=========== test_y.shape:', test_y.shape, ' ===============')\n print('=========== validate_x.shape:', validate_x.shape, ' ===============')\n print('=========== validate_y.shape:', validate_y.shape, ' ===============')\n\n \n # TODO LSTM \n model = Sequential()\n model.add(LSTM(hidden_size, input_shape=(team_num, hero_id_max),\n return_sequences=False)) \n model.add(Dropout(0.2))\n 
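# The LSTM reads team_num=2 timesteps of 130-dim one-hot hero vectors into one 256-dim summary (return_sequences=False); the dense head below maps it to a single sigmoid win probability.\n 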
model.add(Dense(10))\n model.add(Dropout(0.2))\n model.add(Dense(1)) \n model.add(Activation('sigmoid'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n\n # TODO CNN \n # model = Sequential()\n # model.add(Conv1D(cnn_output_dim,kernel_size,padding='same',activation='relu',input_shape=(team_num,hero_id_max))) \n # model.add(MaxPooling1D(pool_size=pool_size,data_format='channels_first')) \n # model.add(Reshape((int(team_num*cnn_output_dim/pool_size),), input_shape=(team_num,int(cnn_output_dim/pool_size))))\n # model.add(Dropout(0.2))\n # model.add(Dense((10),input_shape=(team_num,cnn_output_dim/pool_size)))\n # model.add(Dropout(0.2))\n # model.add(Dense(1)) \n # model.add(Activation('sigmoid'))\n # model.compile(loss='mse',optimizer='adam',metrics=['accuracy'])\n\n callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, verbose=0, mode='min'),\n keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1, verbose=0, mode='min',\n epsilon=0.0001, cooldown=0, min_lr=0)]\n hist = model.fit(tx, ty, batch_size=batch_size, epochs=epochs, shuffle=True,\n validation_split=0.1, callbacks=callbacks)\n model.save(model_saved_path + model_name + '.h5')\n\n\ndef testing(tag_line):\n train_x, train_y, test_x, test_y, validate_x, validate_y = make_samples()\n tx = np.array(train_x).reshape(len(train_x), team_num, hero_id_max)\n ty = np.array(train_y).reshape(len(train_y), 1)\n test_x = np.array(test_x).reshape(len(test_x), team_num, hero_id_max)\n test_y = np.array(test_y).reshape(len(test_y), 1)\n validate_x = np.array(validate_x).reshape(len(validate_x), team_num, hero_id_max)\n validate_y = np.array(validate_y).reshape(len(validate_y), 1)\n\n # tx = tx[40000:]\n # ty = ty[40000:]\n # test_x = test_x[5000:]\n # test_y = test_y[5000:]\n # validate_x = validate_x[5000:]\n # validate_y = validate_y[5000:]\n\n # TODO \n print('=========== tx.shape:', tx.shape, ' ===============')\n print('=========== ty.shape:', ty.shape, ' ===============')\n print('=========== test_x.shape:', test_x.shape, ' ===============')\n print('=========== test_y.shape:', test_y.shape, ' ===============')\n print('=========== validate_x.shape:', validate_x.shape, ' ===============')\n print('=========== validate_y.shape:', validate_y.shape, ' ===============')\n keras.backend.clear_session() \n model = load_model(model_saved_path + model_name + '.h5')\n out0 = model.predict(test_x)\n correct_num = 0\n for i in range(len(out0)):\n if out0[i][0] < 0.5:\n temp_result = 0.0\n else:\n temp_result = 1.0\n if temp_result == test_y[i][0]:\n correct_num += 1\n print('test accuracy:', float(correct_num) / len(test_x))\n\n out1 = model.predict(tx)\n correct_num = 0\n for i in range(len(out1)):\n if out1[i][0] < 0.5:\n temp_result = 0.0\n else:\n temp_result = 1.0\n if temp_result == ty[i][0]:\n correct_num += 1\n print('training accuracy:', float(correct_num) / len(tx))\n\n out2 = model.predict(validate_x)\n correct_num = 0\n for i in range(len(out2)):\n if out2[i][0] < 0.5:\n temp_result = 0.0\n else:\n temp_result = 1.0\n if temp_result == validate_y[i][0]:\n correct_num += 1\n print('validation accuracy:', float(correct_num) / len(validate_x))\n\n correct_num = 0\n compare_num = 0\n for i in range(len(out0)):\n if out0[i][0] < (1.0 - tag_line) or out0[i][0] > tag_line:\n compare_num += 1\n if out0[i][0] < 0.5:\n temp_result = 0.0\n else:\n temp_result = 1.0\n if temp_result == test_y[i][0]:\n correct_num += 1\n print('test,accuracy over' + str(tag_line) + 'is:', float(correct_num) / 
compare_num, \\\n ' (' + str(correct_num) + '/' + str(compare_num) + ')')\n\n for i in range(5):\n tag_line = 0.75 + 0.05 * i\n correct_num = 0\n compare_num = 0\n for i in range(len(out0)):\n if out0[i][0] < (1.0 - tag_line) or out0[i][0] > tag_line:\n compare_num += 1\n if out0[i][0] < 0.5:\n temp_result = 0.0\n else:\n temp_result = 1.0\n if temp_result == test_y[i][0]:\n correct_num += 1\n if compare_num != 0:\n print('test,accuracy over' + str(tag_line) + 'is:', float(correct_num) / compare_num, \\\n ' (' + str(correct_num) + '/' + str(compare_num) + ')')\n else:\n print('test,accuracy over' + str(tag_line) + 'is:', '0.0', \\\n ' (' + str(correct_num) + '/' + str(compare_num) + ')')\n\n\nif __name__ == \"__main__\":\n training()\n tag_line = 0.6\n testing(tag_line)\n","repo_name":"Anniecay/SMAA_Project","sub_path":"train model.py","file_name":"train model.py","file_ext":"py","file_size_in_byte":8585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34450904726","text":"user_input = int(input(\"Please select an option between 1 and 5\\n\"))\n\nif user_input == 1:\n print(\"function that adds two numbers together and produces the sum to a user\")\n def add_two_numbers():\n a=int(input(\"Enter first number:\\n\"))\n b=int(input(\"Enter second number:\\n\"))\n total = a + b\n print(\"sum of the two numbers is\", total)\n add_two_numbers()\nelif user_input == 2:\n print(\"function takes two or more numbers, sums them together and tells the user the sum of the numbers\")\n def add_numbers(numbers):\n total = 0\n for number in numbers:\n total = total+ int(number)\n return total\n total=add_numbers(input(\"enter numbers you wish to have added together\\n\").split(','))\n print(total)\nelif user_input == 3:\n print(\"function takes a word and reverses the word\")\n def reverse_string(word):\n print(word[::-1])\n reverse_string(input(\"enter word you wish to have reversed\"))\nelif user_input == 4:\n print(\"function takes a group of numbers and returns a sorted list of the numbers\")\n def sort_list(numbers):\n _list=[]\n for number in numbers:\n _list.append(int(number))\n _list.sort()\n print(_list)\n sort_list(input(\"enter group of numbers you wish to have sorted\").split(','))\nelif user_input ==5:\n print(\"function calculates average of a group of numbers\")\n def average_of_numbers(numbers):\n total = 0\n for number in numbers:\n total = total+ int(number)\n return total/len(numbers)\n print(\"average is\", average_of_numbers(input(\"enter numbers you wish to find the average of\\n\").split(',')))\n\n \n\n","repo_name":"everybees/parsel_tongue","sub_path":"femi/functions/multipurpose_application.py","file_name":"multipurpose_application.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"19605515724","text":"from typing import List\nfrom collections import deque\nfrom itertools import product\n\n# for tuple addressing purposes\n# better than magic numbers\nrow = 0\ncol = 1\n\n\nclass Grid:\n def __init__(self, size: int, winning_score: int, blank_char=\" \"):\n self.size = size\n self._winning_score = winning_score\n self._blank_char = blank_char\n self._grid = []\n self._victor = None\n self._moves_history = deque()\n for i in range(size):\n self._grid.append([])\n for j in range(size):\n self._grid[i].append(blank_char)\n\n def print_grid(self):\n columns = [f\" {i} \" for i in range(1, self.size + 1)]\n 
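# Header row of 1-based column numbers; each label spans three characters so single-digit headers line up with the 3-wide cells drawn below.\n 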
print(\" \", \"\".join(columns))\n for i, line in enumerate(self._grid):\n print(\" +\" + \"---+\" * self.size)\n print(str(i + 1), \"|\", \" | \".join(line), \"|\")\n print(\" +\" + \"---+\" * self.size)\n\n def is_tie(self) -> bool:\n return self.empty_squares() == 0\n\n def is_valid_move(self, move_list: List[int]) -> bool:\n if len(move_list) < 2:\n return False\n return self._grid[move_list[row]][move_list[col]] == \" \"\n\n def _line_score(self, player_mark: str, move: List[int]) -> int:\n score = 0\n for i in range(self.size):\n if self._grid[move[row]][i] == player_mark:\n score += 1\n return score\n\n def _column_score(self, player_mark: str, move: List[int]) -> int:\n score = 0\n for i in range(self.size):\n if self._grid[i][move[col]] == player_mark:\n score += 1\n return score\n\n def _diag_score(self, player_mark: str, move: List[int]) -> int:\n score_diag = score_antidiag = 0\n if move[row] == move[col] or move[row] + move[col] + 1 == self.size:\n for i in range(self.size):\n if self._grid[i][i] == player_mark:\n score_diag += 1\n if self._grid[i][self.size - 1 - i] == player_mark:\n score_antidiag += 1\n return max(score_diag, score_antidiag)\n\n def is_victorious(self, player_mark: str, move: List[int]):\n if self._line_score(player_mark, move) == self._winning_score or \\\n self._column_score(player_mark, move) == self._winning_score or \\\n self._diag_score(player_mark, move) == self._winning_score:\n self._victor = player_mark\n\n def undo(self):\n prev_move = self._moves_history.pop()\n self._grid[prev_move[row]][prev_move[col]] = self._blank_char\n self._victor = None\n\n def victor(self) -> str:\n return self._victor\n\n def execute_move(self, move: List[int], player_mark: str):\n self._grid[move[row]][move[col]] = player_mark\n self._moves_history.append(move)\n self.is_victorious(player_mark, move)\n\n def empty_squares(self) -> int:\n return self.size * self.size - len(self._moves_history)\n\n def possible_moves(self) -> List[List[int]]:\n result = []\n for row in range(self.size):\n for col in range(self.size):\n if self._grid[row][col] == self._blank_char:\n result.append([row, col])\n return result\n\n def possible_moves_optimized(self) -> List[List[int]]:\n result = []\n directions = [-1, 0, 1]\n for i in range(0, len(self._moves_history), 2):\n for direction in product(directions, repeat=2):\n move = [self._moves_history[i][row] + direction[row], self._moves_history[i][col] + direction[col]]\n if 0 <= move[row] < self.size and 0 <= move[col] < self.size and self.is_valid_move(move):\n result.append(move)\n return result\n","repo_name":"VratislavHais/PersonalDevelopment","sub_path":"python/tic-tac-toe/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15301550612","text":"#! 
/usr/bin/env python\nimport sys, copy\nimport planner\nfrom planner import Task, Planner_IW1, Planner_BFWS\n \n# Instance 00\nMAX_VARS=3\nMAX_VAL=10\n\n# Goal Conditions\ndef subgoal0(state):\n if (state[0][0]==9):\n return True\n else:\n return False\n\ndef subgoal1(state):\n if (state[1][0]==2):\n return True\n else:\n return False \n\ndef subgoal2(state):\n if (state[2][0]==7):\n return True\n else:\n return False\n \n# Actions \ndef inc_V0(state):\n state[0][0]=(state[0][0]+1)%MAX_VAL\n return state\n\ndef dec_V0(state):\n state[0][0]=(state[0][0]-1)%MAX_VAL\n return state\n\ndef inc_V1(state):\n state[1][0]=(state[1][0]+1)%MAX_VAL\n return state\n\ndef dec_V1(state):\n state[1][0]=(state[1][0]-1)%MAX_VAL\n return state\n\ndef inc_V2(state):\n state[2][0]=(state[2][0]+1)%MAX_VAL\n return state\n\ndef dec_V2(state):\n state[2][0]=(state[2][0]-1)%MAX_VAL\n return state\n\n\n# Creating the task\nt = Task.Task()\n\nfor i in range(0,MAX_VARS):\n t.load_state_variable(5,range(MAX_VAL))\n\nt.load_subgoal_function(subgoal0,[0])\nt.load_subgoal_function(subgoal1,[1])\nt.load_subgoal_function(subgoal2,[2])\n\nt.load_succesor_function(inc_V0,[0])\nt.load_succesor_function(dec_V0,[0])\nt.load_succesor_function(inc_V1,[1])\nt.load_succesor_function(dec_V1,[1])\nt.load_succesor_function(inc_V2,[2])\nt.load_succesor_function(dec_V2,[2])\n\n# Running the IW1 planner on the task\n#p = Planner_IW1.Planner_IW1(t)\n#solution_node = p.solve_IW1()\n\n\n# Running the BFWS planner on the task\np = Planner_BFWS.Planner_BFWS(t) \nsys.exit(0)\n","repo_name":"sjimenezgithub/tracking","sub_path":"src/upv-planner/RP-instance.py","file_name":"RP-instance.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71023606664","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@file: findAnagrams.py\n@time: 2021/1/7 4:18 PM\n@author: shenpinggang\n@contact: 1285456152@qq.com\n@desc: 438. 
Find All Anagrams in a String\nGiven a string s and a non-empty string p, find all substrings of s that are anagrams of p, and return the start indices of these substrings.\nThe strings consist of lowercase English letters only, and the lengths of both s and p are at most 20100.\n\nNotes:\nAn anagram is a string with the same letters arranged in a different order.\nThe order of the output does not matter.\n\nExample 1:\nInput:\ns: \"cbaebabacd\" p: \"abc\"\nOutput:\n[0, 6]\nExplanation:\nThe substring starting at index 0 is \"cba\", an anagram of \"abc\".\nThe substring starting at index 6 is \"bac\", an anagram of \"abc\".\n\nExample 2:\nInput:\ns: \"abab\" p: \"ab\"\nOutput:\n[0, 1, 2]\n\nExplanation:\nThe substring starting at index 0 is \"ab\", an anagram of \"ab\".\nThe substring starting at index 1 is \"ba\", an anagram of \"ab\".\nThe substring starting at index 2 is \"ab\", an anagram of \"ab\".\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/find-all-anagrams-in-a-string\n\"\"\"\nfrom collections import Counter\n\n\ndef find_anagrams(s, p):\n \"\"\"\n Keep the window sliding. Time complexity O(N), N = len(s)\n :param s: (str)\n :param p: (str)\n :return: (list[int])\n \"\"\"\n res = []\n p_counter = Counter(p)\n s_counter = Counter(s[:len(p) - 1])\n for i in range(len(p) - 1, len(s)):\n # Slide: take one character into the window\n s_counter[s[i]] += 1\n if s_counter == p_counter:\n res.append(i - len(p) + 1)\n # Remove one character from the window\n s_counter[s[i - len(p) + 1]] -= 1\n if s_counter[s[i - len(p) + 1]] == 0:\n del s_counter[s[i - len(p) + 1]]\n return res\n\n\ndef test(s, p, answer):\n outputs = find_anagrams(s, p)\n print(\"Inputs:s={},p={}, Outputs:{}, Except:{}\".format(s, p, outputs, answer))\n\n\ndef main():\n test(\"cbaebabacd\", \"abc\", [0, 6])\n test(\"abab\", \"ab\", [0, 1, 2])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"boyshen/leetcode_Algorithm_problem","sub_path":"438.找到字符串中所有字母异位词/findAnagrams.py","file_name":"findAnagrams.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17638291745","text":"import optparse\n\nfrom github import Github\n\n\nparser = optparse.OptionParser(conflict_handler=\"resolve\")\nparser.add_option('--string', action='store', type='string',\n dest='string',\n help='Art string for Github Contributions table')\n\nparser.add_option('--dictionary', action='store', type='string',\n dest='dictionary', default='alphanumeric',\n help='Letter dictionary ')\n\nparser.add_option('--path', action='store', type='string',\n dest='path', help='Path to github new project')\n\nparser.add_option('--project', action='store', type='string',\n dest='project', help='Github Project Name')\n\nparser.add_option('--username', action='store', type='string',\n dest='username', help='Github Username')\n\noptions, arguments = parser.parse_args()\n\nif not options.string:\n parser.error('String not given')\n\nif not options.path:\n parser.error('Build git path not given')\n\nif not options.dictionary:\n parser.error('Letter dictionary not given')\n\ngit = Github(options.string, options.path, options.dictionary)\ngit.initialite()\n\nif options.username and options.project:\n git.set_account(options.username, options.project)\n\ngit.run()\n","repo_name":"ufocoder/py-GithubArt","sub_path":"github_art/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"3298052382","text":"from pyspark.ml.classification import MultilayerPerceptronClassifier\nfrom pyspark.ml.tuning import TrainValidationSplit\nfrom pyspark.ml import Pipeline\n\nfrom wave_ml.ml.data.SparkDataObject import SparkDataObject\nfrom wave_ml.ml.model.FDSModel import FDSModel\nfrom wave_ml.ml.common.CommonUtil import CommonUtil as util\nfrom wave_ml.ml.common.CommonProperties import CommonProperties as prop\n\n\nclass 
FDSMultilayerPerceptronClassifier(FDSModel):\n\n model: MultilayerPerceptronClassifier\n train_validation_split: TrainValidationSplit\n\n def __init__(self, data: SparkDataObject):\n super(FDSMultilayerPerceptronClassifier, self).__init__(data)\n self.model = MultilayerPerceptronClassifier()\n\n def create(self):\n self.model_title = self.model.__class__.__name__\n return self\n\n def set_model(self, block_size: int, solver: str, label: str):\n layers_option: list\n size = self.data.get_columns_size()\n if size > 20:\n layers_option = [int(size-1), int(size/2), int(size/4), int(size/8), 2]\n else:\n layers_option = [int(size-1), 4, 2]\n\n self.model.setLayers(layers_option)\\\n .setBlockSize(block_size)\\\n .setSeed(10)\\\n .setSolver(solver)\\\n .setFeaturesCol(\"features\")\\\n .setLabelCol(label)\n\n pipeline_stage = self.pipeline_array\n pipeline_stage.append(self.model)\n\n self.pipeline = Pipeline(stages=pipeline_stage)\n return self\n","repo_name":"KHM13/wave_ml_nurier","sub_path":"wave_ml/ml/model/FDSMultilayerPerceptronClassifier.py","file_name":"FDSMultilayerPerceptronClassifier.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12940440945","text":"\n\nfrom socket import *\nimport struct\n\ns=socket(AF_INET,SOCK_DGRAM)\ns.bind((\"0.0.0.0\",7777))\nwhile True:\n msg,addr=s.recvfrom(1024)\n\n data=struct.unpack(\"i20sif\",msg)\n info=\"%d %-10s %d %.1f\"%(data[0],data[1].decode(),data[2],data[3])\n f=open(\"1.txt\",\"a\")\n f.write(info)\n f.write(\"\\n\")\n f.flush()\n s.sendto(\"已接受:\".encode(),addr)","repo_name":"zlz2013/zlz","sub_path":"network_base/week02/day02/struct_recv.py","file_name":"struct_recv.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"5300310933","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (print_function, unicode_literals,\n absolute_import, division)\n\nimport os\nimport time\nimport subprocess as sub\nimport cv2\nimport numpy as np\nimport sys\nimport abc\nimport threading\nfrom image_matching import compare\n\nif sys.platform.startswith('win'):\n import win32file\n import win32pipe\n\n# 256x240, 11 byte header, then 4 byte per pixel (first byte is always 0)\n# gd file size: 245771\n# 256x240x4 = 245760\n\n\nclass PipeController(object):\n\n '''\n Abstract Base class for the Linux and Windows pipe interfaces\n '''\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def openRead(self):\n '''\n Function: openRead\n Summary: Opens the reading pipe\n Examples:\n Attributes:\n @param (self):\n Returns:\n '''\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def openWrite(self):\n '''\n Function: openWrite\n Summary: Opens the writing pipe\n Examples:\n Attributes:\n @param (self):\n Returns:\n '''\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def write(self, data):\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def read(self, buf):\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def startFceux(self):\n '''\n Function: startFceux\n Summary: Starts Fceux; since the Windows and Linux APIs have different parameters, they need to be specified here\n Examples:\n Attributes:\n 
@param (self):\n Returns: Nothing\n '''\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def readScreenshot(self, argb=False):\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n\nclass Pipe(object):\n\n '''\n Abstract base class for the pipes connected to the .lua script\n '''\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, pipepath, mode):\n if os.path.exists(pipepath):\n os.remove(pipepath)\n self._path = pipepath\n self._mode = mode\n\n @abc.abstractmethod\n def open(self):\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def read(self, buffer):\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def write(self, data):\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n @abc.abstractmethod\n def close(self):\n raise NotImplementedError(\n \"Please use the specific subclasses for Linux or Win32\")\n\n\nclass LinuxEmulator(PipeController): # <-- This SHOULD work with Mac OSX\n\n filenameData = '/tmp/mariofifo-data'\n filenameData2 = '/tmp/mariofifo-data2'\n filenameCommand = '/tmp/mariofifo-command'\n\n class IOPipe(Pipe):\n\n def __init__(self, path, mode):\n Pipe.__init__(self, path, mode)\n os.mkfifo(path)\n\n def open(self):\n self._openpipe = open(self._path, self._mode)\n\n def write(self, data):\n self._openpipe.write(data)\n self._openpipe.flush()\n\n def read(self, buf):\n return self._openpipe.read(buf)\n\n def close(self):\n self._openpipe.close()\n\n def __del__(self):\n self._openpipe.close()\n\n def __init__(self,rom):\n # Might want to use Pipe.__init__(self, pipepath, mode) for Python 2.7\n self._dataPipe = self.IOPipe(self.filenameData, 'rb')\n self._dataPipe2 = self.IOPipe(self.filenameData2, 'rb')\n self._commandPipe = self.IOPipe(self.filenameCommand, 'wb')\n self._FCEUX = which('fceux')\n if not self._FCEUX:\n raise OSError(\n \"Fceux not found in the /usr/bin directory, are you sure you installed it?\")\n self._rom = rom\n\n def openRead(self):\n self._dataPipe.open()\n self._dataPipe2.open()\n\n def openWrite(self):\n self._commandPipe.open()\n\n def write(self, data):\n self._commandPipe.write(data)\n\n def read(self, buf=245771):\n return self._dataPipe.read(buf)\n\n def read2(self, buf=15):\n return self._dataPipe2.read(buf)\n\n def startFceux(self):\n cmd = [self._FCEUX]\n cmd += ['--loadlua']\n cmd += ['emulator-interface.lua']\n cmd += [self._rom]\n with open('/dev/null', 'w') as output:\n self.emulatorinstance = sub.Popen(cmd, stdout=output)\n self.openRead()\n self.openWrite()\n\n def readScreenshot(self, argb=False):\n # data = self._fifoData.read(245771) # 256x240x4 + 11 bytes header\n data = self.read(245771)\n # remove the header\n data = data[11:]\n # Read into np array\n rawdata = np.frombuffer(data, dtype=np.uint8)\n # Reshape it from 1 dimensional into ARGB format.\n #::-1 will reverse the order of the ARGB to RGBA, so that\n # cv2 can produce a proper output\n out = rawdata.reshape((240, 256, 4))\n\n if argb:\n return out\n else:\n return out[:, :, ::-1]\n\n def readOtherData(self):\n data = self.read2()\n return data\n\n def close(self):\n self._dataPipe.close()\n self._dataPipe2.close()\n self._commandPipe.close()\n\n def __del__(self):\n self._dataPipe.close()\n self._dataPipe2.close()\n self._commandPipe.close()\n\n\nclass WinEmulator(PipeController):\n\n 
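# Windows counterpart of LinuxEmulator: the same PipeController interface, implemented with win32 named pipes instead of POSIX FIFOs.\n 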
filenameData = \"\\\\\\\\.\\\\pipe\\\\mariofifo-data\"\n filenameCommand = \"\\\\\\\\.\\\\pipe\\\\mariofifo-command\"\n\n\n ###### CHANGE HERE ################\n FCEUX_BASEDIR = os.path.abspath('emulator')\n\n class IOPipe(Pipe):\n\n def __init__(self, path, mode):\n\n Pipe.__init__(self, path, mode)\n self._pipe = win32pipe.CreateNamedPipe(self._path,\n win32pipe.PIPE_ACCESS_DUPLEX,\n win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT,\n 1, 65536, 65536, 30, None)\n self._pipeconnection = threading.Thread(target=self._connectPipe)\n self._pipeconnection.daemon = True\n\n def _connectPipe(self):\n win32pipe.ConnectNamedPipe(self._pipe, None)\n\n def open(self):\n self._pipeconnection.start()\n # Returns no value to store?\n\n def write(self, data):\n # Make sure pipe is connected\n self._pipeconnection.join()\n if self._mode == 'wb':\n win32file.WriteFile(self._pipe, data)\n # Maybe flush?\n\n def read(self, buf):\n self._pipeconnection.join()\n if self._mode == 'rb':\n # Reads from the winpipe and closes it\n data = win32file.ReadFile(self._pipe, buf)\n # data will be a 2-element tuple with [result, read data]\n data = data[1]\n # try to read more if the received data is smaller than the\n # requested size\n while len(data) < buf:\n moreData = win32file.ReadFile(self._pipe, buf - len(data))\n if len(moreData[1]) == 0:\n break\n data += moreData[1]\n return data\n\n def close(self):\n win32file.CloseHandle(self._pipe)\n # self._pipeconnection.join()\n\n def __del__(self):\n win32file.CloseHandle(self._pipe)\n # self._pipeconnection.join()\n\n def __init__(self,rom):\n # Might want to use Pipe.__init__(self, pipepath, mode) for Python 2.7\n self._dataPipe = self.IOPipe(self.filenameData, 'rb')\n self._commandPipe = self.IOPipe(self.filenameCommand, 'wb')\n self._FCEUX = which(os.path.join(self.FCEUX_BASEDIR,'fceux.exe'))\n self._rom = rom\n if not self._FCEUX:\n raise OSError(\n \"Fceux not found in the directory fceux-2.2.2-win32. 
Please specify another one\")\n\n\n def write(self, data):\n self._commandPipe.write(data)\n # win32pipe.ConnectNamedPipe(p, None)\n # win32file.WriteFile(p, data)\n\n def read(self, buf):\n return self._dataPipe.read(buf)\n\n def openRead(self):\n self._dataPipe.open()\n\n def openWrite(self):\n self._commandPipe.open()\n\n def startFceux(self):\n cmd = [self._FCEUX]\n cmd += ['-lua']\n cmd += ['emulator-interface.lua']\n cmd += [self._rom]\n self.openRead()\n self.openWrite()\n self.emulatorinstance = sub.Popen(cmd)\n\n def readScreenshot(self, argb=False):\n # data = self._fifoData.read(245771) # 256x240x4 NO HEADER\n data = self.read(245771)\n # remove the header\n data = data[11:]\n # Read into np array\n rawdata = np.frombuffer(data, dtype=np.uint8)\n # if rawdata.shape < (245760,):\n # return np.zeros(shape=(240, 256, 4))\n # Reshape it from 1 dimensional into ARGB format.\n #::-1 will reverse the order of the ARGB to RGBA, so that\n # cv2 can produce a proper output\n out = rawdata.reshape((240, 256, 4))\n if argb:\n return out\n else:\n return out[:, :, ::-1]\n\n\n\n def close(self):\n win32file.CloseHandle(self._dataPipe)\n win32file.CloseHandle(self._commandPipe)\n\n def __del__(self):\n win32file.CloseHandle(self._dataPipe)\n win32file.CloseHandle(self._commandPipe)\n\n\ndef which(program):\n\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\n\nclass Emulator(object):\n BUTTON_UP = 1\n BUTTON_DOWN = 2\n BUTTON_LEFT = 4\n BUTTON_RIGHT = 8\n BUTTON_A = 16\n BUTTON_B = 32\n BUTTON_START = 64\n BUTTON_SELECT = 128\n SOFTRESET = 255\n\n def __init__(self, rom='mario.nes'):\n\n if sys.platform.startswith('win'):\n self._pipe = WinEmulator(rom)\n else:\n self._pipe = LinuxEmulator(rom)\n self._rom = rom\n\n # remove the old data and command pipes and create new ones\n # self._pipedata = Pipe(self.filenameData, 'rb')\n # self._commandpipe = Pipe(self.filenameCommand, 'wb')\n\n def startFceux(self):\n '''\n Function: startFceux\n Summary: Starts the Emulator FCeux. Fceux needs to be installed on the machine with e.g. 
sudo apt-get install fceux\n\n Examples: Emulator().startFceux()\n Attributes:\n @param (self):\n Returns: None\n '''\n # Starts the instance and does not wait to finish\n self._pipe.startFceux()\n\n def readScreenshot(self):\n '''\n Function: readScreenshot\n Summary: Reads the screenshot from the pipe and converts it to a numpy array\n Examples: Emulator().readScreenshot()\n Attributes:\n @param (self):\n @param (argb) default=False: If true, returns argb format, else bgra\n Returns: numpy array with dimensions 240,256,4\n '''\n return self._pipe.readScreenshot()\n # data = self._fifoData.read(245771) # 256x240x4 + 11 bytes header\n # data = self._pipe.read(245771)\n # remove the header\n # data = data[11:]\n # Read into np array\n # rawdata = np.frombuffer(data, dtype=np.uint8)\n # Reshape it from 1 dimensional into ARGB format.\n # ::-1 will reverse the order of the ARGB to RGBA, so that\n # cv2 can produce a proper output\n # print (rawdata.shape)\n # out = rawdata.reshape((240, 256, 4))\n\n # if argb:\n # return out\n # else:\n # return out[:, :, ::-1]\n def readOtherData(self):\n return self._pipe.readOtherData()\n\n def simulateFrame(self, buttons=0):\n '''\n Function: simulateFrame\n Summary: Sends the desired button presses to the emulator, which will advance the frame of the Emulator\n Examples: Emulator().simulateFrame(Emulator.BUTTON_RIGHT)\n Attributes:\n @param (self):\n @param (buttons) default=0: Buttons to be pressed\n Returns: None\n '''\n self._pipe.write(chr(buttons))\n # self._fifoCommand.flush()\n\n def __del__(self):\n self._pipe.emulatorinstance.kill()\n self._pipe.close()\n # self._fifoCommand.close()\n # self._fifoData.close()\n\n\nclass MarioEmulator(Emulator):\n\n def __init__(self, displayFrames=5):\n # by default only show every 5th frame in the opencv window\n Emulator.__init__(self)\n self._stop = False\n self._displayFrames = displayFrames\n self.otherData = None\n self.startFceux()\n self.isFinishing = False\n self.timeRemaining = 0\n\n def run(self):\n #cv2.namedWindow('Mario', cv2.WINDOW_NORMAL)\n\n # note that it is extremely important that readScreenshot() and simulateFrame()\n # are called in turns. 
calling readScreenshot() twice in a row will freeze the\n # emulator!\n t = time.time()\n state = 0\n n = 0\n pressStartFrame = 0\n self.isFinishing = False\n self.timeAtFinish = 0\n self.fitness = 0\n while not self.stop:\n screenshot = self.readScreenshot()\n self.otherData = self.readOtherData()\n self.updateTimeRemaining()\n #print(self.otherData.encode(\"hex\"))\n if not self.isBlackScreen(screenshot):\n obstacles = compare(screenshot)\n #\n #print(obstacles.getHover())\n #print(obstacles.getCanyon())\n nearestObsDist = 250\n for obstacle in obstacles.getFixed():\n if obstacle[0][0] < nearestObsDist:\n nearestObsDist = obstacle[0][0]\n self.nearestObsDist = nearestObsDist\n\n nearestGapDist = 250\n for gap in obstacles.getCanyon():\n if gap[0][0] < nearestGapDist:\n nearestGapDist = gap[0][0]\n self.nearestGapDist = nearestGapDist\n\n\n\n\n if (self.isOnPole()):\n self.isFinishing = True\n\n buttons = 0\n if state == 0:\n if self.marioScreenPos == int('db', 16):\n pressStartFrame = n + 3\n state = 1\n elif state == 1: # the start screen appears\n if n == pressStartFrame:\n buttons = self.BUTTON_START\n state = 2\n elif state == 2:\n if int(self.otherData[7].encode(\"hex\"),16) == 5:\n state = 3\n elif state == 3:\n if int(self.otherData[7].encode(\"hex\"),16) == 6:\n self.gameStarted()\n state = 4\n elif state == 4: # playing\n if self.isOnPole():\n self.isFinishing = True\n self.timeAtFinish = self.timeRemaining\n if self.marioState == 11 or self.marioState == 6 or self.timeSinceLastFitnessUp > 150:\n self.marioDied(n)\n buttons = self.SOFTRESET\n state = 0\n #print(n, 'frames,', n / (time.time() - t), 'fps')\n n = -1\n t = time.time()\n self.stop = True\n else:\n buttons = self.frame(n)\n\n self.simulateFrame(buttons)\n #if self.displayFrames and n % self.displayFrames == 0:\n # cv2.imshow('Mario', screenshot)\n # cv2.waitKey(1)\n n += 1\n\n @property\n def enemyScreenPos(self):\n return [int(i.encode(\"hex\"),16) for i in self.otherData[1:-1]]\n\n @property\n def marioAbsPos(self):\n return int(self.otherData[6].encode(\"hex\"),16)\n\n @property\n def marioState(self):\n return int(self.otherData[13].encode(\"hex\"),16)\n\n @property\n def currentScreen(self):\n return int(self.otherData[14].encode(\"hex\"),16)\n\n def isOnGround(self):\n if int(self.otherData[9].encode(\"hex\"),16) == 0:\n return 1\n else:\n return 0\n\n def isOnPole(self):\n if int(self.otherData[9].encode(\"hex\"),16) == 3:\n return True\n else:\n return False\n\n @property\n def marioScreenPos(self):\n return int(self.otherData[0].encode(\"hex\"),16)\n\n def updateTimeRemaining(self):\n if not self.isFinishing:\n c = int(self.otherData[10].encode(\"hex\"), 16)\n d = int(self.otherData[11].encode(\"hex\"), 16)\n u = int(self.otherData[12].encode(\"hex\"), 16)\n self.timeRemaining = u + d*10 + c*100\n return self.timeRemaining\n\n @property\n def stop(self):\n return self._stop\n\n @stop.setter\n def stop(self, stop):\n self._stop = stop\n\n @property\n def displayFrames(self):\n return self._displayFrames\n\n @property\n def xSpeed(self):\n nb = int(self.otherData[8].encode(\"hex\"), 16)\n if nb >= 128:\n nb = nb - 255\n return nb\n\n @displayFrames.setter\n def displayFrames(self, displayFrames):\n self._displayFrames = displayFrames\n\n def gameStarted(self):\n raise NotImplementedError()\n\n def frame(self, n):\n raise NotImplementedError()\n\n def marioDied(self, n):\n raise NotImplementedError()\n\n def levelFinished(self, n):\n raise NotImplementedError()\n\n def isBlackScreen(self, screenshot):\n return 
np.all(screenshot[190:200, :, :] == 0)\n\n def isLevel2StartingScreen(self, screenshot):\n # Should return 1 if (x,y) = (154,77) is white.\n return np.all(screenshot[77, 154, :] > 250)\n # TODO: find out if WORLD 1-1 or WORLD 1-2 is displayed in the image\n # probably the easiest way is to just check one pixel that is different in 1 and 2\n # Check if pixel (x,y)=(154,77) is white. If white -> level 1-2 started -> victory\n # This is if the image is (x,y) = (256,224) large.\n return False\n","repo_name":"nicoding/SJTUMachineLearning","sub_path":"emulator/emulator.py","file_name":"emulator.py","file_ext":"py","file_size_in_byte":19038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12270235346","text":"import os\nimport torch\nimport numpy as np\nfrom skimage.feature import peak_local_max\nfrom src.crowd_count import CrowdCounter\nfrom src.network import load_net\nfrom src.data_loader import ImageDataLoader\nfrom src import utils\nimport cv2 \nimport matplotlib.pyplot as plt\nfrom matplotlib import cm as CM\nfrom utils import make_density_map as mdm\nfrom utils import boundingbox as bb\nfrom utils.dotrect import drawrect\nimport statistics as stat\nimport face_detection\nimport csv\n\nsave_estimado, save_GT=[],[]\n\ndef draw_faces_blur(src2, bboxes):\n for bbox in bboxes:\n x0, y0, x1, y1 = [int(_) for _ in bbox]\n #cv2.rectangle(src2, (x0, y0), (x1, y1), (0, 0, 255), 2)\n x,y= x0,y0\n w, h = x1-x0,y1-y0\n ROI = src2[y:y+h, x:x+w]\n blur = cv2.GaussianBlur(ROI, (13,13), 0) \n # Insert ROI back into image\n src2[y:y+h, x:x+w] = blur\n\n'''\nFor producing videos with the generated models\n'''\n\ndetector = face_detection.build_detector(\"DSFDDetector\")\ntorch.backends.cudnn.enabled = True\ntorch.backends.cudnn.benchmark = True\nvis = False\nsave_output = False\n\nmodel_path = '/data/estudiantes/william/PdG-Code/data_prep/crowdcount-mcnn/final_models/station_1.1_re_2__300.h5'\nmodel_name = os.path.basename(model_path).split('.')[0]\n#file_results = os.path.join(output_dir,'results_' + model_name + '_.txt')\n\nnet = CrowdCounter()\ntrained_model = os.path.join(model_path)\nload_net(trained_model, net)\nnet.cuda(device=\"cuda:3\")\nnet.eval()\n\n\n# frame_number = 100\nfirst = True\nalpha=0.8\nClips = bb.load_clip_list()\n\nnvideo = 19\nClips = [Clips[nvideo]]\n\nfor tclip in Clips:\n window = []\n path_video = tclip.get_vdir()\n boxes = bb.boxes_from_xml(tclip.get_fpath())\n ran = tclip.get_fran()\n fr_id = 1\n first = True\n for frame_number in range(ran[0]+200, ran[1]-400, 1):\n print(\"Doing frame {n} video {s}\".format(n=fr_id, s=nvideo))\n fr_id += 1\n bframe = bb.boxes_in_frame(frame_number, boxes)\n pointList = mdm.get_pointlist(bframe)\n an_cnt = len(pointList)\n \n cap = cv2.VideoCapture(path_video)\n cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)\n res, frame = cap.read()\n img = frame.copy()\n cv2.imwrite('currentimg2.jpg', img)\n src2 = img\n #GT Map\n denmap = mdm.get_density_map_fixed_gaussian(img, pointList)\n plt.imsave('currentmap2.png', denmap, cmap=CM.jet)\n cnt_csv = round(denmap.sum())\n save_GT.append(cnt_csv)\n #print(img.shape)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = img.astype(np.float32, copy=False)\n ht = img.shape[0]\n wd = img.shape[1]\n ht_1 = (ht/4)*4\n wd_1 = (wd/4)*4\n img = cv2.resize(img,(int(wd_1),int(ht_1)))\n img = img.reshape((1,1,img.shape[0],img.shape[1]))\n #im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n density_map = net(img)\n density_map = density_map.data.cpu().numpy()\n dmap = 255 
* density_map / np.max(density_map)\n dmap = dmap[0][0]\n et_count_integral = round(density_map.sum())\n et_count = et_count_integral\n cv2.imwrite('mapnow2.png', dmap)\n save_estimado.append(et_count)\n\n '''\n imagen = cv2.imread('mapnow2.png', 0)\n otsu_threshold, image_result = cv2.threshold(\n imagen, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU,)\n nhead = peak_local_max(imagen, min_distance=5,threshold_abs=otsu_threshold)\n et_count_otsu = len(nhead)\n countnz = np.rint(np.count_nonzero(modmap)/65)\n\n et_count = count_exp_nz(et_count_integral, et_count_otsu, countnz) \n '''\n \n if len(window) < 19:\n window.append(et_count)\n usemedian = False\n if len(window) >= 19:\n movep = window[0:17]\n window[1:18] = movep\n window[0] = et_count\n window_median = stat.median(window)\n usemedian = True\n \n cv2.imwrite('mapnow2.png', dmap)\n original = cv2.imread('mapnow2.png')\n height, width = original.shape[:2]\n output = cv2.resize(original, (640,480), interpolation = cv2.INTER_AREA)\n\n src1= output #map\n # src2 = cv2.imread('currentimg2.jpg') #img\n cmap = cv2.imread('currentmap2.png')\n\n #blur faces \n detections = detector.detect(src2[:, :, ::-1])[:, :4]\n print(len(detections))\n draw_faces_blur(src2, detections)\n\n cv2.imwrite('blur_prueba2.png', src2)\n\n # [blend_images]\n beta = (1.0 - alpha)\n #blended img\n dst = cv2.addWeighted(src1, alpha, src2, beta, 0.0)\n\n drawrect(src2, (25, 40), (615, 465), (0, 0, 255), 2)\n drawrect(cmap, (25, 40), (615, 465), (0, 0, 255), 2)\n drawrect(dst, (25, 40), (615, 465), (0, 0, 255), 2)\n\n scale_percent = 70 # percent of img_3c size\n width = int(src1.shape[1] * scale_percent / 100)\n height = int(src1.shape[0] * scale_percent / 100)\n dim = (width, height)\n\n # resize image\n og_resized = cv2.resize(src2, dim, interpolation=cv2.INTER_AREA)\n gt_resized = cv2.resize(cmap, dim, interpolation=cv2.INTER_AREA)\n blend_resized = cv2.resize(dst, dim, interpolation=cv2.INTER_AREA)\n\n if usemedian:\n estimado = window_median\n else: \n estimado = et_count\n\n textmap = 'GT: '+str(cnt_csv)\n textet = 'ET: '+str(estimado)\n textim = 'AN: '+str(an_cnt)\n font = cv2.FONT_HERSHEY_SIMPLEX\n org = (200, 40)\n fontScale = 1\n color = (0, 255, 0)\n thickness = 2\n gt_resized = cv2.putText(gt_resized, textmap, org, font, fontScale,\n color, thickness, cv2.LINE_AA, False)\n blend_resized = cv2.putText(blend_resized, textet, org, font, fontScale,\n color, thickness, cv2.LINE_AA, False)\n og_resized = cv2.putText(og_resized, textim, org, font, fontScale,\n color, thickness, cv2.LINE_AA, False)\n\n h_img = cv2.hconcat([og_resized, gt_resized, blend_resized])\n\n if first:\n out = cv2.VideoWriter('gtresultsmodel_station'+str(nvideo)+'.avi', cv2.VideoWriter_fourcc(*'DIVX'), 20, (h_img.shape[1], h_img.shape[0]))\n first = False\n\n out.write(h_img)\n \n nvideo += 1\n out.release()\n\ndatos_espaciados=np.round(np.linspace(0,60,num=len(save_estimado)),2)\nwith open(\"datos_clip_19.csv\", \"w\") as csv_file: \n writer = csv.writer(csv_file, delimiter=',')\n level_counter = 0\n max_levels = len(save_estimado)\n while level_counter < max_levels:\n if(level_counter==0):\n writer.writerow((\"GT\",\"Estimado\",\"Intervalo\")) \n else:\n writer.writerow((save_GT[level_counter],save_estimado[level_counter],datos_espaciados[level_counter]))\n level_counter = level_counter +1 
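\n# The output CSV pairs each frame's ground-truth count with the raw model estimate over an evenly spaced 0-60 time axis, presumably for offline comparison.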
","repo_name":"williamoreno98/Passenger-Counting-in-Mass-Public-Transport-Systems-using-Computer-Vision-and-Deep-Learning","sub_path":"crowdcount-mcnn/anmodelvideo.py","file_name":"anmodelvideo.py","file_ext":"py","file_size_in_byte":6957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15003186751","text":"import os\nimport re\n\nfileDir = os.path.dirname(os.path.abspath('__file__'))\ninput_file = os.path.join(fileDir, '.\\Inputs\\Day-1-Input')\nf = open(input_file)\n\nfreqs = f.readlines()\nfreqs = [int(re.sub('\\s*','',f)) for f in freqs]\n\nprint(sum(freqs)) ","repo_name":"mezankhaja/Advent-of-Code-2018","sub_path":"Python/Day-1.py","file_name":"Day-1.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12418145081","text":"from pprint import pprint\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\ncredentials = GoogleCredentials.get_application_default()\nservice = discovery.build('compute', 'v1', credentials=credentials)\nproject = \"emerald-circle-374316\"\nzone = \"us-west4-b\"\ninstance = \"instance-1\"\nrequest = service.instances().get(project=project, zone=zone, instance=instance)\nresponse = request.execute()\npprint(response)\n\n","repo_name":"karunakarkondam/kpmg","sub_path":"kpmg_Task_2_3/metadata1.py","file_name":"metadata1.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38806498921","text":"from BasicTools import *\nfrom BarraFactor.barra_factor import *\nfrom AIndustryRotation.FactorTest import *\nfrom usefulTools import *\nfrom AMatketTiming.ANonFactor import PointEfficiency\n\n# 1、均线得分:短期均线 > 长期均线;得1分\ndef LineScore(close,line5,line10,line20,line60,line120,line240):\n '''\n TrendScore = (close > line5).astype(int) + (close > line10).astype(int) + (close > line20).astype(int) + \\\n (close > line60).astype(int) + (close > line120).astype(int) + (close > line240).astype(int) + \\\n (line5 > line10).astype(int) + (line5 > line20).astype(int) + (line5 > line60).astype(int) + \\\n (line5 > line120).astype(int) + (line5 > line240).astype(int) + \\\n (line10 > line20).astype(int) + (line10 > line60).astype(int) + \\\n (line10 > line120).astype(int) + (line10 > line240).astype(int) + \\\n (line20 > line60).astype(int) + (line20 > line120).astype(int) + (line20 > line240).astype(int) + \\\n (line60 > line120).astype(int) + (line60 > line240).astype(int) + \\\n (line120 > line240).astype(int)\n TrendScore = TrendScore / 21\n '''\n TrendScore = (close > line5).astype(int) + (close > line10).astype(int) + (close > line20).astype(int) + \\\n (close > line60).astype(int) + (close > line120).astype(int) + (close > line240).astype(int)\n TrendScore = TrendScore / 6\n TrendScore = TrendScore[~(close.isna())]\n return TrendScore\n# 2、均线距离:\ndef LineDistance(close,line5,line10,line20,line60,line120,line240):\n Distance = (close - line5) + (close - line10) + (close - line20) + (close - line60) + (close - line120) + (close - line240)\n Distance = Distance / close\n\n return Distance\n\n# 3、市场趋势:(6跟均线):收盘价在5根均线上方,那么就是向上趋势;收盘价在1根均线或以下上方,那么就是向下趋势,其余为震荡趋势\ndef DataTrend(close,line5,line10,line20,line60,line120,line240):\n TrendScore = LineScore(close, line5, line10, line20, line60, line120, line240)\n TrendScore = (TrendScore * 6)\n TrendResult = TrendScore.copy()\n 
TrendResult[(TrendScore >= 5)] = 1\n    TrendResult[(TrendScore <= 2)] = -1\n    TrendResult[(TrendScore > 2) & (TrendScore < 5)] = 0\n\n    return TrendResult\n\n##################################### This strategy mainly outputs the short-term trend of the market indices ###################################################\nclass MarketIndexTrend(object):\n    def __init__(self,start_date = 20150101,end_date = 20201231,ind='SW1', bench_list = ['SZZZ','CYB','wind_A'],save_path = 'E:/StrategyDataShow/MarketSignal/'):\n        self.start_date = start_date\n        self.end_date = end_date\n        self.ind = ind\n        # fetch the factors and factor values\n        date_list = get_date_range(get_pre_trade_date(start_date,252*5), end_date)\n        start_date, end_date = date_list[0], date_list[-1]\n        self.date_list = date_list\n        self.bench_list = bench_list\n        # --------------------------------------- basic benchmark (index) data ------------------------------------------ #\n        bench_open = get_daily_1factor('open',date_list=date_list, code_list=bench_list,type='bench')\n        bench_high = get_daily_1factor('high', date_list=date_list, code_list=bench_list, type='bench')\n        bench_low = get_daily_1factor('low', date_list=date_list, code_list=bench_list, type='bench')\n        bench_close = get_daily_1factor('close', date_list=date_list, code_list=bench_list, type='bench')\n        bench_amt = get_daily_1factor('amt', date_list=date_list, code_list=bench_list, type='bench')\n\n        self.bench_open = bench_open\n        self.bench_high = bench_high\n        self.bench_low = bench_low\n        self.bench_close = bench_close\n        self.bench_amt = bench_amt\n\n        # --------------------------------------- basic industry data ------------------------------------------ #\n        code_list = get_real_ind(ind[:-1], int(ind[-1]))\n        self.ind_list = code_list\n        ind_open = get_daily_1factor('open', date_list=date_list, code_list=code_list, type=ind[:-1])\n        ind_high = get_daily_1factor('high', date_list=date_list, code_list=code_list, type=ind[:-1])\n        ind_low = get_daily_1factor('low', date_list=date_list, code_list=code_list, type=ind[:-1])\n        ind_close = get_daily_1factor('close', date_list=date_list, code_list=code_list, type=ind[:-1])\n        ind_amt = get_daily_1factor('amt', date_list=date_list, code_list=code_list, type=ind[:-1])\n\n        self.ind_open = ind_open\n        self.ind_high = ind_high\n        self.ind_low = ind_low\n        self.ind_close = ind_close\n        self.ind_amt = ind_amt\n\n        # --------------------------------------- basic single-stock data ------------------------------------------ #\n        open = get_daily_1factor('open', date_list=date_list)\n        high = get_daily_1factor('high', date_list=date_list)\n        low = get_daily_1factor('low', date_list=date_list)\n        close = get_daily_1factor('close', date_list=date_list)\n        amt = get_daily_1factor('amt', date_list=date_list)\n\n        self.open = open\n        self.high = high\n        self.low = low\n        self.close = close\n        self.amt = amt\n\n        self.save_path = save_path\n\n    # 1. Trend indicators for the indices and industries\n    def MarketTrend(self):\n        # index moving averages and industry moving averages #\n        bench_line5, bench_line10, bench_line20, bench_line60, bench_line120, bench_line240 = meanline(self.bench_close)\n        ind_line5, ind_line10, ind_line20, ind_line60, ind_line120, ind_line240 = meanline(self.ind_close)\n\n        # market trend (6 MAs): close above 5 MAs -> uptrend; above at most 1 MA -> downtrend; otherwise range-bound #\n        MarketTrend = DataTrend(self.bench_close, bench_line5, bench_line10, bench_line20, bench_line60, bench_line120,bench_line240)\n        IndTrend = DataTrend(self.ind_close, ind_line5, ind_line10, ind_line20, ind_line60, ind_line120, ind_line240)\n\n        MarketAllTrend = MarketTrend.mean(axis=1) + IndTrend.mean(axis=1)  # overall market trend = index trend + industry trend\n        AllTrend = ts_rank(MarketAllTrend, rol_day=252 * 5).dropna()  # historical percentile of the overall trend\n\n        Trend = AllTrend.copy()\n        
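# Map the combined trend's 5-year rolling percentile to a discrete signal: >= 0.7 -> 1 (bullish), <= 0.3 -> -1 (bearish), otherwise 0.\n        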
Trend[AllTrend >= 0.7] = 1\n        Trend[(AllTrend <= 0.3)] = -1\n        Trend[(AllTrend > 0.3) & (AllTrend < 0.7)] = 0\n\n        IndTrendNum = pd.concat([(IndTrend > 0).sum(axis=1).rename('上涨趋势行业'),\n                                 (IndTrend == 0).sum(axis=1).rename('震荡趋势行业'),\n                                 (IndTrend < 0).sum(axis=1).rename('下跌趋势行业')], axis=1)\n\n        Trend_Result = pd.concat([MarketTrend, IndTrendNum, Trend.rename('市场总趋势')], axis=1)\n\n        # ----------------------- [cross-sectional comparison across industries] industry MAs: up_ind picks the three strongest industries for plotting -------------------------- #\n        IndTrendScore = LineScore(self.ind_close, ind_line5, ind_line10, ind_line20, ind_line60, ind_line120,ind_line240)\n        IndTrendScore = IndTrendScore[self.ind_close.isna() == False]\n\n        IndTrendDistance = LineDistance(self.ind_close, ind_line5, ind_line10, ind_line20, ind_line60, ind_line120,ind_line240)\n        IndTrendDistance_TSRANK = ts_rank(IndTrendDistance, rol_day=240 * 5).round(4)\n\n        up_ind = IndTrendScore.rank(pct=True, ascending=False, axis=1) + IndTrendDistance_TSRANK.rank(pct=True,ascending=False,axis=1)\n        up_ind = up_ind.rolling(10).mean().rank(axis=1, method='min') <= 3\n\n        return Trend_Result, IndTrend, up_ind\n\n    # 2. Turnover profile of the index / industries\n    def MarketAmt(self,date, code_type='bench'):\n        if code_type == 'bench':\n            bench_amt = self.bench_amt / 1e5\n            bench_idx = self.bench_list\n            bench_pct = self.bench_close.pct_change()\n        elif code_type == 'SW1':\n            bench_amt = self.ind_amt / 1e5\n            bench_idx = self.ind_close.loc[date].dropna().index.to_list()\n            bench_pct = self.ind_close.pct_change()\n        # --------- market turnover, 5-day / 10-day average turnover, and their historical percentiles ----------------------#\n        amt_5days, amt_10days, amt_20days = bench_amt.rolling(5).mean(), bench_amt.rolling(10).mean(), bench_amt.rolling(20).mean()\n        amt_history, amt_5days_history, amt_10days_history = ts_rank(bench_amt, rol_day=252 * 3), ts_rank(amt_5days,rol_day=252 * 3), ts_rank(amt_10days, rol_day=252 * 3)\n\n        # 1. Is volume expanding or shrinking? count how many of the 5/10/20-day averages today's turnover exceeds\n        big_amt_5days, big_amt_10days, big_amt_20days = bench_amt / amt_5days, bench_amt / amt_10days, bench_amt / amt_20days\n\n        big_amt = (big_amt_5days > 1.1).astype(int) + (big_amt_10days > 1.1).astype(int) + (big_amt_20days > 1.1).astype(int)\n        small_amt = (big_amt_5days < 0.9).astype(int) + (big_amt_10days < 0.9).astype(int) + (big_amt_20days < 0.9).astype(int)\n\n        amt_timing = pd.DataFrame(0, index=big_amt.index, columns=big_amt.columns)\n        amt_timing[big_amt >= 2] = 1\n        amt_timing[small_amt >= 2] = -1\n\n        # 2. number of volume-surge days\n        big_amt_in_10days, big_amt_in_20days = (big_amt >= 2).rolling(10).sum(), (big_amt >= 2).rolling(20).sum()\n\n        # 3. number of surge-up and surge-down days\n        big_amt_market_up, big_amt_market_down = (big_amt >= 2) & (bench_pct > 0.005), (big_amt >= 2) & (bench_pct < -0.005)\n        big_amt_market_up_10days, big_amt_market_down_10days = big_amt_market_up.rolling(10).sum(), big_amt_market_down.rolling(10).sum()\n\n        # consecutive days of expanding market turnover\n        big_amt_timing = ContinuousTrueTime(big_amt >= 2)\n\n        amt_result = pd.concat([amt_timing.loc[date].rename('成交额情况'),\n                                bench_amt.loc[date].rename('当日成交额(亿)'), amt_history.loc[date].rename('日成交额分位数'),\n                                amt_5days.loc[date].rename('近5日成交额(亿)'), amt_5days_history.loc[date].rename('近5日成交额分位数'),\n                                big_amt_in_10days.loc[date].rename('近10日放量次数'),\n                                big_amt_market_up_10days.loc[date].rename('放量上涨次数'),\n                                big_amt_market_down_10days.loc[date].rename('放量下跌次数'),\n                                big_amt_timing.loc[date].rename('持续放量天数')], axis=1).loc[bench_idx]\n        amt_result = amt_result.round(4).T\n        amt_result.loc[['当日成交额(亿)', '近5日成交额(亿)']] = amt_result.loc[['当日成交额(亿)', '近5日成交额(亿)']].astype(float).round(2)\n        return amt_result\n\n    # 3. Support and resistance levels of the index / industries\n    def 
HoldPressDirection(self,date,rate=2, del_pct =0.05):\n        ind_name = get_ind_con(ind_type='sw_all', level=self.ind[-1])\n\n        point_efficiency = PointEfficiency.PointEfficiency(self.start_date, self.end_date, ind = self.ind, fee=0.001,bench_list = self.bench_list)\n        # arg 1: extreme index levels; arg 2: point efficiency at the current level; arg 3: index resistance and support levels\n        MarketPoint, MarketProb, marketPH_dict = point_efficiency.get_point_efficiency(date, 'bench', rate,del_pct)\n        IndPoint, IndProb, IndPH_dict = point_efficiency.get_point_efficiency(date, self.ind, rate, del_pct)\n\n        market_point,ind_point = MarketPoint.copy(), IndPoint.copy()\n        market_point.columns = pd.Series(market_point.columns).apply(lambda x:bench_name[x]+'_point')\n        ind_point.columns = pd.Series(ind_point.columns).apply(lambda x:ind_name[x]+'_point')\n\n        bench_close,ind_close = self.bench_close.loc[MarketPoint.index, MarketPoint.columns], self.ind_close.loc[IndPoint.index, IndPoint.columns]\n        bench_close.columns = pd.Series(bench_close.columns).apply(lambda x: bench_name[x])\n        ind_close.columns = pd.Series(ind_close.columns).apply(lambda x: ind_name[x])\n\n        PricePoint = pd.concat([market_point, ind_point, bench_close, ind_close], axis=1)\n        PriceProb = pd.concat([MarketProb, IndProb], axis=1)\n\n        return PricePoint, marketPH_dict, IndPH_dict, PriceProb\n\n    # --------------------------- summary of the market and industry indicators -------------------------------- #\n    def AllSignal(self,date):\n        ind_name = pd.Series(get_ind_con(ind_type='sw_all', level=self.ind[-1]))\n\n        # ---------------- index trend ---------------------#\n        MarketTrend, IndTrend, up_ind = self.MarketTrend()\n        MarketAmt = self.MarketAmt(date,'bench')\n        IndAmt = self.MarketAmt(date,'SW1')\n\n        # 1. overall market trend\n        market_trend = MarketTrend.iloc[-5:].drop(['上涨趋势行业', '震荡趋势行业', '下跌趋势行业'], axis=1)\n        market_trend.columns = pd.Series(market_trend.columns).apply(lambda x: bench_name[x] if x != '市场总趋势' else x)\n        for i in market_trend.index:\n            for j in market_trend.columns:\n                if market_trend.loc[i,j] == -1:\n                    market_trend.loc[i,j] = '↓'\n                elif market_trend.loc[i,j] == 1:\n                    market_trend.loc[i,j] = '↑'\n                elif market_trend.loc[i,j] == 0:\n                    market_trend.loc[i,j] = '→'\n\n        market_trend[['上涨趋势行业', '震荡趋势行业', '下跌趋势行业']] = MarketTrend.iloc[-5:][['上涨趋势行业', '震荡趋势行业', '下跌趋势行业']]\n        market_trend = market_trend[['上证指数','创业板指','wind全A','中证500','上涨趋势行业', '震荡趋势行业', '下跌趋势行业','市场总趋势']]\n\n        # 2. market turnover\n        market_amt = MarketAmt[['wind_A', 'SZZZ', 'CYB']].copy()\n        market_amt.loc['成交额情况'] = market_amt.loc['成交额情况'].apply(lambda x: '↓' if x <0 else '↑' if x >0 else '→' )\n        market_amt.loc[['日成交额分位数', '近5日成交额分位数']] = round(market_amt.loc[['日成交额分位数', '近5日成交额分位数']] // 0.0001 / 100,2).astype(str) + '%'\n        market_amt.columns = pd.Series(market_amt.columns).apply(lambda x: bench_name[x])\n        # 3. industry trend\n        ind_trend = IndTrend.iloc[-5:].dropna(how='all', axis=1).copy().astype(str)\n        ind_trend[ind_trend == '-1'] = '↓'\n        ind_trend[ind_trend == '1'] = '↑'\n        ind_trend[ind_trend == '0'] = '→'\n        ind_trend.columns = pd.Series(ind_trend.columns).apply(lambda x: ind_name.loc[x])\n\n        # 4. industry turnover\n        ind_amt = IndAmt.copy()\n        ind_amt.loc['成交额情况'] = ind_amt.loc['成交额情况'].apply(lambda x: '↑' if x == 1 else '↓' if x == -1 else '→')\n        ind_amt.loc[['日成交额分位数', '近5日成交额分位数']] = round(ind_amt.loc[['日成交额分位数', '近5日成交额分位数']] // 0.0001 / 100,2).astype(str) + '%'\n\n        ind_amt.columns = pd.Series(ind_amt.columns).apply(lambda x: ind_name.loc[x])\n\n        # 5. index support and resistance levels\n        PricePoint, marketPH_dict, IndPH_dict, PriceProb = self.HoldPressDirection(date)\n\n        # ----------- save the results ------------- #\n        save_path = self.save_path + str(date) +'/'\n        
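# Make sure the dated output directory exists before the Excel reports are written.\n        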
os.makedirs(save_path, exist_ok=True)\n\n        writer = pd.ExcelWriter(save_path + '市场趋势情况.xlsx')\n        market_trend.to_excel(writer, sheet_name='市场趋势情况')\n        market_amt.to_excel(writer, sheet_name='市场成交情况')\n        ind_trend.to_excel(writer, sheet_name='行业趋势情况')\n        ind_amt.to_excel(writer, sheet_name='行业成交情况')\n        PricePoint.to_excel(writer, sheet_name='行业点位情况')\n        PriceProb.to_excel(writer, sheet_name='点位效率')\n        writer.close()\n\n        # --------- index levels with their resistance and support --------- #\n        writer = pd.ExcelWriter(save_path + '压力支撑情况.xlsx')\n        for bench in marketPH_dict.keys():\n            marketPH_dict[bench].to_excel(writer, sheet_name=bench_name[bench])\n        for bench in IndPH_dict.keys():\n            if type(IndPH_dict[bench]) == pd.core.frame.DataFrame:\n                IndPH_dict[bench].to_excel(writer, sheet_name=ind_name.loc[bench])\n        writer.close()\n\n        return market_trend, market_amt, ind_trend, ind_amt, PricePoint, marketPH_dict, IndPH_dict,PriceProb\n\n\nif __name__ == '__main__':\n    bench_list = ['SZZZ','CYB','wind_A','ZZ500']\n    start_date,end_date = 20151231, 20230814#get_recent_trade_date()\n    save_path = 'E:/StrategyDataShow/MarketSignal/'\n    self = MarketIndexTrend(start_date, end_date, ind='SW1', bench_list = bench_list, save_path=save_path)\n    market_trend, market_amt, ind_trend, ind_amt, PricePoint, marketPH_dict, IndPH_dict, PriceProb = self.AllSignal(end_date)\n\n\n","repo_name":"gftaoxin/StrategyPythonCode","sub_path":"AMatketTiming/MarketReplay/MarketIndTrend.py","file_name":"MarketIndTrend.py","file_ext":"py","file_size_in_byte":16992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"5029029895","text":"import argparse\nimport json\nimport os\n\nfrom PIL import Image\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Convert images to coco format without annotations')\n    parser.add_argument('img_path', help='The root path of images')\n    parser.add_argument(\n        'train_val', type=str, help='The train.json/val.json file name of storage categories list')\n    parser.add_argument(\n        '--out',\n        dest='out',\n        type=str,\n        default='test.json',\n        help='The output annotation json file name, The save dir is in the '\n        'same directory as img_path')\n    parser.add_argument(\n        '-e',\n        '--exclude-extensions',\n        type=str,\n        nargs='+',\n        help='The suffix of images to be excluded, such as \"png\" and \"bmp\"')\n    args = parser.parse_args()\n    return args\n\n\ndef scandir_track_iter_progress(path, recursive = True):\n    \"\"\"\n    Yield every file found under the given path\n    :param path:\n    :param recursive:\n    :return:\n    \"\"\"\n    normpath = os.path.normpath(path)\n    for dirpath, dirnames, filenames in os.walk(path):\n        if not recursive and os.path.normpath(dirpath) != normpath:\n            break\n        for filename in filenames:\n            yield os.path.join(dirpath, filename)\n\n\ndef collect_image_infos(path, exclude_extensions=None, basename=True):\n    img_infos = []\n\n    for image_path in scandir_track_iter_progress(path, recursive=True):\n        if exclude_extensions is None or (\n                exclude_extensions is not None\n                and not image_path.lower().endswith(exclude_extensions)):\n            img_pillow = Image.open(image_path)\n            img_info = {\n                'filename': os.path.basename(image_path) if basename else image_path,\n                'width': img_pillow.width,\n                'height': img_pillow.height,\n            }\n            img_infos.append(img_info)\n    return img_infos\n\n\ndef cvt_to_coco_json(img_infos, categories):\n    image_id = 0\n    coco = dict()\n    coco['images'] = []\n    coco['type'] = 'instance'\n    coco.update(categories)\n    coco['annotations'] = []\n    image_set = set()\n\n    for img_dict in 
img_infos:\n        file_name = img_dict['filename']\n        assert file_name not in image_set\n        image_item = dict()\n        image_item['id'] = int(image_id)\n        image_item['file_name'] = str(file_name)\n        image_item['height'] = int(img_dict['height'])\n        image_item['width'] = int(img_dict['width'])\n        coco['images'].append(image_item)\n        image_set.add(file_name)\n\n        image_id += 1\n    return coco\n\n\ndef categories_from_file(train_val):\n    \"\"\"\n    Read the categories list from train.json or val.json\n    :param train_val:\n    :return:\n    \"\"\"\n    with open(train_val, 'rt', encoding='u8') as f:\n        train_val_json = json.load(f)\n    categories = train_val_json.get('categories', [])\n    return dict(categories=categories)\n\n\ndef main():\n    args = parse_args()\n    assert args.out.endswith(\n        'json'), 'The output file name must be json suffix'\n\n    # 1 load image list info\n    img_infos = collect_image_infos(args.img_path, args.exclude_extensions, basename=True)\n\n    # 2 convert to coco format data\n    categories = categories_from_file(args.train_val)\n    coco_info = cvt_to_coco_json(img_infos, categories)\n\n    # 3 dump\n    save_path = args.out\n    with open(save_path, 'wt') as f:\n        json.dump(coco_info, f)\n    print(f'save json file: {save_path}')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"A971RM/yfjh_hw5_didi","sub_path":"images2json.py","file_name":"images2json.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"28407924806","text":"#!/usr/bin/python\nimport sys\nimport argparse\nimport random as rnd\n\n# randomness is the only issue (random is not a CSPRNG; fine only for a toy one-time pad)\ndef gen_key(key_len):\n    return bytes(rnd.randint(0, 255) for _ in range(key_len))\n\ndef ubc(data, key, pos):\n    if len(key[pos:]) < len(data):\n        print(\"Key is smaller than data...\")\n        sys.exit()\n    # XOR each data byte with the key byte at the same offset\n    return bytes(data[i] ^ key[pos + i] for i in range(len(data)))\n\ndef filetobytes(fname):\n    t = open(fname, \"rb\")\n    ret = t.read()\n    t.close()\n    return ret\n    \ndef bytestofile(fname, data):\n    t = open(fname, \"wb\")\n    t.write(data)\n    t.close()\n\nif __name__ == \"__main__\":\n    usage_text = \"ubc.py -g -c 1000 -k master.key\\n\"\n    usage_text += \"usage: ubc.py -e -i infile.txt -k master.key -o outfile.crypt\\n\"\n    usage_text += \"usage: ubc.py -d -i infile.crypt -k master.key -o outfile.decrypt\"\n    parser = argparse.ArgumentParser(usage=usage_text)\n    parser.add_argument('-k', help='Key File')\n    parser.add_argument('-i', help='Input File')\n    parser.add_argument('-o', help='Output File')\n    parser.add_argument('-c', default=0, type=int, help='Count or Position')\n    parser.add_argument('-g', action=\"store_true\", default=False, help='Generate')\n    parser.add_argument('-e', action=\"store_true\", default=False, help='Encrypt')\n    parser.add_argument('-d', action=\"store_true\", default=False, help='Decrypt')\n    nargs = parser.parse_args()\n    if len(sys.argv) == 1:\n        parser.print_help() \n    if nargs.g and nargs.c and nargs.k:\n        ret = gen_key(nargs.c)\n        bytestofile(nargs.k, ret)\n        print(\"Generated your keyfile\")\n    if nargs.e and nargs.i and nargs.k and nargs.o:\n        t = filetobytes(nargs.i)\n        k = filetobytes(nargs.k)\n        r = ubc(t, k, nargs.c)\n        bytestofile(nargs.o, r)\n        print(\"Used section %d:%d of keyfile\" % (nargs.c, (nargs.c + len(t))))\n    if nargs.d and nargs.i and nargs.k and nargs.o:\n        t = filetobytes(nargs.i)\n        k = filetobytes(nargs.k)\n        r = ubc(t, k, nargs.c)\n        bytestofile(nargs.o, r)\n        print(\"Used section %d:%d of keyfile\" % (nargs.c, 
(nargs.c + len(t))))\n","repo_name":"vvalien/ubc","sub_path":"ubc.py","file_name":"ubc.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"16252118607","text":"from django import template\nfrom django.utils.safestring import *\n\nregister = template.Library()\n\n@register.simple_tag\ndef field_row(form, field_name, after_html=\"\"):\n\tfield = form[field_name]\n\terrors = field.errors.as_text()\n\tif errors:\n\t\terrors = \"<span class='error'>%s</span>\" % errors\n\tres = \"
<tr><th>%(label)s</th><td>%(field)s %(after)s %(errors)s</td></tr>
\" % { \n\t\t'label' : field.label_tag(), \n\t\t'field' : field,\n\t\t'errors' : errors,\n\t\t'after' : after_html}\n\treturn mark_safe(res)\n\n@register.simple_tag\ndef submit_row(label):\n\treturn mark_safe(\"
\" % label)\n","repo_name":"fp7-ofelia/ocf","sub_path":"ofreg/registration/templatetags/form_helper.py","file_name":"form_helper.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"13340470670","text":"import socket\r\nimport urllib\r\n#import htmllib2\r\nfrom spiders import spider\r\nfrom src import clear_src\r\nfrom pathlib import Path\r\n\r\n\r\ndef email(host):\r\n\tclear_src()\r\n\r\n\tdep=spider(host)\r\n\tcount=1\r\n\r\n\twith Path(\"email.txt\").open(\"w+\") as emails:\r\n\t\tif(count float:\n # find the middle element of 2 inputed arrays\n\n # take advantage of Python's Timsort\n new = sorted(nums1 + nums2)\n\n #find median of new sorted array, depending on length of array(odd or even length)\n if len(new) % 2 == 1:\n return (new[int(len(new)/2)])\n else:\n low = int((len(new)-1)/2)\n return ((new[low] + new[low + 1])/2) \n","repo_name":"Jeromeschmidt/SPD-1.4-Testing-and-Architecture","sub_path":"leetcode/median_of_two_sorted_arrays.py","file_name":"median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71193035465","text":"class Solution:\n def maximumCandies(self, candies: List[int], k: int) -> int:\n \"\"\"\n :type candies: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n def check(x):\n return sum(c//x for c in candies) >= k # k = 3 x = 4 True, x = 6 False, x = 5 True\n\n left, right = 1, max(candies) # 1, 8\n while left <= right: # \n mid = left+(right-left)//2 # 1+(8-1)//2 = 4, 5+(8-5)//2 = 6, 5+(5-5)//2 = 5\n if not check(mid): # if return of check == False\n right = mid-1\n else: # return of check == True\n left = mid+1 \n return right\n ","repo_name":"adamsori/leetcode","sub_path":"Binary Search/Maximum-Candies-Allocated-to-K-Children.py","file_name":"Maximum-Candies-Allocated-to-K-Children.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72762512586","text":"from django.contrib import admin\nfrom .models import Album,Song\n# Register your models here.\n\nclass Albumcustomisation(admin.ModelAdmin):\n list_display = ['artist','album_title']\n list_filter = ['artist']\n class Meta:\n model = Album\n\nclass Songcustomisation(admin.ModelAdmin):\n\n search_fields = ['song_title']\n list_display = ['song_title','album']\n\n class Meta:\n model = Song\n\nadmin.site.register(Album,Albumcustomisation)\nadmin.site.register(Song,Songcustomisation)","repo_name":"gauravsetia1188/player","sub_path":"music/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10801842527","text":"N = int(input())\n\ncount = 0\n\n# 시각\nfor i in range(N+1):\n # 3시면\n if i == 3:\n count += 60*60\n continue\n # 분\n for j in range(60):\n # 십분대가 3이면\n if j // 10 == 3:\n count += 60\n continue\n # 일분대가 3이면\n elif j % 10 == 3:\n count += 60\n continue\n # 초\n for k in range(60):\n # 십초대가 3이면\n if k // 10 == 3:\n count += 1\n continue\n # 일초대가 3이면\n elif k % 10 == 3:\n count += 1\n 
continue\n\nprint(count)\n\n'''\n5\n'''","repo_name":"Qyupang/Algorithm","sub_path":"implementation/시각/implementationEx1.py","file_name":"implementationEx1.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"21158665158","text":"from datetime import datetime\r\nimport pytz\r\n\r\n# current Datetime\r\nunaware = datetime.now()\r\nprint('Timezone naive:', unaware)\r\n\r\n# Standard UTC timezone aware Datetime\r\naware = datetime.now(pytz.utc)\r\nprint('Timezone Aware:', aware)\r\n\r\n# UK (Europe/London) timezone datetime; pytz has no 'UK/Central' zone\r\naware_uk_central = datetime.now(pytz.timezone('Europe/London'))\r\nprint('UK DateTime', aware_uk_central)","repo_name":"midhat81/Midhat_81","sub_path":"from datetime import datetime.py","file_name":"from datetime import datetime.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"35113613121","text":"\"\"\"\nVectorUtils.py\n\nA class containing static methods for manipulating vectors.\n\nAuthor: Cyril Marx\nDate: 09.09.2021\n\"\"\"\n\nimport numpy as np\nimport math\n\n\nclass VectorUtils:\n    @staticmethod\n    def project_coordinate(coordinate, camera_axis, zoom_factor):\n        \"\"\"\n        Projects some coordinates on the canvas.\n        :param coordinate: The 2D coordinate to project.\n        :param camera_axis: A 2D value representing where the camera is looking at.\n        :param zoom_factor: The zoom factor as a float value.\n        :return: The projected 2D coordinate.\n        \"\"\"\n        return (coordinate + camera_axis) * zoom_factor\n\n    @staticmethod\n    def correct_zoom(coord, zoom_factor):\n        \"\"\"\n        Corrects a coordinate with a zoom factor.\n        :param coord: The coordinate to correct.\n        :param zoom_factor: The zoom factor used for correction.\n        :return: The corrected coordinate.\n        \"\"\"\n        return coord * 1 / zoom_factor\n\n    @staticmethod\n    def calc_cursor_collision(x, y, object, zoom_factor):\n        \"\"\"\n        Calculates if the cursor collides with a certain object.\n        :param x: The x coordinate of the cursor.\n        :param y: The y coordinate of the cursor.\n        :param object: The object (with posx, posy and size attributes) to test against.\n        :param zoom_factor: The zoom factor of the canvas.\n        :return: A boolean indicating if a collision happens.\n        \"\"\"\n        size = object.size * zoom_factor\n        if object.posx + size >= x >= object.posx - size:\n            if object.posy + size >= y >= object.posy - size:\n                return True\n        return False\n\n    @staticmethod\n    def calc_rect_collision(cursor, bottom_left, top_right):\n        \"\"\"\n        Calculates if the cursor collides with a rectangle based by two corners of the rectangle.\n        :param cursor: The cursor as a 2D vector.\n        :param bottom_left: The bottom left corner coordinate of the rectangle.\n        :param top_right: The top right corner coordinate of the rectangle.\n        :return: A boolean indicating if a collision happens.\n        \"\"\"\n        if bottom_left[0] < cursor[0] < top_right[0] and bottom_left[1] > cursor[1] > top_right[1]:\n            return True\n        return False\n\n    @staticmethod\n    def calc_rect_collision_by_size(cursor, obj_pos, size_x, size_y, zoom_factor):\n        \"\"\"\n        Calculates if the cursor collides with a rectangle based on its size.\n        :param cursor: The cursor as a 2D vector.\n        :param obj_pos: The position of the rectangle as a 2D vector.\n        :param size_x: The x size of the rectangle.\n        :param size_y: The y size of the rectangle.\n        :param zoom_factor: The zoom factor of the canvas.\n        :return: A boolean indicating if a collision happens.\n        \"\"\"\n        corr_size_x = size_x * zoom_factor\n        corr_size_y 
= size_y * zoom_factor\n        bottom_left = [obj_pos[0] - corr_size_x, obj_pos[1] + corr_size_y]\n        top_right = [obj_pos[0] + corr_size_x, obj_pos[1] - corr_size_y]\n\n        if bottom_left[0] < cursor[0] < top_right[0] and bottom_left[1] > cursor[1] > top_right[1]:\n            return True\n        return False\n\n    @staticmethod\n    def calc_vector(point_1, point_2):\n        \"\"\"\n        Calculates a vector from two points.\n        :param point_1: Point 1\n        :param point_2: Point 2\n        :return: The vector between both points.\n        \"\"\"\n        return np.array([point_2[0]-point_1[0], point_2[1]-point_1[1]])\n\n    @staticmethod\n    def unit_vector(vector):\n        \"\"\"\n        Calculates the unit vector of a certain vector.\n        :param vector: The vector to manipulate.\n        :return: The unit vector.\n        \"\"\"\n        length = math.sqrt(math.pow(vector[0], 2) + math.pow(vector[1], 2))\n        if length != 0:\n            return np.array([vector[0]/length, vector[1]/length])\n        else:\n            return np.array([0, 0])\n\n    @staticmethod\n    def normal_vector(vector):\n        \"\"\"\n        Calculates the normal vector to a vector.\n        :param vector: The vector to take the normal of.\n        :return: The normal (perpendicular) vector.\n        \"\"\"\n        return np.array([-vector[1], vector[0]])\n\n    @staticmethod\n    def uninormal_vector(vector):\n        \"\"\"\n        Computes the unit-length normal of a vector.\n        :param vector: The vector to manipulate.\n        :return: The unit-length normal vector.\n        \"\"\"\n        return VectorUtils.unit_vector(VectorUtils.normal_vector(vector))\n\n    @staticmethod\n    def get_rect_center(c1, c2, c3, c4):\n        \"\"\"\n        Calculates the center of a rectangle.\n        :param c1: Corner 1\n        :param c2: Corner 2\n        :param c3: Corner 3\n        :param c4: Corner 4\n        :return: The center of the rectangle as a 2D vector.\n        \"\"\"\n        v1 = VectorUtils.calc_vector(c1, c2) * 0.5\n        v2 = VectorUtils.calc_vector(c3, c1) * 0.5\n        return c1 - v2 + v1\n\n    @staticmethod\n    def rotate_point(point, angle, origin):\n        \"\"\"\n        Rotates a point around a certain origin.\n        :param point: The point to rotate.\n        :param angle: The rotation angle in radians.\n        :param origin: The origin point to rotate around.\n        :return: The rotated point position.\n        \"\"\"\n        translated_point = point - origin\n        temp = np.array([translated_point[0] * math.cos(angle) - translated_point[1] * math.sin(angle),\n                         translated_point[0] * math.sin(angle) + translated_point[1] * math.cos(angle)])\n        return temp + origin\n\n    @staticmethod\n    def get_vector_rotation(vector):\n        \"\"\"\n        Calculates the rotation of a vector to the x axis.\n        :param vector: The vector to check.\n        :return: The rotation angle in radians.\n        \"\"\"\n        coord_vector = np.array([1, 0])\n        scalarproduct = (vector[0]*coord_vector[0]+vector[1]*coord_vector[1])\n        absolute_vector = math.sqrt(vector[0]**2 + vector[1]**2)\n        absolute_coord_vector = math.sqrt(coord_vector[0]**2 + coord_vector[1]**2)\n        dividend = (absolute_vector * absolute_coord_vector)\n        if dividend != 0:\n            return math.acos(scalarproduct / dividend)\n        else:\n            return 0\n\n    @staticmethod\n    def connection_cursor_collision(connection, cursor_x, cursor_y, camera_x, camera_y, zoom_factor):\n        \"\"\"\n        Checks if the cursor collides with a connection.\n        :param connection: The connection to check.\n        :param cursor_x: The x coordinate of the cursor.\n        :param cursor_y: The y coordinate of the cursor.\n        :param camera_x: The x coordinate of the camera.\n        :param camera_y: The y coordinate of the camera.\n        :param zoom_factor: The zoom factor of the canvas.\n        :return: A boolean indicating if a collision happens.\n        \"\"\"\n        for vert in range(0, len(connection.vertices) - 1):\n            connection_bounding_box_width = 15\n            uninormal_vector = 
np.array(VectorUtils.calc_vector(connection.vertices[vert],\n connection.vertices[vert + 1]))\n uninormal_vector = VectorUtils.uninormal_vector(uninormal_vector)\n uninormal_vector = uninormal_vector * connection_bounding_box_width\n\n c = [connection.vertices[vert] + uninormal_vector,\n connection.vertices[vert] - uninormal_vector,\n connection.vertices[vert + 1] + uninormal_vector,\n connection.vertices[vert + 1] - uninormal_vector]\n for i in range(0, len(c)):\n c[i][0] = VectorUtils.project_coordinate(c[i][0], camera_x, zoom_factor)\n c[i][1] = VectorUtils.project_coordinate(c[i][1], camera_y, zoom_factor)\n\n temp_angle = VectorUtils.get_vector_rotation(np.array(VectorUtils.calc_vector(c[0], c[2])))\n temp_origin = VectorUtils.get_rect_center(c[0], c[1], c[2], c[3])\n temp_cursor = np.array([cursor_x + camera_x, cursor_y + camera_y])\n temp_cursor = temp_cursor * zoom_factor\n if connection.vertices[vert][1] < connection.vertices[vert + 1][1]:\n temp_angle = -temp_angle\n temp_cursor = VectorUtils.rotate_point(temp_cursor, temp_angle, temp_origin)\n\n bottom_left = c[0]\n top_right = c[3]\n\n if VectorUtils.calc_rect_collision(temp_cursor, VectorUtils.rotate_point(bottom_left, temp_angle, temp_origin),\n VectorUtils.rotate_point(top_right, temp_angle, temp_origin)):\n return True\n return False\n","repo_name":"Cycrus/COGNA_Editor","sub_path":"src/VectorUtils.py","file_name":"VectorUtils.py","file_ext":"py","file_size_in_byte":8720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31224744680","text":"\r\nimport json\r\nimport ast\r\nimport pymongo\r\nimport gridfs\r\nfrom pymongo import MongoClient\r\nfrom django.template import loader\r\nfrom django.http import HttpResponse\r\nfrom .com.cmpe.svs.accounts.service import MongoService\r\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\r\n\r\n\r\ndef login(request):\r\n\thttprequest = request\r\n\tbody_unicode = request.body.decode('utf-8')\r\n\tdata = json.loads(body_unicode)\r\n\tresponse = MongoService.service(\"login\", data, httprequest)\r\n\tprint(\"Returning data for login command:\")\r\n\tprint(response)\r\n\treturn HttpResponse(json.dumps(response))\r\n\r\n\r\ndef createAccount(request):\r\n\thttprequest = request\r\n\tbody_unicode = request.body.decode('utf-8')\r\n\tdata = json.loads(body_unicode)\r\n\tresponse = MongoService.service(\"createAccount\", data, httprequest)\r\n\tprint(response)\r\n\treturn HttpResponse(json.dumps(response))\r\n\r\ndef logout(request):\r\n\thttprequest = request\r\n\tbody_unicode = request.body.decode('utf-8')\r\n\tdata = json.loads(body_unicode)\r\n\tresponse = MongoService.service(\"logout\", data, httprequest)\r\n\tprint(response)\r\n\treturn HttpResponse(json.dumps(response))\r\n\t\r\ndef whoAmI(request):\r\n\thttprequest = request\r\n\tresponse = MongoService.service(\"whoAmI\", \"\", httprequest)\r\n\tprint(response)\r\n\treturn HttpResponse(json.dumps(response))\r\n\r\ndef editAccountInformation(request):\r\n\thttprequest = request\r\n\tbody_unicode = request.body.decode('utf-8')\r\n\tdata = json.loads(body_unicode)\r\n\tresponse = MongoService.service(\"editAccountInformation\", data, httprequest)\r\n\tprint(\"Returning data for editAccountInformation command:\")\r\n\tprint(response)\r\n\treturn HttpResponse(json.dumps(response))\r\n\r\ndef getAccountInformation(request):\r\n\thttprequest = request\r\n\tresponse = MongoService.service(\"getAccountInformation\", \"\", httprequest)\r\n\tprint(\"Returning data 
for getAccountInformation command:\")\r\n\tprint(response)\r\n\treturn HttpResponse(json.dumps(response))","repo_name":"chheangdx/CMPE-SVS","sub_path":"system/userAccounts.py","file_name":"userAccounts.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"22528485781","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport glob, os, datetime\n\nstarted = glob.glob('energy_sources*_0.csv')\nall_complete = True\nfor f in started:\n    scen = f[len('energy_sources_'):-len('_0.csv')]\n    if not os.path.exists('energy_sources_' + scen + '_.csv'):\n        all_complete = False\n        iters = glob.glob('energy_sources_' + scen + '_*.csv')\n        start_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(f)).strftime('%Y-%m-%d %H:%M:%S')\n        print('incomplete ({} iters, started {}): {}'.format(len(iters), start_time, scen))\n\nif all_complete:\n    print('no incomplete scenarios')\n\n","repo_name":"switch-hawaii/price_response","sub_path":"find_incomplete.py","file_name":"find_incomplete.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"17812813566","text":"def dfs(r, c):\n    if lst[r][c] == 3:\n        return 1\n    lst[r][c] = -1\n\n    dr = [-1, 1, 0, 0]\n    dc = [0, 0, -1, 1]\n    \n    for d in range(4):\n        nr = r + dr[d]\n        nc = c + dc[d]\n        if N > nr >= 0 and N > nc >= 0 and lst[nr][nc] != 1 and lst[nr][nc] != -1:\n            if dfs(nr, nc) == 1:\n                return 1\n\n    return 0\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n    N = int(input())\n    lst = [list(map(int, input().strip())) for _ in range(N)]\n    is_coor = False\n    for i in range(N):\n        for j in range(N):\n            if lst[i][j] == 2:\n                row = i\n                col = j\n                is_coor = True\n                break\n        if is_coor:\n            break\n\n\n    ans = dfs(row, col)\n    print(f'#{tc} {ans}')","repo_name":"DailyStudy08/JAEHYEON","sub_path":"DFS/4875_SWEA.py","file_name":"4875_SWEA.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"24613176606","text":"import os\nfrom pyspark import SparkConf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\nfrom pyspark.ml.feature import BucketedRandomProjectionLSH\nfrom pyspark.mllib.feature import Word2Vec\nfrom pyspark.ml.linalg import Vectors\nimport random\nfrom collections import defaultdict\nimport numpy as np\nfrom pyspark.sql import functions as F\nfrom tqdm import tqdm\n\nclass UdfFunction:\n    @staticmethod\n    def sortF(movie_list, timestamp_list):\n        \"\"\"\n        sort by time and return the corresponding movie sequence\n        eg:\n            input: movie_list:[1,2,3]\n                   timestamp_list:[1112486027,1212546032,1012486033]\n            return [3,1,2]\n        \"\"\"\n        pairs = []\n        for m, t in zip(movie_list, timestamp_list):\n            pairs.append((m, t))\n        # sort by time\n        pairs = sorted(pairs, key=lambda x: x[1])\n        return [x[0] for x in pairs]\n\n\ndef processItemSequence(spark, rawSampleDataPath):\n    # rating data to item pairs\n    ratingSamples = spark.read.format(\"csv\").option(\"header\", \"true\").load(rawSampleDataPath)\n    ratingSamples.show(5)\n    ratingSamples.printSchema()\n    # ArrayType(StringType())\n    # array_join()\n    sortUdf = udf(UdfFunction.sortF, ArrayType(StringType()))\n    userSeq = ratingSamples \\\n        .where(F.col(\"rating\") >= 3.5) \\\n        .withColumn(\"movieId\", concat(F.lit(\"movieId:\"), F.col(\"movieId\"))) \\\n        
.groupBy(\"userId\") \\\n .agg(sortUdf(F.collect_list(\"movieId\"), F.collect_list(\"timestamp\")).alias('movieIds')) \\\n .withColumn(\"movieIdStr\", array_join(F.col(\"movieIds\"), \" \"))\n itemPairs = userSeq.select('movieIdStr').rdd.map(lambda x: x[0].split(' ')).flatMap(lambda x: generate_pair(x))\n return itemPairs\n\ndef processUserItemSequence(spark, rawSampleDataPath):\n # rating data to user item pairs\n ratingSamples = spark.read.format(\"csv\").option(\"header\", \"true\").load(rawSampleDataPath)\n userItemSeq = ratingSamples \\\n .where(F.col(\"rating\") >= 3.5) \\\n .select(concat(F.lit(\"userId:\"), F.col(\"userId\")).alias(\"userId\"), concat(F.lit(\"movieId:\"), F.col(\"movieId\")).alias(\"movieId\"))\n userItemSeq.show(5)\n userItemSeq.printSchema()\n return userItemSeq.rdd.map(lambda x : (x[0], x[1]))\n\ndef processItemGenreInfo(spark, rawSampleDataPath, itemInfoPath):\n # movie infos to movieId genre pairs\n ratingSamples = spark.read.format(\"csv\").option(\"header\", \"true\").load(rawSampleDataPath)\n movieInfos = spark.read.format(\"csv\").option(\"header\", \"true\").load(itemInfoPath)\n movieInfos.show(5)\n movieInfos.printSchema()\n\n movieInfos = movieInfos.withColumn(\"movieId\", concat(F.lit(\"movieId:\"), F.col(\"movieId\")))\n seenMovies = ratingSamples.select(concat(F.lit(\"movieId:\"), F.col(\"movieId\")).alias(\"movieId_\")).distinct()\n movieInfos = movieInfos.join(seenMovies, movieInfos.movieId == seenMovies.movieId_, \"inner\") \\\n .select(\"movieId\", \"genres\") \\\n .rdd \\\n .flatMap(lambda x :generate_item_genre_pair(x)) \n return movieInfos\n\ndef generate_pair(x):\n # eg:\n # watch sequence:['858', '50', '593', '457']\n # return:[['858', '50'],['50', '593'],['593', '457']]\n pairSeq = []\n previousItem = ''\n for item in x:\n if not previousItem:\n previousItem = item\n else:\n pairSeq.append((previousItem, item))\n previousItem = item\n return pairSeq\n\ndef generate_item_genre_pair(x):\n # eg:\n # in: [\"movieId:12\", \"Action:Comedy\"]\n # return: [(\"moiveId:12\", \"Action\"), (\"movieId:12\", \"Comedy\")]\n pairSeq = []\n item = x[0]\n genres = x[1].split(\"|\")\n for genre in genres:\n pairSeq.append((item, \"genre:\" + genre))\n return pairSeq\n\ndef trainItem2vec(spark, samples, embLength, embOutputPath, saveToRedis, redisKeyPrefix):\n word2vec = Word2Vec().setVectorSize(embLength).setWindowSize(5).setNumIterations(10)\n model = word2vec.fit(samples)\n synonyms = model.findSynonyms(\"movieId:158\", 20)\n for synonym, cosineSimilarity in synonyms:\n print(synonym, cosineSimilarity)\n \n print(\"start saving embeddings ...\")\n embOutputDir = '/'.join(embOutputPath.split('/')[:-1])\n if not os.path.exists(embOutputDir):\n os.makedirs(embOutputDir)\n with open(embOutputPath, 'w') as f:\n for movie_id in model.getVectors():\n vectors = \" \".join([str(emb) for emb in model.getVectors()[movie_id]])\n f.write(movie_id + \":\" + vectors + \"\\n\")\n embeddingLSH(spark, model.getVectors())\n return model\n\ndef embeddingLSH(spark, movieEmbMap):\n movieEmbSeq = []\n for key, embedding_list in movieEmbMap.items():\n embedding_list = [np.float64(embedding) for embedding in embedding_list]\n movieEmbSeq.append((key, Vectors.dense(embedding_list)))\n movieEmbDF = spark.createDataFrame(movieEmbSeq).toDF(\"movieId\", \"emb\")\n bucketProjectionLSH = BucketedRandomProjectionLSH(inputCol=\"emb\", outputCol=\"bucketId\", bucketLength=0.1,\n numHashTables=3)\n bucketModel = bucketProjectionLSH.fit(movieEmbDF)\n embBucketResult = 
bucketModel.transform(movieEmbDF)\n    print(\"movieId, emb, bucketId schema:\")\n    embBucketResult.printSchema()\n    print(\"movieId, emb, bucketId data result:\")\n    embBucketResult.show(10, truncate=False)\n    print(\"Approximately searching for 5 nearest neighbors of the sample embedding:\")\n    sampleEmb = Vectors.dense(0.795, 0.583, 1.120, 0.850, 0.174, -0.839, -0.0633, 0.249, 0.673, -0.237)\n    bucketModel.approxNearestNeighbors(movieEmbDF, sampleEmb, 5).show(truncate=False)\n\ndef generateTransitionMatrix(item2ItemSamples, user2ItemSamples, item2GenreSamples):\n    # Build the transition matrix from the item-item, user-item and item-genre edges.\n    # Because the walk follows a metapath, transition probabilities only need to be\n    # normalized within each node type.\n\n    # define the transition matrix and the node distributions first\n    transitionCountMatrix = defaultdict(dict)\n    itemDistribution = defaultdict(dict)\n    userDistribution = defaultdict(dict)\n\n    # 1) process the item-item edges\n    pairSamples = item2ItemSamples\n\n    # count occurrences of every value in the RDD; returns a dict-like object\n    pairCountMap = pairSamples.countByValue()\n    itemPairTotalCount = 0\n\n    # give every counter a default value so missing keys do not raise\n    itemCountMap = defaultdict(int)\n    for key, cnt in pairCountMap.items():\n        key1, key2 = key\n        if transitionCountMatrix[key1].get(\"item\") == None:\n            transitionCountMatrix[key1][\"item\"] = {key2:cnt}\n        else:\n            transitionCountMatrix[key1][\"item\"][key2] = cnt\n        itemCountMap[key1] += cnt\n        itemPairTotalCount += cnt\n\n    # 2) process the user-item edges\n    pairSamples = user2ItemSamples\n\n    pairCountMap = pairSamples.countByValue()\n    userPairTotalCount = 0\n    userCountMap = defaultdict(int)\n    userDisCountMap = defaultdict(int)\n\n    for key, cnt in pairCountMap.items():\n        key1, key2 = key\n\n        # add the item link to the user node\n        if transitionCountMatrix[key1].get(\"item\") == None:\n            transitionCountMatrix[key1][\"item\"] = {key2:cnt}\n        else:\n            transitionCountMatrix[key1][\"item\"][key2] = cnt\n        \n        # add the user link to the item node\n        if transitionCountMatrix[key2].get(\"user\") == None:\n            transitionCountMatrix[key2][\"user\"] = {key1:cnt}\n        else:\n            transitionCountMatrix[key2][\"user\"][key1] = cnt\n        userCountMap[key1] += cnt\n        userCountMap[key2] += cnt\n        userDisCountMap[key1] += cnt\n        userPairTotalCount += cnt\n\n    # 3) process the item-genre edges\n    pairSamples = item2GenreSamples\n\n    pairCountMap = pairSamples.countByValue()\n    genrePairTotalCount = 0\n    genreCountMap = defaultdict(int)\n\n    for key, cnt in pairCountMap.items():\n        key1, key2 = key\n\n        # add the genre link to the item node\n        if transitionCountMatrix[key1].get(\"genre\") == None:\n            transitionCountMatrix[key1][\"genre\"] = {key2:cnt}\n        else:\n            transitionCountMatrix[key1][\"genre\"][key2] = cnt\n        \n        # add the item link to the genre node\n        if transitionCountMatrix[key2].get(\"item\") == None:\n            transitionCountMatrix[key2][\"item\"] = {key1:cnt}\n        else:\n            transitionCountMatrix[key2][\"item\"][key1] = cnt\n        genreCountMap[key1] += cnt\n        genreCountMap[key2] += cnt\n        genrePairTotalCount += cnt\n    \n    # 4) normalize per node type to produce the transition matrix\n    transitionMatrix = defaultdict(dict)\n    \n    for key1, transitionMap in transitionCountMatrix.items():\n        # if the node is an item\n        if key1.split(\":\")[0] == \"movieId\":\n\n            if transitionMap.get(\"item\") != None:\n                for key2, cnt in transitionMap[\"item\"].items():\n                    if transitionMatrix[key1].get(\"item\") == None:\n                        transitionMatrix[key1][\"item\"] = {key2: transitionCountMatrix[key1][\"item\"][key2] / itemCountMap[key1]}\n                    else:\n                        transitionMatrix[key1][\"item\"][key2] = transitionCountMatrix[key1][\"item\"][key2] / itemCountMap[key1]\n\n            if transitionMap.get(\"user\") != None:\n                for key2, cnt in transitionMap[\"user\"].items():\n                    if transitionMatrix[key1].get(\"user\") == None:\n                        
transitionMatrix[key1][\"user\"] = {key2: transitionCountMatrix[key1][\"user\"][key2] / userCountMap[key1]}\n else:\n transitionMatrix[key1][\"user\"][key2] = transitionCountMatrix[key1][\"user\"][key2] / userCountMap[key1]\n \n if transitionMap.get(\"genre\") != None:\n for key2, cnt in transitionMap[\"genre\"].items():\n if transitionMatrix[key1].get(\"genre\") == None:\n transitionMatrix[key1][\"genre\"] = {key2: transitionCountMatrix[key1][\"genre\"][key2] / genreCountMap[key1]}\n else:\n transitionMatrix[key1][\"genre\"][key2] = transitionCountMatrix[key1][\"genre\"][key2] / genreCountMap[key1]\n \n # 如果节点是 user\n elif key1.split(\":\")[0] == \"userId\":\n if transitionMap.get(\"item\") != None:\n for key2, cnt in transitionMap[\"item\"].items():\n if transitionMatrix[key1].get(\"item\") == None:\n transitionMatrix[key1][\"item\"] = {key2: transitionCountMatrix[key1][\"item\"][key2] / userCountMap[key1]}\n else:\n transitionMatrix[key1][\"item\"][key2] = transitionCountMatrix[key1][\"item\"][key2] / userCountMap[key1]\n\n #如果节点是 genre\n elif key1.split(\":\")[0] == \"genre\":\n if transitionMap.get(\"item\") != None:\n for key2, cnt in transitionMap[\"item\"].items():\n if transitionMatrix[key1].get(\"item\") == None:\n transitionMatrix[key1][\"item\"] = {key2: transitionCountMatrix[key1][\"item\"][key2] / genreCountMap[key1]}\n else:\n transitionMatrix[key1][\"item\"][key2] = transitionCountMatrix[key1][\"item\"][key2] / genreCountMap[key1]\n\n else:\n continue\n \n for itemid, cnt in itemCountMap.items():\n itemDistribution[itemid] = cnt / itemPairTotalCount\n \n for userid, cnt in userDisCountMap.items():\n userDistribution[userid] = cnt / userPairTotalCount\n\n return transitionMatrix, itemDistribution, userDistribution\n\ndef oneMetaPathWalk(transitionMatrix, itemDistribution, userDistribution, metapath):\n sample = []\n\n # pick the first element\n if metapath[0] == \"i\":\n firstNodeDistribution = itemDistribution\n else:\n firstNodeDistribution = userDistribution\n \n randomDouble = random.random()\n firstItem = \"\"\n accumulateProb = 0.0\n for item, prob in firstNodeDistribution.items():\n accumulateProb += prob\n if accumulateProb >= randomDouble:\n firstItem = item\n break\n sample.append(firstItem)\n curElement = firstItem\n\n #pick other elements \n for node in metapath[1:]:\n if curElement not in transitionMatrix:\n break\n\n if node == \"i\":\n meta = \"item\"\n elif node == \"u\":\n meta = \"user\"\n elif node == \"g\":\n meta = \"genre\"\n else:\n break\n\n metaDistribution = transitionMatrix[curElement].get(meta)\n if metaDistribution == None:\n break\n\n randomDouble = random.random()\n accumulateProb = 0.0\n for item, prob in metaDistribution.items():\n accumulateProb += prob\n if accumulateProb >= randomDouble:\n curElement = item\n break\n sample.append(curElement)\n\n return sample\n\n\ndef metaPathRandomWalk(transitionMatrix, itemDistribution, userDistribution, sampleCounts, metapaths):\n samples = []\n for sampleCount, metapath in zip(sampleCounts, metapaths):\n print(\"metapath: \" + metapath + \" is generating\")\n for i in tqdm(range(sampleCount)):\n samples.append(oneMetaPathWalk(transitionMatrix, itemDistribution, userDistribution, metapath))\n return samples\n\ndef metaPathGraphEmb(itemPairSamples, userItemPairSamples, itemGenrePairSamples, spark, embLength, embOutputFilename, saveToRedis, redisKeyPrefix):\n # 从边的信息中生成转移矩阵\n transitionMatrix, itemDistribution, userDistribution = generateTransitionMatrix(itemPairSamples, userItemPairSamples, 
itemGenrePairSamples)\n    \n    # spot-check the transition matrix\n    # print(\"item check:\")\n    # print(transitionMatrix[\"movieId:2\"])\n    # print(itemDistribution[\"movieId:2\"])\n    # print(\"user check:\")\n    # print(transitionMatrix[\"userId:1\"])\n    # print(userDistribution[\"userId:1\"])\n\n    # run the metapath random walks to generate the corpus\n    sampleCounts = [20000, 10000, 10000, 30000, 30000]\n    metapaths = [\"uiiuiiuiiu\", \"igiuigiuigi\", \"iiuiigiiuii\", \"iiiiiiiiiii\", \"iiiuiiiuiii\"]\n    newSamples = metaPathRandomWalk(transitionMatrix, itemDistribution, userDistribution, sampleCounts, metapaths)\n\n    # spot-check the corpus\n    print(\"corpus length:\", len(newSamples))\n    print(newSamples[0])\n\n    rddSamples = spark.sparkContext.parallelize(newSamples)\n    trainItem2vec(spark, rddSamples, embLength, embOutputFilename, saveToRedis, redisKeyPrefix)\n\nif __name__ == \"__main__\":\n    embLength = 10\n    \n    conf = SparkConf().setAppName(\"embedding\").setMaster(\"local[*]\")\n    spark = SparkSession.builder.config(conf=conf).getOrCreate()\n\n    out_path = \"/Users/huangqiushi/Desktop/LearningRS/MyRecSys/Embedding/result\"\n    rating_data_path = \"/Users/huangqiushi/Desktop/LearningRS/MyRecSys/Datas/ratings.csv\"\n    movie_info_path = \"/Users/huangqiushi/Desktop/LearningRS/MyRecSys/Datas/movies.csv\"\n\n    # build item-item pairs from the users' rating sequences\n    itemPairSamples = processItemSequence(spark, rating_data_path)\n    print(\"check 5 lines of the item-item pair samples:\")\n    print(itemPairSamples.take(5))\n\n    # build user-item pairs from the users' rating sequences\n    userItemPairSamples = processUserItemSequence(spark, rating_data_path)\n    print(\"check 5 lines of the user-item pair samples:\")\n    print(userItemPairSamples.take(5))\n\n    # build item-genre pairs from the rating sequences and the movie metadata\n    itemGenrePairSamples = processItemGenreInfo(spark, rating_data_path, movie_info_path)\n    print(\"check 5 lines of the item-genre pair samples:\")\n    print(itemGenrePairSamples.take(5))\n\n    # Metapath2embedding\n    metaPathGraphEmb(itemPairSamples, userItemPairSamples, itemGenrePairSamples, spark, embLength, \n                     embOutputFilename=out_path + \"/itemMetaPathGraphEmb.csv\", saveToRedis=True, redisKeyPrefix=\"metaPathEmb\")\n    ","repo_name":"Qinguanhai/MyRecSys","sub_path":"Embedding/Embedding_MetaPath.py","file_name":"Embedding_MetaPath.py","file_ext":"py","file_size_in_byte":15936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"255022251","text":"import random\nsecurityPin = [8523,1253,7856,1253,7856]\naccounts=[424242424242,535353535353,868686868686,757575757575,434343434343,]\n\ndef AccountInfo():\n    accountNumber = int(input(\"enter your account number = \"))\n    for i in accounts:\n        if accountNumber == i:\n            print(i)\n            widrawMoney()\n            break\n    else:\n        # no break: the number matched no account (the else previously hung off the if,\n        # so the very first mismatch opened a new account)\n        print(\"you dont have any account please enter your information to open your account\")\n        OpenAccount()\n\n\ndef OpenAccount():\n    name = (input(\"enter your account name = \"))\n    CNICNumber = int(input(\"enter your CNIC number = \"))\n    type = (input(\"do you want to open current account or savings? 
= \"))\n pin = int(input(\"set your pin 4 digit = \"))\n securityPin.append(pin)\n newAccountNumber = random.sample(range(0, 1000000000000), 1)\n accounts.append(newAccountNumber)\n print(f\"{name}your account is succesfully opened and your account number is {newAccountNumber}\")\n deposit()\n \n\ndef widrawMoney():\n enterAmount = float(input(\"enter amount = \"))\n pin =int(input(\"enter your 4 digit pin = \"))\n print(\"money withdraw succesfully\")\n\n\ndef deposit():\n accountNumber = int(input(\"enter your account number = \"))\n enterAmount = float(input(\"enter amount to deposit= \"))\n pin = int(input(\"set your pin 4 digit = \"))\n print(\"amount deposit succesfully\")\n AccountInfo()\n\n\n \nAccountInfo()\n\n\n \n\n","repo_name":"Sohaibkhan100/AI","sub_path":"assignment1/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"15681784980","text":"class Solution:\n def deleteAndEarn(self, nums: List[int]) -> int:\n store = defaultdict(int)\n maxNum = nums[0]\n for i in nums:\n store[i] += i\n maxNum = max(maxNum, i)\n \n @cache\n def maxPoint(num):\n if num == 0:\n return 0\n if num == 1:\n return store[1]\n return max(maxPoint(num-1), maxPoint(num-2) + store[num])\n \n return maxPoint(maxNum)\n \n","repo_name":"Tek58/Leetcode","sub_path":"740-delete-and-earn/740-delete-and-earn.py","file_name":"740-delete-and-earn.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34054353074","text":"import praw\nimport time\n\n#Your Reddit API credentials and user agent\nclient_id='client_id'\nclient_secret='client_secret'\nusername='username'\npassword='password'\nuser_agent='user_agent'\n\nreddit = praw.Reddit(client_id=client_id,\n client_secret=client_secret,\n username=username,\n password=password,\n user_agent=user_agent)\n\n# Function to post in a subreddit\ndef post_in_subreddit(subreddit_name, title, content, image_path=None):\n subreddit = reddit.subreddit(subreddit_name)\n\n # Check if the subreddit allows images and content is an image\n if subreddit.allow_images and image_path:\n try:\n with open(image_path, 'rb') as image_file:\n subreddit.submit_image(title=title, image_path=image_path, flair_id=None, send_replies=True)\n print(f'Posted in {subreddit_name}: {title}')\n except praw.exceptions.PRAWException as e:\n print(f'Error posting: {e}')\n # Check if the subreddit is text-only\n elif not image_path:\n if subreddit.subreddit_type == 'text':\n try:\n subreddit.submit(title=title, selftext=content)\n print(f'Posted in {subreddit_name}: {title}')\n except praw.exceptions.PRAWException as e:\n print(f'Error posting: {e}')\n else:\n print(f'Subreddit {subreddit_name} does not allow text (self) posts.')\n else:\n print(f'Subreddit {subreddit_name} does not allow the specified type of post.')\n\n\n# Read data from the file and post to the specified subreddits\nwith open('data.txt', 'r') as file:\n data = file.read().split('\\n\\n')\n\nfor entry in data:\n lines = entry.split('\\n')\n subreddit_name = lines[0].replace('Subreddit: ', '')\n title = lines[1].replace('Title: ', '')\n content = lines[2].replace('Content: ', '')\n image_path = None\n print(f'Lines in entry: {lines}')\n\n if 'Image:' in lines[3]:\n image_path = lines[3].replace('Image: ', '').strip()\n\n post_in_subreddit(subreddit_name, title, content, image_path)\n print(f'Posted in 
{subreddit_name}: {title}')\n\n# Sleep to avoid rate limits\ntime.sleep(20)\nprint(\"\\n\")\n\n# Function to upvote all posts in a subreddit\ndef upvote_all_posts_in_subreddit(subreddit_name):\n subreddit = reddit.subreddit(subreddit_name)\n\n for submission in subreddit.new(limit=2):\n try:\n submission.upvote()\n print(f'Upvoted post in {subreddit_name}: {submission.title}')\n time.sleep(2) # Sleep for a few seconds to avoid rate limits\n except praw.exceptions.PRAWException as e:\n print(f'Error: {e}')\n\n# Read subreddit names from the data.txt file and upvote posts in those subreddits\nwith open('data.txt', 'r') as file:\n data = file.read().split('\\n\\n')\n\nfor entry in data:\n lines = entry.split('\\n')\n if len(lines) < 4:\n continue # Skip incomplete entries\n subreddit_name = lines[0].replace('Subreddit: ', '').strip()\n upvote_all_posts_in_subreddit(subreddit_name) # Call the upvote function with the subreddit name\n print(f'Upvoted posts in {subreddit_name}')\n\nprint(\"\\n\")\n\n# Function to save Earth-related posts in a subreddit\ndef save_earth_pics(subreddit_name):\n subreddit = reddit.subreddit(subreddit_name)\n\n # Define keywords to identify Earth-related posts\n keywords = ['earth', 'planet', 'nature', 'landscape']\n\n for submission in subreddit.new(limit=3):\n post_title = submission.title.lower()\n if any(keyword in post_title for keyword in keywords):\n try:\n submission.save()\n print(f'Saved Earth-related post in {subreddit_name}: {submission.title}')\n time.sleep(2) # Sleep for a few seconds to avoid rate limits\n except praw.exceptions.PRAWException as e:\n print(f'Error: {e}')\n\nprint(\"\\n\")\n\n# Read subreddit names from the data.txt file and save Earth-related posts in those subreddits\nwith open('data.txt', 'r') as file:\n data = file.read().split('\\n\\n')\n\nfor entry in data:\n lines = entry.split('\\n')\n if len(lines) < 4:\n continue # Skip incomplete entries\n subreddit_name = lines[0].replace('Subreddit: ', '').strip()\n save_earth_pics(subreddit_name)\n print(f'Saved Earth-related posts in {subreddit_name}') \n\n# Call the function to save Earth-related posts in the \"pics\" subreddit\nsave_earth_pics('pics')\n\nprint(\"\\n\")\n\n# Function to search for subreddits based on a provided query\ndef search_subreddits(query, limit=5):\n try:\n subreddits = list(reddit.subreddits.search(query, limit=limit))\n for subreddit in subreddits:\n print(f'Search result in {subreddit_name} subreddit is {subreddit.display_name}')\n except praw.exceptions.PRAWException as e:\n print(f'Error: {e}')\n\n# Read data from the file and extract the search queries\nwith open('data.txt', 'r') as file:\n data = file.read().split('\\n\\n')\n\nfor entry in data:\n lines = entry.split('\\n')\n if len(lines) < 4:\n continue # Skip incomplete entries\n search_query = lines[3].replace('Search Query: ', '').strip()\n subreddit_name=lines[0].replace('Subreddit: ','').strip()\n # Perform subreddit search for each search query\n search_subreddits(search_query)\n\nprint(\"\\n\")\n\ntime.sleep(10)\n\n","repo_name":"vaish0299/Re-Bot","sub_path":"re-Bot.py","file_name":"re-Bot.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30537202284","text":"from queue import PriorityQueue\nimport math as m\n\n\nclass Solution:\n def kClosest(self, points, k: int):\n pq = PriorityQueue()\n\n def dist(point):\n return m.sqrt(point[0] ** 2 + point[1] ** 2)\n\n for point in points:\n 
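# Push (distance, point) pairs; PriorityQueue orders tuples element-wise, so the smallest distance is dequeued first.\n            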
pq.put((dist(point), point))\n        result = []\n        while k:\n            front = pq.get()\n            result.append(front[1])\n            k -= 1\n\n        return result\n","repo_name":"AjayKrP/GoogleInterview","sub_path":"DS_Practice/Heaps/k-closest-points-to-origin.py","file_name":"k-closest-points-to-origin.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"39834401804","text":"from sys import stdin\nt=int(stdin.readline())\ninputs = [int(stdin.readline()) for _ in range(t)]\nmax_n=max(inputs)\ndp=[0]*(max(max_n, 3)+1)\ndp[1],dp[2],dp[3] = 1,2,4\nfor i in range(4, max_n+1):\n    dp[i] = dp[i-3]+dp[i-2]+dp[i-1]\nfor n in inputs:\n    print(dp[n])\n\n    # dp=[0]*(n+1)\n    # dp[1], dp[2], dp[3] = 1, 2, 4\n    # for i in range(2, n+1):\n    #     print(sum([dp[j] for j in range(1,i)]))\n    #     dp[i] = (i-1)+sum([dp[j] for j in range(1,i)])\n    #     print('ddf', i, dp[i])\n    # print('답')\n    # print(dp[n])","repo_name":"devhyojin/Algorithm","sub_path":"BOJ/[BOJ]9095.1,2,3 더하기.py","file_name":"[BOJ]9095.1,2,3 더하기.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} {"seq_id":"31758236326","text":"\nimport sys\nimport argparse\n#import dkim \nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n# Color definitions\n\nclass colors():\n\n    magenta = '\\033[95m'\n    blue = '\\033[94m'\n    cyan = '\\033[96m'\n    green = '\\033[92m'\n    yellow = '\\033[93m'\n    red = '\\033[91m'\n    end = '\\033[0m'\n    bold = '\\033[1m'\n    underline = '\\033[52m'\n    blinking= '\\033[5m'\n    purple= '\\033[35m'\n    strongBlue= '\\033[34m'\n    strongYellow= '\\033[33m'\n    strongRed= '\\033[31m'\n    strongGreen= '\\033[32m'\n    gray= '\\033[90m'\n    doubleUnderline= '\\033[21m'\n\n# Builds the tool's banner\n\ndef banner():\n    print((\"\"\"%s %s\n                        ) \n             (       ( /( \n    (   )  ( )\\ )\\()) (  (    )  ( \n    ))\\ /(( )\\ ((_)((_)\\  ))\\ )(  (  ))\\ ( \n    /((_)(_))\\(()_)_ _((_) /((_)(()\\ (()  )\\ ()' /((_))\\ %s \"\"\" % (colors.strongRed,colors.blinking, colors.end)),(\"\"\"%s\n     _____ _(_) |  | |  ___ _ __ _ __ ___   ___  ___ \n    / _ \\ \\ / / | |_| |/ _ \\ '__| '_ ` _ \\ / _ \\/ __| %s \"\"\" % (colors.strongYellow, colors.end)), f\" {colors.blue}Created by Salvador{colors.end}\", (\"\"\"%s\n    | __/\\ V /| | |  _ | __/ |  | | | | | | __/\\__ \\\\\n    \\___| \\_/ |_|_|_| |_|\\___|_|  |_| |_| |_|\\___||___/ %s\"\"\" % (colors.strongYellow, colors.end))) \n\n    print(f\"\\n{colors.gray}#######################################################################################{colors.end}\")\n\n\n\n# If there is any problem with the arguments, show an error message\n\ndef parser_error(errmsg):\n    banner()\n    print(\"\\nUsage: python3 \" + sys.argv[0] + \" [Options] use -h for help\")\n    print(\"Error: \" + errmsg + \"\\n\")\n    sys.exit()\n\n# Parse the arguments; if none are given, show the help message\n\ndef parse_args():\n\n    if len(sys.argv) < 2:\n        banner()\n        print(\"\\nUse -h, --help flags for help usage\\n\")\n    # parse the arguments\n    else:\n        parser = argparse.ArgumentParser(\n            epilog='\\tExample: \\r\\npython ' + sys.argv[0] + \" -to recipient@gmail.com -subject 'greetings' -data 'hello my friend'\")\n        parser.error = parser_error\n        parser._optionals.title = \"OPTIONS\"\n        parser.add_argument(\n        \t '-tls', '--starttls', action='store_true', help=\"Enable STARTTLS command.\")\n\n        parser.add_argument(\n            '-helo', '--helo', default=None, help=\"Set HELO domain.\")\n        
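# The remaining flags set the SMTP envelope (MAIL FROM / RCPT TO), the message payload, and the target server.\n        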
parser.add_argument(\n            '-mfrom', '--mfrom', default=None, help=\"Set MAIL FROM address.\")\n        parser.add_argument(\n            '-rcptto', '--rcptto', default=None, help=\"Set RCPT TO address.\")\n        parser.add_argument(\n            '-data', '--data', default=None, help=\"Set raw email.\")\n        parser.add_argument(\n            '-ip', '--ip', default=None, help=\"Set mail server ip.\")\n        parser.add_argument(\n            '-port', '--port', default=None, help=\"Set mail server port.\")\n        parser.add_argument(\n            '-subject', '--subject', default=None, help=\"Set mail subject.\")\n        parser.add_argument(\n            '-to', '--to', default=None, help=\"Set mail to address.\")\n\n        args = parser.parse_args()\n        return args\n\n\n#Función pensada haciendo uso de contraseña de app debido a problemas con el servidor propio\n\ndef send_custom_email(args):\n    smtp_server = 'smtp.gmail.com'\n    smtp_port = int(args.port)\n    smtp_username = 'example@gmail.com' #change\n    smtp_password = 'passexample' #change\n\n    # Iniciar la conexión con el servidor SMTP\n    server = smtplib.SMTP(smtp_server, smtp_port)\n    server.starttls()\n    server.ehlo()\n\n    server.login(smtp_username, smtp_password)\n\n    msg = MIMEMultipart()\n    msg['From'] = smtp_username\n    msg['To'] = str(args.to)\n\n    custom_headers = {\n        'rcptto': str(args.rcptto),\n        'helo': str(args.helo),\n        'from': str(args.mfrom),\n    }\n\n    if custom_headers:\n        for header, value in custom_headers.items():\n            msg[header] = value\n\n    msg['Subject'] = str(args.subject)\n    msg.attach(MIMEText(str(args.data), 'plain'))\n\n    # Envío del correo\n    server.sendmail(smtp_username, str(args.to), msg.as_string())\n\n    # Cerrar la conexión\n    server.quit()\n\n\n\n\n\n\n\n\ndef main():\n\n    args= parse_args()\n\n    if not len(sys.argv) < 2:\n        banner()\n        if not (args.helo and args.mfrom and args.rcptto and args.data and args.ip and args.port):\n            print(('''%splease set -helo, -mfrom, -rcptto, -data, -ip, and -port %s''' % (colors.strongRed, colors.end) ))\n            return -1\n        \n        print(('''%sSe está enviando el correo... %s''' % (colors.strongRed, colors.end) ))\n\n        try:\n            send_custom_email(args)\n\n            print(('''%sEnviado %s''' % (colors.strongGreen, colors.end) ))\n\n        except:\n            print(('''%sHa habido un error enviando el correo. 
%s''' % (colors.strongRed, colors.end) ))\n \n \n \n \n \n\nif __name__ == '__main__':\n\n main()\n \n \n","repo_name":"salvadorJMA/EvilHermes","sub_path":"EvilHermes.py","file_name":"EvilHermes.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7886530820","text":"\n\nimport unittest\nfrom cpuinfo import *\nimport helpers\n\n\nclass MockDataSource:\n\tbits = '64bit'\n\tcpu_count = 6\n\tis_windows = False\n\tarch_string_raw = 'aarch64'\n\tuname_string_raw = ''\n\tcan_cpuid = False\n\n\t@staticmethod\n\tdef has_proc_cpuinfo():\n\t\treturn True\n\n\t@staticmethod\n\tdef has_lscpu():\n\t\treturn True\n\n\t@staticmethod\n\tdef cat_proc_cpuinfo():\n\t\treturncode = 0\n\t\toutput = r'''\nprocessor : 90\nBogoMIPS : 200.00\nFeatures : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics\nCPU implementer : 0x43\nCPU architecture: 8\nCPU variant : 0x1\nCPU part : 0x0a1\nCPU revision : 0\n\nprocessor : 91\nBogoMIPS : 200.00\nFeatures : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics\nCPU implementer : 0x43\nCPU architecture: 8\nCPU variant : 0x1\nCPU part : 0x0a1\nCPU revision : 0\n\nprocessor : 92\nBogoMIPS : 200.00\nFeatures : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics\nCPU implementer : 0x43\nCPU architecture: 8\nCPU variant : 0x1\nCPU part : 0x0a1\nCPU revision : 0\n\nprocessor : 93\nBogoMIPS : 200.00\nFeatures : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics\nCPU implementer : 0x43\nCPU architecture: 8\nCPU variant : 0x1\nCPU part : 0x0a1\nCPU revision : 0\n\nprocessor : 94\nBogoMIPS : 200.00\nFeatures : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics\nCPU implementer : 0x43\nCPU architecture: 8\nCPU variant : 0x1\nCPU part : 0x0a1\nCPU revision : 0\n\nprocessor : 95\nBogoMIPS : 200.00\nFeatures : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics\nCPU implementer : 0x43\nCPU architecture: 8\nCPU variant : 0x1\nCPU part : 0x0a1\nCPU revision : 0\n\n\n'''\n\t\treturn returncode, output\n\n\t@staticmethod\n\tdef lscpu():\n\t\treturncode = 0\n\t\toutput = r'''\nArchitecture: aarch64\nByte Order: Little Endian\nCPU(s): 96\nOn-line CPU(s) list: 0-95\nThread(s) per core: 1\nCore(s) per socket: 48\nSocket(s): 2\nNUMA node(s): 2\nL1d cache: 32K\nL1i cache: 78K\nL2 cache: 16384K\nNUMA node0 CPU(s): 0-47\nNUMA node1 CPU(s): 48-95\n'''\n\t\treturn returncode, output\n\n\nclass TestLinux_Aarch_64(unittest.TestCase):\n\tdef setUp(self):\n\t\thelpers.backup_data_source(cpuinfo)\n\t\thelpers.monkey_patch_data_source(cpuinfo, MockDataSource)\n\n\tdef tearDown(self):\n\t\thelpers.restore_data_source(cpuinfo)\n\n\t'''\n\tMake sure calls return the expected number of fields.\n\t'''\n\tdef test_returns(self):\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))\n\t\tself.assertEqual(3, len(cpuinfo._get_cpu_info_from_lscpu()))\n\t\tself.assertEqual(1, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_dmesg()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_cat_var_run_dmesg_boot()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))\n\t\tself.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))\n\t\tself.assertEqual(11, 
len(cpuinfo._get_cpu_info_internal()))\n\n\tdef test_get_cpu_info_from_lscpu(self):\n\t\tinfo = cpuinfo._get_cpu_info_from_lscpu()\n\n\t\tself.assertEqual(78 * 1024, info['l1_instruction_cache_size'])\n\t\tself.assertEqual(32 * 1024, info['l1_data_cache_size'])\n\n\t\tself.assertEqual(16384 * 1024, info['l2_cache_size'])\n\n\t\tself.assertEqual(3, len(info))\n\n\tdef test_get_cpu_info_from_proc_cpuinfo(self):\n\t\tinfo = cpuinfo._get_cpu_info_from_proc_cpuinfo()\n\n\t\tself.assertEqual(\n\t\t\t['aes', 'asimd', 'atomics', 'crc32', 'evtstrm',\n\t\t\t'fp', 'pmull', 'sha1', 'sha2']\n\t\t\t,\n\t\t\tinfo['flags']\n\t\t)\n\n\t@unittest.skip(\"FIXME: This fails because it does not have a way to get CPU brand string and Hz.\")\n\tdef test_all(self):\n\t\tinfo = cpuinfo._get_cpu_info_internal()\n\n\t\tself.assertEqual('', info['vendor_id_raw'])\n\t\tself.assertEqual('FIXME', info['hardware_raw'])\n\t\tself.assertEqual('FIXME', info['brand_raw'])\n\t\tself.assertEqual('FIXME', info['hz_advertised_friendly'])\n\t\tself.assertEqual('FIXME', info['hz_actual_friendly'])\n\t\tself.assertEqual((1000000000, 0), info['hz_advertised'])\n\t\tself.assertEqual((1000000000, 0), info['hz_actual'])\n\t\tself.assertEqual('ARM_8', info['arch'])\n\t\tself.assertEqual(64, info['bits'])\n\t\tself.assertEqual(6, info['count'])\n\n\t\tself.assertEqual('aarch64', info['arch_string_raw'])\n\n\t\tself.assertEqual(78 * 1024, info['l1_instruction_cache_size'])\n\t\tself.assertEqual(32 * 1024, info['l1_data_cache_size'])\n\n\t\tself.assertEqual(16384 * 1024, info['l2_cache_size'])\n\t\tself.assertEqual(0, info['l2_cache_line_size'])\n\t\tself.assertEqual(0, info['l2_cache_associativity'])\n\n\t\tself.assertEqual(0, info['l3_cache_size'])\n\n\t\tself.assertEqual(0, info['stepping'])\n\t\tself.assertEqual(0, info['model'])\n\t\tself.assertEqual(0, info['family'])\n\t\tself.assertEqual(0, info['processor_type'])\n\t\tself.assertEqual(\n\t\t\t['aes', 'asimd', 'atomics', 'crc32', 'evtstrm',\n\t\t\t'fp', 'pmull', 'sha1', 'sha2']\n\t\t\t,\n\t\t\tinfo['flags']\n\t\t)\n","repo_name":"workhorsy/py-cpuinfo","sub_path":"tests/test_linux_aarch64_64.py","file_name":"test_linux_aarch64_64.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","stars":285,"dataset":"github-code","pt":"81"} +{"seq_id":"13065810994","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule with logic for the Expenses entity\n\n\"\"\"\n\n__author__ = 'Samir Adrik'\n__email__ = 'samir.adrik@gmail.com'\n\nfrom source.util import Assertor, Tracking\n\nfrom .entity import Entity\nfrom .money import Money\n\n\nclass Expenses(Entity):\n \"\"\"\n Implementation of the Expenses entity\n\n \"\"\"\n\n @Tracking\n def cast_expenses(self, data: dict):\n \"\"\"\n method for making expenses (NOK)\n\n Parameters\n ----------\n data : dict\n dictionary of sifo expenses\n\n Returns\n -------\n out : dict\n dictionary of sifo expenses as nok\n\n \"\"\"\n keys = list(data.keys())\n values = [Money(val) if val != \"0\" else Money(\"0\") for val in list(data.values())]\n return dict(zip(keys, [val.value() for val in values]))\n\n def __init__(self, data: dict):\n \"\"\"\n Constructor / Instantiate the class\n\n Parameters\n ----------\n data : dict\n dictionary of sifo expenses data\n\n \"\"\"\n try:\n super().__init__()\n Assertor.assert_data_types([data], [dict])\n self._verdi = self.cast_expenses(data)\n except Exception as sifo_expenses_error:\n raise sifo_expenses_error\n\n @property\n def verdi(self):\n \"\"\"\n Value getter\n\n Returns\n 
-------\n out : dict\n active value dictionary in object\n\n \"\"\"\n return self._verdi\n","repo_name":"seemir/stressa","sub_path":"source/domain/expenses.py","file_name":"expenses.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41005237845","text":"\"\"\"\nFaça um programa que lê as duas notas parciais obtidas por um aluno numa disciplina ao longo\nde um semestre, e calcule a sua média. A atribuição de conceitos obedece à tabela abaixo:\n\"\"\"\n\nnota_1 = float(input('Insira a primeira nota ----> '))\nnota_2 = float(input('Insira a segunda nota -----> '))\nmedia = (nota_1 + nota_2) / 2\n\nif media >= 9:\n atribuicao = \"A\"\nelif media >= 7.5:\n atribuicao = \"B\"\nelif media >= 6:\n atribuicao = \"C\"\nelif media >= 4:\n atribuicao = \"D\"\nelse:\n atribuicao = \"E\"\n\nif atribuicao == \"A\" or atribuicao == \"B\" or atribuicao == \"C\":\n situacao = \"Aprovado\"\nelse:\n situacao = \"Reprovado\"\n\nprint('Sua média foi ------>', media)\nprint('Seu conceito é ----->', atribuicao)\nprint('Sua situação é de -->', situacao)\n","repo_name":"iscodand/python-programming-logic","sub_path":"Tasks - LP/Estrutura Sequencial/ex032.py","file_name":"ex032.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"34284036129","text":"# chat/consumers.py\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nfrom .models import Service\nimport json\n\nclass QueueConsumer(AsyncWebsocketConsumer):\n \"\"\" Queue Consumer that handles ws requests\n \"\"\"\n async def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room_name']\n self.room_group_name = 'chat_%s' % self.room_name\n\n # Join room group\n await self.channel_layer.group_add(\n self.room_group_name,\n self.channel_name\n )\n\n await self.accept()\n\n async def disconnect(self, close_code):\n # Leave room group\n await self.channel_layer.group_discard(\n self.room_group_name,\n self.channel_name\n )\n\n # Receive message from WebSocket\n async def receive(self, text_data):\n\n \"\"\" recieves a json websocket request with text_data that contains type: (changeNumber) \n , serviceID & value then broadcasts it to all connections\n \"\"\"\n # Send message to room group\n #this calls the chat_message method\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n 'type': 'queue_message',\n 'content':json.loads(text_data)\n }\n )\n\n # Receive message from room group\n async def queue_message(self, event):\n message = event['content']\n\n s = Service.objects.get(pk=message[\"serviceID\"])\n s.current_number = message[\"value\"]\n s.save()\n\n # Send message to all (queue group) WebSocket clients\n await self.send(text_data=json.dumps({\n **message\n }))","repo_name":"elmawardy/djqueue","sub_path":"Queue/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"36807554716","text":"\nfrom subprocess import call\n\n\nsizes_distrib = [\n # [10, 5, 20],\n # [5, 10, 20],\n # [5, 5, 40],\n [20, 10, 10],\n]\n\nsizes_factor = 130\nsizes_offset = 5\n\nsizes = [[x * sizes_factor + sizes_offset for x in y] for y in sizes_distrib]\n\nfor i in range(0, 10):\n for n, m, l in sizes:\n with open('data/explog_omp_mtb.csv', 'a+') as myoutfile:\n print(\"SIZE: %i x %i x %i.\\n\" % (n, m, l))\n 
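For the Channels QueueConsumer shown earlier, the consumer reads self.scope['url_route']['kwargs']['room_name'], which implies a websocket routing entry along these lines. This is a hypothetical sketch: the 'ws/queue/' URL prefix and file layout are assumptions; only the room_name kwarg and the consumer class come from the code above (as_asgi() is the Channels 3+ spelling, Channels 2 routes the class object directly):

# chat/routing.py (hypothetical) -- wires a URL pattern to QueueConsumer so
# that scope['url_route']['kwargs']['room_name'] is populated.
from django.urls import re_path

from . import consumers

websocket_urlpatterns = [
    re_path(r'ws/queue/(?P<room_name>\w+)/$', consumers.QueueConsumer.as_asgi()),
]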
# call([\"./\" + \"OMPExpMaxTopBox\", str(n), str(m), str(l)], stdout=myoutfile)\n call([\"./\" + \"OMPExpMaxBotBox\", str(n), str(m), str(l)], stdout=myoutfile)\n","repo_name":"victornicolet/loop_experiments","sub_path":"runexps/run_mtb.py","file_name":"run_mtb.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73025022985","text":"#OYUNN YAZILIMINA BAŞLANIYOR NE TARZ OYUN OLDUĞUNU SEÇ\n#kullanılacak modüller eklendi\n\nimport pygame as py\nimport os\nimport random\npy.font.init()\n#penceresi yazılıyor\n\nGENİŞLİK= 750\nYÜKSEKLİK= 600\nEKRAN=py.display.set_mode((GENİŞLİK,YÜKSEKLİK))\nbeklemesuresi = 30\n\n#resimleri yüklemek\n#ARKAplan\nBG=py.image.load(os.path.join(\"proje için\",\"background_space.png\"))\n\n#Görev Gemisi\nGOREV_GEMİSİ = py.image.load(os.path.join(\"proje için\",\"mission_ship.png\"))\n\n#Düşman Gemileri\nKIRMIZI_DÜŞMAN_GEMİSİ = py.image.load(os.path.join(\"proje için\", \"enemy_ship_red.png\"))\nYEŞİL_DÜŞMAN_GEMİSİ = py.image.load(os.path.join(\"proje için\", \"enemy_ship_green.png\"))\nMAVİ_DÜŞMAN_GEMİSİ = py.image.load(os.path.join(\"proje için\", \"enemy_ship_blue.png\"))\n\n#Lazerler\nOYUNCU_LAZER = py.image.load(os.path.join(\"proje için\", \"blue_rocket.png\"))\n\n#Düşmanların Lazerleri\nLAZER01 = py.image.load(os.path.join(\"proje için\", \"laser01.png\")) \nLAZER02 = py.image.load(os.path.join(\"proje için\", \"laser02.png\"))\nLAZER03 = py.image.load(os.path.join(\"proje için\", \"laser03.png\"))\n\n\ndef carpisma(object1, object2):\n offset_x = object2.x - object1.x\n offset_y = object2.y - object1.y\n return object1.mask.overlap(object2.mask, (offset_x, offset_y)) != None\n\nclass Lazer():\n def __init__(self,x,y,img):\n self.x = x\n self.y = y\n self.img = img\n self.mask = py.mask.from_surface(self.img)\n\n\n def hareket(self,velocity):\n self.y += velocity\n \n def çizmek(self, EKRAN):\n EKRAN.blit(self.img, (self.x,self.y))\n\n def collision (self, object):\n return carpisma(object, self)\n\n def off_screen(self, height):\n return not(self.y <= height and self.y >=0)\n\n\nclass Gemi():\n def __init__(self,x,y,sağlık=100):\n self.x=x\n self.y=y\n self.sağlık=sağlık\n self.gemi_img= None\n self.lazer_img = None\n self.lazerler = []\n self.bekleme_suresi_sayaci = 0\n\n def beklemesuresi(self):\n if self.bekleme_suresi_sayaci >= beklemesuresi:\n self.bekleme_suresi_sayaci = 0\n else:\n self.bekleme_suresi_sayaci += 1 \n\n \n def ates(self):\n if self.bekleme_suresi_sayaci == 0:\n lazer = Lazer(self.x, self.y, self.lazer_img)\n self.lazerler.append(lazer)\n\n def hareket_lazerler(self, velocity, object):\n self.beklemesuresi()\n for lazer in self.lazerler:\n lazer.hareket(velocity)\n if lazer.off_screen(YÜKSEKLİK):\n self.lazerler.remove(lazer)\n elif lazer.collision(object):\n object.sağlık -= 10\n self.lazerler.remove(lazer)\n\n def çizmek(self, EKRAN):\n EKRAN.blit(self.gemi_img, (self.x,self.y))\n for lazer in self.lazerler:\n lazer.çizmek(EKRAN)\n\n\n \n\n def get_widht(self):\n return self.ship_img.get_widht()\n\n def get_height(self):\n return self.ship_img.get_height()\n\nclass OyuncuGemisi(Gemi):\n\n\n def __init__(self,x,y,sağlık=100):\n super().__init__(x,y,sağlık)\n self.gemi_img = GOREV_GEMİSİ\n self.lazer_img = OYUNCU_LAZER\n self.mask = py.mask.from_surface(self.gemi_img)\n self.max_sağlık = sağlık\n\n\n def ates(self):\n if self.bekleme_suresi_sayaci == 0:\n lazer = Lazer(self.x+20, self.y, self.lazer_img)\n self.lazerler.append(lazer)\n\n 
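The carpisma() helper above is the game's entire hit-detection mechanism: pygame builds a bitmask from each sprite's alpha channel and Mask.overlap() tests the two masks at a given offset. A self-contained sketch of just that mechanism, with plain filled surfaces standing in for the ship and laser images:

import pygame

pygame.init()

# SRCALPHA surfaces carry per-pixel alpha; pygame.mask.from_surface() sets
# mask bits wherever alpha exceeds its threshold (every pixel here).
a = pygame.Surface((10, 10), pygame.SRCALPHA)
a.fill((255, 0, 0, 255))
b = pygame.Surface((10, 10), pygame.SRCALPHA)
b.fill((0, 255, 0, 255))

mask_a = pygame.mask.from_surface(a)
mask_b = pygame.mask.from_surface(b)

# overlap() takes the offset (b.x - a.x, b.y - a.y) and returns the first
# overlapping point or None -- the same test carpisma() performs.
print(mask_a.overlap(mask_b, (5, 5)))   # a point such as (5, 5): collision
print(mask_a.overlap(mask_b, (20, 0)))  # None: 10px-wide surfaces cannot touch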
def hareket_lazerler(self, velocity, objects):\n self.beklemesuresi()\n for lazer in self.lazerler: \n lazer.hareket(velocity)\n if lazer.off_screen(YÜKSEKLİK):\n self.lazerler.remove(lazer)\n else:\n for object in objects:\n if lazer.collision(object):\n objects.remove(object)\n if lazer in self.lazerler:\n self.lazerler.remove(lazer)\n\n def can_barı(self, EKRAN):\n py.draw.rect(EKRAN, (255,0,0), (self.x, self.y + self.gemi_img.get_height(),\n self.gemi_img.get_width(), 7))\n\n py.draw.rect(EKRAN, (0,255,0), (self.x, self.y + self.gemi_img.get_height(),\n int (self.gemi_img.get_width() * (self.sağlık/self.max_sağlık)), 7))\n\n def çizmek(self, EKRAN):\n EKRAN.blit(self.gemi_img, (self.x,self.y))\n self.can_barı(EKRAN)\n for lazer in self.lazerler:\n lazer.çizmek(EKRAN)\n\n\n\n\n\nclass DüşmanGemisi(Gemi):\n RENK_HARİTASI = {\n \"red\" : [KIRMIZI_DÜŞMAN_GEMİSİ, LAZER01],\n \"green\" :[YEŞİL_DÜŞMAN_GEMİSİ, LAZER02],\n \"blue\" :[MAVİ_DÜŞMAN_GEMİSİ, LAZER03]\n}\n\n def __init__(self,x,y,renk,sağlık=100):\n super().__init__(x,y,sağlık)\n self.gemi_img, self.lazer_img = self.RENK_HARİTASI[renk]\n self.mask = py.mask.from_surface(self.gemi_img)\n\n def move(self, hızı):\n self.y += hızı\n \n def ates(self):\n if self.bekleme_suresi_sayaci == 0:\n lazer = Lazer(self.x-1, self.y, self.lazer_img)\n self.lazerler.append(lazer)\n\n\ndef main():\n run = True\n FPS= 75\n düşmanlar = []\n düşman_hızı = 1\n lazer_hızı = 5\n düşman_uzunluk = 0\n seviye = 0\n\n main_font=py.font.SysFont(\"Algerian\",30)\n lost_font=py.font.SysFont(\"Algerian\",90)\n \n\n oyuncu_hızı = 5\n\n clock = py.time.Clock()\n\n\n oyuncu = OyuncuGemisi(350,450)\n lost=False\n lost_count=0\n\n\n def çizmek():\n EKRAN.blit(BG, (0,0))\n oyuncu.çizmek(EKRAN)\n seviye_etiketi=main_font.render(\"SEVİYE: {}\".format(seviye),1,(255,255,0))\n EKRAN.blit(seviye_etiketi,(600,10))\n\n\n for düşman in düşmanlar:\n düşman.çizmek(EKRAN)\n\n if lost:\n lost_label = lost_font.render(\"BAŞARAMADINIZ!\" , 1 , (200,0,0))\n EKRAN.blit(lost_label,(int(GENİŞLİK/2-(lost_label.get_width()/2)),int(YÜKSEKLİK/2-(lost_label.get_height()/2))))\n\n py.display.update()\n\n\n while run:\n clock.tick(FPS)\n çizmek()\n\n if oyuncu.sağlık <= 0:\n lost = True\n if lost:\n lost_count+=1\n if lost_count>= FPS*3:\n run = False\n else:\n continue\n\n if len(düşmanlar) == 0:\n düşman_hızı += 1\n lazer_hızı += 1\n düşman_uzunluk += 5\n seviye += 1\n global beklemesuresi\n beklemesuresi -=5\n\n for i in range(düşman_uzunluk):\n düşman = DüşmanGemisi(random.randrange(1,700), random.randrange(-1500,-100),random.choice([\"red\",\"blue\",\"green\"])) \n düşmanlar.append(düşman)\n\n\n for event in py.event.get():\n if event.type == py.QUIT:\n run =False\n\n keys = py.key.get_pressed()\n\n if keys[py.K_LEFT] and oyuncu.x > 0: \n oyuncu.x -= oyuncu_hızı\n if keys[py.K_RIGHT] and oyuncu.x < 670:\n oyuncu.x += oyuncu_hızı\n if keys[py.K_UP] and oyuncu.y > 0:\n oyuncu.y -= oyuncu_hızı\n if keys[py.K_DOWN] and oyuncu.y < 470:\n oyuncu.y += oyuncu_hızı\n if keys [py.K_SPACE]:\n oyuncu.ates()\n \n\n for düşman in düşmanlar:\n düşman.move(düşman_hızı)\n düşman.hareket_lazerler(lazer_hızı, oyuncu)\n if random.randrange(0, 2*60) == 1:\n düşman.ates()\n\n \n if carpisma(düşman, oyuncu):\n oyuncu.sağlık -= 10\n düşmanlar.remove(düşman)\n\n if düşman.y > YÜKSEKLİK:\n düşmanlar.remove(düşman)\n\n\n oyuncu.hareket_lazerler(-lazer_hızı, düşmanlar)\n\ndef main_menu():\n title_font = py.font.SysFont(\"Algerian\", 50)\n run = True\n while run:\n EKRAN.blit (BG, (0,0))\n main_text = 
title_font.render(\"BAŞLAMAK İÇİN FAREYE TIKLAYINIZ\", 1, (255, 255, 255))\n        EKRAN.blit(main_text, (GENİŞLİK/2 - main_text.get_width()/2, YÜKSEKLİK/2 - main_text.get_height()/2))\n        \n        \n        py.display.update()\n        for event in py.event.get():\n            if event.type == py.QUIT:\n                run = False\n            if event.type == py.MOUSEBUTTONDOWN:\n                main()\n        \n    py.quit()\n    \n\n\n\nmain_menu()","repo_name":"hsynrgt/programlama-proje","sub_path":"Uzayda_Savaş.py","file_name":"Uzayda_Savaş.py","file_ext":"py","file_size_in_byte":8511,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"33699291828","text":"import random\nimport librosa\nimport numpy as np\nimport sounddevice as sd\nimport time\nimport matplotlib.pyplot as plt\nfrom phue import Bridge\nimport sys\n\ndef rgb_to_xy(red, green, blue):\n    # Normalize RGB values\n    r = red / 255\n    g = green / 255\n    b = blue / 255\n\n    # Apply gamma correction\n    r = r if r <= 0.04045 else ((r + 0.055) / 1.055) ** 2.4\n    g = g if g <= 0.04045 else ((g + 0.055) / 1.055) ** 2.4\n    b = b if b <= 0.04045 else ((b + 0.055) / 1.055) ** 2.4\n\n    # Convert RGB to XYZ\n    x = r * 0.649926 + g * 0.103455 + b * 0.197109\n    y = r * 0.234327 + g * 0.743075 + b * 0.022598\n    z = g * 0.053077 + b * 1.035763\n\n    # Convert XYZ to XY\n    x_sum = x + y + z\n    if x_sum > 0:\n        xy = [round(x / x_sum, 4), round(y / x_sum, 4)]\n    else:\n        xy = [0, 0]\n\n    return xy\n\nb = Bridge('59.191.204.143')\n\nusername = b.connect()\nb.get_api()\n\n# Get all lights\nlights = b.get_light_objects('id')\n\n# Set up colors\nCOLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255)]\n\n# Load the disco music\nmusic = int(sys.argv[1])\n\nif music == 0:\n    music_file = \"calmdown.mp3\"\nelif music == 1:\n    music_file = \"idol_YOASOBI.mp3\"\n\ny, sr = librosa.load(music_file)\n\n# Detect beats in the music\ntempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=3250)\n# Calculate the time interval between high beats\nbeat_interval = 60.0 / (tempo+450)\n\n# Play the disco music\nsd.play(y, sr)\n\n# Main loop\nbeat_index = 0\nrunning = True\nstart_time = time.time()\nprev_color = (0, 0, 0)\nflag = 0\ncnt = 0\n\nindex_thres = 0\n\nwhile running:\n    '''\n    if flag == 0:\n        beat_interval = beat_interval = 60.0 / (tempo+450)\n        flag+=1\n    elif flag == 1:\n        beat_interval = beat_interval = 60.0 / (tempo-100)\n        flag-=1\n    '''\n    current_time = time.time() - start_time\n    # print(beat_index)\n    if beat_index < len(beats) and current_time >= beats[beat_index] * beat_interval:\n        if beat_index >= index_thres:\n            cnt+=1\n            if cnt == 2 or cnt == 0:\n                beat_index+=1\n            else:\n                current_color = random.choice(COLORS)\n                while prev_color == current_color:\n                    current_color = random.choice(COLORS)\n                #print(\"Current color:\", current_color)\n                \n                # set light color\n                xy = rgb_to_xy(current_color[0], current_color[1], current_color[2])\n                b.set_light(lights, {'xy': xy})\n                \n                prev_color = current_color\n                beat_index += 1\n            \n            if cnt == 4:\n                cnt = 0\n    else:\n        beat_index += 1\n\n    # Delay to synchronize with music\n    time.sleep(0.01) # Adjust the delay as needed\n\n    # Stop the program when the music ends\n    if beat_index >= len(beats):\n        running = False\n\n# End of program\n","repo_name":"gordenlim50/express-server","sub_path":"python/disco.py","file_name":"disco.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"25027143478","text":"def find_short(s):\n    var1 = s.split(' ')\n    
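As a worked check of the rgb_to_xy() conversion in disco.py above, here is pure red pushed through the same matrix by hand; gamma correction maps 1.0 to ((1.0 + 0.055) / 1.055) ** 2.4 == 1.0, so only the red coefficients contribute:

r, g, b = 1.0, 0.0, 0.0
x = r * 0.649926 + g * 0.103455 + b * 0.197109
y = r * 0.234327 + g * 0.743075 + b * 0.022598
z = g * 0.053077 + b * 1.035763
total = x + y + z
# x / total = 0.649926 / 0.884253, y / total = 0.234327 / 0.884253
print([round(x / total, 4), round(y / total, 4)])  # [0.735, 0.265]

That xy pair is what the script sends to the bridge for every red flash.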
print (var1)\n lengths = []\n for i in var1:\n lengths.append(len(i))\n print (lengths)\n\n return min(lengths)\n\nprint (find_short(\"bitcoin take over the world maybe who knows perhaps\"))\n","repo_name":"azizsaad/55-CodeWars-Problems","sub_path":"codewars/7 kyu/shortest_word.py","file_name":"shortest_word.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"38395892026","text":"###########################################################################\n# Auther: Jonathan isakov\n# Date: 31-7-2020\n# Description: Naive bayes algorythem for predicting a spam mail from a real\n# mail\n###########################################################################\n\n# IMPORTS\nimport re\nimport os\nimport glob\nimport shutil\nimport hashlib\n\n# GLOBALS\nfriends_path = '.\\\\friends'\nspam_path = '.\\\\spam'\nsus_path = '.\\\\sus'\nalpha = 1 \n\n# Functions\ndef word_count(path):\n \"\"\"\n word_count(path to the folder of mails) -> number of mails, a dict of words and accournces,,\n word_count is a algo that reads all the *.txt files in a specfied directory\n and returns the count per word in a dict {word : count} \n \"\"\"\n w = {}\n i = 0\n for mail in glob.glob(path +'\\\\'+ \"*.txt\"):\n i = i + 1\n m = open(mail, \"r+\")\n content = m.read()\n content = re.split(r\"\\W+\", content)\n for word in content:\n if word in w:\n w[word] = w[word] + 1\n else:\n w[word] = alpha + 1\n m.close()\n if re.match(r'.*\\.txt', path):\n m = open(path, \"r+\")\n content = m.read()\n w = list(set(re.findall(r\"\\w+\", content)))\n m.close()\n return [i, w]\n\ndef dict_combo(dict1, dict2):\n \"\"\"\n dict_combo(dict1,dict2) --> returns a combined dicted with words at min of alpha,,\n this will return a correct combo list uppon which we can preform the naive bayes algo\n \"\"\"\n for word in dict2:\n if word not in dict1:\n dict1[word] = alpha\n return dict1\ndef porb_dict(d):\n \"\"\"\n num_of_words(dict) --> the number of words in the dict,,\n used to calculate the probability of the word to accour\n \"\"\"\n count = 0\n for word in d:\n count = count + d[word]\n for word in d:\n d[word] = float(d[word] / count)\n return d\n\ndef compare(p_friend, p_spam, friends_words,spam_words, sus_words):\n for word in sus_words:\n if word in friends_words:\n p_friend = p_friend * friends_words[word]\n p_spam = p_spam * spam_words[word]\n print(\"friend prob: \" + str(p_friend))\n print(\"spam prob: \" + str(p_spam))\n if p_friend > p_spam:\n return 'friend'\n return 'spam'\n# Main\ndef main():\n print(\"collecting all mails\")\n a = word_count(friends_path)\n friend_mails = a[0]\n friends_words = a[1]\n a = word_count(spam_path)\n spam_mails = a[0]\n spam_words = a[1]\n\n print(\"preparing the naive bayes vars\")\n p_friend = friend_mails / (friend_mails + spam_mails)\n p_spam = spam_mails / (friend_mails + spam_mails)\n friends_words = porb_dict(dict_combo(friends_words, spam_words))\n spam_words = porb_dict(dict_combo(spam_words, friends_words))\n\n print(\"classifing the new mails and improving algo\")\n for mail in glob.glob(sus_path +'\\\\'+ \"*.txt\"):\n a = word_count(mail)\n sus_words = a[1]\n state = compare(p_friend, p_spam, friends_words,spam_words, sus_words)\n if state == 'friend':\n name = mail.split('\\\\')\n shutil.copyfile(mail, friends_path + '\\\\' + name[2])\n print(mail + \" is classified as friend mail\")\n else:\n name = mail.split('\\\\')\n shutil.copyfile(mail, spam_path + '\\\\' + name[2])\n 
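One caveat about compare() in the classifier above: multiplying hundreds of per-word probabilities underflows double precision to 0.0 on long mails, at which point the comparison is meaningless. A minimal log-space variant with the same inputs and the same decision rule (the alpha smoothing in dict_combo() already guarantees every probability is strictly positive, so the logs are defined):

import math


def compare_log(p_friend, p_spam, friends_words, spam_words, sus_words):
    # Sums of logs preserve the ordering of the products without underflow.
    log_friend = math.log(p_friend)
    log_spam = math.log(p_spam)
    for word in sus_words:
        if word in friends_words:
            log_friend += math.log(friends_words[word])
            log_spam += math.log(spam_words[word])
    return 'friend' if log_friend > log_spam else 'spam'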
print(mail + \" is classified as spam mail\")\n\n \n\n \n\n\nif __name__== \"__main__\" :\n main()\n","repo_name":"jonisakov/naive-bayes","sub_path":"naive-bayes.py","file_name":"naive-bayes.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18399228270","text":"from decimal import Decimal\nimport datetime\nfrom trytond.model import ModelSQL, Workflow, fields, ModelView\nfrom trytond.pool import PoolMeta, Pool\nfrom trytond.transaction import Transaction\nfrom trytond.pyson import Bool, Eval, Or, If\nfrom trytond.wizard import (Wizard, StateView, StateAction, StateTransition,\n Button)\nfrom trytond.modules.company import CompanyReport\nfrom trytond.report import Report\nfrom lxml import etree\nimport base64\nimport xmlrpclib\nimport re\nfrom xml.dom.minidom import parse, parseString\nimport time\nfrom trytond.rpc import RPC\nimport os\nfrom trytond.config import config\nfrom trytond import backend\nimport collections\nfrom itertools import islice, ifilter, chain, izip\nimport psycopg2\nimport psycopg2.extras\nimport urllib2\n\ndirectory = config.get('database', 'path')\ndirectory_xml = directory +'/factura.xml'\n\n__all__ = ['EInvoice', 'InvoiceLine', 'EInvoiceReport']\n\n_STATES = {\n 'readonly': Eval('state') != 'draft',\n}\n_DEPENDS = ['state']\n\n_TYPE = [\n ('e_invoice', 'Electronic Invoice'),\n ('e_credit_note', 'Electronic Credit Note'),\n]\n\ntipoDocumento = {\n 'e_invoice': '01',\n 'e_credit_note': '04',\n}\n\ntipoIdentificacion = {\n '04' : '04',\n '05' : '05',\n '06' : '06',\n '07' : '07',\n}\n\nclass EInvoice(Workflow, ModelSQL, ModelView):\n 'EInvoice'\n __name__ = 'einvoice.einvoice'\n _order_name = 'invoice_date'\n\n company = fields.Many2One('company.company', 'Company', required=True,\n readonly=True, select=True, domain=[\n ('id', If(Eval('context', {}).contains('company'), '=', '!='),\n Eval('context', {}).get('company', -1)),\n ],\n depends=_DEPENDS)\n party = fields.Many2One('party.party', 'Party', readonly=True)\n invoice_number = fields.Char('No. 
Comprobante', readonly=True)\n    subtotal = fields.Numeric('Subtotal', readonly=True)\n    iva = fields.Numeric('IVA', readonly=True)\n    total = fields.Numeric('Total', readonly=True)\n    state = fields.Selection([\n            ('draft', 'Draft'),\n            ('send', 'Send'),\n            ], 'State', readonly=True)\n\n    type = fields.Selection(_TYPE, 'Type', select=True,\n        required=True, states=_STATES, depends=_DEPENDS)\n    mensaje = fields.Text('Mensaje de error SRI', readonly=True, states={\n            'invisible': Eval('estado_sri') != 'NO AUTORIZADO',\n            })\n    estado_sri = fields.Char('Estado Facturacion-Electronica', size=24, readonly=True)\n    path_xml = fields.Char(u'Path archivo xml de comprobante', readonly=True)\n    path_pdf = fields.Char(u'Path archivo pdf de factura', readonly=True)\n    numero_autorizacion = fields.Char(u'Número de Autorización')\n    fecha_autorizacion = fields.Char('Fecha Autorizacion', readonly=True)\n    invoice_date = fields.Date('Fecha Factura', readonly=True)\n    lines = fields.One2Many('einvoice.einvoice.line', 'invoice', 'Lines',\n        states=_STATES)\n    id_reference = fields.Char('ID referencia factura', readonly=True)\n    anulada = fields.Boolean('Anulada con nota de Credito')\n\n    @classmethod\n    def __setup__(cls):\n        super(EInvoice, cls).__setup__()\n        cls._check_modify_exclude = ['state', 'lines', 'estado_sri', 'mensaje',\n            'invoice_report_cache', 'invoice_number', 'path_xml', 'path_pdf',\n            'id_reference', 'numero_autorizacion', 'anulada']\n        cls.__rpc__['save_invoice'] = RPC(check_access=False, readonly=False)\n        cls.__rpc__['get_invoice'] = RPC(check_access=False, readonly=False)\n        cls.__rpc__['get_path'] = RPC(check_access=False, readonly=False)\n        cls.__rpc__['get_path_adm'] = RPC(check_access=False, readonly=False)\n        cls._order.insert(0, ('invoice_date', 'DESC'))\n        cls._order.insert(1, ('id', 'DESC'))\n        cls._error_messages.update({\n                'modify_einvoice': ('You can not modify invoice \"%s\" because '\n                    'it is send.'),\n                'delete_einvoice': ('You can not delete invoice \"%s\" because '\n                    'it is send.'),\n                })\n\n    @classmethod\n    def __register__(cls, module_name):\n        TableHandler = backend.get('TableHandler')\n        sql_table = cls.__table__()\n        super(EInvoice, cls).__register__(module_name)\n        cursor = Transaction().cursor\n        table = TableHandler(cursor, cls, module_name)\n\n    @classmethod\n    def write(cls, *args):\n        actions = iter(args)\n        all_invoices = []\n        for invoices, values in zip(actions, actions):\n            if set(values) - set(cls._check_modify_exclude):\n                cls.check_modify(invoices)\n            all_invoices += invoices\n        super(EInvoice, cls).write(*args)\n\n    @classmethod\n    def copy(cls, einvoices, default=None):\n        if default is None:\n            default = {}\n        default = default.copy()\n        default['state'] = 'draft'\n        default['invoice_number'] = None\n        default.setdefault('invoice_date', None)\n        return super(EInvoice, cls).copy(einvoices, default=default)\n\n\n    @staticmethod\n    def default_state():\n        return 'draft'\n\n    @staticmethod\n    def default_anulada():\n        return False\n\n    @staticmethod\n    def default_company():\n        return Transaction().context.get('company')\n\n    @classmethod\n    def check_modify(cls, einvoices):\n        for einvoice in einvoices:\n            if (einvoice.state in ('send',)):\n                cls.raise_user_error('modify_einvoice', (einvoice.invoice_number,))\n\n    @classmethod\n    def delete(cls, einvoices):\n        cls.check_modify(einvoices)\n        for einvoice in einvoices:\n            if (einvoice.state in ('send',)):\n                cls.raise_user_error('delete_einvoice', (einvoice.invoice_number,))\n        super(EInvoice, cls).delete(einvoices)\n\n    def replace_character(self, cadena):\n        reemplazo = {u\"Â\":\"A\", 
u\"Á\":\"A\", u\"À\":\"A\", u\"Ä\":\"A\", u\"É\":\"E\", u\"È\":\"E\", u\"Ê\":\"E\",u\"Ë\":\"E\",\n u\"Í\":\"I\",u\"Ì\":\"I\",u\"Î\":\"I\",u\"Ï\":\"I\",u\"Ó\":\"O\",u\"Ò\":\"O\",u\"Ö\":\"O\",u\"Ô\":\"O\",u\"Ú\":\"U\",u\"Ù\":\"U\",u\"Ü\":\"U\",\n u\"Û\":\"U\",u\"á\":\"a\",u\"à\":\"a\",u\"â\":\"a\",u\"ä\":\"a\",u\"é\":\"e\",u\"è\":\"e\",u\"ê\":\"e\",u\"ë\":\"e\",u\"í\":\"i\",u\"ì\":\"i\",\n u\"ï\":\"i\",u\"î\":\"i\",u\"ó\":\"o\",u\"ò\":\"o\",u\"ô\":\"o\",u\"ö\":\"o\",u\"ú\":\"u\",u\"ù\":\"u\",u\"ü\":\"u\",u\"û\":\"u\",u\"ñ\":\"ni\",\n u\"Ñ\":\"Ni\", '\\n':\" \"}\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, reemplazo.keys())))\n nueva_cadena = regex.sub(lambda x: str(reemplazo[x.string[x.start():x.end()]]), cadena)\n return nueva_cadena\n\n def set_number(self):\n establecimiento = self.company.establecimiento\n p_emision = self.company.p_emision\n secuencia_factura = self.company.secuencia_factura\n secuencia_notaCredito = self.company.secuencia_notaCredito\n if self. type == 'e_invoice':\n if len(str(secuencia_factura)) == 1:\n number = str(establecimiento)+'-'+str(p_emision)+'-00000000'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 2:\n number = str(establecimiento)+'-'+str(p_emision)+'-0000000'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 3:\n number = str(establecimiento)+'-'+str(p_emision)+'-000000'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 4:\n number = str(establecimiento)+'-'+str(p_emision)+'-00000'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 5:\n number = str(establecimiento)+'-'+str(p_emision)+'-0000'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 6:\n number = str(establecimiento)+'-'+str(p_emision)+'-000'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 7:\n number = str(establecimiento)+'-'+str(p_emision)+'-00'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 8:\n number = str(establecimiento)+'-'+str(p_emision)+'-0'+str(secuencia_factura)\n elif len(str(secuencia_factura)) == 9:\n number = str(establecimiento)+'-'+str(p_emision)+'-'+str(secuencia_factura)\n company = self.company\n company.secuencia_factura = secuencia_factura + 1\n company.save()\n else:\n if len(str(secuencia_notaCredito)) == 1:\n number = str(establecimiento)+'-'+str(p_emision)+'-00000000'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 2:\n number = str(establecimiento)+'-'+str(p_emision)+'-0000000'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 3:\n number = str(establecimiento)+'-'+str(p_emision)+'-000000'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 4:\n number = str(establecimiento)+'-'+str(p_emision)+'-00000'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 5:\n number = str(establecimiento)+'-'+str(p_emision)+'-0000'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 6:\n number = str(establecimiento)+'-'+str(p_emision)+'-000'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 7:\n number = str(establecimiento)+'-'+str(p_emision)+'-00'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 8:\n number = str(establecimiento)+'-'+str(p_emision)+'-0'+str(secuencia_notaCredito)\n elif len(str(secuencia_notaCredito)) == 9:\n number = str(establecimiento)+'-'+str(p_emision)+'-'+str(secuencia_notaCredito)\n company = self.company\n company.secuencia_notaCredito = secuencia_notaCredito + 1\n company.save()\n vals = {'invoice_number': number}\n 
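The nine-branch padding ladder in set_number() above reduces to zero-padding the sequence number to nine digits, which str.zfill expresses directly. A sketch of the equivalence (the helper name is illustrative, not part of the module):

def format_receipt_number(establecimiento, p_emision, secuencia):
    # Same result as the if/elif chain for any 1-9 digit sequence number.
    return '%s-%s-%s' % (establecimiento, p_emision, str(secuencia).zfill(9))


print(format_receipt_number('001', '001', 27))  # 001-001-000000027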
self.write([self], vals)\n\n def web_service(self):\n CONEXION = 'UD NO HA CONFIGURADO LOS DATOS DE CONEXION CON EL WS, \\nCOMUNIQUESE CON EL ADMINISTRADOR DEL SISTEMA'\n pool = Pool()\n conexions = pool.get('res.user')\n conexion = conexions.search([('id', '=', 1)])\n if conexion:\n for c in conexion:\n if c.direccion:\n address = c.cabecera+\"://\"+base64.decodestring(c.usuario)+\":\"+base64.decodestring(c.pass_db)+\"@\"+c.direccion+\":\"+c.puerto+\"/\"+base64.decodestring(c.name_db)\n return address\n else:\n return CONEXION\n\n def send_mail_invoice(self, xml_element, access_key, send_m, s, server=\"localhost\"):\n MAIL= u\"Ud no ha configurado el correo del cliente. Diríjase a: \\nTerceros->General->Medios de Contacto\"\n pool = Pool()\n empresa = self.replace_character(self.company.party.name) #cambiado por self.elimina_tildes(self.company.party.name)\n empresa = empresa.replace(' ','_')\n empresa = empresa.lower()\n ahora = datetime.datetime.now()\n year = str(ahora.year)\n client = self.replace_character(self.party.name) #reemplazo self.party.name\n client = client.upper()\n empresa_ = self.replace_character(self.company.party.name) #reemplazo self.company.party.name\n ruc = self.company.party.vat_number\n if ahora.month < 10:\n month = '0'+ str(ahora.month)\n else:\n month = str(ahora.month)\n\n tipo_comprobante = self.type\n if tipo_comprobante == 'e_invoice':\n tipo = 'fact_'\n n_tipo = \"FACTURA\"\n if tipo_comprobante == 'e_credit_note':\n tipo = 'n_c_'\n n_tipo = \"NOTA DE CREDITO\"\n\n ruc = access_key[10:23]\n est = access_key[24:27]\n emi= access_key[27:30]\n sec = access_key[30:39]\n num_fac = est+'-'+emi+'-'+sec\n numero = ruc+'_'+num_fac\n name_pdf = tipo+numero+ '.pdf'\n name_xml = tipo+numero + '.xml'\n #nuevaruta =os.getcwd() +'/comprobantes/'+empresa+'/'+year+'/'+month +'/'\n nr = s.model.nodux_electronic_invoice_auth.conexiones.path_files(ruc, {})\n nuevaruta = nr +empresa+'/'+year+'/'+month +'/'\n new_save = 'comprobantes/'+empresa+'/'+year+'/'+month +'/'\n ruta_xml = str(new_save+name_xml)\n ruta_pdf = str(new_save+name_pdf)\n self.write([self],{\n 'path_xml': ruta_xml,\n 'path_pdf': ruta_pdf})\n\n correos = pool.get('party.contact_mechanism')\n correo = correos.search([('type','=','email')])\n\n Report = Pool().get('einvoice.einvoice', type='report')\n report = Report.execute([self.id], {})\n email=''\n cont = 0\n for c in correo:\n if c.party == self.party:\n email = c.value\n if c.party == self.company.party:\n cont = cont +1\n f_e = c.value\n\n if email != '':\n to_email= email\n else :\n self.raise_user_error(MAIL)\n\n if send_m == '1':\n from_email = f_e\n else :\n from_email = f_e\n name = access_key + \".xml\"\n reporte = xmlrpclib.Binary(report[1])\n xml_element = self.replace_character(xml_element)\n xml = xmlrpclib.Binary(xml_element.replace('><', '>\\n<'))\n save_files = s.model.nodux_electronic_invoice_auth.conexiones.save_file(empresa, name_pdf, name_xml, reporte, xml, {})\n p_xml = nuevaruta + name_xml\n p_pdf = nuevaruta + name_pdf\n s.model.nodux_electronic_invoice_auth.conexiones.send_mail(name_pdf, name, p_xml, p_pdf, from_email, to_email, n_tipo, num_fac, client, empresa_, ruc, {})\n return True\n\n def connect_db(self):\n\n address_xml = self.web_service()\n s= xmlrpclib.ServerProxy(address_xml)\n\n pool = Pool()\n nombre = self.party.name\n cedula = self.party.vat_number\n ruc = self.company.party.vat_number\n nombre_e = self.company.party.name\n\n if self.type == \"e_invoice\":\n tipo = \"out_invoice\"\n else:\n tipo = 
\"out_credit_note\"\n\n fecha = str(self.invoice_date)\n empresa = self.company.party.name\n numero = self.invoice_number\n path_xml = self.path_xml\n path_pdf = self.path_pdf\n estado = self.estado_sri\n auth = self.numero_autorizacion\n correos = pool.get('party.contact_mechanism')\n correo = correos.search([('type','=','email')])\n for c in correo:\n if c.party == self.party:\n to_email = c.value\n if c.party == self.company.party:\n to_email_2 = c.value\n email_e= to_email_2\n email = to_email\n total = str(self.total)\n if self.estado_sri == 'AUTORIZADO':\n s.model.nodux_electronic_invoice_auth.conexiones.connect_db( nombre, cedula, ruc, nombre_e, tipo, fecha, empresa, numero, path_xml, path_pdf,estado, auth, email, email_e, total, {})\n\n def action_generate_invoice(self):\n PK12 = u'No ha configurado los datos de la empresa. Dirijase a: \\n Empresa -> NODUX WS'\n AUTHENTICATE_ERROR = u'Error de datos de conexión al autorizador de \\nfacturacion electrónica.\\nVerifique: USUARIO Y CONTRASEÑA .'\n ACTIVE_ERROR = u\"Ud. no se encuentra activo, verifique su pago. \\nComuníquese con NODUX\"\n WAIT_FOR_RECEIPT = 3\n TITLE_NOT_SENT = u'No se puede enviar el comprobante electronico al SRI'\n MESSAGE_SEQUENCIAL = u'Los comprobantes electrónicos deben ser enviados al SRI en orden secuencial'\n MESSAGE_TIME_LIMIT = u'Se ha excedido el límite de tiempo. Los comprobantes electrónicos deben ser enviados al SRI para su autorización, en un plazo máximo de 24 horas'\n WAIT_FOR_RECEIPT = 15\n pool = Pool()\n EInvoice = pool.get('einvoice.einvoice')\n usuario = self.company.user_ws\n password_u= self.company.password_ws\n access_key = self.generate_access_key()\n address_xml = self.web_service()\n s= xmlrpclib.ServerProxy(address_xml)\n\n name = self.company.party.name\n name_l=name.lower()\n name_l=name_l.replace(' ','_')\n name_r = self.replace_character(name_l)\n name_c = name_r+'.p12'\n\n authenticate, send_m, active = s.model.nodux_electronic_invoice_auth.conexiones.authenticate(usuario, password_u, {})\n if authenticate == '1':\n pass\n else:\n return AUTHENTICATE_ERROR\n\n if active == '1':\n return ACTIVE_ERROR\n else:\n pass\n\n nuevaruta = s.model.nodux_electronic_invoice_auth.conexiones.save_pk12(name_l, {})\n if self.type == 'e_invoice':\n factura1 = self.generate_xml_invoice()\n factura = etree.tostring(factura1, encoding = 'utf8', method = 'xml')\n a = s.model.nodux_electronic_invoice_auth.conexiones.validate_xml(factura, 'out_invoice', {})\n if a:\n return a\n file_pk12 = base64.encodestring(nuevaruta+'/'+name_c)\n file_check = (nuevaruta+'/'+name_c)\n password = self.company.password_pk12\n error = s.model.nodux_electronic_invoice_auth.conexiones.check_digital_signature(file_check,{})\n if error == '1':\n return ('No se ha encontrado el archivo de firma digital (.p12)')\n signed_document= s.model.nodux_electronic_invoice_auth.conexiones.apply_digital_signature(factura, file_pk12, password,{})\n result = s.model.nodux_electronic_invoice_auth.conexiones.send_receipt(signed_document, {})\n if result != True:\n return result\n\n if self.company.party.email:\n email = self.company.party.email\n else:\n self.raise_user_error('No ha configurado el correo de la empresa')\n\n doc_xml, m, auth, path, numero, num = s.model.nodux_electronic_invoice_auth.conexiones.request_authorization(access_key, name_r, 'out_invoice', signed_document,{})\n\n if doc_xml is None:\n msg = ' '.join(m)\n raise m\n if auth == 'NO AUTORIZADO':\n self.write([self],{\n 'estado_sri':'NO AUTORIZADO',\n 
'numero_autorizacion': access_key,\n 'state':'draft'})\n return \"Comprobante se ha enviado al SRI pero no se ha AUTORIZADO, reenvie su comprobante\"\n else:\n self.write([self],{\n 'estado_sri':'AUTORIZADO',\n 'numero_autorizacion':access_key,\n 'state':'send'})\n self.send_mail_invoice(doc_xml, access_key, send_m, s)\n\n else:\n notaCredito1 = self.generate_xml_credit_note()\n notaCredito = etree.tostring(notaCredito1, encoding = 'utf8', method = 'xml')\n a = s.model.nodux_electronic_invoice_auth.conexiones.validate_xml(notaCredito, 'out_credit_note', {})\n if a:\n return a\n file_pk12 = base64.encodestring(nuevaruta+'/'+name_c)\n file_check = (nuevaruta+'/'+name_c)\n password = self.company.password_pk12\n error = s.model.nodux_electronic_invoice_auth.conexiones.check_digital_signature(file_check,{})\n if error == '1':\n return ('No se ha encontrado el archivo de firma digital (.p12)')\n signed_document= s.model.nodux_electronic_invoice_auth.conexiones.apply_digital_signature(notaCredito, file_pk12, password,{})\n result = s.model.nodux_electronic_invoice_auth.conexiones.send_receipt(signed_document, {})\n if result != True:\n return result\n\n if self.company.party.email:\n email = self.company.party.email\n else:\n self.raise_user_error('No ha configurado el correo de la empresa')\n\n doc_xml, m, auth, path, numero, num = s.model.nodux_electronic_invoice_auth.conexiones.request_authorization(access_key, name_r, 'out_credit_note', signed_document,{})\n\n if doc_xml is None:\n msg = ' '.join(m)\n raise m\n\n if auth == 'NO AUTORIZADO':\n self.write([self],{\n 'estado_sri':'NO AUTORIZADO',\n 'numero_autorizacion': access_key,\n 'state':'draft'})\n return \"Comprobante se ha enviado al SRI pero no se ha AUTORIZADO, reenvie su comprobante\"\n else:\n self.write([self],{\n 'estado_sri':'AUTORIZADO',\n 'numero_autorizacion':access_key,\n 'state':'send'})\n self.send_mail_invoice(doc_xml, access_key, send_m, s)\n\n return access_key\n\n @classmethod\n def get_path(cls, formato, numero_autorizacion, id_reference):\n\n Company = Pool().get('company.company')\n companies = Company.search([('id', '=', 1)])\n for c in companies:\n company = c\n database = base64.decodestring(company.name_database)#base de datos creada para guardar datos de consultas facturas electronicas\n user = base64.decodestring(company.user_databse) #usuario de la base de datos postgres\n password = base64.decodestring(company.password_database) #password de la base de datos postgres\n host = base64.decodestring(company.host_database) #ip host\n\n Invoice = Pool().get('einvoice.einvoice')\n invoices = Invoice.search([('id_reference', '=', id_reference), ('type', '=', 'e_invoice')])\n for i in invoices:\n invoice = i\n numero_autorizacion = invoice.numero_autorizacion\n\n conn = psycopg2.connect(database=database,user= user, password=password, host=host)\n cur = conn.cursor()\n if formato == 'xml':\n invoice = cur.execute(\"SELECT path_xml FROM factura_web WHERE numero_autorizacion=%s\",[numero_autorizacion])\n path_xml = cur.fetchone()\n xml_element = urllib2.urlopen('http://nodux.ec:8085/static/'+str(path_xml[0]))\n xml_element =etree.parse(xml_element)\n xml_element = etree.tostring(xml_element,pretty_print=True ,xml_declaration=True, encoding=\"utf-8\")\n xml_element = xml_element.replace('<', '<').replace('>', '>')\n archivo = xmlrpclib.Binary(xml_element)\n\n\n if formato == 'pdf':\n invoice = cur.execute(\"SELECT path_pdf FROM factura_web WHERE numero_autorizacion=%s\",[numero_autorizacion])\n path_pdf = 
cur.fetchone()\n pool = Pool()\n Invoices = pool.get('einvoice.einvoice')\n invoices = Invoices.search([('numero_autorizacion', '=', numero_autorizacion)])\n for i in invoices:\n invoice = i\n InvoiceReport = pool.get('einvoice.einvoice', type='report')\n report = InvoiceReport.execute([invoice.id], {})\n archivo = xmlrpclib.Binary(report[1])\n\n return archivo\n\n @classmethod\n def get_path_adm(cls,id_reference):\n\n Company = Pool().get('company.company')\n companies = Company.search([('id', '=', 1)])\n for c in companies:\n company = c\n database = base64.decodestring(company.name_database)#base de datos creada para guardar datos de consultas facturas electronicas\n user = base64.decodestring(company.user_databse) #usuario de la base de datos postgres\n password = base64.decodestring(company.password_database) #password de la base de datos postgres\n host = base64.decodestring(company.host_database) #ip host\n\n Invoice = Pool().get('einvoice.einvoice')\n invoices = Invoice.search([('id_reference', '=', id_reference), ('type', '=', 'e_invoice')])\n for i in invoices:\n invoice = i\n numero_autorizacion = invoice.numero_autorizacion\n\n conn = psycopg2.connect(database=database,user= user, password=password, host=host)\n cur = conn.cursor()\n formato = 'pdf'\n\n if formato == 'pdf':\n invoice = cur.execute(\"SELECT path_pdf FROM factura_web WHERE numero_autorizacion=%s\",[numero_autorizacion])\n path_pdf = cur.fetchone()\n pool = Pool()\n Invoices = pool.get('einvoice.einvoice')\n invoices = Invoices.search([('numero_autorizacion', '=', numero_autorizacion)])\n for i in invoices:\n invoice = i\n InvoiceReport = pool.get('einvoice.einvoice', type='report')\n report = InvoiceReport.execute([invoice.id], {})\n archivo = xmlrpclib.Binary(report[1])\n\n return archivo\n\n\n @classmethod\n def get_invoice(cls, identificacion):\n Company = Pool().get('company.company')\n companies = Company.search([('id', '=', 1)])\n for c in companies:\n company = c\n database = base64.decodestring(company.name_database)#base de datos creada para guardar datos de consultas facturas electronicas\n user = base64.decodestring(company.user_databse) #usuario de la base de datos postgres\n password = base64.decodestring(company.password_database) #password de la base de datos postgres\n host = base64.decodestring(company.host_database) #ip host\n ruc = company.party.vat_number\n conn = psycopg2.connect(database=database,user= user, password=password, host=host)\n cur = conn.cursor()\n cur.execute(\"SELECT tipo, fecha, numero_comprobante, numero_autorizacion, total FROM factura_web WHERE cedula=%s and ruc=%s\", (identificacion, ruc))\n result = cur.fetchall()\n invoices = []\n for r in result:\n invoices.append(r)\n return invoices\n\n @classmethod\n def save_invoice(cls, tipo, id_factura, date, maturity_date, subtotal, tax, identificacion, items, razonSocial, firstname, lastname, email, address, city, state, country, phonenumber ):\n\n data = xmlrpclib.loads(items)\n lineas_producto = []\n for d in data[0]:\n for datas in d:\n for data_s in datas:\n lineas_producto.append( data_s.encode('latin-1'))\n #lineas_producto = str(data[0]).split(\", \")\n pool = Pool()\n Party = pool.get('party.party')\n Lines = pool.get('einvoice.einvoice.line')\n Invoice = pool.get('einvoice.einvoice')\n Template = pool.get('product.template')\n Product = pool.get('product.product')\n Units = pool.get('product.uom')\n e_invoices_c = None\n\n if tipo == 'factura':\n type_ = 'e_invoice'\n else:\n type_ = 'e_credit_note'\n e_invoices_c = 
Invoice.search([('id_reference', '=', str(id_factura)), ('type', '=', type_), ('anulada', '=', False)])\n\n if e_invoices_c:\n for invoice in e_invoices_c:\n if invoice.estado_sri == \"NO AUTORIZADO\":\n invoice.action_generate_invoice()\n invoice.connect_db()\n return \"Comprobante enviado con exito\"\n elif invoice.estado_sri == \"AUTORIZADO\":\n return \"Comprobante ya ha sido enviado anteriormente\"\n\n products = None\n parties = None\n direccion = \"Loja\"\n phone = \"\"\n name = firstname+ \" \" +lastname\n\n vat_number = str(identificacion)\n if len(vat_number) == 10:\n type_document = \"05\"\n if len(vat_number) == 13:\n type_document = \"04\"\n\n address = address\n importeTotal = Decimal(subtotal)+ Decimal(tax)\n totalSinImpuestos = Decimal(subtotal)\n date_str = str(date)\n parties = Party.search([('vat_number', '=', vat_number)])\n formatter_string = \"%Y-%m-%d\"\n datetime_object = datetime.datetime.strptime(date_str, formatter_string)\n fechaEmision = datetime_object.date()\n lineas = []\n if parties:\n for p in parties:\n party = p\n Contact = pool.get('party.contact_mechanism')\n Address = pool.get('party.address')\n\n if razonSocial != \"\":\n p.commercial_name = razonSocial\n p.name = name\n p.type_document = type_document\n p.vat_number = vat_number\n p.save()\n contact_mechanisms_old = Contact.search([('party', '=', p.id)])\n for contact_mechanism_old in contact_mechanisms_old:\n if email:\n correo = str(email)\n else:\n correo = 'hola@nodux.ec'\n if contact_mechanism_old.type == \"email\":\n contact_mechanism_old.value = correo\n contact_mechanism_old.save()\n if phone != \"\":\n if contact_mechanism_old.type == \"phone\":\n contact_mechanism_old.value = phone\n contact_mechanism_old.save()\n p.addresses[0].street = address\n p.save()\n else:\n party = Party()\n if email:\n correo = str(email)\n else:\n correo = 'hola@nodux.ec'\n Contact = pool.get('party.contact_mechanism')\n Address = pool.get('party.address')\n party.name = name\n if razonSocial != \"\":\n party.commercial_name = razonSocial\n party.type_document = type_document\n party.vat_number = vat_number\n party.save()\n contact_mechanisms = []\n contact_mechanisms.append({\n 'type':'email',\n 'value':correo,\n 'party':party.id\n })\n if phone != \"\":\n contact_mechanisms.append({\n 'type':'phone',\n 'value':phone,\n 'party':party.id,\n })\n party.address = Address.create([{\n 'street': address,\n 'party':party.id\n }])\n contact_mechanisms = Contact.create(contact_mechanisms)\n party.save()\n\n invoice = Invoice()\n invoice.company=1\n invoice.id_reference = str(id_factura)\n if tipo == 'factura':\n invoice.type = 'e_invoice'\n invoice.save()\n else:\n invoice.type = 'e_credit_note'\n invoice.save()\n anull_invoices = None\n anull_invoices = Invoice.search([('id_reference', '=', str(id_factura)), ('type', '=', 'e_invoice'), ('anulada', '=', False)])\n if anull_invoices:\n for a_invoice in anull_invoices:\n a_invoice.anulada = True\n a_invoice.save()\n\n cont = 1\n for l_p in lineas_producto:\n l_p1 = l_p.replace('[','').replace(']','').replace('(','').replace(')','').replace(\"'\",'').replace(',','')\n l_p1 = l_p1.split(' -- ')\n descripcion = l_p1[0]\n precio = l_p1[1]\n if descripcion:\n products = Template.search([('name', '=', descripcion)])\n units = Units.search([('name', '=', 'Unit')])\n unit = 1\n if units:\n for u in units:\n unit = u\n if products:\n for p in products:\n product = p\n else:\n product = Template()\n product.name = descripcion\n product.list_price = Decimal(precio)\n 
product.cost_price = Decimal(0.0)\n product.type = 'service'\n product.cost_price_method = 'fixed'\n product.default_uom = unit\n product.save()\n product.products = Product.create([{\n 'code': descripcion[0:3]+str(product.id),\n 'template':product.id,\n }])\n product.save()\n\n lineas.append({\n 'producto': product.id,\n 'description': product.name,\n 'unit_price': product.list_price,\n 'quantity': 1,\n 'invoice':invoice.id,\n })\n date = Pool().get('ir.date')\n date_sale = date.today()\n invoice.party= party.id\n invoice.subtotal= totalSinImpuestos\n invoice.iva= Decimal(tax)\n invoice.total= importeTotal\n invoice.invoice_date = date_sale\n lines = Lines.create(lineas)\n invoice.save()\n invoice.set_number()\n invoice.action_generate_invoice()\n invoice.connect_db()\n if invoice.estado_sri == \"AUTORIZADO\":\n return \"Comprobante enviado con éxito\"\n else:\n return \"Comprobante no se ha AUTORIZADO, contacte con el ADMINISTRADOR\"\n\n\n def generate_xml_invoice(self):\n factura = etree.Element('factura')\n factura.set(\"id\", \"comprobante\")\n factura.set(\"version\", \"1.1.0\")\n\n # generar infoTributaria\n infoTributaria = self.get_tax_element()\n factura.append(infoTributaria)\n\n # generar infoFactura\n infoFactura = self.get_invoice_element()\n factura.append(infoFactura)\n\n #generar detalles\n detalles = self.get_detail_element()\n factura.append(detalles)\n return factura\n\n #generar nota de Credito\n def generate_xml_credit_note(self):\n notaCredito = etree.Element('notaCredito')\n notaCredito.set(\"id\", \"comprobante\")\n notaCredito.set(\"version\", \"1.1.0\")\n\n # generar infoTributaria\n infoTributaria = self.get_tax_element()\n notaCredito.append(infoTributaria)\n\n #generar infoNotaCredito\n infoNotaCredito = self.get_credit_note_element()\n notaCredito.append(infoNotaCredito)\n\n #generar detalles\n detalles = self.get_detail_credit_note()\n notaCredito.append(detalles)\n return notaCredito\n\n def get_tax_element(self):\n company = self.company\n number = self.invoice_number\n infoTributaria = etree.Element('infoTributaria')\n etree.SubElement(infoTributaria, 'ambiente').text = self.company.tipo_de_ambiente\n etree.SubElement(infoTributaria, 'tipoEmision').text = self.company.emission_code\n etree.SubElement(infoTributaria, 'razonSocial').text = self.replace_character(self.company.party.name)\n if self.company.party.commercial_name:\n etree.SubElement(infoTributaria, 'nombreComercial').text = self.replace_character(self.company.party.commercial_name)\n etree.SubElement(infoTributaria, 'ruc').text = self.company.party.vat_number\n etree.SubElement(infoTributaria, 'claveAcceso').text = self.generate_access_key()\n etree.SubElement(infoTributaria, 'codDoc').text = tipoDocumento[self.type]\n etree.SubElement(infoTributaria, 'estab').text = number[0:3]\n etree.SubElement(infoTributaria, 'ptoEmi').text = number[4:7]\n etree.SubElement(infoTributaria, 'secuencial').text = number[8:17]\n if self.company.party.addresses:\n etree.SubElement(infoTributaria, 'dirMatriz').text = self.replace_character(self.company.party.addresses[0].street)\n return infoTributaria\n\n def get_invoice_element(self):\n company = self.company\n party = self.party\n infoFactura = etree.Element('infoFactura')\n etree.SubElement(infoFactura, 'fechaEmision').text = self.invoice_date.strftime('%d/%m/%Y')\n if self.company.party.addresses:\n etree.SubElement(infoFactura, 'dirEstablecimiento').text = self.replace_character(self.company.party.addresses[0].street)\n if 
    def get_invoice_element(self):\n        company = self.company\n        party = self.party\n        infoFactura = etree.Element('infoFactura')\n        etree.SubElement(infoFactura, 'fechaEmision').text = self.invoice_date.strftime('%d/%m/%Y')\n        if self.company.party.addresses:\n            etree.SubElement(infoFactura, 'dirEstablecimiento').text = self.replace_character(self.company.party.addresses[0].street)\n        if self.company.party.contribuyente_especial_nro:\n            etree.SubElement(infoFactura, 'contribuyenteEspecial').text = self.company.party.contribuyente_especial_nro\n        if self.company.party.mandatory_accounting:\n            etree.SubElement(infoFactura, 'obligadoContabilidad').text = self.company.party.mandatory_accounting\n        else:\n            etree.SubElement(infoFactura, 'obligadoContabilidad').text = 'NO'\n        if self.party.type_document:\n            etree.SubElement(infoFactura, 'tipoIdentificacionComprador').text = tipoIdentificacion[self.party.type_document]\n        else:\n            self.raise_user_error(\"No ha configurado el tipo de identificacion del cliente\")\n\n        if self.party.commercial_name:\n            etree.SubElement(infoFactura, 'razonSocialComprador').text = self.replace_character(self.party.commercial_name)\n        else:\n            etree.SubElement(infoFactura, 'razonSocialComprador').text = self.replace_character(self.party.name)\n\n        etree.SubElement(infoFactura, 'identificacionComprador').text = self.party.vat_number\n        if self.party.addresses:\n            etree.SubElement(infoFactura, 'direccionComprador').text = self.replace_character(self.party.addresses[0].street)\n        etree.SubElement(infoFactura, 'totalSinImpuestos').text = '%.2f' % (self.subtotal)\n\n        descuento = Decimal(0.0)\n        for line in self.lines:\n            if line.amount < Decimal(0.0):\n                descuento += (line.amount*-1)\n        etree.SubElement(infoFactura, 'totalDescuento').text = '%.2f' % (descuento) # the discount is already included in the price; defaults to 0.0\n\n        totalConImpuestos = etree.Element('totalConImpuestos')\n        totalImpuesto = etree.Element('totalImpuesto')\n        codigoPorcentaje = '3'\n        codigo = '2'\n\n        etree.SubElement(totalImpuesto, 'codigo').text = codigo\n        etree.SubElement(totalImpuesto, 'codigoPorcentaje').text = codigoPorcentaje\n        etree.SubElement(totalImpuesto, 'baseImponible').text = '{:.2f}'.format(self.subtotal)\n        etree.SubElement(totalImpuesto, 'valor').text = '{:.2f}'.format(self.iva)\n        totalConImpuestos.append(totalImpuesto)\n        infoFactura.append(totalConImpuestos)\n        etree.SubElement(infoFactura, 'propina').text = '0.00'\n        etree.SubElement(infoFactura, 'importeTotal').text = '{:.2f}'.format(self.total)\n        etree.SubElement(infoFactura, 'moneda').text = 'DOLAR'\n\n        return infoFactura\n\n
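    # For reference, the tax block above renders roughly as (figures assumed):\n    #   <totalConImpuestos><totalImpuesto>\n    #     <codigo>2</codigo><codigoPorcentaje>3</codigoPorcentaje>\n    #     <baseImponible>100.00</baseImponible><valor>14.00</valor>\n    #   </totalImpuesto></totalConImpuestos>\n    # codigo '2' is IVA; codigoPorcentaje '3' is the 14% rate used throughout.\n\n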
    def get_detail_element(self):\n        detalles = etree.Element('detalles')\n\n        for line in self.lines:\n            if line.amount < Decimal(0.0):\n                pass\n            else:\n                pool = Pool()\n                detalle = etree.Element('detalle')\n                Product = pool.get('product.product')\n                product = None\n                products = Product.search([('template', '=', line.producto.id)])\n                for p in products:\n                    product = p\n                if product:\n                    etree.SubElement(detalle, 'codigoPrincipal').text = self.replace_character(product.code)\n                else:\n                    etree.SubElement(detalle, 'codigoPrincipal').text = '[COD0]'\n                etree.SubElement(detalle, 'descripcion').text = self.replace_character(line.description) #fix_chars(line.description)\n                etree.SubElement(detalle, 'cantidad').text = '%.2f' % (line.quantity)\n                etree.SubElement(detalle, 'precioUnitario').text = '%.2f' % (line.unit_price)\n                etree.SubElement(detalle, 'descuento').text = '0.00'\n                etree.SubElement(detalle, 'precioTotalSinImpuesto').text = '%.2f' % (line.amount)\n                impuestos = etree.Element('impuestos')\n                impuesto = etree.Element('impuesto')\n                etree.SubElement(impuesto, 'codigo').text = \"2\"\n                etree.SubElement(impuesto, 'codigoPorcentaje').text = '3' # 3 = 14% IVA, 2 = 12% IVA, 0 = 0% IVA\n                etree.SubElement(impuesto, 'tarifa').text = '14' # possible rates: 14, 12, 0\n                etree.SubElement(impuesto, 'baseImponible').text = '{:.2f}'.format(line.amount)\n                etree.SubElement(impuesto, 'valor').text = '{:.2f}'.format(line.amount*Decimal(0.14))\n                impuestos.append(impuesto)\n                detalle.append(impuestos)\n                detalles.append(detalle)\n        return detalles\n\n
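    # A credit note must reference the invoice it amends: codDocModificado '01'\n    # (factura) plus the original number and issue date are emitted below.\n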
    def get_credit_note_element(self):\n        pool = Pool()\n        company = self.company\n        Invoice = pool.get('einvoice.einvoice')\n        motivo='Emitir factura con el mismo concepto'\n\n        invoices = Invoice.search([('id_reference', '=', self.id_reference), ('type', '=', 'e_invoice'), ('id_reference', '!=', None)])\n        for i in invoices:\n            invoice = i\n        infoNotaCredito = etree.Element('infoNotaCredito')\n        etree.SubElement(infoNotaCredito, 'fechaEmision').text = self.invoice_date.strftime('%d/%m/%Y')\n        etree.SubElement(infoNotaCredito, 'dirEstablecimiento').text = self.company.party.addresses[0].street\n        if self.party.type_document:\n            etree.SubElement(infoNotaCredito, 'tipoIdentificacionComprador').text = tipoIdentificacion[self.party.type_document]\n        else:\n            self.raise_user_error(\"No ha configurado el tipo de identificacion del cliente\")\n        etree.SubElement(infoNotaCredito, 'razonSocialComprador').text = self.replace_character(self.party.name)\n        etree.SubElement(infoNotaCredito, 'identificacionComprador').text = self.party.vat_number\n        if self.company.party.mandatory_accounting:\n            etree.SubElement(infoNotaCredito, 'obligadoContabilidad').text = self.company.party.mandatory_accounting\n        else:\n            etree.SubElement(infoNotaCredito, 'obligadoContabilidad').text = 'NO'\n\n        etree.SubElement(infoNotaCredito, 'rise').text = '01'\n        etree.SubElement(infoNotaCredito, 'codDocModificado').text = '01'\n        etree.SubElement(infoNotaCredito, 'numDocModificado').text = invoice.invoice_number\n        etree.SubElement(infoNotaCredito, 'fechaEmisionDocSustento').text = invoice.invoice_date.strftime('%d/%m/%Y')\n        etree.SubElement(infoNotaCredito, 'totalSinImpuestos').text = '%.2f'%(self.subtotal)\n        etree.SubElement(infoNotaCredito, 'valorModificacion').text = '%.2f'%(self.total)\n        etree.SubElement(infoNotaCredito, 'moneda').text = 'DOLAR'\n\n        totalConImpuestos = etree.Element('totalConImpuestos')\n        totalImpuesto = etree.Element('totalImpuesto')\n        codigoPorcentaje = '3'\n        codigo = '2'\n\n        etree.SubElement(totalImpuesto, 'codigo').text = codigo\n        etree.SubElement(totalImpuesto, 'codigoPorcentaje').text = codigoPorcentaje\n        etree.SubElement(totalImpuesto, 'baseImponible').text = '{:.2f}'.format(self.subtotal)\n        etree.SubElement(totalImpuesto, 'valor').text = '{:.2f}'.format(self.iva)\n        totalConImpuestos.append(totalImpuesto)\n        infoNotaCredito.append(totalConImpuestos)\n        etree.SubElement(infoNotaCredito, 'motivo').text= self.replace_character(motivo)\n        return infoNotaCredito\n\n    # credit note detail lines\n    def get_detail_credit_note(self):\n\n        detalles = etree.Element('detalles')\n        for line in self.lines:\n            pool = Pool()\n            detalle = etree.Element('detalle')\n            Product = pool.get('product.product')\n            product = None\n            products = Product.search([('template', '=', line.producto.id)])\n            for p in products:\n                product = p\n            if product:\n                etree.SubElement(detalle, 'codigoInterno').text = self.replace_character(product.code)\n            else:\n                etree.SubElement(detalle, 'codigoInterno').text = '[COD0]'\n            etree.SubElement(detalle, 'descripcion').text = self.replace_character(line.description) #fix_chars(line.description)\n            etree.SubElement(detalle, 'cantidad').text = '%.2f' % (line.quantity)\n            etree.SubElement(detalle, 'precioUnitario').text = '%.2f' % (line.unit_price)\n            etree.SubElement(detalle, 'descuento').text = '0.00'\n            etree.SubElement(detalle, 'precioTotalSinImpuesto').text = '%.2f' % (line.amount)\n            impuestos = etree.Element('impuestos')\n            impuesto = etree.Element('impuesto')\n            etree.SubElement(impuesto, 'codigo').text = \"2\"\n            etree.SubElement(impuesto, 'codigoPorcentaje').text = '3' # 3 = 14% IVA, 2 = 12% IVA, 0 = 0% IVA\n            etree.SubElement(impuesto, 'tarifa').text = '14' # possible rates: 14, 12, 0\n            etree.SubElement(impuesto, 'baseImponible').text = '{:.2f}'.format(line.amount)\n            etree.SubElement(impuesto, 'valor').text = '{:.2f}'.format(line.amount*Decimal(0.14))\n            impuestos.append(impuesto)\n            detalle.append(impuestos)\n            detalles.append(detalle)\n        return detalles\n\n    def generate_access_key(self):\n        f = self.invoice_date.strftime('%d%m%Y')\n        t_cbte = tipoDocumento[self.type]\n        ruc = self.company.party.vat_number\n        t_amb=self.company.tipo_de_ambiente\n        n_cbte= self.invoice_number\n        cod= \"14526873\"\n        t_ems= self.company.emission_code\n        numero_cbte= n_cbte.replace('-','')\n        # join all the pieces into a single string\n        key_temp=f+t_cbte+ruc+t_amb+numero_cbte+cod+t_ems\n\n        # walk the string, collecting its digits as integers\n        key = []\n        for c in key_temp:\n            key.append(int(c))\n        key.reverse()\n        factor = [2,3,4,5,6,7]\n        stage1 = sum([n*factor[i%6] for i,n in enumerate(key)])\n        stage2 = stage1 % 11\n        digit = 11 - (stage2)\n        if digit == 11:\n            digit = 0\n        if digit == 10:\n            digit = 1\n        digit=str(digit)\n        access_key= key_temp + digit\n        return access_key\n
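\n    # Worked example of the modulus-11 scheme above, using made-up reversed\n    # digits 4,3,2,1 and the cycling weights 2,3,4,5,6,7:\n    #   stage1 = 4*2 + 3*3 + 2*4 + 1*5 = 30\n    #   stage2 = 30 % 11 = 8  ->  digit = 11 - 8 = 3\n    # (a raw result of 11 maps to 0 and 10 maps to 1, as handled above).\n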
\nclass InvoiceLine(ModelSQL, ModelView):\n    'Invoice Line'\n    __name__ = 'einvoice.einvoice.line'\n    _rec_name = 'description'\n    invoice = fields.Many2One('einvoice.einvoice', 'Invoice', ondelete='CASCADE',\n        select=True)\n    quantity = fields.Integer('Quantity')\n    producto = fields.Many2One('product.template', 'Product')\n    unit_price = fields.Numeric('Unit Price', digits=(16, 4))\n    amount = fields.Function(fields.Numeric('Amount', digits=(16, 4)), 'get_amount')\n    description = fields.Text('Description', size=None, required=True)\n    sequence = fields.Integer('Sequence',\n        states={\n            'invisible': Bool(Eval('context', {}).get('standalone')),\n        })\n\n    @classmethod\n    def __setup__(cls):\n        super(InvoiceLine, cls).__setup__()\n\n        cls._order.insert(0, ('sequence', 'ASC'))\n        cls._error_messages.update({\n            'modify': ('You can not modify line \"%(line)s\" from invoice '\n                '\"%(invoice)s\" that is posted or paid.'),\n            'create': ('You can not add a line to invoice \"%(invoice)s\" '\n                'that is posted, paid or cancelled.'),\n            'account_different_company': (\n                'You can not create invoice line '\n                '\"%(line)s\" on invoice \"%(invoice)s of company '\n                '\"%(invoice_line_company)s because account \"%(account)s '\n                'has company \"%(account_company)s\".'),\n            'same_account_on_invoice': ('You can not create invoice line '\n                '\"%(line)s\" on invoice \"%(invoice)s\" because the invoice '\n                'uses the same account (%(account)s).'),\n        })\n\n    @staticmethod\n    def order_sequence(tables):\n        table, _ = tables[None]\n        return [table.sequence == None, table.sequence]\n\n    @fields.depends('quantity', 'unit_price')\n    def on_change_with_amount(self):\n        amount = (Decimal(str(self.quantity or '0.0'))\n            * (self.unit_price or Decimal('0.0')))\n        return amount\n\n    def get_amount(self, name):\n        return self.on_change_with_amount()\n\n\n    @classmethod\n    def check_modify(cls, lines):\n        for line in lines:\n            if (line.invoice\n                    and line.invoice.state in ('posted', 'paid')):\n                cls.raise_user_error('modify', {\n                    'line': line.rec_name,\n                    'invoice': line.invoice.rec_name\n                })\n\n    @classmethod\n    def delete(cls, lines):\n        cls.check_modify(lines)\n        super(InvoiceLine, cls).delete(lines)\n\n    @classmethod\n    def write(cls, *args):\n        lines = sum(args[0::2], [])\n        cls.check_modify(lines)\n        super(InvoiceLine, cls).write(*args)\n\n    @classmethod\n    def copy(cls, lines, default=None):\n        if default is None:\n            default = {}\n        default = default.copy()\n        return super(InvoiceLine, cls).copy(lines, default=default)\n\n    @classmethod\n    def create(cls, vlist):\n        Invoice = Pool().get('einvoice.einvoice')\n        invoice_ids = []\n        for vals in vlist:\n            if vals.get('invoice'):\n                invoice_ids.append(vals.get('invoice'))\n        for invoice in Invoice.browse(invoice_ids):\n            if invoice.state in ('send',):  # tuple, not a string membership test\n                cls.raise_user_error('create', (invoice.rec_name,))\n        return super(InvoiceLine, cls).create(vlist)\n\n\nclass EInvoiceReport(Report):\n    __name__ = 'einvoice.einvoice'\n\n    @classmethod\n    def __setup__(cls):\n        super(EInvoiceReport, cls).__setup__()\n        cls.__rpc__['execute'] = RPC(False)\n\n    @classmethod\n    def execute(cls, ids, data):\n        EInvoice = Pool().get('einvoice.einvoice')\n\n        res = super(EInvoiceReport, cls).execute(ids, data)\n        if len(ids) > 1:\n            res = (res[0], res[1], True, res[3])\n        else:\n            einvoice = EInvoice(ids[0])\n            if einvoice.invoice_number:\n                res = (res[0], res[1], res[2], res[3] + ' - ' + einvoice.invoice_number)\n        return res\n\n    @classmethod\n    def _get_records(cls, ids, model, data):\n        with Transaction().set_context(language=False):\n            return super(EInvoiceReport, cls)._get_records(ids[:1], model, data)\n\n    @classmethod\n    def parse(cls, report, records, data, localcontext):\n        pool = Pool()\n        User = pool.get('res.user')\n        EInvoice = pool.get('einvoice.einvoice')\n\n        einvoice = records[0]\n\n        user = User(Transaction().user)\n        localcontext['company'] = user.company\n        if einvoice.numero_autorizacion:\n            localcontext['barcode_img']=cls._get_barcode_img(EInvoice, einvoice)\n        if einvoice.type == 'e_credit_note':\n            localcontext['numero'] = cls._get_numero(EInvoice, einvoice)\n            localcontext['fecha'] = cls._get_fecha(EInvoice, einvoice)\n            localcontext['motivo'] = 'Emitir factura con el mismo concepto'\n        localcontext['subtotal0'] = '0.0'\n        localcontext['subtotal14'] = einvoice.subtotal\n        localcontext['descuento'] = cls._get_descuento(EInvoice, einvoice)\n\n        return super(EInvoiceReport, cls).parse(report, records, data,\n            localcontext=localcontext)\n\n    @classmethod\n    def _get_descuento(cls, EInvoice, einvoice):\n        numero = None\n        descuento = Decimal(0.0)\n        for line in einvoice.lines:\n            if line.amount < Decimal(0.0):\n                descuento += (line.amount*-1)\n\n        return descuento\n\n    @classmethod\n    def _get_numero(cls, EInvoice, einvoice):\n        numero = None\n        invoices = EInvoice.search([('id_reference', '=', einvoice.id_reference), ('type', '=', 'e_invoice'), ('id_reference', '!=', None)])\n        for i in invoices:\n            numero = i.invoice_number\n        return numero\n\n    @classmethod\n    def _get_fecha(cls, EInvoice, einvoice):\n        fecha = None\n        invoices = EInvoice.search([('id_reference', '=', einvoice.id_reference), ('type', '=', 'e_invoice'), ('id_reference', '!=', None)])\n        for i in invoices:\n            fecha = i.invoice_date\n        return fecha\n\n    @classmethod\n    def _get_barcode_img(cls, EInvoice, einvoice):\n        from barras import CodigoBarra\n        from cStringIO import StringIO as StringIO\n        # create the helper:\n        codigobarra = CodigoBarra()\n        output = StringIO()\n        bars= einvoice.numero_autorizacion\n        codigobarra.GenerarImagen(bars, output, basewidth=3, width=380, height=50, extension=\"PNG\")\n        image = buffer(output.getvalue())\n        output.close()\n        return 
image\n","repo_name":"fabyc/nodux_einvoice_whmcs","sub_path":"invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":52313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6359923444","text":"import json\n\nfrom flask import Flask\nfrom eve import Eve\nfrom eve.auth import BasicAuth\nimport dropbox\nimport time\nimport os\n\n#!flask/bin/python\nfrom flask import Blueprint, request, redirect, url_for, g, jsonify\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return \"Hello, World!\"\n\n\n@app.route('/senddata', methods=['POST'])\ndef upload_data():\n # Get your app key and secret from the Dropbox developer website\n app_key = 'wkbpkn13tc7zmew'\n app_secret = 'qqjx29o4ctunmo5'\n flow = dropbox.client.DropboxOAuth2FlowNoRedirect(app_key, app_secret)\n #Use this only if we want to upload to third party Dropbox account. Otherwise use our access token below\n # Have the user sign in and authorize this token\n #authorize_url = flow.start()\n #print ('1. Go to: ' + authorize_url)\n #print ('2. Click \"Allow\" (you might have to log in first)')\n #print ('3. Copy the authorization code.')\n #code = input(\"Enter the authorization code here: \").strip()\n #access_token, user_id = flow.finish(code)\n # This will fail if the user enters an invalid authorization code\n\n\n access_token = 'UdQIYgVxjdAAAAAAAAAAHze8SuNXW5mycFPEdcWl1eYj0KG0GJFGu-je18XlvvWo'\n client = dropbox.client.DropboxClient(access_token)\n print ('linked account: ', client.account_info())\n\n #create temp file\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n filename = 'scanfile' + timestr + '.txt'\n file = open(filename,\"w\")\n\n file.write('Hello World')\n print(request)\n file.write(str(request.form))\n file.close()\n\n #upload temp file\n f = open(filename, 'rb')\n response = client.put_file('/' + filename, f)\n print ('uploaded: ', response)\n #remote temp file\n os.remove(filename)\n\n return 'OK'\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n","repo_name":"nikitph/quiks-api","sub_path":"quiks.py","file_name":"quiks.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2119843226","text":"from fastapi import APIRouter\nfrom ..common.config import config\nfrom .products import products_router\nfrom .categories import categories_router\nfrom .parameters import parameters_router\n\n\napi_router = APIRouter(\n prefix=config.router_prefix\n)\n\n\nROUTERS = (\n (parameters_router, \"/parameters\", \"Parameters\"),\n (categories_router, \"/categories\", \"Categories\"),\n (products_router, \"/products\", \"Products\")\n)\n\n\nfor router in ROUTERS:\n api_router.include_router(\n router=router[0], prefix=router[1], tags=[router[2]]\n )\n","repo_name":"geekmorn/linetok","sub_path":"backend/src/api/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35141764146","text":"#! 
/usr/bin/env python3\n\nimport numpy as np \nfrom matplotlib import pyplot as plt \nfrom velocity_updated import calc_position\n\n\n# multiple \nN = 30\ntemp_min = 0\ntemp_max = 10 \ndelta = 0.1\ntemp = np.arange(temp_min, temp_max, delta)\n\n# matrix\nposition = np.zeros((N, len(temp)))\n\n# count bounced\nN_bounce = np.zeros(N)\n\nvelocity = np.random.rand(N) * 0.5\nstart_pos = np.random.rand(N)\n\nfor i in range(N):\n position[i, :], _, N_bounce[i] = calc_position(start_pos[i], \n velocity[i], \n delta=delta, \n temp_min=temp_min, \n temp_max=temp_max)\n\nplt.hist(velocity, color='red')\nplt.xlabel('Velocity (m/s)')\nplt.ylabel('Number of particles')\nplt.title('Initial velocityes (rand)')\nplt.savefig('fig_for_dist.eps')\nplt.show()\n\nfor i in range(N):\n plt.plot(temp, position[i, :], color='green')\n\nplt.xlabel('Time (s)')\nplt.ylabel('Pos (m)')\nplt.ylim((-0.1, 1.1))\nplt.title('Pos of ->N<- particles versus time')\nplt.savefig('fig_n_bounces.eps')\nplt.show()\n\nplt.scatter(np.abs(velocity), N_bounce, color='gray')\nplt.xlabel('speed (m/s) = |velocity|')\nplt.ylabel('Number of bounces')\nplt.savefig('fig_bounce_vs_speed.eps')\nplt.show()","repo_name":"golanghack/ai_only","sub_path":"practic/phisics/module_2/bouns_histogram.py","file_name":"bouns_histogram.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74196619466","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport unittest\nimport rosunit\nimport rospy\nimport socket\n\nfrom z_laser_viz.zlp_viz import ZLPVisualizer\nfrom z_laser_zlp1.zlp_utils import CoordinateSystemParameters\nfrom z_laser_msgs.srv import ProjectionElementRequest\nfrom z_laser_msgs.msg import Figure\nfrom visualization_msgs.msg import MarkerArray\n\nclass TestViz(unittest.TestCase):\n\n # test creation of base marker\n def test1_create_maker(self):\n\n viz = ZLPVisualizer()\n\n try:\n m = viz.base_marker(\"test_marker\")\n self.assertEquals(m.header.frame_id, \"test_marker\")\n except Exception as e:\n self.fail(\"%s\" %e)\n\n # test creation of axis and frame for coordinate system\n def test2_create_frame(self):\n\n viz = ZLPVisualizer()\n\n cs_params = CoordinateSystemParameters()\n cs_params.P[0].x = -100\n cs_params.P[0].y = -100\n cs_params.P[1].x = 100\n cs_params.P[1].y = -100\n cs_params.P[2].x = 100\n cs_params.P[2].y = 100\n cs_params.P[3].x = -100\n cs_params.P[3].y = 100\n\n try:\n axis_x, axis_y = viz.coord_sys_axes(cs_params)\n frame = viz.coord_sys_frame(cs_params)\n self.assertEquals(len(axis_x.points), 2)\n self.assertEquals(len(axis_y.points), 2)\n self.assertEquals(len(frame.points), 5)\n except Exception as e:\n self.fail(\"%s\" %e)\n\n # test define figure equations\n def test3_figure_eq(self):\n\n viz = ZLPVisualizer()\n\n try:\n line = viz.line_eq(10, 0)\n oval = viz.oval_eq(5, 2, 0)\n circle = viz.circle_eq(5, 0, 360)\n except Exception as e:\n self.fail(\"%s\" %e)\n\n # test translate and rotate\n def test4_operations(self):\n\n viz = ZLPVisualizer()\n m = viz.base_marker(\"test_marker\")\n\n figure = Figure()\n figure.position.x = 0 \n figure.position.y = 0 \n figure.position.z = 0 \n figure.size.append(100) # length\n figure.size.append(10) # height\n figure.angle.append(0)\n figure.angle.append(0)\n\n try: \n viz.translate(m, dx=1)\n self.assertEquals(m.pose.position.x, 1)\n self.assertEquals(m.pose.position.y, 0)\n viz.translate(m, dy=2)\n self.assertEquals(m.pose.position.x, 1)\n 
self.assertEquals(m.pose.position.y, 2)\n            viz.translate(m, dy=-1)\n            self.assertEquals(m.pose.position.y, 1)\n            viz.rotate(m, 90)\n            figure.figure_type = Figure.POLYLINE\n            viz.scale(m, 2, figure)\n            figure.figure_type = Figure.CIRCLE\n            viz.scale(m, 2, figure)\n            figure.figure_type = Figure.ARC\n            viz.scale(m, 2, figure)\n            figure.figure_type = Figure.OVAL\n            viz.scale(m, 2, figure)\n            figure.figure_type = Figure.TEXT\n            viz.scale(m, 2, figure)\n\n        except Exception as e:\n            self.fail(\"%s\" %e)\n\nif __name__ == '__main__':\n    rosunit.unitrun('z_laser_viz', 'test_viz', TestViz, sysargs=None)","repo_name":"fada-catec/z_laser_projector","sub_path":"z_laser_viz/test/test_viz.py","file_name":"test_viz.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"70315847626","text":"import unittest\nimport os\nfrom cryptography.hazmat.primitives import serialization\nfrom digital_signature import sign_file\n\nclass TestDigitalSignature(unittest.TestCase):\n    def test_sign_file(self):\n        # Paths to the test files\n        private_key_path = \"private_key.pem\"\n        file_path = \"fitxer.txt\"\n\n        # Generate the digital signature\n        signature = sign_file(private_key_path, file_path)\n\n        # Check that a signature was produced\n        self.assertIsNotNone(signature)\n\n        # Check that the signature has the expected length\n        self.assertEqual(len(signature), 256)\n\n        # Remove the test file after each test run\n        os.remove(file_path)\n\nif __name__ == '__main__':\n    unittest.main()\n\n\n# In this example, the unittest.TestCase class from the unittest library is used to write the test. The test_sign_file method exercises the sign_file function with the test files private_key.pem and fitxer.txt.\n\n# Inside the test, it is asserted that the signature was generated and is not None. It also checks that the signature length matches the expected length (here, 256 bytes, which corresponds to a 2048-bit RSA key). Finally, the test file fitxer.txt is removed after each test run.\n\n# To run the test, save it in a Python file such as test_digital_signature.py and, from the command line, run python test_digital_signature.py. The result of the executed test will be displayed.\n
","repo_name":"Byblonomikon/Farcast-Network","sub_path":"DigitalSignature/UnitTest_Signature.py","file_name":"UnitTest_Signature.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"ca","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"32700971041","text":"import boto3\n\nfruits = ['apples','oranges','bananas']\n\nfor fruit in fruits:\n    print(f'The best fruit now is {fruit}')\n    \n# this starts counting at 0\nnumbers = [0,1,2,3,4,5,6,7,8,9,10]\nfor number in numbers:\n    print(f'The next number is {number}')\n    \n# this starts counting at 1\nfor number in range(1,10):\n    print(f'The next number is {number}')\n    \n# for only odd or even number counting\nfor number in range(1,10,2):\n    print(f'The next number is {number}')\n    \n","repo_name":"StaceyM0/HackerRank","sub_path":"AWS_Lab/Counting_code.py","file_name":"Counting_code.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"31764926275","text":"# import BeautifulSoup and give it the alias 'bs'\nfrom bs4 import BeautifulSoup as bs\n\n# import strftime, gmtime and time from the 'time' module - https://docs.python.org/3/library/time.html\nfrom time import strftime, gmtime, time\n# https://docs.python.org/3/library/time.html#time.strftime\n# strftime(format, date) - converts a date returned by gmtime() or localtime() into a string with the given format\n# https://docs.python.org/3/library/time.html#time.gmtime\n# gmtime() - converts a timestamp into a date/time object (struct_time)\n\nimport requests, json, os, platform\n# requests - Python module for making content requests\n# json - Python module for working with JSON\n# os - module used to interact with the system or the console running this code\n# (https://docs.python.org/3/library/os.html)\n# platform - accesses the platform's identification data\n# (https://docs.python.org/3/library/platform.html)\n\n# This code shows a city's minimum and maximum temperature from the\n# home page of the National Weather Service website (the US weather service)\n\n# finds the first blank space in a string\ndef find_whitespace(string):\n    for char in string:\n        if char == ' ':\n            return string.index(char)\n\n# function that converts a temperature from Fahrenheit to Celsius and vice versa\n# https://www.w3resource.com/python-exercises/python-conditional-exercise-2.php\ndef converte_temperatura(temp, callback):\n    # strip any text that comes before the temperature itself\n    temperature = temp[find_whitespace(temp)+1:]\n    # convert the temperature value to int (so the math can be done)\n    degree = int(temperature[:find_whitespace(temperature)])\n\n    # take the last character of the string to identify the scale (Celsius or Fahrenheit)\n    scale = temp[-1]\n    \n    if scale.upper() == \"C\":\n        result = int(round((9 * degree) / 5 + 32))\n        scale = \"F\"\n    elif scale.upper() == \"F\":\n        result = int(round((degree - 32) * 5 / 9))\n        scale = \"C\"\n    else:\n        print(\"A temperatura informada é inválida.\")\n        return None  # avoids a NameError below when the scale is unknown\n    return callback(result, scale)\n
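\n# Example (input format assumed to be \"Low: 50 F\"): converte_temperatura(\"Low: 50 F\",\n# lambda t, s: print(t, s)) prints \"10 C\", since (50 - 32) * 5 / 9 = 10.\n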
requests.get(\"https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/find\", params=payloadBuscaCidade)\n retornoBusca = buscaCidade.json()\n\n #os resultados da consulta acima ficam dentro de um objeto 'locations'\n dadosCidade = retornoBusca['locations'][0]\n\n print(\"\\nPrevisão do tempo atual para \",dadosCidade['name'],\":\",sep='')\n\n # com a busca acima ainda é necessário se obter os dados de latitude e longitude\n # para que outro serviço busque os dados meteorológicos com base em coordenadas\n latLon = dadosCidade['feature']['geometry']\n\n # estas coordenadas então são adicionadas aos parâmetros de outra consulta\n payload = {\n 'lat': round(latLon['y'], 4),\n 'lon': round(latLon['x'], 4)\n }\n busca = requests.get(\"https://forecast.weather.gov/MapClick.php?\", params=payload)\n\n # esta consulta, por sua vez, leva à página de previsão do tempo (ao invés de retornar um JSON)\n retorno = bs(busca.text, \"html.parser\")\n # é obtido a condição meteorológica da região\n print(retorno.find(class_=\"myforecast-current\").get_text())\n # e a temperatura (em graus celsius)\n print(retorno.find(class_=\"myforecast-current-sm\").get_text(),\"\\n\")\n\n # esta página também possui informação detalhada da previsão para os próximos dias\n previsao_semana = retorno.find_all(class_=\"forecast-tombstone\")\n cont = 0\n # e para cada um deles...\n for previsao in previsao_semana:\n # retorno o nome do dia\n print(previsao.find(class_=\"period-name\").get_text(separator=u' '),\":\",sep='')\n # a descrição da condição do clima\n print(\" \",previsao.find(class_=\"short-desc\").get_text(separator=u' '))\n # e a temperatura (em ºF, porque eles querem)\n temperatura = previsao.find(class_=\"temp\").get_text(separator=u' ')\n # mas como bom fuçador fiz uma função que converte essa temperatura para facilitar a vida\n # esta função recebe uma string com a temperatura, e retorna duas coisas:\n # valor da temperatura e a escala (celsius ou fahrenheit)\n # como segundo parâmetro fiz uma função lamba que recebe estes 2 parâmetros e printa\n # a temperatura formatada\n converte_temperatura(temperatura, lambda temp, escala: \n print(\" \",temp,\"º\",escala,sep='') \n )\n cont += 1\n if cont % 2 == 1: print(\"-------------------------------\")\n\n# como todo bom programador nutella é importante limpar o console antes de rodar alguma coisa\n# então utilizando o platform.system() para identificar o S.O. e o os.system para interagir\n# com o console eu faço a limpeza\nif platform.system() == 'Windows':\n os.system('cls')\nelse:\n os.system('clear')\n\nencontrou = False\nwhile encontrou == False:\n print(\"(Caso queira sair do programa digite 'sair' no nome da cidade)\")\n cidade = input(\"Digite o nome de uma cidade: \")\n if cidade == 'sair':\n print(\"(Programa encerrado)\")\n break\n payloadBusca = {\n 'f': 'json',\n 'maxSuggestions': 10,\n 'text': cidade\n }\n busca = requests.get(\"https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/suggest\", params=payloadBusca)\n retorno = busca.json()['suggestions']\n if len(retorno) == 0:\n print(\"\\nNenhuma cidade foi encontrada. 
Tente novamente.\")\n else:\n encontrou = True\n if len(retorno) > 1:\n cont = 0\n for cidade in retorno:\n print(\"\\n\",cont,\" - \",cidade['text'], sep='')\n cont += 1\n \n print(\"\\nVárias cidades foram encontradas.\")\n selecao = input(\"Digite o número antes do nome da cidade desejada para selecioná-la: \")\n selecao = int(selecao)\n cont -= 1\n if selecao > cont:\n print(\"Este número não é válido, olhe novamente a lista.\")\n else:\n exibe_previsao_tempo(retorno[selecao])\n else:\n exibe_previsao_tempo(retorno[0])\n","repo_name":"henrikato/mainrepo","sub_path":"Python/Web Crawling/Web Crawling em sites meteorológicos/tarefa3-2.py","file_name":"tarefa3-2.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16595763418","text":"import pyrosim\n\nimport constants as c\n\nfrom env_shape import EnvShape, Rectangle\n\nclass Environment:\n def __init__(self, debug = False):\n # the object environment objs\n self.eobjs = {}\n\n # ids of parts that have sensors\n self.ids = {}\n\n self.debug = debug\n self.set_up_env()\n\n def set_up_env(self):\n init_z = c.BASE\n\n plt_comp_x_adj = c.PLT_LENGTH/2 - c.PLT_COMP_WIDTH/2\n plt_comp_y_adj = c.PLT_WIDTH/2 - c.PLT_COMP_WIDTH/2\n plt_comp_hgt_adj = init_z + c.PED_HEIGHT + c.PLT_COMP_HEIGHT/2\n\n plate1 = Rectangle(x=0, y=plt_comp_y_adj, z=plt_comp_hgt_adj,\n l=c.PLT_COMP_WIDTH, w=c.PLT_COMP_LENGTH, h=c.PLT_COMP_HEIGHT,\n fixed=False, sensor='position', ref='plt1')\n self.eobjs[plate1.ref] = plate1\n\n plate2 = Rectangle(x=0, y=-plt_comp_y_adj, z=plt_comp_hgt_adj,\n l=c.PLT_COMP_WIDTH, w=c.PLT_COMP_LENGTH, h=c.PLT_COMP_HEIGHT,\n fixed=True, sensor='position', fixed_to=plate1.ref, ref='plt2')\n self.eobjs[plate2.ref] = plate2\n\n plate3 = Rectangle(x=plt_comp_x_adj, y=0, z=plt_comp_hgt_adj,\n l=c.PLT_COMP_LENGTH - 2*c.PLT_COMP_WIDTH, w=c.PLT_COMP_WIDTH, h=c.PLT_COMP_HEIGHT,\n fixed=True, fixed_to=plate1.ref, ref='plt3')\n self.eobjs[plate3.ref] = plate3\n\n plate4 = Rectangle(x=-plt_comp_x_adj, y=0, z=plt_comp_hgt_adj,\n l=c.PLT_COMP_LENGTH - 2*c.PLT_COMP_WIDTH, w=c.PLT_COMP_WIDTH, h=c.PLT_COMP_HEIGHT,\n fixed=True, fixed_to=plate1.ref, ref='plt4')\n self.eobjs[plate4.ref] = plate4\n\n def is_placement(self, x, y, robot):\n for o in self.eobjs:\n excl_zone = o.get_base_area()\n if not o.is_off_ground() and (x >= excl_zone[0][0] + robot.x/2 and x <= excl_zone[1][0 - robot.x/2]\n or y >= excl_zone[0][1] + robot.y/2 and y <= excl_zone[1][1] + robot.y/2):\n return False;\n return True;\n\n def send_to_sim(self, sim):\n for eobj_key in self.eobjs:\n eobj = self.eobjs[eobj_key]\n b_id = sim.send_box(x=eobj.x, y=eobj.y, z=eobj.z,\n length=eobj.l, width=eobj.w, height=eobj.h,\n mass=10, collision_group=eobj.cg,\n r=eobj.r, g=eobj.g, b=eobj.b)\n if eobj.fixed:\n if eobj.fixed_to == -1:\n sim.send_fixed_joint(first_body_id=b_id, second_body_id=-1)\n else:\n sim.send_fixed_joint(first_body_id=b_id, second_body_id=self.ids[eobj.fixed_to][0])\n if eobj.sensor == 'position':\n self.ids[eobj.ref] = (b_id, sim.send_position_sensor(body_id=b_id))\n","repo_name":"edmudo/SwarmRobotics","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25792884348","text":"from abc import abstractmethod, ABC\nfrom typing import List, Tuple, Optional, Dict\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom 
\n    def get_target(self, idx: int) -> int:\n        \"\"\"\n        Get the target for a particular time series (by dataset index).\n\n        :param idx: Dataset index.\n        :return: Integer class label.\n        \"\"\"\n        return int(self.targets[idx])\n\n    def create_dataloader(self, shuffle: bool = False, batch_size: int = 16, num_workers: int = 0) -> DataLoader:\n        \"\"\"\n        Create a batch dataloader.\n\n        :param shuffle: Whether the dataloader should randomise the dataset order or not.\n        :param batch_size: Size of each batch returned by the dataloader.\n        :param num_workers: Number of workers for parallel data loading.\n        :return: PyTorch dataloader with custom collate function for MIL bags.\n        \"\"\"\n        torch_dataloader = DataLoader(\n            self,\n            shuffle=shuffle,\n            batch_size=batch_size,\n            num_workers=num_workers,\n            collate_fn=mil_collate_fn,\n        )\n        return torch_dataloader\n\n    def get_n_idxs(self, n: int, clz: Optional[int] = None, shuffle: bool = False) -> torch.Tensor:\n        \"\"\"\n        Get n indices from this dataset, either at random or matching a particular class.\n\n        :param n: Number of indices to return.\n        :param clz: Only get indices for a particular class.\n        :param shuffle: If True, get random indices (matching clz if given). 
If False, get the first n (matching clz).\n :return: Tensor of dataset indices matching criteria.\n \"\"\"\n if clz is not None:\n candidate_idxs = self.get_clz_idxs(clz)\n else:\n candidate_idxs = torch.arange(len(self))\n if shuffle:\n perm = torch.randperm(len(candidate_idxs))\n candidate_idxs = candidate_idxs[perm]\n selected_idxs = candidate_idxs[:n]\n return selected_idxs\n\n def get_clz_idxs(self, clz: int) -> torch.Tensor:\n \"\"\"\n Get all indices of this dataset that match a particular class.\n\n :param clz: Class target\n :return: Tensor of dataset indices that belong to the particular class.\n \"\"\"\n all_targets = torch.as_tensor(self.targets)\n return (all_targets == clz).nonzero(as_tuple=True)[0]\n\n def _get_n_clz(self) -> int:\n \"\"\"\n Get the number of classes in this dataset.\n Also verifies the classes meet the labelling assumption.\n\n :return: Number of classes.\n \"\"\"\n # Assumes classes are numbered 0,...,c\n n_clz = int(max(self.targets) + 1)\n # Double-check the classes in the dataset match this assumption\n unique_clzs = sorted(torch.unique(self.targets))\n assert unique_clzs == list(range(n_clz))\n return n_clz\n\n def __getitem__(self, idx: int) -> Dict:\n \"\"\"\n Get a dataset item. Applies time series normalisation (if self.apply_transform = True).\n\n :param idx: Dataset index.\n :return: Dictionary containing the bag and its target.\n \"\"\"\n bag = self.get_bag(idx)\n if self.apply_transform:\n bag = self.apply_bag_transform(bag)\n return {\n \"bag\": bag,\n \"target\": self.targets[idx],\n }\n\n def __len__(self) -> int:\n return len(self.targets)\n\n\ndef mil_collate_fn(orig_batch: List[Dict]) -> Dict:\n \"\"\"\n Custom batch collation function for MIL settings.\n\n :param orig_batch: List of dictionaries (one for each item in the batch).\n :return: Dictionary containing lists of bags, targets, and instance targets (if they exist).\n \"\"\"\n bags = []\n targets = []\n instance_targets: Optional[List[torch.Tensor]] = [] if \"instance_targets\" in orig_batch[0] else None\n for batch_item in orig_batch:\n bags.append(batch_item[\"bag\"])\n targets.append(batch_item[\"target\"])\n if instance_targets is not None:\n instance_targets.append(batch_item[\"instance_targets\"])\n new_batch = {\n \"bags\": bags,\n \"targets\": torch.as_tensor(targets),\n \"instance_targets\": instance_targets,\n }\n return new_batch\n","repo_name":"JAEarly/MILTimeSeriesClassification","sub_path":"millet/data/mil_tsc_dataset.py","file_name":"mil_tsc_dataset.py","file_ext":"py","file_size_in_byte":6019,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"40063051807","text":"import re\r\nimport json\r\nimport math\r\nimport discord\r\nimport datetime\r\nimport sqlite3\r\nimport random\r\nimport aiohttp\r\nimport copy\r\nimport asyncio\r\n\r\nfrom fuzzywuzzy import fuzz\r\nfrom discord.ext import commands\r\nfrom cogs.utils.paginator import Pages\r\nfrom cogs.utils import checks, formats\r\nfrom TagScriptEngine import Engine\r\n\r\n\r\n\r\ndef to_keycap(c):\r\n return '\\N{KEYCAP TEN}' if c == 10 else str(c) + '\\u20e3'\r\n\r\nclass Tags:\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.conn = sqlite3.connect('database.db')\r\n self.c = self.conn.cursor()\r\n self.engine = Engine()\r\n self.variable_regex = re.compile(\r\n r'(?:\\$(\\d)=?((?:\\w|%|{|.*?}|\".*?\")*)?)')\r\n self.random_regex = re.compile(r\"(\\{\\{(.*?)\\}\\})\")\r\n self.c.execute('''CREATE TABLE IF NOT EXISTS tags\r\n (name text, content text, 
server text, creation real, updated real, uses real, author text)''')\r\n\r\n self.c.execute('''CREATE TABLE IF NOT EXISTS tag_lookup\r\n (server text, is_alias boolean, name text, points_to text, nsfw boolean, mod boolean, restricted boolean)''')\r\n\r\n def clean_tag_content(self, content):\r\n return content.replace('@everyone', '@\\u200beveryone').replace('@here', '@\\u200bhere')\r\n\r\n\r\n # @commands.command()\r\n # async def populate(self, ctx):\r\n # self.c.execute('''SELECT name, server\r\n # FROM tags\r\n # WHERE 1''')\r\n # big_list = self.c.fetchall()\r\n # for row in big_list:\r\n # self.c.execute('''INSERT INTO tag_lookup\r\n # VALUES (?, ?, ?, ?, ?, ?, ?)''',\r\n # (row[1], False, row[0], row[0], False, False, False))\r\n # self.conn.commit()\r\n # await ctx.send(\"all done! :D\")\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n async def do_tag_stuff(self, ctx, name, choices=()):\r\n real_choices = [x for x in choices]\r\n choices = [x for x in choices if not re.match(\"<@!?\\d*>\", x)]\r\n lookup = name.lower()\r\n if ctx.guild is None:\r\n shared_servers = [x for x in self.bot.guilds if ctx.author in x.members]\r\n shared_servers_id = [str(x.id) for x in shared_servers]\r\n self.c.execute('''SELECT *\r\n FROM tag_lookup\r\n WHERE name=?''',\r\n (lookup,))\r\n t = self.c.fetchall()\r\n if not t:\r\n return\r\n tags = [x for x in t if x[0] in shared_servers_id]\r\n if not tags:\r\n return await ctx.send(\"No tags found with that\")\r\n if len(tags) == 1:\r\n server, is_alias, _, points_to, nsfw, mod, restricted = tags[0]\r\n else:\r\n # prompt the user\r\n e = discord.Embed()\r\n keycaps = {}\r\n helptext = \"\"\r\n for n,t in enumerate(tags):\r\n servername = self.bot.get_guild(int(t[0])).name\r\n helptext += f\"{n+1}. {servername}\\n\" # Jesus christ\r\n keycaps[to_keycap(n+1)] = t\r\n e.add_field(name=\"Multiple servers you're in have a tag named that, react with the server you want to fetch the tag from.\", value=helptext) \r\n msg = await ctx.send(embed=e)\r\n \r\n \r\n for emoji, _ in keycaps.items():\r\n await msg.add_reaction(emoji)\r\n try:\r\n reaction, user = await self.bot.wait_for('reaction_add', check=lambda x,y: y.id == ctx.author.id, timeout=90.0)\r\n except asyncio.TimeoutError:\r\n return\r\n await msg.delete()\r\n server, is_alias, _, points_to, nsfw, mod, restricted = keycaps[str(reaction.emoji)]\r\n\r\n\r\n \r\n else:\r\n self.c.execute('''SELECT *\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (lookup, ctx.guild.id))\r\n t = self.c.fetchone()\r\n if t is None:\r\n return\r\n server, is_alias, _, points_to, nsfw, mod, restricted = t\r\n \r\n self.c.execute('''SELECT content, uses\r\n FROM tags\r\n WHERE (name=? 
AND server=?)''',\r\n (points_to, str(server)))\r\n a = self.c.fetchone()\r\n destination = ctx.channel\r\n if a is None:\r\n return await ctx.send(\"Something that shouldn't happen just happened, tell Carl a pointed to tag doesn't exist.\")\r\n tag, uses = a\r\n\r\n \r\n if nsfw and not ctx.channel.is_nsfw():\r\n return await ctx.send(\"<:redtick:318044813444251649> This tag can only be used in NSFW channels.\")\r\n \r\n if restricted:\r\n is_mod = False\r\n bypass = ctx.author.guild_permissions.manage_guild\r\n if ctx.guild.id == 113103747126747136:\r\n mod_role = 175657731426877440\r\n roles = [x.id for x in ctx.author.roles]\r\n if mod_role in roles:\r\n is_mod = True\r\n elif bypass:\r\n is_mod = True\r\n if not is_mod:\r\n self.c.execute('''SELECT bot_channel\r\n FROM servers\r\n WHERE id=?''',\r\n (ctx.guild.id,)) \r\n bot_channel = self.c.fetchone()\r\n if bot_channel[0] is None:\r\n bot_channel = discord.utils.find(lambda m: \"bot\" in m.name, ctx.guild.channels)\r\n if bot_channel is None:\r\n bot_channel = ctx.channel\r\n else:\r\n bot_channel = self.bot.get_channel(int(bot_channel[0]))\r\n destination = bot_channel\r\n if ctx.channel != destination:\r\n await destination.send(ctx.author.mention)\r\n \r\n if mod:\r\n is_mod = False\r\n bypass = ctx.author.guild_permissions.manage_guild\r\n if ctx.guild.id == 113103747126747136:\r\n mod_role = 175657731426877440\r\n roles = [x.id for x in ctx.author.roles]\r\n if mod_role in roles:\r\n is_mod = True\r\n elif bypass:\r\n is_mod = True\r\n if not is_mod:\r\n return\r\n if choices:\r\n for i, item in enumerate(choices):\r\n self.engine.Add_Variable(str(i+1), item)\r\n nix = str(int(datetime.datetime.utcnow().timestamp()))\r\n self.engine.Add_Variable(\"unix\", nix)\r\n self.engine.Add_Variable(\"uses\", str(int(uses)))\r\n self.engine.Add_Variable(\"args\", ' '.join(choices))\r\n self.engine.Add_Variable(\"commandargs\", ' '.join(real_choices))\r\n self.engine.Add_Variable(\"ARGS\", ' '.join(choices).upper())\r\n uavatar = ctx.message.mentions[0].avatar_url if ctx.message.mentions else ctx.author.avatar_url\r\n self.engine.Add_Variable(\"avatar\", uavatar)\r\n # self.engine.Add_Variable(\"usercount\", len(ctx.guild.members))\r\n self.engine.Add_Variable(\"authorid\", str(ctx.author.id))\r\n uid = ctx.message.mentions[0].id if ctx.message.mentions != [] else ctx.message.author.id\r\n self.engine.Add_Variable(\"userid\", str(uid))\r\n tag = self.engine.Process(tag)\r\n self.engine.Clear_Variables()\r\n if r\"$user\" in tag:\r\n user = ctx.message.mentions[0] if ctx.message.mentions != [\r\n ] else ctx.message.author\r\n tag = tag.replace(r\"$user\", user.display_name)\r\n if r\"$author\" in tag:\r\n tag = tag.replace(r\"$author\", ctx.message.author.display_name)\r\n if r\"$channelmention\" in tag:\r\n channel = ctx.message.channel_mentions[0] if ctx.message.channel_mentions != [\r\n ] else ctx.message.channel\r\n tag = tag.replace(r\"$channelmention\", channel.mention)\r\n if r\"$channel\" in tag:\r\n channel = ctx.message.channel_mentions[0] if ctx.message.channel_mentions != [\r\n ] else ctx.message.channel\r\n tag = tag.replace(r\"$channel\", channel.name)\r\n \r\n if r\"$server\" in tag:\r\n tag = tag.replace(r\"$server\", ctx.guild.name)\r\n if r\"$nauthor\" in tag:\r\n tag = tag.replace(r\"$nauthor\", ctx.message.author.name)\r\n if r\"$nuser\" in tag:\r\n user = ctx.message.mentions[0] if ctx.message.mentions != [\r\n ] else ctx.message.author\r\n tag = tag.replace(r\"$nuser\", user.name)\r\n if r\"$mention\" in tag:\r\n if 
ctx.message.mentions:\r\n                user = ctx.message.mentions[0] if ctx.message.mentions != [\r\n                ] else ctx.message.author\r\n                tag = tag.replace(r\"$mention\", user.name)\r\n            else:\r\n                tag = tag.replace(r\"$mention\", \"\")\r\n        if r\"$nmention\" in tag:\r\n            if ctx.message.mentions:\r\n                user = ctx.message.mentions[0] if ctx.message.mentions != [\r\n                ] else ctx.message.author\r\n                tag = tag.replace(r\"$nmention\", user.name)\r\n            else:\r\n                tag = tag.replace(r\"$nmention\", \"\")\r\n\r\n        tag = self.clean_tag_content(tag)\r\n\r\n        self.c.execute(\r\n            'UPDATE tags SET uses = uses + 1 WHERE (name=? AND server=?)', (points_to, str(server)))\r\n        self.conn.commit()\r\n        actions = re.search(r'a{(.+?)}', tag)\r\n        reactions = re.search(r'react{(.+?)}', tag)\r\n        will_be_deleted = False\r\n        to_react_with = []\r\n        if actions is not None:\r\n            actionstring = actions.group(1).lower()\r\n            actions = [x.strip() for x in actionstring.split(',')]\r\n            if ctx.guild is not None:\r\n                if \"f\" in actions:\r\n                    pay_respects = True\r\n                if \"delete\" in actions:\r\n                    will_be_deleted = True\r\n                if \"pmmention\" in actions and ctx.message.mentions:\r\n                    destination = ctx.message.mentions[0]\r\n                elif \"pm\" in actions:\r\n                    destination = ctx.author\r\n            tag = re.sub(r'a{(.+?)}', '', tag)\r\n        if reactions is not None:\r\n            reactionstring = reactions.group(1).lower()\r\n            reactions = [x.strip() for x in reactionstring.split(',')]\r\n            if ctx.guild is not None:\r\n                for emoji in reactions:\r\n                    to_react_with.append(emoji)\r\n            tag = re.sub(r'react{(.+?)}', '', tag)\r\n        # Here we need to decide if it's a tag or a command\r\n        cmd = re.search(r'c{(.+?)}', tag)\r\n        if will_be_deleted:\r\n            try:\r\n                await ctx.message.delete()\r\n            except:\r\n                pass\r\n        if cmd is not None:\r\n            # It's a command\r\n            context = copy.copy(ctx.message)\r\n            if \"tag\" in cmd.group(1).lower():\r\n                return await ctx.send(\"Can't have 'tag' in your command.\")\r\n            context.content = \"{}{}\".format(ctx.prefix, cmd.group(1))\r\n            return await self.bot.process_commands(context)\r\n\r\n        tag_msg = await destination.send(tag)\r\n        if to_react_with:\r\n            for emoji in to_react_with:\r\n                if emoji.startswith(\"<\"):\r\n                    emoji = emoji[1:-1]\r\n                await tag_msg.add_reaction(emoji)\r\n\r\n\r\n    @commands.group(invoke_without_command=True)\r\n    async def tag(self, ctx, name: str, *choices: str):\r\n
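        \"\"\"Invoke a stored tag; alias resolution, permission checks and\r\n        variable substitution are all handled by do_tag_stuff above.\"\"\"\r\n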
        await self.do_tag_stuff(ctx, name, choices)\r\n\r\n    @tag.error\r\n    async def tag_error(self, ctx, error):\r\n        if isinstance(error, commands.MissingRequiredArgument):\r\n            await ctx.send('You need to pass in a tag name.')\r\n\r\n    def verify_lookup(self, lookup):\r\n        if '@everyone' in lookup or '@here' in lookup:\r\n            raise RuntimeError('That tag is using blocked words.')\r\n\r\n        if not lookup:\r\n            raise RuntimeError('You need to actually pass in a tag name.')\r\n\r\n        if len(lookup) > 50:\r\n            raise RuntimeError('Tag name is a maximum of 50 characters.')\r\n\r\n    @tag.command(name=\"get\", hidden=True)\r\n    async def _get(self, ctx, name: str, *choices: str):\r\n        await self.do_tag_stuff(ctx, name, choices)\r\n\r\n    @tag.command(aliases=['++'])\r\n    @checks.admin_or_permissions(manage_server=True)\r\n    async def procreate(self, ctx, name: str, *, content: str):\r\n        \"\"\"\r\n        content is a pastebin link (or pastebin raw link); this should only\r\n        ever be used for creating tags whose length is >2000 characters\r\n        with output guaranteed to be below 2000\r\n        \"\"\"\r\n        lookup = name.lower().strip().replace(\"@everyone\", \"everyone\").replace(\"@here\", \"here\")\r\n        self.c.execute('''SELECT is_alias, points_to\r\n                          FROM tag_lookup\r\n                          WHERE (name=? AND server=?)''',\r\n                       (lookup, ctx.guild.id))\r\n        is_alias = self.c.fetchone()\r\n        if is_alias is not None:\r\n            if is_alias[0]:\r\n                return await ctx.send(f\"{lookup} is an alias pointing to {is_alias[1]} already\")\r\n        match = re.findall(\"https:\\/\\/pastebin.com\\/(?:raw\\/)?(.*)\", content)\r\n        if not match:\r\n            await ctx.send(\"You need to pass in a pastebin link\")\r\n            return\r\n        async with aiohttp.ClientSession() as session:\r\n            async with session.get(f'https://pastebin.com/raw/{match[0]}') as res:\r\n                content = await res.text()\r\n        content = self.clean_tag_content(content)\r\n\r\n        try:\r\n            self.verify_lookup(lookup)\r\n        except RuntimeError as e:\r\n            return await ctx.send(e)\r\n        a = self.c.execute(\r\n            'SELECT * FROM tags WHERE (name=? AND server=?)', (lookup, str(ctx.guild.id)))\r\n        a = a.fetchall()\r\n        word = \"created\"\r\n        if a != []:\r\n            word = \"updated\"\r\n            self.c.execute('UPDATE tags SET content=?, updated=? WHERE (name=? 
AND server=?)', (\r\n content, datetime.datetime.utcnow().timestamp(), lookup, str(ctx.guild.id)))\r\n self.conn.commit()\r\n await ctx.send(f'Tag \"{name}\" successfully {word}.')\r\n else: \r\n self.c.execute(\"INSERT INTO tags VALUES (?, ?, ?, ?, ?, ?, ?)\", (lookup, content, str(\r\n ctx.guild.id), datetime.datetime.utcnow().timestamp(), datetime.datetime.utcnow().timestamp(), 0, ctx.message.author.id))\r\n self.c.execute('''INSERT INTO tag_lookup VALUES (?, ?, ?, ?, ?, ?, ?)''',\r\n (ctx.guild.id, False, lookup, lookup, False, False, False))\r\n self.conn.commit()\r\n await ctx.send(f'Tag \"{name}\" successfully {word}.')\r\n\r\n @tag.command()\r\n async def nsfw(self, ctx, name: str):\r\n \"\"\"\r\n marks a tag as \"nsfw\", rendering it unusable (by non-mods) outside of nsfw channels\r\n \"\"\"\r\n is_mod = False\r\n bypass = ctx.author.guild_permissions.manage_guild or ctx.author.id == 106429844627169280\r\n if ctx.guild.id == 113103747126747136:\r\n mod_role = 175657731426877440\r\n roles = [x.id for x in ctx.author.roles]\r\n if mod_role in roles:\r\n is_mod = True\r\n elif bypass:\r\n is_mod = True\r\n if not is_mod:\r\n return await ctx.send(\"You need to be a mod to restrict tags\")\r\n lookup = name.lower()\r\n self.c.execute('''SELECT nsfw, points_to\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (lookup, ctx.guild.id))\r\n status = self.c.fetchone()\r\n if status is None:\r\n return await ctx.send(\"That tag doesn't seem to exist\")\r\n \r\n new_status = not status[0]\r\n points_to = status[1]\r\n self.c.execute('''UPDATE tag_lookup SET nsfw=? WHERE (points_to=? AND server=?)''', (new_status, points_to, ctx.guild.id))\r\n self.conn.commit()\r\n if new_status:\r\n return await ctx.send(f'\"{points_to}\" is now marked as NSFW')\r\n await ctx.send(f'\"{points_to}\" is now marked as SFW')\r\n\r\n @tag.command()\r\n async def restrict(self, ctx, name: str):\r\n \"\"\"\r\n marks a tag as restricted, when a restricted tag is used outside of the\r\n specified bot channel (use !set bot <#channel> for this) it will instead be\r\n posted in the bot channel\r\n \"\"\"\r\n is_mod = False\r\n bypass = ctx.author.guild_permissions.manage_guild or ctx.author.id == 106429844627169280\r\n if ctx.guild.id == 113103747126747136:\r\n mod_role = 175657731426877440\r\n roles = [x.id for x in ctx.author.roles]\r\n if mod_role in roles:\r\n is_mod = True\r\n elif bypass:\r\n is_mod = True\r\n if not is_mod:\r\n return await ctx.send(\"You need to be a mod to restrict tags\")\r\n lookup = name.lower()\r\n self.c.execute('''SELECT restricted, points_to\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (lookup, ctx.guild.id))\r\n status = self.c.fetchone()\r\n if status is None:\r\n return await ctx.send(\"That tag doesn't seem to exist\")\r\n \r\n new_status = not status[0]\r\n points_to = status[1]\r\n self.c.execute('''UPDATE tag_lookup SET restricted=? WHERE (points_to=? 
AND server=?)''', (new_status, points_to, ctx.guild.id))\r\n        self.conn.commit()\r\n        if new_status:\r\n            return await ctx.send(f'\"{points_to}\" is now restricted to the bot channel')\r\n        await ctx.send(f'\"{points_to}\" is no longer restricted')\r\n\r\n    @tag.command(name='mod')\r\n    async def mod_only(self, ctx, name: str):\r\n        \"\"\"\r\n        mod tags can only be used by mods\r\n        \"\"\"\r\n        is_mod = False\r\n        bypass = ctx.author.guild_permissions.manage_guild or ctx.author.id == 106429844627169280\r\n        if ctx.guild.id == 113103747126747136:\r\n            mod_role = 175657731426877440\r\n            roles = [x.id for x in ctx.author.roles]\r\n            if mod_role in roles:\r\n                is_mod = True\r\n        elif bypass:\r\n            is_mod = True\r\n        if not is_mod:\r\n            return await ctx.send(\"You need to be a mod to restrict tags\")\r\n        lookup = name.lower()\r\n        self.c.execute('''SELECT mod, points_to\r\n                          FROM tag_lookup\r\n                          WHERE (name=? AND server=?)''',\r\n                       (lookup, ctx.guild.id))\r\n        status = self.c.fetchone()\r\n        if status is None:\r\n            return await ctx.send(\"That tag doesn't seem to exist\")\r\n\r\n        new_status = not status[0]\r\n        points_to = status[1]\r\n        self.c.execute('''UPDATE tag_lookup SET mod=? WHERE (points_to=? AND server=?)''', (new_status, points_to, ctx.guild.id))\r\n        self.conn.commit()\r\n        if new_status:\r\n            return await ctx.send(f'\"{points_to}\" is now accessible by mods only')\r\n        await ctx.send(f'\"{points_to}\" is now free for all to use')\r\n\r\n    @tag.command(aliases=['a'])\r\n    async def alias(self, ctx, point: str, name: str=None):\r\n        \"\"\"\r\n        creates a link to an already existing tag\r\n        any changes (content, nsfw, mod etc.) will also be reflected\r\n        to the alias\r\n        \"\"\"\r\n        if name is None:\r\n            return await ctx.send(self.clean_tag_content(f\"You need to specify a tag you want linked with {point}\"))\r\n        name = name.lower()\r\n        try:\r\n            self.verify_lookup(name)\r\n            self.verify_lookup(point)\r\n        except RuntimeError as e:\r\n            return await ctx.send(e)\r\n        point = point.lower()\r\n\r\n        if name == point:\r\n            return await ctx.send(\"You can't alias a tag to itself\")\r\n        # If a tag exists, a lookup will also exist\r\n        # This means we just have to check if name is in tag_lookup\r\n        # for an alias to be created, two things need to be true:\r\n        # 1. The pointed to tag exists\r\n        # 2. There's no tag or alias with this name created already\r\n        self.c.execute('''SELECT name\r\n                          FROM tag_lookup\r\n                          WHERE (points_to=? AND server=?)''',\r\n                       (name, ctx.guild.id))\r\n        all_rows = self.c.fetchall()\r\n        if all_rows == []:\r\n            return await ctx.send(\"That tag doesn't seem to exist\")\r\n\r\n        self.c.execute('''SELECT points_to, is_alias\r\n                          FROM tag_lookup\r\n                          WHERE (name=? AND server=?)''',\r\n                       (point, ctx.guild.id))\r\n        alias_row = self.c.fetchone()\r\n        # We can safely assume 0 or 1 entries exist\r\n        if alias_row is not None:\r\n            if alias_row[1]:\r\n                return await ctx.send(f'There already exists an alias \"{point}\" pointing to \"{alias_row[0]}\"')\r\n            return await ctx.send(f'A tag named \"{point}\" already exists')\r\n\r\n        # Our alias is ready to be created\r\n        # FINALLY, we need the nsfw, restricted, and mod values for our tag\r\n        self.c.execute('''SELECT nsfw, mod, restricted\r\n                          FROM tag_lookup\r\n                          WHERE (name=? 
self.c.execute('''SELECT nsfw, mod, restricted\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (name, ctx.guild.id))\r\n nsfw, mod, restricted = self.c.fetchone()\r\n self.c.execute('''INSERT INTO tag_lookup\r\n VALUES (?, ?, ?, ?, ?, ?, ?)''',\r\n (ctx.guild.id, True, point, name, nsfw, mod, restricted))\r\n self.conn.commit()\r\n await ctx.send(f'Tag alias \"{point}\" that points to \"{name}\" created')\r\n\r\n @tag.command(aliases=['add', '+'])\r\n async def create(self, ctx, name: str, *, content: str):\r\n \"\"\"\r\n Creates a tag/custom command, commands can then be used with just the prefix\r\n For more customization, see tag nsfw/mod/restrict or the github for special variables\r\n \"\"\"\r\n if ctx.message.mentions:\r\n return\r\n content = self.clean_tag_content(content)\r\n lookup = name.lower().strip()\r\n try:\r\n self.verify_lookup(lookup)\r\n except RuntimeError as e:\r\n return await ctx.send(e)\r\n self.c.execute('''SELECT is_alias, points_to\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (lookup, ctx.guild.id))\r\n is_alias = self.c.fetchone()\r\n if is_alias is not None:\r\n if is_alias[0]:\r\n return await ctx.send(f\"{lookup} is an alias pointing to {is_alias[1]} already\")\r\n \r\n self.c.execute('''SELECT *\r\n FROM tags\r\n WHERE (name=? AND server=?)''',\r\n (lookup, str(ctx.guild.id)))\r\n a = self.c.fetchone()\r\n if a is not None:\r\n await ctx.send(\"Tag already exists. **__r__**eplace, **__c__**ancel or **__a__**dd to? (r/c/a)\")\r\n msg = await self.bot.wait_for('message', check=lambda m: m.content.lower() in ['r', 'c', 'a'] and m.author == ctx.message.author)\r\n if msg.content.lower() == \"r\":\r\n self.c.execute('UPDATE tags SET content=?, updated=? WHERE (name=? AND server=?)', (\r\n content, datetime.datetime.utcnow().timestamp(), lookup, str(ctx.guild.id)))\r\n self.conn.commit()\r\n await ctx.send('Tag \"{}\" updated.'.format(name))\r\n return\r\n if msg.content.lower() == \"c\":\r\n await ctx.send(\"Tag unchanged.\")\r\n return\r\n if msg.content.lower() == \"a\":\r\n appended_tag = a[1] + \"\\n{}\".format(content)\r\n if len(appended_tag) >= 2000:\r\n return await ctx.send(\"That would make the tag too long, if you're making a tag with variables where the output would be less than 2k, use `!tag ++` and pass a pastebin instead.\")\r\n self.c.execute('UPDATE tags SET content=?, updated=? WHERE (name=? AND server=?)', (\r\n appended_tag, datetime.datetime.utcnow().timestamp(), lookup, str(ctx.guild.id)))\r\n self.conn.commit()\r\n await ctx.send('Tag \"{}\" successfully appended.'.format(name))\r\n return\r\n else:\r\n return\r\n self.c.execute(\"INSERT INTO tags VALUES (?, ?, ?, ?, ?, ?, ?)\", (lookup, content, str(\r\n ctx.guild.id), datetime.datetime.utcnow().timestamp(), datetime.datetime.utcnow().timestamp(), 0, ctx.message.author.id))\r\n self.c.execute('''INSERT INTO tag_lookup VALUES (?, ?, ?, ?, ?, ?, ?)''',\r\n (ctx.guild.id, False, lookup, lookup, False, False, False))\r\n self.conn.commit()\r\n await ctx.send('Tag \"{}\" successfully created.'.format(name))\r\n\r\n @tag.command(aliases=['update', '&', 'e'])\r\n async def edit(self, ctx, name: str, *, content: str):\r\n \"\"\"\r\n Edits an existing tag directly, skipping the confirmation prompt used by create\r\n \"\"\"\r\n if ctx.message.mentions:\r\n return\r\n content = self.clean_tag_content(content)\r\n lookup = name.lower().strip()\r\n try:\r\n self.verify_lookup(lookup)\r\n except RuntimeError as e:\r\n return await ctx.send(e)\r\n 
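# resolve the name through tag_lookup first, so that editing an alias updates the tag it points to\r\n 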
self.c.execute('''SELECT points_to\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (lookup, ctx.guild.id))\r\n lookup = self.c.fetchone()\r\n if lookup is None:\r\n return await ctx.send(\"No tag or alias with that name could be found\")\r\n lookup = lookup[0]\r\n a = self.c.execute(\r\n 'SELECT * FROM tags WHERE (name=? AND server=?)', (lookup, str(ctx.guild.id)))\r\n a = a.fetchall()\r\n if a != []:\r\n self.c.execute('UPDATE tags SET content=?, updated=? WHERE (name=? AND server=?)', (\r\n content, datetime.datetime.utcnow().timestamp(), lookup, str(ctx.guild.id)))\r\n self.conn.commit()\r\n await ctx.send('Tag \"{}\" edited.'.format(name))\r\n else:\r\n await ctx.send(\"That tag doesn't seem to exist.\")\r\n\r\n @create.error\r\n async def create_error(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send('Tag ' + str(error))\r\n\r\n @tag.command(name=\"append\", aliases=['+='])\r\n async def _append(self, ctx, name: str, *, content: str):\r\n \"\"\"\r\n Appends something to an already existing tag. Please note that a newline will be\r\n inserted before your text\r\n \"\"\"\r\n if ctx.message.mentions:\r\n return\r\n content = self.clean_tag_content(content)\r\n lookup = name.lower().strip()\r\n a = self.c.execute(\r\n 'SELECT * FROM tags WHERE (name=? AND server=?)', (lookup, str(ctx.guild.id)))\r\n a = a.fetchall()\r\n if a == []:\r\n return await ctx.send(\"Can't append non-existent tags\")\r\n try:\r\n appended_tag = a[0][1] + \"\\n{}\".format(content)\r\n except Exception as e:\r\n return await ctx.send(e)\r\n if len(appended_tag) >= 2000:\r\n return await ctx.send(\"That would make the tag too long, if you're looking to create a tag with variables where the output would be less than 2k characters, use `!tag ++` and pass a pastebin link\")\r\n self.c.execute('UPDATE tags SET content=?, updated=? WHERE (name=? AND server=?)',\r\n (appended_tag, datetime.datetime.utcnow().timestamp(), lookup, str(ctx.guild.id)))\r\n self.conn.commit()\r\n await ctx.send('Tag \"{}\" successfully appended.'.format(name))\r\n\r\n def top_three_tags(self, server):\r\n emoji = 129351\r\n a = self.c.execute(\r\n 'SELECT * FROM tags WHERE server=? ORDER BY uses DESC LIMIT 3', (server.id,))\r\n popular = a.fetchall()\r\n popular = [x for x in popular]\r\n for tag in popular:\r\n yield (chr(emoji), tag)\r\n emoji += 1\r\n def top_three_tags_user(self, user):\r\n emoji = 129351\r\n a = self.c.execute(\r\n 'SELECT * FROM tags WHERE (author=? AND server=?) 
ORDER BY uses DESC LIMIT 3', (user.id, user.guild.id,))\r\n popular = a.fetchall()\r\n popular = [x for x in popular]\r\n for tag in popular:\r\n yield (chr(emoji), tag)\r\n emoji += 1\r\n\r\n @tag.command()\r\n async def stats(self, ctx, user: discord.Member=None):\r\n \"\"\"\r\n Shows information about the tags on a server, or the tags a member owns if you mention someone\r\n \"\"\"\r\n server = ctx.guild\r\n e = discord.Embed(title=None)\r\n if user is None:\r\n b = self.c.execute('SELECT Count(*) AS \"hello\" FROM tags')\r\n total_tags = b.fetchone()[0]\r\n t = self.c.execute('SELECT SUM(uses) AS \"hello\" FROM tags')\r\n total_uses = t.fetchone()[0]\r\n e.add_field(name='Global', value='%s tags\\n%s uses' %\r\n (total_tags, int(total_uses)))\r\n sum_of_things = self.c.execute(\r\n 'SELECT Count(*) FROM tags WHERE server=?', (server.id,))\r\n a = sum_of_things.fetchone()[0]\r\n t = self.c.execute(\r\n 'SELECT SUM(uses) AS \"hello\" FROM tags WHERE server=?', (server.id,))\r\n b = t.fetchone()[0]\r\n try:\r\n e.add_field(name=server.name, value='%s tags\\n%s uses' %\r\n (a, int(b)))\r\n except TypeError:\r\n return await ctx.send(\"This server doesn't seem to have any tags\")\r\n fmt = '{} ({} uses)'\r\n for emoji, tag in self.top_three_tags(ctx.guild):\r\n e.add_field(name=emoji + ' Server Tag',\r\n value=fmt.format(tag[0], int(tag[5])))\r\n\r\n else:\r\n # user-specific data, we can't guarantee that the mentioned user even has three tags created\r\n b = self.c.execute('SELECT Count(*) AS \"hello\" FROM tags WHERE (author=? AND server=?)', (user.id, ctx.guild.id))\r\n total_tags = b.fetchone()\r\n if total_tags is None:\r\n return await ctx.send(\"That user has zero tags created.\")\r\n total_tags = total_tags[0]\r\n t = self.c.execute('SELECT SUM(uses) AS \"hello\" FROM tags WHERE (author=? AND server=?)', (user.id, ctx.guild.id))\r\n total_uses = t.fetchone()[0]\r\n e.add_field(name=\"Owned tags\", value=total_tags)\r\n e.add_field(name=\"Owned tag usage\", value=int(total_uses))\r\n for emoji, tag in self.top_three_tags_user(user):\r\n e.add_field(name=emoji + ' ' + tag[0],\r\n value=f\"{int(tag[5])} uses\")\r\n await ctx.send(embed=e)\r\n\r\n\r\n\r\n @tag.command(aliases=['delete', '-', 'del'], no_pm=True)\r\n async def remove(self, ctx, *, name: str):\r\n '''\r\n When a tag is deleted, the tag and all its aliases will be deleted\r\n when an alias is deleted, only the alias is deleted\r\n '''\r\n\r\n lookup = name.lower()\r\n server = ctx.guild\r\n self.c.execute('''SELECT *\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (lookup, server.id))\r\n row = self.c.fetchone() \r\n if row is None:\r\n return await ctx.send(\"Tag not found\")\r\n if row[1]:\r\n # It's an alias, just remove the reference\r\n self.c.execute('''DELETE FROM tag_lookup WHERE (name=? AND server=?)''', (lookup, server.id))\r\n self.conn.commit()\r\n return await ctx.send(f'Alias \"{row[2]}\" pointing to \"{row[3]}\" deleted.')\r\n # Since it's not an aliased tag, we know that name = points_to = lookup\r\n msg = 'Tag and all associated aliases successfully removed.'\r\n self.c.execute(\r\n 'DELETE FROM tags WHERE (name=? AND server=?)', (lookup, server.id))\r\n self.conn.commit()\r\n # When it comes to clearing the tag_lookup, sql makes it pretty easy\r\n # all we need to know is which actual _tag_ we're looking for and just\r\n # delete all tags and aliases pointing to it\r\n self.c.execute('''DELETE FROM tag_lookup WHERE (points_to=? 
AND server=?)''', (lookup, server.id))\r\n self.conn.commit()\r\n await ctx.send(msg)\r\n\r\n @tag.command(aliases=['owner'])\r\n async def info(self, ctx, *, name: str):\r\n \"\"\"\r\n Displays some nifty information about a tag\r\n \"\"\"\r\n lookup = name.lower()\r\n server = ctx.guild\r\n self.c.execute('''SELECT points_to, nsfw, mod, restricted\r\n FROM tag_lookup\r\n WHERE (server=? AND name=?)''',\r\n (ctx.guild.id, lookup))\r\n try:\r\n points_to, nsfw, mod, restricted = self.c.fetchone()\r\n except TypeError:\r\n return await ctx.send(\"Couldn't find a tag with that name, make sure you spelled it correctly.\")\r\n \r\n self.c.execute(\r\n 'SELECT * FROM tags WHERE (server=? AND name=?)', (str(ctx.guild.id), points_to))\r\n name, _, _, created, updated, uses, author = self.c.fetchone()\r\n self.c.execute(\r\n 'SELECT uses FROM tags WHERE server=?', (str(ctx.guild.id),))\r\n rc = self.c.fetchall()\r\n rank = sorted([x[0] for x in rc], reverse=True).index(uses) + 1\r\n # Find tag aliases\r\n self.c.execute('''SELECT name\r\n FROM tag_lookup\r\n WHERE (points_to=? AND server=? AND is_alias)''',\r\n (points_to, ctx.guild.id))\r\n list_of_aliases = self.c.fetchall()\r\n if not list_of_aliases:\r\n alias_string = \"None\"\r\n else:\r\n alias_string = '\\n'.join(x[0] for x in list_of_aliases).replace(lookup, f\"**{lookup}**\")\r\n\r\n e = discord.Embed(title=name)\r\n e.add_field(name='Owner', value=\"<@{}>\".format(author))\r\n e.add_field(name=\"Uses\", value=int(uses))\r\n e.add_field(name=\"Rank\", value=rank)\r\n e.add_field(name='Creation date', value=datetime.datetime.fromtimestamp(\r\n created).strftime(\"%b %d, %Y\"))\r\n e.add_field(name='Last update', value=datetime.datetime.fromtimestamp(\r\n updated).strftime(\"%b %d, %Y\"))\r\n prms = lambda x: (\"<:redtick:318044813444251649>\", \"<:greentick:318044721807360010>\")[x]\r\n e.add_field(name='Special permissions', value=f'{prms(nsfw)} nsfw\\n{prms(mod)} mod\\n{prms(restricted)} restricted')\r\n e.add_field(name='Aliases', value=alias_string)\r\n await ctx.send(embed=e)\r\n\r\n @info.error\r\n async def info_error(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send('Missing tag name to get info of.')\r\n\r\n @tag.command()\r\n async def raw(self, ctx, *, name: str):\r\n \"\"\"\r\n Returns a tag without any formatting, bold text becomes **bold**\r\n \"\"\"\r\n lookup = name.lower()\r\n self.c.execute('''SELECT points_to\r\n FROM tag_lookup\r\n WHERE (name=? AND server=?)''',\r\n (lookup, ctx.guild.id))\r\n tag_name = self.c.fetchone()\r\n if tag_name is None:\r\n return await ctx.send(\"That tag doesn't seem to exist\")\r\n lookup = tag_name[0]\r\n tag = self.c.execute(\r\n 'SELECT * FROM tags WHERE (name=? AND server=?)', (lookup, str(ctx.guild.id)))\r\n tag = tag.fetchone()\r\n\r\n transformations = {\r\n re.escape(c): '\\\\' + c\r\n for c in ('*', '`', '_', '~', '\\\\', '<')\r\n }\r\n\r\n def replace(obj):\r\n return transformations.get(re.escape(obj.group(0)), '')\r\n\r\n pattern = re.compile('|'.join(transformations.keys()))\r\n await ctx.send(pattern.sub(replace, tag[1]))\r\n\r\n @tag.command(name='mine')\r\n async def _mine(self, ctx, *, member: discord.Member = None):\r\n \"\"\"\r\n Shows all tags a member has created\r\n \"\"\"\r\n user = ctx.message.author if member is None else member\r\n tags = self.c.execute(\r\n 'SELECT name FROM tags WHERE (server=? AND author=?) 
ORDER BY name ASC', (str(ctx.guild.id), user.id))\r\n tags = tags.fetchall()\r\n tags = [x[0] for x in tags]\r\n if tags:\r\n try:\r\n if sum(len(t) for t in tags) < 1900:\r\n d = ', '.join(tags)\r\n await ctx.author.send(d)\r\n else:\r\n tempmessage = []\r\n finalmessage = []\r\n for tag in tags:\r\n if len(', '.join(tempmessage)) < 1800:\r\n tempmessage.append(tag)\r\n else:\r\n formatted_tempmessage = ', '.join(tempmessage)\r\n finalmessage.append(formatted_tempmessage)\r\n tempmessage = []\r\n finalmessage.append(', '.join(tempmessage))\r\n for x in finalmessage:\r\n if x != \"\":\r\n await ctx.author.send(x)\r\n except Exception as e:\r\n await ctx.send(e)\r\n else:\r\n await ctx.send('This user has no tags.')\r\n\r\n @tag.command(name='list', no_pm=True)\r\n async def _list(self, ctx):\r\n \"\"\"\r\n Shows you the names of all tags\r\n \"\"\"\r\n tags = self.c.execute(\r\n 'SELECT name FROM tags WHERE (server=? AND LENGTH(name) > 2) ORDER BY name ASC', (str(ctx.guild.id),))\r\n tags = tags.fetchall()\r\n tags = [x[0] for x in tags]\r\n if tags:\r\n try:\r\n if sum(len(t) for t in tags) < 1900:\r\n d = ', '.join(tags)\r\n await ctx.author.send(d)\r\n else:\r\n tempmessage = []\r\n finalmessage = []\r\n for tag in tags:\r\n if len(', '.join(tempmessage)) < 1800:\r\n tempmessage.append(tag)\r\n else:\r\n formatted_tempmessage = ', '.join(tempmessage)\r\n finalmessage.append(formatted_tempmessage)\r\n tempmessage = []\r\n finalmessage.append(', '.join(tempmessage))\r\n for x in finalmessage:\r\n if x != \"\":\r\n await ctx.author.send(x)\r\n except Exception as e:\r\n await ctx.send(e)\r\n else:\r\n await ctx.send('This server has no tags.')\r\n\r\n @commands.command(name='taglist', aliases=['commands', 'tags'], no_pm=True)\r\n async def tag_list(self, ctx):\r\n \"\"\"\r\n Shows you the names of all tags\r\n \"\"\"\r\n tags = self.c.execute(\r\n 'SELECT name FROM tags WHERE server=? ORDER BY name ASC', (str(ctx.guild.id),))\r\n tags = tags.fetchall()\r\n tags = [x[0] for x in tags]\r\n if tags:\r\n try:\r\n if sum(len(t) for t in tags) < 1900:\r\n d = ', '.join(tags)\r\n await ctx.author.send(d)\r\n else:\r\n tempmessage = []\r\n finalmessage = []\r\n for tag in tags:\r\n if len(', '.join(tempmessage)) < 1800:\r\n tempmessage.append(tag)\r\n else:\r\n formatted_tempmessage = ', '.join(tempmessage)\r\n finalmessage.append(formatted_tempmessage)\r\n tempmessage = []\r\n finalmessage.append(', '.join(tempmessage))\r\n for x in finalmessage:\r\n if x != \"\":\r\n await ctx.author.send(x)\r\n except Exception as e:\r\n await ctx.send(e)\r\n else:\r\n await ctx.send('This server has no tags.')\r\n \r\n @tag.command()\r\n async def random(self, ctx):\r\n \"\"\"\r\n Returns a random tag\r\n \"\"\"\r\n tags = self.c.execute(\r\n 'SELECT name, content, author, uses FROM tags WHERE server=? ORDER BY RANDOM() LIMIT 1', (ctx.guild.id,))\r\n tags = tags.fetchone()\r\n name, content, author, uses = tags\r\n e = discord.Embed(title=\"Random tag\")\r\n e.add_field(name=f\"{int(uses)} uses\", value=f'<@{author}>')\r\n e.add_field(name=name, value=content)\r\n await ctx.send(embed=e)\r\n \r\n\r\n\r\n @tag.command()\r\n async def search(self, ctx, *, query: str):\r\n \"\"\"\r\n Searches for a tag.\r\n The query must be at least 2 characters.\r\n \"\"\"\r\n\r\n server = ctx.guild\r\n query = query.lower()\r\n if len(query) < 2:\r\n return await ctx.send('The query length must be at least two characters.')\r\n tags = self.c.execute(\r\n 'SELECT name FROM tags WHERE (server=? 
AND LENGTH(name) > 2) ORDER BY uses DESC', (server.id,))\r\n tags = tags.fetchall()\r\n\r\n results = [x[0] for x in tags]\r\n tagreturn = \"\"\r\n bad_var = \"\"\r\n i = 1\r\n if results:\r\n for tag in results:\r\n if fuzz.partial_ratio(query, tag) > 80:\r\n tagreturn += \"{}. {}\\n\".format(i, tag)\r\n bad_var += \"{}\\n\".format(tag)\r\n i += 1\r\n else:\r\n continue\r\n list_of_returns = tagreturn.splitlines()\r\n\r\n tempmessage = \"\"\r\n final_list = []\r\n xd = 0\r\n for line in list_of_returns:\r\n if xd < 14:\r\n tempmessage += \"{}\\n\".format(line)\r\n xd += 1\r\n else:\r\n tempmessage += \"{}\\n\".format(line)\r\n final_list.append(tempmessage)\r\n tempmessage = \"\"\r\n xd = 0\r\n final_list.append(tempmessage)\r\n if len(list_of_returns) == 0:\r\n await ctx.send(\"No tags found.\")\r\n return\r\n\r\n em = discord.Embed(title=\"Search results:\",\r\n description=final_list[0], colour=0x738bd7)\r\n em.set_author(name=ctx.message.author.name,\r\n icon_url=ctx.message.author.avatar_url, url=ctx.message.author.avatar_url)\r\n em.set_footer(\r\n text=\"{} results. (page {}/{})\".format(i - 1, 1, math.ceil((i - 1) / 15)))\r\n initial_message = await ctx.send(embed=em)\r\n\r\n def check(mesg):\r\n if mesg.content.isdigit():\r\n return True\r\n elif mesg.content.startswith(\"p\"):\r\n return True\r\n else:\r\n return False\r\n for p in range(5):\r\n msg = await self.bot.wait_for('message', check=lambda x: x.author == ctx.message.author, timeout=30.0)\r\n # if the message is a number, match it with the associated tag\r\n if msg.content.isdigit():\r\n listoflines = bad_var.split('\\n')\r\n tag_name = listoflines[int(msg.content) - 1]\r\n # tag_to_send = self.c.execute(\r\n # 'SELECT content FROM tags WHERE (name=? AND server=?)', (tag_name, str(ctx.guild.id)))\r\n # t = tag_to_send.fetchone()\r\n # t = t[0]\r\n # await ctx.send(t)\r\n await self.do_tag_stuff(ctx, tag_name)\r\n\r\n # await self.bot.send_message(message.channel, self.taglist[listoflines[int(msg.content)-1]])\r\n # await self.bot.delete_message(initial_message)\r\n return\r\n # this is for pages\r\n elif msg.content.startswith(\"p\"):\r\n # try:\r\n page_number = int(msg.content[1:])\r\n em2 = discord.Embed(\r\n title=\"Search results:\", description=final_list[page_number - 1], colour=0xffffff)\r\n em2.set_author(name=ctx.message.author.name,\r\n icon_url=ctx.message.author.avatar_url, url=ctx.message.author.avatar_url)\r\n em2.set_footer(\r\n text=\"{} results. (page {}/{})\".format(i - 1, page_number, math.ceil((i - 1) / 15)))\r\n await initial_message.edit(embed=em2)\r\n # except Exception as e:\r\n # print(e)\r\n # return\r\n else:\r\n return\r\n else:\r\n await ctx.send('No tags found.')\r\n\r\n \r\n\r\n @commands.command(name=\"search\")\r\n async def _search(self, ctx, *, query: str):\r\n \"\"\"Searches for a tag.\r\n The query must be at least 2 characters.\r\n \"\"\"\r\n\r\n server = ctx.guild\r\n query = query.lower()\r\n if len(query) < 2:\r\n await ctx.send('The query length must be at least two characters.')\r\n return\r\n\r\n tags = self.c.execute(\r\n 'SELECT name FROM tags WHERE (server=? AND LENGTH(name) > 2) ORDER BY uses DESC', (server.id,))\r\n tags = tags.fetchall()\r\n\r\n results = [x[0] for x in tags]\r\n tagreturn = \"\"\r\n bad_var = \"\"\r\n i = 1\r\n if results:\r\n for tag in results:\r\n if fuzz.partial_ratio(query, tag) > 80:\r\n tagreturn += \"{}. 
{}\\n\".format(i, tag)\r\n bad_var += \"{}\\n\".format(tag)\r\n i += 1\r\n else:\r\n continue\r\n list_of_returns = tagreturn.splitlines()\r\n\r\n tempmessage = \"\"\r\n final_list = []\r\n xd = 0\r\n for line in list_of_returns:\r\n if xd < 14:\r\n tempmessage += \"{}\\n\".format(line)\r\n xd += 1\r\n else:\r\n tempmessage += \"{}\\n\".format(line)\r\n final_list.append(tempmessage)\r\n tempmessage = \"\"\r\n xd = 0\r\n final_list.append(tempmessage)\r\n if len(list_of_returns) == 0:\r\n return await ctx.send(\"No tags found.\")\r\n em = discord.Embed(title=\"Search results:\",\r\n description=final_list[0], colour=0x738bd7)\r\n em.set_author(name=ctx.message.author.name,\r\n icon_url=ctx.message.author.avatar_url, url=ctx.message.author.avatar_url)\r\n em.set_footer(\r\n text=\"{} results. (page {}/{})\".format(i - 1, 1, math.ceil((i - 1) / 15)))\r\n initial_message = await ctx.send(embed=em)\r\n\r\n def check(mesg):\r\n if mesg.content.isdigit():\r\n return True\r\n elif mesg.content.startswith(\"p\"):\r\n return True\r\n else:\r\n return False\r\n for p in range(5):\r\n msg = await self.bot.wait_for('message', check=lambda x: x.author == ctx.message.author, timeout=30.0)\r\n # if the message is a number, match it with the associated tag\r\n if msg.content.isdigit():\r\n listoflines = bad_var.split('\\n')\r\n tag_name = listoflines[int(msg.content) - 1]\r\n # tag_to_send = self.c.execute(\r\n # 'SELECT content FROM tags WHERE (name=? AND server=?)', (tag_name, str(ctx.guild.id)))\r\n # t = tag_to_send.fetchone()\r\n # t = t[0]\r\n # await ctx.send(t)\r\n await self.do_tag_stuff(ctx, tag_name)\r\n\r\n # await self.bot.send_message(message.channel, self.taglist[listoflines[int(msg.content)-1]])\r\n # await self.bot.delete_message(initial_message)\r\n return\r\n # this is for pages\r\n elif msg.content.startswith(\"p\"):\r\n # try:\r\n page_number = int(msg.content[1:])\r\n em2 = discord.Embed(\r\n title=\"Search results:\", description=final_list[page_number - 1], colour=0xffffff)\r\n em2.set_author(name=ctx.message.author.name,\r\n icon_url=ctx.message.author.avatar_url, url=ctx.message.author.avatar_url)\r\n em2.set_footer(\r\n text=\"{} results. (page {}/{})\".format(i - 1, page_number, math.ceil((i - 1) / 15)))\r\n await initial_message.edit(embed=em2)\r\n # except Exception as e:\r\n # print(e)\r\n # return\r\n else:\r\n return\r\n # msg = await self.bot.wait_for_message(author=ctx.message.author, check=lambda m: m.content.isdigit(), timeout=30.0)\r\n # listoflines = bad_var.split('\\n')\r\n # if msg is not None:\r\n # tag_name = listoflines[int(msg.content)-1]\r\n # else:\r\n # return\r\n # server = ctx.guild\r\n # tag_to_send = self.c.execute('SELECT content FROM tags WHERE (name=? 
AND server=?)', (tag_name, server.id))\r\n # t = tag_to_send.fetchone()\r\n # t = t[0]\r\n # await ctx.send(t)\r\n else:\r\n await ctx.send('No tags found.')\r\n\r\n @search.error\r\n async def search_error(self, error, ctx):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send('Missing query to search for.')\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Tags(bot))\r\n","repo_name":"CarlGroth/Carl-Bot","sub_path":"cogs/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":54653,"program_lang":"python","lang":"en","doc_type":"code","stars":292,"dataset":"github-code","pt":"81"} +{"seq_id":"2820607149","text":"#coding:utf-8\nfrom django.conf.urls import url\nfrom views import *\nurlpatterns = [\n url(r\"^$\", eqList),\n url(r\"^eqList/$\",eqList),\n url(r\"^eqDatas/(\\d+)$\", eqDatas),\n url(r\"^equip_api/\", equip_api),\n url(r\"^gateone/$\", gateone),\n url(r\"^addEquipment/$\", addEquipment),\n url(r\"^Terminal/(\\d{1,3})/$\", Terminal),\n url(r\"^get_auth_obj/$\", get_auth_obj)\n]","repo_name":"yangkun6/OurCMDB","sub_path":"Equipment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22215308091","text":"import torch\nimport torch.nn as nn\nfrom torchvision.transforms.functional import resize\n\nfrom ....utils import DECODER_REGISTRY\n\n\nclass DoubleConv(nn.Module):\n \"\"\"\n Module consisting of two convolution layers and activations\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n ):\n super(DoubleConv, self).__init__()\n\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n return self.conv(x)\n\n\n@DECODER_REGISTRY.register()\nclass SegmentationHead(nn.Module):\n \"\"\"\n U-net like up-sampling block\n \"\"\"\n\n def __init__(\n self,\n out_channels=1,\n embed_dims=[64, 128, 256, 512],\n ):\n super(SegmentationHead, self).__init__()\n\n self.ups = nn.ModuleList()\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n for feature in reversed(embed_dims):\n self.ups.append(\n nn.ConvTranspose2d(\n feature * 2,\n feature,\n kernel_size=2,\n stride=2,\n )\n )\n self.ups.append(DoubleConv(feature * 2, feature))\n\n self.bottleneck = DoubleConv(embed_dims[-1], embed_dims[-1] * 2)\n self.conv1 = nn.Conv2d(embed_dims[0], out_channels, kernel_size=1)\n self.conv2 = nn.ConvTranspose2d(\n out_channels, out_channels, kernel_size=4, stride=4\n )\n\n def forward(self, skip_connections):\n\n x = self.bottleneck(skip_connections[-1])\n skip_connections = skip_connections[::-1]\n\n for idx in range(0, len(self.ups), 2):\n\n x = self.ups[idx](x)\n skip_connection = skip_connections[idx // 2]\n\n if x.shape != skip_connection.shape:\n x = resize(x, size=skip_connection.shape[2:])\n\n concat_skip = torch.cat((skip_connection, x), dim=1)\n x = self.ups[idx + 1](concat_skip)\n\n x = self.conv1(x)\n\n return self.conv2(x)\n","repo_name":"SforAiDl/vformer","sub_path":"vformer/decoder/task_heads/segmentation/head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"81"} +{"seq_id":"4813138397","text":"import cv2\nimport numpy as np\nimport time\nimport PoseDetector as 
pm\n\ncap = cv2.VideoCapture(\"new_vids/pexels-tima-miroshnichenko-5319753.mp4\")\n\ndic = {'right arm': [12,14,16], 'left arm': [11,13,15], 'right back': [12,24,26], 'left back': [25,23,11], 'right leg': [30,26,24], 'left leg': [23,25,29], 'right arm back': [14,12,24], 'left arm back': [13,11,23]}\n\ndetector = pm.poseDetector()\ncount = 0\ndir = 0\npTime = 0\nwhile True:\n\tsuccess, img = cap.read()\n\tif not success:\n\t\t# stop cleanly when the video runs out of frames\n\t\tbreak\n\t#img = cv2.resize(img, (1280, 720))\n\t# img = cv2.imread(\"AiTrainer/test.jpg\")\n\timg = detector.findPose(img, False)\n\tlmList = detector.findPosition(img, False)\n\t# print(lmList)\n\tif len(lmList) != 0:\n\t\t# Right Arm\n\t\t#angle = detector.findAngle(img, 12, 14, 16)\n\n\t\t#p1, p2, p3 = dic['right leg']\n\t\t#angle = detector.findAngle(img, p1, p2, p3)\n\n\t\tp1, p2, p3 = dic['right leg']\n\t\tangle = detector.findAngle(img, p3, p2, p1)\n\t\tp1, p2, p3 = dic['left leg']\n\t\tangle = detector.findAngle(img, p3, p2, p1)\n\n\t\t# # Left Arm\n\t\t#angle = detector.findAngle(img, 11, 13, 15,False)\n\t\t#per = np.interp(angle, (210, 310), (0, 100))\n\t\t#bar = np.interp(angle, (220, 310), (650, 100))\n\t\t# print(angle, per)\n\n\tcTime = time.time()\n\tfps = 1 / (cTime - pTime)\n\tpTime = cTime\n\t#cv2.putText(img, str(int(fps)), (50, 100), cv2.FONT_HERSHEY_PLAIN, 5, (255, 0, 0), 5)\n\n\tcv2.imshow(\"Image\", img)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t# allow quitting with 'q' instead of looping forever\n\t\tbreak\n","repo_name":"jgframoz/AI_trainer","sub_path":"ai_trainer.py","file_name":"ai_trainer.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71690534666","text":"from flask import Flask, g, jsonify, request\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nfrom blueprints.volunteer_venice import vv_blueprint\n\nSECRET_KEY = 'development key'\n\n\ndef register_server(url_prefix='', settings={}):\n app = Flask(__name__, **settings)\n app.secret_key = SECRET_KEY\n\n app.register_blueprint(vv_blueprint,\n url_prefix='{}'.format(url_prefix))\n\n @app.before_request\n def before_request():\n local_engine = create_engine(\"sqlite:///test_db\")\n g.db = scoped_session(sessionmaker(\n autocommit=False,\n autoflush=False,\n bind=local_engine))\n\n @app.teardown_request\n def teardown_request(error=None):\n if hasattr(g, 'db'):\n g.db.close()\n g.db.remove()\n\n @app.after_request\n def after_request(response):\n origin = request.headers.get('Origin', '*')\n response.headers.add('Access-Control-Allow-Origin', origin)\n response.headers.add('Access-Control-Allow-Credentials', 'true')\n\n return response\n\n @app.route('{}/location'.format(url_prefix))\n def location():\n result = {\n 'location': [\n {\n 'name': 'Venice Beach',\n 'lat_lng': {'lat':33.990, \"lng\":-118.459},\n 'id': 1\n }\n ]\n }\n return jsonify(result)\n\n @app.errorhandler(404)\n def page_not_found(e):\n return \"Error: Invalid API Path\", 404\n\n return app\n","repo_name":"KyleJamesWalker/volunteer_venice","sub_path":"back_end/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11215346461","text":"from turtle import Turtle\r\nPADDLE_SHAPE = \"square\"\r\nPADDLE_COLOR = \"white\"\r\nPADDLE_WIDTH = 5\r\nPADDLE_HEIGHT = 1\r\nSTEP_SIZE = 20\r\n\r\nclass Paddle(Turtle):\r\n def __init__(self,x_pos):\r\n super().__init__()\r\n self.speed(\"fastest\")\r\n self.penup()\r\n self.shape(PADDLE_SHAPE)\r\n 
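# note: shapesize() below stretches turtle's default ~20px square, so stretch_wid=5 and stretch_len=1 give a roughly 100x20 vertical paddle\r\n 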
self.color(PADDLE_COLOR)\r\n self.shapesize(stretch_wid=PADDLE_WIDTH,stretch_len=PADDLE_HEIGHT)\r\n self.hideturtle()\r\n self.setpos(x=x_pos,y=0)\r\n self.showturtle()\r\n def move_up(self):\r\n if self.ycor() < 230:\r\n self.goto(self.xcor(),self.ycor() + STEP_SIZE)\r\n def move_down(self):\r\n # self.maintain_direction()\r\n if self.ycor() > -230:\r\n self.goto(self.xcor(), self.ycor() - STEP_SIZE)\r\n\r\n","repo_name":"Dinesh-0239/Bounce-The-Ball-aka-PONG-Using-Python-s-Turtle-Graphics","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74794755145","text":"class Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n counter = collections.Counter(nums)\n numbers = list(counter.keys())\n \n def quick_select(numbers, _k):\n if not numbers:\n return []\n \n pivot_index = random.randint(0, len(numbers)-1)\n numbers[pivot_index], numbers[-1] = numbers[-1], numbers[pivot_index]\n count_pivot = counter[numbers[-1]]\n \n i = j = 0\n while j < len(numbers):\n count_cur = counter[numbers[j]]\n \n if count_cur < count_pivot:\n numbers[i], numbers[j] = numbers[j], numbers[i]\n i += 1\n j += 1\n \n numbers[i], numbers[-1] = numbers[-1], numbers[i]\n \n if i == _k:\n return numbers[i:]\n elif i > _k:\n return numbers[i:] + quick_select(numbers[:i], _k)\n else:\n return quick_select(numbers[i:], _k-i)\n \n return quick_select(numbers, len(numbers)-k)\n","repo_name":"novayo/LeetCode","sub_path":"0347_Top_K_Frequent_Elements/try_6.py","file_name":"try_6.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"42898510785","text":"#!/bin/python\n\nimport os\nimport sys\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pycoingecko import CoinGeckoAPI\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nimport requests.exceptions\n\n# Load environment variables from .env file\nload_dotenv()\n\nfile_path = sys.argv[1]\nfile_name = sys.argv[2]\ntimeout = 5\napi_key = os.getenv(\"API_KEY\")\ncoinmarketcap_api_key = os.getenv(\"COINMARKETCAP_API_KEY\")\ncg = CoinGeckoAPI()\n\n\ndef get_api_data(base_currency, currencies, source):\n current_date = datetime.today().strftime(\"%Y-%m-%d\")\n headers = {}\n if source == 'apilayer':\n headers = {'apikey': api_key}\n url = f'https://api.apilayer.com/exchangerates_data/latest?base={base_currency}&symbols={\",\".join(currencies)}&date={current_date}'\n elif source == 'coinmarketcap':\n url = f'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest?symbol={\",\".join(crypto_symbols)}&convert=USD&CMC_PRO_API_KEY={coinmarketcap_api_key}'\n else:\n print(f\"Unknown API source: {source}\")\n return None\n print(url)\n try:\n response = requests.get(url, headers=headers, timeout=timeout)\n if response.status_code != 200:\n print(\"Failed to retrieve exchange rates data.\")\n return None\n data = response.json()\n if source == 'apilayer':\n return data.get('rates', {})\n else:\n data = data['data']\n rtn = {}\n for symbol, details in data.items():\n exchange_rate = details['quote']['USD']['price']\n rtn[symbol.lower()] = { 'usd': exchange_rate }\n return rtn\n except requests.exceptions.Timeout:\n print('The request to the exchange rates API timed out.')\n return None\n except Exception as e:\n print('An error occurred while retrieving exchange rates:', str(e))\n return None\n\n\n\nwith 
open(file_path + file_name, 'r') as f:\n data = f.read()\n\nfile = open(file_path + '.~' + file_name, 'w')\nfile.write(str(data))\nfile.close()\n\nbs_data = BeautifulSoup(data, \"xml\")\n\nrows = {}\ncrypto_symbols = []\nforex_symbols = []\n\nfor tag in bs_data.find_all('gnm:Cell', {'Col':'1'}):\n if tag.string != \"coin\":\n symbol = tag.contents[0]\n if symbol.upper() in ['EUR', 'GBP', 'SGD', 'IDR', 'THB', 'USD']:\n forex_symbols.append(symbol.upper())\n else:\n crypto_symbols.append(symbol)\n rows[tag['Row']] = symbol\n\n# remove duplicate symbols\ncrypto_symbols = list(set(crypto_symbols))\nforex_symbols = list(set(forex_symbols))\n\n# data from coingecko\ndef get_coingecko_data(crypt_data, crypto_symbols):\n try:\n new_data = cg.get_price(ids=crypto_symbols, vs_currencies='usd', timeout=timeout)\n print(new_data)\n return crypt_data\n except requests.exceptions.Timeout:\n print('The request to CoinGecko timed out.')\n return crypt_data\n except Exception as e:\n print('An error occurred while retrieving exchange rates from CoinGecko:', str(e))\n return crypt_data\n\n# forex data\nforex_data = get_api_data(base_currency='USD', currencies=forex_symbols, source='apilayer')\ncrypto_data = get_api_data(base_currency='USD', currencies=crypto_symbols, source='coinmarketcap')\nprint('marketcap', crypto_data)\n\n# crypto_data = get_coingecko_data(crypto_data, crypto_symbols)\n#print('coingecko', crypto_data)\n\nchanges = {}\nfor tag in bs_data.find_all('gnm:Cell', {'Col':'3'}):\n symbol = None\n if tag['Row'] in rows:\n symbol = rows[tag['Row']]\n if symbol is not None:\n ex = tag.string.strip()\n if ex and not ex == '':\n new_ex = float(ex)\n if crypto_data is not None and symbol in crypto_data:\n new_ex = crypto_data[symbol]['usd']\n elif forex_data is not None and symbol.upper() in forex_data:\n new_ex = 1 / forex_data[symbol.upper()]\n else:\n print('Exchange rate not found for:', symbol)\n changes[symbol] = float(new_ex) - float(ex)\n tag.string = str(new_ex)\n else:\n # empty cell: record a zero change so the 'Col 6' loop below doesn't KeyError (changes is a dict, not a list)\n changes[symbol] = 0.0\n\nfor tag in bs_data.find_all('gnm:Cell', {'Col':'6'}):\n if tag['Row'] in rows:\n tag.string = str(changes[rows[tag['Row']]])\n\nfile = open(file_path + file_name, 'w')\nfile.write(str(bs_data))\nfile.close()\n","repo_name":"waotzi/geld","sub_path":"update_money.py","file_name":"update_money.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25211505404","text":"from setuptools import setup, find_packages\n\nLONG_DESCRIPTION = \"Superdesk Server Core\"\n\ninstall_requires = [\n # temporary requirement to get urllib in a version compatible with requests\n # to be kept until requests update its requirements\n # (cf. 
https://github.com/psf/requests/issues/5654\n # and https://github.com/psf/requests/pull/5651)\n \"urllib3<1.26\",\n \"eve==1.1.2\",\n \"eve-elastic>=7.3.2,<7.4.0\",\n \"flask>=1.1,<1.2\",\n \"flask-mail>=0.9,<0.10\",\n \"flask-script>=2.0.5,<3.0\",\n \"flask-babel>=1.0,<1.1\",\n \"pillow>=9.2,<9.3\",\n \"arrow>=0.4,<=0.13\",\n \"bcrypt>=3.1.1,<3.2\",\n \"blinker>=1.3,<1.5\",\n \"celery[redis]>=5.2.7,<5.3\",\n \"cerberus>=1.3.2,<1.4\",\n \"redis>=4.5.2,<4.6\",\n \"kombu>=5.2.4,<5.3\",\n \"feedparser>=6.0.8,<6.1\",\n \"hachoir<=3.0a3\",\n \"HermesCache>=0.10.0,<0.11.0\",\n \"python-magic>=0.4,<0.5\",\n \"ldap3>=2.2.4,<2.6\",\n \"pytz>=2021.3\",\n \"tzlocal>=2.1,<3.0\",\n \"raven[flask]>=5.10,<7.0\",\n \"requests>=2.7.0,<3.0\",\n \"boto3>=1.26,<2.0\",\n \"websockets==10.3\",\n \"mongolock>=1.3.4,<1.4\",\n \"PyYAML>=6.0.1\",\n \"lxml>=4,<4.7\",\n \"python-twitter==3.5\",\n \"chardet<4.0\",\n \"pymongo>=3.8,<3.12\",\n \"croniter<0.4\",\n \"python-dateutil<2.8\",\n \"unidecode==0.04.21\",\n \"authlib>0.14,<0.15\",\n \"draftjs-exporter[lxml]<2.2\",\n \"regex==2020.7.14\",\n \"flask-oidc-ex==0.5.5\",\n # to be replaced by stdlib version when we use Python 3.8+\n \"typing_extensions>=3.7.4\",\n \"elastic-apm[flask]>=6.15.1,<7.0\",\n # Fix an issue with MarkupSafe 2.1.0 not exporting `soft_unicode`\n \"MarkupSafe<2.1\",\n \"Werkzeug>=1.0,<1.1\",\n \"Jinja2>=2.11,<3.0\",\n \"Click>=8.0.3,<9.0\",\n \"itsdangerous>=1.1,<2.0\",\n \"pymemcache>=4.0,<4.1\",\n]\n\npackage_data = {\n \"superdesk\": [\n \"templates/*.txt\",\n \"templates/*.html\",\n \"locators/data/*.json\",\n \"io/data/*.json\",\n \"data_updates/*.py\",\n \"data_updates/*.js\",\n \"translations/*.po\",\n \"translations/*.mo\",\n ],\n \"apps\": [\n \"prepopulate/*.json\",\n \"prepopulate/data_init/*.json\",\n \"io/data/*.json\",\n ],\n}\n\nsetup(\n name=\"Superdesk-Core\",\n version=\"2.7.0dev\",\n description=\"Superdesk Core library\",\n long_description=LONG_DESCRIPTION,\n author=\"petr jasek\",\n author_email=\"petr.jasek@sourcefabric.org\",\n url=\"https://github.com/superdesk/superdesk-core\",\n license=\"GPLv3\",\n platforms=[\"any\"],\n packages=find_packages(exclude=[\"tests*\", \"features*\"]),\n package_data=package_data,\n include_package_data=True,\n # setup_requires=[\"setuptools_scm\"],\n install_requires=install_requires,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n ],\n)\n","repo_name":"superdesk/superdesk-core","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"31446989875","text":"import reference_manual as manual\nimport even_count_color_generator as generator\n\ndef test_get_all_color_codes():\n contents = manual.get_all_color_codes()\n for content in contents:\n pairNumber = content.split(' ')[0].strip()\n color_code = content.split(' ')[1:-1]\n major_color, minor_color = generator.get_color_from_pair_number(int(pairNumber))\n assert( major_color in color_code)\n assert( minor_color in 
color_code)","repo_name":"clean-code-craft-tcq-2/well-named-in-py-SanjaySaatyaki","sub_path":"reference_manual_test.py","file_name":"reference_manual_test.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38970508220","text":"'''\nIf input is like Ravi Chandra Aswin, output should be: RCA\nIf input is like Ravi only, output should be: Ravi\n'''\n\nString = input(\"Enter any String : \")\n\nresult = ''\n\ncount_space = 0\nfor i in String:\n if i == ' ':\n count_space += 1\n\nif count_space > 0:\n for j in String:\n if j.isupper():\n result += j\nelse:\n # single word: return it unchanged\n result = String\n\nprint(result)\n\nprint(\"-------Other way----------\")\n\nstr1 = String.split()\nstr2 = ''\nif len(str1) > 1:\n for j in str1:\n str2 = str2 + j[0]\nelse:\n str2 = String\nprint(str2)","repo_name":"Som94/Python-repo","sub_path":"14 July/assignment-1.py","file_name":"assignment-1.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36706203499","text":"import sys\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport cv2\n\nfin = sys.argv[1]\nf = open(fin,'r')\nflines = f.readlines()\nall_wh=np.zeros((len(flines),2))\nprev_im=\"\"\n\nfor line_idx in range(len(flines)):\n line = flines[line_idx]\n vals = line.split(',')\n x0 =int(vals[1])\n x1 =int(vals[3])\n y0 =int(vals[2])\n y1 =int(vals[4])\n tag=vals[5]\n text=vals[6].strip()\n im_file=vals[0]\n gt_im_path = im_file.split('.')[0]+'_gt.jpg'\n if im_file != prev_im:\n if line_idx>0:\n # write the previous image under its own _gt path, not the new file's\n cv2.imwrite(prev_im.split('.')[0]+'_gt.jpg',im)\n im = cv2.imread(im_file)\n\n cv2.rectangle(im,(x0,y0),(x1,y1),(0,0,0))\n cv2.putText(im,text,(x0,y0),cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0), 2)\n cv2.putText(im,tag,(x1,y1),cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0), 2)\n\n prev_im=im_file\n\n# the last image is never flushed inside the loop, so save it here\nif prev_im:\n cv2.imwrite(prev_im.split('.')[0]+'_gt.jpg',im)\n","repo_name":"omni-us/research-e2e-pagereader","sub_path":"visualize_gt.py","file_name":"visualize_gt.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"15593480258","text":"import os\nfrom java.lang import Runnable, Thread\nfrom com.pnfsoftware.jeb.client.api import IScript, IGraphicalClientContext, IconType, ButtonGroupType\nfrom com.pnfsoftware.jeb.core import RuntimeProjectUtil\nfrom com.pnfsoftware.jeb.core.units.code import ICodeUnit, ICodeItem\nfrom com.pnfsoftware.jeb.core.output.text import ITextDocument\nfrom com.pnfsoftware.jeb.core.actions import Actions, ActionContext, ActionCreatePackageData, ActionMoveToPackageData\n\"\"\"\nSample client script for PNF Software' JEB.\nCluster DEX classes in order to rebuild an obfuscated+flattened code hierarchy.\n\"\"\"\nclass DexCluster(IScript):\n\n OUTDIR = 'CUSTOMIZE_THIS'\n TARGETP = 'o.' 
# customize\n\n def run(self, ctx):\n self.ctx = ctx\n\n # customize this\n self.outputDir = DexCluster.OUTDIR\n\n prj = ctx.getMainProject()\n codeUnit = prj.findUnit(ICodeUnit)\n\n print('Clustering: %s' % codeUnit)\n\n self.clusterUnit(codeUnit, DexCluster.TARGETP)\n\n print('Done.')\n\n\n def clusterUnit(self, codeUnit, basePackage=''):\n #print(codeUnit)\n typeToInternalMethods = {}\n typeToExternalMethods = {}\n # method -> list of called methods\n methodToMethods = {}\n # reverse map: method -> list of methods callers\n xmethodToMethods = {}\n methodToType = {}\n\n for classObject in codeUnit.getClasses():\n if (classObject.getGenericFlags() & ICodeItem.FLAG_INNER) != 0:\n #print('Inner class, skipping: %s' % classObject)\n continue\n\n pname = classObject.getPackage().getSignature(True)\n pname = pname[1:-1].replace('/', '.') + '.'\n if not pname.startswith(basePackage):\n #print('Class not in target package, skipping: %s' % classObject)\n continue\n\n typeIndex = classObject.getClassType().getIndex()\n typeToInternalMethods[typeIndex] = []\n methodObjects = classObject.getMethods()\n if not methodObjects:\n continue\n\n print('Processing: %s (type: %d)' % (classObject, typeIndex))\n for methodObject in methodObjects:\n methodIndex = methodObject.getIndex()\n typeToInternalMethods[typeIndex].append(methodIndex)\n\n methodToType[methodIndex] = typeIndex\n\n #print(methodObject)\n instructions = methodObject.getInstructions()\n if not instructions:\n continue\n\n print(' %s' % methodObject)\n for insn in instructions:\n s = insn.format(None)\n refMethodIndex = self.extractMethodIndex(s)\n if refMethodIndex >= 0:\n print(' %d -> %d' % (methodIndex, refMethodIndex))\n if methodIndex not in methodToMethods:\n methodToMethods[methodIndex] = []\n methodToMethods[methodIndex].append(refMethodIndex)\n #if refMethodIndex not in xmethodToMethods:\n # xmethodToMethods[refMethodIndex] = []\n #xmethodToMethods[refMethodIndex].append(methodIndex)\n\n # derive type-to-type connections\n # map: edge(couple=src,dst) TO weight(int)\n edgemap = {}\n vertices = set()\n for methodIndex in methodToMethods:\n typeIndex = methodToType[methodIndex]\n for targetMethodIndex in methodToMethods[methodIndex]:\n targetTypeIndex = methodToType.get(targetMethodIndex, -1)\n if targetTypeIndex >= 0 and targetTypeIndex != typeIndex:\n #print ('%d -> %d' % (typeIndex, targetTypeIndex))\n edge = (typeIndex, targetTypeIndex)\n vertices.add(typeIndex)\n vertices.add(targetTypeIndex)\n if edge not in edgemap:\n edgemap[edge] = 0\n edgemap[edge] += 1\n\n types = codeUnit.getTypes()\n\n # graph definition (graphviz)\n gd = 'digraph {\\n'\n for typeIndex in vertices:\n gd += ' %d [label=\"%s\"]\\n' % (typeIndex, self.getTypeLabel(types[typeIndex]))\n gd += '\\n'\n for edge, weight in edgemap.items():\n gd += ' %d -> %d [weight=%d]\\n' % (edge[0], edge[1], weight)\n gd += '}'\n with open(os.path.join(self.outputDir, 'graph.dot'), 'w') as f:\n f.write(gd)\n\n # graph definition (custom, for igraph)\n gd = '# vertices (%d)\\n' % len(vertices)\n for typeIndex in vertices:\n gd += 'v,%d\\n' % typeIndex\n gd += '# edges (%d)\\n' % len(edgemap)\n for edge, weight in edgemap.items():\n gd += 'e,%d,%d,%d\\n' % (edge[0], edge[1], weight)\n fileGraph = os.path.join(self.outputDir, 'graph.txt')\n with open(fileGraph, 'w') as f:\n f.write(gd)\n\n # clustering (external)\n fileClusteringScript = os.path.join(self.outputDir, 'cluster.py')\n fileClusters = os.path.join(self.outputDir, 'graph-clusters.txt')\n task = 
ClusterTask(fileClusteringScript, fileGraph, fileClusters)\n if isinstance(self.ctx, IGraphicalClientContext):\n self.ctx.executeAsync('Clustering...', task)\n else:\n task.run()\n\n # reading clusters\n clusters = self.readClusters(fileClusters)\n print('Clusters(types): %s' % clusters)\n\n # refactoring\n for i, cluster in enumerate(clusters):\n pname = basePackage + 'cluster%03d' % i\n\n # create a package clusterX\n data = ActionCreatePackageData()\n data.setFqname(pname)\n codeUnit.executeAction(ActionContext(codeUnit, Actions.CREATE_PACKAGE, 0, None), data)\n\n # move related classes to the virtual package\n for typeIndex in cluster:\n t = types[typeIndex]\n c = t.getImplementingClass()\n itemId = c.getItemId()\n\n data = ActionMoveToPackageData()\n data.setDstPackageFqname(pname)\n codeUnit.executeAction(ActionContext(codeUnit, Actions.MOVE_TO_PACKAGE, itemId, None), data)\n\n\n def readClusters(self, filepath):\n clusters = []\n with open(filepath) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n cluster = [int(elt) for elt in line.split(',')]\n clusters.append(cluster)\n return clusters\n\n\n def getTypeLabel(self, t):\n #t.getSignature(True)\n return 'type_%d' % t.getIndex()\n\n\n def extractMethodIndex(self, s):\n if s.startswith('invoke'):\n #print(s)\n i = s.find('method@')\n if i >= 0:\n i += 7\n j = s.find(',', i)\n if j < 0:\n j = len(s)\n return int(s[i:j])\n return -1\n\n\nclass ClusterTask(Runnable):\n\n def __init__(self, fileClusteringScript, fileGraph, fileClusters):\n self.fileClusteringScript = fileClusteringScript\n self.fileGraph = fileGraph\n self.fileClusters = fileClusters\n\n def run(self):\n os.system('%s %s %s' % (self.fileClusteringScript, self.fileGraph, self.fileClusters))\n","repo_name":"pnfsoftware/jeb-samplecode","sub_path":"scripts/cluster/DexCluster.py","file_name":"DexCluster.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"81"} +{"seq_id":"37030232827","text":"import os\n\ndef gradingStudents(grades):\n new_grade=[]\n n=len(grades)\n for i in range(n):\n c=grades[i]\n while(c%5!=0):\n c=c+1\n if(c>=40):\n if(c-grades[i]<3):\n new_grade.append(c)\n elif(c-grades[i]>=3):\n new_grade.append(grades[i])\n else:\n new_grade.append(grades[i])\n return new_grade\n # Write your code here\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n grades_count = int(raw_input().strip())\n\n grades = []\n\n for _ in xrange(grades_count):\n grades_item = int(raw_input().strip())\n grades.append(grades_item)\n\n result = gradingStudents(grades)\n\n fptr.write('\\n'.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"SrivastavaCharu/Competitive-programs","sub_path":"Desktop/python_exp/grade.py","file_name":"grade.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3268166485","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nfrom typing import Optional\n\nclass Solution:\n def reorderList(self, head: Optional[ListNode]) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n # find the middle of the list\n def find_middle(head):\n dummy = ListNode(next=head)\n slow, fast = dummy, dummy\n while slow and fast and fast.next:\n slow = slow.next\n 
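# fast advances two nodes per iteration to slow's one, so slow stops at the midpoint\n 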
fast = fast.next.next\n return slow, slow.next\n prev, middle = find_middle(head)\n\n # then reverse l2\n def reverse(head):\n prev, curr = None, head\n while curr:\n tmp = curr.next\n curr.next = prev\n prev = curr\n curr = tmp\n return prev\n prev.next = None\n l1, l2 = head, reverse(middle)\n # print_list(l1), print_list(l2)\n\n # merge l1 and l2\n def merge(l1, l2):\n dummy = ListNode()\n curr = dummy\n while l1 and l2:\n curr.next = l1\n l1 = l1.next\n curr = curr.next\n curr.next = l2\n curr = curr.next\n l2 = l2.next\n while l1:\n curr.next = l1\n curr = curr.next\n l1 = l1.next\n return dummy.next\n return merge(l1, l2)\n\n\ndef build_list(nums):\n head = ListNode(nums[0])\n curr = head\n for n in nums[1:]:\n curr.next = ListNode(n)\n curr = curr.next\n return head\n\ndef print_list(head):\n res = []\n while head:\n res.append(head.val)\n head = head.next\n print(res)\n return res\n\nsol = Solution()\nnums = [1,2,3,4]\nprint_list(sol.reorderList(build_list(nums)))\nnums = [1,2,3,4,5]\nprint_list(sol.reorderList(build_list(nums)))","repo_name":"Dillettant/leetcode","sub_path":"solutions/143.py","file_name":"143.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7413383941","text":"import cv2\n\ncam = cv2.VideoCapture(0) # default webcam\ncam.set(cv2.CAP_PROP_FRAME_WIDTH, 640) # window width\ncam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) # window height\nwhile True: \n ret, frame = cam.read() # read from the webcam\n frame = cv2.flip(frame, 0) \n\n if ret:\n cv2.imshow('CAM', frame) # show the camera feed in a window named CAM\n\n key = cv2.waitKey(1) # exit when 'q' is pressed\n if key == ord('q'):\n break \n\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"jacksimuse/Raspberry_pi","sub_path":"opencv/opencv_ex01.py","file_name":"opencv_ex01.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"39604595025","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def rob(self, root: TreeNode) -> int:\n memo = {}\n def robsub(root):\n if root is None:\n return 0\n if root in memo:\n return memo[root]\n now, left, right = root.val, root.left, root.right\n if left is not None:\n now += robsub(left.left) + robsub(left.right)\n if right is not None:\n now += robsub(right.left) + robsub(right.right)\n \n later = robsub(left) + robsub(right)\n res = max(now, later)\n memo[root] = res\n return res\n return robsub(root)\n \n\nclass Solution:\n def rob(self, root: TreeNode) -> int:\n def robsub(root):\n if root is None:\n return (0, 0)\n left, right = robsub(root.left), robsub(root.right)\n now = root.val + left[1] + right[1]\n later = max(left) + max(right)\n return (now, later)\n return max(robsub(root))\n","repo_name":"allenhyp/LeetCodePractice","sub_path":"337_House_Robber_III.py","file_name":"337_House_Robber_III.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32914075618","text":"from webdriver_manager.core.driver import Driver\nfrom webdriver_manager.core.logger import log\nfrom webdriver_manager.core.utils import is_arch, is_mac_os\n\n\nclass GeckoDriver(Driver):\n def __init__(\n self,\n name,\n version,\n os_type,\n url,\n latest_release_url,\n mozila_release_tag,\n http_client,\n ):\n super(GeckoDriver, self).__init__(\n name,\n version,\n os_type,\n url,\n latest_release_url,\n http_client,\n )\n self._mozila_release_tag = mozila_release_tag\n self._os_type = self.get_os_type()\n\n 
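# note: the driver version is resolved remotely below - the GitHub releases API is queried and the latest release's 'tag_name' is used as the geckodriver version\n 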
def get_latest_release_version(self) -> str:\n determined_browser_version = self.get_browser_version_from_os()\n log(f\"Get LATEST {self._name} version for {determined_browser_version} firefox\")\n resp = self._http_client.get(\n url=self.latest_release_url,\n headers=self.auth_header\n )\n return resp.json()[\"tag_name\"]\n\n def get_driver_download_url(self):\n \"\"\"Like https://github.com/mozilla/geckodriver/releases/download/v0.11.1/geckodriver-v0.11.1-linux64.tar.gz\"\"\"\n driver_version_to_download = self.get_driver_version_to_download()\n log(f\"Getting latest mozilla release info for {driver_version_to_download}\")\n resp = self._http_client.get(\n url=self.tagged_release_url(driver_version_to_download),\n headers=self.auth_header\n )\n assets = resp.json()[\"assets\"]\n name = f\"{self.get_name()}-{driver_version_to_download}-{self._os_type}.\"\n output_dict = [\n asset for asset in assets if asset[\"name\"].startswith(name)]\n return output_dict[0][\"browser_download_url\"]\n\n def get_os_type(self):\n os_type = super().get_os_type()\n if not is_mac_os(os_type):\n return os_type\n\n macos = 'macos'\n if is_arch(os_type):\n return f\"{macos}-aarch64\"\n return macos\n\n @property\n def latest_release_url(self):\n return self._latest_release_url\n\n def tagged_release_url(self, version):\n return self._mozila_release_tag.format(version)\n\n def get_browser_type(self):\n return \"firefox\"\n","repo_name":"prrvchr/gDriveOOo","sub_path":"uno/lib/python/webdriver_manager/drivers/firefox.py","file_name":"firefox.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"18163003228","text":"import pygame \r\nfrom support import import_folder\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\tdef __init__(self,pos,surface):\r\n\t\tsuper().__init__()\r\n\r\n\t\t# Player animations \r\n\t\tself.import_anim_jugador()\r\n\t\tself.frame_index = 0\r\n\t\tself.velocidad_animacion = 0.20\r\n\t\tself.image = self.animaciones['inactivo'][self.frame_index]\r\n\t\tself.rect = self.image.get_rect(topleft = pos)\r\n\t\t\r\n\t\t# Player movement\r\n\t\tself.direccion = pygame.math.Vector2(0,0)\r\n\t\tself.velocidad = 6\r\n\t\tself.gravedad = 0.8\r\n\t\tself.vel_salto = -16\r\n\r\n\t\t# Player state\r\n\t\tself.estado = 'inactivo'\r\n\t\tself.mirando_derecha = True\r\n\t\tself.on_ground = False\r\n\t\tself.on_ceiling = False\r\n\t\tself.on_left = False\r\n\t\tself.on_right = False\r\n\r\n\tdef import_anim_jugador(self):\r\n\t\tcharacter_path = '../pygame_avance/sprites/punk/'\r\n\t\tself.animaciones = {'inactivo':[],'correr':[],'saltar':[],'cayendo':[]}\r\n\t\t# Builds a dictionary of player animations with the help of => support.py\r\n\r\n\t\tfor animation in self.animaciones.keys():\r\n\t\t\tfull_path = character_path + animation\r\n\t\t\tself.animaciones[animation] = import_folder(full_path)\r\n\r\n\tdef animar(self):\r\n\t\tanimacion = self.animaciones[self.estado]\r\n\r\n\t\t# Loop to control the animation frame index \r\n\t\tself.frame_index += self.velocidad_animacion\r\n\t\tif self.frame_index >= len(animacion):\r\n\t\t\tself.frame_index = 0\r\n\r\n\t\timage = animacion[int(self.frame_index)]\r\n\t\tif self.mirando_derecha:\r\n\t\t\tself.image = image\r\n\t\telse:\r\n\t\t\t
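# pygame.transform.flip(surface, flip_x, flip_y) returns a mirrored copy; only the x-axis is flipped, so the sprite faces left\r\n\t\t\t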
flipped_image = pygame.transform.flip(image,True,False)\r\n\t\t\tself.image = flipped_image\r\n\r\n\t\t# Remove the leftover rect offset \r\n\t\tif self.on_ground and self.on_right:\r\n\t\t\tself.rect = self.image.get_rect(bottomright = self.rect.bottomright)\r\n\t\telif self.on_ground and self.on_left:\r\n\t\t\tself.rect = self.image.get_rect(bottomleft = self.rect.bottomleft)\r\n\t\telif self.on_ground:\r\n\t\t\tself.rect = self.image.get_rect(midbottom = self.rect.midbottom)\r\n\t\telif self.on_ceiling and self.on_right:\r\n\t\t\tself.rect = self.image.get_rect(topright = self.rect.topright)\r\n\t\telif self.on_ceiling and self.on_left:\r\n\t\t\tself.rect = self.image.get_rect(topleft = self.rect.topleft)\r\n\t\telif self.on_ceiling:\r\n\t\t\tself.rect = self.image.get_rect(midtop = self.rect.midtop)\r\n\r\n\tdef get_input(self):\r\n\r\n\t\t# Keys for player movement \r\n\t\t# to change a key, use K_[the letter you want to map]\r\n\t\t\r\n\t\tkeys = pygame.key.get_pressed()\r\n\r\n\t\tif keys[pygame.K_d]:\r\n\t\t\tself.direccion.x = 1\r\n\t\t\tself.mirando_derecha = True\r\n\t\telif keys[pygame.K_a]:\r\n\t\t\tself.direccion.x = -1\r\n\t\t\tself.mirando_derecha = False\r\n\t\telse:\r\n\t\t\tself.direccion.x = 0\r\n\r\n\t\tif keys[pygame.K_w] and self.on_ground:\r\n\t\t\tself.jump()\r\n\r\n\t\t\t# Play the jump sound when pressing w, the jump key.\r\n\t\t\tsonido = pygame.mixer.Sound(\"musica/salto_final.wav\")\r\n\t\t\tsonido.play()\r\n\r\n\t\t\r\n\r\n\tdef get_status(self):\r\n\r\n\t\t# Read the player's state to switch between animations \r\n\t\t\r\n\t\tif self.direccion.y < 0:\r\n\t\t\tself.estado = 'saltar'\r\n\t\telif self.direccion.y > 1:\r\n\t\t\tself.estado = 'cayendo'\r\n\t\telse:\r\n\t\t\tif self.direccion.x != 0:\r\n\t\t\t\tself.estado = 'correr'\r\n\t\t\telse:\r\n\t\t\t\tself.estado = 'inactivo'\r\n\r\n\tdef apply_gravity(self):\r\n\t\tself.direccion.y += self.gravedad\r\n\t\tself.rect.y += self.direccion.y\r\n\r\n\tdef jump(self):\r\n\t\tself.direccion.y = self.vel_salto\r\n\r\n\tdef update(self):\r\n\t\tself.get_input()\r\n\t\tself.get_status()\r\n\t\tself.animar()\r\n","repo_name":"alexmiralda/Algoritmos","sub_path":"pygame_avance/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"807997483","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\n\nclass Project(models.Model):\n \"\"\"\n This model represents a project. A project has an author, a title, a description, a type \n and can have several contributors.\n \"\"\"\n \n # Choice field for the project type\n PROJECT_TYPE_CHOICES = [('WEB', 'Web'), \n ('IOS', 'iOS'), \n ('ANDROID', 'Android')]\n \n # Foreign key to the user model. If the user is deleted, all their projects are deleted.\n 
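# related_name exposes the reverse accessor, e.g. some_user.authored_projects.all()\n 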
If the user is deleted, all of their projects are deleted as well.\n author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name=\"authored_projects\")\n title = models.CharField(max_length=200)\n description = models.TextField(max_length=2048)\n type = models.CharField(max_length=7, choices=PROJECT_TYPE_CHOICES)\n \n # \"Contributor\" is an intermediate model; it represents the \"contributes to\" relation between a user and a project.\n contributors = models.ManyToManyField(get_user_model(), through='Contributor', related_name='contributed_projects')\n \n\nclass Contributor(models.Model):\n \"\"\"\n This model represents a contributor to a project. A contributor is linked to a user and to a project.\n \"\"\"\n \n # Foreign key to the user model. If the user is deleted, all of their contributions are deleted.\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='contributions')\n \n # Foreign key to the Project model. If the project is deleted, all contributions to that project are deleted.\n project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='project_contributors')\n\n\nclass Issue(models.Model):\n \"\"\"\n This model represents an issue linked to a project. An issue has a title, a description, a priority,\n a label (tag), a status, an author and a creation date.\n \"\"\"\n \n # Choice field for the issue priority level\n PRIORITY_CHOICES = [(\"FAIBLE\", \"faible\"), \n (\"MOYEN\", \"moyen\"), \n (\"ELEVEE\", \"élevée\")]\n \n # Choice field for the issue label\n TAG_CHOICES = [('BUG', 'Bug'), \n ('AMELIORATION', 'Amélioration'), \n ('TACHE', 'Tâche')]\n \n # Choice field for the issue status\n STATUS_CHOICES = [('A_FAIRE', 'A faire'), \n ('EN_COURS', 'En cours'), \n ('TERMINE', 'Terminé')]\n \n title = models.CharField(max_length=200)\n description = models.TextField()\n priority = models.CharField(max_length=8, choices=PRIORITY_CHOICES)\n tag = models.CharField(max_length=12, choices=TAG_CHOICES)\n status = models.CharField(max_length=8, choices=STATUS_CHOICES)\n \n # Foreign key to the Project model. If the project is deleted, all related issues are deleted.\n project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name=\"issues\")\n \n # Foreign key to the user model. If the user is deleted, all of their issues are deleted.\n author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name=\"authored_issues\")\n \n # Date and time the issue was created\n created_time = models.DateTimeField(auto_now_add=True)\n\n\nclass Comment(models.Model):\n \"\"\"\n This model represents a comment on an issue. A comment has a description,\n an author and a creation date.\n \"\"\"\n description = models.TextField()\n \n # Foreign key to the user model. If the user is deleted, all of their comments are deleted.\n author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name=\"authored_comments\")\n \n # Foreign key to the Issue model. 
If the issue is deleted, all related comments are deleted.\n issue = models.ForeignKey(Issue, on_delete=models.CASCADE, related_name=\"comments\")\n \n # Date and time the comment was created\n created_time = models.DateTimeField(auto_now_add=True)\n","repo_name":"barseille/ponn_barseille_SoftDesk","sub_path":"projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10987729422","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Cyber Security Data Analysis\n\n# # Importing necessary libraries\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.metrics import confusion_matrix\n\n\n# # Loading Data and Preprocessing\n\n# In[2]:\n\n\ndf = pd.read_csv(r'C:\\Users\\Rushabh Shah\\Downloads\\cyber-operations-incidents.csv')\n\n\n# In[3]:\n\n\ndf.tail(5)\n\n\n# In[4]:\n\n\ny=df[\"Type\"]\nID_X=df[\"Description\"]\n\n\n# In[5]:\n\n\ntype(ID_X)\n\n\n# # Data Cleaning\n\n# In[6]:\n\n\nID_X=ID_X.astype(str)\n\n\n# In[7]:\n\n\nimport re\nimport string\n\n\n# In[8]:\n\n\ndef text_clean_1(text):\n text=text.lower()\n text=re.sub('\\[.*?\\]','',text)\n text=re.sub('[%s]' % re.escape(string.punctuation),'',text)\n text=re.sub('\\w*\\d\\w*','',text)\n return text\n\ncleaned1 = lambda x: text_clean_1(x)\n\n\n# In[9]:\n\n\nID_X=ID_X.apply(cleaned1)\n\n\n# In[10]:\n\n\nID_X\n\n\n# In[11]:\n\n\ndef text_clean_2(text):\n text= re.sub('[''\"\"...]','',text)\n text=re.sub('\\n','',text)\n return text\n\ncleaned2 = lambda x: text_clean_2(x)\n\n\n# In[12]:\n\n\nID_X=ID_X.apply(cleaned2)\n\n\n# In[13]:\n\n\ndef text_clean_3(text):\n text= re.sub('_',' ',text)\n return text\n\ncleaned3 = lambda x: text_clean_3(x)\n\n\n# In[14]:\n\n\n\nID_X=ID_X.apply(cleaned3)\n\n\n# # Building an incident-category prediction model from the incident description\n\n# In[15]:\n\n\nID_X_train, ID_X_test, y_train, y_test = train_test_split(ID_X, y,test_size=0.25, random_state=20)\n\n\n# In[16]:\n\n\nfrom wordcloud import STOPWORDS\nstopwords = set(STOPWORDS)\n\n\n# In[17]:\n\n\nID_text_clf = Pipeline([('vect', CountVectorizer(stop_words=stopwords)),\n ('tfidf', TfidfTransformer()),\n ('clf', RandomForestClassifier(n_estimators=100)),\n ])\n\n\n# In[18]:\n\n\nID_text_clf.fit(ID_X_train,y_train)\n\n\n# In[19]:\n\n\nlabels=ID_text_clf.predict(ID_X_test)\nlabels\n\n\n# In[20]:\n\n\ntype(y_train)\n\n\n# In[21]:\n\n\ny_test=pd.DataFrame(y_test)\n\n\n# In[22]:\n\n\ny_test\n\n\n# In[23]:\n\n\nfrom sklearn.metrics import accuracy_score, precision_score\n\nprint(\"Accuracy: \", round(accuracy_score(labels, y_test)*100,2) , \"%\")\nprint(\"Precision: \", round(precision_score(labels,y_test, average = \"weighted\")*100,2) , \"%\")\n\n\n# In[24]:\n\n\nexample=[\"Hidden Cobra used a variety of malware tools to hack into and steal money from banks, cryptocurrency exchanges, and 
ATMs.\"]\nlevel=ID_text_clf.predict(example)\nlevel\n\n\n# In[25]:\n\n\ndef classify_category(text):\n \n text=ID_text_clf.predict(text)\n return text\n\n\n# In[26]:\n\n\na=classify_category([\"Hidden Cobra used a variety of malware tools to hack into and steal money from banks, cryptocurrency exchanges, and ATMs.\"])\n\n\n# In[27]:\n\n\na\n\n\n# # Model Prediction using Naive Bayes\n\n# In[ ]:\n\n\nnb_model = make_pipeline (TfidfVectorizer(),MultinomialNB())\n\n\n# In[ ]:\n\n\nnb_model.fit(ID_X_train,y_train)\n\n\n# In[ ]:\n\n\nlabels= nb_model.predict(ID_X_test)\n\n\n# In[ ]:\n\n\nfrom sklearn.metrics import accuracy_score, precision_score\n\nprint(\"Accuracy: \", round(accuracy_score(labels, y_test)*100,2) , \"%\")\nprint(\"Precision: \", round(precision_score(labels,y_test, average = \"weighted\")*100,2) , \"%\")\n\n","repo_name":"shahrushabhh/SIMPAC-2022-01","sub_path":"Cyber Security Incident Analysis.py","file_name":"Cyber Security Incident Analysis.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9400734337","text":"import collections\n\n\nclass Solution:\n def frequencySort(self, s: str) -> str:\n # l, res = sorted(list(collections.Counter(s).items()), key = lambda x: -x[1]), ''\n # for k, v in l:\n # res += k * v\n # return res\n\n cnt = collections.Counter(s)\n buckets = [[] for _ in range(max(cnt.values() or [0]) + 1)]\n for k, v in cnt.items():\n buckets[v].append(k)\n res = ''\n for i in range(len(buckets) - 1, 0, -1):\n for c in buckets[i]:\n res += i * c\n return res","repo_name":"Jason003/interview","sub_path":"Nuro/Sort Characters By Frequency.py","file_name":"Sort Characters By Frequency.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30804843755","text":"\"\"\"\nFile: hailstones.py\n-------------------\n** The description of this program is summarized/paraphrased from the assignment provided\nin Stanford's Code in Place class offered in 2020 **\nThis is a file for the optional Hailstones problem, if\nyou'd like to try solving it.\nThe hailstones problem takes whatever number is input by the user and divides\neven numbers by 2, and multiplies resulting odd numbers by 3 and adds 1 until\nthe final number is 1. 
The program then prints how many steps it took to reach 1.\n\"\"\"\n\n\ndef main():\n #Takes in what number the user wants to run the hailstones process on\n num = int(input(\"Enter a number: \"))\n # starts hailstone process count at 0 steps\n i = 0\n # makes sure that the process repeats while the resulting number is\n # greater than 1\n while num > 1:\n # if the number is even (ie remainder when divided by 2 is 0\n # divides the number by two and prints the resulting number\n if num % 2 == 0:\n even = num // 2\n print(str(num)+ \" is even so I take half: \"+ str(even) +\".\")\n # redefines num as even so that the process will repeat\n num = even\n # adds step to the step counter\n i = i + 1\n # if the number is odd multiplies by 3 and adds 1\n else:\n odd = (3 * num)+1\n # prints the initial number and the altered number\n # after multiplying by 3 and adding 1\n print(str(num)+ \" is odd so I make 3n+1: \"+str(odd)+ \".\")\n # redefines num as odd so that the process repeats with the\n # new number after multiplying and adding\n num = odd\n # adds step to step counter\n i = i + 1\n # once the process ends the loop ends and this phrase prints how many\n # steps it took to reach 1\n print(\"The process took \"+ str(i)+ \" steps to reach 1.\")\n\n\n\n\n\n# This provided line is required at the end of a Python file\n# to call the main() function.\nif __name__ == '__main__':\n main()\n","repo_name":"akalezic/python","sub_path":"Code_In_Place/Numerical_Projects/hailstones.py","file_name":"hailstones.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15046646387","text":"import random\nimport tifffile\nimport itertools\nimport torch.utils.data as data\n\nfrom tqdm import tqdm\nfrom skimage import exposure\nfrom utils import *\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import Sampler\nfrom skimage.exposure import match_histograms\n\n\nclass AugAxonsDataset(data.Dataset):\n def __init__(self, opt):\n data_path = opt.dataroot\n n_samples = opt.n_samples\n input_dim = opt.input_dim\n data_mix = not opt.noartifacts\n self.opt = opt\n self.augment = dataAugmentation()\n self.unlabel = opt.semi\n self.flag = opt.isTrain\n\n self.datas = []\n self.labels = []\n self.labels_ori = []\n\n data_path = join(data_path, 'train') if self.flag else join(data_path, 'val')\n self.data_path = data_path\n volumes_folder_path = join(data_path, \"volumes\")\n labels_folder_path = join(data_path, \"labels_sk\") if self.flag else join(data_path, 'labels')\n\n volumes_path = get_dir(volumes_folder_path)\n labels_path = get_dir(labels_folder_path)\n\n assert len(labels_path) == len(volumes_path)\n if n_samples == None:\n n_samples = len(labels_path)\n\n total_volumes = 0\n with tqdm(total=len(volumes_path) * n_samples, desc=f'volumes numbers') as pbar:\n for vpath, lpath in zip(volumes_path, labels_path):\n # assert (vpath.split('/')[-1].replace('volume', 'label')) == lpath.split('/')[-1]\n volume = read_tiff_stack(vpath)\n label = read_tiff_stack(lpath)\n if volume.shape[0] < opt.input_dim or volume.shape[1] < opt.input_dim \\\n or volume.shape[2] < opt.input_dim:\n continue\n # volume = (volume - volume.min()) / (volume.max() - volume.min())\n self.datas.append(volume)\n self.labels.append(label)\n\n total_volumes += 1\n pbar.update()\n self.axon_nums = total_volumes\n if data_mix:\n artifacts_folder_path = data_path + '/artifacts/'\n artifacts_path = get_dir(artifacts_folder_path)\n 
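# Artifact-only volumes below are mixed in as extra training samples whose labels are all zeros (negatives).\n 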
with tqdm(total=max(len(volumes_path) * n_samples, len(artifacts_path)), desc=f'artifacts numbers') as pbar:\n for apath in artifacts_path:\n artifact = read_tiff_stack(apath)\n if artifact.shape[0] < opt.input_dim or artifact.shape[1] < opt.input_dim \\\n or artifact.shape[2] < opt.input_dim:\n print(artifact.shape)\n continue\n\n # artifact = (artifact - artifact.min()) / (artifact.max() - artifact.min())\n self.datas.append(artifact)\n self.labels.append(np.zeros_like(label))\n\n total_volumes += 1\n pbar.update()\n\n self.labeled_num = total_volumes\n if self.unlabel and self.flag:\n nonlabel_path = get_dir(data_path + '/nonlabel')\n with tqdm(total=len(nonlabel_path) * n_samples, desc=f'volumes for semi-supervised learning') as pbar:\n for vpath in nonlabel_path:\n volume = read_tiff_stack(vpath)\n if volume.shape[0] < opt.input_dim or volume.shape[1] < opt.input_dim \\\n or volume.shape[2] < opt.input_dim:\n continue\n self.datas.append(volume)\n self.labels.append(np.zeros_like(label))\n\n total_volumes += 1\n pbar.update()\n\n def __getitem__(self, index):\n volumes = self.datas[index]\n labels = self.labels[index]\n\n # ------------------------------------------------------------------ #\n # cutmix\n if index < self.axon_nums:\n artifact = self.datas[random.randint(self.axon_nums, len(self) - 1)]\n z = random.randint(0, labels.shape[0])\n x = random.randint(0, labels.shape[1])\n y = random.randint(0, labels.shape[2])\n artifact_chunk = artifact[:z, :x, :y].copy()\n volumes[:z, :x, :y] = artifact_chunk\n labels[:z, :x, :y] = np.zeros_like(artifact_chunk)\n\n data, label = self.augment.data_augmentation(volumes, labels)\n # ------------------------------------------------------------------ #\n # histograms match\n # if index < self.axon_nums and self.opt.isTrain:\n # reference = self.datas[random.randint(0, self.axon_nums-1)]\n # data = match_histograms(data, reference)\n # elif index >= self.axon_nums and self.opt.isTrain:\n # reference = self.datas[random.randint(self.axon_nums, len(self)-1)]\n # data = match_histograms(data, reference)\n # ------------------------------------------------------------------ #\n data = data[np.newaxis, ...].astype(np.float32)\n data = data / 6553\n # data = (data - data.min()) / (data.max() - data.min())\n if self.opt.output_nc != 1:\n label = label.astype(np.long)\n else:\n label = label[np.newaxis, ...].astype(np.float32)\n return data, label\n\n def __len__(self):\n return len(self.datas)\n","repo_name":"LLveSzy/Sunmap-master","sub_path":"data/augaxons_dataset.py","file_name":"augaxons_dataset.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17973732056","text":"import threading\nimport time\n\n\nbarrier = threading.Barrier(2, timeout=20)\n\n\ndef worker():\n print(\"worker starting\")\n time.sleep(10)\n print(\"worker started\")\n print(\"waiting other workers\")\n barrier.wait()\n print(\"worker ok\")\n\n\ndef worker2():\n print(\"worker2 starting\")\n time.sleep(1)\n print(\"worker2 started\")\n print(\"waiting other workers\")\n barrier.wait()\n print(\"worker2 ok\")\n\n\nt1 = threading.Thread(target=worker)\nt2 = 
threading.Thread(target=worker2)\n\nt1.start()\nt2.start()\n\nt1.join()\nt2.join()\n","repo_name":"agalera/course_python","sub_path":"examples/threading/barrier_ex.py","file_name":"barrier_ex.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41348121298","text":"import time\nimport numpy as np\n\n\ndef exercise_009(triplet_sum):\n \"\"\"\n A Pythagorean triplet is a set of three natural numbers, a < b < c, for\n which,\n\n a^2 + b^2 = c^2\n For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\n\n There exists exactly one Pythagorean triplet for which a + b + c = 1000.\n Find the product abc.\n\n :param triplet_sum: Sum of the Pythagorean triplet\n :type triplet_sum: int\n\n :return: the product a*b*c of the triplet whose sum equals triplet_sum, or 0 if none exists.\n :rtype: int\n \"\"\"\n\n for b in range(1, triplet_sum):\n for a in range(1, b):\n c = np.sqrt(a ** 2 + b ** 2)\n\n if a + b + c == triplet_sum:\n return a * b * c\n\n return 0\n\n\nif __name__ == '__main__':\n start_time = time.time()\n triplet_sum = 1000\n print(exercise_009(triplet_sum))\n end_time = time.time()\n print('{} s'.format(end_time - start_time))\n","repo_name":"psanchezc23/project_euler","sub_path":"exercises/exercise_009.py","file_name":"exercise_009.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"8838303931","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport w3lib\n\nimport scrapy\nfrom scrapy.http import Request, Response\n\nfrom crawlpic.items import CrawlpicItem\n# from scrapy.utils.url import urljoin_rfc\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.utils.url import urlparse\n\n\nclass SkyPicSpider(scrapy.Spider):\n name = 'skypic'\n allowed_domains = ['ivsky.com']\n start_urls = ['https://www.ivsky.com/']\n\n def __init__(self, *args, **kargs):\n self.save_root_dir = \"skypic_files/\"\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url, self.parse_home_url)\n\n def parse_home_url(self, response=Response(\"\")):\n # self.logger.debug(response.headers)\n self.logger.debug('')\n\n save_file = self.save_root_dir + 'home.html'\n with open(save_file, 'wb') as tfile:\n tfile.write(response.text.encode(encoding=\"utf8\"))\n\n hrefs = response.xpath(\"//ul[@class='sy_list']//a/@href\")\n # self.logger.debug(hrefs)\n for link in hrefs:\n # self.logger.debug(link.get())\n yield response.follow(link, self.parse_cat_url)\n\n def parse_cat_url(self, response=Response(\"\")):\n # self.logger.debug(\"============ begin parse cat url ===============\")\n # split_path = response.url.split(\"/\")\n\n # new_flag = \"default\"\n # if split_path[-1] == \"\":\n # new_flag = split_path[-2]\n # else:\n # new_flag = split_path[-1]\n\n # new_subdir = os.path.join(self.save_root_dir, new_flag)\n # os.mkdir(new_subdir)\n\n # save_file = self.save_root_dir + new_flag + '.html'\n # with open(save_file, 'wb') as tfile:\n # tfile.write(response.text.encode(encoding=\"utf8\"))\n\n urlitem = CrawlpicItem()\n base_url = get_base_url(response)\n\n allimgs = response.xpath(\"//ul[@class='pli']//img/@src\")\n tmp_urls = []\n for url in allimgs:\n # urlitem.pic_url = url.get()\n # urlitem[\"image_urls\"] = 'https:' + url.get()\n tmp_urls.append(urlparse(base_url, )[0] + \":\" + url.get())\n # self.logger.debug(urlitem[\"image_urls\"])\n # yield response.follow(url, self.down_img)\n\n urlitem[\"image_urls\"] = tmp_urls\n # return urlitem\n yield urlitem\n\n 
# self.logger.debug(\"============ end parse cat url ================\")\n\n def down_img(self, response=Response(\"\")):\n # self.logger.debug(\"================= downing img ===================\")\n self.logger.debug(response.url)\n split_path = response.url.split(\"/\")\n save_file = self.save_root_dir + split_path[-1]\n self.logger.debug(save_file)\n with open(save_file, 'wb') as tfile:\n tfile.write(response.body)\n\n # urlitem = CrawlpicItem()\n # urlitem.pic_url = response.url\n # yield urlitem\n\n def parse(self, response):\n pass\n\n def closed(self, reason):\n pass\n","repo_name":"taniey/pycrawler","sub_path":"crawlpic/spiders/skypic.py","file_name":"skypic.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4260034132","text":"import factory\nimport pytest\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse\n\nfrom datahub.company.models import CompanyPermission\nfrom datahub.company.test.factories import AdviserFactory, CompanyFactory\nfrom datahub.company.test.factories import (\n OneListCoreTeamMemberFactory,\n)\nfrom datahub.company.test.utils import random_non_ita_one_list_tier\nfrom datahub.core.test_utils import (\n APITestMixin,\n create_test_user,\n)\n\n\n@pytest.fixture\ndef one_list_company():\n \"\"\"Get One List company.\"\"\"\n yield CompanyFactory(\n global_headquarters=None,\n one_list_tier=random_non_ita_one_list_tier(),\n one_list_account_owner=AdviserFactory(),\n )\n\n\nclass TestOneListGroupCoreTeam(APITestMixin):\n \"\"\"Tests for getting the One List Core Team of a company's group.\"\"\"\n\n @pytest.mark.parametrize(\n 'build_company',\n (\n # as subsidiary\n lambda: CompanyFactory(\n global_headquarters=CompanyFactory(one_list_account_owner=None),\n ),\n # as single company\n lambda: CompanyFactory(\n global_headquarters=None,\n one_list_account_owner=None,\n ),\n ),\n ids=('as_subsidiary', 'as_non_subsidiary'),\n )\n def test_empty_list(self, build_company):\n \"\"\"\n Test that if there's no Global Account Manager and no Core Team\n member for a company's Global Headquarters, the endpoint returns\n an empty list.\n \"\"\"\n company = build_company()\n\n url = reverse(\n 'api-v4:company:one-list-group-core-team',\n kwargs={'pk': company.pk},\n )\n response = self.api_client.get(url)\n\n assert response.status_code == status.HTTP_200_OK\n assert response.json() == []\n\n @pytest.mark.parametrize(\n 'build_company',\n (\n # as subsidiary\n lambda gam: CompanyFactory(\n global_headquarters=CompanyFactory(one_list_account_owner=gam),\n ),\n # as single company\n lambda gam: CompanyFactory(\n global_headquarters=None,\n one_list_account_owner=gam,\n ),\n ),\n ids=('as_subsidiary', 'as_non_subsidiary'),\n )\n def test_with_only_global_account_manager(self, build_company):\n \"\"\"\n Test that if there is a Global Account Manager but no Core Team\n member for a company's Global Headquarters, the endpoint returns\n a list with only that adviser in it.\n \"\"\"\n global_account_manager = AdviserFactory()\n company = build_company(global_account_manager)\n\n url = reverse(\n 'api-v4:company:one-list-group-core-team',\n kwargs={'pk': company.pk},\n )\n response = self.api_client.get(url)\n\n assert response.status_code == status.HTTP_200_OK\n assert response.json() == [\n {\n 'adviser': {\n 'id': str(global_account_manager.pk),\n 'name': global_account_manager.name,\n 'first_name': global_account_manager.first_name,\n 'last_name': 
global_account_manager.last_name,\n 'contact_email': global_account_manager.contact_email,\n 'dit_team': {\n 'id': str(global_account_manager.dit_team.pk),\n 'name': global_account_manager.dit_team.name,\n 'uk_region': {\n 'id': str(global_account_manager.dit_team.uk_region.pk),\n 'name': global_account_manager.dit_team.uk_region.name,\n },\n 'country': {\n 'id': str(global_account_manager.dit_team.country.pk),\n 'name': global_account_manager.dit_team.country.name,\n },\n },\n },\n 'is_global_account_manager': True,\n },\n ]\n\n @pytest.mark.parametrize(\n 'build_company',\n (\n # as subsidiary\n lambda gam: CompanyFactory(\n global_headquarters=CompanyFactory(one_list_account_owner=gam),\n ),\n # as single company\n lambda gam: CompanyFactory(\n global_headquarters=None,\n one_list_account_owner=gam,\n ),\n ),\n ids=('as_subsidiary', 'as_non_subsidiary'),\n )\n @pytest.mark.parametrize(\n 'with_global_account_manager',\n (True, False),\n ids=lambda val: f'{\"With\" if val else \"Without\"} global account manager',\n )\n def test_with_core_team_members(self, build_company, with_global_account_manager):\n \"\"\"\n Test that if there are Core Team members for a company's Global Headquarters,\n the endpoint returns a list with these advisers in it.\n \"\"\"\n team_member_advisers = AdviserFactory.create_batch(\n 3,\n first_name=factory.Iterator(\n ('Adam', 'Barbara', 'Chris'),\n ),\n )\n global_account_manager = team_member_advisers[0] if with_global_account_manager else None\n\n company = build_company(global_account_manager)\n group_global_headquarters = company.global_headquarters or company\n OneListCoreTeamMemberFactory.create_batch(\n len(team_member_advisers),\n company=group_global_headquarters,\n adviser=factory.Iterator(team_member_advisers),\n )\n\n url = reverse(\n 'api-v4:company:one-list-group-core-team',\n kwargs={'pk': company.pk},\n )\n response = self.api_client.get(url)\n\n assert response.status_code == status.HTTP_200_OK\n assert response.json() == [\n {\n 'adviser': {\n 'id': str(adviser.pk),\n 'name': adviser.name,\n 'first_name': adviser.first_name,\n 'last_name': adviser.last_name,\n 'contact_email': adviser.contact_email,\n 'dit_team': {\n 'id': str(adviser.dit_team.pk),\n 'name': adviser.dit_team.name,\n 'uk_region': {\n 'id': str(adviser.dit_team.uk_region.pk),\n 'name': adviser.dit_team.uk_region.name,\n },\n 'country': {\n 'id': str(adviser.dit_team.country.pk),\n 'name': adviser.dit_team.country.name,\n },\n },\n },\n 'is_global_account_manager': adviser is global_account_manager,\n }\n for adviser in team_member_advisers\n ]\n\n def test_404_with_invalid_company(self):\n \"\"\"\n Test that if the company doesn't exist, the endpoint returns 404.\n \"\"\"\n url = reverse(\n 'api-v4:company:one-list-group-core-team',\n kwargs={'pk': '00000000-0000-0000-0000-000000000000'},\n )\n response = self.api_client.get(url)\n\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n\nclass TestUpdateOneListCoreTeam(APITestMixin):\n \"\"\"\n Tests for updating the Core Team of One List company.\n\n (Implemented in CompanyViewSet.remove_from_one_list().)\n \"\"\"\n\n @staticmethod\n def _get_url(company):\n return reverse(\n 'api-v4:company:update-one-list-core-team',\n kwargs={\n 'pk': company.pk,\n },\n )\n\n def test_returns_401_if_unauthenticated(self, api_client):\n \"\"\"Test that a 401 is returned if no credentials are provided.\"\"\"\n company = CompanyFactory()\n url = self._get_url(company)\n response = api_client.patch(url)\n assert response.status_code == 
status.HTTP_401_UNAUTHORIZED\n\n @pytest.mark.parametrize(\n 'permission_codenames',\n (\n (),\n (CompanyPermission.change_company,),\n (CompanyPermission.change_regional_account_manager,),\n ),\n )\n def test_returns_403_if_without_permission(self, permission_codenames):\n \"\"\"\n Test that a 403 is returned if the user does not have all of the required\n permissions.\n \"\"\"\n company = CompanyFactory()\n user = create_test_user(permission_codenames=permission_codenames, dit_team=None)\n api_client = self.create_api_client(user=user)\n url = self._get_url(company)\n\n response = api_client.patch(url)\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n @pytest.mark.parametrize(\n 'existing_team_count,new_team_count',\n (\n (0, 2),\n (2, 2),\n (2, 0),\n ),\n )\n def test_can_update_core_team_members(\n self,\n one_list_company,\n one_list_editor,\n existing_team_count,\n new_team_count,\n ):\n \"\"\"Test that core team members can be updated.\"\"\"\n api_client = self.create_api_client(user=one_list_editor)\n url = self._get_url(one_list_company)\n\n if existing_team_count:\n team_member_advisers = AdviserFactory.create_batch(existing_team_count)\n OneListCoreTeamMemberFactory.create_batch(\n len(team_member_advisers),\n company=one_list_company,\n adviser=factory.Iterator(team_member_advisers),\n )\n\n old_core_team_members = [\n core_team_member.adviser.id\n for core_team_member in one_list_company.one_list_core_team_members.all()\n ]\n\n new_core_team_members = [\n adviser.id for adviser in AdviserFactory.create_batch(2)\n ] if new_team_count else []\n\n response = api_client.patch(\n url,\n {\n 'core_team_members':\n [\n {\n 'adviser': adviser_id,\n } for adviser_id in new_core_team_members\n ],\n },\n )\n assert response.status_code == status.HTTP_204_NO_CONTENT\n\n core_team_members = [\n core_team_member.adviser.id\n for core_team_member in one_list_company.one_list_core_team_members.all()\n ]\n\n assert core_team_members != old_core_team_members\n assert core_team_members == new_core_team_members\n\n def test_cannot_update_duplicate_core_team_members(self, one_list_company, one_list_editor):\n \"\"\"Test that duplicate team members cannot be updated.\"\"\"\n api_client = self.create_api_client(user=one_list_editor)\n url = self._get_url(one_list_company)\n\n adviser_id = str(AdviserFactory().id)\n response = api_client.patch(\n url,\n {\n 'core_team_members':\n [\n {\n 'adviser': adviser_id,\n },\n ] * 2,\n },\n )\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert response.json() == {\n 'core_team_members':\n [\n {\n 'adviser':\n [\n 'You cannot add the same adviser more than once.',\n ],\n },\n {\n 'adviser': [\n 'You cannot add the same adviser more than once.',\n ],\n },\n ],\n }\n","repo_name":"uktrade/data-hub-api","sub_path":"datahub/company/test/test_company_views_core_team.py","file_name":"test_company_views_core_team.py","file_ext":"py","file_size_in_byte":11703,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"25117516804","text":"import win32gui\nimport win32ui\nimport win32con\nfrom win32api import GetSystemMetrics\n\nclass ScreenShot(object):\n\n\n def __init__(self):\n \n self.hwnd = win32gui.FindWindow(\"SAP_FRONTEND_SESSION\", None)\n\n def screenShot(self, soOrigin, width=GetSystemMetrics(0), height=GetSystemMetrics(1)):\n wDC = win32gui.GetWindowDC(self.hwnd)\n dcObj=win32ui.CreateDCFromHandle(wDC)\n cDC=dcObj.CreateCompatibleDC()\n dataBitMap = win32ui.CreateBitmap()\n 
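# Back the bitmap with memory compatible with the window DC; BitBlt below copies the screen contents into it.\n        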
dataBitMap.CreateCompatibleBitmap(dcObj, width, height)\n cDC.SelectObject(dataBitMap)\n cDC.BitBlt((0,0),(width, height) , dcObj, (0,0), win32con.SRCCOPY)\n dataBitMap.SaveBitmapFile(cDC, str(soOrigin) + '.bmp')\n dcObj.DeleteDC()\n cDC.DeleteDC()\n win32gui.ReleaseDC(self.hwnd, wDC)\n win32gui.DeleteObject(dataBitMap.GetHandle())\n","repo_name":"luancdz/pythonAutomate","sub_path":"screenShotClass.py","file_name":"screenShotClass.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71198070346","text":"\"\"\"\nThis repo is largely based on the code base of \"SharinGAN: Combining Synthetic and Real Data for Unsupervised GeometryEstimation\"\n(https://github.com/koutilya-pnvr/SharinGAN) and heavily borrows from \"EndoSLAM\" (https://github.com/CapsuleEndoscope/EndoSLAM).\n\nEdited by Anita Rau, a.rau.16@ucl.ac.uk, 2023\n\"\"\"\n\nimport os\nimport numpy as np\nimport random\nimport argparse\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms as tr\nfrom networks import all_networks\nimport matplotlib.pyplot as plt\nfrom Dataloaders.HCULB_dataloader import Hculb as real_dataset\n\n\nclass Solver():\n def __init__(self, opt):\n\n self.opt = opt\n self.seed = 1729 # The famous Hardy-Ramanujan number\n random.seed(self.seed)\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n torch.cuda.manual_seed_all(self.seed)\n\n self.netG = all_networks.define_G(3, 3, 64, 9, 'batch',\n 'PReLU', 'ResNet', 'kaiming', 0,\n False, [0])\n\n\n self.netT = all_networks.define_G(3, 1, 64, 4, 'instance',\n 'ReLU', 'endo', 'kaiming', 0,\n False, [0], 0.1, alpha=100)\n\n self.netG.cuda()\n self.netT.cuda()\n\n self.normalizer = tr.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.get_validation_data()\n\n\n def get_validation_data(self):\n self.real_val_loader = DataLoader(real_dataset(self.opt.test_seq, data_root=self.opt.data_root, train=False, frames_apart=1, norm=False, gap=1, shifts=[1]), batch_size=1, shuffle=False, num_workers=2, drop_last=True)\n \n\n def test(self):\n print('Loading the trained model from {}'.format(self.opt.model_path))\n model_state = torch.load(self.opt.model_path)\n\n self.netT.load_state_dict(model_state['netT_state_dict'])\n self.netG.load_state_dict(model_state['netG_state_dict'])\n self.name = 'replicate_results'\n self.Validate()\n\n\n def Validate(self):\n self.netG.eval()\n self.netT.eval()\n path = os.path.join(self.opt.output_path, self.name)\n if not os.path.exists(path):\n os.mkdir(path)\n\n with torch.no_grad():\n for i, (data, depth_filenames) in enumerate(self.real_val_loader):\n real_val_image = data['left_img'].cuda()\n _, real_recon_image = self.netG(real_val_image)\n real_recon_image = (real_recon_image + 1) / 2\n depth = self.netT(self.normalizer(real_recon_image))\n\n fig2, ax2 = plt.subplots(1, 3, figsize=(9, 3))\n ax2[0].imshow(depth[0, 0, :, :].cpu().numpy(), cmap='viridis_r')\n ax2[0].set_title('Predicted Depth')\n ax2[1].imshow(real_val_image[0, :, :, :].permute((1,2,0)).cpu().numpy())\n ax2[1].set_title('Input Image')\n ax2[2].imshow(real_recon_image[0, :, :, :].permute((1, 2, 0)).cpu().numpy())\n ax2[2].set_title('Translated Image')\n plt.subplots_adjust(wspace=0.03, hspace=0.03)\n [axi.set_xticks([]) for axi in ax2.ravel()]\n [axi.set_yticks([]) for axi in ax2.ravel()]\n fig2.savefig(path + '/' + depth_filenames[0].split('/')[-1], bbox_inches='tight')\n plt.close(fig2)\n\n\ndef get_params():\n parser 
= argparse.ArgumentParser()\n\n parser.add_argument('--output_path', default='outputs/')\n parser.add_argument('--test_seq', type=list, default=[33])\n parser.add_argument('--data_root')\n parser.add_argument('--model_path', default='trained_models/DepthModel.pth.tar')\n opt = parser.parse_args()\n return opt\n\n\nif __name__=='__main__':\n opt = get_params()\n solver = Solver(opt)\n solver.test()","repo_name":"anitarau/Domain-Gap-Reduction-Endoscopy","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"39420685274","text":"# https://leetcode.com/problems/count-artifacts-that-can-be-extracted/\r\nfrom tester import Tester\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n def digArtifacts(\r\n self, n: int, artifacts: List[List[int]], dig: List[List[int]]\r\n ) -> int:\r\n cells = [set() for _ in range(len(artifacts))]\r\n mapping = [[-1] * n for _ in range(n)]\r\n for k, a in enumerate(artifacts):\r\n for i in range(a[0], a[2] + 1):\r\n for j in range(a[1], a[3] + 1):\r\n cells[k].add(i * n + j)\r\n mapping[i][j] = k\r\n\r\n count = 0\r\n for i, j in dig:\r\n a_id = mapping[i][j]\r\n if not (0 <= a_id and a_id < len(cells)):\r\n continue\r\n cell = i * n + j\r\n if cell in cells[a_id]:\r\n cells[a_id].remove(cell)\r\n if not cells[a_id]:\r\n count += 1\r\n return count\r\n\r\nt = Tester(Solution())\r\n\r\nt.test(1, n = 2, artifacts = [[0,0,0,0],[0,1,1,1]], dig = [[0,0],[0,1]])\r\nt.test(2, n = 2, artifacts = [[0,0,0,0],[0,1,1,1]], dig = [[0,0],[0,1],[1,1]])\r\n\r\nt.report()","repo_name":"thinhntr/cp","sub_path":"leetcode/Count Artifacts That Can Be Extracted.py","file_name":"Count Artifacts That Can Be Extracted.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"12497841627","text":"hour = int(input(\"Hour :\"))\nmin = int(input(\"Minutes :\"))\nsec = int(input(\"Seconds :\"))\n\nif(hour >= 0 and hour < 12 and min >= 0 and min <= 59 and sec >= 0 and sec <= 59):\n print(\"GOOD MORNING SIR\")\nelif(hour >= 12 and hour < 18 and min >= 0 and min <= 59 and sec >= 0 and sec <= 59):\n print(\"GOOD EVENING SIR\")\nelif(hour >= 18 and hour <= 23 and min >= 0 and min <= 59 and sec >= 0 and sec <= 59):\n print(\"GOOD NIGHT SIR\")\nelse:\n print(\"NOT A VALID TIME\")","repo_name":"Saahhhiiiil/PYTHON","sub_path":"Exercise/3.a.time.py","file_name":"3.a.time.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73605757706","text":"import pandas as pd\nimport numpy as np\nimport sklearn.metrics\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import AgglomerativeClustering\n\n\n\ntrain = pd.read_csv('water-treatment.data.txt')\n\nprint(train.shape)\nprint(train.describe())\nprint(train.head())\n\n\ndef num_missing(x):\n return sum(x.isnull())\n\n\nprint(train.apply(num_missing, axis=0))\n\ntrain = train.apply(lambda x: x.fillna(x.value_counts().index[0]))\n\ntrain = train.drop('G1', axis=1)\n\nfor k in range(2, 21):\n kmeans = KMeans(n_clusters=k, random_state=0).fit(train)\n print(\"k =\", k)\n print(sklearn.metrics.silhouette_score(train, kmeans.labels_))\n\n # print(kmeans.labels_)\n\t\n\t\n\t\n#train = pd.read_csv('./HTRU2/HTRU_2.csv')\n\n#print(train.head())\n\n#model = AgglomerativeClustering(2, linkage='ward')\n#model.fit(train)\n\n#Y = train['G8']\n#X = train.drop('G8', axis=1)\n\n\n#for index, linkage in 
enumerate(('complete', 'ward')):\n# plt.subplot(1, 3, index + 1)\n# model = AgglomerativeClustering(linkage=linkage, n_clusters=2)\n# predict = model.fit(X)\n# print(normalized_mutual_info_score(predict.labels_, Y))\n\n","repo_name":"amirbavand/DataMining-Projects","sub_path":"HW3/clustering/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27124856585","text":"import random\nfrom hand import Hand\nfrom best_melds import identify_melds, find_best_meld, card_value, find_possible_deadwood\n\n# Define a player class\nclass Player:\n def __init__(self):\n self.hand = Hand() # Player's hand\n self.score = 0 # Player's score\n\n\n# Define a bot class that inherits from player\nclass Bot(Player):\n\n def __init__(self, gin_rummy_instance):\n super().__init__() # Call the parent class's initializer\n self.gin_rummy = gin_rummy_instance # Instance of the game\n self.current_deadwood_sum = self.calculate_deadwood_sum() # Current sum of deadwood\n\n\n # Method to calculate sum of deadwood\n def calculate_deadwood_sum(self):\n melds = identify_melds(self.hand.cards)\n chosen_melds, non_meld_cards = find_best_meld(melds, self.hand.cards)\n possible_deadwood, complete_deadwood = find_possible_deadwood(non_meld_cards)\n return sum(card_value(card) for card in possible_deadwood + complete_deadwood)\n \n\n # Method to update sum of deadwood\n def update_deadwood_sum(self):\n self.current_deadwood_sum = self.calculate_deadwood_sum()\n\n\n # Method to choose a card to pick from the deck or discard pile\n def choose_card_to_pick(self, discard_pile, deck):\n # If there are cards in the discard pile, bot checks if the top card can improve its hand\n if discard_pile:\n top_discard = discard_pile[-1]\n self.hand.add_card(top_discard)\n\n melds = identify_melds(self.hand.cards)\n chosen_melds, non_meld_cards = find_best_meld(melds, self.hand.cards)\n possible_deadwood, complete_deadwood = find_possible_deadwood(non_meld_cards)\n new_deadwood_sum = sum(card_value(card) for card in possible_deadwood + complete_deadwood)\n\n if new_deadwood_sum < self.current_deadwood_sum: # If the new deadwood sum is less than current, bot keeps the card\n self.current_deadwood_sum = new_deadwood_sum\n discard_pile.pop()\n return\n else:\n self.hand.discard_card(top_discard) # If not, bot discards the card\n\n # If deck is empty, reshuffle the discard pile into the deck\n if not deck.cards:\n print(\"Deck is empty! 
Reshuffling discarded pile into the deck.\")\n deck.cards.extend(discard_pile)\n deck.shuffle()\n discard_pile.clear()\n\n self.hand.add_card(deck.deal_card())\n self.hand.sort() # Sort the bot's hand\n\n\n # Method to choose a card to discard\n def choose_card_to_discard(self):\n melds = identify_melds(self.hand.cards)\n chosen_melds, non_meld_cards = find_best_meld(melds, self.hand.cards)\n possible_deadwood, complete_deadwood = find_possible_deadwood(non_meld_cards)\n\n all_deadwood = complete_deadwood + possible_deadwood # Sum of all deadwood\n all_deadwood.sort(key=card_value)\n\n # If there is no deadwood, bot considers to declare \"gin\"\n if len(all_deadwood) == 0:\n for meld in chosen_melds:\n if len(meld) > 3:\n if all(card.rank == meld[0].rank for card in meld):\n return (\"gin\", random.choice(meld))\n else:\n return (\"gin\", max(meld, key=card_value))\n\n # If there is only one deadwood, bot considers to declare \"gin\"\n if len(all_deadwood) == 1:\n return (\"gin\", all_deadwood[0])\n\n total_deadwood_score = sum(card_value(card) for card in all_deadwood[:-1])\n\n # If total score of deadwood is less than or equal to 10, bot considers to \"knock\"\n if total_deadwood_score <= 10:\n return (\"knock\", all_deadwood[-1])\n\n # If there is complete deadwood, bot discards the card with highest value\n if complete_deadwood:\n return (\"discard\", max(complete_deadwood, key=card_value))\n\n # If no complete deadwood, bot discards the card from possible deadwood with highest value\n return (\"discard\", max(possible_deadwood, key=card_value))\n","repo_name":"rawbeen248/Gin-Rummy-AI-vs-Human","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24745586470","text":"import csv\nimport json\nimport datetime\nimport sys\n\nFILENAMES = [\n \"SCDB_2020_01_caseCentered_Citation.csv\",\n \"SCDB_Legacy_06_caseCentered_Citation.csv\",\n]\n\nINTERESTING_PARTIES = {\n \"27\", # the United States\n \"28\", # a State\n}\n\nSTATES = {\n \"1\": \"Alabama\",\n \"2\": \"Alaska\",\n \"3\": \"American Samoa\",\n \"4\": \"Arizona\",\n \"5\": \"Arkansas\",\n \"6\": \"California\",\n \"7\": \"Colorado\",\n \"8\": \"Connecticut\",\n \"9\": \"Delaware\",\n \"10\": \"District of Columbia\",\n \"11\": \"Federated States of Micronesia\",\n \"12\": \"Florida\",\n \"13\": \"Georgia\",\n \"14\": \"Guam\",\n \"15\": \"Hawaii\",\n \"16\": \"Idaho\",\n \"17\": \"Illinois\",\n \"18\": \"Indiana\",\n \"19\": \"Iowa\",\n \"20\": \"Kansas\",\n \"21\": \"Kentucky\",\n \"22\": \"Louisiana\",\n \"23\": \"Maine\",\n \"24\": \"Marshall Islands\",\n \"25\": \"Maryland\",\n \"26\": \"Massachusetts\",\n \"27\": \"Michigan\",\n \"28\": \"Minnesota\",\n \"29\": \"Mississippi\",\n \"30\": \"Missouri\",\n \"31\": \"Montana\",\n \"32\": \"Nebraska\",\n \"33\": \"Nevada\",\n \"34\": \"New Hampshire\",\n \"35\": \"New Jersey\",\n \"36\": \"New Mexico\",\n \"37\": \"New York\",\n \"38\": \"North Carolina\",\n \"39\": \"North Dakota\",\n \"40\": \"Northern Mariana Islands\",\n \"41\": \"Ohio\",\n \"42\": \"Oklahoma\",\n \"43\": \"Oregon\",\n \"44\": \"Palau\",\n \"45\": \"Pennsylvania\",\n \"46\": \"Puerto Rico\",\n \"47\": \"Rhode Island\",\n \"48\": \"South Carolina\",\n \"49\": \"South Dakota\",\n \"50\": \"Tennessee\",\n \"51\": \"Texas\",\n \"52\": \"Utah\",\n \"53\": \"Vermont\",\n \"54\": \"Virgin Islands\",\n \"55\": \"Virginia\",\n \"56\": \"Washington\",\n \"57\": \"West 
Virginia\",\n \"58\": \"Wisconsin\",\n \"59\": \"Wyoming\",\n \"60\": \"United States\",\n \"61\": \"Interstate Compact\",\n \"62\": \"Philippines\",\n \"63\": \"Indian\",\n \"64\": \"Dakota\",\n}\nREVERSE_STATES = {v: k for k, v in STATES.items()}\nSTATE_ABBRV = {\n \"Alabama\": \"AL\",\n \"Alaska\": \"AK\",\n \"American Samoa\": \"AS\",\n \"Arizona\": \"AZ\",\n \"Arkansas\": \"AR\",\n \"California\": \"CA\",\n \"Colorado\": \"CO\",\n \"Connecticut\": \"CT\",\n \"Delaware\": \"DE\",\n \"District of Columbia\": \"DC\",\n \"Florida\": \"FL\",\n \"Georgia\": \"GA\",\n \"Guam\": \"GU\",\n \"Hawaii\": \"HI\",\n \"Idaho\": \"ID\",\n \"Illinois\": \"IL\",\n \"Indiana\": \"IN\",\n \"Iowa\": \"IA\",\n \"Kansas\": \"KS\",\n \"Kentucky\": \"KY\",\n \"Louisiana\": \"LA\",\n \"Maine\": \"ME\",\n \"Marshall Islands\": \"MP\",\n \"Maryland\": \"MD\",\n \"Massachusetts\": \"MA\",\n \"Michigan\": \"MI\",\n \"Minnesota\": \"MN\",\n \"Mississippi\": \"MS\",\n \"Missouri\": \"MO\",\n \"Montana\": \"MT\",\n \"Nebraska\": \"NE\",\n \"Nevada\": \"NV\",\n \"New Hampshire\": \"NH\",\n \"New Jersey\": \"NJ\",\n \"New Mexico\": \"NM\",\n \"New York\": \"NY\",\n \"North Carolina\": \"NC\",\n \"North Dakota\": \"ND\",\n \"Ohio\": \"OH\",\n \"Oklahoma\": \"OK\",\n \"Oregon\": \"OR\",\n \"Pennsylvania\": \"PA\",\n \"Puerto Rico\": \"PR\",\n \"Rhode Island\": \"RI\",\n \"South Carolina\": \"SC\",\n \"South Dakota\": \"SD\",\n \"Tennessee\": \"TN\",\n \"Texas\": \"TX\",\n \"Utah\": \"UT\",\n \"Vermont\": \"VT\",\n \"Virgin Islands\": \"VI\",\n \"Virginia\": \"VA\",\n \"Washington\": \"WA\",\n \"West Virginia\": \"WV\",\n \"Wisconsin\": \"WI\",\n \"Wyoming\": \"WY\",\n \"United States\": \"US\",\n}\n\n\nMULTIPLE_STATE_CASES = {\n \"1990-106-01\": ([\"Oklahoma\", \"Texas\"], [\"New Mexico\"]),\n \"1992-055-01\": ([\"Nebraska\"], [\"Wyoming\", \"Colorado\"]),\n \"1993-010-01\": ([\"Oklahoma\", \"Texas\"], [\"New Mexico\"]),\n \"1994-062-01\": ([\"Nebraska\"], [\"Wyoming\", \"Colorado\"]),\n \"2001-003-01\": ([\"Nebraska\"], [\"Wyoming\", \"Colorado\"]),\n \"2002-056-01\": ([\"Kansas\"], [\"Nebraska\", \"Colorado\"]),\n \"2010-039-01\": ([\"Montana\"], [\"Wyoming\", \"North Dakota\"]),\n \"2014-007-01\": ([\"Kansas\"], [\"Nebraska\", \"Colorado\"]),\n \"2015-073-01\": ([\"Montana\"], [\"Wyoming\", \"North Dakota\"]),\n \"2017-032-01\": ([\"Texas\"], [\"New Mexico\", \"Colorado\"]),\n \"1904-001-01\": ([\"Nebraska\"], [\"Missouri\"]), # This one is undecided winning\n \"1904-003-01\": ([\"Nebraska\"], [\"Missouri\"]), # This one is Nebraska winning\n \"1939-130-01\": ([\"Wisconsin\", \"Minnesota\", \"Ohio\", \"Pennsylvania\"], [\"Illinois\"]),\n \"1940-042-01\": ([\"Wisconsin\", \"Minnesota\", \"Ohio\", \"Pennsylvania\"], [\"Illinois\"]),\n \"2009-060-01\": ([\"Alabama\", \"Florida\", \"Tennessee\", \"Virginia\"], [\"North Carolina\"]),\n\n \"1974-062-01\": ([\"United States\"], [\"Maine\", \"New Hampshire\", \"Massachusetts\", \"Rhode Island\", \"New York\", \"New Jersey\",\n \"Delaware\", \"Maryland\", \"Virginia\", \"North Carolina\", \"South Carolina\", \"Georgia\",\n \"Florida\"]),\n \"1975-001-01\": ([\"United States\"], [\"Maine\", \"New Hampshire\", \"Massachusetts\", \"Rhode Island\", \"New York\", \"New Jersey\",\n \"Delaware\", \"Maryland\", \"Virginia\", \"North Carolina\", \"South Carolina\", \"Georgia\",\n \"Florida\"]),\n \"1984-089-01\": ([\"United States\"], [\"Maine\", \"New Hampshire\", \"Massachusetts\", \"Rhode Island\", \"New York\", \"New Jersey\",\n \"Delaware\", \"Maryland\", \"Virginia\", \"North 
Carolina\", \"South Carolina\", \"Georgia\",\n \"Florida\"]),\n \"1984-028-01\": ([\"United States\"], [\"Maine\", \"New Hampshire\", \"Massachusetts\", \"Rhode Island\", \"New York\", \"New Jersey\",\n \"Delaware\", \"Maryland\", \"Virginia\", \"North Carolina\", \"South Carolina\", \"Georgia\",\n \"Florida\"]),\n\n \"1984-034-01\": ([\"United States\"], [\"Mississippi\", \"Alabama\"]),\n \"1987-039-01\": ([\"United States\"], [\"Mississippi\", \"Alabama\"]),\n \"1990-003-01\": ([\"United States\"], [\"Mississippi\", \"Alabama\"]),\n \"1992-028-01\": ([\"United States\"], [\"Mississippi\", \"Alabama\"]),\n}\n\n\ndef cases():\n for filename in FILENAMES:\n with open(filename, encoding=\"ISO-8859-1\") as fd:\n reader = csv.DictReader(fd)\n for row in reader:\n yield row\n\n\ndef interesting_cases():\n for case in cases():\n if (\n case[\"petitioner\"] in INTERESTING_PARTIES\n and case[\"respondent\"] in INTERESTING_PARTIES\n ):\n yield case\n\n\ndef ci_lower_bound(positive, total):\n # https://www.evanmiller.org/how-not-to-sort-by-average-rating.html\n if total == 0:\n return 0\n z = 1.96\n phat = 1.0 * positive / total\n return (phat + z*z/(2*total) - z * ((phat*(1-phat)+z*z/(4*total))/total)**.5)/(1+z*z/total)\n\n\ndef get_name(party_id, party_state):\n if party_id == \"27\":\n return \"United States\"\n else:\n return STATES[party_state]\n\n\ndef main():\n results = {}\n for case in interesting_cases():\n if case[\"docketId\"] in MULTIPLE_STATE_CASES:\n petitioner_names, respondent_names = MULTIPLE_STATE_CASES[case[\"docketId\"]]\n else:\n petitioner = case[\"petitioner\"]\n respondent = case[\"respondent\"]\n petitioner_state = case[\"petitionerState\"]\n respondent_state = case[\"respondentState\"]\n petitioner_names = [get_name(petitioner, petitioner_state)]\n respondent_names = [get_name(respondent, respondent_state)]\n\n winner = case[\"partyWinning\"]\n\n us_cite = case['usCite']\n if len(us_cite) == 0:\n print(f\"Missing US Cite: {case['lexisCite']}\", file=sys.stderr)\n continue\n us_cite_bits = [int(x) for x in us_cite.split('U.S.')]\n\n decided_date = [int(x) for x in case[\"dateDecision\"].split(\"/\")]\n decided_date = datetime.date(decided_date[2], decided_date[0], decided_date[1])\n\n case_name = case[\"caseName\"].title().replace(' V. ', ' v. 
')\n\n citation_name = f'{case_name}, {us_cite} ({decided_date.year})'\n citation = {\"name\": citation_name, \"us\": us_cite_bits}\n\n result_keys = [\n tuple(sorted((petitioner_name, respondent_name)))\n for petitioner_name in petitioner_names\n for respondent_name in respondent_names\n ]\n\n for result_key in result_keys:\n if result_key not in results:\n results[result_key] = [[], [], []]\n\n petitioner_idxes = [\n result_key.index(petitioner_name) if petitioner_name in result_key else None\n for petitioner_name in petitioner_names\n for result_key in result_keys\n ]\n respondent_idxes = [\n result_key.index(respondent_name) if respondent_name in result_key else None\n for respondent_name in respondent_names\n for result_key in result_keys\n ]\n\n if winner == \"0\":\n # petitioner lost\n for result_key, respondent_idx in zip(result_keys, respondent_idxes):\n if respondent_idx is None:\n continue\n results[result_key][respondent_idx].append(citation)\n elif winner == \"1\":\n # petitioner won\n for result_key, petitioner_idx in zip(result_keys, petitioner_idxes):\n if petitioner_idx is None:\n continue\n results[result_key][petitioner_idx].append(citation)\n else:\n # inconclusive\n for result_key in result_keys:\n results[result_key][2].append(citation)\n\n # from pprint import pprint\n # pprint(results)\n\n # build the json-compatible, denormalized data structure\n\n json_results = {}\n for state_pair in results:\n state1, state2 = state_pair\n state1_code = STATE_ABBRV[state1]\n state2_code = STATE_ABBRV[state2]\n\n if state1_code == \"DC\" or state2_code == \"DC\":\n continue\n\n for state in (state1_code, state2_code):\n if state not in json_results:\n json_results[state] = {}\n cases = results[state_pair]\n state1_won, state2_won, undecided = cases\n\n if state2_code not in json_results[state1_code]:\n json_results[state1_code][state2_code] = {\n \"name\": state2,\n \"wins\": [],\n \"losses\": [],\n \"undecided\": [],\n }\n if state1_code not in json_results[state2_code]:\n json_results[state2_code][state1_code] = {\n \"name\": state1,\n \"wins\": [],\n \"losses\": [],\n \"undecided\": [],\n }\n\n json_results[state1_code][state2_code][\"wins\"] += state1_won\n json_results[state1_code][state2_code][\"losses\"] += state2_won\n json_results[state1_code][state2_code][\"undecided\"] += undecided\n\n json_results[state2_code][state1_code][\"wins\"] += state2_won\n json_results[state2_code][state1_code][\"losses\"] += state1_won\n json_results[state2_code][state1_code][\"undecided\"] += undecided\n\n for state1 in json_results:\n for state2 in json_results[state1]:\n state2_obj = json_results[state1][state2]\n win_cnt = len(state2_obj[\"wins\"])\n loss_cnt = len(state2_obj[\"losses\"])\n undecided_cnt = len(state2_obj[\"undecided\"])\n total_cnt = win_cnt + loss_cnt + undecided_cnt\n\n if win_cnt + loss_cnt == 0:\n win_pct = 0.0\n else:\n win_pct = (win_cnt - loss_cnt) / (win_cnt + loss_cnt)\n win_offset = round((win_pct+1)*127.5)\n\n state2_obj[\"win_pct\"] = win_pct\n state2_obj[\"win_offset\"] = win_offset\n state2_obj[\"ci_lower_bound\"] = ci_lower_bound(win_cnt, total_cnt)\n\n for idx, state2 in enumerate(sorted(json_results[state1].items(), key=lambda x: x[1][\"ci_lower_bound\"], reverse=True)):\n state2[1][\"ci_order\"] = idx\n\n return json_results\n\n\nif __name__ == \"__main__\":\n print(json.dumps(main(), 
indent=2))\n","repo_name":"dgilman/scotuswars","sub_path":"scotuswars.py","file_name":"scotuswars.py","file_ext":"py","file_size_in_byte":11692,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"43943384795","text":"import sys\n\nfrom struct import pack, unpack\nfrom .types import WLObject, objects\n\n# NOTE: the next lines were lost to extraction damage; the values are reconstructed from the\n# Wayland wire format (32-bit little-endian words, 8-byte message header) and the usage below.\nLITTLE_UINT = \"<I\"\nHEADER_SIZE = 8\nWORD_SIZE = 4\n\n\ndef pack_args(*vals) -> bytes: # function name reconstructed\n \"\"\"\n Pack arguments into byte arrays.\n \n See https://wayland.freedesktop.org/docs/html/ch04.html#sect-Protocol-Wire-Format\n\n :param vals: values to be packed.\n :return:\n \"\"\"\n ret = bytearray()\n for val in vals:\n val_type = type(val)\n if val_type is int:\n ret += pack(LITTLE_UINT, val)\n elif val_type is str:\n val += '\\0'\n s = pack(LITTLE_UINT, len(val)) + val.encode(\"utf8\")\n ret += s\n mod = len(s) % WORD_SIZE\n if mod != 0:\n ret += bytearray(WORD_SIZE - mod)\n elif isinstance(val, WLObject):\n ret += pack(LITTLE_UINT, val.id)\n else:\n raise Exception(f\"Unsupported type {val_type}\")\n return ret\n\n\ndef wl_msg(obj_id, opcode, msg) -> bytes:\n \"\"\"\n Create Wayland message.\n \n See https://wayland.freedesktop.org/docs/html/ch04.html#sect-Protocol-Wire-Format\n \"\"\"\n msg_len = len(msg)\n if msg_len % WORD_SIZE != 0:\n raise Exception('Invalid message length')\n header = pack(LITTLE_UINT, obj_id)\n header += pack(LITTLE_UINT, (msg_len + HEADER_SIZE) << 16 | (opcode & 0x0000ffff))\n return header + msg\n\n\ndef read_msg(con):\n \"\"\"\n Read and unpack message.\n \"\"\"\n raw1 = con.recv(6)\n if len(raw1) == 0:\n return None\n raw2 = con.recv(2)\n # Reconstructed: raw1 holds the object id (4 bytes) plus opcode (2 bytes);\n # raw2 holds the 16-bit message size from the second header word.\n msg_len = unpack('<H', raw2)[0]\n obj_id = unpack(LITTLE_UINT, raw1[:4])[0]\n opcode = unpack('<H', raw1[4:6])[0]\n raw_args = con.recv(msg_len - HEADER_SIZE)\n return obj_id, opcode, raw_args\n\n\ndef invoke(msg) -> bool:\n \"\"\"\n Decode an event message and dispatch it to its object.\n \"\"\"\n if msg is None:\n return False\n obj_id, opcode, raw_args = msg\n obj = objects[obj_id]\n name, type_codes = obj.events[opcode]\n args = []\n i = 0\n\n def incr():\n nonlocal i\n i += WORD_SIZE\n return i\n\n for t in type_codes:\n if t in 'iu': # int or unsigned int\n fmt = '<i' if t == 'i' else '<I' # reconstructed: signed vs. unsigned word\n args.append(unpack(fmt, raw_args[i:incr()])[0])\n # decoding of the remaining argument type codes was lost in extraction\n getattr(obj, name)(*args) # reconstructed dispatch: assumes a handler named after the event\n return True\n\n\ndef read_and_invoke(con) -> bool: # function name reconstructed\n \"\"\"\n Read Wayland message from connection and execute.\n\n :param con: the connection socket.\n :return:\n \"\"\"\n return invoke(read_msg(con))\n","repo_name":"andirady/pyland","sub_path":"pyland/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"26332404021","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pickle\r\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\r\nimport datetime\r\n\r\n# Define the preprocess_inputs function\r\ndef preprocess_inputs(date, store_nbr, family, onpromotion, city, store_type, cluster, transactions, dcoilwtico, holiday_type, num_cols):\r\n # Combine the inputs into a dictionary\r\n inputs = {'date': date, 'store_nbr': store_nbr, 'family': family, 'onpromotion': onpromotion, 'city': city, 'store_type': store_type, 'cluster': cluster, 'transactions': transactions, 'dcoilwtico': dcoilwtico, 'holiday_type': holiday_type}\r\n\r\n # Create a pandas DataFrame from the dictionary\r\n input_df = pd.DataFrame(inputs, index=[0])\r\n scaler = StandardScaler()\r\n input_df[num_cols] = scaler.fit_transform(input_df[num_cols])\r\n\r\n # Load the fitted OneHotEncoder from disk\r\n with open('encoder.pkl', 'rb') as file:\r\n encoder = pickle.load(file)\r\n\r\n cat_cols = ['store_nbr', 'family', 'city', 'store_type', 'cluster', 'holiday_type']\r\n encoded_data = encoder.transform(input_df[cat_cols]).toarray()\r\n encoded_df = pd.DataFrame(encoded_data, columns=encoder.get_feature_names(cat_cols))\r\n 
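# Combine the scaled numeric columns and the one-hot encoded columns into the final feature frame\r\n    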
final_df = pd.concat([input_df[num_cols], encoded_df], axis=1)\r\n    return final_df\r\n\r\n# Define the predict_sales function\r\n\r\ndef predict_sales(date, store_nbr, family, onpromotion, city, store_type, cluster, transactions, dcoilwtico, holiday_type):\r\n    # preprocess the inputs\r\n    num_cols = ['onpromotion', 'transactions', 'dcoilwtico']\r\n    inputs = preprocess_inputs(date, store_nbr, family, onpromotion, city, store_type, cluster, transactions, dcoilwtico, holiday_type, num_cols)\r\n\r\n    # Load the saved model from disk\r\n    with open('best_model.pkl', 'rb') as file:\r\n        model = pickle.load(file)\r\n\r\n    # initialize prediction to None\r\n    prediction = None\r\n    if model is not None:\r\n        # predict using the model\r\n        prediction = model.predict(inputs)\r\n\r\n    return prediction\r\n\r\ndef app():\r\n    # Add a title\r\n    # Set the page title and header\r\n    st.set_page_config(page_title='Sales Prediction App', page_icon=':bar_chart:', layout='wide')\r\n    st.title('Sales Prediction App')\r\n\r\n    # Add a subtitle\r\n    st.subheader(\"Enter the details to predict sales\")\r\n\r\n    # Add the input fields\r\n    date = st.date_input(\"Date\", datetime.date(2017, 1, 1))\r\n    store_nbr = st.selectbox(\"Store number\", [i for i in range(1, 55)])\r\n    family = st.selectbox(\"Family\", ['Others', 'Food', 'Beverages', 'Personal Care', 'Clothing', 'Home and Kitchen'])\r\n    #sales = st.number_input(\"Sales\")\r\n    onpromotion = st.selectbox(\"On Promotion\", [i for i in range(0, 200)])\r\n    city = st.selectbox(\"City\", ['Quito', 'Santo Domingo', 'Cayambe', 'Latacunga', 'Riobamba', 'Ibarra', 'Guaranda', 'Ambato', 'Puyo', 'Loja', 'Machala', 'Cuenca'])\r\n    store_type = st.selectbox(\"Store type\", ['A', 'D', 'B', 'C', 'E'])\r\n    cluster = st.selectbox(\"Cluster\", [i for i in range(1, 18)])\r\n    transactions = st.number_input(\"Transactions\")\r\n    dcoilwtico = st.number_input(\"Crude Oil Price\")\r\n    holiday_type = st.selectbox(\"Holiday Type\", ['Holiday', 'Additional', 'Transfer'])\r\n\r\n    # Add a button to predict the sales\r\n    if st.button(\"Predict\"):\r\n        prediction = predict_sales(date, store_nbr, family, onpromotion, city, store_type, cluster, transactions, dcoilwtico, holiday_type)\r\n        if prediction is not None:\r\n            st.write(\"The predicted sales for the given input is:\", round(prediction[0], 2))\r\n\r\nif __name__ == '__main__':\r\n    app()\r\n","repo_name":"AlbieCofie/Forecast_sales_with_streamlit_app_.","sub_path":"Ecuador_grocery10.py","file_name":"Ecuador_grocery10.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"9607321041","text":"# -*- coding:utf-8 -*-\n#Author:lyc\nimport os, sys\n\n# Problem:\n# Implement the int sqrt(int x) function.\n# Compute and return the square root of x, where x is a non-negative integer.\n# Since the return type is an integer, only the integer part of the result is kept; the fractional part is discarded.\n#\n# Example 1:\n# Input: 4\n# Output: 2\n#\n# Example 2:\n# Input: 8\n# Output: 2\n# Note: the square root of 8 is 2.82842...;\n# since the return type is an integer, the fractional part is discarded.\n\nclass Solution:\n    def mySqrt(self, x: int) -> int:\n        \"\"\"\n        Brute force\n        :param x:\n        :return:\n        \"\"\"\n        if x == 0: return 0\n\n        result = 0\n        # grow the candidate until its square exceeds x\n        for i in range(1,x+2):\n            result = i*2\n            if pow(result,2) > x:\n                break\n\n        # once it overshoots, step back down one at a time\n        while pow(result,2) > x:\n            result -= 1\n\n        return result\n\n\nclass Solution:\n    def mySqrt(self, x: int) -> int:\n        \"\"\"\n        Binary search\n        :param x:\n        :return:\n        \"\"\"\n        l,r,res = 0,x,-1\n        while l <= r:\n            mid = (l+r) // 2\n            if pow(mid,2) <= x:\n                res = mid\n                l = mid + 1\n            else:\n                r = mid - 1\n        return res\n\n\nclass Solution:\n    def mySqrt(self, x: int) -> int:\n        \"\"\"\n        Newton's method\n        :param x:\n        
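Iterates x_{k+1} = 0.5 * (x_k + C / x_k), the Newton step for f(x) = x^2 - C, which converges to sqrt(C).\n        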
:return:\n \"\"\"\n if x == 0:return 0\n C, res = float(x),float(x)\n while True:\n tmp = 0.5*(res + C/res)\n if res - tmp < 0.00000001:\n break\n res = tmp\n return int(res)\n\n\nresult = Solution()\nn = 8\nprint(result.mySqrt(n))\n\n\n","repo_name":"g-lyc/LeetCode","sub_path":"Python/t69.py","file_name":"t69.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"21736832238","text":"# This app will use your built-in webcam to control your slides presentation.\r\n# For a one-handed presentation, use Gesture 1 (thumbs up) to go to the previous slide and Gesture 2 (whole hand pointing up) to go to the next slide.\r\n\r\nimport win32com.client\r\nfrom cvzone.HandTrackingModule import HandDetector\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\nimport aspose.slides as slides\r\nimport aspose.pydrawing as drawing\r\nApplication = win32com.client.Dispatch(\"PowerPoint.Application\" )\r\nPresentation = Application.Presentations.Open(\"C:\\\\Users\\Zouaoui\\Documents\\programmation\\python\\zani.pptx\")\r\nprint(Presentation.Name)\r\nPresentation.SlideShowSettings.Run()\r\n# Parameters\r\nwidth, height = 900, 720\r\ngestureThreshold = 300\r\n# Camera Setup\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, width)\r\ncap.set(4, height) \r\n# Hand Detector\r\ndetectorHand = HandDetector(detectionCon=0.8, maxHands=1)\r\n# Variables\r\nimgList = []\r\ndelay = 30\r\nbuttonPressed = False\r\ncounter = 0\r\ndrawMode = False\r\nimgNumber = 20\r\ndelayCounter = 0\r\nannotations = [[]]\r\nannotationNumber = -1\r\nannotationStart = False\r\nwhile True:\r\n # Get image frame\r\n success, img = cap.read()\r\n # Find the hand and its landmarks\r\n hands, img = detectorHand.findHands(img) # with draw\r\n if hands and buttonPressed is False: # If hand is detected\r\n hand = hands[0]\r\n cx, cy = hand[\"center\"]\r\n lmList = hand[\"lmList\"] # List of 21 Landmark points\r\n fingers = detectorHand.fingersUp(hand) # List of which fingers are up\r\n if cy <= gestureThreshold: # If hand is at the height of the face\r\n if fingers == [1, 1, 1, 1, 1]:\r\n print(\"Next\")\r\n buttonPressed = True\r\n if imgNumber > 0:\r\n Presentation.SlideShowWindow.View.Next()\r\n imgNumber -= 1\r\n annotations = [[]]\r\n annotationNumber = -1\r\n annotationStart = False\r\n if fingers == [1, 0, 0, 0, 0]:\r\n print(\"Previous\")\r\n buttonPressed = True\r\n if imgNumber >0 :\r\n Presentation.SlideShowWindow.View.Previous()\r\n imgNumber += 1\r\n annotations = [[]]\r\n annotationNumber = -1\r\n annotationStart = False\r\n \r\n else:\r\n annotationStart = False\r\n \r\n if buttonPressed:\r\n counter += 1\r\n if counter > delay:\r\n counter = 0\r\n buttonPressed = False\r\n \r\n for i, annotation in enumerate(annotations):\r\n for j in range(len(annotation)):\r\n if j != 0:\r\n cv2.line(imgCurrent, annotation[j - 1], annotation[j], (0, 0, 200), 12)\r\n \r\n cv2.imshow(\"Image\", img)\r\n \r\n key = cv2.waitKey(1)\r\n if key == ord('q'):\r\n break\r\n\r\n","repo_name":"ranizouaoui/PPT-Presentation-controlled-by-hand-gesture","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"27380869181","text":"import pathlib, re, urllib.parse\n\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import call_command\nfrom django.conf import settings\n\nfrom hunt.app.core.assets.refs 
import hash_asset_type, from_bytes, to_bytes\nfrom hunt.data_loader.puzzle import get_all_puzzle_data\nfrom hunt.data_loader.round import get_all_round_data\nfrom hunt.data_loader.chunks import get_all_shared_chunks\nfrom hunt.data_loader.auxiliary import get_all_auxiliary_files\n\nfrom ._common import confirm_command\n\n_PUZZLE_FILES_TO_SKIP = [\n pathlib.PurePath('config.json'),\n pathlib.PurePath('metadata.json'),\n pathlib.PurePath('hints.json')\n]\n\n_REWRITE_CHUNKS_PATTERN = re.compile(r\"\"\"\\\"/(?Pchunks/chunk-[^.\"]+.js)\\\"\"\"\")\n\n_SKIP_EXTENSIONS = ('.tmpl', '.ts', '.scss', '.py')\n\n_GZIP_EXTENSIONS = ('.html', '.js', '.css')\n_GZIP_SUBFOLDER = 'gzip'\n_NOT_GZIP_SUBFOLDER = 'not-gzip'\n\nclass Command(BaseCommand):\n help = 'Collects all puzzle and round files to the static temp directory, sharded by gzippable and not gzippable'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--noinput', action='store_false', dest='interactive',\n help=\"Do NOT prompt the user for input of any kind.\",\n )\n parser.add_argument(\n '--gzip-files', action='store', dest='gzip_files',\n default='true', help=\"Whether to split some files out as gzippable.\",\n )\n\n def handle(self, *args, **options):\n if options['interactive'] and not confirm_command('This will first delete all files from the static temp directory!'):\n return\n\n gzip_files = options['gzip_files'] == 'true'\n\n call_command('collectstatic', interactive=False, clear=True)\n call_command('removepuzzlefiles', interactive=False)\n\n _collect_shared_chunks(debug=options['interactive'], gzip_files=gzip_files)\n _collect_static(debug=options['interactive'], gzip_files=gzip_files)\n _collect_static_temp(\n get_all_puzzle_data(), 'puzzle', debug=options['interactive'], gzip_files=gzip_files)\n _collect_static_temp(\n get_all_round_data(), 'round', debug=options['interactive'], gzip_files=gzip_files)\n _collect_static_temp(\n get_all_auxiliary_files(), 'auxiliary', debug=options['interactive'], gzip_files=gzip_files)\n\ndef _collect_static_temp(data_path, asset_type, *, debug, gzip_files):\n gzip_static_dest_path = pathlib.Path(settings.HUNT_ASSETS_TEMP_FOLDER, _GZIP_SUBFOLDER, settings.HUNT_ASSETS_STATIC_PREFIX)\n not_gzip_static_dest_path = pathlib.Path(settings.HUNT_ASSETS_TEMP_FOLDER, _NOT_GZIP_SUBFOLDER, settings.HUNT_ASSETS_STATIC_PREFIX)\n\n count = 0\n for path in data_path.rglob('*'):\n if path.is_dir():\n continue\n\n if any(path.name.endswith(raw_ext) for raw_ext in _SKIP_EXTENSIONS):\n continue\n\n relative_path = path.relative_to(data_path)\n relative_path_parts = relative_path.parts\n assert len(relative_path_parts) >= 2\n\n asset_url = relative_path_parts[0]\n\n variant = asset_type\n resource_path_parts = relative_path_parts[1:]\n is_gzippable = any(path.name.endswith(raw_ext) for raw_ext in _GZIP_EXTENSIONS)\n should_gzip = is_gzippable and gzip_files\n\n if resource_path_parts[0] == 'solution':\n variant = 'solution'\n resource_path_parts = resource_path_parts[1:]\n elif asset_type == 'puzzle' and resource_path_parts[0] == 'posthunt':\n variant = 'posthunt'\n resource_path_parts = resource_path_parts[1:]\n elif resource_path_parts[0] == '__build':\n continue\n\n assert len(resource_path_parts)\n\n resource_path = pathlib.PurePath(*resource_path_parts)\n if not _should_copy_file(\n asset_type=asset_type, variant=variant, resource_path=resource_path):\n continue\n\n key = hash_asset_type(asset_type, asset_url, variant)\n if debug:\n print(f'Processing {asset_type} {asset_url}{\" solution\" if variant else 
\"\"}: {resource_path} (key={key})')\n\n base_path = gzip_static_dest_path if should_gzip else not_gzip_static_dest_path\n dest = base_path.joinpath(key, resource_path)\n dest.parent.mkdir(parents=True, exist_ok=True)\n\n assert not dest.exists()\n content = path.read_bytes()\n rewritten_content = _maybe_rewrite(path.name, content)\n dest.write_bytes(rewritten_content)\n count += 1\n print(f'Processed {count} {asset_type} files')\n\ndef _collect_shared_chunks(*, debug, gzip_files):\n data_path = get_all_shared_chunks()\n\n gzip_static_dest_path = pathlib.Path(settings.HUNT_ASSETS_TEMP_FOLDER, _GZIP_SUBFOLDER, settings.HUNT_DATA_PACKAGE_CHUNKS)\n not_gzip_static_dest_path = pathlib.Path(settings.HUNT_ASSETS_TEMP_FOLDER, _NOT_GZIP_SUBFOLDER, settings.HUNT_DATA_PACKAGE_CHUNKS)\n base_path = gzip_static_dest_path if gzip_files else not_gzip_static_dest_path\n\n count = 0\n for path in data_path.rglob('*'):\n relative_path = path.relative_to(data_path)\n dest = base_path.joinpath(relative_path)\n dest.parent.mkdir(parents=True, exist_ok=True)\n\n assert not dest.exists()\n content = path.read_bytes()\n rewritten_content = _maybe_rewrite(path.name, content)\n dest.write_bytes(rewritten_content)\n count += 1\n print(f'Processed {count} chunks')\n\ndef _collect_static(*, debug, gzip_files):\n data_path = pathlib.Path(settings.STATIC_ROOT)\n gzip_static_dest_path = pathlib.Path(settings.HUNT_ASSETS_TEMP_FOLDER, _GZIP_SUBFOLDER)\n not_gzip_static_dest_path = pathlib.Path(settings.HUNT_ASSETS_TEMP_FOLDER, _NOT_GZIP_SUBFOLDER)\n\n count = 0\n for path in data_path.rglob('*'):\n if path.is_dir():\n continue\n\n is_gzippable = any(path.name.endswith(raw_ext) for raw_ext in _GZIP_EXTENSIONS)\n should_gzip = is_gzippable and gzip_files\n base_path = gzip_static_dest_path if should_gzip else not_gzip_static_dest_path\n\n relative_path = path.relative_to(data_path)\n dest = base_path.joinpath(relative_path)\n dest.parent.mkdir(parents=True, exist_ok=True)\n\n assert not dest.exists()\n content = path.read_bytes()\n dest.write_bytes(content)\n count += 1\n print(f'Processed {count} assets')\n\ndef _should_copy_file(*, asset_type, variant, resource_path):\n # Note: we could skip index.html/style.css/round_common.css (or\n # their compiled versions) as these are served by Django. However, it\n # doesn't hurt to deploy them, and deploying could be useful. For example,\n # if index.html embeds another HTML file in an iframe, then that iframed\n # file could reuse style.css.\n if asset_type == 'puzzle' and variant == 'puzzle':\n return resource_path not in _PUZZLE_FILES_TO_SKIP\n return True\n\ndef _maybe_rewrite(name, content):\n # Rewrite JS references to chunks. esbuild is annoying and won't let me use\n # absolute paths to chunks. 
So when we use static serving, we need to rewrite\n # them ourselves.\n if name.endswith('.js'):\n return to_bytes(_REWRITE_CHUNKS_PATTERN.sub(_replace_js_chunk_refs, from_bytes(content)))\n return content\n\ndef _replace_js_chunk_refs(match):\n static_root = settings.STATIC_URL\n named_matches = match.groupdict()\n static_path = named_matches['static_path']\n return f'\"{urllib.parse.urljoin(static_root, static_path)}\"'\n","repo_name":"Palindrome-Puzzles/2022-hunt","sub_path":"hunt/deploy/management/commands/collectpuzzlefiles.py","file_name":"collectpuzzlefiles.py","file_ext":"py","file_size_in_byte":7509,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"13175581830","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n Name : c10_17_calendarSpread.py\n Book : Python for Finance (2nd ed.)\n Publisher: Packt Publishing Ltd. \n Author : Yuxing Yan\n Date : 6/6/2017\n email : yany@canisius.edu\n paulyxy@hotmail.com\n\"\"\"\n\nimport p4f\nimport numpy as np\nimport matplotlib.pyplot as plt\nsT = np.arange(20,70,5) \ns=40;x=40;T1=0.5;T2=1;sigma=0.3;r=0.05\npayoff=(abs(sT-x)+sT-x)/2 \ncall_01=p4f.bs_call(s,x,T1,r,sigma)\t# short \ncall_02=p4f.bs_call(s,x,T2,r,sigma)\t# long\n\nprofit_01=payoff-call_01 \ncall_03=p4f.bs_call(sT,x,(T2-T1),r,sigma) \ncalendar_spread=call_03-payoff+call_01 -call_02 \ny0=np.zeros(len(sT))\nplt.ylim(-20,20) \nplt.xlim(20,60) \nplt.plot(sT,call_03,'b-.')\nplt.plot(sT,call_02-call_01-payoff,'b-.') \nplt.plot(sT,calendar_spread,'r') \nplt.plot([x,x],[-20,-15])\nplt.title(\"Calendar spread with calls\") \nplt.xlabel('Stock price at maturity (sT)') \nplt.ylabel('Profit (loss)')\nplt.annotate('Buy a call with T1\tand sell a call with T2', xy=(25,16)) \nplt.annotate('where T1 int:\n product, maxp = 1, nums[0]\n #from left to right\n for num in nums:\n product *= num\n maxp = max(maxp, product)\n if num == 0:\n product = 1\n\n #from right to left\n product = 1\n for i in range(len(nums)-1, -1, -1):\n product *= nums[i]\n maxp = max(maxp, product)\n if nums[i] == 0:\n product = 1\n\n return maxp\n","repo_name":"yangmingxuan/pythonalgorithms","sub_path":"arrayandsort/MaximumProductSubarray.py","file_name":"MaximumProductSubarray.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19484199421","text":"\"\"\"\nYou are given an integer array nums. 
The unique elements of an array are the elements that appear exactly once in the array.\n\nReturn the sum of all the unique elements of nums.\n#leetcode\n\"\"\"\nclass Solution:\n def sumOfUnique(self, nums: List[int]) -> int:\n result=0\n for i in nums:\n if nums.count(i)==1:\n result=result+i\n return result\n \n\"\"\"\nInput: nums = [1,2,3,2]\nOutput: 4\nExplanation: The unique elements are [1,3], and the sum is 4.\n\"\"\"\n","repo_name":"varshachary/MyPracticeProblems","sub_path":"unique_sum.py","file_name":"unique_sum.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"35156058634","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 19 16:05:53 2017\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom grabscreen import grab_screen\r\nimport time\r\nfrom getkeys import key_check\r\nimport os\r\n\r\n# Map pressed keys to a one-hot output vector\r\ndef keys_to_output(keys):\r\n #[W,S,I,J,K,L,1]\r\n output = [0,0,0,0,0,0,0]\r\n \r\n if 'W' in keys:\r\n output[0] = 1\r\n elif 'S' in keys:\r\n output[1] = 1\r\n elif 'I' in keys:\r\n output[2] = 1\r\n elif 'J' in keys:\r\n output[3] = 1\r\n elif 'K' in keys:\r\n output[4] = 1\r\n elif 'L' in keys:\r\n output[5] = 1\r\n elif '1' in keys:\r\n output[6] = 1\r\n \r\n return output\r\n\r\n# Load previously collected training data if it exists\r\nfile_name = 'training_data.npy'\r\n\r\nif os.path.isfile(file_name):\r\n print(\"File exists, loading data!\")\r\n training_data = list(np.load(file_name))\r\nelse:\r\n print(\"starting a new one\")\r\n training_data = []\r\n\r\ndef main(): \r\n for i in range(4):\r\n print(i+1)\r\n time.sleep(1)\r\n \r\n# last_time = time.time()\r\n paused = False\r\n \r\n while(True):\r\n \r\n if not paused:\r\n \r\n printscreen = grab_screen(region=(0,40,1260,750))\r\n printscreen = cv2.cvtColor(printscreen, cv2.COLOR_BGR2GRAY)\r\n printscreen = cv2.resize(printscreen,(300,179))\r\n \r\n keys = key_check()\r\n output = keys_to_output(keys)\r\n training_data.append([printscreen, output])\r\n # print('respond time: ', time.time()-last_time)\r\n # last_time = time.time()\r\n \r\n if len(training_data) % 500 == 0:\r\n print(len(training_data))\r\n if len(training_data) % 4500 ==0:\r\n np.save(file_name, training_data)\r\n print(\"Saved, please re\")\r\n \r\n if 'T' in keys:\r\n if paused:\r\n paused = False\r\n print('Start again')\r\n time.sleep(2)\r\n else:\r\n paused = True\r\n print('Paused')\r\n time.sleep(2)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Jimsparkle/AI_Starwars_X-wing","sub_path":"collecting_data.py","file_name":"collecting_data.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"}
+{"seq_id":"37620725575","text":"# Write a program that reads an integer smaller than 1000 and prints how many hundreds,\n# tens and units it contains.\n# Mind the plural forms, the placement of \"e\" (and), commas, and so on. Example:\n# 326 = 3 centenas, 2 dezenas e 6 unidades\n# 12 = 1 dezena e 2 unidades. Test with: 326, 300, 100, 320, 310, 305,\n# 301, 101, 311, 111, 25, 20, 10, 21, 11, 1, 7 and 16\n\n\nnumero = int(input(\"Digite um numero inteiro menor que 1000\\n\"))\n\ncentena = numero//100\ndezena = (numero%100)//10\nunidade = (numero%100)%10\n\ndef centenas (a):\n if centena == 1:\n return \"centena\"\n else:\n return \"centenas\"\n\ndef dezenas (a):\n if dezena == 1:\n return \"dezena\"\n elif dezena > 1:\n return \"dezenas\"\n\n\ndef unidades (a):\n if unidade == 1:\n return \"unidade\"\n elif unidade > 1:\n return \"unidades\"\n\nnum1 = centenas(numero)\nnum2 = dezenas(numero)\nnum3 = unidades(numero)\n\nif (numero%100) == 0 and numero>=100:\n print (numero, \"=\", centena, num1)\nelif numero <10:\n print(numero, \"=\", unidade, num3)\nelif numero <100:\n if numero%10 == 0:\n print (numero, \"=\", dezena, num2)\n else:\n print (numero, \"=\", dezena, num2, \"e\", unidade, num3)\nelse:\n if (numero%100) >= 10:\n if (numero%100)%10 == 0:\n print (numero, \"=\", centena, num1, \"e \", dezena, num2)\n else:\n print (numero, \"=\", centena, num1, \",\" , dezena, num2, \"e\", unidade, num3 )\n else:\n print (numero, \"=\", centena, num1, \"e\", unidade, num3 )\n","repo_name":"samuelcm/estrutura_decisao","sub_path":"dezenas.py","file_name":"dezenas.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"19865321396","text":"from sklearn.metrics import roc_curve, auc, roc_auc_score\nfrom keras.preprocessing.sequence import pad_sequences\nimport itertools\nimport matplotlib.pyplot as plt\nimport utils\nimport keras\nimport train\nfrom sklearn.model_selection import train_test_split\n\nFLAGS = train.FLAGS\n\ndataframe = utils.load_dataset_with_random_corruption(FLAGS.targets)\ndataframe = dataframe.sample(frac=1).reset_index(drop=True)\n\n# Data preprocessing\ndataframe[0] = utils.encode_instruction_to_id(dataframe[0])\ndataframe[1] = utils.encode_instruction_to_id(dataframe[1])\n\nX = dataframe[[0, 1]]\nY = dataframe[2].astype(int)\n\n_, X_validation, _, Y_validation = train_test_split(X, Y, test_size=0.33)\n\nX_validation = {\n \"left\": X_validation[0],\n \"right\": X_validation[1],\n}\n# Zero padding\nfor dataset, side in itertools.product([X_validation], [\"left\", \"right\"]):\n dataset[side] = pad_sequences(dataset[side], maxlen=FLAGS.max_seq_len)\n\nY_validation = Y_validation.values\n\n# Load a trained model\nexponent_neg_manhattan_distance = utils.exponent_neg_manhattan_distance\ninnereye_bb = keras.models.load_model(FLAGS.saved_weights)\n\n# Make predictions\npred = innereye_bb.predict([X_validation[\"left\"], X_validation[\"right\"]])\n\nfpr, tpr, _ = roc_curve(Y_validation, pred, pos_label=1)\nroc_auc = auc(fpr, tpr) * 100\n\ntry:\n print(f\"AUC: {roc_auc_score(Y_validation, pred)}\")\nexcept Exception:\n pass\n\nplt.figure()\nplt.plot(\n fpr,\n tpr,\n color=\"red\",\n linewidth=1.2,\n label=\"Siamese Model (AUC = %0.2f%%)\" % roc_auc,\n)\n\nplt.plot([0, 1], [0, 1], color=\"silver\", linestyle=\":\", linewidth=1.2)\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel(\"False Positive Rate\", fontsize=12)\nplt.ylabel(\"True Positive Rate\", fontsize=12)\nplt.title(\"Receiver Operating Characteristic (ROC)\")\nplt.legend(loc=\"lower 
right\")\nplt.show()\n","repo_name":"posgnu/innereye","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18833735814","text":"#!/usr/bin/env python\n\n# Node that emulates communication functions of the drivetrain\n# Used for device testing purposes\n#\n# Written by Eric Miers, 1/31/2020\n\nimport rospy\nfrom main_node.msg import comp_stop, light_detected, move_dt, move_arm\n\nclass SDDT(object):\n\t\n\tdef __init__(self):\n\t\t# Initialize the node\n\t\trospy.init_node('software_defined_drivetrain_node')\n\n\t\t# Initiailize subscriber\n\t\trospy.Subscriber('comp_stop', comp_stop, self.compStopCallback)\n\t\trospy.Subscriber('light_detected', light_detected, self.lightDetectedCallback)\n\t\trospy.Subscriber('move_dt', move_dt, self. moveDtCallback)\n\t\t\n\t\t# Initialize Publisher\n\t\tself.moveArmPub = rospy.Publisher('move_arm', move_arm, queue_size=1)\n\n\t# Callback to shutdown the node when competition time expiration message is received\n\tdef compStopCallback(self, msg):\n\t\trospy.loginfo(rospy.get_caller_id() + \": Time has expired. Shutting down...\")\n\t\tif (msg.stop == 1):\n\t\t\texit()\n\n\t# Callback to publish the \"move_arm\" message when light detected message is received\n\tdef lightDetectedCallback(self, msg):\n\t\tif (msg.detected == 1):\n\t\t\trospy.loginfo(rospy.get_caller_id() + \": Light Detected!\")\n\t\t\trospy.sleep(1)\n\t\t\t\n\t\t\tself.moveArmPub.publish(move=1)\n\t\t\n\t# Callback to print \"move_dt\" message contents to terminal when message is received\n\tdef moveDtCallback(self, msg):\n\t\trospy.loginfo(rospy.get_caller_id() + \": Drivetrain instructed to go to digit at index %s\", msg.index)\n\t\t\n\t\t\n\tdef run(self):\n\t\t# spin() simply keeps python from exiting until this node is stopped\n\t\trospy.spin()\n\n\nif __name__ == '__main__':\n node = SDDT()\n node.run()\n","repo_name":"ejmiers/2020-CPEN498-Capstone","sub_path":"main_node/scripts/software_defined_drivetrain_node.py","file_name":"software_defined_drivetrain_node.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73227748106","text":"\"\"\"\nYou are given a doubly linked list which in addition to the next and previous pointers, it could have a child pointer, which may or may not point to a separate doubly linked list. These child lists may have one or more children of their own, and so on, to produce a multilevel data structure, as shown in the example below.\n\nFlatten the list so that all the nodes appear in a single-level, doubly linked list. 
You are given the head of the first level of the list.\n\nInput: head = [1,2,null,3]\nOutput: [1,3,2]\nExplanation:\n\nThe input multilevel linked list is as follows:\n\n 1---2---NULL\n |\n 3---NULL\n\"\"\"\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\nclass Solution:\n def flatten(self, head: 'Node') -> 'Node':\n \n if not head:\n return None\n \n new_node = Node(0, None, head, None)\n pre_order_dfs(new_node, head)\n \n new_node.next.prev = None\n return new_node.next\n \n \ndef pre_order_dfs(prev, curr):\n \n if not curr:\n return prev\n \n else:\n \n curr.prev = prev\n prev.next = curr\n \n temp = curr.next\n\n tail = pre_order_dfs(curr, curr.child)\n curr.child = None\n \n return pre_order_dfs(tail, temp)\n \n ","repo_name":"christian-miljkovic/interview","sub_path":"Leetcode/Algorithms/Medium/LinkedLists/FlattenMultiLevel.py","file_name":"FlattenMultiLevel.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30406180459","text":"def solution(s):\n answer = ''\n arr = list(s.split(\" \"))\n for arr_val in arr:\n for index, value in enumerate(arr_val):\n if index % 2 == 0:\n answer += value.upper()\n else:\n answer += value.lower()\n answer += \" \"\n return answer[:-1]","repo_name":"Manna-na/algorithm-study","sub_path":"프로그래머스/1/12930. 이상한 문자 만들기/이상한 문자 만들기.py","file_name":"이상한 문자 만들기.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34420518567","text":"# -*- coding: utf-8 -*-\nimport RPi.GPIO as GPIO\n#for the sleep method\nimport time\nled = 8\n#set numbering mode for the program \nGPIO.setmode(GPIO.BOARD)\n#setup led(pin 8) as output pin\nGPIO.setup(led, GPIO.OUT,initial=0)\n#turn on and off the led in intervals of 1 second\nwhile(True):\n \n #turn on, set as HIGH or 1\n GPIO.output(led,GPIO.HIGH)\n print(\"ON\")\n time.sleep(1)\n #turn off, set as LOW or 0\n GPIO.output(led, GPIO.LOW)\n print(\"OFF\")\n time.sleep(1)\n \n","repo_name":"iotgeeks/iot_raspberrypi","sub_path":"Python Examples/led_blink.py","file_name":"led_blink.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14453950528","text":"\r\n# -*- coding: utf-8 -*- \r\n\r\nfrom PyQt4.QtGui import QFont, QPainter, QBrush, QPalette\r\nfrom PyQt4.QtGui import QColor, QDialog\r\nfrom PyQt4.QtGui import QHBoxLayout\r\nfrom PyQt4.QtGui import QVBoxLayout\r\nfrom PyQt4.QtGui import QLabel, QPixmap,QWidget\r\nfrom PyQt4.QtCore import Qt, SIGNAL, QEvent, QRect, QTranslator,\\\r\n QCoreApplication\r\n\r\nfrom tabwidget import TabWidget\r\nimport globalfunc\r\n \r\nclass AboutTab(QWidget):\r\n \r\n def __init__(self,parent = None):\r\n super(AboutTab,self).__init__(parent)\r\n self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint)\r\n version, rDate = globalfunc.getVersionAndReleaseDate()\r\n textpalette = QPalette()\r\n textpalette.setColor(QPalette.WindowText, QColor(33,97,143))\r\n self.adminLabel = QLabel(self.tr(\"Mcos-student\"))\r\n self.adminLabel.setFixedWidth(200)\r\n self.adminLabel.setFont(QFont(\"simhei\",20))\r\n self.adminLabel.setPalette(textpalette)\r\n if version:\r\n self.banbenLabel = QLabel(self.tr(\"Version:\") + \"%s\" % version)\r\n 
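# Apply the shared blue text palette defined above to the version label\r\n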
self.banbenLabel.setPalette(textpalette)\r\n else:\r\n self.banbenLabel = QLabel()\r\n \r\n if rDate:\r\n self.releaseDate = QLabel(self.tr(\"Version Time:\") + \"%s\" % rDate)\r\n self.releaseDate.setPalette(textpalette)\r\n else:\r\n self.releaseDate = QLabel()\r\n textLayout = QVBoxLayout()\r\n textLayout.addWidget(self.adminLabel)\r\n textLayout.addWidget(self.banbenLabel)\r\n textLayout.addWidget(self.releaseDate)\r\n textLayout.setMargin(20)\r\n textLayout.setSpacing(20)\r\n \r\n \r\n self.logoLabel = QLabel()\r\n self.logoLabel.setPixmap(QPixmap(\"images/productlogo.png\"))\r\n \r\n mainLayout = QHBoxLayout()\r\n mainLayout.addWidget(self.logoLabel)\r\n mainLayout.addLayout(textLayout)\r\n mainLayout.setMargin(50)\r\n \r\n self.setLayout(mainLayout)\r\n \r\n def updateWindow(self):\r\n version, rDate = globalfunc.getVersionAndReleaseDate()\r\n self.adminLabel.setText(self.tr(\"Mcos-student\"))\r\n self.banbenLabel.setText(self.tr(\"Version:\") + \"%s\" % version)\r\n self.releaseDate.setText(self.tr(\"Version Time:\") + \"%s\" % rDate)\r\n \r\n \r\nclass AboutWidget(QWidget):\r\n \r\n def __init__(self,parent = None):\r\n super(AboutWidget,self).__init__(parent)\r\n self.setWindowFlags(Qt.FramelessWindowHint|Qt.WindowStaysOnTopHint)\r\n #self.setWindowModality(Qt.WindowModal)\r\n self.tabWidget = TabWidget(self)\r\n self.aboutTab = AboutTab()\r\n self.tabWidget.addTab(self.aboutTab,self.tr(\"About\"))\r\n self.tabWidget.setHasUnderLine(False)\r\n \r\n mainLayout = QVBoxLayout()\r\n mainLayout.setMargin(0)\r\n mainLayout.setSpacing(0)\r\n mainLayout.addWidget(self.tabWidget)\r\n self.setLayout(mainLayout)\r\n \r\n self.mousePressed = False\r\n \r\n self.tabWidget.closeBtn().installEventFilter(self)\r\n \r\n self.connect(self.tabWidget, SIGNAL(\"closeWidget\"),self.close)\r\n \r\n def updateWindow(self):\r\n# m_pTranslator = QTranslator()\r\n# exePath = \"/root/workspace/nwclient/\"\r\n# language1 = \"chinese\"\r\n# if language1 == \"chinese\":\r\n# QmName = \"zh_CN.qm\"\r\n# else:\r\n# QmName = \"en_US.qm\"\r\n# \r\n# if(m_pTranslator.load(QmName, exePath)):\r\n# QCoreApplication.instance().installTranslator(m_pTranslator)\r\n \r\n self.aboutTab.updateWindow()\r\n self.tabWidget.removeTab(0)\r\n self.tabWidget.addTab(self.aboutTab,self.tr(\"About\"))\r\n \r\n def eventFilter(self, target, event):\r\n if target == self.tabWidget.closeBtn():\r\n if event.type() == QEvent.Enter:\r\n self.tabWidget.closeBtn().setStyleSheet(\"background:rgb(255, 255, 255)\")\r\n self.tabWidget.closeBtn().setAutoFillBackground(True)\r\n elif event.type() == QEvent.Leave:\r\n self.tabWidget.closeBtn().setAutoFillBackground(False)\r\n return True\r\n \r\n return QWidget.eventFilter(self, target, event)\r\n \r\n # Allow dragging the frameless window with the mouse\r\n def mouseMoveEvent(self,event):\r\n if self.mousePressed:\r\n self.move(self.pos() + event.pos() - self.currentPos)\r\n \r\n def mousePressEvent(self,event):\r\n if event.buttons() == Qt.LeftButton:\r\n self.currentPos = event.pos()\r\n self.mousePressed = True\r\n \r\n def mouseReleaseEvent(self,event):\r\n if event.buttons() == Qt.LeftButton:\r\n self.mousePressed = False\r\n \r\n def paintEvent(self,event):\r\n painterBack = QPainter(self)\r\n backBrush = QBrush(QColor(244,250,250))\r\n painterBack.setBrush(backBrush)\r\n backRect = QRect(0,0,self.width(),self.height())\r\n painterBack.fillRect(backRect, backBrush)\r\n 
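# No explicit end() call is needed: the QPainter is ended automatically when it goes out of scope\r\n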
\n","repo_name":"siwenhu/test_client_broadcast","sub_path":"about/aboutwidget.py","file_name":"aboutwidget.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5137888727","text":"import os\nfrom flask import Flask, render_template, request\nimport requests\n\nCREATE_FLIGHT = os.environ.get('SEARCH_FLIGHT_URL')\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/flight', methods=['POST'])\ndef searchFlights():\n from_city = request.form[\"from_city\"]\n to_city = request.form[\"to_city\"]\n price = request.form[\"price\"]\n response = requests.post(CREATE_FLIGHT,data={\"from_city\": from_city, \"to_city\": to_city, \"price\":price})\n flight_data = response.json()\n return render_template(\"details.html\",flight_data=flight_data)\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')","repo_name":"Abotabraham/cloud-native-kata-project","sub_path":"python-frontend/search-flights/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12582033246","text":"from django.conf.urls.defaults import patterns\n\nfrom twod.wsgi import make_wsgi_view\n\nfrom tests import MockApp\nfrom tests.fixtures.sampledjango import mock_view\n\napp = make_wsgi_view(MockApp(\"206 One step at a time\",\n [(\"X-SALUTATION\", \"Hey\")]))\n\nok_app = make_wsgi_view(MockApp(\"200 OK\", [(\"X-SALUTATION\", \"Hey\")]))\n\nurlpatterns = patterns('',\n (r'^blog', mock_view),\n (r'^admin', mock_view),\n (r'^secret', mock_view),\n (r\"wsgi-view-ok(/.*)?\", ok_app),\n (r\"wsgi-view(/.*)?\", app),\n )\n","repo_name":"orientalperil/twod.wsgi","sub_path":"tests/fixtures/sampledjango/app1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"2949415106","text":"# https://en.wikipedia.org/wiki/Radix_tree\n\nclass Node():\n def __init__(self):\n self.edges = []\n\n def is_leaf(self):\n return not self.edges\n\n def info(self, i = 0):\n for edge in self.edges:\n print('{} {}'.format(' ' * i, edge))\n if edge is not None and edge.node is not None:\n edge.node.info(i + 1)\n\nclass Edge():\n def __init__(self, label):\n self.label = label\n self.score = 1\n self.node = None\n\n def __str__(self):\n return '{}:{}'.format(self.label, self.score)\n\ndef insert(root = None, word = ''):\n '''\n Insert adds a word/prefix into the tree by iterating and adding the prefix\n to existing ones before traversing down the tree to extend the prefix.\n '''\n if not root and not word: return\n\n # There are no edges. Create new.\n if root.is_leaf():\n # Conventional to return None for mutating items in python.\n return root.edges.append(Edge(word))\n\n next_edge = [(i, edge) for i, edge in enumerate(root.edges)\n if similar_prefix(word, edge.label)]\n\n if not next_edge:\n return root.edges.append(Edge(word))\n\n i, next_edge = next_edge[0]\n k = similar_prefix(word, next_edge.label)\n\n if not k:\n # There are no matching prefix, which means that the word does not\n # exist yet.\n return root.edges.append(Edge(word))\n if k == len(word):\n # The len of both words matches. Increment the score as the same\n # pattern has been found.\n next_edge.score += 1\n return\n if k == len(next_edge.label):\n # The label is a prefix of the word. 
E.g., `car` is a prefix of `cars`.\n # Insert the difference, which is `s` in the example above.\n next_edge.score += 1\n if next_edge.node is None:\n next_edge.node = Node()\n insert(next_edge.node, word[k:])\n return\n\n # We need to split the prefix now since the length of k does not match any\n # of them. A possible example is `john` and `jojo`, where k will be equal\n # to 2, matching the prefix `jo` (john[:k] or jojo[:k]). In this scenario,\n # we will add the new prefix `jo` and split the edges into `hn` and `jo`.\n\n # Remove the old edge.\n edge = root.edges.pop(i)\n\n # Tmp variable to store the original label.\n label = edge.label\n\n # Update the existing label.\n edge.label = label[k:]\n\n # The new split prefix.\n new_edge = Edge(label[:k])\n new_edge.node = Node()\n new_edge.score += edge.score\n new_edge.node.edges.append(edge)\n\n insert(new_edge.node, word[k:])\n root.edges.append(new_edge)\n\ndef similar_prefix(a, b):\n '''\n Checks whether strings a and b share a common prefix by comparing their\n characters one by one. If they share no prefix, it returns index 0,\n indicating that a[:index] or b[:index] is the empty string. If one string\n is a prefix of the other, it returns the length of the shorter one.\n '''\n values = zip(a, b)\n for i, (left, right) in enumerate(values):\n if left != right:\n return i\n return min(len(a), len(b)) \n\ndef lookup(root, word='', result = None):\n '''\n Lookup will look for the words matching the prefix and return the\n suggestions.\n '''\n if result is None:\n result = set()\n\n i = 0\n traverse_node = root\n while traverse_node is not None and not traverse_node.is_leaf() and i < len(word):\n next_edge = [edge for edge in traverse_node.edges\n if word[i:].startswith(edge.label)]\n if not next_edge:\n traverse_node = None\n break\n edge = next_edge[0]\n traverse_node = edge.node\n i += len(edge.label)\n\n if traverse_node is None:\n return result\n\n if i > len(word):\n return set()\n\n out = [word[:i] + edge.label for edge in traverse_node.edges]\n for o in out:\n result.add(o)\n lookup(root, o, result)\n return result\n\ndef debug(root, word):\n insert(root, word)\n root.info()\n print('')\n\nroot = Node()\ninsert(root, 'a')\ninsert(root, 'b')\ninsert(root, 'bc')\ninsert(root, 'cb')\ninsert(root, 'ab')\ninsert(root, 'ac')\ninsert(root, 'abc')\ninsert(root, 'a')\ninsert(root, 'b')\ninsert(root, 'john')\ninsert(root, 'johns')\ninsert(root, 'jojo')\ninsert(root, 'johny')\ninsert(root, 'john doe')\ninsert(root, 'jess')\ninsert(root, 'je')\ninsert(root, 'jessie')\ndebug(root, 'jessica')\n\nprint(lookup(root, 'jo'))\nprint(lookup(root, 'a'))\nprint(lookup(root, 'je'))\n","repo_name":"alextanhongpin/data-structures-and-algorithms","sub_path":"trie/radix.py","file_name":"radix.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
+{"seq_id":"41371335912","text":"import requests\nfrom xhtml2pdf import pisa\nimport json\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime\nimport urllib\nfrom selenium import webdriver\nimport os\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--headless\")\n\ndriver = webdriver.Chrome(chrome_options=options)\ndriver.maximize_window()\n\naulatica_session = requests.session()\n\n\nclass Downloader:\n\n def index(self):\n headers_for_login = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n }\n login_page = bs(aulatica_session.get('https://aulatica.eadbox.com/login', headers=headers_for_login).content, 'html.parser') \n authenticity_token = login_page.find('meta', {'name': 'csrf-token'})['content']\n #print(authenticity_token)\n\n data = {\n 'utf8': '✓',\n 'authenticity_token': authenticity_token,\n 'user[email]': 'santosfelipe298@gmail.com',\n 'user[password]': 'upaparamim',\n 'commit': 'Login',\n 'user[remember_me]': 0\n }\n\n aulatica_session.post('https://aulatica.eadbox.com/login', headers=headers_for_login, data=data)\n self.headers = {\n 'authority': 'aulatica.eadbox.com',\n 'accept': 'application/json, text/plain, */*',\n 'x-newrelic-id': 'VgIGWV9XDhADUFBRBAgEUVc=',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',\n 'sec-fetch-site': 'same-origin',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://aulatica.eadbox.com/ng/student/courses/?page=1',\n 'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',\n 'cookie': '__cfduid=d0faa7b7aff4162bb41d5360d104b71ab1592441134; ajs_group_id=null; ajs_anonymous_id=^%^228f9b535e-3be2-4592-8913-642b06141f33^%^22; ajs_user_id=^%^225ee8f795c04a12001988270f^%^22; intercom-id-dlobwdcf=a0a28ee5-9e9c-4b28-be89-7c10aad77c9b; intercom-session-dlobwdcf=; _53542=http://10.60.27.12:8080; _session_id=ce7afabad8e34f778fb25223bb701a49',\n 'if-none-match': 'W/^\\\\^eb4a6b2229a7fe0145826bbc69917e04^\\\\^',\n 'if-modified-since': 'Thu, 18 Jun 2020 17:25:59 GMT',\n }\n self.selenium_part()\n self.get_infos()\n\n def selenium_part(self):\n\n driver.get('https://aulatica.eadbox.com/login')\n driver.find_element_by_id(\"user_email\").send_keys('santosfelipe298@gmail.com')\n driver.find_element_by_id(\"user_password\").send_keys('reliquiazero')\n driver.find_element_by_id(\"user_password\").send_keys(Keys.RETURN)\n \n def get_infos(self):\n \n self.subscription_id = json.loads(aulatica_session.get('https://aulatica.eadbox.com/ng/api/saas.json', headers=self.headers).content)['_id']\n subscriptions = []\n get_pages = json.loads(aulatica_session.get('https://aulatica.eadbox.com/ng/api/student/subscriptions.json', headers=self.headers).content)['links']['total_pages']\n #print(get_pages)\n for pages in range(1, get_pages+1):\n #print(pages)\n get_page = json.loads(aulatica_session.get(f'https://aulatica.eadbox.com/ng/api/student/subscriptions.json?page={pages}', headers=self.headers).content)['subscriptions']\n for item in get_page:\n subscriptions.append(item)\n self.get_course(subscriptions)\n print(f'\\n\\n\\n{self.videos_done} videos were downloaded')\n\n def get_course(self, subscriptions):\n #print(self.subscription_id)\n # Work through the received subscription list\n print(f'Aulática | {datetime.now().strftime(\"%H:%M:%S\")}')\n self.videos_done = 0\n videos_error = 0\n for curso in subscriptions:\n category_name = self.replacer(str(curso['category_name']).strip())\n course_title = self.replacer(str(curso[\"course\"][\"title\"]).strip())\n print(f'\\t{category_name}')\n print(f'\\t\\t{course_title}')\n\n course_slug = curso[\"course\"][\"course_slug\"]\n \n self.course_id = json.loads(aulatica_session.get(f'https://aulatica.eadbox.com/ng/api/student/courses/{course_slug}.json', headers=self.headers).content)['course_id']\n\n self.lectures = json.loads(aulatica_session.get(f'https://aulatica.eadbox.com/ng/api/student/courses/{course_slug}/subscription.json', headers=self.headers).content)['lectures']\n\n for lecture in self.lectures:\n lecture_title = self.replacer(str(lecture['title']).strip())\n print(f'\\t\\t\\t{lecture_title}')\n contents = lecture['contents']\n lecture_slug = lecture['lecture_slug']\n for count, content in enumerate(contents):\n count += 1\n self.content_id = content['content_id']\n content_title = self.replacer(str(content['title']).strip())\n path = f'Aulática/{category_name}/{course_title}/{lecture_title}'\n if os.path.exists(path) is False:\n os.makedirs(path)\n \n if content['type'] == 'coconut_video' or content['type'] == 's3_video':\n video_link = f'https://media.eadbox.com/saas_uploads/{self.subscription_id}/{self.course_id}#/{self.content_id}/contents/coconut_video/fhd_mp4_file/fhd_mp4_video.mp4'\n filename = f'{path}/{count} - {content_title}.mp4'\n video_down = aulatica_session.get(video_link, headers=self.headers)\n if os.path.exists(filename) is False:\n if video_down.status_code == 200:\n os.popen(f'aria2c -o \"{filename}\" {video_link} -q')\n print(f'\\t\\t\\t\\t{content_title} - Video - Requests!')\n else:\n driver.get(f'https://aulatica.eadbox.com/ng//student/courses/{course_slug}/lectures/{lecture_slug}/contents/{self.content_id}')\n time.sleep(10)\n try:\n time.sleep(5)\n video_link = driver.find_element_by_id('vjs_video_3_html5_api')#.get_attribute(\"src\")\n video_link = video_link.find_elements_by_tag_name('source')[-1]\n except Exception:\n time.sleep(5)\n try:\n video_link = driver.find_element_by_class_name('vjs-tech')#.get_attribute(\"src\")\n video_link = video_link.find_elements_by_tag_name('source')[-1]\n except Exception:\n print(f'\\t\\t\\t\\t{content_title} - VIDEO ERROR')\n continue\n \n video_download = video_link.get_attribute(\"src\")\n #urllib.request.urlretrieve(video_download, filename=filename)\n os.popen(f'aria2c -o \"{filename}\" {video_download} -q')\n print(f'\\t\\t\\t\\t{content_title} - Video - Selenium!')\n self.videos_done += 1\n \n if content['type'] == 'article':\n print(f'\\t\\t\\t\\t{count} - {content_title} - Article')\n article = str(json.loads(aulatica_session.get(f'https://aulatica.eadbox.com/ng/api/student/courses/{course_slug}/subscription/lectures/{lecture_slug}/contents/{self.content_id}.json', headers=self.headers).content)['article'])\n filename = f'{path}/{count} - {content_title}.pdf'\n filename_html = f'{path}/{count} - {content_title}.html'\n if os.path.exists(filename) is False:\n try:\n self.convert_html_to_pdf(article, filename)\n except Exception:\n with open(filename_html, 'w') as output:\n output.write(article)\n \n if content['type'] == 'new_box_view_document':\n print(f'\\t\\t\\t\\t{count} - {content_title} - PDF')\n filename = f'{path}/{count} - {content_title}.pdf'\n if os.path.exists(filename) is False:\n try:\n pdf_file = json.loads(aulatica_session.get(f'https://aulatica.eadbox.com/ng/api/student/courses/{course_slug}/subscription/lectures/{lecture_slug}/contents/{self.content_id}.json', headers=self.headers).content)['pdf_download_url']\n except Exception:\n pdf_file = json.loads(aulatica_session.get(f'https://aulatica.eadbox.com/ng/api/student/courses/{course_slug}/subscription/lectures/{lecture_slug}/contents/{self.content_id}.json', headers=self.headers).content)['file'] \n urllib.request.urlretrieve(pdf_file, filename=filename)\n \n\n\n #['', 'article', 'new_box_view_document', 'coconut_video', 's3_video']\n\n def convert_html_to_pdf(self, source_html, output_filename):\n\n result_file = open(output_filename, \"w+b\")\n\n pisa_status = pisa.CreatePDF(source_html, dest=result_file)\n result_file.close() \n\n\n\n def replacer(self, text):\n invalid = {r'\"': r\"'\", '\\\\': \" - \", '/': \"-\", '|': \" - \", '<': \"«\", '>': \"»\", '*': \"x\", ':': ' -', '?': \"¿\", '\\n': ' - ', '\\t': ' - '}\n for char in invalid:\n if char in text:\n text = text.replace(char, invalid[char])\n return text \n\n\"\"\"start_time = datetime.now().strftime(\"%H:%M:%S\")\n#start = Downloader()\nos.system('cls')\n#start.index()\n\nprint(f'Aulática\\nInicio: {start_time}\\nFim: {datetime.now().strftime(\"%H:%M:%S\")}')\"\"\"","repo_name":"FranciscoAlveJr/Bot_Telegram","sub_path":"Cursos/Aulatica.py","file_name":"Aulatica.py","file_ext":"py","file_size_in_byte":10464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"72171363144","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\n\nimport mysql.connector\nfrom time import sleep\nimport logging\n\n# Logging Configuration\nlogging.basicConfig(filename='bot_file.txt', filemode='w',level=logging.INFO, format= '%(asctime)s - %(levelname)s - %(message)s - %(lineno)d')\n\n# Building Database connection\ndef connection_database(host,user,password,database):\n    try:\n        logging.info('Building Database Connection')\n        conn = mysql.connector.connect(\n            host= host,\n            user= user, \n            password= password, \n            database= database \n        )\n    except Exception as e:\n        logging.critical(e)\n    else:\n        return conn\n\n# Function for creating Table, if table exist first delete it and then creat.\ndef Table_creation(connection):\n    conn = connection\n    cursor = conn.cursor()\n    table_name = \"Laptop_Products\"\n\n# MySQL query that give result if table exist\n    table_exists_query = \"\"\"\n    SELECT COUNT(*)\n    FROM information_schema.tables\n    WHERE table_schema = '{database}'\n    AND table_name = '{table}'\n    \"\"\".format(database= 'daraz_laptops_products', table=table_name)\n\n    cursor.execute(table_exists_query)\n    table_exists = cursor.fetchone()[0]\n\n# Checking Table exist or not\n    if table_exists:\n        # Table exists, so delete it\n        delete_table_query = \"DROP TABLE {table}\".format(table=table_name)\n        cursor.execute(delete_table_query)\n        logging.info(\"Table deleted successfully.\")\n    else:\n        logging.info(\"Table does not exist.\")\n\n# MySQL query for creating table\n    create_table_query = \"\"\"\n    CREATE TABLE {table} (\n        id INT AUTO_INCREMENT PRIMARY KEY,\n        Product_title VARCHAR(500),\n        Product_price VARCHAR(200),\n        No_of_Reviews INT,\n        Product_Rating FLOAT,\n        Product_description VARCHAR(10000)\n    )\n    \"\"\".format(table=table_name)\n\n    # Execute the table creation query\n    try:\n        cursor.execute(create_table_query)\n    except Exception as e:\n        logging.error(e)\n    else:\n        logging.info(\"Table created successfully.\")\n\ndef webdriver_connection():\n    try:\n        service = Service(executable_path = \"/chromedriver\")\n        driver = webdriver.Chrome(service=service)\n    except Exception as e:\n        logging.critical(e)\n    else:\n        driver.maximize_window()\n        return driver\n\ndef open_page(page,driver):\n    website = \"https://www.daraz.pk/laptops/?page=\"+str(page)+\"&style=list\"\n    try:\n        driver.set_page_load_timeout(100)\n        driver.implicitly_wait(50)\n 
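# implicitly_wait() covers element look-ups, while set_page_load_timeout() above bounds how long get() may block\n        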
driver.get(website)\n    except Exception as e:\n        logging.error(e)\n\ndef find_element_extract_text(driver,el_xpath):\n    try:\n        description = WebDriverWait(driver, 20).until(\n            EC.presence_of_element_located((By.XPATH,el_xpath))\n        )\n    except TimeoutException as e:\n        logging.error(e)\n        return ''\n    else:\n        return description.text\n    \n# Scraping Daraz Laptop category Products\ndef Scraping(start_page,page_increment,max_retrieves,connection):\n\n    count = 1\n    page = start_page\n    pageincrement = page_increment\n    maxretrieves = max_retrieves\n\n# Connecting with Chrome browser and opening daraz laptops category page\n\n    driver = webdriver_connection()\n    open_page(page,driver)\n\n    print(\"Scraping page \",page)\n    while True:\n        \n        try:\n            if pageincrement*page > maxretrieves:\n                break\n            if count>pageincrement:\n                count = 1\n                page +=1\n\n                print(\"Scraping page \",page)\n                open_page(page,driver)\n\n            # Scraping products titles\n            try:\n                driver.implicitly_wait(20)\n                xpathTitle = '//*[@id=\"root\"]/div/div[2]/div[1]/div/div[1]/div[2]/div['+str(count)+']/div/div[2]/div[2]/a'\n                title = driver.find_element(by='xpath',value=xpathTitle)\n            except NoSuchElementException as e:\n                logging.error(e)\n                title_text = ''\n            else:\n                title_text = title.text\n                title.click()\n\n            # Scraping products description\n            xpathdescription = '//*[@id=\"module_product_detail\"]/div/div[1]/div[1]/ul'\n            description = find_element_extract_text(driver,xpathdescription)\n\n            # Scraping products price\n            xpathprice = '//*[@id=\"module_product_price_1\"]/div/div/span'\n            price = find_element_extract_text(driver,xpathprice)\n\n            driver.execute_script(\"window.scrollBy(0,2000)\",\"\")\n\n            # Scraping products Rating\n            xpathRating = '//*[@id=\"module_product_review\"]/div/div/div[1]/div[2]/div/div/div[1]/div[1]'\n            Rating = find_element_extract_text(driver,xpathRating).split('/')[0]\n            \n            # Scraping products Reviews\n            xpathTotal_reviews = '//*[@id=\"module_product_review\"]/div/div/div[1]/div[2]/div/div/div[1]/div[3]'\n            Total_reviews = find_element_extract_text(driver,xpathTotal_reviews).split('R')[0]\n\n            # Inserting Data to Database\n            conn = connection\n            cursor = conn.cursor()\n            insert_query = \"INSERT INTO Laptop_Products (Product_title, Product_price, No_of_Reviews, Product_Rating, Product_description) VALUES (%s,%s,%s,%s,%s)\"\n            data = (title_text, price, int(Total_reviews), float(Rating), description)\n            cursor.execute(insert_query,data)\n            conn.commit()\n\n            open_page(page,driver)\n            \n            count+=1\n\n        except Exception as e:\n            logging.error(e)\n            count+=1\n\n            if pageincrement*page >maxretrieves:\n                break\n            if count>pageincrement:\n                count = 1\n                page +=1\n            \n                open_page(page,driver)\n\n    # driver.close()\n    # driver.quit()\n    \n\n\nif __name__ == '__main__':\n\n# Getting MySQL database connection requirements\n\n    print(\"\\nGive the following information to build the MySQL connection.\")\n    Host = input(\"Enter your host.\\n\")\n    User = input(\"Enter your MySQL username.\\n\")\n    Password = input(\"Enter your MySQL password.\\n\")\n    Database = input(\"Enter your MySQL database name.\\n\")\n\n    conn = connection_database(Host,User,Password,Database)\n    Table_creation(conn)\n\n    print(\"\\nEnter basic Scraping details: \\n\")\n\n    startpage = int(input(\"Enter the page number from which you want to start scraping.\\n\"))\n    pageincrement = int(input(\"Enter how many products you want to scrape per page.\\n(Note: the number should be <= the total number of products per page)\\n\"))\n    maxretrieve = int(input(\"Enter the maximum number of products you want to scrape.\\n\"))\n\n    
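# Kick off the scraper with the user-supplied page settings and the open MySQL connection\n    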
Scraping(startpage, pageincrement, maxretrieve, conn)\n","repo_name":"mTalha-1/Selenium_Bot_web_scraping","sub_path":"selenium_bot1.py","file_name":"selenium_bot1.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"19643008952","text":"import pymc\nfrom pymc import raftery_lewis\nfrom utils import _get_filename, _fill_parameters, _get_chain\nimport numpy\n\nsystems=['8','43','36','109','70','47','86','88','93','123','95','106','79','84','25','12','50','28','13']\n\nfor system in systems:\n    print('\\n\\n System=',system)\n    CHAIN=numpy.zeros(1)\n    for c in ['ganymede','stampede']:\n        for i in range(1,6):\n            chain_filename=_get_filename(system,c,i)\n            filled_name=_fill_parameters(chain_filename)\n            chain=_get_chain('logQ',filled_name)\n            CHAIN=numpy.concatenate((CHAIN,chain),axis=None)\n\n    raftery_lewis(CHAIN,q=0.90, r=0.1)\n","repo_name":"ruskin23/QstarFromTidalSynchronization","sub_path":"MCMC/version1_metropolis_hasting/SAVED_CHAINS/convergence_tests/rafterY_lewis.py","file_name":"rafterY_lewis.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"24393287937","text":"import collections\r\n\r\nA = int(input())\r\nB = int(input())\r\nC = int(input())\r\n\r\nmlt_string = str(A * B * C)\r\nnums_cnt = collections.Counter(mlt_string)\r\n\r\n\r\nfor n in range(10):\r\n    if str(n) in nums_cnt:\r\n        print(nums_cnt[str(n)])\r\n    else:\r\n        print(0)","repo_name":"andtomorrow/algorithm","sub_path":"백준/Bronze/2577. 숫자의 개수/숫자의 개수.py","file_name":"숫자의 개수.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"28244684758","text":"from typing import List, Any, Union\n\nfrom pydantic import BaseModel\nfrom pydantic.error_wrappers import ValidationError\n\nfrom pycamel.src.errors.ValidationErrors import (\n    AbsentValidationItems, IncorrectValidationPath\n)\n\n\nclass Validator:\n    \"\"\"\n    Class responsible for validating a received object against a pydantic\n    schema. After a successful validation it gives access to the parsed\n    instances of the pydantic model with all data.\n    \"\"\"\n    def __init__(\n        self,\n        schema,\n        response_data: dict,\n        validation_key: str = None\n    ) -> None:\n        \"\"\"\n        Constructor of validator.\n        :param schema: Pydantic schema. BaseModel that will be applied\n            to received data from BE for validation.\n        :param response_data: Data received from BE\n        :param validation_key: Validation key for getting a concrete value from\n            response data. The CamelConfig class has the same parameter. A value\n            passed directly to this constructor takes priority over the\n            parameter filled in the CamelConfig.\n            For example: if you have a dict like {\"data\": {\"some\": \"data\"}},\n            you can fill the validation key with the value \"data\"; as a result,\n            the pydantic schema will be applied to the inner dict\n            {\"some\": \"data\"}.\n        \"\"\"\n        self.schema = schema\n        self.response_data = response_data\n        self.validation_key = validation_key\n\n    def _iterator(\n        self,\n        searching_key: str,\n        data_to_search: dict = None\n    ) -> [None, dict, list]:\n        \"\"\"\n        Recursive method that tries to detect the part of the object that should be\n
In case when key is absent,\n returns None.\n :param searching_key: string that equal to searching key in dict\n :param data_to_search: dict with data\n :return: return data according to searching key. If key is absent,\n returns None.\n \"\"\"\n if data_to_search is None:\n data_to_search = self.response_data\n if isinstance(data_to_search, dict):\n for key in data_to_search:\n if key == searching_key:\n return data_to_search.get(key)\n elif isinstance(data_to_search.get(key), dict):\n self._iterator(searching_key, data_to_search.get(key))\n return None\n\n def _data_searcher(self) -> Any:\n \"\"\"\n According to set keys, try to get data from response data.\n If search key has been populated with more than 2 values, we try to\n get it without searching around of all data, if not, try to find needed\n data in dict by key.\n :return: Data for validation.\n \"\"\"\n path = self.validation_key.split(':')\n if len(path) > 1:\n result = self.response_data\n for path_item in path:\n try:\n result = result.get(path_item)\n except AttributeError as exception:\n raise IncorrectValidationPath(\n f\"Check path to validation item. \"\n f\"Current it isn't correct: {path}\"\n ) from exception\n else:\n result = self._iterator(*path)\n return result\n\n def _validate(self, data_to_validate: Union[dict, list]) -> List[BaseModel]:\n \"\"\"\n Method applies pydantic schema for data_to_validate object. In case\n when it is an array, method will apply schema to each array item\n in loop. If data_to_validate will be equal to one of [], {}, None value\n it will raise AbsentValidationItems exception.\n\n :param data_to_validate: It could be dict or list.\n :return: list of instances of pydantic class BaseModel\n \"\"\"\n result = []\n if data_to_validate not in ([], {}, None):\n if isinstance(data_to_validate, list):\n for item in data_to_validate:\n result.append(self.schema.parse_obj(item))\n elif isinstance(data_to_validate, dict):\n result.append(self.schema.parse_obj(data_to_validate))\n else:\n raise AbsentValidationItems(\n 'Nothing has been passed for validation.'\n 'Validation data should not be equal to None, {} or []'\n )\n return result\n\n def fetch(self) -> List[BaseModel]:\n \"\"\"\n Method that applies validation.\n :return: List of instances of class BaseModel\n \"\"\"\n if self.validation_key is not None:\n data_to_validate = self._data_searcher()\n else:\n data_to_validate = self.response_data\n try:\n initiated_objects = self._validate(data_to_validate)\n return initiated_objects\n except ValidationError as exception:\n raise AssertionError(\n f\"\\n\\nException: {exception}\"\n f\"\\nData passed to validator: {data_to_validate}\"\n f\"\\nValidation schema: {self.schema.schema_json()}\"\n ) from exception\n","repo_name":"canyoupleasecreateanaccount/pycamel","sub_path":"pycamel/src/modules/core/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"20735950572","text":"from models.players import Player, PlayersId\nfrom models.tournament import Tournaments, Tournament\n\n\nclass ControllerMenu:\n \"\"\"\n Menu for report and manage players\n \"\"\"\n\n load = 0\n\n def __init__(self, view):\n \"\"\"\n :param view: views.base.py\n \"\"\"\n self.view = view\n\n def case_1(self): # 1) Enregistrer un nouveau joueur\n # demander à la view infos\n attribut_player = self.view.menu_manage_club_case_1()\n player = Player(*attribut_player)\n # enregistrer dans la 
base\n player_id = Player.create(player)\n PlayersId.players_IDs.append(player_id)\n return\n\n def case_2(self): # 2) Listes :\n choice = self.view.menu_manage_club_case_2()\n if choice == 0:\n return\n\n # ●\t1) Liste de tous les acteurs :\n # ○\tpar ordre alphabétique ;\n # ○\tpar classement.\n elif choice == 1:\n sort = self.view.menu_manage_club_case_2_a()\n text = PlayersId.print_list_club(sort)\n self.view.menu_manage_club_case_2_print(text)\n return\n\n # ●\t2) Liste de tous les joueurs d'un tournoi :\n # ○\tpar ordre alphabétique ;\n # ○\tpar classement.\n elif choice == 2:\n # choix du tournoi\n Tournaments.load_all()\n name_tournament = self.view.menu_manage_club_case_2_2_choice(\n Tournaments.list_tournament)\n if name_tournament == 0:\n return\n tournament = Tournament.load(name_tournament)\n # choix du classement\n sort = self.view.menu_manage_club_case_2_a()\n # Affichage\n text = \"\"\n if sort == 1:\n text = PlayersId.print_list_player_sort_abc(tournament['players'])\n elif sort == 2:\n text = PlayersId.print_list_player_sort_rank(tournament['players'])\n self.view.menu_manage_club_case_2_print(text)\n return\n\n # ●\t3) Liste de tous les tournois.\n elif choice == 3:\n Tournaments.load_all()\n text = Tournaments.print_all()\n self.view.menu_manage_club_case_2_print(text)\n\n # ●\t4) Liste de tous les rounds d'un tournoi.\n elif choice == 4:\n Tournaments.load_all()\n name_tournament = self.view.menu_manage_club_case_2_2_choice(\n Tournaments.list_tournament)\n if name_tournament == 0:\n return\n tournament: Tournament = Tournament.load(name_tournament)\n text = \"\"\n for ronde in tournament['rounds']:\n text += f\"{ronde}\\n\"\n self.view.menu_manage_club_case_2_print(text)\n return\n\n # ●\t5) Liste de tous les matchs d'un tournoi.\n elif choice == 5:\n Tournaments.load_all()\n name_tournament = self.view.menu_manage_club_case_2_2_choice(\n Tournaments.list_tournament)\n if name_tournament == 0:\n return\n tournament: Tournament = Tournament.load(name_tournament)\n for ronde in tournament['rounds']:\n text = f\"\\n{ronde['name']}\"\n self.view.menu_manage_club_case_2_print(text)\n for match in ronde['list_matches']:\n\n try:\n ([player1_id, player1_score], [player2_id, player2_score]) = \\\n match['result_match']\n except ValueError:\n [player1, player2] = match.match_players_ids_to_players()\n text = f\"Match {player1['family_name']} {player1['first_name']} \" \\\n f\"contre {player2['family_name']} {player2['first_name']} :\\n\" \\\n f\"match non joué\\n\"\n else:\n player1 = PlayersId.id_to_dict(player1_id)\n player2 = PlayersId.id_to_dict(player2_id)\n if player1_score > player2_score:\n winner = f\"{player1['family_name']} {player1['first_name']}\"\n elif player1_score < player2_score:\n winner = f\"{player1['family_name']} {player1['first_name']}\"\n else:\n winner = \"match nul\"\n text = f\"Match {player1['family_name']} {player1['first_name']} \" \\\n f\"contre {player2['family_name']} {player2['first_name']}\\n\" \\\n f\"Le gagnant est : {winner}\\n\"\n\n self.view.menu_manage_club_case_2_print(text)\n return\n\n def case_3(self): # 3) Modifier un joueur\n # afficher la list du club afin de choisir le joueur\n text = PlayersId.print_list_club()\n nb_player = len(PlayersId.players_IDs)\n player_id = self.view.menu_manage_club_case_3_1(text, nb_player)\n # modification des valeurs par la vue\n player = PlayersId.id_to_dict(player_id)\n player_modify = self.view.menu_manage_club_case_3_2(player)\n # enregistrement du joueur et dire ok\n text2 = \"\"\n try:\n ok = 
Player.modify(player_modify, player_id)\n            if ok == \"ok\":\n                text2 = f\"Joueur {player_modify['family_name']}\" \\\n                        f\" {player_modify['first_name']} est bien modifié\"\n        except ValueError:\n            text2 = f\"Joueur {player_modify['family_name']} \" \\\n                    f\"{player_modify['first_name']} n'a pas été enregistré, veuillez avertir \" \\\n                    \"l'administrateur\"\n        finally:\n            self.view.menu_manage_club_case_3_3(text2)\n        return\n\n    def case_4(self): # 4) Save the tournament\n        tournaments = Tournaments.tournaments_actif\n        tournament = self.view.menu_manage_club_case_4_choice(tournaments)\n        if isinstance(tournament, Tournament):\n            tournament.save()\n            self.view.menu_manage_club_case_4_done(tournament['name'])\n        else:\n            raise ValueError(\"Erreur Tournament not saved\")\n        return\n\n    def case_5(self): # 5) Load a tournament\n        self.load = 1\n        return 'load'\n\n    @staticmethod\n    def case_6(): # 6) Quit\n        exit()\n\n    @staticmethod\n    def case_7(): # 7) Continue\n        return 'quit'\n\n    def choice(self, cases):\n        default = \"Mauvais choix\"\n        switch = 'case_' + str(cases)\n        return getattr(self, switch, lambda: default)()\n\n    def run(self):\n        menu = \"\"\n        while menu != 'quit':\n            choice = self.view.menu_manage_club()\n            menu = self.choice(choice)\n            if menu == 'load':\n                return 'load'\n        return\n","repo_name":"Nathom78/Developpez_un_programme_logiciel_en_Python-Club_d_echec","sub_path":"controllers/club_manage.py","file_name":"club_manage.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"28905910881","text":"from tabulate import tabulate\nfrom modules.load_users import load_projects\n\ndef view_projects():\n    projects_db = load_projects()\n    if len(projects_db) == 0:\n        print(\"Empty Projects DB, please insert some data\")\n    else:\n        proj_tab = []\n        headers = [\"Email\", \"Title\", \"Details\", \"Total_Target\", \"Start_Date\", \"End_Date\"]\n        # print(tabulate([(k,) + (v.values) for k,v in projects_db.items()], headers=headers))\n        for email, proj_lst in projects_db.items():\n            for proj in proj_lst:\n                proj_tab.append((email,) + tuple(proj.values()))\n        \n        print(tabulate(proj_tab,headers=headers))","repo_name":"Ziad-Tawfik/Crowd-Funding-App","sub_path":"modules/view_projects.py","file_name":"view_projects.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"34304477268","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 29 23:42:45 2017\n\n@author: Goutham\n\"\"\"\nfrom scipy.spatial import distance\ndef euc(a,b):\n    return distance.euclidean(a,b)\n\nclass ScrappyKNN(): #writing a classifier\n    def fit(self,x_train,y_train): #takes features and labels of the training set as input\n        self.x_train = x_train\n        self.y_train = y_train\n    \n    def predict(self,x_test):\n        predictions = []\n        for row in x_test:\n            label = self.closest(row)\n            predictions.append(label)\n        return predictions\n\n    def closest(self,row):\n        best_dist = euc(row,self.x_train[0])\n        best_index = 0\n        for i in range(1,len(self.x_train)):\n            dist = euc(row,self.x_train[i])\n            if dist>> from datetime import timedelta as td\n    >>> nice_repr(td(days=1, hours=2, minutes=3, seconds=4))\n    '1 day, 2 hours, 3 minutes, 4 seconds'\n    >>> nice_repr(td(days=1, seconds=1), \"minimal\")\n    '1d, 1s'\n    \"\"\"\n    \n    assert isinstance(timedelta, datetime.timedelta), \"First argument must be a timedelta.\"\n    \n    result = []\n    \n    weeks = timedelta.days // 7\n    days = timedelta.days % 7\n    hours = 
timedelta.seconds // 3600\n    minutes = (timedelta.seconds % 3600) // 60\n    seconds = timedelta.seconds % 60\n    \n    if display == \"sql\":\n        days += weeks * 7\n        return \"%i %02i:%02i:%02i\" % (days, hours, minutes, seconds)\n    elif display == 'minimal':\n        words = [\"w\", \"d\", \"h\", \"m\", \"s\"]\n    elif display == 'short':\n        words = [\" wks\", \" days\", \" hrs\", \" min\", \" sec\"]\n    else:\n        words = [\" weeks\", \" days\", \" hours\", \" minutes\", \" seconds\"]\n    \n    values = [weeks, days, hours, minutes, seconds]\n    \n    for i in range(len(values)):\n        if values[i]:\n            if values[i] == 1 and len(words[i]) > 1:\n                result.append(\"%i%s\" % (values[i], words[i].rstrip('s')))\n            else:\n                result.append(\"%i%s\" % (values[i], words[i]))\n    \n    return sep.join(result)\n\n\ndef iso8601_repr(timedelta):\n    \"\"\"\n    Represent a timedelta as an ISO8601 duration.\n    http://en.wikipedia.org/wiki/ISO_8601#Durations\n\n    >>> from datetime import timedelta as td\n    >>> iso8601_repr(td(days=1, hours=2, minutes=3, seconds=4))\n    'P1DT2H3M4S'\n    \"\"\"\n    years = timedelta.days // 365\n    weeks = (timedelta.days % 365) // 7\n    days = timedelta.days % 7\n\n    hours = timedelta.seconds // 3600\n    minutes = (timedelta.seconds % 3600) // 60\n    seconds = timedelta.seconds % 60\n\n    formatting = (\n        ('P', (\n            ('Y', years),\n            ('W', weeks),\n            ('D', days),\n        )),\n        ('T', (\n            ('H', hours),\n            ('M', minutes),\n            ('S', seconds),\n        )),\n    )\n\n    result = []\n    for category, subcats in formatting:\n        result += category\n        for format, value in subcats:\n            if value:\n                result.append('%d%c' % (value, format))\n\n    return \"\".join(result)\n\n@register.filter(name='timedelta')\ndef timedelta(value, display=\"long\"):\n    if value is None:\n        return value\n    return nice_repr(value, display)\n\n@register.filter(name='iso8601')\ndef iso8601(value):\n    if value is None:\n        return value\n    return iso8601_repr(value)\n\n","repo_name":"jjmartinr01/gauss3","sub_path":"my_templatetags/templatetags/extra_filters.py","file_name":"extra_filters.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} {"seq_id":"18263380668","text":"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport streamlit as st\r\nimport matplotlib.pyplot as plt\r\nfrom io import BytesIO\r\nfrom sklearn.metrics import roc_auc_score, roc_curve\r\nfrom eva import eva_dfkslift, eva_pks\r\n\r\nplot_type = ['ks']\r\ntitle=''\r\n\r\ndef scoring(df_dum, X_dum, y_dum, target, lr, target_score = 450, target_odds = 1, pts_double_odds = 80):\r\n    \r\n    df_dum['logit']=np.log(lr.predict_proba(X_dum)[:,0]/lr.predict_proba(X_dum)[:,1])\r\n    df_dum['odds'] = np.exp(df_dum['logit'])\r\n    df_dum['probs'] = df_dum['odds'] / (df_dum['odds'] + 1)\r\n    factor = pts_double_odds / np.log(2)\r\n    offset = target_score - factor * np.log(target_odds)\r\n    df_dum['score'] = offset + factor * df_dum['logit']\r\n    \r\n    intercept=offset-factor*lr.intercept_\r\n    intercept_rounded=intercept.round(0)\r\n    coefs=-factor*lr.coef_\r\n    coefs_rounded=coefs.round(0)\r\n    \r\n    df_dum['score_rounded']=df_dum.loc[:, ~df_dum.columns.isin([target,'logit','odds','probs','score'])].dot(coefs_rounded[0])+intercept_rounded\r\n    \r\n    groupnum=len(df_dum.index)\r\n    def n0(x): return sum(x==0)\r\n    def n1(x): return sum(x==1)\r\n    df_kslift = df_dum.sort_values('score_rounded', ascending=True).reset_index(drop=True)\\\r\n        .assign(group=lambda x: np.ceil((x.index+1)/(len(x.index)/groupnum)))\\\r\n        .groupby('group')[target].agg([n0,n1])\\\r\n        
.reset_index().rename(columns={'n0':'good','n1':'bad'})\\\r\n .assign(\r\n group=lambda x: (x.index+1)/len(x.index),\r\n good_distri=lambda x: x.good/sum(x.good), \r\n bad_distri=lambda x: x.bad/sum(x.bad), \r\n badrate=lambda x: x.bad/(x.good+x.bad),\r\n cumbadrate=lambda x: np.cumsum(x.bad)/np.cumsum(x.good+x.bad),\r\n lift=lambda x: (np.cumsum(x.bad)/np.cumsum(x.good+x.bad))/(sum(x.bad)/sum(x.good+x.bad)),\r\n cumgood=lambda x: np.cumsum(x.good)/sum(x.good), \r\n cumbad=lambda x: np.cumsum(x.bad)/sum(x.bad)\r\n ).assign(ks=lambda x:abs(x.cumbad-x.cumgood))\r\n df_kslift=pd.concat([\r\n pd.DataFrame({'group':0, 'good':0, 'bad':0, 'good_distri':0, 'bad_distri':0, 'badrate':0, 'cumbadrate':np.nan, 'cumgood':0, 'cumbad':0, 'ks':0, 'lift':np.nan}, index=np.arange(1)),\r\n df_kslift\r\n ], ignore_index=True)\r\n \r\n score_list=df_dum['score_rounded'].sort_values(ascending=True).tolist()\r\n df_kslift['score']=[np.nan]+score_list\r\n optimal_cutoff=df_kslift[df_kslift['ks']==df_kslift['ks'].max()]['group'].tolist()[0]\r\n \r\n fig=plt.figure(figsize=(16,8))\r\n plt.hist([df_dum[df_dum[target]==0]['score_rounded'],df_dum[df_dum[target]==1]['score_rounded']],\r\n bins=80,\r\n edgecolor='white',\r\n color = ['g','r'],\r\n linewidth=1.2)\r\n\r\n plt.title('Scorecard Distribution', fontweight=\"bold\", fontsize=14)\r\n plt.axvline(np.percentile(df_dum['score_rounded'],optimal_cutoff*100), color='k', linestyle='dashed', linewidth=1.5, alpha=0.5)\r\n plt.xlabel('Score')\r\n plt.ylabel('Count');\r\n buf=BytesIO()\r\n fig.savefig(buf, format='png')\r\n st.image(buf)\r\n st.write('Optimal cutoff = ', np.percentile(df_dum['score_rounded'],optimal_cutoff*100).round(0))\r\n \r\n df_ks = df_kslift\r\n \r\n ks_score = round(df_ks.loc[lambda x: x.ks==max(x.ks),'ks'].iloc[0],4)\r\n plist = [\"eva_p\"+i+'(df_'+i+',title)' for i in plot_type]\r\n subplot_nrows = int(np.ceil(len(plist)/2))\r\n subplot_ncols = int(np.ceil(len(plist)/subplot_nrows))\r\n \r\n fig = plt.figure(figsize=(8,8))\r\n for i in np.arange(len(plist)):\r\n plt.subplot(subplot_nrows,subplot_ncols,i+1)\r\n eval(plist[i])\r\n plt.show()\r\n buf=BytesIO()\r\n fig.savefig(buf, format='png')\r\n st.image(buf)\r\n \r\n logit_roc_auc = roc_auc_score(y_dum, -1*df_dum['score_rounded'])\r\n fpr, tpr, thresholds = roc_curve(y_dum, -1*df_dum['score_rounded'])\r\n fig=plt.figure(figsize=(8,8))\r\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\r\n plt.plot([0, 1], [0, 1],'r--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.01])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('Receiver operating characteristic')\r\n plt.legend(loc=\"lower right\")\r\n plt.savefig('Log_ROC')\r\n plt.show()\r\n buf=BytesIO()\r\n fig.savefig(buf, format='png')\r\n st.image(buf)\r\n \r\n max_ks=(100*df_kslift['ks']).max()\r\n \r\n col1, col2, col3 = st.columns(3)\r\n col1.metric(label=\"KS-score\",value=round(max_ks, 2))\r\n col2.metric(label=\"AUC ROC\",value=logit_roc_auc.round(2))\r\n col3.metric(label=\"Gini\", value=(100* (2*logit_roc_auc-1.0)).round(2))\r\n \r\n df_ks['score'].fillna(method='bfill', inplace=True)\r\n df_ks['score_prev']=df_ks['score'].astype(int)\r\n df_ks['score_next']=df_ks['score'].astype(int)+1\r\n \r\n df_ppt=pd.DataFrame(data={'cutoff_score': df_ks['score_prev'].sort_values(ascending=False).unique().tolist()})\r\n\r\n df_ppt['approval rate']=0\r\n for score in df_ks['score_prev'].sort_values(ascending=False).unique().tolist():\r\n df_ppt.loc[df_ppt['cutoff_score']==score, 
'approval rate']=df_ks[df_ks['score']>score]['group'].count()/df_ks['group'].count()\r\n\r\n df_ppt['marginal odds ratio']=np.exp((df_ppt['cutoff_score']-offset)/factor)\r\n df_ppt['marginal good rate']=df_ppt['marginal odds ratio']/(1+df_ppt['marginal odds ratio'])\r\n df_ppt['good rate for total accepted']=0\r\n for score in df_ks['score_prev'].sort_values(ascending=False).unique().tolist():\r\n df_ppt.loc[df_ppt['cutoff_score']==score, 'good rate for total accepted']=df_ks[(df_ks['score']>=score)&(df_ks['good']==1)]['group'].count()/df_ks[df_ks['score']>=score]['group'].count()\r\n\r\n df_ppt['odds for total accepted']=df_ppt['good rate for total accepted']/(1-df_ppt['good rate for total accepted'])\r\n df_ppt['good rate for total rejected']=0\r\n for score in df_ks['score_prev'].sort_values(ascending=False).unique().tolist():\r\n df_ppt.loc[df_ppt['cutoff_score']==score, 'good rate for total rejected']=df_ks[(df_ks['score']<=score)&(df_ks['good']==1)]['group'].count()/df_ks[df_ks['score']<=score]['group'].count()\r\n\r\n df_ppt.loc[df_ppt['good rate for total rejected'].isna()==True, 'good rate for total rejected']=0\r\n df_ppt['odds for total rejected']=df_ppt['good rate for total rejected']/(1-df_ppt['good rate for total rejected'])\r\n \r\n df_scorecard=pd.DataFrame()\r\n\r\n df_scorecard['Feature']=np.concatenate((['Intercept'], lr.feature_names_in_))\r\n df_scorecard['Score']=np.concatenate((intercept_rounded, coefs_rounded[0]))\r\n\r\n with pd.option_context('display.max_rows', None,):\r\n st.write('Scorecard:')\r\n st.dataframe(df_scorecard.sort_values(by=['Feature']).reset_index(drop=True))\r\n \r\n #df_scored=pd.concat([df,df_dum['score_rounded']], axis=1)\r\n \r\n return df_ppt, df_scorecard\r\n","repo_name":"afanasiev-d/custom_scoring_model_test_version","sub_path":"scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28192558030","text":"# you can write to stdout for debugging purposes, e.g.\r\n# print(\"this is a debug message\")\r\n\r\ndef solution(S):\r\n S = S.split(',')\r\n result = 0\r\n for num in S:\r\n if '+' in num:\r\n num = num.replace('+','')\r\n result += int(num)\r\n return result","repo_name":"danrasband/coding-experiment-reviews","sub_path":"responses/4N53A7-Z4R/1_sum_of_integers_in_a_string.py","file_name":"1_sum_of_integers_in_a_string.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6390783219","text":"import flet\nfrom flet import Page,Row,Column,ElevatedButton,Text,TextField\n\ndef main(page):\n \n def on_message(msg):\n messages.controls.append(Text(msg))\n page.update()\n page.pubsub.subscribe(on_message)\n\n def send_click(e):\n page.pubsub.send_all(f\" {user.value} : {message.value} \")\n message.value = \"\"\n page.update()\n\n messages = Column()\n user = TextField(hint_text = \"your name\",width = 150)\n message = TextField(hint_text = \"your message\",expand = True)\n send = ElevatedButton(\"send\",on_click=send_click)\n page.add(messages,Row(controls=[user,message,send]))\n page.update()\n \nflet.app(target=main)\n\n\n","repo_name":"hilfa007/FLET","sub_path":"flet_Chat.py","file_name":"flet_Chat.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"8723325165","text":"import json\nimport boto3\nimport 
torch\nimport os\nimport re\nfrom transformers import AutoTokenizer, BertForSequenceClassification\n\n\ndef predict_labels(inputs, model_path, labels):\n    tokenizer = AutoTokenizer.from_pretrained(model_path)  # Load the tokenizer\n    model = BertForSequenceClassification.from_pretrained(\n        model_path\n    )  # Load the fine-tuned BERT model (it will be on CPU by default)\n    model.eval()  # Ensure the model is in evaluation mode\n\n    inputs_encoded = tokenizer(\n        inputs, padding=True, truncation=True, return_tensors=\"pt\"\n    )  # Tokenize the input texts and convert them to tensors\n\n    with torch.no_grad():  # Make predictions\n        outputs = model(**inputs_encoded)\n\n    # Get the predicted labels\n    logits = outputs.logits\n    probabilities = torch.softmax(logits, dim=1)\n    predicted_labels = [labels[i] for i in torch.argmax(probabilities, dim=1)]\n\n    return predicted_labels\n\n\n\"\"\"\nPart for inference\n\"\"\"\n\n\ndef lambda_handler(event, context):\n    print(event)\n    # version check\n    function_arn = context.invoked_function_arn\n    env = function_arn.split(\":\")[-1]\n    print(env)\n    env = \"prod\"\n\n    # parsing message\n    try:\n        message_body = json.loads(event[\"Records\"][0][\"body\"])\n        user_id = message_body[\"user_id\"]\n        time_stamp = message_body[\"time_stamp\"]\n    except:\n        print(\"parsing fail!\")\n\n    # get story\n    try:\n        dynamodb_client = boto3.client(\"dynamodb\")\n        query = f\"SELECT * FROM Dy_gpt_story_{env} where user_id='{user_id}' and time_stamp='{time_stamp}'\"\n        print(query)\n        result_story = dynamodb_client.execute_statement(Statement=query)\n    except:\n        print(\"story fail!\")\n\n    # get story keys\n    try:\n        temp = list(result_story[\"Items\"][0].keys())\n        temp.remove(\"user_id\")\n        temp.remove(\"time_stamp\")\n        temp.sort()\n        story_keys = temp\n    except:\n        print(\"parsing fail!\")\n        return {\"statusCode\": 200, \"body\": json.dumps(\"Hello\")}\n\n    # page, sentence parsing -> . 
[SEP]\n    inputs = []\n    for page_num in range(len(story_keys)):\n        text = result_story[\"Items\"][0][story_keys[page_num]][\"S\"]\n        sentence = re.split(\"(?<=[.!?]) +\", text)\n        inputs.append(\" [SEP]\".join(sentence))\n\n    print(inputs)\n\n    labels = [0, 1, 2, 3, 4, 5, 6]  # Replaced with our own label names\n    model_path = (\n        \"/var/task/klue_finetuned_distilled_ckpt/3-klue_distilled/checkpoint-3856\"\n    )\n    ar = []  # to store predicted labels\n    dc = {}  # to count predicted labels\n    predicted_labels = predict_labels(\n        inputs, model_path, labels\n    )  # send to model for prediction\n\n    for predicted_label in predicted_labels:\n        ar.append(int(predicted_label))\n\n    # part for sorting and counting numbers of predicted label\n    for value in ar:\n        if value in dc:\n            dc[value] += 1\n        else:\n            dc[value] = 1\n    total = 0\n    for d in dc:\n        total += dc[d]\n        print(f\"label: {d}, count: {dc[d]}\\n\")\n\n    # rejection criteria for the GPT-written story\n    check = 0\n    for label in range(3, 7):\n        if label in dc:\n            check += dc[label]\n\n    if check / total > 0.5:\n        print(\"reject\")\n        # resend to SQS (SQS_make_story)\n        try:\n            # final processing is done by the lambda attached to the SQS queue\n            sqs = boto3.resource(\"sqs\", region_name=\"ap-northeast-2\")\n            queue = sqs.get_queue_by_name(QueueName=f\"SQS_make_story_{env}\")\n\n            temp_json = {}\n            temp_json[\"user_id\"] = user_id\n            temp_json[\"time_stamp\"] = time_stamp\n            message_body = json.dumps(temp_json)\n            response = queue.send_message(\n                MessageBody=message_body,\n            )\n        except:\n            print(\"sqs fail!\")\n    else:\n        try:\n            # to SQS_post_midjourney_story\n            sqs = boto3.resource(\"sqs\", region_name=\"ap-northeast-2\")\n            queue = sqs.get_queue_by_name(QueueName=f\"SQS_post_midjourney_story_{env}\")\n            temp_json = {}\n            temp_json[\"user_id\"] = user_id\n            temp_json[\"time_stamp\"] = time_stamp\n            message_body = json.dumps(temp_json)\n            response = queue.send_message(\n                MessageBody=message_body,\n            )\n        except:\n            print(\"sqs fail!\")\n\n    print(\"good\")\n    return {\"statusCode\": 200, \"body\": json.dumps(\"Hello from Lambda!\")}\n","repo_name":"FILO-DEV-TEAM/klue_finetuned_distilled_ckpt","sub_path":"La_gpt_validation.py","file_name":"La_gpt_validation.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"11648127287","text":"import datetime\nimport json\nimport os\nimport six\nfrom .utils import suck_out_variations_only\nfrom .utils import suck_out_editions\n\n\n# noinspection PyBroadException\ndef datetime_parser(dct):\n    for k, v in dct.items():\n        if isinstance(v, six.string_types):\n            try:\n                dct[k] = datetime.datetime.strptime(v, \"%Y-%m-%dT%H:%M:%S\")\n            except:\n                pass\n    return dct\n\ndb_root = os.path.dirname(os.path.realpath(__file__))\nwith open(os.path.join(db_root, 'data', 'reporters.json')) as f:\n    REPORTERS = json.load(f, object_hook=datetime_parser)\n\n\nwith open(os.path.join(db_root, 'data', 'state_abbreviations.json')) as f:\n    STATE_ABBREVIATIONS = json.load(f)\n\n\nwith open(os.path.join(db_root, 'data', 'case_name_abbreviations.json')) as f:\n    CASE_NAME_ABBREVIATIONS = json.load(f)\n\n\nVARIATIONS_ONLY = suck_out_variations_only(REPORTERS)\nEDITIONS = suck_out_editions(REPORTERS)\n","repo_name":"limc/project","sub_path":"pip/venv/lib/python3.6/site-packages/reporters_db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"41230511899","text":"import os\n\nfrom flask import Flask, 
redirect, url_for, request\n\ndef create_app(test_config=None):\n    # create and configure the app\n    app = Flask(__name__)\n    app.config.from_mapping(\n        SECRET_KEY='dev',\n        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),\n    )\n\n    if test_config is None:\n        # load the instance config, if it exists, when not testing\n        app.config.from_pyfile('config.py', silent=True)\n    else:\n        # load the test config if passed in\n        app.config.from_mapping(test_config)\n\n    # ensure the instance folder exists\n    try:\n        os.makedirs(app.instance_path)\n    except OSError:\n        pass\n\n    # a simple page that says hello\n    #this sets the path of the web application address, and the path runs the corresponding function \n    @app.route('/<int:postid>')#here the path is some integer id. If we write some number it will get the number and print the number \n    def hello(postid):#function that runs when the path is called \n        return 'Hello, World!'+str(postid)\n    \n    @app.route('/admin')#path \n    def hello_admin():#function that runs when the path is called \n        return 'Hello, admin'\n\n    @app.route('/guest')#path \n    def hello_guest():#function that runs when the path is called \n        return 'Hello, guest'\n    \n    @app.route('/login_success/<name>')#path \n    def hello_login(name):#function that runs when the path is called \n        return 'welcome '+str(name)\n\n    '''\n    url_for() function calls a route or url already built '''\n\n    @app.route('/<name>')#path \n    def hello_name(name):#function that runs when the path is called \n        if name=='admin':\n            return redirect(url_for('hello_admin'))#redirecting the url to admin\n        else:\n            return redirect(url_for('hello_guest'))#redirecting the url to guest\n\n\n#HTML PAGE Manipulation\n#created html login page\n\n\n    \n    @app.route('/login',methods=['POST'])#path \n    def login():#function that runs when the path is called \n        if request.method == 'POST':\n            user = request.form['nm']\n            return redirect(url_for('hello_login',name = user))\n\n    return app\n","repo_name":"viveklalex/flask_basics","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"73802440264","text":"from ast import Assert\nimport unittest\nimport unittest.mock\nimport module\nimport urllib.request\n\nfrom utilities import POSITIONAL_ARGUMENTS, KEYWORD_ARGUMENTS\n\n\nclass TestHelpers(unittest.TestCase):\n\n    maxDiff = None\n\n    def test_sentinel(self):\n        real = module.Class()\n        real.method = unittest.mock.Mock(name='method')\n        real.method.return_value = unittest.mock.sentinel.some_object\n\n        self.assertEqual(real.method(), unittest.mock.sentinel.some_object)\n\n    def test_default(self):\n        self.assertEqual(unittest.mock.DEFAULT, unittest.mock.sentinel.DEFAULT)\n\n    def test_call(self):\n        mocked_object = unittest.mock.MagicMock(return_value=None)\n        mocked_object(*POSITIONAL_ARGUMENTS, **KEYWORD_ARGUMENTS)\n        mocked_object()\n        self.assertEqual(\n            mocked_object.call_args_list,\n            [\n                unittest.mock.call(*POSITIONAL_ARGUMENTS, **KEYWORD_ARGUMENTS),\n                unittest.mock.call()\n            ]\n        )\n\n    def test_use_call_list_to_make_assertions_on_chained_calls(self):\n        mock_object = unittest.mock.MagicMock()\n        mock_object(1).method(arg='foo').other('bar')(2.0)\n        call_list = unittest.mock.call(1).method(arg='foo').other('bar')(2.0)\n\n        self.assertEqual(\n            mock_object.mock_calls,\n            call_list.call_list()\n        )\n\n    def test_call_args(self):\n        mock_object = unittest.mock.MagicMock(return_value=None)\n        mock_object(*POSITIONAL_ARGUMENTS, **KEYWORD_ARGUMENTS)\n        self.assertEqual(\n            
mock_object.call_args,\n unittest.mock.call(*POSITIONAL_ARGUMENTS, **KEYWORD_ARGUMENTS)\n )\n self.assertEqual(\n mock_object.call_args.args,\n POSITIONAL_ARGUMENTS\n )\n self.assertEqual(\n mock_object.call_args.kwargs,\n KEYWORD_ARGUMENTS\n )\n\n def test_call_args_with_name(self):\n mock_object = unittest.mock.MagicMock()\n mock_object.foo(*POSITIONAL_ARGUMENTS, **KEYWORD_ARGUMENTS)\n self.assertEqual(\n mock_object.mock_calls,\n [\n unittest.mock.call.foo(\n *POSITIONAL_ARGUMENTS,\n **KEYWORD_ARGUMENTS\n )\n ]\n )\n\n name, positional_arguments, keyword_arguments = mock_object.mock_calls[0]\n self.assertEqual(name, 'foo')\n self.assertEqual(positional_arguments, POSITIONAL_ARGUMENTS)\n self.assertEqual(keyword_arguments, KEYWORD_ARGUMENTS)\n\n def test_using_any_to_ignore_certain_arguments(self):\n mock_object = unittest.mock.Mock(return_value=None)\n mock_object('foo', bar=object())\n mock_object.assert_called_once_with('foo', bar=unittest.mock.ANY)\n\n def test_using_any_in_comparisons(self):\n mock_object = unittest.mock.MagicMock(return_value=None)\n mock_object(1)\n mock_object(1, 2)\n mock_object(object())\n self.assertEqual(\n mock_object.mock_calls,\n [\n unittest.mock.call(1),\n unittest.mock.call(1, 2),\n unittest.mock.ANY\n ]\n )\n\n def test_filter_dir(self):\n self.assertEqual(\n sorted(dir(unittest.mock.Mock())),\n [\n 'assert_any_call',\n 'assert_called',\n 'assert_called_once',\n 'assert_called_once_with',\n 'assert_called_with',\n 'assert_has_calls',\n 'assert_not_called',\n 'attach_mock',\n 'call_args',\n 'call_args_list',\n 'call_count',\n 'called',\n 'configure_mock',\n 'method_calls',\n 'mock_add_spec',\n 'mock_calls',\n 'reset_mock',\n 'return_value',\n 'side_effect'\n ]\n )\n\n self.assertEqual(\n sorted(dir(unittest.mock.Mock(spec=urllib.request))),\n [\n 'AbstractBasicAuthHandler',\n 'AbstractDigestAuthHandler',\n 'AbstractHTTPHandler',\n 'BaseHandler',\n 'CacheFTPHandler',\n 'ContentTooShortError',\n 'DataHandler',\n 'FTPHandler',\n 'FancyURLopener',\n 'FileHandler',\n 'HTTPBasicAuthHandler',\n 'HTTPCookieProcessor',\n 'HTTPDefaultErrorHandler',\n 'HTTPDigestAuthHandler',\n 'HTTPError',\n 'HTTPErrorProcessor',\n 'HTTPHandler',\n 'HTTPPasswordMgr',\n 'HTTPPasswordMgrWithDefaultRealm',\n 'HTTPPasswordMgrWithPriorAuth',\n 'HTTPRedirectHandler',\n 'HTTPSHandler',\n 'MAXFTPCACHE',\n 'OpenerDirector',\n 'ProxyBasicAuthHandler',\n 'ProxyDigestAuthHandler',\n 'ProxyHandler',\n 'Request',\n 'URLError',\n 'URLopener',\n 'UnknownHandler',\n '__all__',\n '__builtins__',\n '__cached__',\n '__doc__',\n '__file__',\n '__loader__',\n '__name__',\n '__package__',\n '__spec__',\n '__version__',\n '_cut_port_re',\n '_ftperrors',\n '_get_proxies',\n '_get_proxy_settings',\n '_have_ssl',\n '_localhost',\n '_noheaders',\n '_opener',\n '_parse_proxy',\n '_proxy_bypass_macosx_sysconf',\n '_randombytes',\n '_safe_gethostbyname',\n '_splitattr',\n '_splithost',\n '_splitpasswd',\n '_splitport',\n '_splitquery',\n '_splittag',\n '_splittype',\n '_splituser',\n '_splitvalue',\n '_thishost',\n '_to_bytes',\n '_url_tempfiles',\n 'addclosehook',\n 'addinfourl',\n 'assert_any_call',\n 'assert_called',\n 'assert_called_once',\n 'assert_called_once_with',\n 'assert_called_with',\n 'assert_has_calls',\n 'assert_not_called',\n 'attach_mock',\n 'base64',\n 'bisect',\n 'build_opener',\n 'call_args',\n 'call_args_list',\n 'call_count',\n 'called',\n 'configure_mock',\n 'contextlib',\n 'email',\n 'ftpcache',\n 'ftperrors',\n 'ftpwrapper',\n 'getproxies',\n 'getproxies_environment',\n 
'getproxies_macosx_sysconf',\n 'hashlib',\n 'http',\n 'install_opener',\n 'io',\n 'localhost',\n 'method_calls',\n 'mock_add_spec',\n 'mock_calls',\n 'noheaders',\n 'os',\n 'parse_http_list',\n 'parse_keqv_list',\n 'pathname2url',\n 'posixpath',\n 'proxy_bypass',\n 'proxy_bypass_environment',\n 'proxy_bypass_macosx_sysconf',\n 'quote',\n 're',\n 'request_host',\n 'reset_mock',\n 'return_value',\n 'side_effect',\n 'socket',\n 'ssl',\n 'string',\n 'sys',\n 'tempfile',\n 'thishost',\n 'time',\n 'unquote',\n 'unquote_to_bytes',\n 'unwrap',\n 'url2pathname',\n 'urlcleanup',\n 'urljoin',\n 'urlopen',\n 'urlparse',\n 'urlretrieve',\n 'urlsplit',\n 'urlunparse',\n 'warnings'\n ]\n )\n\n def test_filter_dir_set_to_false(self):\n unittest.mock.FILTER_DIR = False\n\n self.assertEqual(\n sorted(dir(unittest.mock.Mock())),\n [\n '_NonCallableMock__get_return_value',\n '_NonCallableMock__get_side_effect',\n '_NonCallableMock__return_value_doc',\n '_NonCallableMock__set_return_value',\n '_NonCallableMock__set_side_effect',\n '__call__',\n '__class__',\n '__delattr__',\n '__dict__',\n '__dir__',\n '__doc__',\n '__eq__',\n '__format__',\n '__ge__',\n '__getattr__',\n '__getattribute__',\n '__gt__',\n '__hash__',\n '__init__',\n '__init_subclass__',\n '__le__',\n '__lt__',\n '__module__',\n '__ne__',\n '__new__',\n '__reduce__',\n '__reduce_ex__',\n '__repr__',\n '__setattr__',\n '__sizeof__',\n '__str__',\n '__subclasshook__',\n '__weakref__',\n '_call_matcher',\n '_calls_repr',\n '_execute_mock_call',\n '_extract_mock_name',\n '_format_mock_call_signature',\n '_format_mock_failure_message',\n '_get_call_signature_from_name',\n '_get_child_mock',\n '_increment_mock_call',\n '_mock_add_spec',\n '_mock_call',\n '_mock_call_args',\n '_mock_call_args_list',\n '_mock_call_count',\n '_mock_called',\n '_mock_check_sig',\n '_mock_children',\n '_mock_delegate',\n '_mock_methods',\n '_mock_mock_calls',\n '_mock_name',\n '_mock_new_name',\n '_mock_new_parent',\n '_mock_parent',\n '_mock_return_value',\n '_mock_sealed',\n '_mock_side_effect',\n '_mock_unsafe',\n '_mock_wraps',\n '_spec_asyncs',\n '_spec_class',\n '_spec_set',\n '_spec_signature',\n 'assert_any_call',\n 'assert_called',\n 'assert_called_once',\n 'assert_called_once_with',\n 'assert_called_with',\n 'assert_has_calls',\n 'assert_not_called',\n 'attach_mock',\n 'call_args',\n 'call_args_list',\n 'call_count',\n 'called',\n 'configure_mock',\n 'method_calls',\n 'mock_add_spec',\n 'mock_calls',\n 'reset_mock',\n 'return_value',\n 'side_effect'\n ]\n )\n self.assertEqual(\n vars(unittest.mock.Mock()),\n {\n '_mock_call_args': None,\n '_mock_call_args_list': [],\n '_mock_call_count': 0,\n '_mock_called': False,\n '_mock_children': {},\n '_mock_delegate': None,\n '_mock_methods': None,\n '_mock_mock_calls': [],\n '_mock_name': None,\n '_mock_new_name': '',\n '_mock_new_parent': None,\n '_mock_parent': None,\n '_mock_return_value': unittest.mock.sentinel.DEFAULT,\n '_mock_sealed': False,\n '_mock_side_effect': None,\n '_mock_unsafe': False,\n '_mock_wraps': None,\n '_spec_asyncs': [],\n '_spec_class': None,\n '_spec_set': None,\n '_spec_signature': None,\n 'method_calls': []\n }\n )\n\n def test_mock_open_for_writing_files(self):\n with unittest.mock.patch(\n '__main__.open',\n unittest.mock.mock_open()\n ) as mock_object:\n with open('foo', 'w') as mock_file:\n mock_file.write('some stuff')\n\n self.assertEqual(mock_object.mock_calls, [])\n with self.assertRaises(AssertionError):\n mock_object.assert_called_once_with('foo', 'w')\n\n def 
test_mock_open_for_reading_files(self):\n with unittest.mock.patch(\n '__main__.open',\n unittest.mock.mock_open(read_data='bibble')\n ) as mock_object:\n with open('foo') as file:\n self.assertEqual(file.read(), 'some stuff')\n\n with self.assertRaises(AssertionError):\n mock_object.assert_called_once_with('foo')\n\n def test_sealing_mocks(self):\n mock_object = unittest.mock.Mock()\n mock_object.sub_mock.attribute_a = 1\n mock_object.not_sub_mock = unittest.mock.Mock(name='not_sub_mock')\n unittest.mock.seal(mock_object)\n\n with self.assertRaises(AttributeError):\n mock_object.new_attribute\n\n mock_object.sub_mock.attribute_b\n mock_object.not_sub_mock.attribute_a","repo_name":"jadecobra/gym","sub_path":"python_tdd/tests/test_unittest_mocks/tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":13655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11546305408","text":"import requests\n\nclass HDBData:\n @staticmethod\n def getData(callback):\n # define the API endpoint and the authentication token\n endpoint = 'https://api.data.gov.sg/v1/hdb/data'\n token = 'YOUR_AUTH_TOKEN'\n\n # make a request to the HDB DataMall API to retrieve the HDB data\n data = requests.get(endpoint, headers={'Authorization': f'Bearer {token}'})\n data = data.json()\n\n # process the data and return it to the callback function\n processedData = []\n for row in data['items']:\n processedData.append({\n 'location': row['location'],\n 'type': row['type'],\n 'size': row['size'],\n 'sales_price': row['sales_price'],\n 'rental_price': row['rental_price'],\n 'demographics': row['demographics'],\n 'infrastructure': row['infrastructure'],\n 'building_details': row['building_details']\n })\n callback(processedData)\n","repo_name":"williamchew85/sg-hdb-infograhpic","sub_path":"HDBData.py","file_name":"HDBData.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20204952555","text":"import torch\ndef eval(model,test_data):\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for i in test_data:\n sentences, labels = i\n outputs = model(sentences)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted+1 == labels).sum().item()\n #print(predicted,labels)\n \n print(\"=\"* 33)\n print('Accuracy : {:.2f} %'.format(100 * correct / total))\n print(\"=\"* 33)\n return correct","repo_name":"Eminent01/Text-Classification-using-Fully-connected-FC-NN","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39861318036","text":"# what do we need to import?\nfrom bs4 import BeautifulSoup\nimport requests\n#import requests_cache\n\n#requests_cache.install_cache('meetings')\n\n# pick a keyword\nkeyword = input(\"> Keyword ... ? 
\")\n\n# find meetings where the keyword is mentioned in the minutes\n\n# get the meeting details\n# committee name, meeting number, meeting date\n# get the minute item name\n# get the url for the minutes\n\nclass SingleMeeting():\n    def __init__(self, base_url, meeting_id):\n        self.base_url = base_url\n        self.meeting_id = meeting_id\n        self.get_meeting()\n\n    def get_meeting(self):\n        url = \"{}/GetMeeting?lMeetingId={}\".format(\n            self.base_url,\n            self.meeting_id\n        )\n        self.soup = BeautifulSoup(requests.get(url).text, \"html.parser\")\n        return self.soup\n    \nif __name__ == \"__main__\":\n    s = SingleMeeting(\"http://democracy.devon.gov.uk/mgWebService.asmx\", 206)\n    for topic in s.soup.findAll('agendaitem'):\n        item = topic.get_text()\n        if keyword in item:\n            print ((\"ITEMS CONTAINING %s\" % keyword), item.strip())\n\n\n","repo_name":"jargonautical/policy-pages","sub_path":"keyword_search.py","file_name":"keyword_search.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"18712962889","text":"#!/usr/bin/python3\n#-*- coding:utf-8 -*-\n'''\n    auth:gzy\n    date:20190623\n    version:0.1.0\n'''\n\n'''\n    1. Create a thread with the constructor of the threading module's Thread class\n    2. Create a thread class by subclassing the threading module's Thread class\n'''\n\nimport threading\n# Define the action function, which will serve as the thread body\ndef action(max):\n    for i in range(max):\n        # Call the threading module's current_thread() function to get the current thread\n        # The thread object's getName() method returns the current thread's name\n        print(threading.current_thread().getName() + ' ' + str(i))\n\n\n# Main program body\nfor j in range(100):\n    print(threading.current_thread().getName() + ' ' + str(j))\n\n    if j == 20:\n        # Create and start the first thread\n        t1 = threading.Thread(target=action,args=(100,))\n        t1.start()\n\n        t2 = threading.Thread(target=action,args=(100,))\n        t2.start()\n\nprint(\"主线程执行结束\")\n","repo_name":"OcaenEyes/python_basic","sub_path":"并发(多线程多进程)/python_创建线程.py","file_name":"python_创建线程.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"39421298594","text":"from collections import deque\r\n\r\n\r\nclass TreeNode:\r\n    def __init__(self, x):\r\n        self.val = x\r\n        self.left = None\r\n        self.right = None\r\n\r\n    def __eq__(self, x):\r\n        if id(self) == id(x):\r\n            return True\r\n        if x and self.val == x.val:\r\n            return self.left == x.left and self.right == x.right\r\n        return False\r\n\r\n    def __str__(self):\r\n        vals = [str(node.val) if node else 'null' for node in self]\r\n        while vals[-1] == 'null':\r\n            vals.pop()\r\n        return f\"[{','.join(vals)}]\"\r\n\r\n    def __repr__(self):\r\n        return f\"TreeNode({self.val})\"\r\n\r\n    def __iter__(self):\r\n        self.__nodes = deque([self])\r\n        return self\r\n\r\n    def __next__(self):\r\n        if not self.__nodes:\r\n            raise StopIteration\r\n\r\n        node = self.__nodes.popleft()\r\n        if node:\r\n            self.__nodes.append(node.left)\r\n            self.__nodes.append(node.right)\r\n        return node\r\n\r\n\r\ndef list_to_treenodes(nums: str):\r\n    if not nums:\r\n        return None\r\n\r\n    nodes = [\r\n        TreeNode(int(val)) if val != \"null\" else None\r\n        for val in nums.strip(\"[]{}\").split(\",\")\r\n    ]\r\n\r\n    childs = nodes[::-1]\r\n    root = childs.pop()\r\n\r\n    for node in nodes:\r\n        if not node:\r\n            continue\r\n        if childs:\r\n            node.left = childs.pop()\r\n        if childs:\r\n            node.right = childs.pop()\r\n\r\n    return root\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    from icecream import ic\r\n\r\n    tree = list_to_treenodes(\"[1,3,2,5,3,null,9]\")\r\n    
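    # Hypothetical usage sketch (editor's annotation, not part of the original
    # file): list_to_treenodes() parses a LeetCode-style level-order string and
    # TreeNode.__str__ serializes it back, so the two should round-trip, while
    # __eq__ compares trees structurally, e.g.:
    #   assert str(list_to_treenodes("[1,3,2,5,3,null,9]")) == "[1,3,2,5,3,null,9]"
    #   assert list_to_treenodes("[1,2]") == list_to_treenodes("[1,2]")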
ic(str(tree))\r\n","repo_name":"thinhntr/cp","sub_path":"leetcode/TreeNode.py","file_name":"TreeNode.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"25029889964","text":"def solution(s):\n    answer = ''\n    info = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\n    info_two = '0123456789'\n    s_list = []\n    for i in s:\n        s_list.append(i)\n    print(info)\n    cnt = 0\n\n    string = ''\n    while s_list:\n        string += s_list.pop(0)\n        if string in info_two:\n            answer += string\n            string = ''\n        else:\n            if string in info:\n                answer += str(info.index(string))\n                string = ''\n\n    return int(answer)\n\n\n# Another person's solution\n\nnum_dic = {\"zero\":\"0\", \"one\":\"1\", \"two\":\"2\", \"three\":\"3\", \"four\":\"4\", \"five\":\"5\", \"six\":\"6\", \"seven\":\"7\", \"eight\":\"8\", \"nine\":\"9\"}\n\ndef solution(s):\n    answer = s\n    for key, value in num_dic.items():\n        answer = answer.replace(key, value)\n    return int(answer)","repo_name":"parkbum11/Algorithm","sub_path":"Programmers/210508_KAKAO인턴/숫자문자열과영단어.py","file_name":"숫자문자열과영단어.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"14364973398","text":"cube = lambda x: x ** 3\n\ndef fibonacci(n):\n    # return a list of fibonacci numbers\n    fib_se = [0, 1]\n    if n ==0 :\n        return []\n    elif n == 1:\n        return [0]\n    else:\n        while len(fib_se) < n:\n            fib_se.append(fib_se[-1] + fib_se[-2])\n        return fib_se\n\nif __name__ == '__main__':\n    n = int(input())\n    print(list(map(cube, fibonacci(n))))","repo_name":"maple24/hackerrank","sub_path":"python/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"21672721462","text":"import time\n\nfrom argparse import Namespace\n\nimport pytest\n\nfrom bioservices import UniProt\n\nfrom ncbi_cds_from_protein.scripts import ncfp\nfrom utils import check_files, modify_namespace\n\n\n@pytest.fixture\ndef mock_uniprot_download_and_log(monkeypatch):\n    \"\"\"Mock remote service call to UniProt for download_and_log test.\n\n    Returns the result expected from l125 of sequences.py, for each query of\n    the path_uniprot_stockholm_small dataset\n\n    u_service.search(match.group(0), columns=\"database(EMBL)\")  # type: ignore\n    \"\"\"\n\n    def mock_search(*args, **kwargs):\n        \"\"\"Mock call to UniProt.search() method.\n\n        This output specific to the download_and_log() test\n\n        This mock updated to reflect UniProt API changes in June 2022\n        \"\"\"\n        return \"EMBL\\nCM000618;\\n\"\n\n    monkeypatch.setattr(UniProt, \"search\", mock_search)\n\n\n@pytest.fixture\ndef mock_basic_uniprot(monkeypatch):\n    \"\"\"Mock remote service call to UniProt for test_basic_uniprot test.\n\n    Returns the result expected from l.129 of sequences.py, for each query of\n    the path_uniprot_stockholm_small dataset\n\n    u_service.search(match.group(0), columns=\"xref_embl\")  # type: ignore\n\n    This mock updated to reflect UniProt API changes in June 2022\n    \"\"\"\n    qstring_results = iter(\n        [\n            \"EMBL\\nJNBS01004944;\\n\",\n            \"EMBL\\nJNBS01000225;\\n\",\n            \"EMBL\\nAZIL01000691;\\n\",\n            \"EMBL\\nGL833138;\\n\",\n            \"EMBL\\nJNBR01001477;\\n\",\n            \"EMBL\\nJNBS01001796;\\n\",\n            \"EMBL\\nJNBS01000295;\\n\",\n            \"EMBL\\nKI913977;\\n\",\n            \"EMBL\\nFN648069;\\n\",\n        ]\n    )\n\n    def mock_search(*args, **kwargs):\n        \"\"\"Mock call to 
UniProt.search() method.\n\n        This output specific to the test_basic_uniprot() test\n        \"\"\"\n        if kwargs[\"columns\"] == \"xref_embl\":\n            return next(qstring_results)\n        else:\n            return \"\\n\"\n\n    monkeypatch.setattr(UniProt, \"search\", mock_search)\n\n\n@pytest.fixture\ndef namespace_base(email_address, path_ncbi, tmp_path):\n    \"\"\"Cmd-line arguments for passing a nonexistent input file.\"\"\"\n    yield Namespace(\n        infname=path_ncbi,\n        outdirname=tmp_path,\n        email=email_address,\n        stockholm=False,\n        cachedir=tmp_path / \".ncfp_cache\",\n        cachestem=time.strftime(\"%Y-%m-%d-%H-%M-%S\"),\n        batchsize=100,\n        retries=10,\n        limit=None,\n        filestem=\"ncfp\",\n        keepcache=False,\n        skippedfname=\"skipped.fasta\",\n        use_protein_ids=False,\n        unify_seqid=False,\n        alternative_start_codon=False,\n        logfile=None,\n        verbose=False,\n        disabletqdm=True,\n        debug=False,\n    )\n\n\ndef test_alternative_start(\n    namespace_base, path_altstart, path_altstart_targets, tmp_path\n):\n    \"\"\"ncfp collects correct coding sequences for NCBI input with alternative start codon.\n\n    Makefile target:\n        ncfp --allow_alternative_start_codon \\\n            tests/fixtures/sequences/input_alternative_start.fasta \\\n            tests/fixtures/targets/alternative_start dev@null.com -v\n    \"\"\"\n    infile = path_altstart\n    outdir = tmp_path / \"alternative_start\"\n    args = modify_namespace(\n        namespace_base, infname=infile, outdirname=outdir, alternative_start_codon=True\n    )\n\n    # Run ersatz command-line\n    ncfp.run_main(args)\n\n    # Compare output\n    check_files(outdir, path_altstart_targets,\n                (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\"))\n\n\ndef test_ambiguous(\n    namespace_base, path_ambiguous, path_ambiguous_targets, tmp_path\n):\n    \"\"\"ncfp collects correct coding sequences for ambiguous UniProt GN field.\n\n    Makefile target:\n        ncfp -s tests/fixtures/sequences/input_ambiguous.fasta \\\n            tests/fixtures/targets/ambiguous dev@null.com -v\n    \"\"\"\n    infile = path_ambiguous\n    outdir = tmp_path / \"ambiguous\"\n    args = modify_namespace(namespace_base, infname=infile,\n                            outdirname=outdir, stockholm=True)\n\n    # Run ersatz command-line\n    ncfp.run_main(args)\n\n    # Compare output\n    check_files(outdir, path_ambiguous_targets,\n                (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\"))\n\n\ndef test_basic_ncbi(namespace_base, path_ncbi, path_ncbi_targets, tmp_path):\n    \"\"\"ncfp collects correct coding sequences for basic NCBI input.\n\n    Makefile target:\n        ncfp tests/fixtures/sequences/input_ncbi.fasta \\\n            tests/fixtures/targets/ncbi dev@null.com -v\n    \"\"\"\n    # Modify default arguments\n    infile = path_ncbi\n    outdir = tmp_path / \"basic_ncbi\"\n    args = modify_namespace(namespace_base, infname=infile, outdirname=outdir)\n\n    # Run ersatz command-line\n    ncfp.run_main(args)\n\n    # Compare output\n    check_files(outdir, path_ncbi_targets, (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\"))\n\n\ndef test_basic_uniprot(\n    namespace_base, path_uniprot, path_uniprot_targets, tmp_path, mock_basic_uniprot\n):\n    \"\"\"ncfp collects correct coding sequences for basic UniProt input.\n\n    Makefile target:\n        ncfp tests/fixtures/sequences/input_uniprot.fasta \\\n            tests/fixtures/targets/basic_uniprot dev@null.com -v\n    \"\"\"\n    # Modify default arguments\n    infile = path_uniprot\n    outdir = tmp_path / \"basic_uniprot\"\n    args = modify_namespace(namespace_base, infname=infile, outdirname=outdir)\n\n    # Run ersatz command-line\n    ncfp.run_main(args)\n\n    # Compare output\n    check_files(\n        outdir,\n        path_uniprot_targets,\n        (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\", \"skipped.fasta\"),\n    )\n\n\n@pytest.mark.skip(\n    reason=\"Database 
caching needs to be rewritten to account for multiple cross-references to EMBL\"\n)\ndef test_basic_stockholm(\n namespace_base, path_stockholm, path_stockholm_targets, tmp_path\n):\n \"\"\"ncfp collects correct coding sequences for basic UniProt/Stockholm input.\"\"\"\n # Modify default arguments\n infile = path_stockholm\n outdir = tmp_path / \"basic_stockholm\"\n args = modify_namespace(\n namespace_base, infname=infile, outdirname=outdir, stockholm=True\n )\n\n # Run ersatz command-line\n ncfp.run_main(args)\n\n # Compare output (should be no skipped files)\n check_files(\n outdir,\n path_stockholm_targets,\n (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\"),\n )\n\n\ndef test_small_stockholm(\n namespace_base,\n path_uniprot_stockholm_small,\n path_uniprot_stockholm_small_targets,\n tmp_path,\n):\n \"\"\"ncfp collects correct coding sequences for small UniProt/Stockholm input.\n\n Makefile target:\n ncfp -s tests/fixtures/sequences/input_uniprot_stockholm_small.fasta \\\n tests/fixtures/targets/small_stockholm dev@null.com -v\n \"\"\"\n # Modify default arguments\n infile = path_uniprot_stockholm_small\n outdir = tmp_path / \"small_stockholm\"\n args = modify_namespace(\n namespace_base, infname=infile, outdirname=outdir, stockholm=True\n )\n\n # Run ersatz command-line\n ncfp.run_main(args)\n\n # Compare output (should be no skipped files)\n check_files(\n outdir, path_uniprot_stockholm_small_targets, (\n \"ncfp_aa.fasta\", \"ncfp_nt.fasta\")\n )\n\n\ndef test_small_stockholm_unified(\n namespace_base,\n path_uniprot_stockholm_small,\n path_uniprot_stockholm_small_unified_targets,\n tmp_path,\n):\n \"\"\"ncfp collects correct coding sequences for small UniProt/Stockholm input.\n\n Makefile target:\n ncfp -s --unify_seqid \\\n tests/fixtures/sequences/input_uniprot_stockholm_small.fasta \\\n tests/fixtures/targets/small_stockholm_unified/ dev@null.com -v\n \"\"\"\n # Modify default arguments\n infile = path_uniprot_stockholm_small\n outdir = tmp_path / \"small_stockholm_unified\"\n args = modify_namespace(\n namespace_base,\n infname=infile,\n outdirname=outdir,\n stockholm=True,\n unify_seqid=True,\n )\n\n # Run ersatz command-line\n ncfp.run_main(args)\n\n # Compare output (should be no skipped files)\n check_files(\n outdir,\n path_uniprot_stockholm_small_unified_targets,\n (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\"),\n )\n\n\ndef test_small_stockholm_use_protein_id(\n namespace_base,\n path_uniprot_stockholm_small,\n path_uniprot_stockholm_small_use_proteinid_targets,\n tmp_path,\n):\n \"\"\"ncfp collects correct coding sequences for small UniProt/Stockholm input.\n\n Makefile target:\n ncfp -s --use_protein_id \\\n tests/fixtures/sequences/input_uniprot_stockholm_small.fasta \\\n tests/fixtures/targets/small_stockholm_use_proteinid/ dev@null.com -v\n \"\"\"\n # Modify default arguments\n infile = path_uniprot_stockholm_small\n outdir = tmp_path / \"small_stockholm_use_proteinid\"\n args = modify_namespace(\n namespace_base,\n infname=infile,\n outdirname=outdir,\n stockholm=True,\n use_protein_ids=True,\n )\n\n # Run ersatz command-line\n ncfp.run_main(args)\n\n # Compare output (should be no skipped files)\n check_files(\n outdir,\n path_uniprot_stockholm_small_use_proteinid_targets,\n (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\"),\n )\n\n\ndef test_ncbi_stockholm(\n namespace_base,\n path_ncbi_stockholm,\n path_ncbi_stockholm_targets,\n tmp_path,\n):\n \"\"\"ncfp collects correct coding sequences for NCBI/Stockholm input.\n\n This test was added as a check for the fix in issue 31.\n\n Makefile 
target:\n        ncfp -s \\\n            tests/fixtures/sequences/input_ncbi_stockholm.fasta \\\n            tests/fixtures/targets/ncbi_stockholm dev@null.com -v \n    \"\"\"\n    # Modify default arguments\n    infile = path_ncbi_stockholm\n    outdir = tmp_path / \"ncbi_stockholm\"\n    args = modify_namespace(\n        namespace_base, infname=infile, outdirname=outdir, stockholm=True\n    )\n\n    # Run ersatz command-line\n    ncfp.run_main(args)\n\n    # Compare output (should be no skipped files)\n    check_files(\n        outdir,\n        path_ncbi_stockholm_targets,\n        (\"ncfp_aa.fasta\", \"ncfp_nt.fasta\"),\n    )\n","repo_name":"widdowquinn/ncfp","sub_path":"tests/test_ncfp.py","file_name":"test_ncfp.py","file_ext":"py","file_size_in_byte":9941,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} {"seq_id":"72292508744","text":"import csv\nfile = open(\"training2(use this one).csv\",\"r\",encoding='utf-8',errors='ignore')\ncsv_reader = csv.reader(file,delimiter=',')\n\ndef ngrams(string, n=3):\n\tngrams = zip(*[string[i:] for i in range(n)])\n\treturn [''.join(ngram) for ngram in ngrams]\n\nfor row in csv_reader:\n\tprint(ngrams(row[0]))\n\n\n","repo_name":"edrapac/NCUR2019","sub_path":"Iteration3(5000)/Scripts/ngram_vectorizer.py","file_name":"ngram_vectorizer.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"29746256295","text":"from tkinter import *\r\nimport tkinter as tk\r\nimport math\r\nimport numpy as np\r\nimport sys\r\nimport crep_gui as cg\r\nimport crep_def as cd\r\nimport crep_np as cn\r\nfrom sympy import symbols, Eq, solve\r\n\r\ndef find_basic_BR(parent_in): # return index coordinates of BRs\r\n    # Player 1 (going down each column)\r\n    match_p1 = np.zeros((parent_in.rows, parent_in.cols), dtype=bool)\r\n    match_p2 = np.zeros((parent_in.rows, parent_in.cols), dtype=bool)\r\n    for i in range(parent_in.matrix.shape[1]): # increment right\r\n        local_br_val = (-1*sys.maxsize)-1\r\n        curr_col = (parent_in.matrix[:,i])\r\n        curr_col_indexed = [x[0] for x in curr_col]\r\n        for j in range(parent_in.matrix.shape[0]): # scan down\r\n            curr = parent_in.matrix[j][i][0]\r\n            if (curr > local_br_val):\r\n                local_br_val = curr\r\n        comp_col = np.zeros((1, parent_in.rows))\r\n        comp_col.fill(local_br_val)\r\n        bool_col = (curr_col_indexed == comp_col)\r\n        for x in range(match_p1.shape[0]):\r\n            match_p1[x,i] = bool_col[0][x]\r\n    \r\n    # Player 2 (going right each row)\r\n    for i in range(parent_in.matrix.shape[0]): # increment down\r\n        local_br_val = (-1*sys.maxsize)-1\r\n        curr_row = (parent_in.matrix[i,:])\r\n        curr_row_indexed = [x[1] for x in curr_row]\r\n        for j in range(parent_in.matrix.shape[1]): # scan right\r\n            curr = parent_in.matrix[i][j][1]\r\n            if (curr > local_br_val):\r\n                local_br_val = curr\r\n        comp_row = np.zeros((1, parent_in.cols))\r\n        comp_row.fill(local_br_val)\r\n        bool_row = (curr_row_indexed == comp_row)\r\n        for x in range(match_p2.shape[1]):\r\n            match_p2[i,x] = bool_row[0][x]\r\n    return match_p1, match_p2\r\n\r\ndef find_BRNE(parent_in): \r\n    # super duper inefficient nested for-looping, \r\n    # use numpy functions later for optimization\r\n    max_index = (-1,-1)\r\n    p1_max_val = (-1*sys.maxsize)-1\r\n    p2_max_val = (-1*sys.maxsize)-1\r\n    for i, ie in enumerate(parent_in.matrix):\r\n        for j, je in enumerate(ie):\r\n            max_index = (-1,-1)\r\n            if (parent_in.p1_br[i][j] and parent_in.p2_br[i][j] and \r\n                (je[parent_in.p1_index] > p1_max_val) and \r\n                (je[parent_in.p2_index] > p2_max_val)):\r\n                p1_max_val = je[parent_in.p1_index]\r\n                p2_max_val = je[parent_in.p2_index]\r\n                max_index = (i,j) # CAUTION: (i == p1, j == p2)\n                
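                # Annotation (a hedged reading of this loop, not original commentary):
                # a strictly better mutual best response clears BRNE and stores this
                # single cell (below); the elif branch that follows appends ties, so
                # BRNE ends up holding every payoff-maximal mutual best-response cell.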
parent_in.BRNE = []\r\n                parent_in.BRNE.append(max_index)\r\n            elif (parent_in.p1_br[i][j] and parent_in.p2_br[i][j] and \r\n                (je[parent_in.p1_index] == p1_max_val) and \r\n                (je[parent_in.p2_index] == p2_max_val)):\r\n                max_index = (i,j)\r\n                parent_in.BRNE.append(max_index)\r\n    print(\"BRNE:\",parent_in.BRNE)\r\n\r\n\r\ndef find_folk_triggers(parent_in):\r\n    \"\"\"\r\n    1. Nash equilibrium\r\n    2. Max deviation\r\n    3. Alternative strictly better outcome for both players (cooperative eq)\r\n    \"\"\"\r\n    \"\"\"\r\n    Process:\r\n    1. Find BR Nash Eq\r\n    2. Find all outcomes with strictly better outcomes than the NE\r\n    3. Do a temporal discounting factor calculation for each Folk Eq\r\n    \"\"\"\r\n    # folk_arr: array of delta pairs: list of list of pairs\r\n    parent_in.folk_arr = []\r\n    parent_in.folk_indexes = []\r\n    for a, ae in enumerate(parent_in.BRNE):\r\n\r\n        parent_in.folk_arr.append([])\r\n        parent_in.folk_indexes.append([])\r\n        br_x = ae[parent_in.p1_index]\r\n        br_y = ae[parent_in.p2_index]\r\n        p1_br_val = parent_in.matrix[br_x][br_y][parent_in.p1_index]\r\n        p2_br_val = parent_in.matrix[br_x][br_y][parent_in.p2_index]\r\n        for i, ie in enumerate(parent_in.matrix):\r\n            for j, je in enumerate(ie):\r\n                if (je[parent_in.p1_index] > p1_br_val and\r\n                    je[parent_in.p2_index] > p2_br_val):\r\n                    p1_delta, p2_delta = find_discount_shift(parent_in, i, j, ae)\r\n                    if (p1_delta and p2_delta):\r\n                        parent_in.folk_arr[a].append((p1_delta, p2_delta))\r\n                        parent_in.folk_indexes[a].append((i,j))\r\n                    else:\r\n                        print(\"Deltas do not exist for coordinates [{},{}]\".format(str(i), str(j)))\r\n\r\ndef find_discount_shift(parent_in, i_in, j_in, brne_in):\r\n    c_p1 = i_in\r\n    c_p2 = j_in\r\n\r\n    d_p1 = brne_in[parent_in.p1_index]\r\n    d_p2 = brne_in[parent_in.p2_index]\r\n\r\n    c_eq_p1 = parent_in.matrix[c_p1][c_p2][parent_in.p1_index]\r\n    atck_p1 = parent_in.matrix[d_p1][c_p2][parent_in.p1_index]\r\n    d_eq_p1 = parent_in.matrix[d_p1][d_p2][parent_in.p1_index]\r\n\r\n    # Infinite summation formula\r\n    p1_delta = symbols('d1')\r\n    exprC1 = c_eq_p1/(1-p1_delta)\r\n    exprD1 = atck_p1 + (d_eq_p1*p1_delta)/(1-p1_delta)\r\n    \r\n    p1_delta_expr = solve(Eq(exprC1, exprD1), p1_delta)\r\n    if(bool(p1_delta_expr)):\r\n        p1_delta_solution = round(float(p1_delta_expr[0]),2)\r\n    else:\r\n        print(\"[Undefined p1 delta solution]\")\r\n\r\n    c_eq_p2 = parent_in.matrix[c_p1][c_p2][parent_in.p2_index]\r\n    atck_p2 = parent_in.matrix[c_p1][d_p2][parent_in.p2_index]\r\n    d_eq_p2 = parent_in.matrix[d_p1][d_p2][parent_in.p2_index]\r\n\r\n    # Infinite summation formula\r\n    p2_delta = symbols('d2')\r\n    exprC2 = c_eq_p2/(1-p2_delta)\r\n    exprD2 = atck_p2 + (d_eq_p2*p2_delta)/(1-p2_delta)\r\n\r\n    p2_delta_expr = solve(Eq(exprC2, exprD2), p2_delta)\r\n    if(bool(p2_delta_expr)):\r\n        p2_delta_solution = round(float(p2_delta_expr[0]),2)\r\n    else:\r\n        print(\"[Undefined p2 delta solution]\")\r\n    print(\"atck_p1:\",atck_p1)\r\n    print(\"atck_p2:\",atck_p2)\r\n\r\n\r\n    return p1_delta_solution, p2_delta_solution\r\n\r\n### Logic GUI ###\r\n\r\ndef show_payoffs(parent_in, canvas_in, p1_br, p2_br):\r\n    # super duper inefficient nested for-looping, \r\n    # use numpy functions later for optimization\r\n    parent_in.initH_offset = parent_in.top+parent_in.boxlen/2\r\n    parent_in.initW_offset = parent_in.left+parent_in.boxlen/2\r\n    digscA = 0\r\n    digscB = 0\r\n    for i in range(parent_in.rows):\r\n        for j in range(parent_in.cols):\r\n            p1_font = \"\"\r\n            p2_font = \"\"\r\n            coord_x = parent_in.initW_offset+(parent_in.boxlen*(j))\r\n            coord_y = parent_in.initH_offset+(parent_in.boxlen*(i))\r\n            digscA = 0\r\n            digscB = 0\r\n            if (p1_br[i][j]):\r\n                if (parent_in.matrix[i][j][parent_in.p1_index] >= 100):\r\n                    digscA = 2\r\n                elif (parent_in.matrix[i][j][parent_in.p1_index] >= 10):\r\n                    digscA = 1\r\n\r\n                # Payoff 
highlighting - P1 RED\n canvas_in.create_rectangle(\n coord_x-parent_in.poh-parent_in.poh*0.50-digscA*parent_in.poh*0.4 +(parent_in.rows-2)*parent_in.poh*0.00, \n coord_y-parent_in.poh*0.6, \n coord_x-parent_in.poh+parent_in.poh*0.5 +digscA*parent_in.poh*0.25 -(parent_in.cols-2)*parent_in.poh*0.05, \n coord_y+parent_in.poh*0.6, \n fill= cd.mute_red)\n p1_font = cd.paybold_font\n else:\n p1_font = cd.payoff_font\n if (p2_br[i][j]):\n if (parent_in.matrix[i][j][parent_in.p2_index] >= 100):\n digscB = 2\n elif (parent_in.matrix[i][j][parent_in.p2_index] >= 10):\n digscB = 1\n # Payoff highlighting - P2 BLUE\n canvas_in.create_rectangle(\n coord_x+parent_in.poh-parent_in.poh*0.5-digscB*parent_in.poh*0.15 +(parent_in.rows-2)*parent_in.poh*0.0 , coord_y-parent_in.poh*0.6, \n coord_x+parent_in.poh+parent_in.poh*0.5+digscB*parent_in.poh*0.55 -(parent_in.cols-2)*parent_in.poh*0.05, coord_y+parent_in.poh*0.6, \n fill= cd.mute_blue)\n p2_font = cd.paybold_font\n else:\n p2_font = cd.payoff_font\n\n # Find NEs\n if (p1_br[i][j] and p2_br[i][j]):\n tuple_check = (i,j)\n tuple_bool = False\n br_color = \"\"\n for a in parent_in.BRNE:\n if a == tuple_check:\n tuple_bool = True\n if tuple_bool:\n br_color = cd.rich_yellow\n else:\n br_color = cd.pale_yellow\n canvas_in.create_rectangle(\n coord_x-3.5*parent_in.poh +(parent_in.rows-2)*parent_in.poh*0.8, coord_y-parent_in.poh*1.5, \n coord_x+3.5*parent_in.poh -(parent_in.cols-2)*parent_in.poh*0.8, coord_y+parent_in.poh*1.5, \n outline= br_color, width = 3)\n canvas_in.create_text(coord_x-parent_in.poh-digscA*parent_in.poh*0.1, coord_y, \n text=parent_in.matrix[i][j][0], fill=\"black\", font=p1_font)\n canvas_in.create_text(coord_x, coord_y, \n text=',', fill=\"black\", font=(cd.payoff_font))\n canvas_in.create_text(coord_x+parent_in.poh+digscB*parent_in.poh*0.2, coord_y, \n text=parent_in.matrix[i][j][1], fill=\"black\", font=p2_font)\n\ndef draw_alt_paretos(parent_in, canvas_in, i_in, j_in):\n coord_x = parent_in.initW_offset+(parent_in.boxlen*(j_in))\n coord_y = parent_in.initH_offset+(parent_in.boxlen*(i_in))\n canvas_in.create_rectangle(\n coord_x-3.5*parent_in.poh +(parent_in.rows-2)*parent_in.poh*0.8, coord_y-parent_in.poh*1.5, \n coord_x+3.5*parent_in.poh -(parent_in.cols-2)*parent_in.poh*0.8, coord_y+parent_in.poh*1.5, \n outline= \"lime green\", width = 2)\n\ndef draw_delta_label(parent_in, subcan_in, i_in, j_in, br_i_in, br_j_in, p1_delta, p2_delta):\n coord_x1 = parent_in.initW_offset+(parent_in.boxlen *(j_in))\n coord_x2 = parent_in.initW_offset+(parent_in.boxlen *(br_j_in))\n coord_y1 = parent_in.initH_offset+(parent_in.boxlen * (br_i_in))\n coord_y2 = parent_in.initH_offset+(parent_in.boxlen *(i_in))\n # P1-delta\n subcan_in.create_text(\n coord_x1, coord_y1+parent_in.offset*1.5, \n text = \"d1: \"+str(p1_delta), fill=\"green\", font=(cd.delta_font))\n # h-v\n bl = parent_in.boxlen\n subcan_in.create_line(coord_x1 +bl*0.3, coord_y1, coord_x2 -bl*0.3, coord_y1, fill=\"lime green\", width ='2',arrow=tk.LAST, dash=(3,5))\n subcan_in.create_line(coord_x1, coord_y2 +bl*0.25, coord_x1, coord_y1 -bl*0.25, fill=\"lime green\", width ='2',arrow=tk.LAST, dash=(3,5))\n \n # P2-delta\n subcan_in.create_text(\n coord_x2, coord_y2+parent_in.offset*1.5, \n text = \"d2: \"+str(p2_delta), fill=\"green\", font=(cd.delta_font))\n # v-h\n subcan_in.create_line(coord_x2, coord_y2 +bl*0.25, coord_x2, coord_y1 -bl*0.25, fill=\"lime green\", width ='2',arrow=tk.LAST, dash=(3,5))\n subcan_in.create_line(coord_x1 +bl*0.3, coord_y2, coord_x2 -bl*0.3, coord_y2, 
fill=\"lime green\", width ='2',arrow=tk.LAST, dash=(3,5))\n\ndef gen_BR_grid(parent_in, match_p1, match_p2, rep_bool):\n subroot = tk.Tk()\n subcan = Canvas(subroot, bg='white')\n cg.create_matrix_grid(parent_in, subroot, subcan)\n cg.gen_labels(parent_in, subcan)\n show_payoffs(parent_in, subcan, match_p1, match_p2)\n \n\n # folk_arr = list of pareto efficient tuples: [(0.3,0.7), (0.2,0.8)] \n # folk_indexes = list of pareto efficient tuples: [(2,2), (1,2)]\n if (rep_bool):\n # detected folk coordinates [green box]\n for a, ae in enumerate(parent_in.folk_indexes[0]):\n i = ae[0]\n j = ae[1] \n \n # parent_in.BRNE: [(0,0), (1,1)]\n # our BRNE coordinates [yellow box]\n for c, ce in enumerate(parent_in.BRNE):\n p1_delta = parent_in.folk_arr[0][a][0]\n p2_delta = parent_in.folk_arr[0][a][1]\n br_i = ce[0]\n br_j = ce[1]\n draw_alt_paretos(parent_in, subcan, i, j)\n draw_delta_label(parent_in, subcan, i, j, br_i, br_j, p1_delta, p2_delta)\n \n #subcan.create_text(parent_in.cenh, parent_in.top-100, text = \"delta: \"+str(parent_in.delta_solution), font=(cd.label_font))\n gen_payoff_buttons(parent_in, subroot, subcan)\n subroot.mainloop()\n\ndef gen_payoff_buttons(parent_in, root, canvas):\n quit_btn = tk.Button(root, text=\"Exit\", bg = cd.lite_ornge, command=root.destroy, width = 6*int(parent_in.boxlen/20), height = 5)\n canvas.create_window(parent_in.cenv, parent_in.bot + 1.5 *(parent_in.boxlen/4), window=quit_btn)\n\n\n","repo_name":"willruiz/gametheory","sub_path":"RNFG/crep_logic.py","file_name":"crep_logic.py","file_ext":"py","file_size_in_byte":12424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13189755440","text":"\"\"\"\nmavsim_python\n - Chapter 5 assignment for Beard & McLain, PUP, 2012\n - Last Update:\n 2/2/2019 - RWB\n\"\"\"\nimport sys\nsys.path.append('..')\nimport numpy as np\nimport parameters.simulation_parameters as SIM\n\nfrom chap2.mav_viewer import MavViewer\nfrom chap3.data_viewer import DataViewer\nfrom chap4.mav_dynamics import MavDynamics\nfrom chap4.wind_simulation import WindSimulation\nfrom chap5.trim import compute_trim\nfrom chap5.compute_models import compute_model\nfrom tools.signals import Signals\n\n# initialize the visualization\nVIDEO = False # True==write video, False==don't write video\nmav_view = MavViewer() # initialize the mav viewer\ndata_view = DataViewer() # initialize view of data plots\nif VIDEO is True:\n from chap2.video_writer import VideoWriter\n video = VideoWriter(video_name=\"chap5_video.avi\",\n bounding_box=(0, 0, 1000, 1000),\n output_rate=SIM.ts_video)\n\n# initialize elements of the architecture\nwind = WindSimulation(SIM.ts_simulation)\nmav = MavDynamics(SIM.ts_simulation)\n\n# use compute_trim function to compute trim state and trim input\nVa = 25.\ngamma = 0.*np.pi/180.\ntrim_state, trim_input = compute_trim(mav, Va, gamma)\nmav._state = trim_state # set the initial state of the mav to the trim state\ndelta = trim_input # set input to constant constant trim input\n\n# # compute the state space model linearized about trim\ncompute_model(mav, trim_state, trim_input)\n\n# this signal will be used to excite modes\ninput_signal = Signals(amplitude=.05,\n duration=0.01,\n start_time=2.0)\n\n# initialize the simulation time\nsim_time = SIM.start_time\n\n# main simulation loop\nprint(\"Press Command-Q to exit...\")\nwhile sim_time < SIM.end_time:\n\n # -------physical system-------------\n #current_wind = wind.update() # get the new wind vector\n current_wind = np.zeros((6, 
1))\n # this input excites the phugoid mode by adding an impulse at t=5.0\n # delta.elevator += input_signal.impulse(sim_time)\n # delta.rudder += input_signal.doublet(sim_time)\n mav.update(delta, current_wind) # propagate the MAV dynamics\n\n # -------update viewer-------------\n mav_view.update(mav.true_state) # plot body of MAV\n data_view.update(mav.true_state, # true states\n mav.true_state, # estimated states\n mav.true_state, # commanded states\n delta, # input to aircraft\n SIM.ts_simulation)\n if VIDEO is True:\n video.update(sim_time)\n\n # -------increment time-------------\n sim_time += SIM.ts_simulation\n\nif VIDEO is True:\n video.close()\n\n\n\n\n","repo_name":"randybeard/mavsim_public","sub_path":"legacy_mavsim_python/chap5/mavsim_chap5.py","file_name":"mavsim_chap5.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":294,"dataset":"github-code","pt":"81"} +{"seq_id":"187739972","text":"#a function for creating a new funtest\ndef nowy_test(nazwa=\"\"):\n licznik = 0\n if nazwa == \"\":\n nazwa = input(\"podaj nazwe testu(pliku)\")+\".txt\"\n plik = open(nazwa, \"a\", encoding=\"utf-8\")\n while True:\n a = 0\n pytanie_nowe = input(\"wpisz tekst swojego pytania, 'k' by zakonczyc\")\n if pytanie_nowe == \"k\":\n if licznik > 0:\n break\n else:\n print(\"nie ma jeszcze żadnych pytań\")\n continue\n elif len(pytanie_nowe) < 3:\n print(\"zadaj prawdziwe pytanie!\")\n continue\n else:\n plik.write(f\"{pytanie_nowe}\\n\")\n while a < 15:\n answer = input(f\"podaj wariant {warianty[a]}) lub wcisnij 'k' jesli to wszystkie warianty\")\n if answer == \"k\":\n if a > 1:\n while True:\n poprawna = input(\"podaj poprawna odpowiedz\")\n if poprawna in warianty[:a] and len(poprawna) == 1:\n plik.write(f\"{poprawna}\\n\")\n break\n else:\n print(\"podaj literkę poprawnej odpowiedzi\")\n break\n else:\n print(\"podaj przynajmniej 2 odpowiedzi\")\n continue\n elif len(answer) == 0:\n print(\"podaj prawdziwy wariant!\")\n continue\n else:\n plik.write(f\"{warianty[a]}) {answer}\\n\")\n a += 1\n licznik += 1\n plik.close()\n\n\nwarianty = \"abcdefghijklmno\" # a string for options\nchwilowa = [] # initiating a variable for appending a list\n\n# main program loop\nwhile True:\n\n pytania = [] # a list for questions, options and answers\n\n print(\"witaj w FUNteście\")\n o_kim = input(\"wpisz nazwę testu(pliku) lub naciśnij 'n' by stworzyć nowy test\\n\")+\".txt\"\n if o_kim == \"n.txt\":\n nowy_test()\n continue\n try:\n f = open(o_kim, \"r\", encoding=\"utf-8\")\n pass\n except IOError:\n nowy = input(\"nie ma takiego pliku, czy chciałbyś/chciałabyś stworzyć funtest o takiej nazwie? 
('t')\")\n if nowy == \"t\":\n nowy_test(nazwa=o_kim)\n continue\n\n f1 = f.readlines()\n for x in f1:\n if x != \"\":\n chwilowa.append(x.strip())\n if len(x) == 2:\n chwilowa[-1] = x[0]\n pytania.append(chwilowa)\n chwilowa = []\n f.close()\n\n # a loop for answering the test\n while True:\n imie = input(\"podaj swoje imie by zacząć\\n\")\n numer = 0 # a variable holding the number of the question\n wynik = 0 # a variable holding the score of the user\n zle_odpowiedzi = [] # a list of wrong answers\n highscores = [] # a list for highscores\n\n for pytanie in pytania:\n numer += 1\n for x in pytanie[:-1]:\n print(x)\n odpowiedz = input().lower()\n while odpowiedz not in warianty[:len(pytanie)-2] or len(odpowiedz) != 1:\n print(\"nie ma takiej odpowiedzi\")\n for x in pytanie[:-1]:\n print(x)\n odpowiedz = input().lower()\n if odpowiedz == pytanie[-1]:\n wynik += 1\n else:\n zle_odpowiedzi.append([numer, odpowiedz])\n\n print(f\"Twój wynik to {wynik}pkt\", \"BRAWO!!!\" if wynik/len(pytania) > 0.75 else \"cienko :/\")\n high = imie + \"-\" + str(wynik) + \"\\n\"\n with open(f\"highscores.{o_kim}\", \"a\", encoding=\"utf-8\") as f:\n f.write(high)\n\n with open(f\"highscores.{o_kim}\", \"r+\", encoding=\"utf-8\") as f:\n f1 = f.readlines()\n for x in f1:\n name, score = x.split(sep=\"-\")\n score = int(score)\n highscores.append([name, score])\n highscores.sort(key=lambda s: s[1], reverse=True)\n\n f = open(f\"highscores.{o_kim}\", \"w\")\n f.close()\n\n with open(f\"highscores.{o_kim}\", \"r+\", encoding=\"utf-8\") as f:\n for x in highscores:\n f.write(f\"{x[0]}-{x[1]}\\n\")\n\n while True:\n co_dalej = input(\"nacisnij 'z' by poznac swoje bledne odpowiedzi, 'q' zeby wyjsc z programu,\"\n \" 'd' by wyswietlic dobre odpowiedzi,\\n'h' by wyswietlic najwyzsze wyniki,\"\n \" 'n' by sprobowac jeszcze raz, cokolwiek innego by wrócić\\n\")\n if co_dalej == \"q\":\n exit()\n elif co_dalej == 'z':\n for x in zle_odpowiedzi:\n print(x[0], \"-\", x[1])\n elif co_dalej == 'd':\n for x in range(numer-1):\n print(x+1, \"-\", pytania[x][-1])\n elif co_dalej == 'h':\n najdluzszy = len(highscores[0][0])\n for x in highscores:\n dlugosc = len(x[0])\n if dlugosc > najdluzszy:\n najdluzszy = dlugosc\n for x in highscores:\n dlugosc = len(x[0])\n if dlugosc > 20:\n x[0] = x[0][:18]+\"(...)\"\n dlugosc = 23\n print(f\"{x[0]}{'-' * (23 - dlugosc)}-{x[1]}\")\n else:\n break\n if co_dalej == \"n\":\n continue\n break\n","repo_name":"Jelowpat/Patryk-Jelowicki","sub_path":"projects_from_infoshare_course/funtests/funtests.py","file_name":"funtests.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74080580104","text":"from collections import Counter\nimport printer\nimport random\nimport re\nimport sys\n\n# Use the correct input for python 2 and 3\nif sys.version_info[0] < 3:\n string_input = raw_input\nelse:\n string_input = input\n\n\nclass Game:\n \"\"\"The game object that holds all of the gameplay logic and vars\"\"\"\n # global vars (stats)\n _totalGamesHuman = 0\n _totalWinsHuman = 0\n _totalGuessesHuman = 0\n _totalGamesPC = 0\n _totalWinsPC = 0\n _totalGuessesPC = 0\n\n # init new game\n #Defaults to game of 10 guesses, list of RGBWYO and 4-digit code\n def __init__(self, codeLength=4, maxGuesses=10, symbolList=None):\n if symbolList is None:\n self.symbolList = ['R', 'G', 'B', 'W', 'Y', 'O']\n else:\n self.symbolList = symbolList\n self.codeLength = codeLength\n self.maxGuesses = maxGuesses\n 
self.solution = ''\n # using a tuple (\"RGBY\", (1,2)) -> (code, (blackPegs, whitePegs))\n self.guesses = []\n self.didWin = False\n self.printer = printer.Printer(self)\n\n # compare two codes and get number of white & black pegs\n @staticmethod\n def compareCodes(code1, code2):\n # Gets a count for each color in each code\n code1ColorCount = Counter(code1)\n code2ColorCount = Counter(code2)\n # Gets a count for the colors in both\n overlap = code1ColorCount & code2ColorCount\n # Total pegs tot be shown\n totalPegs = sum(overlap.values())\n # Calculate number of black pegs\n blackPegs = 0\n for i in range(0, len(code1)):\n if (code1[i] == code2[i]):\n blackPegs += 1\n # Calculate number of white pegs\n whitePegs = totalPegs - blackPegs\n return blackPegs, whitePegs\n\n # validate that a given code is in the solution space\n def validateCode(self, code):\n reString = ('[' + (''.join(self.symbolList)) + ']{'\n + str(self.codeLength) + '}\\Z')\n if re.match(reString, code):\n return True\n return False\n\n #these must be implemented by Game's children\n def endGame(self, didWin):\n raise NotImplementedError(printer.Printer.subclassError)\n\n def generateSolution(self):\n raise NotImplementedError(printer.Printer.subclassError)\n\n def makeGuess(self):\n raise NotImplementedError(printer.Printer.subclassError)\n\n\nclass UserGame(Game):\n \"\"\"Child of the Game class for user games (codebreaker mode).\"\"\"\n def __init__(self, codeLength=4, maxGuesses=10, symbolList=None):\n Game.__init__(self, codeLength, maxGuesses, symbolList)\n\n # Generate a random solution\n def generateSolution(self):\n codeString = \"\"\n for i in range(0, self.codeLength):\n codeString += random.choice(self.symbolList)\n # print(codeString)\n self.solution = codeString\n\n # Logic for user making a guess\n def makeGuess(self):\n Game._totalGuessesHuman += 1\n codeGuess = string_input(printer.Printer.codeInputHuman).upper()\n #Validates code, if H print a hint otherwise reprompt\n while not self.validateCode(codeGuess):\n if(codeGuess == 'H'):\n self.printer.printGame()\n print(printer.Printer.hintText + self.solution[random.randint(0,self.codeLength - 1)])\n codeGuess = string_input(printer.Printer.codeInputHuman).upper()\n else:\n codeGuess = string_input(printer.Printer.invalidInputHuman).upper()\n #Compares the user code to the solution and return white/black pegs\n blackPegs, whitePegs = Game.compareCodes(codeGuess, self.solution)\n #Adds the guessed string and corresponding white and black pegs to the total list\n self.guesses.append((codeGuess, (blackPegs, whitePegs)))\n #If the code is correct return true to end the current game\n if blackPegs is self.codeLength:\n return True\n return False\n\n # Check to see if the game has been won\n def endGame(self, didWin):\n self.didWin = didWin\n Game._totalGamesHuman += 1\n if(didWin):\n print(printer.Printer.winTextHuman)\n Game._totalWinsHuman += 1\n else:\n print(printer.Printer.loseTextHuman + self.solution)\n string_input(printer.Printer.continueText)\n\n\nclass ComputerGame(Game):\n \"\"\"Child of the Game class for computer games (mastermind mode).\"\"\"\n def __init__(self, codeLength=4, maxGuesses=10, symbolList=None):\n Game.__init__(self, codeLength, maxGuesses, symbolList)\n self.possibleCode = self.symbolList[:]\n self.guessSoFar = \"\"\n\n # Get the solution from the user\n def generateSolution(self):\n #Reprompts until valid input\n codeSolution = string_input(printer.Printer.codeInputPC).upper()\n while not self.validateCode(codeSolution):\n 
codeSolution = string_input(printer.Printer.invalidInputPC).upper()\n self.solution = codeSolution\n\n # Take a guess as the computer\n def makeGuess(self):\n #Increments totalguesses by the PC\n Game._totalGuessesPC += 1\n #If the guess hasnt figured out all the colors yet\n if len(self.guessSoFar) < self.codeLength:\n guess = self.guessSoFar\n #Creates a random value and populates the correct portion of the array\n randVal = random.choice(self.possibleCode)\n guess += randVal * (self.codeLength - len(self.guessSoFar))\n #Removes the last random value as it will not be guessed again\n self.possibleCode.remove(randVal)\n #Compares the code if black is equal to the code length then the computer guessed it\n blackPegs, whitePegs = self.compareCodes(self.solution, guess)\n self.guesses.append((guess, (blackPegs, whitePegs)))\n if blackPegs == self.codeLength:\n return True\n #Sums the ticks adding the correct amount to the guess so far\n #IE; two ticks on RRRR would add RR to the guess so far\n sumTicks = blackPegs + whitePegs\n self.guessSoFar += randVal * (sumTicks - len(self.guessSoFar))\n #When guess has a list of all the colors\n else:\n #Shuffles the guess and then compares, if all black computer won\n guessList = list(self.guessSoFar)\n random.shuffle(guessList)\n guess = ''.join(guessList)\n blackPegs, whitePegs = self.compareCodes(guess, self.solution)\n self.guesses.append((guess, (blackPegs, whitePegs)))\n if blackPegs == self.codeLength:\n return True\n string_input(printer.Printer.continueText)\n return False\n\n # Check if the game is over\n def endGame(self, didWin):\n self.didWin = didWin\n Game._totalGamesPC += 1\n if didWin:\n print(printer.Printer.winTextPC)\n Game._totalWinsPC += 1\n else:\n print(printer.Printer.loseTextPC + self.solution)\n string_input(printer.Printer.continueText)\n\n\n# self test\ndef test():\n uGame = UserGame()\n uGame.generateSolution()\n print(uGame.validateCode(uGame.solution))\n print(\"Code Length: \" + str(uGame.codeLength))\n print(\"Max Guesses: \" + str(uGame.maxGuesses))\n print(\"Symbols: \" + str(uGame.symbolList))\n\n\nif (__name__ == \"__main__\"):\n test()\n","repo_name":"Kokopelli13/mastermind","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21014201569","text":"import math\nprint(\"Kalkulator równania kwadratowego\")\na=int(input(\"Podaj a:\"))\nb=int(input(\"Podaj b:\"))\nc=int(input(\"Podaj c:\"))\nif a==0:\n print(\"To nie jest równanie kwadratowe, podaj inne a.\")\nelse:\n delta=b*b-4*a*c\n if delta>0:\n delta=math.sqrt(delta)\n x1=(-b-delta)/(2*a)\n x2=(-b+delta)/(2*a)\n print(\"Są dwa pierwiastki równania:\",x1,x2)\n else:\n if delta==0:\n x0=(-b)/(2*a)\n print(\"Jest jeden pierwiastek równania:\",x0)\n else:\n print(\"Równanie nie ma pierwiastków.\")\n","repo_name":"MalgorzataMarucha/jsp2021","sub_path":"Lista3/Zadanie3.py","file_name":"Zadanie3.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22261519423","text":"class Solution:\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n i = 0\n j = 0\n for i in range(0, len(nums)):\n if nums[i] != 0:\n nums[i], nums[j] = nums[j], nums[i]\n j += 1\n\n def moveZeroes2(self, nums):\n i = 0\n for num in nums:\n if num != 0:\n nums[i] = num\n i += 
1\n for j in range(i, len(nums)):\n nums[j] = 0\n\n\ns = Solution()\nnums = [0, 1, 0, 3, 12]\nnums2 = [4, 2, 4, 0, 0, 3, 0, 5, 1, 0]\ns.moveZeroes2(nums2)\nprint(nums2)\n","repo_name":"GuanzhouSong/Leetcode_Python","sub_path":"Leetcode/283. Move Zeroes.py","file_name":"283. Move Zeroes.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26767936720","text":"from django.db import models\n\n\nclass tb_Pessoa(models.Model):\n pes_id = models.IntegerField('id', primary_key=True)\n pes_nome = models.CharField('Nome', max_length=90)\n pes_contato = models.BigIntegerField('Contato')\n\n\nclass tb_Projeto(models.Model):\n prj_id = models.IntegerField('id', primary_key=True)\n prj_nome = models.CharField('Nome', max_length=60)\n prj_escopo = models.CharField('Escopo', max_length=119)\n prj_datainicio = models.DateField('Data Inicio')\n prj_prazoentrega = models.DateField('Prazo de Entrega')\n prj_color = models.CharField('Cor', max_length=60, default='')\n\n\nclass tb_Tarefa(models.Model):\n trf_id = models.IntegerField('id', primary_key=True)\n trf_name = models.CharField('nome', max_length=120, default='')\n trf_datainicial = models.DateField('Data Inicial')\n trf_datafinal = models.DateField('Data Final', null=True)\n trf_prazo = models.DateField('Prazo')\n trf_interdependencia = models.IntegerField('Interdenpencia', null=True, default=0)\n trf_entregavel = models.BooleanField('Entregavel', default=False)\n trf_color = models.CharField('cor', max_length=16, default='')\n fk_prj_id = models.ForeignKey(tb_Projeto, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.trf_name\n\n\nclass tb_Dev_Trf(models.Model):\n fk_pes_id = models.ForeignKey(tb_Pessoa, on_delete=models.CASCADE)\n fk_trf_id = models.ForeignKey(tb_Tarefa, on_delete=models.CASCADE)\n fk_prj_id = models.ForeignKey(tb_Projeto, on_delete=models.CASCADE)\n class Meta:\n unique_together = ((\"fk_pes_id\", \"fk_trf_id\", 'fk_prj_id'),)\n\nclass tbTeste(models.Model):\n teste = models.CharField(max_length=1)\n\n\n# Create your models here.\n","repo_name":"JessicaIsri/GANTT-PLANNER","sub_path":"pi/gantt/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4431064414","text":"\"\"\"\nDISCLAIMER:\nThis algorithm (finding all paths in graph) is very inefficient for\nbig problems set.\nYou can think of it like about Brute Force algorithm with very bad\ntime complexity O(2^n).\n\nThis is a classical problem which shows you how to use dynamic programming.\nThis concept is a core component of many optimisation tasks\n\"\"\"\n\n\"\"\"\nConsider a tuple of tuples in which the first tuple has one integer and each\nconsecutive tuple has one more integer then the last. Such a tuple of tuples\nwould look like a triangle. You should write a program that will help Stephan\nfind the highest possible sum on the most profitable route down the pyramid.\nAll routes down the pyramid involve stepping down and to the left or down and\nto the right.\n\nTips: Think of each step down to the left as moving to the same index location\nor to the right as one index location higher. 
Be very careful if you plan to\nuse recursion here.\n\nFor example we have pyramids like:\n\n(1,),\n(2, 3),\n(3, 3, 1),\n(3, 1, 5, 4),\n(3, 1, 3, 1, 3),\n(2, 2, 2, 2, 2, 2),\n(5, 6, 4, 5, 6, 4, 3)\n\nMaximum gold count here is 23, and path is:\n(0,0) 1 -> (1,1) 3 -> (2,1) 3 -> (3,2) 5 -> (4,2) 3 -> (5,3) 2 -> (6,4) 6\nand it takes 23\n\"\"\"\n\n\n_ROOT, _DEPTH, _BREADTH = range(3)\n\n\nclass Node:\n def __init__(self, identifier):\n self.__identifier = identifier\n self.__children = []\n\n @property\n def identifier(self):\n return self.__identifier\n\n @property\n def children(self):\n return self.__children\n\n def add_child(self, identifier):\n self.__children.append(identifier)\n\n def __str__(self):\n return str(self.identifier)\n\n def __repr__(self):\n return str(self.identifier)\n\n\nclass Tree:\n def __init__(self):\n self.__nodes = {}\n\n @property\n def nodes(self):\n return self.__nodes\n\n def add_node(self, identifier, parent=None):\n node = Node(identifier)\n self[identifier] = node\n\n if parent is not None:\n self[parent].add_child(identifier)\n\n return node\n\n def display(self, identifier, depth=_ROOT):\n children = self[identifier].children\n if depth == _ROOT:\n print(\"{0}\".format(identifier))\n else:\n print(\"\\t\"*depth, \"{0}\".format(identifier))\n\n depth += 1\n for child in children:\n self.display(child, depth) # recursive call\n\n def __getitem__(self, key):\n return self.__nodes[key]\n\n def __setitem__(self, key, item):\n self.__nodes[key] = item\n\n\ndef find_all_paths(graph, start, end, path=[]):\n \"\"\"\n Algorithm implementation idea taken from\n https://www.python.org/doc/essays/graphs/\n\n It finds all paths between start and end node in graph\n :param graph: tree like:\n (0, 0)\n\t (1, 0)\n\t\t (2, 0)\n\t\t\t (3, 0)\n\t\t\t (3, 1)\n\t\t (2, 1)\n\t\t\t (3, 1)\n\t\t\t (3, 2)\n\n :param start: root node e.g.; (0, 0)\n :param end: end node e.g.: (3, 2)\n :param path:\n :return: list of all paths between start and end node in graph\n \"\"\"\n path = path + [start]\n if start == end:\n return [path]\n if not start in graph.nodes:\n return []\n paths = []\n for node in graph[start].children:\n if node not in path:\n new_paths = find_all_paths(graph, node, end, path)\n for newpath in new_paths:\n paths.append(newpath)\n return paths\n\n\ndef test_count_gold(pyramid):\n \"\"\"\n ((1,),golden_pyramid\n (2, 3),\n (3, 3, 1),\n (3, 1, 5, 4),\n \"\"\"\n\n tree = Tree()\n tree.add_node((0,0)) # root node\n tree.add_node((1,0), (0,0))\n tree.add_node((1,1), (0,0))\n tree.add_node((2,0), (1,0))\n tree.add_node((2,1), (1,0))\n tree.add_node((2,1), (1,1))\n tree.add_node((2,2), (1,1))\n tree.add_node((3,0), (2,0))\n tree.add_node((3,1), (2,0))\n tree.add_node((3,1), (2,1))\n tree.add_node((3,1), (2,2))\n tree.add_node((3,2), (2,1))\n tree.add_node((3,2), (2,2))\n tree.add_node((3,3), (2,2))\n\n tree.display((0,0))\n endpoints = [(3,1), (3,2), (3,3)]\n max_gold = []\n for endpoint in endpoints:\n paths = find_all_paths(tree, (0,0), endpoint)\n values = []\n for path in paths:\n values.append([pyramid[node[0]][node[1]] for node in path])\n\n max_gold_path = [sum(path) for path in values]\n max_gold.append(max(max_gold_path))\n\n print(\"MAX GOLD: \", max(max_gold))\n\n\ndef count_gold(pyramid):\n\n root_row_idx, root_col_idx = 0, 0\n root = (root_row_idx, root_col_idx)\n\n # create Tree with root (0, 0)\n tree = Tree()\n tree.add_node(root) # root node\n\n # list of roots: [((0,0)), ((1,0), (1,1), (1, 2)) ,,,((n, n+1),(n, n+2))]\n root_col_idxs = [(row_idx, 
range(len(row)))\n for row_idx, row in enumerate(pyramid[:-1])]\n\n for row_idx, col_idxs in root_col_idxs: # iterate over list of roots\n for col_idx in col_idxs:\n root = (row_idx, col_idx)\n\n # find children nodes\n next_row = min(row_idx + 1, len(pyramid) - 1)\n next_col = min(col_idx + 1, len(pyramid[next_row]) - 1)\n\n # create new node in tree\n [tree.add_node(node, root) for node in\n set([(next_row, col_idx),\n (next_row, next_col)])]\n\n # uncomment this to print tree\n #tree.display((0,0))\n\n # find tree endpoints (tree leaves)\n endpoints = [(len(pyramid[-1]) - 1, col)\n for col in range(len(pyramid[-1]))]\n\n max_gold = []\n for endpoint in endpoints: # iterate over endpoints\n # find all possible paths in tree (from root to given endpoint)\n paths = find_all_paths(tree, (0, 0), endpoint)\n\n # translate node coordinates to node value (values list)\n # for example from (0, 0) to 1\n values = []\n for path in paths:\n values.append([pyramid[node[0]][node[1]] for node in path])\n\n # sum all values for given path\n max_gold_path = [sum(path) for path in values]\n\n # add maximum value for path to max_gold list\n max_gold.append(max(max_gold_path))\n\n print(\"MAX GOLD: \", max(max_gold))\n return max(max_gold)\n\n\nif __name__ == '__main__':\n\n test_count_gold((\n (1,),\n (2, 3),\n (3, 3, 1),\n (3, 1, 5, 4)))\n\n assert count_gold((\n (1,),\n (2, 3),\n (3, 3, 1),\n (3, 1, 5, 4),\n (3, 1, 3, 1, 3),\n (2, 2, 2, 2, 2, 2),\n (5, 6, 4, 5, 6, 4, 3)\n )) == 23, \"First example\"\n assert count_gold((\n (1,),\n (2, 1),\n (1, 2, 1),\n (1, 2, 1, 1),\n (1, 2, 1, 1, 1),\n (1, 2, 1, 1, 1, 1),\n (1, 2, 1, 1, 1, 1, 9)\n )) == 15, \"Second example\"\n assert count_gold((\n (9,),\n (2, 2),\n (3, 3, 3),\n (4, 4, 4, 4)\n )) == 18, \"Third example\"\n\n assert count_gold((\n [2],\n [7, 9],\n [0, 8, 6],\n [4, 7, 6, 8],\n [0, 5, 5, 4, 1],\n [9, 1, 0, 1, 6, 9])) == 35, \"last example\"\n\n\n \"\"\"\n This algorithm is very inefficient for big problems set like this\n last pyramid - so processing may take a while ...\n \"\"\"\n \"\"\"\n assert count_gold((\n [4],\n [1, 7],\n [9, 9, 7],\n [4, 9, 9, 3],\n [3, 5, 3, 7, 5],\n [1, 7, 5, 3, 5, 6],\n [6, 5, 5, 8, 3, 3, 3],\n [6, 8, 6, 8, 7, 3, 7, 5],\n [7, 9, 9, 1, 6, 8, 7, 5, 9],\n [2, 8, 2, 5, 5, 5, 2, 5, 7, 8],\n [1, 3, 5, 2, 4, 5, 3, 5, 1, 1, 6],\n [8, 6, 1, 1, 3, 4, 7, 5, 3, 6, 1, 9],\n [5, 8, 6, 6, 2, 6, 9, 3, 7, 4, 6, 9, 9],\n [3, 3, 5, 4, 4, 6, 9, 2, 5, 7, 7, 1, 6, 7],\n [8, 1, 4, 4, 6, 8, 4, 9, 7, 6, 1, 8, 4, 2, 9],\n [6, 5, 8, 6, 8, 3, 2, 4, 8, 8, 1, 5, 6, 8, 8, 7],\n [6, 3, 9, 1, 5, 6, 7, 7, 2, 2, 6, 2, 2, 1, 8, 8, 6],\n [4, 7, 8, 7, 5, 2, 8, 8, 2, 2, 7, 1, 3, 8, 1, 9, 4, 7],\n [1, 7, 8, 1, 4, 3, 8, 6, 6, 9, 6, 3, 5, 4, 7, 6, 4, 5, 6],\n [1, 1, 4, 9, 9, 8, 3, 3, 8, 1, 8, 1, 7, 6, 6, 3, 2, 1, 1, 6],\n )) == 139, \"Veeery big pyramid\"\n \"\"\"","repo_name":"hopsmdev/playground","sub_path":"checkio/home/golden_pyramid_find_paths.py","file_name":"golden_pyramid_find_paths.py","file_ext":"py","file_size_in_byte":8076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24894558844","text":"import sys\nfrom src.grid import Grid\nfrom src.robot import Robot\nfrom src.cleaner_algorithm import dfs\n\nif __name__ == '__main__':\n\n debug = False\n # To print all robot movement\n if len(sys.argv) > 1 and sys.argv[1].startswith('debug'):\n debug = True\n\n grid = Grid([\n [1, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 2, 0, 0, 0, 0, 1, 0],\n [2, 2, 2, 0, 2, 2, 2, 2],\n [0, 1, 0, 3, 0, 
1, 0, 1],\n ], debug)\n robot = Robot(grid, debug)\n\n grid.print_room()\n\n # -------------------\n dfs(robot)\n\n # ----------------\n grid.print_room()\n","repo_name":"diegoauyon/robot_cleaner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41852754542","text":"import re\nfrom dice_classes import NamedDiceCollection, DiceExpressionTerm, DiceBag, Dice\n\nTOKEN_TAKE_HIGHEST = 'h'\nTOKEN_TAKE_LOWEST = 'l'\n\ndef check_string_format(a_string):\n '''\n ok: 1d4\n ok: 2d6+4-3d8h2\n ok: 3d9l1\n !ok: 3d8h9\n !ok: 2d\n '''\n terms = re.split('[+-]', a_string)\n is_valid = next((False for t in terms if not re.match('^(\\d+d\\d+|\\d+d\\d+[hl]{1,1}\\d+|\\d+)$', t,re.IGNORECASE)), True)\n return is_valid\n\ndef compile_dices_re():\n pattern = '([+-]{0,1}\\d+d\\d+(?:[hl]{1,1}\\d+){0,1})'\n return re.compile(pattern, re.IGNORECASE)\n\ndef get_dices_expressions(a_string):\n return compile_dices_re().findall(a_string)\n\ndef get_constants_expressions(a_string):\n cs = compile_dices_re().sub('', a_string)\n pattern = '([+-]*\\d+)'\n compiled_expr = re.compile(pattern, re.IGNORECASE)\n return compiled_expr.findall(cs)\n\ndef calculate_constant(a_string):\n k = 0\n cs = get_constants_expressions(a_string)\n for c in cs:\n if c[0] == '-':\n k -= int(c[1:])\n else:\n k+= int(c)\n return k\n\ndef process_dice_expression(a_string):\n groups = re.findall('([+-]){0,1}(\\d+)(?:d)(\\d+)([hl]\\d+){0,1}', a_string, flags=re.IGNORECASE)[0]\n is_negative = bool(re.match('-', groups[0]))\n ammount = groups[1]\n sides = groups[2]\n take_highest_n = None\n take_lowest_n = None\n if groups[3]:\n if re.match('h', groups[3],flags=re.IGNORECASE):\n take_highest_n = int(groups[3][1:])\n else:\n take_lowest_n = int(groups[3][1:])\n return {\n 'ammount': ammount,\n 'sides': sides,\n 'is_negative': is_negative,\n 'take_highest_n': take_highest_n,\n 'take_lowest_n': take_lowest_n\n }\n\ndef get_dices_terms(a_string):\n terms = []\n for dice_expr in get_dices_expressions(a_string):\n terms.append(process_dice_expression(dice_expr))\n return terms\n\ndef build_dice_expr_terms(expression):\n dice_expression_terms = []\n for dice_term in get_dices_terms(expression):\n ammount = int(dice_term['ammount'])\n sides = int(dice_term['sides'])\n is_negative = dice_term['is_negative']\n take_highest_n = dice_term['take_highest_n']\n take_lowest_n = dice_term['take_lowest_n']\n dices = []\n for i in range(0, ammount):\n dices.append(Dice(sides))\n dice_expression_terms.append(DiceExpressionTerm(DiceBag(dices), is_negative, take_highest_n, take_lowest_n))\n return dice_expression_terms\n\ndef build_named_dice_collection(name, expression):\n constant = calculate_constant(expression)\n d_terms = build_dice_expr_terms(expression)\n return NamedDiceCollection(expression, name, constant, d_terms)\n","repo_name":"francisco-pinchentti/pydice","sub_path":"dice_parser.py","file_name":"dice_parser.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15328517862","text":"from prometheus_client import start_http_server, Summary, Counter , Gauge\nimport random\nimport time\n\n# Create a metric to track time spent and requests made.\nREQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')\nc = Counter('my_failures', 'Description of counter')\nd = Gauge('data_objects', 'Number of 
objects')\n\n\n\n\n# Decorate function with metric.\n@REQUEST_TIME.time()\ndef process_request(t):\n \"\"\"A dummy function that takes some time.\"\"\"\n time.sleep(t)\n c.inc() # Increment by \n c.inc(1.6) # Increment by given value\n my_dict = {\"ruleengine_sftr_gsl\":1, \"ruleengine_sftr_ml\":2}\n d.set_function(lambda: len(my_dict))\n\n\nif __name__ == '__main__':\n # Start up the server to expose the metrics.\n start_http_server(8000)\n # Generate some requests.\n while True:\n process_request(random.random())\n","repo_name":"shegoj/simple-prometheus-exporter","sub_path":"simple_client.py","file_name":"simple_client.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23980730704","text":"'''\n시작시간: 00시 39분\n종료시간: 00시 41분\n'''\n\nt = int(input())\nfor tc in range(1, t + 1):\n a, b = input().split()\n a = a.replace(b, 'a')\n print('#{} {}'.format(tc, len(a)))","repo_name":"Ui-Seok/Solve-Problems","sub_path":"SWEA/D4/3143. 가장 빠른 문자열 타이핑/가장 빠른 문자열 타이핑.py","file_name":"가장 빠른 문자열 타이핑.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9729043628","text":"import math\r\nimport sys\r\n\r\nwith open(\"javaout.txt\") as java:\r\n javaAns = java.readlines()\r\nwith open(\"stout.txt\") as std:\r\n Ans = std.readlines()\r\nsign = False\r\nfor i in range (0, min(len(javaAns), len(Ans))):\r\n if not math.isclose(float(Ans[i]), float(javaAns[i])) :\r\n if sign:\r\n sys.exit(1)\r\n else:\r\n sign=True\r\nsys.exit(0)","repo_name":"WANDY666/OO_2_ACG","sub_path":"comp.py","file_name":"comp.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30780945963","text":"from datetime import timedelta\n\nfrom odoo import fields, models\n\nLOG_ACTIVITY_THROTTLE = 60 # seconds\n\n\nclass ShopinvaderPartner(models.Model):\n _inherit = \"shopinvader.partner\"\n\n first_active_date = fields.Datetime(\n string=\"First Activity on\",\n help=\"Date of the first user activity\",\n readonly=True,\n )\n last_active_date = fields.Datetime(\n string=\"Last Activity on\",\n help=\"Date of the last user activity\",\n readonly=True,\n )\n\n def _log_active_date(self):\n self.ensure_one()\n now = fields.Datetime.now()\n throttle = timedelta(seconds=LOG_ACTIVITY_THROTTLE)\n if not self.last_active_date or self.last_active_date < (now - throttle):\n self.last_active_date = now\n if not self.first_active_date:\n self.first_active_date = self.last_active_date\n","repo_name":"shopinvader/odoo-shopinvader","sub_path":"shopinvader_customer_activity/models/shopinvader_partner.py","file_name":"shopinvader_partner.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"81"} +{"seq_id":"4569066713","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nfrom jinja2 import FileSystemLoader, Environment\n\nloader = FileSystemLoader(\"templates\", encoding=\"utf-8\")\nenvironment = Environment(loader=loader)\ntpl = environment.get_template(\"vars.conf.tpl\")\ndata = {\"ip\" : \"1.1.1.1\"}\noutput = 
tpl.render(data=data)\nprint(output)","repo_name":"GitJasonseven/learning_jinja2","sub_path":"3-vars.py","file_name":"3-vars.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15642798714","text":"# -*- coding:UTF-8 -*-\n__author__ = \"浮萍\"\n__Date__ = \"2018/7/5\"\n\n\"\"\"\n参考https://cloud.tencent.com/document/product/866/17600\n\"\"\"\n\nimport requests\nimport json\nimport cv2\n\n\ndef getColor(type):\n\tif type == 0:\n\t\treturn (0,0,255)\n\telif type == 1:\n\t\treturn (0,255,255)\n\telif type == 2:\n\t\treturn (255,0,0)\nheaders = {\n 'Authorization': 'xxx',\n}\nurl = 'http://recognition.image.myqcloud.com/ocr/general'\ndata = {\n \"appid\":\"xxx\",\n \"bucket\":\"test\",\n}\n\nap_path='ap_1534406211.png'\nmp_path='mp_1534406211.png'\n\n\nap_files = {'image': open(ap_path, 'rb')}\nmp_files = {'image': open(mp_path, 'rb')}\n\n\nap_res = requests.post(url,data=data, files=ap_files, headers=headers)\nmp_res = requests.post(url,data=data, files=mp_files, headers=headers)\nap_res_json = json.loads(ap_res.text)\nap_items_json = ap_res_json['data']['items']\nap_str = ap_items_json[0]['itemstring']\nap_need_str = ap_str[-3:]\n\n\nmp_res_json = json.loads(mp_res.text)\nmp_items_json = mp_res_json['data']['items']\nmp_items_len = len(mp_items_json)\nimg_mp = cv2.imread(mp_path)\nfor i in range(len(ap_need_str)):\n\tfor j in range(mp_items_len):\n\t\tif mp_items_json[j]['itemstring'] == ap_need_str[i]:\n\t\t\tcv2.rectangle(img_mp, (mp_items_json[j]['itemcoord']['x'],mp_items_json[j]['itemcoord']['y']), (mp_items_json[j]['itemcoord']['x'] + mp_items_json[j]['itemcoord']['width'], mp_items_json[j]['itemcoord']['y'] + mp_items_json[j]['itemcoord']['height']), getColor(i), 2)\ncv2.imshow('Detected', img_mp)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"fupinglee/MyPython","sub_path":"captcha/Pointselection/qcloud_demo.py","file_name":"qcloud_demo.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"81"} +{"seq_id":"4394969827","text":"# 两个非空链表代表两个非负整数,数字逆序存储在每个结点,返回相加的链表\n# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n# Output: 7 -> 0 -> 8\n# Explanation: 342 + 465 = 807\n\n# 思路:\n# a + b >= 10,写个进位出来。\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n def print(self):\n while self:\n print(self.val)\n self = self.next\n\nclass Linklist:\n def __init__(self):\n self.head = None\n\nclass Solution:\n def addTwoNumbers(self, l1, l2):\n head = ListNode(0)\n result = head\n carry = 0\n if not l1 and not l2:\n return False\n while l1 and l2:\n sum = l1.val + l2.val + carry\n if sum >= 10:\n head.next = ListNode(0)\n carry = 1\n else:\n head.next = ListNode(sum)\n head = head.next\n l1 = l1.next\n l2 = l2.next\n return result.next\n\nif __name__ == '__main__':\n l1 = ListNode(2)\n l1.next = ListNode(4)\n l1.next.next = ListNode(3)\n\n l2 = ListNode(5)\n l2.next = ListNode(6)\n l2.next.next = ListNode(4)\n\n s = Solution()\n result = s.addTwoNumbers(l1, l2)\n ListNode.print(result)\n\n\n","repo_name":"huhuzwxy/leetcode","sub_path":"Linked List/2_add_two_numbers.py","file_name":"2_add_two_numbers.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"37038663200","text":"import tensorflow as tf\nfrom tensorflow.python.keras.losses import LossFunctionWrapper\nfrom 
tensorflow.python.keras.utils import losses_utils\n\n\nclass VaeLoss(LossFunctionWrapper):\n\t\"\"\"docstring for VaeLoss\"\"\"\n\tdef __init__(self, mu, log_var, r_loss_factor, name='vae_loss'):\n\t\tsuper(VaeLoss, self).__init__(\n\t\t\tfn=self.vae_loss,\n\t\t\treduction=losses_utils.ReductionV2.AUTO,\n\t\t\tname=name)\n\t\t\t#mu=mu,\n\t\t\t#log_var=log_var,\n\t\t\t#r_loss_factor=r_loss_factor)\n\t\tself.mu = mu\n\t\tself.log_var = log_var\n\t\tself.r_loss_factor = r_loss_factor\n\n\tdef vae_r_loss(self, y_true, y_pred):\n\t\tr_loss = tf.reduce_mean(tf.square(y_true - y_pred), axis=[1,2,3])\n\t\treturn r_loss* self.r_loss_factor\n\n\tdef vae_kl_loss(self, y_true, y_pred):\n\t\t#1 + self.log_var - K.square(self.mu) - K.exp(self.log_var)\n\t\tkl_loss = -0.5 * tf.reduce_sum(1+self.log_var - tf.square(self.mu) - tf.exp(self.log_var), axis=1)\n\t\treturn kl_loss\t\n\n\tdef vae_loss(self, y_true, y_pred):\n\t\tr_loss = self.vae_r_loss(y_true, y_pred)\n\t\tkl_loss = self.vae_kl_loss(y_true, y_pred)\n\n\t\treturn r_loss+kl_loss\t\n\n\n\n","repo_name":"pk00095/variational_auto_encoder","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33442044435","text":"from typing import Any, Optional, TYPE_CHECKING\n\nfrom azure.mgmt.core import AsyncARMPipelineClient\nfrom msrest import Deserializer, Serializer\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from azure.core.credentials_async import AsyncTokenCredential\n\nfrom ._configuration import DevicesCloudPrintConfiguration\nfrom .operations import PrintPrintOperations\nfrom .operations import PrintOperations\nfrom .operations import PrintPrintersOperations\nfrom .operations import PrintPrintersTaskTriggersOperations\nfrom .operations import PrintPrinterSharesOperations\nfrom .operations import PrintPrinterSharesPrinterOperations\nfrom .operations import PrintReportsOperations\nfrom .operations import PrintServicesOperations\nfrom .operations import PrintSharesOperations\nfrom .operations import PrintSharesPrinterOperations\nfrom .operations import PrintTaskDefinitionsOperations\nfrom .operations import PrintTaskDefinitionsTasksOperations\nfrom .. 
import models\n\n\nclass DevicesCloudPrint(object):\n \"\"\"DevicesCloudPrint.\n\n :ivar print_print: PrintPrintOperations operations\n :vartype print_print: devices_cloud_print.aio.operations.PrintPrintOperations\n :ivar print: PrintOperations operations\n :vartype print: devices_cloud_print.aio.operations.PrintOperations\n :ivar print_printers: PrintPrintersOperations operations\n :vartype print_printers: devices_cloud_print.aio.operations.PrintPrintersOperations\n :ivar print_printers_task_triggers: PrintPrintersTaskTriggersOperations operations\n :vartype print_printers_task_triggers: devices_cloud_print.aio.operations.PrintPrintersTaskTriggersOperations\n :ivar print_printer_shares: PrintPrinterSharesOperations operations\n :vartype print_printer_shares: devices_cloud_print.aio.operations.PrintPrinterSharesOperations\n :ivar print_printer_shares_printer: PrintPrinterSharesPrinterOperations operations\n :vartype print_printer_shares_printer: devices_cloud_print.aio.operations.PrintPrinterSharesPrinterOperations\n :ivar print_reports: PrintReportsOperations operations\n :vartype print_reports: devices_cloud_print.aio.operations.PrintReportsOperations\n :ivar print_services: PrintServicesOperations operations\n :vartype print_services: devices_cloud_print.aio.operations.PrintServicesOperations\n :ivar print_shares: PrintSharesOperations operations\n :vartype print_shares: devices_cloud_print.aio.operations.PrintSharesOperations\n :ivar print_shares_printer: PrintSharesPrinterOperations operations\n :vartype print_shares_printer: devices_cloud_print.aio.operations.PrintSharesPrinterOperations\n :ivar print_task_definitions: PrintTaskDefinitionsOperations operations\n :vartype print_task_definitions: devices_cloud_print.aio.operations.PrintTaskDefinitionsOperations\n :ivar print_task_definitions_tasks: PrintTaskDefinitionsTasksOperations operations\n :vartype print_task_definitions_tasks: devices_cloud_print.aio.operations.PrintTaskDefinitionsTasksOperations\n :param credential: Credential needed for the client to connect to Azure.\n :type credential: ~azure.core.credentials_async.AsyncTokenCredential\n :param top: Show only the first n items.\n :type top: int\n :param skip: Skip the first n items.\n :type skip: int\n :param search: Search items by search phrases.\n :type search: str\n :param filter: Filter items by property values.\n :type filter: str\n :param count: Include count of items.\n :type count: bool\n :param str base_url: Service URL\n \"\"\"\n\n def __init__(\n self,\n credential: \"AsyncTokenCredential\",\n top: Optional[int] = None,\n skip: Optional[int] = None,\n search: Optional[str] = None,\n filter: Optional[str] = None,\n count: Optional[bool] = None,\n base_url: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n if not base_url:\n base_url = 'https://graph.microsoft.com/beta'\n self._config = DevicesCloudPrintConfiguration(credential, top, skip, search, filter, count, **kwargs)\n self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)\n\n client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}\n self._serialize = Serializer(client_models)\n self._serialize.client_side_validation = False\n self._deserialize = Deserializer(client_models)\n\n self.print_print = PrintPrintOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print = PrintOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_printers = PrintPrintersOperations(\n self._client, 
self._config, self._serialize, self._deserialize)\n self.print_printers_task_triggers = PrintPrintersTaskTriggersOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_printer_shares = PrintPrinterSharesOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_printer_shares_printer = PrintPrinterSharesPrinterOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_reports = PrintReportsOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_services = PrintServicesOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_shares = PrintSharesOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_shares_printer = PrintSharesPrinterOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_task_definitions = PrintTaskDefinitionsOperations(\n self._client, self._config, self._serialize, self._deserialize)\n self.print_task_definitions_tasks = PrintTaskDefinitionsTasksOperations(\n self._client, self._config, self._serialize, self._deserialize)\n\n async def close(self) -> None:\n await self._client.close()\n\n async def __aenter__(self) -> \"DevicesCloudPrint\":\n await self._client.__aenter__()\n return self\n\n async def __aexit__(self, *exc_details) -> None:\n await self._client.__aexit__(*exc_details)\n","repo_name":"BrianTJackett/msgraph-cli","sub_path":"msgraph-cli-extensions/beta/devicescloudprint_beta/azext_devicescloudprint_beta/vendored_sdks/devicescloudprint/aio/_devices_cloud_print.py","file_name":"_devices_cloud_print.py","file_ext":"py","file_size_in_byte":6366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"42496291395","text":"# entry history: IDEA, TICKER, PRICE, DATE, TIME\n# history of entry universe : DATE, OHLCV, TICKER\n# input: exit_ndays, fee_perc\n# build dateindex : Date, Index\n# entry: merge(entry, history, on=TICKER, how=inner) -> ENTRYPRICE from history\n# exit: merge(entry, history adjusted by dateindex, on=TICKER, how=left) -> EXITPRICE from history, replace Nan with 0.0\n# gain : EXITPRICE/ENTRYPRICE-1 - fee_perc\n# aggregate gain ,cnt, by = idea, date\n\nimport pandas as pd\nimport os\nimport logging\nimport numpy as np\n\n\ndef gain_report(entry, history, exit_ndays, fee_perc):\n dates = sorted(list(history.DATE.unique()))\n dateindex = pd.DataFrame(zip(dates, list(range(len(dates)))), columns=[\"DATE\", \"INDEX\"])\n exclude_index = -1*(exit_ndays+1)\n last_ndays = sorted(list(dateindex.DATE.unique()))[exclude_index:]\n entry_index = pd.merge(entry, dateindex, on=\"DATE\", how=\"inner\")\n entry_index[\"EXIT_INDEX\"] = entry_index[\"INDEX\"].map(lambda x: x + exit_ndays)\n history_index = pd.merge(history, dateindex, on=\"DATE\", how=\"inner\")\n entry_history_index = history_index.copy(True).rename(columns={\"CLOSE\": \"ENTRY_PRICE\"})[\n [\"TICKER\", \"INDEX\", \"ENTRY_PRICE\"]]\n\n exit_history_index = (\n history_index.copy(True).rename(columns={\"OPEN\": \"EXIT_PRICE\", \"VOLUME\": \"EXIT_VOLUME\", \"INDEX\": \"EXIT_INDEX\"})[\n [\"TICKER\", \"EXIT_INDEX\", \"EXIT_PRICE\", \"EXIT_VOLUME\"]])\n entry_exit = pd.merge(entry_index, entry_history_index, on=[\"INDEX\", \"TICKER\"], how=\"left\")\n entry_exit[\"ENTRY_PRICE\"] = entry_exit[\"ENTRY_PRICE\"].map(lambda x: 1 if pd.isna(x) else x)\n entry_exit = pd.merge(entry_exit, exit_history_index, on=[\"EXIT_INDEX\", 
\"TICKER\"], how=\"left\")\n entry_exit[\"EXIT_PRICE\"] = entry_exit[\"EXIT_PRICE\"].map(lambda x: 0 if pd.isna(x) else x)\n entry_exit[\"GAIN\"] = entry_exit[\"EXIT_PRICE\"] / entry_exit[\"ENTRY_PRICE\"] - 1 - fee_perc\n\n entry_exit = entry_exit[entry_exit[\"DATE\"].map(lambda x: x not in last_ndays)]\n logging.info(\"after excluding last ndays\")\n logging.info(entry_exit.shape)\n logging.info(entry_exit.head())\n report = entry_exit.groupby([\"IDEA\", \"DATE\"]).agg(GM=(\"GAIN\", \"mean\"), CNT=(\"TICKER\", \"count\")).reset_index()\n report[\"EXITNDAYS\"] = exit_ndays\n return report, entry_exit\n\n\ndef test_gain_report():\n test_entry = pd.DataFrame([(\"2020-01-01\", 100, \"TEST1\", \"IDEA1\"),\n (\"2020-01-01\", 1000, \"TEST2\", \"IDEA2\"),\n (\"2020-01-01\", 100, \"TEST11\", \"IDEA1\"),\n (\"2020-01-02\", 1000, \"TEST11\", \"IDEA1\"),\n (\"2020-01-02\", 110, \"TEST1\", \"IDEA1\"),\n (\"2020-01-02\", 1100, \"TEST2\", \"IDEA2\")], columns=[\"DATE\", \"CLOSE\", \"TICKER\", \"IDEA\"])\n test_history = pd.DataFrame([(\"2020-01-01\", 100, 0, \"TEST1\", 100),\n (\"2020-01-02\", 111, 1, \"TEST1\", 101),\n (\"2020-01-03\", 222, 2, \"TEST1\", 202),\n (\"2020-01-04\", 333, 3, \"TEST1\", 303),\n (\"2020-01-01\", 1000, 0, \"TEST2\", 1000),\n (\"2020-01-02\", 1110, 1, \"TEST2\", 1001),\n (\"2020-01-03\", 2220, 2, \"TEST2\", 2002),\n (\"2020-01-04\", 3330, 3, \"TEST2\", 3003)],\n columns=[\"DATE\", \"OPEN\", \"VOLUME\", \"TICKER\", \"CLOSE\"])\n test_exit_ndays = 2\n re, ee = gain_report(test_entry, test_history, test_exit_ndays, 0)\n logging.info(re.to_string())\n logging.info(ee.to_string())\n\n\ndef agg_performance(report):\n def _cal_mdd(cr_list):\n df = pd.concat([pd.Series(cr_list).cummax(), pd.Series(cr_list)], axis=1)\n df.columns = [\"CRMAX\", \"CR\"]\n df[\"MDD\"] = df[\"CR\"] / df[\"CRMAX\"] - 1\n return min(df.MDD.to_list())\n\n report[\"NORMGM\"] = (1 + report[\"GM\"] / report[\"EXITNDAYS\"])\n df = report.groupby(\"IDEA\")[\"NORMGM\"].apply(list).reset_index(name=\"NORMGM_LIST\")\n df[\"CR\"] = df[\"NORMGM_LIST\"].map(lambda x: list(np.array(x).cumprod()))\n df[\"CAGR\"] = df[\"CR\"].map(lambda x: x[-1] ** (1 / (len(x) / 250)) - 1)\n df[\"LASTCR\"] = df[\"CR\"].map(lambda x: x[-1])\n df[\"DAYS\"] = df[\"CR\"].map(lambda x: len(x))\n df[\"MDD\"] = df[\"CR\"].map(lambda x: _cal_mdd(x))\n return df[[\"IDEA\", \"CAGR\", \"LASTCR\", \"DAYS\", \"MDD\"]]\n","repo_name":"testkevinkim/py_stock_research","sub_path":"evaluation/us_gain_report.py","file_name":"us_gain_report.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15466202912","text":"import cv2\n\nvideo = cv2.VideoCapture(0)\n\nwidth, height = 600, 500\n\nwhile True:\n success, frame = video.read()\n frame_size = cv2.resize(frame, (width, height))\n cv2.imshow('img', frame_size)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nvideo.release()\ncv2.destroyAllWindows()","repo_name":"alaminbhuyan/OpenCV","sub_path":"Project1/readWebcam2.py","file_name":"readWebcam2.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34886784257","text":"\"\"\" this package is used to check the customer fin status\"\"\"\n\"\"\"\nchecking fin status\nchecking age\nchecking valid estonia id\n\"\"\"\n\n\n# check finanical status\nclass FinStatus:\n def __init__(self, account_balance, debt_balance, monthly_income):\n self.account_balance = 
account_balance\n self.debt_balance = debt_balance\n self.monthly_income = monthly_income\n\n def check_fin_status(self):\n if (\n self.account_balance > 0\n and self.debt_balance == 0\n and self.monthly_income > 0\n ):\n return \"You are in good financial standing\"\n elif self.account_balance < 0 < self.debt_balance and self.monthly_income > 0:\n return \"You are in bad financial standing\"\n else:\n return \"You are in neutral financial standing\"\n\n\n# check age\nclass Age:\n def __init__(self, age):\n self.age = age\n\n def check_age(self):\n if self.age >= 20:\n return \"You can open an account\"\n else:\n return \"You are not old enough to open an account\"\n\n\n# check valid estonia id\nclass EstoniaID:\n def __init__(self, estonia_id):\n self.estonia_id = estonia_id\n\n def check_estonia_id(self):\n if len(self.estonia_id) == 11:\n return \"You have a valid estonia id\"\n else:\n return \"You do not have a valid estonia id\"\n","repo_name":"T6nisValk/School","sub_path":"Basics/Python_Technology/Class_8-9_03.06-04.06.23/fin_check/bank_checks.py","file_name":"bank_checks.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43206085558","text":"\n# Prepreation for FEP and\n# free energy decomposition\n# =========================\n# input :\n# pdbid.solv.gro, pdbid.solv.pdb, pdbid.solv.top\n# output:\n# pdbid.solv.system*.xml - define biasing potentials\n# pdbid.solv.state.xml - include the equilibrium state for md productions\n#\n# nonboned energy formulas\n# REF. https://github.com/openmm/openmm/issues/3281\n# How to reproduce standard nonbonded force as a custom nonbonded force? #3281\n\n# OpenMM\nimport simtk.openmm as omm # contains functions for MD\nimport simtk.openmm.app as app # contains functions for i/o\nfrom simtk import unit # controls unique object types for physical units\nimport sys\nimport numpy as np\nimport mdtraj as md\nimport time\nfrom parmed import gromacs\nimport cProfile\nfrom pymbar import timeseries\n\ntime_start = time.time()\nstart = time.time()\n\n# setting\n#pdbid = 'ala2'\n#namea0 = 'HR'\n#namea1 = 'OL'\n#deviceid = '0'\npdbid = sys.argv[1]\nnamea0 = sys.argv[2]\nnamea1 = sys.argv[3]\ndeviceid = sys.argv[4]\n\nONE_4PI_EPS0 = 138.935456e0 * unit.kilojoule_per_mole * unit.nanometers /unit.elementary_charges**2\nkB = 0.593e0/298.e0 # kcal/mol\nT = 300.e0 # kelvin\ntemperature = T*unit.kelvin\nbeta = 1.e0/(kB*T)\n\n# Platforms\nplatform = omm.Platform.getPlatformByName('CUDA')\nproperties = {'CudaPrecision': 'mixed', 'DeviceIndex': deviceid}\n\n# create a modeller\ngromacs.GROMACS_TOPDIR = \"../../required/\"\ngro = gromacs.GromacsGroFile.parse(pdbid+'.solv.gro')\ntop = gromacs.GromacsTopologyFile(pdbid+'.solv.top')\ntop.box = gro.box\nmodeller = app.Modeller(top.topology, gro.positions)\nprint(\"# topology: \", top)\n\n# create a system\nsystemEtot = top.createSystem(nonbondedMethod=app.CutoffPeriodic,\n nonbondedCutoff=1.2e0*unit.nanometers,\n rigidWater=True,)\nprint(\"# pbc?: \", systemEtot.usesPeriodicBoundaryConditions())\nprint(\"# default box size: \", systemEtot.getDefaultPeriodicBoxVectors())\n\n# add force to fix residues\npdb = md.load_pdb(pdbid+'.solv.pdb')\ninxa0 = pdb.topology.select('resid 0 and name '+namea0)\ninxa1 = pdb.topology.select('resid 1 and name '+namea1)\nresid = pdb.topology.select('not waters')\nfor k in resid:\n systemEtot.setParticleMass(int(k), 0.e0*unit.dalton)\n\n# remove dispersion corrections of nonbonded forces\nfor 
force in systemEtot.getForces():\n print(\"# force: \", force)\n if isinstance(force, omm.NonbondedForce):\n force.setUseDispersionCorrection(False)\n eps_solvent = force.getReactionFieldDielectric()\n CUTOFF = force.getCutoffDistance()\n if isinstance(force, omm.CustomNonbondedForce):\n force.setUseLongRangeCorrection(False)\nprint('# CUTOFF: ', CUTOFF)\nprint(\"# eps_solvent = \", eps_solvent)\nkrf = (1/ (CUTOFF**3)) * (eps_solvent - 1) / (2*eps_solvent + 1)\ncrf = (1/ CUTOFF) * (3* eps_solvent) / (2*eps_solvent + 1)\n\n# save a system\nwith open(pdbid+'.solv.systemEtot.xml', 'w') as f:\n f.write(omm.XmlSerializer.serialize(systemEtot))\n\n# create an integrator and simulation\nnsteps = 50*1000\nprnfrq = nsteps\ndt = 1.e0*unit.femtoseconds\nintegratorEtot = omm.LangevinIntegrator(temperature, 1.e0/unit.picosecond, dt)\nsimulationEtot = app.Simulation(modeller.topology, systemEtot, integratorEtot,\n platform=platform, platformProperties=properties)\nsimulationEtot.context.setPositions(gro.positions)\nsimulationEtot.reporters.append(app.DCDReporter(pdbid+'.solv.dcd', prnfrq))\nrep = app.StateDataReporter(sys.stdout, prnfrq, separator=' ', step=True,\n time=True, potentialEnergy=True, kineticEnergy=True, temperature=True, totalEnergy=True)\nsimulationEtot.reporters.append(rep)\n\n# md equilibrium\nsimulationEtot.step(nsteps)\n\n# save an equilibrium state\nstate = simulationEtot.context.getState(getPositions=True)\nxyz = state.getPositions(asNumpy=True)\ndist = np.abs(xyz[inxa0[0],2]-xyz[inxa1[0],2])*10.e0\nprint(\"# initial z-distance(\"+namea0+','+namea1+')', dist)\nsimulationEtot.saveState(pdbid+'.solv.state.xml')\n\n# free energy decomposition --> define additional systems\n# -------\n# Evac_pp\nwith open(pdbid+'.solv.systemEtot.xml','r') as f:\n systemEvac = omm.XmlSerializer.deserialize(f.read())\nforces = systemEvac.getForces()\nfor k in range(0,len(forces)):\n # once the force is removed, then force indeices will be reduced by one\n systemEvac.removeForce(0)\n# forceEvac = omm.CustomBondForce('ONE_4PI_EPS0*q12/r + 4*eps12*((sig12/r)^12 - (sig12/r)^6)')\nforceEvac = omm.CustomBondForce('ONE_4PI_EPS0*q12*(1/r+krf*r*r-crf)*step(1-r/CUTOFF) + 4*eps12*((sig12/r)^12 - (sig12/r)^6)*step(1-r/CUTOFF)')\nforceEvac.setUsesPeriodicBoundaryConditions(True)\nforceEvac.addGlobalParameter('ONE_4PI_EPS0', ONE_4PI_EPS0)\nforceEvac.addGlobalParameter('CUTOFF', CUTOFF)\nforceEvac.addGlobalParameter('krf', krf)\nforceEvac.addGlobalParameter('crf', crf)\nforceEvac.addPerBondParameter('q12')\nforceEvac.addPerBondParameter('sig12')\nforceEvac.addPerBondParameter('eps12')\n# add forces\ninx_pp = pdb.topology.select_pairs(selection1='resid 1',selection2='resid 0')\nfor inx in inx_pp:\n p1 = int(inx[0])\n p2 = int(inx[1])\n q1 = top.atoms[p1].ucharge\n sig1 = top.atoms[p1].usigma\n eps1 = top.atoms[p1].uepsilon\n q2 = top.atoms[p2].ucharge\n sig2 = top.atoms[p2].usigma\n eps2 = top.atoms[p2].uepsilon\n q12 = q1*q2\n sig12 = (sig1+sig2)*0.5e0\n eps12 = unit.Quantity.sqrt(eps1*eps2)\n type1 = top.atoms[p1].type\n type2 = top.atoms[p2].type\n if (type1=='OB' and type2=='HB') or (type1=='HB' and type2=='OB'):\n sig12 = 0.15e0\n eps12 = 1.2552e0 # OB HB 1 0.150 1.2552\n forceEvac.addBond(p1,p2,[q12,sig12,eps12])\nsystemEvac.addForce(forceEvac)\n# Eele_pw\nwith open(pdbid+'.solv.systemEtot.xml','r') as f:\n systemEele = omm.XmlSerializer.deserialize(f.read())\nforces = systemEele.getForces()\nfor k in range(0,len(forces)):\n systemEele.removeForce(0)\n# forceEele = 
omm.CustomBondForce('ONE_4PI_EPS0*q12/r')\nforceEele = omm.CustomBondForce('ONE_4PI_EPS0*q12*(1/r+krf*r*r-crf)*step(1-r/CUTOFF)')\nforceEele.setUsesPeriodicBoundaryConditions(True)\nforceEele.addGlobalParameter('ONE_4PI_EPS0', ONE_4PI_EPS0)\nforceEele.addGlobalParameter('CUTOFF', CUTOFF)\nforceEele.addGlobalParameter('krf', krf)\nforceEele.addGlobalParameter('crf', crf)\nforceEele.addPerBondParameter('q12')\n# Erep_pw\nwith open(pdbid+'.solv.systemEtot.xml','r') as f:\n systemErep = omm.XmlSerializer.deserialize(f.read())\nforces = systemErep.getForces()\nfor k in range(0,len(forces)):\n systemErep.removeForce(0)\nforceErep = omm.CustomBondForce('(4*eps12*((sig12/r)^12 - (sig12/r)^6)+eps12)*step(2^(1/6)*sig12-r)')\nforceErep.setUsesPeriodicBoundaryConditions(True)\nforceErep.addPerBondParameter('sig12')\nforceErep.addPerBondParameter('eps12')\n# Edis_pw\nwith open(pdbid+'.solv.systemEtot.xml','r') as f:\n systemEdis = omm.XmlSerializer.deserialize(f.read())\nforces = systemEdis.getForces()\nfor k in range(0,len(forces)):\n systemEdis.removeForce(0)\n# forceEdis = omm.CustomBondForce('((4*eps12*((sig12/r)^12 - (sig12/r)^6)+eps12)*step(r-2^(1/6)*sig12)-eps12)')\nforceEdis = omm.CustomBondForce('((4*eps12*((sig12/r)^12 - (sig12/r)^6)+eps12)*step(r-2^(1/6)*sig12)-eps12)*step(1-r/CUTOFF)')\nforceEdis.setUsesPeriodicBoundaryConditions(True)\nforceEdis.addGlobalParameter('CUTOFF', CUTOFF)\nforceEdis.addPerBondParameter('sig12')\nforceEdis.addPerBondParameter('eps12')\n# Evdw_pw\nwith open(pdbid+'.solv.systemEtot.xml','r') as f:\n systemEvdw = omm.XmlSerializer.deserialize(f.read())\nforces = systemEvdw.getForces()\nfor k in range(0,len(forces)):\n systemEvdw.removeForce(0)\n# add new forces\n# forceEvdw = omm.CustomBondForce('4*eps12*((sig12/r)^12 - (sig12/r)^6)')\nforceEvdw = omm.CustomBondForce('4*eps12*((sig12/r)^12 - (sig12/r)^6)*step(1-r/CUTOFF)')\nforceEvdw.setUsesPeriodicBoundaryConditions(True)\nforceEvdw.addGlobalParameter('CUTOFF', CUTOFF)\nforceEvdw.addPerBondParameter('sig12')\nforceEvdw.addPerBondParameter('eps12')\n# add forces\ninx_pw = pdb.topology.select_pairs(selection1='resid 1', selection2='waters')\nfor inx in inx_pw:\n p1 = int(inx[0])\n p2 = int(inx[1])\n q1 = top.atoms[p1].ucharge\n sig1 = top.atoms[p1].usigma\n eps1 = top.atoms[p1].uepsilon\n q2 = top.atoms[p2].ucharge\n sig2 = top.atoms[p2].usigma\n eps2 = top.atoms[p2].uepsilon\n q12 = q1*q2\n sig12 = (sig1+sig2)*0.5e0\n eps12 = unit.Quantity.sqrt(eps1*eps2)\n #print(q12, sig12, eps12)\n forceErep.addBond(p1,p2,[sig12,eps12])\n forceEdis.addBond(p1,p2,[sig12,eps12])\n forceEvdw.addBond(p1,p2,[sig12,eps12])\n forceEele.addBond(p1,p2,[q12,])\nsystemErep.addForce(forceErep)\nsystemEdis.addForce(forceEdis)\nsystemEvdw.addForce(forceEvdw)\nsystemEele.addForce(forceEele)\n\n# save additional systems\nwith open(pdbid+'.solv.systemEvac.xml', 'w') as f:\n f.write(omm.XmlSerializer.serialize(systemEvac))\nwith open(pdbid+'.solv.systemErep.xml', 'w') as f:\n f.write(omm.XmlSerializer.serialize(systemErep))\nwith open(pdbid+'.solv.systemEdis.xml', 'w') as f:\n f.write(omm.XmlSerializer.serialize(systemEdis))\nwith open(pdbid+'.solv.systemEvdw.xml', 'w') as f:\n f.write(omm.XmlSerializer.serialize(systemEvdw))\nwith open(pdbid+'.solv.systemEele.xml', 'w') as f:\n 
f.write(omm.XmlSerializer.serialize(systemEele))\n\n","repo_name":"XipingGong/fep","sub_path":"required/prefep.py","file_name":"prefep.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"17395059256","text":"\r\nfirst_arr = [1,3,5,4,6,13,10,9,8,15,17]\r\ninsert_arr = [1,3,5,4,6,13,10,9,8,15]\r\ndef order(arr):\r\n    last_leaf = int(len(arr)/2)-1\r\n    for i in range(last_leaf,-1,-1):\r\n        arr = swap(arr,i,last_leaf)\r\n        #print('done with swap:',abs(i-last_leaf-1))\r\n    print('arr:',arr)\r\n\r\ndef swap(arr,n,lastLeafInd):\r\n    #print('check:',arr[n])\r\n    if 2*n+2<len(arr) and arr[2*n+2]>arr[2*n+1] and arr[2*n+2]>arr[n]:\r\n        #print('swapped:',arr[n],' and ',arr[2*n+2])\r\n        temp = arr[n]\r\n        arr[n] = arr[2*n+2]\r\n        arr[2*n+2] = temp\r\n        return swap(arr,2*n+2,lastLeafInd)\r\n    elif 2*n+1< len(arr) and arr[2*n+1]>arr[n]:\r\n        #print('swapped:',arr[n],' and ',arr[2*n+1])\r\n        temp = arr[n]\r\n        arr[n] = arr[2*n+1]\r\n        arr[2*n+1] = temp\r\n        return swap(arr,2*n+1,lastLeafInd)\r\n    else:\r\n        return arr\r\ndef insert_swap(arr,n,lastLeafInd):\r\n    if 2*n+2<len(arr) and arr[2*n+2]>arr[2*n+1] and arr[2*n+2]>arr[n]:\r\n        temp = arr[n]\r\n        arr[n] = arr[2*n+2]\r\n        arr[2*n+2] = temp\r\n        if n != 0:\r\n            return insert_swap(arr,int((n-1)/2),lastLeafInd)\r\n    elif 2*n+1< len(arr) and arr[2*n+1]>arr[n]:\r\n        temp = arr[n]\r\n        arr[n] = arr[2*n+1]\r\n        arr[2*n+1] = temp\r\n        if n!= 0:\r\n            return insert_swap(arr,int((n-1)/2),lastLeafInd)\r\n    else:\r\n        return arr\r\ndef insert(arr,num):\r\n    arr.append(num)\r\n    last_leaf = int(len(arr)/2)-1\r\n    insert_swap(arr,last_leaf,last_leaf)\r\n    print('arr:',arr)\r\n\r\ndef merge_sort(arr):\r\n    size = len(arr)\r\n    if size>2:\r\n        L = merge_sort(arr[:size//2])\r\n        R = merge_sort(arr[size//2:])\r\n        indexL = 0\r\n        indexR = 0\r\n        while indexL+indexR != size:\r\n            if indexL == len(L):\r\n                arr[indexL+indexR] = R[indexR]\r\n                indexR += 1\r\n            elif indexR == len(R):\r\n                arr[indexL+indexR] = L[indexL]\r\n                indexL += 1\r\n            elif L[indexL] > R[indexR]:\r\n                arr[indexL+indexR] = R[indexR]\r\n                indexR += 1\r\n            else:\r\n                arr[indexL+indexR] = L[indexL]\r\n                indexL += 1\r\n        return arr \r\n    elif size == 1:\r\n        return arr\r\n    elif arr[0] > arr[1]:\r\n        temp = arr[1]\r\n        arr[1] = arr[0]\r\n        arr[0] = temp\r\n        return arr\r\n    else:\r\n        return arr\r\n'''\r\norder(insert_arr)\r\ninsert(insert_arr,17)\r\norder(arr)\r\n'''\r\nprint('arr:',merge_sort(first_arr))\r\n","repo_name":"bcverdict/Sorting-Algoithms","sub_path":"bin_heap.py","file_name":"bin_heap.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73268539785","text":"\"\"\"Training pipeline. 
Logs to MLflow.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport os\r\nimport tempfile\r\n\r\nimport mlflow\r\nimport mlflow.keras\r\nimport numpy as np\r\nimport plotly\r\nimport plotly.graph_objects as go\r\nimport tensorflow as tf\r\nfrom src.classifiers.MobileNetV2 import InferMobileNetV2, MobileNetV2\r\nfrom src.settings.settings import logging, mobilenetv2_params, xception_params, efficientnetb0_params, efficientnetb4_params\r\nfrom src.tools.dataset import manual_get_datasets\r\nimport pandas as pd\r\nfrom src.classifiers.Xception import Xception\r\nfrom src.classifiers.EfficientNetB0 import EfficientNetB0\r\nfrom src.classifiers.EfficientNetB4 import EfficientNetB4\r\nfrom src.settings.settings import paths\r\n\r\n\r\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\r\nfor gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n\r\n\r\n\r\n# tf.debugging.set_log_device_placement(True)\r\n\r\n# logging.debug(mobilenetv2_params)\r\n\r\n\r\ndef train(exp_name, tracking_uri):\r\n\r\n if exp_name == \"mobilenetv2\":\r\n params = mobilenetv2_params\r\n elif exp_name == \"xception\":\r\n params = xception_params\r\n elif exp_name == \"efficientnetb0\":\r\n params = efficientnetb0_params\r\n elif exp_name == \"efficientnetb4\":\r\n params = efficientnetb4_params\r\n\r\n data_path = paths[\"data\"][\"DATA_DIR\"]\r\n train_dir = os.path.join(data_path, params[\"dataset\"], \"train_directory\")\r\n test_dir = os.path.join(data_path, params[\"dataset\"], \"test_directory\")\r\n # mobilenetv2_weights = weights_path\r\n\r\n print(\r\n \"Num GPUs Available: {}\".format(\r\n len(tf.config.experimental.list_physical_devices(\"GPU\"))\r\n )\r\n )\r\n print(\"List of GPUs {}\".format(tf.config.list_physical_devices(\"GPU\")))\r\n\r\n # tf.debugging.set_log_device_placement(True)\r\n # tf.config.set_soft_device_placement(True)\r\n\r\n print(\"Tracking uri : {}\".format(tracking_uri))\r\n logging.debug(\"Tracking URI : {}\".format(tracking_uri))\r\n mlflow.set_tracking_uri(tracking_uri)\r\n mlflow.set_experiment(exp_name)\r\n\r\n with mlflow.start_run() as run:\r\n\r\n logging.info(\"Run ID : {}\".format(run.info.run_id))\r\n\r\n # All parameters are logged once yaml file\r\n mlflow.log_params(params)\r\n\r\n train_dataset, validation_dataset = manual_get_datasets(train_dir, test_dir, params)\r\n # train_dataset, validation_dataset = auto_get_datasets(train_dir, test_dir, params)\r\n\r\n if params[\"test_mode\"]:\r\n train_dataset = train_dataset.take(10)\r\n validation_dataset = validation_dataset.take(2)\r\n\r\n if exp_name == \"mobilenetv2\":\r\n # No need to change the parameters inside this script\r\n # Change the parameters in the mobilenetv2_params.yaml\r\n mobilenetv2_weights = os.path.join(paths[\"model\"][\"WEIGHTS_DIR\"], params[\"weights\"])\r\n model = MobileNetV2(\r\n mobilenetv2_weights,\r\n (\r\n params[\"img_height\"],\r\n params[\"img_width\"],\r\n params[\"img_n_channels\"],\r\n ),\r\n )\r\n elif exp_name == \"xception\":\r\n model = Xception((params[\"img_height\"], params[\"img_width\"], params[\"img_n_channels\"]))\r\n elif exp_name == \"efficientnetb0\":\r\n model = EfficientNetB0(transfer_learning=params[\"transfer_learning\"], fine_tuning=params[\"fine_tuning\"], input_shape=(params[\"img_height\"], params[\"img_width\"], params[\"img_n_channels\"]), weights=os.path.join(paths[\"model\"][\"WEIGHTS_DIR\"], params[\"weights\"]))\r\n elif exp_name == \"efficientnetb4\":\r\n model = EfficientNetB4(transfer_learning=params[\"transfer_learning\"], 
fine_tuning=params[\"fine_tuning\"], input_shape=(params[\"img_height\"], params[\"img_width\"], params[\"img_n_channels\"]), weights=os.path.join(paths[\"model\"][\"WEIGHTS_DIR\"], params[\"weights\"]))\r\n\r\n\r\n\r\n # history, cc = mobilenet.train(\r\n # train_dataset,\r\n # validation_dataset,\r\n # n_epochs=mobilenetv2_params[\"n_epochs\"],\r\n # learning_rate=mobilenetv2_params[\"learning_rate\"],\r\n # fine_tuning=mobilenetv2_params[\"fine_tuning\"],\r\n # fine_tuning_epochs=mobilenetv2_params[\"fine_tuning_epochs\"],\r\n # )\r\n history, cc = model.train(\r\n train_dataset,\r\n validation_dataset,\r\n transfer_learning=params[\"transfer_learning\"],\r\n n_epochs=params[\"n_epochs\"],\r\n learning_rate=params[\"learning_rate\"],\r\n fine_tuning=params[\"fine_tuning\"],\r\n fine_tuning_epochs=params[\"fine_tuning_epochs\"],\r\n fine_tuning_lr=params[\"fine_tuning_lr\"]\r\n )\r\n\r\n loss = history.history[\"loss\"]\r\n val_loss = history.history[\"val_loss\"]\r\n categorical_accuracy = history.history[\"categorical_accuracy\"]\r\n val_categorical_accuracy = history.history[\"val_categorical_accuracy\"]\r\n\r\n loss_fig = go.Figure()\r\n loss_fig.add_trace(\r\n go.Scatter(x=np.arange(params[\"n_epochs\"]), y=loss, name=\"loss\")\r\n )\r\n loss_fig.add_trace(\r\n go.Scatter(\r\n x=np.arange(params[\"n_epochs\"]), y=val_loss, name=\"val_loss\"\r\n )\r\n )\r\n\r\n metric_fig = go.Figure()\r\n metric_fig.add_trace(\r\n go.Scatter(\r\n x=np.arange(params[\"n_epochs\"]),\r\n y=categorical_accuracy,\r\n name=\"categorical_accuracy\",\r\n )\r\n )\r\n metric_fig.add_trace(\r\n go.Scatter(\r\n x=np.arange(params[\"n_epochs\"]),\r\n y=val_categorical_accuracy,\r\n name=\"val_categorical_accuracy\",\r\n )\r\n )\r\n\r\n tmpdir = tempfile.mkdtemp()\r\n loss_path = os.path.join(tmpdir, \"loss.html\")\r\n metric_path = os.path.join(tmpdir, \"accuracy.html\")\r\n plotly.offline.plot(loss_fig, filename=loss_path, auto_open=False)\r\n plotly.offline.plot(metric_fig, filename=metric_path, auto_open=False)\r\n logging.debug(tmpdir)\r\n mlflow.log_artifact(loss_path, artifact_path=\"graphs\")\r\n mlflow.log_artifact(metric_path, artifact_path=\"graphs\")\r\n\r\n # Save the best metric\r\n\r\n # Save best metric\r\n best_epoch = np.argmax(val_categorical_accuracy)\r\n mlflow.log_metric(\r\n \"best_val_categorical_accuracy\", max(val_categorical_accuracy)\r\n )\r\n mlflow.log_metric(\"best_epoch\", best_epoch)\r\n mlflow.log_metric(\"n_epochs\", len(val_categorical_accuracy))\r\n\r\n # Save report\r\n report = cc.classification_reports[best_epoch]\r\n report_path = os.path.join(tmpdir, \"report.csv\")\r\n pd.DataFrame(report).transpose().to_csv(report_path)\r\n mlflow.log_artifact(report_path, artifact_path=\"reports\")\r\n\r\n # Save confusion matrix\r\n cm = cc.cm_images[best_epoch]\r\n cm_path = os.path.join(tmpdir, \"confusion_matrix.png\")\r\n cm.savefig(cm_path)\r\n mlflow.log_artifact(cm_path, artifact_path=\"confusion_matrix\")\r\n\r\n # Save the summaries\r\n if params[\"transfer_learning\"]:\r\n mlflow.log_artifact(\"summary_transfer-learning.txt\", artifact_path=\"summaries\")\r\n if params[\"fine_tuning\"]:\r\n mlflow.log_artifact(\"summary_fine-tuning.txt\", artifact_path=\"summaries\")\r\n\r\n\r\n # Save the model\r\n\r\n # First log the Keras model thanks to MLFlow Keras API\r\n mlflow.keras.log_model(tf.keras.models.load_model(\"best_model.h5\"), \"model\")\r\n\r\n # Then give the path to this artifact for the model to be used in inference\r\n artifacts = {\r\n \"keras_model\": 
\"{}/model/data/model.h5\".format(run.info.artifact_uri),\r\n }\r\n\r\n # Log the model to be used in Inference\r\n mlflow.pyfunc.log_model(\r\n artifact_path=\"py_model\",\r\n python_model=InferMobileNetV2(),\r\n conda_env=\"environment.yml\",\r\n artifacts=artifacts,\r\n code_path=[\"src/\"],\r\n )\r\n\r\n # Clean up\r\n os.remove(\"cm.png\")\r\n os.remove(\"best_model.h5\")\r\n\r\n return run\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # retrieve the 2 arguments configured through `arguments` in the ScriptRunConfig\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--exp-name\", type=str, dest=\"exp_name\")\r\n parser.add_argument(\"--tracking-uri\", type=str, dest=\"tracking_uri\")\r\n parser.add_argument(\"--data-path\", type=str, dest=\"data_path\")\r\n parser.add_argument(\"--weights-path\", type=str, dest=\"weights_path\")\r\n\r\n args = parser.parse_args()\r\n exp_name = args.exp_name\r\n tracking_uri = args.tracking_uri\r\n data_path = args.data_path\r\n weights_path = args.weights_path\r\n\r\n train(exp_name, tracking_uri, data_path, weights_path)\r\n","repo_name":"ValentinRicher/food-classification","sub_path":"src/pipelines/mlflow_train_pipeline.py","file_name":"mlflow_train_pipeline.py","file_ext":"py","file_size_in_byte":8797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2625904858","text":"import calendar\nimport utils\n\nclass ui:\n\n ## @param cal - calendar object that will be interated with\n def __init__(self, cal):\n self.cal = cal\n # variables for creating a new event\n self.day = ''\n self.s_time = ''\n self.e_time = ''\n self.processes_L = set()\n \n # variables for deleting an event\n self.d_uid = -1\n self.d_number = -1\n\n ## main ui loop\n def run(self): \n # value to read input into \n val = ''\n\n print('Type q to exit.\\nWould you like to add or remove a calendar event, or display a calendar?')\n while val != 'q' and val != 'quit' and val != 'exit':\n val = input('(add/remove/mine/everyone) > ').lower()\n\n # adding an event\n if val == 'add':\n val = self.get_a_name()\n if val == 'day' and self.name != '':\n val = self.get_day()\n if val == 's_time' and self.day != '':\n val = self.get_s_time()\n if val == 'e_time' and self.s_time != '':\n val = self.get_e_time()\n if val == 'processes' and self.e_time != '':\n val = self.get_processes()\n if val == 'fin_add' and self.processes_L:\n # when we get all of our info, add an event\n self.cal.add_event(self.name, self.day, self.s_time, self.e_time, list(self.processes_L))\n # clear event info\n self.s_day = ''\n self.s_time = ''\n self.e_day = ''\n self.e_time = ''\n self.processes_L = set()\n \n # removing an event\n if val == 'remove':\n val = self.get_d_name() \n if val == 'name' and self.name != '':\n self.cal.remove_from_calendar(self.name)\n self.name = ''\n\n # display calendar\n if val == 'mine' or val == 'everyone':\n val = self.display(val)\n self.cal.poll = False\n \n \n ## method to check for valid time\n ## @param time - string to check as valid day\n ## @param end - boolean for if end time\n def check_time(self, time, end=False):\n hour, minute = time.split(':')\n\n # if this is an end time and is on same day as start, make sure it comes after start\n if end:\n s_hour, s_minute = self.s_time.split(':')\n if int(hour) < int(s_hour):\n print('End time must come after start time')\n return False\n elif int(hour) == int(s_hour) and int(minute) < int(minute):\n print('End time must come after start time')\n return False\n\n # 
make sure hour is less than 24 (valid)\n if int(hour) < 24: \n if int(minute) == 30 or int(minute) == 0:\n return True\n else:\n print('Appointments can only be scheduled on the hour or on the 30')\n return False\n\n ## method to check for valid day\n ## @param day - string to check if is valid day\n ## @param end - boolean for if end day\n def check_day(self, day):\n day = day.lower()\n\n # return true if valid day\n if day == 'monday':\n return True\n elif day == 'tuesday':\n return True\n elif day == 'wednesday':\n return True\n elif day == 'thursday':\n return True\n elif day == 'friday':\n return True\n elif day == 'saturday':\n return True\n elif day == 'sunday':\n return True\n return False\n\n ## displays log\n def display(self, who):\n if who == 'everyone':\n self.cal.print_calendar(True)\n else:\n self.cal.print_calendar()\n return ''\n\n ## ui loop for inputting name of event to be deleted\n def get_a_name(self):\n val = ''\n print('What is the name of the event you would like to add? (Enter cancel to cancel.)')\n while val != 'q' and val != 'quit' and val != 'exit':\n val = input('(/cancel) > ').lower()\n if val == 'cancel':\n return ''\n # check to see if it is a string\n if val != '': \n self.name = val\n return 'day'\n return val\n \n ## ui loop for inputting name of event to be deleted\n def get_d_name(self):\n val = ''\n print('What is the name of the event you would like to delete? (Enter cancel to cancel.)')\n while val != 'q' and val != 'quit' and val != 'exit':\n val = input('(/cancel) > ').lower()\n if val == 'cancel':\n return ''\n # lazy way to check to see if it is a number\n if val != '': \n self.name = val\n return 'name'\n return val\n \n ## ui loop for inputting start time\n def get_s_time(self):\n val = ''\n print('What time does this appointment start? (Enter cancel to cancel.)')\n while val != 'q' and val != 'quit' and val != 'exit':\n val = input('(hh:mm/cancel) > ').lower()\n if val == 'cancel':\n self.s_day = ''\n self.s_time = ''\n self.e_day = ''\n self.e_time = ''\n self.processes_L = set()\n return ''\n if self.check_time(val): \n self.s_time = val\n return 'e_time'\n return val\n\n # ui loop for inputting start time\n def get_e_time(self):\n val = ''\n print('What time does this appointment end? (Enter cancel to cancel.)')\n while val != 'q' and val != 'quit' and val != 'exit':\n val = input('(hh:mm/cancel) > ').lower()\n if val == 'cancel':\n self.s_day = ''\n self.s_time = ''\n self.e_day = ''\n self.e_time = ''\n self.processes_L = set()\n return ''\n if self.check_time(val, True): \n self.e_time = val\n return 'processes'\n return val\n \n # ui loop for inputting a start day\n def get_day(self):\n val = ''\n print('What day is this appointment? (Enter cancel to cancel.)')\n while val != 'q' and val != 'quit' and val != 'exit':\n val = input('(sunday-saturday/cancel) > ').lower()\n if val == 'cancel':\n self.s_day = ''\n self.s_time = ''\n self.e_day = ''\n self.e_time = ''\n self.processes_L = set()\n return ''\n if self.check_day(val): \n self.day = val\n return 's_time'\n return val \n\n ## get processes involved in appointment\n def get_processes(self):\n val = ''\n print('Who is going to this appointment? 
(Enter e when done/cancel to cancel.)')\n        while val != 'q' and val != 'quit' and val != 'exit':\n            val = input('(0/1/2/3/e/cancel) > ').lower()\n            if val == 'cancel':\n                self.s_day = ''\n                self.s_time = ''\n                self.e_day = ''\n                self.e_time = ''\n                self.processes_L = set()\n                return ''\n            # if we see an e, return\n            if val == 'e' and self.processes_L:\n                print('Added:', self.processes_L)\n                return 'fin_add'\n            elif val == 'e':\n                print('You must enter at least one participant')\n            if int(val) < 4: \n                print('Adding', val, 'to event.')\n                self.processes_L.add(int(val))\n        return val\n        \n","repo_name":"irish3725/DistributedLog","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":7771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42299073875","text":"#예제1\nimport random\n\ndice = 0\ndiceCount = 0\nwhile True:\n    dice = random.randint(1, 8)\n    diceCount += 1\n\n    print(\"dice = %d / diceCount = %d\" % (dice, diceCount), \"\\n\")\n\n    if dice == 5 or diceCount == 8:\n        break\n\n#예제2\nprice = 300\namountCoffee = 10\n\nprint(\"돈 투입 :\", end=\"\")\n\nmoney = int(input())\n\nwhile True :\n    amountCoffee -= 1\n    money -= price\n    if amountCoffee == 0:\n        print(\"믹스커피가 부족합니다. 반환되는 잔액은 %d입니다.\" % money)\n        break\n    if money < 300:\n        print(\"잔액이 부족합니다. 반환되는 잔액은 %d 원입니다.\" % money)\n","repo_name":"lilpsj/learning_python","sub_path":"chapter6/ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"38303980105","text":"import sys\r\nimport time\r\nfrom pymavlink import mavutil\r\nimport pprint\r\n\r\n\r\nmodelist = [\r\n    'STABILIZE', \r\n    'ACRO', \r\n    'ALT_HOLD', \r\n    'AUTO', \r\n    'GUIDED', \r\n    'LOITER', \r\n    'RTL', \r\n    'CIRCLE', \r\n    'POSITION', \r\n    'LAND', \r\n    'OF_LOITER', \r\n    'DRIFT', \r\n    'SPORT', \r\n    'FLIP', \r\n    'AUTOTUNE', \r\n    'POSHOLD', \r\n    'BRAKE', \r\n    'THROW', \r\n    'AVOID_ADSB', \r\n    'GUIDED_NOGPS', \r\n    'SMART_RTL', \r\n    'FLOWHOLD', \r\n    'FOLLOW', \r\n    'ZIGZAG', \r\n    'SYSTEMID', \r\n    'AUTOROTATE', \r\n    'AUTO_RTL'\r\n    ]\r\n\r\n\r\ndef countdown(secs):\r\n    print(\"Counting %s seconds\" %secs)\r\n    for i in range (1, secs+1):\r\n        print(\"Count \" + str(i))\r\n        time.sleep(1)\r\n\r\n\r\ndef get_mode(master):\r\n    # Check the current mode\r\n    print(\"Current mode directly:\")\r\n    print(master.flightmode)\r\n\r\n    print(\"Current mode via heartbeat:\")\r\n    mode_num = 0\r\n    for i in range(0, 2):\r\n        try:\r\n            msg = master.recv_match(type=\"HEARTBEAT\", blocking=True).to_dict()\r\n            pprint.pprint(msg['base_mode'])\r\n            pprint.pprint(msg['custom_mode'])\r\n            if msg['custom_mode'] > mode_num:\r\n                mode_num = msg['custom_mode']\r\n            print(\"*************************************************\")\r\n        except Exception as e:\r\n            print(e)\r\n        time.sleep(0.1)\r\n\r\n    mode = modelist[mode_num]\r\n    print(\"Current mode: \" + mode)\r\n    return mode\r\n\r\n\r\n# Connection with the autopilot\r\nmaster = mavutil.mavlink_connection('/dev/ttyAMA0', 921600)\r\nmaster.wait_heartbeat()\r\n\r\n# # Send heartbeat from a MAVLink application (from the script running on Raspberry Pi)\r\n# master.mav.heartbeat_send(\r\n#     mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER,\r\n#     mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0, 0)\r\n\r\n#countdown(5)\r\n\r\nget_mode(master)\r\n\r\n# Choose a mode\r\nmode = input(\"Introduce the new mode: \")\r\nmode = mode.upper()\r\n\r\n# Check if mode is available\r\nif mode not in master.mode_mapping():\r\n    print('Unknown mode : 
{}'.format(mode))\r\n    print('Try:', list(master.mode_mapping().keys()))\r\n    sys.exit(1)\r\n\r\n\r\n# Print the hash table of modes and their associated numbers\r\nprint(\"HASH TABLE - mode_mapping\")\r\nfor i in modelist:\r\n    mode_num = master.mode_mapping()[i]\r\n    print(i + \": \" + str(mode_num))\r\n\r\n# Get mode ID\r\nmode_id = master.mode_mapping()[mode]\r\nprint(\"Mode id: \" + str(mode_id))\r\n\r\n# Set new mode\r\n'''\r\n# THIS ONE WORKS GREAT\r\nmaster.mav.command_long_send(\r\n    master.target_system, master.target_component,\r\n    mavutil.mavlink.MAV_CMD_DO_SET_MODE, \r\n    0,\r\n    1, \r\n    mode_id, 0, 0, 0, 0, 0)\r\n'''\r\n\r\n# Try this one\r\nmaster.set_mode(mode_id)\r\n\r\n# THIS ONE WORKS FINE, I THINK\r\n# master.mav.set_mode_send(\r\n#     master.target_system,\r\n#     mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,\r\n#     mode_id)\r\n\r\n#countdown(5)\r\n\r\n\r\nwhile True:\r\n    # Wait for ACK command\r\n    # Would be good to add mechanism to avoid endlessly blocking\r\n    # if the autopilot sends a NACK or never receives the message\r\n    ack_msg = master.recv_match(type='COMMAND_ACK', blocking=True)\r\n    ack_msg = ack_msg.to_dict()\r\n    print (ack_msg)\r\n\r\n    # Continue waiting if the acknowledged command is not `set_mode`\r\n    if ack_msg['command'] != mavutil.mavlink.MAV_CMD_DO_SET_MODE:\r\n        continue\r\n\r\n    # Print the ACK result !\r\n    print(mavutil.mavlink.enums['MAV_RESULT'][ack_msg['result']].description)\r\n    break\r\n\r\n\r\nget_mode(master)\r\n\r\n'''\r\n# Check the new mode directly\r\nprint(\"Mode changed to:\")\r\nprint(master.flightmode)\r\n\r\n# check the new mode via heartbeat\r\nprint(\"New mode via heartbeat:\")\r\nfor i in range(0, 2):\r\n    try:\r\n        msg = master.recv_match(type=\"HEARTBEAT\", blocking=True).to_dict()\r\n        pprint.pprint(msg['base_mode'])\r\n        pprint.pprint(msg['custom_mode'])\r\n        print(\"Current mode: \" + modelist[msg['custom_mode']])\r\n        print(\"*************************************************\")\r\n    except Exception as e:\r\n        print(e)\r\n\r\n    time.sleep(0.1)\r\n'''","repo_name":"javi94diaz/raspberry_scripts","sub_path":"pymavlink/change_mode.py","file_name":"change_mode.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32922000183","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 11 22:11:43 2017\r\n\r\n@author: Edward\r\n\"\"\"\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport time\r\nimport numpy as np\r\nimport cv2\r\n\"\"\"\r\nfrom scipy import ndimage as ndi\r\nfrom scipy import ndimage\r\nfrom scipy import interpolate\r\nfrom scipy.interpolate import interp1d\r\n\"\"\"\r\nimport sys\r\n\r\n#from matplotlib import pyplot as plt\r\n\r\n#from lineManager import lineMgr\r\nfrom circleManager import CircleManager, SingleCircleDrawingManager\r\nfrom pointsManager import PointsManager\r\nfrom messageBox import inputFileMBox\r\nfrom mouseControl import MouseControl\r\nfrom frameControl import FrameControl\r\n\r\ncurrentFrameNum = -1 # Frame number (starting from 1)\r\nframeID = None\r\n\r\n# 1 - video\r\n# 2 - curves\r\n# 3 - laser\r\nbuttonMode = 1\r\n \r\ntextLineHeight = 20\r\nnextTextLineY = textLineHeight*2\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\ndef writeInformationText(img):\r\n    global textLineHeight,nextTextLineY,font\r\n    # Write some Text\r\n    \r\n    linesInfo = [\"c - Close poly\",\"r - Remove last shape\",\"Mouse left button: add point\",\"Mouse right button: remove point/poly/circle\"];\r\n    #cv2.putText(img,\"-- 
Mode 3 --\",(10,textLineHeight), font, 0.5,(255,255,255),1) \r\n nextTextLineY = textLineHeight*1; \r\n for i in range(0,len(linesInfo)):\r\n cv2.putText(img,linesInfo[i],(10,nextTextLineY+textLineHeight*i), font, 0.5,(255,255,255),2);\r\n nextTextLineY += textLineHeight*(1+np.max([len(linesInfo)]));\r\n \r\n\"\"\"\r\n This is called whenever the fame is changed, but we don't know\r\n if it is the next frame, previous frame, or a reset!\r\n\"\"\"\r\ndef newFrame():\r\n pass\r\n \r\nVIDEO_FILE_MODE2 = inputFileMBox(\"organoidTracker.config\", None, None)\r\n\r\n\"\"\"\r\n Frame ctrl\r\n\"\"\"\r\nframeCtrl = FrameControl()\r\nframeCtrl.setInputVideo(VIDEO_FILE_MODE2)\r\ntotNumFrames = frameCtrl.getTotalFrameCount()\r\nprint('Tot frame #: '+np.str(totNumFrames))\r\n\r\n\"\"\"\r\n Managers and controllers\r\n\"\"\"\r\ncirclesMgr = CircleManager()\r\ncircleDrawingMgr = SingleCircleDrawingManager()\r\nshapeMgr = PointsManager(totNumFrames, 0)\r\nmouseCtrl = MouseControl(circlesMgr, circleDrawingMgr, shapeMgr)\r\n\r\nif True:\r\n lastFrameDrawingTime = time.time()\r\n \r\n flagReset = False # Reset video in mode2\r\n flagFirstOpen = True\r\n isPaused = False\r\n flagOneFrame = False\r\n flagOneFrameBack = False\r\n\r\n prevFrameID = -1\r\n prevThreshold = -1\r\n \r\n prevTime = time.time() # The last time a button was pressed\r\n buttonMode = 2 # The mode for button meaning, default = video \r\n factor = 1.5 # The factor for rezing the frame, or zooming in\r\n \r\n #\r\n ## Mode 3 settings\r\n #################################\r\n mode = 3\r\n isPaused = True\r\n circleIntensitiesSaved = True\r\n \r\n flagOneFrameForward = True\r\n flagOneFrameBack = False\r\n \r\n while(frameCtrl.isRunning()):\r\n if flagFirstOpen==True:\r\n cv2.namedWindow('frame')\r\n cv2.setMouseCallback('frame', mouseCtrl.callback)\r\n flagFirstOpen = False\r\n \r\n # Show frame after cuve additions\r\n circleIntensity = None\r\n if flagOneFrameForward==True or flagOneFrameBack==True or flagReset==True:\r\n if flagReset==True:\r\n frameCtrl.resetVideo()\r\n flagReset = False\r\n if flagOneFrameForward==True:\r\n # Measure in frame 1 before moving to frame 2\r\n if currentFrameNum==1:\r\n grayFrame = frameCtrl.getCurrentGrayFrame()\r\n \r\n # Measure avg intensity for all circles\r\n circlesMgr.measureAverageCircleIntensityForAllCircles(grayFrame,frameCtrl.getCurrentFrame(),currentFrameNum-1)\r\n \r\n # Measure avg intensity for the polygon\r\n shapeMgr.measureAveragePolyIntensityForAllPolys(grayFrame,frameCtrl.getCurrentFrame(),currentFrameNum-1)\r\n \r\n # Load new frame\r\n #print('frameCtrl.forward()')\r\n frameCtrl.forward()\r\n \r\n # If we passed the last frame, get back to the first one\r\n if frameCtrl.getCurrentFrame() is None:\r\n frameCtrl.backward()\r\n \r\n # Save intensities here?\r\n \r\n if flagOneFrameBack==True:\r\n frameCtrl.backward()\r\n \r\n # Have we gone too far back?\r\n if frameCtrl.getCurrentFrame() is None:\r\n frameCtrl.forward()\r\n \r\n flagOneFrameBack = False\r\n \r\n currentFrameNum = frameCtrl.getCurrentFrameNumber()\r\n if currentFrameNum==1:\r\n circleIntensitiesSaved = False;\r\n \r\n # Resize the frame or zoom in\r\n width = frameCtrl.getFrameWidth()\r\n height = frameCtrl.getFrameHeight()\r\n grayFrame = frameCtrl.getCurrentGrayFrame()\r\n \r\n if flagOneFrameForward==True:\r\n if isPaused==True:\r\n flagOneFrameForward = False;\r\n else:\r\n if circleIntensitiesSaved==False: \r\n # Measure avg intensity for all circles\r\n 
circlesMgr.measureAverageCircleIntensityForAllCircles(grayFrame,frameCtrl.getCurrentFrame(),frameCtrl.getCurrentFrameNumber()-1);\r\n                    \r\n                    # Measure avg intensity for the polygon\r\n                    shapeMgr.measureAveragePolyIntensityForAllPolys(grayFrame,frameCtrl.getCurrentFrame(),frameCtrl.getCurrentFrameNumber()-1);\r\n\r\n        \r\n        # Write button information\r\n        writeInformationText(frameCtrl.getCurrentFrame())\r\n        \r\n        # Send new frame # to shapeMgr\r\n        shapeMgr.setCurrentFrame(frameCtrl.getCurrentFrameNumber()-1)\r\n        \r\n        # Start our frame from the base frame\r\n        img_ch1_draw = np.copy(frameCtrl.getCurrentFrame())\r\n        \r\n        # Draw frame number\r\n        cv2.putText(img_ch1_draw,\"Frame: \"+str(frameCtrl.getCurrentFrameNumber()),(10,nextTextLineY+textLineHeight), font, 0.5,(255,255,255),2);\r\n        if circleIntensity is not None:\r\n            cv2.putText(img_ch1_draw,\"Circle intensity: \"+str(circleIntensity),(10,nextTextLineY+textLineHeight*2), font, 0.5,(0,0,200),2);\r\n        \r\n        if buttonMode==1:\r\n            # Show current circle\r\n            circleDrawingMgr.showCircle(img_ch1_draw)\r\n            \r\n            # Show all circles\r\n            circlesMgr.showAllCircles(img_ch1_draw)\r\n        elif buttonMode==2:\r\n            # Show polygons / shapes\r\n            shapeMgr.showAllShapes(img_ch1_draw)\r\n        \r\n        elapsed = time.time()-lastFrameDrawingTime\r\n        if True:#elapsed>0.5:\r\n            #print('showing frame')\r\n            cv2.imshow(\"frame\", img_ch1_draw)\r\n            lastFrameDrawingTime = time.time()\r\n            pass;\r\n        \r\n        key = cv2.waitKey(100)\r\n        \r\n        # Reset it\r\n        if key&0xFF == ord('r'):\r\n            flagReset = True;\r\n            isPaused = True\r\n            flagOneFrameForward = False\r\n            \r\n            # Remove last polygon\r\n            shapeMgr.removeLastShape()\r\n            \r\n        # Button mode - curves\r\n        if key&0xFF == ord('c'):\r\n            if mode==3 and buttonMode==2:\r\n                \"\"\"\r\n                polyMgr.closePoly()\r\n                polyMgr.addPolygon(polyMgr.getCurrentPoly())\r\n                \"\"\"\r\n                if shapeMgr.shapeType(shapeMgr.getCurrentShape())=='polygon':\r\n                    shapeMgr.closePoly()\r\n                    shapeMgr.addShape(shapeMgr.getCurrentShape())\r\n                    shapeMgr.clearCurrentShape()\r\n                    editingPoly = False\r\n                    \r\n                    a = shapeMgr.getPolyArea(factor)\r\n                    print('Poly area = ',np.round(a,3))\r\n            continue;\r\n        \r\n        # Quit\r\n        if (key&0xFF == ord('q')) or (key&0xFF == ord('Q')):\r\n            break;\r\n        \r\n        # Mode=2, Saving the curve fit\r\n        if key&0xFF == ord('s'):\r\n            continue\r\n        \r\n        # Paused\r\n        if key&0xFF == ord(' '):\r\n            if mode==2 and buttonMode==2:\r\n                pass\r\n            if mode==2 and buttonMode==3:\r\n                pass\r\n            if mode==3:\r\n                isPaused = not isPaused;\r\n                if isPaused==False:\r\n                    flagOneFrameForward = True\r\n            continue\r\n        \r\n        # Set threshold\r\n        if key&0xFF == ord('v'):\r\n            if mode==1:\r\n                #gThreshold = np.float(mbox(\"Enter the threshold:\", entry=True));\r\n                #print('threshold=',threshold);\r\n                pass\r\n            if mode==2:\r\n                buttonMode = 1\r\n                mouseCtrl.setButtonMode(1)\r\n            continue\r\n        \r\n        thisTime = time.time()\r\n        #print('thisTime-prevTime=',thisTime-prevTime);\r\n\r\n        # One frame (right arrow) \r\n        if int(key) == 2555904:\r\n            # Move to next frame in mode==2\r\n            if (mode==2 and buttonMode==1) or mode==3:\r\n                flagOneFrameForward = True\r\n                pass\r\n            # Move to next frame in mode==1\r\n            if mode==1:\r\n                flagOneFrame = True\r\n                isPaused = True\r\n            continue\r\n        \r\n        # One frame (left arrow)\r\n        if int(key) == 2424832:\r\n            # Move to previous frame in mode==2\r\n            if (mode==2 and buttonMode==1) or mode==3:\r\n                flagOneFrameBack = True\r\n                pass\r\n            # Move to previous frame in mode==1\r\n            if mode==1: \r\n                flagOneFrameBack = True\r\n                isPaused = True\r\n            continue\r\n        \r\n        # Up arrow\r\n        if int(key) == 2490368: \r\n            continue\r\n        \r\n        # Down 
arrow\r\n if int(key) == 2621440:\r\n continue\r\n \r\n if key&0xFF == ord('-'):\r\n if mode==1:\r\n pass\r\n continue\r\n \r\n # Save last button press time\r\n prevTime = time.time()\r\n\r\n\"\"\"\r\n Free resources\r\n\"\"\"\r\nframeCtrl.release()\r\ncv2.destroyAllWindows()","repo_name":"esudzilovsky/organoidTracker","sub_path":"organoidTracker-v0.01.py","file_name":"organoidTracker-v0.01.py","file_ext":"py","file_size_in_byte":10600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2975785090","text":"import logging\nimport os\nimport pickle\nimport tempfile\nfrom base64 import b64encode, b64decode\nfrom collections import namedtuple\n\nimport pytest\nimport six\n\nfrom petastorm.spark import spark_dataset_converter, SparkDatasetConverter\nfrom petastorm.tests.test_common import create_test_dataset, create_test_scalar_dataset, \\\n create_many_columns_non_petastorm_dataset\nfrom pyspark.sql import SparkSession\n\nSyntheticDataset = namedtuple('SyntheticDataset', ['url', 'data', 'path'])\n\n# Number of rows in a fake dataset\n_ROWS_COUNT = 100\n\n_CACHE_FAKE_DATASET_OPTION_SHORT = '-Y'\n_CACHE_FAKE_DATASET_OPTION = '--cache-synthetic-dataset'\n\nlogger = logging.getLogger(__name__)\n\n\ndef pytest_logger_config(logger_config):\n logger_config.add_loggers(\n [\n 'petastorm.workers_pool.process_pool',\n 'petastorm.workers_pool.thread_pool',\n 'petastorm.workers_pool.dummy_pool',\n 'petastorm.workers_pool.ventilator',\n 'petastorm.reader',\n ], stdout_level='debug')\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n _CACHE_FAKE_DATASET_OPTION_SHORT, _CACHE_FAKE_DATASET_OPTION, action=\"store_true\", default=False,\n help='Use a cached version of synthetic dataset if available. This helps speedup local tests reruns as '\n 'we don\\'t have to rerun spark. CAUTION: you won\\'t be exercising dataset generating parts of petastorm '\n 'hence tests results maybe inaccurate'\n )\n\n\ndef maybe_cached_dataset(config, name, generating_func):\n \"\"\"Returns cached dataset instance if caching of datasets is enabled and a valid dataset is available.\n\n We speedup test startup time by caching previously generated synthetic dataset.\n This is useful while developing for tests reruns, but can be dangerous since we can\n get stale results when petastorm code participating in dataset generation is used.\n\n :param config: request.config object.\n :param name: name of the cached dataset. Used as a cache key.\n :param generating_func: This function will be called (`generating_func()`) if dataset cache is disabled or\n no valid dataset is found in cache.\n :return:\n \"\"\"\n if config.getoption(_CACHE_FAKE_DATASET_OPTION):\n cache_key = '{}_{}'.format(name, 'PY2' if six.PY2 else 'PY3')\n serialized = config.cache.get(cache_key, None)\n dataset = pickle.loads(b64decode(serialized)) if serialized else None\n if not dataset or not os.path.exists(dataset.path):\n dataset = generating_func()\n config.cache.set(cache_key, b64encode(pickle.dumps(dataset)).decode('ascii'))\n else:\n logger.warning('CAUTION: %s HAS BEEN USED. USING %s CACHED TEST DATASET! 
MAYBE STALE!',\n _CACHE_FAKE_DATASET_OPTION, name)\n else:\n dataset = generating_func()\n\n return dataset\n\n\n@pytest.fixture(scope=\"session\")\ndef synthetic_dataset(request, tmpdir_factory):\n def _synthetic_dataset_no_cache():\n path = tmpdir_factory.mktemp(\"data\").strpath\n url = 'file://' + path\n data = create_test_dataset(url, range(_ROWS_COUNT))\n dataset = SyntheticDataset(url=url, path=path, data=data)\n return dataset\n\n return maybe_cached_dataset(request.config, 'synthetic_dataset', _synthetic_dataset_no_cache)\n\n\n@pytest.fixture(scope=\"session\")\ndef scalar_dataset(request, tmpdir_factory):\n def _pure_parquet_dataset_no_cache():\n path = tmpdir_factory.mktemp(\"data\").strpath\n url = 'file://' + path\n data = create_test_scalar_dataset(url, 100)\n dataset = SyntheticDataset(url=url, path=path, data=data)\n return dataset\n\n return maybe_cached_dataset(request.config, 'scalar', _pure_parquet_dataset_no_cache)\n\n\n@pytest.fixture(scope=\"session\")\ndef many_columns_non_petastorm_dataset(request, tmpdir_factory):\n \"\"\"This dataset has 1000 columns. All of the same int32 type.\"\"\"\n\n def _dataset_no_cache():\n path = tmpdir_factory.mktemp(\"data\").strpath\n url = 'file://' + path\n data = create_many_columns_non_petastorm_dataset(url, 10)\n dataset = SyntheticDataset(url=url, path=path, data=data)\n return dataset\n\n return maybe_cached_dataset(request.config, 'many_column_non_petastorm', _dataset_no_cache)\n\n\nclass SparkTestContext(object):\n def __init__(self):\n self.spark = SparkSession.builder \\\n .master(\"local[2]\") \\\n .appName(\"petastorm.spark tests\") \\\n .getOrCreate()\n self.tempdir = tempfile.mkdtemp('_spark_converter_test')\n self.temp_url = 'file://' + self.tempdir.replace(os.sep, '/')\n self.spark.conf.set(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, self.temp_url)\n spark_dataset_converter._FILE_AVAILABILITY_WAIT_TIMEOUT_SECS = 2\n\n def tear_down(self):\n # restore default file availability wait timeout\n spark_dataset_converter._FILE_AVAILABILITY_WAIT_TIMEOUT_SECS = 30\n self.spark.stop()\n\n\n@pytest.fixture(scope='module')\ndef spark_test_ctx():\n ctx = SparkTestContext()\n try:\n yield ctx\n finally:\n ctx.tear_down()\n","repo_name":"uber/petastorm","sub_path":"petastorm/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":1690,"dataset":"github-code","pt":"81"} +{"seq_id":"71285812426","text":"from torch.optim.lr_scheduler import _LRScheduler\nfrom torch.utils.data import Dataset, DataLoader, Subset\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport logging\nimport numpy as np\nimport sys\nimport os\nimport time\nimport datetime as dt\nimport argparse\nfrom modules.model import SpatioTemporalModel\nfrom modules.utils import setup_logger\n\nlogger = setup_logger(\"training\", \"logs/training.log\", logging.INFO)\n\n\n# TODO: refactor this class to be in preprocessing or new module just for data\nclass ClimateDataset(Dataset):\n def __init__(self, data):\n # non_target_cols = [\"lon\", \"lat\", \"aod\", \"date\", \"date_index\", \"T\"]\n # target_cols = [col for col in data.columns if col not in non_target_cols]\n target_cols = [\"T_scaled\"]\n feature_cols = [\"lon\", \"lat\", \"aod\"]\n features = data[feature_cols].copy()\n targets = data[target_cols].copy()\n indices = data[\"date_index\"].copy()\n\n # self.scaler = StandardScaler()\n # scaled_features = self.scaler.fit_transform(features)\n\n self.features = 
torch.from_numpy(features.values).float()\n        self.targets = torch.from_numpy(targets.values).float()\n        self.indices = torch.from_numpy(indices.values).long()\n\n        # standard scale features\n        # ? should I keep features saved if I use only scaled features\n        self.scaled_features = (\n            self.features - self.features.mean(dim=0)\n        ) / self.features.std(dim=0)\n\n    def __len__(self):\n        return len(self.features)\n\n    def __getitem__(self, idx):\n        scaled_features = self.scaled_features[idx]\n        targets = self.targets[idx]\n        indices = self.indices[idx]\n\n        return (scaled_features, targets, indices)\n\n    # -NOTE: maybe add inverse transform method for scaler\n\n\nclass OneCycleScheduler(_LRScheduler):\n    def __init__(self, optimizer, max_lr, total_steps, last_epoch=-1):\n        # set schedule parameters before super().__init__(), which triggers an initial get_lr() call\n        self.max_lr = max_lr\n        self.total_steps = total_steps\n        self.half_step = total_steps // 2\n        super().__init__(optimizer, last_epoch)\n\n    def get_lr(self):\n        if self.last_epoch < self.half_step:\n            return [\n                (base_lr + (self.max_lr - base_lr) * (self.last_epoch / self.half_step))\n                for base_lr in self.base_lrs\n            ]\n        elif self.last_epoch < 2 * self.half_step:\n            return [\n                (\n                    self.max_lr\n                    - (self.max_lr - base_lr)\n                    * ((self.last_epoch - self.half_step) / self.half_step)\n                )\n                for base_lr in self.base_lrs\n            ]\n        else:\n            return [\n                (\n                    base_lr\n                    - (\n                        base_lr\n                        * (self.half_step - 2 * self.half_step)\n                        / (self.total_steps - 2 * self.half_step)\n                    )\n                )\n                for base_lr in self.base_lrs\n            ]\n\n\nclass TrainingApp:\n    # TODO: need to add test/dataset in the init\n    def __init__(\n        self, model, optimizer, training_data, validation_data, test_data, sys_argv=None\n    ) -> None:\n        self.cli_args = self.parse_cli_args(sys_argv)\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        self.model = self.load_model(model)\n        self.optimizer = optimizer\n        self.training_data = training_data\n        self.validation_data = validation_data\n        self.test_data = test_data\n        self.loss_func = nn.MSELoss(reduction=\"none\")\n        self.best_val_loss = float(\"inf\")\n        self.patience_counter = 0\n        self.patience = 10\n\n    @staticmethod\n    def parse_cli_args(sys_argv):\n        parser = argparse.ArgumentParser()\n        parser.add_argument(\"--num-workers\", default=8, type=int)\n        parser.add_argument(\"--epochs\", default=1, type=int)\n        parser.add_argument(\"--batch-size\", default=32, type=int)\n        parser.add_argument(\"--name\", default=\"train\", type=str)\n        # ! provide argument for setting seed\n\n        return parser.parse_args(sys_argv)\n\n    def load_model(self, model):\n        if self.device.type == \"cuda\":\n            logger.info(f\"Using CUDA with {torch.cuda.device_count()} devices\")\n\n            # distribute over multiple GPUs if available\n            if torch.cuda.device_count() > 1:\n                model = nn.DataParallel(model)\n        model = model.to(self.device)\n\n        return model\n\n    def init_dataloaders(self, seed=1):\n        train_dataset = ClimateDataset(self.training_data)\n        val_dataset = ClimateDataset(self.validation_data)\n        test_dataset = ClimateDataset(self.test_data)\n\n        batch_size = self.cli_args.batch_size\n        if self.device.type == \"cuda\":\n            batch_size *= torch.cuda.device_count()\n\n        train_dl = DataLoader(\n            train_dataset,\n            batch_size=batch_size,\n            # ? 
does shuffle matter?\n shuffle=True,\n num_workers=self.cli_args.num_workers,\n pin_memory=self.device.type == \"cuda\",\n )\n \n val_dl = DataLoader(\n val_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=self.cli_args.num_workers,\n pin_memory=self.device.type == \"cuda\",\n )\n\n test_dl = DataLoader(\n test_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=self.cli_args.num_workers,\n pin_memory=self.device.type == \"cuda\",\n )\n\n logger.info(\n f\"Train size of: {len(train_dl)}\\n\"\n f\"Validation size of: {len(val_dl)}\\n\"\n f\"Test size of: {len(test_dl)}\"\n )\n return train_dl, val_dl, test_dl\n\n def train_one_epoch(self, epoch_index, train_dl):\n self.model.train()\n\n train_metrics = torch.zeros(len(train_dl.dataset), device=self.device)\n start_time = time.time()\n\n for batch_idx, batch_tuple in enumerate(train_dl):\n self.optimizer.zero_grad()\n\n loss = self.compute_batch_loss(batch_idx, batch_tuple, train_metrics, phase=\"train\")\n loss.backward()\n self.optimizer.step()\n\n # ? move to train function?\n if batch_idx % 10000 == 0:\n log_progress(epoch_index, batch_idx, len(train_dl), start_time)\n\n # average_loss = train_metrics.sum() / len(train_dl.dataset)\n # logger.info(\n # f\"Average training loss for epoch {epoch_index}: {average_loss.item()}\"\n # )\n\n return train_metrics.to(\"cpu\")\n\n def validate_one_epoch(self, epoch_index, val_dl):\n self.model.eval()\n\n val_metrics = torch.zeros(len(val_dl.dataset), device=self.device)\n start_time = time.time()\n\n # no gradients needed for validation\n with torch.no_grad():\n for batch_idx, batch_tuple in enumerate(val_dl):\n loss = self.compute_batch_loss(batch_idx, batch_tuple, val_metrics, phase=\"val\")\n\n # if batch_idx % 10 == 0:\n # log_progress(epoch_index, batch_idx, len(val_dl), start_time)\n\n # average_loss = val_metrics.sum() / len(val_dl.dataset)\n # logger.info(\n # f\"Average validation loss for epoch {epoch_index}: {average_loss.item()}\"\n # )\n\n return val_metrics.to(\"cpu\")\n\n def compute_batch_loss(self, batch_idx, batch_tuple, metrics, phase):\n features, targets, time_indices = batch_tuple\n\n features = features.to(self.device, non_blocking=True)\n targets = targets.to(self.device, non_blocking=True)\n time_indices = time_indices.to(self.device, non_blocking=True)\n\n preds = self.model(features, time_indices, phase)\n loss = self.loss_func(preds, targets)\n batch_size = features.size(0)\n start_idx = batch_idx * batch_size\n # handle the case where the last batch might be smaller than batch size\n end_idx = start_idx + targets.size(0)\n # detach as metrics don't need to hold gradients\n metrics[start_idx:end_idx] = loss.detach().squeeze()\n\n return loss.mean()\n\n def train(self):\n logger.info(f\"Starting training with {type(self).__name__, self.cli_args}\")\n\n train_dl, val_dl, _ = self.init_dataloaders()\n\n for epoch in range(1, self.cli_args.epochs + 1):\n train_metrics = self.train_one_epoch(epoch, train_dl)\n\n if epoch % 2 == 0 and val_dl is not None:\n val_metrics = self.validate_one_epoch(epoch, val_dl)\n\n # if self.patience_counter > self.patience:\n # logger.info(\"Early stopping triggered\")\n # break\n else:\n # carry forward last val metrics if not validating this epoch\n val_metrics = None if epoch==1 else val_metrics\n\n self.log_metrics(epoch, train_metrics, val_metrics)\n\n # check if directory exits\n model_directory = \"saved_models\"\n os.makedirs(model_directory, exist_ok=True)\n\n # save the model after training is complete\n 
model_path = os.path.join(model_directory, f\"model_{self.cli_args.name}.pth\")\n torch.save(self.model.state_dict(), model_path)\n logger.info(f\"Model saved to {model_path}\")\n\n # ! need to add test functionality \n def test(self, test_dl):\n pass\n\n def log_metrics(self, epoch_idx, train_metrics, val_metrics=None):\n train_loss = train_metrics.mean().item()\n val_loss = val_metrics.mean().item() if val_metrics is not None else None\n\n message = f\"Epoch {epoch_idx}, Training Loss: {train_loss:.4f}\"\n if val_loss is not None:\n if val_loss < self.best_val_loss:\n self.best_val_loss = val_loss\n self.patience_counter = 0\n else:\n self.patience_counter += 1\n\n message += f\", Validation Loss: {val_loss:.4f}\"\n\n logger.info(message)\n\n\n################################################\n############## Helper functions ################\n#################################################\n\n\ndef log_progress(epoch_idx, batch_idx, num_batches, start_time):\n elapsed_time = time.time() - start_time\n batches_left = num_batches - batch_idx\n estimated_total_time = elapsed_time / (batch_idx + 1) * num_batches\n estimated_end_time = start_time + estimated_total_time\n estimated_time_left = estimated_end_time - time.time()\n\n end_time_str = dt.datetime.fromtimestamp(estimated_end_time).strftime(\n \"'%Y-%m-%d %H:%M:%S'\"\n )\n estimated_time_left_str = str(dt.timedelta(seconds=estimated_time_left)).split(\".\")[\n 0\n ]\n\n logger.info(\n f\"Epoch {epoch_idx}, Batch {batch_idx}/{num_batches}: \"\n f\"{batches_left} batches left, \"\n f\"Estimated completion at {end_time_str}, \"\n f\"Time left: {estimated_time_left_str}\"\n )\n","repo_name":"GregoryTomy/pinatubo-eruption","sub_path":"modules/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":10980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43262570173","text":"import docx2txt\nimport xlsxwriter\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nresumes = [\"JohnnyDeppResume.docx\",\"JohnWickResume.docx\",\"MannyJohnsResume.docx\",\"TimothyFlowersResume.docx\"]\ncandidates=[\"JohnnyDeppResume\",\"JohnWickResume\",\"MannyJohnsResume\",\"TimothyFlowersResume\"]\noutWorkbook = xlsxwriter.Workbook(\"scores.xlsx\")\noutsheet = outWorkbook.add_worksheet(\"scores\")\noutsheet.write(\"A1\",\"Candidate Name\")\noutsheet.write(\"B1\",\"Resume Score\")\nalphas =\"AB\"\nnames = []\nscrs = []\nfor i in range(len(resumes)):\n names.append(alphas[0]+str(i+2))\n scrs.append(alphas[1]+str(i+2))\nprint(\"Similarity scores of each candidates are listed below:\")\nscores={}\nfor member in range(len(resumes)):\n resume = docx2txt.process(resumes[member])\n job_description = docx2txt.process(\"Web Developer Job Description.docx\")\n text = [resume, job_description]\n count_vector= CountVectorizer()\n count_matrix = count_vector.fit_transform(text)\n #print(\"\\n Similarity Scores:\")\n #print(cosine_similarity(count_matrix))\n match_percentage = cosine_similarity(count_matrix)[0][1]*100\n match_percentage= round(match_percentage,2)\n scores[candidates[member]]=match_percentage\n outsheet.write(names[member],candidates[member])\n outsheet.write(scrs[member],match_percentage)\n\noutWorkbook.close()\n","repo_name":"pulasandeep/AI-Recruiter-Bot","sub_path":"code/Resume 
Analysis/ResumeAnalysis.py","file_name":"ResumeAnalysis.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1542246519","text":"import fileinput\nimport re\nimport numpy\n\nclass Die:\n def __init__(self):\n self.next = 1\n self.throws = 0\n def roll(self):\n ret = self.next\n self.next = (self.next) % 100 + 1\n self.throws += 1\n return ret\n\n def getNoOfThrows(self):\n return self.throws\n\nclass Game:\n def __init__(self, p1start, p2start):\n self.p1 = p1start\n self.p2 = p2start\n self.p1Score = 0\n self.p2Score = 0\n def moveP1(self, steps):\n self.p1 = (self.p1 + steps -1) % 10 +1\n self.p1Score += self.p1\n return (self.p1Score >= 1000)\n def moveP2(self, steps):\n self.p2 = (self.p2 + steps -1) % 10 +1\n self.p2Score += self.p2\n return (self.p2Score >= 1000)\n def getP1Score(self):\n return self.p1Score\n def getP2Score(self):\n return self.p2Score\n\ndef sum2tuple(tuple1, tuple2):\n return (tuple1[0]+tuple2[0], tuple1[1]+tuple2[1])\n\ndef scale2tuple(scale, tuple):\n return (scale*tuple[0], scale*tuple[1])\ngamestates = {}\ndef playMultiverseGame(p1pos, p2pos, p1score, p2score, turn):\n dist = {3: 1, 4: 3, 5: 6, 6: 7, 7: 6, 8: 3, 9: 1 }\n if p1score >= 21:\n assert turn == 'p2'\n return (1,0)\n if p2score >= 21:\n assert turn == 'p1'\n return (0,1)\n if (p1pos, p2pos, p1score, p2score, turn) in gamestates:\n return gamestates[((p1pos, p2pos, p1score, p2score, turn))]\n\n if turn == 'p1':\n result = (0,0)\n for die, mult in dist.items():\n new_p1pos = (p1pos + die -1) % 10 +1\n new_p1score = p1score + new_p1pos\n wins = playMultiverseGame(new_p1pos, p2pos, new_p1score, p2score, 'p2')\n result = sum2tuple(result, scale2tuple(mult, wins))\n return result\n elif turn == 'p2':\n result = (0,0)\n for die, mult in dist.items():\n new_p2pos = (p2pos + die -1) % 10 +1\n new_p2score = p2score + new_p2pos\n wins = playMultiverseGame(p1pos, new_p2pos, p1score, new_p2score, 'p1')\n result = sum2tuple(result, scale2tuple(mult, wins))\n gamestates[p1pos, p2pos, p1score, p2score, turn] = result\n return result\n\ndef AoC21():\n startPlaces = dict()\n for line in fileinput.input(files='input/day21.txt'):\n line = line.strip()\n regex = re.compile(r'Player (?P\\d) starting position\\: (?P\\d)')\n match = regex.search(line)\n startPlaces[int(match.group('playerNo'))] = int(match.group('startPos'))\n\n #startPlaces[1] = 4\n #startPlaces[2] = 8\n game = Game(startPlaces[1], startPlaces[2])\n die = Die()\n p1won = False\n p2won = False\n\n while True:\n totalDie = 0\n for _ in range(3):\n totalDie += die.roll()\n p1won = game.moveP1(totalDie)\n if p1won:\n break\n totalDie = 0\n for _ in range(3):\n totalDie += die.roll()\n p2won = game.moveP2(totalDie)\n if p2won:\n break\n dieThrown = die.getNoOfThrows()\n if p1won:\n score = game.getP2Score()\n if p2won:\n score = game.getP1Score()\n print(f\"When the game end the loser have a score of {score} ,and {dieThrown} dice have been thrown.\")\n print(f\"The product of the two is {score*dieThrown}\\n\")\n\n# wins = playMultiverseGame(startPlaces[1], startPlaces[2], 0, 0, 'p1')\n wins = playMultiverseGame(1, 3, 0, 0, 'p1')\n print(f\"Player 1 wins {wins[0]} games and Player 2 wins {wins[1]} games\")\n winner = 'Player 1' if wins[0] > wins[1] else 'Player 2'\n print(f\"Most wins have {winner} with {max(wins)} 
wins!\")","repo_name":"MiLeW/AdventOfCode","sub_path":"2021/AoC21.py","file_name":"AoC21.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74406530186","text":"#!/usr/bin/env python3.1\n\nfrom tkinter import *\n\n\nclass Application(object):\n def __init__(self, parent):\n \n self.propertyname = StringVar()\n self.propertyname.set('foreground') # valore di default\n self.propertyvalue = StringVar()\n self.propertyvalue.set('green')\n self.parent = parent\n\n properties = ['background',\n 'foreground',\n 'activebackground',\n 'disabledforeground',\n 'highlightcolor',\n 'highlightbackground',\n 'width',\n 'height',\n 'padx',\n 'pady',\n ]\n\n self.PropertyMenu = OptionMenu(parent, self.propertyname, *properties)\n self.PropertyMenu.pack()\n\n self.MyInputBox = Entry(parent, textvariable=self.propertyvalue)\n self.MyInputBox.pack()\n\n self.MyButton = Button(parent, text=\"Fai clic qui\")\n self.MyButton['background']=\"#FFFFFF\"\n self.MyButton['foreground']=\"red\"\n self.MyButton['command']=self.MyButton_Click\n self.MyButton.pack({\"side\":\"top\", \"padx\": 10, \"pady\": 20})\n\n self.StatusBar = Label(parent, text=\"...\")\n self.StatusBar['background']=\"#FFFFFF\"\n self.StatusBar['foreground']=\"blue\"\n self.StatusBar.pack({\"side\":\"bottom\", \"expand\":\"yes\", \"fill\":\"x\"})\n\n def MyButton_Click(self):\n print('name:', self.propertyname.get())\n print('value', self.propertyvalue.get())\n self.MyButton[self.propertyname.get()]=self.propertyvalue.get()\n\n\ndef main():\n root = Tk()\n myapp = Application(root)\n root.mainloop() \n\nif __name__=='__main__':\n main()\n \n","repo_name":"loristissino/oopython","sub_path":"lessons/19/select_box.pyw","file_name":"select_box.pyw","file_ext":"pyw","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71276736905","text":"from dateutil.relativedelta import relativedelta as rd\r\nfrom configparser import ConfigParser\r\nfrom datetime import datetime as dt\r\nimport tweepy\r\n\r\n\r\n\r\nclass TwitterApi():\r\n\r\n def __init__(self, img=None, date = None, msg=None, username=None,token=None , token_secret=None):\r\n self.img = img\r\n self.date = date\r\n self.msg = msg\r\n self.uname = username\r\n self.token = token\r\n self.token_secret = token_secret\r\n\r\n config = ConfigParser()\r\n config.read('config.ini')\r\n self.key = config['tw_creds']['app_key']\r\n self.secret = config['tw_creds']['app_secret']\r\n\r\n try:\r\n\r\n self.auth = tweepy.OAuthHandler(self.key, self.secret)\r\n self.auth.set_access_token(self.token,self.token_secret)\r\n self.api = tweepy.API(self.auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)\r\n \r\n except tweepy.error.TweepError as error:\r\n print({'error': error})\r\n\r\n def user_details(self):\r\n \r\n try:\r\n me = self.api.me()\r\n details = dict()\r\n\r\n details['user_id'] = me.id\r\n details['name'] = me.name\r\n details['followers'] = me.followers_count\r\n details['followings'] = me.friends_count\r\n details['screen_name'] = me.screen_name\r\n details['user_time_zone'] = me.time_zone\r\n\r\n return details\r\n \r\n except tweepy.error.TweepError as error:\r\n return {'error': error}\r\n \r\n except KeyError as error:\r\n return {'error': error}\r\n\r\n def get_tweets(self):\r\n\r\n try:\r\n tweets_data = dict()\r\n tweets_data['tweets'] = []\r\n tweets_data['id'] = []\r\n tweets_data['created_on'] = 
[]\r\n tweets_data['retweet_count'] = []\r\n tweets_data['favorite_count'] = []\r\n tweets_data['language'] = []\r\n\r\n tweets = self.api.user_timeline(tweet_mode='extended')\r\n\r\n for i in tweets:\r\n tweets_data['tweets'].append(i.full_text)\r\n tweets_data['id'].append(i.id)\r\n tweets_data['created_on'].append(i.created_at)\r\n tweets_data['retweet_count'].append(i.retweet_count)\r\n tweets_data['favorite_count'].append(i.favorite_count)\r\n tweets_data['language'].append(i.lang)\r\n\r\n return tweets_data\r\n \r\n except tweepy.error.TweepError as error:\r\n return {'error': error}\r\n \r\n except KeyError as error:\r\n return {'error': error}\r\n\r\n def get_replies(self):\r\n \r\n try:\r\n mentions = []\r\n for i in tweepy.Cursor(self.api.search,q=f\"to:{self.uname}\",count=200,tweet_mode='extended').items(1000):\r\n\r\n rid = i.in_reply_to_status_id\r\n replied_user_sname = i.user.screen_name\r\n replied_user_name = i.user.name\r\n reply = i.full_text\r\n tweet = None\r\n tweet_id = None\r\n\r\n try:\r\n tweet_data = self.api.get_status(rid, tweet_mode='extended')\r\n tweet = tweet_data.full_text\r\n tweet_id = tweet_data.id\r\n except tweepy.error.TweepError:\r\n pass\r\n \r\n reply_time = i.created_at\r\n beforemonth = rd(months=-2) + dt.now()\r\n if reply_time >= beforemonth:\r\n if tweet is not None:\r\n mentions.append((replied_user_name,replied_user_sname,reply,tweet,tweet_id))\r\n else:\r\n break\r\n\r\n return mentions\r\n \r\n except tweepy.error.TweepError as error:\r\n return {'error': error}\r\n \r\n except ValueError as error:\r\n return {'error': error}\r\n \r\n except KeyError as error:\r\n return {'error': error}\r\n\r\n def post_tweet(self):\r\n\r\n try:\r\n if self.img is not None:\r\n print('tw - posting with the img')\r\n self.api.update_with_media(self.img,self.msg)\r\n else:\r\n print('tw - posting without img')\r\n self.api.update_status(self.msg)\r\n\r\n except tweepy.error.TweepError as error:\r\n return {'error': error}","repo_name":"k00lawn/OnePostMan","sub_path":"backend/pythonserver/tw_class.py","file_name":"tw_class.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39834530924","text":"def solution(triangle):\n n = len(triangle)\n dp=[[0 for _ in range(i+1)] for i in range(n)]\n dp[0]=triangle[0]\n for i in range(1,n):\n dp[i][0] = dp[i-1][0]+triangle[i][0]\n dp[i][-1] = dp[i-1][-1]+triangle[i][-1]\n for j in range(1,len(triangle[i])-1):\n dp[i][j] = max(dp[i-1][j-1], dp[i-1][j])+triangle[i][j]\n return max(dp[n-1])","repo_name":"devhyojin/Algorithm","sub_path":"Programmers/[Programmers]코딩테스트고득점Kit_동적계획법_정수삼각형.py","file_name":"[Programmers]코딩테스트고득점Kit_동적계획법_정수삼각형.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"33487812174","text":"'''\nGiven a binary tree, flatten it to a linked list in-place.\n\nFor example, given the following tree:\n\n 1\n / \\\n 2 5\n / \\ \\\n3 4 6\nThe flattened tree should look like:\n\n1\n \\\n 2\n \\\n 3\n \\\n 4\n \\\n 5\n \\\n 6\n'''\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def flatten(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n top =dummpy = TreeNode(None)\n stack = [root]\n while stack:\n node = 
stack.pop()\n            if not node: continue\n            stack.append(node.right)\n            stack.append(node.left)\n            dummpy.right = node\n            dummpy.left = None\n            dummpy = node","repo_name":"XiongQiuQiu/leetcode-slove","sub_path":"Algorithms/114-Flatten-Binary-Tree-to-Linked-List.py","file_name":"114-Flatten-Binary-Tree-to-Linked-List.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13166529891","text":"# =============================================================================\r\n# LDA functions\r\n# =============================================================================\r\nimport csv\r\nimport os\r\nimport re\r\nimport operator\r\nimport matplotlib.pyplot as plt\r\nimport warnings\r\nimport gensim\r\nimport numpy as np\r\nimport nltk\r\nfrom nltk.tokenize import RegexpTokenizer\r\nwarnings.filterwarnings('ignore')\r\n\r\nimport tqdm \r\n\r\nfrom gensim.models import LdaModel\r\nfrom gensim.models import CoherenceModel, LdaModel, LsiModel, HdpModel\r\nfrom gensim.models.wrappers import LdaMallet\r\nfrom gensim.corpora import Dictionary\r\nfrom pprint import pprint\r\n\r\nimport pyLDAvis.gensim\r\n#%matplotlib inline\r\nfrom IPython import get_ipython\r\nget_ipython().run_line_magic('matplotlib', 'inline')\r\n\r\n\"stopwords 3!!!\"\r\nf = open('stopwords3.txt', 'r')\r\nstopwords3 = f.read().split(\", \")\r\nf.close()\r\n#\r\ndef remove_stopwords2(sentence):\r\n    newsentence = \"\"\r\n    tokens = nltk.word_tokenize(sentence)\r\n    words = [word for word in tokens if word not in stopwords3]\r\n    newsentence = \" \".join(str(x) for x in words)\r\n    return newsentence\r\n\r\n\"test to remove station names\"\r\n\r\ndef replace_stn_names(sentence):\r\n    newsentence = re.sub('|'.join(r'\\\b%s\\\b' % re.escape(s) for s in stationnames),\"\", sentence)\r\n    nltktokenizer = RegexpTokenizer(\"[\\\\w']+|[^\\\\w\\\\s]+\")\r\n    tokens = nltktokenizer.tokenize(newsentence) \r\n    return \" \".join(str(x) for x in tokens)\r\n\r\n\"standardize mrt, lrt, and monorail etc.\"\r\ndef standardize4(sentence): \r\n    newsentence = \"\"\r\n    wordlist = []\r\n    sentence = str(sentence)\r\n    nltktokenizer = RegexpTokenizer(\"[\\\\w']+|[^\\\\w\\\\s]+\")\r\n    tokens = nltktokenizer.tokenize(sentence) \r\n    for token in tokens:\r\n        if token in [\"Lrt\", \"Mrt\", \"Monorail\", \"train\",\"Rapidkl\"]:\r\n            wordlist.append(\"\") \r\n        else:\r\n            wordlist.append(token)\r\n    newsentence = \" \".join(str(x) for x in wordlist) \r\n    return newsentence\r\n\r\ndef make_bigrams(texts):\r\n    return [bigram_mod[doc] for doc in texts]\r\n\r\ndef make_trigrams(texts):\r\n    return [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n\r\n\r\n\"to use lda model to predict new doc\"\r\ndef pre_new(doc):\r\n# one = cleaning(doc).split()\r\n    one = doc.split() #remove cleaning\r\n    two = dictionary.doc2bow(one)\r\n    return two\r\n\r\ndef belong(sentence):\r\n    overall = loading[(pre_new(sentence))]\r\n    return overall\r\n\r\ndef topic_belong(sentence):\r\n    elements = loading[(pre_new(sentence))]\r\n    n=1\r\n    scores = [x[n] for x in elements]\r\n    return scores\r\n\r\n\r\n# To produce graph\r\ndef evaluate_graph(dictionary, corpus, texts, limit):\r\n    \"\"\"\r\n    Function to display num_topics - LDA graph using c_v coherence\r\n    \r\n    Parameters:\r\n    ----------\r\n    dictionary : Gensim dictionary\r\n    corpus : Gensim corpus\r\n    limit : topic limit\r\n    \r\n    Returns:\r\n    -------\r\n    lm_list : List of LDA topic models\r\n    c_v : Coherence values corresponding to the LDA model with respective number of topics\r\n    \"\"\"\r\n    c_v = []\r\n    lm_list = []\r\n    for 
num_topics in range(1, limit):\r\n lm = LdaModel(corpus=corpus, num_topics=num_topics, id2word=dictionary, passes=100, random_state=100)\r\n lm_list.append(lm)\r\n cm = CoherenceModel(model=lm, texts=texts, dictionary=dictionary, coherence='c_v')\r\n c_v.append(cm.get_coherence())\r\n \r\n # Show graph\r\n x = range(1, limit)\r\n plt.plot(x, c_v)\r\n plt.xlabel(\"num_topics\")\r\n plt.ylabel(\"Coherence score\")\r\n plt.legend((\"c_v\"), loc='best')\r\n plt.show()\r\n \r\n return lm_list, c_v\r\n\r\n \r\ndef ret_top_model():\r\n \"\"\"\r\n Since LDAmodel is a probabilistic model, it comes up different topics each time we run it. To control the\r\n quality of the topic model we produce, we can see what the interpretability of the best topic is and keep\r\n evaluating the topic model until this threshold is crossed. \r\n \r\n Returns:\r\n -------\r\n lm: Final evaluated topic model\r\n top_topics: ranked topics in decreasing order. List of tuples\r\n \"\"\"\r\n top_topics = [(0, 0)]\r\n while top_topics[0][1] < 0.97:\r\n lm = LdaModel(corpus=corpus, id2word=dictionary)\r\n coherence_values = {}\r\n for n, topic in lm.show_topics(num_topics=-1, formatted=False):\r\n topic = [word for word, _ in topic]\r\n cm = CoherenceModel(topics=[topic], texts=train_texts, dictionary=dictionary, window_size=10)\r\n coherence_values[n] = cm.get_coherence()\r\n top_topics = sorted(coherence_values.items(), key=operator.itemgetter(1), reverse=True)\r\n return lm, top_topics\r\n\r\n# For tuning - long runtime!!\r\n\"For hyperparameter tuning\"\r\n\r\ndef compute_coherence_values(corpus, dictionary, k, a, b):\r\n \r\n lda_model = gensim.models.LdaMulticore(corpus=corpus,\r\n id2word=dictionary,\r\n num_topics=k, \r\n random_state=100,\r\n chunksize=100,\r\n passes=50,\r\n alpha=a,\r\n eta=b)\r\n \r\n coherence_model_lda = CoherenceModel(model=lda_model, texts=texts, dictionary=dictionary, coherence='c_v')\r\n \r\n return coherence_model_lda.get_coherence()\r\n\r\n","repo_name":"NoraishaYusuf/Research-Project-WQD7002","sub_path":"RP_LDA_functions.py","file_name":"RP_LDA_functions.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73708802825","text":"import math\n\n\ndef max_common_divisor_except_self(number):\n if number <= 1:\n return None # 自然数必须大于1才能计算最大公约数\n max_gcd = 1\n for i in range(2, number):\n if number % i == 0:\n max_gcd = max(max_gcd, math.gcd(number, i))\n return max_gcd\n\n\n# 测试示例\nnumber = int(input(\"请输入自然数:\"))\nresult = max_common_divisor_except_self(number)\nif result is not None:\n print(f\"{number}最大公约数为: {result}\")\nelse:\n print(f\"{number}不是一个有效的自然数。\")\n","repo_name":"FoggyMemories/python_class_work","sub_path":"Python数据分析实践/work_2023/spet_12/p54/T3_3.py","file_name":"T3_3.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36459307227","text":"\"\"\"empty message\n\nRevision ID: 92ac74c278ca\nRevises: 2807edd385a2\nCreate Date: 2022-04-11 19:39:10.642399\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '92ac74c278ca'\ndown_revision = '2807edd385a2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('calendar',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('appointments', sa.String(length=120), nullable=False),\n sa.Column('is_active', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('appointments')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('calendar')\n # ### end Alembic commands ###\n","repo_name":"intracosm/Final-project-","sub_path":"migrations/versions/92ac74c278ca_.py","file_name":"92ac74c278ca_.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38026711136","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nclass BasicConv2d(nn.Module):\n def __init__(self, input_dim, output_dim, kernel_size, bn):\n super().__init__()\n self.conv = nn.Conv2d(\n input_dim, output_dim, \n kernel_size=kernel_size, \n padding=(kernel_size[0] // 2, kernel_size[1] // 2)\n )\n self.bn = nn.BatchNorm2d(output_dim) if bn else None\n\n def forward(self, x):\n h = self.conv(x)\n h = self.bn(h) if self.bn is not None else h\n return h\n\n\nclass LuxNet(nn.Module):\n def __init__(self):\n super().__init__()\n layers, filters = 12, 32\n self.conv0 = BasicConv2d(20, filters, (3, 3), True)\n self.blocks = nn.ModuleList([BasicConv2d(filters, filters, (3, 3), True) for _ in range(layers)])\n self.head_p = nn.Linear(filters, 5, bias=False)\n\n def forward(self, x):\n h = F.relu_(self.conv0(x))\n for block in self.blocks:\n h = F.relu_(h + block(h))\n h_head = (h * x[:,:1]).view(h.size(0), h.size(1), -1).sum(-1)\n p = self.head_p(h_head)\n return p","repo_name":"lannguyen0910/lux-rl","sub_path":"imitation-learning/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"32853485966","text":"#Inicializo listas de pasajeros y ciudades\r\nlista_pasajeros = []\r\nlista_ciudades = []\r\n\r\n#Función para agregar pasajeros a la lista de viajeros\r\ndef agregar_pasajero():\r\n nombre = input(\"Nombre del pasajero: \")\r\n dni = int(input(\"DNI del pasajero: \"))\r\n destino = input(\"Ciudad de destino: \")\r\n lista_pasajeros.append((nombre, dni, destino))\r\n print(\"Pasajero agregado exitosamente.\")\r\n\r\n#Función para agregar ciudades a la lista de ciudades\r\ndef agregar_ciudad():\r\n ciudad = input(\"Nombre de la ciudad: \")\r\n pais = input(\"País al que pertenece: \")\r\n lista_ciudades.append((ciudad, pais))\r\n print(\"Ciudad agregada exitosamente.\")\r\n\r\n#Función para buscar la ciudad de destino de un pasajero por DNI\r\ndef buscar_ciudad_por_dni():\r\n dni = int(input(\"DNI del pasajero: \"))\r\n for pasajero in lista_pasajeros:\r\n if pasajero[1] == dni:\r\n destino = pasajero[2]\r\n print(f\"El pasajero con DNI {dni} viaja a {destino}.\")\r\n return\r\n print(f\"No se encontró un pasajero con DNI {dni}.\")\r\n\r\n#Función para mostrar la cantidad de pasajeros que viajan a una ciudad\r\ndef contar_pasajeros_por_ciudad():\r\n ciudad = input(\"Ciudad: \")\r\n count = sum(1 for pasajero in lista_pasajeros if pasajero[2] == ciudad)\r\n print(f\"La cantidad de pasajeros que viajan a {ciudad} es {count}.\")\r\n\r\n#Función para buscar el país de destino de un pasajero por DNI\r\ndef buscar_pais_por_dni():\r\n dni = int(input(\"DNI del pasajero: \"))\r\n for pasajero in 
lista_pasajeros:\r\n if pasajero[1] == dni:\r\n destino = pasajero[2]\r\n for ciudad, pais in lista_ciudades:\r\n if ciudad == destino:\r\n print(f\"El pasajero con DNI {dni} viaja a {destino}, en el país {pais}.\")\r\n return\r\n print(f\"No se encontró un pasajero con DNI {dni}.\")\r\n\r\n#Función para mostrar la cantidad de pasajeros que viajan a un país\r\ndef contar_pasajeros_por_pais():\r\n pais = input(\"País: \")\r\n count = sum(1 for pasajero in lista_pasajeros for ciudad, p in lista_ciudades if ciudad == pasajero[2] and p == pais)\r\n print(f\"La cantidad de pasajeros que viajan a {pais} es {count}.\")\r\n\r\n#Menú iterativo\r\nwhile True:\r\n print(\"\\n--- Menú ---\")\r\n print(\"1. Agregar pasajero\")\r\n print(\"2. Agregar ciudad\")\r\n print(\"3. Ver ciudad por DNI\")\r\n print(\"4. Ver cantidad de pasajeros por ciudad\")\r\n print(\"5. Ver país por DNI\")\r\n print(\"6. Ver cantidad de pasajeros por país\")\r\n print(\"7. Salir\")\r\n \r\n opcion = input(\"Seleccione una opción: \")\r\n \r\n if opcion == '1':\r\n agregar_pasajero()\r\n elif opcion == '2':\r\n agregar_ciudad()\r\n elif opcion == '3':\r\n buscar_ciudad_por_dni()\r\n elif opcion == '4':\r\n contar_pasajeros_por_ciudad()\r\n elif opcion == '5':\r\n buscar_pais_por_dni()\r\n elif opcion == '6':\r\n contar_pasajeros_por_pais()\r\n elif opcion == '7':\r\n print(\"Gracias por usar el programa. ¡Hasta luego!\")\r\n break\r\n else:\r\n print(\"Opción no válida. Por favor, elija una opción válida.\")\r\n","repo_name":"Joako64110/TrabajosS1-ProgramacionI-Comision4","sub_path":"Ejercicios de Variables Dimensionadas/primer_ejercico.py","file_name":"primer_ejercico.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17043247685","text":"import random, sys\nfrom app import app, db\nfrom flask_security.utils import hash_password\nfrom app.models import User, Role, Question, Answer\nfrom app.security import user_datastore\n\n\ndef role_gen():\n #Генерация ролей\n user_datastore.create_role(name='admin', description='Administrator')\n user_datastore.create_role(name='manager', description='can manage questions and question lists')\n user_datastore.create_role(name='user', description='basic role')\n db.session.commit()\n print('Roles generation done')\n\ndef add_roles_to_user(user_id):\n user = User.query.get(user_id)\n user_datastore.add_role_to_user(user, Role.query.filter(Role.name == 'admin').first())\n user_datastore.add_role_to_user(user, Role.query.filter(Role.name == 'manager').first())\n db.session.commit()\n print('Roles was add')\n\ndef question_generator(manager_id, number):\n # Генерация вопросов\n n = int(number)\n id = int(manager_id)\n for i in range(0, n):\n print(i)\n new_question = Question(text='Question {}'.format(i), manager=id, single_answer=random.choice([True, False]))\n db.session.add(new_question)\n db.session.commit()\n new_q_id = new_question.id\n answer1 = Answer(text='Answer 1 for Question {}'.format(i), question=new_q_id)\n db.session.add(answer1)\n answer2 = Answer(text='Answer 2 for Question {}'.format(i), question=new_q_id)\n db.session.add(answer2)\n answer3 = Answer(text='Answer 3 for Question {}'.format(i), question=new_q_id)\n db.session.add(answer3)\n answer4 = Answer(text='Answer 4 for Question {}'.format(i), question=new_q_id)\n db.session.add(answer4)\n db.session.commit()\n print('questions was generated')\n\ndef help():\n print('Заполнение базы первичными данными.')\n 
print('usage: python gendata.py [option args]')\n print(' rolesgen - без аргументов, добавляет в таблицу роли админа, менеджера и юзера')\n print(' roles-to-user id - аргумент id пользователя, добавит ему роль менеджера и админа')\n print(' qgen id num - сгенерирует менеджеру с id вопросы в колличестве num')\n print(' help - эта справка')\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('Внимание чтобы добавить роль пользователю создайте его!')\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2 or sys.argv[1] == 'help':\n help()\n elif sys.argv[1] == 'rolesgen':\n role_gen()\n elif sys.argv[1] == 'roles-to-user':\n add_roles_to_user(sys.argv[2])\n elif sys.argv[1] == 'qgen':\n question_generator(sys.argv[2], sys.argv[3])","repo_name":"umqambi/fp-backend","sub_path":"gendata.py","file_name":"gendata.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39507539823","text":"for T in range(1,int(input())+1):\n t,c=input().split()\n t=list(t)\n c=int(c)\n ans=0\n for i in range(len(t)-c+1):\n if t[i]=='-':\n ans+=1\n for j in range(i,i+c): t[j]='+' if t[j]=='-' else '-'\n if not '-' in t: print('Case #%d:'%T,ans)\n else: print('Case #%d: IMPOSSIBLE'%T)","repo_name":"njw1204/BOJ-AC","sub_path":"problem/10000~19999/14788/14788.py3.py","file_name":"14788.py3.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"12938512048","text":"# PROBLEM 67\n\nimport math\n\n# efficiently compute euler phi\n# start with a sieve\n\ndef sieve(n):\n\tprimeBool = [0]*(n+1)\n\tprimeList = []\n\n\tfor i in range(2, n+1):\n\t\tif primeBool[i] == 0:\n\t\t\tprimeList.append(i)\n\n\t\t\tj = 2\n\t\t\twhile (i*j <= n):\n\t\t\t\tprimeBool[i*j] = 1\n\t\t\t\tj += 1\n\treturn primeList\n\nprimes = sieve(1000001)\n\ndef eulerPhi(n):\n\n\tval = n\n\n\ti = 0\n\twhile primes[i]**2 <= n:\n\t\tif n%primes[i] == 0:\n\t\t\tval -= n//primes[i]\n\t\t\twhile n%primes[i] == 0:\n\t\t\t\tn = n//primes[i]\n\t\ti += 1\n\tif n>1:\n\t\tval -= val//n\n\t\n\treturn val\n\n# We can use the functions above but an analytical solution\n# is much nicer. Some manipulation shows that we want \n# to maximize prod_{p|n} p/(p-1). p/(p-1) is a decreasing\n# function, to maximize, we choose p as small as possible\n\nprimes = [2,3,5,7,11,13,17,19,23,29,31,37,41]\nn = 1\nk = 0\nwhile n*primes[k] <= 1000000:\n\tn *= primes[k]\n\tk += 1\nprint(n) \n\n\n\n","repo_name":"wgrewe/Project-Euler","sub_path":"69prob.py","file_name":"69prob.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9602339079","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User\nfrom tribus.web.profile.forms import data_change\nfrom tribus.web.registration.ldap.utils import edit_ldap_user\nfrom waffle.decorators import waffle_switch\n\n@waffle_switch('profile')\ndef SearchProfile(request, nick):\n \"\"\"\n Vista que busca el perfil de un usuario si existe y lo redirecciona a su muro,\n sino redirecciona al perfil de usuario logeado. 
En caso de que no este logueado\n redirecciona a la pagina principal.\n \n @param request: request \n @param nick: string que representa el username del usuario a quien buscas. \n \"\"\"\n if request.user.is_authenticated():\n try:\n user = User.objects.get(username=request.user.username)\n except:\n user = None\n try:\n user_view = User.objects.get(username=nick)\n except:\n return HttpResponseRedirect('/profile')\n\n if request.user.username == nick:\n return HttpResponseRedirect('/profile')\n\n # Cargamos la librería AngujarJS junto con sus plugins\n render_js = ['angular', 'angular.sanitize', 'angular.resource',\n 'angular.infinite-scroll', 'angular.bootstrap',\n 'angular.moment']\n\n # Cargamos las funciones de Tribus para AngularJS\n render_js += ['controllers.angular', 'services.angular',\n 'elements.angular', 'profiles.angular',\n 'navbar.angular']\n\n # Cargamos otras funciones adicionales\n render_js += ['moment', 'md5']\n\n return render(request, 'profile/profiles_view.html', {\n 'render_js': render_js,\n 'user': user,\n 'user_view': user_view,\n })\n return HttpResponseRedirect(\"/\")\n\n\n@waffle_switch('profile')\ndef UserProfile(request):\n \"\"\"\n vista que verifica que maneja la peticion del perfil del usuario logueado,\n incluyendo el manejo del formulario de cambio de datos.\n \"\"\"\n\n # Cargamos la librería AngujarJS junto con sus plugins\n render_js = ['angular', 'angular.sanitize', 'angular.resource',\n 'angular.infinite-scroll', 'angular.bootstrap',\n 'angular.moment']\n\n # Cargamos las funciones de Tribus para AngularJS\n render_js += ['controllers.angular', 'services.angular',\n 'elements.angular', 'profiles.angular',\n 'navbar.angular']\n\n # Cargamos otras funciones adicionales\n render_js += ['moment', 'md5']\n\n if request.user.is_authenticated():\n if request.method == \"POST\":\n u = User.objects.get(username__exact = request.user.username)\n u.description = request.POST['descripcion']\n if 'emailVisible' in request.POST:\n u.emailVisible = request.POST['emailVisible']\n else:\n u.emailVisible = False\n #u.emailVisible = request.POST['emailVisible']\n # u.email = request.POST['email']\n u.save()\n edit_ldap_user(u)\n return HttpResponseRedirect('/profile')\n\n else:\n form = data_change()\n return render(request, 'profile/profiles.html', {\n 'render_js': render_js,\n 'user_view': request.user,\n 'editForm': form\n })\n\n return HttpResponseRedirect('/')\n","repo_name":"CanaimaKueka/tribus","sub_path":"tribus/web/profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"es","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"5250465321","text":"# 폰켓몬\n\"\"\"\nN 마리 중에서 N/2 마리를 가짐\n-> 최대한 다양한 종류의 폰켓몬을 가지도록\n-> 그때의 폰켓몬 종류 번호의 개수 반환\n\n중복되는 번호는 의미가 없음 -> 중복 제거 -> set로 바꿈\nN/2보다 갯수가 적으면, 모든 종류의 폰켓몬을 선택해야함\nN/2보다 갯수가 많으면, 아무리 가져도 N/2개이므로 N/2\n\"\"\"\n\n\ndef solution(nums):\n N = len(nums)\n count = N / 2\n\n answer = min(len(set(nums)), N / 2)\n\n return answer","repo_name":"ribo0715/algorithm_solution","sub_path":"프로그래머스/폰켓몬.py","file_name":"폰켓몬.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8271039332","text":"print(\"\\n\\n==== VERIFICAÇÃO DE NOTAS DOS ESTUDANTES ==== \\n\")\r\n\r\ndef cad_nota():\r\n nome = input(\"Digite o nome do Aluno: \")\r\n while nome.isnumeric():\r\n print(\"\\n\\nOPS, OCORREU UM ERRO TENTE NOVAMENTE, O nome deve conter apenas letras! 
Tente novamente.\\n\\n\")\r\n cad_nota()\r\n continue\r\n nota1 = float((input(f\"Digite o Valor da primeira nota do(a) {nome}: \")))\r\n nota2 = float((input(f\"Digite o valor da segunda nota do(a) {nome}: \")))\r\n nota3 = float((input(f\"Digite o valor da terceira nota do(a) {nome} \")))\r\n nota4 = float((input(f'Digite o valor da quarta nota do(a) {nome} ')))\r\n media = float((nota1 + nota2 + nota3 + nota4) / 4)\r\n while (media > 10) or (media < 0):\r\n print(\"\\nOPS, ALGO DEU ERRADO, Verifique os valores e tente novamente, A média não pode ser menor que 0 \",\r\n end=\" \")\r\n print(\"e nem maior que 10. Tente novamente!\\n\\n\")\r\n cad_nota()\r\n continue\r\n\r\n if (media >= 0) and (media <= 4.5):\r\n print(f\"\\n As notas do(a) {nome} são: Primeira: {nota1:.2f}, Segunda: {nota2:.2f}, Terceira: {nota3:.2f} e \",\r\n end=\" \")\r\n print(f\"Quarta {nota4:.2f}.\")\r\n print(f\" A média dele(a) é: {media:.2f}\")\r\n print(f\" O(A) Aluno(a) está reprovado!\")\r\n exit()\r\n else:\r\n print(f\"\\n As notas do(a) {nome} são: Primeira: {nota1:.2f}, Segunda: {nota2:.2f}, Terceira: {nota3:.2f} e \",\r\n end=\" \")\r\n print(f\"Quarta {nota4:.2f}.\")\r\n print(f\" A média dele(a) é: {media:.2f}\")\r\n print(f\" O(A) Aluno(a) foi Aprovado!\")\r\n\r\n print('\\nObrigado por usar nosso Sistema, Buscaremos sempre nos aperfeiçoar!')\r\n exit()\r\n\r\ncad_nota()\r\n","repo_name":"Ebuchini/Python","sub_path":"Sistema_Média/sistema_media.py","file_name":"sistema_media.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12721574305","text":"bags = {}\n\ndef walk( parent ):\n # count the bag we're in as 1. It'll be multiplied by the method caller\n childCount = 1\n\n # If this bag holds no others, return the count and end the treewalk.\n if ( parent[2:] not in bags ):\n return childCount\n \n # Extract each child's multipler and use it with the returned recursive walk() value.\n children = bags.get( parent[2:] )\n for child in children:\n multiplier = int( child[0] )\n childCount = childCount + ( multiplier * walk(child) )\n\n return childCount\n\nwith open('day 7 input.txt') as f:\n lines = f.readlines()\n\n # part two is a more conventional travel down the tree problem, but we need to count the numbers this time!\n for line in lines:\n \tline = line.split( \" contain \" )\n \tparent = line[0].replace( \" bags\",\"\" ).strip()\n \tchildren = line[1].strip().split(\",\")\n\n \tfor child in children:\n \t\t# This special stirng marks the end of the tree\n\t \tif ( child == 'no other bags.' ):\n\t \t\tcontinue\n\n\t \t# Keep the numbers in this one. we'll do the counting when we walk the tree rather than \n # build a special data structure now.\n\t \tchild = child.strip()\n\t \tchild = child.replace( \" bags\",\"\" ).replace( \" bag\", \"\" ).replace( \".\", \"\" )\n\n # this time we build the dictionary parent -> children\n\t \t# ensure there's a set to enter the parent into, and then do so.\n\t \tif ( parent not in bags ):\n\t \t\tbags.update( {parent: set()} )\n\t \tchildren = bags.get( parent )\n\t \tchildren.add( child )\n\n# This tree counts the size of the tree, but the puzzle wants us to NOT include the tree's root\n# (how many bags inside your bag) so we subtract one from the treewalk answer. 
\nprint( walk( \"1 shiny gold\") - 1 )","repo_name":"davidmcglashan/advent-of-code-2020","sub_path":"day 7 pt 2.py","file_name":"day 7 pt 2.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5162986429","text":"from ast import excepthandler\nimport imp\nfrom io import StringIO\nfrom multiprocessing import parent_process\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.ttk import *\nfrom db_con import con,cur\n\nfrom department_master import department_master\n\nclass charge_master(Toplevel):\n des = 0 # des = 0 add mode else des == 1 then it is in editing mode\n edit_id = 0 #this is for storing charge id which user want to edit \n dept_id_lst = []\n def __init__(self,master=None):\n super().__init__(master=master)\n self.charge_master_main = self\n \n self.charge_master_main.title(\"Fee / Charge Master\")\n self.charge_master_main.resizable(False,False)\n\n try:\n p1 = PhotoImage(file = './images/logo.png')\n self.charge_master_main.iconphoto(False,p1)\n except Exception as e:\n messagebox.showerror(\"Error\",\"Could not find logo file\")\n \n\n window_height = 600\n window_width = 325\n \n screen_width = self.charge_master_main.winfo_screenwidth()\n screen_height = self.charge_master_main.winfo_screenheight()\n\n x_cordinate = int((screen_width/2)-(window_width/2))\n y_cordinate = int((screen_height/2)-(window_height/2))\n\n self.charge_master_main.geometry(f\"{window_width}x{window_height}+{x_cordinate}+{y_cordinate}\")\n\n self.charge_master_main.focus_force()\n self.charge_master_main.update()\n\n #charge name\n Label(self.charge_master_main,text=\"Fee Name :- \").place(x=0,y=4)\n self.name_var = StringVar()\n self.name_entry = Entry(self.charge_master_main,width=31,textvariable=self.name_var)\n self.name_entry.focus()\n self.name_entry.place(x=82,y=1,height=30)\n #dept section \n\n Label(self.charge_master_main,text=\"Department :-\").place(x=0,y=42)\n \n #department dropdown \n\n\n \n try:\n self.dept_var = StringVar() # var of dropdown\n self.dept_menu = Combobox(self.charge_master_main,textvariable = self.dept_var)\n self.dept_menu['values']= ['Select'] #this list will come from database\n self.name_entry.bind(\"\",lambda f:self.dept_menu.focus())\n\n\n #get department from database \n val = ['Select']\n try:\n cur.execute(\"select name,id from department\")\n for i in cur.fetchall():\n val.append(i[0]+\" - \"+str(i[1]))\n self.dept_id_lst.append(i[1])\n \n if len(val)>1:\n self.dept_menu['values']=val\n else:\n messagebox.showerror(\"!\",\"Please enter department first...\",parent=self.charge_master_main)\n except:\n messagebox.showerror(\"Error\",\"can not able to fetch department...\",parent=self.charge_master_main)\n \n\n\n self.dept_menu.set(\"Select\")\n self.dept_menu['state']='readonly'\n self.dept_menu.config(width=28)\n self.dept_menu.place(x=82,y=35,height=30)\n\n except Exception as e:\n print(e+\"line no 52\")\n messagebox.showerror(\"No Department Found\",\"Please Enter Department First\")\n\n \n \n\n #old case and net case \n\n Label(self.charge_master_main,text=\"New Case :-\").place(x=0,y=72)\n self.new_case_var=IntVar()\n self.new_case_entry = Entry(self.charge_master_main,width=10,textvariable=self.new_case_var)\n self.new_case_entry.place(x=80,y=70,height=28)\n\n Label(self.charge_master_main,text=\"Old Case\").place(x=170,y=72)\n self.old_case_var = IntVar()\n self.old_case_entry = 
Entry(self.charge_master_main,width=10,textvariable=self.old_case_var)\n self.old_case_entry.place(x=230,y=70,height=28)\n \n\n self.dept_menu.bind(\"\",lambda f:self.set_focus(self.new_case_entry,self.new_case_var))\n self.new_case_entry.bind(\"\",lambda f:self.set_focus(self.old_case_entry,self.old_case_var))\n self.old_case_entry.bind(\"\",lambda f:self.add_to_db())\n \n col = ['id','fee_name','new_case','old_case','dept_id']\n self.tree = Treeview(self.charge_master_main,column=col,show='headings',height=17)\n\n self.tree.heading('id',text=\"FID\")\n self.tree.heading('fee_name',text=\"Fee/Charge Name\")\n self.tree.heading('new_case',text=\"NewCase\")\n self.tree.heading('old_case',text=\"OldCase\")\n self.tree.heading('dept_id',text=\"DeptId\")\n\n self.tree.column('0',width=30)\n self.tree.column('1',width=120)\n self.tree.column('2',width=50)\n self.tree.column('3',width=50)\n self.tree.column('4',width=30)\n \n self.tree.place(x=0,y=135)\n\n scrollbar = Scrollbar(self.charge_master_main,orient=VERTICAL,command=self.tree.yview)\n self.tree.configure(yscroll=scrollbar.set)\n scrollbar.place(x=316,y=135,height=400)\n\n \n\n self.display_data()\n \n #add \n self.add = Button(self.charge_master_main,width=15,text=\"Add\",command=self.add_to_db)\n self.add.place(x=105,y=100)\n\n self.edit = Button(self.charge_master_main,width=8,text=\"Edit\",command=self.update_record)\n self.edit.place(x=20,y=540)\n\n self.clear = Button(self.charge_master_main,width=15,text=\"Clear\",command=self.clear_selection)\n\n\n #delete\n self.delete = Button(self.charge_master_main,width=8,text=\"Delete\",command=self.delete_record)\n self.delete.place(x=110,y=540)\n\n #disable by default\n self.edit['state']='disable'\n self.delete['state']='disable'\n\n #exit\n self.exit = Button(self.charge_master_main,width=8,text=\"Exit\",command=self.charge_master_main.destroy)\n self.exit.place(x=200,y=540)\n \n #shortcut label \n Label(self.charge_master_main,text=\"ctrl+d\").place(x=135,y=578)\n Label(self.charge_master_main,text=\"ctrl+e\").place(x=40,y=578)\n Label(self.charge_master_main,text=\"esc\").place(x=224,y=578)\n\n #bind edit,delete,esc\n self.charge_master_main.bind(\"\",self.ctrl_d)\n self.charge_master_main.bind(\"\",self.ctrl_e)\n self.charge_master_main.bind(\"\",lambda e:self.charge_master_main.destroy())\n\n self.charge_master_main.bind(\"<>\",self.get_selection)\n self.charge_master_main.mainloop()\n\n def get_selection(self,e):\n self.des = 1 #set to 1 for editing mode on \n self.edit['state']='normal'\n self.delete['state']='normal'\n values = []\n for item in self.tree.selection():\n values = self.tree.item(item,'values')\n \n \n if len(values)>0 and len(values)>3:\n try:\n self.name_var.set(values[1])\n try:\n self.dept_var.set(self.dept_menu['values'][self.dept_id_lst.index(int(values[4]))+1])\n except ValueError:\n messagebox.showerror(\"Error\",\"cannot find department with this particular id\",parent=self.charge_master_main)\n self.new_case_var.set(int(values[2]))\n self.old_case_var.set(int(values[3]))\n \n self.clear.place(x=105,y=100)\n\n except Exception as e:\n print(e)\n\n \n def clear_selection(self):\n for i in self.tree.selection():\n self.tree.selection_remove(i)\n\n self.charge_master_main.update()\n self.edit['state']='disable'\n self.delete['state']='disable'\n\n self.name_var.set(\"\")\n self.dept_var.set(self.dept_menu['values'][0])\n self.new_case_var.set(0)\n self.old_case_var.set(0)\n self.des=0\n self.name_entry.focus()\n self.clear.place_forget()\n\n def 
add_to_db(self):\n if self.des ==0 : #check if des == 0 then it is in add mode else if des == 1 then itis in edit mode \n self.add['state']='disable'\n try:\n name = str(self.name_var.get()).replace(\"'\",\"\").replace('\"','').strip()\n dept_nm = str(self.dept_var.get()).replace(\"'\",\"\").replace('\"','').strip() \n if dept_nm !=\"Select\":\n if name !=\"\" :\n try:\n index = self.dept_menu['values'].index(dept_nm)\n dept_id = self.dept_id_lst[index-1]\n except Exception as e:\n print(e)\n self.add['state']='normal'\n messagebox.showerror(\"!\",\"can not find department id\",parent=self.charge_master_main)\n return\n try:\n new_case_fee = int(str(self.new_case_var.get()).replace(\"'\",\"\").replace('\"','').strip())\n old_case_fee = int(str(self.old_case_var.get()).replace(\"'\",\"\").replace('\"','').strip())\n except:\n self.add['state']='normal'\n messagebox.showerror(\"New Fees / Old Fees\",\"Invalid input for fees \",parent=self.charge_master_main)\n return \n cur.execute(f\"insert into charge(name,dep_id,new_case_fee,old_case_fee) values('{name}',{dept_id},{new_case_fee},{old_case_fee})\")\n con.commit()\n messagebox.showinfo(\"Success\",\"Charges added succesfully\",parent=self.charge_master_main)\n self.name_var.set(\"\")\n self.dept_var.set(\"Select\")\n self.new_case_var.set(0)\n self.old_case_var.set(0)\n self.display_data()\n self.add['state']='normal'\n self.name_entry.focus()\n else: #else of blank name\n self.add['state']='normal'\n messagebox.showerror(\"Invalid input\",\"Empty Fields\",parent=self.charge_master_main)\n return\n else: #else of select drop down\n self.add['state']='normal'\n messagebox.showerror(\"Error\",\"Please Select department\",parent=self.charge_master_main)\n except Exception as e:\n print(e)\n self.add['state']='normal'\n messagebox.showerror(\"Error while inserting charges\",\"can not able to add new charges\",parent=self.charge_master_main) \n\n def update_record(self):\n res = messagebox.askquestion(\"?\",\"Are you sure you want to update this charges ?\",parent=self.charge_master_main)\n if res == \"yes\":\n self.edit['state'] = 'disable'\n values = []\n for item in self.tree.selection():\n values = self.tree.item(item,'values')\n\n name = str(self.name_var.get()).replace(\"'\",\"\").replace('\"','').strip()\n dept_nm = str(self.dept_var.get()).replace(\"'\",\"\").replace('\"','').strip()\n try:\n new_case = int(str(self.new_case_var.get()).replace(\"'\",\"\").replace('\"','').strip())\n old_case = int(str(self.old_case_var.get()).replace(\"'\",\"\").replace('\"','').strip())\n except :\n self.edit['state']='normal'\n messagebox.showerror(\"!\",\"Invalid Input for fees (only accepted number not a-z)\",parent=self.charge_master_main)\n return\n if dept_nm !=\"Select\":\n if len(values)>0:\n if name !=\"\":\n try:\n index = self.dept_menu['values'].index(dept_nm)\n dept_id = self.dept_id_lst[index-1]\n except Exception as e:\n print(e)\n self.add['state']='normal'\n messagebox.showerror(\"!\",\"can not find department id\",parent=self.charge_master_main)\n return\n try:\n cur.execute(f\"update charge set name='{name}',dep_id={dept_id},new_case_fee={new_case},old_case_fee={old_case} where id={values[0]} \")\n con.commit()\n messagebox.showinfo(\"Success\",\"Charges Updated Successfully\",parent=self.charge_master_main)\n self.display_data()\n self.clear_selection()\n except:\n messagebox.showerror(\"Error\",\"can not able to update charges\",parent=self.charge_master_main)\n else:\n self.edit['state']='normal'\n messagebox.showerror(\"Invalid 
input\",\"Empty Fields\",parent=self.charge_master_main)\n return\n else:\n pass\n else:\n self.edit['state']='normal'\n messagebox.showwarning(\"!\",\"Please select department \",parent=self.charge_master_main)\n return\n\n def delete_record(self):\n res = messagebox.askquestion(\"Are you sure?\",\"really want to delete this charges ? \",parent=self.charge_master_main)\n if (res ==\"yes\"):\n value =[] #selected tree value\n for item in self.tree.selection():\n value = self.tree.item(item,'values')\n if len(value)>0:\n try:\n cur.execute(f\"delete from charge where id = {value[0]}\")\n con.commit()\n messagebox.showinfo(\"!\",\"charges deleted succesfully\",parent=self.charge_master_main)\n self.clear_selection()\n self.display_data()\n except:\n messagebox.showerror(\"there is something wrong !\",\"cannot able to delete this charges \",parent=self.charge_master_main)\n\n def display_data(self):\n try:\n for row in self.tree.get_children():\n self.tree.delete(row)\n cur.execute(\"select id,name,new_case_fee,old_case_fee,dep_id from charge\")\n for i in cur.fetchall():\n self.tree.insert('','end',text=\"\",values=i)\n except:\n messagebox.showerror(\"Error\",\"Cannot able to fetch charges\")\n \n def set_focus(self,widget,widget_var):\n pos = len(str(widget_var.get())) \n widget.focus()\n widget.icursor(pos)\n \n def ctrl_d(self,e):\n if self.des==1:\n self.delete_record()\n else:\n messagebox.showerror(\"Unable to delete charges\",\"Please select charges that you want to delete\",parent=self.charge_master_main)\n\n def ctrl_e(self,e):\n if self.des ==1:\n self.update_record()\n else:\n messagebox.showerror(\"Unable to edit charges\",\"Please select charges that you want to edit\",parent=self.charge_master_main)\n\n ","repo_name":"shivam-soni-333/Patient-Managment-System","sub_path":"charge_master.py","file_name":"charge_master.py","file_ext":"py","file_size_in_byte":15052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6943360241","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Time : 8/18/20 1:56 PM\n@Author : Rodney Cheung\n@File : log.py\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport time\nimport traceback\n\nfrom colorlog import ColoredFormatter\n\nfrom wisbec.filesystem.filesystem import FilesystemUtil\n\n\nclass MyLogger(logging.Logger):\n def findCaller(self, stack_info=False, stack_level=1):\n n_frames_upper = 2\n f = logging.currentframe()\n for _ in range(n_frames_upper): # <-- correct frame\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == logging._srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv\n\n\nclass Log:\n logging.setLoggerClass(MyLogger)\n console_logger = None\n console_handler = None\n file_log_formatter = logging.Formatter(\n \"[%(levelname)s] %(asctime)s %(funcName)s %(lineno)d: %(message)s\")\n console_log_formatter = ColoredFormatter(\n \"%(log_color)s[%(levelname)s] %(asctime)s %(funcName)s %(lineno)d : %(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 
'red',\n 'CRITICAL': 'red,bg_white',\n },\n secondary_log_colors={},\n style='%')\n is_log_to_file = False\n is_log_to_console = True\n file_loggers = {}\n\n log_dir = None\n log_debug_path = None\n log_info_path = None\n log_err_path = None\n log_warning_path = None\n log_critical_path = None\n log_path_dict = {}\n\n file_handler_created_flag = {\n logging.DEBUG: False,\n logging.WARNING: False,\n logging.ERROR: False,\n logging.CRITICAL: False,\n logging.INFO: False\n }\n\n @classmethod\n def __init_log_dir(cls, log_dir: str):\n FilesystemUtil.create_directories(log_dir)\n cls.log_dir = log_dir\n cls.log_debug_path = os.path.join(log_dir, \"debug.log\")\n cls.log_info_path = os.path.join(log_dir, \"info.log\")\n cls.log_err_path = os.path.join(log_dir, \"error.log\")\n cls.log_warning_path = os.path.join(log_dir, \"warning.log\")\n cls.log_critical_path = os.path.join(log_dir, \"critical.log\")\n cls.log_path_dict = {\n logging.DEBUG: cls.log_debug_path,\n logging.WARNING: cls.log_warning_path,\n logging.ERROR: cls.log_err_path,\n logging.CRITICAL: cls.log_critical_path,\n logging.INFO: cls.log_info_path\n }\n\n @classmethod\n def __init_log_handler(\n cls,\n is_log_to_file: bool,\n is_log_to_console: bool,\n console_log_name: str,\n console_log_level: int):\n cls.is_log_to_console = is_log_to_console\n if is_log_to_console:\n cls.console_handler = logging.StreamHandler()\n cls.console_handler.setFormatter(cls.console_log_formatter)\n cls.console_handler.setLevel(console_log_level)\n cls.console_logger = logging.getLogger(console_log_name)\n cls.console_logger.setLevel(logging.DEBUG)\n cls.console_logger.addHandler(cls.console_handler)\n cls.is_log_to_file = is_log_to_file\n\n @classmethod\n def init_logger(cls,\n log_dir=os.path.join(os.getcwd(),\n \"runtime\", \"log\",\n time.strftime(\"%Y-%m-%d %H:%M:%S\",\n time.localtime())),\n is_log_to_file=True,\n is_log_to_console=True,\n console_log_name='console_log',\n console_log_level=logging.DEBUG,\n ):\n \"\"\"\n init logger\n Args:\n log_dir: log save path\n is_log_to_file:is output log to file\n is_log_to_console:is output log to console\n console_log_name: logger name\n console_log_level:minimum log level,if not set,all level logs will be recorded\n Returns:\n None\n \"\"\"\n if is_log_to_file:\n cls.__init_log_dir(log_dir)\n cls.__init_log_handler(\n is_log_to_file,\n is_log_to_console,\n console_log_name,\n console_log_level)\n\n @classmethod\n def __position_format(cls, msg: str, *args, **kwargs) -> str:\n # position_str = '{}'\n # position_str_list = list()\n # arg_len = len(args)\n # while arg_len > 0:\n # position_str_list.append(position_str)\n # arg_len -= 1\n # return \"\".join(position_str_list).format(*args)\n return msg.format(*args, **kwargs)\n\n @classmethod\n def debug(cls, msg: str, *args, **kwargs):\n data = cls.__position_format(msg, *args, **kwargs)\n if cls.is_log_to_console:\n cls.console_logger.debug(data)\n if cls.is_log_to_file:\n if not cls.file_handler_created_flag[logging.DEBUG]:\n cls.add_file_handler(cls.log_debug_path, logging.DEBUG)\n cls.file_handler_created_flag[logging.DEBUG] = True\n cls.file_loggers[logging.DEBUG].debug(data)\n\n @classmethod\n def info(cls, msg: str, *args, **kwargs):\n data = cls.__position_format(msg, *args, **kwargs)\n if cls.is_log_to_console:\n cls.console_logger.info(data)\n if cls.is_log_to_file:\n if not cls.file_handler_created_flag[logging.INFO]:\n cls.add_file_handler(cls.log_info_path, logging.INFO)\n cls.file_handler_created_flag[logging.INFO] = True\n 
cls.file_loggers[logging.INFO].info(data)\n\n @classmethod\n def warning(cls, msg: str, *args, **kwargs):\n data = cls.__position_format(msg, *args, **kwargs)\n if cls.is_log_to_console:\n cls.console_logger.warning(data)\n if cls.is_log_to_file:\n if not cls.file_handler_created_flag[logging.WARNING]:\n cls.add_file_handler(cls.log_warning_path, logging.WARNING)\n cls.file_handler_created_flag[logging.WARNING] = True\n cls.file_loggers[logging.WARNING].warning(data)\n\n @classmethod\n def error(cls, msg: str, *args, **kwargs):\n data = cls.__position_format(msg, *args, **kwargs)\n if cls.is_log_to_console:\n cls.console_logger.error(data)\n if cls.is_log_to_file:\n if not cls.file_handler_created_flag[logging.ERROR]:\n cls.add_file_handler(cls.log_err_path, logging.ERROR)\n cls.file_handler_created_flag[logging.ERROR] = True\n cls.file_loggers[logging.ERROR].error(data)\n\n @classmethod\n def critical(cls, msg: str, *args, **kwargs):\n data = cls.__position_format(msg, *args, **kwargs)\n if cls.is_log_to_console:\n cls.console_logger.critical(data)\n if cls.is_log_to_file:\n if not cls.file_handler_created_flag[logging.CRITICAL]:\n cls.add_file_handler(cls.log_critical_path, logging.CRITICAL)\n cls.file_handler_created_flag[logging.CRITICAL] = True\n cls.file_loggers[logging.CRITICAL].critical(data)\n\n @classmethod\n def add_file_handler(cls, log_file, log_level):\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(cls.file_log_formatter)\n file_handler.setLevel(log_level)\n file_logger = logging.getLogger(str(log_level))\n file_logger.setLevel(logging.DEBUG)\n file_logger.addHandler(file_handler)\n cls.file_loggers.update({log_level: file_logger})\n\n @classmethod\n def set_console_log_level(cls, log_level: int):\n cls.console_handler.setLevel(log_level)\n\n @classmethod\n def close(cls):\n cls.clear_file_handler()\n\n @classmethod\n def clear_file_handler(cls):\n for file_logger in cls.file_loggers:\n cls.file_loggers[file_logger].handlers[0].close()\n cls.file_handler_created_flag[file_logger] = False\n cls.file_loggers.clear()\n","repo_name":"Future-Walkers/python-mod","sub_path":"src/wisbec/logging/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"3558498361","text":"class HashItem():\n def __init__(self, key, value):\n self.key = key\n self.value = value\n \n def __repr__(self):\n return f'{{{self.key}: {self.value}}}'\n\nclass HashTable():\n def __init__(self, size=256):\n self.size = size\n self.slots = [None] * size\n self.used_slots = 0\n \n def __repr__(self):\n text = ''\n for index, slot in enumerate(self.slots):\n if slot:\n text += f', {index}: {slot}'\n plural = '' if self.used_slots == 1 else 's'\n return f' 20.0:\n # print(fin_name, \" \", dist0, \" \", key0)\n continue\n if key0 not in huge_repo:\n huge_repo[key0] = []\n huge_repo[key0].append(dist0)\n\ndef main():\n import numpy as np\n import matplotlib\n import matplotlib.pyplot as plt\n import matplotlib.mlab as mlab\n import matplotlib.axis as axis\n import glob\n for filename in glob.glob(r'./*.dat'):\n # print(filename)\n anaf(filename)\n\n pro_names = ['ARG', 'LYS', 'ASP', 'GLU', 'HIS',\\\n 'SER', 'THR', 'GLN', 'ASN', 'ALA',\\\n 'VAL', 'LEU', 'ILE', 'MET', 'PHE',\\\n 'TYR', 'TRP', 'PRO', 'CYS', 'GLY']\n na_names = ['A', 'T', 'G', 'C', 'P', 'S']\n pna_names = pro_names + na_names\n\n # -------------------- INTER-PROTEIN --------------------\n pcommand = 
input(\" Produce pro-pro distance distribution fig? \")\n if pcommand == 'y' or pcommand == 'yes':\n for i in pro_names:\n fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(10,10))\n out_name = \"inter_pro_\" + i + \".png\"\n print(\" Plotting \", out_name)\n\n for j, k in enumerate(pro_names):\n reskey = (i, k) if i > k else (k, i)\n m, n = j // 4, j % 4 # sub fig index\n x = np.array(huge_repo[reskey])\n num_bins = 20\n # the histogram of data\n p, bins, patches = axes[m, n].hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)\n # y is Cumulative P\n y_sum = sum(p[:])\n y = [sum(p[:i])/y_sum for i in range(len(p)+1)]\n\n axes[m, n].plot(bins, y, 'r--')\n axes[m, n].grid(axis='y', linestyle='-', alpha=0.3)\n axes[m, n].set_xlabel(r'distance $\\AA$')\n axes[m, n].set_ylabel('Probability')\n axes[m, n].set_xlim(0, 15)\n axes[m, n].set_ylim(0, 0.6)\n axes[m, n].set_xticks(np.arange(0, 15, 3))\n axes[m, n].set_title(i + ' - ' + k, fontsize=10)\n fig.subplots_adjust(hspace=0.8)\n fig.subplots_adjust(wspace=0.8)\n fig.savefig(out_name, dpi=150)\n plt.clf()\n\n # -------------------- PROTEIN -- DNA --------------------\n pcommand = input(\" Produce pro-DNA distance distribution fig? \")\n if pcommand == 'y' or pcommand == 'yes':\n for i in na_names:\n fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(10,10))\n out_name = \"DNA_pro_\" + i + \".png\"\n print(\" Plotting \", out_name)\n\n for j, k in enumerate(pro_names):\n reskey = (k, i)\n m, n = j // 4, j % 4 # sub fig index\n x = np.array(huge_repo[reskey])\n num_bins = 20\n # the histogram of data\n p, bins, patches = axes[m, n].hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)\n # y is Cumulative P\n y_sum = sum(p[:])\n y = [sum(p[:i])/y_sum for i in range(len(p)+1)]\n # local_list = []\n # for kb, ky in zip(bins, y):\n # local_list.append( (kb, ky) )\n # p_repo[reskey] = local_list\n\n axes[m, n].plot(bins, y, 'r--')\n axes[m, n].grid(axis='y', linestyle='-', alpha=0.3)\n axes[m, n].set_xlabel(r'distance $\\AA$')\n axes[m, n].set_ylabel('Probability')\n axes[m, n].set_xlim(0, 15)\n axes[m, n].set_ylim(0, 0.6)\n axes[m, n].set_xticks(np.arange(0, 15, 3))\n axes[m, n].set_title(i + ' - ' + k, fontsize=10)\n fig.subplots_adjust(hspace=0.8)\n fig.subplots_adjust(wspace=0.8)\n fig.savefig(out_name, dpi=150)\n plt.clf()\n\n pcommand = input(\" Produce pro-DNA minimal distance map? 
(q to quit): \")\n if pcommand == 'q':\n pass\n elif pcommand == 'y' or pcommand == 'yes':\n for k, l in huge_repo.items():\n sl = sorted(l)\n sl_L, sl_S = max(sl), min(sl)\n delta = (sl_L - sl_S) / 50\n y = [0 for i in range(51)]\n for dist in l:\n n_sl = int((dist - sl_S) / delta)\n y[n_sl] += 1\n y_sum = sum(y)\n for i in range(51):\n y[i] /= y_sum\n local_dict = [(sl_S + i * delta, sum(y[:i])) for i in range(50)]\n p_repo[k] = local_dict[:]\n\n p_cutoff_list = [0.01 + 0.01 * i for i in range(100)]\n it = 0\n while True:\n # p_cutoff_s = input(\" Please give me a cutoff for P (q to quit): \")\n # if p_cutoff_s == 'q':\n # break\n # p_cutoff = float(p_cutoff_s)\n\n if it >= 100:\n break\n p_cutoff = p_cutoff_list[it]\n p_cutoff_s = str(round(p_cutoff, 2))\n if len(p_cutoff_s) < 4:\n p_cutoff_s += '0'\n it += 1\n print(it, \" \", p_cutoff_s)\n\n # -------------------- compute the distance matrix -----------------\n imatrix = []\n for i, ni in enumerate(pna_names):\n row = []\n for j, nj in enumerate(pna_names):\n reskey = (ni, nj) if (ni > nj and len(ni) == len(nj)) or len(ni) > len(nj) else (nj, ni)\n if reskey not in p_repo:\n row.append(0)\n else:\n for dist_p in p_repo[reskey]:\n if dist_p[1] > p_cutoff:\n break\n row.append(dist_p[0])\n imatrix.append(row[:])\n row.clear()\n # print(imatrix)\n\n x = [i for i in range(len(pna_names))]\n # -------------------- least square - compute sigmas ---------------\n # ---------- calc from prot first, then from prot-DNA --------------\n sigma, sigma_i_sum = [0 for i in range(26)], [0 for i in range(26)]\n total_sigma_sum = 0\n for i in range(20):\n sigma_sum = sum(imatrix[i][:20])\n sigma_i_sum[i] = sigma_sum\n total_sigma_sum += sigma_sum\n total_sigma_sum /= 40\n for i in range(20, 26):\n sigma_i_sum[i] = sum(imatrix[i][:20])\n for i in range(26):\n sigma[i] = 0.05 * (sigma_i_sum[i] - total_sigma_sum) * 2\n # ---------- calc directly from prot-DNA --------------\n sigma2, sigma_i_sum2 = [0 for i in range(26)], [0 for i in range(26)]\n total_sigma_sum2 = 0\n for i in range(20, 26):\n sigma_sum2 = sum(imatrix[i][:20])\n total_sigma_sum2 += sigma_sum2\n for i in range(26):\n sigma_i_sum2[i] = sum(imatrix[i][:])\n ts1 = total_sigma_sum + 0.05 * (total_sigma_sum2 - 6 * total_sigma_sum)\n for i in range(20):\n sigma2[i] = 2 * (sigma_i_sum2[i] - ts1) / 26\n ts2 = sum(sigma2[:20]) / 2\n for i in range(20, 26):\n sigma2[i] = 0.05 * (sigma_i_sum2[i] - ts2) * 2\n plt.plot(x, sigma, 'r-', linewidth=2, label='pro-pro')\n plt.plot(x, sigma2, 'g-', linewidth=2, label='pro-DNA')\n plt.xticks(x, pna_names, rotation='vertical')\n plt.ylim(3, 20)\n plt.ylabel(r'Distance ($\\AA$)')\n plt.title(r'$\\sigma$ (Quantile = '+p_cutoff_s+' )')\n plt.grid(axis='x', linestyle='--', alpha=0.3)\n plt.grid(axis='y', linestyle='-', alpha=0.3)\n plt.legend(prop={'size': 16}, loc='upper left')\n plt.savefig(\"sigma_quantile_\"+p_cutoff_s+\".png\", dpi=150)\n # plt.show()\n plt.clf()\n\n # ==================== plot the matrix! 
====================\n plt.xticks(x, pna_names, rotation='vertical')\n plt.yticks(x, pna_names)\n cax = plt.imshow(imatrix, cmap=plt.cm.BuGn, interpolation='none', origin='lower', \\\n vmin=2.0, vmax=10.0)\n plt.title(\"Distance Matrix Quantile = \"+p_cutoff_s)\n cbar = plt.colorbar(cax, ticks=[2,4,6,8,10])\n cbar.ax.set_yticklabels([r'<2$\\AA$', r'4$\\AA$', r'6$\\AA$', r'8$\\AA$', r'10$\\AA$'])\n plt.savefig(\"dist_matrix_quantile_\"+p_cutoff_s+\".png\", dpi=150)\n # plt.show()\n plt.clf()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"noinil/pinang","sub_path":"examples/utils/pdi_dist_statistics.py","file_name":"pdi_dist_statistics.py","file_ext":"py","file_size_in_byte":9480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"34555080357","text":"# Django\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http.response import JsonResponse\nfrom django.core.files.storage import default_storage\n\n# RestFramework\nfrom rest_framework.parsers import JSONParser\n\n# Models and Serializers\nfrom EmployeeApp.models import Departement, Employees\nfrom EmployeeApp.serializers import DeparmentSerializer, EmployeeSerializer\n\n\n# Create your views here.\n\n\n@csrf_exempt\ndef deparmetnApi(request, id=0):\n if request.method == 'GET':\n deparment = Departement.objects.all()\n deparment_serializer = DeparmentSerializer(deparment, many=True)\n return JsonResponse(deparment_serializer.data, safe=False)\n elif request.method == 'POST':\n deparment_data = JSONParser().parse(request)\n deparment_serializer = DeparmentSerializer(data=deparment_data)\n if deparment_serializer.is_valid():\n deparment_serializer.save()\n return JsonResponse(\"Added Successfully\", safe=False)\n return JsonResponse(\"Failed to add\", safe=False)\n elif request.method == 'PUT':\n deparment_data = JSONParser().parse(request)\n deparment = Departement.objects.get(DeparmentId=deparment_data['DeparmentId'])\n deparment_serializer = DeparmentSerializer(deparment, data=deparment_data)\n if deparment_serializer.is_valid():\n deparment_serializer.save()\n return JsonResponse(\"Updated Successfully\", safe=False)\n return JsonResponse(\"Failed to update\", safe=False)\n elif request.method == 'DELETE':\n deparment = Departement.objects.get(DeparmentId=id)\n deparment.delete()\n return JsonResponse(\"Delete Successfully\", safe=False)\n\n\n@csrf_exempt\ndef employeeApi(request, id=0):\n if request.method == 'GET':\n employee = Employees.objects.all()\n employee_serializer = EmployeeSerializer(employee, many=True)\n return JsonResponse(employee_serializer.data, safe=False)\n elif request.method == 'POST':\n employee_data = JSONParser().parse(request)\n employee_serializer = EmployeeSerializer(data=employee_data)\n if employee_serializer.is_valid():\n employee_serializer.save()\n return JsonResponse(\"Added Successfully\", safe=False)\n return JsonResponse(\"Failed to add\", safe=False)\n elif request.method == 'PUT':\n employee_data = JSONParser().parse(request)\n employee = Employees.objects.get(EmployeeId=employee_data['EmployeeId'])\n employee_serializer = EmployeeSerializer(employee, data=employee_data)\n if employee_serializer.is_valid():\n employee_serializer.save()\n return JsonResponse(\"Updated Successfully\", safe=False)\n return JsonResponse(\"Failed to update\", safe=False)\n elif request.method == 'DELETE':\n employee = Employees.objects.get(EmployeeId=id)\n employee.delete()\n return 
JsonResponse(\"Delete Successfully\", safe=False)\n\n\n@csrf_exempt\ndef SaveFile(request):\n file = request.FILES['file']\n file_name = default_storage.save(file.name, file)\n return JsonResponse(file_name, safe=False)\n","repo_name":"Danielmc09/register","sub_path":"EmployeeApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19472664617","text":"import os\nimport sys\nimport requests\nfrom bs4 import BeautifulSoup\nfrom colorama import init\n\n# write your code here\nargs = sys.argv\n\n# Create directory\ndirName = args[1]\n\ntry:\n # Create target Directory\n os.mkdir(dirName)\nexcept FileExistsError:\n print('')\n\n\ndef get_data(url):\n return requests.get('https://' + url).content\n\n\nwhile True:\n address_bar = input()\n if address_bar == 'exit':\n break\n # elif address_bar.replace('.', '_') not in web_list:\n # print(\"Error: Incorrect URL\")\n elif '.' in address_bar:\n contents = BeautifulSoup(get_data(address_bar), 'html.parser')\n final_address = address_bar.replace('.', '')\n file_name = dirName + '/' + final_address + '.txt'\n with open(file_name, 'a+') as f:\n for x in range(len(contents.findAll('a'))):\n f.write(contents.findAll('a')[x].get_text() + '\\n')\n # print(contents.findAll('a')[x].get_text())\n with open(file_name, 'r') as d:\n print(d.read())\n else:\n print(\"Error: Incorrect URLs\")","repo_name":"pyrrhus-ich/Python-Basics","sub_path":"Uebungsprojekte/Text-Based Browser/Text-Based Browser/task/browser/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72949244744","text":"from scipy.stats import binom_test\r\nimport pickle\r\nfrom math import log10\r\n\r\n\r\ndef calculate(goset, go_back, N, resultpath):\r\n gosetp = dict()\r\n for term in goset.keys():\r\n if goset[term] / N > go_back[term][0] / go_back[term][1]:\r\n gosetp[term] = binom_test(goset[term], N, go_back[term][0] / go_back[term][1])\r\n\r\n terms = list(gosetp.keys())\r\n terms.sort(key=lambda x: gosetp[x])\r\n i = 0\r\n gosetq = dict()\r\n for term in terms:\r\n i += 1\r\n p = gosetp[term]\r\n gosetq[term] = p * len(terms) / i\r\n\r\n terms.sort(key=lambda x: gosetq[x])\r\n g = open(resultpath, 'w')\r\n for term in terms:\r\n if gosetp[term] > 0.05:\r\n break\r\n g.write(term + '\\t' + go[term]['name'])\r\n g.write('\\t' + str(goset[term]) + '\\t' + str(N) + '\\t' + str(go_back[term][0] / go_back[term][1]))\r\n g.write('\\t' + str(gosetp[term]) + '\\t')\r\n if gosetp[term] != 0:\r\n g.write(str(-log10(gosetp[term])))\r\n g.write('\\t' + str(gosetq[term]) + '\\t')\r\n if gosetq[term] != 0:\r\n g.write(str(-log10(gosetq[term])))\r\n g.write('\\n')\r\n g.close()\r\n return 0\r\n\r\n\r\ndef main(bedfile, dataset, resultpath):\r\n g = open(dataset + 'MF' + 're-term-fmaxrawinter.pkl', 'rb+')\r\n resmf = pickle.load(g)\r\n g = open(dataset + 'MF' + '_backpro_fmaxrawinter.pkl', 'rb+')\r\n go_backmf = pickle.load(g)\r\n\r\n g = open(dataset + 'BP' + 're-term-fmaxrawinter.pkl', 'rb+')\r\n resbp = pickle.load(g)\r\n g = open(dataset + 'BP' + '_backpro_fmaxrawinter.pkl', 'rb+')\r\n go_backbp = pickle.load(g)\r\n\r\n g = open(dataset + 'CC' + 're-term-fmaxrawinter.pkl', 'rb+')\r\n rescc = pickle.load(g)\r\n g = open(dataset + 'CC' + '_backpro_fmaxrawinter.pkl', 'rb+')\r\n go_backcc = pickle.load(g)\r\n\r\n g = open(dataset + 'GRN.pkl', 
'rb+')\r\n grn = pickle.load(g)\r\n\r\n geneset = dict()\r\n\r\n N = 0\r\n res = dict()\r\n for chr in resmf:\r\n res[chr] = dict()\r\n for re in resmf[chr]:\r\n res[chr][re] = resmf[chr][re]\r\n for re in resbp[chr]:\r\n res[chr][re] = resbp[chr][re]\r\n for re in rescc[chr]:\r\n res[chr][re] = rescc[chr][re]\r\n\r\n gosetmf = dict()\r\n gosetbp = dict()\r\n gosetcc = dict()\r\n with open(bedfile, 'r') as f:\r\n for line in f:\r\n line = line.split()\r\n chr = line[0]\r\n start = int(line[1])\r\n end = int(line[2])\r\n dismin = 99999999\r\n if not chr in res:\r\n continue\r\n for re in res[chr].keys():\r\n if res[chr][re]['end'] < start:\r\n dis = start - res[chr][re]['end']\r\n elif res[chr][re]['start'] > end:\r\n dis = res[chr][re]['start'] - end\r\n else:\r\n dismin = 0\r\n cloest_re = re\r\n break\r\n if dis < dismin:\r\n dismin = dis\r\n cloest_re = re\r\n distances.append(dismin)\r\n if dismin < 1000:\r\n N += 1\r\n if cloest_re in resmf[chr]:\r\n for term in resmf[chr][cloest_re]['terms']:\r\n if not term in gosetmf:\r\n gosetmf[term] = 0\r\n gosetmf[term] += 1\r\n if cloest_re in resbp[chr]:\r\n for term in resbp[chr][cloest_re]['terms']:\r\n if not term in gosetbp:\r\n gosetbp[term] = 0\r\n gosetbp[term] += 1\r\n if cloest_re in rescc[chr]:\r\n for term in rescc[chr][cloest_re]['terms']:\r\n if not term in gosetcc:\r\n gosetcc[term] = 0\r\n gosetcc[term] += 1\r\n for gene in grn['res'][cloest_re]['reg']:\r\n if not gene in geneset:\r\n geneset[gene] = 0\r\n geneset[gene] += 1\r\n calculate(gosetmf, go_backmf, N, resultpath + '_MF.txt')\r\n calculate(gosetbp, go_backbp, N, resultpath + '_BP.txt')\r\n calculate(gosetcc, go_backcc, N, resultpath + '_CC.txt')\r\n\r\n genes = list(geneset.keys())\r\n genes.sort(key=lambda x: geneset[x], reverse=True)\r\n with open(resultpath + '_genes.txt', 'w') as f:\r\n for gene in genes:\r\n f.write(gene + '\\t' + str(geneset[gene]) + '\\n')\r\n return 0\r\n\r\n\r\ndef importgo(filename='./go.obo'):\r\n # Reading Gene Ontology from OBO Formatted file\r\n go = dict()\r\n obj = None\r\n ns = {'biological_process': 'BP', 'molecular_function': 'MF', 'cellular_component': 'CC'}\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n line = line.strip()\r\n if not line:\r\n continue\r\n if line == '[Term]':\r\n if obj is not None:\r\n go[obj['id']] = obj\r\n obj = dict()\r\n obj['is_a'] = set()\r\n obj['part_of'] = set()\r\n obj['regulates'] = set()\r\n obj['is_obsolete'] = False\r\n obj['ancesent'] = set()\r\n obj['descent'] = set()\r\n continue\r\n elif line == '[Typedef]':\r\n if obj is not None:\r\n go[obj['id']] = obj\r\n obj = None\r\n else:\r\n if obj is None:\r\n continue\r\n l = line.split(\": \")\r\n if l[0] == 'id':\r\n obj['id'] = l[1]\r\n elif l[0] == 'is_a':\r\n obj['is_a'].add(l[1].split(' ! 
')[0])\r\n elif l[0] == 'name':\r\n obj['name'] = l[1]\r\n elif l[0] == 'is_obsolete' and l[1] == 'true':\r\n obj['is_obsolete'] = True\r\n elif l[0] == 'namespace':\r\n obj['namespace'] = ns[l[1]]\r\n if obj is not None:\r\n go[obj['id']] = obj\r\n for go_id in list(go.keys()):\r\n if go[go_id]['is_obsolete']:\r\n del go[go_id]\r\n for go_id, val in go.items():\r\n if 'children' not in val:\r\n val['children'] = set()\r\n for p_id in val['is_a']:\r\n if p_id in go:\r\n if 'children' not in go[p_id]:\r\n go[p_id]['children'] = set()\r\n go[p_id]['children'].add(go_id)\r\n for go_id in go.keys():\r\n if 'ancesent' not in go[go_id]:\r\n go[go_id]['ancesent'] = set()\r\n temp = list(go[go_id]['is_a'])\r\n while temp:\r\n now = temp.pop()\r\n go[go_id]['ancesent'].add(now)\r\n temp = list(set(temp) | go[now]['is_a'])\r\n if 'descent' not in go[now]:\r\n go[now]['descent'] = set()\r\n go[now]['descent'].add(go_id)\r\n return go\r\n\r\n\r\nif __name__ == '__main__':\r\n global go\r\n go = importgo()\r\n path = './inputfiledict/'\r\n datapath = './datamgi/'\r\n resultpath = './outputfiledict/'\r\n count = 0\r\n file = 'exampleinput.bed'\r\n\r\n with open('./configures.txt','r') as f:\r\n sets = list()\r\n for line in f:\r\n sets.append(line.split()[1])\r\n path = sets[0]\r\n datapath = sets[1]\r\n resultpath = sets[2]\r\n file = sets[3]\r\n global distances\r\n distances = list()\r\n\r\n main(bedfile=path + file, dataset=datapath, resultpath=resultpath + file[:-4])\r\n","repo_name":"AMSSwanglab/RE-GOA","sub_path":"peaksanalysis.py","file_name":"peaksanalysis.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"26338798240","text":"import os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]='0'\nimport sys\nsys.path.append('/home/brianyao/Documents/stad-cvpr2020')\nimport torch\nfrom torch import nn\nimport torch.optim as optim\nimport numpy as np\n\nfrom a3d import make_dataloader\nfrom conv2d_ae import TemporalRegularityDetector, OrigConvAE\nfrom convlstm_ae import ConvLSTMED\nfrom tqdm import tqdm\nimport time\nimport datetime\nimport json\nimport glob\nfrom collections import defaultdict\n\nimport argparse\n\nfrom detection.utils.logger import Logger\nfrom detection.utils.metric_logger import MetricLogger\n\nfrom mpi4py import MPI\nimport apex\nfrom apex.parallel import DistributedDataParallel as DDP\nfrom detection.utils.comm import get_world_size\nfrom detection.utils.comm import is_main_process, all_gather, synchronize\nfrom sklearn import metrics\n\nimport matplotlib.pyplot as plt\nfrom utils.flow_utils import flow_to_image\nimport pdb\nimport logging\nfrom stauc import get_tarr, ST_AUC\n\nroot = '/mnt/workspace/datasets/A3D_2.0/' #'/home/data/vision7/A3D_2.0/' #'/home/data/vision7/A3D_2.0/'\ntrain_split = os.path.join(root, 'A3D_2.0_train.json')\nval_split = os.path.join(root, 'A3D_2.0_val.json')\nsave_dir = 'checkpoints/'\ndevice = 'cuda'\nnum_workers = 24\nbatch_per_gpu = 24\nlr = 0.01\nmax_iters = 20000\nshuffle = True\nseq_len = 10\ncheckpoint_period = 2000\n# mode = 'gray'\nW, H = 227, 227\n\ndef reduce_loss_dict(loss_dict):\n \"\"\"\n Reduce the loss dictionary from all processes so that process with rank\n 0 has the averaged results. 
Returns a dict with the same fields as\n    loss_dict, after reduction.\n    \"\"\"\n    world_size = get_world_size()\n    if world_size < 2:\n        return loss_dict\n    with torch.no_grad():\n        loss_names = []\n        all_losses = []\n        for k in sorted(loss_dict.keys()):\n            loss_names.append(k)\n            all_losses.append(loss_dict[k])\n        # print(\"all_losses:\", all_losses)\n        # print(\"\\n\")\n        all_losses = torch.stack(all_losses, dim=0)\n        torch.distributed.reduce(all_losses, dst=0)\n\n        if torch.distributed.get_rank() == 0:\n            # only main process gets accumulated, so only divide by\n            # world_size in this case\n            all_losses /= world_size\n        \n        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}\n    return reduced_losses\n\ndef weights_init(m):\n    if isinstance(m,nn.Conv2d):\n        nn.init.xavier_normal_(m.weight.data)\n        nn.init.constant_(m.bias.data,0.1)\n    if isinstance(m,nn.ConvTranspose2d):\n        nn.init.xavier_normal_(m.weight.data)\n        nn.init.constant_(m.bias.data,0.1)\n    if isinstance(m, nn.BatchNorm2d):\n        nn.init.normal_(m.weight.data, mean=1.0, std=0.001)\n        nn.init.constant_(m.bias.data, 0.001)\n\ndef do_val(args, model, dataloader): #, logger=None, step=0):\n    all_anomaly_scores = defaultdict(list)\n    all_labels = defaultdict(list)\n    all_tarr = defaultdict(list)\n    step_to_viz = torch.randint(len(dataloader), (1,)).squeeze()\n    for iters, batch in enumerate(tqdm(dataloader)):\n        video_name, t_idx, inputs, labels = batch\n        inputs = inputs.to(device)\n        \n        outputs = model(inputs)\n        if args.mode == 'gray':\n            outputs = outputs.clamp(min=0, max=1)\n        inputs = inputs.detach().cpu()\n        outputs = outputs.detach().cpu()\n        if args.mode == 'gray':\n            errors = ((outputs - inputs)**2).mean(dim=(2,3)).squeeze(0).detach().cpu()\n            error_maps = ((outputs - inputs)**2).detach().cpu()\n        elif args.mode == 'flow':\n            tmp_errors = ((outputs - inputs)**2).mean(dim=(2,3)).squeeze(0).detach().cpu()\n            errors = []\n            error_maps = []\n            for i in range(0, 20, 2):\n                errors.append(tmp_errors[:,i:i+2].mean(dim=1, keepdim=True))\n                error_maps.append(((outputs[:,i:i+2] - inputs[:,i:i+2])**2).mean(dim=1, keepdim=True))\n            errors = torch.cat(errors, dim=1)\n            error_maps = torch.cat(error_maps, dim=1)\n        for vid, err, error_map, label, start in zip(video_name, errors, error_maps, labels, t_idx):\n            all_anomaly_scores[vid].append(err)\n            all_labels[vid].append(label)\n            # get annotated bboxes\n            annotated_bboxes = dataloader.dataset.all_annotated_objs[vid][start:start+seq_len]\n            # compute tarr\n            for e_map, l, bboxes in zip(error_map, label, annotated_bboxes):\n                tarr, mask = get_tarr(difference_map=e_map, \n                                      label=l, \n                                      bboxes=bboxes)\n                all_tarr[vid].append(float(tarr))\n        # if iters > 2:\n        #     break\n        # if iters == step_to_viz:\n        #     inputs_viz = torch.cat([img.unsqueeze(0) for img in inputs[0]], dim=2) * 255\n        #     outputs_viz = torch.cat([img.unsqueeze(0) for img in outputs[0]], dim=2) * 255\n        #     logger.log_image(inputs_viz, label='input_images', step=step)\n        #     logger.log_image(outputs_viz, label='reconstructed_images', step=step)\n    for vid in all_anomaly_scores.keys():\n        all_anomaly_scores[vid] = torch.cat(all_anomaly_scores[vid])\n        # normalize\n        _min = all_anomaly_scores[vid].min()\n        _max = all_anomaly_scores[vid].max()\n        all_anomaly_scores[vid] = (all_anomaly_scores[vid] - _min)/(_max - _min + 1e-6)\n        all_labels[vid] = torch.cat(all_labels[vid])\n        all_tarr[vid] = np.array(all_tarr[vid])\n    all_anomaly_scores = np.concatenate([scores for scores in all_anomaly_scores.values()])\n    all_labels = np.concatenate([labels for labels in all_labels.values()])\n    all_tarr = np.concatenate([tarr for tarr in 
all_tarr.values()])\n\n # NOTE Feb 5, Compute ROC and AUC, STAUC...\n assert len(all_anomaly_scores) == len(all_labels)\n # auc, fpr, tpr = compute_AUC(all_anomaly_scores, all_labels)\n # all_labels = np.concatenate([v for v in all_labels.values()], axis=0)\n # all_anomaly_scores = np.concatenate([v for v in all_anomaly_scores.values()], axis=0)\n\n metric = ST_AUC(labels=all_labels, scores=all_anomaly_scores, tarrs=all_tarr)\n fpr, tpr, sttpr, thresholds = metric.roc_curve(pos_label=1)\n stauc = metrics.auc(fpr, sttpr)\n auc = metrics.auc(fpr, tpr)\n gap = all_anomaly_scores[all_labels==1].mean() - all_anomaly_scores[all_labels==0].mean()\n \n return auc, stauc, gap\n\ndef compute_AUC(scores, labels, normalize=True, ignore=[]): \n scores, labels, zero_score_videos = get_score_label(scores, \n labels,\n normalize=normalize,\n ignore=ignore)\n fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1)\n\n auc = metrics.auc(fpr, tpr)\n return auc, fpr, tpr\n\ndef get_score_label(all_anomaly_scores, all_labels, normalize=True, ignore=[]):\n '''\n Params:\n all_anomaly_scores: a dict of anomaly scores of each video\n all_labels: a dict of anomaly labels of each video\n '''\n anomaly_scores = []\n labels = []\n # video normalization\n zero_score_videos = []\n for key, scores in all_anomaly_scores.items():\n if key in ignore:\n continue\n if scores.max() - scores.min() >= 0:\n if normalize:\n scores = (scores - scores.min())/(scores.max() - scores.min() + 1e-7) \n anomaly_scores.append(scores)\n labels.append(all_labels[key])\n else:\n zero_score_videos.append(key)\n anomaly_scores = torch.cat(anomaly_scores)\n labels = torch.cat(labels)\n return anomaly_scores, labels, zero_score_videos\n\ndef main(args):\n # logger \n num_gpus = MPI.COMM_WORLD.Get_size()\n distributed = False\n if num_gpus > 1:\n distributed = True\n\n local_rank = MPI.COMM_WORLD.Get_rank() % torch.cuda.device_count()\n\n if distributed:\n torch.cuda.set_device(local_rank)\n host = os.environ[\"MASTER_ADDR\"] if \"MASTER_ADDR\" in os.environ else \"127.0.0.1\"\n torch.distributed.init_process_group(\n backend=\"nccl\",\n init_method='tcp://{}:12345'.format(host),\n rank=MPI.COMM_WORLD.Get_rank(),\n world_size=MPI.COMM_WORLD.Get_size()\n )\n\n synchronize()\n # logger must be initialized after distributed!\n cfg = {'PROJECT': 'conv_ae'}\n if args.use_wandb:\n logger = Logger(\"CONV_AE\",\n cfg,#convert_to_dict(cfg, []),\n project = 'conv_ae',\n viz_backend=\"wandb\"\n )\n else:\n logger = logging.Logger('CONV_AE')\n\n logger.info(\"Using {} GPUs\".format(num_gpus))\n train_dataloader = make_dataloader(root, \n train_split, \n is_train=True, \n mode=args.mode, \n shuffle=shuffle, \n distributed=distributed,\n batch_per_gpu=batch_per_gpu,\n num_workers=num_workers,\n max_iters=max_iters)\n\n val_dataloader = make_dataloader(root, \n val_split, \n is_train=False, \n mode=args.mode, \n shuffle=False, \n distributed=False,\n batch_per_gpu=batch_per_gpu,\n num_workers=num_workers,\n max_iters=None)\n\n # load model\n if args.mode == 'gray':\n input_shape = seq_len\n elif args.mode == 'flow':\n input_shape = seq_len * 2\n # model = OrigConvAE(input_shape=input_shape).apply(weights_init)\n model = TemporalRegularityDetector(input_shape=input_shape).apply(weights_init)\n # model = ConvLSTMED(args)\n model.to(device)\n model.train()\n\n rec_loss = nn.MSELoss(reduction='none')\n\n # optimizer \n optimizer = optim.Adagrad(model.parameters(),lr=lr,weight_decay=0.0005)\n\n # train\n logger.info(\"Start training\")\n meters = 
MetricLogger(delimiter=\"  \")\n\n    end = time.time()\n    for iters, ret in enumerate(tqdm(train_dataloader)):\n        iters += 1\n\n        data_time = time.time() - end\n        _, _, inputs, _ = ret\n        inputs = inputs.to(device) \n        outputs = model(inputs)\n        if args.mode == 'gray':\n            outputs = outputs.clamp(min=0, max=1)\n        loss = rec_loss(outputs, inputs)\n        loss = 0.5 * loss.sum(dim=(1,2,3)).mean()\n        \n        # track time\n        batch_time = time.time() - end\n        end = time.time()\n        # reduce losses over all GPUs for logging purposes\n        loss_dict = {\"loss\": loss} #{\"loss_loc\": loc_loss, \"loss_cls\": cls_loss}\n        loss_dict_reduced = reduce_loss_dict(loss_dict)\n        losses_reduced = loss_dict_reduced['loss']\n        \n        optimizer.zero_grad()\n        loss.backward()\n\n        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 5)\n        meters.update(loss=losses_reduced)\n        meters.update(time=batch_time, data=data_time)\n\n        # estimate the rest of the running time\n        eta_seconds = meters.time.global_avg * (max_iters - iters)\n        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n\n        \n        optimizer.step()\n        \n        if iters%20 == 0:\n            # NOTE: Add log file \n            logger.info(\n                meters.delimiter.join(\n                    [\n                        \"eta: {eta}\",\n                        \"iter: {iter}\",\n                        \"{meters}\",\n                        \"lr: {lr:.6f}\",\n                        \"max mem: {memory:.0f}\",\n                    ]\n                ).format(\n                    eta=eta_string,\n                    iter=iters,\n                    meters=str(meters),\n                    lr=optimizer.param_groups[0][\"lr\"],\n                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n                )\n            ) \n            if hasattr(logger, 'log_values'):\n                for name, meter in meters.meters.items():\n                    logger.log_values({name: meter.median}, step=iters)\n                logger.log_values({\"grad_norm\": grad_norm}, step=iters)\n\n        if iters % 500 == 0 and is_main_process():\n            inputs = inputs.detach().cpu()\n            outputs = outputs.detach().cpu()\n            if args.mode == 'gray':\n                inputs_viz = torch.cat([img.unsqueeze(0) for img in inputs[0]], dim=2) * 255\n                outputs_viz = torch.cat([img.unsqueeze(0) for img in outputs[0]], dim=2) * 255\n            elif args.mode == 'flow':\n                inputs_viz = [flow_to_image(inputs[0][i:i+2].permute(1,2,0).numpy()) for i in range(0, input_shape, 2)]\n                inputs_viz = np.concatenate(inputs_viz, axis=1)\n                outputs_viz = [flow_to_image(outputs[0][i:i+2].permute(1,2,0).numpy()) for i in range(0, input_shape, 2)]\n                outputs_viz = np.concatenate(outputs_viz, axis=1)\n            logger.log_image(inputs_viz, label='train_input_images', step=iters)\n            logger.log_image(outputs_viz, label='train_reconstructed_images', step=iters)\n\n        # save checkpoints\n        if iters % checkpoint_period == 0:\n            model.eval()\n            auc, stauc, gap = do_val(args, model, val_dataloader) \n            model.train()\n\n            if hasattr(logger, 'log_values'):\n                logger.info(\"AUC: {}; STAUC: {}; GAP: {}\".format(auc, stauc, gap))\n                logger.log_values({'AUC': auc}, step=iters)\n                logger.log_values({'STAUC': stauc}, step=iters)\n                logger.log_values({'GAP': gap}, step=iters)\n\n                # # Draw ROC curve\n                # fig = plt.figure(iters)\n                # plt.plot(fpr, tpr, label='ROC')\n                # plt.plot(fpr, sttpr, label='STROC')\n                # logger.log_plot(fig, label='ROC', caption='ROC', step=iters)\n\n            else:\n                print(\"AUC: {}; STAUC: {}; GAP: {}\".format(auc, stauc, gap))\n\n            save_path = os.path.join(save_dir, logger.run_id)\n            if not os.path.exists(save_path):\n                os.makedirs(save_path)\n            save_file = os.path.join(save_path, \n                                     'model_{}_auc_{:.4f}_stauc_{:.4f}_gap_{:.4f}.pth'.format(iters,\n                                                                                              auc, \n                                                                                              stauc, \n                                                                                              gap))\n            if hasattr(model, 'module'):\n                torch.save(model.module.state_dict(), save_file)\n            else:\n                torch.save(model.state_dict(), save_file)\n    \nif __name__ == '__main__':\n    
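# command-line options: --mode selects 'gray' or 'flow' inputs, --use_wandb turns on the wandb-backed logger\n    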
parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='gray', help='gray or flow')\n # parser.add_argument('-val_split', type=str)\n parser.add_argument('--use_wandb', const=True, nargs='?')\n args = parser.parse_args()\n main(args)","repo_name":"MoonBlvd/Detection-of-Traffic-Anomaly","sub_path":"ConvAE/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15315,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"81"} +{"seq_id":"19926978710","text":"#!/usr/bin/python\nfrom random import random\nfrom flask import Flask, jsonify\nimport os\nimport random\nimport psycopg2\nimport json\n\napp = Flask(__name__)\n\n# def get_db_connect():\n# # conn_string = \"host='db' port= '5432' dbname='web' user='root' password='pass'\"\n# # return psycopg2.connect(conn_string)\n\n# conn_string = psycopg2.connect( \n# database = os.environ.get('PG_DB'), #\"db\",\n# user = os.environ.get('PG_USER'), #\"root\",\n# password ='pass', #os.environ.get('PG_PASS'),\n# host = os.environ.get('DB_HOST'), #\"db\", \n# port = os.environ.get('PG_PORT') #\"5432\" \n# )\n # return psycopg2.connect(conn_string)\n # return psycopg2.connect(host=os.environ.get(database=\"web\", user='root', password='pass', host='127.0.0.1', port= '5432'))\n\n# def get_rec_from_db():\n# q=('SELECT ProductName, ProductPrice FROM meals ORDER BY RANDOM() LIMIT 1;')\n\n# conn= get_db_connect()\n# cursor= conn.cursor()\n# cursor.execute(q)\n# ml = cursor.fetchall()\n\n# conn.close()\n# return ml\n\ndef getVersion():\n conn = psycopg2.connect(\n database=\"web\", user='root', password='pass', host='db', port= '5432'\n )\n#Creating a cursor object using the cursor() method\n cursor = conn.cursor()\n\n#Executing an MYSQL function using the execute() method\n cursor.execute(\"SELECT mealname, mealprice FROM meals ORDER BY RANDOM() LIMIT 1\")\n\n# Fetch a single row using fetchone() method.\n data = cursor.fetchone()\n # print(\"Connection established to: \",data)\n\n return json.dumps(data)\n\n#Closing the connection\n conn.close()\n return data\n\n\n# def get_db_connect():\n\n# # return psycopg2.connect(host=os.environ.get('DB_HOST')\n# # return psycopg2.connect(port=os.environ.get('DB_PORT')\n# # return psycopg2.connect(user=os.environ.get('PG_USER')\n# # return psycopg2.connect(pass=os.environ.get('PG_PASS')\n# # return psycopg2.connect(host=os.environ.get('DB_HOST'), port, database, user, password)\n\n\n# def get_rec_from_db():\n# q = SELECT MealName, MealPrice FROM meals\n \n\n# conn = get_db_connect()\n# cursor = conn.cursor()\n# cursor.execute(q)\n# mr = cursor.fetchall()\n\n# conn.close()\n# return mr\n\n# meals = [\n# {\n# 'name' : 'Paneer Tikka Masala',\n# 'Cuisine' : 'Indian',\n# 'Price' : '$19.99'\n# },\n# {\n# 'name' : 'Chole Bhature',\n# 'Cuisine' : 'Indian',\n# 'Price' : '$16.99'\n# },\n# {\n# 'name' : 'Rajma Chawal',\n# 'Cuisine' : 'Indian',\n# 'Price' : '$14.99'\n# },\n# {\n# 'name' : 'Aloo Muter',\n# 'Cuisine' : 'Indian',\n# 'Price' : '$15.99'\n# },\n# {\n# 'name' : 'Dal Tadka',\n# 'Cuisine' : 'Indian',\n# 'Price' : '$10.99'\n# },\n# {\n# 'name' : 'Shahi Paneer',\n# 'Cuisine' : 'Indian',\n# 'Price' : '$19.99'\n# },\n# {\n# 'name' : 'Arrabiatta Pasta',\n# 'Cuisine' : 'Italian',\n# 'Price' : '$17.99'\n# },\n# {\n# 'name' : 'Alfredo Pasta',\n# 'Cuisine' : 'Italian',\n# 'Price' : '$14.99'\n# },\n# {\n# 'name' : 'Enchiladas',\n# 'Cuisine' : 'Mexican',\n# 'Price' : '$10.99'\n# },\n# {\n# 'name' : 'Tacos',\n# 'Cuisine' : 'Mexican',\n# 'Price' : 
'$4.99'\n# },\n# {\n# 'name' : 'Burritos',\n# 'Cuisine' : 'Mexican',\n# 'Price' : '$4.99'\n# },\n# {\n# 'name' : 'Nachos',\n# 'Cuisine' : 'Mexican',\n# 'Price' : '$3.99'\n# },\n# {\n# 'name' : 'Italian Cheese Pizza',\n# 'Cuisine' : 'Italian',\n# 'Price' : '$14.99'\n# },\n# {\n# 'name' : 'Cheese Burger',\n# 'Cuisine' : 'American',\n# 'Price' : '$6.99'\n# },\n# {\n# 'name' : 'Fries',\n# 'Cuisine' : 'American',\n# 'Price' : '$3.99'\n# }\n# ]\n\n\n# os.environ[\"API_ENDPOINT\"]='meal'\n# api_endpoint = os.environ.get(\"API_ENDPOINT\")\n\n@app.route('/')\ndef get_reco():\n # return get_rec_from_db()\n # random_choice = random.randint(0,15)\n # return meals[random_choice]\n return getVersion()\n\n\n\nif __name__ == '__main__':\n port = os.environ.get('API_PORT', 5000)\n app.run(host='0.0.0.0',port=port)\n \n #Amey Darwhekar\n ","repo_name":"SurbhiZambad2510/CloudComputing","sub_path":"FinalLab5/api/code/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14639552461","text":"def solution(drum):\n result = 0\n N = len(drum)\n\n board = [list(i) for i in drum]\n\n for c in range(len(board)):\n star_count = 0\n r = 0\n while True:\n point = board[r][c]\n\n if point == \"#\":\n r += 1\n elif point == \">\":\n c += 1\n elif point == \"<\":\n c -= 1\n else:\n if star_count < 1:\n star_count += 1\n r += 1\n else:\n break\n\n if r <= -1 or r >= N or c <= -1 or c >= N:\n break\n if r == (N - 1) and board[r][c] == \"#\":\n result += 1\n break\n if r == (N - 1) and board[r][c] == \"*\" and star_count <= 1:\n result += 1\n break\n return result\n\n\ndrum = [\"######\", \">#*###\", \"####*#\", \"#<#>>#\", \">#*#*<\", \"######\"]\n# drum = [\"******\", \">#*###\", \"####*#\", \"#<#>>#\", \">#*#*<\", \"######\"]\nprint(solution(drum))\n","repo_name":"hugehoo/problem-solving","sub_path":"2021/04DEC devmatch01.py","file_name":"04DEC devmatch01.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39337661027","text":"import aiohttp\nfrom typing import Coroutine\nfrom TelegramBot.classess import OrderInfo\nimport config as cfg\n\n\nclass Http:\n async def request(self, url, data=None, method='POST'):\n for i in range(3):\n try:\n async with aiohttp.request(method,\n f'http://localhost:{cfg.port}/{url}', json=data) as resp:\n if resp.status != 200:\n raise\n return await resp.json()\n except Exception as e:\n print(e)\n print(data)\n pass\n\n def execute_db(self, list_args: list) -> Coroutine:\n return self.request('execute_db',\n data={'list_args': list_args}, method='POST')\n\n\nclass HttpUsers(Http):\n\n async def add_admin(self, user_id):\n check_admin = await self.execute_db(['SELECT * FROM User WHERE user_id=?', [user_id]])\n print(check_admin)\n if len(check_admin) == 0:\n await self.execute_db(['INSERT INTO User VALUES(?,?,?)', [user_id, True, True]])\n else:\n await self.execute_db(['UPDATE User SET admin=? 
WHERE user_id=?', [True, user_id]])\n\n async def get_admins(self) -> list[int, str]:\n ans = await self.execute_db(['SELECT user_id FROM User WHERE admin',])\n return [i[0] for i in ans]\n\n\nclass HttpOrders(Http):\n\n async def get_orders(self, user_id: int) -> list[OrderInfo]:\n if user_id in cfg.admin_list:\n orders = await self.execute_db(['SELECT * FROM Orders'])\n else:\n orders = await self.execute_db(['SELECT * FROM Orders WHERE user_id=?', [user_id]])\n\n return [OrderInfo(*i) for i in orders]\n\n async def new_order(self, data: OrderInfo):\n await self.execute_db(['INSERT INTO Orders VALUES(?,?,?,?,?,?,?,?,?,?, ?)', list(data.__dict__.values())])\n\n async def update_order(self, order: OrderInfo):\n data = list(order.__dict__.values())[2:]\n data.append(order.uid_order)\n await self.execute_db(['UPDATE Orders SET region=?, type_com=?, section=?, rate=?, billing=?, pay=?, active=?, tx_hash=?, user_url=? WHERE uid_order=?', data])\n\nhttp_users = HttpUsers()\nhttp_orders = HttpOrders()\n","repo_name":"ilyaermi/AD_TG_Bot","sub_path":"api/http_api.py","file_name":"http_api.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28907880138","text":"import pytest\n# from scpipelines.funcs.processing import extract_parameter_from_fname\nimport panpipes.funcs as pnp\nfrom anndata import AnnData\nfrom muon import MuData\nimport pandas as pd\nimport numpy as np\n\n@pytest.fixture()\ndef mudata():\n yield MuData(\n {\n \"mod1\": AnnData(np.arange(0, 100, 0.1).reshape(-1, 10)),\n \"mod2\": AnnData(np.arange(101, 2101, 1).reshape(-1, 20)),\n }\n )\n\n@pytest.fixture()\ndef anndata():\n yield AnnData(np.arange(0, 50, 1).reshape(-1, 10), \n obs=pd.DataFrame(index=[f\"cell{i}\" for i in range(5)],\n data={'sample_id': ['a', 'a', 'b', 'b', 'c']}),\n var=pd.DataFrame(index=[f\"gene{i}\" for i in range(10)]))\n\n\n\n@pytest.fixture()\ndef anndata_with_obs():\n yield AnnData(np.arange(0, 50, 1).reshape(-1, 10), \n obs=pd.DataFrame(index=[f\"cell{i}\" for i in range(5)],\n data={'sample_id': ['a', 'a', 'b', 'b', 'c'],\n 'batch': [1,1,2,2,3]}),\n var=pd.DataFrame(index=[f\"gene{i}\" for i in range(10)])\n )\n\n@pytest.fixture()\ndef anndata_with_var():\n yield AnnData(np.arange(0, 50, 1).reshape(-1, 10), \n obs=pd.DataFrame(index=[f\"cell{i}\" for i in range(5)],\n data={'sample_id': ['a', 'a', 'b', 'b', 'c']}),\n var=pd.DataFrame(index=[f\"gene{i}\" for i in range(10)],\n data={'feature_type': \"Gene Expression\",\n 'new_index': [f\"newgene{i}\" for i in range(10)]}\n )\n )\n\n\ndef test_extract_parameter_from_fname():\n assert pnp.pp.extract_parameter_from_fname(\"test_res0.6_cluster.txt.gz\", \"res\", \"test\") == 0.6\n assert pnp.pp.extract_parameter_from_fname(\"test_res1.0_cluster.txt.gz\", \"res\", \"test\") == 1\n assert pnp.pp.extract_parameter_from_fname(\"test_methodeuclidean_cluster.txt.gz\", \"method\", \"test\") == \"euclidean\"\n assert pnp.pp.extract_parameter_from_fname(\"method_methodeuclidean_cluster.txt.gz\", \"method\", \"method\") == \"euclidean\"\n\ndef test_is_float_try():\n assert pnp.pp.is_float_try(\"1\") is True\n assert pnp.pp.is_float_try(\"cheese\") is False\n\ndef test_splitall():\n assert pnp.pp.splitall(\"path\") == ['path']\n assert pnp.pp.splitall(\"path/to/cheese\") == ['path', 'to', 'cheese']\n assert pnp.pp.splitall(\"/path/to/cheese\") == ['path', 'to', 'cheese']\n\ndef test_test_file_or_value():\n assert pnp.pp.test_file_or_value(4) == \"value\"\n assert 
pnp.pp.test_file_or_value(__file__) == \"file\"\n with pytest.raises(ValueError):\n pnp.pp.test_file_or_value(\"cheese\")\n\ndef test_which_ind():\n assert pnp.pp.which_ind([True, True, False]) == [0,1]\n assert pnp.pp.which_val([True, True, False], [1,2,3]) == [1,2]\n\ndef test_check_for_bool():\n assert pnp.pp.check_for_bool('True') is True\n assert pnp.pp.check_for_bool(True) is True\n assert pnp.pp.check_for_bool('False') is False\n assert pnp.pp.check_for_bool(False) is False\n with pytest.raises(TypeError):\n pnp.pp.check_for_bool('cheese')\n with pytest.raises(TypeError):\n pnp.pp.check_for_bool(4)\n\ndef test_intersection():\n assert pnp.pp.intersection(['a', 'b', 'c'], ['c', 'd', 'e']) == ['c']\n\n\ndef assert_match_anndata(ad1, ad2):\n assert np.array_equal(ad1.X, ad2.X)\n assert all(ad1.obs == ad2.obs)\n assert all(ad1.var == ad2.var)\n\ndef test_concat_adatas(anndata):\n ad1 = anndata\n ad1 = ad1.copy()\n ad1.obs['sample_id'] = 'a'\n ad2 = anndata\n ad2 = ad2.copy()\n ad2.obs['sample_id'] = 'b'\n # this is what the merged version should look like :-) \n adata_full = AnnData(\n np.concatenate([np.arange(0, 50, 1),np.arange(0, 50, 1)]).reshape(-1, 10),\n obs = pd.DataFrame(index=['cell0-a', 'cell1-a', 'cell2-a', 'cell3-a', 'cell4-a', \n 'cell0-b', 'cell1-b','cell2-b', 'cell3-b', 'cell4-b'],\n data={'sample_id': ['a', 'a', 'a', 'a', 'a', \n 'b', 'b', 'b', 'b', 'b']}, dtype=\"category\"),\n var=pd.DataFrame(index=[f\"gene{i}\" for i in range(10)])\n )\n merg_data = pnp.pp.concat_adatas([ad1, ad2],\n batch_key=\"sample_id\", \n batch_categories=['a', 'b'], \n join_type=\"inner\")\n # testing the case where there are two anndatas to be merged\n assert np.array_equal(merg_data.X, adata_full.X)\n assert_match_anndata(merg_data, adata_full)\n # testing the case where there is only 1 anndata sent to function\n assert_match_anndata(pnp.pp.concat_adatas([ad1], batch_key=\"sample_id\", \n batch_categories=['a']), ad1)\n # test the corner case where an anddata is passed\n with pytest.raises(TypeError):\n pnp.pp.concat_adatas(ad1, batch_key=\"sample_id\", \n batch_categories=['a'])\n\ndef test_merge_with_adata_obs(anndata, anndata_with_obs):\n new_obs = pd.DataFrame(data={'sample_id': ['a', 'b', 'c'],\n 'batch': [1, 2, 3]})\n # test exceptions\n with pytest.raises(TypeError):\n pnp.pp.merge_with_adata_obs(\"cheese\", new_obs, \"sample_id\")\n with pytest.raises(TypeError):\n pnp.pp.merge_with_adata_obs(anndata, \"cheese\", \"sample_id\")\n with pytest.raises(KeyError):\n pnp.pp.merge_with_adata_obs(anndata, new_obs, \"cheese\")\n with pytest.raises(KeyError):\n pnp.pp.merge_with_adata_obs(anndata, new_obs, \"batch\")\n anndata.obs = pnp.pp.merge_with_adata_obs(anndata, new_obs, \"sample_id\")\n assert_match_anndata(anndata, anndata_with_obs)\n\n\ndef test_merge_with_adata_obs_inplace(anndata, anndata_with_obs):\n new_obs = pd.DataFrame(data={'sample_id': ['a', 'b', 'c'],\n 'batch': [1, 2, 3]})\n pnp.pp.merge_with_adata_obs(anndata, new_obs, \"sample_id\", inplace=True)\n assert_match_anndata(anndata, anndata_with_obs)\n\ndef test_remove_unused_categories():\n df = pd.DataFrame(data={\"a1\":['a', 'b', 'c'], 'a2':['c', 'd', 'e']}, dtype='category')\n df = df.iloc[0:2,:]\n pnp.pp.remove_unused_categories(df)\n assert df['a1'].cat.categories.tolist() == ['a', 'b']\n assert df['a2'].cat.categories.tolist() == ['c', 'd']\n with pytest.raises(TypeError):\n pnp.pp.remove_unused_categories(\"cheese\")\n\ndef test_add_var_mtd(anndata, anndata_with_var):\n new_var = pd.DataFrame(index=[f\"gene{i}\" for 
i in range(10)],\n data={'feature_type': \"Gene Expression\",\n 'new_index': [f\"newgene{i}\" for i in range(10)]}\n )\n pnp.pp.add_var_mtd(anndata, new_var, left_on=\"index\", right_on=\"new_index\")\n assert_match_anndata(anndata, anndata_with_var)\n\n\ndef test_add_var_mtd_types(anndata):\n with pytest.raises(TypeError):\n pnp.pp.add_var_mtd(anndata, 'cheese')\n new_var = pd.DataFrame(index=[f\"gene{i}\" for i in range(10)],\n data={'feature_type': \"Gene Expression\",\n 'new_index': [f\"newgene{i}\" for i in range(10)]}\n )\n with pytest.raises(TypeError):\n pnp.pp.add_var_mtd('cheese', new_var)\n # check mismatch var df raises coorect errors\n\n\n# def test_add_var_mtd_warnings(anndata, anndata_with_var):\n# ad = anndata.copy()\n# new_var = pd.DataFrame(index=[f\"gene{i}\" for i in range(9)],\n# data={'feature_type': \"Gene Expression\",\n# 'new_index': [f\"newgene{i}\" for i in range(9)]})\n# # with pytest.warns(UserWarning):\n# # pnp.pp.add_var_mtd(ad, new_var)\n\n\ndef test_update_var_index(anndata):\n new_index = [f\"newgene{i}\" for i in range(10)]\n anndata.var['new_index'] = new_index\n pnp.pp.update_var_index(anndata, \"new_index\")\n assert all(anndata.var.index == new_index)\n\n\n","repo_name":"DendrouLab/panpipes","sub_path":"tests/test_processing.py","file_name":"test_processing.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"81"} +{"seq_id":"14110182069","text":"import os\nfrom pathlib import Path\nimport argparse\nimport pandas as pd\n\nPROT_PREP = '/Program Files/Schrodinger2020-4/utilities/prepwizard'\nPROT_ASSIGN = '/Program Files/Schrodinger2020-4/utilities/protassign'\nPROT_IMPREF = '/Program Files/Schrodinger2020-4/utilities/impref'\n\nLIG__PREP = '/Program Files/Schrodinger2020-4/ligprep'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', default = 'protein', type=str, help='schrodinger mode : protein or ligand')\nparser.add_argument('--input_table', default='target_pdb.csv', type=str)\nparser.add_argument('--input_ligand', default='ligand.smi', type=str)\nargs = parser.parse_args() \n\nif args.mode == 'protein':\n # Open the .tsv file of Targets and PDB ids\n df = pd.read_csv(args.input_table, sep=',', header=0)\n df = df.dropna(subset = ['PDB'])\n gene_list = df['Target'].values.tolist()\n pdb_list = df['PDB'].values.tolist()\n pdb_valid = []\n\n # Download the PDB files (pdb-tools must be installed)\n print(\"Number of PDB names: \", len(pdb_list))\n for pdb, gene in zip(pdb_list, gene_list):\n PDB_PATH = f'{pdb}.pdb'\n #pdb = pdb.replace(\" \", \"\")\n if pdb == 'SWISS': \n print(f'This target {gene} may be available in SWISS. 
Please manually download and put it in your schrodinger folder')\n continue\n if not Path(PDB_PATH).is_file():\n try: \n os.system(f'pdb_fetch {pdb} > {PDB_PATH}')\n except Exception as e: \n print(f\"Cannot fetch {pdb}\", e)\n os.remove(PDB_PATH)\n if Path(PDB_PATH).is_file():\n pdb_valid.append(pdb)\n\n # Run Schrodinger's Protein Prep Wizard\n print(\"Number of valid PDBs: \", len(pdb_valid))\n #os.chdir('/opt/')\n # print(os.getcwd())\n for pdb in pdb_valid:\n PDB_PATH = f'{pdb}.pdb'\n PREP_OUT_PATH = f'{pdb}_prep.mae'\n ASSIGN_OUT_PATH = f'{pdb}_assign.mae'\n LOG_PATH = f'{pdb}.log'\n os.system(f'\"{PROT_PREP}\" -f 3 -fillsidechains -fillloops -delwater_hbond_cutoff 5 -minimize_adj_h {PDB_PATH} {PREP_OUT_PATH}')\n while os.path.isfile(PREP_OUT_PATH) == False:\n f = open(LOG_PATH)\n data = f.read()\n if 'Error' in data:\n print(f'{pdb}.pdb makes error. Schrodinger will skip this target')\n break\n if os.path.isfile(PREP_OUT_PATH) == True:\n break\n if os.path.isfile(PREP_OUT_PATH) == True:\n os.system(f'\"{PROT_ASSIGN}\" -propka_pH 7.0 -minimize {PREP_OUT_PATH} {ASSIGN_OUT_PATH}')\n while os.path.isfile(ASSIGN_OUT_PATH) == False:\n if os.path.isfile(ASSIGN_OUT_PATH) == True:\n break\n if os.path.isfile(ASSIGN_OUT_PATH) == True:\n os.system(f'\"{PROT_IMPREF}\" -f 3 {ASSIGN_OUT_PATH}')\n \nif args.mode == 'ligand':\n # Run Schrodinger's LigPrep\n LIG_PATH = args.input_ligand\n OUT_PATH = f'{args.input_ligand}_prep.mae'\n os.system(f'\"{LIG__PREP}\" -epik -ph 7.0 -pht 2.0 -s 1 -bff 16 -ismi {LIG_PATH} -omae {OUT_PATH}')\n","repo_name":"minjaeju/Schrodinger-Program-Operation","sub_path":"schrodinger.py","file_name":"schrodinger.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41830677708","text":"# !/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExample state machine: A global timer triggers passage through two infinite loops. It is\ntriggered in the first state, but begins measuring its 3-second Duration\nafter a 1.5s onset delay. During the onset delay, an infinite loop\ntoggles two port LEDs (Port1, Port3) at low intensity. When the timer begins measuring,\nit sets port 2 LED to maximum brightness, and triggers transition to a second infinite loop with brighter port 1+3 LEDs.\nWhen the timer's 3 second duration elapses, Port2LED is returned low,\nand a GlobalTimer1_End event occurs (handled by exiting the state machine).\n\n\nExample adapted from Josh Sanders' original version on Sanworks Bpod repository\n\"\"\"\nfrom pybpodapi.protocol import Bpod, StateMachine\n\n\"\"\"\nRun this protocol now\n\"\"\"\n\nmy_bpod = Bpod()\n\nsma = StateMachine(my_bpod)\n\n# Set global timer 1 for 3 seconds, following a 1.5 second onset delay after trigger. 
Link to LED of port 2.\nsma.set_global_timer(timer_id=1, timer_duration=3, on_set_delay=1.5, channel=Bpod.OutputChannels.PWM2, on_message=255)\n\nsma.add_state(\n\tstate_name='TimerTrig', # Trigger global timer\n\tstate_timer=0,\n\tstate_change_conditions={Bpod.Events.Tup: 'Port1Lit_Pre'},\n\toutput_actions=[('GlobalTimerTrig', 1)])\n\nsma.add_state(\n\tstate_name='Port1Lit_Pre',\n\tstate_timer=.25,\n\tstate_change_conditions={Bpod.Events.Tup: 'Port3Lit_Pre', Bpod.Events.GlobalTimer1_Start: 'Port1Lit_Post'},\n\toutput_actions=[(Bpod.OutputChannels.PWM1, 16)])\n\nsma.add_state(\n\tstate_name='Port3Lit_Pre',\n\tstate_timer=.25,\n\tstate_change_conditions={Bpod.Events.Tup: 'Port1Lit_Pre', Bpod.Events.GlobalTimer1_Start: 'Port3Lit_Post'},\n\toutput_actions=[(Bpod.OutputChannels.PWM3, 16)])\n\nsma.add_state(\n\tstate_name='Port1Lit_Post',\n\tstate_timer=.25,\n\tstate_change_conditions={Bpod.Events.Tup: 'Port3Lit_Post', Bpod.Events.GlobalTimer1_End: 'exit'},\n\toutput_actions=[(Bpod.OutputChannels.PWM1, 255)])\n\nsma.add_state(\n\tstate_name='Port3Lit_Post',\n\tstate_timer=.25,\n\tstate_change_conditions={Bpod.Events.Tup: 'Port1Lit_Post', Bpod.Events.GlobalTimer1_End: 'exit'},\n\toutput_actions=[(Bpod.OutputChannels.PWM3, 255)])\n\nmy_bpod.send_state_machine(sma)\n\nmy_bpod.run_state_machine(sma)\n\nprint(\"Current trial info: {0}\".format(my_bpod.session.current_trial))\n\nmy_bpod.close()","repo_name":"pybpod/pybpod-api","sub_path":"examples/state_machine_examples/global_timer_start_and_end_events.py","file_name":"global_timer_start_and_end_events.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"1806321127","text":"import turtle\r\n\r\nPixel_Size = 15\r\n\r\ndef initialisation():\r\n    turtle.speed(0)\r\n    turtle.tracer(False)\r\n    turtle.up()\r\n    turtle.goto(-300, -300)\r\n    turtle.pencolor(\"black\")\r\n    turtle.fillcolor(\"pink\")\r\n    turtle.pensize(1)\r\n    \r\ndef draw():\r\n    i = 0\r\n    turtle.begin_fill()\r\n    turtle.down()\r\n    while (i < 4):\r\n        turtle.forward(Pixel_Size)\r\n        turtle.right(90)\r\n        i += 1\r\n    turtle.up()\r\n    turtle.end_fill()\r\n    turtle.forward(Pixel_Size)\r\n    \r\n    \r\ndef pixart_move(row, col):\r\n    xcor = -300\r\n    ycor = 300\r\n    turtle.goto(xcor, ycor)\r\n    xcor += Pixel_Size * col\r\n    ycor += Pixel_Size * row\r\n    turtle.goto(xcor, ycor)\r\n    \r\ndef draw_row(number_pixels):\r\n    for i in range(20):\r\n        turtle.penup()\r\n        turtle.goto(0, Pixel_Size*i)\r\n        turtle.pendown()\r\n        \r\n        for x in range(number_pixels):\r\n            if (x + i) % 2 == 0:\r\n                color = \"black\"\r\n            else:\r\n                color = \"red\"\r\n            turtle.fillcolor(color)\r\n            draw()\r\n    \r\n\r\n    \r\n\r\ndef main():\r\n    initialisation()\r\n    draw_row(20)\r\n    input(\"END: \")\r\n    \r\nmain()","repo_name":"NapalmStryke/GCIS123-All-Codes","sub_path":"pixart.py","file_name":"pixart.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"6017029147","text":"# Import libraries\nimport math as m\nimport numpy as np\nfrom numba import njit\nfrom random import random\n\n\n# maximum resistance values\n@njit(cache=True)\ndef _Qmax_api_clay(\n    Su: float,\n) -> float:\n    # Unit end-bearing [kPa]\n    return 9 * Su\n\n\n@njit(cache=True)\ndef _Qmax_api_sand(\n    sig: float,\n    delta: float,\n) -> float:\n    # important variables\n    delta_table = np.array([0, 15, 20, 25, 30, 35, 100], dtype=np.float32)\n    Nq_table = np.array([8, 8, 12, 20, 40, 
50, 50], dtype=np.float32)\n    Qmax_table = np.array([1900, 1900, 2900, 4800, 9600, 12000, 12000], dtype=np.float32)\n\n    Nq = np.interp(delta, delta_table, Nq_table)\n    Qmax = np.interp(delta, delta_table, Qmax_table)\n\n    # Unit end-bearing [kPa]\n    return min(Qmax, sig * Nq)\n\n\n@njit(cache=True)\ndef _fmax_api_clay(\n    sig: float,\n    Su: float,\n) -> float:\n    \"\"\"Creates the maximum skin friction.\n\n    The methodology follows the API clay method of axial capacity found in .\n\n    Parameters\n    ----------\n    sig : float\n        vertical effective stress in kPa.\n    Su : float\n        undrained shear strength in kPa.\n\n    Returns\n    -------\n    float\n        unit skin friction in kPa.\n    \"\"\"\n    # important variables\n    if sig == 0.0:\n        psi = Su / 0.001\n    else:\n        psi = Su / sig\n\n    if psi > 1.0:\n        alpha = min(0.5 * psi ** (-0.25), 1.0)\n    else:\n        alpha = min(0.5 * psi ** (-0.5), 1.0)\n\n    # Unit skin friction [kPa]\n    return alpha * Su\n\n\n# SPRING FUNCTIONS --------------------------------------------\n\n# API sand function\n@njit(cache=True)\ndef _fmax_api_sand(\n    sig: float,\n    delta: float,\n    K: float = 0.8,\n) -> float:\n    \"\"\"Creates the maximum skin friction.\n\n    The methodology follows the API sand method of axial capacity found in .\n\n    Parameters\n    ----------\n    sig : float\n        vertical effective stress in kPa.\n    delta: float\n        interface friction angle in degrees\n    K: float\n        coefficient of lateral pressure.\n        (0.8 for open-ended piles and 1.0 for closed-ended)\n\n    Returns\n    -------\n    float\n        unit skin friction in kPa.\n    \"\"\"\n\n    # important variables\n    delta_table = np.array([0, 15, 20, 25, 30, 35, 100], dtype=np.float32)\n    fs_max_table = np.array([47.8, 47.8, 67, 81.3, 95.7, 114.8, 114.8], dtype=np.float32)\n\n    # limit unit skin friction according to API ref page 59\n    fs_max = np.interp(delta, delta_table, fs_max_table)\n\n    # Unit skin friction [kPa]\n    return min(fs_max, K * sig * m.tan(delta * m.pi / 180.0))\n","repo_name":"TchilDill/openpile","sub_path":"src/openpile/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"23316187899","text":"import requests\nfrom bs4 import BeautifulSoup as Bs\nimport fake_useragent\nfrom selenium import webdriver\nimport time\nimport pandas as pd\n\ndef data_get_selenium(url):\n    driver = webdriver.Chrome()\n    try:\n        driver.get(url=url)\n        time.sleep(7)\n        with open('mechta_selenium.html','w',encoding='utf-8') as f:\n            f.write(driver.page_source)\n    except Exception as ex:\n        print(ex)\n    finally:\n        driver.close()\n        driver.quit()\n    \ndef data_for_uniq(url):\n    driver = webdriver.Chrome()\n    try:\n        driver.get(url=url)\n        time.sleep(5)\n        with open('mechta_selenium_uniq.html','w',encoding='utf-8') as f:\n            f.write(driver.page_source)\n    except Exception as ex:\n        print(ex)\n    finally:\n        
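# release the browser session even if the page failed to load\n        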
Type the date in this format YYYY-MM-DD: \")\n\nURL = \"https://www.billboard.com/charts/hot-100/2000-08-12\"\nresponse = requests.get(url=URL)\nsoup = BeautifulSoup(response.text, 'html.parser')\nsong_names_spans = soup.select(\"li ul li h3\")\nsong_names = [song.getText().strip() for song in song_names_spans]\npprint(song_names)\n","repo_name":"leviwilsonestevez/100Days_Python_Course","sub_path":"Intermediate/com.learn.python.module46/BilboardHits.py","file_name":"BilboardHits.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37294294076","text":"#!/usr/bin/env python\nimport os\nimport subprocess\nimport webbrowser\nfrom contextlib import contextmanager\n\nimport pytest\nfrom flask.ext.migrate import MigrateCommand\nfrom flask.ext.script import Manager, Server\nfrom flask.ext.script.commands import ShowUrls, Clean\n\nfrom scrappyr.app import create_app\nfrom scrappyr.common import db\n\n\n# Global constants\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'scrappyr', 'static')\nKARMA_EXEC = os.path.join(STATIC_ROOT, 'node_modules',\n 'karma-cli', 'bin', 'karma')\nKARMA_CONFIG = os.path.join(STATIC_ROOT, 'karma.conf.js')\nWEBPACK_DEV_SERVER_EXEC = os.path.join(STATIC_ROOT, 'node_modules',\n '.bin', 'webpack-dev-server')\n\n\nmanager = Manager(create_app)\n\n# Options passed to `create_app`.\nmanager.add_option('-c', '--config-file',\n help=\"Configuration file (*.py or *.json) for Flask app.\")\nmanager.add_option('-d', '--db-uri',\n help=\"SQLAlchemy database URI.\")\nmanager.add_option('-w', '--with-webpack-dev-server', action='store_true',\n help=\"Configure Flask app to run with webpack dev server\")\n\n\nmanager.add_command('server', Server())\nmanager.add_command('db', MigrateCommand)\nmanager.add_command('show-urls', ShowUrls())\nmanager.add_command('clean', Clean())\n\n\nclass StartAndOpenServer(Server):\n\n help = description = (\"Runs the Flask development server i.e. app.run() \"\n \"and opens browser.\")\n\n def __call__(self, *args, **kwargs):\n webbrowser.open('http://{host}:{port}/'.format(**kwargs))\n super().__call__(*args, **kwargs)\n\nmanager.add_command('start', StartAndOpenServer())\n\n\n@manager.shell\ndef make_shell_context():\n \"\"\"Open ipython with several default imports in the context of the app.\"\"\"\n return dict(app=manager.app, db=db)\n\n\n@manager.option('-s', '--skip-coverage', dest='skip_coverage',\n action='store_true', help=\"Display test-coverage summary\")\ndef test(skip_coverage=False):\n \"\"\"Run python test suite.\n\n If you want to run individual tests, run py.test directly. 
If you want to\n debug a test, turn off output capture with `-s`:\n\n py.test -s path/to/test.py::TestClass::test_method\n \"\"\"\n opts = ['.']\n if not skip_coverage:\n opts.extend(['--cov', 'scrappyr', '--cov-report', 'term-missing'])\n\n # Execute command in subprocess instead of `pytest.main(opts)` since the\n # `scrappyr` imports in this file execute before test execution and\n # so reports of coverage display imported code as un-covered.\n cmd = ['py.test'] + opts\n with temp_working_directory(PROJECT_ROOT):\n run_command(cmd)\n\n\n@manager.command\ndef js_test_server():\n \"\"\"Run Javascript test server.\n\n Unlike the backend tests, this runs a server that reruns tests when files\n are edited.\n \"\"\"\n with temp_working_directory(STATIC_ROOT):\n run_subprocess([KARMA_EXEC, 'start', KARMA_CONFIG],\n exit_message=\"\\nQuitting JS test server...\")\n\n\n@manager.command\ndef webpack_dev_server():\n \"\"\"Run webpack dev server to sync app bundle.\"\"\"\n with temp_working_directory(STATIC_ROOT):\n run_subprocess([WEBPACK_DEV_SERVER_EXEC, '--hot', '--inline'],\n exit_message=\"\\nQuitting webpack dev server...\")\n\n\ndef run_command(args_list):\n proc = subprocess.Popen(args_list)\n proc.communicate()\n\n\ndef run_subprocess(args_list, exit_message=\"\\nQuitting...\"):\n try:\n run_command(args_list)\n except KeyboardInterrupt:\n print(exit_message)\n\n\n@contextmanager\ndef temp_working_directory(path):\n original_dir = os.path.abspath(os.path.curdir)\n os.chdir(path);\n try:\n yield\n finally:\n os.chdir(original_dir)\n\n\nif __name__ == '__main__':\n manager.run()\n","repo_name":"tonysyu/scrappyr","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41199302513","text":"import unittest\n\nfrom django.core.exceptions import ValidationError\n\nfrom localflavor.generic import validators\n\n\nclass TestVATINValidator(unittest.TestCase):\n validator = validators.VATINValidator()\n\n VALID_VATIN = 'DE284754038'\n\n def test_valid_vatin(self):\n self.validator(self.VALID_VATIN)\n\n def test_invalid_vatin(self):\n with self.assertRaises(ValidationError) as cm:\n self.validator('DE99999999')\n e = cm.exception\n self.assertIn(\"DE99999999 is not a valid VAT identification number.\", e.messages)\n\n def test_invalid_country_code(self):\n with self.assertRaises(ValidationError) as cm:\n self.validator('XX99999999')\n e = cm.exception\n self.assertIn(\"XX is not a valid country code.\", e.messages)\n","repo_name":"django/django-localflavor","sub_path":"tests/test_generic/test_validators.py","file_name":"test_validators.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":774,"dataset":"github-code","pt":"81"} +{"seq_id":"23316187899","text":"import requests\nfrom bs4 import BeautifulSoup as Bs\nimport fake_useragent\nfrom selenium import webdriver\nimport time\n\ndef data_get_selenium(url):\n driver = webdriver.Chrome()\n try:\n driver.get(url=url)\n time.sleep(7)\n with open('mechta_selenium.html','w',encoding='utf-8') as f:\n f.write(driver.page_source)\n except Exception as ex:\n print(ex)\n finally:\n driver.close()\n driver.quit()\n \ndef data_for_uniq(url):\n driver = webdriver.Chrome()\n try:\n driver.get(url=url)\n time.sleep(5)\n with open('mechta_selenium_uniq.html','w',encoding='utf-8') as f:\n f.write(driver.page_source)\n except Exception as ex:\n print(ex)\n finally:\n 
driver.close()\n driver.quit()\n\ndef get_stop_flag(url):\n with open('mechta_selenium.html',encoding='utf-8') as f:\n src = f.read()\n soup = Bs(src,'lxml')\n flag = soup.find_all(class_ = 'q-mb-lg text-color3')\n if len(flag) > 0:\n return True\n else:\n return False\n \ndef read_local_html(html):\n urls = []\n with open(html,encoding='utf-8') as f:\n src = f.read()\n soup = Bs(src,'lxml')\n\n all_urls = soup.find_all('a')\n for items in all_urls:\n urls_text = items.get('href')\n if 'https://www.mechta.kz/product/' in str(urls_text):\n urls.append(urls_text)\n return urls\n\ndef data_get_from_requsets(html):\n with open(html,encoding='utf-8') as f:\n src = f.read()\n soup = Bs(src,'lxml')\n\n name = soup.find_all(class_='text-ts5')\n price = soup.find_all(class_='text-bold text-ts5 text-color1')\n return [name[0].text,price[0].text]\n\n \ndef get_sku(excel):\n df = pd.read_excel(excel)\n return df\n \ndef get_all_info(excel):\n df = get_sku(excel)\n all_info = []\n for product in df['Model']:\n url = f'https://www.mechta.kz/search/?q={product}&setcity=al'\n data_for_uniq(url)\n if get_stop_flag():\n all_info.append({'SKU':f'{product}','links':0,'name':0,'price':0,'exist':'no'})\n else:\n info = data_get_from_requsets('mechta_selenium_uniq.html')\n all_info.append({'SKU':f'{product}','links':url,'name':info[0],'price':info[1],'exist':'yes'})\n print(all_info)\n\n# get_sku('All SKU Monitors.xlsx')\nget_all_info('All SKU Monitors.xlsx')\n# a = get_stop_flag('https://www.mechta.kz/search/?q=iphone&setcity=al&page=19')\n# print(a) \n# data_for_uniq('https://www.mechta.kz/product/televizor-samsung-led-ue55au7100uxce-uhd-smart/')\n# info = data_get_from_requsets('mechta_selenium_uniq.html')\n# data_get_selenium('https://www.mechta.kz/search/?q=iphone&setcity=al&page=19')\n# a = read_local_html('mechta_selenium.html')\n# for links in a:\n# data_get_from_requsets(links)\n","repo_name":"KadyrbekAnalyst/Parser","sub_path":"pars_mechta.py","file_name":"pars_mechta.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14641029271","text":"# CodeByte\ndef FirstFactorial(num):\n\n # code goes here\n i, o = int(num), 1\n while 1 <= i:\n o = o * i\n i = i - 1\n return o\n\n# keep this function call here \nprint(FirstFactorial(input()))\n","repo_name":"iamgithum/student","sub_path":"Python/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73443208586","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 8 13:49:29 2020\r\n\r\n@author: cmchico\r\n\"\"\"\r\n\r\nimport edslab_prophet as ed\r\n\r\nimport dask.bag as db\r\n\r\n\r\n\r\ndef parallel_model_prophet(series, scenario,holi_set, dev_length = 729, period='1 days', horizon = '1 days'): \r\n \r\n# If choosing among holiday lists, get only 1 best\r\n holi_set_no = range(len(holi_set))\r\n dbseries = db.from_sequence([series for i in holi_set_no])\r\n dbhol = db.from_sequence(holi_set)\r\n\r\n dbmaster = dbseries.map(ed.model_prophet,dbhol,weekly=0,monthly=0, \r\n dev_length=dev_length, period=period, horizon=horizon)\r\n\r\n model = dbmaster.compute()\r\n \r\n rmse_list = [model[i][2]['rmse'].mean() for i in holi_set_no]\r\n i = rmse_list.index(min(rmse_list))\r\n\r\n holiday = holi_set[i]\r\n select = (scenario.hol_num == i) & ~(scenario.week + scenario.mon == 0)\r\n \r\n# Run only for selected 
holiday\r\n scenario_filter = scenario[select].reset_index(drop=True)\r\n\r\n dbseries = db.from_sequence(series for i in range(scenario_filter.shape[0]))\r\n dbhol = db.from_sequence(holiday for i in scenario_filter.hol_num)\r\n dbweek = db.from_sequence(week for week in scenario_filter.week)\r\n dbmon = db.from_sequence(mon for mon in scenario_filter.mon)\r\n\r\n dbmaster = dbseries.map(ed.model_prophet,dbhol,dbweek,dbmon,dev_length=dev_length,period=period, horizon = horizon)\r\n \r\n model = dbmaster.compute()\r\n\r\n \r\n return scenario_filter,model\r\n\r\ndef parallel_model_prophet_oneholi(series, scenario,holiday,dev_length = 729, period='1 days', horizon = '1 days'): \r\n \r\n# If choosing among holiday lists, get only 1 best\r\n dbseries = db.from_sequence(series for i in range(scenario.shape[0]))\r\n dbhol = db.from_sequence(holiday for i in range(scenario.shape[0]))\r\n dbweek = db.from_sequence(week for week in scenario.week)\r\n dbmon = db.from_sequence(mon for mon in scenario.mon)\r\n\r\n dbmaster = dbseries.map(ed.model_prophet,dbhol,dbweek,dbmon,dev_length=dev_length,period=period, horizon = horizon)\r\n \r\n model = dbmaster.compute()\r\n\r\n \r\n return model","repo_name":"roadmaptoawesomeness/Project-Seer","sub_path":"Development/edslab_prophet_dask.py","file_name":"edslab_prophet_dask.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37713016698","text":"# 寻找重复的子树\nfrom typing import List, Optional\nfrom collections import Counter\nfrom collections import defaultdict\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n # def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:\n # res = []\n # # count = Counter()\n # count = defaultdict(int)\n #\n # def serialize(node):\n # if not node:\n # return ''\n # string = str(node.val) + ',' + serialize(node.left) + ',' + serialize(node.right) # 两个逗号不能去掉\n # count[string] += 1\n # if count[string] == 2: # 保证不管重复几次,只记录一次\n # res.append(node)\n # return string\n #\n # serialize(root)\n # return res\n\n # def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:\n # res = []\n # record = set()\n #\n # def serialize(node):\n # if not node:\n # return ''\n # s = str(node.val) + ',' + serialize(node.left) + ',' + serialize(node.right)\n # if s in record:\n # res.append(node)\n # else:\n # record.add(s)\n # return s\n #\n # serialize(root)\n # return res\n\n # def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:\n # dic = defaultdict(int)\n # res = []\n #\n # def serialize(root):\n # if not root:\n # return ''\n # s = str(root.val) + ',' + serialize(root.left) + ',' + serialize(root.right)\n # dic[s] += 1\n # if dic[s] == 2:\n # res.append(root)\n # return s\n # serialize(root)\n # return res\n\n def findDuplicateSubtrees(self, root: Optional[TreeNode]) -> List[Optional[TreeNode]]:\n dic = defaultdict(int)\n res = []\n\n def serialize(root):\n if not root:\n return ''\n s = str(root.val) + ',' + serialize(root.left) + ',' + serialize(root.right)\n dic[s] += 1\n if dic[s] == 2:\n res.append(root)\n return s\n\n serialize(root)\n return res\n","repo_name":"BruceHi/leetcode","sub_path":"month6/findDuplicateSubtrees.py","file_name":"findDuplicateSubtrees.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} 
+{"seq_id":"21299091305","text":"n, k = map(int,input().split()) # n: 동전개수, k: 목표금액\r\ncoin = [0] * n\r\n\r\nfor i in range(n):\r\n coin[i] = int(input())\r\n\r\ncnt = 0 # 동전 수\r\n\r\nfor i in range(n - 1, -1, -1): # n - 1 ~ 0 역순으로 반복\r\n if coin[i] <= k: # k 보다 동전 가치가 작거나 같으면\r\n cnt += int(k / coin[i])\r\n k = k % coin[i]\r\n\r\nprint(cnt)\r\n","repo_name":"rhoeunbin/Algorithm","sub_path":"백준/Silver/11047. 동전 0/동전 0.py","file_name":"동전 0.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3588080289","text":"#\n# @lc app=leetcode id=41 lang=python3\n#\n# [41] First Missing Positive\n#\n\n# @lc code=start\nclass Solution:\n def firstMissingPositive(self, nums: List[int]) -> int:\n res = 1\n\n for i in range(len(nums)):\n nums[i] = nums[i] - 1\n\n res = min(res, min(nums))\n\n return res\n \n# @lc code=end\n\n","repo_name":"henry202-TENG/LeetCode","sub_path":"41.first-missing-positive.py","file_name":"41.first-missing-positive.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6202391667","text":"'''A simple example to how handle a Child Window '''\r\n\r\nfrom selenium import webdriver\r\nimport time\r\n\r\ndrive = webdriver.Chrome(r\"C:\\Users\\pieri\\PycharmProjects\\appPython\\Selenium\\chromedriver.exe\")\r\ndrive.get(\"https://the-internet.herokuapp.com/windows\")\r\n\r\ndrive.find_element_by_link_text(\"Click Here\").click()\r\nprint(drive.find_element_by_xpath(\"//div/h3\").text)\r\n\r\nnewWindow = drive.window_handles[1]\r\ndrive.switch_to.window(newWindow)\r\nprint(drive.find_element_by_xpath(\"//div/h3\").text)\r\ndrive.close()\r\ndrive.switch_to.window(drive.window_handles[0])\r\n\r\nassert \"Opening a new window\" == drive.find_element_by_tag_name(\"h3\").text\r\n\r\n","repo_name":"pierimoreno/selenium_webdriverPractice","sub_path":"childWindow.py","file_name":"childWindow.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8477278937","text":"import sys\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType\nimport pyspark.sql.functions as F\n\n\ndef main(file) -> None:\n spark = SparkSession \\\n .builder \\\n .appName(\"StreamingParkingMalaga\") \\\n .getOrCreate()\n\n # Create DataFrame representing the stream of input lines from connection to localhost:9999\n userSchema = StructType()\\\n .add(\"poiID\",\"integer\")\\\n .add(\"nombre\",\"string\")\\\n .add(\"direccion\",\"string\")\\\n .add(\"telefono\",\"string\")\\\n .add(\"correoelectronico\",\"string\")\\\n .add(\"latitude\",\"string\")\\\n .add(\"longitude\",\"string\")\\\n .add(\"altitud\",\"string\")\\\n .add(\"capacidad\",\"integer\",True)\\\n .add(\"capacidad_discapacitados\",\"string\")\\\n .add(\"fechahora_ultima_actualizacion\",\"timestamp\")\\\n .add(\"libres\",\"integer\",True)\\\n .add(\"libres_discapacitados\",\"string\")\\\n .add(\"nivelocupacion_naranja\",\"string\")\\\n .add(\"nivelocupacion_rojo\",\"string\")\\\n .add(\"smassa_sector_sare\",\"string\")\n\n\n\n lines = spark \\\n .readStream \\\n .format(\"csv\") \\\n .schema(userSchema)\\\n .option(\"header\", \"true\") \\\n .load(file)\n #lines.show()\n #lines.printSchema()\n\n lines = lines.select(\"nombre\",\"fechahora_ultima_actualizacion\",\"capacidad\", \"libres\")\n\n #lines = lines.select(lines[\"nombre\"], 
(lines[\"capacidad\"] - lines[\"libres\"]).alias(\"ocupation\"),lines[\"fechahora_ultima_actualizacion\"]) \\\n # .filter(lines[\"capacidad\"] > 0)\n # Split the lines into words\n #agg = lines.withWatermark(\"fechahora_ultima_actualizacion\",\"2 minutes\").groupBy(F.window(\"fechahora_ultima_actualizacion\",\"8 minutes\",\"2 minutes\"),\"nombre\")\\\n # .agg({\"ocupation\":\"avg\"})\n\n\n\n\n #words.printSchema()\n\n # Generate running word count\n\n # Start running the query that prints the running counts to the console\n query = lines \\\n .writeStream \\\n .outputMode(\"update\") \\\n .format(\"console\") \\\n .start()\n\n query.awaitTermination()\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(\"Usage: spark-submit StreamingWordCountFromFiles \", file=sys.stderr)\n exit(-1)\n\n main(sys.argv[1])\n","repo_name":"mukankeideth/Spark_projects_python_university","sub_path":"src/main/python/StreamingParkingMalaga.py","file_name":"StreamingParkingMalaga.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23381891057","text":"import os\nimport unittest\n\nimport arcpy\n\n# Add parent folder to python path if running test case standalone\nimport sys\nsys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))\n\nimport UnitTestUtilities\nimport Configuration\n\nclass AddLLOSFieldsTestCase(unittest.TestCase):\n ''' Test all tools and methods related to the Add LLOS Fields tool\n in the Military Tools toolbox'''\n\n inputObservers = None\n inputTargets = None\n\n def setUp(self):\n ''' Initialization needed if running Test Case standalone '''\n Configuration.GetLogger()\n Configuration.GetPlatform()\n ''' End standalone initialization '''\n \n Configuration.Logger.debug(\".....AddLLOSFieldsTestCase.setUp\")\n\n UnitTestUtilities.checkArcPy()\n if not arcpy.Exists(Configuration.militaryScratchGDB):\n Configuration.militaryScratchGDB = UnitTestUtilities.createScratch(Configuration.currentPath)\n\n originalObservers = os.path.join(Configuration.militaryInputDataGDB, \"LLOS_Observers\")\n originalTargets = os.path.join(Configuration.militaryInputDataGDB, \"LLOS_Targets\")\n\n self.inputObservers = os.path.join(Configuration.militaryScratchGDB, \"LLOS_Observers\")\n self.inputTargets = os.path.join(Configuration.militaryScratchGDB, \"LLOS_Targets\")\n\n arcpy.env.overwriteOutput = True\n\n Configuration.Logger.debug(\"Copying %s to %s...\" % (originalObservers, self.inputObservers))\n arcpy.CopyFeatures_management(originalObservers, self.inputObservers)\n Configuration.Logger.debug(\"Copying %s to %s...\" % (originalTargets, self.inputTargets))\n arcpy.CopyFeatures_management(originalTargets, self.inputTargets)\n\n arcpy.ImportToolbox(Configuration.toolboxUnderTest) \n\n return\n\n def tearDown(self):\n Configuration.Logger.debug(\".....AddLLOSFieldsTestCase.tearDown\")\n # UnitTestUtilities.deleteScratch(Configuration.militaryScratchGDB)\n return\n\n def test_add_llos_fields(self):\n Configuration.Logger.info(\"...AddLLOSFieldsTestCase.test_add_llos_fields\")\n\n self.assertTrue(arcpy.Exists(self.inputObservers), \"Input dataset does not exist, %s\" % self.inputObservers)\n self.assertTrue(arcpy.Exists(self.inputTargets), \"Input dataset does not exist, %s\" % self.inputTargets)\n\n arcpy.AddLinearLineOfSightFields_mt(self.inputObservers, 2, self.inputTargets, 0)\n \n fieldList = arcpy.ListFields(self.inputObservers, \"height\")\n fieldCount = len(fieldList)\n 
self.assertEqual(fieldCount, 1, \"Expected a field count of 1 for Observers but got %s.\" % str(fieldCount))\n fieldList = arcpy.ListFields(self.inputTargets, \"height\")\n fieldCount = len(fieldList)\n self.assertEqual(fieldCount, 1, \"Expected a field count of 1 for Targets but got %s.\" % str(fieldCount))\n return\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Esri/military-tools-geoprocessing-toolbox","sub_path":"utils/test/visibility_tests/AddLLOSFieldsTestCase.py","file_name":"AddLLOSFieldsTestCase.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"81"} +{"seq_id":"19365930971","text":"from nose.tools import assert_equal, assert_greater, assert_greater_equal, assert_less, assert_raises\nimport time\nimport numpy as np\nimport audioio.playaudio as ap\nimport audioio.audiomodules as am\n\n\ndef test_beep():\n am.enable_module()\n print()\n print('default module...')\n ap.beep(blocking=True)\n ap.beep(0.5, 'a4', blocking=True)\n ap.beep(blocking=False)\n time.sleep(2.0)\n ap.handle.close()\n for lib in am.installed_modules('device'):\n print('%s module...' % lib)\n am.select_module(lib)\n ap.beep(blocking=True, verbose=2)\n ap.beep(blocking=False, verbose=2)\n time.sleep(2.0)\n ap.handle.close()\n am.enable_module()\n\n\ndef test_play():\n am.enable_module()\n print()\n # sine wave:\n rate = 44100.0\n t = np.arange(0.0, 0.5, 1.0/rate)\n mono_data = np.sin(2.0*np.pi*800.0*t)\n stereo_data = np.tile(mono_data, (2, 1)).T\n # fade in and out:\n ap.fade(mono_data, rate, 0.1)\n ap.fade(stereo_data, rate, 0.1)\n print('default module mono...')\n ap.play(mono_data, rate, blocking=True)\n ap.play(mono_data, rate, blocking=False)\n time.sleep(2.0)\n print('default module stereo...')\n ap.play(stereo_data, rate, blocking=True)\n ap.play(stereo_data, rate, blocking=False)\n time.sleep(2.0)\n ap.handle.close()\n for lib in am.installed_modules('device'):\n print('%s module mono...' % lib)\n am.select_module(lib)\n ap.play(mono_data, rate, blocking=True, verbose=2)\n ap.play(mono_data, rate, blocking=False, verbose=2)\n time.sleep(2.0)\n print('%s module stereo...' % lib)\n ap.play(stereo_data, rate, blocking=True)\n ap.play(stereo_data, rate, blocking=False)\n time.sleep(2.0)\n ap.handle.close()\n am.enable_module()\n\n\ndef test_downsample():\n def sinewave(rate):\n t = np.arange(0.0, 0.5, 1.0/rate)\n mono_data = np.sin(2.0*np.pi*800.0*t)\n stereo_data = np.tile(mono_data, (2, 1)).T\n # fade in and out:\n ap.fade(mono_data, rate, 0.1)\n ap.fade(stereo_data, rate, 0.1)\n return mono_data, stereo_data\n \n am.enable_module()\n print()\n for lib in am.installed_modules('device'):\n am.select_module(lib)\n print('%s module ...' % lib)\n for rate in [45555.0, 100000.0, 600000.0]:\n print(' rate %.0f Hz ...' 
% rate)\n mono_data, stereo_data = sinewave(rate)\n ap.play(mono_data, rate, verbose=2)\n ap.play(stereo_data, rate, verbose=2)\n ap.handle.close()\n am.enable_module()\n\n\ndef test_note2freq():\n fa = 460.0\n assert_less(np.abs(ap.note2freq('a4', fa)-fa), 1e-6, 'wrong a4 frequency')\n fp = 0.5*ap.note2freq('a0')\n for o in range(10):\n for n in 'cdefgab':\n note = '%s%d' % (n, o)\n f = ap.note2freq(note)\n assert_greater(f, fp, 'frequency of %s should be greater than the one of previous note' % note)\n note = '%s#%d' % (n, o)\n fs = ap.note2freq(note)\n assert_greater(fs, f, 'frequency of %s should be greater' % note)\n note = '%sb%d' % (n, o)\n fb = ap.note2freq(note)\n assert_less(fb, f, 'frequency of %s should be lower' % note)\n fp = f\n assert_raises(ValueError, ap.note2freq, 'h')\n assert_raises(ValueError, ap.note2freq, 'da')\n assert_raises(ValueError, ap.note2freq, 'dx#')\n assert_raises(ValueError, ap.note2freq, 'd4#')\n assert_raises(ValueError, ap.note2freq, 'd4x')\n assert_raises(ValueError, ap.note2freq, 'd#4x')\n assert_raises(ValueError, ap.note2freq, 'd-2')\n assert_raises(ValueError, ap.note2freq, '')\n assert_raises(ValueError, ap.note2freq, 0)\n\n\ndef test_demo():\n am.enable_module()\n ap.demo()\n\n\ndef test_main():\n am.enable_module()\n ap.main(['prog', '-h'])\n ap.main(['prog'])\n ap.main(['prog', '-m', 'sounddevice'])\n ap.main(['prog', 'x'])\n","repo_name":"bendalab/audioio","sub_path":"tests/test_playaudio.py","file_name":"test_playaudio.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"7887499044","text":"from sys import stdin, stdout\n\n# 3 3\n# 1 2 1\n# 1 1 1\n# 1 2\n# 2 3\n# 1 3\n#\n# 2 2 2\n# -1 0 -1\n#\n# 1,3,2\n#\n# 4 4\n# 1 2 0 1\n# 1 3\n# 1 2\n# 2 3\n# 2 4\n# 2 3 2 1\n# ALIVE\n# 1 3 2 4\n#\n# define s = number of friends who like food i. 
if s > w then no answer exists\n# proof by contradiction: for the last person, s = 1; if s >= w there is an answer, then w should be >= 1, which is not true\n# w: 1 2 1\n# s: 2 2 2\n#\nif __name__ == '__main__':\n\n def dead_lee(n, m, w, xy, s):\n queue = []\n for i in range(n):\n if w[i] >= len(s[i]):\n queue.append(i)\n\n #print(w)\n #print(xy)\n #print(s)\n #print(queue)\n\n pl = []\n while len(queue) > 0:\n cur = queue.pop()\n w[cur] = 0\n for i in s[cur]:\n pl.append(i+1)\n if xy[i][0] != cur:\n s[xy[i][0]].remove(i)\n if w[xy[i][0]] == len(s[xy[i][0]]) and w[xy[i][0]] > 0:\n queue.append(xy[i][0])\n else:\n #print(\"-------------\")\n #print(cur)\n #print(s[xy[i][1]])\n #print(i)\n s[xy[i][1]].remove(i)\n if w[xy[i][1]] == len(s[xy[i][1]]) and w[xy[i][1]] > 0:\n queue.append(xy[i][1])\n\n if len(pl) == m:\n pl.reverse()\n return [\"ALIVE\", pl]\n else:\n return [\"DEAD\"]\n\n\n n, m = map(int, stdin.readline().split())\n w = list(map(int, stdin.readline().split()))\n xy = [None for i in range(m)]\n s = [set() for i in range(n)]\n for i in range(m):\n xy[i] = list(map(int, stdin.readline().split()))\n xy[i][0] -= 1\n xy[i][1] -= 1\n\n s[xy[i][0]].add(i)\n s[xy[i][1]].add(i)\n\n res = dead_lee(n, m, w, xy, s)\n stdout.write(res[0] + '\\n')\n if len(res) > 1:\n stdout.write(\" \".join(map(str, res[1])) + '\\n')\n","repo_name":"tycyd/codeforces","sub_path":"greedy/1369E DeadLee.py","file_name":"1369E DeadLee.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"37901776690","text":"from __future__ import print_function\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport xarray as xr\n\ndef read_rad_lost(filename, force_override=False, verbose=False):\n \"\"\"\n Function to read rad_lost.txt and pickle\n \n Parameters:\n filename : string\n Name of the file to open, including extension\n force_override: bool\n Flag to force read of rad_lost file even when pickle exists\n\n Returns:\n df : pandas.DataFrame\n \"\"\"\n \n fpkl = filename + '.p'\n if not force_override and os.path.exists(fpkl) and \\\n os.path.getmtime(fpkl) > os.path.getmtime(filename):\n df = pd.read_pickle(fpkl)\n # if verbose:\n # print('[read_radiators]: reading from existing pickle.')\n return df\n\n # if verbose:\n # print('[read_radiators]: pickle does not exist or file updated.' 
+ \\\n # ' Reading {0:s}'.format(filename))\n\n df = pd.read_csv(filename, sep=' ', header=None, skiprows=0)\n\n # drop nan column (due to space at the end of line in output file)\n df = df.drop(labels=[df.columns[-1]], axis=1)\n col = {0:'time',1:'nfreq',2:'nsrc',3:'N_mu'}\n nfreq = df[1][0]\n N_mu = df[3][0]\n for i in range(4, 4 + nfreq):\n col[i] = 'L_tot{0:d}'.format(i-4)\n\n df = df.rename(columns=col)\n\n return df\n # time = df.time\n # mu = np.arange(-1.0, 1.0, 2.0/N_mu)\n # mu = mu + 1.0/N_mu\n # da = xr.DataArray(df.iloc[:, 5:].T,\n # coords=dict(mu=mu, time=time),\n # dims=('mu', 'time'))\n\n # df['L_lost0'] = da.sum(dim='mu')\n\n # lost_mup = df.columns[5:5 + df.N_mu[0]/2]\n # lost_mum = df.columns[5 + df.N_mu[0]/2:5 + df.N_mu[0]]\n # df['L_lost0p'] = df[lost_mup].sum(axis=1)\n # df['L_lost0m'] = df[lost_mum].sum(axis=1)\n \n # df.to_pickle(fpkl)\n # return df, da\n","repo_name":"jeonggyukim/pyathena","sub_path":"pyathena/io/read_rad_lost.py","file_name":"read_rad_lost.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"43311417715","text":"\"\"\"\nClean and standardize column headers for a DataFrame.\n\"\"\"\nimport re\nfrom typing import Any, Dict, List, Optional, Union\nfrom unicodedata import normalize\n\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\n\nNULL_VALUES = {np.nan, \"\", None}\n\nCASE_STYLES = {\n \"snake\",\n \"kebab\",\n \"camel\",\n \"pascal\",\n \"const\",\n \"sentence\",\n \"title\",\n \"lower\",\n \"upper\",\n}\n\n\ndef clean_headers(\n df: Union[pd.DataFrame, dd.DataFrame],\n case: str = \"snake\",\n replace: Optional[Dict[str, str]] = None,\n remove_accents: bool = True,\n report: bool = True,\n) -> pd.DataFrame:\n \"\"\"\n Function to clean column headers (column names).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n df\n Dataframe from which column names are to be cleaned.\n case\n The desired case style of the column name.\n - 'snake': 'column_name'\n - 'kebab': 'column-name'\n - 'camel': 'columnName'\n - 'pascal': 'ColumnName'\n - 'const': 'COLUMN_NAME'\n - 'sentence': 'Column name'\n - 'title': 'Column Name'\n - 'lower': 'column name'\n - 'upper': 'COLUMN NAME'\n\n (default: 'snake')\n replace\n Values to replace in the column names.\n - {'old_value': 'new_value'}\n\n (default: None)\n remove_accents\n If True, strip accents from the column names.\n\n (default: True)\n report\n If True, output the summary report. 
Otherwise, no report is outputted.\n\n (default: True)\n\n Examples\n --------\n Clean column names by converting the names to camel case style, removing accents,\n and correcting a mispelling.\n\n >>> df = pd.DataFrame({'FirstNom': ['Philip', 'Turanga'], 'lastName': ['Fry', 'Leela'], \\\n'Téléphone': ['555-234-5678', '(604) 111-2335']})\n >>> clean_headers(df, case='camel', replace={'Nom': 'Name'})\n Column Headers Cleaning Report:\n 2 values cleaned (66.67%)\n firstName lastName telephone\n 0 Philip Fry 555-234-5678\n 1 Turanga Leela (604) 111-2335\n \"\"\"\n if case not in CASE_STYLES:\n raise ValueError(\n f\"case {case} is invalid, it needs to be one of {', '.join(c for c in CASE_STYLES)}\"\n )\n\n # Store original column names for creating cleaning report\n orig_columns = df.columns.astype(str).tolist()\n\n if replace:\n df = df.rename(columns=lambda col: _replace_values(col, replace))\n\n if remove_accents:\n df = df.rename(columns=_remove_accents)\n\n df = df.rename(columns=lambda col: _convert_case(col, case))\n\n df.columns = _rename_duplicates(df.columns, case)\n\n # Count the number of changed column names\n new_columns = df.columns.astype(str).tolist()\n cleaned = [1 if new_columns[i] != orig_columns[i] else 0 for i in range(len(orig_columns))]\n stats = {\"cleaned\": sum(cleaned)}\n\n # Output a report describing the result of clean_headers\n if report:\n _create_report(stats, len(df.columns))\n\n return df\n\n\ndef _convert_case(name: Any, case: str) -> Any:\n \"\"\"\n Convert case style of a column name.\n\n Parameters\n ----------\n name\n Column name.\n case\n The desired case style of the column name.\n \"\"\"\n if name in NULL_VALUES:\n name = \"header\"\n\n if case in {\"snake\", \"kebab\", \"camel\", \"pascal\", \"const\"}:\n words = _split_strip_string(str(name))\n else:\n words = _split_string(str(name))\n\n if case == \"snake\":\n name = \"_\".join(words).lower()\n elif case == \"kebab\":\n name = \"-\".join(words).lower()\n elif case == \"camel\":\n name = words[0].lower() + \"\".join(w.capitalize() for w in words[1:])\n elif case == \"pascal\":\n name = \"\".join(w.capitalize() for w in words)\n elif case == \"const\":\n name = \"_\".join(words).upper()\n elif case == \"sentence\":\n name = \" \".join(words).capitalize()\n elif case == \"title\":\n name = \" \".join(w.capitalize() for w in words)\n elif case == \"lower\":\n name = \" \".join(words).lower()\n elif case == \"upper\":\n name = \" \".join(words).upper()\n\n return name\n\n\ndef _split_strip_string(string: str) -> List[str]:\n \"\"\"\n Split the string into separate words and strip punctuation\n and special characters.\n \"\"\"\n string = re.sub(r\"[!()*+\\,\\-./:;<=>?[\\]^_{|}~]\", \" \", string)\n string = re.sub(r\"[\\'\\\"\\`]\", \"\", string)\n\n return re.sub(r\"([A-Z][a-z]+)\", r\" \\1\", re.sub(r\"([A-Z]+|[0-9]+|\\W+)\", r\" \\1\", string)).split()\n\n\ndef _split_string(string: str) -> List[str]:\n \"\"\"\n Split the string into separate words.\n \"\"\"\n string = re.sub(r\"[\\-_]\", \" \", string)\n\n return re.sub(r\"([A-Z][a-z]+)\", r\" \\1\", re.sub(r\"([A-Z]+)\", r\"\\1\", string)).split()\n\n\ndef _replace_values(name: Any, mapping: Dict[str, str]) -> Any:\n \"\"\"\n Replace string values in the column name.\n\n Parameters\n ----------\n name\n Column name.\n mapping\n Maps old values in the column name to the new values.\n \"\"\"\n if name in NULL_VALUES:\n return name\n\n name = str(name)\n for old_value, new_value in mapping.items():\n # If the old value or the new value is not 
alphanumeric, add underscores to the\n # beginning and end so the new value will be parsed correctly for _convert_case()\n new_val = (\n rf\"{new_value}\" if old_value.isalnum() and new_value.isalnum() else rf\"_{new_value}_\"\n )\n name = re.sub(rf\"{old_value}\", new_val, name, flags=re.IGNORECASE)\n\n return name\n\n\ndef _remove_accents(name: Any) -> Any:\n \"\"\"\n Return the normal form for a Unicode string name using canonical\n decomposition.\n \"\"\"\n if not isinstance(name, str):\n return name\n\n return normalize(\"NFD\", name).encode(\"ascii\", \"ignore\").decode(\"ascii\")\n\n\ndef _rename_duplicates(names: pd.Index, case: str) -> Any:\n \"\"\"\n Rename duplicated column names to append a number at the end.\n \"\"\"\n if case in {\"snake\", \"const\"}:\n sep = \"_\"\n elif case in {\"camel\", \"pascal\"}:\n sep = \"\"\n elif case == \"kebab\":\n sep = \"-\"\n else:\n sep = \" \"\n\n names = list(names)\n counts: Dict[str, int] = {}\n\n for i, col in enumerate(names):\n cur_count = counts.get(col, 0)\n if cur_count > 0:\n names[i] = f\"{col}{sep}{cur_count}\"\n counts[col] = cur_count + 1\n\n return names\n\n\ndef _create_report(stats: Dict[str, int], ncols: int) -> None:\n \"\"\"\n Describe what was done in the cleaning process.\n \"\"\"\n print(\"Column Headers Cleaning Report:\")\n if stats[\"cleaned\"] > 0:\n nclnd = stats[\"cleaned\"]\n pclnd = round(nclnd / ncols * 100, 2)\n print(f\"\\t{nclnd} values cleaned ({pclnd}%)\")\n","repo_name":"sfu-db/dataprep","sub_path":"dataprep/clean/clean_headers.py","file_name":"clean_headers.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":1813,"dataset":"github-code","pt":"81"} +{"seq_id":"40062761378","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n # *router.urls,\r\n path('api/test-user', views.test_user),\r\n path('api/call-propreports', views.call_propreports),\r\n path('api/reports/create-trade-tag', views.create_trade_tag),\r\n path('api/reports/get-trade-tags', views.get_trade_tags),\r\n]\r\n","repo_name":"darinvi/ATP","sub_path":"backend/reports/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26364748757","text":"from django.conf.urls import url\nfrom bingeworthy import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login/$', views.user_login, name='login'),\n url(r'^signup/$', views.user_login, name='signup'),\n url(r'^logout/$', views.user_logout, name='logout'),\n url(r'^contact-us/$', views.contact_us, name='contact_us'),\n url(r'^faq/$', views.faq, name='faq'),\n url(r'^about-us/$', views.about, name=\"about\"),\n url(r'^search-results/$', views.search_results, name=\"search_results\"),\n url(r'^genres/$', views.genres, name=\"genres\"),\n url(r'^platforms/$', views.platforms, name=\"platforms\"),\n url(r'^shows/$', views.shows, name=\"shows\"),\n url(r'^shows/top/$', views.shows_top, name=\"shows_top\"),\n url(r'^shows/all/$', views.shows_all, name=\"shows_all\"),\n url(r'^shows/(?P[\\w\\-]+)/$', views.shows_show, name=\"shows_show\"),\n url(r'^shows/(?P[\\w\\-]+)/make-review/$', views.make_review, name=\"make_review\"),\n url(r'^user/profile/(?P[\\w\\-]+)/$', views.user_profile, name=\"user_profile\"),\n # only using /profile/ because without it, you can't access the my-account\n # as it automatically takes you to the page of the user my-account\n url(r'^user/my-account/$', views.my_account, name='my_account'),\n url(r'^reviews/$', views.show_reviews, name='reviews'),\n]\n","repo_name":"AJPOD/Bingeworthy","sub_path":"bingeworthy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"7647690924","text":"#!/usr/bin/env python3\nimport argparse\nimport calendar\nimport datetime\nimport os\nfrom pathlib import Path\nimport subprocess\nfrom typing import Optional, cast\n\nfrom syspath import get_git_root\n\n\nDESCRIPTION = \"This script generates a new note or reference\"\nEDITOR = os.environ.get('EDITOR', 'vim')\n\n\ndef get_reference_slug() -> Optional[str]:\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('reference_slug', nargs='?', type=str)\n params = parser.parse_args()\n reference_slug = cast(Optional[str], params.reference_slug)\n return reference_slug\n\n\ndef generate_note(reference_name: Optional[str]) -> Path:\n current_time = datetime.datetime.utcnow()\n\n if reference_name:\n note_filename = f'{reference_name}.md'\n note_path = get_git_root() / 'app' / 'reference' / note_filename\n slug = reference_name\n else:\n note_filename = current_time.strftime('%Y%m%d-%H%M.md')\n note_path = get_git_root() / 'app' / 'notes' / note_filename\n slug = 'slug'\n\n timestamp = calendar.timegm(current_time.utctimetuple())\n note = f\"title\\n\\n{slug}\\n\\n{timestamp}\\n\\nnote\\n\"\n\n if note_path.exists():\n raise ValueError(f'note at {note_path} already exists')\n with open(note_path, 'w') as note_handle:\n note_handle.write(note)\n return note_path\n\n\ndef edit_note(note_path: Path) -> None:\n subprocess.call([EDITOR, str(note_path)])\n\n\nif __name__ == 
'__main__':\n reference_name = get_reference_slug()\n note_path = generate_note(reference_name)\n print('Note template written to %s' % note_path)\n edit_note(note_path)\n","repo_name":"albertyw/albertyw.com","sub_path":"bin/newpost.py","file_name":"newpost.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"19401312450","text":"#!/usr/bin/env python\n\n# Verbatim script for managing the SUMO (support.mozilla.com) project. More information at\n# https://wiki.mozilla.org/Verbatim\n#\n# [Note that this is a historical example only, as SUMO no longer uses tiki.]\n#\n# Author: Wil Clouser \n\nimport os\nimport logging\n\nfrom django.conf import settings\n\nfrom translate.convert import tiki2po, po2tiki\n\n\ndef initialize(projectdir, languagecode):\n \"\"\"The first parameter is the path to the project directory. It's up to this\n script to know any internal structure of the directory\"\"\"\n\n # Temporary code - projectdirs come from pootle with sumo/ab_CD form; we need just the former part\n # extract project root from projectdir\n projectroot = os.path.join(settings.PODIRECTORY, os.path.split(projectdir)[0])\n\n # Temporary code. Language codes come from pootle with underscores right now; they need to be dashes.\n languagecode = languagecode.replace(\"_\", \"-\")\n\n # Find the files we're working with\n tikifile = os.path.join(projectroot, languagecode, 'language.php')\n pofile = os.path.join(projectroot, languagecode, 'language.po')\n\n # Build our combined file\n logging.info(u\"Initializing %s to %s\", tikifile, pofile)\n tiki2po.converttiki(open(tikifile, \"r\"), open(pofile, \"w\"))\n\ndef precommit(committedfile, author, message):\n if os.path.basename(committedfile) == \"language.po\":\n\n # Get the files we'll be using\n tikifile = os.path.join(os.path.dirname(committedfile), 'language.php')\n\n # Update tikifile with new strings\n logging.info(u\"Converting po to tiki: %s > %s\", committedfile, tikifile)\n po2tiki.convertpo(open(committedfile, \"r\"), open(tikifile, \"w\"))\n\n # We want to commit language.php\n return [tikifile]\n return []\n\ndef postcommit(committedfile, success):\n if os.path.basename(committedfile) == \"language.po\":\n\n # Get the files we'll be using\n tikifile = os.path.join(os.path.dirname(committedfile), 'language.php')\n\n # Recreate .po with any new strings in tikifile\n logging.info(u\"Converting tiki to po: %s > %s\", tikifile, committedfile)\n tiki2po.converttiki(open(tikifile, \"r\"), open(committedfile, \"w\"))\n\ndef preupdate(updatedfile):\n if os.path.basename(updatedfile) == \"language.po\":\n\n # Get the files we'll be using\n tikifile = os.path.join(os.path.dirname(updatedfile), 'language.php')\n\n # We want to update language.php\n logging.info(u\"Updating %s\", tikifile)\n return tikifile\n return \"\"\n\ndef postupdate(updatedfile):\n if os.path.basename(updatedfile) == \"language.po\":\n\n # Get the files we'll be using\n tikifile = os.path.join(os.path.dirname(updatedfile), 'language.php')\n\n # Recreate .po with any new strings in tikifile\n logging.info(u\"Converting tiki to po: %s > %s\", tikifile, updatedfile)\n tiki2po.converttiki(open(tikifile, \"r\"), open(updatedfile, \"w\"))\n","repo_name":"moriaty2013/pootle","sub_path":"pootle/scripts/example-hooks/sumo.py","file_name":"sumo.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} 
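All of the sumo.py hooks above reduce to one conversion pair from translate-toolkit: tiki2po when strings flow out of Tiki's language.php into a gettext catalog, and po2tiki when translations flow back. A minimal sketch of that round trip outside of Pootle, reusing only the converter calls the record itself imports (the file paths here are illustrative):

from translate.convert import tiki2po, po2tiki

# Tiki -> PO: expose the PHP string table to translators as a gettext catalog.
tiki2po.converttiki(open('language.php', 'r'), open('language.po', 'w'))

# PO -> Tiki: fold the translated catalog back into the file Tiki loads at runtime.
po2tiki.convertpo(open('language.po', 'r'), open('language.php', 'w'))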
+{"seq_id":"74362076743","text":"import json\n\nattr = ['GoodForMeal', 'HairSpecializesIn', 'DogsAllowed', 'Ambience', 'CoatCheck', 'Music', 'BikeParking', 'GoodForDancing', 'Open24Hours', 'RestaurantsTakeOut', 'RestaurantsReservations', 'AgesAllowed', 'Smoking', 'WiFi', 'AcceptsInsurance', 'BYOB', 'DietaryRestrictions', 'RestaurantsAttire', 'RestaurantsPriceRange2', 'WheelchairAccessible', 'Alcohol', 'BYOBCorkage', 'OutdoorSeating', 'RestaurantsDelivery', 'HasTV', 'RestaurantsCounterService', 'ByAppointmentOnly', 'Corkage', 'RestaurantsGoodForGroups', 'BestNights', 'HappyHour', 'RestaurantsTableService', 'GoodForKids', 'BusinessParking', 'BusinessAcceptsCreditCards', 'NoiseLevel', 'BusinessAcceptsBitcoin', 'DriveThru', 'Caters']\nnested = ['GoodForMeal', 'HairSpecializesIn', 'Ambience', 'Music', 'DietaryRestrictions', 'BestNights', 'BusinessParking']\nnormal = list(set(attr)-set(nested))\nnormalAttr = {}\nfor i in normal:\n normalAttr[i] = set()\n\ndata = [json.loads(line) for line in open('../raw.nosync/yelp_dataset/business.json', 'r')]\n\nfor d in data:\n if d['attributes'] is not None:\n for k, v in d['attributes'].items():\n if v is not None and k in normal:\n normalAttr[k].add(str(v))\n\nwith open('../raw.nosync/normalAttr.txt', 'w') as f:\n for k, v in normalAttr.items():\n f.write(k + ': ')\n for i in v:\n f.write(i + ' ')\n f.write('\\n')","repo_name":"Tiankai-Jiang/Yelp-Dataset","sub_path":"01_preprocessing/15_businessAttributesCheck.py","file_name":"15_businessAttributesCheck.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"3739016015","text":"from pyramid.configuration import Configurator\nfrom som.models import get_root\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n config = Configurator(root_factory=get_root, settings=settings)\n config.begin()\n config.add_view('som.views.my_view',\n context='som.models.MyModel',\n renderer='som:templates/mytemplate.pt')\n config.add_static_view('static', 'som:static')\n config.end()\n return config.make_wsgi_app()\n\n","repo_name":"bloodbare/Som","sub_path":"src/Som/som/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9466467263","text":"import os\n\nclass GenerarClase():\n #Switch para saber si el archivo esta abierto\n sw=False\n\n #Limpio el archivo del texto\n #nombre (String): Nombre del archivo a limpiar\n def LimpiarArchivo(self, nombre):\n estado=False\n if self.sw == False:\n archivo=open(nombre+\".py\",\"w\")\n self.sw = True\n if self.sw:\n print(\"Limpiar archivo :Se activo el switch luego de abrirlo\")\n #si se lograr cerrar el archivo devuelve True\n estadoR=self.Cerrar(archivo)\n \n if estadoR:\n estado=True\n print(\"Limpiar archivo :Se limpio el archivo\")\n else:\n print(\"Limpiar archivo: No se pudo limpiar el archivo\")\n return estado\n\n #Cierro el archivo\n #archivo (archivo:IO): variable tipo archivo #archivo=open()\n #Devuelve un estado(boolean):\n #False: No se pudo cerrar el archivo\n #True: Se cerro el archivo\n def Cerrar(self, archivo):\n estado=False\n if self.sw==True:\n archivo.close()\n self.sw=False\n estado=True\n print(\"Cerrar: Se cerro el correctamente el archivo\")\n else:\n print(\"Cerrar: No se cerro el archivo ya que no estaba abierto\")\n return estado\n \n\n #Obtengo el dato NombreC que se 
usara para nombrar la clase\n def ObtenerNombre(self, dic):\n nombre=\"\"\n try:\n nombre=str(dic['NombreC'])\n except Exception:\n nombre=\"Error\"\n return nombre\n \n #Genero un archivo con terminación .py\n #dic (dict): Se envia un diccionario con los datos\n #Orden de los datos={'NombreC':'Rol', 'atributo':['nombre1','nombre2','nombre3','nombre4'],'tipo':['texto','numero','texto','numero','texto','numero']}\n #retorna una boolean:\n #False: El archivo no pudo ser creado\n #True: El archivo se creo\n def CrearArchivo(self, dic):\n nombre=self.ObtenerNombre(dic)\n estado=False\n try:\n #creo el archivo\n os.system(\"PowerShell 'Borrar' > \"+nombre+\".py\")\n #Limpio el archivo y devuelve True si todo salio bien\n estado=self.LimpiarArchivo(nombre)\n #Lo cierro para guardar los cambios, el sw para a False\n #self.Cerrar(archivo)\n if self.sw==False:\n print(\"Correcto, Esta cerrado el archivo\")\n #edito el archivo\n archivo=self.AbrirArchivo(nombre)\n archivo.write(\"class \"+nombre+\"(): \\n\")\n #cierra el archivo, ¿para qué?, no sé\n archivo.close()\n estado=True\n print(\"Se creo correctamente el archivo \"+ nombre)\n except Exception as e:\n estado=False\n print(str(e))\n return estado\n\n #Calcula el indice de las clases a crear\n #dic (diccionario)\n def Indice(self, dic):\n indice=len(dic['atributo'])\n return indice\n \n \n\n #Se utiliza para darle valores a las variables\n #dic (diccionario)\n #respuesta(lista): devulve una lista\n def Inicializar(self,dic):\n respuesta=[]\n nIndice=self.Indice(dic)\n for indice in range(0,nIndice):\n tipo=dic['tipo'][indice]\n atributo=dic['atributo'][indice]\n if tipo =='texto':\n respuesta.append(atributo+\" = ''\")\n if tipo =='numero':\n respuesta.append(atributo+\" = 0\")\n if tipo =='boolean':\n respuesta.append(atributo+\" = False\")\n if tipo == 'float':\n respuesta.append(atributo+\" = 0.0\")\n return respuesta\n\n\n def InicializarInit(self,dic):\n respuesta=\"\"\n numero=0\n nIndice=self.Indice(dic)\n for indice in range(0,nIndice):\n tipo=dic['tipo'][indice]\n atributo=dic['atributo'][indice]\n if numero<=indice:\n if tipo =='texto':\n respuesta += \",\"+ atributo+\" = ''\"\n if tipo =='numero':\n respuesta += \",\"+ atributo+\" = 0\"\n if tipo =='boolean':\n respuesta += \",\"+ atributo+\" = False\"\n if tipo == 'float':\n respuesta += \",\"+ atributo+\" = 0.0\"\n numero+=1\n else:\n if tipo =='texto':\n respuesta += atributo+\" = ''\"\n if tipo =='numero':\n respuesta += atributo+\" = 0\"\n if tipo =='boolean':\n respuesta += atributo+\" = False\"\n if tipo == 'float':\n respuesta += atributo+\" = 0.0\"\n numero+=1\n return respuesta\n\n def CrearSelf(self,dic):\n respuesta=[]\n numero=0\n nIndice=self.Indice(dic)\n for indice in range(0,nIndice):\n atributo=dic['atributo'][indice]\n rSelf=\"\"\n rSelf=\" self.\"+atributo+\" = \"+atributo\n respuesta.append(rSelf)\n numero+=1\n return respuesta\n\n #Genera la función __init__\n #respuesta (String): la variable contiene los atributos inicializados y devuelve el titulo\n def CrearInit(self,respuesta):\n salida = ''\n salida =\" def __init__(self\"+respuesta+\"): \\n\"\n return salida\n\n #Agrega al documento el texto\n #texto (String): devuelve un estado boolean con la respuesta\n #devuelve un estado: \n #False: No se pudo escribir en el archivo\n #True: Se escribio en el archivo\n def AgregarTexto(self, texto, nombre):\n estado=False\n archivo=self.AbrirArchivo(nombre)\n archivo.writelines(texto)\n estado=True\n return estado\n\n #Agrego los self.valor=valor con un for\n def 
AgregarTextoS(self, nombre,respuesta):\n estado=False\n estadoR=False\n try:\n numero=0\n archivo=self.AbrirArchivo(nombre)\n for Self in range(0,len(respuesta)):\n archivo.writelines(respuesta[numero]+\"\\n\")\n print(\"Contador :\"+str(numero))\n print(\"Cantidad de :\"+str(Self))\n if numero==len(respuesta)-1:\n estadoR=self.Cerrar(archivo)\n if estadoR:\n estado=True\n numero +=1\n except Exception as e:\n print(str(e))\n return estado\n\n \n\n #Abre el archivo con el nombre\n #nombre (String): nombre del archivo para poder abrirlo\n #devuelve un estado: \n #False: No se pudo abrir el archivo\n #True: Se abrio el archivo\n def AbrirArchivo(self, nombre):\n archivo=open(\"log.txt\",\"a\")\n self.sw=True\n estadoR=self.Log(archivo, nombre)\n if estadoR:\n #if self.sw == False:\n archivo=open(nombre+\".py\",\"a\")\n self.sw=True\n print(\"Se logro abrir el archivo para modificarlo\")\n return archivo\n\n #Funcion principal para generar las clases\n #Variable dic tipo Diccionario\n #retorna una estado tipo boolean:\n #True: Se genero la clase\n #False: No se genero la clase\n def Principal(self, dic):\n estado=False\n estadoR=self.CrearArchivo(dic)\n try:\n if estadoR:\n print(\"ETAPA 1/6: ARCHIVO CREADO\")\n estadoInicializarInit=False\n estadoInicializarInit=self.InicializarInit(dic)\n if estadoInicializarInit !=\"\":\n print(\"ETAPA 2/6: CREADO TEXTO PARA INIT\")\n estadoInit=\"\"\n estadoInit=self.CrearInit(estadoInicializarInit)\n if estadoInit !=\"\":\n print(\"ETAPA 3/6 INIT GENERADO\")\n nombre=self.ObtenerNombre(dic)\n if nombre !=\"\":\n print(\"ETAPA 4/6 NOMBRE DEL ARCHIVO HA SIDO RECUPERADO\")\n estadoAgregar=False\n estadoAgregar=self.AgregarTexto(estadoInit,nombre)\n if estadoAgregar:\n print(\"ETAPA 5/6 INIT COMPLETO\")\n estadoCrearSelf=[]\n estadoCrearSelf=self.CrearSelf(dic)\n print(estadoCrearSelf)\n print(\"CANTIDAD DE SELF CREADOS: \"+str(len(estadoCrearSelf)))\n print(\"ETAPA 6/6 SELF GENERADOS CON :\"+str(len(estadoCrearSelf)))\n if len(estadoCrearSelf)>0:\n estadoAgregarS=False\n estadoAgregarS=self.AgregarTextoS(nombre,estadoCrearSelf)\n if estadoAgregarS:\n print(\"ETAPA 7/6 SELF INSERTADOS EN EL TEXTO\")\n listadoSet=[]\n listadoSet=self.CrearSet(dic)\n print(\"Cantidad Set a generar :\"+str(len(listadoSet)))\n estadoR=False\n estadoR=self.AgregarTextoSet(nombre,listadoSet)\n if estadoR:\n print(\"ETAPA 7/6 SE CREARON LAS FUNCIONES SET\")\n listaGet=self.CrearGet(dic)\n if len(listaGet)>0:\n estadoR=self.AgregarTextoGet(nombre,listaGet)\n if estadoR:\n print(\"ETAPA 8/8 SE INSERTARON LAS FUNCIONES GET\")\n listAtributo=self.getAtributos(dic)\n if len(listAtributo)>0:\n metodoStr=self.CrearSTR(dic)\n if len(metodoStr)>0:\n estadoR=self.AgregarTextoStr(nombre,metodoStr)\n if estadoR:\n print(\"ETAPA 9/9 SE INSERTO LA FUNCION STR\")\n \n\n estado=True\n\n \n #print(\"Se creo el archivo\")\n except Exception as e:\n print(str(e))\n return estado\n\n #Guardare un log de los errores para su posterior comprension\n def Log(self, archivo, nombre):\n estado=False\n estadoR=self.Cerrar(archivo)\n if estadoR:\n estado=True\n else:\n texto=\"No se logro abrir el archivo con nombre: \"+nombre\n self.AgregarError(texto,archivo)\n return estado\n\n\n def AgregarError(self, texto, archivo):\n archivo.write(texto)\n #return estado\n #Genera los set\n def CrearSet(self, dic):\n respuesta=[]\n numero=0\n nIndice=self.Indice(dic)\n for indice in range(0,nIndice):\n atributo=dic['atributo'][indice]\n #if indice==0:\n respuesta.append(\" \")\n nombreM=\"\"\n 
nombreM=str(atributo.capitalize())\n respuesta.append(\" def set\"+nombreM+\"(self, \"+atributo+\"):\")\n respuesta.append(\" self.\"+atributo+\" = \"+atributo)\n numero+=1\n #else:\n #respuesta.append(\" self.\"+atributo+\" = \"+atributo) \n numero+=1\n return respuesta\n ######\n #Modified version to create the def setNombre.. functions\n #Add the self.value=value lines with a for loop\n def AgregarTextoSet(self, nombre,respuesta):\n estado=False\n estadoR=False\n try:\n numero=0\n archivo=self.AbrirArchivo(nombre)\n for Self in range(0,len(respuesta)):\n archivo.writelines(respuesta[numero]+\"\\n\")\n print(\"Counter :\"+str(numero))\n print(\"Count of :\"+str(Self))\n if numero==len(respuesta)-1:\n estadoR=self.Cerrar(archivo)\n if estadoR:\n estado=True\n numero +=1\n except Exception as e:\n print(str(e))\n return estado\n\n #Add the get functions to the file\n def AgregarTextoGet(self, nombre,respuesta):\n estado=False\n estadoR=False\n try:\n numero=0\n archivo=self.AbrirArchivo(nombre)\n for Self in range(0,len(respuesta)):\n archivo.writelines(respuesta[numero]+\"\\n\")\n print(\"Counter :\"+str(numero))\n print(\"Count of :\"+str(Self))\n if numero==len(respuesta)-1:\n estadoR=self.Cerrar(archivo)\n if estadoR:\n estado=True\n numero +=1\n except Exception as e:\n print(str(e))\n return estado\n\n #Generate the text of the get functions\n def CrearGet(self, dic):\n respuesta=[]\n numero=0\n nIndice=self.Indice(dic)\n for indice in range(0,nIndice):\n atributo=dic['atributo'][indice]\n #if indice==0:\n respuesta.append(\" \")\n nombreM=\"\"\n nombreM=str(atributo.capitalize())\n respuesta.append(\" def get\"+nombreM+\"(self):\")\n respuesta.append(\" return self.\"+atributo)\n numero+=1 \n return respuesta \n\n #Generate the __str__ function\n def CrearSTR(self, dic):\n respuesta=[]\n numero=0\n retorno = \"\"\n nIndice=self.Indice(dic)\n litadoAtributo=self.getAtributos(dic)\n try:\n if len(litadoAtributo)>0:\n retorno=\" return str(self.\"\n respuesta.append(\" \")\n respuesta.append(\" def __str__(self):\")\n for indice in range(0,nIndice):\n atributo=litadoAtributo[indice]\n if indice==0:\n retorno+=\"\"+atributo+\")\"\n if indice > 0 max_samples:\n nr_samples = max_samples\n if sample_func == \"uniform\":\n samples_index_target = np.random.randint(len(target), size=nr_samples)\n samples_index_source = np.random.randint(len(source), size=nr_samples)\n return source[samples_index_source], target[samples_index_target]\n if sample_func == \"random\" or sample_func == \"multires\":\n samples_index_target = np.random.choice(len(target), nr_samples, replace=False)\n samples_index_source = np.random.choice(len(source), nr_samples, replace=False)\n return source[samples_index_source], target[samples_index_target]\n elif sample_func == \"inforeg\":\n src_pcd = o3d.geometry.PointCloud()\n tar_pcd = o3d.geometry.PointCloud()\n\n src_pcd.points = o3d.utility.Vector3dVector(source)\n tar_pcd.points = o3d.utility.Vector3dVector(target)\n\n source_kps = o3d.geometry.keypoint.compute_iss_keypoints(src_pcd)\n target_kps = o3d.geometry.keypoint.compute_iss_keypoints(tar_pcd)\n\n source = np.asarray(source_kps.points)\n target = np.asarray(target_kps.points)\n\n if len(source) > len(target):\n return source[np.random.randint(len(source), size=len(target)), :], target\n else:\n return source, target[np.random.randint(len(target), size=len(source)), :]\n\n\n###### 3. 
transform point cloud with R and t\ndef transform(a1, R, t):\n a_1 = np.ones((a1.shape[1]+1, a1.shape[0]))\n a_1[0:3, :] = a1.T\n # # homogeneous transformation\n T = np.identity(4)\n T[0:3, 0:3] = R\n T[0:3, 3] = t\n new_a1 = np.dot(T, a_1)\n return new_a1[0:3, :].T\n\n###### 4. Find the closest point for each point in A1 based on A2 using brute-force approach\ndef closest_points(a1, a2):\n a1_size = a1.shape[0]\n closest_points = []\n distances = []\n for i in range(a1_size):\n a1_point = a1[i]\n # squared distance from this source point to every target point\n dists = list(np.sum((a2 - a1_point) ** 2, axis=1))\n min_dist = min(dists)\n cp_index = dists.index(min_dist)\n distances.append(min_dist)\n closest_points.append(cp_index)\n return closest_points, np.array(distances)\n\n###### 5. Calculate RMS\ndef RMS_calc(distances):\n return np.sqrt(np.mean(distances))\n\n###### 6. Refine R and t using SVD\ndef svd(a1, a2):\n # find center\n # translate points to their centroids\n center_a1 = np.mean(a1, axis=0)\n center_a2 = np.mean(a2, axis=0)\n a1_centered = a1 - center_a1\n a2_centered = a2 - center_a2\n\n # rotation matrix\n H = np.dot(a1_centered.T, a2_centered)\n U, S, V_T = np.linalg.svd(H)\n R = np.dot(V_T.T, U.T)\n\n # translation\n t = center_a2.T - np.dot(R, center_a1.T)\n\n return R, t\n\n###### 7. Kd-Tree\ndef kd_tree(source, target):\n tree = KDTree(np.c_[target])\n indices, distances = [], []\n for i in source:\n dd, ii = tree.query(i, k=1)\n distances.append(dd)\n indices.append(ii)\n\n return indices, distances\n\n###### 8 Z-Buffer\ndef eucliDist(A,B):\n # calculate the distance of two points\n return np.sqrt(sum(np.power((A - B), 2)))\ndef z_buffer(source, target, H=20, W=20, m=5):\n xx = [min(min(target.T[0]), min(source.T[0])), max(max(target.T[0]), max(source.T[0]))]\n yy = [min(min(target.T[1]), min(source.T[1])), max(max(target.T[1]), max(source.T[1]))]\n\n # build the projection area\n x = np.linspace(xx[0], xx[1], H)\n y = np.linspace(yy[0], yy[1], W)\n # X, Y = np.meshgrid(x, y)\n z_buffer_A1=np.zeros((H,W))\n # suppose each pixel in projection plane is d(i,j) = np.inf\n z_buffer_A1+=np.inf\n location_A1 = np.zeros((H,W,2))\n location_A1+=np.nan\n z_buffer_A2=np.zeros((H,W))\n # suppose each pixel in projection plane is d(i,j) = np.inf\n z_buffer_A2+=np.inf\n location_A2 = np.zeros((H,W,2))\n location_A2+=np.nan\n\n dist_idx = np.zeros((H,W))\n # print('Project points of source into the source buffer, using the nearest point and record the geometry information')\n for x_s,y_s,z_s in source.T[:]:\n for i, j in product(range(H), range(W)):\n dist_idx[i,j] = eucliDist(np.array([x_s,y_s]), np.array([x[i], y[j]]))\n x_d, y_d = np.where(dist_idx==np.min(dist_idx))\n if z_s < z_buffer_A1[x_d,y_d]:\n z_buffer_A1[x_d,y_d] = z_s\n location_A1[x_d,y_d,0] = x_s\n location_A1[x_d,y_d,1] = y_s\n\n dist_idx = np.zeros((H,W))\n # print('Project points of target into the target buffer, using the nearest point and record the geometry information')\n for x_t,y_t,z_t in target.T[:]:\n for i, j in product(range(H), range(W)):\n dist_idx[i,j] = eucliDist(np.array([x_t,y_t]), np.array([x[i], y[j]]))\n x_d, y_d = np.where(dist_idx==np.min(dist_idx))\n if z_t < z_buffer_A2[x_d,y_d]:\n z_buffer_A2[x_d,y_d] = z_t\n location_A2[x_d,y_d,0] = x_t\n location_A2[x_d,y_d,1] = y_t\n # the last step map each point in source buffer with the target point in target buffer in m*m window using nearest distance\n distances = []\n new_source = np.array([])\n new_target = np.array([])\n dist_idx = np.zeros((H,W))\n dist_idx += np.inf\n for h, w in 
product(range(H), range(W)):\n if np.isnan(location_A1[h,w][0]):\n continue\n for h2, w2 in product(range(h-round(m/2), h+round(m/2)), range(w-round(m/2), w+round(m/2))):\n if h2<0 or h2>=H or w2<0 or w2>=W or np.isnan(location_A2[h2,w2][0]):\n continue\n else:\n dist_idx[h2,w2] = eucliDist(np.array(location_A1[h,w]), np.array(location_A2[h2,w2]))\n if np.min(dist_idx) != np.inf:\n min_dist = np.min(dist_idx)\n x_t, y_t = np.where(dist_idx==min_dist)\n distances.append(min_dist)\n if new_source.size == 0:\n new_source = np.array([location_A1[h,w][0], location_A1[h,w][1], z_buffer_A1[h,w]]).reshape(3,1)\n new_target = np.array([location_A2[int(x_t),int(y_t)][0], location_A2[int(x_t),int(y_t)][1], z_buffer_A2[int(x_t),int(y_t)]]).reshape(3,1)\n else:\n new_source = np.hstack((new_source, np.array([location_A1[h,w][0], location_A1[h,w][1], z_buffer_A1[h,w]]).reshape(3,1)))\n new_target = np.hstack((new_target, np.array([location_A2[int(x_t),int(y_t)][0], location_A2[int(x_t),int(y_t)][1], z_buffer_A2[int(x_t),int(y_t)]]).reshape(3,1)))\n return new_source, new_target, distances\n\n\ndef icp(source, target, sample_func, method, nr_samples=None):\n n_points = 100\n if sample_func == \"uniform\" or sample_func == \"random\":\n sampled_source, sampled_target = sample(source, target, nr_samples, sample_func)\n elif sample_func == 'multires':\n sampled_source, sampled_target = sample(source, target, n_points, sample_func)\n elif sample_func == 'inforeg':\n sampled_source, sampled_target = sample(source, target, nr_samples, sample_func)\n else:\n sampled_source, sampled_target = source, target\n\n src = copy.deepcopy(sampled_source)\n tar = copy.deepcopy(sampled_target)\n\n N = 5\n max_iterations = 40\n prev_error = np.inf\n max_samples = min([len(source), len(target)])\n\n R_total, t_total = [], [] \n\n for i in (range(max_iterations)):\n # find the nearest neighbours between the current source and destination points\n if method == 'zbuffer':\n src_x, tar_x, distances = z_buffer(src.T, tar.T)\n try:\n R, t = svd(src_x.T, tar_x.T)\n except:\n R, t = svd(sampled_source, src)\n return R, t\n # check error\n error = RMS_calc(distances)\n else:\n if method == 'kdtree':\n indices, distances = kd_tree(src, tar)\n # if method == 'zbuffer':\n # indices, distances = z_buffer(src, tar)\n else:\n indices, distances = closest_points(src, tar)\n\n # check error\n error = RMS_calc(distances)\n\n if converged(error, prev_error) and sample_func != \"multires\":\n R, t = svd(sampled_source, src)\n return R, t\n\n R, t = svd(src, tar[indices])\n R_total.append(R) \n t_total.append(t) \n # compute the transformation between the current source and nearest destination points\n if sample_func == \"random\":\n sampled_source, sampled_target = sample(source, target, nr_samples, sample_func)\n src = copy.deepcopy(sampled_source)\n tar = copy.deepcopy(sampled_target)\n for ind, r in reversed(list(enumerate(R_total))): \n src = transform(src, r, t_total[ind]) \n elif sample_func == \"multires\" and np.abs(prev_error - error) < 1e-3 and N > 2:\n N -= 1\n nr_samples = int(max_samples / N)\n sampled_source, sampled_target = sample(source, target, nr_samples, sample_func)\n src = copy.deepcopy(sampled_source)\n tar = copy.deepcopy(sampled_target)\n\n for ind, r in reversed(list(enumerate(R_total))): \n src = transform(src, r, t_total[ind]) \n else:\n # update the current source\n src = transform(src, R, t)\n \n prev_error = error\n R, t = svd(sampled_source, src)\n return R, t\n\n\ndef main():\n source, target = 
open_bunny_data()\n sample_func = \"multires\"\n method = \"kdtree\"\n nr_samples = 6000\n R, t = icp(source.T, target.T, sample_func, method, nr_samples)\n transformed_source = transform(source.T, R, t)\n vis_pcd = o3d.geometry.PointCloud()\n vis1_pcd = o3d.geometry.PointCloud()\n vis2_pcd = o3d.geometry.PointCloud()\n vis_pcd.points = o3d.utility.Vector3dVector(source.T)\n vis1_pcd.points = o3d.utility.Vector3dVector(transformed_source)\n vis2_pcd.points = o3d.utility.Vector3dVector(target.T)\n vis_pcd.paint_uniform_color([1, 0, 1])\n vis1_pcd.paint_uniform_color([0, 0, 1])\n vis2_pcd.paint_uniform_color([1, 1, 0])\n o3d.visualization.draw_geometries([vis_pcd, vis1_pcd, vis2_pcd])\n\nif __name__ == \"__main__\":\n main()\n\n\n############################\n# Additional Improvements #\n############################","repo_name":"HarryZhangHH/ComputerVision-1and2","sub_path":"ICP/em_icp.py","file_name":"em_icp.py","file_ext":"py","file_size_in_byte":12064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"17402626310","text":"#https://www.hackerrank.com/challenges/designer-pdf-viewer/problem?isFullScreen=true&h_r=next-challenge&h_v=zen\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n#\r\n# Complete the 'designerPdfViewer' function below.\r\n#\r\n# The function is expected to return an INTEGER.\r\n# The function accepts following parameters:\r\n# 1. INTEGER_ARRAY h\r\n# 2. STRING word\r\n#\r\n\r\ndef designerPdfViewer(h, word):\r\n # Write your code here\r\n word_heights = []\r\n letter_heights = {chr(ord('a') + i): h[i] for i in range(len(h))}\r\n for j in word:\r\n word_heights.append(letter_heights[j])\r\n return (max(word_heights) * len(word))\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n h = list(map(int, input().rstrip().split()))\r\n word = input()\r\n result = designerPdfViewer(h, word)\r\n fptr.write(str(result) + '\\n')\r\n fptr.close()\r\n","repo_name":"pskirank/Python-Practice","sub_path":"Designer_PDF_Viewer.py","file_name":"Designer_PDF_Viewer.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22228217310","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\nfrom tqdm.notebook import tqdm\n\n\n\ndef load_image(image_path):\n\timg = tf.io.read_file(image_path)\n\timg = tf.image.decode_jpeg(img, channels=3)\n\timg = tf.image.resize(img, (299, 299))\n\timg = tf.keras.applications.inception_v3.preprocess_input(img)\n\treturn img, image_path\n\ndef data_augment(image, image_path=None):\n\n\timage = tf.image.random_flip_left_right(image)\n\n\treturn image, image_path\n\n\ndef get_inception_model():\n\timage_model = tf.keras.applications.InceptionV3(include_top=False,weights='imagenet')\n\timage_features_extract_model = tf.keras.Model(image_model.input, image_model.layers[-1].output)\n\n\treturn image_features_extract_model\n\n\ndef extract_features(dataset, image_features_extract_model):\n\t'''\n\tINPUT: tf dataset containing image ids \n\tOUTPUT : saves extracted features to numpy files\n\t'''\n\tfor img, path in tqdm(dataset):\n\t\tbatch_features = image_features_extract_model(img)\n\t\tbatch_features = tf.reshape(batch_features,\n\t\t\t\t\t\t\t (batch_features.shape[0], -1, batch_features.shape[3]))\n\n\t\tfor bf, p in zip(batch_features, path):\n\t\t\tpath_of_feature = 
p.numpy().decode(\"utf-8\")\n\t\t\tnp.save(path_of_feature, bf.numpy())\n\n\treturn True\n\n\n# Load the numpy files\ndef map_func(img_name, cap):\n\timg_tensor = np.load(img_name.decode('utf-8')+'.npy')\n\treturn img_tensor, cap","repo_name":"anna-tch/Image_captioning","sub_path":"preprocess_images.py","file_name":"preprocess_images.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36848361924","text":"import typing\nimport config\nimport requests\nimport sys\nimport datetime\nimport enum\nimport argparse\n\nTEXT_SEARCH: str = \"tech\"\n\n# Cambridge, MA\nLAT: float = 42.38\nLON: float = -71.13\n\n# measured in miles\nRADIUS: float = 3\n\n# minimum number of days before the start of event\nSTART_DAY: int = 1\n# maximum number of days before start of event\nEND_DAY: int = 8\n\n# earliest time of day that the event can start\nSTART_TIME: datetime.time = datetime.time(17, 00, 00)\n# latest time of day that the evnet can start\nEND_TIME: datetime.time = datetime.time(20, 00, 00)\n\n# total number of results to print\nMAX_DISPLAYED: int = 5\n\n\nclass SortMethod(enum.Enum):\n UNSORTED = 0\n RSVP_COUNT = 1\n\n\n# how the output will be sorted\nSORT_METHOD: SortMethod = SortMethod.RSVP_COUNT\n\n\ndef sort_rsvp_count(event_list):\n event_list.sort(key=lambda x: x['yes_rsvp_count'], reverse=True)\n\n\ndef sort_unsorted():\n pass\n\n\nsort_method_dict: typing.Dict = {\n SortMethod.UNSORTED: sort_unsorted,\n SortMethod.RSVP_COUNT: sort_rsvp_count,\n }\n\n\ndef format_request_string(search_term: str) -> str:\n dt_today = datetime.datetime.now()\n\n dt_start: datetime.datetime = dt_today + datetime.timedelta(days=START_DAY)\n dt_start = dt_start.replace(hour=START_TIME.hour, minute=START_TIME.minute)\n\n dt_end: datetime.datetime = dt_today + datetime.timedelta(days=END_DAY)\n dt_end = dt_end.replace(hour=END_TIME.hour, minute=END_TIME.minute)\n\n request_string: str = (\n f\"https://api.meetup.com/find/upcoming_events?\"\n f\"key={config.meetup_key}&\"\n f\"lon={LON}&lat={LAT}&radius={RADIUS}&\"\n f\"end_date_rage={dt_start.strftime('%Y-%m-%dT%H:%M:%S')}&\"\n f\"start_date_range={dt_end.strftime('%Y-%m-%dT%H:%M:%S')}&\"\n f\"fields=plain_text_no_images_description\"\n )\n if search_term is not None:\n request_string += f\"&text={search_term}\"\n return request_string\n\n\ndef format_header_string(city_dict: typing.Dict) -> str:\n header_string: str = (\n f\"\\033[93mCITY:\\033[0m {city_dict['city']}, \"\n f\"\\033[93mSTATE:\\033[0m {city_dict['state']}, \"\n f\"\\033[93mCOUNTRY:\\033[0m {city_dict['country']}, \"\n f\"\\033[93mZIP:\\033[0m {city_dict['zip']}, \"\n f\"\\033[93mTOTAL MEMBERS:\\033[0m {city_dict['member_count']}\"\n )\n return header_string\n\n\ndef dict_access(event_dict: typing.Dict, dict_key: str, def_val: str) -> str:\n try:\n return event_dict[dict_key]\n except KeyError:\n return def_val\n\n\ndef format_event_string(event_dict: typing.Dict) -> str:\n event_desc_key: str = 'plain_text_no_images_description'\n event_desc: str = dict_access(event_dict, event_desc_key, \"None provided\")\n fee_text: str = dict_access(event_dict, 'fee', \"No fee\")\n rsvp_limit_text: str = dict_access(event_dict, 'rsvp_limit', \"-1\")\n format_string: str = (\n f\"\\033[93mGROUP:\\033[0m {event_dict['group']['name']}\\n\"\n f\"\\033[93mEVENT:\\033[0m {event_dict['name']}\\n\"\n f\"\\033[93mDATE:\\033[0m {event_dict['local_date']}\\n\"\n f\"\\033[93mTIME:\\033[0m {event_dict['local_time']}\\n\"\n 
f\"\\033[93mATTENDANCE COUNT:\\033[0m\"\n f\" {event_dict['yes_rsvp_count']}/{rsvp_limit_text}\\n\"\n f\"\\033[93mFEE:\\033[0m {fee_text}\\n\"\n f\"\\033[93mDESCRIPTION:\\033[0m {event_desc}\\n\"\n )\n return format_string\n\n\ndef main() -> int:\n parser: argparse.ArgumentParser = argparse.ArgumentParser()\n parser.add_argument(\"-search\", type=str, default=TEXT_SEARCH,\n help=\"search term to filter events\")\n parser.add_argument(\"-all\", action='store_true',\n help=\"ignores any search term\")\n parser.add_argument(\"-max\", type=int, default=MAX_DISPLAYED,\n help=\"maximum number of events to display\")\n parser.add_argument(\"-sort\", type=SortMethod, default=SORT_METHOD,\n help=\"sort method used to display results\")\n args: argparse.Namespace = parser.parse_args()\n\n search_string: typing.Optional = args.search\n if args.all:\n search_string = None\n req_string: str = format_request_string(search_string)\n response: typing.Any = requests.get(req_string)\n response_dict: typing.Dict = response.json()\n\n header_string: str = format_header_string(response_dict['city'])\n separator: str = \"*\" * 80\n sys.stdout.write(\"%s\\n%s\\n\" % (header_string, separator))\n\n event_list: typing.List = response_dict['events']\n sort_method_dict[args.sort](event_list)\n for count, event in enumerate(event_list):\n if count == args.max:\n break\n event_string: str = format_event_string(event)\n sys.stdout.write(\"%s\\n%s\\n\" % (event_string, separator))\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"kellencataldo/scripts","sub_path":"meetups.py","file_name":"meetups.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7268263908","text":"from nltk.corpus import stopwords\nimport string, re\n\n\n_letters = list(string.ascii_lowercase)\n_numbers = [str(i) for i in range(0, 10)]\n_banned = [\n \"’\", \"’\", \"“\", \"—\", \"”\", \"‘\", \"–\", '#', '[', '/', '(', ')', \n '{', '}', '\\\\', '[', ']', '|', '@', ',', ';', '+', '-'\n]\n_banned = ''.join(_banned) + string.punctuation + ''.join(_numbers)\n_boilerplate = [\n ' ', 'https', 'http', 'www', '’s', '―', '/', 'playback', \n 'get', 'mr', 'mrs', 'ms', 'dr', 'prof', 'news', 'report', \n 'unsubscribe', 'they', 'must', 'share', 'that', 'view', 'hide', \n 'copy', 'something', 'enlarge', 'reprint', 'read', '_', 'videos', \n 'autoplay', 'watched', 'press', '’ve', 'toggle', 'around', 'the', \n 's.', 'said', 'here©', 'ad', '#', 'andhis', 'click', 'r', 'device', \n 'contributed', 'advertisement', 'the washington', '&', 'follow', \n 'copyright', 'mrs.', 'photo', 'to', 'also', 'times', 'for', 'however', \n 'fox', 'this', 'copyright ©', 'ofs', 'just', 'wait', 'n’t', 'told', \n 'unsupported', 'i', 'caption', 'ms.', '’m', 'paste', '’re', 'replay', \n 'photos', 'mr.', '©', 'skip', 'watch', '2018', 'cut', 'llc', 'more', \n 'post', 'embed', 'blog', 'b.', 'associated', 'permission'\n]\n_stop_list = set(stopwords.words('english') + _boilerplate + _letters)\n_translation_table = dict.fromkeys(map(ord, _banned), ' ')\n_translation_table\n\ndef clean_text(text_str):\n text_str = text_str.lower()\n text_str = text_str.translate(_translation_table)\n text_str = re.sub(' +', ' ', text_str)\n text_str = ' '.join([word for word in text_str.split() if word not in _stop_list])\n return 
text_str","repo_name":"slottoliver/isds_exam_code","sub_path":"user_library/news_scrape/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3490540803","text":"# Standard library imports\nimport contextlib\n\n# Third-party imports\nimport aiohttp\nimport asyncio\nimport pytest\n\n# Local imports\nfrom uplink.clients import (\n AiohttpClient,\n aiohttp_,\n register,\n io,\n)\n\n\n@contextlib.contextmanager\ndef _patch(obj, attr, value):\n if obj is not None:\n old_value = getattr(obj, attr)\n setattr(obj, attr, value)\n yield\n if obj is not None:\n setattr(obj, attr, old_value)\n\n\n@pytest.fixture\ndef aiohttp_session_mock(mocker):\n return mocker.Mock(spec=aiohttp.ClientSession)\n\n\nclass AsyncMock(object):\n def __init__(self, result=None):\n self._result = result\n self._calls = 0\n\n async def __call__(self, *args, **kwargs):\n self._calls += 1\n f = asyncio.Future()\n f.set_result(self._result)\n return f\n\n @property\n def called(self):\n return self._calls > 0\n\n\nclass TestAiohttp(object):\n def test_init_when_aiohttp_is_not_installed(self):\n with _patch(aiohttp_, \"aiohttp\", None):\n with pytest.raises(NotImplementedError):\n AiohttpClient()\n\n def test_init_with_session_None(self, mocker):\n mocker.spy(AiohttpClient, \"_create_session\")\n AiohttpClient(kwarg1=\"value\")\n AiohttpClient._create_session.assert_called_with(kwarg1=\"value\")\n\n def test_get_client(self, aiohttp_session_mock):\n client = register.get_client(aiohttp_session_mock)\n assert isinstance(client, aiohttp_.AiohttpClient)\n\n @pytest.mark.asyncio\n async def test_request_send(self, mocker, aiohttp_session_mock):\n # Setup\n expected_response = mocker.Mock()\n\n async def request(*args, **kwargs):\n return expected_response\n\n aiohttp_session_mock.request = request\n client = aiohttp_.AiohttpClient(aiohttp_session_mock)\n\n # Run\n response = await client.send((1, 2, {}))\n\n # Verify\n assert response == expected_response\n\n @pytest.mark.asyncio\n async def test_callback(self, mocker, aiohttp_session_mock):\n # Setup\n expected_response = mocker.Mock(spec=aiohttp_.aiohttp.ClientResponse)\n expected_response.text = AsyncMock()\n\n async def request(*args, **kwargs):\n return expected_response\n\n aiohttp_session_mock.request = request\n client = aiohttp_.AiohttpClient(aiohttp_session_mock)\n\n # Run\n async def call():\n response = await client.send((1, 2, {}))\n response = await client.apply_callback(lambda x: 2, response)\n return response\n\n value = await call()\n\n # Verify\n assert value == 2\n assert expected_response.text.called\n\n def test_wrap_callback(self, mocker):\n # Setup\n c = AiohttpClient()\n mocker.spy(c, \"_sync_callback_adapter\")\n\n # Run: with callback that is not a coroutine\n def callback(*_):\n pass\n\n c.wrap_callback(callback)\n\n # Verify: Should wrap it\n c._sync_callback_adapter.assert_called_with(callback)\n\n # Run: with coroutine callback\n async def awaitable_callback():\n pass\n\n assert c.wrap_callback(awaitable_callback) is awaitable_callback\n\n @pytest.mark.asyncio\n async def test_threaded_callback(self, mocker):\n def callback(response):\n return response\n\n # Mock response.\n response = mocker.Mock(spec=aiohttp_.aiohttp.ClientResponse)\n response.text = AsyncMock()\n\n # Run\n new_callback = aiohttp_.threaded_callback(callback)\n return_value = await new_callback(response)\n\n # Verify\n assert response.text.called\n assert 
return_value == response\n\n # Run: Verify with callback that returns new value\n def callback(*_):\n return 1\n\n new_callback = aiohttp_.threaded_callback(callback)\n value = await new_callback(response)\n assert value == 1\n assert response.text.called\n\n # Run: Verify with response that is not ClientResponse (should not be wrapped)\n response = mocker.Mock()\n await new_callback(response)\n assert not response.text.called\n\n def test_threaded_coroutine(self):\n async def coroutine():\n return 1\n\n threaded_coroutine = aiohttp_.ThreadedCoroutine(coroutine)\n\n # Run -- should block\n response = threaded_coroutine()\n\n # Verify\n assert response == 1\n\n def test_threaded_response(self, mocker):\n async def coroutine():\n return 1\n\n def not_a_coroutine():\n return 2\n\n response = mocker.Mock()\n response.coroutine = coroutine\n response.not_coroutine = not_a_coroutine\n threaded_response = aiohttp_.ThreadedResponse(response)\n\n # Run\n threaded_coroutine = threaded_response.coroutine\n return_value = threaded_coroutine()\n\n # Verify\n assert isinstance(threaded_coroutine, aiohttp_.ThreadedCoroutine)\n assert return_value == 1\n assert threaded_response.not_coroutine is not_a_coroutine\n\n @pytest.mark.asyncio\n async def test_create(self, mocker):\n session_cls_mock = mocker.patch(\"aiohttp.ClientSession\")\n positionals = [1]\n keywords = {\"keyword\": 2}\n\n # Run: Create client\n client = aiohttp_.AiohttpClient.create(*positionals, **keywords)\n\n # Verify: session hasn't been created yet.\n assert not session_cls_mock.called\n\n # Run: Get session\n await client.session()\n\n # Verify: session created with args\n session_cls_mock.assert_called_with(*positionals, **keywords)\n\n @pytest.mark.asyncio\n async def test_close_auto_created_session(self, mocker):\n # Setup\n import gc\n\n mock_session = mocker.Mock(spec=aiohttp.ClientSession)\n session_cls_mock = mocker.patch(\"aiohttp.ClientSession\")\n session_cls_mock.return_value = mock_session\n\n positionals = [1]\n keywords = {\"keyword\": 2}\n\n # Run: Create client\n client = aiohttp_.AiohttpClient.create(*positionals, **keywords)\n\n # Run: Get session\n await client.session()\n\n # Verify: session created with args\n session_cls_mock.assert_called_with(*positionals, **keywords)\n\n # Verify: session closed on garbage collection\n del client\n gc.collect()\n session_cls_mock.return_value.close.assert_called_with()\n\n def test_exceptions(self):\n import aiohttp\n\n exceptions = aiohttp_.AiohttpClient.exceptions\n\n with pytest.raises(exceptions.BaseClientException):\n raise aiohttp.ClientError()\n\n with pytest.raises(exceptions.BaseClientException):\n # Test polymorphism\n raise aiohttp.InvalidURL(\"invalid\")\n\n with pytest.raises(exceptions.ConnectionError):\n raise aiohttp.ClientConnectionError()\n\n with pytest.raises(exceptions.ConnectionTimeout):\n raise aiohttp.ClientConnectorError.__new__(\n aiohttp.ClientConnectorError\n )\n\n with pytest.raises(exceptions.ServerTimeout):\n raise aiohttp.ServerTimeoutError()\n\n with pytest.raises(exceptions.SSLError):\n raise aiohttp.ClientSSLError.__new__(aiohttp.ClientSSLError)\n\n with pytest.raises(exceptions.InvalidURL):\n raise aiohttp.InvalidURL(\"invalid\")\n\n def test_io(self):\n assert isinstance(aiohttp_.AiohttpClient.io(), 
io.AsyncioStrategy)\n","repo_name":"prkumar/uplink","sub_path":"tests/unit/test_aiohttp_client.py","file_name":"test_aiohttp_client.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","stars":1036,"dataset":"github-code","pt":"81"}
{"seq_id":"30071240408","text":"def isPhoneNumber(text): # 415-555-1234\n    if len(text) != 12:\n        return False # not phone number-sized\n    for i in range(0, 3):\n        if not text[i].isdecimal():\n            return False # no area code\n    if text[3] != '-':\n        return False # missing dash\n    for i in range(4, 7):\n        if not text[i].isdecimal():\n            return False\n    if text[7] != '-':\n        return False # missing dash\n    for i in range(8, 12):\n        if not text[i].isdecimal():\n            return False\n    return True\n\nmessage = 'Call me 412-222-3212 tomorrow, or at 123-343-2332 so that I can know that you are coming'\nfoundNumber = False\nfor i in range(len(message)):\n    chunk = message[i: i+12]\n    if isPhoneNumber(chunk):\n        print('Phone number found: ' +chunk)\n        foundNumber = True\nif not foundNumber:\n    print('Could not find any phone numbers.')\n\n    \n    \n    \n    \n","repo_name":"TenzinTsundue/Python-micro-course","sub_path":"phoneNumberCheck.py","file_name":"phoneNumberCheck.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42914798735","text":"import numpy as np\r\nimport scipy as sp\r\nfrom random import *\r\nfrom matplotlib import pyplot as plt \r\nimport time\r\nstart_time = time.time()\r\n\r\n\r\nTau=100 \r\nr=(random()) # Random number generator: initial step length, updated after each scatter\r\n\r\n# Define storage arrays\r\nN=[]\r\nT=[]\r\n\r\n#Monte-Carlo\r\n\r\n\r\nSimul=10000 \r\nfor j in range(Simul):\r\n\r\n    #initial Conditions since photon ejected from the centre\r\n    #x,y,z=position\r\n    #R=Radius\r\n    #ns=number of scatters\r\n    #t=time \r\n    \r\n    \r\n    \r\n    R,x,y,z,ns,t=(0,0,0,0,0,0)\r\n\r\n    \r\n    \r\n    \r\n    \r\n\r\n    for i in range(10000):\r\n        phi=(uniform(0, 2*np.pi)) #azimuthal angle between 0 and 2 pi\r\n        theta=(uniform(-1, 1)) #random value of cos Theta between -1 and 1\r\n        \r\n        #Positions from random scattering \r\n        x=x + r*np.cos(phi)\r\n        y=y + r*np.sin(phi)\r\n        z=z + r*theta\r\n        R=(x**2 + y**2 + z**2)**(1/2) #Escape radius\r\n        ns=ns+1 \r\n        t=t+r #Time to photon escape\r\n        \r\n        N_Tau=r/R*Tau #New tau dependent on R\r\n        r=(1-np.exp(-N_Tau))/N_Tau # N_tau step length\r\n        if R>=2:\r\n            break\r\n        \r\n    T.append(t)\r\n    N.append(ns)\r\n#Plotting the Distributions\r\nfig=plt.figure(figsize=(15,10))\r\nplt.hist(T,bins=150, label='Tau = 100')\r\n\r\n\r\nplt.legend()\r\nplt.xlabel('T') \r\nplt.ylabel('Distrib')\r\nplt.savefig('distributiontime100random')\r\nplt.show()\r\n\r\nprint (time.time() - start_time)\r\n","repo_name":"dennzp1/Python-stuff","sub_path":"DiffusionLSE.py","file_name":"DiffusionLSE.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"72659999306","text":"\nimport os\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\n# On Windows platform, the torch.distributed package only\n# supports Gloo backend, FileStore and TcpStore.\n# For FileStore, set init_method parameter in init_process_group\n# to a local file. 
Example as follow:\n# init_method=\"file:///f:/libtmp/some_file\"\n# dist.init_process_group(\n# \"gloo\",\n# rank=rank,\n# init_method=init_method,\n# world_size=world_size)\n# For TcpStore, same way as on Linux.\nfrom opendp.network.odometer import assert_release_binary\nfrom opendp.network.odometer_reconstruction import ReconstructionPrivacyOdometer\n\nassert_release_binary()\n\n\ndef torch_mpc_setup(rank, world_size):\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '12355'\n os.environ['GLOO_SOCKET_IFNAME'] = 'lo0'\n\n # initialize the process group\n dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n\n\ndef cleanup():\n dist.destroy_process_group()\n\n\nclass ToyModel(nn.Module):\n def __init__(self):\n super(ToyModel, self).__init__()\n self.net1 = nn.Linear(10, 10)\n self.relu = nn.ReLU()\n self.net2 = nn.Linear(10, 5)\n\n def forward(self, x):\n return self.net2(self.relu(self.net1(x)))\n\n\ndef demo_basic(rank, world_size):\n print(f\"Running basic DDP example on rank {rank}.\")\n torch_mpc_setup(rank, world_size)\n\n torch.manual_seed(0)\n # create model and move it to GPU with id rank\n model = ToyModel() # .to(rank)\n model = DDP(model) # , device_ids=[rank]\n\n odometer = ReconstructionPrivacyOdometer(step_epsilon=.1)\n # fill grads with `rank`\n odometer._set_fill(constant=rank)\n model = odometer.make_tracked_view(model)\n\n loss_fn = nn.MSELoss()\n optimizer = optim.SGD(model.parameters(), lr=0.001)\n\n optimizer.zero_grad()\n outputs = model(torch.randn(20, 10))\n labels = torch.randn(20, 5) # .to(rank)\n loss_fn(outputs, labels).backward()\n optimizer.step()\n\n mean = sum(range(world_size)) / world_size\n assert all(v == mean for v in model.module.net1.bias.grad.tolist())\n\n cleanup()\n\n\ndef test_multi():\n for world_size in range(3, 6):\n print(f\"Trying DDP with world_size {world_size}\")\n mp.spawn(demo_basic,\n args=(world_size,),\n nprocs=world_size,\n join=True)\n print(f\"DDP synchronized tensor grads successfully with {world_size} workers\")\n\n\nif __name__ == \"__main__\":\n test_multi()\n","repo_name":"opendp/opendp-pytorch","sub_path":"test/test_ddp.py","file_name":"test_ddp.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39707210536","text":"#!/usr/bin/python\n\nimport gdax\nimport json\nimport logging\nimport configparser\nfrom datetime import datetime, timedelta, date\n\nconfig = configparser.ConfigParser(inline_comment_prefixes='#')\nconfig.read('gdax.config')\n\nbank_acct_id = config['DEFAULT']['bank_acct_id']\nbuy_amount_btcusd = float(config['DEFAULT']['buy_amount_btcusd'])\nbuy_amount_ethusd = float(config['DEFAULT']['buy_amount_ethusd'])\nfund_usd_threshold = float(config['DEFAULT']['fund_usd_threshold'])\nfund_usd_amount = float(config['DEFAULT']['fund_usd_amount'])\nbtc_withdrawal_threshold = float(config['DEFAULT']['btc_withdrawal_threshold'])\nbtc_withdrawal_address = config['DEFAULT']['btc_withdrawal_address']\nrecent_deposit_max = float(config['DEFAULT']['recent_deposit_max'])\n\nlogging.basicConfig(filename=config['DEFAULT']['logfile'],level=logging.DEBUG)\n\nclient = gdax.AuthenticatedClient(config['DEFAULT']['key'], config['DEFAULT']['secret'], config['DEFAULT']['passphrase'])\n\naccounts = client.get_accounts()\n\nlogging.debug( json.dumps(accounts, indent=4) )\n\n#Loop through accounts to get an account for each currency\nfor account in accounts:\n if account[\"currency\"] == 'USD' :\n 
usd_account = account\n    elif account[\"currency\"] == 'BTC' :\n        btc_account = account\n    elif account[\"currency\"] == 'ETH' :\n        eth_account = account\n\ntransfers = client.get_account_transfers(usd_account[\"id\"])\n\nlogging.debug( json.dumps(transfers, indent=4) )\n\n#Loop through transfers from the USD account, and sum the pending deposits.\nall_deposits = 0\nrecent_deposit_sum = 0\nfor transfer in transfers:\n    if transfer[\"details\"].get(\"coinbase_payment_method_type\") == \"ach_bank_account\" and transfer[\"type\"] == \"deposit\" and datetime.strptime( transfer[\"created_at\"], \"%Y-%m-%d %H:%M:%S.%f+00\" ) > datetime.utcnow() - timedelta(days=30) :\n        recent_deposit_sum += float(transfer[\"amount\"])\n\n    if transfer[\"type\"] == \"deposit\" and transfer[\"completed_at\"] is None:\n        all_deposits += float(transfer[\"amount\"])\n\n\n#deposit\nif recent_deposit_sum >= recent_deposit_max:\n    logging.debug(\"Last 30 days: $%d \\t Limit: $%d\" % (recent_deposit_sum, recent_deposit_max) )\n    logging.info(\"You've deposited more than your monthly limit. Not depositing more.\")\n\nelif float(usd_account[\"available\"]) + all_deposits < fund_usd_threshold :\n    logging.info(\"Current balance of $%d and Pending Transfers of $%d is less than $%d. Adding more funds +$%d \" % (float(usd_account[\"available\"]), all_deposits, fund_usd_threshold, fund_usd_amount) )\n    ret = client.deposit( fund_usd_amount, \"USD\", bank_acct_id )\n    logging.debug( json.dumps(ret, indent=4) )\nelse:\n    logging.info(\"Not depositing more funds.\" ) \n\n\n#withdraw\nif float(btc_account[\"available\"]) > btc_withdrawal_threshold:\n    logging.info(\"BTC balance is greater than %sBTC. Withdrawing all (%s)\" % (btc_withdrawal_threshold, btc_account[\"available\"]) )\n    client.crypto_withdraw(btc_account[\"available\"], \"BTC\", btc_withdrawal_address)\n\n#current price BTC\ncurprice = float(client.get_product_ticker(\"BTC-USD\")[\"bid\"])\nbuy_price_btc = curprice\nlogging.debug(\"buy_price_btc: %s\" % buy_price_btc)\nbuy_size_btc = \"%.8f\" % max(buy_amount_btcusd / buy_price_btc, 0.001)\nlogging.debug(\"buy_size_btc: %s\" % buy_size_btc)\n\n#current price ETH\ncurprice = float(client.get_product_ticker(\"ETH-USD\")[\"bid\"])\nbuy_price_eth = curprice\nlogging.debug(\"buy_price_eth: %s\" % buy_price_eth)\nbuy_size_eth = \"%.6f\" % max(buy_amount_ethusd / buy_price_eth, 0.01)\nlogging.debug(\"buy_size: %s\" % buy_size_eth)\n\n#buy\nif float(usd_account[\"available\"]) >= buy_amount_btcusd + buy_amount_ethusd :\n    logging.info(\"Buying $%d of BTC\" % buy_amount_btcusd)\n    ret = client.buy(type=\"limit\", price=buy_price_btc, size=buy_size_btc, post_only=True, product_id= \"BTC-USD\")\n#logging.debug( json.dumps(ret, indent=4) )\n\n    logging.info(\"Buying $%d of ETH\" % buy_amount_ethusd)\n    ret = client.buy(type=\"limit\", price=buy_price_eth, size=buy_size_eth, post_only=True, product_id= \"ETH-USD\")\n#    logging.debug( json.dumps(ret, indent=4) )\n\nelse :\n    logging.info(\"Not enough USD to buy BTC\")\n\n","repo_name":"dantidote/tothemoon_coinbasepro","sub_path":"tothemoon.py","file_name":"tothemoon.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32710625446","text":"# unrecognizable numbers are marked as 0\n\nfrom collections import defaultdict\n\ndef solution(lottos, win_nums):\n    dic = defaultdict(int)\n    count = 1\n    for i in range(6, -1, -1):\n        dic[str(i)] = count\n        if count <6:\n            count +=1\n    min = 0\n    zero_count = 0\n    for i in range(len(lottos)):\n        if 
lottos[i] == 0:\n            zero_count +=1\n        if lottos[i] in win_nums:\n            min +=1\n\n    max = min + zero_count\n    return [dic[str(max)], dic[str(min)]]\n\nprint(solution([44, 1, 0, 0, 31, 25],\t[31, 10, 45, 1, 6, 19]))\n\nprint(solution([7,8,9,10,11,12], [1, 2, 3, 4, 5, 6]))","repo_name":"yec3168/algorithm","sub_path":"programmers/lv_1/로또의 최고 순위와 최저 순위.py","file_name":"로또의 최고 순위와 최저 순위.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"20862797515","text":"#!/usr/bin/env python3\r\n\r\nfrom math import *\r\nimport math\r\nfrom operator import *\r\nimport sys\r\n\r\ndebut = 0.5 \r\ndebut2 = 0 \r\nfin = 1\r\nmax_iter = 200\r\nparam = [3,-1,0,6,-5,1,8,0.2]\r\nprecision = 10 ** - (param[6])\r\nopt=param[0]\r\nprint ('For the bonus, an example with a degree 5 equation')\r\nprint ('precision: ', (precision))\r\n\r\ndef f(x):\r\n    return param[7]*x**5 + param[5]*x**4+param[4]*x**3+param[3]*x**2+param[2]*x+param[1]\r\n\r\ndef df(x):\r\n    return 5*param[7]*x**4 + 4*param[5]*x**3+3*param[4]*x**2+2*param[3]*x+param[2]\r\n\r\ndef bisection(debut,fin,precision):\r\n    \r\n    print('\\n*Bisection method*')\r\n    print ( 'x = ', str(float(debut)))\r\n    if (f(debut)*f(fin))>0:\r\n        print ('No solution between limits') \r\n        return\r\n    x0 = debut\r\n    x1 = fin\r\n    for n in range(0,max_iter) :\r\n        x2 = (x0 + x1) / 2\r\n        print ( 'x = ',round(x2,6))\r\n        if f(x0) * f(x2) < 0 :\r\n            x1 = x2\r\n            if abs(f(x1)) < precision :\r\n                return \r\n        else:\r\n            x0 = x2\r\n    return None\r\ndef newton(debut, fin, precision):\r\n    xn = debut\r\n    if (f(debut)*f(fin))>0:\r\n        print ('No solution between limits') \r\n        return\r\n\r\n    for n in range(0,max_iter):\r\n        fxn = f(xn)\r\n        print ('x= ',round(xn,12))\r\n        #print ((\"x = %.12f\")%(xn))\r\n        if abs(fxn) < precision:\r\n            print('Found solution after',n,'iterations.')\r\n            return xn\r\n        dfxn = df(xn)\r\n        if dfxn == 0:\r\n            print('Zero derivative. No solution found.')\r\n            return None\r\n        xn = xn - fxn/dfxn\r\n    print('Exceeded maximum iterations. 
No solution found.')\r\n    return None\r\n    \r\n\r\ndef secant(debut, fin, precision):\r\n    print('\\n*Secant method*')\r\n    a_n = debut\r\n    b_n = fin\r\n    print ('x= ',a_n)\r\n    for n in range(0,max_iter):\r\n        \r\n        m_n = a_n - f(a_n)*(b_n - a_n)/(f(b_n) - f(a_n))\r\n        f_m_n = f(m_n)\r\n        pt=round(m_n,8)\r\n        #print (pt)\r\n        print ('x= ',round(m_n,8))\r\n        #print ((\"x = %.8f\")%(m_n))\r\n        if abs(f_m_n) < precision:\r\n            return m_n\r\n    return None\r\n\r\nif (f(debut)*f(fin)) > 0.0:\r\n    print('Given guess values do not bracket the root.')\r\nelse:\r\n    if opt==1:\r\n        bisection(debut,fin,precision)\r\n    elif opt==2:\r\n        newton(debut, fin ,precision)\r\n    elif opt==3:\r\n        secant(debut, fin, precision)\r\n    else:\r\n        print((opt),' is an invalid option')\r\n    \r\n\r\n    \r\n\r\n\r\n\r\n","repo_name":"juliette0704/juliette0704","sub_path":"math_python/semestre 1/105torus/bonus/bonus.py","file_name":"bonus.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"10213249009","text":"import torch\nimport torch.nn.functional as F\nfrom losses.smooth_l1_loss import smooth_l1_loss\nfrom model.init_weights import *\nfrom utils.IoU_calculate.IoU_assign import IoU_assigner\nfrom functools import partial\nfrom six.moves import map, zip\nfrom utils.nms.bbox_nms import multiclass_nms\nfrom .bbox import Bbox\n\ndef pairs(x):\n    assert type(x) != str\n    return [x,x]\n\ndef multi_apply(func, *args, **kwargs):\n    \"\"\"Apply function to a list of arguments.\n\n    Note:\n        This function applies the ``func`` to multiple inputs and\n        map the multiple outputs of the ``func`` into different\n        list. Each list contains the same type of outputs corresponding\n        to different inputs.\n\n    Args:\n        func (Function): A function that will be applied to a list of\n            arguments\n\n    Returns:\n        tuple(list): A tuple containing multiple list, each list contains \\\n            a kind of returned results by the function\n    \"\"\"\n    pfunc = partial(func, **kwargs) if kwargs else func\n    map_results = map(pfunc, *args)\n    return tuple(map(list, zip(*map_results))) # transpose the results, then split into lists\n\ndef image_to_level(target, num_anchors_level):\n    \"\"\"\n    Turn [anchor_img0, anchor_img1..] 
to [anchor_level0, anchor_level2..]\n List length=batch size List length=level length\n (num_anchors,4) (batch_size, num_level_anchors, 4)\n :param target:\n :param num_anchors_lever:\n :return:\n \"\"\"\n target = torch.stack(target, 0)\n level_targets = []\n start = 0\n for n in num_anchors_level:\n end = start + n\n # level_targets.append(target[:, start:end].squeeze(0))\n level_targets.append(target[:, start:end])\n start = end\n return level_targets\n\nclass ssd_header(nn.Module):\n\n default_inplanes = [512, 1024, 512,256,256,256]\n\n def __init__(self,\n num_classes,\n anchor_cfg={'strides':[8,16,32,64,100,300],\n 'ratios':[[2],[2,3],[2,3],[2,3],[2,],[2]],\n 'scale_range':[0.2, 0.9],\n 'input_size':300},\n bbox_cfg={'means': (0., 0., 0., 0.), 'stds': (0.1, 0.1, 0.2, 0.2)}\n ,\n neg_pos_rate=3\n ):\n super(ssd_header, self).__init__()\n self.num_classes = num_classes\n self.anchor_cfg = anchor_cfg\n assert len(self.default_inplanes)==len(self.anchor_cfg['strides'])\n self.num_multi_feats = len(self.default_inplanes)\n self.neg_pos_rate = neg_pos_rate\n\n # anchor config\n self.anchor_strides = self.anchor_cfg['strides']\n self.anchor_ratios = self.anchor_cfg['ratios']\n self.anchor_input_size = self.anchor_cfg['input_size']\n self.featmap_size = [38, 19, 10, 5, 3, 1]\n\n # gen anchors\n self.base_len = [int(0.1 * self.anchor_input_size)]\n small, big = self.anchor_cfg['scale_range']\n self.step = (big - small) / (self.num_multi_feats - 2)\n self.step_ = int(self.step * self.anchor_input_size)\n for i in range(int(small*100), int(big*100+1), int(self.step*100)):\n self.base_len.append(int(i * self.anchor_input_size / 100))\n gen_ratios = []\n gen_scales = []\n for j, item in enumerate(self.base_len):\n try:\n gen_scale = torch.Tensor([1.,\n np.sqrt(self.base_len[j+1] /\n self.base_len[j])])\n except:\n gen_scale = torch.Tensor([1.,\n np.sqrt((self.base_len[j]+self.step_) /\n self.base_len[j])])\n gen_scales.append(gen_scale\n )\n gen_ratio = [1.,]\n for k in self.anchor_ratios[j]:\n gen_ratio.extend([1/k, k])\n gen_ratios.append(\n torch.Tensor(gen_ratio)\n )\n self.gen_ratios = gen_ratios\n self.gen_scales = gen_scales\n self.base_anchors = self.gen_base_anchors()\n self.num_anchors = [len(num_anchor) for num_anchor in self.base_anchors ]\n\n # bbox config\n self.bbox_menas = bbox_cfg['means']\n self.bbox_stds = bbox_cfg['stds']\n self.bbox = Bbox(means=self.bbox_menas, stds=self.bbox_stds)\n\n # forward convs\n cla_convs = []\n loc_convs = []\n for i in range(len(self.default_inplanes)):\n cla_convs.append(nn.Conv2d(self.default_inplanes[i],\n (self.num_classes+1)*self.num_anchors[i],\n kernel_size=3,padding=1))\n loc_convs.append(nn.Conv2d(self.default_inplanes[i],\n 4*self.num_anchors[i],\n kernel_size=3,padding=1))\n self.loc_convs = nn.ModuleList(loc_convs)\n self.cla_convs = nn.ModuleList(cla_convs)\n\n @property\n def num_base_anchors(self):\n return [base_anchors.size(0) for base_anchors in self.base_anchors]\n\n def forward(self,feats):\n cla_scores = []\n loc_results = []\n for feat, cla_layer, loc_layer in zip(feats, self.cla_convs, self.loc_convs):\n cla_scores.append(cla_layer(feat))\n loc_results.append(loc_layer(feat))\n return cla_scores, loc_results\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform', bias=0)\n\n def gen_anchor_single_feat(self,base_len,stride,ratios,scales):\n w, h = base_len\n w_center = int(stride/2)\n h_center = int(stride/2)\n\n h_ratio = torch.sqrt(ratios)\n w_ratio = 1 / 
h_ratio\n\n d_w = w * scales[:,None] * w_ratio[None,:]\n d_h = h * scales[:,None] * h_ratio[None,:]\n\n d_w = d_w.view(-1)\n d_h = d_h.view(-1)\n\n anchors = [w_center - 0.5 * d_w,\n h_center - 0.5 * d_h,\n w_center + 0.5 * d_w,\n h_center + 0.5 * d_h,\n ]\n\n anchors = torch.stack(anchors, dim=-1)\n\n return anchors\n\n def gen_base_anchors(self,device='cuda'):\n mulit_feat = []\n for i in range(len(self.base_len)):\n base_anchors=self.gen_anchor_single_feat(pairs(self.base_len[i]),\n self.anchor_strides[i],\n self.gen_ratios[i],\n self.gen_scales[i])\n indices = list(range(len(self.gen_ratios[i])))\n indices.insert(1,len(indices))\n base_anchors = torch.index_select(base_anchors.to(device), 0, torch.LongTensor(indices).to(device))\n mulit_feat.append(base_anchors)\n return mulit_feat\n\n def grid_anchors(self,clamp=False,device='cuda'):\n \"\"\"\n :argument:\n self.base_anchors: List[Tensor]\n self.anchor_strides: List[int]\n :return:\n multi_grid_anchors: list[Tensor: num_levels, featmap_size, 4]\n \"\"\"\n multi_grid_anchors = []\n for num_levels in range(len(self.base_anchors)):\n x = torch.from_numpy(np.array(\n range(0, self.anchor_input_size, self.anchor_strides[num_levels]))).to(device)\n y = torch.from_numpy(np.array(\n range(0, self.anchor_input_size, self.anchor_strides[num_levels]))).to(device)\n x_, y_ = self.shift(x,y)\n shift = [x_, y_, x_, y_]\n shift = torch.stack(shift, dim=1)\n shift_=shift.type_as(self.base_anchors[num_levels].to(device))\n shift_anchors = self.base_anchors[num_levels].to(device)[None,:,:]+shift_[:,None,:]\n shift_anchors = shift_anchors.view(-1,4)\n if clamp:\n shift_anchors = shift_anchors.clamp(min=0,max=300)\n multi_grid_anchors.append(shift_anchors)\n return multi_grid_anchors\n\n def shift(self,x,y):\n \"\"\"\n :param x: Tensor, w of featmap\n :param y: int, h of featmap\n :return: shift: Tensor, wh * 4\n \"\"\"\n x_ = x.repeat(len(y))\n y_ = y.view(-1,1).repeat(1,len(x)).view(-1)\n\n return x_, y_\n\n def match_(self, anchors_list, valid_flag_list, gt_bboxes, gt_labels, batch_size):\n \"\"\"\n match anchors with gt_bboxes in a batch of imgs\n :param anchors_list: list[list[Tensor: num_levels, featmap_size, 4]]\n :param gt_bboxes: list[Tensor]\n :param gt_labels: list[Tensor]\n :return:\n \"\"\"\n num_anchors_level = [anchors.size(0) for anchors in anchors_list[0]]\n concat_anchor_list = []\n concat_valid_flag_list = []\n for i in range(batch_size):\n concat_anchor_list.append(torch.cat(anchors_list[i])) # list[Tensor: num_total_anchors, 4]\n concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n result = multi_apply(self.match_single_img,\n concat_anchor_list, concat_valid_flag_list,\n gt_bboxes, gt_labels, batch_size=batch_size)\n (all_labels, all_label_weights, all_bbox_targets,\n all_bbox_weights, all_pos_idx, all_neg_idx) = result\n num_pos_total = sum([max(inds.numel(),1) for inds in all_pos_idx])\n num_neg_total = sum([max(inds.numel(),1) for inds in all_neg_idx])\n\n labels_list = image_to_level(all_labels, num_anchors_level)\n label_weights_list = image_to_level(all_label_weights, num_anchors_level)\n bboxes_target_list = image_to_level(all_bbox_targets, num_anchors_level)\n bboxes_weights_list = image_to_level(all_bbox_weights,\n num_anchors_level)\n return (labels_list, label_weights_list, bboxes_target_list, bboxes_weights_list, num_pos_total, num_neg_total)\n\n def match_single_img(self, flat_anchors, valid_flags, gt_bboxes, gt_labels, batch_size=4):\n \"\"\"\n match anchors with gt_bboxes in one img\n :param flat_anchors: 
Tensor: num_anchors, 4\n        :param gt_bboxes: Tensor: num_gt, 4\n        :param gt_labels: Tensor: num_gt,\n        :param batch_size: int\n        :return: labels: Tensor: num_anchors, ; num_classes(background) when not assigned with gt\n                 bbox_targets: Tensor: num_anchors, 4; 0 when not assigned\n                 pos_idx: list:\n        \"\"\"\n        inside_flags = valid_flags\n        anchors = flat_anchors[inside_flags, :]\n        assigner = IoU_assigner()\n        # [num_anchors, ],\n        assign_gt_idx, assign_label = assigner.assign(gt_bboxes, anchors, gt_labels)\n        # len(pos_idx)+len(neg_idx)=num_anchors\n        pos_idx = torch.nonzero(assign_gt_idx>0, as_tuple=False).squeeze(-1)\n        neg_idx = torch.nonzero(assign_gt_idx==0, as_tuple=False).squeeze(-1)\n        pos_bboxs = anchors[pos_idx]\n        neg_bboxs = anchors[neg_idx]\n        pos_gt_bboxes = gt_bboxes[assign_gt_idx[pos_idx]-1]\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        # only positive-sample bboxes contribute to the bbox loss\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full(\n            (num_valid_anchors, ), self.num_classes , dtype=torch.long) #(num_anchors, )\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n        if len(pos_idx) > 0:\n            pos_bbox_targets = self.bbox.bbox_encoder(pos_bboxs,pos_gt_bboxes)\n            bbox_targets[pos_idx, :] = pos_bbox_targets\n            bbox_weights[pos_idx, :] = 1.0\n            if gt_labels is not None:\n                labels[pos_idx] = gt_labels[assign_gt_idx[pos_idx]-1]\n            else:\n                labels[pos_idx] = 0\n            label_weights[pos_idx] = 1.0\n        if len(neg_idx) > 0:\n            label_weights[neg_idx] = 1.0\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_idx, neg_idx)\n\n    def valid_flags(self,device='cuda'):\n        multi_level_flags = []\n        for i in range(self.num_multi_feats):\n            anchor_stride = pairs(self.anchor_strides[i])\n            feat_h, feat_w = pairs(self.featmap_size[i])\n            h, w = pairs(self.anchor_input_size)\n            valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)\n            valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)\n            flags = self.single_level_valid_flags((feat_h, feat_w),\n                                                  (valid_feat_h, valid_feat_w),\n                                                  self.num_base_anchors[i],\n                                                  device=device)\n            multi_level_flags.append(flags)\n        return multi_level_flags\n\n    def single_level_valid_flags(self,\n                                 featmap_size,\n                                 valid_size,\n                                 num_base_anchors,\n                                 device='cuda'):\n        feat_h, feat_w = featmap_size\n        valid_h, valid_w = valid_size\n        assert valid_h <= feat_h and valid_w <= feat_w\n        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        valid_x[:valid_w] = 1\n        valid_y[:valid_h] = 1\n        valid_xx, valid_yy = self.shift(valid_x, valid_y)\n        valid = valid_xx & valid_yy\n        valid = valid[:, None].expand(valid.size(0),\n                                      num_base_anchors).contiguous().view(-1)\n        return valid\n\n    def loss(self, cla_scores, loc_results, gt_bboxes, gt_labels):\n        \"\"\"\n        :param cla_scores: list[Tensor] (B, num_anchor*num_classes+1, h, w)\n        :param loc_results: list[Tensor] (B, num_anchor*4, h, w)\n        :param gt_bboxes: list[Tensor]\n        :param gt_labels: list[Tensor]\n        :return:\n        \"\"\"\n        multi_level_anchors = self.grid_anchors() # list[Tensor: num_levels, featmap_size, 4]\n        batch_size = cla_scores[0].shape[0]\n        anchors_list =[multi_level_anchors for _ in range(batch_size)]\n        valid_flag_list=[]\n        for i in range(batch_size):\n            multi_level_flags = self.valid_flags()\n            valid_flag_list.append(multi_level_flags)\n\n        assert gt_labels != None\n        (labels_list, labels_weight_list, bboxes_target_list,\n         bboxes_weight_list, num_pos_total, num_neg_total) = \\\n            self.match_(anchors_list, valid_flag_list,gt_bboxes,\n                        
gt_labels, batch_size)\n        num_total_samples = num_neg_total + num_pos_total\n        all_cla_scores = torch.cat([\n            s.permute(0, 2, 3, 1).reshape(\n                batch_size, -1, self.num_classes+1) for s in cla_scores\n        ], 1)\n\n        all_bbox_preds = torch.cat([\n            b.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)\n            for b in loc_results\n        ], -2)\n        all_bbox_targets = torch.cat(bboxes_target_list,-2).view(batch_size,-1,4)\n        all_bbox_weights = torch.cat(bboxes_weight_list,-2).view(batch_size,-1,4)\n        all_anchors = []\n        for i in range(batch_size):\n            all_anchors.append(torch.cat(anchors_list[i]))\n        all_labels = torch.cat(labels_list, -1).view(batch_size, -1)\n        all_label_weights = torch.cat(labels_weight_list,-1).view(batch_size, -1)\n\n\n        # check NaN and Inf\n        assert torch.isfinite(all_cla_scores).all().item(), \\\n            'classification scores become infinite or NaN!'\n        assert torch.isfinite(all_bbox_preds).all().item(), \\\n            'bbox predictions become infinite or NaN!'\n\n\n        loss_cla, loss_bbox = multi_apply(self.loss_single_img, all_cla_scores, all_bbox_preds, all_bbox_weights,\n                                          all_anchors, all_labels, all_label_weights,\n                                          all_bbox_targets, num_total_samples=num_pos_total)\n        return loss_cla, loss_bbox\n\n    def loss_single_img(self, cla_scores, bbox_preds, bbox_weights, anchors, labels, label_weights,\n                        bbox_targets, num_total_samples):\n        \"\"\"\n\n        :param cla_scores: Tensor: num_total_anchor, num_classes+1\n        :param bbox_preds: Tensor: num_total_anchor, 4 (dx,dy,dw,dh)\n        :param anchors: Tensor: num_total_anchor, 4 (x1,y1,x2,y2)\n        :param labels: Tensor: num_total, ; targets for anchors\n        :param bbox_targets: num_total_anchor,4 (dx,dy,dw,dh); targets for bbox preds\n        :param num_total_samples:\n        :return:\n        \"\"\"\n\n        loss_cla_all = F.cross_entropy(cla_scores, labels, reduction='none') * label_weights\n        # foreground: [0,num_class-1]; background: num_class\n        pos_inds = ((labels >= 0) &\n                    (labels < self.num_classes)).nonzero(as_tuple=False).reshape(-1)\n        neg_inds = (labels == self.num_classes).nonzero(as_tuple=False).view(-1)\n        num_pos_samples = pos_inds.size(0)\n        num_neg_samples = self.neg_pos_rate * num_pos_samples\n        if num_neg_samples > neg_inds.size(0):\n            num_neg_samples = neg_inds.size(0)\n        topk_loss_cls_neg, _ = loss_cla_all[neg_inds].topk(num_neg_samples)\n        loss_cla_pos = loss_cla_all[pos_inds].sum()\n        loss_cla_neg = topk_loss_cls_neg.sum()\n        loss_cla = (loss_cla_pos + loss_cla_neg) / num_total_samples\n\n        loss_bbox = smooth_l1_loss(bbox_preds, bbox_targets, bbox_weights,avg_factor=num_total_samples)\n        return loss_cla[None], loss_bbox\n\n\n    def get_bboxes(self, cla_scores, bbox_preds, img_infos, with_nms=True, rescale=False):\n        \"\"\"\n        Turn model outputs to labeled bboxes\n        :param cla_scores: list[Tensor] (B, num_anchor*num_classes+1, h, w)\n        :param loc_results: list[Tensor] (B, num_anchor*4, h, w)\n        :param with_nms:\n        :return:\n        \"\"\"\n        num_levels = len(cla_scores)\n        device = cla_scores[0].device\n        multi_level_anchors = self.grid_anchors()\n        batchsize = cla_scores[0].shape[0]\n        multi_levels = len(cla_scores)\n\n        result = []\n        for i in range(batchsize):\n            cla_list = [cla_scores[level][i].detach() for level in range(multi_levels)]\n            bbox_list = [bbox_preds[level][i].detach() for level in range(multi_levels)]\n            if with_nms:\n                proposal = self._get_bboxes_single(cla_list, bbox_list, multi_level_anchors,\n                                                   img_shape=pairs(self.anchor_input_size),\n                                                   scale_factor=img_infos[i]['scale_factor'],\n                                                   rescale=rescale, with_nms=with_nms)\n                result.append(proposal)\n\n        return result\n\n    def 
_get_bboxes_single(self,\n cls_score_list,\n bbox_pred_list,\n mlvl_anchors,\n img_shape,\n scale_factor,\n rescale=False,\n with_nms=True):\n \"\"\"Transform outputs for a single batch item into bbox predictions.\n\n Args:\n cls_score_list (list[Tensor]): Box scores for a single scale level\n Has shape (num_anchors * num_classes, H, W).\n bbox_pred_list (list[Tensor]): Box energies / deltas for a single\n scale level with shape (num_anchors * 4, H, W).\n mlvl_anchors (list[Tensor]): Box reference for a single scale level\n with shape (num_total_anchors, 4).\n img_shape (tuple[int]): Shape of the input image,\n (height, width, 3).\n scale_factor (ndarray): Scale factor of the image arange as\n (w_scale, h_scale, w_scale, h_scale).\n cfg (mmcv.Config): Test / postprocessing configuration,\n if None, test_cfg would be used.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before return boxes.\n Default: True.\n\n Returns:\n Tensor: Labeled boxes in shape (n, 5), where the first 4 columns\n are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n 5-th column is a score between 0 and 1.\n \"\"\"\n #cfg = self.test_cfg if cfg is None else cfg\n assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)\n mlvl_bboxes = []\n mlvl_scores = []\n for cls_score, bbox_pred, anchors in zip(cls_score_list,\n bbox_pred_list, mlvl_anchors):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n cls_score = cls_score.permute(1, 2,\n 0).reshape(-1, self.num_classes+1)\n scores = cls_score.softmax(-1)\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n bboxes = self.bbox.bbox_decode(anchors, bbox_pred, max_shape=img_shape)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n if rescale:\n mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n mlvl_scores = torch.cat(mlvl_scores)\n\n if with_nms:\n det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,\n 0.02, dict(type='nms', iou_threshold=0.45),\n 200)\n return det_bboxes, det_labels\n else:\n return mlvl_bboxes, mlvl_scores\n\n\n","repo_name":"Cohesion97/ssd-pytorch","sub_path":"model/header/ssd_header.py","file_name":"ssd_header.py","file_ext":"py","file_size_in_byte":21812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16428372785","text":"# Write a function that checks whether a character string \n# contains special characters (\\r, \\t, \\n, \\a, \\b, \\f, \\v)\n\ndef specialCharacters(str):\n specialChr = ('\\n', '\\t', '\\a', '\\b', '\\f', '\\v')\n for c in str:\n if c in specialChr: \n return True \n return False\nprint(specialCharacters(\"line ada\"))\n","repo_name":"AndreeaMihaelaP/Python","sub_path":"Lab_1/Ex_5.py","file_name":"Ex_5.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"15098641312","text":"def cluster(userList):\n\n \n userList.sort()\n\n clusterList = []\n j = -1\n for i in range(len(userList)):\n\n if i < j:\n continue\n\n cluster = []\n cluster.append(userList[i])\n\n for j in range(i + 1, len(userList)):\n\n if userList[i]*2 >= userList[j]:\n\n cluster.append(userList[j])\n\n elif userList[i]*2 <= userList[j]:\n\n break\n\n clusterList.append(cluster)\n\n return clusterList\n\ndef main():\n\n \n userInput = \"zxcvbnm123\"\n userList = []\n\n while userInput != \"n\":\n\n userInput = input(\"Integer to add to the 
list: \")\n\n if userInput != \"n\":\n\n try:\n\n userInput = int(userInput)\n userList.append(userInput)\n print(f\"Current list: {userList}\")\n\n except ValueError:\n\n print(\"Please enter an integer or type 'n'!\")\n \n cList = cluster(userList)\n print(f\"{cList}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kcyakici/IZTECHCENG","sub_path":"CENG113/Lab codes/10. lab/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12556128544","text":"import sys\nimport math\nimport time\nfrom collections import defaultdict\n\nclass grid():\n def __init__(self, la_map):\n self.grille = la_map\n self.width = len(la_map[0])\n self.height = len(la_map)\n \n def show(self):\n for each_line in self.grille:\n print(each_line, file=sys.stderr)\n \n def update_map(self, pos1, pos2, dist):\n for y in range(self.height):\n for x in range(self.width):\n if self.grille[y][x] == 0:\n if (x,y) in memo_dist.keys():\n dist1 = memo_dist[(x,y)]\n else: \n dist1 = math.sqrt((pos1[0]-x)**2+(pos1[1]-y)**2) #distance from previous point \n dist2 = math.sqrt((pos2[0]-x)**2+(pos2[1]-y)**2) #distance from latest position\n memo_dist[(x,y)]=dist2\n if dist == \"COLDER\" and dist1 > dist2: #si on s'eloigne et que la distance d'origine est plus grande que la nouvelle\n self.grille[y][x]=1\n del memo_dist[(x,y)]\n elif dist == \"WARMER\" and dist2 > dist1:\n self.grille[y][x]=1\n del memo_dist[(x,y)]\n elif dist == \"SAME\" and dist1 != dist2:\n self.grille[y][x]=1\n del memo_dist[(x,y)]\n \n def set_offset(self, off_x, off_y):\n self.offset_x = off_x\n self.offset_y = off_y\n \nclass aDallas():\n def __init__(self, X0, Y0):\n self.position = (X0, Y0)\n self.previous_position = (-1,-1)\n self.distance = \"UNKNOWN\"\n \n def move(self, new_x, new_y):\n self.previous_position = self.position\n self.position = (new_x, new_y)\n self.shift = (self.position[0]-self.previous_position[0], self.position[1]-self.previous_position[1])\n print(str(new_x), str(new_y))\n \n def update_sensor(self, result):\n global min_x, max_x, min_y, max_y\n self.distance = result\n if self.direction == \"HORIZONTAL\":\n #si on descend et qu'on est plus proche, on garde la partie basse\n #si on monte et qu'on est plus loin, on garde la partie basse\n if (self.shift[1]>0 and result == \"WARMER\") or (self.shift[1]<0 and result == \"COLDER\"): \n min_y = math.floor((self.position[1]+self.previous_position[1])/2)\n #si on monte et qu'on est plus proche, on garde la partie haute\n #si on descend et qu'on est plus loin, on garde la partie haute\n elif (self.shift[1]<0 and result == \"WARMER\") or (self.shift[1]>0 and result == \"COLDER\"):\n max_y = math.ceil((self.position[1]+self.previous_position[1])/2)\n #si on est a la meme distance, on n'a qu'une ligne dispo\n elif result == \"SAME\":\n max_y = int((self.position[1]+self.previous_position[1])/2)\n min_y = max_y\n else:\n #si on va a gauche et qu'on est plus proche, on garde la partie gauche\n #si on va a droite et qu'on est plus loin, on garde la partie gauche\n if (self.shift[0]<0 and result == \"WARMER\") or (self.shift[0]>0 and result == \"COLDER\"): \n max_x = math.ceil((self.position[0]+self.previous_position[0])/2)\n #si on va a droite et qu'on est plus proche, on garde la partie droite\n #si on va a gauche et qu'on est plus loin, on garde la partie droite \n elif (self.shift[0]>0 and result == \"WARMER\") or (self.shift[0]<0 and result == 
\"COLDER\"):\n min_x = math.floor((self.position[0]+self.previous_position[0])/2)\n #si on est a la meme distance, on n'a qu'une colonne dispo\n elif result == \"SAME\":\n max_y = int((self.position[0]+self.previous_position[0])/2)\n min_y = max_y\n \n def invert_direction(self):\n if (max_x-min_x)==0: #si on a une seule colonne\n self.direction = \"HORIZONTAL\" #on split horizontalement\n elif (max_y-min_y)==0: #si on a une seule ligne\n self.direction = \"VERTICAL\" #on split verticalement\n else: #sinon, on a un rectangle\n if abs(max_y-min_y) > (max_x-min_x): #on split par le plus grand coté\n self.direction = \"HORIZONTAL\"\n else:\n self.direction = \"VERTICAL\"\n \n def first_move(self):\n if h > w:\n self.direction = \"HORIZONTAL\"\n self.move(self.position[0], h-self.position[1])\n else:\n self.direction = \"VERTICAL\"\n self.move(w-self.position[0], self.position[1])\n \n def split(self):\n global w, h\n self.invert_direction()\n if self.direction == \"VERTICAL\":\n if abs(min_x-self.position[0]) > abs(max_x-self.position[0]):\n next_x = min_x\n else:\n next_x = max_x\n self.move(next_x, self.position[1])\n elif self.direction == \"HORIZONTAL\":\n if abs(min_y-self.position[1]) > abs(max_y-self.position[1]):\n next_y = min_y\n else:\n next_y = max_y\n self.move(self.position[0], next_y)\n \n def move_second(self, new_x, new_y):\n global tower\n tower.grille[new_y-tower.offset_y-1][new_x-tower.offset_x-1] = 1\n self.previous_position = self.position\n self.position = (new_x, new_y)\n print(str(new_x), str(new_y))\n \n def update_sensor_second(self, result):\n self.distance = result\n \n def next_move_second(self, carte):\n max_dist = 0\n position = (0,0)\n for y in range(carte.height):\n for x in range(carte.width):\n if carte.grille[y][x]==0:\n dist = memo_dist[(x,y)]\n if dist > max_dist:\n max_dist = dist\n position = (x,y)\n self.move_second(position[0]+carte.offset_x, position[1]+carte.offset_y)\n \n# w: width of the building.\n# h: height of the building.\nw, h = [int(i) for i in input().split()]\nmin_x, max_x, min_y, max_y = 0, w-1, 0, h-1\nprint(w,h, file=sys.stderr)\nmemo_dist = defaultdict(list) \nn = int(input()) # maximum number of turns before game over.\nx0, y0 = [int(i) for i in input().split()]\nbatman = aDallas(x0, y0)\nfirst_time = True\n# game loop\nwhile True:\n bomb_dir = input() # Current distance to the bomb compared to previous distance (COLDER, WARMER, SAME or UNKNOWN)\n print(bomb_dir, file=sys.stderr)\n \n if bomb_dir == \"UNKNOWN\":\n batman.first_move()\n else:\n start_time_round = time.time() \n print(batman.direction, file=sys.stderr)\n if abs(max_x-min_x)*abs(max_y-min_y) > 10000:\n batman.update_sensor(bomb_dir)\n print(min_x, max_x, file=sys.stderr)\n print(min_y, max_y, file=sys.stderr)\n batman.split()\n else:\n if first_time == True:\n carte = [[0 for x in range(abs(max_x-min_x))] for x in range(abs(max_y-min_y))] \n tower = grid(carte)\n tower.set_offset(min_x, min_y)\n first_time = False\n batman.update_sensor_second(bomb_dir)\n tower.update_map(batman.previous_position, batman.position, batman.distance)\n #tower.show()\n batman.next_move_second(tower)\n interval = time.time() - start_time_round \n print(\"Round time : \", interval, file=sys.stderr)\n \n # Write an action using print\n # To debug: print(\"Debug messages...\", file=sys.stderr)\n\n","repo_name":"Coni63/scripts_Python","sub_path":"CG/training/very hard/trilateration hybride.py","file_name":"trilateration 
hybride.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6709795699","text":"from http import HTTPStatus\n\nimport pytest\nfrom functional.testdata.users_data import test_auth_history\n\npytestmark = pytest.mark.asyncio\n\n\n@pytest.mark.order(after='test_register.py::test_register_user')\nasync def test_history(make_request, sign_in):\n access_token = sign_in['access_token']\n\n response = await make_request(\n 'GET',\n '/auth/history/1',\n headers={\n 'Authorization': access_token\n }\n )\n\n assert response['status'] == HTTPStatus.OK\n assert response['json'][-1].keys() == test_auth_history.keys()\n\n\n@pytest.mark.order(after='test_register.py::test_register_user')\nasync def test_history_without_auth(make_request):\n response = await make_request(\n 'GET',\n '/auth/history/1'\n )\n\n assert response['status'] == HTTPStatus.UNAUTHORIZED\n","repo_name":"salliko/async_api","sub_path":"auth-solution/tests/functional/src/auth/test_history.py","file_name":"test_history.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1679896506","text":"# all text files\nteam1F = \"textFiles/team1.txt\"\nteam2F = \"textFiles/team2.txt\"\nteam1scoreF = \"textFiles/team1score.txt\"\nteam2scoreF = \"textFiles/team2score.txt\"\nmatchN = \"textFiles/match.txt\"\nwinnerN = \"textFiles/winner.txt\"\n\n# default colours\nmain_colour = \"[magenta]\"\nt1_colour = \"[bright_cyan]\"\nt2_colour = \"[orange1]\"\narg_colour = \"[green]\"\n","repo_name":"Vaughan-Esports/OverlayWriter","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"34471892108","text":"try:\n reload # Python 2.7\nexcept NameError:\n try:\n from importlib import reload # Python 3.4+\n except ImportError:\n from imp import reload # Python 3.0 - 3.3\n\nimport os\nimport maya.cmds as cmds\nfrom maya import OpenMayaUI as omui\nfrom shiboken2 import wrapInstance\n\nimport Converter as Conv\nreload(Conv)\n\nscript_dir = os.path.dirname(__file__)\n\nfrom Qt import QtWidgets, QtCore, QtGui\n\n\nclass ConverterUI(QtWidgets.QWidget):\n\n def __init__(self):\n self.ui_name = 'Scene Converter'\n parent = getDock(QtWidgets.QWidget, self.ui_name + 'Dock', self.ui_name)\n super(ConverterUI, self).__init__(parent)\n\n self.Converter = Conv.ConverterClass()\n\n self.build_ui()\n self.populate_ui()\n self.parent().layout().addWidget(self)\n\n def build_ui(self):\n self.resize(400, 120)\n self.verticalLayout = QtWidgets.QVBoxLayout(self)\n\n self.selection_layout = QtWidgets.QHBoxLayout()\n self.rules_box = QtWidgets.QComboBox(self)\n self.selection_layout.addWidget(self.rules_box)\n self.refresh_button = QtWidgets.QPushButton(self)\n self.refresh_button.clicked.connect(self.populate_ui)\n self.refresh_button.setMaximumSize(QtCore.QSize(50, 16777215))\n self.selection_layout.addWidget(self.refresh_button)\n self.verticalLayout.addLayout(self.selection_layout)\n\n self.checks_layout = QtWidgets.QHBoxLayout()\n self.lights_check = QtWidgets.QCheckBox(self)\n self.lights_check.setChecked(True)\n self.checks_layout.addWidget(self.lights_check)\n self.material_check = QtWidgets.QCheckBox(self)\n self.material_check.setChecked(True)\n self.checks_layout.addWidget(self.material_check)\n self.selected_check = 
QtWidgets.QCheckBox(self)\n self.checks_layout.addWidget(self.selected_check)\n self.verticalLayout.addLayout(self.checks_layout)\n\n self.in_render_check = QtWidgets.QCheckBox(self)\n self.in_render_check.setChecked(True)\n self.verticalLayout.addWidget(self.in_render_check)\n\n self.buttons_layout = QtWidgets.QHBoxLayout()\n self.convert_button = QtWidgets.QPushButton(self)\n self.convert_button.clicked.connect(self.convert)\n self.convert_button.setMinimumSize(QtCore.QSize(0, 40))\n self.buttons_layout.addWidget(self.convert_button)\n self.editor_button = QtWidgets.QPushButton(self)\n self.editor_button.clicked.connect(self.editor)\n self.editor_button.setMinimumSize(QtCore.QSize(0, 40))\n self.buttons_layout.addWidget(self.editor_button)\n self.verticalLayout.addLayout(self.buttons_layout)\n\n self.web_link = QtWidgets.QLabel(self)\n url_link1 = \"'Linkedin'\"\n url_link2 = \"'Artsation'\"\n self.web_link.setText(url_link1 + '\\t\\t' + url_link2)\n self.web_link.setOpenExternalLinks(True)\n self.web_link.setAlignment(QtCore.Qt.AlignRight)\n self.verticalLayout.addWidget(self.web_link)\n\n self.refresh_button.setText(\"Refresh\")\n self.lights_check.setText(\"Lights\")\n self.material_check.setText(\"Materials\")\n self.selected_check.setText(\"Selected Only\")\n self.in_render_check.setText(\"Selected Render Engine Only\")\n self.convert_button.setText(\"Convert\")\n self.editor_button.setText(\"Editor\")\n\n def populate_ui(self):\n self.rules_box.clear()\n file_names = self.Converter.get_filenames(script_dir + '/Rules')\n self.rules_box.addItems(file_names)\n\n def editor(self):\n editor = EditorUI(self)\n editor.show_ui()\n\n def convert(self):\n lights = self.lights_check.isChecked()\n materials = self.material_check.isChecked()\n selected = self.selected_check.isChecked()\n in_render = self.in_render_check.isChecked()\n file_name = self.rules_box.currentText()\n self.Converter.convert_scene(file_name, lights=lights, materials=materials,\n selected=selected, in_render=in_render)\n\n\nclass EditorUI(QtWidgets.QWidget):\n\n def __init__(self, parent):\n\n self.ui_name = 'Editor'\n\n super(EditorUI, self).__init__(parent)\n self.newWindow = QtWidgets.QDialog(parent=parent)\n\n self.Converter = parent.Converter\n\n self.build_ui()\n self.populate_ui()\n\n # ---------------------------\n\n def show_ui(self):\n self.newWindow.show() # exec_() > modal\n\n def build_ui(self):\n self.newWindow.resize(1400, 800)\n self.newWindow.setWindowTitle(self.ui_name)\n self.all_layout = QtWidgets.QVBoxLayout(self.newWindow)\n self.main_widget = QtWidgets.QWidget(self.newWindow)\n self.main_Layout = QtWidgets.QHBoxLayout(self.main_widget)\n self.main_Layout.setContentsMargins(0, 0, 0, 0)\n\n self.render_in_Layout = QtWidgets.QVBoxLayout()\n self.render_in_box = QtWidgets.QComboBox(self.main_widget)\n self.render_in_box.currentIndexChanged.connect(self.populate_render_in)\n self.render_in_Layout.addWidget(self.render_in_box)\n\n self.checks_layout_in = QtWidgets.QHBoxLayout()\n\n self.inherited_check_in = QtWidgets.QCheckBox(self.main_widget)\n self.inherited_check_in.setText(\"Inherited\")\n self.inherited_check_in.setChecked(True)\n self.inherited_check_in.stateChanged.connect(self.populate_render_in)\n self.checks_layout_in.addWidget(self.inherited_check_in)\n\n self.maps_check_in = QtWidgets.QCheckBox(self.main_widget)\n self.maps_check_in.setText(\"Float3\")\n self.maps_check_in.setChecked(True)\n self.maps_check_in.stateChanged.connect(self.populate_render_in)\n 
self.checks_layout_in.addWidget(self.maps_check_in)\n\n self.float_check_in = QtWidgets.QCheckBox(self.main_widget)\n self.float_check_in.setText(\"Float\")\n self.float_check_in.setChecked(True)\n self.float_check_in.stateChanged.connect(self.populate_render_in)\n self.checks_layout_in.addWidget(self.float_check_in)\n\n self.integer_check_in = QtWidgets.QCheckBox(self.main_widget)\n self.integer_check_in.setText(\"Integer\")\n self.integer_check_in.setChecked(True)\n self.integer_check_in.stateChanged.connect(self.populate_render_in)\n self.checks_layout_in.addWidget(self.integer_check_in)\n\n self.bool_check_in = QtWidgets.QCheckBox(self.main_widget)\n self.bool_check_in.setText(\"Bool\")\n self.bool_check_in.setChecked(True)\n self.bool_check_in.stateChanged.connect(self.populate_render_in)\n self.checks_layout_in.addWidget(self.bool_check_in)\n\n self.other_check_in = QtWidgets.QCheckBox(self.main_widget)\n self.other_check_in.setText(\"Other\")\n self.other_check_in.setChecked(True)\n self.other_check_in.stateChanged.connect(self.populate_render_in)\n self.checks_layout_in.addWidget(self.other_check_in)\n\n self.expand_button_in = QtWidgets.QPushButton(self.main_widget)\n self.expand_button_in.setText(\"+\")\n self.expand_button_in.setMaximumSize(25, 25)\n self.checks_layout_in.addWidget(self.expand_button_in)\n\n self.collapse_button_in = QtWidgets.QPushButton(self.main_widget)\n self.collapse_button_in.setText(\"-\")\n self.collapse_button_in.setMaximumSize(25, 25)\n self.checks_layout_in.addWidget(self.collapse_button_in)\n\n self.render_in_Layout.addLayout(self.checks_layout_in)\n\n self.render_in_tree = QtWidgets.QTreeWidget(self.main_widget)\n self.render_in_tree.setAutoFillBackground(True)\n self.render_in_tree.setAlternatingRowColors(True)\n self.render_in_tree.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)\n self.render_in_tree.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.render_in_tree.setColumnCount(3)\n self.render_in_tree.setHeaderHidden(True)\n\n self.render_in_Layout.addWidget(self.render_in_tree)\n\n self.main_Layout.addLayout(self.render_in_Layout)\n\n self.render_out_Layout = QtWidgets.QVBoxLayout()\n self.render_out_box = QtWidgets.QComboBox(self.main_widget)\n self.render_out_box.currentIndexChanged.connect(self.populate_render_out)\n self.render_out_Layout.addWidget(self.render_out_box)\n\n self.checks_layout_out = QtWidgets.QHBoxLayout()\n\n self.inherited_check_out = QtWidgets.QCheckBox(self.main_widget)\n self.inherited_check_out.setText(\"Inherited\")\n self.inherited_check_out.setChecked(True)\n self.inherited_check_out.stateChanged.connect(self.populate_render_out)\n self.checks_layout_out.addWidget(self.inherited_check_out)\n\n self.maps_check_out = QtWidgets.QCheckBox(self.main_widget)\n self.maps_check_out.setText(\"Float3\")\n self.maps_check_out.setChecked(True)\n self.maps_check_out.stateChanged.connect(self.populate_render_out)\n self.checks_layout_out.addWidget(self.maps_check_out)\n\n self.float_check_out = QtWidgets.QCheckBox(self.main_widget)\n self.float_check_out.setText(\"Float\")\n self.float_check_out.setChecked(True)\n self.float_check_out.stateChanged.connect(self.populate_render_out)\n self.checks_layout_out.addWidget(self.float_check_out)\n\n self.integer_check_out = QtWidgets.QCheckBox(self.main_widget)\n self.integer_check_out.setText(\"Integer\")\n self.integer_check_out.setChecked(True)\n self.integer_check_out.stateChanged.connect(self.populate_render_out)\n 
self.checks_layout_out.addWidget(self.integer_check_out)\n\n self.bool_check_out = QtWidgets.QCheckBox(self.main_widget)\n self.bool_check_out.setText(\"Bool\")\n self.bool_check_out.setChecked(True)\n self.bool_check_out.stateChanged.connect(self.populate_render_out)\n self.checks_layout_out.addWidget(self.bool_check_out)\n\n self.other_check_out = QtWidgets.QCheckBox(self.main_widget)\n self.other_check_out.setText(\"Other\")\n self.other_check_out.setChecked(True)\n self.other_check_out.stateChanged.connect(self.populate_render_out)\n self.checks_layout_out.addWidget(self.other_check_out)\n\n self.expand_button_out = QtWidgets.QPushButton(self.main_widget)\n self.expand_button_out.setText(\"+\")\n self.expand_button_out.setMaximumSize(25, 25)\n self.checks_layout_out.addWidget(self.expand_button_out)\n\n self.collapse_button_out = QtWidgets.QPushButton(self.main_widget)\n self.collapse_button_out.setText(\"-\")\n self.collapse_button_out.setMaximumSize(25, 25)\n self.checks_layout_out.addWidget(self.collapse_button_out)\n\n self.render_out_Layout.addLayout(self.checks_layout_out)\n\n self.render_out_tree = QtWidgets.QTreeWidget(self.main_widget)\n self.render_out_tree.setAutoFillBackground(True)\n self.render_out_tree.setAlternatingRowColors(True)\n self.render_out_tree.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)\n self.render_out_tree.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.render_out_tree.setColumnCount(3)\n self.render_out_tree.setHeaderHidden(True)\n self.render_out_Layout.addWidget(self.render_out_tree)\n self.main_Layout.addLayout(self.render_out_Layout)\n\n self.match_widget = QtWidgets.QWidget(self.main_widget)\n self.match_layout = QtWidgets.QVBoxLayout(self.match_widget)\n\n self.match_button = QtWidgets.QPushButton(self.main_widget)\n self.match_button.setMinimumSize(15, 600)\n self.match_button.setMaximumSize(25, 700)\n self.match_button.clicked.connect(self.match_selection)\n self.match_layout.addWidget(self.match_button)\n\n self.main_Layout.addWidget(self.match_widget)\n\n self.render_tree = QtWidgets.QTreeWidget(self.main_widget)\n self.render_tree.setAutoFillBackground(True)\n self.render_tree.setAlternatingRowColors(True)\n self.render_tree.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n self.render_tree.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.render_tree.setColumnCount(4)\n self.render_tree.setHeaderLabels(['In', 'Out', 'Type', 'Factor'])\n self.render_tree.itemClicked.connect(self.select_source)\n self.main_Layout.addWidget(self.render_tree)\n\n self.all_layout.addWidget(self.main_widget)\n\n self.buttons_widget = QtWidgets.QWidget(self.main_widget)\n self.buttons_layout = QtWidgets.QVBoxLayout(self.buttons_widget)\n\n self.delete_button = QtWidgets.QPushButton(self.buttons_widget)\n self.delete_button.clicked.connect(self.remove_selected)\n self.buttons_layout.addWidget(self.delete_button)\n self.clear_button = QtWidgets.QPushButton(self.buttons_widget)\n self.clear_button.clicked.connect(self.clear_all)\n self.buttons_layout.addWidget(self.clear_button)\n\n self.inverse_button = QtWidgets.QPushButton(self.buttons_widget)\n self.inverse_button.clicked.connect(self.add_inverse)\n self.buttons_layout.addWidget(self.inverse_button)\n\n self.override_button = QtWidgets.QPushButton(self.buttons_widget)\n self.override_button.clicked.connect(self.add_override)\n self.buttons_layout.addWidget(self.override_button)\n\n self.remove_button = QtWidgets.QPushButton(self.buttons_widget)\n 
self.remove_button.clicked.connect(self.remove_override)\n self.buttons_layout.addWidget(self.remove_button)\n\n spacerItemB = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.buttons_layout.addItem(spacerItemB)\n\n self.save_button = QtWidgets.QPushButton(self.buttons_widget)\n self.save_button.clicked.connect(self.save_file)\n self.buttons_layout.addWidget(self.save_button)\n self.load_button = QtWidgets.QPushButton(self.buttons_widget)\n self.load_button.clicked.connect(self.load_file)\n self.buttons_layout.addWidget(self.load_button)\n\n spacerItemC = QtWidgets.QSpacerItem(20, 500, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.buttons_layout.addItem(spacerItemC)\n\n self.main_Layout.addWidget(self.buttons_widget)\n\n self.expand_button_in.clicked.connect(self.render_in_tree.expandAll)\n self.collapse_button_in.clicked.connect(self.render_in_tree.collapseAll)\n self.expand_button_out.clicked.connect(self.render_out_tree.expandAll)\n self.collapse_button_out.clicked.connect(self.render_out_tree.collapseAll)\n\n self.match_button.setText(\">\")\n self.delete_button.setText(\"Delete selected\")\n self.clear_button.setText(\"Clear All\")\n self.save_button.setText(\"Save\")\n self.load_button.setText(\"Load\")\n self.inverse_button.setText(\"Inverse Value\")\n self.override_button.setText(\"Override Value\")\n self.remove_button.setText(\"Remove Value\")\n\n def populate_ui(self):\n\n render_engines = self.Converter.render_engines\n self.render_in_box.addItems(render_engines)\n self.render_out_box.addItems(render_engines)\n current_renderer = self.Converter.current_engine\n self.render_in_box.setCurrentText(current_renderer)\n\n self.populate_render_in()\n self.populate_render_out()\n\n def populate_render_in(self):\n inherited = self.inherited_check_in.isChecked()\n others = self.other_check_in.isChecked()\n\n checks_dic = {'float3': self.maps_check_in.isChecked(), 'float': self.float_check_in.isChecked(),\n 'long': self.integer_check_in.isChecked(),\n 'bool': self.bool_check_in.isChecked()}\n\n render_in = self.render_in_box.currentText()\n render_dic = self.get_nodes_types(render_in)\n\n excluded_types = []\n for key, value in checks_dic.items():\n if not value:\n excluded_types.append(key)\n if render_in != '':\n self.populate_render_trees(self.render_in_tree, render_dic, inherited, others, excluded_types)\n\n def populate_render_out(self):\n inherited = self.inherited_check_out.isChecked()\n others = self.other_check_out.isChecked()\n\n checks_dic = {'float3': self.maps_check_out.isChecked(), 'float': self.float_check_out.isChecked(),\n 'long': self.integer_check_out.isChecked(),\n 'bool': self.bool_check_out.isChecked()}\n\n render_out = self.render_out_box.currentText()\n render_dic = self.get_nodes_types(render_out)\n\n excluded_types = []\n for key, value in checks_dic.items():\n if not value:\n excluded_types.append(key)\n if render_out != '':\n self.populate_render_trees(self.render_out_tree, render_dic, inherited, others, excluded_types)\n\n def get_nodes_types(self, render_engine):\n\n node_types = ['shader', 'light', 'texture', 'utility']\n render_in_dic = {}\n for node_type in node_types:\n render_in_nodes = self.Converter.get_type_nodes(render_engine, node_type)\n render_in_dic[node_type] = render_in_nodes\n return render_in_dic\n\n def resize_trees(self, tree_widget):\n\n tree_widget.expandAll()\n count = tree_widget.columnCount()\n for i in range(0, count):\n 
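# fit each column to its contents before sorting alphabetically\n 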
tree_widget.resizeColumnToContents(i)\n tree_widget.sortItems(0, QtCore.Qt.AscendingOrder)\n # tree_widget.collapseAll()\n\n def select_source(self):\n in_item = self.render_tree.currentItem().text(0)\n out_item = self.render_tree.currentItem().text(1)\n\n in_item_parent = self.render_tree.currentItem().parent()\n out_item_parent = self.render_tree.currentItem().parent()\n\n in_items = self.render_in_tree.findItems(in_item, QtCore.Qt.MatchExactly | QtCore.Qt.MatchRecursive, 0)\n if in_items:\n for item in in_items:\n if in_item_parent is not None:\n if item.parent().text(0) == in_item_parent.text(0):\n self.render_in_tree.setCurrentItem(item)\n else:\n self.render_in_tree.setCurrentItem(item)\n\n out_items = self.render_out_tree.findItems(out_item, QtCore.Qt.MatchExactly | QtCore.Qt.MatchRecursive, 0)\n if out_items:\n for item in out_items:\n if out_item_parent is not None:\n if item.parent().text(0) == out_item_parent.text(1):\n self.render_out_tree.setCurrentItem(item)\n else:\n self.render_out_tree.setCurrentItem(item)\n\n def populate_render_trees(self, tree_widget, render_nodes, inherited, others, excluded_types):\n tree_widget.clear()\n\n for node_type, nodes in render_nodes.items():\n parent_item = QtWidgets.QTreeWidgetItem([node_type])\n tree_widget.addTopLevelItem(parent_item)\n for node in nodes:\n child_item = QtWidgets.QTreeWidgetItem([node])\n for c in range(0, child_item.columnCount()):\n child_item.setForeground(c, QtGui.QBrush(QtGui.QColor(250, 180, 120)))\n parent_item.addChild(child_item)\n node_attributes = self.Converter.get_type_attributes(node, inherited, others, excluded_types)\n for node_attribute, node_info in node_attributes.items():\n item = QtWidgets.QTreeWidgetItem([node_attribute, node_info])\n child_item.addChild(item)\n if node_info == 'float3':\n item.setForeground(0, QtGui.QBrush(QtGui.QColor(250, 120, 120)))\n attribute_children = self.Converter.get_attribute_children(node_attribute, node)\n if attribute_children:\n for child_attribute in attribute_children:\n child_attribute_item = QtWidgets.QTreeWidgetItem([child_attribute, 'float'])\n item.addChild(child_attribute_item)\n child_attribute_item.setHidden(True)\n\n self.resize_trees(tree_widget)\n\n def get_parent_item(self, in_attribute, out_attribute):\n # Search for an existing parent node; if there is none, create a new one.\n items = self.render_tree.findItems(in_attribute.text(0), QtCore.Qt.MatchExactly, 0)\n if items:\n parent_item = items[0]\n else:\n parent_item = QtWidgets.QTreeWidgetItem([in_attribute.text(0), out_attribute.text(0)])\n return parent_item\n\n def get_child_item(self, parent_item, in_attribute, out_attribute):\n # Search for existing child attributes, but only in the same parent.\n items = self.render_tree.findItems(in_attribute.text(0), QtCore.Qt.MatchExactly | QtCore.Qt.MatchRecursive, 0)\n item_exists = False\n if items:\n for i in items:\n if parent_item.text(0) == i.parent().text(0): # Make sure the attribute has the same parent\n item_exists = True\n break # exit the loop: if an existing parent was found there will be no other\n else:\n item_exists = False\n\n if not item_exists:\n item = QtWidgets.QTreeWidgetItem([in_attribute.text(0), out_attribute.text(0), in_attribute.text(1)])\n return item\n else:\n return\n\n def get_item_children(self, item):\n item_dic = {}\n in_child_count = item.childCount()\n for i in range(in_child_count):\n attr_item = item.child(i)\n item_dic[attr_item.text(0)] = attr_item\n return item_dic\n\n def count_item_visible_children(self, item):\n 
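\"\"\"Return the number of direct children of item that are not hidden.\"\"\"\n 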
visible_child_items = []\n child_count = item.childCount()\n if child_count > 0:\n for i in range(0, child_count):\n child_item = item.child(i)\n if not child_item.isHidden():\n visible_child_items.append(child_item)\n return len(visible_child_items)\n\n def match_selection(self):\n\n in_item = self.render_in_tree.currentItem()\n out_item = self.render_out_tree.currentItem()\n parent_item = None\n if in_item is not None and out_item is not None:\n # Make sure both selections are the same type.\n if in_item.text(1) == out_item.text(1):\n\n in_item_child_count = self.count_item_visible_children(in_item)\n out_item_child_count = self.count_item_visible_children(out_item)\n\n # Check if both selections are attributes by comparing the number of visible children.\n if in_item_child_count == 0 and out_item_child_count == 0:\n\n attr_in_parent = self.render_in_tree.currentItem().parent()\n attr_out_parent = self.render_out_tree.currentItem().parent()\n parent_item = self.get_parent_item(attr_in_parent, attr_out_parent)\n self.render_tree.addTopLevelItem(parent_item)\n\n child_item = self.get_child_item(parent_item, in_item, out_item)\n if child_item is not None:\n parent_item.addChild(child_item)\n\n in_child_item_count = in_item.childCount()\n out_child_item_count = out_item.childCount()\n if in_child_item_count == out_child_item_count > 0:\n for i in range(0, in_child_item_count):\n in_child_item = in_item.child(i)\n out_child_item = out_item.child(i)\n\n sub_child_item = self.get_child_item(parent_item, in_child_item, out_child_item)\n if sub_child_item is not None:\n parent_item.addChild(sub_child_item)\n\n # If both selections are parent nodes.\n elif in_item.parent() is not None and out_item.parent() is not None:\n\n parent_item = self.get_parent_item(in_item, out_item)\n self.render_tree.addTopLevelItem(parent_item)\n\n # match same attributes\n in_attrs_items = self.get_item_children(in_item)\n out_attrs_items = self.get_item_children(out_item)\n\n for attr, item in in_attrs_items.items():\n if attr in out_attrs_items.keys():\n child_item = self.get_child_item(parent_item, item, item)\n if child_item is not None:\n parent_item.addChild(child_item)\n for i in range(0, child_item.columnCount()):\n child_item.setForeground(i, QtGui.QBrush(QtGui.QColor(100, 100, 100)))\n\n in_child_item_count = item.childCount()\n if in_child_item_count > 0:\n for i in range(0, in_child_item_count):\n in_child_item = item.child(i)\n sub_child_item = self.get_child_item(parent_item, in_child_item, in_child_item)\n if sub_child_item is not None:\n parent_item.addChild(sub_child_item)\n else:\n cmds.inViewMessage(amg='In-view message Wrong Selection.', pos='topCenter', fade=True)\n\n else:\n cmds.inViewMessage(amg='In-view message Incompatible Type.', pos='topCenter', fade=True)\n\n self.resize_trees(self.render_tree)\n self.render_tree.expandAll()\n self.set_item_colors()\n\n self.render_tree.setCurrentItem(parent_item)\n\n def set_item_colors(self):\n root = self.render_tree.invisibleRootItem()\n node_count = root.childCount()\n\n for i in range(node_count):\n node_item = root.child(i)\n for c in range(0, node_item.columnCount()):\n node_item.setForeground(c, QtGui.QBrush(QtGui.QColor(250, 180, 120)))\n attr_count = node_item.childCount()\n for a in range(attr_count):\n item = node_item.child(a)\n if item.text(0) == item.text(1):\n for c in range(0, item.columnCount()):\n item.setForeground(c, QtGui.QBrush(QtGui.QColor(100, 100, 100)))\n if item.text(2) == 'float3':\n for c in range(0, item.columnCount()):\n 
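# float3 rows keep the red tint used in populate_render_trees\n 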
item.setForeground(c, QtGui.QBrush(QtGui.QColor(250, 120, 120)))\n\n def save_file(self):\n render_in = self.render_in_box.currentText()\n render_out = self.render_out_box.currentText()\n result_data = {'Engines': [render_in, render_out]}\n\n root = self.render_tree.invisibleRootItem()\n node_count = root.childCount()\n\n for i in range(node_count):\n node_item = root.child(i)\n attrs = {node_item.text(0): node_item.text(1)}\n attr_count = node_item.childCount()\n for a in range(attr_count):\n attr_item = node_item.child(a)\n attrs[attr_item.text(0)] = [attr_item.text(1), attr_item.text(2), attr_item.text(3)]\n result_data[node_item.text(0)] = attrs\n\n file_name = render_in + '_To_' + render_out\n file_path = cmds.fileDialog2(caption='Save File', fileFilter='*.json', fileMode=0,\n dir=script_dir + '/Rules/' + file_name + '.json')\n if file_path:\n self.Converter.save_json_file(file_path[0], result_data)\n\n def load_file(self):\n self.render_tree.clear()\n file_path = cmds.fileDialog2(caption='Load File', fileFilter='*.json', fileMode=1, dir=script_dir + '/Rules/')\n if file_path:\n data = self.Converter.load_json_file(file_path[0])\n for node_name, attr_list in data.items():\n if node_name == 'Engines':\n self.render_in_box.setCurrentText(attr_list[0])\n self.render_out_box.setCurrentText(attr_list[1])\n else:\n parent_item = QtWidgets.QTreeWidgetItem([node_name, attr_list[node_name]])\n self.render_tree.addTopLevelItem(parent_item)\n for attr_in, attrs_out in attr_list.items():\n if attr_in != node_name:\n attr_out = attrs_out[0]\n attr_type = attrs_out[1]\n inverse = attrs_out[2]\n child_item = QtWidgets.QTreeWidgetItem([attr_in, attr_out, attr_type, inverse])\n parent_item.addChild(child_item)\n\n self.resize_trees(self.render_tree)\n self.render_tree.expandAll()\n self.set_item_colors()\n\n def add_inverse(self):\n for item in self.render_tree.selectedItems():\n if item.text(2) in ['float3', 'float']:\n item.setText(3, 'Inverse')\n\n def add_override(self):\n value, ok_pressed = QtWidgets.QInputDialog.getDouble(self, \"Enter Value\", \"Override:\", 0, 0, 100, 2)\n if ok_pressed:\n for item in self.render_tree.selectedItems():\n item.setText(3, str(value))\n\n def remove_override(self):\n for item in self.render_tree.selectedItems():\n item.setText(3, '')\n\n def clear_all(self):\n self.render_tree.clear()\n\n def remove_selected(self):\n root = self.render_tree.invisibleRootItem()\n for item in self.render_tree.selectedItems():\n (item.parent() or root).removeChild(item)\n\n\ndef getDock(wrap, name, label):\n if cmds.workspaceControl(name, query=True, exists=True):\n cmds.deleteUI(name)\n ctrl = cmds.workspaceControl(name, r=True, rs=True, floating=True, label=label)\n # tabToControl=('ChannelBoxLayerEditor', 1)\n # dockToMainWindow=(\"right\", True)\n qtCtrl = omui.MQtUtil_findControl(ctrl)\n ptr = wrapInstance(int(qtCtrl), wrap)\n return ptr\n","repo_name":"mhdmhd/MayaSceneConverter","sub_path":"ConverterUI.py","file_name":"ConverterUI.py","file_ext":"py","file_size_in_byte":30557,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"81"} +{"seq_id":"1093199350","text":"# coding: UTF-8\n#! 
python3\n\n\"\"\"\nCreated on November 15\n\nFTP server for Windows to connect to the RPi\n\"\"\"\n\nimport socket\nimport subprocess\nimport json\nimport os\nimport base64\nimport sys\nimport shutil\n\nclass FTPClient:\n def __init__(self):\n # General variables\n self.tamanho_pacote = 8192\n self.total_recebido = 0\n # --- Connection and remote-send functions ---\n def init_conn(self, ip, port):\n self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n self.conn.connect((ip, port))\n return \"[+] Connection established!\"\n except ConnectionRefusedError:\n return \"[-] Connection refused by the target machine (Refused)\"\n except TimeoutError:\n return \"[-] Address not found (Timeout)\"\n except socket.gaierror:\n return \"[-] Invalid IP address\"\n\n def enviar_comando(self, comando):\n comando = FTPClient.split_command(comando)\n self.enviar_pacote_json(comando)\n if 'exit' in comando:\n self.conn.close()\n return \"[-] Connection closed\\n\"\n\n return self.receber_pacote_json()\n \n # --- JSON handling functions ---\n def enviar_pacote_json(self, pack):\n pack_json = json.dumps(pack)\n self.conn.send(pack_json.encode()) \n\n def receber_pacote_json(self):\n pack_json = b''\n while True:\n try:\n pack_json += self.conn.recv(self.tamanho_pacote)\n return json.loads(pack_json) # Receives bytes, returns str (3.6+ only)\n except ValueError:\n self.total_recebido = len(pack_json)/1000\n # base64 encoding adds 33% to the file size. To normalize, subtract 25% of the total\n print(\"\\rReceived: %d Kb\" % self.total_recebido, end=\"\")\n \n # --- Local file functions ---\n def change_client_directory(self, path):\n if os.path.isdir(path):\n os.chdir(path)\n return \"[+] Directory changed to %s\" % os.getcwd()\n else:\n return \"[-] Invalid directory '%s'\" % path\n \n def list_client_directory(self, path=\".\"):\n try:\n (_, dirs, arqs) = next(os.walk(path))\n except StopIteration:\n dirs = []\n arqs = []\n return dirs, arqs\n \n # --- Command handling function ---\n @staticmethod\n def split_command(comando):\n try:\n comando = comando.split(' ')\n except AttributeError:\n # Already a list\n pass\n \n return comando\n \n # --- Read and write functions ---\n @staticmethod\n def read_file(path):\n if os.path.isfile(path):\n with open(path, 'rb') as arq:\n data = base64.b64encode(arq.read())\n return data.decode()\n return \"[-] File '%s' not found\" % path\n\n @staticmethod\n def save_file(data, path):\n with open(path, 'wb') as arq:\n arq.write(base64.b64decode(data))\n return \"[+] Download finished\"\n\nif __name__ == '__main__':\n import logging\n logging.basicConfig(level=logging.DEBUG, format = '%(asctime)s - %(levelname)s - %(message)s')\n logging.disable(logging.CRITICAL)\n \n","repo_name":"Matheus-Afonso/FTP_Server_Win_RPi","sub_path":"FTP_Client.py","file_name":"FTP_Client.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16249017037","text":"'''\nCreated on Jan 14, 2013\n\n@author: CarolinaFernandez\n'''\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import RegexValidator\nimport re\n\nRESOURCE_SR_RE = \"^([0-9a-zA-Z\\-]){1,64}$\"\nTEMPERATURE_SCALE_CHOICES = (\n ('C','Celsius'),\n ('F','Fahrenheit'),\n ('K','Kelvin'),\n )\n\ndef validate_temperature_scale(scale):\n \"\"\"\n Validates the chosen temperature scale against the set 'indices'.\n \"\"\"\n if scale not in [ t[0] 
for t in TEMPERATURE_SCALE_CHOICES ]:\n raise ValidationError(\"Invalid scale: please choose one from the list\")\n\nvalidate_resource_name = RegexValidator(re.compile(RESOURCE_SR_RE),\n u\"Please do not use accented characters, symbols, underscores or whitespaces.\",\n \"invalid\")\n\n","repo_name":"fp7-ofelia/ocf","sub_path":"expedient/doc/plugins/samples/plugin/sample_resource/utils/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"81"} +{"seq_id":"30088647393","text":"import json\nimport numpy as np\nfrom enum import Enum\nfrom pathlib import Path\nimport re\n\nimport sql_query\nimport request_lexical_resources\n\ndata_path = Path(\"data\")\nkeyw_path = Path(\"keywords_vectors\")\nembeddings_path = Path(\"embeddings\")\n\nkeywords_delimitor = \" |,|;|_|\\|\"\n\n\nclass EmbeddingsType(str, Enum):\n word2vec = \"word2vec\"\n wordnet = \"wordnet\"\n fasttext = \"fasttext\"\n bert = \"bert\"\n elmo = \"elmo\"\n\n\nclass SimilarityType(str, Enum):\n synonym = \"synonym\"\n hyponym = \"hyponym\"\n hypernym = \"hypernym\"\n holonym = \"holonym\"\n similar = \"similar\"\n\n\ndef get_senses_from_keyword(embeddings_type, keyword):\n\n \"\"\"\n Get the senses from a keyword\n\n if the model is wordnet: a list of synsets\n if the model is not wordnet: a list of size 1 containing only the keyword\n\n output: list of senses\n \"\"\"\n\n if embeddings_type == EmbeddingsType.wordnet:\n # TODO: look up and return the WordNet synsets for the keyword\n raise NotImplementedError(\"WordNet sense lookup is not implemented yet\")\n return [keyword]\n\n\ndef compute_feedback_score(original_keyword, proposed_keyword):\n\n \"\"\"\n Compute the feedback score\n Input: original_keyword: keyword at the origin of the proposition\n proposed_keyword: keyword proposed to the user\n Output: Feedback score, default value to 0.4 if no feedbacks available\n \"\"\"\n\n # This old version takes into account when a user doesn't choose a keyword. I changed it so that a keyword not chosen doesn't get as much of a penalty\n \"\"\"\n # get feedback for that particular search_id -> result_url sequence (TODO: check for similar search?)\n feedbacks = sql_query.get_feedback_for_reranking(user_search, result)\n if feedbacks != None and len(feedbacks) > 0:\n # Normalize mean of all feedbacks (-1->1 to 0->1)\n feedback_score = (np.mean(feedbacks) - (-1)) / (1 - (-1))\n else:\n # Default value if no feedbacks available\n feedback_score = 0\n return feedback_score\n \"\"\"\n\n # get feedback for that particular keyword1 -> keyword2 sequence (TODO: check for similar search?)\n feedbacks = sql_query.get_feedback_for_expansion(original_keyword, proposed_keyword)\n chosen = 0\n ignored = 0\n base_score = 0.4\n if feedbacks is not None and len(feedbacks) > 0:\n for feedback in feedbacks:\n if feedback == 1:\n chosen += 1\n if feedback == -1:\n ignored += 1\n\n # remove a point for every 10 users that didn't choose it\n chosen -= int(ignored / 10)\n\n feedback_score = base_score + (chosen / len(feedbacks))\n\n else:\n feedback_score = base_score\n\n # print(result.title, \":\", max(0, min(feedback_score, 1)))\n\n return max(0, min(feedback_score, 1))\n\n\ndef combine_similarity_and_feedback_score(feedback_score, similarity_score, alpha=0.5):\n\n \"\"\"\n Compute the combination of embedding similarity and feedback score\n\n Input: feedback_score: feedback score computed by compute_feedback_score, if no feedbacks, default to (1 - alpha)\n similarity_score: similarity between the two keywords\n alpha: higher alpha = higher feedback weight\n\n Output: score 
combination of similarity and feedback\n \"\"\"\n\n return (1 - alpha) * similarity_score + alpha * feedback_score\n\n\ndef use_feedback(original_keyword, keyword_sim_list, alpha=0.5):\n\n \"\"\"\n Apply the feedback score to the similarity list using the combine_similarity_and_feedback_score method\n\n Input: original_keyword: keyword at the origin of the proposed keywords\n keyword_sim_list: list of tuples of type (keyword, similarity)\n alpha: higher alpha = higher feedback weight\n\n Output: list of tuples of type (keyword, similarity) with the newly computed scores\n \"\"\"\n\n new_list = []\n\n for keyword_sim in keyword_sim_list:\n\n feedback_score = compute_feedback_score(original_keyword, keyword_sim[0])\n\n new_sim = combine_similarity_and_feedback_score(\n feedback_score, keyword_sim[1], alpha\n )\n\n # print(keyword_sim[0], \":\", keyword_sim[1], \"->\", new_sim)\n\n new_list.append((keyword_sim[0], new_sim))\n\n return new_list\n\n\ndef split_user_entry(user_entry):\n \"\"\"\n Split the user entry into keywords\n\n Input: keywords as string\n Output: keywords as list\n\n Currently splitting on spaces, commas, semicolons, underscores and pipes\n \"\"\"\n\n return re.split(keywords_delimitor, user_entry)\n\n\ndef sort_array_of_tuple_with_second_value(array):\n \"\"\"\n Return an array of tuples sorted by their second values\n \"\"\"\n\n array.sort(key=lambda x: x[1], reverse=True)\n return array\n\n\ndef get_geoloc_parents(ref_name, tag_name):\n\n parent_list = request_lexical_resources.get_most_similar_referentiels(\n tag_name, ref_name, \"geoloc\"\n )\n\n parents = [parent[\"word\"] for parent in parent_list]\n\n return {\"name\": ref_name, \"type\": \"geoloc\", \"tags\": parents}\n\n\ndef get_cluster(\n keyword,\n referentiel,\n embeddings_type,\n embeddings_name,\n max_width,\n max_depth,\n only_vocabulary,\n current_depth,\n):\n\n \"\"\"\n Recursive function to build the data structure tree\n\n Input: keywords: a string\n embeddings_type: type of the embeddings\n embeddings_name: name of the embeddings\n max_width: maximum width of keywords search\n max_depth: maximum depth of keywords search\n current_depth: current depth\n\n Output: A cluster\n \"\"\"\n\n cluster = {}\n if type(keyword) == str:\n pass\n elif type(keyword) == dict:\n keyword = keyword[\"word\"]\n else:\n keyword = str(keyword)\n cluster[\"sense\"] = keyword\n cluster[\"similar_senses\"] = []\n\n if current_depth < max_depth:\n\n # to avoid looping on most similar words\n slider = 1 if current_depth > 0 else 0\n\n similar_words = request_lexical_resources.get_most_similar(\n keyword,\n referentiel.name,\n embeddings_type,\n embeddings_name,\n max_width,\n slider,\n only_vocabulary,\n )\n\n # Process for using feedback\n temp_sim = []\n for word_sim in similar_words[\"similar\"]:\n temp_sim.append((word_sim[\"word\"], word_sim[\"similarity\"]))\n temp_sim = use_feedback(keyword, temp_sim, 0.6)\n temp_sim = sort_array_of_tuple_with_second_value(temp_sim)\n similar_words[\"similar\"] = []\n for temp in temp_sim:\n similar_words[\"similar\"].append({\"word\": temp[0], \"similarity\": temp[1]})\n # Process for using feedback\n\n for word in similar_words[SimilarityType.synonym]:\n sub_cluster = {}\n sub_cluster[\"sense\"] = word\n cluster[\"similar_senses\"].append([sub_cluster, SimilarityType.synonym])\n\n for word in similar_words[SimilarityType.similar]:\n sub_cluster = get_cluster(\n word,\n referentiel,\n embeddings_type,\n embeddings_name,\n max_width,\n max_depth,\n only_vocabulary,\n current_depth + 1,\n )\n 
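# each similar word becomes a nested cluster, one level deeper in the tree\n 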
cluster[\"similar_senses\"].append([sub_cluster, SimilarityType.similar])\n\n if current_depth + 1 < max_depth:\n\n for sim_type in SimilarityType:\n if (\n sim_type != SimilarityType.synonym\n and sim_type != SimilarityType.similar\n ):\n for sense in similar_words[sim_type]:\n sub_cluster = get_cluster(\n sense,\n referentiel,\n embeddings_type,\n embeddings_name,\n max_width,\n max_depth,\n only_vocabulary,\n current_depth + 1,\n )\n cluster[\"similar_senses\"].append([sub_cluster, sim_type])\n\n if len(cluster[\"similar_senses\"]) == 0:\n cluster[\"similar_senses\"] = None\n\n return cluster\n\n\ndef build_tree(\n keyword,\n embeddings_type,\n embeddings_name,\n max_depth,\n max_width,\n only_vocabulary,\n referentiels,\n):\n\n \"\"\"\n Build the data structure tree for one particular sense list (originating from one keyword)\n\n Input: keywords: a string\n embeddings_type: type of the embeddings\n embeddings_name: name of the embeddings\n max_depth: maximum depth of keywords search\n max_width: maximum width of keywords search\n only_vocabulary: wether or not it should only return keywords part of the sources vocabulary\n referentiel: referentiel to use for expansion\n \n Output: Data tree for keyword\n \"\"\"\n\n search_result = {}\n search_result[\"original_keyword\"] = keyword\n\n tree = []\n\n senses = get_senses_from_keyword(embeddings_type, keyword)\n\n for sense in senses:\n\n referentiel_output = []\n if referentiels is not None:\n for referentiel in referentiels:\n if referentiel.type == \"tags\":\n\n results = request_lexical_resources.get_most_similar_referentiels(\n sense,\n referentiel.name,\n referentiel.type,\n embeddings_type,\n embeddings_name,\n referentiel.width,\n 0,\n )\n\n keyword_sim_list = []\n for result in results:\n keyword_sim_list.append((result[\"word\"], result[\"similarity\"]))\n keyword_sim_list = use_feedback(sense, keyword_sim_list, 0.6)\n keyword_sim_list = sort_array_of_tuple_with_second_value(\n keyword_sim_list\n )\n\n referentiel_output = {\n \"name\": referentiel.name,\n \"type\": referentiel.type,\n \"tags\": [x[0] for x in keyword_sim_list],\n }\n\n search_result[\"referentiel\"] = referentiel_output\n\n tree.append(\n get_cluster(\n sense,\n referentiel,\n embeddings_type,\n embeddings_name,\n max_width,\n max_depth,\n only_vocabulary,\n 0,\n )\n )\n\n search_result[\"tree\"] = tree\n\n return search_result\n\n\ndef expand_keywords(\n keywords,\n embeddings_type,\n embeddings_name,\n max_depth,\n max_width,\n only_vocabulary,\n referentiels,\n):\n \"\"\"\n Return the most similar keywords from the initial keywords\n\n Input: keywords: a string\n embeddings_type: type of the embeddings\n embeddings_name: name of the embeddings\n max_depth: maximum depth of keywords search\n max_width: maximum width of keywords search\n only_vocabulary: wether or not it should only return keywords part of the sources vocabulary\n referentiel: object of type main.referentiel\n \n Output: Data structure with most similar keywords found\n \"\"\"\n\n keywords_list = split_user_entry(keywords)\n\n data = []\n for keyword in keywords_list:\n if len(keyword) > 3:\n keyword = keyword.lower()\n data.append(\n build_tree(\n keyword,\n embeddings_type,\n embeddings_name,\n max_depth,\n max_width,\n only_vocabulary,\n referentiels,\n )\n )\n for referentiel in referentiels:\n if referentiel.type == \"geoloc\":\n data.append(\n {\n \"original_keyword\": referentiel.tag,\n \"referentiel\": get_geoloc_parents(\n referentiel.name, referentiel.tag\n ),\n }\n )\n return 
data\n","repo_name":"datactivist/fastapi-query-expansion","sub_path":"app/expansion.py","file_name":"expansion.py","file_ext":"py","file_size_in_byte":11967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40194310275","text":"\"\"\"\nproblem tier : Gold 3 (solved.ac)\n\"\"\"\n\nimport sys\nsys.stdin = open('../input.txt', 'r')\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\n\npoints = []\nfor i in range(N):\n s, e = map(int, input().split())\n if s > e:\n points.append([e, s])\n\npoints.sort(key=lambda x: x[0])\n\nlength = 0\ns, e = 0, 0\nfor p in points:\n if p[0] > e:\n length += e-s\n s, e = p[0], p[1]\n else:\n e = max(e, p[1])\n\nlength += e-s\n\nprint(M + length*2)\n","repo_name":"hyeongrokheo/baekjoon","sub_path":"solved/[2836] 수상 택시.py","file_name":"[2836] 수상 택시.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14303504298","text":"from tkinter import * # Import tkinter\n\ndef display():\n canvas.delete(\"line\")\n\n centerPoint = [(width/2), (height/2)]\n \n makeH(int(order.get()), 100, centerPoint[0], centerPoint[1])\n\ndef makeH(order, size, x, y):\n if (order == 0):\n return\n x0 = x - size/2\n x1 = x + size/2\n y0 = y - size/2\n y1 = y + size/2\n\n drawLine(x0, y, x1, y)\n drawLine(x0, y0, x0, y1)\n drawLine(x1, y0, x1, y1)\n\n makeH(order-1, size/2, x0, y0)\n makeH(order-1, size/2, x0, y1)\n makeH(order-1, size/2, x1, y0)\n makeH(order-1, size/2, x1, y1)\n \ndef drawLine(p1x, p1y, p2x, p2y):\n canvas.create_line(p1x, p1y, p2x, p2y, tags = \"line\")\n return\n \n# Return the midpoint between two points\ndef midpoint(p1x, p1y, p2x, p2y):\n p = 2 * [0]\n p[0] = (p1[0] + p2[0]) / 2\n p[1] = (p1[1] + p2[1]) / 2\n return p \n\nwindow = Tk() # Create a window\nwindow.title(\"H Tree\") # Set a title\n\nwidth = 400\nheight = 400\ncanvas = Canvas(window, width = width, height = height)\ncanvas.pack()\n\n# Add a label, an entry, and a button to frame1\nframe1 = Frame(window) # Create and add a frame to window\nframe1.pack()\n\nLabel(frame1, text = \"Enter an order: \").pack(side = LEFT)\norder = StringVar()\nentry = Entry(frame1, textvariable = order, justify = RIGHT).pack(side = LEFT)\nButton(frame1, text = \"Display H Tree\", command = display).pack(side = LEFT)\n\nwindow.mainloop() # Create an event loop\n \n","repo_name":"TheRealChrisM/CVGS-Computer-Science","sub_path":"Python/recursiveLab/hTreeFixed.py","file_name":"hTreeFixed.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41230272312","text":"\nimport numpy as np\nimport pandas as pd\n\npd.set_option(\"display.unicode.ambiguous_as_wide\", True)\npd.set_option(\"display.unicode.east_asian_width\", True)\npd.set_option(\"display.width\", None)\n\ndef main():\n df = pd.read_csv('new_data.csv')\n # df[df['景点排名'].str.contains('%') == True] = np.nan\n # df[df['评分'].str.contains('--')==True]=np.nan\n # df.dropna(axis=0,subset=[\"景点排名\"],inplace=True)\n # df.dropna(axis=0, subset=['地址'], inplace=True)\n\n df.sort_values(by=['攻略数量', '评论数量'], ascending=[False, False], inplace=True)\n # 索引重新排列\n data = df.head(5).reset_index(drop=True)\n print(data)\nif __name__ == '__main__':\n 
main()","repo_name":"2245467668/get_data","sub_path":"sort/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"577738777","text":"import cv2\nimport numpy as np\nimport math\nimport pydicom\nfrom medphunc.misc import ssde\nfrom medphunc.image_io import ct\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n\n#%% Class refactor\nclass WED:\n \"\"\"\n Calculate WED from either a single image, or a CT image stack.\n \n The method from_folder() should be used for loading a folder of CT images\n \n \n \"\"\"\n wed = None\n ssde = None\n wed_results = {}\n im = None\n scale = None\n threshold = None\n window = None\n region = None\n method = 'centre'\n verbose=False\n \n def __init__(self, im, scale, threshold=-300,window=False, region='body',\n verbose=False, method='center'):\n self.im = im\n self.scale=scale\n self.threshold=threshold\n self.window=window\n self.region=region\n self.verbose=verbose\n self.method=method\n self.calculate_wed()\n self.calculate_ssde()\n \n\n \n @classmethod\n def from_image(cls, im, scale, threshold=-300, window=False, region='body'):\n return cls(im, scale, threshold, window, region)\n \n \n @classmethod\n def from_folder(cls, folder, threshold=-300, region=None):\n vol, dcm, end_points = ct.load_ct_folder(folder)\n return cls.from_volume(vol, dcm, threshold, region)\n \n \n @classmethod\n def from_volume(cls, volume, dcm, threshold=-300, region=None, verbose=False, method='centre'):\n\n if region is None:\n try:\n if 'Body' in dcm.CTDIPhantomTypeCodeSequence[0].CodeMeaning:\n region = 'body'\n elif 'Head' in dcm.CTDIPhantomTypeCodeSequence[0].CodeMeaning:\n region='head'\n except AttributeError:\n if dcm.get('BodyPartExamined') == 'HEAD':\n region='head'\n else:\n region='body'\n logger.warning('region not found in dicom and not provided'+ \n ' manually - assumed body phantom')\n elif region not in ['body', 'head']:\n raise(ValueError(f'region must be one of [body,head], {region} was passed'))\n \n \n c = cls(volume, np.product(dcm.PixelSpacing), threshold=threshold, region=region, window=None, verbose=verbose, method=method)\n\n return c\n \n \n @classmethod\n def from_dicom_objects(cls, dcm_objects, threshold=-300, region=None):\n im, dcm, end_points = ct.load_ct_dicoms(dcm_objects)\n return cls.from_volume(im, dcm, threshold, region)\n \n \n def calculate_wed(self):\n if len(self.im.shape) == 3:\n self.wed_results = wed_from_volume(self.im, self.scale, self.threshold, self.window, self.verbose, self.method)\n if self.method=='full':\n self.wed_results['water_equiv_circle_diam'] = self.wed_results['mean_wed']\n else:\n self.wed_results = wed_from_image(self.im, self.scale, self.threshold, self.window)\n self.wed = self.wed_results['water_equiv_circle_diam']/10\n \n \n def calculate_ssde(self):\n self.ssde = ssde.ssde_from_wed(self.wed_results['water_equiv_circle_diam']/10, self.region)\n \n def __repr__(self):\n return f'Water equivalent diameter calculations \\nWED: {self.wed} cm\\nSSDE: {self.ssde.iloc[0]}'\n \n \ndef wed_from_volume(vol, scale, threshold=-300, window=False, verbose=False, method='centre'):\n if method=='centre':\n im = vol[vol.shape[0]//2,]\n output = wed_from_image(im, scale, threshold, window, verbose)\n elif method=='full':\n wed_results = []\n for i in range(vol.shape[0]):\n wed_results.append(wed_from_image(vol[i,], scale, threshold, window, verbose))\n weds = np.array([o['water_equiv_circle_diam'] 
for o in wed_results])\n output = {'median_wed': np.median(weds),'max_wed': np.max(weds), 'min_wed':np.min(weds), 'mean_wed':np.mean(weds)}\n if verbose:\n output['wed_slice_results'] = wed_results\n else:\n raise(ValueError('method argument not one of \"centre\",\"full\" (provided %s)' % method))\n return output\n \n \ndef wed_from_image(im, scale, threshold = -300, window = False, verbose=False):\n '''\n Calculate the water equivalent diameter from a CT image.\n \n Written for axial slices.\n\n Parameters\n ----------\n im : numpy array\n 2d axial CT slice, anatomy must be surrounded by air.\n threshold : int\n threshold value to separate air from human in image.\n scale : float\n Pixel size in image, in mm^2/pixel.\n window : tuple\n Trigger return of debugging image, set (WW, WL) in debugging image\n\n Returns\n -------\n output : dictionary\n dictionary containing wed information. Of particular note, \n water_equiv_circle_diam contains the main output,\n image_overlay contains a debugging image that can be used to validate\n segmentation/contouring\n\n '''\n # map ww/wl for contour detection (filter_img)\n thresh = ((im > threshold)*255).astype(np.uint8)\n\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n\n # calculate area and equivalent circle diameter for the largest contour (assumed to be the patient without table or clothing)\n # Assume scale is in mm2\n contour = max(contours, key=lambda a: cv2.contourArea(a))\n area = cv2.contourArea(contour) * scale\n equiv_circle_diam = 2.0*math.sqrt(area/math.pi)\n\n hull = cv2.convexHull(contour)\n hullarea = cv2.contourArea(hull) * scale\n hullequiv = 2.0*(hullarea/math.pi)**0.5\n\n # create mask of largest contour\n mask_img = np.zeros((im.shape), np.uint8)\n cv2.drawContours(mask_img,[contour],0,255,-1)\n\n # calculate mean HU of mask area\n roi_mean_hu = cv2.mean(im, mask=mask_img)[0]\n\n \n\n\n # calculate water equivalent area (Aw) and water equivalent circle diameter (Dw)\n # \n water_equiv_area = 0.001 * roi_mean_hu * area + area\n water_equiv_circle_diam = 2.0 * math.sqrt(water_equiv_area/math.pi)\n\n if window:\n # map ww/wl to human-viewable image (view_img)\n remap = lambda t: 255.0 * (1.0 * t - (window[1] - 0.5 * window[0])) / window[0] # create LUT function; window[0]: ww, window[1]: wl\n view_img = np.array([remap(row) for row in im]) # rescale\n view_img = np.clip(view_img, 0, 255) # limit to 8 bit\n view_img = view_img.astype(np.uint8) # set color depth\n view_img = cv2.cvtColor(view_img, cv2.COLOR_GRAY2RGB) # add RBG channels\n\n # create overlay to draw on human-viewable image (to be added as transparent layer)\n overlay_img = np.copy(view_img)\n\n # draw contour 3px wide on overlay layer, merge layers with transparency\n cv2.drawContours(overlay_img, [hull], -1, (0,255,255), 2, cv2.LINE_AA)\n cv2.drawContours(overlay_img, [contour], -1, (0,255,0), 2, cv2.LINE_AA)\n cv2.addWeighted(overlay_img, 0.40, view_img, 1 - 0.40, 0, view_img)\n \n # add text\n cv2.putText(view_img, \"patient:\", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2, cv2.LINE_AA)\n cv2.putText(view_img, \"patient:\", (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA)\n cv2.putText(view_img, \"{:.0f} mm^2, circle d = {:.0f} mm\".format(area,equiv_circle_diam), (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2, cv2.LINE_AA)\n cv2.putText(view_img, \"{:.0f} mm^2, circle d = {:.0f} mm\".format(area,equiv_circle_diam), (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA)\n\n 
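# second overlay line: water-equivalent area and diameter\n 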
cv2.putText(view_img, \"water eq.:\", (10,36), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2, cv2.LINE_AA)\n cv2.putText(view_img, \"water eq.:\", (10,36), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA)\n cv2.putText(view_img, \"{:.0f} mm^2, circle d = {:.0f} mm\".format(water_equiv_area, water_equiv_circle_diam), (100,36), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2, cv2.LINE_AA)\n cv2.putText(view_img, \"{:.0f} mm^2, circle d = {:.0f} mm\".format(water_equiv_area, water_equiv_circle_diam), (100,36), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA)\n\n cv2.putText(view_img, \"hull:\", (10,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2, cv2.LINE_AA)\n cv2.putText(view_img, \"hull:\", (10,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,200,200), 1, cv2.LINE_AA)\n cv2.putText(view_img, \"{:.0f} mm^2, circle d = {:.0f} mm\".format(hullarea, hullequiv), (100,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2, cv2.LINE_AA)\n cv2.putText(view_img, \"{:.0f} mm^2, circle d = {:.0f} mm\".format(hullarea, hullequiv), (100,60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,200,200), 1, cv2.LINE_AA)\n else:\n view_img = False\n \n output = {'area':area,\n 'equiv_circle_diam':equiv_circle_diam,\n 'water_equiv_area':water_equiv_area,\n 'water_equiv_circle_diam':water_equiv_circle_diam,\n 'hull_area':hullarea,\n 'hull_equiv':hullequiv\n }\n if verbose:\n output['image_overlay']:view_img\n return output\n\ndef wed_from_dicom_file(dicom_filename, threshold = -300, window = False):\n d = pydicom.read_file(dicom_filename)\n return wed_from_dicom(d, threshold, window)\n\n\ndef wed_from_dicom(dicom_pydicom, threshold = -300, window = False):\n '''\n Calculate the water equivalent diameter from a CT image.\n \n Written for axial slices.\n\n Parameters\n ----------\n dicom_pydicom : pydicom dataset\n Must containt 2d axial CT slice, anatomy must be surrounded by air.\n threshold : int\n threshold value to separate air from human in image.\n scale : float\n Pixel size in image, in mm^2/pixel.\n\n Returns\n -------\n output : dictionary\n dictionary containing wed information. Of particular note, \n water_equiv_circle_diam contains the main output,\n image_overlay contains a debugging image that can be used to validate\n segmentation/contouring\n\n '''\n\n im = dicom_pydicom.pixel_array # dicom pixel values as 2D numpy pixel array\n im = im - 1000.0 # remap scale 0:... 
to HU -1000:...\n\n # determine pixel area in mm²/px²\n scale = dicom_pydicom.PixelSpacing[0] * dicom_pydicom.PixelSpacing[1]\n \n return wed_from_image(im, scale, threshold, window)\n\ndef get_wed(dicom):\n '''input agnostic wrapping function that returns only WED'''\n if not isinstance(dicom, pydicom.dataset.FileDataset):\n dicom = pydicom.read_file(dicom)\n \n output = wed_from_dicom(dicom)\n wed = output['water_equiv_circle_diam']\n return wed\n\n#%%\n\nif __name__ == \"__main__\":\n\n import sys\n try:\n filename = sys.argv[1]\n threshold = int(sys.argv[2])\n except:\n raise AttributeError('\\n\\nUsage:\\n$ DICOMwaterequivalent.py filename threshold\\nRead source code for details.')\n\n\n result = wed_from_dicom_file(filename, threshold, (1600,-400))\n\n # cv2.imwrite('out.png', result['image_overlay']) # to write numpy image as file\n print({k: v for k, v in result.items() if k != 'image_overlay'}, flush=True)\n view_img = result.get('image_overlay')\n if view_img is not None and view_img is not False:\n cv2.imshow('DICOMwaterequivalent', view_img) # press any key in graphical window to close\n cv2.waitKey(0)\n cv2.destroyAllWindows()","repo_name":"ckswilliams/medphunc","sub_path":"medphunc/image_analysis/water_equivalent_diameter.py","file_name":"water_equivalent_diameter.py","file_ext":"py","file_size_in_byte":11318,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"22399648238","text":"from time import sleep\nfrom ib.ext.Contract import Contract\nfrom ib.opt import ibConnection, message\n\ndef my_account_handler(msg):\n print(msg)\n\n\ndef my_tick_handler(msg):\n print(msg)\n\n\nif __name__ == '__main__':\n con = ibConnection(port=7496, clientId=996)\n con.register(my_account_handler, 'UpdateAccountValue')\n con.connect()\n\n def inner():\n\n con.reqAccountUpdates(1, '')\n\n inner()\n sleep(5)\n print('disconnected', con.disconnect())\n inner()\n sleep(3)\n","repo_name":"lightme16/ibpy_work","sub_path":"IBAccountInfo/print_account_info.py","file_name":"print_account_info.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43771935037","text":"'''\n1099 : [Basics - 2D Arrays] The Diligent Ant\nThe ant moves to the right, and when it meets a wall it moves down,\nalways taking the fastest path. (When a path opens up to the right, it moves right again.)\n\nThe ant placed in the maze box moved only right or down\nuntil it found the food or could not move any further.\n\nGiven the structure of the maze box as 0 (passable) and 1 (wall or obstacle),\nwith the food marked as 2, predict the diligent ant's path.\n\nIf it reaches the rightmost cell of the bottom row, can no longer move,\nor finds the food, it stays there and does not move any further.\n\nThe border of the maze box is entirely walls,\nand the ant's nest is always at (2, 2), so the ant starts from (2, 2).\n'''\nMAX_VALUE = 10\nmaze = [[0 for i in range(MAX_VALUE)] for i in range(MAX_VALUE)]\nfor i in range(MAX_VALUE):\n temp = list(map(int, input().split()))\n for j in range(MAX_VALUE):\n maze[i][j] = temp[j]\nx = 1 # row index (moves down)\ny = 1 # column index (moves right)\nstop = False\n\nwhile(stop!=True):\n if(maze[x][y]==2):\n maze[x][y] = 9\n stop = True\n elif(maze[x][y+1]!=1):\n maze[x][y] = 9\n if(maze[x][y+1]==2):\n y = y+1\n maze[x][y] = 9\n stop = True\n else:\n y = y+1 # keep moving right\n maze[x][y] = 9\n else: # move down\n maze[x][y] = 9\n if(maze[x+1][y]==1):\n stop = True\n elif(maze[x+1][y]==2):\n x = x+1\n maze[x][y] = 9\n stop = True\n else:\n x = x+1\n maze[x][y] = 9\n\nfor i in range(MAX_VALUE):\n for j in range(MAX_VALUE):\n print(maze[i][j], end=\" \")\n 
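# newline at the end of each maze row\n 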
print()\n","repo_name":"minhyeonlee/algorithm-python","sub_path":"codeUp/codeUpBasic/1099.py","file_name":"1099.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"42935880264","text":"import random\r\n\r\nuser_score = 0\r\ncomputer_score = 0\r\nsigns = [\"rock\" , \"paper\" , \"scissors\"]\r\n\r\n# Default function for every pattern in the game\r\ndef patterns (sign1,sign2,message,score):\r\n if user_input == sign1 and computer_choice == sign2:\r\n print(message)\r\n score += 1\r\n return score\r\n\r\nwhile True:\r\n user_input = input(\"Type Rock/ Paper/ Scissors or 'Q' to quit : \").lower()\r\n\r\n print()\r\n\r\n if user_input == \"q\":\r\n break\r\n \r\n if user_input not in signs:\r\n print(\"Invalid input\")\r\n continue\r\n\r\n random_num = random.randint(0,2)\r\n # rock = 0 , paper = 1 , scissor = 2\r\n\r\n computer_choice = signs[random_num]\r\n print(\"Computer picked \" + computer_choice)\r\n \r\n # User winning patterns\r\n user_score = patterns (\"rock\",\"scissors\",\"You Won !\",user_score)\r\n user_score = patterns (\"paper\",\"rock\",\"You Won !\",user_score)\r\n user_score = patterns (\"scissors\",\"paper\",\"You Won !\",user_score)\r\n\r\n # Computer winning patterns\r\n computer_score = patterns (\"rock\",\"paper\",\"You lost !\",computer_score)\r\n computer_score = patterns (\"scissors\",\"rock\",\"You lost !\",computer_score)\r\n computer_score = patterns (\"paper\",\"scissors\",\"You lost !\",computer_score)\r\n \r\n # Both equal patterns\r\n if user_input == computer_choice:\r\n print(\"It is a tie\")\r\n\r\n print(\"------------------------------------\")\r\n print()\r\n\r\nprint(\"You won\" ,user_score, \"times.\")\r\nprint(\"Computer won\" ,computer_score, \"times.\")\r\nprint()\r\nprint(\"Thank you for playing\")","repo_name":"MiniduNimna/Simple-python-game","sub_path":"RPS code.py","file_name":"RPS code.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41309726847","text":"\"\"\"The KAS class builds the Flask app from openapi.yaml using Connexion.\"\"\"\n\nimport os\nimport connexion\nimport importlib_resources\nimport logging\nimport urllib.parse\n\nfrom . import services\n\nfrom .models import HealthzPluginRunner\nfrom .models import RewrapPluginRunner\nfrom .models import RewrapPluginRunnerV2\nfrom .models import UpsertPluginRunner\nfrom .models import UpsertPluginRunnerV2\nfrom .models import KeyMaster\n\nfrom .errors import PluginIsBadError\nfrom .errors import ServerStartupError\nfrom .errors import MiddlewareIsBadError\n\nfrom .abstractions import (\n AbstractHealthzPlugin,\n AbstractRewrapPlugin,\n AbstractUpsertPlugin,\n)\n\nfrom .util.utility import value_to_boolean\nfrom .util.reverse_proxy import ReverseProxied\nfrom .util.swagger_ui_bundle import swagger_ui_4_path\nfrom .util.hooks import hook_into, post_rewrap_v2_hook_default\n\nlogger = logging.getLogger(__name__)\n\n\ndef clean_trusted_url(u):\n r = urllib.parse.urlparse(u, scheme=\"https\")\n if r.fragment:\n logger.warning(\n \"Fragments in trusted entitlement URIs are ignored: [%s] won't be required\",\n r.fragment,\n )\n if r.query:\n logger.warning(\n \"Be careful. 
We use prefix matching for trusted entitlers, so query params are unusual [%s]\",\n u,\n )\n if r.path:\n return f\"{r.scheme}://{r.netloc}{r.path}?{r.query}\"\n return f\"{r.scheme}://{r.netloc}/?{r.query}\"\n if not r.path:\n return f\"{r.scheme}://{r.netloc}/\"\n return f\"{r.scheme}://{r.netloc}{r.path}\"\n\n\ndef create_session_ping(version):\n \"\"\"Create a session ping callable.\"\"\"\n\n def session_ping(request=None):\n return services.ping(version)\n\n return session_ping\n\n\ndef create_session_healthz(plugins):\n \"\"\"Wrapper for healthz plugins\"\"\"\n plugin_runner = HealthzPluginRunner(plugins)\n\n def session_healthz(*, probe=\"liveness\"):\n plugin_runner.healthz(probe=probe)\n\n return session_healthz\n\n\ndef create_session_rewrap(key_master, plugins):\n \"\"\"Create a simpler callable that accepts one argument, the data.\n\n The other components that the rewrap service needs are captured in the\n closure of this factory function. This pre-loaded dependency injection\n makes the service call cleaner and clearer.\n \"\"\"\n plugin_runner = RewrapPluginRunner(plugins)\n\n def session_rewrap(data, options):\n return services.rewrap(data, options, plugin_runner, key_master)\n\n return session_rewrap\n\n\ndef create_session_rewrap_v2(key_master, plugins, trusted_entitlers):\n \"\"\"Create a simpler callable that accepts one argument, the data.\n\n The other components that the rewrap service needs are captured in the\n closure of this factory function. This pre-loaded dependency injection\n makes the service call cleaner and clearer.\n \"\"\"\n plugin_runner = RewrapPluginRunnerV2(plugins)\n\n def session_rewrap(data, options):\n return hook_into(\n post=Kas.get_instance()._post_rewrap_hook,\n err=Kas.get_instance()._err_rewrap_hook,\n )(services.rewrap_v2)(\n data, options, plugin_runner, key_master, trusted_entitlers\n )\n\n return session_rewrap\n\n\ndef create_session_upsert(key_master, plugins):\n \"\"\"Create a simpler callable that accepts one argument, the data.\n\n The other components that the upsert service needs are captured in the\n closure of this factory function. This pre-loaded dependency injection\n makes the service call cleaner and clearer.\n \"\"\"\n plugin_runner = UpsertPluginRunner(plugins)\n\n def session_upsert(data, options):\n return services.upsert(data, options, plugin_runner, key_master)\n\n return session_upsert\n\n\ndef create_session_upsert_v2(key_master, plugins, trusted_entitlers):\n \"\"\"Create a simpler callable that accepts one argument, the data.\n\n The other components that the upsert service needs are captured in the\n closure of this factory function. 
This pre-loaded dependency injection\n makes the service call cleaner and clearer.\n\n (narrator voice: \"Translation: 'I made this one aspect simpler at the cost of making LITERALLY EVERYTHING ELSE MORE COMPLICATED'\")\n \"\"\"\n plugin_runner = UpsertPluginRunnerV2(plugins)\n\n def session_upsert(data, options):\n return services.upsert_v2(\n data, options, plugin_runner, key_master, trusted_entitlers\n )\n\n return session_upsert\n\n\ndef create_session_public_key(key_master):\n \"\"\"Create a session callable for getting the public key.\n\n The keymaster is carried in the closure of this function.\n \"\"\"\n\n def session_kas_public_key(algorithm, fmt, v):\n return services.kas_public_key(key_master, algorithm, fmt, v)\n\n return session_kas_public_key\n\n\nclass Kas(object):\n \"\"\"The KAS object is a singleton that contains the business logic callables for the Kas server.\n\n When the app method is called, Kas will dynamically construct the callables that handle requests.\n For upsert and rewrap, plugins can be loaded to add functionality.\n It will then create the Flask App using Connexion to bind API calls, through web package functions,\n to these callables.\n \"\"\"\n\n __instance = None\n\n @staticmethod\n def get_instance():\n if Kas.__instance == None:\n return Kas()\n return Kas.__instance\n\n def __init__(self):\n \"\"\"Construct an empty KAS object with root name.\"\"\"\n if Kas.__instance != None:\n raise ServerStartupError(\"Kas App was already created.\")\n self._root_name = \"kas\"\n self._version = \"0.0.0\"\n self._healthz_plugins = []\n self._rewrap_plugins = []\n self._rewrap_plugins_v2 = []\n self._trusted_entitlers = []\n self._upsert_plugins = []\n self._upsert_plugins_v2 = []\n self._post_rewrap_hook = post_rewrap_v2_hook_default\n self._err_rewrap_hook = lambda *args: None\n self._middleware = None\n self._key_master = KeyMaster()\n\n # These callables and the flask app will be constructed by the app() method after configuration\n self._session_ping = None\n self._session_rewrap = None\n self._session_rewrap_v2 = None\n self._session_upsert = None\n self._session_upsert_v2 = None\n self._session_kas_public_key = None\n self._app = None\n\n Kas.__instance = self\n\n def set_root_name(self, name):\n self._root_name = name\n\n def set_trusted_entitlers(self, trusted_entitlers):\n self._trusted_entitlers = [clean_trusted_url(e) for e in trusted_entitlers]\n\n def set_version(self, version=None):\n \"\"\"Set version for the heartbeat message.\"\"\"\n if version is not None:\n version = version.strip() # trim the string\n version = version.rstrip(\"/n\") # remove linefeed\n logger.debug(\"Setting version to %s\", version)\n self._version = version\n\n def set_key_pem(self, key_name, key_type, pem_key):\n \"\"\"Set a key directly with a PEM encoded string.\"\"\"\n self._key_master.set_key_pem(key_name, key_type, pem_key)\n\n def set_key_path(self, key_name, key_type, key_path):\n \"\"\"Set a key by providing a path to a file containing a PEM string.\"\"\"\n self._key_master.set_key_path(key_name, key_type, key_path)\n\n def use_upsert_plugin(self, plugin):\n \"\"\"Add an upsert plugin.\n\n This method adds policy side-effect plugins to the KAS. The order\n that this method is called in is important. Plugins get the Policy\n returned by the prior plugin. 
    def use_upsert_plugin(self, plugin):\n        \"\"\"Add an upsert plugin.\n\n        This method adds policy side-effect plugins to the KAS. The order\n        that this method is called in is important. Plugins get the Policy\n        returned by the prior plugin. They are called in order.\n        \"\"\"\n        if isinstance(plugin, AbstractUpsertPlugin):\n            self._upsert_plugins.append(plugin)\n        else:\n            raise PluginIsBadError(\"plugin is not a descendant of AbstractUpsertPlugin\")\n\n    def use_upsert_plugin_v2(self, plugin):\n        \"\"\"Add an upsert plugin (v2).\n\n        This method adds policy side-effect plugins to the KAS. The order\n        that this method is called in is important. Plugins get the Policy\n        returned by the prior plugin. They are called in order.\n        \"\"\"\n        if isinstance(plugin, AbstractUpsertPlugin):\n            self._upsert_plugins_v2.append(plugin)\n        else:\n            raise PluginIsBadError(\"plugin is not a descendant of AbstractUpsertPlugin\")\n\n    def use_rewrap_plugin(self, plugin):\n        \"\"\"Add a rewrap plugin.\n\n        This method adds policy side-effect plugins to the KAS. The order\n        that this method is called in is important. Plugins get the Policy\n        returned by the prior plugin. They are called in order.\n        \"\"\"\n        if isinstance(plugin, AbstractRewrapPlugin):\n            self._rewrap_plugins.append(plugin)\n        else:\n            raise PluginIsBadError(\"plugin is not a descendant of AbstractRewrapPlugin\")\n\n    def use_rewrap_plugin_v2(self, plugin):\n        \"\"\"Add a rewrap plugin (v2).\n\n        This method adds policy side-effect plugins to the KAS. The order\n        that this method is called in is important. Plugins get the Policy\n        returned by the prior plugin. They are called in order.\n        \"\"\"\n        if isinstance(plugin, AbstractRewrapPlugin):\n            self._rewrap_plugins_v2.append(plugin)\n        else:\n            raise PluginIsBadError(\"plugin is not a descendant of AbstractRewrapPlugin\")\n\n    def use_healthz_plugin(self, plugin):\n        \"\"\"Add a healthz plugin.\"\"\"\n        if isinstance(plugin, AbstractHealthzPlugin):\n            self._healthz_plugins.append(plugin)\n        else:\n            raise PluginIsBadError(\"plugin is not a descendant of AbstractHealthzPlugin\")\n\n    def use_post_rewrap_hook(self, hook):\n        \"\"\"Add a hook called after rewrap completes.\"\"\"\n        if not callable(hook):\n            raise MiddlewareIsBadError(\"Provided post-rewrap hook is not callable\")\n        self._post_rewrap_hook = hook\n\n    def use_err_rewrap_hook(self, hook):\n        \"\"\"Add a hook called when rewrap returns an error.\"\"\"\n        if not callable(hook):\n            raise MiddlewareIsBadError(\"Provided error hook is not callable\")\n        self._err_rewrap_hook = hook\n\n
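    # --- Editor's note: `hook_into` (used by create_session_rewrap_v2 above)\n    # is defined elsewhere; a plausible sketch of that post/err composition,\n    # for illustration only and not the actual implementation:\n    #\n    #     def hook_into(post, err):\n    #         def wrap(fn):\n    #             def inner(*args, **kwargs):\n    #                 try:\n    #                     return post(fn(*args, **kwargs))\n    #                 except Exception as e:\n    #                     err(e)\n    #                     raise\n    #             return inner\n    #         return wrap\n\n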
    def add_middleware(self, middleware):\n        \"\"\"Add middleware called with upsert and rewrap.\"\"\"\n        if not callable(middleware):\n            raise MiddlewareIsBadError(\"Provided middleware is not callable\")\n        self._middleware = middleware\n\n    def get_middleware(self):\n        \"\"\"Return the callable middleware.\"\"\"\n        if self._middleware is not None:\n            return self._middleware\n        return lambda *args: None\n\n    def get_session_healthz(self):\n        \"\"\"Return the callable to process healthz requests.\"\"\"\n        return self._session_healthz\n\n    def get_session_ping(self):\n        \"\"\"Return the callable to process ping requests.\"\"\"\n        return self._session_ping\n\n    def get_session_rewrap(self):\n        \"\"\"Return the callable to process rewrap requests.\"\"\"\n        return self._session_rewrap\n\n    def get_session_rewrap_v2(self):\n        \"\"\"Return the callable to process rewrap (v2) requests.\"\"\"\n        return self._session_rewrap_v2\n\n    def get_session_upsert(self):\n        \"\"\"Return the callable to process upsert requests.\"\"\"\n        return self._session_upsert\n\n    def get_session_upsert_v2(self):\n        \"\"\"Return the callable to process upsert (v2) requests.\"\"\"\n        return self._session_upsert_v2\n\n    def get_session_public_key(self):\n        \"\"\"Return the callable to process public key requests.\"\"\"\n        return self._session_kas_public_key\n\n    def app(self):\n        \"\"\"Produce a wsgi-callable app.\n\n        Build the callables that will be used to process requests.\n        Build the Flask app from OpenAPI using Connexion.\n        The web package is used to connect REST requests to these callables via this Kas object.\n        \"\"\"\n\n        if self._app is not None:\n            raise ServerStartupError(\"App was already constructed\")\n\n        self._session_healthz = create_session_healthz(self._healthz_plugins)\n\n        self._session_ping = create_session_ping(self._version)\n\n        self._session_rewrap = create_session_rewrap(\n            self._key_master, self._rewrap_plugins\n        )\n\n        self._session_rewrap_v2 = create_session_rewrap_v2(\n            self._key_master, self._rewrap_plugins_v2, self._trusted_entitlers\n        )\n\n        self._session_upsert = create_session_upsert(\n            self._key_master, self._upsert_plugins\n        )\n\n        self._session_upsert_v2 = create_session_upsert_v2(\n            self._key_master, self._upsert_plugins_v2, self._trusted_entitlers\n        )\n\n        self._session_kas_public_key = create_session_public_key(self._key_master)\n\n        flask_options = {\"swagger_url\": \"/docs\"}\n        app = connexion.FlaskApp(\n            self._root_name, specification_dir=\"api/\", options=flask_options\n        )\n\n        # Allow swagger_ui to be disabled\n        options = {\"swagger_ui\": False}\n        if swagger_enabled():\n            # Turn on the Swagger UI feature\n            logger.warning(\"Enable Swagger UI\")\n            flask_app = app.app\n\n            proxied = ReverseProxied(flask_app.wsgi_app, script_name=\"/api/kas/\")\n            flask_app.wsgi_app = proxied\n            options.update({\"swagger_ui\": True, \"swagger_path\": swagger_ui_4_path})\n        else:\n            logger.debug(\"Disable Swagger UI\")\n\n        # Connexion will link REST endpoints to handlers using the openapi.yaml file\n        openapi_file = importlib_resources.files(__package__) / \"api\" / \"openapi.yaml\"\n        app.add_api(openapi_file, options=options, strict_validation=True)\n\n        logger.debug(\"KAS app starting.\")\n        self._app = app.app\n        return self._app\n\n\ndef swagger_enabled():\n    \"\"\"Default False; enable when the SWAGGER_UI env variable is true or 1.\"\"\"\n    return value_to_boolean(os.getenv(\"SWAGGER_UI\", False))\n","repo_name":"opentdf/backend","sub_path":"containers/kas/kas_core/tdf3_kas_core/kas.py","file_name":"kas.py","file_ext":"py","file_size_in_byte":13888,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"3898508804","text":"import os\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom aiogram import types\nfrom aiogram import Bot, Dispatcher\nfrom commands import Contact\nfrom json_db import AddUser, AddUserStates, DeleteUserStates, DeleteUser, show_data_json\nfrom sql_db import show_data\n\nbot = Bot(os.environ['TOKEN'])\ndp = Dispatcher(bot)\n\nadd_user = AddUser()\ndelete_user = DeleteUser()\n\ndp.register_message_handler(add_user.add_user_start)\ndp.register_message_handler(add_user.add_user_first_name, state=AddUserStates.waiting_for_first_name)\ndp.register_message_handler(add_user.add_user_second_name, state=AddUserStates.waiting_for_second_name)\ndp.register_message_handler(add_user.add_user_birthday, state=AddUserStates.waiting_for_birthday)\n\ndp.register_message_handler(delete_user.delete_user)\ndp.register_message_handler(delete_user.delete_users, state=DeleteUserStates.waiting_for_second_name)\n\nbuilder = InlineKeyboardMarkup(row_width=2).add(\nInlineKeyboardButton(text=\"Администрация\", callback_data=\"button1\"),\nInlineKeyboardButton(text=\"Прикассовая зона\", 
callback_data=\"button2\"),\nInlineKeyboardButton(text=\"КБТ\", callback_data=\"button3\"),\nInlineKeyboardButton(text=\"МБТ\", callback_data=\"button4\"),\nInlineKeyboardButton(text=\"ТВ\", callback_data=\"button5\"),\nInlineKeyboardButton(text=\"ИТ/Моб.Мир\", callback_data=\"button6\"))\n\nadmin_k = InlineKeyboardMarkup(row_width=2).add(\nInlineKeyboardButton('Данные DB', callback_data='show_data'),\nInlineKeyboardButton('Данные Json', callback_data='show_data_json'),\nInlineKeyboardButton('Добавить пользователя', callback_data='add_user'),\nInlineKeyboardButton('Удалить пользователя', callback_data='delete_user'))\n\n\nbuilder2 = InlineKeyboardMarkup(row_width=2).add(\nInlineKeyboardButton(text=\"Назад\", callback_data=\"back\"))\n\n\nasync def process_callback_button1(callback_query: types.CallbackQuery):\n await bot.answer_callback_query(callback_query.id)\n if callback_query.data == 'button1':\n await bot.send_message(callback_query.from_user.id, text=f'Администрация:{Contact.admin}', parse_mode=\"HTML\", reply_markup=builder2)\n await callback_query.message.delete()\n if callback_query.data == 'button2':\n await bot.send_message(callback_query.from_user.id, f'Прикассовая зона:{Contact.kassa}', parse_mode=\"HTML\", reply_markup=builder2)\n await callback_query.message.delete()\n if callback_query.data == 'button3':\n await bot.send_message(callback_query.from_user.id, f'КБТ:{Contact.kbt}', parse_mode=\"HTML\", reply_markup=builder2)\n await callback_query.message.delete()\n if callback_query.data == 'button4':\n await bot.send_message(callback_query.from_user.id, f'МБТ:{Contact.mbt}', parse_mode=\"HTML\", reply_markup=builder2)\n await callback_query.message.delete()\n if callback_query.data == 'button5':\n await bot.send_message(callback_query.from_user.id, f'ТВ:{Contact.tv}', parse_mode=\"HTML\", reply_markup=builder2)\n await callback_query.message.delete()\n if callback_query.data == 'button6':\n await bot.send_message(callback_query.from_user.id, f'ИТ/Моб.Мир:{Contact.it}', parse_mode=\"HTML\", reply_markup=builder2)\n await callback_query.message.delete()\n if callback_query.data == 'back':\n await bot.send_message(callback_query.from_user.id, \"Выбери группу контакта:\", reply_markup=builder)\n await callback_query.message.delete()\n\n\nasync def process_callback(callback_query: types.CallbackQuery):\n await bot.answer_callback_query(callback_query.id)\n if callback_query.data == 'show_data':\n await show_data(callback_query.message)\n if callback_query.data == 'add_user':\n await add_user.add_user_start(callback_query.message)\n if callback_query.data == 'delete_user':\n await delete_user.delete_user(callback_query.message)\n if callback_query.data == 'show_data_json':\n await show_data_json(callback_query.message)","repo_name":"traydee/TelegramBot","sub_path":"project1/keybords.py","file_name":"keybords.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15811278717","text":"import sys\n\ndef exercise(base=8):\n\t\"\"\"\n\t\tPrint an invert triangle\n\n\t\tParams:\n\n\t\t\tbase -> The size of the base (is even)\n\n\t\tExample\n\n\t\t\t> exercise(base=8)\n\t\t\t########\n\t\t\t ######\n\t\t\t ####\n\t\t\t ##\n\t\"\"\"\n\tspaces = 0\n\n\twhile base != 0:\n\t\tprint(f\"{' ' * spaces}{'#' * base}\")\n\n\t\tspaces += 1\n\t\tbase -= 2\n\n\nif __name__ == \"__main__\":\n\tif(len(sys.argv) <= 
1):\n\t\texercise()\n\telse:\n\t\texercise(int(sys.argv[1]))\n","repo_name":"HolmesAyala/think_like_a_programmer","sub_path":"chapter_2/exercise_2_1.py","file_name":"exercise_2_1.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19180248244","text":"import sys\n\ndef read_spectrum(in_filename: str) -> tuple:\n    energies = []\n    photons = []\n\n    with open(in_filename) as f:\n        lines = f.readlines()\n\n    for line in lines[18:]:\n        substrs = line.split(\" \")\n        energies.append(float(substrs[0]))\n        photons.append(float(substrs[1][:-1]))\n    \n    return energies, photons\n\ndef get_spectrum(energies: list, photons: list) -> str:\n    spectrum_list = \"\"\n\n    energies_string = \"\".join([\"\\t\\t\"+str(e)+\",\\n\" for e in energies])\n    energies_string = energies_string[:-2]\n    spectrum_list += \"\\tstatic const double energies[] = {\\n\" + energies_string + \"\\n\\t};\\n\\n\"\n\n    photons_string = \"\".join([\"\\t\\t\"+str(p)+\",\\n\" for p in photons])\n    photons_string = photons_string[:-2]\n    spectrum_list += \"\\tstatic const double photons[] = {\\n\" + photons_string + \"\\n\\t};\\n\\n\"\n\n    return spectrum_list\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 1:\n        in_filename = \"spectra/SPECTRA_60kVp_17deg_1Al.txt\"\n        out_filename = \"../LIBRARY/include/constants.hpp\"\n    elif len(sys.argv) == 3:\n        in_filename = sys.argv[1]\n        out_filename = \"../LIBRARY/include/\" + sys.argv[2]\n    elif len(sys.argv) > 3:\n        in_filename = sys.argv[1]\n        out_filename = sys.argv[3] + sys.argv[2]\n    else:\n        # with exactly one argument, in_filename would otherwise be unbound below\n        sys.exit(\"usage: spectra_converter.py [in_file out_file [out_dir]]\")\n\n    energies, photons = read_spectrum(in_filename)\n    print(get_spectrum(energies, photons))","repo_name":"dyc0/X_Ray_simulator_1","sub_path":"HELPERS/spectra_converter.py","file_name":"spectra_converter.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"3718007111","text":"from lxml import html\nimport requests as req\nfrom pymongo import MongoClient\nimport datetime\n\nclient = MongoClient('localhost', 27017)\ndb = client['News']\ncollections = db.news_collection\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.86 YaBrowser/21.3.0.740 Yowser/2.5 Safari/537.36'}\n\nresponse = req.get(\"https://news.mail.ru/\", headers=headers)\ndom = html.fromstring(response.text)\n\nitems = dom.xpath(\"//table[@class = 'daynews__inner']//td[@class = 'daynews__main'] | //table[@class = 'daynews__inner']//div[@class = 'daynews__item']\")\nfor item in items:\n    in_news_item = item.xpath(\".//a//@href\")[0]\n    in_news_response = req.get(in_news_item, headers=headers)\n    dom_in = html.fromstring(in_news_response.text)\n    name = dom_in.xpath(\".//h1/text()\")[0]\n    link = in_news_item\n    source = dom_in.xpath(\".//div[@class = 'breadcrumbs breadcrumbs_article js-ago-wrapper']//span[@class = 'link__text'][1]/text()\")[0]\n    date = str(datetime.date.today())\n    print(name, link, source, date)\n    document = {'name': name,\n                'date': date,\n                'link': link,\n                'source': source}\n    db.news_mail.insert_one(document)\n","repo_name":"OctavianNekit/Parsing_Data","sub_path":"lesson4/mail_news_xpath.py","file_name":"mail_news_xpath.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
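Editor's note: a minimal, self-contained sketch of the lxml/XPath pattern the scraper above uses, run against a static snippet instead of the live site (no network or database required; the snippet content is made up, only the class name mirrors the code above):

from lxml import html

snippet = "<div class='daynews__item'><a href='https://example.com/1'>headline</a></div>"
dom = html.fromstring(snippet)
# same attribute-predicate style as the scraper's queries
print(dom.xpath("//div[@class = 'daynews__item']//a/@href"))  # ['https://example.com/1']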
+{"seq_id":"37104713640","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @File  : utils.py\r\n# @Author: Wong\r\n# @Date  : 2019/3/27\r\n# @Desc  :\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport seaborn as sns\r\n\r\n\r\ndef generate_my_data(seq_length):\r\n    df = pd.read_excel('./data/data_features.xlsx')\r\n    # print(df.head())\r\n    data = df.iloc[:, 1:].copy().values\r\n    x_train = data[:-seq_length, :].copy()\r\n    y_train = data[:-seq_length, -2:].copy()  # keep two columns so the target stays a 2-D tensor\r\n    x_test = data[-3 * seq_length + 1:, :].copy()\r\n    y_test = data[-3 * seq_length + 1:, -2:].copy()\r\n\r\n    # z-score normalization; the one-hot encoded columns are left untouched\r\n    col_one_hot = [4, 5]\r\n    for i in range(x_train.shape[1]):\r\n        if i not in col_one_hot:\r\n            mean = np.mean(x_train[:, i])\r\n            std = np.std(x_train[:, i])\r\n            x_train[:, i] = (x_train[:, i] - mean) / std\r\n            x_test[:, i] = (x_test[:, i] - mean) / std\r\n\r\n    y_mean: np.ndarray = np.array([])\r\n    y_std = np.array([])\r\n    for i in range(y_train.shape[1]):\r\n        mean_y = np.mean(y_train[:, i])\r\n        std_y = np.std(y_train[:, i])\r\n        y_train[:, i] = (y_train[:, i] - mean_y) / std_y\r\n        y_test[:, i] = (y_test[:, i] - mean_y) / std_y\r\n        y_mean = np.append(y_mean, mean_y)\r\n        y_std = np.append(y_std, std_y)\r\n\r\n    return x_train, y_train, x_test, y_test, y_mean, y_std\r\n\r\n\r\ndef generate_train_samples(x, y, input_seq_len, output_seq_len, batch_size=10):\r\n    # randomly pick window start positions\r\n    total_start_points = len(x) - input_seq_len - output_seq_len\r\n    start_x_idx = np.random.choice(range(total_start_points), batch_size, replace=False)\r\n\r\n    input_batch_idxs = [list(range(i, i + input_seq_len)) for i in start_x_idx]\r\n    input_seq = np.take(x, input_batch_idxs, axis=0)\r\n\r\n    output_batch_idxs = [list(range(i + input_seq_len, i + input_seq_len + output_seq_len)) for i in start_x_idx]\r\n    output_seq = np.take(y, output_batch_idxs, axis=0)\r\n\r\n    return input_seq, output_seq  # in shape: (batch_size, time_steps, feature_dim)\r\n\r\n\r\ndef generate_test_samples(x, y, input_seq_len, output_seq_len):\r\n    # make sure that input_seq_len > output_seq_len\r\n    total_samples = x.shape[0]\r\n\r\n    input_batch_idxs = [list(range(i, i + input_seq_len)) for i in\r\n                        range(input_seq_len)]\r\n    input_seq = np.take(x, input_batch_idxs, axis=0)\r\n\r\n    output_batch_idxs = [list(range(i + input_seq_len, i + input_seq_len + output_seq_len)) for i in\r\n                         range(output_seq_len)]\r\n    output_seq = np.take(y, output_batch_idxs, axis=0)\r\n\r\n    # shape(-1,seq_length,features)\r\n    return input_seq, output_seq\r\n","repo_name":"EnowshWong/Multi-step_Multivariate_Time_Series_Prediction","sub_path":"Seq2seq/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"}
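Editor's note: a runnable sketch (synthetic data, arbitrary sizes) of the window indexing that generate_train_samples above performs with np.take, shown for a single window so the resulting shapes are easy to check:

import numpy as np

# synthetic series: 100 steps, 6 input features, 2 targets
x = np.arange(600).reshape(100, 6)
y = np.arange(200).reshape(100, 2)

input_seq_len, output_seq_len, start = 10, 3, 0
in_idx = [list(range(start, start + input_seq_len))]
out_idx = [list(range(start + input_seq_len, start + input_seq_len + output_seq_len))]
print(np.take(x, in_idx, axis=0).shape)   # (1, 10, 6) -> (batch, time_steps, features)
print(np.take(y, out_idx, axis=0).shape)  # (1, 3, 2)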
+{"seq_id":"73960165066","text":"from mongoengine.django.auth import User\n\n\n\ndef user(request):\n\t\"\"\"A context processor that adds the user to template context\"\"\"\n\tprofile = {}\n\tif request.user is not None and request.user.is_authenticated():\n\t\ttry:\n\t\t\tuser_images = request.session['user_images']\n\t\texcept Exception:\n\t\t\tuser_images = \"\"\n\tif request.user is not None and request.user.is_authenticated():\n\t\tloggedUser = User.objects(username=str(request.user))\n\t\treturn {\n\t\t\t'user': request.user,\n\t\t\t'profile':profile,\n\t\t\t'loggedUser':loggedUser[0],\n\t\t\t'user_images':user_images\n\t\t}\n\telse:\n\t\treturn {\n\t\t'user': request.user,\n\t\t'profile':profile,\n\t\t'loggedUser':[],\n\t\t'user_images':\"\"\n\t}","repo_name":"cuongnmfis/dms","sub_path":"myapp/util/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42771836166","text":"#CUSTOMER_RELATED FUNCTIONS (CRUD customer list)\n\ndef view_customer_list():\n    print('The current customer information:')\n    cursor.execute('SELECT * FROM customers')\n    rows = cursor.fetchall()\n    for row in rows:\n        print(f'Name: {(row[1])}, Order ID: {row[2]}')\n\ndef update_customer_list():\n    #view discrepancy in customers in orders list and customers in customer list\n    #shows customers that are in orders but not in customer list yet\n    cursor.execute('''SELECT id, name FROM orders as od\n                    WHERE (NOT EXISTS(SELECT name, order_ID\n                    FROM customers \n                    WHERE (name = od.name AND order_ID = od.id)))''')\n    \n    rows = cursor.fetchall()\n    for row in rows:\n        print(f'Order ID: {row[0]}, Name: {row[1]}') \n    while True:\n        user_input = input('Would you like to add these customers to the customer list?(y/n): ')\n        if user_input == 'y':\n            for row in rows:\n                name = row[1]\n                order_id = row[0]\n                sql = 'INSERT INTO customers (name, order_id) VALUES (%s, %s)'\n                val = (name, order_id)\n                cursor.execute(sql, val)\n                connection.commit() \n            break\n        \n        elif user_input == 'n':\n            print('Customer list has not been updated')\n            break\n\n        else:\n            print('You have selected an invalid option, please try again!') \n            continue\n    view_customer_list() \n\ndef delete_customer_list():\n    del_ids = []\n    cursor.execute('SELECT ID, name, order_id FROM customers') \n    rows = cursor.fetchall()\n    for row in rows:\n        del_ids.append(str(row[0]))\n        print(f'Customer ID: {row[0]}, Name: {row[1]}, Order ID: {row[2]}')\n    while True:\n        customer_id = input('ID of the customer you wish to delete: ')\n\n        if customer_id in del_ids:\n            sql = \"DELETE FROM customers WHERE id=%s\"\n            val = (customer_id,)  # one-element tuple: a bare string would be unpacked character by character\n            cursor.execute(sql, val)\n            connection.commit() \n            view_customer_list()\n            break \n        if customer_id not in del_ids: \n            print('You have selected an invalid customer ID, please try again!')\n            continue \n\n","repo_name":"KiranS1999/Cafe-Menu-UI","sub_path":"src/functional/db_functions/db_customers.py","file_name":"db_customers.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38587030070","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\n'''\nPrincipal Component Analysis\nReduce the samples X to K dimensions\n'''\n\ndef PCA(X, K, normal=False):\n    # N samples, M features\n    N, M = X.shape[0], X.shape[1]\n\n    # reduced sample matrix, N*K\n    X_ = np.zeros((N, K))\n\n    # compute the mean of every feature and center the sample set\n    # sum along columns and average\n    U = np.sum(X, axis=0) / N\n    # broadcasting is not used here, so the computed mean has to be tiled\n    X = X - U.reshape((1, M)).repeat(N, axis=0)\n    # *** with library functions ***\n    # U = np.mean(X, axis=0)\n    # X = X - U\n\n    # compute the variance of every feature and normalize the sample set\n    # variance normalization is not needed when the samples are images\n    if normal:\n        # square every feature value element-wise\n        X2 = np.power(X, 2)\n        # sum along columns, average, then take the square root (standard deviation)\n        cov = np.sqrt(np.sum(X2, axis=0) / N)\n        # tile first, then normalize\n        X = X / cov.reshape((1, M)).repeat(N, axis=0)\n\n    # compute the covariance matrix\n    E = np.zeros((M, M))\n    # accumulate x x^T\n    for i in range(N):\n        E = E + np.dot(np.asarray(X[i]).reshape((M, 1)), np.asarray(X[i]).reshape((1, M)))\n    # average\n    E = E / (N-1)\n    # *** with library functions ***\n    #E = np.cov(X, rowvar=0)\n\n    ############## eigendecomposition ############\n    # solve for the eigenvalues and eigenvectors directly\n    # lams, Us = np.linalg.eig(E)\n    # # sort in descending order via '-'\n    # indexs = np.argsort(-lams)\n    # # the eigenvectors come back as columns, so take the top-K columns (largest K eigenvalues)\n    # topk_Us = Us[:, indexs[:K]]\n    #\n    # # reduce\n    # X_ = np.dot(X, topk_Us)\n    # # reconstruct\n    # re_X = np.dot(X_, topk_Us.T) + U\n    # # eig may return results in complex form,\n    # # so the real part has to be extracted\n    # re_X = np.real(re_X)\n\n    ############ SVD decomposition ################\n    # factorize via SVD\n    W, singulars, V = np.linalg.svd(E)\n    # reduce\n    X_ = np.dot(X, W[:, :K])\n    # reconstruct\n    re_X = np.dot(X_, W[:, :K].T) + U\n    # return the reduced sample matrix and the reconstructed sample matrix\n    return X_, re_X\n","repo_name":"iseesaw/MachineLearning","sub_path":"PCA/PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"zh","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"}
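Editor's note: a hedged usage sketch for the PCA function above (synthetic data; assumes the function is in scope), showing the reduced shape and the reconstruction error:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 5))
X[:, 3] = 2.0 * X[:, 0]  # add correlation so PCA has structure to find

X_reduced, X_rebuilt = PCA(X, K=2)  # PCA as defined above
print(X_reduced.shape)                       # (50, 2)
print(float(np.mean((X - X_rebuilt) ** 2)))  # mean squared reconstruction error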
+{"seq_id":"37712653548","text":"# 452. Minimum Number of Arrows to Burst Balloons\nfrom typing import List\n\n\nclass Solution:\n    # failed attempt\n    # def findMinArrowShots(self, points: List[List[int]]) -> int:\n    #     if not points:\n    #         return 0\n    #     res = 1\n    #     points.sort()\n    #     i, j = 0, 1\n    #     while j < len(points):\n    #         if points[i][0] <= points[j][0] <= points[i][1]:\n    #             res += 0\n    #         else:\n    #             res += 1\n    #             i = j\n    #         j += 1\n    #     return res\n\n    # failed attempt\n    # def findMinArrowShots(self, points: List[List[int]]) -> int:\n    #     if not points:\n    #         return 0\n    #     res = 1\n    #     points.sort()\n    #     i, j = 0, 1\n    #     while j < len(points):\n    #         if points[j][0] > points[i][1]:\n    #             res += 1\n    #             i = j\n    #         j += 1\n    #     return res\n\n    # sort by left endpoint\n    # def findMinArrowShots(self, points: List[List[int]]) -> int:\n    #     if not points:\n    #         return 0\n    #     res = 1\n    #     points.sort()\n    #     min_right = points[0][1]\n    #     for j in range(1, len(points)):\n    #         if points[j][0] > min_right:\n    #             res += 1\n    #             min_right = points[j][1]\n    #         else:\n    #             min_right = min(min_right, points[j][1])\n    #     return res\n\n    # sort by right endpoint\n    def findMinArrowShots(self, points: List[List[int]]) -> int:\n        if not points:\n            return 0\n        points.sort(key=lambda x:x[1])\n        res = 1\n        pos = points[0][1]  # right endpoint\n        for point in points[1:]:\n            if point[0] > pos:\n                res += 1\n                pos = point[1]\n        return res\n\n    # def findMinArrowShots(self, points: List[List[int]]) -> int:\n    #     if not points:\n    #         return 0\n    #     points.sort(key=lambda x:x[1])\n    #     res = 1\n    #     pos = points[0][1]  # right endpoint\n    #     for left, right in points:\n    #         if left > pos:\n    #             res += 1\n    #             pos = right\n    #     return res\n\n\ns = Solution()\npoints = [[10,16],[2,8],[1,6],[7,12]]\nprint(s.findMinArrowShots(points))\n\npoints = [[1,2],[3,4],[5,6],[7,8]]\nprint(s.findMinArrowShots(points))\n\npoints = [[1,2],[2,3],[3,4],[4,5]]\nprint(s.findMinArrowShots(points))\n\npoints = [[1,2]]\nprint(s.findMinArrowShots(points))\n\npoints = [[2,3],[2,3]]\nprint(s.findMinArrowShots(points))\n\npoints = [[9,12],[1,10],[4,11],[8,12],[3,9],[6,9],[6,7]]\nprint(s.findMinArrowShots(points))\n","repo_name":"BruceHi/leetcode","sub_path":"month3/findMinArrowShots.py","file_name":"findMinArrowShots.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"40378169086","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import QueryDict\n\nimport json\nfrom ibm_watson import NaturalLanguageUnderstandingV1\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom ibm_watson.natural_language_understanding_v1 import Features,EntitiesOptions,ConceptsOptions,SyntaxOptions,KeywordsOptions\n\nfrom buscador.dicionario import Dicionario\na = Dicionario()\n# from templates.buscador import index\n\n\ndef index(request):\n    # return HttpResponse('

Hello World

')\n return render(request,'index.html')\n\ndef listagem(request):\n token= 'sQ3sDnp9bK7cm-m6jtwtrsJmUQ9Rk_E1eIUOwahXEjri'\n\n authenticator = IAMAuthenticator(token)\n natural_language_understanding = NaturalLanguageUnderstandingV1(\n version='2020-08-01',\n authenticator=authenticator\n )\n natural_language_understanding.set_service_url(\n 'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/c13d7eae-7792-45c5-931f-6a375605d467'\n )\n response = natural_language_understanding.analyze(\n text=request.GET['busca'],\n features=Features(\n entities=EntitiesOptions(limit=50),\n concepts=ConceptsOptions(),\n syntax=SyntaxOptions(sentences=True),\n keywords=KeywordsOptions(sentiment=True)\n\n )).get_result()\n\n valida_Busca = {\n 'Concepts':[],\n 'Keywords':[],\n 'Entities':[],\n 'Sintaxes':[]\n }\n\n concepts=[]\n for b in response['concepts']:\n if b['relevance'] > 0.4:\n concepts.append(b)\n valida_Busca['Concepts'].append(concepts)\n\n keywords =[]\n for c in response['keywords']:\n if c['relevance'] > 0.4:\n keywords.append(c)\n valida_Busca['Keywords'].append(keywords)\n\n entities=[]\n for e in response['entities']:\n if e[\"relevance\"] > 0.3:\n entities.append(e)\n valida_Busca['Entities'].append(entities)\n\n sitaxes=[]\n for d in response['syntax']['sentences']:\n sitaxes.append(d)\n valida_Busca['Sintaxes'].append(sitaxes)\n\n base = a.getDicionario()\n somaTotal=[]\n # print(base[0][\"Concepts\"])\n soma=0\n for i in base[0][\"Concepts\"]:\n # print(i)\n for compara in valida_Busca[\"Concepts\"]:\n for ele in compara:\n if i['text'] == ele[\"text\"]:\n soma+=1\n \n for index in range(3):\n soma=0\n for x in base[index][\"Concepts\"]:\n for compara in valida_Busca[\"Concepts\"]:\n for ele in compara:\n if x[\"text\"] in ele[\"text\"]:\n soma+=1\n for x in base[index][\"Keywords\"]:\n for compara in valida_Busca[\"Keywords\"]:\n for ele in compara:\n if x[\"text\"] in ele[\"text\"]:\n soma+=0.8\n for x in base[index][\"Entities\"]:\n for compara in valida_Busca[\"Entities\"]:\n for ele in compara:\n if x[\"text\"] in ele[\"text\"]:\n soma+=0.6\n for x in base[index][\"Sintaxes\"]:\n for compara in valida_Busca[\"Sintaxes\"]:\n for ele in compara:\n if x[\"text\"] in ele[\"text\"]:\n soma+=0.2\n somaTotal.append(soma)\n\n somatoria=0\n for calc in somaTotal:\n somatoria += calc\n somatoria/=len(somaTotal)\n\n \n retornoValores=[]\n for q in range(len(somaTotal)):\n retornoIndices=[]\n if somaTotal[q] >= somatoria:\n retornoIndices.append(q)\n retornoIndices.append(somaTotal[q])\n retornoValores.append(retornoIndices)\n\n print(retornoValores)\n retornoIndices = sorted(retornoValores, key= lambda valores:valores[1], reverse=True)\n print(retornoIndices)\n retorno={\n 'data':[]\n }\n retorno['data'].append({'buscado':request.GET['busca']})\n documento={\n 'lista':[]\n }\n for u in retornoIndices:\n documento['lista'].append({'documento':base[u[0]][\"Documento\"], 'resumo':base[u[0]][\"Resumo\"]})\n retorno['data'].append(documento)\n \n\n #return HttpResponse(json.dumps(retorno, indent=2))\n return render(request,'index.html',context=retorno)","repo_name":"ViniMapelli/buscador_tcc","sub_path":"buscador/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72842228105","text":"# -- coding: utf-8 --\n\"\"\"\n @time : 2023/9/6\n @file : TEST.py\n @author : zhenghao\n @software: PyCharm\n\"\"\"\nimport os\n\nimport pandas as 
pd\n\nnew_file_list = os.listdir(\"factor_res\")\n\nfor file in new_file_list:\n    new_df = pd.read_csv(f\"factor_res/{file}/{file}_val.csv\",index_col=0,parse_dates=[0])\n    old_df = pd.read_csv(f\"C:/Project/截面因子/factor_res/{file}/{file}_val.csv\",index_col=0,parse_dates=[0])\n    new_df = new_df.reindex(old_df.index)\n    diff = new_df - old_df\n    bias = diff.sum().sum()\n\n    print(f\"{file}:{bias}\")\n\n\n","repo_name":"zhenghaobaby/cta_factors_test","sub_path":"TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10718425541","text":"import numpy as np\nimport torch\nfrom typing import Tuple\nimport torch.nn as nn\n\nfrom torch.utils.data import DataLoader\nfrom deepgesture.Dataset.BlobDataset import gestureBlobMultiDataset, size_collate_fn\nfrom deepgesture.config import Config\n\n\nclass ConvNetStream(torch.nn.Module):\n    def __init__(self, optical_flow_stream=False, out_features=512) -> None:\n        super().__init__()\n        if not optical_flow_stream:\n            self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=96, kernel_size=5, stride=2)\n        else:\n            self.conv1 = torch.nn.Conv2d(in_channels=2 * 25, out_channels=96, kernel_size=5, stride=2)\n        self.conv2 = torch.nn.Conv2d(in_channels=96, out_channels=256, kernel_size=3, stride=2)\n        self.conv3 = torch.nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1)\n        self.conv4 = torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1)\n        self.conv5 = torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1)\n\n        self.linear1 = torch.nn.Linear(in_features=512 * 2 * 3, out_features=4096)\n        self.linear2 = torch.nn.Linear(in_features=4096, out_features=out_features)\n        self.dropout = torch.nn.Dropout(p=0.5)\n\n        self.pool = torch.nn.MaxPool2d(2)\n\n        self.softmax = torch.nn.Softmax(dim=1)\n        self.relu = torch.nn.ReLU()\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        x = self.conv1(x)\n        x = self.pool(x)\n        # print('Shape of output after conv {} is {}'.format(1, x.size()))\n        x = self.conv2(x)\n        x = self.pool(x)\n        # print('Shape of output after conv {} is {}'.format(2, x.size()))\n        x = self.conv3(x)\n        x = self.pool(x)\n        # print('Shape of output after conv {} is {}'.format(3, x.size()))\n        x = self.conv4(x)\n        x = self.pool(x)\n        # print(x.size())\n        # print('Shape of output after conv {} is {}'.format(4, x.size()))\n        # x = self.conv5(x)\n        # x = self.pool(x)\n        # print('Shape of output after conv {} is {}'.format(5, x.size()))\n\n        x = x.view(-1, 512 * x.size()[2] * x.size()[3])\n        x = self.linear1(x)\n        x = self.relu(x)\n        # x = self.dropout(x)\n\n        x = self.linear2(x)\n        # # x = self.dropout(x)\n\n        # x = self.softmax(x)\n        return x\n\n\nclass twoStreamNet(torch.nn.Module):\n    def __init__(self) -> None:\n        super().__init__()\n\n        # NOTE (editor): out_features=2048 per stream is required so the\n        # concatenation below yields 2 * 2048 = 4096 features, matching\n        # linear1; with the default of 512 per stream, torch.cat would\n        # produce 1024 features and the first linear layer would fail.\n        self.spatial_net_stream = ConvNetStream(optical_flow_stream=False, out_features=2048)\n        self.temporal_net_stream = ConvNetStream(optical_flow_stream=True, out_features=2048)\n\n        self.linear1 = torch.nn.Linear(in_features=2 * 2048, out_features=512)\n        # self.batch_norm = torch.nn.BatchNorm1d(512)\n        self.linear2 = torch.nn.Linear(in_features=512, out_features=15)\n        self.softmax = torch.nn.Softmax(dim=1)\n\n    def forward(self, x: Tuple[torch.Tensor]) -> torch.Tensor:\n        x1 = self.spatial_net_stream(x[0])\n        x2 = self.temporal_net_stream(x[1])\n        # print(x[0].size())\n\n        x_net = torch.cat((x1, x2), dim=1)\n        x_net = self.linear1(x_net)\n\n        x_net = self.linear2(x_net)\n\n        x_net = self.softmax(x_net)\n        return x_net\n\n\nclass 
encoderDecoder(nn.Module):\n def __init__(self, embedding_dim: int) -> None:\n super().__init__()\n self.conv_net_stream = ConvNetStream(optical_flow_stream=True, out_features=embedding_dim)\n self.decoder = torch.nn.Sequential(\n torch.nn.Linear(embedding_dim, 128),\n nn.ReLU(),\n torch.nn.Linear(128, 1024),\n nn.ReLU(),\n nn.BatchNorm1d(1024),\n nn.Linear(1024, 4096),\n nn.BatchNorm1d(4096),\n nn.ReLU(),\n nn.Linear(4096, 25 * 76),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv_net_stream(x)\n x = self.decoder(x)\n x = x.view(-1, 25, 1, 76)\n return x\n\n\nif __name__ == \"__main__\":\n # load dataset\n gesture_dataset = gestureBlobMultiDataset(blobs_folder_paths_list=[Config.blobs_dir])\n dataloader = DataLoader(dataset=gesture_dataset, batch_size=128, shuffle=False, collate_fn=size_collate_fn)\n\n net = encoderDecoder(embedding_dim=2048)\n net = net.train()\n if torch.cuda.is_available():\n net.cuda()\n\n # Data accessing examples\n opt, kin = next(iter(dataloader))\n opt, kin = opt.cuda(), kin.cuda()\n out = net(opt)\n\n print(f\"Output shape: {out.shape}\")\n print(f\"Optical flow: {opt.shape}\")\n print(f\"Kinematics: {kin.shape}\")\n","repo_name":"jabarragann/cs682-DeepLearning-Project","sub_path":"deepgesture/Models/EncoderDecoder.py","file_name":"EncoderDecoder.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"43484484242","text":"import logging\nimport os\nimport re\n\nlogger = logging.getLogger(__name__)\n\n\nclass ImproperlyConfigured(Exception):\n pass\n\n\nclass NoValue(object):\n\n def __repr__(self):\n return '<{0}>'.format(self.__class__.__name__)\n\n\nclass Env(object):\n ENVIRON = os.environ\n NOTSET = NoValue()\n BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1', '+')\n\n def __init__(self, env_file=None, debug=False, prefix='', lazy=False,\n defaults=None):\n self.env_file = env_file\n self.prefix = prefix\n self.lazy = lazy\n self.debug = debug\n self.scheme = defaults\n if not self.lazy:\n self.read_env()\n\n def __getattr__(self, var):\n return self.get_value(var)\n\n def __call__(self, var, cast=None, default=NOTSET, parse_default=False):\n return self.get_value(var, cast=cast, default=default, parse_default=parse_default)\n\n def __contains__(self, var):\n return var in self.ENVIRON\n\n # Shortcuts\n\n def str(self, var, default=NOTSET, multiline=False):\n \"\"\"\n :rtype: str\n \"\"\"\n value = self.get_value(var, default=default)\n if multiline:\n return value.replace('\\\\n', '\\n')\n return value\n\n def bytes(self, var, default=NOTSET, encoding='utf8'):\n \"\"\"\n :rtype: bytes\n \"\"\"\n return self.get_value(var, cast=str).encode(encoding)\n\n def bool(self, var, default=NOTSET):\n \"\"\"\n :rtype: bool\n \"\"\"\n return self.get_value(var, cast=bool, default=default)\n\n def int(self, var, default=NOTSET):\n \"\"\"\n :rtype: int\n \"\"\"\n return self.get_value(var, cast=int, default=default)\n\n def float(self, var, default=NOTSET):\n \"\"\"\n :rtype: float\n \"\"\"\n return self.get_value(var, cast=float, default=default)\n\n def get_value(self, var, cast=None, default=NOTSET, # noqa: C901\n parse_default=False, raw=False):\n \"\"\"Return value for given environment variable.\n\n :param var: Name of variable.\n :param cast: Type to cast return value as.\n :param default: If var not present in environ, return this instead.\n :param parse_default: force to parse default..\n\n :returns: Value from environment or 
default (if set)\n \"\"\"\n\n if raw:\n env_var = var\n else:\n env_var = self.prefix + var\n\n # logger.debug(f\"get '{env_var}' casted as '{cast}' with default '{default}'\")\n\n if var in self.scheme:\n var_info = self.scheme[var]\n\n try:\n has_default = len(var_info) == 2\n except TypeError:\n has_default = False\n\n if has_default:\n if not cast:\n cast = var_info[0]\n\n if default is self.NOTSET:\n try:\n default = var_info[1]\n except IndexError:\n pass\n else:\n if not cast:\n cast = var_info\n\n try:\n value = self.ENVIRON[env_var]\n except KeyError:\n if default is self.NOTSET:\n error_msg = \"Set the {} environment variable\".format(env_var)\n raise ImproperlyConfigured(error_msg)\n\n value = default\n\n # Resolve any proxied values\n if hasattr(value, 'startswith') and '${' in value:\n m = re.search(r'(\\${(.*?)})', value)\n while m:\n value = re.sub(re.escape(m.group(1)), self.get_value(m.group(2), raw=True), value)\n m = re.search(r'(\\${(.*?)})', value)\n\n if value != default or (parse_default and value):\n value = self.parse_value(value, cast)\n\n logger.debug(\"get '{}' returns '{}'\".format(var, value))\n return value\n\n # Class and static methods\n\n @classmethod # noqa: C901\n def parse_value(cls, value, cast):\n \"\"\"Parse and cast provided value\n\n :param value: Stringed value.\n :param cast: Type to cast return value as.\n\n :returns: Casted value\n \"\"\"\n if cast is None:\n return value\n elif cast is bool:\n try:\n value = int(value) != 0\n except ValueError:\n value = value.lower() in cls.BOOLEAN_TRUE_STRINGS\n elif isinstance(cast, list):\n value = list(map(cast[0], [x for x in value.split(',') if x]))\n elif isinstance(cast, tuple):\n val = value.strip('(').strip(')').split(',')\n value = tuple(map(cast[0], [x for x in val if x]))\n elif isinstance(cast, dict):\n key_cast = cast.get('key', str)\n value_cast = cast.get('value', str)\n value_cast_by_key = cast.get('cast', dict())\n value = dict(map(\n lambda kv: (\n key_cast(kv[0]),\n cls.parse_value(kv[1], value_cast_by_key.get(kv[0], value_cast))\n ),\n [val.split('=') for val in value.split(';') if val]\n ))\n elif cast is dict:\n value = dict([val.split('=') for val in value.split(',') if val])\n elif cast is list:\n value = [x for x in value.split(',') if x]\n elif cast is tuple:\n val = value.strip('(').strip(')').split(',')\n value = tuple([x for x in val if x])\n elif cast is float:\n # clean string\n float_str = re.sub(r'[^\\d,\\.]', '', value)\n # split for avoid thousand separator and different locale comma/dot symbol\n parts = re.split(r'[,\\.]', float_str)\n if len(parts) == 1:\n float_str = parts[0]\n else:\n float_str = \"{0}.{1}\".format(''.join(parts[0:-1]), parts[-1])\n value = float(float_str)\n else:\n value = cast(value)\n return value\n\n def get_content(self):\n if self.env_file is None:\n self.env_file = os.environ.get('ENV_FILE', os.path.join(os.curdir, '.env'))\n\n if hasattr(self.env_file, 'read'):\n content = self.env_file.read()\n elif os.path.exists(self.env_file):\n with open(self.env_file) as f:\n content = f.read()\n else:\n # warnings.warn(\n # \"%s doesn't exist - if you're not configuring your \"\n # \"environment separately, create one.\" % self.env_file,\n # stacklevel=0)\n content = ''\n logger.debug('Read environment variables from: {0}'.format(self.env_file))\n return content\n\n def read_env(self):\n content = self.get_content()\n dot_values = {}\n for line in content.splitlines():\n m1 = re.match(r'\\A(?:export )?([A-Za-z_0-9]+)=(.*)\\Z', line)\n if m1:\n key, val 
= m1.group(1), m1.group(2)\n m2 = re.match(r\"\\A'(.*)'\\Z\", val)\n if m2:\n val = m2.group(1)\n m3 = re.match(r'\\A\"(.*)\"\\Z', val)\n if m3:\n val = re.sub(r'\\\\(.)', r'\\1', m3.group(1))\n if key in self.scheme:\n dot_values[key] = str(val)\n # self.ENVIRON.setdefault(key, str(val))\n\n # set defaults\n # for key, value in overrides.items():\n # cls.ENVIRON.setdefault(key, value)\n #\n for key, value in self.scheme.items():\n if isinstance(value, (list, tuple)):\n cast, default = value\n else:\n cast = type(value)\n default = value\n self.scheme[key] = (cast, default)\n os.environ.setdefault(key, dot_values.get(key, str(default)))\n #\n # os.environ.setdefault(key, str(default))\n","repo_name":"unicef/datamart-uniset","sub_path":"src/uniset/environ.py","file_name":"environ.py","file_ext":"py","file_size_in_byte":7933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41209387323","text":"import logging\nimport uuid\nfrom datetime import timedelta\nfrom typing import Dict, List, Optional, Type\n\nfrom django.db import IntegrityError, models, transaction\nfrom django.utils import dateformat, timezone\nfrom stripe.api_resources.abstract.api_resource import APIResource\nfrom stripe.error import InvalidRequestError\nfrom stripe.util import convert_to_stripe_object\n\nfrom ..exceptions import ImpossibleAPIRequest\nfrom ..fields import (\n JSONField,\n StripeDateTimeField,\n StripeForeignKey,\n StripeIdField,\n StripePercentField,\n)\nfrom ..managers import StripeModelManager\nfrom ..settings import djstripe_settings\nfrom ..utils import get_id_from_stripe_data\n\nlogger = logging.getLogger(__name__)\n\n\nclass StripeBaseModel(models.Model):\n stripe_class: Type[APIResource] = APIResource\n\n djstripe_created = models.DateTimeField(auto_now_add=True, editable=False)\n djstripe_updated = models.DateTimeField(auto_now=True, editable=False)\n stripe_data = JSONField(default=dict)\n\n class Meta:\n abstract = True\n\n @classmethod\n def get_expand_params(cls, api_key, **kwargs):\n \"\"\"Populate `expand` kwarg in stripe api calls by updating the kwargs passed.\"\"\"\n # To avoid Circular Import Error\n from djstripe.management.commands.djstripe_sync_models import Command\n\n # As api_list is a class method we will never get the stripe account unless we\n # default to the owner account of the api_key. But even that is pointless as we only care about expand\n # So no need to make a call to Stripe and again do an account object sync which would make\n # no sense if this is for a Stripe Connected Account\n expand = Command.get_default_list_kwargs(\n cls, {kwargs.get(\"stripe_account\", \"acct_fake\")}, api_key\n )[0].get(\"expand\", [])\n\n # Add expand to the provided list\n if kwargs.get(\"expand\"):\n kwargs[\"expand\"].extend(expand)\n else:\n kwargs[\"expand\"] = expand\n\n # Keep only unique elements\n kwargs[\"expand\"] = list(set(kwargs[\"expand\"]))\n\n return kwargs\n\n @classmethod\n def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n \"\"\"\n Call the stripe API's list operation for this model.\n\n :param api_key: The api key to use for this request. 
\\\n Defaults to djstripe_settings.STRIPE_SECRET_KEY.\n :type api_key: string\n\n See Stripe documentation for accepted kwargs for each object.\n\n :returns: an iterator over all items in the query\n \"\"\"\n # Update kwargs with `expand` param\n kwargs = cls.get_expand_params(api_key, **kwargs)\n\n return cls.stripe_class.list(\n api_key=api_key,\n stripe_version=djstripe_settings.STRIPE_API_VERSION,\n **kwargs,\n ).auto_paging_iter()\n\n\nclass StripeModel(StripeBaseModel):\n # This must be defined in descendants of this model/mixin\n # e.g. Event, Charge, Customer, etc.\n expand_fields: List[str] = []\n stripe_dashboard_item_name = \"\"\n\n objects = models.Manager()\n stripe_objects = StripeModelManager()\n\n djstripe_id = models.BigAutoField(\n verbose_name=\"ID\", serialize=False, primary_key=True\n )\n id = StripeIdField(unique=True)\n\n djstripe_owner_account: Optional[StripeForeignKey] = StripeForeignKey(\n \"djstripe.Account\",\n on_delete=models.CASCADE,\n to_field=\"id\",\n null=True,\n blank=True,\n help_text=\"The Stripe Account this object belongs to.\",\n )\n\n livemode = models.BooleanField(\n null=True,\n default=None,\n blank=True,\n help_text=(\n \"Null here indicates that the livemode status is unknown or was previously\"\n \" unrecorded. Otherwise, this field indicates whether this record comes\"\n \" from Stripe test mode or live mode operation.\"\n ),\n )\n created = StripeDateTimeField(\n null=True,\n blank=True,\n help_text=\"The datetime this object was created in stripe.\",\n )\n metadata = JSONField(\n null=True,\n blank=True,\n help_text=(\n \"A set of key/value pairs that you can attach to an object. \"\n \"It can be useful for storing additional information about an object in \"\n \"a structured format.\"\n ),\n )\n description = models.TextField(\n null=True, blank=True, help_text=\"A description of this object.\"\n )\n\n class Meta(StripeBaseModel.Meta):\n abstract = True\n get_latest_by = \"created\"\n\n def _get_base_stripe_dashboard_url(self):\n owner_path_prefix = (\n (self.djstripe_owner_account.id + \"/\")\n if self.djstripe_owner_account\n else \"\"\n )\n suffix = \"test/\" if not self.livemode else \"\"\n return f\"https://dashboard.stripe.com/{owner_path_prefix}{suffix}\"\n\n def get_stripe_dashboard_url(self) -> str:\n \"\"\"Get the stripe dashboard url for this object.\"\"\"\n if not self.stripe_dashboard_item_name or not self.id:\n return \"\"\n else:\n base_url = self._get_base_stripe_dashboard_url()\n item = self.stripe_dashboard_item_name\n return f\"{base_url}{item}/{self.id}\"\n\n @property\n def default_api_key(self) -> str:\n # If the class is abstract (StripeModel), fall back to default key.\n if not self._meta.abstract:\n if self.djstripe_owner_account:\n return self.djstripe_owner_account.get_default_api_key(self.livemode)\n return djstripe_settings.get_default_api_key(self.livemode)\n\n def _get_stripe_account_id(self, api_key=None) -> Optional[str]:\n \"\"\"\n Call the stripe API's retrieve operation for this model.\n\n :param api_key: The api key to use for this request. 
\\\n Defaults to djstripe_settings.STRIPE_SECRET_KEY.\n :type api_key: string\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :type stripe_account: string\n \"\"\"\n from djstripe.models import Account\n\n api_key = api_key or self.default_api_key\n\n try:\n djstripe_owner_account = self.djstripe_owner_account\n if djstripe_owner_account is not None:\n return djstripe_owner_account.id\n except (AttributeError, KeyError, ValueError):\n pass\n\n # Get reverse foreign key relations to Account in case we need to\n # retrieve ourselves using that Account ID.\n reverse_account_relations = (\n field\n for field in self._meta.get_fields(include_parents=True)\n if field.is_relation\n and field.one_to_many\n and field.related_model is Account\n )\n\n # Handle case where we have a reverse relation to Account and should pass\n # that account ID to the retrieve call.\n for field in reverse_account_relations:\n # Grab the related object, using the first one we find.\n reverse_lookup_attr = field.get_accessor_name()\n try:\n account = getattr(self, reverse_lookup_attr).first()\n except ValueError:\n if isinstance(self, Account):\n # return the id if self is the Account model itself.\n return self.id\n else:\n if account is not None:\n return account.id\n\n return None\n\n def api_retrieve(self, api_key=None, stripe_account=None):\n \"\"\"\n Call the stripe API's retrieve operation for this model.\n\n :param api_key: The api key to use for this request. \\\n Defaults to djstripe_settings.STRIPE_SECRET_KEY.\n :type api_key: string\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :type stripe_account: string\n \"\"\"\n api_key = api_key or self.default_api_key\n\n # Prefer passed in stripe_account if set.\n if not stripe_account:\n stripe_account = self._get_stripe_account_id(api_key)\n\n return self.stripe_class.retrieve(\n id=self.id,\n api_key=api_key,\n stripe_version=djstripe_settings.STRIPE_API_VERSION,\n expand=self.expand_fields,\n stripe_account=stripe_account,\n )\n\n @classmethod\n def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):\n \"\"\"\n Call the stripe API's create operation for this model.\n\n :param api_key: The api key to use for this request. \\\n Defaults to djstripe_settings.STRIPE_SECRET_KEY.\n :type api_key: string\n \"\"\"\n\n return cls.stripe_class.create(\n api_key=api_key,\n stripe_version=djstripe_settings.STRIPE_API_VERSION,\n **kwargs,\n )\n\n def _api_delete(self, api_key=None, stripe_account=None, **kwargs):\n \"\"\"\n Call the stripe API's delete operation for this model\n\n :param api_key: The api key to use for this request. 
\\\n Defaults to djstripe_settings.STRIPE_SECRET_KEY.\n :type api_key: string\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :type stripe_account: string\n \"\"\"\n api_key = api_key or self.default_api_key\n\n # Prefer passed in stripe_account if set.\n if not stripe_account:\n stripe_account = self._get_stripe_account_id(api_key)\n\n return self.stripe_class.delete(\n self.id,\n api_key=api_key,\n stripe_account=stripe_account,\n stripe_version=djstripe_settings.STRIPE_API_VERSION,\n **kwargs,\n )\n\n def _api_update(self, api_key=None, stripe_account=None, **kwargs):\n \"\"\"\n Call the stripe API's modify operation for this model\n\n :param api_key: The api key to use for this request.\n Defaults to djstripe_settings.STRIPE_SECRET_KEY.\n :type api_key: string\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :type stripe_account: string\n \"\"\"\n api_key = api_key or self.default_api_key\n\n # Prefer passed in stripe_account if set.\n if not stripe_account:\n stripe_account = self._get_stripe_account_id(api_key)\n\n return self.stripe_class.modify(\n self.id,\n api_key=api_key,\n stripe_account=stripe_account,\n stripe_version=djstripe_settings.STRIPE_API_VERSION,\n **kwargs,\n )\n\n @classmethod\n def _manipulate_stripe_object_hook(cls, data):\n \"\"\"\n Gets called by this object's stripe object conversion method just before\n conversion.\n Use this to populate custom fields in a StripeModel from stripe data.\n \"\"\"\n return data\n\n @classmethod\n def _find_owner_account(cls, data, api_key=djstripe_settings.STRIPE_SECRET_KEY):\n \"\"\"\n Fetches the Stripe Account (djstripe_owner_account model field)\n linked to the class, cls.\n Tries to retreive using the Stripe_account if given.\n Otherwise uses the api_key.\n \"\"\"\n from .account import Account\n\n # try to fetch by stripe_account. Also takes care of Stripe Connected Accounts\n if data:\n # case of Webhook Event Trigger\n if data.get(\"object\") == \"event\":\n # if account key exists and has a not null value\n stripe_account_id = get_id_from_stripe_data(data.get(\"account\"))\n if stripe_account_id:\n return Account._get_or_retrieve(\n id=stripe_account_id, api_key=api_key\n )\n\n else:\n stripe_account = getattr(data, \"stripe_account\", None)\n stripe_account_id = get_id_from_stripe_data(stripe_account)\n if stripe_account_id:\n return Account._get_or_retrieve(\n id=stripe_account_id, api_key=api_key\n )\n\n # try to fetch by the given api_key.\n return Account.get_or_retrieve_for_api_key(api_key)\n\n @classmethod\n def _stripe_object_to_record(\n cls,\n data: dict,\n current_ids=None,\n pending_relations: list = None,\n stripe_account: str = None,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n ) -> Dict:\n \"\"\"\n This takes an object, as it is formatted in Stripe's current API for our object\n type. In return, it provides a dict. The dict can be used to create a record or\n to update a record\n\n This function takes care of mapping from one field name to another, converting\n from cents to dollars, converting timestamps, and eliminating unused fields\n (so that an objects.create() call would not fail).\n\n :param data: the object, as sent by Stripe. 
Parsed from JSON, into a dict\n :param current_ids: stripe ids of objects that are currently being processed\n :type current_ids: set\n :param pending_relations: list of tuples of relations to be attached post-save\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :return: All the members from the input, translated, mutated, etc\n \"\"\"\n from .webhooks import WebhookEndpoint\n\n manipulated_data = cls._manipulate_stripe_object_hook(data)\n if not cls.is_valid_object(manipulated_data):\n raise ValueError(\n \"Trying to fit a %r into %r. Aborting.\"\n % (manipulated_data.get(\"object\", \"\"), cls.__name__)\n )\n\n # By default we put the raw stripe data in the stripe_data json field\n result = {\"stripe_data\": data}\n\n if current_ids is None:\n current_ids = set()\n\n # Iterate over all the fields that we know are related to Stripe,\n # let each field work its own magic\n ignore_fields = [\n \"date_purged\",\n \"subscriber\",\n \"stripe_data\",\n ] # XXX: Customer hack\n\n # get all forward and reverse relations for given cls\n for field in cls._meta.get_fields():\n if field.name.startswith(\"djstripe_\") or field.name in ignore_fields:\n continue\n\n # todo add support reverse ManyToManyField sync\n if isinstance(\n field, (models.ManyToManyRel, models.ManyToOneRel)\n ) and not isinstance(field, models.OneToOneRel):\n # We don't currently support syncing from\n # reverse side of Many relationship\n continue\n\n # todo for ManyToManyField one would also need to handle the case of an intermediate model being used\n # todo add support ManyToManyField sync\n if field.many_to_many:\n # We don't currently support syncing ManyToManyField\n continue\n\n # will work for Forward FK and OneToOneField relations and reverse OneToOneField relations\n if isinstance(field, (models.ForeignKey, models.OneToOneRel)):\n field_data, skip, is_nulled = cls._stripe_object_field_to_foreign_key(\n field=field,\n manipulated_data=manipulated_data,\n current_ids=current_ids,\n pending_relations=pending_relations,\n stripe_account=stripe_account,\n api_key=api_key,\n )\n\n if skip and not is_nulled:\n continue\n else:\n if hasattr(field, \"stripe_to_db\"):\n field_data = field.stripe_to_db(manipulated_data)\n else:\n field_data = manipulated_data.get(field.name)\n\n if (\n isinstance(field, (models.CharField, models.TextField))\n and field_data is None\n ):\n # do not add empty secret field for WebhookEndpoint model\n # as stripe does not return the secret except for the CREATE call\n if cls is WebhookEndpoint and field.name == \"secret\":\n continue\n else:\n # TODO - this applies to StripeEnumField as well, since it\n # sub-classes CharField, is that intentional?\n field_data = \"\"\n\n result[field.name] = field_data\n\n # For all objects other than the account object itself, get the API key\n # attached to the request, and get the matching Account for that key.\n owner_account = cls._find_owner_account(data, api_key=api_key)\n if owner_account:\n result[\"djstripe_owner_account\"] = owner_account\n\n return result\n\n @classmethod\n def _stripe_object_field_to_foreign_key(\n cls,\n field,\n manipulated_data,\n current_ids=None,\n pending_relations=None,\n stripe_account=None,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n ):\n \"\"\"\n This converts a stripe API field to the dj stripe object it references,\n so that foreign keys can be connected up automatically.\n\n :param field:\n :type field: models.ForeignKey\n :param manipulated_data:\n :type 
manipulated_data: dict\n :param current_ids: stripe ids of objects that are currently being processed\n :type current_ids: set\n :param pending_relations: list of tuples of relations to be attached post-save\n :type pending_relations: list\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :type stripe_account: string\n :return:\n \"\"\"\n from djstripe.models import DjstripePaymentMethod\n\n field_data = None\n field_name = field.name\n refetch = False\n skip = False\n # a flag to indicate if the given field is null upstream on Stripe\n is_nulled = False\n\n if current_ids is None:\n current_ids = set()\n\n if issubclass(field.related_model, (StripeModel, DjstripePaymentMethod)):\n if field_name in manipulated_data:\n raw_field_data = manipulated_data.get(field_name)\n\n # field's value is None. Skip syncing but set as None.\n # Otherwise nulled FKs sync gets skipped.\n if not raw_field_data:\n is_nulled = True\n skip = True\n\n else:\n # field does not exist in manipulated_data dict. Skip Syncing\n skip = True\n raw_field_data = None\n\n id_ = get_id_from_stripe_data(raw_field_data)\n\n if id_ == raw_field_data:\n # A field like {\"subscription\": \"sub_6lsC8pt7IcFpjA\", ...}\n refetch = True\n else:\n # A field like {\"subscription\": {\"id\": sub_6lsC8pt7IcFpjA\", ...}}\n pass\n\n if id_ in current_ids:\n # this object is currently being fetched, don't try to fetch again,\n # to avoid recursion instead, record the relation that should be\n # created once \"object_id\" object exists\n if pending_relations is not None:\n object_id = manipulated_data[\"id\"]\n pending_relations.append((object_id, field, id_))\n skip = True\n\n # sync only if field exists and is not null\n if not skip and not is_nulled:\n # add the id of the current object to the list\n # of ids being processed.\n # This will avoid infinite recursive syncs in case a relatedmodel\n # requests the same object\n current_ids.add(id_)\n\n try:\n (\n field_data,\n _,\n ) = field.related_model._get_or_create_from_stripe_object(\n manipulated_data,\n field_name,\n refetch=refetch,\n current_ids=current_ids,\n pending_relations=pending_relations,\n stripe_account=stripe_account,\n api_key=api_key,\n )\n except ImpossibleAPIRequest:\n # Found to happen in the following situation:\n # Customer has a `default_source` set to a `card_` object,\n # and neither the Customer nor the Card are present in db.\n # This skip is a hack, but it will prevent a crash.\n skip = True\n\n # Remove the id of the current object from the list\n # after it has been created or retrieved\n current_ids.remove(id_)\n\n else:\n # eg PaymentMethod, handled in hooks\n skip = True\n\n return field_data, skip, is_nulled\n\n @classmethod\n def is_valid_object(cls, data):\n \"\"\"\n Returns whether the data is a valid object for the class\n \"\"\"\n # .OBJECT_NAME will not exist on the base type itself\n object_name: str = getattr(cls.stripe_class, \"OBJECT_NAME\", \"\")\n if not object_name:\n return False\n return data and data.get(\"object\") == object_name\n\n def _attach_objects_hook(\n self, cls, data, api_key=djstripe_settings.STRIPE_SECRET_KEY, current_ids=None\n ):\n \"\"\"\n Gets called by this object's create and sync methods just before save.\n Use this to populate fields before the model is saved.\n\n :param cls: The target class for the instantiated object.\n :param data: The data dictionary received from the Stripe API.\n :type data: dict\n :param current_ids: stripe ids of objects that are currently 
being processed\n :type current_ids: set\n \"\"\"\n\n pass\n\n def _attach_objects_post_save_hook(\n self,\n cls,\n data,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n pending_relations=None,\n ):\n \"\"\"\n Gets called by this object's create and sync methods just after save.\n Use this to populate fields after the model is saved.\n\n :param cls: The target class for the instantiated object.\n :param data: The data dictionary received from the Stripe API.\n :type data: dict\n \"\"\"\n\n unprocessed_pending_relations = []\n if pending_relations is not None:\n for post_save_relation in pending_relations:\n object_id, field, id_ = post_save_relation\n\n if self.id == id_:\n # the target instance now exists\n target = field.model.objects.get(id=object_id)\n setattr(target, field.name, self)\n if isinstance(field, models.OneToOneRel):\n # this is a reverse relationship, so the relation exists on self\n self.save()\n else:\n # this is a forward relation on the target,\n # so we need to save it\n target.save()\n\n # reload so that indirect relations back to this object\n # eg self.charge.invoice = self are set\n # TODO - reverse the field reference here to avoid hitting the DB?\n self.refresh_from_db()\n else:\n unprocessed_pending_relations.append(post_save_relation)\n\n if len(pending_relations) != len(unprocessed_pending_relations):\n # replace in place so passed in list is updated in calling method\n pending_relations[:] = unprocessed_pending_relations\n\n @classmethod\n def _create_from_stripe_object(\n cls,\n data,\n current_ids=None,\n pending_relations=None,\n save=True,\n stripe_account=None,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n ):\n \"\"\"\n Instantiates a model instance using the provided data object received\n from Stripe, and saves it to the database if specified.\n\n :param data: The data dictionary received from the Stripe API.\n :type data: dict\n :param current_ids: stripe ids of objects that are currently being processed\n :type current_ids: set\n :param pending_relations: list of tuples of relations to be attached post-save\n :type pending_relations: list\n :param save: If True, the object is saved after instantiation.\n :type save: bool\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :type stripe_account: string\n :returns: The instantiated object.\n \"\"\"\n stripe_data = cls._stripe_object_to_record(\n data,\n current_ids=current_ids,\n pending_relations=pending_relations,\n stripe_account=stripe_account,\n api_key=api_key,\n )\n try:\n id_ = get_id_from_stripe_data(stripe_data)\n if id_ is not None:\n instance = cls.stripe_objects.get(id=id_)\n else:\n # Raise error on purpose to resume the _create_from_stripe_object flow\n raise cls.DoesNotExist\n\n except cls.DoesNotExist:\n # try to create iff instance doesn't already exist in the DB\n # TODO dictionary unpacking will not work if cls has any ManyToManyField\n instance = cls(**stripe_data)\n\n instance._attach_objects_hook(\n cls, data, api_key=api_key, current_ids=current_ids\n )\n\n if save:\n instance.save()\n\n instance._attach_objects_post_save_hook(\n cls, data, api_key=api_key, pending_relations=pending_relations\n )\n\n return instance\n\n @classmethod\n def _get_or_create_from_stripe_object(\n cls,\n data,\n field_name=\"id\",\n refetch=True,\n current_ids=None,\n pending_relations=None,\n save=True,\n stripe_account=None,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n ):\n \"\"\"\n\n :param data:\n :param field_name:\n :param refetch:\n 
:param current_ids: stripe ids of objects that are currently being processed\n :type current_ids: set\n :param pending_relations: list of tuples of relations to be attached post-save\n :type pending_relations: list\n :param save:\n :param stripe_account: The optional connected account \\\n for which this request is being made.\n :type stripe_account: string\n :return:\n :rtype: cls, bool\n \"\"\"\n field = data.get(field_name)\n is_nested_data = field_name != \"id\"\n should_expand = False\n\n if pending_relations is None:\n pending_relations = []\n\n id_ = get_id_from_stripe_data(field)\n\n if not field:\n # An empty field - We need to return nothing here because there is\n # no way of knowing what needs to be fetched!\n raise RuntimeError(\n f\"dj-stripe encountered an empty field {cls.__name__}.{field_name} =\"\n f\" {field}\"\n )\n elif id_ == field:\n # A field like {\"subscription\": \"sub_6lsC8pt7IcFpjA\", ...}\n # We'll have to expand if the field is not \"id\" (= is nested)\n should_expand = is_nested_data\n else:\n # A field like {\"subscription\": {\"id\": sub_6lsC8pt7IcFpjA\", ...}}\n data = field\n\n try:\n return cls.stripe_objects.get(id=id_), False\n except cls.DoesNotExist:\n if is_nested_data and refetch:\n # This is what `data` usually looks like:\n # {\"id\": \"cus_XXXX\", \"default_source\": \"card_XXXX\"}\n # Leaving the default field_name (\"id\") will get_or_create the customer.\n # If field_name=\"default_source\", we get_or_create the card instead.\n cls_instance = cls(id=id_)\n try:\n data = cls_instance.api_retrieve(\n stripe_account=stripe_account, api_key=api_key\n )\n except InvalidRequestError as e:\n if \"a similar object exists in\" in str(e):\n # HACK around a Stripe bug.\n # When a File is retrieved from the Account object,\n # a mismatch between live and test mode is possible depending\n # on whether the file (usually the logo) was uploaded in live\n # or test. Reported to Stripe in August 2020.\n # Context: https://github.com/dj-stripe/dj-stripe/issues/830\n pass\n elif \"No such PaymentMethod:\" in str(e):\n # payment methods (card_… etc) can be irretrievably deleted,\n # but still present during sync. For example, if a refund is\n # issued on a charge whose payment method has been deleted.\n return None, False\n else:\n raise\n should_expand = False\n\n # The next thing to happen will be the \"create from stripe object\" call.\n # At this point, if we don't have data to start with (field is a str),\n # *and* we didn't refetch by id, then `should_expand` is True and we\n # don't have the data to actually create the object.\n # If this happens when syncing Stripe data, it's a djstripe bug. Report it!\n if should_expand:\n raise ValueError(f\"No data to create {cls.__name__} from {field_name}\")\n\n try:\n # We wrap the `_create_from_stripe_object` in a transaction to\n # avoid TransactionManagementError on subsequent queries in case\n # of the IntegrityError catch below. See PR #903\n with transaction.atomic():\n return (\n cls._create_from_stripe_object(\n data,\n current_ids=current_ids,\n pending_relations=pending_relations,\n save=save,\n stripe_account=stripe_account,\n api_key=api_key,\n ),\n True,\n )\n except IntegrityError:\n # Handle the race condition that something else created the object\n # after the `get` and before `_create_from_stripe_object`.\n # This is common during webhook handling, since Stripe sends\n # multiple webhook events simultaneously,\n # each of which will cause recursive syncs. 
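The except IntegrityError branch above is the classic race-safe get-or-create: try the read, attempt the create inside an atomic block, and on a uniqueness failure re-read, because a concurrent writer won the race. Its general shape, as a hedged Django sketch over a generic model (not the dj-stripe internals):

from django.db import IntegrityError, transaction

def get_or_create_race_safe(model_cls, pk, defaults):
    try:
        return model_cls.objects.get(id=pk), False
    except model_cls.DoesNotExist:
        try:
            # atomic block keeps later queries usable if the create fails
            with transaction.atomic():
                return model_cls.objects.create(id=pk, **defaults), True
        except IntegrityError:
            # someone else created it between the get and the create
            return model_cls.objects.get(id=pk), False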
See issue #429\n return cls.stripe_objects.get(id=id_), False\n\n @classmethod\n def _stripe_object_to_customer(\n cls,\n target_cls,\n data,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n current_ids=None,\n ):\n \"\"\"\n Search the given manager for the Customer matching this object's\n ``customer`` field.\n :param target_cls: The target class\n :type target_cls: Customer\n :param data: stripe object\n :type data: dict\n :param current_ids: stripe ids of objects that are currently being processed\n :type current_ids: set\n \"\"\"\n\n if \"customer\" in data and data[\"customer\"]:\n return target_cls._get_or_create_from_stripe_object(\n data, \"customer\", current_ids=current_ids, api_key=api_key\n )[0]\n\n @classmethod\n def _stripe_object_to_default_tax_rates(\n cls, target_cls, data, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Retrieves TaxRates for a Subscription or Invoice\n :param target_cls:\n :param data:\n :param instance:\n :type instance: Union[djstripe.models.Invoice, djstripe.models.Subscription]\n :return:\n \"\"\"\n tax_rates = []\n\n for tax_rate_data in data.get(\"default_tax_rates\", []):\n tax_rate, _ = target_cls._get_or_create_from_stripe_object(\n tax_rate_data, refetch=False, api_key=api_key\n )\n tax_rates.append(tax_rate)\n\n return tax_rates\n\n @classmethod\n def _stripe_object_to_tax_rates(\n cls, target_cls, data, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Retrieves TaxRates for a SubscriptionItem or InvoiceItem\n :param target_cls:\n :param data:\n :return:\n \"\"\"\n tax_rates = []\n\n for tax_rate_data in data.get(\"tax_rates\", []):\n tax_rate, _ = target_cls._get_or_create_from_stripe_object(\n tax_rate_data, refetch=False, api_key=api_key\n )\n tax_rates.append(tax_rate)\n\n return tax_rates\n\n @classmethod\n def _stripe_object_set_total_tax_amounts(\n cls, target_cls, data, instance, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Set total tax amounts on Invoice instance\n :param target_cls:\n :param data:\n :param instance:\n :type instance: djstripe.models.Invoice\n :return:\n \"\"\"\n from .billing import TaxRate\n\n pks = []\n\n for tax_amount_data in data.get(\"total_tax_amounts\", []):\n tax_rate_data = tax_amount_data[\"tax_rate\"]\n if isinstance(tax_rate_data, str):\n tax_rate_data = {\"tax_rate\": tax_rate_data}\n\n tax_rate, _ = TaxRate._get_or_create_from_stripe_object(\n tax_rate_data,\n field_name=\"tax_rate\",\n refetch=True,\n api_key=api_key,\n )\n tax_amount, _ = target_cls.objects.update_or_create(\n invoice=instance,\n tax_rate=tax_rate,\n defaults={\n \"amount\": tax_amount_data[\"amount\"],\n \"inclusive\": tax_amount_data[\"inclusive\"],\n },\n )\n\n pks.append(tax_amount.pk)\n\n instance.total_tax_amounts.exclude(pk__in=pks).delete()\n\n @classmethod\n def _stripe_object_to_line_items(\n cls, target_cls, data, invoice, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Retrieves LineItems for an invoice.\n\n If the line item doesn't exist already then it is created.\n\n If the invoice is an upcoming invoice that doesn't persist to the\n database (i.e. 
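The tax-amount sync above uses an "upsert then prune" shape that recurs in the helpers that follow: remember the pk of every child seen upstream, then delete local children outside that set. A generic sketch, assuming a hypothetical Model with parent/key/amount fields:

def sync_and_prune(Model, parent, upstream_items):
    kept_pks = []
    for item in upstream_items:
        obj, _ = Model.objects.update_or_create(
            parent=parent, key=item["key"],
            defaults={"amount": item["amount"]},
        )
        kept_pks.append(obj.pk)
    # children that no longer appear upstream are stale: drop them
    Model.objects.filter(parent=parent).exclude(pk__in=kept_pks).delete()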
ephemeral) then the line items are also not saved.\n\n :param target_cls: The target class to instantiate per line item.\n :type target_cls: Type[djstripe.models.LineItem]\n :param data: The data dictionary received from the Stripe API.\n :type data: dict\n :param invoice: The invoice object that should hold the line items.\n :type invoice: ``djstripe.models.Invoice``\n \"\"\"\n lines = data.get(\"lines\")\n if not lines:\n return []\n\n lineitems = []\n for line in lines.auto_paging_iter():\n if invoice.id:\n save = True\n line.setdefault(\"invoice\", invoice.id)\n\n else:\n # Don't save invoice items for ephemeral invoices\n save = False\n\n line.setdefault(\"customer\", invoice.customer.id)\n line.setdefault(\"date\", int(dateformat.format(invoice.created, \"U\")))\n\n item, _ = target_cls._get_or_create_from_stripe_object(\n line, refetch=False, save=save, api_key=api_key\n )\n lineitems.append(item)\n\n return lineitems\n\n @classmethod\n def _stripe_object_to_subscription_items(\n cls, target_cls, data, subscription, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Retrieves SubscriptionItems for a subscription.\n\n If the subscription item doesn't exist already then it is created.\n\n :param target_cls: The target class to instantiate per invoice item.\n :type target_cls: Type[djstripe.models.SubscriptionItem]\n :param data: The data dictionary received from the Stripe API.\n :type data: dict\n :param subscription: The subscription object that should hold the items.\n :type subscription: djstripe.models.Subscription\n \"\"\"\n\n items = data.get(\"items\")\n if not items:\n subscription.items.delete()\n return []\n\n pks = []\n subscriptionitems = []\n for item_data in items.auto_paging_iter():\n item, _ = target_cls._get_or_create_from_stripe_object(\n item_data, refetch=False, api_key=api_key\n )\n\n # sync the SubscriptionItem\n target_cls.sync_from_stripe_data(item_data, api_key=api_key)\n\n pks.append(item.pk)\n subscriptionitems.append(item)\n subscription.items.exclude(pk__in=pks).delete()\n\n return subscriptionitems\n\n @classmethod\n def _stripe_object_to_refunds(\n cls, target_cls, data, charge, api_key=djstripe_settings.STRIPE_SECRET_KEY\n ):\n \"\"\"\n Retrieves Refunds for a charge\n :param target_cls: The target class to instantiate per refund\n :type target_cls: Type[djstripe.models.Refund]\n :param data: The data dictionary received from the Stripe API.\n :type data: dict\n :param charge: The charge object that refunds are for.\n :type charge: djstripe.models.Refund\n :return:\n \"\"\"\n stripe_refunds = convert_to_stripe_object(data.get(\"refunds\"))\n\n if not stripe_refunds:\n return []\n\n refund_objs = []\n\n for refund_data in stripe_refunds.auto_paging_iter():\n item, _ = target_cls._get_or_create_from_stripe_object(\n refund_data,\n refetch=False,\n api_key=api_key,\n )\n refund_objs.append(item)\n\n return refund_objs\n\n @classmethod\n def sync_from_stripe_data(\n cls,\n data,\n api_key=djstripe_settings.STRIPE_SECRET_KEY,\n stripe_version=djstripe_settings.STRIPE_API_VERSION,\n ):\n \"\"\"\n Syncs this object from the stripe data provided.\n\n Foreign keys will also be retrieved and synced recursively.\n\n :param data: stripe object\n :type data: dict\n :rtype: cls\n \"\"\"\n current_ids = set()\n data_id = data.get(\"id\")\n stripe_account = getattr(data, \"stripe_account\", None)\n\n if data_id:\n # stop nested objects from trying to retrieve this object before\n # initial sync is complete\n current_ids.add(data_id)\n\n instance, created = 
cls._get_or_create_from_stripe_object(\n data,\n current_ids=current_ids,\n stripe_account=stripe_account,\n api_key=api_key,\n )\n\n if not created:\n record_data = cls._stripe_object_to_record(\n data, api_key=api_key, stripe_account=stripe_account\n )\n for attr, value in record_data.items():\n setattr(instance, attr, value)\n instance._attach_objects_hook(\n cls, data, api_key=api_key, current_ids=current_ids\n )\n instance.save()\n instance._attach_objects_post_save_hook(cls, data, api_key=api_key)\n\n for field in instance._meta.concrete_fields:\n if isinstance(field, (StripePercentField, models.UUIDField)):\n # get rid of cached values\n delattr(instance, field.name)\n\n return instance\n\n @classmethod\n def _get_or_retrieve(cls, id, stripe_account=None, **kwargs):\n \"\"\"\n Retrieve object from the db, if it exists. If it doesn't, query Stripe to fetch\n the object and sync with the db.\n \"\"\"\n try:\n return cls.objects.get(id=id)\n except cls.DoesNotExist:\n pass\n\n if stripe_account:\n kwargs[\"stripe_account\"] = str(stripe_account)\n\n # If no API key is specified, use the default one for the specified livemode\n # (or if no livemode is specified, the default one altogether)\n kwargs.setdefault(\n \"api_key\",\n djstripe_settings.get_default_api_key(livemode=kwargs.get(\"livemode\")),\n )\n data = cls.stripe_class.retrieve(\n id=id, stripe_version=djstripe_settings.STRIPE_API_VERSION, **kwargs\n )\n instance = cls.sync_from_stripe_data(data, api_key=kwargs.get(\"api_key\"))\n return instance\n\n def __str__(self):\n return f\"\"\n\n\nclass IdempotencyKey(models.Model):\n uuid = models.UUIDField(\n max_length=36, primary_key=True, editable=False, default=uuid.uuid4\n )\n action = models.CharField(max_length=100)\n livemode = models.BooleanField(\n help_text=\"Whether the key was used in live or test mode.\"\n )\n created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = (\"action\", \"livemode\")\n\n def __str__(self):\n return str(self.uuid)\n\n @property\n def is_expired(self) -> bool:\n return timezone.now() > self.created + timedelta(hours=24)\n","repo_name":"dj-stripe/dj-stripe","sub_path":"djstripe/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":41919,"program_lang":"python","lang":"en","doc_type":"code","stars":1483,"dataset":"github-code","pt":"81"} +{"seq_id":"74013748424","text":"\"\"\"Demo-ing a dill/xarray crash.\"\"\"\nfrom arpes.io import example_data\nimport dill\n\ndata = example_data.map\ndata = data.assign_coords(**dict(data.coords)) # without this line there's a crash\n\nprint(data)\ndill.loads(dill.dumps(data))\n\nprint(\"Hi\")\n","repo_name":"chstan/arpes","sub_path":"scripts/crash_dill.py","file_name":"crash_dill.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"81"} +{"seq_id":"72762338824","text":"from .metric_base import MaskedMetric\nfrom .ops import mape\nfrom torch.nn import functional as F\nimport torch\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\nfrom ... 
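_get_or_retrieve above is a read-through lookup: serve from the database when the row exists, otherwise fetch from Stripe and sync the result in. The bare pattern, reduced to a dictionary-backed sketch in which fetch_remote stands in for the API call:

def get_or_retrieve(store, key, fetch_remote):
    if key in store:
        return store[key]          # local hit, no remote call
    store[key] = fetch_remote(key)  # miss: fetch once, then cache
    return store[key]

cache = {}
print(get_or_retrieve(cache, "cus_123", lambda k: {"id": k}))  # fetched
print(get_or_retrieve(cache, "cus_123", lambda k: {"id": k}))  # cached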
import epsilon\n\n\nclass MaskedMAE(MaskedMetric):\n def __init__(self,\n mask_nans=False,\n mask_inf=False,\n compute_on_step=True,\n dist_sync_on_step=False,\n process_group=None,\n dist_sync_fn=None,\n at=None):\n super(MaskedMAE, self).__init__(metric_fn=F.l1_loss,\n mask_nans=mask_nans,\n mask_inf=mask_inf,\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n metric_kwargs={'reduction': 'none'},\n at=at)\n\n\nclass MaskedMAPE(MaskedMetric):\n def __init__(self,\n mask_nans=False,\n compute_on_step=True,\n dist_sync_on_step=False,\n process_group=None,\n dist_sync_fn=None,\n at=None):\n super(MaskedMAPE, self).__init__(metric_fn=mape,\n mask_nans=mask_nans,\n mask_inf=True,\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n at=at)\n\n\nclass MaskedMSE(MaskedMetric):\n def __init__(self,\n mask_nans=False,\n compute_on_step=True,\n dist_sync_on_step=False,\n process_group=None,\n dist_sync_fn=None,\n at=None):\n super(MaskedMSE, self).__init__(metric_fn=F.mse_loss,\n mask_nans=mask_nans,\n mask_inf=True,\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n metric_kwargs={'reduction': 'none'},\n at=at)\n\n\nclass MaskedMRE(MaskedMetric):\n def __init__(self,\n mask_nans=False,\n mask_inf=False,\n compute_on_step=True,\n dist_sync_on_step=False,\n process_group=None,\n dist_sync_fn=None,\n at=None):\n super(MaskedMRE, self).__init__(metric_fn=F.l1_loss,\n mask_nans=mask_nans,\n mask_inf=mask_inf,\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n metric_kwargs={'reduction': 'none'},\n at=at)\n self.add_state('tot', dist_reduce_fx='sum', default=torch.tensor(0., dtype=torch.float))\n\n def _compute_masked(self, y_hat, y, mask):\n _check_same_shape(y_hat, y)\n val = self.metric_fn(y_hat, y)\n mask = self._check_mask(mask, val)\n val = torch.where(mask, val, torch.tensor(0., device=y.device, dtype=torch.float))\n y_masked = torch.where(mask, y, torch.tensor(0., device=y.device, dtype=torch.float))\n return val.sum(), mask.sum(), y_masked.sum()\n\n def _compute_std(self, y_hat, y):\n _check_same_shape(y_hat, y)\n val = self.metric_fn(y_hat, y)\n return val.sum(), val.numel(), y.sum()\n\n def compute(self):\n if self.tot > epsilon:\n return self.value / self.tot\n return self.value\n\n def update(self, y_hat, y, mask=None):\n y_hat = y_hat[:, self.at]\n y = y[:, self.at]\n if mask is not None:\n mask = mask[:, self.at]\n if self.is_masked(mask):\n val, numel, tot = self._compute_masked(y_hat, y, mask)\n else:\n val, numel, tot = self._compute_std(y_hat, y)\n self.value += val\n self.numel += numel\n self.tot += tot\n\n\n","repo_name":"Graph-Machine-Learning-Group/grin","sub_path":"lib/nn/utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"81"} +{"seq_id":"10015342842","text":"import csv\n\n\n# filename = \"./csv/abiturs.csv\"\nfilename = \"./csv/exam_balls.csv\"\n\nwith open(filename, \"r\", encoding=\"utf8\") as f: \n reader = csv.reader(f, delimiter=\";\")\n \n headers = next(reader) # читаем строку заголовков\n print(f'headers => {\",\".join(headers)}')\n\n lst = [[item[1], item[2], int(item[3])] for item in reader if item[2] == 
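Every masked metric above reduces to the same idea: compute the elementwise error, keep only entries where the mask is true, and normalize by the masked count. A freestanding sketch of masked MAE in plain PyTorch (not the torchmetrics-based classes themselves):

import torch

def masked_mae(y_hat, y, mask):
    # elementwise absolute error, averaged only where the mask is True
    return (y_hat - y).abs()[mask].mean()

y_hat = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([1.0, 0.0, 5.0])
mask = torch.tensor([True, False, True])
print(masked_mae(y_hat, y, mask))  # tensor(1.) -> mean of |0| and |-2|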
\"м\"]\n \n for row in sorted(lst, key=lambda x: -x[2]):\n print(f\"{row[2]}\\t{row[0]}\")\n\n\"\"\"\nвыбрать всех М и отсортировать по убыванию по оценке за математику\n\"\"\"","repo_name":"permCoding/algopro-23","sub_path":"part-1/lections/lect11-requests-csv-sort-set/22_reader_headers.py","file_name":"22_reader_headers.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21768711428","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport argparse, threading, time\nparse = argparse.ArgumentParser()\nparse.add_argument('-u', '--url', required=True)\nparse.add_argument('-f', '--find', default='', required=False)\nparse.add_argument('-o', '--output', required=False)\nargs = parse.parse_args()\nnothing = \"\"\npart = \"\"\noutput = \"\"\nurl = args.url\nfind = args.find\nlinks = []\nresults = []\nlinks.append(url)\nprint('''********************\nThis Program Is Buggy.\nTo Make Sure That It\\'s Completely Finished Crawling Then Give It A Moment After It Said It\\'s Finished.\nAlso, The Find Tag Feature Might Not Work In Some Cases.\n********************''')\ntime.sleep(5)\ntry:\n def spider( url, part):\n global links\n global results\n global find\n try:\n url = urlopen(url)\n soup = BeautifulSoup(url, 'html.parser')\n for result in soup.find_all(find):\n if result not in results:\n results.append(result)\n for link in soup.find_all('a'):\n try:\n if link.get('href') not in links:\n if link.get('href').startswith('/') and link.get('href').startswith('/') not in links:\n links.append(str(args.url + link.get('href')))\n part = str(link.get('href'))\n elif link.get('href').startswith('http') and link.get('href').startswith('http') not in links:\n print(link.get('href'))\n links.append(str(link.get('href')))\n url = link.get('href')\n part = \"\"\n threading.Thread(target=spider, args=(url, part)).start()\n except:\n continue\n except:\n pass\n print(url)\n spider( url, part)\n output += f\"{'-'*10}\\nLinks:\\n\"\n for link in links:\n output += f\"{link}\\n\"\n if results == []:\n output += f\"{'-'*10}\\nNumber Of Links: {str(len(links))}\"\n else:\n output += f\"{'-'*10}\\nResults:\\n\"\n for i in results:\n output += f\"{i}\\n\"\n output += f\"{'-'*10}\\nNumber Of Links: {len(links)}\\nNumber Of Results: {len(results)}\"\n if args.output:\n with open(args.output, 'a') as file:\n file.write(output)\n file.close()\n print(output)\n print(\"Ctrl + C To Exit\")\n exit()\nexcept KeyboardInterrupt:\n output += f\"{'-'*10}\\nLinks:\\n\"\n for link in links:\n output += f\"{link}\\n\"\n if results == []:\n output += f\"{'-'*10}\\nNumber Of Links: {str(len(links))}\"\n else:\n output += f\"{'-'*10}\\nResults:\\n\"\n for i in results:\n output += f\"{i}\\n\"\n output += f\"{'-'*10}\\nNumber Of Links: {len(links)}\\nNumber Of Results: {len(results)}\"\n if args.output:\n with open(args.output, 'a') as file:\n file.write(output)\n file.close()\n print(output)\n print(\"Ctrl + C To Exit\")\n exit()\n","repo_name":"INDA010010/Web-Spider","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13525233322","text":"N = int(input())\r\ndice_price = []\r\n\r\nfor i in range(0, N):\r\n dice_1, dice_2, dice_3 = map(int, input().split())\r\n dice_list = [dice_1, dice_2, dice_3]\r\n if dice_1-dice_2==0 and dice_2-dice_3==0:\r\n 
dice_price.append(10000+(dice_1*1000))\r\n elif dice_1-dice_2==0 or dice_1-dice_3==0:\r\n dice_price.append(1000+dice_1*100)\r\n elif dice_2-dice_3==0: \r\n dice_price.append(1000+dice_2*100)\r\n else:\r\n dice_price.append(max(dice_list)*100)\r\n\r\nprint(max(dice_price))","repo_name":"yuihmoo/algorism","sub_path":"python study/2476.py","file_name":"2476.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3581540806","text":"# 내가 구현한 코드\n# TODO : 중복검사 하기.\n\nprompt = \"\"\"\n1. 추가\n2. 검색(이름)\n3. 수정\n4. 삭제\n5. 전체 출력\n6. 전체 삭제\n7. 종료\nEnter number :\n\"\"\"\nuserList = []\n\n\ndef add():\n global userList\n userInfo = nameCheck()\n userList.append(userInfo)\n\n\ndef nameCheck():\n while True:\n name = input(\"이름 :\")\n phoneNum = int(input(\"전화번호 :\"))\n address = input(\"주소 :\")\n userInfo = [name, phoneNum, address]\n\n search(name)\n if name == search(name):\n print('중복된 이름')\n else:\n break\n return userInfo\n\n\ndef search(name):\n for i in range(0, len(userList)):\n if name in userList[i][0]:\n # 변수 in list[] 로 리스트 안에 값이 있는지 확인 할 수있다.\n return name\n\n\ndef searchName():\n searchName = (input(\"이름:\"))\n for i in range(0, len(userList)):\n if userList[i][0] == searchName:\n print(\"정보 있음 :\", userList[i][0], userList[i][1], userList[i][2])\n break\n else:\n print(\"정보 없음\")\n\n\ndef edit():\n searchName = input(\"이름:\")\n for i in range(0, len(userList)):\n if userList[i][0] == searchName:\n print(\"정보 있음 :\", userList[i][0], userList[i][1], userList[i][2])\n quest = input(\"어떤 ��보를 수정하시겠습니까? [이름, 번호, 주소] :\")\n if quest == \"이름\":\n while True:\n etName = input(\"수정할 이름 :\")\n if search(etName) == etName:\n print(\"중복된 이름 입니다.\")\n else:\n userList[i][0] = etName\n break\n\n elif quest == \"번호\":\n userList[i][1] = input(\"수정할 번호 :\")\n\n elif quest == \"주소\":\n userList[i][2] = input(\"수정할 주소 :\")\n\n print()\n print(\"수정되었습니다.\")\n break\n\n elif userList[i][0] != searchName:\n print(\"정보 없음\")\n\n\ndef remove():\n searchName = input(\"이름:\")\n for i in range(0, len(userList)):\n if userList[i][0] == searchName:\n print(\"정보 있음 :\", userList[i][0], userList[i][1], userList[i][2])\n quest = input(\"삭제 하시겠습니까? [예, 아니오] :\")\n if quest == \"예\":\n userList.remove(userList[i])\n print(\"삭제 되었습니다.\")\n break\n elif quest == \"아니오\":\n break\n else:\n print(\"정보 없음\")\n\n\ndef removeAll():\n quest = input(\"전체삭제 하시겠습니까? [예, 아니오] :\")\n if quest == \"예\":\n userList.clear()\n\n\nnumber = 0\nwhile number != 7:\n print(prompt)\n number = int(input())\n\n if number == 1:\n print(\"추가\")\n add()\n\n if number == 2:\n print(\"검색\")\n searchName()\n\n if number == 3:\n print(\"수정\")\n edit()\n\n if number == 4:\n print(\"삭제\")\n remove()\n\n if number == 5:\n print(\"전체 출력\")\n userList.sort()\n print(userList)\n\n if number == 6:\n print(\"전체 삭제\")\n removeAll()\n\n if number == 7:\n print(\"종료\")\n\n if number >= 8:\n print(\"숫자를 다시 입력해주세요.\")\n\n# 강사님께서 구현한 코드\n# 1.추가 2.검색 3.수정 4.삭제 5.전체출력 6.전체삭제 7.종료\n\n# 1. 
숫자\n\n'''datas = []\n\n\ndef add():\n global datas\n num = numCheck()\n datas.append(num)\n\n\ndef editNum():\n global datas\n num = int(input('edit num:'))\n flag = search(num)\n if flag == None:\n print('not found')\n else:\n num = numCheck()\n datas[flag] = num\n print('datas[', flag, '] 가 ', datas[flag], '로 수정됨')\n\n\ndef numCheck():\n while True:\n num = int(input('num:'))\n flag = search(num)\n if flag == None:\n break\n else:\n print('중복된 숫자')\n return num\n\n\ndef search(x):\n if x in datas:\n return datas.index(x)\n\n\ndef printAll():\n for i in datas:\n print(i, end=', ')\n print()\n\n\ndef stop():\n return False\n\n\ndef getNum():\n num = int(input('search num:'))\n flag = search(num)\n if flag == None:\n print('not found')\n else:\n print(flag, ' 방에 있다')\n\n\ndef delNum():\n global datas\n num = int(input('del num:'))\n flag = search(num)\n if flag == None:\n print('not found')\n else:\n del datas[flag]\n\n\ndef clearDatas():\n global datas\n datas.clear()\n\n\ndef main():\n li = [add, getNum, editNum, delNum, printAll, clearDatas, stop] # 룩업 테이블. 함수 객체 리스트\n flag = True\n while flag:\n menu = int(input('1.추가 2.검색 3.수정 4.삭제 5.전체출력 6.전체삭제 7.종료'))\n if 1 <= menu <= 6:\n li[menu - 1]()\n elif menu == 7:\n flag = li[menu - 1]()\n\n\nmain()'''\n# 2. 주소록\n'''members = []\n\n\ndef search(name):\n for idx, i in enumerate(members):\n if i[0] == name:\n return idx\n\n\ndef add():\n global members\n m = [\"\", \"\", \"\"]\n while True:\n name = input(\"name :\")\n flag = search(name)\n if flag == None:\n m[0] = name\n break\n else:\n print(\"중복된 이름. 다시 입력하세요\")\n m[1] = input(\"tel :\")\n m[2] = input(\"address :\")\n members.append(m)\n\n\ndef stop():\n return False\n\n\ndef printAll():\n for i in members:\n for j in i:\n print(j, end=\"/\")\n print()\n\n\ndef main():\n li = [add, None, None, None, printAll, None, stop] # 룩업 테이블. 
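Both reference solutions in this record dispatch menu choices through what the comments call a 룩업 테이블 (lookup table): a list of function objects indexed by menu number, in place of an if/elif chain. The idiom in isolation, with stand-in actions:

def add(): print("add")
def show(): print("show")

actions = [add, show]      # index by menu number - 1
for menu in (1, 2):        # stand-ins for int(input('menu: '))
    actions[menu - 1]()    # calls add, then show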
함수 객체 리스트\n flag = True\n while flag:\n menu = int(input('1.추가 2.검색 3.수정 4.삭제 5.전체출력 6.전체삭제 7.종료'))\n if 1 <= menu <= 6:\n li[menu - 1]()\n elif menu == 7:\n flag = li[menu - 1]()\n\nmain()'''\n","repo_name":"u-n-joe/Python","sub_path":"주소록.py","file_name":"주소록.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5244891466","text":"import sys, random, argparse\nimport numpy\nimport math\nimport turtle\nimport random\nfrom PIL import Image\nfrom datetime import datetime\nfrom fractions import gcd\n\n# draws the Spirograph\nclass Spiro:\n # constructor\n def __init__(self, xc, yc, col, R, r, l):\n # create turtle object\n self.t = turtle.Turtle()\n # set the cursor shape\n self.t.shape('turtle')\n # set the step in degrees\n self.step = 5\n # set the drawing complete flag\n self.drawingComplete = False\n\n # set the parameters\n self.setparameters(xc, yc, col, R, r, l)\n\n # initialize drawing\n self.restart()\n \n # set the parameters\n def setparameters(self, xc, yc, col, R, r, l):\n # set Spirograph Parameters\n self.xc = xc\n self.yc = yc\n self.R = int(R)\n self.r = int(r)\n self.l = l\n self.col = col\n\n # Reduce r/R to its smallest form by dividing with the GCD(greatest common denom)\n gcdVal = gcd(self.r, self.R)\n self.nRot = self.r//gcdVal\n # get ratio of radii\n self.k = r/float(R)\n # set colour\n self.t.color(*col)\n # save angle\n self.a = 0\n\n # restart the drawing\n def restart(self):\n # set the flag\n self.drawingComplete = False\n # show turtle\n self.t.showturtle()\n # go to first point\n self.t.up()\n R, k, l = self.R, self.k, self.l\n a = 0.0\n x = R * ((1 - k) * math.cos(a) + l * k * math.cos((1-k) * a / k))\n y = R * ((1 - k) * math.sin(a) - l * k * math.sin((1-k) * a / k))\n self.t.setpos(self.xc + x, self.yc + y)\n self.t.down()\n\n # Draw the Spirograph\n def draw(self):\n # Draw the outstanding points\n R, k, l = self.R, self.k, self.l\n for i in range(0, 360 * self.nRot + 1, self.step):\n a = math.radians(i)\n x = R * ((1-k) * math.cos(a) + l * k * math.cos((1-k) * a / k))\n y = R * ((1-k) * math.sin(a) - l * k * math.sin((1-k) * a / k))\n self.t.setpos(self.xc + x, self.yc + y)\n \n # Hide turtle when drawing is done\n self.t.hideturtle()\n\n # Update by a single step\n def update(self):\n # skip the other steps if drawing is complete\n if self.drawingComplete:\n return\n # increment the angle\n self.a += self.step\n # draw the step\n R, k, l = self.R, self.k, self.l\n # set the angle\n a = math.radians(self.a)\n x = R * ((1-k) * math.cos(a) + l * k * math.cos((1-k) * a / k))\n y = R * ((1-k) * math.sin(a) - l * k * math.sin((1-k) * a / k))\n self.t.setpos(self.xc + x, self.yc + y)\n # if drawing is complete set flag for it\n if self.a >= 360 * self.nRot:\n self.drawingComplete = True\n # drawing is now done so hide turtle\n self.t.hideturtle()\n\n # clear everything\n def clear(self):\n self.t.clear()\n\n\n# Animates the Spirograph\nclass SpiroAnimator:\n # constructor\n def __init__(self, N):\n # set time value (MS)\n self.deltaT = 10\n # get the window dimensions\n self.width = turtle.window_width()\n self.height = turtle.window_height()\n #create the spiro objects\n self.spiros = []\n for i in range(N):\n # generate random params\n rparams = self.genRandomParameters()\n # set the params for the Spiro\n spiro = Spiro(*rparams)\n self.spiros.append(spiro)\n # call timer\n turtle.ontimer(self.update, self.deltaT)\n\n # restart spiro drawing\n def 
restart(self):\n        for spiro in self.spiros:\n            # clear\n            spiro.clear()\n            # generate random parameters\n            rparams = self.genRandomParameters()\n            # set the spiro parameters\n            spiro.setparameters(*rparams)\n            # restart drawing\n            spiro.restart()\n\n    # generate random parameters\n    def genRandomParameters(self):\n        width, height = self.width, self.height\n        R = random.randint(50, min(width, height) // 2)\n        r = random.randint(10, 9*R//10)\n        l = random.uniform(0.1, 0.9)\n        xc = random.randint(-width//2, width//2)\n        yc = random.randint(-height//2, height//2)\n        col = (random.random(), random.random(), random.random())\n        return (xc, yc, col, R, r, l)\n\n    def update(self):\n        # update all spiros\n        nComplete = 0\n        for spiro in self.spiros:\n            # update\n            spiro.update()\n            # count number of completed spiros\n            if spiro.drawingComplete:\n                nComplete += 1\n        \n        # restart if all spiros are complete\n        if nComplete == len(self.spiros):\n            self.restart()\n        # call the timer\n        turtle.ontimer(self.update, self.deltaT)\n\n    # toggle turtle cursor on and off\n    def toggleTurtles(self):\n        for spiro in self.spiros:\n            if spiro.t.isvisible():\n                spiro.t.hideturtle()\n            else:\n                spiro.t.showturtle()\n\n# save drawings as PNG files\ndef saveDrawing():\n    # hide turtle\n    turtle.hideturtle()\n    # gen filename\n    dateStr = (datetime.now()).strftime(\"%d%b%Y - %H%M%S\")\n    fileName = 'spiro-' + dateStr\n    print(\"Saving drawing to %s.eps/png\" % fileName)\n    # Get the tkinter canvas\n    canvas = turtle.getcanvas()\n    # save the drawing as a postscript image\n    canvas.postscript(file = fileName + \".eps\")\n    # Use pillow module to convert to PNG\n    img = Image.open(fileName + \".eps\")\n    img.save(fileName + \".png\", \"png\")\n    # show the turtle cursor (module-level call; turtle.turtle has no such attribute)\n    turtle.showturtle()\n\ndef main():\n    print(\"Generating Spirograph...\")\n    # argument parser\n    descriptionString = \"\"\"This program draws Spirographs. 
When run with no arguments, this program draws random Spirographs.\n \n Terminology:\n\n R: radius of outer circle\n r: radius of inner circle\n l: ratio of hole distance to r\n \"\"\"\n\n parser = argparse.ArgumentParser(description=descriptionString)\n\n # expected arguments\n parser.add_argument(\"--sparams\", nargs=3, dest=\"sparams\", required=False, help=\"Arguments in sparams: R, r, l\")\n # parse the arguments\n args = parser.parse_args()\n\n # set with of drawing window to 80% of screen width\n turtle.setup(width=0.8)\n\n turtle.shape(\"turtle\")\n\n # set window title\n turtle.title(\"Spirographs!\")\n # add key handler to save drawwings\n turtle.onkey(saveDrawing, \"s\")\n # start listening\n turtle.listen()\n\n turtle.hideturtle()\n\n # check for any args and then draw the Spirogrph\n if args.sparams:\n params = [float(x) for x in args.sparams]\n # draw the spirograph with the set parameters\n col = (0.0, 0.0, 0.0)\n spiro = Spiro(0, 0, col, *params)\n spiro.draw()\n else:\n # create animator object\n spiroAnim = SpiroAnimator(4)\n # Add key handler to toggle turtle\n turtle.onkey(spiroAnim.toggleTurtles, \"t\")\n # Add key handler to restart animation\n turtle.onkey(spiroAnim.restart, \"space\")\n\n # Start the main turtle loop\n turtle.mainloop()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JoshuaSGraham/SpirographToy","sub_path":"spiro.py","file_name":"spiro.py","file_ext":"py","file_size_in_byte":7278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21104009819","text":"import csv\nfrom decimal import Decimal\nimport math\n\n\nclass BM_25(object):\n\n def __init__(self, dataFile: str):\n \"\"\"\n Need: \n SUM(t in Q) of: \n \n N: Number of documents in the corpus\n df_t: The number of documents containing term t\n k_1: Typically set to 1.2\n f_t: Frequency of that term in the document\n b: typically set to .75\n |d|: length of the document\n k_2: Typically between 0 and 1000 (much larger than k_1\n qf_t: the frequency of the term t in query Q. \n \n \n 1. Number of documents in the corpus\n 2. Number of documents containing term t \n 3. k_1: Set to 1.2\n 4. f_t: frequency of term in the document\n 5. b: typically set to .75\n 6. 
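The Spiro class in the record above evaluates one pair of parametric equations over and over; stripped of turtle state they form a pure function. A sketch using the same R, r, l terminology as that file:

import math

def spiro_point(R, r, l, a):
    # x, y of the pen at angle a, exactly the expressions used in draw/update
    k = r / float(R)
    x = R * ((1 - k) * math.cos(a) + l * k * math.cos((1 - k) * a / k))
    y = R * ((1 - k) * math.sin(a) - l * k * math.sin((1 - k) * a / k))
    return x, y

print(spiro_point(220, 65, 0.8, math.radians(90)))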
|d| length of the document\n \n \"\"\"\n\n # Ingesting file.\n self.global_index: dict = {}\n self.document_tcount: dict = {} # aka document length in words.\n self.n: int = 0\n self.k1: Decimal = Decimal(1.2)\n self.k2: Decimal = Decimal(500)\n self.b: Decimal = Decimal(.75)\n self.avg_d: Decimal = Decimal(0) # Calculated after ingesting documents\n with open(dataFile, 'r') as csv_file:\n dict_reader: csv.DictReader = csv.DictReader(csv_file)\n # Iterating through documents\n for row in dict_reader:\n self.n += 1\n temp_dict: dict = {}\n doc_id: str = row['id']\n terms: list = row['description'].split(' ')\n if '' in terms:\n terms.remove('')\n # Tallying terms\n total_count: int = 0\n for term in terms:\n if term in temp_dict:\n temp_dict[term] += 1\n else:\n temp_dict[term] = 1\n total_count += 1\n # Logging document info\n for key in temp_dict:\n self._update_index(doc_id, key, temp_dict[key])\n self.document_tcount[doc_id] = total_count\n # Calculating average document length\n total_length: int = 0\n document_count: int = 0\n for key in self.document_tcount:\n document_count += 1\n total_length += self.document_tcount[key]\n self.avg_d = Decimal(total_length) / Decimal(document_count)\n\n def _update_index(self,\n doc_id: str,\n term: str,\n occurences: int):\n \"\"\" __init__ helper function. \"\"\"\n if term in self.global_index:\n # Managing posting order\n posting_list: list = self.global_index[term]\n for i, docpair in enumerate(posting_list):\n if occurences >= docpair[1]:\n self.global_index[term].insert(i, (doc_id, occurences))\n break\n if docpair is self.global_index[term][-1]:\n # New posting goes at the end.\n self.global_index[term].append((doc_id, occurences))\n else:\n self.global_index[term] = [(doc_id, occurences)]\n\n def _first_algoterm(self, term: str) -> Decimal:\n \"\"\"\n ln( (N - dft + .5)/(dft + .5) )\n \"\"\"\n\n dft: int = 0\n if term in self.global_index:\n term_posting: list = self.global_index[term]\n dft = sum([1 for docpair in term_posting])\n fraction: Decimal = Decimal((self.n - dft + .5) / (dft + .5))\n return Decimal(math.log(fraction))\n\n def _second_algoterm(self, term: str, doc_id: int) -> Decimal:\n \"\"\"\n ((k1 + 1) * ft) / ( k1*(1-b) + (b * (|d|/avg(|d|)) + ft\n\n :param term:\n :return:\n \"\"\"\n # Finding the given term/document in the postings\n ft: Decimal = Decimal(0)\n docpairs: list = self.global_index[term]\n for pair in docpairs:\n if pair[0] == doc_id:\n ft = pair[1]\n break\n d: Decimal = Decimal(self.document_tcount[doc_id])\n numerator: Decimal = (1 + self.k1) * Decimal(ft)\n # Staging pieces of the denominator\n denom_1: Decimal = self.k1 * (1 - self.b)\n denom_2: Decimal = self.b * (d / self.avg_d)\n denom_3: Decimal = ft\n # Putting it all together\n final: Decimal = numerator / (denom_1 + denom_2 + denom_3)\n return final\n\n def _third_algoterm(self, term: str, query: str) -> Decimal:\n \"\"\" ((k2 + 1) * qft) / (k2 + qft)\"\"\"\n\n qterms: list = query.split(' ')\n qft: int = sum([1 for qterm in qterms if qterm == term])\n numerator: Decimal = (1 + self.k2) * Decimal(qft)\n demonimator: Decimal = self.k2 + Decimal(qft)\n return numerator / demonimator\n\n def _relevant_docids(self, Q: str):\n \"\"\" tfidf helper function.\n Returns a list of document IDs containing one or more terms in Q\n \"\"\"\n terms = Q.split(' ')\n relevant_ids: list = []\n for term in terms:\n if term in self.global_index:\n term_posts: list = self.global_index[term]\n relevant_ids += [docpair[0] for docpair in term_posts]\n return 
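The three factors computed by _first_algoterm, _second_algoterm and _third_algoterm multiply into each term's BM25 contribution. Assembled into one standalone function with the same constants the class uses (k1=1.2, k2=500, b=0.75):

import math

def bm25_term_score(N, df_t, f_t, qf_t, dl, avg_dl, k1=1.2, k2=500.0, b=0.75):
    # inverse document frequency of the term
    idf = math.log((N - df_t + 0.5) / (df_t + 0.5))
    # document term frequency, saturated and length-normalized
    tf_doc = ((k1 + 1) * f_t) / (k1 * (1 - b) + b * (dl / avg_dl) + f_t)
    # query term frequency, saturated by k2
    tf_query = ((k2 + 1) * qf_t) / (k2 + qf_t)
    return idf * tf_doc * tf_query

print(bm25_term_score(N=1000, df_t=50, f_t=3, qf_t=1, dl=120, avg_dl=100))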
set(relevant_ids)\n\n def _score(self, doc_id: int, query: str) -> Decimal:\n \"\"\" Runs the BM25 scoring algorithm against the given document \"\"\"\n terms: list = query.split(' ')\n sum: Decimal = Decimal(0)\n # Performing summation\n for term in terms:\n if term not in self.global_index:\n continue\n first_term: Decimal = self._first_algoterm(term)\n second_term: Decimal = self._second_algoterm(term, doc_id)\n third_term: Decimal = self._third_algoterm(term, query)\n sum += (first_term * second_term * third_term)\n return sum\n\n def bm25(self, query: str, k: int):\n relevant_docids: set = self._relevant_docids(query)\n results: list = []\n for docid in relevant_docids:\n score = float(self._score(docid, query))\n if score > 0:\n results.append((docid, score))\n\n results.sort(key=lambda pair: pair[1])\n results.reverse()\n return results[:k]\n\n","repo_name":"RyanEliopoulos/cs454_assignment2","sub_path":"BM25.py","file_name":"BM25.py","file_ext":"py","file_size_in_byte":6377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13313831689","text":"# -*- coding: utf-8 -*-\n\nfrom webserver import db\nfrom webserver.models import Address\nfrom webserver.tests import build_address\nfrom webserver.tests import delete_addresses\nfrom webserver.tests.functional import FunctionalTest\n\n\nclass Exists(FunctionalTest):\n \"\"\" Check if the webservice exists \"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\" Add database fixtures \"\"\"\n\n build_address(id=5)\n db.session.commit()\n\n @classmethod\n def teardown_class(cls):\n \"\"\" Clear database fixtures \"\"\"\n\n delete_addresses()\n db.session.commit()\n\n def test_exists(self):\n \"\"\" DELETE /addresses/id: exists \"\"\"\n\n # Check request\n response = self.delete('/addresses/5')\n assert response.status_code != 404\n assert response.status_code != 500\n\n\nclass UnknownParameters(FunctionalTest):\n \"\"\" Check with no datas \"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\" Add database fixtures \"\"\"\n\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" Clear database fixtures \"\"\"\n\n pass\n\n def test_unkown_id(self):\n \"\"\" DELETE /addresses/id: with unkown id \"\"\"\n\n # Check request\n response = self.delete('/addresses/5')\n assert response.status_code == 404\n assert response.data == \"L'adresse n'existe pas.\"\n\n\nclass Delete(FunctionalTest):\n \"\"\" Check with valid data \"\"\"\n\n @classmethod\n def setup_class(cls):\n \"\"\" Add database fixtures \"\"\"\n\n build_address(id=5)\n db.session.commit()\n\n @classmethod\n def teardown_class(cls):\n \"\"\" Clear database fixtures \"\"\"\n\n delete_addresses()\n db.session.commit()\n\n def test_delete(self):\n \"\"\" DELETE /addresses/id: with valid data \"\"\"\n\n # Check request\n response = self.delete('/addresses/5')\n assert response.status_code == 200\n\n # Check response\n result = self.parse(response.data)\n assert 'id' in result\n\n # Check in database\n address = db.session.query(Address).get(result['id'])\n assert address is None","repo_name":"richfab/LOG210-server","sub_path":"webserver/tests/functional/address/test_delete.py","file_name":"test_delete.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34287258576","text":"invList =[]\n\ndef getList(l):\n\tif len(l)==0:\n\t\tprint(\"Inventry is Empty.\")\n\telse:\n\t\tprint(\"Items in the Inventry:\")\n\t\tprint(l)\n\ndef 
sortList(l):\n\tif len(l)==0:\n\t\tprint(\"Inventry is Empty.\")\n\telse:\n\t\tprint(\"Items in the Inventry:\")\n\t\tl.sort()  # list.sort() sorts in place and returns None, so sort first\n\t\tprint(l)\n\ndef add(l, item):\n\ttry:\n\t\tif len(l) >=10:\n\t\t\traise Exception(\"Overflow: Inventory Capacity Full.\")\n\t\t\n\t\tif item not in l:\n\t\t\tl.append(item)\n\t\telse:\n\t\t\traise Exception(\"Duplicate: {0} already present in the list.\".format(item))\n\texcept Exception as e:\n\t\tprint(e)\n\n\t# return l # No need to Return.\n\ndef remove(l, item):\n\ttry:\n\t\tl.remove(item)\n\texcept ValueError as e:\n\t\tprint(\"ValueError: {0} not in list.\".format(item))\n\texcept Exception as e:\n\t\tprint(e)\n\t# return l # No need to Return.\n","repo_name":"SaritaBhushan/sampletest","sub_path":"inventry.py","file_name":"inventry.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17329520099","text":"# 1544.MakTheStringGreat.py\n# https://leetcode.com/problems/make-the-string-great\nclass Solution:\n    def makeGood(self, s: str) -> str:\n        mystack = []\n        for i in range(len(s)):\n            if mystack and mystack[-1] == s[i].swapcase():\n                mystack.pop()\n            else:\n                mystack.append(s[i])\n        return ''.join(mystack)","repo_name":"djparul/leetcode","sub_path":"python/1544.MakTheStringGreat.py","file_name":"1544.MakTheStringGreat.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38131615585","text":"class Trainers:\r\n    def __init__(self, stamina:int, color:str):\r\n        self.stamina = stamina\r\n        self.color = color\r\n\r\n    def __repr__(self):\r\n        return f'Trainers: [{self.stamina}, {self.color}]'\r\n    \r\nclass Member:\r\n    def __init__(self, name:str, age:int, trainers:Trainers):\r\n        self.name = name\r\n        self.age = age\r\n        self.trainers = trainers\r\n        self.__gym_list = []\r\n\r\n    def get_all_gyms(self):\r\n        return self.__gym_list\r\n\r\n    def get_gyms(self):\r\n        return [x.name for x in self.__gym_list]\r\n\r\n    def __repr__(self):\r\n        return f'[{self.name}], [{self.age}]: [{self.trainers}]'\r\n    \r\nclass Gym:\r\n    def __init__(self, name:str, max_members_number:int):\r\n        self.name = name\r\n        self.max_members_number = max_members_number\r\n        self.member_list = []\r\n    \r\n    def can_add_member(self, member):\r\n        if len(self.member_list) < self.max_members_number\\\r\n            and isinstance(member, Member)\\\r\n            and member.trainers != ''\\\r\n            and member.trainers.stamina >= 0\\\r\n            and member not in self.member_list: \r\n            return True\r\n        elif len(self.member_list) >= self.max_members_number\\\r\n            and isinstance(member, Member)\\\r\n            and member.trainers != ''\\\r\n            and member.trainers.stamina >= 0\\\r\n            and member not in self.member_list:\r\n            remove_list = []\r\n            for member in self.member_list:\r\n                if member.trainers.stamina == min(member.trainers.stamina for member in self.member_list):\r\n                    remove_list.append(member)\r\n            for member in remove_list:\r\n                self.remove_member(member)\r\n            return True\r\n\r\n        else:\r\n            return False\r\n    \r\n    def add_member(self, member: Member) -> Member:\r\n        if self.can_add_member(member) == True:\r\n            self.member_list.append(member)\r\n            return f'{member}'\r\n        else:\r\n            return f'Gym full'\r\n\r\n    def remove_member(self, member:Member):\r\n        if member in self.member_list:\r\n            self.member_list.remove(member)\r\n\r\n    def get_total_stamina(self) -> list:\r\n        stamina = 0\r\n        for member in self.member_list:\r\n            stamina += member.trainers.stamina\r\n        return stamina\r\n\r\n    def 
get_members_number(self) -> list:\r\n return len(self.member_list)\r\n\r\n def get_all_members(self) -> list:\r\n return self.member_list\r\n\r\n def get_average_age(self) -> list:\r\n age = 0\r\n for member in self.member_list:\r\n age += member.age\r\n average_age = age / len(self.member_list)\r\n return round(average_age, 2)\r\n\r\n def __repr__(self):\r\n return f'Gym: [{self.name}] : [{len(self.member_list)}] member(s)'\r\n \r\nclass City:\r\n def __init__(self, max_gym_number:int):\r\n self.max_gym_number = max_gym_number\r\n self.gym_list = []\r\n\r\n def can_build_gym(self, gym) -> bool:\r\n if len(self.gym_list) < self.max_gym_number\\\r\n and gym not in self.gym_list :\r\n return True\r\n else:\r\n return False\r\n\r\n def build_gym(self, gym: Gym) -> Gym:\r\n if self.can_build_gym(gym) == True:\r\n self.gym_list.append(gym)\r\n return f'{gym}'\r\n else:\r\n return f'gym limit reached'\r\n\r\n def destroy_gym(self, gym:Gym):\r\n if gym in self.gym_list:\r\n self.gym_list.remove(gym)\r\n\r\n def get_max_members_gym(self) -> list:\r\n for gym in self.gym_list:\r\n if gym.get_members_number() == max([gym.get_members_number() for gym in self.gym_list]):\r\n return gym.name\r\n \r\n def get_max_stamina_gym(self) -> list:\r\n for gym in self.gym_list:\r\n if gym.get_total_stamina() == max([gym.get_total_stamina() for gym in self.gym_list]):\r\n return gym.name\r\n\r\n def get_max_average_ages(self) -> list:\r\n for gym in self.gym_list:\r\n if gym.get_average_age() == max([gym.get_average_age() for gym in self.gym_list]):\r\n return gym.name\r\n \r\n def get_min_average_ages(self) -> list:\r\n for gym in self.gym_list:\r\n if gym.get_average_age() == min([gym.get_average_age() for gym in self.gym_list]):\r\n return gym.name\r\n\r\n def get_gyms_by_trainers_color(self, color:str) -> list:\r\n Gyms_with_color = [gym.name for gym in self.gym_list for gym.member in gym.member_list if gym.member.trainers.color == color]\r\n gym_name = {}\r\n for color in Gyms_with_color:\r\n if color in gym_name:\r\n gym_name[color] += 1\r\n else:\r\n gym_name[color] = 1\r\n return sorted(set(Gyms_with_color), key=lambda x: gym_name[x], reverse=True)\r\n\r\n def get_gyms_by_name(self, name:str) -> list:\r\n Gyms_with_name = [gym.name for gym in self.gym_list if gym.name == name]\r\n gym_name = {}\r\n for name in Gyms_with_name:\r\n if name in gym_name:\r\n gym_name[name] += 1\r\n else:\r\n gym_name[name] = 1\r\n return sorted(set(Gyms_with_name), key=lambda x: gym_name[x], reverse=True)\r\n\r\n def get_all_gyms(self) -> list:\r\n return self.gym_list\r\n\r\nif __name__ == '__main__':\r\n trainers1 = Trainers(67, \"Blue\")\r\n trainers2 = Trainers(30, \"Red\")\r\n trainers3 = Trainers(100, \"Acqua\")\r\n\r\n member1 = Member(\"Andrus\", 43, trainers1)\r\n member2 = Member(\"Mati\", 41, trainers2)\r\n member3 = Member(\"Annela\", 23, trainers3)\r\n\r\n gym1 = Gym(\"Golds\", 2)\r\n gym2 = Gym(\"247\", 70)\r\n\r\n gym1.add_member(member1)\r\n gym1.add_member(member2)\r\n print(gym1.add_member(member3))\r\n #gym2.add_member(member1)\r\n \r\n #print(gym1.can_add_member())\r\n print(gym1.get_all_members())\r\n #print(gym1.get_total_stamina())\r\n #print(gym1.get_members_number())\r\n #print(gym1.get_average_age())\r\n\r\n city1 = City(27)\r\n print(city1.build_gym(gym1))\r\n print(city1.build_gym(gym2))\r\n #print(city1.get_max_members_gym())\r\n #print(city1.get_max_stamina_gym())\r\n #print(city1.get_max_average_ages())\r\n #print(city1.get_min_average_ages())\r\n 
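The City getters above locate an extreme element by looping and comparing against max(...) of a comprehension recomputed each iteration; Python's max and min accept a key function that does the same in one pass. A sketch with (name, total stamina) tuples as stand-ins for Gym objects:

gyms = [("Golds", 97), ("247", 130)]       # (name, total stamina) stand-ins
strongest = max(gyms, key=lambda g: g[1])  # one pass, no repeated min/max
print(strongest[0])                        # -> 247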
#print(city1.get_gyms_by_trainers_color(\"Blue\"))\r\n #print(city1.get_gyms_by_name(\"Golds\"))\r\n #print(city1.get_all_gyms())\r\n","repo_name":"ejvirkus/OOP_homeworks","sub_path":"Gym.py","file_name":"Gym.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14814171206","text":"\"\"\"\nMatthew Beatty, Regan Bell, Akshay Saini\nCS51 Final Project\nQ Comment Summarization\n4/30/15\n\"\"\"\n\nfrom collections import defaultdict\nfrom nltk.corpus import stopwords\nfrom math import log, exp\nfrom re import split\n\npositivity_files = [('rt-polarity.neg', False), ('rt-polarity.pos', True)]\nsubjectivity_files = [('plot.tok.gt9.5000', False), ('quote.tok.gt9.5000', True)]\n\n\nclass Classifier:\n def __init__(self):\n \"\"\" trains the classifier on the positivity and subjectivity corpora \"\"\"\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)\n\n def train(self, files, switch):\n \"\"\"\n :type files: (string, boolean) list\n :param files: list of size 2; the string is the filename to learn on, the boolean\n refers to which data is being trained (False for negative, True for positive)\n :type switch: int\n :param switch: 0 or 1: 0 to train positivity, 1 to train subjectivity\n :return: None (dictionary is loaded as instance variable)\n \"\"\"\n vocabulary = set()\n one_count_dict = defaultdict(float)\n zero_count_dict = defaultdict(float)\n one_word_count = 0\n zero_word_count = 0\n for file in files:\n filename, one = file\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n for line in lines:\n words = line.split(\" \")\n for word in words:\n if len(word) > 2:\n vocabulary.add(word)\n if one:\n one_word_count += 1\n one_count_dict[word] += 1\n else:\n zero_word_count += 1\n zero_count_dict[word] += 1\n one_prob_dict = dict()\n zero_prob_dict = dict()\n for positive in [False, True]:\n for word in vocabulary:\n if positive:\n one_prob_dict[word] = (one_count_dict[word]+1.0)/(one_word_count+len(vocabulary))\n else:\n zero_prob_dict[word] = (zero_count_dict[word]+1.0)/(zero_word_count+len(vocabulary))\n if switch == 0:\n self.polar_vocab = vocabulary\n self.pos_word_count = one_word_count\n self.pos_prob_dict = one_prob_dict\n self.neg_word_count = zero_word_count\n self.neg_prob_dict = zero_prob_dict\n else:\n self.objective_vocab = vocabulary\n self.obj_word_count = one_word_count\n self.obj_prob_dict = one_prob_dict\n self.subj_word_count = zero_word_count\n self.subj_prob_dict = zero_prob_dict\n\n def positivity(self, sentence):\n \"\"\"\n :type sentence: string\n :param sentence: long sentences (above a dozen words) decrease accuracy\n :return: float: between -1 and 1; positivity of given sentence (-1 for completely negative, 1 for completely positive. 0 is ambiguous\n \"\"\"\n return self.evaluate(sentence, 0)\n\n def subjectivity(self, sentence):\n \"\"\"\n :type sentence: string\n :param sentence: long sentences (above a dozen words) decrease accuracy\n :return: float: between -1 and 1; subjectivity of given sentence (-1 for completely objective, 1 for completely subjective. 
0 is ambiguous\n \"\"\"\n return self.evaluate(sentence, 1)\n\n def evaluate(self, sentence, switch):\n \"\"\"\n :type sentence: string\n :param sentence: long sentences (above a dozen words) decrease accuracy\n :type switch: int\n :param switch: 0 or 1: 0 to evaluate positivity of given sentence, 1 to evaluate subjectivity of given sentence\n :return: float: between -1 and 1; positivity/subjectivity of given sentence (-1 for the opposite, 1 for the most)\n \"\"\"\n stop_words = stopwords.words('english')\n words = filter(None, split(\"[^a-zA-Z0-9'_]\", sentence))\n if switch == 0:\n vocabulary = self.polar_vocab\n one_word_count = self.pos_word_count\n zero_word_count = self.neg_word_count\n one_prob_dict = self.pos_prob_dict\n zero_prob_dict = self.neg_prob_dict\n else:\n vocabulary = self.objective_vocab\n one_word_count = self.obj_word_count\n zero_word_count = self.subj_word_count\n one_prob_dict = self.obj_prob_dict\n zero_prob_dict = self.subj_prob_dict\n total_one_words = one_word_count + len(vocabulary)\n total_zero_words = zero_word_count + len(vocabulary)\n prob_one = 0\n prob_zero = 0\n num_words = 0\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if (len(word) > 2) & (word in vocabulary):\n num_words += 1\n prob_one += log(one_prob_dict.get(word, 1.0/total_one_words))\n prob_zero += log(zero_prob_dict.get(word, 1.0/total_zero_words))\n prob_one = exp(prob_one)\n prob_zero = exp(prob_zero)\n return (prob_one - prob_zero) / (prob_one + prob_zero)","repo_name":"ReganBell/QReview","sub_path":"Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"23607226896","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nimport pathlib\nimport pkg_resources\n# import versioneer\n\n\nwith pathlib.Path('requirements.txt').open() as requirements_txt:\n install_requires = [\n str(requirement)\n for requirement\n in pkg_resources.parse_requirements(requirements_txt)\n ]\n\nwith pathlib.Path('requirements_test.txt').open() as requirements_txt:\n test_install_requires = [\n str(requirement)\n for requirement\n in pkg_resources.parse_requirements(requirements_txt)\n ]\n\nsetup(name='examplepackage',\n description='An example package',\n author='Jacob Marlow',\n author_email='',\n url='',\n # version=versioneer.get_version(),\n packages=find_packages(),\n entry_points='''\n [console_scripts]\n hello=example_main:hello\n ''',\n install_requires=install_requires,\n extras_require={'test': test_install_requires}\n )\n","repo_name":"jhmarlow/python-template-repository","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18356112862","text":"import argparse\nimport time\nimport sys\n\nimport math\n\nimport select\n\nfrom api.listener import twitter_stream\nfrom api.listener.keystore import keystore\n\nfrom exponent_server_sdk import PushClient\nfrom exponent_server_sdk import PushMessage\n\n# Command Line Arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--region', dest='region', action='store', required=True)\nparser.add_argument('--latitude', dest='latitude', action='store', required=True)\nparser.add_argument('--longitude', dest='longitude', action='store', required=True)\nparser.add_argument('--token', dest='token', action='store', 
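The evaluate method above is a two-class naive Bayes score: sum log-probabilities with an add-one-smoothing fallback for unseen words, exponentiate, and normalize the two class likelihoods into a score in [-1, 1]. The core of that rule in miniature, with toy probabilities:

import math

def nb_score(words, probs_pos, probs_neg, v_pos, v_neg):
    # log-likelihood of each class; unseen words fall back to 1/vocab-size
    lp = sum(math.log(probs_pos.get(w, 1.0 / v_pos)) for w in words)
    ln = sum(math.log(probs_neg.get(w, 1.0 / v_neg)) for w in words)
    p, n = math.exp(lp), math.exp(ln)
    return (p - n) / (p + n)

print(nb_score(["great"], {"great": 0.02}, {"great": 0.005}, 10000, 10000))  # -> 0.6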
required=True)\nparser.add_argument('--user', dest='user', action='store', required=True)\n# region, latitude, longitude = parser.parse_args()\nargs = parser.parse_args()\nregion, latitude, longitude = args.region, args.latitude, args.longitude\ntoken, user = args.token, args.user\n# Constants\nSECONDS_TO_LIVE = 600\n# This is how you get twitter keys!\n# dict with keys consumerKey, consumerSecret, tokenKey, tokenSecret\nTWITTER_KEYS = keystore.get_keys('twitter')\nDISTANCE = 10\nKEYWORD_LISTS = [\n    # ('Cal', 'Washington', 'Huskies'),\n    # ('protest', 'controversial speaker'),\n    # ('amberalert', 'amber alert'),\n    ('calhacks', 'calhacks4'),\n]\n\nexpiration = time.time() + SECONDS_TO_LIVE\n\n\ndef reset_lifetime():\n    global expiration\n    expiration = time.time() + SECONDS_TO_LIVE\n\n\ndef check_lifetime():\n    return time.time() < expiration\n\n\ndef input_available():\n    return select.select([sys.stdin, ], [], [], 0.0)[0]\n\n\ndef get_input():\n    line = sys.stdin.readline()\n\n\nreset_lifetime()\n\nstart = time.time()\nwhile check_lifetime():\n    if input_available():\n        get_input()\n        reset_lifetime()\n\n    new_tweets = twitter_stream.get_filtered_tweets_by_location(latitude, longitude, DISTANCE, KEYWORD_LISTS)\n    response = PushClient().publish(\n        PushMessage(to=token,\n                    body=\"New tweet\",\n                    data=new_tweets[0]))\n\n\n    delta = time.time() - start\n    if delta < 120:\n        time.sleep(math.ceil(delta))\n    start = time.time()\n","repo_name":"brianlevis/Cal-Hacks-4-Server","sub_path":"api/listener/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72443875144","text":"def gridChallenge(grid):\n    count = 0\n    for index in range(len(grid[0])):\n        compare = 0\n        for row in grid:\n            row = sorted(row)\n            if compare <= ord(row[index]):\n                compare = ord(row[index])\n                count += 1\n    return \"YES\" if count == len(grid[0])*len(grid) else \"NO\"\n\n\ngridChallenge(['ebacd', 'fghij', 'olmkn', 'trpqs', 'xywuv'])\ngridChallenge(['ebacf', 'fghij', 'olmkn', 'trpqs', 'xywuv'])\ngridChallenge(['mpxz', 'abcd', 'wlmf'])\ngridChallenge(['vpvv', 'pvvv', 'vzzp', 'zzyy'])\n","repo_name":"DanielBalda/hackerrank","sub_path":"1 Week Preparation Kit/Day 4/Grid Challenge.py","file_name":"Grid Challenge.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15992299745","text":"## spring model hexagonal lattice\n\nimport numpy as np\n\na = 2\nrow_length = 10\nnum_rows = 3\nN = row_length * num_rows  # total number of lattice sites\npos = np.zeros((N, 2))\nx_coordinate = np.zeros(row_length)\nx_coordinate_offset = np.zeros(row_length)\ny_coordinate = np.zeros(num_rows)\n\nfor i in range(row_length):\n\tx_coordinate[i] = i * a\n\tx_coordinate_offset[i] = i * a + (0.5 * a)\n\nfor j in range(num_rows):\n\t# successive rows sit sqrt(3)/2 * a apart\n\ty_coordinate[j] = j * (np.sqrt(3) * 0.5) * a\n\nfor j in range(num_rows):\n\tfor i in range(row_length):\n\t\t# every other row is shifted by half a lattice spacing\n\t\tif j % 2 == 0:\n\t\t\tpos[j * row_length + i] = x_coordinate[i], y_coordinate[j]\n\t\telse:\n\t\t\tpos[j * row_length + i] = x_coordinate_offset[i], y_coordinate[j]\n","repo_name":"arthurebtking/spring_model","sub_path":"hexagonal_lattice.py","file_name":"hexagonal_lattice.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21803025240","text":"import json, logging, urllib, os, random, re, zipfile\nfrom google.appengine.api import search, taskqueue, users\nfrom google.appengine.ext import 
blobstore, db, webapp\nfrom google.appengine.ext.webapp import blobstore_handlers, template\nfrom gaesessions import get_current_session\nimport model, unpack\n\ndef enforce_login(handler):\n session = get_current_session()\n account = session.get(\"account\")\n if account is None:\n session[\"message\"] = \"Please log in first\"\n handler.redirect(\"/message\")\n\ndef enforce_rights(handler, epub):\n if epub is None or epub.isPublicAccess():\n return;\n\n session = get_current_session()\n account = session.get(\"account\")\n if account is None:\n session[\"message\"] = \"Please log in first\"\n handler.redirect(\"/message\")\n return\n\n entry = model.LibraryEntry.all().filter(\"epub = \",epub).filter(\"user =\",db.get(account)).get()\n if entry is None:\n session[\"message\"] = \"Sorry! This book isn't public-access, and you don't have it in your library.\"\n handler.redirect(\"/message\")\n\ndef respondWithMessage(handler, message):\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"message\" : message\n }\n path = os.path.join(os.path.dirname(__file__), 'html/message.html')\n handler.response.out.write(template.render(path, template_values))\n\nclass About(webapp.RequestHandler):\n def get(self):\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n }\n path = os.path.join(os.path.dirname(__file__), 'html/about.html')\n self.response.out.write(template.render(path, template_values))\n\nclass Message(webapp.RequestHandler):\n def get(self):\n session = get_current_session()\n message = session.get(\"message\")\n respondWithMessage(self, message)\n\nclass Main(webapp.RequestHandler):\n def get(self):\n session = get_current_session()\n user = users.get_current_user()\n if user:\n account = db.GqlQuery(\"SELECT * FROM Account WHERE googleUserID = :1\", user.user_id()).get()\n if account is None:\n account_key = session.get(\"account\")\n account = None if account_key is None else db.get(account_key)\n if account is None:\n account = model.Account(googleUserID = user.user_id(), googleEmail = user.email())\n account.put()\n elif account.googleUserID is None:\n account.googleUserID = user.user_id()\n account.put()\n session[\"account\"] = account.key()\n\n account_key = session.get(\"account\")\n account = None if account_key is None else db.get(account_key)\n if account is None:\n epubs = model.ePubFile.all().filter(\"license IN\",[\"Public Domain\",\"Creative Commons\"])\n show = []\n for epub in epubs:\n if random.randint(0,1)==1:\n show.append(epub)\n if len(show)==3:\n break\n for epub in epubs:\n if len(show)==3:\n break\n if random.randint(0,1)==1:\n show.append(epub)\n template_values = {\n \"epubs\" : show,\n \"current_user\" : get_current_session().get(\"account\"),\n \"login_url\" : users.create_login_url(\"/\")\n }\n path = os.path.join(os.path.dirname(__file__), 'html/index.html')\n self.response.out.write(template.render(path, template_values))\n else:\n self.redirect('/list')\n\nclass LogOut(webapp.RequestHandler):\n def get(self):\n session = get_current_session()\n session.terminate()\n if users.get_current_user():\n self.redirect(users.create_logout_url(\"/\"))\n else:\n self.redirect(\"/\")\n\nclass UploadForm(webapp.RequestHandler):\n def get(self):\n enforce_login(self)\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"upload_url\" : blobstore.create_upload_url('/upload_complete')\n }\n path = os.path.join(os.path.dirname(__file__), 'html/upload.html')\n 
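        # NOTE (editorial aside, hedged; not part of the original file):
        # enforce_login() above issues a redirect but cannot halt the calling
        # handler, so the rest of this method still runs for anonymous users.
        # A minimal sketch of a guard the callers could act on, assuming the
        # same gaesessions helpers used elsewhere in this file:
        #
        #     def enforce_login(handler):
        #         if get_current_session().get("account") is None:
        #             get_current_session()["message"] = "Please log in first"
        #             handler.redirect("/message")
        #             return False
        #         return True
        #
        #     # usage at the top of a handler:  if not enforce_login(self): return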
self.response.out.write(template.render(path, template_values))\n\nclass UploadHandler(blobstore_handlers.BlobstoreUploadHandler):\n def post(self):\n enforce_login(self)\n upload_files = self.get_uploads('file') # 'file' is file upload field in the form\n blob_info = upload_files[0]\n\n epub = model.ePubFile(blob = blob_info, blob_key = blob_info.key())\n epub.put()\n entry = model.LibraryEntry(epub = epub, user = get_current_session().get(\"account\"))\n entry.put()\n \n unpacker = unpack.Unpacker()\n existing, error = unpacker.unpack(epub)\n\n if error is None:\n epub_key = epub.key() if existing is None else existing.key()\n logging.info(\"Indexing epub with key %s\" % epub_key)\n taskqueue.add(queue_name = 'index', url='/index', countdown=2, params={\n 'key':epub_key,\n 'user':get_current_session().get(\"account\")\n })\n epub.get_cover()\n self.redirect(\"/book/\"+str(epub.key().id()))\n else:\n db.delete(entry)\n blobstore.delete(epub.blob.key())\n db.delete(epub)\n error = \"Invalid EPUB file\" if error.find(\"File is not a zip\")>0 else error\n respondWithMessage(self, \"Upload error: \"+error)\n\nclass UnpackInternal(webapp.RequestHandler):\n def get(self):\n key = self.request.get('key');\n epub = db.get(key)\n unpacker = unpack.Unpacker()\n unpacker.unpack_internal(epub)\n\nclass Index(webapp.RequestHandler):\n def get(self):\n if not users.is_current_user_admin():\n self.response.out.write(\"No\")\n return\n return self.post()\n\n def post(self):\n user = get_current_session().get(\"account\")\n user = self.request.get('user') if user is None else str(user)\n key = self.request.get('key')\n epub = db.get(key)\n if epub is None:\n logging.info(\"Unable to find epub with key %s\" % key)\n return\n unpacker = unpack.Unpacker()\n unpacker.index_epub(epub, \"private\", user)\n if epub.license == \"Public Domain\" or epub.license == \"Creative Commons\":\n unpacker.index_epub(epub, \"public\")\n \nclass List(webapp.RequestHandler):\n def get(self):\n account = get_current_session().get(\"account\")\n public = account is None or self.request.get('show')==\"public\"\n if public:\n epubs = model.ePubFile.all().filter(\"license IN\",[\"Public Domain\",\"Creative Commons\"])\n else:\n epubs = []\n entries = model.LibraryEntry.all().filter(\"user =\",db.get(account))\n for entry in entries:\n epubs.append(entry.epub)\n\n sort = self.request.get('sort')\n sort = \"author\" if sort is None or len(sort.strip())==0 else sort\n last = self.request.get('last')\n if sort==\"author\":\n epubs = sorted(epubs, key = lambda epub:epub.creator)\n epubs = reversed(epubs) if last==\"author\" else epubs\n if sort==\"title\":\n epubs = sorted(epubs, key = lambda epub:epub.title)\n epubs = reversed(epubs) if last==\"title\" else epubs\n if sort==\"date\":\n epubs = sorted(epubs, key = lambda epub:epub.timeCreated)\n epubs = reversed(epubs) if last==\"date\" else epubs\n\n results = []\n idx = 0\n for epub in epubs:\n results.append({ 'epub' : epub, 'third' : (idx+1)%3==0 })\n idx+=1\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"upload_url\" : blobstore.create_upload_url('/upload_complete'),\n \"results\" : None if len(results)==0 else results,\n \"show\" : \"public\" if public else \"own\",\n \"sort\" : None if sort==last else sort\n }\n path = os.path.join(os.path.dirname(__file__), 'html/books.html')\n self.response.out.write(template.render(path, template_values))\n\nclass Manifest(blobstore_handlers.BlobstoreDownloadHandler):\n def get(self):\n key = 
self.request.get('key')\n epub = db.get(key)\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"title\" : epub.blob.filename,\n \"key\" : key,\n \"id\" : epub.key().id(),\n \"files\" : epub.internals(),\n \"contents\" : False\n }\n path = os.path.join(os.path.dirname(__file__), 'html/contents.html')\n self.response.out.write(template.render(path, template_values))\n\nclass Contents(blobstore_handlers.BlobstoreDownloadHandler):\n def get(self):\n components = self.request.path.split(\"/\")\n id = urllib.unquote_plus(components[2])\n epub = model.ePubFile.get_by_id(long(id))\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"id\" : id,\n \"key\" : epub.key(),\n \"title\" : epub.title,\n \"cover_path\" : epub.cover_path,\n \"description\" : epub.description,\n \"files\" : epub.internals(only_chapters = True),\n \"contents\" : True\n }\n path = os.path.join(os.path.dirname(__file__), 'html/contents.html')\n self.response.out.write(template.render(path, template_values))\n\nclass Download(blobstore_handlers.BlobstoreDownloadHandler):\n def get(self):\n key = self.request.get('key')\n epub = db.get(key)\n enforce_rights(self, epub)\n self.send_blob(epub.blob, save_as = True)\n\nclass View(webapp.RequestHandler):\n def get(self):\n components = self.request.path.split(\"/\")\n if len(components)>1:\n id = components[2]\n epub = model.ePubFile.get_by_id(long(id))\n enforce_rights(self, epub)\n if len(components)<4:\n self.redirect(\"/book/\"+id)\n return\n path = urllib.unquote_plus(\"/\".join(components[3:]))\n internal = epub.internals().filter(\"path = \",path).get()\n renderer = unpack.Unpacker()\n self.response.headers['Content-Type'] = renderer.contentHeader(internal)\n self.response.out.write(renderer.content(internal))\n\nclass Search(webapp.RequestHandler):\n def get(self):\n return self.post()\n\n def post(self):\n try:\n q = self.request.get('q')\n include = self.request.get('include')\n logging.info(\"Searching for \"+q)\n query = \"(name:%s OR html:%s)\" % (q,q)\n book = self.request.get('book_filter')\n query = \"book:%s AND %s\" % (book, query) if book is not None and len(book.strip())>0 else query\n sort_opts = search.SortOptions(match_scorer=search.MatchScorer())\n opts = search.QueryOptions(limit = 100, snippeted_fields = ['html'], sort_options = sort_opts)\n results = []\n for indexName in [\"private\", \"public\"]:\n if include is not None and len(include.strip())>0 and include.find(indexName)==-1:\n results.append({'count' : -1, 'results' : [], 'show' : False})\n continue\n index_results = []\n index = search.Index(indexName)\n active_q = \"owners:%s AND %s\" % (get_current_session().get(\"account\"), query) if indexName==\"private\" else query\n search_query = search.Query(query_string = active_q, options=opts)\n search_results = index.search(search_query)\n for doc in search_results:\n internal = db.get(doc.doc_id)\n if internal is not None:\n logging.info(\"Got expressions %s\" % doc.expressions)\n index_results.append({ \"snippets\" : doc.expressions, \"internal\" : internal })\n results.append({'count' : search_results.number_found, 'results' : index_results, 'show' : True})\n \n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"private_results\" : results[0]['results'],\n \"private_count\" : results[0]['count'],\n \"private_show\" : results[0]['show'],\n \"public_results\" : results[1]['results'],\n \"public_count\" : results[1]['count'],\n \"public_show\" : 
results[1]['show']\n }\n path = os.path.join(os.path.dirname(__file__), 'html/search_results.html')\n self.response.out.write(template.render(path, template_values))\n except search.Error:\n respondWithMessage(self, \"Search error\")\n\nclass Share(webapp.RequestHandler):\n def post(self):\n quote = model.Quote(\n epub = model.ePubFile.get_by_id(long(self.request.get('epub'))),\n file = db.get(self.request.get('file')),\n html = db.Text(self.request.get('html')),\n user = get_current_session().get(\"account\")\n )\n quote.put()\n unpacker = unpack.Unpacker()\n unpacker.index_quote(quote)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write('{\"result\":\"ok\",\"url\":\"/quote/%s\"}' % quote.key().id())\n\nclass Quotes(webapp.RequestHandler):\n def get(self):\n user = get_current_session().get(\"account\")\n quotes = model.Quote.all().filter(\"user = \", user).order(\"epub\")\n results = []\n for quote in quotes:\n html = re.sub('<[^<]+?>', '', quote.html)\n words = html.split(\" \")\n words = words[0:6] if len(words) > 7 else words\n text = \" \".join(words)\n results.append({ \"title\" : quote.epub.title, \"key\" : quote.key(), \"text\" : text})\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"quotes\" : results\n }\n path = os.path.join(os.path.dirname(__file__), 'html/quotes.html')\n self.response.out.write(template.render(path, template_values))\n\nclass Quote(webapp.RequestHandler):\n def get(self):\n components = self.request.path.split(\"/\")\n id = urllib.unquote_plus(components[2])\n quote = model.Quote.get_by_id(long(id))\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"quote\" : quote\n }\n path = os.path.join(os.path.dirname(__file__), 'html/quote.html')\n self.response.out.write(template.render(path, template_values))\n\n\nclass Edit(webapp.RequestHandler):\n def get(self):\n components = self.request.path.split(\"/\")\n id = urllib.unquote_plus(components[2])\n epub = model.ePubFile.get_by_id(long(id))\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"admin\" : users.is_current_user_admin(),\n \"edit\" : epub.entry_count()<=1,\n \"epub\" : epub,\n \"pr_license\" : \" selected\" if epub.license==\"Private\" else \"\",\n \"pd_license\" : \" selected\" if epub.license==\"Public Domain\" else \"\",\n \"cc_license\" : \" selected\" if epub.license==\"Creative Commons\" else \"\",\n }\n path = os.path.join(os.path.dirname(__file__), 'html/metadata.html')\n self.response.out.write(template.render(path, template_values))\n\n def post(self):\n enforce_login(self)\n if self.request.get('license') is not None and not users.is_current_user_admin():\n self.redirect(\"/\")\n epub = db.get(self.request.get('epub_key'))\n if not users.is_current_user_admin() and epub.entry_count()>1:\n self.redirect(\"/\")\n epub.language = self.request.get('language')\n epub.title = self.request.get('title')\n epub.creator = self.request.get('creator')\n epub.publisher = self.request.get('publisher')\n epub.rights = self.request.get('rights')\n epub.contributor = self.request.get('contributor')\n epub.identifier = self.request.get('identifier')\n epub.description = self.request.get('description')\n epub.date = self.request.get('date')\n\n license = self.request.get('license')\n if epub.license != license:\n if license==\"Public Domain\" or license==\"Creative Commons\":\n unpacker = unpack.Unpacker()\n unpacker.index_epub(epub, \"public\")\n else:\n index = 
search.Index(\"public\")\n opts = search.QueryOptions(limit = 1000, ids_only = True)\n query = search.Query(query_string = \"book:%s\" % epub.key(), options=opts)\n docs = index.search(query)\n for doc in docs:\n index.remove(doc.doc_id)\n\n epub.license = self.request.get('license')\n epub.put()\n self.redirect(\"/book/\"+str(epub.key().id()))\n\nclass Account(webapp.RequestHandler):\n def get(self):\n enforce_login(self)\n session = get_current_session()\n account_key = session.get(\"account\")\n account = None if account_key is None else db.get(account_key)\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"account\" : account,\n \"fbName\" : \"n/a\" if account.facebookInfo is None else json.loads(account.facebookInfo)[\"name\"]\n }\n path = os.path.join(os.path.dirname(__file__), 'html/account.html')\n self.response.out.write(template.render(path, template_values))\n\n def post(self):\n enforce_login(self)\n self.response.out.write(\"Change account\")\n\nclass Request(webapp.RequestHandler):\n def get(self):\n enforce_login(self)\n key = self.request.get('key')\n template_values = {\n \"current_user\" : get_current_session().get(\"account\"),\n \"epub_key\" : key,\n }\n path = os.path.join(os.path.dirname(__file__), 'html/request.html')\n self.response.out.write(template.render(path, template_values))\n\n def post(self):\n enforce_login(self)\n epub_key = self.request.get('epub_key')\n public_request = model.PublicRequest(\n epub = db.get(epub_key),\n user = db.get(get_current_session().get(\"account\")),\n )\n public_request.supporting_data = self.request.get('support').replace(\"\\n\",\"
\")\n public_request.put()\n respondWithMessage(self, \"Thank you! We have received your request.\")\n\nclass Delete(webapp.RequestHandler):\n def get(self):\n confirm = self.request.get('confirm')\n if confirm!=\"true\":\n return\n epub_key = self.request.get('key')\n epub = db.get(epub_key)\n account = get_current_session().get(\"account\")\n entry = model.LibraryEntry.all().filter(\"epub = \",epub).filter(\"user =\",db.get(account)).get()\n if entry is not None:\n db.delete(entry)\n if epub.entry_count()==0:\n for indexName in [\"private\",\"public\"]:\n index = search.Index(indexName)\n opts = search.QueryOptions(limit = 1000, ids_only = True)\n query = search.Query(query_string = \"book:%s\" % epub_key, options=opts)\n docs = index.search(query)\n for doc in docs:\n index.remove(doc.doc_id)\n blobstore.delete(epub.blob.key())\n db.delete(epub)\n\n self.redirect('/list')\n else:\n self.response.out.write(\"Not permitted\")\n\nclass DeleteQuote(webapp.RequestHandler):\n def get(self):\n confirm = self.request.get('confirm')\n if confirm!=\"true\":\n return\n quote_key = self.request.get('key')\n quote = db.get(quote_key)\n account = get_current_session().get(\"account\")\n if quote.user.key() == account:\n db.delete(quote)\n search.Index(\"quotes\").remove(quote_key)\n self.redirect('/quotes')\n else:\n self.response.out.write(\"Not permitted\")\n\nclass Clear(webapp.RequestHandler):\n def get(self):\n if not users.is_current_user_admin():\n self.response.out.write(\"No\")\n return\n for indexName in [\"private\",\"public\",\"chapters\"]:\n index = search.Index(indexName)\n for doc in index.list_documents(limit=1000, ids_only=True):\n index.remove(doc.doc_id)\n\napp = webapp.WSGIApplication([\n ('/', Main),\n ('/about', About),\n ('/message', Message),\n ('/logout', LogOut),\n ('/upload', UploadForm),\n ('/upload_complete', UploadHandler),\n ('/index', Index),\n ('/books', List),\n ('/list', List),\n ('/unpack_internal', UnpackInternal),\n ('/view/.*', View),\n ('/book/.*', Contents),\n ('/manifest', Manifest),\n ('/download', Download),\n ('/search', Search),\n ('/request', Request),\n ('/share', Share),\n ('/quote/.*', Quote),\n ('/quotes', Quotes),\n ('/edit', Edit),\n ('/edit/.*', Edit),\n ('/account', Account),\n ('/delete', Delete),\n ('/delete_quote', DeleteQuote),\n ('/clearindexes', Clear),\n ],\n debug=True)\n","repo_name":"rezendi/epubhub","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21396,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"81"} +{"seq_id":"13783844546","text":"\"\"\"\nUSB\n---\n\"\"\"\n\nimport os\nimport logging\nfrom lxk_testlib import rob\nfrom lxk_testlib.utils import ssh\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.DEBUG)\n\ndef configure_thumb_drive(ip_address, value):\n \"\"\"\n PlugIn/Unplug Thumbdrive through rob call.\n\n :Parameters:\n\n 1. ip_address, ````\n 2. 
value, ````, [connect, disconnect]\n\n :Returns: ``None``\n \"\"\"\n LOGGER.debug(ip_address)\n LOGGER.debug(value)\n cmd = \"ssh -o StrictHostKeyChecking=no -l root %s \\\"mkdir -p /var/thumb\\\"\" % (ip_address)\n returnoutput = ssh.execute(ip_address, cmd)\n cmd = \"ssh -o StrictHostKeyChecking=no -l root %s \\\"chmod 777 /var/thumb\\\"\" % (ip_address)\n returnoutput = ssh.execute(ip_address, cmd)\n cmd = '''sendevent USBHost connect '{siis}' event_type ''' + value + \\\n ''' dev_type 0x15 unsupp_conn 0 mount_name '/var/thumb' '''\n LOGGER.debug(cmd)\n return_output = rob.execute(ip_address, cmd)\n LOGGER.debug(return_output)\n if \"rc = PROXY_OK\" in return_output[1]:\n LOGGER.info(\"%s to USB thumb drive\", value)\n else:\n assert False, 'ERROR: Configuring USB thumb drive, {}'.format(value)\n cmd = \"mkdir -p /var/thumb\"\n LOGGER.debug(cmd)\n return_output = ssh.execute(ip_address, cmd)\n LOGGER.debug(return_output)\n","repo_name":"TrellixVulnTeam/CloudAutomation_KDSZ","sub_path":"venv/Lib/site-packages/lxk_testlib/usb.py","file_name":"usb.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39627694134","text":"\"\"\"\nplot chart dynamically\n\"\"\"\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport tkinter as tk\nimport numpy as np\n \nfig = Figure(figsize = (9, 6), facecolor = \"white\")\n \naxis = fig.add_subplot(111)\nx_values = np.array([1,2,3,4,5,6,7])\naxis.plot(x_values, x_values, \"-r\")\naxis.plot(x_values, x_values ** 2, \"--g\")\naxis.grid()\n \nroot = tk.Tk()\n \ntk.Label(root, text = \"x =\" ).grid(row = 0, column = 0)\ntk.Label(root, text = \"y =\" ).grid(row = 1, column = 0)\n \nx = tk.DoubleVar()\ny = tk.DoubleVar()\n \nx_entry = tk.Entry(root, textvariable = x).grid(row = 0, column = 1)\ny_entry = tk.Entry(root, textvariable = y).grid(row = 1, column = 1)\n \n\ndef plotgraphs():\n axis.plot(x.get(), y.get(), \"ko\")\n \n canvas = FigureCanvasTkAgg(fig, master = root)\n canvas._tkcanvas.grid(row = 2, column = 1)\n \ndef newGraph():\n axis.clear()\n canvas = FigureCanvasTkAgg(fig, master = root)\n canvas._tkcanvas.grid(row = 2, column = 1)\n\ntk.Button(root, text = \"Add\", command = newGraph).grid(row = 0, column = 2)\ntk.Button(root, text = \"New Graphs\", command = newGraph).grid(row = 0, column = 2)\ntk.Button(root, text = \"Plot\", command = plotgraphs).grid(row = 1, column = 2)\n \ncanvas = FigureCanvasTkAgg(fig, master = root)\ncanvas._tkcanvas.grid(row = 2, column = 1)\n \nroot.mainloop()","repo_name":"jwang1122/python","sub_path":"src/tkinter/tkinter41.py","file_name":"tkinter41.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16087684285","text":"import os\nimport subprocess\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import datetime\nfrom threading import Thread\n\nN_PROC = 10\n\n\nclass Process(object):\n \"\"\"Collects some process information.\"\"\"\n\n def __init__(self, proc, logfile, running):\n self.proc = proc\n self.logfile = logfile\n self.running = running\n\n\n# Pool for offloading I/O work\nio_pool_exc = ThreadPoolExecutor(max(os.cpu_count() * 5, N_PROC))\n\n\ndef watch_logfile(p):\n \"\"\"Watches a logfile `p.logfile` and counts lines containing ERROR.\n Runs until the process is dead and returns the error count.\n \"\"\"\n with 
open(p.logfile, 'r') as f_in:\n errcnt = 0\n while True:\n line = f_in.readline()\n if not line:\n if not p.running:\n break # Nothing to read and process dead\n time.sleep(1) # wait for more data\n elif \"ERROR\" in line:\n errcnt += 1\n return p.proc.pid, errcnt\n\n\ndef watch_sync(procs):\n \"\"\"Watch all logs generated by the procs synchronously with a thread pool.\n \"\"\"\n results = io_pool_exc.map(watch_logfile, procs)\n counts = [c for _, c in results]\n print(\"Average number of ERRORs:\", sum(counts) / len(counts))\n\n\ndef main():\n # Start some processes generating logs. All without asyncio magic yet. We just want them to write to the\n # files we're gonna watch.\n procs = []\n for i in range(N_PROC):\n logfile = \"logs/log.%03d.tmp\" % i\n with open(logfile, \"wb\") as f_out:\n p = subprocess.Popen(['python', 'log-generator.py', '10'], stdout=f_out)\n procs.append(Process(p, logfile, True))\n print(\"Started %d logger processes:\\n %s\" % (len(procs), '\\n '.join('PID %d: %s' % (p.proc.pid, p.logfile)\n for p in procs)))\n print(\"Starting at %s\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])\n # Run in separate thread so main thread can keep an eye on the processes\n t = Thread(target=lambda: watch_sync(procs))\n t.start()\n for p in procs:\n p.proc.wait()\n p.running = False\n t.join()\n print(\"Finished at %s\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dnswlt/python-asyncio","sub_path":"sync-filewatch.py","file_name":"sync-filewatch.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7651855220","text":"# -*- coding: utf8 -*-\nimport click\n\nfrom fledgling.cli.config import IniFileConfig\n\n\n@click.command()\n@click.option('--overwrite', default=False, show_default=True, type=click.BOOL)\ndef create_config(overwrite):\n \"\"\"\n 创建一份空的配置文件。\n \"\"\"\n config = IniFileConfig()\n config.dump(overwrite)\n","repo_name":"Liutos/fledgling","sub_path":"fledgling/cli/command/create_config.py","file_name":"create_config.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10709570062","text":"from django.shortcuts import redirect\nfrom user_operations.form import CommentValiForm\n\n# Create your views here.\ndef comment(request):\n url_source = request.META['HTTP_REFERER']\n comment_data = dict()\n product_id = request.POST.get('product',None)\n content = request.POST.get('content',None)\n comment_data['user'] = request.user.id\n comment_data['content'] = content\n comment_data['product'] = product_id\n data = CommentValiForm(comment_data)\n if not data.is_valid():\n return redirect(url_source)\n data.save()\n return redirect(url_source)","repo_name":"wyu0430/shop","sub_path":"user_operations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13340083401","text":"from decimal import Decimal\n\n\ndef calculated_percent_diff(base_price: Decimal, new_price: Decimal) -> Decimal:\n \"\"\" 0 means price is lower or whatever \"\"\"\n decimal_zero = Decimal(0)\n\n if base_price >= new_price:\n return decimal_zero\n\n diff = new_price - base_price\n\n return Decimal(diff * 100 / base_price)\n\n\ndef truncate_price_or_qty(number: Decimal, digits: int) -> Decimal:\n 
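    # NOTE (editorial aside, hedged; not part of the original file):
    # str(number) contains no "." for integral Decimals such as Decimal("5"),
    # so the .index(".") call below raises ValueError. A sketch of a safer
    # variant using only the stdlib decimal module (hypothetical helper name):
    #
    #     from decimal import Decimal, ROUND_DOWN
    #
    #     def truncate(number: Decimal, digits: int) -> Decimal:
    #         quantum = Decimal(1).scaleb(-digits)  # digits=2 -> Decimal("0.01")
    #         return number.quantize(quantum, rounding=ROUND_DOWN)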
str_number = str(number)\n i = str_number.index(\".\")\n return Decimal(str_number[:i + digits + 1])\n","repo_name":"mateusz-szczyrzyca/simple-binance-ta-bot","sub_path":"common/pricing.py","file_name":"pricing.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"10110146690","text":"import itertools\nimport sys\n\nfrom Query.Plan import Plan\nfrom Query.Operators.Join import Join\nfrom Query.Operators.Project import Project\nfrom Query.Operators.Select import Select\nfrom Query.Operators.Union import Union\nfrom Query.Operators.TableScan import TableScan\nfrom Utils.ExpressionInfo import ExpressionInfo\nfrom Catalog.Schema import DBSchema\nfrom Query.StatisticsManager import StatisticsManager\n\n\nclass Optimizer:\n \"\"\"\n A query optimization class.\n\n This implements System-R style query optimization, using dynamic programming.\n We only consider left-deep plan trees here.\n\n We provide doctests for example usage only.\n Implementations and cost heuristics may vary.\n\n >>> import Database, shutil, Storage\n >>> db = Database.Database()\n >>> try:\n ... db.createRelation('department', [('did', 'int'), ('eid', 'int')])\n ... except ValueError:\n ... pass\n >>> try:\n ... db.createRelation('employee', [('id', 'int'), ('age', 'int'), ('name', 'char(3)')])\n ... except ValueError:\n ... pass\n >>> try:\n ... db.createRelation('salarys', [('sid', 'int'), ('salary', 'int')])\n ... except ValueError:\n ... pass\n >>> try:\n ... db.createRelation('work', [('wid', 'int'), ('ewid', 'int')])\n ... except ValueError:\n ... pass\n\n # Populate relation\n >>> schema = db.relationSchema('employee')\n >>> for tup in [schema.pack(schema.instantiate(i, 2*i, 'e' + str(i))) for i in range(20)]:\n ... _ = db.insertTuple(schema.name, tup)\n ...\n\n >>> schema = db.relationSchema('department')\n >>> for tup in [schema.pack(schema.instantiate(i, 4*i)) for i in range(20)]:\n ... _ = db.insertTuple(schema.name, tup)\n ...\n\n >>> schema = db.relationSchema('salarys')\n >>> for tup in [schema.pack(schema.instantiate(i, 2*i)) for i in range(20)]:\n ... _ = db.insertTuple(schema.name, tup)\n ...\n\n >>> schema = db.relationSchema('work')\n >>> for tup in [schema.pack(schema.instantiate(i, 2*i)) for i in range(20)]:\n ... 
_ = db.insertTuple(schema.name, tup)\n ...\n\n # >>> query6 = db.query().fromTable('employee').join(\\\n # db.query().fromTable('department').select({'eid':('eid','int')}),\\\n # method='block-nested-loops', expr='id == eid').where('age > 0').join(\\\n # db.query().fromTable('salarys'),\\\n # method='block-nested-loops', expr='sid == id').where('id > 0').select({'id':('id', 'int')})\\\n # .union(\\\n # db.query().fromTable('employee').join(\\\n # db.query().fromTable('department').select({'eid':('eid','int')}),\\\n # method='block-nested-loops', expr='id == eid').where('age > 0').join(\\\n # db.query().fromTable('salarys'),\\\n # method='block-nested-loops', expr='sid == id').where('id > 0').select({'id':('id', 'int')})\\\n # ).finalize()\n\n # >>> print(query6.explain())\n # >>> q6results = [query6.schema().unpack(tup) for page in db.processQuery(query6) for tup in page[1]]\n # >>> print([tup for tup in q6results])\n #\n # >>> query6 = db.optimizer.optimizeQuery(query6)\n #\n # >>> print(query6.explain())\n # >>> q6results = [query6.schema().unpack(tup) for page in db.processQuery(query6) for tup in page[1]]\n # >>> print([tup for tup in q6results])\n\n >>> query7 = db.query().fromTable('employee').join(\\\n db.query().fromTable('department').select({'eid':('eid','int')}),\\\n method='block-nested-loops', expr='id == eid').join(\\\n db.query().fromTable('salarys'),\\\n method='block-nested-loops', expr='sid == id').where('sid > 0').select({'age':('age', 'int')}).finalize()\n\n >>> query7.sample(1.0)\n >>> print(query7.explain())\n >>> q7results = [query7.schema().unpack(tup) for page in db.processQuery(query7) for tup in page[1]]\n >>> print([tup for tup in q7results])\n\n >>> query8 = db.query().fromTable('employee').join(\\\n db.query().fromTable('department').select({'eid':('eid','int')}),\\\n method='block-nested-loops', expr='id == eid').join(\\\n db.query().fromTable('salarys'),\\\n method='block-nested-loops', expr='sid == id').where('sid > 0').select({'name':('name', 'char(3)'), 'age':('age', 'int')}).finalize()\n\n >>> query8 = db.optimizer.optimizeQuery(query8)\n >>> query8.sample(1.0)\n >>> print(query8.explain())\n >>> q8results = [query8.schema().unpack(tup) for page in db.processQuery(query8) for tup in page[1]]\n >>> print([tup for tup in q8results])\n\n ### SELECT * FROM employee JOIN department ON id = eid\n # >>> try:\n # ... db.createRelation('student', [('sid', 'int'), ('year', 'int')])\n # ... db.createRelation('course', [('cid', 'int'), ('level', 'int')])\n # ... except ValueError:\n # ... 
pass\n # >>> query7 = db.query().fromTable('employee').join( \\\n # db.query().fromTable('department'), \\\n # method='block-nested-loops', expr='id == eid').where('age > 5 or eid > 3').join( \\\n # db.query().fromTable('student'), \\\n # method = 'block-nested-loops', expr = 'eid == sid').join( \\\n # db.query().fromTable('course'), \\\n # method = 'block-nested-loops', expr = 'year == level').select({'id': ('id', 'int'), 'age':('age','int')}).finalize()\n #\n # >>> print(query7.explain() )\n #\n # >>> print( db.optimizer.optimizeQuery(query7).explain() )\n\n ## Clean up the doctest\n >>> shutil.rmtree(Storage.FileManager.FileManager.defaultDataDir)\n \"\"\"\n\n def __init__(self, db):\n self.db = db\n self.statsCache = {}\n self.costCache = {}\n self.statsMgr = StatisticsManager(db)\n\n # Caches the cost of a plan computed during query optimization.\n def addPlanCost(self, plan, cost):\n if isinstance(plan, Plan):\n key = plan.getPlanKey()\n self.costCache[key] = cost\n else:\n self.costCache[plan] = cost\n\n # Checks if we have already computed the cost of this plan.\n def getPlanCost(self, plan):\n key = plan.getPlanKey()\n if key not in self.costCache:\n plan.sample(10.0)\n cost = plan.cost(estimated=True)\n self.addPlanCost(key, cost)\n return self.costCache[key]\n\n # Given a plan, return an optimized plan with both selection and\n # projection operations pushed down to their nearest defining relation\n # This does not need to cascade operators, but should determine a\n # suitable ordering for selection predicates based on the cost model below.\n def pushdownOperators(self, plan):\n \"\"\"\n push down Select and Project operators\n :param plan:\n :return: Plan\n \"\"\"\n return Plan(root=self.pushdownOperator(plan.root))\n\n def pushdownOperator(self, op):\n \"\"\"\n Push down Select and Project\n :param op: current operator\n :return: an operator that its children has been processed by the pushdownOperator()\n \"\"\"\n\n if op.operatorType() == \"TableScan\":\n # base relation\n return op\n\n elif op.operatorType() == \"Select\":\n # need to pushdown this operator close to its respective base relation\n return self.pushdownSelect(op)\n\n elif op.operatorType() == \"Project\":\n # need to pushdown this operator close to its respective base relation\n return self.pushdownProject(op)\n\n elif op.operatorType() in [\"Sort\", \"GroupBy\"]:\n # no need to push down these operators, but need to check their children\n op.subPlan = self.pushdownOperator(op.subPlan)\n return op\n\n elif op.operatorType() == \"UnionAll\" or \"Join\" in op.operatorType():\n # no need to push down these operators, but need to check their children (left and right children)\n op.lhsPlan = self.pushdownOperator(op.lhsPlan)\n op.rhsPlan = self.pushdownOperator(op.rhsPlan)\n return op\n\n def pushdownSelect(self, op):\n # pushdown its child first and then push down the current operator\n op.subPlan = self.pushdownOperator(op.subPlan)\n\n if op.subPlan.operatorType() in [\"TableScan\", \"GroupBy\", \"Project\"]:\n return op\n\n if op.subPlan.operatorType() == \"Select\":\n # Combine two consecutive Selects\n # According to the \"Equivalence of Expression\" on Lecture 8, page 24,\n # we can combine two consecutive Select operators and the result will\n # be the same.\n # i.e. 
Project_{predicateA ^ predicateB}(E) = Project_predicateA(Project_predicateB(E))\n op.selectExpr = \"(%s) and (%s)\" % (op.selectExpr, op.subPlan.selectExpr)\n op.subPlan = op.subPlan.subPlan\n return op\n\n elif op.subPlan.operatorType() == \"Sort\":\n # Select can be push down to the button of the Sort.\n # After push down the Select, we still have to try to push down it further.\n topOp = op.subPlan\n op.subPlan = op.subPlan.subPlan\n topOp.subPlan = self.pushdownOperator(op)\n return topOp\n\n elif op.subPlan.operatorType() == \"UnionAll\":\n # push select to the button of a union to reduce the union complexity\n # because the schemas of lhs and rhs are the same, we can use the original selectExpr for two subPlans\n topOp = op.subPlan\n lsubPlan = Select(op.subPlan.lhsPlan, op.selectExpr)\n rsubPlan = Select(op.subPlan.rhsPlan, op.selectExpr)\n topOp.lhsPlan = self.pushdownOperator(lsubPlan)\n topOp.rhsPlan = self.pushdownOperator(rsubPlan)\n return topOp\n\n elif \"Join\" in op.subPlan.operatorType():\n # First decompose the select expression into predicates.\n # Then check each predicate belongs to which subPlan.\n # For those remaining predicates (not assigned to a subPlan), create a new Select operator containing\n # the remaining predicates, and assign the new Select to the top of the Join operator.\n exprs = ExpressionInfo(op.selectExpr).decomposeCNF()\n\n lhsExprs = []\n rhsExprs = []\n remainExprs = []\n\n lhsAttrs = set(op.subPlan.lhsPlan.schema().fields)\n rhsAttrs = set(op.subPlan.rhsPlan.schema().fields)\n\n # dispatch predicates to subPlans that have the attribute\n for e in exprs:\n attrs = ExpressionInfo(e).getAttributes()\n add = False\n if attrs.issubset(lhsAttrs):\n lhsExprs.append(e)\n add = True\n if attrs.issubset(rhsAttrs):\n rhsExprs.append(e)\n add = True\n if not add:\n remainExprs.append(e)\n\n if lhsExprs:\n newLhsExpr = ' and '.join(lhsExprs)\n lhsSelect = Select(op.subPlan.lhsPlan, newLhsExpr)\n op.subPlan.lhsPlan = self.pushdownOperator(lhsSelect)\n\n if rhsExprs:\n newRhsExpr = ' and '.join(rhsExprs)\n rhsSelect = Select(op.subPlan.rhsPlan, newRhsExpr)\n op.subPlan.rhsPlan = self.pushdownOperator(rhsSelect)\n\n # deal with the remaining predicates\n if remainExprs:\n newExpr = ' and '.join(remainExprs)\n return Select(op.subPlan, newExpr)\n else:\n return op.subPlan\n\n def pushdownProject(self, op):\n # pushdown op's child first and then push down the current operator\n op.subPlan = self.pushdownOperator(op.subPlan)\n\n if op.subPlan.operatorType() in [\"GroupBy\", \"TableScan\", \"Sort\"]:\n return op\n\n if op.subPlan.operatorType() == \"Project\":\n # Two consecutive Projects, remove duplicate.\n # We cannot directly discard the lower Project because the upper Project might use\n # other new attributes (not original attributes) generated from the lower Project.\n # If the upper Project contains attributes that is the subset of the attributes of\n # the lower Project, then we can discard the lower Project.\n\n opAttr = set(op.schema().fields)\n subPlanAttr = set(op.subPlan.schema().fields)\n if opAttr.issubset(subPlanAttr):\n op.subPlan = op.subPlan.subPlan\n return self.pushdownProject(op)\n else:\n return op\n\n elif op.subPlan.operatorType() == \"Select\":\n # If the attributes of Select is a subset of that of Project, then we can swap\n # the order of Project and Select and pushdown the Project operator.\n\n projectAttrs = set(op.schema().fields)\n selectAttrs = ExpressionInfo(op.subPlan.selectExpr).getAttributes()\n if 
selectAttrs.issubset(projectAttrs):\n topOp = op.subPlan\n op.subPlan = op.subPlan.subPlan\n topOp.subPlan = self.pushdownOperator(op)\n return topOp\n else:\n return op\n\n elif op.subPlan.operatorType() == \"UnionAll\":\n # Because the attributes of the Project must be a subset of that of the UnionAll,\n # we can directly pushdown the Project to the subPlans of the UnionAll.\n\n topOp = op.subPlan\n lhsProject = Project(op.subPlan.lhsPlan, op.projectExprs)\n rhsProject = Project(op.subPlan.rhsPlan, op.projectExprs)\n topOp.lhsPlan = self.pushdownOperator(lhsProject)\n topOp.rhsPlan = self.pushdownOperator(rhsProject)\n return topOp\n\n elif \"Join\" in op.subPlan.operatorType():\n # First we extract all the attributes of the Project and the two subPlans of the Join.\n # Then, we check whether the attributes belong to which subPlan and assign it to the\n # Project expression.\n # For those attributes that have not been assigned, we keep a new Project containing\n # those remained attributes above the Join.\n lhsAttrs = set(op.subPlan.lhsPlan.schema().fields)\n rhsAttrs = set(op.subPlan.rhsPlan.schema().fields)\n lhsProjectExprs = {}\n rhsProjectExprs = {}\n skipCurrOp = True\n\n joinAttrs = ExpressionInfo(op.subPlan.joinExpr).getAttributes()\n\n for attr in op.projectExprs:\n requiredAttrs = ExpressionInfo(op.projectExprs[attr][0]).getAttributes()\n add = False\n if requiredAttrs.issubset(lhsAttrs):\n lhsProjectExprs[attr] = op.projectExprs[attr]\n add = True\n if requiredAttrs.issubset(rhsAttrs):\n rhsProjectExprs[attr] = op.projectExprs[attr]\n add = True\n if not add:\n skipCurrOp = False\n\n projectAttrs = set(op.projectExprs.keys())\n\n if joinAttrs.issubset(projectAttrs):\n if lhsProjectExprs:\n lhsProject = Project(op.subPlan.lhsPlan, lhsProjectExprs)\n op.subPlan.lhsPlan = self.pushdownOperator(lhsProject)\n if rhsProjectExprs:\n rhsProject = Project(op.subPlan.rhsPlan, rhsProjectExprs)\n op.subPlan.rhsPlan = self.pushdownOperator(rhsProject)\n\n # check if the Project's expressions are assigned to appropriate sub-plans.\n # If one exprs is empty or there are remaining exprs, we still need to keep the\n # original Project operator.\n # If all Project's exprs are assign to two sub-plans, then we can discard this\n # Project operator.\n if lhsProjectExprs and rhsProjectExprs or skipCurrOp:\n return op.subPlan\n else:\n return op\n\n # Returns an optimized query plan with joins ordered via a System-R style\n # dyanmic programming algorithm. 
The plan cost should be compared with the\n # use of the cost model below.\n def pickJoinOrder(self, plan):\n \"\"\"\n Use dynamic programming to find a best join order (System-R).\n First extract base relation (stop at UnionAll because two joins above and under UnionAll cannot be exchanged)\n Perform pickJoinOrder for subPlans under each UnionAll.\n Keep top operators before the first Join and connect it back after reordering Joins\n \"\"\"\n # Extract all base relations, along with any unary operators immediately above.\n # UnionAll will be seen as a base relation\n baseRelations = set(plan.joinSources)\n\n # Perform pickJoinOrder under UnionAll first.\n for op in baseRelations:\n while op and not isinstance(op, TableScan):\n if isinstance(op, Union):\n op.lhsPlan = self.pickJoinOrder(Plan(root=op.lhsPlan)).root\n op.rhsPlan = self.pickJoinOrder(Plan(root=op.rhsPlan)).root\n break\n op = op.subPlan\n\n # Keep the top operators before the first Join.\n # After picking the join order, connect the top operators back to the new operation tree.\n end = Select(None, \"\")\n end.subPlan = plan.root\n while end:\n if isinstance(end, TableScan) or isinstance(end.subPlan, Join):\n break\n elif isinstance(end.subPlan, Union):\n # When encounter Union before Joins, return the original plan because we already\n # perform pickJoinOrder the the subPlans of UnionAll.\n return plan\n end = end.subPlan\n\n # Extract all joins in original plan, they serve as the set of joins actually necessary.\n # Since we already perform pickJoinOrder for the sub-plans of UnionAlls, here we only\n # perform pickJoinOrder for those join above the UnionAlls.\n joins = set(plan.joinBeforeUnion)\n\n # For dynamic programming\n dpPlan = {}\n\n # Establish optimal access paths.\n for relation in baseRelations:\n dpPlan[frozenset((relation,))] = relation\n\n # Calculate cost using dynamic programming\n for i in range(2, len(baseRelations) + 1):\n for subset in itertools.combinations(baseRelations, i):\n\n # Build the set of candidate joins.\n candidateJoins = set()\n for candidateRelation in subset:\n candidateJoins.add((dpPlan[frozenset(tupleWithout(subset, candidateRelation))],\n dpPlan[frozenset((candidateRelation,))]))\n\n # Find the current best join plan and store it for next iteration\n dpPlan[frozenset(subset)] = self.findBestJoin(candidateJoins, joins)\n\n # Connect the operators above the first join\n end.subPlan = dpPlan[frozenset(baseRelations)]\n\n # Reconstruct the best plan, prepare and return.\n bestPlan = Plan(root=plan.root)\n bestPlan.prepare(self.db)\n return bestPlan\n\n def findBestJoin(self, candidates, joins):\n bestCost = sys.maxsize\n bestPlan = None\n\n for lhs, rhs in candidates:\n relevantExpr = None\n\n # Find the joinExpr corresponding to the current join candidate. 
If there is none, it's a\n # cartesian product.\n for join in joins:\n attrs = ExpressionInfo(join.joinExpr).getAttributes()\n hashJoin = False\n\n rhsAttr = set(rhs.schema().fields).intersection(attrs)\n lhsAttr = set(lhs.schema().fields).intersection(attrs)\n if lhsAttr and rhsAttr:\n relevantExpr = join.joinExpr\n\n # construct relevant schema for hash join\n rhsKeySchema = self.buildKeySchema(\"rhsKey\", rhs.schema().fields, rhs.schema().types, rhsAttr, updateAttr=True)\n lhsKeySchema = self.buildKeySchema(\"lhsKey\", lhs.schema().fields, lhs.schema().types, lhsAttr, updateAttr=False)\n rhsFields = [\"rhsKey_\" + f for f in rhs.schema().fields]\n attrMap = {}\n orgFileds = rhs.schema().fields\n for i in range(len(rhsFields)):\n attrMap[orgFileds[i]] = rhsFields[i]\n\n # Construct a new schema for rhs to prevent from joining two relations that\n # have the same attribute name.\n rhsNewSchema = rhs.schema().rename(\"rhsSchema2\", attrMap)\n\n hashJoin = True\n break\n\n else:\n relevantExpr = 'True'\n\n # We don't use index-join because we don't necessarily have index for the join.\n # Construct a join plan for the current candidate, for each possible join algorithm.\n for algo in [\"nested-loops\", \"block-nested-loops\"]:\n\n if algo != \"hash\":\n testPlan = Plan(root=Join(\n lhsPlan=lhs,\n rhsPlan=rhs,\n method=algo,\n expr=relevantExpr\n ))\n\n elif hashJoin:\n lhsHashFn = \"hash(\" + lhsKeySchema.fields[0] + \") % 8\"\n rhsHashFn = \"hash(\" + rhsKeySchema.fields[0] + \") % 8\"\n\n joinPlan = Join(\n lhsPlan=lhs,\n rhsPlan=rhs,\n method='hash',\n rhsSchema=rhsNewSchema,\n lhsHashFn=lhsHashFn, lhsKeySchema=lhsKeySchema,\n rhsHashFn=rhsHashFn, rhsKeySchema=rhsKeySchema\n )\n testPlan = Plan(root=joinPlan)\n\n else:\n # we don't have enough infor for hash join, so skip the hash join test.\n continue\n\n # Prepare and run the plan in sampling mode, and get the estimated cost.\n testPlan.prepare(self.db)\n cost = self.getPlanCost(testPlan)\n\n # update best plan\n if cost < bestCost:\n bestCost = cost\n bestPlan = testPlan\n\n # Need to return the root operator rather than the plan itself, since it's going back into the\n # table.\n return bestPlan.root\n\n def buildKeySchema(self, name, fields, types, attrs, updateAttr=False):\n keys = []\n for attr in attrs:\n if updateAttr:\n keys.append((name + \"_\" + attr, types[fields.index(attr)]))\n else:\n keys.append((attr, types[fields.index(attr)]))\n return DBSchema(name, keys)\n\n # Optimize the given query plan, returning the resulting improved plan.\n # This should perform operation pushdown, followed by join order selection.\n def optimizeQuery(self, plan):\n pushedDown_plan = self.pushdownOperators(plan)\n joinPicked_plan = self.pickJoinOrder(pushedDown_plan)\n return joinPicked_plan\n\n\ndef tupleWithout(t, x):\n s = list(t)\n s.remove(x)\n return tuple(s)\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"jasonlingo/Database_Systems","sub_path":"dbsys-hw3/Query/Optimizer.py","file_name":"Optimizer.py","file_ext":"py","file_size_in_byte":21004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3024977207","text":"from Prestamos import prestamos\nfrom conexion import Conexion\nfrom beautifultable import BeautifulTable\n\nclass prestamosDAO:\n def __init__(self) -> None:\n pass\n def eliminarPrestamos(self, ID_prestamo):\n \n Conexion.cursor.execute(\"delete from PRESTAMOS where id_prestamo=:1\", [ID_prestamo])\n 
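        # NOTE (editorial aside, hedged; not part of the original file):
        # this method reports success even when no row matched the given id.
        # DB-API cursors (cx_Oracle-style here, given the :1 binds) expose
        # rowcount after execute(), so a sketch of a check could be:
        #
        #     if Conexion.cursor.rowcount == 0:
        #         return "No se encontro un prestamo con ese id"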
Conexion.connection.commit()\n        return \"Se ha eliminado correctamente\"\n    \n\n    def buscarPrestamos(self, ID_Rut):\n        tabla=BeautifulTable()\n        tabla.columns.header=[\"Id Prestamo\",\"Rut\", \"Nombre\", \"id libro\", \"titulo libro\"]\n        for row in Conexion.cursor.execute(\"SELECT ID_PRESTAMO, p.ID_RUT, pe.Nombre, ID_LIBRO, li.titulo FROM PRESTAMOS p FULL OUTER JOIN LIBRO Li on Li.Id_codigo = p.ID_LIBRO FULL OUTER JOIN PERSONA pe on pe.ID_RUT = p.ID_RUT where p.id_rut = :1\", [ID_Rut]):\n            tabla.rows.append(row)\n        if len(tabla.rows)>0:\n            print(tabla)\n        else:\n            print(\"No existen prestamos asociados con este rut\")\n    \n    def insertarPrestamos(self, Prestamos):\n        Conexion.cursor.execute(\"\"\"\n        insert into prestamos (id_prestamo, id_rut, id_Libro) values(:idp,:Prut,:plib)\"\"\",[Prestamos.ID_Prestamo, Prestamos.ID_Rut, Prestamos.ID_Libro])\n        Conexion.connection.commit()\n        return \"Los datos fueron ingresados de forma correcta\"\n    \n    \n    def obtenerPrestamos(self):\n        tabla=BeautifulTable()\n        tabla.columns.header=[\"Id Prestamo\",\"Rut\", \"Nombre\", \"Id Libro\", \"Titulo Libro\"]\n        for row in Conexion.cursor.execute(\"SELECT ID_PRESTAMO, p.ID_RUT, pe.Nombre, ID_LIBRO, li.titulo FROM PRESTAMOS p FULL OUTER JOIN LIBRO Li on Li.Id_codigo = p.ID_LIBRO FULL OUTER JOIN PERSONA pe on pe.ID_RUT = p.ID_RUT where p.id_rut = pe.id_rut order by 1\"):\n            tabla.rows.append(row)\n        if len(tabla.rows)>0:\n            print(tabla)\n        else:\n            print(\"Prestamo no ingresado\")","repo_name":"CarolinaMendoza19/Taller-4-avanzado","sub_path":"PrestamosDAO.py","file_name":"PrestamosDAO.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6726323054","text":"\"\"\"\nCreate a function that computes the sum of the squares of its arguments, for an arbitrary number of arguments\n(hint: *args).\nUse try-except to check whether the arguments passed to the function are valid.\n\"\"\"\n\ndef sum_of_squares(*args):\n    sum_squares = 0\n    for arg in args:\n        try:\n            sum_squares += arg * arg\n        except TypeError:\n            print(f\"Expected type int or float, received {type(arg)} \")\n    return sum_squares\n\ntry:\n    # print(sum_of_squares(1, g, 2, 'e', 4)) # => Undeclared variable name 'g' is not defined\n    print(sum_of_squares(1, 2, 'e', 4)) # => Expected type int or float, received <class 'str'> \n 21\nexcept NameError as er:\n    print(f\"Undeclared variable {er}\")","repo_name":"BogdanChisu/practice_2023_05_29_sda48","sub_path":"ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4911541281","text":"\"\"\"\nRead this first:\n    https://www.geeksforgeeks.org/subset-sum-problem-dp-25/\n\nThis problem, in fact, covers multiple topics, including:\n  * Partition problem, which is available at partition2, partition k\n    more generally. The idea is to find two subsets whose sums are\n    equal, or more generally k subsets that satisfy the condition.\n    https://www.techiedelight.com/partition-problem/\n    https://www.youtube.com/watch?v=7BynUy5ml0I\n    https://www.geeksforgeeks.org/partition-problem-dp-18/\n\n    Then there is the subset-sum problem, which is part of the above problem\n    as well: given a set of numbers and an integer m, you look for a subset\n    whose sum equals m.\n    https://www.techiedelight.com/subset-sum-problem/\n\"\"\"\nfrom typing import List\n\n\ndef partition(array: List[int], n: int):\n    \"\"\"\n    Naive approach to solve the partition2 problem.\n    \"\"\"\n    total = sum(array)\n    if total % 2 != 0:  # because we only have positive integers.\n        return False\n    # return is_subset(array, n, total // 2)\n    return is_subset_dp(array, n, total // 2)\n\n\ndef is_subset(array: List[int], n: int, total: int):\n    \"\"\"\n    Time complexity: O(2**n)\n    Space complexity: O(1)\n    n = len(array)\n    \"\"\"\n    if total == 0:\n        return True\n    if n == 0 and total != 0:\n        return False\n\n    if array[n - 1] > total:\n        return is_subset(array, n - 1, total)\n\n    exclude = is_subset(array, n - 1, total)\n    include = is_subset(array, n - 1, total - array[n - 1])\n    return exclude or include\n\n\ndef is_subset_dp(array, n, total) -> bool:\n    \"\"\"\n    Top-down approach\n    https://www.techiedelight.com/subset-sum-problem/\n    \"\"\"\n    memo = {}\n\n    def is_subset(array, n, total) -> bool:\n        nonlocal memo\n\n        if total == 0:\n            return True\n\n        if n == 0 and total != 0:\n            return False\n\n        key = (n, total)\n\n        if key not in memo:\n            include = is_subset(array, n - 1, total - array[n - 1])\n            exclude = is_subset(array, n - 1, total)\n            memo[key] = include or exclude\n\n        return memo[key]\n\n    return is_subset(array, len(array), total)\n\n\ndef bt_is_subset_dp(array, total) -> bool:\n    \"\"\"\n    Bottom-up approach:\n    https://www.techiedelight.com/subset-sum-problem/\n    \"\"\"\n    # The leading True is for column 0 (an empty subset sums to 0).\n    row = [True] + [False for _ in range(total)]\n    memo = [list(row) for _ in range(len(array) + 1)]\n\n    n = len(array)\n\n    for j in range(1, n + 1):\n        for i in range(1, total + 1):\n            # If we cannot use this element, the answer\n            # is the same as the previous one.\n            if array[j - 1] > i:\n                memo[j][i] = memo[j - 1][i]\n            else:\n                memo[j][i] = memo[j - 1][i] or memo[j - 1][i - array[j - 1]]\n\n    return memo[n][total]\n","repo_name":"GreatBahram/dsa","sub_path":"algorithmic-toolbox/week06/assigment/partition2.py","file_name":"partition2.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35036533865","text":"import psycopg2\nimport csv\n\nconn = psycopg2.connect('host=localhost dbname=winnersdb user=postgres password=1234')\ncur = conn.cursor()\n\ncur.execute(\"\"\"\n    DROP TABLE averagedrive;\n    DROP TABLE totalydsto;\n    DROP TABLE passing;\n    DROP TABLE rushing;\n    DROP TABLE penalties;\n    DROP TABLE misc;\n    DROP TABLE player;\n    DROP TABLE winner;\n    DROP TABLE team;\n\"\"\")\nconn.commit()\n\n# cur.execute(\"\"\"\n#     DELETE FROM averagedrive;\n#     DELETE FROM totalydsto;\n#     DELETE FROM passing;\n#     DELETE FROM rushing;\n#     DELETE FROM penalties;\n#     DELETE FROM misc;\n#     DELETE FROM player;\n#     DELETE FROM winner;\n#     DELETE FROM team;\n# \"\"\")\n# conn.commit()\n\ncur.execute(\"\"\"\n    CREATE TABLE Team (\n        id integer PRIMARY KEY,\n        name text\n    )\n\"\"\")\n\nconn.commit()\n\ncur.execute(\"\"\"\n    CREATE TABLE Winner (\n        year integer PRIMARY KEY,\n        tid integer,\n        
CONSTRAINT fk_team\n FOREIGN KEY(tid)\n REFERENCES Team(id)\n )\n\"\"\")\n\nconn.commit()\n\ncur.execute(\"\"\"\n CREATE TABLE Player (\n id integer PRIMARY KEY,\n name text\n )\n\"\"\")\n\nconn.commit()\n\n'''\npf: Total Points scored by team\nyds: Total yards scored by team\nply: Offensive plays - Includes pass attempts, rush attempts, and sacks\nyp: Yards per offensive play\nturnOver: Team turnovers lost\n'''\ncur.execute(\"\"\"\n CREATE TABLE TotalYdsTO (\n year integer,\n pid integer,\n PRIMARY KEY (year, pid),\n CONSTRAINT fk_Winner\n FOREIGN KEY(year)\n REFERENCES Winner(year),\n CONSTRAINT fk_Player\n FOREIGN KEY(pid)\n REFERENCES Player(id),\n pf integer,\n yds integer,\n ply integer,\n yp float,\n turnOver float\n )\n\"\"\")\n\nconn.commit()\n\n'''\ncmp: Passes completed\natt: Passes attempted\nyds: Yards gained by passing\ntd: Passing touchdowns\nint: Interceptions thrown\nnya: Net yards gained per pass attempt\nfirstdown: 1st downs by passing\n'''\ncur.execute(\"\"\"\n CREATE TABLE Passing (\n year integer,\n pid integer,\n PRIMARY KEY (year, pid),\n CONSTRAINT fk_Winner\n FOREIGN KEY(year)\n REFERENCES Winner(year),\n CONSTRAINT fk_Player\n FOREIGN KEY(pid)\n REFERENCES Player(id),\n cmp integer,\n att integer,\n yds integer,\n td integer,\n int integer,\n nya float,\n firstdown integer\n )\n\"\"\")\n\nconn.commit()\n\n'''\natt: Rushing attempts\nyds: Rushing yards gained\ntd: Rushing touchdowns\nya: Rushing yards per attempt\nfirstdown: First downs by rushing\n'''\ncur.execute(\"\"\"\n CREATE TABLE Rushing (\n year integer,\n pid integer,\n PRIMARY KEY (year, pid),\n CONSTRAINT fk_Winner\n FOREIGN KEY(year)\n REFERENCES Winner(year),\n CONSTRAINT fk_Player\n FOREIGN KEY(pid)\n REFERENCES Player(id),\n att integer,\n yds integer,\n td integer,\n ya float,\n firstdown integer\n )\n\"\"\")\n\nconn.commit()\n\n'''\npen: Penalties committed by team and accepted\nyds: Penalties in yards committed by teams\nfirstdownpy: 1st down by penalty\n'''\ncur.execute(\"\"\"\n CREATE TABLE Penalties (\n year integer,\n pid integer,\n PRIMARY KEY (year, pid),\n CONSTRAINT fk_Winner\n FOREIGN KEY(year)\n REFERENCES Winner(year),\n CONSTRAINT fk_Player\n FOREIGN KEY(pid)\n REFERENCES Player(id),\n pen integer,\n yds integer,\n firstdownpy integer\n )\n\"\"\")\n\nconn.commit()\n\n'''\nstart: Average starting field position\ntime: Average time per drive\nplays: Average # of plays per drive\nyds: Net yards per drive\npts: Average points per drive\n'''\ncur.execute(\"\"\"\n CREATE TABLE AverageDrive (\n year integer,\n pid integer,\n PRIMARY KEY (year, pid),\n CONSTRAINT fk_Winner\n FOREIGN KEY(year)\n REFERENCES Winner(year),\n CONSTRAINT fk_Player\n FOREIGN KEY(pid)\n REFERENCES Player(id),\n start text,\n time text,\n plays float,\n yds float,\n pts float\n )\n\"\"\")\n\nconn.commit()\n\n'''\nfl: Fumbles lost by player\nfirstdown: Total first downs\nnumOfDrives: Number of drives\nscorePercent: % of drives ending in an offensive score\ntoPercent: % of drives ending in an offensive turnover\n'''\ncur.execute(\"\"\"\n CREATE TABLE Misc (\n year integer,\n pid integer,\n PRIMARY KEY (year, pid),\n CONSTRAINT fk_Winner\n FOREIGN KEY(year)\n REFERENCES Winner(year),\n CONSTRAINT fk_Player\n FOREIGN KEY(pid)\n REFERENCES Player(id),\n fl float,\n firstdown integer,\n numOfDrives integer,\n scorePercent float,\n toPercent float\n )\n\"\"\")\n\nconn.commit()\n\n\nwith open('nfl_teams.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n tid = 0\n next(reader)\n for row in reader:\n cur.execute('INSERT 
INTO Team VALUES (%s, %s)',\n (tid, row[1]))\n conn.commit()\n\n tid += 1\n\nwinners = [8, 16, 16, 24, 24, 22, 8, 24, 24, 22, 26, 31, 22, 26, 5, 20, 31, 26, 26, 20, 31, 8, 8, 26, 8, 11, 9, 9, 28, 2, 18, 29, 18, 18, 24, 13, 20, 24, 19, 11, 20, 2, 27, 18, 9, 18, 23, 18, 15, 29, 28, 15]\n\nindex = 0\nfor year in range (1971, 2023):\n cur.execute('INSERT INTO Winner VALUES (%s, %s)',\n (year, winners[index]))\n conn.commit()\n\n index += 1\n\n\nplayers = ['Team Stats', 'Opp. Stats', 'Lg Rank Offense', 'Lg Rank Defense']\n\nfor i in range(0, 4):\n cur.execute('INSERT INTO Player VALUES (%s, %s)',\n (i, players[i]))\n conn.commit()\n\n\n# Fill TotalYdsTO table\nfor i in range(1971, 2023):\n with open(str(i)+'.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n pid = 0\n next(reader)\n\n for row in reader:\n for x in range(0, len(row)):\n if row[x] == '':\n row[x] = -1\n \n cur.execute('INSERT INTO TotalYdsTO VALUES (%s, %s, %s, %s, %s, %s, %s)',\n (i, pid, int(row[1]), int(row[2]), int(row[3]), float(row[4]), int(row[5])))\n conn.commit()\n\n pid +=1\n\n# Fill Passing table\nfor i in range(1971, 2023):\n with open(str(i)+'.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n pid = 0\n next(reader)\n\n for row in reader:\n for x in range(0, len(row)):\n if row[x] == '':\n row[x] = -1\n \n cur.execute('INSERT INTO Passing VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)',\n (i, pid, int(row[8]), int(row[9]), int(row[10]), int(row[11]), int(row[12]), float(row[13]), int(row[14]) ))\n conn.commit()\n\n pid +=1\n\n# Fill Rushing table\nfor i in range(1971, 2023):\n with open(str(i)+'.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n pid = 0\n next(reader)\n\n for row in reader:\n for x in range(0, len(row)):\n if row[x] == '':\n row[x] = -1\n \n cur.execute('INSERT INTO Rushing VALUES (%s, %s, %s, %s, %s, %s, %s)',\n (i, pid, int(row[15]), int(row[16]), int(row[17]), float(row[18]), int(row[19])))\n conn.commit()\n\n pid +=1\n\n# Fill Penalties table\nfor i in range(1971, 2023):\n with open(str(i)+'.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n pid = 0\n next(reader)\n\n for row in reader:\n for x in range(0, len(row)):\n if row[x] == '':\n row[x] = -1\n \n cur.execute('INSERT INTO Penalties VALUES (%s, %s, %s, %s, %s)',\n (i, pid, int(row[20]), int(row[21]), int(row[22])))\n conn.commit()\n\n pid +=1\n\n# Fill AverageDrive table\nfor i in range(2000, 2023):\n with open(str(i)+'.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n pid = 0\n next(reader)\n\n for row in reader:\n for x in range(0, len(row)):\n if row[x] == '':\n row[x] = -1\n \n cur.execute('INSERT INTO AverageDrive VALUES (%s, %s, %s, %s, %s, %s, %s)',\n (i, pid, row[26], row[27], float(row[28]), float(row[29]), float(row[30])))\n conn.commit()\n\n pid +=1\n\n# Fill Misc table\nfor i in range(2000, 2023):\n with open(str(i)+'.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n pid = 0\n next(reader)\n\n for row in reader:\n for x in range(0, len(row)):\n if row[x] == '':\n row[x] = -1\n \n cur.execute('INSERT INTO Misc VALUES (%s, %s, %s, %s, %s, %s, %s)',\n (i, pid, float(row[6]), int(row[7]), int(row[23]), float(row[24]), float(row[25])))\n conn.commit()\n\n pid +=1\n\n# Queries\n\n# Finds the score percent of winning teams\n'''\nSELECT misc.scorePercent\nFROM misc\nWHERE misc.pid = 0;\n'''\n\n# Ranking teams based on the number of superbowl rings they have since 1971\n'''\nSELECT Team.name, COUNT(Team.name) AS count\nFROM Winner\nINNER JOIN Team ON Team.id = Winner.tid\nGROUP BY Team.name\nORDER 
BY COUNT(Team.name) desc;\n'''\n\n# Average number of plays per drive with every team's offense and the average league rank\n'''\nSELECT AVG(adTeam.plays) AS TeamStats, AVG(adLeague.plays) AS LgOffense\nFROM AverageDrive AS adTeam, AverageDrive as adLeague\nWHERE adTeam.pid = 0;\n'''\n\n'''\nSELECT AVG(ad.pts)\nFROM AverageDrive AS adOffense, AverageDrive AS adDefense\nGROUP BY ;\n'''","repo_name":"weeksForDays/nflWinnerProject","sub_path":"data-import.py","file_name":"data-import.py","file_ext":"py","file_size_in_byte":9755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"5581351456","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUNET, which can be used for Nodule Candidate Points Generation in our case\n\"\"\"\nfrom keras import Input\nfrom keras.layers import *\nfrom keras.models import Model\nfrom keras.optimizers import Adam\n\nfrom LungCancer_V1.luna16Project.CandidateGenerationAndLuna16Preprocessing.dataProcessAndPlotShow import *\n'''\nThis function reads a '.mhd' file using SimpleITK and returns the image array, \norigin and spacing of the image.\n'''\ndef load_itk(filename):\n    # Reads the image using SimpleITK\n    itkimage = sitk.ReadImage(filename)\n\n    # Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x\n    ct_scan = sitk.GetArrayFromImage(itkimage)\n\n    # Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.\n    origin = np.array(list(reversed(itkimage.GetOrigin())))\n\n    # Read the spacing along each dimension\n    spacing = np.array(list(reversed(itkimage.GetSpacing())))\n\n    return ct_scan, origin, spacing\n\n'''\nThis function is used to convert the world coordinates to voxel coordinates using \nthe origin and spacing of the ct_scan\n'''\n\ndef world_2_voxel(world_coordinates, origin, spacing):\n    stretched_voxel_coordinates = np.absolute(world_coordinates - origin)\n    voxel_coordinates = stretched_voxel_coordinates / spacing\n    return voxel_coordinates\n\n'''\nThis function is used to convert the voxel coordinates to world coordinates using \nthe origin and spacing of the ct_scan.\n'''\n\ndef voxel_2_world(voxel_coordinates, origin, spacing):\n    stretched_voxel_coordinates = voxel_coordinates * spacing\n    world_coordinates = stretched_voxel_coordinates + origin\n    return world_coordinates\n\ndef seq(start, stop, step=1):\n    n = int(round((stop - start) / float(step)))\n    if n > 1:\n        return ([start + step * i for i in range(n + 1)])\n    else:\n        return ([])\n\n
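# Worked example of the two coordinate transforms above (the numbers are
# illustrative, not from any particular scan): with origin = [-200., -195., -195.]
# mm and spacing = [2.5, 0.7, 0.7] mm, a world point (z, y, x) = (-100., 50., 60.)
# maps to voxel indices (|-100 + 200|/2.5, |50 + 195|/0.7, |60 + 195|/0.7)
# = (40.0, 350.0, ~364.3). voxel_2_world inverts this, up to the np.absolute()
# in world_2_voxel, which assumes the scan extends in the positive direction
# from the origin.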
\n'''\nThis function is used to create spherical regions in binary masks\nat the given locations and radius.\nThis function is used to create the radius regions.\n'''\ndef draw_circles(image,cands,origin,spacing):\n    #make empty matrix, which will be filled with the mask\n    RESIZE_SPACING = [1, 1, 1]\n    image_mask = np.zeros(image.shape)\n    #run over all the nodules in the lungs\n    for ca in cands.values:\n        #get middle x-,y-, and z-worldcoordinate of the nodule\n        radius = np.ceil(ca[4])/2\n        coord_x = ca[1]\n        coord_y = ca[2]\n        coord_z = ca[3]\n        image_coord = np.array((coord_z,coord_y,coord_x))\n        #determine voxel coordinate given the worldcoordinate\n        image_coord = world_2_voxel(image_coord,origin,spacing)\n        #determine the range of the nodule\n        noduleRange = seq(-radius, radius, RESIZE_SPACING[0])\n        #create the mask\n        for x in noduleRange:\n            for y in noduleRange:\n                for z in noduleRange:\n                    coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)\n                    if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius:\n                        # cast the rounded voxel coordinates to int: float indices raise an error in numpy\n                        image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = 1\n    return image_mask\n\n'''\nThis function takes the path to a '.mhd' file as input and \nis used to create the nodule masks and segmented lungs after \nrescaling to 1mm size in all directions. It saves them in the .npz\nformat. It also takes the list of nodule locations in that CT Scan as \ninput.\n'''\n\"\"\"\n'cands' are the list of nodule points with the radius given in the annotation.csv file of LUNA16 dataset\n\"\"\"\n\ndef create_nodule_mask(imagePath, maskPath, cands):\n    # if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:\n    img, origin, spacing = load_itk(imagePath)\n    # calculate resize factor\n    RESIZE_SPACING = [1, 1, 1]\n    resize_factor = spacing / RESIZE_SPACING\n    new_real_shape = img.shape * resize_factor\n    new_shape = np.round(new_real_shape)\n    real_resize = new_shape / img.shape\n    new_spacing = spacing / real_resize\n    # resize image\n    lung_img = scipy.ndimage.interpolation.zoom(img, real_resize)\n    # Segment the lung structure\n    lung_img = lung_img + 1024\n    lung_mask = segment_lung_from_ct_scan(lung_img)\n    lung_img = lung_img - 1024\n\n    # create nodule mask\n    nodule_mask = draw_circles(lung_img, cands, origin, new_spacing)\n    lung_img_512, lung_mask_512, nodule_mask_512 = np.zeros((lung_img.shape[0], 512, 512)), np.zeros(\n        (lung_mask.shape[0], 512, 512)), np.zeros((nodule_mask.shape[0], 512, 512))\n\n    original_shape = lung_img.shape\n    for z in range(lung_img.shape[0]):\n        offset = (512 - original_shape[1])\n        # offsets must be ints before they are used as slice bounds below\n        upper_offset = int(np.round(offset / 2))\n        lower_offset = offset - upper_offset\n        new_origin = voxel_2_world([-upper_offset, -lower_offset, 0], origin, new_spacing)\n        lung_img_512[z, upper_offset:-lower_offset, upper_offset:-lower_offset] = lung_img[z, :, :]\n        lung_mask_512[z, upper_offset:-lower_offset, upper_offset:-lower_offset] = lung_mask[z, :, :]\n        nodule_mask_512[z, upper_offset:-lower_offset, upper_offset:-lower_offset] = nodule_mask[z, :, :]\n    # save images.\n    np.save(maskPath +'_NAME_' + '_lung_img.npz', lung_img_512)\n    np.save(maskPath +'_NAME_' + '_lung_mask.npz', lung_mask_512)\n    np.save(maskPath +'_NAME_' + '_nodule_mask.npz', nodule_mask_512)\n\n\n# change the loss function\ndef dice_coef(y_true, y_pred):\n    smooth = 1.\n    y_true_f = K.flatten(y_true)\n    y_pred_f = K.flatten(y_pred)\n    intersection = K.sum(y_true_f * y_pred_f)\n    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n
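# Worked example of the Dice coefficient above (smooth = 1): for
# y_true = [1, 1, 0, 0] and y_pred = [1, 0, 0, 0], intersection = 1 and
# dice = (2*1 + 1) / (2 + 1 + 1) = 0.75, so the training loss defined next is
# -0.75; the smooth term keeps the ratio defined (and equal to 1) when both
# masks are empty.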
\ndef dice_coef_loss(y_true, y_pred):\n    return -dice_coef(y_true, y_pred)\n\n'''\nThe UNET model is compiled in this function.\n'''\n\"\"\"\nUNET model structure\n\"\"\"\n\ndef unet_model():\n\tinputs = Input((1, 512, 512))\n\tconv1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(inputs)\n\tconv1 = Dropout(0.2)(conv1)\n\tconv1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv1)\n\tpool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n\tconv2 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool1)\n\tconv2 = Dropout(0.2)(conv2)\n\tconv2 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv2)\n\tpool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n\tconv3 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool2)\n\tconv3 = Dropout(0.2)(conv3)\n\tconv3 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv3)\n\tpool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n\tconv4 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool3)\n\tconv4 = Dropout(0.2)(conv4)\n\tconv4 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv4)\n\tpool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n\tconv5 = Convolution2D(1024, 3, 3, activation='relu', border_mode='same')(pool4)\n\tconv5 = Dropout(0.2)(conv5)\n\tconv5 = Convolution2D(1024, 3, 3, activation='relu', border_mode='same')(conv5)\n\n\tup6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)\n\tconv6 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(up6)\n\tconv6 = Dropout(0.2)(conv6)\n\tconv6 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv6)\n\n\tup7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)\n\tconv7 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up7)\n\tconv7 = Dropout(0.2)(conv7)\n\tconv7 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv7)\n\n\tup8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)\n\tconv8 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up8)\n\tconv8 = Dropout(0.2)(conv8)\n\tconv8 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv8)\n\n\tup9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)\n\tconv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up9)\n\tconv9 = Dropout(0.2)(conv9)\n\tconv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv9)\n\n\tconv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)\n\n\tmodel = Model(input=inputs, output=conv10)\n\tmodel.summary()\n\tmodel.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])\n\n\treturn model\n\n","repo_name":"Jameskry/LungCancerProject","sub_path":"LungCancer_V1/luna16Project/CandidateGenerationAndLuna16Preprocessing/UNETForCandidatePointGeneration.py","file_name":"UNETForCandidatePointGeneration.py","file_ext":"py","file_size_in_byte":8346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
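# Shape bookkeeping for unet_model above (channels-first, as Input((1, 512, 512))
# and concat_axis=1 imply): the four MaxPooling2D layers halve 512 -> 256 -> 128
# -> 64 -> 32, so the conv5 bottleneck operates on 1024 feature maps at 32 x 32.
# Each UpSampling2D doubles the resolution back, and the skip concatenations give
# up6..up9 inputs of 1024+512, 512+256, 256+128 and 128+64 channels before their
# 3x3 convolutions reduce them again; the final 1x1 sigmoid convolution emits the
# per-pixel nodule mask trained against dice_coef_loss. (Note this is the Keras
# 1.x API: Convolution2D(n, 3, 3, border_mode=...), merge(...) and
# Model(input=..., output=...) were all renamed in Keras 2.)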
{"seq_id":"25957410036","text":"import pickle\nfrom subprocess import Popen\nfrom logger import Logger\nfrom abstract import abstract\nimport os\n\nclass scheduler(object):\n    def __init__(self):\n        self.submitCmd = \"qsub\"\n        self.statCmd = \"qstat -xf\"\n        self.deleteCmd = \"qdel\"\n\n    # Submit function: This will take the inputs from the user and submits the\n    # job on the palmetto. The list of files needed are copied from the local\n    # machine to the palmetto node\n    def Submit(self, fileName, Job_, resource_):\n        path = resource_.remoteTmp\n        host = resource_.userName + '@' + resource_.hostName\n        splitJobScriptLocation = fileName.split('/')\n        inputFile = splitJobScriptLocation[len(splitJobScriptLocation) - 1]\n        qsubCmd = self.submitCmd + ' ' + path + '/' + inputFile\n        abstract_ = abstract()\n        abstract_.transferFiles(inputFile, host, path, Job_)\n        Job_.remoteId = abstract_.fileTransferType(qsubCmd, host, resource_.transferType)\n\n    # Delete function: This will take the jobID as the input from the user and\n    # deletes the particular job\n    def Delete(self, Job_, resource_):\n        host = resource_.userName + '@' + resource_.hostName\n        qdelCmd = self.deleteCmd + ' ' + Job_.remoteId\n        abstract_ = abstract()\n        abstract_.fileTransferType(qdelCmd, host, resource_.transferType)\n\n    def Query(self, Job_, resource_):\n        host = resource_.userName + '@' + resource_.hostName\n        qstatCmd = self.statCmd + ' ' + Job_.remoteId\n        abstract_ = abstract()\n        abstract_.fileTransferType(qstatCmd, host, resource_.transferType)\n\nclass PBS(scheduler):\n\n    def __init__(self, scheduler):\n        self.submitCmd = \"qsub\"\n        self.statCmd = \"qstat -xf\"\n        self.deleteCmd = \"qdel\"\n\n    # Submit function: This will take the inputs from the user and submits the\n    # job on the palmetto. The list of files needed are copied from the local\n    # machine to the palmetto node\n    def Submit(self, args, Job_, filename, resource_):\n        fileName = args.inFile\n        if os.path.splitext(fileName)[1] == \".pbs\":\n            super(PBS, self).Submit(fileName, Job_, resource_)\n        elif os.path.splitext(fileName)[1] == \".submit\":\n            print(\"condor\")\n            # assumed to be a Job_ helper for Condor -> PBS conversion, mirroring\n            # the Job_.fromPBStoCondor call used by Condor.Submit below; the bare\n            # fromCondortoPBS name was undefined in this module\n            Job_.fromCondortoPBS(fileName)\n        Logger_ = Logger()\n        for line in Job_.remoteId.stdout:\n            Job_.remoteId = line.rstrip()\n        if Job_.remoteId != '0':\n            with open(filename, 'wb') as f:\n                pickle.dump(Job_, f)\n            Logger_.map_job(args, filename)\n\n    # Delete function: This will take the jobID as the input from the user and\n    # deletes the particular job\n    def Delete(self, args, Job_, resource_):\n        super(PBS, self).Delete(Job_, resource_)\n\n    def Query(self, args, Job_, resource_):\n        super(PBS, self).Query(Job_, resource_)\n\nclass Condor(scheduler):\n    def __init__(self, scheduler):\n        self.submitCmd = \"condor_submit\"\n        self.statCmd = \"condor_q\"\n        self.deleteCmd = \"condor_rm\"\n\n    # Submit function: This will take the inputs from the user and submits the\n    # job on the OSG. 
The list of files needed are copied from the local\n # machine to the OSG node\n def Submit(self, args, Job_, filename, resource_):\n fileName = args.inFile\n Logger_ = Logger()\n if os.path.splitext(fileName)[1] == \".submit\":\n super(Condor, self).Submit(fileName, Job_, resource_)\n elif os.path.splitext(fileName)[1] == \".pbs\":\n file, scriptFile = Job_.fromPBStoCondor(fileName)\n print (scriptFile)\n Job_.transferInpFile = scriptFile\n super(Condor, self).Submit(file, Job_, resource_)\n\n for line in Job_.remoteId.stdout:\n print(line)\n if \"cluster\" in line:\n Job_.remoteId = line.split(\"cluster\", 1)[1]\n Job_.remoteId = Job_.remoteId.rstrip()\n print(Job_.remoteId )\n if Job_.remoteId != '0':\n with open(filename, 'wb') as f:\n pickle.dump(Job_, f)\n Logger_.map_job(args, filename)\n\n # Delete function: This will take the jobID as the input from the user and\n # deletes the particular job\n def Delete(self, Job_, resource_):\n super(Condor, self).Delete(Job_, resource_)\n\n def Query(self, Job_, resource_):\n super(Condor, self).Query(Job_, resource_)\n","repo_name":"clemsonciti/palmetto-meta-scheduler","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"42802028552","text":"from imports import *\nfrom utils import to_gpu, to_cpu\nfrom parameters import par\n\ndef clopath_update_plot(it, cl_in, cl_rnn, gr_in, gr_rnn):\n\n\tupdate_list = to_cpu([cl_in, cl_rnn, gr_in, gr_rnn])\n\tupdate_name = ['Clopath W_in', 'Clopath W_rnn', 'Grad W_in', 'Grad W_rnn']\n\n\tfig, ax = plt.subplots(2,2, figsize=[12,10])\n\tfor i, j in itertools.product([0,1], [0,1]):\n\t\tim = ax[i,j].imshow(update_list[i+2*j], aspect='auto')\n\t\tax[i,j].set_title(update_name[i+2*j])\n\t\tfig.colorbar(im, ax=ax[i,j])\n\n\tplt.savefig('./savedir/{}_clopath{:0>6}.png'.format(par['savefn'], it), bbox_inches='tight')\n\tif par['save_pdfs']:\n\t\tplt.savefig('./savedir/{}_clopath{:0>6}.pdf'.format(par['savefn'], it), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\ndef plot_grads_and_epsilons(it, trial_info, model, h, eps_v_rec, eps_w_rec, eps_ir_rec):\n\n\th = to_cpu(h[:,0,:])\n\teps_v_rec = to_cpu(eps_v_rec)\n\teps_w_rec = to_cpu(eps_w_rec)\n\teps_ir_rec = to_cpu(eps_ir_rec)\n\n\tV_min = to_cpu(model.v[:,0,:,:].T.min())\n\n\tfig, ax = plt.subplots(8, 1, figsize=[16,22], sharex=True)\n\n\tax[0].imshow(trial_info['neural_input'][:,0,:].T, aspect='auto')\n\tax[0].set_title('Input Data')\n\tax[0].set_ylabel('Input Neuron')\n\n\tax[1].imshow(to_cpu(model.z[:,0,:].T), aspect='auto')\n\tax[1].set_title('Spiking')\n\tax[1].set_ylabel('Hidden Neuron')\n\n\tax[2].plot(to_cpu(model.z[:,0,0]), label='Spike')\n\tax[2].plot(to_cpu(model.v[:,0,0,0]) * -10, label='- Voltage x 10')\n\tax[2].plot(h[:,0], label='Gradient')\n\tax[2].legend()\n\tax[2].set_title('Single Neuron')\n\n\tax[3].imshow(h.T, aspect='auto', clim=(0, par['gamma_psd']))\n\tax[3].set_title('Pseudogradient (${} \\\\leq h \\\\leq {}$) | Sum: $h = {:6.3f}$'.format(0., par['gamma_psd'], np.sum(h)))\n\tax[3].set_ylabel('Hidden Neuron')\n\n\tax[4].imshow(to_cpu(model.v[:,0,0,:].T), aspect='auto')\n\tax[4].set_title('Membrane Voltage ($(V_r = {:5.3f}), {:5.3f} \\\\leq V_j^t \\\\leq 0$)'.format(par[par['spike_model']]['V_r'].min(), V_min))\n\tax[4].set_ylabel('Hidden Neuron')\n\n\tax[5].imshow(eps_v_rec.T, aspect='auto')\n\tax[5].set_title('Voltage Eligibility (${:6.3f} \\\\leq e_{{v,rec}} \\\\leq 
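# Hedged usage sketch for the scheduler classes above (the args/Job_/resource_
# objects and their attributes are inferred from the method bodies, not from a
# documented API):
#
#   sched = PBS(scheduler)                            # or Condor(scheduler)
#   sched.Submit(args, Job_, 'job.pkl', resource_)    # qsub / condor_submit, then pickle Job_
#   sched.Query(args, Job_, resource_)                # qstat -xf / condor_q <remoteId>
#   sched.Delete(args, Job_, resource_)               # qdel / condor_rm <remoteId>
#
# Note the two backends are not call-compatible as written: Condor.Delete and
# Condor.Query drop the args parameter that the PBS overrides take.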
{:6.3f}$)'.format(eps_v_rec.min(), eps_v_rec.max()))\n\tax[5].set_ylabel('Hidden Neuron')\n\n\tax[6].imshow(eps_w_rec.T, aspect='auto')\n\tax[6].set_title('Adaptation Eligibility (${:6.3f} \\\\leq e_{{w,rec}} \\\\leq {:6.3f}$)'.format(eps_w_rec.min(), eps_w_rec.max()))\n\tax[6].set_ylabel('Hidden Neuron')\n\n\tax[7].imshow(eps_ir_rec.T, aspect='auto')\n\tax[7].set_title('Current Eligibility (${:6.3f} \\\\leq e_{{ir,rec}} \\\\leq {:6.3f}$)'.format(eps_ir_rec.min(), eps_ir_rec.max()))\n\tax[7].set_ylabel('Hidden Neuron')\n\n\t# ax[0,1].imshow(trial_info['neural_input'][:,0,:].T, aspect='auto')\n\t# ax[1,1].imshow(to_cpu(model.z[:,0,:].T), aspect='auto')\n\t# ax[2,1].imshow(h.T, aspect='auto', clim=(0, par['gamma_psd']))\n\t# ax[3,1].imshow(to_cpu(model.v[:,0,0,:].T), aspect='auto')\n\n\t# ax[4,1].imshow(eps_v_rec.T, aspect='auto')\n\t# ax[4,1].set_xlabel('Time')\n\n\t# for i in range(4):\n\t# \tax[i,0].set_xticks([])\n\n\t# for i in range(5):\n\t# \tax[i,1].set_xlim(200,350)\n\n\tplt.savefig('./savedir/{}_epsilon_iter{:0>6}.png'.format(par['savefn'], it), bbox_inches='tight')\n\tif par['save_pdfs']:\n\t\tplt.savefig('./savedir/{}_epsilon_iter{:0>6}.pdf'.format(par['savefn'], it), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\ndef output_behavior(it, trial_info, y):\n\n\n\tif par['task'] == 'dmswitch':\n\t\ttask_info = trial_info['task']\n\t\ttask_names = ['dms', 'dmc']\n\t\tnum_tasks = 2\n\t\theight = 14\n\telse:\n\t\ttask_names = [par['task']]\n\t\tnum_tasks = 1\n\t\theight = 8\n\n\tmatch_info, timings = trial_info['match'], trial_info['timings']\n\n\tfig, ax = plt.subplots(2*num_tasks, 1, figsize=[16,height], sharex=True)\n\n\tfor task in range(num_tasks):\n\n\t\tif par['task'] == 'dmswitch':\n\t\t\ttask_mask = (task_info == task)\n\t\t\tmatch = np.where(np.logical_and(task_mask, match_info))[0]\n\t\t\tnonmatch = np.where(np.logical_and(task_mask, np.logical_not(match_info)))[0]\n\n\t\telse:\n\t\t\tmatch = np.where(match_info)[0]\n\t\t\tnonmatch = np.where(np.logical_not(match_info))[0]\n\n\t\ttime = np.arange(par['num_time_steps'])\n\n\t\ty_match = to_cpu(cp.mean(y[:,match,:], axis=1))\n\t\ty_nonmatch = to_cpu(cp.mean(y[:,nonmatch,:], axis=1))\n\n\t\ty_match_err = to_cpu(cp.std(y[:,match,:], axis=1))\n\t\ty_nonmatch_err = to_cpu(cp.std(y[:,nonmatch,:], axis=1))\n\n\t\tc_res = [[60/255, 21/255, 59/255, 1.0], [164/255, 14/255, 76/255, 1.0], [77/255, 126/255, 168/255, 1.0]]\n\t\tc_err = [[60/255, 21/255, 59/255, 0.5], [164/255, 14/255, 76/255, 0.5], [77/255, 126/255, 168/255, 0.5]]\n\n\t\tfor i, (r, e) in enumerate(zip([y_match, y_nonmatch], [y_match_err, y_nonmatch_err])):\n\t\t\tj = 2*task + i\n\n\t\t\terr_low = r - e\n\t\t\terr_high = r + e\n\n\t\t\tax[j].fill_between(time, err_low[:,0], err_high[:,0], color=c_err[0])\n\t\t\tax[j].fill_between(time, err_low[:,1], err_high[:,1], color=c_err[1])\n\t\t\tax[j].fill_between(time, err_low[:,2], err_high[:,2], color=c_err[2])\n\n\t\t\tax[j].plot(time, r[:,0], c=c_res[0], label='Fixation')\n\t\t\tax[j].plot(time, r[:,1], c=c_res[1], label='Cat. 1 / Match')\n\t\t\tax[j].plot(time, r[:,2], c=c_res[2], label='Cat. 2 / Non-Match')\n\n\t\t\tfor t in range(timings.shape[0]):\n\t\t\t\tax[j].axvline(timings[t,:].min(), c='k', ls='--')\n\n\tfig.suptitle('Output Neuron Behavior')\n\tfor task in range(num_tasks):\n\t\tj = task*2\n\t\tax[j].set_title('Task: {} | Cat. 1 / Match Trials'.format(task_names[task].upper()))\n\t\tax[j+1].set_title('Task: {} | Cat. 
2 / Non-Match Trials'.format(task_names[task].upper()))\n\n\tfor j in range(2*num_tasks):\n\t\tax[j].legend(loc=\"upper left\")\n\t\tax[j].set_ylabel('Mean Response')\n\tax[0].set_xlim(time.min(), time.max())\n\tax[2*num_tasks-1].set_xlabel('Time')\n\n\tplt.savefig('./savedir/{}_outputs_iter{:0>6}.png'.format(par['savefn'], it), bbox_inches='tight')\n\tif par['save_pdfs']:\n\t\tplt.savefig('./savedir/{}_outputs_iter{:0>6}.pdf'.format(par['savefn'], it), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\ndef visualize_delta(i, var_dict, grad_dict):\n\n\tfor n in [k for k in grad_dict.keys() if 'rnn' in k]:\n\t\tfig, ax = plt.subplots(1,2, figsize=[16,8])\n\t\tim = ax[0].imshow(to_cpu(par['learning_rate']*grad_dict[n]), aspect='auto')\n\t\tfig.colorbar(im, ax=ax[0])\n\t\tim = ax[1].imshow(to_cpu(var_dict[n]), aspect='auto')\n\t\tfig.colorbar(im, ax=ax[1])\n\n\t\tfig.suptitle(n)\n\t\tax[0].set_title('Gradient')\n\t\tax[1].set_title('Variable')\n\n\t\tplt.savefig('./savedir/{}_delta_{}_iter{:0>6}.png'.format(par['savefn'], n, i), bbox_inches='tight')\n\t\tif par['save_pdfs']:\n\t\t\tplt.savefig('./savedir/{}_delta_{}_iter{:0>6}.pdf'.format(par['savefn'], n, i), bbox_inches='tight')\n\t\tplt.clf()\n\t\tplt.close()\n\n\ndef activity_plots(i, model):\n\n\tV_min = to_cpu(model.v[:,0,:,:].T.min())\n\n\tfig, ax = plt.subplots(4,1, figsize=(15,11), sharex=True)\n\tax[0].imshow(to_cpu(model.input_data[:,0,:].T), aspect='auto')\n\tax[0].set_title('Input Data')\n\tax[1].imshow(to_cpu((model.input_data[:,0,:] @ model.eff_var['W_in']).T), aspect='auto')\n\tax[1].set_title('Projected Inputs')\n\tax[2].imshow(to_cpu(model.z[:,0,:].T), aspect='auto')\n\tax[2].set_title('Spiking')\n\tax[3].imshow(to_cpu(model.v[:,0,0,:].T), aspect='auto', clim=(V_min,0.))\n\tax[3].set_title('Membrane Voltage ($(V_r = {:5.3f}), {:5.3f} \\\\leq V_j^t \\\\leq 0$)'.format(par[par['spike_model']]['V_r'].min(), V_min))\n\n\tax[0].set_ylabel('Input Neuron')\n\tax[1].set_ylabel('Hidden Neuron')\n\tax[2].set_ylabel('Hidden Neuron')\n\tax[3].set_ylabel('Hidden Neuron')\n\n\tplt.savefig('./savedir/{}_activity_iter{:0>6}.png'.format(par['savefn'], i), bbox_inches='tight')\n\tif par['save_pdfs']:\n\t\tplt.savefig('./savedir/{}_activity_iter{:0>6}.pdf'.format(par['savefn'], i), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\ndef training_curve(i, iter_record, full_acc_record, task_acc_record):\n\t\n\tfig, ax = plt.subplots(1,1, figsize=(8,8))\n\tax.plot(iter_record, full_acc_record, label='Full Accuracy')\n\tax.plot(iter_record, task_acc_record, label='Match/Nonmatch Accuracy')\n\tax.axhline(0.5, c='k', ls='--', label='Match/Nonmatch Chance Level')\n\tax.legend(loc='upper left')\n\tax.set_xlabel('Iteration')\n\tax.set_ylabel('Accuracy')\n\tax.set_title('Accuracy Training Curve')\n\tax.set_ylim(0,1)\n\tax.set_xlim(0,i)\n\tax.grid()\n\n\tplt.savefig('./savedir/{}_training_curve_iter{:0>6}.png'.format(par['savefn'], i), bbox_inches='tight')\n\tif par['save_pdfs']:\n\t\tplt.savefig('./savedir/{}_training_curve_iter{:0>6}.pdf'.format(par['savefn'], i), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\ndef run_pev_analysis(sample, syn_eff, z, I_sqr_record, i):\n\n\t### Run PEV analysis on the voltage and synaptic efficacy\n\t### to determine where the match/nonmatch information is stored\n\n\tdef pev_analysis(a, b):\n\t\tweights = np.linalg.lstsq(a, b, rcond=None)\n\t\terror = b - a @ weights[0]\n\n\t\terror = error.reshape(b.shape)\n\t\tmse = np.mean(error**2)\n\t\trvar = np.var(b)\n\t\tpev = 1 - mse/(rvar+1e-9) if rvar > 1e-9 else 
0\n\n\t\treturn pev, weights[0]\n\n\tsyn_eff = np.squeeze(syn_eff)\n\n\tfiltered_z = np.zeros_like(z)\n\talpha = 0.98\n\tfor t in range(1, z.shape[0]):\n\t\tfiltered_z[t, :, :] = alpha*filtered_z[t-1, :, :] + (1-alpha)*z[t,:,:]\n\n\tsample_dir = np.ones((par['batch_size'], 3))\n\tsample_dir[:,1] = np.cos(2*np.pi*sample/par['num_motion_dirs'])\n\tsample_dir[:,2] = np.sin(2*np.pi*sample/par['num_motion_dirs'])\n\n\tpev_z = np.zeros([par['num_time_steps'], par['n_hidden']])\n\tpev_syn = np.zeros([par['num_time_steps'], par['n_hidden']])\n\n\tfor n, t in product(range(par['n_hidden']), range(par['num_time_steps'])):\n\n\t\tpev_z[t,n], _ = pev_analysis(sample_dir, filtered_z[t,:,n,np.newaxis])\n\t\tpev_syn[t,n], _ = pev_analysis(sample_dir, syn_eff[t,:,n,np.newaxis])\n\n\n\tfig, ax = plt.subplots(2,2, figsize=(10,8))\n\tax[0,0].imshow(pev_z.T, aspect='auto', clim=(0,1))\n\tax[0,1].imshow(pev_syn.T, aspect='auto', clim=(0,1))\n\tax[1,0].plot(np.percentile(pev_z, 95, axis=1),'g', label = 'spikes 95pct')\n\tax[1,0].plot(np.percentile(pev_syn, 95, axis=1),'m', label = 'synapses 95pct')\n\tax[1,0].plot(np.percentile(pev_z, 80, axis=1),'g--', label = 'spikes 80pct')\n\tax[1,0].plot(np.percentile(pev_syn, 80, axis=1),'m--', label = 'synapses 80pct')\n\tax[1,0].legend()\n\tax[1,1].plot(I_sqr_record)\n\n\tax[0,0].set_title('Spike PEV')\n\tax[0,1].set_title('Synaptic PEV')\n\n\tax[1,0].set_xlabel('Time (ms)')\n\tax[1,1].set_xlabel('Iterations')\n\tax[1,0].set_ylabel('PEV')\n\tax[0,0].set_ylabel('Neurons')\n\n\n\tplt.savefig('./savedir/{}_pev{:0>6}.png'.format(par['savefn'], i), bbox_inches='tight')\n\tif par['save_pdfs']:\n\t\tplt.savefig('./savedir/{}_pev{:0>6}.pdf'.format(par['savefn'], i), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\ndef EI_testing_plots(i, I_sqr_record, W_rnn_grad_sum_record, W_rnn_grad_norm_record):\n\n\t# Plot I square\n\tplt.figure()\n\tplt.plot(I_sqr_record)\n\tplt.savefig('./savedir/{}_I_sqr_iter{:0>6}.png'.format(par['savefn'], i), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\t# Plot W_rnn sum update\n\tplt.figure()\n\tplt.plot(W_rnn_grad_sum_record)\n\tplt.savefig('./savedir/{}_W_rnn_grad_sum_iter{:0>6}.png'.format(par['savefn'], i), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\t# Plot W_rnn norm update\n\tplt.figure()\n\tplt.plot(W_rnn_grad_norm_record)\n\tplt.savefig('./savedir/{}_W_rnn_grad_norm_iter{:0>6}.png'.format(par['savefn'], i), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()\n\n\n\ndef run_pev_analysis(sample, syn_eff, z, I_sqr_record, i):\n\n\tdef pev_analysis(a, b):\n\n\t\tweights = np.linalg.lstsq(a, b, rcond=None)\n\t\terror = b - a @ weights[0]\n\n\t\terror = error.reshape(b.shape)\n\t\tmse = np.mean(error**2)\n\t\trvar = np.var(b)\n\t\tpev = 1 - mse/(rvar+1e-9) if rvar > 1e-9 else 0\n\n\t\treturn pev, weights[0]\n\n\t### Run PEV analysis on the voltage and synaptic efficacy\n\t### to determine where the match/nonmatch information is stored\n\n\tsyn_eff = np.squeeze(syn_eff)\n\n\tfiltered_z = np.zeros_like(z)\n\talpha = 0.98\n\tfor t in range(1, z.shape[0]):\n\t\tfiltered_z[t, :, :] = alpha*filtered_z[t-1, :, :] + (1-alpha)*z[t,:,:]\n\n\tsample_dir = np.ones((par['batch_size'], 3))\n\tsample_dir[:,1] = np.cos(2*np.pi*sample/par['num_motion_dirs'])\n\tsample_dir[:,2] = np.sin(2*np.pi*sample/par['num_motion_dirs'])\n\n\tpev_z = np.zeros([par['num_time_steps'], par['n_hidden']])\n\tpev_syn = np.zeros([par['num_time_steps'], par['n_hidden']])\n\n\tfor n, t in itertools.product(range(par['n_hidden']), 
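# Worked example of pev_analysis above: if the least-squares residuals of a
# unit's filtered spike (or synaptic efficacy) trace on the sample-direction
# regressors have mse = 0.2 while the trace variance is rvar = 1.0, then
# pev = 1 - 0.2/1.0 = 0.8, i.e. ~80% of that unit's variance is explained by
# the stimulus; the rvar > 1e-9 guard sends silent units to pev = 0 instead of
# dividing by a near-zero variance.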
range(par['num_time_steps'])):\n\n\t\tpev_z[t,n], _ = pev_analysis(sample_dir, filtered_z[t,:,n,np.newaxis])\n\t\tpev_syn[t,n], _ = pev_analysis(sample_dir, syn_eff[t,:,n,np.newaxis])\n\n\n\tfig, ax = plt.subplots(2,2, figsize=(10,8))\n\tax[0,0].imshow(pev_z.T, aspect='auto', clim=(0,1))\n\tax[0,1].imshow(pev_syn.T, aspect='auto', clim=(0,1))\n\tax[1,0].plot(np.percentile(pev_z, 95, axis=1),'g', label = 'spikes 95pct')\n\tax[1,0].plot(np.percentile(pev_syn, 95, axis=1),'m', label = 'synapses 95pct')\n\tax[1,0].plot(np.percentile(pev_z, 80, axis=1),'g--', label = 'spikes 80pct')\n\tax[1,0].plot(np.percentile(pev_syn, 80, axis=1),'m--', label = 'synapses 80pct')\n\tax[1,0].legend()\n\tax[1,1].plot(I_sqr_record)\n\n\tax[0,0].set_title('Spike PEV')\n\tax[0,1].set_title('Synaptic PEV')\n\n\tax[1,0].set_xlabel('Time (ms)')\n\tax[1,1].set_xlabel('Iterations')\n\tax[1,0].set_ylabel('PEV')\n\tax[0,0].set_ylabel('Neurons')\n\n\n\tplt.savefig('./savedir/{}_pev_iter{:0>6}.png'.format(par['savefn'], i), bbox_inches='tight')\n\tif par['save_pdfs']:\n\t\tplt.savefig('./savedir/{}_pev_iter{:0>6}.pdf'.format(par['savefn'], i), bbox_inches='tight')\n\tplt.clf()\n\tplt.close()","repo_name":"gdgrant/Spiking-RNN","sub_path":"plotting_functions.py","file_name":"plotting_functions.py","file_ext":"py","file_size_in_byte":13140,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"70655159306","text":"import numpy as np\nimport glob\nimport xarray as xr\nimport xesmf as xe\nimport netCDF4 as nc\nimport os\nfrom sklearn.cross_decomposition import PLSRegression\n\ndef polar_winter_temps(tas_data_regridded):\n    polar_temps = tas_data_regridded.sel(lat=slice(60, 90)).tas.values\n    weights = np.cos(np.deg2rad(tas_data_regridded.lat.sel(lat=slice(60,90)).values))\n    weighted_polar_temps = np.multiply(polar_temps, weights[np.newaxis,:,np.newaxis])\n    avg_polar_temps = np.nansum(np.reshape(weighted_polar_temps, (1980, 12*144)), axis=1)/(np.nansum(weights)*144)\n    avg_polar_temps_cal = np.reshape(avg_polar_temps, (165,12))\n    ndjfm_polar_cal = [avg_polar_temps_cal[:,0], avg_polar_temps_cal[:,1], avg_polar_temps_cal[:,2], \n                       avg_polar_temps_cal[:,10], avg_polar_temps_cal[:,11]]\n    ndjfm_polar_cal = np.swapaxes(ndjfm_polar_cal, 0,1)\n    return(ndjfm_polar_cal)\n\ndef nh_winter_press(psl_data_regridded):\n    nh_psl = psl_data_regridded.sel(lat=slice(20, 90)).psl.values\n    nh_psl_cal = np.reshape(nh_psl, (165,12,np.shape(nh_psl)[1], np.shape(nh_psl)[2]))\n    ndjfm_nh_psl_cal = [nh_psl_cal[:,0], nh_psl_cal[:,1], nh_psl_cal[:,2], nh_psl_cal[:,10], nh_psl_cal[:,11]]\n    ndjfm_nh_psl_cal = np.swapaxes(ndjfm_nh_psl_cal, 0,1)\n    return(ndjfm_nh_psl_cal)\n\n
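# Month bookkeeping in the two helpers above: after reshaping the monthly series
# to (165 years, 12 months), indices 0, 1, 2, 10 and 11 select Jan, Feb, Mar,
# Nov and Dec -- the extended NDJFM winter of the variable names. All five come
# from the same calendar year, so each "winter" pairs Nov/Dec of year t with
# Jan-Mar of that same year rather than of year t+1. The cos(latitude) factors
# are equal-area weights: a 2.5-degree cell at 60N covers half the area of an
# equatorial one (cos 60 = 0.5), so it contributes half the weight.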
# find all potential models\npsl_models = glob.glob('/home/disk/pna2/aodhan/CMIP6/historical_monthly_psl_google/*')\n\nfor model_path in psl_models[1:]:\n    model_paths = model_path + '/*'\n    psl_files = glob.glob(model_paths)\n\n    psl_and_tas_files = []\n    for psl_file in psl_files:\n        tas_file = psl_file[:48] + 'tas' + psl_file[51:]\n        psl_and_tas_files.append([psl_file, tas_file])\n\n    # define times for final netcdf\n    winter_times = np.arange(1850,2015,1)\n\n    true_and_dynamic_ts = []\n    for file_set in psl_and_tas_files:\n        try:\n            psl_data = xr.open_dataset(file_set[0])\n            tas_data = xr.open_dataset(file_set[1])\n        except:\n            print('Error opening file set: ', file_set)\n            # the data variables are undefined when the open fails, so skip this file set\n            continue\n        print('Opened file set: ', file_set[0])\n        # CMIP6 models must be regridded, below we define input and output grids\n        latitudes = psl_data.lat.values # psl and tas have same grid\n        longitudes = psl_data.lon.values\n        InputGrid = {\"lon\": longitudes, \"lat\": latitudes}\n        OutputGrid = {\"lon\": np.arange(1.25, 358.751, 2.5), \"lat\": np.arange(-88.75, 88.751, 2.5)}\n        regridder = xe.Regridder(InputGrid, OutputGrid, \"bilinear\", periodic=True)\n        psl_data_regridded = regridder(psl_data)\n        tas_data_regridded = regridder(tas_data)\n\n        # get polar temperature during winter\n        ndjfm_polar_temps_cal = polar_winter_temps(tas_data_regridded)\n\n        # get NH pressure data during winter then weight this by latitude\n        ndjfm_nh_psl_cal = nh_winter_press(psl_data_regridded)\n\n        # perform cross validation of dynamic adjustment\n        dynamical_contributions = []\n        for x in range(165):\n            temp_minus_one_winter = np.delete(ndjfm_polar_temps_cal, x, axis=0)\n            temp_minus_one_winter = np.nanmean(temp_minus_one_winter, axis=1)\n            pres_minus_one_winter = np.delete(ndjfm_nh_psl_cal, x, axis=0)\n            pres_minus_one_winter = np.nanmean(pres_minus_one_winter, axis=1)\n\n            # scale X train data\n            pres_minus_one_winter_ts = np.reshape(pres_minus_one_winter, (164,28,144))\n            pres_minus_one_winter_mean = np.nanmean(pres_minus_one_winter_ts, axis=0)\n            pres_minus_one_winter_mr = pres_minus_one_winter_ts - pres_minus_one_winter_mean\n            pres_minus_one_winter_std = np.nanstd(pres_minus_one_winter_mr, axis=0)\n            pres_minus_one_winter_scaled = pres_minus_one_winter_mr/pres_minus_one_winter_std\n\n            # scale X test data\n            pres_all_ts = np.nanmean(ndjfm_nh_psl_cal, axis=1)\n            pres_all_mr = pres_all_ts - pres_minus_one_winter_mean\n            pres_all_scaled = pres_all_mr/pres_minus_one_winter_std\n\n            # weight X data by latitude\n            weights = np.cos(np.deg2rad(psl_data_regridded.lat.sel(lat=slice(20,90)).values))\n            pres_minus_one_winter_weighted = np.multiply(pres_minus_one_winter_scaled, weights[np.newaxis, :,np.newaxis])\n            pres_all_scaled_weighted = np.multiply(pres_all_scaled, weights[np.newaxis, :,np.newaxis])\n\n            # scale Y data\n            temp_minus_one_winter_ts = np.reshape(temp_minus_one_winter, (164))\n            temp_minus_one_winter_mean = np.nanmean(temp_minus_one_winter_ts, axis=0)\n            temp_minus_one_winter_mr = temp_minus_one_winter_ts - temp_minus_one_winter_mean\n            temp_minus_one_winter_std = np.nanstd(temp_minus_one_winter_mr, axis=0)\n            temp_minus_one_winter_scaled = temp_minus_one_winter_mr/temp_minus_one_winter_std\n\n            # define X and Y data\n            X = np.reshape(pres_minus_one_winter_weighted, (164, 28*144))\n            Y = np.reshape(temp_minus_one_winter_scaled, (164))\n\n            # create PLS model with 2 components (scale must be the boolean False; the string 'False' is truthy)\n            pls = PLSRegression(n_components=2, scale=False)\n            pls.fit(X, Y)\n\n            # deploy on all pressure data\n            all_pressures = np.reshape(pres_all_scaled_weighted, (165, 28*144))\n            temp_dynamical = pls.predict(all_pressures)\n\n            # unscale the data so that units are again in K\n            temp_dynamical_multiplied_by_std = temp_dynamical*temp_minus_one_winter_std\n            dynamical_contributions.append(temp_dynamical_multiplied_by_std)\n
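# The loop above is leave-one-out cross validation: each fold drops winter x,
# standardizes the predictors and target with statistics from the remaining 164
# winters, fits a 2-component PLS regression from the flattened (28 x 144) NH
# pressure field to the polar temperature, and predicts all 165 winters with
# that fold's model. Multiplying the prediction by the training-fold std undoes
# the target scaling, so each appended "dynamical contribution" is in kelvin.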
\n    # find mean dynamical contribution over all cross validations\n    dynamical_mean = np.nanmean(dynamical_contributions, axis=0)[:,0]\n\n    # reshape the raw polar timeseries\n    polar_temp_timeseries = np.nanmean(ndjfm_polar_temps_cal, axis=1)\n\n    # create a netcdf file\n    storage_path = '/home/disk/pna2/aodhan/CMIP6/historical_dynamical_ts'\n    completed_models = glob.glob(storage_path + '/*')\n    completed_model_strings = [completed_models[i].split('/')[7] for i in range(0, len(completed_models))]\n    model = file_set[0].split('/')[7]\n    if model not in completed_model_strings:\n        os.mkdir(storage_path + '/' + model)\n    simulation = file_set[0].split('/')[8]\n    fileName = storage_path + '/' + model + '/' + simulation + '.nc'\n    ds = nc.Dataset(fileName, 'w', format='NETCDF4')\n\n    DynamicalContribution = ds.createDimension('DynamicalContribution', 2)\n    time = ds.createDimension('time', 165)\n\n    # Add variables to dimensions\n    DynamicalContribution = ds.createVariable('DynamicalContribution', int, ('DynamicalContribution',))\n    time = ds.createVariable(varname='time', datatype=int, dimensions=('time',))\n    timeseries = ds.createVariable('temp', 'f4', ('DynamicalContribution', 'time'))\n\n    # Assign values to variables\n    DynamicalContribution[:] = [0,1]\n    time[:] = winter_times\n    timeseries[:] = [polar_temp_timeseries, dynamical_mean]\n\n    # close netcdf\n    ds.close()\n","repo_name":"AodhanSweeney/SurfaceTrendLearing","sub_path":"FileCreators/Dynamic_ts_finder.py","file_name":"Dynamic_ts_finder.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"34657163336","text":"import pdb\n\nclass Graph():\n    def __init__(self, graph_dict=None):\n        if graph_dict:\n            self.__graph_dict = graph_dict\n\n    def getAdjNodes(self, node):\n        return self.__graph_dict[node]\n\n    def buildProjects(self, projects, dependencies):\n        projectOrder = []\n        while projects:\n            # iterate over a copy: projects is mutated inside the loop\n            for project in list(projects):\n                if project in projectOrder:\n                    return False\n                current = self.__graph_dict[project]\n                if not current['incoming']:\n                    children = current['outgoing']\n                    self.removeDependencies(project, children)\n                    projectOrder.append(project)\n                    projects.remove(project)\n        print(projectOrder)\n\n    def removeDependencies(self, project, children):\n        for child in children:\n            self.__graph_dict[child]['incoming'].remove(project)\n\n\n\nif __name__ == \"__main__\":\n    projects = ['a', 'b', 'c', 'd', 'e', 'f']\n    dependencies = [('a', 'd'), ('f', 'b'), ('b', 'd'), ('f', 'a'), ('d', 'c')]\n    nodes = {}\n    for project in projects:\n        nodes[project] = {\n            'outgoing': [], 'incoming': []\n        }\n        for dep in dependencies:\n            if project == dep[0]:\n                nodes[project]['outgoing'].append(dep[1])\n            if project == dep[1]:\n                nodes[project]['incoming'].append(dep[0])\n\n    graph = Graph(nodes)\n    graph.buildProjects(projects, dependencies)\n","repo_name":"redixhumayun/ctci","sub_path":"Graphs/graphs_old.py","file_name":"graphs_old.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"24421113380","text":"import os\nfrom importlib import import_module\nfrom os.path import join as opjoin\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\nfrom data_loader import LungNodule3Ddetector\nfrom layers_se import *\nfrom patient_data_loader import PatientDataLoader\nfrom torch.utils.data import DataLoader\nfrom abc import abstractmethod, ABC\n\nfrom config_training import config as config_training\n\ndefault_data_path = os.path.join('/content/drive/My Drive/DeepSEED-3D-ConvNets-for-Pulmonary-Nodule-Detection',\n                                 config_training['preprocess_result_path'])\nbase_path = '/content/drive/My Drive/DeepSEED-3D-ConvNets-for-Pulmonary-Nodule-Detection'\nluna_path = opjoin(base_path, 'luna_detector')\n\nmodel = import_module('res18_se')\n\n\ndef run_test(ltest, left=-3.5, right=5, thr_number=20, mode='roc', net_number=0):\n    result = {}\n    f = ltest.test_luna if mode == 'roc' else ltest.froc_eval\n    for thr in np.linspace(left, right, thr_number):\n        result[thr] = f(thr, net_number)\n    return result\n\n\n
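# run_test above just sweeps the detection threshold: np.linspace(left, right,
# thr_number) yields (by default) 20 evenly spaced cutoffs, and the returned
# dict maps each cutoff to the [tp, tn, p, n] counts of roc mode or the
# [tp, tn, p, n, fp_bboxes] counts of froc mode, which is enough to trace a
# ROC or FROC curve offline.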
class AbstractTest:\n    def __init__(self, data_path=None, paths2model=None, start=0, end=0, r_rand=0.9, stage=0, all_tta=False):\n        if paths2model is None:\n            paths2model = ['']\n        self.data_path = default_data_path if data_path is None else data_path\n        self.stage = stage\n        print('creating model')\n        self.nets = [self._init_net(path2model) for path2model in paths2model]\n        self.gp = GetPBB(self.config)\n        self.start = start\n        self.end = end\n        self.r_rand = r_rand\n        if all_tta:\n            self.config['augtype'] = {'flip': True, 'swap': True, 'scale': True, 'rotate': True}\n            print('ALL TTA')\n        dataset = self.create_dataset()\n        data_loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1,\n                                 pin_memory=True)\n\n        self.outputs, self.targets = self.predict_on_data(data_loader)\n\n    def _init_net(self, path2model):\n        config, net, loss, get_pbb = model.get_model()\n        net = net.cuda()\n        checkpoint = torch.load(opjoin(luna_path, 'test_results', path2model))\n        net.load_state_dict(checkpoint['state_dict'])\n        self.config = config\n        return net\n\n\n    @abstractmethod\n    def create_dataset(self):\n        pass\n\n    @abstractmethod\n    def predict_on_data(self, data_loader):\n        pass\n\n    def transform_target(self, target):\n        return target.cpu().detach().numpy()[0]\n\n    @abstractmethod\n    def is_positive(self, target):\n        pass\n\n    def roc_eval(self, threshold):\n        return self.common_test(threshold)\n\n    def froc_eval(self, threshold, net_number):\n        tn, tp, n, p, fp_bboxes = 0, 0, 0, 0, 0\n        print('evaluating froc results...')\n        for output, target in tqdm(zip(self.outputs[net_number], self.targets)):\n            pred = self.gp(output, threshold)\n            true = self.gp(target, 0.8)\n            # print('pred ', pred)\n            # print('true ', true)\n            if len(true) == 0:\n                continue\n            p += 1\n            found_true = False\n            for pred_bbox in pred:\n                for true_bbox in true:\n                    if iou(true_bbox[1:], pred_bbox[1:]) > 0.5:\n                        found_true = True\n                        break\n                if not found_true:\n                    fp_bboxes += 1\n            if found_true:\n                tp += 1\n            # print('pred: {}'.format(pred))\n            # print('true: {}'.format(true))\n        # print(tp, tn, p, n)\n        return [tp, tn, p, n, fp_bboxes]\n\n
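# In froc_eval above, GetPBB turns an output map into candidate boxes whose
# first entry appears to be a confidence score (only the remaining entries are
# passed to iou). A scan with a ground-truth nodule counts as a true positive
# when any candidate overlaps a truth box with IoU > 0.5; fp_bboxes tallies
# unmatched candidates, though note that found_true is never reset inside the
# candidate loop, so candidates after the first hit are no longer counted.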
\n    def common_test(self, threshold, net_number=0):\n        tn, tp, n, p = 0, 0, 0, 0\n        print('evaluating roc results...')\n        # outputs are stored per network, so select the requested net's outputs\n        for output, target in tqdm(zip(self.outputs[net_number], self.targets)):\n            pred = self.gp(output, threshold)\n            if self.is_positive(target):\n                p += 1\n                if len(pred) > 0:\n                    tp += 1\n            else:\n                n += 1\n                if len(pred) == 0:\n                    tn += 1\n            # print('pred: {}'.format(pred))\n            # print('true: {}'.format(true))\n        # print(tp, tn, p, n)\n        return [tp, tn, p, n]\n\n\n\nclass SimpleTest(AbstractTest):\n\n    def create_dataset(self):\n        luna_test = np.load('./luna_test_{}.npy'.format(self.stage))\n        dataset = LungNodule3Ddetector(self.data_path, luna_test, self.config, start=0, end=0, r_rand=self.r_rand)\n        return dataset\n\n\n    def is_positive(self, target):\n        return len(self.gp(target, 0.8)) > 0\n\n    def predict_on_data(self, data_loader):\n        outputs, targets = [[] for _ in self.nets], []\n        for i, (data, target, coord) in enumerate(data_loader):\n            data, target, coord = data.cuda(), target.cuda(), coord.cuda()\n            data = data.type(torch.cuda.FloatTensor)\n            coord = coord.type(torch.cuda.FloatTensor)\n            print('data shape: ', data.shape)\n            print('coord shape: ', coord.shape)\n            # print('coord shape: ', coord.shape)\n            for j, net in enumerate(self.nets):\n                output = net(data, coord)\n                outputs[j].append(output.cpu().detach().numpy()[0])\n            targets.append(target)\n        return outputs, [self.transform_target(target) for target in targets]\n\n\n    def test_luna(self, threshold, net_number=0):\n        # accept the net_number that run_test passes and forward it on\n        return self.common_test(threshold=threshold, net_number=net_number)\n\n\n\n\nclass PatientTest(AbstractTest):\n\n    def create_dataset(self):\n        luna_test = np.load('./luna_test.npy')\n        dataset = PatientDataLoader(self.data_path, luna_test, self.config, start=0, end=0)\n        return dataset\n\n    def is_positive(self, target):\n        return target\n\n    def predict_on_data(self, data_loader):\n        outputs, targets = [], []\n        print('feeding crops to the net..')\n        for i, (data, one_scan_labels, coord) in tqdm(enumerate(data_loader)):\n            # print('data shape ', data.shape)\n            # print('data shape ', one_scan_labels.shape)\n            data = data.transpose(0, 1)\n            one_scan_labels = one_scan_labels.transpose(0, 1)\n            for crop, label in zip(data, one_scan_labels):\n                # print('crop shape ', crop.shape)\n                crop, label, coord = crop.cuda(), label.cuda(), coord.cuda()\n                crop = crop.type(torch.cuda.FloatTensor)\n                coord = coord.type(torch.cuda.FloatTensor)\n\n                # use the first (and only) loaded network for patient-level testing\n                output = self.nets[0](crop, coord)\n                outputs.append(output.cpu().detach().numpy()[0])\n                targets.append(label)\n        # wrap in a list so common_test can index the outputs per net\n        return [outputs], [self.transform_target(target) for target in targets]\n","repo_name":"tilacyn/lung-cancer-detection","sub_path":"DeepSEED-3D-ConvNets-for-Pulmonary-Nodule-Detection/luna_detector/roc_eval.py","file_name":"roc_eval.py","file_ext":"py","file_size_in_byte":6646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"75136327625","text":"from mock import patch, MagicMock\nfrom odoo_tools.cli.odot import command\nfrom odoo_tools.api.services import ServiceApi\n\n\ndef test_service_package(runner):\n\n    with patch.object(ServiceApi, 'get_services') as get_services, \\\n            patch.object(ServiceApi, 'package') as package:\n        manifests = MagicMock()\n        get_services.return_value = manifests\n        package.return_value = []\n\n        result = runner.invoke(\n            command,\n            [\n                'service',\n                'package',\n                'service.toml',\n                'odoo',\n            ]\n        )\n\n        assert result.exception is None\n        manifests.services.get.assert_called_with('odoo')\n        service = manifests.services.get('odoo').resolved\n\n        package.assert_called_with(\n            service,\n            None, # output\n            None, # cache\n            None # decrypt_key\n        )\n\n\ndef test_service_checkout(runner):\n\n    with patch.object(ServiceApi, 'get_services') as get_services, \\\n            patch.object(ServiceApi, 'checkout') as checkout:\n\n        manifests = MagicMock()\n        get_services.return_value = manifests\n\n        result = runner.invoke(\n            command,\n            [\n                'service',\n                'checkout',\n                '--cache', 'cache',\n                '--credentials', 'a:b:c',\n                'service.toml',\n                'odoo',\n                'addons'\n            ]\n        )\n\n        assert result.exception is None\n        manifests.services.get.assert_called_with('odoo')\n        service = manifests.services.get('odoo').resolved\n\n        checkout.assert_called_with(\n            service,\n            'addons',\n            'cache',\n            
None,\n {\n 'a': {\n \"username\": \"b\",\n \"password\": \"c\"\n }\n }\n )\n","repo_name":"odoo-plus/odootools","sub_path":"tests/cli/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"81"} +{"seq_id":"35439041257","text":"import urllib.request\nimport urllib.parse\nimport json\n\nservice_url = 'http://maps.googleapis.com/maps/api/geocode/json?'\n\nwhile True:\n address = input('Enter location: ')\n url = service_url + urllib.parse.urlencode({'address': address})\n\n print('Retrieving', url)\n\n uh = urllib.request.urlopen(url)\n data = uh.read().decode()\n print('Retrieved', len(data), 'characters')\n\n try:\n js = json.loads(data)\n except:\n js = None\n\n if not js or 'status' not in js or js['status'] != 'OK':\n print('Failure')\n print(data)\n continue\n lat = js[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n lon = js[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n print('lat', lat, 'lon', lon)\n location = js['results'][0]['formatted_address']\n print(location)\n","repo_name":"milanKaran/getting-started-with-python","sub_path":"apiExample.py","file_name":"apiExample.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6162966940","text":"import numpy as np\nfrom bezier.curve import Curve\nfrom xplane_airports.AptDat import RowCode\n\n_DEFAULT_BEZIER_RESOLUTION = 16\n\n\ndef _calculate_quadratic_bezier(p0, p1, p2, resolution):\n if p0 == p2:\n return [p0]\n\n curve = Curve.from_nodes(np.asarray([p0, p1, p2]).T)\n curve_points = curve.evaluate_multi(np.linspace(0.0, 1.0, resolution))\n\n return curve_points.T.tolist()\n\n\ndef _calculate_cubic_bezier(p0, p1, p2, p3, resolution):\n if p0 == p3:\n return [p0]\n\n curve = Curve.from_nodes(np.asarray([p0, p1, p2, p3]).T)\n curve_points = curve.evaluate_multi(np.linspace(0.0, 1.0, resolution))\n\n return curve_points.T.tolist()\n\n\ndef _calculate_bezier(p0, p1, p2, p3=None, resolution=_DEFAULT_BEZIER_RESOLUTION):\n if p3 is None:\n return _calculate_quadratic_bezier(p0, p1, p2, resolution)\n else:\n return _calculate_cubic_bezier(p0, p1, p2, p3, resolution)\n\n\ndef get_paths(row_iterator, bezier_resolution, mode=\"line\"):\n # https://forums.x-plane.org/index.php?/forums/topic/66713-understanding-the-logic-of-bezier-control-points-in-aptdat/\n\n assert mode == \"line\" or mode == \"polygon\"\n\n coordinates = []\n properties = {}\n\n def _start_segment():\n nonlocal coordinates, properties\n coordinates = []\n properties = {}\n\n def _finish_segment():\n nonlocal coordinates, properties\n if len(coordinates) > 1:\n # simplify line. 
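# The three helpers above are thin wrappers around the `bezier` package:
# Curve.from_nodes expects control points as a (dim x n_nodes) matrix, hence
# the .T on the stacked points, and evaluate_multi samples the curve at
# `resolution` parameter values spanning [0, 1]. Sanity check for the quadratic
# case: with p0=(0,0), p1=(1,2), p2=(2,0), the curve value at t=0.5 is
# 0.25*p0 + 0.5*p1 + 0.25*p2 = (1.0, 1.0); the sampled polyline only contains
# that point exactly when t=0.5 is one of the linspace samples (true for 17
# points, not for the default 16).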
remove consecutive duplicates\n prev_c = None\n fixed_coordinates = []\n for c in coordinates:\n if prev_c is not None and tuple(c) == tuple(prev_c):\n continue\n\n fixed_coordinates.append(c)\n\n prev_c = c\n\n coordinates_list.append(fixed_coordinates)\n properties_list.append(properties)\n\n def _process_row(is_bezier, tokens):\n nonlocal in_bezier, temp_bezier_nodes, coordinates, properties\n lat, lon = float(tokens[1]), float(tokens[2])\n\n if not is_bezier:\n if in_bezier:\n temp_bezier_nodes.append((lon, lat))\n coordinates.extend(\n _calculate_bezier(*temp_bezier_nodes)\n ) # TODO: pass resolution argument\n temp_bezier_nodes = []\n else:\n coordinates.append((lon, lat))\n\n in_bezier = False\n\n painted_line_type = int(tokens[3]) if len(tokens) > 3 else None\n lighting_line_type = int(tokens[4]) if len(tokens) > 4 else None\n\n if mode == \"line\" and (\n (\n painted_line_type is not None\n and properties.get(\"painted_line_type\") is not None\n and painted_line_type != properties[\"painted_line_type\"]\n )\n or (\n lighting_line_type is not None\n and properties.get(\"lighting_line_type\") is not None\n and lighting_line_type != properties[\"lighting_line_type\"]\n )\n ):\n if row_iterator.has_next():\n _finish_segment()\n _start_segment()\n row_iterator.unnext() # reuse row for the new segment\n else:\n if painted_line_type is not None:\n properties[\"painted_line_type\"] = painted_line_type\n\n if lighting_line_type is not None:\n properties[\"lighting_line_type\"] = lighting_line_type\n\n else:\n bzp_lat, bzp_lon = float(tokens[3]), float(tokens[4])\n\n if in_bezier:\n diff_lat = bzp_lat - lat\n diff_lon = bzp_lon - lon\n mirr_lat = lat - diff_lat\n mirr_lon = lon - diff_lon\n\n temp_bezier_nodes.append((mirr_lon, mirr_lat))\n temp_bezier_nodes.append((lon, lat))\n coordinates.extend(\n _calculate_bezier(*temp_bezier_nodes, resolution=bezier_resolution)\n )\n temp_bezier_nodes = []\n else:\n if len(coordinates):\n diff_lat = bzp_lat - lat\n diff_lon = bzp_lon - lon\n mirr_lat = lat - diff_lat\n mirr_lon = lon - diff_lon\n\n temp_bezier_nodes.append(coordinates[-1])\n temp_bezier_nodes.append((mirr_lon, mirr_lat))\n temp_bezier_nodes.append((lon, lat))\n coordinates.extend(\n _calculate_bezier(\n *temp_bezier_nodes, resolution=bezier_resolution\n )\n )\n temp_bezier_nodes = []\n\n temp_bezier_nodes.append((lon, lat))\n temp_bezier_nodes.append((bzp_lon, bzp_lat))\n\n # else:\n in_bezier = True\n\n painted_line_type = int(tokens[5]) if len(tokens) > 5 else None\n lighting_line_type = int(tokens[6]) if len(tokens) > 6 else None\n\n if mode == \"line\" and (\n (\n painted_line_type is not None\n and properties.get(\"painted_line_type\") is not None\n and painted_line_type != properties[\"painted_line_type\"]\n )\n or (\n lighting_line_type is not None\n and properties.get(\"lighting_line_type\") is not None\n and lighting_line_type != properties[\"lighting_line_type\"]\n )\n ):\n if row_iterator.has_next():\n _finish_segment()\n _start_segment()\n row_iterator.unnext() # reuse row for the new segment\n else:\n if painted_line_type is not None:\n properties[\"painted_line_type\"] = painted_line_type\n\n if lighting_line_type is not None:\n properties[\"lighting_line_type\"] = lighting_line_type\n\n coordinates_list = []\n properties_list = []\n more_segments = True\n\n while more_segments:\n temp_bezier_nodes = []\n in_bezier = False\n first_row = None\n first_row_is_bezier = None\n\n _start_segment()\n\n for row in row_iterator:\n if first_row is None:\n first_row = row\n 
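# On the mirroring logic inside _process_row above: apt.dat stores one bezier
# control point per node, and the segment entering the node uses that point
# reflected through the node itself -- diff = bzp - p, mirr = p - diff -- which
# is what keeps the path tangent-continuous across the node (see the
# forums.x-plane.org thread linked at the top of get_paths).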
first_row_is_bezier = row.row_code in [\n RowCode.LINE_CURVE,\n RowCode.RING_CURVE,\n RowCode.END_CURVE,\n ]\n\n row_code = row.row_code\n tokens = row.tokens\n\n if row_code == RowCode.LINE_SEGMENT:\n _process_row(False, tokens)\n elif row_code == RowCode.LINE_CURVE:\n _process_row(True, tokens)\n elif row_code == RowCode.RING_SEGMENT:\n _process_row(False, tokens)\n _process_row(first_row_is_bezier, first_row.tokens)\n break\n elif row_code == RowCode.RING_CURVE:\n _process_row(True, tokens)\n _process_row(first_row_is_bezier, first_row.tokens)\n break\n elif row_code == RowCode.END_SEGMENT:\n _process_row(False, tokens)\n break\n elif row_code == RowCode.END_CURVE:\n _process_row(True, tokens)\n break\n else:\n row_iterator.unnext()\n more_segments = False\n break\n else:\n # there is no more rows\n more_segments = False\n\n _finish_segment()\n\n assert len(coordinates_list) == len(properties_list)\n return coordinates_list, properties_list\n","repo_name":"CarlosBergillos/xplane_apt_convert","sub_path":"xplane_apt_convert/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":7795,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"35108020702","text":"from rest_framework import serializers\nfrom drf_extra_fields.fields import Base64ImageField\nfrom django.shortcuts import get_object_or_404\n\nfrom recipes.models import (\n Tag, Recipe, Ingredient,\n IngredientToRecipe, ShoppingCart, Favorite\n)\nfrom users.models import User, Follow\nfrom djoser.serializers import UserCreateSerializer, UserSerializer\nfrom rest_framework.serializers import SerializerMethodField\n\n\nclass UserRegistrationSerializer(UserCreateSerializer):\n \"\"\"Сериализатор для регистрации пользователей.\"\"\"\n\n class Meta(UserCreateSerializer.Meta):\n fields = (\n 'email',\n 'id',\n 'username',\n 'first_name',\n 'last_name',\n 'password',\n )\n read_only_fields = ('id',)\n extra_kwargs = {\n 'password': {'write_only': True}\n }\n\n\nclass CustomUserSerializer(UserSerializer):\n \"\"\"Сериализатор для обработки данных пользователей.\"\"\"\n\n is_subscribed = SerializerMethodField()\n\n class Meta(UserCreateSerializer.Meta):\n fields = (\n 'email',\n 'id',\n 'username',\n 'first_name',\n 'last_name',\n 'is_subscribed'\n )\n\n def get_is_subscribed(self, obj):\n request = self.context.get('request', None)\n if request:\n current_user = request.user\n return Follow.objects.filter(\n user=current_user.id,\n author=obj.id\n ).exists()\n\n\nclass IngredientSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для обработки данных ингредиентов.\"\"\"\n\n class Meta:\n model = Ingredient\n fields = ('id', 'name', 'measurement_unit')\n\n\nclass TegSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор тегов\"\"\"\n\n class Meta:\n model = Tag\n fields = ('id', 'name', 'color', 'slug')\n\n\nclass ShortResipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для упрощённого отображения рецептов.\"\"\"\n\n class Meta:\n model = Recipe\n fields = ('id', 'name', 'image', 'cooking_time')\n read_only_fields = ('id', 'name', 'image', 'cooking_time')\n\n\nclass ShoppingCartSerializer(ShortResipeSerializer):\n \"\"\"Сериализатор для обработки данных списка покупок.\"\"\"\n\n def validate(self, data):\n request = self.context.get('request', None)\n current_recipe_id = self.context.get('request').parser_context.get(\n 'kwargs').get('recipe_id')\n recipe = get_object_or_404(Recipe, pk=current_recipe_id)\n\n if 
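# Pattern note on the serializers above: the duplicate checks live in
# validate(), which reads the recipe id from
# self.context['request'].parser_context['kwargs']['recipe_id'] rather than
# from the payload, so a view only needs serializer.is_valid(raise_exception=True)
# followed by .save(); this assumes the URLconf exposes the lookup kwarg as
# 'recipe_id', exactly as the get_object_or_404 calls here expect.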
ShoppingCart.objects.filter(\n user=request.user,\n recipe=recipe\n ).exists():\n raise serializers.ValidationError(\n 'The recipe is already in the shopping cart!'\n )\n return data\n\n def create(self, validated_data):\n request = self.context.get('request', None)\n current_user = request.user\n current_recipe_id = self.context.get('request').parser_context.get(\n 'kwargs').get('recipe_id')\n recipe = get_object_or_404(Recipe, pk=current_recipe_id)\n ShoppingCart.objects.create(user=current_user, recipe=recipe)\n return recipe\n\n\nclass FavoriteSerializer(ShortRecipeSerializer):\n \"\"\"Serializer for handling favorite recipe data.\"\"\"\n\n def validate(self, data):\n request = self.context.get('request', None)\n current_recipe_id = self.context.get('request').parser_context.get(\n 'kwargs').get('recipe_id')\n recipe = get_object_or_404(Recipe, pk=current_recipe_id)\n\n if Favorite.objects.filter(\n user=request.user,\n recipe=recipe\n ).exists():\n raise serializers.ValidationError(\n 'This recipe has already been added to favorites!'\n )\n return data\n\n def create(self, validated_data):\n request = self.context.get('request', None)\n current_user = request.user\n current_recipe_id = self.context.get('request').parser_context.get(\n 'kwargs').get('recipe_id')\n recipe = get_object_or_404(Recipe, pk=current_recipe_id)\n Favorite.objects.create(user=current_user, recipe=recipe)\n return recipe\n\n\nclass IngredientToRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for handling data of the through\n model linking ingredients and recipes.\n \"\"\"\n\n id = serializers.IntegerField(\n source='ingredient.id'\n )\n name = serializers.ReadOnlyField(\n source='ingredient.name'\n )\n measurement_unit = serializers.ReadOnlyField(\n source='ingredient.measurement_unit'\n )\n\n class Meta:\n model = IngredientToRecipe\n fields = (\n 'id',\n 'amount',\n 'name',\n 'measurement_unit',\n )\n\n\nclass RecipeReadSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for handling recipe data.\"\"\"\n\n tags = serializers.SerializerMethodField()\n ingredients = IngredientToRecipeSerializer(\n many=True,\n source='ingredienttorecipe'\n )\n author = CustomUserSerializer(read_only=True)\n image = Base64ImageField()\n is_favorited = serializers.SerializerMethodField()\n is_in_shopping_cart = serializers.SerializerMethodField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'tags',\n 'author',\n 'ingredients',\n 'is_favorited',\n 'is_in_shopping_cart',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n read_only_fields = (\n 'id',\n 'author',\n 'is_favorited',\n 'is_in_shopping_cart'\n )\n\n def get_tags(self, obj):\n return TagSerializer(\n Tag.objects.filter(recipes=obj),\n many=True\n ).data\n\n def get_is_in_shopping_cart(self, obj):\n request = self.context.get('request', None)\n if request:\n current_user = request.user\n return ShoppingCart.objects.filter(\n user=current_user.id,\n recipe=obj.id,\n ).exists()\n\n def get_is_favorited(self, obj):\n request = self.context.get('request', None)\n if request:\n current_user = request.user\n return Favorite.objects.filter(\n user=current_user.id,\n recipe=obj.id\n ).exists()\n\n\nclass RecipeCreateSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for creating recipes.\"\"\"\n\n tags = serializers.PrimaryKeyRelatedField(\n queryset=Tag.objects.all(),\n many=True\n )\n ingredients = IngredientToRecipeSerializer(\n many=True,\n source='ingredienttorecipe'\n )\n image = Base64ImageField()\n\n class Meta:\n model = Recipe\n fields 
= (\n 'tags',\n 'ingredients',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n\n def validate_tags(self, data):\n request = self.context.get('request', None)\n tags_list = []\n request_methods = ['POST', 'PATCH']\n if request.method in request_methods:\n if 'tags' in data:\n tags = data['tags']\n for tag in tags:\n if tag.id in tags_list:\n raise serializers.ValidationError(\n f'Tag {tag} is duplicated'\n )\n tags_list.append(tag.id)\n if len(tags_list) == 0:\n raise serializers.ValidationError(\n 'At least 1 tag is required!'\n )\n all_tags = Tag.objects.all().values_list('id', flat=True)\n if not set(tags_list).issubset(all_tags):\n raise serializers.ValidationError(\n f'Tag {tag} does not exist!'\n )\n return data\n\n def validate_ingredients(self, data):\n request = self.context.get('request', None)\n ingredients_list = []\n request_methods = ['POST', 'PATCH']\n if request.method in request_methods:\n if 'ingredienttorecipe' in data:\n ingredients = data['ingredienttorecipe']\n for ingredient in ingredients:\n ingredient = ingredient['ingredient'].get('id')\n if ingredient in ingredients_list:\n raise serializers.ValidationError(\n f'Ingredient {ingredient} has already been added!'\n )\n ingredients_list.append(ingredient)\n all_ingredients = Ingredient.objects.all().values_list(\n 'id', flat=True\n )\n if not set(ingredients_list).issubset(all_ingredients):\n raise serializers.ValidationError(\n 'The specified ingredient does not exist!'\n )\n if len(ingredients_list) == 0:\n raise serializers.ValidationError(\n 'The ingredient list must not be empty!'\n )\n return data\n\n @staticmethod\n def create_ingredients(recipe, ingredients):\n ingredient_list = []\n for ingredient_data in ingredients:\n ingredient_obj = Ingredient.objects.get(\n id=ingredient_data.get('ingredient')['id'])\n ingredient_list.append(\n IngredientToRecipe(\n ingredient=ingredient_obj,\n amount=ingredient_data.get('amount'),\n recipe=recipe,\n )\n )\n IngredientToRecipe.objects.bulk_create(ingredient_list)\n\n def create(self, validated_data):\n request = self.context.get('request', None)\n tags = validated_data.pop('tags')\n ingredients = validated_data.pop('ingredienttorecipe')\n recipe = Recipe.objects.create(author=request.user, **validated_data)\n recipe.tags.set(tags)\n self.create_ingredients(recipe, ingredients)\n return recipe\n\n def update(self, instance, validated_data):\n instance.tags.clear()\n IngredientToRecipe.objects.filter(recipe=instance).delete()\n instance.tags.set(validated_data.pop('tags'))\n ingredients = validated_data.pop('ingredienttorecipe')\n self.create_ingredients(instance, ingredients)\n return super().update(instance, validated_data)\n\n def to_representation(self, instance):\n return RecipeReadSerializer(instance, context={\n 'request': self.context.get('request')\n }).data\n\n\nclass FollowSerializer(CustomUserSerializer):\n \"\"\"Serializer for handling subscription (follow) data.\"\"\"\n recipes = serializers.SerializerMethodField(read_only=True)\n recipes_count = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = User\n fields = (\n 'email',\n 'id',\n 'username',\n 'first_name',\n 'last_name',\n 'is_subscribed',\n 'recipes',\n 'recipes_count'\n )\n read_only_fields = (\n 'email',\n 'username',\n 'first_name',\n 'last_name',\n 'is_subscribed',\n 'recipes',\n 'recipes_count'\n )\n\n def get_recipes(self, obj):\n limit = self.context.get('request').query_params.get('recipes_limit')\n if limit:\n queryset = Recipe.objects.filter(\n 
author=obj).order_by('-id')[:int(limit)]\n else:\n queryset = Recipe.objects.filter(author=obj)\n return ShortRecipeSerializer(queryset, many=True).data\n\n def get_recipes_count(self, obj):\n return Recipe.objects.filter(author=obj).count()\n\n def create(self, validated_data):\n request = self.context.get('request', None)\n author_id = self.context.get('request').parser_context.get(\n 'kwargs').get('user_id')\n current_user = request.user\n author = get_object_or_404(User, pk=author_id)\n Follow.objects.create(user=current_user, author=author)\n return author\n","repo_name":"Nikping/foodgram_final_project","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":12875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20045841915","text":"import requests\n\ndef get_html_page(url, page = None):\n print('start parsing')\n auto_url = url\n params = {\n \"p\": page\n }\n try:\n result = requests.get(auto_url, params=params)\n result.raise_for_status()\n return result.text\n\n except(requests.RequestException, ValueError):\n print('Network error')\n return False","repo_name":"ipsorus/bulletin_test","sub_path":"bulletin/get_html.py","file_name":"get_html.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31795350176","text":"\"\"\"fpn.py\nThe feature pyramid network https://arxiv.org/abs/1612.03144\n\"\"\"\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass FPN(nn.Module):\n def __init__(self, in_features, extra_feature_num, out_c = 256):\n super(FPN, self).__init__() \n self.fpn_1x1blocks = nn.ModuleList([])\n # self.fpn_out_features = []\n self.in_features_num = len(in_features)\n \n # compute how many levels from in_features\n # 1x1 conv\n for feature in in_features:\n _, in_c, _, _ = feature.size()\n self.fpn_1x1blocks.append(nn.Conv2d(in_c, out_c, kernel_size = 1))\n\n # 3x3 conv\n self.fpn_3x3blocks = nn.ModuleList([])\n self.total_feature_num = self.in_features_num + extra_feature_num\n for index in range(self.total_feature_num):\n if index >= self.in_features_num:\n self.fpn_3x3blocks.append(nn.Conv2d(out_c, out_c, kernel_size = 3, stride = 2, padding=1))\n else:\n self.fpn_3x3blocks.append(nn.Conv2d(out_c, out_c, kernel_size = 3, padding=1))\n\n self.apply(self.init_conv_kaiming)\n \n def up_and_add(self, deep, fine):\n \n return F.interpolate(deep, size=(fine.size()[2], fine.size()[3]),\n mode='nearest') + fine\n \n \n def init_conv_kaiming(self,module):\n if isinstance(module, nn.Conv2d):\n nn.init.kaiming_uniform_(module.weight, a=1)\n\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n \n \n def forward(self, features):\n \n # features should be C3, C4, C5 as example\n \n # 1x1 part\n FPN_feature = []\n for idx, feature in enumerate(features):\n FPN_feature.append(self.fpn_1x1blocks[idx](feature))\n\n # up and add from deeper feature\n total_fpn_stage = len(FPN_feature)\n for idx, fpn_feature in reversed(list(enumerate(FPN_feature))):\n # the deepest feature does nothing\n if idx == (total_fpn_stage-1):\n deep_feature = fpn_feature\n else:\n # fine_feature = FPN_feature[idx]\n deep_feature = self.up_and_add(deep_feature, fpn_feature)\n FPN_feature[idx] = deep_feature\n \n out_features = []\n # final 3x3 conv for fpn\n for idx in range(self.total_feature_num):\n # extra conv stage\n if idx >= self.in_features_num:\n feature = 
self.fpn_3x3blocks[idx](feature)\n # original input feature stage\n else:\n feature = self.fpn_3x3blocks[idx](FPN_feature[idx])\n \n out_features.append(feature)\n \n return out_features\n","repo_name":"ricky40403/Fcos_seg","sub_path":"Fcos_seg/detector/fpn.py","file_name":"fpn.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70524262664","text":"import os\nimport numpy as np\nfrom osgeo import gdal\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n#get the directory of the images.\n\nimagesFolder = \"dataset/tif//\"\nlistImages = os.listdir(os.path.join(imagesFolder, \"512\"))\n\ndef nvdi_indexPath(imagePath):\n if (len(imagePath.split('.')) == 2):\n # reading image\n img_tif = gdal.Open(imagesFolder + \"512/\" + imagePath)\n # getting red band(4):\n red_band = img_tif.GetRasterBand(4).ReadAsArray()\n # getting nir band(5):\n nir_band = img_tif.GetRasterBand(5).ReadAsArray()\n\n ndvi_image = (nir_band - red_band) / (nir_band + red_band)\n return ndvi_image\n\narrayofpixels = np.zeros((512*512*len(listImages),1))\ni = 0\nfor imagePath in listImages:\n if len(imagePath.split('.')) == 2:\n arrayofpixels[i*512*512:(i+1)*(512*512)] =nvdi_indexPath(imagePath).reshape(-1,1)\n i += 1\n\nkmeans = KMeans(n_clusters=5, n_init=10)\nkmeans.fit(arrayofpixels)\n\nnvdi_image_segmented = kmeans.labels_\n\ndef save_image(imageName, labelsPixel,format):\n img = labelsPixel.reshape(512,512)\n if format == 'png':\n nameImage = imageName.split('.')[0] + 'cluster-nvdi.png'\n plt.imsave(\"dataset/clustering/nvdi+kmeans/seasons/\" + nameImage, img)\n\ni = 0\nfor imagePath in listImages:\n if len(imagePath.split('.')) == 2:\n save_image(imagePath,nvdi_image_segmented[i * 512 * 512:(i + 1) * (512 * 512)],'png')\n i +=1\n\n","repo_name":"ilukep00/semantic-segmentation-on-satellite-images","sub_path":"local-geoespatial-preprocess/nvdi+kmeans+allimages.py","file_name":"nvdi+kmeans+allimages.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35306136505","text":"def solve_nqueens(n):\n placement = [-1] * n\n solve(placement, 0, n)\n\ndef solve(placement, row, n):\n if row == n:\n print_solution(placement)\n return\n\n for i in range(n):\n if is_valid(placement, row, i):\n placement[row] = i\n solve(placement, row + 1, n)\n\ndef is_valid(placement, row, col):\n for i in range(row):\n if placement[i] == col or abs(placement[i] - col) == abs(i - row):\n return False\n return True\n\ndef print_solution(placement):\n n = len(placement)\n for i in range(n):\n for j in range(n):\n if placement[i] == j:\n print(\"Q\", end=\" \")\n else:\n print(\".\", end=\" \")\n print()\n print()\n\n# Example usage with N = 8\nsolve_nqueens(8)","repo_name":"subhrojitGit/devOpsBasic","sub_path":"N_Queen.py","file_name":"N_Queen.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75092568583","text":"import requests, json, os\nfrom dotenv import load_dotenv \nfrom telegram import Update, Chat\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, CallbackContext\n\n\n\"\"\"\nEnvironment Variables\n\"\"\"\nload_dotenv()\nbot_token = os.getenv('TELEGRAM_BOT_TOKEN')\nchat_id_ck = os.getenv('TELEGRAM_CHAT_ID_SELF') # default chat id\nchat_id_dev = os.getenv('TELEGRAM_CHAT_ID_DEV')\n\n\"\"\"\nTelegramBot 
Instance to send message to user\n\"\"\"\ntelegram_bot = Updater(bot_token, use_context=True)\ntelegram_bot_dispatcher = telegram_bot.dispatcher\n\n\n\n\"\"\"\nFunction to send a message to the specified chat_id\n\n@param: \n - ChatId of chat to send message (Chat with CK by default if not specified)\n - Message to be sent\n@return: N/A if success, prints error message if failed to send message\n\"\"\"\ndef send_message(message, chat_id=chat_id_ck):\n telegram_bot.bot.send_message(chat_id=chat_id, text=message)\n\n\n\n\"\"\"\nFunction to send current chatID to user\n\"\"\"\ndef send_chat_id(update: Update, context: CallbackContext):\n chat_id = update.message.chat_id\n \n context.bot.send_message(chat_id=chat_id, text=f\"Chat ID: {chat_id}\")\n print(f\"Chat ID: {chat_id}\")\n\n\n\n\"\"\"\nCommand Handlers\n\"\"\"\nsend_chat_id = CommandHandler('id', send_chat_id)\ntelegram_bot_dispatcher.add_handler(send_chat_id)\n\n\n\n\"\"\"\nModule initializer\n\"\"\"\n\ndef main():\n telegram_bot.start_polling()\n print(\"SpotiSense Telegram Bot is now live!\")\n telegram_bot.idle()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"avock/TunesBlend-Engine","sub_path":"src/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21713488416","text":"import joblib\n\nfrom fastapi import FastAPI\n\nfrom utils import classify_message\nfrom model import Message\n\napp = FastAPI()\n\nmodel = joblib.load('models/spam_classifier.joblib')\n\n\n@app.get('/')\ndef get_root():\n\treturn {'message': 'Welcome to the spam detection API'}\n\n\n@app.get('/spam_detection_query/')\nasync def detect_spam_query(message: str):\n\treturn classify_message(model, message)\n\n\n@app.get('/spam_detection_path/{message}')\nasync def detect_spam_path(message: str):\n\treturn classify_message(model, message)\n\n\n@app.post('/spam_detection/')\nasync def detect_spam_message(message: Message):\n\treturn classify_message(model, message.messageText)","repo_name":"SkillsHats/Spam-Detection-FastAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13278713650","text":"from __future__ import division\n\nimport argparse\nimport os\nimport shutil\n\nimport numpy as np\nfrom PIL import Image, ImageChops, ImageDraw, ImageStat\n\nimport genalg as ga\n\n\nclass ImageChromo(ga.Chromosone):\n COLOR = {'num': 3, 'dtype': np.dtype('uint8')}\n POS = {'num': 2, 'dtype': np.dtype('uint32')}\n SIZE = {'num': 1, 'dtype': np.dtype('uint32')}\n\n BITS_PER_GENE = (COLOR['num'] * COLOR['dtype'].itemsize * 8\n + POS['num'] * POS['dtype'].itemsize * 8\n + SIZE['num'] * SIZE['dtype'].itemsize * 8)\n\n def __init__(self, ngenes, target, copying=False):\n super(ImageChromo, self).__init__(ngenes, ImageChromo.BITS_PER_GENE)\n\n if not copying:\n self.target = Image.open(target)\n self.width, self.height = self.target.size\n self.image = Image.new('RGB', self.target.size)\n self.draw = ImageDraw.Draw(self.image)\n\n self.dir = os.path.join('..', 'output',\n os.path.splitext(os.path.basename(target))[0])\n shutil.rmtree(self.dir, ignore_errors=True)\n os.makedirs(self.dir)\n\n def __deepcopy__(self, memo):\n new = ImageChromo(self.ngenes, self.target, True)\n\n new.target = self.target.copy()\n new.width, new.height = self.target.size\n new.image = self.image.copy()\n 
new.draw = ImageDraw.Draw(new.image)\n\n new.genes = np.copy(self.genes)\n\n return new\n\n def fitness(self):\n if not self._recalc:\n return self._fit\n\n self._recalc = False\n self.decode()\n\n dif = ImageChops.difference(self.target, self.image)\n self._fit = sum(ImageStat.Stat(dif).sum2)\n return self._fit\n\n def decode(self):\n self.draw.rectangle(((0, 0), self.image.size), fill=(0, 0, 0))\n c_data = ImageChromo.COLOR\n p_data = ImageChromo.POS\n s_data = ImageChromo.SIZE\n\n c_size = c_data['num'] * c_data['dtype'].itemsize * 8\n p_size = p_data['num'] * p_data['dtype'].itemsize * 8\n\n for gene in np.split(self.genes, self.ngenes):\n color = self._decode_gene(gene[:c_size], **c_data)\n pos = self._decode_gene(gene[c_size + 1:c_size + p_size], **p_data)\n size = self._decode_gene(gene[c_size + p_size + 1:], **s_data)\n\n pos = (pos.astype(float) / np.iinfo(p_data['dtype']).max) * self.image.size\n size = (size.astype(float) / np.iinfo(s_data['dtype']).max) * min(self.image.size)\n\n self.draw.ellipse((tuple(pos), tuple(pos + size)),\n fill=tuple(color))\n\n @staticmethod\n def _decode_gene(gene, num, dtype):\n bytes = np.packbits(gene).tobytes()\n decoded = np.fromstring(bytes, dtype=dtype, count=num)\n return decoded if len(decoded) > 1 else decoded[0]\n\n def log(self, gen):\n path = os.path.join(self.dir, str(gen) + '.png')\n self.image.save(path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('target')\n parser.add_argument('-m', '--max_gen', default=None, type=int)\n parser.add_argument('-i', '--interval', default=20, type=int)\n parser.add_argument('-p', '--progress', action='store_true')\n parser.add_argument('-s', '--select_ratio', default=0.3, type=float)\n parser.add_argument('-c', '--pool_size', default=30, type=int)\n parser.add_argument('-g', '--ngenes', default=300, type=int)\n\n kwargs = vars(parser.parse_args())\n\n ga = ga.GenAlg(ImageChromo, **kwargs)\n answer = ga.evolve()\n","repo_name":"whonore/ImageEvolver","sub_path":"python/image_evolve.py","file_name":"image_evolve.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"16717625797","text":"import socket\nimport pdb\nimport random\nimport math\nfrom os import path\nimport pickle\n\nimport numpy as np\n\nimport settings\n\nclass Hyperparams:\n\n\n def __init__(self):\n\n self.NCLASSES = settings.n_labels\n self.TIMESTEPS = 49\n self.NFEATURES = settings.n_features\n self.BATCHSIZE = 4\n\n # TODO: just changed for convenience\n self.EPOCHS = 2\n\n # TODO: Use MIN_EPOCHS and MAX_EPOCHS when using early stopping\n\n\n self.OUTPUT_THRESHOLD = 0.5\n\n # TRAIN_SCENES = list(range(1, 41))\n self.TRAIN_SCENES = 53\n\n # TODO: just changed for convenience\n self.ALL_FOLDS = list(range(1, 3)) # folds: 1 - 2\n self.ALL_FOLDS = list(range(1, 7)) # folds: 1 - 6\n\n self.LABEL_MODE = 'blockbased'\n self.MASK_VAL = -1\n\n self.VAL_STATEFUL = False\n\n self.epochs_finished = [0] * len(self.ALL_FOLDS)\n\n self.val_acc = [-1] * len(self.ALL_FOLDS)\n\n self.val_acc_mean = -1\n\n # indicates whether this combination is already finished\n self.finished = False\n\n\n # Hyperparams for LDL#\n self.LDL_BUFFER = 10\n self.LDL_LINES_PER_BATCH = 10\n self.LDL_BATCHSIZE = 2\n self.LDL_OVERLAP = 25\n self.LDL_TIMESTEPS = 49\n self.TIMESTEPS = self.LDL_TIMESTEPS #self.TIMESTEPS is not the timesteps in DataLoader(!!)\n\n\n\n\n #new hyperparams according to the .doc\n\n ##ratemap\n 
self.nr_conv_layers_ratemap = np.array([3, 4, 5]) # four not so good -> maxpooling reduces to fast\n\n ##ams\n self.nr_conv_layers_ams = np.array([3, 4, 5])\n\n\n ##both\n self.feature_maps_layer = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90]) # all combinations for all layers\n\n ##other\n self.batchsize = np.array([1,2,4,8]) #blocks of framesizes (*49 in timeseries)\n self.number_fully_connected_layers = np.array([2,3]) #!\n self.number_neurons_fully_connected_layers = np.array([[190,90],[190,100,50]]) #start with 380 and end with 13\n self.epochs_per_k_fold_cross_validation = 500\n\n if socket.gethostname() == \"eltanin\":\n self.epochs_per_k_fold_cross_validation = 500\n\n else:\n self.epochs_per_k_fold_cross_validation = 2\n\n\n\n\n\n\n\n\n def build_pooling_sequences(self, nr_conv_layers, filtersize_time, filtersize_ratemap, filtersize_ams_center, filtersize_ams_modulation):\n\n def build_time_pooling_sequence():\n possible_poolingsize_time = np.arange(filtersize_time)+1\n poolingsize_time = np.random.choice(possible_poolingsize_time)\n\n if poolingsize_time==1 or poolingsize_time==2:\n poolingsize_time_reduced = False\n\n if poolingsize_time == 3 or poolingsize_time == 4:\n poolingsize_time_reduced = random.choice([True, False])\n\n if poolingsize_time > 4:\n poolingsize_time_reduced = True\n\n\n if poolingsize_time_reduced==True:\n sequence = np.ones(nr_conv_layers)*poolingsize_time\n\n sequence = map(lambda ie: ie[1] - ie[0], enumerate(sequence))\n sequence = np.fromiter(sequence, dtype=np.int)\n\n #sequence = map(lambda (i, e): e - i, enumerate(sequence))\n else:\n sequence = np.ones(nr_conv_layers)*poolingsize_time\n\n return np.array(sequence)\n\n\n possible_poolingsize_ratemap = np.arange(filtersize_ratemap)+1\n possible_poolingsize_ams_center = np.arange(filtersize_ams_center)+1\n possible_poolingsize_ams_modulation = np.arange(filtersize_ams_modulation)+1\n\n time_pooling_sequence = build_time_pooling_sequence()\n\n single_ratemap_pool = np.array(np.random.choice(possible_poolingsize_ratemap))\n single_ams_pool = np.array([np.random.choice(possible_poolingsize_ams_center), np.random.choice(possible_poolingsize_ams_modulation)])\n\n expanded_ams_pool = np.repeat(np.expand_dims(single_ams_pool, axis=0), nr_conv_layers, axis=0)\n expanded_ratemap_pool = np.repeat(single_ratemap_pool, nr_conv_layers, axis=0)\n\n\n ams_pool_sequence = np.concatenate( (expanded_ams_pool, np.expand_dims(time_pooling_sequence, axis=1)), axis=1) #ams center, ams modulation, time\n ratemap_pool_sequence = np.stack( (expanded_ratemap_pool,time_pooling_sequence), axis=0) #ratemap, time\n\n\n return ams_pool_sequence, ratemap_pool_sequence.T\n\n\n\n\n def build_featuremap_scaling_sequence(self, nr_conv_layers):\n f_s = np.ones(nr_conv_layers) * int(np.random.uniform(50, 500))\n f_s = map(lambda ie: (ie[1]* math.pow(2, ie[0])), enumerate(f_s)) #.astype(int) , enumerate(f_s))\n f_s = np.fromiter(f_s, dtype=np.int)\n\n return f_s\n\n\n def build_filter_sequences(self, nr_conv_layers):\n\n def build_time_filter_sequence():\n possible_filtersize_time = np.array([2, 3, 4, 5, 6, 7, 8])\n filtersize_time = np.random.choice(possible_filtersize_time)\n\n if filtersize_time==2 or filtersize_time==3:\n filtersize_time_reduced = False\n\n if filtersize_time>5:\n filtersize_time_reduced = True\n\n if filtersize_time==4 or filtersize_time==5:\n filtersize_time_reduced = random.choice([True, False])\n\n if filtersize_time_reduced==True:\n sequence = np.ones(nr_conv_layers)*filtersize_time\n sequence = map(lambda ie: 
ie[1] - ie[0], enumerate(sequence))\n sequence = np.fromiter(sequence, dtype=np.int)\n\n else:\n sequence = np.ones(nr_conv_layers)*filtersize_time\n\n return np.array(sequence)\n\n\n possible_filtersize_ratemap = np.array([2,3])\n possible_filtersize_ams_center = np.array([2,3])\n possible_filtersize_ams_modulation = np.array([2,3])\n\n time_filter_sequence = build_time_filter_sequence()\n\n single_ratemap_filter = np.array(np.random.choice(possible_filtersize_ratemap))\n single_ams_filter = np.array([np.random.choice(possible_filtersize_ams_center), np.random.choice(possible_filtersize_ams_modulation)])\n\n\n expanded_ams_filter = np.repeat(np.expand_dims(single_ams_filter, axis=0), nr_conv_layers, axis=0)\n expanded_ratemap_filter = np.repeat(single_ratemap_filter, nr_conv_layers, axis=0)\n\n\n ams_filter_sequence = np.concatenate( (expanded_ams_filter, np.expand_dims(time_filter_sequence, axis=1)), axis=1) #ams center, ams modulation, time\n ratemap_filter_sequence = np.stack( (expanded_ratemap_filter,time_filter_sequence), axis=0) #ratemap, time\n\n return ams_filter_sequence, ratemap_filter_sequence.T\n\n\n\n\n\n def save_to_dir(self, model_dir):\n filepath = path.join(model_dir, 'hyperparameters.pickle')\n attr_val_dict = self.__dict__\n with open(filepath, 'wb') as handle:\n pickle.dump(attr_val_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # attr_val_df = pd.DataFrame.from_dict(attr_val_dict, orient='index', columns=['value'])\n # with open(filepath, 'w+') as file:\n # file.write(attr_val_df.to_csv())\n\n\n\n\n def getworkingHyperparams(self):\n conv_layers = np.random.choice(self.nr_conv_layers_ams)\n conv_layers = 2\n ams_filter_sequence, ratemap_filter_sequence = self.build_filter_sequences(conv_layers)\n featuremap_scaling_sequence = self.build_featuremap_scaling_sequence(conv_layers)\n ams_pool_sequence, ratemap_pool_sequence = self.build_pooling_sequences(conv_layers, ams_filter_sequence[0][2],ratemap_filter_sequence[0][0], ams_filter_sequence[0][0], ams_filter_sequence[0][1]) #ams_filter_sequence[0][1] - because heightest time filter\n\n\n hyperparams = {\n \"learning_rate\" : self.loguniform(low=0.0001, high=0.01, size=None),\n \"nr_conv_layers_ratemap\": conv_layers, #done - but see above\n \"sequence_ratemap_pool_window_size\": ratemap_pool_sequence, #done\n \"nr_conv_layers_ams\": conv_layers, #done - but see above\n \"sequence_ams_pool_window_size\": ams_pool_sequence, #done\n \"featuremap_scaling_sequence\": featuremap_scaling_sequence,\n \"epochs_per_k_fold_cross_validation\": self.epochs_per_k_fold_cross_validation,\n \"ams_filter_sequence\": ams_filter_sequence.astype(int), #done\n \"ratemap_filter_sequence\": ratemap_filter_sequence.astype(int), #done\n \"number_neurons_fully_connected_layers\" : random.choice(self.number_neurons_fully_connected_layers), #done\n \"batchsize\": random.choice(self.batchsize), #done\n \"ldl_timesteps\" : 49,\n \"ldl_lines_per_batch\" : self.LDL_LINES_PER_BATCH,\n \"ldl_batchsize\" : self.LDL_BATCHSIZE\n\n }\n return hyperparams\n\n\n def loguniform(self, low=0, high=1, size=None):\n low = np.exp(low)\n high = np.exp(high)\n return np.log(np.random.uniform(low, high, size))\n\n\n\n\n\n\nif __name__ == '__main__':\n hyperparamClass = Hyperparams()\n nr_conv_layers = 4\n ams_seq, ratemap_seq = hyperparamClass.build_filter_sequences(nr_conv_layers)\n ams_pool_seq, ratemap_pool_seq = 
hyperparamClass.build_pooling_sequences(nr_conv_layers,ams_seq[0][2],ratemap_seq[0][0],ams_seq[0][0],ams_seq[0][1])","repo_name":"nigroup/binaural_audition","sub_path":"feedforward/modelsv2/hyperparams.py","file_name":"hyperparams.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36093765737","text":"\"\"\"\nyield is mainly used in generator operations. Compared with conventional operations, the key feature of yield is that it does not produce all of the data at once,\nbut controls the generator's output dynamically as data is needed. The benefit is that the generated data does not occupy too much memory,\nwhich would otherwise hurt the program's performance. yield works much like return; the biggest difference is that a yield call is driven by an externally supplied next():\nonly after next() is called does yield return data. The caller can also use the \"generator_object.send()\" method to send a value to the paused yield expression and receive the next yielded value.\n\"\"\"\n# coding : UTF-8\ndef generator():\t\t\t\t\t# generator\n print(\"[generator()] before the yield statement runs.\") \t\t# status message\n res = yield \"yootk-001\" \t\t\t\t# return data and receive the value sent in\n print(\"[generator()] after the yield statement, res = %s\" % res) \t# continue after receiving the sent value\n yield \"yootk-%s\" % res\t\t\t\t# return data\ndef main():\t\t\t\t\t# main function\n result = generator()\t\t\t\t# obtain the generator object\n print(\"[main()] calling next() to get the value returned by yield: %s\" % next(result)) \t# receive the data returned by yield\n print(\"[main()] sending data to yield: %s\" % result.send(125)) \t\t# send to yield and return the next value\nif __name__ == \"__main__\":\t\t\t\t# check the module execution name\n main()\t\t\t\t\t\t# call the main function\n","repo_name":"kongziqing/Python-2lever","sub_path":"基础篇/第13章程序结构扩展/13.2生成器/13.2.1yield实现生成器/观察yield关键字基本使用.py","file_name":"观察yield关键字基本使用.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16131768005","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 15 17:32:27 2021\r\n\r\n@author: canal\r\n\"\"\"\r\n\r\n\r\n#all strings and lists have individual characters and items respectively represented by an index number\r\n#this starts at 0, and can be accessed in bracket notation\r\nthing = \"car\"\r\nthing[0]#c\r\nthing[2]#r\r\nlist = [\"one\",2,\"3hree\"]\r\nlist[2] # 3hree\r\n\r\n#print both lists and then return the even one and print it again.\r\n\r\n#assignment 5\r\n\r\nnames_list = [\"bob\",\"jimmy\",\"max b\", \"bernie\", \"jordan\", \"future hendrix\"]\r\neven = []\r\nodd = []\r\n\r\ndef index():\r\n for i in names_list:\r\n if len(i)%2 == 0 :\r\n even.append(i)\r\n else:\r\n odd.append(i)\r\nindex()\r\nprint(odd)\r\nprint(even)\r\n","repo_name":"dking11/preworkrepo","sub_path":"TKH Prework/assignment5derrick.py","file_name":"assignment5derrick.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34413871663","text":"\"\"\"\n\nLog utils for handling logs with stdout and Datadog\n\nOperates with environment variables:\n\n- DD_API_KEY: Datadog API Key\n- DD_APP_KEY: Datadog APP Key\n- DD_SITE: Datadog site. 
For example datadog.eu\n- ENV: Environment the logging is done\n- HOST_NAME: Name of the host instance\n- SERVICE_NAME: Name of the service\n\nIf using a docker container, additional env variables can be provided\n\n- DOCKER_CONTAINER_NAME: Name of docker container\n- DOCKER_IMAGE_NAME: Name of docker image used\n\n\"\"\"\nimport os\nimport sys\nimport json\nimport logging\nimport platform\nfrom loguru import logger\nimport functools\nimport time\n\nfrom datadog_api_client import ApiClient, Configuration\nfrom datadog_api_client.v2.model.http_log import HTTPLog\nfrom datadog_api_client.v2.model.http_log_item import HTTPLogItem\nfrom datadog_api_client.v2.api.logs_api import LogsApi\nfrom datadog_api_client.v2.model.content_encoding import ContentEncoding\n\n\ndef logger_wraps(*, entry=True, exit=True, level=\"DEBUG\"):\n def wrapper(func):\n name = func.__name__\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n logger_ = logger.opt(depth=1)\n start = time.time()\n if entry:\n logger_.log(\n level, \"Entering '{}' (args={}, kwargs={})\", name, args, kwargs\n )\n result = func(*args, **kwargs)\n end = time.time()\n logger_.log(level, \"Function '{}' executed in {:f} s\", name, end - start)\n\n if exit:\n logger_.log(level, \"Exiting '{}' (result={})\", name, result)\n\n return result\n\n return wrapped\n\n return wrapper\n\n\nclass DatadogHandler(logging.Handler):\n def __init__(self, api_key: str, app_key: str, site: str = \"datadoghq.com\"):\n super().__init__()\n self.api_key = api_key\n self.app_key = app_key\n self.site = site\n self.configuration = None\n self.api_client = ApiClient(self.configuration)\n self.logs = LogsApi(self.api_client)\n self.datadog_tags = None\n\n @property\n def configuration(self):\n if self._configuration is None:\n configuration = Configuration()\n configuration.api_key[\"apiKeyAuth\"] = self.api_key\n configuration.api_key[\"appKeyAuth\"] = self.app_key\n configuration.server_variables[\"site\"] = self.site\n\n self._configuration = configuration\n\n return self._configuration\n\n @configuration.setter\n def configuration(self, configuration: Configuration):\n self._configuration = configuration\n\n @property\n def datadog_tags(self):\n if self._datadog_tags is None:\n ddtags = [f\"env:{os.getenv('ENV')}\"]\n\n if os.getenv(\"DOCKER_CONTAINER_NAME\"):\n ddtags.append(f\"container_name:{os.getenv('DOCKER_CONTAINER_NAME')}\")\n\n if os.getenv(\"DOCKER_IMAGE_NAME\"):\n ddtags.append(f\"image_name:{os.getenv('DOCKER_IMAGE_NAME')}\")\n\n self._datadog_tags = ddtags\n\n return self._datadog_tags\n\n @datadog_tags.setter\n def datadog_tags(self, ddtags: list[str]):\n self._datadog_tags = ddtags\n\n def emit(self, record):\n toJson = json.dumps(\n {\n \"python-logging\": {\n \"py-env\": os.getenv(\"ENV\"),\n \"py-message\": record.getMessage(),\n \"py-status\": record.levelname.lower(),\n \"py-logger\": record.name,\n \"py-stacktrace\": str(record.exc_info),\n \"py-exception\": record.exc_text,\n \"py-line\": record.lineno,\n \"py-file\": record.filename,\n \"py-function\": record.funcName,\n \"py-level\": record.levelno,\n \"py-path\": record.pathname,\n \"py-thread\": record.thread,\n \"py-threadName\": record.threadName,\n \"py-process\": record.process,\n \"py-processName\": record.processName,\n \"py-args\": record.args,\n \"py-msecs\": record.msecs,\n \"py-relativeCreated\": record.relativeCreated,\n \"py-created\": record.created,\n \"py-module\": record.module,\n }\n }\n )\n\n # Send the log to Datadog using the Logs API\n try:\n body = 
HTTPLog(\n [\n HTTPLogItem(\n ddsource=\"Python\",\n ddtags=\",\".join(self.datadog_tags),\n hostname=os.getenv(\"HOST_NAME\", platform.uname()[1]),\n message=toJson,\n service=os.getenv(\"SERVICE_NAME\"),\n status=record.levelname.lower(),\n ),\n ]\n )\n\n self.logs.submit_log(content_encoding=ContentEncoding.DEFLATE, body=body)\n\n except Exception as exc:\n print(f\"Error sending log to Datadog: {exc}\")\n\n\nlogger.remove()\nlogger.add(sys.stdout)\n\nif os.getenv(\"DD_API_KEY\") and os.getenv(\"DD_APP_KEY\"):\n logger.add(\n DatadogHandler(\n api_key=os.getenv(\"DD_API_KEY\"),\n app_key=os.getenv(\"DD_APP_KEY\"),\n site=os.getenv(\"DD_SITE\", \"us5.datadoghq.com\"),\n )\n )\n","repo_name":"corp-momenti/rnd-utils","sub_path":"rnd_utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72118522184","text":"from petsc4py import PETSc\nfrom mpi4py import MPI\nimport time\n\nstart = time.time()\n\n# The size of the linear system\nn = 8000000\n\n# Initialize a PETSc matrix and vectors\nA = PETSc.Mat().createAIJ([n, n], comm=PETSc.COMM_WORLD)\nb = PETSc.Vec().createMPI(n, comm=PETSc.COMM_WORLD)\nx = PETSc.Vec().createMPI(n, comm=PETSc.COMM_WORLD)\n\n# Set values for the matrix and vectors\nA.setPreallocationNNZ(3)\nfor i in range(n):\n if i > 0:\n A[i, i-1] = -1.0\n if i < n - 1:\n A[i, i+1] = -1.0\n A[i, i] = 2.0\n b[i] = i + 1\n\n# Assemble the matrix and vectors\nA.assemblyBegin()\nA.assemblyEnd()\nb.assemblyBegin()\nb.assemblyEnd()\n\n# Create Krylov subspace method (KSP) solver and solve the system\nksp = PETSc.KSP().create()\nksp.setOperators(A)\nksp.setFromOptions()\nksp.solve(b, x)\n\n# Print the solution vector\nx.view()\n\n# Destroy objects to clean up PETSc\nA.destroy()\nb.destroy()\nx.destroy()\nksp.destroy()\n\nend = time.time()\nprint(end-start)","repo_name":"RexPlanalp/TDSE","sub_path":"testing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41273255994","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import plot, show, xticks, xlabel, ylabel, legend, yscale, title, savefig, rcParams, figure, hist, text, bar, subplots\nimport Image\n\ndef variance_f(sigma):\n weights=[1.0/sigma.shape[0]]*sigma.shape[0]\n return 1.0/((np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0]**.5)\n\ndef make_corr(dimension, offdiag=0.0):\n\n corr=np.array([[offdiag]*dimension]*dimension)\n corr[np.diag_indices(dimension)]=1.0\n return corr\n\n\n#cfactors=[0.1, 0.5, 0.6, 0.75, 0.8]\n#ndimlist=[1, 2, 3,4,5, 11, 15, 20]\n#maxdim=[3, 4, 10, 15, 20 ]\ncfactors=[.75]\nndimlist=[1,11]\nmaxdim=[100]\n\n#costs=[0.0005, 0.0033]\ncosts=[0]*len(ndimlist)\n\napplyzero=[False, False, False, False, False, False, False, False, False]\n\nbasestd=0.27\nbasearithmean=0.05\nriskfree=0.000\n\nresults_sr=[]\nresults_gmm=[]\nresults_std=[]\n\nfor (cidx, cfactor) in enumerate(cfactors):\n smallres_sr=[]\n smallres_gmm=[]\n smallres_std=[]\n\n for nx, ndim in enumerate(ndimlist):\n if ndim<=maxdim[cidx]:\n if applyzero[cidx]:\n gsr=0.0\n gmm=0.0\n new_std=basestd\n else:\n div_factor=variance_f(make_corr(ndim, cfactor))\n new_std=basestd/div_factor\n variance=new_std**2\n gmm=basearithmean- costs[nx] - variance/2.0\n gsr=(gmm - riskfree) / new_std\n \n smallres_sr.append(gsr)\n smallres_gmm.append(gmm*100.0)\n 
smallres_std.append(new_std)\n \n print(\"Cfactor %f ndim %d GMM %f STD %f SR %f\" % (cfactor, ndim, gmm, new_std, gsr))\n \n results_sr.append(smallres_sr)\n results_gmm.append(smallres_gmm)\n results_std.append(smallres_std)\n\n\nfrom itertools import cycle\n\nlines = [\"-\", \"--\", \"-.\", \":\", \"-\"]\nlinecycler = cycle(lines) \ncolorcycler=cycle([\"red\", \"blue\", \"green\", \"red\", \"blue\"])\n\nfor r in range(len(results_sr)):\n plot(results_sr[r], color=next(colorcycler), linestyle=next(linecycler), linewidth=3)\n \n#xticks(range(len(ndimlist))[0::2], ndimlist[0::2])\nxticks(range(len(ndimlist)), ndimlist)\nlegend(cfactors, loc=\"upper left\", title=\"Correlations\", prop={'size': 18})\n#title(\"Diversification for various average correlations\")\nylabel(\"Sharpe ratio\")\nxlabel(\"Number of assets\")\n\nframe=plt.gca()\nframe.set_ylim([0.05, 0.25])\nframe.set_yticks([ 0.05, 0.10, 0.15, 0.20, 0.25])\n\n\nrcParams.update({'font.size': 18})\nax=plt.gca()\nax.get_legend().get_title().set_fontsize('18')\n\ndef file_process(filename):\n fig = plt.gcf()\n fig.set_size_inches(18.5,10.5)\n fig.savefig(\"/home/rob/%s.png\" % filename,dpi=300)\n fig.savefig(\"/home/rob/%sLOWRES.png\" % filename,dpi=50)\n \n Image.open(\"/home/rob/%s.png\" % filename).convert('L').save(\"/home/rob/%s.jpg\" % filename)\n Image.open(\"/home/rob/%sLOWRES.png\" % filename).convert('L').save(\"/home/rob/%sLOWRES.jpg\" % filename)\n\nfile_process(\"divbenefit_sr_all_unstacked\")\n\nshow()\n\n\nfor r in range(len(results_gmm)):\n plot(results_gmm[r], color=next(colorcycler), linestyle=next(linecycler), linewidth=3)\n \n#xticks(range(len(ndimlist))[0::2], ndimlist[0::2])\nxticks(range(len(ndimlist)), ndimlist)\nlegend(cfactors, loc=\"upper left\", title=\"Correlations\", prop={'size': 18})\n#title(\"Diversification for various average correlations\")\nylabel(\"Geometric mean %\")\nxlabel(\"Number of assets\")\n\nrcParams.update({'font.size': 18})\nax=plt.gca()\nax.get_legend().get_title().set_fontsize('18')\n\nfile_process(\"divbenefit_gmm_all_unstacked\")\n\nshow()\n\n\nfor r in range(len(results_std)):\n plot(results_std[r], color=next(colorcycler), linestyle=next(linecycler), linewidth=3)\n \n#xticks(range(len(ndimlist))[0::2], ndimlist[0::2])\nxticks(range(len(ndimlist)), ndimlist)\nlegend(cfactors, loc=\"upper left\", title=\"Correlations\", prop={'size': 18})\n#title(\"Diversification for various average correlations\")\nylabel(\"Standard deviation\")\nxlabel(\"Number of assets\")\n\nrcParams.update({'font.size': 18})\nax=plt.gca()\nax.get_legend().get_title().set_fontsize('18')\n\nfile_process(\"divbenefit_std_all_unstacked\")\n\nshow()\n","repo_name":"robcarver17/systematictradingexamples","sub_path":"plots_for_perhaps/diversificationbenefits.py","file_name":"diversificationbenefits.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","stars":238,"dataset":"github-code","pt":"81"} +{"seq_id":"15434092469","text":"import numpy as np\nimport sys, os\nimport matplotlib.pyplot as plt\nimport scipy.ndimage\n#import astropy.stats.sigma_clipping\nimport scipy.signal\nimport time\nimport os\n\n\n\ndef openStored(path):\n files = sorted(os.listdir(path))\n I_list = [os.path.join(path, filename) for filename in files if filename.startswith('I')]\n Q_list = [os.path.join(path, filename) for filename in files if filename.startswith('Q')]\n chan_I = np.array([np.load(filename) for filename in I_list])\n chan_Q = np.array([np.load(filename) for filename in Q_list])\n return 
chan_I, chan_Q\n\ndef normalize_and_stack(path, bb_freqs, lo_freqs):\n\n\n chan_I, chan_Q = openStored(path)\n channels = np.arange(np.shape(chan_I)[1])\n print(\"VNA with \", len(channels), \" channels\")\n mag = np.zeros((len(channels),len(lo_freqs)))\n chan_freq = np.zeros((len(channels),len(lo_freqs)))\n\n for chan in channels: \n mag[chan] = (np.sqrt(chan_I[:,chan]**2 + chan_Q[:,chan]**2)) \n chan_freq[chan] = (lo_freqs + bb_freqs[chan])/1.0e6 #was lo_freqs/2\n \n '''\n Normalization and conversion in dB\n '''\n \n for chan in channels:\n mag[chan] /= (2**31-1)\n mag[chan] /= ((2**21-1)/512)\n mag[chan] = 20*np.log10(mag[chan])\n \n for chan in channels:\n delta = mag[chan-1][-1]-mag[chan][0]\n mag[chan] += delta\n \n mags = np.hstack(mag) \n chan_freqs = np.hstack(chan_freq)\n\n return chan_freqs, mags\n\ndef lowpass_cosine( y, tau, f_3db, width, padd_data=True):\n \n import numpy as nm\n # padd_data = True means we are going to add symmetric copies of the data to the start and stop\n # to reduce/eliminate the discontinuities at the start and stop of a dataset due to filtering\n #\n # False means we're going to have transients at the start and stop of the data\n\n # kill the last data point if y has an odd length\n if nm.mod(len(y),2):\n y = y[0:-1]\n\n # add the weird padding\n # so, make a backwards copy of the data, then the data, then another backwards copy of the data\n if padd_data:\n y = nm.append( nm.append(nm.flipud(y),y) , nm.flipud(y) )\n\n # take the FFT\n import scipy\n import scipy.fftpack\n ffty=scipy.fftpack.fft(y)\n ffty=scipy.fftpack.fftshift(ffty)\n\n # make the companion frequency array\n delta = 1.0/(len(y)*tau)\n nyquist = 1.0/(2.0*tau)\n freq = nm.arange(-nyquist,nyquist,delta)\n # turn this into a positive frequency array\n pos_freq = freq[(len(ffty)//2):]\n\n # make the transfer function for the first half of the data\n i_f_3db = min( nm.where(pos_freq >= f_3db)[0] )\n f_min = f_3db - (width/2.0)\n i_f_min = min( nm.where(pos_freq >= f_min)[0] )\n f_max = f_3db + (width/2)\n i_f_max = min( nm.where(pos_freq >= f_max)[0] )\n\n transfer_function = nm.zeros(int(len(y)//2))\n transfer_function[0:i_f_min] = 1\n transfer_function[i_f_min:i_f_max] = (1 + nm.sin(-nm.pi * ((freq[i_f_min:i_f_max] - freq[i_f_3db])/width)))/2.0\n transfer_function[i_f_max:(len(freq)//2)] = 0\n\n # symmetrize this to be [0 0 0 ... .8 .9 1 1 1 1 1 1 1 1 .9 .8 ... 
0 0 0] to match the FFT\n transfer_function = nm.append(nm.flipud(transfer_function),transfer_function)\n\n # apply the filter, undo the fft shift, and invert the fft\n filtered=nm.real(scipy.fftpack.ifft(scipy.fftpack.ifftshift(ffty*transfer_function)))\n\n # remove the padding, if we applied it\n if padd_data:\n filtered = filtered[(len(y)//3):(2*(len(y)//3))]\n\n # return the filtered data\n return filtered\n\n'''\n adaptive_iteratively_reweighted_penalized_least_squares_smoothing function\n'''\ndef adaptive_iteratively_reweighted_penalized_least_squares_smoothing(data, lam=1.0e6, N_iter=5):\n '''\n lam: adjusting parameter\n N_iter: number of iterations\n '''\n from scipy.sparse import spdiags, linalg, diags\n from scipy.linalg import norm\n L = len(data)\n D = diags([1,-2,1],[0,-1,-2], shape=(L,L-2))\n w = np.ones(L)\n for i in range(N_iter):\n W = spdiags(w, 0, L, L)\n Z = W + lam * D.dot(D.transpose())\n z = linalg.spsolve(Z, w*data)\n d_mod = norm((z-data)[z>data])\n if d_mod < 0.001 * norm(data):\n return z\n p = np.exp(i*(data-z)/d_mod)\n w = 0.0*(data < z) + p*(data >= z)\n return z\n\n\ndef main(path, savefile=False):\n print(\"Searching for KIDs\")\n \n bb_freqs = np.load(path + \"/bb_freqs.npy\")\n lo_freqs = np.load(path + \"/sweep_freqs.npy\")\n \n chan_freqs, mags = normalize_and_stack(path, bb_freqs, lo_freqs)\n \n sweep_step = 1.25 # kHz\n smoothing_scale = 2500.0 # kHz\n \n #filtered = lowpass_cosine( mags, sweep_step, 1./smoothing_scale, 0.1 * (1.0/smoothing_scale))\n filtered = adaptive_iteratively_reweighted_penalized_least_squares_smoothing(mags)\n from scipy.signal import find_peaks\n\n # good parameters for MISTRAL 415 v4\n peak_width=(1.0, 150.0)\n peak_height=0.3\n peak_prominence=(0.2, 30.0)\n\n peaks, roba = find_peaks(-(mags-filtered),\n width=peak_width,\n prominence=peak_prominence,\n height=peak_height)\n \n \n \n\n target_freqs = chan_freqs[peaks]\n print(target_freqs)\n np.save(path + '/target_freqs.npy', target_freqs)\n np.savetxt(path + '/target_freqs.dat', target_freqs)\n \n\n plt.plot(chan_freqs, mags-filtered, label=\"VNA sweep, low-passed\")\n\n print(\"Found \", len(peaks), \"KIDs\")\n \n plt.title(\"VNA sweep and automatic KIDs search\")\n plt.xlabel(\"Frequency [MHz]\")\n plt.ylabel(\"Magnitude [dB]\")\n plt.plot(chan_freqs[peaks], mags[peaks]-filtered[peaks],\"x\", label=\"KIDs\")\n plt.show()\n \n '''\n kids_distance = []\n \n for i in range(0, len(peaks)):\n kids_distance.append(chan_freqs[peaks[i]]-chan_freqs[peaks[i-1]])\n \n print(kids_distance)\n min_distance = np.argmin(kids_distance[1:])\n print(\"Min sep between kids=\", kids_distance[min_distance],\"MHz at\",chan_freqs[peaks[min_distance]])\n \n plt.plot( (chan_freqs[peaks[min_distance]], chan_freqs[peaks[min_distance+1]]), \n (mags[peaks[min_distance]]-filtered[peaks[min_distance]],mags[peaks[min_distance+1]]-filtered[peaks[min_distance]+1]), \"o\", color=\"red\", label=\"Closest KIDs: \"+str(round(kids_distance[min_distance]*100, 2))+\"kHz\")\n '''\n \n plt.legend()\n\n return target_freqs\n","repo_name":"eesopee/mistral_readout","sub_path":"find_kids_mistral_new.py","file_name":"find_kids_mistral_new.py","file_ext":"py","file_size_in_byte":6512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"33530144625","text":"#!/usr/bin/env python\n\n\"\"\"\n\nIn this problem, a tree is an undirected graph that is connected and has no cycles.\n\nYou are given a graph that started as a tree with n nodes labeled from 1 to n, with one 
additional edge added. The added edge has two different vertices chosen from 1 to n, and was not an edge that already existed. The graph is represented as an array edges of length n where edges[i] = [ai, bi] indicates that there is an edge between nodes ai and bi in the graph.\n\nReturn an edge that can be removed so that the resulting graph is a tree of n nodes. If there are multiple answers, return the answer that occurs last in the input.\n\nExample 1:\nhttps://assets.leetcode.com/uploads/2021/05/02/reduntant1-1-graph.jpg\n\nInput: edges = [[1,2],[1,3],[2,3]]\nOutput: [2,3]\n\nExample 2:\nhttps://assets.leetcode.com/uploads/2021/05/02/reduntant1-2-graph.jpg\n\nInput: edges = [[1,2],[2,3],[3,4],[1,4],[1,5]]\nOutput: [1,4]\n\nConstraints:\n\nn == edges.length\n3 <= n <= 1000\nedges[i].length == 2\n1 <= ai < bi <= edges.length\nai != bi\nThere are no repeated edges.\nThe given graph is connected.\n\n\"\"\"\n\nclass Set:\n def __init__(self, n):\n self.id = list(range(n))\n self.rank = [0] * n\n\n def union(self, u, v):\n i = self.find(u)\n j = self.find(v)\n if i == j:\n return False\n if self.rank[i] < self.rank[j]:\n self.id[i] = self.id[j]\n elif self.rank[i] > self.rank[j]:\n self.id[j] = self.id[i]\n else:\n self.id[i] = self.id[j]\n self.rank[j] += 1\n return True\n\n def find(self, u):\n if self.id[u] != u:\n self.id[u] = self.find(self.id[u])\n return self.id[u]\n\ndef redundant_connection(edges):\n uf = Set(len(edges) + 1)\n for edge in edges:\n if not uf.union(edge[0], edge[1]):\n return edge\n return None\n\ndef main():\n assert(redundant_connection([[1, 2], [1, 3], [2, 3]]) == [2, 3])\n assert(redundant_connection([[1, 2], [2, 3], [3, 4], [1, 4], [1, 5]]) == [1, 4])\n\nmain()\n","repo_name":"qeedquan/challenges","sub_path":"leetcode/redundant-connection.py","file_name":"redundant-connection.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35530213594","text":"import base64\n\n\ndef base64_encode(data: str):\n # Convert the string to bytes\n bytes = data.encode(\"utf-8\")\n\n # Encode the bytes using base64\n encoded_bytes = base64.b64encode(bytes)\n\n # Convert the encoded bytes back to a string\n encoded_string = encoded_bytes.decode(\"utf-8\")\n\n return encoded_string\n","repo_name":"tomiambro/fastapi-template","sub_path":"fastapi/src/utilities/base64.py","file_name":"base64.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4828226603","text":"import matplotlib.pyplot as plt\nimport decimal\nfrom datetime import datetime\n\nctx = decimal.Context()\nctx.prec = 20\ntoken_store = './output/tokens.json'\n\nplt.style.use('./assets/presentation.mplstyle')\n\n\nclass Chart:\n @staticmethod\n def generate_line_chart(coin_id, y):\n x = [x for x in range(len(y))]\n\n fig, ax = plt.subplots()\n ax.plot(x, y, color='black')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.margins(x=0)\n\n fig.set_size_inches(mm_to_inch(50.55), mm_to_inch(25.71))\n frame1 = plt.gca()\n frame1.axes.get_xaxis().set_ticks([])\n\n chart_path = './output/{coin_id}.png'.format(coin_id=coin_id)\n plt.savefig(chart_path, transparent=True, bbox_inches='tight', dpi=130)\n\n price = y[-1]\n percentage = (price - y[0]) / y[0] * 100\n\n formatted_price = (float_to_str(price) if price < 0.01 else \"{0:,.2f}\".format(price))\n formatted_percentage = 
\"{:.1f}\".format(percentage) + '%'\n\n last_updated = datetime.now().strftime(\"%d %b %Y, %H:%M\")\n\n return chart_path, formatted_price, formatted_percentage, last_updated\n\n\ndef mm_to_inch(mm):\n return mm * 0.0393701\n\n\ndef float_to_str(f):\n \"\"\"\n Convert the given float to a string,\n without resorting to scientific notation\n \"\"\"\n d1 = ctx.create_decimal(repr(f))\n return format(d1, 'f')[:14]\n","repo_name":"lumtwj/crypto-pi","sub_path":"app/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14142658350","text":"import os\nimport re\nimport json\nimport pandas as pd\nfrom pathlib import Path\nfrom typing import List, Dict, Union, Tuple, Any\nfrom glob import glob\nfrom tqdm import tqdm\nfrom sklearn.model_selection import StratifiedKFold\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom sklearn.linear_model import LinearRegression\n# -\n\n# ### read_annotations\n\npd.set_option('display.max_colwidth', 1000)\n\npercent_image_ids = []\n\n\n# +\ndef series2cat(series):\n return [str(x) for x in series]\n\ndef series2num(series):\n return [float(x) for x in series]\n\n\n# Must be True for every value in the series\ndef is_numerical(x):\n \"\"\"Test whether input can be parsed as a Python float.\"\"\"\n try:\n float(x)\n return True\n except ValueError:\n return False\n\n\ndef is_numerical_all(series):\n return all([is_numerical(x) for x in series])\n\n\n# -\n\ndef get_gt_icadr_line(data,chart_type,margin = 5):\n df_ds = pd.DataFrame(data[\"task6\"][\"output\"]['data series'][0][\"data\"])\n df_ve = pd.DataFrame(data[\"task6\"][\"output\"][\"visual elements\"][\"lines\"][0])\n \n # interplは指定ピクセル分マージンをとっておく\n ve_x = df_ve[\"x\"].tolist()\n ve_y = df_ve[\"y\"].tolist()\n ve_x = [ve_x[0]-margin] + ve_x + [ve_x[-1]+margin]\n ve_y = [ve_y[0]] + ve_y + [ve_y[-1]]\n f_interp = interp1d(ve_x, ve_y)\n\n df_xaxis = pd.DataFrame(data[\"task4\"][\"output\"][\"axes\"][\"x-axis\"])\n if len(df_xaxis)==0:\n return None\n df_text = pd.DataFrame(data[\"task2\"][\"output\"]['text_blocks'])\n\n df_text = df_text.rename(columns={\"text\":\"x_plot\"})\n df_tgt = df_xaxis.merge(df_text[[\"id\",\"x_plot\"]],on=[\"id\"],how=\"left\")\n df_tgt[\"x_img\"] = df_tgt[\"tick_pt\"].apply(lambda x:x[\"x\"])\n\n df_tgt = df_tgt[df_tgt[\"x_img\"].between(min(ve_x),max(ve_x))].reset_index(drop=True)\n if len(df_tgt)==0:\n return None\n df_tgt[\"y_img\"] = df_tgt[\"x_img\"].apply(lambda x:f_interp(x))\n y_img2plot = LinearRegression().fit(df_ve[\"y\"].values.reshape(-1,1), df_ds[\"y\"])\n df_tgt[\"y_plot\"] = y_img2plot.predict(df_tgt[\"y_img\"].values.reshape(-1,1))\n all_x = df_tgt[\"x_plot\"].astype(str).tolist()\n all_y = df_tgt[\"y_plot\"].tolist()\n return {\n \"x\" : all_x,\n \"y\" : all_y,\n }\n\n\ndef get_gt_icadr_bar(data,chart_type):\n data_series = data[\"task6\"][\"output\"]['data series'][0][\"data\"]\n all_x, all_y = [], []\n for d in data_series:\n x = d[\"x\"]\n y = d[\"y\"]\n # Ignore nan values\n if (str(x) == \"nan\" and isinstance(x, float)) or (\n str(y) == \"nan\" and isinstance(y, float)\n ):\n continue\n all_x.append(x)\n all_y.append(y)\n if chart_type in [\"vertical_bar\"]:\n all_x = series2cat(all_x)\n all_y = series2num(all_y)\n elif chart_type == \"horizontal_bar\":\n all_x,all_y = all_y,all_x # icdarはxとyが逆\n all_x = series2num(all_x)\n all_y = series2cat(all_y)\n else:\n ValueError\n 
return {\n \"x\" : all_x,\n \"y\" : all_y,\n }\n\n\ndef care_percent(data,gt,chart_type):\n df_axis = pd.DataFrame(data[\"task4\"][\"output\"][\"axes\"][\"y-axis\"])\n if len(df_axis)==0:\n return gt\n df_text = pd.DataFrame(data[\"task2\"][\"output\"]['text_blocks'])\n df_axis = df_axis.merge(df_text[[\"id\",\"text\"]],on=[\"id\"],how=\"left\")\n # if the y-ticks contain %, multiply by 100\n if df_axis[\"text\"].apply(lambda x:\"%\" in x).sum()> 0:\n percent_image_ids.append(data[\"image_id\"])\n if chart_type in [\"line\",\"vertical_bar\"]:\n gt[\"y\"] = (np.array(gt[\"y\"])*100).tolist()\n elif chart_type == \"horizontal_bar\":\n gt[\"x\"] = (np.array(gt[\"x\"])*100).tolist()\n return gt\n\n\ntarget_types = [\"horizontal_bar\",\"vertical_bar\",\"line\"]\nfilepaths = glob(\"./../input/ICPR2022*/**/*.json\", recursive=True)\n\ngt_all = []\nfor path in tqdm(filepaths):\n image_id = path.split(\"/\")[-1].split(\".json\")[0]\n with open(path) as fp:\n data = json.load(fp)\n data[\"image_id\"] = image_id\n task6 = data.get(\"task6\")\n if task6 is None:\n continue\n if task6[\"output\"] is None:\n continue\n data_series = task6[\"output\"][\"data series\"]\n if len(data_series)==0:# keep charts with more than one series as well\n continue\n series_len = len(data_series)\n data_series = data_series[0][\"data\"]\n chart_type = data[\"task1\"][\"output\"][\"chart_type\"].replace(\" \",\"_\")\n if chart_type not in target_types:\n continue\n if \"y\" not in data_series[0].keys():\n # print(image_id,chart_type)\n continue\n if chart_type in [\"line\"]:\n gt = get_gt_icdar_line(data,chart_type)\n elif chart_type in [\"horizontal_bar\",\"vertical_bar\"]:\n gt = get_gt_icdar_bar(data,chart_type)\n else:\n raise ValueError\n if gt is None:\n continue\n gt = care_percent(data,gt,chart_type)\n gt_all.append((image_id,gt[\"x\"],gt[\"y\"],chart_type,path,series_len))\n\ndf = pd.DataFrame(gt_all,columns = [\"image_id\",\"xs\",\"ys\",\"chart_type\",\"json_path\",\"series_len\"])\n\nimage_paths = glob(\"./../input/ICPR2022*/**/*.jpg\", recursive=True)\nid2imgpath = {p.split(\"/\")[-1].split(\".jpg\")[0]: p for p in image_paths}\ndf[\"image_path\"] = df[\"image_id\"].map(id2imgpath)\n\nimage_ids_to_rm = ['PMC5660682___fgene-08-00145-g0003']\ndf = df[~df[\"image_id\"].isin(image_ids_to_rm)].reset_index(drop=True)\ndf[\"source\"] = df[\"image_path\"].apply(lambda x:\"icdar2022_test\" if \"TEST\" in x else \"icdar2022_train\")\n\n\n# ### GT for DEPLOT\n\ndef round_float(x):\n if isinstance(x, float):\n x = str(x)\n if \".\" in x:\n integer, decimal = x.split(\".\")\n if abs(float(integer)) > 1:\n decimal = decimal[:1]\n else:\n decimal = decimal[:4]\n\n x = float(integer + \".\" + decimal)\n return x\n\n\ntexts = []\nfor i in tqdm(range(len(df))):\n xs = df[\"xs\"].values[i]\n ys = df[\"ys\"].values[i]\n \n if len(xs) < len(ys):\n xs.extend([\"nan\"] * (len(ys) - len(xs)))\n elif len(xs) > len(ys):\n ys.extend([\"nan\"] * (len(xs) - len(ys)))\n\n xys = []\n for x,y in zip(xs,ys):\n x = round_float(x)\n y = round_float(y)\n xys.append(str(x) + \"|\" + str(y))\n text = \"<0x0A>\".join(xys) + \"\" \n texts.append(text)\n\ndf[\"text\"] = texts\ndf[\"is_percentile\"] = df[\"image_id\"].isin(percent_image_ids)\n\ndf.to_csv(\"./../input/processed_df_icdar_v5.csv\",index=False)\n\n\n\n\n\n\n","repo_name":"fuumin621/Kaggle-Benetech-4th","sub_path":"preprocess/preprocess_icdar.py","file_name":"preprocess_icdar.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"18845238429","text":"import numpy as np\r\nfrom astropy.io import fits\r\nimport matplotlib.pyplot as plt\r\n\r\n# Load the FITS file\r\nfile_path = 'lum-bin-2-min-15-0049lum-2-60.fit'\r\nhdul = fits.open(file_path)\r\n\r\n# Extract the data from the primary HDU (index 0)\r\ndata = hdul[0].data\r\n\r\n# Print some basic statistics\r\nprint(f\"Mean ADU: {np.mean(data):.2f}\")\r\nprint(f\"Median ADU: {np.median(data):.2f}\")\r\nprint(f\"Standard Deviation: {np.std(data):.2f}\")\r\nprint(f\"Max ADU: {np.max(data)}\")\r\nprint(f\"Min ADU: {np.min(data)}\")\r\n\r\n# Optionally, visualize the data with a histogram\r\nplt.hist(data.flatten(), bins=100, log=True)\r\nplt.xlabel('ADU')\r\nplt.ylabel('Number of Pixels')\r\nplt.title('Histogram of ADU values')\r\nplt.show()\r\n\r\n# Close the FITS file\r\nhdul.close()\r\n","repo_name":"IraLeeBell/Exoplanet-Research-ASU-SESE","sub_path":"Exoplanet-Research/get-adu-from-single-fit-file.py","file_name":"get-adu-from-single-fit-file.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"11817731699","text":"import time\nfrom typing import NoReturn\nstart_time = time.time()\n\n# f = open(\"packets_p2_e.txt\", \"r\")\n# f = open(\"packets_tiny.txt\", \"r\")\n# f = open(\"packets_small_c.txt\", \"r\")\nf = open(\"packets.txt\", \"r\")\nif f:\n print(\"Successfully opened data...\")\n\n# binary = 0b010\n# print(\"{0:b}\".format(binary))\n# print(binary)\n\n# hexo = '3b'\n# print(int(hexo, 16))\n# print(bin(int(hexo, 16)))\n\ninput = f.readline()\ninput = input.strip()\n\nglobal TOTAL_VERSION\nTOTAL_VERSION = 0\n\ndef trimQueue(queue, numBits):\n # Create a mask by shifting the 1 left up until the original number of bits, then flipping\n mask = (1 << numBits) - 1\n queue = queue & mask\n return queue\n\ndef parsePackets(queue, numBits):\n (version, queue, numBits) = getVersionOrType(queue, numBits)\n (typeID, queue, numBits) = getVersionOrType(queue, numBits)\n global TOTAL_VERSION\n TOTAL_VERSION += version\n if typeID == 4:\n (value, queue, numBits) = getLiteralValue(queue, numBits)\n print(\"LITERAL: \" + str(value))\n else:\n (value, queue, numBits) = procOperator(queue, numBits, typeID)\n\n return (value, queue, numBits)\n\ndef getVersionOrType(queue, numBits):\n # Change numbits to the number of bits after the version/type has been stripped out\n numBits = numBits - 3\n\n # Get version/type by shifting the queue to the right until only relevant bits remain\n version = queue >> numBits\n\n queue = trimQueue(queue, numBits)\n return version, queue, numBits\n\ndef getLiteralValue(queue, numBits):\n keepCounting = 1\n value = 0\n while keepCounting:\n pQueue = bin(queue)\n numBits -= 1\n keepCounting = queue >> numBits\n queue = trimQueue(queue, numBits)\n numBits -= 4\n value = value << 4\n value = value | queue >> numBits\n queue = trimQueue(queue, numBits)\n \n return (value, queue, numBits)\n\ndef calcValue (subValue, value, typeID):\n if typeID == 0:\n value = value + subValue\n elif typeID == 1:\n value = value * subValue\n elif typeID == 2:\n value = min(value, subValue)\n elif typeID == 3:\n value = max(value, subValue)\n elif typeID == 5:\n if value > subValue: \n value = 1\n else: \n value = 0\n elif typeID == 6:\n if value < subValue: \n value = 1\n else: \n value = 0\n elif typeID == 7:\n if value == 0:\n value = subValue\n else: \n if value == subValue: \n value = 1\n else: \n value = 0\n else: \n print(\"Fucked up the typeID :(\")\n 
return value\n\ndef procOperator(queue, numBits, typeID):\n numBits -= 1\n lengthID = queue >> numBits\n queue = trimQueue(queue, numBits)\n print(\"Operator: type \" + str(lengthID))\n if lengthID:\n numBits -= 11\n numSubPackets = queue >> numBits\n queue = trimQueue(queue, numBits)\n while numSubPackets:\n (subValue, queue, numBits) = parsePackets(queue, numBits)\n if not 'value' in locals():\n value = subValue\n else:\n value = calcValue(subValue, value, typeID)\n numSubPackets -= 1\n elif not lengthID:\n numBits -= 15\n numSubBits = queue >> numBits\n queue = trimQueue(queue, numBits)\n while numSubBits > 1:\n tempBits = numBits\n (subValue, queue, numBits) = parsePackets(queue, numBits)\n if not 'value' in locals():\n value = subValue\n else:\n value = calcValue(subValue, value, typeID)\n numSubBits = numSubBits - (tempBits - numBits)\n \n return (value, queue, numBits)\n\nnumBits = 0\nqueue = 0\nfor ii in range(len(input)):\n queue = queue << 4\n queue = queue | int(input[ii], 16)\n numBits += 4\n\nprint(\"Original queue: \")\nprint(bin(queue))\n\n(value, queue, numBits) = parsePackets(queue, numBits)\n\nprint(\"TOTAL_VERSION: \" + str(TOTAL_VERSION))\nprint(\"TOTAL VALUE: \" + str(value))\n","repo_name":"YogurtBoy/AdventOfCode","sub_path":"2021/day16/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32514693832","text":"import taichi as ti\n\nti.init(arch=ti.gpu)\n\nreal = ti.f32\ndim = 2\nn_particle_x = 100\nn_particle_y = 8\nn_particles = n_particle_x * n_particle_y\n# triangle element thus *2\nn_elements = (n_particle_x - 1) * (n_particle_y - 1) * 2\nn_grid = 64\ndx = 1 / n_grid\ninv_dx = 1 / dx\ndt = 1e-4\np_mass = 1\np_vol = 1\nmu = 1\nla = 1\n#particle position\nx = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles, needs_grad=True)\n#particle velocity\nv = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles)\n#particle affine matrix\nC = ti.Matrix.field(dim, dim, dtype=ti.f32, shape=n_particles)\ngrid_v = ti.Vector.field(dim, dtype=ti.f32, shape=(n_grid, n_grid))\ngrid_m = ti.field(dtype=ti.f32, shape=(n_grid, n_grid))\n# restT remains unchanged.\n# for the computation of deformation gradient F based on element??\nrestT = ti.Matrix.field(dim,\n dim,\n dtype=ti.f32,\n shape=n_particles,\n needs_grad=True)\ntotal_energy = ti.field(dtype=ti.f32, shape=(), needs_grad=True)\nvertices = ti.field(dtype=ti.i32, shape=(n_elements, 3))\n\n\n@ti.func\ndef compute_T(i):\n a = vertices[i, 0]\n b = vertices[i, 1]\n c = vertices[i, 2]\n ab = x[b] - x[a]\n ac = x[c] - x[a]\n return ti.Matrix([[ab[0], ac[0]], [ab[1], ac[1]]])\n\n\n@ti.kernel\ndef compute_rest_T():\n for i in range(n_elements):\n restT[i] = compute_T(i)\n\n\n@ti.kernel\ndef compute_total_energy():\n for i in range(n_elements):\n currentT = compute_T(i)\n F = currentT @ restT[i].inverse()\n # NeoHookean\n I1 = (F @ F.transpose()).trace()\n J = F.determinant()\n element_energy = 0.5 * mu * (\n I1 - 2) - mu * ti.log(J) + 0.5 * la * ti.log(J)**2\n ti.atomic_add(total_energy[None], element_energy * 1e-3)\n\n\n@ti.kernel\ndef p2g():\n for p in x:\n base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)\n fx = x[p] * inv_dx - ti.cast(base, ti.f32)\n w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]\n affine = p_mass * C[p]\n for i in ti.static(range(3)):\n for j in ti.static(range(3)):\n I = ti.Vector([i, j])\n dpos = (float(I) - fx) * dx\n weight = w[i].x * w[j].y\n # force = -x.grad[p] 
(=-w/x)\n grid_v[base + I] += weight * (p_mass * v[p] - x.grad[p] +\n affine @ dpos)\n grid_m[base + I] += weight * p_mass\n\n\nbound = 3\n\n@ti.kernel\ndef grid_op():\n # grid operations\n for i, j in grid_m:\n if grid_m[i, j] > 0:\n inv_m = 1 / grid_m[i, j]\n grid_v[i, j] = inv_m * grid_v[i, j]\n grid_v[i, j].y -= dt * 9.8\n\n # center collision circle\n dist = ti.Vector([i * dx - 0.5, j * dx - 0.5])\n if dist.norm_sqr() < 0.005:\n dist = dist.normalized()\n grid_v[i, j] -= dist * min(0, grid_v[i, j].dot(dist))\n\n # box\n if i < bound and grid_v[i, j].x < 0:\n grid_v[i, j].x = 0\n if i > n_grid - bound and grid_v[i, j].x > 0:\n grid_v[i, j].x = 0\n if j < bound and grid_v[i, j].y < 0:\n grid_v[i, j].y = 0\n if j > n_grid - bound and grid_v[i, j].y > 0:\n grid_v[i, j].y = 0\n\n\n@ti.kernel\ndef g2p():\n for p in x:\n base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)\n fx = x[p] * inv_dx - ti.cast(base, ti.f32)\n w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2]\n new_v = ti.Vector([0.0, 0.0])\n new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]])\n\n for i in ti.static(range(3)):\n for j in ti.static(range(3)):\n I = ti.Vector([i, j])\n dpos = float(I) - fx\n g_v = grid_v[base + I]\n weight = w[i].x * w[j].y\n new_v += weight * g_v\n new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx\n\n v[p] = new_v\n x[p] += dt * v[p]\n C[p] = new_C\n\n\ngui = ti.GUI(\"MPM\", (640, 640), background_color=0x112F41)\n\nmesh = lambda i, j: i * n_particle_y + j\n\n\ndef main():\n # position+velocity initialization\n for i in range(n_particle_x):\n for j in range(n_particle_y):\n # t means particle id\n t = mesh(i, j)\n x[t] = [0.1 + i * dx * 0.5, 0.7 + j * dx * 0.5]\n v[t] = [0, -1]\n\n # build mesh (element id + element vertice id)\n for i in range(n_particle_x - 1):\n for j in range(n_particle_y - 1):\n # element id\n eid = (i * (n_particle_y - 1) + j) * 2\n vertices[eid, 0] = mesh(i, j)\n vertices[eid, 1] = mesh(i + 1, j)\n vertices[eid, 2] = mesh(i, j + 1)\n\n eid = (i * (n_particle_y - 1) + j) * 2 + 1\n vertices[eid, 0] = mesh(i, j + 1)\n vertices[eid, 1] = mesh(i + 1, j + 1)\n vertices[eid, 2] = mesh(i + 1, j)\n\n compute_rest_T()\n\n vertices_ = vertices.to_numpy()\n\n for f in range(600):\n for s in range(50):\n grid_m.fill(0)\n grid_v.fill(0)\n # Note that we are now differentiating the total energy w.r.t. 
the particle position.\n # Recall that F = - \\partial (total_energy) / \\partial x\n with ti.Tape(total_energy):\n # Do the forward computation of total energy and backward propagation for x.grad, which is later used in p2g\n compute_total_energy()\n # It's OK not to use the computed total_energy at all, since we only need x.grad\n p2g()\n grid_op()\n g2p()\n\n gui.circle((0.5, 0.5), radius=45, color=0x068587)\n # TODO: why is visualization so slow?\n particle_pos = x.to_numpy()\n for i in range(n_elements):\n for j in range(3):\n a, b = vertices_[i, j], vertices_[i, (j + 1) % 3]\n gui.line((particle_pos[a][0], particle_pos[a][1]),\n (particle_pos[b][0], particle_pos[b][1]),\n radius=1,\n color=0x4FB99F)\n gui.circles(particle_pos, radius=1.5, color=0xF2B134)\n gui.line((0.00, 0.03), (1.0, 0.03), color=0xFFFFFF, radius=3)\n gui.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Zhijie-YU/Taichi_Related","sub_path":"examples/mpm_lagrangian_forces.py","file_name":"mpm_lagrangian_forces.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"32660857761","text":"#!/usr/bin/python3\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\nwebsite = \"http://php.net\"\n\n# data downloading\nweb_data=urllib.request.urlopen(website)\n\n#printing data\n#print(web_data.read())\n\n\n# read the raw HTML from the response\nclean_data = web_data.read()\n\n\n# parse the HTML with the html5lib parser\nget_clean = BeautifulSoup(clean_data,'html5lib')\n\n# keep only the text content\nfinal_data = get_clean.get_text()\n\n# strip and split into whitespace-separated tokens\ngood_data=final_data.strip().split()\n\nprint(good_data)\n\n","repo_name":"divyangB/nlp","sub_path":"webdata_nlp.py","file_name":"webdata_nlp.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9709505986","text":"from .settings import mysettings\n\n\n__all__ = ['get_context']\n\n\ndef get_context(request=None, site_name=None, title=None, hide_container=None, show_sidenav=None, fixed_sidenav=None,\n primary_color=None, secondary_color=None, primary_color_light=None, primary_color_dark=None,\n success_color=None, error_color=None, link_color=None):\n \"\"\"Context processor to add nav context to every view.\n\n Args:\n request (HttpRequest): http request.\n\n site_name (str)[None]: Name of the site to display in the navbar.\n title (str)[None]: Title to display in the browser tab.\n\n hide_container (bool)[None]: If True do not use the container and make the contents the full width.\n show_sidenav (bool)[None]: Show the side navigation panel. For larger screens show a menu button to access it.\n fixed_sidenav (bool)[None]: Make the side navigation panel fixed. 
Setting show_sidenav must be True for this.\n\n primary_color (str)[None]: String name or hex color to change the primary color.\n secondary_color (str)[None]: String name or hex color to change the secondary color.\n primary_color_light (str)[None]: String name or hex color to change the primary light color.\n This is calculated by default from the primary color.\n primary_color_dark (str)[None]: String name or hex color to change the primary dark color.\n This is calculated by default from the primary color.\n success_color (str)[None]: String name or hex color to change the success color.\n error_color (str)[None]: String name or hex color to change the error color.\n link_color (str)[None]: String name or hex color to change the link color.\n \"\"\"\n if site_name is None:\n site_name = mysettings.MATERIALIZE_SITE_NAME\n if title is None:\n title = mysettings.MATERIALIZE_TITLE\n\n if hide_container is None:\n hide_container = mysettings.MATERIALIZE_HIDE_CONTAINER\n if show_sidenav is None:\n show_sidenav = mysettings.MATERIALIZE_SHOW_SIDENAV\n if fixed_sidenav is None:\n fixed_sidenav = mysettings.MATERIALIZE_FIXED_SIDENAV\n\n if primary_color is None:\n primary_color = mysettings.MATERIALIZE_PRIMARY_COLOR\n if secondary_color is None:\n secondary_color = mysettings.MATERIALIZE_SECONDARY_COLOR\n if primary_color_light is None:\n primary_color_light = mysettings.MATERIALIZE_PRIMARY_COLOR_LIGHT\n if primary_color_dark is None:\n primary_color_dark = mysettings.MATERIALIZE_PRIMARY_COLOR_DARK\n if success_color is None:\n success_color = mysettings.MATERIALIZE_SUCCESS_COLOR\n if error_color is None:\n error_color = mysettings.MATERIALIZE_ERROR_COLOR\n if link_color is None:\n link_color = mysettings.MATERIALIZE_LINK_COLOR\n\n return {\n 'DJANGO_BASE_TEMPLATE': 'materialize_nav/base.html',\n\n 'MATERIALIZE_SITE_NAME': site_name,\n 'MATERIALIZE_TITLE': title,\n\n 'HIDE_CONTAINER': hide_container,\n 'SHOW_SIDENAV': show_sidenav,\n 'FIXED_SIDENAV': fixed_sidenav,\n 'PRIMARY_COLOR': primary_color,\n 'SECONDARY_COLOR': secondary_color,\n 'PRIMARY_COLOR_LIGHT': primary_color_light,\n 'PRIMARY_COLOR_DARK': primary_color_dark,\n 'SUCCESS_COLOR': success_color,\n 'ERROR_COLOR': error_color,\n 'LINK_COLOR': link_color,\n }\n","repo_name":"justengel-web/django_materialize_nav","sub_path":"materialize_nav/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"75108350665","text":"from PySide6.QtWidgets import (QApplication, QVBoxLayout,\n QHBoxLayout, QMainWindow,\n QWidget, QHeaderView,\n QMessageBox, QDialog,\n QLabel, QLineEdit, QDialogButtonBox)\nfrom PySide6.QtGui import QStandardItemModel, QStandardItem\nfrom PySide6.QtCore import QModelIndex\n\nclass EditDialog(QDialog):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.layout = QVBoxLayout()\n self.setWindowTitle(\"Modify Item\")\n\n self.line_edit = []\n self.new_values = None\n self.setLayout(self.layout)\n\n def setup_fields(self, headers, item):\n for header, value in zip(headers, item):\n layout = QHBoxLayout()\n layout.addWidget(QLabel(header))\n line_edit = QLineEdit(str(value))\n layout.addWidget(line_edit)\n self.line_edit.append(line_edit)\n self.layout.addLayout(layout)\n\n button_box = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel)\n button_box.accepted.connect(self.accept)\n 
button_box.rejected.connect(self.reject)\n self.layout.addWidget(button_box)\n\n def get_new_values(self):\n # for item in self.line_edit:\n # data = int(item.text()) if item.text().isdigit() else item.text()\n\n self.new_values = [int(item.text()) if item.text().isdigit() else item.text() for item in self.line_edit]\n\n return self.new_values","repo_name":"devloulou/2023_februar_ui","sub_path":"6. alkalom/modules/window_module/edit_dialog.py","file_name":"edit_dialog.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24351504161","text":"# -*- coding: UTF-8 -*-\nimport tensorflow as tf\nfrom model import inception_resnet_v2\nfrom reader import decode_from_tfrecords\nfrom reader import get_train_data\nimport time\nimport sys\nimport matplotlib.pyplot as plt \nimport numpy as np\n\n# keep a copy of the original stdout handle\nstdout_backup = sys.stdout\n# define the log file that receives your log info \nlog_file = open(\"message_valid.log\", \"w\")\n# redirect print output to log file\nsys.stdout = log_file\n\n\npath=\"/root/train/\"\npd_path=\"./model/\"\nbatch_size=5\nfilename=[path+'TFcodeX_'+str(10)+\".tfrecord\"]\nfilename_queue = tf.train.string_input_producer(filename,num_epochs=None) # read the filenames into an input queue\ninputs, labels ,ids= decode_from_tfrecords(filename_queue, is_batch=False,batch_size=batch_size)\ninputs=tf.clip_by_value((inputs/2+0.5)*255,0,255)\ninputs=tf.cast(inputs,tf.uint8)\ninputs=tf.expand_dims(inputs,3)\ninputs=get_train_data(inputs,height=256,width=256,batch_size=batch_size)\nlabels=tf.subtract(labels,1)\nonehots=tf.squeeze(tf.one_hot(labels,5,dtype=tf.int64),squeeze_dims=1)\n\ninputs_aloc=tf.placeholder(\n dtype=tf.float32,\n shape=[None,256,256,1],\n name='input'\n)\nonehots_aloc=tf.placeholder(\n dtype=tf.int64,\n shape=[None,5],\n name='one_hot'\n)\nlabels_aloc=tf.placeholder(\n dtype=tf.int64,\n shape=[None,1],\n name='label'\n)\n\n#global_steps = tf.Variable(1, trainable=False)\n\naux,prediction = inception_resnet_v2(inputs_aloc, num_classes=5, is_training=False,\n\n reuse=None,\n\n scope='InceptionResnetV2',\n\n create_aux_logits=True,\n\n activation_fn=tf.nn.leaky_relu)\nloss = tf.losses.softmax_cross_entropy(\n onehots_aloc,\n aux,\n scope='Loss'\n)\n\n#tf.summary.scalar(\n# 'Loss',\n# loss\n#)\n\n#train_step=tf.train.AdamOptimizer(0.0001).minimize(loss,global_step=global_steps)\n\n\n \naccuraty=tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(prediction,dimension=1),tf.squeeze(labels_aloc)),tf.float32))\n#tf.summary.scalar(\n# 'Accuraty',\n# accuraty\n#)\n\n#merged = tf.summary.merge_all()\n\ninit = tf.global_variables_initializer()\nvar_list = tf.trainable_variables()\n\ng_list = tf.global_variables()\n\nbn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]\n\nbn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]\n\nvar_list += bn_moving_vars\n\nsaver = tf.train.Saver(var_list=var_list)\n\nwith tf.Session() as sess:\n sess.run(init)\n status=tf.train.latest_checkpoint(pd_path)\n if status:\n saver.restore(sess,status)\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n ls=0.0\n acc=0.0\n for i in range(70):\n print('========>',i/70*100,'%')\n inputs_,labels_,onehots_=sess.run([inputs,labels,onehots])\n loss_,accuraty_=sess.run([loss,accuraty],\\\n feed_dict={inputs_aloc:inputs_,labels_aloc:labels_,onehots_aloc:onehots_})\n ls+=loss_;\n acc+=accuraty_;\n\n # report loss and accuracy averaged over the 70 evaluation batches\n print('accuracy is ',acc/70)\n print('loss is 
',ls/70) \n print('train done') \n sys.stdout.flush()\n coord.request_stop()\n coord.join(threads)\n","repo_name":"JiabinTan/competition_pano","sub_path":"NNCompet/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26348428436","text":"\"\"\"\nA simple demo with preset trajectories.\n\npython -m dedo.demo_preset --logdir=/tmp/dedo_preset --env=HangGarment-v1 \\\n --max_episode_len=200 --robot anchor --cam_resolution 0 --viz\n\n\nNote: this code is for research i.e. quick experimentation; it has minimal\ncomments for now, but if we see further interest from the community -- we will\nadd further comments, unify the style, improve efficiency and add unittests.\n\n@yonkshi\n\n\"\"\"\nimport os\nimport time\nimport gym\nimport scipy.linalg\nimport matplotlib.pyplot as plt\nfrom matplotlib import interactive\n\ninteractive(True)\nimport numpy as np\n\nfrom dedo.utils.args import get_args\nfrom dedo.utils.anchor_utils import create_anchor_geom\nfrom dedo.utils.preset_info import preset_traj\nfrom dedo.utils.pcd_utils import visualize_data, render_video\nimport wandb\nimport cv2\n\nfrom dedo.utils.bullet_manipulator import convert_all\n\n\ndef play(env, num_episodes, args):\n if args.task == 'ButtonProc':\n deform_obj = 'cloth/button_cloth.obj'\n elif args.task == 'HangProcCloth':\n deform_obj = 'cloth/apron_0.obj'\n elif args.task == 'FoodPacking':\n deform_obj = 'food'\n else:\n deform_obj = env.deform_obj\n\n assert deform_obj in preset_traj, \\\n f'deform_obj {deform_obj:s} not in presets {preset_traj.keys()}'\n preset_wp = preset_traj[deform_obj]['waypoints']\n vidwriter = None\n if args.cam_resolution > 0 and args.logdir is not None:\n if not os.path.exists(args.logdir):\n os.makedirs(args.logdir)\n savepath = os.path.join(args.logdir, f'{args.env}_{args.seed}.mp4')\n vidwriter = cv2.VideoWriter(\n savepath, cv2.VideoWriter_fourcc(*'mp4v'), 24,\n (args.cam_resolution, args.cam_resolution))\n print('saving to ', savepath)\n if args.use_wandb:\n wandb.init(project='dedo', name=f'{args.env}-preset',\n config={'env': f'{args.task}-preset'},\n tags=['preset', args.env])\n\n for epsd in range(num_episodes):\n print('------------ Play episode ', epsd, '------------------')\n obs = env.reset()\n if args.cam_resolution > 0:\n img = env.render(mode='rgb_array', width=args.cam_resolution,\n height=args.cam_resolution)\n if vidwriter is not None:\n vidwriter.write(img[..., ::-1])\n if args.debug:\n viz_waypoints(env.sim, preset_wp['a'], (1, 0, 0, 1))\n if 'b' in preset_wp:\n viz_waypoints(env.sim, preset_wp['b'], (1, 0, 0, 0.5))\n # Need to step to get low-dim state from info.\n step = 0\n ctrl_freq = args.sim_freq / args.sim_steps_per_action\n robot = None\n if hasattr(env, 'robot'):\n robot = env.robot\n pos_traj, vel_traj = build_traj(\n env, preset_wp, 'a', anchor_idx=0, ctrl_freq=ctrl_freq, robot=robot)\n pos_traj_b, traj_b = None, None\n if 'b' in preset_wp:\n pos_traj_b, traj_b = build_traj(env, preset_wp, 'b', anchor_idx=1,\n ctrl_freq=ctrl_freq, robot=robot)\n if robot is None: # velocity control for anchors\n traj = vel_traj\n if traj_b is not None:\n traj = merge_traj(traj, traj_b)\n last_action = np.zeros_like(traj[0])\n else: # position control for robot\n traj = pos_traj\n if pos_traj_b is not None:\n traj = merge_traj(pos_traj, pos_traj_b)\n last_action = traj[-1]\n traj_ori = preset_wp.get('a_theta', None)\n if traj_ori is not None:\n traj_ori = 
convert_all(np.array(traj_ori), 'theta_to_sin_cos')\n n_repeats = traj.shape[0] // len(traj_ori)\n traj_ori = np.repeat(traj_ori, n_repeats, axis=0)\n print('traj_ori', traj_ori.shape, traj_ori)\n assert (traj_ori.shape[0] == traj.shape[0])\n assert (traj_ori.shape[1] == 6) # Euler sin,sin,sin,cos,cos,cos\n traj = np.hstack([traj, traj_ori])\n\n gif_frames = []\n rwds = []\n print(f'# {args.env}:')\n\n if args.pcd:\n pcd_fig = plt.figure(figsize=(10,5))\n while True:\n assert (not isinstance(env.action_space, gym.spaces.Discrete))\n\n act = traj[step] if step < len(traj) else last_action\n\n next_obs, rwd, done, info = env.step(act, unscaled=True)\n rwds.append(rwd)\n\n if done and vidwriter is not None: # Record the internal steps after anchor drop\n for ob in info['final_obs'][1:]:\n vidwriter.write(np.uint8(ob[..., ::-1] * 255))\n if args.cam_resolution > 0:\n img = env.render(mode='rgb_array', width=args.cam_resolution,\n height=args.cam_resolution)\n if vidwriter is not None:\n vidwriter.write(img[..., ::-1])\n\n if args.pcd:\n # Grab additional obs from the environment\n pcd_obs = env.get_pcd_obs()\n img, pcd, ids = pcd_obs.values()\n\n os.makedirs(f\"{args.logdir}/pcd\", exist_ok=True) # tmpfolder\n save_path = f'{args.logdir}/pcd/{step:06d}.png'\n visualize_data(img, pcd, ids, fig=pcd_fig, \n save_path=save_path)\n\n\n # gif_frames.append(obs)\n if done:\n break\n\n # if step > len(traj) + 50: break;\n obs = next_obs\n\n step += 1\n\n print(f'episode reward: {env.episode_reward:.4f}')\n print('traj_length:', len(traj))\n if args.use_wandb:\n mean_rwd = np.sum(rwds)\n for i in range(31):\n wandb.log({'rollout/ep_rew_mean': mean_rwd, 'Step': i}, step=i)\n if vidwriter is not None:\n vidwriter.release()\n\n if args.pcd:\n render_video(f'{args.logdir}/pcd', \n f'{args.logdir}/pcd_preset_test.mp4') \n\n\ndef viz_waypoints(sim, waypoints, rgba):\n waypoints = np.array(waypoints)\n for waypoint in waypoints:\n create_anchor_geom(sim, waypoint[:3], mass=0, rgba=rgba, use_collision=False)\n\n\ndef merge_traj(traj_a, traj_b):\n if traj_a.shape[0] != traj_b.shape[0]: # padding is required\n n_pad = np.abs(traj_a.shape[0] - traj_b.shape[0])\n zero_pad = np.zeros((n_pad, traj_a.shape[1]))\n if traj_a.shape[0] > traj_b.shape[0]: # pad b\n traj_b = np.concatenate([traj_b, zero_pad, ], axis=0)\n else: # pad a\n traj_a = np.concatenate([traj_a, zero_pad, ], axis=0)\n traj = np.concatenate([traj_a, traj_b, ], axis=-1)\n return traj\n\n\ndef build_traj(env, preset_wp, left_or_right, anchor_idx, ctrl_freq, robot):\n if robot is not None:\n init_anc_pos = env.robot.get_ee_pos(left=anchor_idx > 0)\n else:\n anc_id = list(env.anchors.keys())[anchor_idx]\n init_anc_pos = env.anchors[anc_id]['pos']\n print(f'init_anc_pos {left_or_right}', init_anc_pos)\n wp = np.array(preset_wp[left_or_right])\n steps = (wp[:, -1] * ctrl_freq).round().astype(np.int32) # seconds -> ctrl steps\n\n print('ATTENTION: Need to use scipy interpolate for preset trajs')\n # exit(1)\n # WARNING: old code below.\n\n from scipy.interpolate import interp1d\n wpt = np.concatenate([[init_anc_pos], wp[:, :3]], axis=0)\n ids = np.arange(wpt.shape[0])\n interp_type = 'linear'\n # Creates the respective time interval for each way point\n interp_i = []\n for i, num_step in enumerate(steps):\n interp_i.append(np.linspace(i, i + 1, num_step, endpoint=False))\n\n interp_i = np.concatenate(interp_i)\n # interp_i = np.linspace(0, 1, steps[0], endpoint=False) # np.arange(0, wpt.shape[0]-1, 0.1)\n xi = interp1d(ids, wpt[:, 0], 
kind=interp_type)(interp_i)\n yi = interp1d(ids, wpt[:, 1], kind=interp_type)(interp_i)\n zi = interp1d(ids, wpt[:, 2], kind=interp_type)(interp_i)\n\n traj = np.array([xi, yi, zi]).T\n\n dv = (traj[1:] - traj[:-1]) # * ctrl_freq\n\n # Calculating the avg velocity for each control step\n chunks = []\n chunk_size = int(np.round(ctrl_freq))\n start = 0\n for i in range(99999):\n\n if start + chunk_size > dv.shape[0]:\n # last chunk\n chunk_size = dv.shape[0] - start\n chunk = dv[start:start + chunk_size]\n mean_chunk = np.sum(chunk, axis=0, keepdims=True)\n mean_chunk = np.repeat(mean_chunk, chunk_size, axis=0, ) # scale back to original shape\n chunks.append(mean_chunk)\n start = start + chunk_size\n if start >= dv.shape[0]:\n break\n\n # Add the last point:\n chunks = chunks + [[chunks[-1][-1]]]\n velocities = np.concatenate(chunks, axis=0)\n\n return traj, velocities\n\n\ndef plot_traj(traj):\n import matplotlib.pyplot as plt\n clrs = np.linspace(0, 1, traj.shape[0])\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(traj[:, 0], traj[:, 1], traj[:, 2], c=clrs, cmap=plt.cm.jet)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.set_zlim3d(min(0, traj[:, 2].min()), traj[:, 2].max())\n plt.show()\n input('Continue')\n\n\ndef main(args):\n np.set_printoptions(precision=4, linewidth=150, suppress=True)\n kwargs = {'args': args}\n env = gym.make(args.env, **kwargs)\n env.seed(env.args.seed)\n print('Created', args.task, 'with observation_space',\n env.observation_space.shape, 'action_space', env.action_space.shape)\n play(env, 1, args)\n env.close()\n\n\nif __name__ == \"__main__\":\n main(get_args())\n","repo_name":"contactrika/dedo","sub_path":"dedo/demo_preset.py","file_name":"demo_preset.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"81"} +{"seq_id":"2051928887","text":"from functools import reduce\nimport operator\nimport datetime\nimport time\n\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db.models import Q\nfrom django.utils.timezone import make_aware, get_current_timezone\n\nfrom obcy.models import Joke\n\n\nSITE_URL = {\n 'wykop': 'http://wykop.pl',\n 'codzienny': 'https://facebook.com/sucharcodzienny',\n 'zacny': 'https://www.facebook.com/1zacnysucharmilordzie1',\n 'sucharnia': 'https://www.facebook.com/groups/495903230481274'\n}\nSITE_IMAGE_EXTENSION = {\n 'wykop': 'png',\n 'codzienny': 'jpg',\n 'zacny': 'jpg',\n 'sucharnia': 'png'\n}\n\n\ndef __sort_recalculate(sort, joke):\n if sort == 'votes':\n votes = joke.votes\n if joke.site == 'wykop' or joke.site == 'sucharnia':\n votes *= 4.5\n return votes\n else:\n return joke.__getattribute__(sort)\n\n\ndef __add_pages(request, jokes):\n paginator = Paginator(jokes, 15)\n page = request.GET.get('page')\n try:\n jokes = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n jokes = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n jokes = paginator.page(paginator.num_pages)\n return jokes\n\n\ndef __add_user(request, context):\n page = request.GET.get('page', 1)\n user = request.user\n if page != 1:\n if user.is_authenticated():\n if user.first_name:\n name = user.first_name\n if user.last_name:\n name += ' ' + user.last_name\n else:\n name = user.username\n username = user.username\n else:\n name = None\n username = None\n\n context.update({'user_fullname': name, 'username': username})\n\n moderator = True if user.groups.filter(name='Moderator') else False\n context.update({'moderator': moderator})\n\n\ndef __last_seen(request):\n last = request.session.get('last_seen', False)\n request.session['last_seen'] = time.time()\n return last\n\n\ndef __order_by(request):\n sort = request.GET.get('sort', 'added')\n if sort == 'date':\n sort = 'added'\n\n reverse = request.GET.get('reversed', True)\n if reverse != 'true':\n sort = '-' + sort\n\n return sort\n\n\ndef all_sites(request, pages=True, show_hidden=False):\n context = {}\n jokes = Joke.objects.filter(duplicate=None)\n if not show_hidden:\n jokes = jokes.filter(hidden=None)\n search = request.GET.get('q', '')\n if search.strip() != '':\n items = search.split()\n filter = reduce(operator.and_, (Q(body__icontains=x) for x in items))\n jokes = jokes.filter(filter)\n context.update({'search': search})\n\n jokes = jokes.order_by(__order_by(request))\n if pages:\n jokes = __add_pages(request, jokes)\n context.update({'jokes': jokes, 'site': 'all'})\n\n context.update({'site_image_extension': SITE_IMAGE_EXTENSION})\n\n __add_user(request, context)\n\n last_seen = __last_seen(request)\n if last_seen and time.time() - last_seen > 1:\n context.update(\n {'last_seen': make_aware(datetime.datetime.fromtimestamp(last_seen + 1), get_current_timezone())})\n\n return context\n\n\ndef one_joke(request, jokeslug):\n joke = Joke.objects.get(slug=jokeslug)\n site_url = SITE_URL[joke.site]\n context = {'joke': joke, 'site_url': site_url, 'site_image_extension': SITE_IMAGE_EXTENSION}\n __add_user(request, context)\n return context\n\n\ndef random(request, pages=True):\n jokes = Joke.objects.filter(duplicate=None).filter(hidden=None).order_by('?')\n if pages:\n jokes = __add_pages(request, jokes)\n context = {'jokes': jokes, 'site': 'all', 'site_image_extension': SITE_IMAGE_EXTENSION, 'random': True}\n __add_user(request, context)\n return context\n\n\ndef unverified(request):\n jokes = Joke.objects.filter(duplicate=None).filter(hidden=None).filter(verified=None).order_by(__order_by(request))\n jokes = __add_pages(request, jokes)\n context = {'jokes': jokes, 'site': 'all', 'site_image_extension': SITE_IMAGE_EXTENSION}\n __add_user(request, context)\n return context","repo_name":"jchmura/suchary-django","sub_path":"obcy/extras/prepare_view.py","file_name":"prepare_view.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40131286265","text":"#!/usr/bin/python\n\nimport unittest\nimport os\nfrom Problems_1_to_100.Problem_34.problem_34 \\\n import sum_of_factorials, is_equal, main\n\n\nclass MyTestCase(unittest.TestCase):\n\n print(\"Running unit tests from: \" + os.path.basename(__file__) + \"\\n\")\n\n def test_sum_of_factorials_basic(self):\n number = 145\n self.assertEqual(number,\n sum_of_factorials(number))\n\n def test_sum_is_equal_basic(self):\n number = 145\n self.assertEqual(True,\n is_equal(number))\n\n def test_main(self):\n result = 
40730\n max_number = 41000\n self.assertEqual(result,\n main(max_number))\n","repo_name":"ikostan/ProjectEuler","sub_path":"Problems_1_to_100/Problem_34/tests/test_problem_34.py","file_name":"test_problem_34.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14707394179","text":"from typing import List, Union\n\nimport torch\nimport torch.nn as nn\nfrom discrete_key_value_bottleneck_pytorch import DiscreteKeyValueBottleneck\nfrom vector_quantize_pytorch import VectorQuantize\n\nfrom networks.layers import OutConv, UpSimple\n\n\nclass DKVBBin(nn.Module):\n def __init__(\n self,\n architecture_type: str,\n pretrained_encoder: str,\n hidden_dims_decoder: List[int],\n embedding_dim: int = 1024,\n codebook_size: int = 8192,\n num_codebooks: int = 1,\n vq_decay: float = 0.99,\n threshold_ema_dead_code: int = 2,\n value_dimension: Union[int, str] = \"same\",\n freeze_encoder: bool = True,\n freeze_decoder: bool = False,\n **kwargs,\n ):\n super(DKVBBin, self).__init__()\n\n self.architecture_type = architecture_type\n self.pretrained_encoder = pretrained_encoder\n self.hidden_dims_decoder = hidden_dims_decoder\n self.embedding_dim = embedding_dim\n self.codebook_size = codebook_size\n self.num_codebooks = num_codebooks\n self.value_dimension = value_dimension\n self.vq_decay = vq_decay\n self.threshold_ema_dead_code = threshold_ema_dead_code\n self.freeze_encoder = freeze_encoder\n self.freeze_decoder = freeze_decoder\n\n # embedding dimension and codebook size must be divisible by the number of codebooks\n assert (self.embedding_dim % num_codebooks) == 0\n assert (self.codebook_size % num_codebooks) == 0\n\n if self.pretrained_encoder == \"dino_resnet_50\":\n encoder_raw = torch.hub.load(\"facebookresearch/dino:main\", \"dino_resnet50\")\n\n # Getting all the model up to the second to last bottleneck block\n self.encoder = nn.Sequential(\n encoder_raw.conv1,\n encoder_raw.bn1,\n encoder_raw.relu,\n encoder_raw.maxpool,\n encoder_raw.layer1,\n encoder_raw.layer2,\n encoder_raw.layer3,\n )\n\n if self.freeze_encoder:\n # Freezing the encoder\n for param in self.encoder.parameters():\n param.requires_grad = False\n\n if self.architecture_type == \"discrete_key_value_bottleneck\":\n\n if isinstance(self.value_dimension, str):\n self.value_dimension = self.embedding_dim // self.num_codebooks\n\n self.key_value_bottleneck = DiscreteKeyValueBottleneck(\n dim=self.embedding_dim, # input dimension\n codebook_dim=self.embedding_dim // self.num_codebooks,\n num_memory_codebooks=self.num_codebooks, # number of memory codebooks\n num_memories=self.codebook_size, # number of memories\n dim_memory=self.embedding_dim // self.num_codebooks, # dimension of the output memories\n decay=self.vq_decay, # the exponential moving average decay, lower means the keys will change faster\n threshold_ema_dead_code=self.threshold_ema_dead_code, # (0.8·batch-size·h·w·mz/num-pairs)\n )\n\n elif self.architecture_type == \"vector_quantizer\":\n self.vector_quantizer = VectorQuantize(\n dim=self.embedding_dim,\n codebook_dim=self.embedding_dim // self.num_codebooks,\n codebook_size=self.codebook_size,\n heads=self.num_codebooks,\n separate_codebook_per_head=True,\n decay=self.vq_decay,\n threshold_ema_dead_code=self.threshold_ema_dead_code, # (0.8·batch-size·h·w·mz/num-pairs)\n )\n\n # Building decoder\n decoder_module_list = nn.ModuleList()\n decoder_module_list.append(UpSimple(self.embedding_dim, 
self.hidden_dims_decoder[0]))\n for i in range(len(self.hidden_dims_decoder) - 1):\n decoder_module_list.append(UpSimple(self.hidden_dims_decoder[i], self.hidden_dims_decoder[i + 1]))\n decoder_module_list.append(OutConv(self.hidden_dims_decoder[-1], 1))\n decoder_module_list.append(nn.Sigmoid())\n\n self.decoder = nn.Sequential(*decoder_module_list)\n\n if self.freeze_decoder:\n # Freezing the encoder\n for param in self.decoder.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n\n if self.freeze_encoder:\n with torch.no_grad():\n embeddings = self.encoder(x)\n embeddings.detach_()\n else:\n embeddings = self.encoder(x)\n\n if self.architecture_type == \"discrete_key_value_bottleneck\" or self.architecture_type == \"vector_quantizer\":\n\n encoder_output_size = embeddings.shape[-1]\n batch_size = x.size()[0]\n\n embeddings = torch.reshape(\n embeddings, (embeddings.shape[0], self.embedding_dim, encoder_output_size**2)\n ) # B, Dim, H, W -> B, Dim, N\n embeddings = torch.permute(embeddings, (0, 2, 1)) # B, Dim, N -> B, N, Dim\n\n if self.architecture_type == \"discrete_key_value_bottleneck\":\n memories = self.key_value_bottleneck(embeddings)\n else:\n memories, _, _ = self.vector_quantizer(embeddings) # quantized, indices, commitment loss\n\n memories = torch.permute(memories, (0, 2, 1)) # B, N, Dim -> B, Dim, N\n memories = torch.reshape(\n memories, (batch_size, self.embedding_dim, encoder_output_size, encoder_output_size)\n ) # B, Dim, N -> B, Dim, H, W\n\n # Processing final output\n return self.decoder(memories)\n\n elif self.architecture_type == \"baseline\": # baseline classifier\n output = self.decoder(embeddings)\n\n return output\n","repo_name":"pedrocg42/continual-learning-binarization","sub_path":"networks/discrete_key_value_bottleneck.py","file_name":"discrete_key_value_bottleneck.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"42848019571","text":"from loader import bot\nfrom database.db_connection import *\nfrom query import rapidapi_hotels\nfrom telebot.types import ReplyKeyboardRemove, Message, CallbackQuery\nfrom keyboards.inline.buttons import *\nfrom calibration.tools import *\n\n\n@bot.message_handler(commands=['history'])\ndef history(message: Message) -> None:\n with db:\n is_empty = True\n for request in Request.select().where(Request.telegram_id == message.from_user.id):\n is_empty = False\n request_button = InlineKeyboardMarkup()\n request_button.add(InlineKeyboardButton(text='Перейти к запросу', callback_data=request.id))\n bot.send_message(message.chat.id, '📍*{city}*\\n'\n '_{date1} - {date2}_\\n'\n '{mode}\\n'\n 'Запрос от _{request_date}'\n ' ({time})_'.format(city=request.city_ru,\n date1=reformat_date(request.checkin),\n date2=reformat_date(request.checkout),\n mode=get_the_mode(request.mode),\n request_date=reformat_date(request.request_date),\n time=request.request_time),\n reply_markup=request_button, parse_mode='Markdown')\n\n if is_empty:\n bot.send_message(message.chat.id, 'История поиска пуста')\n else:\n bot.send_message(message.chat.id, 'Очистить историю поиска', reply_markup=basket())\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call: CallbackQuery) -> None:\n with db:\n if call.data == 'delete_history':\n for request in Request.select().where(Request.telegram_id == int(call.from_user.id)):\n request.delete_instance()\n bot.send_message(call.message.chat.id, 'История поиска очищена')\n 
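# any other callback payload is the id of a saved request; the stored search is re-run below\n 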
else:\n\n for request in Request.select().where(Request.id == int(call.data)):\n\n msg = bot.send_message(call.message.chat.id, '_Загрузка..._', parse_mode='Markdown',\n reply_markup=ReplyKeyboardRemove())\n is_deleted = False\n\n data = {'destination_id': request.location_id,\n 'start_date':\n {'year': request.checkin.year,\n 'month': request.checkin.month,\n 'day': request.checkin.day},\n 'end_date':\n {'year': request.checkout.year,\n 'month': request.checkout.month,\n 'day': request.checkout.day},\n 'mode': request.mode,\n 'num_hotels_output': request.hotels_num,\n 'photos_necessity': request.photo_nes,\n 'photos_num': request.photos_num,\n 'request_time': '{}:{}'.format(datetime.datetime.now().hour,\n datetime.datetime.now().minute),\n 'request_date': '{}-{}-{}'.format(datetime.datetime.today().year,\n datetime.datetime.today().month,\n datetime.datetime.today().day),\n 'city_en': request.city_en,\n 'city_users': request.city_ru,\n 'min_price': request.min_price,\n 'max_price': request.max_price,\n 'min_distance': request.min_distance,\n 'max_distance': request.max_distance\n }\n\n data['hotel_list'] = rapidapi_hotels.get_hotels(data)\n data['hotel_list'] = rapidapi_hotels.get_properties_list(data, data['hotel_list'])\n text = []\n\n start_date = datetime.date(day=int(data['start_date']['day']), month=int(data['start_date']['month']),\n year=int(data['start_date']['year']))\n end_date = datetime.date(day=int(data['end_date']['day']), month=int(data['end_date']['month']),\n year=int(data['end_date']['year']))\n summary_days = abs(end_date - start_date)\n\n for hotel in data['hotel_list']:\n text.append('*{}*'.format(hotel['name']))\n text.append('⭐' * hotel['stars'])\n text.append('Адрес: {}'.format(hotel['address']))\n text.append('Расстояние до центра города: {}'.format(hotel['city_center']))\n text.append('Цена за сутки: {}'.format(hotel['cost']))\n text.append('Цена за период с {}.{}.{} по {}.{}.{}'\n ' ({} суток): ${}'.format(data['start_date']['year'],\n data['start_date']['month'],\n data['start_date']['day'],\n data['end_date']['year'],\n data['end_date']['month'],\n data['end_date']['day'], summary_days.days,\n hotel['cost'] * summary_days.days))\n if not is_deleted:\n is_deleted = True\n bot.delete_message(call.message.chat.id, msg.id)\n\n if data['photos_necessity']:\n hotel['photos_list'][0].caption = '\\n'.join(text)\n hotel['photos_list'][0].parse_mode = 'Markdown'\n bot.send_media_group(call.message.chat.id, hotel['photos_list'])\n else:\n bot.send_message(call.message.chat.id, '\\n'.join(text), parse_mode=\"Markdown\",\n reply_markup=ReplyKeyboardRemove())\n text.clear()\n with db:\n Request.create(telegram_id=call.from_user.id,\n request_time=data['request_time'],\n request_date=data['request_date'],\n checkin='-'.join(map(str, data['start_date'].values())),\n checkout='-'.join(map(str, data['end_date'].values())),\n mode=data['mode'], location_id=data['destination_id'],\n city_en=data['city_en'],\n city_ru=data['city_users'],\n hotels_num=data['num_hotels_output'],\n photo_nes=data['photos_necessity'],\n photos_num=data['photos_num'])\n","repo_name":"neenael/hotel_bot","sub_path":"handlers/all_heandlers/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":7410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"18827952403","text":"# Import necessary modules\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom tqdm import tqdm\nimport os\nfrom langchain.embeddings 
import OpenAIEmbeddings\nfrom langchain.vectorstores import FAISS\n\n# Define a class named 'Embeddings'\nclass Embeddings:\n def __init__(self):\n # Initialize the OpenAI API key from the environment variable\n self.OPENAI_API_KEY = os.environ[\"OPENAI_API_KEY\"]\n # Create an instance of the OpenAIEmbeddings class with the API key\n self.embedding = OpenAIEmbeddings(openai_api_key=self.OPENAI_API_KEY)\n\n def create_article_index(self, articles, domain):\n # Create a RecursiveCharacterTextSplitter instance with specified chunk size and overlap\n rec_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=150)\n # Lists to store split text chunks and their corresponding metadata\n web_docs, meta = [], []\n # Iterate through each article and split its content into chunks\n for article in tqdm(articles, desc=\"Splitting articles into chunks\"):\n splits = rec_splitter.split_text(article[\"content\"])\n # Extend the lists with the split chunks and their metadata\n web_docs.extend(splits)\n meta.extend([{\n \"title\": article['title'],\n \"thumbnail\": article['thumbnail'],\n \"source\": article['link'] + '|' + article['local_path'],\n \"local_path\": article['local_path']\n }] * len(splits))\n # Create an FAISS vector store from the split text chunks and their metadata\n article_store = FAISS.from_texts(\n texts=web_docs, embedding=self.embedding, metadatas=meta\n )\n # Save the article store locally with a filename based on the domain\n article_store.save_local(\"indices/INDEX-\"+domain)\n # Return the article store\n return article_store\n \n def load_article_index(self, article_index):\n # Load an FAISS vector store from a local file based on the given article_index and embedding\n return FAISS.load_local(article_index, self.embedding)","repo_name":"sanket0211/llm-google-scrape","sub_path":"src/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72176804106","text":"from torchvision import models\r\n\r\nfrom munch import Munch\r\nfrom drloc import DenseRelativeLoc\r\n\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nimport math, re\r\nimport numpy as np\r\n\r\nclass config_task():\r\n # File allowing to change which task is currently used for training/testing\r\n task = 0\r\n mode = 'normal'\r\n proj = '11'\r\n factor = 1.\r\n dropouts_args = '00'\r\n\r\n wd3x3, wd1x1, wd = [1.], [1.], 1\r\n decay3x3 = np.array(wd3x3) * 0.0001\r\n decay1x1 = np.array(wd1x1) * 0.0001\r\n wd = wd * 0.0001\r\n\r\n isdropout1 = (dropouts_args[0] == '1')\r\n isdropout2 = (dropouts_args[1] == '1')\r\n\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\r\n\r\ndef conv1x1_fonc(in_planes, out_planes=None, stride=1, bias=False):\r\n if out_planes is None:\r\n return nn.Conv2d(in_planes, in_planes, kernel_size=1, stride=stride, padding=0, bias=bias)\r\n else:\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=bias)\r\n\r\nclass conv1x1(nn.Module):\r\n\r\n def __init__(self, planes, out_planes=None, stride=1):\r\n super(conv1x1, self).__init__()\r\n if config_task.mode == 'series_adapters':\r\n self.conv = nn.Sequential(nn.BatchNorm2d(planes), conv1x1_fonc(planes))\r\n elif config_task.mode == 'parallel_adapters':\r\n self.conv = conv1x1_fonc(planes, out_planes, stride)\r\n else:\r\n 
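# default mode: a plain 1x1 convolution with no adapter-specific wiring\r\n 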
self.conv = conv1x1_fonc(planes)\r\n def forward(self, x):\r\n y = self.conv(x)\r\n if config_task.mode == 'series_adapters':\r\n y += x\r\n return y\r\n\r\nclass conv_task(nn.Module):\r\n\r\n def __init__(self, in_planes, planes, stride=1, nb_tasks=1, is_proj=1, second=0):\r\n super(conv_task, self).__init__()\r\n self.is_proj = is_proj\r\n self.second = second\r\n self.conv = conv3x3(in_planes, planes, stride)\r\n if config_task.mode == 'series_adapters' and is_proj:\r\n self.bns = nn.ModuleList([nn.Sequential(conv1x1(planes), nn.BatchNorm2d(planes)) for i in range(nb_tasks)])\r\n elif config_task.mode == 'parallel_adapters' and is_proj:\r\n self.parallel_conv = nn.ModuleList([conv1x1(in_planes, planes, stride) for i in range(nb_tasks)])\r\n\r\n self.bns = nn.ModuleList([nn.BatchNorm2d(planes) for i in range(nb_tasks)])\r\n else:\r\n self.bns = nn.ModuleList([nn.BatchNorm2d(planes) for i in range(nb_tasks)])\r\n\r\n def forward(self, x):\r\n task = config_task.task\r\n y = self.conv(x)\r\n if self.second == 0:\r\n if config_task.isdropout1:\r\n x = F.dropout2d(x, p=0.5, training = self.training)\r\n else:\r\n if config_task.isdropout2:\r\n x = F.dropout2d(x, p=0.5, training = self.training)\r\n if config_task.mode == 'parallel_adapters' and self.is_proj:\r\n y = y + self.parallel_conv[task](x)\r\n y = self.bns[task](y)\r\n return y\r\n\r\n# No projection: identity shortcut\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, in_planes, planes, stride=1, shortcut=0, nb_tasks=1):\r\n super(BasicBlock, self).__init__()\r\n self.conv1 = conv_task(in_planes, planes, stride, nb_tasks, is_proj=int(config_task.proj[0]))\r\n self.conv2 = nn.Sequential(nn.ReLU(True), conv_task(planes, planes, 1, nb_tasks, is_proj=int(config_task.proj[1]), second=1))\r\n self.shortcut = shortcut\r\n if self.shortcut == 1:\r\n self.avgpool = nn.AvgPool2d(2)\r\n\r\n def forward(self, x):\r\n residual = x\r\n y = self.conv1(x)\r\n y = self.conv2(y)\r\n if self.shortcut == 1:\r\n residual = self.avgpool(x)\r\n residual = torch.cat((residual, residual*0),1)\r\n y += residual\r\n y = F.relu(y)\r\n return y\r\n\r\n\r\nclass CustumResNet(nn.Module):\r\n def __init__(self, block, nblocks, num_classes=[10]):\r\n super(CustumResNet, self).__init__()\r\n num_classes = [num_classes]\r\n nb_tasks = len(num_classes)\r\n blocks = [block, block, block]\r\n factor = config_task.factor\r\n self.in_planes = int(32*factor)\r\n self.pre_layers_conv = conv_task(3,int(32*factor), 1, nb_tasks)\r\n self.layer1 = self._make_layer(blocks[0], int(64*factor), nblocks[0], stride=2, nb_tasks=nb_tasks)\r\n self.layer2 = self._make_layer(blocks[1], int(128*factor), nblocks[1], stride=2, nb_tasks=nb_tasks)\r\n self.layer3 = self._make_layer(blocks[2], int(256*factor), nblocks[2], stride=2, nb_tasks=nb_tasks)\r\n self.end_bns = nn.ModuleList([nn.Sequential(nn.BatchNorm2d(int(256*factor)),nn.ReLU(True)) for i in range(nb_tasks)])\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.linears = nn.ModuleList([nn.Linear(int(256*factor), num_classes[i]) for i in range(nb_tasks)])\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n def _make_layer(self, block, planes, nblocks, stride=1, nb_tasks=1):\r\n shortcut = 0\r\n if stride != 1 or self.in_planes != planes * block.expansion:\r\n shortcut = 1\r\n layers = []\r\n layers.append(block(self.in_planes, planes, stride, shortcut, nb_tasks=nb_tasks))\r\n self.in_planes = planes * block.expansion\r\n for i in range(1, nblocks):\r\n layers.append(block(self.in_planes, planes, nb_tasks=nb_tasks))\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.pre_layers_conv(x)\r\n task = config_task.task\r\n x = self.layer1(x)\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n x = self.end_bns[task](x)\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.linears[task](x)\r\n return x\r\n\r\ndef resnet26(num_classes=10, blocks=BasicBlock):\r\n return CustumResNet(blocks, [4,4,4], num_classes)\r\n\r\nclass ResidualResNet(nn.Module):\r\n def __init__(\r\n self,\r\n num_classes,\r\n pretrained_path,\r\n pretrained_bool=1,\r\n use_drloc=False, # relative distance prediction\r\n drloc_mode=\"l1\",\r\n sample_size=32,\r\n use_abs=False,\r\n residual_mode=\"parallel_adapters\"\r\n ):\r\n super().__init__()\r\n self.use_drloc = use_drloc\r\n # when pretrained_bool != 1 the model is trained from scratch (see the else branch below)\r\n\r\n if pretrained_bool == 1:\r\n # Finetuning with Parallel Adapters\r\n\r\n # Load checkpoint and initialize the networks with the weights of a pretrained network\r\n print('\\t\\t Loading Resnet26 pretrained network with Residuals')\r\n # checkpoint = torch.load(pretrained_path, encoding='latin1')\r\n # checkpoint = torch.load(pretrained_path)\r\n\r\n # net_old = checkpoint['net']\r\n net_old = resnet26(num_classes)\r\n checkpoint = torch.load(pretrained_path, map_location='cpu')\r\n msg = net_old.load_state_dict(checkpoint, strict=False)\r\n print(\"\\t\\t Message: \", msg)\r\n # net_old = torch.load(pretrained_path)\r\n net = resnet26(num_classes)\r\n\r\n store_data = []\r\n for name, m in net_old.named_modules():\r\n if isinstance(m, nn.Conv2d) and (m.kernel_size[0] == 3):\r\n store_data.append(m.weight.data)\r\n\r\n element = 0\r\n for name, m in net.named_modules():\r\n if isinstance(m, nn.Conv2d) and (m.kernel_size[0] == 3):\r\n m.weight.data = store_data[element]\r\n element += 1\r\n\r\n store_data = []\r\n store_data_bias = []\r\n store_data_rm = []\r\n store_data_rv = []\r\n names = []\r\n\r\n for name, m in net_old.named_modules():\r\n if isinstance(m, nn.BatchNorm2d) and 'bns.' in name:\r\n names.append(name)\r\n store_data.append(m.weight.data)\r\n store_data_bias.append(m.bias.data)\r\n store_data_rm.append(m.running_mean)\r\n store_data_rv.append(m.running_var)\r\n\r\n # Special case to copy the weights for the BN layers when the target and source networks do not have the same number of BNs\r\n condition_bn = 'noproblem'\r\n if len(names) != 51 and residual_mode == 'series_adapters':\r\n condition_bn = 'bns.....conv'\r\n\r\n tasks = [num_classes]\r\n for id_task in range(len(tasks)):\r\n element = 0\r\n for name, m in net.named_modules():\r\n if isinstance(m, nn.BatchNorm2d) and 'bns.' 
+ str(id_task) in name and not re.search(condition_bn,\r\n name):\r\n m.weight.data = store_data[element].clone()\r\n m.bias.data = store_data_bias[element].clone()\r\n m.running_var = store_data_rv[element].clone()\r\n m.running_mean = store_data_rm[element].clone()\r\n element += 1\r\n\r\n del net_old\r\n\r\n # Freeze 3*3 convolution layers\r\n for name, m in net.named_modules():\r\n if isinstance(m, nn.Conv2d) and (m.kernel_size[0] == 3):\r\n m.weight.requires_grad = False\r\n\r\n self.model = net\r\n #self.num_ftrs = net.linears[0].in_features\r\n #net.linears = nn.Linear(self.num_ftrs, num_classes)\r\n\r\n #layers = [v for v in net.children()]\r\n #self.model = nn.Sequential(*layers[:-2])\r\n #self.avgpool = layers[-2]\r\n #self.linears = layers[-1]\r\n\r\n else:\r\n self.model = resnet26(num_classes)\r\n\r\n if self.use_drloc:\r\n self.drloc = nn.ModuleList()\r\n self.drloc.append(DenseRelativeLoc(\r\n in_dim=self.num_ftrs,\r\n out_dim=2 if drloc_mode==\"l1\" else 14,\r\n sample_size=sample_size,\r\n drloc_mode=drloc_mode,\r\n use_abs=use_abs))\r\n\r\n def forward(self, x):\r\n #TODO: Complete DR_Loc code\r\n outs = Munch()\r\n # SSUP\r\n B, C, H, W = x.size()\r\n outs.drloc = []\r\n outs.deltaxy = []\r\n outs.plz = []\r\n\r\n #x = self.fc(torch.flatten(self.pool(x), 1))\r\n x = self.model(x)\r\n outs.sup = x\r\n return outs","repo_name":"IemProg/MiMi","sub_path":"models/residual_resnet.py","file_name":"residual_resnet.py","file_ext":"py","file_size_in_byte":10767,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"8057856741","text":"from __future__ import print_function\n\nimport os\nimport logging\nimport scipy.io as sio\n\nimport utils\n\ndef setup_arg_parsing():\n \"\"\"\n Parse the commandline arguments\n \"\"\"\n import argparse\n from argparse import RawTextHelpFormatter\n\n parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)\n\n parser.add_argument('--dataset', dest='dataset', required=False,\n help='The type of dataset could be (train|test|valid)')\n\n parser.add_argument('--input_mat', dest='input_path', required=True,\n help='Path to the MAT source file')\n\n parser.add_argument('--output_path', dest='output_path', required=True,\n help='Path to folder for saving generated images')\n\n parser.add_argument('--grayscale', dest='grayscale', action='store_true', required=False,\n help='Convert images to grayscale (default=%(default)s).')\n\n parser.add_argument('--logging', dest='logging', required=False,\n help='Logging level (default=%(default)s). 
'\n 'Options: debug, info, warning, error, critical')\n\n parser.set_defaults(grayscale=False)\n parser.set_defaults(logging='warning')\n\n return parser.parse_args()\n\n\ndef check_args(args):\n \"\"\"\n Validates the arguments\n\n :param args: The commandline arguments\n \"\"\"\n if not os.path.exists(args.input_path):\n logging.error('The input path is not valid: ' + args.input_path)\n exit(1)\n\n if not os.path.isdir(args.output_path):\n logging.error('The output path is not valid: ' + args.output_path)\n exit(1)\n\n\ndef main():\n \"\"\"\n The main scope of the preprocessor containing the high level code\n \"\"\"\n\n args = setup_arg_parsing()\n\n # Setup logging\n log_format = \"[%(filename)s:%(lineno)s - %(funcName)s() - %(levelname)s] %(message)s\"\n logging.basicConfig(format=log_format, level=utils.logger_level(args.logging))\n\n # Validate args\n check_args(args)\n\n # Load MAT source file\n try:\n data = sio.loadmat(args.input_path)\n except Exception as ex:\n logging.error('Failed to load input MAT file: ' + args.input_path)\n logging.error('Exception: %s', ex)\n exit(1)\n\n # Split data into features and labels\n features = data['X']\n labels = data['y']\n\n # Start preprocessing images\n utils.preprocess(args.dataset, features, labels, args.output_path, args.grayscale)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Cerenaut/Preprocess-SVHN","sub_path":"src/svhn.py","file_name":"svhn.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"81"} +{"seq_id":"74245150986","text":"# Spell out a number from 0 to 99 in Portuguese (e.g. 45 -> 'Quarenta e cinco.')\nunidades = ('um', 'dois', 'três', 'quatro',\n 'cinco','seis','sete', 'oito', 'nove')\noutros = ('dez', 'onze', 'doze','treze','quatorze','quinze',\n 'dezesseis', 'dezessete', 'dezoito', 'dezenove')\ndezenas = ('vinte', 'trinta', 'quarenta', 'cinquenta', 'sessenta',\n 'setenta','oitenta','noventa')\n\nnum = int(input('Digite um número de 0 a 99: '))\n\nif num > 99:\n print('Número inválido!')\nelif num >= 20:\n print(f'{dezenas[num // 10 - 2]}'.capitalize(), end='')\n print(f' e {unidades[num % 10 - 1]}.' 
if num % 10 else '.'))\nelif num >= 10:\n    print(f'{outros[num - 10]}.'.capitalize())\nelif num > 0:\n    print(f'{unidades[num - 1]}.'.capitalize())\nelse:\n    print('Zero.')\n\n","repo_name":"EderOBarreto/exercicios-python","sub_path":"ex072.py","file_name":"ex072.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"1490445103","text":"import cv2\r\nfrom keras.preprocessing.image import load_img, img_to_array\r\nfrom helper import create_top_model, class_labels, target_size\r\nimport numpy as np\r\nfrom keras.applications.vgg16 import VGG16\r\nimport operator\r\nimport sys\r\nimport threading\r\nimport tensorflow as tf\r\n\r\ntf.get_logger().setLevel('ERROR')\r\n\r\nlabel = ''\r\nframe = None\r\nmodel = VGG16(include_top=False, weights='imagenet')\r\n\r\nclass MyThread(threading.Thread):\r\n    def __init__(self):\r\n        threading.Thread.__init__(self)\r\n\r\n    def run(self):\r\n        global label\r\n        global frame\r\n        # Keep classifying frames until the main thread clears `frame`\r\n        while frame is not None:\r\n            \r\n            label = self.predict(frame)\r\n\r\n    def predict(self, frame):\r\n        global model\r\n        \r\n        image_arr = img_to_array(frame)\r\n        image_arr /= 255\r\n        image_arr = np.expand_dims(image_arr, axis=0)\r\n        bottleneck_features = model.predict(image_arr)\r\n        model1 = create_top_model(\"softmax\", bottleneck_features.shape[1:])\r\n        model1.load_weights(\"./res/_top_model_weights.h5\")\r\n        predicted = model1.predict(bottleneck_features)\r\n        decoded_predictions = dict(zip(class_labels, predicted[0]))\r\n        decoded_predictions = max(decoded_predictions.items(), key=operator.itemgetter(1))\r\n        return decoded_predictions\r\n\r\ncap = cv2.VideoCapture(0)\r\nif (cap.isOpened()):\r\n    print(\"Camera OK\")\r\nelse:\r\n    cap.open(0)\r\n\r\nret, original = cap.read()\r\n\r\nframe = cv2.resize(original, (224, 224))\r\n\r\nkeras_thread = MyThread()\r\nkeras_thread.start()\r\n\r\nwhile (True):\r\n    ret, original = cap.read()\r\n\r\n    frame = cv2.resize(original, (224, 224))\r\n    # Display the predictions\r\n    \r\n    cv2.putText(original, \"Label: {}\".format(label), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (8, 5, 173), 2)\r\n    cv2.imshow(\"Classification\", original)\r\n\r\n    if (cv2.waitKey(1) & 0xFF == ord('q')):\r\n        break\r\n\r\ncap.release()\r\nframe = None\r\ncv2.destroyAllWindows()\r\nsys.exit()","repo_name":"BounteHunter/Distracted-Driver-Behaviour-Recognition-Using-Convolutional-Neural-Network","sub_path":"result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"30461659021","text":"\nfrom monteur.vcs import VCSCheckout, VCS\nfrom monteur.session import MultiCommand\nfrom monteur.error import InstallationError\n\n\nclass VCSCommand(MultiCommand):\n    \"\"\"Manage VCS access to the current package.\n    \"\"\"\n\n    def __init__(self, session):\n        super(VCSCommand, self).__init__(session)\n        __status__ = u\"Initializing VCS checkout\"\n        setup = session.configuration['setup']\n        self.repository = None\n        self.checkout = None\n        if 'repository' in setup:\n            VCS.initialize()\n            option = setup['repository']\n            directory = setup['prefix_directory'].as_text()\n            name = 'setup'\n            if 'egginfo' in session.configuration:\n                egginfo = session.configuration['egginfo']\n                if 'name' in egginfo:\n                    name = egginfo['name']\n            self.repository = option.as_words()\n            self.checkout = VCSCheckout(\n                name, option, self.repository, directory=directory)\n\n    def do_update(self, args):\n        \"\"\"Update the 
current package to the latest version.\n        \"\"\"\n        if self.checkout is not None:\n            vcs = VCS(self.checkout)\n            if vcs.inspect(checkout=False, update=True):\n                vcs.install()\n                self.session.need_reconfigure()\n        else:\n            raise InstallationError(\n                u\"No repository is defined for the working environment.\")\n        return False\n\n\n    COMMANDS = MultiCommand.COMMANDS.copy()\n    COMMANDS.update({\n        'update': do_update,\n        None: do_update})\n","repo_name":"thefunny42/Zeam-Setup","sub_path":"src/monteur/vcs/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"16764827397","text":"from django.core.management.base import BaseCommand\nfrom django.db.utils import ProgrammingError\nimport datetime\nfrom training.models import Training\nfrom training.galaxy import disassociate_role\n\n\nclass Command(BaseCommand):\n    help = \"Removes users from all expired trainings\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\"--commit\", action=\"store_true\")\n\n    def handle(self, commit, *args, **options):\n        yesterday = datetime.date.today() - datetime.timedelta(days=1)\n        for event in Training.objects.filter(end__lte=yesterday):\n            print(f\"Removing users from {event}\")\n            if event.gdpr_clean:\n                event._redact()\n\n            try:\n                print(\n                    disassociate_role(event.training_identifier.lower(), commit=commit)\n                )\n            except ProgrammingError as pe:\n                print(pe)\n","repo_name":"galaxyproject/tiaas2","sub_path":"training/management/commands/disassociate_training_roles.py","file_name":"disassociate_training_roles.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"}
{"seq_id":"37225021024","text":"# The isBadVersion API is already defined for you.\n# @param version, an integer\n# @return an integer\n# def isBadVersion(version):\n\nclass Solution:\n    def firstBadVersion(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        begin = 1 \n        end = n\n        mid = (begin + end) // 2\n\n        while not isBadVersion(mid-1) ^ isBadVersion(mid):\n            print(begin, mid, end)\n            if not isBadVersion(mid):# overshot\n                print('right')\n                begin = mid\n                mid = (begin + end) // 2 + 1\n            \n            else: # not yet reached\n                print('left')\n                end = mid\n                mid = (begin + end) // 2\n        \n        return mid","repo_name":"dstadz/leetCode","sub_path":"Leetcode/may/wk1/firstVadVersion.py","file_name":"firstVadVersion.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"27011516125","text":"\"\"\"\nselect + callbacks + an event loop\n\"\"\"\n\nfrom selectors import DefaultSelector, EVENT_WRITE, EVENT_READ\nfrom urllib.parse import urlparse\nfrom socket import socket, AF_INET, SOCK_STREAM\n\n\nselector = DefaultSelector()\n\nclass Fetcher:\n    def get_content(self, key):\n        res = self.client.recv(1024)\n        if res:\n            self.data += res\n        else:\n            selector.unregister(key.fd)\n            data = self.data.decode('utf-8')\n            print(data)\n            self.client.close()\n    \n\n    def send_content(self, key):\n        selector.unregister(key.fd)\n        self.client.send('GET {} HTTP/1.1\\r\\nHost:{}\\r\\nConnection:close\\r\\n\\r\\n'.format(self.path, self.host).encode('utf-8'))\n        selector.register(self.client.fileno(), EVENT_READ, self.get_content)\n\n    def get_html(self, url):\n        url = urlparse(url)\n        self.host = url.netloc\n        self.path = url.path\n        if self.path == '':\n            self.path = '/'\n\n        self.data = b''\n        self.client = socket(AF_INET, 
SOCK_STREAM)\n        self.client.setblocking(False)\n        try:\n            self.client.connect((self.host, 80))\n        except BlockingIOError:\n            pass \n        # register this event; the callback fires once the socket is ready\n        selector.register(self.client.fileno(), EVENT_WRITE, self.send_content)\n\ndef loop():\n    while True:\n        ready = selector.select()\n        for key, mask in ready:\n            callback = key.data\n            callback(key)\n\n\nif __name__ == \"__main__\":\n    f = Fetcher()\n    f.get_html('http://www.baidu.com')\n    loop()","repo_name":"Rockyzsu/CodePool","sub_path":"python_web_select/select_http.py","file_name":"select_http.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"81"}
{"seq_id":"7304605718","text":"#https://www.acmicpc.net/problem/11916\n#볼질 [Silver 4]\nimport sys\nfrom collections import deque\nI = sys.stdin.readline\n\nn = int(I())\nruns = 0\npitches = list(map(int,I().split()))\nrunners = deque([0,0,0])\nballCount = 0\n\ndef goToFirstBase(runners):\n    result = [1,0,0]\n    run = 0\n    if runners[0] == 1:\n        result[1] = 1\n\n        if runners[1] == 1:\n            result[2] = 1\n\n            if runners[2] == 1:\n                run = 1\n\n        else:\n            result[2] = runners[2]\n\n    else:\n        result[1] = runners[1]\n        result[2] = runners[2]\n\n    return deque(result),run\n\nfor pitch in pitches:\n    if pitch == 1:\n        ballCount += 1\n        if ballCount == 4:\n            runners, r = goToFirstBase(runners)\n            runs += r\n            ballCount = 0\n\n    elif pitch == 2:\n        runners, r = goToFirstBase(runners)\n        runs += r\n        ballCount = 0\n\n    elif pitch == 3:\n        ballCount += 1\n        if ballCount == 4:\n            runners.appendleft(1)\n            runs += runners.pop()\n            ballCount = 0\n\n    else:\n        runners.appendleft(0)\n        runs += runners.pop()\n\nprint(runs)","repo_name":"dlwhd990/BOJ-2022","sub_path":"BOJ/[11916]볼질.py","file_name":"[11916]볼질.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"36581618752","text":"\n\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Chiu-Cheng-Chun\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nt0 = time.perf_counter()\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=False)\n\n\"\"\"\nDeclare hyperparameters\n\"\"\"\nlearning_rate = 0.005\nepoches = 3000\nexamples_to_show = 10\nbatch_size = 275\n\nn_input = 784\n\nX = tf.placeholder(\"float\", [None, n_input])\n\ninput_size = 784\noutput_size = 784\n\n#batch normalization\nscale_en = []\nshift_en = []\nscale_de = []\nshift_de = []\n\nW_en = []\nb_en = []\nW_de = []\nb_de = []\n\n\"\"\"\nDefine the number of neurons in the encoder and decoder\n\"\"\"\nen_n_neurons = [input_size, 256, 128]\nde_n_neurons = [128, 256, output_size]\n\n\"\"\"\nBuild the weights and biases,\nplus the batch-normalization parameters\n\"\"\"\nfor i in range(0, len(en_n_neurons)-1):\n    W_en.append(tf.Variable(tf.random_normal([en_n_neurons[i],en_n_neurons[i+1]])))\n    b_en.append(tf.Variable(tf.zeros([en_n_neurons[i+1]])))\n    W_de.append(tf.Variable(tf.random_normal([de_n_neurons[i],de_n_neurons[i+1]])))\n    b_de.append(tf.Variable(tf.zeros([de_n_neurons[i+1]])))\n    scale_en.append(tf.Variable(tf.ones([en_n_neurons[i+1]])))\n    shift_en.append(tf.Variable(tf.zeros([en_n_neurons[i+1]])))\n    scale_de.append(tf.Variable(tf.ones([de_n_neurons[i+1]])))\n    shift_de.append(tf.Variable(tf.zeros([de_n_neurons[i+1]])))\n\n\"\"\"\nDefine batch normalization\n\"\"\"\ndef Batch_norm_en(Wx_plus_b, i):\n    fc_mean, fc_var = tf.nn.moments(Wx_plus_b, axes=[0, 1])\n    Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, 
fc_mean, fc_var, shift_en[i], scale_en[i], 10**(-3))\n    return Wx_plus_b\n\ndef Batch_norm_de(Wx_plus_b, i):\n    fc_mean, fc_var = tf.nn.moments(Wx_plus_b, axes=[0, 1])\n    Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, fc_mean, fc_var, shift_de[i], scale_de[i], 10**(-3))\n    return Wx_plus_b\n\n\"\"\"\nDefine the encoder\n\"\"\"\ndef encoder_model(x):\n    res = x\n    for i in range(0, len(en_n_neurons)-1):\n        Wx_plus_b = tf.matmul(res,W_en[i]) + b_en[i]\n        Wx_plus_b = Batch_norm_en(Wx_plus_b, i)\n        res = tf.nn.sigmoid(Wx_plus_b)\n    return res\n\n\"\"\"\nDefine the decoder\n\"\"\"\ndef decoder_model(x):\n    res = x\n    for i in range(0, len(de_n_neurons)-1):\n        Wx_plus_b = tf.matmul(res,W_de[i]) + b_de[i]\n        Wx_plus_b = Batch_norm_de(Wx_plus_b, i)\n        res = tf.nn.sigmoid(Wx_plus_b)\n    return res\n\n\"\"\"\nDefine the loss function: a squared-error cost\n\"\"\"\ndef Cost(x, prediction):\n    square_error = tf.reduce_mean(tf.squared_difference(x, prediction))\n    return square_error\n\nencoder_output = encoder_model(X)\ndecoder_output = decoder_model(encoder_output)\n\ncost = Cost(X, decoder_output)\n\ntrain = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\n###########################Train#######################################\n\nsess = tf.Session()\ninit = tf.initialize_all_variables()\nsess.run(init)\nfor i in range(epoches):\n    batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n    sess.run(train, feed_dict={X: batch_xs})\n    if i % 500 == 0 or i % epoches == 0:\n        print(\"Epoch:\", i,\"cost=\", sess.run(cost, feed_dict={X: batch_xs}))\n\n###########################Test#######################################\n \nencode_decode = sess.run(decoder_output, feed_dict={X: mnist.test.images[:examples_to_show]})\nf, a = plt.subplots(2, 10, figsize=(10, 2))\nfor i in range(examples_to_show):\n    a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))# plot the results\n    a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))\nplt.show()\n \nprint(\"Total elapsed time:\", time.perf_counter()-t0, \"seconds\")\n\n\n\n\n","repo_name":"Chiu-Cheng-Chun/Neural-Network","sub_path":"Autoencoder_Tensorflow.py","file_name":"Autoencoder_Tensorflow.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"6328463990","text":"import os\r\nimport subprocess\r\nimport sys\r\nfrom pathlib import Path\r\n\r\n# you need gdal installed, but for this script to use some extra features,\r\n# you might want to have gdal python bindings installed, but not needed for the merge\r\nfrom osgeo import gdal\r\n\r\n# this allows GDAL to throw Python Exceptions, not needed for the merge\r\ngdal.UseExceptions()\r\n\r\nfilelist = []\r\n\r\nfor filename in os.listdir('.'):\r\n    # go into the folders starting with ... 
to find the TIFFs\r\n    if filename.startswith(\"ESM2012_Rel2017\"):\r\n\r\n        # it's a very specific use case here, these nested folders\r\n        tifval = filename.replace('ESM2012_Rel2017_', '')\r\n        longpath = os.path.join(filename, tifval)\r\n        thistif = os.path.join(longpath, tifval + '.TIF')\r\n        try:\r\n            Path(thistif).resolve()\r\n        except FileNotFoundError:\r\n            sys.exit(thistif + ' does not exist?')\r\n\r\n        else:\r\n            # if you want to check if the file can be opened by gdal, not needed for the merge\r\n            # raster = gdal.Open(thistif)\r\n            filelist.append(thistif)\r\n    else:\r\n        print(\"skipping file: \" + filename)\r\n\r\nfor filename in filelist:\r\n    print(\"using file: \" + filename)\r\n    subprocess.run([\"/usr/bin/gdalinfo\", filename])\r\n\r\n# instead of copying/uncompressing huge geotiffs and working on them directly (and crashing on memory),\r\n# you can build a virtual geotiff by linking them together\r\n# the VRT file is only an XML description of the linkage and some parameters\r\nvrt_cmd = [\"/usr/bin/gdalbuildvrt\", \"-vrtnodata\", \"256\", \"-r\", \"nearest\", \"vrt_merged_raster.vrt\"]\r\nvrt_build = vrt_cmd + filelist\r\n\r\nprint(\"starting cmd 1: {}\".format(\" \".join(vrt_build)))\r\n# subprocess.run(vrt_build)\r\n\r\n# this virtual linkage file can then serve as input for gdal tools, like gdalwarp, gdal_translate etc\r\n# here we just\r\n# -expand gray ; -r nearest; -colorinterp gray .. >= 2.3-stats; \"-co\", \"USE_TIF_OVR=TRUE\"?\r\ngdaltranslate_cmd = [\"/usr/bin/gdal_translate\", \"-ot\", \"UInt16\", \"-of\", \"Gtiff\",\r\n                     \"-r\", \"nearest\", \r\n                     \"-mo\", \"DataRepresentation=THEMATIC\", \"-mo\", \"DataType=Thematic\",\r\n                     \"-mo\", \"BandName=Band_1\", \r\n                     \"-co\", \"BIGTIFF=YES\", \"-co\", \"COMPRESS=LZW\",\r\n                     \"-co\", \"GDAL_CACHEMAX=16240\", \"-co\", \"GDAL_NUM_THREADS=4\", \"-co\", \"TFW=YES\",\r\n                     \"-stats\", \"vrt_merged_raster.vrt\", \"spain_merged.tif\"]\r\nprint(\"starting cmd 2: {}\".format(\" \".join(gdaltranslate_cmd)))\r\n# subprocess.run(gdaltranslate_cmd)\r\n\r\n# inspired by https://www.northrivergeographic.com/archives/pyramid-layers-qgis-arcgis\r\n# \"-minsize\", \"128\", is for gdal >= 2.3 not here\r\ngdaladdo_cmd = [\"/usr/bin/gdaladdo\", \"-r\", \"nearest\", \"-ro\", \"--config\", \"COMPRESS_OVERVIEW\", \"LZW\", \"--config\",\r\n                \"BIGTIFF_OVERVIEW\", \"IF_SAFER\", \"--config\", \"USE_RRD\", \"NO\", \"--config\", \"GDAL_NUM_THREADS\", \"4\",\r\n                \"--config\", \"TILED\", \"YES\", \"--config\", \"GDAL_CACHEMAX\", \"16240\", \r\n                \"spain_merged.tif\", \"2\", \"4\", \"8\", \"16\", \"32\", \"64\", \"128\", \"256\", \"512\", \"1024\", \"2048\" ,\"4096\"]\r\nprint(\"starting cmd 3: {}\".format(\" \".join(gdaladdo_cmd)))\r\n# subprocess.run(gdaladdo_cmd)\r\n\r\ngdalstats_cmd = [\"/usr/bin/gdalinfo\", \"-stats\", \"-hist\", \"spain_merged.tif\"]\r\nprint(\"starting cmd 4: {}\".format(\" \".join(gdalstats_cmd)))\r\n# subprocess.run(gdalstats_cmd)\r\n\r\nprint(\"done\")\r\n","repo_name":"allixender/random_raster_gdal_processing_scripts","sub_path":"gdal_merge_virtual_and_pyramids.py","file_name":"gdal_merge_virtual_and_pyramids.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"37432699867","text":"import re\nimport ast\nimport functools\nimport logging\nimport json\nfrom datetime import datetime, date\nimport pytz\nfrom dateutil.relativedelta import *\nimport werkzeug.wrappers\nfrom odoo.exceptions import AccessError\nfrom odoo.addons.jakc_redemption_api.common import 
invalid_response, valid_response\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT\nfrom odoo import http\n\nfrom odoo.addons.jakc_redemption_api.common import (\n    extract_arguments,\n    invalid_response,\n    valid_response,\n)\n\nfrom odoo.http import request\n\n_logger = logging.getLogger(__name__)\n\nexpires_in = \"jakc_redemption_api.access_token_expires_in\"\n\n# import library\nimport math, random\n\n\nclass RdmOTPController(http.Controller):\n    \"\"\".\"\"\"\n\n    def __init__(self):\n\n        self._token = request.env[\"api.access_token\"]\n        self._expires_in = request.env.ref(expires_in).sudo().value\n\n    @http.route(\"/api/rdm/v1.0/resend_otp\", type=\"http\", auth=\"none\", methods=[\"POST\"], csrf=False)\n    def resendotp(self, **post):\n\n        customer_id = post['customer_id'] or False if 'customer_id' in post else False\n\n        _fields_includes_in_body = all([\n            customer_id,\n        ])\n\n        customer_id = int(customer_id)\n\n        if not _fields_includes_in_body:\n            data = {\n                \"status\": False,\n                \"message\": \"Missing fields\",\n            }\n            return valid_response(data)\n\n        # Declare a digits variable\n        # which stores all digits\n        digits = \"0123456789\"\n        OTP = \"\"\n\n        # the OTP length can be changed\n        # by changing the value in range\n        for i in range(4):\n            OTP += digits[math.floor(random.random() * 10)]\n\n        utc_now = pytz.utc.localize(datetime.utcnow())\n        date_now = utc_now.astimezone(pytz.timezone(\"Asia/Jakarta\"))\n\n        expired_date_otp = date_now + relativedelta(hours=+1)\n\n        _logger.info(date_now)\n        _logger.info(expired_date_otp)\n\n        vals = {}\n        vals.update({'rdm_customer_id': customer_id})\n        vals.update({'otp_code': OTP})\n        vals.update({'expired': expired_date_otp})\n        vals.update({'create_date': date_now})\n        rdm_customer_otp_obj = http.request.env['rdm.customer.otp'].sudo().create(vals)\n\n        if not rdm_customer_otp_obj:\n            data = {\n                \"status\": False,\n                \"message\": \"Cannot create OTP code\",\n            }\n            return valid_response(data)\n\n        data = {\n            \"status\": True,\n            \"message\": \"Resend OTP Successfully\",\n        }\n        return valid_response(data)\n\n\n    @http.route(\"/api/rdm/v1.0/check_otp\", type=\"http\", auth=\"none\", methods=[\"POST\"], csrf=False)\n    def checkotp(self, **post):\n\n        customer_id = post['customer_id'] or False if 'customer_id' in post else False\n        code_otp = post['code_otp'] or False if 'code_otp' in post else False\n\n        _fields_includes_in_body = all([\n            customer_id,\n            code_otp,\n        ])\n\n        customer_id = int(customer_id)\n\n\n        if not _fields_includes_in_body:\n            data = {\n                \"status\": False,\n                \"message\": \"Missing fields\",\n            }\n            return valid_response(data)\n\n        # domain = [\n        #     ('rdm_customer_id','=', int(customer_id)),\n        # ]\n        #\n        # rdm_customer_otp_ids = http.request.env['rdm.customer.otp'].sudo().search(domain, order=\"create_date DESC\", limit=1)\n        #\n        # if not rdm_customer_otp_ids:\n        #     data = {\n        #         \"status\": False,\n        #         \"message\": \"Missing fields\",\n        #         \"data\": []\n        #     }\n        #     return valid_response(data)\n        #\n        # utc_now = pytz.utc.localize(datetime.utcnow())\n        # date_now = utc_now.astimezone(pytz.timezone(\"Asia/Jakarta\"))\n        #\n        # date_now = date_now.strftime(\"%Y-%m-%d %H:%M:%S\")\n        # expired_date = rdm_customer_otp_ids.expired.strftime(\"%Y-%m-%d %H:%M:%S\")\n        # otp_code = rdm_customer_otp_ids.otp_code\n        # _logger.info(date_now)\n        # _logger.info(expired_date)\n        # _logger.info(otp_code)\n        #\n        # if expired_date <= date_now:\n        #     data = {\n        #         \"status\": False,\n        #         \"message\": \"Code OTP Expired\",\n        #         \"data\": []\n        #     }\n        #     return 
valid_response(data)\n\n if \"1111\" != code_otp: # rdm_customer_otp_ids.otp_code --> dummy OTP 1111\n data = {\n \"status\": False,\n \"message\": \"Code OTP not Match\",\n }\n return valid_response(data)\n\n # data = {\n # \"status\": True,\n # \"message\": \"OTP Successfully\",\n # \"data\": []\n # }\n # return valid_response(data)\n\n # rdm_customer_obj = http.request.env['rdm.customer'].browse(customer_id)\n # rdm_customer_obj.sudo().write({\n # 'state': \"active\",\n # })\n\n _token = request.env[\"api.access_token\"]\n # Generate tokens\n access_token = _token.find_one_or_create_token(customer_id=customer_id, create=True)\n # Successful response:\n return werkzeug.wrappers.Response(\n status=200,\n content_type=\"application/json; charset=utf-8\",\n headers=[(\"Cache-Control\", \"no-store\"), (\"Pragma\", \"no-cache\")],\n response=json.dumps(\n {\n \"status\": True,\n \"message\": \"\",\n \"data\": {\n \"id\": customer_id,\n \"access_token\": access_token,\n \"expires_in\": self._expires_in,\n }\n }\n ),\n )","repo_name":"yogiabdulyusuf/rdm-dev","sub_path":"rdm/jakc_redemption_api/controllers/otp.py","file_name":"otp.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8416167608","text":"# pylint: disable=missing-docstring\nfrom _asyncio import Future\nfrom . service import _HeosService, HeosError\n\n# pylint: disable=too-many-public-methods\nclass _HeosPlayer(_HeosService):\n prefix = \"player\"\n\n #\n # aid enumeration for 'add_to_queue'\n #\n\n PLAY_NOW = 1\n PLAY_NEXT = 2\n ADD_TO_END = 3\n REPLACE_AND_PLAY = 4\n\n def __init__(self, protocol, player_id=None):\n super().__init__(protocol)\n self.play_states = {\"stop\", \"pause\", \"play\"}\n self.mute_states = {\"on\", \"off\"}\n self.repeat_states = {\"on_all\", \"on_one\", \"off\"}\n self.shuffle_states = {\"on\", \"off\"}\n self._pid = player_id\n\n def pid(self):\n return self._pid\n\n async def get_players(self) -> Future:\n return await self._run(\n \"get_players\",\n )\n\n async def get_player_info(self) -> Future:\n\n return await self._run(\n \"get_player_info\",\n pid=self._pid\n )\n\n async def get_play_state(self) -> Future:\n return await self._run(\n \"get_play_state\",\n pid=self._pid\n )\n\n async def set_play_state(self, state=\"stop\") -> Future:\n if state not in self.play_states:\n raise ValueError(f\"Play state must be one of {self.play_states} instead of {state}\")\n\n return await self._run(\n \"set_play_state\",\n pid=self._pid,\n state=state\n )\n\n async def get_now_playing_media(self) -> Future:\n return await self._run(\n \"get_now_playing_media\",\n pid=self._pid\n )\n\n async def get_volume(self) -> Future:\n return await self._run(\n \"get_volume\",\n pid=self._pid\n )\n\n async def set_volume(self, level=0) -> Future:\n return await self._run(\n \"set_volume\",\n pid=self._pid,\n level=level,\n )\n\n async def volume_up(self, step=5) -> Future:\n return await self._run(\n \"volume_up\",\n pid=self._pid,\n step=step\n )\n\n async def volume_down(self, step=5) -> Future:\n return await self._run(\n \"volume_down\",\n pid=self._pid,\n step=step\n )\n\n async def get_mute(self) -> Future:\n return await self._run(\n \"get_mute\",\n pid=self._pid\n )\n\n async def set_mute(self, state=\"on\") -> Future:\n if state not in self.mute_states:\n raise ValueError(f\"Mute state must be one of {self.mute_states} instead of {state}\")\n\n return await self._run(\n \"set_mute\",\n pid=self._pid,\n 
state=state\n )\n\n async def toggle_mute(self) -> Future:\n return await self._run(\n \"toggle_mute\",\n pid=self._pid\n )\n\n async def get_play_mode(self) -> Future:\n return await self._run(\n \"get_play_mode\",\n pid=self._pid\n )\n\n async def set_play_mode(self, repeat=\"off\", shuffle=\"off\") -> Future:\n if repeat not in self.repeat_states:\n raise ValueError(\n f\"Repeat state must be one of {self.repeat_states} instead of {repeat}\"\n )\n\n if shuffle not in self.shuffle_states:\n raise ValueError(\n f\"Shuffle state must be one of {self.shuffle_states} instead of {shuffle}\"\n )\n\n return await self._run(\n \"set_play_mode\",\n pid=self._pid,\n repeat=repeat,\n shuffle=shuffle\n )\n\n async def get_queue(self, _range=None) -> Future:\n arguments = dict(pid=self._pid)\n if _range:\n arguments[\"range\"] = _range\n\n return await self._run(\n \"get_queue\",\n **arguments\n )\n\n\n\n async def play_queue(self, qid=1) -> Future:\n return await self._run(\n \"play_queue\",\n pid=self._pid,\n qid=qid\n )\n\n async def remove_from_queue(self, qid) -> Future:\n if not isinstance(qid, list):\n qid = [qid]\n\n return await self._run(\n \"remove_from_queue\",\n pid=self._pid,\n qid=\",\".join(map(str, qid))\n )\n\n #\n # this doesn't seem to be working at the moment!\n #\n\n async def save_queue(self, name: str) -> Future:\n if not name or len(name) > 128:\n raise ValueError(\"The playlist name must be less than 128 characters\")\n\n return await self._run(\n \"save_queue\",\n pid=self._pid,\n name=name\n )\n\n async def clear_queue(self) -> Future:\n try:\n await self._run(\n \"clear_queue\",\n pid=self._pid,\n )\n except HeosError as error:\n if error.error_id == 4:\n return\n raise\n\n async def play_next(self) -> Future:\n return await self._run(\n \"play_next\",\n pid=self._pid\n )\n\n async def play_previous(self) -> Future:\n return await self._run(\n \"play_previous\",\n pid=self._pid\n )\n\n async def play_stream(self, sid, cid, mid, name):\n return await self._run(\n \"play_stream\",\n sid=sid,\n cid=cid,\n mid=mid,\n pid=self._pid,\n name=name\n )\n","repo_name":"paulhoule/tentacruel","sub_path":"tentacruel/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34051652236","text":"#!/usr/bin/env python2\n\"\"\"Main function for PyWall.\"\"\"\n\nfrom __future__ import print_function\nimport multiprocessing as mp\nimport logging\nimport argparse\n\nimport config\nimport egress\nimport contrack\nfrom py_log import initialize_logging, log_server\n\n\ndef run_pywall(conf, packet_queue, query_pipe, kwargs):\n \"\"\"Utility function to run PyWall. (target function for the Process)\n\n Run PyWall with a configuration file, as well as the queue for reporting\n TCP packets, and the query pipe for querying state. KWargs are passed to\n the erect() function of PyWall.\n\n \"\"\"\n # Get logging information from the kwargs, so we can setup logging.\n logqueue = kwargs.pop('logqueue', mp.Queue())\n loglevel = kwargs.pop('loglevel', logging.INFO)\n initialize_logging(loglevel, logqueue)\n\n cfg = config.PyWallConfig(conf)\n the_wall = cfg.create_pywall(packet_queue, query_pipe)\n the_wall.erect(**kwargs)\n\n\ndef run_egress(packet_queue, loglevel, logqueue):\n \"\"\"Utility function to run the egress function. 
(target of Process)\n\n    Given the queue to report TCP connections, as well as logging variables,\n    run the egress monitor.\n\n    \"\"\"\n    initialize_logging(loglevel, logqueue)\n    ct = egress.PyWallEgress(packet_queue)\n    ct.run()\n\n\ndef main(conf, loglevel, filename, **kwargs):\n    \"\"\"Main function of the whole program.\n\n    Runs a PyWall given a configuration file, a loglevel, and a filename. This\n    spawns three processes (log_process, egress_process, and pywall_process).\n    It then runs the connection tracker on this process (the \"master process\").\n\n    \"\"\"\n    # Create multiprocessing queues for IPC.\n    egress_queue = mp.Queue()\n    ingress_queue = mp.Queue()\n    log_queue = mp.Queue()\n    query_pywall, query_contrack = mp.Pipe()\n    kwargs['loglevel'] = loglevel\n    kwargs['logqueue'] = log_queue\n\n    # Start logging for the connection tracker.\n    initialize_logging(loglevel, log_queue)\n\n    # Initialize the connection tracker with the IPC channels.\n    ct = contrack.PyWallCracker(ingress_queue, egress_queue, query_contrack)\n\n    # Create and start log_process.\n    log_process = mp.Process(target=log_server, args=(loglevel, log_queue,\n                                                      filename))\n    log_process.start()\n\n    # Create and start egress_process.\n    egress_process = mp.Process(target=run_egress, args=(egress_queue,\n                                                         loglevel, log_queue))\n    egress_process.start()\n\n    # Create and start PyWall process.\n    pywall_process = mp.Process(target=run_pywall, args=(conf, ingress_queue,\n                                                         query_pywall, kwargs))\n    pywall_process.start()\n\n    # Run the connection tracker on the \"master process.\"\n    ct.run()\n\n\nif __name__ == '__main__':\n    # This is run if main.py is executed. Gets arguments from the command line.\n    parser = argparse.ArgumentParser(description='Build a PyWall')\n    parser.add_argument('config', help='JSON configuration file')\n    parser.add_argument('-l', '--log-level', choices=['DEBUG', 'INFO',\n                                                      'WARNING', 'ERROR',\n                                                      'CRITICAL'],\n                        help='set verbosity of logging', default='INFO')\n    parser.add_argument('-f', '--log-file', help='set log file', default=None)\n    args = parser.parse_args()\n    main(args.config, args.log_level, args.log_file)\n","repo_name":"brenns10/pywall","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"81"}
{"seq_id":"69817175627","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n File:\tScatter_plot.py\n Author:\tShixu He\n Email:\theshixu@genomics.cn\n Date:\t2019-08-23\n ------\n Plot for SOAPMetaS\n ------\n Version:\n'''\n\n#import numpy\n\n\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyfit\nfrom matplotlib.patches import Rectangle\nimport scipy.stats\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport math\n\nimport sys\nimport re\nimport argparse\n\ndef plotTest():\n    # Sample data\n    x = -np.random.rand(20)\n    y = 4*x + 0.01\n    p = list(x)#.extend([0,0,0,0.1235,0.16153,0.8891])\n    q = list(y)#.extend([0.1235,0.16153,0.8891,0,0,0])\n    p.extend([0,0,0,0.1235,0.16153,0.8891])\n    q.extend([0.1235,0.16153,0.8891,0,0,0])\n    # Fit with polyfit\n    b, m = polyfit(x, y, 1)\n    plt.title(\"TEST plot\")\n    plt.xlabel(\"xxxx\")\n    plt.ylabel(\"yyy\")\n    x_max = max(p)\n    y_max = max(q)\n    y_min = min(q)\n    l1norm_all = 0.1234\n    l1norm_valid = 0.2345\n    plt.text(x_max, y_max, \"l1norm(all): {0:.4}\".format(l1norm_all), {\"ha\": \"right\", \"va\": \"top\"})\n    plt.text(x_max, y_max-(y_max-y_min)*0.1, \"l1norm(valid): {0:.4}\".format(l1norm_valid), {\"ha\": 
\"right\", \"va\": \"top\"})\n plt.grid(True)\n plt.plot(p, q, '.', color=\"red\")\n plt.plot(x, b + m * x, '-', color=\"grey\")\n #plt.savefig(\"output.pdf\", quality=100, format=\"pdf\")\n plt.show()\n\ndef readGeneProfiling(abundanceFile, colN, mod):\n taxIDAbunDict = dict()\n if mod.startswith(\"simple\"):\n with open(abundanceFile, 'rt', encoding='utf-8') as _abunf:\n if colN == 2:\n while True:\n lines = _abunf.readlines(65535)\n if not lines:\n break\n for line in lines:\n lineEle = re.split(r\"\\t\", line.rstrip())\n taxIDAbunDict[lineEle[0]] = float(lineEle[1])\n elif colN == 5:\n while True:\n lines = _abunf.readlines(65535)\n if not lines:\n break\n for line in lines:\n lineEle = re.split(r\"\\t\", line.rstrip())\n taxIDAbunDict[lineEle[0]] = float(lineEle[4])\n else:\n cn = colN - 1\n while True:\n lines = _abunf.readlines(65535)\n if not lines:\n break\n for line in lines:\n lineEle = re.split(r\"\\t\", line.rstrip())\n taxIDAbunDict[lineEle[0]] = float(lineEle[cn])\n elif mod.startswith(\"cami\"):\n with open(abundanceFile, 'rt', encoding='utf-8') as _abunf:\n while True:\n lastPos = _abunf.tell()\n line = _abunf.readline()\n if line.startswith(\"@\"):\n continue\n if line.startswith(\"#\"):\n continue\n if len(re.split(r\"\\t\", line.rstrip())) < 4:\n continue\n _abunf.seek(lastPos)\n break\n while True:\n lines = _abunf.readlines(65535)\n if not lines:\n break\n for line in lines:\n lineEle = re.split(r\"\\t\", line.rstrip())\n try:\n if (lineEle[1] == \"species\"):\n taxIDAbunDict[lineEle[0]] = float(lineEle[4])\n except IndexError as e:\n print(e + \" . Current line: \" + str(lineEle))\n return taxIDAbunDict\n\ndef drawLine(title=\"Sample Plot\", xAbunDict=dict(), xLabel=\"x\", yAbunDict=dict(), yLabel=\"y\", pic_format=\"pdf\", outFile = \"SOAPMetas_Scatter_plot_sample.pdf\", doLog=False, doReg=False):\n # all values for draw dots\n #x_values = []\n #y_values = []\n xScaleNum = 1.0\n yScaleNum = 1.0\n l1norm_all = 0\n l1norm_valid = 0\n if (sum(xAbunDict.values()) > 1 ):\n xScaleNum = 100.0\n if (sum(yAbunDict.values()) > 1 ):\n yScaleNum = 100.0\n\n # common values for plot fit\n x_common = [] \n y_common = []\n\n # uncommon values\n x_uncon = []\n y_uncon = []\n\n taxSet = set(xAbunDict.keys()).union(yAbunDict.keys())\n if doLog:\n for tax in taxSet:\n x0 = xAbunDict.get(tax, 0.0)/xScaleNum\n y0 = yAbunDict.get(tax, 0.0)/yScaleNum\n l1norm_all += abs(x0-y0)\n #x_values.append(x0)\n #y_values.append(y0)\n if (x0>0 and y0>0):\n l1norm_valid += abs(x0-y0)\n x_common.append(math.log10(x0))\n y_common.append(math.log10(y0))\n elif (x0>0 or y0>0):\n x_uncon.append(math.log10(x0) if x0>0 else 0)\n y_uncon.append(math.log10(y0) if y0>0 else 0)\n else:\n continue\n else:\n for tax in taxSet:\n x0 = xAbunDict.get(tax, 0.0)/xScaleNum\n y0 = yAbunDict.get(tax, 0.0)/yScaleNum\n l1norm_all += abs(x0-y0)\n #x_values.append(x0)\n #y_values.append(y0)\n if (x0>0 and y0>0):\n l1norm_valid += abs(x0-y0)\n x_common.append(x0)\n y_common.append(y0)\n elif (x0>0 or y0>0):\n x_uncon.append(x0)\n y_uncon.append(y0)\n else:\n continue\n \n c_0, c_1 = polyfit(x_common, y_common, 1)\n slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x_common, y_common)\n #x_max = max(max(x_common) if len(x_common) > 0 else 0, max(x_uncon) if len(x_uncon) > 0 else 0)\n #y_min = min(min(y_common) if len(y_common) > 0 else 0, min(y_uncon) if len(y_uncon) > 0 else 0, min(x_common) if len(x_common) > 0 else 0, min(x_uncon) if len(x_uncon) > 0 else 0)\n #plt.text(x_max, y_min*0.86, \"Proximity 
(all): {0:.4}\".format(2-l1norm_all), {\"ha\": \"right\", \"va\": \"top\"})\n #plt.text(x_max, y_min*0.9, \"Proximity (valid): {0:.4}\".format(2-l1norm_valid), {\"ha\": \"right\", \"va\": \"top\"})\n\n\n fig, axes = plt.subplots(1, 1)\n if title != None:\n plt.title(title)\n if doLog:\n plt.xlabel(xLabel+ \" (log)\")\n plt.ylabel(yLabel+ \" (log)\")\n else:\n plt.xlabel(xLabel)\n plt.ylabel(yLabel)\n\n if doReg:\n fig.text(0.90, 0.85, \"r^2 = {0:.4}\".format(r_value**2), ha=\"right\")\n fig.text(0.90, 0.80, \"p = {0:.4}\".format(p_value), ha=\"right\")\n fig.text(0.90, 0.75, \"slope = {0:.4}\".format(slope), ha=\"right\")\n fig.text(0.90, 0.70, \"intercept = {0:.4}\".format(intercept), ha=\"right\")\n axes.grid(True)\n axes.plot(x_uncon, y_uncon, \".\", color=\"gray\")\n axes.plot(x_common, y_common, '.', color=\"orchid\")\n xc = np.asarray(x_common)\n plt.plot(xc, intercept + slope * xc, '-', color=\"tomato\")\n #axes.plot(xc, xc, \"-\", color=\"tomato\")\n else:\n axes.set_xlim(-6, 1)\n axes.set_ylim(-6, 1)\n dots1, = axes.plot(x_uncon, y_uncon, \"x\", color=\"gray\")\n dots2, = axes.plot(x_common, y_common, 's', color=\"red\")\n xc = np.asarray(x_common)\n #plt.plot(xc, c_0 + c_1 * xc, '-', color=\"tomato\")\n line1 = axes.plot(xc, xc, \"-\", color=\"black\", linewidth=1)\n extra1 = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n extra2 = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n axes.legend([dots1, dots2, extra1, extra2], (\"Uncommon\", \"Common\", \"l1norm (all): {0:.4}\".format(l1norm_all), \"l1norm (>0): {0:.4}\".format(l1norm_valid)), loc = 1)\n \n #plt.axis(\"equal\")\n #fig.tight_layout()\n plt.tight_layout()\n plt.savefig(outFile, quality=100, format=pic_format)\n #plt.show()\n\ndef checkArgv():\n '''Parse the command line parameters.'''\n _parser = argparse.ArgumentParser(description=\"The script is used for abundance calculation from alignment of bowtie/bwa.\")\n \n _parser.add_argument(\n \"--title\",\n required=False,\n default=\"Sample Plot\",\n help=\"Plot title.\"\n )\n\n _parser.add_argument(\n \"--xlabel\",\n required=False,\n default=\"x label\",\n help=\"Lagend of axis-x.\"\n )\n\n _parser.add_argument(\n \"--ylabel\",\n required=False,\n default=\"y label\",\n help=\"Lagend of axis-y.\"\n )\n \n _parser.add_argument(\n \"--output\",\n required=False,\n default=\"./SOAPMetas_Scatter_plot_sample.pdf\",\n help=\"Output plot file name. (pdf format)\"\n )\n\n _parser.add_argument(\n \"--input-abun-x\",\n required=True,\n help=\"CAMI format abundance file, the data is used for x-coordinate.\"\n )\n\n _parser.add_argument(\n \"--input-abun-y\",\n required=True,\n help=\"CAMI format abundance file, the data is used for y-coordinate.\"\n )\n\n _parser.add_argument(\n \"--pic-format\",\n required=False,\n default=\"pdf\",\n help=\"Plot format. [pdf, png, jpg, ...]\"\n )\n\n _parser.add_argument(\n \"--log-norm\",\n action=\"store_true\",\n help=\"whether calculate the log value\"\n )\n\n _parser.add_argument(\n \"--abun-col\",\n type=int,\n default=2,\n help=\"The abundance column in .abundance file. 
2 for SOAPMetaS default output, 5 for SOAPMetaS detailed output\"\n    )\n\n    _parser.add_argument(\n        \"--regression\",\n        action=\"store_true\",\n        help=\"Do regression analysis; the straight line will be the fitted line.\"\n    )\n\n    _parser.add_argument(\n        \"--mode\",\n        required=False,\n        type=str,\n        default=\"simple\",\n        choices=[\"cami\",\"simple\"],\n        help=\"species or gene abundance\"\n    )\n\n\n    return _parser.parse_args()\n\ndef main():\n    args = checkArgv()\n    #plotTest()\n    xInputFile = args.input_abun_x\n    yInputFile = args.input_abun_y\n    xAbunDict = readGeneProfiling(xInputFile, args.abun_col, args.mode)\n    yAbunDict = readGeneProfiling(yInputFile, args.abun_col, args.mode)\n    drawLine(title=args.title, xAbunDict = xAbunDict, xLabel=args.xlabel, yAbunDict = yAbunDict, yLabel=args.ylabel, pic_format=args.pic_format, outFile=args.output, doLog =args.log_norm, doReg=args.regression)\n\n\nif __name__ == \"__main__\":\n    #plotTest()\n    main()\n","repo_name":"BGI-flexlab/SOAPMetaS","sub_path":"utils/Scatter_plot_gene.py","file_name":"Scatter_plot_gene.py","file_ext":"py","file_size_in_byte":10325,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"}
{"seq_id":"38322377020","text":"import csv\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nrace_data = np.array([])\r\nwith open('./f1db_csv/lap_times.csv',newline='') as csvfile:\r\n\tnext(csvfile)\r\n\treader = csv.reader(csvfile,delimiter=',')\r\n\tfor row in reader:\r\n\t\tif int(row[0])==1034:\r\n\t\t\trowData = np.array(row[1:4]+[row[-1]]).astype(int)\r\n\t\t\tif len(race_data) == 0:\r\n\t\t\t\trace_data = rowData\r\n\t\t\t\trace_data = np.reshape(race_data,(1, race_data.size))\r\n\t\t\telse:\r\n\t\t\t\trace_data = np.vstack([race_data,rowData])\r\n\r\ndrivers = np.unique(race_data[:,0])\r\ndrivernames = np.empty(len(drivers),dtype='U50')\r\n\r\nwith open('./f1db_csv/drivers.csv',newline='') as csvfile:\r\n\tnext(csvfile)\r\n\treader = csv.reader(csvfile,delimiter=',')\r\n\tfor row in reader:\r\n\t\tfor i in range(len(drivers)):\r\n\t\t\tif int(row[0])==drivers[i]:\r\n\t\t\t\tdrivernames[i]=row[1]\r\n\r\ndef getDriverLapData(race_data,driver):\r\n\tdata = race_data[race_data[:,0]==driver]\r\n\treturn data\r\n\r\ndef getDriverDelta(race_data,driver1,driver2):\r\n\tdata1 = getDriverLapData(race_data,driver1)\r\n\tdata2 = getDriverLapData(race_data,driver2)\r\n\r\n\tdelta = (data1[:,3]-data2[:,3])/1000\r\n\treturn delta\r\n\r\nfor i in range(len(drivers)):\r\n\tif drivernames[i]=='albon' or drivernames[i]=='max_verstappen' or drivernames[i]=='leclerc':\r\n\t\tdata = getDriverLapData(race_data,drivers[i])\r\n\r\n\t\tplt.plot(data[:,1],data[:,3]/1000,label=drivernames[i])\r\nplt.legend()\r\nplt.ylim(88,95)\r\nplt.show()\r\n\r\nprint(drivernames)\r\n# delta = getDriverDelta(race_data,1,830)\r\n# plt.plot(delta)\r\n# plt.ylim()\r\n# plt.show()\r\n\r\n","repo_name":"abcdefg781/f1.github.io","sub_path":"race_viewer.py","file_name":"race_viewer.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"16036695420","text":"def fibonacci(n):\n    '''Calculate the nth fibonacci number using recursion\n\n    Examples\n    --------\n    >>> fibonacci(7)\n    13\n    '''\n\n    if n < 0:\n        raise ValueError(f'n must be >= 0, got {n}')\n\n    if n == 0:\n        return 0\n\n    if n == 1:\n        return 1\n\n    return fibonacci(n - 1) + fibonacci(n - 
2)\n","repo_name":"escape2020/school2021","sub_path":"testing/fibonacci/fibonacci/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"81"}
{"seq_id":"43443526307","text":"class Page():\n    def __init__(self,page_num,total_count,url_prefix,per_page=10,max_page=11):\n        '''\n\n        :param page_num: the current page\n        :param total_count: total number of records\n        :param url_prefix: URL prefix used in the links\n        :param per_page: records shown per page\n        :param max_page: maximum number of page links shown at once\n        '''\n        self.url_prefix = url_prefix\n        self.max_page = max_page\n        # compute the total page count\n        page_count, m = divmod(total_count, per_page)\n        if m:\n            page_count += 1\n        self.page_count = page_count\n        # recover gracefully from bad user input\n        try:\n            page_num = int(page_num)\n            # if the requested page exceeds the last page, clamp to the last page\n            if page_num > page_count:\n                page_num = page_count\n            # if the input is smaller than 1:\n            if page_num < 1:\n                page_num = 1\n        except Exception as e:\n            # if the input is not a valid number\n            page_num = 1\n        self.page_num = page_num\n        # start and end indices of this page's data\n        self.data_start = (page_num - 1) * 10\n        self.data_end = page_num * 10\n        # number of pages on each side of the current page\n        half_page = self.max_page // 2\n        # first and last page numbers to display\n        start_page = page_num - half_page\n        end_page = page_num + half_page\n        # handle the case where the total page count is below max_page\n        if page_count < self.max_page:\n            self.max_page = page_count\n        # if the start page is smaller than 1\n        if start_page < 1:\n            start_page = 1\n            end_page = self.max_page\n        # if the end page exceeds the total page count\n        if end_page > page_count:\n            end_page = page_count\n            start_page = end_page - self.max_page + 1\n        self.start_page =start_page\n        self.end_page =end_page\n\n    @property\n    def start(self):\n        return self.data_start\n\n    @property\n    def end(self):\n        return self.data_end\n\n    def page_html(self):\n        # hand-build the pagination HTML\n        html_str_list = []\n        # add the 'first page' link\n        html_str_list.append('<li><a href=\"/{}/?page=1\">首页</a></li>'.format(self.url_prefix))\n        # add the 'previous page' link\n        if self.page_num <= 1:\n            html_str_list.append(\n                '<li class=\"disabled\"><a href=\"#\">«</a></li>')\n        else:\n            html_str_list.append(\n                '<li><a href=\"?page={}\">«</a></li>'.format(\n                    self.page_num - 1))\n\n        for i in range(self.start_page, self.end_page + 1):\n            if i == self.page_num:\n                tmp = '<li class=\"active\"><a href=\"/{0}/?page={1}\">{1}</a></li>'.format(self.url_prefix,i)\n            else:\n                tmp = '<li><a href=\"/{0}/?page={1}\">{1}</a></li>'.format(self.url_prefix,i)\n            html_str_list.append(tmp)\n\n        # add the 'next page' link\n        if self.page_num >= self.page_count:\n            html_str_list.append(\n                '<li class=\"disabled\"><a href=\"#\">»</a></li>')\n        else:\n            html_str_list.append(\n                '<li><a href=\"?page={}\">»</a></li>'.format(\n                    self.page_num + 1))\n        # add the 'last page' link\n        html_str_list.append('<li><a href=\"/{0}/?page={1}\">尾页</a></li>'.format(self.url_prefix,self.page_count))\n        page_html = ''.join(html_str_list)\n        return page_html","repo_name":"DCooooo/python_django","sub_path":"day71_page/utils/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"10047760122","text":"from typing import Any\nfrom unittest import mock\nimport os\nimport asyncio\nimport time\n\nimport pytest\n\nfrom meeshkan.core.tracker import TrackerBase, TrackingPoller\nfrom meeshkan.core.job import Job\nimport meeshkan.exceptions\n\n\ndef test_tracker_history():\n    tb = TrackerBase()\n    scalar_name = \"tracked_value\"\n    tracked_value = 0\n    tb.add_tracked(scalar_name, tracked_value)  # Test adding (integer) values\n    tracked_value += 1e-7\n    tb.add_tracked(scalar_name, tracked_value)  # Test adding scientific notation\n    history = tb._history_by_scalar\n    assert len(history) == 1, \"There should only be one scalar value tracked\"  # Number of value_names tracked\n    assert scalar_name in history , \"The scalar history must contain the scalar name\"  # Keeps correct naming\n    history = history[\"tracked_value\"]\n    assert len(history) == 2, \"There have been two reports for this scalar!\"  # Correct number of values tracked\n    assert history[0].value == 0, \"First reported value was zero\"\n    assert history[1].value == 1e-7, \"Second reported value was 1e-7\"\n\n    tb.add_tracked(\"another value\", -2.3)  # Checks multiple value names\n    assert len(tb._history_by_scalar) == 2, \"There are now two reported scalars\"\n    assert tb._history_by_scalar[\"another value\"][0].value == -2.3, \"The new scalar's only reported value is -2.3!\"\n\n\ndef test_generate_image():\n    tb = TrackerBase()\n    scalar_name = \"tracked_value\"\n    tb.add_tracked(scalar_name, 0)\n    tb.add_tracked(scalar_name, 2)\n    history = tb._history_by_scalar\n    fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"temp\")\n    tb.generate_image(history, output_path=fname)\n    new_fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"temp.\" + TrackerBase.DEF_IMG_EXT)\n    assert os.path.isfile(new_fname), \"The file '{}' was not created by `generate_image`!\".format(new_fname)\n    os.remove(new_fname)\n\n\ndef test_get_updates_with_image():\n    tb = TrackerBase()\n    scalar_name = \"tracked_value\"\n    tb.add_tracked(scalar_name, 1)\n    tb.add_tracked(scalar_name, 2)\n    history, fname = tb.get_updates()\n\n    assert scalar_name in history, \"The only reported scalar should be available as key in the scalar history!\"\n    assert len(history) == 1, \"There was only one reported scalar\"\n    history = history[scalar_name]\n    assert len(history) == 2, \"The reported scalar had only 2 reported values\"\n    assert history[0].value == 1, \"The first reported value was 1\"\n    assert history[1].value == 2, \"The second reported value was 2\"\n    assert os.path.isfile(fname), \"No image was generated at '{}'\".format(fname)\n    os.remove(fname)\n\n\ndef test_get_latest_updates():\n    tb = TrackerBase()\n    scalar_name = \"tracked_value\"\n    tb.add_tracked(scalar_name, 1)\n    tb.add_tracked(scalar_name, 2.2)\n    tb.add_tracked(scalar_name, -4.1)\n    history, fname = tb.get_updates(plot=False)\n    history = history[scalar_name]\n    assert fname is None, \"A plot should not have been generated at this point\"\n    assert len(history) == 3, \"There were 3 reported values for the same scalar at this point\"\n    assert [timevalue.value for timevalue in history] == [1, 2.2, -4.1], \"The reported values were 1, 2.2, -4.1\"\n\n
tb.add_tracked(scalar_name, 0)\n history, _ = tb.get_updates(plot=False)\n history = history[scalar_name]\n assert len(history) == 1, \"Only one value was reported since last time we called `get_updates`!\"\n\n history, _ = tb.get_updates(plot=False, latest=False)\n history = history[scalar_name]\n assert len(history) == 4, \"There were a total of 4 values reported!\"\n\n\ndef test_get_updates_with_name():\n tb = TrackerBase()\n scalar_name = \"tracked_value\"\n tb.add_tracked(scalar_name, 1)\n tb.add_tracked(\"another value\", 1)\n history, _ = tb.get_updates(scalar_name, plot=False, latest=True)\n assert len(history) == 1, \"We've requested updates for a specific scalar name, expecting only one key\"\n assert len(history[scalar_name]) == 1, \"The requested scalar had only one value repoted\"\n\n\ndef test_base_clean():\n tb = TrackerBase()\n tb.add_tracked(\"my value\", 2)\n tb.add_tracked(\"another value\", 0.4)\n assert len(tb._history_by_scalar) == 2, \"There were two reported scalars!\"\n tb.clean()\n assert len(tb._history_by_scalar) == 0, \"After cleaning, we expect the history to be... well, cleaned.\"\n\n\ndef test_base_refresh():\n # Should just call clean...\n tb = TrackerBase()\n tb.add_tracked(\"my value\", 2)\n tb.add_tracked(\"another value\", 0.4)\n assert len(tb._history_by_scalar) == 2, \"There were two reported scalars!\"\n tb.refresh()\n assert len(tb._history_by_scalar) == 0, \"After refreshing, we expect the history to be cleaned by default.\"\n\n\ndef test_missing_value():\n tb = TrackerBase()\n with pytest.raises(meeshkan.exceptions.TrackedScalarNotFoundException):\n tb.get_updates(\"hello world\")\n\n\n@pytest.mark.asyncio\nasync def test_tracker_polling():\n counter = 0\n def notify_function(job):\n nonlocal counter\n counter += 1\n if counter == 2:\n task.cancel()\n fake_job = Job(None, job_number=0, poll_interval=0.5) # No executable\n tp = TrackingPoller(notify_function) # Call notify_function in each loop\n\n t_start = time.time()\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(tp.poll(fake_job, fake_job.poll_time))\n await task # Wait for the task.cancel(), otherwise it would run indefinitely\n assert counter == 2, \"`counter` is expected to stop after being called twice!\"\n tot_time = time.time() - t_start\n max_time = fake_job.poll_time * (counter+1)\n assert tot_time < max_time, \"Runtime should be poll_time*2 + overhead (poll_time = {})\".format(fake_job.poll_time)\n\n","repo_name":"meeshkan/meeshkan-client","sub_path":"tests/test_tracker.py","file_name":"test_tracker.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"74833900105","text":"from docker.models.containers import Container\nfrom docker.models.networks import Network\nfrom docker.models.volumes import Volume\nimport pytest\n\nfrom pytest_docker_tools.utils import (\n check_signature,\n hash_params,\n is_using_network,\n is_using_volume,\n set_signature,\n)\n\n\ndef test_hash_params():\n \"\"\"\n Test generating signatures for fixture factory kwargs.\n \"\"\"\n assert (\n hash_params(\n {\n \"name\": \"my-name\",\n \"labels\": {\n \"label1\": \"label\",\n },\n }\n )\n == \"1c67a2e8dd405725a4cdf7b58fed3e948aed135ac25c494a3b336c83a72ac0c8\"\n )\n\n\ndef test_hash_params_raises():\n \"\"\"\n Make sure that we still catch invalid kwargs when generating signature hashes.\n\n This is really just to get full coverage of the JSONEncoder subclass.\n We shouldn't hit this 
exception on the happy path.\n \"\"\"\n with pytest.raises(TypeError):\n hash_params(\n {\n \"name\": \"my-name\",\n \"labels\": {\n \"label1\": object(),\n },\n }\n )\n\n\ndef test_check_signature():\n signature = \"1c67a2e8dd405725a4cdf7b58fed3e948aed135ac25c494a3b336c83a72ac0c8\"\n labels = {\n \"pytest-docker-tools.signature\": signature,\n }\n\n assert check_signature(labels, signature)\n\n\ndef test_set_signature_no_existing_labels():\n signature = \"1c67a2e8dd405725a4cdf7b58fed3e948aed135ac25c494a3b336c83a72ac0c8\"\n\n kwargs = {\n \"name\": \"hello\",\n \"image\": \"alpine:3.13\",\n }\n set_signature(kwargs, signature)\n\n assert kwargs == {\n \"name\": \"hello\",\n \"image\": \"alpine:3.13\",\n \"labels\": {\n \"pytest-docker-tools.signature\": signature,\n },\n }\n\n\ndef test_set_signature_preserve_labels():\n signature = \"1c67a2e8dd405725a4cdf7b58fed3e948aed135ac25c494a3b336c83a72ac0c8\"\n\n kwargs = {\n \"name\": \"hello\",\n \"image\": \"alpine:3.13\",\n \"labels\": {\n \"mylabel\": \"hello\",\n },\n }\n set_signature(kwargs, signature)\n\n assert kwargs == {\n \"name\": \"hello\",\n \"image\": \"alpine:3.13\",\n \"labels\": {\n \"mylabel\": \"hello\",\n \"pytest-docker-tools.signature\": signature,\n },\n }\n\n\ndef test_is_using_network():\n network1 = Network(attrs={\"Name\": \"test\"})\n\n network2 = Network(attrs={\"Name\": \"other-test\"})\n\n container = Container(attrs={\"NetworkSettings\": {\"Networks\": {\"test\": {}}}})\n\n assert is_using_network(container, network1)\n assert not is_using_network(container, network2)\n\n\ndef test_is_using_network_no_networks():\n network1 = Network(attrs={\"Name\": \"test\"})\n\n container = Container(attrs={\"NetworkSettings\": {\"Networks\": {}}})\n\n assert not is_using_network(container, network1)\n\n\ndef test_is_using_volume():\n volume1 = Volume(attrs={\"Name\": \"test\"})\n\n volume2 = Volume(attrs={\"Name\": \"other-test\"})\n\n container = Container(\n attrs={\n \"Mounts\": [\n {\n \"Name\": \"test\",\n \"Type\": \"volume\",\n }\n ]\n }\n )\n\n assert is_using_volume(container, volume1)\n assert not is_using_volume(container, volume2)\n\n\ndef test_is_using_volume_no_mounts():\n volume1 = Volume(attrs={\"Name\": \"test\"})\n\n container = Container(attrs={})\n\n assert not is_using_volume(container, volume1)\n\n\ndef test_is_using_volume_bind_mounts():\n volume1 = Volume(attrs={\"Name\": \"test\"})\n\n container = Container(\n attrs={\n \"Mounts\": [\n {\n \"Type\": \"bind\",\n \"Name\": \"test\",\n }\n ]\n }\n )\n\n assert not is_using_volume(container, volume1)\n","repo_name":"Jc2k/pytest-docker-tools","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"81"} +{"seq_id":"24335326502","text":"print(\"-------exercicio 10-------\")\nnumero1=int(input(\"digite um numero inteiro ? \"))\nnumero2=int(input(\"digite outro numero inteiro ? 
\"))\nif numero1 < numero2:\n for i in range(numero1+1,numero2+1):\n print(i)\n\na=numero2\nb=numero1\nfor i in range(a+1,b,1):\n print(i)","repo_name":"henriquemene/treinamento-python","sub_path":"estrutura de repetição/10-exercicio.py","file_name":"10-exercicio.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24087634078","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nif __name__ == \"__main__\":\r\n page = requests.get(\"https://www.nationsonline.org/oneworld/countries_of_the_world.htm\")\r\n soup = BeautifulSoup(page.text, \"html.parser\")\r\n \r\n list1 = soup.findAll(\"td\", {'class': 'tdx'})\r\n list2 = soup.findAll(\"td\", {'class': 'tdb'})\r\n contents = list1 + list2\r\n countries = []\r\n for content in contents:\r\n try:\r\n country = content.find(\"a\")\r\n if country.text:\r\n print(country.text)\r\n countries.append(country.text)\r\n \r\n if \"Zimbabwe\" in country.text:\r\n print(\"Total:\", len(countries))\r\n break\r\n except:\r\n pass\r\n","repo_name":"sauleni/shizzniz2","sub_path":"src/files/getCountries.py","file_name":"getCountries.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19019448204","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n def printTree(self):\n if not self:\n return None\n print(self.val)\n TreeNode.printTree(self.left)\n TreeNode.printTree(self.right)\n\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode'):\n if root:\n if max(q.val, p.val) < root.val:\n return self.lowestCommonAncestor(root.left, p, q)\n elif min(q.val, p.val) > root.val:\n return self.lowestCommonAncestor(root.right, p, q)\n else:\n return root\n\n\n# node1 = TreeNode(0, None, None)\n# node2 = TreeNode(4, None, None)\n# node3 = TreeNode(2, node1, node2)\n# node4 = TreeNode(7, None, None)\n# node5 = TreeNode(9, None, None)\n# node6 = TreeNode(8, node4, node5)\n# root = TreeNode(6, node3, node6)\n\nnode1 = TreeNode(1, None, None)\nroot = TreeNode(2, node1, None)\n\nroot.printTree()\nprint()\nprint(Solution.lowestCommonAncestor(Solution, root, root, node1).val)\n\n# 6\n# 2 8\n# 0 4 7 9","repo_name":"kunata928/LeetCode-solutions","sub_path":"srcs/part_I/235-LowestCommonAncestorofaBinarySearchTree/235-LowestCommonAncestorofaBinarySearchTree.py","file_name":"235-LowestCommonAncestorofaBinarySearchTree.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26349649699","text":"import numpy as np\nimport itertools\nimport re\n\n\ndef checkDuplicates(geneList):\n '''Returns False if their are multiple of the same allele in the np.array'''\n return np.array([len(np.unique(np.char.lower(geneList[i]))) == len(geneList[i]) for i in range(len(geneList))])\n\n \ndef deleteDuplicates(geneList,findAlleles):\n '''Returns np.array of alleles np.arrays w/o duplicates'''\n newGeneList=[]\n for i in range(len(geneList)):\n if findAlleles[i] == True:\n newGeneList.append(geneList[i])\n return np.array(newGeneList)\n\n \ndef determineChildGenotypes(alleles1, alleles2, nHyb1):\n '''Returns all possible genotypes of the child and their likelihoods'''\n \n tempJ=[]\n tempFlattenJ=[]\n allChild = []\n for i in 
range(len(alleles1)):\n for j in range(len(alleles2)):\n for k in range(nHyb1):\n tempJ.append(np.array(sorted(np.array([alleles1[i],alleles2[j]])[:,k])))\n tempFlattenJ = np.array(tempJ).flatten()\n allChild.append('-'.join(re.findall('..',''.join(tempFlattenJ))))\n tempFlattenJ=[]\n tempJ=[]\n \n childGenDict = {i:(allChild.count(i))/len(allChild) for i in allChild}\n \n return childGenDict\n \n\ndef punnettSquare(gen1=0,gen2=0):\n '''Returns all possible genotypes of the child'''\n if gen1==0:\n genotype1 = input(\"Please enter genotype of parent 1 (eg. rr-yy-Ww-ss): \") # Parent 1\n genotype2 = input(\"Please enter genotype of parent 2: \") # Parent 2\n else:\n genotype1 = gen1\n genotype2 = gen2\n \n \n # Determine punnett square alleles for parent 1 \n genotype1ListTemp=np.array(genotype1.split('-'))\n nHybrid1 = genotype1ListTemp.size\n genotype1List=np.array([np.array(list(genotype1ListTemp[i])) for i in range(nHybrid1)])\n \n genotype1CombList = np.array(list(itertools.combinations(genotype1List.flatten(),nHybrid1)))\n genotype1FindAlleles = checkDuplicates(genotype1CombList)\n genotype1AllelesList = deleteDuplicates(genotype1CombList,genotype1FindAlleles)\n \n \n # Determine punnett square alleles for parent 2 \n genotype2ListTemp=np.array(genotype2.split('-'))\n nHybrid2 = genotype2ListTemp.size\n genotype2List=np.array([np.array(list(genotype2ListTemp[i])) for i in range(nHybrid2)])\n \n genotype2CombList = np.array(list(itertools.combinations(genotype2List.flatten(),nHybrid2)))\n genotype2FindAlleles = checkDuplicates(genotype2CombList)\n genotype2AllelesList = deleteDuplicates(genotype2CombList,genotype2FindAlleles)\n \n\n childGenAndProb = determineChildGenotypes(genotype1AllelesList, genotype2AllelesList, nHybrid1)\n \n print(\"The potential genotypes and corresponding probabilities of the child are:\")\n for key, value in childGenAndProb.items():\n print('{} ({:.2%})'.format(key, value))\n\n\n# Can either enter parent genotypes after running code or add them as arguments to the function below\n# eg. punnettSquare('rr-yy-Ww-ss','RR-yy-WW-Ss')\n\npunnettSquare()","repo_name":"m-fuller/Personal-Projects","sub_path":"n-Hybrid Punnett Square.py","file_name":"n-Hybrid Punnett Square.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72490230026","text":"'''\nCreated on Feb 6, 2017\n\n@author: BanhDzui\n'''\n\nimport sys\n\nfrom common.DataSet import DataSet\nfrom common.CommandArgs import CommandArgs\nfrom rules_mining.RuleMiner import RuleMiner\n\nif __name__ == '__main__':\n config = CommandArgs({'input' : ('', 'Path of data-set file'),\n 'format' : ('mydefault', 'Format of input data'),\n 'minsup' : (0.1, 'Minimum support'),\n 'minconf' : (0.3, 'Minimum confidence'),\n 'maxitems': (-1, 'Maximum number of items in the rules'),\n 'class' : (-1, 'Class index')\n }) \n \n if not config.load(sys.argv):\n print ('Argument is not correct. 
Please try again')\n sys.exit(2)\n \n print('Loading data....')\n train_data_set = DataSet()\n class_index = int(config.get_value('class'))\n train_data_set.load(config.get_value('input'), class_index)\n \n print('Generating rules ....')\n min_sup_src = float(config.get_value('minsup'))\n min_conf = float(config.get_value('minconf'))\n itemset_max_size = int(config.get_value('maxitems'))\n \n miner = RuleMiner(config.get_value('format'), train_data_set)\n miner.generate_itemsets_and_rules(min_sup_src, min_conf, itemset_max_size)\n \n print('Finished!!!')\n ","repo_name":"banhdzui/MoMAC-v1","sub_path":"GenerateAssociationRules.py","file_name":"GenerateAssociationRules.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27205933190","text":"import requests\nfrom typing import Any\n\n\ndef get_product_id(nickname: str, BASE_URL: str ='https://api.mercadolibre.com', \n site_id: str = 'MLA', offset: int = 0, limit: int = 50)-> list[str]:\n \"\"\"Get the product ID from the vendor nickname\n \n Parameters:\n nickname (str): The vendor nickname\n site_id (str): The site ID name\n BASE_URL (str): The base URL\n offset (int): The offset to retrieve the results, by default is 0 \n FYC the limit value by default is 50 \n Returns:\n list: Returns fifty product ID's\n \"\"\"\n \n vendor = requests.get(f'{BASE_URL}/sites/{site_id}/search?nickname={nickname}&offset={offset}&limit={limit}').json()\n return [result['id'] for result in vendor['results']]\n\ndef product_details(product_id: str, BASE_URL: str ='https://api.mercadolibre.com') -> list[dict[str, Any]]:\n \"\"\"Get the product details from product ID\n \n Parameters:\n product_id (str): The product_id to search\n BASE_URL (str): The base URL\n \n Returns:\n dict: Returns a dict with the details of the product\n \"\"\"\n item_list = []\n percent = lambda part, whole: 100 * float(part) / float(whole)\n item = requests.get(f'{BASE_URL}/items/{product_id}')\n if item.status_code == 200:\n item = item.json()\n original_price = item.get('original_price') if item.get('original_price') is not None else item.get('base_price')\n discount_ammount = abs(original_price - item.get('base_price')) if original_price != 0 else 0\n discount_percent = percent(discount_ammount, original_price)\n item_list.append({\n 'seller_id': item.get('seller_id'),\n 'title': item.get('title'),\n 'brand': item['attributes'][2]['values'][0]['name'],\n 'base_price': item.get('base_price'), #precio con descuento, menos precio\n 'original_price': original_price, # precio original sin descuento. 
aveces en null\n 'discount_ammount': discount_ammount,\n 'discount_percent': round(discount_percent, 2),\n 'brand_discount_allow': 0 # 0 by default for every product\n })\n return item_list\n return None\n\ndef update_product_data(obj, item_details):\n obj.base_price = item_details[0]['base_price']\n obj.discount_ammount = item_details[0]['discount_ammount']\n obj.discount_percent = item_details[0]['discount_percent']\n if not obj.raise_alert(): \n obj.fault_date = None\n obj.save()\n print('Data Updated Success')","repo_name":"Fer-Bar/meli-py","sub_path":"market/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8665866684","text":"class Node:\r\n def __init__(self):\r\n self.val=None\r\n self.next=None\r\n\r\n\r\n\r\n\r\nn1=int(input())\r\n\r\nfirst1=None\r\n\r\nv=Node()\r\nv.val=-1\r\nv.next=first1\r\nfirst1=v\r\n#pierwsza to posortowana\r\nfor i in range(n1):\r\n v=Node()\r\n k=int(input())\r\n v.val=k\r\n v.next=first1\r\n first1=v\r\n\r\nv=Node()\r\nv.val=-1\r\nv.next=first1\r\nfirst1=v\r\n\r\nn2=int(input())\r\n\r\nfirst2=None\r\n\r\np=Node()\r\np.val=-1\r\np.next=first2\r\nfirst2=p\r\nfor i in range(n2):\r\n p=Node()\r\n k=int(input())\r\n p.val=k\r\n p.next=first2\r\n first2=p\r\n\r\np=Node()\r\np.val=-1\r\np.next=first2\r\nfirst2=p\r\n\r\nf=1\r\nzap=p\r\nprint(\" \")\r\nwhile (v.next).val!=-1:\r\n p=zap\r\n while (p.next).val!=-1:\r\n if (p.next).val==(v.next).val:\r\n f=0\r\n q=p.next\r\n p.next=q.next\r\n q=None\r\n else:\r\n p=p.next\r\n if f==0:\r\n q=v.next\r\n v.next=q.next\r\n q=None\r\n if f==1:\r\n v=v.next\r\n f=1\r\n\r\n\r\nv=first1\r\np=first2\r\n\r\nwhile v!=None:\r\n print(v.val)\r\n v=v.next\r\n\r\nprint(\" \")\r\nwhile p!=None:\r\n print(p.val)\r\n p=p.next\r\n\r\n\r\n#jeszcze usunac -1\r\n\r\n\r\n\r\n\r\n","repo_name":"sharpshadowoftheship/listy_jednokierunkowe_python","sub_path":"zad28.py","file_name":"zad28.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1839155268","text":"import os\n\nfrom mnist.loader import MNIST\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch import Tensor\nfrom itertools import chain\nimport torchvision.transforms as transforms\n\n\nclass DatasetWrapper:\n\tclass __DatasetWrapper:\n\t\t\"\"\"\n\t\tA standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\n\t\t\"\"\"\n\t\tdef __init__(self, cv_iters):\n\t\t\t\"\"\"\n\t\t\tcreate df for features and labels\n\t\t\tremove samples that are not shared between the two tables\n\t\t\t\"\"\"\n\t\t\tassert cv_iters > 2, 'Cross validation folds must be more than 2 folds'\n\t\t\tself.cv_iters = cv_iters\n\t\t\tmndata = MNIST('data')\n\t\t\tself.features, self.labels = mndata.load_training()\n\t\t\timages, labels = mndata.load_testing()\n\t\t\tself.features = self.features + images\n\t\t\tself.features = np.array(self.features)\n\t\t\tself.labels = self.labels + labels\n\t\t\tself.labels = np.reshape(np.array(self.labels),(-1,1))\n\t\t\tself.labels = self.labels == 8 #6825 samles of 8 in total of 70000 samples\n\n\t\t\tself.shuffle()\n\n\t\tdef shuffle(self):\n\t\t\t\"\"\"\n\t\t\tcategorize sample ID by label\n\t\t\t\"\"\"\n\t\t\t#keys to feature where label is 1\n\t\t\tself.ones = np.array([i for i in range(self.labels.shape[0]) if self.labels[i]==1])\n\t\t\tself.ones = 
np.reshape(self.ones,(self.cv_iters, -1))\n\n\t\t\t#keys to feature where label is 0\n\t\t\tself.zeros = np.array([i for i in range(self.labels.shape[0]) if self.labels[i]==0])\n\t\t\tself.zeros = np.reshape(self.zeros,(self.cv_iters, -1))\n\t\t\t\n\t\t\t#indices of the validation and test sets\n\t\t\tself.CVindex = 0\n\t\t\tself.Testindex = 0\n\n\t\tdef next(self):\n\t\t\t'''\n\t\t\trotate to the next cross validation process\n\t\t\t'''\n\t\t\tnext_test = False\n\t\t\tif self.CVindex < self.cv_iters-1:\n\t\t\t\tself.CVindex += 1\n\t\t\t\tif self.Testindex < self.cv_iters-1:\n\t\t\t\t\tif self.Testindex == self.CVindex:\n\t\t\t\t\t\tself.CVindex += 1\n\t\t\t\telse:\n\t\t\t\t\tif self.Testindex == self.CVindex:\n\t\t\t\t\t\tself.CVindex = 0\n\t\t\t\t\t\tnext_test = True\n\t\t\telse:\n\t\t\t\tself.CVindex = 0\n\t\t\t\tnext_test = True\n\t\t\t\n\t\t\tif next_test:\n\t\t\t\tif self.Testindex < self.cv_iters-1:\n\t\t\t\t\tself.Testindex += 1\n\t\t\t\telse:\n\t\t\t\t\tself.Testindex = 0\n\n\n\tinstance = None\n\tdef __init__(self, cv_iters, shuffle = 0):\n\t\tif not DatasetWrapper.instance:\n\t\t\tDatasetWrapper.instance = DatasetWrapper.__DatasetWrapper(cv_iters)\n\n\t\tif shuffle:\n\t\t\tDatasetWrapper.instance.shuffle()\n\n\tdef __getattr__(self, name):\n\t\treturn getattr(self.instance, name)\n\n\tdef features(self, key):\n\t\t\"\"\"\n\t\tArgs: \n\t\t\tkey:(string) value from dataset\t\n\t\tReturns:\n\t\t\tfeatures in list\t\n\t\t\"\"\"\n\t\treturn DatasetWrapper.instance.features[key]\n\n\tdef label(self, key):\n\t\t\"\"\"\n\t\tArgs: \n\t\t\tkey:(string) the sample key/id\t\n\t\tReturns:\n\t\t\tTrue if the sample is the digit 8, False otherwise\n\t\t\"\"\"\n\t\treturn DatasetWrapper.instance.labels[key]\n\n\tdef next(self):\n\t\tDatasetWrapper.instance.next()\n\n\tdef shuffle(self):\n\t\tDatasetWrapper.instance.shuffle()\n\n\tdef __trainSet(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of training set\n\t\t\"\"\"\n\n\t\tind = list(range(DatasetWrapper.instance.cv_iters))\n\t\tind = np.delete(ind, [DatasetWrapper.instance.CVindex, DatasetWrapper.instance.Testindex])\n\n\t\ttrainSet = np.concatenate((DatasetWrapper.instance.zeros[ind].flatten(), DatasetWrapper.instance.ones[ind].flatten())).flatten()\n\t\tnp.random.shuffle(trainSet)\n\t\treturn trainSet\n\t\n\tdef __valSet(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of validation set\n\t\t\"\"\"\n\n\t\tvalSet = np.concatenate((DatasetWrapper.instance.zeros[DatasetWrapper.instance.CVindex].flatten(), DatasetWrapper.instance.ones[DatasetWrapper.instance.CVindex].flatten())).flatten()\n\t\tnp.random.shuffle(valSet)\n\t\treturn valSet\n\n\tdef __testSet(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of test set\n\t\t\"\"\"\n\n\t\ttestSet = np.concatenate((DatasetWrapper.instance.zeros[DatasetWrapper.instance.Testindex].flatten(), DatasetWrapper.instance.ones[DatasetWrapper.instance.Testindex].flatten())).flatten()\n\t\tnp.random.shuffle(testSet)\n\t\treturn testSet\n\n\tdef getDataSet(self, dataSetType = 'train'):\n\t\t\"\"\"\n\t\tArgs: \n\t\t\tdataSetType: (string) 'train', 'val' or 'test'\t\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of data set\n\t\t\"\"\"\n\n\t\tif dataSetType == 'train':\n\t\t\treturn self.__trainSet()\n\n\t\tif dataSetType == 'val':\n\t\t\treturn self.__valSet()\n\n\t\tif dataSetType == 'test':\n\t\t\treturn self.__testSet()\n\n\t\treturn self.__testSet()\n\t\t\n\n\nclass imageDataset(Dataset):\n\t\"\"\"\n\tA standard PyTorch definition of Dataset which 
defines the functions __len__ and __getitem__.\n\t\"\"\"\n\tdef __init__(self, dataSetType, CV_iters):\n\t\t\"\"\"\n\t\tinitialize DatasetWrapper\n\t\t\"\"\"\n\t\tself.DatasetWrapper = DatasetWrapper(CV_iters)\n\n\t\tself.samples = self.DatasetWrapper.getDataSet(dataSetType)\n\n\t\tself.transformer = [\n\t\t\t\ttransforms.Compose([\n\t\t\t\t\ttransforms.Grayscale(num_output_channels=1), # convert RGB image to greyscale (optional, 1 vs. 3 channels)\n\t\t\t\t\ttransforms.RandomCrop(size = 28, padding = 2), # randomly Crop image \n\t\t\t\t\ttransforms.RandomRotation(10, fill=(0,)), # randomly rotate image by 10 degrees\n\t\t\t\t\ttransforms.ToTensor()]), # transform it into a torch tensor\n\t\t\t\ttransforms.Compose([\n\t\t\t\t\ttransforms.Grayscale(num_output_channels=1), # convert RGB image to greyscale (optional, 1 vs. 3 channels)\n\t\t\t\t\ttransforms.ToTensor()])]\n\n\tdef __len__(self):\n\t\t# return size of dataset\n\t\treturn len(self.samples)\n\n\n\n\tdef __getitem__(self, idx):\n\t\t\"\"\"\n\t\tFetch feature and labels from dataset using index of the sample.\n\n\t\tArgs:\n\t\t idx: (int) index of the sample\n\n\t\tReturns:\n\t\t feature: (Tensor) feature image\n\t\t label: (int) corresponding label of sample\n\t\t\"\"\"\n\t\tsample = self.samples[idx]\n\t\tfrom PIL import Image\n\t\timage = Image.fromarray(np.reshape(self.DatasetWrapper.features(sample).astype(np.uint8), (28,28)), 'L')\n\t\tlabel = self.DatasetWrapper.label(sample)\n\t\timage = self.transformer[int(label)](image)\n\t\treturn image, label\n\n\ndef fetch_dataloader(types, params):\n\t\"\"\"\n\tFetches the DataLoader object for each type in types.\n\n\tArgs:\n\ttypes: (list) has one or more of 'train', 'val'depending on which data is required '' to get the full dataSet\n\tparams: (Params) hyperparameters\n\n\tReturns:\n\tdata: (dict) contains the DataLoader object for each type in types\n\t\"\"\"\n\tdataloaders = {}\n\t\n\tif len(types)>0:\n\t\tfor split in types:\n\t\t\tif split in ['train', 'val', 'test']:\n\t\t\t\tdl = DataLoader(imageDataset(split, params.CV_iters), batch_size=params.batch_size, shuffle=True,\n\t\t\t\t\tnum_workers=params.num_workers,\n\t\t\t\t\tpin_memory=params.cuda)\n\n\t\t\t\tdataloaders[split] = dl\n\telse:\n\t\tdl = DataLoader(imageDataset('',params.CV_iters), batch_size=params.batch_size, shuffle=True,\n\t\t\tnum_workers=params.num_workers,\n\t\t\tpin_memory=params.cuda)\n\n\t\treturn dl\n\n\treturn dataloaders\n\ndef get_next_CV_set(CV_iters):\n\tWrapper = DatasetWrapper(CV_iters)\n\tWrapper.next()\n","repo_name":"Bozhao-Liu/Master_thesis","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70222143306","text":"\"\"\"\nEjercicio 3. 
Programa que compruebe si una variable esta vacia\ny si esta vacia, rellenarla con texto en minusculas y \nmostrarlo en mayusculas.\n\"\"\"\nvariable = input(\"Ingrese valor: \")\n\nif len(variable) <= 0:\n    variable = \"Valor minusculas a mayusculas\"\n    print(variable.upper())\nelse:\n    print(\"No esta vacia la variable: \" + variable)\n\n# -------------------------------\n# Ejercicio por CIV\ntexto = \"\"\n\nif len(texto.strip()) <= 0:\n    texto = \"Hola soy un texto en minusculas\"\n    print(texto.upper())\nelse:\n    print(\"La variable tiene contenido: \" + texto)\n","repo_name":"CiberNefty/Python_dv","sub_path":"11-ejercicios/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"74529334984","text":"import math\n\n__all__ = ['adjust_learning_rate']\n\n\ndef adjust_learning_rate(args, optimizer, epoch):\n    lr = optimizer.param_groups[0]['lr']\n    \"\"\"\n    Sets the learning rate to the initial LR decayed by 10 following schedule\n    \"\"\"\n    if args.lr_decay == 'step':\n        lr = args.lr * (args.gamma**(epoch // args.step))\n    elif args.lr_decay == 'cos':\n        lr = args.lr * (1 + math.cos(math.pi * epoch / args.epochs)) / 2\n    elif args.lr_decay == 'linear':\n        lr = args.lr * (1 - epoch / args.epochs)\n    elif args.lr_decay == 'linear2exp':\n        if epoch < args.turning_point + 1:\n            # learning rate decay as 95%\n            # at the turning point (1 / 95% = 1.0526)\n            lr = args.lr * (1 - epoch / int(args.turning_point * 1.0526))\n        else:\n            lr *= args.gamma\n    elif args.lr_decay == 'schedule':\n        if epoch in args.schedule:\n            lr *= args.gamma\n    else:\n        raise ValueError('Unknown lr mode {}'.format(args.lr_decay))\n\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n    return lr\n","repo_name":"yumingj/Talk-to-Edit","sub_path":"language/utils/lr_schedule.py","file_name":"lr_schedule.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"81"}
{"seq_id":"2321111634","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nf = open(\"Utility_vs_Runtime.txt\",\"r\")\r\nx = []\r\ny = []\r\nparam = []\r\nline = []\r\nlines = f.readlines()\r\nfor i in lines:\r\n\tline = i.split(\",\")\r\n\tparam.append(round(float(line[0].split(\"_\")[0])/328.0,2))\r\n\tx.append(float(line[1]))\r\n\ty.append(round(float(line[2][:-1]),1))\r\nf.close()\r\n\r\nfig, (ax1,ax2) = plt.subplots(nrows =1, ncols =2)\r\nax1.set_title(\"Changing Resolution: Utility runtime tradeoff \")\r\nax1.set_ylabel(\"Run-time\")\r\nax1.set_xlabel(\"Utility\")\r\nax1.scatter(x, y, label='Data')\r\nax1.plot(x, y, linestyle = 'dashed')\r\nfor i in range(len(x)):\r\n\tax1.annotate(\" (\" + str(x[i]) + \",\" + str(y[i]) + \")\" ,(x[i], y[i]), horizontalalignment='right')\r\nax1.grid()\r\n\r\nax2.set_title(\"Changing Resolution: Runtime vs Resolution\")\r\nax2.set_ylabel(\"Run-time\")\r\nax2.set_xlabel(\"Resolution parameter\")\r\nax2.scatter(param,y, label='Data')\r\nax2.plot(param,y, linestyle = 'dashed')\r\nfor i in range(len(y)):\r\n\tax2.annotate(\" (\" + str(param[i]) + \",\" + str(y[i]) + \")\" ,(param[i], y[i]), 
horizontalalignment='right')\r\nax2.grid()\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n","repo_name":"Prat1510/Traffic-Density-Estimation","sub_path":"Subtask3/Method2/Analysis/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41366693056","text":"import datetime as dt\nfrom collections import defaultdict\n\nimport simplejson\nimport markdown\n\nimport persistence as p\nfrom settings import SITE, USE_GIT, CACHE\n\nclass DisableUpdates(object):\n def __init__(self):\n pass\n\n def __enter__(self):\n print('enter')\n updater.disable()\n\n def __exit__(self, type, value, traceback):\n print('exit')\n updater.enable()\n updater.apply_pending_updates()\n\n\nclass Page(p.Model):\n title = p.Field(str)\n order = p.Field(int)\n html_edited = p.Field(bool, default=False)\n html = p.ContentField('html', default='')\n md = p.ContentField('md', default='')\n\n def save(self):\n if not self.html_edited:\n self.html = markdown.markdown(self.md)\n super(Page, self).save()\n updater.update_site('save page:{0}'.format(self.path))\n\n def delete(self):\n super(Page, self).delete()\n updater.update_site('delete page:{0}'.format(self.path))\n\n def change_path(self, new_path):\n super(Page, self).change_path(new_path)\n self.path = new_path\n updater.update_site('change_path page:{0}'.format(self.path))\n\n def __repr__(self):\n return '<{0}: {1}>'.format(type(self).__name__, self.path)\n\n\nclass Post(p.Model):\n title = p.Field(str)\n summary = p.Field(str)\n date = p.Field(dt.datetime, default=dt.datetime.now())\n published = p.Field(bool, default=False)\n html_edited = p.Field(bool, default=False)\n html = p.ContentField('html', default='')\n md = p.ContentField('md', default='')\n\n def save(self):\n if not self.html_edited:\n self.html = markdown.markdown(self.md)\n super(Post, self).save()\n updater.update_posts('save post:{0}'.format(self.path))\n\n def delete(self):\n super(Post, self).delete()\n updater.update_posts('delete post:{0}'.format(self.path))\n\n def change_path(self, new_path):\n super(Post, self).change_path(new_path)\n self.path = new_path\n updater.update_posts('change_path post:{0}'.format(self.path))\n\n def __repr__(self):\n return '<{0}: {1}>'.format(type(self).__name__, self.path)\n\nclass Dir(p.Model):\n title = p.Field(str)\n order = p.Field(int)\n directory = p.DirField()\n\n def save(self):\n super(Dir, self).save()\n updater.update_site('save dir:{0}'.format(self.path))\n\n def delete(self):\n super(Dir, self).delete()\n updater.update_site('delete dir:{0}'.format(self.path))\n\n def change_path(self, new_path):\n super(Dir, self).change_path(new_path)\n self.path = new_path\n updater.update_posts('change_path dir:{0}'.format(self.path))\n\n def __repr__(self):\n return '<{0}: {1}>'.format(type(self).__name__, self.path)\n\nclass Updater(object):\n def __init__(self):\n self.pending_updates = defaultdict(list)\n self.enabled = True\n\n def disable(self):\n self.enabled = False\n\n def enable(self):\n self.enabled = True\n\n def apply_pending_updates(self):\n if 'site' in self.pending_updates:\n self.update_posts('\\n'.join(self.pending_updates.pop('site')))\n if 'posts' in self.pending_updates:\n self.update_posts('\\n'.join(self.pending_updates.pop('posts')))\n\n def update_site(self, message=''):\n if not self.enabled:\n self.pending_updates['site'].append(message)\n return\n print('update_site:{0}'.format(message))\n all_pages_dirs = 
Page.objects.filter(path__not__contains='/')\n dirs = Dir.objects.all()\n all_pages_dirs.extend(dirs)\n all_pages_dirs.sort(key=lambda p: p.order)\n\n json_pages = []\n for pd in all_pages_dirs:\n if isinstance(pd, Page):\n json_pages.append({'title': pd.title, \n 'path': '/' + pd.path,\n 'order': pd.order,\n 'type': 'Page'})\n elif isinstance(pd, Dir):\n dir_pages = Page.objects.filter(path__contains=pd.path)\n dir_pages.sort(key=lambda p: p.order)\n subpages = []\n for d in dir_pages:\n subpages.append({'title': d.title, \n 'path': '/' + d.path,\n 'order': d.order,\n 'type': 'Page'})\n json_pages.append({'title': pd.title, \n 'path': '/' + pd.path,\n 'order': pd.order,\n 'has_subpages': True,\n 'subpages': subpages,\n 'type': 'Dir'})\n\n with open('json/pages.json', 'w') as f:\n simplejson.dump(json_pages, f)\n\n if USE_GIT:\n import git\n repo = git.Repo(SITE)\n repo.git.add('-A')\n if repo.git.status('--porcelain') != '':\n repo.git.add('-A')\n repo.git.commit(m=message)\n\n cache = p.Cache()\n cache.clear()\n\n\n def update_posts(self, message=''):\n if not self.enabled:\n self.pending_updates['posts'].append(message)\n return\n print('update_posts:{0}'.format(message))\n posts = Post.objects.all(order_by='date')\n json_posts = []\n for post in posts:\n date = dt.datetime.strftime(post.date, p.DATE_FMT)\n json_posts.append({'type': 'Post', \n 'title': post.title,\n 'published': post.published,\n 'date': date,\n 'summary': post.summary,\n 'path': '/' + post.path})\n with open('json/posts.json', 'w') as f:\n simplejson.dump(json_posts, f)\n\n if USE_GIT:\n import git\n repo = git.Repo(SITE)\n if repo.git.status('--porcelain') != '':\n repo.git.add('-A')\n repo.git.commit(m=message)\n\n cache = p.Cache()\n cache.clear()\n\nupdater = Updater()\n","repo_name":"markmuetz/flask-1000earths","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36120905315","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n LandsatQATools\n A QGIS plugin\n Decode Landsat QA bands.\n -------------------\n begin : 2017-05-17\n git sha : $Format:%H$\n author : Steve Foga, SGT Inc., Contractor to USGS\n EROS Center\n email : steven.foga.ctr@usgs.gov\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
*\n * *\n ***************************************************************************/\n\"\"\"\nfrom PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication\nfrom PyQt4.QtGui import QAction, QIcon, QColor, QFileDialog\n# Initialize Qt resources from file resources.py\n# import resources\n# Import the code for the dialog\nfrom decode_qa_dialog import LandsatQAToolsDialog\nimport lookup_dict\nimport os\nimport sys\nfrom random import randint\nimport numpy as np\nfrom osgeo import gdal, gdalconst\nfrom qgis.core import *\n\n\nclass LandsatQATools:\n \"\"\"QGIS Plugin Implementation.\"\"\"\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'LandsatQATools_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u'&Landsat QA QGIS Tools')\n self.toolbar = self.iface.addToolBar(u'LandsatQATools')\n self.toolbar.setObjectName(u'LandsatQATools')\n\n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('LandsatQATools', message)\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. 
Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n # Create the dialog (after translation) and keep reference\n self.dlg = LandsatQAToolsDialog()\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToRasterMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n # Configure \"Browse\" button\n self.dlg.rasterBox.clear()\n self.dlg.browseButton.clicked.connect(self.select_output_file)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n icon_path = ':/plugins/LandsatQATools/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Decode QA'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n for action in self.actions:\n self.iface.removePluginRasterMenu(\n self.tr(u'&Landsat QA QGIS Tools'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar\n\n def select_output_file(self):\n \"\"\"\n Enables ability to browse file system for input file.\n :return:\n \"\"\"\n filename = QFileDialog.getOpenFileName(self.dlg, \"Select input file \",\n \"\", '*')\n self.dlg.rasterBox.addItem(filename)\n\n def run(self):\n \"\"\"Run method that performs all the real work\"\"\"\n # show the dialog\n self.dlg.show()\n\n # add all raster layers in current session to UI as potential inputs\n layers = QgsMapLayerRegistry.instance().mapLayers().values()\n for layer in layers:\n if layer.type() == QgsMapLayer.RasterLayer:\n self.dlg.rasterBox.addItem(layer.name(), layer)\n\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # TODO: add logic to auto-detect band and sensor using input_raster\n\n # See if OK was pressed\n if result:\n # get variable names from input\n input_raster = str(self.dlg.rasterBox.currentText())\n band = str(self.dlg.bandBox.currentText())\n sensor = str(self.dlg.sensorBox.currentText())\n rm_low = self.dlg.rmLowBox.isChecked()\n\n # use gdal to get unique values\n ds = gdal.Open(input_raster)\n rb = ds.GetRasterBand(1)\n values = sorted(list(np.unique(np.array(rb.ReadAsArray()))))\n #ds = None\n\n # define lookup table\n bit_flags = lookup_dict.bit_flags\n #qa_values = lookup_dict.qa_values\n\n # convert input_sensor to sensor values used in qa_values\n if sensor == \"Landsat 4-5, 7\":\n sens = \"L47\"\n elif sensor == \"Landsat 8\":\n sens = \"L8\"\n else:\n sys.exit(\"Incorrect sensor provided. 
Input: {0}; Potential \"\n \"options: Landsat 4-5, 7; Landsat 8\"\n .format(sensor))\n\n # get all possible bit values for sensor and band combination\n bit_values = sorted(bit_flags[band][sens].values())\n qa_labels = []\n for row in values:\n bit_bool = []\n for bv in bit_values:\n if len(bv) == 1: # single bit\n bit_bool.append(row & 1 << bv[0] > 0)\n\n elif len(bv) > 1: # 2+ bits\n bits = []\n for b in bv:\n bits.append(row & 1 << b > 0)\n if all(item == True for item in bits):\n bit_bool.append(True)\n else:\n bit_bool.append(False)\n\n else:\n sys.exit(\"No valid bits found for target band.\")\n\n '''\n NEW logic for getting labels using bit wise dictionary\n '''\n # create description of each value based upon all possible bits\n true_bits = [i for (i, bb) in zip(bit_values, bit_bool) if bb]\n\n # if double bits exist, eliminate single bit descriptions,\n # otherwise, the descriptions will duplicate themselves.\n bb_double = [len(i) > 1 for i in true_bits]\n if any(bb_double):\n # get only the double bits\n dbit_nest = [i for (i, db) in zip(true_bits, bb_double)\n if db]\n\n # collapse the bits into a single list\n dbits = [item for sublist in dbit_nest for item in sublist]\n\n # remove matching single bits out of true_bits list\n tbo = []\n for t in true_bits:\n tb_out = []\n for d in dbits:\n if t[0] != d or len(t) > 1:\n tb_out.append(True)\n else:\n tb_out.append(False)\n if all(tb_out):\n tbo.append(t)\n\n # replace true_bits with filtered list\n true_bits = tbo\n\n def get_label(bits):\n \"\"\"\n Generate label for value in attribute table.\n\n :param bits: List of True or False for bit position\n :return: Attribute label\n \"\"\"\n if len(bits) == 0:\n if band == 'radsat_qa':\n return 'No Saturation'\n\n elif band == 'sr_cloud_qa' or band == 'sr_aerosol':\n return 'None'\n\n elif band == 'BQA':\n return 'Not Determined'\n\n # build description from all bits represented in value\n desc = []\n for tb in bits:\n k = next(key for key, value in\n bit_flags[band][sens].items() if value == tb)\n\n # if 'low' labels are disabled, do not add them here\n if rm_low and band != 'BQA' and 'low' in k.lower():\n continue\n\n # if last check, and not radiometric sat, set to 'clear'\n elif rm_low and band == 'BQA' and 'low' in k.lower() \\\n and tb == bits[-1] and \\\n 'radiometric' not in k.lower() and \\\n not desc:\n k = 'Clear'\n\n # if BQA and bit is low radiometric sat, keep it\n elif rm_low and band == 'BQA' and 'low' in k.lower():\n if 'radiometric' not in k.lower():\n continue\n\n # if radsat_qa, handle differently to display cleaner\n if band == 'radsat_qa':\n if not desc:\n desc = \"Band {0} Data Saturation\".format(tb[0])\n\n else:\n desc = \"{0},{1} Data Saturation\".format(\n desc[:desc.find('Data') - 1], tb[0])\n\n # string creation for all other bands\n else:\n if not desc:\n desc = \"{0}\".format(k)\n\n else:\n desc += \", {0}\".format(k)\n\n # final check to make sure something was set\n if not desc:\n desc = 'ERROR: bit set incorrectly'\n\n return desc\n\n # add desc to row description\n qa_labels.append(get_label(true_bits))\n\n '''\n OLD logic for getting lookup values\n\n # use unique raster values (and sensor+band pair) to get defs\n if band == 'radsat_qa':\n qa_labels = {i:qa_values[band][i] for i in qa_values[band] if i\n in list(values)}\n\n elif band == 'pixel_qa' and sens == 'L8': # terrain occl. 
check\n qa_labels = {}\n for i in qa_values[band]:\n if i >= 1024:\n qa_labels[i] = 'Terrain occlusion'\n else:\n qa_labels[i] = qa_values[band][sens][i]\n\n else:\n qa_labels = {i:qa_values[band][sens][i] for i in\n qa_values[band][sens] if i in list(values)}\n\n '''\n\n '''\n Use gdal.RasterAttributeTable to embed qa values in raster\n '''\n # create table\n rat = gdal.RasterAttributeTable()\n\n # get column count (for indexing columns)\n rat_cc = rat.GetColumnCount()\n\n # add 'value' and 'descr' columns to table\n rat.CreateColumn(\"Value\", gdalconst.GFT_Integer,\n gdalconst.GFU_MinMax)\n rat.CreateColumn(\"Descr\", gdalconst.GFT_String,\n gdalconst.GFU_MinMax)\n\n # populate table with contents of 'qa_labels'\n uid = 0\n for val, lab in zip(values, qa_labels):\n\n # 'value' column\n rat.SetValueAsInt(uid, rat_cc, int(val))\n\n # 'descr' column\n rat.SetValueAsString(uid, rat_cc + 1, lab)\n\n uid += 1\n\n # set raster attribute table to raster\n rb.SetDefaultRAT(rat)\n\n\n '''\n METHOD 1: use RasterAttributeTable to display values.\n\n QGIS' UI does not currently support reading Attribute Tables\n embedded in raster datasets. Instead, we'll assign labels and\n random colors to the raster's color palette in the QGIS UI.\n\n Feature request: https://issues.qgis.org/issues/4321\n\n # open raster with QGIS API\n q_raster = QgsRasterLayer(input_raster,\n os.path.basename(input_raster))\n # make sure the raster is valid\n if not q_raster.isValid():\n sys.exit(\"Layer {0} not valid!\".format(input_raster))\n\n\n # save changes and close raster\n ds = None\n\n # add raster to QGIS interface\n QgsMapLayerRegistry.instance().addMapLayer(q_raster)\n '''\n\n '''\n METHOD 2: re-assign colors in QGIS\n '''\n # open raster\n q_raster = QgsRasterLayer(input_raster,\n os.path.basename(input_raster))\n if not q_raster.isValid():\n sys.exit(\"Layer {0} not valid!\".format(input_raster))\n\n # define color shader\n shader = QgsRasterShader()\n\n # define ramp for color shader\n c_ramp_shader = QgsColorRampShader()\n c_ramp_shader.setColorRampType(QgsColorRampShader.EXACT)\n\n # assign a random color to each value, and apply label\n c_ramp_vals = []\n for val, lab in zip(values, qa_labels):\n c_ramp_vals.append(QgsColorRampShader.\n ColorRampItem(\n float(val),\n QColor('#%06x' % randint(0, 2 ** 24)),\n lab))\n\n # apply new color/label combo to color ramps\n c_ramp_shader.setColorRampItemList(c_ramp_vals)\n shader.setRasterShaderFunction(c_ramp_shader)\n\n # apply color ramps to raster\n ps_ramp = QgsSingleBandPseudoColorRenderer(q_raster.dataProvider(),\n 1, shader)\n q_raster.setRenderer(ps_ramp)\n\n # add raster to QGIS interface\n QgsMapLayerRegistry.instance().addMapLayer(q_raster)\n","repo_name":"stevefoga/landsat-tools","sub_path":"landsat-qa-qgis-toolbox/src/decode_qa.py","file_name":"decode_qa.py","file_ext":"py","file_size_in_byte":17705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"71404538505","text":"import time\nfrom socket import *\n# Server\n_host = \"127.0.0.1\"\n_port = 12000\n# make a UDP socket\nsock = socket(AF_INET, SOCK_DGRAM)\n# bind\nsock.bind((_host, _port))\ncountHeartbeat = 1\n\nwhile 1:\n # get the client packet\n message, addr = sock.recvfrom(1024)\n\n message = message.upper().decode()\n myMsg = str(message)\n\n myMsg = myMsg.split(' ')\n received_msg = myMsg[0]\n received_ping_number = myMsg[1]\n total_time = time.time() - float(myMsg[2])\n\n # when the time more then 10 sec -> the client application is 
stopped\n    if total_time > 10:\n        msgRecv = \"heartbeat number : \" + str(countHeartbeat) + \" ,Cause : application of the client has stopped\"\n        print(msgRecv)\n        sock.sendto(msgRecv.encode(), addr)\n\n    # when the time is more than 4 sec and less than 10 sec -> the client application is missing\n    elif total_time > 4:\n        msgRecv = \"heartbeat number : \" + str(countHeartbeat) + \" ,Cause : missing\"\n        print(msgRecv)\n        sock.sendto(msgRecv.encode(), addr)\n\n    # when the client's heartbeat is received on time\n    else:\n        msgRecv = \"heartbeat number : \" + received_ping_number + \" ,Cause : received\"\n        print(msgRecv)\n        sock.sendto(msgRecv.encode(), addr)\n\n    countHeartbeat += 1","repo_name":"yuvalYah/final_network_project","sub_path":"partA/UDPHeartbeatServer.py","file_name":"UDPHeartbeatServer.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"3274175949","text":"import jsonlines\nimport json\nimport nltk\nimport time\nimport re\n\nfrom tqdm import tqdm\n\ndef clean_list(L: list) -> list:\n    \"\"\"\n    Cleans one column of the spelling table from korrekturen.de\n    \"\"\"\n    return_list = []\n    for i in range(len(L)):\n        item = L[i]\n        fixed_item = item.split(\"\\n\")[0]\n        fixed_item = fixed_item.split(\";\")[0]\n\n        # if there are parentheses, remove them together with their content\n        try:\n            p_content = re.search(r'\\((.*?)\\)', fixed_item).group(1)\n        except AttributeError:\n            p_content = \"\"\n        fixed_item = fixed_item.replace(f\"({p_content})\", '')\n        fixed_item = fixed_item.split(\"(\")[0]\n        fixed_item = fixed_item.split(\"/\")[0].strip()\n        \n        # explanatory notes are removed as well\n        if \"Bedeutung\" in fixed_item:\n            fixed_item = fixed_item.replace(\"in übertragener Bedeutung\", \"\")\n            fixed_item = fixed_item.replace(\"In übertragener Bedeutung\", \"\")\n            fixed_item = fixed_item.replace(\"in wörtlicher Bedeutung\", \"\")\n            fixed_item = fixed_item.replace(\"In wörtlicher Bedeutung\", \"\")\n        if \":\" in fixed_item:\n            fixed_item = fixed_item.replace(\", auch:\", \"\")\n        return_list.append(fixed_item)\n    return return_list\n\ndef ortho_raw_to_readable(raw_triplets: dict, path: str):\n\n    all_triples = raw_triplets[\"errors\"]\n\n    # the table is split into columns\n    ancient_orthography = [str(item[0]) for item in all_triples]\n    revolutionized_orthography = [str(item[1]) for item in all_triples]\n    modern_orthography = [str(item[2]) for item in all_triples]\n\n    ortho_dict = {\"orthographies\": {}}\n    for ortho in (ancient_orthography, revolutionized_orthography, modern_orthography):\n        time.sleep(1)\n        clean_orthography = clean_list(ortho)\n        if ortho is ancient_orthography:\n            var_name = \"ancient\"\n        elif ortho is revolutionized_orthography:\n            var_name = \"revolutionized\"\n        elif ortho is modern_orthography:\n            var_name = \"modern\"\n        else:\n            raise KeyError\n        ortho_dict[\"orthographies\"][var_name] = clean_orthography\n\n    with open(f\"{path}/annotation/orthography.json\", \"w\") as f:\n        json.dump(ortho_dict, f)","repo_name":"kobrue02/BERTective","sub_path":"scraping_tools/korrekturen_test.py","file_name":"korrekturen_test.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"8200351512","text":"from random import randint\r\nfrom time import sleep\r\nlista = list()\r\njogos = list()\r\nquant = int(input('Quantos jogos da sena quer fazer '))\r\ntot = 1\r\nwhile tot <= quant:\r\n    cont = 0\r\n    while True:\r\n        nu = randint(1, 
60)\r\n if nu not in lista:\r\n lista.append(nu)\r\n cont = cont + 1\r\n if cont >= 6:\r\n break\r\n lista.sort()\r\n jogos.append(lista[:])\r\n lista.clear()\r\n tot = tot + 1\r\nfor i, p in enumerate(jogos):\r\n print(f'{p}')\r\n sleep(1)","repo_name":"DiegoMaraujo/100-exerc-cios-em-Python-","sub_path":"100Exercicios em Python/ex88.py","file_name":"ex88.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74013289544","text":"#visual recognition service used for student authentication and mood analysis\nimport json\nfrom ibm_watson import VisualRecognitionV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom ibm_watson import ApiException\n\ndef main(args):\n authenticator = IAMAuthenticator('tmbr2hVDD4UjHHyVkpP99wcLoZz1jU2NoHc7vGH7mAFd')\n visual_recognition = VisualRecognitionV3(\n version='2018-03-19',\n authenticator=authenticator\n )\n visual_recognition.set_service_url('https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/c46b05ae-21d1-4ba1-bea0-ede174674834')\n classifier_ids = [\"Mood_Model_101620031\"]\n #share the location of the image file\n url = args[\"mydata\"]\n try:\n classes = visual_recognition.classify(url=url, classifier_ids=classifier_ids)\n myresult = classes.get_result()['images'][0]['classifiers'][0]['classes'][0]['class']\n return {\"msg\": json.dumps(myresult, indent=2)}\n except ApiException as ex:\n print(\"Method failed with status code \" + str(ex.code) + \": \" + ex.message)","repo_name":"chsubhasis/CFC2020_studE","sub_path":"watson_services/visual_recognition.py","file_name":"visual_recognition.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"11897256167","text":"import pygame\nfrom View import Text as t\nfrom View import Board as b\nfrom View import Sign as s\nfrom View import Window as w\n\n\nclass StartView(w.Window):\n def __init__(self, board_array, board_path, signs_paths):\n w.Window.__init__(self)\n\n self.__text_player1 = t.Text(\"Ruch gracza pierwszego:\", 30, self.GREEN_COLOR)\n self.__text_sign1 = t.Text(\"O\", 30, self.BLACK_COLOR)\n self.__text_player2 = t.Text(\"Ruch gracza drugiego:\", 30, self.GREEN_COLOR)\n self.__text_sign2 = t.Text(\"X\", 30, self.BLACK_COLOR)\n self.__text_first_width = self.__text_player1.get_length() + self.__text_sign1.get_length()\n self._text_second_width = self.__text_player2.get_length() + self.__text_sign2.get_length()\n self._board_coordinates = (150, 200)\n self.__p1_turn = True\n\n self.__board = b.Board(board_array, board_path, self._board_coordinates)\n self.__nought = s.Sign(signs_paths[0])\n self.__cross = s.Sign(signs_paths[1])\n\n def draw(self):\n super(StartView, self).draw()\n self.__board.draw(self.screen, (self.__nought, self.__cross))\n if self.__p1_turn:\n self.__text_player1.draw(self.screen, (self.RESOLUTION[1] / 2 - self.__text_first_width / 2, 80))\n self.__text_sign1.draw(self.screen, (self.RESOLUTION[1] / 2 + self.__text_player1.get_length() / 2, 80))\n else:\n self.__text_player2.draw(self.screen, (self.RESOLUTION[1] / 2 - self._text_second_width / 2, 80))\n self.__text_sign2.draw(self.screen, (self.RESOLUTION[1] / 2 + self.__text_player2.get_length() / 2, 80))\n pygame.display.flip()\n\n def get_field_coordinates(self, coordinates):\n return self.__board.getFieldCoordinates(coordinates)\n\n def update_player_turn(self, p1_turn):\n self.__p1_turn 
= p1_turn\n","repo_name":"WalMichal/Tic-tac-toe","sub_path":"View/StartView.py","file_name":"StartView.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"24506324511","text":"import json\nimport jmespath\nimport requests\nfrom ninja import Router\nfrom django.shortcuts import get_object_or_404\nfrom backend.common import response, Error, model_to_dict\nfrom cases.models import TestCase, Module, TestExtract\nfrom cases.apis.api_schema import CaseIn, CaseDebugIn, CaseAssertIn, CaseOut, checkExtractIn\nfrom cases.apis.common import get_replace_string\n\nrouter = Router(tags=[\"cases\"])\n\n\n@router.post(\"/\", auth=None)\ndef create_case(request, data: CaseIn):\n \"\"\"\n 创建用例\n auth=None 该接口不需要认证\n \"\"\"\n module = get_object_or_404(Module, id=data.module_id)\n\n case = TestCase.objects.create(\n name=data.name,\n module_id=data.module_id,\n url=data.url,\n method=data.method,\n header=data.header,\n params_type=data.params_type,\n params_body=data.params_body,\n response=data.response,\n assert_type=data.assert_type,\n assert_text=data.assert_text\n )\n\n for extract in data.extract_list:\n if extract[\"name\"] == \"\" or extract[\"value\"] == \"\":\n continue\n extract_obj = TestExtract.objects.filter(\n project_id=module.project_id, name=extract[\"name\"])\n if len(extract_obj) > 0:\n extract_obj.extract = extract[\"value\"]\n else:\n TestExtract.objects.create(\n project_id=module.project_id,\n case_id=case.id,\n name=extract[\"name\"],\n extract=extract[\"value\"]\n )\n return response(item=case)\n\n\n@router.put(\"/{case_id}/\", auth=None)\ndef update_case(request, case_id: int, payload: CaseIn):\n \"\"\"\n 更新用例\n auth=None 该接口不需要认证\n \"\"\"\n case = get_object_or_404(TestCase, id=case_id)\n for attr, value in payload.dict().items():\n setattr(case, attr, value)\n case.save()\n\n module = get_object_or_404(Module, id=payload.module_id)\n for extract in payload.extract_list:\n if extract[\"name\"] == \"\" or extract[\"value\"] == \"\":\n continue\n extract_obj = TestExtract.objects.filter(\n project_id=module.project_id, name=extract[\"name\"])\n if len(extract_obj) > 0:\n extract_obj.extract = extract[\"value\"]\n else:\n TestExtract.objects.create(\n project_id=module.project_id,\n case_id=case.id,\n name=extract[\"name\"],\n extract=extract[\"value\"]\n )\n return response()\n\n\n@router.delete(\"/{case_id}/\", auth=None)\ndef delete_case(request, case_id: int):\n \"\"\"\n 删除用例\n auth=None 该接口不需要认证\n \"\"\"\n case = get_object_or_404(TestCase, id=case_id)\n case.is_delete = True\n case.save()\n\n return response()\n\n\n@router.get(\"/{case_id}/\", auth=None)\ndef case_detail(request, case_id: int):\n \"\"\"\n 获取用例详情\n auth=None 该接口不需要认证\n \"\"\"\n case = get_object_or_404(TestCase, id=case_id)\n if case.is_delete is True:\n return response(error=Error.CASE_DELETE_ERROR)\n test_extract = TestExtract.objects.filter(case_id=case.id)\n extract_list = []\n for extract in test_extract:\n extract_list.append({\n \"name\": extract.name,\n \"value\": extract.extract\n })\n case_dict = model_to_dict(case)\n case_dict[\"module_id\"] = case_dict[\"module\"]\n case_dict[\"extract_list\"] = extract_list\n return response(item=case_dict)\n\n\n@router.post(\"/debug\", auth=None)\ndef debug_case(request, data: CaseDebugIn):\n \"\"\"\n 用例调试\n auth=None 该接口不需要认证\n \"\"\"\n url = data.url\n method = data.method\n header = data.header\n params_type = data.params_type\n params_body = 
data.params_body\n\n header = json.loads(header)\n params_body = json.loads(params_body)\n\n url = get_replace_string(url)\n\n header_new = {}\n for key, value in header.items():\n header_new[key] = get_replace_string(value)\n\n params_body_new = {}\n for key, value in params_body.items():\n params_body_new[key] = get_replace_string(value)\n\n resp = \"\"\n if method == \"get\":\n resp = requests.get(url, headers=header_new,\n params=params_body_new).text\n\n if method == \"post\":\n if params_type == \"form\":\n resp = requests.post(url, headers=header_new,\n data=params_body_new).text\n elif params_type == \"json\":\n resp = requests.post(url, headers=header_new,\n json=params_body_new).text\n else:\n return response(error=Error.CASE_PARAMS_ERROR)\n\n if method == \"put\":\n if params_type == \"form\":\n resp = requests.put(url, headers=header_new,\n data=params_body_new).text\n elif params_type == \"json\":\n resp = requests.put(url, headers=header_new,\n json=params_body_new).text\n else:\n return response(error=Error.CASE_PARAMS_ERROR)\n\n if method == \"delete\":\n if params_type == \"form\":\n resp = requests.delete(\n url, headers=header_new, data=params_body_new).text\n elif params_type == \"json\":\n resp = requests.delete(\n url, headers=header_new, json=params_body_new).text\n else:\n return response(error=Error.CASE_PARAMS_ERROR)\n\n return response(item={\"response\": resp})\n\n\n@router.post(\"/assert\", auth=None)\ndef assert_case(request, data: CaseAssertIn):\n \"\"\"\n 用例断言\n auth=None 该接口不需要认证\n \"\"\"\n resp = data.response\n assert_type = data.assert_type\n assert_text = data.assert_text\n\n if assert_type == \"include\":\n if assert_text in resp:\n return response()\n else:\n return response(success=False)\n elif assert_type == \"equal\":\n if assert_text == resp:\n return response()\n else:\n return response(success=False)\n\n return response()\n\n\n@router.post(\"/extract\", auth=None)\ndef check_extract(request, data: checkExtractIn):\n \"\"\"\n 检查用例提取器\n auth=None 该接口不需要认证\n \"\"\"\n resp = json.loads(data.response)\n extract_list = data.extractList\n print(type(resp), resp)\n print(extract_list)\n for extract in extract_list:\n extract_name = extract[\"name\"]\n extract_value = extract[\"value\"]\n if extract_name == \"\" or extract_value == \"\":\n continue\n print(extract_value, type(extract_value))\n result = jmespath.search(extract_value, resp)\n if result is None:\n return response(error={\"10057\": f\"提取器错误: {extract_value}\"})\n\n return response()\n","repo_name":"defnngj/test_dev06","sub_path":"hornet/backend/cases/apis/case_api.py","file_name":"case_api.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"12487151736","text":"class Solution:\n def removeElement(self, nums, val):\n \"\"\"\n :type nums: List[int]\n :type val: int\n :rtype: int\n \"\"\"\n position=0\n for i in nums:\n if(i!=val):\n nums[position]=i\n position+=1\n return position","repo_name":"cosJin/LeetCode","sub_path":"old/Array/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5189262351","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Restaurant, FoodDish, Cuisine\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\n\n\ndef home(request):\n \"\"\"\n This function is responsible 
for rendering home page with passing context dictionary to template\n\n takes http request as argument\n\n \"\"\"\n\n # get search query from http request\n query = request.GET.get('q') if request.GET.get('q') != None else ''\n\n # food_items based on search query \n food_items = FoodDish.objects.filter(\n Q(name__icontains = query)\n ).order_by(\n '-restaurant__average_rating',\n '-restaurant__total_reviews').values('name','price','restaurant_id','restaurant__name','restaurant__average_rating')\n\n # paginate the results\n paginator = Paginator(food_items, 100)\n\n page_number = request.GET.get('page')\n\n page_obj = paginator.get_page(page_number)\n \n context = {\n 'food_items': page_obj,\n 'query': query,\n }\n \n return render(request, 'home.html', context)\n\ndef restaurant_view(request,id):\n \"\"\"\n This function is responsible for rendering restaurant page with passing context dictionary to template\n\n takes http request as first argument\n takes restaurant id as second argument\n\n \"\"\"\n\n # get the restaurant based on the id\n restaurant = Restaurant.objects.get(id = id)\n\n # find all the dishes served by the restaurant\n dishes = FoodDish.objects.filter(restaurant_id = id)\n\n #cuisines available in the restaurant\n cuisines = Cuisine.objects.filter(restaurant = restaurant)\n context = {\n 'restaurant' : restaurant,\n 'food_items' : dishes,\n 'cuisines': cuisines\n }\n\n return render(request, 'restaurant.html', context)","repo_name":"Nandan26/FoodItems-assignment","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18936536942","text":"\"\"\"Load data from mat or npz recording files.\"\"\"\n\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport soundfile as sf\nfrom measuretf.utils import plot_rec\n\n\ndef load_recording(fname, n_out=1, n_avg=1, cut=True):\n \"\"\"Load recording and split according to number of sources and averages.\n\n Parameters\n ----------\n fname : str\n Name of the mat file. Can also pass open file-like object. 
Reference\n channel is assumed to be recorded in first channel.\n n_out : int, optional\n Number of simultaneously, in series recorded output channels.\n n_avg : int, optional\n Number of recorded averages.\n\n Returns\n -------\n recs : ndarray, shape (n_in, n_out, n_avg, n_tap)\n Recordings, sliced into averages and output channels.\n fs : int\n Sampling frequency.\n\n \"\"\"\n fname = Path(fname)\n\n if fname.suffix == '.npz':\n with np.load(fname, allow_pickle=True) as data:\n fs = data[\"fs\"]\n orecs = data[\"recs\"] # has shape n_in x n_tap\n else:\n orecs, fs = sf.read(str(fname)) # sf.read needs str\n\n if orecs.ndim == 1:\n orecs = orecs[None]\n\n if orecs.ndim == 2:\n if cut:\n n_in, n_otap = orecs.shape\n n_tap = n_otap / n_out / n_avg\n if n_tap.is_integer():\n n_tap = int(n_tap)\n else:\n raise ValueError(\"Can't split recording: n_tap is not an integer\")\n\n recs = np.zeros((n_in, n_out, n_avg, n_tap))\n for i in range(n_in):\n # shape (ntaps*n_avg*n_out, ) -> (n_out, ntaps*n_avg)\n temp = np.array(np.split(orecs[i], n_out))\n temp = np.array(np.split(temp, n_avg, axis=-1)) # (n_avg, n_out, n_taps)\n temp = np.moveaxis(temp, 0, 1) # (n_out, n_avg, n_taps)\n recs[i] = temp\n else:\n recs = orecs[..., 0][None, None, None, :]\n\n elif orecs.ndim == 3:\n recs = orecs[None]\n elif orecs.ndim == 4:\n recs = orecs\n else:\n raise ValueError(f\"orces.ndim == {orecs.ndim}!\")\n\n return recs, fs\n\n\ndef convert_wav_to_recording(\n wavfname,\n recname,\n ref_ch,\n n_out,\n add_datetime_to_name=False,\n description=None,\n plot=False,\n):\n \"\"\"Convert WAV recording to npz recording. Split for fit.\n\n Parameters\n ----------\n wavfname : str or path\n Path to WAV file.\n recname : str or path\n Save at this path.\n ref_ch : int\n Channel of inputs that is reference.\n n_out : int\n Number of separate output channels that were recorded in this one WAV file.\n The WAV file will be split equally into n_out segments.\n add_datetime_to_name : bool, optional\n Description\n description : None, optional\n Description\n plot : bool, optional\n Description\n\n \"\"\"\n recs, fs = sf.read(wavfname, always_2d=True)\n recs = recs.T # (n_ch, n_samples) -> (n_samples, n_ch)\n\n # remove samples for clean split\n remove_samples = recs.shape[0] % n_out\n if remove_samples > 0:\n print(\"removing samples:\", remove_samples, \"nt:\", recs.shape[0])\n recs = recs[:-remove_samples, :]\n\n if plot:\n fig = plot_rec(fs, recs.T)\n fig.suptitle(\"before split\")\n\n recs = np.stack(np.split(recs.T, n_out, axis=1), axis=0)\n\n if plot:\n fig = plot_rec(fs, recs)\n fig.suptitle(\"after split\")\n plt.show()\n\n if add_datetime_to_name:\n fn = recname + \" - {}\".format(datetime.now())\n else:\n fn = recname\n\n np.savez(\n fn,\n recs=recs,\n ref_ch=ref_ch,\n fs=fs,\n datetime=datetime.now(),\n description=description,\n sound=None,\n )\n return recs, fs\n","repo_name":"fhchl/measuretf","sub_path":"measuretf/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"34682828626","text":"from django.db import transaction\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.forms import inlineformset_factory\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import ListView, DetailView, CreateView, DeleteView, 
UpdateView\n\nfrom basketapp.models import BasketItem\nfrom ordersapp.forms import OrderItemForm, OrderForm\nfrom ordersapp.models import Order, OrderItem\n\n\nclass OrderList(ListView):\n    model = Order\n\n    def get_queryset(self):\n        return Order.objects.filter(user=self.request.user)\n\n\nclass OrderItemsCreate(CreateView):\n    model = Order\n    fields = []\n    success_url = reverse_lazy('ordersapp:orders_list')\n\n    def get_context_data(self, **kwargs):\n        data = super().get_context_data(**kwargs)\n        OrderFormSet = inlineformset_factory(Order,\n                                             OrderItem,\n                                             form=OrderItemForm,\n                                             extra=1)\n\n        if self.request.POST:\n            formset = OrderFormSet(self.request.POST)\n        else:\n            basket_items = BasketItem.get_items(self.request.user)\n            if len(basket_items):\n                OrderFormSet = inlineformset_factory(Order,\n                                                     OrderItem,\n                                                     form=OrderItemForm,\n                                                     extra=len(basket_items))\n                formset = OrderFormSet()\n\n                for form, basket_item in zip(formset.forms, basket_items):\n                    form.initial['product'] = basket_item.product\n                    form.initial['quantity'] = basket_item.quantity\n                    form.initial['price'] = basket_item.product.price\n\n                basket_items.delete()\n            else:\n                formset = OrderFormSet()\n\n        data['orderitems'] = formset\n        return data\n\n    def form_valid(self, form):\n        context = self.get_context_data()\n        orderitems = context['orderitems']\n\n        with transaction.atomic():\n            form.instance.user = self.request.user\n            self.object = form.save()\n            if orderitems.is_valid():\n                orderitems.instance = self.object\n                orderitems.save()\n\n        # delete the order if it ended up empty\n        if self.object.get_total_cost() == 0:\n            self.object.delete()\n\n        return super().form_valid(form)\n\n\nclass OrderDelete(DeleteView):\n    model = Order\n    success_url = reverse_lazy('ordersapp:orders_list')\n\n\nclass OrderRead(DetailView):\n    model = Order\n\n\nclass OrderUpdate(UpdateView):\n    model = Order\n    fields = []\n    success_url = reverse_lazy('ordersapp:orders_list')\n\n    def get_context_data(self, **kwargs):\n        data = super().get_context_data(**kwargs)\n        OrderFormSet = inlineformset_factory(Order,\n                                             OrderItem,\n                                             form=OrderItemForm,\n                                             extra=1)\n\n        if self.request.POST:\n            formset = OrderFormSet(self.request.POST, instance=self.object)\n        else:\n            formset = OrderFormSet(instance=self.object)\n            for form in formset.forms:\n                if form.instance.pk:\n                    form.initial['price'] = form.instance.product.price\n\n        data['orderitems'] = formset\n        return data\n\n    def form_valid(self, form):\n        context = self.get_context_data()\n        orderitems = context['orderitems']\n\n        with transaction.atomic():\n            self.object = form.save()\n            if orderitems.is_valid():\n                orderitems.instance = self.object\n                orderitems.save()\n\n        # delete the order if it ended up empty\n        if self.object.get_total_cost() == 0:\n            self.object.delete()\n\n        return super().form_valid(form)\n\n\ndef order_forming_complete(request, pk):\n    order = get_object_or_404(Order, pk=pk)\n    order.status = Order.SENT_TO_PROCEED\n    order.save()\n\n    return HttpResponseRedirect(reverse('ordersapp:orders_list'))\n\n\n","repo_name":"aboronilov/django_basic_furniture_store","sub_path":"ordersapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"17427433490","text":"from ui.qgs_feature import Building, Floor, Room, Landmark, Path\nfrom PyQt5.QtCore import pyqtSignal, QObject\nfrom rpn import constants\n\nclass QgsMap(QObject):\n    \"\"\"Encapsulates the current QGIS Map\"\"\"\n\n    map_created = pyqtSignal()\n    \"\"\"Emits an event when a new map is created\"\"\"\n\n    
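# Consumers subscribe to these signals in the usual Qt way, e.g. (handler\n    # name hypothetical): my_map.map_created.connect(refresh_view)\n\n    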
levels_changed = pyqtSignal()\n    \"\"\"Emits an event when the currently viewed level is changed\"\"\"\n\n    def __init__(self, name=\"Untitled\"):\n        \"\"\"Constructor.\n        :param name: The name of the Map, set to \"Untitled\" by default\n        :type name: str\n        \"\"\"\n        super().__init__()\n        self.layers = None\n        self.name = name\n        # Set the Map's CRS to EPSG:3857 for display purposes\n        self.crs = constants.EPSG_3857\n\n    def new_map(self, name, layers):\n        \"\"\"Creates a new Map.\n        :param name: The name of the new Map\n        :type name: str\n        :param layers: an array containing layers for the new Map\n        :type layers: list of Layer\n        \"\"\"\n        self.name = name\n        self.layers = layers\n        # Connect the levels_changed signal of the rooms layer\n        # to the levels_changed signal of the Map\n        self.layers['rooms'].levels_changed.connect(\n            lambda: self.levels_changed.emit()\n        )\n        # Emit the map_created signal\n        self.map_created.emit()\n\n    def get_name(self):\n        \"\"\"Getter for name property\n        \n        :returns: The Map's name.\n        :rtype: str\n        \"\"\"\n        return self.name\n\n    def get_buildings(self, bbox=None):\n        \"\"\"Get all of the buildings in the Map.\n        :param bbox: The area containing the buildings,\n            the default is None (the whole map) \n        :type bbox: QgsRectangle\n\n        :note: The building objects are created new every time this method is\n            called. You should not store them as they will not be updated\n            if the map is changed.\n\n        :returns: A list of buildings\n        :rtype: list of Building\n        \"\"\"\n        if self.layers is None:\n            return []\n\n        layer = self.layers['buildings']\n        floors_layer = self.layers['rooms']\n        lm_layer = self.layers['landmarks']\n\n        buildings = [Building(b, layer.fields) for b in layer.get_features(bbox=bbox)]\n\n        for building in buildings:\n            box = building.get_bounding_box()\n            floor_nos = floors_layer.get_levels(box)\n            floors = [Floor(f, floors_layer) for f in floor_nos]\n            building.add_floors(floors)\n            for floor in floors:\n                query = '\"level\" = \\'{}\\''.format(floor.get_number())\n                rooms = [Room(r, floors_layer.fields) for r in floors_layer.get_features(query=query, bbox=box)]\n                floor.add_rooms(rooms)\n                query += ' and \"indoor\" = \\'yes\\''\n                landmarks = [Landmark(l, lm_layer.fields) for l in lm_layer.get_features(query=query, bbox=box)]\n                floor.add_landmarks(landmarks)\n\n        return buildings\n\n    def get_landmarks(self):\n        \"\"\"Get all of the Landmarks in the Map not contained\n        within Buildings.\n\n        :returns: A list of Landmarks\n        :rtype: list of Landmark\n        \"\"\"\n        layer = self.layers['landmarks']\n        query = '\"indoor\" = \\'no\\''\n        return [Landmark(f, layer.fields) for f in layer.get_features(query=query)]\n\n    def get_paths(self):\n        \"\"\"Get all of the Paths in the Map.\n        :returns: A list of Paths\n        :rtype: list of Path\n        \"\"\"\n        layer = self.layers['paths']\n        return [Path(layer, f) for f in layer.get_features()]\n\n    def get_level(self):\n        return self.layers['rooms'].get_level()\n\n    def get_layers(self):\n        \"\"\"Getter for the layers property.\n        :returns: A list of Layers\n        :rtype: list of Layer\n        \"\"\"\n        return self.layers\n\n    def add_feature(self, layer, fields, geom):\n        \"\"\"Adds a feature to the Map.\n        :param layer: The Layer to add the feature to \n        :type layer: Layer\n        :param fields: The Fields to add to the new feature (must be\n            supported by the layer) \n        :type fields: dict of (str, str)\n        :param geom: The geometry of the feature \n        :type geom: list of ()\n        :returns: The new feature\n        :rtype: QgsFeature\n        \"\"\"\n        return self.layers[layer].add_feature(fields, geom)\n\n    def set_crs(self, crs):\n        
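\"\"\"Setter for the crs property. Applies the new CRS to the Map and\n        to all of its Layers.\n        :param crs: The new coordinate reference system (e.g. constants.EPSG_3857)\n        \"\"\"\n        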
self.crs = crs\n        for name, layer in self.layers.items():\n            layer.set_crs(crs)\n\n    def get_crs(self):\n        return self.crs","repo_name":"ReferencePointNavigation/MapCreator","sub_path":"map_builder/ui/qgs_map.py","file_name":"qgs_map.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"13373044058","text":"import argparse\nimport os\nimport pickle\nimport sys\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\nfrom variables import MAPPING_LANG_CODE_TO_TEXT\n\n\ndef get_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--plot-file-path\", type=Path, required=True)\n    parser.add_argument(\"--statistics-pickle-file\", type=Path, required=True)\n    args = parser.parse_args()\n    return args\n\n\ndef filter_out_empty_doc(df, lang):\n    len_before = len(df)\n    df_filtered = df.drop(df[df[\"bytes per document\"] == 0].index)\n    len_after = len(df_filtered)\n    if len_before != len_after:\n        df_debug = df.drop(df[df[\"bytes per document\"] != 0].index)\n\n        print(\n            f\"len_before: {len_before} | len_after: {len_after} | lang: {lang} | datasets: {pd.unique(df_debug['dataset'])}\"\n        )\n    return df_filtered\n\n\ndef process_df_per_lang(all_data_point, lang):\n    data_points = all_data_point[lang]\n    df = pd.DataFrame(\n        data_points, columns=[\"mean\", \"median\", \"bytes per document\", \"dataset\"]\n    )\n    df = (\n        df.set_index([\"mean\", \"median\", \"dataset\"])\n        .apply(lambda x: x.explode())\n        .reset_index()\n    )\n\n    df = df.astype({\"bytes per document\": \"float\"})\n    df[\"lang\"] = MAPPING_LANG_CODE_TO_TEXT[lang]\n\n    df_filtered = filter_out_empty_doc(df, lang)\n    return df_filtered[[\"lang\", \"bytes per document\"]]\n\n\ndef get_order(df_all):\n    df_all_median = df_all.groupby(\"lang\").median()\n    print(\"Median all: \", df_all_median)\n\n    df_all_median = df_all_median.reset_index()\n\n    df_all_median = df_all_median.sort_values(by=\"bytes per document\", ascending=False)\n    return df_all_median[\"lang\"].to_list()\n\n\ndef main():\n    args = get_args()\n\n    with open(args.statistics_pickle_file, \"rb\") as handle:\n        all_data_point = pickle.load(handle)\n\n    sub_df = []\n    for _, lang in enumerate(all_data_point.keys()):\n        print(f\"Processing {lang}\")\n        sub_df.append(process_df_per_lang(all_data_point, lang))\n\n    df_all = pd.concat(sub_df, ignore_index=True)\n\n    order = get_order(df_all)\n    print(\"Order: \", order)\n\n    width_box = 0.4\n    _, ax = plt.subplots(figsize=(len(order) * width_box + 2, 6))\n    ax.set_yscale(\"log\")\n    plt.xticks(rotation=40, ha=\"right\")\n    sns.boxplot(\n        x=\"lang\",\n        y=\"bytes per document\",\n        palette=\"rainbow\",\n        data=df_all,\n        ax=ax,\n        order=order,\n        width=width_box,\n    )\n    ax.set_ylabel(\"Number of bytes per document (log scale)\")\n    ax.set_xlabel(\"dataset per language\")\n\n    ax.xaxis.set_label_text(\"\")\n    plt.tight_layout()\n    plt.savefig(args.plot_file_path, dpi=300)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"bigscience-workshop/data-preparation","sub_path":"analysis/document-sizes/python-scripts/plot_per_lang.py","file_name":"plot_per_lang.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"81"}{"seq_id":"73379297864","text":"from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n# Get the long 
description from the README file\nwith open(path.join(here, 'README.md'), 'r') as f:\n    long_description = f.read()\n\nsetup(\n    name='sbio_project',\n    version='1.0',\n    description='project to compute geometric feature of RepeatDB protein',\n    # url\n    author='Federico Baldo',\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n\n        'Programming Language :: Python :: 2.7'\n    ],\n    author_email='federico.baldo.1@studenti.unipd.it',\n    packages=find_packages(),\n    install_requires=[\"biopython\", \"numpy\", \"pymol\", \"pyinquirer\"],\n    python_requires='>=2.7',\n    entry_points={ \n        'console_scripts': [\n            'main=bio_project.cli:main',\n        ],\n    }\n)","repo_name":"Federic0Bald0/structural_bioinformatics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"36693861866","text":"from django.conf.urls import patterns, url\n\nfrom main import views\n\nurlpatterns = patterns('',\n\t\t\t#Home page\n\t\t\turl(r'^$',views.IndexView.as_view(),name='index'),\n\t\t\t#About\n\t\t\turl(r'^about$',views.AboutView.as_view(),name='about'),\n\t\t\t#Tools Used\n\t\t\turl(r'^tools$',views.ToolsView.as_view(),name='tools'),\n\t\t\t#Algorithm\n\t\t\turl(r'^algo$',views.AlgoView.as_view(),name='algo'),\n\t\t\t)\n","repo_name":"notpratheek/stockmemaybe","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"25241950738","text":"#\n# @lc app=leetcode.cn id=1431 lang=python3\n#\n# [1431] Kids With the Greatest Number of Candies\n#\n\n# @lc code=start\nclass Solution:\n    def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:\n        maxcandie = max(candies)\n        res = []\n        for item in candies:\n            if item + extraCandies >= maxcandie:\n                res.append(True)\n            else:\n                res.append(False)\n        return res\n\n# @lc code=end\n","repo_name":"LeungLoh/algorithm","sub_path":"LeetCode/1431.拥有最多糖果的孩子.py","file_name":"1431.拥有最多糖果的孩子.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"73239898505","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 9 12:36:35 2023\n\n@author: Ashleigh Womack (UCT)\n\nWavelet Analysis of the Drifter\n    - This yields information about the time series and frequencies together. By\n      decomposing a time series into a time-frequency space, it allows for the \n      determination of the dominant modes of variability, and how these modes vary\n      in time. \n    \nFirst run script: wavelet_functions.py \n\"\"\"\n\n#%%\nfrom matplotlib.gridspec import GridSpec\nimport matplotlib.ticker as ticker\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy import signal\nfrom waveletFunctions import wavelet, wave_signif\nfrom matplotlib.dates import DayLocator,DateFormatter\n \n#%% Read in the buoy drifter data \nBDIR = '../data/'\ntheBuoy = 'ISVP1' \ndrifter = pd.read_csv(BDIR+theBuoy+'.csv', index_col='time', parse_dates=True)\n\n#%% If NOT applying the high pass filter\n# - Use these variables for the wavelet analysis\n# First index is NaN - therefore skip it. 
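\n# Note (a quick sanity check): with hourly sampling, dt = 1 h, so the\n# Nyquist frequency is 1/(2*dt) = 0.5 cycles per hour - periods as short as\n# 2 hours are resolvable in the spectra computed below.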
\n\nx = drifter['u (m/s)'][1:]      # zonal velocity\ny = drifter['v (m/s)'][1:]      # meridional velocity\nvar_x = np.std(x, ddof=1) ** 2  # variance of x (squared standard deviation)\nvar_y = np.std(y, ddof=1) ** 2  # variance of y\nt = drifter.index[1:]\ndt = 1                          # sampling interval of drifter (in hours)\n\n#%% Application of a Butterworth High-Pass Filter \n# This is a 12th order filter used to remove all frequencies below the cutoff\n# frequency of 0.04 h^-1 (a period of about 25 hours, i.e. roughly the daily\n# frequency), allowing the sub-daily frequencies to become more significant. \n\ndt = 1                                         # sampling intervals in hours \nFs = 1/dt                                      # sampling frequency (1/dt)\nb, a = signal.butter(12, 0.04, 'high', fs=Fs)  # 12th order with 0.04 h-1 cutoff frequency \nw, h = signal.freqz(b, a)                      # frequency response of the digital filter\nx_filt = signal.filtfilt(b, a, drifter['u (m/s)'][1:], padtype=None)   # filter data series of zonal component (or speed)\ny_filt = signal.filtfilt(b ,a, drifter['v (m/s)'][1:], padtype=None)   # filter data series of meridional component\n\nx = x_filt \ny = y_filt \nvar_x = np.std(x, ddof=1) ** 2                 # variance of x\nvar_y = np.std(y, ddof=1) ** 2                 # variance of y\nt = drifter.index[1:]\n\n#%% Load the variables for the Wavelet transform\nNFFT = np.size(x)                 # FFT length\npad = 1                           # pad the time series with zeroes (recommended)\ndj = 0.25                         # this will do 4 sub-octaves per octave\ns0 = 2 * dt                       # the smallest scale of the wavelet\nj1 = 7 / dj                       # does 7 powers-of-two with dj sub-octaves each\nlag1 = 0.72                       # lag-1 autocorrelation for red noise background\nprint(\"lag1 = \", lag1) \nmother = 'MORLET'                 # choice of mother wavelet function\n\n# Complete the wavelet transform:\nwave, period, scale, coi = wavelet(x, dt, pad, dj, s0, j1, mother)     # change to y\npower = (np.abs(wave)) ** 2                                            # compute wavelet power spectrum\nwt_spectrum = (np.nansum(power, axis=1) /NFFT)                         # time-average over all times\n\n# Significance levels:\nsignif = wave_signif(([var_x]), dt=dt, sigtest=0, scale=scale,         # change to var_y\n                     lag1=lag1, mother=mother)\nsig95 = signif[:, np.newaxis].dot(np.ones(NFFT)[np.newaxis, :])        # expand signif --> (J+1)x(N) array\nsig95 = power / sig95                                                  # where ratio > 1, power is significant\n\n# Wavelet spectrum & significance levels:\ndof = NFFT - scale                            # the -scale corrects for padding at edges\nsignif_wt = wave_signif(var_x, dt=dt, scale=scale, sigtest=1,          # change to var_y\n                        lag1=lag1, dof=dof, mother=mother)\n\n# Repeat for the meridional component of the drifter - where it has been stated \"change to y or var_y\"\n\n#%% Plotting Wavelet Power Spectrum and Wavelet Spectrum - Plots zonal component (change to meridional)\nmpl.rcParams['font.size'] = 30\nfig = plt.figure(figsize=(30, 23))\ngs = GridSpec(3, 4, wspace=0.025, hspace=0.05)\nplt.subplots_adjust(left=0.1, bottom=0.05, right=0.9, top=0.95, wspace=0, hspace=0)\n\n# Wave Power Spectrum \nplt1 = plt.subplot(gs[1, 0:3])\nCS = plt.contourf(t, period, power, cmap=plt.cm.BuGn)  # , vmin=0, vmax=2.4) \nplt.xlabel('Date')\nplt.ylabel('Period (hours)')\nplt.title('Wavelet Power Spectrum', fontsize= 30) \nplt.contour(t, period, sig95, [-99, 1], colors='k')    # significance contour, levels at -99 (fake) and 1 (95% signif) \n# cone-of-influence (coi), anything \"below\" is dubious - edge effects \nplt.plot(t, coi[1:], 'r', linewidth=2.5)               # remove the 1 if sizes don't match \nplt1.set_yscale('log', base=2, subs=None)\nplt1.xaxis.set_major_locator(DayLocator(interval=10))\nplt1.xaxis.set_major_formatter(DateFormatter('%d-%m'))\nplt.ylim(2.5,256)\nax = 
plt.gca().yaxis\nax.set_major_formatter(ticker.ScalarFormatter())\nplt1.ticklabel_format(axis='y', style='plain')\nplt1.invert_yaxis()\nplt.colorbar(CS)\n# if you need to scale the colorbar\n#from matplotlib.cm import ScalarMappable\n#cb = plt.colorbar(ScalarMappable(norm=CS.norm,cmap=plt.cm.BuGn),\n#                  ticks=[0, 0.3, 0.6, 0.9, 1.2, 1.5, 1.8, 2.1, 2.4],\n#                  boundaries=np.arange(0, 2.5, 0.3))\n\n# Wavelet Spectrum\nmpl.rcParams['font.size'] = 30\nplt2 = plt.subplot(gs[1, -1])\nplt.plot(wt_spectrum, period, linewidth=3, color='mediumseagreen')  # wavelet power spectrum\nplt.plot(signif_wt, period, '--', linewidth=3, color='black')       # wavelet significance level \nplt.xlabel('Power (variance)') \nplt.title('Wavelet Spectrum', fontsize= 30) \nplt.xlim([0, 1.25 * np.max(wt_spectrum)])\nplt2.set_yscale('log', base=2, subs=None)\nplt.ylim(2.5,256)\nplt.xlim(0,1.5)\nax = plt.gca().yaxis\nax.set_major_formatter(ticker.ScalarFormatter())\nax.set_major_formatter(plt.NullFormatter())\nplt2.invert_yaxis()\n\n# end of code\n","repo_name":"mvichi/antarctic-buoys","sub_path":"scripts/Drifter_wavelet_analysis.py","file_name":"Drifter_wavelet_analysis.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"39717645044","text":"import math\n\ndef grayCode(n: int):\n    for bit in range(0, 1 << n):\n        yield str(bin(bit ^ (bit >> 1))[2:]).zfill(n)\n\ndef commonDigit(d1, d2):\n    tmp = d1 ^ d2\n    cnt = 0\n    while tmp:\n        if tmp % 2 == 1:\n            cnt += 1\n        tmp = tmp >> 1\n    return cnt\n\n\nncase = int(input().strip())\nbetsWinning, betDis = {}, {}\nfor i in range(ncase):\n    bet = input().strip()\n    betsWinning[bet] = 0\n    betDis[bet] = [int(bet, base=2), None]\nbetlen = len(bet)\n\nfor idx, ele in enumerate(grayCode(betlen)):\n    intele = int(ele, base=2)\n    maxcnt, maxbet = -1, None\n    found = True\n    if idx == 0:\n        for key in betDis:\n            cnt = betlen - commonDigit(intele, betDis[key][0])\n            betDis[key][1] = cnt\n            if cnt > maxcnt:\n                maxcnt, maxbet = cnt, key\n                found = True\n            elif cnt == maxcnt:\n                found = False\n    else:\n        diffbit = int(betlen - math.log2(intele ^ previous) - 1)\n        for key in betDis:\n            cnt = betDis[key][1] + 1 if ele[diffbit] == key[diffbit] else betDis[key][1] - 1\n            betDis[key][1] = cnt\n            if cnt > maxcnt:\n                maxcnt, maxbet = cnt, key\n                found = True\n            elif cnt == maxcnt:\n                found = False\n\n    previous = intele\n    if found:\n        betsWinning[maxbet] += 1\n    # print(ele, betsWinning, betDis)\n\nminres = None\nfor key in betsWinning:\n    if minres is None:\n        minres = betsWinning[key]\n    else:\n        if betsWinning[key] < minres:\n            minres = betsWinning[key]\n\nprint(minres)\n","repo_name":"shiyutang/DL-Prep","sub_path":"04_Algorithms/Leetcode/Xtreme13.Luck-i-flip.py","file_name":"Xtreme13.Luck-i-flip.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"}{"seq_id":"70140788426","text":"import logging\nimport os\nimport sys\n\nfrom xdg import BaseDirectory\n\n\nclass StreamToLogger(object):\n    \"\"\"\n    Fake file-like stream object that redirects writes to a logger instance.\n    Adapted from: http://bit.ly/1xLpNuF\n    \"\"\"\n\n    def __init__(self, logger, log_level=logging.DEBUG):\n        self.logger = logger\n        self.log_level = log_level\n        self.linebuf = ''\n\n    def write(self, buf):\n        for line in buf.rstrip().splitlines():\n            self.logger.log(self.log_level, line.rstrip())\n\n    def flush(*arg):\n        pass\n\n\nclass GoopgLogger(object):\n    \"\"\"\n    A simple class which configures the basic 
logger\n    \"\"\"\n    filelog = os.path.join(BaseDirectory.save_cache_path('goopg'), 'log')\n    logging.basicConfig(filename=filelog,\n                        filemode='a',\n                        level=logging.ERROR,\n                        format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')\n    # redirect stderr to logger\n    sys.stderr = StreamToLogger(logging.getLogger('STDERR'), logging.ERROR)\n","repo_name":"LeoIannacone/goopg","sub_path":"host/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"81"}{"seq_id":"38837565731","text":"# Python program to bulk upload json files as blobs to azure storage\nimport os\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContentSettings, ContainerClient\n \n# IMPORTANT: Replace connection string with your storage account connection string\n# Usually starts with DefaultEndpointsProtocol=https;...\nMY_CONNECTION_STRING = \"CHANGEME\"\n \n# Replace with blob container. This should be already created in azure storage.\nMY_EVENTS_CONTAINER = \"default/events\"\n \n# Replace with the local folder which contains the json files for upload\nLOCAL_EVENTS_PATH = \"./\"\n \nclass AzureBlobFileUploader:\n    def __init__(self):\n        print(\"Initializing AzureBlobFileUploader\")\n \n        # Initialize the connection to Azure storage account\n        self.blob_service_client = BlobServiceClient.from_connection_string(MY_CONNECTION_STRING)\n \n    def upload_all_json_in_folder(self):\n        # Get all files with json extension and exclude directories\n        all_file_names = [f for f in os.listdir(LOCAL_EVENTS_PATH)\n                          if os.path.isfile(os.path.join(LOCAL_EVENTS_PATH, f)) and \".json\" in f]\n \n        # Upload each file\n        for file_name in all_file_names:\n            self.upload_json(file_name)\n \n    def upload_json(self,file_name):\n        # Create blob with same name as local file name\n        blob_client = self.blob_service_client.get_blob_client(container=MY_EVENTS_CONTAINER,\n                                                               blob=file_name)\n        # Get full path to the file\n        upload_file_path = os.path.join(LOCAL_EVENTS_PATH, file_name)\n \n        # Create blob on storage\n        # Overwrite if it already exists!\n        json_content_setting = ContentSettings(content_type='text/json')\n        print(f\"uploading file - {file_name}\")\n        with open(upload_file_path, \"rb\") as data:\n            blob_client.upload_blob(data,overwrite=True,content_settings=json_content_setting)\n \n \n# Initialize class and upload files\nazure_blob_file_uploader = AzureBlobFileUploader()\nazure_blob_file_uploader.upload_all_json_in_folder()\n","repo_name":"bryanz994/azure_pipeline_test","sub_path":"ingest_blob.py","file_name":"ingest_blob.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"39716765414","text":"# -*- coding:utf-8 -*-\n# Clockwise traversal: the direction cycles right -> down -> left -> up; each time the walk hits a boundary it turns and shrinks that boundary, and once two turns in a row are needed the traversal is finished\nclass Solution:\n    # matrix is a 2D list; returns a flat list\n    def printMatrix(self, matrix):\n        if matrix == [[]] or matrix == []:\n            return []\n        rows = [-1, len(matrix)]\n        cols = [-1, len(matrix[0])]\n        print(rows,cols)\n\n        def isvalid(coord):\n            return rows[0] < coord[0] < rows[1] and cols[0] < coord[1] < cols[1]\n\n        def nextcoor(coord, dir):\n            if dir == 'right':\n                nextCoord = (coord[0], coord[1] + 1)\n            elif dir == 'left':\n                nextCoord = (coord[0], coord[1] - 1)\n            elif dir == 'up':\n                nextCoord = (coord[0] - 1, coord[1])\n            else:\n                nextCoord = (coord[0] + 1, coord[1])\n\n            return nextCoord\n\n        def updateboundary(coord):  # after row (column) i is finished, the boundary may no longer cross row (column) i\n            if direction == 'left':\n                rows[1] = coord[0]\n            elif 
direction == 'right':\n                rows[0] = coord[0]\n            elif direction == 'up':\n                cols[0] = coord[1]\n            else:\n                cols[1] = coord[1]\n            print(rows,cols)\n\n        dir = {'right': 'down', 'down': 'left', 'left': 'up', 'up': 'right'}\n        direction = 'right'\n        coordinate = (0, 0)\n        ret = [matrix[0][0]]\n        while 1:\n            if isvalid(nextcoor(coordinate, direction)):\n                coordinate = nextcoor(coordinate, direction)\n                ret.append(matrix[coordinate[0]][coordinate[1]])\n            else:\n                print(coordinate)\n                updateboundary(coordinate)\n                direction = dir[direction]\n                if not isvalid(nextcoor(coordinate, direction)):\n                    break\n        return ret\n\n\nsol = Solution()\n# print(sol.printMatrix([[1, 2, 3, 4],\n#                        [5, 6, 7, 8],\n#                        [9, 10, 11, 12],\n#                        [13, 14, 15, 16]]))\n\nprint(sol.printMatrix([[1, 2, 3, 4],\n                       [5, 6, 7, 8],\n                       [9, 10, 11, 12]]))\n# print(sol.printMatrix([[]]))","repo_name":"shiyutang/DL-Prep","sub_path":"04_Algorithms/Leetcode/JZ19 顺时针打印矩阵.py","file_name":"JZ19 顺时针打印矩阵.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"}{"seq_id":"11498157010","text":"from collections import namedtuple\nimport requests\nimport sys\n\n\n#osoba = namedtuple(\"osoba\", ['imie', 'wiek'])\n\n#os1 = osoba(imie='Wojtek', wiek=39)\n\n#print(os1)\n\n#url = \"https://www.metaweather.com/api/location/search/?query=london\"\n\n#r = requests.get(url)\n#json_data = json.loads(r.text)\n#for i in json_data:\n#    print(i)\n\n#weather = json_data[0]\n#print(weather.get(\"woeid\"))\n\n\nWeather = namedtuple(\"Weather\", ['location_name', 'the_temp', 'air_pressure', 'humidity'])\n\n\ndef get_location_id(location_name):\n    resp = requests.get(f\"https://www.metaweather.com/api/location/search/?query={location_name}\")\n    return resp.json()[0][\"woeid\"]\n\n\ndef get_location_weather(location_id):\n    resp = requests.get(f\"https://www.metaweather.com/api/location/{location_id}/\")\n    location = resp.json()['title']\n    curr_data = resp.json()[\"consolidated_weather\"][0]\n    weather = Weather(location_name=location, the_temp=curr_data['the_temp'], air_pressure=curr_data['air_pressure'],\n                      humidity=curr_data['humidity'])\n    return weather\n\n\ndef weather_report(weather):\n    report = f\"\"\"Pogoda w {weather.location_name}\ntemperatura: {weather.the_temp}\nwilgotnosc: {weather.humidity}\ncisnienie: {weather.air_pressure}\n\"\"\"\n    return report\n\n\nif __name__ == \"__main__\":\n    try:\n        location_name = sys.argv[1]\n        location_id = get_location_id(location_name)\n        weather = get_location_weather(location_id)\n        report = weather_report(weather)\n        print(report)\n    except IndexError:\n        print(\"podaj nazwe lokalizacji\")\n\n\n\n\n","repo_name":"wbkusy/pythonalx","sub_path":"baza-danych/pogoda.py","file_name":"pogoda.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"35160489065","text":"from ..base.exchange import Exchange\nfrom ..base.utils.errors import (\n    InsufficientBalance,\n)\n\nfrom ..base.utils.retry import retry\nfrom typing import Optional\nimport datetime\n\n# from pytz import timezone\nimport time\n\n\nclass Klayswap(Exchange):\n    has = {\n        \"createSwap\": True,\n        \"fetchTicker\": True,\n        \"fetchBalance\": True,\n    }\n\n    def __init__(self, config_change: Optional[dict] = {}):\n        super().__init__()\n\n        config = {\n            \"chainName\": \"KLAYTN\",\n            \"exchangeName\": \"klayswap\",\n            \"retries\": 3,\n            \"retriesTime\": 10,\n            \"host\": 0,\n            \"account\": None,\n            \"privateKey\": None,\n            \"log\": None,\n            
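# Any of the defaults above can be overridden per instance via the\n            # config_change argument (applied through config.update below).\n            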
\"proxy\": False,\n        }\n\n        config.update(config_change)\n\n        # market info\n        self.id = 1\n        self.chainName = config[\"chainName\"]\n        self.exchangeName = config[\"exchangeName\"]\n        self.duration = False\n        self.addNonce = 0\n        self.retries = config[\"retries\"]\n        self.retriesTime = config[\"retriesTime\"]\n        self.host = config[\"host\"]\n        self.account = config[\"account\"]\n        self.privateKey = config[\"privateKey\"]\n        self.log = config[\"log\"]\n        self.proxy = config[\"proxy\"]\n\n        # self.load_exchange(self.chainName, self.exchangeName)\n        # self.set_logger(self.log)\n\n    # @retry\n    async def fetch_ticker(self, amountAin, tokenAsymbol, tokenBsymbol):\n\n        await self.load_exchange(self.chainName, self.exchangeName)\n\n        amountIn = self.from_value(value=amountAin, exp=await self.decimals(tokenAsymbol))\n\n        pool = await self.get_pool(tokenAsymbol, tokenBsymbol)\n\n        pool = self.set_checksum(pool)\n\n        amountBout = await self.get_amount_out(pool, tokenAsymbol, amountIn)\n        decimal = await self.decimals(tokenBsymbol)\n        amountout = self.to_value(value=amountBout, exp=decimal)\n\n        result = {\n            \"amountAin\": amountAin,\n            \"amountBout\": amountout,\n            \"tokenAsymbol\": tokenAsymbol,\n            \"tokenBsymbol\": tokenBsymbol,\n        }\n\n        return result\n\n    @retry\n    async def create_swap(\n        self, amountA, tokenAsymbol, amountBMin, tokenBsymbol, path=None, *args, **kwargs\n    ):\n        \"\"\"\n        Parameters\n        ----------\n        amountA : tokenA amount input\n        tokenAsymbol: symbol of token input\n        amountBMin : tokenB amount output which is expected as a minimum\n        tokenBsymbol : symbol of tokenB output\n\n        Return\n        {\n            'transaction_hash': '0x21895bbec44e6dab91668fb338a43b3eb59fa78ae623499bf8f313ef827301c4',\n            'status': 1,\n            'block': 34314499,\n            'timestamp': datetime.datetime(2022, 10, 14, 10, 17, 58, 885156),\n            'function': ,\n            'from': '0x78352F58E3ae5C0ee221E64F6Dc82c7ef77E5cDF',\n            'amountIn': 0.1,\n            'tokenA': 'USDC',\n            'to': '0x10f4A785F458Bc144e3706575924889954946639',\n            'amountOut': 0.623371,\n            'tokenB': 'oZEMIT',\n            'transaction_fee:': 0.023495964646856035\n        }\n        \"\"\"\n\n        await self.load_exchange(self.chainName, self.exchangeName)\n\n        if (path is not None) and (len(path) > 2):\n            self.path = [self.set_checksum(self.tokens[token][\"contract\"]) for token in path[1:-1]]\n\n        else:\n            self.path = []\n\n        self.tokenSymbol = tokenAsymbol\n        self.tokenBsymbol = tokenBsymbol\n        self.amount = amountA\n\n        # self.require(amountA <= amountBMin, ValueError(\"amountA is Less then amountBMin\"))\n        self.require(tokenAsymbol == tokenBsymbol, ValueError(\"Same Symbol\"))\n\n        tokenA = self.tokens[tokenAsymbol]\n        tokenB = self.tokens[tokenBsymbol]\n        amountA = self.from_value(value=amountA, exp=int(tokenA[\"decimals\"]))\n        # amountBMin = self.from_value(value=amountBMin, exp=int(tokenB[\"decimals\"]))\n        amountBMin = 1\n\n        tokenAaddress = self.set_checksum(tokenA[\"contract\"])\n        tokenBaddress = self.set_checksum(tokenB[\"contract\"])\n        self.account = self.set_checksum(self.account)\n        routerAddress = self.set_checksum(self.markets[\"routerAddress\"])\n\n        current_nonce = await self.w3.eth.get_transaction_count(self.account)\n        self.nonce = current_nonce + self.addNonce\n\n        build = {\n            \"from\": self.account,\n            \"gas\": 4000000,\n            \"nonce\": self.nonce,\n        }\n\n        await self.check_approve(\n            amount=amountA,\n            token=tokenAaddress,\n            account=self.account,\n            router=routerAddress,\n            build=build,\n        )\n\n        self.routerContract = await self.get_contract(routerAddress, self.markets[\"routerAbi\"])\n\n        current_nonce = await self.w3.eth.get_transaction_count(self.account)\n        self.nonce = 
current_nonce + self.addNonce\n\n        build[\"nonce\"] = self.nonce\n\n        if tokenAsymbol == self.baseCurrency:\n            tx = await self.eth_to_token(amountA, tokenBaddress, amountBMin, build)\n        # elif tokenBsymbol == self.baseCurrency:\n        #     tx = self.token_to_eth(tokenAaddress, amountA)\n        else:\n            tx = await self.token_to_token(tokenAaddress, amountA, tokenBaddress, amountBMin, build)\n\n        # gas = await self.w3.eth.estimate_gas(tx)\n        # tx[\"gas\"] = gas\n\n        tx_receipt = await self.fetch_transaction(tx, \"SWAP\")\n\n        return tx_receipt\n\n    async def token_to_token(self, tokenAaddress, amountA, tokenBaddress, amountBMin, build):\n        tx = await self.routerContract.functions.exchangeKctPos(\n            tokenAaddress, amountA, tokenBaddress, amountBMin, self.path\n        ).build_transaction(build)\n\n        return tx\n\n    async def eth_to_token(self, amountA, tokenBaddress, amountBMin, build):\n\n        build[\"value\"] = amountA\n\n        tx = await self.routerContract.functions.exchangeKlayPos(\n            tokenBaddress, amountBMin, self.path\n        ).build_transaction(build)\n\n        return tx\n\n    # def token_to_eth(self, tokenAaddress, amountA):\n\n    #     tx = self.routerContract.functions.exchangeKlayNeg(\n    #         tokenAaddress, amountA, self.path\n    #     ).build_transaction(\n    #         {\n    #             \"from\": self.account,\n    #             \"gas\": 4000000,\n    #             \"nonce\": self.nonce,\n    #         }\n    #     )\n\n    #     return tx\n\n    async def get_amount_out(self, pool, tokenAsymbol, amountIn):\n        tokenA = self.tokens[tokenAsymbol]\n\n        tokenAaddress = self.set_checksum(tokenA[\"contract\"])\n\n        poolAddress = self.set_checksum(pool)\n\n        self.factoryContract = await self.get_contract(poolAddress, self.markets[\"factoryAbi\"])\n\n        amountOut = await self.factoryContract.functions.estimatePos(tokenAaddress, amountIn).call()\n\n        # amountOut = type(self.factoryContract.functions.estimatePos(tokenAaddress, amountIn).call())\n\n        return amountOut\n\n    async def get_reserves(self, tokenAsymbol, tokenBsymbol):\n        pool = await self.get_pool(tokenAsymbol, tokenBsymbol)\n\n        pool = self.set_checksum(pool)\n\n        tokenA = self.tokens[tokenAsymbol]\n\n        tokenAaddress = self.set_checksum(tokenA[\"contract\"])\n\n        factoryContract = await self.get_contract(pool, self.markets[\"factoryAbi\"])\n\n        tokenA = await factoryContract.functions.tokenA().call()\n\n        routerContract = await self.get_contract(pool, self.markets[\"routerAbi\"])\n        reserves = await routerContract.functions.getCurrentPool().call()\n\n        if tokenA != tokenAaddress:\n            reservesA = self.to_value(reserves[1], await self.decimals(tokenAsymbol))\n            reservesB = self.to_value(reserves[0], await self.decimals(tokenBsymbol))\n\n        else:\n            reservesA = self.to_value(reserves[0], await self.decimals(tokenAsymbol))\n            reservesB = self.to_value(reserves[1], await self.decimals(tokenBsymbol))\n\n        reserve = reservesB / reservesA\n\n        return {\n            \"pool\": f\"{tokenAsymbol}-{tokenBsymbol}\",\n            \"tokenAsymbol\": tokenAsymbol,\n            \"tokenBsymbol\": tokenBsymbol,\n            \"tokenAreserves\": reservesA,\n            \"tokenBreserves\": reservesB,\n            \"poolPrice\": reserve,\n            \"created_at\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n        }\n\n    # def set_logger(self, logfilePath):\n\n    #     if logfilePath == None :\n\n    #         #default_log\n    #         basePath = Path(__file__).resolve().parent.parent\n    #         logfile_name = 'logfile.log'\n    #         logfilePath = str(os.path.join(basePath, logfile_name))\n\n    #         print(logfilePath)\n\n    #     logging.basicConfig(\n    #         level=logging.INFO,\n    #         filename=logfilePath,\n    #         filemode=\"w\",\n    #         format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n    #     )\n\n    #     # logging.Formatter.converter = lambda *args: 
datetime.datetime.now(\n    #     #     tz=timezone(\"Asia/Seoul\")\n    #     # ).timetuple()\n\n    #     # log_formatter = logging.Formatter(\n    #     #     fmt =\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n\n    #     # )\n\n    #     # log_formatter.converter = lambda *args: datetime.datetime.now(\n    #     #     tz=timezone(\"Asia/Seoul\")\n    #     # ).timetuple()\n\n    #     # file_log = RotatingFileHandler(\n    #     #     filename=logfilePath,\n    #     #     mode=\"a\",\n    #     #     maxBytes=5 * 1024 * 1024,\n    #     #     backupCount=2,\n    #     #     encoding=None,\n    #     #     delay=0,\n    #     # )\n\n    #     # file_log.setFormatter(log_formatter)\n    #     # file_log.setLevel(logging.INFO)\n\n    #     self.logger = logging.getLogger(__name__)\n\n    #     print(\"test log\")\n","repo_name":"munsunouk/ccdxt","sub_path":"exchange/klayswap.py","file_name":"klayswap.py","file_ext":"py","file_size_in_byte":9722,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}{"seq_id":"34211341478","text":"from zipfile import ZipFile\n\nfilename = []\nfilesize = []\n# Create a ZipFile Object and load sample.zip in it\nPath = '/Users/purnaraghavaraokalva/Desktop/sampledata.zip'\nwith ZipFile(Path, 'r') as zipObj:\n    # Get list of files names in zip\n    listOffiles = zipObj.namelist()\n    #sizeoffiles = zipObj.file_size\n    listoffilesinsidezip = []\n    print(type(listOffiles))\n    # Iterate over the list of file names in given list & print them\n# file_end = \".txt\"\nfor key, filename in enumerate(listOffiles):\n    if filename.__contains__(\"_\"):\n        continue\n    if filename.__contains__(\".txt\"):\n        listoffilesinsidezip.append(filename.split(\"/\")[1])\nprint(listoffilesinsidezip)\n","repo_name":"krishnabala02/Data-Engineering-Training","sub_path":"myassesment_numoffilesinzip.py","file_name":"myassesment_numoffilesinzip.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"12276271706","text":"#!/usr/bin/env python\n#\n#author:Distantskyline\n#date:19812\n#usage:zhuangshiqi\n\n\n\n# def myDecorator(func):\n#     def wrapper ():\n#         print('wrapper of decorator')\n#         func()\n#     return wrapper\n#\n# @myDecorator\n# def greet():\n#     print('hello world')\n#\n# Greet = myDecorator(greet)\n# Greet()\n\n\n# def jsq(func):\n#     def dy(*args,**kwargs):\n#         print('+-------+')\n#         func(*args,**kwargs)\n#         print('+-------+')\n#     return dy\n#\n# @jsq\n# def greet(name,class1):\n#     print('|',name,class1,'|')\n#\n# greet('zgh',7)\n\n# import functools\n#\n# def myDecorator(greet):\n#     @functools.wraps(greet)\n#     def wrapper(*args,**kwargs):\n#         print('hello nihao')\n#         func(*args,**kwargs)\n#     return wrapper\n# @myDecorator\n# def greet(name,class2):\n#     print(name,class2)\n#\n# print(greet.__name__)\n# print(help(greet))\n\n# import time\n# import datetime\n\n# altime = datetime.datetime.now()\n# print(altime)\n# print(altime.year,altime.month,altime.day,altime.hour,altime.minute,altime.second)\n\n# first = time.perf_counter()\n# listA = [x for x in range(10)]\n# print(listA)\n#\n# end = time.perf_counter()\n# print('时间差: {:f}'.format(end - first))\n\n# with open(file='err0r.log',mode='w',encoding='utf8')as log:\n#     log.write('{} [Error]:this opera must be allow''administrator'.format(datetime.datetime.now()\n#     ))\n\n# import time\n# # a = time.time()\n# # print(a)\n\n\n# decorator that measures a function's execution time\n\n\nimport time\nimport functools\n\n#\ndef waibu(func):\n    @functools.wraps(func)\n    def neibu(*args,**kwargs):\n        a = time.perf_counter()\n        func(*args,**kwargs)\n        b = time.perf_counter()\n        print('时间差: {:f}'.format(b - a))\n    return neibu\n\n@waibu\ndef 
jisuan(parm):\n a = parm + 11\n return a\n # print(a)\njisuan(100)\nprint(time.strftime('%Y'))\n\n","repo_name":"Distantskyline/python3","sub_path":"2.面向对象/1.装饰器/进阶01/zhuangshiqi.py","file_name":"zhuangshiqi.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36261261362","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# {# pkglts, pysetup.kwds\n# format setup arguments\n\nfrom setuptools import setup, find_packages\n\n\nshort_descr = \"OpenAlea.Core is able to discover and manage packages and logical components, build and evaluate dataflows and Generate final applications\"\nreadme = open('README.rst').read()\nhistory = open('HISTORY.rst').read()\n\n\n# find version number in src/openalea/core/version.py\nversion = {}\nwith open(\"src/openalea/core/version.py\") as fp:\n exec(fp.read(), version)\n\n# find packages\npkgs = find_packages('src')\n\n\n\nsetup_kwds = dict(\n name='openalea.core',\n version=version[\"__version__\"],\n description=short_descr,\n long_description=readme + '\\n\\n' + history,\n author=\"Christophe Pradal\",\n author_email=\"christophe dot pradal at cirad dot fr\",\n url='https://github.com/openalea/core',\n license='cecill-c',\n zip_safe=False,\n\n packages=pkgs,\n namespace_packages=['openalea'],\n package_dir={'': 'src'},\n setup_requires=[\n \"pytest-runner\",\n ],\n install_requires=[\n ],\n tests_require=[\n \"coverage\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-mock\",\n \"sphinx\",\n ],\n entry_points={},\n keywords='openalea',\n )\n# #}\n# change setup_kwds below before the next pkglts tag\n\nsetup_kwds['setup_requires'] = ['openalea.deploy']\nsetup_kwds['share_dirs'] = {'share': 'share'}\nsetup_kwds['entry_points'][\"wralea\"] = [\"openalea.flow control = openalea.core.system\", ]\nsetup_kwds['entry_points'][\"console_scripts\"] = [\"alea = openalea.core.alea:main\"]\nsetup_kwds['entry_points']['openalea.core'] = [\n 'openalea.core/openalea = openalea.core.plugin.builtin',\n ]\n\n# do not change things below\n# {# pkglts, pysetup.call\nsetup(**setup_kwds)\n# #}\n","repo_name":"christian34/core","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"5197601348","text":"\"\"\" smashlib.handle\n\"\"\"\n\nfrom smashlib import get_smash\n\nclass AbstractInterface(object):\n\n \"\"\" \"\"\"\n\n _user_ns_var = None\n\n def __init__(self, parent):\n self._parent = parent\n self._parent.__class__.interface = self\n get_smash().shell.user_ns[self._user_ns_var] = self\n\n def __repr__(self):\n return \"{0} bound to {1}. Use '{2}?' 
for more information\".format(\n            self.__class__.__name__,\n            self._parent.__class__.__name__,\n            self._user_ns_var\n        )\n\n    __str__ = __repr__\n\n    @property\n    def __doc__(self):\n        self.update()\n\n    @property\n    def edit(self):\n        raise RuntimeError(\"edit not defined for {0}\".format(self))\n","repo_name":"mattvonrocketstein/smash","sub_path":"smashlib/handle.py","file_name":"handle.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"}{"seq_id":"41475613847","text":"def mxdiflg(a1, a2):\n    # your code\n    if a1 == [] or a2 == []:\n        return -1\n    array1 = []\n    array2 = []\n    for x in a1:\n        array1.append(len(x))\n    for y in a2:\n        array2.append(len(y))\n    return max(abs(max(array1)-min(array2)),abs(max(array2)-min(array1)))\n\n# someone else's solution\ndef mxdiflg(a1, a2):\n    if a1 and a2:\n        return max(abs(len(x) - len(y)) for x in a1 for y in a2)\n    return -1\n\na1 = [\"abc\",\"bf\"]\na2 = [\"a\",\"b\"]\n\nprint(mxdiflg(a1,a2))\n","repo_name":"lennystudy/codewars","sub_path":"python/MaximumLengthDifference.py","file_name":"MaximumLengthDifference.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"20252301930","text":"import re\nfrom typing import NamedTuple, NewType\n\nimport pydantic.validators\n\nimport apischema\n\n\n# Serialization can only be customized into the enclosing models\nclass RGB(NamedTuple):\n    red: int\n    green: int\n    blue: int\n\n    # If you don't put this method, RGB schema will be:\n    # {'title': 'Rgb', 'type': 'array', 'items': {}}\n    @classmethod\n    def __modify_schema__(cls, field_schema) -> None:\n        field_schema.update({\"type\": \"string\", \"pattern\": r\"#[0-9A-Fa-f]{6}\"})\n        field_schema.pop(\"items\", ...)\n\n    @classmethod\n    def __get_validators__(cls):\n        yield pydantic.validators.str_validator\n        yield cls.validate\n\n    @classmethod\n    def validate(cls, value) -> \"RGB\":\n        if (\n            not isinstance(value, str)\n            or re.fullmatch(r\"#[0-9A-Fa-f]{6}\", value) is None\n        ):\n            raise ValueError(\"Invalid RGB\")\n        return RGB(\n            red=int(value[1:3], 16), green=int(value[3:5], 16), blue=int(value[5:7], 16)\n        )\n\n\n# Simpler with apischema\n\n\nclass RGB(NamedTuple):\n    red: int\n    green: int\n    blue: int\n\n\n# NewType can be used to add schema to conversion source/target\n# but Annotated[str, apischema.schema(pattern=r\"#[0-9A-Fa-f]{6}\")] would have worked too\nHexaRGB = NewType(\"HexaRGB\", str)\n# pattern is used in JSON schema and in deserialization validation\napischema.schema(pattern=r\"#[0-9A-Fa-f]{6}\")(HexaRGB)\n\n\n@apischema.deserializer  # could be declared as a staticmethod of RGB class\ndef from_hexa(hexa: HexaRGB) -> RGB:\n    return RGB(int(hexa[1:3], 16), int(hexa[3:5], 16), int(hexa[5:7], 16))\n\n\n@apischema.serializer  # could be declared as a method/property of RGB class\ndef to_hexa(rgb: RGB) -> HexaRGB:\n    return HexaRGB(f\"#{rgb.red:02x}{rgb.green:02x}{rgb.blue:02x}\")\n\n\nassert (  # schema is inherited from deserialized type\n    apischema.json_schema.deserialization_schema(RGB)\n    == apischema.json_schema.deserialization_schema(HexaRGB)\n    == {\n        \"$schema\": \"http://json-schema.org/draft/2020-12/schema#\",\n        \"type\": \"string\",\n        \"pattern\": \"#[0-9A-Fa-f]{6}\",\n        
}\n)\n","repo_name":"wyfo/apischema","sub_path":"examples/pydantic_conversion.py","file_name":"pydantic_conversion.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"81"}{"seq_id":"1808260533","text":"import psutil\nimport time\nimport os\nimport sys\n\npid = os.getpid()\np = psutil.Process(pid)\n\n# NOTE: this loop runs until the process is interrupted; the recording\n# section below is only reached if the loop is removed or broken out of\nwhile True:\n    #print(p.cpu_percent(interval=1))\n    print(psutil.cpu_percent(interval=0.1, percpu=True))\ncpu_info_recorder = []\nmemory_recorder = []\nfor i in range(0,5,1):\n    a = psutil.cpu_times_percent(interval=None, percpu=True)\n    print(a)\n    cpu_info_recorder.append(psutil.cpu_times_percent(interval=None, percpu=True))\n    memory_recorder.append(str(psutil.virtual_memory()))\n    #print(psutil.cpu_times_percent(interval=0.1, percpu=True))\n    #print(str(psutil.virtual_memory()))\n    time.sleep(0.5)\n\n#print(memory_recorder)\n#print(cpu_info_recorder)","repo_name":"Kyrie-Zhao/IoTensorJuly","sub_path":"interference_test/sysmonitor.py","file_name":"sysmonitor.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"31472741248","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 29 14:08:08 2019\n\n@author: malrawi\n\"\"\"\n\nimport torchvision.transforms as transforms\nfrom datasets import ImageDataset\nimport torch\nfrom models import GeneratorResNet\nfrom matplotlib import pyplot as plt\n\n# A is the fashion image\n# B is the pixel-level annotation \n\nmodel_name = 'G_AB_60.pth' \ndataset_name = 'ClothCoParse'\nexperiment_name = ''  # to be added \npath2model = './saved_models/ClothCoParse/'\n\nprint('model used', model_name) \n\n# loads a saved model\ndef get_GAN_AB_model(folder_model, model_name, device): \n    n_residual_blocks = 9  # this should be the same value used in training the G_AB model \n    G_AB = GeneratorResNet(input_shape=(3,0), num_residual_blocks = n_residual_blocks) \n    G_AB.load_state_dict(torch.load(folder_model + model_name, map_location=device ), ) \n    # if cuda: \n    #     G_AB = G_AB.to(device)\n    return G_AB\n\n\n# changed to display as color map and not image\n\ndef show_tensor(img, show_img=True):\n    to_pil = transforms.ToPILImage() \n    img = to_pil(img.squeeze())  # we can also use test_set[1121][0].numpy() \n    if show_img: \n        plt.imshow(img.convert('L'), cmap= plt.cm.get_cmap(\"nipy_spectral\"), vmin=0, vmax=255)\n        # img.show() \n        # img.save('/home/malrawi/GAN_seg_img_414/'+'gg-col'+'.png')  # can be used to save the image\n    \n    return img\n\n\ntransforms_used = transforms.Compose( [ transforms.ToTensor(), \n                        transforms.Normalize((0.5,0.5,0.5), (.5,.5,.5)) \n                        ] ) \n\ndata_set = ImageDataset(\"../data/%s\" % dataset_name, \n                           transforms_ = None, \n                           unaligned=False, \n                           mode='train' )\n\nimg_id=110 # getting some image, here index 110\nPIL_A_img = data_set[img_id]['A']\nPIL_B_img = data_set[img_id]['B']\nreal_A = transforms_used(PIL_A_img)  # tensor image \n\ncuda = False # this will definitely work on the cpu if it is false\nif cuda:\n    cuda = True if torch.cuda.is_available() else False\ndevice = torch.device('cuda' if cuda else 'cpu')\n\nG_AB = get_GAN_AB_model(path2model, model_name, device)  # load the model \nG_AB.eval() \n\nif cuda: real_A = real_A.to(device)\nwith torch.no_grad():\n    B_output = G_AB(real_A.unsqueeze(0))\n\nPIL_A_img.show() # show the original image \nplt.imshow(PIL_B_img.convert('L'), cmap= plt.cm.get_cmap(\"nipy_spectral\"), vmin=0, vmax=255) 
# show the pixel-level annotation\nshow_tensor(B_output)  # show the segmented image we get from the model \n\n","repo_name":"morawi/ClothExtract","sub_path":"load_then_test_model.py","file_name":"load_then_test_model.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}{"seq_id":"40834659564","text":"from tinydb import TinyDB, Query, where\n\ndb = TinyDB(\"data.json\", indent=4)\n\n# update a piece of information\n#db.update({\"score\": 40}, where('name') == 'Skin')\n\n# insert a field into the database\n#db.update({\"role\":\"Junior\"})\n\n# update the data of a specific field\ndb.update({\"role\":\"Pythonista\"}, where('name') == 'Skin')\n\n# update or insert data\ndb.upsert({\"name\" : \"Betsa\",\"score\" : 300, \"role\" : \"Pianist\"}, where('name') == 'Betsa')\n\n# delete a record\ndb.remove(where('score')<= 100)\n\n# delete all records\ndb.truncate()","repo_name":"Fabrice-sangwa/Exercies_python","sub_path":"les_bases_de_donne_TinyDB/exercice2.py","file_name":"exercice2.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"8524219524","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom time import sleep\r\nfrom watchdog.observers.polling import PollingObserver\r\nfrom watchdog.events import RegexMatchingEventHandler\r\nimport time\r\nimport requests\r\nimport yaml\r\n\r\n\r\ndef report_scraping():\r\n    url = 'http://localhost:8686/#/dice1/dashboard'\r\n    driver = webdriver.Chrome(r'C:\\Apps\\ChromeDriver\\chromedriver.exe')\r\n\r\n    # open the dashboard page\r\n    driver.get(url)\r\n    sleep(7)\r\n    \r\n    # scrape the test result summary\r\n    td = driver.find_element(By.CSS_SELECTOR, \"table > tbody > tr:nth-child(1)\")\r\n    td = td.text\r\n    \r\n    # quit the browser\r\n    driver.quit()\r\n    \r\n    return td\r\n\r\n    \r\n# send a message to LINE\r\ndef line_notify(msg, msg2, TOKEN):\r\n    try:\r\n        access_token = TOKEN\r\n        msg = msg + \"\\nテストが完了しました。\"\r\n        msg = msg + \"\\n\" + msg2\r\n        \r\n        headers = {'Authorization': 'Bearer ' + access_token}\r\n        payload = {'message': msg}\r\n        \r\n        print(\"LINE メッセージを POST します。\")\r\n        r = requests.post(LINE_NOTIFY_URL, headers=headers, params=payload,)\r\n        if r.status_code == 200:\r\n            print(\"LINE メッセージを送信しました。\")\r\n        else:\r\n            print(\"ステータスコード: \" + str(r.status_code))\r\n            raise Exception\r\n    except Exception as e:\r\n        print(e)\r\n\r\nclass MyFileWatchHandler(RegexMatchingEventHandler):\r\n    def __init__(self, regexes):\r\n        super().__init__(regexes=regexes)\r\n\r\n    # action taken when a file is created\r\n    def on_created(self, event):\r\n        filepath = event.src_path\r\n        print(filepath)\r\n        tests_summary = report_scraping()\r\n        line_notify(filepath, tests_summary, TOKEN)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    # load the config\r\n    try:\r\n        with open(\"./config.yaml\", \"r\", encoding=\"utf-8\") as f:\r\n            config = yaml.safe_load(f)\r\n\r\n        # LINE_NOTIFY_URL\r\n        LINE_NOTIFY_URL = config[\"chat_url\"]\r\n        # LINE access_token\r\n        TOKEN = config[\"access_token\"]\r\n        # path to watch\r\n        TARGET_PATH = config[\"target_path\"]\r\n    except Exception as e:\r\n        print(e)\r\n\r\n    # pattern for target file paths\r\n    PATTERNS = [r'.+report-.+.html$']\r\n\r\n    # start watching\r\n    event_handler = MyFileWatchHandler(PATTERNS)\r\n    observer = PollingObserver()\r\n    observer.schedule(event_handler, TARGET_PATH, recursive=True)\r\n    observer.start()\r\n    try:\r\n        while True:\r\n            time.sleep(5)\r\n    except 
KeyboardInterrupt:\r\n        observer.stop()\r\n        observer.join()\r\n    ","repo_name":"lilucso01git/t-dash_end.watcher2","sub_path":"end.watcher2.py","file_name":"end.watcher2.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"70789226185","text":"def get_players(table, hascards=False, haschips=False):\n    \"\"\" Returns a list of seats at the table. If the button is set, it is\n        ordered from the first seat after the button to the button last. Can\n        specify if seats have cards and/or chips.\n    \"\"\"\n    if table.TOKENS['D'] == -1:\n        btn = 0\n    else:\n        btn = table.TOKENS['D']\n\n    length = len(table)\n    first = (btn + 1) % length\n    seats = list(range(first, length)) + list(range(first))\n\n    seatlist = [table.seats[s] for s in seats if table.seats[s].occupied()]\n\n    if hascards:\n        seatlist = list(filter(lambda x: x.has_hand(), seatlist))\n\n    if haschips:\n        seatlist = list(filter(lambda x: x.has_chips(), seatlist))\n\n    return seatlist\n\n\ndef next_player(table, from_seat, step=1, hascards=False):\n    \"\"\" Attempts to find the index of the next valid player from the from_seat.\n        If step is -1 it will search backwards on the table. Step can only be\n        1 or -1. We can also specify to search for the next player with cards\n        by setting hascards to True. If no player is found after searching\n        the length of the table, an exception is raised.\n    \"\"\"\n    if from_seat < -1 or from_seat >= len(table):\n        raise ValueError('from_seat is out of bounds!')\n    if abs(step) != 1:\n        raise ValueError('step needs to be 1 or -1.')\n\n    length = len(table)\n    for i in range(1, length + 1):\n        _seat = (from_seat + (i * step)) % length\n        s = table.seats[_seat]\n\n        if s.vacant():\n            continue\n        elif hascards and not s.has_hand():\n            continue\n        return _seat\n\n    raise Exception('Error finding player!')\n\n\ndef get_broke_players(table):\n    \"\"\" Returns a list of all the seats that have no chips in front of them. \"\"\"\n    return [s for s in table if s.occupied() and s.has_chips() is False]\n\n\ndef get_playerdict(table):\n    \"\"\" Returns a dictionary of seat indexes and players. \"\"\"\n    players = {}\n    for i, s in enumerate(table.seats):\n        if s.occupied():\n            players[i] = s.player\n    return players\n\n\ndef stackdict(table):\n    \"\"\" Returns a seat number/stacksize dictionary for each player at the table. \"\"\"\n    stacks = {}\n    for s in table:\n        stacks[s.NUM] = s.stack\n    return stacks\n\n\ndef stacklist(table):\n    \"\"\" Returns a list of all the stack sizes. \"\"\"\n    return [s.stack for s in table]\n\n\ndef player_listing(table):\n    \"\"\" Returns the list of seats with players and stacks, for the hand history. \"\"\"\n    _str = ''\n    for i, s in enumerate(table.seats):\n        if s.player is None:\n            _str += 'Seat #{}:\\n'.format(i)\n        else:\n            _str += 'Seat #{}: {}(${})\\n'.format(i, str(s.player), s.stack)\n    return _str\n\n\ndef position(table, _seat, postflop=False):\n    \"\"\" Returns how many seats from the button the seat is. 
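For example (a sketch\n        based on the ordering above): with four occupied seats and the button\n        at seat 1, get_players() returns them in order [2, 3, 0, 1], so seat 2\n        maps to position 3 and the button itself to position 0.\n    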
\"\"\"\n    # Raise an exception if the button is not set\n\n    if postflop:\n        seats = table.get_players(hascards=True)\n    else:\n        seats = table.get_players()\n\n    return len(seats) - seats.index(_seat) - 1\n\n\ndef set_blinds(table):\n    if len(table.get_players()) == 2:\n        table.TOKENS['SB'] = table.TOKENS['D']\n        table.TOKENS['BB'] = table.next_player(table.TOKENS['D'])\n    elif len(table.get_players()) > 2:\n        table.TOKENS['SB'] = table.next_player(table.TOKENS['D'])\n        table.TOKENS['BB'] = table.next_player(table.TOKENS['SB'])\n    else:\n        raise ValueError('Not enough players at the table!')\n","repo_name":"elunna/ponyup","sub_path":"src/dealer.py","file_name":"dealer.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}{"seq_id":"22594482331","text":"\"\"\"\n\"\"\"\n\ndef set(node, types):\n\n    funcs = node.program[1]  \n\n    # Functions\n    for name in types.keys():\n\n        if name in funcs.names:\n\n            types_ = types[name]\n            func = funcs[funcs.names.index(name)]\n            declares, returns, params = func[:3]\n\n            for key in types_.keys():\n\n                if key in declares.names:\n\n                    if key in returns.names:\n                        var = returns[returns.names.index(key)]\n                        var.type = types_[key]\n\n                    var = declares[declares.names.index(key)]\n                    var.type = types_[key]\n\n                elif key in params.names:\n                    var = params[params.names.index(key)]\n                    var.type = types_[key]\n\ndef get(node):\n\n    funcs = node.program[1]\n\n    types = {}\n\n    for func in funcs:\n\n        types[func.name] = types_ = {}\n\n        declares, params = func[0], func[2]\n        for var in declares[:]+params[:]:\n\n            type = var.type\n            if type == \"TYPE\":\n                type = \"\"\n            types_[var.name] = type\n\n            if not type:\n\n                type = var.prop[\"suggest\"]\n                if type == \"TYPE\":\n                    type = \"\"\n\n    return types\n\n\nclass Ftypes(object):\n    \"Access to function types from program node\"\n    def __get__(self, instance, owner):\n        return get(instance)\n    def __set__(self, instance, value):\n        set(instance, value)\n\n\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n","repo_name":"jonathf/matlab2cpp","sub_path":"src/matlab2cpp/supplement/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"81"}{"seq_id":"70915134346","text":"from collections import deque\r\n\r\ndef bfs(graph, start):\r\n    visited = set()\r\n    queue = deque([start])\r\n    visited.add(start)\r\n    \r\n    while queue:\r\n        current = queue.popleft()\r\n        print(current)\r\n        \r\n        for neighbor in graph[current]:\r\n            if neighbor not in visited:\r\n                visited.add(neighbor)\r\n                queue.append(neighbor)\r\n","repo_name":"Nagendra36prasad/Assignment-4-Graphs","sub_path":"Breadth_First_Traversal_for_a_Graph.py","file_name":"Breadth_First_Traversal_for_a_Graph.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}{"seq_id":"13216012533","text":"from random import choice\n\nrjecnik = ['abeceda','godina','riječ','škola']\n\nrijec = choice(rjecnik)\n\nn = 0\nnP = 0\n\npogodak=[]\n\nfor i in range(0,len(rijec)):\n    pogodak.append('_')\n\nwhile True:\n    print(' '.join(pogodak))\n    pokusaj=str(input(\"Unesite slovo kao pogodak: \"))\n    for i in range(0,len(rijec)):\n        if pokusaj == rijec[i]:\n            pogodak[i]=rijec[i]\n            n+=1\n    nP+=1\n    if n==len(pogodak):\n        break\nprint(' '.join(pogodak))\n\nprint(\"BRAVO!!! 
Riječ je bila \",rijec,\".\")\nprint(\"Trebalo ti je \",nP,\" pokušaja da ju pogodiš.\")\n","repo_name":"KingOfBadHabits/TiF","sub_path":"PRG/Python/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"hr","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9897749666","text":"from odoo import fields, models\nimport tempfile\nimport os\nimport base64\nimport pandas as pd\nimport time\nimport datetime\n\n\nclass ReportDangKyNhuCau(models.TransientModel):\n _name = \"report_dang_ky_nhu_cau\"\n _description = \"Report đăng ký nhu cầu\"\n\n dot_dang_ky_nhu_cau_id = fields.Many2one(\n comodel_name=\"dot_dang_ky_nhu_cau\", string=\"Đợt đăng ký nhu cầu\")\n\n def export_report(self):\n export_data = {}\n danh_sach_nhu_cau_theo_dot = self.env[\"nv_hoc_phan\"].search([\n (\"dot_dk_nhu_cau_id\", \"=\", self.dot_dang_ky_nhu_cau_id.id)\n ])\n for record in danh_sach_nhu_cau_theo_dot:\n key_hoc_phan = (record.ten_hoc_phan,\n record.hoc_phan_id.ma_hoc_phan_moi)\n if key_hoc_phan not in export_data:\n export_data[key_hoc_phan] = {\n \"tongSoSinhVien\": 1,\n \"tongHocPhi\": record.hoc_phi,\n }\n else:\n export_data[key_hoc_phan][\"tongSoSinhVien\"] += 1\n export_data[key_hoc_phan][\"tongHocPhi\"] += record.hoc_phi\n fd, path = tempfile.mkstemp()\n df = pd.DataFrame(columns=[\n \"tenHocPhan\", \"maHocPhan\", \"tongSoSinhVien\", \"tongHocPhi\"\n ])\n phieu_dk_nhu_cau = self.env[\"phieu_dang_ky_hoc_phan\"].search([\n (\"dot_dang_ky_id\", \"=\", self.dot_dang_ky_nhu_cau_id.id),\n (\"nv_hoc_phan_id\", \"!=\", False),\n ])\n tongSinhVienDangKy = len(phieu_dk_nhu_cau)\n tongSinhVienToanKhoa = 0\n for khoi_lop in self.dot_dang_ky_nhu_cau_id.khoi_lop_ids:\n for lop_hanh_chinh in khoi_lop.lop_hanh_chinh_ids:\n tongSinhVienToanKhoa += len(lop_hanh_chinh.sinh_vien_ids)\n for hp in export_data:\n tongSoSinhVien = export_data[hp][\"tongSoSinhVien\"]\n tongHocPhi = export_data[hp][\"tongHocPhi\"]\n df = df.append(\n {\n \"tenHocPhan\": hp[0],\n \"maHocPhan\": hp[1],\n \"tongSoSinhVien\": tongSoSinhVien,\n \"tongHocPhi\": tongHocPhi,\n },\n ignore_index=True)\n df = df.append(\n {\n \"tenHocPhan\":\n f\"Tổng số sinh viên đã đăng ký: {tongSinhVienDangKy}\",\n \"maHocPhan\": \"\",\n \"tongSoSinhVien\": \"\",\n \"tongHocPhi\": \"\",\n },\n ignore_index=True)\n df = df.append(\n {\n \"tenHocPhan\":\n f\"Tổng số sinh viên toàn đợt đăng ký: {tongSinhVienToanKhoa}\",\n \"maHocPhan\": \"\",\n \"tongSoSinhVien\": \"\",\n \"tongHocPhi\": \"\",\n },\n ignore_index=True)\n df.to_csv(path, index=False, encoding=\"utf-8-sig\")\n result = base64.b64encode(os.fdopen(fd, \"rb\").read())\n attachment = self.env['ir.attachment'].create({\n 'name': \"tong_hop_so_lieu_nhu_cau.csv\",\n 'store_fname': 'thsl.csv',\n 'datas': result\n })\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/content/%s?download=true' % (attachment.id),\n 'target': 'self',\n }\n\n\nclass ExportTemplate(models.TransientModel):\n _name = \"export_template\"\n _description = \"Export templates\"\n\n hinh_thuc_dao_tao = fields.Many2one(comodel_name=\"hinh_thuc_dao_tao\",\n string=\"Hình thức đào tạo\",\n default=lambda self: self.env.user.hinh_thuc_dao_tao_id)\n \n co_masv = fields.Boolean(\"Đã có mã sinh viên\")\n\n def export_mau_import_sv_nv(self):\n fd, path = tempfile.mkstemp(suffix='.xlsx')\n \n import_template = pd.DataFrame()\n if self.co_masv:\n import_template = pd.DataFrame(columns=[\"Mã sinh viên\",\n \"Ngành học\", \"Họ và tên\", \"Họ đệm\", \"Tên\", \"Ngày sinh\", 
\"Email\",\n \"Chứng minh nhân dân/Căn cước công dân\", \"Số điện thoại\"\n ])\n elif not self.co_masv:\n import_template = pd.DataFrame(columns=[\n \"Ngành học\", \"Họ và tên\", \"Họ đệm\", \"Tên\", \"Ngày sinh\", \"Email\",\n \"Chứng minh nhân dân/Căn cước công dân\", \"Số điện thoại\"\n ])\n \n import_template.to_excel(path, index=False)\n result = base64.b64encode(os.fdopen(fd, \"rb\").read())\n attachment = self.env['ir.attachment'].create({\n 'name': \"mau_import_sv_nv.xlsx\",\n 'store_fname': 'test.xlsx',\n 'datas': result\n })\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/content/%s?download=true' % (attachment.id),\n 'target': 'self',\n }\n\nclass ExportHoaDon(models.TransientModel):\n _name = \"export_hoa_don\"\n _description = \"Export Hóa Đơn\"\n\n hinh_thuc_dao_tao_id = fields.Many2one(\n comodel_name=\"hinh_thuc_dao_tao\",\n ondelete=\"set null\",\n string=\"Hình thức đào tạo\",\n )\n ngay_bat_dau = fields.Datetime(\"Từ ngày\")\n ngay_ket_thuc = fields.Datetime(\"Đến ngày\")\n khoa_nganh_ids = fields.Many2one(comodel_name=\"khoa_nganh\",\n string=\"Danh sách khóa ngành\")\n trang_thai_thanh_toan = fields.Selection([(\"chua_thanh_toan\", \"Hóa đơn chưa thanh toán\"),\n (\"da_thanh_toan\", \"Hóa đơn đã thanh toán\"),\n (\"tat_ca\", \"Tất cả hóa đơn\")], string=\"Trạng thái hóa đơn\", required=True)\n\n def action_export_bao_cao_hoa_don(self):\n fd, path = tempfile.mkstemp(suffix='.xlsx')\n\n df2 = pd.DataFrame(columns=[\"Ngày ghi sổ\", \"Số chứng từ\", \"Số hóa đơn\", \"Hình thức thanh toán\", \"Mã đối tượng\", \"Tên đối tượng\",\n \"Mã lớp\", \"Tên lớp\", \"Mã khoản thu\", \"Tên khoản thu\", \"Nội dung\", \"Đơn vị tính\", \"Số lượng\",\n \"Đơn giá\", \"Số tiền\", \"Số tiền Nợ\", \"Số tiền thực Nợ\", \"Kỳ thu phí\", \"Mã loại, TĐ ĐT\", \"Tên loại hình đào tạo\", \"Đối tượng TK Nợ\",\n \"Tên đối tượng TK Nợ\", \"Đối tượng TK Có\", \"Tên đối tượng TK Có\", \"Địa chỉ\", \"Mã thanh toán\"])\n\n domain = []\n if self.hinh_thuc_dao_tao_id:\n domain.append((\"hinh_thuc_dao_tao_id\", \"=\", self.hinh_thuc_dao_tao_id.id))\n if self.ngay_bat_dau:\n # ngay_bat_dau = self.ngay_bat_dau - datetime.timedelta(hours=7, minutes=0)\n ngay_bat_dau = self.ngay_bat_dau\n domain.append((\"write_date\", \">=\", ngay_bat_dau))\n if self.trang_thai_thanh_toan == \"da_thanh_toan\":\n domain.append((\"ngay_nop_tien\", \">=\", ngay_bat_dau))\n if self.ngay_ket_thuc:\n # ngay_ket_thuc = self.ngay_ket_thuc - datetime.timedelta(hours=7, minutes=0)\n ngay_ket_thuc = self.ngay_ket_thuc\n domain.append((\"write_date\", \"<=\", ngay_ket_thuc))\n if self.trang_thai_thanh_toan == \"da_thanh_toan\":\n domain.append((\"ngay_nop_tien\", \"<=\", ngay_ket_thuc))\n print(self.ngay_bat_dau, self.ngay_ket_thuc)\n danh_sach_hoa_don = self.env[\"qldt.hoa_don\"].search(domain)\n print(danh_sach_hoa_don)\n for vl in danh_sach_hoa_don:\n if self.trang_thai_thanh_toan == \"da_thanh_toan\":\n if vl.so_tien_da_nhan > 0 or vl.trang_thai == '0' or vl.trang_thai == '1' or vl.trang_thai == '2':\n df2 = df2.append(\n {\n \"Ngày ghi sổ\": vl.ngay_nop_tien + datetime.timedelta(hours=7, minutes=0),\n \"Số chứng từ\": \"\",\n \"Số hóa đơn\": \"\",\n \"Hình thức thanh toán\": \"CK2\",\n \"Mã đối tượng\": vl.sinh_vien_id.ma_dinh_danh,\n \"Tên đối tượng\": vl.sinh_vien_id.name,\n \"Mã lớp\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Tên lớp\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Mã khoản thu\": vl.khoan_thu_id.ma_khoan_thu,\n \"Tên khoản thu\": vl.khoan_thu_id.ten_khoan_thu,\n \"Nội dung\": 
vl.ma_hoa_don,\n \"Đơn vị tính\": \"\",\n \"Số lượng\": vl.so_luong_don_vi_dich_vu,\n \"Đơn giá\": vl.gia_tien_mot_dich_vu,\n \"Số tiền\": vl.tong_so_tien,\n \"Số tiền Nợ\": -(vl.cong_no_hoa_don),\n \"Số tiền thực Nợ\": -(vl.cong_no_hoa_don),\n \"Kỳ thu phí\": str(vl.ky_nam_hoc_id.ten_ky_nam_hoc + '/'+vl.ky_nam_hoc_id.nam_hoc_id.ten_nam_hoc),\n \"Mã loại, TĐ ĐT\": vl.hinh_thuc_dao_tao_id.ten_hinh_thuc_dao_tao,\n \"Tên loại hình đào tạo\": vl.hinh_thuc_dao_tao_id.ten_hinh_thuc_dao_tao,\n \"Đối tượng TK Nợ\": \"\",\n \"Tên đối tượng TK Nợ\": \"\",\n \"Đối tượng TK Có\": \"\",\n \"Tên đối tượng TK Có\": \"\",\n \"Địa chỉ\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Mã thanh toán\": vl.ma_thanh_toan,\n },\n ignore_index=True)\n elif self.trang_thai_thanh_toan == \"chua_thanh_toan\":\n if vl.so_tien_da_nhan == 0 or vl.trang_thai == '-1':\n df2 = df2.append(\n {\n \"Ngày ghi sổ\": vl.write_date + datetime.timedelta(hours=7, minutes=0),\n \"Số chứng từ\": \"\",\n \"Số hóa đơn\": \"\",\n \"Hình thức thanh toán\": \"CK2\",\n \"Mã đối tượng\": vl.sinh_vien_id.ma_dinh_danh,\n \"Tên đối tượng\": vl.sinh_vien_id.name,\n \"Mã lớp\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Tên lớp\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Mã khoản thu\": vl.khoan_thu_id.ma_khoan_thu,\n \"Tên khoản thu\": vl.khoan_thu_id.ten_khoan_thu,\n \"Nội dung\": vl.ma_hoa_don,\n \"Đơn vị tính\": \"\",\n \"Số lượng\": vl.so_luong_don_vi_dich_vu,\n \"Đơn giá\": vl.gia_tien_mot_dich_vu,\n \"Số tiền\": vl.tong_so_tien,\n \"Số tiền Nợ\": -(vl.cong_no_hoa_don),\n \"Số tiền thực Nợ\": -(vl.cong_no_hoa_don),\n \"Kỳ thu phí\": str(vl.ky_nam_hoc_id.ten_ky_nam_hoc + '/'+vl.ky_nam_hoc_id.nam_hoc_id.ten_nam_hoc),\n \"Mã loại, TĐ ĐT\": vl.hinh_thuc_dao_tao_id.ten_hinh_thuc_dao_tao,\n \"Tên loại hình đào tạo\": vl.hinh_thuc_dao_tao_id.ten_hinh_thuc_dao_tao,\n \"Đối tượng TK Nợ\": \"\",\n \"Tên đối tượng TK Nợ\": \"\",\n \"Đối tượng TK Có\": \"\",\n \"Tên đối tượng TK Có\": \"\",\n \"Địa chỉ\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Mã thanh toán\": vl.ma_thanh_toan,\n },\n ignore_index=True)\n elif self.trang_thai_thanh_toan == \"tat_ca\":\n df2 = df2.append(\n {\n \"Ngày ghi sổ\": vl.ngay_nop_tien + datetime.timedelta(hours=7, minutes=0),\n \"Số chứng từ\": \"\",\n \"Số hóa đơn\": \"\",\n \"Hình thức thanh toán\": \"CK2\",\n \"Mã đối tượng\": vl.sinh_vien_id.ma_dinh_danh,\n \"Tên đối tượng\": vl.sinh_vien_id.name,\n \"Mã lớp\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Tên lớp\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Mã khoản thu\": vl.khoan_thu_id.ma_khoan_thu,\n \"Tên khoản thu\": vl.khoan_thu_id.ten_khoan_thu,\n \"Nội dung\": vl.ma_hoa_don,\n \"Đơn vị tính\": \"\",\n \"Số lượng\": vl.so_luong_don_vi_dich_vu,\n \"Đơn giá\": vl.gia_tien_mot_dich_vu,\n \"Số tiền\": vl.tong_so_tien,\n \"Số tiền Nợ\": -(vl.cong_no_hoa_don),\n \"Số tiền thực Nợ\": -(vl.cong_no_hoa_don),\n \"Kỳ thu phí\": str(\n vl.ky_nam_hoc_id.ten_ky_nam_hoc + '/' + vl.ky_nam_hoc_id.nam_hoc_id.ten_nam_hoc),\n \"Mã loại, TĐ ĐT\": vl.hinh_thuc_dao_tao_id.ten_hinh_thuc_dao_tao,\n \"Tên loại hình đào tạo\": vl.hinh_thuc_dao_tao_id.ten_hinh_thuc_dao_tao,\n \"Đối tượng TK Nợ\": \"\",\n \"Tên đối tượng TK Nợ\": \"\",\n \"Đối tượng TK Có\": \"\",\n \"Tên đối tượng TK Có\": \"\",\n \"Địa chỉ\": vl.sinh_vien_id.lop_hanh_chinh_id.ten_lop_hanh_chinh,\n \"Mã thanh toán\": vl.ma_thanh_toan,\n },\n ignore_index=True)\n with pd.ExcelWriter(path) as writer:\n df2.to_excel(writer,\n 
sheet_name='Báo cáo hóa đơn',\n                         index=False)\n        result = base64.b64encode(os.fdopen(fd, \"rb\").read())\n        attachment = self.env['ir.attachment'].create({\n            'name': \"bao_cao_hoa_don.xlsx\",\n            'store_fname': 'thsl.xlsx',\n            'datas': result\n        })\n        return {\n            'type': 'ir.actions.act_url',\n            'url': '/web/content/%s?download=true' % (attachment.id),\n            'target': 'self',\n        }\n\n\n\n\n\n\n","repo_name":"nminhquang380/odoo","sub_path":"wizard/custom_report.py","file_name":"custom_report.py","file_ext":"py","file_size_in_byte":14709,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"2478382570","text":"from flask import Flask, request, render_template\r\nimport time\r\nimport requests\r\nimport t_models\r\nfrom database import Base, session, engine\r\n\r\napp = Flask(__name__)\r\n# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n\r\n#create new tables\r\nBase.metadata.create_all(bind=engine)\r\n\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef bot():\r\n    if request.method == 'GET':\r\n#        t_models.get_tweets()\r\n#        return {\r\n#            \"status\": \"retweets okay\",\r\n\r\n#        }\r\n        print('doing manual check') \r\n        print({'status': 'checking walmart'})\r\n        time.sleep(3)\r\n        print({'status': 'checking best buy'})\r\n        time.sleep(3)\r\n        print({'status': 'checking target'})\r\n        time.sleep(3)\r\n        print({'inventory status': 'sold out'})\r\n        time.sleep(1)\r\n        print({'message type': 'text'})\r\n        time.sleep(1)\r\n        print({'text mae': False})\r\n        \r\n        \r\n    else:\r\n        title = request.form['title']\r\n        # NOTE: 'm' is not defined in this module; a helper exposing\r\n        # build_imgurl (e.g. in t_models) is presumably intended here.\r\n        poster_url = m.build_imgurl(title)\r\n        r = requests.get(poster_url)\r\n        if r.status_code != 200:\r\n            message = \"No match\"\r\n        return {\"status\": \"no retweets\"}\r\n\r\n    # return render_template(\"photo.html\", image=poster_url)\r\n\r\n    \r\n    \r\n    \r\n    \r\nif __name__ == \"__main__\":\r\n    app.run()\r\n","repo_name":"jakeb1090/Twitter-sentiment-aggregator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"73323244426","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.template.loader import render_to_string\n\n# Create your views here.\n\ndef home(request):\n    return render(request, \"site/home.html\", {})\n\ndef book_list(request):\n    return render(request, 'app/book_list.html', {})\n\ndef save_book_form(request, form, template_name):\n    data = dict()\n    if request.method == 'POST':\n        if form.is_valid():\n            form.save()\n            data['form_is_valid'] = True\n            #books = Book.objects.all()\n            books = None\n            data['html_book_list'] = render_to_string('app/includes/partial_book_list.html', {\n                'books': books\n            })\n        else:\n            data['form_is_valid'] = False\n    context = {'form': form}\n    data['html_form'] = render_to_string(template_name, context, request=request)\n    return JsonResponse(data)\n\n# def book_create(request):\n#     if request.method == 'POST':\n#         form = None\n#     return save_book_form(request, form, 'app/includes/calc_modal.html')\n\ndef book_create(request):\n    data = dict()\n    data['html_form'] = render_to_string('app/includes/calc_modal.html', request=request)\n    return JsonResponse(data)\n\ndef app_calc_res(request):\n    return render(request, 'app/app_resume.html', {})\n\n\ndef app_calc_form(request):\n    data = dict()\n    pointhx=dict()\n    wynik = 0\n\n    if request.method == 'POST':\n        data['form_is_valid'] = True\n        tdb = request.POST.get('tdb')\n        pointhx['tdb']=tdb\n        print(tdb)\n        fi = 
request.POST.get('fi')\n pointhx['fi']=fi\n print(fi)\n wynik = (int(tdb)*int(fi))\n print(str(wynik))\n\n data['html_book_list'] = render_to_string('app/app_resume.html', {'wynik': wynik})\n \n\n #data['html_form'] = render_to_string('app/includes/calc_me.html', {'wynik': wynik}, request=request)\n data['html_form'] = render_to_string('app/includes/calc_me.html', {'wynik': wynik, 'pointhx':pointhx}, request=request)\n return JsonResponse(data)\n ","repo_name":"pwlewandowski/modal_ajax","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1865312736","text":"from copy import deepcopy # pylint: disable=C0302\nimport numpy as np\n\nfrom te import tvm\nfrom te.platform import cce_params\nfrom te.platform.cce_conf import intrinsic_check_support\nfrom te.platform.cce_conf import api_check_support\nfrom te.platform.cce_params import scope_cc\nfrom te.platform.cce_params import scope_cbuf\nfrom te.platform.cce_params import scope_cb\nfrom te.platform.cce_params import scope_smask\nfrom te.platform.cce_params import scope_ca\nfrom te.platform.cce_params import scope_ubuf\nfrom te.platform.cce_params import scope_gm\nfrom te.platform.cce_params import VEC\nfrom te.platform.cce_params import HI3796CV300ES\nfrom te.platform.cce_params import AIC\nfrom te.platform.cce_params import ASCEND_910\nfrom te.platform.cce_params import ASCEND_310AIC\nfrom te.platform.cce_params import ASCEND_910AIC\nfrom te.platform.cce_params import HI3796CV300ESAIC\nfrom te.platform.cce_params import HI3796CV300CSAIC\nfrom te.platform.cce_params import ASCEND_610\nfrom te.platform.cce_params import ASCEND_620\nfrom te.tik.common.util import TikUtil, DTYPE_SIZE, ceil_div, get_bit_len,\\\n check_integer_in_range, reduce_mul, check_vnchwconv_overflow,\\\n is_immediate_number, is_basic_expr, check_scalar_dtype, \\\n check_scalar_dtype_float\nfrom .tik_expr import Expr\nfrom .. 
import debug\nfrom ..api.tik_ir_builder import TikIRBuilder\nfrom .tik_util import type_convert, concat_params, change_dtype_str, \\\n emit_scatter_instr, set_tensor_addr_to_scalar\nfrom ..api.tik_scalar import mask_concat, Scalar\nfrom ..api.tik_tensor import get_addr_list, Tensor\nfrom .tik_params import VA_REG, PAD_LENGTH,\\\n MAX_PADDING, MAX_TENSOR_WIDTH, MAX_TENSOR_HEIGHT,\\\n MIN_TENSOR_WIDTH, MIN_TENSOR_HEIGHT, MAX_FETCH_POS,\\\n MAX_START_POINT, MIN_START_POINT, MAX_STRIDE, MIN_REPEAT_TIMES,\\\n MIN_STRIDE, MAX_FILTER_WIDTH, MIN_FILTER_WIDTH,\\\n MAX_FILTER_HEIGHT, MIN_FILTER_HEIGHT, MAX_DILATION,\\\n MIN_DILATION, MAX_VA_ADDR_NUM, MAX_BLK_STRIDE_DOUBLE_BYTE,\\\n PIPE_V, MAX_REP_STRIDE_DOUBLE_BYTE, MAX_REP_STRIDE_SINGLE_BYTE,\\\n MAX_REPEAT_TIMES, MAX_SID, MAX_DST_GAP_DOUBLE_BYTE,\\\n MAX_START_INDEX, PIPE_MTE1, MAX_REPEAT_MODE, VA0_INDEX, VA2_INDEX, PIPE_S,\\\n DST_TYPE_LEN, MIN_NBURST, MAX_NBURST_SINGLE_BYTE, MAX_DST_GAP_SINGLE_BYTE,\\\n MAX_SRC_GAP, MIN_BURST_REPEAT, MAX_BURST_REPEAT, ONE_BYTE_BIT_LEN,\\\n ONE_REP_BYTE_SIZE, PADDING_LEFT_IDX, PADDING_RIGHT_IDX, PADDING_TOP_IDX,\\\n PADDING_BOT_IDX, FMATRIX_OFFSET_LIST, FMATRIX_SEGMENT_LIST,\\\n LOAD3DV1_REG_XM_OFFSET_LIST, LOAD3DV1_REG_XM_SEGMENT_LIST,\\\n LOAD3DV1_REG_XT_OFFSET_LIST, LOAD3DV1_REG_XT_SEGMENT_LIST,\\\n COL2IMG_REG_XT_OFFSET_LIST, COL2IMG_REG_XT_SEGMENT_LIST, BLK_NUM_PER_REP,\\\n REG_FCOL2IMG_OFFSET_LIST, REG_FCOL2IMG_SEGMENT_LIST, MASK_VALUE_128,\\\n COL2IMG_REG_XM_OFFSET_LIST, COL2IMG_REG_XM_SEGMENT_LIST,\\\n CONV_RELU_VECTOR_QUANT, CONV_RELU_QUANT, BYTE_SIZE, SCALE_ADDR_BIT_POS,\\\n ONE_BLK_SIZE, MIN_M_LEN, MAX_M_LEN, MIN_CHANNELS, MAX_CHANNELS,\\\n V4DTRANS_OFFSET_LIST, V4DTRANS_SEGMENT_LIST, MAX_PAD_MODE,\\\n MAX_STRIDE_UNIT, VPADDING_OFFSET_LIST, VPADDING_SEGMENT_LIST,\\\n MIN_EXTENSION, MAX_EXTENSION, MAX_START_PT, LOAD3DV2_REG_XM_OFFSET_LIST,\\\n LOAD3DV2_REG_XM_SEGMENT_LIST, LOAD3DV2_REG_XT_OFFSET_LIST,\\\n LOAD3DV2_REG_XT_SEGMENT_LIST, ONE_IR, TWO_IR, MASK_VALUE_64,\\\n VNCHWCONV_LIST_LEN,\\\n ELE_PER_FRACTAL_EDGE, MAX_C_SIZE, PIPE_MTE3, PIPE_MTE2, BYTE_PER_FRACTAL,\\\n MAX_BLK_STRIDE_SINGLE_BYTE, VALUE_128, VALUE_127, LOAD_SMASK_OFFSET_LIST,\\\n LOAD_SMASK_SEGMENT_LIST, MAX_NBURST_DOUBLE_BYTE, MIN_BURST_LEN,\\\n MAX_BURST_LEN_DOUBLE_BYTE, MAX_PADMODE, PADMODE_NO_PADDING, INC_MODE,\\\n DEC_MODE, HAS_PARAM_CONCAT, NEED_PARAM_CONCAT,\\\n PADDING_ONE_BYTE_OFFSET_LIST, PADDING_ONE_BYTE_SEGMENT_LIST,\\\n PADDING_TWO_BYTE_OFFSET_LIST, PADDING_TWO_BYTE_SEGMENT_LIST, MAX_C1_INDEX,\\\n MAX_JUMP_OFFSET, MIN_JUMP_OFFSET, MAX_BURST_LEN_SINGLE_BYTE, STRIDES_LEN,\\\n DST_BLK_STRIDE_IDX, SRC_BLK_STRIDE_IDX, DEQSCALE_SHIFT_POS,\\\n SCALE_SHIFT_POS, TENSOR_PADDING_OFFSET_LIST, TENSOR_PADDING_SEGMENT_LIST,\\\n THREE_IR, VSCATTER_VGATHER_XT_OFFSET_LIST, \\\n VSCATTER_VGATHER_XT_SEGMENT_LIST, UINT_MIN, UINT8_MAX, \\\n INT8_MIN, INT8_MAX, AIPP0_OFFSET_LIST, \\\n AIPP0_SEGMENT_LIST, AIPP1_OFFSET_LIST, AIPP1_SEGMENT_LIST, \\\n AIPP2_OFFSET_LIST, AIPP2_SEGMENT_LIST, AIPP3_OFFSET_LIST, \\\n AIPP3_SEGMENT_LIST, AIPP4_OFFSET_LIST, AIPP4_SEGMENT_LIST, \\\n AIPP5_OFFSET_LIST, AIPP5_SEGMENT_LIST, AIPP6_OFFSET_LIST, \\\n AIPP6_SEGMENT_LIST, AIPP7_OFFSET_LIST, AIPP7_SEGMENT_LIST, \\\n AIPP8_OFFSET_LIST, AIPP8_SEGMENT_LIST, AIPP9_OFFSET_LIST, \\\n AIPP9_SEGMENT_LIST, AIPP10_OFFSET_LIST, AIPP10_SEGMENT_LIST, \\\n AIPP11_OFFSET_LIST, AIPP11_SEGMENT_LIST, AIPP12_OFFSET_LIST, \\\n AIPP12_SEGMENT_LIST, \\\n AIPP13_OFFSET_LIST, AIPP13_SEGMENT_LIST, AIPP15_OFFSET_LIST, \\\n AIPP15_SEGMENT_LIST, AIPP16_OFFSET_LIST, AIPP16_SEGMENT_LIST, \\\n 
AIPP17_OFFSET_LIST, AIPP17_SEGMENT_LIST, BIT_16, YUV420, XRGB8888, \\\n NC1HWC0DI_INT8, NC1HWC0DI_FP16, RGB888, ARGB8888, YUYV, YUV422, AYUV444, \\\n YUV400, RAW10, RAW12, RAW16, RAW24, AIPP_INPUT_VERSON_AND_FUNCTION, \\\n SWAP, CSC, DTC, AERA_PADDING, PRE_CLIP, SCF, POST_CLIP, FLIP, STRETCH, \\\n RAW, AIPP_INPUT_TYPE_SWAP_ALIGN, AIPP_FORMAT_CONVERT, AIPP_INIT_VALUE, \\\n AIPP_ENABLE, AIPP_DISABLE, AIPP_INIT_FLOAT_VALUE_ZERO, \\\n AIPP_INIT_FLOAT_VALUE_ONE, SCALE_COF, RAW_TO_16_N, \\\n INSTR_DTYPE_SUPPORT_STATEMENT\nfrom .tik_params import WINO_WGT_OFFSET_LIST\nfrom .tik_params import WINO_WGT_SEGMENT_LIST\nfrom .tik_params import WINO_FM_XT_OFFSET_LIST\nfrom .tik_params import WINO_FM_XT_SEGMENT_LIST\nfrom .tik_params import WINO_FM_XM_OFFSET_LIST\nfrom .tik_params import WINO_FM_XM_SEGMENT_LIST\nfrom .tik_params import MAX_REP_DIR\nfrom .tik_params import MIN_ONTHEFLY_MODE\nfrom .tik_params import MAX_ONTHEFLY_MODE\nfrom .tik_params import SHIFT_BIT_POS_8\nfrom .tik_params import SHIFT_BIT_POS_2\nfrom .tik_params import VALUE_3\nfrom .tik_params import VALUE_1\nfrom .tik_params import MAX_COL_INDIC\nfrom .tik_params import MASK_LEN_CONTINOUS_MODE\nfrom .tik_params import MASK_HIGH_IDX\nfrom .tik_api_constants import DTYPE_MAP\nfrom .tik_api_constants import SCOPE_MAP\nfrom .tik_api_constants import LOAD3DV2_FUNC_MAP\nfrom .tik_api_constants import VNCHWCONV_INSTR_APPENDIX_MAP\nfrom .tik_api_constants import LOAD2D_DMA_LIST\nfrom .tik_api_constants import CR_MODE_MAP\nfrom .tik_api_constants import ARCHVERSION_ONTHEFLY\nfrom .tik_api_constants import WINO_PAD_MAP\nfrom .tik_api_util import check_repeat_times, set_ctrl_counter_mask, \\\n reset_ctrl_counter_mask, check_stride_unit, do_load3d_padding, \\\n check_weight_offset\nfrom .tik_vector_scatter_api_ import check_scatter_address_overlap\nfrom ..common.common_util import check_vector_stride, \\\n check_load3dv2_channel_size, check_load3dv2_m_extension, \\\n check_dilation_filter_size, check_load3dv2_k_extension, \\\n vector_tensor_overflow_check, check_tensor_overflow, \\\n check_address_overlapping, check_addr_overlap_v4dtrans, \\\n float16format2uint16, check_dict_and_not_none, check_aipp_one_src_overflow,\\\n check_aipp_two_src_overflow, aipp_get_enable_bit, \\\n cal_extent_stride_unit_mask, check_wino_ft_params, get_mask_len, \\\n get_vbi_src0_offset_need_size, check_vbi_overlap, \\\n check_vbi_src1_tensor_overflow, get_vbi_src1_tensor_need_size\nfrom ..common.tik_get_soc_name import get_soc_name\nfrom ..common.tik_get_soc_name import get_soc_core_type\nfrom .tik_check_util import TikCheckUtil\nfrom .tik_source_info import source_info_decorator\n\n\n_ADDR_MODE_BIT_INCREASE = 1\n_ADDR_MODE_BIT_DECREASE = 0\n_SRC_BURST_LEN_SIZE_ELE = 16\n_DST_BURST_LEN_SIZE_ELE = 256\n_SRC_GAP_SIZE_BYTE = 32\n_DEFAULT_STRIDE = 0\n_STRIDE_UNIT_ZERO = 0\n_STRIDE_UNIT_ONE = 1\n\n\ndef _regen_tensor_mov_scope(src, dst, block_mode):\n \"\"\" regenerate src_scope and dst_scope in tensor_mov\n\n Parameters\n ----------\n src: src operator\n dst: dst operator\n block_mode\n\n Returns\n -------\n src_scope\n dst_scope\n \"\"\"\n src_scope = SCOPE_MAP[src.scope]\n dst_scope = SCOPE_MAP[dst.scope]\n if src_scope == \"cc\":\n src_scope = src_scope + \"_\" + block_mode\n if dst_scope == \"cc\":\n dst_scope = dst_scope + \"_\" + block_mode\n return src_scope, dst_scope\n\n\ndef _calculate_winograd_ft_extent(repeat_times, src_rep_stride, dst_rep_stride,\n dst_blk_stride):\n \"\"\" calculate winograd_weight_transform src/dst extent\n\n Parameters\n ----------\n 
repeat_times: the number of iterations this instruction would be\n        executed\n    src_rep_stride: source repeat stride between the base source addresses\n                    of 2 successive iterations\n    dst_rep_stride: destination repeat stride between the destination\n                    addresses of 2 successive iterations\n    dst_blk_stride: inner destination stride between the 4 weight matrices\n                    to be written into L0B in one single iteration in unit\n                    of fractal matrix\n\n    Returns\n    -------\n    src_extent\n    dst_extent\n    \"\"\"\n    # one instr reads 9 fractals from L1\n    src_frac_num = 9\n    src_extent = ((repeat_times - 1) * src_rep_stride + src_frac_num) * \\\n                 BYTE_PER_FRACTAL\n    src_extent = Expr(src_extent).get()\n    # one instr writes 4 fractals to L0B\n    dst_frac_num = 4\n    dst_extent = ((repeat_times - 1) * dst_rep_stride + (dst_frac_num - 1) *\n                  dst_blk_stride + 1) * BYTE_PER_FRACTAL\n    dst_extent = Expr(dst_extent).get()\n    return src_extent, dst_extent\n\n\ndef cal_vbi_extent(mask_len, dst, src1, # pylint: disable=R0913\n                   src0_offset, horizontal_repeat_times, repeat_mode,\n                   dst_blk_stride, vertical_repeat_offset,\n                   vertical_repeat_times):\n    \"\"\"calculate dst/src0_offset/src1 extent for vbi instruction\"\"\"\n    # cal extent\n    dst_extent = Expr(get_vbi_dst_need_size(\n        mask_len, ONE_BLK_SIZE // DTYPE_SIZE[dst.dtype],\n        vertical_repeat_times, vertical_repeat_offset, dst_blk_stride)*\\\n                      DTYPE_SIZE[dst.dtype]).get()\n    src0_offset_extent = Expr(get_vbi_src0_offset_need_size(\n        src1.dtype, mask_len,\n        vertical_repeat_times*horizontal_repeat_times)*\\\n                              DTYPE_SIZE[src0_offset.dtype]).get()\n    src1_extent = Expr(get_vbi_src1_tensor_need_size(\n        src1.dtype, mask_len, repeat_mode,\n        vertical_repeat_times*horizontal_repeat_times)*\\\n                       DTYPE_SIZE[src1.dtype]).get()\n    return dst_extent, src0_offset_extent, src1_extent\n\n\ndef get_vbi_mask_len(mask):\n    \"\"\"get vbi mask_len corresponding to mask\n\n    Parameters\n    ----------\n    mask: effective elements of the operation\n    Returns\n    -------\n    mask_len: None (mask is a scalar-list), an immediate (mask is an\n              immediate or immediate-list) or a Scalar (mask is a Scalar)\n    \"\"\"\n    if not isinstance(mask, (list, tuple)):\n        mask = [mask]\n    if len(mask) == MASK_LEN_CONTINOUS_MODE:\n        mask_len = mask[MASK_HIGH_IDX]\n    else:\n        if is_basic_expr(TikUtil.to_list(mask)):\n            return None\n        mask_len, _ = get_mask_len(mask)\n    return mask_len\n\n\ndef get_vbi_dst_need_size(mask_len, block_len, vertical_repeat_times,\n                          dst_repeat_offset, dst_blk_stride):\n    \"\"\"for vbi instruction, get the size the dst operand needs, in elements;\n    when mask is a scalar/scalar-list, mask is treated as 128\n\n    Parameters\n    ----------\n    mask_len: length between lowest digit and top effective digit of mask\n    block_len: elements num per block(32B)\n    vertical_repeat_times: repeat_times in vertical direction\n    dst_repeat_offset: vertical repeat offset between dst address of\n                       iterations in the vertical direction\n    dst_blk_stride: destination block stride\n    Returns\n    -------\n    max_offset\n    \"\"\"\n    # for cal dst_extent when mask is scalar/scalar-list\n    if mask_len is None or is_basic_expr(TikUtil.to_list(mask_len)):\n        max_offset = (vertical_repeat_times - 1)*dst_repeat_offset + \\\n                     (BLK_NUM_PER_REP - 1)*dst_blk_stride*block_len + \\\n                     block_len\n        return max_offset\n    if mask_len % block_len == 0:\n        max_offset = (vertical_repeat_times - 1)*dst_repeat_offset + \\\n                     (mask_len // block_len - 1)*dst_blk_stride*block_len + \\\n                     block_len\n    else:\n        max_offset = (vertical_repeat_times - 1)*dst_repeat_offset + \\\n                     (mask_len // block_len)*dst_blk_stride*block_len + \\\n                     
mask_len % block_len\n    return max_offset\n\n\ndef check_vbi_dst_offset_overflow(dst, src0_offset, #pylint: disable=R0913\n                                  mask_len, horizontal_repeat_times,\n                                  vertical_repeat_times, dst_blk_stride,\n                                  dst_repeat_offset):\n    \"\"\"for vbi instruction, check whether dst and src0_offset overflow\n\n    Parameters\n    ----------\n    dst: operator dst\n    src0_offset: operator src0_offset\n    mask_len: length between lowest digit and top effective digit of mask\n    horizontal_repeat_times: repeat_times in horizontal direction\n    vertical_repeat_times: repeat_times in vertical direction\n    dst_blk_stride: destination block stride\n    dst_repeat_offset: vertical repeat offset between dst address of\n                       iterations in the vertical direction\n    Returns\n    -------\n    None\n    \"\"\"\n    if mask_len is None or is_basic_expr(TikUtil.to_list(mask_len)):\n        return\n    if is_basic_expr([horizontal_repeat_times, vertical_repeat_times]):\n        return\n    total_repeat_times = Expr(horizontal_repeat_times*\\\n                              vertical_repeat_times).eval_value()\n    if total_repeat_times is not None and total_repeat_times == 0:\n        return\n    block_len = ONE_BLK_SIZE // DTYPE_SIZE[dst.dtype]\n    # check src0_offset tensor overflow\n    valid_block_len = ceil_div(mask_len, block_len)\n    check_tensor_overflow((src0_offset,), valid_block_len,\n                          horizontal_repeat_times*vertical_repeat_times,\n                          (MIN_STRIDE,), (MIN_STRIDE,),\n                          (\"src0_offset\",))\n    # check dst tensor overflow\n    max_offset = get_vbi_dst_need_size(mask_len, block_len,\n                                       vertical_repeat_times, dst_repeat_offset,\n                                       dst_blk_stride)\n    offset = dst.offset\n    total_size = reduce_mul(dst.indice.origin_shape)\n    need_offset = Expr(max_offset + offset).eval_value()\n    if need_offset is not None:\n        TikCheckUtil.check_le(need_offset, total_size,\n                              \"dst tensor overflow, instruction need {} but \"\n                              \"only {}\".format(need_offset, total_size))\n\n\ndef _check_dst_overflow_load3dv2(k_start_pt, m_start_pt, k_extension,\n                                 m_extension, dst):\n    \"\"\"check dst tensor overflow for instruction load3dv2\n\n    Parameters\n    ----------\n    k_start_pt: k direction start position of the feature matrix\n    m_start_pt: m direction start position of the feature matrix\n    k_extension: k direction extension steps from the start position\n    m_extension: m direction extension steps from the start position\n    dst: destination operator\n\n    Returns\n    -------\n    None\n    \"\"\"\n    if all(Expr(value).eval_value() is not None for value in\n           [k_start_pt, m_start_pt, k_extension, m_extension, dst.offset]):\n        k_ext = ceil_div(k_extension*DTYPE_SIZE[dst.dtype], ONE_BLK_SIZE)*\\\n                ONE_BLK_SIZE // DTYPE_SIZE[dst.dtype]\n        m_ext = ceil_div(m_extension, ELE_PER_FRACTAL_EDGE)*ELE_PER_FRACTAL_EDGE\n        dst_expected_ele = Expr(k_ext*m_ext + dst.offset).eval_value()\n        dst_actual_ele = reduce_mul(dst.indice.origin_shape)\n        TikCheckUtil.check_ge(\n            dst_actual_ele, dst_expected_ele,\n            \"dst tensor overflow, expected dst shape: {}, actual dst shape: {}\"\n            .format(dst_expected_ele, dst_actual_ele))\n\n\ndef _calculate_extent_load2d(start_index, repeat_times, src_stride, dst_gap):\n    \"\"\"calculate src_extent and dst_extent of instruction load2dv1 and load2dv2\n\n    Parameters\n    ----------\n    start_index: start fractal index\n    repeat_times: number of repeated iterations\n    src_stride: stride of src tensor between adjacent data segments\n    dst_gap: gap of dst tensor between adjacent data segments\n\n    Returns\n    -------\n    src_extent\n    dst_extent\n    \"\"\"\n    src_extent = Expr((start_index + (repeat_times - 1) * src_stride + 1) *\n                      BYTE_PER_FRACTAL).get()\n    # repeat_times*512\n    
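# Worked example with illustrative values: repeat_times=4, dst_gap=1 gives\n    # dst_extent = 4*512 + 3*1*512 = 3584 bytes (one fractal is 512 bytes).\n    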
if dst_gap is None:\n        dst_gap = 0\n    dst_extent = Expr(repeat_times * BYTE_PER_FRACTAL + (repeat_times - 1) *\n                      dst_gap * BYTE_PER_FRACTAL).get()\n    return src_extent, dst_extent\n\n\ndef _load3dv2_check(k_extension, m_extension, m_start_pt, pad_value,\n                    channel_size):\n    # check extension\n    TikCheckUtil.check_type_match(\n        k_extension, (int, Scalar, Expr),\n        \"k_extension should be int, Scalar, Expr, input type of k_extension: \"\n        \"{}\".format(type(k_extension)))\n    check_scalar_dtype(k_extension,\n                       \"scalar_k_extension should be a scalar of int/uint\")\n    check_integer_in_range(\n        k_extension, range(MIN_EXTENSION, MAX_EXTENSION),\n        \"k_extension should be in the range of [1, 65535], \"\n        \"input k_extension: {}\".format(k_extension))\n    TikCheckUtil.check_type_match(\n        m_extension, (int, Scalar, Expr),\n        \"m_extension should be int, Scalar, Expr, input type of m_extension: \"\n        \"{}\".format(type(m_extension)))\n    check_scalar_dtype(m_extension,\n                       \"scalar_m_extension should be a scalar of int/uint\")\n    check_integer_in_range(\n        m_extension, range(MIN_EXTENSION, MAX_EXTENSION),\n        \"m_extension should be in the range of [1, 65535], \"\n        \"input m_extension: {}\".format(m_extension))\n    TikCheckUtil.check_type_match(\n        m_start_pt, (int, Scalar, Expr),\n        \"m_start_pt should be int, Scalar, Expr, input type of m_start_pt: \"\n        \"{}\".format(type(m_start_pt)))\n    check_scalar_dtype(m_start_pt,\n                       \"scalar_m_start_pt should be a scalar of int/uint\")\n    check_integer_in_range(\n        m_start_pt, range(MAX_START_PT),\n        \"m_start_pt should be in the range of [0, 65535], \"\n        \"input m_start_pt: {}\".format(m_start_pt))\n    if isinstance(m_start_pt, int):\n        m_start_pt_ele_align = 16\n        if m_start_pt % m_start_pt_ele_align != 0:\n            TikCheckUtil.raise_error(\n                \"m_start_pt should be a multiple of 16, input \"\n                \"m_start_pt: {}\".format(m_start_pt))\n    # check pad_value\n    if pad_value is not None:\n        TikCheckUtil.check_type_match(\n            pad_value, (int, float),\n            \"pad_value should be python int or float, input type of pad_value: \"\n            \"{}\".format(type(pad_value)))\n    # check channel_size\n    TikCheckUtil.check_type_match(\n        channel_size, (int, Scalar, Expr),\n        \"channel_size should be int, Scalar or Expr, input type of \"\n        \"channel_size: {}\".format(type(channel_size)))\n    check_scalar_dtype(channel_size,\n                       \"scalar_channel_size should be a scalar of int/uint\")\n\n\ndef _calculate_dst_extent_load3dv1(dst, repeat_mode, repeat_time, jump_offset):\n    \"\"\"\n    Calculate load3dv1's dst extent.\n    :param dst: dst tensor\n    :param repeat_mode: 0 or 1\n    :param repeat_time: number of repeats\n    :param jump_offset: offset the instruction jumps between repeats\n    :return: dst extent\n    \"\"\"\n    if repeat_mode == 0:\n        # dst_extent: repeat_time*512\n        dst_extent = Expr(repeat_time*BYTE_PER_FRACTAL).get()\n    elif repeat_mode == 1:\n        # dst_extent: (repeat_time - 1)*jump_offset*512 + 512\n        dst_extent = Expr((Expr(repeat_time) - 1)*Expr(jump_offset)*\n                          BYTE_PER_FRACTAL + BYTE_PER_FRACTAL).get()\n    else:\n        dst_extent = Expr(reduce_mul(dst.indice.origin_shape)*\n                          DTYPE_SIZE[dst.dtype]).get()\n    return dst_extent\n\n\ndef _calculate_extent_load3dv1(dst, repeat_mode, repeat_time, jump_offset):\n    \"\"\"\n    Calculate load3dv1's dst extent for the different repeat modes\n    :param dst: dst tensor\n    :param repeat_mode: 0 or 1\n    :param repeat_time: number of repeats\n    :param jump_offset: offset the instruction jumps between repeats\n    :return: dst extent\n    \"\"\"\n    # cal dst_extent, not support repeat_mode SCALAR\n    repeat_mode = Expr(repeat_mode).eval_value()\n    if repeat_mode is not None:\n        dst_extent = 
_calculate_dst_extent_load3dv1(\n dst, repeat_mode, repeat_time, jump_offset)\n else:\n dst_extent = (reduce_mul(dst.indice.origin_shape) - dst.offset)*\\\n DTYPE_SIZE[dst.dtype]\n dst_extent = Expr(dst_extent).get()\n return dst_extent\n\n\ndef _get_scope_str(scope_map, block_mode, src, dst):\n \"\"\"\n get scope in string format\n :param scope_map: the map store scope\n :param block_mode: data move's mode, like \"m\" \"v\" ...\n :param src: src tensor\n :param dst: dst tensor\n :return: scope string\n \"\"\"\n src_scope = scope_map[src.scope]\n dst_scope = scope_map[dst.scope]\n if src_scope == \"cc\":\n src_scope = src_scope + \"_\" + block_mode + str(\n get_bit_len(src.dtype))\n if dst_scope == \"cc\":\n dst_scope = dst_scope + \"_\" + block_mode + str(\n get_bit_len(dst.dtype))\n return src_scope + '2' + dst_scope\n\n\ndef _calculate_extent(block_mode, src, dst, params, # pylint: disable=R0913\n is_src, en_onthefly=False):\n \"\"\"\n calculate data move instruction's extent\n :param block_mode: data move's mode, like \"m\" \"v\" ...\n :param src: src tensor\n :param dst: dst tensor\n :param params: args\n :param is_src: if calculate src or calculate dst\n :param en_onthefly: if enable on-the-fly\n :return: src_extent or dst_extent\n \"\"\"\n # params: [nburst, burst_length, gap_block]\n burst_length_size = _cal_burst_len_size(block_mode, src, dst)\n if is_src:\n gap_block_size = _cal_src_gap_block_size(block_mode, src, dst)\n else:\n gap_block_size = _cal_dst_gap_block_size(block_mode, src, dst)\n if en_onthefly:\n # if enable on-the-fly, only low 8 bits store dst_stride\n params[2] = params[2] & 0x00FF\n\n extent = Expr(params[0]*params[1]*burst_length_size + (params[0] - 1)*\n params[2]*gap_block_size)\n return extent.get()\n\n\ndef _calculate_extent_broadcast_ub_to_l0c(dst, src, nburst, burst_len,\n strides):\n \"\"\"\n calculate extent of broadcast ub to l0c\n :param dst: dst tensor\n :param src: src tensor\n :param nburst: number of burst\n :param burst_len: burst length\n :param strides: src_stride and dst_stride\n :return: dst_extent and src_extent\n \"\"\"\n # strides: [dst_stride, src_stride]\n # Byte\n src_extent = Expr((nburst*burst_len*_SRC_BURST_LEN_SIZE_ELE\n *DTYPE_SIZE[src.dtype]) +\n ((nburst - 1)*strides[1]*ONE_BLK_SIZE)).get()\n # Byte\n dst_extent = Expr((nburst*burst_len*_DST_BURST_LEN_SIZE_ELE\n *DTYPE_SIZE[dst.dtype]) +\n ((nburst - 1)*strides[0]*_DST_BURST_LEN_SIZE_ELE\n *DTYPE_SIZE[dst.dtype])).get()\n return [dst_extent, src_extent]\n\n\ndef _cal_burst_len_size(block_mode, src, dst):\n \"\"\"\n get the space of per burst length\n :param block_mode: data move's mode, like \"m\" \"v\" ...\n :param src: src tensor\n :param dst: dst tensor\n :return: burst len size. Byte\n \"\"\"\n scope_str = _get_scope_str(SCOPE_MAP, block_mode, src, dst)\n # Byte. 
according to ISA DMA table\n    burst_len_size_map = {\n        \"cc_m322ubuf\": 1024,\n        \"cc_m162ubuf\": 512,\n        \"cc_v322ubuf\": 64,\n        \"cc_v162ubuf\": 32,\n        \"cc_sc32ubuf\": 256,\n        \"cc_dp16ubuf\": 256,\n        \"cc_dp32ubuf\": 512,\n        \"ubuf2cc_m32\": 1024,\n        \"ubuf2cc_m16\": 512,\n        \"ubuf2cc_v32\": 64,\n        \"ubuf2cc_v16\": 32,\n        \"ubuf2cc_sc32\": 256,\n    }\n    if scope_str == \"cc_dp32ubuf\":\n        if src.dtype == \"f32\":\n            return 512\n        if src.dtype == \"s32\":\n            return 1024\n    else:\n        if scope_str in burst_len_size_map:\n            return burst_len_size_map.get(scope_str)\n    return 32\n\n\ndef _cal_src_gap_block_size(block_mode, src, dst):\n    \"\"\"\n    get the size of one src gap block\n    :param block_mode: data move's mode, like \"m\" \"v\" ...\n    :param src: src tensor\n    :param dst: dst tensor\n    :return: src block size. Byte\n    \"\"\"\n    scope_str = _get_scope_str(SCOPE_MAP, block_mode, src, dst)\n    src_gap_size_map = {\n        \"cc_m322ubuf\": 1024,\n        \"cc_m162ubuf\": 512,\n        \"cc_v322ubuf\": 1024,\n        \"cc_v162ubuf\": 512,\n        \"cc_sc32ubuf\": 256,\n        \"cc_dp16ubuf\": 256,\n        \"cc_dp32ubuf\": 512,\n    }\n    if scope_str == \"cc_dp32ubuf\":\n        if src.dtype == \"f32\":\n            return 512\n        if src.dtype == \"s32\":\n            return 1024\n    else:\n        if scope_str in src_gap_size_map:\n            return src_gap_size_map.get(scope_str)\n    return 32\n\n\ndef _cal_dst_gap_block_size(block_mode, src, dst):\n    \"\"\"\n    get the size of one dst gap block\n    :param block_mode: data move's mode, like \"m\" \"v\" ...\n    :param src: src tensor\n    :param dst: dst tensor\n    :return: dst block size. Byte\n    \"\"\"\n    scope_str = _get_scope_str(SCOPE_MAP, block_mode, src, dst)\n    dst_gap_size_map = {\n        \"ubuf2cc_m32\": 1024,\n        \"ubuf2cc_m16\": 512,\n        \"ubuf2cc_v32\": 1024,\n        \"ubuf2cc_v16\": 512,\n        \"ubuf2cc_sc32\": 256,\n    }\n    if scope_str in dst_gap_size_map:\n        return dst_gap_size_map.get(scope_str)\n    return 32\n\n\ndef _check_src_overflow_brc(src, nburst, burst_len, src_gap):\n    \"\"\"\n    check whether the src tensor overflows\n    :param src: src tensor\n    :param nburst: number of burst\n    :param burst_len: burst length\n    :param src_gap: src gap\n    :return: None\n    \"\"\"\n    offset = src.offset\n    total_size = reduce_mul(src.indice.origin_shape)\n    byte_len = DTYPE_SIZE[src.dtype]\n    extend_offset = nburst*(burst_len*_SRC_BURST_LEN_SIZE_ELE +\n                            src_gap*_SRC_GAP_SIZE_BYTE // byte_len) -\\\n                    src_gap*_SRC_GAP_SIZE_BYTE // byte_len\n    if Expr(extend_offset + offset).eval_value() is not None:\n        TikCheckUtil.check_le(Expr(extend_offset + offset).eval_value(),\n                              total_size, \"src tensor overflow\")\n\n\ndef _check_dst_overflow_brc(dst, nburst, burst_len, dst_gap):\n    \"\"\"\n    check whether the dst tensor overflows\n    :param dst: dst tensor\n    :param nburst: number of burst\n    :param burst_len: burst length\n    :param dst_gap: dst gap\n    :return: None\n    \"\"\"\n    offset = dst.offset\n    total_size = reduce_mul(dst.indice.origin_shape)\n    extend_offset = nburst*(burst_len + dst_gap)*_DST_BURST_LEN_SIZE_ELE -\\\n                    dst_gap*_DST_BURST_LEN_SIZE_ELE\n    if Expr(extend_offset + offset).eval_value() is not None:\n        TikCheckUtil.check_le(Expr(extend_offset + offset).eval_value(),\n                              total_size, \"dst tensor overflow\")\n\n\ndef _dtype_convert(value, dtype):\n    \"\"\"convert a value to the given dtype\n\n    Parameters\n    ----------\n    value : the value to convert\n    dtype : str, the target dtype\n\n    Returns\n    -------\n    the converted value\n    \"\"\"\n    valuet = type_convert(value)\n    return valuet.astype(dtype)\n\n\ndef _load3dv1_load3dv2_col2img_check(pad, l1_w, l1_h, # pylint: disable=R0913\n                                     stride_w, stride_h, filter_w, filter_h,\n                                     dilation_filter_w, dilation_filter_h):\n    \"\"\"check load3dv2's and col2img's 
params\"\"\"\n # check pad\n TikCheckUtil.check_type_match(\n pad, (list, tuple), \"pad_list should be list or tuple, please specify \"\n \"padding: [left, right, top, bottom].\")\n TikCheckUtil.check_equality(len(pad), PAD_LENGTH,\n \"pad length should be 4, input pad length: \"\n \"{}\".format(len(pad)))\n TikCheckUtil.check_type_match(pad[PADDING_LEFT_IDX], (int, Scalar, Expr),\n \"pad[0] should be int, Scalar or Expr\")\n check_scalar_dtype(pad[PADDING_LEFT_IDX],\n \"scalar_pad[0] should be a scalar of int/uint\")\n check_integer_in_range(pad[PADDING_LEFT_IDX], range(MAX_PADDING),\n \"pad[0] should be in the range of [0, 255], \"\n \"input pad[0]: {}\".format(pad[PADDING_LEFT_IDX]))\n TikCheckUtil.check_type_match(pad[PADDING_RIGHT_IDX], (int, Scalar, Expr),\n \"pad[1] should be int, Scalar or Expr\")\n check_scalar_dtype(pad[PADDING_RIGHT_IDX],\n \"scalar_pad[1] should be a scalar of int/uint\")\n check_integer_in_range(pad[PADDING_RIGHT_IDX], range(MAX_PADDING),\n \"pad[1] should be in the range of [0, 255],\"\n \"input pad[1]: {}\".format(pad[PADDING_RIGHT_IDX]))\n TikCheckUtil.check_type_match(pad[PADDING_TOP_IDX], (int, Scalar, Expr),\n \"pad[2] should be int, Scalar or Expr\")\n check_scalar_dtype(pad[PADDING_TOP_IDX],\n \"scalar_pad[2] should be a scalar of int/uint\")\n check_integer_in_range(pad[PADDING_TOP_IDX], range(MAX_PADDING),\n \"pad[2] should be in the range of [0, 255], \"\n \"input pad[2]: {}\".format(pad[PADDING_TOP_IDX]))\n TikCheckUtil.check_type_match(pad[PADDING_BOT_IDX], (int, Scalar, Expr),\n \"pad[3] should be int, Scalar or Expr\")\n check_scalar_dtype(pad[PADDING_BOT_IDX],\n \"scalar_pad[3] should be a scalar of int/uint\")\n check_integer_in_range(pad[PADDING_BOT_IDX], range(MAX_PADDING),\n \"pad[3] should be in the range of [0, 255], \"\n \"input pad[3]: {}\".format(pad[PADDING_BOT_IDX]))\n # check feature map\n TikCheckUtil.check_type_match(l1_w, (int, Scalar, Expr),\n \"l1_w should be int, Scalar or Expr\")\n check_scalar_dtype(l1_w,\n \"scalar_l1_w should be a scalar of int/uint\")\n check_integer_in_range(l1_w, range(MIN_TENSOR_WIDTH, MAX_TENSOR_WIDTH),\n \"l1_w should be in the range of [1, 32767], \"\n \"input value is %s\" % str(l1_w))\n TikCheckUtil.check_type_match(l1_h, (int, Scalar, Expr),\n \"l1_h should be int, Scalar or Expr\")\n check_scalar_dtype(l1_h,\n \"scalar_l1_h should be a scalar of int/uint\")\n check_integer_in_range(l1_h, range(MIN_TENSOR_HEIGHT, MAX_TENSOR_HEIGHT),\n \"l1_h should be in the range of [1, 32767]\")\n # check stride\n TikCheckUtil.check_type_match(stride_w, (int, Scalar, Expr),\n \"stride_w should be int, Scalar or Expr\")\n check_scalar_dtype(stride_w,\n \"scalar_stride_w should be a scalar of int/uint\")\n check_integer_in_range(stride_w, range(MIN_STRIDE, MAX_STRIDE),\n \"stride_w should be in the range of [1, 63], \"\n \"input stride_w: {}\".format(stride_w))\n TikCheckUtil.check_type_match(stride_h, (int, Scalar, Expr),\n \"stride_h should be int, Scalar or Expr\")\n check_scalar_dtype(stride_h,\n \"scalar_stride_h should be a scalar of int/uint\")\n check_integer_in_range(stride_h, range(MIN_STRIDE, MAX_STRIDE),\n \"stride_h should be in the range of [1, 63], \"\n \"input stride_h: {}\".format(stride_h))\n # check filter\n TikCheckUtil.check_type_match(filter_w, (int, Scalar, Expr),\n \"filter_w should be int, Scalar or Expr\")\n check_scalar_dtype(filter_w,\n \"scalar_filter_w should be a scalar of int/uint\")\n check_integer_in_range(filter_w, range(MIN_FILTER_WIDTH, MAX_FILTER_WIDTH),\n \"filter_w should 
be in the range of [1, 255], \"\n \"input filter_w: {}\".format(filter_w))\n TikCheckUtil.check_type_match(filter_h, (int, Scalar, Expr),\n \"filter_h should be int, Scalar or Expr\")\n check_scalar_dtype(filter_h,\n \"scalar_filter_h should be a scalar of int/uint\")\n check_integer_in_range(\n filter_h, range(MIN_FILTER_HEIGHT, MAX_FILTER_HEIGHT),\n \"filter_h should be in the range of [1, 255], \"\n \"input filter_h: {}\".format(filter_h))\n # check dilation\n TikCheckUtil.check_type_match(\n dilation_filter_w, (int, Scalar, Expr),\n \"dilation_filter_w should be int, Scalar or Expr\")\n check_scalar_dtype(dilation_filter_w,\n \"scalar_dilation_filter_w \"\n \"should be a scalar of int/uint\")\n check_integer_in_range(\n dilation_filter_w, range(MIN_DILATION, MAX_DILATION),\n \"dilation_filter_w should be in the range of [1, 255], \"\n \"input dilation_filter_w: {}\".format(dilation_filter_w))\n TikCheckUtil.check_type_match(\n dilation_filter_h, (int, Scalar, Expr),\n \"dilation_filter_h should be int, Scalar or Expr\")\n check_scalar_dtype(dilation_filter_h,\n \"scalar_dilation_filter_h should be a scalar of\"\n \" int/uint\")\n check_integer_in_range(\n dilation_filter_h, range(MIN_DILATION, MAX_DILATION),\n \"dilation_filter_h should be in the range of [1, 255], \"\n \"input dilation_filter_h: {}\".format(dilation_filter_h))\n\n\ndef _load3dv1_col2img_check(fetch_filter_w, fetch_filter_h, left_top_w,\n left_top_h):\n \"\"\"check load3dv1's and col2img's params\"\"\"\n # check fetch pos in filter\n TikCheckUtil.check_type_match(\n fetch_filter_w, (int, Scalar, Expr),\n \"fetch_filter_w should be int, Scalar or Expr\")\n check_scalar_dtype(fetch_filter_w,\n \"scalar_fetch_filter_w should be a scalar of int/uint\")\n check_integer_in_range(fetch_filter_w, range(MAX_FETCH_POS),\n \"fetch_filter_w should be in the range of [0, 255], \"\n \"input fetch_filter_w: {}\".format(fetch_filter_w))\n TikCheckUtil.check_type_match(\n fetch_filter_h, (int, Scalar, Expr),\n \"fetch_filter_h should be int, Scalar or Expr\")\n check_scalar_dtype(fetch_filter_h,\n \"scalar_fetch_filter_h should be a scalar of int/uint\")\n check_integer_in_range(fetch_filter_h, range(MAX_FETCH_POS),\n \"fetch_filter_h should be in the range of [0, 255], \"\n \"input fetch_filter_h: {}\".format(fetch_filter_h))\n # check start-point\n TikCheckUtil.check_type_match(left_top_h, (int, Scalar, Expr),\n \"left_top_h should be int, Scalar or Expr\")\n check_scalar_dtype(left_top_h,\n \"scalar_left_top_h should be a scalar of int/uint\")\n check_integer_in_range(left_top_h, range(MIN_START_POINT, MAX_START_POINT),\n \"left_top_h should be in the range of [-255, 32767],\"\n \" input left_top_h: {}\".format(left_top_h))\n TikCheckUtil.check_type_match(left_top_w, (int, Scalar, Expr),\n \"left_top_w should be int, Scalar or Expr\")\n check_scalar_dtype(left_top_w,\n \"scalar_left_top_w should be a scalar of int/uint\")\n check_integer_in_range(left_top_w, range(MIN_START_POINT, MAX_START_POINT),\n \"left_top_w should be in the range of [-255, 32767],\"\n \" input left_top_w: {}\".format(left_top_w))\n\n\ndef _get_s32f16_deq_mode(deqscale):\n \"\"\"\n get deq mode when dtype is s32f16\n :param deqscale: deqscale\n :return: deq mode\n \"\"\"\n if not isinstance(deqscale, (float, int)):\n if not (isinstance(deqscale, (Scalar, Tensor))\n and (deqscale.dtype in ('uint64', 'float16'))):\n TikCheckUtil.raise_error(\"deqscale type error.\")\n if isinstance(deqscale, float) \\\n or (isinstance(deqscale, Scalar)\n and (deqscale.dtype in 
('float16',))): # deq\n deq_mode = 'deq'\n elif isinstance(deqscale, Tensor)\\\n and (deqscale.dtype in ('float16',)): # vdeq\n deq_mode = 'vdeq'\n elif isinstance(deqscale, int)\\\n or (isinstance(deqscale, Scalar)\n and (deqscale.dtype in ('uint64',))): # deq16\n deq_mode = 'deq16'\n else:\n deq_mode = 'vdeq16'\n return deq_mode\n\n\ndef _get_f16f16_deq_mode(deqscale):\n \"\"\"\n get deq mode when dtype is f16f16\n :param deqscale: deqscale\n :return: deq mode\n \"\"\"\n if deqscale is None:\n deq_mode = ''\n else:\n if not isinstance(deqscale, float):\n if not (isinstance(deqscale, Scalar) and\n (deqscale.dtype in ('float16',))):\n TikCheckUtil.raise_error(\"deqscale type error.\")\n deq_mode = 'deq'\n return deq_mode\n\n\ndef _get_s32s8u8_deq_mode(deqscale):\n \"\"\"\n get deq mode when dtype is s32s8 or s32u8\n :param deqscale: deqscale\n :return: deq mode\n \"\"\"\n if not isinstance(deqscale, int):\n if not (isinstance(deqscale, (Scalar, Tensor))\n and deqscale.dtype in 'uint64'):\n TikCheckUtil.raise_error(\"deqscale type error.\")\n if isinstance(deqscale, (int, Scalar)):\n deq_mode = 'deq8'\n else:\n deq_mode = 'vdeq8'\n return deq_mode\n\n\ndef _get_s32s16_deq_mode(deqscale):\n \"\"\"\n get deq mode when dtype is s32s16\n :param deqscale: deqscale\n :return: deq mode\n \"\"\"\n if not isinstance(deqscale, int):\n if not (isinstance(deqscale, (Scalar, Tensor))\n and deqscale.dtype in 'uint64'):\n TikCheckUtil.raise_error(\"deqscale type error.\")\n if isinstance(deqscale, (int, Scalar)):\n deq_mode = 'deqs16'\n else:\n deq_mode = 'vdeqs16'\n return deq_mode\n\n\ndef _make_deq_mode(dtype_str, deqscale):\n \"\"\"get deq_mode\"\"\"\n # deq/vdeq/deq16/vdeq16\n if dtype_str in (\"s32f16\",):\n deq_mode = _get_s32f16_deq_mode(deqscale)\n # deq\n elif dtype_str in (\"f16f16\",):\n deq_mode = _get_f16f16_deq_mode(deqscale)\n # deq8/vdeq8\n elif dtype_str in (\"s32s8\", \"s32u8\"):\n deq_mode = _get_s32s8u8_deq_mode(deqscale)\n # deqs16/vdeqs16\n elif dtype_str in (\"s32s16\",):\n deq_mode = _get_s32s16_deq_mode(deqscale)\n else:\n deq_mode = ''\n return deq_mode\n\n\ndef _get_addr_list(dst_list, src_list, extents):\n \"\"\"get addr list\"\"\"\n dst_addr_list0 = []\n dst_addr_list1 = []\n for index in range(MAX_VA_ADDR_NUM):\n get_addr_list(dst_addr_list0, dst_list[index], \"w\",\n extent=extents[0]) # dst_extent\n get_addr_list(dst_addr_list1, dst_list[index + MAX_VA_ADDR_NUM], \"w\",\n extent=extents[0]) # dst_extent\n\n src_addr_list0 = []\n src_addr_list1 = []\n for index in range(MAX_VA_ADDR_NUM):\n get_addr_list(src_addr_list0, src_list[index], \"r\",\n extent=extents[1]) # src_extent\n get_addr_list(src_addr_list1, src_list[index + MAX_VA_ADDR_NUM], \"r\",\n extent=extents[1]) # src_extent\n return dst_addr_list0, dst_addr_list1, src_addr_list0, src_addr_list1\n\n\ndef _get_load2d_dtype_str(src_scope, args, transpose_bit, # pylint: disable=R0913\n arch_version_str, addr_mode_bit, dtype_str, dst):\n \"\"\" get dtype str for load2d instruciton\"\"\"\n if src_scope in ('cbuf',):\n args.append(transpose_bit)\n if not arch_version_str in (ASCEND_310AIC,):\n args.append(addr_mode_bit)\n if dtype_str in (\"s4s4\", \"u4u4\"):\n dtype_str = \"int4\"\n elif dtype_str in (\"s16s16\", \"u16u16\"):\n dtype_str = \"float16\"\n else:\n dtype_str = dst.dtype\n return dtype_str\n\n\ndef _extend_args(param_key, args, argv):\n \"\"\"extend args\n\n Parameters\n ----------\n param_key : param name\n args : to extend args\n argv : to extend args\n\n Returns\n -------\n the extended args\n \"\"\"\n if 
args:\n        if argv:\n            TikCheckUtil.raise_error(\"argv should be None\")\n        TikCheckUtil.check_equality(len(args), 1, \"args length should be 1\")\n        return args\n    if argv:\n        if not ((len(argv.keys()) == 1) and (argv.get(param_key))):\n            TikCheckUtil.raise_error(\"argv value error\")\n        TikCheckUtil.check_type_match(\n            argv.get(param_key), (int, bool),\n            \"argv value of %s should be int or bool\" % (param_key))\n        return [argv.get(param_key)]\n    return [0]\n\n\ndef check_dma_instr_params(dst, src, nburst, burst_len, # pylint: disable=R0913\n                           src_stride, dst_stride, en_onthefly=False,\n                           src_onthefly_stride=0):\n    \"\"\"check params of data_move data_move_quant tensor_move\"\"\"\n    TikCheckUtil.check_type_match(dst, Tensor, \"dst should be tensor\")\n    TikCheckUtil.check_type_match(src, Tensor, \"src should be tensor\")\n    TikCheckUtil.check_type_match(nburst, (int, Scalar, Expr),\n                                  \"nburst should be int, Scalar or Expr\")\n    check_scalar_dtype(nburst,\n                       \"scalar_nburst should be a scalar of int/uint\")\n    TikCheckUtil.check_type_match(burst_len, (int, Scalar, Expr),\n                                  \"burst_len should be int, Scalar or Expr\")\n    check_scalar_dtype(burst_len,\n                       \"scalar_burst_len should be a scalar of int/uint\")\n    TikCheckUtil.check_type_match(src_stride, (int, Scalar, Expr),\n                                  \"src_stride should be int, Scalar or Expr\")\n    check_scalar_dtype(src_stride,\n                       \"scalar_src_stride should be a scalar of int/uint\")\n    TikCheckUtil.check_type_match(dst_stride, (int, Scalar, Expr),\n                                  \"dst_stride should be int, Scalar or Expr\")\n    check_scalar_dtype(dst_stride,\n                       \"scalar_dst_stride should be a scalar of int/uint\")\n    check_integer_in_range(nburst, range(MIN_NBURST, MAX_NBURST_DOUBLE_BYTE),\n                           \"nburst should be in the range of [1, 4095], input \"\n                           \"value is: {}\".format(nburst))\n    check_integer_in_range(\n        burst_len, range(MIN_BURST_LEN, MAX_BURST_LEN_DOUBLE_BYTE),\n        \"burst_len should be in the range of [1, 65535], input value is: \"\n        \"{}\".format(burst_len))\n    check_integer_in_range(src_stride, range(MAX_BLK_STRIDE_DOUBLE_BYTE),\n                           \"src_stride should be in the range of [0, 65535], \"\n                           \"input value is: {}\".format(src_stride))\n    if en_onthefly:\n        check_integer_in_range(\n            dst_stride, range(MAX_BLK_STRIDE_SINGLE_BYTE),\n            \"dst_stride should be in the range of [0, 255], input value is: \"\n            \"{}\".format(dst_stride))\n        check_integer_in_range(\n            src_onthefly_stride, range(VALUE_1),\n            \"tensor_mov onthefly doesn't support src_onthefly_stride which is \"\n            \"greater than 0, input src_onthefly_stride: %s\" %\n            str(src_onthefly_stride))\n        check_integer_in_range(\n            src_onthefly_stride, range(MAX_BLK_STRIDE_SINGLE_BYTE),\n            \"src_onthefly_stride should be in the range of [0, 255], input \"\n            \"value is: %s\" % (str(src_onthefly_stride)))\n    else:\n        check_integer_in_range(\n            dst_stride, range(MAX_BLK_STRIDE_DOUBLE_BYTE),\n            \"dst_stride should be in the range of [0, 65535], input value is: \"\n            \"{}\".format(dst_stride))\n\n\ndef _aipp_check_arch_version(arch_version):\n    \"\"\"check arch version\"\"\"\n    if arch_version not in [ASCEND_310AIC, ASCEND_910AIC,\n                            HI3796CV300ESAIC, AIC]:\n        TikCheckUtil.raise_error(\n            \"arch_version does not support aipp, \"\n            \"arch_version: {}\".format(arch_version))\n\n\ndef _aipp_check_dst(input_format, dst):\n    \"\"\"check dst tensor\"\"\"\n    TikCheckUtil.check_type_match(dst, Tensor,\n                                  \"dst should be tensor, \"\n                                  \"input: {}\".format(type(dst)))\n    dst_scope = SCOPE_MAP[dst.scope]\n    TikCheckUtil.check_equality(dst_scope, \"cbuf\",\n                                \"dst scope must be cbuf, \"\n                                \"input: {}\".format(dst_scope))\n\n    
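# AIPP output written to L1 must be uint8/int8/float16; the NC1HWC0DI\n    # input formats additionally pin the exact dtype (checked below).\n    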
TikCheckUtil.check_in_range(dst.dtype,\n [\"uint8\", \"int8\", \"float16\"],\n \"dst type error, \"\n \"input type: {}\".format(dst.dtype))\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is not None:\n if imm_input_format == NC1HWC0DI_INT8:\n TikCheckUtil.check_equality(dst.dtype, \"int8\",\n \"NC1HWC0DI_INT8 dst type must int8, \"\n \"input type: {}\".format(dst.dtype))\n elif imm_input_format == NC1HWC0DI_FP16:\n TikCheckUtil.check_equality(dst.dtype, \"float16\",\n \"NC1HWC0DI_FP16 dst type must float16, \"\n \"input type: {}\".format(dst.dtype))\n\n\ndef _aipp_check_src(input_format, src0, src1, src_horizontal_size,\n src_vertical_size):\n \"\"\"check src tensor\"\"\"\n TikCheckUtil.check_type_match(src0, Tensor,\n \"src0 should be Tensor, \"\n \"input: {}\".format(type(src0)))\n src0_scope = SCOPE_MAP[src0.scope]\n TikCheckUtil.check_equality(src0_scope, \"gm\",\n \"src0 scope must be gm, \"\n \"input scope: {}\".format(src0_scope))\n\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is not None:\n TikCheckUtil.check_in_range(\n src0.dtype,\n AIPP_INPUT_TYPE_SWAP_ALIGN[imm_input_format][\"type\"],\n \"src0 type error, input type: {}\".format(src0.dtype))\n\n imm_src_horizontal_size = Expr(src_horizontal_size).eval_value()\n imm_src_vertical_size = Expr(src_vertical_size).eval_value()\n if src1 is None:\n # check src extend\n if imm_src_horizontal_size is not None and \\\n imm_src_vertical_size is not None:\n check_aipp_one_src_overflow(src0, imm_input_format,\n imm_src_horizontal_size,\n imm_src_vertical_size)\n else:\n\n TikCheckUtil.check_type_match(src1, Tensor,\n \"src1 should be Tensor, \"\n \"input: {}\".format(type(src1)))\n src1_scope = SCOPE_MAP[src1.scope]\n TikCheckUtil.check_equality(src1_scope, \"gm\",\n \"src1 scope must be gm, \"\n \"input scope: {}\".format(src1_scope))\n\n # two input\n if imm_input_format in [YUV420, NC1HWC0DI_INT8,\n NC1HWC0DI_FP16, YUV422]:\n TikCheckUtil.check_in_range(\n src1.dtype,\n AIPP_INPUT_TYPE_SWAP_ALIGN[imm_input_format][\"type\"],\n \"src1 type error, input type: {}\".format(src1.dtype))\n\n # check src extend\n if imm_src_horizontal_size is not None and \\\n imm_src_vertical_size is not None:\n check_aipp_two_src_overflow(src0, src1, imm_input_format,\n imm_src_horizontal_size,\n imm_src_vertical_size)\n\n else:\n TikCheckUtil.raise_error(\"input_format not support two src\")\n\n\ndef _aipp_check_input_format(arch_version, input_format):\n \"\"\"check input format\"\"\"\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is not None:\n format_ = AIPP_INPUT_VERSON_AND_FUNCTION.get(arch_version).get(\n imm_input_format)\n\n if format_ is None:\n TikCheckUtil.raise_error(\n \"arch_version not support \"\n \"input_format: {}\".format(imm_input_format))\n\n\ndef _aipp_check_function_switch(arch_version, input_format, # pylint: disable=R0913\n swap_enable, csc_enable, dtc_enable,\n area_pad_enable, pre_clip_enable, scf_enable,\n post_clip_enable, flip_enable, stretch_enable,\n raw_enable):\n \"\"\"check aipp function switch\"\"\"\n # function's input params is too much, so disable them\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is not None:\n functions = AIPP_INPUT_VERSON_AND_FUNCTION.get(arch_version).get(\n imm_input_format)\n\n if pre_clip_enable == 1:\n TikCheckUtil.check_in_range(PRE_CLIP, functions,\n \"pre clip not support, \"\n \"input: {}\".format(functions))\n\n if swap_enable == 1:\n TikCheckUtil.check_in_range(SWAP, 
functions,\n \"swap not support, \"\n \"input: {}\".format(functions))\n\n if csc_enable == 1:\n TikCheckUtil.check_in_range(CSC, functions,\n \"csc not support, \"\n \"input: {}\".format(functions))\n\n if scf_enable == 1:\n TikCheckUtil.check_in_range(SCF, functions,\n \"scf not support, \"\n \"input: {}\".format(functions))\n\n if post_clip_enable == 1:\n TikCheckUtil.check_in_range(POST_CLIP, functions,\n \"post clip not support, \"\n \"input: {}\".format(functions))\n\n if dtc_enable == 1:\n TikCheckUtil.check_in_range(DTC, functions,\n \"dtc not support, \"\n \"input: {}\".format(functions))\n\n if area_pad_enable == 1:\n TikCheckUtil.check_in_range(AERA_PADDING, functions,\n \"area_pad not support, \"\n \"input: {}\".format(functions))\n\n if stretch_enable == 1:\n TikCheckUtil.check_in_range(STRETCH, functions,\n \"stretch not support, \"\n \"input: {}\".format(functions))\n\n if raw_enable == 1:\n TikCheckUtil.check_in_range(RAW, functions,\n \"raw channel raw not support, \"\n \"input: {}\".format(functions))\n\n if flip_enable == 1:\n TikCheckUtil.check_in_range(FLIP, functions,\n \"flip not support, \"\n \"input: {}\".format(functions))\n\n\ndef _aipp_check_src_info(arch_version, input_format, src_horizontal_size,\n src_vertical_size):\n \"\"\"check src tensor info\"\"\"\n\n TikCheckUtil.check_type_match(\n src_horizontal_size, (int, Scalar, Expr),\n \"src_horizontal_size type error, \"\n \"input type {}\".format(type(src_horizontal_size)))\n check_scalar_dtype(src_horizontal_size,\n \"src_horizontal_size should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n src_vertical_size, (int, Scalar, Expr),\n \"src_vertical_size type error, \"\n \"input type: {}\".format(src_vertical_size))\n check_scalar_dtype(src_vertical_size,\n \"src_vertical_size should be a scalar of int/uint\")\n\n imm_src_horizontal_size = Expr(src_horizontal_size).eval_value()\n if imm_src_horizontal_size is not None:\n TikCheckUtil.check_in_range(\n imm_src_horizontal_size, range(8, 4097),\n \"horizontal_resolution should in \"\n \"[8, 4096], input: {}\".format(imm_src_horizontal_size))\n\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is not None:\n if imm_input_format in [YUV420, YUYV, YUV422]:\n TikCheckUtil.check_equality(\n imm_src_horizontal_size % 2, 0,\n \"src_horizontal_size should be even, \"\n \"input: {}\".format(imm_src_horizontal_size))\n\n if arch_version == HI3796CV300ESAIC:\n if imm_input_format in [YUV420, YUV422,\n YUYV, YUV400, RAW10, RAW12,\n RAW16, RGB888]:\n TikCheckUtil.check_equality(\n imm_src_horizontal_size % 16, 0,\n \"src_horizontal_size should be multiple of 16,\"\n \" input: {}\".format(imm_src_horizontal_size))\n elif imm_input_format in [AYUV444, ARGB8888, XRGB8888]:\n TikCheckUtil.check_equality(\n imm_src_horizontal_size % 4, 0,\n \"src_horizontal_size should be multiple of 4,\"\n \" input: {}\".format(imm_src_horizontal_size))\n\n imm_src_vertical_size = Expr(src_vertical_size).eval_value()\n if imm_src_vertical_size is not None:\n TikCheckUtil.check_ge(imm_src_vertical_size, 1,\n \"src_vertical_size should more than 1, \"\n \"input: {}\".format(imm_src_vertical_size))\n\n\ndef _aipp_check_crop_info(input_format, single_line_mode, # pylint: disable=R0913\n horizontal_size, vertical_size,\n horizontal_start, vertical_start):\n \"\"\"check crop info\"\"\"\n # function's input params is too much, so disable them\n TikCheckUtil.check_type_match(\n single_line_mode, (int, Scalar, Expr),\n \"single_line_mode type error, \"\n \"input type: 
{}\".format(type(single_line_mode)))\n check_scalar_dtype(single_line_mode,\n \"single_line_mode should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n horizontal_size, (int, Scalar, Expr),\n \"horizontal_size type error, \"\n \"input type: {}\".format(type(horizontal_size)))\n check_scalar_dtype(horizontal_size,\n \"horizontal_size should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n vertical_size, (int, Scalar, Expr),\n \"vertical_size type error, \"\n \"input type: {}\".format(type(vertical_size)))\n check_scalar_dtype(vertical_size,\n \"vertical_size should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n horizontal_start, (int, Scalar, Expr),\n \"horizontal_start type error, \"\n \"input type: {}\".format(type(horizontal_start)))\n check_scalar_dtype(horizontal_start,\n \"horizontal_start should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n vertical_start, (int, Scalar, Expr),\n \"vertical_start type error, \"\n \"input type: {}\".format(type(vertical_start)))\n check_scalar_dtype(vertical_start,\n \"vertical_start should be a scalar of int/uint\")\n\n imm_input_format = Expr(input_format).eval_value()\n imm_single_line_mode = Expr(single_line_mode).eval_value()\n if imm_input_format is not None:\n imm_horizontal_size = Expr(horizontal_size).eval_value()\n if imm_horizontal_size is not None:\n # even YUV420/YUV422 semi-plannar and YUYV packed\n if imm_input_format in [YUV420, YUYV, YUV422]:\n TikCheckUtil.check_equality(\n imm_horizontal_size % 2, 0,\n \"horizontal_size should be even, \"\n \"input: {}\".format(imm_horizontal_size))\n # range\n TikCheckUtil.check_in_range(\n imm_horizontal_size, range(8, 4097),\n \"horizontal_size should in [8, 4096], \"\n \"input: {}\".format(imm_horizontal_size))\n\n imm_vertical_size = Expr(vertical_size).eval_value()\n if imm_vertical_size is not None:\n\n if imm_single_line_mode is not None and imm_single_line_mode == 0:\n # even YUV420\n if imm_input_format == YUV420:\n TikCheckUtil.check_equality(\n imm_vertical_size % 2, 0,\n \"vertical_size should be even, \"\n \"input: {}\".format(imm_vertical_size))\n\n # range\n TikCheckUtil.check_in_range(\n imm_vertical_size, range(8, 4097),\n \"vertical_size should in [8, 4096], \"\n \"input: {}\".format(imm_vertical_size))\n\n imm_horizontal_start = Expr(horizontal_start).eval_value()\n if imm_horizontal_start is not None:\n # even YUV420/YUV422 semi-plannar and YUYV packed\n if imm_input_format in [YUV420, YUYV, YUV422, XRGB8888,\n NC1HWC0DI_INT8, NC1HWC0DI_FP16,\n RGB888, YUV400]:\n TikCheckUtil.check_equality(\n imm_horizontal_start % 2, 0,\n 'horizontal_start should be even, '\n 'input: {}'.format(imm_horizontal_start))\n\n # range\n TikCheckUtil.check_in_range(\n imm_horizontal_start, range(0, 4096),\n 'horizontal_start should in [0, 4095], '\n 'input: {}'.format(imm_horizontal_start))\n\n imm_vertical_start = Expr(vertical_start).eval_value()\n if imm_vertical_start is not None and imm_single_line_mode is not None:\n # even YUV420\n if imm_input_format in [YUV420, XRGB8888, NC1HWC0DI_INT8,\n NC1HWC0DI_FP16, RGB888,\n YUV400] and imm_single_line_mode == 0:\n TikCheckUtil.check_equality(\n imm_vertical_start % 2, 0,\n 'vertical_start should be even, '\n 'input: {}'.format(imm_vertical_start))\n\n # range\n TikCheckUtil.check_in_range(\n imm_vertical_start, range(0, 4096),\n 'vertical_start should in [0, 4095], '\n 'input: {}'.format(imm_vertical_start))\n\n\ndef _aipp_check_crop_in_picture(src_horizontal_size, # pylint: disable=R0913\n 
src_vertical_size, horizontal_size,\n vertical_size, horizontal_start,\n vertical_start):\n \"\"\"check crop in picture\"\"\"\n # function's input params is too much, so disable them\n imm_src_horizontal_size = Expr(src_horizontal_size).eval_value()\n imm_horizontal_start = Expr(horizontal_start).eval_value()\n if imm_src_horizontal_size is not None and imm_horizontal_start is not None:\n # range\n TikCheckUtil.check_in_range(\n imm_horizontal_start, range(0, imm_src_horizontal_size + 1),\n 'horizontal_start should in src_horizontal_size, '\n 'input: {}'.format(imm_horizontal_start))\n\n imm_horizontal_size = Expr(horizontal_size).eval_value()\n if imm_horizontal_size is not None:\n # range\n TikCheckUtil.check_in_range(\n imm_horizontal_start + imm_horizontal_size,\n range(0, imm_src_horizontal_size + 1),\n 'horizontal_start+horizontal_size should in '\n 'src_horizontal_size, '\n 'input: {}'.format(imm_horizontal_start + imm_horizontal_size))\n\n imm_src_vertical_size = Expr(src_vertical_size).eval_value()\n imm_vertical_start = Expr(vertical_start).eval_value()\n if imm_src_vertical_size is not None and imm_vertical_start is not None:\n # range\n TikCheckUtil.check_in_range(\n imm_vertical_start, range(0, imm_src_vertical_size + 1),\n 'imm_vertical_start should in src_vertical_size, '\n 'imm_vertical_start: {}'.format(imm_vertical_start))\n\n imm_vertical_size = Expr(vertical_size).eval_value()\n if imm_vertical_size is not None:\n # range\n TikCheckUtil.check_in_range(\n imm_vertical_start + imm_vertical_size,\n range(0, imm_src_vertical_size + 1),\n \"vertical_start + vertical_size should in src_vertical_size,\"\n \" vertical_start + vertical_size: {}\".format(vertical_start +\n vertical_size))\n\n\ndef _aipp_check_crop_single_line_mode(arch_version, single_line_mode):\n \"\"\"check crop single line mode\"\"\"\n imm_single_line_mode = Expr(single_line_mode).eval_value()\n if imm_single_line_mode is not None:\n if arch_version in [ASCEND_910AIC, HI3796CV300ESAIC]:\n TikCheckUtil.check_equality(\n imm_single_line_mode, 0,\n 'single_line_mode value error, '\n 'input: {}'.format(imm_single_line_mode))\n else:\n TikCheckUtil.check_in_range(\n imm_single_line_mode, range(0, 2),\n 'single_line_mode should in [0, 1], '\n 'input: {}'.format(imm_single_line_mode))\n\n\ndef _check_crop_vertical_size_by_single_line(arch_version, crop_vertical_size,\n single_line_mode):\n if arch_version in [ASCEND_310AIC, AIC]:\n imm_single_line_mode = Expr(single_line_mode).eval_value()\n if imm_single_line_mode is not None and imm_single_line_mode == 1:\n imm_crop_vertical_size = Expr(crop_vertical_size).eval_value()\n if imm_crop_vertical_size is not None:\n TikCheckUtil.check_equality(\n imm_crop_vertical_size, 1,\n 'crop_vertical_size should '\n 'be 1 when single_line_mode enable')\n\n\ndef _aipp_check_format_convert(arch_version, format_convert):\n \"\"\"check format convert\"\"\"\n TikCheckUtil.check_type_match(\n format_convert, (int),\n \"format_convert should be int, input: {}\".format(type(format_convert)))\n\n if arch_version == HI3796CV300ESAIC:\n # range\n TikCheckUtil.check_in_range(\n format_convert,\n [0, 10, 11, 12, 13, 14, 15, 16, 17],\n 'format_convert should be 0 or in [10, 17] for v200hisi-es, '\n 'input: {}'.format(format_convert))\n else:\n TikCheckUtil.check_in_range(\n format_convert, range(0, 10),\n 'format_convert should in [0, 9] '\n 'for arch_version not hs, '\n 'input: {} {}'.format(arch_version, format_convert))\n\n\ndef _check_matrix_type_and_range(matrix, shape, in_type, 
input_range=None):\n \"\"\"check matrix type and range\"\"\"\n # matrix\n if len(matrix) != shape[0] or len(matrix[0]) != shape[1]:\n TikCheckUtil.raise_error(\"csc_matrix shape error,should 3*3\")\n\n for i in range(shape[0]):\n for j in range(shape[1]):\n TikCheckUtil.check_type_match(\n matrix[i][j], in_type,\n \"csc_matrix type error, input: {}\".format(matrix[i][j]))\n check_scalar_dtype(matrix[i][j],\n \"matrix[\" + str(i) + \"][\" + str(j) +\n \"] should be a scalar of int/uint\")\n if input_range:\n imm_matrix = Expr(matrix[i][j]).eval_value()\n if imm_matrix is not None:\n TikCheckUtil.check_in_range(\n imm_matrix, input_range,\n \"matrix[\" + str(i) + \"][\" + str(j) +\n \"] number out of range\")\n\n\ndef _check_list_type_and_range(input_list, length, in_type, input_range=None,\n name=''):\n \"\"\"check list type and range\"\"\"\n if input_list is None:\n TikCheckUtil.raise_error(name + \" is None\")\n\n if len(input_list) != length:\n TikCheckUtil.raise_error(name + \"input_list length error, \"\n \"input: {}\".format(len(input_list)))\n\n for i in range(length):\n TikCheckUtil.check_type_match(\n input_list[i], in_type, name + '[' + str(i) + \"] type error\")\n\n if int in in_type:\n check_scalar_dtype(input_list[i], name + '[' +\n str(i) + \"] should be a scalar of int/uint\")\n if input_range:\n imm_input = Expr(input_list[i]).eval_value()\n if imm_input is not None:\n TikCheckUtil.check_in_range(\n imm_input, input_range,\n name + '[' + str(i) + \"] out of range\")\n else:\n check_scalar_dtype_float(input_list[i], name + '[' + str(i) +\n \"] should be a scalar of float\")\n\n\ndef _aipp_check_csc_info(csc_matrix, csc_out_bias, csc_in_bias):\n \"\"\"check csc info\"\"\"\n _check_matrix_type_and_range(\n csc_matrix, [3, 3], (int, Scalar, Expr), range(-32768, 32768))\n _check_list_type_and_range(\n csc_out_bias, 3, (int, Scalar, Expr), range(0, 255), 'csc_out_bias')\n _check_list_type_and_range(\n csc_in_bias, 3, (int, Scalar, Expr), range(0, 255), 'csc_in_bias')\n\n\n# csc info\ndef _get_csc_parameter(format_convert, csc_info):\n \"\"\"get csc parameter\"\"\"\n if format_convert == 0:\n csc_matrix = csc_info.get('csc_matrix')\n csc_out_bias = csc_info.get('csc_out_bias')\n csc_in_bias = csc_info.get('csc_in_bias')\n\n _aipp_check_csc_info(csc_matrix, csc_out_bias, csc_in_bias)\n\n else:\n csc_para = AIPP_FORMAT_CONVERT.get(format_convert)\n csc_matrix = csc_para.get('csc_matrix')\n csc_out_bias = csc_para.get('csc_out_bias')\n csc_in_bias = csc_para.get('csc_in_bias')\n\n return csc_matrix, csc_out_bias, csc_in_bias\n\n\ndef _aipp_check_swap(input_format, rb_swap, uv_swap, ax_swap):\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is not None:\n imm_rb_swap = Expr(rb_swap).eval_value()\n if imm_rb_swap is not None:\n TikCheckUtil.check_in_range(\n imm_rb_swap,\n AIPP_INPUT_TYPE_SWAP_ALIGN.get(imm_input_format).get('swap')[0],\n 'rb_swap not support, '\n 'input_format: {}'.format(imm_input_format))\n\n imm_uv_swap = Expr(uv_swap).eval_value()\n if imm_uv_swap is not None:\n TikCheckUtil.check_in_range(\n imm_uv_swap,\n AIPP_INPUT_TYPE_SWAP_ALIGN.get(imm_input_format).get('swap')[1],\n 'uv_swap not support, '\n 'input_format: {}'.format(imm_input_format))\n\n imm_ax_swap = Expr(ax_swap).eval_value()\n if imm_ax_swap is not None:\n TikCheckUtil.check_in_range(\n imm_ax_swap,\n AIPP_INPUT_TYPE_SWAP_ALIGN.get(imm_input_format).get('swap')[2],\n 'ax_swap not support, '\n 'input_format: {}'.format(imm_input_format))\n\n\ndef 
_aipp_check_pre_clip(pre_top_clip_number, pre_botton_clip_number,\n                         crop_vertical_size):\n    \"\"\"check pre clip\"\"\"\n    TikCheckUtil.check_type_match(\n        pre_top_clip_number, (int, Scalar, Expr),\n        \"pre_top_clip_number should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(pre_top_clip_number)))\n    check_scalar_dtype(pre_top_clip_number,\n                       \"pre_top_clip_number should be a scalar of int/uint\")\n    TikCheckUtil.check_type_match(\n        pre_botton_clip_number, (int, Scalar, Expr),\n        \"pre_botton_clip_number should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(pre_botton_clip_number)))\n    check_scalar_dtype(pre_botton_clip_number,\n                       \"pre_botton_clip_number should be a scalar of int/uint\")\n\n    imm_pre_top_clip_number = Expr(pre_top_clip_number).eval_value()\n    if imm_pre_top_clip_number is not None:\n        TikCheckUtil.check_in_range(\n            imm_pre_top_clip_number, range(0, 2),\n            'pre_top_clip_number should in [0, 1], '\n            'input: {}'.format(imm_pre_top_clip_number))\n\n    imm_pre_botton_clip_number = Expr(pre_botton_clip_number).eval_value()\n    if imm_pre_botton_clip_number is not None:\n        TikCheckUtil.check_in_range(\n            imm_pre_botton_clip_number, range(0, 2),\n            'pre_botton_clip_number should in [0, 1], '\n            'input: {}'.format(imm_pre_botton_clip_number))\n\n    imm_crop_vertical_size = Expr(crop_vertical_size).eval_value()\n    if imm_pre_top_clip_number is not None and imm_pre_botton_clip_number \\\n            is not None and imm_crop_vertical_size is not None:\n        TikCheckUtil.check_ge(imm_crop_vertical_size,\n                              imm_pre_top_clip_number +\n                              imm_pre_botton_clip_number + 1,\n                              'crop_vertical_size should be more than 0 '\n                              'after pre-clip, crop_vertical_size: '\n                              '{}'.format(crop_vertical_size))\n\n\ndef _aipp_check_scf(scf_horizontal_size, scf_vertical_size,\n                    scf_horizontal_start, scf_vertical_start, scaling_mode):\n    \"\"\"check scf\"\"\"\n    TikCheckUtil.check_type_match(\n        scf_horizontal_size, (int, Scalar, Expr),\n        \"scf_horizontal_size should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(scf_horizontal_size)))\n    check_scalar_dtype(scf_horizontal_size,\n                       \"scf_horizontal_size should be a scalar of int/uint\")\n\n    imm_scf_horizontal_size = Expr(scf_horizontal_size).eval_value()\n    if imm_scf_horizontal_size is not None:\n        TikCheckUtil.check_in_range(\n            imm_scf_horizontal_size, range(16, 1921),\n            'scf_horizontal_size out of range, '\n            'input: {}'.format(imm_scf_horizontal_size))\n\n    TikCheckUtil.check_type_match(\n        scf_vertical_size, (int, Scalar, Expr),\n        \"scf_vertical_size should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(scf_vertical_size)))\n    check_scalar_dtype(scf_vertical_size,\n                       \"scf_vertical_size should be a scalar of int/uint\")\n\n    imm_scf_vertical_size = Expr(scf_vertical_size).eval_value()\n    if imm_scf_vertical_size is not None:\n        TikCheckUtil.check_in_range(\n            imm_scf_vertical_size, range(16, 1081),\n            'scf_vertical_size out of range, '\n            'input: {}'.format(imm_scf_vertical_size))\n\n    TikCheckUtil.check_type_match(\n        scf_horizontal_start, (int, Scalar, Expr),\n        \"scf_horizontal_start should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(scf_horizontal_start)))\n    check_scalar_dtype(scf_horizontal_start,\n                       \"scf_horizontal_start should be a scalar of int/uint\")\n    TikCheckUtil.check_type_match(\n        scf_vertical_start, (int, Scalar, Expr),\n        \"scf_vertical_start should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(scf_vertical_start)))\n    check_scalar_dtype(scf_vertical_start,\n                       \"scf_vertical_start should be a scalar of int/uint\")\n    TikCheckUtil.check_type_match(\n        scaling_mode, (int, Scalar, Expr),\n        \"scaling_mode 
should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(scaling_mode)))\n    check_scalar_dtype(scaling_mode,\n                       \"scaling_mode should be a scalar of int/uint\")\n\n    imm_scaling_mode = Expr(scaling_mode).eval_value()\n    if imm_scaling_mode is not None:\n        TikCheckUtil.check_in_range(imm_scaling_mode, range(0, 2),\n                                    'scaling_mode out of range, '\n                                    'input: {}'.format(imm_scaling_mode))\n\n\ndef _aipp_check_post_clip(post_botton_clip_number, post_top_clip_number,\n                          post_right_clip_number, post_left_clip_number):\n    \"\"\"check post clip\"\"\"\n    TikCheckUtil.check_type_match(\n        post_botton_clip_number, (int, Scalar, Expr),\n        \"post_botton_clip_number should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(post_botton_clip_number)))\n    check_scalar_dtype(post_botton_clip_number,\n                       \"post_botton_clip_number should be a scalar of int/uint\")\n    imm_post_botton_clip_number = Expr(post_botton_clip_number).eval_value()\n    if imm_post_botton_clip_number is not None:\n        TikCheckUtil.check_in_range(\n            imm_post_botton_clip_number, range(0, 64),\n            'post_botton_clip_number out of range, '\n            'input: {}'.format(imm_post_botton_clip_number))\n\n    TikCheckUtil.check_type_match(\n        post_top_clip_number, (int, Scalar, Expr),\n        \"post_top_clip_number should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(post_top_clip_number)))\n    check_scalar_dtype(post_top_clip_number,\n                       \"post_top_clip_number should be a scalar of int/uint\")\n    imm_post_top_clip_number = Expr(post_top_clip_number).eval_value()\n    if imm_post_top_clip_number is not None:\n        TikCheckUtil.check_in_range(\n            imm_post_top_clip_number, range(0, 64),\n            'post_top_clip_number out of range, '\n            'input: {}'.format(imm_post_top_clip_number))\n\n    TikCheckUtil.check_type_match(\n        post_right_clip_number, (int, Scalar, Expr),\n        \"post_right_clip_number should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(post_right_clip_number)))\n    check_scalar_dtype(post_right_clip_number,\n                       \"post_right_clip_number should be a scalar of int/uint\")\n    imm_post_right_clip_number = Expr(post_right_clip_number).eval_value()\n    if imm_post_right_clip_number is not None:\n        TikCheckUtil.check_in_range(\n            imm_post_right_clip_number, range(0, 64),\n            'post_right_clip_number out of range, '\n            'input: {}'.format(imm_post_right_clip_number))\n\n    TikCheckUtil.check_type_match(\n        post_left_clip_number, (int, Scalar, Expr),\n        \"post_left_clip_number should be int, Scalar, Expr, \"\n        \"input: {}\".format(type(post_left_clip_number)))\n    check_scalar_dtype(post_left_clip_number,\n                       \"post_left_clip_number should be a scalar of int/uint\")\n    imm_post_left_clip_number = Expr(post_left_clip_number).eval_value()\n    if imm_post_left_clip_number is not None:\n        TikCheckUtil.check_in_range(\n            imm_post_left_clip_number, range(0, 64),\n            'post_left_clip_number out of range, '\n            'input: {}'.format(imm_post_left_clip_number))\n\n\ndef _aipp_check_dtc_mean(dtc_mean_type, dtc_mean):\n    \"\"\"check dtc mean\"\"\"\n    TikCheckUtil.check_type_match(\n        dtc_mean_type, (int, Scalar, Expr),\n        \"dtc_mean_type type error, input: {}\".format(type(dtc_mean_type)))\n    check_scalar_dtype(dtc_mean_type,\n                       \"dtc_mean_type should be a scalar of int/uint\")\n\n    # dtc mean uint8/int8/b24\n    imm_dtc_mean_type = Expr(dtc_mean_type).eval_value()\n    if imm_dtc_mean_type is not None:\n        TikCheckUtil.check_in_range(\n            imm_dtc_mean_type, [0, 1],\n            'dtc_mean_type out of range, input: {}'.format(imm_dtc_mean_type))\n\n        if imm_dtc_mean_type == 0:\n            _check_list_type_and_range(\n                dtc_mean, 4, (int, Scalar, Expr), None, 'dtc_mean')\n        else:\n            
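# dtc_mean_type == 1: mean values are treated as float16, so list\n            # entries must be float immediates or float Scalars, e.g. a\n            # hypothetical dtc_mean=[0.0, 128.0, 128.0, 0.0]\n            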
_check_list_type_and_range(\n                dtc_mean, 4, (float, Scalar, Expr), None, 'dtc_mean')\n\n\ndef _cal_dtc_mean_by_type(dtc_mean):\n    \"\"\"calculate dtc mean\"\"\"\n    if Expr(dtc_mean).eval_value() is None:\n        if dtc_mean.dtype == 'float16':\n            dtc_mean_uint32 = dtc_mean.reinterpret_cast_to(\n                'uint16')\n        else:\n            dtc_mean_uint32 = dtc_mean\n    else:\n        if isinstance(dtc_mean, float):\n            dtc_mean_uint32 = float16format2uint16(dtc_mean)\n        else:\n            dtc_mean_uint32 = dtc_mean\n\n    return dtc_mean_uint32\n\n\ndef _aipp_get_dtc_mean(dtc_mean):\n    \"\"\"get dtc mean\"\"\"\n    dtc_mean0_uint32 = _cal_dtc_mean_by_type(dtc_mean[0])\n    dtc_mean1_uint32 = _cal_dtc_mean_by_type(dtc_mean[1])\n    dtc_mean2_uint32 = _cal_dtc_mean_by_type(dtc_mean[2])\n    dtc_mean3_uint32 = _cal_dtc_mean_by_type(dtc_mean[3])\n\n    return dtc_mean0_uint32, dtc_mean1_uint32, \\\n           dtc_mean2_uint32, dtc_mean3_uint32\n\n\ndef _aipp_get_dtc_min_value(dtc_min):\n    \"\"\"get dtc min value\"\"\"\n    imm_dtc_min0 = Expr(dtc_min[0]).eval_value()\n    if imm_dtc_min0 is None:\n        dtc_min0_uint32 = dtc_min[0].reinterpret_cast_to('uint16')\n    else:\n        dtc_min0_uint32_tmp = float16format2uint16(imm_dtc_min0)\n        dtc_min0_uint32 = dtc_min0_uint32_tmp\n\n    imm_dtc_min1 = Expr(dtc_min[1]).eval_value()\n    if imm_dtc_min1 is None:\n        dtc_min1_uint32 = dtc_min[1].reinterpret_cast_to('uint16')\n    else:\n        dtc_min1_uint32_tmp = float16format2uint16(imm_dtc_min1)\n        dtc_min1_uint32 = dtc_min1_uint32_tmp\n\n    imm_dtc_min2 = Expr(dtc_min[2]).eval_value()\n    if imm_dtc_min2 is None:\n        dtc_min2_uint32 = dtc_min[2].reinterpret_cast_to('uint16')\n    else:\n        dtc_min2_uint32_tmp = float16format2uint16(imm_dtc_min2)\n        dtc_min2_uint32 = dtc_min2_uint32_tmp\n\n    imm_dtc_min3 = Expr(dtc_min[3]).eval_value()\n    if imm_dtc_min3 is None:\n        dtc_min3_uint32 = dtc_min[3].reinterpret_cast_to('uint16')\n    else:\n        dtc_min3_uint32_tmp = float16format2uint16(imm_dtc_min3)\n        dtc_min3_uint32 = dtc_min3_uint32_tmp\n\n    return dtc_min0_uint32, dtc_min1_uint32, dtc_min2_uint32, dtc_min3_uint32\n\n\ndef _aipp_get_dtc_var_value(dtc_var):\n    \"\"\"get dtc var value\"\"\"\n    imm_dtc_var0 = Expr(dtc_var[0]).eval_value()\n    if imm_dtc_var0 is None:\n        dtc_var0_uint32 = dtc_var[0].reinterpret_cast_to('uint16')\n    else:\n        dtc_var0_uint32_tmp = float16format2uint16(imm_dtc_var0)\n        dtc_var0_uint32 = dtc_var0_uint32_tmp\n\n    imm_dtc_var1 = Expr(dtc_var[1]).eval_value()\n    if imm_dtc_var1 is None:\n        dtc_var1_uint32 = dtc_var[1].reinterpret_cast_to('uint16')\n    else:\n        dtc_var1_uint32_tmp = float16format2uint16(imm_dtc_var1)\n        dtc_var1_uint32 = dtc_var1_uint32_tmp\n\n    imm_dtc_var2 = Expr(dtc_var[2]).eval_value()\n    if imm_dtc_var2 is None:\n        dtc_var2_uint32 = dtc_var[2].reinterpret_cast_to('uint16')\n    else:\n        dtc_var2_uint32_tmp = float16format2uint16(imm_dtc_var2)\n        dtc_var2_uint32 = dtc_var2_uint32_tmp\n\n    imm_dtc_var3 = Expr(dtc_var[3]).eval_value()\n    if imm_dtc_var3 is None:\n        dtc_var3_uint32 = dtc_var[3].reinterpret_cast_to('uint16')\n    else:\n        dtc_var3_uint32_tmp = float16format2uint16(imm_dtc_var3)\n        dtc_var3_uint32 = dtc_var3_uint32_tmp\n\n    return dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32, dtc_var3_uint32\n\n\ndef _aipp_get_channel_pad_value(dst_type, channel0_pad_value,\n                                channel1_pad_value, channel2_pad_value,\n                                channel3_pad_value):\n    \"\"\"get channel pad value\"\"\"\n    if dst_type == 'float16':\n        imm_channel0_pad_value = Expr(channel0_pad_value).eval_value()\n        if imm_channel0_pad_value is None:\n            channel0_pad_value_uint32 = channel0_pad_value.reinterpret_cast_to(\n                'uint16')\n        else:\n            channel0_pad_value_uint32 = 
float16format2uint16(\n                imm_channel0_pad_value)\n\n        imm_channel1_pad_value = Expr(channel1_pad_value).eval_value()\n        if imm_channel1_pad_value is None:\n            channel1_pad_value_uint32 = channel1_pad_value.reinterpret_cast_to(\n                'uint16')\n        else:\n            channel1_pad_value_uint32 = float16format2uint16(\n                imm_channel1_pad_value)\n\n        imm_channel2_pad_value = Expr(channel2_pad_value).eval_value()\n        if imm_channel2_pad_value is None:\n            channel2_pad_value_uint32 = channel2_pad_value.reinterpret_cast_to(\n                'uint16')\n        else:\n            channel2_pad_value_uint32 = float16format2uint16(\n                imm_channel2_pad_value)\n\n        imm_channel3_pad_value = Expr(channel3_pad_value).eval_value()\n        if imm_channel3_pad_value is None:\n            channel3_pad_value_uint32 = channel3_pad_value.reinterpret_cast_to(\n                'uint16')\n        else:\n            channel3_pad_value_uint32 = float16format2uint16(\n                imm_channel3_pad_value)\n    else:\n        channel0_pad_value_uint32 = channel0_pad_value\n        channel1_pad_value_uint32 = channel1_pad_value\n        channel2_pad_value_uint32 = channel2_pad_value\n        channel3_pad_value_uint32 = channel3_pad_value\n\n    return channel0_pad_value_uint32, channel1_pad_value_uint32, \\\n           channel2_pad_value_uint32, channel3_pad_value_uint32\n\n\ndef _aipp_check_dtc_raw_info(raw_to_f16_n):\n    \"\"\"check dtc raw info\"\"\"\n\n    TikCheckUtil.check_type_match(\n        raw_to_f16_n, (int, Scalar, Expr),\n        \"raw_to_f16_n type error, input: {}\".format(type(raw_to_f16_n)))\n    check_scalar_dtype(raw_to_f16_n,\n                       \"raw_to_f16_n should be a scalar of int/uint\")\n    imm_raw_to_f16_n = Expr(raw_to_f16_n).eval_value()\n    if imm_raw_to_f16_n is not None:\n        TikCheckUtil.check_in_range(\n            imm_raw_to_f16_n, RAW_TO_16_N,\n            'raw_to_f16_n value out range, input: {}'.format(imm_raw_to_f16_n))\n\n\ndef _aipp_check_flip_dict(arch_version_str, flip_mode):\n    \"\"\"\n    check flip mode\n    :param arch_version_str: arch_version_str\n    :param flip_mode: 0-3\n    :return: None\n    \"\"\"\n    if arch_version_str in [HI3796CV300ESAIC]:\n        TikCheckUtil.check_type_match(\n            flip_mode, (int, Scalar, Expr),\n            \"flip_mode should be int, Scalar, Expr, \"\n            \"input: {}\".format(type(flip_mode)))\n        check_scalar_dtype(flip_mode,\n                           \"flip_mode should be a scalar of int/uint\")\n        imm_flip_mode = Expr(flip_mode).eval_value()\n        if imm_flip_mode is not None:\n            TikCheckUtil.check_in_range(\n                imm_flip_mode, range(4), 'flip_mode value out range, '\n                                         'input: {}'.format(imm_flip_mode))\n\n\ndef _aipp_check_cpad(arch_version_str, dst, sfr_cpadding, cpadding_mode):\n    \"\"\"check cpadding\"\"\"\n\n    TikCheckUtil.check_type_match(\n        cpadding_mode, (int, Scalar, Expr),\n        'cpadding_mode type error, input: {}'.format(type(cpadding_mode)))\n    check_scalar_dtype(cpadding_mode,\n                       \"cpadding_mode should be a scalar of int/uint\")\n\n    imm_cpadding_mode = Expr(cpadding_mode).eval_value()\n    if imm_cpadding_mode is not None:\n        # C PADDING\n        TikCheckUtil.check_in_range(\n            imm_cpadding_mode, range(4), 'cpadding_mode out of range, '\n                                         'input: {}'.format(imm_cpadding_mode))\n        if imm_cpadding_mode == 1:\n            TikCheckUtil.check_in_range(\n                arch_version_str, [HI3796CV300ESAIC],\n                'only v200hisi support no padding, '\n                'now: {}'.format(arch_version_str))\n        else:\n            if dst.dtype == 'float16':\n                TikCheckUtil.check_type_match(\n                    sfr_cpadding, (float, Scalar, Expr),\n                    \"sfr_cpadding type error, \"\n                    \"input: {}\".format(type(sfr_cpadding)))\n                check_scalar_dtype_float(sfr_cpadding,\n                                         \"sfr_cpadding should be\"\n                                         \" a scalar of float\")\n            elif dst.dtype == 'int8':\n                TikCheckUtil.check_type_match(\n                    sfr_cpadding, (int, Scalar, Expr),\n                    \"sfr_cpadding type error, 
\"\n \"input: {}\".format(type(sfr_cpadding)))\n check_scalar_dtype(sfr_cpadding,\n \"sfr_cpadding should \"\n \"be a scalar of int/uint\")\n imm_sfr_cpadding = Expr(sfr_cpadding).eval_value()\n if imm_sfr_cpadding is not None:\n TikCheckUtil.check_in_range(\n imm_sfr_cpadding,\n range(INT8_MIN, INT8_MAX + 1),\n \"sfr_cpadding out range, \"\n \"input: {}\".format(imm_sfr_cpadding))\n else:\n TikCheckUtil.check_type_match(\n sfr_cpadding, (int, Scalar, Expr),\n \"sfr_cpadding type error, \"\n \"input: {}\".format(type(sfr_cpadding)))\n check_scalar_dtype(sfr_cpadding,\n \"sfr_cpadding should \"\n \"be a scalar of int/uint\")\n imm_sfr_cpadding = Expr(sfr_cpadding).eval_value()\n if imm_sfr_cpadding is not None:\n TikCheckUtil.check_in_range(\n imm_sfr_cpadding,\n range(UINT_MIN, UINT8_MAX + 1),\n \"sfr_cpadding out range, \"\n \"input: {}\".format(imm_sfr_cpadding))\n\n\ndef _aipp_top_area_pad_check(arch_version, imm_padding_mode, imm_top_pad_size):\n \"\"\"check top area padding\"\"\"\n if arch_version in [ASCEND_310AIC, ASCEND_910AIC, AIC]:\n if imm_padding_mode is not None:\n if imm_padding_mode == 0:\n TikCheckUtil.check_equality(\n imm_top_pad_size, 0,\n arch_version + ' do not support top_pad')\n\n\ndef _aipp_botton_area_pad_check(arch_version, imm_padding_mode,\n imm_botton_pad_size):\n \"\"\"check botton area padding\"\"\"\n if arch_version in [ASCEND_310AIC, ASCEND_910AIC, AIC]:\n if imm_padding_mode is not None:\n if imm_padding_mode == 0:\n TikCheckUtil.check_equality(\n imm_botton_pad_size, 0,\n arch_version + ' do not support botton_pad')\n\n\ndef _aipp_check_config_pad_value(imm_padding_mode, input_format, dst,\n filling_hblank):\n \"\"\"check padding config value\"\"\"\n # check config value padding\n if imm_padding_mode == 0:\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is not None:\n if imm_input_format not in [NC1HWC0DI_FP16, NC1HWC0DI_INT8, RAW24,\n RAW16,\n RAW12, RAW10]:\n if dst.dtype == 'float16':\n _check_list_type_and_range(filling_hblank, 4,\n (float, Scalar, Expr), None,\n 'filling_hblank')\n elif dst.dtype == 'int8':\n _check_list_type_and_range(filling_hblank, 4,\n (int, Scalar, Expr),\n range(INT8_MIN,\n INT8_MAX + 1),\n 'filling_hblank')\n else:\n _check_list_type_and_range(filling_hblank, 4,\n (int, Scalar, Expr),\n range(UINT_MIN,\n UINT8_MAX + 1),\n 'filling_hblank')\n\n\ndef _aipp_check_area_pad(input_format, dst, # pylint: disable=R0913\n padding_mode, top_pad_size, botton_pad_size,\n left_pad_size, right_pad_size, filling_hblank,\n arch_version='v200hisi'):\n \"\"\"check area padding\"\"\"\n # function's input params is too much, so disable them\n\n TikCheckUtil.check_type_match(\n padding_mode, (int, Scalar, Expr),\n 'padding_mode type error, input: {}'.format(type(padding_mode)))\n check_scalar_dtype(padding_mode,\n \"padding_mode should be a scalar of int/uint\")\n imm_padding_mode = Expr(padding_mode).eval_value()\n if imm_padding_mode is not None:\n TikCheckUtil.check_in_range(imm_padding_mode, range(4),\n 'padding_mode out of range, '\n 'input: {}'.format(imm_padding_mode))\n\n TikCheckUtil.check_type_match(\n top_pad_size, (int, Scalar, Expr),\n \"top_pad_size should be int, Scalar, Expr, \"\n \"input: {}\".format(type(top_pad_size)))\n check_scalar_dtype(top_pad_size,\n \"top_pad_size should be a scalar of int/uint\")\n imm_top_pad_size = Expr(top_pad_size).eval_value()\n if imm_top_pad_size is not None:\n TikCheckUtil.check_in_range(\n imm_top_pad_size, range(33),\n 'top_pad_size out of range, input: 
{}'.format(imm_top_pad_size))\n _aipp_top_area_pad_check(arch_version, imm_padding_mode,\n imm_top_pad_size)\n\n TikCheckUtil.check_type_match(\n botton_pad_size, (int, Scalar, Expr),\n \"botton_pad_size should be int, Scalar, Expr, \"\n \"input: {}\".format(type(botton_pad_size)))\n check_scalar_dtype(botton_pad_size,\n \"botton_pad_size should be a scalar of int/uint\")\n imm_botton_pad_size = Expr(botton_pad_size).eval_value()\n if imm_botton_pad_size is not None:\n TikCheckUtil.check_in_range(\n imm_botton_pad_size, range(33),\n 'botton_pad_size should in [0, 32]')\n _aipp_botton_area_pad_check(arch_version, imm_padding_mode,\n imm_botton_pad_size)\n\n if imm_padding_mode is not None:\n TikCheckUtil.check_type_match(\n left_pad_size, (int, Scalar, Expr),\n \"left_pad_size should be int, Scalar, Expr, \"\n \"input: {}\".format(type(left_pad_size)))\n check_scalar_dtype(left_pad_size,\n \"left_pad_size should be a scalar of int/uint\")\n imm_left_pad_size = Expr(left_pad_size).eval_value()\n if imm_left_pad_size is not None:\n TikCheckUtil.check_in_range(\n imm_left_pad_size, range(33),\n 'left_pad_size out of range, '\n 'input: {}'.format(imm_left_pad_size))\n TikCheckUtil.check_type_match(\n right_pad_size, (int, Scalar, Expr),\n \"right_pad_size should be int, Scalar, Expr, \"\n \"input: {}\".format(type(right_pad_size)))\n check_scalar_dtype(right_pad_size,\n \"right_pad_size should be a scalar of int/uint\")\n imm_right_pad_size = Expr(right_pad_size).eval_value()\n if imm_right_pad_size is not None:\n TikCheckUtil.check_in_range(\n imm_right_pad_size, range(33),\n 'right_pad_size out of range, '\n 'input: {}'.format(imm_right_pad_size))\n\n # check config value padding\n _aipp_check_config_pad_value(imm_padding_mode, input_format, dst,\n filling_hblank)\n\n\ndef _aipp_check_raw_info(raw_image_channel, raw_start_channel):\n \"\"\"check raw info\"\"\"\n\n TikCheckUtil.check_type_match(\n raw_image_channel, (int, Scalar, Expr),\n \"raw_image_channel should be (int, Scalar, Expr), \"\n \"input: {}\".format(type(raw_image_channel)))\n check_scalar_dtype(raw_image_channel,\n \"raw_image_channel should be a scalar of int/uint\")\n imm_raw_image_channel = Expr(raw_image_channel).eval_value()\n if imm_raw_image_channel is not None:\n TikCheckUtil.check_in_range(\n imm_raw_image_channel, range(4),\n 'raw_image_channel value out range, '\n 'input: {}'.format(imm_raw_image_channel))\n\n TikCheckUtil.check_type_match(\n raw_start_channel, (int, Scalar, Expr),\n \"raw_start_channel should be (int, Scalar, Expr), \"\n \"input: {}\".format(type(raw_start_channel)))\n check_scalar_dtype(raw_start_channel,\n \"raw_start_channel should be a scalar of int/uint\")\n imm_raw_start_channel = Expr(raw_start_channel).eval_value()\n if imm_raw_start_channel is not None:\n TikCheckUtil.check_in_range(\n imm_raw_start_channel, range(4),\n 'raw_start_channel value out range, '\n 'input: {}'.format(imm_raw_start_channel))\n\n\ndef _aipp_check_stretch(dst_stride_pixel):\n \"\"\"check stretch\"\"\"\n\n TikCheckUtil.check_type_match(\n dst_stride_pixel, (int, Scalar, Expr),\n \"dst_stride_pixel should be int, Scalar, Expr, \"\n \"input: {}\".format(type(dst_stride_pixel)))\n check_scalar_dtype(dst_stride_pixel,\n \"dst_stride_pixel should be a scalar of int/uint\")\n imm_dst_stride_pixel = Expr(dst_stride_pixel).eval_value()\n if imm_dst_stride_pixel is not None:\n TikCheckUtil.check_in_range(\n imm_dst_stride_pixel, range(65536),\n 'dst_stride_pixel out of range, '\n 'input: 
{}'.format(imm_dst_stride_pixel))\n\n\ndef _aipp_check_sid(arch_version, sid):\n    \"\"\"check sid\"\"\"\n\n    TikCheckUtil.check_type_match(\n        sid, (int, Scalar, Expr), \"sid should be int, Scalar, Expr, \"\n                                  \"input: {}\".format(type(sid)))\n    check_scalar_dtype(sid,\n                       \"sid should be a scalar of int/uint\")\n    if arch_version in [HI3796CV300ESAIC]:\n        imm_sid = Expr(sid).eval_value()\n        if imm_sid is not None:\n            TikCheckUtil.check_in_range(\n                imm_sid, range(11),\n                'sid value out range, input: {}'.format(imm_sid))\n\n\ndef _check_vscatter_vgather_operator_scope(src, dst, offset, offset_name):\n    \"\"\"check scope for vscatter and vgather\n\n    Parameters\n    ----------\n    src: src operator\n    dst: dst operator\n    offset: addr offset tensor\n    offset_name: offset tensor name\n\n    Returns\n    -------\n    None\n    \"\"\"\n    TikCheckUtil.check_equality(src.scope,\n                                cce_params.scope_ubuf,\n                                \"src's scope must be UB. \"\n                                \"input scope: {}\".format(src.scope))\n    TikCheckUtil.check_equality(dst.scope,\n                                cce_params.scope_ubuf,\n                                \"dst's scope must be UB. \"\n                                \"input scope: {}\".format(dst.scope))\n    TikCheckUtil.check_equality(offset.scope,\n                                cce_params.scope_ubuf,\n                                \"{}'s scope must be UB. \"\n                                \"input scope: {}\".format(offset_name,\n                                                         offset.scope))\n\n\nclass TikDataOpApi(TikIRBuilder):  # pylint: disable=R0904\n    \"\"\"\n    Data convert, Data fill, Data move Api\n    \"\"\"\n    def __init__(self):\n        super(TikDataOpApi, self).__init__()\n\n    @source_info_decorator()\n    @debug.vtranspose_decorator\n    def vtranspose(self, dst, src):\n        \"\"\"Transpose a continuous 16*16 two-dimensional matrix data block\n\n        Parameters\n        ----------\n        src : source operator\n        dst : destination operator\n\n        Returns\n        -------\n        None\n        \"\"\"\n        TikCheckUtil.check_type_match(dst, Tensor, \"dst should be tensor\")\n        TikCheckUtil.check_type_match(src, Tensor, \"src should be tensor\")\n        src_elements_count = reduce_mul(src.indice.origin_shape)\n        dst_elements_count = reduce_mul(dst.indice.origin_shape)\n        required_elements_count = 256\n        TikCheckUtil.check_ge(\n            src_elements_count, required_elements_count,\n            \"elements of src should be at least 256\")\n        TikCheckUtil.check_ge(\n            dst_elements_count, required_elements_count,\n            \"elements of dst should be at least 256\")\n        # all arch-version support\n        dst_src_map = [\"u16u16\", \"s16s16\", \"f16f16\"]\n\n        # check dtype\n        dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n        if dtype_str not in dst_src_map:\n            TikCheckUtil.raise_error(\n                \"dtype of dst and src should be u16u16, s16s16 or f16f16\")\n\n        # check address overlapping\n        src_offset = Expr(src.offset).eval_value()\n        dst_offset = Expr(dst.offset).eval_value()\n        if all(isinstance(value, int) for value in (src_offset, dst_offset)):\n            if src.buffer == dst.buffer:\n                if src_offset == dst_offset or \\\n                        src_offset + required_elements_count <= \\\n                        dst_offset or dst_offset + \\\n                        required_elements_count <= src_offset:\n                    pass\n                else:\n                    TikCheckUtil.raise_error(\n                        \"vtranspose not support partially address overlapping\")\n        # gen\n        with self.new_scope():\n            self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n            # 2 is size of b16, 2 Bytes\n            extent = Expr(required_elements_count*2)\n            # one ir is call_extern\n            self.emit(\n                tvm.call_extern(\n                    dst.dtype, \"vtranspose\",\n                    dst.reinterpret_cast_to(\"uint16\").access_ptr(\n                        \"w\", extent=extent.get()),\n                    src.reinterpret_cast_to(\"uint16\").access_ptr(\n                        \"r\", extent=extent.get())),\n                ONE_IR)\n\n    # VA mode - vnchwconv\n    @source_info_decorator()\n    @debug.vnchwconv_decorator\n    def vnchwconv(self, 
dst_high_half, src_high_half,  # pylint: disable=R0913\n                   dst_list, src_list, repeat_times, dst_rep_stride,\n                   src_rep_stride, name=None):\n        \"\"\"used for NCHW to NHWC\n\n        Parameters\n        ----------\n        dst_high_half : bool\n        src_high_half : bool\n        src_list : the src operation list\n        dst_list: the dst operation list\n        repeat_times : Repeated iterations times\n        dst_rep_stride : offset of dst operator in the same block\n                         between adjacent iterations\n        src_rep_stride : offset of src operator in the same block\n                         between adjacent iterations\n\n        Returns\n        -------\n        None\n        \"\"\"\n        if name is None:\n            name = \"vnchwconv\"\n        # check dst_high_half, src_high_half\n        TikCheckUtil.check_type_match(\n            dst_high_half, bool, \"dst_high_half should be bool, input type: {}\"\n            .format(type(dst_high_half)))\n        TikCheckUtil.check_type_match(\n            src_high_half, bool, \"src_high_half should be bool, input type: {}\"\n            .format(type(src_high_half)))\n        check_repeat_times(repeat_times)\n        # check strides\n        check_vector_stride(None, [dst_rep_stride, src_rep_stride],\n                            None, MAX_REP_STRIDE_DOUBLE_BYTE, [\"dst\", \"src\"])\n        # check tensor list number\n        TikCheckUtil.check_type_match(dst_list, (tuple, list),\n                                      \"dst_list should be tuple or list\")\n        TikCheckUtil.check_type_match(src_list, (tuple, list),\n                                      \"src_list should be tuple or list\")\n        TikCheckUtil.check_equality(len(dst_list), VNCHWCONV_LIST_LEN,\n                                    \"there should be 16 addresses in dst_list\")\n        TikCheckUtil.check_equality(len(src_list), VNCHWCONV_LIST_LEN,\n                                    \"there should be 16 addresses in src_list\")\n        for src in src_list:\n            TikCheckUtil.check_type_match(src, Tensor, \"src should be tensor\")\n        for dst in dst_list:\n            TikCheckUtil.check_type_match(dst, Tensor, \"dst should be tensor\")\n\n        # check tensor list dtype\n        dtype_str = self._get_dtype_str(src_list, dst_list, name)\n\n        # check address overlap\n        if VNCHWCONV_INSTR_APPENDIX_MAP[dtype_str] == \"b8\":\n            mask_len = MASK_VALUE_128\n        else:\n            mask_len = ONE_BLK_SIZE*VNCHWCONV_LIST_LEN // \\\n                       DTYPE_SIZE[dst_list[VA0_INDEX].dtype]\n        if all(isinstance(value, int) for value\n               in (dst_rep_stride, src_rep_stride)):\n            check_scatter_address_overlap(\n                mask_len, dst_list, src_list, repeat_times,\n                dst_rep_stride, src_rep_stride,\n                store_high_half=dst_high_half,\n                src_store_high_half=src_high_half,\n                name=name, msg=\"dst_list and src_list\")\n\n        # check tensor overflow(static)\n        check_vnchwconv_overflow(\n            [src_list, dst_list], [\"src_list\", \"dst_list\"], repeat_times,\n            [src_rep_stride, dst_rep_stride],\n            [src_high_half, dst_high_half],\n            VNCHWCONV_INSTR_APPENDIX_MAP[dtype_str])\n        # code gen\n        config = [_dtype_convert(repeat_times, \"int64\"), dst_rep_stride,\n                  src_rep_stride]\n        if VNCHWCONV_INSTR_APPENDIX_MAP[dtype_str] == \"b8\":\n            config.append(int(dst_high_half))\n            config.append(int(src_high_half))\n        self._config_vas([dst_list, src_list], dtype_str, config,\n                         # [dst_extent, src_extent]\n                         [Expr(((repeat_times - 1)*dst_rep_stride + 1)\n                               *ONE_BLK_SIZE).get(),\n                          Expr(((repeat_times - 1)*src_rep_stride + 1)\n                               *ONE_BLK_SIZE).get()])\n\n    def _get_dtype_str(self, src_list, dst_list, name):\n        dtype_str = \"\"\n        for dst, src in zip(dst_list, src_list):\n            dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n            TikCheckUtil.check_equality(dst.dtype, src.dtype,\n                                        \"Intrinsic {}'s src's dtype \"\n                                        \"should be equal to dst's dtype\".\n                                        format(name))\n            TikCheckUtil.check_equality(api_check_support(\"tik.\"\n                                                          + name,\n                                                          dst.dtype), True,\n                                        INSTR_DTYPE_SUPPORT_STATEMENT.\n                                        format(dst.dtype, name))\n        return dtype_str\n\n    def 
_config_vas(self, dst_list_src_list, dtype_str, config, extents=None):\n        # can't find the function in library, so disable it\n        # config VAs\n        with self.new_scope():\n            intrin = tvm.call_extern(\"uint64\", \"scatter_vnchwconv_\" +\n                                     VNCHWCONV_INSTR_APPENDIX_MAP[dtype_str],\n                                     VA_REG[VA0_INDEX], VA_REG[VA2_INDEX],\n                                     *type_convert(config))\n            addr_list_tuple = _get_addr_list(dst_list_src_list[0],  # dst_list\n                                             dst_list_src_list[1],  # src_list\n                                             extents)\n            intrin_block = tvm.make.Evaluate(0)\n            self.source_info.set_node_loc(intrin_block)\n            total_ir_num = ONE_IR\n            for index, addr_list in enumerate(addr_list_tuple):\n                intrin_setva = tvm.call_extern(\"uint64\", \"VA_reg_set\",\n                                               VA_REG[index], *addr_list)\n                tmp_instr = tvm.make.Evaluate(intrin_setva)\n                self.source_info.set_node_loc(tmp_instr)\n                intrin_block = tvm.make.Block(intrin_block, tmp_instr)\n                self.source_info.set_node_loc(intrin_block)\n                total_ir_num += ONE_IR\n\n            tmp_instr = tvm.make.Evaluate(intrin)\n            self.source_info.set_node_loc(tmp_instr)\n            intrin_block = tvm.make.Block(intrin_block, tmp_instr)\n            self.source_info.set_node_loc(intrin_block)\n            emit_scatter_instr(self, total_ir_num, intrin_block)\n\n    @source_info_decorator()\n    @debug.load2dv1_decorator\n    def load2dv1(self,  # pylint: disable=R0913\n                 dst,\n                 src,\n                 index,\n                 repeat_times,\n                 src_stride,\n                 sid,\n                 if_transpose=False,\n                 addr_mode=None):\n        \"\"\"Pass the offline processed convolution right\n        matrix (davinci format) from gm to ca/cb/cbuf or from cbuf to ca/cb\n\n        Parameters\n        ----------\n        dst : destination tensor\n        src : source tensor\n        index : [0, 65535] data index\n        repeat_times : [1, 255]\n        sid: default 0\n        src_stride : offset of src tensor between adjacent data segment\n        if_transpose : if transpose. True/False\n\n        Returns\n        -------\n        None\n        \"\"\"\n        return self.load2d(dst, src, index, repeat_times, None, src_stride,\n                           sid, if_transpose, addr_mode)\n\n    @source_info_decorator()\n    @debug.load2dv2_decorator\n    def load2dv2(self, dst, src, start_index,  # pylint: disable=R0913\n                 repeat_times, dst_gap, src_stride,\n                 sid, if_transpose=False, addr_mode=None):\n        \"\"\"Pass the offline processed convolution right\n        matrix (davinci format) to scope_ca/scope_cb\n\n        Parameters\n        ----------\n        dst : destination tensor\n        src : source tensor\n        start_index : [0, 65535] data index\n        repeat_times : [1, 255]\n        dst_gap: gap of dst tensor between adjacent data segment\n        src_stride : stride of src tensor between adjacent data segment\n        sid: default 0\n        if_transpose : if transpose. True/False\n        addr_mode: address mode, default is None\n\n        Returns\n        -------\n        None\n        \"\"\"\n        # too many arguments, so disable R0913\n        return self.load2d(dst, src, start_index, repeat_times, dst_gap,\n                           src_stride, sid, if_transpose, addr_mode)\n\n    def load2d(self, dst, src, start_index,  # pylint: disable=R0913, R0914\n               repeat_times, dst_gap, src_stride, sid,\n               en_transpose=False, addr_mode=None):\n        \"\"\"Pass the offline processed convolution right\n        matrix (davinci format) to scope_ca/scope_cb\n        Note: dst_gap is tail-to-head, src_stride is head-to-head.\n\n        Parameters\n        ----------\n        dst : destination tensor\n        src : source tensor\n        start_index : [0, 65535] data index\n        repeat_times : [1, 255]\n        dst_gap: gap of dst tensor between adjacent data segment\n        src_stride : stride of src tensor between adjacent data segment\n        sid: default 0\n        en_transpose : enable transpose. 
True/False\n addr_mode\n\n Returns\n -------\n None\n \"\"\"\n # too many arguments, so disable R0914\n # check instruction\n arch_version_str = get_soc_name() + get_soc_core_type()\n # check scope\n src_scope = SCOPE_MAP[src.scope]\n dst_scope = SCOPE_MAP[dst.scope]\n TikCheckUtil.check_in_range(\n (src_scope, dst_scope), LOAD2D_DMA_LIST,\n \"load2d not support from %s to %s\" % (src_scope, dst_scope))\n\n pipe_line, intrin_name = LOAD2D_DMA_LIST[(src_scope, dst_scope)]\n # check dtype\n dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should\"\n \" be equal to dst's dtype\".\n format(\"load2d\"))\n TikCheckUtil.check_equality(api_check_support(\"tik.\" +\n \"load2dv1\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, \"load2d\"))\n # check addr_mode\n # not support online config yet\n TikCheckUtil.check_in_range(\n addr_mode, ('inc', INC_MODE, 'dec', DEC_MODE, None),\n \"addr_mode should be 'inc', 'dec', 0, 1 or None\")\n\n if addr_mode in (\"dec\", DEC_MODE):\n TikCheckUtil.check_equality((get_soc_name() in\n (ASCEND_910, HI3796CV300ES)\n or get_soc_name() +\n get_soc_core_type() == AIC), True,\n \"current soc not support \"\n \"addr_dec_mode\")\n # 1 increase\n addr_mode_bit = _ADDR_MODE_BIT_INCREASE\n else:\n # 0 decrease\n addr_mode_bit = _ADDR_MODE_BIT_DECREASE\n # check en_transpose\n # not support online config yet\n TikCheckUtil.check_type_match(en_transpose, bool,\n \"en_transpose should be bool\")\n\n if en_transpose:\n TikCheckUtil.check_in_range(\n src_scope, ('cbuf', ),\n \"src_scope should be cbuf if enabling transpose\")\n TikCheckUtil.check_in_range(\n dst_scope, ('ca', 'cb'),\n \"dst_scope should be ca or cb if enabling transpose\")\n # 1 enable transpose\n transpose_bit = 1\n else:\n # 0 disable transpose\n transpose_bit = 0\n # check repeat_times\n check_repeat_times(repeat_times)\n # gen start_index\n TikCheckUtil.check_type_match(\n start_index, (int, Scalar, Expr),\n \"start_index should be int, Scalar or Expr\")\n check_scalar_dtype(start_index,\n \"scalar_start_index should be a scalar of int/uint\")\n check_integer_in_range(\n start_index, range(MAX_START_INDEX),\n \"start_index should be in the range of [0, 65535], input value is: \"\n \"{}\".format(start_index))\n # check dst_gap\n if dst_gap is not None:\n TikCheckUtil.check_equality((get_soc_name() == HI3796CV300ES\n or get_soc_name() +\n get_soc_core_type() == AIC), True,\n \"current soc not support dst_gap\")\n TikCheckUtil.check_type_match(dst_gap, (int, Scalar, Expr),\n \"dst_gap should be int, Scalar, Expr\")\n check_scalar_dtype(dst_gap,\n \"scalar_dst_gap should be a scalar of int/uint\")\n check_integer_in_range(\n dst_gap, range(MAX_DST_GAP_DOUBLE_BYTE),\n \"dst_gap should be in the range of [0, 65535], input value is: \"\n \"{}\".format(dst_gap))\n # check src_stride\n TikCheckUtil.check_type_match(\n src_stride, (int, Scalar, Expr),\n \"src_stride should be int, Scalar or Expr\")\n check_scalar_dtype(src_stride,\n \"scalar_src_stride should be a scalar of int/uint\")\n check_integer_in_range(\n src_stride, range(MAX_BLK_STRIDE_DOUBLE_BYTE),\n \"src_stride should be in the range of [0, 65535], input value is: \"\n \"{}\".format(src_stride))\n # check sid\n check_integer_in_range(sid, range(MAX_SID),\n \"sid should be in the range of [0, 15]\")\n # gen\n if dst_gap is None:\n config = [start_index, repeat_times, src_stride, sid]\n else:\n config = [start_index, repeat_times, src_stride, 
dst_gap, sid]\n args = config\n\n dtype_str = _get_load2d_dtype_str(src_scope, args, transpose_bit,\n arch_version_str, addr_mode_bit,\n dtype_str, dst)\n # calculate extent\n src_extent, dst_extent = _calculate_extent_load2d(\n start_index, repeat_times, src_stride, dst_gap)\n\n with self.new_scope():\n instr = tvm.call_extern(dst.dtype,\n intrin_name,\n dst.reinterpret_cast_to(dtype_str)\n .access_ptr(\"w\", extent=dst_extent),\n src.reinterpret_cast_to(dtype_str)\n .access_ptr(\"r\", extent=src_extent),\n *(type_convert(args)))\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", pipe_line)\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n def assign(self, dst, src, dst_offset=0, src_offset=None):\n \"\"\"assign src to dst\n\n Parameters\n ----------\n dst : destination tensor\n src : source tensor\n dst_offset: dst tensor offset\n src_offset: src tensor offset\n\n Returns\n -------\n None\n \"\"\"\n type_list = (Tensor, Scalar, Expr)\n TikCheckUtil.check_type_match(dst, type_list,\n \"assign only support load or \"\n \"store data between UB and REG\")\n TikCheckUtil.check_type_match(src, type_list,\n \"assign only support load or \"\n \"store data between UB and REG\")\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_S)\n if isinstance(dst, (Scalar, Expr)):\n dst_side = tvm.call_extern(dst.dtype, \"reg\", dst.get())\n else:\n if is_immediate_number(dst_offset):\n dst_side = dst.access_ptr(\n \"w\", extent=Expr(DTYPE_SIZE[dst.dtype]).get(),\n offset=dst_offset)\n else:\n dst_side = dst.access_ptr(\n \"w\", extent=Expr(DTYPE_SIZE[dst.dtype]).get(),\n offset=Expr(dst_offset).get())\n if isinstance(src, (Scalar, Expr)):\n src_side = tvm.call_extern(src.dtype, \"reg\", src.get())\n else:\n if src_offset is None:\n src_side = src.access_ptr(\n \"r\", extent=Expr(DTYPE_SIZE[src.dtype]).get())\n elif is_immediate_number(src_offset):\n src_side = src.access_ptr(\n \"r\", extent=Expr(DTYPE_SIZE[src.dtype]).get(),\n offset=src_offset)\n else:\n src_side = src.access_ptr(\n \"r\", extent=Expr(DTYPE_SIZE[src.dtype]).get(),\n offset=Expr(src_offset).get())\n\n # one ir is reg_mov\n self.emit(\n tvm.call_extern(\n dst.dtype,\n \"reg_mov\",\n dst_side,\n src_side,\n ), ONE_IR)\n\n def _set_fmatrix(self, *value):\n if len(value) == HAS_PARAM_CONCAT:\n fmatrix_value = value[0]\n elif len(value) == NEED_PARAM_CONCAT:\n pad, l1_h, l1_w = value\n TikCheckUtil.check_type_match(pad, list, \"pad should be list\")\n TikCheckUtil.check_type_match(l1_h, int, \"l1_h should be int\")\n TikCheckUtil.check_type_match(l1_w, int, \"l1_w should be int\")\n params = [l1_w, l1_h, pad[PADDING_LEFT_IDX],\n pad[PADDING_RIGHT_IDX], pad[PADDING_TOP_IDX],\n pad[PADDING_BOT_IDX]]\n offset_list = FMATRIX_OFFSET_LIST\n segment_list = FMATRIX_SEGMENT_LIST\n fmatrix_value = concat_params(params, offset_list, segment_list)\n # one ir is call_extern\n self.emit(tvm.call_extern(\"int64\", \"set_fmatrix\", fmatrix_value),\n ONE_IR)\n\n def _set_padding(self, value, dtype):\n if not is_basic_expr(value):\n TikCheckUtil.check_type_match(\n value, (int, float),\n \"set value should be float16, uint8 or int8\")\n if dtype in (\"uint8\", \"int8\"):\n params = [value, value]\n offset_list = PADDING_ONE_BYTE_OFFSET_LIST\n segment_list = PADDING_ONE_BYTE_SEGMENT_LIST\n else:\n params = [value]\n offset_list = PADDING_TWO_BYTE_OFFSET_LIST\n segment_list = PADDING_TWO_BYTE_SEGMENT_LIST\n padding = concat_params(params, offset_list, segment_list)\n with self.new_scope():\n # one ir is call_extern\n 
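# 'padding' was packed by concat_params above: for uint8/int8 the\n            # value fills two one-byte lanes, otherwise one two-byte lane\n            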
self.emit(tvm.call_extern(\"uint64\", \"set_padding\", padding),\n ONE_IR)\n\n @source_info_decorator()\n @debug.set_l0_set_value_decorator\n def set_l0_set_value(self, value, dtype):\n \"\"\"for tensor padding with matrix\n\n Parameters\n ----------\n value : input\n dtype : input's type\n\n Returns\n -------\n None\n \"\"\"\n TikCheckUtil.check_type_match(value, (int, float, Scalar),\n \"value should be int, float or Scalar\")\n TikCheckUtil.check_in_range(dtype, (\"float16\"),\n \"dtype only support float16\")\n if isinstance(value, Scalar):\n TikCheckUtil.check_equality(value.dtype, \"float16\",\n \"scalar_value should be float16\")\n if not isinstance(value, Scalar):\n if dtype == \"int16\":\n l0_set_2d_value = np.int16(value)\n elif dtype == \"uint16\":\n l0_set_2d_value = np.uint16(value)\n else:\n l0_set_2d_value = np.float16(value)\n l0_set_2d_value = l0_set_2d_value.view(np.float16)\n l0_set_2d_value = float(l0_set_2d_value)\n l0_set_2d_temp = _dtype_convert(l0_set_2d_value, dtype)\n else:\n l0_set_2d_temp = _dtype_convert(value, dtype)\n with self.new_scope():\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\"float16\", \"set_l0_set_value\",\n l0_set_2d_temp), ONE_IR)\n\n def _do_load3d_fmatrix(self, reg_fmatrix):\n if \"fmatrix\" in self.global_dict: # pylint: disable=E1101\n fmatrix = self.global_dict[\"fmatrix\"] # pylint: disable=E1101\n else:\n fmatrix = self.global_scalar(dtype=\"int64\") # pylint: disable=E1101\n self.global_dict[\"fmatrix\"] = fmatrix # pylint: disable=E1101\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n t_fmatrix = self.Scalar_(dtype=\"int64\") # pylint: disable=E1101\n t_fmatrix.set_as(reg_fmatrix)\n self.scope_attr(cce_params.CCE_AXIS, \"if_protect\", PIPE_MTE1)\n with self.if_scope_(fmatrix != t_fmatrix):\n fmatrix.set_as(t_fmatrix)\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\"int64\", \"set_fmatrix\", fmatrix.get()),\n ONE_IR)\n\n @source_info_decorator()\n @debug.load3dv1_decorator\n def load3dv1(self, dst, src, pad, l1_h, l1_w, # pylint: disable=R0913, R0914\n c1_index, fetch_filter_w, fetch_filter_h, left_top_w,\n left_top_h, stride_w, stride_h, filter_w, filter_h,\n dilation_filter_w, dilation_filter_h, jump_offset, repeat_mode,\n repeat_time, _csize=0, pad_value=None):\n \"\"\"image to colomn, only support L1 to L0A/L0B/UB\n\n Parameters\n ----------\n dst: destination operator\n src: source operator\n pad_list: [left, right, top, bottom]\n l1_h: height of src tensor\n l1_w: width of src tensor\n c1_index: C channel position/16 for f16, C channel position/32 for b8\n fetch_filter_w: fetch position in filter w dimension\n fetch_filter_h: fetch position in filter h dimension\n left_top_w: the start left top corner coordinate of windown in feature\n map(1st window position in w dimension)\n left_top_h: the start left top corner coordinate of windown in feature\n map(1st window position in h dimension)\n stride_w: filter stride size in w dimension\n stride_h: filter stride size in h dimension\n filter_w: width of filter\n filter_h: height of filter\n dilation_filter_w: dilation size of filter in w dimension\n dilation_filter_h: dilation size of filter in h dimension\n jump_offset: jump offset size of destination\n repeat_mode:\n repeat_time:\n _csize:\n pad_value: value for padding, default = None\n\n Returns\n -------\n None\n \"\"\"\n # too many arguments, so disable R0914\n dst_src_dtype_list = [\"u8u8\", \"s8s8\", \"f16f16\"]\n scope_map = {'l0a': 'ca', 'l0b': 'cb', 'ub': 'ub'}\n # check 
core_arch\n        TikCheckUtil.check_not_equality(get_soc_name() +\n                                        get_soc_core_type(), VEC,\n                                        \"current soc doesn't support load3dv1\")\n        # check tensor scope\n        dst_scope = dst.scope.split(\".\")[-1].lower()\n        src_scope = src.scope.split(\".\")[-1].lower()\n        TikCheckUtil.check_in_range(dst_scope, ('l0a', 'l0b', 'ub'),\n                                    \"dst_scope %s is not supported for \"\n                                    \"load3dv1.\" % dst.scope)\n        TikCheckUtil.check_in_range(src_scope, ('l1',),\n                                    \"src_scope %s is not supported for \"\n                                    \"load3dv1.\" % src.scope)\n        # check tensor dtype\n        dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n        TikCheckUtil.check_in_range(\n            dtype_str, dst_src_dtype_list,\n            \"dtype of dst should be u8u8, s8s8, or f16f16.\")\n        _load3dv1_col2img_check(fetch_filter_w, fetch_filter_h, left_top_w,\n                                left_top_h)\n        _load3dv1_load3dv2_col2img_check(pad, l1_w, l1_h, stride_w, stride_h,\n                                         filter_w, filter_h, dilation_filter_w,\n                                         dilation_filter_h)\n\n        # check index\n        TikCheckUtil.check_type_match(c1_index, (int, Scalar, Expr),\n                                      \"c1_index should be int, Scalar or Expr\")\n        check_scalar_dtype(c1_index,\n                           \"scalar_c1_index should be a scalar of int/uint\")\n        check_integer_in_range(c1_index, range(MAX_C1_INDEX),\n                               \"c1_index should be in the range of [0, 4095], \"\n                               \"input value is: {}\".format(c1_index))\n\n        # check jumpOffset\n        TikCheckUtil.check_type_match(\n            jump_offset, (int, Scalar, Expr),\n            \"jump_offset should be int, Scalar or Expr\")\n        check_scalar_dtype(jump_offset,\n                           \"scalar_jump_offset should be a scalar of int/uint\")\n        check_integer_in_range(\n            jump_offset, range(MIN_JUMP_OFFSET, MAX_JUMP_OFFSET),\n            \"jump_offset should be in the range of [1, 127], input value is: \"\n            \"{}\".format(jump_offset))\n        # check repeat_time\n        check_repeat_times(repeat_time)\n        # check repeatMode\n        TikCheckUtil.check_type_match(\n            repeat_mode, (int, Scalar, Expr),\n            \"repeat_mode should be int, Scalar or Expr\")\n        check_scalar_dtype(repeat_mode,\n                           \"scalar_repeat_mode should be a scalar of int/uint\")\n        check_integer_in_range(repeat_mode, range(MAX_REPEAT_MODE),\n                               \"repeat_mode should be 0 or 1, input value is: \"\n                               \"{}\".format(repeat_mode))\n        # check pad_value; it defaults to None, so only validate when given\n        if pad_value is not None:\n            TikCheckUtil.check_type_match(\n                pad_value, (int, float),\n                \"pad_value should be python int or float, \"\n                \"input type is: {}\".format(type(pad_value)))\n        # check _csize\n        TikCheckUtil.check_type_match(_csize, (int, Expr),\n                                      \"_csize should be int or Expr, input type\"\n                                      \" of _csize: {}\".format(_csize))\n        check_integer_in_range(_csize, range(MAX_C_SIZE))\n        # FMATRIX\n        orig_params = []\n        params = [l1_w, l1_h, pad[PADDING_LEFT_IDX], pad[PADDING_RIGHT_IDX],\n                  pad[PADDING_TOP_IDX], pad[PADDING_BOT_IDX]]\n        reg_fmatrix = concat_params(params, FMATRIX_OFFSET_LIST,\n                                    FMATRIX_SEGMENT_LIST)\n        orig_params += params[:]\n\n        self._do_load3d_fmatrix(reg_fmatrix)\n        # padding\n        do_load3d_padding(self, src, pad_value)\n        # code gen\n        params = [\n            c1_index, fetch_filter_w, fetch_filter_h, left_top_w, left_top_h\n        ]\n        offset_list = LOAD3DV1_REG_XM_OFFSET_LIST\n        segment_list = LOAD3DV1_REG_XM_SEGMENT_LIST\n        reg_xm = concat_params(params, offset_list, segment_list)\n        orig_params += params[:]\n\n        params = [\n            stride_w, stride_h, filter_w, filter_h, dilation_filter_w,\n            dilation_filter_h, jump_offset, repeat_mode, repeat_time\n        ]\n        offset_list = LOAD3DV1_REG_XT_OFFSET_LIST\n        segment_list = LOAD3DV1_REG_XT_SEGMENT_LIST\n        reg_xt = concat_params(params, offset_list, segment_list)\n        orig_params += params[:]\n\n        # cal extent\n        dst_extent = _calculate_extent_load3dv1(\n            dst, repeat_mode, repeat_time, 
jump_offset)\n\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_MTE1)\n if get_bit_len(dst.dtype) == DST_TYPE_LEN:\n instr = tvm.call_extern(\n dst.dtype, \"img2col_cbuf_to_\" + scope_map[dst_scope],\n dst.reinterpret_cast_to(\"float16\").access_ptr(\n \"w\", extent=dst_extent),\n src.reinterpret_cast_to(\"float16\").access_ptr(\n \"r\"), reg_xm, reg_xt, _csize)\n else:\n instr = tvm.call_extern(\n dst.dtype, \"img2col_cbuf_to_\" + scope_map[dst_scope],\n dst.access_ptr(\"w\", extent=dst_extent),\n src.access_ptr(\"r\"), reg_xm, reg_xt, _csize)\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n @source_info_decorator()\n @debug.col2img_decorator\n def col2img(self, dst, src, pad, l1_h, # pylint: disable=R0913, R0914\n l1_w, fetch_filter_w, fetch_filter_h, left_top_w, left_top_h,\n stride_w, stride_h, filter_w, filter_h, dilation_filter_w,\n dilation_filter_h, repeat_time):\n \"\"\"\n only support L1 to L0A/L0B/UB\n pad : [left, right, top, bottom]\n no Csize<=4, C0=16\n \"\"\"\n # subclass has the member but parent class call it, so disable E1101\n # too many arguments, so disable R0914\n # check tensor scope\n # last_string\n dst_scope = dst.scope.split(\".\")[-1].lower()\n src_scope = src.scope.split(\".\")[-1].lower()\n TikCheckUtil.check_equality(dst_scope, \"ub\", \"dst scope should be ub.\")\n TikCheckUtil.check_equality(src_scope, \"ub\", \"src scope should be ub.\")\n # check tensor dtype\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should\"\n \" be equal to dst's dtype\".\n format(\"col2img\"))\n TikCheckUtil.check_equality(api_check_support(\"tik.\" + \"col2img\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, \"col2img\"))\n _load3dv1_col2img_check(fetch_filter_w, fetch_filter_h, left_top_w,\n left_top_h)\n _load3dv1_load3dv2_col2img_check(pad, l1_w, l1_h, stride_w, stride_h,\n filter_w, filter_h, dilation_filter_w,\n dilation_filter_h)\n # check repeatMode\n repeat_mode = 1\n orig_params = []\n # gen\n params = [l1_w, l1_h, pad[PADDING_LEFT_IDX], pad[PADDING_RIGHT_IDX],\n pad[PADDING_TOP_IDX], pad[PADDING_BOT_IDX]]\n offset_list = REG_FCOL2IMG_OFFSET_LIST\n segment_list = REG_FCOL2IMG_SEGMENT_LIST\n reg_fcol2_img = concat_params(params, offset_list, segment_list)\n orig_params += params[:]\n\n params = [fetch_filter_w, fetch_filter_h, left_top_w, left_top_h]\n offset_list = COL2IMG_REG_XM_OFFSET_LIST\n segment_list = COL2IMG_REG_XM_SEGMENT_LIST\n reg_xm = concat_params(params, offset_list, segment_list)\n orig_params += params[:]\n\n params = [\n stride_w, stride_h, filter_w, filter_h, dilation_filter_w,\n dilation_filter_h, repeat_mode, repeat_time\n ]\n offset_list = COL2IMG_REG_XT_OFFSET_LIST\n segment_list = COL2IMG_REG_XT_SEGMENT_LIST\n reg_xt = concat_params(params, offset_list, segment_list)\n orig_params += params[:]\n\n with self.context.freeze(): # pylint: disable=E1101\n if \"fcol2img\" in self.global_dict: # pylint: disable=E1101\n fcol2img = self.global_dict[\"fcol2img\"] # pylint: disable=E1101\n else:\n fcol2img = self.global_scalar(dtype=\"int64\") # pylint: disable=E1101\n self.global_dict[\"fcol2img\"] = fcol2img # pylint: disable=E1101\n TikCheckUtil.check_type_match(fcol2img, Scalar,\n \"fcol2img should be Scalar\")\n with self.new_scope():\n temp_scalar = self.Scalar_(dtype=\"int64\") # pylint: disable=E1101\n temp_scalar.set_as(reg_fcol2_img)\n self.scope_attr(cce_params.CCE_AXIS, \"if_protect\", PIPE_MTE1)\n with self.if_scope_(fcol2img != 
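# Hedged usage sketch for load3dv1 above (not from the source): it assumes a
# Tik instance `tik_inst`, a float16 feature map `fm_l1` resident in L1 and a
# destination `col_l0a` in L0A; every shape and name here is hypothetical.
# tik_inst.load3dv1(col_l0a, fm_l1,
#                   [0, 0, 0, 0],   # pad: left, right, top, bottom
#                   16, 16,         # l1_h, l1_w
#                   0,              # c1_index
#                   0, 0,           # fetch_filter_w, fetch_filter_h
#                   0, 0,           # left_top_w, left_top_h
#                   1, 1,           # stride_w, stride_h
#                   3, 3,           # filter_w, filter_h
#                   1, 1,           # dilation_filter_w, dilation_filter_h
#                   1,              # jump_offset
#                   1,              # repeat_mode
#                   1)              # repeat_time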
temp_scalar):\n fcol2img.set_as(temp_scalar)\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\"int64\", \"set_fcol2img\",\n fcol2img.get()), ONE_IR)\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n instr = tvm.call_extern(dst.dtype, \"col2img\",\n dst.access_ptr(\"w\"),\n src.access_ptr(\"r\"),\n reg_xm, reg_xt)\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n @source_info_decorator()\n @debug.broadcast_ub_to_l0c_decorator\n def broadcast_ub_to_l0c(self, dst, src, nburst, burst_len, *strides):\n \"\"\"copy the data from tik.ubuf to tik.cc's tensor\n\n Parameters\n ----------\n dst : destination operator\n src : source operation\n nburst : [1, 255] continuous data segment for transfer instruction\n burst_len: nburst's length [1, 255]\n *strides: [src_gap, dst_gap]\n\n Returns\n -------\n None\n \"\"\"\n # check nburst\n TikCheckUtil.check_type_match(nburst, (int, Scalar, Expr),\n \"nburst should be int, Scalar or Expr\")\n check_scalar_dtype(nburst, \"scalar_nburst should be a scalar of int\")\n check_integer_in_range(\n nburst, range(MIN_NBURST, MAX_NBURST_SINGLE_BYTE),\n \"nburst should be in the range of [1, 255], input value is {}\"\n .format(nburst))\n # check burst_len\n TikCheckUtil.check_type_match(burst_len, (int, Scalar, Expr),\n \"burst_len should be int, Scalar or Expr\")\n check_scalar_dtype(burst_len,\n \"scalar_burst_len should be a scalar of int\")\n check_integer_in_range(\n burst_len, range(MIN_BURST_LEN, MAX_BURST_LEN_SINGLE_BYTE),\n \"burst_len should be in the range of [1, 255], input value is {}\"\n .format(burst_len))\n # check stride\n TikCheckUtil.check_type_match(\n strides[SRC_BLK_STRIDE_IDX], (int, Scalar, Expr),\n \"src_blk_stride should be int, Scalar or Expr\")\n TikCheckUtil.check_type_match(\n strides[DST_BLK_STRIDE_IDX], (int, Scalar, Expr),\n \"dst_blk_stride should be int, Scalar or Expr\")\n check_scalar_dtype(strides[SRC_BLK_STRIDE_IDX],\n \"scalar_src_blk_stride should be a scalar of int\")\n check_scalar_dtype(strides[DST_BLK_STRIDE_IDX],\n \"scalar_dst_blk_stride should be a scalar of int\")\n TikCheckUtil.check_equality(len(strides), STRIDES_LEN,\n \"length of strides should be 2\")\n\n if is_immediate_number(strides):\n check_integer_in_range(\n strides[DST_BLK_STRIDE_IDX], range(MAX_BLK_STRIDE_SINGLE_BYTE),\n \"dst_blk_stride should be in the range of [0, 255], input value\"\n \" is {}\".format(strides[DST_BLK_STRIDE_IDX]))\n check_integer_in_range(\n strides[SRC_BLK_STRIDE_IDX], range(MAX_BLK_STRIDE_SINGLE_BYTE),\n \"src_blk_stride should be in the range of [0, 255], input value\"\n \" is {}\".format(strides[SRC_BLK_STRIDE_IDX]))\n # check tensor dtype\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should be \"\n \"equal to dst's dtype\".\n format(\"broadcast_ub_to_l0c\"))\n TikCheckUtil.check_equality(\n intrinsic_check_support(\"Intrinsic_\" + \"broadcast_ub_to_cc\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.format(dst.dtype,\n \"broadcast_ub_to_l0c\"))\n # check tensor overflow\n _check_src_overflow_brc(src, nburst, burst_len,\n strides[SRC_BLK_STRIDE_IDX])\n _check_dst_overflow_brc(dst, nburst, burst_len,\n strides[DST_BLK_STRIDE_IDX])\n # gen\n params = [nburst, burst_len, strides[SRC_BLK_STRIDE_IDX],\n strides[DST_BLK_STRIDE_IDX]]\n # src burst_len: 16 element\n # src gap: 32 Byte\n # dst burst_len: 256 element\n # dst gap: 256 element\n # function _calculate_extent_broadcast_ub_to_l0c returns a list, idx 0\n # represents dst_extent and 
1 represents src_extent\n extents = _calculate_extent_broadcast_ub_to_l0c(\n dst, src, nburst, burst_len, [strides[DST_BLK_STRIDE_IDX],\n strides[SRC_BLK_STRIDE_IDX]])\n self._gen_brc_code(params, dst, src, extents)\n\n def _gen_brc_code(self, params, dst, src, extents):\n \"\"\"generate IR for broadcast_ub_to_l0c\n\n Parameters\n ----------\n params: params list\n dst : destination operator\n src : source operator\n extents: dst_extent, src_extent\n\n Returns\n -------\n None\n \"\"\"\n with self.new_scope():\n instr = tvm.call_extern(\n dst.dtype, \"broadcast_ub_to_cc\",\n dst.reinterpret_cast_to(dst.dtype).access_ptr(\n \"w\", extent=extents[0]),\n src.reinterpret_cast_to(src.dtype).access_ptr(\n \"r\", extent=extents[1]),\n *type_convert(params))\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n def _gen_mmad_broadcast_code(self, params, scope_map, src, dst):\n new_scope_map = deepcopy(scope_map)\n new_scope_map[scope_cbuf] = \"cbuf\"\n args = type_convert(params)\n with self.new_scope():\n instr = tvm.call_extern(\n dst.dtype, \"broadcast_\" + new_scope_map[src.scope] +\n \"_to_\" + new_scope_map[dst.scope], dst.access_ptr(\"w\"),\n src.access_ptr(\"r\"), *args)\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n @source_info_decorator()\n @debug.mmad_brc_decorator\n def mmad_broadcast(self, dst, src, repeat_mode, # pylint: disable=R0913\n nburst, burst_repeat, dst_gap, src_gap):\n \"\"\"\n Note: burst on src side is given in terms of 1*16 elements,\n burst on dst side is given in terms of 16*16 elements,\n gap on src side is given in terms of 32B,\n gap on dst side is given in terms of 16*16 elements.\n repeatMode=0: burst is effective, each 1*16 is broadcast\n to 16*16 fractal(repeat on N-dim)\n repeatMode=1: burst is restricted to 1, each burst(1*16) is\n broadcast to repeat*(16*16) fractals(repeat on M-dim);\n \"\"\"\n arch_version_dst_src_scope_map = {\n ASCEND_310AIC: ['l0cub'],\n ASCEND_910AIC: ['l0cub'],\n HI3796CV300ESAIC: ['l0cub', 'l0cl1'],\n HI3796CV300CSAIC: ['l0cub', 'l0cl1'],\n AIC: ['l0cub', 'l0cl1']\n }\n new_scope_map = deepcopy(SCOPE_MAP)\n new_scope_map[scope_ubuf] = \"ub\"\n # check scope\n dst_scope = dst.scope.split('.')[-1].lower()\n src_scope = src.scope.split('.')[-1].lower()\n TikCheckUtil.check_in_range(\n dst_scope + src_scope,\n arch_version_dst_src_scope_map[get_soc_name() +\n get_soc_core_type()],\n \"%s Instruction mmad_broadcast doesn't support \"\n \"broadcast %s to %s\" %\n (get_soc_name() + get_soc_core_type(), src_scope, dst_scope))\n # check repeatMode\n TikCheckUtil.check_type_match(repeat_mode, (int, Scalar),\n \"repeat_mode should be int or Scalar\")\n check_integer_in_range(repeat_mode, range(MAX_REPEAT_MODE),\n \"repeat_mode should be 0 or 1\")\n # check nburst\n check_integer_in_range(\n nburst, range(MIN_NBURST, MAX_NBURST_SINGLE_BYTE),\n \"nburst should be in the range of [1, 255]\")\n # check burst_repeat\n check_integer_in_range(\n burst_repeat, range(MIN_BURST_REPEAT, MAX_BURST_REPEAT),\n \"burst_repeat should be in the range of [1, 255]\")\n # check gap\n check_integer_in_range(dst_gap, range(MAX_DST_GAP_SINGLE_BYTE),\n \"dst_gap should be in the range of [0, 255]\")\n check_integer_in_range(src_gap, range(MAX_SRC_GAP),\n \"src_gap should be in the range of [0, 255]\")\n # check tensor dtype\n dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n 
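# A small numpy analogy (illustration only, not the hardware path) for the
# two repeat modes documented in mmad_broadcast above: mode 0 broadcasts each
# 1*16 burst across rows into one 16*16 fractal, while mode 1 repeats a
# single 1*16 burst into `repeat` 16*16 fractals along the M dimension.
import numpy as np

row = np.arange(16, dtype=np.float16).reshape(1, 16)
mode0 = np.tile(row, (16, 1))                       # one 1*16 -> one 16*16
repeat = 4
mode1 = np.stack([np.tile(row, (16, 1))] * repeat)  # repeat fractals on M-dim
assert mode0.shape == (16, 16) and mode1.shape == (repeat, 16, 16)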
TikCheckUtil.check_equality(api_check_support(\"tik.\" +\n \"mmad_broadcast\",\n dtype_str), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dtype_str, \"mmad_broadcast\"))\n # code gen\n params = [nburst, burst_repeat, src_gap, dst_gap, repeat_mode]\n self._gen_mmad_broadcast_code(params, new_scope_map, src, dst)\n\n def _check_padding_scope_dtype(self, dst, value):\n \"\"\"check tensor padding input scope and dtype\"\"\"\n if get_soc_name() in (ASCEND_610, ASCEND_620, HI3796CV300ES):\n TikCheckUtil.check_in_range(\n dst.scope, [scope_ca, scope_cb, scope_cbuf],\n \"dst scope should be L0A, L0B or L1, input dst scope: %s.\"\n % dst.scope)\n else:\n TikCheckUtil.check_in_range(\n dst.scope, [scope_ca, scope_cb],\n \"dst scope should be L0A or L0B, input dst scope: %s.\"\n % dst.scope)\n # check dst dtype\n if isinstance(value, Scalar):\n dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[value.dtype]\n else:\n # dtype_str: dst_dtype add dst_dtype\n dtype_str = DTYPE_MAP[dst.dtype]*2\n TikCheckUtil.check_in_range(\n dtype_str, [\"u16u16\", \"s16s16\", \"f16f16\"],\n \"dtype of dst and src should be u16u16, s16s16 or f16f16\")\n\n @source_info_decorator()\n @debug.set_2d_decorator\n def tensor_padding_with_matrix(self, dst, repeat_times, value=None):\n \"\"\"Move value to dst tensor\n\n Parameters\n ----------\n dst : destination tensor\n value : the value\n repeat_times : [1, 255] the invoke times\n\n Returns\n -------\n None\n \"\"\"\n # subclass has the member but parent class call it, so disable E1101\n scope_map = {\n scope_ca: \"l0a\",\n scope_cb: \"l0b\",\n scope_cc: \"l0c\",\n scope_cbuf: \"l1\",\n scope_ubuf: \"ub\",\n scope_gm: \"out\"\n }\n # check repeat_times\n check_integer_in_range(\n repeat_times, range(MIN_REPEAT_TIMES, MAX_REPEAT_TIMES),\n \"repeat_times should be in the range of [1, 255], input value is \"\n \"{}\".format(repeat_times))\n # check dst scope\n TikCheckUtil.check_not_equality(get_soc_name() + get_soc_core_type(),\n VEC, \"%s doesn't support \"\n \"tensor_padding_with_matrix.\"\n % (get_soc_name() +\n get_soc_core_type()))\n self._check_padding_scope_dtype(dst, value)\n # padding value\n if value is not None:\n TikCheckUtil.check_type_match(\n value, (int, float, Scalar),\n \"value should be int or float or Scalar\")\n with self.context.freeze(): # pylint: disable=E1101\n if not isinstance(value, Scalar): # immediate\n if \"l0_set_2d\" in self.global_dict: # pylint: disable=E1101\n l0_set_2d = self.global_dict[\"l0_set_2d\"] # pylint: disable=E1101\n else:\n l0_set_2d = self.global_scalar(dtype=\"int16\") # pylint: disable=E1101\n self.global_dict[\"l0_set_2d\"] = l0_set_2d # pylint: disable=E1101\n t_l0_set_2d = self.Scalar_(dtype=\"int16\") # pylint: disable=E1101\n if dst.dtype == \"float16\":\n l0_set_2d_value = np.float16(value)\n elif dst.dtype == \"int16\":\n l0_set_2d_value = np.int16(value)\n else:\n l0_set_2d_value = np.uint16(value)\n l0_set_2d_value = l0_set_2d_value.view(np.int16)\n l0_set_2d_value = int(l0_set_2d_value)\n t_l0_set_2d.set_as(l0_set_2d_value)\n self.scope_attr(cce_params.CCE_AXIS, \"if_protect\",\n PIPE_MTE1)\n with self.if_scope_(l0_set_2d != t_l0_set_2d):\n l0_set_2d.set_as(t_l0_set_2d)\n with self.new_scope():\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\n \"float16\", \"set_l0_set_value\",\n tvm.call_extern(\"float16\",\n \"reinterpret_cast\",\n l0_set_2d.get())), ONE_IR)\n else: # scalar\n with self.new_scope():\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\n \"float16\", \"set_l0_set_value\",\n 
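# Hedged usage sketch for tensor_padding_with_matrix above (names assumed,
# not from the source): fill an L0A tensor with a float16 immediate, one
# 16*16 fractal per repeat.
# l0a_buf = tik_inst.Tensor("float16", (256,), name="l0a_buf", scope=scope_ca)
# tik_inst.tensor_padding_with_matrix(l0a_buf, repeat_times=1, value=0.0)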
tvm.call_extern(\"float16\", \"reinterpret_cast\",\n value.get())), ONE_IR)\n # code gen\n args = concat_params([repeat_times],\n TENSOR_PADDING_OFFSET_LIST,\n TENSOR_PADDING_SEGMENT_LIST)\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n instr = tvm.call_extern(dst.dtype,\n \"set_\" + scope_map[dst.scope] + \"_2d\",\n dst.access_ptr(\"w\"), type_convert(args))\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n def _gen_vector_scalar_code(self, scalar, dst, name, # pylint: disable=R0913\n config, mask_o, mask_mode, dst_extent):\n scalar_tmp = _dtype_convert(scalar, dst.dtype)\n with self.new_scope():\n if mask_mode == \"counter\":\n # save orig_ctrl\n orig_ctrl = set_ctrl_counter_mask(self)\n\n instr = tvm.call_extern(dst.dtype, name,\n dst.access_ptr(\"w\", extent=dst_extent),\n scalar_tmp, *type_convert(config))\n self.emit(tvm.call_extern(\"int64\", \"set_vector_mask\", *mask_o))\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n # one ir is call_extern, one ir is set_vector_mask\n self.emit(instr, TWO_IR)\n\n # reset CTRL SPR as orig_ctrl\n if mask_mode == \"counter\":\n reset_ctrl_counter_mask(self, orig_ctrl)\n\n def _check_vector_scalar_operator_and_get_dst_name(self, name, dst, scalar,\n print_name):\n \"\"\"check operator for vector_scalar_elewise_func and\n get special dst name for different instructions\n \"\"\"\n # check instruction\n if name == \"vci\":\n TikCheckUtil.check_equality(\n get_soc_name() + get_soc_core_type(), VEC,\n \"only {} support instruction vci\".format(VEC))\n dst_name = \"dst_index\"\n scalar_name = \"start_point\"\n else:\n dst_name = \"dst\"\n scalar_name = \"scalar\"\n # check dst\n TikCheckUtil.check_type_match(dst, Tensor,\n \"{} should be tensor, input type is\"\n \" {}\".format(dst_name, type(dst)))\n TikCheckUtil.check_equality(dst.scope, \"local.UB\",\n \"{}'s scope must be UB, not support \"\n \"scope: {}\".format(dst_name, dst.scope))\n # check scalar\n TikCheckUtil.check_type_match(scalar, (int, float, Expr, Scalar),\n \"{} should be int, float, Expr or Scalar,\"\n \" input type is {}\".format(scalar_name,\n type(scalar)))\n # check dtype\n if isinstance(scalar, Scalar):\n TikCheckUtil.check_equality(dst.dtype, scalar.dtype,\n \"Intrinsic {}'s scalar's \"\n \"dtype should be\"\n \" equal to dst's dtype\".\n format(print_name))\n TikCheckUtil.check_equality(api_check_support(\"tik.\" +\n name, dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, print_name))\n if \"int\" in dst.dtype:\n TikCheckUtil.check_not_equality(\n type(scalar), float,\n \"{} should not be float when {}.dtype is {}\".format(\n scalar_name, dst_name, dst.dtype))\n return dst_name\n\n def _check_vector_scalar_params_and_get_mask_and_extent( # pylint: disable=R0913\n self, repeat_times, mask_mode, dst_blk_stride,\n dst_rep_stride, stride_unit, mask, dst, dst_name, mask_o):\n \"\"\"check params for vector_scalar_elewise_func and\n get mask_o and dst extent\n \"\"\"\n # check repeat\n check_repeat_times(repeat_times)\n # check mask_mode\n TikCheckUtil.check_in_range(\n mask_mode, (\"normal\", \"counter\"),\n \"mask_mode should be 'normal' or 'counter'.\")\n # check strides\n check_vector_stride([dst_blk_stride], [dst_rep_stride],\n MAX_BLK_STRIDE_DOUBLE_BYTE,\n MAX_REP_STRIDE_SINGLE_BYTE, [dst_name])\n # check stride_unit\n check_stride_unit(stride_unit)\n # check mask and get mask_o\n if mask_o is None:\n mask_o = mask_concat(self, mask, mask_mode=mask_mode,\n 
tensor_bit_len=get_bit_len(dst.dtype))\n # check tensor overflow(static)\n if is_immediate_number(dst_blk_stride) and \\\n stride_unit in (_STRIDE_UNIT_ZERO, _STRIDE_UNIT_ONE) \\\n and dst_blk_stride == _DEFAULT_STRIDE:\n new_dst_bs = 1\n else:\n new_dst_bs = dst_blk_stride\n check_tensor_overflow((dst,), mask, repeat_times, (new_dst_bs,),\n (dst_rep_stride,), (dst_name,),\n stride_unit=stride_unit, mask_mode=mask_mode)\n # calculate dst_extent Byte\n dst_extent = cal_extent_stride_unit_mask(mask, repeat_times, dst,\n stride_unit, new_dst_bs,\n dst_rep_stride,\n mask_mode=mask_mode)\n if stride_unit in (_STRIDE_UNIT_ZERO, _STRIDE_UNIT_ONE) and \\\n is_basic_expr(dst_blk_stride):\n dst_extent = dst_extent + ONE_REP_BYTE_SIZE\n return mask_o, dst_extent\n\n @source_info_decorator(depth=2)\n @debug.vec_scalar_elewise_func_dec\n def _vector_scalar_elewise_func(self, name, # pylint: disable=R0913, R0914\n mask, dst, scalar, repeat_times,\n dst_blk_stride, dst_rep_stride, stride_unit,\n mask_mode=\"normal\",\n print_name=None, mask_o=None):\n \"\"\"copy scalar to vector\n\n Parameters\n ----------\n dst : destination operator\n mask : Effective operation on element, divided into two model:\n Continuous and bit by bit.\n dst : destination operator\n scalar : the copied scalar\n repeat_times : Repeated iterations times\n dst_blk_stride : offset of dst operator between different block\n in one iteration\n dst_rep_stride : offset of dst operator in the same block\n between adjacent iterations\n stride_unit : address and offset unit both affect it. default = 0\n\n Returns\n -------\n None\n \"\"\"\n # check dst and scalar\n if print_name is None:\n print_name = name\n dst_name = self._check_vector_scalar_operator_and_get_dst_name(\n name, dst, scalar, print_name)\n # check params and get mask_o, dst_extent\n mask_o, dst_extent = \\\n self._check_vector_scalar_params_and_get_mask_and_extent(\n repeat_times, mask_mode, dst_blk_stride, dst_rep_stride,\n stride_unit, mask, dst, dst_name, mask_o)\n\n if name == \"vci\":\n config = [repeat_times, dst_blk_stride, dst_rep_stride,\n stride_unit & 0b01, (stride_unit & 0b10) >> 1]\n else:\n config = [repeat_times, dst_blk_stride, _DEFAULT_STRIDE,\n dst_rep_stride, _DEFAULT_STRIDE]\n # code gen\n self._gen_vector_scalar_code(scalar, dst, name, config, mask_o,\n mask_mode, dst_extent)\n\n def vector_dup(self, # pylint: disable=R0913\n mask,\n dst,\n scalar,\n repeat_times,\n dst_blk_stride,\n dst_rep_stride,\n stride_unit=0):\n \"\"\"copy scalar to vector\n\n Parameters\n ----------\n dst : destination operator\n mask : Effective operation on element, divided into two model:\n Continuous and bit by bit.\n scalar : the copied scalar\n repeat_times : Repeated iterations times\n dst_blk_stride : offset of dst operator between different block\n in one iteration\n dst_rep_stride : offset of dst operator in the same block\n between adjacent iterations\n stride_unit : address and offset unit both affect it. 
default = 0\n\n Returns\n -------\n None\n \"\"\"\n return self._vector_scalar_elewise_func('vector_dup', mask, dst,\n scalar, repeat_times,\n dst_blk_stride, dst_rep_stride,\n stride_unit)\n\n def vci(self, mask, dst_index, start_point, # pylint: disable=R0913\n repeat_times, dst_blk_stride, dst_rep_stride, stride_unit=0,\n mask_mode=\"normal\"):\n \"\"\"creat vector indexes from a start point\n\n mask : Effective operation on element, divided into two model:\n Continuous and bit by bit.\n dst_index : destination operator\n start_point : the start point\n repeat_times : Repeated iterations times\n dst_blk_stride : offset of dst operator between different block\n in one iteration\n dst_rep_stride : offset of dst operator in the same block\n between adjacent iterations\n stride_unit : address and offset unit both affect it. default = 0\n\n Returns\n -------\n None\n \"\"\"\n return self._vector_scalar_elewise_func('vci', mask, dst_index,\n start_point, repeat_times,\n dst_blk_stride, dst_rep_stride,\n stride_unit,\n mask_mode=mask_mode)\n\n def _gen_data_move_code(self, src, dst, # pylint: disable=R0913, R0914\n dma_list, type_args, args, argv, name=\"data_move\"):\n src_key_str = TikUtil.get_storage_scope(src.scope)\n dst_key_str = TikUtil.get_storage_scope(dst.scope)\n key = src_key_str + \" \" + dst_key_str\n if key == \"OUT L1\":\n e_args = _extend_args(\"PadMode\", args, argv)\n elif key in [\"UB L0C\", \"L0C UB\"]:\n e_args = _extend_args(\"ConvReluMode\", args, argv)\n else:\n e_args = []\n TikCheckUtil.check_not_is(dma_list.get(key), None,\n \"%s doesn't support %s to %s\" %\n (name, src_key_str, dst_key_str))\n pipe_line, intrin_name = dma_list[src_key_str + \" \" + dst_key_str]\n with self.new_scope():\n # type_args : sid, nburst, burst, src_stride, dst_stride\n if src.dtype != \"int32\" or dst.dtype != \"float16\":\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", pipe_line)\n instr = tvm.call_extern(\n dst.dtype, intrin_name, dst.access_ptr(\n \"w\", extent=_calculate_extent(\"\", src, dst,\n [type_args[1],\n type_args[2],\n type_args[4]], False)),\n src.access_ptr(\n \"r\", extent=_calculate_extent(\"\", src, dst,\n [type_args[1],\n type_args[2],\n type_args[3]], True)),\n *(type_convert(type_args + list(e_args))))\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n def _dma_quant_set_deqscale_tensor(self, quant_param, relu_flag):\n # subclass has the member but parent class call it, so disable E1101\n cr_mode = CONV_RELU_VECTOR_QUANT # 7\n with self.context.freeze(): # pylint: disable=E1101\n scale_addr = self.Scalar_(dtype=\"int64\") # pylint: disable=E1101\n # lsb: 32B\n # one ir is call_extern\n self.emit(tvm.call_extern(\n scale_addr.dtype, \"reg_set\", scale_addr.get(),\n tvm.expr.Cast(\"int64\", quant_param.access_ptr(\"r\")) //\n tvm.const(BYTE_SIZE, \"int64\")), ONE_IR)\n if relu_flag:\n scale_addr.set_as(scale_addr | (1 <<\n SCALE_ADDR_BIT_POS - 1))\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\n \"float16\", \"set_deqscale\",\n tvm.call_extern(\"float16\", \"reinterpret_cast\",\n scale_addr.get())), ONE_IR)\n return cr_mode\n\n @source_info_decorator()\n @debug.dma_dquant_decorator\n def data_move_quant(self, # pylint: disable=R0913\n dst,\n src,\n sid,\n nburst,\n burst,\n src_stride,\n dst_stride,\n quant_param,\n relu_flag=False):\n \"\"\"Move tensor from tik.cbuf to tik.ubuf\n\n Parameters\n ----------\n dst : destination operator\n src : source 
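# Hedged usage sketch (assumes a Tik instance `tik_inst` and a UB tensor;
# names are hypothetical): vector_dup fills 128 float16 lanes per repeat with
# a scalar, while vci would instead write ascending indexes from start_point.
# ub_buf = tik_inst.Tensor("float16", (128,), name="ub_buf", scope=scope_ubuf)
# mask, repeat, blk_stride, rep_stride = 128, 1, 1, 8
# tik_inst.vector_dup(mask, ub_buf, 0.0, repeat, blk_stride, rep_stride)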
operation\n sid: 0, float16\n nburst : [1, 4095] continuous data segment for transfer instruction\n burst: nburst's length [1, 65535]\n dst_stride : offset of dst tensor between adjacent data segment\n src_stride : offset of src tensor between adjacent data segment\n quant_param : Anti-quantization parameter tensor start element\n relu_flag : True/False\n\n Returns\n -------\n None\n \"\"\"\n check_integer_in_range(sid, range(MAX_SID),\n \"sid should be in the range of [0, 15]\")\n check_dma_instr_params(dst, src, nburst, burst, src_stride, dst_stride)\n if isinstance(quant_param, Tensor):\n cr_mode = self._dma_quant_set_deqscale_tensor(quant_param,\n relu_flag)\n else:\n cr_mode = CONV_RELU_QUANT # 3\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n # one ir is call_extern\n self.emit(tvm.call_extern(\"float16\", \"set_deqscale\",\n quant_param), ONE_IR)\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n args = type_convert(\n [sid, nburst, burst, src_stride, dst_stride, cr_mode])\n\n instr = tvm.call_extern(\n dst.dtype, \"copy_matrix_cc_to_ubuf\", dst.access_ptr(\n \"w\", extent=_calculate_extent(\"\", src, dst,\n [args[1], args[2], args[4]],\n False)),\n src.access_ptr(\n \"r\", extent=_calculate_extent(\"\", src, dst,\n [args[1], args[2], args[3]],\n True)),\n *args)\n # one ir is call extern\n self.emit(instr, ONE_IR)\n\n def _gen_tensor_mov_code(self, config, # pylint: disable=R0913, R0914\n src_scope, dst_scope, dma_list, pad_mode, src, dst,\n dtype_str, block_mode, deqscale, src_onthefly,\n en_onthefly):\n # arguments too many, so disabled.\n # instruction issue\n TikCheckUtil.check_in_range((src_scope, dst_scope), dma_list,\n \"tensor_move doesn't support %s to %s\" %\n (src_scope, dst_scope))\n # config: sid_store_mode, nburst, burst_len, src_stride, dst_stride\n src_extent = _calculate_extent(block_mode, src, dst,\n [config[1], config[2], config[3]], True)\n dst_extent = _calculate_extent(\n block_mode, src, dst, [config[1], config[2], config[4]], False,\n en_onthefly)\n if (src_scope, dst_scope) == (\"gm\", \"cbuf\"):\n config.append(pad_mode)\n elif (src_scope, dst_scope) in [(\"ubuf\", \"cc_m\"), (\"ubuf\", \"cc_v\"),\n (\"ubuf\", \"cc_sc\"), (\"cc_m\", \"ubuf\"),\n (\"cc_v\", \"ubuf\"), (\"cc_sc\", \"ubuf\"),\n (\"cc_dp\", \"ubuf\")]:\n config.append(CR_MODE_MAP[dtype_str])\n pipe_line, intrin_name = dma_list[(src_scope, dst_scope)]\n with self.new_scope():\n if en_onthefly:\n self.scope_attr(\n cce_params.CCE_AXIS, \"critical_bank_conflict\",\n tvm.call_extern(\n dst.dtype, \"tvm_tuple\", dst.access_ptr(\"w\"),\n deqscale.access_ptr(\"r\"),\n src_onthefly.access_ptr(\"r\")))\n if intrin_name == \"copy_matrix_cc_to_ubuf\"\\\n and DTYPE_MAP[dst.dtype] in ('s8', 'u8'):\n instr = tvm.call_extern(\"int8\", intrin_name + \"_s8\",\n dst.reinterpret_cast_to(\"int8\")\n .access_ptr(\"w\", extent=dst_extent),\n src.access_ptr(\"r\", extent=src_extent),\n *type_convert(config))\n else:\n instr = tvm.call_extern(dst.dtype, intrin_name,\n dst.access_ptr(\"w\", extent=dst_extent),\n src.access_ptr(\"r\", extent=src_extent),\n *type_convert(config))\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", pipe_line)\n # one ir is call_extern\n self.emit(instr, ONE_IR)\n\n def _check_tensor_mov_scope(self, scope_map, # pylint: disable=R0913\n src, dst, block_mode, archversion_scope):\n arch_version_str = get_soc_name() + get_soc_core_type()\n src_scope = scope_map[src.scope]\n dst_scope = 
scope_map[dst.scope]\n scope_str = _get_scope_str(scope_map, block_mode, src, dst)\n TikCheckUtil.check_in_range(\n scope_str, archversion_scope[arch_version_str],\n \"%s Instruction tensor_mov doesn't support %s to %s.\" %\n (arch_version_str, src_scope, dst_scope))\n return arch_version_str\n\n @source_info_decorator()\n @debug.tensor_move_decorator\n def tensor_mov(self, dst, src, block_mode, # pylint: disable=R0913, R0914\n nburst, burst_len, dst_stride, src_stride, deqscale=None,\n sid_store_mode=0, relu=False, pad_mode=None,\n pad_value=None, # pylint: disable=W0613\n onthefly_mode=0, src_onthefly=None, src_onthefly_stride=0):\n # function's unused param (pad_value) is used in decorators, so disable\n # it\n \"\"\"\n pad_value is not supported yet, please use _set_padding().\n Leaky-Relu is not supported yet.\n block_mode: '' for 32B\n 'm' for 16*16 matrix(only cc)\n 'v' for 1*16 matrix(only cc)\n 'sc' for 16*4 matrix(only cc)\n 'dp' for 16*8 matrix(only cc)\n deqscale: None for normal-mode\n (float, Scalar.float) for deq-mode;\n (int, Scalar.uint) for deq8/deq16/deqs16-mode;\n (tensor.float) for vdeq-mode;\n (tensor.uint) for vdeq8/vdeq16/vdeqs16-mode;\n sid_store_mode: sid for gm\n 0 for store-high-16B(only cc)\n 1 for store-low-16B(only cc)\n 2 for store-compact(only cc)\n Note: if you don't intend to use/modify onthefly_mode/padMode,\n please set them to None or leave them alone; otherwise,\n performance will degrade.\n \"\"\"\n # too many arguments, so disable R0914\n # arch_version_str+scope & scope+dtype_convrelu_onthefly &\n # arch_version_str+lrelu\n archversion_scope = {\n ASCEND_310AIC: [\n 'gm2cbuf', 'gm2ubuf', 'cbuf2ubuf', 'cc_m162ubuf',\n 'cc_m322ubuf', 'cc_v162ubuf', 'cc_v322ubuf', 'ubuf2gm',\n 'ubuf2cbuf', 'ubuf2ubuf', 'ubuf2cc_m16', 'ubuf2cc_m32',\n 'ubuf2cc_v16', 'ubuf2cc_v32'\n ],\n ASCEND_910AIC: [\n 'gm2cbuf', 'gm2ubuf', 'cbuf2ubuf', 'cc_m162ubuf',\n 'cc_m322ubuf', 'cc_v162ubuf', 'cc_v322ubuf', 'ubuf2gm',\n 'ubuf2cbuf', 'ubuf2ubuf', 'ubuf2cc_m16', 'ubuf2cc_m32',\n 'ubuf2cc_v16', 'ubuf2cc_v32'\n ],\n HI3796CV300ESAIC: [\n 'gm2cbuf', 'gm2ubuf', 'cbuf2ubuf', 'cc_m162ubuf',\n 'cc_m322ubuf', 'cc_v162ubuf', 'cc_v322ubuf', 'cc_dp162ubuf',\n 'ubuf2gm', 'ubuf2cbuf', 'ubuf2ubuf', 'ubuf2cc_m16',\n 'ubuf2cc_m32', 'ubuf2cc_v16', 'ubuf2cc_v32'\n ],\n HI3796CV300CSAIC: [\n 'gm2cbuf', 'gm2ubuf', 'cbuf2ubuf', 'cbuf2cc_m16',\n 'cbuf2cc_m32', 'cbuf2cc_v16', 'cbuf2cc_v32', 'cbuf2cc_sc32',\n 'cc_m162ubuf', 'cc_m322ubuf', 'cc_v162ubuf', 'cc_v322ubuf',\n 'cc_dp162ubuf', 'ubuf2gm', 'ubuf2cbuf', 'ubuf2ubuf',\n 'ubuf2cc_m16', 'ubuf2cc_m32', 'ubuf2cc_v16', 'ubuf2cc_v32'\n ],\n AIC: [\n 'gm2cbuf', 'gm2ubuf', 'cbuf2ubuf', 'cbuf2cc_m16',\n 'cbuf2cc_m32', 'cbuf2cc_v16', 'cbuf2cc_v32', 'cbuf2cc_sc32',\n 'cc_m162ubuf', 'cc_m322ubuf', 'cc_v162ubuf', 'cc_v322ubuf',\n 'cc_dp162ubuf', 'cc_dp322ubuf', 'ubuf2gm', 'ubuf2cbuf',\n 'ubuf2ubuf', 'ubuf2cc_m16', 'ubuf2cc_m32', 'ubuf2cc_v16',\n 'ubuf2cc_v32'\n ],\n VEC: ['gm2ubuf', 'ubuf2gm']\n }\n archversion_convrelu = {\n ASCEND_310AIC: [0, 1, 2, 3, 4, 5, 6, 7],\n ASCEND_910AIC: [0, 1, 2, 3, 4, 5, 6, 7],\n HI3796CV300ESAIC: [0, 3, 5, 6, 7, 8, 9],\n HI3796CV300CSAIC: [0, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13],\n AIC: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n }\n src_dst_scope_dtype_map = {\n \"gm\": {\n \"cbuf\": [\n \"u8u8\", \"s8s8\", \"f16f16\", \"u16u16\", \"s16s16\", \"f32f32\",\n \"s32s32\", \"u32u32\", \"f64f64\", \"u64u64\", \"s64s64\"\n ],\n \"ubuf\": [\n \"u8u8\", \"s8s8\", \"f16f16\", \"u16u16\", \"s16s16\", \"f32f32\",\n \"s32s32\", \"u32u32\", 
\"f64f64\", \"u64u64\", \"s64s64\"\n ]\n },\n \"cbuf\": {\n \"ubuf\": [\n \"u8u8\", \"s8s8\", \"f16f16\", \"u16u16\", \"s16s16\", \"f32f32\",\n \"s32s32\", \"u32u32\", \"f64f64\", \"u64u64\", \"s64s64\"\n ],\n \"cc_m\":\n [\"f16f16\", \"u16u16\", \"s16s16\", \"f32f32\", \"s32s32\",\n \"u32u32\"],\n \"cc_v\": [\"f32f32\", \"s32s32\", \"u32u32\"],\n \"cc_sc\": [\"f32f32\", \"s32s32\", \"u32u32\"]\n },\n \"cc_m\": {\n \"ubuf\": [\n \"f16f16\", \"f32f32\", \"s32s32\", \"f32f16\", \"f32f16relu\",\n \"s32f16deq\", \"f16f16relu\", \"f32f32relu\", \"s32s32relu\",\n \"f16f16deq\", \"s32f16vdeq\", \"s32f16vdeqrelu\", \"s32s8vdeq8\",\n \"s32u8vdeq8\", \"s32s8vdeq8relu\", \"s32u8vdeq8relu\",\n \"s32s8deq8\", \"s32u8deq8\", \"s32s8deq8relu\", \"s32u8deq8relu\",\n \"s32f16vdeq16\", \"s32f16vdeq16relu\", \"s32f16deq16\",\n \"s32f16deq16relu\", \"s32s16vdeqs16\", \"s32s16vdeqs16relu\",\n \"s32s16deqs16\", \"s32s16deqs16relu\", \"u32u32\"\n ]\n },\n \"cc_v\": {\n \"ubuf\": [\n \"f16f16\", \"f32f32\", \"s32s32\", \"f32f16\", \"f32f16relu\",\n \"s32f16deq\", \"f16f16relu\", \"f32f32relu\", \"s32s32relu\",\n \"f16f16deq\", \"s32f16vdeq\", \"s32f16vdeqrelu\", \"s32s8vdeq8\",\n \"s32u8vdeq8\", \"s32s8vdeq8relu\", \"s32u8vdeq8relu\",\n \"s32s8deq8\", \"s32u8deq8\", \"s32s8deq8relu\", \"s32u8deq8relu\",\n \"s32f16vdeq16\", \"s32f16vdeq16relu\", \"s32f16deq16\",\n \"s32f16deq16relu\", \"s32s16vdeqs16\", \"s32s16vdeqs16relu\",\n \"s32s16deqs16\", \"s32s16deqs16relu\"\n ]\n },\n \"cc_sc\": {\n \"ubuf\": [\n \"f32f32\", \"s32s32\", \"s32f16deq\", \"f32f32relu\",\n \"s32s32relu\", \"s32f16vdeq\", \"s32f16vdeqrelu\", \"s32s8vdeq8\",\n \"s32u8vdeq8\", \"s32s8vdeq8relu\", \"s32u8vdeq8relu\",\n \"s32s8deq8\", \"s32u8deq8\", \"s32s8deq8relu\", \"s32u8deq8relu\",\n \"s32f16vdeq16\", \"s32f16vdeq16relu\", \"s32f16deq16\",\n \"s32f16deq16relu\", \"s32s16vdeqs16\", \"s32s16vdeqs16relu\",\n \"s32s16deqs16\", \"s32s16deqs16relu\"\n ]\n },\n \"cc_dp\": {\n \"ubuf\": [\n \"f16f16\", \"f32f32\", \"s32s32\", \"f32f16\", \"f32f16relu\",\n \"f16f16relu\", \"f16f16deq\"\n ]\n },\n \"ubuf\": {\n \"gm\": [\n \"u8u8\", \"s8s8\", \"f16f16\", \"u16u16\", \"s16s16\", \"f32f32\",\n \"s32s32\", \"u32u32\", \"f64f64\", \"u64u64\", \"s64s64\"\n ],\n \"cbuf\": [\n \"u8u8\", \"s8s8\", \"f16f16\", \"u16u16\", \"s16s16\", \"f32f32\",\n \"s32s32\", \"u32u32\", \"f64f64\", \"u64u64\", \"s64s64\"\n ],\n \"ubuf\": [\n \"u8u8\", \"s8s8\", \"f16f16\", \"u16u16\", \"s16s16\", \"f32f32\",\n \"s32s32\", \"u32u32\", \"f64f64\", \"u64u64\", \"s64s64\"\n ],\n \"cc_m\": [\"f16f16\", \"f32f32\", \"s32s32\", \"f32f16\", \"u32u32\"],\n \"cc_v\": [\"f16f16\", \"f32f32\", \"s32s32\", \"f32f16\", \"u32u32\"],\n \"cc_sc\": [\"f32f32\", \"s32s32\", \"f32f16\"]\n }\n }\n block_mode_appendix_map = {\n \"\": \"\",\n \"m\": \"_matrix\",\n \"v\": \"_vector\",\n \"sc\": \"_small_matrix\",\n \"dp\": \"_depthwise\"\n }\n dma_list = {\n # l0c16/l0c32-ub\n (\"cc_m\", \"ubuf\"): (2, 'copy_matrix_cc_to_ubuf'),\n # ub-l0c16/l0c32\n (\"ubuf\", \"cc_m\"): (2, 'copy_matrix_ubuf_to_cc'),\n # l0c16v/l0c32v-ub\n (\"cc_v\", \"ubuf\"): (2, 'copy_vector_cc_to_ubuf'),\n # ub-l0c16v/l0c32v\n (\"ubuf\", \"cc_v\"): (2, 'copy_vector_ubuf_to_cc'),\n # l0c32sc-ub\n (\"cc_sc\", \"ubuf\"): (2, 'copy_small_matrix_cc_to_ubuf'),\n # ub-l0c32sc\n (\"ubuf\", \"cc_sc\"): (2, 'copy_small_matrix_ubuf_to_cc'),\n # l0cdpf16/l0cdpf32-ub\n (\"cc_dp\", \"ubuf\"): (2, 'copy_depthwise_cc_to_ubuf'),\n (\"ubuf\", \"ubuf\"): (2, 'copy_ubuf_to_ubuf'),\n (\"cbuf\", \"ubuf\"): (4, 'copy_cbuf_to_ubuf'),\n (\"cbuf\", 
\"cc_m\"): (4, 'copy_cbuf_to_matrix_cc'),\n (\"cbuf\", \"cc_v\"): (4, 'copy_cbuf_to_vector_cc'),\n (\"cbuf\", \"cc_sc\"): (4, 'copy_cbuf_to_small_matrix_cc'),\n # LSU2\n (\"gm\", \"cbuf\"): (5, 'copy_gm_to_cbuf'),\n (\"gm\", \"ubuf\"): (5, 'copy_gm_to_ubuf'),\n # LSU3\n (\"ubuf\", \"gm\"): (6, 'copy_ubuf_to_gm'),\n (\"ubuf\", \"cbuf\"): (6, 'copy_ubuf_to_cbuf')\n }\n # check block_mode\n if block_mode == \"\":\n block_mode = \"m\"\n TikCheckUtil.check_in_range(\n block_mode, block_mode_appendix_map,\n \"Please specify block_mode: ''/'m'/'v'/'sc'.\")\n # check onthefly_mode\n TikCheckUtil.check_type_match(\n onthefly_mode, (int), \"onthefly_mode should be int, input type: {}\"\n .format(type(onthefly_mode)))\n # 0 is onthefly_mode default value\n arch_version_str = get_soc_name() + get_soc_core_type()\n if onthefly_mode != 0:\n TikCheckUtil.check_in_range(\n onthefly_mode, range(MIN_ONTHEFLY_MODE, MAX_ONTHEFLY_MODE),\n \"Please specify onthefly_mode 1-add, 2-sub, input \"\n \"onthefly_mode: {}\".format(onthefly_mode))\n en_onthefly = True\n TikCheckUtil.check_in_range(\n arch_version_str, ARCHVERSION_ONTHEFLY.keys(),\n \"%s doesn't support onthefly.\" % (arch_version_str))\n TikCheckUtil.check_type_match(\n src_onthefly, (Tensor),\n \"when onthefly_mode is not 0, src_onthefly should be Tensor, \"\n \"input type: {}\".format(type(src_onthefly)))\n TikCheckUtil.check_in_range(\n DTYPE_MAP[src_onthefly.dtype],\n ARCHVERSION_ONTHEFLY[arch_version_str],\n \"src_onthefly dtype should be in {}, input dtype: {}\"\n .format(ARCHVERSION_ONTHEFLY[arch_version_str],\n DTYPE_MAP[src_onthefly.dtype]))\n TikCheckUtil.check_not_equality(\n src_onthefly_stride, None,\n \"when onthefly_mode is not 0, src_onthefly_stride should not \"\n \"be None\")\n else:\n en_onthefly = False\n # check params\n check_dma_instr_params(dst, src, nburst, burst_len, src_stride,\n dst_stride, en_onthefly, src_onthefly_stride)\n # check scope\n arch_version_str = self._check_tensor_mov_scope(SCOPE_MAP, src, dst,\n block_mode,\n archversion_scope)\n # check deqscale\n dtype_str = DTYPE_MAP[src.dtype] + DTYPE_MAP[dst.dtype]\n deq_mode = _make_deq_mode(dtype_str, deqscale)\n\n # regen scope\n src_scope, dst_scope = _regen_tensor_mov_scope(src, dst, block_mode)\n\n # check dtype\n dtype_str = dtype_str + deq_mode\n if relu:\n dtype_str = dtype_str + \"relu\"\n # check convrelu\n TikCheckUtil.check_in_range(\n arch_version_str, archversion_convrelu,\n \"%s doesn't support convrelu feature.\" %\n (get_soc_name() + get_soc_core_type()))\n TikCheckUtil.check_in_range(\n CR_MODE_MAP[dtype_str], archversion_convrelu[arch_version_str],\n \"%s doesn't support this convrelu mode: %s.\" %\n (get_soc_name() + get_soc_core_type(), CR_MODE_MAP[dtype_str]))\n TikCheckUtil.check_in_range(\n dtype_str, src_dst_scope_dtype_map[src_scope][dst_scope],\n \"%s Instruction tensor_mov doesn't support %s in %s \"\n \"to %s in %s.\" % (get_soc_name() + get_soc_core_type(), src.dtype,\n src_scope, dst.dtype, dst_scope))\n # check padMode\n if pad_mode is not None:\n TikCheckUtil.check_type_match(\n pad_mode, int,\n \"padMode doesn't support online config or expr.\")\n TikCheckUtil.check_in_range(pad_mode, range(MAX_PADMODE),\n \"padMode should in range [0, 5].\")\n TikCheckUtil.check_equality((src_scope, dst_scope), (\"gm\", \"cbuf\"),\n \"PadMode only support OUT to L1.\")\n else:\n pad_mode = PADMODE_NO_PADDING\n # check deqscale\n if isinstance(deqscale, Tensor):\n TikCheckUtil.check_equality(SCOPE_MAP[deqscale.scope], \"ubuf\",\n \"deqscale should in 
UB.\")\n # check sid\n check_integer_in_range(sid_store_mode, range(VALUE_3),\n \"sid_store_mode should be in the range of \"\n \"[0, 2], input value: %s\" % str(sid_store_mode))\n # set deqscale\n self._set_deqscale(deq_mode, deqscale, relu, en_onthefly, src_onthefly)\n # xt gen\n # low 2 bits: sid_store_mode high 2 bits: onthefly_mode\n sid_store_mode = sid_store_mode + (onthefly_mode << SHIFT_BIT_POS_2)\n if en_onthefly:\n # low 8 bits: dst_stride high 8 bits: src_onthefly_stride\n dst_onthefly_stride = dst_stride + \\\n src_onthefly_stride << SHIFT_BIT_POS_8\n else:\n dst_onthefly_stride = dst_stride\n config = [sid_store_mode, nburst, burst_len, src_stride,\n dst_onthefly_stride]\n self._gen_tensor_mov_code(config, src_scope, dst_scope, dma_list,\n pad_mode, src, dst, dtype_str, block_mode,\n deqscale, src_onthefly, en_onthefly)\n\n def _set_deqscale(self, deq_mode, deqscale, relu, # pylint: disable=R0913\n en_onthefly, src_onthefly):\n # subclass has the member but parent class call it, so disable E1101\n if deq_mode in (\"deq\",):\n if en_onthefly:\n with self.context.freeze(): # pylint: disable=E1101\n scale_addr = self.Scalar_(\"int64\") # pylint: disable=E1101\n set_tensor_addr_to_scalar(self, scale_addr, src_onthefly)\n scale_addr.set_as(\n (scale_addr << DEQSCALE_SHIFT_POS) | deqscale)\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\",\n PIPE_V)\n self.emit(tvm.call_extern(\"float16\", \"set_deqscale\",\n scale_addr.get()), ONE_IR)\n else:\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS,\n \"coproc_scope\", PIPE_V)\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\"float16\", \"set_deqscale\",\n _dtype_convert(deqscale, \"float16\")),\n ONE_IR)\n elif deq_mode in (\"deq8\", \"deq16\", \"deqs16\"):\n with self.context.freeze(): # pylint: disable=E1101\n scale_addr = self.Scalar_(\"int64\") # pylint: disable=E1101\n if en_onthefly:\n set_tensor_addr_to_scalar(self, scale_addr, src_onthefly)\n scale_addr.set_as(\n (scale_addr << DEQSCALE_SHIFT_POS) | deqscale)\n else:\n scale_addr.set_as(deqscale)\n scale_addr.set_as(scale_addr | (int(relu) << SCALE_SHIFT_POS))\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\",\n PIPE_V)\n # one ir is call_extern\n self.emit(\n tvm.call_extern(\"int64\", \"set_deqscale\",\n scale_addr.get()), ONE_IR)\n else:\n self._set_deqscale_expansion(deqscale, relu, en_onthefly,\n src_onthefly, deq_mode)\n\n def _set_deqscale_expansion(self, deqscale, relu, # pylint: disable=R0913\n en_onthefly, src_onthefly, deq_mode):\n \"\"\"deq_mode in (\"vdeq\", \"vdeq8\", \"vdeqs16\") or else scope\"\"\"\n if deq_mode in (\"vdeq\", \"vdeq8\", \"vdeqs16\"):\n with self.context.freeze(): # pylint: disable=E1101\n scale_addr = self.Scalar_(\"int64\") # pylint: disable=E1101\n # lsb: 32B\n set_tensor_addr_to_scalar(self, scale_addr, deqscale)\n if deq_mode == \"vdeq\":\n scale_addr.set_as(scale_addr |\n (int(relu) << SCALE_ADDR_BIT_POS))\n else:\n scale_addr.set_as(scale_addr |\n (int(relu) << SCALE_SHIFT_POS))\n if en_onthefly:\n onthefly_addr = self.Scalar_(\n \"int64\") # pylint: disable=E1101\n set_tensor_addr_to_scalar(self, onthefly_addr,\n src_onthefly)\n scale_addr.set_as(scale_addr |\n (onthefly_addr << DEQSCALE_SHIFT_POS))\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\",\n PIPE_V)\n # one ir is call_extern\n self.emit(tvm.call_extern(\"int64\", \"set_deqscale\",\n scale_addr.get()), ONE_IR)\n else:\n if en_onthefly:\n with self.context.freeze(): # 
pylint: disable=E1101\n scale_addr = self.Scalar_(\"int64\") # pylint: disable=E1101\n set_tensor_addr_to_scalar(self, scale_addr, src_onthefly)\n scale_addr.set_as(scale_addr << DEQSCALE_SHIFT_POS)\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\",\n PIPE_V)\n self.emit(tvm.call_extern(\"int64\", \"set_deqscale\",\n scale_addr.get()), ONE_IR)\n\n @source_info_decorator()\n @debug.v4dtrans_decorator\n def v4dtrans(self, chw2hwc, dst, src, # pylint: disable=R0913\n m_len, channels):\n \"\"\" transform data between chw and hwc\n\n Parameters\n ----------\n chw2hwc : bool, True - chw->hwc; False - hwc->chw\n dst : destination operator\n src : source operator\n m_len : H*W direction dimension\n channels: size of C\n\n Returns\n -------\n None\n \"\"\"\n # check tensor\n TikCheckUtil.check_type_match(src, Tensor,\n \"src's type should be tensor, input \"\n \"type: %s\" % type(src))\n TikCheckUtil.check_type_match(dst, Tensor,\n \"dst's type should be tensor, input \"\n \"type: %s\" % type(dst))\n # check scope\n TikCheckUtil.check_equality(src.scope, scope_ubuf,\n \"src's scope must be UB, \"\n \"input scope is: %s\" % src.scope)\n TikCheckUtil.check_equality(dst.scope, scope_ubuf,\n \"dst's scope must be UB, \"\n \"input scope is: %s\" % dst.scope)\n # check dtype\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should \"\n \"be equal to dst's dtype\".\n format(\"v4dtrans\"))\n TikCheckUtil.check_equality(intrinsic_check_support(\"Intrinsic_\" +\n \"v4dtrans\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, \"v4dtrans\"))\n # check mode\n TikCheckUtil.check_type_match(chw2hwc, bool, \"chw2hwc should be bool.\")\n if chw2hwc:\n dir_v4dtrans = 0\n else:\n dir_v4dtrans = 1\n # check m_len\n if isinstance(m_len, (int, float)):\n TikCheckUtil.check_in_range(\n m_len, range(MIN_M_LEN, MAX_M_LEN),\n \"m_len should be in the range of [1,4095], \"\n \"input m_len: %s\" % str(m_len))\n image_size = m_len*get_bit_len(src.dtype) // ONE_BYTE_BIT_LEN\n TikCheckUtil.check_equality(\n image_size % ONE_BLK_SIZE, 0,\n \"H*W*dtype_size should be 32 Byte aligned, \"\n \"input size is %s\" % str(image_size))\n # check channels\n if isinstance(channels, (int, float)):\n TikCheckUtil.check_in_range(\n channels, range(MIN_CHANNELS, MAX_CHANNELS),\n \"channels should be in the range of [1,4095], \"\n \"input channels: %s\" % str(channels))\n # change dtype_str\n dtype_str = change_dtype_str(dst)\n # code gen\n config = [m_len, channels, dir_v4dtrans]\n args = concat_params(config, V4DTRANS_OFFSET_LIST,\n V4DTRANS_SEGMENT_LIST)\n # check tensor overflow, only for immediate\n if all(Expr(value).eval_value() is not None\n for value in (m_len, channels, src.offset, dst.offset)):\n # check address overlap\n check_addr_overlap_v4dtrans(dst, src, m_len, channels,\n Expr(dst.offset).eval_value(),\n Expr(src.offset).eval_value())\n\n TikCheckUtil.check_le(\n m_len*channels, reduce_mul(src.indice.origin_shape) -\n Expr(src.offset).eval_value(),\n \"src tensor overflow, m_len*channels is too big\")\n TikCheckUtil.check_le(\n m_len*channels, reduce_mul(dst.indice.origin_shape) -\n Expr(dst.offset).eval_value(),\n \"dst tensor overflow, m_len*channels is too big\")\n # cal extent\n src_extent = Expr(m_len*channels*DTYPE_SIZE[src.dtype]).get()\n dst_extent = Expr(m_len*channels*DTYPE_SIZE[dst.dtype]).get()\n # issue instruction\n with self.new_scope():\n instr = tvm.call_extern(\n dst.dtype, \"v4dtrans\",\n 
dst.reinterpret_cast_to(dtype_str).access_ptr(\n \"w\", extent=dst_extent),\n src.reinterpret_cast_to(dtype_str).access_ptr(\n \"r\", extent=src_extent), args)\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n self.emit(instr)\n\n @source_info_decorator()\n @debug.vpadding_decorator\n def vpadding(self, mask, pad_mode, # pylint: disable=R0913, R0914\n pad_side, dst, src, repeat_times, dst_blk_stride,\n src_blk_stride, dst_rep_stride,\n src_rep_stride, stride_unit=0, mask_mode=\"normal\"):\n \"\"\" padding src tensor\n\n Parameters\n ----------\n mask:\n pad_mode: 0 -> nearest-padding(aaa|a)\n 1 -> symmetric_padding0(abc|cba)\n 2 -> symmetric_padding1(ab|cba)\n pad_side: 'left'/'right'.\n dst: dst operator\n src: src operator\n repeat_times: Repeated iterations times\n dst_blk_stride: offset of dst operator between different block\n in one iteration\n src_blk_stride: offset of src operator between different block\n in one iteration\n dst_rep_stride: offset of dst operator in the same block\n between adjacent iterations\n src_rep_stride: offset of src operator in the same block\n between adjacent iterations\n stride_unit: address and offset unit both affect it. default = 0\n\n Returns\n -------\n None\n \"\"\"\n # check pad_mode\n TikCheckUtil.check_type_match(\n pad_mode, int,\n \"pad_mode should be int, input pad_mode: {}\".format(type(pad_mode)))\n check_integer_in_range(\n pad_mode, range(MAX_PAD_MODE),\n \"pad_mode should be in the range of [0, 2], input pad_mode: {}\"\n .format(pad_mode))\n # check pad_side\n TikCheckUtil.check_in_range(pad_side, (\"left\", \"right\"),\n \"pad_side should be 'left' or 'right', \"\n \"input pad_side: {}\".format(pad_side))\n if pad_side == \"left\":\n pad_side_t = 0\n else:\n pad_side_t = 1\n # check mask_mode\n TikCheckUtil.check_in_range(\n mask_mode, (\"normal\", \"counter\"),\n \"mask_mode should be 'normal' or 'counter'.\")\n # check repeat\n check_repeat_times(repeat_times)\n # check strides\n check_vector_stride(\n [dst_blk_stride, src_blk_stride], [dst_rep_stride, src_rep_stride],\n MAX_BLK_STRIDE_DOUBLE_BYTE, MAX_REP_STRIDE_SINGLE_BYTE,\n [\"dst\", \"src\"])\n # check stride_unit\n TikCheckUtil.check_type_match(stride_unit, int,\n \"stride_unit shoule be int, input stride_\"\n \"unit: {}\".format(type(stride_unit)))\n check_integer_in_range(\n stride_unit, range(MAX_STRIDE_UNIT),\n \"stride_unit should be in the range of [0, 3], \"\n \"input stride_unit: {}\".format(stride_unit))\n # check tensor dtype\n TikCheckUtil.check_type_match(src, Tensor, \"src should be Tensor.\")\n TikCheckUtil.check_type_match(dst, Tensor, \"dst should be Tensor.\")\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should be \"\n \"equal to dst's dtype\".format(\"vpadding\"))\n TikCheckUtil.check_equality(intrinsic_check_support(\"Intrinsic_\" +\n \"vpadding\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, \"vpadding\"))\n # check tensor scope\n TikCheckUtil.check_equality(src.scope, scope_ubuf,\n \"src's scope must be UB\")\n TikCheckUtil.check_equality(dst.scope, scope_ubuf,\n \"dst's scope must be UB\")\n # mask\n mask_o = mask_concat(self, mask, mask_mode,\n tensor_bit_len=max(get_bit_len(dst.dtype),\n get_bit_len(src.dtype)))\n # check tensor overflow(static)\n if mask_mode == \"normal\":\n # all elements in src are read even their mask bits are invalid\n if get_bit_len(src.dtype) == 32:\n mask_len = MASK_VALUE_64\n else:\n mask_len = MASK_VALUE_128\n else:\n mask_len = mask\n\n # check 
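# Numpy analogy (my best-guess mapping, illustration only) for the three
# pad_mode values documented in vpadding above, padding two elements on the
# 'left' side of a vector:
import numpy as np

v = np.array([1, 2, 3])
nearest = np.pad(v, (2, 0), mode="edge")          # pad_mode 0: aaa|a
symmetric0 = np.pad(v, (2, 0), mode="symmetric")  # pad_mode 1: abc|cba
symmetric1 = np.pad(v, (2, 0), mode="reflect")    # pad_mode 2: ab|cba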
address overlapping\n if src.buffer == dst.buffer:\n if all(isinstance(value, int) for \\\n value in (repeat_times, dst_blk_stride, src_blk_stride,\n dst_rep_stride, src_rep_stride)):\n check_address_overlapping(\n \"vpadding\", mask,\n dst, src, BLK_NUM_PER_REP,\n ONE_REP_BYTE_SIZE // max(\n get_bit_len(dst.dtype), get_bit_len(src.dtype)),\n ONE_REP_BYTE_SIZE // get_bit_len(dst.dtype),\n ONE_REP_BYTE_SIZE // get_bit_len(src.dtype),\n repeat_times, dst_blk_stride, src_blk_stride,\n dst_rep_stride, src_rep_stride,\n Expr(dst.offset).eval_value(),\n Expr(src.offset).eval_value(),\n stride_unit, mask_mode, src_mask=mask_len)\n\n vector_tensor_overflow_check(src, mask_len, BLK_NUM_PER_REP,\n ONE_REP_BYTE_SIZE // get_bit_len(\n src.dtype),\n repeat_times, src_blk_stride,\n src_rep_stride, \"src tensor overflow\",\n stride_unit, mask_mode)\n vector_tensor_overflow_check(dst, mask, BLK_NUM_PER_REP,\n ONE_REP_BYTE_SIZE // get_bit_len(\n dst.dtype),\n repeat_times, dst_blk_stride,\n dst_rep_stride, \"dst tensor overflow\",\n stride_unit, mask_mode)\n # change dtype_str\n dtype_str = change_dtype_str(dst)\n # cal extent\n src_extent = cal_extent_stride_unit_mask(\n mask, repeat_times, src, stride_unit, src_blk_stride,\n src_rep_stride, mask_mode)\n dst_extent = cal_extent_stride_unit_mask(\n mask, repeat_times, dst, stride_unit, dst_blk_stride,\n dst_rep_stride, mask_mode)\n # code gen\n config = [dst_blk_stride, src_blk_stride, dst_rep_stride,\n src_rep_stride, pad_mode, pad_side_t, stride_unit,\n repeat_times]\n args = concat_params(config, VPADDING_OFFSET_LIST,\n VPADDING_SEGMENT_LIST)\n with self.new_scope():\n if mask_mode == \"counter\":\n # save orig_ctrl\n orig_ctrl = set_ctrl_counter_mask(self)\n\n self.emit(tvm.call_extern(\"int64\", \"set_vector_mask\", *mask_o))\n instr = tvm.call_extern(\n dst.dtype, \"vpadding\",\n dst.reinterpret_cast_to(dtype_str).access_ptr(\n \"w\", extent=dst_extent),\n src.reinterpret_cast_to(dtype_str).access_ptr(\n \"r\", extent=src_extent), args)\n\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n self.emit(instr)\n\n # reset CTRL SPR as orig_ctrl\n if mask_mode == \"counter\":\n reset_ctrl_counter_mask(self, orig_ctrl)\n\n @source_info_decorator()\n @debug.vscatter_decorator\n def vscatter(self, mask, dst, src, dst_offset, # pylint: disable=R0913\n repeat_times,\n src_rep_stride, base_addr=0,\n stride_unit=0, mask_mode=\"normal\"):\n \"\"\"\n Scatter elements from src to dst according to offsets in dst_offset;\n the programmer should ensure all scattered elements fall inside dst's\n space.\n\n Parameters\n ----------\n mask:\n dst: dst operator\n src: src operator\n dst_offset: addr offset tensor\n repeat_times: Repeated iterations times\n src_rep_stride: offset of src operator in the same block\n between adjacent iterations\n stride_unit: address and offset unit both affect it. default = 0\n base_addr: init offset for dst, default = 0\n mask_mode: mode of mask, counter or normal. default = normal\n\n Returns\n -------\n None\n \"\"\"\n # check tensor dtype\n TikCheckUtil.check_type_match(src, Tensor,\n \"src should be Tensor.\"\n \" input type: {}\".format(type(src)))\n TikCheckUtil.check_type_match(dst, Tensor,\n \"dst should be Tensor.\"\n \" input type: {}\".format(type(dst)))\n TikCheckUtil.check_type_match(dst_offset, Tensor,\n \"dst_offset should be Tensor. 
\"\n \"input type: {}\".format(type(dst_offset)))\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should be \"\n \"equal to dst's dtype\".format(\"vscatter\"))\n TikCheckUtil.check_equality(intrinsic_check_support(\"Intrinsic_\" +\n \"vscatter\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, \"vscatter\"))\n # check offset tensor dtype\n TikCheckUtil.check_equality(dst_offset.dtype, \"int32\",\n \"dtype of dst_offset should be 'int32', \"\n \"but input dtype is \"\n \"'{}'\".format(dst_offset.dtype))\n # check tensor scope\n _check_vscatter_vgather_operator_scope(src, dst, dst_offset,\n \"dst_offset\")\n\n # check mask_mode\n TikCheckUtil.check_in_range(\n mask_mode, (\"normal\", \"counter\"),\n \"mask_mode should be a str of 'normal' or 'counter'.\"\n \" input mask_mode: {}\".format(mask_mode))\n # mask\n mask_o = mask_concat(self, mask, mask_mode,\n tensor_bit_len=max(get_bit_len(src.dtype),\n get_bit_len(dst.dtype)))\n # check repeat\n check_repeat_times(repeat_times)\n # check strides\n check_vector_stride(\n None, [src_rep_stride],\n None, MAX_REP_STRIDE_SINGLE_BYTE,\n [\"src\"])\n # check base_addr\n self._vscatter_check_base_addr(base_addr, dst)\n # check stride_unit\n TikCheckUtil.check_type_match(\n stride_unit, int,\n \"stride_unit shoule be int, input stride_unit:\"\n \" {}\".format(type(stride_unit)))\n check_integer_in_range(\n stride_unit, range(MAX_STRIDE_UNIT),\n \"stride_unit should be in the range of [0, 3], \"\n \"input stride_unit: {}\".format(stride_unit))\n\n # check tensor overflow(static)\n self._vscatter_check_overflow(mask, src, dst_offset, repeat_times,\n src_rep_stride, stride_unit, mask_mode)\n # code gen\n if isinstance(base_addr, Scalar):\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_S)\n base_addr.set_as(base_addr +\n tvm.expr.Cast(\"int64\",\n tvm.call_extern(\n \"handle\", \"\",\n dst.access_ptr(\"rw\"))))\n self.total_ir_lines += TWO_IR\n else:\n base_addr = base_addr + tvm.expr.Cast(\n \"int64\", tvm.call_extern(\"handle\", \"\", dst.access_ptr(\"rw\")))\n self._vscatter_code_gen(dst, src, dst_offset, mask_o,\n repeat_times, src_rep_stride,\n base_addr, stride_unit, mask_mode)\n\n def _vscatter_code_gen(self, dst, src, # pylint: disable=R0913, R0914\n dst_offset, mask_o, repeat_times, src_rep_stride,\n base_addr, stride_unit, mask_mode):\n \"\"\"vscatter code gen part\"\"\"\n default_stride = 0\n config = [base_addr, default_stride,\n src_rep_stride, stride_unit, repeat_times]\n args = concat_params(config,\n VSCATTER_VGATHER_XT_OFFSET_LIST,\n VSCATTER_VGATHER_XT_SEGMENT_LIST)\n if get_bit_len(src.dtype) == 16:\n dtype_str = \"uint16\"\n else:\n dtype_str = \"uint32\"\n with self.new_scope():\n if mask_mode == \"counter\":\n # save orig_ctrl\n orig_ctrl = set_ctrl_counter_mask(self)\n\n self.emit(tvm.call_extern(\"int64\", \"set_vector_mask\", *mask_o))\n\n instr = tvm.call_extern(\n dst_offset.dtype, \"vscatter\",\n dst_offset.reinterpret_cast_to('uint32').access_ptr(\"r\"),\n src.reinterpret_cast_to(dtype_str).access_ptr(\"r\"), args)\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n self.scope_attr(cce_params.CCE_AXIS, \"append_mem\",\n tvm.call_extern(\"handle\", \"mem_vector\",\n dst.access_ptr(\"rw\")))\n self.emit(instr, THREE_IR)\n\n # reset CTRL SPR as orig_ctrl\n if mask_mode == \"counter\":\n reset_ctrl_counter_mask(self, orig_ctrl)\n\n @staticmethod\n def 
_vscatter_check_base_addr(base_addr, dst):\n \"\"\"check vscatter base_addr param\"\"\"\n TikCheckUtil.check_type_match(\n base_addr, (int, Scalar),\n \"base_addr's type should be int or Scalar,\"\n \" input type is {}\".format(type(base_addr)))\n if isinstance(base_addr, int):\n # 2**31 - 1 is max base_addr, 31 bit len\n TikCheckUtil.check_in_range(\n base_addr, range(2 ** 31),\n \"base_addr should be in range of [0, 2**31 - 1].\"\n \" input base_addr: {}\".format(base_addr))\n\n # check valid\n dst_scope_size = reduce_mul(dst.indice.origin_shape) * \\\n get_bit_len(dst.dtype) // ONE_BYTE_BIT_LEN\n TikCheckUtil.check_le(base_addr, dst_scope_size,\n \"base_addr should be less than or equal to\"\n \" dst tensor's buffer size: {},\"\n \" input base_addr: {}\".format(\n dst_scope_size, base_addr))\n elif isinstance(base_addr, Scalar):\n TikCheckUtil.check_equality(base_addr.dtype, \"uint32\",\n \"Scalar base_addr \"\n \"should be dtype of uint32\")\n\n @staticmethod\n def _vscatter_check_overflow(mask, src, dst_offset, # pylint: disable=R0913\n repeat_times, src_rep_stride,\n stride_unit, mask_mode):\n \"\"\"vscatter instr check overflow\"\"\"\n if mask_mode == \"normal\":\n # all elements in src are read even their mask bits are invalid\n if get_bit_len(src.dtype) == 32:\n mask_len = MASK_VALUE_64\n else:\n mask_len = MASK_VALUE_128\n else:\n mask_len = mask\n\n default_blk_stride = 1\n default_rep_stride = 8\n # check dst_offset imm read mem.\n # change offset dtype temporarily, for check overflow\n dst_offset.dtype = src.dtype\n vector_tensor_overflow_check(dst_offset, mask_len,\n BLK_NUM_PER_REP,\n ONE_REP_BYTE_SIZE // get_bit_len(\n src.dtype),\n repeat_times, default_blk_stride,\n default_rep_stride,\n \"dst_offset tensor overflow\",\n mask_mode=mask_mode)\n # reset dst_offset dtype: int32\n dst_offset.dtype = \"int32\"\n\n if stride_unit in (2, 3):\n # stride_unit: 2, 3 for gap, unit is element\n default_blk_stride = 0\n vector_tensor_overflow_check(src, mask_len, BLK_NUM_PER_REP,\n ONE_REP_BYTE_SIZE // get_bit_len(\n src.dtype),\n repeat_times, default_blk_stride,\n src_rep_stride, \"src tensor overflow\",\n stride_unit, mask_mode)\n\n @source_info_decorator()\n @debug.vgather_decorator\n def vgather(self, mask, dst, src, src_offset, # pylint: disable=R0913\n repeat_times, dst_rep_stride,\n base_addr=0, stride_unit=0, mask_mode=\"normal\"):\n \"\"\"\n Gather elements from src to dst according to address\n in src_offset; the programmer should ensure all gathered elements lie\n inside src's space.\n\n Parameters\n ----------\n mask:\n dst: dst operator\n src: src operator\n src_offset: addr offset tensor\n repeat_times: Repeated iterations times\n dst_rep_stride: offset of dst operator in the same block\n between adjacent iterations\n stride_unit: address and offset unit both affect it. default = 0\n base_addr: init offset for src, default = 0\n mask_mode: mode of mask, counter or normal. default = normal\n\n Returns\n -------\n None\n \"\"\"\n # check tensor dtype\n TikCheckUtil.check_type_match(src, Tensor,\n \"src should be Tensor.\"\n \" input type: {}\".format(type(src)))\n TikCheckUtil.check_type_match(dst, Tensor,\n \"dst should be Tensor.\"\n \" input type: {}\".format(type(dst)))\n TikCheckUtil.check_type_match(src_offset, Tensor,\n \"src_offset should be Tensor. 
input \"\n \"type: {}\".format(type(src_offset)))\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should\"\n \" be equal to dst's dtype\".\n format(\"vgather\"))\n TikCheckUtil.check_equality(intrinsic_check_support(\"Intrinsic_\"\n + \"vgather\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, \"vgather\"))\n # check offset tensor dtype\n TikCheckUtil.check_equality(\n src_offset.dtype, \"int32\",\n \"dtype of src_offset should be 'int32', but input dtype is \"\n \"'{}'\".format(src_offset.dtype))\n # check tensor scope\n _check_vscatter_vgather_operator_scope(src, dst, src_offset,\n \"src_offset\")\n\n # check mask_mode\n TikCheckUtil.check_in_range(\n mask_mode, (\"normal\", \"counter\"),\n \"mask_mode should be a str of 'normal' or 'counter'.\"\n \" input mask_mode: {}\".format(mask_mode))\n # mask\n mask_o = mask_concat(self, mask, mask_mode,\n tensor_bit_len=max(get_bit_len(src.dtype),\n get_bit_len(dst.dtype)))\n # check repeat\n check_repeat_times(repeat_times)\n # check strides\n check_vector_stride(\n None, [dst_rep_stride],\n None, MAX_REP_STRIDE_SINGLE_BYTE,\n [\"dst\"])\n # check base_addr\n self._vgather_check_base_addr(base_addr, src)\n # check stride_unit\n TikCheckUtil.check_type_match(\n stride_unit, int,\n \"stride_unit should be int, input stride_unit: {}\".format(\n type(stride_unit)))\n check_integer_in_range(\n stride_unit, range(MAX_STRIDE_UNIT),\n \"stride_unit should be in the range of [0, 3], \"\n \"input stride_unit: {}\".format(stride_unit))\n # check tensor overflow(static)\n self._vgather_check_overflow(mask, dst, src, src_offset, repeat_times,\n dst_rep_stride, stride_unit, mask_mode)\n # code gen\n if isinstance(base_addr, Scalar):\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_S)\n base_addr.set_as(base_addr +\n tvm.expr.Cast(\"int64\", tvm.call_extern(\n \"handle\", \"\", src.access_ptr(\"r\"))))\n self.total_ir_lines += TWO_IR\n else:\n base_addr = base_addr + tvm.expr.Cast(\"int64\",\n tvm.call_extern(\n \"handle\", \"\",\n src.access_ptr(\"r\")))\n\n self._vgather_code_gen(dst, src, src_offset,\n mask_o, repeat_times, dst_rep_stride,\n base_addr, stride_unit, mask_mode)\n\n def _vgather_code_gen(self, dst, src, # pylint: disable=R0913, R0914\n src_offset, mask_o, repeat_times, dst_rep_stride,\n base_addr, stride_unit, mask_mode):\n \"\"\"vgather code gen part\"\"\"\n default_stride = 0\n config = [base_addr, dst_rep_stride,\n default_stride, stride_unit, repeat_times]\n args = concat_params(config,\n VSCATTER_VGATHER_XT_OFFSET_LIST,\n VSCATTER_VGATHER_XT_SEGMENT_LIST)\n if get_bit_len(src.dtype) == 16:\n dtype_str = \"uint16\"\n else:\n dtype_str = \"uint32\"\n with self.new_scope():\n if mask_mode == \"counter\":\n # save orig_ctrl\n orig_ctrl = set_ctrl_counter_mask(self)\n\n self.emit(tvm.call_extern(\"int64\", \"set_vector_mask\", *mask_o))\n instr = tvm.call_extern(\n src_offset.dtype, \"vgather\",\n dst.reinterpret_cast_to(dtype_str).access_ptr(\"w\"),\n src_offset.reinterpret_cast_to(\"uint32\").access_ptr(\"r\"), args)\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n self.scope_attr(cce_params.CCE_AXIS, \"append_mem\",\n tvm.call_extern(\"handle\",\n \"mem_vector\", src.access_ptr(\"r\")))\n self.emit(instr, THREE_IR)\n\n # reset CTRL SPR as orig_ctrl\n if mask_mode == \"counter\":\n reset_ctrl_counter_mask(self, orig_ctrl)\n\n @staticmethod\n def 
_vgather_check_base_addr(base_addr, src):\n \"\"\"check vgather base_addr param\"\"\"\n TikCheckUtil.check_type_match(\n base_addr, (int, Scalar),\n \"base_addr's type should be int or Scalar,\"\n \" input type is {}\".format(type(base_addr)))\n if isinstance(base_addr, int):\n # 2**31 - 1 is max base_addr, for 31 bit len.\n TikCheckUtil.check_in_range(\n base_addr, range(2**31),\n \"base_addr should be in the range of [0, 2**31 - 1].\")\n\n # check valid\n src_scope_size = reduce_mul(src.indice.origin_shape) * \\\n get_bit_len(src.dtype) // ONE_BYTE_BIT_LEN\n TikCheckUtil.check_le(\n base_addr, src_scope_size,\n \"base_addr should be less than or equal to src tensor's buffer \"\n \"size: {}, input base_addr: {}\".format(\n src_scope_size, base_addr))\n elif isinstance(base_addr, Scalar):\n TikCheckUtil.check_equality(\n base_addr.dtype, \"uint32\",\n \"Scalar base_addr should be dtype of uint32\")\n\n @staticmethod\n def _vgather_check_overflow(mask, dst, src, # pylint: disable=R0913\n src_offset, repeat_times, dst_rep_stride,\n stride_unit, mask_mode):\n \"\"\"vgather instr check tensor overflow\"\"\"\n if mask_mode == \"normal\":\n # all elements in src are read even their mask bits are invalid\n if get_bit_len(src.dtype) == 32:\n # b32: 64\n mask_len = MASK_VALUE_64\n else:\n mask_len = MASK_VALUE_128\n else:\n mask_len = mask\n\n # default stride_unit: 0, 1 for stride, unit is 32B\n default_blk_stride = 1\n default_rep_stride = 8\n # check src_offset imm read mem.\n # change offset dtype temporarily, for check overflow\n src_offset.dtype = src.dtype\n vector_tensor_overflow_check(src_offset, mask_len, BLK_NUM_PER_REP,\n ONE_REP_BYTE_SIZE // get_bit_len(\n src.dtype),\n repeat_times, default_blk_stride,\n default_rep_stride,\n \"src_offset tensor overflow\",\n mask_mode=mask_mode)\n # reset src_offset dtype: int32\n src_offset.dtype = \"int32\"\n\n # stride_unit: 2, 3 is for gap, unit is element.\n if stride_unit in (2, 3):\n default_blk_stride = 0\n\n # check dst imm write mem.\n vector_tensor_overflow_check(dst, mask, BLK_NUM_PER_REP,\n ONE_REP_BYTE_SIZE // get_bit_len(\n dst.dtype),\n repeat_times, default_blk_stride,\n dst_rep_stride, \"dst tensor overflow\",\n stride_unit, mask_mode)\n\n @source_info_decorator()\n @debug.load3dv2_decorator\n def load3dv2(self, dst, src, pad_list, # pylint: disable=R0913, R0914\n l1_h, l1_w, channel_size, k_extension, m_extension, k_start_pt,\n m_start_pt, stride_w, stride_h, filter_w, filter_h,\n dilation_filter_w, dilation_filter_h,\n en_transpose=False, en_small_k=False, pad_value=None):\n \"\"\"image to column, only support v200, only support L1 to L0A/L0B/UB\n\n Parameters\n ----------\n dst: destination operator\n src: source operator\n pad_list: [left, right, top, bottom]\n l1_h: height of src tensor\n l1_w: width of src tensor\n channel_size: number of src tensor's channels\n k_extension: k direction extension steps from the start position\n m_extension: m direction extension steps from the start position\n k_start_pt: k direction start position of the feature matrix\n m_start_pt: m direction start position of the feature matrix\n stride_w: filter stride size in w dimension\n stride_h: filter stride size in h dimension\n filter_w: width of filter\n filter_h: height of filter\n dilation_filter_w: dilation size of filter in w dimension\n dilation_filter_h: dilation size of filter in h dimension\n en_transpose: enable transpose, default = False\n en_small_k: enable small_k, default = False\n pad_value: value for padding, default = None\n\n Returns\n -------\n 
None\n \"\"\"\n scope_map = {scope_ca: 'ca', scope_cb: 'cb', scope_ubuf: 'ub',\n scope_cbuf: 'cbuf'}\n # check dst type\n TikCheckUtil.check_type_match(dst, Tensor,\n \"dst should be tensor, input type\"\n \" of dst: {}\".format(type(dst)))\n # check src type\n TikCheckUtil.check_type_match(src, Tensor,\n \"src should be tensor, input type\"\n \" of src: {}\".format(type(src)))\n # check tensor scope\n TikCheckUtil.check_in_range(\n scope_map[dst.scope], ('ca', 'cb', 'ub'),\n \"dst_scope should be l0a, l0b or ub for load3dv2, \"\n \"input dst_scope: {}\".format(dst.scope))\n TikCheckUtil.check_in_range(scope_map[src.scope], ('cbuf',),\n \"src_scope should be l1 for load3dv2, \"\n \"input src_scope: {}\".format(src.scope))\n # check tensor dtype\n dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n TikCheckUtil.check_equality(dst.dtype, src.dtype,\n \"Intrinsic {}'s src's dtype should be \"\n \"equal to dst's dtype\".format(\"load3dv2\"))\n TikCheckUtil.check_equality(api_check_support(\"tik.\" + \"load3dv2\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.\n format(dst.dtype, \"load3dv2\"))\n # check\n _load3dv1_load3dv2_col2img_check(pad_list, l1_w, l1_h, stride_w,\n stride_h, filter_w, filter_h,\n dilation_filter_w, dilation_filter_h)\n _load3dv2_check(k_extension, m_extension, m_start_pt, pad_value,\n channel_size)\n # check channel_size\n check_load3dv2_channel_size(channel_size, src.dtype)\n # check start-point\n TikCheckUtil.check_type_match(\n k_start_pt, (int, Scalar, Expr),\n \"k_start_pt should be int, Scalar, Expr, input type of \"\n \"k_start_pt: {}\".format(type(k_start_pt)))\n check_scalar_dtype(k_start_pt,\n \"scalar_k_start_pt should be a scalar of int/uint\")\n check_integer_in_range(\n k_start_pt, range(MAX_START_PT),\n \"k_start_pt should be in the range of [0, 65535], \"\n \"input k_start_pt: {}\".format(k_start_pt))\n if isinstance(k_start_pt, int):\n k_start_pt_byte_align = 32\n if k_start_pt*DTYPE_SIZE[src.dtype] % k_start_pt_byte_align != 0:\n TikCheckUtil.raise_error(\n \"k_start_pt in Byte should be multiple of 32B, input \"\n \"k_start_pt: {}, input src dtype: {}\"\n .format(k_start_pt, src.dtype))\n # check en_small_k and en_transpose\n sk_tp_bit = self._check_sk_tp(en_small_k, en_transpose)\n # check dilation filter size and l1_h_w size\n check_dilation_filter_size(\n filter_w, dilation_filter_w, l1_w, pad_list[PADDING_LEFT_IDX],\n pad_list[PADDING_RIGHT_IDX], \"W\")\n check_dilation_filter_size(\n filter_h, dilation_filter_h, l1_h, pad_list[PADDING_TOP_IDX],\n pad_list[PADDING_BOT_IDX], \"H\")\n\n # check m_extension and k_extension\n check_load3dv2_m_extension(\n filter_w, filter_h, dilation_filter_w, dilation_filter_h, pad_list,\n m_extension, l1_w, l1_h, stride_w, stride_h, m_start_pt)\n check_load3dv2_k_extension(channel_size, k_extension, filter_h,\n filter_w, k_start_pt, src.dtype)\n\n # check dst tensor overflow\n _check_dst_overflow_load3dv2(k_start_pt, m_start_pt, k_extension,\n m_extension, dst)\n # FMATRIX\n orig_params = []\n params = [l1_w, l1_h, pad_list[PADDING_LEFT_IDX],\n pad_list[PADDING_RIGHT_IDX], pad_list[PADDING_TOP_IDX],\n pad_list[PADDING_BOT_IDX]]\n orig_params += params[:]\n reg_fmatrix = concat_params(params, FMATRIX_OFFSET_LIST,\n FMATRIX_SEGMENT_LIST)\n self._do_load3d_fmatrix(reg_fmatrix)\n # padding\n do_load3d_padding(self, src, pad_value)\n # cal extent\n dst_extent = Expr((m_extension + 1)*(k_extension + 1)*\n DTYPE_SIZE[dst.dtype]).get()\n # code gen\n params = [k_extension, m_extension, k_start_pt, m_start_pt]\n 
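# An illustrative sketch (not part of the original flow) of the\n # bit packing that concat_params performs; the offset/width lists\n # named below are the real inputs, but this loop is a simplified\n # model with hypothetical field layouts:\n #     reg = 0\n #     for value, offset, width in zip(params, offsets, widths):\n #         reg |= (int(value) & ((1 << width) - 1)) << offset\n 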
reg_xm = concat_params(params, LOAD3DV2_REG_XM_OFFSET_LIST,\n LOAD3DV2_REG_XM_SEGMENT_LIST)\n orig_params += params[:]\n\n params = [stride_w, stride_h, filter_w, filter_h,\n dilation_filter_w, dilation_filter_h,\n sk_tp_bit, channel_size]\n reg_xt = concat_params(params, LOAD3DV2_REG_XT_OFFSET_LIST,\n LOAD3DV2_REG_XT_SEGMENT_LIST)\n orig_params += params[:]\n\n if dtype_str in (\"s4s4\", \"u4u4\"):\n dtype_str = \"int4\"\n else:\n dtype_str = dst.dtype\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_MTE1)\n instr = tvm.call_extern(\n dst.dtype, \"img2colv2_cbuf_to_\" + scope_map[dst.scope],\n dst.reinterpret_cast_to(dtype_str).access_ptr(\n \"w\", extent=dst_extent),\n src.reinterpret_cast_to(dtype_str).access_ptr(\"r\"), reg_xm,\n reg_xt)\n self.emit(instr)\n\n def _check_sk_tp(self, en_small_k, en_transpose):\n \"\"\"check en_small_k and en_transpose, return the combined bit\"\"\"\n arch_version_str = get_soc_name() + get_soc_core_type()\n TikCheckUtil.check_type_match(\n en_small_k, bool, \"en_small_k should be bool, input type of \"\n \"en_small_k: {}\".format(type(en_small_k)))\n if en_small_k:\n TikCheckUtil.check_in_range(\n arch_version_str, LOAD3DV2_FUNC_MAP[\"sk\"],\n \"{} doesn't support small_k\"\n .format(arch_version_str))\n sk_tp_bit = 1\n else:\n sk_tp_bit = 0\n # check transpose\n TikCheckUtil.check_type_match(\n en_transpose, bool, \"en_transpose should be bool, input type of \"\n \"en_transpose: {}\".format(type(en_transpose)))\n if en_transpose:\n TikCheckUtil.check_in_range(\n arch_version_str, (HI3796CV300CSAIC, AIC),\n \"{} doesn't support transpose\"\n .format(arch_version_str))\n # small_k and transpose share this bit; do not clear a value\n # already set by en_small_k when transpose is disabled\n sk_tp_bit = 1\n\n return sk_tp_bit\n\n @source_info_decorator()\n @debug.load_smask_decorator\n def load_smask(self, dst, src, load_size, sid=0):\n \"\"\"load src to smask\n\n Parameters\n ----------\n dst: destination operator\n src: source operator\n load_size: load size, unit: 2B\n sid: SID for OUTSMMU\n\n Returns\n -------\n None\n \"\"\"\n instr_map = {\n scope_ubuf: [\"load_smask_table_from_ub\", PIPE_MTE3],\n scope_gm: [\"load_smask_table_from_gm\", PIPE_MTE2]\n }\n TikCheckUtil.check_type_match(\n dst, Tensor, \"dst should be Tensor, input type: {}\"\n .format(type(dst)))\n TikCheckUtil.check_equality(\n dst.scope, scope_smask,\n \"dst scope should be SMASK, input scope: {}\".format(dst.scope))\n TikCheckUtil.check_type_match(\n src, Tensor,\n \"src should be Tensor, input type: {}\".format(type(src)))\n TikCheckUtil.check_in_range(\n src.scope, (scope_ubuf, scope_gm),\n \"src scope should be gm or ub, input scope: {}\".format(src.scope))\n TikCheckUtil.check_type_match(\n load_size, int,\n \"load_size should be int, input type: {}\".format(type(load_size)))\n check_integer_in_range(\n load_size, range(VALUE_128),\n \"load_size should be in the range of [0, 127], input load_size: {}\"\n .format(load_size))\n instr_name, pipe_line = instr_map[src.scope]\n len_1 = 0\n len_7 = load_size & VALUE_127\n params = [len_1, len_7, sid]\n args = [concat_params(params, LOAD_SMASK_OFFSET_LIST,\n LOAD_SMASK_SEGMENT_LIST)]\n # load_size, unit:2B\n smask_extent = load_size*2\n with self.new_scope():\n instr = tvm.call_extern(\"uint16\", instr_name,\n dst.access_ptr(\"w\", extent=smask_extent),\n src.access_ptr(\"r\", extent=smask_extent),\n *args)\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", pipe_line)\n self.emit(instr, ONE_IR)\n\n @staticmethod\n def _get_channels(dst, arch_version, channel_pad_mode, input_format):\n \"\"\"get channels\"\"\"\n channels = 32 // DTYPE_SIZE[dst.dtype]\n\n if 
(channel_pad_mode == 1) and arch_version == HI3796CV300ESAIC:\n channels = AIPP_INPUT_TYPE_SWAP_ALIGN.get(input_format).get(\n 'channels')\n\n if (channel_pad_mode == 2) and \\\n arch_version in (HI3796CV300ESAIC, AIC):\n channels = 4\n return channels\n\n def calculate_dst_extent_aipp(self, arch_version, # pylint: disable=R0913, R0914\n input_format, dst, crop_horizontal_size,\n crop_vertical_size, channel_pad_mode,\n top_pad_rows, botton_pad_rows, left_pad_cols,\n right_pad_cols, scf_horizontal_size,\n scf_vertical_size, post_botton_clip_number,\n post_top_clip_number, post_right_clip_number,\n post_left_clip_number, dst_stride_pixel,\n raw_image_channel, raw_enable):\n \"\"\"calculate dst extent\"\"\"\n # function's input params is too much, so disable them\n imm_input_format = Expr(input_format).eval_value()\n imm_channel_pad_mode = Expr(channel_pad_mode).eval_value()\n if any(value is None for value in [imm_input_format,\n imm_channel_pad_mode]):\n return dst.buffer_size\n\n channels = self._get_channels(dst, arch_version,\n imm_channel_pad_mode, imm_input_format)\n\n imm_top_pad_rows = Expr(top_pad_rows).eval_value()\n imm_botton_pad_rows = Expr(botton_pad_rows).eval_value()\n imm_left_pad_cols = Expr(left_pad_cols).eval_value()\n imm_right_pad_cols = Expr(right_pad_cols).eval_value()\n\n if any(value is None for value in [imm_top_pad_rows,\n imm_botton_pad_rows,\n imm_left_pad_cols,\n imm_right_pad_cols]):\n return dst.buffer_size\n\n if arch_version == HI3796CV300ESAIC:\n imm_scf_horizontal_size = Expr(scf_horizontal_size).eval_value()\n imm_scf_vertical_size = Expr(scf_vertical_size).eval_value()\n imm_post_right_clip_number = Expr(\n post_right_clip_number).eval_value()\n imm_post_left_clip_number = Expr(post_left_clip_number).eval_value()\n imm_post_botton_clip_number = Expr(\n post_botton_clip_number).eval_value()\n imm_post_top_clip_number = Expr(post_top_clip_number).eval_value()\n imm_dst_stride_pixel = Expr(dst_stride_pixel).eval_value()\n if any(value is None for value in [imm_scf_horizontal_size,\n imm_scf_vertical_size,\n imm_post_right_clip_number,\n imm_post_left_clip_number,\n imm_post_botton_clip_number,\n imm_post_top_clip_number,\n imm_dst_stride_pixel]):\n return dst.buffer_size\n if imm_dst_stride_pixel == 0:\n extent = (imm_scf_horizontal_size -\n imm_post_right_clip_number -\n imm_post_left_clip_number +\n imm_left_pad_cols + imm_right_pad_cols)*\\\n (imm_scf_vertical_size - imm_post_botton_clip_number -\n imm_post_top_clip_number + imm_top_pad_rows +\n imm_botton_pad_rows)*channels\n else:\n extent = imm_dst_stride_pixel*(imm_scf_vertical_size -\n imm_post_botton_clip_number -\n imm_post_top_clip_number +\n imm_top_pad_rows +\n imm_botton_pad_rows)*channels\n else:\n imm_crop_horizontal_size = Expr(crop_horizontal_size).eval_value()\n imm_crop_vertical_size = Expr(crop_vertical_size).eval_value()\n if any(value is None for value in [imm_crop_horizontal_size,\n imm_crop_vertical_size]):\n return dst.buffer_size\n extent = (imm_crop_horizontal_size +\n imm_left_pad_cols + imm_right_pad_cols)*(\n imm_crop_vertical_size + imm_top_pad_rows +\n imm_botton_pad_rows)*channels\n if arch_version == AIC and \\\n imm_input_format in [RAW16, RAW24] and \\\n raw_enable == 1:\n imm_raw_image_channel = Expr(raw_image_channel).eval_value()\n if imm_raw_image_channel is not None:\n if imm_raw_image_channel == 0:\n extent = extent // 4\n else:\n return dst.buffer_size\n\n extent = extent*DTYPE_SIZE[dst.dtype]\n return extent\n\n @staticmethod\n def 
_handel_crop_info(arch_version, input_format, # pylint: disable=R0913\n crop_enbale, src_horizontal_size, src_vertical_size,\n crop_info):\n # crop info\n if crop_enbale == AIPP_DISABLE:\n crop_horizontal_size = src_horizontal_size\n crop_vertical_size = src_vertical_size\n crop_horizontal_start = AIPP_INIT_VALUE\n crop_vertical_start = AIPP_INIT_VALUE\n single_line_mode = AIPP_INIT_VALUE\n else:\n check_dict_and_not_none(crop_info, 'crop_info')\n crop_horizontal_size = crop_info.get('dst_horizontal_size')\n crop_vertical_size = crop_info.get('dst_vertical_size')\n crop_horizontal_start = crop_info.get('crop_horizontal_start')\n crop_vertical_start = crop_info.get('crop_vertical_start')\n single_line_mode = crop_info.get('single_line_enable')\n\n _aipp_check_crop_info(input_format, single_line_mode,\n crop_horizontal_size, crop_vertical_size,\n crop_horizontal_start, crop_vertical_start)\n _aipp_check_crop_in_picture(src_horizontal_size, src_vertical_size,\n crop_horizontal_size,\n crop_vertical_size,\n crop_horizontal_start,\n crop_vertical_start)\n _aipp_check_crop_single_line_mode(arch_version, single_line_mode)\n _check_crop_vertical_size_by_single_line(arch_version,\n crop_vertical_size,\n single_line_mode)\n\n return crop_horizontal_size, crop_vertical_size, \\\n crop_horizontal_start, crop_vertical_start, single_line_mode\n\n @staticmethod\n def _handle_csc_info(arch_version, csc_enable, csc_info):\n if csc_enable == AIPP_DISABLE:\n csc_matrix, csc_out_bias, csc_in_bias = \\\n [[AIPP_INIT_VALUE, AIPP_INIT_VALUE, AIPP_INIT_VALUE],\n [AIPP_INIT_VALUE, AIPP_INIT_VALUE, AIPP_INIT_VALUE],\n [AIPP_INIT_VALUE, AIPP_INIT_VALUE, AIPP_INIT_VALUE]],\\\n [AIPP_INIT_VALUE, AIPP_INIT_VALUE, AIPP_INIT_VALUE],\\\n [AIPP_INIT_VALUE, AIPP_INIT_VALUE, AIPP_INIT_VALUE]\n else:\n check_dict_and_not_none(csc_info, 'csc_info')\n format_convert = csc_info.get('format_convert')\n _aipp_check_format_convert(arch_version, format_convert)\n csc_matrix, csc_out_bias, csc_in_bias = _get_csc_parameter(\n format_convert, csc_info)\n\n return csc_matrix, csc_out_bias, csc_in_bias\n\n @staticmethod\n def _handle_swap_info(input_format, swap_enable, swap_list):\n if swap_enable == AIPP_DISABLE:\n rb_swap, uv_swap, ax_swap = AIPP_INIT_VALUE, \\\n AIPP_INIT_VALUE, AIPP_INIT_VALUE\n else:\n _check_list_type_and_range(swap_list, 3, (int, Scalar, Expr),\n range(0, 2), 'swap')\n rb_swap = swap_list[0]\n uv_swap = swap_list[1]\n ax_swap = swap_list[2]\n\n _aipp_check_swap(input_format, rb_swap, uv_swap, ax_swap)\n\n return rb_swap, uv_swap, ax_swap\n\n @staticmethod\n def _handle_pre_clip_info(pre_clip_enable, pre_clip_info,\n crop_vertical_size):\n if pre_clip_enable == AIPP_DISABLE:\n pre_top_clip_number = AIPP_INIT_VALUE\n pre_botton_clip_number = AIPP_INIT_VALUE\n else:\n check_dict_and_not_none(pre_clip_info, 'pre_clip_info')\n pre_top_clip_number = pre_clip_info.get('pre_top_clip_number')\n pre_botton_clip_number = pre_clip_info.get(\n 'pre_botton_clip_number')\n\n _aipp_check_pre_clip(pre_top_clip_number,\n pre_botton_clip_number, crop_vertical_size)\n return pre_top_clip_number, pre_botton_clip_number\n\n def _handle_scf_info(self, scf_enable, # pylint: disable=R0913, R0914, R0915\n scf_info, crop_horizontal_size, crop_vertical_size,\n pre_top_clip_number, pre_botton_clip_number):\n if scf_enable == AIPP_DISABLE:\n scf_horizontal_size = crop_horizontal_size\n scf_vertical_size = crop_vertical_size - \\\n pre_top_clip_number - pre_botton_clip_number\n alpha_hori_scaling_mode = AIPP_INIT_VALUE\n hori_scaling_mode = 
AIPP_INIT_VALUE\n alpha_vert_scaling_mode = AIPP_INIT_VALUE\n vert_scaling_mode = AIPP_INIT_VALUE\n order_hori_vert_filter = AIPP_INIT_VALUE\n vertical_scaling_enable = AIPP_INIT_VALUE\n hori_scaling_enable = AIPP_INIT_VALUE\n vert_scaling = AIPP_INIT_VALUE\n hori_scaling = AIPP_INIT_VALUE\n init_vert_phase = AIPP_INIT_VALUE\n init_hori_phase = AIPP_INIT_VALUE\n\n else:\n check_dict_and_not_none(scf_info, 'scf_info')\n # scf_info\n scf_horizontal_size = scf_info.get('scf_horizontal_size')\n scf_vertical_size = scf_info.get('scf_vertical_size')\n scf_horizontal_start = scf_info.get('scf_horizontal_start')\n scf_vertical_start = scf_info.get('scf_vertical_start')\n scaling_mode = scf_info.get('scaling_mode')\n\n _aipp_check_scf(scf_horizontal_size, scf_vertical_size,\n scf_horizontal_start, scf_vertical_start,\n scaling_mode)\n\n # SPR12\n pre_scf_horizontal_size = crop_horizontal_size\n pre_scf_vertical_size = crop_vertical_size - \\\n pre_botton_clip_number - \\\n pre_top_clip_number\n\n # spr13\n alpha_hori_scaling_mode = scaling_mode\n hori_scaling_mode = scaling_mode\n alpha_vert_scaling_mode = scaling_mode\n vert_scaling_mode = scaling_mode\n\n vertical_scaling_enable = AIPP_ENABLE\n hori_scaling_enable = AIPP_ENABLE\n\n imm_scf_horizontal_size = Expr(scf_horizontal_size).eval_value()\n imm_pre_scf_horizontal_size = Expr(\n pre_scf_horizontal_size).eval_value()\n if imm_scf_horizontal_size is not None and \\\n imm_pre_scf_horizontal_size is not None:\n if imm_scf_horizontal_size > imm_pre_scf_horizontal_size:\n order_hori_vert_filter = AIPP_ENABLE\n else:\n order_hori_vert_filter = AIPP_DISABLE\n else:\n with self.context.freeze(): # pylint: disable=E1101\n order_hori_vert_filter = self.Scalar_( # pylint: disable=E1101\n 'uint16', 'order_hori_vert_filter', AIPP_ENABLE)\n\n with self.if_scope(\n scf_horizontal_size < pre_scf_horizontal_size):\n order_hori_vert_filter.set_as(AIPP_DISABLE)\n\n # SPR16\n hori_scaling = scf_info.get(\n 'scf_horizontal_scale',\n (pre_scf_horizontal_size - 1)*SCALE_COF//\n (scf_horizontal_size - 1)//4*4)\n TikCheckUtil.check_type_match(hori_scaling, (int, Scalar, Expr),\n 'scf_horizontal_scale type error, '\n 'input: '\n '{}'.format(type(hori_scaling)))\n check_scalar_dtype(hori_scaling,\n \"scf_horizontal_scale should be\"\n \" a scalar of int/uint\")\n\n vert_scaling = scf_info.get(\n 'scf_vertical_scale',\n (pre_scf_vertical_size - 1)*SCALE_COF //\n (scf_vertical_size - 1) // 4*4)\n TikCheckUtil.check_type_match(vert_scaling, (int, Scalar, Expr),\n 'scf_vertical_scale type error, '\n 'input:'\n ' {}'.format(type(vert_scaling)))\n check_scalar_dtype(vert_scaling,\n \"scf_vertical_scale should be\"\n \" a scalar of int/uint\")\n\n init_vert_phase = scf_vertical_start\n init_hori_phase = scf_horizontal_start\n\n return scf_horizontal_size, scf_vertical_size, \\\n alpha_hori_scaling_mode, hori_scaling_mode, \\\n alpha_vert_scaling_mode, vert_scaling_mode, \\\n order_hori_vert_filter, vertical_scaling_enable, \\\n hori_scaling_enable, vert_scaling, hori_scaling, \\\n init_vert_phase, init_hori_phase\n\n @staticmethod\n def _handle_post_clip(post_clip_enable, post_clip_info):\n if post_clip_enable == AIPP_DISABLE:\n post_botton_clip_number = AIPP_INIT_VALUE\n post_top_clip_number = AIPP_INIT_VALUE\n post_right_clip_number = AIPP_INIT_VALUE\n post_left_clip_number = AIPP_INIT_VALUE\n else:\n check_dict_and_not_none(post_clip_info, 'post_clip_info')\n post_botton_clip_number = post_clip_info.get(\n 'post_botton_clip_number')\n post_top_clip_number = 
post_clip_info.get(\n 'post_top_clip_number')\n post_right_clip_number = post_clip_info.get(\n 'post_right_clip_number')\n post_left_clip_number = post_clip_info.get(\n 'post_left_clip_number')\n\n _aipp_check_post_clip(post_botton_clip_number,\n post_top_clip_number,\n post_right_clip_number,\n post_left_clip_number)\n\n return post_botton_clip_number, post_top_clip_number, \\\n post_right_clip_number, post_left_clip_number\n\n @staticmethod\n def _handle_dtc_info(dtc_enable, dtc_info): # pylint: disable=R0914\n if dtc_enable == AIPP_DISABLE:\n dtc_mean_type = AIPP_INIT_VALUE\n dtc_mean0_uint32 = AIPP_INIT_VALUE\n dtc_mean1_uint32 = AIPP_INIT_VALUE\n dtc_mean2_uint32 = AIPP_INIT_VALUE\n dtc_mean3_uint32 = AIPP_INIT_VALUE\n dtc_min0_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ZERO)\n dtc_min1_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ZERO)\n dtc_min2_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ZERO)\n dtc_min3_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ZERO)\n dtc_var0_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ONE)\n dtc_var1_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ONE)\n dtc_var2_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ONE)\n dtc_var3_uint32 = float16format2uint16(AIPP_INIT_FLOAT_VALUE_ONE)\n raw_to_f16_n = AIPP_INIT_VALUE\n else:\n check_dict_and_not_none(dtc_info, 'dtc_info')\n dtc_mean_type = dtc_info.get('dtc_mean_type')\n dtc_mean = dtc_info.get('dtc_mean')\n _aipp_check_dtc_mean(dtc_mean_type, dtc_mean)\n dtc_mean0_uint32, dtc_mean1_uint32, \\\n dtc_mean2_uint32, dtc_mean3_uint32 = _aipp_get_dtc_mean(dtc_mean)\n\n dtc_min = dtc_info.get('dtc_min')\n _check_list_type_and_range(dtc_min, 4, (float, Scalar, Expr), None,\n 'dtc_min')\n\n dtc_min0_uint32, dtc_min1_uint32, \\\n dtc_min2_uint32, dtc_min3_uint32 = _aipp_get_dtc_min_value(dtc_min)\n\n dtc_var = dtc_info.get('dtc_var')\n _check_list_type_and_range(dtc_var, 4, (float, Scalar, Expr), None,\n 'dtc_var')\n\n dtc_var0_uint32, dtc_var1_uint32, \\\n dtc_var2_uint32, dtc_var3_uint32 = _aipp_get_dtc_var_value(dtc_var)\n\n raw_to_f16_n = dtc_info.get('raw_to_f16_n')\n _aipp_check_dtc_raw_info(raw_to_f16_n)\n\n return dtc_mean_type, dtc_mean0_uint32, dtc_mean1_uint32,\\\n dtc_mean2_uint32, dtc_mean3_uint32, dtc_min0_uint32,\\\n dtc_min1_uint32, dtc_min2_uint32, dtc_min3_uint32,\\\n dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32,\\\n dtc_var3_uint32, raw_to_f16_n\n\n @staticmethod\n def _handle_flip_mode(arch_version, flip_enable, flip_mode):\n if flip_enable == AIPP_DISABLE:\n flip_mode = 0\n else:\n _aipp_check_flip_dict(arch_version, flip_mode)\n return flip_mode\n\n @staticmethod\n def _handle_channel_pad_info(arch_version, channel_pad_enable,\n channel_pad_info, dst):\n if channel_pad_enable == AIPP_DISABLE:\n channel_pad_mode = 0\n channel_pad_value_uint32 = 0\n else:\n check_dict_and_not_none(channel_pad_info, 'channel_pad_info')\n channel_pad_mode = channel_pad_info.get('channel_pad_mode')\n channel_pad_value = channel_pad_info.get('channel_pad_value')\n _aipp_check_cpad(arch_version, dst, channel_pad_value,\n channel_pad_mode)\n\n if dst.dtype == 'float16':\n if Expr(channel_pad_value).eval_value() is None:\n channel_pad_value_uint32 = \\\n channel_pad_value.reinterpret_cast_to('uint16')\n else:\n channel_pad_value_uint32 = float16format2uint16(\n channel_pad_value)\n else:\n channel_pad_value_uint32 = channel_pad_value\n\n return channel_pad_mode, channel_pad_value_uint32\n\n @staticmethod\n def _handle_area_pad_info(arch_version, input_format, # pylint: 
disable=R0914\n area_pad_enable, area_pad_info, dst):\n if area_pad_enable == AIPP_DISABLE:\n area_pad_mode = AIPP_INIT_VALUE\n top_pad_rows = AIPP_INIT_VALUE\n left_pad_cols = AIPP_INIT_VALUE\n right_pad_cols = AIPP_INIT_VALUE\n botton_pad_rows = AIPP_INIT_VALUE\n channel0_pad_value_uint32 = AIPP_INIT_VALUE\n channel1_pad_value_uint32 = AIPP_INIT_VALUE\n channel2_pad_value_uint32 = AIPP_INIT_VALUE\n channel3_pad_value_uint32 = AIPP_INIT_VALUE\n else:\n\n check_dict_and_not_none(area_pad_info, 'area_pad_info')\n area_pad_mode = area_pad_info.get('area_pad_mode')\n\n # area pad value\n top_pad_rows = area_pad_info.get('top_pad_rows')\n botton_pad_rows = area_pad_info.get('botton_pad_rows')\n left_pad_cols = area_pad_info.get('left_pad_cols')\n right_pad_cols = area_pad_info.get('right_pad_cols')\n\n # channel pad value\n channel0_pad_value = area_pad_info.get('channel0_pad_value')\n channel1_pad_value = area_pad_info.get('channel1_pad_value')\n channel2_pad_value = area_pad_info.get('channel2_pad_value')\n channel3_pad_value = area_pad_info.get('channel3_pad_value')\n\n _aipp_check_area_pad(input_format, dst, area_pad_mode, top_pad_rows,\n botton_pad_rows, left_pad_cols, right_pad_cols,\n [channel0_pad_value, channel1_pad_value,\n channel2_pad_value, channel3_pad_value],\n arch_version)\n\n channel0_pad_value_uint32, channel1_pad_value_uint32, \\\n channel2_pad_value_uint32, channel3_pad_value_uint32 = \\\n _aipp_get_channel_pad_value(\n dst.dtype, channel0_pad_value, channel1_pad_value,\n channel2_pad_value, channel3_pad_value)\n\n return area_pad_mode, top_pad_rows, left_pad_cols, right_pad_cols, \\\n botton_pad_rows, channel0_pad_value_uint32, \\\n channel1_pad_value_uint32, channel2_pad_value_uint32, \\\n channel3_pad_value_uint32\n\n @staticmethod\n def _handle_stretch_info(stretch_enable, stretch_info):\n if stretch_enable == AIPP_DISABLE:\n dst_stride_pixel = AIPP_INIT_VALUE\n else:\n check_dict_and_not_none(stretch_info, 'stretch_info')\n dst_stride_pixel = stretch_info.get('dst_stride_pixel')\n _aipp_check_stretch(dst_stride_pixel)\n return dst_stride_pixel\n\n @staticmethod\n def _handle_raw_info(raw_enable, raw_info):\n if raw_enable == AIPP_DISABLE:\n raw_image_channel = AIPP_INIT_VALUE\n raw_start_channel = AIPP_INIT_VALUE\n else:\n check_dict_and_not_none(raw_info, 'raw_info')\n raw_image_channel = raw_info.get('raw_image_channel')\n raw_start_channel = raw_info.get('raw_start_channel')\n _aipp_check_raw_info(raw_image_channel, raw_start_channel)\n\n return raw_image_channel, raw_start_channel\n\n @source_info_decorator()\n @debug.load_image_decorator\n def load_image(self, dst, src0, src1, # pylint: disable=R0913, R0914, R0915\n input_format, function_switch, src_info, crop_info,\n pre_clip_info, swap_list, csc_info, scf_info, post_clip_info,\n dtc_info, flip_mode, channel_pad_info, area_pad_info,\n stretch_info, raw_info, sid):\n \"\"\"load image api\"\"\"\n # function's input params is too much, so disable them\n arch_version = get_soc_name() + get_soc_core_type()\n # check arch_version\n _aipp_check_arch_version(arch_version)\n\n # check input format\n _aipp_check_input_format(arch_version, input_format)\n\n # check dst format by input format\n _aipp_check_dst(input_format, dst)\n\n # src_info\n check_dict_and_not_none(src_info, 'src_info')\n src_horizontal_size = src_info.get('src_horizontal_size')\n src_vertical_size = src_info.get('src_vertical_size')\n _aipp_check_src_info(arch_version, input_format, src_horizontal_size,\n src_vertical_size)\n 
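# A minimal src_info sketch (hypothetical sizes, for illustration\n # only):\n #     src_info = {'src_horizontal_size': 224,\n #                 'src_vertical_size': 224}\n # both keys are mandatory; _aipp_check_src_info above validated the\n # sizes, and _aipp_check_src below checks them against the tensors.\n 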
_aipp_check_src(input_format, src0, src1, src_horizontal_size,\n src_vertical_size)\n\n # enable switch\n crop_enbale, swap_enable, csc_enable, dtc_enable, area_pad_enable, \\\n channel_pad_enable, pre_clip_enable, scf_enable, post_clip_enable, \\\n flip_enable, stretch_enable, raw_enable = aipp_get_enable_bit(\n arch_version, function_switch)\n _aipp_check_function_switch(arch_version, input_format,\n swap_enable, csc_enable, dtc_enable,\n area_pad_enable, pre_clip_enable,\n scf_enable, post_clip_enable, flip_enable,\n stretch_enable, raw_enable)\n\n # crop info check\n crop_horizontal_size, crop_vertical_size, crop_horizontal_start,\\\n crop_vertical_start, single_line_mode = self._handel_crop_info(\n arch_version, input_format, crop_enbale,\n src_horizontal_size, src_vertical_size, crop_info)\n\n # csc info\n csc_matrix, csc_out_bias, csc_in_bias = self._handle_csc_info(\n arch_version, csc_enable, csc_info)\n\n # swap list check\n rb_swap, uv_swap, ax_swap = self._handle_swap_info(\n input_format, swap_enable, swap_list)\n\n if arch_version == HI3796CV300ESAIC:\n\n # pre clip info\n pre_top_clip_number, pre_botton_clip_number =\\\n self._handle_pre_clip_info(pre_clip_enable, pre_clip_info,\n crop_vertical_size)\n\n # scf info\n scf_horizontal_size, scf_vertical_size, alpha_hori_scaling_mode,\\\n hori_scaling_mode, alpha_vert_scaling_mode, vert_scaling_mode,\\\n order_hori_vert_filter, vertical_scaling_enable,\\\n hori_scaling_enable, vert_scaling, hori_scaling, init_vert_phase,\\\n init_hori_phase = self._handle_scf_info(\n scf_enable, scf_info, crop_horizontal_size, crop_vertical_size,\n pre_top_clip_number, pre_botton_clip_number)\n\n # post_clip_info\n post_botton_clip_number, post_top_clip_number,\\\n post_right_clip_number, post_left_clip_number =\\\n self._handle_post_clip(post_clip_enable, post_clip_info)\n\n else:\n pre_top_clip_number = AIPP_INIT_VALUE\n pre_botton_clip_number = AIPP_INIT_VALUE\n scf_horizontal_size = AIPP_INIT_VALUE\n scf_vertical_size = AIPP_INIT_VALUE\n alpha_hori_scaling_mode = AIPP_INIT_VALUE\n hori_scaling_mode = AIPP_INIT_VALUE\n alpha_vert_scaling_mode = AIPP_INIT_VALUE\n vert_scaling_mode = AIPP_INIT_VALUE\n order_hori_vert_filter = AIPP_INIT_VALUE\n vertical_scaling_enable = AIPP_INIT_VALUE\n hori_scaling_enable = AIPP_INIT_VALUE\n init_vert_phase = AIPP_INIT_VALUE\n init_hori_phase = AIPP_INIT_VALUE\n vert_scaling = AIPP_INIT_VALUE\n hori_scaling = AIPP_INIT_VALUE\n post_botton_clip_number = AIPP_INIT_VALUE\n post_top_clip_number = AIPP_INIT_VALUE\n post_right_clip_number = AIPP_INIT_VALUE\n post_left_clip_number = AIPP_INIT_VALUE\n\n # dtc_info\n dtc_mean_type, dtc_mean0_uint32, dtc_mean1_uint32, dtc_mean2_uint32,\\\n dtc_mean3_uint32, dtc_min0_uint32, dtc_min1_uint32, dtc_min2_uint32,\\\n dtc_min3_uint32, dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32,\\\n dtc_var3_uint32, raw_to_f16_n = self._handle_dtc_info(\n dtc_enable, dtc_info)\n\n # flip_mode\n flip_mode = self._handle_flip_mode(arch_version, flip_enable, flip_mode)\n\n # channel_pad_info\n channel_pad_mode, channel_pad_value_uint32 =\\\n self._handle_channel_pad_info(arch_version, channel_pad_enable,\n channel_pad_info, dst)\n\n # area_pad_info\n area_pad_mode, top_pad_rows, left_pad_cols, right_pad_cols, \\\n botton_pad_rows, channel0_pad_value_uint32, \\\n channel1_pad_value_uint32, channel2_pad_value_uint32, \\\n channel3_pad_value_uint32 = self._handle_area_pad_info(\n arch_version, input_format, area_pad_enable, area_pad_info, dst)\n\n # stretch_info\n dst_stride_pixel = 
self._handle_stretch_info(stretch_enable,\n stretch_info)\n\n # raw handle\n raw_image_channel, raw_start_channel = self._handle_raw_info(raw_enable,\n raw_info)\n\n # sid\n _aipp_check_sid(arch_version, sid)\n\n dst_extent = self.calculate_dst_extent_aipp(arch_version, input_format,\n dst, crop_horizontal_size,\n crop_vertical_size,\n channel_pad_mode,\n top_pad_rows,\n botton_pad_rows,\n left_pad_cols,\n right_pad_cols,\n scf_horizontal_size,\n scf_vertical_size,\n post_botton_clip_number,\n post_top_clip_number,\n post_right_clip_number,\n post_left_clip_number,\n dst_stride_pixel,\n raw_image_channel,\n raw_enable)\n\n imm_dst_extent = Expr(dst_extent).eval_value()\n if imm_dst_extent is not None:\n TikCheckUtil.check_le(\n imm_dst_extent, dst.buffer_size,\n \"output out of dst size, input: {}\".format(imm_dst_extent))\n TikCheckUtil.check_equality(\n imm_dst_extent % 32, 0,\n \"output should be 32 align, input: {}\".format(imm_dst_extent))\n\n # handle\n self._aipp(\n dst, src0, src1, input_format, src_horizontal_size,\n src_vertical_size, crop_horizontal_size, crop_vertical_size,\n crop_horizontal_start, crop_vertical_start, single_line_mode,\n pre_top_clip_number, pre_botton_clip_number, csc_matrix,\n csc_out_bias, csc_in_bias, rb_swap, uv_swap, ax_swap,\n scf_horizontal_size, scf_vertical_size,\n alpha_hori_scaling_mode, hori_scaling_mode,\n alpha_vert_scaling_mode, vert_scaling_mode,\n order_hori_vert_filter,\n vertical_scaling_enable, hori_scaling_enable, init_vert_phase,\n init_hori_phase, vert_scaling, hori_scaling,\n post_botton_clip_number, post_top_clip_number,\n post_right_clip_number, post_left_clip_number,\n dtc_mean_type, dtc_mean0_uint32, dtc_mean1_uint32,\n dtc_mean2_uint32, dtc_mean3_uint32, dtc_min0_uint32,\n dtc_min1_uint32, dtc_min2_uint32, dtc_min3_uint32,\n dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32,\n dtc_var3_uint32, raw_to_f16_n, flip_mode, channel_pad_mode,\n channel_pad_value_uint32, area_pad_mode,\n top_pad_rows, botton_pad_rows, left_pad_cols, right_pad_cols,\n channel0_pad_value_uint32, channel1_pad_value_uint32,\n channel2_pad_value_uint32, channel3_pad_value_uint32,\n dst_stride_pixel, raw_image_channel, raw_start_channel, sid,\n arch_version, csc_enable, post_clip_enable, dst_extent)\n\n def _set_aipp_spr0(self, arch_version,\n dtc_mean0_uint32, dtc_mean1_uint32, src0):\n \"\"\"set spr0\"\"\"\n if arch_version == AIC:\n sfr_dtc_pixel_mean_ch0 = Expr(dtc_mean0_uint32 // BIT_16)\n sfr_dtc_pixel_mean_ch1 = Expr(dtc_mean1_uint32 // BIT_16)\n else:\n sfr_dtc_pixel_mean_ch0 = AIPP_INIT_VALUE\n sfr_dtc_pixel_mean_ch1 = AIPP_INIT_VALUE\n\n with self.context.freeze(): # pylint: disable=E1101\n scalar_addr_y = self.Scalar_(dtype='uint64', # pylint: disable=E1101\n name='scalar_addr_y', init_value=0)\n\n scalar_addr_y.set_as(tvm.expr.Cast(\"uint64\",\n tvm.call_extern(\"handle\", \"\",\n src0.access_ptr(\n \"r\"))))\n aipp0_config = [scalar_addr_y, sfr_dtc_pixel_mean_ch0,\n sfr_dtc_pixel_mean_ch1]\n\n aipp0_register = concat_params(aipp0_config, AIPP0_OFFSET_LIST,\n AIPP0_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.new_scope():\n aipp_spr0 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr0.set_as(aipp0_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_0\",\n aipp_spr0.get()))\n return scalar_addr_y\n\n def _set_aipp_spr1(self, input_format, csc_enable, src1, #pylint: disable=R0913\n scalar_addr_y, src_horizontal_size, src_vertical_size):\n \"\"\"set spr1\"\"\"\n with self.context.freeze(): # pylint: disable=E1101\n 
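# A hedged sketch of what the branches below compute: for the\n # semi-planar formats handled here (input_format 0, 2, 3, 7) the\n # chroma plane is assumed to follow the luma plane, so with a\n # hypothetical 224x224 source and input_format 0:\n #     addr_uv = addr_y + 1*224*224\n # an explicit src1 tensor takes priority over this derivation.\n 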
scalar_addr_uv = self.Scalar_(dtype='uint64', # pylint: disable=E1101\n name='scalar_addr_uv', init_value=0)\n\n if src1 is not None:\n scalar_addr_uv.set_as(\n tvm.expr.Cast(\"uint64\",\n tvm.call_extern(\n \"handle\", \"\", src1.access_ptr(\"r\"))))\n else:\n imm_input_format = Expr(input_format).eval_value()\n if imm_input_format is None:\n src_channel_scalar = self.Scalar_('uint32', # pylint: disable=E1101\n 'src_channel_scalar', 0)\n with self.if_scope(input_format == 0):\n src_channel_scalar.set_as(\n 1*src_horizontal_size*src_vertical_size)\n scalar_addr_uv.set_as(\n scalar_addr_y + src_channel_scalar)\n with self.if_scope(input_format == 2):\n src_channel_scalar.set_as(\n 8*src_horizontal_size*src_vertical_size)\n scalar_addr_uv.set_as(\n scalar_addr_y + src_channel_scalar)\n with self.if_scope(input_format == 3):\n src_channel_scalar.set_as(\n 4*src_horizontal_size*src_vertical_size)\n scalar_addr_uv.set_as(\n scalar_addr_y + src_channel_scalar)\n with self.if_scope(input_format == 7):\n src_channel_scalar.set_as(\n 1*src_horizontal_size*src_vertical_size)\n scalar_addr_uv.set_as(\n scalar_addr_y + src_channel_scalar)\n\n else:\n if input_format in (0, 2, 3, 7):\n src_channel = AIPP_INPUT_TYPE_SWAP_ALIGN.get(\n input_format).get('src0_size_bytes')\n scalar_addr_uv.set_as(\n scalar_addr_y +\n src_horizontal_size*src_vertical_size*src_channel)\n\n aipp1_config = [scalar_addr_uv, csc_enable]\n\n aipp1_register = concat_params(aipp1_config, AIPP1_OFFSET_LIST,\n AIPP1_SEGMENT_LIST, dtype=\"uint64\")\n with self.new_scope():\n aipp_spr1 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr1.set_as(aipp1_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_1\",\n aipp_spr1.get()))\n\n def _set_aipp_spr2(self, csc_matrix):\n \"\"\"set spr2\"\"\"\n aipp2_config = [csc_matrix[0][0], csc_matrix[0][1], csc_matrix[0][2],\n csc_matrix[1][0]]\n\n aipp2_register = concat_params(aipp2_config, AIPP2_OFFSET_LIST,\n AIPP2_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr2 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr2.set_as(aipp2_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_2\",\n aipp_spr2.get()))\n\n def _set_aipp_spr3(self, csc_matrix):\n \"\"\"set spr3\"\"\"\n aipp3_config = [csc_matrix[1][1], csc_matrix[1][2], csc_matrix[2][0],\n csc_matrix[2][1]]\n\n aipp3_register = concat_params(aipp3_config, AIPP3_OFFSET_LIST,\n AIPP3_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr3 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr3.set_as(aipp3_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_3\",\n aipp_spr3.get()))\n\n def _set_aipp_spr4(self, csc_matrix, csc_out_bias, csc_in_bias):\n \"\"\"set spr4\"\"\"\n aipp4_config = [csc_matrix[2][2], csc_out_bias[0], csc_out_bias[1],\n csc_out_bias[2], csc_in_bias[0], csc_in_bias[1],\n csc_in_bias[2]]\n\n aipp4_register = concat_params(aipp4_config, AIPP4_OFFSET_LIST,\n AIPP4_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr4 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr4.set_as(aipp4_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_4\",\n aipp_spr4.get()))\n\n def _set_aipp_spr5(self, dtc_mean0_uint32, dtc_mean1_uint32,\n dtc_mean2_uint32, dtc_mean3_uint32):\n \"\"\"set spr5\"\"\"\n aipp5_config = 
[dtc_mean0_uint32 % BIT_16, dtc_mean1_uint32 % BIT_16,\n dtc_mean2_uint32 % BIT_16, dtc_mean3_uint32 % BIT_16]\n\n aipp5_register = concat_params(aipp5_config, AIPP5_OFFSET_LIST,\n AIPP5_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr5 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr5.set_as(aipp5_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_5\",\n aipp_spr5.get()))\n\n def _set_aipp_spr6(self, dtc_min0_uint32, dtc_min1_uint32, dtc_min2_uint32,\n dtc_min3_uint32):\n \"\"\"set spr6\"\"\"\n aipp6_config = [dtc_min0_uint32, dtc_min1_uint32, dtc_min2_uint32,\n dtc_min3_uint32]\n aipp6_register = concat_params(aipp6_config, AIPP6_OFFSET_LIST,\n AIPP6_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr6 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr6.set_as(aipp6_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_6\",\n aipp_spr6.get()))\n\n def _set_aipp_spr7(self, dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32,\n dtc_var3_uint32):\n \"\"\"set spr7\"\"\"\n aipp7_config = [dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32,\n dtc_var3_uint32]\n\n aipp7_register = concat_params(aipp7_config, AIPP7_OFFSET_LIST,\n AIPP7_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr7 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr7.set_as(aipp7_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_7\",\n aipp_spr7.get()))\n\n def _set_aipp_spr8(self, channel0_pad_value_uint32,\n channel1_pad_value_uint32, channel2_pad_value_uint32,\n channel3_pad_value_uint32):\n \"\"\"set spr8\"\"\"\n aipp8_config = [channel0_pad_value_uint32, channel1_pad_value_uint32,\n channel2_pad_value_uint32, channel3_pad_value_uint32]\n\n aipp8_register = concat_params(aipp8_config, AIPP8_OFFSET_LIST,\n AIPP8_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr8 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr8.set_as(aipp8_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_8\",\n aipp_spr8.get()))\n\n def _set_aipp_spr9(self, arch_version, # pylint: disable=R0913, R0914\n dtc_mean2_uint32, dtc_mean3_uint32,\n channel_pad_mode, flip_mode, channel_pad_value_uint32,\n rb_swap, uv_swap, ax_swap, input_format,\n single_line_mode, area_pad_mode, raw_to_f16_n,\n dtc_mean_type, raw_image_channel, raw_start_channel):\n \"\"\"set spr9\"\"\"\n if arch_version == AIC:\n sfr_dtc_pixel_mean_ch2 = dtc_mean2_uint32 // BIT_16\n sfr_dtc_pixel_mean_ch3 = dtc_mean3_uint32 // BIT_16\n else:\n sfr_dtc_pixel_mean_ch2 = 0\n sfr_dtc_pixel_mean_ch3 = 0\n no_padding = channel_pad_mode & 1\n padd_4channels = channel_pad_mode // 2\n horizontal_flip_enable = flip_mode & 1\n vertical_flip_enable = flip_mode // 2\n aipp9_config = [channel_pad_value_uint32, rb_swap, uv_swap, ax_swap,\n input_format, single_line_mode, horizontal_flip_enable,\n vertical_flip_enable, area_pad_mode, no_padding,\n raw_to_f16_n, dtc_mean_type, raw_image_channel,\n raw_start_channel, padd_4channels,\n sfr_dtc_pixel_mean_ch2, sfr_dtc_pixel_mean_ch3]\n\n aipp9_register = concat_params(aipp9_config, AIPP9_OFFSET_LIST,\n AIPP9_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr9 = 
self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr9.set_as(aipp9_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_9\",\n aipp_spr9.get()))\n\n def _set_aipp_spr10(self, dst_stride_pixel):\n \"\"\"set spr10\"\"\"\n aipp10_config = [dst_stride_pixel]\n aipp10_register = concat_params(aipp10_config, AIPP10_OFFSET_LIST,\n AIPP10_SEGMENT_LIST, dtype=\"uint64\")\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr10 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr10.set_as(aipp10_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_10\",\n aipp_spr10.get()))\n\n def _set_aipp_spr11(self, pre_botton_clip_number, pre_top_clip_number):\n \"\"\"set spr11\"\"\"\n aipp11_config = [pre_botton_clip_number, pre_top_clip_number]\n aipp11_register = concat_params(aipp11_config, AIPP11_OFFSET_LIST,\n AIPP11_SEGMENT_LIST, dtype=\"uint64\")\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr11 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr11.set_as(aipp11_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_11\",\n aipp_spr11.get()))\n\n def _set_aipp_spr12(self, scf_vertical_size, scf_horizontal_size):\n \"\"\"set spr12\"\"\"\n aipp12_config = [scf_vertical_size - 1, scf_horizontal_size - 1]\n aipp12_register = concat_params(aipp12_config, AIPP12_OFFSET_LIST,\n AIPP12_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr12 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr12.set_as(aipp12_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_12\",\n aipp_spr12.get()))\n\n def _set_aipp_spr13(self, hori_scaling_enable, # pylint: disable=R0913\n vertical_scaling_enable, order_hori_vert_filter,\n vert_scaling_mode, alpha_vert_scaling_mode,\n hori_scaling_mode, alpha_hori_scaling_mode):\n \"\"\"set spr13\"\"\"\n aipp13_config = [hori_scaling_enable, vertical_scaling_enable,\n order_hori_vert_filter, vert_scaling_mode,\n alpha_vert_scaling_mode, hori_scaling_mode,\n alpha_hori_scaling_mode]\n\n aipp13_register = concat_params(aipp13_config, AIPP13_OFFSET_LIST,\n AIPP13_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr13 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr13.set_as(aipp13_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_13\",\n aipp_spr13.get()))\n\n def _set_aipp_spr15(self, init_vert_phase, init_hori_phase):\n \"\"\"set spr15\"\"\"\n aipp15_config = [init_vert_phase, init_hori_phase]\n\n aipp15_register = concat_params(aipp15_config, AIPP15_OFFSET_LIST,\n AIPP15_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr15 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr15.set_as(aipp15_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_15\",\n aipp_spr15.get()))\n\n def _set_aipp_spr16(self, vert_scaling, hori_scaling):\n \"\"\"set spr16\"\"\"\n aipp16_config = [vert_scaling, hori_scaling]\n\n aipp16_register = concat_params(aipp16_config, AIPP16_OFFSET_LIST,\n AIPP16_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr16 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr16.set_as(aipp16_register)\n self.emit(tvm.call_extern(\"uint64\", 
\"set_aipp_spr_16\",\n aipp_spr16.get()))\n\n def _set_aipp_spr17(self, post_botton_clip_number, # pylint: disable=R0913\n post_top_clip_number, post_right_clip_number,\n post_left_clip_number, post_clip_enable):\n \"\"\"set spr17\"\"\"\n aipp17_config = [post_botton_clip_number, post_top_clip_number,\n post_right_clip_number, post_left_clip_number,\n post_clip_enable]\n\n aipp17_register = concat_params(aipp17_config, AIPP17_OFFSET_LIST,\n AIPP17_SEGMENT_LIST, dtype=\"uint64\")\n\n with self.context.freeze(): # pylint: disable=E1101\n with self.new_scope():\n aipp_spr17 = self.Scalar_(dtype=\"uint64\") # pylint: disable=E1101\n aipp_spr17.set_as(aipp17_register)\n self.emit(tvm.call_extern(\"uint64\", \"set_aipp_spr_17\",\n aipp_spr17.get()))\n\n def _aipp(self, dst, src0, src1, # pylint: disable=R0913, R0914\n input_format, src_horizontal_size, src_vertical_size,\n horizontal_size, vertical_size, horizontal_start,\n vertical_start, single_line_mode, pre_top_clip_number,\n pre_botton_clip_number, csc_matrix, csc_out_bias,\n csc_in_bias, rb_swap, uv_swap, ax_swap,\n scf_horizontal_size, scf_vertical_size,\n alpha_hori_scaling_mode, hori_scaling_mode,\n alpha_vert_scaling_mode, vert_scaling_mode,\n order_hori_vert_filter,\n vertical_scaling_enable, hori_scaling_enable, init_vert_phase,\n init_hori_phase, vert_scaling, hori_scaling,\n post_botton_clip_number, post_top_clip_number,\n post_right_clip_number, post_left_clip_number,\n dtc_mean_type, dtc_mean0_uint32, dtc_mean1_uint32,\n dtc_mean2_uint32, dtc_mean3_uint32, dtc_min0_uint32,\n dtc_min1_uint32, dtc_min2_uint32, dtc_min3_uint32,\n dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32,\n dtc_var3_uint32, raw_to_f16_n, flip_mode, channel_pad_mode,\n channel_pad_value_uint32, area_pad_mode,\n top_pad_rows, botton_pad_rows, left_pad_cols, right_pad_cols,\n channel0_pad_value_uint32, channel1_pad_value_uint32,\n channel2_pad_value_uint32, channel3_pad_value_uint32,\n dst_stride_pixel, raw_image_channel, raw_start_channel, sid,\n arch_version, csc_enable, post_clip_enable, dst_extent):\n \"\"\"aipp function\"\"\"\n # function's input params is too much, so disable them\n\n # set aipp spr\n # aipp SPR0\n scalar_addr_y = self._set_aipp_spr0(arch_version, dtc_mean0_uint32,\n dtc_mean1_uint32, src0)\n\n # aipp SPR1\n self._set_aipp_spr1(input_format, csc_enable, src1, scalar_addr_y,\n src_horizontal_size, src_vertical_size)\n\n # aipp SPR2\n self._set_aipp_spr2(csc_matrix)\n\n # aipp SPR3\n self._set_aipp_spr3(csc_matrix)\n\n # aipp SPR4\n self._set_aipp_spr4(csc_matrix, csc_out_bias, csc_in_bias)\n\n # aipp SPR5\n self._set_aipp_spr5(dtc_mean0_uint32, dtc_mean1_uint32,\n dtc_mean2_uint32, dtc_mean3_uint32)\n\n # aipp SPR6\n self._set_aipp_spr6(dtc_min0_uint32, dtc_min1_uint32, dtc_min2_uint32,\n dtc_min3_uint32)\n\n # aipp SPR7\n self._set_aipp_spr7(dtc_var0_uint32, dtc_var1_uint32, dtc_var2_uint32,\n dtc_var3_uint32)\n\n # padding_mode:0 set\n # aipp SPR8 (padding_mode)\n self._set_aipp_spr8(channel0_pad_value_uint32,\n channel1_pad_value_uint32,\n channel2_pad_value_uint32,\n channel3_pad_value_uint32)\n\n # aipp SPR9\n self._set_aipp_spr9(arch_version, dtc_mean2_uint32, dtc_mean3_uint32,\n channel_pad_mode, flip_mode,\n channel_pad_value_uint32, rb_swap, uv_swap, ax_swap,\n input_format, single_line_mode, area_pad_mode,\n raw_to_f16_n, dtc_mean_type, raw_image_channel,\n raw_start_channel)\n\n if arch_version == HI3796CV300ESAIC:\n # aipp SPR10\n self._set_aipp_spr10(dst_stride_pixel)\n\n # aipp SPR11\n 
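# (a hedged note: SPR11 carries the two pre-clip row counts set\n # above; e.g. with pre_top_clip_number = 2 and\n # pre_botton_clip_number = 2, a 228-row cropped image is reduced\n # to 224 rows before scaling)\n 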
self._set_aipp_spr11(pre_botton_clip_number, pre_top_clip_number)\n\n # aipp SPR12\n self._set_aipp_spr12(scf_vertical_size, scf_horizontal_size)\n\n # aipp SPR13\n self._set_aipp_spr13(hori_scaling_enable, vertical_scaling_enable,\n order_hori_vert_filter, vert_scaling_mode,\n alpha_vert_scaling_mode, hori_scaling_mode,\n alpha_hori_scaling_mode)\n\n # aipp SPR15\n self._set_aipp_spr15(init_vert_phase, init_hori_phase)\n\n # aipp SPR16\n self._set_aipp_spr16(vert_scaling, hori_scaling)\n\n # aipp SPR17\n self._set_aipp_spr17(post_botton_clip_number, post_top_clip_number,\n post_right_clip_number, post_left_clip_number,\n post_clip_enable)\n\n # set aipp info\n # xs info\n xs_config = [horizontal_size - 1, vertical_size - 1, horizontal_start,\n vertical_start]\n\n # xt info\n xt_config = [src_horizontal_size - 1, top_pad_rows, botton_pad_rows,\n left_pad_cols, right_pad_cols, sid]\n\n # config\n load_image_config = xs_config + xt_config\n\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_MTE2)\n\n if src1 is not None:\n self.scope_attr(cce_params.CCE_AXIS, \"append_mem\",\n tvm.call_extern(\"\", \"mem_vector\",\n src0.access_ptr(\"r\"),\n src1.access_ptr(\"r\")))\n else:\n self.scope_attr(cce_params.CCE_AXIS, \"append_mem\",\n tvm.call_extern(\"\", \"mem_vector\",\n src0.access_ptr(\"r\")))\n\n instr = tvm.call_extern(dst.dtype, \"load_image_to_cbuf\",\n dst.access_ptr(\"w\", extent=dst_extent),\n *type_convert(load_image_config))\n self.emit(instr)\n\n @source_info_decorator()\n @debug.winograd_fm_transform_decorator\n def winograd_feature_map_transform( # pylint: disable=R0913, R0914\n self, dst, src, l1_h, l1_w, l1_c, pad_left, pad_right, pad_top,\n pad_bottom, m_extension, m_start_pt, k_extension, k_start_pt,\n column_indicator, dst_stride):\n \"\"\" load input feature map from L1 to L0A and do partial winograd\n transform on-the-fly\n\n Parameters\n ----------\n dst: destination operator, scope_ca\n src: src operator, scope_cbuf\n l1_h: height of input feature_map\n l1_w: width of input feature_map\n l1_c: channels of input feature_map\n pad_left: col nums of padding left\n pad_right: col nums of padding right\n pad_top: row nums of padding top\n pad_bottom: row nums of padding bottom\n m_extension: m direction extension steps from the start position\n m_start_pt: m direction start position of the feature matrix\n k_extension: k direction extension steps from the start position\n k_start_pt: k direction start position of the feature matrix\n column_indicator: partial weight matrix indicator\n 0: the 1st column\n 1: the 2nd column\n 2: the 3rd column\n 3: the 4th column\n dst_stride: inner destination gap between 4 generated expansion feature\n maps in terms of fractal matrix (512B)\n\n Returns\n -------\n None\n \"\"\"\n arch_version_dst_src_map = {\n HI3796CV300ESAIC: ['s8s8'],\n HI3796CV300CSAIC: ['s8s8', 'u8u8'],\n AIC: ['s8s8', 'u8u8', 'f16f16']\n }\n arch_version = get_soc_name() + get_soc_core_type()\n # check instruction\n TikCheckUtil.check_in_range(\n arch_version, arch_version_dst_src_map.keys(),\n \"input core_arch: {} doesn't support \"\n \"winograd_feature_map_transform.\".format(arch_version))\n # check scope\n TikCheckUtil.check_equality(\n dst.scope, \"local.L0A\",\n \"dst scope should be l0a, input scope: {}\".format(dst.scope))\n TikCheckUtil.check_equality(src.scope, \"local.L1\",\n \"src scope should be l1, input scope: {}\"\n .format(src.scope))\n # check dtype\n dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n 
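# dtype_str is the concatenated short dtype code of dst and src,\n # e.g. float16 -> float16 gives \"f16f16\"; per\n # arch_version_dst_src_map above, AIC accepts s8s8, u8u8 and\n # f16f16, while HI3796CV300ESAIC accepts only s8s8.\n 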
TikCheckUtil.check_in_range(\n dtype_str, arch_version_dst_src_map[arch_version],\n \"{} winograd_feature_map_transform doesn't support from {} to {}\"\n .format(arch_version, src.dtype, dst.dtype))\n # check m & k\n TikCheckUtil.check_type_match(\n m_extension, (int, Scalar, Expr),\n \"m_extension should be int, Scalar, Expr, input type of m_extension\"\n \": {}\".format(type(m_extension)))\n check_scalar_dtype(m_extension,\n \"scalar_m_extension should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n m_start_pt, (int, Scalar, Expr),\n \"m_start_pt should be int, Scalar, Expr, input type of m_start_pt: \"\n \"{}\".format(type(m_start_pt)))\n check_scalar_dtype(m_start_pt,\n \"scalar_m_start_pt should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n k_start_pt, (int, Scalar, Expr),\n \"k_start_pt should be int, Scalar, Expr, input type of \"\n \"k_start_pt: {}\".format(type(k_start_pt)))\n check_scalar_dtype(k_start_pt,\n \"scalar_k_start_pt should be a scalar of int/uint\")\n TikCheckUtil.check_type_match(\n k_extension, (int, Scalar, Expr),\n \"k_extension should be int, Scalar, Expr, input type of k_extension\"\n \": {}\".format(type(k_extension)))\n check_scalar_dtype(k_extension,\n \"scalar_k_extension should be a scalar of int/uint\")\n check_wino_ft_params(l1_h, l1_w, l1_c, dst_stride, pad_left, pad_right,\n pad_top, pad_bottom, m_extension, m_start_pt,\n k_extension, k_start_pt, column_indicator, src)\n # calculate extent\n src_extent = Expr(l1_w*l1_h*l1_c*DTYPE_SIZE[src.dtype]).get()\n # one instr writes 4 expand feature maps\n expand_fm_num = 4\n dst_extent = m_extension*k_extension*expand_fm_num*\\\n DTYPE_SIZE[src.dtype] + \\\n dst_stride*BYTE_PER_FRACTAL*(expand_fm_num - 1)\n dst_extent = Expr(dst_extent).get()\n # code gen\n config = [l1_w, l1_h, l1_c, dst_stride, column_indicator,\n WINO_PAD_MAP[(pad_left, pad_right)],\n WINO_PAD_MAP[(pad_top, pad_bottom)]]\n reg_xm = concat_params(config, WINO_FM_XM_OFFSET_LIST,\n WINO_FM_XM_SEGMENT_LIST)\n config = [k_extension, k_start_pt, m_extension, m_start_pt]\n reg_xt = concat_params(config, WINO_FM_XT_OFFSET_LIST,\n WINO_FM_XT_SEGMENT_LIST)\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_MTE1)\n instr = tvm.call_extern(\n dst.dtype, \"load_cbuf_to_ca_winograd\",\n dst.access_ptr(\"w\", extent=dst_extent),\n src.access_ptr(\"r\", extent=src_extent), reg_xm, reg_xt)\n self.emit(instr, ONE_IR)\n\n @source_info_decorator()\n @debug.winograd_weight_trans_decorator\n def winograd_weight_transform( # pylint: disable=R0913, R0914\n self, dst, src, column_indicator, repeat_dir, repeat_times,\n dst_blk_stride, dst_rep_stride, src_rep_stride,\n en_weight_offset=False, smask=None):\n \"\"\" reads 9 fractal matrixes from L1, performs partial winograd\n transform and writes 4 transformed fractal matrixes into L0B\n\n Parameters\n ----------\n dst: destination operator, scope_cb\n src: src operator, scope_cbuf\n column_indicator: partial weight indicator\n 0: the 1st column\n 1: the 2nd column\n 2: the 3rd column\n 3: the 4th column\n repeat_dir: indicates the direction in which this instruction\n repeats\n 0: vertical\n 1: horizontal\n repeat_times: the number of iterations this instruction would be\n executed\n dst_blk_stride: inner destination stride between the 4 weight matrixes\n to be written into L0B in one single iteration in unit\n of fractal matrix\n dst_rep_stride: destination repeat stride between the destination\n addresses 
of 2 successive interations\n src_rep_stride: source repeat stride between the base source addresses\n of 2 successive iterations\n en_weight_offset: not support yet.\n smask: not support yet.\n\n Returns\n -------\n None\n \"\"\"\n arch_version_dst_src_map = {\n 'v200': {'hisi-es': ['s8s8'], 'hisi-cs': ['s8s8', 'u8u8'],\n 'aic': ['s8s8', 'u8u8', 'f16f16']}}\n woff_arch_version_map = ['v200hisi-cs']\n # check instruction\n TikCheckUtil.check_in_range(\n self.core_arch, arch_version_dst_src_map.keys(),\n \"input core_arch: {} doesn't support \"\n \"winograd_weight_transform.\".format(self.core_arch))\n TikCheckUtil.check_in_range(\n self.core_version,\n arch_version_dst_src_map[self.core_arch].keys(),\n \"{}-{} doesn't support winograd_weight_transform.\"\n .format(self.core_arch, self.core_version))\n # check scope\n TikCheckUtil.check_equality(\n dst.scope, \"local.L0B\",\n \"dst scope should be l0b, input scope: {}\".format(dst.scope))\n TikCheckUtil.check_equality(src.scope, \"local.L1\",\n \"src scope should be l1, input scope: {}\"\n .format(src.scope))\n # check dtype\n dtype_str = DTYPE_MAP[dst.dtype] + DTYPE_MAP[src.dtype]\n TikCheckUtil.check_in_range(\n dtype_str, arch_version_dst_src_map[self.core_arch][\n self.core_version],\n \"{}-{} winograd_weight_transform doesn't support from {} to {}\"\n .format(self.core_arch, self.core_version, src.dtype, dst.dtype))\n # check column_indicator\n check_integer_in_range(\n column_indicator, range(MAX_COL_INDIC),\n \"column_indicator should be in the range of [0, 3], input \"\n \"column_indicator: {}\".format(column_indicator))\n # check repeat_dir\n check_integer_in_range(\n repeat_dir, range(MAX_REP_DIR),\n \"repeat_dir should be in the range of [0, 1], input repeat_dir: {}\"\n .format(repeat_dir))\n # check repeat_times\n check_repeat_times(repeat_times)\n # check stride\n check_integer_in_range(\n dst_blk_stride, range(MAX_BLK_STRIDE_SINGLE_BYTE),\n \"dst_blk_stride should be in the range of [0, 255], input \"\n \"dst_blk_stride: {}\".format(dst_blk_stride))\n check_integer_in_range(\n dst_rep_stride, range(MAX_REP_STRIDE_SINGLE_BYTE),\n \"dst_rep_stride should be in the range of [0, 255], input \"\n \"dst_rep_stride: {}\".format(dst_rep_stride))\n check_integer_in_range(\n src_rep_stride, range(MAX_REP_STRIDE_DOUBLE_BYTE),\n \"src_rep_stride should be in the range of [0, 65535], input \"\n \"src_rep_stride: {}\".format(src_rep_stride))\n # check en_weight_offset\n arch_version_str = self.core_arch + self.core_version\n if en_weight_offset:\n TikCheckUtil.check_in_range(\n arch_version_str, woff_arch_version_map,\n \"{}-{} doesn't support weight_offset.\"\n .format(self.core_arch, self.core_version))\n check_weight_offset(smask, \"winograd_weight_transform\", \"smask\")\n # not support smask yet\n smask_addr = smask.accss_ptr(\"r\")\n woff_bit = 1\n else:\n smask_addr = 0\n woff_bit = 0\n # calculate extent\n src_extent, dst_extent = _calculate_winograd_ft_extent(\n repeat_times, src_rep_stride, dst_rep_stride, dst_blk_stride)\n # code gen\n # smask cannot support config mode\n config = [dst_blk_stride, src_rep_stride, dst_rep_stride, smask_addr,\n column_indicator, repeat_dir, woff_bit, repeat_times]\n args = concat_params(config, WINO_WGT_OFFSET_LIST,\n WINO_WGT_SEGMENT_LIST)\n with self.new_scope():\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_MTE1)\n instr = tvm.call_extern(\n dst.dtype, \"load_cbuf_to_cb_winograd\",\n dst.access_ptr(\"w\", extent=dst_extent),\n src.access_ptr(\"r\", extent=src_extent), args)\n 
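# 'args' now carries every stride, indicator and the repeat count packed into
# a single immediate by concat_params, so the emit below issues the whole
# weight transform as one IR node on the MTE1 pipe.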
self.emit(instr, ONE_IR)\n\n def check_vbi_param(self, dst, src0, src1, # pylint: disable=R0913\n src0_offset, dst_blk_stride,\n vertical_repeat_times, horizontal_repeat_times,\n repeat_mode, vertical_repeat_offset):\n \"\"\"check param for vbi instruction\"\"\"\n # check operator\n TikCheckUtil.check_type_match(dst, Tensor, \"dst should be tensor but \"\n \"get %s\" % type(dst))\n TikCheckUtil.check_type_match(src0, Tensor, \"src0 should be tensor but \"\n \"get %s\" % type(src0))\n TikCheckUtil.check_type_match(src1, Tensor, \"src1 should be tensor but \"\n \"get %s\" % type(src1))\n TikCheckUtil.check_type_match(src0_offset, Tensor,\n \"src0_offset should be tensor but \"\n \"get %s\" % type(src0_offset))\n TikCheckUtil.check_equality(src0.scope, scope_ubuf,\n \"src0's scope must be UB\")\n TikCheckUtil.check_equality(src1.scope, scope_ubuf,\n \"src1's scope must be UB\")\n TikCheckUtil.check_equality(src0_offset.scope, scope_ubuf,\n \"src0_offset's scope must be UB\")\n TikCheckUtil.check_equality(dst.scope, scope_ubuf,\n \"dst's scope must be UB\")\n # check operator dtype\n TikCheckUtil.check_equality(dst.dtype, src0.dtype,\n \"Intrinsic {}'s src0's dtype should be \"\n \"equal to dst's dtype\".\n format(\"vbi\"))\n TikCheckUtil.check_equality(dst.dtype, src1.dtype,\n \"Intrinsic {}'s src1's dtype should be \"\n \"equal to dst's dtype\".\n format(\"vbi\"))\n TikCheckUtil.check_equality(\n intrinsic_check_support(\"Intrinsic_\" + \"vbi\",\n dst.dtype), True,\n INSTR_DTYPE_SUPPORT_STATEMENT.format(dst.dtype,\n \"vbi\"))\n # check dst_blk_stride\n TikCheckUtil.check_type_match(\n dst_blk_stride, (int, Scalar, Expr),\n \"dst_blk_stride should be int, Expr or Scalar, input type is\"\n \" %s\" % type(dst_blk_stride))\n check_scalar_dtype(dst_blk_stride, \"scalar_dst_blk_stride should \"\n \"be a scalar of int/uint\")\n check_integer_in_range(\n dst_blk_stride, range(MAX_BLK_STRIDE_DOUBLE_BYTE),\n \"dst_blk_stride should be in the range of [0, %s], input value\"\n \" is %s\" % (MAX_BLK_STRIDE_DOUBLE_BYTE - 1, dst_blk_stride))\n # check vertical_repeat_times\n check_repeat_times(vertical_repeat_times)\n # check horizontal_repeat_times\n check_repeat_times(horizontal_repeat_times)\n # check repeat_mode\n TikCheckUtil.check_equality(\n type(repeat_mode), int, \"repeat_mode should be int, input type \"\n \"is %s\" % type(repeat_mode))\n TikCheckUtil.check_in_range(\n repeat_mode, (0, 1),\n \"repeat_mode only support 0 and 1, input value is %s\" % repeat_mode)\n # check vertical_repeat_offset\n TikCheckUtil.check_type_match(\n vertical_repeat_offset, (int, Scalar, Expr),\n \"vertical_repeat_offset should be int, Expr and Scalar, \"\n \"input type is %s\" % type(vertical_repeat_offset))\n check_scalar_dtype(vertical_repeat_offset,\n \"scalar_vertical_repeat_offset should \"\n \"be a scalar of int/uint\")\n check_integer_in_range(\n vertical_repeat_offset, range(MAX_REP_STRIDE_DOUBLE_BYTE),\n \"vertical_repeat_offset should be in the range of [0, %s], \"\n \"input value is %s\" % (MAX_REP_STRIDE_DOUBLE_BYTE - 1,\n vertical_repeat_offset))\n\n @source_info_decorator()\n @debug.vbi_decorator\n def vbi(self, mask, dst, src0, src1, src0_offset, # pylint: disable=R0913\n dst_blk_stride, vertical_repeat_times, horizontal_repeat_times,\n repeat_mode, vertical_repeat_offset):\n \"\"\"vbi instruction, used for bilinear interpolation in ROI alignment\n\n Parameters\n ----------\n mask: Effective operation on element\n dst: destination tensor\n src0: src0 tensor\n src1: src1 tensor\n src0_offset: src0_offset 
tensor\n dst_blk_stride: offset of dst operator between different block\n in one iteration\n vertical_repeat_times: repeat_times in vertical direction\n horizontal_repeat_times: repeat_times in horizontal direction\n repeat_mode: indicate how many elements at src1 are consumed\n in one iteration\n vertical_repeat_offset: vertical repeat offset between dst address of\n iterations in the vertical direction\n Returns\n -------\n None\n \"\"\"\n self.check_vbi_param(dst, src0, src1, src0_offset, dst_blk_stride,\n vertical_repeat_times, horizontal_repeat_times,\n repeat_mode, vertical_repeat_offset)\n\n # check mask and get mask_o\n mask_o = mask_concat(self, mask, tensor_bit_len=get_bit_len(dst.dtype))\n # check tensor overflow, including src1, dst and src_offset\n mask_len = get_vbi_mask_len(mask)\n check_vbi_dst_offset_overflow(dst, src0_offset, mask_len,\n horizontal_repeat_times,\n vertical_repeat_times,\n dst_blk_stride, vertical_repeat_offset)\n check_vbi_src1_tensor_overflow(src1, repeat_mode,\n vertical_repeat_times*\\\n horizontal_repeat_times, mask_len,\n src1.offset)\n # check src0_offset and src1 overlap\n check_vbi_overlap(src0_offset, src1, repeat_mode,\n vertical_repeat_times*horizontal_repeat_times,\n mask_len, src0_offset.offset, src1.offset)\n # gen code\n self._gen_vbi_code(mask_o, mask_len, dst, src0,\n src1, src0_offset, dst_blk_stride,\n vertical_repeat_times, horizontal_repeat_times,\n repeat_mode, vertical_repeat_offset)\n\n def _gen_vbi_code_vadds_part(self, mask_len, # pylint: disable=R0913\n dst, src0, src0_offset, total_repeat_times):\n \"\"\"for vbi instruction, generate vadds part code\"\"\"\n with self.context.freeze(): # pylint: disable=E1101\n base_addr = self.Scalar_(\"int32\") # pylint: disable=E1101\n base_addr.set_as(tvm.expr.Cast(\"int64\", tvm.call_extern(\n \"handle\", \"\", src0.access_ptr(\"r\"))).astype(\"int32\"))\n need_src0_block = get_vbi_src0_offset_need_size(\n dst.dtype, mask_len, total_repeat_times)\n self.vadds(need_src0_block, src0_offset, # pylint: disable=E1101\n src0_offset, base_addr, MIN_REPEAT_TIMES,\n MIN_STRIDE, MIN_STRIDE,\n BLK_NUM_PER_REP, BLK_NUM_PER_REP, mask_mode=\"counter\")\n\n def _gen_vbi_code_vbi_part(self, mask_o, dst, src1, # pylint: disable=R0913\n src0_offset, dst_extent, src0_offset_extent,\n src1_extent, config):\n \"\"\"for vbi instruction, generate vbi part code\"\"\"\n with self.new_scope():\n self.emit(tvm.call_extern(\"int64\", \"set_vector_mask\", *mask_o),\n ONE_IR)\n instr = tvm.call_extern(\n dst.dtype, \"vbi\",\n dst.access_ptr(\"w\", extent=dst_extent),\n src0_offset.reinterpret_cast_to(\"uint16\").access_ptr(\n \"r\", extent=src0_offset_extent),\n src1.access_ptr(\"r\", extent=src1_extent), *type_convert(config))\n self.scope_attr(cce_params.CCE_AXIS, \"coproc_scope\", PIPE_V)\n self.emit(instr, ONE_IR)\n\n def _gen_vbi_code(self, mask_o, mask_len, dst, # pylint: disable=R0913\n src0, src1, src0_offset, dst_blk_stride,\n vertical_repeat_times, horizontal_repeat_times,\n repeat_mode, vertical_repeat_offset):\n \"\"\"generate vbi code\"\"\"\n self._gen_vbi_code_vadds_part(\n mask_len, dst, src0, src0_offset,\n horizontal_repeat_times*vertical_repeat_times)\n self._gen_vbi_code_vbi_part(\n mask_o, dst, src1, src0_offset,\n *cal_vbi_extent(mask_len, dst, src1, src0_offset,\n horizontal_repeat_times, repeat_mode,\n dst_blk_stride, vertical_repeat_offset,\n vertical_repeat_times),\n [horizontal_repeat_times, repeat_mode, dst_blk_stride,\n vertical_repeat_offset, 
vertical_repeat_times])\n","repo_name":"jizhuoran/caffe-huawei-atlas-convertor","sub_path":"convertor/huawei/te/tik/tik_lib/tik_data_operation_api_.py","file_name":"tik_data_operation_api_.py","file_ext":"py","file_size_in_byte":301035,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"32309296195","text":"stopAfter=overlapper\n\n# original asm settings\nutgErrorRate = 0.25\nutgErrorLimit = 4.5\n\ncnsErrorRate = 0.25\ncgwErrorRate = 0.25\novlErrorRate = 0.25\n\nmerSize=14\n\nmerylMemory = 128000\nmerylThreads = 16\n\novlStoreMemory = 8192\n\n\n#ovlMemory=8GB --hashload 0.7\novlHashBits = 25\novlThreads = 4\novlHashBlockLength = 20000000\novlRefBlockSize = 50000000\n\n# for mer overlapper\nmerCompression = 1\nmerOverlapperSeedBatchSize = 500000\nmerOverlapperExtendBatchSize = 250000\n\nfrgCorrThreads = 2\nfrgCorrBatchSize = 100000\n\novlCorrBatchSize = 100000\n\n# non-Grid settings, if you set useGrid to 0 above these will be used\nmerylMemory = 128000\nmerylThreads = 4\n\novlStoreMemory = 8192\n\novlConcurrency = 6\n\ncnsConcurrency = 16\n\nmerOverlapperThreads = 2\nmerOverlapperSeedConcurrency = 6\nmerOverlapperExtendConcurrency = 6\n\nfrgCorrConcurrency = 8\novlCorrConcurrency = 16\ncnsConcurrency = 16\n","repo_name":"remiolsen/vp2015","sub_path":"assembly/pacbio.spec","file_name":"pacbio.spec","file_ext":"spec","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12311701699","text":"# Evaluate the value of an arithmetic expression in Reverse Polish Notation.\n\n# Valid operators are +, -, *, and /. Each operand may be an integer or another expression.\n\n# Note that division between two integers should truncate toward zero.\n\n# It is guaranteed that the given RPN expression is always valid. That means the expression would always evaluate to a result, and there will not be any division by zero operation.\n\n# =============================================================\n\nclass Solution:\n def evalRPN(self, tokens: list[str]) -> int:\n a = []\n\n\n for item in tokens:\n if item == '+':\n a.append(a.pop() + a.pop())\n \n elif item == '-':\n p,q = a.pop(), a.pop()\n a.append(q-p)\n\n elif item == '/':\n p,q = a.pop(), a.pop()\n a.append(int(q/p))\n\n elif item == '*':\n a.append(a.pop() * a.pop())\n\n else:\n a.append(int(item))\n \n return a[0]\n\n\n\nsol = Solution()\n\ntoken = [\"2\",\"1\",\"+\",\"3\",\"*\"]\n\nresult = sol.evalRPN(token)\n\nprint(f'Result for token :{result}')","repo_name":"Animeshrockn/Leetcode","sub_path":"Leetcode150-Evaluate_Reverse_Polish_Notation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72894089226","text":"'''\r\nAll models need to return a CSP object, and a list of lists of Variable objects \r\nrepresenting the board. The returned list of lists is used to access the \r\nsolution. \r\n\r\nFor example, after these three lines of code\r\n\r\n csp, var_array = kenken_csp_model(board)\r\n solver = BT(csp)\r\n solver.bt_search(prop_FC, var_ord)\r\n\r\nvar_array[0][0].get_assigned_value() should be the correct value in the top left\r\ncell of the KenKen puzzle.\r\n\r\nThe grid-only models do not need to encode the cage constraints.\r\n\r\n1. 
binary_ne_grid (worth 10/100 marks)\r\n    - A model of a KenKen grid (without cage constraints) built using only \r\n      binary not-equal constraints for both the row and column constraints.\r\n\r\n2. nary_ad_grid (worth 10/100 marks)\r\n    - A model of a KenKen grid (without cage constraints) built using only n-ary \r\n      all-different constraints for both the row and column constraints. \r\n\r\n3. kenken_csp_model (worth 20/100 marks) \r\n    - A model built using your choice of (1) binary not-equal, or (2) \r\n      n-ary all-different constraints for the grid.\r\n    - Together with KenKen cage constraints.\r\n\r\n'''\r\n\r\nfrom cspbase import *\r\nimport itertools\r\n\r\n\r\n# All helper functions are designed for kenken_csp_model(kenken_grid).\r\n\r\n# In case that the first element in the kenken_grid is not the size, which can also be a cell-index or operation or\r\n# target-value.\r\ndef find_grid_size(kenken_grid):\r\n\r\n    for lst in kenken_grid:\r\n\r\n        if len(lst) == 1:\r\n\r\n            return lst[0]\r\n\r\n\r\ndef create_vars_and_array(grid_size, domain):\r\n    # The collections of all variables. List of variables.\r\n    all_vars = []\r\n    # The collection of variables representing the grid. List of lists(rows) of variables.\r\n    var_array = []\r\n\r\n    for r in range(grid_size):\r\n\r\n        cur_row = []\r\n\r\n        for c in range(grid_size):\r\n\r\n            # Name \"K\" is short for \"Kenken-Cell.\"\r\n            var = Variable(\"K{}{}\".format(r, c), domain)\r\n\r\n            all_vars.append(var)\r\n            cur_row.append(var)\r\n\r\n        var_array.append(cur_row)\r\n\r\n    return all_vars, var_array\r\n\r\n\r\ndef convert_cell_index(human_read_index):\r\n    row_index = human_read_index // 10 - 1\r\n    col_index = human_read_index % 10 - 1\r\n\r\n    return row_index, col_index\r\n\r\n\r\n# These four check-functions are basically copied from test.py.\r\ndef add_check(lst_vals, target_val):\r\n    return sum(list(lst_vals)) == target_val\r\n\r\n\r\ndef mult_check(lst_vals, target_val):\r\n    prod = 1\r\n\r\n    for v in lst_vals:\r\n\r\n        prod *= v\r\n\r\n    return prod == target_val\r\n\r\n\r\ndef sub_check(lst_vals, target_val):\r\n    # We have to check all permutations! Otherwise, we will have \"CSP detected contradiction at root\" error.\r\n    # For example, suppose there is a cage: 4 - 1 = 3. 
Then (4, 1) and (1, 4) are both satisfied tuples.\r\n # If we don't check both of them, then only (4, 1) might be added into the corresponding constraint.\r\n for perm in itertools.permutations(lst_vals):\r\n\r\n result = perm[0]\r\n i = 1\r\n\r\n while i < len(lst_vals):\r\n\r\n result -= perm[i]\r\n i += 1\r\n\r\n if result == target_val:\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef div_check(lst_vals, target_val):\r\n for perm in itertools.permutations(lst_vals):\r\n\r\n result = perm[0]\r\n i = 1\r\n\r\n while i < len(lst_vals):\r\n\r\n # Use //, not / based on the Piazza post.\r\n result //= perm[i]\r\n i += 1\r\n\r\n if result == target_val:\r\n return True\r\n\r\n return False\r\n\r\n\r\n# This function should be similar to nQueens(n) in tests.py.\r\ndef binary_ne_grid(kenken_grid):\r\n grid_size = find_grid_size(kenken_grid)\r\n # Domain is decided by the grid size.\r\n dom = [i + 1 for i in range(grid_size)]\r\n\r\n all_vars, var_array = create_vars_and_array(grid_size, dom)\r\n\r\n # The collections of all constraints.\r\n cons = []\r\n # A row/column cannot have two same values, so function permutations can generate all possible tuples of values.\r\n # The satisfied tuples are the same for all cells at first.\r\n sat_tuples = list(itertools.permutations(dom, 2))\r\n\r\n for i in range(grid_size):\r\n\r\n for j in range(grid_size):\r\n\r\n for k in range(j + 1, grid_size):\r\n\r\n # Constraint of a cell and another cell in the same row.\r\n con_row = Constraint(\"C(K{}{}, K{}{})\".format(i, j, i, k), [var_array[i][j], var_array[i][k]])\r\n con_row.add_satisfying_tuples(sat_tuples)\r\n cons.append(con_row)\r\n\r\n # Constraint of a cell and another cell in the same column.\r\n con_col = Constraint(\"C(K{}{}, K{}{})\".format(j, i, k, i), [var_array[j][i], var_array[k][i]])\r\n con_col.add_satisfying_tuples(sat_tuples)\r\n cons.append(con_col)\r\n\r\n # Create CSP and add all cons into it.\r\n csp = CSP(\"{}-Kenken\".format(grid_size), all_vars)\r\n\r\n for con in cons:\r\n\r\n csp.add_constraint(con)\r\n\r\n return csp, var_array\r\n\r\n\r\n# This function should also be similar to nQueens(n) in tests.py.\r\ndef nary_ad_grid(kenken_grid):\r\n grid_size = find_grid_size(kenken_grid)\r\n dom = [i + 1 for i in range(grid_size)]\r\n\r\n all_vars, var_array = create_vars_and_array(grid_size, dom)\r\n\r\n cons = []\r\n # grid_size * (grid_size - 1) * ... 
* 2 * 1 = number of all permutations.\r\n    sat_tuples = list(itertools.permutations(dom, grid_size))\r\n\r\n    for i in range(grid_size):\r\n\r\n        row_vars = []\r\n        col_vars = []\r\n\r\n        for j in range(grid_size):\r\n\r\n            row_vars.append(var_array[i][j])\r\n            col_vars.append(var_array[j][i])\r\n\r\n        # Constraint of all cells in a row.\r\n        con_row = Constraint(\"C(Row{})\".format(i), row_vars)\r\n        con_row.add_satisfying_tuples(sat_tuples)\r\n        cons.append(con_row)\r\n\r\n        # Constraint of all cells in a column.\r\n        con_col = Constraint(\"C(Col{})\".format(i), col_vars)\r\n        con_col.add_satisfying_tuples(sat_tuples)\r\n        cons.append(con_col)\r\n\r\n    # Create CSP and add all cons into it.\r\n    csp = CSP(\"{}-Kenken\".format(grid_size), all_vars)\r\n\r\n    for con in cons:\r\n\r\n        csp.add_constraint(con)\r\n\r\n    return csp, var_array\r\n\r\n\r\ndef kenken_csp_model(kenken_grid):\r\n    # Use binary_ne_grid(kenken_grid) to build the complete model, it is much faster than nary_ad_grid(kenken_grid).\r\n    # var_array is still the same, but more constraints need to be added into csp.\r\n    csp, var_array = binary_ne_grid(kenken_grid)\r\n\r\n    grid_size = find_grid_size(kenken_grid)\r\n    dom = [i + 1 for i in range(grid_size)]\r\n\r\n    cage_count = 0\r\n    cons = []\r\n\r\n    # Generate all cage-constraints.\r\n    for cage in kenken_grid:\r\n\r\n        # A forced value for a cell.\r\n        if len(cage) == 2:\r\n            row_index, col_index = convert_cell_index(cage[0])\r\n\r\n            forced_value = cage[1]\r\n            sat_tuples = [(forced_value,)]\r\n\r\n            con = Constraint(\"Cage{}\".format(cage_count), [var_array[row_index][col_index]])\r\n            con.add_satisfying_tuples(sat_tuples)\r\n            cons.append(con)\r\n\r\n            cage_count += 1\r\n\r\n        # A cage with at least 2 cells.\r\n        elif len(cage) > 2:\r\n            operation = cage[-1]\r\n            target_value = cage[-2]\r\n\r\n            sat_tuples = []\r\n            cage_vars = []\r\n\r\n            # Get all variables in this cage.\r\n            for cell in cage[0: len(cage) - 2]:\r\n\r\n                row_index, col_index = convert_cell_index(cell)\r\n                cage_vars.append(var_array[row_index][col_index])\r\n\r\n            # Get all possible values.\r\n            # Use product, not permutation, because a cage can have the same values.\r\n            for vals in itertools.product(dom, repeat=len(cage) - 2):\r\n\r\n                if ((operation == 0 and add_check(vals, target_value))\r\n                        or (operation == 1 and sub_check(vals, target_value))\r\n                        or (operation == 2 and div_check(vals, target_value))\r\n                        or (operation == 3 and mult_check(vals, target_value))):\r\n                    sat_tuples.append(vals)\r\n\r\n            con = Constraint(\"Cage{}\".format(cage_count), cage_vars)\r\n            con.add_satisfying_tuples(sat_tuples)\r\n            cons.append(con)\r\n\r\n            cage_count += 1\r\n\r\n    for con in cons:\r\n\r\n        csp.add_constraint(con)\r\n\r\n    return csp, var_array\r\n","repo_name":"dam0nl12/CSC384","sub_path":"A2/kenken_csp.py","file_name":"kenken_csp.py","file_ext":"py","file_size_in_byte":8422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"33843914466","text":"from game.create_maze import maze_data\nfrom game.maze import MazeTemplate, Searcher, Riches\n\ndef main():\n    print(\"Hello!\\n It's the maze game\")\n    load_file = input(\"Do you want to load maze data from a file?\\ntype 1 - yes, 0 - no\")\n    if load_file == '1':\n        path = input('Type path')\n        points, searcher, treasure, x_max, y_max = maze_data(path)\n    else:\n        points, searcher, treasure, x_max, y_max = maze_data('maze.txt')\n    maze = MazeTemplate(row=x_max, col=y_max, points=points)\n    print(\"Let's go for treasure\")\n    searcher = Searcher(searcher[0],searcher[1])\n    riches = 
Riches(treasure[0], treasure[1])\n while searcher.again:\n searcher.exit_maze(maze)\n if searcher.again:\n searcher.show(maze)\n searcher.check(maze, riches)\n maze.create_maze()\n print('And now you can go home')\n if riches.info:\n print('You go home taking digital, non-linear bitcoins with you')\n else:\n print('You go home after a pleasant walk 😊')\n\nif __name__ == '__main__':\n main()","repo_name":"Panacond/text_maze","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18778200936","text":"###############################################################################\n# Language Modeling on Penn Tree Bank\n#\n# With the default parameters, this should achieve ~116 perplexity on the\n# test set.\n###############################################################################\n\nimport argparse\nimport time\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport data\nimport model\n\nparser = argparse.ArgumentParser(description='PyTorch PTB Language Model')\n\n# Data parameters\nparser.add_argument('-data' , type=str, default='./data/penn', help='Location of the data corpus' )\n# Model parameters.\nparser.add_argument('-model' , type=str, default='LSTM' , help='Type of recurrent net. RNN_TANH, RNN_RELU, LSTM, or GRU.')\nparser.add_argument('-emsize' , type=int, default=200 , help='Size of word embeddings' )\nparser.add_argument('-nhid' , type=int, default=200 , help='Number of hidden units per layer.' )\nparser.add_argument('-nlayers' , type=int, default=2 , help='Number of layers.' )\n# Optimization parameters.\nparser.add_argument('-lr' , type=float, default=20 , help='Initial learning rate.' )\nparser.add_argument('-clip' , type=float, default=0.5 , help='Gradient clipping.' )\nparser.add_argument('-maxepoch' , type=int, default=6 , help='Upper epoch limit.' )\nparser.add_argument('-batchsize' , type=int, default=20 , help='Batch size.' )\nparser.add_argument('-bptt' , type=int, default=20 , help='Sequence length.' )\n# Device parameters.\nparser.add_argument('-seed' , type=int, default=1111 , help='Random seed.' )\nparser.add_argument('-cuda' , action='store_true' , help='Use CUDA.' )\n# Misc parameters.\nparser.add_argument('-reportint' , type=int, default=200 , help='Report interval.' )\nparser.add_argument('-save' , type=str, default='model.pt' , help='Path to save the final model.' 
)\nargs = parser.parse_args()\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\n# If the GPU is enabled, do some plumbing.\n\nif torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with -cuda\")\n\n###############################################################################\n## LOAD DATA\n###############################################################################\n\ncorpus = data.Corpus(args.data)\n\ndef batchify(data, bsz):\n nbatch = int(math.floor(data.size(0) / bsz))\n data = data.narrow(0, 0, nbatch * bsz)\n data = data.view(bsz, -1).t().contiguous()\n if args.cuda:\n data = data.cuda()\n return data\n\neval_bsz = 10\ntrain = batchify(corpus.train, args.batchsize)\nvalid = batchify(corpus.valid, eval_bsz)\ntest = batchify(corpus.test, eval_bsz)\nbptt = args.bptt\nbsz = args.batchsize\n\n###############################################################################\n# MAKE MODEL\n###############################################################################\n\nntokens = corpus.dic.ntokens()\nmodel = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers)\nif args.cuda:\n model.cuda()\n\ncriterion = nn.CrossEntropyLoss()\n\n########################################\n# TRAINING\n########################################\n\nlr = args.lr\nclip = args.clip\nreportinterval = args.reportint\n\n\n# Perform the forward pass only.\ndef evaluate(model, data, criterion, bsz):\n loss = 0\n hidden = model.initHidden(bsz)\n # Loop over validation data.\n for i in range(0, data.size(0) - 1, bptt):\n seq_len = min(bptt, data.size(0) - 1 - i)\n output, hidden = model(Variable(data[i:i+seq_len], requires_grad=False), hidden)\n targets = data[i+1:i+seq_len+1].view(-1)\n loss += bptt * criterion(output.view(seq_len*bsz, -1), Variable(targets, requires_grad=False)).data\n hidden = repackageHidden(hidden)\n\n return loss[0] / data.size(0)\n\n# simple gradient clipping, using the total norm of the gradient\ndef clipGradient(model, clip):\n totalnorm = 0\n for p in model.parameters():\n modulenorm = p.grad.norm()\n totalnorm += modulenorm ** 2\n totalnorm = math.sqrt(totalnorm)\n return min(1, args.clip / (totalnorm + 1e-6))\n\n# Between bptt intervals, we want to maintain the hidden state data\n# but don't want to backprop gradients across bptt intervals.\n# So we have to rewrap the hidden state in a fresh Variable.\ndef repackageHidden(h):\n if type(h) == Variable:\n return Variable(h.data)\n else:\n return tuple(repackageHidden(v) for v in h)\n\n# Loop over epochs.\nprev_loss = None\nfor epoch in range(1, args.maxepoch+1):\n total_loss = 0\n epoch_start_time = time.time()\n # Start with an initial hidden state.\n hidden = model.initHidden(bsz)\n\n loss = 0\n i = 0\n model.zero_grad()\n total_loss = 0\n start_time = epoch_start_time = time.time()\n ntokens = corpus.dic.ntokens()\n # Loop over the training data.\n for batch, i in enumerate(range(0, train.size(0) - 1, bptt)):\n seq_len = min(bptt, train.size(0) - 1 - i)\n output, hidden = model(Variable(train[i:i+seq_len], requires_grad=False), hidden)\n targets = train[i+1:i+seq_len+1].view(-1)\n loss = criterion(output.view(-1, ntokens), Variable(targets, requires_grad=False))\n loss.backward()\n\n clipped_lr = lr * clipGradient(model, args.clip)\n\n for p in model.parameters():\n p.data.sub_(p.grad.mul(clipped_lr))\n\n hidden = repackageHidden(hidden)\n model.zero_grad()\n total_loss += loss.data\n loss = 0\n\n if batch % 
reportinterval == 0 and batch > 0:\n            cur_loss = total_loss[0] / reportinterval\n            elapsed = time.time() - start_time\n            print(\n                ('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.6f} | ms/batch {:5.2f} | '\n                + 'train loss {:5.2f} | train ppl {:8.2f}').format(\n                epoch, batch, train.size(0) // bptt, lr, elapsed * 1000 / reportinterval,\n                cur_loss, math.exp(cur_loss)\n            ))\n            total_loss = 0\n            start_time = time.time()\n\n    val_loss = evaluate(model, valid, criterion, eval_bsz)\n\n    print(\n        '| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | valid ppl {:8.2f}'.format(\n            epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss)\n    ))\n\n    # The annealing schedule.\n    if prev_loss and val_loss > prev_loss:\n        lr = lr / 4\n\n    prev_loss = val_loss\n\n# Run on test data.\ntest_loss = evaluate(model, test, criterion, eval_bsz)\nprint(\n    '| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(\n    test_loss, math.exp(test_loss)\n))\n\nif args.save != '' :\n    with open(args.save, 'wb') as f:\n        torch.save(model, f)\n","repo_name":"szagoruyko/examples","sub_path":"word_language_model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7149,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
+{"seq_id":"22968298095","text":"from apscheduler.schedulers.blocking import BlockingScheduler\r\nimport PT_Visit_Sign,analysis_config\r\n\r\ndef strif_hour_minute_seconds(raw:int) -> str:\r\n    return str(raw) if raw >= 10 else '0' + str(raw)\r\n\r\nif __name__ == '__main__':\r\n    my_config_dict = analysis_config.read_config()\r\n    scheduler = BlockingScheduler(timezone='Asia/Shanghai')\r\n    day_of_week = my_config_dict['定时']['周几至周几']\r\n    hour = int( my_config_dict['定时']['时'] )\r\n    minute = int( my_config_dict['定时']['分'] )\r\n    print('Scheduled sign-in is running, time rule:\\nsign in every week on days {} at {}:{}'.format(day_of_week,strif_hour_minute_seconds(hour),strif_hour_minute_seconds(minute) ) )\r\n    # Shift the day-of-week bounds down by one so they fit scheduler.add_job's day_of_week parameter (0-6 at most)\r\n    start_week,end_week = day_of_week.split('-')\r\n    start_week = str ( int( start_week ) -1 )\r\n    end_week = str ( int( end_week ) -1 )\r\n    day_of_week = start_week + '-' + end_week\r\n    scheduler.add_job( PT_Visit_Sign.visit_and_signin_all_websites , 'cron', day_of_week=day_of_week, hour=hour, minute=minute ,args = [ my_config_dict ])\r\n    scheduler.start()","repo_name":"wzxxh/PT","sub_path":"PT站签到脚本V1.2/PT站签到V1.2/run_by_time_control.py","file_name":"run_by_time_control.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"40936946814","text":"import io\nimport os\nimport subprocess\nimport re\n\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nfrom contextlib import redirect_stdout, redirect_stderr\nimport pytest\n\nfrom lib.db import DB\nfrom lib import utils\nfrom lib.global_config import GlobalConfig\nfrom runner import Runner\n\nrun_stderr = None\nrun_stdout = None\n\nRUN_NAME = 'test_' + utils.randomword(12)\n\n\n# override per test cleanup, as the module setup requires writing to DB\n@pytest.fixture(autouse=False)\ndef cleanup_after_test():\n    pass\n\n#pylint: disable=unused-argument # unused argument off for now - because there are no running tests in this file\ndef cleanup_after_module(autouse=True, scope=\"module\"):\n    yield\n    tables = DB().fetch_all(\"SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'\")\n    for table in tables:\n        table_name = table[0]\n        DB().query(f'TRUNCATE TABLE 
\"{table_name}\" RESTART IDENTITY CASCADE')\n\n# Runs once per file before any test(\n#pylint: disable=expression-not-assigned\ndef setup_module(module):\n out = io.StringIO()\n err = io.StringIO()\n GlobalConfig(config_name='test-config.yml').config\n with redirect_stdout(out), redirect_stderr(err):\n uri = os.path.abspath(os.path.join(\n CURRENT_DIR, 'stress-application/'))\n subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True)\n\n # Run the application\n runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', dev_repeat_run=True, skip_system_checks=True)\n runner.run()\n\n #pylint: disable=global-statement\n global run_stderr, run_stdout\n run_stderr = err.getvalue()\n run_stdout = out.getvalue()\n\ndef test_no_errors():\n # Assert that there is no std.err output\n assert run_stderr == ''\n\ndef test_cleanup_success():\n # Assert that Cleanup has run\n assert re.search(\n 'MEASUREMENT SUCCESSFULLY COMPLETED', run_stdout)\n\ndef test_db_rows_are_written_and_presented():\n # for every metric provider, check that there were rows written in the DB with info for that provider\n # also check (in the same test, to save on a DB call) that the output to STD.OUT\n # \"Imported XXX metrics from {metric_provider}\" displays the same count as in the DB\n\n run_id = utils.get_run_data(RUN_NAME)['id']\n assert(run_id is not None and run_id != '')\n query = \"\"\"\n SELECT\n metric, COUNT(*) as count\n FROM\n measurements\n WHERE run_id = %s\n GROUP BY\n metric\n \"\"\"\n data = DB().fetch_all(query, (run_id,))\n assert(data is not None and data != [])\n\n config = GlobalConfig(config_name='test-config.yml').config\n metric_providers = utils.get_metric_providers_names(config)\n\n # The network connection proxy provider writes to a different DB so we need to remove it here\n if 'NetworkConnectionsProxyContainerProvider' in metric_providers:\n metric_providers.remove('NetworkConnectionsProxyContainerProvider')\n\n for d in data:\n d_provider = utils.get_pascal_case(d[0]) + 'Provider'\n d_count = d[1]\n ## Assert the provider in DB matches one of the metric providers in config\n assert d_provider in metric_providers\n\n ## Assert the number of rows for that provider is at least 1\n assert d_count > 0\n\n ## Assert the information printed to std.out matches what's in the db\n match = re.search(rf\"Imported \\S* (\\d+) \\S* metrics from\\s*{d_provider}\", run_stdout)\n assert match is not None\n assert int(match.group(1)) == d_count\n\n ## Assert that all the providers in the config are represented\n metric_providers.remove(d_provider)\n assert len(metric_providers) == 0\n","repo_name":"green-coding-berlin/green-metrics-tool","sub_path":"tests/smoke_test.py","file_name":"smoke_test.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"81"} +{"seq_id":"24673890077","text":"import torch\nimport torch.nn as nn\n\nclass vggnet(nn.Module):\n def __init__(self, cfg, num_classes):\n super(vggnet, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU()\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),\n nn.ReLU()\n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),\n nn.ReLU()\n )\n self.conv4 = nn.Sequential(\n 
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.ReLU()\n )\n self.conv5 = nn.Sequential(\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),\n nn.ReLU()\n )\n self.fc1 = nn.Sequential(\n nn.Linear(in_features=512, out_features=4096, bias=True),\n nn.ReLU(),\n nn.Dropout(p=0.7, inplace=False)\n )\n self.fc2 = nn.Sequential(\n nn.Linear(in_features=4096, out_features=4096, bias=True),\n nn.ReLU(),\n nn.Dropout(p=0.7, inplace=False)\n )\n self.fc3 = nn.Sequential(\n nn.Linear(in_features=4096, out_features=num_classes, bias=True),\n nn.ReLU()\n )\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.softmax = nn.Softmax(dim=-1)\n \n # initialize weights\n self.apply(self.init_weight)\n\n def init_weight(self, module):\n if isinstance(module, nn.Linear):\n # He initialization with uniform distribution \n torch.nn.init.kaiming_uniform_(module.weight)\n module.bias.data.fill_(0.01)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv1(x)\n x = self.maxpool(x)\n x = self.conv2(x)\n x = self.maxpool(x)\n x = self.conv3(x)\n x = self.maxpool(x)\n x = self.conv4(x)\n x = self.maxpool(x)\n x = self.conv5(x)\n x = self.maxpool(x)\n x = x.view(-1, 512)\n x = self.fc1(x)\n x = self.fc2(x)\n x = self.fc3(x)\n return self.softmax(x)\n","repo_name":"AndesPooh258/Computer-Vision","sub_path":"Assignment 2 - Image Recognition/code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73665317705","text":"import pandas as pd\nimport numpy as np\nimport math\nimport string\nfrom dmagellan.utils.py_utils.utils import get_str_cols, str2bytes, sample, split_df, \\\n tokenize_strings_wsp, build_inv_index, get_stopwords_for_downsample\nfrom dmagellan.blocker.overlap.overlapblocker import OverlapBlocker\nfrom dmagellan.tokenizer.whitespacetokenizer import WhiteSpaceTokenizer\nfrom dmagellan.sampler.downsample.downsample import preprocess_table\n\ndef sample_stratified_length(table, idcol, lencol='strlen', sample_proportion=0.1,\n nbins=10, seed=0):\n df = concat_string_attrs_comp_len(table, idcol, lencol)\n sampled = stratify_on_len(table, df, idcol, lencol, sample_proportion, nbins,\n seed=seed)\n return sampled\n\n\ndef stratify_on_len(table, stat_table, idcol, lencol, sample_proportion, nbins, seed):\n groups = stat_table.groupby(lencol)\n len_ids = {}\n for gid, g in groups:\n len_ids[gid] = list(g[idcol].values)\n strlens = list(stat_table[lencol].values)\n strlens += [max(strlens) + 1]\n freq, edges = np.histogram(strlens, bins=nbins)\n n = int(math.floor(sample_proportion * len(table)))\n bins = [[] for _ in range(nbins)]\n keys = sorted(len_ids.keys())\n positions = np.digitize(keys, edges)\n for i in range(len(keys)):\n k, p = keys[i], positions[i]\n bins[p - 1].extend(len_ids[k])\n len_bins = [len(bins[i]) for i in range(len(bins))]\n weights = [len_bins[i] / float(sum(len_bins)) for i in range(len(bins))]\n numtups = [int(math.ceil(weights[i] * n)) for i in range(len(weights))]\n\n sampled = []\n for i in range(len(bins)):\n nt = numtups[i]\n if len(bins[i]):\n np.random.seed(seed)\n tmp = np.random.choice(bins[i], nt)\n if len(tmp):\n sampled.extend(tmp)\n table.set_index(idcol, inplace=True, drop=False)\n table['_pos'] = 
list(range(len(stat_table)))\n s_ltable = table.loc[sampled]\n s_ltable = s_ltable.sort_values(['_pos'])\n s_ltable.reset_index(drop=True, inplace=True)\n s_ltable.drop(['_pos'], axis=1, inplace=True)\n return s_ltable\n\ndef concat_string_attrs_comp_len(table, idcol, lencol):\n strcols = list(get_str_cols(table))\n strcols.append(idcol)\n projdf = table[strcols]\n t_dict = {}\n for row in projdf.itertuples():\n colvalues = row[1:-1]\n uid = row[-1]\n strings = [colvalue.strip() for colvalue in colvalues if not pd.isnull(colvalue)]\n concatrow = ' '.join(strings).lower()\n concatrow = concatrow.translate(None, string.punctuation)\n t_dict[uid] = len(concatrow)\n\n return pd.DataFrame(t_dict.items(), columns=[idcol, lencol])\n##########\ndef concat_strings(table, idcol, concatcol):\n strcols = list(get_str_cols(table))\n strcols.append(idcol)\n projdf = table[strcols]\n t_dict = {}\n for row in projdf.itertuples():\n colvalues = row[1:-1]\n uid = row[-1]\n strings = [colvalue.strip() for colvalue in colvalues if not pd.isnull(colvalue)]\n concatrow = ' '.join(strings).lower()\n concatrow = concatrow.translate(None, string.punctuation)\n t_dict[uid] = concatrow\n\n return pd.DataFrame(t_dict.items(), columns=[idcol, concatcol])\n\n\n\ndef sample_stratified_probelen(table, othertable, idcol, oidcol,\n sample_proportion=10, lstopwords=[],\n rstopwords=[],\n lenprobes='probelen',\n nbins=10,\n seed=0):\n df = concat_strings_comp_probelen(table, othertable, oidcol, lenprobes, lstopwords,\n rstopwords)\n s_table = stratify_on_probelen(table, df, idcol, lenprobes, sample_proportion, nbins,\n seed)\n return s_table\n\n\ndef concat_strings_comp_probelen(table, othertable, idcol, lenprobes, lstopwords,\n rstopwords):\n tok = WhiteSpaceTokenizer()\n concatcol = 'concatcol'\n odf = concat_strings(othertable, idcol, concatcol)\n ob = OverlapBlocker()\n p = ob.process_and_tokenize_ltable(odf, idcol, concatcol,\n tok, lstopwords)\n inv_index = build_inv_index([p])\n df = concat_strings(table, idcol, concatcol)\n p = ob.process_and_tokenize_ltable(df, idcol, concatcol, tok, rstopwords)\n tok_cnt = {}\n tok_map = {}\n for i in range(p.size()):\n tid, tokens = p.get(i)\n cnt = 0\n for tok in tokens:\n if tok not in tok_map:\n tok_map[tok] = len(inv_index.values(tok))\n cnt += tok_map[tok]\n tok_cnt[tid] = cnt\n df = pd.DataFrame(tok_cnt.items(), columns=[idcol, lenprobes])\n return df\n\n\ndef stratify_on_probelen(table, stat_table, idcol, probelen, sample_proportion, nbins,\n seed):\n groups = stat_table.groupby(probelen)\n cnt_ids = {}\n for gid, g in groups:\n cnt_ids[gid] = list(g[idcol].values)\n cnts = list(stat_table[probelen].values)\n cnts += [max(cnts) + 1]\n freq, edges = np.histogram(cnts, bins=nbins)\n n = int(math.floor(sample_proportion * len(stat_table)))\n bins = [[] for _ in range(nbins)]\n keys = sorted(cnt_ids.keys())\n positions = np.digitize(keys, edges)\n\n for i in range(len(keys)):\n k, p = keys[i], positions[i]\n bins[p - 1].extend(cnt_ids[k])\n len_bins = [len(bins[i]) for i in range(len(bins))]\n\n weights = [len_bins[i] / float(sum(len_bins)) for i in range(len(bins))]\n numtups = [int(math.ceil(weights[i] * n)) for i in range(len(weights))]\n\n sampled = []\n for i in range(len(bins)):\n nt = numtups[i]\n np.random.seed(seed)\n if len(bins[i]):\n tmp = np.random.choice(bins[i], nt)\n if len(tmp):\n sampled.extend(tmp)\n table['_pos'] = list(range(len(stat_table)))\n table.set_index(idcol, inplace=True, drop=False)\n s_rtable = table.loc[sampled]\n s_rtable = 
s_rtable.sort_values('_pos')\n s_rtable.drop(['_pos'], axis=1, inplace=True)\n # rtable.drop(['_pos'], axis=1, inplace=True)\n return s_rtable","repo_name":"kvpradap/tuning_tool","sub_path":"downsample/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32247948055","text":"import streamlit as st\nimport requests, json\n\n\n# Fonction permettent d'avoir la météo.\ndef found_temperature(API_KEY:str, CITY:str):\n BASE_URL = \"https://api.openweathermap.org/data/2.5/weather?\"\n URL = BASE_URL + \"q=\" + CITY + \"&appid=\" + API_KEY\n response = requests.get(URL)\n if response.status_code == 200:\n # getting data in the json format\n data = response.json()\n # getting the main dict block\n main = data['main']\n # getting temperature\n temperature = main['temp']\n # getting the humidity\n humidity = main['humidity']\n # getting the pressure\n pressure = main['pressure']\n # weather report\n report = data['weather']\n return temperature, humidity, pressure, report[0]['description']\n else:\n print(\"error\")\n \n \n# Fonction permettent de mettre un background.\ndef background_front(url:str):\n st.markdown(\n f\"\"\"\n \n \"\"\",\n unsafe_allow_html=True\n )\n","repo_name":"Chemsdev/meteo","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5009979233","text":"from Mondrian import *\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\n\ndef k(x1,x2):\n\treturn np.dot(x1,x2.T)\n\n\n\nX = np.genfromtxt('/home/saket/Desktop/train131.csv', delimiter = ',')\nXtest = np.genfromtxt('/home/saket/Desktop/test3.csv', delimiter=',')\ntrain_in_shape = np.shape(X)\ny = X[:,[0]]\n# X = X[:,1:train_in_shape[0]:1]\nX = X[:,[1,2]]\ntest_in_shape = np.shape(Xtest)\nytest = Xtest[:,[0]]\n# Xtest = Xtest[:,1:test_in_shape[0]:1]\nXtest = Xtest[:,[1,2]]\n\nposi = np.where(ytest == 1)\nnegi = np.where(ytest == -1)\n# print(len(posi[0]))\n# print(len(negi[0]))\nplt.plot(Xtest[posi[0],[0]], Xtest[posi[0],[1]],'ro')\n# plt.show()\nplt.plot(Xtest[negi[0],[0]], Xtest[negi[0],[1]], 'b+')\nplt.show()\n# print(\"shape of X_train: \",train_in_shape)\nprint(\"shape of X_test: \",np.shape(Xtest))\n\ncal_train_kernel = np.zeros((train_in_shape[0], train_in_shape[0]), dtype = 'float')\ncal_test_kernel = np.zeros((test_in_shape[0], train_in_shape[0]), dtype='float')\n# print(\"shape of cla_train:\",np.shape(cal_train_kernel))\n# print(\"shape of cla_test:\",np.shape(cal_test_kernel))\n\nfor m in range(100):\n\tprint(\"Iteration:\",m)\n\ttest_features, train_features = main(X,Xtest,7)\n\tcal_train_kernel += k(train_features, train_features) \n\tcal_test_kernel += k(test_features, train_features)\n\ncal_test_kernel /= 100\ncal_train_kernel /= 100\n# print(cal_train_kernel)\n# print(np.shape(X))\nclf = SVC(C=.05,kernel='linear')\nclf.fit(cal_train_kernel,y.ravel())\nprint(clf.score(cal_train_kernel,y.ravel()))\nprint(clf.score(cal_test_kernel,ytest.ravel()))\n","repo_name":"Saket97/Mondrian-Processes","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35407969239","text":"import csv,re\n\ndef main():\n # request_for_api(import_address())\n 
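# Only the parsing step runs below; the API call above is left disabled, so
# main() just groups the CSV rows into pairs and prints each pair's size and
# its first row.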
import_address()\n\ndef import_address():\n address_group=[]\n address_list=[]\n with open('D:/try_debug_for_automatic_tag.csv', 'r') as csvreader:\n csv_reader = csv.reader(csvreader)\n next(csv_reader)\n for row in csv_reader:\n address_group.append(row)\n for i in range(0,len(address_group),2):\n if i%2==0:\n address_every_two_group=[]\n address_every_two_group.append(address_group[i])\n address_every_two_group.append(address_group[i+1])\n address_list.append(address_every_two_group)\n print(len(address_every_two_group))\n for m in range(0,1):\n # request_for_api(address_every_two_group[m])\n print(address_every_two_group[m])\n return address_list\n\ndef request_for_api(list_a):\n tag=\"|\"\n for i in range(0,len(list_a)):\n number=0\n for item in list_a[i]:\n number=item[2].count(tag)\n # print(item[2])\n if tag in item[2]:\n word_1=item[2].split('|')[0].split('^')[0]\n if number >=1:\n temp_word_list=[]\n for k in range(0,number+1):\n word_temp=item[2].split('|')[k].split('^')[0]\n temp_word_list.append(word_temp)\n print(temp_word_list)\n else:\n print(\"unsolved\")\n\ndef split_times(split_sentence,number):\n tag = \"|\"\n return_list=[]\n address_list=split_sentence.split(tag,number)\n i=0\n for i in range(0,number):\n eachword=address_list[i].split('^')[0]\n return_list.append(eachword)\n i+=1\n return return_list\n\nif __name__ == '__main__':\n main()\n","repo_name":"brucegai/thulac_practice","sub_path":"tag_script/automatic_tag_a_type.py","file_name":"automatic_tag_a_type.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13464156957","text":"import numpy as np\n\ndef _numerical_gradient_ndim_one(f, x):\n h = 1e-4\n grad = np.zeros_like(x)\n\n for i in range(x.size):\n temp_x = x[i]\n x[i] = temp_x + h\n func_plus_x = f(x)\n\n x[i] = temp_x - h\n func_minus_x = f(x)\n\n grad[i] = (func_plus_x - func_minus_x) / (2 * h) # 미분\n x[i] = temp_x\n\n return grad\n\ndef numerical_gradient(f, x):\n if x.ndim == 1:\n return _numerical_gradient_ndim_one(f, x)\n else:\n grade = np.zeros_like(x)\n\n for i, data in enumerate(x):\n grade[i] = _numerical_gradient_ndim_one(f, data)\n\n return grade\n\ndef gradient_descent(f, init_x, lr=0.01, step_num=100):\n x = init_x\n x_history = []\n\n grade = None\n for i in range(step_num):\n x_history.append(x.copy())\n\n grade = numerical_gradient(f, x)\n x -= lr * grade\n print(x)\n\n return x, np.array(x_history)","repo_name":"Simon-Neo/Book_BeginOfBottom_of_AI","sub_path":"Gradient.py","file_name":"Gradient.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41153510476","text":"class Solution:\n def minFallingPathSum(self, matrix: List[List[int]]) -> int:\n rows, cols = len(matrix), len(matrix[0])\n directions = [(-1, -1), (-1, 0), (-1, 1)]\n for r in range(1, rows):\n for c in range(cols):\n temp = []\n \n for dr, dc in directions:\n nr, nc = dr + r, dc + c\n if nr < 0 or nr == rows or nc < 0 or nc == cols:\n continue\n temp.append(matrix[nr][nc])\n matrix[r][c] += min(temp)\n \n return min(matrix[-1])\n ","repo_name":"tejeshreddy/competitive-programming","sub_path":"0931-minimum-falling-path-sum/0931-minimum-falling-path-sum.py","file_name":"0931-minimum-falling-path-sum.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} 
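# A self-contained sanity check for the falling-path recurrence above
# (illustrative values, invented for this note rather than taken from the
# dataset): each cell absorbs the cheapest of its up-to-three upper
# neighbours, so the best complete path is the minimum of the mutated last row.
matrix = [[2, 1, 3], [6, 5, 4], [7, 8, 9]]
rows, cols = len(matrix), len(matrix[0])
for r in range(1, rows):
    for c in range(cols):
        matrix[r][c] += min(matrix[r - 1][max(c - 1, 0):min(c + 2, cols)])
assert min(matrix[-1]) == 13  # one optimal path is 1 -> 5 -> 7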
+{"seq_id":"28131556897","text":"template = Import('template.py' ) # import du fichier template (entete, pieds de page...)\nconnexion = Import('gestion_session.py')\ncompteur = Import('compteur.py')\nexploitant = Import('Exploitant.py')\nevenement = Import('evenement.py')\nimport json\n\ndef index(error=''):\n ret=template.afficherHautPage(error, titre='Voir le réseau')\n ret += corps_page()\n ret += template.afficherBasPage()\n return ret\n\ndef corps_page():\n html = '''\n
    \n
    \n

    Le réseau d'irrigation

    \n
    \n \n
    \n
    \n \n \n
    \n
    \n '''\n return html\n\ndef detail(id_compteur=1):\n id_compteur = int(id_compteur)\n selected_comp = compteur.Compteur(id_compteur)\n path = '../images/Compteurs/'\n photo = path+str(selected_comp.photo) if selected_comp.photo else path+'pompe.png'\n nom = selected_comp.nom if selected_comp.nom else 'Donnée manquante'\n latitude = selected_comp.lat if selected_comp.lat else 'Donnée manquante'\n longitude = selected_comp.lon if selected_comp.lon else'Donnée manquante'\n altitude = str(selected_comp.altitude) + ' m' if selected_comp.altitude else'Donnée manquante'\n\n Ids_ex = exploitant.Exploitant.get_compteur_exploitants_id(id_compteur)\n exploits = '
    '\n for id_ex in Ids_ex:\n name = exploitant.Exploitant(id_ex).nom\n exploits += ' - ' + name + '
    '\n conjug = '' if len(Ids_ex)==1 else 's'\n\n html='''\n\n \n
    \n \"pas\n

    Nom : {1}

    \n

    Position : {2}, {3}

    \n

    Altitude : {4}

    \n

    Exploitant{5} : {6}

    \n

    Evenements :

    \n '''.format(photo,nom,latitude,longitude,altitude,conjug,exploits)\n html+=make_event_article(id_compteur)\n html+='''\n
    \n
    '''\n return html\n\ndef compteurs_list():\n compteur_list = compteur.Compteur.get_compteurs_id(0)\n html=\"\"\"\n \"\n return html\n\n\ndef make_event_article(id_compteur):\n events_list = evenement.Evenement.get_event_from_compteurid(id_compteur)\n html = \"\"\n for eventid in reversed(events_list):\n event = evenement.Evenement(eventid[0])\n createur = exploitant.Exploitant(event.createur)\n html+=\"\"\"\n
    \n
    {0} a signalé :
    \n {1}
    \n \"pas\n
    \n \"\"\".format(createur.nom, event.descriptif, event.photo)\n return html\n\n\n\ndef get_json_compteurs(ex=not None):\n if ex and \"login\" in Session(): #une recherche ciblée necessite d'être connecté pour aboutir\n id_ex = Session()[\"Id_exploitant\"]\n else:\n id_ex = 0\n list_compteurs = compteur.Compteur.get_compteurs_id(id_ex)\n return compteurs_to_json(list_compteurs)\n\ndef compteurs_to_json(list_compteurs):\n dico = {}\n dico[\"type\"]=\"FeatureCollection\"\n list_json = []\n for id_com in list_compteurs:\n current_com = compteur.Compteur(id_com)\n altitude = current_com.altitude if current_com.altitude != None else 'n/a'\n if current_com.lon != None and current_com.lat != None:\n dico_current = {}\n dico_current[\"type\"] = \"Feature\"\n dico_current[\"properties\"] = {\"id\":current_com.id,\"nom\":current_com.nom,\"altitude\": altitude}\n dico_current[\"geometry\"]= {\"type\": \"Point\", \"coordinates\": [current_com.lon, current_com.lat]}\n list_json.append(dico_current)\n dico[\"features\"] = list_json\n objects = json.dumps(dico)\n return objects\n\n\ndef get_event(id_compteur):\n myevent = evenement.Evenement()\n myevent.load()\n\n\n\ndef traiterFormulaireConnexion(choix, login='',password=''):\n return connexion.Connexion(index, choix, login, password)\n","repo_name":"Fabien-B/Web_ASA_Sourdoire","sub_path":"www/page_reseau.py","file_name":"page_reseau.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"5849127510","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 1 16:08:04 2022\r\n535 simulation project\r\n@author: XU Yang\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom shapely.geometry import Polygon, Point\r\nfrom data import *\r\n\r\nrng1 = np.random.RandomState(42)\r\nrng2 = np.random.RandomState(44)\r\n\r\n#Defining the randomization generator\r\ndef polygon_random_points(poly, num_points):\r\n min_x, min_y, max_x, max_y = poly.bounds\r\n points = []\r\n while len(points) < num_points:\r\n random_point = Point([rng1.uniform(min_x, max_x), rng1.uniform(min_y, max_y)])\r\n if (random_point.within(poly)):\r\n points.append(random_point)\r\n return points\r\n\r\n\r\nev_set = []\r\nfor i in range(len(poly_set)):\r\n points = polygon_random_points(poly_set[i], ev_num[i])\r\n ev_regional = []\r\n # save the EV coordinates in the 1st region\r\n for p in points:\r\n ev_regional.append([p.x, p.y])\r\n ev_regional = np.array(ev_regional)\r\n # Then save all the EV coordinates\r\n ev_set.append(ev_regional)\r\n\r\n# np.savetxt(\"ev_locations.csv\", ev_set, delimiter=\",\", fmt='%s')\r\n\r\n# calculate distance\r\ndist = []\r\n\r\nfor ev in ev_set:\r\n num_ev = ev.shape[0]\r\n for i in range(num_ev):\r\n for j in range(len(cs_num)):\r\n dist.append(np.sqrt((ev[i,0] - cs_locations[j][0])**2 + (ev[i,1] - cs_locations[j][1])**2))\r\ndist = np.array(dist)\r\n\r\n# rank the distances to CS for each EV\r\nranks = []\r\nidx = []\r\nfor i in range(total_ev):\r\n sort = np.sort(dist[i * len(cs_num) : (i+1) * len(cs_num)])\r\n sort_idx = np.argsort(dist[i * len(cs_num) : (i+1) * len(cs_num)])\r\n ranks.append(sort) # each row contains the distance information for each EV\r\n idx.append(sort_idx) # each row contains the index of shortest CS for each EV \r\n \r\n# determine the likelihood using the sorted distances\r\np_dist = np.zeros((total_ev, len(cs_num)))\r\nfor j in range(total_ev):\r\n for i in range(0, 4):\r\n p_dist[j, idx[j][i]] = 
rng2.uniform(0.75, 0.95)\r\n for i in range(4, 8):\r\n p_dist[j, idx[j][i]] = rng2.uniform(0.65, 0.85)\r\n for i in range(8, 12):\r\n p_dist[j, idx[j][i]] = rng2.uniform(0.55, 0.75)\r\n for i in range(12, 16):\r\n p_dist[j, idx[j][i]] = rng2.uniform(0.45, 0.65)\r\n for i in range(16, 30):\r\n p_dist[j, idx[j][i]] = 0 # If the CS is too far, EV won't go there\r\n \r\n# The integrated decision matrix\r\np_cs_num = 1 - 1 / (np.array(cs_num) + 1)\r\n# p_final = p_dist + cs_num * rng2.uniform(0.0008, 0.0012)\r\np_final = p_dist * p_cs_num\r\n\r\n\r\n# sorted_p = p_final.sort(axis=1)\r\nsorted_p_idx = p_final.argmax(axis=1) # get the index of CS for each EV\r\n\r\ncount_ev = []\r\n# count number of EVs for each CS\r\nfor i in range(len(cs_num)):\r\n count_ev.append(list(sorted_p_idx).count(i))\r\nprint(count_ev)\r\n\r\n# calculate the ratio of EV and CS\r\nratio = np.array(count_ev) * (1 / (np.array(cs_num) + 1))\r\nprint(np.round(ratio, 2))\r\n\r\n# atmp1 = [] #select 20% EVs from the total EVs\r\n# # Simulation start\r\n# for i in range(len(ev_set)):\r\n# atmp1.append(np.floor(ev_set[i].shape[0] * 0.2))\r\n","repo_name":"yangxu4/EV-Charging-Simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"24663928834","text":"from Users.models import Profile\n# from Users.models import Developer\nfrom django.contrib.auth.models import AnonymousUser, User\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.core.urlresolvers import reverse\n\n\n# Game, Category and Purchase models. Since categories are a preset, it's easier developing a category.\nclass Category(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50, unique=True)\n\n def to_json_dict(self):\n res = {\n 'id': self.id,\n 'name': self.name}\n return res\n\n def __str__(self):\n # Alberto: Luigi pay attention! You MUST return a string, not an integer. 
I've spent\n # 20 minutes to figure this out.\n #return self.id\n return str(self.name)\n\n\nclass Game(models.Model):\n \"\"\"\n Developer can delete a Game => an Order shouldn't be affected, but PlayedMatch and SavedGame have a ForeignKey\n on Game => ON CASCADE DELETE\n \"\"\"\n\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=80, unique=True)\n description = models.TextField()\n url = models.URLField(unique=True)\n price = models.FloatField(default=0)\n publicationDate = models.DateTimeField(default=timezone.now)\n logo = models.URLField(default=\"http://www.yourimage.com\") # URL of the image of the game\n popularity = models.IntegerField(default=0)\n _category = models.ForeignKey('Category', null=False)\n _developer = models.ForeignKey('Users.Profile', null=False)\n\n def __str__(self):\n return str(self.name)\n\n def to_json_dict(self, user=None):\n res = {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'url': reverse(\"play_game\", kwargs={'game_id': self.id}),\n 'price': self.price,\n 'publicationDate': str(self.publicationDate),\n 'category_id': self._category.id,\n 'category': self._category.name,\n 'popularity': self.popularity,\n 'logo': self.logo,\n 'developer': self._developer.user.username,\n 'leaderboard_url': reverse(\"leader_board_game\", kwargs={'game_id': self.id})\n }\n\n # Let's emulate a middleware over here...\n if user is not None and isinstance(user, User) and user.is_authenticated():\n owned = False\n o = user.profile._ownedGames.filter(id=self.id)\n if o.count()>0:\n res['owned'] = True\n else:\n res['owned'] = False\n\n return res\n\n\ndef get_default_yw():\n return timezone.now().date().strftime(\"%Y%U\")\n\n\ndef get_default_ym():\n return timezone.now().date().strftime(\"%Y%m\")\n\n\ndef get_default_y():\n return timezone.now().date().strftime(\"%Y\")\n\ndef get_default_ymd():\n return timezone.now().date().strftime(\"%Y%m%d\")\n\n\nclass Order(models.Model):\n # PaymentId\n id = models.AutoField(primary_key=True)\n\n # Buyer\n _player = models.ForeignKey('Users.Profile', null=False)\n\n # Items bought\n _games = models.ManyToManyField('Game', default=None, blank=True)\n\n # Represents the total of the order\n total = models.FloatField(default=0, null=False)\n\n # When the order was submitted\n orderDate = models.DateTimeField(default=timezone.now, null=False)\n orderDateYMD = models.CharField(max_length=8, default=get_default_ymd, null=False)\n orderDateYW = models.CharField(max_length=6, default=get_default_yw, null=False)\n orderDateYM = models.CharField(max_length=6, default=get_default_ym, null=False)\n orderDateY = models.CharField(max_length=4, default=get_default_y, null=False)\n\n # When the order was paid (if it was)\n paymentDate = models.DateTimeField(default=None, null=True)\n\n # Reference provided by the payment system\n paymentRef = models.IntegerField(null=True, default=0) # TODO: check this. What kind of refs are we expecting? Validation? 
Unique?\n\n # can be pending/success/error/cancel\n status = models.CharField(max_length=10, null=False, default=\"pending\")\n\n\n def __str__(self):\n #TODO: what should we return when printing an order?\n return str(self.total)\n","repo_name":"luispdm/GameStore","sub_path":"GameStore/Store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"27183689579","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\nimport sys\nimport os\n\ncwd = os.getcwd()\nfilepath = cwd +\"/\"+\"sample-data.csv\"\n\nds = pd.read_csv(filepath)\nitem_lst = []\nfor item_det in ds['description']:\n\titem_lst.append(item_det)\nnewLst = []\nfor item in item_lst:\t\n\tnewLst.append(item.split(' - ')[0])\nds['itemName'] = newLst\n\ntf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0, stop_words='english')\ntfidf_matrix = tf.fit_transform(ds['description'])\n\ncosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)\n\nresults = {}\n\nfor idx, row in ds.iterrows():\n\tsimilar_indices = cosine_similarities[idx].argsort()[:-100:-1]\n\tsimilar_items = [(cosine_similarities[idx][i], ds['id'][i]) for i in similar_indices]\n\n\tresults[row['id']] = similar_items[1:]\n\t\ndef item(id):\n\treturn ds.loc[ds['id'] == id]['itemName'].tolist()[0]\n\ndef recommend(item_id, num):\n\tprint(\"***********************************\")\n\tprint(\"Recommending \" + str(num) + \" products similar to \" + item(item_id) + \"as below::\")\n\tprint(\"-----------------------------------\")\n\trecs = results[item_id][:num]\n\tfor rec in recs:\n\t\tprint(item(rec[1]) + \" (score:\" + str(rec[0]) + \")\")\n\nif __name__ == \"__main__\":\n\titem_id = input(\"Enter the itemID ranging between 1 to 500:\")\n\tnum = (input(\"Enter the number of recommendations needed to be displayed:\"))\n\trecommend(int(item_id), int(num))\n","repo_name":"mish1102/RecommendationEngine-ContentBased","sub_path":"recommender_system.py","file_name":"recommender_system.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"41187886153","text":"from .common import * # noqa\n\nALLOWED_HOSTS = [\n \"www.djangoproject.localhost\",\n \"djangoproject.localhost\",\n \"docs.djangoproject.localhost\",\n \"dashboard.djangoproject.localhost\",\n] + SECRETS.get(\"allowed_hosts\", [])\n\nLOCALE_MIDDLEWARE_EXCLUDED_HOSTS = [\"docs.djangoproject.localhost\"]\n\nDEBUG = True\nTHUMBNAIL_DEBUG = DEBUG\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"trololololol\",\n },\n \"docs-pages\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"docs-pages\",\n },\n}\n\nCSRF_COOKIE_SECURE = False\n\nEMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\nMEDIA_ROOT = str(DATA_DIR.joinpath(\"media_root\"))\n\nSESSION_COOKIE_SECURE = False\n\nSTATIC_ROOT = str(DATA_DIR.joinpath(\"static_root\"))\n\n# Docs settings\nDOCS_BUILD_ROOT = DATA_DIR.joinpath(\"djangodocs\")\n\n# django-hosts settings\n\nPARENT_HOST = \"djangoproject.localhost:8000\"\n\n# django-push settings\n\nPUSH_SSL_CALLBACK = False\n\n# Enable optional components\n\nif DEBUG:\n try:\n import debug_toolbar # NOQA\n except ImportError:\n pass\n else:\n 
INSTALLED_APPS.append(\"debug_toolbar\")\n INTERNAL_IPS = [\"127.0.0.1\"]\n MIDDLEWARE.insert(\n MIDDLEWARE.index(\"django.middleware.common.CommonMiddleware\") + 1,\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n )\n MIDDLEWARE.insert(\n MIDDLEWARE.index(\"debug_toolbar.middleware.DebugToolbarMiddleware\") + 1,\n \"djangoproject.middleware.CORSMiddleware\",\n )\n\nSILENCED_SYSTEM_CHECKS = [\"captcha.recaptcha_test_key_error\"]\n","repo_name":"django/djangoproject.com","sub_path":"djangoproject/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":1791,"dataset":"github-code","pt":"81"} +{"seq_id":"18323580007","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException\nfrom time import time, sleep\nfrom selenium.webdriver.chrome.options import Options\nfrom random import choice\nimport smtplib as sm\n\n# ======================= constants ====================== #\nsite_fast = 'https://fast.com/'\nsite_twitter = 'https://twitter.com/'\nemail = 'epbfpm@gmail.com'\npwd = '^pf3ed+j=B%TAth'\nuser = 'epbfpm'\nchrome_driver = \"G:/My Drive/Programming/z - tools/chromedriver.exe\"\nreceiver = 'matmourase@gmail.com'\nreceiver2 = \"meucu@gmail.com\"\nsmtp, gmail, pwd2 = 'smtp.gmail.com', 'elder.estuda.voce.recebe.email@gmail.com', 'ndwhhxioybtizozb'\n\n# =================== keep window open =================== #\nchrome_options = Options()\nchrome_options.add_experimental_option(\"detach\", True)\n\n\n# ======================= functions ======================= #\nclass TwitterBot:\n def __init__(self, chrome_driver):\n self.driver = webdriver.Chrome(options=chrome_options, service=Service(chrome_driver))\n self.up = 0\n self.down = 0\n self.frase_de_hoje = ''\n self.frases = []\n\n def choose_phrase(self):\n # == == == == == == == == == == == = frases == == == == == == == == == == == == = #\n try:\n with open('txt.txt', encoding='utf-8') as data:\n self.frases = data.readlines()\n self.frase_de_hoje = choice(self.frases)\n self.frases.remove(self.frase_de_hoje)\n\n with open(\"txt.txt\", \"w\", encoding='utf-8') as f:\n for line in self.frases:\n f.write(line)\n print(self.frase_de_hoje)\n except IndexError:\n self.frase_de_hoje = ('Foi um prazer desmotivá-lo. 
Já estou com saudades')\n pass\n # self.frase_de_hoje = \"@matmourase Mensagem desmotivacional 3/30: Motivação faz você começar e o hábito faz você DESISTIR.\"\n\n def tweet(self, site):\n # ======================================================== #\n # LOGIN TO TWITTER #\n # ======================================================== #\n self.driver.get(url=site)\n sleep(2)\n # ====================== click login ===================== #\n self.driver.find_element(By.XPATH,\n '//*[@id=\"layers\"]/div/div[1]/div/div/div/div/div/div/div/div[1]/a/div/span/span').click()\n sleep(2)\n # ================ click sign in with emal =============== #\n login = self.driver.find_element(By.XPATH,\n '//*[@id=\"layers\"]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div/div/div/div[5]/label/div/div[2]/div/input')\n login.click()\n login.send_keys(email)\n login.send_keys(Keys.ENTER)\n sleep(2)\n\n try:\n # ================= try to input password ================ #\n login = self.driver.find_element(\n By.XPATH,\n '//*[@id=\"layers\"]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div[1]/div/div/div[3]/div/label/div/div[2]/div[1]/input')\n login.click()\n login.send_keys(pwd)\n login.send_keys(Keys.ENTER)\n except NoSuchElementException:\n # ============= pass strange behaviour prompt ============ #\n login = self.driver.find_element(\n By.XPATH,\n '//*[@id=\"layers\"]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div[1]/div/div[2]/label/div/div[2]/div/input')\n login.click()\n login.send_keys(user)\n login.send_keys(Keys.ENTER)\n # ==================== input password ==================== #\n sleep(2)\n login = self.driver.find_element(\n By.XPATH,\n '//*[@id=\"layers\"]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div[1]/div/div/div[3]/div/label/div/div[2]/div[1]/input')\n login.click()\n login.send_keys(pwd)\n login.send_keys(Keys.ENTER)\n\n # ======================================================== #\n # actualy tweet #\n # ======================================================== #\n\n sleep(2)\n input = self.driver.find_element(By.XPATH, \"//div[contains(@aria-label, 'Tweet text')]\")\n input.click()\n sleep(1)\n msg = f'@matmourase Mensagem desmotivacional {30 - len(self.frases)}/30: {self.frase_de_hoje}'\n input.send_keys(\n msg)\n input = self.driver.find_element(\n By.XPATH,\n '//*[@id=\"react-root\"]/div/div/div[2]/main/div/div/div/div/div/div[3]/div/div[2]/div[1]/div/div/div/div[2]/div[3]/div/div/div[2]/div/div/span/span').click()\n ok = False\n self.send(msg)\n\n def run(self):\n self.choose_phrase()\n self.tweet(site_twitter)\n\n def send(self, msg):\n with sm.SMTP(smtp, port=587) as mail:\n mail.starttls()\n mail.login(user=gmail, password=pwd2)\n mail.sendmail(from_addr=gmail, to_addrs=receiver, msg=f'Subject: Desmotive-se!\\n\\n{msg.encode(\"utf-8\")}')\n print(msg)\n\n\nbot = TwitterBot(chrome_driver)\nbot.run()\n# msg = '@matmourase Mensagem desmotivacional 5/30: Cabe a você escolher aquilo de lhe aflige.'\n# bot.send(msg)","repo_name":"epbfpm/macaca.py","sub_path":"macaca.py","file_name":"macaca.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6232619197","text":"#######\n# This provides examples of Dash HTML Components.\n# Feel free to add things to it that you find useful.\n######\nimport dash\nimport dash_html_components as html\n\napp = dash.Dash()\n\napp.layout = html.Div([\n 'This is the outermost Div',\n 
html.Div(\n 'This is an inner Div',\n style={'color':'blue', 'border':'2px blue solid', 'borderRadius':5,\n 'padding':10, 'width':220}\n ),\n html.Div(\n 'This is another inner Div',\n style={'color':'green', 'border':'2px green solid',\n 'margin':10, 'width':220}\n ),\n],\n# this styles the outermost Div:\nstyle={'width':500, 'height':200, 'color':'red', 'border':'2px red dotted'})\n\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"Pierian-Data/Plotly-Dashboards-with-Dash","sub_path":"2-05-DashComponents/HTMLComponents.py","file_name":"HTMLComponents.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":822,"dataset":"github-code","pt":"81"} {"seq_id":"6545471498","text":"import os\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom animate import animate\n\ndef cut_length(path_input_npy, path_output, image_start, image_end):\n # load every .npy frame, then keep only the [image_start:image_end] slice\n Ytemp = []\n files = os.listdir(path_input_npy)\n for file in tqdm(files):\n if file.endswith('.npy'):\n img_array = np.load(path_input_npy+\"/\"+file)\n Ytemp.append([img_array])\n Y = Ytemp[image_start:image_end]\n return Y\n\n\nif __name__ == \"__main__\":\n\n experiment = 'experiment_132'\n\n input_data_folder = '/home/jeremy/Documents/Postdoc/Projects/Memory/Computational_Principles_of_Memory/optopatch/data/2020_03_02'\n path_input_npy = input_data_folder + '/{}/raw_data'.format(experiment)\n path_output_data = input_data_folder + '/{}/'.format(experiment)\n path_output_images_denoised = input_data_folder + '/{}/denoised_images'.format(experiment)\n\n cut_data = cut_length(path_input_npy, path_output_data, 602, 1370)\n","repo_name":"jeremyforest/whole_optic_analysis_pipeline","sub_path":"OPTIMAS/cut_video_length.py","file_name":"cut_video_length.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"36409550302","text":"import math\nfrom .. 
import lut, manip, sort, printing\n\n\ndef _cfour_exp(e):\n '''Formats an exponent for CFour'''\n return e.replace('E', 'D') + ' '\n\n\ndef _cfour_coef(c):\n '''Formats a coefficient for CFour'''\n return c.replace('E', 'D') + ' '\n\n\ndef _aces_exp(e):\n '''Formats an exponent for AcesII'''\n\n e = float(e)\n # Some basis sets have negative exponents???\n mag = int(math.log(abs(e), 10))\n\n mag = max(mag, 1)\n\n # Make room for the negative sign\n if e < 0.0:\n mag += 1\n\n # Number of decimal places to show\n ndec = min(7, 14 - 2 - mag)\n\n fmtstr = '{{:14.{}f}}'.format(ndec)\n s = fmtstr.format(e)\n\n # Trim a single trailing zero if there is one\n # and our string takes up all 14 characters\n if s[0] != ' ' and s[-1] == '0':\n s = ' ' + s[:-1]\n\n return s\n\n\ndef _aces_coef(c):\n '''Formats a coefficient for AcesII'''\n c = float(c)\n return '{:10.7f} '.format(c)\n\n\ndef _print_columns(data, ncol):\n s = ''\n for i in range(0, len(data), ncol):\n s += ''.join(data[i:i + ncol]) + '\\n'\n return s\n\n\ndef _write_genbas_internal(basis, exp_formatter, coef_formatter):\n # Uncontract all, then make general\n basis = manip.make_general(basis, False, True)\n basis = sort.sort_basis(basis, False)\n\n # Elements for which we have electron basis\n electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]\n\n # Elements for which we have ECP\n ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]\n\n s = '\\n'\n\n if electron_elements:\n # Electron Basis\n for z in electron_elements:\n data = basis['elements'][z]\n sym = lut.element_sym_from_Z(z).upper()\n nshell = len(data['electron_shells'])\n\n s += '{}:{}\\n'.format(sym, basis['name'])\n s += basis['description'] + '\\n'\n s += '\\n'\n s += '{:>3}\\n'.format(nshell)\n\n s_am = ''\n s_ngen = ''\n s_nprim = ''\n for sh in data['electron_shells']:\n s_am += '{:>5}'.format(sh['angular_momentum'][0])\n s_ngen += '{:>5}'.format(len(sh['coefficients']))\n s_nprim += '{:>5}'.format(len(sh['exponents']))\n\n s += s_am + '\\n'\n s += s_ngen + '\\n'\n s += s_nprim + '\\n'\n s += '\\n'\n\n for shell in data['electron_shells']:\n exponents = [exp_formatter(x) for x in shell['exponents']]\n coefficients = [[coef_formatter(x) for x in y] for y in shell['coefficients']]\n coefficients = list(map(list, zip(*coefficients)))\n\n s += _print_columns(exponents, 5) + '\\n'\n for c in coefficients:\n s += _print_columns(c, 7)\n s += '\\n'\n\n # Write out ECP\n if ecp_elements:\n s += '\\n\\n! 
Effective core Potentials\\n'\n\n for z in ecp_elements:\n data = basis['elements'][z]\n sym = lut.element_sym_from_Z(z).upper()\n max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])\n max_ecp_amchar = lut.amint_to_char([max_ecp_am]).lower()\n\n # Sort lowest->highest, then put the highest at the beginning\n ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])\n ecp_list.insert(0, ecp_list.pop())\n\n s += '*\\n'\n s += '{}:{}\\n'.format(sym, basis['name'])\n s += '# ' + basis['description'] + '\\n'\n s += '*\\n'\n s += ' NCORE = {} LMAX = {}\\n'.format(data['ecp_electrons'], max_ecp_am)\n\n for pot in ecp_list:\n rexponents = pot['r_exponents']\n gexponents = pot['gaussian_exponents']\n coefficients = pot['coefficients']\n\n am = pot['angular_momentum']\n amchar = lut.amint_to_char(am).lower()\n\n if am[0] == max_ecp_am:\n s += '{}\\n'.format(amchar)\n else:\n s += '{}-{}\\n'.format(amchar, max_ecp_amchar)\n\n point_places = [6, 18, 25]\n s += printing.write_matrix([*coefficients, rexponents, gexponents], point_places)\n #for p in range(len(rexponents)):\n # s += '{} {} {};\\n'.format(gexponents[p], rexponents[p], coefficients[0][p])\n s += '*\\n'\n return s\n\n\ndef write_cfour(basis):\n '''Converts a basis set to cfour\n '''\n\n # March 2019\n # Format determined from http://slater.chemie.uni-mainz.de/cfour/index.php?n=Main.NewFormatOfAnEntryInTheGENBASFile\n\n return _write_genbas_internal(basis, _cfour_exp, _cfour_coef)\n\n\ndef write_aces2(basis):\n '''Converts a basis set to cfour\n '''\n\n # March 2019\n # Format determined from http://slater.chemie.uni-mainz.de/cfour/index.php?n=Main.OldFormatOfAnEntryInTheGENBASFile\n\n return _write_genbas_internal(basis, _aces_exp, _aces_coef)\n","repo_name":"MolSSI-BSE/basis_set_exchange","sub_path":"basis_set_exchange/writers/genbas.py","file_name":"genbas.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"81"} +{"seq_id":"72807845065","text":"from collections import defaultdict\nimport itertools\n\n\ndef valid_twin(weight1, weight2):\n distance = [2, 3, 4]\n for i in range(3):\n for j in range(3):\n if weight1 * distance[i] == weight2 * distance[j]:\n # print(weight1, weight2)\n return True\n return False\n\n\ndef change_twin(weights, diary):\n distance = [2, 3, 4]\n for i in range(len(weights)):\n for dist in distance:\n diary[dist * weights[i]].append(i)\n\n\ndef solution(weights):\n diary = defaultdict(list)\n\n change_twin(weights, diary)\n\n answer = set()\n for key, value in diary.items():\n if len(value) >= 2:\n\n for i in itertools.combinations(value, 2):\n answer.add(i)\n\n return len(answer)","repo_name":"fineman999/Algorithm","sub_path":"Programmers/Level2/PracticeQuestion/siso.py","file_name":"siso.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23709309876","text":"import random\nimport os\nimport json\nimport requests\nfrom datetime import date\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\nfrom django.contrib.auth import login as auth_login, logout as auth_logout, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import F, Q\nfrom django.http import HttpResponseRedirect, HttpRequest\nfrom django.shortcuts import render, redirect\nfrom django.utils import timezone\n\nfrom friendship.models import 
Friend, FriendshipRequest\nfrom simple_email_confirmation.models import EmailAddress\nfrom six.moves import urllib\nimport shopify\n\nfrom .choices import *\nfrom .email import *\nfrom .forms import *\nfrom .models import *\nfrom .redeem_points import *\n\ndef index(request):\n if not request.user.is_anonymous:return HttpResponseRedirect(reverse('challenge_list'))\n return render(request,'index.html')\n\ndef register(request,friend_id,organization_id):\n # if valid registration form, create user and send email confirmation.\n # user can't log in till email confirmed\n registered = False\n email = False\n if request.method == 'POST':\n form = UserForm(request.POST)\n if form.is_valid():\n email = request.POST.get('email')\n user = create_user(request,email)\n send_confirmation_email(request,user) # from email.py\n extra_user_fields(user,friend_id)\n registered = True\n else:\n form = UserForm(organization_id=organization_id)\n return render(request, 'register.html',{\n 'form':form,\n 'registered':registered,\n 'email':email,\n })\ndef confirm_email(request,user_id,confirmation_key):\n # check if confirmation key matches users email key,\n # confirm email if it does.\n error = False\n try:\n user = User.objects.get(id=user_id)\n email = EmailAddress.objects.get(email=user.email)\n if confirmation_key == email.key:\n user.confirm_email(confirmation_key)\n auth_login(request, user)\n else:\n error = confirmation_error(request,user) # from email.py\n except ObjectDoesNotExist:\n error = confirmation_error(request,user) # from email.py\n return render(request, 'confirm_email.html',{'error':error})\ndef resend_email(request,email):\n # reset users confirmation key and resend email\n email_address = EmailAddress.objects.get(email=email)\n email_address.reset_confirmation()\n user = User.objects.get(email=email)\n send_confirmation_email(request,user) # from email.py\n request.session['sent'] = True # to have email sent pop up on login page\n return HttpResponseRedirect(reverse('login'))\ndef login(request):\n error = False\n if request.method == 'POST':\n error = check_login(request)\n if not error: return HttpResponseRedirect(reverse('challenge_list'))\n\n # used for resend_email succesfully sent pop up\n try:\n sent = request.session['sent']\n except KeyError:\n sent = False\n return render(request,'login.html',{'error':error,'sent':sent})\ndef logout(request):\n auth_logout(request)\n return HttpResponseRedirect(reverse('login'))\n\n@login_required\ndef challenge_list(request):\n user = request.user\n try:\n main_challenge = Challenge.objects.get(is_main_challenge=True)\n except ObjectDoesNotExist:\n main_challenge = False\n try:\n bonus_challenge = Challenge.objects.get(is_bonus_challenge=True)\n except ObjectDoesNotExist:\n bonus_challenge = False\n return render(request,'challenge_list.html',{\n 'main':main_challenge,\n 'bonus':bonus_challenge,\n })\n\n@login_required\ndef challenge_detail(request,challenge_id):\n user = request.user\n challenge = Challenge.objects.get(pk=challenge_id)\n feelings = [random.choice(GROUP1_CHOICES),random.choice(GROUP2_CHOICES),random.choice(GROUP3_CHOICES)]\n if request.method == 'POST':\n form = LocationForm(request.POST)\n if form.is_valid():\n feeling = request.POST.get('feelingChoice')\n complete_challenge(user,challenge,form,feeling)\n return HttpResponseRedirect(reverse('challenge_list'))\n else:\n form = LocationForm()\n return render(request,'challenge_detail.html',{\n 'challenge':challenge,\n 'form':form,\n 'feelings':feelings,\n 
})\n\n@login_required\ndef accept_challenge(request,challenge_id):\n user = request.user\n challenge = Challenge.objects.get(pk=challenge_id)\n if not Challenge_Status.objects.filter(user=user,challenge=challenge).exists():\n Challenge_Status.objects.create(user=user,challenge=challenge,status=2)\n return HttpResponseRedirect(reverse('challenge_list'))\n\n@login_required\ndef act_entry(request):\n user = request.user\n if request.method == 'POST':\n form = ActForm(request.POST)\n if form.is_valid():\n complete_act(user,form)\n return HttpResponseRedirect(reverse('challenge_list'))\n else:\n form = ActForm()\n return render(request,'act_entry.html',{\n 'form':form,\n })\n\n@login_required\ndef my_activity(request):\n user = request.user\n activities = user.get_activities()\n return render(request,'my_activity.html',{\n 'user':user,\n 'activities':activities,\n })\n\n@login_required\ndef friend_request(request):\n user = request.user\n sent = False\n already_friends = False\n if request.method == 'POST':\n email = request.POST.get('email')\n msg = 'Hi! ' + user.get_full_name() + ' would like to add you!'\n try:\n other_user = User.objects.get(email=email)\n if not Friend.objects.are_friends(user, other_user) == True:\n Friend.objects.add_friend(\n request.user, # sender\n other_user, # recipient\n message=msg\n )\n sent = True\n else:\n already_friends = True\n except ObjectDoesNotExist:\n send_request_email(request,user,email)\n sent = True\n return render(request,'friend_request.html',{\n 'sent':sent,\n 'already':already_friends,\n })\n\n@login_required\ndef friend_activity(request):\n user = request.user\n friends = Friend.objects.friends(user)\n requests = Friend.objects.unrejected_requests(user=user)\n return render(request,'friend_activity.html',{\n 'friends':friends,\n 'requests':requests,\n })\n\n@login_required\ndef accept_reject_request(request,request_id,accept):\n friend_request = FriendshipRequest.objects.get(pk=request_id)\n friend_request.accept() if int(accept) == 1 else friend_request.reject()\n return HttpResponseRedirect(reverse('friend_activity'))\n\n@login_required\ndef remove_friend(request,friend_id):\n user = request.user\n friend = User.objects.get(pk=friend_id)\n Friend.objects.remove_friend(user, friend)\n return HttpResponseRedirect(reverse('friend_activity'))\n\n@login_required\ndef redeem_points(request):\n user = request.user\n error = False\n if request.method == 'POST':\n points = request.POST.get('points')\n error = check_points(points,user)\n if not error:\n cash = '%.2f' % (int(points)/100)\n discount_code = create_discount(cash)\n if discount_code:\n return HttpResponseRedirect(reverse('redeem_confirmation',kwargs={'discount_code':discount_code,'value':cash,'points':points}))\n else:\n error = 'Something strange has happened, contact an administrator!'\n return render(request, 'redeem_points.html',{\n 'user':user,\n 'error':error\n })\n\n@login_required\ndef redeem_confirmation(request,discount_code,value,points):\n user = request.user\n User.objects.filter(pk=request.user.id).update(redeemed_points=F('redeemed_points')+points)\n send_redeem_points_email(user,discount_code,value,points)\n return render(request, 'redeem_confirmation.html',{\n 'value':value,\n 'points':points,\n })\n########## HELPERS ###########\n\ndef check_login(request):\n email = request.POST.get('email')\n password = request.POST.get('password')\n if EmailAddress.objects.filter(email=email).exists():\n user = User.objects.get(email=email)\n if user.check_password(password):\n if 
user.is_confirmed:\n auth_login(request, user)\n else:\n return confirmation_error(request,user)\n else:\n return incorrect_pass() # from email.py\n else:\n return email_does_not_exist() # from email.py\n return False\ndef create_user(request,email):\n user = User.objects.create_user(\n email,\n email=email,\n first_name=request.POST.get('first_name'),\n last_name=request.POST.get('last_name'),\n zip_code=request.POST.get('zip_code'),\n organization=Organization.objects.get(pk=request.POST.get('organization')),\n password=request.POST.get('password'),\n )\n return user\n\ndef extra_user_fields(user,friend_id):\n if User.objects.filter(pk=friend_id).exists():\n # if registering from friend request email\n friend = User.objects.get(pk=friend_id)\n Friend.objects.add_friend(friend,user)\n\ndef complete_challenge(user,challenge,form,feeling):\n location_form = form.save(commit=False)\n location = Location.objects.get_or_create(\n address=location_form.address, city=location_form.city,\n state=location_form.state, zip_code=location_form.zip_code\n )[0]\n challenge_status = Challenge_Status.objects.filter(user=user,challenge=challenge)\n challenge_status.update(status=3)\n challenge_status.update(location=location)\n challenge_status.update(feeling=feeling)\n challenge_status.update(date_completed=timezone.now())\n\ndef complete_act(user,form):\n public = form.cleaned_data['public']\n act = form.save(commit=False)\n act.user = user\n act.public = public\n act.save()\n\ndef check_points(points,user):\n if points == '':\n return 'Please fill in the number of points to redeem'\n if int(points) < 0 or int(points) > user.get_points():\n return 'Please fill in a valid number of points'\n if int(points) % 5 != 0:\n return 'Please make your points divisible by 5'\n return False\n","repo_name":"vivaelnino9/beamobile","sub_path":"bea_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"13614990724","text":"# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals # compatible with python3 unicode coding\n\nimport os\n\nfrom visual_caption.base.config.base_config import BaseConfig\n\n\nclass BaseDataConfig(BaseConfig):\n \"\"\"\n set default parameters for data config\n \"\"\"\n\n def __init__(self, model_name, mode='train'):\n super(BaseDataConfig, self).__init__()\n\n \"\"\"set default model dirs\"\"\"\n\n self.builder_batch_size = self.batch_size\n self.reader_batch_size = self.batch_size\n self.mode = mode\n\n \"\"\"set default model data dirs\"\"\"\n self.model_name = model_name\n self.model_data_dir = os.path.join(self.project_data_dir, self.model_name)\n\n self.train_data_dir = os.path.join(self.model_data_dir, \"train\")\n self.valid_data_dir = os.path.join(self.model_data_dir, \"valid\")\n self.test_data_dir = os.path.join(self.model_data_dir, \"test\")\n\n \"\"\"set default of model running\"\"\"\n self.num_epoches = 100\n\n \"\"\"Sets the default model hyperparameters.\"\"\"\n # File pattern of sharded TFRecord file containing SequenceExample protos.\n # Must be provided in training and evaluation modes.\n self.input_file_pattern = None\n # Approximate number of values per input shard. 
Used to ensure sufficient\n # mixing between shards in training.\n self.values_per_input_shard = 10000\n # Minimum number of shards to keep in the input queue.\n self.input_queue_capacity_factor = 2\n # Number of threads for prefetching SequenceExample protos.\n self.num_threads = 4\n\n # Number of threads for preprocessing. Should be a multiple of 2.\n self.num_preprocess_threads = 4\n self.output_buffer_size = 1000\n self.random_seed = 123\n","repo_name":"changquanyou/visual_to_caption","sub_path":"visual_caption/base/data/base_data_config.py","file_name":"base_data_config.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"37584241106","text":"from flask import redirect, url_for, session\nfrom flask import render_template, request\n\nfrom project import app\nfrom project.com.dao.LoginDAO import LoginDAO\nfrom project.com.vo.LoginVO import LoginVO\n\n\n@app.route('/admin/login', methods=['GET', 'POST'])\ndef adminLoadLogin():\n try:\n session.clear()\n return render_template('admin/login.html')\n except Exception as ex:\n print(ex)\n\n\n@app.route(\"/admin/validateLogin\", methods=['POST'])\ndef adminValidateLogin():\n try:\n loginUsername = request.form['loginUsername']\n loginPassword = request.form['loginPassword']\n loginVO = LoginVO()\n loginDAO = LoginDAO()\n loginVO.loginUsername = loginUsername\n loginVO.loginPassword = loginPassword\n\n loginVOList = loginDAO.validateLogin(loginVO)\n loginDictList = [i.as_dict() for i in loginVOList]\n print(loginDictList)\n lenLoginDictList = len(loginDictList)\n if lenLoginDictList == 0:\n msg = 'Username Or Password is Incorrect !'\n return render_template('admin/login.html', error=msg)\n elif loginDictList[0]['loginStatus'] == 'unactive':\n msg = 'You are BLOCKED.'\n return render_template('admin/login.html', error=msg)\n else:\n for row1 in loginDictList:\n loginId = row1['loginId']\n loginUsername = row1['loginUsername']\n loginRole = row1['loginRole']\n session['session_loginId'] = loginId\n session['session_loginUsername'] = loginUsername\n session['session_loginRole'] = loginRole\n session.permanent = True\n if loginRole == 'admin':\n return redirect(url_for('adminLoadDashboard'))\n elif loginRole == 'user':\n return redirect(url_for('userLoadDashboard'))\n except Exception as ex:\n print(ex)\n\n\n@app.route('/admin/loadDashboard', methods=['GET'])\ndef adminLoadDashboard():\n try:\n if adminLoginSession() == 'admin':\n return render_template('admin/index.html')\n else:\n return adminLogoutSession()\n except Exception as ex:\n print(ex)\n\n\n@app.route('/user/loadDashboard', methods=['GET'])\ndef userLoadDashboard():\n try:\n if adminLoginSession() == 'user':\n return render_template('user/index.html')\n else:\n return adminLogoutSession()\n except Exception as ex:\n print(ex)\n\n\n@app.route('/admin/loginSession')\ndef adminLoginSession():\n try:\n if 'session_loginId' in session and 'session_loginRole' in session:\n if session['session_loginRole'] == 'admin':\n return 'admin'\n elif session['session_loginRole'] == 'user':\n return 'user'\n print(\"<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>\")\n else:\n print(\"<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>\")\n return False\n except Exception as ex:\n print(ex)\n\n\n@app.route('/admin/blockUser')\ndef adminBlockUser():\n try:\n if adminLoginSession() == 'admin':\n loginId = request.args.get('loginId')\n loginStatus = 'unactive'\n loginVO = LoginVO()\n loginDAO = LoginDAO()\n loginVO.loginId = loginId\n 
loginVO.loginStatus = loginStatus\n loginDAO.updateLogin(loginVO)\n return redirect(url_for('adminViewUser'))\n else:\n return redirect(url_for('adminLogoutSession'))\n except Exception as ex:\n print(ex)\n\n\n@app.route('/admin/unblockUser')\ndef adminUnblockUser():\n try:\n if adminLoginSession() == 'admin':\n loginId = request.args.get('loginId')\n loginStatus = 'active'\n loginVO = LoginVO()\n loginDAO = LoginDAO()\n loginVO.loginId = loginId\n loginVO.loginStatus = loginStatus\n loginDAO.updateLogin(loginVO)\n return redirect(url_for('adminViewUser'))\n else:\n return redirect(url_for('adminLogoutSession'))\n except Exception as ex:\n print(ex)\n\n\n@app.route(\"/admin/logoutSession\", methods=['GET'])\ndef adminLogoutSession():\n try:\n session.clear()\n return redirect('/')\n # return redirect(url_for('adminLoadLogin'))\n except Exception as ex:\n print(ex)\n","repo_name":"soham2512/Agripedia","sub_path":"project/com/controller/LoginController.py","file_name":"LoginController.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24718148646","text":"########################################################################\n############################# ASSIGNMENT 1 #############################\n########################################################################\n\n# Group 3:\n# Lourdes Gutiérrez De San Miguel, \n# Max Heinze, \n# Gustav Pirich, \n# Berk Uzunonat;\n# September 5, 2023\n\n\n\n########################################################################\n################################ Task 1 ################################\n########################################################################\n\n\n# %% Importing Libraries\n\nimport os\nimport pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\n\n\n# %% Problem 1.1\n\n# !!! CHANGE THE FOLLOWING LINE TO MAKE THE CODE WORK ON YOUR DEVICE !!!\n\n# Changing the working directory\nos.chdir(\"/Users/heinzemax/Documents/GitHub/inequality_sse/assignment_1\")\n\n# Reading in the data\nincome_data = pd.read_stata(\"psid1999.dta\")\n\n# Filtering the data so that only household heads are contained.\n# Note that filtering by relationhead==10 is kind of a \"best guess\" after\n# consulting PSID codebooks available online, since the variable in question \n# is not labeled. 
Source for the \"best guess\": Page 4 of the document available at\n# https://psidonline.isr.umich.edu/documents/psid/codebook/MX19REL_codebook.pdf\nincome_data = income_data.query(\"relationhead == 10\")\n\n# %% Problem 1.2\n\n# Filtering the data set by age older than 24 and younger than 66\nincome_data = income_data.query(\"age > 24 & age < 66\")\n\n# Filtering the data set by married HHH with spouse present\nincome_data = income_data.query(\"(mls=='married' & sp_in_fu=='Spouse/Partner in FU now')\")\n\n\n# %% Problem 1.3\n\n# Filtering out all household heads who worked zero hours\nincome_data = income_data.query(\"head_hrs > 0\")\n\n# Creating a logged hourly earnings variable from yearly income and yearly hrs, \n# adding 1 to avoid taking the log of 0\nincome_data['log_hr_earnings'] = np.log((income_data['head_li'] / income_data['head_hrs']) + 1)\n\n# Computing averages by age and education\naverage_wages = income_data.groupby(['age', 'edu'])['log_hr_earnings'].mean().reset_index()\n\n\n# %% Problem 1.4\n\n# Create a scatter plot showing avg log hrly earnings by age, with education\n# levels represented as different colored dots\nplt.figure(figsize=(8,6), dpi = 300)\n\neducation_levels = average_wages['edu'].unique()\n\nfor this_education_level in education_levels:\n subset = average_wages[average_wages['edu'] == this_education_level]\n plt.scatter(subset['age'], subset['log_hr_earnings'], label=this_education_level)\n\nplt.title('Average Log Hourly Earnings by Age and Education')\nplt.xlabel('Age')\nplt.ylabel('Average Log Hourly Earnings')\nplt.legend(title = \"Education\")\nplt.grid(True, which='both', linestyle='--', linewidth=0.5)\nplt.show()\n\n\n\n########################################################################\n################################ Task 2 ################################\n########################################################################\n\n\n# %% Problem 2.1\n\n# Creating the matrix\nour_matrix = np.array(\n [\n [1, 2, 3],\n [1, 2, 3],\n [1, 2, 3],\n ]\n )\n\n# Creating the vector\nour_vector = np.array([10,20,30])\n\n# Adding the vector to each column\nresult_matrix_21 = our_matrix + our_vector[:, np.newaxis]\n\n\n# %% Problem 2.2\n\n# Creating the function as specified in the assignment\ndef add_vector_columnwise(input_matrix, input_vector):\n\n if input_matrix.shape[0] == input_vector.shape[0] :\n return input_matrix + input_vector[:, np.newaxis]\n\n else :\n raise ValueError(\"Length of vector and number of rows of matrix must coincide!\") \n\n# Create a vector that will throw an error\nour_error_vector = np.array([10, 20, 30, 40])\n\n# Use the function with appropriate inputs\nresult_matrix_22 = add_vector_columnwise(our_matrix, our_vector)\n\n# Use the function with inappropriate inputs\ntry:\n add_vector_columnwise(our_matrix, our_error_vector)\nexcept ValueError as e:\n print(f\"Error encountered: {e}\")\n\n\n# %% Problem 2.3\n\n# Creating the function as specified in the assignment\ndef check_matrices_equal(matrix_1, matrix_2) :\n \n # Check whether the dimensions match. If not, return false\n if matrix_1.shape[0] != matrix_2.shape[0] or matrix_1.shape[1] != matrix_2.shape[1]:\n return False\n \n # Check whether the elements match. 
If not, return false\n for i in range(matrix_1.shape[0]):\n for j in range(matrix_1.shape[1]):\n if matrix_1[i][j] != matrix_2[i][j]:\n return False\n \n # Otherwise, return true\n return True\n\n# Apply the function to check whether the matrices are the same\nprint(\"\\nProblem 2.3: Matrix equality check\")\nprint(check_matrices_equal(result_matrix_21, result_matrix_22))\n \n \n# %% Problem 2.4\n\n# Creating the class as specified in the assignment\nclass matrix_mod:\n def __init__(self, input_matrix):\n self.matrix = input_matrix\n def modify(self, input_vector):\n self.matrix = add_vector_columnwise(self.matrix, input_vector)\n \n# Performing the modification\nresult_matrix_24 = matrix_mod(our_matrix)\nresult_matrix_24.modify(our_vector)\n\n# Checking equality\nprint(\"\\nProblem 2.4: Matrix equality check\")\nprint(check_matrices_equal(result_matrix_21, result_matrix_24.matrix))\n\n","repo_name":"maxmheinze/inequality_sse","sub_path":"assignment_1/assignment_1_full.py","file_name":"assignment_1_full.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25322231099","text":"from odoo import models, fields, api\n\n\nclass ShipInfo(models.Model):\n _name = 'ship.info'\n _inherit = [\"mail.thread\", \"mail.activity.mixin\"]\n _description = 'Ship Info'\n\n image_128 = fields.Image(\"Logo\", max_width=128, max_height=128)\n model = fields.Char()\n state = fields.Selection(\n [('new_request', 'New Request'), ('to_order', 'To order'), ('registered', 'Registered')], default='new_request')\n # state not in views\n type_ship = fields.Selection(\n [('tug_boat', 'Tug Boat'), ('pilot_boat', 'Pilot Boat'), ('mooring_boat', 'Mooring Boat')],\n default='tug_boat', group_expand=True)\n name = fields.Char(string=\"Ship Name\")\n IMO_number = fields.Char(string=\"IMO#\")\n grt = fields.Char(string=\"GRT\")\n # grt not\n draft = fields.Char(string=\"Draft\")\n # draft not\n length_overall = fields.Char(string=\"Length Overall\") #\n width = fields.Char(string=\"Width\")#\n call_sign = fields.Char(string=\"Call Sign\")\n total_hp = fields.Integer(string=\"Total HP\")\n total_kw = fields.Integer(string=\"Total KW\")\n flag = fields.Char(string=\"Flag\")\n flag_b = fields.Char()\n fuel_capacity = fields.Float(string=\"Fuel Capacity\")#\n puller_pull_capacity = fields.Float(string=\"Puller Pull Capacity\")#\n #\n m_e_manufacture = fields.Char(string=\"M/E Manufacture\")#\n m_e_model = fields.Char(string=\"M/E Model#\")#\n stbd_me_sr = fields.Char(string=\"STBD M/E SR#\")#\n psd_me_sr = fields.Char(string=\"PSD M/E SR#\")#\n a_e_manufacture = fields.Char(string=\"A/E Manufacture\")#\n a_e_model = fields.Char(string=\"A/E Model#\")#\n a_e1_sr = fields.Char(string=\"A/E1 SR#\")#\n a_e2_sr = fields.Char(string=\"A/E2 SR#\")#\n a_e3_sr = fields.Char(string=\"A/E2 SR#\")#\n type = fields.Selection([])#\n port_if_register = fields.Char()\n #\n tank_capacity = fields.Float(required=True)\n fuel_exist_in_tank = fields.Float(compute='_compute_fuel_exist_in_tank_func')\n fuel_progress_bar = fields.Float(compute='_compute_fuel_progress_bar', string=\"Fuel\")\n current_running_hours = fields.Float(compute='_compute_fuel_progress_bar', string=\"Running Hours\", store=True)\n #\n gross_tonnage = fields.Char()\n net_tonnage = fields.Char()\n dead_weight = fields.Char()\n overall_length = fields.Char()\n lpp = fields.Char()\n breadth = fields.Char()\n depth = fields.Char()\n draught = fields.Integer()\n free_board = 
fields.Integer()\n #\n main_manufacturer = fields.Char()\n main_model = fields.Char()\n main_stbd_sr_no = fields.Char()\n main_psd_sr_no = fields.Char()\n main_center_sr_no = fields.Char()#\n auxiliary_manufacturer = fields.Char()\n auxiliary_model = fields.Char()\n auxiliary_stbd_sr_no = fields.Char()\n auxiliary_psd_sr_no = fields.Char()\n auxiliary_center_sr_no = fields.Char()\n certificate_ids = fields.One2many('ship.certificate', 'ship_id')\n certificate_id = fields.Many2one('ship.certificate', compute='_compute_last_certificate')\n certificate_state = fields.Selection(related='certificate_id.state')\n certificate_number = fields.Char(related='certificate_id.number')\n certificate_type = fields.Selection(related='certificate_id.type')\n certificate_start_date = fields.Date(related='certificate_id.start_date')\n certificate_expire_date = fields.Date(related='certificate_id.expire_date')\n certificate_attach_file = fields.Binary(related='certificate_id.attach_file')\n certificate_attach_file_1 = fields.Binary(related='certificate_id.attach_file_1')\n report_ids = fields.One2many('ship.report', 'name')\n\n @api.depends('model', 'type', 'tank_capacity', 'report_ids.fuel_consumption')\n def _compute_fuel_progress_bar(self):\n for rec in self:\n if rec.report_ids:\n rec.fuel_progress_bar = 0\n fuel_consumption = sum([x.fuel_consumption for x in rec.report_ids])\n fuel_add = sum([x.add_fuel for x in rec.report_ids])\n\n if rec.tank_capacity:\n rec.fuel_progress_bar = (rec.fuel_exist_in_tank / rec.tank_capacity) * 100\n rec.current_running_hours = sum([x.running_hours for x in rec.report_ids])\n else:\n rec.fuel_progress_bar = 0\n rec.current_running_hours = 0\n\n @api.depends('model', 'type', 'tank_capacity', 'report_ids.fuel_consumption')\n def _compute_fuel_exist_in_tank_func(self):\n for rec in self:\n if rec.report_ids:\n fuel_consumption = sum([x.fuel_consumption for x in rec.report_ids])\n fuel_add = sum([x.add_fuel for x in rec.report_ids])\n rec.fuel_exist_in_tank = fuel_add - fuel_consumption\n else:\n rec.fuel_exist_in_tank = 0\n\n @api.depends('certificate_ids')\n def _compute_last_certificate(self):\n for rec in self:\n if rec.certificate_ids:\n if len(rec.certificate_ids) == 1:\n certificate = rec.certificate_ids.filtered(\n lambda l: l.create_date == max(\n [max([x.create_date for x in rec.certificate_ids]) if rec.certificate_ids else 0]))\n else:\n certificate = rec.certificate_ids.filtered(\n lambda l: l.create_date == max(\n [x.create_date for x in rec.certificate_ids if\n x.create_date]))\n rec.certificate_id = certificate.id or False\n else:\n rec.certificate_id = False\n\n def open_request_maintenance(self):\n return {\n 'name': 'Request Maintenance',\n 'type': 'ir.actions.act_window',\n 'res_model': 'request.maintenance',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': self.env.ref('naval_fleet.request_maintenance_form_view').id,\n 'context': {'default_ship_id': self.id},\n 'target': 'new',\n }\n\n def open_create_report_daily(self):\n return {\n 'name': 'Report Daily',\n 'type': 'ir.actions.act_window',\n 'res_model': 'report.daily.wizard',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': self.env.ref('naval_fleet.report_daily_form_view').id,\n 'context': {'default_name': self.id},\n 'target': 'new',\n }\n\n def print_report_daily(self):\n return {\n 'name': 'Report Daily',\n 'type': 'ir.actions.act_window',\n 'res_model': 'print.report.daily.wizard',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': 
self.env.ref('naval_fleet.print_report_daily_form_view_wizard').id,\n 'context': {'default_name': self.id},\n 'target': 'new',\n }\n\n def add_fuel_func(self):\n return {\n 'name': 'Add Fuel',\n 'type': 'ir.actions.act_window',\n 'res_model': 'add.fuel.wizard',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': self.env.ref('naval_fleet.add_fuel_form_view').id,\n 'context': {'default_name': self.id},\n 'target': 'new',\n }\n\n ship_status = fields.Selection([\n ('1', 'Available'),\n ('2', 'In Maintenance'),\n ('3', 'Out Of Service'),\n ], string='Ship Status', widget='circle_selection', default='1')\n","repo_name":"omaragueroisc/Custome-Modules","sub_path":"naval_fleet/models/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9784791098","text":"from datetime import datetime\n\nfrom flask import request\n\nfrom chessleague.models import Game, db\nfrom chessleague.decorators import etag, paginate, json\nfrom chessleague.api import api, API_BASE_URL\n\n\n@api.route(API_BASE_URL + '/games/', methods=['GET'])\n@etag\n@paginate()\ndef get_games():\n return Game.query\n\n\n@api.route(API_BASE_URL + '/games/', methods=['GET'])\n@etag\n@json\ndef get_game(id):\n return Game.query.get_or_404(id)\n\n\n@api.route(API_BASE_URL + '/games/', methods=['POST'])\n@json\ndef new_game():\n game = Game().from_json(request.json)\n db.session.add(game)\n db.session.commit()\n return {}, 201, {'Location': game.get_url()}\n\n\n@api.route(API_BASE_URL + '/games/', methods=['PUT'])\n@json\ndef edit_game(id):\n game = Game.query.get_or_404(id)\n game.from_json(request.json)\n db.session.add(game)\n db.session.commit()\n return {}\n\n\n@api.route(API_BASE_URL + '/games/', methods=['DELETE'])\n@json\ndef delete_game(id):\n game = Game.query.get_or_404(id)\n game.deleted = True\n db.session.commit()\n return {}, 204\n","repo_name":"profcalculus/chessleague","sub_path":"chessleague/api/game_api.py","file_name":"game_api.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9400177593","text":"# Record of Revisions\n#\n# Date Programmers Descriptions of Change\n# ==== ================ ======================\n# ? 
Amin Ghaderi Original code\n# 13-Mar-23 Michael Nunez Adaption for Snellius (Dutch supercomputing cluster)\n\n\nimport os\nimport numpy as np\nfrom numba import njit\nimport bayesflow as bf\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score\nfrom pyhddmjagsutils import recovery\n\nmodel_name = 'NDDM_rel_ndt_bound_four_betas'\n\n\n\"\"\"Generative Model Specifications \nUser Defined Functions.\"\"\" \ndef draw_prior():\n \"\"\"Samples from the prior \"\"\"\n p_samples = np.random.uniform(low =(-3.0, 0.1, 0.01, 0.05, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0),\n high=(3.0, 2.0, 0.99, 2.0, 0.5 , 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0))\n return p_samples\n\ndef prior_N(n_min=60, n_max=300):\n \"\"\"A prior for the random number of observation\"\"\"\n return np.random.randint(n_min, n_max+1)\n\n@njit\ndef diffusion_trial(delta, mu_alpha, beta, mu_tau, s_alpha, s_tau, sigma_1, sigma_2, sigma_3, sigma_4, gamma_11, gamma_12, gamma_21, gamma_22, gamma_31, gamma_32, gamma_41, gamma_42, dc=1.0, dt=.001):\n \"\"\"Simulates a trial from the diffusion model.\"\"\"\n\n n_steps = 0.\n \n # trial-to-trial boundary variability\n while True:\n alpha_trial = np.random.normal(mu_alpha, s_alpha) \n if alpha_trial>0:\n break \n # visual encoding time for each trial\n while True:\n tau_trial = np.random.normal(mu_tau, s_tau)\n if tau_trial>0:\n break \n\n # starting evidence for each trial\n evidence = alpha_trial * beta\n \n # Simulate a single DM path\n while (evidence > 0 and evidence < alpha_trial):\n\n # DDM equation\n evidence += delta*dt + np.sqrt(dt) * dc * np.random.normal()\n\n # Increment step\n n_steps += 1.0\n\n rt = n_steps * dt\n\n # b weights for each trial and each roi\n b = np.random.normal(gamma_11*alpha_trial + gamma_12*tau_trial, sigma_1)\n c = np.random.normal(gamma_21*alpha_trial + gamma_22*tau_trial, sigma_2)\n d = np.random.normal(gamma_31*alpha_trial + gamma_32*tau_trial, sigma_3)\n e = np.random.normal(gamma_41*alpha_trial + gamma_42*tau_trial, sigma_4)\n\n if evidence >= alpha_trial:\n choicert = tau_trial + rt\n else:\n choicert = -tau_trial - rt\n \n return choicert, b, c, d, e\n\n@njit\ndef simulate_trials(params, n_trials):\n \"\"\"Simulates a diffusion process for trials .\"\"\"\n\n delta, mu_alpha, beta, mu_tau, s_alpha, s_tau, sigma_1, sigma_2, sigma_3, sigma_4, gamma_11, gamma_12, gamma_21, gamma_22, gamma_31, gamma_32, gamma_41, gamma_42 = params\n choicert = np.empty(n_trials)\n b = np.empty(n_trials)\n c = np.empty(n_trials)\n d = np.empty(n_trials)\n e = np.empty(n_trials)\n for i in range(n_trials):\n choicert[i], b[i], c[i], d[i], e[i] = diffusion_trial(delta, mu_alpha, beta, mu_tau, s_alpha, s_tau, sigma_1, sigma_2, sigma_3, sigma_4, gamma_11, gamma_12, gamma_21, gamma_22, gamma_31, gamma_32, gamma_41, gamma_42) \n \n sim_data = np.stack((choicert, b, c, d, e), axis=-1)\n return sim_data\n\n\"\"\"Connect via BayesFlow Wrappers\nNote, that the same can be achieved using custom functions or classes, as long as the simulator and configurator interact well.\"\"\"\nprior = bf.simulation.Prior(prior_fun=draw_prior)\nvar_num_obs = bf.simulation.ContextGenerator(non_batchable_context_fun=prior_N)\nsimulator = bf.simulation.Simulator(simulator_fun=simulate_trials, context_generator=var_num_obs)\ngenerative_model = bf.simulation.GenerativeModel(prior, simulator)\n\n\n\"\"\"Create Configurator\nWe need this, since the variable N cannot be processed directly by the nets.\"\"\" \ndef 
configurator(sim_dict):\n \"\"\"Configures the outputs of a generative model for interaction with \n BayesFlow modules.\"\"\"\n \n out = dict()\n # These will be passed through the summary network. In this case,\n # it's just the data, but it can be other stuff as well.\n data = sim_dict['sim_data'].astype(np.float32)\n out['summary_conditions'] = data\n \n # These will be concatenated to the outputs of the summary network\n # Convert N to log N since neural nets cant deal well with large numbers\n N = np.log(sim_dict['sim_non_batchable_context'])\n # Repeat N for each sim (since shared across batch), notice the\n # extra dimension needed\n N_vec = N * np.ones((data.shape[0], 1), dtype=np.float32)\n out['direct_conditions'] = N_vec\n \n # Finally, extract parameters. Any transformations (e.g., standardization)\n # should happen here.\n out['parameters'] = sim_dict['prior_draws'].astype(np.float32)\n return out\n\n# BayesFlow Setup\nsummary_net = bf.networks.InvariantNetwork()\ninference_net = bf.networks.InvertibleNetwork(num_params=18)\namortizer = bf.amortizers.AmortizedPosterior(inference_net, summary_net)\n\n# If the checkpoint path does not exist, create it\ncheckpoint_path = f\"checkpoint/{model_name}\"\n\n# We need to pass the custom configurator here\ntrainer = bf.trainers.Trainer(\n amortizer=amortizer, \n generative_model=generative_model, \n configurator=configurator,\n checkpoint_path=checkpoint_path)\n\n\n\"\"\"Create validation simulations with some random N, if specific N is desired, need to \ncall simulator explicitly or define it with keyword arguments which can control behavior\nAll trainer.train_*** can take additional keyword arguments controling the behavior of\nconfigurators, generative models and networks\"\"\"\nnum_val = 300\nval_sims = generative_model(num_val)\n\n\"\"\"Quickcheck, var N is slow on my laptop, should definitely train longer for an actual application!\"\"\"\nh = trainer.train_experience_replay(epochs=300, iterations_per_epoch=1000, batch_size=32, validation_sims=val_sims)\n\n# If the recovery plot path does not exist, create it\nplot_path = f\"recovery_plots/{model_name}\"\nif not os.path.exists(plot_path):\n os.makedirs(plot_path)\n\n\"\"\"Validation, Loss Curves\"\"\"\nf = bf.diagnostics.plot_losses(h['train_losses'], h['val_losses'])\nf.savefig(f\"{plot_path}/{model_name}_validation.png\")\n\n\"\"\"Computational Adequacy\"\"\"\n# Need to test for different Ns, not just a random one!\nnum_test = 500\nnum_posterior_draws_recovery = 1000\nnew_sims = configurator(generative_model(num_test))\n\nposterior_draws = amortizer.sample(new_sims, n_samples=num_posterior_draws_recovery)\nfig = bf.diagnostics.plot_recovery(posterior_draws, new_sims['parameters'], param_names = ['delta', 'mu_alpha', 'beta', 'mu_tau', 's_alpha', 's_tau', 'sigma_1', 'sigma_2', 'sigma_3', 'sigma_4', 'gamma_11', 'gamma_12', 'gamma_21', 'gamma_22', 'gamma_31', 'gamma_32', 'gamma_41', 'gamma_42'] )\nfig.savefig(f\"{plot_path}/{model_name}_true_vs_estimate.png\")","repo_name":"mdnunez/bayesflow_nddms","sub_path":"retired_models/NDDM_rel_ndt_bound_four_betas.py","file_name":"NDDM_rel_ndt_bound_four_betas.py","file_ext":"py","file_size_in_byte":6895,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"29630430274","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport random as rn\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\n\r\nimport utils.parameters as params\r\nFIXED_PARAMETERS = 
params.load_parameters()\r\nos.environ['PYTHONHASHSEED']=str(int(FIXED_PARAMETERS['version'][1:]))\r\nnp.random.seed(int(FIXED_PARAMETERS['version'][1:]))\r\nrn.seed(int(FIXED_PARAMETERS['version'][1:]))\r\ntf.set_random_seed(int(FIXED_PARAMETERS['version'][1:]))\r\n\r\nfrom sklearn.model_selection import train_test_split, StratifiedShuffleSplit\r\nfrom keras.utils import to_categorical\r\nfrom sklearn.cluster import KMeans\r\nimport pandas as pd\r\n\r\n\r\ndef _load_processed_dataset(FIXED_PARAMETERS, dataset, merge_bool=True, data_string = ''):\r\n X = np.load(FIXED_PARAMETERS['data_path'] + '/X2' + '_' + dataset + '.npy')\r\n y = np.load(FIXED_PARAMETERS['data_path'] + '/Y_CPC1_2_' + dataset + '.npy')\r\n\r\n # if data_string == 'Coronary_Angiography':\r\n # y = np.load(FIXED_PARAMETERS['data_path'] + '/Y_' + data_string + '_' + dataset + '.npy')\r\n #\r\n # if (data_string == 'Cardiac_Stent') or (data_string == 'CABG'):\r\n # y = np.load(FIXED_PARAMETERS['data_path'] + '/Y_' + data_string + '_' + dataset + '.npy')\r\n\r\n if merge_bool:\r\n y[y == 2] = 1\r\n return X, y\r\n\r\ndef train_val_split(X, y, test_size=0.2, random_state=0, prob_duplicate=0.0):\r\n train_labels, train_counts = np.unique(y, return_counts=True)\r\n X_train, Y_train = [], []\r\n X_val, Y_val = [], []\r\n\r\n for label, max_cnt in zip(train_labels, train_counts):\r\n samples = X[y == label, :]\r\n try:\r\n train_samples, val_samples = train_test_split(samples, test_size=test_size, random_state=random_state)\r\n except:\r\n val_samples = samples ## Error handling sends all samples to the validation set\r\n train_samples = np.array([])\r\n\r\n for i in range(len(train_samples)):\r\n X_train.append(train_samples[i])\r\n Y_train.append(label)\r\n if (rn.random() < prob_duplicate) and (label == 0):\r\n X_train.append(train_samples[i])\r\n Y_train.append(label)\r\n\r\n for i in range(len(val_samples)):\r\n X_val.append(val_samples[i])\r\n Y_val.append(label)\r\n\r\n X_train = np.asarray(X_train)\r\n X_val = np.asarray(X_val)\r\n Y_train = np.asarray(Y_train)\r\n Y_val = np.asarray(Y_val)\r\n\r\n return X_train, X_val, Y_train, Y_val\r\n\r\ndef prepare_dataset_merged_keras_combined(FIXED_PARAMETERS, agency_list, print_bool=False, data_string = '', hospital_list=[]):\r\n X_train, y_train, X_val, y_val, X_test, y_test, X_sen, y_sen, metadata = {},{},{},{},{},{},{},{}, {}\r\n\r\n for a in FIXED_PARAMETERS[agency_list].split(' '):\r\n X, y = _load_processed_dataset(FIXED_PARAMETERS, a, data_string=data_string)\r\n X, X_sen[a], y, y_sen[a] = train_test_split(X, y, test_size=0.15, random_state=0)\r\n X_train[a], X_test[a], y_train[a], y_test[a] = train_test_split(X, y, test_size=0.2, random_state=0)\r\n X_train[a], X_val[a], y_train[a], y_val[a] = train_val_split(X_train[a], y_train[a], test_size=0.2, random_state=0)\r\n\r\n X_train = np.concatenate([X_train[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n X_val = np.concatenate([X_val[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n X_test = np.concatenate([X_test[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n X_sen = np.concatenate([X_sen[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n\r\n y_train = np.concatenate([y_train[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n y_val = np.concatenate([y_val[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n y_test = np.concatenate([y_test[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n y_sen = 
np.concatenate([y_sen[a] for a in FIXED_PARAMETERS[agency_list].split(' ')], axis=0)\r\n\r\n X_train = X_train.astype(np.int32)\r\n X_val = X_val.astype(np.int32)\r\n X_test = X_test.astype(np.int32)\r\n X_sen = X_sen.astype(np.int32)\r\n\r\n if len(hospital_list) != 0:\r\n y_train = y_train[[e in hospital_list for e in X_train[:,4]]]\r\n y_val = y_val[[e in hospital_list for e in X_val[:,4]]]\r\n y_test = y_test[[e in hospital_list for e in X_test[:,4]]]\r\n y_sen = y_sen[[e in hospital_list for e in X_sen[:,4]]]\r\n\r\n X_train = X_train[[e in hospital_list for e in X_train[:,4]],:]\r\n X_val = X_val[[e in hospital_list for e in X_val[:,4]],:]\r\n X_test = X_test[[e in hospital_list for e in X_test[:,4]],:]\r\n X_sen = X_sen[[e in hospital_list for e in X_sen[:,4]],:]\r\n\r\n if data_string == 'Coronary_Angiography':\r\n y_train = X_train[:,23]\r\n y_val = X_val[:,23]\r\n y_test = X_test[:,23]\r\n y_sen = X_sen[:,23]\r\n\r\n X_train = X_train[:,:23]\r\n X_val = X_val[:,:23]\r\n X_test = X_test[:,:23]\r\n X_sen = X_sen[:,:23]\r\n\r\n if (data_string == 'Cardiac_Stent') or (data_string == 'CABG'):\r\n if data_string == 'Cardiac_Stent':\r\n y_train, y_val, y_test, y_sen = X_train[:,24], X_val[:,24], X_test[:,24], X_sen[:,24]\r\n if data_string == 'CABG':\r\n y_train, y_val, y_test, y_sen = X_train[:,25], X_val[:,25], X_test[:,25], X_sen[:,25]\r\n\r\n X_train = X_train[:,:24]\r\n X_val = X_val[:,:24]\r\n X_test = X_test[:,:24]\r\n X_sen = X_sen[:,:24]\r\n\r\n if data_string == 'CPC1_2':\r\n X_train = X_train[:,:25]\r\n X_val = X_val[:,:25]\r\n X_test = X_test[:,:25]\r\n X_sen = X_sen[:,:25]\r\n\r\n\r\n X = np.concatenate((X_train, X_val), axis=0)\r\n num_cols = X.shape[1]\r\n metadata = []\r\n for col_id in range(num_cols):\r\n data = X[:, col_id]\r\n num_unique = max(np.unique(data))+1\r\n metadata.append(num_unique)\r\n\r\n for i in range(X_test.shape[0]):\r\n for j in range(X_test.shape[1]):\r\n if X_test[i][j] >= metadata[j]:\r\n X_test[i][j] = metadata[j]-1\r\n\r\n for i in range(X_sen.shape[0]):\r\n for j in range(X_sen.shape[1]):\r\n if X_sen[i][j] >= metadata[j]:\r\n X_sen[i][j] = metadata[j]-1\r\n\r\n X_train = [X_train[:, i] for i in range(num_cols)]\r\n X_val = [X_val[:, i] for i in range(num_cols)]\r\n X_test = [X_test[:, i] for i in range(num_cols)]\r\n X_sen = [X_sen[:, i] for i in range(num_cols)]\r\n\r\n train_labels, train_counts = np.unique(y_train, return_counts=True)\r\n val_labels, val_counts = np.unique(y_val, return_counts=True)\r\n test_labels, test_counts = np.unique(y_test, return_counts=True)\r\n sen_labels, sen_counts = np.unique(y_sen, return_counts=True)\r\n\r\n\r\n if print_bool: print(\"Agency set : \", a)\r\n if print_bool: print(\"Train set : \", (X_train[0].shape[0], num_cols), \" Class Label Counts: \", train_counts, \" CPC1/2 Rate: \", 1-train_counts[0]/sum(train_counts))\r\n if print_bool: print(\"Val set : \", (X_val[0].shape[0], num_cols), \" Class Label Counts: \", val_counts, \" CPC1/2 Rate: \", 1-val_counts[0]/sum(val_counts))\r\n if print_bool: print(\"Test set : \", (X_test[0].shape[0], num_cols), \" Class Label Counts: \", test_counts, \" CPC1/2 Rate: \", 1-test_counts[0]/sum(test_counts))\r\n if print_bool: print(\"Sen set : \", (X_sen[0].shape[0], num_cols), \" Class Label Counts: \", sen_counts, \" CPC1/2 Rate: \", 1-sen_counts[0]/sum(sen_counts))\r\n if print_bool: print()\r\n\r\n\r\n num_classes = len(np.unique(y_train))\r\n y_train = to_categorical(y_train, num_classes=num_classes)\r\n y_val = to_categorical(y_val, 
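# Illustrative sketch (toy arrays; column 4 assumed to hold the hospital id, as in the
# record above): the hospital filter above builds per-row Python booleans; np.isin
# expresses the same row mask vectorised, keeping X and y aligned:
import numpy as np

X = np.array([[0, 0, 0, 0, 7], [0, 0, 0, 0, 9]])
y = np.array([1, 0])
hospital_list = [7]
mask = np.isin(X[:, 4], hospital_list)
X, y = X[mask], y[mask]  # both filtered by the same mask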
num_classes=num_classes)\r\n    y_test = to_categorical(y_test, num_classes=num_classes)\r\n    y_sen = to_categorical(y_sen, num_classes=num_classes)\r\n\r\n    return (X_train, y_train), (X_val, y_val), (X_test, y_test), (X_sen, y_sen), metadata\r\n\r\n\r\n\r\ndef change_problem(X, y, FIXED_PARAMETERS):\r\n    keep = [int(i) for i in FIXED_PARAMETERS['inputs'].split(' ')]\r\n    label = [int(i) for i in FIXED_PARAMETERS['outputs'].split(' ')]\r\n\r\n    y_new = {}\r\n    for l in label:\r\n        y_new[l] = to_categorical(X[l], num_classes=2)\r\n    # X_new = [X[e] for e in keep]\r\n    return X, y_new\r\n\r\ndef change_meta(metadata, FIXED_PARAMETERS):\r\n    keep = [int(i) for i in FIXED_PARAMETERS['inputs'].split(' ')]\r\n    metadata = [metadata[e] for e in keep]\r\n    return metadata\r\n\r\n\r\ndef subset_inputs(X_train, X_val, metadata, features):\r\n    X_train = [X_train[e] for e in features]\r\n    X_val = [X_val[e] for e in features]\r\n    metadata = [metadata[e] for e in features]\r\n\r\n    return X_train, X_val, metadata\r\n\r\ndef subset_inputs_all(X_train, X_val, X_test, metadata, features):\r\n    X_train = [X_train[e] for e in features]\r\n    X_val = [X_val[e] for e in features]\r\n    X_test = [X_test[e] for e in features]\r\n    metadata = [metadata[e] for e in features]\r\n\r\n    return X_train, X_val, X_test, metadata","repo_name":"sharford5/Cardiac-Arrest-Decision","sub_path":"utils/data_hospitals_utils.py","file_name":"data_hospitals_utils.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"23077864657","text":"def DFS(V):\n    global res\n    ch[V]=1\n    for i in graph[V]:\n        if ch[i]==0:\n            res+=1\n            DFS(i)\n\nn=int(input()) # number of computers\nm=int(input()) # number of directly connected computer pairs\ngraph=[[] for _ in range(n+1)]\nfor _ in range(m):\n    a,b=map(int,input().split())\n    graph[a].append(b)\n    graph[b].append(a)\n\nres=0\nch=[0]*(n+1)\nDFS(1)\nprint(res)","repo_name":"gayoungee/gy-algorithm","sub_path":"pythonProject/DFS_BFS/2606.py","file_name":"2606.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"5004615280","text":"from . 
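# Illustrative sketch (standalone rewrite, not from the record above): the recursive DFS
# above counts computers reachable from node 1 (BOJ 2606). An iterative stack version
# avoids Python's recursion limit on deep graphs; the logic is otherwise identical:
def count_infected(graph, start=1):
    seen, stack, infected = {start}, [start], 0
    while stack:
        v = stack.pop()
        for w in graph[v]:
            if w not in seen:
                seen.add(w)
                infected += 1  # count every newly reached node, excluding the start
                stack.append(w)
    return infected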
import AbstractExperiment\nimport experimentalCollections.TrecCollections as tc\nimport experimentalCollections.qppCollections as qc\nfrom utils import recursiveSplitGop, getUniqueFactors\nimport pandas as pd\n\nimport qppMeasures\nimport commonParams\nimport time\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom scipy.stats import gaussian_kde\n\nimport matplotlib\n\n#matplotlib.rcParams['text.usetex'] = True\n#plt.rcParams['font.family'] = 'serif'\n#plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']\n\n#plt.rcParams['text.usetex'] = True\nplt.rcParams.update({\n \"text.usetex\": True,\n \"font.family\": \"serif\",\n \"font.serif\": [\"Computer Modern Roman\"],\n})\nclass CompareMeasures(AbstractExperiment):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\n def run_experiment(self):\n #dashes = ['-', '--', '-.', ':', (5, 2)]\n dashes =[(None, None), (1, 1), (4, 3), (3, 1, 1, 1), (2, 2)]\n tieStrategies = ['average', 'min', 'max', 'first', 'dense']\n\n dss = [pd.read_csv(f\"../../data/distributional_measures/DistributionalMeasureTruncated_{self.collectionId}_{self.mLabel}_{self.distrMeasure}_{ts}_titleQueries.csv\") for ts in tieStrategies]\n\n for e, ds in enumerate(dss):\n ds = ds[(ds['stemmer']=='porter') & (ds['stoplist']=='indri')]\n dss[e] = ds\n\n\n #msrs = [gaussian_kde(np.array(ds[self.distrMeasure]), bw_method='silverman')for ds in dss]\n\n #x = np.linspace(0, 1, 10000)\n\n #ests = [f(x) for f in msrs]\n\n #plt.figure()\n #for i, est in enumerate(ests):\n # plt.plot(x, est, lw=2, label=tieStrategies[i], dashes=dashes[i])\n #plt.legend()\n #plt.show()\n\n\n fig, axs = plt.subplots(1, 5, figsize=(30, 8))\n for i, est in enumerate(dss):\n ax = axs[i]\n c, br = np.histogram(est[self.distrMeasure], bins=40, density=True)\n ax.plot(br[:-1]+(br[1:]-br[:-1])/2, c, label=rf\"{self.distrMeasure}\")\n\n\n means = est[['predictor', 'sARE']].groupby('predictor').mean().reset_index()\n f = gaussian_kde(np.array(means[self.distrMeasure]), bw_method='silverman')\n x = np.linspace(br[0], br[-1], 10000)\n y = f(x)\n\n ax.plot(x, y, label=rf\"{self.distrMeasure.replace('s', 'sM')}\")\n ax.legend(fontsize=24)\n ax.tick_params(labelsize=22)\n ax.set_title(rf\"{tieStrategies[i]}\", fontsize=24)\n ax.set_ylabel(r\"density\", fontsize=24)\n\n #fig.tight_layout(pad=1.0)\n plt.savefig(\"comparison-tiebreaking.pdf\")\n\n\n\n measures = ['sARE', 'sRE', 'sRSRE', 'sSR']\n dss = [pd.read_csv(f\"../../data/distributional_measures/DistributionalMeasure_{self.collectionId}_{self.mLabel}_{ms}_average_titleQueries.csv\") for ms in measures]\n\n for e, ds in enumerate(dss):\n ds = ds[(ds['stemmer']=='porter') & (ds['stoplist']=='indri')]\n dss[e] = ds\n print(dss[e][measures[e]])\n\n\n #msrs = [gaussian_kde(np.array(dss[e][ms]), bw_method=0.1) for e, ms in enumerate(measures)]\n\n #x = np.linspace(-1.5, 3, 10000)\n\n #ests = [f(x) for f in msrs]\n\n\n fig, axs = plt.subplots(1, 4, figsize=(30, 8))\n for i, est in enumerate(dss):\n ax = axs[i]\n c, br = np.histogram(est[measures[i]], bins=40, density=True)\n ax.plot(br[:-1]+(br[1:]-br[:-1])/2, c, label=rf\"{measures[i]}\")\n\n\n means = est[['predictor', measures[i]]].groupby('predictor').mean().reset_index()\n f = gaussian_kde(np.array(means[measures[i]]), bw_method='silverman')\n x = np.linspace(br[0], br[-1], 10000)\n y = f(x)\n\n ax.plot(x, y, label=rf\"{measures[i].replace('s', 'sM')}\")\n ax.legend(fontsize=24)\n ax.tick_params(labelsize=22)\n ax.set_ylabel(r\"density\", 
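# Illustrative sketch (synthetic data): the plotting loops above overlay a normalised
# histogram of per-query scores with a Silverman-bandwidth KDE of per-predictor means.
# The core overlay, reduced to its ingredients:
import numpy as np
from scipy.stats import gaussian_kde

values = np.random.default_rng(0).normal(size=500)
counts, edges = np.histogram(values, bins=40, density=True)
centers = edges[:-1] + (edges[1:] - edges[:-1]) / 2  # bin midpoints, as computed above
kde = gaussian_kde(values, bw_method='silverman')
smooth = kde(np.linspace(edges[0], edges[-1], 1000))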
fontsize=24)\n\n\n plt.savefig(\"comparison-distrMeasures.pdf\")\n\n\n\n","repo_name":"Zendelo/QPP-EnhancedEval","sub_path":"code/python/experiments/CompareMeasures.py","file_name":"CompareMeasures.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"} +{"seq_id":"40582843289","text":"import json\nfrom enum import Enum\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom apps.accounts.fields import AutoCreatedField, AutoLastModifiedField\nfrom apps.accounts.views.utils import is_cloud_version\n\n\nclass SiteStatus(str, Enum):\n CREATED = \"Created\"\n INITIALIZING = \"Initializing\"\n READY = \"Ready\"\n ERROR = \"Error\"\n\n\nclass Site(models.Model):\n title = models.CharField(\n max_length=256, help_text=\"Name of Mercury Site\", blank=False, null=False\n )\n slug = models.CharField(\n max_length=256,\n help_text=\"Subdomain\",\n blank=False,\n null=False,\n unique=True,\n )\n domain = models.CharField(\n default=\"runmercury.com\",\n max_length=256,\n help_text=\"Domain address\",\n blank=True,\n null=True,\n )\n custom_domain = models.CharField(\n max_length=256,\n help_text=\"Custom domain address\",\n blank=True,\n null=True,\n )\n\n PUBLIC = \"PUBLIC\"\n PRIVATE = \"PRIVATE\"\n SHARE_CHOICES = (\n (PUBLIC, \"Anyone can access notebooks and execute\"),\n (PRIVATE, \"Only selected users have access to notebooks\"),\n )\n share = models.CharField(\n default=PUBLIC, max_length=32, choices=SHARE_CHOICES, blank=False, null=False\n )\n\n # Created\n # Initializing\n # Ready\n # Error\n status = models.CharField(default=\"Ready\", max_length=32, blank=False, null=False)\n welcome = models.TextField(default=\"\", blank=True, null=True)\n info = models.TextField(blank=True, null=True)\n\n active = models.BooleanField(default=True, blank=False, null=False)\n created_at = AutoCreatedField()\n updated_at = AutoLastModifiedField()\n created_by = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n )\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, related_name=\"profile\")\n # custom fields for user\n info = models.TextField(default=\"{}\", blank=True)\n usage = models.TextField(default=\"{}\", blank=True)\n\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n info = \"{}\"\n if is_cloud_version():\n info = json.dumps({\"plan\": \"starter\"})\n UserProfile.objects.create(user=instance, info=info)\n\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n\n\n@receiver(post_save, sender=User)\ndef check_invitations(sender, instance, **kwargs):\n invitations = Invitation.objects.filter(invited=instance.email)\n for invitation in invitations:\n previous_memberships = Membership.objects.filter(\n user=instance, host=invitation.hosted_on\n )\n if not previous_memberships:\n Membership.objects.create(\n user=instance,\n host=invitation.hosted_on,\n rights=invitation.rights,\n created_by=invitation.created_by,\n )\n invitation.delete()\n\n\nclass Membership(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n host = models.ForeignKey(Site, on_delete=models.CASCADE, related_name=\"hosts\")\n # view, edit, admin\n VIEW = \"VIEW\"\n EDIT = \"EDIT\"\n RIGHTS_CHOICES = (\n (VIEW, \"View and execute 
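# Illustrative sketch (Profile is a stand-in model name; needs a configured Django
# project to run): the models above rely on the classic "create a related row when a
# User is inserted" signal pattern. Its skeleton, isolated:
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=User)
def ensure_profile(sender, instance, created, **kwargs):
    if created:  # fires only on INSERT, not on later saves
        Profile.objects.create(user=instance)  # Profile: any model with a user FK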
notebooks\"),\n (EDIT, \"Edit and view site, files and execute notebooks\"),\n )\n rights = models.CharField(\n default=VIEW,\n choices=RIGHTS_CHOICES,\n max_length=32,\n help_text=\"Rights for user\",\n blank=False,\n null=False,\n )\n created_at = AutoCreatedField()\n updated_at = AutoLastModifiedField()\n created_by = models.ForeignKey(\n User, on_delete=models.CASCADE, related_name=\"created_by\"\n )\n\n\nclass Invitation(models.Model):\n invited = models.CharField(max_length=256, blank=False, null=False)\n created_at = AutoCreatedField()\n created_by = models.ForeignKey(User, on_delete=models.CASCADE)\n hosted_on = models.ForeignKey(Site, on_delete=models.CASCADE)\n rights = models.CharField(\n default=Membership.VIEW,\n choices=Membership.RIGHTS_CHOICES,\n max_length=32,\n help_text=\"Rights for user\",\n blank=False,\n null=False,\n )\n\n\nclass Secret(models.Model):\n name = models.CharField(max_length=256, blank=False, null=False)\n token = models.TextField(blank=False, null=False)\n created_at = AutoCreatedField()\n created_by = models.ForeignKey(User, on_delete=models.CASCADE)\n hosted_on = models.ForeignKey(Site, on_delete=models.CASCADE)\n","repo_name":"irvinbma/Mercury-Jupyter","sub_path":"mercury/apps/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34602690661","text":"import yfinance as yf\nimport time\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nfrom selenium import webdriver\nimport random\n\nchrome_agent = \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Mobile Safari/537.36\"\ncheader = {\"User-Agent\":chrome_agent}\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--disable-gpu')\ndriver = webdriver.Chrome(options=chrome_options)\n\ndef js_requests(url, *args):\n driver.delete_all_cookies()\n driver.get(url)\n time.sleep(20.0 + random.random())\n if len(args) != 0:\n driver.switch_to.frame(args[0])\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n return soup\n\ndef scrape(url, samples=5):\n # url of articles\n urls = []\n # 2d array of [[title, article], ...]\n res = []\n url = \"https://www.reuters.com/site-search/?query=\" + url\n soup = js_requests(url)\n\n links = soup.find_all('a', class_=\"text__text__1FZLe text__dark-grey__3Ml43 text__inherit-font__1Y8w3 text__inherit-size__1DZJi link__underline_on_hover__2zGL4 media-story-card__heading__eqhp9\")\n for link in links:\n try:\n href = link.get('href')\n if href: urls.append(urljoin(url, href))\n except:\n continue\n\n # Randomly select 5 articles as samples\n chosen = []\n # for i in range(samples):\n for i in range(len(urls)-1):\n if len(urls) <= i: break\n luckyNumber = random.randint(0, len(urls) - 1)\n while luckyNumber in chosen: luckyNumber = random.randint(0, len(urls) - 1)\n newUrl = urls[i]\n time.sleep(10.0 + random.random())\n newSoup = js_requests(newUrl)\n # Scrape the article page\n try:\n title = newSoup.find('h1')\n article = \"\"\n for p in newSoup.find_all('p', class_=\"text__text__1FZLe text__dark-grey__3Ml43 text__regular__2N1Xr text__small__1kGq2 body__full_width__ekUdw body__small_body__2vQyf article-body__paragraph__2-BtD\"):\n article += p.text\n print(i, title.text)\n res.append([title.text, article])\n chosen.append(luckyNumber)\n except:\n i -= 1\n continue\n\n return 
res\n\nres = scrape(\"tesla\", 10)\nfor i in res:\n print(\"Topic:\")\n print(i[0])\n print(\"Article: \")\n for j in i[1].split(\". \"): print(j)\n print()\n","repo_name":"pvpswaghd/stock-sentiment-prediction","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14132557398","text":"import random\nimport time\nimport sys\nimport pickle\n\n# Using the Python Device SDK for IoT Hub:\n# https://github.com/Azure/azure-iot-sdk-python\n# The sample connects to a device-specific MQTT endpoint on your IoT Hub.\nimport iothub_client\n# pylint: disable=E0611\nfrom iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult\nfrom iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError, DeviceMethodReturnValue\n\n# import modules that connect to sensors on the device\nfrom dht22 import get_dht22_data\n\n\n# String containing Hostname, Device Id & Device Key in the format:\n# \"HostName=;DeviceId=;SharedAccessKey=\"\nfp_iot_hub_credentials = r\"../credentials/iothub_credentials\"\nwith open(fp_iot_hub_credentials, 'rb') as f:\n CONNECTION_STRING = pickle.load(f)\n\n\n# Using the MQTT protocol.\nPROTOCOL = IoTHubTransportProvider.MQTT\nMESSAGE_TIMEOUT = 10000\n\n# Define the JSON message to send to IoT Hub.\nTEMPERATURE = 20.0\nHUMIDITY = 60\nMSG_TXT = \"{\\\"temperature\\\": %.2f,\\\"humidity\\\": %.2f}\"\n\ndef send_confirmation_callback(message, result, user_context):\n print ( \"IoT Hub responded to message with status: %s\" % (result) )\n\ndef iothub_client_init():\n # Create an IoT Hub client\n client = IoTHubClient(CONNECTION_STRING, PROTOCOL)\n return client\n\ndef iothub_client_telemetry_sample_run():\n\n try:\n client = iothub_client_init()\n print ( \"IoT Hub device sending periodic messages, press Ctrl-C to exit\" )\n\n while True:\n # Build the message with simulated telemetry values.\n _, humidity, temperature = get_dht22_data()\n msg_txt_formatted = MSG_TXT % (temperature, humidity)\n message = IoTHubMessage(msg_txt_formatted)\n\n # Add a custom application property to the message.\n # An IoT hub can filter on these properties without access to the message body.\n prop_map = message.properties()\n if temperature > 30:\n prop_map.add(\"temperatureAlert\", \"true\")\n else:\n prop_map.add(\"temperatureAlert\", \"false\")\n\n # Send the message.\n print( \"Sending message: %s\" % message.get_string() )\n client.send_event_async(message, send_confirmation_callback, None)\n time.sleep(5)\n\n except IoTHubError as iothub_error:\n print ( \"Unexpected error %s from IoTHub\" % iothub_error )\n return\n except KeyboardInterrupt:\n print ( \"IoTHubClient sample stopped\" )\n\nif __name__ == '__main__':\n print ( \"IoT Hub Quickstart #1 - Simulated device\" )\n print ( \"Press Ctrl-C to exit\" )\n iothub_client_telemetry_sample_run()","repo_name":"edsase/dev297","sub_path":"send_data_iothub.py","file_name":"send_data_iothub.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72604668426","text":"from pydantic import BaseModel, Field\nfrom typing import List, Optional\nfrom datetime import datetime\nfrom bson.objectid import ObjectId, InvalidId\nimport motor.motor_asyncio\nimport logging\n\n\nclass OID(str):\n @classmethod\n def __get_validators__(cls):\n yield cls.validate\n\n 
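# Illustrative sketch (LowerStr is a made-up type): the OID type here implements the
# pydantic-v1 custom-type hook - __get_validators__ yields callables that coerce and
# validate raw input. The same pattern for a lower-cased string:
class LowerStr(str):
    @classmethod
    def __get_validators__(cls):
        yield cls.validate

    @classmethod
    def validate(cls, v):
        if not isinstance(v, str):
            raise ValueError('string required')
        return cls(v.lower())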
@classmethod\n    def validate(cls, v):\n        try:\n            return ObjectId(str(v))\n        except InvalidId:\n            raise ValueError(\"Not a valid ObjectId\")\n\n\nclass GiveRequest(BaseModel):\n    book_id: OID = Field()\n    given_by: OID = Field()\n    taken_by: OID = Field()\n\n    class Config:\n        json_encoders = {\n            datetime: lambda dt: dt.isoformat(),\n            ObjectId: lambda oid: str(oid),\n        }\n\n\nclass Category(BaseModel):\n    _id: OID\n    name: str = ''\n\n\nclass User(BaseModel):\n    _id: OID\n    name: str = ''\n    signup_ts: Optional[datetime] = None\n    role: str = None\n    email: str = ''\n    password: str = ''\n\n    class Config:\n        json_encoders = {\n            datetime: lambda dt: dt.isoformat(),\n            ObjectId: lambda oid: str(oid),\n        }\n\n\nclass Book(BaseModel):\n    _id: OID\n    title: str = None\n    categories: Optional[List[OID]] = None\n    assign_ts: Optional[datetime] = None\n    taken_by: OID = None\n    given_by: OID = None\n    content: str = ''\n\n    class Config:\n        json_encoders = {\n            datetime: lambda dt: dt.isoformat(),\n            ObjectId: lambda oid: str(oid),\n        }\n\n\nclass Database:\n\n    def __init__(self):\n        self.mongo_conn = motor.motor_asyncio.AsyncIOMotorClient(\"mongodb+srv://hornet:gnuIjj8EdK1tXZ6U@cluster0.ofprt.mongodb.net\")\n        self.db = self.mongo_conn['library']\n\n    async def _parse_books(self, cur) -> List[Book]:\n        parsed_books = [Book(**book) for book in await cur.to_list(length=10000)]\n        return parsed_books\n\n    async def get_free_books(self) -> List[Book]:\n        # Fetch books that are not checked out\n        cursor = self.db.books.find({'taken_by': None})\n        books = await self._parse_books(cursor)\n        return books\n\n    async def get_busy_books(self) -> List[Book]:\n        # Fetch books that are currently checked out\n        cursor = self.db.books.find({'taken_by': {'$ne': None}})\n        books = await self._parse_books(cursor)\n        return books\n\n    async def give_book(self, request: GiveRequest) -> Optional[Book]:\n        \"\"\"\n        Issue a book to a reader\n        :return:\n        \"\"\"\n        if await self.is_admin(request.given_by):\n            book = await self.db.books.find_one({'_id': request.book_id})\n            book = Book(**book)\n            book.assign_ts = datetime.now()\n            book.given_by = request.given_by\n            book.taken_by = request.taken_by\n            await self.db.books.replace_one({'_id': request.book_id}, book.dict())\n            return book\n        else:\n            raise Exception('not Admin')\n\n    async def is_admin(self, user_id: OID) -> bool:\n        \"\"\"\n        Check whether the user may issue books\n        :param user_id:\n        :return:\n        \"\"\"\n        user = await self.db.users.find_one({'_id': user_id})\n        if user['role'] == 'admin':\n            return True\n        return False\n\n    async def add_book(self, book: Book) -> bool:\n        \"\"\"\n        Add a book to the database\n        :param book:\n        :return:\n        \"\"\"\n        await self.db.books.insert_one(book.dict())\n        return True\n\n    async def delete_book(self, book_id: OID) -> bool:\n        # Delete a book\n        await self.db.books.delete_one({'_id': book_id})\n        return True\n","repo_name":"h0rn3t/books-api-service","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"66958885","text":"import os\nimport sys\nimport numpy as np\nimport torch\nimport pickle\n\nimport useful_functions as uf\n# import time\nfrom EOM_integrate import Np, Nz, nu, mu, Ncores_BC\n\n# t0 = time.time()\n\ndtype = torch.double\n\ndef surf_node(ind):\n    \"\"\"\n    This function returns the surface a node is on\n    Input : index of the node (position of surface nodes...)\n    Output : surface the node is on (if it is in more than one surface\n    returns all of them)\n    \"\"\"\n    surf = []\n    node_coor = pos_surf_final[ind]\n    if 
node_coor[0] == 0:\n surf += ['lb']\n if node_coor[0] == Np:\n surf += ['rf']\n if node_coor[1] == 0:\n surf += ['rb']\n if node_coor[1] == Np:\n surf += ['lf']\n if node_coor[2] == 0:\n surf += ['b']\n if node_coor[2] == Nz:\n surf += ['u']\n return surf\n\n# Correspondance between surface name and normal vector\ncorr_name_norm = {'lb': torch.tensor([[-1],[0],[0]], dtype = dtype), 'rf': torch.tensor([[1],[0],[0]], dtype = dtype), \\\n 'rb': torch.tensor([[0],[-1],[0]], dtype = dtype), 'lf': torch.tensor([[0],[1],[0]], dtype = dtype), \\\n 'b': torch.tensor([[0],[0],[-1]], dtype = dtype), 'u': torch.tensor([[0],[0],[1]], dtype = dtype)}\n\n# We go and fetch the coordinates of each node on the surface in 'surf.txt'\ndir = os.getcwd()\nfile_mesh_surface = open(dir + '/Cast3m/surf.txt', 'r')\npos_surf_init = file_mesh_surface.read().split('\\n')\n\npos_surf_final = [[float(x) for x in pos_surf_init[5].split()][1:]]\nfor line in pos_surf_init[7:-3]:\n pos_surf_final += [[float(x) for x in line.split()][1:]]\nfile_mesh_surface.close()\n\npos_surf_final = torch.tensor(pos_surf_final, dtype = dtype)\n\n# We fetch the coordinates of all the defects and their dipole tensor components\ndis_position_final = []\ndis_dtensor_final = []\n\nos.chdir(dir + '/Cast3m/simu')\nlist_files = os.listdir(dir + '/Cast3m/simu/')\nfor file in list_files:\n if ('pos.txt' in file):\n file_dis_position = open(file, 'r')\n dis_position_init = [float(x) for x in file_dis_position.read().split()]\n dis_position_final += [[dis_position_init[0], dis_position_init[1], dis_position_init[2]]]\n # Close the open files\n file_dis_position.close()\n\n if ('dt.txt' in file):\n file_dis_dtensor = open(file, 'r')\n dis_dtensor_init = [float(x) for x in file_dis_dtensor.read().split()]\n dis_dtensor_final += [[[dis_dtensor_init[0],dis_dtensor_init[1] ,dis_dtensor_init[2]]\\\n ,[dis_dtensor_init[3], dis_dtensor_init[4], dis_dtensor_init[5]]\\\n ,[dis_dtensor_init[6], dis_dtensor_init[7], dis_dtensor_init[8]]]]\n # Close the open files\n file_dis_dtensor.close()\n\ndis_dtensor_final = torch.tensor(dis_dtensor_final, dtype = dtype)\ndis_position_final = torch.tensor(dis_position_final, dtype = dtype)\n\n# We import the useful_function script so that we can compute the\n# analytical sigma associated\n\n# Use the functions defined previously to compute the sigma Components\n# on the cell boundary.\n\n# os.chdir(dir + '/Cast3m/sigmaSurf')\nos.chdir(dir + '/Cast3m')\n\nfilename_castem_sigma = dir + '/Cast3m/solution.dgibi'\nfile_castem_sigma = open(filename_castem_sigma, 'r')\n\ntexte = file_castem_sigma.readlines()\n\nfile_castem_sigma.close()\n\nind = -1\nfor line in texte:\n if ('HERE' in line):\n ind_int = ind + 1\n ind += 1\n\nind_act = ind_int + 1\ntexte.insert(ind_act, 'Fload = VIDE CHPOINT/DISC;\\n')\nind_act += 1\n\n# Let's compute the elastic compliance tensor\nelast_compl = uf.define_C(nu,mu)\n\n# Let's pickle the input data (dtensor, loop position and node position)\nf = open('init.pckl', 'wb')\npickle.dump([pos_surf_final, dis_position_final, dis_dtensor_final], f)\nf.close()\n\nif (sys.argv[1] == 'True'):\n # Let's compute the sigma with mpiexec (cpu parallelisation)\n os.system('mpiexec -n {} python ../mpi_sigma.py'.format(Ncores_BC))\n\n # Let's fetch the sigma components computed above\n f = open('sigma.pckl', 'rb')\n stress_node = pickle.load(f)\n f.close()\n\nfor ind_position in range(len(pos_surf_final)):\n surfaces_name = surf_node(ind_position)\n surfaces_normv = [corr_name_norm[x] for x in surfaces_name]\n\n if 
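# Illustrative sketch (toy tensors): the loop below applies the Cauchy relation
# t = sigma . n at each surface node, down-weighting contributions by 2**(faces-1)
# when a node sits on several faces. The contraction itself, checked standalone:
import torch
sigma = 2.0 * torch.eye(3, dtype=torch.double)               # toy stress tensor
n = torch.tensor([[0.0], [0.0], [1.0]], dtype=torch.double)  # outward normal of face 'u'
traction = torch.tensordot(sigma, n, 1)                      # shape (3, 1)
assert traction[2].item() == 2.0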
(sys.argv[1] == 'False'):\n stress_node = torch.zeros((3,3), dtype = dtype)\n for ind_loops in range(len(dis_position_final)):\n r = torch.tensor([pos_surf_final[ind_position][x] - dis_position_final[ind_loops][x] for x in range(3)])\n stress_node -= uf.sigma(dis_dtensor_final[ind_loops], r, nu, mu, elast_compl)\n\n traction = sum([torch.tensordot(stress_node, surfaces_normv[x], 1).div(2**(len(surfaces_name)-1)) for x in range(len(surfaces_name))])\n\n else:\n traction = sum([torch.tensordot(stress_node[ind_position], surfaces_normv[x], 1).div(2**(len(surfaces_name)-1)) for x in range(len(surfaces_name))])\n\n Tx = str(traction[0].item())\n Ty = str(traction[1].item())\n Tz = str(traction[2].item())\n\n texte.insert(ind_act, 'Fload = Fload ET (FORCE FX (' + Tx + \\\n ') FY (' + Ty + \\\n ') FZ (' + Tz + \\\n ') (surfPoi POIN ' + str(ind_position + 1)+ '));\\n')\n\n ind_act += 1\n nx = str(pos_surf_final[ind_position][0].item() - Np/2.0)\n ny = str(pos_surf_final[ind_position][1].item() - Np/2.0)\n nz = str(pos_surf_final[ind_position][2].item() - Nz/2.0)\n\n\n texte.insert(ind_act, 'Mtotx = Mtotx + ('+ny+'*'+Tz+') - ('+nz+'*'+Ty+');\\n')\n ind_act += 1\n texte.insert(ind_act, 'Mtoty = Mtoty + ('+nz+'*'+Tx+') - ('+nx+'*'+Tz+');\\n')\n ind_act += 1\n texte.insert(ind_act, 'Mtotz = Mtotz + ('+nx+'*'+Ty+') - ('+ny+'*'+Tx+');\\n')\n ind_act += 1\n\n # file_sigma = open(dir + '/Cast3m/sigmaSurf/sigma' + str(ind_position) + '.txt', 'w')\nfilename_castem_sigma = dir + '/Cast3m/solution_mod.dgibi'\nfile_castem_sigma = open(filename_castem_sigma, 'w')\nfor line in texte:\n file_castem_sigma.write(line)\n\nfile_castem_sigma.close()\n# print('Total time : ', time.time() - t0, ' s' )\n","repo_name":"FedericoBar/langevin-defects-dynamics","sub_path":"stressBC.py","file_name":"stressBC.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"19536191651","text":"import logging\nimport pyaudio\nimport struct\nimport sys\nimport datetime\nimport alsaaudio\n\n\nFORMAT = pyaudio.paInt16 # We use 16bit format per sample\nCHANNELS = 1\nRATE = 44100\nCHUNK = 1024 # 1024bytes of data red from a buffer\n\nMIXER = alsaaudio.Mixer()\nMUTE_HOLD_TIME = datetime.timedelta(seconds=5)\nMUTE_LOCKED = datetime.datetime.min\nVOLUME = None\n\n# checking the user input\ndef ask(question, options):\n result = None\n while result not in options:\n print(question)\n result = input().strip()\n return result\n\n# binding the input to MIC_CONF value\ndef sensitivity_converter(sens):\n return{\n 1: '32000',\n 2: '31000',\n 3: '29000',\n 4: '27000',\n 5: '25000',\n 6: '23000',\n 7: '21000',\n 8: '19000',\n 9: '17000',\n 11: '15000',\n 12: '13000',\n 13: '11000',\n 14: '9000',\n 15: '7000',\n 16: '6000',\n }.get(sens, 17)\n\n#asking user for input\nMIC_CONF = int(sensitivity_converter(int(ask(\"Please enter the MIC_CONF to the amount of sound from 1 to 16: \", [\n \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\"]))))\n\n\ndef plot_data(in_data):\n global MUTE_LOCKED\n global VOLUME\n global MIXER\n\n # get and convert the data to float\n audio_data = struct.unpack(\"h\", in_data[:2])\n ct = datetime.datetime.now()\n\n # comparing the MIC_CONF with incoming noise\n if audio_data[0] > MIC_CONF:\n if VOLUME != 0:\n MIXER.setmute(1) # Mute the system\n # MIXER.setvolume(0) # Set the volume to 0% (MUTE).\n VOLUME = 0\n print(\"MUTED!!!\")\n MUTE_LOCKED = ct + MUTE_HOLD_TIME\n\n 
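# Illustrative sketch (assumes 16-bit mono frames, as configured above): plot_data keys
# the mute off a single int16 sample via struct.unpack("h", ...). Scanning the peak over
# the whole CHUNK is typically more robust:
import struct

def peak_amplitude(frame_bytes):
    samples = struct.unpack('<%dh' % (len(frame_bytes) // 2), frame_bytes)
    return max(abs(s) for s in samples)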
elif ct > MUTE_LOCKED:\n if VOLUME != 100:\n MIXER.setmute(0) # Unmute the system\n # MIXER.setvolume(100) # Set the volume to 100%.\n VOLUME = 100\n print(\"UNMUTED Again!!!\")\n\n print(\"current time:-\", ct)\n if VOLUME == 0:\n print(\"MUTED!!! Noise higher than MIC_CONF value: {a}\".format(a=MIC_CONF))\n else:\n print(\"UNMUTE!!!! MIC_CONF value: {a}\".format(a=MIC_CONF))\n\n #print(\"volume:\", VOLUME)\n\n\ndef main():\n # making a LOGFILE\n logging.basicConfig(filename='logFile.log', level=logging.DEBUG)\n audio = pyaudio.PyAudio()\n stream = audio.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True) # ,\n stream.start_stream()\n print(\"\\n+---------------------------------+\")\n print(\"| Press Ctrl+C to Break Recording |\")\n print(\"+---------------------------------+\\n\")\n\n # Loop so program doesn't end while the stream callback's\n # itself for new data\n keep_going = True\n while keep_going:\n try:\n plot_data(stream.read(CHUNK, exception_on_overflow=False))\n except KeyboardInterrupt:\n keep_going = False\n\n # Close up shop (currently not used because KeyboardInterrupt\n # is the only way to close)\n stream.stop_stream()\n stream.close()\n\n audio.terminate()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"esiRonaldo/soundRecorderFrank29N","sub_path":"soundA.py","file_name":"soundA.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16788797812","text":"#!/usr/local/bin/python3\n# coding=utf8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport random\n\n# from six.moves import xrange # pylint: disable=redefined-builtin\n\n\nfrom tqdm import tqdm\nfrom vocab import Vocab as V\nfrom options import Options as opt\nfrom utils.fileIO import fetchSentencesAsWords\nimport tensorflow as tf\nimport numpy as np\n\nrandom.seed(time.time())\n\nflags = tf.app.flags\n\nflags.DEFINE_string(\"output\", '.pkl', \"Directory to write the model and training summaries.\")\nflags.DEFINE_string(\"train\", None, \"Training text file. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.\")\nflags.DEFINE_string(\"vocab\", None, \"The vocabulary file path.\")\nflags.DEFINE_string(\"save_vocab\", None, \"If not None, save the vocabulary to this path.\")\nflags.DEFINE_string(\"covariance\", \"diagnal\", \"Shape of covariance matrix, default is diagnal. Possible value is 'diagnal' or \")\nflags.DEFINE_integer(\"size\", 50, \"The embedding dimension size. Default is 100.\")\nflags.DEFINE_integer(\"window\", 3, \"The number of words to predict to the left and right of the target word.\")\nflags.DEFINE_integer(\"negative\", 5, \"Negative samples per sense. Default is 5.\")\nflags.DEFINE_integer(\"iter\", 10, \"Number of iterations to train. Each iteration processes the training data once completely. Default is 15.\")\nflags.DEFINE_integer(\"min_count\", 5, \"The minimum number of word occurrences for it to be included in the vocabulary. Default is 5.\")\nflags.DEFINE_integer(\"sentence_length\", 20, \"The length of one sentence.\")\nflags.DEFINE_integer(\"max_sense_per_word\", 5, \"The maximum number of one word.\")\nflags.DEFINE_integer(\"batch_size\", 50, \"Number of training examples processed per step (size of a minibatch).\")\nflags.DEFINE_float(\"alpha\", 0.001, \"Initial learning rate. 
Default is 0.001.\")\nflags.DEFINE_float(\"margin\", 100, \"Margin between positive and negative training pairs. Default is 100.\")\n# flags.DEFINE_boolean(\"gpu\", False, \"If true, use GPU instead of CPU.\")\nflags.DEFINE_string(\"energy\", \"EL\", \"What energy function is used, default is EL.\")\n\nFLAGS = flags.FLAGS\n\nopt.train = FLAGS.train\nopt.vocab = FLAGS.vocab\nopt.saveVocab = FLAGS.save_vocab\nopt.covarShape = FLAGS.covariance\nopt.embSize = FLAGS.size\nopt.windowSize = FLAGS.window\nopt.negative = FLAGS.negative\nopt.iter = FLAGS.iter\nopt.minCount = FLAGS.min_count\nopt.sentenceLength = FLAGS.sentence_length\nopt.maxSensePerWord = FLAGS.max_sense_per_word\nopt.batchSize = FLAGS.batch_size\nopt.alpha = FLAGS.alpha\nopt.margin = FLAGS.margin\n# opt.gpu = FLAGS.gpu\nopt.energy = FLAGS.energy\nopt.savePath = './data/' + opt.energy + '.' + time.strftime(\"%m%d%H%M\", time.localtime()) + 'w' + str(opt.windowSize) +\\\n 'b' + str(opt.batchSize) + 'lr' + str(opt.alpha) + 'm' + str(opt.margin) + 'n' + str(opt.negative) + FLAGS.output + '.pkl'\n\nvocabulary = None\ne_step = True\nm_step = True\nopt.verboseTime = False\n\ngradMin = -5\ngradMax = 5\n\ndef main(_):\n \"\"\" Train a sense2guass model. \"\"\"\n global vocabulary\n\n if not FLAGS.train: # or not FLAGS.output: # Whether the train corpus and output path is set\n print(\"--train must be specified.\")\n sys.exit(1)\n\n config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) # Config to make tensorflow not take up all the GPU memory\n config.gpu_options.allow_growth=True\n\n with tf.Session(config=config) as sess, tf.device('CPU:0'), open('console_output.small.txt', 'w') as of:\n global_step = tf.Variable(0, trainable = False)\n learning_rate = tf.train.exponential_decay(opt.alpha, global_step,\n 3000, 0.96, staircase = True)\n # learning_rate = opt.alpha\n # Passing global_step to minimize() will increment it at each step.\n # optimizer = tf.train.AdagradOptimizer(learning_rate)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n # meanOpt = tf.train.AdamOptimizer()\n # optimizer = tf.train.RMSPropOptimizer(learning_rate)\n # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)\n # optimizer = tf.train.FtrlOptimizer(learning_rate)\n # optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # sigmaOpt = tf.train.AdadeltaOptimizer(opt.alpha)\n\n # Build vocabulary or load vocabulary from file\n if opt.vocab != None:\n vocabulary = V()\n vocabulary.load(opt.vocab)\n else:\n vocabulary = V(opt.train)\n vocabulary.initAllSenses()\n\n if opt.saveVocab:\n if vocabulary.saveVocab(opt.saveVocab):\n print('Vocab saved at %s.' 
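# Illustrative sketch (framework-free restatement): tf.train.exponential_decay above
# shrinks the learning rate by 4% every 3000 steps (staircase=True). The closed form
# it computes:
def decayed_lr(base_lr, step, decay_steps=3000, decay_rate=0.96):
    return base_lr * decay_rate ** (step // decay_steps)  # staircase: integer division

assert abs(decayed_lr(0.001, 6000) - 0.001 * 0.96 ** 2) < 1e-12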
% opt.saveVocab)\n else:\n print('Vocab save FAILED!')\n\n##----------------- Build Window Loss Graph ------------------\n print('Building Window Loss Graph...')\n from graph import windowLossGraph\n windowLossGraph, window = windowLossGraph(vocabulary)\n print('Finished.')\n##----------------- Build Window Loss Graph ------------------\n\n if m_step:\n ##---------------------------- Build NCE Loss --------------------------------\n print('Building NCE Loss...')\n from graph import batchNCELossGraph\n\n lossList = batchNCELossGraph(vocabulary)\n senseIdxPlaceholder = tf.get_collection('POS_PHDR')[0]\n\n posLosses = tf.get_collection('POS_LOSS')\n negLosses = tf.get_collection('NEG_LOSS')\n\n # Margin Loss\n loss = tf.reduce_sum(lossList)\n avgLoss = loss / opt.batchSize / opt.negative / len(lossList)\n avgPosLoss = tf.reduce_sum(posLosses) / len(posLosses) / opt.batchSize\n avgNegLoss = tf.reduce_sum(negLosses) / opt.sentenceLength / opt.negative / opt.batchSize\n\n posgrad = optimizer.compute_gradients(avgPosLoss, var_list = [vocabulary.trainableMeans, vocabulary.trainableOutputMeans], gate_gradients = optimizer.GATE_NONE)[0][0]\n neggrad = optimizer.compute_gradients(avgNegLoss, var_list = [vocabulary.trainableMeans, vocabulary.trainableOutputMeans], gate_gradients = optimizer.GATE_NONE)[0][0]\n\n # Cross Entropy Loss\n # posLen = len(posLosses)\n # posLosses = tf.concat(posLosses, 0)\n # avgPosLoss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.ones_like(posLosses), logits = posLosses)) / posLen / opt.batchSize\n # avgNegLoss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros_like(negLosses), logits = negLosses)) / opt.sentenceLength / opt.negative / opt.batchSize\n # loss = avgPosLoss + avgNegLoss\n # avgLoss = loss / 2\n\n with tf.name_scope(\"Summary\"):\n tf.summary.scalar(\"Mean Positive Grad\", tf.reduce_sum(posgrad))\n tf.summary.scalar(\"Mean Negative Grad\", tf.reduce_sum(neggrad))\n\n tf.summary.scalar(\"Positive Loss\", avgPosLoss)\n tf.summary.scalar(\"Negative Loss\", avgNegLoss)\n # tf.summary.scalar(\"Num of Nonzero Loss\", nonzeroNum)\n tf.summary.scalar(\"NCE Loss\", avgLoss)\n avg_mean_norm, mean_var = tf.nn.moments(tf.norm(vocabulary.means, axis = 1), axes = [0])\n avg_cov_norm, cov_var = tf.nn.moments(tf.norm(vocabulary.sigmas, axis = 1), axes = [0])\n tf.summary.scalar(\"Average Mean Norm\", avg_mean_norm)\n tf.summary.scalar(\"Mean Norm Variance\", mean_var)\n\n if opt.energy == 'EL':\n tf.summary.scalar(\"Average Sigma Norm\", avg_cov_norm)\n tf.summary.scalar(\"Sigma Norm Variance\", cov_var)\n posleng = len(tf.get_collection('POS_LOG_EL_FIRST'))\n possecleng = len(tf.get_collection('POS_LOG_EL_SECOND'))\n negleng = len(tf.get_collection('NEG_LOG_EL_FIRST'))\n negsecleng = len(tf.get_collection('NEG_LOG_EL_SECOND'))\n tf.summary.scalar(\"POS Log EL First\", tf.reduce_sum(tf.add_n(tf.get_collection('POS_LOG_EL_FIRST'))) / opt.batchSize / posleng)\n tf.summary.scalar(\"POS Log EL Second\", tf.reduce_sum(tf.add_n(tf.get_collection('POS_LOG_EL_SECOND'))) / opt.batchSize / possecleng)\n tf.summary.scalar(\"NEG Log EL First\", tf.reduce_sum(tf.add_n(tf.get_collection('NEG_LOG_EL_FIRST'))) / opt.batchSize / opt.sentenceLength / opt.negative / negleng)\n tf.summary.scalar(\"NEG Log EL Second\", tf.reduce_sum(tf.add_n(tf.get_collection('NEG_LOG_EL_SECOND'))) / opt.batchSize / opt.sentenceLength / opt.negative / negsecleng)\n summary_op = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter('log' + opt.energy + '/' + 
time.strftime(\"%m%d\", time.localtime()) + '/' + time.strftime(\"%H:%M\", time.localtime()) +\n '_w' + str(opt.windowSize) + 'b' + str(opt.batchSize) + 'lr' + str(opt.alpha) + '' + str(opt.margin) +\\\n 'n' + str(opt.negative) + 'adam', graph = sess.graph)\n print('Finished.')\n regular = 0 # -tf.norm(vocabulary.sigmas, ord = 'euclidean') if opt.covarShape != 'none' else 0\n obj = loss\n\n print('Building Optimizer...')\n\n if opt.energy == 'IP':\n grad = optimizer.compute_gradients(obj, var_list = [vocabulary.means, vocabulary.outputMeans], gate_gradients = optimizer.GATE_NONE)\n elif opt.energy == 'EL':\n grad = optimizer.compute_gradients(obj, var_list = [vocabulary.trainableSigmas, vocabulary.trainableOutputSigmas], gate_gradients = optimizer.GATE_NONE)\n elif opt.energy == 'MSE':\n grad = optimizer.compute_gradients(obj, var_list = [vocabulary.trainableMeans, vocabulary.trainableOutputMeans], gate_gradients = optimizer.GATE_NONE)\n else:\n grad = optimizer.compute_gradients(obj, gate_gradients = optimizer.GATE_NONE)\n clipedGrad = grad\n # clipedGrad = [(tf.clip_by_value(g, gradMin, gradMax), var) for g, var in grad]\n # # clipedGrad = [\n # # (tf.multiply(g, opt.gradConstraint, name = 'Scaled_Cov_Grad'), var) if 'igmas' in var.name else\n # # (tf.clip_by_value(g, gradMin, gradMax, name = 'Cliped_Mean_Grad'), var) for g, var in grad] # Limit covariance gradients\n #\n op = optimizer.apply_gradients(clipedGrad, global_step = global_step)\n # tf.nn.sigmoid_cross_entropy_with_logits()\n # op = optimizer.minimize(loss + regular)\n # # op = optimizer(avgBatchStcLoss)\n print('Finished.')\n ##---------------------------- Build NCE Loss --------------------------------\n\n tf.global_variables_initializer().run(session=sess)\n # Train iteration\n print('Start training...\\n')\n\n from e_step.cinference import batchDPInference\n # from e_step.inference import batchDPInference as pyinference\n # from multiprocessing import Pool\n # pool = Pool()\n\n for i in range(opt.iter):\n if os.path.isfile(opt.train):\n with open(opt.train) as f:\n # negativeSamplesList = []\n batchStcW = []\n\n try:\n if opt.verboseTime:\n wT = 0\n iT = 0\n cT = 0\n tIT = 0\n count = 0\n\n for stcW in fetchSentencesAsWords(f, vocabulary, 20000, opt.sentenceLength, verbose=False):\n ##----------------------------- Train Batch ------------------------------\n if len(stcW) > opt.windowSize and len(stcW) == opt.sentenceLength:\n batchStcW.append(stcW)\n\n if len(batchStcW) == opt.batchSize:\n batchLossSenseIdxList = []\n # negativeSamplesList = np.random.randint(vocabulary.totalSenseCount, size=(opt.batchSize, opt.sentenceLength, opt.negative))\n ##--------------------------------- Inference By Batch ----------------------------------\n start = time.time()\n if opt.maxSensePerWord == 1:\n for p in batchStcW:\n tmpList = []\n\n for q in p:\n tmpList.append(q.senseStart)\n\n batchLossSenseIdxList.append(tmpList)\n else:\n batchLossSenseIdxList, twT, tcT, tiT = batchDPInference(batchStcW, sess, windowLossGraph, window)\n\n # if opt.verboseTime:\n # tIT += time.time() - start\n # wT += twT\n # cT += tcT\n # iT += tiT\n # count += 1\n # print(\"\\n\\nTotal Inference Time:\", tIT / count, '\\n')\n # print(\"Get Windows Time Takes: %.2f%%\" % (wT * 100 / tIT))\n # print(\"Calculate Time Takes: %.2f%%\" % (cT * 100 / tIT))\n # print(\"DP Time Takes: %.2f%%\" % (iT * 100 / tIT))\n # print(\"Data Transfer Time Takes: %.2f%%\" % ((tIT - iT - cT - wT) * 100 / tIT))\n pass\n ##--------------------------------- Inference By Batch 
----------------------------------\n\n if m_step:\n # summ, posloss = sess.run([merged, reduceAvgLoss], feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n # writer.add_summary(summ, i)\n start = time.time()\n posloss = sess.run(avgPosLoss, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n negloss = sess.run(avgNegLoss, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n nceloss = sess.run(avgLoss, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n # negloss = sess.run(avgNegLoss, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList, negSamples: negativeSamplesList})\n # nceloss = sess.run(loss, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList, negSamples: negativeSamplesList})\n # print(sess.run(grad, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList, negSamples: negativeSamplesList}))\n\n if isinstance(learning_rate, float):\n lr = learning_rate\n #print(global_step.eval(), learning_rate)\n else:\n lr = learning_rate.eval()\n #print(global_step.eval(), learning_rate.eval())\n\n if global_step.eval() % 10 == 1:\n of.write('Iter:%d/%d, Step:%d, Lr:%.5f POSLoss:%.5f, NEGLoss:%.5f, NCELoss:%.5f, Progress:%.2f%%.\\n' % (i + 1, opt.iter, global_step.eval(), lr, posloss, negloss, nceloss, (float(f.tell()) * 100 / os.path.getsize(opt.train))))\n\n print(grad)\n for g, var in grad:\n if isinstance(g, tf.Tensor):\n l = []\n il = []\n covg = g.eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n for idx, tmpi in enumerate(covg):\n if np.sum(tmpi) != 0:\n l.append(tmpi)\n il.append(idx)\n of.write('Gradients ')\n of.write(repr(l))\n of.write('\\n')\n of.write(var.name)\n of.write(' ')\n of.write(repr(tf.gather(var, il).eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})))\n of.write('\\n')\n # print('Gradients', l)\n # print(var.name, tf.gather(var, il).eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n # print('Gradients', tf.count_nonzero(g).eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n else:\n of.write('Index: ' + repr(g.indices.eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})) +\n '\\nGradients: ' + repr(g.values.eval(feed_dict = {senseIdxPlaceholder: batchLossSenseIdxList})) +\n '\\n' + var.name + ' ' + repr(tf.gather(var, g.indices).eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})) +\n '\\n'\n )\n\n # print('\\nIndex:', g.indices.eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n # print('Gradients:', g.values.eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n # # print('Unknown:', g.dense_shape.eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n # print(var.name, tf.gather(var, g.indices).eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n\n sys.stdout.write('\\rIter:%d/%d, Step:%d, Lr:%.5f POSLoss:%.5f, NEGLoss:%.5f, NCELoss:%.5f, Progress:%.2f%%.' 
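# Illustrative sketch: the training loop here reports progress by rewriting a single
# terminal line - carriage return, no newline, explicit flush:
import sys

def report(step, total):
    sys.stdout.write('\rstep %d/%d (%.1f%%)' % (step, total, 100.0 * step / total))
    sys.stdout.flush()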
% (i + 1, opt.iter, global_step.eval(), lr, posloss, negloss, nceloss, (float(f.tell()) * 100 / os.path.getsize(opt.train))))\n sys.stdout.flush()\n\n # print(sess.run(grad, feed_dict ={senseIdxPlaceholder: batchLossSenseIdxList, negSamples: negativeSamplesList})[0])\n\n summary_writer.add_summary(summary_op.eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}), global_step.eval())\n # summary_writer.add_summary(summary_op.eval(feed_dict={senseIdxPlaceholder: batchLossSenseIdxList, negSamples: negativeSamplesList}), global_step.eval())\n # print(\"Cal Loss Time:\", time.time() - start)\n\n # if posloss < 0:\n # print(sess.run(l, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n\n # if posloss > 1000:\n # print('')\n # print(\"ASSIGN:\", batchLossSenseIdxList)\n # energys = sess.run(l, feed_dict = {senseIdxPlaceholder: batchLossSenseIdxList, mid: batchLossSenseIdxList, negSamples: negativeSamplesList})\n # print(\"ENERGYS:\", energys)\n # print(\"VARLS:\", sess.run(varl, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList}))\n #\n # for ind in range(len(energys)):\n # if energys[ind] > 1000:\n # pair = sess.run(varl[i], feed_dict={senseIdxPlaceholder: batchLossSenseIdxList, mid: batchLossSenseIdxList, negSamples: negativeSamplesList})\n #\n # mm = tf.nn.embedding_lookup(vocabulary.means, pair[0])\n # sigm = tf.nn.embedding_lookup(vocabulary.sigmas, pair[0])\n # moth = tf.nn.embedding_lookup(vocabulary.outputMeans, pair[1])\n # sigoth = tf.nn.embedding_lookup(vocabulary.outputSigmas, pair[1])\n #\n # m = mm - moth\n # sig = sigm + sigoth\n #\n # from utils.distance import diagEL\n #\n # print(\"ENERGY:\", energys[ind])\n # print(\"ENERGY REAL:\", sess.run(diagEL(mm, sigm, moth, sigoth)))\n # print(\"TRACE VALUE:\", sess.run(tf.log(tf.reduce_prod(sig, 1))))\n # print(\"SQUARE VALUE:\", sess.run(tf.reduce_sum(tf.square(m) * tf.reciprocal(sig), 1)))\n # print(\"SQUARE SUM:\", sess.run(tf.reduce_sum(tf.square(m), 1)))\n # print(\"SIGMA:\", sess.run(sig))\n # print(\"MEAN:\", sess.run(m))\n\n # start = time.time()\n for _ in range(1):\n sess.run(op, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n # sess.run([mop, sop], feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n # sess.run(op, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList, negSamples: negativeSamplesList})\n if opt.verboseTime:\n print('OP Time:', time.time() - start)\n\n # print(batchStcW)\n # print(\"Input Embedding\", vocabulary.means[vocabulary.getWord('without').senseStart].eval())\n # print(\"Input Embedding\", vocabulary.sigmas[vocabulary.getWord('without').senseStart].eval())\n # print(\"Output Embedding\", vocabulary.outputMeans[vocabulary.getWord('without').senseStart].eval())\n # print(\"Output Embedding\", vocabulary.outputSigmas[vocabulary.getWord('without').senseStart].eval())\n # print(\"Gradient:\", vocabulary.getWord('without').senseStart in sess.run(tf.gradients(avgBatchStcLoss, vocabulary.means), feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})[0].indices)\n # gr = sess.run(grad, feed_dict={senseIdxPlaceholder: batchLossSenseIdxList})\n # print(gr)\n # gr[67320]\n # fi.write(str(batchStcW))\n # fi.write('\\n')\n # fi.write(str(batchLossSenseIdxList))\n # fi.write('\\n')\n # fi.write(str(list(gr[0][0].values)).replace('\\n', ''))\n # fi.write('\\n')\n # fi.write(str(list(gr[0][0].indices)))\n # fi.write('\\n')\n # fi.write('\\n')\n # print('OK')\n\n batchStcW = []\n # negativeSamplesList = []\n ##----------------------------- Train Batch ------------------------------\n 
except KeyboardInterrupt:\n print(\"Canceled by user, save data?(y/N)\")\n ans = input()\n if ans == 'y':\n vocabulary.saveVocabWithEmbeddings(opt.savePath, sess)\n return\n\n # print('is', vocabulary.getWord('is').senseCount, vocabulary.getWord('is').senseNum)\n # print('english', vocabulary.getWord('english').senseCount, vocabulary.getWord('english').senseNum)\n # print('latin', vocabulary.getWord('latin').senseCount, vocabulary.getWord('latin').senseNum)\n # print('victoria', vocabulary.getWord('victoria').senseCount, vocabulary.getWord('victoria').senseNum)\n # print('a', vocabulary.getWord('a').senseCount, vocabulary.getWord('a').senseNum)\n #\n\n # aftMeans = vocabulary.means.eval()\n # aftSigmas = vocabulary.sigmas.eval()\n #\n # import pickle as pk\n #\n # with open('iter' + str(i) + '.pkl', 'w') as f:\n # pk.dump({'orgMeans': orgMeans, 'orgSigmas': orgSigmas, 'aftMeans': aftMeans, 'aftSigmas': aftSigmas}, f)\n\n vocabulary.saveVocabWithEmbeddings(opt.savePath, sess)\n else:\n raise Exception(opt.train)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","repo_name":"killa1218/sense2guass","sub_path":"sense2gauss.py","file_name":"sense2gauss.py","file_ext":"py","file_size_in_byte":26715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4884727364","text":"SERVER_ADDRESS = \"127.0.1.1\"\nSERVER_PORT = 5555\n\nWIDTH = 500\nHEIGHT = 500\n\nPLAYER_RADIUS = 25\nPLAYER_SPEED = 3\n\nPROJECTILE_RADIUS = 7\nPROJECTILE_MAX_RANGE = 200\nPROJECTILE_SPEED = 5\nPROJECTILE_DAMAGE = 10\n","repo_name":"Pkepowicz/shooter-servers-and-security","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15381750360","text":"\"\"\"This module contains my implementation of an AVL Tree.\"\"\"\n\n\nclass AVLTreeNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.balance_factor = 0\n self.height = 0\n\n\nclass AVLTree:\n def __init__(self):\n self.root = None\n\n def insert(self, data):\n if data is None:\n raise ValueError(\"Node value cannot be None\")\n self.root = self._insert(self.root, data)\n\n def _insert(self, current_node, data):\n if not current_node:\n return AVLTreeNode(data)\n if data < current_node.data:\n current_node.left = self._insert(current_node.left, data)\n else:\n current_node.right = self._insert(current_node.right, data)\n self._update_balance_factor_and_height(current_node)\n return self._rebalance(current_node)\n\n def delete(self, data):\n if data is None:\n raise ValueError(\"Node value cannot be None\")\n self.root = self._delete(self.root, data)\n\n def _delete(self, current_node, data):\n if not current_node:\n return current_node\n if data < current_node.data:\n current_node.left = self._delete(current_node.left, data)\n elif data > current_node.data:\n current_node.right = self._delete(current_node.right, data)\n else:\n if not current_node.left and not current_node.right:\n current_node = None\n elif not current_node.left:\n current_node = current_node.right\n elif not current_node.right:\n current_node = current_node.left\n else:\n # small optimization to prevent unecessary rebalancing\n if current_node.left.height > current_node.right.height:\n max_node = self._find_max(current_node.left)\n current_node.data = max_node.data\n current_node.left = self._delete(\n current_node.left, current_node.data\n )\n else:\n min_node = 
self._find_min(current_node.right)\n current_node.data = min_node.data\n current_node.right = self._delete(\n current_node.right, current_node.data\n )\n\n self._update_balance_factor_and_height(current_node)\n return self._rebalance(current_node)\n\n def _find_max(self, current_node):\n if not current_node:\n return None\n if not current_node.right:\n return current_node\n return self._find_max(current_node.right)\n\n def _find_min(self, current_node):\n if not current_node:\n return None\n if not current_node.left:\n return current_node\n return self._find_min(current_node.left)\n\n def _update_balance_factor_and_height(self, current_node):\n if not current_node:\n return\n left_subtree_height = -1\n right_subtree_height = -1\n if current_node.left:\n left_subtree_height = current_node.left.height\n if current_node.right:\n right_subtree_height = current_node.right.height\n current_node.height = max(left_subtree_height, right_subtree_height) + 1\n current_node.balance_factor = right_subtree_height - left_subtree_height\n\n def _rebalance(self, node):\n if not node:\n return None\n new_node = node\n # right heavy\n if node.balance_factor == 2:\n if node.right.balance_factor < 0: # zig-zag, need two rotations\n node.right = self._rotate_right(node.right)\n new_node = self._rotate_left(node)\n # left heavy\n elif node.balance_factor == -2:\n if node.left.balance_factor > 0: # zig-zag, need two rotations\n node.left = self._rotate_left(node.left)\n new_node = self._rotate_right(node)\n return new_node\n\n def _rotate_left(self, node):\n right_child = node.right\n node.right = right_child.left\n right_child.left = node\n self._update_balance_factor_and_height(node)\n self._update_balance_factor_and_height(right_child)\n return right_child\n\n def _rotate_right(self, node):\n left_child = node.left\n node.left = left_child.right\n left_child.right = node\n self._update_balance_factor_and_height(node)\n self._update_balance_factor_and_height(left_child)\n return left_child\n\n def __str__(self):\n node_values = []\n\n def preorder_traversal(root):\n if not root:\n node_values.append(\"#\")\n else:\n node_values.append(str(root.data))\n preorder_traversal(root.left)\n preorder_traversal(root.right)\n\n preorder_traversal(self.root)\n return \"\".join(node_values)\n\n\n# Small informal test. 
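# Illustrative sketch (editor-supplied check): the informal test below stores only the
# most recent comparison in the global `balanced` flag, so it effectively reports just
# the last node visited. A checker that fails on any unbalanced node:
def check_balanced(root):
    def height(node):
        if not node:
            return -1
        lh = height(node.left)
        rh = height(node.right)
        if lh is None or rh is None or abs(lh - rh) > 1:
            return None  # propagate failure upward
        return max(lh, rh) + 1
    return height(root) is not None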
Will make an official test later\nbalanced = True\n\n\ndef is_balanced(root):\n global balanced\n if not root:\n return -1\n left_subtree_height = is_balanced(root.left)\n right_subtree_height = is_balanced(root.right)\n balanced = abs(right_subtree_height - left_subtree_height) <= 1\n return max(left_subtree_height, right_subtree_height) + 1\n\n\n# test insertions\navl_tree = AVLTree()\nfor value in range(5):\n print(value)\n avl_tree.insert(value)\nis_balanced(avl_tree.root)\nprint(balanced)\nprint(avl_tree, \"\\n\")\n\n\n# test deletions\navl_tree.delete(1)\nis_balanced(avl_tree.root)\nprint(balanced)\nprint(avl_tree, \"\\n\")\n\n# second deletion\navl_tree.delete(3)\nis_balanced(avl_tree.root)\nprint(balanced)\nprint(avl_tree, \"\\n\")\n\n# not in tree\navl_tree.delete(15)\nis_balanced(avl_tree.root)\nprint(balanced)\nprint(avl_tree, \"\\n\")\n","repo_name":"EricMontague/Datastructures-and-Algorithms","sub_path":"data_structures/balanced_trees/avl_tree.py","file_name":"avl_tree.py","file_ext":"py","file_size_in_byte":5823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70142661064","text":"\n#from shutil import which\nimport os\nimport shutil\n\n\ndef is_tool(name):\n \"\"\"Check whether `name` is on PATH and marked as executable.\"\"\"\n\n # from whichcraft import which\n\n return shutil.which(name) is not None\n\n\ndef check_output_dir(\n dir,\n overwrite = False):\n\n #check if output folder exists, and whether to overwrite\n if os.path.exists(dir) and overwrite == False:\n raise ValueError(\"{dir} exists and overwrite not set\".format(dir = dir))\n elif os.path.exists(dir) and overwrite == True:\n shutil.rmtree(dir)\n os.makedirs(dir)\n else: # make output directory\n os.makedirs(dir)\n","repo_name":"djw533/hamburger","sub_path":"hamburger/tool_check.py","file_name":"tool_check.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"31284742530","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport time\nimport pandas as pd\n\nlines = open('./input.txt', 'r').read()\n\nvocab = sorted(list(set(lines)))\nitos = {i:ch for i, ch in enumerate(vocab)}\nstoi = {ch:i for i, ch in enumerate(vocab)}\n\nMASTER_CONFIG = {\n \"vocab_size\": len(vocab)\n}\n\n# simple tokenization by characters\ndef encode(s):\n return [stoi[ch] for ch in s]\n\ndef decode(l):\n return ''.join([itos[i] for i in l])\n\ndataset = torch.tensor(encode(lines), dtype=torch.int8)\n# print(dataset.shape)\n\n# Method to generate training data and labels for batches\ndef get_batches(data, split, batch_size, context_window, config=MASTER_CONFIG):\n train = data[:int(.8 * len(data))]\n val = data[int(.8 * len(data)): int(.9 * len(data))] # validation\n test = data[int(.9 * len(data))]\n \n batch_data = train\n if split == \"val\":\n batch_data = val\n \n if split == \"test\":\n batch_data = test\n \n # pick random starting points\n ix = torch.randint(0, batch_data.size(0) - context_window - 1, (batch_size,))\n x = torch.stack([batch_data[i:i+context_window] for i in ix]).long()\n y = torch.stack([batch_data[i+1:i+context_window+1] for i in ix]).long()\n return x, y\n\nMASTER_CONFIG.update({\n 'batch_size': 8,\n 'context_window': 16\n})\n\nxs, ys = get_batches(dataset, 'train', MASTER_CONFIG['batch_size'], MASTER_CONFIG['context_window'])\n\nprint([(decode(xs[i].tolist()), 
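# Illustrative sketch (standalone core of get_batches above; note the original's test
# split, `data[int(.9 * len(data))]`, grabs a single element - a missing colon):
import torch

def sample_windows(data, batch_size, ctx):
    ix = torch.randint(0, data.size(0) - ctx - 1, (batch_size,))
    x = torch.stack([data[i:i + ctx] for i in ix]).long()
    y = torch.stack([data[i + 1:i + ctx + 1] for i in ix]).long()  # targets shifted by one
    return x, y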
decode(ys[i].tolist())) for i in range(len(xs))])","repo_name":"itsrainingmani/tinyllama","sub_path":"llama.py","file_name":"llama.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9002864676","text":"import cvzone\nimport cv2\n\n# Initialize the FPS class with an average count of 30 frames for smoothing\nfpsReader = cvzone.FPS(avgCount=30)\n\n# Initialize the webcam and set it to capture\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FPS, 30)  # Set the frames per second to 30\n\n# Main loop to capture frames and display FPS\nwhile True:\n    # Read a frame from the webcam\n    success, img = cap.read()\n\n    # Update the FPS counter and draw the FPS on the image\n    # fpsReader.update returns the current FPS and the updated image\n    fps, img = fpsReader.update(img, pos=(20, 50),\n                                bgColor=(255, 0, 255), textColor=(255, 255, 255),\n                                scale=3, thickness=3)\n\n    # Display the image with the FPS counter\n    cv2.imshow(\"Image\", img)\n\n    # Wait for 1 ms to show this frame, then continue to the next frame\n    cv2.waitKey(1)","repo_name":"cvzone/cvzone","sub_path":"Examples/FpsExample.py","file_name":"FpsExample.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"81"} +{"seq_id":"33500430883","text":"\n#!/usr/local/bin/python3.8\nimport random\nfrom random import randint\nList1=[]\ncount=0\nf=open(\"file.txt\",\"r\")\nlines=f.readlines()\nfor i in lines:\n    List1.append(i.strip())  # strip the newline so it cannot be picked as the hidden letter\nnum=len(lines)\nran=randint(0,num-1)\nword1=List1[ran]\ny=len(word1)\nz=randint(0,y-1)\ncon=list(word1)\nlet=con[z]\ncon[z] = \"_\"\nword2=' '.join([str(elem) for elem in con])\nprint(word2)\n\nturn=0\nwhile turn <= 2:\n    guelet=input(\"Please guess the letter: \")\n    if guelet == let:\n        x=0\n        break\n    else:\n        turn=turn+1\n        x=1\n\nif x == 0:\n    print(\"You won\")\nelif x == 1:\n    print(\"You lose\")\n\n","repo_name":"LavanyaYetham/Repo","sub_path":"Python_Scripts/Hangman2.py","file_name":"Hangman2.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39040698396","text":"import chromosome\nimport random\nimport copy\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\nfrom CSV import solutions_to_csv\n\n\nclass nsga2():\n\n    def __init__(self, seed, nombrePop, iterationNumber):\n\n        self.seed = seed\n        self.popSize = nombrePop\n        self.iterationNumber = iterationNumber\n        self.population = list()\n        self.front = list()\n\n    def init_population(self):\n        \"\"\" Init random population solutions with size popSize \"\"\"\n\n        random.seed(self.seed)\n\n        for _ in range(self.popSize):\n            solution = chromosome.tour(random.randint(0, 1000))\n            self.population.append(solution)\n\n    def non_dominated_sorting(self):\n        \"\"\" Sort solutions saved in the front according to the principle of dominance \"\"\"\n\n        frontIndex = 0\n\n        # For every pair of solutions, check which one dominates the other\n\n        for i in range(len(self.front[frontIndex])):\n            for j in range(i + 1, len(self.front[frontIndex])):\n\n                firstChallenger = self.front[frontIndex][i]\n                secondChallenger = self.front[frontIndex][j]\n\n                if (firstChallenger.dominates(secondChallenger)):\n                    firstChallenger.addDominated(secondChallenger)\n                    secondChallenger.incrementDominationCount()\n\n                elif (secondChallenger.dominates(firstChallenger)):\n                    secondChallenger.addDominated(firstChallenger)\n                    
firstChallenger.incrementDominationCount()\n\n        # Each \"tour\" now has dominationCount (the number of solutions that dominate it)\n        # and dominatedList: the list of solutions it dominates\n\n        # list that will hold the solutions to move to the next rank (because dominationCount != 0)\n\n        toMove = list()\n\n        while frontIndex < 3:\n\n            newFront = list()\n\n            # Start moving solutions\n\n            for solution in list(self.front[frontIndex]):\n\n                if solution.getDominationCount() != 0:\n\n                    newFront.append(solution)  # add to the new front\n                    # remove it from the previous front\n                    self.front[frontIndex].remove(solution)\n\n                else:\n\n                    # Keep the solution in the current front\n                    # Look at the front at frontIndex to see which solutions this one dominates:\n                    # for example, if the non-dominated solution A dominates solutions B and C,\n                    # the dominationCount of B and C is decremented by 1\n\n                    listeDomines = solution.getDominatedList()\n\n                    for domines in listeDomines:\n                        domines.decrementDominationCount()\n\n            # append the new front to self.front and iterate again\n            self.front.append(newFront)\n            frontIndex += 1\n            toMove.clear()\n\n        print(\"End of non-dominated sorting:\")\n        k = 0\n        for rank in self.front:\n            k += 1\n            print(\"Rank {}:\".format(k))\n            for solution in rank:\n                tot_dist, tot_risk = solution.get_total_dist()\n                print(\"Solution with tot dist {} and tot risk {}\".format(\n                    tot_dist, tot_risk))\n\n    def createOffsprings(self):\n        \"\"\" Create offspring by crossing over population solutions \"\"\"\n\n        self.front.clear()\n\n        firstFront = copy.deepcopy(self.population)\n        self.front.append(firstFront)\n\n        for i in range(len(self.population)):\n            for j in range(i + 1, len(self.population)):\n\n                newSolution = copy.deepcopy(self.population[i])\n                toCross = copy.deepcopy(self.population[j])\n\n                newSolution.crossover_type_1(toCross)\n\n                if self.findDuplicate(newSolution, self.front[0]):\n                    print(\"Impossible, duplicate found\")\n\n                else:\n                    print(\"Solution added to front [0]\")\n                    self.front[0].append(newSolution)\n\n                if len(self.front[0]) >= self.popSize:\n                    break\n                else:\n                    continue\n\n            if len(self.front[0]) >= self.popSize:\n                break\n\n    def findDuplicate(self, solution, front):\n        \"\"\" return True if solution already in front \"\"\"\n\n        duplicate = False\n\n        dist, risk = solution.get_total_dist()\n\n        for sol in front:\n            sol_dist, sol_risk = sol.get_total_dist()\n\n            if dist == sol_dist and risk == sol_risk:\n                duplicate = True\n\n        return duplicate\n\n    def combineAndSort(self):\n        \"\"\"Combine population and front, then apply non-dominated sorting\"\"\"\n\n        for pop in self.population:\n            self.front[0].append(copy.deepcopy(pop))\n\n        self.non_dominated_sorting()\n\n    def choosePopulation(self):\n        \"\"\"Choose the next generation's population (no duplicates)\"\"\"\n\n        self.population.clear()\n\n        for rank in self.front:\n\n            for solution in rank:\n                # check if the current solution is already in population\n                if not self.findDuplicate(solution, self.population):\n                    if len(self.population) < self.popSize:\n                        self.population.append(solution)\n                    else:\n                        return None\n\n    def showResultPareto(self):\n        \"\"\" print pareto frontier results \"\"\"\n\n        print(\"End result:\")\n\n        k = 0\n        risk_p = []\n        pareto = []\n        dist_p = []\n        text_file = open(str(self.popSize)+\"_\" +\n                         str(random.randint(0, 100))+\".txt\", \"w\")\n\n        for rank in self.front:\n            k += 1\n\n            if k != 1:\n                print(\"Rank {} non pareto:\".format(k))\n            else:\n                print(\"Rank Pareto!\")\n\n            risk_p.append([])\n            dist_p.append([])\n            for solution in rank:\n                tot_dist, tot_risk = 
solution.get_total_dist()\n                if tot_dist not in dist_p[k-1] or tot_risk not in risk_p[k-1]:  # no duplicates\n                    if k == 1:\n                        pareto.append(solution)\n                        text_file.write(str(solution)+\"\\n\")\n                        risk_p[0].append(tot_risk)\n                        dist_p[0].append(tot_dist)\n                    else:\n                        risk_p[k-1].append(tot_risk)\n                        dist_p[k-1].append(tot_dist)\n                    print(\"Solution with tot dist {} and tot risk {}\".format(\n                        tot_dist, tot_risk))\n        text_file.close()\n        self.createPlot(risk_p, dist_p, pareto)\n\n    def createPlot(self, risk_p, dist_p, pareto):\n        \"\"\"Create and show a plot of all pareto solutions and the other fronts' solutions\"\"\"\n        solutions_to_csv(pareto, \"NSGAII\")\n\n        # Make a graph with the solutions\n        style.use('ggplot')\n        plt.subplots(1)\n        # Pareto optimum colored in blue\n        plt.scatter(risk_p[0], dist_p[0], c=\"blue\")\n        # Front 2 colored in yellow\n        plt.scatter(risk_p[1], dist_p[1], c=\"yellow\")\n        # Front 3 colored in green\n        plt.scatter(risk_p[2], dist_p[2], c=\"green\")\n        plt.scatter(risk_p[3], dist_p[3], c=\"red\")  # Front 4 colored in red\n        plt.title('NSGA II')\n        plt.ylabel('Total Distance (m)')\n        plt.xlabel('Risk')\n        plt.autoscale(enable=True, axis='both')\n        plt.savefig(str(self.popSize)+\"_\"+str(random.randint(0, 100))+\".png\")\n        plt.show()\n\n    def execute(self):\n        \"\"\"Run the full NSGA-II algorithm\"\"\"\n        self.init_population()\n\n        i = 0\n        while (i < self.iterationNumber):\n\n            self.createOffsprings()\n            self.combineAndSort()\n            self.choosePopulation()\n\n            i += 1\n\n        self.showResultPareto()\n","repo_name":"Phucyro/algorithm-project","sub_path":"nsga2.py","file_name":"nsga2.py","file_ext":"py","file_size_in_byte":7906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"20446458238","text":"import argparse\nimport gc\nimport gcsfs\nimport nibabel as nib\nimport nilearn\nimport nobrainer\nimport numpy as np\nimport os\nimport os.path as op\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef interpolate_images(baseline, image, alphas):\n    alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n    baseline_x = tf.expand_dims(baseline, axis=0)\n    input_x = tf.expand_dims(image, axis=0)\n    delta = input_x - baseline_x\n    images = baseline_x + alphas_x * delta\n    return images\n\n\ndef compute_gradients(model, images, target_class):\n    with tf.GradientTape() as tape:\n        tape.watch(images)\n        raw_probs = model(images)\n        probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n\n    gradients = tape.gradient(probs, images)\n    return gradients\n\n\ndef integral_approximation(gradients):\n    # riemann_trapezoidal\n    grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n    return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(\n    model, baseline, image, target_class, m_steps=50, batch_size=32\n):\n    # 1. Generate alphas.\n    alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n\n    # Initialize TensorArray outside loop to collect gradients.\n    gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n\n    # Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.\n    for alpha in tf.range(0, len(alphas), batch_size):\n        from_ = alpha\n        to = tf.minimum(from_ + batch_size, len(alphas))\n        alpha_batch = alphas[from_:to]\n\n        # 2. Generate interpolated inputs between baseline and input.\n        interpolated_path_input_batch = interpolate_images(\n            baseline=baseline, image=image, alphas=alpha_batch\n        )\n\n        # 3. 
Compute gradients between model outputs and interpolated inputs.\n        gradient_batch = compute_gradients(\n            model=model,\n            images=interpolated_path_input_batch,\n            target_class=target_class,\n        )\n\n        # Write batch indices and gradients to extend TensorArray.\n        gradient_batches = gradient_batches.scatter(tf.range(from_, to), gradient_batch)\n\n    # Stack path gradients together row-wise into single tensor.\n    total_gradients = gradient_batches.stack()\n\n    # 4. Integral approximation through averaging gradients.\n    avg_gradients = integral_approximation(gradients=total_gradients)\n\n    # 5. Scale integrated gradients with respect to input.\n    return (image - baseline) * avg_gradients\n\n\ndef main(\n    gcs_bucket,\n    n_channels=5,\n    dataset_name=\"b0-tensorfa-dwiqc\",\n    model_dir=\"b0_tensorfa_dwiqc\",\n    dataset_seed=8,\n    target_class=1,\n    confusion_class=\"true_pos\",\n):\n    print(\"Setting gpu thread mode to gpu_private.\")\n    os.environ[\"TF_GPU_THREAD_MODE\"] = \"gpu_private\"\n\n    print(\"Configuring distribution strategy\")\n    use_tpu = False\n\n    try:\n        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=\"\")\n        tf.config.experimental_connect_to_cluster(resolver)\n        # This is the TPU initialization code that has to be at the beginning.\n        tf.tpu.experimental.initialize_tpu_system(resolver)\n        strategy = tf.distribute.TPUStrategy(resolver)\n\n        use_tpu = True\n        print(\"TPU detected.\")\n        print(\"All devices: \", tf.config.list_logical_devices(\"TPU\"))\n    except ValueError:\n        strategy = tf.distribute.MirroredStrategy()\n        print(\"GPUs detected.\")\n        print(\"Number of accelerators: \", strategy.num_replicas_in_sync)\n\n    # Train using mixed-precision policy\n    tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n\n    scope = strategy.scope()\n\n    # Setting location where training logs and checkpoints will be stored\n    GCS_BASE_PATH = f\"gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}\"\n    GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, \"saved_model\")\n    GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, \"integrated_gradients\")\n\n    fs = gcsfs.GCSFileSystem()\n\n    LOCAL_SAVED_MODEL_DIR = \"saved_model\"\n    LOCAL_OUTPUT_DIR = \"output\"\n    os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n    os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n\n    fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n\n    # Specify the datasets on GCP storage\n    GCS_DATA_PATH = f\"gs://{gcs_bucket}\"\n    GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, \"tfrecs\", dataset_name, \"all-data\")\n\n    if use_tpu:\n        device_alldata_dir = GCS_ALLDATA_DIR\n    else:\n        LOCAL_ALLDATA_DIR = op.join(\".\", \"tfrecs\", dataset_name, \"all-data\")\n        os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n        fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n        device_alldata_dir = LOCAL_ALLDATA_DIR\n\n    volume_shape = (128, 128, 128, n_channels)\n    element_spec = (\n        tf.TensorSpec(shape=(), dtype=tf.int64, name=None),\n        (\n            tf.TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None),\n            tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None),\n        ),\n    )\n\n    dataset = tf.data.experimental.load(\n        op.join(device_alldata_dir, confusion_class),\n        element_spec=element_spec,\n    )\n    volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n    baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n\n    print(\"Computing integrated gradients\")\n\n    with scope:\n        model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n\n    ig_attributions = [\n        integrated_gradients(\n            model=model,\n            baseline=baseline,\n            image=volume,\n            target_class=target_class,\n            
m_steps=128,\n batch_size=1,\n )\n for volume in volumes\n ]\n\n if target_class == 1:\n postfix = \"attribution_pass\"\n else:\n postfix = \"attribution_fail\"\n\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(\n ig_dataset,\n op.join(LOCAL_OUTPUT_DIR, f\"ig_{confusion_class}_{postfix}\"),\n )\n\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [\n {\n \"b0\": nib.Nifti1Image(volume[:, :, :, 3].numpy(), affine),\n \"color_fa\": nib.Nifti1Image(volume[:, :, :, :3].numpy(), affine),\n }\n for volume in volumes\n ]\n ig_niftis = [\n {\n \"b0\": nib.Nifti1Image(attribution[:, :, :, 3].numpy(), affine),\n \"color_fa\": nib.Nifti1Image(attribution[:, :, :, :3].numpy(), affine),\n \"sum\": nib.Nifti1Image(\n tf.math.reduce_sum(attribution[:, :, :, :4], axis=-1).numpy(), affine\n ),\n }\n for attribution in ig_attributions\n ]\n\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis, ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(\n value,\n op.join(LOCAL_OUTPUT_DIR, f\"{confusion_class}_{key}_{idx}.nii.gz\"),\n )\n\n for key, value in ig_nifti.items():\n nib.save(\n value,\n op.join(\n LOCAL_OUTPUT_DIR, f\"{confusion_class}_{postfix}_{key}_{idx}.nii.gz\"\n ),\n )\n\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--gcs_bucket\",\n type=str,\n help=(\n \"The name of the gcs bucket that will contain the saved models, \"\n \"checkpoints, etc.\"\n ),\n )\n parser.add_argument(\n \"--n_channels\",\n type=int,\n help=\"The number of channels in the data.\",\n default=5,\n )\n parser.add_argument(\n \"--dataset_name\",\n type=str,\n help=\"The name of the dataset in the tfrecs folder of the GCS bucket.\",\n default=\"b0-tensorfa-dwiqc\",\n )\n parser.add_argument(\n \"--model_dir\",\n type=str,\n help=\"The name of the GCS directory in which the tensorflow model is saved.\",\n default=\"b0_tensorfa_dwiqc\",\n )\n parser.add_argument(\n \"--dataset_seed\",\n type=int,\n help=\"The seed for the dataset\",\n default=8,\n )\n parser.add_argument(\n \"--target_class\",\n type=int,\n help=\"The target class for the integrated gradients.\",\n default=1,\n )\n parser.add_argument(\n \"--confusion_class\",\n type=str,\n help=\"The confusion class for which to compute integrated gradients\",\n default=\"true_pos\",\n )\n\n args = parser.parse_args()\n\n main(\n gcs_bucket=args.gcs_bucket,\n n_channels=args.n_channels,\n dataset_name=args.dataset_name,\n model_dir=args.model_dir,\n dataset_seed=args.dataset_seed,\n target_class=args.target_class,\n confusion_class=args.confusion_class,\n )\n","repo_name":"richford/hbn-pod2-qc","sub_path":"docker/dl-integrated-gradients-gcp/ig/ig/integrated_gradients.py","file_name":"integrated_gradients.py","file_ext":"py","file_size_in_byte":8902,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"73605874185","text":"from __future__ import absolute_import, unicode_literals\n\nimport json\nimport logging\nimport os\nimport pathlib\nimport tempfile\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.hooks.S3_hook import S3Hook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom airflow_salesforce_plugin.hooks.salesforce_hook import SalesforceHook\n\n\nclass SalesforceToS3Operator(BaseOperator):\n\n \"\"\"\n Writes 
results of a Salesforce soql statement to a given S3 Bucket.\n    \"\"\"\n\n    template_fields = (\n        \"soql\",\n        \"soql_args\",\n        \"s3_conn_id\",\n        \"s3_bucket\",\n        \"upload_timeout\",\n        \"object_key\",\n    )\n\n    @apply_defaults\n    def __init__(\n        self,\n        conn_id: str,\n        soql: str = None,\n        soql_args: str = None,\n        s3_conn_id: str = None,\n        s3_bucket: str = None,\n        object_key: str = None,\n        upload_timeout: int = 15,\n        *args,\n        **kwargs,\n    ) -> None:\n        \"\"\"\n        Build an instance of the Salesforce -> S3 operator.\n\n        :param conn_id: The HTTP or Direct connection id for Salesforce\n        :type conn_id: str\n        :param soql: The SOQL statement to execute (templated)\n        :type soql: str\n        :param soql_args: A string of comma delimited SOQL args to use (templated)\n        :type soql_args: str\n        :param s3_conn_id: The S3 Connection ID (templated)\n        :type s3_conn_id: str\n        :param s3_bucket: The S3 Bucket ID (templated)\n        :type s3_bucket: str\n        :param object_key: Name of the target S3 key (templated)\n        :type object_key: str\n        :param upload_timeout: Upload timeout, default 15 (templated)\n        :type upload_timeout: int\n        \"\"\"\n\n        super(SalesforceToS3Operator, self).__init__(*args, **kwargs)\n        # NOTE: template fields do not get applied until call to `self.execute`\n        self.conn_id = conn_id\n        self.soql = soql\n        self.soql_args = soql_args\n        self.s3_conn_id = s3_conn_id\n        self.s3_bucket = s3_bucket\n        self.upload_timeout = upload_timeout\n        self.object_key = object_key\n\n    def execute(self, context: Dict[str, Any]) -> List[str]:\n        \"\"\"\n        Runs the SOQL export and loads the resulting file into S3.\n\n        :param context: The context of the running DAG\n        :type context: Dict[str, Any]\n        :returns: The S3 keys of the loaded results\n        :rtype: List[str]\n        \"\"\"\n\n        sf_hook = SalesforceHook(self.conn_id)\n        filepath = tempfile.NamedTemporaryFile(suffix=\".csv\", delete=False)\n        self.filepath = filepath.name\n\n        resultfile = sf_hook.export(\n            self.soql % tuple(self.soql_args.split(\",\")), self.filepath\n        )\n        return [self._upload_file(resultfile)]\n\n    def _upload_file(self, resultfile: str) -> str:\n        s3_hook = S3Hook(self.s3_conn_id)\n        record_key = self.object_key\n        if s3_hook.check_for_key(record_key, bucket_name=self.s3_bucket):\n            self.log.warning(\n                f\"overwriting existing file for {record_key!r} in {s3_hook!r}\"\n            )\n        else:\n            self.log.info(\n                f\"creating object from {resultfile!r} at {record_key!r} in {s3_hook!r}\"\n            )\n\n        try:\n            attachment_content = pathlib.Path(resultfile).read_bytes()\n        except Exception as exc:\n            raise AirflowException(f\"error occurred on file from {resultfile!r}, {exc!s}\")\n        try:\n            s3_hook.load_bytes(attachment_content, record_key, bucket_name=self.s3_bucket)\n        except Exception as exc:\n            raise AirflowException(\n                f\"error occurred while trying to upload attachment from {resultfile!r}\"\n                f\" to key {record_key!r} in {s3_hook!r}, {exc}\"\n            )\n        return record_key\n","repo_name":"techalchemy/airflow-salesforce-plugin","sub_path":"src/airflow_salesforce_plugin/operators/salesforce_to_s3_operator.py","file_name":"salesforce_to_s3_operator.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30538106124","text":"from collections import deque\nfrom math import gcd, prod\nfrom operator import floordiv, mod\nclass Ops:\n    def __init__(self, operator, value) -> None:\n        self.operator = operator\n        self.value = value\n    \nclass Monkey:\n    def __init__(self, id=0, items=None, ops=None, test=None, action=None) -> None:\n        
self.id = id\n        self.items = items\n        self.ops = ops\n        self.test = test\n        self.action = action if action is not None else []  # avoid sharing one mutable default list across instances\n    \n\ndef parse_input():\n    monkeys = []\n    monkey = None\n    id = -1\n    with open(\"input.txt\") as file:\n        for line in file:\n            line = line.strip()\n            if 'Monkey' in line:\n                id += 1\n                monkey = Monkey(id=id)\n                monkey.total = 0\n                monkey.action = []\n            elif 'Starting items' in line:\n                currentItems = list(map(int, line.split(\":\")[1].strip().split(\",\")))\n                monkey.items = deque(currentItems)\n            elif 'Operation' in line:\n                operation = line.split(\"=\")[1].strip()\n                operator = None\n                operands = None\n                if \"+\" in operation:\n                    operator = \"+\"\n                    operands = operation.split(\"+\")\n                else:\n                    operator = \"*\"\n                    operands = operation.split(\"*\")\n                \n                monkey.ops = Ops(operator, operands)\n            elif \"Test\" in line:\n                test = int(line.split()[-1])\n                monkey.test = test\n            elif \"If true\" in line:\n                val = int(line.split()[-1])\n                monkey.action.append(val)\n            elif \"If false\" in line:\n                val = int(line.split()[-1])\n                monkey.action.append(val)\n            elif line == \"\":\n                monkeys.append(monkey)\n    return monkeys\n    \ndef perform_operation(sign, old, new):\n    if sign == \"+\":\n        return old + new\n    elif sign == \"*\":\n        return old * new\n\ndef solve(monkeys, T, div, _op):\n    while T > 0:\n        for monkey in monkeys:\n            while monkey.items:\n                item = monkey.items.popleft()\n                monkey.total += 1\n                ops = monkey.ops\n                op1, op2 = ops.value\n                op1 = op1.strip()\n                op2 = op2.strip()\n                if op1 == \"old\":\n                    op1 = item\n                else:\n                    op1 = int(op1)\n                if op2 == \"old\":\n                    op2 = item\n                else:\n                    op2 = int(op2)\n                \n                currentVal = _op(perform_operation(ops.operator, op1, op2), div)\n                \n                if currentVal % monkey.test == 0:\n                    monkeys[monkey.action[0]].items.append(currentVal)\n                else:\n                    monkeys[monkey.action[1]].items.append(currentVal)\n        T -= 1\n\n    tmp = []\n    for monkey in monkeys:\n        tmp.append(monkey.total)\n    tmp.sort()\n    return tmp[-1]*tmp[-2]\n    \n\n\nmonkeys = parse_input()\n#print(solve(monkeys, 20, 3, floordiv)) # part 1\n\ndiv = 1\nfor m in monkeys:\n    div *= m.test\n\nprint(solve(monkeys, 10000, div, mod)) # part 2\n\n","repo_name":"AjayKrP/AOC2022","sub_path":"day11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31668578543","text":"from django.conf.urls import patterns, url\n\nfrom smartcontract import views\n\nurlpatterns = patterns('',\n    url(r'^test/$', views.test_render_view, name='test'),\n    url(r'^detail/(\\d+)/$', views.test_render_form_view, name='detail'),\n    url(r'^test/pdf\\.js/(\\d+)/$', views.test_pdf_view),\n    url(r'^index/$', views.index_view, name='index'),\n\n    url(r'^download409/(\\d+)/$', views.download_0409_view, name='download409'),\n)\n\n","repo_name":"tianshuwei/legal-consultation-website","sub_path":"smartcontract/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3181416761","text":"import json\nimport os\nfrom concurrent.futures.thread import ThreadPoolExecutor\n\nfrom django.http import HttpResponse, JsonResponse\nfrom rest_framework import status\n\nfrom daksha.settings import REPO_NAME, BRANCH_NAME, TEST_RESULT_DB\nfrom .errors import UnsupportedFileSourceError, BadArgumentsError\nfrom .testreport_generator import *\nfrom .thread_executor import thread_executor\nfrom .utils.utils import read_yaml, read_local_yaml, get_yml_files_in_folder_local, get_yml_files_in_folder_git\nfrom 
.models import TestResults, GetTestResultsResponse\n\n\n# Create your views here.\ndef executor(request):\n    \"\"\"\n    Receives http request and starts execution of Test\n\n    \"\"\"\n    if request.method == 'POST':\n        try:\n            test_uuid = generate_test_uuid()\n            os.makedirs(f\"{STORAGE_PATH}/{test_uuid}\")\n            logger.info('Directory created at: ' + f\"{STORAGE_PATH}/{test_uuid}\")\n            received_json_data = json.loads(request.body.decode())\n            try:\n                test_ymls, initial_variable_dictionary = __extract_test_data(test_uuid, received_json_data['test'])\n            except BadArgumentsError as e:\n                return HttpResponse(str(e), status=status.HTTP_400_BAD_REQUEST)\n            pool_executor = ThreadPoolExecutor(max_workers=1)\n            try:\n                pool_executor.submit(thread_executor, test_ymls, initial_variable_dictionary, test_uuid,\n                                     received_json_data['email'])\n                logger.info(\"task submitted to thread pool executor\")\n            except Exception as e:\n                logger.error(\"Exception occurred\", exc_info=True)\n            response_message = \"Your Test UUID is: \" + test_uuid + \". We'll send you an email with the report shortly\"\n            return HttpResponse(response_message, status=status.HTTP_200_OK)\n        except Exception as e:\n            logger.error(\"Exception caught\", exc_info=True)\n            return HttpResponse(e, status=status.HTTP_400_BAD_REQUEST)\n\n    else:\n        return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\ndef testresultsretriever(request, testuuid):\n    \"\"\"\n    Receives GET request and returns relevant data from the database\n\n    \"\"\"\n    errors = []\n    testresults = []\n    if request.method == \"GET\":\n        try:\n            logger.info(f\"Fetching Test Results from database for TestUUID {testuuid}\")\n            if TEST_RESULT_DB is not None and TEST_RESULT_DB.lower() == \"postgres\":\n                logger.info(\"Database Functionality is opted for\")\n                test_results_for_uuid = (\n                    TestResults.objects.all().filter(TestUUID=testuuid).values()\n                )\n                if not test_results_for_uuid:\n                    logger.info(f\"No Test with TestUUID {testuuid} is present in the database\")\n                    errors.append(\n                        f\"Bad Request: No Test with the TestUUID {testuuid}\"\n                    )\n                    fetched_test_results = GetTestResultsResponse(testresults, errors)\n                    # Since the model GetTestResultsResponse is not serializable, we dump the contents to a string and load it back as JSON\n                    fetched_test_results_json_string = json.dumps(\n                        fetched_test_results.__dict__, default=str\n                    )\n                    fetched_test_results_json = json.loads(\n                        fetched_test_results_json_string\n                    )\n                    return JsonResponse(\n                        fetched_test_results_json, status=status.HTTP_400_BAD_REQUEST\n                    )\n\n                if request.GET.get('testName', None) is not None:\n                    testname = request.GET.get('testName')\n                    logger.info(f\"User has opted for test results of test {testname} in the TestUUID {testuuid}\")\n                    test_result_for_testname = (\n                        TestResults.objects.all()\n                        .filter(TestUUID=testuuid, TestName=testname)\n                        .values()\n                    )\n                    if test_result_for_testname:\n                        logger.info(f\"Fetching Test result for test name {testname} of TestUUID {testuuid}\")\n                        testresults.append(test_result_for_testname[0])\n                    else:\n                        logger.info(f\"No Test in TestUUID {testuuid} has TestName {testname}\")\n                        errors.append(\n                            f\"Bad Request: No Test in TestUUID {testuuid} has TestName {testname}\"\n                        )\n                else:\n                    logger.info(\n                        f\"Since no Test names were provided in the query parameters, all Tests in TestUUID {testuuid} will be returned\"\n                    )\n                    for test_result_for_uuid in test_results_for_uuid:\n                        testresults.append(test_result_for_uuid)\n\n                if errors:\n                    testresults.clear()\n                    fetched_test_results = GetTestResultsResponse(testresults, errors)\n                    
fetched_test_results_json_string = json.dumps(\n                        fetched_test_results.__dict__, default=str\n                    )\n                    fetched_test_results_json = json.loads(\n                        fetched_test_results_json_string\n                    )\n                    logger.error(f\"Bad Request: {fetched_test_results_json}\")\n                    return JsonResponse(\n                        fetched_test_results_json, status=status.HTTP_400_BAD_REQUEST\n                    )\n                else:\n                    fetched_test_results = GetTestResultsResponse(testresults, errors)\n                    fetched_test_results_json_string = json.dumps(\n                        fetched_test_results.__dict__, default=str\n                    )\n                    fetched_test_results_json = json.loads(\n                        fetched_test_results_json_string\n                    )\n                    logger.info(f\"Returning data: {fetched_test_results_json}\")\n                    return JsonResponse(fetched_test_results_json)\n\n            else:\n                logger.error(\"Database Functionality is not opted for. Hence GET request can't be processed\")\n                errors.append(\"Database Functionality is not opted for. Hence GET request can't be processed\")\n                fetched_test_results = GetTestResultsResponse(testresults, errors)\n                fetched_test_results_json_string = json.dumps(fetched_test_results.__dict__, default=str)\n                fetched_test_results_json = json.loads(fetched_test_results_json_string)\n                return JsonResponse(fetched_test_results_json, status=status.HTTP_400_BAD_REQUEST)\n        except Exception as e:\n            logger.error(\"Exception caught\", exc_info=True)\n            errors.append(\"Exception Caught: \" + str(e))\n            fetched_test_results = GetTestResultsResponse(testresults, errors)\n            fetched_test_results_json_string = json.dumps(fetched_test_results.__dict__, default=str)\n            fetched_test_results_json = json.loads(fetched_test_results_json_string)\n            return JsonResponse(fetched_test_results_json, status=status.HTTP_400_BAD_REQUEST)\n    else:\n        logger.error(\"Method not allowed\")\n        errors.append(\"Method not allowed\")\n        fetched_test_results = GetTestResultsResponse(testresults, errors)\n        fetched_test_results_json_string = json.dumps(fetched_test_results.__dict__, default=str)\n        fetched_test_results_json = json.loads(fetched_test_results_json_string)\n        return JsonResponse(fetched_test_results_json, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\ndef __extract_test_data(test_uuid, test):\n    initial_variable_dictionary = {}\n    if \"variables\" in test:\n        initial_variable_dictionary = test['variables']\n    test_ymls = []\n    source_location = test['source']\n    location_type = test['type']\n    path = test['path']\n    if \"git\" in source_location.lower():\n        if \"file\" == location_type:\n            files = [path]\n        else:\n            files = get_yml_files_in_folder_git(REPO_NAME, BRANCH_NAME, path)\n        for file_path in files:\n            test_yml = read_yaml(REPO_NAME, BRANCH_NAME, file_path, test_uuid)\n            test_ymls.append(test_yml)\n\n    elif \"local\" in source_location.lower():\n        if \"file\" == location_type:\n            files = [path]\n        else:\n            files = get_yml_files_in_folder_local(path)\n        for file_path in files:\n            test_yml = read_local_yaml(file_path)\n            test_ymls.append(test_yml)\n\n    else:\n        error_message = \"source_location = %s is not supported, please use git or local\" % source_location\n        logger.error(error_message)\n        raise UnsupportedFileSourceError(error_message)\n\n    return test_ymls, initial_variable_dictionary\n","repo_name":"mykaarma/daksha","sub_path":"engine/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8955,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"74691361226","text":"import concurrent.futures\nimport unittest\nfrom math import floor\nfrom random import randint\n\nimport connexion\n\nfrom test_base import 
BaseTestCase\n\n\nclass ConcurrencyTestCase(BaseTestCase):\n    def setUp(self):\n        app = connexion.FlaskApp(__name__)\n        app.debug = True\n        app.testing = True\n        app.add_api(\"../api_schema.yml\")\n        self.client = app.app.test_client()\n\n    def get_random_event(self):\n        event_response_obj = super().get_all_events()\n        num_events = len(event_response_obj)\n        random_event_num = randint(0, num_events - 1)  # cover every valid index; starting at 1 skipped the first event\n\n        return event_response_obj[random_event_num]\n\n    @staticmethod\n    def get_random_number_tickets(maximum):\n        if maximum == 1:\n            return 1\n\n        # minimize chance of collision in case two threads simultaneously choose the same event\n        return randint(1, floor(maximum/2))\n\n    # A test to simulate a burst of traffic to the API.\n    # num_requests threads are spawned simultaneously and each thread:\n    # 1. creates a new reservation for a random event for a random number of tickets\n    # and either\n    # 2a. sends a modify_reservation request to add a ticket to the reservation\n    # 2b. or cancels the reservation\n    def test_concurrent_requests(self):\n        num_requests = 10\n\n        def make_requests():\n            event = self.get_random_event()\n            event_id = event['id']\n            tickets_available = event['tickets_available']\n            if tickets_available < 1:\n                return\n\n            number_tickets = self.get_random_number_tickets(tickets_available)\n\n            reservation_id = self.create_reservation(event_id, number_tickets)\n            self.assertGreater(reservation_id, 0)\n\n            new_num_tickets_available = self.assert_or_get_number_tickets_remaining(event_id)\n            if randint(1, 10) <= 5 and new_num_tickets_available > 1:\n                # up to 50% of threads will attempt to modify their reservation\n                response = self.client.patch(f\"/events/{event_id}/reservations/{reservation_id}\", json={\"tickets\": 1})\n                self.assert_200_status_code(response, f\"testing patch for event_id:{event_id}/reservation_id:{reservation_id}\")\n            else:\n                # the remainder will cancel their reservation\n                response = self.client.delete(f\"/events/{event_id}/reservations/{reservation_id}\")\n                self.assert_200_status_code(response, f\"testing delete for event_id:{event_id}/reservation_id:{reservation_id}\")\n\n        with concurrent.futures.ThreadPoolExecutor() as executor:\n            # Submit the request function to the executor for concurrent execution\n            futures = [executor.submit(make_requests) for _ in range(num_requests)]\n\n            # Wait for all the requests to complete\n            for future in concurrent.futures.as_completed(futures):\n                try:\n                    future.result()\n                except Exception as e:\n                    self.fail(f\"Request raised an exception: {e}\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"asyncro/TicketService","sub_path":"test/test_concurrency.py","file_name":"test_concurrency.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30248234419","text":"\"\"\"\n@author: Amilcar Rodriguez\n@e-mail: aar.velasquez@gmail.com\n@github: mkhi26\n\"\"\"\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nclass BstToNx:\n    def __init__(self, tree):\n        \"\"\"\n        Receives a binary tree as a parameter\n        \"\"\"\n        self.tree = tree\n\n        self.g = nx.Graph()\n        self.g.add_node(self.tree.root.value)\n\n    def addToGraph(self):\n        \"\"\"\n        Adds the tree to the networkX graph in inorder\n        \"\"\"\n        return self.inorder(self.tree.root)\n    \n    def inorder(self, current):\n        if current is None:\n            return None\n        else:\n            parent = current.value\n            left = current.left\n            right = current.right\n            if left:\n                self.g.add_edge(parent, left.value)\n            if right:\n                
self.g.add_edge(parent, right.value)\n            self.inorder(current.left)\n            \n            self.inorder(current.right)\n\n    def draw(self, nameImg):\n        \"\"\"\n        Draws the tree\n        \"\"\"\n        plt.clf()\n        G = self.g\n        pos = nx.nx_agraph.graphviz_layout(G, prog=\"dot\")\n        nx.draw(G, pos, with_labels=True, node_size=800, node_color=\"purple\", arrows=False, node_shape=\"o\", font_size=10)\n        plt.savefig(\"%s.png\" % nameImg)\n        plt.show()\n        return True\n","repo_name":"mkhi33/BST","sub_path":"Graph/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38047792666","text":"# -*- coding: utf-8 -*-\nfrom typing import Dict\nimport numpy as np\n\nfrom scipy.sparse import linalg as sl\nfrom structengpy.common.tolerance import Tolerance\n\nfrom structengpy.core.fe_model.node import Node\nfrom structengpy.core.fe_model.element.line.beam import Beam\nfrom structengpy.core.fe_model.element.tri.membrane import Membrane3\nfrom structengpy.core.fe_model.element.quad.membrane import Membrane4\n\nclass Model:\n    def __init__(self):\n        self.__nodes:Dict[str,Node]={}\n        self.__beams:Dict[str,Beam]={}\n        self.__membrane3s={}\n        self.__membrane4s={}\n\n        self.__hid:Dict[str,Dict[str,int]]={}\n        self.__hid['node']={}\n        self.__hid['beam']={}\n        self.__hid['membrane3s']={}\n        self.__hid['membrane4s']={}\n        \n        self.__index=[]\n        self.__dof=None\n\n        self.__pattern={}\n        self.__loadcase={}\n        \n    @property\n    def node_count(self):\n        return len(self.__nodes.items())\n\n    @property\n    def beam_count(self):\n        return len(self.__beams.items())\n    \n    @property\n    def nodes(self):\n        return self.__nodes\n    \n    @property\n    def beams(self):\n        return self.__beams\n    \n    @property\n    def membrane3s(self):\n        return self.__membrane3s\n\n    @property\n    def membrane4s(self):\n        return self.__membrane4s\n    \n    @property \n    def index(self):\n        return self.__index\n    \n    def add_node(self,name:str,x:float,y:float,z:float,check_dup=False)->int:\n        node=Node(name,x,y,z)\n        if check_dup:\n            tol=Tolerance.abs_tol()\n            res=[a for a in self.__nodes.values() if np.linalg.norm(a.loc-np.array([x,y,z]))